don't use SEM_UNDO because of ridiculously small undo limits on some
[kai/samba.git] / source3 / locking / shmem_sysv.c
1 /* 
2    Unix SMB/Netbios implementation.
3    Version 1.9.
4    Shared memory functions - SYSV IPC implementation
5    Copyright (C) Andrew Tridgell 1997
6    
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License as published by
9    the Free Software Foundation; either version 2 of the License, or
10    (at your option) any later version.
11    
12    This program is distributed in the hope that it will be useful,
13    but WITHOUT ANY WARRANTY; without even the implied warranty of
14    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15    GNU General Public License for more details.
16    
17    You should have received a copy of the GNU General Public License
18    along with this program; if not, write to the Free Software
19    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20
21 */
22
23 #include "includes.h"
24
25
#ifdef USE_SYSV_IPC

extern int DEBUGLEVEL;

/* well-known IPC keys: every smbd process attaches the same shared
   memory segment and semaphore set */
#define SHMEM_KEY ((key_t)0x280267)
#define SEMAPHORE_KEY (SHMEM_KEY+2)

/* magic number and layout version stamped into the segment header so
   shm_validate_header() can detect a foreign or stale segment */
#define SHM_MAGIC 0x53484100
#define SHM_VERSION 2

#ifdef SHM_R
#define IPC_PERMS ((SHM_R | SHM_W) | (SHM_R>>3) | (SHM_R>>6))
#else
#define IPC_PERMS 0644
#endif

#ifdef SECURE_SEMAPHORES
/* secure semaphores are slow because we have to do a become_root()
   on every call! */
#define SEMAPHORE_PERMS IPC_PERMS
#else
#define SEMAPHORE_PERMS 0666
#endif

/* semaphore 0 is the global lock; the rest are hash-bucket locks, so
   where the per-set limit SEMMSL is known, leave one for the lock */
#ifdef SEMMSL
#define SHMEM_HASH_SIZE (SEMMSL-1)
#else
#define SHMEM_HASH_SIZE 63
#endif

/* smallest shared memory segment we are prepared to work with */
#define MIN_SHM_SIZE 0x1000

static int shm_id;             /* SYSV shared memory segment id */
static int sem_id;             /* SYSV semaphore set id */
static int shm_size;           /* size in bytes of the attached segment */
static int hash_size;          /* hash buckets (= semaphores in set - 1) */
static int global_lock_count;  /* recursion depth of global_lock() */

/* header at the start of the shared segment; all bookkeeping is in
   offsets (not pointers) because each process may map the segment at
   a different address */
struct ShmHeader {
   int shm_magic;
   int shm_version;
   int total_size;	/* in bytes */
   BOOL consistent;     /* False while a writer is mid-update */
   int first_free_off;  /* offset of head of the free-block list */
   int userdef_off;    /* a userdefined offset. can be used to store
			  root of tree or list */
   struct {             /* a cell is a range of bytes of sizeof(struct
			   ShmBlockDesc) size */
	   int cells_free;
	   int cells_used;
	   int cells_system; /* number of cells used as allocated
				block descriptors */
   } statistics;
};

#define SHM_NOT_FREE_OFF (-1)
/* descriptor that precedes every block (free or allocated) */
struct ShmBlockDesc
{
   int next;	/* offset of next block in the free list or
		   SHM_NOT_FREE_OFF when block in use */
   int size;   /* user size in BlockDescSize units */
};

/* sentinels marking the end of the free list, in pointer and offset form */
#define EOList_Addr	NULL
#define EOList_Off      (0)

#define CellSize        sizeof(struct ShmBlockDesc)

/* HeaderSize aligned on a 8 byte boundary */
#define AlignedHeaderSize ((sizeof(struct ShmHeader)+7) & ~7)

static struct ShmHeader *shm_header_p = NULL;  /* local mapping of segment */
static BOOL shm_initialize_called = False;

static int read_only;  /* non-zero when attached with SHM_RDONLY */
101
102 static BOOL sem_change(int i, int op)
103 {
104 #ifdef SECURE_SEMAPHORES
105         extern struct current_user current_user;
106         int became_root=0;
107 #endif
108         struct sembuf sb;
109         int ret;
110
111         if (read_only) return True;
112
113 #ifdef SECURE_SEMAPHORES
114         if (current_user.uid != 0) {
115                 become_root(0);
116                 became_root = 1;
117         }
118 #endif
119
120         sb.sem_num = i;
121         sb.sem_op = op;
122         sb.sem_flg = 0;
123
124         ret = semop(sem_id, &sb, 1);
125
126         if (ret != 0) {
127                 DEBUG(0,("ERROR: sem_change(%d,%d) failed (%s)\n", 
128                          i, op, strerror(errno)));
129         }
130
131 #ifdef SECURE_SEMAPHORES
132         if (became_root) {
133                 unbecome_root(0);
134         }
135 #endif
136
137         return ret == 0;
138 }
139
140 static BOOL global_lock(void)
141 {
142         global_lock_count++;
143         if (global_lock_count == 1)
144                 return sem_change(0, -1);
145         return True;
146 }
147
148 static BOOL global_unlock(void)
149 {
150         global_lock_count--;
151         if (global_lock_count == 0)
152                 return sem_change(0, 1);
153         return True;
154 }
155
156 static void *shm_offset2addr(int offset)
157 {
158    if (offset == 0 )
159       return (void *)(0);
160    
161    if (!shm_header_p)
162       return (void *)(0);
163    
164    return (void *)((char *)shm_header_p + offset);
165 }
166
167 static int shm_addr2offset(void *addr)
168 {
169    if (!addr)
170       return 0;
171    
172    if (!shm_header_p)
173       return 0;
174    
175    return (int)((char *)addr - (char *)shm_header_p);
176 }
177
178
/* allocate "size" bytes from the shared area using first-fit on the
   free list.  Returns the offset of the user data, or 0 on failure.
   Offsets (not pointers) are returned because each process may map the
   segment at a different address. */
static int shm_alloc(int size)
{
        unsigned num_cells ;
        struct ShmBlockDesc *scanner_p;
        struct ShmBlockDesc *prev_p;
        struct ShmBlockDesc *new_p;
        int result_offset;
   
   
        if (!shm_header_p) {
                /* not mapped yet */
                DEBUG(0,("ERROR shm_alloc : shmem not mapped\n"));
                return 0;
        }
        
        global_lock();
        
        if (!shm_header_p->consistent) {
                DEBUG(0,("ERROR shm_alloc : shmem not consistent\n"));
                global_unlock();
                return 0;
        }
        
        /* calculate the number of cells (CellSize units, rounded up) */
        num_cells = (size + (CellSize-1)) / CellSize;
        
        /* set start of scan */
        prev_p = (struct ShmBlockDesc *)shm_offset2addr(shm_header_p->first_free_off);
        scanner_p =     prev_p ;
        
        /* scan the free list to find a matching free space */
        while ((scanner_p != EOList_Addr) && (scanner_p->size < num_cells)) {
                prev_p = scanner_p;
                scanner_p = (struct ShmBlockDesc *)shm_offset2addr(scanner_p->next);
        }
   
        /* at this point scanner point to a block header or to the end of
           the list */
        if (scanner_p == EOList_Addr) {
                DEBUG(0,("ERROR shm_alloc : alloc of %d bytes failed\n",size));
                global_unlock();
                return (0);
        }
   
        /* going to modify shared mem */
        shm_header_p->consistent = False;
        
        /* if we found a good one : scanner == the good one.
           Split only when the remainder can hold a descriptor plus at
           least one usable cell (hence the +2). */
        if (scanner_p->size > num_cells + 2) {
                /* Make a new free block from the tail: the new
                   descriptor sits one cell past the allocated cells */
                new_p = scanner_p + 1 + num_cells;
                new_p->size = scanner_p->size - (num_cells + 1);
                new_p->next = scanner_p->next;
                scanner_p->size = num_cells;
                scanner_p->next = shm_addr2offset(new_p);

                /* one cell is consumed by the new block descriptor */
                shm_header_p->statistics.cells_free -= 1;
                shm_header_p->statistics.cells_system += 1;
        }

        /* take it from the free list (prev_p == scanner_p means it was
           the list head) */
        if (prev_p == scanner_p) {
                shm_header_p->first_free_off = scanner_p->next;
        } else {
                prev_p->next = scanner_p->next;
        }
        shm_header_p->statistics.cells_free -= scanner_p->size;
        shm_header_p->statistics.cells_used += scanner_p->size;

        /* user data starts right after the block descriptor; a next of
           SHM_NOT_FREE_OFF marks the block as in use */
        result_offset = shm_addr2offset(&(scanner_p[1]));
        scanner_p->next = SHM_NOT_FREE_OFF;

        /* end modification of shared mem */
        shm_header_p->consistent = True;

        global_unlock();
        
        DEBUG(6,("shm_alloc : allocated %d bytes at offset %d\n",
                 size,result_offset));

        return result_offset;
}   
261
/* coalesce two adjacent free blocks: if head_p and its successor on the
   free list are physically contiguous, merge the successor (and its
   descriptor cell) into head_p.  The free list is kept sorted by
   address, so only the immediate successor need be checked. */
static void shm_solve_neighbors(struct ShmBlockDesc *head_p )
{
        struct ShmBlockDesc *next_p;
   
        /* Check if head_p and head_p->next are neighbors and if so
           join them */
        if ( head_p == EOList_Addr ) return ;
        if ( head_p->next == EOList_Off ) return ;
   
        next_p = (struct ShmBlockDesc *)shm_offset2addr(head_p->next);
        /* contiguous means: head's descriptor + its size cells + one
           cell for next's descriptor lands exactly on next_p */
        if ((head_p + head_p->size + 1) == next_p) {
                head_p->size += next_p->size + 1; /* adapt size */
                head_p->next = next_p->next; /* link out */
      
                /* the absorbed descriptor cell becomes free space */
                shm_header_p->statistics.cells_free += 1;
                shm_header_p->statistics.cells_system -= 1;
        }
}
280
281
/* return the block whose user data lives at "offset" to the free list,
   keeping the list sorted by address and coalescing with physical
   neighbors.  Returns False on a bad offset or unusable segment. */
static BOOL shm_free(int offset)
{
        struct ShmBlockDesc *header_p; /* pointer to header of
                                          block to free */
        struct ShmBlockDesc *scanner_p; /* used to scan the list */
        struct ShmBlockDesc *prev_p; /* holds previous in the
                                        list */
   
        if (!shm_header_p) {
                /* not mapped yet */
                DEBUG(0,("ERROR shm_free : shmem not mapped\n"));
                return False;
        }
        
        global_lock();
        
        if (!shm_header_p->consistent) {
                DEBUG(0,("ERROR shm_free : shmem not consistent\n"));
                global_unlock();
                return False;
        }
        
        /* make pointer to header of block (descriptor immediately
           precedes the user data) */
        header_p = ((struct ShmBlockDesc *)shm_offset2addr(offset) - 1); 
        
        /* an in-use block must carry SHM_NOT_FREE_OFF; anything else
           means the offset was never returned by shm_alloc (or was
           already freed) */
        if (header_p->next != SHM_NOT_FREE_OFF) {
                DEBUG(0,("ERROR shm_free : bad offset (%d)\n",offset));
                global_unlock();
                return False;
        }
        
        /* find a place in the free_list to put the header in */
        
        /* set scanner and previous pointer to start of list */
        prev_p = (struct ShmBlockDesc *)
                shm_offset2addr(shm_header_p->first_free_off);
        scanner_p = prev_p ;
        
        while ((scanner_p != EOList_Addr) && 
               (scanner_p < header_p)) { 
                /* while we didn't scan past its position */
                prev_p = scanner_p ;
                scanner_p = (struct ShmBlockDesc *)
                        shm_offset2addr(scanner_p->next);
        }
        
        shm_header_p->consistent = False;
        
        DEBUG(6,("shm_free : freeing %d bytes at offset %d\n",
                 header_p->size*CellSize,offset));
        
        /* scanner_p == prev_p only when the loop never advanced, i.e.
           the block belongs at (or before) the head of the list */
        if (scanner_p == prev_p) {
                shm_header_p->statistics.cells_free += header_p->size;
                shm_header_p->statistics.cells_used -= header_p->size;
                
                /* we must free it at the beginning of the list */
                shm_header_p->first_free_off = shm_addr2offset(header_p);
                /* set the free_list_pointer to this block_header */
                
                /* scanner is the one that was first in the list */
                header_p->next = shm_addr2offset(scanner_p);
                shm_solve_neighbors(header_p); 
                
                shm_header_p->consistent = True;
        } else {
                shm_header_p->statistics.cells_free += header_p->size;
                shm_header_p->statistics.cells_used -= header_p->size;
                
                /* splice between prev_p and scanner_p, then try to
                   merge with both physical neighbors */
                prev_p->next = shm_addr2offset(header_p);
                header_p->next = shm_addr2offset(scanner_p);
                shm_solve_neighbors(header_p) ;
                shm_solve_neighbors(prev_p) ;
           
                shm_header_p->consistent = True;
        }

        global_unlock();
        return True;
}
361
362
363 /* 
364  * Function to create the hash table for the share mode entries. Called
365  * when smb shared memory is global locked.
366  */
367 static BOOL shm_create_hash_table(unsigned int hash_entries)
368 {
369         int size = hash_entries * sizeof(int);
370
371         global_lock();
372         shm_header_p->userdef_off = shm_alloc(size);
373
374         if(shm_header_p->userdef_off == 0) {
375                 DEBUG(0,("shm_create_hash_table: Failed to create hash table of size %d\n",
376                          size));
377                 global_unlock();
378                 return False;
379         }
380
381         /* Clear hash buckets. */
382         memset(shm_offset2addr(shm_header_p->userdef_off), '\0', size);
383         global_unlock();
384         return True;
385 }
386
387
388 static BOOL shm_validate_header(int size)
389 {
390         if(!shm_header_p) {
391                 /* not mapped yet */
392                 DEBUG(0,("ERROR shm_validate_header : shmem not mapped\n"));
393                 return False;
394         }
395    
396         if(shm_header_p->shm_magic != SHM_MAGIC) {
397                 DEBUG(0,("ERROR shm_validate_header : bad magic\n"));
398                 return False;
399         }
400
401         if(shm_header_p->shm_version != SHM_VERSION) {
402                 DEBUG(0,("ERROR shm_validate_header : bad version %X\n",
403                          shm_header_p->shm_version));
404                 return False;
405         }
406    
407         if(shm_header_p->total_size != size) {
408                 DEBUG(0,("ERROR shmem size mismatch (old = %d, new = %d)\n",
409                          shm_header_p->total_size,size));
410                 return False;
411         }
412
413         if(!shm_header_p->consistent) {
414                 DEBUG(0,("ERROR shmem not consistent\n"));
415                 return False;
416         }
417         return True;
418 }
419
420
421 static BOOL shm_initialize(int size)
422 {
423         struct ShmBlockDesc * first_free_block_p;
424         
425         DEBUG(5,("shm_initialize : initializing shmem size %d\n",size));
426    
427         if( !shm_header_p ) {
428                 /* not mapped yet */
429                 DEBUG(0,("ERROR shm_initialize : shmem not mapped\n"));
430                 return False;
431         }
432    
433         shm_header_p->shm_magic = SHM_MAGIC;
434         shm_header_p->shm_version = SHM_VERSION;
435         shm_header_p->total_size = size;
436         shm_header_p->first_free_off = AlignedHeaderSize;
437         shm_header_p->userdef_off = 0;
438         
439         first_free_block_p = (struct ShmBlockDesc *)
440                 shm_offset2addr(shm_header_p->first_free_off);
441         first_free_block_p->next = EOList_Off;
442         first_free_block_p->size = 
443                 (size - (AlignedHeaderSize+CellSize))/CellSize;   
444         shm_header_p->statistics.cells_free = first_free_block_p->size;
445         shm_header_p->statistics.cells_used = 0;
446         shm_header_p->statistics.cells_system = 1;
447    
448         shm_header_p->consistent = True;
449    
450         shm_initialize_called = True;
451
452         return True;
453 }
454    
455 static BOOL shm_close( void )
456 {
457         return True;
458 }
459
460
461 static int shm_get_userdef_off(void)
462 {
463    if (!shm_header_p)
464       return 0;
465    else
466       return shm_header_p->userdef_off;
467 }
468
469
470 /*******************************************************************
471   Lock a particular hash bucket entry.
472   ******************************************************************/
473 static BOOL shm_lock_hash_entry(unsigned int entry)
474 {
475         return sem_change(entry+1, -1);
476 }
477
478 /*******************************************************************
479   Unlock a particular hash bucket entry.
480   ******************************************************************/
481 static BOOL shm_unlock_hash_entry(unsigned int entry)
482 {
483         return sem_change(entry+1, 1);
484 }
485
486
487 /*******************************************************************
488   Gather statistics on shared memory usage.
489   ******************************************************************/
490 static BOOL shm_get_usage(int *bytes_free,
491                           int *bytes_used,
492                           int *bytes_overhead)
493 {
494         if(!shm_header_p) {
495                 /* not mapped yet */
496                 DEBUG(0,("ERROR shm_free : shmem not mapped\n"));
497                 return False;
498         }
499
500         *bytes_free = shm_header_p->statistics.cells_free * CellSize;
501         *bytes_used = shm_header_p->statistics.cells_used * CellSize;
502         *bytes_overhead = shm_header_p->statistics.cells_system * CellSize + 
503                 AlignedHeaderSize;
504         
505         return True;
506 }
507
508
509 /*******************************************************************
510 hash a number into a hash_entry
511   ******************************************************************/
512 static unsigned shm_hash_size(void)
513 {
514         return hash_size;
515 }
516
517
/* dispatch table handed back to the generic shared-memory layer by
   sysv_shm_open(); entries must stay in the order the shmem_ops
   struct (declared elsewhere) expects */
static struct shmem_ops shmops = {
        shm_close,
        shm_alloc,
        shm_free,
        shm_get_userdef_off,
        shm_offset2addr,
        shm_addr2offset,
        shm_lock_hash_entry,
        shm_unlock_hash_entry,
        shm_get_usage,
        shm_hash_size,
};
530
531 /*******************************************************************
532   open the shared memory
533   ******************************************************************/
534 struct shmem_ops *sysv_shm_open(int ronly)
535 {
536         BOOL created_new = False;
537         BOOL other_processes;
538         struct shmid_ds shm_ds;
539         struct semid_ds sem_ds;
540         union semun su;
541         int i;
542         int pid;
543
544         read_only = ronly;
545
546         shm_size = lp_shmem_size();
547
548         DEBUG(4,("Trying sysv shmem open of size %d\n", shm_size));
549
550         /* first the semaphore */
551         sem_id = semget(SEMAPHORE_KEY, 0, 0);
552         if (sem_id == -1) {
553                 if (read_only) return NULL;
554
555                 hash_size = SHMEM_HASH_SIZE;
556
557                 while (hash_size > 1) {
558                         sem_id = semget(SEMAPHORE_KEY, hash_size+1, 
559                                         IPC_CREAT|IPC_EXCL| SEMAPHORE_PERMS);
560                         if (sem_id != -1 || errno != EINVAL) break;
561                         hash_size--;
562                 }
563
564                 if (sem_id == -1) {
565                         DEBUG(0,("Can't create or use semaphore %s\n", 
566                                  strerror(errno)));
567                 }   
568
569                 if (sem_id != -1) {
570                         su.val = 1;
571                         for (i=0;i<hash_size+1;i++) {
572                                 if (semctl(sem_id, i, SETVAL, su) != 0) {
573                                         DEBUG(1,("Failed to init semaphore %d\n", i));
574                                 }
575                         }
576                 }
577         }
578         if (shm_id == -1) {
579                 sem_id = semget(SEMAPHORE_KEY, 0, 0);
580         }
581         if (sem_id == -1) {
582                 DEBUG(0,("Can't create or use semaphore %s\n", 
583                          strerror(errno)));
584                 return NULL;
585         }   
586
587         su.buf = &sem_ds;
588         if (semctl(sem_id, 0, IPC_STAT, su) != 0) {
589                 DEBUG(0,("ERROR shm_open : can't IPC_STAT\n"));
590         }
591         hash_size = sem_ds.sem_nsems-1;
592
593         if (!read_only) {
594                 if (sem_ds.sem_perm.cuid != 0 || sem_ds.sem_perm.cgid != 0) {
595                         DEBUG(0,("ERROR: root did not create the semaphore\n"));
596                         return NULL;
597                 }
598
599                 if (semctl(sem_id, 0, GETVAL, su) == 0 &&
600                     !process_exists((pid=semctl(sem_id, 0, GETPID, su)))) {
601                         DEBUG(0,("WARNING: clearing global IPC lock set by dead process %d\n",
602                                  pid));
603                         su.val = 1;
604                         if (semctl(sem_id, 0, SETVAL, su) != 0) {
605                                 DEBUG(0,("ERROR: Failed to clear global lock\n"));
606                         }
607                 }
608
609                 sem_ds.sem_perm.mode = SEMAPHORE_PERMS;
610                 if (semctl(sem_id, 0, IPC_SET, su) != 0) {
611                         DEBUG(0,("ERROR shm_open : can't IPC_SET\n"));
612                 }
613         }
614
615         
616         
617         if (!global_lock())
618                 return NULL;
619
620
621         for (i=1;i<hash_size+1;i++) {
622                 if (semctl(sem_id, i, GETVAL, su) == 0 && 
623                     !process_exists((pid=semctl(sem_id, i, GETPID, su)))) {
624                         DEBUG(1,("WARNING: clearing IPC lock %d set by dead process %d\n", 
625                                  i, pid));
626                         su.val = 1;
627                         if (semctl(sem_id, i, SETVAL, su) != 0) {
628                                 DEBUG(0,("ERROR: Failed to clear IPC lock %d\n", i));
629                         }
630                 }
631         }
632         
633         /* try to use an existing key */
634         shm_id = shmget(SHMEM_KEY, shm_size, 0);
635         
636         /* if that failed then create one */
637         if (shm_id == -1) {
638                 if (read_only) return NULL;
639                 while (shm_size > MIN_SHM_SIZE) {
640                         shm_id = shmget(SHMEM_KEY, shm_size, 
641                                         IPC_CREAT | IPC_EXCL | IPC_PERMS);
642                         if (shm_id != -1 || errno != EINVAL) break;
643                         shm_size *= 0.9;
644                 }
645                 created_new = (shm_id != -1);
646         }
647         
648         if (shm_id == -1) {
649                 DEBUG(0,("Can't create or use IPC area\n"));
650                 global_unlock();
651                 return NULL;
652         }   
653         
654         
655         shm_header_p = (struct ShmHeader *)shmat(shm_id, 0, 
656                                                  read_only?SHM_RDONLY:0);
657         if ((int)shm_header_p == -1) {
658                 DEBUG(0,("Can't attach to IPC area\n"));
659                 global_unlock();
660                 return NULL;
661         }
662
663         /* to find out if some other process is already mapping the file,
664            we use a registration file containing the processids of the file
665            mapping processes */
666         if (shmctl(shm_id, IPC_STAT, &shm_ds) != 0) {
667                 DEBUG(0,("ERROR shm_open : can't IPC_STAT\n"));
668         }
669
670         if (!read_only) {
671                 if (shm_ds.shm_perm.cuid != 0 || shm_ds.shm_perm.cgid != 0) {
672                         DEBUG(0,("ERROR: root did not create the shmem\n"));
673                         global_unlock();
674                         return NULL;
675                 }
676         }
677
678         shm_size = shm_ds.shm_segsz;
679
680         other_processes = (shm_ds.shm_nattch > 1);
681
682         if (!read_only && !other_processes) {
683                 memset((char *)shm_header_p, 0, shm_size);
684                 shm_initialize(shm_size);
685                 shm_create_hash_table(hash_size);
686                 DEBUG(3,("Initialised IPC area of size %d\n", shm_size));
687         } else if (!shm_validate_header(shm_size)) {
688                 /* existing file is corrupt, samba admin should remove
689                    it by hand */
690                 DEBUG(0,("ERROR shm_open : corrupt IPC area - remove it!\n"));
691                 global_unlock();
692                 return NULL;
693         }
694    
695         global_unlock();
696         return &shmops;
697 }
698
699
700
701 #else 
/* placeholder so this translation unit is never empty when SYSV IPC
   support is compiled out */
 int ipc_dummy_procedure(void)
{
	return 0;
}
704 #endif