/* $Id: sys_sparc.c,v 1.57 2002/02/09 19:49:30 davem Exp $
 * linux/arch/sparc64/kernel/sys_sparc.c
 *
 * This file contains various random system calls that
 * have a non-standard calling sequence on the Linux/sparc
 * platform.
 */

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
#include <linux/stat.h>
#include <linux/mman.h>
#include <linux/utsname.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <linux/syscalls.h>
#include <linux/ipc.h>
#include <linux/personality.h>
#include <linux/random.h>

#include <asm/uaccess.h>
#include <asm/utrap.h>
#include <asm/perfctr.h>
#include <asm/unistd.h>

/* #define DEBUG_UNIMP_SYSCALL */

asmlinkage unsigned long sys_getpagesize(void)
{
        return PAGE_SIZE;
}

#define VA_EXCLUDE_START (0x0000080000000000UL - (1UL << 32UL))
#define VA_EXCLUDE_END   (0xfffff80000000000UL + (1UL << 32UL))

/* Does addr --> addr+len fall within 4GB of the VA-space hole or
 * overflow past the end of the 64-bit address space?
 */
static inline int invalid_64bit_range(unsigned long addr, unsigned long len)
{
        unsigned long va_exclude_start, va_exclude_end;

        va_exclude_start = VA_EXCLUDE_START;
        va_exclude_end   = VA_EXCLUDE_END;

        if (unlikely(len >= va_exclude_start))
                return 1;

        if (unlikely((addr + len) < addr))
                return 1;

        if (unlikely((addr >= va_exclude_start && addr < va_exclude_end) ||
                     ((addr + len) >= va_exclude_start &&
                      (addr + len) < va_exclude_end)))
                return 1;

        return 0;
}
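
/* For reference, expanding the constants: VA_EXCLUDE_START is
 * 0x000007ff00000000 and VA_EXCLUDE_END is 0xfffff80100000000, i.e.
 * the hardware VA hole [0x0000080000000000, 0xfffff80000000000)
 * widened by 4GB on each side.
 */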

/* Does start,end straddle the VA-space hole?  */
static inline int straddles_64bit_va_hole(unsigned long start, unsigned long end)
{
        unsigned long va_exclude_start, va_exclude_end;

        va_exclude_start = VA_EXCLUDE_START;
        va_exclude_end   = VA_EXCLUDE_END;

        if (likely(start < va_exclude_start && end < va_exclude_start))
                return 0;

        if (likely(start >= va_exclude_end && end >= va_exclude_end))
                return 0;

        return 1;
}

/* These functions differ from the default implementations in
 * mm/mmap.c in two ways:
 *
 * 1) For file-backed MAP_SHARED mmap()s we D-cache color align;
 *    for fixed such mappings we just validate what the user gave us.
 * 2) For 64-bit tasks we avoid mapping anything within 4GB of
 *    the spitfire/niagara VA-hole.
 */

static inline unsigned long COLOUR_ALIGN(unsigned long addr,
                                         unsigned long pgoff)
{
        unsigned long base = (addr+SHMLBA-1)&~(SHMLBA-1);
        unsigned long off = (pgoff<<PAGE_SHIFT) & (SHMLBA-1);

        return base + off;
}

static inline unsigned long COLOUR_ALIGN_DOWN(unsigned long addr,
                                              unsigned long pgoff)
{
        unsigned long base = addr & ~(SHMLBA-1);
        unsigned long off = (pgoff<<PAGE_SHIFT) & (SHMLBA-1);

        if (base + off <= addr)
                return base + off;
        return base - off;
}
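
/* Worked example (illustrative; assumes 8K pages and a 16K SHMLBA,
 * i.e. a two-colour D-cache): COLOUR_ALIGN(0x10000, 1) keeps the
 * already-aligned base 0x10000 and adds off = (1 << 13) & 0x3fff =
 * 0x2000, returning 0x12000.  The low SHMLBA bits of the virtual
 * address then match those of the file offset, so every shared
 * mapping of that file page lands on the same cache colour.
 */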

unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct * vma;
        unsigned long task_size = TASK_SIZE;
        unsigned long start_addr;
        int do_color_align;

        if (flags & MAP_FIXED) {
                /* We do not accept a shared mapping if it would violate
                 * cache aliasing constraints.
                 */
                if ((flags & MAP_SHARED) &&
                    ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
                        return -EINVAL;
                return addr;
        }

        if (test_thread_flag(TIF_32BIT))
                task_size = STACK_TOP32;
        if (unlikely(len > task_size || len >= VA_EXCLUDE_START))
                return -ENOMEM;

        do_color_align = 0;
        if (filp || (flags & MAP_SHARED))
                do_color_align = 1;

        if (addr) {
                if (do_color_align)
                        addr = COLOUR_ALIGN(addr, pgoff);
                else
                        addr = PAGE_ALIGN(addr);

                vma = find_vma(mm, addr);
                if (task_size - len >= addr &&
                    (!vma || addr + len <= vma->vm_start))
                        return addr;
        }

        if (len > mm->cached_hole_size) {
                start_addr = addr = mm->free_area_cache;
        } else {
                start_addr = addr = TASK_UNMAPPED_BASE;
                mm->cached_hole_size = 0;
        }

        task_size -= len;

full_search:
        if (do_color_align)
                addr = COLOUR_ALIGN(addr, pgoff);
        else
                addr = PAGE_ALIGN(addr);

        for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
                /* At this point:  (!vma || addr < vma->vm_end). */
                if (addr < VA_EXCLUDE_START &&
                    (addr + len) >= VA_EXCLUDE_START) {
                        addr = VA_EXCLUDE_END;
                        vma = find_vma(mm, VA_EXCLUDE_END);
                }
                if (unlikely(task_size < addr)) {
                        if (start_addr != TASK_UNMAPPED_BASE) {
                                start_addr = addr = TASK_UNMAPPED_BASE;
                                mm->cached_hole_size = 0;
                                goto full_search;
                        }
                        return -ENOMEM;
                }
                if (likely(!vma || addr + len <= vma->vm_start)) {
                        /*
                         * Remember the place where we stopped the search:
                         */
                        mm->free_area_cache = addr + len;
                        return addr;
                }
                if (addr + mm->cached_hole_size < vma->vm_start)
                        mm->cached_hole_size = vma->vm_start - addr;

                addr = vma->vm_end;
                if (do_color_align)
                        addr = COLOUR_ALIGN(addr, pgoff);
        }
}

unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
                          const unsigned long len, const unsigned long pgoff,
                          const unsigned long flags)
{
        struct vm_area_struct *vma;
        struct mm_struct *mm = current->mm;
        unsigned long task_size = STACK_TOP32;
        unsigned long addr = addr0;
        int do_color_align;

        /* This should only ever run for 32-bit processes.  */
        BUG_ON(!test_thread_flag(TIF_32BIT));

        if (flags & MAP_FIXED) {
                /* We do not accept a shared mapping if it would violate
                 * cache aliasing constraints.
                 */
                if ((flags & MAP_SHARED) &&
                    ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
                        return -EINVAL;
                return addr;
        }

        if (unlikely(len > task_size))
                return -ENOMEM;

        do_color_align = 0;
        if (filp || (flags & MAP_SHARED))
                do_color_align = 1;

        /* requesting a specific address */
        if (addr) {
                if (do_color_align)
                        addr = COLOUR_ALIGN(addr, pgoff);
                else
                        addr = PAGE_ALIGN(addr);

                vma = find_vma(mm, addr);
                if (task_size - len >= addr &&
                    (!vma || addr + len <= vma->vm_start))
                        return addr;
        }

        /* check if free_area_cache is useful for us */
        if (len <= mm->cached_hole_size) {
                mm->cached_hole_size = 0;
                mm->free_area_cache = mm->mmap_base;
        }

        /* either no address requested or can't fit in requested address hole */
        addr = mm->free_area_cache;
        if (do_color_align) {
                unsigned long base = COLOUR_ALIGN_DOWN(addr-len, pgoff);

                addr = base + len;
        }

        /* make sure it can fit in the remaining address space */
        if (likely(addr > len)) {
                vma = find_vma(mm, addr-len);
                if (!vma || addr <= vma->vm_start) {
                        /* remember the address as a hint for next time */
                        return (mm->free_area_cache = addr-len);
                }
        }

        if (unlikely(mm->mmap_base < len))
                goto bottomup;

        addr = mm->mmap_base-len;
        if (do_color_align)
                addr = COLOUR_ALIGN_DOWN(addr, pgoff);

        do {
                /*
                 * Lookup failure means no vma is above this address,
                 * else if new region fits below vma->vm_start,
                 * return with success:
                 */
                vma = find_vma(mm, addr);
                if (likely(!vma || addr+len <= vma->vm_start)) {
                        /* remember the address as a hint for next time */
                        return (mm->free_area_cache = addr);
                }

                /* remember the largest hole we saw so far */
                if (addr + mm->cached_hole_size < vma->vm_start)
                        mm->cached_hole_size = vma->vm_start - addr;

                /* try just below the current vma->vm_start */
                addr = vma->vm_start-len;
                if (do_color_align)
                        addr = COLOUR_ALIGN_DOWN(addr, pgoff);
        } while (likely(len < vma->vm_start));

bottomup:
        /*
         * A failed mmap() very likely causes application failure,
         * so fall back to the bottom-up function here. This scenario
         * can happen with large stack limits and large mmap()
         * allocations.
         */
        mm->cached_hole_size = ~0UL;
        mm->free_area_cache = TASK_UNMAPPED_BASE;
        addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
        /*
         * Restore the topdown base:
         */
        mm->free_area_cache = mm->mmap_base;
        mm->cached_hole_size = ~0UL;

        return addr;
}

/* Try to align the mapping as much as possible. */
unsigned long get_fb_unmapped_area(struct file *filp, unsigned long orig_addr, unsigned long len, unsigned long pgoff, unsigned long flags)
{
        unsigned long align_goal, addr = -ENOMEM;

        if (flags & MAP_FIXED) {
                /* Ok, don't mess with it. */
                return get_unmapped_area(NULL, orig_addr, len, pgoff, flags);
        }
        flags &= ~MAP_SHARED;

        align_goal = PAGE_SIZE;
        if (len >= (4UL * 1024 * 1024))
                align_goal = (4UL * 1024 * 1024);
        else if (len >= (512UL * 1024))
                align_goal = (512UL * 1024);
        else if (len >= (64UL * 1024))
                align_goal = (64UL * 1024);

        do {
                addr = get_unmapped_area(NULL, orig_addr, len + (align_goal - PAGE_SIZE), pgoff, flags);
                if (!(addr & ~PAGE_MASK)) {
                        addr = (addr + (align_goal - 1UL)) & ~(align_goal - 1UL);
                        break;
                }

                if (align_goal == (4UL * 1024 * 1024))
                        align_goal = (512UL * 1024);
                else if (align_goal == (512UL * 1024))
                        align_goal = (64UL * 1024);
                else
                        align_goal = PAGE_SIZE;
        } while ((addr & ~PAGE_MASK) && align_goal > PAGE_SIZE);

        /* The mapping is smaller than 64K, or the larger aligned
         * areas could not be obtained.
         */
        if (addr & ~PAGE_MASK)
                addr = get_unmapped_area(NULL, orig_addr, len, pgoff, flags);

        return addr;
}
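
/* Example of the ladder above (illustrative): a 5MB request gets an
 * align goal of 4MB, so the first pass asks for 5MB + 4MB - PAGE_SIZE
 * and rounds the returned address up to a 4MB boundary.  If that
 * over-sized request cannot be satisfied, the goal drops to 512K,
 * then 64K, then plain page alignment.
 */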

/* Essentially the same as PowerPC... */
void arch_pick_mmap_layout(struct mm_struct *mm)
{
        unsigned long random_factor = 0UL;

        if (current->flags & PF_RANDOMIZE) {
                random_factor = get_random_int();
                if (test_thread_flag(TIF_32BIT))
                        random_factor &= ((1 * 1024 * 1024) - 1);
                else
                        random_factor = ((random_factor << PAGE_SHIFT) &
                                         0xffffffffUL);
        }

        /*
         * Fall back to the standard layout if the personality
         * bit is set, or if the expected stack growth is unlimited:
         */
        if (!test_thread_flag(TIF_32BIT) ||
            (current->personality & ADDR_COMPAT_LAYOUT) ||
            current->signal->rlim[RLIMIT_STACK].rlim_cur == RLIM_INFINITY ||
            sysctl_legacy_va_layout) {
                mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
                mm->get_unmapped_area = arch_get_unmapped_area;
                mm->unmap_area = arch_unmap_area;
        } else {
                /* We know it's 32-bit */
                unsigned long task_size = STACK_TOP32;
                unsigned long gap;

                gap = current->signal->rlim[RLIMIT_STACK].rlim_cur;
                if (gap < 128 * 1024 * 1024)
                        gap = 128 * 1024 * 1024;
                if (gap > (task_size / 6 * 5))
                        gap = (task_size / 6 * 5);

                mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
                mm->get_unmapped_area = arch_get_unmapped_area_topdown;
                mm->unmap_area = arch_unmap_area_topdown;
        }
}
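
/* Example (illustrative): with the common 8MB RLIMIT_STACK, gap is
 * clamped up to 128MB, so a randomized 32-bit task gets
 * mmap_base = PAGE_ALIGN(STACK_TOP32 - 128MB - random_factor) and
 * mappings then grow down from there toward TASK_UNMAPPED_BASE.
 */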

asmlinkage unsigned long sparc_brk(unsigned long brk)
{
        /* People could try to be nasty and use ta 0x6d in 32-bit programs */
        if (test_thread_flag(TIF_32BIT) && brk >= STACK_TOP32)
                return current->mm->brk;

        if (unlikely(straddles_64bit_va_hole(current->mm->brk, brk)))
                return current->mm->brk;

        return sys_brk(brk);
}

/*
 * sys_pipe() is the normal C calling standard for creating
 * a pipe. It's not the way unix traditionally does this, though.
 */
asmlinkage long sparc_pipe(struct pt_regs *regs)
{
        int fd[2];
        int error;

        error = do_pipe(fd);
        if (error)
                goto out;
        regs->u_regs[UREG_I1] = fd[1];
        error = fd[0];
out:
        return error;
}
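
/* Note on the convention above: SPARC returns both descriptors in
 * registers -- fd[0] as the ordinary return value and fd[1] via
 * UREG_I1 (the userspace %o1 at trap time), so libc's pipe()
 * wrapper just stores the two registers into the caller's array.
 */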

/*
 * sys_ipc() is the de-multiplexer for the SysV IPC calls.
 *
 * This is really horribly ugly.
 */

asmlinkage long sys_ipc(unsigned int call, int first, unsigned long second,
                        unsigned long third, void __user *ptr, long fifth)
{
        long err;

        /* No need for backward compatibility. We can start fresh... */
        if (call <= SEMCTL) {
                switch (call) {
                case SEMOP:
                        err = sys_semtimedop(first, ptr,
                                             (unsigned)second, NULL);
                        goto out;
                case SEMTIMEDOP:
                        err = sys_semtimedop(first, ptr, (unsigned)second,
                                (const struct timespec __user *) fifth);
                        goto out;
                case SEMGET:
                        err = sys_semget(first, (int)second, (int)third);
                        goto out;
                case SEMCTL: {
                        err = sys_semctl(first, third,
                                         (int)second | IPC_64,
                                         (union semun) ptr);
                        goto out;
                }
                default:
                        err = -ENOSYS;
                        goto out;
                }
        }
        if (call <= MSGCTL) {
                switch (call) {
                case MSGSND:
                        err = sys_msgsnd(first, ptr, (size_t)second,
                                         (int)third);
                        goto out;
                case MSGRCV:
                        err = sys_msgrcv(first, ptr, (size_t)second, fifth,
                                         (int)third);
                        goto out;
                case MSGGET:
                        err = sys_msgget((key_t)first, (int)second);
                        goto out;
                case MSGCTL:
                        err = sys_msgctl(first, (int)second | IPC_64, ptr);
                        goto out;
                default:
                        err = -ENOSYS;
                        goto out;
                }
        }
        if (call <= SHMCTL) {
                switch (call) {
                case SHMAT: {
                        ulong raddr;
                        err = do_shmat(first, ptr, (int)second, &raddr);
                        if (!err) {
                                if (put_user(raddr,
                                             (ulong __user *) third))
                                        err = -EFAULT;
                        }
                        goto out;
                }
                case SHMDT:
                        err = sys_shmdt(ptr);
                        goto out;
                case SHMGET:
                        err = sys_shmget(first, (size_t)second, (int)third);
                        goto out;
                case SHMCTL:
                        err = sys_shmctl(first, (int)second | IPC_64, ptr);
                        goto out;
                default:
                        err = -ENOSYS;
                        goto out;
                }
        } else {
                err = -ENOSYS;
        }
out:
        return err;
}

asmlinkage long sparc64_newuname(struct new_utsname __user *name)
{
        int ret = sys_newuname(name);

        if (current->personality == PER_LINUX32 && !ret) {
                ret = (copy_to_user(name->machine, "sparc\0\0", 8)
                       ? -EFAULT : 0);
        }
        return ret;
}

asmlinkage long sparc64_personality(unsigned long personality)
{
        int ret;

        if (current->personality == PER_LINUX32 &&
            personality == PER_LINUX)
                personality = PER_LINUX32;
        ret = sys_personality(personality);
        if (ret == PER_LINUX32)
                ret = PER_LINUX;

        return ret;
}

int sparc64_mmap_check(unsigned long addr, unsigned long len,
                unsigned long flags)
{
        if (test_thread_flag(TIF_32BIT)) {
                if (len >= STACK_TOP32)
                        return -EINVAL;

                if ((flags & MAP_FIXED) && addr > STACK_TOP32 - len)
                        return -EINVAL;
        } else {
                if (len >= VA_EXCLUDE_START)
                        return -EINVAL;

                if ((flags & MAP_FIXED) && invalid_64bit_range(addr, len))
                        return -EINVAL;
        }

        return 0;
}

/* Linux version of mmap */
asmlinkage unsigned long sys_mmap(unsigned long addr, unsigned long len,
        unsigned long prot, unsigned long flags, unsigned long fd,
        unsigned long off)
{
        struct file * file = NULL;
        unsigned long retval = -EBADF;

        if (!(flags & MAP_ANONYMOUS)) {
                file = fget(fd);
                if (!file)
                        goto out;
        }
        flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
        len = PAGE_ALIGN(len);

        down_write(&current->mm->mmap_sem);
        retval = do_mmap(file, addr, len, prot, flags, off);
        up_write(&current->mm->mmap_sem);

        if (file)
                fput(file);
out:
        return retval;
}

asmlinkage long sys64_munmap(unsigned long addr, size_t len)
{
        long ret;

        if (invalid_64bit_range(addr, len))
                return -EINVAL;

        down_write(&current->mm->mmap_sem);
        ret = do_munmap(current->mm, addr, len);
        up_write(&current->mm->mmap_sem);
        return ret;
}

extern unsigned long do_mremap(unsigned long addr,
        unsigned long old_len, unsigned long new_len,
        unsigned long flags, unsigned long new_addr);

asmlinkage unsigned long sys64_mremap(unsigned long addr,
        unsigned long old_len, unsigned long new_len,
        unsigned long flags, unsigned long new_addr)
{
        struct vm_area_struct *vma;
        unsigned long ret = -EINVAL;

        if (test_thread_flag(TIF_32BIT))
                goto out;
        if (unlikely(new_len >= VA_EXCLUDE_START))
                goto out;
        if (unlikely(invalid_64bit_range(addr, old_len)))
                goto out;

        down_write(&current->mm->mmap_sem);
        if (flags & MREMAP_FIXED) {
                if (invalid_64bit_range(new_addr, new_len))
                        goto out_sem;
        } else if (invalid_64bit_range(addr, new_len)) {
                unsigned long map_flags = 0;
                struct file *file = NULL;

                ret = -ENOMEM;
                if (!(flags & MREMAP_MAYMOVE))
                        goto out_sem;

                vma = find_vma(current->mm, addr);
                if (vma) {
                        if (vma->vm_flags & VM_SHARED)
                                map_flags |= MAP_SHARED;
                        file = vma->vm_file;
                }

                /* MREMAP_FIXED checked above. */
                new_addr = get_unmapped_area(file, addr, new_len,
                                    vma ? vma->vm_pgoff : 0,
                                    map_flags);
                ret = new_addr;
                if (new_addr & ~PAGE_MASK)
                        goto out_sem;
                flags |= MREMAP_FIXED;
        }
        ret = do_mremap(addr, old_len, new_len, flags, new_addr);
out_sem:
        up_write(&current->mm->mmap_sem);
out:
        return ret;
}

/* We come here via sys_nis_syscall so it can set up the regs argument */
asmlinkage unsigned long c_sys_nis_syscall(struct pt_regs *regs)
{
        static int count;

        /* Don't make the system unusable if someone gets stuck */
        if (count++ > 5)
                return -ENOSYS;

        printk("Unimplemented SPARC system call %ld\n", regs->u_regs[1]);
#ifdef DEBUG_UNIMP_SYSCALL
        show_regs(regs);
#endif

        return -ENOSYS;
}

/* #define DEBUG_SPARC_BREAKPOINT */

asmlinkage void sparc_breakpoint(struct pt_regs *regs)
{
        siginfo_t info;

        if (test_thread_flag(TIF_32BIT)) {
                regs->tpc &= 0xffffffff;
                regs->tnpc &= 0xffffffff;
        }
#ifdef DEBUG_SPARC_BREAKPOINT
        printk("TRAP: Entering kernel PC=%lx, nPC=%lx\n", regs->tpc, regs->tnpc);
#endif
        info.si_signo = SIGTRAP;
        info.si_errno = 0;
        info.si_code = TRAP_BRKPT;
        info.si_addr = (void __user *)regs->tpc;
        info.si_trapno = 0;
        force_sig_info(SIGTRAP, &info, current);
#ifdef DEBUG_SPARC_BREAKPOINT
        printk("TRAP: Returning to space: PC=%lx nPC=%lx\n", regs->tpc, regs->tnpc);
#endif
}

extern void check_pending(int signum);

asmlinkage long sys_getdomainname(char __user *name, int len)
{
        int nlen, err;

        if (len < 0)
                return -EINVAL;

        down_read(&uts_sem);

        nlen = strlen(utsname()->domainname) + 1;
        err = -EINVAL;
        if (nlen > len)
                goto out;

        err = -EFAULT;
        if (!copy_to_user(name, utsname()->domainname, nlen))
                err = 0;

out:
        up_read(&uts_sem);
        return err;
}

asmlinkage long solaris_syscall(struct pt_regs *regs)
{
        static int count;

        regs->tpc = regs->tnpc;
        regs->tnpc += 4;
        if (test_thread_flag(TIF_32BIT)) {
                regs->tpc &= 0xffffffff;
                regs->tnpc &= 0xffffffff;
        }
        if (++count <= 5) {
                printk("For Solaris binary emulation you need the solaris module loaded\n");
                show_regs(regs);
        }
        send_sig(SIGSEGV, current, 1);

        return -ENOSYS;
}

#ifndef CONFIG_SUNOS_EMUL
asmlinkage long sunos_syscall(struct pt_regs *regs)
{
        static int count;

        regs->tpc = regs->tnpc;
        regs->tnpc += 4;
        if (test_thread_flag(TIF_32BIT)) {
                regs->tpc &= 0xffffffff;
                regs->tnpc &= 0xffffffff;
        }
        if (++count <= 20)
                printk("SunOS binary emulation not compiled in\n");
        force_sig(SIGSEGV, current);

        return -ENOSYS;
}
#endif

asmlinkage long sys_utrap_install(utrap_entry_t type,
                                  utrap_handler_t new_p,
                                  utrap_handler_t new_d,
                                  utrap_handler_t __user *old_p,
                                  utrap_handler_t __user *old_d)
{
        if (type < UT_INSTRUCTION_EXCEPTION || type > UT_TRAP_INSTRUCTION_31)
                return -EINVAL;
        if (new_p == (utrap_handler_t)(long)UTH_NOCHANGE) {
                if (old_p) {
                        if (!current_thread_info()->utraps) {
                                if (put_user(NULL, old_p))
                                        return -EFAULT;
                        } else {
                                if (put_user((utrap_handler_t)(current_thread_info()->utraps[type]), old_p))
                                        return -EFAULT;
                        }
                }
                if (old_d) {
                        if (put_user(NULL, old_d))
                                return -EFAULT;
                }
                return 0;
        }
        if (!current_thread_info()->utraps) {
                current_thread_info()->utraps =
                        kzalloc((UT_TRAP_INSTRUCTION_31+1)*sizeof(long), GFP_KERNEL);
                if (!current_thread_info()->utraps)
                        return -ENOMEM;
                current_thread_info()->utraps[0] = 1;
        } else {
                if ((utrap_handler_t)current_thread_info()->utraps[type] != new_p &&
                    current_thread_info()->utraps[0] > 1) {
                        long *p = current_thread_info()->utraps;

                        current_thread_info()->utraps =
                                kmalloc((UT_TRAP_INSTRUCTION_31+1)*sizeof(long),
                                        GFP_KERNEL);
                        if (!current_thread_info()->utraps) {
                                current_thread_info()->utraps = p;
                                return -ENOMEM;
                        }
                        p[0]--;
                        current_thread_info()->utraps[0] = 1;
                        memcpy(current_thread_info()->utraps+1, p+1,
                               UT_TRAP_INSTRUCTION_31*sizeof(long));
                }
        }
        if (old_p) {
                if (put_user((utrap_handler_t)(current_thread_info()->utraps[type]), old_p))
                        return -EFAULT;
        }
        if (old_d) {
                if (put_user(NULL, old_d))
                        return -EFAULT;
        }
        current_thread_info()->utraps[type] = (long)new_p;

        return 0;
}

long sparc_memory_ordering(unsigned long model, struct pt_regs *regs)
{
        if (model >= 3)
                return -EINVAL;
        regs->tstate = (regs->tstate & ~TSTATE_MM) | (model << 14);
        return 0;
}
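
/* Per the SPARC V9 spec the valid models are 0 = TSO (total store
 * order), 1 = PSO (partial store order) and 2 = RMO (relaxed memory
 * order); shifting by 14 places the value in the TSTATE.MM field
 * masked out above.
 */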

asmlinkage long sys_rt_sigaction(int sig,
                                 const struct sigaction __user *act,
                                 struct sigaction __user *oact,
                                 void __user *restorer,
                                 size_t sigsetsize)
{
        struct k_sigaction new_ka, old_ka;
        int ret;

        /* XXX: Don't preclude handling different sized sigset_t's.  */
        if (sigsetsize != sizeof(sigset_t))
                return -EINVAL;

        if (act) {
                new_ka.ka_restorer = restorer;
                if (copy_from_user(&new_ka.sa, act, sizeof(*act)))
                        return -EFAULT;
        }

        ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

        if (!ret && oact) {
                if (copy_to_user(oact, &old_ka.sa, sizeof(*oact)))
                        return -EFAULT;
        }

        return ret;
}

/* Invoked by rtrap code to update performance counters in
 * user space.
 */
asmlinkage void update_perfctrs(void)
{
        unsigned long pic, tmp;

        read_pic(pic);
        tmp = (current_thread_info()->kernel_cntd0 += (unsigned int)pic);
        __put_user(tmp, current_thread_info()->user_cntd0);
        tmp = (current_thread_info()->kernel_cntd1 += (pic >> 32));
        __put_user(tmp, current_thread_info()->user_cntd1);
        reset_pic();
}

asmlinkage long sys_perfctr(int opcode, unsigned long arg0, unsigned long arg1, unsigned long arg2)
{
        int err = 0;

        switch (opcode) {
        case PERFCTR_ON:
                current_thread_info()->pcr_reg = arg2;
                current_thread_info()->user_cntd0 = (u64 __user *) arg0;
                current_thread_info()->user_cntd1 = (u64 __user *) arg1;
                current_thread_info()->kernel_cntd0 =
                        current_thread_info()->kernel_cntd1 = 0;
                write_pcr(arg2);
                reset_pic();
                set_thread_flag(TIF_PERFCTR);
                break;

        case PERFCTR_OFF:
                err = -EINVAL;
                if (test_thread_flag(TIF_PERFCTR)) {
                        current_thread_info()->user_cntd0 =
                                current_thread_info()->user_cntd1 = NULL;
                        current_thread_info()->pcr_reg = 0;
                        write_pcr(0);
                        clear_thread_flag(TIF_PERFCTR);
                        err = 0;
                }
                break;

        case PERFCTR_READ: {
                unsigned long pic, tmp;

                if (!test_thread_flag(TIF_PERFCTR)) {
                        err = -EINVAL;
                        break;
                }
                read_pic(pic);
                tmp = (current_thread_info()->kernel_cntd0 += (unsigned int)pic);
                err |= __put_user(tmp, current_thread_info()->user_cntd0);
                tmp = (current_thread_info()->kernel_cntd1 += (pic >> 32));
                err |= __put_user(tmp, current_thread_info()->user_cntd1);
                reset_pic();
                break;
        }

        case PERFCTR_CLRPIC:
                if (!test_thread_flag(TIF_PERFCTR)) {
                        err = -EINVAL;
                        break;
                }
                current_thread_info()->kernel_cntd0 =
                        current_thread_info()->kernel_cntd1 = 0;
                reset_pic();
                break;

        case PERFCTR_SETPCR: {
                u64 __user *user_pcr = (u64 __user *)arg0;

                if (!test_thread_flag(TIF_PERFCTR)) {
                        err = -EINVAL;
                        break;
                }
                err |= __get_user(current_thread_info()->pcr_reg, user_pcr);
                write_pcr(current_thread_info()->pcr_reg);
                current_thread_info()->kernel_cntd0 =
                        current_thread_info()->kernel_cntd1 = 0;
                reset_pic();
                break;
        }

        case PERFCTR_GETPCR: {
                u64 __user *user_pcr = (u64 __user *)arg0;

                if (!test_thread_flag(TIF_PERFCTR)) {
                        err = -EINVAL;
                        break;
                }
                err |= __put_user(current_thread_info()->pcr_reg, user_pcr);
                break;
        }

        default:
                err = -EINVAL;
                break;
        }
        return err;
}
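
/* A note on the inline trap below: by the sparc64 syscall convention
 * the kernel signals an error by setting the xcc carry bit and
 * returning the positive errno in %o0.  The "sub %g0, %o0, %0"
 * speculatively negates %o0, and "movcc %xcc, %o0, %0" overwrites
 * that with the raw value when carry is clear (success), giving the
 * usual Linux negative-errno return.
 */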

/*
 * Do a system call from kernel instead of calling sys_execve so we
 * end up with proper pt_regs.
 */
int kernel_execve(const char *filename, char *const argv[], char *const envp[])
{
        long __res;
        register long __g1 __asm__ ("g1") = __NR_execve;
        register long __o0 __asm__ ("o0") = (long)(filename);
        register long __o1 __asm__ ("o1") = (long)(argv);
        register long __o2 __asm__ ("o2") = (long)(envp);
        asm volatile ("t 0x6d\n\t"
                      "sub %%g0, %%o0, %0\n\t"
                      "movcc %%xcc, %%o0, %0\n\t"
                      : "=r" (__res), "=&r" (__o0)
                      : "1" (__o0), "r" (__o1), "r" (__o2), "r" (__g1)
                      : "cc");
        return __res;
}