/*
 *  linux/drivers/char/mem.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Added devfs support.
 *    Jan-11-1998, C. Scott Ananian <cananian@alumni.princeton.edu>
 *  Shared /dev/zero mmapping support, Feb 2000, Kanoj Sarcar <kanoj@sgi.com>
 */

#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mman.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/raw.h>
#include <linux/tty.h>
#include <linux/capability.h>
#include <linux/ptrace.h>
#include <linux/device.h>
#include <linux/highmem.h>
#include <linux/backing-dev.h>
#include <linux/shmem_fs.h>
#include <linux/splice.h>
#include <linux/pfn.h>
#include <linux/export.h>
#include <linux/io.h>
#include <linux/uio.h>

#include <linux/uaccess.h>

#ifdef CONFIG_IA64
# include <linux/efi.h>
#endif

#define DEVPORT_MINOR   4

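/*
 * Return the number of bytes from @start to the end of its page, capped
 * at @size, so callers never cross a page boundary in a single copy.
 */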
static inline unsigned long size_inside_page(unsigned long start,
                                             unsigned long size)
{
        unsigned long sz;

        sz = PAGE_SIZE - (start & (PAGE_SIZE - 1));

        return min(sz, size);
}

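/*
 * Default range checks for architectures that do not provide
 * ARCH_HAS_VALID_PHYS_ADDR_RANGE: reads and writes must stay below
 * high_memory, while any pfn may be mmap()ed.
 */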
#ifndef ARCH_HAS_VALID_PHYS_ADDR_RANGE
static inline int valid_phys_addr_range(phys_addr_t addr, size_t count)
{
        return addr + count <= __pa(high_memory);
}

static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
        return 1;
}
#endif

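/*
 * With CONFIG_STRICT_DEVMEM, every page accessed through /dev/mem is
 * vetted by the architecture's devmem_is_allowed(): 0 denies the access,
 * 1 allows it, and 2 allows it but filters the data (reads return zeroes,
 * writes are dropped).  Without it, everything is allowed.
 */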
#ifdef CONFIG_STRICT_DEVMEM
static inline int page_is_allowed(unsigned long pfn)
{
        return devmem_is_allowed(pfn);
}
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
        u64 from = ((u64)pfn) << PAGE_SHIFT;
        u64 to = from + size;
        u64 cursor = from;

        while (cursor < to) {
                if (!devmem_is_allowed(pfn))
                        return 0;
                cursor += PAGE_SIZE;
                pfn++;
        }
        return 1;
}
#else
static inline int page_is_allowed(unsigned long pfn)
{
        return 1;
}
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
        return 1;
}
#endif

#ifndef unxlate_dev_mem_ptr
#define unxlate_dev_mem_ptr unxlate_dev_mem_ptr
void __weak unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
{
}
#endif

/*
 * This function reads the *physical* memory. The f_pos points directly to
 * the memory location.
 */
static ssize_t read_mem(struct file *file, char __user *buf,
                        size_t count, loff_t *ppos)
{
        phys_addr_t p = *ppos;
        ssize_t read, sz;
        void *ptr;

        if (p != *ppos)
                return 0;

        if (!valid_phys_addr_range(p, count))
                return -EFAULT;
        read = 0;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
        /* we don't have page 0 mapped on sparc and m68k.. */
        if (p < PAGE_SIZE) {
                sz = size_inside_page(p, count);
                if (sz > 0) {
                        if (clear_user(buf, sz))
                                return -EFAULT;
                        buf += sz;
                        p += sz;
                        count -= sz;
                        read += sz;
                }
        }
#endif

        while (count > 0) {
                unsigned long remaining;
                int allowed;

                sz = size_inside_page(p, count);

                allowed = page_is_allowed(p >> PAGE_SHIFT);
                if (!allowed)
                        return -EPERM;
                if (allowed == 2) {
                        /* Show zeros for restricted memory. */
                        remaining = clear_user(buf, sz);
                } else {
                        /*
                         * On ia64 if a page has been mapped somewhere as
                         * uncached, then it must also be accessed uncached
                         * by the kernel or data corruption may occur.
                         */
                        ptr = xlate_dev_mem_ptr(p);
                        if (!ptr)
                                return -EFAULT;

                        remaining = copy_to_user(buf, ptr, sz);

                        unxlate_dev_mem_ptr(p, ptr);
                }

                if (remaining)
                        return -EFAULT;

                buf += sz;
                p += sz;
                count -= sz;
                read += sz;
        }

        *ppos += read;
        return read;
}

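/*
 * This function writes to the *physical* memory. The f_pos points directly
 * to the memory location.
 */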
static ssize_t write_mem(struct file *file, const char __user *buf,
                         size_t count, loff_t *ppos)
{
        phys_addr_t p = *ppos;
        ssize_t written, sz;
        unsigned long copied;
        void *ptr;

        if (p != *ppos)
                return -EFBIG;

        if (!valid_phys_addr_range(p, count))
                return -EFAULT;

        written = 0;

#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
        /* we don't have page 0 mapped on sparc and m68k.. */
        if (p < PAGE_SIZE) {
                sz = size_inside_page(p, count);
                /* Hmm. Do something? */
                buf += sz;
                p += sz;
                count -= sz;
                written += sz;
        }
#endif

        while (count > 0) {
                int allowed;

                sz = size_inside_page(p, count);

                allowed = page_is_allowed(p >> PAGE_SHIFT);
                if (!allowed)
                        return -EPERM;

                /* Skip actual writing when a page is marked as restricted. */
                if (allowed == 1) {
                        /*
                         * On ia64 if a page has been mapped somewhere as
                         * uncached, then it must also be accessed uncached
                         * by the kernel or data corruption may occur.
                         */
                        ptr = xlate_dev_mem_ptr(p);
                        if (!ptr) {
                                if (written)
                                        break;
                                return -EFAULT;
                        }

                        copied = copy_from_user(ptr, buf, sz);
                        unxlate_dev_mem_ptr(p, ptr);
                        if (copied) {
                                written += sz - copied;
                                if (written)
                                        break;
                                return -EFAULT;
                        }
                }

                buf += sz;
                p += sz;
                count -= sz;
                written += sz;
        }

        *ppos += written;
        return written;
}

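/*
 * Weak default: architectures can override this to refuse or adjust the
 * page protection used when physical memory is mmap()ed.
 */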
int __weak phys_mem_access_prot_allowed(struct file *file,
        unsigned long pfn, unsigned long size, pgprot_t *vma_prot)
{
        return 1;
}

#ifndef __HAVE_PHYS_MEM_ACCESS_PROT

/*
 * Architectures vary in how they handle caching for addresses
 * outside of main memory.
 */
#ifdef pgprot_noncached
static int uncached_access(struct file *file, phys_addr_t addr)
{
#if defined(CONFIG_IA64)
        /*
         * On ia64, we ignore O_DSYNC because we cannot tolerate memory
         * attribute aliases.
         */
        return !(efi_mem_attributes(addr) & EFI_MEMORY_WB);
#elif defined(CONFIG_MIPS)
        {
                extern int __uncached_access(struct file *file,
                                             unsigned long addr);

                return __uncached_access(file, addr);
        }
#else
        /*
         * Accessing memory above the top the kernel knows about, or through
         * a file pointer that was marked O_DSYNC, will be done non-cached.
         */
        if (file->f_flags & O_DSYNC)
                return 1;
        return addr >= __pa(high_memory);
#endif
}
#endif

static pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
                                     unsigned long size, pgprot_t vma_prot)
{
#ifdef pgprot_noncached
        phys_addr_t offset = pfn << PAGE_SHIFT;

        if (uncached_access(file, offset))
                return pgprot_noncached(vma_prot);
#endif
        return vma_prot;
}
#endif

#ifndef CONFIG_MMU
static unsigned long get_unmapped_area_mem(struct file *file,
                                           unsigned long addr,
                                           unsigned long len,
                                           unsigned long pgoff,
                                           unsigned long flags)
{
        if (!valid_mmap_phys_addr_range(pgoff, len))
                return (unsigned long) -EINVAL;
        return pgoff << PAGE_SHIFT;
}

/* permit direct mmap, for read, write or exec */
static unsigned memory_mmap_capabilities(struct file *file)
{
        return NOMMU_MAP_DIRECT |
                NOMMU_MAP_READ | NOMMU_MAP_WRITE | NOMMU_MAP_EXEC;
}

static unsigned zero_mmap_capabilities(struct file *file)
{
        return NOMMU_MAP_COPY;
}

/* can't do an in-place private mapping if there's no MMU */
static inline int private_mapping_ok(struct vm_area_struct *vma)
{
        return vma->vm_flags & VM_MAYSHARE;
}
#else

static inline int private_mapping_ok(struct vm_area_struct *vma)
{
        return 1;
}
#endif

static const struct vm_operations_struct mmap_mem_ops = {
#ifdef CONFIG_HAVE_IOREMAP_PROT
        .access = generic_access_phys
#endif
};

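/*
 * mmap() of /dev/mem: validate the physical range, apply the architecture's
 * caching policy for the region, then remap it into the caller's address
 * space.
 */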
static int mmap_mem(struct file *file, struct vm_area_struct *vma)
{
        size_t size = vma->vm_end - vma->vm_start;

        if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
                return -EINVAL;

        if (!private_mapping_ok(vma))
                return -ENOSYS;

        if (!range_is_allowed(vma->vm_pgoff, size))
                return -EPERM;

        if (!phys_mem_access_prot_allowed(file, vma->vm_pgoff, size,
                                                &vma->vm_page_prot))
                return -EINVAL;

        vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
                                                 size,
                                                 vma->vm_page_prot);

        vma->vm_ops = &mmap_mem_ops;

        /* Remap-pfn-range will mark the range VM_IO */
        if (remap_pfn_range(vma,
                            vma->vm_start,
                            vma->vm_pgoff,
                            size,
                            vma->vm_page_prot)) {
                return -EAGAIN;
        }
        return 0;
}

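/*
 * mmap() of /dev/kmem: the offset is a kernel virtual address, so turn it
 * into a page frame number and hand the rest of the work to mmap_mem().
 */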
static int mmap_kmem(struct file *file, struct vm_area_struct *vma)
{
        unsigned long pfn;

        /* Turn a kernel-virtual address into a physical page frame */
        pfn = __pa((u64)vma->vm_pgoff << PAGE_SHIFT) >> PAGE_SHIFT;

        /*
         * RED-PEN: on some architectures there is more mapped memory than
         * available in mem_map which pfn_valid checks for. Perhaps should add a
         * new macro here.
         *
         * RED-PEN: vmalloc is not supported right now.
         */
        if (!pfn_valid(pfn))
                return -EIO;

        vma->vm_pgoff = pfn;
        return mmap_mem(file, vma);
}

/*
 * This function reads the *virtual* memory as seen by the kernel.
 */
static ssize_t read_kmem(struct file *file, char __user *buf,
                         size_t count, loff_t *ppos)
{
        unsigned long p = *ppos;
        ssize_t low_count, read, sz;
        char *kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
        int err = 0;

        read = 0;
        if (p < (unsigned long) high_memory) {
                low_count = count;
                if (count > (unsigned long)high_memory - p)
                        low_count = (unsigned long)high_memory - p;

#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
                /* we don't have page 0 mapped on sparc and m68k.. */
                if (p < PAGE_SIZE && low_count > 0) {
                        sz = size_inside_page(p, low_count);
                        if (clear_user(buf, sz))
                                return -EFAULT;
                        buf += sz;
                        p += sz;
                        read += sz;
                        low_count -= sz;
                        count -= sz;
                }
#endif
                while (low_count > 0) {
                        sz = size_inside_page(p, low_count);

                        /*
                         * On ia64 if a page has been mapped somewhere as
                         * uncached, then it must also be accessed uncached
                         * by the kernel or data corruption may occur
                         */
                        kbuf = xlate_dev_kmem_ptr((void *)p);
                        if (!virt_addr_valid(kbuf))
                                return -ENXIO;

                        if (copy_to_user(buf, kbuf, sz))
                                return -EFAULT;
                        buf += sz;
                        p += sz;
                        read += sz;
                        low_count -= sz;
                        count -= sz;
                }
        }

        if (count > 0) {
                kbuf = (char *)__get_free_page(GFP_KERNEL);
                if (!kbuf)
                        return -ENOMEM;
                while (count > 0) {
                        sz = size_inside_page(p, count);
                        if (!is_vmalloc_or_module_addr((void *)p)) {
                                err = -ENXIO;
                                break;
                        }
                        sz = vread(kbuf, (char *)p, sz);
                        if (!sz)
                                break;
                        if (copy_to_user(buf, kbuf, sz)) {
                                err = -EFAULT;
                                break;
                        }
                        count -= sz;
                        buf += sz;
                        read += sz;
                        p += sz;
                }
                free_page((unsigned long)kbuf);
        }
        *ppos = p;
        return read ? read : err;
}

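/*
 * Copy user data into directly-mapped kernel memory (below high_memory),
 * one page at a time.  Helper for write_kmem().
 */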
static ssize_t do_write_kmem(unsigned long p, const char __user *buf,
                                size_t count, loff_t *ppos)
{
        ssize_t written, sz;
        unsigned long copied;

        written = 0;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
        /* we don't have page 0 mapped on sparc and m68k.. */
        if (p < PAGE_SIZE) {
                sz = size_inside_page(p, count);
                /* Hmm. Do something? */
                buf += sz;
                p += sz;
                count -= sz;
                written += sz;
        }
#endif

        while (count > 0) {
                void *ptr;

                sz = size_inside_page(p, count);

                /*
                 * On ia64 if a page has been mapped somewhere as uncached, then
                 * it must also be accessed uncached by the kernel or data
                 * corruption may occur.
                 */
                ptr = xlate_dev_kmem_ptr((void *)p);
                if (!virt_addr_valid(ptr))
                        return -ENXIO;

                copied = copy_from_user(ptr, buf, sz);
                if (copied) {
                        written += sz - copied;
                        if (written)
                                break;
                        return -EFAULT;
                }
                buf += sz;
                p += sz;
                count -= sz;
                written += sz;
        }

        *ppos += written;
        return written;
}

/*
 * This function writes to the *virtual* memory as seen by the kernel.
 */
static ssize_t write_kmem(struct file *file, const char __user *buf,
                          size_t count, loff_t *ppos)
{
        unsigned long p = *ppos;
        ssize_t wrote = 0;
        ssize_t virtr = 0;
        char *kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */
        int err = 0;

        if (p < (unsigned long) high_memory) {
                unsigned long to_write = min_t(unsigned long, count,
                                               (unsigned long)high_memory - p);
                wrote = do_write_kmem(p, buf, to_write, ppos);
                if (wrote != to_write)
                        return wrote;
                p += wrote;
                buf += wrote;
                count -= wrote;
        }

        if (count > 0) {
                kbuf = (char *)__get_free_page(GFP_KERNEL);
                if (!kbuf)
                        return wrote ? wrote : -ENOMEM;
                while (count > 0) {
                        unsigned long sz = size_inside_page(p, count);
                        unsigned long n;

                        if (!is_vmalloc_or_module_addr((void *)p)) {
                                err = -ENXIO;
                                break;
                        }
                        n = copy_from_user(kbuf, buf, sz);
                        if (n) {
                                err = -EFAULT;
                                break;
                        }
                        vwrite(kbuf, (char *)p, sz);
                        count -= sz;
                        buf += sz;
                        virtr += sz;
                        p += sz;
                }
                free_page((unsigned long)kbuf);
        }

        *ppos = p;
        return virtr + wrote ? : err;
}

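/*
 * /dev/port: byte-wise access to the I/O port space via inb()/outb().
 * The file offset is used as the port number and is limited to 0..65535.
 */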
static ssize_t read_port(struct file *file, char __user *buf,
                         size_t count, loff_t *ppos)
{
        unsigned long i = *ppos;
        char __user *tmp = buf;

        if (!access_ok(VERIFY_WRITE, buf, count))
                return -EFAULT;
        while (count-- > 0 && i < 65536) {
                if (__put_user(inb(i), tmp) < 0)
                        return -EFAULT;
                i++;
                tmp++;
        }
        *ppos = i;
        return tmp-buf;
}

static ssize_t write_port(struct file *file, const char __user *buf,
                          size_t count, loff_t *ppos)
{
        unsigned long i = *ppos;
        const char __user *tmp = buf;

        if (!access_ok(VERIFY_READ, buf, count))
                return -EFAULT;
        while (count-- > 0 && i < 65536) {
                char c;

                if (__get_user(c, tmp)) {
                        if (tmp > buf)
                                break;
                        return -EFAULT;
                }
                outb(c, i);
                i++;
                tmp++;
        }
        *ppos = i;
        return tmp-buf;
}

static ssize_t read_null(struct file *file, char __user *buf,
                         size_t count, loff_t *ppos)
{
        return 0;
}

static ssize_t write_null(struct file *file, const char __user *buf,
                          size_t count, loff_t *ppos)
{
        return count;
}

static ssize_t read_iter_null(struct kiocb *iocb, struct iov_iter *to)
{
        return 0;
}

static ssize_t write_iter_null(struct kiocb *iocb, struct iov_iter *from)
{
        size_t count = iov_iter_count(from);
        iov_iter_advance(from, count);
        return count;
}

static int pipe_to_null(struct pipe_inode_info *info, struct pipe_buffer *buf,
                        struct splice_desc *sd)
{
        return sd->len;
}

static ssize_t splice_write_null(struct pipe_inode_info *pipe, struct file *out,
                                 loff_t *ppos, size_t len, unsigned int flags)
{
        return splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_null);
}

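/*
 * /dev/zero reads: fill the user buffer with zeroes, at most one page per
 * iteration so pending signals and the need to reschedule are noticed.
 */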
static ssize_t read_iter_zero(struct kiocb *iocb, struct iov_iter *iter)
{
        size_t written = 0;

        while (iov_iter_count(iter)) {
                size_t chunk = iov_iter_count(iter), n;

                if (chunk > PAGE_SIZE)
                        chunk = PAGE_SIZE;      /* Just for latency reasons */
                n = iov_iter_zero(chunk, iter);
                if (!n && iov_iter_count(iter))
                        return written ? written : -EFAULT;
                written += n;
                if (signal_pending(current))
                        return written ? written : -ERESTARTSYS;
                cond_resched();
        }
        return written;
}

static int mmap_zero(struct file *file, struct vm_area_struct *vma)
{
#ifndef CONFIG_MMU
        return -ENOSYS;
#endif
        if (vma->vm_flags & VM_SHARED)
                return shmem_zero_setup(vma);
        return 0;
}

static unsigned long get_unmapped_area_zero(struct file *file,
                                unsigned long addr, unsigned long len,
                                unsigned long pgoff, unsigned long flags)
{
#ifdef CONFIG_MMU
        if (flags & MAP_SHARED) {
                /*
                 * mmap_zero() will call shmem_zero_setup() to create a file,
                 * so use shmem's get_unmapped_area in case it can be huge;
                 * and pass NULL for file as in mmap.c's get_unmapped_area(),
                 * so as not to confuse shmem with our handle on "/dev/zero".
                 */
                return shmem_get_unmapped_area(NULL, addr, len, pgoff, flags);
        }

        /* Otherwise flags & MAP_PRIVATE: with no shmem object beneath it */
        return current->mm->get_unmapped_area(file, addr, len, pgoff, flags);
#else
        return -ENOSYS;
#endif
}

static ssize_t write_full(struct file *file, const char __user *buf,
                          size_t count, loff_t *ppos)
{
        return -ENOSPC;
}

/*
 * Special lseek() function for /dev/null and /dev/zero.  Most notably, you
 * can fopen() both devices with "a" now.  This was previously impossible.
 * -- SRB.
 */
static loff_t null_lseek(struct file *file, loff_t offset, int orig)
{
        return file->f_pos = 0;
}

/*
 * The memory devices use the full 32/64 bits of the offset, and so we cannot
 * check against negative addresses: they are ok. The return value is weird,
 * though, in that case (0).
 *
 * also note that seeking relative to the "end of file" isn't supported:
 * it has no meaning, so it returns -EINVAL.
 */
static loff_t memory_lseek(struct file *file, loff_t offset, int orig)
{
        loff_t ret;

        inode_lock(file_inode(file));
        switch (orig) {
        case SEEK_CUR:
                offset += file->f_pos;
        case SEEK_SET:
                /* to avoid userland mistaking f_pos=-9 as -EBADF=-9 */
                if ((unsigned long long)offset >= -MAX_ERRNO) {
                        ret = -EOVERFLOW;
                        break;
                }
                file->f_pos = offset;
                ret = file->f_pos;
                force_successful_syscall_return();
                break;
        default:
                ret = -EINVAL;
        }
        inode_unlock(file_inode(file));
        return ret;
}

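/* Opening /dev/mem, /dev/kmem or /dev/port requires CAP_SYS_RAWIO. */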
static int open_port(struct inode *inode, struct file *filp)
{
        return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
}

#define zero_lseek      null_lseek
#define full_lseek      null_lseek
#define write_zero      write_null
#define write_iter_zero write_iter_null
#define open_mem        open_port
#define open_kmem       open_mem

static const struct file_operations __maybe_unused mem_fops = {
        .llseek         = memory_lseek,
        .read           = read_mem,
        .write          = write_mem,
        .mmap           = mmap_mem,
        .open           = open_mem,
#ifndef CONFIG_MMU
        .get_unmapped_area = get_unmapped_area_mem,
        .mmap_capabilities = memory_mmap_capabilities,
#endif
};

static const struct file_operations __maybe_unused kmem_fops = {
        .llseek         = memory_lseek,
        .read           = read_kmem,
        .write          = write_kmem,
        .mmap           = mmap_kmem,
        .open           = open_kmem,
#ifndef CONFIG_MMU
        .get_unmapped_area = get_unmapped_area_mem,
        .mmap_capabilities = memory_mmap_capabilities,
#endif
};

static const struct file_operations null_fops = {
        .llseek         = null_lseek,
        .read           = read_null,
        .write          = write_null,
        .read_iter      = read_iter_null,
        .write_iter     = write_iter_null,
        .splice_write   = splice_write_null,
};

static const struct file_operations __maybe_unused port_fops = {
        .llseek         = memory_lseek,
        .read           = read_port,
        .write          = write_port,
        .open           = open_port,
};

static const struct file_operations zero_fops = {
        .llseek         = zero_lseek,
        .write          = write_zero,
        .read_iter      = read_iter_zero,
        .write_iter     = write_iter_zero,
        .mmap           = mmap_zero,
        .get_unmapped_area = get_unmapped_area_zero,
#ifndef CONFIG_MMU
        .mmap_capabilities = zero_mmap_capabilities,
#endif
};

static const struct file_operations full_fops = {
        .llseek         = full_lseek,
        .read_iter      = read_iter_zero,
        .write          = write_full,
};

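/*
 * The array index below is the minor number of the device node under the
 * MEM_MAJOR character major; the gaps are minors not implemented here.
 */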
static const struct memdev {
        const char *name;
        umode_t mode;
        const struct file_operations *fops;
        fmode_t fmode;
} devlist[] = {
#ifdef CONFIG_DEVMEM
         [1] = { "mem", 0, &mem_fops, FMODE_UNSIGNED_OFFSET },
#endif
#ifdef CONFIG_DEVKMEM
         [2] = { "kmem", 0, &kmem_fops, FMODE_UNSIGNED_OFFSET },
#endif
         [3] = { "null", 0666, &null_fops, 0 },
#ifdef CONFIG_DEVPORT
         [4] = { "port", 0, &port_fops, 0 },
#endif
         [5] = { "zero", 0666, &zero_fops, 0 },
         [7] = { "full", 0666, &full_fops, 0 },
         [8] = { "random", 0666, &random_fops, 0 },
         [9] = { "urandom", 0666, &urandom_fops, 0 },
#ifdef CONFIG_PRINTK
        [11] = { "kmsg", 0644, &kmsg_fops, 0 },
#endif
};

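/*
 * Dispatch open() on the mem character major to the per-minor
 * file_operations in devlist[].
 */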
static int memory_open(struct inode *inode, struct file *filp)
{
        int minor;
        const struct memdev *dev;

        minor = iminor(inode);
        if (minor >= ARRAY_SIZE(devlist))
                return -ENXIO;

        dev = &devlist[minor];
        if (!dev->fops)
                return -ENXIO;

        filp->f_op = dev->fops;
        filp->f_mode |= dev->fmode;

        if (dev->fops->open)
                return dev->fops->open(inode, filp);

        return 0;
}

static const struct file_operations memory_fops = {
        .open = memory_open,
        .llseek = noop_llseek,
};

static char *mem_devnode(struct device *dev, umode_t *mode)
{
        if (mode && devlist[MINOR(dev->devt)].mode)
                *mode = devlist[MINOR(dev->devt)].mode;
        return NULL;
}

static struct class *mem_class;

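/*
 * Register the mem character major, create the device nodes listed in
 * devlist[], and finish by initialising the TTY layer.
 */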
static int __init chr_dev_init(void)
{
        int minor;

        if (register_chrdev(MEM_MAJOR, "mem", &memory_fops))
                printk("unable to get major %d for memory devs\n", MEM_MAJOR);

        mem_class = class_create(THIS_MODULE, "mem");
        if (IS_ERR(mem_class))
                return PTR_ERR(mem_class);

        mem_class->devnode = mem_devnode;
        for (minor = 1; minor < ARRAY_SIZE(devlist); minor++) {
                if (!devlist[minor].name)
                        continue;

                /*
                 * Create /dev/port?
                 */
                if ((minor == DEVPORT_MINOR) && !arch_has_dev_port())
                        continue;

                device_create(mem_class, NULL, MKDEV(MEM_MAJOR, minor),
                              NULL, devlist[minor].name);
        }

        return tty_init();
}

fs_initcall(chr_dev_init);