/*
 *  linux/drivers/char/mem.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Added devfs support.
 *    Jan-11-1998, C. Scott Ananian <cananian@alumni.princeton.edu>
 *  Shared /dev/zero mmapping support, Feb 2000, Kanoj Sarcar <kanoj@sgi.com>
 */

#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mman.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/raw.h>
#include <linux/tty.h>
#include <linux/capability.h>
#include <linux/ptrace.h>
#include <linux/device.h>
#include <linux/highmem.h>
#include <linux/backing-dev.h>
#include <linux/splice.h>
#include <linux/pfn.h>
#include <linux/export.h>
#include <linux/io.h>
#include <linux/aio.h>

#include <asm/uaccess.h>

#ifdef CONFIG_IA64
# include <linux/efi.h>
#endif

#define DEVPORT_MINOR	4

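/*
 * Return how many of the first @size bytes starting at @start fit inside
 * the page that @start points into, i.e. the distance to the next page
 * boundary capped at @size.  (With 4 KiB pages, start = 0x1ff0 and
 * size = 0x100 yields 0x10.)
 */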
static inline unsigned long size_inside_page(unsigned long start,
					     unsigned long size)
{
	unsigned long sz;

	sz = PAGE_SIZE - (start & (PAGE_SIZE - 1));

	return min(sz, size);
}

#ifndef ARCH_HAS_VALID_PHYS_ADDR_RANGE
static inline int valid_phys_addr_range(phys_addr_t addr, size_t count)
{
	return addr + count <= __pa(high_memory);
}

static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
	return 1;
}
#endif

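/*
 * With CONFIG_STRICT_DEVMEM the architecture's devmem_is_allowed() is
 * consulted for every page in the requested range; without it, any
 * physical range is allowed.
 */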
#ifdef CONFIG_STRICT_DEVMEM
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
	u64 from = ((u64)pfn) << PAGE_SHIFT;
	u64 to = from + size;
	u64 cursor = from;

	while (cursor < to) {
		if (!devmem_is_allowed(pfn)) {
			printk(KERN_INFO
		"Program %s tried to access /dev/mem between %Lx->%Lx.\n",
				current->comm, from, to);
			return 0;
		}
		cursor += PAGE_SIZE;
		pfn++;
	}
	return 1;
}
#else
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
	return 1;
}
#endif

void __weak unxlate_dev_mem_ptr(unsigned long phys, void *addr)
{
}

/*
 * This function reads the *physical* memory. The f_pos points directly to the
 * memory location.
 */
static ssize_t read_mem(struct file *file, char __user *buf,
			size_t count, loff_t *ppos)
{
	phys_addr_t p = *ppos;
	ssize_t read, sz;
	char *ptr;

	if (p != *ppos)
		return 0;

	if (!valid_phys_addr_range(p, count))
		return -EFAULT;
	read = 0;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
	/* we don't have page 0 mapped on sparc and m68k.. */
	if (p < PAGE_SIZE) {
		sz = size_inside_page(p, count);
		if (sz > 0) {
			if (clear_user(buf, sz))
				return -EFAULT;
			buf += sz;
			p += sz;
			count -= sz;
			read += sz;
		}
	}
#endif

	while (count > 0) {
		unsigned long remaining;

		sz = size_inside_page(p, count);

		if (!range_is_allowed(p >> PAGE_SHIFT, count))
			return -EPERM;

		/*
		 * On ia64 if a page has been mapped somewhere as uncached, then
		 * it must also be accessed uncached by the kernel or data
		 * corruption may occur.
		 */
		ptr = xlate_dev_mem_ptr(p);
		if (!ptr)
			return -EFAULT;

		remaining = copy_to_user(buf, ptr, sz);
		unxlate_dev_mem_ptr(p, ptr);
		if (remaining)
			return -EFAULT;

		buf += sz;
		p += sz;
		count -= sz;
		read += sz;
	}

	*ppos += read;
	return read;
}

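/*
 * This function writes to the *physical* memory; like read_mem(), f_pos is
 * interpreted as a physical address.
 */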
static ssize_t write_mem(struct file *file, const char __user *buf,
			 size_t count, loff_t *ppos)
{
	phys_addr_t p = *ppos;
	ssize_t written, sz;
	unsigned long copied;
	void *ptr;

	if (p != *ppos)
		return -EFBIG;

	if (!valid_phys_addr_range(p, count))
		return -EFAULT;

	written = 0;

#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
	/* we don't have page 0 mapped on sparc and m68k.. */
	if (p < PAGE_SIZE) {
		sz = size_inside_page(p, count);
		/* Hmm. Do something? */
		buf += sz;
		p += sz;
		count -= sz;
		written += sz;
	}
#endif

	while (count > 0) {
		sz = size_inside_page(p, count);

		if (!range_is_allowed(p >> PAGE_SHIFT, sz))
			return -EPERM;

		/*
		 * On ia64 if a page has been mapped somewhere as uncached, then
		 * it must also be accessed uncached by the kernel or data
		 * corruption may occur.
		 */
		ptr = xlate_dev_mem_ptr(p);
		if (!ptr) {
			if (written)
				break;
			return -EFAULT;
		}

		copied = copy_from_user(ptr, buf, sz);
		unxlate_dev_mem_ptr(p, ptr);
		if (copied) {
			written += sz - copied;
			if (written)
				break;
			return -EFAULT;
		}

		buf += sz;
		p += sz;
		count -= sz;
		written += sz;
	}

	*ppos += written;
	return written;
}

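/*
 * Weak default hook: architectures may override this to veto or adjust the
 * page protection used for a /dev/mem mapping; the default allows it.
 */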
int __weak phys_mem_access_prot_allowed(struct file *file,
	unsigned long pfn, unsigned long size, pgprot_t *vma_prot)
{
	return 1;
}

#ifndef __HAVE_PHYS_MEM_ACCESS_PROT

/*
 * Architectures vary in how they handle caching for addresses
 * outside of main memory.
 *
 */
#ifdef pgprot_noncached
static int uncached_access(struct file *file, phys_addr_t addr)
{
#if defined(CONFIG_IA64)
	/*
	 * On ia64, we ignore O_DSYNC because we cannot tolerate memory
	 * attribute aliases.
	 */
	return !(efi_mem_attributes(addr) & EFI_MEMORY_WB);
#elif defined(CONFIG_MIPS)
	{
		extern int __uncached_access(struct file *file,
					     unsigned long addr);

		return __uncached_access(file, addr);
	}
#else
	/*
	 * Accessing memory above the top the kernel knows about or through
	 * a file pointer that was marked O_DSYNC will be done non-cached.
	 */
	if (file->f_flags & O_DSYNC)
		return 1;
	return addr >= __pa(high_memory);
#endif
}
#endif

static pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				     unsigned long size, pgprot_t vma_prot)
{
#ifdef pgprot_noncached
	phys_addr_t offset = pfn << PAGE_SHIFT;

	if (uncached_access(file, offset))
		return pgprot_noncached(vma_prot);
#endif
	return vma_prot;
}
#endif

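/*
 * On no-MMU systems /dev/mem can only be mapped in place: the "mapping" is
 * the physical window itself, and private (copy-on-write) mappings are not
 * possible.
 */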
#ifndef CONFIG_MMU
static unsigned long get_unmapped_area_mem(struct file *file,
					   unsigned long addr,
					   unsigned long len,
					   unsigned long pgoff,
					   unsigned long flags)
{
	if (!valid_mmap_phys_addr_range(pgoff, len))
		return (unsigned long) -EINVAL;
	return pgoff << PAGE_SHIFT;
}

/* can't do an in-place private mapping if there's no MMU */
static inline int private_mapping_ok(struct vm_area_struct *vma)
{
	return vma->vm_flags & VM_MAYSHARE;
}
#else
#define get_unmapped_area_mem	NULL

static inline int private_mapping_ok(struct vm_area_struct *vma)
{
	return 1;
}
#endif

static const struct vm_operations_struct mmap_mem_ops = {
#ifdef CONFIG_HAVE_IOREMAP_PROT
	.access = generic_access_phys
#endif
};

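/*
 * mmap of /dev/mem: validate the physical range, pick an appropriate page
 * protection, and map it into userspace with remap_pfn_range().
 */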
static int mmap_mem(struct file *file, struct vm_area_struct *vma)
{
	size_t size = vma->vm_end - vma->vm_start;

	if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
		return -EINVAL;

	if (!private_mapping_ok(vma))
		return -ENOSYS;

	if (!range_is_allowed(vma->vm_pgoff, size))
		return -EPERM;

	if (!phys_mem_access_prot_allowed(file, vma->vm_pgoff, size,
						&vma->vm_page_prot))
		return -EINVAL;

	vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
						 size,
						 vma->vm_page_prot);

	vma->vm_ops = &mmap_mem_ops;

	/* Remap-pfn-range will mark the range VM_IO */
	if (remap_pfn_range(vma,
			    vma->vm_start,
			    vma->vm_pgoff,
			    size,
			    vma->vm_page_prot)) {
		return -EAGAIN;
	}
	return 0;
}

#ifdef CONFIG_DEVKMEM
static int mmap_kmem(struct file *file, struct vm_area_struct *vma)
{
	unsigned long pfn;

	/* Turn a kernel-virtual address into a physical page frame */
	pfn = __pa((u64)vma->vm_pgoff << PAGE_SHIFT) >> PAGE_SHIFT;

	/*
	 * RED-PEN: on some architectures there is more mapped memory than
	 * available in mem_map which pfn_valid checks for. Perhaps should add a
	 * new macro here.
	 *
	 * RED-PEN: vmalloc is not supported right now.
	 */
	if (!pfn_valid(pfn))
		return -EIO;

	vma->vm_pgoff = pfn;
	return mmap_mem(file, vma);
}
#endif

#ifdef CONFIG_DEVKMEM
/*
 * This function reads the *virtual* memory as seen by the kernel.
 */
static ssize_t read_kmem(struct file *file, char __user *buf,
			 size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t low_count, read, sz;
	char *kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
	int err = 0;

	read = 0;
	if (p < (unsigned long) high_memory) {
		low_count = count;
		if (count > (unsigned long)high_memory - p)
			low_count = (unsigned long)high_memory - p;

#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
		/* we don't have page 0 mapped on sparc and m68k.. */
		if (p < PAGE_SIZE && low_count > 0) {
			sz = size_inside_page(p, low_count);
			if (clear_user(buf, sz))
				return -EFAULT;
			buf += sz;
			p += sz;
			read += sz;
			low_count -= sz;
			count -= sz;
		}
#endif
		while (low_count > 0) {
			sz = size_inside_page(p, low_count);

			/*
			 * On ia64 if a page has been mapped somewhere as
			 * uncached, then it must also be accessed uncached
			 * by the kernel or data corruption may occur
			 */
			kbuf = xlate_dev_kmem_ptr((char *)p);

			if (copy_to_user(buf, kbuf, sz))
				return -EFAULT;
			buf += sz;
			p += sz;
			read += sz;
			low_count -= sz;
			count -= sz;
		}
	}

	if (count > 0) {
		kbuf = (char *)__get_free_page(GFP_KERNEL);
		if (!kbuf)
			return -ENOMEM;
		while (count > 0) {
			sz = size_inside_page(p, count);
			if (!is_vmalloc_or_module_addr((void *)p)) {
				err = -ENXIO;
				break;
			}
			sz = vread(kbuf, (char *)p, sz);
			if (!sz)
				break;
			if (copy_to_user(buf, kbuf, sz)) {
				err = -EFAULT;
				break;
			}
			count -= sz;
			buf += sz;
			read += sz;
			p += sz;
		}
		free_page((unsigned long)kbuf);
	}
	*ppos = p;
	return read ? read : err;
}

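/*
 * Helper for write_kmem(): copy user data into directly mapped ("low")
 * kernel memory one page at a time.
 */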
static ssize_t do_write_kmem(unsigned long p, const char __user *buf,
				size_t count, loff_t *ppos)
{
	ssize_t written, sz;
	unsigned long copied;

	written = 0;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
	/* we don't have page 0 mapped on sparc and m68k.. */
	if (p < PAGE_SIZE) {
		sz = size_inside_page(p, count);
		/* Hmm. Do something? */
		buf += sz;
		p += sz;
		count -= sz;
		written += sz;
	}
#endif

	while (count > 0) {
		char *ptr;

		sz = size_inside_page(p, count);

		/*
		 * On ia64 if a page has been mapped somewhere as uncached, then
		 * it must also be accessed uncached by the kernel or data
		 * corruption may occur.
		 */
		ptr = xlate_dev_kmem_ptr((char *)p);

		copied = copy_from_user(ptr, buf, sz);
		if (copied) {
			written += sz - copied;
			if (written)
				break;
			return -EFAULT;
		}
		buf += sz;
		p += sz;
		count -= sz;
		written += sz;
	}

	*ppos += written;
	return written;
}

/*
 * This function writes to the *virtual* memory as seen by the kernel.
 */
static ssize_t write_kmem(struct file *file, const char __user *buf,
			  size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t wrote = 0;
	ssize_t virtr = 0;
	char *kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */
	int err = 0;

	if (p < (unsigned long) high_memory) {
		unsigned long to_write = min_t(unsigned long, count,
					       (unsigned long)high_memory - p);
		wrote = do_write_kmem(p, buf, to_write, ppos);
		if (wrote != to_write)
			return wrote;
		p += wrote;
		buf += wrote;
		count -= wrote;
	}

	if (count > 0) {
		kbuf = (char *)__get_free_page(GFP_KERNEL);
		if (!kbuf)
			return wrote ? wrote : -ENOMEM;
		while (count > 0) {
			unsigned long sz = size_inside_page(p, count);
			unsigned long n;

			if (!is_vmalloc_or_module_addr((void *)p)) {
				err = -ENXIO;
				break;
			}
			n = copy_from_user(kbuf, buf, sz);
			if (n) {
				err = -EFAULT;
				break;
			}
			vwrite(kbuf, (char *)p, sz);
			count -= sz;
			buf += sz;
			virtr += sz;
			p += sz;
		}
		free_page((unsigned long)kbuf);
	}

	*ppos = p;
	return virtr + wrote ? : err;
}
#endif

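/*
 * /dev/port: byte-at-a-time access to the I/O port space via inb()/outb(),
 * with the file offset used as the port number.
 */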
#ifdef CONFIG_DEVPORT
static ssize_t read_port(struct file *file, char __user *buf,
			 size_t count, loff_t *ppos)
{
	unsigned long i = *ppos;
	char __user *tmp = buf;

	if (!access_ok(VERIFY_WRITE, buf, count))
		return -EFAULT;
	while (count-- > 0 && i < 65536) {
		if (__put_user(inb(i), tmp) < 0)
			return -EFAULT;
		i++;
		tmp++;
	}
	*ppos = i;
	return tmp-buf;
}

static ssize_t write_port(struct file *file, const char __user *buf,
			  size_t count, loff_t *ppos)
{
	unsigned long i = *ppos;
	const char __user *tmp = buf;

	if (!access_ok(VERIFY_READ, buf, count))
		return -EFAULT;
	while (count-- > 0 && i < 65536) {
		char c;
		if (__get_user(c, tmp)) {
			if (tmp > buf)
				break;
			return -EFAULT;
		}
		outb(c, i);
		i++;
		tmp++;
	}
	*ppos = i;
	return tmp-buf;
}
#endif

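/*
 * /dev/null: reads return EOF, writes (including aio and splice) succeed
 * and discard the data.
 */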
static ssize_t read_null(struct file *file, char __user *buf,
			 size_t count, loff_t *ppos)
{
	return 0;
}

static ssize_t write_null(struct file *file, const char __user *buf,
			  size_t count, loff_t *ppos)
{
	return count;
}

static ssize_t aio_read_null(struct kiocb *iocb, const struct iovec *iov,
			     unsigned long nr_segs, loff_t pos)
{
	return 0;
}

static ssize_t aio_write_null(struct kiocb *iocb, const struct iovec *iov,
			      unsigned long nr_segs, loff_t pos)
{
	return iov_length(iov, nr_segs);
}

static int pipe_to_null(struct pipe_inode_info *info, struct pipe_buffer *buf,
			struct splice_desc *sd)
{
	return sd->len;
}

static ssize_t splice_write_null(struct pipe_inode_info *pipe, struct file *out,
				 loff_t *ppos, size_t len, unsigned int flags)
{
	return splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_null);
}

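/*
 * /dev/zero read: fill the user buffer with zeroes in page-sized chunks so
 * that pending signals can be handled between chunks.
 */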
static ssize_t read_zero(struct file *file, char __user *buf,
			 size_t count, loff_t *ppos)
{
	size_t written;

	if (!count)
		return 0;

	if (!access_ok(VERIFY_WRITE, buf, count))
		return -EFAULT;

	written = 0;
	while (count) {
		unsigned long unwritten;
		size_t chunk = count;

		if (chunk > PAGE_SIZE)
			chunk = PAGE_SIZE;	/* Just for latency reasons */
		unwritten = __clear_user(buf, chunk);
		written += chunk - unwritten;
		if (unwritten)
			break;
		if (signal_pending(current))
			return written ? written : -ERESTARTSYS;
		buf += chunk;
		count -= chunk;
		cond_resched();
	}
	return written ? written : -EFAULT;
}

static ssize_t aio_read_zero(struct kiocb *iocb, const struct iovec *iov,
			     unsigned long nr_segs, loff_t pos)
{
	size_t written = 0;
	unsigned long i;
	ssize_t ret;

	for (i = 0; i < nr_segs; i++) {
		ret = read_zero(iocb->ki_filp, iov[i].iov_base, iov[i].iov_len,
				&pos);
		if (ret < 0)
			break;
		written += ret;
	}

	return written ? written : -EFAULT;
}

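/*
 * mmap of /dev/zero: a shared mapping is backed by shmem so the pages can be
 * written and shared; a private mapping needs no setup here since it behaves
 * like ordinary anonymous memory.
 */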
static int mmap_zero(struct file *file, struct vm_area_struct *vma)
{
#ifndef CONFIG_MMU
	return -ENOSYS;
#endif
	if (vma->vm_flags & VM_SHARED)
		return shmem_zero_setup(vma);
	return 0;
}

static ssize_t write_full(struct file *file, const char __user *buf,
			  size_t count, loff_t *ppos)
{
	return -ENOSPC;
}

/*
 * Special lseek() function for /dev/null and /dev/zero.  Most notably, you
 * can fopen() both devices with "a" now.  This was previously impossible.
 * -- SRB.
 */
static loff_t null_lseek(struct file *file, loff_t offset, int orig)
{
	return file->f_pos = 0;
}

/*
 * The memory devices use the full 32/64 bits of the offset, and so we cannot
 * check against negative addresses: they are ok. The return value is weird,
 * though, in that case (0).
 *
 * also note that seeking relative to the "end of file" isn't supported:
 * it has no meaning, so it returns -EINVAL.
 */
static loff_t memory_lseek(struct file *file, loff_t offset, int orig)
{
	loff_t ret;

	mutex_lock(&file_inode(file)->i_mutex);
	switch (orig) {
	case SEEK_CUR:
		offset += file->f_pos;
	case SEEK_SET:
		/* to avoid userland mistaking f_pos=-9 as -EBADF=-9 */
		if (IS_ERR_VALUE((unsigned long long)offset)) {
			ret = -EOVERFLOW;
			break;
		}
		file->f_pos = offset;
		ret = file->f_pos;
		force_successful_syscall_return();
		break;
	default:
		ret = -EINVAL;
	}
	mutex_unlock(&file_inode(file)->i_mutex);
	return ret;
}

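/*
 * Opening /dev/port (and, via the open_mem/open_kmem aliases below,
 * /dev/mem and /dev/kmem) requires CAP_SYS_RAWIO.
 */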
static int open_port(struct inode *inode, struct file *filp)
{
	return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
}

#define zero_lseek	null_lseek
#define full_lseek	null_lseek
#define write_zero	write_null
#define read_full	read_zero
#define aio_write_zero	aio_write_null
#define open_mem	open_port
#define open_kmem	open_mem

static const struct file_operations mem_fops = {
	.llseek		= memory_lseek,
	.read		= read_mem,
	.write		= write_mem,
	.mmap		= mmap_mem,
	.open		= open_mem,
	.get_unmapped_area = get_unmapped_area_mem,
};

#ifdef CONFIG_DEVKMEM
static const struct file_operations kmem_fops = {
	.llseek		= memory_lseek,
	.read		= read_kmem,
	.write		= write_kmem,
	.mmap		= mmap_kmem,
	.open		= open_kmem,
	.get_unmapped_area = get_unmapped_area_mem,
};
#endif

static const struct file_operations null_fops = {
	.llseek		= null_lseek,
	.read		= read_null,
	.write		= write_null,
	.aio_read	= aio_read_null,
	.aio_write	= aio_write_null,
	.splice_write	= splice_write_null,
};

#ifdef CONFIG_DEVPORT
static const struct file_operations port_fops = {
	.llseek		= memory_lseek,
	.read		= read_port,
	.write		= write_port,
	.open		= open_port,
};
#endif

static const struct file_operations zero_fops = {
	.llseek		= zero_lseek,
	.read		= read_zero,
	.write		= write_zero,
	.aio_read	= aio_read_zero,
	.aio_write	= aio_write_zero,
	.mmap		= mmap_zero,
};

/*
 * capabilities for /dev/zero
 * - permits private mappings, "copies" are taken of the source of zeros
 * - no writeback happens
 */
static struct backing_dev_info zero_bdi = {
	.name		= "char/mem",
	.capabilities	= BDI_CAP_MAP_COPY | BDI_CAP_NO_ACCT_AND_WRITEBACK,
};

static const struct file_operations full_fops = {
	.llseek		= full_lseek,
	.read		= read_full,
	.write		= write_full,
};

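/*
 * Table of the devices behind character major MEM_MAJOR; the array index is
 * the minor number (1 = /dev/mem, 3 = /dev/null, 5 = /dev/zero, 7 = /dev/full,
 * 8 = /dev/random, 9 = /dev/urandom, ...).
 */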
static const struct memdev {
	const char *name;
	umode_t mode;
	const struct file_operations *fops;
	struct backing_dev_info *dev_info;
} devlist[] = {
	 [1] = { "mem", 0, &mem_fops, &directly_mappable_cdev_bdi },
#ifdef CONFIG_DEVKMEM
	 [2] = { "kmem", 0, &kmem_fops, &directly_mappable_cdev_bdi },
#endif
	 [3] = { "null", 0666, &null_fops, NULL },
#ifdef CONFIG_DEVPORT
	 [4] = { "port", 0, &port_fops, NULL },
#endif
	 [5] = { "zero", 0666, &zero_fops, &zero_bdi },
	 [7] = { "full", 0666, &full_fops, NULL },
	 [8] = { "random", 0666, &random_fops, NULL },
	 [9] = { "urandom", 0666, &urandom_fops, NULL },
#ifdef CONFIG_PRINTK
	[11] = { "kmsg", 0644, &kmsg_fops, NULL },
#endif
};

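/*
 * open() entry point for major MEM_MAJOR: look up the per-minor entry in
 * devlist[], install its file_operations and backing_dev_info, and chain to
 * the device-specific open routine if there is one.
 */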
static int memory_open(struct inode *inode, struct file *filp)
{
	int minor;
	const struct memdev *dev;

	minor = iminor(inode);
	if (minor >= ARRAY_SIZE(devlist))
		return -ENXIO;

	dev = &devlist[minor];
	if (!dev->fops)
		return -ENXIO;

	filp->f_op = dev->fops;
	if (dev->dev_info)
		filp->f_mapping->backing_dev_info = dev->dev_info;

	/* Is /dev/mem or /dev/kmem ? */
	if (dev->dev_info == &directly_mappable_cdev_bdi)
		filp->f_mode |= FMODE_UNSIGNED_OFFSET;

	if (dev->fops->open)
		return dev->fops->open(inode, filp);

	return 0;
}

static const struct file_operations memory_fops = {
	.open = memory_open,
	.llseek = noop_llseek,
};

static char *mem_devnode(struct device *dev, umode_t *mode)
{
	if (mode && devlist[MINOR(dev->devt)].mode)
		*mode = devlist[MINOR(dev->devt)].mode;
	return NULL;
}

static struct class *mem_class;

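/*
 * Boot-time initialisation: register character major MEM_MAJOR, create the
 * "mem" class and a device node for each populated devlist[] entry, then
 * hand off to tty_init().
 */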
static int __init chr_dev_init(void)
{
	int minor;
	int err;

	err = bdi_init(&zero_bdi);
	if (err)
		return err;

	if (register_chrdev(MEM_MAJOR, "mem", &memory_fops))
		printk("unable to get major %d for memory devs\n", MEM_MAJOR);

	mem_class = class_create(THIS_MODULE, "mem");
	if (IS_ERR(mem_class))
		return PTR_ERR(mem_class);

	mem_class->devnode = mem_devnode;
	for (minor = 1; minor < ARRAY_SIZE(devlist); minor++) {
		if (!devlist[minor].name)
			continue;

		/*
		 * Create /dev/port?
		 */
		if ((minor == DEVPORT_MINOR) && !arch_has_dev_port())
			continue;

		device_create(mem_class, NULL, MKDEV(MEM_MAJOR, minor),
			      NULL, devlist[minor].name);
	}

	return tty_init();
}

fs_initcall(chr_dev_init);