/*
 *  linux/drivers/char/mem.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Added devfs support.
 *    Jan-11-1998, C. Scott Ananian <cananian@alumni.princeton.edu>
 *  Shared /dev/zero mmapping support, Feb 2000, Kanoj Sarcar <kanoj@sgi.com>
 */

#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mman.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/raw.h>
#include <linux/tty.h>
#include <linux/capability.h>
#include <linux/ptrace.h>
#include <linux/device.h>
#include <linux/highmem.h>
#include <linux/crash_dump.h>
#include <linux/backing-dev.h>
#include <linux/bootmem.h>
#include <linux/splice.h>
#include <linux/pfn.h>
#include <linux/export.h>
#include <linux/io.h>
#include <linux/aio.h>

#include <asm/uaccess.h>

#ifdef CONFIG_IA64
# include <linux/efi.h>
#endif

#define DEVPORT_MINOR	4

static inline unsigned long size_inside_page(unsigned long start,
					     unsigned long size)
{
	unsigned long sz;

	/* bytes left in the page containing 'start', capped at 'size' */
	sz = PAGE_SIZE - (start & (PAGE_SIZE - 1));

	return min(sz, size);
}

#ifndef ARCH_HAS_VALID_PHYS_ADDR_RANGE
static inline int valid_phys_addr_range(phys_addr_t addr, size_t count)
{
	return addr + count <= __pa(high_memory);
}

static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
	return 1;
}
#endif

#ifdef CONFIG_STRICT_DEVMEM
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
	u64 from = ((u64)pfn) << PAGE_SHIFT;
	u64 to = from + size;
	u64 cursor = from;

	while (cursor < to) {
		if (!devmem_is_allowed(pfn)) {
			printk(KERN_INFO
			       "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
			       current->comm, from, to);
			return 0;
		}
		cursor += PAGE_SIZE;
		pfn++;
	}
	return 1;
}
#else
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
	return 1;
}
#endif

void __weak unxlate_dev_mem_ptr(unsigned long phys, void *addr)
{
}

/*
 * This function reads the *physical* memory. The f_pos points directly to
 * the memory location.
 */
static ssize_t read_mem(struct file *file, char __user *buf,
			size_t count, loff_t *ppos)
{
	phys_addr_t p = *ppos;
	ssize_t read, sz;
	char *ptr;

	if (!valid_phys_addr_range(p, count))
		return -EFAULT;
	read = 0;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
	/* we don't have page 0 mapped on sparc and m68k.. */
	if (p < PAGE_SIZE) {
		sz = size_inside_page(p, count);
		if (sz > 0) {
			if (clear_user(buf, sz))
				return -EFAULT;
			buf += sz;
			p += sz;
			count -= sz;
			read += sz;
		}
	}
#endif

	while (count > 0) {
		unsigned long remaining;

		sz = size_inside_page(p, count);

		if (!range_is_allowed(p >> PAGE_SHIFT, sz))
			return -EPERM;

		/*
		 * On ia64 if a page has been mapped somewhere as uncached, then
		 * it must also be accessed uncached by the kernel or data
		 * corruption may occur.
		 */
		ptr = xlate_dev_mem_ptr(p);
		if (!ptr)
			return -EFAULT;

		remaining = copy_to_user(buf, ptr, sz);
		unxlate_dev_mem_ptr(p, ptr);
		if (remaining)
			return -EFAULT;

		buf += sz;
		p += sz;
		count -= sz;
		read += sz;
	}

	*ppos += read;
	return read;
}
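/*
 * Illustrative userspace sketch (an assumption for clarity, not part of
 * this driver): because f_pos is the physical address, a physical word
 * can be read with plain file I/O, e.g.:
 *
 *	int fd = open("/dev/mem", O_RDONLY);		// needs CAP_SYS_RAWIO
 *	uint32_t val;
 *	pread(fd, &val, sizeof(val), 0xfee00000);	// hypothetical address
 *
 * Whether such a read succeeds depends on valid_phys_addr_range() and,
 * under CONFIG_STRICT_DEVMEM, on devmem_is_allowed() for each page touched.
 */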
static ssize_t write_mem(struct file *file, const char __user *buf,
			 size_t count, loff_t *ppos)
{
	phys_addr_t p = *ppos;
	ssize_t written, sz;
	unsigned long copied;
	void *ptr;

	if (!valid_phys_addr_range(p, count))
		return -EFAULT;

	written = 0;

#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
	/* we don't have page 0 mapped on sparc and m68k.. */
	if (p < PAGE_SIZE) {
		sz = size_inside_page(p, count);
		/* Hmm. Do something? */
		buf += sz;
		p += sz;
		count -= sz;
		written += sz;
	}
#endif

	while (count > 0) {
		sz = size_inside_page(p, count);

		if (!range_is_allowed(p >> PAGE_SHIFT, sz))
			return -EPERM;

		/*
		 * On ia64 if a page has been mapped somewhere as uncached, then
		 * it must also be accessed uncached by the kernel or data
		 * corruption may occur.
		 */
		ptr = xlate_dev_mem_ptr(p);
		if (!ptr) {
			if (written)
				break;
			return -EFAULT;
		}

		copied = copy_from_user(ptr, buf, sz);
		unxlate_dev_mem_ptr(p, ptr);
		if (copied) {
			/* partial progress wins over -EFAULT */
			written += sz - copied;
			if (written)
				break;
			return -EFAULT;
		}

		buf += sz;
		p += sz;
		count -= sz;
		written += sz;
	}

	*ppos += written;
	return written;
}

int __weak phys_mem_access_prot_allowed(struct file *file,
	unsigned long pfn, unsigned long size, pgprot_t *vma_prot)
{
	return 1;
}

#ifndef __HAVE_PHYS_MEM_ACCESS_PROT

/*
 * Architectures vary in how they handle caching for addresses
 * outside of main memory.
 */
#ifdef pgprot_noncached
static int uncached_access(struct file *file, phys_addr_t addr)
{
#if defined(CONFIG_IA64)
	/*
	 * On ia64, we ignore O_DSYNC because we cannot tolerate memory
	 * attribute aliases.
	 */
	return !(efi_mem_attributes(addr) & EFI_MEMORY_WB);
#elif defined(CONFIG_MIPS)
	{
		extern int __uncached_access(struct file *file,
					     unsigned long addr);

		return __uncached_access(file, addr);
	}
#else
	/*
	 * Accessing memory above the top of what the kernel knows about,
	 * or through a file pointer that was marked O_DSYNC, will be done
	 * non-cached.
	 */
	if (file->f_flags & O_DSYNC)
		return 1;
	return addr >= __pa(high_memory);
#endif
}
#endif

static pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				     unsigned long size, pgprot_t vma_prot)
{
#ifdef pgprot_noncached
	phys_addr_t offset = pfn << PAGE_SHIFT;

	if (uncached_access(file, offset))
		return pgprot_noncached(vma_prot);
#endif
	return vma_prot;
}
#endif

#ifndef CONFIG_MMU
static unsigned long get_unmapped_area_mem(struct file *file,
					   unsigned long addr,
					   unsigned long len,
					   unsigned long pgoff,
					   unsigned long flags)
{
	if (!valid_mmap_phys_addr_range(pgoff, len))
		return (unsigned long) -EINVAL;
	return pgoff << PAGE_SHIFT;
}

/* can't do an in-place private mapping if there's no MMU */
static inline int private_mapping_ok(struct vm_area_struct *vma)
{
	return vma->vm_flags & VM_MAYSHARE;
}
#else
#define get_unmapped_area_mem	NULL

static inline int private_mapping_ok(struct vm_area_struct *vma)
{
	return 1;
}
#endif

static const struct vm_operations_struct mmap_mem_ops = {
#ifdef CONFIG_HAVE_IOREMAP_PROT
	.access = generic_access_phys
#endif
};

static int mmap_mem(struct file *file, struct vm_area_struct *vma)
{
	size_t size = vma->vm_end - vma->vm_start;

	if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
		return -EINVAL;

	if (!private_mapping_ok(vma))
		return -ENOSYS;

	if (!range_is_allowed(vma->vm_pgoff, size))
		return -EPERM;

	if (!phys_mem_access_prot_allowed(file, vma->vm_pgoff, size,
					  &vma->vm_page_prot))
		return -EINVAL;

	vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
						 size,
						 vma->vm_page_prot);

	vma->vm_ops = &mmap_mem_ops;

	/* Remap-pfn-range will mark the range VM_IO */
	if (remap_pfn_range(vma,
			    vma->vm_start,
			    vma->vm_pgoff,
			    size,
			    vma->vm_page_prot)) {
		return -EAGAIN;
	}
	return 0;
}
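/*
 * Illustrative userspace sketch (assumptions noted inline): mmap_mem()
 * treats the file offset as a physical address, so a device register
 * page might be mapped like this:
 *
 *	int fd = open("/dev/mem", O_RDWR | O_DSYNC);
 *	void *regs = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *			  MAP_SHARED, fd, 0xfebd0000);	// hypothetical address
 *
 * On architectures whose uncached_access() honours O_DSYNC, the flag
 * causes phys_mem_access_prot() above to apply pgprot_noncached().
 */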
#ifdef CONFIG_DEVKMEM
static int mmap_kmem(struct file *file, struct vm_area_struct *vma)
{
	unsigned long pfn;

	/* Turn a kernel-virtual address into a physical page frame */
	pfn = __pa((u64)vma->vm_pgoff << PAGE_SHIFT) >> PAGE_SHIFT;

	/*
	 * RED-PEN: on some architectures there is more mapped memory than
	 * available in mem_map which pfn_valid checks for. Perhaps should add a
	 * new macro here.
	 *
	 * RED-PEN: vmalloc is not supported right now.
	 */
	if (!pfn_valid(pfn))
		return -EIO;

	vma->vm_pgoff = pfn;
	return mmap_mem(file, vma);
}
#endif

#ifdef CONFIG_CRASH_DUMP
/*
 * Read memory corresponding to the old kernel.
 */
static ssize_t read_oldmem(struct file *file, char __user *buf,
			   size_t count, loff_t *ppos)
{
	unsigned long pfn, offset;
	size_t read = 0, csize;
	int rc = 0;

	while (count) {
		pfn = *ppos / PAGE_SIZE;
		if (pfn > saved_max_pfn)
			return read;

		offset = (unsigned long)(*ppos % PAGE_SIZE);
		if (count > PAGE_SIZE - offset)
			csize = PAGE_SIZE - offset;
		else
			csize = count;

		rc = copy_oldmem_page(pfn, buf, csize, offset, 1);
		if (rc < 0)
			return rc;
		buf += csize;
		*ppos += csize;
		read += csize;
		count -= csize;
	}
	return read;
}
#endif
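/*
 * The trailing '1' passed to copy_oldmem_page() above is its 'userbuf'
 * argument: the destination buffer is in userspace. After a kdump/kexec
 * boot, crash-dump tooling can read the previous kernel's memory page by
 * page through /dev/oldmem; saved_max_pfn bounds that old memory image.
 */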
#ifdef CONFIG_DEVKMEM
/*
 * This function reads the *virtual* memory as seen by the kernel.
 */
static ssize_t read_kmem(struct file *file, char __user *buf,
			 size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t low_count, read, sz;
	char *kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
	int err = 0;

	read = 0;
	if (p < (unsigned long) high_memory) {
		low_count = count;
		if (count > (unsigned long)high_memory - p)
			low_count = (unsigned long)high_memory - p;

#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
		/* we don't have page 0 mapped on sparc and m68k.. */
		if (p < PAGE_SIZE && low_count > 0) {
			sz = size_inside_page(p, low_count);
			if (clear_user(buf, sz))
				return -EFAULT;
			buf += sz;
			p += sz;
			read += sz;
			low_count -= sz;
			count -= sz;
		}
#endif
		while (low_count > 0) {
			sz = size_inside_page(p, low_count);

			/*
			 * On ia64 if a page has been mapped somewhere as
			 * uncached, then it must also be accessed uncached
			 * by the kernel or data corruption may occur.
			 */
			kbuf = xlate_dev_kmem_ptr((char *)p);

			if (copy_to_user(buf, kbuf, sz))
				return -EFAULT;
			buf += sz;
			p += sz;
			read += sz;
			low_count -= sz;
			count -= sz;
		}
	}

	if (count > 0) {
		kbuf = (char *)__get_free_page(GFP_KERNEL);
		if (!kbuf)
			return -ENOMEM;
		while (count > 0) {
			sz = size_inside_page(p, count);
			if (!is_vmalloc_or_module_addr((void *)p)) {
				err = -ENXIO;
				break;
			}
			sz = vread(kbuf, (char *)p, sz);
			if (!sz)
				break;
			if (copy_to_user(buf, kbuf, sz)) {
				err = -EFAULT;
				break;
			}
			count -= sz;
			buf += sz;
			read += sz;
			p += sz;
		}
		free_page((unsigned long)kbuf);
	}
	*ppos = p;
	return read ? read : err;
}
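/*
 * read_kmem() above covers two distinct address ranges: kernel virtual
 * addresses below high_memory are copied straight out of the direct
 * mapping, while vmalloc/module addresses are bounced through a private
 * page via vread(), which tolerates unmapped holes (a return of 0 ends
 * the loop).
 */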
static ssize_t do_write_kmem(unsigned long p, const char __user *buf,
			     size_t count, loff_t *ppos)
{
	ssize_t written, sz;
	unsigned long copied;

	written = 0;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
	/* we don't have page 0 mapped on sparc and m68k.. */
	if (p < PAGE_SIZE) {
		sz = size_inside_page(p, count);
		/* Hmm. Do something? */
		buf += sz;
		p += sz;
		count -= sz;
		written += sz;
	}
#endif

	while (count > 0) {
		char *ptr;

		sz = size_inside_page(p, count);

		/*
		 * On ia64 if a page has been mapped somewhere as uncached, then
		 * it must also be accessed uncached by the kernel or data
		 * corruption may occur.
		 */
		ptr = xlate_dev_kmem_ptr((char *)p);

		copied = copy_from_user(ptr, buf, sz);
		if (copied) {
			written += sz - copied;
			if (written)
				break;
			return -EFAULT;
		}
		buf += sz;
		p += sz;
		count -= sz;
		written += sz;
	}

	*ppos += written;
	return written;
}

/*
 * This function writes to the *virtual* memory as seen by the kernel.
 */
static ssize_t write_kmem(struct file *file, const char __user *buf,
			  size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t wrote = 0;
	ssize_t virtr = 0;
	char *kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */
	int err = 0;

	if (p < (unsigned long) high_memory) {
		unsigned long to_write = min_t(unsigned long, count,
					       (unsigned long)high_memory - p);
		wrote = do_write_kmem(p, buf, to_write, ppos);
		if (wrote != to_write)
			return wrote;
		p += wrote;
		buf += wrote;
		count -= wrote;
	}

	if (count > 0) {
		kbuf = (char *)__get_free_page(GFP_KERNEL);
		if (!kbuf)
			return wrote ? wrote : -ENOMEM;
		while (count > 0) {
			unsigned long sz = size_inside_page(p, count);
			unsigned long n;

			if (!is_vmalloc_or_module_addr((void *)p)) {
				err = -ENXIO;
				break;
			}
			n = copy_from_user(kbuf, buf, sz);
			if (n) {
				err = -EFAULT;
				break;
			}
			vwrite(kbuf, (char *)p, sz);
			count -= sz;
			buf += sz;
			virtr += sz;
			p += sz;
		}
		free_page((unsigned long)kbuf);
	}

	*ppos = p;
	return virtr + wrote ? : err;
}
#endif

#ifdef CONFIG_DEVPORT
static ssize_t read_port(struct file *file, char __user *buf,
			 size_t count, loff_t *ppos)
{
	unsigned long i = *ppos;
	char __user *tmp = buf;

	if (!access_ok(VERIFY_WRITE, buf, count))
		return -EFAULT;
	while (count-- > 0 && i < 65536) {
		if (__put_user(inb(i), tmp) < 0)
			return -EFAULT;
		i++;
		tmp++;
	}
	*ppos = i;
	return tmp - buf;
}

static ssize_t write_port(struct file *file, const char __user *buf,
			  size_t count, loff_t *ppos)
{
	unsigned long i = *ppos;
	const char __user *tmp = buf;

	if (!access_ok(VERIFY_READ, buf, count))
		return -EFAULT;
	while (count-- > 0 && i < 65536) {
		char c;
		if (__get_user(c, tmp)) {
			if (tmp > buf)
				break;
			return -EFAULT;
		}
		outb(c, i);
		i++;
		tmp++;
	}
	*ppos = i;
	return tmp - buf;
}
#endif
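/*
 * Illustrative userspace sketch: /dev/port exposes I/O port space one
 * byte at a time, with the file offset acting as the port number, e.g.:
 *
 *	int fd = open("/dev/port", O_RDONLY);		// needs CAP_SYS_RAWIO
 *	unsigned char v;
 *	pread(fd, &v, 1, 0x71);				// hypothetical port
 *
 * Each byte goes through inb()/outb(), so a multi-byte transfer issues
 * one port access per byte.
 */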
static ssize_t read_null(struct file *file, char __user *buf,
			 size_t count, loff_t *ppos)
{
	return 0;
}

static ssize_t write_null(struct file *file, const char __user *buf,
			  size_t count, loff_t *ppos)
{
	return count;
}

static ssize_t aio_read_null(struct kiocb *iocb, const struct iovec *iov,
			     unsigned long nr_segs, loff_t pos)
{
	return 0;
}

static ssize_t aio_write_null(struct kiocb *iocb, const struct iovec *iov,
			      unsigned long nr_segs, loff_t pos)
{
	return iov_length(iov, nr_segs);
}

static int pipe_to_null(struct pipe_inode_info *info, struct pipe_buffer *buf,
			struct splice_desc *sd)
{
	return sd->len;
}

static ssize_t splice_write_null(struct pipe_inode_info *pipe, struct file *out,
				 loff_t *ppos, size_t len, unsigned int flags)
{
	return splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_null);
}

static ssize_t read_zero(struct file *file, char __user *buf,
			 size_t count, loff_t *ppos)
{
	size_t written;

	if (!count)
		return 0;

	if (!access_ok(VERIFY_WRITE, buf, count))
		return -EFAULT;

	written = 0;
	while (count) {
		unsigned long unwritten;
		size_t chunk = count;

		if (chunk > PAGE_SIZE)
			chunk = PAGE_SIZE;	/* Just for latency reasons */
		unwritten = __clear_user(buf, chunk);
		written += chunk - unwritten;
		if (unwritten)
			break;
		if (signal_pending(current))
			return written ? written : -ERESTARTSYS;
		buf += chunk;
		count -= chunk;
		cond_resched();
	}
	return written ? written : -EFAULT;
}

static ssize_t aio_read_zero(struct kiocb *iocb, const struct iovec *iov,
			     unsigned long nr_segs, loff_t pos)
{
	size_t written = 0;
	unsigned long i;
	ssize_t ret;

	for (i = 0; i < nr_segs; i++) {
		ret = read_zero(iocb->ki_filp, iov[i].iov_base, iov[i].iov_len,
				&pos);
		if (ret < 0)
			break;
		written += ret;
	}

	return written ? written : -EFAULT;
}

static int mmap_zero(struct file *file, struct vm_area_struct *vma)
{
#ifndef CONFIG_MMU
	return -ENOSYS;
#endif
	if (vma->vm_flags & VM_SHARED)
		return shmem_zero_setup(vma);
	return 0;
}

static ssize_t write_full(struct file *file, const char __user *buf,
			  size_t count, loff_t *ppos)
{
	return -ENOSPC;
}

/*
 * Special lseek() function for /dev/null and /dev/zero. Most notably, you
 * can fopen() both devices with "a" now. This was previously impossible.
 * -- SRB.
 */
static loff_t null_lseek(struct file *file, loff_t offset, int orig)
{
	return file->f_pos = 0;
}

/*
 * The memory devices use the full 32/64 bits of the offset, and so we cannot
 * check against negative addresses: they are ok. The return value is weird,
 * though, in that case (0).
 *
 * also note that seeking relative to the "end of file" isn't supported:
 * it has no meaning, so it returns -EINVAL.
 */
static loff_t memory_lseek(struct file *file, loff_t offset, int orig)
{
	loff_t ret;

	mutex_lock(&file_inode(file)->i_mutex);
	switch (orig) {
	case SEEK_CUR:
		offset += file->f_pos;
		/* fall through */
	case SEEK_SET:
		/* to avoid userland mistaking f_pos=-9 as -EBADF=-9 */
		if ((unsigned long long)offset >= ~0xFFFULL) {
			ret = -EOVERFLOW;
			break;
		}
		file->f_pos = offset;
		ret = file->f_pos;
		force_successful_syscall_return();
		break;
	default:
		ret = -EINVAL;
	}
	mutex_unlock(&file_inode(file)->i_mutex);
	return ret;
}

static int open_port(struct inode *inode, struct file *filp)
{
	return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
}
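/*
 * open_port() also serves as the open() method for /dev/mem, /dev/kmem
 * and /dev/oldmem through the open_* aliases below, so every raw-access
 * node in this driver is gated by CAP_SYS_RAWIO at open time, on top of
 * the per-page checks done by range_is_allowed().
 */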
#define zero_lseek	null_lseek
#define full_lseek	null_lseek
#define write_zero	write_null
#define read_full	read_zero
#define aio_write_zero	aio_write_null
#define open_mem	open_port
#define open_kmem	open_mem
#define open_oldmem	open_mem

static const struct file_operations mem_fops = {
	.llseek		= memory_lseek,
	.read		= read_mem,
	.write		= write_mem,
	.mmap		= mmap_mem,
	.open		= open_mem,
	.get_unmapped_area = get_unmapped_area_mem,
};

#ifdef CONFIG_DEVKMEM
static const struct file_operations kmem_fops = {
	.llseek		= memory_lseek,
	.read		= read_kmem,
	.write		= write_kmem,
	.mmap		= mmap_kmem,
	.open		= open_kmem,
	.get_unmapped_area = get_unmapped_area_mem,
};
#endif

static const struct file_operations null_fops = {
	.llseek		= null_lseek,
	.read		= read_null,
	.write		= write_null,
	.aio_read	= aio_read_null,
	.aio_write	= aio_write_null,
	.splice_write	= splice_write_null,
};

#ifdef CONFIG_DEVPORT
static const struct file_operations port_fops = {
	.llseek		= memory_lseek,
	.read		= read_port,
	.write		= write_port,
	.open		= open_port,
};
#endif

static const struct file_operations zero_fops = {
	.llseek		= zero_lseek,
	.read		= read_zero,
	.write		= write_zero,
	.aio_read	= aio_read_zero,
	.aio_write	= aio_write_zero,
	.mmap		= mmap_zero,
};

/*
 * capabilities for /dev/zero
 * - permits private mappings, "copies" are taken of the source of zeros
 * - no writeback happens
 */
static struct backing_dev_info zero_bdi = {
	.name		= "char/mem",
	.capabilities	= BDI_CAP_MAP_COPY | BDI_CAP_NO_ACCT_AND_WRITEBACK,
};

static const struct file_operations full_fops = {
	.llseek		= full_lseek,
	.read		= read_full,
	.write		= write_full,
};

#ifdef CONFIG_CRASH_DUMP
static const struct file_operations oldmem_fops = {
	.read		= read_oldmem,
	.open		= open_oldmem,
	.llseek		= default_llseek,
};
#endif

static const struct memdev {
	const char *name;
	umode_t mode;
	const struct file_operations *fops;
	struct backing_dev_info *dev_info;
} devlist[] = {
	 [1] = { "mem", 0, &mem_fops, &directly_mappable_cdev_bdi },
#ifdef CONFIG_DEVKMEM
	 [2] = { "kmem", 0, &kmem_fops, &directly_mappable_cdev_bdi },
#endif
	 [3] = { "null", 0666, &null_fops, NULL },
#ifdef CONFIG_DEVPORT
	 [4] = { "port", 0, &port_fops, NULL },
#endif
	 [5] = { "zero", 0666, &zero_fops, &zero_bdi },
	 [7] = { "full", 0666, &full_fops, NULL },
	 [8] = { "random", 0666, &random_fops, NULL },
	 [9] = { "urandom", 0666, &urandom_fops, NULL },
#ifdef CONFIG_PRINTK
	[11] = { "kmsg", 0644, &kmsg_fops, NULL },
#endif
#ifdef CONFIG_CRASH_DUMP
	[12] = { "oldmem", 0, &oldmem_fops, NULL },
#endif
};
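/*
 * The devlist index doubles as the minor number under MEM_MAJOR (1), so
 * the classic device nodes follow directly from the table above; for
 * example, from userspace:
 *
 *	mknod("/dev/null", S_IFCHR | 0666, makedev(1, 3));
 *	mknod("/dev/zero", S_IFCHR | 0666, makedev(1, 5));
 *
 * Entries left out of the table (e.g. [6] and [10]) are minors this
 * driver simply does not provide.
 */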
static int memory_open(struct inode *inode, struct file *filp)
{
	int minor;
	const struct memdev *dev;

	minor = iminor(inode);
	if (minor >= ARRAY_SIZE(devlist))
		return -ENXIO;

	dev = &devlist[minor];
	if (!dev->fops)
		return -ENXIO;

	filp->f_op = dev->fops;
	if (dev->dev_info)
		filp->f_mapping->backing_dev_info = dev->dev_info;

	/* Is this /dev/mem or /dev/kmem? */
	if (dev->dev_info == &directly_mappable_cdev_bdi)
		filp->f_mode |= FMODE_UNSIGNED_OFFSET;

	if (dev->fops->open)
		return dev->fops->open(inode, filp);

	return 0;
}

static const struct file_operations memory_fops = {
	.open		= memory_open,
	.llseek		= noop_llseek,
};

static char *mem_devnode(struct device *dev, umode_t *mode)
{
	if (mode && devlist[MINOR(dev->devt)].mode)
		*mode = devlist[MINOR(dev->devt)].mode;
	return NULL;
}

static struct class *mem_class;

static int __init chr_dev_init(void)
{
	int minor;
	int err;

	err = bdi_init(&zero_bdi);
	if (err)
		return err;

	if (register_chrdev(MEM_MAJOR, "mem", &memory_fops))
		printk(KERN_ERR "unable to get major %d for memory devs\n",
		       MEM_MAJOR);

	mem_class = class_create(THIS_MODULE, "mem");
	if (IS_ERR(mem_class))
		return PTR_ERR(mem_class);

	mem_class->devnode = mem_devnode;
	for (minor = 1; minor < ARRAY_SIZE(devlist); minor++) {
		if (!devlist[minor].name)
			continue;

		/*
		 * Create /dev/port?
		 */
		if ((minor == DEVPORT_MINOR) && !arch_has_dev_port())
			continue;

		device_create(mem_class, NULL, MKDEV(MEM_MAJOR, minor),
			      NULL, devlist[minor].name);
	}

	return tty_init();
}

fs_initcall(chr_dev_init);