Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'akpm' (incoming from Andrew)

Merge second patch-bomb from Andrew Morton:
- various misc bits
- the rest of MM
- add generic fixmap.h, use it
- backlight updates
- dynamic_debug updates
- printk() updates
- checkpatch updates
- binfmt_elf
- ramfs
- init/
- autofs4
- drivers/rtc
- nilfs
- hfsplus
- Documentation/
- coredump
- procfs
- fork
- exec
- kexec
- kdump
- partitions
- rapidio
- rbtree
- userns
- memstick
- w1
- decompressors

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (197 commits)
lib/decompress_unlz4.c: always set an error return code on failures
romfs: fix return err while getting inode in fill_super
drivers/w1/masters/w1-gpio.c: add strong pullup emulation
drivers/memstick/host/rtsx_pci_ms.c: fix ms card data transfer bug
userns: relax the posix_acl_valid() checks
arch/sh/kernel/dwarf.c: use rbtree postorder iteration helper instead of solution using repeated rb_erase()
fs-ext3-use-rbtree-postorder-iteration-helper-instead-of-opencoding-fix
fs/ext3: use rbtree postorder iteration helper instead of opencoding
fs/jffs2: use rbtree postorder iteration helper instead of opencoding
fs/ext4: use rbtree postorder iteration helper instead of opencoding
fs/ubifs: use rbtree postorder iteration helper instead of opencoding
net/netfilter/ipset/ip_set_hash_netiface.c: use rbtree postorder iteration instead of opencoding
rbtree/test: test rbtree_postorder_for_each_entry_safe()
rbtree/test: move rb_node to the middle of the test struct
rapidio: add modular rapidio core build into powerpc and mips branches
partitions/efi: complete documentation of gpt kernel param purpose
kdump: add /sys/kernel/vmcoreinfo ABI documentation
kdump: fix exported size of vmcoreinfo note
kexec: add sysctl to disable kexec_load
fs/exec.c: call arch_pick_mmap_layout() only once
...

+3339 -2187
+14
Documentation/ABI/testing/sysfs-kernel-vmcoreinfo
··· 1 + What: /sys/kernel/vmcoreinfo 2 + Date: October 2007 3 + KernelVersion: 2.6.24 4 + Contact: Ken'ichi Ohmichi <oomichi@mxs.nes.nec.co.jp> 5 + Kexec Mailing List <kexec@lists.infradead.org> 6 + Vivek Goyal <vgoyal@redhat.com> 7 + Description 8 + Shows physical address and size of vmcoreinfo ELF note. 9 + First value contains physical address of note in hex and 10 + second value contains the size of note in hex. This ELF 11 + note info is parsed by second kernel and exported to user 12 + space as part of ELF note in /proc/vmcore file. This note 13 + contains various information like struct size, symbol 14 + values, page size etc.
+15 -6
Documentation/blockdev/ramdisk.txt
··· 36 36 rescue floppy disk. 37 37 38 38 39 - 2) Kernel Command Line Parameters 39 + 2) Parameters 40 40 --------------------------------- 41 + 42 + 2a) Kernel Command Line Parameters 41 43 42 44 ramdisk_size=N 43 45 ============== 44 46 45 47 This parameter tells the RAM disk driver to set up RAM disks of N k size. The 46 - default is 4096 (4 MB) (8192 (8 MB) on S390). 48 + default is 4096 (4 MB). 47 49 48 - ramdisk_blocksize=N 49 - =================== 50 + 2b) Module parameters 50 51 51 - This parameter tells the RAM disk driver how many bytes to use per block. The 52 - default is 1024 (BLOCK_SIZE). 52 + rd_nr 53 + ===== 54 + /dev/ramX devices created. 53 55 56 + max_part 57 + ======== 58 + Maximum partition number. 59 + 60 + rd_size 61 + ======= 62 + See ramdisk_size. 54 63 55 64 3) Using "rdev -r" 56 65 ------------------
+1 -1
Documentation/cpu-hotplug.txt
··· 285 285 return NOTIFY_OK; 286 286 } 287 287 288 - static struct notifier_block foobar_cpu_notifer = 288 + static struct notifier_block foobar_cpu_notifier = 289 289 { 290 290 .notifier_call = foobar_cpu_callback, 291 291 };
+27
Documentation/devicetree/bindings/rtc/haoyu,hym8563.txt
··· 1 + Haoyu Microelectronics HYM8563 Real Time Clock 2 + 3 + The HYM8563 provides basic rtc and alarm functionality 4 + as well as a clock output of up to 32kHz. 5 + 6 + Required properties: 7 + - compatible: should be: "haoyu,hym8563" 8 + - reg: i2c address 9 + - interrupts: rtc alarm/event interrupt 10 + - #clock-cells: the value should be 0 11 + 12 + Example: 13 + 14 + hym8563: hym8563@51 { 15 + compatible = "haoyu,hym8563"; 16 + reg = <0x51>; 17 + 18 + interrupts = <13 IRQ_TYPE_EDGE_FALLING>; 19 + 20 + #clock-cells = <0>; 21 + }; 22 + 23 + device { 24 + ... 25 + clocks = <&hym8563>; 26 + ... 27 + };
+12
Documentation/devicetree/bindings/rtc/maxim,ds1742.txt
··· 1 + * Maxim (Dallas) DS1742/DS1743 Real Time Clock 2 + 3 + Required properties: 4 + - compatible: Should contain "maxim,ds1742". 5 + - reg: Physical base address of the RTC and length of memory 6 + mapped region. 7 + 8 + Example: 9 + rtc: rtc@10000000 { 10 + compatible = "maxim,ds1742"; 11 + reg = <0x10000000 0x800>; 12 + };
+1
Documentation/devicetree/bindings/vendor-prefixes.txt
··· 34 34 GEFanuc GE Fanuc Intelligent Platforms Embedded Systems, Inc. 35 35 gef GE Fanuc Intelligent Platforms Embedded Systems, Inc. 36 36 gmt Global Mixed-mode Technology, Inc. 37 + haoyu Haoyu Microelectronic Co. Ltd. 37 38 hisilicon Hisilicon Limited. 38 39 hp Hewlett Packard 39 40 ibm International Business Machines (IBM)
+9
Documentation/dynamic-debug-howto.txt
··· 108 108 109 109 ~# cat query-batch-file > <debugfs>/dynamic_debug/control 110 110 111 + A another way is to use wildcard. The match rule support '*' (matches 112 + zero or more characters) and '?' (matches exactly one character).For 113 + example, you can match all usb drivers: 114 + 115 + ~# echo "file drivers/usb/* +p" > <debugfs>/dynamic_debug/control 116 + 111 117 At the syntactical level, a command comprises a sequence of match 112 118 specifications, followed by a flags change specification. 113 119 ··· 320 314 // enable messages for NFS calls READ, READLINK, READDIR and READDIR+. 321 315 nullarbor:~ # echo -n 'format "nfsd: READ" +p' > 322 316 <debugfs>/dynamic_debug/control 317 + 318 + // enable messages in files of which the pathes include string "usb" 319 + nullarbor:~ # echo -n '*usb* +p' > <debugfs>/dynamic_debug/control 323 320 324 321 // enable all messages 325 322 nullarbor:~ # echo -n '+p' > <debugfs>/dynamic_debug/control
+45 -13
Documentation/filesystems/00-INDEX
··· 10 10 - info and examples for the distributed AFS (Andrew File System) fs. 11 11 affs.txt 12 12 - info and mount options for the Amiga Fast File System. 13 + autofs4-mount-control.txt 14 + - info on device control operations for autofs4 module. 13 15 automount-support.txt 14 16 - information about filesystem automount support. 15 17 befs.txt 16 18 - information about the BeOS filesystem for Linux. 17 19 bfs.txt 18 20 - info for the SCO UnixWare Boot Filesystem (BFS). 21 + btrfs.txt 22 + - info for the BTRFS filesystem. 23 + caching/ 24 + - directory containing filesystem cache documentation. 19 25 ceph.txt 20 - - info for the Ceph Distributed File System 21 - cifs.txt 22 - - description of the CIFS filesystem. 26 + - info for the Ceph Distributed File System. 27 + cifs/ 28 + - directory containing CIFS filesystem documentation and example code. 23 29 coda.txt 24 30 - description of the CODA filesystem. 25 31 configfs/ 26 32 - directory containing configfs documentation and example code. 27 33 cramfs.txt 28 34 - info on the cram filesystem for small storage (ROMs etc). 29 - dentry-locking.txt 30 - - info on the RCU-based dcache locking model. 35 + debugfs.txt 36 + - info on the debugfs filesystem. 37 + devpts.txt 38 + - info on the devpts filesystem. 31 39 directory-locking 32 40 - info about the locking scheme used for directory operations. 33 41 dlmfs.txt ··· 43 35 dnotify.txt 44 36 - info about directory notification in Linux. 45 37 dnotify_test.c 46 - - example program for dnotify 38 + - example program for dnotify. 47 39 ecryptfs.txt 48 40 - docs on eCryptfs: stacked cryptographic filesystem for Linux. 49 41 efivarfs.txt ··· 56 48 - info, mount options and specifications for the Ext3 filesystem. 57 49 ext4.txt 58 50 - info, mount options and specifications for the Ext4 filesystem. 59 - files.txt 60 - - info on file management in the Linux kernel. 61 51 f2fs.txt 62 52 - info and mount options for the F2FS filesystem. 
53 + fiemap.txt 54 + - info on fiemap ioctl. 55 + files.txt 56 + - info on file management in the Linux kernel. 63 57 fuse.txt 64 58 - info on the Filesystem in User SpacE including mount options. 59 + gfs2-glocks.txt 60 + - info on the Global File System 2 - Glock internal locking rules. 61 + gfs2-uevents.txt 62 + - info on the Global File System 2 - uevents. 65 63 gfs2.txt 66 64 - info on the Global File System 2. 67 65 hfs.txt ··· 98 84 - info and mount options for the NTFS filesystem (Windows NT). 99 85 ocfs2.txt 100 86 - info and mount options for the OCFS2 clustered filesystem. 87 + omfs.txt 88 + - info on the Optimized MPEG FileSystem. 89 + path-lookup.txt 90 + - info on path walking and name lookup locking. 91 + pohmelfs/ 92 + - directory containing pohmelfs filesystem documentation. 101 93 porting 102 94 - various information on filesystem porting. 103 95 proc.txt 104 96 - info on Linux's /proc filesystem. 97 + qnx6.txt 98 + - info on the QNX6 filesystem. 99 + quota.txt 100 + - info on Quota subsystem. 105 101 ramfs-rootfs-initramfs.txt 106 102 - info on the 'in memory' filesystems ramfs, rootfs and initramfs. 107 - reiser4.txt 108 - - info on the Reiser4 filesystem based on dancing tree algorithms. 109 103 relay.txt 110 104 - info on relay, for efficient streaming from kernel to user space. 111 105 romfs.txt 112 106 - description of the ROMFS filesystem. 113 107 seq_file.txt 114 - - how to use the seq_file API 108 + - how to use the seq_file API. 115 109 sharedsubtree.txt 116 110 - a description of shared subtrees for namespaces. 117 111 spufs.txt 118 112 - info and mount options for the SPU filesystem used on Cell. 113 + squashfs.txt 114 + - info on the squashfs filesystem. 119 115 sysfs-pci.txt 120 116 - info on accessing PCI device resources through sysfs. 117 + sysfs-tagging.txt 118 + - info on sysfs tagging to avoid duplicates. 121 119 sysfs.txt 122 120 - info on sysfs, a ram-based filesystem for exporting kernel objects. 
123 121 sysv-fs.txt 124 122 - info on the SystemV/V7/Xenix/Coherent filesystem. 125 123 tmpfs.txt 126 124 - info on tmpfs, a filesystem that holds all files in virtual memory. 125 + ubifs.txt 126 + - info on the Unsorted Block Images FileSystem. 127 127 udf.txt 128 128 - info and mount options for the UDF filesystem. 129 129 ufs.txt 130 130 - info on the ufs filesystem. 131 131 vfat.txt 132 - - info on using the VFAT filesystem used in Windows NT and Windows 95 132 + - info on using the VFAT filesystem used in Windows NT and Windows 95. 133 133 vfs.txt 134 - - overview of the Virtual File System 134 + - overview of the Virtual File System. 135 + xfs-delayed-logging-design.txt 136 + - info on the XFS Delayed Logging Design. 137 + xfs-self-describing-metadata.txt 138 + - info on XFS Self Describing Metadata. 135 139 xfs.txt 136 140 - info and mount options for the XFS filesystem. 137 141 xip.txt
+56
Documentation/filesystems/nilfs2.txt
··· 81 81 block device when blocks are freed. This is useful 82 82 for SSD devices and sparse/thinly-provisioned LUNs. 83 83 84 + Ioctls 85 + ====== 86 + 87 + There is some NILFS2 specific functionality which can be accessed by applications 88 + through the system call interfaces. The list of all NILFS2 specific ioctls are 89 + shown in the table below. 90 + 91 + Table of NILFS2 specific ioctls 92 + .............................................................................. 93 + Ioctl Description 94 + NILFS_IOCTL_CHANGE_CPMODE Change mode of given checkpoint between 95 + checkpoint and snapshot state. This ioctl is 96 + used in chcp and mkcp utilities. 97 + 98 + NILFS_IOCTL_DELETE_CHECKPOINT Remove checkpoint from NILFS2 file system. 99 + This ioctl is used in rmcp utility. 100 + 101 + NILFS_IOCTL_GET_CPINFO Return info about requested checkpoints. This 102 + ioctl is used in lscp utility and by 103 + nilfs_cleanerd daemon. 104 + 105 + NILFS_IOCTL_GET_CPSTAT Return checkpoints statistics. This ioctl is 106 + used by lscp, rmcp utilities and by 107 + nilfs_cleanerd daemon. 108 + 109 + NILFS_IOCTL_GET_SUINFO Return segment usage info about requested 110 + segments. This ioctl is used in lssu, 111 + nilfs_resize utilities and by nilfs_cleanerd 112 + daemon. 113 + 114 + NILFS_IOCTL_GET_SUSTAT Return segment usage statistics. This ioctl 115 + is used in lssu, nilfs_resize utilities and 116 + by nilfs_cleanerd daemon. 117 + 118 + NILFS_IOCTL_GET_VINFO Return information on virtual block addresses. 119 + This ioctl is used by nilfs_cleanerd daemon. 120 + 121 + NILFS_IOCTL_GET_BDESCS Return information about descriptors of disk 122 + block numbers. This ioctl is used by 123 + nilfs_cleanerd daemon. 124 + 125 + NILFS_IOCTL_CLEAN_SEGMENTS Do garbage collection operation in the 126 + environment of requested parameters from 127 + userspace. This ioctl is used by 128 + nilfs_cleanerd daemon. 129 + 130 + NILFS_IOCTL_SYNC Make a checkpoint. 
This ioctl is used in 131 + mkcp utility. 132 + 133 + NILFS_IOCTL_RESIZE Resize NILFS2 volume. This ioctl is used 134 + by nilfs_resize utility. 135 + 136 + NILFS_IOCTL_SET_ALLOC_RANGE Define lower limit of segments in bytes and 137 + upper limit of segments in bytes. This ioctl 138 + is used by nilfs_resize utility. 139 + 84 140 NILFS2 usage 85 141 ============ 86 142
+3 -3
Documentation/filesystems/sysfs.txt
··· 108 108 is equivalent to doing: 109 109 110 110 static struct device_attribute dev_attr_foo = { 111 - .attr = { 111 + .attr = { 112 112 .name = "foo", 113 113 .mode = S_IWUSR | S_IRUGO, 114 - .show = show_foo, 115 - .store = store_foo, 116 114 }, 115 + .show = show_foo, 116 + .store = store_foo, 117 117 }; 118 118 119 119
+10 -1
Documentation/kernel-parameters.txt
··· 1059 1059 debugfs files are removed at module unload time. 1060 1060 1061 1061 gpt [EFI] Forces disk with valid GPT signature but 1062 - invalid Protective MBR to be treated as GPT. 1062 + invalid Protective MBR to be treated as GPT. If the 1063 + primary GPT is corrupted, it enables the backup/alternate 1064 + GPT to be used instead. 1063 1065 1064 1066 grcan.enable0= [HW] Configuration of physical interface 0. Determines 1065 1067 the "Enable 0" bit of the configuration register. ··· 1462 1460 kmemleak= [KNL] Boot-time kmemleak enable/disable 1463 1461 Valid arguments: on, off 1464 1462 Default: on 1463 + 1464 + kmemcheck= [X86] Boot-time kmemcheck enable/disable/one-shot mode 1465 + Valid arguments: 0, 1, 2 1466 + kmemcheck=0 (disabled) 1467 + kmemcheck=1 (enabled) 1468 + kmemcheck=2 (one-shot mode) 1469 + Default: 2 (one-shot mode) 1465 1470 1466 1471 kstack=N [X86] Print N words from the kernel stack 1467 1472 in oops dumps.
+9 -2
Documentation/printk-formats.txt
··· 55 55 For printing struct resources. The 'R' and 'r' specifiers result in a 56 56 printed resource with ('R') or without ('r') a decoded flags member. 57 57 58 - Physical addresses: 58 + Physical addresses types phys_addr_t: 59 59 60 - %pa 0x01234567 or 0x0123456789abcdef 60 + %pa[p] 0x01234567 or 0x0123456789abcdef 61 61 62 62 For printing a phys_addr_t type (and its derivatives, such as 63 63 resource_size_t) which can vary based on build options, regardless of 64 64 the width of the CPU data path. Passed by reference. 65 + 66 + DMA addresses types dma_addr_t: 67 + 68 + %pad 0x01234567 or 0x0123456789abcdef 69 + 70 + For printing a dma_addr_t type which can vary based on build options, 71 + regardless of the width of the CPU data path. Passed by reference. 65 72 66 73 Raw buffer as a hex string: 67 74 %*ph 00 01 02 ... 3f
+14 -1
Documentation/sysctl/kernel.txt
··· 33 33 - domainname 34 34 - hostname 35 35 - hotplug 36 + - kexec_load_disabled 36 37 - kptr_restrict 37 38 - kstack_depth_to_print [ X86 only ] 38 39 - l2cr [ PPC only ] ··· 288 287 289 288 ============================================================== 290 289 290 + kexec_load_disabled: 291 + 292 + A toggle indicating if the kexec_load syscall has been disabled. This 293 + value defaults to 0 (false: kexec_load enabled), but can be set to 1 294 + (true: kexec_load disabled). Once true, kexec can no longer be used, and 295 + the toggle cannot be set back to false. This allows a kexec image to be 296 + loaded before disabling the syscall, allowing a system to set up (and 297 + later use) an image without it being altered. Generally used together 298 + with the "modules_disabled" sysctl. 299 + 300 + ============================================================== 301 + 291 302 kptr_restrict: 292 303 293 304 This toggle indicates whether restrictions are placed on ··· 344 331 in an otherwise modular kernel. This toggle defaults to off 345 332 (0), but can be set true (1). Once true, modules can be 346 333 neither loaded nor unloaded, and the toggle cannot be set back 347 - to false. 334 + to false. Generally used with the "kexec_load_disabled" toggle. 348 335 349 336 ============================================================== 350 337
+9 -9
Documentation/trace/postprocess/trace-vmscan-postprocess.pl
··· 123 123 124 124 # Static regex used. Specified like this for readability and for use with /o 125 125 # (process_pid) (cpus ) ( time ) (tpoint ) (details) 126 - my $regex_traceevent = '\s*([a-zA-Z0-9-]*)\s*(\[[0-9]*\])\s*([0-9.]*):\s*([a-zA-Z_]*):\s*(.*)'; 126 + my $regex_traceevent = '\s*([a-zA-Z0-9-]*)\s*(\[[0-9]*\])(\s*[dX.][Nnp.][Hhs.][0-9a-fA-F.]*|)\s*([0-9.]*):\s*([a-zA-Z_]*):\s*(.*)'; 127 127 my $regex_statname = '[-0-9]*\s\((.*)\).*'; 128 128 my $regex_statppid = '[-0-9]*\s\(.*\)\s[A-Za-z]\s([0-9]*).*'; 129 129 ··· 270 270 while ($traceevent = <STDIN>) { 271 271 if ($traceevent =~ /$regex_traceevent/o) { 272 272 $process_pid = $1; 273 - $timestamp = $3; 274 - $tracepoint = $4; 273 + $timestamp = $4; 274 + $tracepoint = $5; 275 275 276 276 $process_pid =~ /(.*)-([0-9]*)$/; 277 277 my $process = $1; ··· 299 299 $perprocesspid{$process_pid}->{MM_VMSCAN_DIRECT_RECLAIM_BEGIN}++; 300 300 $perprocesspid{$process_pid}->{STATE_DIRECT_BEGIN} = $timestamp; 301 301 302 - $details = $5; 302 + $details = $6; 303 303 if ($details !~ /$regex_direct_begin/o) { 304 304 print "WARNING: Failed to parse mm_vmscan_direct_reclaim_begin as expected\n"; 305 305 print " $details\n"; ··· 322 322 $perprocesspid{$process_pid}->{HIGH_DIRECT_RECLAIM_LATENCY}[$index] = "$order-$latency"; 323 323 } 324 324 } elsif ($tracepoint eq "mm_vmscan_kswapd_wake") { 325 - $details = $5; 325 + $details = $6; 326 326 if ($details !~ /$regex_kswapd_wake/o) { 327 327 print "WARNING: Failed to parse mm_vmscan_kswapd_wake as expected\n"; 328 328 print " $details\n"; ··· 356 356 } elsif ($tracepoint eq "mm_vmscan_wakeup_kswapd") { 357 357 $perprocesspid{$process_pid}->{MM_VMSCAN_WAKEUP_KSWAPD}++; 358 358 359 - $details = $5; 359 + $details = $6; 360 360 if ($details !~ /$regex_wakeup_kswapd/o) { 361 361 print "WARNING: Failed to parse mm_vmscan_wakeup_kswapd as expected\n"; 362 362 print " $details\n"; ··· 366 366 my $order = $3; 367 367 
$perprocesspid{$process_pid}->{MM_VMSCAN_WAKEUP_KSWAPD_PERORDER}[$order]++; 368 368 } elsif ($tracepoint eq "mm_vmscan_lru_isolate") { 369 - $details = $5; 369 + $details = $6; 370 370 if ($details !~ /$regex_lru_isolate/o) { 371 371 print "WARNING: Failed to parse mm_vmscan_lru_isolate as expected\n"; 372 372 print " $details\n"; ··· 387 387 } 388 388 $perprocesspid{$process_pid}->{HIGH_NR_CONTIG_DIRTY} += $nr_contig_dirty; 389 389 } elsif ($tracepoint eq "mm_vmscan_lru_shrink_inactive") { 390 - $details = $5; 390 + $details = $6; 391 391 if ($details !~ /$regex_lru_shrink_inactive/o) { 392 392 print "WARNING: Failed to parse mm_vmscan_lru_shrink_inactive as expected\n"; 393 393 print " $details\n"; ··· 397 397 my $nr_reclaimed = $4; 398 398 $perprocesspid{$process_pid}->{HIGH_NR_RECLAIMED} += $nr_reclaimed; 399 399 } elsif ($tracepoint eq "mm_vmscan_writepage") { 400 - $details = $5; 400 + $details = $6; 401 401 if ($details !~ /$regex_writepage/o) { 402 402 print "WARNING: Failed to parse mm_vmscan_writepage as expected\n"; 403 403 print " $details\n";
-130
Documentation/vm/locking
··· 1 - Started Oct 1999 by Kanoj Sarcar <kanojsarcar@yahoo.com> 2 - 3 - The intent of this file is to have an uptodate, running commentary 4 - from different people about how locking and synchronization is done 5 - in the Linux vm code. 6 - 7 - page_table_lock & mmap_sem 8 - -------------------------------------- 9 - 10 - Page stealers pick processes out of the process pool and scan for 11 - the best process to steal pages from. To guarantee the existence 12 - of the victim mm, a mm_count inc and a mmdrop are done in swap_out(). 13 - Page stealers hold kernel_lock to protect against a bunch of races. 14 - The vma list of the victim mm is also scanned by the stealer, 15 - and the page_table_lock is used to preserve list sanity against the 16 - process adding/deleting to the list. This also guarantees existence 17 - of the vma. Vma existence is not guaranteed once try_to_swap_out() 18 - drops the page_table_lock. To guarantee the existence of the underlying 19 - file structure, a get_file is done before the swapout() method is 20 - invoked. The page passed into swapout() is guaranteed not to be reused 21 - for a different purpose because the page reference count due to being 22 - present in the user's pte is not released till after swapout() returns. 23 - 24 - Any code that modifies the vmlist, or the vm_start/vm_end/ 25 - vm_flags:VM_LOCKED/vm_next of any vma *in the list* must prevent 26 - kswapd from looking at the chain. 27 - 28 - The rules are: 29 - 1. To scan the vmlist (look but don't touch) you must hold the 30 - mmap_sem with read bias, i.e. down_read(&mm->mmap_sem) 31 - 2. To modify the vmlist you need to hold the mmap_sem with 32 - read&write bias, i.e. down_write(&mm->mmap_sem) *AND* 33 - you need to take the page_table_lock. 34 - 3. The swapper takes _just_ the page_table_lock, this is done 35 - because the mmap_sem can be an extremely long lived lock 36 - and the swapper just cannot sleep on that. 37 - 4. 
The exception to this rule is expand_stack, which just 38 - takes the read lock and the page_table_lock, this is ok 39 - because it doesn't really modify fields anybody relies on. 40 - 5. You must be able to guarantee that while holding page_table_lock 41 - or page_table_lock of mm A, you will not try to get either lock 42 - for mm B. 43 - 44 - The caveats are: 45 - 1. find_vma() makes use of, and updates, the mmap_cache pointer hint. 46 - The update of mmap_cache is racy (page stealer can race with other code 47 - that invokes find_vma with mmap_sem held), but that is okay, since it 48 - is a hint. This can be fixed, if desired, by having find_vma grab the 49 - page_table_lock. 50 - 51 - 52 - Code that add/delete elements from the vmlist chain are 53 - 1. callers of insert_vm_struct 54 - 2. callers of merge_segments 55 - 3. callers of avl_remove 56 - 57 - Code that changes vm_start/vm_end/vm_flags:VM_LOCKED of vma's on 58 - the list: 59 - 1. expand_stack 60 - 2. mprotect 61 - 3. mlock 62 - 4. mremap 63 - 64 - It is advisable that changes to vm_start/vm_end be protected, although 65 - in some cases it is not really needed. Eg, vm_start is modified by 66 - expand_stack(), it is hard to come up with a destructive scenario without 67 - having the vmlist protection in this case. 68 - 69 - The page_table_lock nests with the inode i_mmap_mutex and the kmem cache 70 - c_spinlock spinlocks. This is okay, since the kmem code asks for pages after 71 - dropping c_spinlock. The page_table_lock also nests with pagecache_lock and 72 - pagemap_lru_lock spinlocks, and no code asks for memory with these locks 73 - held. 74 - 75 - The page_table_lock is grabbed while holding the kernel_lock spinning monitor. 76 - 77 - The page_table_lock is a spin lock. 78 - 79 - Note: PTL can also be used to guarantee that no new clones using the 80 - mm start up ... this is a loose form of stability on mm_users. 
For 81 - example, it is used in copy_mm to protect against a racing tlb_gather_mmu 82 - single address space optimization, so that the zap_page_range (from 83 - truncate) does not lose sending ipi's to cloned threads that might 84 - be spawned underneath it and go to user mode to drag in pte's into tlbs. 85 - 86 - swap_lock 87 - -------------- 88 - The swap devices are chained in priority order from the "swap_list" header. 89 - The "swap_list" is used for the round-robin swaphandle allocation strategy. 90 - The #free swaphandles is maintained in "nr_swap_pages". These two together 91 - are protected by the swap_lock. 92 - 93 - The swap_lock also protects all the device reference counts on the 94 - corresponding swaphandles, maintained in the "swap_map" array, and the 95 - "highest_bit" and "lowest_bit" fields. 96 - 97 - The swap_lock is a spinlock, and is never acquired from intr level. 98 - 99 - To prevent races between swap space deletion or async readahead swapins 100 - deciding whether a swap handle is being used, ie worthy of being read in 101 - from disk, and an unmap -> swap_free making the handle unused, the swap 102 - delete and readahead code grabs a temp reference on the swaphandle to 103 - prevent warning messages from swap_duplicate <- read_swap_cache_async. 104 - 105 - Swap cache locking 106 - ------------------ 107 - Pages are added into the swap cache with kernel_lock held, to make sure 108 - that multiple pages are not being added (and hence lost) by associating 109 - all of them with the same swaphandle. 110 - 111 - Pages are guaranteed not to be removed from the scache if the page is 112 - "shared": ie, other processes hold reference on the page or the associated 113 - swap handle. The only code that does not follow this rule is shrink_mmap, 114 - which deletes pages from the swap cache if no process has a reference on 115 - the page (multiple processes might have references on the corresponding 116 - swap handle though). 
lookup_swap_cache() races with shrink_mmap, when 117 - establishing a reference on a scache page, so, it must check whether the 118 - page it located is still in the swapcache, or shrink_mmap deleted it. 119 - (This race is due to the fact that shrink_mmap looks at the page ref 120 - count with pagecache_lock, but then drops pagecache_lock before deleting 121 - the page from the scache). 122 - 123 - do_wp_page and do_swap_page have MP races in them while trying to figure 124 - out whether a page is "shared", by looking at the page_count + swap_count. 125 - To preserve the sum of the counts, the page lock _must_ be acquired before 126 - calling is_page_shared (else processes might switch their swap_count refs 127 - to the page count refs, after the page count ref has been snapshotted). 128 - 129 - Swap device deletion code currently breaks all the scache assumptions, 130 - since it grabs neither mmap_sem nor page_table_lock.
+11 -1
MAINTAINERS
··· 93 93 N: Files and directories with regex patterns. 94 94 N: [^a-z]tegra all files whose path contains the word tegra 95 95 One pattern per line. Multiple N: lines acceptable. 96 + scripts/get_maintainer.pl has different behavior for files that 97 + match F: pattern and matches of N: patterns. By default, 98 + get_maintainer will not look at git log history when an F: pattern 99 + match occurs. When an N: match occurs, git log history is used 100 + to also notify the people that have git commit signatures. 96 101 X: Files and directories that are NOT maintained, same rules as F: 97 102 Files exclusions are tested before file matches. 98 103 Can be useful for excluding a specific subdirectory, for instance: ··· 3380 3375 L: linux-fbdev@vger.kernel.org 3381 3376 S: Maintained 3382 3377 F: drivers/video/exynos/exynos_dp* 3383 - F: include/video/exynos_dp* 3384 3378 3385 3379 EXYNOS MIPI DISPLAY DRIVERS 3386 3380 M: Inki Dae <inki.dae@samsung.com> ··· 3989 3985 S: Orphan 3990 3986 F: Documentation/filesystems/hfs.txt 3991 3987 F: fs/hfs/ 3988 + 3989 + HFSPLUS FILESYSTEM 3990 + L: linux-fsdevel@vger.kernel.org 3991 + S: Orphan 3992 + F: Documentation/filesystems/hfsplus.txt 3993 + F: fs/hfsplus/ 3992 3994 3993 3995 HGA FRAMEBUFFER DRIVER 3994 3996 M: Ferenc Bakonyi <fero@drama.obuda.kando.hu>
+4 -4
arch/alpha/Kconfig
··· 539 539 depends on ALPHA_SABLE || ALPHA_LYNX || ALPHA_RAWHIDE || ALPHA_DP264 || ALPHA_WILDFIRE || ALPHA_TITAN || ALPHA_GENERIC || ALPHA_SHARK || ALPHA_MARVEL 540 540 ---help--- 541 541 This enables support for systems with more than one CPU. If you have 542 - a system with only one CPU, like most personal computers, say N. If 543 - you have a system with more than one CPU, say Y. 542 + a system with only one CPU, say N. If you have a system with more 543 + than one CPU, say Y. 544 544 545 - If you say N here, the kernel will run on single and multiprocessor 545 + If you say N here, the kernel will run on uni- and multiprocessor 546 546 machines, but will use only one CPU of a multiprocessor machine. If 547 547 you say Y here, the kernel will run on many, but not all, 548 - singleprocessor machines. On a singleprocessor machine, the kernel 548 + uniprocessor machines. On a uniprocessor machine, the kernel 549 549 will run faster if you say N here. 550 550 551 551 See also the SMP-HOWTO available at
+2 -2
arch/arc/Kconfig
··· 128 128 default n 129 129 help 130 130 This enables support for systems with more than one CPU. If you have 131 - a system with only one CPU, like most personal computers, say N. If 132 - you have a system with more than one CPU, say Y. 131 + a system with only one CPU, say N. If you have a system with more 132 + than one CPU, say Y. 133 133 134 134 if SMP 135 135
+6 -6
arch/arm/Kconfig
··· 1470 1470 depends on MMU || ARM_MPU 1471 1471 help 1472 1472 This enables support for systems with more than one CPU. If you have 1473 - a system with only one CPU, like most personal computers, say N. If 1474 - you have a system with more than one CPU, say Y. 1473 + a system with only one CPU, say N. If you have a system with more 1474 + than one CPU, say Y. 1475 1475 1476 - If you say N here, the kernel will run on single and multiprocessor 1476 + If you say N here, the kernel will run on uni- and multiprocessor 1477 1477 machines, but will use only one CPU of a multiprocessor machine. If 1478 - you say Y here, the kernel will run on many, but not all, single 1479 - processor machines. On a single processor machine, the kernel will 1480 - run faster if you say N here. 1478 + you say Y here, the kernel will run on many, but not all, 1479 + uniprocessor machines. On a uniprocessor machine, the kernel 1480 + will run faster if you say N here. 1481 1481 1482 1482 See also <file:Documentation/x86/i386/IO-APIC.txt>, 1483 1483 <file:Documentation/nmi_watchdog.txt> and the SMP-HOWTO available at
+4
arch/cris/include/asm/io.h
··· 169 169 } 170 170 171 171 #define inb_p(port) inb(port) 172 + #define inw_p(port) inw(port) 173 + #define inl_p(port) inl(port) 172 174 #define outb_p(val, port) outb((val), (port)) 175 + #define outw_p(val, port) outw((val), (port)) 176 + #define outl_p(val, port) outl((val), (port)) 173 177 174 178 /* 175 179 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
+1 -39
arch/hexagon/include/asm/fixmap.h
··· 26 26 */ 27 27 #include <asm/mem-layout.h> 28 28 29 - /* 30 - * Full fixmap support involves set_fixmap() functions, but 31 - * these may not be needed if all we're after is an area for 32 - * highmem kernel mappings. 33 - */ 34 - #define __fix_to_virt(x) (FIXADDR_TOP - ((x) << PAGE_SHIFT)) 35 - #define __virt_to_fix(x) ((FIXADDR_TOP - ((x)&PAGE_MASK)) >> PAGE_SHIFT) 36 - 37 - extern void __this_fixmap_does_not_exist(void); 38 - 39 - /** 40 - * fix_to_virt -- "index to address" translation. 41 - * 42 - * If anyone tries to use the idx directly without translation, 43 - * we catch the bug with a NULL-deference kernel oops. Illegal 44 - * ranges of incoming indices are caught too. 45 - */ 46 - static inline unsigned long fix_to_virt(const unsigned int idx) 47 - { 48 - /* 49 - * This branch gets completely eliminated after inlining, 50 - * except when someone tries to use fixaddr indices in an 51 - * illegal way. (such as mixing up address types or using 52 - * out-of-range indices). 53 - * 54 - * If it doesn't get removed, the linker will complain 55 - * loudly with a reasonably clear error message.. 56 - */ 57 - if (idx >= __end_of_fixed_addresses) 58 - __this_fixmap_does_not_exist(); 59 - 60 - return __fix_to_virt(idx); 61 - } 62 - 63 - static inline unsigned long virt_to_fix(const unsigned long vaddr) 64 - { 65 - BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START); 66 - return __virt_to_fix(vaddr); 67 - } 29 + #include <asm-generic/fixmap.h> 68 30 69 31 #define kmap_get_fixmap_pte(vaddr) \ 70 32 pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), \
+1
arch/ia64/Kconfig
··· 104 104 config DMI 105 105 bool 106 106 default y 107 + select DMI_SCAN_MACHINE_NON_EFI_FALLBACK 107 108 108 109 config EFI 109 110 bool
+5 -3
arch/ia64/include/asm/dmi.h
··· 5 5 #include <asm/io.h> 6 6 7 7 /* Use normal IO mappings for DMI */ 8 - #define dmi_ioremap ioremap 9 - #define dmi_iounmap(x,l) iounmap(x) 10 - #define dmi_alloc(l) kzalloc(l, GFP_ATOMIC) 8 + #define dmi_early_remap ioremap 9 + #define dmi_early_unmap(x, l) iounmap(x) 10 + #define dmi_remap ioremap 11 + #define dmi_unmap iounmap 12 + #define dmi_alloc(l) kzalloc(l, GFP_ATOMIC) 11 13 12 14 #endif
+1
arch/ia64/include/asm/processor.h
··· 71 71 #include <linux/compiler.h> 72 72 #include <linux/threads.h> 73 73 #include <linux/types.h> 74 + #include <linux/bitops.h> 74 75 75 76 #include <asm/fpu.h> 76 77 #include <asm/page.h>
+4 -4
arch/m32r/Kconfig
··· 277 277 bool "Symmetric multi-processing support" 278 278 ---help--- 279 279 This enables support for systems with more than one CPU. If you have 280 - a system with only one CPU, like most personal computers, say N. If 281 - you have a system with more than one CPU, say Y. 280 + a system with only one CPU, say N. If you have a system with more 281 + than one CPU, say Y. 282 282 283 - If you say N here, the kernel will run on single and multiprocessor 283 + If you say N here, the kernel will run on uni- and multiprocessor 284 284 machines, but will use only one CPU of a multiprocessor machine. If 285 285 you say Y here, the kernel will run on many, but not all, 286 - singleprocessor machines. On a singleprocessor machine, the kernel 286 + uniprocessor machines. On a uniprocessor machine, the kernel 287 287 will run faster if you say N here. 288 288 289 289 People using multiprocessor machines who say Y here should also say
+1 -31
arch/metag/include/asm/fixmap.h
··· 51 51 #define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT) 52 52 #define FIXADDR_START ((FIXADDR_TOP - FIXADDR_SIZE) & PMD_MASK) 53 53 54 - #define __fix_to_virt(x) (FIXADDR_TOP - ((x) << PAGE_SHIFT)) 55 - #define __virt_to_fix(x) ((FIXADDR_TOP - ((x)&PAGE_MASK)) >> PAGE_SHIFT) 56 - 57 - extern void __this_fixmap_does_not_exist(void); 58 - /* 59 - * 'index to address' translation. If anyone tries to use the idx 60 - * directly without tranlation, we catch the bug with a NULL-deference 61 - * kernel oops. Illegal ranges of incoming indices are caught too. 62 - */ 63 - static inline unsigned long fix_to_virt(const unsigned int idx) 64 - { 65 - /* 66 - * this branch gets completely eliminated after inlining, 67 - * except when someone tries to use fixaddr indices in an 68 - * illegal way. (such as mixing up address types or using 69 - * out-of-range indices). 70 - * 71 - * If it doesn't get removed, the linker will complain 72 - * loudly with a reasonably clear error message.. 73 - */ 74 - if (idx >= __end_of_fixed_addresses) 75 - __this_fixmap_does_not_exist(); 76 - 77 - return __fix_to_virt(idx); 78 - } 79 - 80 - static inline unsigned long virt_to_fix(const unsigned long vaddr) 81 - { 82 - BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START); 83 - return __virt_to_fix(vaddr); 84 - } 54 + #include <asm-generic/fixmap.h> 85 55 86 56 #define kmap_get_fixmap_pte(vaddr) \ 87 57 pte_offset_kernel( \
+1
arch/microblaze/Kconfig
··· 30 30 select MODULES_USE_ELF_RELA 31 31 select CLONE_BACKWARDS3 32 32 select CLKSRC_OF 33 + select BUILDTIME_EXTABLE_SORT 33 34 34 35 config SWAP 35 36 def_bool n
+2 -42
arch/microblaze/include/asm/fixmap.h
··· 58 58 extern void __set_fixmap(enum fixed_addresses idx, 59 59 phys_addr_t phys, pgprot_t flags); 60 60 61 - #define set_fixmap(idx, phys) \ 62 - __set_fixmap(idx, phys, PAGE_KERNEL) 63 - /* 64 - * Some hardware wants to get fixmapped without caching. 65 - */ 66 - #define set_fixmap_nocache(idx, phys) \ 67 - __set_fixmap(idx, phys, PAGE_KERNEL_CI) 68 - 69 - #define clear_fixmap(idx) \ 70 - __set_fixmap(idx, 0, __pgprot(0)) 71 - 72 61 #define __FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT) 73 62 #define FIXADDR_START (FIXADDR_TOP - __FIXADDR_SIZE) 74 63 75 - #define __fix_to_virt(x) (FIXADDR_TOP - ((x) << PAGE_SHIFT)) 76 - #define __virt_to_fix(x) ((FIXADDR_TOP - ((x)&PAGE_MASK)) >> PAGE_SHIFT) 64 + #define FIXMAP_PAGE_NOCACHE PAGE_KERNEL_CI 77 65 78 - extern void __this_fixmap_does_not_exist(void); 79 - 80 - /* 81 - * 'index to address' translation. If anyone tries to use the idx 82 - * directly without tranlation, we catch the bug with a NULL-deference 83 - * kernel oops. Illegal ranges of incoming indices are caught too. 84 - */ 85 - static __always_inline unsigned long fix_to_virt(const unsigned int idx) 86 - { 87 - /* 88 - * this branch gets completely eliminated after inlining, 89 - * except when someone tries to use fixaddr indices in an 90 - * illegal way. (such as mixing up address types or using 91 - * out-of-range indices). 92 - * 93 - * If it doesn't get removed, the linker will complain 94 - * loudly with a reasonably clear error message.. 95 - */ 96 - if (idx >= __end_of_fixed_addresses) 97 - __this_fixmap_does_not_exist(); 98 - 99 - return __fix_to_virt(idx); 100 - } 101 - 102 - static inline unsigned long virt_to_fix(const unsigned long vaddr) 103 - { 104 - BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START); 105 - return __virt_to_fix(vaddr); 106 - } 66 + #include <asm-generic/fixmap.h> 107 67 108 68 #endif /* !__ASSEMBLY__ */ 109 69 #endif
+5 -5
arch/mips/Kconfig
··· 2129 2129 depends on SYS_SUPPORTS_SMP 2130 2130 help 2131 2131 This enables support for systems with more than one CPU. If you have 2132 - a system with only one CPU, like most personal computers, say N. If 2133 - you have a system with more than one CPU, say Y. 2132 + a system with only one CPU, say N. If you have a system with more 2133 + than one CPU, say Y. 2134 2134 2135 - If you say N here, the kernel will run on single and multiprocessor 2135 + If you say N here, the kernel will run on uni- and multiprocessor 2136 2136 machines, but will use only one CPU of a multiprocessor machine. If 2137 2137 you say Y here, the kernel will run on many, but not all, 2138 - singleprocessor machines. On a singleprocessor machine, the kernel 2138 + uniprocessor machines. On a uniprocessor machine, the kernel 2139 2139 will run faster if you say N here. 2140 2140 2141 2141 People using multiprocessor machines who say Y here should also say ··· 2430 2430 source "drivers/pci/hotplug/Kconfig" 2431 2431 2432 2432 config RAPIDIO 2433 - bool "RapidIO support" 2433 + tristate "RapidIO support" 2434 2434 depends on PCI 2435 2435 default n 2436 2436 help
+1 -32
arch/mips/include/asm/fixmap.h
··· 71 71 #define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT) 72 72 #define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE) 73 73 74 - #define __fix_to_virt(x) (FIXADDR_TOP - ((x) << PAGE_SHIFT)) 75 - #define __virt_to_fix(x) ((FIXADDR_TOP - ((x)&PAGE_MASK)) >> PAGE_SHIFT) 76 - 77 - extern void __this_fixmap_does_not_exist(void); 78 - 79 - /* 80 - * 'index to address' translation. If anyone tries to use the idx 81 - * directly without tranlation, we catch the bug with a NULL-deference 82 - * kernel oops. Illegal ranges of incoming indices are caught too. 83 - */ 84 - static inline unsigned long fix_to_virt(const unsigned int idx) 85 - { 86 - /* 87 - * this branch gets completely eliminated after inlining, 88 - * except when someone tries to use fixaddr indices in an 89 - * illegal way. (such as mixing up address types or using 90 - * out-of-range indices). 91 - * 92 - * If it doesn't get removed, the linker will complain 93 - * loudly with a reasonably clear error message.. 94 - */ 95 - if (idx >= __end_of_fixed_addresses) 96 - __this_fixmap_does_not_exist(); 97 - 98 - return __fix_to_virt(idx); 99 - } 100 - 101 - static inline unsigned long virt_to_fix(const unsigned long vaddr) 102 - { 103 - BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START); 104 - return __virt_to_fix(vaddr); 105 - } 74 + #include <asm-generic/fixmap.h> 106 75 107 76 #define kmap_get_fixmap_pte(vaddr) \ 108 77 pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), (vaddr)), (vaddr)), (vaddr))
+4 -4
arch/mn10300/Kconfig
··· 184 184 depends on MN10300_PROC_MN2WS0038 || MN10300_PROC_MN2WS0050 185 185 ---help--- 186 186 This enables support for systems with more than one CPU. If you have 187 - a system with only one CPU, like most personal computers, say N. If 188 - you have a system with more than one CPU, say Y. 187 + a system with only one CPU, say N. If you have a system with more 188 + than one CPU, say Y. 189 189 190 - If you say N here, the kernel will run on single and multiprocessor 190 + If you say N here, the kernel will run on uni- and multiprocessor 191 191 machines, but will use only one CPU of a multiprocessor machine. If 192 192 you say Y here, the kernel will run on many, but not all, 193 - singleprocessor machines. On a singleprocessor machine, the kernel 193 + uniprocessor machines. On a uniprocessor machine, the kernel 194 194 will run faster if you say N here. 195 195 196 196 See also <file:Documentation/x86/i386/IO-APIC.txt>,
+4 -4
arch/parisc/Kconfig
··· 229 229 bool "Symmetric multi-processing support" 230 230 ---help--- 231 231 This enables support for systems with more than one CPU. If you have 232 - a system with only one CPU, like most personal computers, say N. If 233 - you have a system with more than one CPU, say Y. 232 + a system with only one CPU, say N. If you have a system with more 233 + than one CPU, say Y. 234 234 235 - If you say N here, the kernel will run on single and multiprocessor 235 + If you say N here, the kernel will run on uni- and multiprocessor 236 236 machines, but will use only one CPU of a multiprocessor machine. If 237 237 you say Y here, the kernel will run on many, but not all, 238 - singleprocessor machines. On a singleprocessor machine, the kernel 238 + uniprocessor machines. On a uniprocessor machine, the kernel 239 239 will run faster if you say N here. 240 240 241 241 See also <file:Documentation/nmi_watchdog.txt> and the SMP-HOWTO
+2 -2
arch/powerpc/Kconfig
··· 794 794 default n 795 795 796 796 config RAPIDIO 797 - bool "RapidIO support" 797 + tristate "RapidIO support" 798 798 depends on HAS_RAPIDIO || PCI 799 799 help 800 800 If you say Y here, the kernel will include drivers and ··· 802 802 803 803 config FSL_RIO 804 804 bool "Freescale Embedded SRIO Controller support" 805 - depends on RAPIDIO && HAS_RAPIDIO 805 + depends on RAPIDIO = y && HAS_RAPIDIO 806 806 default "n" 807 807 ---help--- 808 808 Include support for RapidIO controller on Freescale embedded
+2 -42
arch/powerpc/include/asm/fixmap.h
··· 58 58 extern void __set_fixmap (enum fixed_addresses idx, 59 59 phys_addr_t phys, pgprot_t flags); 60 60 61 - #define set_fixmap(idx, phys) \ 62 - __set_fixmap(idx, phys, PAGE_KERNEL) 63 - /* 64 - * Some hardware wants to get fixmapped without caching. 65 - */ 66 - #define set_fixmap_nocache(idx, phys) \ 67 - __set_fixmap(idx, phys, PAGE_KERNEL_NCG) 68 - 69 - #define clear_fixmap(idx) \ 70 - __set_fixmap(idx, 0, __pgprot(0)) 71 - 72 61 #define __FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT) 73 62 #define FIXADDR_START (FIXADDR_TOP - __FIXADDR_SIZE) 74 63 75 - #define __fix_to_virt(x) (FIXADDR_TOP - ((x) << PAGE_SHIFT)) 76 - #define __virt_to_fix(x) ((FIXADDR_TOP - ((x)&PAGE_MASK)) >> PAGE_SHIFT) 64 + #define FIXMAP_PAGE_NOCACHE PAGE_KERNEL_NCG 77 65 78 - extern void __this_fixmap_does_not_exist(void); 79 - 80 - /* 81 - * 'index to address' translation. If anyone tries to use the idx 82 - * directly without tranlation, we catch the bug with a NULL-deference 83 - * kernel oops. Illegal ranges of incoming indices are caught too. 84 - */ 85 - static __always_inline unsigned long fix_to_virt(const unsigned int idx) 86 - { 87 - /* 88 - * this branch gets completely eliminated after inlining, 89 - * except when someone tries to use fixaddr indices in an 90 - * illegal way. (such as mixing up address types or using 91 - * out-of-range indices). 92 - * 93 - * If it doesn't get removed, the linker will complain 94 - * loudly with a reasonably clear error message.. 95 - */ 96 - if (idx >= __end_of_fixed_addresses) 97 - __this_fixmap_does_not_exist(); 98 - 99 - return __fix_to_virt(idx); 100 - } 101 - 102 - static inline unsigned long virt_to_fix(const unsigned long vaddr) 103 - { 104 - BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START); 105 - return __virt_to_fix(vaddr); 106 - } 66 + #include <asm-generic/fixmap.h> 107 67 108 68 #endif /* !__ASSEMBLY__ */ 109 69 #endif
+2 -2
arch/s390/Kconfig
··· 334 334 a system with only one CPU, like most personal computers, say N. If 335 335 you have a system with more than one CPU, say Y. 336 336 337 - If you say N here, the kernel will run on single and multiprocessor 337 + If you say N here, the kernel will run on uni- and multiprocessor 338 338 machines, but will use only one CPU of a multiprocessor machine. If 339 339 you say Y here, the kernel will run on many, but not all, 340 - singleprocessor machines. On a singleprocessor machine, the kernel 340 + uniprocessor machines. On a uniprocessor machine, the kernel 341 341 will run faster if you say N here. 342 342 343 343 See also the SMP-HOWTO available at
+4 -4
arch/sh/Kconfig
··· 701 701 depends on SYS_SUPPORTS_SMP 702 702 ---help--- 703 703 This enables support for systems with more than one CPU. If you have 704 - a system with only one CPU, like most personal computers, say N. If 705 - you have a system with more than one CPU, say Y. 704 + a system with only one CPU, say N. If you have a system with more 705 + than one CPU, say Y. 706 706 707 - If you say N here, the kernel will run on single and multiprocessor 707 + If you say N here, the kernel will run on uni- and multiprocessor 708 708 machines, but will use only one CPU of a multiprocessor machine. If 709 709 you say Y here, the kernel will run on many, but not all, 710 - singleprocessor machines. On a singleprocessor machine, the kernel 710 + uniprocessor machines. On a uniprocessor machine, the kernel 711 711 will run faster if you say N here. 712 712 713 713 People using multiprocessor machines who say Y here should also say
+2 -37
arch/sh/include/asm/fixmap.h
··· 79 79 unsigned long phys, pgprot_t flags); 80 80 extern void __clear_fixmap(enum fixed_addresses idx, pgprot_t flags); 81 81 82 - #define set_fixmap(idx, phys) \ 83 - __set_fixmap(idx, phys, PAGE_KERNEL) 84 - /* 85 - * Some hardware wants to get fixmapped without caching. 86 - */ 87 - #define set_fixmap_nocache(idx, phys) \ 88 - __set_fixmap(idx, phys, PAGE_KERNEL_NOCACHE) 89 82 /* 90 83 * used by vmalloc.c. 91 84 * ··· 94 101 #define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT) 95 102 #define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE) 96 103 97 - #define __fix_to_virt(x) (FIXADDR_TOP - ((x) << PAGE_SHIFT)) 98 - #define __virt_to_fix(x) ((FIXADDR_TOP - ((x)&PAGE_MASK)) >> PAGE_SHIFT) 104 + #define FIXMAP_PAGE_NOCACHE PAGE_KERNEL_NOCACHE 99 105 100 - extern void __this_fixmap_does_not_exist(void); 106 + #include <asm-generic/fixmap.h> 101 107 102 - /* 103 - * 'index to address' translation. If anyone tries to use the idx 104 - * directly without tranlation, we catch the bug with a NULL-deference 105 - * kernel oops. Illegal ranges of incoming indices are caught too. 106 - */ 107 - static inline unsigned long fix_to_virt(const unsigned int idx) 108 - { 109 - /* 110 - * this branch gets completely eliminated after inlining, 111 - * except when someone tries to use fixaddr indices in an 112 - * illegal way. (such as mixing up address types or using 113 - * out-of-range indices). 114 - * 115 - * If it doesn't get removed, the linker will complain 116 - * loudly with a reasonably clear error message.. 117 - */ 118 - if (idx >= __end_of_fixed_addresses) 119 - __this_fixmap_does_not_exist(); 120 - 121 - return __fix_to_virt(idx); 122 - } 123 - 124 - static inline unsigned long virt_to_fix(const unsigned long vaddr) 125 - { 126 - BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START); 127 - return __virt_to_fix(vaddr); 128 - } 129 108 #endif
+4 -14
arch/sh/kernel/dwarf.c
··· 995 995 996 996 static void dwarf_unwinder_cleanup(void) 997 997 { 998 - struct rb_node **fde_rb_node = &fde_root.rb_node; 999 - struct rb_node **cie_rb_node = &cie_root.rb_node; 998 + struct dwarf_fde *fde, *next_fde; 999 + struct dwarf_cie *cie, *next_cie; 1000 1000 1001 1001 /* 1002 1002 * Deallocate all the memory allocated for the DWARF unwinder. 1003 1003 * Traverse all the FDE/CIE lists and remove and free all the 1004 1004 * memory associated with those data structures. 1005 1005 */ 1006 - while (*fde_rb_node) { 1007 - struct dwarf_fde *fde; 1008 - 1009 - fde = rb_entry(*fde_rb_node, struct dwarf_fde, node); 1010 - rb_erase(*fde_rb_node, &fde_root); 1006 + rbtree_postorder_for_each_entry_safe(fde, next_fde, &fde_root, node) 1011 1007 kfree(fde); 1012 - } 1013 1008 1014 - while (*cie_rb_node) { 1015 - struct dwarf_cie *cie; 1016 - 1017 - cie = rb_entry(*cie_rb_node, struct dwarf_cie, node); 1018 - rb_erase(*cie_rb_node, &cie_root); 1009 + rbtree_postorder_for_each_entry_safe(cie, next_cie, &cie_root, node) 1019 1010 kfree(cie); 1020 - } 1021 1011 1022 1012 kmem_cache_destroy(dwarf_reg_cachep); 1023 1013 kmem_cache_destroy(dwarf_frame_cachep);
+2 -2
arch/sparc/Kconfig
··· 152 152 a system with only one CPU, say N. If you have a system with more 153 153 than one CPU, say Y. 154 154 155 - If you say N here, the kernel will run on single and multiprocessor 155 + If you say N here, the kernel will run on uni- and multiprocessor 156 156 machines, but will use only one CPU of a multiprocessor machine. If 157 157 you say Y here, the kernel will run on many, but not all, 158 - singleprocessor machines. On a singleprocessor machine, the kernel 158 + uniprocessor machines. On a uniprocessor machine, the kernel 159 159 will run faster if you say N here. 160 160 161 161 People using multiprocessor machines who say Y here should also say
+1 -32
arch/tile/include/asm/fixmap.h
··· 25 25 #include <asm/kmap_types.h> 26 26 #endif 27 27 28 - #define __fix_to_virt(x) (FIXADDR_TOP - ((x) << PAGE_SHIFT)) 29 - #define __virt_to_fix(x) ((FIXADDR_TOP - ((x)&PAGE_MASK)) >> PAGE_SHIFT) 30 - 31 28 /* 32 29 * Here we define all the compile-time 'special' virtual 33 30 * addresses. The point is to have a constant address at ··· 80 83 #define FIXADDR_START (FIXADDR_TOP + PAGE_SIZE - __FIXADDR_SIZE) 81 84 #define FIXADDR_BOOT_START (FIXADDR_TOP + PAGE_SIZE - __FIXADDR_BOOT_SIZE) 82 85 83 - extern void __this_fixmap_does_not_exist(void); 84 - 85 - /* 86 - * 'index to address' translation. If anyone tries to use the idx 87 - * directly without tranlation, we catch the bug with a NULL-deference 88 - * kernel oops. Illegal ranges of incoming indices are caught too. 89 - */ 90 - static __always_inline unsigned long fix_to_virt(const unsigned int idx) 91 - { 92 - /* 93 - * this branch gets completely eliminated after inlining, 94 - * except when someone tries to use fixaddr indices in an 95 - * illegal way. (such as mixing up address types or using 96 - * out-of-range indices). 97 - * 98 - * If it doesn't get removed, the linker will complain 99 - * loudly with a reasonably clear error message.. 100 - */ 101 - if (idx >= __end_of_fixed_addresses) 102 - __this_fixmap_does_not_exist(); 103 - 104 - return __fix_to_virt(idx); 105 - } 106 - 107 - static inline unsigned long virt_to_fix(const unsigned long vaddr) 108 - { 109 - BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START); 110 - return __virt_to_fix(vaddr); 111 - } 86 + #include <asm-generic/fixmap.h> 112 87 113 88 #endif /* !__ASSEMBLY__ */ 114 89
+1 -39
arch/um/include/asm/fixmap.h
··· 43 43 extern void __set_fixmap (enum fixed_addresses idx, 44 44 unsigned long phys, pgprot_t flags); 45 45 46 - #define set_fixmap(idx, phys) \ 47 - __set_fixmap(idx, phys, PAGE_KERNEL) 48 - /* 49 - * Some hardware wants to get fixmapped without caching. 50 - */ 51 - #define set_fixmap_nocache(idx, phys) \ 52 - __set_fixmap(idx, phys, PAGE_KERNEL_NOCACHE) 53 46 /* 54 47 * used by vmalloc.c. 55 48 * ··· 55 62 #define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT) 56 63 #define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE) 57 64 58 - #define __fix_to_virt(x) (FIXADDR_TOP - ((x) << PAGE_SHIFT)) 59 - #define __virt_to_fix(x) ((FIXADDR_TOP - ((x)&PAGE_MASK)) >> PAGE_SHIFT) 60 - 61 - extern void __this_fixmap_does_not_exist(void); 62 - 63 - /* 64 - * 'index to address' translation. If anyone tries to use the idx 65 - * directly without tranlation, we catch the bug with a NULL-deference 66 - * kernel oops. Illegal ranges of incoming indices are caught too. 67 - */ 68 - static inline unsigned long fix_to_virt(const unsigned int idx) 69 - { 70 - /* 71 - * this branch gets completely eliminated after inlining, 72 - * except when someone tries to use fixaddr indices in an 73 - * illegal way. (such as mixing up address types or using 74 - * out-of-range indices). 75 - * 76 - * If it doesn't get removed, the linker will complain 77 - * loudly with a reasonably clear error message.. 78 - */ 79 - if (idx >= __end_of_fixed_addresses) 80 - __this_fixmap_does_not_exist(); 81 - 82 - return __fix_to_virt(idx); 83 - } 84 - 85 - static inline unsigned long virt_to_fix(const unsigned long vaddr) 86 - { 87 - BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START); 88 - return __virt_to_fix(vaddr); 89 - } 65 + #include <asm-generic/fixmap.h> 90 66 91 67 #endif
+5 -4
arch/x86/Kconfig
··· 279 279 bool "Symmetric multi-processing support" 280 280 ---help--- 281 281 This enables support for systems with more than one CPU. If you have 282 - a system with only one CPU, like most personal computers, say N. If 283 - you have a system with more than one CPU, say Y. 282 + a system with only one CPU, say N. If you have a system with more 283 + than one CPU, say Y. 284 284 285 - If you say N here, the kernel will run on single and multiprocessor 285 + If you say N here, the kernel will run on uni- and multiprocessor 286 286 machines, but will use only one CPU of a multiprocessor machine. If 287 287 you say Y here, the kernel will run on many, but not all, 288 - singleprocessor machines. On a singleprocessor machine, the kernel 288 + uniprocessor machines. On a uniprocessor machine, the kernel 289 289 will run faster if you say N here. 290 290 291 291 Note that if you say Y here and choose architecture "586" or ··· 731 731 # The code disables itself when not needed. 732 732 config DMI 733 733 default y 734 + select DMI_SCAN_MACHINE_NON_EFI_FALLBACK 734 735 bool "Enable DMI scanning" if EXPERT 735 736 ---help--- 736 737 Enabled scanning of DMI to identify machine quirks. Say Y
+4 -2
arch/x86/include/asm/dmi.h
··· 13 13 } 14 14 15 15 /* Use early IO mappings for DMI because it's initialized early */ 16 - #define dmi_ioremap early_ioremap 17 - #define dmi_iounmap early_iounmap 16 + #define dmi_early_remap early_ioremap 17 + #define dmi_early_unmap early_iounmap 18 + #define dmi_remap ioremap 19 + #define dmi_unmap iounmap 18 20 19 21 #endif /* _ASM_X86_DMI_H */
+1 -58
arch/x86/include/asm/fixmap.h
··· 175 175 } 176 176 #endif 177 177 178 - #define set_fixmap(idx, phys) \ 179 - __set_fixmap(idx, phys, PAGE_KERNEL) 180 - 181 - /* 182 - * Some hardware wants to get fixmapped without caching. 183 - */ 184 - #define set_fixmap_nocache(idx, phys) \ 185 - __set_fixmap(idx, phys, PAGE_KERNEL_NOCACHE) 186 - 187 - #define clear_fixmap(idx) \ 188 - __set_fixmap(idx, 0, __pgprot(0)) 189 - 190 - #define __fix_to_virt(x) (FIXADDR_TOP - ((x) << PAGE_SHIFT)) 191 - #define __virt_to_fix(x) ((FIXADDR_TOP - ((x)&PAGE_MASK)) >> PAGE_SHIFT) 192 - 193 - extern void __this_fixmap_does_not_exist(void); 194 - 195 - /* 196 - * 'index to address' translation. If anyone tries to use the idx 197 - * directly without translation, we catch the bug with a NULL-deference 198 - * kernel oops. Illegal ranges of incoming indices are caught too. 199 - */ 200 - static __always_inline unsigned long fix_to_virt(const unsigned int idx) 201 - { 202 - /* 203 - * this branch gets completely eliminated after inlining, 204 - * except when someone tries to use fixaddr indices in an 205 - * illegal way. (such as mixing up address types or using 206 - * out-of-range indices). 207 - * 208 - * If it doesn't get removed, the linker will complain 209 - * loudly with a reasonably clear error message.. 
210 - */ 211 - if (idx >= __end_of_fixed_addresses) 212 - __this_fixmap_does_not_exist(); 213 - 214 - return __fix_to_virt(idx); 215 - } 216 - 217 - static inline unsigned long virt_to_fix(const unsigned long vaddr) 218 - { 219 - BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START); 220 - return __virt_to_fix(vaddr); 221 - } 222 - 223 - /* Return an pointer with offset calculated */ 224 - static __always_inline unsigned long 225 - __set_fixmap_offset(enum fixed_addresses idx, phys_addr_t phys, pgprot_t flags) 226 - { 227 - __set_fixmap(idx, phys, flags); 228 - return fix_to_virt(idx) + (phys & (PAGE_SIZE - 1)); 229 - } 230 - 231 - #define set_fixmap_offset(idx, phys) \ 232 - __set_fixmap_offset(idx, phys, PAGE_KERNEL) 233 - 234 - #define set_fixmap_offset_nocache(idx, phys) \ 235 - __set_fixmap_offset(idx, phys, PAGE_KERNEL_NOCACHE) 178 + #include <asm-generic/fixmap.h> 236 179 237 180 #endif /* !__ASSEMBLY__ */ 238 181 #endif /* _ASM_X86_FIXMAP_H */
+4 -4
arch/x86/mm/gup.c
··· 108 108 109 109 static inline void get_head_page_multiple(struct page *page, int nr) 110 110 { 111 - VM_BUG_ON(page != compound_head(page)); 112 - VM_BUG_ON(page_count(page) == 0); 111 + VM_BUG_ON_PAGE(page != compound_head(page), page); 112 + VM_BUG_ON_PAGE(page_count(page) == 0, page); 113 113 atomic_add(nr, &page->_count); 114 114 SetPageReferenced(page); 115 115 } ··· 135 135 head = pte_page(pte); 136 136 page = head + ((addr & ~PMD_MASK) >> PAGE_SHIFT); 137 137 do { 138 - VM_BUG_ON(compound_head(page) != head); 138 + VM_BUG_ON_PAGE(compound_head(page) != head, page); 139 139 pages[*nr] = page; 140 140 if (PageTail(page)) 141 141 get_huge_page_tail(page); ··· 212 212 head = pte_page(pte); 213 213 page = head + ((addr & ~PUD_MASK) >> PAGE_SHIFT); 214 214 do { 215 - VM_BUG_ON(compound_head(page) != head); 215 + VM_BUG_ON_PAGE(compound_head(page) != head, page); 216 216 pages[*nr] = page; 217 217 if (PageTail(page)) 218 218 get_huge_page_tail(page);
+2 -1
drivers/block/Kconfig
··· 368 368 For details, read <file:Documentation/blockdev/ramdisk.txt>. 369 369 370 370 To compile this driver as a module, choose M here: the 371 - module will be called rd. 371 + module will be called brd. An alias "rd" has been defined 372 + for historical reasons. 372 373 373 374 Most normal users won't need the RAM disk functionality, and can 374 375 thus say N here.
+3
drivers/firmware/Kconfig
··· 108 108 under /sys/firmware/dmi when this option is enabled and 109 109 loaded. 110 110 111 + config DMI_SCAN_MACHINE_NON_EFI_FALLBACK 112 + bool 113 + 111 114 config ISCSI_IBFT_FIND 112 115 bool "iSCSI Boot Firmware Table Attributes" 113 116 depends on X86
+10 -10
drivers/firmware/dmi_scan.c
··· 116 116 { 117 117 u8 *buf; 118 118 119 - buf = dmi_ioremap(dmi_base, dmi_len); 119 + buf = dmi_early_remap(dmi_base, dmi_len); 120 120 if (buf == NULL) 121 121 return -1; 122 122 ··· 124 124 125 125 add_device_randomness(buf, dmi_len); 126 126 127 - dmi_iounmap(buf, dmi_len); 127 + dmi_early_unmap(buf, dmi_len); 128 128 return 0; 129 129 } 130 130 ··· 527 527 * needed during early boot. This also means we can 528 528 * iounmap the space when we're done with it. 529 529 */ 530 - p = dmi_ioremap(efi.smbios, 32); 530 + p = dmi_early_remap(efi.smbios, 32); 531 531 if (p == NULL) 532 532 goto error; 533 533 memcpy_fromio(buf, p, 32); 534 - dmi_iounmap(p, 32); 534 + dmi_early_unmap(p, 32); 535 535 536 536 if (!dmi_present(buf)) { 537 537 dmi_available = 1; 538 538 goto out; 539 539 } 540 - } else { 541 - p = dmi_ioremap(0xF0000, 0x10000); 540 + } else if (IS_ENABLED(CONFIG_DMI_SCAN_MACHINE_NON_EFI_FALLBACK)) { 541 + p = dmi_early_remap(0xF0000, 0x10000); 542 542 if (p == NULL) 543 543 goto error; 544 544 ··· 554 554 memcpy_fromio(buf + 16, q, 16); 555 555 if (!dmi_present(buf)) { 556 556 dmi_available = 1; 557 - dmi_iounmap(p, 0x10000); 557 + dmi_early_unmap(p, 0x10000); 558 558 goto out; 559 559 } 560 560 memcpy(buf, buf + 16, 16); 561 561 } 562 - dmi_iounmap(p, 0x10000); 562 + dmi_early_unmap(p, 0x10000); 563 563 } 564 564 error: 565 565 pr_info("DMI not present or invalid.\n"); ··· 831 831 if (!dmi_available) 832 832 return -1; 833 833 834 - buf = ioremap(dmi_base, dmi_len); 834 + buf = dmi_remap(dmi_base, dmi_len); 835 835 if (buf == NULL) 836 836 return -1; 837 837 838 838 dmi_table(buf, dmi_len, dmi_num, decode, private_data); 839 839 840 - iounmap(buf); 840 + dmi_unmap(buf); 841 841 return 0; 842 842 } 843 843 EXPORT_SYMBOL_GPL(dmi_walk);
+2 -2
drivers/gpu/drm/gma500/backlight.c
··· 26 26 #include "intel_bios.h" 27 27 #include "power.h" 28 28 29 + #ifdef CONFIG_BACKLIGHT_CLASS_DEVICE 29 30 static void do_gma_backlight_set(struct drm_device *dev) 30 31 { 31 - #ifdef CONFIG_BACKLIGHT_CLASS_DEVICE 32 32 struct drm_psb_private *dev_priv = dev->dev_private; 33 33 backlight_update_status(dev_priv->backlight_device); 34 - #endif 35 34 } 35 + #endif 36 36 37 37 void gma_backlight_enable(struct drm_device *dev) 38 38 {
+1 -1
drivers/mailbox/omap-mbox.h
··· 52 52 53 53 struct omap_mbox { 54 54 const char *name; 55 - unsigned int irq; 55 + int irq; 56 56 struct omap_mbox_queue *txq, *rxq; 57 57 struct omap_mbox_ops *ops; 58 58 struct device *dev;
+20 -10
drivers/memstick/host/rtsx_pci_ms.c
··· 145 145 unsigned int length = sg->length; 146 146 u16 sec_cnt = (u16)(length / 512); 147 147 u8 val, trans_mode, dma_dir; 148 + struct memstick_dev *card = host->msh->card; 149 + bool pro_card = card->id.type == MEMSTICK_TYPE_PRO; 148 150 149 151 dev_dbg(ms_dev(host), "%s: tpc = 0x%02x, data_dir = %s, length = %d\n", 150 152 __func__, tpc, (data_dir == READ) ? "READ" : "WRITE", ··· 154 152 155 153 if (data_dir == READ) { 156 154 dma_dir = DMA_DIR_FROM_CARD; 157 - trans_mode = MS_TM_AUTO_READ; 155 + trans_mode = pro_card ? MS_TM_AUTO_READ : MS_TM_NORMAL_READ; 158 156 } else { 159 157 dma_dir = DMA_DIR_TO_CARD; 160 - trans_mode = MS_TM_AUTO_WRITE; 158 + trans_mode = pro_card ? MS_TM_AUTO_WRITE : MS_TM_NORMAL_WRITE; 161 159 } 162 160 163 161 rtsx_pci_init_cmd(pcr); 164 162 165 163 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, MS_TPC, 0xFF, tpc); 166 - rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, MS_SECTOR_CNT_H, 167 - 0xFF, (u8)(sec_cnt >> 8)); 168 - rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, MS_SECTOR_CNT_L, 169 - 0xFF, (u8)sec_cnt); 164 + if (pro_card) { 165 + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, MS_SECTOR_CNT_H, 166 + 0xFF, (u8)(sec_cnt >> 8)); 167 + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, MS_SECTOR_CNT_L, 168 + 0xFF, (u8)sec_cnt); 169 + } 170 170 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, MS_TRANS_CFG, 0xFF, cfg); 171 171 172 172 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, IRQSTAT0, ··· 196 192 } 197 193 198 194 rtsx_pci_read_register(pcr, MS_TRANS_CFG, &val); 199 - if (val & (MS_INT_CMDNK | MS_INT_ERR | MS_CRC16_ERR | MS_RDY_TIMEOUT)) 200 - return -EIO; 195 + if (pro_card) { 196 + if (val & (MS_INT_CMDNK | MS_INT_ERR | 197 + MS_CRC16_ERR | MS_RDY_TIMEOUT)) 198 + return -EIO; 199 + } else { 200 + if (val & (MS_CRC16_ERR | MS_RDY_TIMEOUT)) 201 + return -EIO; 202 + } 201 203 202 204 return 0; 203 205 } ··· 472 462 clock = 19000000; 473 463 ssc_depth = RTSX_SSC_DEPTH_500K; 474 464 475 - err = rtsx_pci_write_register(pcr, MS_CFG, 476 - 0x18, MS_BUS_WIDTH_1); 465 + err = rtsx_pci_write_register(pcr, 
MS_CFG, 0x58, 466 + MS_BUS_WIDTH_1 | PUSH_TIME_DEFAULT); 477 467 if (err < 0) 478 468 return err; 479 469 } else if (value == MEMSTICK_PAR4) {
+1 -1
drivers/mfd/max8998.c
··· 175 175 if (IS_ENABLED(CONFIG_OF) && i2c->dev.of_node) { 176 176 const struct of_device_id *match; 177 177 match = of_match_node(max8998_dt_match, i2c->dev.of_node); 178 - return (int)match->data; 178 + return (int)(long)match->data; 179 179 } 180 180 181 181 return (int)id->driver_data;
+1 -1
drivers/mfd/tps65217.c
··· 170 170 "Failed to find matching dt id\n"); 171 171 return -EINVAL; 172 172 } 173 - chip_id = (unsigned int)match->data; 173 + chip_id = (unsigned int)(unsigned long)match->data; 174 174 status_off = of_property_read_bool(client->dev.of_node, 175 175 "ti,pmic-shutdown-controller"); 176 176 }
+12 -1
drivers/rtc/Kconfig
··· 212 212 This driver can also be built as a module. If so, the module 213 213 will be called rtc-ds3232. 214 214 215 + config RTC_DRV_HYM8563 216 + tristate "Haoyu Microelectronics HYM8563" 217 + depends on I2C && OF 218 + help 219 + Say Y to enable support for the HYM8563 I2C RTC chip. Apart 220 + from the usual rtc functions it provides a clock output of 221 + up to 32kHz. 222 + 223 + This driver can also be built as a module. If so, the module 224 + will be called rtc-hym8563. 225 + 215 226 config RTC_DRV_LP8788 216 227 tristate "TI LP8788 RTC driver" 217 228 depends on MFD_LP8788 ··· 648 637 649 638 config RTC_DRV_CMOS 650 639 tristate "PC-style 'CMOS'" 651 - depends on X86 || ARM || M32R || ATARI || PPC || MIPS || SPARC64 640 + depends on X86 || ARM || M32R || PPC || MIPS || SPARC64 652 641 default y if X86 653 642 help 654 643 Say "yes" here to get direct support for the real time clock
+1
drivers/rtc/Makefile
··· 55 55 obj-$(CONFIG_RTC_DRV_FM3130) += rtc-fm3130.o 56 56 obj-$(CONFIG_RTC_DRV_GENERIC) += rtc-generic.o 57 57 obj-$(CONFIG_RTC_DRV_HID_SENSOR_TIME) += rtc-hid-sensor-time.o 58 + obj-$(CONFIG_RTC_DRV_HYM8563) += rtc-hym8563.o 58 59 obj-$(CONFIG_RTC_DRV_IMXDI) += rtc-imxdi.o 59 60 obj-$(CONFIG_RTC_DRV_ISL1208) += rtc-isl1208.o 60 61 obj-$(CONFIG_RTC_DRV_ISL12022) += rtc-isl12022.o
+20 -4
drivers/rtc/class.c
··· 14 14 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 15 15 16 16 #include <linux/module.h> 17 + #include <linux/of.h> 17 18 #include <linux/rtc.h> 18 19 #include <linux/kdev_t.h> 19 20 #include <linux/idr.h> ··· 158 157 { 159 158 struct rtc_device *rtc; 160 159 struct rtc_wkalrm alrm; 161 - int id, err; 160 + int of_id = -1, id = -1, err; 162 161 163 - id = ida_simple_get(&rtc_ida, 0, 0, GFP_KERNEL); 162 + if (dev->of_node) 163 + of_id = of_alias_get_id(dev->of_node, "rtc"); 164 + else if (dev->parent && dev->parent->of_node) 165 + of_id = of_alias_get_id(dev->parent->of_node, "rtc"); 166 + 167 + if (of_id >= 0) { 168 + id = ida_simple_get(&rtc_ida, of_id, of_id + 1, 169 + GFP_KERNEL); 170 + if (id < 0) 171 + dev_warn(dev, "/aliases ID %d not available\n", 172 + of_id); 173 + } 174 + 164 175 if (id < 0) { 165 - err = id; 166 - goto exit; 176 + id = ida_simple_get(&rtc_ida, 0, 0, GFP_KERNEL); 177 + if (id < 0) { 178 + err = id; 179 + goto exit; 180 + } 167 181 } 168 182 169 183 rtc = kzalloc(sizeof(struct rtc_device), GFP_KERNEL);
+3 -16
drivers/rtc/rtc-as3722.c
··· 198 198 199 199 device_init_wakeup(&pdev->dev, 1); 200 200 201 - as3722_rtc->rtc = rtc_device_register("as3722", &pdev->dev, 201 + as3722_rtc->rtc = devm_rtc_device_register(&pdev->dev, "as3722-rtc", 202 202 &as3722_rtc_ops, THIS_MODULE); 203 203 if (IS_ERR(as3722_rtc->rtc)) { 204 204 ret = PTR_ERR(as3722_rtc->rtc); ··· 209 209 as3722_rtc->alarm_irq = platform_get_irq(pdev, 0); 210 210 dev_info(&pdev->dev, "RTC interrupt %d\n", as3722_rtc->alarm_irq); 211 211 212 - ret = request_threaded_irq(as3722_rtc->alarm_irq, NULL, 212 + ret = devm_request_threaded_irq(&pdev->dev, as3722_rtc->alarm_irq, NULL, 213 213 as3722_alarm_irq, IRQF_ONESHOT | IRQF_EARLY_RESUME, 214 214 "rtc-alarm", as3722_rtc); 215 215 if (ret < 0) { 216 216 dev_err(&pdev->dev, "Failed to request alarm IRQ %d: %d\n", 217 217 as3722_rtc->alarm_irq, ret); 218 - goto scrub; 218 + return ret; 219 219 } 220 220 disable_irq(as3722_rtc->alarm_irq); 221 - return 0; 222 - scrub: 223 - rtc_device_unregister(as3722_rtc->rtc); 224 - return ret; 225 - } 226 - 227 - static int as3722_rtc_remove(struct platform_device *pdev) 228 - { 229 - struct as3722_rtc *as3722_rtc = platform_get_drvdata(pdev); 230 - 231 - free_irq(as3722_rtc->alarm_irq, as3722_rtc); 232 - rtc_device_unregister(as3722_rtc->rtc); 233 221 return 0; 234 222 } 235 223 ··· 248 260 249 261 static struct platform_driver as3722_rtc_driver = { 250 262 .probe = as3722_rtc_probe, 251 - .remove = as3722_rtc_remove, 252 263 .driver = { 253 264 .name = "as3722-rtc", 254 265 .pm = &as3722_rtc_pm_ops,
+3 -5
drivers/rtc/rtc-cmos.c
··· 756 756 irq_handler_t rtc_cmos_int_handler; 757 757 758 758 if (is_hpet_enabled()) { 759 - int err; 760 - 761 759 rtc_cmos_int_handler = hpet_rtc_interrupt; 762 - err = hpet_register_irq_handler(cmos_interrupt); 763 - if (err != 0) { 760 + retval = hpet_register_irq_handler(cmos_interrupt); 761 + if (retval) { 764 762 dev_warn(dev, "hpet_register_irq_handler " 765 763 " failed in rtc_init()."); 766 764 goto cleanup1; ··· 1173 1175 .remove = __exit_p(cmos_platform_remove), 1174 1176 .shutdown = cmos_platform_shutdown, 1175 1177 .driver = { 1176 - .name = (char *) driver_name, 1178 + .name = driver_name, 1177 1179 #ifdef CONFIG_PM 1178 1180 .pm = &cmos_pm_ops, 1179 1181 #endif
-1
drivers/rtc/rtc-ds1305.c
··· 787 787 cancel_work_sync(&ds1305->work); 788 788 } 789 789 790 - spi_set_drvdata(spi, NULL); 791 790 return 0; 792 791 } 793 792
+9 -1
drivers/rtc/rtc-ds1742.c
··· 13 13 */ 14 14 15 15 #include <linux/bcd.h> 16 - #include <linux/init.h> 17 16 #include <linux/kernel.h> 18 17 #include <linux/gfp.h> 19 18 #include <linux/delay.h> 20 19 #include <linux/jiffies.h> 21 20 #include <linux/rtc.h> 21 + #include <linux/of.h> 22 + #include <linux/of_device.h> 22 23 #include <linux/platform_device.h> 23 24 #include <linux/io.h> 24 25 #include <linux/module.h> ··· 216 215 return 0; 217 216 } 218 217 218 + static struct of_device_id __maybe_unused ds1742_rtc_of_match[] = { 219 + { .compatible = "maxim,ds1742", }, 220 + { } 221 + }; 222 + MODULE_DEVICE_TABLE(of, ds1742_rtc_of_match); 223 + 219 224 static struct platform_driver ds1742_rtc_driver = { 220 225 .probe = ds1742_rtc_probe, 221 226 .remove = ds1742_rtc_remove, 222 227 .driver = { 223 228 .name = "rtc-ds1742", 224 229 .owner = THIS_MODULE, 230 + .of_match_table = ds1742_rtc_of_match, 225 231 }, 226 232 }; 227 233
+606
drivers/rtc/rtc-hym8563.c
··· 1 + /* 2 + * Haoyu HYM8563 RTC driver 3 + * 4 + * Copyright (C) 2013 MundoReader S.L. 5 + * Author: Heiko Stuebner <heiko@sntech.de> 6 + * 7 + * based on rtc-HYM8563 8 + * Copyright (C) 2010 ROCKCHIP, Inc. 9 + * 10 + * This software is licensed under the terms of the GNU General Public 11 + * License version 2, as published by the Free Software Foundation, and 12 + * may be copied, distributed, and modified under those terms. 13 + * 14 + * This program is distributed in the hope that it will be useful, 15 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 16 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 17 + * GNU General Public License for more details. 18 + */ 19 + 20 + #include <linux/module.h> 21 + #include <linux/clk-provider.h> 22 + #include <linux/i2c.h> 23 + #include <linux/bcd.h> 24 + #include <linux/rtc.h> 25 + 26 + #define HYM8563_CTL1 0x00 27 + #define HYM8563_CTL1_TEST BIT(7) 28 + #define HYM8563_CTL1_STOP BIT(5) 29 + #define HYM8563_CTL1_TESTC BIT(3) 30 + 31 + #define HYM8563_CTL2 0x01 32 + #define HYM8563_CTL2_TI_TP BIT(4) 33 + #define HYM8563_CTL2_AF BIT(3) 34 + #define HYM8563_CTL2_TF BIT(2) 35 + #define HYM8563_CTL2_AIE BIT(1) 36 + #define HYM8563_CTL2_TIE BIT(0) 37 + 38 + #define HYM8563_SEC 0x02 39 + #define HYM8563_SEC_VL BIT(7) 40 + #define HYM8563_SEC_MASK 0x7f 41 + 42 + #define HYM8563_MIN 0x03 43 + #define HYM8563_MIN_MASK 0x7f 44 + 45 + #define HYM8563_HOUR 0x04 46 + #define HYM8563_HOUR_MASK 0x3f 47 + 48 + #define HYM8563_DAY 0x05 49 + #define HYM8563_DAY_MASK 0x3f 50 + 51 + #define HYM8563_WEEKDAY 0x06 52 + #define HYM8563_WEEKDAY_MASK 0x07 53 + 54 + #define HYM8563_MONTH 0x07 55 + #define HYM8563_MONTH_CENTURY BIT(7) 56 + #define HYM8563_MONTH_MASK 0x1f 57 + 58 + #define HYM8563_YEAR 0x08 59 + 60 + #define HYM8563_ALM_MIN 0x09 61 + #define HYM8563_ALM_HOUR 0x0a 62 + #define HYM8563_ALM_DAY 0x0b 63 + #define HYM8563_ALM_WEEK 0x0c 64 + 65 + /* Each alarm check can be disabled by setting this bit in 
the register */ 66 + #define HYM8563_ALM_BIT_DISABLE BIT(7) 67 + 68 + #define HYM8563_CLKOUT 0x0d 69 + #define HYM8563_CLKOUT_DISABLE BIT(7) 70 + #define HYM8563_CLKOUT_32768 0 71 + #define HYM8563_CLKOUT_1024 1 72 + #define HYM8563_CLKOUT_32 2 73 + #define HYM8563_CLKOUT_1 3 74 + #define HYM8563_CLKOUT_MASK 3 75 + 76 + #define HYM8563_TMR_CTL 0x0e 77 + #define HYM8563_TMR_CTL_ENABLE BIT(7) 78 + #define HYM8563_TMR_CTL_4096 0 79 + #define HYM8563_TMR_CTL_64 1 80 + #define HYM8563_TMR_CTL_1 2 81 + #define HYM8563_TMR_CTL_1_60 3 82 + #define HYM8563_TMR_CTL_MASK 3 83 + 84 + #define HYM8563_TMR_CNT 0x0f 85 + 86 + struct hym8563 { 87 + struct i2c_client *client; 88 + struct rtc_device *rtc; 89 + bool valid; 90 + #ifdef CONFIG_COMMON_CLK 91 + struct clk_hw clkout_hw; 92 + #endif 93 + }; 94 + 95 + /* 96 + * RTC handling 97 + */ 98 + 99 + static int hym8563_rtc_read_time(struct device *dev, struct rtc_time *tm) 100 + { 101 + struct i2c_client *client = to_i2c_client(dev); 102 + struct hym8563 *hym8563 = i2c_get_clientdata(client); 103 + u8 buf[7]; 104 + int ret; 105 + 106 + if (!hym8563->valid) { 107 + dev_warn(&client->dev, "no valid clock/calendar values available\n"); 108 + return -EPERM; 109 + } 110 + 111 + ret = i2c_smbus_read_i2c_block_data(client, HYM8563_SEC, 7, buf); 112 + 113 + tm->tm_sec = bcd2bin(buf[0] & HYM8563_SEC_MASK); 114 + tm->tm_min = bcd2bin(buf[1] & HYM8563_MIN_MASK); 115 + tm->tm_hour = bcd2bin(buf[2] & HYM8563_HOUR_MASK); 116 + tm->tm_mday = bcd2bin(buf[3] & HYM8563_DAY_MASK); 117 + tm->tm_wday = bcd2bin(buf[4] & HYM8563_WEEKDAY_MASK); /* 0 = Sun */ 118 + tm->tm_mon = bcd2bin(buf[5] & HYM8563_MONTH_MASK) - 1; /* 0 = Jan */ 119 + tm->tm_year = bcd2bin(buf[6]) + 100; 120 + 121 + return 0; 122 + } 123 + 124 + static int hym8563_rtc_set_time(struct device *dev, struct rtc_time *tm) 125 + { 126 + struct i2c_client *client = to_i2c_client(dev); 127 + struct hym8563 *hym8563 = i2c_get_clientdata(client); 128 + u8 buf[7]; 129 + int ret; 130 + 131 + /* 
Years >= 2100 are to far in the future, 19XX is to early */ 132 + if (tm->tm_year < 100 || tm->tm_year >= 200) 133 + return -EINVAL; 134 + 135 + buf[0] = bin2bcd(tm->tm_sec); 136 + buf[1] = bin2bcd(tm->tm_min); 137 + buf[2] = bin2bcd(tm->tm_hour); 138 + buf[3] = bin2bcd(tm->tm_mday); 139 + buf[4] = bin2bcd(tm->tm_wday); 140 + buf[5] = bin2bcd(tm->tm_mon + 1); 141 + 142 + /* 143 + * While the HYM8563 has a century flag in the month register, 144 + * it does not seem to carry it over a subsequent write/read. 145 + * So we'll limit ourself to 100 years, starting at 2000 for now. 146 + */ 147 + buf[6] = tm->tm_year - 100; 148 + 149 + /* 150 + * CTL1 only contains TEST-mode bits apart from stop, 151 + * so no need to read the value first 152 + */ 153 + ret = i2c_smbus_write_byte_data(client, HYM8563_CTL1, 154 + HYM8563_CTL1_STOP); 155 + if (ret < 0) 156 + return ret; 157 + 158 + ret = i2c_smbus_write_i2c_block_data(client, HYM8563_SEC, 7, buf); 159 + if (ret < 0) 160 + return ret; 161 + 162 + ret = i2c_smbus_write_byte_data(client, HYM8563_CTL1, 0); 163 + if (ret < 0) 164 + return ret; 165 + 166 + hym8563->valid = true; 167 + 168 + return 0; 169 + } 170 + 171 + static int hym8563_rtc_alarm_irq_enable(struct device *dev, 172 + unsigned int enabled) 173 + { 174 + struct i2c_client *client = to_i2c_client(dev); 175 + int data; 176 + 177 + data = i2c_smbus_read_byte_data(client, HYM8563_CTL2); 178 + if (data < 0) 179 + return data; 180 + 181 + if (enabled) 182 + data |= HYM8563_CTL2_AIE; 183 + else 184 + data &= ~HYM8563_CTL2_AIE; 185 + 186 + return i2c_smbus_write_byte_data(client, HYM8563_CTL2, data); 187 + }; 188 + 189 + static int hym8563_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alm) 190 + { 191 + struct i2c_client *client = to_i2c_client(dev); 192 + struct rtc_time *alm_tm = &alm->time; 193 + u8 buf[4]; 194 + int ret; 195 + 196 + ret = i2c_smbus_read_i2c_block_data(client, HYM8563_ALM_MIN, 4, buf); 197 + if (ret < 0) 198 + return ret; 199 + 200 + /* The 
alarm only has a minute accuracy */ 201 + alm_tm->tm_sec = -1; 202 + 203 + alm_tm->tm_min = (buf[0] & HYM8563_ALM_BIT_DISABLE) ? 204 + -1 : 205 + bcd2bin(buf[0] & HYM8563_MIN_MASK); 206 + alm_tm->tm_hour = (buf[1] & HYM8563_ALM_BIT_DISABLE) ? 207 + -1 : 208 + bcd2bin(buf[1] & HYM8563_HOUR_MASK); 209 + alm_tm->tm_mday = (buf[2] & HYM8563_ALM_BIT_DISABLE) ? 210 + -1 : 211 + bcd2bin(buf[2] & HYM8563_DAY_MASK); 212 + alm_tm->tm_wday = (buf[3] & HYM8563_ALM_BIT_DISABLE) ? 213 + -1 : 214 + bcd2bin(buf[3] & HYM8563_WEEKDAY_MASK); 215 + 216 + alm_tm->tm_mon = -1; 217 + alm_tm->tm_year = -1; 218 + 219 + ret = i2c_smbus_read_byte_data(client, HYM8563_CTL2); 220 + if (ret < 0) 221 + return ret; 222 + 223 + if (ret & HYM8563_CTL2_AIE) 224 + alm->enabled = 1; 225 + 226 + return 0; 227 + } 228 + 229 + static int hym8563_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alm) 230 + { 231 + struct i2c_client *client = to_i2c_client(dev); 232 + struct rtc_time *alm_tm = &alm->time; 233 + u8 buf[4]; 234 + int ret; 235 + 236 + /* 237 + * The alarm has no seconds so deal with it 238 + */ 239 + if (alm_tm->tm_sec) { 240 + alm_tm->tm_sec = 0; 241 + alm_tm->tm_min++; 242 + if (alm_tm->tm_min >= 60) { 243 + alm_tm->tm_min = 0; 244 + alm_tm->tm_hour++; 245 + if (alm_tm->tm_hour >= 24) { 246 + alm_tm->tm_hour = 0; 247 + alm_tm->tm_mday++; 248 + if (alm_tm->tm_mday > 31) 249 + alm_tm->tm_mday = 0; 250 + } 251 + } 252 + } 253 + 254 + ret = i2c_smbus_read_byte_data(client, HYM8563_CTL2); 255 + if (ret < 0) 256 + return ret; 257 + 258 + ret &= ~HYM8563_CTL2_AIE; 259 + 260 + ret = i2c_smbus_write_byte_data(client, HYM8563_CTL2, ret); 261 + if (ret < 0) 262 + return ret; 263 + 264 + buf[0] = (alm_tm->tm_min < 60 && alm_tm->tm_min >= 0) ? 265 + bin2bcd(alm_tm->tm_min) : HYM8563_ALM_BIT_DISABLE; 266 + 267 + buf[1] = (alm_tm->tm_hour < 24 && alm_tm->tm_hour >= 0) ? 268 + bin2bcd(alm_tm->tm_hour) : HYM8563_ALM_BIT_DISABLE; 269 + 270 + buf[2] = (alm_tm->tm_mday <= 31 && alm_tm->tm_mday >= 1) ? 
271 + bin2bcd(alm_tm->tm_mday) : HYM8563_ALM_BIT_DISABLE; 272 + 273 + buf[3] = (alm_tm->tm_wday < 7 && alm_tm->tm_wday >= 0) ? 274 + bin2bcd(alm_tm->tm_wday) : HYM8563_ALM_BIT_DISABLE; 275 + 276 + ret = i2c_smbus_write_i2c_block_data(client, HYM8563_ALM_MIN, 4, buf); 277 + if (ret < 0) 278 + return ret; 279 + 280 + return hym8563_rtc_alarm_irq_enable(dev, alm->enabled); 281 + } 282 + 283 + static const struct rtc_class_ops hym8563_rtc_ops = { 284 + .read_time = hym8563_rtc_read_time, 285 + .set_time = hym8563_rtc_set_time, 286 + .alarm_irq_enable = hym8563_rtc_alarm_irq_enable, 287 + .read_alarm = hym8563_rtc_read_alarm, 288 + .set_alarm = hym8563_rtc_set_alarm, 289 + }; 290 + 291 + /* 292 + * Handling of the clkout 293 + */ 294 + 295 + #ifdef CONFIG_COMMON_CLK 296 + #define clkout_hw_to_hym8563(_hw) container_of(_hw, struct hym8563, clkout_hw) 297 + 298 + static int clkout_rates[] = { 299 + 32768, 300 + 1024, 301 + 32, 302 + 1, 303 + }; 304 + 305 + static unsigned long hym8563_clkout_recalc_rate(struct clk_hw *hw, 306 + unsigned long parent_rate) 307 + { 308 + struct hym8563 *hym8563 = clkout_hw_to_hym8563(hw); 309 + struct i2c_client *client = hym8563->client; 310 + int ret = i2c_smbus_read_byte_data(client, HYM8563_CLKOUT); 311 + 312 + if (ret < 0 || ret & HYM8563_CLKOUT_DISABLE) 313 + return 0; 314 + 315 + ret &= HYM8563_CLKOUT_MASK; 316 + return clkout_rates[ret]; 317 + } 318 + 319 + static long hym8563_clkout_round_rate(struct clk_hw *hw, unsigned long rate, 320 + unsigned long *prate) 321 + { 322 + int i; 323 + 324 + for (i = 0; i < ARRAY_SIZE(clkout_rates); i++) 325 + if (clkout_rates[i] <= rate) 326 + return clkout_rates[i]; 327 + 328 + return 0; 329 + } 330 + 331 + static int hym8563_clkout_set_rate(struct clk_hw *hw, unsigned long rate, 332 + unsigned long parent_rate) 333 + { 334 + struct hym8563 *hym8563 = clkout_hw_to_hym8563(hw); 335 + struct i2c_client *client = hym8563->client; 336 + int ret = i2c_smbus_read_byte_data(client, HYM8563_CLKOUT); 337 + 
int i; 338 + 339 + if (ret < 0) 340 + return ret; 341 + 342 + for (i = 0; i < ARRAY_SIZE(clkout_rates); i++) 343 + if (clkout_rates[i] == rate) { 344 + ret &= ~HYM8563_CLKOUT_MASK; 345 + ret |= i; 346 + return i2c_smbus_write_byte_data(client, 347 + HYM8563_CLKOUT, ret); 348 + } 349 + 350 + return -EINVAL; 351 + } 352 + 353 + static int hym8563_clkout_control(struct clk_hw *hw, bool enable) 354 + { 355 + struct hym8563 *hym8563 = clkout_hw_to_hym8563(hw); 356 + struct i2c_client *client = hym8563->client; 357 + int ret = i2c_smbus_read_byte_data(client, HYM8563_CLKOUT); 358 + 359 + if (ret < 0) 360 + return ret; 361 + 362 + if (enable) 363 + ret &= ~HYM8563_CLKOUT_DISABLE; 364 + else 365 + ret |= HYM8563_CLKOUT_DISABLE; 366 + 367 + return i2c_smbus_write_byte_data(client, HYM8563_CLKOUT, ret); 368 + } 369 + 370 + static int hym8563_clkout_prepare(struct clk_hw *hw) 371 + { 372 + return hym8563_clkout_control(hw, 1); 373 + } 374 + 375 + static void hym8563_clkout_unprepare(struct clk_hw *hw) 376 + { 377 + hym8563_clkout_control(hw, 0); 378 + } 379 + 380 + static int hym8563_clkout_is_prepared(struct clk_hw *hw) 381 + { 382 + struct hym8563 *hym8563 = clkout_hw_to_hym8563(hw); 383 + struct i2c_client *client = hym8563->client; 384 + int ret = i2c_smbus_read_byte_data(client, HYM8563_CLKOUT); 385 + 386 + if (ret < 0) 387 + return ret; 388 + 389 + return !(ret & HYM8563_CLKOUT_DISABLE); 390 + } 391 + 392 + static const struct clk_ops hym8563_clkout_ops = { 393 + .prepare = hym8563_clkout_prepare, 394 + .unprepare = hym8563_clkout_unprepare, 395 + .is_prepared = hym8563_clkout_is_prepared, 396 + .recalc_rate = hym8563_clkout_recalc_rate, 397 + .round_rate = hym8563_clkout_round_rate, 398 + .set_rate = hym8563_clkout_set_rate, 399 + }; 400 + 401 + static struct clk *hym8563_clkout_register_clk(struct hym8563 *hym8563) 402 + { 403 + struct i2c_client *client = hym8563->client; 404 + struct device_node *node = client->dev.of_node; 405 + struct clk *clk; 406 + struct 
clk_init_data init; 407 + int ret; 408 + 409 + ret = i2c_smbus_write_byte_data(client, HYM8563_CLKOUT, 410 + HYM8563_CLKOUT_DISABLE); 411 + if (ret < 0) 412 + return ERR_PTR(ret); 413 + 414 + init.name = "hym8563-clkout"; 415 + init.ops = &hym8563_clkout_ops; 416 + init.flags = CLK_IS_ROOT; 417 + init.parent_names = NULL; 418 + init.num_parents = 0; 419 + hym8563->clkout_hw.init = &init; 420 + 421 + /* register the clock */ 422 + clk = clk_register(&client->dev, &hym8563->clkout_hw); 423 + 424 + if (!IS_ERR(clk)) 425 + of_clk_add_provider(node, of_clk_src_simple_get, clk); 426 + 427 + return clk; 428 + } 429 + #endif 430 + 431 + /* 432 + * The alarm interrupt is implemented as a level-low interrupt in the 433 + * hym8563, while the timer interrupt uses a falling edge. 434 + * We don't use the timer at all, so the interrupt is requested to 435 + * use the level-low trigger. 436 + */ 437 + static irqreturn_t hym8563_irq(int irq, void *dev_id) 438 + { 439 + struct hym8563 *hym8563 = (struct hym8563 *)dev_id; 440 + struct i2c_client *client = hym8563->client; 441 + struct mutex *lock = &hym8563->rtc->ops_lock; 442 + int data, ret; 443 + 444 + mutex_lock(lock); 445 + 446 + /* Clear the alarm flag */ 447 + 448 + data = i2c_smbus_read_byte_data(client, HYM8563_CTL2); 449 + if (data < 0) { 450 + dev_err(&client->dev, "%s: error reading i2c data %d\n", 451 + __func__, data); 452 + goto out; 453 + } 454 + 455 + data &= ~HYM8563_CTL2_AF; 456 + 457 + ret = i2c_smbus_write_byte_data(client, HYM8563_CTL2, data); 458 + if (ret < 0) { 459 + dev_err(&client->dev, "%s: error writing i2c data %d\n", 460 + __func__, ret); 461 + } 462 + 463 + out: 464 + mutex_unlock(lock); 465 + return IRQ_HANDLED; 466 + } 467 + 468 + static int hym8563_init_device(struct i2c_client *client) 469 + { 470 + int ret; 471 + 472 + /* Clear stop flag if present */ 473 + ret = i2c_smbus_write_byte_data(client, HYM8563_CTL1, 0); 474 + if (ret < 0) 475 + return ret; 476 + 477 + ret = 
i2c_smbus_read_byte_data(client, HYM8563_CTL2); 478 + if (ret < 0) 479 + return ret; 480 + 481 + /* Disable alarm and timer interrupts */ 482 + ret &= ~HYM8563_CTL2_AIE; 483 + ret &= ~HYM8563_CTL2_TIE; 484 + 485 + /* Clear any pending alarm and timer flags */ 486 + if (ret & HYM8563_CTL2_AF) 487 + ret &= ~HYM8563_CTL2_AF; 488 + 489 + if (ret & HYM8563_CTL2_TF) 490 + ret &= ~HYM8563_CTL2_TF; 491 + 492 + ret &= ~HYM8563_CTL2_TI_TP; 493 + 494 + return i2c_smbus_write_byte_data(client, HYM8563_CTL2, ret); 495 + } 496 + 497 + #ifdef CONFIG_PM_SLEEP 498 + static int hym8563_suspend(struct device *dev) 499 + { 500 + struct i2c_client *client = to_i2c_client(dev); 501 + int ret; 502 + 503 + if (device_may_wakeup(dev)) { 504 + ret = enable_irq_wake(client->irq); 505 + if (ret) { 506 + dev_err(dev, "enable_irq_wake failed, %d\n", ret); 507 + return ret; 508 + } 509 + } 510 + 511 + return 0; 512 + } 513 + 514 + static int hym8563_resume(struct device *dev) 515 + { 516 + struct i2c_client *client = to_i2c_client(dev); 517 + 518 + if (device_may_wakeup(dev)) 519 + disable_irq_wake(client->irq); 520 + 521 + return 0; 522 + } 523 + #endif 524 + 525 + static SIMPLE_DEV_PM_OPS(hym8563_pm_ops, hym8563_suspend, hym8563_resume); 526 + 527 + static int hym8563_probe(struct i2c_client *client, 528 + const struct i2c_device_id *id) 529 + { 530 + struct hym8563 *hym8563; 531 + int ret; 532 + 533 + hym8563 = devm_kzalloc(&client->dev, sizeof(*hym8563), GFP_KERNEL); 534 + if (!hym8563) 535 + return -ENOMEM; 536 + 537 + hym8563->client = client; 538 + i2c_set_clientdata(client, hym8563); 539 + 540 + device_set_wakeup_capable(&client->dev, true); 541 + 542 + ret = hym8563_init_device(client); 543 + if (ret) { 544 + dev_err(&client->dev, "could not init device, %d\n", ret); 545 + return ret; 546 + } 547 + 548 + ret = devm_request_threaded_irq(&client->dev, client->irq, 549 + NULL, hym8563_irq, 550 + IRQF_TRIGGER_LOW | IRQF_ONESHOT, 551 + client->name, hym8563); 552 + if (ret < 0) { 553 + 
dev_err(&client->dev, "irq %d request failed, %d\n", 554 + client->irq, ret); 555 + return ret; 556 + } 557 + 558 + /* check state of calendar information */ 559 + ret = i2c_smbus_read_byte_data(client, HYM8563_SEC); 560 + if (ret < 0) 561 + return ret; 562 + 563 + hym8563->valid = !(ret & HYM8563_SEC_VL); 564 + dev_dbg(&client->dev, "rtc information is %s\n", 565 + hym8563->valid ? "valid" : "invalid"); 566 + 567 + hym8563->rtc = devm_rtc_device_register(&client->dev, client->name, 568 + &hym8563_rtc_ops, THIS_MODULE); 569 + if (IS_ERR(hym8563->rtc)) 570 + return PTR_ERR(hym8563->rtc); 571 + 572 + #ifdef CONFIG_COMMON_CLK 573 + hym8563_clkout_register_clk(hym8563); 574 + #endif 575 + 576 + return 0; 577 + } 578 + 579 + static const struct i2c_device_id hym8563_id[] = { 580 + { "hym8563", 0 }, 581 + {}, 582 + }; 583 + MODULE_DEVICE_TABLE(i2c, hym8563_id); 584 + 585 + static struct of_device_id hym8563_dt_idtable[] = { 586 + { .compatible = "haoyu,hym8563" }, 587 + {}, 588 + }; 589 + MODULE_DEVICE_TABLE(of, hym8563_dt_idtable); 590 + 591 + static struct i2c_driver hym8563_driver = { 592 + .driver = { 593 + .name = "rtc-hym8563", 594 + .owner = THIS_MODULE, 595 + .pm = &hym8563_pm_ops, 596 + .of_match_table = hym8563_dt_idtable, 597 + }, 598 + .probe = hym8563_probe, 599 + .id_table = hym8563_id, 600 + }; 601 + 602 + module_i2c_driver(hym8563_driver); 603 + 604 + MODULE_AUTHOR("Heiko Stuebner <heiko@sntech.de>"); 605 + MODULE_DESCRIPTION("HYM8563 RTC driver"); 606 + MODULE_LICENSE("GPL");
+5 -6
drivers/rtc/rtc-max8907.c
··· 51 51 { 52 52 struct max8907_rtc *rtc = data; 53 53 54 - regmap_update_bits(rtc->regmap, MAX8907_REG_ALARM0_CNTL, 0x7f, 0); 54 + regmap_write(rtc->regmap, MAX8907_REG_ALARM0_CNTL, 0); 55 55 56 56 rtc_update_irq(rtc->rtc_dev, 1, RTC_IRQF | RTC_AF); 57 57 ··· 64 64 bcd2bin(regs[RTC_YEAR1]) - 1900; 65 65 tm->tm_mon = bcd2bin(regs[RTC_MONTH] & 0x1f) - 1; 66 66 tm->tm_mday = bcd2bin(regs[RTC_DATE] & 0x3f); 67 - tm->tm_wday = (regs[RTC_WEEKDAY] & 0x07) - 1; 67 + tm->tm_wday = (regs[RTC_WEEKDAY] & 0x07); 68 68 if (regs[RTC_HOUR] & HOUR_12) { 69 69 tm->tm_hour = bcd2bin(regs[RTC_HOUR] & 0x01f); 70 70 if (tm->tm_hour == 12) ··· 88 88 regs[RTC_YEAR1] = bin2bcd(low); 89 89 regs[RTC_MONTH] = bin2bcd(tm->tm_mon + 1); 90 90 regs[RTC_DATE] = bin2bcd(tm->tm_mday); 91 - regs[RTC_WEEKDAY] = tm->tm_wday + 1; 91 + regs[RTC_WEEKDAY] = tm->tm_wday; 92 92 regs[RTC_HOUR] = bin2bcd(tm->tm_hour); 93 93 regs[RTC_MIN] = bin2bcd(tm->tm_min); 94 94 regs[RTC_SEC] = bin2bcd(tm->tm_sec); ··· 153 153 tm_to_regs(&alrm->time, regs); 154 154 155 155 /* Disable alarm while we update the target time */ 156 - ret = regmap_update_bits(rtc->regmap, MAX8907_REG_ALARM0_CNTL, 0x7f, 0); 156 + ret = regmap_write(rtc->regmap, MAX8907_REG_ALARM0_CNTL, 0); 157 157 if (ret < 0) 158 158 return ret; 159 159 ··· 163 163 return ret; 164 164 165 165 if (alrm->enabled) 166 - ret = regmap_update_bits(rtc->regmap, MAX8907_REG_ALARM0_CNTL, 167 - 0x7f, 0x7f); 166 + ret = regmap_write(rtc->regmap, MAX8907_REG_ALARM0_CNTL, 0x77); 168 167 169 168 return ret; 170 169 }
+5 -5
drivers/rtc/rtc-mxc.c
··· 391 391 pdata->clk = devm_clk_get(&pdev->dev, NULL); 392 392 if (IS_ERR(pdata->clk)) { 393 393 dev_err(&pdev->dev, "unable to get clock!\n"); 394 - ret = PTR_ERR(pdata->clk); 395 - goto exit_free_pdata; 394 + return PTR_ERR(pdata->clk); 396 395 } 397 396 398 - clk_prepare_enable(pdata->clk); 397 + ret = clk_prepare_enable(pdata->clk); 398 + if (ret) 399 + return ret; 400 + 399 401 rate = clk_get_rate(pdata->clk); 400 402 401 403 if (rate == 32768) ··· 448 446 449 447 exit_put_clk: 450 448 clk_disable_unprepare(pdata->clk); 451 - 452 - exit_free_pdata: 453 449 454 450 return ret; 455 451 }
+1 -4
drivers/rtc/rtc-pcf2127.c
··· 197 197 pcf2127_driver.driver.name, 198 198 &pcf2127_rtc_ops, THIS_MODULE); 199 199 200 - if (IS_ERR(pcf2127->rtc)) 201 - return PTR_ERR(pcf2127->rtc); 202 - 203 - return 0; 200 + return PTR_ERR_OR_ZERO(pcf2127->rtc); 204 201 } 205 202 206 203 static const struct i2c_device_id pcf2127_id[] = {
+68 -13
drivers/rtc/rtc-rx8581.c
··· 52 52 #define RX8581_CTRL_STOP 0x02 /* STOP bit */ 53 53 #define RX8581_CTRL_RESET 0x01 /* RESET bit */ 54 54 55 + struct rx8581 { 56 + struct i2c_client *client; 57 + struct rtc_device *rtc; 58 + s32 (*read_block_data)(const struct i2c_client *client, u8 command, 59 + u8 length, u8 *values); 60 + s32 (*write_block_data)(const struct i2c_client *client, u8 command, 61 + u8 length, const u8 *values); 62 + }; 63 + 55 64 static struct i2c_driver rx8581_driver; 65 + 66 + static int rx8581_read_block_data(const struct i2c_client *client, u8 command, 67 + u8 length, u8 *values) 68 + { 69 + s32 i, data; 70 + 71 + for (i = 0; i < length; i++) { 72 + data = i2c_smbus_read_byte_data(client, command + i); 73 + if (data < 0) 74 + return data; 75 + values[i] = data; 76 + } 77 + return i; 78 + } 79 + 80 + static int rx8581_write_block_data(const struct i2c_client *client, u8 command, 81 + u8 length, const u8 *values) 82 + { 83 + s32 i, ret; 84 + 85 + for (i = 0; i < length; i++) { 86 + ret = i2c_smbus_write_byte_data(client, command + i, 87 + values[i]); 88 + if (ret < 0) 89 + return ret; 90 + } 91 + return length; 92 + } 56 93 57 94 /* 58 95 * In the routines that deal directly with the rx8581 hardware, we use ··· 99 62 { 100 63 unsigned char date[7]; 101 64 int data, err; 65 + struct rx8581 *rx8581 = i2c_get_clientdata(client); 102 66 103 67 /* First we ensure that the "update flag" is not set, we read the 104 68 * time and date then re-read the "update flag". 
If the update flag ··· 118 80 err = i2c_smbus_write_byte_data(client, 119 81 RX8581_REG_FLAG, (data & ~RX8581_FLAG_UF)); 120 82 if (err != 0) { 121 - dev_err(&client->dev, "Unable to write device " 122 - "flags\n"); 83 + dev_err(&client->dev, "Unable to write device flags\n"); 123 84 return -EIO; 124 85 } 125 86 } 126 87 127 88 /* Now read time and date */ 128 - err = i2c_smbus_read_i2c_block_data(client, RX8581_REG_SC, 89 + err = rx8581->read_block_data(client, RX8581_REG_SC, 129 90 7, date); 130 91 if (err < 0) { 131 92 dev_err(&client->dev, "Unable to read date\n"); ··· 177 140 { 178 141 int data, err; 179 142 unsigned char buf[7]; 143 + struct rx8581 *rx8581 = i2c_get_clientdata(client); 180 144 181 145 dev_dbg(&client->dev, "%s: secs=%d, mins=%d, hours=%d, " 182 146 "mday=%d, mon=%d, year=%d, wday=%d\n", ··· 214 176 } 215 177 216 178 /* write register's data */ 217 - err = i2c_smbus_write_i2c_block_data(client, RX8581_REG_SC, 7, buf); 179 + err = rx8581->write_block_data(client, RX8581_REG_SC, 7, buf); 218 180 if (err < 0) { 219 181 dev_err(&client->dev, "Unable to write to date registers\n"); 220 182 return -EIO; ··· 269 231 static int rx8581_probe(struct i2c_client *client, 270 232 const struct i2c_device_id *id) 271 233 { 272 - struct rtc_device *rtc; 234 + struct rx8581 *rx8581; 273 235 274 236 dev_dbg(&client->dev, "%s\n", __func__); 275 237 276 - if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) 277 - return -ENODEV; 238 + if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA) 239 + && !i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_I2C_BLOCK)) 240 + return -EIO; 241 + 242 + rx8581 = devm_kzalloc(&client->dev, sizeof(struct rx8581), GFP_KERNEL); 243 + if (!rx8581) 244 + return -ENOMEM; 245 + 246 + i2c_set_clientdata(client, rx8581); 247 + rx8581->client = client; 248 + 249 + if (i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_I2C_BLOCK)) { 250 + rx8581->read_block_data = i2c_smbus_read_i2c_block_data; 251 + 
rx8581->write_block_data = i2c_smbus_write_i2c_block_data; 252 + } else { 253 + rx8581->read_block_data = rx8581_read_block_data; 254 + rx8581->write_block_data = rx8581_write_block_data; 255 + } 278 256 279 257 dev_info(&client->dev, "chip found, driver version " DRV_VERSION "\n"); 280 258 281 - rtc = devm_rtc_device_register(&client->dev, rx8581_driver.driver.name, 282 - &rx8581_rtc_ops, THIS_MODULE); 259 + rx8581->rtc = devm_rtc_device_register(&client->dev, 260 + rx8581_driver.driver.name, &rx8581_rtc_ops, THIS_MODULE); 283 261 284 - if (IS_ERR(rtc)) 285 - return PTR_ERR(rtc); 286 - 287 - i2c_set_clientdata(client, rtc); 262 + if (IS_ERR(rx8581->rtc)) { 263 + dev_err(&client->dev, 264 + "unable to register the class device\n"); 265 + return PTR_ERR(rx8581->rtc); 266 + } 288 267 289 268 return 0; 290 269 }
+2
drivers/rtc/rtc-s5m.c
··· 639 639 s5m_rtc_enable_smpl(info, false); 640 640 } 641 641 642 + #ifdef CONFIG_PM_SLEEP 642 643 static int s5m_rtc_resume(struct device *dev) 643 644 { 644 645 struct s5m_rtc_info *info = dev_get_drvdata(dev); ··· 661 660 662 661 return ret; 663 662 } 663 + #endif /* CONFIG_PM_SLEEP */ 664 664 665 665 static SIMPLE_DEV_PM_OPS(s5m_rtc_pm_ops, s5m_rtc_suspend, s5m_rtc_resume); 666 666
+13 -25
drivers/rtc/rtc-twl.c
··· 479 479 u8 rd_reg; 480 480 481 481 if (irq <= 0) 482 - goto out1; 482 + return ret; 483 483 484 484 /* Initialize the register map */ 485 485 if (twl_class_is_4030()) ··· 489 489 490 490 ret = twl_rtc_read_u8(&rd_reg, REG_RTC_STATUS_REG); 491 491 if (ret < 0) 492 - goto out1; 492 + return ret; 493 493 494 494 if (rd_reg & BIT_RTC_STATUS_REG_POWER_UP_M) 495 495 dev_warn(&pdev->dev, "Power up reset detected.\n"); ··· 500 500 /* Clear RTC Power up reset and pending alarm interrupts */ 501 501 ret = twl_rtc_write_u8(rd_reg, REG_RTC_STATUS_REG); 502 502 if (ret < 0) 503 - goto out1; 503 + return ret; 504 504 505 505 if (twl_class_is_6030()) { 506 506 twl6030_interrupt_unmask(TWL6030_RTC_INT_MASK, ··· 512 512 dev_info(&pdev->dev, "Enabling TWL-RTC\n"); 513 513 ret = twl_rtc_write_u8(BIT_RTC_CTRL_REG_STOP_RTC_M, REG_RTC_CTRL_REG); 514 514 if (ret < 0) 515 - goto out1; 515 + return ret; 516 516 517 517 /* ensure interrupts are disabled, bootloaders can be strange */ 518 518 ret = twl_rtc_write_u8(0, REG_RTC_INTERRUPTS_REG); ··· 522 522 /* init cached IRQ enable bits */ 523 523 ret = twl_rtc_read_u8(&rtc_irq_bits, REG_RTC_INTERRUPTS_REG); 524 524 if (ret < 0) 525 - goto out1; 525 + return ret; 526 526 527 527 device_init_wakeup(&pdev->dev, 1); 528 528 529 - rtc = rtc_device_register(pdev->name, 530 - &pdev->dev, &twl_rtc_ops, THIS_MODULE); 529 + rtc = devm_rtc_device_register(&pdev->dev, pdev->name, 530 + &twl_rtc_ops, THIS_MODULE); 531 531 if (IS_ERR(rtc)) { 532 - ret = PTR_ERR(rtc); 533 532 dev_err(&pdev->dev, "can't register RTC device, err %ld\n", 534 533 PTR_ERR(rtc)); 535 - goto out1; 534 + return PTR_ERR(rtc); 536 535 } 537 536 538 - ret = request_threaded_irq(irq, NULL, twl_rtc_interrupt, 539 - IRQF_TRIGGER_RISING | IRQF_ONESHOT, 540 - dev_name(&rtc->dev), rtc); 537 + ret = devm_request_threaded_irq(&pdev->dev, irq, NULL, 538 + twl_rtc_interrupt, 539 + IRQF_TRIGGER_RISING | IRQF_ONESHOT, 540 + dev_name(&rtc->dev), rtc); 541 541 if (ret < 0) { 542 542 
dev_err(&pdev->dev, "IRQ is not free.\n"); 543 - goto out2; 543 + return ret; 544 544 } 545 545 546 546 platform_set_drvdata(pdev, rtc); 547 547 return 0; 548 - 549 - out2: 550 - rtc_device_unregister(rtc); 551 - out1: 552 - return ret; 553 548 } 554 549 555 550 /* ··· 554 559 static int twl_rtc_remove(struct platform_device *pdev) 555 560 { 556 561 /* leave rtc running, but disable irqs */ 557 - struct rtc_device *rtc = platform_get_drvdata(pdev); 558 - int irq = platform_get_irq(pdev, 0); 559 - 560 562 mask_rtc_irq_bit(BIT_RTC_INTERRUPTS_REG_IT_ALARM_M); 561 563 mask_rtc_irq_bit(BIT_RTC_INTERRUPTS_REG_IT_TIMER_M); 562 564 if (twl_class_is_6030()) { ··· 563 571 REG_INT_MSK_STS_A); 564 572 } 565 573 566 - 567 - free_irq(irq, rtc); 568 - 569 - rtc_device_unregister(rtc); 570 574 return 0; 571 575 } 572 576
+12 -38
drivers/rtc/rtc-vr41xx.c
··· 293 293 if (!res) 294 294 return -EBUSY; 295 295 296 - rtc1_base = ioremap(res->start, resource_size(res)); 296 + rtc1_base = devm_ioremap(&pdev->dev, res->start, resource_size(res)); 297 297 if (!rtc1_base) 298 298 return -EBUSY; 299 299 ··· 303 303 goto err_rtc1_iounmap; 304 304 } 305 305 306 - rtc2_base = ioremap(res->start, resource_size(res)); 306 + rtc2_base = devm_ioremap(&pdev->dev, res->start, resource_size(res)); 307 307 if (!rtc2_base) { 308 308 retval = -EBUSY; 309 309 goto err_rtc1_iounmap; 310 310 } 311 311 312 - rtc = rtc_device_register(rtc_name, &pdev->dev, &vr41xx_rtc_ops, THIS_MODULE); 312 + rtc = devm_rtc_device_register(&pdev->dev, rtc_name, &vr41xx_rtc_ops, 313 + THIS_MODULE); 313 314 if (IS_ERR(rtc)) { 314 315 retval = PTR_ERR(rtc); 315 316 goto err_iounmap_all; ··· 331 330 aie_irq = platform_get_irq(pdev, 0); 332 331 if (aie_irq <= 0) { 333 332 retval = -EBUSY; 334 - goto err_device_unregister; 333 + goto err_iounmap_all; 335 334 } 336 335 337 - retval = request_irq(aie_irq, elapsedtime_interrupt, 0, 338 - "elapsed_time", pdev); 336 + retval = devm_request_irq(&pdev->dev, aie_irq, elapsedtime_interrupt, 0, 337 + "elapsed_time", pdev); 339 338 if (retval < 0) 340 - goto err_device_unregister; 339 + goto err_iounmap_all; 341 340 342 341 pie_irq = platform_get_irq(pdev, 1); 343 342 if (pie_irq <= 0) { 344 343 retval = -EBUSY; 345 - goto err_free_irq; 344 + goto err_iounmap_all; 346 345 } 347 346 348 - retval = request_irq(pie_irq, rtclong1_interrupt, 0, 349 - "rtclong1", pdev); 347 + retval = devm_request_irq(&pdev->dev, pie_irq, rtclong1_interrupt, 0, 348 + "rtclong1", pdev); 350 349 if (retval < 0) 351 - goto err_free_irq; 350 + goto err_iounmap_all; 352 351 353 352 platform_set_drvdata(pdev, rtc); 354 353 ··· 359 358 360 359 return 0; 361 360 362 - err_free_irq: 363 - free_irq(aie_irq, pdev); 364 - 365 - err_device_unregister: 366 - rtc_device_unregister(rtc); 367 - 368 361 err_iounmap_all: 369 - iounmap(rtc2_base); 370 362 rtc2_base = 
NULL; 371 363 372 364 err_rtc1_iounmap: 373 - iounmap(rtc1_base); 374 365 rtc1_base = NULL; 375 366 376 367 return retval; 377 - } 378 - 379 - static int rtc_remove(struct platform_device *pdev) 380 - { 381 - struct rtc_device *rtc; 382 - 383 - rtc = platform_get_drvdata(pdev); 384 - if (rtc) 385 - rtc_device_unregister(rtc); 386 - 387 - free_irq(aie_irq, pdev); 388 - free_irq(pie_irq, pdev); 389 - if (rtc1_base) 390 - iounmap(rtc1_base); 391 - if (rtc2_base) 392 - iounmap(rtc2_base); 393 - 394 - return 0; 395 368 } 396 369 397 370 /* work with hotplug and coldplug */ ··· 373 398 374 399 static struct platform_driver rtc_platform_driver = { 375 400 .probe = rtc_probe, 376 - .remove = rtc_remove, 377 401 .driver = { 378 402 .name = rtc_name, 379 403 .owner = THIS_MODULE,
+4
drivers/video/aty/aty128fb.c
··· 357 357 static bool mtrr = true; 358 358 #endif 359 359 360 + #ifdef CONFIG_FB_ATY128_BACKLIGHT 360 361 #ifdef CONFIG_PMAC_BACKLIGHT 361 362 static int backlight = 1; 362 363 #else 363 364 static int backlight = 0; 365 + #endif 364 366 #endif 365 367 366 368 /* PLL constants */ ··· 1673 1671 default_crt_on = simple_strtoul(this_opt+4, NULL, 0); 1674 1672 continue; 1675 1673 } else if (!strncmp(this_opt, "backlight:", 10)) { 1674 + #ifdef CONFIG_FB_ATY128_BACKLIGHT 1676 1675 backlight = simple_strtoul(this_opt+10, NULL, 0); 1676 + #endif 1677 1677 continue; 1678 1678 } 1679 1679 #ifdef CONFIG_MTRR
+2 -4
drivers/video/backlight/hp680_bl.c
··· 110 110 memset(&props, 0, sizeof(struct backlight_properties)); 111 111 props.type = BACKLIGHT_RAW; 112 112 props.max_brightness = HP680_MAX_INTENSITY; 113 - bd = backlight_device_register("hp680-bl", &pdev->dev, NULL, 114 - &hp680bl_ops, &props); 113 + bd = devm_backlight_device_register(&pdev->dev, "hp680-bl", &pdev->dev, 114 + NULL, &hp680bl_ops, &props); 115 115 if (IS_ERR(bd)) 116 116 return PTR_ERR(bd); 117 117 ··· 130 130 bd->props.brightness = 0; 131 131 bd->props.power = 0; 132 132 hp680bl_send_intensity(bd); 133 - 134 - backlight_device_unregister(bd); 135 133 136 134 return 0; 137 135 }
+3 -12
drivers/video/backlight/jornada720_bl.c
··· 115 115 memset(&props, 0, sizeof(struct backlight_properties)); 116 116 props.type = BACKLIGHT_RAW; 117 117 props.max_brightness = BL_MAX_BRIGHT; 118 - bd = backlight_device_register(S1D_DEVICENAME, &pdev->dev, NULL, 119 - &jornada_bl_ops, &props); 120 118 119 + bd = devm_backlight_device_register(&pdev->dev, S1D_DEVICENAME, 120 + &pdev->dev, NULL, &jornada_bl_ops, 121 + &props); 121 122 if (IS_ERR(bd)) { 122 123 ret = PTR_ERR(bd); 123 124 dev_err(&pdev->dev, "failed to register device, err=%x\n", ret); ··· 140 139 return 0; 141 140 } 142 141 143 - static int jornada_bl_remove(struct platform_device *pdev) 144 - { 145 - struct backlight_device *bd = platform_get_drvdata(pdev); 146 - 147 - backlight_device_unregister(bd); 148 - 149 - return 0; 150 - } 151 - 152 142 static struct platform_driver jornada_bl_driver = { 153 143 .probe = jornada_bl_probe, 154 - .remove = jornada_bl_remove, 155 144 .driver = { 156 145 .name = "jornada_bl", 157 146 },
+2 -11
drivers/video/backlight/jornada720_lcd.c
··· 100 100 struct lcd_device *lcd_device; 101 101 int ret; 102 102 103 - lcd_device = lcd_device_register(S1D_DEVICENAME, &pdev->dev, NULL, &jornada_lcd_props); 103 + lcd_device = devm_lcd_device_register(&pdev->dev, S1D_DEVICENAME, 104 + &pdev->dev, NULL, &jornada_lcd_props); 104 105 105 106 if (IS_ERR(lcd_device)) { 106 107 ret = PTR_ERR(lcd_device); ··· 120 119 return 0; 121 120 } 122 121 123 - static int jornada_lcd_remove(struct platform_device *pdev) 124 - { 125 - struct lcd_device *lcd_device = platform_get_drvdata(pdev); 126 - 127 - lcd_device_unregister(lcd_device); 128 - 129 - return 0; 130 - } 131 - 132 122 static struct platform_driver jornada_lcd_driver = { 133 123 .probe = jornada_lcd_probe, 134 - .remove = jornada_lcd_remove, 135 124 .driver = { 136 125 .name = "jornada_lcd", 137 126 },
+1 -1
drivers/video/backlight/kb3886_bl.c
··· 78 78 static unsigned long kb3886bl_flags; 79 79 #define KB3886BL_SUSPENDED 0x01 80 80 81 - static struct dmi_system_id __initdata kb3886bl_device_table[] = { 81 + static struct dmi_system_id kb3886bl_device_table[] __initdata = { 82 82 { 83 83 .ident = "Sahara Touch-iT", 84 84 .matches = {
+2 -4
drivers/video/backlight/l4f00242t03.c
··· 223 223 return PTR_ERR(priv->core_reg); 224 224 } 225 225 226 - priv->ld = lcd_device_register("l4f00242t03", 227 - &spi->dev, priv, &l4f_ops); 226 + priv->ld = devm_lcd_device_register(&spi->dev, "l4f00242t03", &spi->dev, 227 + priv, &l4f_ops); 228 228 if (IS_ERR(priv->ld)) 229 229 return PTR_ERR(priv->ld); 230 230 ··· 243 243 struct l4f00242t03_priv *priv = spi_get_drvdata(spi); 244 244 245 245 l4f00242t03_lcd_power_set(priv->ld, FB_BLANK_POWERDOWN); 246 - lcd_device_unregister(priv->ld); 247 - 248 246 return 0; 249 247 } 250 248
+1 -1
drivers/video/backlight/lp855x_bl.c
··· 125 125 return false; 126 126 } 127 127 128 - return (addr >= start && addr <= end); 128 + return addr >= start && addr <= end; 129 129 } 130 130 131 131 static int lp8557_bl_off(struct lp855x *lp)
+3 -3
drivers/video/backlight/lp8788_bl.c
··· 63 63 64 64 static inline bool is_brightness_ctrl_by_pwm(enum lp8788_bl_ctrl_mode mode) 65 65 { 66 - return (mode == LP8788_BL_COMB_PWM_BASED); 66 + return mode == LP8788_BL_COMB_PWM_BASED; 67 67 } 68 68 69 69 static inline bool is_brightness_ctrl_by_register(enum lp8788_bl_ctrl_mode mode) 70 70 { 71 - return (mode == LP8788_BL_REGISTER_ONLY || 72 - mode == LP8788_BL_COMB_REGISTER_BASED); 71 + return mode == LP8788_BL_REGISTER_ONLY || 72 + mode == LP8788_BL_COMB_REGISTER_BASED; 73 73 } 74 74 75 75 static int lp8788_backlight_configure(struct lp8788_bl *bl)
+2 -12
drivers/video/backlight/omap1_bl.c
··· 146 146 memset(&props, 0, sizeof(struct backlight_properties)); 147 147 props.type = BACKLIGHT_RAW; 148 148 props.max_brightness = OMAPBL_MAX_INTENSITY; 149 - dev = backlight_device_register("omap-bl", &pdev->dev, bl, &omapbl_ops, 150 - &props); 149 + dev = devm_backlight_device_register(&pdev->dev, "omap-bl", &pdev->dev, 150 + bl, &omapbl_ops, &props); 151 151 if (IS_ERR(dev)) 152 152 return PTR_ERR(dev); 153 153 ··· 170 170 return 0; 171 171 } 172 172 173 - static int omapbl_remove(struct platform_device *pdev) 174 - { 175 - struct backlight_device *dev = platform_get_drvdata(pdev); 176 - 177 - backlight_device_unregister(dev); 178 - 179 - return 0; 180 - } 181 - 182 173 static SIMPLE_DEV_PM_OPS(omapbl_pm_ops, omapbl_suspend, omapbl_resume); 183 174 184 175 static struct platform_driver omapbl_driver = { 185 176 .probe = omapbl_probe, 186 - .remove = omapbl_remove, 187 177 .driver = { 188 178 .name = "omap-bl", 189 179 .pm = &omapbl_pm_ops,
+3 -6
drivers/video/backlight/ot200_bl.c
··· 118 118 props.brightness = 100; 119 119 props.type = BACKLIGHT_RAW; 120 120 121 - bl = backlight_device_register(dev_name(&pdev->dev), &pdev->dev, data, 122 - &ot200_backlight_ops, &props); 121 + bl = devm_backlight_device_register(&pdev->dev, dev_name(&pdev->dev), 122 + &pdev->dev, data, &ot200_backlight_ops, 123 + &props); 123 124 if (IS_ERR(bl)) { 124 125 dev_err(&pdev->dev, "failed to register backlight\n"); 125 126 retval = PTR_ERR(bl); ··· 138 137 139 138 static int ot200_backlight_remove(struct platform_device *pdev) 140 139 { 141 - struct backlight_device *bl = platform_get_drvdata(pdev); 142 - 143 - backlight_device_unregister(bl); 144 - 145 140 /* on module unload set brightness to 100% */ 146 141 cs5535_mfgpt_write(pwm_timer, MFGPT_REG_COUNTER, 0); 147 142 cs5535_mfgpt_write(pwm_timer, MFGPT_REG_SETUP, MFGPT_SETUP_CNTEN);
+3 -4
drivers/video/backlight/tosa_bl.c
··· 105 105 memset(&props, 0, sizeof(struct backlight_properties)); 106 106 props.type = BACKLIGHT_RAW; 107 107 props.max_brightness = 512 - 1; 108 - data->bl = backlight_device_register("tosa-bl", &client->dev, data, 109 - &bl_ops, &props); 108 + data->bl = devm_backlight_device_register(&client->dev, "tosa-bl", 109 + &client->dev, data, &bl_ops, 110 + &props); 110 111 if (IS_ERR(data->bl)) { 111 112 ret = PTR_ERR(data->bl); 112 113 goto err_reg; ··· 129 128 { 130 129 struct tosa_bl_data *data = i2c_get_clientdata(client); 131 130 132 - backlight_device_unregister(data->bl); 133 131 data->bl = NULL; 134 - 135 132 return 0; 136 133 } 137 134
+2 -4
drivers/video/backlight/tosa_lcd.c
··· 206 206 207 207 tosa_lcd_tg_on(data); 208 208 209 - data->lcd = lcd_device_register("tosa-lcd", &spi->dev, data, 210 - &tosa_lcd_ops); 209 + data->lcd = devm_lcd_device_register(&spi->dev, "tosa-lcd", &spi->dev, 210 + data, &tosa_lcd_ops); 211 211 212 212 if (IS_ERR(data->lcd)) { 213 213 ret = PTR_ERR(data->lcd); ··· 225 225 static int tosa_lcd_remove(struct spi_device *spi) 226 226 { 227 227 struct tosa_lcd_data *data = spi_get_drvdata(spi); 228 - 229 - lcd_device_unregister(data->lcd); 230 228 231 229 if (data->i2c) 232 230 i2c_unregister_device(data->i2c);
+2 -1
drivers/vlynq/vlynq.c
··· 762 762 763 763 device_unregister(&dev->dev); 764 764 iounmap(dev->local); 765 - release_mem_region(dev->regs_start, dev->regs_end - dev->regs_start); 765 + release_mem_region(dev->regs_start, 766 + dev->regs_end - dev->regs_start + 1); 766 767 767 768 kfree(dev); 768 769
+22
drivers/w1/masters/w1-gpio.c
··· 18 18 #include <linux/of_gpio.h> 19 19 #include <linux/err.h> 20 20 #include <linux/of.h> 21 + #include <linux/delay.h> 21 22 22 23 #include "../w1.h" 23 24 #include "../w1_int.h" 25 + 26 + static u8 w1_gpio_set_pullup(void *data, int delay) 27 + { 28 + struct w1_gpio_platform_data *pdata = data; 29 + 30 + if (delay) { 31 + pdata->pullup_duration = delay; 32 + } else { 33 + if (pdata->pullup_duration) { 34 + gpio_direction_output(pdata->pin, 1); 35 + 36 + msleep(pdata->pullup_duration); 37 + 38 + gpio_direction_input(pdata->pin); 39 + } 40 + pdata->pullup_duration = 0; 41 + } 42 + 43 + return 0; 44 + } 24 45 25 46 static void w1_gpio_write_bit_dir(void *data, u8 bit) 26 47 { ··· 153 132 } else { 154 133 gpio_direction_input(pdata->pin); 155 134 master->write_bit = w1_gpio_write_bit_dir; 135 + master->set_pullup = w1_gpio_set_pullup; 156 136 } 157 137 158 138 err = w1_add_master_device(master);
-12
drivers/w1/w1_int.c
··· 117 117 printk(KERN_ERR "w1_add_master_device: invalid function set\n"); 118 118 return(-EINVAL); 119 119 } 120 - /* While it would be electrically possible to make a device that 121 - * generated a strong pullup in bit bang mode, only hardware that 122 - * controls 1-wire time frames are even expected to support a strong 123 - * pullup. w1_io.c would need to support calling set_pullup before 124 - * the last write_bit operation of a w1_write_8 which it currently 125 - * doesn't. 126 - */ 127 - if (!master->write_byte && !master->touch_bit && master->set_pullup) { 128 - printk(KERN_ERR "w1_add_master_device: set_pullup requires " 129 - "write_byte or touch_bit, disabling\n"); 130 - master->set_pullup = NULL; 131 - } 132 120 133 121 /* Lock until the device is added (or not) to w1_masters. */ 134 122 mutex_lock(&w1_mlock);
+2 -2
fs/autofs4/autofs_i.h
··· 104 104 u32 magic; 105 105 int pipefd; 106 106 struct file *pipe; 107 - pid_t oz_pgrp; 107 + struct pid *oz_pgrp; 108 108 int catatonic; 109 109 int version; 110 110 int sub_version; ··· 140 140 filesystem without "magic".) */ 141 141 142 142 static inline int autofs4_oz_mode(struct autofs_sb_info *sbi) { 143 - return sbi->catatonic || task_pgrp_nr(current) == sbi->oz_pgrp; 143 + return sbi->catatonic || task_pgrp(current) == sbi->oz_pgrp; 144 144 } 145 145 146 146 /* Does a dentry have some pending activity? */
+14 -2
fs/autofs4/dev-ioctl.c
··· 346 346 { 347 347 int pipefd; 348 348 int err = 0; 349 + struct pid *new_pid = NULL; 349 350 350 351 if (param->setpipefd.pipefd == -1) 351 352 return -EINVAL; ··· 358 357 mutex_unlock(&sbi->wq_mutex); 359 358 return -EBUSY; 360 359 } else { 361 - struct file *pipe = fget(pipefd); 360 + struct file *pipe; 361 + 362 + new_pid = get_task_pid(current, PIDTYPE_PGID); 363 + 364 + if (ns_of_pid(new_pid) != ns_of_pid(sbi->oz_pgrp)) { 365 + AUTOFS_WARN("Not allowed to change PID namespace"); 366 + err = -EINVAL; 367 + goto out; 368 + } 369 + 370 + pipe = fget(pipefd); 362 371 if (!pipe) { 363 372 err = -EBADF; 364 373 goto out; ··· 378 367 fput(pipe); 379 368 goto out; 380 369 } 381 - sbi->oz_pgrp = task_pgrp_nr(current); 370 + swap(sbi->oz_pgrp, new_pid); 382 371 sbi->pipefd = pipefd; 383 372 sbi->pipe = pipe; 384 373 sbi->catatonic = 0; 385 374 } 386 375 out: 376 + put_pid(new_pid); 387 377 mutex_unlock(&sbi->wq_mutex); 388 378 return err; 389 379 }
+14
fs/autofs4/expire.c
··· 402 402 goto next; 403 403 } 404 404 405 + if (dentry->d_inode && S_ISLNK(dentry->d_inode->i_mode)) { 406 + DPRINTK("checking symlink %p %.*s", 407 + dentry, (int)dentry->d_name.len, dentry->d_name.name); 408 + /* 409 + * A symlink can't be "busy" in the usual sense so 410 + * just check last used for expire timeout. 411 + */ 412 + if (autofs4_can_expire(dentry, timeout, do_now)) { 413 + expired = dentry; 414 + goto found; 415 + } 416 + goto next; 417 + } 418 + 405 419 if (simple_empty(dentry)) 406 420 goto next; 407 421
+35 -14
fs/autofs4/inode.c
··· 56 56 * just call kill_anon_super when we are called from 57 57 * deactivate_super. 58 58 */ 59 - if (sbi) /* Free wait queues, close pipe */ 59 + if (sbi) { 60 + /* Free wait queues, close pipe */ 60 61 autofs4_catatonic_mode(sbi); 62 + put_pid(sbi->oz_pgrp); 63 + } 61 64 62 65 DPRINTK("shutting down"); 63 66 kill_litter_super(sb); ··· 83 80 if (!gid_eq(root_inode->i_gid, GLOBAL_ROOT_GID)) 84 81 seq_printf(m, ",gid=%u", 85 82 from_kgid_munged(&init_user_ns, root_inode->i_gid)); 86 - seq_printf(m, ",pgrp=%d", sbi->oz_pgrp); 83 + seq_printf(m, ",pgrp=%d", pid_vnr(sbi->oz_pgrp)); 87 84 seq_printf(m, ",timeout=%lu", sbi->exp_timeout/HZ); 88 85 seq_printf(m, ",minproto=%d", sbi->min_proto); 89 86 seq_printf(m, ",maxproto=%d", sbi->max_proto); ··· 127 124 }; 128 125 129 126 static int parse_options(char *options, int *pipefd, kuid_t *uid, kgid_t *gid, 130 - pid_t *pgrp, unsigned int *type, int *minproto, int *maxproto) 127 + int *pgrp, bool *pgrp_set, unsigned int *type, 128 + int *minproto, int *maxproto) 131 129 { 132 130 char *p; 133 131 substring_t args[MAX_OPT_ARGS]; ··· 136 132 137 133 *uid = current_uid(); 138 134 *gid = current_gid(); 139 - *pgrp = task_pgrp_nr(current); 140 135 141 136 *minproto = AUTOFS_MIN_PROTO_VERSION; 142 137 *maxproto = AUTOFS_MAX_PROTO_VERSION; ··· 174 171 if (match_int(args, &option)) 175 172 return 1; 176 173 *pgrp = option; 174 + *pgrp_set = true; 177 175 break; 178 176 case Opt_minproto: 179 177 if (match_int(args, &option)) ··· 210 206 int pipefd; 211 207 struct autofs_sb_info *sbi; 212 208 struct autofs_info *ino; 209 + int pgrp; 210 + bool pgrp_set = false; 211 + int ret = -EINVAL; 213 212 214 213 sbi = kzalloc(sizeof(*sbi), GFP_KERNEL); 215 214 if (!sbi) 216 - goto fail_unlock; 215 + return -ENOMEM; 217 216 DPRINTK("starting up, sbi = %p",sbi); 218 217 219 218 s->s_fs_info = sbi; ··· 225 218 sbi->pipe = NULL; 226 219 sbi->catatonic = 1; 227 220 sbi->exp_timeout = 0; 228 - sbi->oz_pgrp = task_pgrp_nr(current); 221 + 
sbi->oz_pgrp = NULL; 229 222 sbi->sb = s; 230 223 sbi->version = 0; 231 224 sbi->sub_version = 0; ··· 250 243 * Get the root inode and dentry, but defer checking for errors. 251 244 */ 252 245 ino = autofs4_new_ino(sbi); 253 - if (!ino) 246 + if (!ino) { 247 + ret = -ENOMEM; 254 248 goto fail_free; 249 + } 255 250 root_inode = autofs4_get_inode(s, S_IFDIR | 0755); 256 251 root = d_make_root(root_inode); 257 252 if (!root) ··· 264 255 265 256 /* Can this call block? */ 266 257 if (parse_options(data, &pipefd, &root_inode->i_uid, &root_inode->i_gid, 267 - &sbi->oz_pgrp, &sbi->type, &sbi->min_proto, 268 - &sbi->max_proto)) { 258 + &pgrp, &pgrp_set, &sbi->type, &sbi->min_proto, 259 + &sbi->max_proto)) { 269 260 printk("autofs: called with bogus options\n"); 270 261 goto fail_dput; 262 + } 263 + 264 + if (pgrp_set) { 265 + sbi->oz_pgrp = find_get_pid(pgrp); 266 + if (!sbi->oz_pgrp) { 267 + pr_warn("autofs: could not find process group %d\n", 268 + pgrp); 269 + goto fail_dput; 270 + } 271 + } else { 272 + sbi->oz_pgrp = get_task_pid(current, PIDTYPE_PGID); 271 273 } 272 274 273 275 if (autofs_type_trigger(sbi->type)) ··· 304 284 sbi->version = sbi->max_proto; 305 285 sbi->sub_version = AUTOFS_PROTO_SUBVERSION; 306 286 307 - DPRINTK("pipe fd = %d, pgrp = %u", pipefd, sbi->oz_pgrp); 287 + DPRINTK("pipe fd = %d, pgrp = %u", pipefd, pid_nr(sbi->oz_pgrp)); 308 288 pipe = fget(pipefd); 309 - 289 + 310 290 if (!pipe) { 311 291 printk("autofs: could not open pipe file descriptor\n"); 312 292 goto fail_dput; 313 293 } 314 - if (autofs_prepare_pipe(pipe) < 0) 294 + ret = autofs_prepare_pipe(pipe); 295 + if (ret < 0) 315 296 goto fail_fput; 316 297 sbi->pipe = pipe; 317 298 sbi->pipefd = pipefd; ··· 337 316 fail_ino: 338 317 kfree(ino); 339 318 fail_free: 319 + put_pid(sbi->oz_pgrp); 340 320 kfree(sbi); 341 321 s->s_fs_info = NULL; 342 - fail_unlock: 343 - return -EINVAL; 322 + return ret; 344 323 } 345 324 346 325 struct inode *autofs4_get_inode(struct super_block *sb, umode_t 
mode)
+3 -3
fs/autofs4/root.c
··· 558 558 dget(dentry); 559 559 atomic_inc(&ino->count); 560 560 p_ino = autofs4_dentry_ino(dentry->d_parent); 561 - if (p_ino && dentry->d_parent != dentry) 561 + if (p_ino && !IS_ROOT(dentry)) 562 562 atomic_inc(&p_ino->count); 563 563 564 564 dir->i_mtime = CURRENT_TIME; ··· 593 593 594 594 if (atomic_dec_and_test(&ino->count)) { 595 595 p_ino = autofs4_dentry_ino(dentry->d_parent); 596 - if (p_ino && dentry->d_parent != dentry) 596 + if (p_ino && !IS_ROOT(dentry)) 597 597 atomic_dec(&p_ino->count); 598 598 } 599 599 dput(ino->dentry); ··· 732 732 dget(dentry); 733 733 atomic_inc(&ino->count); 734 734 p_ino = autofs4_dentry_ino(dentry->d_parent); 735 - if (p_ino && dentry->d_parent != dentry) 735 + if (p_ino && !IS_ROOT(dentry)) 736 736 atomic_inc(&p_ino->count); 737 737 inc_nlink(dir); 738 738 dir->i_mtime = CURRENT_TIME;
+4
fs/autofs4/symlink.c
··· 14 14 15 15 static void *autofs4_follow_link(struct dentry *dentry, struct nameidata *nd) 16 16 { 17 + struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb); 18 + struct autofs_info *ino = autofs4_dentry_ino(dentry); 19 + if (ino && !autofs4_oz_mode(sbi)) 20 + ino->last_used = jiffies; 17 21 nd_set_link(nd, dentry->d_inode->i_private); 18 22 return NULL; 19 23 }
+14 -2
fs/autofs4/waitq.c
··· 347 347 struct qstr qstr; 348 348 char *name; 349 349 int status, ret, type; 350 + pid_t pid; 351 + pid_t tgid; 350 352 351 353 /* In catatonic mode, we don't wait for nobody */ 352 354 if (sbi->catatonic) 355 + return -ENOENT; 356 + 357 + /* 358 + * Try translating pids to the namespace of the daemon. 359 + * 360 + * Zero means failure: we are in an unrelated pid namespace. 361 + */ 362 + pid = task_pid_nr_ns(current, ns_of_pid(sbi->oz_pgrp)); 363 + tgid = task_tgid_nr_ns(current, ns_of_pid(sbi->oz_pgrp)); 364 + if (pid == 0 || tgid == 0) 353 365 return -ENOENT; 354 366 355 367 if (!dentry->d_inode) { ··· 429 417 wq->ino = autofs4_get_ino(sbi); 430 418 wq->uid = current_uid(); 431 419 wq->gid = current_gid(); 432 - wq->pid = current->pid; 433 - wq->tgid = current->tgid; 420 + wq->pid = pid; 421 + wq->tgid = tgid; 434 422 wq->status = -EINTR; /* Status return if interrupted */ 435 423 wq->wait_ctr = 2; 436 424
-3
fs/binfmt_elf.c
··· 543 543 * libraries. There is no binary dependent code anywhere else. 544 544 */ 545 545 546 - #define INTERPRETER_NONE 0 547 - #define INTERPRETER_ELF 2 548 - 549 546 #ifndef STACK_RND_MASK 550 547 #define STACK_RND_MASK (0x7ff >> (PAGE_SHIFT - 12)) /* 8MB of VA */ 551 548 #endif
-1
fs/coredump.c
··· 40 40 41 41 #include <trace/events/task.h> 42 42 #include "internal.h" 43 - #include "coredump.h" 44 43 45 44 #include <trace/events/sched.h> 46 45
-6
fs/coredump.h
··· 1 - #ifndef _FS_COREDUMP_H 2 - #define _FS_COREDUMP_H 3 - 4 - extern int __get_dumpable(unsigned long mm_flags); 5 - 6 - #endif
+25 -95
fs/exec.c
··· 62 62 63 63 #include <trace/events/task.h> 64 64 #include "internal.h" 65 - #include "coredump.h" 66 65 67 66 #include <trace/events/sched.h> 68 67 ··· 842 843 tsk->active_mm = mm; 843 844 activate_mm(active_mm, mm); 844 845 task_unlock(tsk); 845 - arch_pick_mmap_layout(mm); 846 846 if (old_mm) { 847 847 up_read(&old_mm->mmap_sem); 848 848 BUG_ON(active_mm != old_mm); ··· 1086 1088 bprm->mm = NULL; /* We're using it now */ 1087 1089 1088 1090 set_fs(USER_DS); 1089 - current->flags &= 1090 - ~(PF_RANDOMIZE | PF_FORKNOEXEC | PF_KTHREAD | PF_NOFREEZE); 1091 + current->flags &= ~(PF_RANDOMIZE | PF_FORKNOEXEC | PF_KTHREAD | 1092 + PF_NOFREEZE | PF_NO_SETAFFINITY); 1091 1093 flush_thread(); 1092 1094 current->personality &= ~bprm->per_clear; 1093 1095 ··· 1137 1139 1138 1140 /* An exec changes our domain. We are no longer part of the thread 1139 1141 group */ 1140 - 1141 1142 current->self_exec_id++; 1142 - 1143 1143 flush_signal_handlers(current, 0); 1144 1144 do_close_on_exec(current->files); 1145 1145 } ··· 1168 1172 if (bprm->cred) { 1169 1173 mutex_unlock(&current->signal->cred_guard_mutex); 1170 1174 abort_creds(bprm->cred); 1175 + } 1176 + if (bprm->file) { 1177 + allow_write_access(bprm->file); 1178 + fput(bprm->file); 1171 1179 } 1172 1180 /* If a binfmt changed the interp, free it. 
*/ 1173 1181 if (bprm->interp != bprm->filename) ··· 1224 1224 * - the caller must hold ->cred_guard_mutex to protect against 1225 1225 * PTRACE_ATTACH 1226 1226 */ 1227 - static int check_unsafe_exec(struct linux_binprm *bprm) 1227 + static void check_unsafe_exec(struct linux_binprm *bprm) 1228 1228 { 1229 1229 struct task_struct *p = current, *t; 1230 1230 unsigned n_fs; 1231 - int res = 0; 1232 1231 1233 1232 if (p->ptrace) { 1234 1233 if (p->ptrace & PT_PTRACE_CAP) ··· 1243 1244 if (current->no_new_privs) 1244 1245 bprm->unsafe |= LSM_UNSAFE_NO_NEW_PRIVS; 1245 1246 1247 + t = p; 1246 1248 n_fs = 1; 1247 1249 spin_lock(&p->fs->lock); 1248 1250 rcu_read_lock(); 1249 - for (t = next_thread(p); t != p; t = next_thread(t)) { 1251 + while_each_thread(p, t) { 1250 1252 if (t->fs == p->fs) 1251 1253 n_fs++; 1252 1254 } 1253 1255 rcu_read_unlock(); 1254 1256 1255 - if (p->fs->users > n_fs) { 1257 + if (p->fs->users > n_fs) 1256 1258 bprm->unsafe |= LSM_UNSAFE_SHARE; 1257 - } else { 1258 - res = -EAGAIN; 1259 - if (!p->fs->in_exec) { 1260 - p->fs->in_exec = 1; 1261 - res = 1; 1262 - } 1263 - } 1259 + else 1260 + p->fs->in_exec = 1; 1264 1261 spin_unlock(&p->fs->lock); 1265 - 1266 - return res; 1267 1262 } 1268 1263 1269 - /* 1270 - * Fill the binprm structure from the inode. 1264 + /* 1265 + * Fill the binprm structure from the inode. 1271 1266 * Check permissions, then read the first 128 (BINPRM_BUF_SIZE) bytes 1272 1267 * 1273 1268 * This may be called multiple times for binary chains (scripts for example). 
··· 1423 1430 audit_bprm(bprm); 1424 1431 trace_sched_process_exec(current, old_pid, bprm); 1425 1432 ptrace_event(PTRACE_EVENT_EXEC, old_vpid); 1426 - current->did_exec = 1; 1427 1433 proc_exec_connector(current); 1428 - 1429 - if (bprm->file) { 1430 - allow_write_access(bprm->file); 1431 - fput(bprm->file); 1432 - bprm->file = NULL; /* to catch use-after-free */ 1433 - } 1434 1434 } 1435 1435 1436 1436 return ret; ··· 1439 1453 struct linux_binprm *bprm; 1440 1454 struct file *file; 1441 1455 struct files_struct *displaced; 1442 - bool clear_in_exec; 1443 1456 int retval; 1444 1457 1445 1458 /* ··· 1470 1485 if (retval) 1471 1486 goto out_free; 1472 1487 1473 - retval = check_unsafe_exec(bprm); 1474 - if (retval < 0) 1475 - goto out_free; 1476 - clear_in_exec = retval; 1488 + check_unsafe_exec(bprm); 1477 1489 current->in_execve = 1; 1478 1490 1479 1491 file = open_exec(filename); ··· 1486 1504 1487 1505 retval = bprm_mm_init(bprm); 1488 1506 if (retval) 1489 - goto out_file; 1507 + goto out_unmark; 1490 1508 1491 1509 bprm->argc = count(argv, MAX_ARG_STRINGS); 1492 1510 if ((retval = bprm->argc) < 0) ··· 1533 1551 mmput(bprm->mm); 1534 1552 } 1535 1553 1536 - out_file: 1537 - if (bprm->file) { 1538 - allow_write_access(bprm->file); 1539 - fput(bprm->file); 1540 - } 1541 - 1542 1554 out_unmark: 1543 - if (clear_in_exec) 1544 - current->fs->in_exec = 0; 1555 + current->fs->in_exec = 0; 1545 1556 current->in_execve = 0; 1546 1557 1547 1558 out_free: ··· 1584 1609 if (new) 1585 1610 __module_get(new->module); 1586 1611 } 1587 - 1588 1612 EXPORT_SYMBOL(set_binfmt); 1589 1613 1590 1614 /* 1591 - * set_dumpable converts traditional three-value dumpable to two flags and 1592 - * stores them into mm->flags. It modifies lower two bits of mm->flags, but 1593 - * these bits are not changed atomically. So get_dumpable can observe the 1594 - * intermediate state. 
To avoid doing unexpected behavior, get get_dumpable 1595 - * return either old dumpable or new one by paying attention to the order of 1596 - * modifying the bits. 1597 - * 1598 - * dumpable | mm->flags (binary) 1599 - * old new | initial interim final 1600 - * ---------+----------------------- 1601 - * 0 1 | 00 01 01 1602 - * 0 2 | 00 10(*) 11 1603 - * 1 0 | 01 00 00 1604 - * 1 2 | 01 11 11 1605 - * 2 0 | 11 10(*) 00 1606 - * 2 1 | 11 11 01 1607 - * 1608 - * (*) get_dumpable regards interim value of 10 as 11. 1615 + * set_dumpable stores three-value SUID_DUMP_* into mm->flags. 1609 1616 */ 1610 1617 void set_dumpable(struct mm_struct *mm, int value) 1611 1618 { 1612 - switch (value) { 1613 - case SUID_DUMP_DISABLE: 1614 - clear_bit(MMF_DUMPABLE, &mm->flags); 1615 - smp_wmb(); 1616 - clear_bit(MMF_DUMP_SECURELY, &mm->flags); 1617 - break; 1618 - case SUID_DUMP_USER: 1619 - set_bit(MMF_DUMPABLE, &mm->flags); 1620 - smp_wmb(); 1621 - clear_bit(MMF_DUMP_SECURELY, &mm->flags); 1622 - break; 1623 - case SUID_DUMP_ROOT: 1624 - set_bit(MMF_DUMP_SECURELY, &mm->flags); 1625 - smp_wmb(); 1626 - set_bit(MMF_DUMPABLE, &mm->flags); 1627 - break; 1628 - } 1629 - } 1619 + unsigned long old, new; 1630 1620 1631 - int __get_dumpable(unsigned long mm_flags) 1632 - { 1633 - int ret; 1621 + if (WARN_ON((unsigned)value > SUID_DUMP_ROOT)) 1622 + return; 1634 1623 1635 - ret = mm_flags & MMF_DUMPABLE_MASK; 1636 - return (ret > SUID_DUMP_USER) ? SUID_DUMP_ROOT : ret; 1637 - } 1638 - 1639 - /* 1640 - * This returns the actual value of the suid_dumpable flag. For things 1641 - * that are using this for checking for privilege transitions, it must 1642 - * test against SUID_DUMP_USER rather than treating it as a boolean 1643 - * value. 
1644 - */ 1645 - int get_dumpable(struct mm_struct *mm) 1646 - { 1647 - return __get_dumpable(mm->flags); 1624 + do { 1625 + old = ACCESS_ONCE(mm->flags); 1626 + new = (old & ~MMF_DUMPABLE_MASK) | value; 1627 + } while (cmpxchg(&mm->flags, old, new) != old); 1648 1628 } 1649 1629 1650 1630 SYSCALL_DEFINE3(execve,
+8 -34
fs/ext3/dir.c
··· 309 309 */ 310 310 static void free_rb_tree_fname(struct rb_root *root) 311 311 { 312 - struct rb_node *n = root->rb_node; 313 - struct rb_node *parent; 314 - struct fname *fname; 312 + struct fname *fname, *next; 315 313 316 - while (n) { 317 - /* Do the node's children first */ 318 - if (n->rb_left) { 319 - n = n->rb_left; 320 - continue; 321 - } 322 - if (n->rb_right) { 323 - n = n->rb_right; 324 - continue; 325 - } 326 - /* 327 - * The node has no children; free it, and then zero 328 - * out parent's link to it. Finally go to the 329 - * beginning of the loop and try to free the parent 330 - * node. 331 - */ 332 - parent = rb_parent(n); 333 - fname = rb_entry(n, struct fname, rb_hash); 334 - while (fname) { 335 - struct fname * old = fname; 314 + rbtree_postorder_for_each_entry_safe(fname, next, root, rb_hash) 315 + do { 316 + struct fname *old = fname; 336 317 fname = fname->next; 337 - kfree (old); 338 - } 339 - if (!parent) 340 - *root = RB_ROOT; 341 - else if (parent->rb_left == n) 342 - parent->rb_left = NULL; 343 - else if (parent->rb_right == n) 344 - parent->rb_right = NULL; 345 - n = parent; 346 - } 347 - } 318 + kfree(old); 319 + } while (fname); 348 320 321 + *root = RB_ROOT; 322 + } 349 323 350 324 static struct dir_private_info *ext3_htree_create_dir_info(struct file *filp, 351 325 loff_t pos)
+4 -29
fs/ext4/block_validity.c
··· 180 180 /* Called when the filesystem is unmounted */ 181 181 void ext4_release_system_zone(struct super_block *sb) 182 182 { 183 - struct rb_node *n = EXT4_SB(sb)->system_blks.rb_node; 184 - struct rb_node *parent; 185 - struct ext4_system_zone *entry; 183 + struct ext4_system_zone *entry, *n; 186 184 187 - while (n) { 188 - /* Do the node's children first */ 189 - if (n->rb_left) { 190 - n = n->rb_left; 191 - continue; 192 - } 193 - if (n->rb_right) { 194 - n = n->rb_right; 195 - continue; 196 - } 197 - /* 198 - * The node has no children; free it, and then zero 199 - * out parent's link to it. Finally go to the 200 - * beginning of the loop and try to free the parent 201 - * node. 202 - */ 203 - parent = rb_parent(n); 204 - entry = rb_entry(n, struct ext4_system_zone, node); 185 + rbtree_postorder_for_each_entry_safe(entry, n, 186 + &EXT4_SB(sb)->system_blks, node) 205 187 kmem_cache_free(ext4_system_zone_cachep, entry); 206 - if (!parent) 207 - EXT4_SB(sb)->system_blks = RB_ROOT; 208 - else if (parent->rb_left == n) 209 - parent->rb_left = NULL; 210 - else if (parent->rb_right == n) 211 - parent->rb_right = NULL; 212 - n = parent; 213 - } 188 + 214 189 EXT4_SB(sb)->system_blks = RB_ROOT; 215 190 } 216 191
+4 -29
fs/ext4/dir.c
··· 353 353 */ 354 354 static void free_rb_tree_fname(struct rb_root *root) 355 355 { 356 - struct rb_node *n = root->rb_node; 357 - struct rb_node *parent; 358 - struct fname *fname; 356 + struct fname *fname, *next; 359 357 360 - while (n) { 361 - /* Do the node's children first */ 362 - if (n->rb_left) { 363 - n = n->rb_left; 364 - continue; 365 - } 366 - if (n->rb_right) { 367 - n = n->rb_right; 368 - continue; 369 - } 370 - /* 371 - * The node has no children; free it, and then zero 372 - * out parent's link to it. Finally go to the 373 - * beginning of the loop and try to free the parent 374 - * node. 375 - */ 376 - parent = rb_parent(n); 377 - fname = rb_entry(n, struct fname, rb_hash); 358 + rbtree_postorder_for_each_entry_safe(fname, next, root, rb_hash) 378 359 while (fname) { 379 360 struct fname *old = fname; 380 361 fname = fname->next; 381 362 kfree(old); 382 363 } 383 - if (!parent) 384 - *root = RB_ROOT; 385 - else if (parent->rb_left == n) 386 - parent->rb_left = NULL; 387 - else if (parent->rb_right == n) 388 - parent->rb_right = NULL; 389 - n = parent; 390 - } 364 + 365 + *root = RB_ROOT; 391 366 } 392 367 393 368
-59
fs/hfsplus/inode.c
··· 178 178 .d_compare = hfsplus_compare_dentry, 179 179 }; 180 180 181 - static struct dentry *hfsplus_file_lookup(struct inode *dir, 182 - struct dentry *dentry, unsigned int flags) 183 - { 184 - struct hfs_find_data fd; 185 - struct super_block *sb = dir->i_sb; 186 - struct inode *inode = NULL; 187 - struct hfsplus_inode_info *hip; 188 - int err; 189 - 190 - if (HFSPLUS_IS_RSRC(dir) || strcmp(dentry->d_name.name, "rsrc")) 191 - goto out; 192 - 193 - inode = HFSPLUS_I(dir)->rsrc_inode; 194 - if (inode) 195 - goto out; 196 - 197 - inode = new_inode(sb); 198 - if (!inode) 199 - return ERR_PTR(-ENOMEM); 200 - 201 - hip = HFSPLUS_I(inode); 202 - inode->i_ino = dir->i_ino; 203 - INIT_LIST_HEAD(&hip->open_dir_list); 204 - mutex_init(&hip->extents_lock); 205 - hip->extent_state = 0; 206 - hip->flags = 0; 207 - hip->userflags = 0; 208 - set_bit(HFSPLUS_I_RSRC, &hip->flags); 209 - 210 - err = hfs_find_init(HFSPLUS_SB(sb)->cat_tree, &fd); 211 - if (!err) { 212 - err = hfsplus_find_cat(sb, dir->i_ino, &fd); 213 - if (!err) 214 - err = hfsplus_cat_read_inode(inode, &fd); 215 - hfs_find_exit(&fd); 216 - } 217 - if (err) { 218 - iput(inode); 219 - return ERR_PTR(err); 220 - } 221 - hip->rsrc_inode = dir; 222 - HFSPLUS_I(dir)->rsrc_inode = inode; 223 - igrab(dir); 224 - 225 - /* 226 - * __mark_inode_dirty expects inodes to be hashed. Since we don't 227 - * want resource fork inodes in the regular inode space, we make them 228 - * appear hashed, but do not put on any lists. hlist_del() 229 - * will work fine and require no locking. 
230 - */ 231 - hlist_add_fake(&inode->i_hash); 232 - 233 - mark_inode_dirty(inode); 234 - out: 235 - d_add(dentry, inode); 236 - return NULL; 237 - } 238 - 239 181 static void hfsplus_get_perms(struct inode *inode, 240 182 struct hfsplus_perm *perms, int dir) 241 183 { ··· 327 385 } 328 386 329 387 static const struct inode_operations hfsplus_file_inode_operations = { 330 - .lookup = hfsplus_file_lookup, 331 388 .setattr = hfsplus_setattr, 332 389 .setxattr = generic_setxattr, 333 390 .getxattr = generic_getxattr,
+2 -26
fs/jffs2/nodelist.c
··· 564 564 they're killed. */ 565 565 void jffs2_kill_fragtree(struct rb_root *root, struct jffs2_sb_info *c) 566 566 { 567 - struct jffs2_node_frag *frag; 568 - struct jffs2_node_frag *parent; 569 - 570 - if (!root->rb_node) 571 - return; 567 + struct jffs2_node_frag *frag, *next; 572 568 573 569 dbg_fragtree("killing\n"); 574 - 575 - frag = (rb_entry(root->rb_node, struct jffs2_node_frag, rb)); 576 - while(frag) { 577 - if (frag->rb.rb_left) { 578 - frag = frag_left(frag); 579 - continue; 580 - } 581 - if (frag->rb.rb_right) { 582 - frag = frag_right(frag); 583 - continue; 584 - } 585 - 570 + rbtree_postorder_for_each_entry_safe(frag, next, root, rb) { 586 571 if (frag->node && !(--frag->node->frags)) { 587 572 /* Not a hole, and it's the final remaining frag 588 573 of this node. Free the node */ ··· 576 591 577 592 jffs2_free_full_dnode(frag->node); 578 593 } 579 - parent = frag_parent(frag); 580 - if (parent) { 581 - if (frag_left(parent) == frag) 582 - parent->rb.rb_left = NULL; 583 - else 584 - parent->rb.rb_right = NULL; 585 - } 586 594 587 595 jffs2_free_node_frag(frag); 588 - frag = parent; 589 - 590 596 cond_resched(); 591 597 } 592 598 }
+3 -23
fs/jffs2/readinode.c
··· 543 543 544 544 static void jffs2_free_tmp_dnode_info_list(struct rb_root *list) 545 545 { 546 - struct rb_node *this; 547 - struct jffs2_tmp_dnode_info *tn; 546 + struct jffs2_tmp_dnode_info *tn, *next; 548 547 549 - this = list->rb_node; 550 - 551 - /* Now at bottom of tree */ 552 - while (this) { 553 - if (this->rb_left) 554 - this = this->rb_left; 555 - else if (this->rb_right) 556 - this = this->rb_right; 557 - else { 558 - tn = rb_entry(this, struct jffs2_tmp_dnode_info, rb); 548 + rbtree_postorder_for_each_entry_safe(tn, next, list, rb) { 559 549 jffs2_free_full_dnode(tn->fn); 560 550 jffs2_free_tmp_dnode_info(tn); 561 - 562 - this = rb_parent(this); 563 - if (!this) 564 - break; 565 - 566 - if (this->rb_left == &tn->rb) 567 - this->rb_left = NULL; 568 - else if (this->rb_right == &tn->rb) 569 - this->rb_right = NULL; 570 - else BUG(); 571 - } 572 551 } 552 + 573 553 *list = RB_ROOT; 574 554 } 575 555
+2 -1
fs/logfs/segment.c
··· 62 62 page = read_cache_page(mapping, index, filler, sb); 63 63 else { 64 64 page = find_or_create_page(mapping, index, GFP_NOFS); 65 - unlock_page(page); 65 + if (page) 66 + unlock_page(page); 66 67 } 67 68 return page; 68 69 }
+370 -1
fs/nilfs2/ioctl.c
··· 37 37 #include "sufile.h" 38 38 #include "dat.h" 39 39 40 - 40 + /** 41 + * nilfs_ioctl_wrap_copy - wrapping function of get/set metadata info 42 + * @nilfs: nilfs object 43 + * @argv: vector of arguments from userspace 44 + * @dir: set of direction flags 45 + * @dofunc: concrete function of get/set metadata info 46 + * 47 + * Description: nilfs_ioctl_wrap_copy() gets/sets metadata info by means of 48 + * calling dofunc() function on the basis of @argv argument. 49 + * 50 + * Return Value: On success, 0 is returned and requested metadata info 51 + * is copied into userspace. On error, one of the following 52 + * negative error codes is returned. 53 + * 54 + * %-EINVAL - Invalid arguments from userspace. 55 + * 56 + * %-ENOMEM - Insufficient amount of memory available. 57 + * 58 + * %-EFAULT - Failure during execution of requested operation. 59 + */ 41 60 static int nilfs_ioctl_wrap_copy(struct the_nilfs *nilfs, 42 61 struct nilfs_argv *argv, int dir, 43 62 ssize_t (*dofunc)(struct the_nilfs *, ··· 74 55 return 0; 75 56 76 57 if (argv->v_size > PAGE_SIZE) 58 + return -EINVAL; 59 + 60 + /* 61 + * Reject pairs of a start item position (argv->v_index) and a 62 + * total count (argv->v_nmembs) which leads position 'pos' to 63 + * overflow by the increment at the end of the loop. 
64 + */ 65 + if (argv->v_index > ~(__u64)0 - argv->v_nmembs) 77 66 return -EINVAL; 78 67 79 68 buf = (void *)__get_free_pages(GFP_NOFS, 0); ··· 126 99 return ret; 127 100 } 128 101 102 + /** 103 + * nilfs_ioctl_getflags - ioctl to support lsattr 104 + */ 129 105 static int nilfs_ioctl_getflags(struct inode *inode, void __user *argp) 130 106 { 131 107 unsigned int flags = NILFS_I(inode)->i_flags & FS_FL_USER_VISIBLE; ··· 136 106 return put_user(flags, (int __user *)argp); 137 107 } 138 108 109 + /** 110 + * nilfs_ioctl_setflags - ioctl to support chattr 111 + */ 139 112 static int nilfs_ioctl_setflags(struct inode *inode, struct file *filp, 140 113 void __user *argp) 141 114 { ··· 191 158 return ret; 192 159 } 193 160 161 + /** 162 + * nilfs_ioctl_getversion - get info about a file's version (generation number) 163 + */ 194 164 static int nilfs_ioctl_getversion(struct inode *inode, void __user *argp) 195 165 { 196 166 return put_user(inode->i_generation, (int __user *)argp); 197 167 } 198 168 169 + /** 170 + * nilfs_ioctl_change_cpmode - change checkpoint mode (checkpoint/snapshot) 171 + * @inode: inode object 172 + * @filp: file object 173 + * @cmd: ioctl's request code 174 + * @argp: pointer on argument from userspace 175 + * 176 + * Description: nilfs_ioctl_change_cpmode() function changes mode of 177 + * given checkpoint between checkpoint and snapshot state. This ioctl 178 + * is used in chcp and mkcp utilities. 179 + * 180 + * Return Value: On success, 0 is returned and mode of a checkpoint is 181 + * changed. On error, one of the following negative error codes 182 + * is returned. 183 + * 184 + * %-EPERM - Operation not permitted. 185 + * 186 + * %-EFAULT - Failure during checkpoint mode changing. 
187 + */ 199 188 static int nilfs_ioctl_change_cpmode(struct inode *inode, struct file *filp, 200 189 unsigned int cmd, void __user *argp) 201 190 { ··· 253 198 return ret; 254 199 } 255 200 201 + /** 202 + * nilfs_ioctl_delete_checkpoint - remove checkpoint 203 + * @inode: inode object 204 + * @filp: file object 205 + * @cmd: ioctl's request code 206 + * @argp: pointer on argument from userspace 207 + * 208 + * Description: nilfs_ioctl_delete_checkpoint() function removes 209 + * checkpoint from NILFS2 file system. This ioctl is used in rmcp 210 + * utility. 211 + * 212 + * Return Value: On success, 0 is returned and a checkpoint is 213 + * removed. On error, one of the following negative error codes 214 + * is returned. 215 + * 216 + * %-EPERM - Operation not permitted. 217 + * 218 + * %-EFAULT - Failure during checkpoint removing. 219 + */ 256 220 static int 257 221 nilfs_ioctl_delete_checkpoint(struct inode *inode, struct file *filp, 258 222 unsigned int cmd, void __user *argp) ··· 303 229 return ret; 304 230 } 305 231 232 + /** 233 + * nilfs_ioctl_do_get_cpinfo - callback method getting info about checkpoints 234 + * @nilfs: nilfs object 235 + * @posp: pointer on array of checkpoint's numbers 236 + * @flags: checkpoint mode (checkpoint or snapshot) 237 + * @buf: buffer for storing checkponts' info 238 + * @size: size in bytes of one checkpoint info item in array 239 + * @nmembs: number of checkpoints in array (numbers and infos) 240 + * 241 + * Description: nilfs_ioctl_do_get_cpinfo() function returns info about 242 + * requested checkpoints. The NILFS_IOCTL_GET_CPINFO ioctl is used in 243 + * lscp utility and by nilfs_cleanerd daemon. 244 + * 245 + * Return value: count of nilfs_cpinfo structures in output buffer. 
246 + */ 306 247 static ssize_t 307 248 nilfs_ioctl_do_get_cpinfo(struct the_nilfs *nilfs, __u64 *posp, int flags, 308 249 void *buf, size_t size, size_t nmembs) ··· 331 242 return ret; 332 243 } 333 244 245 + /** 246 + * nilfs_ioctl_get_cpstat - get checkpoints statistics 247 + * @inode: inode object 248 + * @filp: file object 249 + * @cmd: ioctl's request code 250 + * @argp: pointer on argument from userspace 251 + * 252 + * Description: nilfs_ioctl_get_cpstat() returns information about checkpoints. 253 + * The NILFS_IOCTL_GET_CPSTAT ioctl is used by lscp, rmcp utilities 254 + * and by nilfs_cleanerd daemon. 255 + * 256 + * Return Value: On success, 0 is returned, and checkpoints information is 257 + * copied into userspace pointer @argp. On error, one of the following 258 + * negative error codes is returned. 259 + * 260 + * %-EIO - I/O error. 261 + * 262 + * %-ENOMEM - Insufficient amount of memory available. 263 + * 264 + * %-EFAULT - Failure during getting checkpoints statistics. 265 + */ 334 266 static int nilfs_ioctl_get_cpstat(struct inode *inode, struct file *filp, 335 267 unsigned int cmd, void __user *argp) 336 268 { ··· 370 260 return ret; 371 261 } 372 262 263 + /** 264 + * nilfs_ioctl_do_get_suinfo - callback method getting segment usage info 265 + * @nilfs: nilfs object 266 + * @posp: pointer on array of segment numbers 267 + * @flags: *not used* 268 + * @buf: buffer for storing suinfo array 269 + * @size: size in bytes of one suinfo item in array 270 + * @nmembs: count of segment numbers and suinfos in array 271 + * 272 + * Description: nilfs_ioctl_do_get_suinfo() function returns segment usage 273 + * info about requested segments. The NILFS_IOCTL_GET_SUINFO ioctl is used 274 + * in lssu, nilfs_resize utilities and by nilfs_cleanerd daemon. 275 + * 276 + * Return value: count of nilfs_suinfo structures in output buffer. 
277 + */ 373 278 static ssize_t 374 279 nilfs_ioctl_do_get_suinfo(struct the_nilfs *nilfs, __u64 *posp, int flags, 375 280 void *buf, size_t size, size_t nmembs) ··· 398 273 return ret; 399 274 } 400 275 276 + /** 277 + * nilfs_ioctl_get_sustat - get segment usage statistics 278 + * @inode: inode object 279 + * @filp: file object 280 + * @cmd: ioctl's request code 281 + * @argp: pointer on argument from userspace 282 + * 283 + * Description: nilfs_ioctl_get_sustat() returns segment usage statistics. 284 + * The NILFS_IOCTL_GET_SUSTAT ioctl is used in lssu, nilfs_resize utilities 285 + * and by nilfs_cleanerd daemon. 286 + * 287 + * Return Value: On success, 0 is returned, and segment usage information is 288 + * copied into userspace pointer @argp. On error, one of the following 289 + * negative error codes is returned. 290 + * 291 + * %-EIO - I/O error. 292 + * 293 + * %-ENOMEM - Insufficient amount of memory available. 294 + * 295 + * %-EFAULT - Failure during getting segment usage statistics. 296 + */ 401 297 static int nilfs_ioctl_get_sustat(struct inode *inode, struct file *filp, 402 298 unsigned int cmd, void __user *argp) 403 299 { ··· 437 291 return ret; 438 292 } 439 293 294 + /** 295 + * nilfs_ioctl_do_get_vinfo - callback method getting virtual blocks info 296 + * @nilfs: nilfs object 297 + * @posp: *not used* 298 + * @flags: *not used* 299 + * @buf: buffer for storing array of nilfs_vinfo structures 300 + * @size: size in bytes of one vinfo item in array 301 + * @nmembs: count of vinfos in array 302 + * 303 + * Description: nilfs_ioctl_do_get_vinfo() function returns information 304 + * on virtual block addresses. The NILFS_IOCTL_GET_VINFO ioctl is used 305 + * by nilfs_cleanerd daemon. 306 + * 307 + * Return value: count of nilfs_vinfo structures in output buffer. 
308 + */ 440 309 static ssize_t 441 310 nilfs_ioctl_do_get_vinfo(struct the_nilfs *nilfs, __u64 *posp, int flags, 442 311 void *buf, size_t size, size_t nmembs) ··· 464 303 return ret; 465 304 } 466 305 306 + /** 307 + * nilfs_ioctl_do_get_bdescs - callback method getting disk block descriptors 308 + * @nilfs: nilfs object 309 + * @posp: *not used* 310 + * @flags: *not used* 311 + * @buf: buffer for storing array of nilfs_bdesc structures 312 + * @size: size in bytes of one bdesc item in array 313 + * @nmembs: count of bdescs in array 314 + * 315 + * Description: nilfs_ioctl_do_get_bdescs() function returns information 316 + * about descriptors of disk block numbers. The NILFS_IOCTL_GET_BDESCS ioctl 317 + * is used by nilfs_cleanerd daemon. 318 + * 319 + * Return value: count of nilfs_bdescs structures in output buffer. 320 + */ 467 321 static ssize_t 468 322 nilfs_ioctl_do_get_bdescs(struct the_nilfs *nilfs, __u64 *posp, int flags, 469 323 void *buf, size_t size, size_t nmembs) ··· 505 329 return nmembs; 506 330 } 507 331 332 + /** 333 + * nilfs_ioctl_get_bdescs - get disk block descriptors 334 + * @inode: inode object 335 + * @filp: file object 336 + * @cmd: ioctl's request code 337 + * @argp: pointer on argument from userspace 338 + * 339 + * Description: nilfs_ioctl_do_get_bdescs() function returns information 340 + * about descriptors of disk block numbers. The NILFS_IOCTL_GET_BDESCS ioctl 341 + * is used by nilfs_cleanerd daemon. 342 + * 343 + * Return Value: On success, 0 is returned, and disk block descriptors are 344 + * copied into userspace pointer @argp. On error, one of the following 345 + * negative error codes is returned. 346 + * 347 + * %-EINVAL - Invalid arguments from userspace. 348 + * 349 + * %-EIO - I/O error. 350 + * 351 + * %-ENOMEM - Insufficient amount of memory available. 352 + * 353 + * %-EFAULT - Failure during getting disk block descriptors. 
354 + */ 508 355 static int nilfs_ioctl_get_bdescs(struct inode *inode, struct file *filp, 509 356 unsigned int cmd, void __user *argp) 510 357 { ··· 551 352 return ret; 552 353 } 553 354 355 + /** 356 + * nilfs_ioctl_move_inode_block - prepare data/node block for moving by GC 357 + * @inode: inode object 358 + * @vdesc: descriptor of virtual block number 359 + * @buffers: list of moving buffers 360 + * 361 + * Description: nilfs_ioctl_move_inode_block() function registers data/node 362 + * buffer in the GC pagecache and submit read request. 363 + * 364 + * Return Value: On success, 0 is returned. On error, one of the following 365 + * negative error codes is returned. 366 + * 367 + * %-EIO - I/O error. 368 + * 369 + * %-ENOMEM - Insufficient amount of memory available. 370 + * 371 + * %-ENOENT - Requested block doesn't exist. 372 + * 373 + * %-EEXIST - Blocks conflict is detected. 374 + */ 554 375 static int nilfs_ioctl_move_inode_block(struct inode *inode, 555 376 struct nilfs_vdesc *vdesc, 556 377 struct list_head *buffers) ··· 616 397 return 0; 617 398 } 618 399 400 + /** 401 + * nilfs_ioctl_move_blocks - move valid inode's blocks during garbage collection 402 + * @sb: superblock object 403 + * @argv: vector of arguments from userspace 404 + * @buf: array of nilfs_vdesc structures 405 + * 406 + * Description: nilfs_ioctl_move_blocks() function reads valid data/node 407 + * blocks that garbage collector specified with the array of nilfs_vdesc 408 + * structures and stores them into page caches of GC inodes. 409 + * 410 + * Return Value: Number of processed nilfs_vdesc structures or 411 + * error code, otherwise. 
412 + */ 619 413 static int nilfs_ioctl_move_blocks(struct super_block *sb, 620 414 struct nilfs_argv *argv, void *buf) 621 415 { ··· 694 462 return ret; 695 463 } 696 464 465 + /** 466 + * nilfs_ioctl_delete_checkpoints - delete checkpoints 467 + * @nilfs: nilfs object 468 + * @argv: vector of arguments from userspace 469 + * @buf: array of periods of checkpoints numbers 470 + * 471 + * Description: nilfs_ioctl_delete_checkpoints() function deletes checkpoints 472 + * in the period from p_start to p_end, excluding p_end itself. The checkpoints 473 + * which have been already deleted are ignored. 474 + * 475 + * Return Value: Number of processed nilfs_period structures or 476 + * error code, otherwise. 477 + * 478 + * %-EIO - I/O error. 479 + * 480 + * %-ENOMEM - Insufficient amount of memory available. 481 + * 482 + * %-EINVAL - invalid checkpoints. 483 + */ 697 484 static int nilfs_ioctl_delete_checkpoints(struct the_nilfs *nilfs, 698 485 struct nilfs_argv *argv, void *buf) 699 486 { ··· 730 479 return nmembs; 731 480 } 732 481 482 + /** 483 + * nilfs_ioctl_free_vblocknrs - free virtual block numbers 484 + * @nilfs: nilfs object 485 + * @argv: vector of arguments from userspace 486 + * @buf: array of virtual block numbers 487 + * 488 + * Description: nilfs_ioctl_free_vblocknrs() function frees 489 + * the virtual block numbers specified by @buf and @argv->v_nmembs. 490 + * 491 + * Return Value: Number of processed virtual block numbers or 492 + * error code, otherwise. 493 + * 494 + * %-EIO - I/O error. 495 + * 496 + * %-ENOMEM - Insufficient amount of memory available. 497 + * 498 + * %-ENOENT - The virtual block number have not been allocated. 499 + */ 733 500 static int nilfs_ioctl_free_vblocknrs(struct the_nilfs *nilfs, 734 501 struct nilfs_argv *argv, void *buf) 735 502 { ··· 759 490 return (ret < 0) ? 
ret : nmembs; 760 491 } 761 492 493 + /** 494 + * nilfs_ioctl_mark_blocks_dirty - mark blocks dirty 495 + * @nilfs: nilfs object 496 + * @argv: vector of arguments from userspace 497 + * @buf: array of block descriptors 498 + * 499 + * Description: nilfs_ioctl_mark_blocks_dirty() function marks 500 + * metadata file or data blocks as dirty. 501 + * 502 + * Return Value: Number of processed block descriptors or 503 + * error code, otherwise. 504 + * 505 + * %-ENOMEM - Insufficient memory available. 506 + * 507 + * %-EIO - I/O error 508 + * 509 + * %-ENOENT - the specified block does not exist (hole block) 510 + */ 762 511 static int nilfs_ioctl_mark_blocks_dirty(struct the_nilfs *nilfs, 763 512 struct nilfs_argv *argv, void *buf) 764 513 { ··· 858 571 return ret; 859 572 } 860 573 574 + /** 575 + * nilfs_ioctl_clean_segments - clean segments 576 + * @inode: inode object 577 + * @filp: file object 578 + * @cmd: ioctl's request code 579 + * @argp: pointer on argument from userspace 580 + * 581 + * Description: nilfs_ioctl_clean_segments() function makes garbage 582 + * collection operation in the environment of requested parameters 583 + * from userspace. The NILFS_IOCTL_CLEAN_SEGMENTS ioctl is used by 584 + * nilfs_cleanerd daemon. 585 + * 586 + * Return Value: On success, 0 is returned or error code, otherwise. 587 + */ 861 588 static int nilfs_ioctl_clean_segments(struct inode *inode, struct file *filp, 862 589 unsigned int cmd, void __user *argp) 863 590 { ··· 983 682 return ret; 984 683 } 985 684 685 + /** 686 + * nilfs_ioctl_sync - make a checkpoint 687 + * @inode: inode object 688 + * @filp: file object 689 + * @cmd: ioctl's request code 690 + * @argp: pointer on argument from userspace 691 + * 692 + * Description: nilfs_ioctl_sync() function constructs a logical segment 693 + * for checkpointing. This function guarantees that all modified data 694 + * and metadata are written out to the device when it successfully 695 + * returned. 
696 + * 697 + * Return Value: On success, 0 is retured. On errors, one of the following 698 + * negative error code is returned. 699 + * 700 + * %-EROFS - Read only filesystem. 701 + * 702 + * %-EIO - I/O error 703 + * 704 + * %-ENOSPC - No space left on device (only in a panic state). 705 + * 706 + * %-ERESTARTSYS - Interrupted. 707 + * 708 + * %-ENOMEM - Insufficient memory available. 709 + * 710 + * %-EFAULT - Failure during execution of requested operation. 711 + */ 986 712 static int nilfs_ioctl_sync(struct inode *inode, struct file *filp, 987 713 unsigned int cmd, void __user *argp) 988 714 { ··· 1038 710 return 0; 1039 711 } 1040 712 713 + /** 714 + * nilfs_ioctl_resize - resize NILFS2 volume 715 + * @inode: inode object 716 + * @filp: file object 717 + * @argp: pointer on argument from userspace 718 + * 719 + * Return Value: On success, 0 is returned or error code, otherwise. 720 + */ 1041 721 static int nilfs_ioctl_resize(struct inode *inode, struct file *filp, 1042 722 void __user *argp) 1043 723 { ··· 1071 735 return ret; 1072 736 } 1073 737 738 + /** 739 + * nilfs_ioctl_set_alloc_range - limit range of segments to be allocated 740 + * @inode: inode object 741 + * @argp: pointer on argument from userspace 742 + * 743 + * Decription: nilfs_ioctl_set_alloc_range() function defines lower limit 744 + * of segments in bytes and upper limit of segments in bytes. 745 + * The NILFS_IOCTL_SET_ALLOC_RANGE is used by nilfs_resize utility. 746 + * 747 + * Return Value: On success, 0 is returned or error code, otherwise. 
748 + */ 1074 749 static int nilfs_ioctl_set_alloc_range(struct inode *inode, void __user *argp) 1075 750 { 1076 751 struct the_nilfs *nilfs = inode->i_sb->s_fs_info; ··· 1114 767 return ret; 1115 768 } 1116 769 770 + /** 771 + * nilfs_ioctl_get_info - wrapping function of get metadata info 772 + * @inode: inode object 773 + * @filp: file object 774 + * @cmd: ioctl's request code 775 + * @argp: pointer on argument from userspace 776 + * @membsz: size of an item in bytes 777 + * @dofunc: concrete function of getting metadata info 778 + * 779 + * Description: nilfs_ioctl_get_info() gets metadata info by means of 780 + * calling dofunc() function. 781 + * 782 + * Return Value: On success, 0 is returned and requested metadata info 783 + * is copied into userspace. On error, one of the following 784 + * negative error codes is returned. 785 + * 786 + * %-EINVAL - Invalid arguments from userspace. 787 + * 788 + * %-ENOMEM - Insufficient amount of memory available. 789 + * 790 + * %-EFAULT - Failure during execution of requested operation. 791 + */ 1117 792 static int nilfs_ioctl_get_info(struct inode *inode, struct file *filp, 1118 793 unsigned int cmd, void __user *argp, 1119 794 size_t membsz,
+2 -1
fs/pipe.c
··· 663 663 wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLRDNORM); 664 664 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN); 665 665 } 666 - if (ret > 0) { 666 + if (ret > 0 && sb_start_write_trylock(file_inode(filp)->i_sb)) { 667 667 int err = file_update_time(filp); 668 668 if (err) 669 669 ret = err; 670 + sb_end_write(file_inode(filp)->i_sb); 670 671 } 671 672 return ret; 672 673 }
-10
fs/posix_acl.c
··· 149 149 { 150 150 const struct posix_acl_entry *pa, *pe; 151 151 int state = ACL_USER_OBJ; 152 - kuid_t prev_uid = INVALID_UID; 153 - kgid_t prev_gid = INVALID_GID; 154 152 int needs_mask = 0; 155 153 156 154 FOREACH_ACL_ENTRY(pa, acl, pe) { ··· 167 169 return -EINVAL; 168 170 if (!uid_valid(pa->e_uid)) 169 171 return -EINVAL; 170 - if (uid_valid(prev_uid) && 171 - uid_lte(pa->e_uid, prev_uid)) 172 - return -EINVAL; 173 - prev_uid = pa->e_uid; 174 172 needs_mask = 1; 175 173 break; 176 174 ··· 182 188 return -EINVAL; 183 189 if (!gid_valid(pa->e_gid)) 184 190 return -EINVAL; 185 - if (gid_valid(prev_gid) && 186 - gid_lte(pa->e_gid, prev_gid)) 187 - return -EINVAL; 188 - prev_gid = pa->e_gid; 189 191 needs_mask = 1; 190 192 break; 191 193
+4 -14
fs/proc/array.c
··· 140 140 "t (tracing stop)", /* 8 */ 141 141 "Z (zombie)", /* 16 */ 142 142 "X (dead)", /* 32 */ 143 - "x (dead)", /* 64 */ 144 - "K (wakekill)", /* 128 */ 145 - "W (waking)", /* 256 */ 146 - "P (parked)", /* 512 */ 147 143 }; 148 144 149 145 static inline const char *get_task_state(struct task_struct *tsk) 150 146 { 151 - unsigned int state = (tsk->state & TASK_REPORT) | tsk->exit_state; 152 - const char * const *p = &task_state_array[0]; 147 + unsigned int state = (tsk->state | tsk->exit_state) & TASK_REPORT; 153 148 154 - BUILD_BUG_ON(1 + ilog2(TASK_STATE_MAX) != ARRAY_SIZE(task_state_array)); 149 + BUILD_BUG_ON(1 + ilog2(TASK_REPORT) != ARRAY_SIZE(task_state_array)-1); 155 150 156 - while (state) { 157 - p++; 158 - state >>= 1; 159 - } 160 - return *p; 151 + return task_state_array[fls(state)]; 161 152 } 162 153 163 154 static inline void task_state(struct seq_file *m, struct pid_namespace *ns, ··· 444 453 min_flt += t->min_flt; 445 454 maj_flt += t->maj_flt; 446 455 gtime += task_gtime(t); 447 - t = next_thread(t); 448 - } while (t != task); 456 + } while_each_thread(task, t); 449 457 450 458 min_flt += sig->min_flt; 451 459 maj_flt += sig->maj_flt;
+36 -33
fs/proc/base.c
··· 1658 1658 return 0; 1659 1659 } 1660 1660 1661 + static inline bool proc_inode_is_dead(struct inode *inode) 1662 + { 1663 + return !proc_pid(inode)->tasks[PIDTYPE_PID].first; 1664 + } 1665 + 1661 1666 int pid_delete_dentry(const struct dentry *dentry) 1662 1667 { 1663 1668 /* Is the task we represent dead? 1664 1669 * If so, then don't put the dentry on the lru list, 1665 1670 * kill it immediately. 1666 1671 */ 1667 - return !proc_pid(dentry->d_inode)->tasks[PIDTYPE_PID].first; 1672 + return proc_inode_is_dead(dentry->d_inode); 1668 1673 } 1669 1674 1670 1675 const struct dentry_operations pid_dentry_operations = ··· 3097 3092 * In the case of a seek we start with the leader and walk nr 3098 3093 * threads past it. 3099 3094 */ 3100 - static struct task_struct *first_tid(struct task_struct *leader, 3101 - int tid, int nr, struct pid_namespace *ns) 3095 + static struct task_struct *first_tid(struct pid *pid, int tid, loff_t f_pos, 3096 + struct pid_namespace *ns) 3102 3097 { 3103 - struct task_struct *pos; 3098 + struct task_struct *pos, *task; 3099 + unsigned long nr = f_pos; 3100 + 3101 + if (nr != f_pos) /* 32bit overflow? */ 3102 + return NULL; 3104 3103 3105 3104 rcu_read_lock(); 3106 - /* Attempt to start with the pid of a thread */ 3107 - if (tid && (nr > 0)) { 3105 + task = pid_task(pid, PIDTYPE_PID); 3106 + if (!task) 3107 + goto fail; 3108 + 3109 + /* Attempt to start with the tid of a thread */ 3110 + if (tid && nr) { 3108 3111 pos = find_task_by_pid_ns(tid, ns); 3109 - if (pos && (pos->group_leader == leader)) 3112 + if (pos && same_thread_group(pos, task)) 3110 3113 goto found; 3111 3114 } 3112 3115 3113 3116 /* If nr exceeds the number of threads there is nothing todo */ 3114 - pos = NULL; 3115 - if (nr && nr >= get_nr_threads(leader)) 3116 - goto out; 3117 + if (nr >= get_nr_threads(task)) 3118 + goto fail; 3117 3119 3118 3120 /* If we haven't found our starting place yet start 3119 3121 * with the leader and walk nr threads forward. 
3120 3122 */ 3121 - for (pos = leader; nr > 0; --nr) { 3122 - pos = next_thread(pos); 3123 - if (pos == leader) { 3124 - pos = NULL; 3125 - goto out; 3126 - } 3127 - } 3123 + pos = task = task->group_leader; 3124 + do { 3125 + if (!nr--) 3126 + goto found; 3127 + } while_each_thread(task, pos); 3128 + fail: 3129 + pos = NULL; 3130 + goto out; 3128 3131 found: 3129 3132 get_task_struct(pos); 3130 3133 out: ··· 3165 3152 /* for the /proc/TGID/task/ directories */ 3166 3153 static int proc_task_readdir(struct file *file, struct dir_context *ctx) 3167 3154 { 3168 - struct task_struct *leader = NULL; 3169 - struct task_struct *task = get_proc_task(file_inode(file)); 3155 + struct inode *inode = file_inode(file); 3156 + struct task_struct *task; 3170 3157 struct pid_namespace *ns; 3171 3158 int tid; 3172 3159 3173 - if (!task) 3174 - return -ENOENT; 3175 - rcu_read_lock(); 3176 - if (pid_alive(task)) { 3177 - leader = task->group_leader; 3178 - get_task_struct(leader); 3179 - } 3180 - rcu_read_unlock(); 3181 - put_task_struct(task); 3182 - if (!leader) 3160 + if (proc_inode_is_dead(inode)) 3183 3161 return -ENOENT; 3184 3162 3185 3163 if (!dir_emit_dots(file, ctx)) 3186 - goto out; 3164 + return 0; 3187 3165 3188 3166 /* f_version caches the tgid value that the last readdir call couldn't 3189 3167 * return. lseek aka telldir automagically resets f_version to 0. ··· 3182 3178 ns = file->f_dentry->d_sb->s_fs_info; 3183 3179 tid = (int)file->f_version; 3184 3180 file->f_version = 0; 3185 - for (task = first_tid(leader, tid, ctx->pos - 2, ns); 3181 + for (task = first_tid(proc_pid(inode), tid, ctx->pos - 2, ns); 3186 3182 task; 3187 3183 task = next_tid(task), ctx->pos++) { 3188 3184 char name[PROC_NUMBUF]; ··· 3198 3194 break; 3199 3195 } 3200 3196 } 3201 - out: 3202 - put_task_struct(leader); 3197 + 3203 3198 return 0; 3204 3199 } 3205 3200
+1 -1
fs/proc/cmdline.c
··· 26 26 proc_create("cmdline", 0, NULL, &cmdline_proc_fops); 27 27 return 0; 28 28 } 29 - module_init(proc_cmdline_init); 29 + fs_initcall(proc_cmdline_init);
+1 -1
fs/proc/consoles.c
··· 109 109 proc_create("consoles", 0, NULL, &proc_consoles_operations); 110 110 return 0; 111 111 } 112 - module_init(proc_consoles_init); 112 + fs_initcall(proc_consoles_init);
+1 -1
fs/proc/cpuinfo.c
··· 21 21 proc_create("cpuinfo", 0, NULL, &proc_cpuinfo_operations); 22 22 return 0; 23 23 } 24 - module_init(proc_cpuinfo_init); 24 + fs_initcall(proc_cpuinfo_init);
+1 -1
fs/proc/devices.c
··· 67 67 proc_create("devices", 0, NULL, &proc_devinfo_operations); 68 68 return 0; 69 69 } 70 - module_init(proc_devices_init); 70 + fs_initcall(proc_devices_init);
+1 -2
fs/proc/generic.c
··· 49 49 setattr_copy(inode, iattr); 50 50 mark_inode_dirty(inode); 51 51 52 - de->uid = inode->i_uid; 53 - de->gid = inode->i_gid; 52 + proc_set_user(de, inode->i_uid, inode->i_gid); 54 53 de->mode = inode->i_mode; 55 54 return 0; 56 55 }
+1 -1
fs/proc/interrupts.c
··· 50 50 proc_create("interrupts", 0, NULL, &proc_interrupts_operations); 51 51 return 0; 52 52 } 53 - module_init(proc_interrupts_init); 53 + fs_initcall(proc_interrupts_init);
+1 -1
fs/proc/kcore.c
··· 639 639 640 640 return 0; 641 641 } 642 - module_init(proc_kcore_init); 642 + fs_initcall(proc_kcore_init);
+1 -1
fs/proc/kmsg.c
··· 61 61 proc_create("kmsg", S_IRUSR, NULL, &proc_kmsg_operations); 62 62 return 0; 63 63 } 64 - module_init(proc_kmsg_init); 64 + fs_initcall(proc_kmsg_init);
+1 -1
fs/proc/loadavg.c
··· 42 42 proc_create("loadavg", 0, NULL, &loadavg_proc_fops); 43 43 return 0; 44 44 } 45 - module_init(proc_loadavg_init); 45 + fs_initcall(proc_loadavg_init);
+1 -1
fs/proc/meminfo.c
··· 220 220 proc_create("meminfo", 0, NULL, &meminfo_proc_fops); 221 221 return 0; 222 222 } 223 - module_init(proc_meminfo_init); 223 + fs_initcall(proc_meminfo_init);
+1 -1
fs/proc/nommu.c
··· 131 131 return 0; 132 132 } 133 133 134 - module_init(proc_nommu_init); 134 + fs_initcall(proc_nommu_init);
+6 -4
fs/proc/page.c
··· 118 118 /* 119 119 * PageTransCompound can be true for non-huge compound pages (slab 120 120 * pages or pages allocated by drivers with __GFP_COMP) because it 121 - * just checks PG_head/PG_tail, so we need to check PageLRU to make 122 - * sure a given page is a thp, not a non-huge compound page. 121 + * just checks PG_head/PG_tail, so we need to check PageLRU/PageAnon 122 + * to make sure a given page is a thp, not a non-huge compound page. 123 123 */ 124 - else if (PageTransCompound(page) && PageLRU(compound_trans_head(page))) 124 + else if (PageTransCompound(page) && 125 + (PageLRU(compound_trans_head(page)) || 126 + PageAnon(compound_trans_head(page)))) 125 127 u |= 1 << KPF_THP; 126 128 127 129 /* ··· 219 217 proc_create("kpageflags", S_IRUSR, NULL, &proc_kpageflags_operations); 220 218 return 0; 221 219 } 222 - module_init(proc_page_init); 220 + fs_initcall(proc_page_init);
+3 -2
fs/proc/proc_devtree.c
··· 74 74 return NULL; 75 75 76 76 if (!strncmp(name, "security-", 9)) 77 - ent->size = 0; /* don't leak number of password chars */ 77 + proc_set_size(ent, 0); /* don't leak number of password chars */ 78 78 else 79 - ent->size = pp->length; 79 + proc_set_size(ent, pp->length); 80 80 81 81 return ent; 82 82 } ··· 232 232 return; 233 233 root = of_find_node_by_path("/"); 234 234 if (root == NULL) { 235 + remove_proc_entry("device-tree", NULL); 235 236 pr_debug("/proc/device-tree: can't find root\n"); 236 237 return; 237 238 }
+1 -1
fs/proc/softirqs.c
··· 41 41 proc_create("softirqs", 0, NULL, &proc_softirqs_operations); 42 42 return 0; 43 43 } 44 - module_init(proc_softirqs_init); 44 + fs_initcall(proc_softirqs_init);
+1 -1
fs/proc/stat.c
··· 221 221 proc_create("stat", 0, NULL, &proc_stat_operations); 222 222 return 0; 223 223 } 224 - module_init(proc_stat_init); 224 + fs_initcall(proc_stat_init);
+1 -1
fs/proc/uptime.c
··· 49 49 proc_create("uptime", 0, NULL, &uptime_proc_fops); 50 50 return 0; 51 51 } 52 - module_init(proc_uptime_init); 52 + fs_initcall(proc_uptime_init);
+1 -1
fs/proc/version.c
··· 31 31 proc_create("version", 0, NULL, &version_proc_fops); 32 32 return 0; 33 33 } 34 - module_init(proc_version_init); 34 + fs_initcall(proc_version_init);
+1 -1
fs/proc/vmcore.c
··· 1082 1082 proc_vmcore->size = vmcore_size; 1083 1083 return 0; 1084 1084 } 1085 - module_init(vmcore_init) 1085 + fs_initcall(vmcore_init); 1086 1086 1087 1087 /* Cleanup function for vmcore module. */ 1088 1088 void vmcore_cleanup(void)
+1 -6
fs/proc_namespace.c
··· 234 234 235 235 rcu_read_lock(); 236 236 nsp = task_nsproxy(task); 237 - if (!nsp) { 237 + if (!nsp || !nsp->mnt_ns) { 238 238 rcu_read_unlock(); 239 239 put_task_struct(task); 240 240 goto err; 241 241 } 242 242 ns = nsp->mnt_ns; 243 - if (!ns) { 244 - rcu_read_unlock(); 245 - put_task_struct(task); 246 - goto err; 247 - } 248 243 get_mnt_ns(ns); 249 244 rcu_read_unlock(); 250 245 task_lock(task);
-7
fs/ramfs/file-mmu.c
··· 30 30 31 31 #include "internal.h" 32 32 33 - const struct address_space_operations ramfs_aops = { 34 - .readpage = simple_readpage, 35 - .write_begin = simple_write_begin, 36 - .write_end = simple_write_end, 37 - .set_page_dirty = __set_page_dirty_no_writeback, 38 - }; 39 - 40 33 const struct file_operations ramfs_file_operations = { 41 34 .read = do_sync_read, 42 35 .aio_read = generic_file_aio_read,
+8 -9
fs/ramfs/file-nommu.c
··· 27 27 #include "internal.h" 28 28 29 29 static int ramfs_nommu_setattr(struct dentry *, struct iattr *); 30 - 31 - const struct address_space_operations ramfs_aops = { 32 - .readpage = simple_readpage, 33 - .write_begin = simple_write_begin, 34 - .write_end = simple_write_end, 35 - .set_page_dirty = __set_page_dirty_no_writeback, 36 - }; 30 + static unsigned long ramfs_nommu_get_unmapped_area(struct file *file, 31 + unsigned long addr, 32 + unsigned long len, 33 + unsigned long pgoff, 34 + unsigned long flags); 35 + static int ramfs_nommu_mmap(struct file *file, struct vm_area_struct *vma); 37 36 38 37 const struct file_operations ramfs_file_operations = { 39 38 .mmap = ramfs_nommu_mmap, ··· 196 197 * - the pages to be mapped must exist 197 198 * - the pages be physically contiguous in sequence 198 199 */ 199 - unsigned long ramfs_nommu_get_unmapped_area(struct file *file, 200 + static unsigned long ramfs_nommu_get_unmapped_area(struct file *file, 200 201 unsigned long addr, unsigned long len, 201 202 unsigned long pgoff, unsigned long flags) 202 203 { ··· 255 256 /* 256 257 * set up a mapping for shared memory segments 257 258 */ 258 - int ramfs_nommu_mmap(struct file *file, struct vm_area_struct *vma) 259 + static int ramfs_nommu_mmap(struct file *file, struct vm_area_struct *vma) 259 260 { 260 261 if (!(vma->vm_flags & VM_SHARED)) 261 262 return -ENOSYS;
+7
fs/ramfs/inode.c
··· 43 43 static const struct super_operations ramfs_ops; 44 44 static const struct inode_operations ramfs_dir_inode_operations; 45 45 46 + static const struct address_space_operations ramfs_aops = { 47 + .readpage = simple_readpage, 48 + .write_begin = simple_write_begin, 49 + .write_end = simple_write_end, 50 + .set_page_dirty = __set_page_dirty_no_writeback, 51 + }; 52 + 46 53 static struct backing_dev_info ramfs_backing_dev_info = { 47 54 .name = "ramfs", 48 55 .ra_pages = 0, /* No readahead */
-1
fs/ramfs/internal.h
··· 10 10 */ 11 11 12 12 13 - extern const struct address_space_operations ramfs_aops; 14 13 extern const struct inode_operations ramfs_file_inode_operations;
-2
fs/reiserfs/reiserfs.h
··· 1958 1958 #define MAX_US_INT 0xffff 1959 1959 1960 1960 // reiserfs version 2 has max offset 60 bits. Version 1 - 32 bit offset 1961 - #define U32_MAX (~(__u32)0) 1962 - 1963 1961 static inline loff_t max_reiserfs_offset(struct inode *inode) 1964 1962 { 1965 1963 if (get_inode_item_key_version(inode) == KEY_FORMAT_3_5)
+2 -4
fs/romfs/super.c
··· 533 533 534 534 root = romfs_iget(sb, pos); 535 535 if (IS_ERR(root)) 536 - goto error; 536 + return PTR_ERR(root); 537 537 538 538 sb->s_root = d_make_root(root); 539 539 if (!sb->s_root) 540 - goto error; 540 + return -ENOMEM; 541 541 542 542 return 0; 543 543 544 - error: 545 - return -EINVAL; 546 544 error_rsb_inval: 547 545 ret = -EINVAL; 548 546 error_rsb:
+3 -19
fs/ubifs/debug.c
··· 2118 2118 */ 2119 2119 static void free_inodes(struct fsck_data *fsckd) 2120 2120 { 2121 - struct rb_node *this = fsckd->inodes.rb_node; 2122 - struct fsck_inode *fscki; 2121 + struct fsck_inode *fscki, *n; 2123 2122 2124 - while (this) { 2125 - if (this->rb_left) 2126 - this = this->rb_left; 2127 - else if (this->rb_right) 2128 - this = this->rb_right; 2129 - else { 2130 - fscki = rb_entry(this, struct fsck_inode, rb); 2131 - this = rb_parent(this); 2132 - if (this) { 2133 - if (this->rb_left == &fscki->rb) 2134 - this->rb_left = NULL; 2135 - else 2136 - this->rb_right = NULL; 2137 - } 2138 - kfree(fscki); 2139 - } 2140 - } 2123 + rbtree_postorder_for_each_entry_safe(fscki, n, &fsckd->inodes, rb) 2124 + kfree(fscki); 2141 2125 } 2142 2126 2143 2127 /**
+2 -19
fs/ubifs/log.c
··· 574 574 */ 575 575 static void destroy_done_tree(struct rb_root *done_tree) 576 576 { 577 - struct rb_node *this = done_tree->rb_node; 578 - struct done_ref *dr; 577 + struct done_ref *dr, *n; 579 578 580 - while (this) { 581 - if (this->rb_left) { 582 - this = this->rb_left; 583 - continue; 584 - } else if (this->rb_right) { 585 - this = this->rb_right; 586 - continue; 587 - } 588 - dr = rb_entry(this, struct done_ref, rb); 589 - this = rb_parent(this); 590 - if (this) { 591 - if (this->rb_left == &dr->rb) 592 - this->rb_left = NULL; 593 - else 594 - this->rb_right = NULL; 595 - } 579 + rbtree_postorder_for_each_entry_safe(dr, n, done_tree, rb) 596 580 kfree(dr); 597 - } 598 581 } 599 582 600 583 /**
+2 -19
fs/ubifs/orphan.c
··· 815 815 816 816 static void dbg_free_check_tree(struct rb_root *root) 817 817 { 818 - struct rb_node *this = root->rb_node; 819 - struct check_orphan *o; 818 + struct check_orphan *o, *n; 820 819 821 - while (this) { 822 - if (this->rb_left) { 823 - this = this->rb_left; 824 - continue; 825 - } else if (this->rb_right) { 826 - this = this->rb_right; 827 - continue; 828 - } 829 - o = rb_entry(this, struct check_orphan, rb); 830 - this = rb_parent(this); 831 - if (this) { 832 - if (this->rb_left == &o->rb) 833 - this->rb_left = NULL; 834 - else 835 - this->rb_right = NULL; 836 - } 820 + rbtree_postorder_for_each_entry_safe(o, n, root, rb) 837 821 kfree(o); 838 - } 839 822 } 840 823 841 824 static int dbg_orphan_check(struct ubifs_info *c, struct ubifs_zbranch *zbr,
+3 -18
fs/ubifs/recovery.c
··· 1335 1335 */ 1336 1336 void ubifs_destroy_size_tree(struct ubifs_info *c) 1337 1337 { 1338 - struct rb_node *this = c->size_tree.rb_node; 1339 - struct size_entry *e; 1338 + struct size_entry *e, *n; 1340 1339 1341 - while (this) { 1342 - if (this->rb_left) { 1343 - this = this->rb_left; 1344 - continue; 1345 - } else if (this->rb_right) { 1346 - this = this->rb_right; 1347 - continue; 1348 - } 1349 - e = rb_entry(this, struct size_entry, rb); 1340 + rbtree_postorder_for_each_entry_safe(e, n, &c->size_tree, rb) { 1350 1341 if (e->inode) 1351 1342 iput(e->inode); 1352 - this = rb_parent(this); 1353 - if (this) { 1354 - if (this->rb_left == &e->rb) 1355 - this->rb_left = NULL; 1356 - else 1357 - this->rb_right = NULL; 1358 - } 1359 1343 kfree(e); 1360 1344 } 1345 + 1361 1346 c->size_tree = RB_ROOT; 1362 1347 } 1363 1348
+3 -19
fs/ubifs/super.c
··· 873 873 */ 874 874 static void free_buds(struct ubifs_info *c) 875 875 { 876 - struct rb_node *this = c->buds.rb_node; 877 - struct ubifs_bud *bud; 876 + struct ubifs_bud *bud, *n; 878 877 879 - while (this) { 880 - if (this->rb_left) 881 - this = this->rb_left; 882 - else if (this->rb_right) 883 - this = this->rb_right; 884 - else { 885 - bud = rb_entry(this, struct ubifs_bud, rb); 886 - this = rb_parent(this); 887 - if (this) { 888 - if (this->rb_left == &bud->rb) 889 - this->rb_left = NULL; 890 - else 891 - this->rb_right = NULL; 892 - } 893 - kfree(bud); 894 - } 895 - } 878 + rbtree_postorder_for_each_entry_safe(bud, n, &c->buds, rb) 879 + kfree(bud); 896 880 } 897 881 898 882 /**
+3 -19
fs/ubifs/tnc.c
··· 178 178 */ 179 179 void destroy_old_idx(struct ubifs_info *c) 180 180 { 181 - struct rb_node *this = c->old_idx.rb_node; 182 - struct ubifs_old_idx *old_idx; 181 + struct ubifs_old_idx *old_idx, *n; 183 182 184 - while (this) { 185 - if (this->rb_left) { 186 - this = this->rb_left; 187 - continue; 188 - } else if (this->rb_right) { 189 - this = this->rb_right; 190 - continue; 191 - } 192 - old_idx = rb_entry(this, struct ubifs_old_idx, rb); 193 - this = rb_parent(this); 194 - if (this) { 195 - if (this->rb_left == &old_idx->rb) 196 - this->rb_left = NULL; 197 - else 198 - this->rb_right = NULL; 199 - } 183 + rbtree_postorder_for_each_entry_safe(old_idx, n, &c->old_idx, rb) 200 184 kfree(old_idx); 201 - } 185 + 202 186 c->old_idx = RB_ROOT; 203 187 } 204 188
+97
include/asm-generic/fixmap.h
··· 1 + /* 2 + * fixmap.h: compile-time virtual memory allocation 3 + * 4 + * This file is subject to the terms and conditions of the GNU General Public 5 + * License. See the file "COPYING" in the main directory of this archive 6 + * for more details. 7 + * 8 + * Copyright (C) 1998 Ingo Molnar 9 + * 10 + * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999 11 + * x86_32 and x86_64 integration by Gustavo F. Padovan, February 2009 12 + * Break out common bits to asm-generic by Mark Salter, November 2013 13 + */ 14 + 15 + #ifndef __ASM_GENERIC_FIXMAP_H 16 + #define __ASM_GENERIC_FIXMAP_H 17 + 18 + #include <linux/bug.h> 19 + 20 + #define __fix_to_virt(x) (FIXADDR_TOP - ((x) << PAGE_SHIFT)) 21 + #define __virt_to_fix(x) ((FIXADDR_TOP - ((x)&PAGE_MASK)) >> PAGE_SHIFT) 22 + 23 + #ifndef __ASSEMBLY__ 24 + /* 25 + * 'index to address' translation. If anyone tries to use the idx 26 + * directly without translation, we catch the bug with a NULL-deference 27 + * kernel oops. Illegal ranges of incoming indices are caught too. 28 + */ 29 + static __always_inline unsigned long fix_to_virt(const unsigned int idx) 30 + { 31 + BUILD_BUG_ON(idx >= __end_of_fixed_addresses); 32 + return __fix_to_virt(idx); 33 + } 34 + 35 + static inline unsigned long virt_to_fix(const unsigned long vaddr) 36 + { 37 + BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START); 38 + return __virt_to_fix(vaddr); 39 + } 40 + 41 + /* 42 + * Provide some reasonable defaults for page flags. 43 + * Not all architectures use all of these different types and some 44 + * architectures use different names. 
45 + */ 46 + #ifndef FIXMAP_PAGE_NORMAL 47 + #define FIXMAP_PAGE_NORMAL PAGE_KERNEL 48 + #endif 49 + #ifndef FIXMAP_PAGE_NOCACHE 50 + #define FIXMAP_PAGE_NOCACHE PAGE_KERNEL_NOCACHE 51 + #endif 52 + #ifndef FIXMAP_PAGE_IO 53 + #define FIXMAP_PAGE_IO PAGE_KERNEL_IO 54 + #endif 55 + #ifndef FIXMAP_PAGE_CLEAR 56 + #define FIXMAP_PAGE_CLEAR __pgprot(0) 57 + #endif 58 + 59 + #ifndef set_fixmap 60 + #define set_fixmap(idx, phys) \ 61 + __set_fixmap(idx, phys, FIXMAP_PAGE_NORMAL) 62 + #endif 63 + 64 + #ifndef clear_fixmap 65 + #define clear_fixmap(idx) \ 66 + __set_fixmap(idx, 0, FIXMAP_PAGE_CLEAR) 67 + #endif 68 + 69 + /* Return a pointer with offset calculated */ 70 + #define __set_fixmap_offset(idx, phys, flags) \ 71 + ({ \ 72 + unsigned long addr; \ 73 + __set_fixmap(idx, phys, flags); \ 74 + addr = fix_to_virt(idx) + ((phys) & (PAGE_SIZE - 1)); \ 75 + addr; \ 76 + }) 77 + 78 + #define set_fixmap_offset(idx, phys) \ 79 + __set_fixmap_offset(idx, phys, FIXMAP_PAGE_NORMAL) 80 + 81 + /* 82 + * Some hardware wants to get fixmapped without caching. 83 + */ 84 + #define set_fixmap_nocache(idx, phys) \ 85 + __set_fixmap(idx, phys, FIXMAP_PAGE_NOCACHE) 86 + 87 + #define set_fixmap_offset_nocache(idx, phys) \ 88 + __set_fixmap_offset(idx, phys, FIXMAP_PAGE_NOCACHE) 89 + 90 + /* 91 + * Some fixmaps are for IO 92 + */ 93 + #define set_fixmap_io(idx, phys) \ 94 + __set_fixmap(idx, phys, FIXMAP_PAGE_IO) 95 + 96 + #endif /* __ASSEMBLY__ */ 97 + #endif /* __ASM_GENERIC_FIXMAP_H */
-49
include/asm-generic/int-l64.h
··· 1 - /* 2 - * asm-generic/int-l64.h 3 - * 4 - * Integer declarations for architectures which use "long" 5 - * for 64-bit types. 6 - */ 7 - #ifndef _ASM_GENERIC_INT_L64_H 8 - #define _ASM_GENERIC_INT_L64_H 9 - 10 - #include <uapi/asm-generic/int-l64.h> 11 - 12 - 13 - #ifndef __ASSEMBLY__ 14 - 15 - typedef signed char s8; 16 - typedef unsigned char u8; 17 - 18 - typedef signed short s16; 19 - typedef unsigned short u16; 20 - 21 - typedef signed int s32; 22 - typedef unsigned int u32; 23 - 24 - typedef signed long s64; 25 - typedef unsigned long u64; 26 - 27 - #define S8_C(x) x 28 - #define U8_C(x) x ## U 29 - #define S16_C(x) x 30 - #define U16_C(x) x ## U 31 - #define S32_C(x) x 32 - #define U32_C(x) x ## U 33 - #define S64_C(x) x ## L 34 - #define U64_C(x) x ## UL 35 - 36 - #else /* __ASSEMBLY__ */ 37 - 38 - #define S8_C(x) x 39 - #define U8_C(x) x 40 - #define S16_C(x) x 41 - #define U16_C(x) x 42 - #define S32_C(x) x 43 - #define U32_C(x) x 44 - #define S64_C(x) x 45 - #define U64_C(x) x 46 - 47 - #endif /* __ASSEMBLY__ */ 48 - 49 - #endif /* _ASM_GENERIC_INT_L64_H */
+2 -2
include/linux/cache.h
··· 1 1 #ifndef __LINUX_CACHE_H 2 2 #define __LINUX_CACHE_H 3 3 4 - #include <linux/kernel.h> 4 + #include <uapi/linux/kernel.h> 5 5 #include <asm/cache.h> 6 6 7 7 #ifndef L1_CACHE_ALIGN 8 - #define L1_CACHE_ALIGN(x) ALIGN(x, L1_CACHE_BYTES) 8 + #define L1_CACHE_ALIGN(x) __ALIGN_KERNEL(x, L1_CACHE_BYTES) 9 9 #endif 10 10 11 11 #ifndef SMP_CACHE_BYTES
-17
include/linux/ceph/decode.h
··· 8 8 9 9 #include <linux/ceph/types.h> 10 10 11 - /* This seemed to be the easiest place to define these */ 12 - 13 - #define U8_MAX ((u8)(~0U)) 14 - #define U16_MAX ((u16)(~0U)) 15 - #define U32_MAX ((u32)(~0U)) 16 - #define U64_MAX ((u64)(~0ULL)) 17 - 18 - #define S8_MAX ((s8)(U8_MAX >> 1)) 19 - #define S16_MAX ((s16)(U16_MAX >> 1)) 20 - #define S32_MAX ((s32)(U32_MAX >> 1)) 21 - #define S64_MAX ((s64)(U64_MAX >> 1LL)) 22 - 23 - #define S8_MIN ((s8)(-S8_MAX - 1)) 24 - #define S16_MIN ((s16)(-S16_MAX - 1)) 25 - #define S32_MIN ((s32)(-S32_MAX - 1)) 26 - #define S64_MIN ((s64)(-S64_MAX - 1LL)) 27 - 28 11 /* 29 12 * in all cases, 30 13 * void **p pointer to position pointer
+2
include/linux/genalloc.h
··· 30 30 #ifndef __GENALLOC_H__ 31 31 #define __GENALLOC_H__ 32 32 33 + #include <linux/spinlock_types.h> 34 + 33 35 struct device; 34 36 struct device_node; 35 37
+1
include/linux/gfp.h
··· 1 1 #ifndef __LINUX_GFP_H 2 2 #define __LINUX_GFP_H 3 3 4 + #include <linux/mmdebug.h> 4 5 #include <linux/mmzone.h> 5 6 #include <linux/stddef.h> 6 7 #include <linux/linkage.h>
+2 -1
include/linux/hugetlb.h
··· 2 2 #define _LINUX_HUGETLB_H 3 3 4 4 #include <linux/mm_types.h> 5 + #include <linux/mmdebug.h> 5 6 #include <linux/fs.h> 6 7 #include <linux/hugetlb_inline.h> 7 8 #include <linux/cgroup.h> ··· 355 354 356 355 static inline struct hstate *page_hstate(struct page *page) 357 356 { 358 - VM_BUG_ON(!PageHuge(page)); 357 + VM_BUG_ON_PAGE(!PageHuge(page), page); 359 358 return size_to_hstate(PAGE_SIZE << compound_order(page)); 360 359 } 361 360
+3 -2
include/linux/hugetlb_cgroup.h
··· 15 15 #ifndef _LINUX_HUGETLB_CGROUP_H 16 16 #define _LINUX_HUGETLB_CGROUP_H 17 17 18 + #include <linux/mmdebug.h> 18 19 #include <linux/res_counter.h> 19 20 20 21 struct hugetlb_cgroup; ··· 29 28 30 29 static inline struct hugetlb_cgroup *hugetlb_cgroup_from_page(struct page *page) 31 30 { 32 - VM_BUG_ON(!PageHuge(page)); 31 + VM_BUG_ON_PAGE(!PageHuge(page), page); 33 32 34 33 if (compound_order(page) < HUGETLB_CGROUP_MIN_ORDER) 35 34 return NULL; ··· 39 38 static inline 40 39 int set_hugetlb_cgroup(struct page *page, struct hugetlb_cgroup *h_cg) 41 40 { 42 - VM_BUG_ON(!PageHuge(page)); 41 + VM_BUG_ON_PAGE(!PageHuge(page), page); 43 42 44 43 if (compound_order(page) < HUGETLB_CGROUP_MIN_ORDER) 45 44 return -1;
+13
include/linux/kernel.h
··· 29 29 #define ULLONG_MAX (~0ULL) 30 30 #define SIZE_MAX (~(size_t)0) 31 31 32 + #define U8_MAX ((u8)~0U) 33 + #define S8_MAX ((s8)(U8_MAX>>1)) 34 + #define S8_MIN ((s8)(-S8_MAX - 1)) 35 + #define U16_MAX ((u16)~0U) 36 + #define S16_MAX ((s16)(U16_MAX>>1)) 37 + #define S16_MIN ((s16)(-S16_MAX - 1)) 38 + #define U32_MAX ((u32)~0U) 39 + #define S32_MAX ((s32)(U32_MAX>>1)) 40 + #define S32_MIN ((s32)(-S32_MAX - 1)) 41 + #define U64_MAX ((u64)~0ULL) 42 + #define S64_MAX ((s64)(U64_MAX>>1)) 43 + #define S64_MIN ((s64)(-S64_MAX - 1)) 44 + 32 45 #define STACK_MAGIC 0xdeadbeef 33 46 34 47 #define REPEAT_BYTE(x) ((~0ul / 0xff) * (x))
+1
include/linux/kexec.h
··· 170 170 171 171 extern struct kimage *kexec_image; 172 172 extern struct kimage *kexec_crash_image; 173 + extern int kexec_load_disabled; 173 174 174 175 #ifndef kexec_flush_icache_page 175 176 #define kexec_flush_icache_page(page)
+1
include/linux/memblock.h
··· 61 61 phys_addr_t memblock_find_in_range(phys_addr_t start, phys_addr_t end, 62 62 phys_addr_t size, phys_addr_t align); 63 63 phys_addr_t get_allocated_memblock_reserved_regions_info(phys_addr_t *addr); 64 + phys_addr_t get_allocated_memblock_memory_regions_info(phys_addr_t *addr); 64 65 void memblock_allow_resize(void); 65 66 int memblock_add_node(phys_addr_t base, phys_addr_t size, int nid); 66 67 int memblock_add(phys_addr_t base, phys_addr_t size);
+13 -10
include/linux/memcontrol.h
··· 497 497 void __memcg_kmem_uncharge_pages(struct page *page, int order); 498 498 499 499 int memcg_cache_id(struct mem_cgroup *memcg); 500 - int memcg_register_cache(struct mem_cgroup *memcg, struct kmem_cache *s, 501 - struct kmem_cache *root_cache); 502 - void memcg_release_cache(struct kmem_cache *cachep); 503 - void memcg_cache_list_add(struct mem_cgroup *memcg, struct kmem_cache *cachep); 500 + int memcg_alloc_cache_params(struct mem_cgroup *memcg, struct kmem_cache *s, 501 + struct kmem_cache *root_cache); 502 + void memcg_free_cache_params(struct kmem_cache *s); 503 + void memcg_register_cache(struct kmem_cache *s); 504 + void memcg_unregister_cache(struct kmem_cache *s); 504 505 505 506 int memcg_update_cache_size(struct kmem_cache *s, int num_groups); 506 507 void memcg_update_array_size(int num_groups); ··· 641 640 return -1; 642 641 } 643 642 644 - static inline int 645 - memcg_register_cache(struct mem_cgroup *memcg, struct kmem_cache *s, 646 - struct kmem_cache *root_cache) 643 + static inline int memcg_alloc_cache_params(struct mem_cgroup *memcg, 644 + struct kmem_cache *s, struct kmem_cache *root_cache) 647 645 { 648 646 return 0; 649 647 } 650 648 651 - static inline void memcg_release_cache(struct kmem_cache *cachep) 649 + static inline void memcg_free_cache_params(struct kmem_cache *s) 652 650 { 653 651 } 654 652 655 - static inline void memcg_cache_list_add(struct mem_cgroup *memcg, 656 - struct kmem_cache *s) 653 + static inline void memcg_register_cache(struct kmem_cache *s) 654 + { 655 + } 656 + 657 + static inline void memcg_unregister_cache(struct kmem_cache *s) 657 658 { 658 659 } 659 660
+13 -14
include/linux/mm.h
··· 5 5 6 6 #ifdef __KERNEL__ 7 7 8 + #include <linux/mmdebug.h> 8 9 #include <linux/gfp.h> 9 10 #include <linux/bug.h> 10 11 #include <linux/list.h> ··· 304 303 */ 305 304 static inline int put_page_testzero(struct page *page) 306 305 { 307 - VM_BUG_ON(atomic_read(&page->_count) == 0); 306 + VM_BUG_ON_PAGE(atomic_read(&page->_count) == 0, page); 308 307 return atomic_dec_and_test(&page->_count); 309 308 } 310 309 ··· 365 364 static inline void compound_lock(struct page *page) 366 365 { 367 366 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 368 - VM_BUG_ON(PageSlab(page)); 367 + VM_BUG_ON_PAGE(PageSlab(page), page); 369 368 bit_spin_lock(PG_compound_lock, &page->flags); 370 369 #endif 371 370 } ··· 373 372 static inline void compound_unlock(struct page *page) 374 373 { 375 374 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 376 - VM_BUG_ON(PageSlab(page)); 375 + VM_BUG_ON_PAGE(PageSlab(page), page); 377 376 bit_spin_unlock(PG_compound_lock, &page->flags); 378 377 #endif 379 378 } ··· 448 447 */ 449 448 static inline bool compound_tail_refcounted(struct page *page) 450 449 { 451 - VM_BUG_ON(!PageHead(page)); 450 + VM_BUG_ON_PAGE(!PageHead(page), page); 452 451 return __compound_tail_refcounted(page); 453 452 } 454 453 ··· 457 456 /* 458 457 * __split_huge_page_refcount() cannot run from under us. 459 458 */ 460 - VM_BUG_ON(!PageTail(page)); 461 - VM_BUG_ON(page_mapcount(page) < 0); 462 - VM_BUG_ON(atomic_read(&page->_count) != 0); 459 + VM_BUG_ON_PAGE(!PageTail(page), page); 460 + VM_BUG_ON_PAGE(page_mapcount(page) < 0, page); 461 + VM_BUG_ON_PAGE(atomic_read(&page->_count) != 0, page); 463 462 if (compound_tail_refcounted(page->first_page)) 464 463 atomic_inc(&page->_mapcount); 465 464 } ··· 475 474 * Getting a normal page or the head of a compound page 476 475 * requires to already have an elevated page->_count. 
477 476 */ 478 - VM_BUG_ON(atomic_read(&page->_count) <= 0); 477 + VM_BUG_ON_PAGE(atomic_read(&page->_count) <= 0, page); 479 478 atomic_inc(&page->_count); 480 479 } 481 480 ··· 512 511 513 512 static inline void __SetPageBuddy(struct page *page) 514 513 { 515 - VM_BUG_ON(atomic_read(&page->_mapcount) != -1); 514 + VM_BUG_ON_PAGE(atomic_read(&page->_mapcount) != -1, page); 516 515 atomic_set(&page->_mapcount, PAGE_BUDDY_MAPCOUNT_VALUE); 517 516 } 518 517 519 518 static inline void __ClearPageBuddy(struct page *page) 520 519 { 521 - VM_BUG_ON(!PageBuddy(page)); 520 + VM_BUG_ON_PAGE(!PageBuddy(page), page); 522 521 atomic_set(&page->_mapcount, -1); 523 522 } 524 523 ··· 1402 1401 * slab code uses page->slab_cache and page->first_page (for tail 1403 1402 * pages), which share storage with page->ptl. 1404 1403 */ 1405 - VM_BUG_ON(*(unsigned long *)&page->ptl); 1404 + VM_BUG_ON_PAGE(*(unsigned long *)&page->ptl, page); 1406 1405 if (!ptlock_alloc(page)) 1407 1406 return false; 1408 1407 spin_lock_init(ptlock_ptr(page)); ··· 1493 1492 static inline void pgtable_pmd_page_dtor(struct page *page) 1494 1493 { 1495 1494 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 1496 - VM_BUG_ON(page->pmd_huge_pte); 1495 + VM_BUG_ON_PAGE(page->pmd_huge_pte, page); 1497 1496 #endif 1498 1497 ptlock_free(page); 1499 1498 } ··· 2029 2028 extern void shake_page(struct page *p, int access); 2030 2029 extern atomic_long_t num_poisoned_pages; 2031 2030 extern int soft_offline_page(struct page *page, int flags); 2032 - 2033 - extern void dump_page(struct page *page); 2034 2031 2035 2032 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS) 2036 2033 extern void clear_huge_page(struct page *page,
+9
include/linux/mmdebug.h
··· 1 1 #ifndef LINUX_MM_DEBUG_H 2 2 #define LINUX_MM_DEBUG_H 1 3 3 4 + struct page; 5 + 6 + extern void dump_page(struct page *page, char *reason); 7 + extern void dump_page_badflags(struct page *page, char *reason, 8 + unsigned long badflags); 9 + 4 10 #ifdef CONFIG_DEBUG_VM 5 11 #define VM_BUG_ON(cond) BUG_ON(cond) 12 + #define VM_BUG_ON_PAGE(cond, page) \ 13 + do { if (unlikely(cond)) { dump_page(page, NULL); BUG(); } } while (0) 6 14 #else 7 15 #define VM_BUG_ON(cond) BUILD_BUG_ON_INVALID(cond) 16 + #define VM_BUG_ON_PAGE(cond, page) VM_BUG_ON(cond) 8 17 #endif 9 18 10 19 #ifdef CONFIG_DEBUG_VIRTUAL
+6 -1
include/linux/of.h
··· 377 377 return false; 378 378 } 379 379 380 + /* Kill an unused variable warning on a device_node pointer */ 381 + static inline void __of_use_dn(const struct device_node *np) 382 + { 383 + } 384 + 380 385 #define for_each_child_of_node(parent, child) \ 381 - while (0) 386 + while (__of_use_dn(parent), __of_use_dn(child), 0) 382 387 383 388 #define for_each_available_child_of_node(parent, child) \ 384 389 while (0)
+5 -5
include/linux/page-flags.h
··· 412 412 */ 413 413 static inline int PageTransHuge(struct page *page) 414 414 { 415 - VM_BUG_ON(PageTail(page)); 415 + VM_BUG_ON_PAGE(PageTail(page), page); 416 416 return PageHead(page); 417 417 } 418 418 ··· 460 460 */ 461 461 static inline int PageSlabPfmemalloc(struct page *page) 462 462 { 463 - VM_BUG_ON(!PageSlab(page)); 463 + VM_BUG_ON_PAGE(!PageSlab(page), page); 464 464 return PageActive(page); 465 465 } 466 466 467 467 static inline void SetPageSlabPfmemalloc(struct page *page) 468 468 { 469 - VM_BUG_ON(!PageSlab(page)); 469 + VM_BUG_ON_PAGE(!PageSlab(page), page); 470 470 SetPageActive(page); 471 471 } 472 472 473 473 static inline void __ClearPageSlabPfmemalloc(struct page *page) 474 474 { 475 - VM_BUG_ON(!PageSlab(page)); 475 + VM_BUG_ON_PAGE(!PageSlab(page), page); 476 476 __ClearPageActive(page); 477 477 } 478 478 479 479 static inline void ClearPageSlabPfmemalloc(struct page *page) 480 480 { 481 - VM_BUG_ON(!PageSlab(page)); 481 + VM_BUG_ON_PAGE(!PageSlab(page), page); 482 482 ClearPageActive(page); 483 483 } 484 484
+5 -5
include/linux/pagemap.h
··· 162 162 * disabling preempt, and hence no need for the "speculative get" that 163 163 * SMP requires. 164 164 */ 165 - VM_BUG_ON(page_count(page) == 0); 165 + VM_BUG_ON_PAGE(page_count(page) == 0, page); 166 166 atomic_inc(&page->_count); 167 167 168 168 #else ··· 175 175 return 0; 176 176 } 177 177 #endif 178 - VM_BUG_ON(PageTail(page)); 178 + VM_BUG_ON_PAGE(PageTail(page), page); 179 179 180 180 return 1; 181 181 } ··· 191 191 # ifdef CONFIG_PREEMPT_COUNT 192 192 VM_BUG_ON(!in_atomic()); 193 193 # endif 194 - VM_BUG_ON(page_count(page) == 0); 194 + VM_BUG_ON_PAGE(page_count(page) == 0, page); 195 195 atomic_add(count, &page->_count); 196 196 197 197 #else 198 198 if (unlikely(!atomic_add_unless(&page->_count, count, 0))) 199 199 return 0; 200 200 #endif 201 - VM_BUG_ON(PageCompound(page) && page != compound_head(page)); 201 + VM_BUG_ON_PAGE(PageCompound(page) && page != compound_head(page), page); 202 202 203 203 return 1; 204 204 } ··· 210 210 211 211 static inline void page_unfreeze_refs(struct page *page, int count) 212 212 { 213 - VM_BUG_ON(page_count(page) != 0); 213 + VM_BUG_ON_PAGE(page_count(page) != 0, page); 214 214 VM_BUG_ON(count == 0); 215 215 216 216 atomic_set(&page->_count, count);
+1
include/linux/parser.h
··· 29 29 int match_int(substring_t *, int *result); 30 30 int match_octal(substring_t *, int *result); 31 31 int match_hex(substring_t *, int *result); 32 + bool match_wildcard(const char *pattern, const char *str); 32 33 size_t match_strlcpy(char *, const substring_t *, size_t); 33 34 char *match_strdup(const substring_t *);
+1
include/linux/percpu.h
··· 1 1 #ifndef __LINUX_PERCPU_H 2 2 #define __LINUX_PERCPU_H 3 3 4 + #include <linux/mmdebug.h> 4 5 #include <linux/preempt.h> 5 6 #include <linux/smp.h> 6 7 #include <linux/cpumask.h>
+10 -9
include/linux/printk.h
··· 5 5 #include <linux/init.h> 6 6 #include <linux/kern_levels.h> 7 7 #include <linux/linkage.h> 8 + #include <linux/cache.h> 8 9 9 10 extern const char linux_banner[]; 10 11 extern const char linux_proc_banner[]; ··· 254 253 */ 255 254 256 255 #ifdef CONFIG_PRINTK 257 - #define printk_once(fmt, ...) \ 258 - ({ \ 259 - static bool __print_once; \ 260 - \ 261 - if (!__print_once) { \ 262 - __print_once = true; \ 263 - printk(fmt, ##__VA_ARGS__); \ 264 - } \ 256 + #define printk_once(fmt, ...) \ 257 + ({ \ 258 + static bool __print_once __read_mostly; \ 259 + \ 260 + if (!__print_once) { \ 261 + __print_once = true; \ 262 + printk(fmt, ##__VA_ARGS__); \ 263 + } \ 265 264 }) 266 265 #else 267 - #define printk_once(fmt, ...) \ 266 + #define printk_once(fmt, ...) \ 268 267 no_printk(fmt, ##__VA_ARGS__) 269 268 #endif 270 269
-7
include/linux/ramfs.h
··· 14 14 } 15 15 #else 16 16 extern int ramfs_nommu_expand_for_mapping(struct inode *inode, size_t newsize); 17 - extern unsigned long ramfs_nommu_get_unmapped_area(struct file *file, 18 - unsigned long addr, 19 - unsigned long len, 20 - unsigned long pgoff, 21 - unsigned long flags); 22 - 23 - extern int ramfs_nommu_mmap(struct file *file, struct vm_area_struct *vma); 24 17 #endif 25 18 26 19 extern const struct file_operations ramfs_file_operations;
+19 -11
include/linux/sched.h
··· 229 229 /* get_task_state() */ 230 230 #define TASK_REPORT (TASK_RUNNING | TASK_INTERRUPTIBLE | \ 231 231 TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \ 232 - __TASK_TRACED) 232 + __TASK_TRACED | EXIT_ZOMBIE | EXIT_DEAD) 233 233 234 234 #define task_is_traced(task) ((task->state & __TASK_TRACED) != 0) 235 235 #define task_is_stopped(task) ((task->state & __TASK_STOPPED) != 0) ··· 391 391 static inline void arch_pick_mmap_layout(struct mm_struct *mm) {} 392 392 #endif 393 393 394 - 395 - extern void set_dumpable(struct mm_struct *mm, int value); 396 - extern int get_dumpable(struct mm_struct *mm); 397 - 398 394 #define SUID_DUMP_DISABLE 0 /* No setuid dumping */ 399 395 #define SUID_DUMP_USER 1 /* Dump as user of process */ 400 396 #define SUID_DUMP_ROOT 2 /* Dump as root */ 401 397 402 398 /* mm flags */ 403 - /* dumpable bits */ 404 - #define MMF_DUMPABLE 0 /* core dump is permitted */ 405 - #define MMF_DUMP_SECURELY 1 /* core file is readable only by root */ 406 399 400 + /* for SUID_DUMP_* above */ 407 401 #define MMF_DUMPABLE_BITS 2 408 402 #define MMF_DUMPABLE_MASK ((1 << MMF_DUMPABLE_BITS) - 1) 403 + 404 + extern void set_dumpable(struct mm_struct *mm, int value); 405 + /* 406 + * This returns the actual value of the suid_dumpable flag. For things 407 + * that are using this for checking for privilege transitions, it must 408 + * test against SUID_DUMP_USER rather than treating it as a boolean 409 + * value. 
410 + */ 411 + static inline int __get_dumpable(unsigned long mm_flags) 412 + { 413 + return mm_flags & MMF_DUMPABLE_MASK; 414 + } 415 + 416 + static inline int get_dumpable(struct mm_struct *mm) 417 + { 418 + return __get_dumpable(mm->flags); 419 + } 409 420 410 421 /* coredump filter bits */ 411 422 #define MMF_DUMP_ANON_PRIVATE 2 ··· 1239 1228 /* Used for emulating ABI behavior of previous Linux versions */ 1240 1229 unsigned int personality; 1241 1230 1242 - unsigned did_exec:1; 1243 1231 unsigned in_execve:1; /* Tell the LSMs that the process is doing an 1244 1232 * execve */ 1245 1233 unsigned in_iowait:1; ··· 2294 2284 extern struct mm_struct *mm_access(struct task_struct *task, unsigned int mode); 2295 2285 /* Remove the current tasks stale references to the old mm_struct */ 2296 2286 extern void mm_release(struct task_struct *, struct mm_struct *); 2297 - /* Allocate a new mm structure and copy contents from tsk->mm */ 2298 - extern struct mm_struct *dup_mm(struct task_struct *tsk); 2299 2287 2300 2288 extern int copy_thread(unsigned long, unsigned long, unsigned long, 2301 2289 struct task_struct *);
+4
include/linux/sched/sysctl.h
··· 99 99 void __user *buffer, size_t *lenp, 100 100 loff_t *ppos); 101 101 102 + extern int sysctl_numa_balancing(struct ctl_table *table, int write, 103 + void __user *buffer, size_t *lenp, 104 + loff_t *ppos); 105 + 102 106 #endif /* _SCHED_SYSCTL_H */
+7 -2
include/linux/slab.h
··· 513 513 * 514 514 * Both the root cache and the child caches will have it. For the root cache, 515 515 * this will hold a dynamically allocated array large enough to hold 516 - * information about the currently limited memcgs in the system. 516 + * information about the currently limited memcgs in the system. To allow the 517 + * array to be accessed without taking any locks, on relocation we free the old 518 + * version only after a grace period. 517 519 * 518 520 * Child caches will hold extra metadata needed for its operation. Fields are: 519 521 * ··· 530 528 struct memcg_cache_params { 531 529 bool is_root_cache; 532 530 union { 533 - struct kmem_cache *memcg_caches[0]; 531 + struct { 532 + struct rcu_head rcu_head; 533 + struct kmem_cache *memcg_caches[0]; 534 + }; 534 535 struct { 535 536 struct mem_cgroup *memcg; 536 537 struct list_head list;
+1
include/linux/w1-gpio.h
··· 20 20 unsigned int is_open_drain:1; 21 21 void (*enable_external_pullup)(int enable); 22 22 unsigned int ext_pullup_enable_pin; 23 + unsigned int pullup_duration; 23 24 }; 24 25 25 26 #endif /* _LINUX_W1_GPIO_H */
+1 -2
include/uapi/asm-generic/types.h
··· 1 1 #ifndef _ASM_GENERIC_TYPES_H 2 2 #define _ASM_GENERIC_TYPES_H 3 3 /* 4 - * int-ll64 is used practically everywhere now, 5 - * so use it as a reasonable default. 4 + * int-ll64 is used everywhere now. 6 5 */ 7 6 #include <asm-generic/int-ll64.h> 8 7
+1
include/uapi/linux/dn.h
··· 1 1 #ifndef _LINUX_DN_H 2 2 #define _LINUX_DN_H 3 3 4 + #include <linux/ioctl.h> 4 5 #include <linux/types.h> 5 6 #include <linux/if_ether.h> 6 7
+1 -1
include/uapi/linux/nfs4.h
··· 150 150 #define NFS4_SECINFO_STYLE4_CURRENT_FH 0 151 151 #define NFS4_SECINFO_STYLE4_PARENT 1 152 152 153 - #define NFS4_MAX_UINT64 (~(u64)0) 153 + #define NFS4_MAX_UINT64 (~(__u64)0) 154 154 155 155 /* An NFS4 sessions server must support at least NFS4_MAX_OPS operations. 156 156 * If a compound requires more operations, adjust NFS4_MAX_OPS accordingly.
+1 -1
include/uapi/linux/perf_event.h
··· 788 788 #define PERF_MEM_TLB_SHIFT 26 789 789 790 790 #define PERF_MEM_S(a, s) \ 791 - (((u64)PERF_MEM_##a##_##s) << PERF_MEM_##a##_SHIFT) 791 + (((__u64)PERF_MEM_##a##_##s) << PERF_MEM_##a##_SHIFT) 792 792 793 793 /* 794 794 * single taken branch record layout:
+1
include/uapi/linux/ppp-ioctl.h
··· 12 12 13 13 #include <linux/types.h> 14 14 #include <linux/compiler.h> 15 + #include <linux/ppp_defs.h> 15 16 16 17 /* 17 18 * Bit definitions for flags argument to PPPIOCGFLAGS/PPPIOCSFLAGS.
+1 -1
init/initramfs.c
··· 583 583 { 584 584 char *err = unpack_to_rootfs(__initramfs_start, __initramfs_size); 585 585 if (err) 586 - panic(err); /* Failed to decompress INTERNAL initramfs */ 586 + panic("%s", err); /* Failed to decompress INTERNAL initramfs */ 587 587 if (initrd_start) { 588 588 #ifdef CONFIG_BLK_DEV_RAM 589 589 int fd;
+4 -7
init/main.c
··· 99 99 static inline void mark_rodata_ro(void) { } 100 100 #endif 101 101 102 - #ifdef CONFIG_TC 103 - extern void tc_init(void); 104 - #endif 105 - 106 102 /* 107 103 * Debug helper: via this flag we know that we are in 'early bootup code' 108 104 * where only the boot processor is running with IRQ disabled. This means ··· 278 282 unsigned int i; 279 283 for (i = 0; envp_init[i]; i++) { 280 284 if (i == MAX_INIT_ENVS) { 281 - panic_later = "Too many boot env vars at `%s'"; 285 + panic_later = "env"; 282 286 panic_param = param; 283 287 } 284 288 if (!strncmp(param, envp_init[i], val - param)) ··· 290 294 unsigned int i; 291 295 for (i = 0; argv_init[i]; i++) { 292 296 if (i == MAX_INIT_ARGS) { 293 - panic_later = "Too many boot init vars at `%s'"; 297 + panic_later = "init"; 294 298 panic_param = param; 295 299 } 296 300 } ··· 582 586 */ 583 587 console_init(); 584 588 if (panic_later) 585 - panic(panic_later, panic_param); 589 + panic("Too many boot %s vars at `%s'", panic_later, 590 + panic_param); 586 591 587 592 lockdep_info(); 588 593
+3 -7
kernel/fork.c
··· 800 800 * Allocate a new mm structure and copy contents from the 801 801 * mm structure of the passed in task structure. 802 802 */ 803 - struct mm_struct *dup_mm(struct task_struct *tsk) 803 + static struct mm_struct *dup_mm(struct task_struct *tsk) 804 804 { 805 805 struct mm_struct *mm, *oldmm = current->mm; 806 806 int err; 807 - 808 - if (!oldmm) 809 - return NULL; 810 807 811 808 mm = allocate_mm(); 812 809 if (!mm) ··· 1226 1229 if (!try_module_get(task_thread_info(p)->exec_domain->module)) 1227 1230 goto bad_fork_cleanup_count; 1228 1231 1229 - p->did_exec = 0; 1230 1232 delayacct_tsk_init(p); /* Must remain after dup_task_struct() */ 1231 1233 copy_flags(clone_flags, p); 1232 1234 INIT_LIST_HEAD(&p->children); ··· 1650 1654 return do_fork(SIGCHLD, 0, 0, NULL, NULL); 1651 1655 #else 1652 1656 /* can not support in nommu mode */ 1653 - return(-EINVAL); 1657 + return -EINVAL; 1654 1658 #endif 1655 1659 } 1656 1660 #endif ··· 1658 1662 #ifdef __ARCH_WANT_SYS_VFORK 1659 1663 SYSCALL_DEFINE0(vfork) 1660 1664 { 1661 - return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, 0, 1665 + return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, 0, 1662 1666 0, NULL, NULL); 1663 1667 } 1664 1668 #endif
+2 -1
kernel/kexec.c
··· 932 932 */ 933 933 struct kimage *kexec_image; 934 934 struct kimage *kexec_crash_image; 935 + int kexec_load_disabled; 935 936 936 937 static DEFINE_MUTEX(kexec_mutex); 937 938 ··· 943 942 int result; 944 943 945 944 /* We only trust the superuser with rebooting the system. */ 946 - if (!capable(CAP_SYS_BOOT)) 945 + if (!capable(CAP_SYS_BOOT) || kexec_load_disabled) 947 946 return -EPERM; 948 947 949 948 /*
+1 -1
kernel/ksysfs.c
··· 126 126 { 127 127 return sprintf(buf, "%lx %x\n", 128 128 paddr_vmcoreinfo_note(), 129 - (unsigned int)vmcoreinfo_max_size); 129 + (unsigned int)sizeof(vmcoreinfo_note)); 130 130 } 131 131 KERNEL_ATTR_RO(vmcoreinfo); 132 132
+6 -3
kernel/printk/printk.c
··· 1595 1595 * either merge it with the current buffer and flush, or if 1596 1596 * there was a race with interrupts (prefix == true) then just 1597 1597 * flush it out and store this line separately. 1598 + * If the preceding printk was from a different task and missed 1599 + * a newline, flush and append the newline. 1598 1600 */ 1599 - if (cont.len && cont.owner == current) { 1600 - if (!(lflags & LOG_PREFIX)) 1601 - stored = cont_add(facility, level, text, text_len); 1601 + if (cont.len) { 1602 + if (cont.owner == current && !(lflags & LOG_PREFIX)) 1603 + stored = cont_add(facility, level, text, 1604 + text_len); 1602 1605 cont_flush(LOG_NEWLINE); 1603 1606 } 1604 1607
+23 -1
kernel/sched/core.c
··· 1770 1770 numabalancing_enabled = enabled; 1771 1771 } 1772 1772 #endif /* CONFIG_SCHED_DEBUG */ 1773 - #endif /* CONFIG_NUMA_BALANCING */ 1773 + 1774 + #ifdef CONFIG_PROC_SYSCTL 1775 + int sysctl_numa_balancing(struct ctl_table *table, int write, 1776 + void __user *buffer, size_t *lenp, loff_t *ppos) 1777 + { 1778 + struct ctl_table t; 1779 + int err; 1780 + int state = numabalancing_enabled; 1781 + 1782 + if (write && !capable(CAP_SYS_ADMIN)) 1783 + return -EPERM; 1784 + 1785 + t = *table; 1786 + t.data = &state; 1787 + err = proc_dointvec_minmax(&t, write, buffer, lenp, ppos); 1788 + if (err < 0) 1789 + return err; 1790 + if (write) 1791 + set_numabalancing_state(state); 1792 + return err; 1793 + } 1794 + #endif 1795 + #endif 1774 1796 1775 1797 /* 1776 1798 * fork()/clone()-time setup:
+3 -4
kernel/signal.c
··· 2047 2047 if (task_set_jobctl_pending(current, signr | gstop)) 2048 2048 sig->group_stop_count++; 2049 2049 2050 - for (t = next_thread(current); t != current; 2051 - t = next_thread(t)) { 2050 + t = current; 2051 + while_each_thread(current, t) { 2052 2052 /* 2053 2053 * Setting state to TASK_STOPPED for a group 2054 2054 * stop is always done with the siglock held, ··· 3125 3125 rm_from_queue_full(&mask, &t->signal->shared_pending); 3126 3126 do { 3127 3127 rm_from_queue_full(&mask, &t->pending); 3128 - t = next_thread(t); 3129 - } while (t != current); 3128 + } while_each_thread(current, t); 3130 3129 } 3131 3130 } 3132 3131
+3 -5
kernel/sys.c
··· 895 895 * only important on a multi-user system anyway, to make sure one user 896 896 * can't send a signal to a process owned by another. -TYT, 12/12/91 897 897 * 898 - * Auch. Had to add the 'did_exec' flag to conform completely to POSIX. 899 - * LBT 04.03.94 898 + * !PF_FORKNOEXEC check to conform completely to POSIX. 900 899 */ 901 900 SYSCALL_DEFINE2(setpgid, pid_t, pid, pid_t, pgid) 902 901 { ··· 931 932 if (task_session(p) != task_session(group_leader)) 932 933 goto out; 933 934 err = -EACCES; 934 - if (p->did_exec) 935 + if (!(p->flags & PF_FORKNOEXEC)) 935 936 goto out; 936 937 } else { 937 938 err = -ESRCH; ··· 1571 1572 t = p; 1572 1573 do { 1573 1574 accumulate_thread_rusage(t, r); 1574 - t = next_thread(t); 1575 - } while (t != p); 1575 + } while_each_thread(p, t); 1576 1576 break; 1577 1577 1578 1578 default:
+22
kernel/sysctl.c
··· 62 62 #include <linux/capability.h> 63 63 #include <linux/binfmts.h> 64 64 #include <linux/sched/sysctl.h> 65 + #include <linux/kexec.h> 65 66 66 67 #include <asm/uaccess.h> 67 68 #include <asm/processor.h> ··· 390 389 .mode = 0644, 391 390 .proc_handler = proc_dointvec, 392 391 }, 392 + { 393 + .procname = "numa_balancing", 394 + .data = NULL, /* filled in by handler */ 395 + .maxlen = sizeof(unsigned int), 396 + .mode = 0644, 397 + .proc_handler = sysctl_numa_balancing, 398 + .extra1 = &zero, 399 + .extra2 = &one, 400 + }, 393 401 #endif /* CONFIG_NUMA_BALANCING */ 394 402 #endif /* CONFIG_SCHED_DEBUG */ 395 403 { ··· 613 603 .maxlen = sizeof(__disable_trace_on_warning), 614 604 .mode = 0644, 615 605 .proc_handler = proc_dointvec, 606 + }, 607 + #endif 608 + #ifdef CONFIG_KEXEC 609 + { 610 + .procname = "kexec_load_disabled", 611 + .data = &kexec_load_disabled, 612 + .maxlen = sizeof(int), 613 + .mode = 0644, 614 + /* only handle a transition from default "0" to "1" */ 615 + .proc_handler = proc_dointvec_minmax, 616 + .extra1 = &one, 617 + .extra2 = &one, 616 618 }, 617 619 #endif 618 620 #ifdef CONFIG_MODULES
+27
lib/Kconfig.debug
··· 1595 1595 1596 1596 If unsure, say N. 1597 1597 1598 + config TEST_MODULE 1599 + tristate "Test module loading with 'hello world' module" 1600 + default n 1601 + depends on m 1602 + help 1603 + This builds the "test_module" module that emits "Hello, world" 1604 + on printk when loaded. It is designed to be used for basic 1605 + evaluation of the module loading subsystem (for example when 1606 + validating module verification). It lacks any extra dependencies, 1607 + and will not normally be loaded by the system unless explicitly 1608 + requested by name. 1609 + 1610 + If unsure, say N. 1611 + 1612 + config TEST_USER_COPY 1613 + tristate "Test user/kernel boundary protections" 1614 + default n 1615 + depends on m 1616 + help 1617 + This builds the "test_user_copy" module that runs sanity checks 1618 + on the copy_to/from_user infrastructure, making sure basic 1619 + user/kernel boundary testing is working. If it fails to load, 1620 + a regression has been detected in the user/kernel memory boundary 1621 + protections. 1622 + 1623 + If unsure, say N. 1624 + 1598 1625 source "samples/Kconfig" 1599 1626 1600 1627 source "lib/Kconfig.kgdb"
+2
lib/Makefile
··· 31 31 obj-$(CONFIG_TEST_STRING_HELPERS) += test-string_helpers.o 32 32 obj-y += kstrtox.o 33 33 obj-$(CONFIG_TEST_KSTRTOX) += test-kstrtox.o 34 + obj-$(CONFIG_TEST_MODULE) += test_module.o 35 + obj-$(CONFIG_TEST_USER_COPY) += test_user_copy.o 34 36 35 37 ifeq ($(CONFIG_DEBUG_KOBJECT),y) 36 38 CFLAGS_kobject.o += -DDEBUG
+6 -8
lib/cmdline.c
··· 49 49 * 3 - hyphen found to denote a range 50 50 */ 51 51 52 - int get_option (char **str, int *pint) 52 + int get_option(char **str, int *pint) 53 53 { 54 54 char *cur = *str; 55 55 56 56 if (!cur || !(*cur)) 57 57 return 0; 58 - *pint = simple_strtol (cur, str, 0); 58 + *pint = simple_strtol(cur, str, 0); 59 59 if (cur == *str) 60 60 return 0; 61 61 if (**str == ',') { ··· 67 67 68 68 return 1; 69 69 } 70 + EXPORT_SYMBOL(get_option); 70 71 71 72 /** 72 73 * get_options - Parse a string into a list of integers ··· 85 84 * the parse to end (typically a null terminator, if @str is 86 85 * completely parseable). 87 86 */ 88 - 87 + 89 88 char *get_options(const char *str, int nints, int *ints) 90 89 { 91 90 int res, i = 1; 92 91 93 92 while (i < nints) { 94 - res = get_option ((char **)&str, ints + i); 93 + res = get_option((char **)&str, ints + i); 95 94 if (res == 0) 96 95 break; 97 96 if (res == 3) { ··· 113 112 ints[0] = i - 1; 114 113 return (char *)str; 115 114 } 115 + EXPORT_SYMBOL(get_options); 116 116 117 117 /** 118 118 * memparse - parse a string with mem suffixes into a number ··· 154 152 155 153 return ret; 156 154 } 157 - 158 - 159 155 EXPORT_SYMBOL(memparse); 160 - EXPORT_SYMBOL(get_option); 161 - EXPORT_SYMBOL(get_options);
+1
lib/decompress_unlz4.c
··· 141 141 goto exit_2; 142 142 } 143 143 144 + ret = -1; 144 145 if (flush && flush(outp, dest_len) != dest_len) 145 146 goto exit_2; 146 147 if (output)
+10 -5
lib/dynamic_debug.c
··· 8 8 * By Greg Banks <gnb@melbourne.sgi.com> 9 9 * Copyright (c) 2008 Silicon Graphics Inc. All Rights Reserved. 10 10 * Copyright (C) 2011 Bart Van Assche. All Rights Reserved. 11 + * Copyright (C) 2013 Du, Changbin <changbin.du@gmail.com> 11 12 */ 12 13 13 14 #define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__ ··· 25 24 #include <linux/sysctl.h> 26 25 #include <linux/ctype.h> 27 26 #include <linux/string.h> 27 + #include <linux/parser.h> 28 28 #include <linux/string_helpers.h> 29 29 #include <linux/uaccess.h> 30 30 #include <linux/dynamic_debug.h> ··· 149 147 list_for_each_entry(dt, &ddebug_tables, link) { 150 148 151 149 /* match against the module name */ 152 - if (query->module && strcmp(query->module, dt->mod_name)) 150 + if (query->module && 151 + !match_wildcard(query->module, dt->mod_name)) 153 152 continue; 154 153 155 154 for (i = 0; i < dt->num_ddebugs; i++) { ··· 158 155 159 156 /* match against the source filename */ 160 157 if (query->filename && 161 - strcmp(query->filename, dp->filename) && 162 - strcmp(query->filename, kbasename(dp->filename)) && 163 - strcmp(query->filename, trim_prefix(dp->filename))) 158 + !match_wildcard(query->filename, dp->filename) && 159 + !match_wildcard(query->filename, 160 + kbasename(dp->filename)) && 161 + !match_wildcard(query->filename, 162 + trim_prefix(dp->filename))) 164 163 continue; 165 164 166 165 /* match against the function */ 167 166 if (query->function && 168 - strcmp(query->function, dp->function)) 167 + !match_wildcard(query->function, dp->function)) 169 168 continue; 170 169 171 170 /* match against the format */
-1
lib/kstrtox.c
··· 92 92 rv = _parse_integer(s, base, &_res); 93 93 if (rv & KSTRTOX_OVERFLOW) 94 94 return -ERANGE; 95 - rv &= ~KSTRTOX_OVERFLOW; 96 95 if (rv == 0) 97 96 return -EINVAL; 98 97 s += rv;
+56 -6
lib/parser.c
··· 113 113 114 114 return p->token; 115 115 } 116 + EXPORT_SYMBOL(match_token); 116 117 117 118 /** 118 119 * match_number: scan a number in the given base from a substring_t ··· 164 163 { 165 164 return match_number(s, result, 0); 166 165 } 166 + EXPORT_SYMBOL(match_int); 167 167 168 168 /** 169 169 * match_octal: - scan an octal representation of an integer from a substring_t ··· 179 177 { 180 178 return match_number(s, result, 8); 181 179 } 180 + EXPORT_SYMBOL(match_octal); 182 181 183 182 /** 184 183 * match_hex: - scan a hex representation of an integer from a substring_t ··· 194 191 { 195 192 return match_number(s, result, 16); 196 193 } 194 + EXPORT_SYMBOL(match_hex); 195 + 196 + /** 197 + * match_wildcard: - parse if a string matches given wildcard pattern 198 + * @pattern: wildcard pattern 199 + * @str: the string to be parsed 200 + * 201 + * Description: Parse the string @str to check if it matches wildcard 202 + * pattern @pattern. The pattern may contain two types of wildcards: 203 + * '*' - matches zero or more characters 204 + * '?' - matches one character 205 + * If it's matched, return true, else return false. 
206 + */ 207 + bool match_wildcard(const char *pattern, const char *str) 208 + { 209 + const char *s = str; 210 + const char *p = pattern; 211 + bool star = false; 212 + 213 + while (*s) { 214 + switch (*p) { 215 + case '?': 216 + s++; 217 + p++; 218 + break; 219 + case '*': 220 + star = true; 221 + str = s; 222 + if (!*++p) 223 + return true; 224 + pattern = p; 225 + break; 226 + default: 227 + if (*s == *p) { 228 + s++; 229 + p++; 230 + } else { 231 + if (!star) 232 + return false; 233 + str++; 234 + s = str; 235 + p = pattern; 236 + } 237 + break; 238 + } 239 + } 240 + 241 + if (*p == '*') 242 + ++p; 243 + return !*p; 244 + } 245 + EXPORT_SYMBOL(match_wildcard); 197 246 198 247 /** 199 248 * match_strlcpy: - Copy the characters from a substring_t to a sized buffer ··· 268 213 } 269 214 return ret; 270 215 } 216 + EXPORT_SYMBOL(match_strlcpy); 271 217 272 218 /** 273 219 * match_strdup: - allocate a new string with the contents of a substring_t ··· 286 230 match_strlcpy(p, s, sz); 287 231 return p; 288 232 } 289 - 290 - EXPORT_SYMBOL(match_token); 291 - EXPORT_SYMBOL(match_int); 292 - EXPORT_SYMBOL(match_octal); 293 - EXPORT_SYMBOL(match_hex); 294 - EXPORT_SYMBOL(match_strlcpy); 295 233 EXPORT_SYMBOL(match_strdup);
+12 -1
lib/rbtree_test.c
··· 8 8 #define CHECK_LOOPS 100 9 9 10 10 struct test_node { 11 - struct rb_node rb; 12 11 u32 key; 12 + struct rb_node rb; 13 13 14 14 /* following fields used for testing augmented rbtree functionality */ 15 15 u32 val; ··· 114 114 return count; 115 115 } 116 116 117 + static void check_postorder_foreach(int nr_nodes) 118 + { 119 + struct test_node *cur, *n; 120 + int count = 0; 121 + rbtree_postorder_for_each_entry_safe(cur, n, &root, rb) 122 + count++; 123 + 124 + WARN_ON_ONCE(count != nr_nodes); 125 + } 126 + 117 127 static void check_postorder(int nr_nodes) 118 128 { 119 129 struct rb_node *rb; ··· 158 148 WARN_ON_ONCE(count < (1 << black_path_count(rb_last(&root))) - 1); 159 149 160 150 check_postorder(nr_nodes); 151 + check_postorder_foreach(nr_nodes); 161 152 } 162 153 163 154 static void check_augmented(int nr_nodes)
+33
lib/test_module.c
··· 1 + /* 2 + * This module emits "Hello, world" on printk when loaded. 3 + * 4 + * It is designed to be used for basic evaluation of the module loading 5 + * subsystem (for example when validating module signing/verification). It 6 + * lacks any extra dependencies, and will not normally be loaded by the 7 + * system unless explicitly requested by name. 8 + */ 9 + 10 + #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 11 + 12 + #include <linux/init.h> 13 + #include <linux/module.h> 14 + #include <linux/printk.h> 15 + 16 + static int __init test_module_init(void) 17 + { 18 + pr_warn("Hello, world\n"); 19 + 20 + return 0; 21 + } 22 + 23 + module_init(test_module_init); 24 + 25 + static void __exit test_module_exit(void) 26 + { 27 + pr_warn("Goodbye\n"); 28 + } 29 + 30 + module_exit(test_module_exit); 31 + 32 + MODULE_AUTHOR("Kees Cook <keescook@chromium.org>"); 33 + MODULE_LICENSE("GPL");
+110
lib/test_user_copy.c
··· 1 + /* 2 + * Kernel module for testing copy_to/from_user infrastructure. 3 + * 4 + * Copyright 2013 Google Inc. All Rights Reserved 5 + * 6 + * Authors: 7 + * Kees Cook <keescook@chromium.org> 8 + * 9 + * This software is licensed under the terms of the GNU General Public 10 + * License version 2, as published by the Free Software Foundation, and 11 + * may be copied, distributed, and modified under those terms. 12 + * 13 + * This program is distributed in the hope that it will be useful, 14 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 15 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 16 + * GNU General Public License for more details. 17 + */ 18 + 19 + #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 20 + 21 + #include <linux/mman.h> 22 + #include <linux/module.h> 23 + #include <linux/sched.h> 24 + #include <linux/slab.h> 25 + #include <linux/uaccess.h> 26 + #include <linux/vmalloc.h> 27 + 28 + #define test(condition, msg) \ 29 + ({ \ 30 + int cond = (condition); \ 31 + if (cond) \ 32 + pr_warn("%s\n", msg); \ 33 + cond; \ 34 + }) 35 + 36 + static int __init test_user_copy_init(void) 37 + { 38 + int ret = 0; 39 + char *kmem; 40 + char __user *usermem; 41 + char *bad_usermem; 42 + unsigned long user_addr; 43 + unsigned long value = 0x5A; 44 + 45 + kmem = kmalloc(PAGE_SIZE * 2, GFP_KERNEL); 46 + if (!kmem) 47 + return -ENOMEM; 48 + 49 + user_addr = vm_mmap(NULL, 0, PAGE_SIZE * 2, 50 + PROT_READ | PROT_WRITE | PROT_EXEC, 51 + MAP_ANONYMOUS | MAP_PRIVATE, 0); 52 + if (user_addr >= (unsigned long)(TASK_SIZE)) { 53 + pr_warn("Failed to allocate user memory\n"); 54 + kfree(kmem); 55 + return -ENOMEM; 56 + } 57 + 58 + usermem = (char __user *)user_addr; 59 + bad_usermem = (char *)user_addr; 60 + 61 + /* Legitimate usage: none of these should fail. 
*/ 62 + ret |= test(copy_from_user(kmem, usermem, PAGE_SIZE), 63 + "legitimate copy_from_user failed"); 64 + ret |= test(copy_to_user(usermem, kmem, PAGE_SIZE), 65 + "legitimate copy_to_user failed"); 66 + ret |= test(get_user(value, (unsigned long __user *)usermem), 67 + "legitimate get_user failed"); 68 + ret |= test(put_user(value, (unsigned long __user *)usermem), 69 + "legitimate put_user failed"); 70 + 71 + /* Invalid usage: none of these should succeed. */ 72 + ret |= test(!copy_from_user(kmem, (char __user *)(kmem + PAGE_SIZE), 73 + PAGE_SIZE), 74 + "illegal all-kernel copy_from_user passed"); 75 + ret |= test(!copy_from_user(bad_usermem, (char __user *)kmem, 76 + PAGE_SIZE), 77 + "illegal reversed copy_from_user passed"); 78 + ret |= test(!copy_to_user((char __user *)kmem, kmem + PAGE_SIZE, 79 + PAGE_SIZE), 80 + "illegal all-kernel copy_to_user passed"); 81 + ret |= test(!copy_to_user((char __user *)kmem, bad_usermem, 82 + PAGE_SIZE), 83 + "illegal reversed copy_to_user passed"); 84 + ret |= test(!get_user(value, (unsigned long __user *)kmem), 85 + "illegal get_user passed"); 86 + ret |= test(!put_user(value, (unsigned long __user *)kmem), 87 + "illegal put_user passed"); 88 + 89 + vm_munmap(user_addr, PAGE_SIZE * 2); 90 + kfree(kmem); 91 + 92 + if (ret == 0) { 93 + pr_info("tests passed.\n"); 94 + return 0; 95 + } 96 + 97 + return -EINVAL; 98 + } 99 + 100 + module_init(test_user_copy_init); 101 + 102 + static void __exit test_user_copy_exit(void) 103 + { 104 + pr_info("unloaded.\n"); 105 + } 106 + 107 + module_exit(test_user_copy_exit); 108 + 109 + MODULE_AUTHOR("Kees Cook <keescook@chromium.org>"); 110 + MODULE_LICENSE("GPL");
+27 -6
lib/vsprintf.c
··· 1155 1155 return number(buf, end, *(const netdev_features_t *)addr, spec); 1156 1156 } 1157 1157 1158 + static noinline_for_stack 1159 + char *address_val(char *buf, char *end, const void *addr, 1160 + struct printf_spec spec, const char *fmt) 1161 + { 1162 + unsigned long long num; 1163 + 1164 + spec.flags |= SPECIAL | SMALL | ZEROPAD; 1165 + spec.base = 16; 1166 + 1167 + switch (fmt[1]) { 1168 + case 'd': 1169 + num = *(const dma_addr_t *)addr; 1170 + spec.field_width = sizeof(dma_addr_t) * 2 + 2; 1171 + break; 1172 + case 'p': 1173 + default: 1174 + num = *(const phys_addr_t *)addr; 1175 + spec.field_width = sizeof(phys_addr_t) * 2 + 2; 1176 + break; 1177 + } 1178 + 1179 + return number(buf, end, num, spec); 1180 + } 1181 + 1158 1182 int kptr_restrict __read_mostly; 1159 1183 1160 1184 /* ··· 1242 1218 * N no separator 1243 1219 * The maximum supported length is 64 bytes of the input. Consider 1244 1220 * to use print_hex_dump() for the larger input. 1245 - * - 'a' For a phys_addr_t type and its derivative types (passed by reference) 1221 + * - 'a[pd]' For address types [p] phys_addr_t, [d] dma_addr_t and derivatives 1222 + * (default assumed to be phys_addr_t, passed by reference) 1246 1223 * - 'd[234]' For a dentry name (optionally 2-4 last components) 1247 1224 * - 'D[234]' Same as 'd' but for a struct file 1248 1225 * ··· 1378 1353 } 1379 1354 break; 1380 1355 case 'a': 1381 - spec.flags |= SPECIAL | SMALL | ZEROPAD; 1382 - spec.field_width = sizeof(phys_addr_t) * 2 + 2; 1383 - spec.base = 16; 1384 - return number(buf, end, 1385 - (unsigned long long) *((phys_addr_t *)ptr), spec); 1356 + return address_val(buf, end, ptr, spec, fmt); 1386 1357 case 'd': 1387 1358 return dentry_name(buf, end, ptr, spec, fmt); 1388 1359 case 'D':
+2 -2
mm/balloon_compaction.c
··· 267 267 put_page(page); 268 268 } else { 269 269 WARN_ON(1); 270 - dump_page(page); 270 + dump_page(page, "not movable balloon page"); 271 271 } 272 272 unlock_page(page); 273 273 } ··· 287 287 BUG_ON(!trylock_page(newpage)); 288 288 289 289 if (WARN_ON(!__is_movable_balloon_page(page))) { 290 - dump_page(page); 290 + dump_page(page, "not movable balloon page"); 291 291 unlock_page(newpage); 292 292 return rc; 293 293 }
+3 -3
mm/cleancache.c
··· 237 237 goto out; 238 238 } 239 239 240 - VM_BUG_ON(!PageLocked(page)); 240 + VM_BUG_ON_PAGE(!PageLocked(page), page); 241 241 fake_pool_id = page->mapping->host->i_sb->cleancache_poolid; 242 242 if (fake_pool_id < 0) 243 243 goto out; ··· 279 279 return; 280 280 } 281 281 282 - VM_BUG_ON(!PageLocked(page)); 282 + VM_BUG_ON_PAGE(!PageLocked(page), page); 283 283 fake_pool_id = page->mapping->host->i_sb->cleancache_poolid; 284 284 if (fake_pool_id < 0) 285 285 return; ··· 318 318 if (pool_id < 0) 319 319 return; 320 320 321 - VM_BUG_ON(!PageLocked(page)); 321 + VM_BUG_ON_PAGE(!PageLocked(page), page); 322 322 if (cleancache_get_key(mapping->host, &key) >= 0) { 323 323 cleancache_ops->invalidate_page(pool_id, 324 324 key, page->index);
+5 -2
mm/compaction.c
··· 523 523 if (!isolation_suitable(cc, page)) 524 524 goto next_pageblock; 525 525 526 - /* Skip if free */ 526 + /* 527 + * Skip if free. page_order cannot be used without zone->lock 528 + * as nothing prevents parallel allocations or buddy merging. 529 + */ 527 530 if (PageBuddy(page)) 528 531 continue; 529 532 ··· 604 601 if (__isolate_lru_page(page, mode) != 0) 605 602 continue; 606 603 607 - VM_BUG_ON(PageTransCompound(page)); 604 + VM_BUG_ON_PAGE(PageTransCompound(page), page); 608 605 609 606 /* Successfully isolated */ 610 607 cc->finished_update_migrate = true;
+8 -8
mm/filemap.c
··· 409 409 { 410 410 int error; 411 411 412 - VM_BUG_ON(!PageLocked(old)); 413 - VM_BUG_ON(!PageLocked(new)); 414 - VM_BUG_ON(new->mapping); 412 + VM_BUG_ON_PAGE(!PageLocked(old), old); 413 + VM_BUG_ON_PAGE(!PageLocked(new), new); 414 + VM_BUG_ON_PAGE(new->mapping, new); 415 415 416 416 error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM); 417 417 if (!error) { ··· 461 461 { 462 462 int error; 463 463 464 - VM_BUG_ON(!PageLocked(page)); 465 - VM_BUG_ON(PageSwapBacked(page)); 464 + VM_BUG_ON_PAGE(!PageLocked(page), page); 465 + VM_BUG_ON_PAGE(PageSwapBacked(page), page); 466 466 467 467 error = mem_cgroup_cache_charge(page, current->mm, 468 468 gfp_mask & GFP_RECLAIM_MASK); ··· 607 607 */ 608 608 void unlock_page(struct page *page) 609 609 { 610 - VM_BUG_ON(!PageLocked(page)); 610 + VM_BUG_ON_PAGE(!PageLocked(page), page); 611 611 clear_bit_unlock(PG_locked, &page->flags); 612 612 smp_mb__after_clear_bit(); 613 613 wake_up_page(page, PG_locked); ··· 760 760 page_cache_release(page); 761 761 goto repeat; 762 762 } 763 - VM_BUG_ON(page->index != offset); 763 + VM_BUG_ON_PAGE(page->index != offset, page); 764 764 } 765 765 return page; 766 766 } ··· 1656 1656 put_page(page); 1657 1657 goto retry_find; 1658 1658 } 1659 - VM_BUG_ON(page->index != offset); 1659 + VM_BUG_ON_PAGE(page->index != offset, page); 1660 1660 1661 1661 /* 1662 1662 * We have a locked page in the page cache, now we need to check
+26 -20
mm/huge_memory.c
··· 130 130 (unsigned long) nr_free_buffer_pages() / 20); 131 131 recommended_min <<= (PAGE_SHIFT-10); 132 132 133 - if (recommended_min > min_free_kbytes) 133 + if (recommended_min > min_free_kbytes) { 134 + if (user_min_free_kbytes >= 0) 135 + pr_info("raising min_free_kbytes from %d to %lu " 136 + "to help transparent hugepage allocations\n", 137 + min_free_kbytes, recommended_min); 138 + 134 139 min_free_kbytes = recommended_min; 140 + } 135 141 setup_per_zone_wmarks(); 136 142 return 0; 137 143 } ··· 661 655 hugepage_exit_sysfs(hugepage_kobj); 662 656 return err; 663 657 } 664 - module_init(hugepage_init) 658 + subsys_initcall(hugepage_init); 665 659 666 660 static int __init setup_transparent_hugepage(char *str) 667 661 { ··· 718 712 pgtable_t pgtable; 719 713 spinlock_t *ptl; 720 714 721 - VM_BUG_ON(!PageCompound(page)); 715 + VM_BUG_ON_PAGE(!PageCompound(page), page); 722 716 pgtable = pte_alloc_one(mm, haddr); 723 717 if (unlikely(!pgtable)) 724 718 return VM_FAULT_OOM; ··· 899 893 goto out; 900 894 } 901 895 src_page = pmd_page(pmd); 902 - VM_BUG_ON(!PageHead(src_page)); 896 + VM_BUG_ON_PAGE(!PageHead(src_page), src_page); 903 897 get_page(src_page); 904 898 page_dup_rmap(src_page); 905 899 add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR); ··· 1073 1067 ptl = pmd_lock(mm, pmd); 1074 1068 if (unlikely(!pmd_same(*pmd, orig_pmd))) 1075 1069 goto out_free_pages; 1076 - VM_BUG_ON(!PageHead(page)); 1070 + VM_BUG_ON_PAGE(!PageHead(page), page); 1077 1071 1078 1072 pmdp_clear_flush(vma, haddr, pmd); 1079 1073 /* leave pmd empty until pte is filled */ ··· 1139 1133 goto out_unlock; 1140 1134 1141 1135 page = pmd_page(orig_pmd); 1142 - VM_BUG_ON(!PageCompound(page) || !PageHead(page)); 1136 + VM_BUG_ON_PAGE(!PageCompound(page) || !PageHead(page), page); 1143 1137 if (page_mapcount(page) == 1) { 1144 1138 pmd_t entry; 1145 1139 entry = pmd_mkyoung(orig_pmd); ··· 1217 1211 add_mm_counter(mm, MM_ANONPAGES, HPAGE_PMD_NR); 1218 1212 put_huge_zero_page(); 1219 1213 } 
else { 1220 - VM_BUG_ON(!PageHead(page)); 1214 + VM_BUG_ON_PAGE(!PageHead(page), page); 1221 1215 page_remove_rmap(page); 1222 1216 put_page(page); 1223 1217 } ··· 1255 1249 goto out; 1256 1250 1257 1251 page = pmd_page(*pmd); 1258 - VM_BUG_ON(!PageHead(page)); 1252 + VM_BUG_ON_PAGE(!PageHead(page), page); 1259 1253 if (flags & FOLL_TOUCH) { 1260 1254 pmd_t _pmd; 1261 1255 /* ··· 1280 1274 } 1281 1275 } 1282 1276 page += (addr & ~HPAGE_PMD_MASK) >> PAGE_SHIFT; 1283 - VM_BUG_ON(!PageCompound(page)); 1277 + VM_BUG_ON_PAGE(!PageCompound(page), page); 1284 1278 if (flags & FOLL_GET) 1285 1279 get_page_foll(page); 1286 1280 ··· 1438 1432 } else { 1439 1433 page = pmd_page(orig_pmd); 1440 1434 page_remove_rmap(page); 1441 - VM_BUG_ON(page_mapcount(page) < 0); 1435 + VM_BUG_ON_PAGE(page_mapcount(page) < 0, page); 1442 1436 add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR); 1443 - VM_BUG_ON(!PageHead(page)); 1437 + VM_BUG_ON_PAGE(!PageHead(page), page); 1444 1438 atomic_long_dec(&tlb->mm->nr_ptes); 1445 1439 spin_unlock(ptl); 1446 1440 tlb_remove_page(tlb, page); ··· 2182 2176 if (unlikely(!page)) 2183 2177 goto out; 2184 2178 2185 - VM_BUG_ON(PageCompound(page)); 2186 - BUG_ON(!PageAnon(page)); 2187 - VM_BUG_ON(!PageSwapBacked(page)); 2179 + VM_BUG_ON_PAGE(PageCompound(page), page); 2180 + VM_BUG_ON_PAGE(!PageAnon(page), page); 2181 + VM_BUG_ON_PAGE(!PageSwapBacked(page), page); 2188 2182 2189 2183 /* cannot use mapcount: can't collapse if there's a gup pin */ 2190 2184 if (page_count(page) != 1) ··· 2207 2201 } 2208 2202 /* 0 stands for page_is_file_cache(page) == false */ 2209 2203 inc_zone_page_state(page, NR_ISOLATED_ANON + 0); 2210 - VM_BUG_ON(!PageLocked(page)); 2211 - VM_BUG_ON(PageLRU(page)); 2204 + VM_BUG_ON_PAGE(!PageLocked(page), page); 2205 + VM_BUG_ON_PAGE(PageLRU(page), page); 2212 2206 2213 2207 /* If there is no mapped pte young don't collapse the page */ 2214 2208 if (pte_young(pteval) || PageReferenced(page) || ··· 2238 2232 } else { 2239 2233 
src_page = pte_page(pteval); 2240 2234 copy_user_highpage(page, src_page, address, vma); 2241 - VM_BUG_ON(page_mapcount(src_page) != 1); 2235 + VM_BUG_ON_PAGE(page_mapcount(src_page) != 1, src_page); 2242 2236 release_pte_page(src_page); 2243 2237 /* 2244 2238 * ptl mostly unnecessary, but preempt has to ··· 2317 2311 struct vm_area_struct *vma, unsigned long address, 2318 2312 int node) 2319 2313 { 2320 - VM_BUG_ON(*hpage); 2314 + VM_BUG_ON_PAGE(*hpage, *hpage); 2321 2315 /* 2322 2316 * Allocate the page while the vma is still valid and under 2323 2317 * the mmap_sem read mode so there is no memory allocation ··· 2586 2580 */ 2587 2581 node = page_to_nid(page); 2588 2582 khugepaged_node_load[node]++; 2589 - VM_BUG_ON(PageCompound(page)); 2583 + VM_BUG_ON_PAGE(PageCompound(page), page); 2590 2584 if (!PageLRU(page) || PageLocked(page) || !PageAnon(page)) 2591 2585 goto out_unmap; 2592 2586 /* cannot use mapcount: can't collapse if there's a gup pin */ ··· 2882 2876 return; 2883 2877 } 2884 2878 page = pmd_page(*pmd); 2885 - VM_BUG_ON(!page_count(page)); 2879 + VM_BUG_ON_PAGE(!page_count(page), page); 2886 2880 get_page(page); 2887 2881 spin_unlock(ptl); 2888 2882 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
+5 -5
mm/hugetlb.c
··· 584 584 1 << PG_active | 1 << PG_reserved | 585 585 1 << PG_private | 1 << PG_writeback); 586 586 } 587 - VM_BUG_ON(hugetlb_cgroup_from_page(page)); 587 + VM_BUG_ON_PAGE(hugetlb_cgroup_from_page(page), page); 588 588 set_compound_page_dtor(page, NULL); 589 589 set_page_refcounted(page); 590 590 arch_release_hugepage(page); ··· 1089 1089 * no users -- drop the buddy allocator's reference. 1090 1090 */ 1091 1091 put_page_testzero(page); 1092 - VM_BUG_ON(page_count(page)); 1092 + VM_BUG_ON_PAGE(page_count(page), page); 1093 1093 enqueue_huge_page(h, page); 1094 1094 } 1095 1095 free: ··· 3503 3503 3504 3504 bool isolate_huge_page(struct page *page, struct list_head *list) 3505 3505 { 3506 - VM_BUG_ON(!PageHead(page)); 3506 + VM_BUG_ON_PAGE(!PageHead(page), page); 3507 3507 if (!get_page_unless_zero(page)) 3508 3508 return false; 3509 3509 spin_lock(&hugetlb_lock); ··· 3514 3514 3515 3515 void putback_active_hugepage(struct page *page) 3516 3516 { 3517 - VM_BUG_ON(!PageHead(page)); 3517 + VM_BUG_ON_PAGE(!PageHead(page), page); 3518 3518 spin_lock(&hugetlb_lock); 3519 3519 list_move_tail(&page->lru, &(page_hstate(page))->hugepage_activelist); 3520 3520 spin_unlock(&hugetlb_lock); ··· 3523 3523 3524 3524 bool is_hugepage_active(struct page *page) 3525 3525 { 3526 - VM_BUG_ON(!PageHuge(page)); 3526 + VM_BUG_ON_PAGE(!PageHuge(page), page); 3527 3527 /* 3528 3528 * This function can be called for a tail page because the caller, 3529 3529 * scan_movable_pages, scans through a given pfn-range which typically
+1 -1
mm/hugetlb_cgroup.c
··· 390 390 if (hugetlb_cgroup_disabled()) 391 391 return; 392 392 393 - VM_BUG_ON(!PageHuge(oldhpage)); 393 + VM_BUG_ON_PAGE(!PageHuge(oldhpage), oldhpage); 394 394 spin_lock(&hugetlb_lock); 395 395 h_cg = hugetlb_cgroup_from_page(oldhpage); 396 396 set_hugetlb_cgroup(oldhpage, NULL);
+11 -8
mm/internal.h
··· 27 27 */ 28 28 static inline void set_page_refcounted(struct page *page) 29 29 { 30 - VM_BUG_ON(PageTail(page)); 31 - VM_BUG_ON(atomic_read(&page->_count)); 30 + VM_BUG_ON_PAGE(PageTail(page), page); 31 + VM_BUG_ON_PAGE(atomic_read(&page->_count), page); 32 32 set_page_count(page, 1); 33 33 } 34 34 ··· 46 46 * speculative page access (like in 47 47 * page_cache_get_speculative()) on tail pages. 48 48 */ 49 - VM_BUG_ON(atomic_read(&page->first_page->_count) <= 0); 49 + VM_BUG_ON_PAGE(atomic_read(&page->first_page->_count) <= 0, page); 50 50 if (get_page_head) 51 51 atomic_inc(&page->first_page->_count); 52 52 get_huge_page_tail(page); ··· 71 71 * Getting a normal page or the head of a compound page 72 72 * requires to already have an elevated page->_count. 73 73 */ 74 - VM_BUG_ON(atomic_read(&page->_count) <= 0); 74 + VM_BUG_ON_PAGE(atomic_read(&page->_count) <= 0, page); 75 75 atomic_inc(&page->_count); 76 76 } 77 77 } ··· 99 99 #ifdef CONFIG_MEMORY_FAILURE 100 100 extern bool is_free_buddy_page(struct page *page); 101 101 #endif 102 + extern int user_min_free_kbytes; 102 103 103 104 #if defined CONFIG_COMPACTION || defined CONFIG_CMA 104 105 ··· 143 142 #endif 144 143 145 144 /* 146 - * function for dealing with page's order in buddy system. 147 - * zone->lock is already acquired when we use these. 148 - * So, we don't need atomic page->flags operations here. 145 + * This function returns the order of a free page in the buddy system. In 146 + * general, page_zone(page)->lock must be held by the caller to prevent the 147 + * page from being allocated in parallel and returning garbage as the order. 148 + * If a caller does not hold page_zone(page)->lock, it must guarantee that the 149 + * page cannot be allocated or merged in parallel. 
149 150 */ 150 151 static inline unsigned long page_order(struct page *page) 151 152 { ··· 176 173 static inline int mlocked_vma_newpage(struct vm_area_struct *vma, 177 174 struct page *page) 178 175 { 179 - VM_BUG_ON(PageLRU(page)); 176 + VM_BUG_ON_PAGE(PageLRU(page), page); 180 177 181 178 if (likely((vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) != VM_LOCKED)) 182 179 return 0;
+7 -7
mm/ksm.c
··· 1898 1898 int ret = SWAP_AGAIN; 1899 1899 int search_new_forks = 0; 1900 1900 1901 - VM_BUG_ON(!PageKsm(page)); 1901 + VM_BUG_ON_PAGE(!PageKsm(page), page); 1902 1902 1903 1903 /* 1904 1904 * Rely on the page lock to protect against concurrent modifications 1905 1905 * to that page's node of the stable tree. 1906 1906 */ 1907 - VM_BUG_ON(!PageLocked(page)); 1907 + VM_BUG_ON_PAGE(!PageLocked(page), page); 1908 1908 1909 1909 stable_node = page_stable_node(page); 1910 1910 if (!stable_node) ··· 1958 1958 { 1959 1959 struct stable_node *stable_node; 1960 1960 1961 - VM_BUG_ON(!PageLocked(oldpage)); 1962 - VM_BUG_ON(!PageLocked(newpage)); 1963 - VM_BUG_ON(newpage->mapping != oldpage->mapping); 1961 + VM_BUG_ON_PAGE(!PageLocked(oldpage), oldpage); 1962 + VM_BUG_ON_PAGE(!PageLocked(newpage), newpage); 1963 + VM_BUG_ON_PAGE(newpage->mapping != oldpage->mapping, newpage); 1964 1964 1965 1965 stable_node = page_stable_node(newpage); 1966 1966 if (stable_node) { 1967 - VM_BUG_ON(stable_node->kpfn != page_to_pfn(oldpage)); 1967 + VM_BUG_ON_PAGE(stable_node->kpfn != page_to_pfn(oldpage), oldpage); 1968 1968 stable_node->kpfn = page_to_pfn(newpage); 1969 1969 /* 1970 1970 * newpage->mapping was set in advance; now we need smp_wmb() ··· 2345 2345 out: 2346 2346 return err; 2347 2347 } 2348 - module_init(ksm_init) 2348 + subsys_initcall(ksm_init);
+16 -13
mm/memblock.c
··· 266 266 } 267 267 } 268 268 269 + #ifdef CONFIG_ARCH_DISCARD_MEMBLOCK 270 + 269 271 phys_addr_t __init_memblock get_allocated_memblock_reserved_regions_info( 270 272 phys_addr_t *addr) 271 273 { 272 274 if (memblock.reserved.regions == memblock_reserved_init_regions) 273 - return 0; 274 - 275 - /* 276 - * Don't allow nobootmem allocator to free reserved memory regions 277 - * array if 278 - * - CONFIG_DEBUG_FS is enabled; 279 - * - CONFIG_ARCH_DISCARD_MEMBLOCK is not enabled; 280 - * - reserved memory regions array have been resized during boot. 281 - * Otherwise debug_fs entry "sys/kernel/debug/memblock/reserved" 282 - * will show garbage instead of state of memory reservations. 283 - */ 284 - if (IS_ENABLED(CONFIG_DEBUG_FS) && 285 - !IS_ENABLED(CONFIG_ARCH_DISCARD_MEMBLOCK)) 286 275 return 0; 287 276 288 277 *addr = __pa(memblock.reserved.regions); ··· 279 290 return PAGE_ALIGN(sizeof(struct memblock_region) * 280 291 memblock.reserved.max); 281 292 } 293 + 294 + phys_addr_t __init_memblock get_allocated_memblock_memory_regions_info( 295 + phys_addr_t *addr) 296 + { 297 + if (memblock.memory.regions == memblock_memory_init_regions) 298 + return 0; 299 + 300 + *addr = __pa(memblock.memory.regions); 301 + 302 + return PAGE_ALIGN(sizeof(struct memblock_region) * 303 + memblock.memory.max); 304 + } 305 + 306 + #endif 282 307 283 308 /** 284 309 * memblock_double_array - double the size of the memblock regions array
+257 -273
mm/memcontrol.c
··· 49 49 #include <linux/sort.h> 50 50 #include <linux/fs.h> 51 51 #include <linux/seq_file.h> 52 - #include <linux/vmalloc.h> 53 52 #include <linux/vmpressure.h> 54 53 #include <linux/mm_inline.h> 55 54 #include <linux/page_cgroup.h> ··· 149 150 * matches memcg->dead_count of the hierarchy root group. 150 151 */ 151 152 struct mem_cgroup *last_visited; 152 - unsigned long last_dead_count; 153 + int last_dead_count; 153 154 154 155 /* scan generation, increased every round-trip */ 155 156 unsigned int generation; ··· 380 381 /* WARNING: nodeinfo must be the last member here */ 381 382 }; 382 383 383 - static size_t memcg_size(void) 384 - { 385 - return sizeof(struct mem_cgroup) + 386 - nr_node_ids * sizeof(struct mem_cgroup_per_node *); 387 - } 388 - 389 384 /* internal only representation about the status of kmem accounting. */ 390 385 enum { 391 - KMEM_ACCOUNTED_ACTIVE = 0, /* accounted by this cgroup itself */ 392 - KMEM_ACCOUNTED_ACTIVATED, /* static key enabled. */ 386 + KMEM_ACCOUNTED_ACTIVE, /* accounted by this cgroup itself */ 393 387 KMEM_ACCOUNTED_DEAD, /* dead memcg with pending kmem charges */ 394 388 }; 395 - 396 - /* We account when limit is on, but only after call sites are patched */ 397 - #define KMEM_ACCOUNTED_MASK \ 398 - ((1 << KMEM_ACCOUNTED_ACTIVE) | (1 << KMEM_ACCOUNTED_ACTIVATED)) 399 389 400 390 #ifdef CONFIG_MEMCG_KMEM 401 391 static inline void memcg_kmem_set_active(struct mem_cgroup *memcg) ··· 395 407 static bool memcg_kmem_is_active(struct mem_cgroup *memcg) 396 408 { 397 409 return test_bit(KMEM_ACCOUNTED_ACTIVE, &memcg->kmem_account_flags); 398 - } 399 - 400 - static void memcg_kmem_set_activated(struct mem_cgroup *memcg) 401 - { 402 - set_bit(KMEM_ACCOUNTED_ACTIVATED, &memcg->kmem_account_flags); 403 - } 404 - 405 - static void memcg_kmem_clear_activated(struct mem_cgroup *memcg) 406 - { 407 - clear_bit(KMEM_ACCOUNTED_ACTIVATED, &memcg->kmem_account_flags); 408 410 } 409 411 410 412 static void memcg_kmem_mark_dead(struct 
mem_cgroup *memcg) ··· 1117 1139 * skipped and we should continue the tree walk. 1118 1140 * last_visited css is safe to use because it is 1119 1141 * protected by css_get and the tree walk is rcu safe. 1142 + * 1143 + * We do not take a reference on the root of the tree walk 1144 + * because we might race with the root removal when it would 1145 + * be the only node in the iterated hierarchy and mem_cgroup_iter 1146 + * would end up in an endless loop because it expects that at 1147 + * least one valid node will be returned. Root cannot disappear 1148 + * because caller of the iterator should hold it already so 1149 + * skipping css reference should be safe. 1120 1150 */ 1121 1151 if (next_css) { 1122 - struct mem_cgroup *mem = mem_cgroup_from_css(next_css); 1152 + if ((next_css->flags & CSS_ONLINE) && 1153 + (next_css == &root->css || css_tryget(next_css))) 1154 + return mem_cgroup_from_css(next_css); 1123 1155 1124 - if (css_tryget(&mem->css)) 1125 - return mem; 1126 - else { 1127 - prev_css = next_css; 1128 - goto skip_node; 1129 - } 1156 + prev_css = next_css; 1157 + goto skip_node; 1130 1158 } 1131 1159 1132 1160 return NULL; ··· 1166 1182 if (iter->last_dead_count == *sequence) { 1167 1183 smp_rmb(); 1168 1184 position = iter->last_visited; 1169 - if (position && !css_tryget(&position->css)) 1185 + 1186 + /* 1187 + * We cannot take a reference to root because we might race 1188 + * with root removal and returning NULL would end up in 1189 + * an endless loop on the iterator user level when root 1190 + * would be returned all the time. 
1191 + */ 1192 + if (position && position != root && 1193 + !css_tryget(&position->css)) 1170 1194 position = NULL; 1171 1195 } 1172 1196 return position; ··· 1183 1191 static void mem_cgroup_iter_update(struct mem_cgroup_reclaim_iter *iter, 1184 1192 struct mem_cgroup *last_visited, 1185 1193 struct mem_cgroup *new_position, 1194 + struct mem_cgroup *root, 1186 1195 int sequence) 1187 1196 { 1188 - if (last_visited) 1197 + /* root reference counting symmetric to mem_cgroup_iter_load */ 1198 + if (last_visited && last_visited != root) 1189 1199 css_put(&last_visited->css); 1190 1200 /* 1191 1201 * We store the sequence count from the time @last_visited was ··· 1262 1268 memcg = __mem_cgroup_iter_next(root, last_visited); 1263 1269 1264 1270 if (reclaim) { 1265 - mem_cgroup_iter_update(iter, last_visited, memcg, seq); 1271 + mem_cgroup_iter_update(iter, last_visited, memcg, root, 1272 + seq); 1266 1273 1267 1274 if (!memcg) 1268 1275 iter->generation++; ··· 1860 1865 break; 1861 1866 }; 1862 1867 points = oom_badness(task, memcg, NULL, totalpages); 1863 - if (points > chosen_points) { 1864 - if (chosen) 1865 - put_task_struct(chosen); 1866 - chosen = task; 1867 - chosen_points = points; 1868 - get_task_struct(chosen); 1869 - } 1868 + if (!points || points < chosen_points) 1869 + continue; 1870 + /* Prefer thread group leaders for display purposes */ 1871 + if (points == chosen_points && 1872 + thread_group_leader(chosen)) 1873 + continue; 1874 + 1875 + if (chosen) 1876 + put_task_struct(chosen); 1877 + chosen = task; 1878 + chosen_points = points; 1879 + get_task_struct(chosen); 1870 1880 } 1871 1881 css_task_iter_end(&it); 1872 1882 } ··· 2904 2904 unsigned short id; 2905 2905 swp_entry_t ent; 2906 2906 2907 - VM_BUG_ON(!PageLocked(page)); 2907 + VM_BUG_ON_PAGE(!PageLocked(page), page); 2908 2908 2909 2909 pc = lookup_page_cgroup(page); 2910 2910 lock_page_cgroup(pc); ··· 2938 2938 bool anon; 2939 2939 2940 2940 lock_page_cgroup(pc); 2941 - 
VM_BUG_ON(PageCgroupUsed(pc)); 2941 + VM_BUG_ON_PAGE(PageCgroupUsed(pc), page); 2942 2942 /* 2943 2943 * we don't need page_cgroup_lock about tail pages, becase they are not 2944 2944 * accessed by any other context at this point. ··· 2973 2973 if (lrucare) { 2974 2974 if (was_on_lru) { 2975 2975 lruvec = mem_cgroup_zone_lruvec(zone, pc->mem_cgroup); 2976 - VM_BUG_ON(PageLRU(page)); 2976 + VM_BUG_ON_PAGE(PageLRU(page), page); 2977 2977 SetPageLRU(page); 2978 2978 add_page_to_lru_list(page, lruvec, page_lru(page)); 2979 2979 } ··· 2999 2999 static DEFINE_MUTEX(set_limit_mutex); 3000 3000 3001 3001 #ifdef CONFIG_MEMCG_KMEM 3002 + static DEFINE_MUTEX(activate_kmem_mutex); 3003 + 3002 3004 static inline bool memcg_can_account_kmem(struct mem_cgroup *memcg) 3003 3005 { 3004 3006 return !mem_cgroup_disabled() && !mem_cgroup_is_root(memcg) && 3005 - (memcg->kmem_account_flags & KMEM_ACCOUNTED_MASK) == 3006 - KMEM_ACCOUNTED_MASK; 3007 + memcg_kmem_is_active(memcg); 3007 3008 } 3008 3009 3009 3010 /* ··· 3103 3102 css_put(&memcg->css); 3104 3103 } 3105 3104 3106 - void memcg_cache_list_add(struct mem_cgroup *memcg, struct kmem_cache *cachep) 3107 - { 3108 - if (!memcg) 3109 - return; 3110 - 3111 - mutex_lock(&memcg->slab_caches_mutex); 3112 - list_add(&cachep->memcg_params->list, &memcg->memcg_slab_caches); 3113 - mutex_unlock(&memcg->slab_caches_mutex); 3114 - } 3115 - 3116 3105 /* 3117 3106 * helper for acessing a memcg's index. It will be used as an index in the 3118 3107 * child cache array in kmem_cache, and also to derive its name. This function ··· 3111 3120 int memcg_cache_id(struct mem_cgroup *memcg) 3112 3121 { 3113 3122 return memcg ? memcg->kmemcg_id : -1; 3114 - } 3115 - 3116 - /* 3117 - * This ends up being protected by the set_limit mutex, during normal 3118 - * operation, because that is its main call site. 3119 - * 3120 - * But when we create a new cache, we can call this as well if its parent 3121 - * is kmem-limited. 
That will have to hold set_limit_mutex as well. 3122 - */ 3123 - static int memcg_update_cache_sizes(struct mem_cgroup *memcg) 3124 - { 3125 - int num, ret; 3126 - 3127 - num = ida_simple_get(&kmem_limited_groups, 3128 - 0, MEMCG_CACHES_MAX_SIZE, GFP_KERNEL); 3129 - if (num < 0) 3130 - return num; 3131 - /* 3132 - * After this point, kmem_accounted (that we test atomically in 3133 - * the beginning of this conditional), is no longer 0. This 3134 - * guarantees only one process will set the following boolean 3135 - * to true. We don't need test_and_set because we're protected 3136 - * by the set_limit_mutex anyway. 3137 - */ 3138 - memcg_kmem_set_activated(memcg); 3139 - 3140 - ret = memcg_update_all_caches(num+1); 3141 - if (ret) { 3142 - ida_simple_remove(&kmem_limited_groups, num); 3143 - memcg_kmem_clear_activated(memcg); 3144 - return ret; 3145 - } 3146 - 3147 - memcg->kmemcg_id = num; 3148 - INIT_LIST_HEAD(&memcg->memcg_slab_caches); 3149 - mutex_init(&memcg->slab_caches_mutex); 3150 - return 0; 3151 3123 } 3152 3124 3153 3125 static size_t memcg_caches_array_size(int num_groups) ··· 3149 3195 3150 3196 if (num_groups > memcg_limited_groups_array_size) { 3151 3197 int i; 3198 + struct memcg_cache_params *new_params; 3152 3199 ssize_t size = memcg_caches_array_size(num_groups); 3153 3200 3154 3201 size *= sizeof(void *); 3155 3202 size += offsetof(struct memcg_cache_params, memcg_caches); 3156 3203 3157 - s->memcg_params = kzalloc(size, GFP_KERNEL); 3158 - if (!s->memcg_params) { 3159 - s->memcg_params = cur_params; 3204 + new_params = kzalloc(size, GFP_KERNEL); 3205 + if (!new_params) 3160 3206 return -ENOMEM; 3161 - } 3162 3207 3163 - s->memcg_params->is_root_cache = true; 3208 + new_params->is_root_cache = true; 3164 3209 3165 3210 /* 3166 3211 * There is the chance it will be bigger than ··· 3173 3220 for (i = 0; i < memcg_limited_groups_array_size; i++) { 3174 3221 if (!cur_params->memcg_caches[i]) 3175 3222 continue; 3176 - 
s->memcg_params->memcg_caches[i] = 3223 + new_params->memcg_caches[i] = 3177 3224 cur_params->memcg_caches[i]; 3178 3225 } 3179 3226 ··· 3186 3233 * bigger than the others. And all updates will reset this 3187 3234 * anyway. 3188 3235 */ 3189 - kfree(cur_params); 3236 + rcu_assign_pointer(s->memcg_params, new_params); 3237 + if (cur_params) 3238 + kfree_rcu(cur_params, rcu_head); 3190 3239 } 3191 3240 return 0; 3192 3241 } 3193 3242 3194 - int memcg_register_cache(struct mem_cgroup *memcg, struct kmem_cache *s, 3195 - struct kmem_cache *root_cache) 3243 + int memcg_alloc_cache_params(struct mem_cgroup *memcg, struct kmem_cache *s, 3244 + struct kmem_cache *root_cache) 3196 3245 { 3197 3246 size_t size; 3198 3247 ··· 3222 3267 return 0; 3223 3268 } 3224 3269 3225 - void memcg_release_cache(struct kmem_cache *s) 3270 + void memcg_free_cache_params(struct kmem_cache *s) 3271 + { 3272 + kfree(s->memcg_params); 3273 + } 3274 + 3275 + void memcg_register_cache(struct kmem_cache *s) 3226 3276 { 3227 3277 struct kmem_cache *root; 3228 3278 struct mem_cgroup *memcg; 3229 3279 int id; 3230 3280 3231 - /* 3232 - * This happens, for instance, when a root cache goes away before we 3233 - * add any memcg. 3234 - */ 3235 - if (!s->memcg_params) 3281 + if (is_root_cache(s)) 3236 3282 return; 3237 3283 3238 - if (s->memcg_params->is_root_cache) 3239 - goto out; 3240 - 3241 - memcg = s->memcg_params->memcg; 3242 - id = memcg_cache_id(memcg); 3284 + /* 3285 + * Holding the slab_mutex assures nobody will touch the memcg_caches 3286 + * array while we are modifying it. 
3287 + */ 3288 + lockdep_assert_held(&slab_mutex); 3243 3289 3244 3290 root = s->memcg_params->root_cache; 3245 - root->memcg_params->memcg_caches[id] = NULL; 3291 + memcg = s->memcg_params->memcg; 3292 + id = memcg_cache_id(memcg); 3293 + 3294 + css_get(&memcg->css); 3295 + 3296 + 3297 + /* 3298 + * Since readers won't lock (see cache_from_memcg_idx()), we need a 3299 + * barrier here to ensure nobody will see the kmem_cache partially 3300 + * initialized. 3301 + */ 3302 + smp_wmb(); 3303 + 3304 + /* 3305 + * Initialize the pointer to this cache in its parent's memcg_params 3306 + * before adding it to the memcg_slab_caches list, otherwise we can 3307 + * fail to convert memcg_params_to_cache() while traversing the list. 3308 + */ 3309 + VM_BUG_ON(root->memcg_params->memcg_caches[id]); 3310 + root->memcg_params->memcg_caches[id] = s; 3311 + 3312 + mutex_lock(&memcg->slab_caches_mutex); 3313 + list_add(&s->memcg_params->list, &memcg->memcg_slab_caches); 3314 + mutex_unlock(&memcg->slab_caches_mutex); 3315 + } 3316 + 3317 + void memcg_unregister_cache(struct kmem_cache *s) 3318 + { 3319 + struct kmem_cache *root; 3320 + struct mem_cgroup *memcg; 3321 + int id; 3322 + 3323 + if (is_root_cache(s)) 3324 + return; 3325 + 3326 + /* 3327 + * Holding the slab_mutex assures nobody will touch the memcg_caches 3328 + * array while we are modifying it. 3329 + */ 3330 + lockdep_assert_held(&slab_mutex); 3331 + 3332 + root = s->memcg_params->root_cache; 3333 + memcg = s->memcg_params->memcg; 3334 + id = memcg_cache_id(memcg); 3246 3335 3247 3336 mutex_lock(&memcg->slab_caches_mutex); 3248 3337 list_del(&s->memcg_params->list); 3249 3338 mutex_unlock(&memcg->slab_caches_mutex); 3250 3339 3340 + /* 3341 + * Clear the pointer to this cache in its parent's memcg_params only 3342 + * after removing it from the memcg_slab_caches list, otherwise we can 3343 + * fail to convert memcg_params_to_cache() while traversing the list. 
3344 + */ 3345 + VM_BUG_ON(!root->memcg_params->memcg_caches[id]); 3346 + root->memcg_params->memcg_caches[id] = NULL; 3347 + 3251 3348 css_put(&memcg->css); 3252 - out: 3253 - kfree(s->memcg_params); 3254 3349 } 3255 3350 3256 3351 /* ··· 3359 3354 * So if we aren't down to zero, we'll just schedule a worker and try 3360 3355 * again 3361 3356 */ 3362 - if (atomic_read(&cachep->memcg_params->nr_pages) != 0) { 3357 + if (atomic_read(&cachep->memcg_params->nr_pages) != 0) 3363 3358 kmem_cache_shrink(cachep); 3364 - if (atomic_read(&cachep->memcg_params->nr_pages) == 0) 3365 - return; 3366 - } else 3359 + else 3367 3360 kmem_cache_destroy(cachep); 3368 3361 } 3369 3362 ··· 3397 3394 schedule_work(&cachep->memcg_params->destroy); 3398 3395 } 3399 3396 3400 - /* 3401 - * This lock protects updaters, not readers. We want readers to be as fast as 3402 - * they can, and they will either see NULL or a valid cache value. Our model 3403 - * allow them to see NULL, in which case the root memcg will be selected. 3404 - * 3405 - * We need this lock because multiple allocations to the same cache from a non 3406 - * will span more than one worker. Only one of them can create the cache. 3407 - */ 3408 - static DEFINE_MUTEX(memcg_cache_mutex); 3409 - 3410 - /* 3411 - * Called with memcg_cache_mutex held 3412 - */ 3413 - static struct kmem_cache *kmem_cache_dup(struct mem_cgroup *memcg, 3414 - struct kmem_cache *s) 3397 + static struct kmem_cache *memcg_create_kmem_cache(struct mem_cgroup *memcg, 3398 + struct kmem_cache *s) 3415 3399 { 3416 3400 struct kmem_cache *new; 3417 3401 static char *tmp_name = NULL; 3402 + static DEFINE_MUTEX(mutex); /* protects tmp_name */ 3418 3403 3419 - lockdep_assert_held(&memcg_cache_mutex); 3404 + BUG_ON(!memcg_can_account_kmem(memcg)); 3420 3405 3406 + mutex_lock(&mutex); 3421 3407 /* 3422 3408 * kmem_cache_create_memcg duplicates the given name and 3423 3409 * cgroup_name for this name requires RCU context. 
··· 3429 3437 3430 3438 if (new) 3431 3439 new->allocflags |= __GFP_KMEMCG; 3440 + else 3441 + new = s; 3432 3442 3443 + mutex_unlock(&mutex); 3433 3444 return new; 3434 - } 3435 - 3436 - static struct kmem_cache *memcg_create_kmem_cache(struct mem_cgroup *memcg, 3437 - struct kmem_cache *cachep) 3438 - { 3439 - struct kmem_cache *new_cachep; 3440 - int idx; 3441 - 3442 - BUG_ON(!memcg_can_account_kmem(memcg)); 3443 - 3444 - idx = memcg_cache_id(memcg); 3445 - 3446 - mutex_lock(&memcg_cache_mutex); 3447 - new_cachep = cache_from_memcg_idx(cachep, idx); 3448 - if (new_cachep) { 3449 - css_put(&memcg->css); 3450 - goto out; 3451 - } 3452 - 3453 - new_cachep = kmem_cache_dup(memcg, cachep); 3454 - if (new_cachep == NULL) { 3455 - new_cachep = cachep; 3456 - css_put(&memcg->css); 3457 - goto out; 3458 - } 3459 - 3460 - atomic_set(&new_cachep->memcg_params->nr_pages , 0); 3461 - 3462 - cachep->memcg_params->memcg_caches[idx] = new_cachep; 3463 - /* 3464 - * the readers won't lock, make sure everybody sees the updated value, 3465 - * so they won't put stuff in the queue again for no reason 3466 - */ 3467 - wmb(); 3468 - out: 3469 - mutex_unlock(&memcg_cache_mutex); 3470 - return new_cachep; 3471 3445 } 3472 3446 3473 3447 void kmem_cache_destroy_memcg_children(struct kmem_cache *s) ··· 3453 3495 * 3454 3496 * Still, we don't want anyone else freeing memcg_caches under our 3455 3497 * noses, which can happen if a new memcg comes to life. As usual, 3456 - * we'll take the set_limit_mutex to protect ourselves against this. 3498 + * we'll take the activate_kmem_mutex to protect ourselves against 3499 + * this. 
3457 3500 */ 3458 - mutex_lock(&set_limit_mutex); 3501 + mutex_lock(&activate_kmem_mutex); 3459 3502 for_each_memcg_cache_index(i) { 3460 3503 c = cache_from_memcg_idx(s, i); 3461 3504 if (!c) ··· 3479 3520 cancel_work_sync(&c->memcg_params->destroy); 3480 3521 kmem_cache_destroy(c); 3481 3522 } 3482 - mutex_unlock(&set_limit_mutex); 3523 + mutex_unlock(&activate_kmem_mutex); 3483 3524 } 3484 3525 3485 3526 struct create_work { ··· 3511 3552 3512 3553 cw = container_of(w, struct create_work, work); 3513 3554 memcg_create_kmem_cache(cw->memcg, cw->cachep); 3555 + css_put(&cw->memcg->css); 3514 3556 kfree(cw); 3515 3557 } 3516 3558 ··· 3571 3611 gfp_t gfp) 3572 3612 { 3573 3613 struct mem_cgroup *memcg; 3574 - int idx; 3614 + struct kmem_cache *memcg_cachep; 3575 3615 3576 3616 VM_BUG_ON(!cachep->memcg_params); 3577 3617 VM_BUG_ON(!cachep->memcg_params->is_root_cache); ··· 3585 3625 if (!memcg_can_account_kmem(memcg)) 3586 3626 goto out; 3587 3627 3588 - idx = memcg_cache_id(memcg); 3589 - 3590 - /* 3591 - * barrier to mare sure we're always seeing the up to date value. The 3592 - * code updating memcg_caches will issue a write barrier to match this. 3593 - */ 3594 - read_barrier_depends(); 3595 - if (likely(cache_from_memcg_idx(cachep, idx))) { 3596 - cachep = cache_from_memcg_idx(cachep, idx); 3628 + memcg_cachep = cache_from_memcg_idx(cachep, memcg_cache_id(memcg)); 3629 + if (likely(memcg_cachep)) { 3630 + cachep = memcg_cachep; 3597 3631 goto out; 3598 3632 } 3599 3633 ··· 3741 3787 if (!memcg) 3742 3788 return; 3743 3789 3744 - VM_BUG_ON(mem_cgroup_is_root(memcg)); 3790 + VM_BUG_ON_PAGE(mem_cgroup_is_root(memcg), page); 3745 3791 memcg_uncharge_kmem(memcg, PAGE_SIZE << order); 3746 3792 } 3747 3793 #else ··· 3820 3866 bool anon = PageAnon(page); 3821 3867 3822 3868 VM_BUG_ON(from == to); 3823 - VM_BUG_ON(PageLRU(page)); 3869 + VM_BUG_ON_PAGE(PageLRU(page), page); 3824 3870 /* 3825 3871 * The page is isolated from LRU. 
So, collapse function 3826 3872 * will not handle this page. But page splitting can happen. ··· 3913 3959 parent = root_mem_cgroup; 3914 3960 3915 3961 if (nr_pages > 1) { 3916 - VM_BUG_ON(!PageTransHuge(page)); 3962 + VM_BUG_ON_PAGE(!PageTransHuge(page), page); 3917 3963 flags = compound_lock_irqsave(page); 3918 3964 } 3919 3965 ··· 3947 3993 3948 3994 if (PageTransHuge(page)) { 3949 3995 nr_pages <<= compound_order(page); 3950 - VM_BUG_ON(!PageTransHuge(page)); 3996 + VM_BUG_ON_PAGE(!PageTransHuge(page), page); 3951 3997 /* 3952 3998 * Never OOM-kill a process for a huge page. The 3953 3999 * fault handler will fall back to regular pages. ··· 3967 4013 { 3968 4014 if (mem_cgroup_disabled()) 3969 4015 return 0; 3970 - VM_BUG_ON(page_mapped(page)); 3971 - VM_BUG_ON(page->mapping && !PageAnon(page)); 4016 + VM_BUG_ON_PAGE(page_mapped(page), page); 4017 + VM_BUG_ON_PAGE(page->mapping && !PageAnon(page), page); 3972 4018 VM_BUG_ON(!mm); 3973 4019 return mem_cgroup_charge_common(page, mm, gfp_mask, 3974 4020 MEM_CGROUP_CHARGE_TYPE_ANON); ··· 4172 4218 4173 4219 if (PageTransHuge(page)) { 4174 4220 nr_pages <<= compound_order(page); 4175 - VM_BUG_ON(!PageTransHuge(page)); 4221 + VM_BUG_ON_PAGE(!PageTransHuge(page), page); 4176 4222 } 4177 4223 /* 4178 4224 * Check if our page_cgroup is valid ··· 4264 4310 /* early check. 
*/ 4265 4311 if (page_mapped(page)) 4266 4312 return; 4267 - VM_BUG_ON(page->mapping && !PageAnon(page)); 4313 + VM_BUG_ON_PAGE(page->mapping && !PageAnon(page), page); 4268 4314 /* 4269 4315 * If the page is in swap cache, uncharge should be deferred 4270 4316 * to the swap path, which also properly accounts swap usage ··· 4284 4330 4285 4331 void mem_cgroup_uncharge_cache_page(struct page *page) 4286 4332 { 4287 - VM_BUG_ON(page_mapped(page)); 4288 - VM_BUG_ON(page->mapping); 4333 + VM_BUG_ON_PAGE(page_mapped(page), page); 4334 + VM_BUG_ON_PAGE(page->mapping, page); 4289 4335 __mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_CACHE, false); 4290 4336 } 4291 4337 ··· 5143 5189 return val; 5144 5190 } 5145 5191 5146 - static int memcg_update_kmem_limit(struct cgroup_subsys_state *css, u64 val) 5147 - { 5148 - int ret = -EINVAL; 5149 5192 #ifdef CONFIG_MEMCG_KMEM 5150 - struct mem_cgroup *memcg = mem_cgroup_from_css(css); 5193 + /* should be called with activate_kmem_mutex held */ 5194 + static int __memcg_activate_kmem(struct mem_cgroup *memcg, 5195 + unsigned long long limit) 5196 + { 5197 + int err = 0; 5198 + int memcg_id; 5199 + 5200 + if (memcg_kmem_is_active(memcg)) 5201 + return 0; 5202 + 5203 + /* 5204 + * We are going to allocate memory for data shared by all memory 5205 + * cgroups so let's stop accounting here. 5206 + */ 5207 + memcg_stop_kmem_account(); 5208 + 5151 5209 /* 5152 5210 * For simplicity, we won't allow this to be disabled. It also can't 5153 5211 * be changed if the cgroup has children already, or if tasks had ··· 5173 5207 * of course permitted. 
5174 5208 */ 5175 5209 mutex_lock(&memcg_create_mutex); 5176 - mutex_lock(&set_limit_mutex); 5177 - if (!memcg->kmem_account_flags && val != RES_COUNTER_MAX) { 5178 - if (cgroup_task_count(css->cgroup) || memcg_has_children(memcg)) { 5179 - ret = -EBUSY; 5180 - goto out; 5181 - } 5182 - ret = res_counter_set_limit(&memcg->kmem, val); 5183 - VM_BUG_ON(ret); 5184 - 5185 - ret = memcg_update_cache_sizes(memcg); 5186 - if (ret) { 5187 - res_counter_set_limit(&memcg->kmem, RES_COUNTER_MAX); 5188 - goto out; 5189 - } 5190 - static_key_slow_inc(&memcg_kmem_enabled_key); 5191 - /* 5192 - * setting the active bit after the inc will guarantee no one 5193 - * starts accounting before all call sites are patched 5194 - */ 5195 - memcg_kmem_set_active(memcg); 5196 - } else 5197 - ret = res_counter_set_limit(&memcg->kmem, val); 5198 - out: 5199 - mutex_unlock(&set_limit_mutex); 5210 + if (cgroup_task_count(memcg->css.cgroup) || memcg_has_children(memcg)) 5211 + err = -EBUSY; 5200 5212 mutex_unlock(&memcg_create_mutex); 5201 - #endif 5213 + if (err) 5214 + goto out; 5215 + 5216 + memcg_id = ida_simple_get(&kmem_limited_groups, 5217 + 0, MEMCG_CACHES_MAX_SIZE, GFP_KERNEL); 5218 + if (memcg_id < 0) { 5219 + err = memcg_id; 5220 + goto out; 5221 + } 5222 + 5223 + /* 5224 + * Make sure we have enough space for this cgroup in each root cache's 5225 + * memcg_params. 5226 + */ 5227 + err = memcg_update_all_caches(memcg_id + 1); 5228 + if (err) 5229 + goto out_rmid; 5230 + 5231 + memcg->kmemcg_id = memcg_id; 5232 + INIT_LIST_HEAD(&memcg->memcg_slab_caches); 5233 + mutex_init(&memcg->slab_caches_mutex); 5234 + 5235 + /* 5236 + * We couldn't have accounted to this cgroup, because it hasn't got the 5237 + * active bit set yet, so this should succeed. 
5238 + */ 5239 + err = res_counter_set_limit(&memcg->kmem, limit); 5240 + VM_BUG_ON(err); 5241 + 5242 + static_key_slow_inc(&memcg_kmem_enabled_key); 5243 + /* 5244 + * Setting the active bit after enabling static branching will 5245 + * guarantee no one starts accounting before all call sites are 5246 + * patched. 5247 + */ 5248 + memcg_kmem_set_active(memcg); 5249 + out: 5250 + memcg_resume_kmem_account(); 5251 + return err; 5252 + 5253 + out_rmid: 5254 + ida_simple_remove(&kmem_limited_groups, memcg_id); 5255 + goto out; 5256 + } 5257 + 5258 + static int memcg_activate_kmem(struct mem_cgroup *memcg, 5259 + unsigned long long limit) 5260 + { 5261 + int ret; 5262 + 5263 + mutex_lock(&activate_kmem_mutex); 5264 + ret = __memcg_activate_kmem(memcg, limit); 5265 + mutex_unlock(&activate_kmem_mutex); 5202 5266 return ret; 5203 5267 } 5204 5268 5205 - #ifdef CONFIG_MEMCG_KMEM 5269 + static int memcg_update_kmem_limit(struct mem_cgroup *memcg, 5270 + unsigned long long val) 5271 + { 5272 + int ret; 5273 + 5274 + if (!memcg_kmem_is_active(memcg)) 5275 + ret = memcg_activate_kmem(memcg, val); 5276 + else 5277 + ret = res_counter_set_limit(&memcg->kmem, val); 5278 + return ret; 5279 + } 5280 + 5206 5281 static int memcg_propagate_kmem(struct mem_cgroup *memcg) 5207 5282 { 5208 5283 int ret = 0; 5209 5284 struct mem_cgroup *parent = parent_mem_cgroup(memcg); 5285 + 5210 5286 if (!parent) 5211 - goto out; 5287 + return 0; 5212 5288 5213 - memcg->kmem_account_flags = parent->kmem_account_flags; 5289 + mutex_lock(&activate_kmem_mutex); 5214 5290 /* 5215 - * When that happen, we need to disable the static branch only on those 5216 - * memcgs that enabled it. To achieve this, we would be forced to 5217 - * complicate the code by keeping track of which memcgs were the ones 5218 - * that actually enabled limits, and which ones got it from its 5219 - * parents. 5220 - * 5221 - * It is a lot simpler just to do static_key_slow_inc() on every child 5222 - * that is accounted. 
5291 + * If the parent cgroup is not kmem-active now, it cannot be activated 5292 + * after this point, because it has at least one child already. 5223 5293 */ 5224 - if (!memcg_kmem_is_active(memcg)) 5225 - goto out; 5226 - 5227 - /* 5228 - * __mem_cgroup_free() will issue static_key_slow_dec() because this 5229 - * memcg is active already. If the later initialization fails then the 5230 - * cgroup core triggers the cleanup so we do not have to do it here. 5231 - */ 5232 - static_key_slow_inc(&memcg_kmem_enabled_key); 5233 - 5234 - mutex_lock(&set_limit_mutex); 5235 - memcg_stop_kmem_account(); 5236 - ret = memcg_update_cache_sizes(memcg); 5237 - memcg_resume_kmem_account(); 5238 - mutex_unlock(&set_limit_mutex); 5239 - out: 5294 + if (memcg_kmem_is_active(parent)) 5295 + ret = __memcg_activate_kmem(memcg, RES_COUNTER_MAX); 5296 + mutex_unlock(&activate_kmem_mutex); 5240 5297 return ret; 5298 + } 5299 + #else 5300 + static int memcg_update_kmem_limit(struct mem_cgroup *memcg, 5301 + unsigned long long val) 5302 + { 5303 + return -EINVAL; 5241 5304 } 5242 5305 #endif /* CONFIG_MEMCG_KMEM */ 5243 5306 ··· 5301 5306 else if (type == _MEMSWAP) 5302 5307 ret = mem_cgroup_resize_memsw_limit(memcg, val); 5303 5308 else if (type == _KMEM) 5304 - ret = memcg_update_kmem_limit(css, val); 5309 + ret = memcg_update_kmem_limit(memcg, val); 5305 5310 else 5306 5311 return -EINVAL; 5307 5312 break; ··· 6400 6405 static struct mem_cgroup *mem_cgroup_alloc(void) 6401 6406 { 6402 6407 struct mem_cgroup *memcg; 6403 - size_t size = memcg_size(); 6408 + size_t size; 6404 6409 6405 - /* Can be very big if nr_node_ids is very big */ 6406 - if (size < PAGE_SIZE) 6407 - memcg = kzalloc(size, GFP_KERNEL); 6408 - else 6409 - memcg = vzalloc(size); 6410 + size = sizeof(struct mem_cgroup); 6411 + size += nr_node_ids * sizeof(struct mem_cgroup_per_node *); 6410 6412 6413 + memcg = kzalloc(size, GFP_KERNEL); 6411 6414 if (!memcg) 6412 6415 return NULL; 6413 6416 ··· 6416 6423 return memcg; 
6417 6424 6418 6425 out_free: 6419 - if (size < PAGE_SIZE) 6420 - kfree(memcg); 6421 - else 6422 - vfree(memcg); 6426 + kfree(memcg); 6423 6427 return NULL; 6424 6428 } 6425 6429 ··· 6434 6444 static void __mem_cgroup_free(struct mem_cgroup *memcg) 6435 6445 { 6436 6446 int node; 6437 - size_t size = memcg_size(); 6438 6447 6439 6448 mem_cgroup_remove_from_trees(memcg); 6440 6449 ··· 6454 6465 * the cgroup_lock. 6455 6466 */ 6456 6467 disarm_static_keys(memcg); 6457 - if (size < PAGE_SIZE) 6458 - kfree(memcg); 6459 - else 6460 - vfree(memcg); 6468 + kfree(memcg); 6461 6469 } 6462 6470 6463 6471 /* ··· 6535 6549 { 6536 6550 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 6537 6551 struct mem_cgroup *parent = mem_cgroup_from_css(css_parent(css)); 6538 - int error = 0; 6539 6552 6540 6553 if (css->cgroup->id > MEM_CGROUP_ID_MAX) 6541 6554 return -ENOSPC; ··· 6569 6584 if (parent != root_mem_cgroup) 6570 6585 mem_cgroup_subsys.broken_hierarchy = true; 6571 6586 } 6572 - 6573 - error = memcg_init_kmem(memcg, &mem_cgroup_subsys); 6574 6587 mutex_unlock(&memcg_create_mutex); 6575 - return error; 6588 + 6589 + return memcg_init_kmem(memcg, &mem_cgroup_subsys); 6576 6590 } 6577 6591 6578 6592 /* ··· 6880 6896 enum mc_target_type ret = MC_TARGET_NONE; 6881 6897 6882 6898 page = pmd_page(pmd); 6883 - VM_BUG_ON(!page || !PageHead(page)); 6899 + VM_BUG_ON_PAGE(!page || !PageHead(page), page); 6884 6900 if (!move_anon()) 6885 6901 return ret; 6886 6902 pc = lookup_page_cgroup(page);
+11 -10
mm/memory-failure.c
··· 856 856 * the pages and send SIGBUS to the processes if the data was dirty. 857 857 */ 858 858 static int hwpoison_user_mappings(struct page *p, unsigned long pfn, 859 - int trapno, int flags) 859 + int trapno, int flags, struct page **hpagep) 860 860 { 861 861 enum ttu_flags ttu = TTU_UNMAP | TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS; 862 862 struct address_space *mapping; 863 863 LIST_HEAD(tokill); 864 864 int ret; 865 865 int kill = 1, forcekill; 866 - struct page *hpage = compound_head(p); 866 + struct page *hpage = *hpagep; 867 867 struct page *ppage; 868 868 869 869 if (PageReserved(p) || PageSlab(p)) ··· 942 942 * We pinned the head page for hwpoison handling, 943 943 * now we split the thp and we are interested in 944 944 * the hwpoisoned raw page, so move the refcount 945 - * to it. 945 + * to it. Similarly, page lock is shifted. 946 946 */ 947 947 if (hpage != p) { 948 948 put_page(hpage); 949 949 get_page(p); 950 + lock_page(p); 951 + unlock_page(hpage); 952 + *hpagep = p; 950 953 } 951 954 /* THP is split, so ppage should be the real poisoned page. */ 952 955 ppage = p; ··· 967 964 if (kill) 968 965 collect_procs(ppage, &tokill); 969 966 970 - if (hpage != ppage) 971 - lock_page(ppage); 972 - 973 967 ret = try_to_unmap(ppage, ttu); 974 968 if (ret != SWAP_SUCCESS) 975 969 printk(KERN_ERR "MCE %#lx: failed to unmap page (mapcount=%d)\n", 976 970 pfn, page_mapcount(ppage)); 977 - 978 - if (hpage != ppage) 979 - unlock_page(ppage); 980 971 981 972 /* 982 973 * Now that the dirty bit has been propagated to the ··· 1190 1193 /* 1191 1194 * Now take care of user space mappings. 1192 1195 * Abort on fail: __delete_from_page_cache() assumes unmapped page. 1196 + * 1197 + * When the raw error page is thp tail page, hpage points to the raw 1198 + * page after thp split. 
1193 1199 */ 1194 - if (hwpoison_user_mappings(p, pfn, trapno, flags) != SWAP_SUCCESS) { 1200 + if (hwpoison_user_mappings(p, pfn, trapno, flags, &hpage) 1201 + != SWAP_SUCCESS) { 1195 1202 printk(KERN_ERR "MCE %#lx: cannot unmap page, give up\n", pfn); 1196 1203 res = -EBUSY; 1197 1204 goto out;
+5 -5
mm/memory.c
··· 289 289 return 0; 290 290 batch = tlb->active; 291 291 } 292 - VM_BUG_ON(batch->nr > batch->max); 292 + VM_BUG_ON_PAGE(batch->nr > batch->max, page); 293 293 294 294 return batch->max - batch->nr; 295 295 } ··· 671 671 current->comm, 672 672 (long long)pte_val(pte), (long long)pmd_val(*pmd)); 673 673 if (page) 674 - dump_page(page); 674 + dump_page(page, "bad pte"); 675 675 printk(KERN_ALERT 676 676 "addr:%p vm_flags:%08lx anon_vma:%p mapping:%p index:%lx\n", 677 677 (void *)addr, vma->vm_flags, vma->anon_vma, mapping, index); ··· 2702 2702 goto unwritable_page; 2703 2703 } 2704 2704 } else 2705 - VM_BUG_ON(!PageLocked(old_page)); 2705 + VM_BUG_ON_PAGE(!PageLocked(old_page), old_page); 2706 2706 2707 2707 /* 2708 2708 * Since we dropped the lock we need to revalidate ··· 3358 3358 if (unlikely(!(ret & VM_FAULT_LOCKED))) 3359 3359 lock_page(vmf.page); 3360 3360 else 3361 - VM_BUG_ON(!PageLocked(vmf.page)); 3361 + VM_BUG_ON_PAGE(!PageLocked(vmf.page), vmf.page); 3362 3362 3363 3363 /* 3364 3364 * Should we do an early C-O-W break? ··· 3395 3395 goto unwritable_page; 3396 3396 } 3397 3397 } else 3398 - VM_BUG_ON(!PageLocked(page)); 3398 + VM_BUG_ON_PAGE(!PageLocked(page), page); 3399 3399 page_mkwrite = 1; 3400 3400 } 3401 3401 }
+5 -4
mm/memory_hotplug.c
··· 1107 1107 if (ret) 1108 1108 return ret; 1109 1109 1110 - lock_memory_hotplug(); 1111 - 1112 1110 res = register_memory_resource(start, size); 1113 1111 ret = -EEXIST; 1114 1112 if (!res) 1115 - goto out; 1113 + return ret; 1116 1114 1117 1115 { /* Stupid hack to suppress address-never-null warning */ 1118 1116 void *p = NODE_DATA(nid); 1119 1117 new_pgdat = !p; 1120 1118 } 1119 + 1120 + lock_memory_hotplug(); 1121 + 1121 1122 new_node = !node_online(nid); 1122 1123 if (new_node) { 1123 1124 pgdat = hotadd_new_pgdat(nid, start); ··· 1310 1309 #ifdef CONFIG_DEBUG_VM 1311 1310 printk(KERN_ALERT "removing pfn %lx from LRU failed\n", 1312 1311 pfn); 1313 - dump_page(page); 1312 + dump_page(page, "failed to remove from LRU"); 1314 1313 #endif 1315 1314 put_page(page); 1316 1315 /* Because we don't have big zone->lock. we should
+3 -5
mm/mempolicy.c
··· 1199 1199 } 1200 1200 1201 1201 if (PageHuge(page)) { 1202 - if (vma) 1203 - return alloc_huge_page_noerr(vma, address, 1); 1204 - else 1205 - return NULL; 1202 + BUG_ON(!vma); 1203 + return alloc_huge_page_noerr(vma, address, 1); 1206 1204 } 1207 1205 /* 1208 1206 * if !vma, alloc_page_vma() will use task or system default policy ··· 2666 2668 2667 2669 if (nr_node_ids > 1 && !numabalancing_override) { 2668 2670 printk(KERN_INFO "Enabling automatic NUMA balancing. " 2669 - "Configure with numa_balancing= or sysctl"); 2671 + "Configure with numa_balancing= or the kernel.numa_balancing sysctl"); 2670 2672 set_numabalancing_state(numabalancing_default); 2671 2673 } 2672 2674 }
+3 -5
mm/migrate.c
··· 499 499 if (PageUptodate(page)) 500 500 SetPageUptodate(newpage); 501 501 if (TestClearPageActive(page)) { 502 - VM_BUG_ON(PageUnevictable(page)); 502 + VM_BUG_ON_PAGE(PageUnevictable(page), page); 503 503 SetPageActive(newpage); 504 504 } else if (TestClearPageUnevictable(page)) 505 505 SetPageUnevictable(newpage); ··· 871 871 * free the metadata, so the page can be freed. 872 872 */ 873 873 if (!page->mapping) { 874 - VM_BUG_ON(PageAnon(page)); 874 + VM_BUG_ON_PAGE(PageAnon(page), page); 875 875 if (page_has_private(page)) { 876 876 try_to_free_buffers(page); 877 877 goto uncharge; ··· 1618 1618 { 1619 1619 int page_lru; 1620 1620 1621 - VM_BUG_ON(compound_order(page) && !PageTransHuge(page)); 1621 + VM_BUG_ON_PAGE(compound_order(page) && !PageTransHuge(page), page); 1622 1622 1623 1623 /* Avoid migrating to a node that is nearly full */ 1624 1624 if (!migrate_balanced_pgdat(pgdat, 1UL << compound_order(page))) ··· 1752 1752 (GFP_TRANSHUGE | GFP_THISNODE) & ~__GFP_WAIT, HPAGE_PMD_ORDER); 1753 1753 if (!new_page) 1754 1754 goto out_fail; 1755 - 1756 - page_cpupid_xchg_last(new_page, page_cpupid_last(page)); 1757 1755 1758 1756 isolated = numamigrate_isolate_page(pgdat, page); 1759 1757 if (!isolated) {
-7
mm/mincore.c
··· 225 225 226 226 end = min(vma->vm_end, addr + (pages << PAGE_SHIFT)); 227 227 228 - if (is_vm_hugetlb_page(vma)) { 229 - mincore_hugetlb_page_range(vma, addr, end, vec); 230 - return (end - addr) >> PAGE_SHIFT; 231 - } 232 - 233 - end = pmd_addr_end(addr, end); 234 - 235 228 if (is_vm_hugetlb_page(vma)) 236 229 mincore_hugetlb_page_range(vma, addr, end, vec); 237 230 else
+62 -46
mm/mlock.c
··· 91 91 } 92 92 93 93 /* 94 + * Isolate a page from LRU with optional get_page() pin. 95 + * Assumes lru_lock already held and page already pinned. 96 + */ 97 + static bool __munlock_isolate_lru_page(struct page *page, bool getpage) 98 + { 99 + if (PageLRU(page)) { 100 + struct lruvec *lruvec; 101 + 102 + lruvec = mem_cgroup_page_lruvec(page, page_zone(page)); 103 + if (getpage) 104 + get_page(page); 105 + ClearPageLRU(page); 106 + del_page_from_lru_list(page, lruvec, page_lru(page)); 107 + return true; 108 + } 109 + 110 + return false; 111 + } 112 + 113 + /* 94 114 * Finish munlock after successful page isolation 95 115 * 96 116 * Page must be locked. This is a wrapper for try_to_munlock() ··· 146 126 static void __munlock_isolation_failed(struct page *page) 147 127 { 148 128 if (PageUnevictable(page)) 149 - count_vm_event(UNEVICTABLE_PGSTRANDED); 129 + __count_vm_event(UNEVICTABLE_PGSTRANDED); 150 130 else 151 - count_vm_event(UNEVICTABLE_PGMUNLOCKED); 131 + __count_vm_event(UNEVICTABLE_PGMUNLOCKED); 152 132 } 153 133 154 134 /** ··· 172 152 unsigned int munlock_vma_page(struct page *page) 173 153 { 174 154 unsigned int nr_pages; 155 + struct zone *zone = page_zone(page); 175 156 176 157 BUG_ON(!PageLocked(page)); 177 158 178 - if (TestClearPageMlocked(page)) { 179 - nr_pages = hpage_nr_pages(page); 180 - mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages); 181 - if (!isolate_lru_page(page)) 182 - __munlock_isolated_page(page); 183 - else 184 - __munlock_isolation_failed(page); 185 - } else { 186 - nr_pages = hpage_nr_pages(page); 187 - } 188 - 189 159 /* 190 - * Regardless of the original PageMlocked flag, we determine nr_pages 191 - * after touching the flag. This leaves a possible race with a THP page 192 - * split, such that a whole THP page was munlocked, but nr_pages == 1. 193 - * Returning a smaller mask due to that is OK, the worst that can 194 - * happen is subsequent useless scanning of the former tail pages. 
195 - * The NR_MLOCK accounting can however become broken. 160 + * Serialize with any parallel __split_huge_page_refcount() which 161 + * might otherwise copy PageMlocked to part of the tail pages before 162 + * we clear it in the head page. It also stabilizes hpage_nr_pages(). 196 163 */ 164 + spin_lock_irq(&zone->lru_lock); 165 + 166 + nr_pages = hpage_nr_pages(page); 167 + if (!TestClearPageMlocked(page)) 168 + goto unlock_out; 169 + 170 + __mod_zone_page_state(zone, NR_MLOCK, -nr_pages); 171 + 172 + if (__munlock_isolate_lru_page(page, true)) { 173 + spin_unlock_irq(&zone->lru_lock); 174 + __munlock_isolated_page(page); 175 + goto out; 176 + } 177 + __munlock_isolation_failed(page); 178 + 179 + unlock_out: 180 + spin_unlock_irq(&zone->lru_lock); 181 + 182 + out: 197 183 return nr_pages - 1; 198 184 } 199 185 ··· 279 253 static bool __putback_lru_fast_prepare(struct page *page, struct pagevec *pvec, 280 254 int *pgrescued) 281 255 { 282 - VM_BUG_ON(PageLRU(page)); 283 - VM_BUG_ON(!PageLocked(page)); 256 + VM_BUG_ON_PAGE(PageLRU(page), page); 257 + VM_BUG_ON_PAGE(!PageLocked(page), page); 284 258 285 259 if (page_mapcount(page) <= 1 && page_evictable(page)) { 286 260 pagevec_add(pvec, page); ··· 336 310 struct page *page = pvec->pages[i]; 337 311 338 312 if (TestClearPageMlocked(page)) { 339 - struct lruvec *lruvec; 340 - int lru; 341 - 342 - if (PageLRU(page)) { 343 - lruvec = mem_cgroup_page_lruvec(page, zone); 344 - lru = page_lru(page); 345 - /* 346 - * We already have pin from follow_page_mask() 347 - * so we can spare the get_page() here. 348 - */ 349 - ClearPageLRU(page); 350 - del_page_from_lru_list(page, lruvec, lru); 351 - } else { 352 - __munlock_isolation_failed(page); 353 - goto skip_munlock; 354 - } 355 - 356 - } else { 357 - skip_munlock: 358 313 /* 359 - * We won't be munlocking this page in the next phase 360 - * but we still need to release the follow_page_mask() 361 - * pin. We cannot do it under lru_lock however. 
If it's 362 - * the last pin, __page_cache_release would deadlock. 314 + * We already have pin from follow_page_mask() 315 + * so we can spare the get_page() here. 363 316 */ 364 - pagevec_add(&pvec_putback, pvec->pages[i]); 365 - pvec->pages[i] = NULL; 317 + if (__munlock_isolate_lru_page(page, false)) 318 + continue; 319 + else 320 + __munlock_isolation_failed(page); 366 321 } 322 + 323 + /* 324 + * We won't be munlocking this page in the next phase 325 + * but we still need to release the follow_page_mask() 326 + * pin. We cannot do it under lru_lock however. If it's 327 + * the last pin, __page_cache_release() would deadlock. 328 + */ 329 + pagevec_add(&pvec_putback, pvec->pages[i]); 330 + pvec->pages[i] = NULL; 367 331 } 368 332 delta_munlocked = -nr + pagevec_count(&pvec_putback); 369 333 __mod_zone_page_state(zone, NR_MLOCK, delta_munlocked);
+1 -2
mm/mm_init.c
··· 202 202 203 203 return 0; 204 204 } 205 - 206 - __initcall(mm_sysfs_init); 205 + pure_initcall(mm_sysfs_init);
+13 -5
mm/mmap.c
··· 894 894 static inline int is_mergeable_vma(struct vm_area_struct *vma, 895 895 struct file *file, unsigned long vm_flags) 896 896 { 897 - if (vma->vm_flags ^ vm_flags) 897 + /* 898 + * VM_SOFTDIRTY should not prevent from VMA merging, if we 899 + * match the flags but dirty bit -- the caller should mark 900 + * merged VMA as dirty. If dirty bit won't be excluded from 901 + * comparison, we increase pressue on the memory system forcing 902 + * the kernel to generate new VMAs when old one could be 903 + * extended instead. 904 + */ 905 + if ((vma->vm_flags ^ vm_flags) & ~VM_SOFTDIRTY) 898 906 return 0; 899 907 if (vma->vm_file != file) 900 908 return 0; ··· 1091 1083 return a->vm_end == b->vm_start && 1092 1084 mpol_equal(vma_policy(a), vma_policy(b)) && 1093 1085 a->vm_file == b->vm_file && 1094 - !((a->vm_flags ^ b->vm_flags) & ~(VM_READ|VM_WRITE|VM_EXEC)) && 1086 + !((a->vm_flags ^ b->vm_flags) & ~(VM_READ|VM_WRITE|VM_EXEC|VM_SOFTDIRTY)) && 1095 1087 b->vm_pgoff == a->vm_pgoff + ((b->vm_start - a->vm_start) >> PAGE_SHIFT); 1096 1088 } 1097 1089 ··· 3150 3142 sysctl_user_reserve_kbytes = min(free_kbytes / 32, 1UL << 17); 3151 3143 return 0; 3152 3144 } 3153 - module_init(init_user_reserve) 3145 + subsys_initcall(init_user_reserve); 3154 3146 3155 3147 /* 3156 3148 * Initialise sysctl_admin_reserve_kbytes. ··· 3171 3163 sysctl_admin_reserve_kbytes = min(free_kbytes / 32, 1UL << 13); 3172 3164 return 0; 3173 3165 } 3174 - module_init(init_admin_reserve) 3166 + subsys_initcall(init_admin_reserve); 3175 3167 3176 3168 /* 3177 3169 * Reinititalise user and admin reserves if memory is added or removed. ··· 3241 3233 3242 3234 return 0; 3243 3235 } 3244 - module_init(init_reserve_notifier) 3236 + subsys_initcall(init_reserve_notifier);
+1 -2
mm/mmu_notifier.c
··· 329 329 { 330 330 return init_srcu_struct(&srcu); 331 331 } 332 - 333 - module_init(mmu_notifier_init); 332 + subsys_initcall(mmu_notifier_init);
+19 -6
mm/nobootmem.c
··· 45 45 if (!addr) 46 46 return NULL; 47 47 48 - memblock_reserve(addr, size); 48 + if (memblock_reserve(addr, size)) 49 + return NULL; 50 + 49 51 ptr = phys_to_virt(addr); 50 52 memset(ptr, 0, size); 51 53 /* ··· 116 114 static unsigned long __init free_low_memory_core_early(void) 117 115 { 118 116 unsigned long count = 0; 119 - phys_addr_t start, end, size; 117 + phys_addr_t start, end; 120 118 u64 i; 121 119 122 120 for_each_free_mem_range(i, NUMA_NO_NODE, &start, &end, NULL) 123 121 count += __free_memory_core(start, end); 124 122 125 - /* free range that is used for reserved array if we allocate it */ 126 - size = get_allocated_memblock_reserved_regions_info(&start); 127 - if (size) 128 - count += __free_memory_core(start, start + size); 123 + #ifdef CONFIG_ARCH_DISCARD_MEMBLOCK 124 + { 125 + phys_addr_t size; 126 + 127 + /* Free memblock.reserved array if it was allocated */ 128 + size = get_allocated_memblock_reserved_regions_info(&start); 129 + if (size) 130 + count += __free_memory_core(start, start + size); 131 + 132 + /* Free memblock.memory array if it was allocated */ 133 + size = get_allocated_memblock_memory_regions_info(&start); 134 + if (size) 135 + count += __free_memory_core(start, start + size); 136 + } 137 + #endif 129 138 130 139 return count; 131 140 }
+8 -4
mm/oom_kill.c
··· 327 327 break; 328 328 }; 329 329 points = oom_badness(p, NULL, nodemask, totalpages); 330 - if (points > chosen_points) { 331 - chosen = p; 332 - chosen_points = points; 333 - } 330 + if (!points || points < chosen_points) 331 + continue; 332 + /* Prefer thread group leaders for display purposes */ 333 + if (points == chosen_points && thread_group_leader(chosen)) 334 + continue; 335 + 336 + chosen = p; 337 + chosen_points = points; 334 338 } 335 339 if (chosen) 336 340 get_task_struct(chosen);
+72 -30
mm/page_alloc.c
··· 205 205 }; 206 206 207 207 int min_free_kbytes = 1024; 208 - int user_min_free_kbytes; 208 + int user_min_free_kbytes = -1; 209 209 210 210 static unsigned long __meminitdata nr_kernel_pages; 211 211 static unsigned long __meminitdata nr_all_pages; ··· 295 295 } 296 296 #endif 297 297 298 - static void bad_page(struct page *page) 298 + static void bad_page(struct page *page, char *reason, unsigned long bad_flags) 299 299 { 300 300 static unsigned long resume; 301 301 static unsigned long nr_shown; ··· 329 329 330 330 printk(KERN_ALERT "BUG: Bad page state in process %s pfn:%05lx\n", 331 331 current->comm, page_to_pfn(page)); 332 - dump_page(page); 332 + dump_page_badflags(page, reason, bad_flags); 333 333 334 334 print_modules(); 335 335 dump_stack(); ··· 383 383 int bad = 0; 384 384 385 385 if (unlikely(compound_order(page) != order)) { 386 - bad_page(page); 386 + bad_page(page, "wrong compound order", 0); 387 387 bad++; 388 388 } 389 389 ··· 392 392 for (i = 1; i < nr_pages; i++) { 393 393 struct page *p = page + i; 394 394 395 - if (unlikely(!PageTail(p) || (p->first_page != page))) { 396 - bad_page(page); 395 + if (unlikely(!PageTail(p))) { 396 + bad_page(page, "PageTail not set", 0); 397 + bad++; 398 + } else if (unlikely(p->first_page != page)) { 399 + bad_page(page, "first_page not consistent", 0); 397 400 bad++; 398 401 } 399 402 __ClearPageTail(p); ··· 509 506 return 0; 510 507 511 508 if (page_is_guard(buddy) && page_order(buddy) == order) { 512 - VM_BUG_ON(page_count(buddy) != 0); 509 + VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy); 513 510 return 1; 514 511 } 515 512 516 513 if (PageBuddy(buddy) && page_order(buddy) == order) { 517 - VM_BUG_ON(page_count(buddy) != 0); 514 + VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy); 518 515 return 1; 519 516 } 520 517 return 0; ··· 564 561 565 562 page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1); 566 563 567 - VM_BUG_ON(page_idx & ((1 << order) - 1)); 568 - VM_BUG_ON(bad_range(zone, page)); 564 + 
VM_BUG_ON_PAGE(page_idx & ((1 << order) - 1), page); 565 + VM_BUG_ON_PAGE(bad_range(zone, page), page); 569 566 570 567 while (order < MAX_ORDER-1) { 571 568 buddy_idx = __find_buddy_index(page_idx, order); ··· 621 618 622 619 static inline int free_pages_check(struct page *page) 623 620 { 624 - if (unlikely(page_mapcount(page) | 625 - (page->mapping != NULL) | 626 - (atomic_read(&page->_count) != 0) | 627 - (page->flags & PAGE_FLAGS_CHECK_AT_FREE) | 628 - (mem_cgroup_bad_page_check(page)))) { 629 - bad_page(page); 621 + char *bad_reason = NULL; 622 + unsigned long bad_flags = 0; 623 + 624 + if (unlikely(page_mapcount(page))) 625 + bad_reason = "nonzero mapcount"; 626 + if (unlikely(page->mapping != NULL)) 627 + bad_reason = "non-NULL mapping"; 628 + if (unlikely(atomic_read(&page->_count) != 0)) 629 + bad_reason = "nonzero _count"; 630 + if (unlikely(page->flags & PAGE_FLAGS_CHECK_AT_FREE)) { 631 + bad_reason = "PAGE_FLAGS_CHECK_AT_FREE flag(s) set"; 632 + bad_flags = PAGE_FLAGS_CHECK_AT_FREE; 633 + } 634 + if (unlikely(mem_cgroup_bad_page_check(page))) 635 + bad_reason = "cgroup check failed"; 636 + if (unlikely(bad_reason)) { 637 + bad_page(page, bad_reason, bad_flags); 630 638 return 1; 631 639 } 632 640 page_cpupid_reset_last(page); ··· 827 813 area--; 828 814 high--; 829 815 size >>= 1; 830 - VM_BUG_ON(bad_range(zone, &page[size])); 816 + VM_BUG_ON_PAGE(bad_range(zone, &page[size]), &page[size]); 831 817 832 818 #ifdef CONFIG_DEBUG_PAGEALLOC 833 819 if (high < debug_guardpage_minorder()) { ··· 857 843 */ 858 844 static inline int check_new_page(struct page *page) 859 845 { 860 - if (unlikely(page_mapcount(page) | 861 - (page->mapping != NULL) | 862 - (atomic_read(&page->_count) != 0) | 863 - (page->flags & PAGE_FLAGS_CHECK_AT_PREP) | 864 - (mem_cgroup_bad_page_check(page)))) { 865 - bad_page(page); 846 + char *bad_reason = NULL; 847 + unsigned long bad_flags = 0; 848 + 849 + if (unlikely(page_mapcount(page))) 850 + bad_reason = "nonzero mapcount"; 851 + if 
(unlikely(page->mapping != NULL)) 852 + bad_reason = "non-NULL mapping"; 853 + if (unlikely(atomic_read(&page->_count) != 0)) 854 + bad_reason = "nonzero _count"; 855 + if (unlikely(page->flags & PAGE_FLAGS_CHECK_AT_PREP)) { 856 + bad_reason = "PAGE_FLAGS_CHECK_AT_PREP flag set"; 857 + bad_flags = PAGE_FLAGS_CHECK_AT_PREP; 858 + } 859 + if (unlikely(mem_cgroup_bad_page_check(page))) 860 + bad_reason = "cgroup check failed"; 861 + if (unlikely(bad_reason)) { 862 + bad_page(page, bad_reason, bad_flags); 866 863 return 1; 867 864 } 868 865 return 0; ··· 980 955 981 956 for (page = start_page; page <= end_page;) { 982 957 /* Make sure we are not inadvertently changing nodes */ 983 - VM_BUG_ON(page_to_nid(page) != zone_to_nid(zone)); 958 + VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page); 984 959 985 960 if (!pfn_valid_within(page_to_pfn(page))) { 986 961 page++; ··· 1429 1404 { 1430 1405 int i; 1431 1406 1432 - VM_BUG_ON(PageCompound(page)); 1433 - VM_BUG_ON(!page_count(page)); 1407 + VM_BUG_ON_PAGE(PageCompound(page), page); 1408 + VM_BUG_ON_PAGE(!page_count(page), page); 1434 1409 1435 1410 #ifdef CONFIG_KMEMCHECK 1436 1411 /* ··· 1577 1552 zone_statistics(preferred_zone, zone, gfp_flags); 1578 1553 local_irq_restore(flags); 1579 1554 1580 - VM_BUG_ON(bad_range(zone, page)); 1555 + VM_BUG_ON_PAGE(bad_range(zone, page), page); 1581 1556 if (prep_new_page(page, order, gfp_flags)) 1582 1557 goto again; 1583 1558 return page; ··· 5754 5729 int min_free_kbytes_sysctl_handler(ctl_table *table, int write, 5755 5730 void __user *buffer, size_t *length, loff_t *ppos) 5756 5731 { 5757 - proc_dointvec(table, write, buffer, length, ppos); 5732 + int rc; 5733 + 5734 + rc = proc_dointvec_minmax(table, write, buffer, length, ppos); 5735 + if (rc) 5736 + return rc; 5737 + 5758 5738 if (write) { 5759 5739 user_min_free_kbytes = min_free_kbytes; 5760 5740 setup_per_zone_wmarks(); ··· 6026 5996 pfn = page_to_pfn(page); 6027 5997 bitmap = get_pageblock_bitmap(zone, pfn); 
6028 5998 bitidx = pfn_to_bitidx(zone, pfn); 6029 - VM_BUG_ON(!zone_spans_pfn(zone, pfn)); 5999 + VM_BUG_ON_PAGE(!zone_spans_pfn(zone, pfn), page); 6030 6000 6031 6001 for (; start_bitidx <= end_bitidx; start_bitidx++, value <<= 1) 6032 6002 if (flags & value) ··· 6524 6494 printk(")\n"); 6525 6495 } 6526 6496 6527 - void dump_page(struct page *page) 6497 + void dump_page_badflags(struct page *page, char *reason, unsigned long badflags) 6528 6498 { 6529 6499 printk(KERN_ALERT 6530 6500 "page:%p count:%d mapcount:%d mapping:%p index:%#lx\n", 6531 6501 page, atomic_read(&page->_count), page_mapcount(page), 6532 6502 page->mapping, page->index); 6533 6503 dump_page_flags(page->flags); 6504 + if (reason) 6505 + pr_alert("page dumped because: %s\n", reason); 6506 + if (page->flags & badflags) { 6507 + pr_alert("bad because of flags:\n"); 6508 + dump_page_flags(page->flags & badflags); 6509 + } 6534 6510 mem_cgroup_print_bad_page(page); 6535 6511 } 6512 + 6513 + void dump_page(struct page *page, char *reason) 6514 + { 6515 + dump_page_badflags(page, reason, 0); 6516 + } 6517 + EXPORT_SYMBOL_GPL(dump_page);
+2 -2
mm/page_io.c
··· 320 320 int ret = 0; 321 321 struct swap_info_struct *sis = page_swap_info(page); 322 322 323 - VM_BUG_ON(!PageLocked(page)); 324 - VM_BUG_ON(PageUptodate(page)); 323 + VM_BUG_ON_PAGE(!PageLocked(page), page); 324 + VM_BUG_ON_PAGE(PageUptodate(page), page); 325 325 if (frontswap_load(page) == 0) { 326 326 SetPageUptodate(page); 327 327 unlock_page(page);
+7 -7
mm/rmap.c
··· 848 848 static bool invalid_mkclean_vma(struct vm_area_struct *vma, void *arg) 849 849 { 850 850 if (vma->vm_flags & VM_SHARED) 851 - return 0; 851 + return false; 852 852 853 - return 1; 853 + return true; 854 854 } 855 855 856 856 int page_mkclean(struct page *page) ··· 894 894 { 895 895 struct anon_vma *anon_vma = vma->anon_vma; 896 896 897 - VM_BUG_ON(!PageLocked(page)); 897 + VM_BUG_ON_PAGE(!PageLocked(page), page); 898 898 VM_BUG_ON(!anon_vma); 899 - VM_BUG_ON(page->index != linear_page_index(vma, address)); 899 + VM_BUG_ON_PAGE(page->index != linear_page_index(vma, address), page); 900 900 901 901 anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON; 902 902 page->mapping = (struct address_space *) anon_vma; ··· 995 995 if (unlikely(PageKsm(page))) 996 996 return; 997 997 998 - VM_BUG_ON(!PageLocked(page)); 998 + VM_BUG_ON_PAGE(!PageLocked(page), page); 999 999 /* address might be in next vma when migration races vma_adjust */ 1000 1000 if (first) 1001 1001 __page_set_anon_rmap(page, vma, address, exclusive); ··· 1481 1481 .anon_lock = page_lock_anon_vma_read, 1482 1482 }; 1483 1483 1484 - VM_BUG_ON(!PageHuge(page) && PageTransHuge(page)); 1484 + VM_BUG_ON_PAGE(!PageHuge(page) && PageTransHuge(page), page); 1485 1485 1486 1486 /* 1487 1487 * During exec, a temporary VMA is setup and later moved. ··· 1533 1533 1534 1534 }; 1535 1535 1536 - VM_BUG_ON(!PageLocked(page) || PageLRU(page)); 1536 + VM_BUG_ON_PAGE(!PageLocked(page) || PageLRU(page), page); 1537 1537 1538 1538 ret = rmap_walk(page, &rwc); 1539 1539 return ret;
+4 -4
mm/shmem.c
··· 285 285 { 286 286 int error; 287 287 288 - VM_BUG_ON(!PageLocked(page)); 289 - VM_BUG_ON(!PageSwapBacked(page)); 288 + VM_BUG_ON_PAGE(!PageLocked(page), page); 289 + VM_BUG_ON_PAGE(!PageSwapBacked(page), page); 290 290 291 291 page_cache_get(page); 292 292 page->mapping = mapping; ··· 491 491 continue; 492 492 if (!unfalloc || !PageUptodate(page)) { 493 493 if (page->mapping == mapping) { 494 - VM_BUG_ON(PageWriteback(page)); 494 + VM_BUG_ON_PAGE(PageWriteback(page), page); 495 495 truncate_inode_page(mapping, page); 496 496 } 497 497 } ··· 568 568 lock_page(page); 569 569 if (!unfalloc || !PageUptodate(page)) { 570 570 if (page->mapping == mapping) { 571 - VM_BUG_ON(PageWriteback(page)); 571 + VM_BUG_ON_PAGE(PageWriteback(page), page); 572 572 truncate_inode_page(mapping, page); 573 573 } 574 574 }
+25 -1
mm/slab.h
··· 160 160 return s->name; 161 161 } 162 162 163 + /* 164 + * Note, we protect with RCU only the memcg_caches array, not per-memcg caches. 165 + * That said the caller must assure the memcg's cache won't go away. Since once 166 + * created a memcg's cache is destroyed only along with the root cache, it is 167 + * true if we are going to allocate from the cache or hold a reference to the 168 + * root cache by other means. Otherwise, we should hold either the slab_mutex 169 + * or the memcg's slab_caches_mutex while calling this function and accessing 170 + * the returned value. 171 + */ 163 172 static inline struct kmem_cache * 164 173 cache_from_memcg_idx(struct kmem_cache *s, int idx) 165 174 { 175 + struct kmem_cache *cachep; 176 + struct memcg_cache_params *params; 177 + 166 178 if (!s->memcg_params) 167 179 return NULL; 168 - return s->memcg_params->memcg_caches[idx]; 180 + 181 + rcu_read_lock(); 182 + params = rcu_dereference(s->memcg_params); 183 + cachep = params->memcg_caches[idx]; 184 + rcu_read_unlock(); 185 + 186 + /* 187 + * Make sure we will access the up-to-date value. The code updating 188 + * memcg_caches issues a write barrier to match this (see 189 + * memcg_register_cache()). 190 + */ 191 + smp_read_barrier_depends(); 192 + return cachep; 169 193 } 170 194 171 195 static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
+54 -36
mm/slab_common.c
··· 171 171 struct kmem_cache *parent_cache) 172 172 { 173 173 struct kmem_cache *s = NULL; 174 - int err = 0; 174 + int err; 175 175 176 176 get_online_cpus(); 177 177 mutex_lock(&slab_mutex); 178 178 179 - if (!kmem_cache_sanity_check(memcg, name, size) == 0) 180 - goto out_locked; 179 + err = kmem_cache_sanity_check(memcg, name, size); 180 + if (err) 181 + goto out_unlock; 182 + 183 + if (memcg) { 184 + /* 185 + * Since per-memcg caches are created asynchronously on first 186 + * allocation (see memcg_kmem_get_cache()), several threads can 187 + * try to create the same cache, but only one of them may 188 + * succeed. Therefore if we get here and see the cache has 189 + * already been created, we silently return NULL. 190 + */ 191 + if (cache_from_memcg_idx(parent_cache, memcg_cache_id(memcg))) 192 + goto out_unlock; 193 + } 181 194 182 195 /* 183 196 * Some allocators will constraint the set of valid flags to a subset ··· 202 189 203 190 s = __kmem_cache_alias(memcg, name, size, align, flags, ctor); 204 191 if (s) 205 - goto out_locked; 192 + goto out_unlock; 206 193 194 + err = -ENOMEM; 207 195 s = kmem_cache_zalloc(kmem_cache, GFP_KERNEL); 208 - if (s) { 209 - s->object_size = s->size = size; 210 - s->align = calculate_alignment(flags, align, size); 211 - s->ctor = ctor; 196 + if (!s) 197 + goto out_unlock; 212 198 213 - if (memcg_register_cache(memcg, s, parent_cache)) { 214 - kmem_cache_free(kmem_cache, s); 215 - err = -ENOMEM; 216 - goto out_locked; 217 - } 199 + s->object_size = s->size = size; 200 + s->align = calculate_alignment(flags, align, size); 201 + s->ctor = ctor; 218 202 219 - s->name = kstrdup(name, GFP_KERNEL); 220 - if (!s->name) { 221 - kmem_cache_free(kmem_cache, s); 222 - err = -ENOMEM; 223 - goto out_locked; 224 - } 203 + s->name = kstrdup(name, GFP_KERNEL); 204 + if (!s->name) 205 + goto out_free_cache; 225 206 226 - err = __kmem_cache_create(s, flags); 227 - if (!err) { 228 - s->refcount = 1; 229 - list_add(&s->list, &slab_caches); 230 
- memcg_cache_list_add(memcg, s); 231 - } else { 232 - kfree(s->name); 233 - kmem_cache_free(kmem_cache, s); 234 - } 235 - } else 236 - err = -ENOMEM; 207 + err = memcg_alloc_cache_params(memcg, s, parent_cache); 208 + if (err) 209 + goto out_free_cache; 237 210 238 - out_locked: 211 + err = __kmem_cache_create(s, flags); 212 + if (err) 213 + goto out_free_cache; 214 + 215 + s->refcount = 1; 216 + list_add(&s->list, &slab_caches); 217 + memcg_register_cache(s); 218 + 219 + out_unlock: 239 220 mutex_unlock(&slab_mutex); 240 221 put_online_cpus(); 241 222 242 - if (err) { 243 - 223 + /* 224 + * There is no point in flooding logs with warnings or especially 225 + * crashing the system if we fail to create a cache for a memcg. In 226 + * this case we will be accounting the memcg allocation to the root 227 + * cgroup until we succeed to create its own cache, but it isn't that 228 + * critical. 229 + */ 230 + if (err && !memcg) { 244 231 if (flags & SLAB_PANIC) 245 232 panic("kmem_cache_create: Failed to create slab '%s'. Error %d\n", 246 233 name, err); ··· 249 236 name, err); 250 237 dump_stack(); 251 238 } 252 - 253 239 return NULL; 254 240 } 255 - 256 241 return s; 242 + 243 + out_free_cache: 244 + memcg_free_cache_params(s); 245 + kfree(s->name); 246 + kmem_cache_free(kmem_cache, s); 247 + goto out_unlock; 257 248 } 258 249 259 250 struct kmem_cache * ··· 280 263 list_del(&s->list); 281 264 282 265 if (!__kmem_cache_shutdown(s)) { 266 + memcg_unregister_cache(s); 283 267 mutex_unlock(&slab_mutex); 284 268 if (s->flags & SLAB_DESTROY_BY_RCU) 285 269 rcu_barrier(); 286 270 287 - memcg_release_cache(s); 271 + memcg_free_cache_params(s); 288 272 kfree(s->name); 289 273 kmem_cache_free(kmem_cache, s); 290 274 } else {
+6 -6
mm/slub.c
··· 1559 1559 new.freelist = freelist; 1560 1560 } 1561 1561 1562 - VM_BUG_ON(new.frozen); 1562 + VM_BUG_ON_PAGE(new.frozen, &new); 1563 1563 new.frozen = 1; 1564 1564 1565 1565 if (!__cmpxchg_double_slab(s, page, ··· 1812 1812 set_freepointer(s, freelist, prior); 1813 1813 new.counters = counters; 1814 1814 new.inuse--; 1815 - VM_BUG_ON(!new.frozen); 1815 + VM_BUG_ON_PAGE(!new.frozen, &new); 1816 1816 1817 1817 } while (!__cmpxchg_double_slab(s, page, 1818 1818 prior, counters, ··· 1840 1840 1841 1841 old.freelist = page->freelist; 1842 1842 old.counters = page->counters; 1843 - VM_BUG_ON(!old.frozen); 1843 + VM_BUG_ON_PAGE(!old.frozen, &old); 1844 1844 1845 1845 /* Determine target state of the slab */ 1846 1846 new.counters = old.counters; ··· 1952 1952 1953 1953 old.freelist = page->freelist; 1954 1954 old.counters = page->counters; 1955 - VM_BUG_ON(!old.frozen); 1955 + VM_BUG_ON_PAGE(!old.frozen, &old); 1956 1956 1957 1957 new.counters = old.counters; 1958 1958 new.freelist = old.freelist; ··· 2225 2225 counters = page->counters; 2226 2226 2227 2227 new.counters = counters; 2228 - VM_BUG_ON(!new.frozen); 2228 + VM_BUG_ON_PAGE(!new.frozen, &new); 2229 2229 2230 2230 new.inuse = page->objects; 2231 2231 new.frozen = freelist != NULL; ··· 2319 2319 * page is pointing to the page from which the objects are obtained. 2320 2320 * That page must be frozen for per cpu allocations to work. 2321 2321 */ 2322 - VM_BUG_ON(!c->page->frozen); 2322 + VM_BUG_ON_PAGE(!c->page->frozen, c->page); 2323 2323 c->freelist = get_freepointer(s, freelist); 2324 2324 c->tid = next_tid(c->tid); 2325 2325 local_irq_restore(flags);
+18 -18
mm/swap.c
··· 57 57 58 58 spin_lock_irqsave(&zone->lru_lock, flags); 59 59 lruvec = mem_cgroup_page_lruvec(page, zone); 60 - VM_BUG_ON(!PageLRU(page)); 60 + VM_BUG_ON_PAGE(!PageLRU(page), page); 61 61 __ClearPageLRU(page); 62 62 del_page_from_lru_list(page, lruvec, page_off_lru(page)); 63 63 spin_unlock_irqrestore(&zone->lru_lock, flags); ··· 130 130 * __split_huge_page_refcount cannot race 131 131 * here. 132 132 */ 133 - VM_BUG_ON(!PageHead(page_head)); 134 - VM_BUG_ON(page_mapcount(page) != 0); 133 + VM_BUG_ON_PAGE(!PageHead(page_head), page_head); 134 + VM_BUG_ON_PAGE(page_mapcount(page) != 0, page); 135 135 if (put_page_testzero(page_head)) { 136 136 /* 137 137 * If this is the tail of a slab ··· 148 148 * the compound page enters the buddy 149 149 * allocator. 150 150 */ 151 - VM_BUG_ON(PageSlab(page_head)); 151 + VM_BUG_ON_PAGE(PageSlab(page_head), page_head); 152 152 __put_compound_page(page_head); 153 153 } 154 154 return; ··· 199 199 __put_single_page(page); 200 200 return; 201 201 } 202 - VM_BUG_ON(page_head != page->first_page); 202 + VM_BUG_ON_PAGE(page_head != page->first_page, page); 203 203 /* 204 204 * We can release the refcount taken by 205 205 * get_page_unless_zero() now that ··· 207 207 * compound_lock. 
208 208 */ 209 209 if (put_page_testzero(page_head)) 210 - VM_BUG_ON(1); 210 + VM_BUG_ON_PAGE(1, page_head); 211 211 /* __split_huge_page_refcount will wait now */ 212 - VM_BUG_ON(page_mapcount(page) <= 0); 212 + VM_BUG_ON_PAGE(page_mapcount(page) <= 0, page); 213 213 atomic_dec(&page->_mapcount); 214 - VM_BUG_ON(atomic_read(&page_head->_count) <= 0); 215 - VM_BUG_ON(atomic_read(&page->_count) != 0); 214 + VM_BUG_ON_PAGE(atomic_read(&page_head->_count) <= 0, page_head); 215 + VM_BUG_ON_PAGE(atomic_read(&page->_count) != 0, page); 216 216 compound_unlock_irqrestore(page_head, flags); 217 217 218 218 if (put_page_testzero(page_head)) { ··· 223 223 } 224 224 } else { 225 225 /* page_head is a dangling pointer */ 226 - VM_BUG_ON(PageTail(page)); 226 + VM_BUG_ON_PAGE(PageTail(page), page); 227 227 goto out_put_single; 228 228 } 229 229 } ··· 264 264 * page. __split_huge_page_refcount 265 265 * cannot race here. 266 266 */ 267 - VM_BUG_ON(!PageHead(page_head)); 267 + VM_BUG_ON_PAGE(!PageHead(page_head), page_head); 268 268 __get_page_tail_foll(page, true); 269 269 return true; 270 270 } else { ··· 604 604 */ 605 605 void lru_cache_add(struct page *page) 606 606 { 607 - VM_BUG_ON(PageActive(page) && PageUnevictable(page)); 608 - VM_BUG_ON(PageLRU(page)); 607 + VM_BUG_ON_PAGE(PageActive(page) && PageUnevictable(page), page); 608 + VM_BUG_ON_PAGE(PageLRU(page), page); 609 609 __lru_cache_add(page); 610 610 } 611 611 ··· 846 846 } 847 847 848 848 lruvec = mem_cgroup_page_lruvec(page, zone); 849 - VM_BUG_ON(!PageLRU(page)); 849 + VM_BUG_ON_PAGE(!PageLRU(page), page); 850 850 __ClearPageLRU(page); 851 851 del_page_from_lru_list(page, lruvec, page_off_lru(page)); 852 852 } ··· 888 888 { 889 889 const int file = 0; 890 890 891 - VM_BUG_ON(!PageHead(page)); 892 - VM_BUG_ON(PageCompound(page_tail)); 893 - VM_BUG_ON(PageLRU(page_tail)); 891 + VM_BUG_ON_PAGE(!PageHead(page), page); 892 + VM_BUG_ON_PAGE(PageCompound(page_tail), page); 893 + VM_BUG_ON_PAGE(PageLRU(page_tail), page); 
894 894 VM_BUG_ON(NR_CPUS != 1 && 895 895 !spin_is_locked(&lruvec_zone(lruvec)->lru_lock)); 896 896 ··· 929 929 int active = PageActive(page); 930 930 enum lru_list lru = page_lru(page); 931 931 932 - VM_BUG_ON(PageLRU(page)); 932 + VM_BUG_ON_PAGE(PageLRU(page), page); 933 933 934 934 SetPageLRU(page); 935 935 add_page_to_lru_list(page, lruvec, lru);
+8 -8
mm/swap_state.c
··· 83 83 int error; 84 84 struct address_space *address_space; 85 85 86 - VM_BUG_ON(!PageLocked(page)); 87 - VM_BUG_ON(PageSwapCache(page)); 88 - VM_BUG_ON(!PageSwapBacked(page)); 86 + VM_BUG_ON_PAGE(!PageLocked(page), page); 87 + VM_BUG_ON_PAGE(PageSwapCache(page), page); 88 + VM_BUG_ON_PAGE(!PageSwapBacked(page), page); 89 89 90 90 page_cache_get(page); 91 91 SetPageSwapCache(page); ··· 139 139 swp_entry_t entry; 140 140 struct address_space *address_space; 141 141 142 - VM_BUG_ON(!PageLocked(page)); 143 - VM_BUG_ON(!PageSwapCache(page)); 144 - VM_BUG_ON(PageWriteback(page)); 142 + VM_BUG_ON_PAGE(!PageLocked(page), page); 143 + VM_BUG_ON_PAGE(!PageSwapCache(page), page); 144 + VM_BUG_ON_PAGE(PageWriteback(page), page); 145 145 146 146 entry.val = page_private(page); 147 147 address_space = swap_address_space(entry); ··· 165 165 swp_entry_t entry; 166 166 int err; 167 167 168 - VM_BUG_ON(!PageLocked(page)); 169 - VM_BUG_ON(!PageUptodate(page)); 168 + VM_BUG_ON_PAGE(!PageLocked(page), page); 169 + VM_BUG_ON_PAGE(!PageUptodate(page), page); 170 170 171 171 entry = get_swap_page(); 172 172 if (!entry.val)
+6 -5
mm/swapfile.c
··· 616 616 } 617 617 } 618 618 offset = si->lowest_bit; 619 - while (++offset < scan_base) { 619 + while (offset < scan_base) { 620 620 if (!si->swap_map[offset]) { 621 621 spin_lock(&si->lock); 622 622 goto checks; ··· 629 629 cond_resched(); 630 630 latency_ration = LATENCY_LIMIT; 631 631 } 632 + offset++; 632 633 } 633 634 spin_lock(&si->lock); 634 635 ··· 907 906 { 908 907 int count; 909 908 910 - VM_BUG_ON(!PageLocked(page)); 909 + VM_BUG_ON_PAGE(!PageLocked(page), page); 911 910 if (unlikely(PageKsm(page))) 912 911 return 0; 913 912 count = page_mapcount(page); ··· 927 926 */ 928 927 int try_to_free_swap(struct page *page) 929 928 { 930 - VM_BUG_ON(!PageLocked(page)); 929 + VM_BUG_ON_PAGE(!PageLocked(page), page); 931 930 932 931 if (!PageSwapCache(page)) 933 932 return 0; ··· 2715 2714 */ 2716 2715 struct address_space *__page_file_mapping(struct page *page) 2717 2716 { 2718 - VM_BUG_ON(!PageSwapCache(page)); 2717 + VM_BUG_ON_PAGE(!PageSwapCache(page), page); 2719 2718 return page_swap_info(page)->swap_file->f_mapping; 2720 2719 } 2721 2720 EXPORT_SYMBOL_GPL(__page_file_mapping); ··· 2723 2722 pgoff_t __page_file_index(struct page *page) 2724 2723 { 2725 2724 swp_entry_t swap = { .val = page_private(page) }; 2726 - VM_BUG_ON(!PageSwapCache(page)); 2725 + VM_BUG_ON_PAGE(!PageSwapCache(page), page); 2727 2726 return swp_offset(swap); 2728 2727 } 2729 2728 EXPORT_SYMBOL_GPL(__page_file_index);
+41 -23
mm/vmscan.c
··· 281 281 nr_pages_scanned, lru_pages, 282 282 max_pass, delta, total_scan); 283 283 284 - while (total_scan >= batch_size) { 284 + /* 285 + * Normally, we should not scan less than batch_size objects in one 286 + * pass to avoid too frequent shrinker calls, but if the slab has less 287 + * than batch_size objects in total and we are really tight on memory, 288 + * we will try to reclaim all available objects, otherwise we can end 289 + * up failing allocations although there are plenty of reclaimable 290 + * objects spread over several slabs with usage less than the 291 + * batch_size. 292 + * 293 + * We detect the "tight on memory" situations by looking at the total 294 + * number of objects we want to scan (total_scan). If it is greater 295 + * than the total number of objects on slab (max_pass), we must be 296 + * scanning at high prio and therefore should try to reclaim as much as 297 + * possible. 298 + */ 299 + while (total_scan >= batch_size || 300 + total_scan >= max_pass) { 285 301 unsigned long ret; 302 + unsigned long nr_to_scan = min(batch_size, total_scan); 286 303 287 - shrinkctl->nr_to_scan = batch_size; 304 + shrinkctl->nr_to_scan = nr_to_scan; 288 305 ret = shrinker->scan_objects(shrinker, shrinkctl); 289 306 if (ret == SHRINK_STOP) 290 307 break; 291 308 freed += ret; 292 309 293 - count_vm_events(SLABS_SCANNED, batch_size); 294 - total_scan -= batch_size; 310 + count_vm_events(SLABS_SCANNED, nr_to_scan); 311 + total_scan -= nr_to_scan; 295 312 296 313 cond_resched(); 297 314 } ··· 369 352 } 370 353 371 354 list_for_each_entry(shrinker, &shrinker_list, list) { 372 - for_each_node_mask(shrinkctl->nid, shrinkctl->nodes_to_scan) { 373 - if (!node_online(shrinkctl->nid)) 374 - continue; 375 - 376 - if (!(shrinker->flags & SHRINKER_NUMA_AWARE) && 377 - (shrinkctl->nid != 0)) 378 - break; 379 - 355 + if (!(shrinker->flags & SHRINKER_NUMA_AWARE)) { 356 + shrinkctl->nid = 0; 380 357 freed += shrink_slab_node(shrinkctl, shrinker, 381 - nr_pages_scanned, 
lru_pages); 358 + nr_pages_scanned, lru_pages); 359 + continue; 360 + } 361 + 362 + for_each_node_mask(shrinkctl->nid, shrinkctl->nodes_to_scan) { 363 + if (node_online(shrinkctl->nid)) 364 + freed += shrink_slab_node(shrinkctl, shrinker, 365 + nr_pages_scanned, lru_pages); 382 366 383 367 } 384 368 } ··· 621 603 bool is_unevictable; 622 604 int was_unevictable = PageUnevictable(page); 623 605 624 - VM_BUG_ON(PageLRU(page)); 606 + VM_BUG_ON_PAGE(PageLRU(page), page); 625 607 626 608 redo: 627 609 ClearPageUnevictable(page); ··· 812 794 if (!trylock_page(page)) 813 795 goto keep; 814 796 815 - VM_BUG_ON(PageActive(page)); 816 - VM_BUG_ON(page_zone(page) != zone); 797 + VM_BUG_ON_PAGE(PageActive(page), page); 798 + VM_BUG_ON_PAGE(page_zone(page) != zone, page); 817 799 818 800 sc->nr_scanned++; 819 801 ··· 1097 1079 /* Not a candidate for swapping, so reclaim swap space. */ 1098 1080 if (PageSwapCache(page) && vm_swap_full()) 1099 1081 try_to_free_swap(page); 1100 - VM_BUG_ON(PageActive(page)); 1082 + VM_BUG_ON_PAGE(PageActive(page), page); 1101 1083 SetPageActive(page); 1102 1084 pgactivate++; 1103 1085 keep_locked: 1104 1086 unlock_page(page); 1105 1087 keep: 1106 1088 list_add(&page->lru, &ret_pages); 1107 - VM_BUG_ON(PageLRU(page) || PageUnevictable(page)); 1089 + VM_BUG_ON_PAGE(PageLRU(page) || PageUnevictable(page), page); 1108 1090 } 1109 1091 1110 1092 free_hot_cold_page_list(&free_pages, 1); ··· 1258 1240 page = lru_to_page(src); 1259 1241 prefetchw_prev_lru_page(page, src, flags); 1260 1242 1261 - VM_BUG_ON(!PageLRU(page)); 1243 + VM_BUG_ON_PAGE(!PageLRU(page), page); 1262 1244 1263 1245 switch (__isolate_lru_page(page, mode)) { 1264 1246 case 0: ··· 1313 1295 { 1314 1296 int ret = -EBUSY; 1315 1297 1316 - VM_BUG_ON(!page_count(page)); 1298 + VM_BUG_ON_PAGE(!page_count(page), page); 1317 1299 1318 1300 if (PageLRU(page)) { 1319 1301 struct zone *zone = page_zone(page); ··· 1384 1366 struct page *page = lru_to_page(page_list); 1385 1367 int lru; 1386 1368 
1387 - VM_BUG_ON(PageLRU(page)); 1369 + VM_BUG_ON_PAGE(PageLRU(page), page); 1388 1370 list_del(&page->lru); 1389 1371 if (unlikely(!page_evictable(page))) { 1390 1372 spin_unlock_irq(&zone->lru_lock); ··· 1604 1586 page = lru_to_page(list); 1605 1587 lruvec = mem_cgroup_page_lruvec(page, zone); 1606 1588 1607 - VM_BUG_ON(PageLRU(page)); 1589 + VM_BUG_ON_PAGE(PageLRU(page), page); 1608 1590 SetPageLRU(page); 1609 1591 1610 1592 nr_pages = hpage_nr_pages(page); ··· 3719 3701 if (page_evictable(page)) { 3720 3702 enum lru_list lru = page_lru_base_type(page); 3721 3703 3722 - VM_BUG_ON(PageActive(page)); 3704 + VM_BUG_ON_PAGE(PageActive(page), page); 3723 3705 ClearPageUnevictable(page); 3724 3706 del_page_from_lru_list(page, lruvec, LRU_UNEVICTABLE); 3725 3707 add_page_to_lru_list(page, lruvec, lru);
+2 -2
mm/zswap.c
··· 77 77 **********************************/ 78 78 /* Enable/disable zswap (disabled by default, fixed at boot for now) */ 79 79 static bool zswap_enabled __read_mostly; 80 - module_param_named(enabled, zswap_enabled, bool, 0); 80 + module_param_named(enabled, zswap_enabled, bool, 0444); 81 81 82 82 /* Compressor to be used by zswap (fixed at boot for now) */ 83 83 #define ZSWAP_COMPRESSOR_DEFAULT "lzo" 84 84 static char *zswap_compressor = ZSWAP_COMPRESSOR_DEFAULT; 85 - module_param_named(compressor, zswap_compressor, charp, 0); 85 + module_param_named(compressor, zswap_compressor, charp, 0444); 86 86 87 87 /* The maximum percentage of memory that the compressed pool can occupy */ 88 88 static unsigned int zswap_max_pool_percent = 20;
-1
net/ipv4/tcp_illinois.c
··· 23 23 #define ALPHA_MIN ((3*ALPHA_SCALE)/10) /* ~0.3 */ 24 24 #define ALPHA_MAX (10*ALPHA_SCALE) /* 10.0 */ 25 25 #define ALPHA_BASE ALPHA_SCALE /* 1.0 */ 26 - #define U32_MAX ((u32)~0U) 27 26 #define RTT_MAX (U32_MAX / ALPHA_MAX) /* 3.3 secs */ 28 27 29 28 #define BETA_SHIFT 6
+4 -23
net/netfilter/ipset/ip_set_hash_netiface.c
··· 46 46 static void 47 47 rbtree_destroy(struct rb_root *root) 48 48 { 49 - struct rb_node *p, *n = root->rb_node; 50 - struct iface_node *node; 49 + struct iface_node *node, *next; 51 50 52 - /* Non-recursive destroy, like in ext3 */ 53 - while (n) { 54 - if (n->rb_left) { 55 - n = n->rb_left; 56 - continue; 57 - } 58 - if (n->rb_right) { 59 - n = n->rb_right; 60 - continue; 61 - } 62 - p = rb_parent(n); 63 - node = rb_entry(n, struct iface_node, node); 64 - if (!p) 65 - *root = RB_ROOT; 66 - else if (p->rb_left == n) 67 - p->rb_left = NULL; 68 - else if (p->rb_right == n) 69 - p->rb_right = NULL; 70 - 51 + rbtree_postorder_for_each_entry_safe(node, next, root, node) 71 52 kfree(node); 72 - n = p; 73 - } 53 + 54 + *root = RB_ROOT; 74 55 } 75 56 76 57 static int
+163 -17
scripts/checkpatch.pl
··· 29 29 my $summary_file = 0; 30 30 my $show_types = 0; 31 31 my $fix = 0; 32 + my $fix_inplace = 0; 32 33 my $root; 33 34 my %debug; 34 35 my %camelcase = (); ··· 77 76 "<inputfile>.EXPERIMENTAL-checkpatch-fixes" 78 77 with potential errors corrected to the preferred 79 78 checkpatch style 79 + --fix-inplace EXPERIMENTAL - may create horrible results 80 + Is the same as --fix, but overwrites the input 81 + file. It's your fault if there's no backup or git 80 82 --ignore-perl-version override checking of perl version. expect 81 83 runtime errors. 82 84 -h, --help, --version display this help and exit ··· 135 131 'mailback!' => \$mailback, 136 132 'summary-file!' => \$summary_file, 137 133 'fix!' => \$fix, 134 + 'fix-inplace!' => \$fix_inplace, 138 135 'ignore-perl-version!' => \$ignore_perl_version, 139 136 'debug=s' => \%debug, 140 137 'test-only=s' => \$tst_only, ··· 144 139 ) or help(1); 145 140 146 141 help(0) if ($help); 142 + 143 + $fix = 1 if ($fix_inplace); 147 144 148 145 my $exit = 0; 149 146 ··· 1970 1963 } 1971 1964 1972 1965 # Check for FSF mailing addresses. 1973 - if ($rawline =~ /You should have received a copy/ || 1974 - $rawline =~ /write to the Free Software/ || 1975 - $rawline =~ /59 Temple Place/ || 1976 - $rawline =~ /51 Franklin Street/) { 1966 + if ($rawline =~ /\bwrite to the Free/i || 1967 + $rawline =~ /\b59\s+Temple\s+Pl/i || 1968 + $rawline =~ /\b51\s+Franklin\s+St/i) { 1977 1969 my $herevet = "$here\n" . cat_vet($rawline) . "\n"; 1978 1970 my $msg_type = \&ERROR; 1979 1971 $msg_type = \&CHK if ($file); 1980 1972 &{$msg_type}("FSF_MAILING_ADDRESS", 1981 - "Do not include the paragraph about writing to the Free Software Foundation's mailing address from the sample GPL notice. The FSF has changed addresses in the past, and may do so again. Linux already includes a copy of the GPL.\n" . $herevet) 1973 + "Do not include the paragraph about writing to the Free Software Foundation's mailing address from the sample GPL notice. 
The FSF has changed addresses in the past, and may do so again. Linux already includes a copy of the GPL.\n" . $herevet) 1982 1974 } 1983 1975 1984 1976 # check for Kconfig help text having a real description ··· 2040 2034 "Use of $flag is deprecated, please use \`$replacement->{$flag} instead.\n" . $herecurr) if ($replacement->{$flag}); 2041 2035 } 2042 2036 2037 + # check for DT compatible documentation 2038 + if (defined $root && $realfile =~ /\.dts/ && 2039 + $rawline =~ /^\+\s*compatible\s*=/) { 2040 + my @compats = $rawline =~ /\"([a-zA-Z0-9\-\,\.\+_]+)\"/g; 2041 + 2042 + foreach my $compat (@compats) { 2043 + my $compat2 = $compat; 2044 + my $dt_path = $root . "/Documentation/devicetree/bindings/"; 2045 + $compat2 =~ s/\,[a-z]*\-/\,<\.\*>\-/; 2046 + `grep -Erq "$compat|$compat2" $dt_path`; 2047 + if ( $? >> 8 ) { 2048 + WARN("UNDOCUMENTED_DT_STRING", 2049 + "DT compatible string \"$compat\" appears un-documented -- check $dt_path\n" . $herecurr); 2050 + } 2051 + 2052 + my $vendor = $compat; 2053 + my $vendor_path = $dt_path . "vendor-prefixes.txt"; 2054 + next if (! -f $vendor_path); 2055 + $vendor =~ s/^([a-zA-Z0-9]+)\,.*/$1/; 2056 + `grep -Eq "$vendor" $vendor_path`; 2057 + if ( $? >> 8 ) { 2058 + WARN("UNDOCUMENTED_DT_STRING", 2059 + "DT compatible string vendor \"$vendor\" appears un-documented -- check $vendor_path\n" . $herecurr); 2060 + } 2061 + } 2062 + } 2063 + 2043 2064 # check we are in a valid source file if not then ignore this hunk 2044 2065 next if ($realfile !~ /\.(h|c|s|S|pl|sh)$/); 2045 2066 ··· 2082 2049 } 2083 2050 2084 2051 # Check for user-visible strings broken across lines, which breaks the ability 2085 - # to grep for the string. Limited to strings used as parameters (those 2086 - # following an open parenthesis), which almost completely eliminates false 2087 - # positives, as well as warning only once per parameter rather than once per 2088 - # line of the string. 
Make an exception when the previous string ends in a 2089 - # newline (multiple lines in one string constant) or \n\t (common in inline 2090 - # assembly to indent the instruction on the following line). 2052 + # to grep for the string. Make exceptions when the previous string ends in a 2053 + # newline (multiple lines in one string constant) or '\t', '\r', ';', or '{' 2054 + # (common in inline assembly) or is a octal \123 or hexadecimal \xaf value 2091 2055 if ($line =~ /^\+\s*"/ && 2092 2056 $prevline =~ /"\s*$/ && 2093 - $prevline =~ /\(/ && 2094 - $prevrawline !~ /\\n(?:\\t)*"\s*$/) { 2057 + $prevrawline !~ /(?:\\(?:[ntr]|[0-7]{1,3}|x[0-9a-fA-F]{1,2})|;\s*|\{\s*)"\s*$/) { 2095 2058 WARN("SPLIT_STRING", 2096 2059 "quoted string split across lines\n" . $hereprev); 2097 2060 } ··· 2144 2115 if (WARN("SPACE_BEFORE_TAB", 2145 2116 "please, no space before tabs\n" . $herevet) && 2146 2117 $fix) { 2147 - $fixed[$linenr - 1] =~ 2148 - s/(^\+.*) +\t/$1\t/; 2118 + while ($fixed[$linenr - 1] =~ 2119 + s/(^\+.*) {8,8}+\t/$1\t\t/) {} 2120 + while ($fixed[$linenr - 1] =~ 2121 + s/(^\+.*) +\t/$1\t/) {} 2149 2122 } 2150 2123 } 2151 2124 ··· 2836 2805 } 2837 2806 } 2838 2807 2808 + # Function pointer declarations 2809 + # check spacing between type, funcptr, and args 2810 + # canonical declaration is "type (*funcptr)(args...)" 2811 + # 2812 + # the $Declare variable will capture all spaces after the type 2813 + # so check it for trailing missing spaces or multiple spaces 2814 + if ($line =~ /^.\s*($Declare)\((\s*)\*(\s*)$Ident(\s*)\)(\s*)\(/) { 2815 + my $declare = $1; 2816 + my $pre_pointer_space = $2; 2817 + my $post_pointer_space = $3; 2818 + my $funcname = $4; 2819 + my $post_funcname_space = $5; 2820 + my $pre_args_space = $6; 2821 + 2822 + if ($declare !~ /\s$/) { 2823 + WARN("SPACING", 2824 + "missing space after return type\n" . 
$herecurr); 2825 + } 2826 + 2827 + # unnecessary space "type (*funcptr)(args...)" 2828 + elsif ($declare =~ /\s{2,}$/) { 2829 + WARN("SPACING", 2830 + "Multiple spaces after return type\n" . $herecurr); 2831 + } 2832 + 2833 + # unnecessary space "type ( *funcptr)(args...)" 2834 + if (defined $pre_pointer_space && 2835 + $pre_pointer_space =~ /^\s/) { 2836 + WARN("SPACING", 2837 + "Unnecessary space after function pointer open parenthesis\n" . $herecurr); 2838 + } 2839 + 2840 + # unnecessary space "type (* funcptr)(args...)" 2841 + if (defined $post_pointer_space && 2842 + $post_pointer_space =~ /^\s/) { 2843 + WARN("SPACING", 2844 + "Unnecessary space before function pointer name\n" . $herecurr); 2845 + } 2846 + 2847 + # unnecessary space "type (*funcptr )(args...)" 2848 + if (defined $post_funcname_space && 2849 + $post_funcname_space =~ /^\s/) { 2850 + WARN("SPACING", 2851 + "Unnecessary space after function pointer name\n" . $herecurr); 2852 + } 2853 + 2854 + # unnecessary space "type (*funcptr) (args...)" 2855 + if (defined $pre_args_space && 2856 + $pre_args_space =~ /^\s/) { 2857 + WARN("SPACING", 2858 + "Unnecessary space before function pointer arguments\n" . $herecurr); 2859 + } 2860 + 2861 + if (show_type("SPACING") && $fix) { 2862 + $fixed[$linenr - 1] =~ 2863 + s/^(.\s*$Declare)\(\s*\*\s*($Ident)\s*\)\s*\(/rtrim($1) . " " . "\(\*$2\)\("/ex; 2864 + } 2865 + } 2866 + 2839 2867 # check for spacing round square brackets; allowed: 2840 2868 # 1. with a type on the left -- int [] a; 2841 2869 # 2. at the beginning of a line for slice initialisers -- [0...10] = 5, ··· 3215 3125 } 3216 3126 3217 3127 # check for whitespace before a non-naked semicolon 3218 - if ($line =~ /^\+.*\S\s+;/) { 3128 + if ($line =~ /^\+.*\S\s+;\s*$/) { 3219 3129 if (WARN("SPACING", 3220 3130 "space prohibited before semicolon\n" . 
$herecurr) && 3221 3131 $fix) { ··· 3336 3246 } elsif ($spacing !~ /\s+/) { 3337 3247 ERROR("SPACING", 3338 3248 "space required before the open parenthesis '('\n" . $herecurr); 3249 + } 3250 + } 3251 + 3252 + # if statements using unnecessary parentheses - ie: if ((foo == bar)) 3253 + if ($^V && $^V ge 5.10.0 && 3254 + $line =~ /\bif\s*((?:\(\s*){2,})/) { 3255 + my $openparens = $1; 3256 + my $count = $openparens =~ tr@\(@\(@; 3257 + my $msg = ""; 3258 + if ($line =~ /\bif\s*(?:\(\s*){$count,$count}$LvalOrFunc\s*($Compare)\s*$LvalOrFunc(?:\s*\)){$count,$count}/) { 3259 + my $comp = $4; #Not $1 because of $LvalOrFunc 3260 + $msg = " - maybe == should be = ?" if ($comp eq "=="); 3261 + WARN("UNNECESSARY_PARENTHESES", 3262 + "Unnecessary parentheses$msg\n" . $herecurr); 3339 3263 } 3340 3264 } 3341 3265 ··· 4087 3983 } 4088 3984 } 4089 3985 3986 + # Check for memcpy(foo, bar, ETH_ALEN) that could be ether_addr_copy(foo, bar) 3987 + if ($^V && $^V ge 5.10.0 && 3988 + $line =~ /^\+(?:.*?)\bmemcpy\s*\(\s*$FuncArg\s*,\s*$FuncArg\s*\,\s*ETH_ALEN\s*\)/s) { 3989 + if (WARN("PREFER_ETHER_ADDR_COPY", 3990 + "Prefer ether_addr_copy() over memcpy() if the Ethernet addresses are __aligned(2)\n" . $herecurr) && 3991 + $fix) { 3992 + $fixed[$linenr - 1] =~ s/\bmemcpy\s*\(\s*$FuncArg\s*,\s*$FuncArg\s*\,\s*ETH_ALEN\s*\)/ether_addr_copy($2, $7)/; 3993 + } 3994 + } 3995 + 4090 3996 # typecasts on min/max could be min_t/max_t 4091 3997 if ($^V && $^V ge 5.10.0 && 4092 3998 defined $stat && ··· 4231 4117 "$1 uses number as first arg, sizeof is generally wrong\n" . $herecurr); 4232 4118 } 4233 4119 4120 + # check for GFP_NOWAIT use 4121 + if ($line =~ /\b__GFP_NOFAIL\b/) { 4122 + WARN("__GFP_NOFAIL", 4123 + "Use of __GFP_NOFAIL is deprecated, no new users should be added\n" . $herecurr); 4124 + } 4125 + 4234 4126 # check for multiple semicolons 4235 4127 if ($line =~ /;\s*;\s*$/) { 4236 4128 if (WARN("ONE_SEMICOLON", 4237 4129 "Statements terminations use 1 semicolon\n" . 
$herecurr) && 4238 4130 $fix) { 4239 4131 $fixed[$linenr - 1] =~ s/(\s*;\s*){2,}$/;/g; 4132 + } 4133 + } 4134 + 4135 + # check for case / default statements not preceeded by break/fallthrough/switch 4136 + if ($line =~ /^.\s*(?:case\s+(?:$Ident|$Constant)\s*|default):/) { 4137 + my $has_break = 0; 4138 + my $has_statement = 0; 4139 + my $count = 0; 4140 + my $prevline = $linenr; 4141 + while ($prevline > 1 && $count < 3 && !$has_break) { 4142 + $prevline--; 4143 + my $rline = $rawlines[$prevline - 1]; 4144 + my $fline = $lines[$prevline - 1]; 4145 + last if ($fline =~ /^\@\@/); 4146 + next if ($fline =~ /^\-/); 4147 + next if ($fline =~ /^.(?:\s*(?:case\s+(?:$Ident|$Constant)[\s$;]*|default):[\s$;]*)*$/); 4148 + $has_break = 1 if ($rline =~ /fall[\s_-]*(through|thru)/i); 4149 + next if ($fline =~ /^.[\s$;]*$/); 4150 + $has_statement = 1; 4151 + $count++; 4152 + $has_break = 1 if ($fline =~ /\bswitch\b|\b(?:break\s*;[\s$;]*$|return\b|goto\b|continue\b)/); 4153 + } 4154 + if (!$has_break && $has_statement) { 4155 + WARN("MISSING_BREAK", 4156 + "Possible switch case/default not preceeded by break or fallthrough comment\n" . $herecurr); 4240 4157 } 4241 4158 } 4242 4159 ··· 4506 4361 hash_show_words(\%ignore_type, "Ignored"); 4507 4362 4508 4363 if ($clean == 0 && $fix && "@rawlines" ne "@fixed") { 4509 - my $newfile = $filename . ".EXPERIMENTAL-checkpatch-fixes"; 4364 + my $newfile = $filename; 4365 + $newfile .= ".EXPERIMENTAL-checkpatch-fixes" if (!$fix_inplace); 4510 4366 my $linecount = 0; 4511 4367 my $f; 4512 4368
+85 -6
scripts/get_maintainer.pl
··· 98 98 "available" => '(which("git") ne "") && (-d ".git")', 99 99 "find_signers_cmd" => 100 100 "git log --no-color --follow --since=\$email_git_since " . 101 + '--numstat --no-merges ' . 101 102 '--format="GitCommit: %H%n' . 102 103 'GitAuthor: %an <%ae>%n' . 103 104 'GitDate: %aD%n' . ··· 107 106 " -- \$file", 108 107 "find_commit_signers_cmd" => 109 108 "git log --no-color " . 109 + '--numstat ' . 110 110 '--format="GitCommit: %H%n' . 111 111 'GitAuthor: %an <%ae>%n' . 112 112 'GitDate: %aD%n' . ··· 116 114 " -1 \$commit", 117 115 "find_commit_author_cmd" => 118 116 "git log --no-color " . 117 + '--numstat ' . 119 118 '--format="GitCommit: %H%n' . 120 119 'GitAuthor: %an <%ae>%n' . 121 120 'GitDate: %aD%n' . ··· 128 125 "blame_commit_pattern" => "^([0-9a-f]+) ", 129 126 "author_pattern" => "^GitAuthor: (.*)", 130 127 "subject_pattern" => "^GitSubject: (.*)", 128 + "stat_pattern" => "^(\\d+)\\t(\\d+)\\t\$file\$", 131 129 ); 132 130 133 131 my %VCS_cmds_hg = ( ··· 156 152 "blame_commit_pattern" => "^([ 0-9a-f]+):", 157 153 "author_pattern" => "^HgAuthor: (.*)", 158 154 "subject_pattern" => "^HgSubject: (.*)", 155 + "stat_pattern" => "^(\\d+)\t(\\d+)\t\$file\$", 159 156 ); 160 157 161 158 my $conf = which_conf(".get_maintainer.conf"); ··· 1274 1269 } 1275 1270 1276 1271 sub vcs_find_signers { 1277 - my ($cmd) = @_; 1272 + my ($cmd, $file) = @_; 1278 1273 my $commits; 1279 1274 my @lines = (); 1280 1275 my @signatures = (); 1276 + my @authors = (); 1277 + my @stats = (); 1281 1278 1282 1279 @lines = &{$VCS_cmds{"execute_cmd"}}($cmd); 1283 1280 1284 1281 my $pattern = $VCS_cmds{"commit_pattern"}; 1282 + my $author_pattern = $VCS_cmds{"author_pattern"}; 1283 + my $stat_pattern = $VCS_cmds{"stat_pattern"}; 1284 + 1285 + $stat_pattern =~ s/(\$\w+)/$1/eeg; #interpolate $stat_pattern 1285 1286 1286 1287 $commits = grep(/$pattern/, @lines); # of commits 1287 1288 1289 + @authors = grep(/$author_pattern/, @lines); 1288 1290 @signatures = grep(/^[ 
\t]*${signature_pattern}.*\@.*$/, @lines); 1291 + @stats = grep(/$stat_pattern/, @lines); 1289 1292 1290 - return (0, @signatures) if !@signatures; 1293 + # print("stats: <@stats>\n"); 1294 + 1295 + return (0, \@signatures, \@authors, \@stats) if !@signatures; 1291 1296 1292 1297 save_commits_by_author(@lines) if ($interactive); 1293 1298 save_commits_by_signer(@lines) if ($interactive); ··· 1306 1291 @signatures = grep(!/${penguin_chiefs}/i, @signatures); 1307 1292 } 1308 1293 1294 + my ($author_ref, $authors_ref) = extract_formatted_signatures(@authors); 1309 1295 my ($types_ref, $signers_ref) = extract_formatted_signatures(@signatures); 1310 1296 1311 - return ($commits, @$signers_ref); 1297 + return ($commits, $signers_ref, $authors_ref, \@stats); 1312 1298 } 1313 1299 1314 1300 sub vcs_find_author { ··· 1865 1849 sub vcs_file_signoffs { 1866 1850 my ($file) = @_; 1867 1851 1852 + my $authors_ref; 1853 + my $signers_ref; 1854 + my $stats_ref; 1855 + my @authors = (); 1868 1856 my @signers = (); 1857 + my @stats = (); 1869 1858 my $commits; 1870 1859 1871 1860 $vcs_used = vcs_exists(); ··· 1879 1858 my $cmd = $VCS_cmds{"find_signers_cmd"}; 1880 1859 $cmd =~ s/(\$\w+)/$1/eeg; # interpolate $cmd 1881 1860 1882 - ($commits, @signers) = vcs_find_signers($cmd); 1861 + ($commits, $signers_ref, $authors_ref, $stats_ref) = vcs_find_signers($cmd, $file); 1862 + 1863 + @signers = @{$signers_ref} if defined $signers_ref; 1864 + @authors = @{$authors_ref} if defined $authors_ref; 1865 + @stats = @{$stats_ref} if defined $stats_ref; 1866 + 1867 + # print("commits: <$commits>\nsigners:<@signers>\nauthors: <@authors>\nstats: <@stats>\n"); 1883 1868 1884 1869 foreach my $signer (@signers) { 1885 1870 $signer = deduplicate_email($signer); 1886 1871 } 1887 1872 1888 1873 vcs_assign("commit_signer", $commits, @signers); 1874 + vcs_assign("authored", $commits, @authors); 1875 + if ($#authors == $#stats) { 1876 + my $stat_pattern = $VCS_cmds{"stat_pattern"}; 1877 + $stat_pattern =~ 
s/(\$\w+)/$1/eeg; #interpolate $stat_pattern 1878 + 1879 + my $added = 0; 1880 + my $deleted = 0; 1881 + for (my $i = 0; $i <= $#stats; $i++) { 1882 + if ($stats[$i] =~ /$stat_pattern/) { 1883 + $added += $1; 1884 + $deleted += $2; 1885 + } 1886 + } 1887 + my @tmp_authors = uniq(@authors); 1888 + foreach my $author (@tmp_authors) { 1889 + $author = deduplicate_email($author); 1890 + } 1891 + @tmp_authors = uniq(@tmp_authors); 1892 + my @list_added = (); 1893 + my @list_deleted = (); 1894 + foreach my $author (@tmp_authors) { 1895 + my $auth_added = 0; 1896 + my $auth_deleted = 0; 1897 + for (my $i = 0; $i <= $#stats; $i++) { 1898 + if ($author eq deduplicate_email($authors[$i]) && 1899 + $stats[$i] =~ /$stat_pattern/) { 1900 + $auth_added += $1; 1901 + $auth_deleted += $2; 1902 + } 1903 + } 1904 + for (my $i = 0; $i < $auth_added; $i++) { 1905 + push(@list_added, $author); 1906 + } 1907 + for (my $i = 0; $i < $auth_deleted; $i++) { 1908 + push(@list_deleted, $author); 1909 + } 1910 + } 1911 + vcs_assign("added_lines", $added, @list_added); 1912 + vcs_assign("removed_lines", $deleted, @list_deleted); 1913 + } 1889 1914 } 1890 1915 1891 1916 sub vcs_file_blame { ··· 1954 1887 if ($email_git_blame_signatures) { 1955 1888 if (vcs_is_hg()) { 1956 1889 my $commit_count; 1890 + my $commit_authors_ref; 1891 + my $commit_signers_ref; 1892 + my $stats_ref; 1893 + my @commit_authors = (); 1957 1894 my @commit_signers = (); 1958 1895 my $commit = join(" -r ", @commits); 1959 1896 my $cmd; ··· 1965 1894 $cmd = $VCS_cmds{"find_commit_signers_cmd"}; 1966 1895 $cmd =~ s/(\$\w+)/$1/eeg; #substitute variables in $cmd 1967 1896 1968 - ($commit_count, @commit_signers) = vcs_find_signers($cmd); 1897 + ($commit_count, $commit_signers_ref, $commit_authors_ref, $stats_ref) = vcs_find_signers($cmd, $file); 1898 + @commit_authors = @{$commit_authors_ref} if defined $commit_authors_ref; 1899 + @commit_signers = @{$commit_signers_ref} if defined $commit_signers_ref; 1969 1900 1970 1901 
push(@signers, @commit_signers); 1971 1902 } else { 1972 1903 foreach my $commit (@commits) { 1973 1904 my $commit_count; 1905 + my $commit_authors_ref; 1906 + my $commit_signers_ref; 1907 + my $stats_ref; 1908 + my @commit_authors = (); 1974 1909 my @commit_signers = (); 1975 1910 my $cmd; 1976 1911 1977 1912 $cmd = $VCS_cmds{"find_commit_signers_cmd"}; 1978 1913 $cmd =~ s/(\$\w+)/$1/eeg; #substitute variables in $cmd 1979 1914 1980 - ($commit_count, @commit_signers) = vcs_find_signers($cmd); 1915 + ($commit_count, $commit_signers_ref, $commit_authors_ref, $stats_ref) = vcs_find_signers($cmd, $file); 1916 + @commit_authors = @{$commit_authors_ref} if defined $commit_authors_ref; 1917 + @commit_signers = @{$commit_signers_ref} if defined $commit_signers_ref; 1981 1918 1982 1919 push(@signers, @commit_signers); 1983 1920 }
+5 -1
scripts/headers_check.pl
··· 65 65 66 66 sub check_declarations 67 67 { 68 - if ($line =~m/^(\s*extern|unsigned|char|short|int|long|void)\b/) { 68 + # soundcard.h is what it is 69 + if ($line =~ m/^void seqbuf_dump\(void\);/) { 70 + return; 71 + } 72 + if ($line =~ m/^(\s*extern|unsigned|char|short|int|long|void)\b/) { 69 73 printf STDERR "$filename:$lineno: " . 70 74 "userspace cannot reference function or " . 71 75 "variable defined in the kernel\n";
+5
scripts/sortextable.c
··· 39 39 #define EM_AARCH64 183 40 40 #endif 41 41 42 + #ifndef EM_MICROBLAZE 43 + #define EM_MICROBLAZE 189 44 + #endif 45 + 42 46 static int fd_map; /* File descriptor for file being modified. */ 43 47 static int mmap_failed; /* Boolean flag. */ 44 48 static void *ehdr_curr; /* current ElfXX_Ehdr * for resource cleanup */ ··· 279 275 case EM_ARCOMPACT: 280 276 case EM_ARM: 281 277 case EM_AARCH64: 278 + case EM_MICROBLAZE: 282 279 case EM_MIPS: 283 280 break; 284 281 } /* end switch */
+1
tools/testing/selftests/Makefile
··· 9 9 TARGETS += timers 10 10 TARGETS += vm 11 11 TARGETS += powerpc 12 + TARGETS += user 12 13 13 14 all: 14 15 for TARGET in $(TARGETS); do \
+13
tools/testing/selftests/user/Makefile
··· 1 + # Makefile for user memory selftests 2 + 3 + # No binaries, but make sure arg-less "make" doesn't trigger "run_tests" 4 + all: 5 + 6 + run_tests: all 7 + @if /sbin/modprobe test_user_copy ; then \ 8 + rmmod test_user_copy; \ 9 + echo "user_copy: ok"; \ 10 + else \ 11 + echo "user_copy: [FAIL]"; \ 12 + exit 1; \ 13 + fi