Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branches 'common/dma' and 'sh/stable-updates' into sh-latest

+2581 -554
+56
Documentation/ABI/testing/sysfs-class-backlight-driver-adp8870
··· 1 + What: /sys/class/backlight/<backlight>/<ambient light zone>_max 2 + What: /sys/class/backlight/<backlight>/l1_daylight_max 3 + What: /sys/class/backlight/<backlight>/l2_bright_max 4 + What: /sys/class/backlight/<backlight>/l3_office_max 5 + What: /sys/class/backlight/<backlight>/l4_indoor_max 6 + What: /sys/class/backlight/<backlight>/l5_dark_max 7 + Date: Mai 2011 8 + KernelVersion: 2.6.40 9 + Contact: device-drivers-devel@blackfin.uclinux.org 10 + Description: 11 + Control the maximum brightness for <ambient light zone> 12 + on this <backlight>. Values are between 0 and 127. This file 13 + will also show the brightness level stored for this 14 + <ambient light zone>. 15 + 16 + What: /sys/class/backlight/<backlight>/<ambient light zone>_dim 17 + What: /sys/class/backlight/<backlight>/l2_bright_dim 18 + What: /sys/class/backlight/<backlight>/l3_office_dim 19 + What: /sys/class/backlight/<backlight>/l4_indoor_dim 20 + What: /sys/class/backlight/<backlight>/l5_dark_dim 21 + Date: Mai 2011 22 + KernelVersion: 2.6.40 23 + Contact: device-drivers-devel@blackfin.uclinux.org 24 + Description: 25 + Control the dim brightness for <ambient light zone> 26 + on this <backlight>. Values are between 0 and 127, typically 27 + set to 0. Full off when the backlight is disabled. 28 + This file will also show the dim brightness level stored for 29 + this <ambient light zone>. 30 + 31 + What: /sys/class/backlight/<backlight>/ambient_light_level 32 + Date: Mai 2011 33 + KernelVersion: 2.6.40 34 + Contact: device-drivers-devel@blackfin.uclinux.org 35 + Description: 36 + Get conversion value of the light sensor. 37 + This value is updated every 80 ms (when the light sensor 38 + is enabled). 
Returns integer between 0 (dark) and 39 + 8000 (max ambient brightness) 40 + 41 + What: /sys/class/backlight/<backlight>/ambient_light_zone 42 + Date: Mai 2011 43 + KernelVersion: 2.6.40 44 + Contact: device-drivers-devel@blackfin.uclinux.org 45 + Description: 46 + Get/Set current ambient light zone. Reading returns 47 + integer between 1..5 (1 = daylight, 2 = bright, ..., 5 = dark). 48 + Writing a value between 1..5 forces the backlight controller 49 + to enter the corresponding ambient light zone. 50 + Writing 0 returns to normal/automatic ambient light level 51 + operation. The ambient light sensing feature on these devices 52 + is an extension to the API documented in 53 + Documentation/ABI/stable/sysfs-class-backlight. 54 + It can be enabled by writing the value stored in 55 + /sys/class/backlight/<backlight>/max_brightness to 56 + /sys/class/backlight/<backlight>/brightness.
+2 -2
Documentation/accounting/cgroupstats.txt
··· 21 21 To extract cgroup statistics a utility very similar to getdelays.c 22 22 has been developed, the sample output of the utility is shown below 23 23 24 - ~/balbir/cgroupstats # ./getdelays -C "/cgroup/a" 24 + ~/balbir/cgroupstats # ./getdelays -C "/sys/fs/cgroup/a" 25 25 sleeping 1, blocked 0, running 1, stopped 0, uninterruptible 0 26 - ~/balbir/cgroupstats # ./getdelays -C "/cgroup" 26 + ~/balbir/cgroupstats # ./getdelays -C "/sys/fs/cgroup" 27 27 sleeping 155, blocked 0, running 1, stopped 0, uninterruptible 2
+17 -14
Documentation/cgroups/blkio-controller.txt
··· 28 28 - Enable group scheduling in CFQ 29 29 CONFIG_CFQ_GROUP_IOSCHED=y 30 30 31 - - Compile and boot into kernel and mount IO controller (blkio). 31 + - Compile and boot into kernel and mount IO controller (blkio); see 32 + cgroups.txt, Why are cgroups needed?. 32 33 33 - mount -t cgroup -o blkio none /cgroup 34 + mount -t tmpfs cgroup_root /sys/fs/cgroup 35 + mkdir /sys/fs/cgroup/blkio 36 + mount -t cgroup -o blkio none /sys/fs/cgroup/blkio 34 37 35 38 - Create two cgroups 36 - mkdir -p /cgroup/test1/ /cgroup/test2 39 + mkdir -p /sys/fs/cgroup/blkio/test1/ /sys/fs/cgroup/blkio/test2 37 40 38 41 - Set weights of group test1 and test2 39 - echo 1000 > /cgroup/test1/blkio.weight 40 - echo 500 > /cgroup/test2/blkio.weight 42 + echo 1000 > /sys/fs/cgroup/blkio/test1/blkio.weight 43 + echo 500 > /sys/fs/cgroup/blkio/test2/blkio.weight 41 44 42 45 - Create two same size files (say 512MB each) on same disk (file1, file2) and 43 46 launch two dd threads in different cgroup to read those files. ··· 49 46 echo 3 > /proc/sys/vm/drop_caches 50 47 51 48 dd if=/mnt/sdb/zerofile1 of=/dev/null & 52 - echo $! > /cgroup/test1/tasks 53 - cat /cgroup/test1/tasks 49 + echo $! > /sys/fs/cgroup/blkio/test1/tasks 50 + cat /sys/fs/cgroup/blkio/test1/tasks 54 51 55 52 dd if=/mnt/sdb/zerofile2 of=/dev/null & 56 - echo $! > /cgroup/test2/tasks 57 - cat /cgroup/test2/tasks 53 + echo $! > /sys/fs/cgroup/blkio/test2/tasks 54 + cat /sys/fs/cgroup/blkio/test2/tasks 58 55 59 56 - At macro level, first dd should finish first. To get more precise data, keep 60 57 on looking at (with the help of script), at blkio.disk_time and ··· 71 68 - Enable throttling in block layer 72 69 CONFIG_BLK_DEV_THROTTLING=y 73 70 74 - - Mount blkio controller 75 - mount -t cgroup -o blkio none /cgroup/blkio 71 + - Mount blkio controller (see cgroups.txt, Why are cgroups needed?) 72 + mount -t cgroup -o blkio none /sys/fs/cgroup/blkio 76 73 77 74 - Specify a bandwidth rate on particular device for root group. 
The format 78 75 for policy is "<major>:<minor> <bytes_per_second>". 79 76 80 - echo "8:16 1048576" > /cgroup/blkio/blkio.read_bps_device 77 + echo "8:16 1048576" > /sys/fs/cgroup/blkio/blkio.read_bps_device 81 78 82 79 Above will put a limit of 1MB/second on reads happening for root group 83 80 on device having major/minor number 8:16. ··· 111 108 CFQ and throttling will practically treat all groups at same level. 112 109 113 110 pivot 114 - / | \ \ 111 + / / \ \ 115 112 root test1 test2 test3 116 113 117 114 Down the line we can implement hierarchical accounting/control support ··· 152 149 153 150 Following is the format. 154 151 155 - #echo dev_maj:dev_minor weight > /path/to/cgroup/blkio.weight_device 152 + # echo dev_maj:dev_minor weight > blkio.weight_device 156 153 Configure weight=300 on /dev/sdb (8:16) in this cgroup 157 154 # echo 8:16 300 > blkio.weight_device 158 155 # cat blkio.weight_device
+36 -24
Documentation/cgroups/cgroups.txt
··· 138 138 the admin can easily set up a script which receives exec notifications 139 139 and depending on who is launching the browser he can 140 140 141 - # echo browser_pid > /mnt/<restype>/<userclass>/tasks 141 + # echo browser_pid > /sys/fs/cgroup/<restype>/<userclass>/tasks 142 142 143 143 With only a single hierarchy, he now would potentially have to create 144 144 a separate cgroup for every browser launched and associate it with 145 - approp network and other resource class. This may lead to 145 + appropriate network and other resource class. This may lead to 146 146 proliferation of such cgroups. 147 147 148 148 Also lets say that the administrator would like to give enhanced network ··· 153 153 With ability to write pids directly to resource classes, it's just a 154 154 matter of : 155 155 156 - # echo pid > /mnt/network/<new_class>/tasks 156 + # echo pid > /sys/fs/cgroup/network/<new_class>/tasks 157 157 (after some time) 158 - # echo pid > /mnt/network/<orig_class>/tasks 158 + # echo pid > /sys/fs/cgroup/network/<orig_class>/tasks 159 159 160 160 Without this ability, he would have to split the cgroup into 161 161 multiple separate ones and then associate the new cgroups with the ··· 310 310 To start a new job that is to be contained within a cgroup, using 311 311 the "cpuset" cgroup subsystem, the steps are something like: 312 312 313 - 1) mkdir /dev/cgroup 314 - 2) mount -t cgroup -ocpuset cpuset /dev/cgroup 315 - 3) Create the new cgroup by doing mkdir's and write's (or echo's) in 316 - the /dev/cgroup virtual file system. 317 - 4) Start a task that will be the "founding father" of the new job. 318 - 5) Attach that task to the new cgroup by writing its pid to the 319 - /dev/cgroup tasks file for that cgroup. 320 - 6) fork, exec or clone the job tasks from this founding father task. 
313 + 1) mount -t tmpfs cgroup_root /sys/fs/cgroup 314 + 2) mkdir /sys/fs/cgroup/cpuset 315 + 3) mount -t cgroup -ocpuset cpuset /sys/fs/cgroup/cpuset 316 + 4) Create the new cgroup by doing mkdir's and write's (or echo's) in 317 + the /sys/fs/cgroup virtual file system. 318 + 5) Start a task that will be the "founding father" of the new job. 319 + 6) Attach that task to the new cgroup by writing its pid to the 320 + /sys/fs/cgroup/cpuset/tasks file for that cgroup. 321 + 7) fork, exec or clone the job tasks from this founding father task. 321 322 322 323 For example, the following sequence of commands will setup a cgroup 323 324 named "Charlie", containing just CPUs 2 and 3, and Memory Node 1, 324 325 and then start a subshell 'sh' in that cgroup: 325 326 326 - mount -t cgroup cpuset -ocpuset /dev/cgroup 327 - cd /dev/cgroup 327 + mount -t tmpfs cgroup_root /sys/fs/cgroup 328 + mkdir /sys/fs/cgroup/cpuset 329 + mount -t cgroup cpuset -ocpuset /sys/fs/cgroup/cpuset 330 + cd /sys/fs/cgroup/cpuset 328 331 mkdir Charlie 329 332 cd Charlie 330 333 /bin/echo 2-3 > cpuset.cpus ··· 348 345 virtual filesystem. 349 346 350 347 To mount a cgroup hierarchy with all available subsystems, type: 351 - # mount -t cgroup xxx /dev/cgroup 348 + # mount -t cgroup xxx /sys/fs/cgroup 352 349 353 350 The "xxx" is not interpreted by the cgroup code, but will appear in 354 351 /proc/mounts so may be any useful identifying string that you like. ··· 357 354 if cpusets are enabled the user will have to populate the cpus and mems files 358 355 for each new cgroup created before that group can be used. 359 356 357 + As explained in section `1.2 Why are cgroups needed?' you should create 358 + different hierarchies of cgroups for each single resource or group of 359 + resources you want to control. Therefore, you should mount a tmpfs on 360 + /sys/fs/cgroup and create directories for each cgroup resource or resource 361 + group. 
362 + 363 + # mount -t tmpfs cgroup_root /sys/fs/cgroup 364 + # mkdir /sys/fs/cgroup/rg1 365 + 360 366 To mount a cgroup hierarchy with just the cpuset and memory 361 367 subsystems, type: 362 - # mount -t cgroup -o cpuset,memory hier1 /dev/cgroup 368 + # mount -t cgroup -o cpuset,memory hier1 /sys/fs/cgroup/rg1 363 369 364 370 To change the set of subsystems bound to a mounted hierarchy, just 365 371 remount with different options: 366 - # mount -o remount,cpuset,blkio hier1 /dev/cgroup 372 + # mount -o remount,cpuset,blkio hier1 /sys/fs/cgroup/rg1 367 373 368 374 Now memory is removed from the hierarchy and blkio is added. 369 375 370 376 Note this will add blkio to the hierarchy but won't remove memory or 371 377 cpuset, because the new options are appended to the old ones: 372 - # mount -o remount,blkio /dev/cgroup 378 + # mount -o remount,blkio /sys/fs/cgroup/rg1 373 379 374 380 To Specify a hierarchy's release_agent: 375 381 # mount -t cgroup -o cpuset,release_agent="/sbin/cpuset_release_agent" \ 376 - xxx /dev/cgroup 382 + xxx /sys/fs/cgroup/rg1 377 383 378 384 Note that specifying 'release_agent' more than once will return failure. 379 385 ··· 391 379 the ability to arbitrarily bind/unbind subsystems from an existing 392 380 cgroup hierarchy is intended to be implemented in the future. 393 381 394 - Then under /dev/cgroup you can find a tree that corresponds to the 395 - tree of the cgroups in the system. For instance, /dev/cgroup 382 + Then under /sys/fs/cgroup/rg1 you can find a tree that corresponds to the 383 + tree of the cgroups in the system. For instance, /sys/fs/cgroup/rg1 396 384 is the cgroup that holds the whole system. 397 385 398 386 If you want to change the value of release_agent: 399 - # echo "/sbin/new_release_agent" > /dev/cgroup/release_agent 387 + # echo "/sbin/new_release_agent" > /sys/fs/cgroup/rg1/release_agent 400 388 401 389 It can also be changed via remount. 
402 390 403 - If you want to create a new cgroup under /dev/cgroup: 404 - # cd /dev/cgroup 391 + If you want to create a new cgroup under /sys/fs/cgroup/rg1: 392 + # cd /sys/fs/cgroup/rg1 405 393 # mkdir my_cgroup 406 394 407 395 Now you want to do something with this cgroup.
+9 -10
Documentation/cgroups/cpuacct.txt
··· 10 10 11 11 Accounting groups can be created by first mounting the cgroup filesystem. 12 12 13 - # mkdir /cgroups 14 - # mount -t cgroup -ocpuacct none /cgroups 13 + # mount -t cgroup -ocpuacct none /sys/fs/cgroup 15 14 16 - With the above step, the initial or the parent accounting group 17 - becomes visible at /cgroups. At bootup, this group includes all the 18 - tasks in the system. /cgroups/tasks lists the tasks in this cgroup. 19 - /cgroups/cpuacct.usage gives the CPU time (in nanoseconds) obtained by 20 - this group which is essentially the CPU time obtained by all the tasks 15 + With the above step, the initial or the parent accounting group becomes 16 + visible at /sys/fs/cgroup. At bootup, this group includes all the tasks in 17 + the system. /sys/fs/cgroup/tasks lists the tasks in this cgroup. 18 + /sys/fs/cgroup/cpuacct.usage gives the CPU time (in nanoseconds) obtained 19 + by this group which is essentially the CPU time obtained by all the tasks 21 20 in the system. 22 21 23 - New accounting groups can be created under the parent group /cgroups. 22 + New accounting groups can be created under the parent group /sys/fs/cgroup. 24 23 25 - # cd /cgroups 24 + # cd /sys/fs/cgroup 26 25 # mkdir g1 27 26 # echo $$ > g1 28 27 29 28 The above steps create a new group g1 and move the current shell 30 29 process (bash) into it. CPU time consumed by this bash and its children 31 30 can be obtained from g1/cpuacct.usage and the same is accumulated in 32 - /cgroups/cpuacct.usage also. 31 + /sys/fs/cgroup/cpuacct.usage also. 33 32 34 33 cpuacct.stat file lists a few statistics which further divide the 35 34 CPU time obtained by the cgroup into user and system times. Currently
+14 -14
Documentation/cgroups/cpusets.txt
··· 661 661 662 662 To start a new job that is to be contained within a cpuset, the steps are: 663 663 664 - 1) mkdir /dev/cpuset 665 - 2) mount -t cgroup -ocpuset cpuset /dev/cpuset 664 + 1) mkdir /sys/fs/cgroup/cpuset 665 + 2) mount -t cgroup -ocpuset cpuset /sys/fs/cgroup/cpuset 666 666 3) Create the new cpuset by doing mkdir's and write's (or echo's) in 667 - the /dev/cpuset virtual file system. 667 + the /sys/fs/cgroup/cpuset virtual file system. 668 668 4) Start a task that will be the "founding father" of the new job. 669 669 5) Attach that task to the new cpuset by writing its pid to the 670 - /dev/cpuset tasks file for that cpuset. 670 + /sys/fs/cgroup/cpuset tasks file for that cpuset. 671 671 6) fork, exec or clone the job tasks from this founding father task. 672 672 673 673 For example, the following sequence of commands will setup a cpuset 674 674 named "Charlie", containing just CPUs 2 and 3, and Memory Node 1, 675 675 and then start a subshell 'sh' in that cpuset: 676 676 677 - mount -t cgroup -ocpuset cpuset /dev/cpuset 678 - cd /dev/cpuset 677 + mount -t cgroup -ocpuset cpuset /sys/fs/cgroup/cpuset 678 + cd /sys/fs/cgroup/cpuset 679 679 mkdir Charlie 680 680 cd Charlie 681 681 /bin/echo 2-3 > cpuset.cpus ··· 710 710 virtual filesystem. 711 711 712 712 To mount it, type: 713 - # mount -t cgroup -o cpuset cpuset /dev/cpuset 713 + # mount -t cgroup -o cpuset cpuset /sys/fs/cgroup/cpuset 714 714 715 - Then under /dev/cpuset you can find a tree that corresponds to the 716 - tree of the cpusets in the system. For instance, /dev/cpuset 715 + Then under /sys/fs/cgroup/cpuset you can find a tree that corresponds to the 716 + tree of the cpusets in the system. For instance, /sys/fs/cgroup/cpuset 717 717 is the cpuset that holds the whole system. 
718 718 719 - If you want to create a new cpuset under /dev/cpuset: 720 - # cd /dev/cpuset 719 + If you want to create a new cpuset under /sys/fs/cgroup/cpuset: 720 + # cd /sys/fs/cgroup/cpuset 721 721 # mkdir my_cpuset 722 722 723 723 Now you want to do something with this cpuset. ··· 765 765 766 766 The command 767 767 768 - mount -t cpuset X /dev/cpuset 768 + mount -t cpuset X /sys/fs/cgroup/cpuset 769 769 770 770 is equivalent to 771 771 772 - mount -t cgroup -ocpuset,noprefix X /dev/cpuset 773 - echo "/sbin/cpuset_release_agent" > /dev/cpuset/release_agent 772 + mount -t cgroup -ocpuset,noprefix X /sys/fs/cgroup/cpuset 773 + echo "/sbin/cpuset_release_agent" > /sys/fs/cgroup/cpuset/release_agent 774 774 775 775 2.2 Adding/removing cpus 776 776 ------------------------
+3 -3
Documentation/cgroups/devices.txt
··· 22 22 An entry is added using devices.allow, and removed using 23 23 devices.deny. For instance 24 24 25 - echo 'c 1:3 mr' > /cgroups/1/devices.allow 25 + echo 'c 1:3 mr' > /sys/fs/cgroup/1/devices.allow 26 26 27 27 allows cgroup 1 to read and mknod the device usually known as 28 28 /dev/null. Doing 29 29 30 - echo a > /cgroups/1/devices.deny 30 + echo a > /sys/fs/cgroup/1/devices.deny 31 31 32 32 will remove the default 'a *:* rwm' entry. Doing 33 33 34 - echo a > /cgroups/1/devices.allow 34 + echo a > /sys/fs/cgroup/1/devices.allow 35 35 36 36 will add the 'a *:* rwm' entry to the whitelist. 37 37
+10 -10
Documentation/cgroups/freezer-subsystem.txt
··· 59 59 60 60 * Examples of usage : 61 61 62 - # mkdir /containers 63 - # mount -t cgroup -ofreezer freezer /containers 64 - # mkdir /containers/0 65 - # echo $some_pid > /containers/0/tasks 62 + # mkdir /sys/fs/cgroup/freezer 63 + # mount -t cgroup -ofreezer freezer /sys/fs/cgroup/freezer 64 + # mkdir /sys/fs/cgroup/freezer/0 65 + # echo $some_pid > /sys/fs/cgroup/freezer/0/tasks 66 66 67 67 to get status of the freezer subsystem : 68 68 69 - # cat /containers/0/freezer.state 69 + # cat /sys/fs/cgroup/freezer/0/freezer.state 70 70 THAWED 71 71 72 72 to freeze all tasks in the container : 73 73 74 - # echo FROZEN > /containers/0/freezer.state 75 - # cat /containers/0/freezer.state 74 + # echo FROZEN > /sys/fs/cgroup/freezer/0/freezer.state 75 + # cat /sys/fs/cgroup/freezer/0/freezer.state 76 76 FREEZING 77 - # cat /containers/0/freezer.state 77 + # cat /sys/fs/cgroup/freezer/0/freezer.state 78 78 FROZEN 79 79 80 80 to unfreeze all tasks in the container : 81 81 82 - # echo THAWED > /containers/0/freezer.state 83 - # cat /containers/0/freezer.state 82 + # echo THAWED > /sys/fs/cgroup/freezer/0/freezer.state 83 + # cat /sys/fs/cgroup/freezer/0/freezer.state 84 84 THAWED 85 85 86 86 This is the basic mechanism which should do the right thing for user space task
+39 -19
Documentation/cgroups/memory.txt
··· 1 1 Memory Resource Controller 2 2 3 - NOTE: The Memory Resource Controller has been generically been referred 4 - to as the memory controller in this document. Do not confuse memory 5 - controller used here with the memory controller that is used in hardware. 3 + NOTE: The Memory Resource Controller has generically been referred to as the 4 + memory controller in this document. Do not confuse memory controller 5 + used here with the memory controller that is used in hardware. 6 6 7 7 (For editors) 8 8 In this document: ··· 70 70 (See sysctl's vm.swappiness) 71 71 memory.move_charge_at_immigrate # set/show controls of moving charges 72 72 memory.oom_control # set/show oom controls. 73 + memory.numa_stat # show the number of memory usage per numa node 73 74 74 75 1. History 75 76 ··· 182 181 page will eventually get charged for it (once it is uncharged from 183 182 the cgroup that brought it in -- this will happen on memory pressure). 184 183 185 - Exception: If CONFIG_CGROUP_CGROUP_MEM_RES_CTLR_SWAP is not used.. 184 + Exception: If CONFIG_CGROUP_CGROUP_MEM_RES_CTLR_SWAP is not used. 186 185 When you do swapoff and make swapped-out pages of shmem(tmpfs) to 187 186 be backed into memory in force, charges for pages are accounted against the 188 187 caller of swapoff rather than the users of shmem. ··· 214 213 OS point of view. 215 214 216 215 * What happens when a cgroup hits memory.memsw.limit_in_bytes 217 - When a cgroup his memory.memsw.limit_in_bytes, it's useless to do swap-out 216 + When a cgroup hits memory.memsw.limit_in_bytes, it's useless to do swap-out 218 217 in this cgroup. Then, swap-out will not be done by cgroup routine and file 219 218 caches are dropped. But as mentioned above, global LRU can do swapout memory 220 219 from it for sanity of the system's memory management state. You can't forbid ··· 264 263 c. Enable CONFIG_CGROUP_MEM_RES_CTLR 265 264 d. Enable CONFIG_CGROUP_MEM_RES_CTLR_SWAP (to use swap extension) 266 265 267 - 1. 
Prepare the cgroups 268 - # mkdir -p /cgroups 269 - # mount -t cgroup none /cgroups -o memory 266 + 1. Prepare the cgroups (see cgroups.txt, Why are cgroups needed?) 267 + # mount -t tmpfs none /sys/fs/cgroup 268 + # mkdir /sys/fs/cgroup/memory 269 + # mount -t cgroup none /sys/fs/cgroup/memory -o memory 270 270 271 271 2. Make the new group and move bash into it 272 - # mkdir /cgroups/0 273 - # echo $$ > /cgroups/0/tasks 272 + # mkdir /sys/fs/cgroup/memory/0 273 + # echo $$ > /sys/fs/cgroup/memory/0/tasks 274 274 275 275 Since now we're in the 0 cgroup, we can alter the memory limit: 276 - # echo 4M > /cgroups/0/memory.limit_in_bytes 276 + # echo 4M > /sys/fs/cgroup/memory/0/memory.limit_in_bytes 277 277 278 278 NOTE: We can use a suffix (k, K, m, M, g or G) to indicate values in kilo, 279 279 mega or gigabytes. (Here, Kilo, Mega, Giga are Kibibytes, Mebibytes, Gibibytes.) ··· 282 280 NOTE: We can write "-1" to reset the *.limit_in_bytes(unlimited). 283 281 NOTE: We cannot set limits on the root cgroup any more. 284 282 285 - # cat /cgroups/0/memory.limit_in_bytes 283 + # cat /sys/fs/cgroup/memory/0/memory.limit_in_bytes 286 284 4194304 287 285 288 286 We can check the usage: 289 - # cat /cgroups/0/memory.usage_in_bytes 287 + # cat /sys/fs/cgroup/memory/0/memory.usage_in_bytes 290 288 1216512 291 289 292 290 A successful write to this file does not guarantee a successful set of ··· 466 464 If you want to know more exact memory usage, you should use RSS+CACHE(+SWAP) 467 465 value in memory.stat(see 5.2). 468 466 467 + 5.6 numa_stat 468 + 469 + This is similar to numa_maps but operates on a per-memcg basis. This is 470 + useful for providing visibility into the numa locality information within 471 + an memcg since the pages are allowed to be allocated from any physical 472 + node. One of the usecases is evaluating application performance by 473 + combining this information with the application's cpu allocation. 
474 + 475 + We export "total", "file", "anon" and "unevictable" pages per-node for 476 + each memcg. The output format of memory.numa_stat is: 477 + 478 + total=<total pages> N0=<node 0 pages> N1=<node 1 pages> ... 479 + file=<total file pages> N0=<node 0 pages> N1=<node 1 pages> ... 480 + anon=<total anon pages> N0=<node 0 pages> N1=<node 1 pages> ... 481 + unevictable=<total unevictable pages> N0=<node 0 pages> N1=<node 1 pages> ... 482 + 483 + And we have total = file + anon + unevictable. 484 + 469 485 6. Hierarchy support 470 486 471 487 The memory controller supports a deep hierarchy and hierarchical accounting. ··· 491 471 cgroup filesystem. Consider for example, the following cgroup filesystem 492 472 hierarchy 493 473 494 - root 474 + root 495 475 / | \ 496 - / | \ 497 - a b c 498 - | \ 499 - | \ 500 - d e 476 + / | \ 477 + a b c 478 + | \ 479 + | \ 480 + d e 501 481 502 482 In the diagram above, with hierarchical accounting enabled, all memory 503 483 usage of e, is accounted to its ancestors up until the root (i.e, c and root),
-17
Documentation/feature-removal-schedule.txt
··· 481 481 482 482 ---------------------------- 483 483 484 - What: namespace cgroup (ns_cgroup) 485 - When: 2.6.38 486 - Why: The ns_cgroup leads to some problems: 487 - * cgroup creation is out-of-control 488 - * cgroup name can conflict when pids are looping 489 - * it is not possible to have a single process handling 490 - a lot of namespaces without falling in a exponential creation time 491 - * we may want to create a namespace without creating a cgroup 492 - 493 - The ns_cgroup is replaced by a compatibility flag 'clone_children', 494 - where a newly created cgroup will copy the parent cgroup values. 495 - The userspace has to manually create a cgroup and add a task to 496 - the 'tasks' file. 497 - Who: Daniel Lezcano <daniel.lezcano@free.fr> 498 - 499 - ---------------------------- 500 - 501 484 What: iwlwifi disable_hw_scan module parameters 502 485 When: 2.6.40 503 486 Why: Hardware scan is the preferred method for iwlwifi devices for
+3 -1
Documentation/kmemleak.txt
··· 11 11 reported via /sys/kernel/debug/kmemleak. A similar method is used by the 12 12 Valgrind tool (memcheck --leak-check) to detect the memory leaks in 13 13 user-space applications. 14 - Kmemleak is supported on x86, arm, powerpc, sparc, sh, microblaze and tile. 14 + 15 + Please check DEBUG_KMEMLEAK dependencies in lib/Kconfig.debug for supported 16 + architectures. 15 17 16 18 Usage 17 19 -----
+1 -1
Documentation/md.txt
··· 555 555 sync_min 556 556 sync_max 557 557 The two values, given as numbers of sectors, indicate a range 558 - withing the array where 'check'/'repair' will operate. Must be 558 + within the array where 'check'/'repair' will operate. Must be 559 559 a multiple of chunk_size. When it reaches "sync_max" it will 560 560 pause, rather than complete. 561 561 You can use 'select' or 'poll' on "sync_completed" to wait for
+117 -2
Documentation/printk-formats.txt
··· 9 9 size_t %zu or %zx 10 10 ssize_t %zd or %zx 11 11 12 - Raw pointer value SHOULD be printed with %p. 12 + Raw pointer value SHOULD be printed with %p. The kernel supports 13 + the following extended format specifiers for pointer types: 14 + 15 + Symbols/Function Pointers: 16 + 17 + %pF versatile_init+0x0/0x110 18 + %pf versatile_init 19 + %pS versatile_init+0x0/0x110 20 + %ps versatile_init 21 + %pB prev_fn_of_versatile_init+0x88/0x88 22 + 23 + For printing symbols and function pointers. The 'S' and 's' specifiers 24 + result in the symbol name with ('S') or without ('s') offsets. Where 25 + this is used on a kernel without KALLSYMS - the symbol address is 26 + printed instead. 27 + 28 + The 'B' specifier results in the symbol name with offsets and should be 29 + used when printing stack backtraces. The specifier takes into 30 + consideration the effect of compiler optimisations which may occur 31 + when tail-call's are used and marked with the noreturn GCC attribute. 32 + 33 + On ia64, ppc64 and parisc64 architectures function pointers are 34 + actually function descriptors which must first be resolved. The 'F' and 35 + 'f' specifiers perform this resolution and then provide the same 36 + functionality as the 'S' and 's' specifiers. 37 + 38 + Kernel Pointers: 39 + 40 + %pK 0x01234567 or 0x0123456789abcdef 41 + 42 + For printing kernel pointers which should be hidden from unprivileged 43 + users. The behaviour of %pK depends on the kptr_restrict sysctl - see 44 + Documentation/sysctl/kernel.txt for more details. 45 + 46 + Struct Resources: 47 + 48 + %pr [mem 0x60000000-0x6fffffff flags 0x2200] or 49 + [mem 0x0000000060000000-0x000000006fffffff flags 0x2200] 50 + %pR [mem 0x60000000-0x6fffffff pref] or 51 + [mem 0x0000000060000000-0x000000006fffffff pref] 52 + 53 + For printing struct resources. The 'R' and 'r' specifiers result in a 54 + printed resource with ('R') or without ('r') a decoded flags member. 
55 + 56 + MAC/FDDI addresses: 57 + 58 + %pM 00:01:02:03:04:05 59 + %pMF 00-01-02-03-04-05 60 + %pm 000102030405 61 + 62 + For printing 6-byte MAC/FDDI addresses in hex notation. The 'M' and 'm' 63 + specifiers result in a printed address with ('M') or without ('m') byte 64 + separators. The default byte separator is the colon (':'). 65 + 66 + Where FDDI addresses are concerned the 'F' specifier can be used after 67 + the 'M' specifier to use dash ('-') separators instead of the default 68 + separator. 69 + 70 + IPv4 addresses: 71 + 72 + %pI4 1.2.3.4 73 + %pi4 001.002.003.004 74 + %p[Ii][hnbl] 75 + 76 + For printing IPv4 dot-separated decimal addresses. The 'I4' and 'i4' 77 + specifiers result in a printed address with ('i4') or without ('I4') 78 + leading zeros. 79 + 80 + The additional 'h', 'n', 'b', and 'l' specifiers are used to specify 81 + host, network, big or little endian order addresses respectively. Where 82 + no specifier is provided the default network/big endian order is used. 83 + 84 + IPv6 addresses: 85 + 86 + %pI6 0001:0002:0003:0004:0005:0006:0007:0008 87 + %pi6 00010002000300040005000600070008 88 + %pI6c 1:2:3:4:5:6:7:8 89 + 90 + For printing IPv6 network-order 16-bit hex addresses. The 'I6' and 'i6' 91 + specifiers result in a printed address with ('I6') or without ('i6') 92 + colon-separators. Leading zeros are always used. 93 + 94 + The additional 'c' specifier can be used with the 'I' specifier to 95 + print a compressed IPv6 address as described by 96 + http://tools.ietf.org/html/rfc5952 97 + 98 + UUID/GUID addresses: 99 + 100 + %pUb 00010203-0405-0607-0809-0a0b0c0d0e0f 101 + %pUB 00010203-0405-0607-0809-0A0B0C0D0E0F 102 + %pUl 03020100-0504-0706-0809-0a0b0c0e0e0f 103 + %pUL 03020100-0504-0706-0809-0A0B0C0E0E0F 104 + 105 + For printing 16-byte UUID/GUIDs addresses. 
The additional 'l', 'L', 106 + 'b' and 'B' specifiers are used to specify a little endian order in 107 + lower ('l') or upper case ('L') hex characters - and big endian order 108 + in lower ('b') or upper case ('B') hex characters. 109 + 110 + Where no additional specifiers are used the default little endian 111 + order with lower case hex characters will be printed. 112 + 113 + struct va_format: 114 + 115 + %pV 116 + 117 + For printing struct va_format structures. These contain a format string 118 + and va_list as follows: 119 + 120 + struct va_format { 121 + const char *fmt; 122 + va_list *va; 123 + }; 124 + 125 + Do not use this feature without some mechanism to verify the 126 + correctness of the format string and va_list arguments. 13 127 14 128 u64 SHOULD be printed with %llu/%llx, (unsigned long long): 15 129 ··· 146 32 Thank you for your cooperation and attention. 147 33 148 34 149 - By Randy Dunlap <rdunlap@xenotime.net> 35 + By Randy Dunlap <rdunlap@xenotime.net> and 36 + Andrew Murray <amurray@mpc-data.co.uk>
+4 -3
Documentation/scheduler/sched-design-CFS.txt
··· 223 223 group created using the pseudo filesystem. See example steps below to create 224 224 task groups and modify their CPU share using the "cgroups" pseudo filesystem. 225 225 226 - # mkdir /dev/cpuctl 227 - # mount -t cgroup -ocpu none /dev/cpuctl 228 - # cd /dev/cpuctl 226 + # mount -t tmpfs cgroup_root /sys/fs/cgroup 227 + # mkdir /sys/fs/cgroup/cpu 228 + # mount -t cgroup -ocpu none /sys/fs/cgroup/cpu 229 + # cd /sys/fs/cgroup/cpu 229 230 230 231 # mkdir multimedia # create "multimedia" group of tasks 231 232 # mkdir browser # create "browser" group of tasks
+3 -4
Documentation/scheduler/sched-rt-group.txt
··· 129 129 Enabling CONFIG_RT_GROUP_SCHED lets you explicitly allocate real 130 130 CPU bandwidth to task groups. 131 131 132 - This uses the /cgroup virtual file system and 133 - "/cgroup/<cgroup>/cpu.rt_runtime_us" to control the CPU time reserved for each 134 - control group. 132 + This uses the cgroup virtual file system and "<cgroup>/cpu.rt_runtime_us" 133 + to control the CPU time reserved for each control group. 135 134 136 135 For more information on working with control groups, you should read 137 136 Documentation/cgroups/cgroups.txt as well. ··· 149 150 =============== 150 151 151 152 There is work in progress to make the scheduling period for each group 152 - ("/cgroup/<cgroup>/cpu.rt_period_us") configurable as well. 153 + ("<cgroup>/cpu.rt_period_us") configurable as well. 153 154 154 155 The constraint on the period is that a subgroup must have a smaller or 155 156 equal period to its parent. But realistically its not very useful _yet_
+3 -3
Documentation/vm/hwpoison.txt
··· 129 129 of the memcg. 130 130 131 131 Example: 132 - mkdir /cgroup/hwpoison 132 + mkdir /sys/fs/cgroup/mem/hwpoison 133 133 134 134 usemem -m 100 -s 1000 & 135 - echo `jobs -p` > /cgroup/hwpoison/tasks 135 + echo `jobs -p` > /sys/fs/cgroup/mem/hwpoison/tasks 136 136 137 - memcg_ino=$(ls -id /cgroup/hwpoison | cut -f1 -d' ') 137 + memcg_ino=$(ls -id /sys/fs/cgroup/mem/hwpoison | cut -f1 -d' ') 138 138 echo $memcg_ino > /debug/hwpoison/corrupt-filter-memcg 139 139 140 140 page-types -p `pidof init` --hwpoison # shall do nothing
+17 -4
MAINTAINERS
··· 1889 1889 W: http://www.codemonkey.org.uk/projects/cpufreq/ 1890 1890 T: git git://git.kernel.org/pub/scm/linux/kernel/git/davej/cpufreq.git 1891 1891 S: Maintained 1892 - F: arch/x86/kernel/cpu/cpufreq/ 1893 1892 F: drivers/cpufreq/ 1894 1893 F: include/linux/cpufreq.h 1895 1894 ··· 3819 3820 F: drivers/leds/ 3820 3821 F: include/linux/leds.h 3821 3822 3823 + LEGACY EEPROM DRIVER 3824 + M: Jean Delvare <khali@linux-fr.org> 3825 + S: Maintained 3826 + F: Documentation/misc-devices/eeprom 3827 + F: drivers/misc/eeprom/eeprom.c 3828 + 3822 3829 LEGO USB Tower driver 3823 3830 M: Juergen Stuber <starblue@users.sourceforge.net> 3824 3831 L: legousb-devel@lists.sourceforge.net ··· 4150 4145 F: mm/ 4151 4146 4152 4147 MEMORY RESOURCE CONTROLLER 4153 - M: Balbir Singh <balbir@linux.vnet.ibm.com> 4148 + M: Balbir Singh <bsingharora@gmail.com> 4154 4149 M: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp> 4155 4150 M: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com> 4156 4151 L: linux-mm@kvack.org ··· 4895 4890 F: arch/*/include/asm/percpu.h 4896 4891 4897 4892 PER-TASK DELAY ACCOUNTING 4898 - M: Balbir Singh <balbir@linux.vnet.ibm.com> 4893 + M: Balbir Singh <bsingharora@gmail.com> 4899 4894 S: Maintained 4900 4895 F: include/linux/delayacct.h 4901 4896 F: kernel/delayacct.c ··· 6103 6098 F: Documentation/target/ 6104 6099 6105 6100 TASKSTATS STATISTICS INTERFACE 6106 - M: Balbir Singh <balbir@linux.vnet.ibm.com> 6101 + M: Balbir Singh <bsingharora@gmail.com> 6107 6102 S: Maintained 6108 6103 F: Documentation/accounting/taskstats* 6109 6104 F: include/linux/taskstats* ··· 6721 6716 S: Maintained 6722 6717 F: Documentation/filesystems/vfat.txt 6723 6718 F: fs/fat/ 6719 + 6720 + VIDEOBUF2 FRAMEWORK 6721 + M: Pawel Osciak <pawel@osciak.com> 6722 + M: Marek Szyprowski <m.szyprowski@samsung.com> 6723 + L: linux-media@vger.kernel.org 6724 + S: Maintained 6725 + F: drivers/media/video/videobuf2-* 6726 + F: include/media/videobuf2-* 6724 6727 6725 6728 VIRTIO CONSOLE DRIVER 
6726 6729 M: Amit Shah <amit.shah@redhat.com>
+7 -4
arch/alpha/kernel/osf_sys.c
··· 409 409 return -EFAULT; 410 410 411 411 len = namelen; 412 - if (namelen > 32) 412 + if (len > 32) 413 413 len = 32; 414 414 415 415 down_read(&uts_sem); ··· 594 594 down_read(&uts_sem); 595 595 res = sysinfo_table[offset]; 596 596 len = strlen(res)+1; 597 - if (len > count) 597 + if ((unsigned long)len > (unsigned long)count) 598 598 len = count; 599 599 if (copy_to_user(buf, res, len)) 600 600 err = -EFAULT; ··· 649 649 return 1; 650 650 651 651 case GSI_GET_HWRPB: 652 - if (nbytes < sizeof(*hwrpb)) 652 + if (nbytes > sizeof(*hwrpb)) 653 653 return -EINVAL; 654 654 if (copy_to_user(buffer, hwrpb, nbytes) != 0) 655 655 return -EFAULT; ··· 1008 1008 { 1009 1009 struct rusage r; 1010 1010 long ret, err; 1011 + unsigned int status = 0; 1011 1012 mm_segment_t old_fs; 1012 1013 1013 1014 if (!ur) ··· 1017 1016 old_fs = get_fs(); 1018 1017 1019 1018 set_fs (KERNEL_DS); 1020 - ret = sys_wait4(pid, ustatus, options, (struct rusage __user *) &r); 1019 + ret = sys_wait4(pid, (unsigned int __user *) &status, options, 1020 + (struct rusage __user *) &r); 1021 1021 set_fs (old_fs); 1022 1022 1023 1023 if (!access_ok(VERIFY_WRITE, ur, sizeof(*ur))) 1024 1024 return -EFAULT; 1025 1025 1026 1026 err = 0; 1027 + err |= put_user(status, ustatus); 1027 1028 err |= __put_user(r.ru_utime.tv_sec, &ur->ru_utime.tv_sec); 1028 1029 err |= __put_user(r.ru_utime.tv_usec, &ur->ru_utime.tv_usec); 1029 1030 err |= __put_user(r.ru_stime.tv_sec, &ur->ru_stime.tv_sec);
+3 -3
arch/arm/boot/compressed/head.S
··· 691 691 692 692 .word 0x41069260 @ ARM926EJ-S (v5TEJ) 693 693 .word 0xff0ffff0 694 - b __arm926ejs_mmu_cache_on 695 - b __armv4_mmu_cache_off 696 - b __armv5tej_mmu_cache_flush 694 + W(b) __arm926ejs_mmu_cache_on 695 + W(b) __armv4_mmu_cache_off 696 + W(b) __armv5tej_mmu_cache_flush 697 697 698 698 .word 0x00007000 @ ARM7 IDs 699 699 .word 0x0000f000
+1 -1
arch/arm/configs/davinci_all_defconfig
··· 157 157 CONFIG_LEDS_TRIGGERS=y 158 158 CONFIG_LEDS_TRIGGER_TIMER=m 159 159 CONFIG_LEDS_TRIGGER_HEARTBEAT=m 160 - CONFIG_RTC_CLASS=m 160 + CONFIG_RTC_CLASS=y 161 161 CONFIG_EXT2_FS=y 162 162 CONFIG_EXT3_FS=y 163 163 CONFIG_XFS_FS=m
+1 -1
arch/arm/configs/netx_defconfig
··· 60 60 # CONFIG_VGA_CONSOLE is not set 61 61 CONFIG_FRAMEBUFFER_CONSOLE=y 62 62 CONFIG_LOGO=y 63 - CONFIG_RTC_CLASS=m 63 + CONFIG_RTC_CLASS=y 64 64 CONFIG_INOTIFY=y 65 65 CONFIG_TMPFS=y 66 66 CONFIG_JFFS2_FS=y
+1 -1
arch/arm/configs/viper_defconfig
··· 142 142 CONFIG_USB_FILE_STORAGE=m 143 143 CONFIG_USB_G_SERIAL=m 144 144 CONFIG_USB_G_PRINTER=m 145 - CONFIG_RTC_CLASS=m 145 + CONFIG_RTC_CLASS=y 146 146 CONFIG_RTC_DRV_DS1307=m 147 147 CONFIG_RTC_DRV_SA1100=m 148 148 CONFIG_EXT2_FS=m
+1 -1
arch/arm/configs/xcep_defconfig
··· 73 73 # CONFIG_VGA_CONSOLE is not set 74 74 # CONFIG_HID_SUPPORT is not set 75 75 # CONFIG_USB_SUPPORT is not set 76 - CONFIG_RTC_CLASS=m 76 + CONFIG_RTC_CLASS=y 77 77 CONFIG_RTC_DRV_SA1100=m 78 78 CONFIG_DMADEVICES=y 79 79 # CONFIG_DNOTIFY is not set
+1 -1
arch/arm/configs/zeus_defconfig
··· 158 158 CONFIG_LEDS_TRIGGER_BACKLIGHT=m 159 159 CONFIG_LEDS_TRIGGER_GPIO=m 160 160 CONFIG_LEDS_TRIGGER_DEFAULT_ON=m 161 - CONFIG_RTC_CLASS=m 161 + CONFIG_RTC_CLASS=y 162 162 CONFIG_RTC_DRV_ISL1208=m 163 163 CONFIG_RTC_DRV_PXA=m 164 164 CONFIG_EXT2_FS=y
+3
arch/arm/kernel/devtree.c
··· 76 76 unsigned long dt_root; 77 77 const char *model; 78 78 79 + if (!dt_phys) 80 + return NULL; 81 + 79 82 devtree = phys_to_virt(dt_phys); 80 83 81 84 /* check device tree validity */
+5 -1
arch/arm/kernel/entry-armv.S
··· 435 435 usr_entry 436 436 kuser_cmpxchg_check 437 437 438 + #ifdef CONFIG_IRQSOFF_TRACER 439 + bl trace_hardirqs_off 440 + #endif 441 + 438 442 get_thread_info tsk 439 443 #ifdef CONFIG_PREEMPT 440 444 ldr r8, [tsk, #TI_PREEMPT] @ get preempt count ··· 457 453 #endif 458 454 459 455 mov why, #0 460 - b ret_to_user 456 + b ret_to_user_from_irq 461 457 UNWIND(.fnend ) 462 458 ENDPROC(__irq_usr) 463 459
+2
arch/arm/kernel/entry-common.S
··· 64 64 ENTRY(ret_to_user) 65 65 ret_slow_syscall: 66 66 disable_irq @ disable interrupts 67 + ENTRY(ret_to_user_from_irq) 67 68 ldr r1, [tsk, #TI_FLAGS] 68 69 tst r1, #_TIF_WORK_MASK 69 70 bne work_pending ··· 76 75 arch_ret_to_user r1, lr 77 76 78 77 restore_user_regs fast = 0, offset = 0 78 + ENDPROC(ret_to_user_from_irq) 79 79 ENDPROC(ret_to_user) 80 80 81 81 /*
+2 -2
arch/arm/kernel/traps.c
··· 139 139 fs = get_fs(); 140 140 set_fs(KERNEL_DS); 141 141 142 - for (i = -4; i < 1; i++) { 142 + for (i = -4; i < 1 + !!thumb; i++) { 143 143 unsigned int val, bad; 144 144 145 145 if (thumb) ··· 563 563 if (!pmd_present(*pmd)) 564 564 goto bad_access; 565 565 pte = pte_offset_map_lock(mm, pmd, addr, &ptl); 566 - if (!pte_present(*pte) || !pte_dirty(*pte)) { 566 + if (!pte_present(*pte) || !pte_write(*pte) || !pte_dirty(*pte)) { 567 567 pte_unmap_unlock(pte, ptl); 568 568 goto bad_access; 569 569 }
+1 -1
arch/arm/mach-davinci/devices-da8xx.c
··· 494 494 .resource = da850_mcasp_resources, 495 495 }; 496 496 497 - struct platform_device davinci_pcm_device = { 497 + static struct platform_device davinci_pcm_device = { 498 498 .name = "davinci-pcm-audio", 499 499 .id = -1, 500 500 };
+1 -1
arch/arm/mach-davinci/devices.c
··· 298 298 299 299 /*-------------------------------------------------------------------------*/ 300 300 301 - struct platform_device davinci_pcm_device = { 301 + static struct platform_device davinci_pcm_device = { 302 302 .name = "davinci-pcm-audio", 303 303 .id = -1, 304 304 };
+4 -3
arch/arm/mach-davinci/gpio.c
··· 252 252 static void 253 253 gpio_irq_handler(unsigned irq, struct irq_desc *desc) 254 254 { 255 - struct davinci_gpio_regs __iomem *g = irq2regs(irq); 255 + struct davinci_gpio_regs __iomem *g; 256 256 u32 mask = 0xffff; 257 + 258 + g = (__force struct davinci_gpio_regs __iomem *) irq_desc_get_handler_data(desc); 257 259 258 260 /* we only care about one bank */ 259 261 if (irq & 1) ··· 424 422 425 423 /* set up all irqs in this bank */ 426 424 irq_set_chained_handler(bank_irq, gpio_irq_handler); 427 - irq_set_chip_data(bank_irq, (__force void *)g); 428 - irq_set_handler_data(bank_irq, (void *)irq); 425 + irq_set_handler_data(bank_irq, (__force void *)g); 429 426 430 427 for (i = 0; i < 16 && gpio < ngpio; i++, irq++, gpio++) { 431 428 irq_set_chip(irq, &gpio_irqchip);
+1
arch/arm/mach-footbridge/dc21285-timer.c
··· 103 103 clockevents_calc_mult_shift(ce, mem_fclk_21285, 5); 104 104 ce->max_delta_ns = clockevent_delta2ns(0xffffff, ce); 105 105 ce->min_delta_ns = clockevent_delta2ns(0x000004, ce); 106 + ce->cpumask = cpumask_of(smp_processor_id()); 106 107 107 108 clockevents_register_device(ce); 108 109 }
+3 -2
arch/arm/mach-footbridge/include/mach/debug-macro.S
··· 26 26 #include <asm/hardware/debug-8250.S> 27 27 28 28 #else 29 + #include <mach/hardware.h> 29 30 /* For EBSA285 debugging */ 30 31 .equ dc21285_high, ARMCSR_BASE & 0xff000000 31 32 .equ dc21285_low, ARMCSR_BASE & 0x00ffffff ··· 37 36 .else 38 37 mov \rp, #0 39 38 .endif 40 - orr \rv, \rp, #0x42000000 41 - orr \rp, \rp, #dc21285_high 39 + orr \rv, \rp, #dc21285_high 40 + orr \rp, \rp, #0x42000000 42 41 .endm 43 42 44 43 .macro senduart,rd,rx
+2
arch/arm/mach-mxs/ocotp.c
··· 16 16 #include <linux/err.h> 17 17 #include <linux/mutex.h> 18 18 19 + #include <asm/processor.h> /* for cpu_relax() */ 20 + 19 21 #include <mach/mxs.h> 20 22 21 23 #define OCOTP_WORD_OFFSET 0x20
+1 -1
arch/arm/mach-u300/clock.h
··· 31 31 bool reset; 32 32 __u16 clk_val; 33 33 __s8 usecount; 34 - __u32 res_reg; 34 + void __iomem * res_reg; 35 35 __u16 res_mask; 36 36 37 37 bool hw_ctrld;
+10 -12
arch/arm/mach-u300/include/mach/u300-regs.h
··· 18 18 * the defines are used for setting up the I/O memory mapping. 19 19 */ 20 20 21 + #ifdef __ASSEMBLER__ 22 + #define IOMEM(a) (a) 23 + #else 24 + #define IOMEM(a) (void __iomem *) a 25 + #endif 26 + 21 27 /* NAND Flash CS0 */ 22 28 #define U300_NAND_CS0_PHYS_BASE 0x80000000 23 29 ··· 54 48 #endif 55 49 56 50 /* 57 - * All the following peripherals are specified at their PHYSICAL address, 58 - * so if you need to access them (in the kernel), you MUST use the macros 59 - * defined in <asm/io.h> to map to the IO_ADDRESS_AHB() IO_ADDRESS_FAST() 60 - * etc. 61 - */ 62 - 63 - /* 64 51 * AHB peripherals 65 52 */ 66 53 ··· 62 63 63 64 /* Vectored Interrupt Controller 0, servicing 32 interrupts */ 64 65 #define U300_INTCON0_BASE (U300_AHB_PER_PHYS_BASE+0x1000) 65 - #define U300_INTCON0_VBASE (U300_AHB_PER_VIRT_BASE+0x1000) 66 + #define U300_INTCON0_VBASE IOMEM(U300_AHB_PER_VIRT_BASE+0x1000) 66 67 67 68 /* Vectored Interrupt Controller 1, servicing 32 interrupts */ 68 69 #define U300_INTCON1_BASE (U300_AHB_PER_PHYS_BASE+0x2000) 69 - #define U300_INTCON1_VBASE (U300_AHB_PER_VIRT_BASE+0x2000) 70 + #define U300_INTCON1_VBASE IOMEM(U300_AHB_PER_VIRT_BASE+0x2000) 70 71 71 72 /* Memory Stick Pro (MSPRO) controller */ 72 73 #define U300_MSPRO_BASE (U300_AHB_PER_PHYS_BASE+0x3000) ··· 114 115 115 116 /* SYSCON */ 116 117 #define U300_SYSCON_BASE (U300_SLOW_PER_PHYS_BASE+0x1000) 117 - #define U300_SYSCON_VBASE (U300_SLOW_PER_VIRT_BASE+0x1000) 118 + #define U300_SYSCON_VBASE IOMEM(U300_SLOW_PER_VIRT_BASE+0x1000) 118 119 119 120 /* Watchdog */ 120 121 #define U300_WDOG_BASE (U300_SLOW_PER_PHYS_BASE+0x2000) ··· 124 125 125 126 /* APP side special timer */ 126 127 #define U300_TIMER_APP_BASE (U300_SLOW_PER_PHYS_BASE+0x4000) 127 - #define U300_TIMER_APP_VBASE (U300_SLOW_PER_VIRT_BASE+0x4000) 128 + #define U300_TIMER_APP_VBASE IOMEM(U300_SLOW_PER_VIRT_BASE+0x4000) 128 129 129 130 /* Keypad */ 130 131 #define U300_KEYPAD_BASE (U300_SLOW_PER_PHYS_BASE+0x5000) ··· 179 180 /* 180 181 * 
Virtual accessor macros for static devices 181 182 */ 182 - 183 183 184 184 #endif
+1 -2
arch/arm/mach-u300/timer.c
··· 411 411 /* Use general purpose timer 2 as clock source */ 412 412 if (clocksource_mmio_init(U300_TIMER_APP_VBASE + U300_TIMER_APP_GPT2CC, 413 413 "GPT2", rate, 300, 32, clocksource_mmio_readl_up)) 414 - printk(KERN_ERR "timer: failed to initialize clock " 415 - "source %s\n", clocksource_u300_1mhz.name); 414 + pr_err("timer: failed to initialize U300 clock source\n"); 416 415 417 416 clockevents_calc_mult_shift(&clockevent_u300_1mhz, 418 417 rate, APPTIMER_MIN_RANGE);
+7 -8
arch/arm/mach-vexpress/v2m.c
··· 46 46 }, 47 47 }; 48 48 49 - static void __init v2m_init_early(void) 50 - { 51 - ct_desc->init_early(); 52 - versatile_sched_clock_init(MMIO_P2V(V2M_SYS_24MHZ), 24000000); 53 - } 54 - 55 49 static void __init v2m_timer_init(void) 56 50 { 57 51 u32 scctrl; ··· 359 365 }, 360 366 }; 361 367 368 + static void __init v2m_init_early(void) 369 + { 370 + ct_desc->init_early(); 371 + clkdev_add_table(v2m_lookups, ARRAY_SIZE(v2m_lookups)); 372 + versatile_sched_clock_init(MMIO_P2V(V2M_SYS_24MHZ), 24000000); 373 + } 374 + 362 375 static void v2m_power_off(void) 363 376 { 364 377 if (v2m_cfg_write(SYS_CFG_SHUTDOWN | SYS_CFG_SITE_MB, 0)) ··· 418 417 static void __init v2m_init(void) 419 418 { 420 419 int i; 421 - 422 - clkdev_add_table(v2m_lookups, ARRAY_SIZE(v2m_lookups)); 423 420 424 421 platform_device_register(&v2m_pcie_i2c_device); 425 422 platform_device_register(&v2m_ddc_i2c_device);
+8 -9
arch/arm/mm/context.c
··· 24 24 25 25 /* 26 26 * We fork()ed a process, and we need a new context for the child 27 - * to run in. 27 + * to run in. We reserve version 0 for initial tasks so we will 28 + * always allocate an ASID. The ASID 0 is reserved for the TTBR 29 + * register changing sequence. 28 30 */ 29 31 void __init_new_context(struct task_struct *tsk, struct mm_struct *mm) 30 32 { ··· 36 34 37 35 static void flush_context(void) 38 36 { 39 - u32 ttb; 40 - /* Copy TTBR1 into TTBR0 */ 41 - asm volatile("mrc p15, 0, %0, c2, c0, 1\n" 42 - "mcr p15, 0, %0, c2, c0, 0" 43 - : "=r" (ttb)); 37 + /* set the reserved ASID before flushing the TLB */ 38 + asm("mcr p15, 0, %0, c13, c0, 1\n" : : "r" (0)); 44 39 isb(); 45 40 local_flush_tlb_all(); 46 41 if (icache_is_vivt_asid_tagged()) { ··· 93 94 return; 94 95 95 96 smp_rmb(); 96 - asid = cpu_last_asid + cpu; 97 + asid = cpu_last_asid + cpu + 1; 97 98 98 99 flush_context(); 99 100 set_mm_context(mm, asid); ··· 143 144 * to start a new version and flush the TLB. 144 145 */ 145 146 if (unlikely((asid & ~ASID_MASK) == 0)) { 146 - asid = cpu_last_asid + smp_processor_id(); 147 + asid = cpu_last_asid + smp_processor_id() + 1; 147 148 flush_context(); 148 149 #ifdef CONFIG_SMP 149 150 smp_wmb(); 150 151 smp_call_function(reset_context, NULL, 1); 151 152 #endif 152 - cpu_last_asid += NR_CPUS - 1; 153 + cpu_last_asid += NR_CPUS; 153 154 } 154 155 155 156 set_mm_context(mm, asid);
+10 -2
arch/arm/mm/init.c
··· 331 331 #endif 332 332 #ifdef CONFIG_BLK_DEV_INITRD 333 333 if (phys_initrd_size && 334 + !memblock_is_region_memory(phys_initrd_start, phys_initrd_size)) { 335 + pr_err("INITRD: 0x%08lx+0x%08lx is not a memory region - disabling initrd\n", 336 + phys_initrd_start, phys_initrd_size); 337 + phys_initrd_start = phys_initrd_size = 0; 338 + } 339 + if (phys_initrd_size && 334 340 memblock_is_region_reserved(phys_initrd_start, phys_initrd_size)) { 335 341 pr_err("INITRD: 0x%08lx+0x%08lx overlaps in-use memory region - disabling initrd\n", 336 342 phys_initrd_start, phys_initrd_size); ··· 641 635 " modules : 0x%08lx - 0x%08lx (%4ld MB)\n" 642 636 " .init : 0x%p" " - 0x%p" " (%4d kB)\n" 643 637 " .text : 0x%p" " - 0x%p" " (%4d kB)\n" 644 - " .data : 0x%p" " - 0x%p" " (%4d kB)\n", 638 + " .data : 0x%p" " - 0x%p" " (%4d kB)\n" 639 + " .bss : 0x%p" " - 0x%p" " (%4d kB)\n", 645 640 646 641 MLK(UL(CONFIG_VECTORS_BASE), UL(CONFIG_VECTORS_BASE) + 647 642 (PAGE_SIZE)), ··· 664 657 665 658 MLK_ROUNDUP(__init_begin, __init_end), 666 659 MLK_ROUNDUP(_text, _etext), 667 - MLK_ROUNDUP(_sdata, _edata)); 660 + MLK_ROUNDUP(_sdata, _edata), 661 + MLK_ROUNDUP(__bss_start, __bss_stop)); 668 662 669 663 #undef MLK 670 664 #undef MLM
+1 -1
arch/arm/mm/proc-arm7tdmi.S
··· 146 146 .long 0 147 147 .long 0 148 148 .long v4_cache_fns 149 - .size __arm7tdmi_proc_info, . - __arm7dmi_proc_info 149 + .size __arm7tdmi_proc_info, . - __arm7tdmi_proc_info 150 150 151 151 .type __triscenda7_proc_info, #object 152 152 __triscenda7_proc_info:
+1 -1
arch/arm/mm/proc-arm9tdmi.S
··· 116 116 .long 0 117 117 .long 0 118 118 .long v4_cache_fns 119 - .size __arm9tdmi_proc_info, . - __arm9dmi_proc_info 119 + .size __arm9tdmi_proc_info, . - __arm9tdmi_proc_info 120 120 121 121 .type __p2001_proc_info, #object 122 122 __p2001_proc_info:
+6 -4
arch/arm/mm/proc-v7.S
··· 108 108 #ifdef CONFIG_ARM_ERRATA_430973 109 109 mcr p15, 0, r2, c7, c5, 6 @ flush BTAC/BTB 110 110 #endif 111 - mrc p15, 0, r2, c2, c0, 1 @ load TTB 1 112 - mcr p15, 0, r2, c2, c0, 0 @ into TTB 0 111 + #ifdef CONFIG_ARM_ERRATA_754322 112 + dsb 113 + #endif 114 + mcr p15, 0, r2, c13, c0, 1 @ set reserved context ID 115 + isb 116 + 1: mcr p15, 0, r0, c2, c0, 0 @ set TTB 0 113 117 isb 114 118 #ifdef CONFIG_ARM_ERRATA_754322 115 119 dsb 116 120 #endif 117 121 mcr p15, 0, r1, c13, c0, 1 @ set context ID 118 - isb 119 - mcr p15, 0, r0, c2, c0, 0 @ set TTB 0 120 122 isb 121 123 #endif 122 124 mov pc, lr
+4 -2
arch/arm/plat-mxc/devices/platform-imx-dma.c
··· 139 139 #endif 140 140 141 141 #ifdef CONFIG_SOC_IMX51 142 - static struct sdma_script_start_addrs addr_imx51_to1 = { 142 + static struct sdma_script_start_addrs addr_imx51 = { 143 143 .ap_2_ap_addr = 642, 144 144 .uart_2_mcu_addr = 817, 145 145 .mcu_2_app_addr = 747, ··· 196 196 197 197 #if defined(CONFIG_SOC_IMX51) 198 198 if (cpu_is_mx51()) { 199 - imx51_imx_sdma_data.pdata.script_addrs = &addr_imx51_to1; 199 + int to_version = mx51_revision() >> 4; 200 + imx51_imx_sdma_data.pdata.to_version = to_version; 201 + imx51_imx_sdma_data.pdata.script_addrs = &addr_imx51; 200 202 ret = imx_add_imx_sdma(&imx51_imx_sdma_data); 201 203 } else 202 204 #endif
+1
arch/avr32/configs/atngw100_defconfig
··· 5 5 CONFIG_LOG_BUF_SHIFT=14 6 6 CONFIG_RELAY=y 7 7 CONFIG_BLK_DEV_INITRD=y 8 + CONFIG_CC_OPTIMIZE_FOR_SIZE=y 8 9 # CONFIG_SYSCTL_SYSCALL is not set 9 10 # CONFIG_BASE_FULL is not set 10 11 # CONFIG_COMPAT_BRK is not set
+1
arch/avr32/configs/atngw100_evklcd100_defconfig
··· 5 5 CONFIG_LOG_BUF_SHIFT=14 6 6 CONFIG_RELAY=y 7 7 CONFIG_BLK_DEV_INITRD=y 8 + CONFIG_CC_OPTIMIZE_FOR_SIZE=y 8 9 # CONFIG_SYSCTL_SYSCALL is not set 9 10 # CONFIG_BASE_FULL is not set 10 11 # CONFIG_COMPAT_BRK is not set
+1
arch/avr32/configs/atngw100_evklcd101_defconfig
··· 5 5 CONFIG_LOG_BUF_SHIFT=14 6 6 CONFIG_RELAY=y 7 7 CONFIG_BLK_DEV_INITRD=y 8 + CONFIG_CC_OPTIMIZE_FOR_SIZE=y 8 9 # CONFIG_SYSCTL_SYSCALL is not set 9 10 # CONFIG_BASE_FULL is not set 10 11 # CONFIG_COMPAT_BRK is not set
+2 -1
arch/avr32/configs/atngw100_mrmt_defconfig
··· 7 7 CONFIG_LOG_BUF_SHIFT=14 8 8 CONFIG_SYSFS_DEPRECATED_V2=y 9 9 CONFIG_BLK_DEV_INITRD=y 10 + CONFIG_CC_OPTIMIZE_FOR_SIZE=y 10 11 # CONFIG_SYSCTL_SYSCALL is not set 11 12 # CONFIG_BASE_FULL is not set 12 13 # CONFIG_SLUB_DEBUG is not set ··· 110 109 CONFIG_LEDS_TRIGGERS=y 111 110 CONFIG_LEDS_TRIGGER_TIMER=y 112 111 CONFIG_LEDS_TRIGGER_HEARTBEAT=y 113 - CONFIG_RTC_CLASS=m 112 + CONFIG_RTC_CLASS=y 114 113 CONFIG_RTC_DRV_S35390A=m 115 114 CONFIG_RTC_DRV_AT32AP700X=m 116 115 CONFIG_DMADEVICES=y
+1
arch/avr32/configs/atngw100mkii_defconfig
··· 5 5 CONFIG_LOG_BUF_SHIFT=14 6 6 CONFIG_RELAY=y 7 7 CONFIG_BLK_DEV_INITRD=y 8 + CONFIG_CC_OPTIMIZE_FOR_SIZE=y 8 9 # CONFIG_SYSCTL_SYSCALL is not set 9 10 # CONFIG_BASE_FULL is not set 10 11 # CONFIG_COMPAT_BRK is not set
+1
arch/avr32/configs/atngw100mkii_evklcd100_defconfig
··· 5 5 CONFIG_LOG_BUF_SHIFT=14 6 6 CONFIG_RELAY=y 7 7 CONFIG_BLK_DEV_INITRD=y 8 + CONFIG_CC_OPTIMIZE_FOR_SIZE=y 8 9 # CONFIG_SYSCTL_SYSCALL is not set 9 10 # CONFIG_BASE_FULL is not set 10 11 # CONFIG_COMPAT_BRK is not set
+1
arch/avr32/configs/atngw100mkii_evklcd101_defconfig
··· 5 5 CONFIG_LOG_BUF_SHIFT=14 6 6 CONFIG_RELAY=y 7 7 CONFIG_BLK_DEV_INITRD=y 8 + CONFIG_CC_OPTIMIZE_FOR_SIZE=y 8 9 # CONFIG_SYSCTL_SYSCALL is not set 9 10 # CONFIG_BASE_FULL is not set 10 11 # CONFIG_COMPAT_BRK is not set
+1
arch/avr32/configs/atstk1002_defconfig
··· 5 5 CONFIG_LOG_BUF_SHIFT=14 6 6 CONFIG_RELAY=y 7 7 CONFIG_BLK_DEV_INITRD=y 8 + CONFIG_CC_OPTIMIZE_FOR_SIZE=y 8 9 # CONFIG_SYSCTL_SYSCALL is not set 9 10 # CONFIG_BASE_FULL is not set 10 11 # CONFIG_COMPAT_BRK is not set
+1
arch/avr32/configs/atstk1003_defconfig
··· 5 5 CONFIG_LOG_BUF_SHIFT=14 6 6 CONFIG_RELAY=y 7 7 CONFIG_BLK_DEV_INITRD=y 8 + CONFIG_CC_OPTIMIZE_FOR_SIZE=y 8 9 # CONFIG_SYSCTL_SYSCALL is not set 9 10 # CONFIG_BASE_FULL is not set 10 11 # CONFIG_COMPAT_BRK is not set
+1
arch/avr32/configs/atstk1004_defconfig
··· 5 5 CONFIG_LOG_BUF_SHIFT=14 6 6 CONFIG_RELAY=y 7 7 CONFIG_BLK_DEV_INITRD=y 8 + CONFIG_CC_OPTIMIZE_FOR_SIZE=y 8 9 # CONFIG_SYSCTL_SYSCALL is not set 9 10 # CONFIG_BASE_FULL is not set 10 11 # CONFIG_COMPAT_BRK is not set
+1
arch/avr32/configs/atstk1006_defconfig
··· 5 5 CONFIG_LOG_BUF_SHIFT=14 6 6 CONFIG_RELAY=y 7 7 CONFIG_BLK_DEV_INITRD=y 8 + CONFIG_CC_OPTIMIZE_FOR_SIZE=y 8 9 # CONFIG_SYSCTL_SYSCALL is not set 9 10 # CONFIG_BASE_FULL is not set 10 11 # CONFIG_COMPAT_BRK is not set
+1
arch/avr32/configs/favr-32_defconfig
··· 6 6 CONFIG_SYSFS_DEPRECATED_V2=y 7 7 CONFIG_RELAY=y 8 8 CONFIG_BLK_DEV_INITRD=y 9 + CONFIG_CC_OPTIMIZE_FOR_SIZE=y 9 10 # CONFIG_SYSCTL_SYSCALL is not set 10 11 # CONFIG_BASE_FULL is not set 11 12 # CONFIG_COMPAT_BRK is not set
+1
arch/avr32/configs/hammerhead_defconfig
··· 7 7 CONFIG_LOG_BUF_SHIFT=14 8 8 CONFIG_SYSFS_DEPRECATED_V2=y 9 9 CONFIG_BLK_DEV_INITRD=y 10 + CONFIG_CC_OPTIMIZE_FOR_SIZE=y 10 11 # CONFIG_SYSCTL_SYSCALL is not set 11 12 # CONFIG_BASE_FULL is not set 12 13 # CONFIG_COMPAT_BRK is not set
+1
arch/avr32/configs/merisc_defconfig
··· 7 7 CONFIG_LOG_BUF_SHIFT=14 8 8 CONFIG_SYSFS_DEPRECATED_V2=y 9 9 CONFIG_BLK_DEV_INITRD=y 10 + CONFIG_CC_OPTIMIZE_FOR_SIZE=y 10 11 # CONFIG_SYSCTL_SYSCALL is not set 11 12 # CONFIG_BASE_FULL is not set 12 13 CONFIG_MODULES=y
+1
arch/avr32/configs/mimc200_defconfig
··· 7 7 CONFIG_LOG_BUF_SHIFT=14 8 8 CONFIG_SYSFS_DEPRECATED_V2=y 9 9 CONFIG_BLK_DEV_INITRD=y 10 + CONFIG_CC_OPTIMIZE_FOR_SIZE=y 10 11 # CONFIG_SYSCTL_SYSCALL is not set 11 12 # CONFIG_BASE_FULL is not set 12 13 # CONFIG_COMPAT_BRK is not set
-1
arch/avr32/include/asm/processor.h
··· 131 131 */ 132 132 #define start_thread(regs, new_pc, new_sp) \ 133 133 do { \ 134 - set_fs(USER_DS); \ 135 134 memset(regs, 0, sizeof(*regs)); \ 136 135 regs->sr = MODE_USER; \ 137 136 regs->pc = new_pc & ~1; \
+2 -1
arch/avr32/mach-at32ap/at32ap700x.c
··· 1043 1043 data->regs = (void __iomem *)pdev->resource[0].start; 1044 1044 } 1045 1045 1046 + pdev->id = line; 1046 1047 pdata = pdev->dev.platform_data; 1047 - pdata->num = portnr; 1048 + pdata->num = line; 1048 1049 at32_usarts[line] = pdev; 1049 1050 } 1050 1051
+12
arch/avr32/mach-at32ap/include/mach/cpu.h
··· 31 31 #define cpu_is_at91sam9263() (0) 32 32 #define cpu_is_at91sam9rl() (0) 33 33 #define cpu_is_at91cap9() (0) 34 + #define cpu_is_at91cap9_revB() (0) 35 + #define cpu_is_at91cap9_revC() (0) 34 36 #define cpu_is_at91sam9g10() (0) 37 + #define cpu_is_at91sam9g20() (0) 35 38 #define cpu_is_at91sam9g45() (0) 36 39 #define cpu_is_at91sam9g45es() (0) 40 + #define cpu_is_at91sam9m10() (0) 41 + #define cpu_is_at91sam9g46() (0) 42 + #define cpu_is_at91sam9m11() (0) 43 + #define cpu_is_at91sam9x5() (0) 44 + #define cpu_is_at91sam9g15() (0) 45 + #define cpu_is_at91sam9g35() (0) 46 + #define cpu_is_at91sam9x35() (0) 47 + #define cpu_is_at91sam9g25() (0) 48 + #define cpu_is_at91sam9x25() (0) 37 49 38 50 #endif /* __ASM_ARCH_CPU_H */
+1 -3
arch/avr32/mach-at32ap/intc.c
··· 167 167 return 0; 168 168 } 169 169 170 - static int intc_resume(void) 170 + static void intc_resume(void) 171 171 { 172 172 int i; 173 173 174 174 for (i = 0; i < 64; i++) 175 175 intc_writel(&intc0, INTPR0 + 4 * i, intc0.saved_ipr[i]); 176 - 177 - return 0; 178 176 } 179 177 #else 180 178 #define intc_suspend NULL
+1 -1
arch/blackfin/configs/CM-BF548_defconfig
··· 112 112 CONFIG_USB_G_PRINTER=m 113 113 CONFIG_MMC=m 114 114 CONFIG_SDH_BFIN=m 115 - CONFIG_RTC_CLASS=m 115 + CONFIG_RTC_CLASS=y 116 116 CONFIG_RTC_DRV_BFIN=m 117 117 CONFIG_EXT2_FS=m 118 118 # CONFIG_DNOTIFY is not set
+45 -7
arch/m68k/Kconfig.nommu
··· 14 14 bool 15 15 default n 16 16 17 + config M68000 18 + bool 19 + help 20 + The Freescale (was Motorola) 68000 CPU is the first generation of 21 + the well known M68K family of processors. The CPU core as well as 22 + being available as a stand alone CPU was also used in many 23 + System-On-Chip devices (eg 68328, 68302, etc). It does not contain 24 + a paging MMU. 25 + 26 + config MCPU32 27 + bool 28 + help 29 + The Freescale (was then Motorola) CPU32 is a CPU core that is 30 + based on the 68020 processor. For the most part it is used in 31 + System-On-Chip parts, and does not contain a paging MMU. 32 + 33 + config COLDFIRE 34 + bool 35 + select GENERIC_GPIO 36 + select ARCH_REQUIRE_GPIOLIB 37 + help 38 + The Freescale ColdFire family of processors is a modern derivitive 39 + of the 68000 processor family. They are mainly targeted at embedded 40 + applications, and are all System-On-Chip (SOC) devices, as opposed 41 + to stand alone CPUs. They implement a subset of the original 68000 42 + processor instruction set. 43 + 17 44 config COLDFIRE_SW_A7 18 45 bool 19 46 default n ··· 63 36 64 37 config M68328 65 38 bool "MC68328" 39 + select M68000 66 40 help 67 41 Motorola 68328 processor support. 68 42 69 43 config M68EZ328 70 44 bool "MC68EZ328" 45 + select M68000 71 46 help 72 47 Motorola 68EX328 processor support. 73 48 74 49 config M68VZ328 75 50 bool "MC68VZ328" 51 + select M68000 76 52 help 77 53 Motorola 68VZ328 processor support. 78 54 79 55 config M68360 80 56 bool "MC68360" 57 + select MCPU32 81 58 help 82 59 Motorola 68360 processor support. 
83 60 84 61 config M5206 85 62 bool "MCF5206" 63 + select COLDFIRE 86 64 select COLDFIRE_SW_A7 87 65 select HAVE_MBAR 88 66 help ··· 95 63 96 64 config M5206e 97 65 bool "MCF5206e" 66 + select COLDFIRE 98 67 select COLDFIRE_SW_A7 99 68 select HAVE_MBAR 100 69 help ··· 103 70 104 71 config M520x 105 72 bool "MCF520x" 73 + select COLDFIRE 106 74 select GENERIC_CLOCKEVENTS 107 75 select HAVE_CACHE_SPLIT 108 76 help ··· 111 77 112 78 config M523x 113 79 bool "MCF523x" 80 + select COLDFIRE 114 81 select GENERIC_CLOCKEVENTS 115 82 select HAVE_CACHE_SPLIT 116 83 select HAVE_IPSBAR ··· 120 85 121 86 config M5249 122 87 bool "MCF5249" 88 + select COLDFIRE 123 89 select COLDFIRE_SW_A7 124 90 select HAVE_MBAR 125 91 help ··· 128 92 129 93 config M5271 130 94 bool "MCF5271" 95 + select COLDFIRE 131 96 select HAVE_CACHE_SPLIT 132 97 select HAVE_IPSBAR 133 98 help ··· 136 99 137 100 config M5272 138 101 bool "MCF5272" 102 + select COLDFIRE 139 103 select COLDFIRE_SW_A7 140 104 select HAVE_MBAR 141 105 help ··· 144 106 145 107 config M5275 146 108 bool "MCF5275" 109 + select COLDFIRE 147 110 select HAVE_CACHE_SPLIT 148 111 select HAVE_IPSBAR 149 112 help ··· 152 113 153 114 config M528x 154 115 bool "MCF528x" 116 + select COLDFIRE 155 117 select GENERIC_CLOCKEVENTS 156 118 select HAVE_CACHE_SPLIT 157 119 select HAVE_IPSBAR ··· 161 121 162 122 config M5307 163 123 bool "MCF5307" 124 + select COLDFIRE 164 125 select COLDFIRE_SW_A7 165 126 select HAVE_CACHE_CB 166 127 select HAVE_MBAR ··· 170 129 171 130 config M532x 172 131 bool "MCF532x" 132 + select COLDFIRE 173 133 select HAVE_CACHE_CB 174 134 help 175 135 Freescale (Motorola) ColdFire 532x processor support. 
176 136 177 137 config M5407 178 138 bool "MCF5407" 139 + select COLDFIRE 179 140 select COLDFIRE_SW_A7 180 141 select HAVE_CACHE_CB 181 142 select HAVE_MBAR ··· 186 143 187 144 config M547x 188 145 bool "MCF547x" 146 + select COLDFIRE 189 147 select HAVE_CACHE_CB 190 148 select HAVE_MBAR 191 149 help ··· 194 150 195 151 config M548x 196 152 bool "MCF548x" 153 + select COLDFIRE 197 154 select HAVE_CACHE_CB 198 155 select HAVE_MBAR 199 156 help ··· 211 166 config M54xx 212 167 bool 213 168 depends on (M548x || M547x) 214 - default y 215 - 216 - config COLDFIRE 217 - bool 218 - depends on (M5206 || M5206e || M520x || M523x || M5249 || M527x || M5272 || M528x || M5307 || M532x || M5407 || M54xx) 219 - select GENERIC_GPIO 220 - select ARCH_REQUIRE_GPIOLIB 221 169 default y 222 170 223 171 config CLOCK_SET
+1 -2
arch/m68k/kernel/m68k_ksyms.c
··· 14 14 EXPORT_SYMBOL(__lshrdi3); 15 15 EXPORT_SYMBOL(__muldi3); 16 16 17 - #if !defined(__mc68020__) && !defined(__mc68030__) && \ 18 - !defined(__mc68040__) && !defined(__mc68060__) && !defined(__mcpu32__) 17 + #if defined(CONFIG_M68000) || defined(CONFIG_COLDFIRE) 19 18 /* 20 19 * Simpler 68k and ColdFire parts also need a few other gcc functions. 21 20 */
+10 -10
arch/m68k/kernel/vmlinux.lds_no.S
··· 84 84 /* Kernel symbol table: Normal symbols */ 85 85 . = ALIGN(4); 86 86 __start___ksymtab = .; 87 - *(__ksymtab) 87 + *(SORT(___ksymtab+*)) 88 88 __stop___ksymtab = .; 89 89 90 90 /* Kernel symbol table: GPL-only symbols */ 91 91 __start___ksymtab_gpl = .; 92 - *(__ksymtab_gpl) 92 + *(SORT(___ksymtab_gpl+*)) 93 93 __stop___ksymtab_gpl = .; 94 94 95 95 /* Kernel symbol table: Normal unused symbols */ 96 96 __start___ksymtab_unused = .; 97 - *(__ksymtab_unused) 97 + *(SORT(___ksymtab_unused+*)) 98 98 __stop___ksymtab_unused = .; 99 99 100 100 /* Kernel symbol table: GPL-only unused symbols */ 101 101 __start___ksymtab_unused_gpl = .; 102 - *(__ksymtab_unused_gpl) 102 + *(SORT(___ksymtab_unused_gpl+*)) 103 103 __stop___ksymtab_unused_gpl = .; 104 104 105 105 /* Kernel symbol table: GPL-future symbols */ 106 106 __start___ksymtab_gpl_future = .; 107 - *(__ksymtab_gpl_future) 107 + *(SORT(___ksymtab_gpl_future+*)) 108 108 __stop___ksymtab_gpl_future = .; 109 109 110 110 /* Kernel symbol table: Normal symbols */ 111 111 __start___kcrctab = .; 112 - *(__kcrctab) 112 + *(SORT(___kcrctab+*)) 113 113 __stop___kcrctab = .; 114 114 115 115 /* Kernel symbol table: GPL-only symbols */ 116 116 __start___kcrctab_gpl = .; 117 - *(__kcrctab_gpl) 117 + *(SORT(___kcrctab_gpl+*)) 118 118 __stop___kcrctab_gpl = .; 119 119 120 120 /* Kernel symbol table: Normal unused symbols */ 121 121 __start___kcrctab_unused = .; 122 - *(__kcrctab_unused) 122 + *(SORT(___kcrctab_unused+*)) 123 123 __stop___kcrctab_unused = .; 124 124 125 125 /* Kernel symbol table: GPL-only unused symbols */ 126 126 __start___kcrctab_unused_gpl = .; 127 - *(__kcrctab_unused_gpl) 127 + *(SORT(___kcrctab_unused_gpl+*)) 128 128 __stop___kcrctab_unused_gpl = .; 129 129 130 130 /* Kernel symbol table: GPL-future symbols */ 131 131 __start___kcrctab_gpl_future = .; 132 - *(__kcrctab_gpl_future) 132 + *(SORT(___kcrctab_gpl_future+*)) 133 133 __stop___kcrctab_gpl_future = .; 134 134 135 135 /* Kernel symbol table: 
strings */
+4 -5
arch/m68k/lib/memcpy.c
··· 34 34 if (temp) { 35 35 long *lto = to; 36 36 const long *lfrom = from; 37 - #if defined(__mc68020__) || defined(__mc68030__) || \ 38 - defined(__mc68040__) || defined(__mc68060__) || defined(__mcpu32__) 37 + #if defined(CONFIG_M68000) || defined(CONFIG_COLDFIRE) 38 + for (; temp; temp--) 39 + *lto++ = *lfrom++; 40 + #else 39 41 asm volatile ( 40 42 " movel %2,%3\n" 41 43 " andw #7,%3\n" ··· 58 56 " jpl 4b" 59 57 : "=a" (lfrom), "=a" (lto), "=d" (temp), "=&d" (temp1) 60 58 : "0" (lfrom), "1" (lto), "2" (temp)); 61 - #else 62 - for (; temp; temp--) 63 - *lto++ = *lfrom++; 64 59 #endif 65 60 to = lto; 66 61 from = lfrom;
+4 -5
arch/m68k/lib/memset.c
··· 32 32 temp = count >> 2; 33 33 if (temp) { 34 34 long *ls = s; 35 - #if defined(__mc68020__) || defined(__mc68030__) || \ 36 - defined(__mc68040__) || defined(__mc68060__) || defined(__mcpu32__) 35 + #if defined(CONFIG_M68000) || defined(CONFIG_COLDFIRE) 36 + for (; temp; temp--) 37 + *ls++ = c; 38 + #else 37 39 size_t temp1; 38 40 asm volatile ( 39 41 " movel %1,%2\n" ··· 57 55 " jpl 1b" 58 56 : "=a" (ls), "=d" (temp), "=&d" (temp1) 59 57 : "d" (c), "0" (ls), "1" (temp)); 60 - #else 61 - for (; temp; temp--) 62 - *ls++ = c; 63 58 #endif 64 59 s = ls; 65 60 }
+10 -11
arch/m68k/lib/muldi3.c
··· 19 19 the Free Software Foundation, 59 Temple Place - Suite 330, 20 20 Boston, MA 02111-1307, USA. */ 21 21 22 - #if defined(__mc68020__) || defined(__mc68030__) || \ 23 - defined(__mc68040__) || defined(__mc68060__) || defined(__mcpu32__) 24 - 25 - #define umul_ppmm(w1, w0, u, v) \ 26 - __asm__ ("mulu%.l %3,%1:%0" \ 27 - : "=d" ((USItype)(w0)), \ 28 - "=d" ((USItype)(w1)) \ 29 - : "%0" ((USItype)(u)), \ 30 - "dmi" ((USItype)(v))) 31 - 32 - #else 22 + #if defined(CONFIG_M68000) || defined(CONFIG_COLDFIRE) 33 23 34 24 #define SI_TYPE_SIZE 32 35 25 #define __BITS4 (SI_TYPE_SIZE / 4) ··· 50 60 (w1) = __x3 + __ll_highpart (__x1); \ 51 61 (w0) = __ll_lowpart (__x1) * __ll_B + __ll_lowpart (__x0); \ 52 62 } while (0) 63 + 64 + #else 65 + 66 + #define umul_ppmm(w1, w0, u, v) \ 67 + __asm__ ("mulu%.l %3,%1:%0" \ 68 + : "=d" ((USItype)(w0)), \ 69 + "=d" ((USItype)(w1)) \ 70 + : "%0" ((USItype)(u)), \ 71 + "dmi" ((USItype)(v))) 53 72 54 73 #endif 55 74
+1 -1
arch/mips/configs/mtx1_defconfig
··· 678 678 CONFIG_LEDS_TRIGGER_TIMER=y 679 679 CONFIG_LEDS_TRIGGER_HEARTBEAT=y 680 680 CONFIG_LEDS_TRIGGER_DEFAULT_ON=y 681 - CONFIG_RTC_CLASS=m 681 + CONFIG_RTC_CLASS=y 682 682 CONFIG_RTC_INTF_DEV_UIE_EMUL=y 683 683 CONFIG_RTC_DRV_TEST=m 684 684 CONFIG_RTC_DRV_DS1307=m
+1 -1
arch/powerpc/configs/52xx/pcm030_defconfig
··· 85 85 CONFIG_USB_OHCI_HCD_PPC_OF_BE=y 86 86 # CONFIG_USB_OHCI_HCD_PCI is not set 87 87 CONFIG_USB_STORAGE=m 88 - CONFIG_RTC_CLASS=m 88 + CONFIG_RTC_CLASS=y 89 89 CONFIG_RTC_DRV_PCF8563=m 90 90 CONFIG_EXT2_FS=m 91 91 CONFIG_EXT3_FS=m
+1 -1
arch/powerpc/configs/ps3_defconfig
··· 141 141 # CONFIG_USB_EHCI_HCD_PPC_OF is not set 142 142 CONFIG_USB_OHCI_HCD=m 143 143 CONFIG_USB_STORAGE=m 144 - CONFIG_RTC_CLASS=m 144 + CONFIG_RTC_CLASS=y 145 145 CONFIG_RTC_DRV_PS3=m 146 146 CONFIG_EXT2_FS=m 147 147 CONFIG_EXT3_FS=m
+48
arch/sh/boards/mach-ecovec24/setup.c
··· 20 20 #include <linux/io.h> 21 21 #include <linux/delay.h> 22 22 #include <linux/usb/r8a66597.h> 23 + #include <linux/usb/renesas_usbhs.h> 23 24 #include <linux/i2c.h> 24 25 #include <linux/i2c/tsc2007.h> 25 26 #include <linux/spi/spi.h> ··· 231 230 }, 232 231 .num_resources = ARRAY_SIZE(usb1_common_resources), 233 232 .resource = usb1_common_resources, 233 + }; 234 + 235 + /* 236 + * USBHS 237 + */ 238 + static int usbhs_get_id(struct platform_device *pdev) 239 + { 240 + return gpio_get_value(GPIO_PTB3); 241 + } 242 + 243 + static struct renesas_usbhs_platform_info usbhs_info = { 244 + .platform_callback = { 245 + .get_id = usbhs_get_id, 246 + }, 247 + .driver_param = { 248 + .buswait_bwait = 4, 249 + .detection_delay = 5, 250 + }, 251 + }; 252 + 253 + static struct resource usbhs_resources[] = { 254 + [0] = { 255 + .start = 0xa4d90000, 256 + .end = 0xa4d90124 - 1, 257 + .flags = IORESOURCE_MEM, 258 + }, 259 + [1] = { 260 + .start = 66, 261 + .end = 66, 262 + .flags = IORESOURCE_IRQ, 263 + }, 264 + }; 265 + 266 + static struct platform_device usbhs_device = { 267 + .name = "renesas_usbhs", 268 + .id = 1, 269 + .dev = { 270 + .dma_mask = NULL, /* not use dma */ 271 + .coherent_dma_mask = 0xffffffff, 272 + .platform_data = &usbhs_info, 273 + }, 274 + .num_resources = ARRAY_SIZE(usbhs_resources), 275 + .resource = usbhs_resources, 276 + .archdata = { 277 + .hwblk_id = HWBLK_USB1, 278 + }, 234 279 }; 235 280 236 281 /* LCDC */ ··· 944 897 &sh_eth_device, 945 898 &usb0_host_device, 946 899 &usb1_common_device, 900 + &usbhs_device, 947 901 &lcdc_device, 948 902 &ceu0_device, 949 903 &ceu1_device,
+1 -1
arch/sh/configs/titan_defconfig
··· 227 227 CONFIG_USB_SERIAL_GENERIC=y 228 228 CONFIG_USB_SERIAL_ARK3116=m 229 229 CONFIG_USB_SERIAL_PL2303=m 230 - CONFIG_RTC_CLASS=m 230 + CONFIG_RTC_CLASS=y 231 231 CONFIG_RTC_DRV_SH=m 232 232 CONFIG_EXT2_FS=y 233 233 CONFIG_EXT3_FS=y
+8
arch/sh/include/cpu-sh4/cpu/sh7724.h
··· 298 298 SHDMA_SLAVE_SCIF4_RX, 299 299 SHDMA_SLAVE_SCIF5_TX, 300 300 SHDMA_SLAVE_SCIF5_RX, 301 + SHDMA_SLAVE_USB0D0_TX, 302 + SHDMA_SLAVE_USB0D0_RX, 303 + SHDMA_SLAVE_USB0D1_TX, 304 + SHDMA_SLAVE_USB0D1_RX, 305 + SHDMA_SLAVE_USB1D0_TX, 306 + SHDMA_SLAVE_USB1D0_RX, 307 + SHDMA_SLAVE_USB1D1_TX, 308 + SHDMA_SLAVE_USB1D1_RX, 301 309 SHDMA_SLAVE_SDHI0_TX, 302 310 SHDMA_SLAVE_SDHI0_RX, 303 311 SHDMA_SLAVE_SDHI1_TX,
+40
arch/sh/kernel/cpu/sh4a/setup-sh7724.c
··· 93 93 .chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT), 94 94 .mid_rid = 0x36, 95 95 }, { 96 + .slave_id = SHDMA_SLAVE_USB0D0_TX, 97 + .addr = 0xA4D80100, 98 + .chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_32BIT), 99 + .mid_rid = 0x73, 100 + }, { 101 + .slave_id = SHDMA_SLAVE_USB0D0_RX, 102 + .addr = 0xA4D80100, 103 + .chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_32BIT), 104 + .mid_rid = 0x73, 105 + }, { 106 + .slave_id = SHDMA_SLAVE_USB0D1_TX, 107 + .addr = 0xA4D80120, 108 + .chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_32BIT), 109 + .mid_rid = 0x77, 110 + }, { 111 + .slave_id = SHDMA_SLAVE_USB0D1_RX, 112 + .addr = 0xA4D80120, 113 + .chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_32BIT), 114 + .mid_rid = 0x77, 115 + }, { 116 + .slave_id = SHDMA_SLAVE_USB1D0_TX, 117 + .addr = 0xA4D90100, 118 + .chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_32BIT), 119 + .mid_rid = 0xab, 120 + }, { 121 + .slave_id = SHDMA_SLAVE_USB1D0_RX, 122 + .addr = 0xA4D90100, 123 + .chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_32BIT), 124 + .mid_rid = 0xab, 125 + }, { 126 + .slave_id = SHDMA_SLAVE_USB1D1_TX, 127 + .addr = 0xA4D90120, 128 + .chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_32BIT), 129 + .mid_rid = 0xaf, 130 + }, { 131 + .slave_id = SHDMA_SLAVE_USB1D1_RX, 132 + .addr = 0xA4D90120, 133 + .chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_32BIT), 134 + .mid_rid = 0xaf, 135 + }, { 96 136 .slave_id = SHDMA_SLAVE_SDHI0_TX, 97 137 .addr = 0x04ce0030, 98 138 .chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_16BIT),
+4 -4
arch/x86/kernel/apic/x2apic_uv_x.c
··· 632 632 633 633 /* Direct Legacy VGA I/O traffic to designated IOH */ 634 634 int uv_set_vga_state(struct pci_dev *pdev, bool decode, 635 - unsigned int command_bits, bool change_bridge) 635 + unsigned int command_bits, u32 flags) 636 636 { 637 637 int domain, bus, rc; 638 638 639 - PR_DEVEL("devfn %x decode %d cmd %x chg_brdg %d\n", 640 - pdev->devfn, decode, command_bits, change_bridge); 639 + PR_DEVEL("devfn %x decode %d cmd %x flags %d\n", 640 + pdev->devfn, decode, command_bits, flags); 641 641 642 - if (!change_bridge) 642 + if (!(flags & PCI_VGA_STATE_CHANGE_BRIDGE)) 643 643 return 0; 644 644 645 645 if ((command_bits & PCI_COMMAND_IO) == 0)
+2 -2
arch/x86/kernel/process.c
··· 337 337 * Powermanagement idle function, if any.. 338 338 */ 339 339 void (*pm_idle)(void); 340 - #if defined(CONFIG_APM_MODULE) && defined(CONFIG_APM_CPU_IDLE) 340 + #ifdef CONFIG_APM_MODULE 341 341 EXPORT_SYMBOL(pm_idle); 342 342 #endif 343 343 ··· 399 399 cpu_relax(); 400 400 } 401 401 } 402 - #if defined(CONFIG_APM_MODULE) && defined(CONFIG_APM_CPU_IDLE) 402 + #ifdef CONFIG_APM_MODULE 403 403 EXPORT_SYMBOL(default_idle); 404 404 #endif 405 405
+23 -2
drivers/char/hpet.c
··· 163 163 * This has the effect of treating non-periodic like periodic. 164 164 */ 165 165 if ((devp->hd_flags & (HPET_IE | HPET_PERIODIC)) == HPET_IE) { 166 - unsigned long m, t; 166 + unsigned long m, t, mc, base, k; 167 + struct hpet __iomem *hpet = devp->hd_hpet; 168 + struct hpets *hpetp = devp->hd_hpets; 167 169 168 170 t = devp->hd_ireqfreq; 169 171 m = read_counter(&devp->hd_timer->hpet_compare); 170 - write_counter(t + m, &devp->hd_timer->hpet_compare); 172 + mc = read_counter(&hpet->hpet_mc); 173 + /* The time for the next interrupt would logically be t + m, 174 + * however, if we are very unlucky and the interrupt is delayed 175 + * for longer than t then we will completely miss the next 176 + * interrupt if we set t + m and an application will hang. 177 + * Therefore we need to make a more complex computation assuming 178 + * that there exists a k for which the following is true: 179 + * k * t + base < mc + delta 180 + * (k + 1) * t + base > mc + delta 181 + * where t is the interval in hpet ticks for the given freq, 182 + * base is the theoretical start value 0 < base < t, 183 + * mc is the main counter value at the time of the interrupt, 184 + * delta is the time it takes to write the a value to the 185 + * comparator. 186 + * k may then be computed as (mc - base + delta) / t . 187 + */ 188 + base = mc % t; 189 + k = (mc - base + hpetp->hp_delta) / t; 190 + write_counter(t * (k + 1) + base, 191 + &devp->hd_timer->hpet_compare); 171 192 } 172 193 173 194 if (devp->hd_flags & HPET_SHARED_IRQ)
+1
drivers/cpufreq/cpufreq_stats.c
··· 387 387 unregister_hotcpu_notifier(&cpufreq_stat_cpu_notifier); 388 388 for_each_online_cpu(cpu) { 389 389 cpufreq_stats_free_table(cpu); 390 + cpufreq_stats_free_sysfs(cpu); 390 391 } 391 392 } 392 393
+8 -14
drivers/dma/shdma.c
··· 130 130 131 131 static unsigned int calc_xmit_shift(struct sh_dmae_chan *sh_chan, u32 chcr) 132 132 { 133 - struct sh_dmae_device *shdev = container_of(sh_chan->common.device, 134 - struct sh_dmae_device, common); 133 + struct sh_dmae_device *shdev = to_sh_dev(sh_chan); 135 134 struct sh_dmae_pdata *pdata = shdev->pdata; 136 135 int cnt = ((chcr & pdata->ts_low_mask) >> pdata->ts_low_shift) | 137 136 ((chcr & pdata->ts_high_mask) >> pdata->ts_high_shift); ··· 143 144 144 145 static u32 log2size_to_chcr(struct sh_dmae_chan *sh_chan, int l2size) 145 146 { 146 - struct sh_dmae_device *shdev = container_of(sh_chan->common.device, 147 - struct sh_dmae_device, common); 147 + struct sh_dmae_device *shdev = to_sh_dev(sh_chan); 148 148 struct sh_dmae_pdata *pdata = shdev->pdata; 149 149 int i; 150 150 ··· 207 209 208 210 static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val) 209 211 { 210 - struct sh_dmae_device *shdev = container_of(sh_chan->common.device, 211 - struct sh_dmae_device, common); 212 + struct sh_dmae_device *shdev = to_sh_dev(sh_chan); 212 213 struct sh_dmae_pdata *pdata = shdev->pdata; 213 214 const struct sh_dmae_channel *chan_pdata = &pdata->channel[sh_chan->id]; 214 215 u16 __iomem *addr = shdev->dmars; 215 - int shift = chan_pdata->dmars_bit; 216 + unsigned int shift = chan_pdata->dmars_bit; 216 217 217 218 if (dmae_is_busy(sh_chan)) 218 219 return -EBUSY; ··· 293 296 static const struct sh_dmae_slave_config *sh_dmae_find_slave( 294 297 struct sh_dmae_chan *sh_chan, struct sh_dmae_slave *param) 295 298 { 296 - struct dma_device *dma_dev = sh_chan->common.device; 297 - struct sh_dmae_device *shdev = container_of(dma_dev, 298 - struct sh_dmae_device, common); 299 + struct sh_dmae_device *shdev = to_sh_dev(sh_chan); 299 300 struct sh_dmae_pdata *pdata = shdev->pdata; 300 301 int i; 301 302 ··· 766 771 767 772 spin_lock_bh(&sh_chan->desc_lock); 768 773 /* DMA work check */ 769 - if (dmae_is_busy(sh_chan)) { 770 - 
spin_unlock_bh(&sh_chan->desc_lock); 771 - return; 772 - } 774 + if (dmae_is_busy(sh_chan)) 775 + goto sh_chan_xfer_ld_queue_end; 773 776 774 777 /* Find the first not transferred descriptor */ 775 778 list_for_each_entry(desc, &sh_chan->ld_queue, node) ··· 781 788 break; 782 789 } 783 790 791 + sh_chan_xfer_ld_queue_end: 784 792 spin_unlock_bh(&sh_chan->desc_lock); 785 793 } 786 794
+2
drivers/dma/shdma.h
··· 52 52 #define to_sh_chan(chan) container_of(chan, struct sh_dmae_chan, common) 53 53 #define to_sh_desc(lh) container_of(lh, struct sh_desc, node) 54 54 #define tx_to_sh_desc(tx) container_of(tx, struct sh_desc, async_tx) 55 + #define to_sh_dev(chan) container_of(chan->common.device,\ 56 + struct sh_dmae_device, common) 55 57 56 58 #endif /* __DMA_SHDMA_H */
+9 -8
drivers/gpu/drm/drm_bufs.c
··· 46 46 list_for_each_entry(entry, &dev->maplist, head) { 47 47 /* 48 48 * Because the kernel-userspace ABI is fixed at a 32-bit offset 49 - * while PCI resources may live above that, we ignore the map 50 - * offset for maps of type _DRM_FRAMEBUFFER or _DRM_REGISTERS. 51 - * It is assumed that each driver will have only one resource of 52 - * each type. 49 + * while PCI resources may live above that, we only compare the 50 + * lower 32 bits of the map offset for maps of type 51 + * _DRM_FRAMEBUFFER or _DRM_REGISTERS. 52 + * It is assumed that if a driver have more than one resource 53 + * of each type, the lower 32 bits are different. 53 54 */ 54 55 if (!entry->map || 55 56 map->type != entry->map->type || ··· 60 59 case _DRM_SHM: 61 60 if (map->flags != _DRM_CONTAINS_LOCK) 62 61 break; 62 + return entry; 63 63 case _DRM_REGISTERS: 64 64 case _DRM_FRAME_BUFFER: 65 - return entry; 65 + if ((entry->map->offset & 0xffffffff) == 66 + (map->offset & 0xffffffff)) 67 + return entry; 66 68 default: /* Make gcc happy */ 67 69 ; 68 70 } ··· 186 182 kfree(map); 187 183 return -EINVAL; 188 184 } 189 - #endif 190 - #ifdef __alpha__ 191 - map->offset += dev->hose->mem_space->start; 192 185 #endif 193 186 /* Some drivers preinitialize some maps, without the X Server 194 187 * needing to be aware of it. Therefore, we just return success
+1 -1
drivers/gpu/drm/drm_crtc.c
··· 1113 1113 if (card_res->count_fbs >= fb_count) { 1114 1114 copied = 0; 1115 1115 fb_id = (uint32_t __user *)(unsigned long)card_res->fb_id_ptr; 1116 - list_for_each_entry(fb, &file_priv->fbs, head) { 1116 + list_for_each_entry(fb, &file_priv->fbs, filp_head) { 1117 1117 if (put_user(fb->base.id, fb_id + copied)) { 1118 1118 ret = -EFAULT; 1119 1119 goto out;
+1 -1
drivers/gpu/drm/drm_vm.c
··· 526 526 static resource_size_t drm_core_get_reg_ofs(struct drm_device *dev) 527 527 { 528 528 #ifdef __alpha__ 529 - return dev->hose->dense_mem_base - dev->hose->mem_space->start; 529 + return dev->hose->dense_mem_base; 530 530 #else 531 531 return 0; 532 532 #endif
+4 -2
drivers/gpu/drm/i915/i915_gem.c
··· 465 465 466 466 page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT, 467 467 GFP_HIGHUSER | __GFP_RECLAIMABLE); 468 - if (IS_ERR(page)) 469 - return PTR_ERR(page); 468 + if (IS_ERR(page)) { 469 + ret = PTR_ERR(page); 470 + goto out; 471 + } 470 472 471 473 if (do_bit17_swizzling) { 472 474 slow_shmem_bit17_copy(page,
-19
drivers/gpu/drm/mga/mga_drv.h
··· 195 195 196 196 #define mga_flush_write_combine() DRM_WRITEMEMORYBARRIER() 197 197 198 - #if defined(__linux__) && defined(__alpha__) 199 - #define MGA_BASE(reg) ((unsigned long)(dev_priv->mmio->handle)) 200 - #define MGA_ADDR(reg) (MGA_BASE(reg) + reg) 201 - 202 - #define MGA_DEREF(reg) (*(volatile u32 *)MGA_ADDR(reg)) 203 - #define MGA_DEREF8(reg) (*(volatile u8 *)MGA_ADDR(reg)) 204 - 205 - #define MGA_READ(reg) (_MGA_READ((u32 *)MGA_ADDR(reg))) 206 - #define MGA_READ8(reg) (_MGA_READ((u8 *)MGA_ADDR(reg))) 207 - #define MGA_WRITE(reg, val) do { DRM_WRITEMEMORYBARRIER(); MGA_DEREF(reg) = val; } while (0) 208 - #define MGA_WRITE8(reg, val) do { DRM_WRITEMEMORYBARRIER(); MGA_DEREF8(reg) = val; } while (0) 209 - 210 - static inline u32 _MGA_READ(u32 *addr) 211 - { 212 - DRM_MEMORYBARRIER(); 213 - return *(volatile u32 *)addr; 214 - } 215 - #else 216 198 #define MGA_READ8(reg) DRM_READ8(dev_priv->mmio, (reg)) 217 199 #define MGA_READ(reg) DRM_READ32(dev_priv->mmio, (reg)) 218 200 #define MGA_WRITE8(reg, val) DRM_WRITE8(dev_priv->mmio, (reg), (val)) 219 201 #define MGA_WRITE(reg, val) DRM_WRITE32(dev_priv->mmio, (reg), (val)) 220 - #endif 221 202 222 203 #define DWGREG0 0x1c00 223 204 #define DWGREG0_END 0x1dff
+1 -1
drivers/gpu/drm/radeon/evergreen.c
··· 2944 2944 radeon_fence_process(rdev); 2945 2945 break; 2946 2946 case 233: /* GUI IDLE */ 2947 - DRM_DEBUG("IH: CP EOP\n"); 2947 + DRM_DEBUG("IH: GUI idle\n"); 2948 2948 rdev->pm.gui_idle = true; 2949 2949 wake_up(&rdev->irq.idle_queue); 2950 2950 break;
+7 -1
drivers/gpu/drm/radeon/r100_track.h
··· 63 63 unsigned num_arrays; 64 64 unsigned max_indx; 65 65 unsigned color_channel_mask; 66 - struct r100_cs_track_array arrays[11]; 66 + struct r100_cs_track_array arrays[16]; 67 67 struct r100_cs_track_cb cb[R300_MAX_CB]; 68 68 struct r100_cs_track_cb zb; 69 69 struct r100_cs_track_cb aa; ··· 146 146 ib = p->ib->ptr; 147 147 track = (struct r100_cs_track *)p->track; 148 148 c = radeon_get_ib_value(p, idx++) & 0x1F; 149 + if (c > 16) { 150 + DRM_ERROR("Only 16 vertex buffers are allowed %d\n", 151 + pkt->opcode); 152 + r100_cs_dump_packet(p, pkt); 153 + return -EINVAL; 154 + } 149 155 track->num_arrays = c; 150 156 for (i = 0; i < (c - 1); i+=2, idx+=3) { 151 157 r = r100_cs_packet_next_reloc(p, &reloc);
+1 -1
drivers/gpu/drm/radeon/r600.c
··· 3444 3444 radeon_fence_process(rdev); 3445 3445 break; 3446 3446 case 233: /* GUI IDLE */ 3447 - DRM_DEBUG("IH: CP EOP\n"); 3447 + DRM_DEBUG("IH: GUI idle\n"); 3448 3448 rdev->pm.gui_idle = true; 3449 3449 wake_up(&rdev->irq.idle_queue); 3450 3450 break;
+1
drivers/gpu/drm/radeon/radeon.h
··· 165 165 uint32_t default_sclk; 166 166 uint32_t default_dispclk; 167 167 uint32_t dp_extclk; 168 + uint32_t max_pixel_clock; 168 169 }; 169 170 170 171 /*
+4
drivers/gpu/drm/radeon/radeon_atombios.c
··· 1246 1246 } 1247 1247 *dcpll = *p1pll; 1248 1248 1249 + rdev->clock.max_pixel_clock = le16_to_cpu(firmware_info->info.usMaxPixelClock); 1250 + if (rdev->clock.max_pixel_clock == 0) 1251 + rdev->clock.max_pixel_clock = 40000; 1252 + 1249 1253 return true; 1250 1254 } 1251 1255
+5 -3
drivers/gpu/drm/radeon/radeon_clocks.c
··· 117 117 p1pll->reference_div = RREG32_PLL(RADEON_PPLL_REF_DIV) & 0x3ff; 118 118 if (p1pll->reference_div < 2) 119 119 p1pll->reference_div = 12; 120 - p2pll->reference_div = p1pll->reference_div; 120 + p2pll->reference_div = p1pll->reference_div; 121 121 122 122 /* These aren't in the device-tree */ 123 123 if (rdev->family >= CHIP_R420) { ··· 139 139 p2pll->pll_out_min = 12500; 140 140 p2pll->pll_out_max = 35000; 141 141 } 142 + /* not sure what the max should be in all cases */ 143 + rdev->clock.max_pixel_clock = 35000; 142 144 143 145 spll->reference_freq = mpll->reference_freq = p1pll->reference_freq; 144 146 spll->reference_div = mpll->reference_div = ··· 153 151 else 154 152 rdev->clock.default_sclk = 155 153 radeon_legacy_get_engine_clock(rdev); 156 - 154 + 157 155 val = of_get_property(dp, "ATY,MCLK", NULL); 158 156 if (val && *val) 159 157 rdev->clock.default_mclk = (*val) / 10; ··· 162 160 radeon_legacy_get_memory_clock(rdev); 163 161 164 162 DRM_INFO("Using device-tree clock info\n"); 165 - 163 + 166 164 return true; 167 165 } 168 166 #else
+7 -3
drivers/gpu/drm/radeon/radeon_combios.c
··· 866 866 rdev->clock.default_sclk = sclk; 867 867 rdev->clock.default_mclk = mclk; 868 868 869 + if (RBIOS32(pll_info + 0x16)) 870 + rdev->clock.max_pixel_clock = RBIOS32(pll_info + 0x16); 871 + else 872 + rdev->clock.max_pixel_clock = 35000; /* might need something asic specific */ 873 + 869 874 return true; 870 875 } 871 876 return false; ··· 1553 1548 (rdev->pdev->subsystem_device == 0x4a48)) { 1554 1549 /* Mac X800 */ 1555 1550 rdev->mode_info.connector_table = CT_MAC_X800; 1556 - } else if ((rdev->pdev->device == 0x4150) && 1557 - (rdev->pdev->subsystem_vendor == 0x1002) && 1558 - (rdev->pdev->subsystem_device == 0x4150)) { 1551 + } else if (of_machine_is_compatible("PowerMac7,2") || 1552 + of_machine_is_compatible("PowerMac7,3")) { 1559 1553 /* Mac G5 9600 */ 1560 1554 rdev->mode_info.connector_table = CT_MAC_G5_9600; 1561 1555 } else
+12 -1
drivers/gpu/drm/radeon/radeon_connectors.c
··· 626 626 static int radeon_vga_mode_valid(struct drm_connector *connector, 627 627 struct drm_display_mode *mode) 628 628 { 629 + struct drm_device *dev = connector->dev; 630 + struct radeon_device *rdev = dev->dev_private; 631 + 629 632 /* XXX check mode bandwidth */ 630 - /* XXX verify against max DAC output frequency */ 633 + 634 + if ((mode->clock / 10) > rdev->clock.max_pixel_clock) 635 + return MODE_CLOCK_HIGH; 636 + 631 637 return MODE_OK; 632 638 } 633 639 ··· 1021 1015 } else 1022 1016 return MODE_CLOCK_HIGH; 1023 1017 } 1018 + 1019 + /* check against the max pixel clock */ 1020 + if ((mode->clock / 10) > rdev->clock.max_pixel_clock) 1021 + return MODE_CLOCK_HIGH; 1022 + 1024 1023 return MODE_OK; 1025 1024 } 1026 1025
-3
drivers/gpu/drm/savage/savage_bci.c
··· 647 647 ret = drm_addmap(dev, aperture_base, SAVAGE_APERTURE_SIZE, 648 648 _DRM_FRAME_BUFFER, _DRM_WRITE_COMBINING, 649 649 &dev_priv->aperture); 650 - if (ret) 651 - return ret; 652 - 653 650 return ret; 654 651 } 655 652
+10 -10
drivers/leds/Kconfig
··· 1 + config LEDS_GPIO_REGISTER 2 + bool 3 + help 4 + This option provides the function gpio_led_register_device. 5 + As this function is used by arch code it must not be compiled as a 6 + module. 7 + 1 8 menuconfig NEW_LEDS 2 9 bool "LED Support" 3 10 help ··· 14 7 This is not related to standard keyboard LEDs which are controlled 15 8 via the input system. 16 9 10 + if NEW_LEDS 11 + 17 12 config LEDS_CLASS 18 13 bool "LED Class Support" 19 - depends on NEW_LEDS 20 14 help 21 15 This option enables the led sysfs class in /sys/class/leds. You'll 22 16 need this to do anything useful with LEDs. If unsure, say N. 23 - 24 - config LEDS_GPIO_REGISTER 25 - bool 26 - help 27 - This option provides the function gpio_led_register_device. 28 - As this function is used by arch code it must not be compiled as a 29 - module. 30 - 31 - if NEW_LEDS 32 17 33 18 comment "LED drivers" 34 19 ··· 390 391 391 392 config LEDS_ASIC3 392 393 bool "LED support for the HTC ASIC3" 394 + depends on LEDS_CLASS 393 395 depends on MFD_ASIC3 394 396 default y 395 397 help
+92 -12
drivers/md/bitmap.c
··· 534 534 kunmap_atomic(sb, KM_USER0); 535 535 } 536 536 537 + /* 538 + * bitmap_new_disk_sb 539 + * @bitmap 540 + * 541 + * This function is somewhat the reverse of bitmap_read_sb. bitmap_read_sb 542 + * reads and verifies the on-disk bitmap superblock and populates bitmap_info. 543 + * This function verifies 'bitmap_info' and populates the on-disk bitmap 544 + * structure, which is to be written to disk. 545 + * 546 + * Returns: 0 on success, -Exxx on error 547 + */ 548 + static int bitmap_new_disk_sb(struct bitmap *bitmap) 549 + { 550 + bitmap_super_t *sb; 551 + unsigned long chunksize, daemon_sleep, write_behind; 552 + int err = -EINVAL; 553 + 554 + bitmap->sb_page = alloc_page(GFP_KERNEL); 555 + if (IS_ERR(bitmap->sb_page)) { 556 + err = PTR_ERR(bitmap->sb_page); 557 + bitmap->sb_page = NULL; 558 + return err; 559 + } 560 + bitmap->sb_page->index = 0; 561 + 562 + sb = kmap_atomic(bitmap->sb_page, KM_USER0); 563 + 564 + sb->magic = cpu_to_le32(BITMAP_MAGIC); 565 + sb->version = cpu_to_le32(BITMAP_MAJOR_HI); 566 + 567 + chunksize = bitmap->mddev->bitmap_info.chunksize; 568 + BUG_ON(!chunksize); 569 + if (!is_power_of_2(chunksize)) { 570 + kunmap_atomic(sb, KM_USER0); 571 + printk(KERN_ERR "bitmap chunksize not a power of 2\n"); 572 + return -EINVAL; 573 + } 574 + sb->chunksize = cpu_to_le32(chunksize); 575 + 576 + daemon_sleep = bitmap->mddev->bitmap_info.daemon_sleep; 577 + if (!daemon_sleep || 578 + (daemon_sleep < 1) || (daemon_sleep > MAX_SCHEDULE_TIMEOUT)) { 579 + printk(KERN_INFO "Choosing daemon_sleep default (5 sec)\n"); 580 + daemon_sleep = 5 * HZ; 581 + } 582 + sb->daemon_sleep = cpu_to_le32(daemon_sleep); 583 + bitmap->mddev->bitmap_info.daemon_sleep = daemon_sleep; 584 + 585 + /* 586 + * FIXME: write_behind for RAID1. If not specified, what 587 + * is a good choice? We choose COUNTER_MAX / 2 arbitrarily. 
588 + */ 589 + write_behind = bitmap->mddev->bitmap_info.max_write_behind; 590 + if (write_behind > COUNTER_MAX) 591 + write_behind = COUNTER_MAX / 2; 592 + sb->write_behind = cpu_to_le32(write_behind); 593 + bitmap->mddev->bitmap_info.max_write_behind = write_behind; 594 + 595 + /* keep the array size field of the bitmap superblock up to date */ 596 + sb->sync_size = cpu_to_le64(bitmap->mddev->resync_max_sectors); 597 + 598 + memcpy(sb->uuid, bitmap->mddev->uuid, 16); 599 + 600 + bitmap->flags |= BITMAP_STALE; 601 + sb->state |= cpu_to_le32(BITMAP_STALE); 602 + bitmap->events_cleared = bitmap->mddev->events; 603 + sb->events_cleared = cpu_to_le64(bitmap->mddev->events); 604 + 605 + bitmap->flags |= BITMAP_HOSTENDIAN; 606 + sb->version = cpu_to_le32(BITMAP_MAJOR_HOSTENDIAN); 607 + 608 + kunmap_atomic(sb, KM_USER0); 609 + 610 + return 0; 611 + } 612 + 537 613 /* read the superblock from the bitmap file and initialize some bitmap fields */ 538 614 static int bitmap_read_sb(struct bitmap *bitmap) 539 615 { ··· 651 575 reason = "unrecognized superblock version"; 652 576 else if (chunksize < 512) 653 577 reason = "bitmap chunksize too small"; 654 - else if ((1 << ffz(~chunksize)) != chunksize) 578 + else if (!is_power_of_2(chunksize)) 655 579 reason = "bitmap chunksize not a power of 2"; 656 580 else if (daemon_sleep < 1 || daemon_sleep > MAX_SCHEDULE_TIMEOUT) 657 581 reason = "daemon sleep period out of range"; ··· 1152 1076 } 1153 1077 1154 1078 printk(KERN_INFO "%s: bitmap initialized from disk: " 1155 - "read %lu/%lu pages, set %lu bits\n", 1156 - bmname(bitmap), bitmap->file_pages, num_pages, bit_cnt); 1079 + "read %lu/%lu pages, set %lu of %lu bits\n", 1080 + bmname(bitmap), bitmap->file_pages, num_pages, bit_cnt, chunks); 1157 1081 1158 1082 return 0; 1159 1083 ··· 1408 1332 return 0; 1409 1333 } 1410 1334 1411 - if (unlikely((*bmc & COUNTER_MAX) == COUNTER_MAX)) { 1335 + if (unlikely(COUNTER(*bmc) == COUNTER_MAX)) { 1412 1336 DEFINE_WAIT(__wait); 1413 1337 /* 
note that it is safe to do the prepare_to_wait 1414 1338 * after the test as long as we do it before dropping ··· 1480 1404 sysfs_notify_dirent_safe(bitmap->sysfs_can_clear); 1481 1405 } 1482 1406 1483 - if (!success && ! (*bmc & NEEDED_MASK)) 1407 + if (!success && !NEEDED(*bmc)) 1484 1408 *bmc |= NEEDED_MASK; 1485 1409 1486 - if ((*bmc & COUNTER_MAX) == COUNTER_MAX) 1410 + if (COUNTER(*bmc) == COUNTER_MAX) 1487 1411 wake_up(&bitmap->overflow_wait); 1488 1412 1489 1413 (*bmc)--; ··· 1804 1728 vfs_fsync(file, 1); 1805 1729 } 1806 1730 /* read superblock from bitmap file (this sets mddev->bitmap_info.chunksize) */ 1807 - if (!mddev->bitmap_info.external) 1808 - err = bitmap_read_sb(bitmap); 1809 - else { 1731 + if (!mddev->bitmap_info.external) { 1732 + /* 1733 + * If 'MD_ARRAY_FIRST_USE' is set, then device-mapper is 1734 + * instructing us to create a new on-disk bitmap instance. 1735 + */ 1736 + if (test_and_clear_bit(MD_ARRAY_FIRST_USE, &mddev->flags)) 1737 + err = bitmap_new_disk_sb(bitmap); 1738 + else 1739 + err = bitmap_read_sb(bitmap); 1740 + } else { 1810 1741 err = 0; 1811 1742 if (mddev->bitmap_info.chunksize == 0 || 1812 1743 mddev->bitmap_info.daemon_sleep == 0) ··· 1837 1754 bitmap->chunks = chunks; 1838 1755 bitmap->pages = pages; 1839 1756 bitmap->missing_pages = pages; 1840 - bitmap->counter_bits = COUNTER_BITS; 1841 - 1842 - bitmap->syncchunk = ~0UL; 1843 1757 1844 1758 #ifdef INJECT_FATAL_FAULT_1 1845 1759 bitmap->bp = NULL;
-10
drivers/md/bitmap.h
··· 85 85 typedef __u16 bitmap_counter_t; 86 86 #define COUNTER_BITS 16 87 87 #define COUNTER_BIT_SHIFT 4 88 - #define COUNTER_BYTE_RATIO (COUNTER_BITS / 8) 89 88 #define COUNTER_BYTE_SHIFT (COUNTER_BIT_SHIFT - 3) 90 89 91 90 #define NEEDED_MASK ((bitmap_counter_t) (1 << (COUNTER_BITS - 1))) ··· 195 196 196 197 mddev_t *mddev; /* the md device that the bitmap is for */ 197 198 198 - int counter_bits; /* how many bits per block counter */ 199 - 200 199 /* bitmap chunksize -- how much data does each bit represent? */ 201 200 unsigned long chunkshift; /* chunksize = 2^chunkshift (for bitops) */ 202 201 unsigned long chunks; /* total number of data chunks for the array */ 203 - 204 - /* We hold a count on the chunk currently being synced, and drop 205 - * it when the last block is started. If the resync is aborted 206 - * midway, we need to be able to drop that count, so we remember 207 - * the counted chunk.. 208 - */ 209 - unsigned long syncchunk; 210 202 211 203 __u64 events_cleared; 212 204 int need_sync;
+30 -11
drivers/md/md.c
··· 351 351 mddev->suspended = 0; 352 352 wake_up(&mddev->sb_wait); 353 353 mddev->pers->quiesce(mddev, 0); 354 + 355 + md_wakeup_thread(mddev->thread); 356 + md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */ 354 357 } 355 358 EXPORT_SYMBOL_GPL(mddev_resume); 356 359 ··· 1753 1750 }, 1754 1751 }; 1755 1752 1753 + static void sync_super(mddev_t *mddev, mdk_rdev_t *rdev) 1754 + { 1755 + if (mddev->sync_super) { 1756 + mddev->sync_super(mddev, rdev); 1757 + return; 1758 + } 1759 + 1760 + BUG_ON(mddev->major_version >= ARRAY_SIZE(super_types)); 1761 + 1762 + super_types[mddev->major_version].sync_super(mddev, rdev); 1763 + } 1764 + 1756 1765 static int match_mddev_units(mddev_t *mddev1, mddev_t *mddev2) 1757 1766 { 1758 1767 mdk_rdev_t *rdev, *rdev2; ··· 1796 1781 1797 1782 if (list_empty(&mddev->disks)) 1798 1783 return 0; /* nothing to do */ 1799 - if (blk_get_integrity(mddev->gendisk)) 1800 - return 0; /* already registered */ 1784 + if (!mddev->gendisk || blk_get_integrity(mddev->gendisk)) 1785 + return 0; /* shouldn't register, or already is */ 1801 1786 list_for_each_entry(rdev, &mddev->disks, same_set) { 1802 1787 /* skip spares and non-functional disks */ 1803 1788 if (test_bit(Faulty, &rdev->flags)) ··· 2183 2168 /* Don't update this superblock */ 2184 2169 rdev->sb_loaded = 2; 2185 2170 } else { 2186 - super_types[mddev->major_version]. 
2187 - sync_super(mddev, rdev); 2171 + sync_super(mddev, rdev); 2188 2172 rdev->sb_loaded = 1; 2189 2173 } 2190 2174 } ··· 2476 2462 if (rdev->raid_disk == -1) 2477 2463 return -EEXIST; 2478 2464 /* personality does all needed checks */ 2479 - if (rdev->mddev->pers->hot_add_disk == NULL) 2465 + if (rdev->mddev->pers->hot_remove_disk == NULL) 2480 2466 return -EINVAL; 2481 2467 err = rdev->mddev->pers-> 2482 2468 hot_remove_disk(rdev->mddev, rdev->raid_disk); ··· 4633 4619 if (mddev->flags) 4634 4620 md_update_sb(mddev, 0); 4635 4621 4636 - md_wakeup_thread(mddev->thread); 4637 - md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */ 4638 - 4639 4622 md_new_event(mddev); 4640 4623 sysfs_notify_dirent_safe(mddev->sysfs_state); 4641 4624 sysfs_notify_dirent_safe(mddev->sysfs_action); ··· 4653 4642 bitmap_destroy(mddev); 4654 4643 goto out; 4655 4644 } 4645 + 4646 + md_wakeup_thread(mddev->thread); 4647 + md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */ 4648 + 4656 4649 set_capacity(mddev->gendisk, mddev->array_sectors); 4657 4650 revalidate_disk(mddev->gendisk); 4658 4651 mddev->changed = 1; ··· 5274 5259 if (mddev->degraded) 5275 5260 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery); 5276 5261 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 5262 + if (!err) 5263 + md_new_event(mddev); 5277 5264 md_wakeup_thread(mddev->thread); 5278 5265 return err; 5279 5266 } ··· 6883 6866 * Tune reconstruction: 6884 6867 */ 6885 6868 window = 32*(PAGE_SIZE/512); 6886 - printk(KERN_INFO "md: using %dk window, over a total of %llu blocks.\n", 6887 - window/2,(unsigned long long) max_sectors/2); 6869 + printk(KERN_INFO "md: using %dk window, over a total of %lluk.\n", 6870 + window/2, (unsigned long long)max_sectors/2); 6888 6871 6889 6872 atomic_set(&mddev->recovery_active, 0); 6890 6873 last_check = 0; ··· 7062 7045 } 7063 7046 EXPORT_SYMBOL_GPL(md_do_sync); 7064 7047 7065 - 7066 7048 static int remove_and_add_spares(mddev_t *mddev) 7067 7049 { 
7068 7050 mdk_rdev_t *rdev; ··· 7173 7157 */ 7174 7158 void md_check_recovery(mddev_t *mddev) 7175 7159 { 7160 + if (mddev->suspended) 7161 + return; 7162 + 7176 7163 if (mddev->bitmap) 7177 7164 bitmap_daemon_work(mddev); 7178 7165
+2
drivers/md/md.h
··· 124 124 #define MD_CHANGE_DEVS 0 /* Some device status has changed */ 125 125 #define MD_CHANGE_CLEAN 1 /* transition to or from 'clean' */ 126 126 #define MD_CHANGE_PENDING 2 /* switch from 'clean' to 'active' in progress */ 127 + #define MD_ARRAY_FIRST_USE 3 /* First use of array, needs initialization */ 127 128 128 129 int suspended; 129 130 atomic_t active_io; ··· 331 330 atomic_t flush_pending; 332 331 struct work_struct flush_work; 333 332 struct work_struct event_work; /* used by dm to report failure event */ 333 + void (*sync_super)(mddev_t *mddev, mdk_rdev_t *rdev); 334 334 }; 335 335 336 336
+17 -7
drivers/md/raid1.c
··· 497 497 return best_disk; 498 498 } 499 499 500 - static int raid1_congested(void *data, int bits) 500 + int md_raid1_congested(mddev_t *mddev, int bits) 501 501 { 502 - mddev_t *mddev = data; 503 502 conf_t *conf = mddev->private; 504 503 int i, ret = 0; 505 - 506 - if (mddev_congested(mddev, bits)) 507 - return 1; 508 504 509 505 rcu_read_lock(); 510 506 for (i = 0; i < mddev->raid_disks; i++) { 511 507 mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev); 512 508 if (rdev && !test_bit(Faulty, &rdev->flags)) { 513 509 struct request_queue *q = bdev_get_queue(rdev->bdev); 510 + 511 + BUG_ON(!q); 514 512 515 513 /* Note the '|| 1' - when read_balance prefers 516 514 * non-congested targets, it can be removed ··· 522 524 rcu_read_unlock(); 523 525 return ret; 524 526 } 527 + EXPORT_SYMBOL_GPL(md_raid1_congested); 525 528 529 + static int raid1_congested(void *data, int bits) 530 + { 531 + mddev_t *mddev = data; 532 + 533 + return mddev_congested(mddev, bits) || 534 + md_raid1_congested(mddev, bits); 535 + } 526 536 527 537 static void flush_pending_writes(conf_t *conf) 528 538 { ··· 1978 1972 return PTR_ERR(conf); 1979 1973 1980 1974 list_for_each_entry(rdev, &mddev->disks, same_set) { 1975 + if (!mddev->gendisk) 1976 + continue; 1981 1977 disk_stack_limits(mddev->gendisk, rdev->bdev, 1982 1978 rdev->data_offset << 9); 1983 1979 /* as we don't honour merge_bvec_fn, we must never risk ··· 2021 2013 2022 2014 md_set_array_sectors(mddev, raid1_size(mddev, 0, 0)); 2023 2015 2024 - mddev->queue->backing_dev_info.congested_fn = raid1_congested; 2025 - mddev->queue->backing_dev_info.congested_data = mddev; 2016 + if (mddev->queue) { 2017 + mddev->queue->backing_dev_info.congested_fn = raid1_congested; 2018 + mddev->queue->backing_dev_info.congested_data = mddev; 2019 + } 2026 2020 return md_integrity_register(mddev); 2027 2021 } 2028 2022
+2
drivers/md/raid1.h
··· 126 126 */ 127 127 #define R1BIO_Returned 6 128 128 129 + extern int md_raid1_congested(mddev_t *mddev, int bits); 130 + 129 131 #endif
+8 -8
drivers/md/raid5.c
··· 129 129 130 130 static inline void raid5_set_bi_hw_segments(struct bio *bio, unsigned int cnt) 131 131 { 132 - bio->bi_phys_segments = raid5_bi_phys_segments(bio) || (cnt << 16); 132 + bio->bi_phys_segments = raid5_bi_phys_segments(bio) | (cnt << 16); 133 133 } 134 134 135 135 /* Find first data disk in a raid6 stripe */ ··· 514 514 bi = &sh->dev[i].req; 515 515 516 516 bi->bi_rw = rw; 517 - if (rw == WRITE) 517 + if (rw & WRITE) 518 518 bi->bi_end_io = raid5_end_write_request; 519 519 else 520 520 bi->bi_end_io = raid5_end_read_request; ··· 548 548 bi->bi_io_vec[0].bv_offset = 0; 549 549 bi->bi_size = STRIPE_SIZE; 550 550 bi->bi_next = NULL; 551 - if (rw == WRITE && 551 + if ((rw & WRITE) && 552 552 test_bit(R5_ReWrite, &sh->dev[i].flags)) 553 553 atomic_add(STRIPE_SECTORS, 554 554 &rdev->corrected_errors); 555 555 generic_make_request(bi); 556 556 } else { 557 - if (rw == WRITE) 557 + if (rw & WRITE) 558 558 set_bit(STRIPE_DEGRADED, &sh->state); 559 559 pr_debug("skip op %ld on disc %d for sector %llu\n", 560 560 bi->bi_rw, i, (unsigned long long)sh->sector); ··· 585 585 init_async_submit(&submit, flags, tx, NULL, NULL, NULL); 586 586 587 587 bio_for_each_segment(bvl, bio, i) { 588 - int len = bio_iovec_idx(bio, i)->bv_len; 588 + int len = bvl->bv_len; 589 589 int clen; 590 590 int b_offset = 0; 591 591 ··· 601 601 clen = len; 602 602 603 603 if (clen > 0) { 604 - b_offset += bio_iovec_idx(bio, i)->bv_offset; 605 - bio_page = bio_iovec_idx(bio, i)->bv_page; 604 + b_offset += bvl->bv_offset; 605 + bio_page = bvl->bv_page; 606 606 if (frombio) 607 607 tx = async_memcpy(page, bio_page, page_offset, 608 608 b_offset, clen, &submit); ··· 4858 4858 printk(KERN_INFO "md/raid:%s: device %s operational as raid" 4859 4859 " disk %d\n", 4860 4860 mdname(mddev), bdevname(rdev->bdev, b), raid_disk); 4861 - } else 4861 + } else if (rdev->saved_raid_disk != raid_disk) 4862 4862 /* Cannot rely on bitmap to complete recovery */ 4863 4863 conf->fullsync = 1; 4864 4864 }
+2
drivers/misc/apds990x.c
··· 609 609 return ret; 610 610 } 611 611 612 + #if defined(CONFIG_PM) || defined(CONFIG_PM_RUNTIME) 612 613 static int apds990x_chip_on(struct apds990x_chip *chip) 613 614 { 614 615 int err = regulator_bulk_enable(ARRAY_SIZE(chip->regs), ··· 625 624 apds990x_mode_on(chip); 626 625 return 0; 627 626 } 627 + #endif 628 628 629 629 static int apds990x_chip_off(struct apds990x_chip *chip) 630 630 {
+1 -1
drivers/misc/cs5535-mfgpt.c
··· 174 174 timer_nr = t < max ? (int) t : -1; 175 175 } else { 176 176 /* check if the requested timer's available */ 177 - if (test_bit(timer_nr, mfgpt->avail)) 177 + if (!test_bit(timer_nr, mfgpt->avail)) 178 178 timer_nr = -1; 179 179 } 180 180
+1 -1
drivers/misc/spear13xx_pcie_gadget.c
··· 845 845 err_iounmap_app: 846 846 iounmap(config->va_app_base); 847 847 err_kzalloc: 848 - kfree(config); 848 + kfree(target); 849 849 err_rel_res: 850 850 release_mem_region(res1->start, resource_size(res1)); 851 851 err_rel_res0:
+10 -2
drivers/mmc/host/mmci.c
··· 1144 1144 else if (ret != -ENOSYS) 1145 1145 goto err_gpio_cd; 1146 1146 1147 + /* 1148 + * A gpio pin that will detect cards when inserted and removed 1149 + * will most likely want to trigger on the edges if it is 1150 + * 0 when ejected and 1 when inserted (or mutatis mutandis 1151 + * for the inverted case) so we request triggers on both 1152 + * edges. 1153 + */ 1147 1154 ret = request_any_context_irq(gpio_to_irq(plat->gpio_cd), 1148 - mmci_cd_irq, 0, 1149 - DRIVER_NAME " (cd)", host); 1155 + mmci_cd_irq, 1156 + IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING, 1157 + DRIVER_NAME " (cd)", host); 1150 1158 if (ret >= 0) 1151 1159 host->gpio_cd_irq = gpio_to_irq(plat->gpio_cd); 1152 1160 }
+2 -2
drivers/pci/pci.c
··· 3271 3271 } 3272 3272 3273 3273 static int pci_set_vga_state_arch(struct pci_dev *dev, bool decode, 3274 - unsigned int command_bits, bool change_bridge) 3274 + unsigned int command_bits, u32 flags) 3275 3275 { 3276 3276 if (arch_set_vga_state) 3277 3277 return arch_set_vga_state(dev, decode, command_bits, 3278 - change_bridge); 3278 + flags); 3279 3279 return 0; 3280 3280 } 3281 3281
+3 -1
drivers/tty/serial/pch_uart.c
··· 1397 1397 int fifosize, base_baud; 1398 1398 int port_type; 1399 1399 struct pch_uart_driver_data *board; 1400 + const char *board_name; 1400 1401 1401 1402 board = &drv_dat[id->driver_data]; 1402 1403 port_type = board->port_type; ··· 1413 1412 base_baud = 1843200; /* 1.8432MHz */ 1414 1413 1415 1414 /* quirk for CM-iTC board */ 1416 - if (strstr(dmi_get_system_info(DMI_BOARD_NAME), "CM-iTC")) 1415 + board_name = dmi_get_system_info(DMI_BOARD_NAME); 1416 + if (board_name && strstr(board_name, "CM-iTC")) 1417 1417 base_baud = 192000000; /* 192.0MHz */ 1418 1418 1419 1419 switch (port_type) {
+12
drivers/video/backlight/Kconfig
··· 302 302 To compile this driver as a module, choose M here: the module will 303 303 be called adp8860_bl. 304 304 305 + config BACKLIGHT_ADP8870 306 + tristate "Backlight Driver for ADP8870 using WLED" 307 + depends on BACKLIGHT_CLASS_DEVICE && I2C 308 + select NEW_LEDS 309 + select LEDS_CLASS 310 + help 311 + If you have a LCD backlight connected to the ADP8870, 312 + say Y here to enable this driver. 313 + 314 + To compile this driver as a module, choose M here: the module will 315 + be called adp8870_bl. 316 + 305 317 config BACKLIGHT_88PM860X 306 318 tristate "Backlight Driver for 88PM8606 using WLED" 307 319 depends on MFD_88PM860X
+1
drivers/video/backlight/Makefile
··· 34 34 obj-$(CONFIG_BACKLIGHT_ADX) += adx_bl.o 35 35 obj-$(CONFIG_BACKLIGHT_ADP5520) += adp5520_bl.o 36 36 obj-$(CONFIG_BACKLIGHT_ADP8860) += adp8860_bl.o 37 + obj-$(CONFIG_BACKLIGHT_ADP8870) += adp8870_bl.o 37 38 obj-$(CONFIG_BACKLIGHT_88PM860X) += 88pm860x_bl.o 38 39 obj-$(CONFIG_BACKLIGHT_PCF50633) += pcf50633-backlight.o 39 40
+1012
drivers/video/backlight/adp8870_bl.c
··· 1 + /* 2 + * Backlight driver for Analog Devices ADP8870 Backlight Devices 3 + * 4 + * Copyright 2009-2011 Analog Devices Inc. 5 + * 6 + * Licensed under the GPL-2 or later. 7 + */ 8 + 9 + #include <linux/module.h> 10 + #include <linux/version.h> 11 + #include <linux/init.h> 12 + #include <linux/errno.h> 13 + #include <linux/pm.h> 14 + #include <linux/platform_device.h> 15 + #include <linux/i2c.h> 16 + #include <linux/fb.h> 17 + #include <linux/backlight.h> 18 + #include <linux/leds.h> 19 + #include <linux/workqueue.h> 20 + #include <linux/slab.h> 21 + 22 + #include <linux/i2c/adp8870.h> 23 + #define ADP8870_EXT_FEATURES 24 + #define ADP8870_USE_LEDS 25 + 26 + 27 + #define ADP8870_MFDVID 0x00 /* Manufacturer and device ID */ 28 + #define ADP8870_MDCR 0x01 /* Device mode and status */ 29 + #define ADP8870_INT_STAT 0x02 /* Interrupts status */ 30 + #define ADP8870_INT_EN 0x03 /* Interrupts enable */ 31 + #define ADP8870_CFGR 0x04 /* Configuration register */ 32 + #define ADP8870_BLSEL 0x05 /* Sink enable backlight or independent */ 33 + #define ADP8870_PWMLED 0x06 /* PWM Enable Selection Register */ 34 + #define ADP8870_BLOFF 0x07 /* Backlight off timeout */ 35 + #define ADP8870_BLDIM 0x08 /* Backlight dim timeout */ 36 + #define ADP8870_BLFR 0x09 /* Backlight fade in and out rates */ 37 + #define ADP8870_BLMX1 0x0A /* Backlight (Brightness Level 1-daylight) maximum current */ 38 + #define ADP8870_BLDM1 0x0B /* Backlight (Brightness Level 1-daylight) dim current */ 39 + #define ADP8870_BLMX2 0x0C /* Backlight (Brightness Level 2-bright) maximum current */ 40 + #define ADP8870_BLDM2 0x0D /* Backlight (Brightness Level 2-bright) dim current */ 41 + #define ADP8870_BLMX3 0x0E /* Backlight (Brightness Level 3-office) maximum current */ 42 + #define ADP8870_BLDM3 0x0F /* Backlight (Brightness Level 3-office) dim current */ 43 + #define ADP8870_BLMX4 0x10 /* Backlight (Brightness Level 4-indoor) maximum current */ 44 + #define ADP8870_BLDM4 0x11 /* Backlight 
(Brightness Level 4-indoor) dim current */ 45 + #define ADP8870_BLMX5 0x12 /* Backlight (Brightness Level 5-dark) maximum current */ 46 + #define ADP8870_BLDM5 0x13 /* Backlight (Brightness Level 5-dark) dim current */ 47 + #define ADP8870_ISCLAW 0x1A /* Independent sink current fade law register */ 48 + #define ADP8870_ISCC 0x1B /* Independent sink current control register */ 49 + #define ADP8870_ISCT1 0x1C /* Independent Sink Current Timer Register LED[7:5] */ 50 + #define ADP8870_ISCT2 0x1D /* Independent Sink Current Timer Register LED[4:1] */ 51 + #define ADP8870_ISCF 0x1E /* Independent sink current fade register */ 52 + #define ADP8870_ISC1 0x1F /* Independent Sink Current LED1 */ 53 + #define ADP8870_ISC2 0x20 /* Independent Sink Current LED2 */ 54 + #define ADP8870_ISC3 0x21 /* Independent Sink Current LED3 */ 55 + #define ADP8870_ISC4 0x22 /* Independent Sink Current LED4 */ 56 + #define ADP8870_ISC5 0x23 /* Independent Sink Current LED5 */ 57 + #define ADP8870_ISC6 0x24 /* Independent Sink Current LED6 */ 58 + #define ADP8870_ISC7 0x25 /* Independent Sink Current LED7 (Brightness Level 1-daylight) */ 59 + #define ADP8870_ISC7_L2 0x26 /* Independent Sink Current LED7 (Brightness Level 2-bright) */ 60 + #define ADP8870_ISC7_L3 0x27 /* Independent Sink Current LED7 (Brightness Level 3-office) */ 61 + #define ADP8870_ISC7_L4 0x28 /* Independent Sink Current LED7 (Brightness Level 4-indoor) */ 62 + #define ADP8870_ISC7_L5 0x29 /* Independent Sink Current LED7 (Brightness Level 5-dark) */ 63 + #define ADP8870_CMP_CTL 0x2D /* ALS Comparator Control Register */ 64 + #define ADP8870_ALS1_EN 0x2E /* Main ALS comparator level enable */ 65 + #define ADP8870_ALS2_EN 0x2F /* Second ALS comparator level enable */ 66 + #define ADP8870_ALS1_STAT 0x30 /* Main ALS Comparator Status Register */ 67 + #define ADP8870_ALS2_STAT 0x31 /* Second ALS Comparator Status Register */ 68 + #define ADP8870_L2TRP 0x32 /* L2 comparator reference */ 69 + #define ADP8870_L2HYS 0x33 /* L2 
hysteresis */ 70 + #define ADP8870_L3TRP 0x34 /* L3 comparator reference */ 71 + #define ADP8870_L3HYS 0x35 /* L3 hysteresis */ 72 + #define ADP8870_L4TRP 0x36 /* L4 comparator reference */ 73 + #define ADP8870_L4HYS 0x37 /* L4 hysteresis */ 74 + #define ADP8870_L5TRP 0x38 /* L5 comparator reference */ 75 + #define ADP8870_L5HYS 0x39 /* L5 hysteresis */ 76 + #define ADP8870_PH1LEVL 0x40 /* First phototransistor ambient light level-low byte register */ 77 + #define ADP8870_PH1LEVH 0x41 /* First phototransistor ambient light level-high byte register */ 78 + #define ADP8870_PH2LEVL 0x42 /* Second phototransistor ambient light level-low byte register */ 79 + #define ADP8870_PH2LEVH 0x43 /* Second phototransistor ambient light level-high byte register */ 80 + 81 + #define ADP8870_MANUFID 0x3 /* Analog Devices AD8870 Manufacturer and device ID */ 82 + #define ADP8870_DEVID(x) ((x) & 0xF) 83 + #define ADP8870_MANID(x) ((x) >> 4) 84 + 85 + /* MDCR Device mode and status */ 86 + #define D7ALSEN (1 << 7) 87 + #define INT_CFG (1 << 6) 88 + #define NSTBY (1 << 5) 89 + #define DIM_EN (1 << 4) 90 + #define GDWN_DIS (1 << 3) 91 + #define SIS_EN (1 << 2) 92 + #define CMP_AUTOEN (1 << 1) 93 + #define BLEN (1 << 0) 94 + 95 + /* ADP8870_ALS1_EN Main ALS comparator level enable */ 96 + #define L5_EN (1 << 3) 97 + #define L4_EN (1 << 2) 98 + #define L3_EN (1 << 1) 99 + #define L2_EN (1 << 0) 100 + 101 + #define CFGR_BLV_SHIFT 3 102 + #define CFGR_BLV_MASK 0x7 103 + #define ADP8870_FLAG_LED_MASK 0xFF 104 + 105 + #define FADE_VAL(in, out) ((0xF & (in)) | ((0xF & (out)) << 4)) 106 + #define BL_CFGR_VAL(law, blv) ((((blv) & CFGR_BLV_MASK) << CFGR_BLV_SHIFT) | ((0x3 & (law)) << 1)) 107 + #define ALS_CMPR_CFG_VAL(filt) ((0x7 & (filt)) << 1) 108 + 109 + struct adp8870_bl { 110 + struct i2c_client *client; 111 + struct backlight_device *bl; 112 + struct adp8870_led *led; 113 + struct adp8870_backlight_platform_data *pdata; 114 + struct mutex lock; 115 + unsigned long cached_daylight_max; 116 + 
int id; 117 + int revid; 118 + int current_brightness; 119 + }; 120 + 121 + struct adp8870_led { 122 + struct led_classdev cdev; 123 + struct work_struct work; 124 + struct i2c_client *client; 125 + enum led_brightness new_brightness; 126 + int id; 127 + int flags; 128 + }; 129 + 130 + static int adp8870_read(struct i2c_client *client, int reg, uint8_t *val) 131 + { 132 + int ret; 133 + 134 + ret = i2c_smbus_read_byte_data(client, reg); 135 + if (ret < 0) { 136 + dev_err(&client->dev, "failed reading at 0x%02x\n", reg); 137 + return ret; 138 + } 139 + 140 + *val = ret; 141 + return 0; 142 + } 143 + 144 + 145 + static int adp8870_write(struct i2c_client *client, u8 reg, u8 val) 146 + { 147 + int ret = i2c_smbus_write_byte_data(client, reg, val); 148 + if (ret) 149 + dev_err(&client->dev, "failed to write\n"); 150 + 151 + return ret; 152 + } 153 + 154 + static int adp8870_set_bits(struct i2c_client *client, int reg, uint8_t bit_mask) 155 + { 156 + struct adp8870_bl *data = i2c_get_clientdata(client); 157 + uint8_t reg_val; 158 + int ret; 159 + 160 + mutex_lock(&data->lock); 161 + 162 + ret = adp8870_read(client, reg, &reg_val); 163 + 164 + if (!ret && ((reg_val & bit_mask) == 0)) { 165 + reg_val |= bit_mask; 166 + ret = adp8870_write(client, reg, reg_val); 167 + } 168 + 169 + mutex_unlock(&data->lock); 170 + return ret; 171 + } 172 + 173 + static int adp8870_clr_bits(struct i2c_client *client, int reg, uint8_t bit_mask) 174 + { 175 + struct adp8870_bl *data = i2c_get_clientdata(client); 176 + uint8_t reg_val; 177 + int ret; 178 + 179 + mutex_lock(&data->lock); 180 + 181 + ret = adp8870_read(client, reg, &reg_val); 182 + 183 + if (!ret && (reg_val & bit_mask)) { 184 + reg_val &= ~bit_mask; 185 + ret = adp8870_write(client, reg, reg_val); 186 + } 187 + 188 + mutex_unlock(&data->lock); 189 + return ret; 190 + } 191 + 192 + /* 193 + * Independent sink / LED 194 + */ 195 + #if defined(ADP8870_USE_LEDS) 196 + static void adp8870_led_work(struct work_struct *work) 197 + { 
198 + struct adp8870_led *led = container_of(work, struct adp8870_led, work); 199 + adp8870_write(led->client, ADP8870_ISC1 + led->id - 1, 200 + led->new_brightness >> 1); 201 + } 202 + 203 + static void adp8870_led_set(struct led_classdev *led_cdev, 204 + enum led_brightness value) 205 + { 206 + struct adp8870_led *led; 207 + 208 + led = container_of(led_cdev, struct adp8870_led, cdev); 209 + led->new_brightness = value; 210 + /* 211 + * Use workqueue for IO since I2C operations can sleep. 212 + */ 213 + schedule_work(&led->work); 214 + } 215 + 216 + static int adp8870_led_setup(struct adp8870_led *led) 217 + { 218 + struct i2c_client *client = led->client; 219 + int ret = 0; 220 + 221 + ret = adp8870_write(client, ADP8870_ISC1 + led->id - 1, 0); 222 + if (ret) 223 + return ret; 224 + 225 + ret = adp8870_set_bits(client, ADP8870_ISCC, 1 << (led->id - 1)); 226 + if (ret) 227 + return ret; 228 + 229 + if (led->id > 4) 230 + ret = adp8870_set_bits(client, ADP8870_ISCT1, 231 + (led->flags & 0x3) << ((led->id - 5) * 2)); 232 + else 233 + ret = adp8870_set_bits(client, ADP8870_ISCT2, 234 + (led->flags & 0x3) << ((led->id - 1) * 2)); 235 + 236 + return ret; 237 + } 238 + 239 + static int __devinit adp8870_led_probe(struct i2c_client *client) 240 + { 241 + struct adp8870_backlight_platform_data *pdata = 242 + client->dev.platform_data; 243 + struct adp8870_bl *data = i2c_get_clientdata(client); 244 + struct adp8870_led *led, *led_dat; 245 + struct led_info *cur_led; 246 + int ret, i; 247 + 248 + 249 + led = kcalloc(pdata->num_leds, sizeof(*led), GFP_KERNEL); 250 + if (led == NULL) { 251 + dev_err(&client->dev, "failed to alloc memory\n"); 252 + return -ENOMEM; 253 + } 254 + 255 + ret = adp8870_write(client, ADP8870_ISCLAW, pdata->led_fade_law); 256 + if (ret) 257 + goto err_free; 258 + 259 + ret = adp8870_write(client, ADP8870_ISCT1, 260 + (pdata->led_on_time & 0x3) << 6); 261 + if (ret) 262 + goto err_free; 263 + 264 + ret = adp8870_write(client, ADP8870_ISCF, 265 + 
FADE_VAL(pdata->led_fade_in, pdata->led_fade_out)); 266 + if (ret) 267 + goto err_free; 268 + 269 + for (i = 0; i < pdata->num_leds; ++i) { 270 + cur_led = &pdata->leds[i]; 271 + led_dat = &led[i]; 272 + 273 + led_dat->id = cur_led->flags & ADP8870_FLAG_LED_MASK; 274 + 275 + if (led_dat->id > 7 || led_dat->id < 1) { 276 + dev_err(&client->dev, "Invalid LED ID %d\n", 277 + led_dat->id); 278 + goto err; 279 + } 280 + 281 + if (pdata->bl_led_assign & (1 << (led_dat->id - 1))) { 282 + dev_err(&client->dev, "LED %d used by Backlight\n", 283 + led_dat->id); 284 + goto err; 285 + } 286 + 287 + led_dat->cdev.name = cur_led->name; 288 + led_dat->cdev.default_trigger = cur_led->default_trigger; 289 + led_dat->cdev.brightness_set = adp8870_led_set; 290 + led_dat->cdev.brightness = LED_OFF; 291 + led_dat->flags = cur_led->flags >> FLAG_OFFT_SHIFT; 292 + led_dat->client = client; 293 + led_dat->new_brightness = LED_OFF; 294 + INIT_WORK(&led_dat->work, adp8870_led_work); 295 + 296 + ret = led_classdev_register(&client->dev, &led_dat->cdev); 297 + if (ret) { 298 + dev_err(&client->dev, "failed to register LED %d\n", 299 + led_dat->id); 300 + goto err; 301 + } 302 + 303 + ret = adp8870_led_setup(led_dat); 304 + if (ret) { 305 + dev_err(&client->dev, "failed to write\n"); 306 + i++; 307 + goto err; 308 + } 309 + } 310 + 311 + data->led = led; 312 + 313 + return 0; 314 + 315 + err: 316 + for (i = i - 1; i >= 0; --i) { 317 + led_classdev_unregister(&led[i].cdev); 318 + cancel_work_sync(&led[i].work); 319 + } 320 + 321 + err_free: 322 + kfree(led); 323 + 324 + return ret; 325 + } 326 + 327 + static int __devexit adp8870_led_remove(struct i2c_client *client) 328 + { 329 + struct adp8870_backlight_platform_data *pdata = 330 + client->dev.platform_data; 331 + struct adp8870_bl *data = i2c_get_clientdata(client); 332 + int i; 333 + 334 + for (i = 0; i < pdata->num_leds; i++) { 335 + led_classdev_unregister(&data->led[i].cdev); 336 + cancel_work_sync(&data->led[i].work); 337 + } 338 + 339 
+ kfree(data->led); 340 + return 0; 341 + } 342 + #else 343 + static int __devinit adp8870_led_probe(struct i2c_client *client) 344 + { 345 + return 0; 346 + } 347 + 348 + static int __devexit adp8870_led_remove(struct i2c_client *client) 349 + { 350 + return 0; 351 + } 352 + #endif 353 + 354 + static int adp8870_bl_set(struct backlight_device *bl, int brightness) 355 + { 356 + struct adp8870_bl *data = bl_get_data(bl); 357 + struct i2c_client *client = data->client; 358 + int ret = 0; 359 + 360 + if (data->pdata->en_ambl_sens) { 361 + if ((brightness > 0) && (brightness < ADP8870_MAX_BRIGHTNESS)) { 362 + /* Disable Ambient Light auto adjust */ 363 + ret = adp8870_clr_bits(client, ADP8870_MDCR, 364 + CMP_AUTOEN); 365 + if (ret) 366 + return ret; 367 + ret = adp8870_write(client, ADP8870_BLMX1, brightness); 368 + if (ret) 369 + return ret; 370 + } else { 371 + /* 372 + * MAX_BRIGHTNESS -> Enable Ambient Light auto adjust 373 + * restore daylight l1 sysfs brightness 374 + */ 375 + ret = adp8870_write(client, ADP8870_BLMX1, 376 + data->cached_daylight_max); 377 + if (ret) 378 + return ret; 379 + 380 + ret = adp8870_set_bits(client, ADP8870_MDCR, 381 + CMP_AUTOEN); 382 + if (ret) 383 + return ret; 384 + } 385 + } else { 386 + ret = adp8870_write(client, ADP8870_BLMX1, brightness); 387 + if (ret) 388 + return ret; 389 + } 390 + 391 + if (data->current_brightness && brightness == 0) 392 + ret = adp8870_set_bits(client, 393 + ADP8870_MDCR, DIM_EN); 394 + else if (data->current_brightness == 0 && brightness) 395 + ret = adp8870_clr_bits(client, 396 + ADP8870_MDCR, DIM_EN); 397 + 398 + if (!ret) 399 + data->current_brightness = brightness; 400 + 401 + return ret; 402 + } 403 + 404 + static int adp8870_bl_update_status(struct backlight_device *bl) 405 + { 406 + int brightness = bl->props.brightness; 407 + if (bl->props.power != FB_BLANK_UNBLANK) 408 + brightness = 0; 409 + 410 + if (bl->props.fb_blank != FB_BLANK_UNBLANK) 411 + brightness = 0; 412 + 413 + return 
adp8870_bl_set(bl, brightness); 414 + } 415 + 416 + static int adp8870_bl_get_brightness(struct backlight_device *bl) 417 + { 418 + struct adp8870_bl *data = bl_get_data(bl); 419 + 420 + return data->current_brightness; 421 + } 422 + 423 + static const struct backlight_ops adp8870_bl_ops = { 424 + .update_status = adp8870_bl_update_status, 425 + .get_brightness = adp8870_bl_get_brightness, 426 + }; 427 + 428 + static int adp8870_bl_setup(struct backlight_device *bl) 429 + { 430 + struct adp8870_bl *data = bl_get_data(bl); 431 + struct i2c_client *client = data->client; 432 + struct adp8870_backlight_platform_data *pdata = data->pdata; 433 + int ret = 0; 434 + 435 + ret = adp8870_write(client, ADP8870_BLSEL, ~pdata->bl_led_assign); 436 + if (ret) 437 + return ret; 438 + 439 + ret = adp8870_write(client, ADP8870_PWMLED, pdata->pwm_assign); 440 + if (ret) 441 + return ret; 442 + 443 + ret = adp8870_write(client, ADP8870_BLMX1, pdata->l1_daylight_max); 444 + if (ret) 445 + return ret; 446 + 447 + ret = adp8870_write(client, ADP8870_BLDM1, pdata->l1_daylight_dim); 448 + if (ret) 449 + return ret; 450 + 451 + if (pdata->en_ambl_sens) { 452 + data->cached_daylight_max = pdata->l1_daylight_max; 453 + ret = adp8870_write(client, ADP8870_BLMX2, 454 + pdata->l2_bright_max); 455 + if (ret) 456 + return ret; 457 + ret = adp8870_write(client, ADP8870_BLDM2, 458 + pdata->l2_bright_dim); 459 + if (ret) 460 + return ret; 461 + 462 + ret = adp8870_write(client, ADP8870_BLMX3, 463 + pdata->l3_office_max); 464 + if (ret) 465 + return ret; 466 + ret = adp8870_write(client, ADP8870_BLDM3, 467 + pdata->l3_office_dim); 468 + if (ret) 469 + return ret; 470 + 471 + ret = adp8870_write(client, ADP8870_BLMX4, 472 + pdata->l4_indoor_max); 473 + if (ret) 474 + return ret; 475 + 476 + ret = adp8870_write(client, ADP8870_BLDM4, 477 + pdata->l4_indor_dim); 478 + if (ret) 479 + return ret; 480 + 481 + ret = adp8870_write(client, ADP8870_BLMX5, 482 + pdata->l5_dark_max); 483 + if (ret) 484 + return 
ret; 485 + 486 + ret = adp8870_write(client, ADP8870_BLDM5, 487 + pdata->l5_dark_dim); 488 + if (ret) 489 + return ret; 490 + 491 + ret = adp8870_write(client, ADP8870_L2TRP, pdata->l2_trip); 492 + if (ret) 493 + return ret; 494 + 495 + ret = adp8870_write(client, ADP8870_L2HYS, pdata->l2_hyst); 496 + if (ret) 497 + return ret; 498 + 499 + ret = adp8870_write(client, ADP8870_L3TRP, pdata->l3_trip); 500 + if (ret) 501 + return ret; 502 + 503 + ret = adp8870_write(client, ADP8870_L3HYS, pdata->l3_hyst); 504 + if (ret) 505 + return ret; 506 + 507 + ret = adp8870_write(client, ADP8870_L4TRP, pdata->l4_trip); 508 + if (ret) 509 + return ret; 510 + 511 + ret = adp8870_write(client, ADP8870_L4HYS, pdata->l4_hyst); 512 + if (ret) 513 + return ret; 514 + 515 + ret = adp8870_write(client, ADP8870_L5TRP, pdata->l5_trip); 516 + if (ret) 517 + return ret; 518 + 519 + ret = adp8870_write(client, ADP8870_L5HYS, pdata->l5_hyst); 520 + if (ret) 521 + return ret; 522 + 523 + ret = adp8870_write(client, ADP8870_ALS1_EN, L5_EN | L4_EN | 524 + L3_EN | L2_EN); 525 + if (ret) 526 + return ret; 527 + 528 + ret = adp8870_write(client, ADP8870_CMP_CTL, 529 + ALS_CMPR_CFG_VAL(pdata->abml_filt)); 530 + if (ret) 531 + return ret; 532 + } 533 + 534 + ret = adp8870_write(client, ADP8870_CFGR, 535 + BL_CFGR_VAL(pdata->bl_fade_law, 0)); 536 + if (ret) 537 + return ret; 538 + 539 + ret = adp8870_write(client, ADP8870_BLFR, FADE_VAL(pdata->bl_fade_in, 540 + pdata->bl_fade_out)); 541 + if (ret) 542 + return ret; 543 + /* 544 + * ADP8870 Rev0 requires GDWN_DIS bit set 545 + */ 546 + 547 + ret = adp8870_set_bits(client, ADP8870_MDCR, BLEN | DIM_EN | NSTBY | 548 + (data->revid == 0 ? 
GDWN_DIS : 0)); 549 + 550 + return ret; 551 + } 552 + 553 + static ssize_t adp8870_show(struct device *dev, char *buf, int reg) 554 + { 555 + struct adp8870_bl *data = dev_get_drvdata(dev); 556 + int error; 557 + uint8_t reg_val; 558 + 559 + mutex_lock(&data->lock); 560 + error = adp8870_read(data->client, reg, &reg_val); 561 + mutex_unlock(&data->lock); 562 + 563 + if (error < 0) 564 + return error; 565 + 566 + return sprintf(buf, "%u\n", reg_val); 567 + } 568 + 569 + static ssize_t adp8870_store(struct device *dev, const char *buf, 570 + size_t count, int reg) 571 + { 572 + struct adp8870_bl *data = dev_get_drvdata(dev); 573 + unsigned long val; 574 + int ret; 575 + 576 + ret = strict_strtoul(buf, 10, &val); 577 + if (ret) 578 + return ret; 579 + 580 + mutex_lock(&data->lock); 581 + adp8870_write(data->client, reg, val); 582 + mutex_unlock(&data->lock); 583 + 584 + return count; 585 + } 586 + 587 + static ssize_t adp8870_bl_l5_dark_max_show(struct device *dev, 588 + struct device_attribute *attr, char *buf) 589 + { 590 + return adp8870_show(dev, buf, ADP8870_BLMX5); 591 + } 592 + 593 + static ssize_t adp8870_bl_l5_dark_max_store(struct device *dev, 594 + struct device_attribute *attr, const char *buf, size_t count) 595 + { 596 + return adp8870_store(dev, buf, count, ADP8870_BLMX5); 597 + } 598 + static DEVICE_ATTR(l5_dark_max, 0664, adp8870_bl_l5_dark_max_show, 599 + adp8870_bl_l5_dark_max_store); 600 + 601 + 602 + static ssize_t adp8870_bl_l4_indoor_max_show(struct device *dev, 603 + struct device_attribute *attr, char *buf) 604 + { 605 + return adp8870_show(dev, buf, ADP8870_BLMX4); 606 + } 607 + 608 + static ssize_t adp8870_bl_l4_indoor_max_store(struct device *dev, 609 + struct device_attribute *attr, const char *buf, size_t count) 610 + { 611 + return adp8870_store(dev, buf, count, ADP8870_BLMX4); 612 + } 613 + static DEVICE_ATTR(l4_indoor_max, 0664, adp8870_bl_l4_indoor_max_show, 614 + adp8870_bl_l4_indoor_max_store); 615 + 616 + 617 + static ssize_t 
adp8870_bl_l3_office_max_show(struct device *dev, 618 + struct device_attribute *attr, char *buf) 619 + { 620 + return adp8870_show(dev, buf, ADP8870_BLMX3); 621 + } 622 + 623 + static ssize_t adp8870_bl_l3_office_max_store(struct device *dev, 624 + struct device_attribute *attr, const char *buf, size_t count) 625 + { 626 + return adp8870_store(dev, buf, count, ADP8870_BLMX3); 627 + } 628 + 629 + static DEVICE_ATTR(l3_office_max, 0664, adp8870_bl_l3_office_max_show, 630 + adp8870_bl_l3_office_max_store); 631 + 632 + static ssize_t adp8870_bl_l2_bright_max_show(struct device *dev, 633 + struct device_attribute *attr, char *buf) 634 + { 635 + return adp8870_show(dev, buf, ADP8870_BLMX2); 636 + } 637 + 638 + static ssize_t adp8870_bl_l2_bright_max_store(struct device *dev, 639 + struct device_attribute *attr, const char *buf, size_t count) 640 + { 641 + return adp8870_store(dev, buf, count, ADP8870_BLMX2); 642 + } 643 + static DEVICE_ATTR(l2_bright_max, 0664, adp8870_bl_l2_bright_max_show, 644 + adp8870_bl_l2_bright_max_store); 645 + 646 + static ssize_t adp8870_bl_l1_daylight_max_show(struct device *dev, 647 + struct device_attribute *attr, char *buf) 648 + { 649 + return adp8870_show(dev, buf, ADP8870_BLMX1); 650 + } 651 + 652 + static ssize_t adp8870_bl_l1_daylight_max_store(struct device *dev, 653 + struct device_attribute *attr, const char *buf, size_t count) 654 + { 655 + struct adp8870_bl *data = dev_get_drvdata(dev); 656 + int ret = strict_strtoul(buf, 10, &data->cached_daylight_max); 657 + if (ret) 658 + return ret; 659 + 660 + return adp8870_store(dev, buf, count, ADP8870_BLMX1); 661 + } 662 + static DEVICE_ATTR(l1_daylight_max, 0664, adp8870_bl_l1_daylight_max_show, 663 + adp8870_bl_l1_daylight_max_store); 664 + 665 + static ssize_t adp8870_bl_l5_dark_dim_show(struct device *dev, 666 + struct device_attribute *attr, char *buf) 667 + { 668 + return adp8870_show(dev, buf, ADP8870_BLDM5); 669 + } 670 + 671 + static ssize_t adp8870_bl_l5_dark_dim_store(struct 
device *dev, 672 + struct device_attribute *attr, 673 + const char *buf, size_t count) 674 + { 675 + return adp8870_store(dev, buf, count, ADP8870_BLDM5); 676 + } 677 + static DEVICE_ATTR(l5_dark_dim, 0664, adp8870_bl_l5_dark_dim_show, 678 + adp8870_bl_l5_dark_dim_store); 679 + 680 + static ssize_t adp8870_bl_l4_indoor_dim_show(struct device *dev, 681 + struct device_attribute *attr, char *buf) 682 + { 683 + return adp8870_show(dev, buf, ADP8870_BLDM4); 684 + } 685 + 686 + static ssize_t adp8870_bl_l4_indoor_dim_store(struct device *dev, 687 + struct device_attribute *attr, 688 + const char *buf, size_t count) 689 + { 690 + return adp8870_store(dev, buf, count, ADP8870_BLDM4); 691 + } 692 + static DEVICE_ATTR(l4_indoor_dim, 0664, adp8870_bl_l4_indoor_dim_show, 693 + adp8870_bl_l4_indoor_dim_store); 694 + 695 + 696 + static ssize_t adp8870_bl_l3_office_dim_show(struct device *dev, 697 + struct device_attribute *attr, char *buf) 698 + { 699 + return adp8870_show(dev, buf, ADP8870_BLDM3); 700 + } 701 + 702 + static ssize_t adp8870_bl_l3_office_dim_store(struct device *dev, 703 + struct device_attribute *attr, 704 + const char *buf, size_t count) 705 + { 706 + return adp8870_store(dev, buf, count, ADP8870_BLDM3); 707 + } 708 + static DEVICE_ATTR(l3_office_dim, 0664, adp8870_bl_l3_office_dim_show, 709 + adp8870_bl_l3_office_dim_store); 710 + 711 + static ssize_t adp8870_bl_l2_bright_dim_show(struct device *dev, 712 + struct device_attribute *attr, char *buf) 713 + { 714 + return adp8870_show(dev, buf, ADP8870_BLDM2); 715 + } 716 + 717 + static ssize_t adp8870_bl_l2_bright_dim_store(struct device *dev, 718 + struct device_attribute *attr, 719 + const char *buf, size_t count) 720 + { 721 + return adp8870_store(dev, buf, count, ADP8870_BLDM2); 722 + } 723 + static DEVICE_ATTR(l2_bright_dim, 0664, adp8870_bl_l2_bright_dim_show, 724 + adp8870_bl_l2_bright_dim_store); 725 + 726 + static ssize_t adp8870_bl_l1_daylight_dim_show(struct device *dev, 727 + struct device_attribute 
*attr, char *buf) 728 + { 729 + return adp8870_show(dev, buf, ADP8870_BLDM1); 730 + } 731 + 732 + static ssize_t adp8870_bl_l1_daylight_dim_store(struct device *dev, 733 + struct device_attribute *attr, 734 + const char *buf, size_t count) 735 + { 736 + return adp8870_store(dev, buf, count, ADP8870_BLDM1); 737 + } 738 + static DEVICE_ATTR(l1_daylight_dim, 0664, adp8870_bl_l1_daylight_dim_show, 739 + adp8870_bl_l1_daylight_dim_store); 740 + 741 + #ifdef ADP8870_EXT_FEATURES 742 + static ssize_t adp8870_bl_ambient_light_level_show(struct device *dev, 743 + struct device_attribute *attr, char *buf) 744 + { 745 + struct adp8870_bl *data = dev_get_drvdata(dev); 746 + int error; 747 + uint8_t reg_val; 748 + uint16_t ret_val; 749 + 750 + mutex_lock(&data->lock); 751 + error = adp8870_read(data->client, ADP8870_PH1LEVL, &reg_val); 752 + if (error < 0) { 753 + mutex_unlock(&data->lock); 754 + return error; 755 + } 756 + ret_val = reg_val; 757 + error = adp8870_read(data->client, ADP8870_PH1LEVH, &reg_val); 758 + mutex_unlock(&data->lock); 759 + 760 + if (error < 0) 761 + return error; 762 + 763 + /* Return 13-bit conversion value for the first light sensor */ 764 + ret_val += (reg_val & 0x1F) << 8; 765 + 766 + return sprintf(buf, "%u\n", ret_val); 767 + } 768 + static DEVICE_ATTR(ambient_light_level, 0444, 769 + adp8870_bl_ambient_light_level_show, NULL); 770 + 771 + static ssize_t adp8870_bl_ambient_light_zone_show(struct device *dev, 772 + struct device_attribute *attr, char *buf) 773 + { 774 + struct adp8870_bl *data = dev_get_drvdata(dev); 775 + int error; 776 + uint8_t reg_val; 777 + 778 + mutex_lock(&data->lock); 779 + error = adp8870_read(data->client, ADP8870_CFGR, &reg_val); 780 + mutex_unlock(&data->lock); 781 + 782 + if (error < 0) 783 + return error; 784 + 785 + return sprintf(buf, "%u\n", 786 + ((reg_val >> CFGR_BLV_SHIFT) & CFGR_BLV_MASK) + 1); 787 + } 788 + 789 + static ssize_t adp8870_bl_ambient_light_zone_store(struct device *dev, 790 + struct 
device_attribute *attr, 791 + const char *buf, size_t count) 792 + { 793 + struct adp8870_bl *data = dev_get_drvdata(dev); 794 + unsigned long val; 795 + uint8_t reg_val; 796 + int ret; 797 + 798 + ret = strict_strtoul(buf, 10, &val); 799 + if (ret) 800 + return ret; 801 + 802 + if (val == 0) { 803 + /* Enable automatic ambient light sensing */ 804 + adp8870_set_bits(data->client, ADP8870_MDCR, CMP_AUTOEN); 805 + } else if ((val > 0) && (val < 6)) { 806 + /* Disable automatic ambient light sensing */ 807 + adp8870_clr_bits(data->client, ADP8870_MDCR, CMP_AUTOEN); 808 + 809 + /* Set user supplied ambient light zone */ 810 + mutex_lock(&data->lock); 811 + adp8870_read(data->client, ADP8870_CFGR, &reg_val); 812 + reg_val &= ~(CFGR_BLV_MASK << CFGR_BLV_SHIFT); 813 + reg_val |= (val - 1) << CFGR_BLV_SHIFT; 814 + adp8870_write(data->client, ADP8870_CFGR, reg_val); 815 + mutex_unlock(&data->lock); 816 + } 817 + 818 + return count; 819 + } 820 + static DEVICE_ATTR(ambient_light_zone, 0664, 821 + adp8870_bl_ambient_light_zone_show, 822 + adp8870_bl_ambient_light_zone_store); 823 + #endif 824 + 825 + static struct attribute *adp8870_bl_attributes[] = { 826 + &dev_attr_l5_dark_max.attr, 827 + &dev_attr_l5_dark_dim.attr, 828 + &dev_attr_l4_indoor_max.attr, 829 + &dev_attr_l4_indoor_dim.attr, 830 + &dev_attr_l3_office_max.attr, 831 + &dev_attr_l3_office_dim.attr, 832 + &dev_attr_l2_bright_max.attr, 833 + &dev_attr_l2_bright_dim.attr, 834 + &dev_attr_l1_daylight_max.attr, 835 + &dev_attr_l1_daylight_dim.attr, 836 + #ifdef ADP8870_EXT_FEATURES 837 + &dev_attr_ambient_light_level.attr, 838 + &dev_attr_ambient_light_zone.attr, 839 + #endif 840 + NULL 841 + }; 842 + 843 + static const struct attribute_group adp8870_bl_attr_group = { 844 + .attrs = adp8870_bl_attributes, 845 + }; 846 + 847 + static int __devinit adp8870_probe(struct i2c_client *client, 848 + const struct i2c_device_id *id) 849 + { 850 + struct backlight_properties props; 851 + struct backlight_device *bl; 852 + 
struct adp8870_bl *data; 853 + struct adp8870_backlight_platform_data *pdata = 854 + client->dev.platform_data; 855 + uint8_t reg_val; 856 + int ret; 857 + 858 + if (!i2c_check_functionality(client->adapter, 859 + I2C_FUNC_SMBUS_BYTE_DATA)) { 860 + dev_err(&client->dev, "SMBUS Byte Data not Supported\n"); 861 + return -EIO; 862 + } 863 + 864 + if (!pdata) { 865 + dev_err(&client->dev, "no platform data?\n"); 866 + return -EINVAL; 867 + } 868 + 869 + ret = adp8870_read(client, ADP8870_MFDVID, &reg_val); 870 + if (ret < 0) 871 + return -EIO; 872 + 873 + if (ADP8870_MANID(reg_val) != ADP8870_MANUFID) { 874 + dev_err(&client->dev, "failed to probe\n"); 875 + return -ENODEV; 876 + } 877 + 878 + data = kzalloc(sizeof(*data), GFP_KERNEL); 879 + if (data == NULL) 880 + return -ENOMEM; 881 + 882 + data->revid = ADP8870_DEVID(reg_val); 883 + data->client = client; 884 + data->pdata = pdata; 885 + data->id = id->driver_data; 886 + data->current_brightness = 0; 887 + i2c_set_clientdata(client, data); 888 + 889 + mutex_init(&data->lock); 890 + 891 + memset(&props, 0, sizeof(props)); 892 + props.type = BACKLIGHT_RAW; 893 + props.max_brightness = props.brightness = ADP8870_MAX_BRIGHTNESS; 894 + bl = backlight_device_register(dev_driver_string(&client->dev), 895 + &client->dev, data, &adp8870_bl_ops, &props); 896 + if (IS_ERR(bl)) { 897 + dev_err(&client->dev, "failed to register backlight\n"); 898 + ret = PTR_ERR(bl); 899 + goto out2; 900 + } 901 + 902 + data->bl = bl; 903 + 904 + if (pdata->en_ambl_sens) 905 + ret = sysfs_create_group(&bl->dev.kobj, 906 + &adp8870_bl_attr_group); 907 + 908 + if (ret) { 909 + dev_err(&client->dev, "failed to register sysfs\n"); 910 + goto out1; 911 + } 912 + 913 + ret = adp8870_bl_setup(bl); 914 + if (ret) { 915 + ret = -EIO; 916 + goto out; 917 + } 918 + 919 + backlight_update_status(bl); 920 + 921 + dev_info(&client->dev, "Rev.%d Backlight\n", data->revid); 922 + 923 + if (pdata->num_leds) 924 + adp8870_led_probe(client); 925 + 926 + return 0; 
927 + 928 + out: 929 + if (data->pdata->en_ambl_sens) 930 + sysfs_remove_group(&data->bl->dev.kobj, 931 + &adp8870_bl_attr_group); 932 + out1: 933 + backlight_device_unregister(bl); 934 + out2: 935 + i2c_set_clientdata(client, NULL); 936 + kfree(data); 937 + 938 + return ret; 939 + } 940 + 941 + static int __devexit adp8870_remove(struct i2c_client *client) 942 + { 943 + struct adp8870_bl *data = i2c_get_clientdata(client); 944 + 945 + adp8870_clr_bits(client, ADP8870_MDCR, NSTBY); 946 + 947 + if (data->led) 948 + adp8870_led_remove(client); 949 + 950 + if (data->pdata->en_ambl_sens) 951 + sysfs_remove_group(&data->bl->dev.kobj, 952 + &adp8870_bl_attr_group); 953 + 954 + backlight_device_unregister(data->bl); 955 + i2c_set_clientdata(client, NULL); 956 + kfree(data); 957 + 958 + return 0; 959 + } 960 + 961 + #ifdef CONFIG_PM 962 + static int adp8870_i2c_suspend(struct i2c_client *client, pm_message_t message) 963 + { 964 + adp8870_clr_bits(client, ADP8870_MDCR, NSTBY); 965 + 966 + return 0; 967 + } 968 + 969 + static int adp8870_i2c_resume(struct i2c_client *client) 970 + { 971 + adp8870_set_bits(client, ADP8870_MDCR, NSTBY); 972 + 973 + return 0; 974 + } 975 + #else 976 + #define adp8870_i2c_suspend NULL 977 + #define adp8870_i2c_resume NULL 978 + #endif 979 + 980 + static const struct i2c_device_id adp8870_id[] = { 981 + { "adp8870", 0 }, 982 + { } 983 + }; 984 + MODULE_DEVICE_TABLE(i2c, adp8870_id); 985 + 986 + static struct i2c_driver adp8870_driver = { 987 + .driver = { 988 + .name = KBUILD_MODNAME, 989 + }, 990 + .probe = adp8870_probe, 991 + .remove = __devexit_p(adp8870_remove), 992 + .suspend = adp8870_i2c_suspend, 993 + .resume = adp8870_i2c_resume, 994 + .id_table = adp8870_id, 995 + }; 996 + 997 + static int __init adp8870_init(void) 998 + { 999 + return i2c_add_driver(&adp8870_driver); 1000 + } 1001 + module_init(adp8870_init); 1002 + 1003 + static void __exit adp8870_exit(void) 1004 + { 1005 + i2c_del_driver(&adp8870_driver); 1006 + } 1007 + 
module_exit(adp8870_exit); 1008 + 1009 + MODULE_LICENSE("GPL v2"); 1010 + MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>"); 1011 + MODULE_DESCRIPTION("ADP8870 Backlight driver"); 1012 + MODULE_ALIAS("platform:adp8870-backlight");
+1 -1
drivers/w1/masters/Kconfig
··· 42 42 43 43 config W1_MASTER_DS1WM 44 44 tristate "Maxim DS1WM 1-wire busmaster" 45 - depends on W1 45 + depends on W1 && GENERIC_HARDIRQS 46 46 help 47 47 Say Y here to enable the DS1WM 1-wire driver, such as that 48 48 in HP iPAQ devices like h5xxx, h2200, and ASIC3-based like
+3 -3
fs/cifs/cache.c
··· 92 92 break; 93 93 94 94 default: 95 - cERROR(1, "CIFS: Unknown network family '%d'", sa->sa_family); 95 + cERROR(1, "Unknown network family '%d'", sa->sa_family); 96 96 key_len = 0; 97 97 break; 98 98 } ··· 152 152 153 153 sharename = extract_sharename(tcon->treeName); 154 154 if (IS_ERR(sharename)) { 155 - cFYI(1, "CIFS: couldn't extract sharename\n"); 155 + cFYI(1, "%s: couldn't extract sharename\n", __func__); 156 156 sharename = NULL; 157 157 return 0; 158 158 } ··· 302 302 pagevec_init(&pvec, 0); 303 303 first = 0; 304 304 305 - cFYI(1, "cifs inode 0x%p now uncached", cifsi); 305 + cFYI(1, "%s: cifs inode 0x%p now uncached", __func__, cifsi); 306 306 307 307 for (;;) { 308 308 nr_pages = pagevec_lookup(&pvec,
+33
fs/cifs/cifsfs.c
··· 352 352 } 353 353 } 354 354 355 + static void 356 + cifs_show_security(struct seq_file *s, struct TCP_Server_Info *server) 357 + { 358 + seq_printf(s, ",sec="); 359 + 360 + switch (server->secType) { 361 + case LANMAN: 362 + seq_printf(s, "lanman"); 363 + break; 364 + case NTLMv2: 365 + seq_printf(s, "ntlmv2"); 366 + break; 367 + case NTLM: 368 + seq_printf(s, "ntlm"); 369 + break; 370 + case Kerberos: 371 + seq_printf(s, "krb5"); 372 + break; 373 + case RawNTLMSSP: 374 + seq_printf(s, "ntlmssp"); 375 + break; 376 + default: 377 + /* shouldn't ever happen */ 378 + seq_printf(s, "unknown"); 379 + break; 380 + } 381 + 382 + if (server->sec_mode & (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) 383 + seq_printf(s, "i"); 384 + } 385 + 355 386 /* 356 387 * cifs_show_options() is for displaying mount options in /proc/mounts. 357 388 * Not all settable options are displayed but most of the important ··· 395 364 struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb); 396 365 struct sockaddr *srcaddr; 397 366 srcaddr = (struct sockaddr *)&tcon->ses->server->srcaddr; 367 + 368 + cifs_show_security(s, tcon->ses->server); 398 369 399 370 seq_printf(s, ",unc=%s", tcon->treeName); 400 371
+1 -1
fs/cifs/cifsfs.h
··· 129 129 extern const struct export_operations cifs_export_ops; 130 130 #endif /* CIFS_NFSD_EXPORT */ 131 131 132 - #define CIFS_VERSION "1.72" 132 + #define CIFS_VERSION "1.73" 133 133 #endif /* _CIFSFS_H */
+12 -11
fs/cifs/connect.c
··· 152 152 mid_entry->callback(mid_entry); 153 153 } 154 154 155 - while (server->tcpStatus == CifsNeedReconnect) { 155 + do { 156 156 try_to_freeze(); 157 157 158 158 /* we should try only the port we connected to before */ ··· 167 167 server->tcpStatus = CifsNeedNegotiate; 168 168 spin_unlock(&GlobalMid_Lock); 169 169 } 170 - } 170 + } while (server->tcpStatus == CifsNeedReconnect); 171 171 172 172 return rc; 173 173 } ··· 2149 2149 } 2150 2150 2151 2151 static inline struct tcon_link * 2152 - cifs_sb_master_tlink(struct cifs_sb_info *cifs_sb); 2152 + cifs_sb_master_tlink(struct cifs_sb_info *cifs_sb) 2153 + { 2154 + return cifs_sb->master_tlink; 2155 + } 2153 2156 2154 2157 static int 2155 2158 compare_mount_options(struct super_block *sb, struct cifs_mnt_data *mnt_data) ··· 3174 3171 return rc; 3175 3172 } 3176 3173 3174 + /* 3175 + * Issue a TREE_CONNECT request. Note that for IPC$ shares, that the tcon 3176 + * pointer may be NULL. 3177 + */ 3177 3178 int 3178 3179 CIFSTCon(unsigned int xid, struct cifs_ses *ses, 3179 3180 const char *tree, struct cifs_tcon *tcon, ··· 3212 3205 pSMB->AndXCommand = 0xFF; 3213 3206 pSMB->Flags = cpu_to_le16(TCON_EXTENDED_SECINFO); 3214 3207 bcc_ptr = &pSMB->Password[0]; 3215 - if ((ses->server->sec_mode) & SECMODE_USER) { 3208 + if (!tcon || (ses->server->sec_mode & SECMODE_USER)) { 3216 3209 pSMB->PasswordLength = cpu_to_le16(1); /* minimum */ 3217 3210 *bcc_ptr = 0; /* password is null byte */ 3218 3211 bcc_ptr++; /* skip password */ ··· 3378 3371 } 3379 3372 if (rc == 0) { 3380 3373 spin_lock(&GlobalMid_Lock); 3381 - if (server->tcpStatus != CifsExiting) 3374 + if (server->tcpStatus == CifsNeedNegotiate) 3382 3375 server->tcpStatus = CifsGood; 3383 3376 else 3384 3377 rc = -EHOSTDOWN; ··· 3489 3482 kfree(vol_info); 3490 3483 3491 3484 return tcon; 3492 - } 3493 - 3494 - static inline struct tcon_link * 3495 - cifs_sb_master_tlink(struct cifs_sb_info *cifs_sb) 3496 - { 3497 - return cifs_sb->master_tlink; 3498 3485 } 3499 
3486 3500 3487 struct cifs_tcon *
+24 -27
fs/cifs/fscache.c
··· 28 28 server->fscache = 29 29 fscache_acquire_cookie(cifs_fscache_netfs.primary_index, 30 30 &cifs_fscache_server_index_def, server); 31 - cFYI(1, "CIFS: get client cookie (0x%p/0x%p)", server, 32 - server->fscache); 31 + cFYI(1, "%s: (0x%p/0x%p)", __func__, server, 32 + server->fscache); 33 33 } 34 34 35 35 void cifs_fscache_release_client_cookie(struct TCP_Server_Info *server) 36 36 { 37 - cFYI(1, "CIFS: release client cookie (0x%p/0x%p)", server, 38 - server->fscache); 37 + cFYI(1, "%s: (0x%p/0x%p)", __func__, server, 38 + server->fscache); 39 39 fscache_relinquish_cookie(server->fscache, 0); 40 40 server->fscache = NULL; 41 41 } ··· 47 47 tcon->fscache = 48 48 fscache_acquire_cookie(server->fscache, 49 49 &cifs_fscache_super_index_def, tcon); 50 - cFYI(1, "CIFS: get superblock cookie (0x%p/0x%p)", 51 - server->fscache, tcon->fscache); 50 + cFYI(1, "%s: (0x%p/0x%p)", __func__, server->fscache, 51 + tcon->fscache); 52 52 } 53 53 54 54 void cifs_fscache_release_super_cookie(struct cifs_tcon *tcon) 55 55 { 56 - cFYI(1, "CIFS: releasing superblock cookie (0x%p)", tcon->fscache); 56 + cFYI(1, "%s: (0x%p)", __func__, tcon->fscache); 57 57 fscache_relinquish_cookie(tcon->fscache, 0); 58 58 tcon->fscache = NULL; 59 59 } ··· 70 70 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_FSCACHE) { 71 71 cifsi->fscache = fscache_acquire_cookie(tcon->fscache, 72 72 &cifs_fscache_inode_object_def, cifsi); 73 - cFYI(1, "CIFS: got FH cookie (0x%p/0x%p)", tcon->fscache, 74 - cifsi->fscache); 73 + cFYI(1, "%s: got FH cookie (0x%p/0x%p)", __func__, 74 + tcon->fscache, cifsi->fscache); 75 75 } 76 76 } 77 77 ··· 80 80 struct cifsInodeInfo *cifsi = CIFS_I(inode); 81 81 82 82 if (cifsi->fscache) { 83 - cFYI(1, "CIFS releasing inode cookie (0x%p)", 84 - cifsi->fscache); 83 + cFYI(1, "%s: (0x%p)", __func__, cifsi->fscache); 85 84 fscache_relinquish_cookie(cifsi->fscache, 0); 86 85 cifsi->fscache = NULL; 87 86 } ··· 91 92 struct cifsInodeInfo *cifsi = CIFS_I(inode); 92 93 93 94 if (cifsi->fscache) 
{ 94 - cFYI(1, "CIFS disabling inode cookie (0x%p)", 95 - cifsi->fscache); 95 + cFYI(1, "%s: (0x%p)", __func__, cifsi->fscache); 96 96 fscache_relinquish_cookie(cifsi->fscache, 1); 97 97 cifsi->fscache = NULL; 98 98 } ··· 119 121 cifs_sb_master_tcon(cifs_sb)->fscache, 120 122 &cifs_fscache_inode_object_def, 121 123 cifsi); 122 - cFYI(1, "CIFS: new cookie 0x%p oldcookie 0x%p", 123 - cifsi->fscache, old); 124 + cFYI(1, "%s: new cookie 0x%p oldcookie 0x%p", 125 + __func__, cifsi->fscache, old); 124 126 } 125 127 } 126 128 ··· 130 132 struct inode *inode = page->mapping->host; 131 133 struct cifsInodeInfo *cifsi = CIFS_I(inode); 132 134 133 - cFYI(1, "CIFS: fscache release page (0x%p/0x%p)", 134 - page, cifsi->fscache); 135 + cFYI(1, "%s: (0x%p/0x%p)", __func__, page, 136 + cifsi->fscache); 135 137 if (!fscache_maybe_release_page(cifsi->fscache, page, gfp)) 136 138 return 0; 137 139 } ··· 142 144 static void cifs_readpage_from_fscache_complete(struct page *page, void *ctx, 143 145 int error) 144 146 { 145 - cFYI(1, "CFS: readpage_from_fscache_complete (0x%p/%d)", 146 - page, error); 147 + cFYI(1, "%s: (0x%p/%d)", __func__, page, error); 147 148 if (!error) 148 149 SetPageUptodate(page); 149 150 unlock_page(page); ··· 155 158 { 156 159 int ret; 157 160 158 - cFYI(1, "CIFS: readpage_from_fscache(fsc:%p, p:%p, i:0x%p", 161 + cFYI(1, "%s: (fsc:%p, p:%p, i:0x%p", __func__, 159 162 CIFS_I(inode)->fscache, page, inode); 160 163 ret = fscache_read_or_alloc_page(CIFS_I(inode)->fscache, page, 161 164 cifs_readpage_from_fscache_complete, ··· 164 167 switch (ret) { 165 168 166 169 case 0: /* page found in fscache, read submitted */ 167 - cFYI(1, "CIFS: readpage_from_fscache: submitted"); 170 + cFYI(1, "%s: submitted", __func__); 168 171 return ret; 169 172 case -ENOBUFS: /* page won't be cached */ 170 173 case -ENODATA: /* page not in cache */ 171 - cFYI(1, "CIFS: readpage_from_fscache %d", ret); 174 + cFYI(1, "%s: %d", __func__, ret); 172 175 return 1; 173 176 174 177 default: 
··· 187 190 { 188 191 int ret; 189 192 190 - cFYI(1, "CIFS: __cifs_readpages_from_fscache (0x%p/%u/0x%p)", 193 + cFYI(1, "%s: (0x%p/%u/0x%p)", __func__, 191 194 CIFS_I(inode)->fscache, *nr_pages, inode); 192 195 ret = fscache_read_or_alloc_pages(CIFS_I(inode)->fscache, mapping, 193 196 pages, nr_pages, ··· 196 199 mapping_gfp_mask(mapping)); 197 200 switch (ret) { 198 201 case 0: /* read submitted to the cache for all pages */ 199 - cFYI(1, "CIFS: readpages_from_fscache: submitted"); 202 + cFYI(1, "%s: submitted", __func__); 200 203 return ret; 201 204 202 205 case -ENOBUFS: /* some pages are not cached and can't be */ 203 206 case -ENODATA: /* some pages are not cached */ 204 - cFYI(1, "CIFS: readpages_from_fscache: no page"); 207 + cFYI(1, "%s: no page", __func__); 205 208 return 1; 206 209 207 210 default: ··· 215 218 { 216 219 int ret; 217 220 218 - cFYI(1, "CIFS: readpage_to_fscache(fsc: %p, p: %p, i: %p", 221 + cFYI(1, "%s: (fsc: %p, p: %p, i: %p)", __func__, 219 222 CIFS_I(inode)->fscache, page, inode); 220 223 ret = fscache_write_page(CIFS_I(inode)->fscache, page, GFP_KERNEL); 221 224 if (ret != 0) ··· 227 230 struct cifsInodeInfo *cifsi = CIFS_I(inode); 228 231 struct fscache_cookie *cookie = cifsi->fscache; 229 232 230 - cFYI(1, "CIFS: fscache invalidatepage (0x%p/0x%p)", page, cookie); 233 + cFYI(1, "%s: (0x%p/0x%p)", __func__, page, cookie); 231 234 fscache_wait_on_page_write(cookie, page); 232 235 fscache_uncache_page(cookie, page); 233 236 }
+1 -1
include/asm-generic/pgtable.h
··· 88 88 pmd_t pmd = *pmdp; 89 89 pmd_clear(mm, address, pmdp); 90 90 return pmd; 91 - }) 91 + } 92 92 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ 93 93 #endif 94 94
+153
include/linux/i2c/adp8870.h
··· 1 + /* 2 + * Definitions and platform data for Analog Devices 3 + * Backlight drivers ADP8870 4 + * 5 + * Copyright 2009-2010 Analog Devices Inc. 6 + * 7 + * Licensed under the GPL-2 or later. 8 + */ 9 + 10 + #ifndef __LINUX_I2C_ADP8870_H 11 + #define __LINUX_I2C_ADP8870_H 12 + 13 + #define ID_ADP8870 8870 14 + 15 + #define ADP8870_MAX_BRIGHTNESS 0x7F 16 + #define FLAG_OFFT_SHIFT 8 17 + 18 + /* 19 + * LEDs subdevice platform data 20 + */ 21 + 22 + #define ADP8870_LED_DIS_BLINK (0 << FLAG_OFFT_SHIFT) 23 + #define ADP8870_LED_OFFT_600ms (1 << FLAG_OFFT_SHIFT) 24 + #define ADP8870_LED_OFFT_1200ms (2 << FLAG_OFFT_SHIFT) 25 + #define ADP8870_LED_OFFT_1800ms (3 << FLAG_OFFT_SHIFT) 26 + 27 + #define ADP8870_LED_ONT_200ms 0 28 + #define ADP8870_LED_ONT_600ms 1 29 + #define ADP8870_LED_ONT_800ms 2 30 + #define ADP8870_LED_ONT_1200ms 3 31 + 32 + #define ADP8870_LED_D7 (7) 33 + #define ADP8870_LED_D6 (6) 34 + #define ADP8870_LED_D5 (5) 35 + #define ADP8870_LED_D4 (4) 36 + #define ADP8870_LED_D3 (3) 37 + #define ADP8870_LED_D2 (2) 38 + #define ADP8870_LED_D1 (1) 39 + 40 + /* 41 + * Backlight subdevice platform data 42 + */ 43 + 44 + #define ADP8870_BL_D7 (1 << 6) 45 + #define ADP8870_BL_D6 (1 << 5) 46 + #define ADP8870_BL_D5 (1 << 4) 47 + #define ADP8870_BL_D4 (1 << 3) 48 + #define ADP8870_BL_D3 (1 << 2) 49 + #define ADP8870_BL_D2 (1 << 1) 50 + #define ADP8870_BL_D1 (1 << 0) 51 + 52 + #define ADP8870_FADE_T_DIS 0 /* Fade Timer Disabled */ 53 + #define ADP8870_FADE_T_300ms 1 /* 0.3 Sec */ 54 + #define ADP8870_FADE_T_600ms 2 55 + #define ADP8870_FADE_T_900ms 3 56 + #define ADP8870_FADE_T_1200ms 4 57 + #define ADP8870_FADE_T_1500ms 5 58 + #define ADP8870_FADE_T_1800ms 6 59 + #define ADP8870_FADE_T_2100ms 7 60 + #define ADP8870_FADE_T_2400ms 8 61 + #define ADP8870_FADE_T_2700ms 9 62 + #define ADP8870_FADE_T_3000ms 10 63 + #define ADP8870_FADE_T_3500ms 11 64 + #define ADP8870_FADE_T_4000ms 12 65 + #define ADP8870_FADE_T_4500ms 13 66 + #define ADP8870_FADE_T_5000ms 14 67 + 
#define ADP8870_FADE_T_5500ms 15 /* 5.5 Sec */ 68 + 69 + #define ADP8870_FADE_LAW_LINEAR 0 70 + #define ADP8870_FADE_LAW_SQUARE 1 71 + #define ADP8870_FADE_LAW_CUBIC1 2 72 + #define ADP8870_FADE_LAW_CUBIC2 3 73 + 74 + #define ADP8870_BL_AMBL_FILT_80ms 0 /* Light sensor filter time */ 75 + #define ADP8870_BL_AMBL_FILT_160ms 1 76 + #define ADP8870_BL_AMBL_FILT_320ms 2 77 + #define ADP8870_BL_AMBL_FILT_640ms 3 78 + #define ADP8870_BL_AMBL_FILT_1280ms 4 79 + #define ADP8870_BL_AMBL_FILT_2560ms 5 80 + #define ADP8870_BL_AMBL_FILT_5120ms 6 81 + #define ADP8870_BL_AMBL_FILT_10240ms 7 /* 10.24 sec */ 82 + 83 + /* 84 + * Blacklight current 0..30mA 85 + */ 86 + #define ADP8870_BL_CUR_mA(I) ((I * 127) / 30) 87 + 88 + /* 89 + * L2 comparator current 0..1106uA 90 + */ 91 + #define ADP8870_L2_COMP_CURR_uA(I) ((I * 255) / 1106) 92 + 93 + /* 94 + * L3 comparator current 0..551uA 95 + */ 96 + #define ADP8870_L3_COMP_CURR_uA(I) ((I * 255) / 551) 97 + 98 + /* 99 + * L4 comparator current 0..275uA 100 + */ 101 + #define ADP8870_L4_COMP_CURR_uA(I) ((I * 255) / 275) 102 + 103 + /* 104 + * L5 comparator current 0..138uA 105 + */ 106 + #define ADP8870_L5_COMP_CURR_uA(I) ((I * 255) / 138) 107 + 108 + struct adp8870_backlight_platform_data { 109 + u8 bl_led_assign; /* 1 = Backlight 0 = Individual LED */ 110 + u8 pwm_assign; /* 1 = Enables PWM mode */ 111 + 112 + u8 bl_fade_in; /* Backlight Fade-In Timer */ 113 + u8 bl_fade_out; /* Backlight Fade-Out Timer */ 114 + u8 bl_fade_law; /* fade-on/fade-off transfer characteristic */ 115 + 116 + u8 en_ambl_sens; /* 1 = enable ambient light sensor */ 117 + u8 abml_filt; /* Light sensor filter time */ 118 + 119 + u8 l1_daylight_max; /* use BL_CUR_mA(I) 0 <= I <= 30 mA */ 120 + u8 l1_daylight_dim; /* typ = 0, use BL_CUR_mA(I) 0 <= I <= 30 mA */ 121 + u8 l2_bright_max; /* use BL_CUR_mA(I) 0 <= I <= 30 mA */ 122 + u8 l2_bright_dim; /* typ = 0, use BL_CUR_mA(I) 0 <= I <= 30 mA */ 123 + u8 l3_office_max; /* use BL_CUR_mA(I) 0 <= I <= 30 mA */ 124 + u8 
l3_office_dim; /* typ = 0, use BL_CUR_mA(I) 0 <= I <= 30 mA */ 125 + u8 l4_indoor_max; /* use BL_CUR_mA(I) 0 <= I <= 30 mA */ 126 + u8 l4_indor_dim; /* typ = 0, use BL_CUR_mA(I) 0 <= I <= 30 mA */ 127 + u8 l5_dark_max; /* use BL_CUR_mA(I) 0 <= I <= 30 mA */ 128 + u8 l5_dark_dim; /* typ = 0, use BL_CUR_mA(I) 0 <= I <= 30 mA */ 129 + 130 + u8 l2_trip; /* use L2_COMP_CURR_uA(I) 0 <= I <= 1106 uA */ 131 + u8 l2_hyst; /* use L2_COMP_CURR_uA(I) 0 <= I <= 1106 uA */ 132 + u8 l3_trip; /* use L3_COMP_CURR_uA(I) 0 <= I <= 551 uA */ 133 + u8 l3_hyst; /* use L3_COMP_CURR_uA(I) 0 <= I <= 551 uA */ 134 + u8 l4_trip; /* use L4_COMP_CURR_uA(I) 0 <= I <= 275 uA */ 135 + u8 l4_hyst; /* use L4_COMP_CURR_uA(I) 0 <= I <= 275 uA */ 136 + u8 l5_trip; /* use L5_COMP_CURR_uA(I) 0 <= I <= 138 uA */ 137 + u8 l5_hyst; /* use L6_COMP_CURR_uA(I) 0 <= I <= 138 uA */ 138 + 139 + /** 140 + * Independent Current Sinks / LEDS 141 + * Sinks not assigned to the Backlight can be exposed to 142 + * user space using the LEDS CLASS interface 143 + */ 144 + 145 + int num_leds; 146 + struct led_info *leds; 147 + u8 led_fade_in; /* LED Fade-In Timer */ 148 + u8 led_fade_out; /* LED Fade-Out Timer */ 149 + u8 led_fade_law; /* fade-on/fade-off transfer characteristic */ 150 + u8 led_on_time; 151 + }; 152 + 153 + #endif /* __LINUX_I2C_ADP8870_H */
+2 -2
include/linux/kernel.h
··· 671 671 672 672 #ifdef __CHECKER__ 673 673 #define BUILD_BUG_ON_NOT_POWER_OF_2(n) 674 - #define BUILD_BUG_ON_ZERO(e) 675 - #define BUILD_BUG_ON_NULL(e) 674 + #define BUILD_BUG_ON_ZERO(e) (0) 675 + #define BUILD_BUG_ON_NULL(e) ((void*)0) 676 676 #define BUILD_BUG_ON(condition) 677 677 #else /* __CHECKER__ */ 678 678
+1
include/linux/kmsg_dump.h
··· 12 12 #ifndef _LINUX_KMSG_DUMP_H 13 13 #define _LINUX_KMSG_DUMP_H 14 14 15 + #include <linux/errno.h> 15 16 #include <linux/list.h> 16 17 17 18 enum kmsg_dump_reason {
+6
include/linux/memcontrol.h
··· 84 84 85 85 extern struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page); 86 86 extern struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p); 87 + extern struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm); 87 88 88 89 static inline 89 90 int mm_match_cgroup(const struct mm_struct *mm, const struct mem_cgroup *cgroup) ··· 243 242 } 244 243 245 244 static inline struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page) 245 + { 246 + return NULL; 247 + } 248 + 249 + static inline struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm) 246 250 { 247 251 return NULL; 248 252 }
+2 -6
include/linux/swap.h
··· 358 358 extern struct mm_struct *swap_token_mm; 359 359 extern void grab_swap_token(struct mm_struct *); 360 360 extern void __put_swap_token(struct mm_struct *); 361 + extern void disable_swap_token(struct mem_cgroup *memcg); 361 362 362 363 static inline int has_swap_token(struct mm_struct *mm) 363 364 { ··· 369 368 { 370 369 if (has_swap_token(mm)) 371 370 __put_swap_token(mm); 372 - } 373 - 374 - static inline void disable_swap_token(void) 375 - { 376 - put_swap_token(swap_token_mm); 377 371 } 378 372 379 373 #ifdef CONFIG_CGROUP_MEM_RES_CTLR ··· 496 500 return 0; 497 501 } 498 502 499 - static inline void disable_swap_token(void) 503 + static inline void disable_swap_token(struct mem_cgroup *memcg) 500 504 { 501 505 } 502 506
+1 -1
include/linux/topology.h
··· 60 60 * (in whatever arch specific measurement units returned by node_distance()) 61 61 * then switch on zone reclaim on boot. 62 62 */ 63 - #define RECLAIM_DISTANCE 20 63 + #define RECLAIM_DISTANCE 30 64 64 #endif 65 65 #ifndef PENALTY_FOR_NODE_WITH_CPUS 66 66 #define PENALTY_FOR_NODE_WITH_CPUS (1)
+1 -1
include/linux/uts.h
··· 9 9 #endif 10 10 11 11 #ifndef UTS_NODENAME 12 - #define UTS_NODENAME "(none)" /* set by sethostname() */ 12 + #define UTS_NODENAME CONFIG_DEFAULT_HOSTNAME /* set by sethostname() */ 13 13 #endif 14 14 15 15 #ifndef UTS_DOMAINNAME
+83
include/trace/events/vmscan.h
··· 6 6 7 7 #include <linux/types.h> 8 8 #include <linux/tracepoint.h> 9 + #include <linux/mm.h> 10 + #include <linux/memcontrol.h> 9 11 #include "gfpflags.h" 10 12 11 13 #define RECLAIM_WB_ANON 0x0001u ··· 312 310 show_reclaim_flags(__entry->reclaim_flags)) 313 311 ); 314 312 313 + TRACE_EVENT(replace_swap_token, 314 + TP_PROTO(struct mm_struct *old_mm, 315 + struct mm_struct *new_mm), 316 + 317 + TP_ARGS(old_mm, new_mm), 318 + 319 + TP_STRUCT__entry( 320 + __field(struct mm_struct*, old_mm) 321 + __field(unsigned int, old_prio) 322 + __field(struct mm_struct*, new_mm) 323 + __field(unsigned int, new_prio) 324 + ), 325 + 326 + TP_fast_assign( 327 + __entry->old_mm = old_mm; 328 + __entry->old_prio = old_mm ? old_mm->token_priority : 0; 329 + __entry->new_mm = new_mm; 330 + __entry->new_prio = new_mm->token_priority; 331 + ), 332 + 333 + TP_printk("old_token_mm=%p old_prio=%u new_token_mm=%p new_prio=%u", 334 + __entry->old_mm, __entry->old_prio, 335 + __entry->new_mm, __entry->new_prio) 336 + ); 337 + 338 + DECLARE_EVENT_CLASS(put_swap_token_template, 339 + TP_PROTO(struct mm_struct *swap_token_mm), 340 + 341 + TP_ARGS(swap_token_mm), 342 + 343 + TP_STRUCT__entry( 344 + __field(struct mm_struct*, swap_token_mm) 345 + ), 346 + 347 + TP_fast_assign( 348 + __entry->swap_token_mm = swap_token_mm; 349 + ), 350 + 351 + TP_printk("token_mm=%p", __entry->swap_token_mm) 352 + ); 353 + 354 + DEFINE_EVENT(put_swap_token_template, put_swap_token, 355 + TP_PROTO(struct mm_struct *swap_token_mm), 356 + TP_ARGS(swap_token_mm) 357 + ); 358 + 359 + DEFINE_EVENT_CONDITION(put_swap_token_template, disable_swap_token, 360 + TP_PROTO(struct mm_struct *swap_token_mm), 361 + TP_ARGS(swap_token_mm), 362 + TP_CONDITION(swap_token_mm != NULL) 363 + ); 364 + 365 + TRACE_EVENT_CONDITION(update_swap_token_priority, 366 + TP_PROTO(struct mm_struct *mm, 367 + unsigned int old_prio, 368 + struct mm_struct *swap_token_mm), 369 + 370 + TP_ARGS(mm, old_prio, swap_token_mm), 371 + 372 + 
TP_CONDITION(mm->token_priority != old_prio), 373 + 374 + TP_STRUCT__entry( 375 + __field(struct mm_struct*, mm) 376 + __field(unsigned int, old_prio) 377 + __field(unsigned int, new_prio) 378 + __field(struct mm_struct*, swap_token_mm) 379 + __field(unsigned int, swap_token_prio) 380 + ), 381 + 382 + TP_fast_assign( 383 + __entry->mm = mm; 384 + __entry->old_prio = old_prio; 385 + __entry->new_prio = mm->token_priority; 386 + __entry->swap_token_mm = swap_token_mm; 387 + __entry->swap_token_prio = swap_token_mm ? swap_token_mm->token_priority : 0; 388 + ), 389 + 390 + TP_printk("mm=%p old_prio=%u new_prio=%u swap_token_mm=%p token_prio=%u", 391 + __entry->mm, __entry->old_prio, __entry->new_prio, 392 + __entry->swap_token_mm, __entry->swap_token_prio) 393 + ); 315 394 316 395 #endif /* _TRACE_VMSCAN_H */ 317 396
+9 -1
init/Kconfig
··· 19 19 config CONSTRUCTORS 20 20 bool 21 21 depends on !UML 22 - default y 23 22 24 23 config HAVE_IRQ_WORK 25 24 bool ··· 202 203 (both compression and decompression) is the fastest. 203 204 204 205 endchoice 206 + 207 + config DEFAULT_HOSTNAME 208 + string "Default hostname" 209 + default "(none)" 210 + help 211 + This option determines the default system hostname before userspace 212 + calls sethostname(2). The kernel traditionally uses "(none)" here, 213 + but you may wish to use a different default here to make a minimal 214 + system more usable with less configuration. 205 215 206 216 config SWAP 207 217 bool "Support for paging of anonymous memory (swap)"
-3
init/calibrate.c
··· 93 93 * If the upper limit and lower limit of the timer_rate is 94 94 * >= 12.5% apart, redo calibration. 95 95 */ 96 - printk(KERN_DEBUG "calibrate_delay_direct() timer_rate_max=%lu " 97 - "timer_rate_min=%lu pre_start=%lu pre_end=%lu\n", 98 - timer_rate_max, timer_rate_min, pre_start, pre_end); 99 96 if (start >= post_end) 100 97 printk(KERN_NOTICE "calibrate_delay_direct() ignoring " 101 98 "timer_rate as we had a TSC wrap around"
+15 -16
kernel/exit.c
··· 561 561 562 562 #ifdef CONFIG_MM_OWNER 563 563 /* 564 - * Task p is exiting and it owned mm, lets find a new owner for it 564 + * A task is exiting. If it owned this mm, find a new owner for the mm. 565 565 */ 566 - static inline int 567 - mm_need_new_owner(struct mm_struct *mm, struct task_struct *p) 568 - { 569 - /* 570 - * If there are other users of the mm and the owner (us) is exiting 571 - * we need to find a new owner to take on the responsibility. 572 - */ 573 - if (atomic_read(&mm->mm_users) <= 1) 574 - return 0; 575 - if (mm->owner != p) 576 - return 0; 577 - return 1; 578 - } 579 - 580 566 void mm_update_next_owner(struct mm_struct *mm) 581 567 { 582 568 struct task_struct *c, *g, *p = current; 583 569 584 570 retry: 585 - if (!mm_need_new_owner(mm, p)) 571 + /* 572 + * If the exiting or execing task is not the owner, it's 573 + * someone else's problem. 574 + */ 575 + if (mm->owner != p) 586 576 return; 577 + /* 578 + * The current owner is exiting/execing and there are no other 579 + * candidates. Do not leave the mm pointing to a possibly 580 + * freed task structure. 581 + */ 582 + if (atomic_read(&mm->mm_users) <= 1) { 583 + mm->owner = NULL; 584 + return; 585 + } 587 586 588 587 read_lock(&tasklist_lock); 589 588 /*
+2 -1
kernel/gcov/Kconfig
··· 2 2 3 3 config GCOV_KERNEL 4 4 bool "Enable gcov-based kernel profiling" 5 - depends on DEBUG_FS && CONSTRUCTORS 5 + depends on DEBUG_FS 6 + select CONSTRUCTORS 6 7 default n 7 8 ---help--- 8 9 This option enables gcov-based code profiling (e.g. for code coverage
+5 -1
kernel/sched_rt.c
··· 1096 1096 * to move current somewhere else, making room for our non-migratable 1097 1097 * task. 1098 1098 */ 1099 - if (p->prio == rq->curr->prio && !need_resched()) 1099 + if (p->prio == rq->curr->prio && !test_tsk_need_resched(rq->curr)) 1100 1100 check_preempt_equal_prio(rq, p); 1101 1101 #endif 1102 1102 } ··· 1238 1238 struct cpumask *lowest_mask = __get_cpu_var(local_cpu_mask); 1239 1239 int this_cpu = smp_processor_id(); 1240 1240 int cpu = task_cpu(task); 1241 + 1242 + /* Make sure the mask is initialized first */ 1243 + if (unlikely(!lowest_mask)) 1244 + return -1; 1241 1245 1242 1246 if (task->rt.nr_cpus_allowed == 1) 1243 1247 return -1; /* No other targets possible */
+1 -1
kernel/signal.c
··· 2365 2365 /** 2366 2366 * sys_rt_sigprocmask - change the list of currently blocked signals 2367 2367 * @how: whether to add, remove, or set signals 2368 - * @set: stores pending signals 2368 + * @nset: stores pending signals 2369 2369 * @oset: previous value of signal mask if non-null 2370 2370 * @sigsetsize: size of sigset_t type 2371 2371 */
+1 -1
lib/bitmap.c
··· 572 572 573 573 /** 574 574 * __bitmap_parselist - convert list format ASCII string to bitmap 575 - * @bp: read nul-terminated user string from this buffer 575 + * @buf: read nul-terminated user string from this buffer 576 576 * @buflen: buffer size in bytes. If string is smaller than this 577 577 * then it must be terminated with a \0. 578 578 * @is_user: location of buffer, 0 indicates kernel space
+54 -22
mm/compaction.c
··· 144 144 int nr_freepages = cc->nr_freepages; 145 145 struct list_head *freelist = &cc->freepages; 146 146 147 + /* 148 + * Initialise the free scanner. The starting point is where we last 149 + * scanned from (or the end of the zone if starting). The low point 150 + * is the end of the pageblock the migration scanner is using. 151 + */ 147 152 pfn = cc->free_pfn; 148 153 low_pfn = cc->migrate_pfn + pageblock_nr_pages; 149 - high_pfn = low_pfn; 154 + 155 + /* 156 + * Take care that if the migration scanner is at the end of the zone 157 + * that the free scanner does not accidentally move to the next zone 158 + * in the next isolation cycle. 159 + */ 160 + high_pfn = min(low_pfn, pfn); 150 161 151 162 /* 152 163 * Isolate free pages until enough are available to migrate the ··· 251 240 return isolated > (inactive + active) / 2; 252 241 } 253 242 243 + /* possible outcome of isolate_migratepages */ 244 + typedef enum { 245 + ISOLATE_ABORT, /* Abort compaction now */ 246 + ISOLATE_NONE, /* No pages isolated, continue scanning */ 247 + ISOLATE_SUCCESS, /* Pages isolated, migrate */ 248 + } isolate_migrate_t; 249 + 254 250 /* 255 251 * Isolate all pages that can be migrated from the block pointed to by 256 252 * the migrate scanner within compact_control. 
257 253 */ 258 - static unsigned long isolate_migratepages(struct zone *zone, 254 + static isolate_migrate_t isolate_migratepages(struct zone *zone, 259 255 struct compact_control *cc) 260 256 { 261 257 unsigned long low_pfn, end_pfn; ··· 279 261 /* Do not cross the free scanner or scan within a memory hole */ 280 262 if (end_pfn > cc->free_pfn || !pfn_valid(low_pfn)) { 281 263 cc->migrate_pfn = end_pfn; 282 - return 0; 264 + return ISOLATE_NONE; 283 265 } 284 266 285 267 /* ··· 288 270 * delay for some time until fewer pages are isolated 289 271 */ 290 272 while (unlikely(too_many_isolated(zone))) { 273 + /* async migration should just abort */ 274 + if (!cc->sync) 275 + return ISOLATE_ABORT; 276 + 291 277 congestion_wait(BLK_RW_ASYNC, HZ/10); 292 278 293 279 if (fatal_signal_pending(current)) 294 - return 0; 280 + return ISOLATE_ABORT; 295 281 } 296 282 297 283 /* Time to isolate some pages for migration */ ··· 380 358 381 359 trace_mm_compaction_isolate_migratepages(nr_scanned, nr_isolated); 382 360 383 - return cc->nr_migratepages; 361 + return ISOLATE_SUCCESS; 384 362 } 385 363 386 364 /* ··· 442 420 if (cc->free_pfn <= cc->migrate_pfn) 443 421 return COMPACT_COMPLETE; 444 422 445 - /* Compaction run is not finished if the watermark is not met */ 446 - watermark = low_wmark_pages(zone); 447 - watermark += (1 << cc->order); 448 - 449 - if (!zone_watermark_ok(zone, cc->order, watermark, 0, 0)) 450 - return COMPACT_CONTINUE; 451 - 452 423 /* 453 424 * order == -1 is expected when compacting via 454 425 * /proc/sys/vm/compact_memory 455 426 */ 456 427 if (cc->order == -1) 428 + return COMPACT_CONTINUE; 429 + 430 + /* Compaction run is not finished if the watermark is not met */ 431 + watermark = low_wmark_pages(zone); 432 + watermark += (1 << cc->order); 433 + 434 + if (!zone_watermark_ok(zone, cc->order, watermark, 0, 0)) 457 435 return COMPACT_CONTINUE; 458 436 459 437 /* Direct compactor: Is a suitable page free? 
*/ ··· 483 461 unsigned long watermark; 484 462 485 463 /* 464 + * order == -1 is expected when compacting via 465 + * /proc/sys/vm/compact_memory 466 + */ 467 + if (order == -1) 468 + return COMPACT_CONTINUE; 469 + 470 + /* 486 471 * Watermarks for order-0 must be met for compaction. Note the 2UL. 487 472 * This is because during migration, copies of pages need to be 488 473 * allocated and for a short time, the footprint is higher ··· 499 470 return COMPACT_SKIPPED; 500 471 501 472 /* 502 - * order == -1 is expected when compacting via 503 - * /proc/sys/vm/compact_memory 504 - */ 505 - if (order == -1) 506 - return COMPACT_CONTINUE; 507 - 508 - /* 509 473 * fragmentation index determines if allocation failures are due to 510 474 * low memory or external fragmentation 511 475 * 512 - * index of -1 implies allocations might succeed dependingon watermarks 476 + * index of -1000 implies allocations might succeed depending on 477 + * watermarks 513 478 * index towards 0 implies failure is due to lack of memory 514 479 * index towards 1000 implies failure is due to fragmentation 515 480 * ··· 513 490 if (fragindex >= 0 && fragindex <= sysctl_extfrag_threshold) 514 491 return COMPACT_SKIPPED; 515 492 516 - if (fragindex == -1 && zone_watermark_ok(zone, order, watermark, 0, 0)) 493 + if (fragindex == -1000 && zone_watermark_ok(zone, order, watermark, 494 + 0, 0)) 517 495 return COMPACT_PARTIAL; 518 496 519 497 return COMPACT_CONTINUE; ··· 546 522 unsigned long nr_migrate, nr_remaining; 547 523 int err; 548 524 549 - if (!isolate_migratepages(zone, cc)) 525 + switch (isolate_migratepages(zone, cc)) { 526 + case ISOLATE_ABORT: 527 + ret = COMPACT_PARTIAL; 528 + goto out; 529 + case ISOLATE_NONE: 550 530 continue; 531 + case ISOLATE_SUCCESS: 532 + ; 533 + } 551 534 552 535 nr_migrate = cc->nr_migratepages; 553 536 err = migrate_pages(&cc->migratepages, compaction_alloc, ··· 578 547 579 548 } 580 549 550 + out: 581 551 /* Release free pages and check accounting */ 582 552 
cc->nr_freepages -= release_freepages(&cc->freepages); 583 553 VM_BUG_ON(cc->nr_freepages != 0);
+1 -4
mm/huge_memory.c
··· 2234 2234 while (likely(khugepaged_enabled())) { 2235 2235 #ifndef CONFIG_NUMA 2236 2236 hpage = khugepaged_alloc_hugepage(); 2237 - if (unlikely(!hpage)) { 2238 - count_vm_event(THP_COLLAPSE_ALLOC_FAILED); 2237 + if (unlikely(!hpage)) 2239 2238 break; 2240 - } 2241 - count_vm_event(THP_COLLAPSE_ALLOC); 2242 2239 #else 2243 2240 if (IS_ERR(hpage)) { 2244 2241 khugepaged_alloc_sleep();
+8
mm/hugetlb.c
··· 1111 1111 WARN_ON(page_count(page) != 1); 1112 1112 prep_compound_huge_page(page, h->order); 1113 1113 prep_new_huge_page(h, page, page_to_nid(page)); 1114 + /* 1115 + * If we had gigantic hugepages allocated at boot time, we need 1116 + * to restore the 'stolen' pages to totalram_pages in order to 1117 + * fix confusing memory reports from free(1) and another 1118 + * side-effects, like CommitLimit going negative. 1119 + */ 1120 + if (h->order > (MAX_ORDER - 1)) 1121 + totalram_pages += 1 << h->order; 1114 1122 } 1115 1123 } 1116 1124
+6
mm/ksm.c
··· 1302 1302 slot = list_entry(slot->mm_list.next, struct mm_slot, mm_list); 1303 1303 ksm_scan.mm_slot = slot; 1304 1304 spin_unlock(&ksm_mmlist_lock); 1305 + /* 1306 + * Although we tested list_empty() above, a racing __ksm_exit 1307 + * of the last mm on the list may have removed it since then. 1308 + */ 1309 + if (slot == &ksm_mm_head) 1310 + return NULL; 1305 1311 next_mm: 1306 1312 ksm_scan.address = 0; 1307 1313 ksm_scan.rmap_list = &slot->rmap_list;
+54 -27
mm/memcontrol.c
··· 359 359 static void mem_cgroup_get(struct mem_cgroup *mem); 360 360 static void mem_cgroup_put(struct mem_cgroup *mem); 361 361 static struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *mem); 362 - static void drain_all_stock_async(void); 362 + static void drain_all_stock_async(struct mem_cgroup *mem); 363 363 364 364 static struct mem_cgroup_per_zone * 365 365 mem_cgroup_zoneinfo(struct mem_cgroup *mem, int nid, int zid) ··· 735 735 struct mem_cgroup, css); 736 736 } 737 737 738 - static struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm) 738 + struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm) 739 739 { 740 740 struct mem_cgroup *mem = NULL; 741 741 ··· 1663 1663 excess = res_counter_soft_limit_excess(&root_mem->res) >> PAGE_SHIFT; 1664 1664 1665 1665 /* If memsw_is_minimum==1, swap-out is of-no-use. */ 1666 - if (root_mem->memsw_is_minimum) 1666 + if (!check_soft && root_mem->memsw_is_minimum) 1667 1667 noswap = true; 1668 1668 1669 1669 while (1) { 1670 1670 victim = mem_cgroup_select_victim(root_mem); 1671 1671 if (victim == root_mem) { 1672 1672 loop++; 1673 - if (loop >= 1) 1674 - drain_all_stock_async(); 1673 + /* 1674 + * We are not draining per cpu cached charges during 1675 + * soft limit reclaim because global reclaim doesn't 1676 + * care about charges. It tries to free some memory and 1677 + * charges will not give any. 
1678 + */ 1679 + if (!check_soft && loop >= 1) 1680 + drain_all_stock_async(root_mem); 1675 1681 if (loop >= 2) { 1676 1682 /* 1677 1683 * If we have not been able to reclaim ··· 1940 1934 struct mem_cgroup *cached; /* this never be root cgroup */ 1941 1935 unsigned int nr_pages; 1942 1936 struct work_struct work; 1937 + unsigned long flags; 1938 + #define FLUSHING_CACHED_CHARGE (0) 1943 1939 }; 1944 1940 static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock); 1945 - static atomic_t memcg_drain_count; 1941 + static DEFINE_MUTEX(percpu_charge_mutex); 1946 1942 1947 1943 /* 1948 1944 * Try to consume stocked charge on this cpu. If success, one page is consumed ··· 1992 1984 { 1993 1985 struct memcg_stock_pcp *stock = &__get_cpu_var(memcg_stock); 1994 1986 drain_stock(stock); 1987 + clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags); 1995 1988 } 1996 1989 1997 1990 /* ··· 2017 2008 * expects some charges will be back to res_counter later but cannot wait for 2018 2009 * it. 2019 2010 */ 2020 - static void drain_all_stock_async(void) 2011 + static void drain_all_stock_async(struct mem_cgroup *root_mem) 2021 2012 { 2022 - int cpu; 2023 - /* This function is for scheduling "drain" in asynchronous way. 2024 - * The result of "drain" is not directly handled by callers. Then, 2025 - * if someone is calling drain, we don't have to call drain more. 2026 - * Anyway, WORK_STRUCT_PENDING check in queue_work_on() will catch if 2027 - * there is a race. We just do loose check here. 2013 + int cpu, curcpu; 2014 + /* 2015 + * If someone calls draining, avoid adding more kworker runs. 2028 2016 */ 2029 - if (atomic_read(&memcg_drain_count)) 2017 + if (!mutex_trylock(&percpu_charge_mutex)) 2030 2018 return; 2031 2019 /* Notify other cpus that system-wide "drain" is running */ 2032 - atomic_inc(&memcg_drain_count); 2033 2020 get_online_cpus(); 2021 + /* 2022 + * Get a hint for avoiding draining charges on the current cpu, 2023 + * which must be exhausted by our charging. 
It is not required that 2024 + * this be a precise check, so we use raw_smp_processor_id() instead of 2025 + * getcpu()/putcpu(). 2026 + */ 2027 + curcpu = raw_smp_processor_id(); 2034 2028 for_each_online_cpu(cpu) { 2035 2029 struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu); 2036 - schedule_work_on(cpu, &stock->work); 2030 + struct mem_cgroup *mem; 2031 + 2032 + if (cpu == curcpu) 2033 + continue; 2034 + 2035 + mem = stock->cached; 2036 + if (!mem) 2037 + continue; 2038 + if (mem != root_mem) { 2039 + if (!root_mem->use_hierarchy) 2040 + continue; 2041 + /* check whether "mem" is under tree of "root_mem" */ 2042 + if (!css_is_ancestor(&mem->css, &root_mem->css)) 2043 + continue; 2044 + } 2045 + if (!test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) 2046 + schedule_work_on(cpu, &stock->work); 2037 2047 } 2038 2048 put_online_cpus(); 2039 - atomic_dec(&memcg_drain_count); 2049 + mutex_unlock(&percpu_charge_mutex); 2040 2050 /* We don't wait for flush_work */ 2041 2051 } 2042 2052 ··· 2063 2035 static void drain_all_stock_sync(void) 2064 2036 { 2065 2037 /* called when force_empty is called */ 2066 - atomic_inc(&memcg_drain_count); 2038 + mutex_lock(&percpu_charge_mutex); 2067 2039 schedule_on_each_cpu(drain_local_stock); 2068 - atomic_dec(&memcg_drain_count); 2040 + mutex_unlock(&percpu_charge_mutex); 2069 2041 } 2070 2042 2071 2043 /* ··· 4668 4640 { 4669 4641 .name = "numa_stat", 4670 4642 .open = mem_control_numa_stat_open, 4643 + .mode = S_IRUGO, 4671 4644 }, 4672 4645 #endif 4673 4646 }; ··· 5443 5414 struct cgroup *old_cont, 5444 5415 struct task_struct *p) 5445 5416 { 5446 - struct mm_struct *mm; 5417 + struct mm_struct *mm = get_task_mm(p); 5447 5418 5448 - if (!mc.to) 5449 - /* no need to move charge */ 5450 - return; 5451 - 5452 - mm = get_task_mm(p); 5453 5419 if (mm) { 5454 - mem_cgroup_move_charge(mm); 5420 + if (mc.to) 5421 + mem_cgroup_move_charge(mm); 5422 + put_swap_token(mm); 5455 5423 mmput(mm); 5456 5424 } 5457 - 
mem_cgroup_clear_mc(); 5425 + if (mc.to) 5426 + mem_cgroup_clear_mc(); 5458 5427 } 5459 5428 #else /* !CONFIG_MMU */ 5460 5429 static int mem_cgroup_can_attach(struct cgroup_subsys *ss,
+3 -1
mm/memory-failure.c
··· 52 52 #include <linux/swapops.h> 53 53 #include <linux/hugetlb.h> 54 54 #include <linux/memory_hotplug.h> 55 + #include <linux/mm_inline.h> 55 56 #include "internal.h" 56 57 57 58 int sysctl_memory_failure_early_kill __read_mostly = 0; ··· 1469 1468 put_page(page); 1470 1469 if (!ret) { 1471 1470 LIST_HEAD(pagelist); 1472 - 1471 + inc_zone_page_state(page, NR_ISOLATED_ANON + 1472 + page_is_file_cache(page)); 1473 1473 list_add(&page->lru, &pagelist); 1474 1474 ret = migrate_pages(&pagelist, new_page, MPOL_MF_MOVE_ALL, 1475 1475 0, true);
+5 -3
mm/memory.c
··· 1112 1112 int force_flush = 0; 1113 1113 int rss[NR_MM_COUNTERS]; 1114 1114 spinlock_t *ptl; 1115 + pte_t *start_pte; 1115 1116 pte_t *pte; 1116 1117 1117 1118 again: 1118 1119 init_rss_vec(rss); 1119 - pte = pte_offset_map_lock(mm, pmd, addr, &ptl); 1120 + start_pte = pte_offset_map_lock(mm, pmd, addr, &ptl); 1121 + pte = start_pte; 1120 1122 arch_enter_lazy_mmu_mode(); 1121 1123 do { 1122 1124 pte_t ptent = *pte; ··· 1198 1196 1199 1197 add_mm_rss_vec(mm, rss); 1200 1198 arch_leave_lazy_mmu_mode(); 1201 - pte_unmap_unlock(pte - 1, ptl); 1199 + pte_unmap_unlock(start_pte, ptl); 1202 1200 1203 1201 /* 1204 1202 * mmu_gather ran out of room to batch pages, we break out of ··· 1298 1296 1299 1297 /** 1300 1298 * unmap_vmas - unmap a range of memory covered by a list of vma's 1301 - * @tlbp: address of the caller's struct mmu_gather 1299 + * @tlb: address of the caller's struct mmu_gather 1302 1300 * @vma: the starting vma 1303 1301 * @start_addr: virtual address at which to start unmapping 1304 1302 * @end_addr: virtual address at which to end unmapping
+6
mm/memory_hotplug.c
··· 494 494 /* init node's zones as empty zones, we don't have any present pages.*/ 495 495 free_area_init_node(nid, zones_size, start_pfn, zholes_size); 496 496 497 + /* 498 + * The node we allocated has no zone fallback lists. For avoiding 499 + * to access not-initialized zonelist, build here. 500 + */ 501 + build_all_zonelists(NULL); 502 + 497 503 return pgdat; 498 504 } 499 505
+53 -18
mm/page_cgroup.c
··· 162 162 } 163 163 #endif 164 164 165 - static int __meminit init_section_page_cgroup(unsigned long pfn) 165 + static int __meminit init_section_page_cgroup(unsigned long pfn, int nid) 166 166 { 167 167 struct page_cgroup *base, *pc; 168 168 struct mem_section *section; 169 169 unsigned long table_size; 170 170 unsigned long nr; 171 - int nid, index; 171 + int index; 172 172 173 173 nr = pfn_to_section_nr(pfn); 174 174 section = __nr_to_section(nr); ··· 176 176 if (section->page_cgroup) 177 177 return 0; 178 178 179 - nid = page_to_nid(pfn_to_page(pfn)); 180 179 table_size = sizeof(struct page_cgroup) * PAGES_PER_SECTION; 181 180 base = alloc_page_cgroup(table_size, nid); 182 181 ··· 195 196 pc = base + index; 196 197 init_page_cgroup(pc, nr); 197 198 } 198 - 199 + /* 200 + * The passed "pfn" may not be aligned to SECTION. For the calculation 201 + * we need to apply a mask. 202 + */ 203 + pfn &= PAGE_SECTION_MASK; 199 204 section->page_cgroup = base - pfn; 200 205 total_usage += table_size; 201 206 return 0; ··· 228 225 start = start_pfn & ~(PAGES_PER_SECTION - 1); 229 226 end = ALIGN(start_pfn + nr_pages, PAGES_PER_SECTION); 230 227 228 + if (nid == -1) { 229 + /* 230 + * In this case, "nid" already exists and contains valid memory. 231 + * "start_pfn" passed to us is a pfn which is an arg for 232 + * online__pages(), and start_pfn should exist. 
233 + */ 234 + nid = pfn_to_nid(start_pfn); 235 + VM_BUG_ON(!node_state(nid, N_ONLINE)); 236 + } 237 + 231 238 for (pfn = start; !fail && pfn < end; pfn += PAGES_PER_SECTION) { 232 239 if (!pfn_present(pfn)) 233 240 continue; 234 - fail = init_section_page_cgroup(pfn); 241 + fail = init_section_page_cgroup(pfn, nid); 235 242 } 236 243 if (!fail) 237 244 return 0; ··· 297 284 void __init page_cgroup_init(void) 298 285 { 299 286 unsigned long pfn; 300 - int fail = 0; 287 + int nid; 301 288 302 289 if (mem_cgroup_disabled()) 303 290 return; 304 291 305 - for (pfn = 0; !fail && pfn < max_pfn; pfn += PAGES_PER_SECTION) { 306 - if (!pfn_present(pfn)) 307 - continue; 308 - fail = init_section_page_cgroup(pfn); 292 + for_each_node_state(nid, N_HIGH_MEMORY) { 293 + unsigned long start_pfn, end_pfn; 294 + 295 + start_pfn = node_start_pfn(nid); 296 + end_pfn = node_end_pfn(nid); 297 + /* 298 + * start_pfn and end_pfn may not be aligned to SECTION and the 299 + * page->flags of out of node pages are not initialized. So we 300 + * scan [start_pfn, the biggest section's pfn < end_pfn) here. 301 + */ 302 + for (pfn = start_pfn; 303 + pfn < end_pfn; 304 + pfn = ALIGN(pfn + 1, PAGES_PER_SECTION)) { 305 + 306 + if (!pfn_valid(pfn)) 307 + continue; 308 + /* 309 + * Nodes's pfns can be overlapping. 310 + * We know some arch can have a nodes layout such as 311 + * -------------pfn--------------> 312 + * N0 | N1 | N2 | N0 | N1 | N2|.... 
313 + */ 314 + if (pfn_to_nid(pfn) != nid) 315 + continue; 316 + if (init_section_page_cgroup(pfn, nid)) 317 + goto oom; 318 + } 309 319 } 310 - if (fail) { 311 - printk(KERN_CRIT "try 'cgroup_disable=memory' boot option\n"); 312 - panic("Out of memory"); 313 - } else { 314 - hotplug_memory_notifier(page_cgroup_callback, 0); 315 - } 320 + hotplug_memory_notifier(page_cgroup_callback, 0); 316 321 printk(KERN_INFO "allocated %ld bytes of page_cgroup\n", total_usage); 317 - printk(KERN_INFO "please try 'cgroup_disable=memory' option if you don't" 318 - " want memory cgroups\n"); 322 + printk(KERN_INFO "please try 'cgroup_disable=memory' option if you " 323 + "don't want memory cgroups\n"); 324 + return; 325 + oom: 326 + printk(KERN_CRIT "try 'cgroup_disable=memory' boot option\n"); 327 + panic("Out of memory"); 319 328 } 320 329 321 330 void __meminit pgdat_page_cgroup_init(struct pglist_data *pgdat)
+86 -19
mm/thrash.c
··· 21 21 #include <linux/mm.h> 22 22 #include <linux/sched.h> 23 23 #include <linux/swap.h> 24 + #include <linux/memcontrol.h> 25 + 26 + #include <trace/events/vmscan.h> 27 + 28 + #define TOKEN_AGING_INTERVAL (0xFF) 24 29 25 30 static DEFINE_SPINLOCK(swap_token_lock); 26 31 struct mm_struct *swap_token_mm; 32 + struct mem_cgroup *swap_token_memcg; 27 33 static unsigned int global_faults; 34 + static unsigned int last_aging; 35 + 36 + #ifdef CONFIG_CGROUP_MEM_RES_CTLR 37 + static struct mem_cgroup *swap_token_memcg_from_mm(struct mm_struct *mm) 38 + { 39 + struct mem_cgroup *memcg; 40 + 41 + memcg = try_get_mem_cgroup_from_mm(mm); 42 + if (memcg) 43 + css_put(mem_cgroup_css(memcg)); 44 + 45 + return memcg; 46 + } 47 + #else 48 + static struct mem_cgroup *swap_token_memcg_from_mm(struct mm_struct *mm) 49 + { 50 + return NULL; 51 + } 52 + #endif 28 53 29 54 void grab_swap_token(struct mm_struct *mm) 30 55 { 31 56 int current_interval; 57 + unsigned int old_prio = mm->token_priority; 32 58 33 59 global_faults++; 34 60 ··· 64 38 return; 65 39 66 40 /* First come first served */ 67 - if (swap_token_mm == NULL) { 68 - mm->token_priority = mm->token_priority + 2; 69 - swap_token_mm = mm; 70 - goto out; 41 + if (!swap_token_mm) 42 + goto replace_token; 43 + 44 + if ((global_faults - last_aging) > TOKEN_AGING_INTERVAL) { 45 + swap_token_mm->token_priority /= 2; 46 + last_aging = global_faults; 71 47 } 72 48 73 - if (mm != swap_token_mm) { 74 - if (current_interval < mm->last_interval) 75 - mm->token_priority++; 76 - else { 77 - if (likely(mm->token_priority > 0)) 78 - mm->token_priority--; 79 - } 80 - /* Check if we deserve the token */ 81 - if (mm->token_priority > swap_token_mm->token_priority) { 82 - mm->token_priority += 2; 83 - swap_token_mm = mm; 84 - } 85 - } else { 86 - /* Token holder came in again! 
*/ 49 + if (mm == swap_token_mm) { 87 50 mm->token_priority += 2; 51 + goto update_priority; 88 52 } 53 + 54 + if (current_interval < mm->last_interval) 55 + mm->token_priority++; 56 + else { 57 + if (likely(mm->token_priority > 0)) 58 + mm->token_priority--; 59 + } 60 + 61 + /* Check if we deserve the token */ 62 + if (mm->token_priority > swap_token_mm->token_priority) 63 + goto replace_token; 64 + 65 + update_priority: 66 + trace_update_swap_token_priority(mm, old_prio, swap_token_mm); 89 67 90 68 out: 91 69 mm->faultstamp = global_faults; 92 70 mm->last_interval = current_interval; 93 71 spin_unlock(&swap_token_lock); 72 + return; 73 + 74 + replace_token: 75 + mm->token_priority += 2; 76 + trace_replace_swap_token(swap_token_mm, mm); 77 + swap_token_mm = mm; 78 + swap_token_memcg = swap_token_memcg_from_mm(mm); 79 + last_aging = global_faults; 80 + goto out; 94 81 } 95 82 96 83 /* Called on process exit. */ 97 84 void __put_swap_token(struct mm_struct *mm) 98 85 { 99 86 spin_lock(&swap_token_lock); 100 - if (likely(mm == swap_token_mm)) 87 + if (likely(mm == swap_token_mm)) { 88 + trace_put_swap_token(swap_token_mm); 101 89 swap_token_mm = NULL; 90 + swap_token_memcg = NULL; 91 + } 102 92 spin_unlock(&swap_token_lock); 93 + } 94 + 95 + static bool match_memcg(struct mem_cgroup *a, struct mem_cgroup *b) 96 + { 97 + if (!a) 98 + return true; 99 + if (!b) 100 + return true; 101 + if (a == b) 102 + return true; 103 + return false; 104 + } 105 + 106 + void disable_swap_token(struct mem_cgroup *memcg) 107 + { 108 + /* memcg reclaim don't disable unrelated mm token. */ 109 + if (match_memcg(memcg, swap_token_memcg)) { 110 + spin_lock(&swap_token_lock); 111 + if (match_memcg(memcg, swap_token_memcg)) { 112 + trace_disable_swap_token(swap_token_mm); 113 + swap_token_mm = NULL; 114 + swap_token_memcg = NULL; 115 + } 116 + spin_unlock(&swap_token_lock); 117 + } 103 118 }
+16 -4
mm/vmscan.c
··· 1124 1124 nr_lumpy_dirty++; 1125 1125 scan++; 1126 1126 } else { 1127 - /* the page is freed already. */ 1128 - if (!page_count(cursor_page)) 1127 + /* 1128 + * Check if the page is freed already. 1129 + * 1130 + * We can't use page_count() as that 1131 + * requires compound_head and we don't 1132 + * have a pin on the page here. If a 1133 + * page is tail, we may or may not 1134 + * have isolated the head, so assume 1135 + * it's not free, it'd be tricky to 1136 + * track the head status without a 1137 + * page pin. 1138 + */ 1139 + if (!PageTail(cursor_page) && 1140 + !atomic_read(&cursor_page->_count)) 1129 1141 continue; 1130 1142 break; 1131 1143 } ··· 2093 2081 for (priority = DEF_PRIORITY; priority >= 0; priority--) { 2094 2082 sc->nr_scanned = 0; 2095 2083 if (!priority) 2096 - disable_swap_token(); 2084 + disable_swap_token(sc->mem_cgroup); 2097 2085 total_scanned += shrink_zones(priority, zonelist, sc); 2098 2086 /* 2099 2087 * Don't shrink slabs when reclaiming memory from ··· 2419 2407 2420 2408 /* The swap token gets in the way of swapout... */ 2421 2409 if (!priority) 2422 - disable_swap_token(); 2410 + disable_swap_token(NULL); 2423 2411 2424 2412 all_zones_ok = 1; 2425 2413 balanced = 0;
+5
scripts/checkpatch.pl
··· 1943 1943 WARN("LINUX_VERSION_CODE should be avoided, code should be for the version to which it is merged\n" . $herecurr); 1944 1944 } 1945 1945 1946 + # check for uses of printk_ratelimit 1947 + if ($line =~ /\bprintk_ratelimit\s*\(/) { 1948 + WARN("Prefer printk_ratelimited or pr_<level>_ratelimited to printk_ratelimit\n" . $herecurr); 1949 + } 1950 + 1946 1951 # printk should use KERN_* levels. Note that follow on printk's on the 1947 1952 # same line do not need a level, so we use the current block context 1948 1953 # to try and find and validate the current printk. In summary the current
+1 -1
security/tomoyo/mount.c
··· 138 138 } 139 139 if (need_dev) { 140 140 /* Get mount point or device file. */ 141 - if (kern_path(dev_name, LOOKUP_FOLLOW, &path)) { 141 + if (!dev_name || kern_path(dev_name, LOOKUP_FOLLOW, &path)) { 142 142 error = -ENOENT; 143 143 goto out; 144 144 }