Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge commit 'v2.6.39' into for-2.6.40/core

Since for-2.6.40/core was forked off the 2.6.39 devel tree, we've
had churn in the core area that makes it difficult to handle
patches for eg cfq or blk-throttle. Instead of requiring that they
be based in older versions with bugs that have been fixed later
in the rc cycle, merge in 2.6.39 final.

Also fixes up conflicts in the below files.

Conflicts:
drivers/block/paride/pcd.c
drivers/cdrom/viocd.c
drivers/ide/ide-cd.c

Signed-off-by: Jens Axboe <jaxboe@fusionio.com>

+8483 -4484
+1
Documentation/DocBook/media-entities.tmpl
··· 294 294 <!ENTITY sub-srggb10 SYSTEM "v4l/pixfmt-srggb10.xml"> 295 295 <!ENTITY sub-srggb8 SYSTEM "v4l/pixfmt-srggb8.xml"> 296 296 <!ENTITY sub-y10 SYSTEM "v4l/pixfmt-y10.xml"> 297 + <!ENTITY sub-y12 SYSTEM "v4l/pixfmt-y12.xml"> 297 298 <!ENTITY sub-pixfmt SYSTEM "v4l/pixfmt.xml"> 298 299 <!ENTITY sub-cropcap SYSTEM "v4l/vidioc-cropcap.xml"> 299 300 <!ENTITY sub-dbg-g-register SYSTEM "v4l/vidioc-dbg-g-register.xml">
+1 -1
Documentation/DocBook/v4l/media-ioc-setup-link.xml
··· 34 34 <varlistentry> 35 35 <term><parameter>request</parameter></term> 36 36 <listitem> 37 - <para>MEDIA_IOC_ENUM_LINKS</para> 37 + <para>MEDIA_IOC_SETUP_LINK</para> 38 38 </listitem> 39 39 </varlistentry> 40 40 <varlistentry>
+79
Documentation/DocBook/v4l/pixfmt-y12.xml
··· 1 + <refentry id="V4L2-PIX-FMT-Y12"> 2 + <refmeta> 3 + <refentrytitle>V4L2_PIX_FMT_Y12 ('Y12 ')</refentrytitle> 4 + &manvol; 5 + </refmeta> 6 + <refnamediv> 7 + <refname><constant>V4L2_PIX_FMT_Y12</constant></refname> 8 + <refpurpose>Grey-scale image</refpurpose> 9 + </refnamediv> 10 + <refsect1> 11 + <title>Description</title> 12 + 13 + <para>This is a grey-scale image with a depth of 12 bits per pixel. Pixels 14 + are stored in 16-bit words with unused high bits padded with 0. The least 15 + significant byte is stored at lower memory addresses (little-endian).</para> 16 + 17 + <example> 18 + <title><constant>V4L2_PIX_FMT_Y12</constant> 4 &times; 4 19 + pixel image</title> 20 + 21 + <formalpara> 22 + <title>Byte Order.</title> 23 + <para>Each cell is one byte. 24 + <informaltable frame="none"> 25 + <tgroup cols="9" align="center"> 26 + <colspec align="left" colwidth="2*" /> 27 + <tbody valign="top"> 28 + <row> 29 + <entry>start&nbsp;+&nbsp;0:</entry> 30 + <entry>Y'<subscript>00low</subscript></entry> 31 + <entry>Y'<subscript>00high</subscript></entry> 32 + <entry>Y'<subscript>01low</subscript></entry> 33 + <entry>Y'<subscript>01high</subscript></entry> 34 + <entry>Y'<subscript>02low</subscript></entry> 35 + <entry>Y'<subscript>02high</subscript></entry> 36 + <entry>Y'<subscript>03low</subscript></entry> 37 + <entry>Y'<subscript>03high</subscript></entry> 38 + </row> 39 + <row> 40 + <entry>start&nbsp;+&nbsp;8:</entry> 41 + <entry>Y'<subscript>10low</subscript></entry> 42 + <entry>Y'<subscript>10high</subscript></entry> 43 + <entry>Y'<subscript>11low</subscript></entry> 44 + <entry>Y'<subscript>11high</subscript></entry> 45 + <entry>Y'<subscript>12low</subscript></entry> 46 + <entry>Y'<subscript>12high</subscript></entry> 47 + <entry>Y'<subscript>13low</subscript></entry> 48 + <entry>Y'<subscript>13high</subscript></entry> 49 + </row> 50 + <row> 51 + <entry>start&nbsp;+&nbsp;16:</entry> 52 + <entry>Y'<subscript>20low</subscript></entry> 53 + 
<entry>Y'<subscript>20high</subscript></entry> 54 + <entry>Y'<subscript>21low</subscript></entry> 55 + <entry>Y'<subscript>21high</subscript></entry> 56 + <entry>Y'<subscript>22low</subscript></entry> 57 + <entry>Y'<subscript>22high</subscript></entry> 58 + <entry>Y'<subscript>23low</subscript></entry> 59 + <entry>Y'<subscript>23high</subscript></entry> 60 + </row> 61 + <row> 62 + <entry>start&nbsp;+&nbsp;24:</entry> 63 + <entry>Y'<subscript>30low</subscript></entry> 64 + <entry>Y'<subscript>30high</subscript></entry> 65 + <entry>Y'<subscript>31low</subscript></entry> 66 + <entry>Y'<subscript>31high</subscript></entry> 67 + <entry>Y'<subscript>32low</subscript></entry> 68 + <entry>Y'<subscript>32high</subscript></entry> 69 + <entry>Y'<subscript>33low</subscript></entry> 70 + <entry>Y'<subscript>33high</subscript></entry> 71 + </row> 72 + </tbody> 73 + </tgroup> 74 + </informaltable> 75 + </para> 76 + </formalpara> 77 + </example> 78 + </refsect1> 79 + </refentry>
+1
Documentation/DocBook/v4l/pixfmt.xml
··· 696 696 &sub-packed-yuv; 697 697 &sub-grey; 698 698 &sub-y10; 699 + &sub-y12; 699 700 &sub-y16; 700 701 &sub-yuyv; 701 702 &sub-uyvy;
+59
Documentation/DocBook/v4l/subdev-formats.xml
··· 456 456 <entry>b<subscript>1</subscript></entry> 457 457 <entry>b<subscript>0</subscript></entry> 458 458 </row> 459 + <row id="V4L2-MBUS-FMT-SGBRG8-1X8"> 460 + <entry>V4L2_MBUS_FMT_SGBRG8_1X8</entry> 461 + <entry>0x3013</entry> 462 + <entry></entry> 463 + <entry>-</entry> 464 + <entry>-</entry> 465 + <entry>-</entry> 466 + <entry>-</entry> 467 + <entry>g<subscript>7</subscript></entry> 468 + <entry>g<subscript>6</subscript></entry> 469 + <entry>g<subscript>5</subscript></entry> 470 + <entry>g<subscript>4</subscript></entry> 471 + <entry>g<subscript>3</subscript></entry> 472 + <entry>g<subscript>2</subscript></entry> 473 + <entry>g<subscript>1</subscript></entry> 474 + <entry>g<subscript>0</subscript></entry> 475 + </row> 459 476 <row id="V4L2-MBUS-FMT-SGRBG8-1X8"> 460 477 <entry>V4L2_MBUS_FMT_SGRBG8_1X8</entry> 461 478 <entry>0x3002</entry> ··· 489 472 <entry>g<subscript>2</subscript></entry> 490 473 <entry>g<subscript>1</subscript></entry> 491 474 <entry>g<subscript>0</subscript></entry> 475 + </row> 476 + <row id="V4L2-MBUS-FMT-SRGGB8-1X8"> 477 + <entry>V4L2_MBUS_FMT_SRGGB8_1X8</entry> 478 + <entry>0x3014</entry> 479 + <entry></entry> 480 + <entry>-</entry> 481 + <entry>-</entry> 482 + <entry>-</entry> 483 + <entry>-</entry> 484 + <entry>r<subscript>7</subscript></entry> 485 + <entry>r<subscript>6</subscript></entry> 486 + <entry>r<subscript>5</subscript></entry> 487 + <entry>r<subscript>4</subscript></entry> 488 + <entry>r<subscript>3</subscript></entry> 489 + <entry>r<subscript>2</subscript></entry> 490 + <entry>r<subscript>1</subscript></entry> 491 + <entry>r<subscript>0</subscript></entry> 492 492 </row> 493 493 <row id="V4L2-MBUS-FMT-SBGGR10-DPCM8-1X8"> 494 494 <entry>V4L2_MBUS_FMT_SBGGR10_DPCM8_1X8</entry> ··· 2192 2158 <entry>u<subscript>2</subscript></entry> 2193 2159 <entry>u<subscript>1</subscript></entry> 2194 2160 <entry>u<subscript>0</subscript></entry> 2161 + </row> 2162 + <row id="V4L2-MBUS-FMT-Y12-1X12"> 2163 + 
<entry>V4L2_MBUS_FMT_Y12_1X12</entry> 2164 + <entry>0x2013</entry> 2165 + <entry></entry> 2166 + <entry>-</entry> 2167 + <entry>-</entry> 2168 + <entry>-</entry> 2169 + <entry>-</entry> 2170 + <entry>-</entry> 2171 + <entry>-</entry> 2172 + <entry>-</entry> 2173 + <entry>-</entry> 2174 + <entry>y<subscript>11</subscript></entry> 2175 + <entry>y<subscript>10</subscript></entry> 2176 + <entry>y<subscript>9</subscript></entry> 2177 + <entry>y<subscript>8</subscript></entry> 2178 + <entry>y<subscript>7</subscript></entry> 2179 + <entry>y<subscript>6</subscript></entry> 2180 + <entry>y<subscript>5</subscript></entry> 2181 + <entry>y<subscript>4</subscript></entry> 2182 + <entry>y<subscript>3</subscript></entry> 2183 + <entry>y<subscript>2</subscript></entry> 2184 + <entry>y<subscript>1</subscript></entry> 2185 + <entry>y<subscript>0</subscript></entry> 2195 2186 </row> 2196 2187 <row id="V4L2-MBUS-FMT-UYVY8-1X16"> 2197 2188 <entry>V4L2_MBUS_FMT_UYVY8_1X16</entry>
+13 -2
Documentation/cgroups/memory.txt
··· 52 52 tasks # attach a task(thread) and show list of threads 53 53 cgroup.procs # show list of processes 54 54 cgroup.event_control # an interface for event_fd() 55 - memory.usage_in_bytes # show current memory(RSS+Cache) usage. 56 - memory.memsw.usage_in_bytes # show current memory+Swap usage 55 + memory.usage_in_bytes # show current res_counter usage for memory 56 + (See 5.5 for details) 57 + memory.memsw.usage_in_bytes # show current res_counter usage for memory+Swap 58 + (See 5.5 for details) 57 59 memory.limit_in_bytes # set/show limit of memory usage 58 60 memory.memsw.limit_in_bytes # set/show limit of memory+Swap usage 59 61 memory.failcnt # show the number of memory usage hits limits ··· 454 452 455 453 You can reset failcnt by writing 0 to failcnt file. 456 454 # echo 0 > .../memory.failcnt 455 + 456 + 5.5 usage_in_bytes 457 + 458 + For efficiency, as other kernel components, memory cgroup uses some optimization 459 + to avoid unnecessary cacheline false sharing. usage_in_bytes is affected by the 460 + method and doesn't show 'exact' value of memory(and swap) usage, it's a fuzz 461 + value for efficient access. (Of course, when necessary, it's synchronized.) 462 + If you want to know more exact memory usage, you should use RSS+CACHE(+SWAP) 463 + value in memory.stat(see 5.2). 457 464 458 465 6. Hierarchy support 459 466
+2 -2
Documentation/flexible-arrays.txt
··· 66 66 entering atomic context, using: 67 67 68 68 int flex_array_prealloc(struct flex_array *array, unsigned int start, 69 - unsigned int end, gfp_t flags); 69 + unsigned int nr_elements, gfp_t flags); 70 70 71 71 This function will ensure that memory for the elements indexed in the range 72 - defined by start and end has been allocated. Thereafter, a 72 + defined by start and nr_elements has been allocated. Thereafter, a 73 73 flex_array_put() call on an element in that range is guaranteed not to 74 74 block. 75 75
+19 -17
Documentation/hwmon/adm1021
··· 14 14 Prefix: 'gl523sm' 15 15 Addresses scanned: I2C 0x18 - 0x1a, 0x29 - 0x2b, 0x4c - 0x4e 16 16 Datasheet: 17 - * Intel Xeon Processor 18 - Prefix: - any other - may require 'force_adm1021' parameter 19 - Addresses scanned: none 20 - Datasheet: Publicly available at Intel website 21 17 * Maxim MAX1617 22 18 Prefix: 'max1617' 23 19 Addresses scanned: I2C 0x18 - 0x1a, 0x29 - 0x2b, 0x4c - 0x4e ··· 87 91 ADM1021-clones do faster measurements, but there is really no good reason 88 92 for that. 89 93 90 - Xeon support 91 - ------------ 92 94 93 - Some Xeon processors have real max1617, adm1021, or compatible chips 94 - within them, with two temperature sensors. 95 + Netburst-based Xeon support 96 + --------------------------- 95 97 96 - Other Xeons have chips with only one sensor. 98 + Some Xeon processors based on the Netburst (early Pentium 4, from 2001 to 99 + 2003) microarchitecture had real MAX1617, ADM1021, or compatible chips 100 + within them, with two temperature sensors. Other Xeon processors of this 101 + era (with 400 MHz FSB) had chips with only one temperature sensor. 97 102 98 - If you have a Xeon, and the adm1021 module loads, and both temperatures 99 - appear valid, then things are good. 103 + If you have such an old Xeon, and you get two valid temperatures when 104 + loading the adm1021 module, then things are good. 100 105 101 - If the adm1021 module doesn't load, you should try this: 102 - modprobe adm1021 force_adm1021=BUS,ADDRESS 103 - ADDRESS can only be 0x18, 0x1a, 0x29, 0x2b, 0x4c, or 0x4e. 106 + If nothing happens when loading the adm1021 module, and you are certain 107 + that your specific Xeon processor model includes compatible sensors, you 108 + will have to explicitly instantiate the sensor chips from user-space. See 109 + method 4 in Documentation/i2c/instantiating-devices. Possible slave 110 + addresses are 0x18, 0x1a, 0x29, 0x2b, 0x4c, or 0x4e. It is likely that 111 + only temp2 will be correct and temp1 will have to be ignored. 
104 112 105 - If you have dual Xeons you may have appear to have two separate 106 - adm1021-compatible chips, or two single-temperature sensors, at distinct 107 - addresses. 113 + Previous generations of the Xeon processor (based on Pentium II/III) 114 + didn't have these sensors. Next generations of Xeon processors (533 MHz 115 + FSB and faster) lost them, until the Core-based generation which 116 + introduced integrated digital thermal sensors. These are supported by 117 + the coretemp driver.
+20 -9
Documentation/hwmon/lm90
··· 32 32 Addresses scanned: I2C 0x4c and 0x4d 33 33 Datasheet: Publicly available at the ON Semiconductor website 34 34 http://www.onsemi.com/PowerSolutions/product.do?id=ADT7461 35 + * Analog Devices ADT7461A 36 + Prefix: 'adt7461a' 37 + Addresses scanned: I2C 0x4c and 0x4d 38 + Datasheet: Publicly available at the ON Semiconductor website 39 + http://www.onsemi.com/PowerSolutions/product.do?id=ADT7461A 40 + * ON Semiconductor NCT1008 41 + Prefix: 'nct1008' 42 + Addresses scanned: I2C 0x4c and 0x4d 43 + Datasheet: Publicly available at the ON Semiconductor website 44 + http://www.onsemi.com/PowerSolutions/product.do?id=NCT1008 35 45 * Maxim MAX6646 36 46 Prefix: 'max6646' 37 47 Addresses scanned: I2C 0x4d ··· 159 149 * ALERT is triggered by open remote sensor. 160 150 * SMBus PEC support for Write Byte and Receive Byte transactions. 161 151 162 - ADT7461: 152 + ADT7461, ADT7461A, NCT1008: 163 153 * Extended temperature range (breaks compatibility) 164 154 * Lower resolution for remote temperature 165 155 ··· 205 195 Only the local hysteresis can be set from user-space, and the same delta 206 196 applies to the remote hysteresis. 207 197 208 - The lm90 driver will not update its values more frequently than every 209 - other second; reading them more often will do no harm, but will return 210 - 'old' values. 198 + The lm90 driver will not update its values more frequently than configured with 199 + the update_interval attribute; reading them more often will do no harm, but will 200 + return 'old' values. 211 201 212 202 SMBus Alert Support 213 203 ------------------- ··· 215 205 This driver has basic support for SMBus alert. When an alert is received, 216 206 the status register is read and the faulty temperature channel is logged. 
217 207 218 - The Analog Devices chips (ADM1032 and ADT7461) do not implement the SMBus 219 - alert protocol properly so additional care is needed: the ALERT output is 220 - disabled when an alert is received, and is re-enabled only when the alarm 221 - is gone. Otherwise the chip would block alerts from other chips in the bus 222 - as long as the alarm is active. 208 + The Analog Devices chips (ADM1032, ADT7461 and ADT7461A) and ON 209 + Semiconductor chips (NCT1008) do not implement the SMBus alert protocol 210 + properly so additional care is needed: the ALERT output is disabled when 211 + an alert is received, and is re-enabled only when the alarm is gone. 212 + Otherwise the chip would block alerts from other chips in the bus as long 213 + as the alarm is active. 223 214 224 215 PEC Support 225 216 -----------
+62
Documentation/hwmon/max16064
··· 1 + Kernel driver max16064 2 + ====================== 3 + 4 + Supported chips: 5 + * Maxim MAX16064 6 + Prefix: 'max16064' 7 + Addresses scanned: - 8 + Datasheet: http://datasheets.maxim-ic.com/en/ds/MAX16064.pdf 9 + 10 + Author: Guenter Roeck <guenter.roeck@ericsson.com> 11 + 12 + 13 + Description 14 + ----------- 15 + 16 + This driver supports hardware monitoring for Maxim MAX16064 Quad Power-Supply 17 + Controller with Active-Voltage Output Control and PMBus Interface. 18 + 19 + The driver is a client driver to the core PMBus driver. 20 + Please see Documentation/hwmon/pmbus for details on PMBus client drivers. 21 + 22 + 23 + Usage Notes 24 + ----------- 25 + 26 + This driver does not auto-detect devices. You will have to instantiate the 27 + devices explicitly. Please see Documentation/i2c/instantiating-devices for 28 + details. 29 + 30 + 31 + Platform data support 32 + --------------------- 33 + 34 + The driver supports standard PMBus driver platform data. 35 + 36 + 37 + Sysfs entries 38 + ------------- 39 + 40 + The following attributes are supported. Limits are read-write; all other 41 + attributes are read-only. 42 + 43 + in[1-4]_label "vout[1-4]" 44 + in[1-4]_input Measured voltage. From READ_VOUT register. 45 + in[1-4]_min Minimum Voltage. From VOUT_UV_WARN_LIMIT register. 46 + in[1-4]_max Maximum voltage. From VOUT_OV_WARN_LIMIT register. 47 + in[1-4]_lcrit Critical minimum Voltage. VOUT_UV_FAULT_LIMIT register. 48 + in[1-4]_crit Critical maximum voltage. From VOUT_OV_FAULT_LIMIT register. 49 + in[1-4]_min_alarm Voltage low alarm. From VOLTAGE_UV_WARNING status. 50 + in[1-4]_max_alarm Voltage high alarm. From VOLTAGE_OV_WARNING status. 51 + in[1-4]_lcrit_alarm Voltage critical low alarm. From VOLTAGE_UV_FAULT status. 52 + in[1-4]_crit_alarm Voltage critical high alarm. From VOLTAGE_OV_FAULT status. 53 + 54 + temp1_input Measured temperature. From READ_TEMPERATURE_1 register. 55 + temp1_max Maximum temperature. From OT_WARN_LIMIT register. 
56 + temp1_crit Critical high temperature. From OT_FAULT_LIMIT register. 57 + temp1_max_alarm Chip temperature high alarm. Set by comparing 58 + READ_TEMPERATURE_1 with OT_WARN_LIMIT if TEMP_OT_WARNING 59 + status is set. 60 + temp1_crit_alarm Chip temperature critical high alarm. Set by comparing 61 + READ_TEMPERATURE_1 with OT_FAULT_LIMIT if TEMP_OT_FAULT 62 + status is set.
+79
Documentation/hwmon/max34440
··· 1 + Kernel driver max34440 2 + ====================== 3 + 4 + Supported chips: 5 + * Maxim MAX34440 6 + Prefixes: 'max34440' 7 + Addresses scanned: - 8 + Datasheet: http://datasheets.maxim-ic.com/en/ds/MAX34440.pdf 9 + * Maxim MAX34441 10 + PMBus 5-Channel Power-Supply Manager and Intelligent Fan Controller 11 + Prefixes: 'max34441' 12 + Addresses scanned: - 13 + Datasheet: http://datasheets.maxim-ic.com/en/ds/MAX34441.pdf 14 + 15 + Author: Guenter Roeck <guenter.roeck@ericsson.com> 16 + 17 + 18 + Description 19 + ----------- 20 + 21 + This driver supports hardware monitoring for Maxim MAX34440 PMBus 6-Channel 22 + Power-Supply Manager and MAX34441 PMBus 5-Channel Power-Supply Manager 23 + and Intelligent Fan Controller. 24 + 25 + The driver is a client driver to the core PMBus driver. Please see 26 + Documentation/hwmon/pmbus for details on PMBus client drivers. 27 + 28 + 29 + Usage Notes 30 + ----------- 31 + 32 + This driver does not auto-detect devices. You will have to instantiate the 33 + devices explicitly. Please see Documentation/i2c/instantiating-devices for 34 + details. 35 + 36 + 37 + Platform data support 38 + --------------------- 39 + 40 + The driver supports standard PMBus driver platform data. 41 + 42 + 43 + Sysfs entries 44 + ------------- 45 + 46 + The following attributes are supported. Limits are read-write; all other 47 + attributes are read-only. 48 + 49 + in[1-6]_label "vout[1-6]". 50 + in[1-6]_input Measured voltage. From READ_VOUT register. 51 + in[1-6]_min Minimum Voltage. From VOUT_UV_WARN_LIMIT register. 52 + in[1-6]_max Maximum voltage. From VOUT_OV_WARN_LIMIT register. 53 + in[1-6]_lcrit Critical minimum Voltage. VOUT_UV_FAULT_LIMIT register. 54 + in[1-6]_crit Critical maximum voltage. From VOUT_OV_FAULT_LIMIT register. 55 + in[1-6]_min_alarm Voltage low alarm. From VOLTAGE_UV_WARNING status. 56 + in[1-6]_max_alarm Voltage high alarm. From VOLTAGE_OV_WARNING status. 57 + in[1-6]_lcrit_alarm Voltage critical low alarm. 
From VOLTAGE_UV_FAULT status. 58 + in[1-6]_crit_alarm Voltage critical high alarm. From VOLTAGE_OV_FAULT status. 59 + 60 + curr[1-6]_label "iout[1-6]". 61 + curr[1-6]_input Measured current. From READ_IOUT register. 62 + curr[1-6]_max Maximum current. From IOUT_OC_WARN_LIMIT register. 63 + curr[1-6]_crit Critical maximum current. From IOUT_OC_FAULT_LIMIT register. 64 + curr[1-6]_max_alarm Current high alarm. From IOUT_OC_WARNING status. 65 + curr[1-6]_crit_alarm Current critical high alarm. From IOUT_OC_FAULT status. 66 + 67 + in6 and curr6 attributes only exist for MAX34440. 68 + 69 + temp[1-8]_input Measured temperatures. From READ_TEMPERATURE_1 register. 70 + temp1 is the chip's internal temperature. temp2..temp5 71 + are remote I2C temperature sensors. For MAX34441, temp6 72 + is a remote thermal-diode sensor. For MAX34440, temp6..8 73 + are remote I2C temperature sensors. 74 + temp[1-8]_max Maximum temperature. From OT_WARN_LIMIT register. 75 + temp[1-8]_crit Critical high temperature. From OT_FAULT_LIMIT register. 76 + temp[1-8]_max_alarm Temperature high alarm. 77 + temp[1-8]_crit_alarm Temperature critical high alarm. 78 + 79 + temp7 and temp8 attributes only exist for MAX34440.
+69
Documentation/hwmon/max8688
··· 1 + Kernel driver max8688 2 + ===================== 3 + 4 + Supported chips: 5 + * Maxim MAX8688 6 + Prefix: 'max8688' 7 + Addresses scanned: - 8 + Datasheet: http://datasheets.maxim-ic.com/en/ds/MAX8688.pdf 9 + 10 + Author: Guenter Roeck <guenter.roeck@ericsson.com> 11 + 12 + 13 + Description 14 + ----------- 15 + 16 + This driver supports hardware monitoring for Maxim MAX8688 Digital Power-Supply 17 + Controller/Monitor with PMBus Interface. 18 + 19 + The driver is a client driver to the core PMBus driver. Please see 20 + Documentation/hwmon/pmbus for details on PMBus client drivers. 21 + 22 + 23 + Usage Notes 24 + ----------- 25 + 26 + This driver does not auto-detect devices. You will have to instantiate the 27 + devices explicitly. Please see Documentation/i2c/instantiating-devices for 28 + details. 29 + 30 + 31 + Platform data support 32 + --------------------- 33 + 34 + The driver supports standard PMBus driver platform data. 35 + 36 + 37 + Sysfs entries 38 + ------------- 39 + 40 + The following attributes are supported. Limits are read-write; all other 41 + attributes are read-only. 42 + 43 + in1_label "vout1" 44 + in1_input Measured voltage. From READ_VOUT register. 45 + in1_min Minimum Voltage. From VOUT_UV_WARN_LIMIT register. 46 + in1_max Maximum voltage. From VOUT_OV_WARN_LIMIT register. 47 + in1_lcrit Critical minimum Voltage. VOUT_UV_FAULT_LIMIT register. 48 + in1_crit Critical maximum voltage. From VOUT_OV_FAULT_LIMIT register. 49 + in1_min_alarm Voltage low alarm. From VOLTAGE_UV_WARNING status. 50 + in1_max_alarm Voltage high alarm. From VOLTAGE_OV_WARNING status. 51 + in1_lcrit_alarm Voltage critical low alarm. From VOLTAGE_UV_FAULT status. 52 + in1_crit_alarm Voltage critical high alarm. From VOLTAGE_OV_FAULT status. 53 + 54 + curr1_label "iout1" 55 + curr1_input Measured current. From READ_IOUT register. 56 + curr1_max Maximum current. From IOUT_OC_WARN_LIMIT register. 57 + curr1_crit Critical maximum current. 
From IOUT_OC_FAULT_LIMIT register. 58 + curr1_max_alarm Current high alarm. From IOUT_OC_WARN_LIMIT register. 59 + curr1_crit_alarm Current critical high alarm. From IOUT_OC_FAULT status. 60 + 61 + temp1_input Measured temperature. From READ_TEMPERATURE_1 register. 62 + temp1_max Maximum temperature. From OT_WARN_LIMIT register. 63 + temp1_crit Critical high temperature. From OT_FAULT_LIMIT register. 64 + temp1_max_alarm Chip temperature high alarm. Set by comparing 65 + READ_TEMPERATURE_1 with OT_WARN_LIMIT if TEMP_OT_WARNING 66 + status is set. 67 + temp1_crit_alarm Chip temperature critical high alarm. Set by comparing 68 + READ_TEMPERATURE_1 with OT_FAULT_LIMIT if TEMP_OT_FAULT 69 + status is set.
+8 -26
Documentation/hwmon/pmbus
··· 13 13 Prefix: 'ltc2978' 14 14 Addresses scanned: - 15 15 Datasheet: http://cds.linear.com/docs/Datasheet/2978fa.pdf 16 - * Maxim MAX16064 17 - Quad Power-Supply Controller 18 - Prefix: 'max16064' 19 - Addresses scanned: - 20 - Datasheet: http://datasheets.maxim-ic.com/en/ds/MAX16064.pdf 21 - * Maxim MAX34440 22 - PMBus 6-Channel Power-Supply Manager 23 - Prefixes: 'max34440' 24 - Addresses scanned: - 25 - Datasheet: http://datasheets.maxim-ic.com/en/ds/MAX34440.pdf 26 - * Maxim MAX34441 27 - PMBus 5-Channel Power-Supply Manager and Intelligent Fan Controller 28 - Prefixes: 'max34441' 29 - Addresses scanned: - 30 - Datasheet: http://datasheets.maxim-ic.com/en/ds/MAX34441.pdf 31 - * Maxim MAX8688 32 - Digital Power-Supply Controller/Monitor 33 - Prefix: 'max8688' 34 - Addresses scanned: - 35 - Datasheet: http://datasheets.maxim-ic.com/en/ds/MAX8688.pdf 36 16 * Generic PMBus devices 37 17 Prefix: 'pmbus' 38 18 Addresses scanned: - ··· 155 175 From IIN_OC_FAULT_LIMIT or IOUT_OC_FAULT_LIMIT register. 156 176 currX_alarm Current high alarm. 157 177 From IIN_OC_WARNING or IOUT_OC_WARNING status. 178 + currX_max_alarm Current high alarm. 179 + From IIN_OC_WARN_LIMIT or IOUT_OC_WARN_LIMIT status. 158 180 currX_lcrit_alarm Output current critical low alarm. 159 181 From IOUT_UC_FAULT status. 160 182 currX_crit_alarm Current critical high alarm. 161 183 From IIN_OC_FAULT or IOUT_OC_FAULT status. 162 - currX_label "iin" or "vinY" 184 + currX_label "iin" or "ioutY" 163 185 164 186 powerX_input Measured power. From READ_PIN or READ_POUT register. 165 187 powerX_cap Output power cap. From POUT_MAX register. ··· 175 193 From POUT_OP_FAULT status. 176 194 powerX_label "pin" or "poutY" 177 195 178 - tempX_input Measured tempererature. 196 + tempX_input Measured temperature. 179 197 From READ_TEMPERATURE_X register. 180 - tempX_min Mimimum tempererature. From UT_WARN_LIMIT register. 181 - tempX_max Maximum tempererature. From OT_WARN_LIMIT register. 
182 - tempX_lcrit Critical low tempererature. 198 + tempX_min Minimum temperature. 199 + tempX_max Maximum temperature. 200 + tempX_lcrit Critical low temperature. 183 201 From UT_FAULT_LIMIT register. 184 202 tempX_crit Critical high temperature. 185 203 From OT_FAULT_LIMIT register. 186 204 tempX_min_alarm Chip temperature low alarm. Set by comparing 187 205 READ_TEMPERATURE_X with UT_WARN_LIMIT if
+4 -4
Documentation/hwmon/smm665
··· 150 150 in9_crit_alarm AIN1 critical alarm 151 151 in10_crit_alarm AIN2 critical alarm 152 152 153 - temp1_input Chip tempererature 154 - temp1_min Mimimum chip tempererature 155 - temp1_max Maximum chip tempererature 156 - temp1_crit Critical chip tempererature 153 + temp1_input Chip temperature 154 + temp1_min Minimum chip temperature 155 + temp1_max Maximum chip temperature 156 + temp1_crit Critical chip temperature 157 157 temp1_crit_alarm Temperature critical alarm
+109
Documentation/hwmon/submitting-patches
··· 1 + How to Get Your Patch Accepted Into the Hwmon Subsystem 2 + ------------------------------------------------------- 3 + 4 + This text is a collection of suggestions for people writing patches or 5 + drivers for the hwmon subsystem. Following these suggestions will greatly 6 + increase the chances of your change being accepted. 7 + 8 + 9 + 1. General 10 + ---------- 11 + 12 + * It should be unnecessary to mention, but please read and follow 13 + Documentation/SubmitChecklist 14 + Documentation/SubmittingDrivers 15 + Documentation/SubmittingPatches 16 + Documentation/CodingStyle 17 + 18 + * If your patch generates checkpatch warnings, please refrain from explanations 19 + such as "I don't like that coding style". Keep in mind that each unnecessary 20 + warning helps hide a real problem. If you don't like the kernel coding 21 + style, don't write kernel drivers. 22 + 23 + * Please test your patch thoroughly. We are not your test group. 24 + Sometimes a patch can not or not completely be tested because of missing 25 + hardware. In such cases, you should test-build the code on at least one 26 + architecture. If run-time testing was not achieved, it should be written 27 + explicitly below the patch header. 28 + 29 + * If your patch (or the driver) is affected by configuration options such as 30 + CONFIG_SMP or CONFIG_HOTPLUG, make sure it compiles for all configuration 31 + variants. 32 + 33 + 34 + 2. Adding functionality to existing drivers 35 + ------------------------------------------- 36 + 37 + * Make sure the documentation in Documentation/hwmon/<driver_name> is up to 38 + date. 39 + 40 + * Make sure the information in Kconfig is up to date. 41 + 42 + * If the added functionality requires some cleanup or structural changes, split 43 + your patch into a cleanup part and the actual addition. This makes it easier 44 + to review your changes, and to bisect any resulting problems. 
45 + 46 + * Never mix bug fixes, cleanup, and functional enhancements in a single patch. 47 + 48 + 49 + 3. New drivers 50 + -------------- 51 + 52 + * Running your patch or driver file(s) through checkpatch does not mean its 53 + formatting is clean. If unsure about formatting in your new driver, run it 54 + through Lindent. Lindent is not perfect, and you may have to do some minor 55 + cleanup, but it is a good start. 56 + 57 + * Consider adding yourself to MAINTAINERS. 58 + 59 + * Document the driver in Documentation/hwmon/<driver_name>. 60 + 61 + * Add the driver to Kconfig and Makefile in alphabetical order. 62 + 63 + * Make sure that all dependencies are listed in Kconfig. For new drivers, it 64 + is most likely prudent to add a dependency on EXPERIMENTAL. 65 + 66 + * Avoid forward declarations if you can. Rearrange the code if necessary. 67 + 68 + * Avoid calculations in macros and macro-generated functions. While such macros 69 + may save a line or so in the source, it obfuscates the code and makes code 70 + review more difficult. It may also result in code which is more complicated 71 + than necessary. Use inline functions or just regular functions instead. 72 + 73 + * If the driver has a detect function, make sure it is silent. Debug messages 74 + and messages printed after a successful detection are acceptable, but it 75 + must not print messages such as "Chip XXX not found/supported". 76 + 77 + Keep in mind that the detect function will run for all drivers supporting an 78 + address if a chip is detected on that address. Unnecessary messages will just 79 + pollute the kernel log and not provide any value. 80 + 81 + * Provide a detect function if and only if a chip can be detected reliably. 82 + 83 + * Avoid writing to chip registers in the detect function. If you have to write, 84 + only do it after you have already gathered enough data to be certain that the 85 + detection is going to be successful. 
86 + 87 + Keep in mind that the chip might not be what your driver believes it is, and 88 + writing to it might cause a bad misconfiguration. 89 + 90 + * Make sure there are no race conditions in the probe function. Specifically, 91 + completely initialize your chip first, then create sysfs entries and register 92 + with the hwmon subsystem. 93 + 94 + * Do not provide support for deprecated sysfs attributes. 95 + 96 + * Do not create non-standard attributes unless really needed. If you have to use 97 + non-standard attributes, or you believe you do, discuss it on the mailing list 98 + first. Either case, provide a detailed explanation why you need the 99 + non-standard attribute(s). 100 + Standard attributes are specified in Documentation/hwmon/sysfs-interface. 101 + 102 + * When deciding which sysfs attributes to support, look at the chip's 103 + capabilities. While we do not expect your driver to support everything the 104 + chip may offer, it should at least support all limits and alarms. 105 + 106 + * Last but not least, please check if a driver for your chip already exists 107 + before starting to write a new driver. Especially for temperature sensors, 108 + new chips are often variants of previously released chips. In some cases, 109 + a presumably new chip may simply have been relabeled.
+10
Documentation/md.txt
··· 552 552 within the array where IO will be blocked. This is currently 553 553 only supported for raid4/5/6. 554 554 555 + sync_min 556 + sync_max 557 + The two values, given as numbers of sectors, indicate a range 558 + within the array where 'check'/'repair' will operate. Must be 559 + a multiple of chunk_size. When it reaches "sync_max" it will 560 + pause, rather than complete. 561 + You can use 'select' or 'poll' on "sync_completed" to wait for 562 + that number to reach sync_max. Then you can either increase 563 + "sync_max", or can write 'idle' to "sync_action". 564 + 555 565 556 566 Each active md device may also have attributes specific to the 557 567 personality module that manages it.
+3 -3
Documentation/sound/alsa/SB-Live-mixer.txt
··· 87 87 The result is forwarded to the ADC capture FIFO (thus to the standard capture 88 88 PCM device). 89 89 90 - name='Music Playback Volume',index=0 90 + name='Synth Playback Volume',index=0 91 91 92 92 This control is used to attenuate samples for left and right MIDI FX-bus 93 93 accumulators. ALSA uses accumulators 4 and 5 for left and right MIDI samples. 94 94 The result samples are forwarded to the front DAC PCM slots of the AC97 codec. 95 95 96 - name='Music Capture Volume',index=0 97 - name='Music Capture Switch',index=0 96 + name='Synth Capture Volume',index=0 97 + name='Synth Capture Switch',index=0 98 98 99 99 These controls are used to attenuate samples for left and right MIDI FX-bus 100 100 accumulator. ALSA uses accumulators 4 and 5 for left and right PCM.
+3 -3
Documentation/video4linux/sh_mobile_ceu_camera.txt
··· 37 37 -1'- 38 38 39 39 In the above chart minuses and slashes represent "real" data amounts, points and 40 - accents represent "useful" data, basically, CEU scaled amd cropped output, 40 + accents represent "useful" data, basically, CEU scaled and cropped output, 41 41 mapped back onto the client's source plane. 42 42 43 43 Such a configuration can be produced by user requests: ··· 65 65 66 66 1. Calculate current sensor scales: 67 67 68 - scale_s = ((3') - (3)) / ((2') - (2)) 68 + scale_s = ((2') - (2)) / ((3') - (3)) 69 69 70 70 2. Calculate "effective" input crop (sensor subwindow) - CEU crop scaled back at 71 71 current sensor scales onto input window - this is user S_CROP: ··· 80 80 4. Calculate sensor output window by applying combined scales to real input 81 81 window: 82 82 83 - width_s_out = ((2') - (2)) / scale_comb 83 + width_s_out = ((7') - (7)) = ((2') - (2)) / scale_comb 84 84 85 85 5. Apply iterative sensor S_FMT for sensor output window. 86 86
+40
Documentation/workqueue.txt
··· 12 12 4. Application Programming Interface (API) 13 13 5. Example Execution Scenarios 14 14 6. Guidelines 15 + 7. Debugging 15 16 16 17 17 18 1. Introduction ··· 380 379 * Unless work items are expected to consume a huge amount of CPU 381 380 cycles, using a bound wq is usually beneficial due to the increased 382 381 level of locality in wq operations and work item execution. 382 + 383 + 384 + 7. Debugging 385 + 386 + Because the work functions are executed by generic worker threads 387 + there are a few tricks needed to shed some light on misbehaving 388 + workqueue users. 389 + 390 + Worker threads show up in the process list as: 391 + 392 + root 5671 0.0 0.0 0 0 ? S 12:07 0:00 [kworker/0:1] 393 + root 5672 0.0 0.0 0 0 ? S 12:07 0:00 [kworker/1:2] 394 + root 5673 0.0 0.0 0 0 ? S 12:12 0:00 [kworker/0:0] 395 + root 5674 0.0 0.0 0 0 ? S 12:13 0:00 [kworker/1:0] 396 + 397 + If kworkers are going crazy (using too much cpu), there are two types 398 + of possible problems: 399 + 400 + 1. Something beeing scheduled in rapid succession 401 + 2. A single work item that consumes lots of cpu cycles 402 + 403 + The first one can be tracked using tracing: 404 + 405 + $ echo workqueue:workqueue_queue_work > /sys/kernel/debug/tracing/set_event 406 + $ cat /sys/kernel/debug/tracing/trace_pipe > out.txt 407 + (wait a few secs) 408 + ^C 409 + 410 + If something is busy looping on work queueing, it would be dominating 411 + the output and the offender can be determined with the work item 412 + function. 413 + 414 + For the second type of problems it should be possible to just check 415 + the stack trace of the offending worker thread. 416 + 417 + $ cat /proc/THE_OFFENDING_KWORKER/stack 418 + 419 + The work item's function should be trivially visible in the stack 420 + trace.
+58 -56
MAINTAINERS
··· 151 151 F: drivers/net/hamradio/6pack.c 152 152 153 153 8169 10/100/1000 GIGABIT ETHERNET DRIVER 154 + M: Realtek linux nic maintainers <nic_swsd@realtek.com> 154 155 M: Francois Romieu <romieu@fr.zoreil.com> 155 156 L: netdev@vger.kernel.org 156 157 S: Maintained ··· 1032 1031 S: Maintained 1033 1032 F: arch/arm/mach-s3c64xx/ 1034 1033 1035 - ARM/S5P ARM ARCHITECTURES 1034 + ARM/S5P EXYNOS ARM ARCHITECTURES 1036 1035 M: Kukjin Kim <kgene.kim@samsung.com> 1037 1036 L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 1038 1037 L: linux-samsung-soc@vger.kernel.org (moderated for non-subscribers) 1039 1038 S: Maintained 1040 1039 F: arch/arm/mach-s5p*/ 1040 + F: arch/arm/mach-exynos*/ 1041 1041 1042 1042 ARM/SAMSUNG MOBILE MACHINE SUPPORT 1043 1043 M: Kyungmin Park <kyungmin.park@samsung.com> ··· 2809 2807 M: Grant Likely <grant.likely@secretlab.ca> 2810 2808 S: Maintained 2811 2809 T: git git://git.secretlab.ca/git/linux-2.6.git 2812 - F: Documentation/gpio/gpio.txt 2810 + F: Documentation/gpio.txt 2813 2811 F: drivers/gpio/ 2814 2812 F: include/linux/gpio* 2813 + 2814 + GRE DEMULTIPLEXER DRIVER 2815 + M: Dmitry Kozlov <xeb@mail.ru> 2816 + L: netdev@vger.kernel.org 2817 + S: Maintained 2818 + F: net/ipv4/gre.c 2819 + F: include/net/gre.h 2815 2820 2816 2821 GRETH 10/100/1G Ethernet MAC device driver 2817 2822 M: Kristoffer Glembo <kristoffer@gaisler.com> 2818 2823 L: netdev@vger.kernel.org 2819 2824 S: Maintained 2820 2825 F: drivers/net/greth* 2821 - 2822 - HARD DRIVE ACTIVE PROTECTION SYSTEM (HDAPS) DRIVER 2823 - M: Frank Seidel <frank@f-seidel.de> 2824 - L: platform-driver-x86@vger.kernel.org 2825 - W: http://www.kernel.org/pub/linux/kernel/people/fseidel/hdaps/ 2826 - S: Maintained 2827 - F: drivers/platform/x86/hdaps.c 2828 - 2829 - HWPOISON MEMORY FAILURE HANDLING 2830 - M: Andi Kleen <andi@firstfloor.org> 2831 - L: linux-mm@kvack.org 2832 - T: git git://git.kernel.org/pub/scm/linux/kernel/git/ak/linux-mce-2.6.git hwpoison 2833 - S: 
Maintained 2834 - F: mm/memory-failure.c 2835 - F: mm/hwpoison-inject.c 2836 - 2837 - HYPERVISOR VIRTUAL CONSOLE DRIVER 2838 - L: linuxppc-dev@lists.ozlabs.org 2839 - S: Odd Fixes 2840 - F: drivers/tty/hvc/ 2841 - 2842 - iSCSI BOOT FIRMWARE TABLE (iBFT) DRIVER 2843 - M: Peter Jones <pjones@redhat.com> 2844 - M: Konrad Rzeszutek Wilk <konrad@kernel.org> 2845 - S: Maintained 2846 - F: drivers/firmware/iscsi_ibft* 2847 2826 2848 2827 GSPCA FINEPIX SUBDRIVER 2849 2828 M: Frank Zago <frank@zago.net> ··· 2875 2892 T: git git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-2.6.git 2876 2893 S: Maintained 2877 2894 F: drivers/media/video/gspca/ 2895 + 2896 + HARD DRIVE ACTIVE PROTECTION SYSTEM (HDAPS) DRIVER 2897 + M: Frank Seidel <frank@f-seidel.de> 2898 + L: platform-driver-x86@vger.kernel.org 2899 + W: http://www.kernel.org/pub/linux/kernel/people/fseidel/hdaps/ 2900 + S: Maintained 2901 + F: drivers/platform/x86/hdaps.c 2902 + 2903 + HWPOISON MEMORY FAILURE HANDLING 2904 + M: Andi Kleen <andi@firstfloor.org> 2905 + L: linux-mm@kvack.org 2906 + T: git git://git.kernel.org/pub/scm/linux/kernel/git/ak/linux-mce-2.6.git hwpoison 2907 + S: Maintained 2908 + F: mm/memory-failure.c 2909 + F: mm/hwpoison-inject.c 2910 + 2911 + HYPERVISOR VIRTUAL CONSOLE DRIVER 2912 + L: linuxppc-dev@lists.ozlabs.org 2913 + S: Odd Fixes 2914 + F: drivers/tty/hvc/ 2878 2915 2879 2916 HARDWARE MONITORING 2880 2917 M: Jean Delvare <khali@linux-fr.org> ··· 3478 3475 F: Documentation/isapnp.txt 3479 3476 F: drivers/pnp/isapnp/ 3480 3477 F: include/linux/isapnp.h 3478 + 3479 + iSCSI BOOT FIRMWARE TABLE (iBFT) DRIVER 3480 + M: Peter Jones <pjones@redhat.com> 3481 + M: Konrad Rzeszutek Wilk <konrad@kernel.org> 3482 + S: Maintained 3483 + F: drivers/firmware/iscsi_ibft* 3481 3484 3482 3485 ISCSI 3483 3486 M: Mike Christie <michaelc@cs.wisc.edu> ··· 4996 4987 F: drivers/pps/ 4997 4988 F: include/linux/pps*.h 4998 4989 4990 + PPTP DRIVER 4991 + M: Dmitry Kozlov <xeb@mail.ru> 4992 + L: 
netdev@vger.kernel.org 4993 + S: Maintained 4994 + F: drivers/net/pptp.c 4995 + W: http://sourceforge.net/projects/accel-pptp 4996 + 4999 4997 PREEMPTIBLE KERNEL 5000 4998 M: Robert Love <rml@tech9.net> 5001 4999 L: kpreempt-tech@lists.sourceforge.net ··· 5411 5395 F: include/media/*7146* 5412 5396 5413 5397 SAMSUNG AUDIO (ASoC) DRIVERS 5414 - M: Jassi Brar <jassi.brar@samsung.com> 5398 + M: Jassi Brar <jassisinghbrar@gmail.com> 5415 5399 L: alsa-devel@alsa-project.org (moderated for non-subscribers) 5416 5400 S: Supported 5417 5401 F: sound/soc/samsung ··· 6570 6554 F: drivers/usb/host/uhci* 6571 6555 6572 6556 USB "USBNET" DRIVER FRAMEWORK 6573 - M: David Brownell <dbrownell@users.sourceforge.net> 6557 + M: Oliver Neukum <oneukum@suse.de> 6574 6558 L: netdev@vger.kernel.org 6575 6559 W: http://www.linux-usb.org/usbnet 6576 6560 S: Maintained ··· 6936 6920 S: Maintained 6937 6921 F: drivers/platform/x86 6938 6922 6923 + XEN HYPERVISOR INTERFACE 6924 + M: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com> 6925 + M: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com> 6926 + L: xen-devel@lists.xensource.com (moderated for non-subscribers) 6927 + L: virtualization@lists.linux-foundation.org 6928 + S: Supported 6929 + F: arch/x86/xen/ 6930 + F: drivers/*/xen-*front.c 6931 + F: drivers/xen/ 6932 + F: arch/x86/include/asm/xen/ 6933 + F: include/xen/ 6934 + 6939 6935 XEN NETWORK BACKEND DRIVER 6940 6936 M: Ian Campbell <ian.campbell@citrix.com> 6941 6937 L: xen-devel@lists.xensource.com (moderated for non-subscribers) ··· 6968 6940 S: Supported 6969 6941 F: arch/x86/xen/*swiotlb* 6970 6942 F: drivers/xen/*swiotlb* 6971 - 6972 - XEN HYPERVISOR INTERFACE 6973 - M: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com> 6974 - M: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com> 6975 - L: xen-devel@lists.xensource.com (moderated for non-subscribers) 6976 - L: virtualization@lists.linux-foundation.org 6977 - S: Supported 6978 - F: arch/x86/xen/ 6979 - F: drivers/*/xen-*front.c 6980 - 
F: drivers/xen/ 6981 - F: arch/x86/include/asm/xen/ 6982 - F: include/xen/ 6983 6943 6984 6944 XFS FILESYSTEM 6985 6945 P: Silicon Graphics Inc ··· 7037 7021 M: "Maciej W. Rozycki" <macro@linux-mips.org> 7038 7022 S: Maintained 7039 7023 F: drivers/tty/serial/zs.* 7040 - 7041 - GRE DEMULTIPLEXER DRIVER 7042 - M: Dmitry Kozlov <xeb@mail.ru> 7043 - L: netdev@vger.kernel.org 7044 - S: Maintained 7045 - F: net/ipv4/gre.c 7046 - F: include/net/gre.h 7047 - 7048 - PPTP DRIVER 7049 - M: Dmitry Kozlov <xeb@mail.ru> 7050 - L: netdev@vger.kernel.org 7051 - S: Maintained 7052 - F: drivers/net/pptp.c 7053 - W: http://sourceforge.net/projects/accel-pptp 7054 7024 7055 7025 THE REST 7056 7026 M: Linus Torvalds <torvalds@linux-foundation.org>
+1 -1
Makefile
··· 1 1 VERSION = 2 2 2 PATCHLEVEL = 6 3 3 SUBLEVEL = 39 4 - EXTRAVERSION = -rc4 4 + EXTRAVERSION = 5 5 NAME = Flesh-Eating Bats with Fangs 6 6 7 7 # *DOCUMENTATION*
+5 -1
arch/alpha/include/asm/unistd.h
··· 452 452 #define __NR_fanotify_init 494 453 453 #define __NR_fanotify_mark 495 454 454 #define __NR_prlimit64 496 455 + #define __NR_name_to_handle_at 497 456 + #define __NR_open_by_handle_at 498 457 + #define __NR_clock_adjtime 499 458 + #define __NR_syncfs 500 455 459 456 460 #ifdef __KERNEL__ 457 461 458 - #define NR_SYSCALLS 497 462 + #define NR_SYSCALLS 501 459 463 460 464 #define __ARCH_WANT_IPC_PARSE_VERSION 461 465 #define __ARCH_WANT_OLD_READDIR
+8 -4
arch/alpha/kernel/systbls.S
··· 498 498 .quad sys_ni_syscall /* sys_timerfd */ 499 499 .quad sys_eventfd 500 500 .quad sys_recvmmsg 501 - .quad sys_fallocate /* 480 */ 501 + .quad sys_fallocate /* 480 */ 502 502 .quad sys_timerfd_create 503 503 .quad sys_timerfd_settime 504 504 .quad sys_timerfd_gettime 505 505 .quad sys_signalfd4 506 - .quad sys_eventfd2 /* 485 */ 506 + .quad sys_eventfd2 /* 485 */ 507 507 .quad sys_epoll_create1 508 508 .quad sys_dup3 509 509 .quad sys_pipe2 510 510 .quad sys_inotify_init1 511 - .quad sys_preadv /* 490 */ 511 + .quad sys_preadv /* 490 */ 512 512 .quad sys_pwritev 513 513 .quad sys_rt_tgsigqueueinfo 514 514 .quad sys_perf_event_open 515 515 .quad sys_fanotify_init 516 - .quad sys_fanotify_mark /* 495 */ 516 + .quad sys_fanotify_mark /* 495 */ 517 517 .quad sys_prlimit64 518 + .quad sys_name_to_handle_at 519 + .quad sys_open_by_handle_at 520 + .quad sys_clock_adjtime 521 + .quad sys_syncfs /* 500 */ 518 522 519 523 .size sys_call_table, . - sys_call_table 520 524 .type sys_call_table, @object
+1 -2
arch/alpha/kernel/time.c
··· 375 375 376 376 static inline void register_rpcc_clocksource(long cycle_freq) 377 377 { 378 - clocksource_calc_mult_shift(&clocksource_rpcc, cycle_freq, 4); 379 - clocksource_register(&clocksource_rpcc); 378 + clocksource_register_hz(&clocksource_rpcc, cycle_freq); 380 379 } 381 380 #else /* !CONFIG_SMP */ 382 381 static inline void register_rpcc_clocksource(long cycle_freq)
+1 -1
arch/arm/boot/compressed/Makefile
··· 74 74 ZBSSADDR := $(CONFIG_ZBOOT_ROM_BSS) 75 75 else 76 76 ZTEXTADDR := 0 77 - ZBSSADDR := ALIGN(4) 77 + ZBSSADDR := ALIGN(8) 78 78 endif 79 79 80 80 SEDFLAGS = s/TEXT_START/$(ZTEXTADDR)/;s/BSS_START/$(ZBSSADDR)/
+24 -11
arch/arm/boot/compressed/head.S
··· 179 179 bl cache_on 180 180 181 181 restart: adr r0, LC0 182 - ldmia r0, {r1, r2, r3, r5, r6, r9, r11, r12} 183 - ldr sp, [r0, #32] 182 + ldmia r0, {r1, r2, r3, r6, r9, r11, r12} 183 + ldr sp, [r0, #28] 184 184 185 185 /* 186 186 * We might be running at a different address. We need 187 187 * to fix up various pointers. 188 188 */ 189 189 sub r0, r0, r1 @ calculate the delta offset 190 - add r5, r5, r0 @ _start 191 190 add r6, r6, r0 @ _edata 192 191 193 192 #ifndef CONFIG_ZBOOT_ROM ··· 205 206 /* 206 207 * Check to see if we will overwrite ourselves. 207 208 * r4 = final kernel address 208 - * r5 = start of this image 209 209 * r9 = size of decompressed image 210 210 * r10 = end of this image, including bss/stack/malloc space if non XIP 211 211 * We basically want: 212 - * r4 >= r10 -> OK 213 - * r4 + image length <= r5 -> OK 212 + * r4 - 16k page directory >= r10 -> OK 213 + * r4 + image length <= current position (pc) -> OK 214 214 */ 215 + add r10, r10, #16384 215 216 cmp r4, r10 216 217 bhs wont_overwrite 217 218 add r10, r4, r9 218 - cmp r10, r5 219 + ARM( cmp r10, pc ) 220 + THUMB( mov lr, pc ) 221 + THUMB( cmp r10, lr ) 219 222 bls wont_overwrite 220 223 221 224 /* 222 225 * Relocate ourselves past the end of the decompressed kernel. 223 - * r5 = start of this image 224 226 * r6 = _edata 225 227 * r10 = end of the decompressed kernel 226 228 * Because we always copy ahead, we need to do it from the end and go 227 229 * backward in case the source and destination overlap. 228 230 */ 229 - /* Round up to next 256-byte boundary. */ 230 - add r10, r10, #256 231 + /* 232 + * Bump to the next 256-byte boundary with the size of 233 + * the relocation code added. This avoids overwriting 234 + * ourself when the offset is small. 235 + */ 236 + add r10, r10, #((reloc_code_end - restart + 256) & ~255) 231 237 bic r10, r10, #255 238 + 239 + /* Get start of code we want to copy and align it down. 
*/ 240 + adr r5, restart 241 + bic r5, r5, #31 232 242 233 243 sub r9, r6, r5 @ size to copy 234 244 add r9, r9, #31 @ rounded up to a multiple ··· 252 244 253 245 /* Preserve offset to relocated code. */ 254 246 sub r6, r9, r6 247 + 248 + #ifndef CONFIG_ZBOOT_ROM 249 + /* cache_clean_flush may use the stack, so relocate it */ 250 + add sp, sp, r6 251 + #endif 255 252 256 253 bl cache_clean_flush 257 254 ··· 346 333 LC0: .word LC0 @ r1 347 334 .word __bss_start @ r2 348 335 .word _end @ r3 349 - .word _start @ r5 350 336 .word _edata @ r6 351 337 .word _image_size @ r9 352 338 .word _got_start @ r11 ··· 1074 1062 #endif 1075 1063 1076 1064 .ltorg 1065 + reloc_code_end: 1077 1066 1078 1067 .align 1079 1068 .section ".stack", "aw", %nobits
+1
arch/arm/boot/compressed/vmlinux.lds.in
··· 54 54 .bss : { *(.bss) } 55 55 _end = .; 56 56 57 + . = ALIGN(8); /* the stack must be 64-bit aligned */ 57 58 .stack : { *(.stack) } 58 59 59 60 .stab 0 : { *(.stab) }
+48
arch/arm/configs/at91x40_defconfig
··· 1 + CONFIG_EXPERIMENTAL=y 2 + CONFIG_LOG_BUF_SHIFT=14 3 + CONFIG_EMBEDDED=y 4 + # CONFIG_HOTPLUG is not set 5 + # CONFIG_ELF_CORE is not set 6 + # CONFIG_FUTEX is not set 7 + # CONFIG_TIMERFD is not set 8 + # CONFIG_VM_EVENT_COUNTERS is not set 9 + # CONFIG_COMPAT_BRK is not set 10 + CONFIG_SLAB=y 11 + # CONFIG_LBDAF is not set 12 + # CONFIG_BLK_DEV_BSG is not set 13 + # CONFIG_IOSCHED_DEADLINE is not set 14 + # CONFIG_IOSCHED_CFQ is not set 15 + # CONFIG_MMU is not set 16 + CONFIG_ARCH_AT91=y 17 + CONFIG_ARCH_AT91X40=y 18 + CONFIG_MACH_AT91EB01=y 19 + CONFIG_AT91_EARLY_USART0=y 20 + CONFIG_CPU_ARM7TDMI=y 21 + CONFIG_SET_MEM_PARAM=y 22 + CONFIG_DRAM_BASE=0x01000000 23 + CONFIG_DRAM_SIZE=0x00400000 24 + CONFIG_FLASH_MEM_BASE=0x01400000 25 + CONFIG_PROCESSOR_ID=0x14000040 26 + CONFIG_ZBOOT_ROM_TEXT=0x0 27 + CONFIG_ZBOOT_ROM_BSS=0x0 28 + CONFIG_BINFMT_FLAT=y 29 + # CONFIG_SUSPEND is not set 30 + # CONFIG_FW_LOADER is not set 31 + CONFIG_MTD=y 32 + CONFIG_MTD_PARTITIONS=y 33 + CONFIG_MTD_CHAR=y 34 + CONFIG_MTD_BLOCK=y 35 + CONFIG_MTD_RAM=y 36 + CONFIG_MTD_ROM=y 37 + CONFIG_BLK_DEV_RAM=y 38 + # CONFIG_INPUT is not set 39 + # CONFIG_SERIO is not set 40 + # CONFIG_VT is not set 41 + # CONFIG_DEVKMEM is not set 42 + # CONFIG_HW_RANDOM is not set 43 + # CONFIG_HWMON is not set 44 + # CONFIG_USB_SUPPORT is not set 45 + CONFIG_EXT2_FS=y 46 + # CONFIG_DNOTIFY is not set 47 + CONFIG_ROMFS_FS=y 48 + # CONFIG_ENABLE_MUST_CHECK is not set
+1
arch/arm/include/asm/cputype.h
··· 2 2 #define __ASM_ARM_CPUTYPE_H 3 3 4 4 #include <linux/stringify.h> 5 + #include <linux/kernel.h> 5 6 6 7 #define CPUID_ID 0 7 8 #define CPUID_CACHETYPE 1
+3
arch/arm/include/asm/kprobes.h
··· 39 39 struct kprobe; 40 40 typedef void (kprobe_insn_handler_t)(struct kprobe *, struct pt_regs *); 41 41 42 + typedef unsigned long (kprobe_check_cc)(unsigned long); 43 + 42 44 /* Architecture specific copy of original instruction. */ 43 45 struct arch_specific_insn { 44 46 kprobe_opcode_t *insn; 45 47 kprobe_insn_handler_t *insn_handler; 48 + kprobe_check_cc *insn_check_cc; 46 49 }; 47 50 48 51 struct prev_kprobe {
+1 -1
arch/arm/include/asm/system.h
··· 159 159 #include <mach/barriers.h> 160 160 #elif defined(CONFIG_ARM_DMA_MEM_BUFFERABLE) || defined(CONFIG_SMP) 161 161 #define mb() do { dsb(); outer_sync(); } while (0) 162 - #define rmb() dmb() 162 + #define rmb() dsb() 163 163 #define wmb() mb() 164 164 #else 165 165 #include <asm/memory.h>
+4
arch/arm/include/asm/unistd.h
··· 396 396 #define __NR_fanotify_init (__NR_SYSCALL_BASE+367) 397 397 #define __NR_fanotify_mark (__NR_SYSCALL_BASE+368) 398 398 #define __NR_prlimit64 (__NR_SYSCALL_BASE+369) 399 + #define __NR_name_to_handle_at (__NR_SYSCALL_BASE+370) 400 + #define __NR_open_by_handle_at (__NR_SYSCALL_BASE+371) 401 + #define __NR_clock_adjtime (__NR_SYSCALL_BASE+372) 402 + #define __NR_syncfs (__NR_SYSCALL_BASE+373) 399 403 400 404 /* 401 405 * The following SWIs are ARM private.
+4
arch/arm/kernel/calls.S
··· 379 379 CALL(sys_fanotify_init) 380 380 CALL(sys_fanotify_mark) 381 381 CALL(sys_prlimit64) 382 + /* 370 */ CALL(sys_name_to_handle_at) 383 + CALL(sys_open_by_handle_at) 384 + CALL(sys_clock_adjtime) 385 + CALL(sys_syncfs) 382 386 #ifndef syscalls_counted 383 387 .equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls 384 388 #define syscalls_counted
+465 -328
arch/arm/kernel/kprobes-decode.c
··· 34 34 * 35 35 * *) If the PC is written to by the instruction, the 36 36 * instruction must be fully simulated in software. 37 - * If it is a conditional instruction, the handler 38 - * will use insn[0] to copy its condition code to 39 - * set r0 to 1 and insn[1] to "mov pc, lr" to return. 40 37 * 41 38 * *) Otherwise, a modified form of the instruction is 42 39 * directly executed. Its handler calls the ··· 65 68 66 69 #define branch_displacement(insn) sign_extend(((insn) & 0xffffff) << 2, 25) 67 70 71 + #define is_r15(insn, bitpos) (((insn) & (0xf << bitpos)) == (0xf << bitpos)) 72 + 73 + /* 74 + * Test if load/store instructions writeback the address register. 75 + * if P (bit 24) == 0 or W (bit 21) == 1 76 + */ 77 + #define is_writeback(insn) ((insn ^ 0x01000000) & 0x01200000) 78 + 68 79 #define PSR_fs (PSR_f|PSR_s) 69 80 70 81 #define KPROBE_RETURN_INSTRUCTION 0xe1a0f00e /* mov pc, lr */ 71 - #define SET_R0_TRUE_INSTRUCTION 0xe3a00001 /* mov r0, #1 */ 72 - 73 - #define truecc_insn(insn) (((insn) & 0xf0000000) | \ 74 - (SET_R0_TRUE_INSTRUCTION & 0x0fffffff)) 75 82 76 83 typedef long (insn_0arg_fn_t)(void); 77 84 typedef long (insn_1arg_fn_t)(long); ··· 420 419 421 420 static void __kprobes simulate_bbl(struct kprobe *p, struct pt_regs *regs) 422 421 { 423 - insn_1arg_fn_t *i_fn = (insn_1arg_fn_t *)&p->ainsn.insn[0]; 424 422 kprobe_opcode_t insn = p->opcode; 425 423 long iaddr = (long)p->addr; 426 424 int disp = branch_displacement(insn); 427 - 428 - if (!insnslot_1arg_rflags(0, regs->ARM_cpsr, i_fn)) 429 - return; 430 425 431 426 if (insn & (1 << 24)) 432 427 regs->ARM_lr = iaddr + 4; ··· 443 446 444 447 static void __kprobes simulate_blx2bx(struct kprobe *p, struct pt_regs *regs) 445 448 { 446 - insn_1arg_fn_t *i_fn = (insn_1arg_fn_t *)&p->ainsn.insn[0]; 447 449 kprobe_opcode_t insn = p->opcode; 448 450 int rm = insn & 0xf; 449 451 long rmv = regs->uregs[rm]; 450 - 451 - if (!insnslot_1arg_rflags(0, regs->ARM_cpsr, i_fn)) 452 - return; 453 452 454 453 if 
(insn & (1 << 5)) 455 454 regs->ARM_lr = (long)p->addr + 4; ··· 456 463 regs->ARM_cpsr |= PSR_T_BIT; 457 464 } 458 465 466 + static void __kprobes simulate_mrs(struct kprobe *p, struct pt_regs *regs) 467 + { 468 + kprobe_opcode_t insn = p->opcode; 469 + int rd = (insn >> 12) & 0xf; 470 + unsigned long mask = 0xf8ff03df; /* Mask out execution state */ 471 + regs->uregs[rd] = regs->ARM_cpsr & mask; 472 + } 473 + 459 474 static void __kprobes simulate_ldm1stm1(struct kprobe *p, struct pt_regs *regs) 460 475 { 461 - insn_1arg_fn_t *i_fn = (insn_1arg_fn_t *)&p->ainsn.insn[0]; 462 476 kprobe_opcode_t insn = p->opcode; 463 477 int rn = (insn >> 16) & 0xf; 464 478 int lbit = insn & (1 << 20); ··· 475 475 long *addr = (long *)regs->uregs[rn]; 476 476 int reg_bit_vector; 477 477 int reg_count; 478 - 479 - if (!insnslot_1arg_rflags(0, regs->ARM_cpsr, i_fn)) 480 - return; 481 478 482 479 reg_count = 0; 483 480 reg_bit_vector = insn & 0xffff; ··· 507 510 508 511 static void __kprobes simulate_stm1_pc(struct kprobe *p, struct pt_regs *regs) 509 512 { 510 - insn_1arg_fn_t *i_fn = (insn_1arg_fn_t *)&p->ainsn.insn[0]; 511 - 512 - if (!insnslot_1arg_rflags(0, regs->ARM_cpsr, i_fn)) 513 - return; 514 - 515 513 regs->ARM_pc = (long)p->addr + str_pc_offset; 516 514 simulate_ldm1stm1(p, regs); 517 515 regs->ARM_pc = (long)p->addr + 4; ··· 517 525 regs->uregs[12] = regs->uregs[13]; 518 526 } 519 527 520 - static void __kprobes emulate_ldcstc(struct kprobe *p, struct pt_regs *regs) 521 - { 522 - insn_1arg_fn_t *i_fn = (insn_1arg_fn_t *)&p->ainsn.insn[0]; 523 - kprobe_opcode_t insn = p->opcode; 524 - int rn = (insn >> 16) & 0xf; 525 - long rnv = regs->uregs[rn]; 526 - 527 - /* Save Rn in case of writeback. 
*/ 528 - regs->uregs[rn] = insnslot_1arg_rflags(rnv, regs->ARM_cpsr, i_fn); 529 - } 530 - 531 528 static void __kprobes emulate_ldrd(struct kprobe *p, struct pt_regs *regs) 532 529 { 533 530 insn_2arg_fn_t *i_fn = (insn_2arg_fn_t *)&p->ainsn.insn[0]; 534 531 kprobe_opcode_t insn = p->opcode; 532 + long ppc = (long)p->addr + 8; 535 533 int rd = (insn >> 12) & 0xf; 536 534 int rn = (insn >> 16) & 0xf; 537 535 int rm = insn & 0xf; /* rm may be invalid, don't care. */ 536 + long rmv = (rm == 15) ? ppc : regs->uregs[rm]; 537 + long rnv = (rn == 15) ? ppc : regs->uregs[rn]; 538 538 539 539 /* Not following the C calling convention here, so need asm(). */ 540 540 __asm__ __volatile__ ( ··· 538 554 "str r0, %[rn] \n\t" /* in case of writeback */ 539 555 "str r2, %[rd0] \n\t" 540 556 "str r3, %[rd1] \n\t" 541 - : [rn] "+m" (regs->uregs[rn]), 557 + : [rn] "+m" (rnv), 542 558 [rd0] "=m" (regs->uregs[rd]), 543 559 [rd1] "=m" (regs->uregs[rd+1]) 544 - : [rm] "m" (regs->uregs[rm]), 560 + : [rm] "m" (rmv), 545 561 [cpsr] "r" (regs->ARM_cpsr), 546 562 [i_fn] "r" (i_fn) 547 563 : "r0", "r1", "r2", "r3", "lr", "cc" 548 564 ); 565 + if (is_writeback(insn)) 566 + regs->uregs[rn] = rnv; 549 567 } 550 568 551 569 static void __kprobes emulate_strd(struct kprobe *p, struct pt_regs *regs) 552 570 { 553 571 insn_4arg_fn_t *i_fn = (insn_4arg_fn_t *)&p->ainsn.insn[0]; 554 572 kprobe_opcode_t insn = p->opcode; 573 + long ppc = (long)p->addr + 8; 555 574 int rd = (insn >> 12) & 0xf; 556 575 int rn = (insn >> 16) & 0xf; 557 576 int rm = insn & 0xf; 558 - long rnv = regs->uregs[rn]; 559 - long rmv = regs->uregs[rm]; /* rm/rmv may be invalid, don't care. */ 577 + long rnv = (rn == 15) ? ppc : regs->uregs[rn]; 578 + /* rm/rmv may be invalid, don't care. */ 579 + long rmv = (rm == 15) ? 
ppc : regs->uregs[rm]; 580 + long rnv_wb; 560 581 561 - regs->uregs[rn] = insnslot_4arg_rflags(rnv, rmv, regs->uregs[rd], 582 + rnv_wb = insnslot_4arg_rflags(rnv, rmv, regs->uregs[rd], 562 583 regs->uregs[rd+1], 563 584 regs->ARM_cpsr, i_fn); 585 + if (is_writeback(insn)) 586 + regs->uregs[rn] = rnv_wb; 564 587 } 565 588 566 589 static void __kprobes emulate_ldr(struct kprobe *p, struct pt_regs *regs) ··· 621 630 regs->uregs[rn] = rnv_wb; /* Save Rn in case of writeback. */ 622 631 } 623 632 624 - static void __kprobes emulate_mrrc(struct kprobe *p, struct pt_regs *regs) 625 - { 626 - insn_llret_0arg_fn_t *i_fn = (insn_llret_0arg_fn_t *)&p->ainsn.insn[0]; 627 - kprobe_opcode_t insn = p->opcode; 628 - union reg_pair fnr; 629 - int rd = (insn >> 12) & 0xf; 630 - int rn = (insn >> 16) & 0xf; 631 - 632 - fnr.dr = insnslot_llret_0arg_rflags(regs->ARM_cpsr, i_fn); 633 - regs->uregs[rn] = fnr.r0; 634 - regs->uregs[rd] = fnr.r1; 635 - } 636 - 637 - static void __kprobes emulate_mcrr(struct kprobe *p, struct pt_regs *regs) 638 - { 639 - insn_2arg_fn_t *i_fn = (insn_2arg_fn_t *)&p->ainsn.insn[0]; 640 - kprobe_opcode_t insn = p->opcode; 641 - int rd = (insn >> 12) & 0xf; 642 - int rn = (insn >> 16) & 0xf; 643 - long rnv = regs->uregs[rn]; 644 - long rdv = regs->uregs[rd]; 645 - 646 - insnslot_2arg_rflags(rnv, rdv, regs->ARM_cpsr, i_fn); 647 - } 648 - 649 633 static void __kprobes emulate_sat(struct kprobe *p, struct pt_regs *regs) 650 634 { 651 635 insn_1arg_fn_t *i_fn = (insn_1arg_fn_t *)&p->ainsn.insn[0]; ··· 654 688 insnslot_0arg_rflags(regs->ARM_cpsr, i_fn); 655 689 } 656 690 657 - static void __kprobes emulate_rd12(struct kprobe *p, struct pt_regs *regs) 691 + static void __kprobes emulate_nop(struct kprobe *p, struct pt_regs *regs) 658 692 { 659 - insn_0arg_fn_t *i_fn = (insn_0arg_fn_t *)&p->ainsn.insn[0]; 693 + } 694 + 695 + static void __kprobes 696 + emulate_rd12_modify(struct kprobe *p, struct pt_regs *regs) 697 + { 698 + insn_1arg_fn_t *i_fn = (insn_1arg_fn_t 
*)&p->ainsn.insn[0]; 660 699 kprobe_opcode_t insn = p->opcode; 661 700 int rd = (insn >> 12) & 0xf; 701 + long rdv = regs->uregs[rd]; 662 702 663 - regs->uregs[rd] = insnslot_0arg_rflags(regs->ARM_cpsr, i_fn); 703 + regs->uregs[rd] = insnslot_1arg_rflags(rdv, regs->ARM_cpsr, i_fn); 664 704 } 665 705 666 - static void __kprobes emulate_ird12(struct kprobe *p, struct pt_regs *regs) 706 + static void __kprobes 707 + emulate_rd12rn0_modify(struct kprobe *p, struct pt_regs *regs) 667 708 { 668 - insn_1arg_fn_t *i_fn = (insn_1arg_fn_t *)&p->ainsn.insn[0]; 709 + insn_2arg_fn_t *i_fn = (insn_2arg_fn_t *)&p->ainsn.insn[0]; 669 710 kprobe_opcode_t insn = p->opcode; 670 - int ird = (insn >> 12) & 0xf; 671 - 672 - insnslot_1arg_rflags(regs->uregs[ird], regs->ARM_cpsr, i_fn); 673 - } 674 - 675 - static void __kprobes emulate_rn16(struct kprobe *p, struct pt_regs *regs) 676 - { 677 - insn_1arg_fn_t *i_fn = (insn_1arg_fn_t *)&p->ainsn.insn[0]; 678 - kprobe_opcode_t insn = p->opcode; 679 - int rn = (insn >> 16) & 0xf; 711 + int rd = (insn >> 12) & 0xf; 712 + int rn = insn & 0xf; 713 + long rdv = regs->uregs[rd]; 680 714 long rnv = regs->uregs[rn]; 681 715 682 - insnslot_1arg_rflags(rnv, regs->ARM_cpsr, i_fn); 716 + regs->uregs[rd] = insnslot_2arg_rflags(rdv, rnv, regs->ARM_cpsr, i_fn); 683 717 } 684 718 685 719 static void __kprobes emulate_rd12rm0(struct kprobe *p, struct pt_regs *regs) ··· 785 819 } 786 820 787 821 static void __kprobes 822 + emulate_alu_tests_imm(struct kprobe *p, struct pt_regs *regs) 823 + { 824 + insn_1arg_fn_t *i_fn = (insn_1arg_fn_t *)&p->ainsn.insn[0]; 825 + kprobe_opcode_t insn = p->opcode; 826 + int rn = (insn >> 16) & 0xf; 827 + long rnv = (rn == 15) ? 
(long)p->addr + 8 : regs->uregs[rn]; 828 + 829 + insnslot_1arg_rwflags(rnv, &regs->ARM_cpsr, i_fn); 830 + } 831 + 832 + static void __kprobes 788 833 emulate_alu_rflags(struct kprobe *p, struct pt_regs *regs) 789 834 { 790 835 insn_3arg_fn_t *i_fn = (insn_3arg_fn_t *)&p->ainsn.insn[0]; ··· 831 854 insnslot_3arg_rwflags(rnv, rmv, rsv, &regs->ARM_cpsr, i_fn); 832 855 } 833 856 857 + static void __kprobes 858 + emulate_alu_tests(struct kprobe *p, struct pt_regs *regs) 859 + { 860 + insn_3arg_fn_t *i_fn = (insn_3arg_fn_t *)&p->ainsn.insn[0]; 861 + kprobe_opcode_t insn = p->opcode; 862 + long ppc = (long)p->addr + 8; 863 + int rn = (insn >> 16) & 0xf; 864 + int rs = (insn >> 8) & 0xf; /* rs/rsv may be invalid, don't care. */ 865 + int rm = insn & 0xf; 866 + long rnv = (rn == 15) ? ppc : regs->uregs[rn]; 867 + long rmv = (rm == 15) ? ppc : regs->uregs[rm]; 868 + long rsv = regs->uregs[rs]; 869 + 870 + insnslot_3arg_rwflags(rnv, rmv, rsv, &regs->ARM_cpsr, i_fn); 871 + } 872 + 834 873 static enum kprobe_insn __kprobes 835 874 prep_emulate_ldr_str(kprobe_opcode_t insn, struct arch_specific_insn *asi) 836 875 { 837 - int ibit = (insn & (1 << 26)) ? 25 : 22; 876 + int not_imm = (insn & (1 << 26)) ? 
(insn & (1 << 25)) 877 + : (~insn & (1 << 22)); 878 + 879 + if (is_writeback(insn) && is_r15(insn, 16)) 880 + return INSN_REJECTED; /* Writeback to PC */ 838 881 839 882 insn &= 0xfff00fff; 840 883 insn |= 0x00001000; /* Rn = r0, Rd = r1 */ 841 - if (insn & (1 << ibit)) { 884 + if (not_imm) { 842 885 insn &= ~0xf; 843 886 insn |= 2; /* Rm = r2 */ 844 887 } ··· 868 871 } 869 872 870 873 static enum kprobe_insn __kprobes 874 + prep_emulate_rd12_modify(kprobe_opcode_t insn, struct arch_specific_insn *asi) 875 + { 876 + if (is_r15(insn, 12)) 877 + return INSN_REJECTED; /* Rd is PC */ 878 + 879 + insn &= 0xffff0fff; /* Rd = r0 */ 880 + asi->insn[0] = insn; 881 + asi->insn_handler = emulate_rd12_modify; 882 + return INSN_GOOD; 883 + } 884 + 885 + static enum kprobe_insn __kprobes 886 + prep_emulate_rd12rn0_modify(kprobe_opcode_t insn, 887 + struct arch_specific_insn *asi) 888 + { 889 + if (is_r15(insn, 12)) 890 + return INSN_REJECTED; /* Rd is PC */ 891 + 892 + insn &= 0xffff0ff0; /* Rd = r0 */ 893 + insn |= 0x00000001; /* Rn = r1 */ 894 + asi->insn[0] = insn; 895 + asi->insn_handler = emulate_rd12rn0_modify; 896 + return INSN_GOOD; 897 + } 898 + 899 + static enum kprobe_insn __kprobes 871 900 prep_emulate_rd12rm0(kprobe_opcode_t insn, struct arch_specific_insn *asi) 872 901 { 902 + if (is_r15(insn, 12)) 903 + return INSN_REJECTED; /* Rd is PC */ 904 + 873 905 insn &= 0xffff0ff0; /* Rd = r0, Rm = r0 */ 874 906 asi->insn[0] = insn; 875 907 asi->insn_handler = emulate_rd12rm0; ··· 906 880 } 907 881 908 882 static enum kprobe_insn __kprobes 909 - prep_emulate_rd12(kprobe_opcode_t insn, struct arch_specific_insn *asi) 910 - { 911 - insn &= 0xffff0fff; /* Rd = r0 */ 912 - asi->insn[0] = insn; 913 - asi->insn_handler = emulate_rd12; 914 - return INSN_GOOD; 915 - } 916 - 917 - static enum kprobe_insn __kprobes 918 883 prep_emulate_rd12rn16rm0_wflags(kprobe_opcode_t insn, 919 884 struct arch_specific_insn *asi) 920 885 { 886 + if (is_r15(insn, 12)) 887 + return INSN_REJECTED; /* 
Rd is PC */ 888 + 921 889 insn &= 0xfff00ff0; /* Rd = r0, Rn = r0 */ 922 890 insn |= 0x00000001; /* Rm = r1 */ 923 891 asi->insn[0] = insn; ··· 923 903 prep_emulate_rd16rs8rm0_wflags(kprobe_opcode_t insn, 924 904 struct arch_specific_insn *asi) 925 905 { 906 + if (is_r15(insn, 16)) 907 + return INSN_REJECTED; /* Rd is PC */ 908 + 926 909 insn &= 0xfff0f0f0; /* Rd = r0, Rs = r0 */ 927 910 insn |= 0x00000001; /* Rm = r1 */ 928 911 asi->insn[0] = insn; ··· 937 914 prep_emulate_rd16rn12rs8rm0_wflags(kprobe_opcode_t insn, 938 915 struct arch_specific_insn *asi) 939 916 { 917 + if (is_r15(insn, 16)) 918 + return INSN_REJECTED; /* Rd is PC */ 919 + 940 920 insn &= 0xfff000f0; /* Rd = r0, Rn = r0 */ 941 921 insn |= 0x00000102; /* Rs = r1, Rm = r2 */ 942 922 asi->insn[0] = insn; ··· 951 925 prep_emulate_rdhi16rdlo12rs8rm0_wflags(kprobe_opcode_t insn, 952 926 struct arch_specific_insn *asi) 953 927 { 928 + if (is_r15(insn, 16) || is_r15(insn, 12)) 929 + return INSN_REJECTED; /* RdHi or RdLo is PC */ 930 + 954 931 insn &= 0xfff000f0; /* RdHi = r0, RdLo = r1 */ 955 932 insn |= 0x00001203; /* Rs = r2, Rm = r3 */ 956 933 asi->insn[0] = insn; ··· 974 945 static enum kprobe_insn __kprobes 975 946 space_1111(kprobe_opcode_t insn, struct arch_specific_insn *asi) 976 947 { 977 - /* CPS mmod == 1 : 1111 0001 0000 xx10 xxxx xxxx xx0x xxxx */ 978 - /* RFE : 1111 100x x0x1 xxxx xxxx 1010 xxxx xxxx */ 979 - /* SRS : 1111 100x x1x0 1101 xxxx 0101 xxxx xxxx */ 980 - if ((insn & 0xfff30020) == 0xf1020000 || 981 - (insn & 0xfe500f00) == 0xf8100a00 || 982 - (insn & 0xfe5f0f00) == 0xf84d0500) 983 - return INSN_REJECTED; 984 - 985 - /* PLD : 1111 01x1 x101 xxxx xxxx xxxx xxxx xxxx : */ 986 - if ((insn & 0xfd700000) == 0xf4500000) { 987 - insn &= 0xfff0ffff; /* Rn = r0 */ 988 - asi->insn[0] = insn; 989 - asi->insn_handler = emulate_rn16; 990 - return INSN_GOOD; 948 + /* memory hint : 1111 0100 x001 xxxx xxxx xxxx xxxx xxxx : */ 949 + /* PLDI : 1111 0100 x101 xxxx xxxx xxxx xxxx xxxx : */ 950 + /* 
PLDW : 1111 0101 x001 xxxx xxxx xxxx xxxx xxxx : */ 951 + /* PLD : 1111 0101 x101 xxxx xxxx xxxx xxxx xxxx : */ 952 + if ((insn & 0xfe300000) == 0xf4100000) { 953 + asi->insn_handler = emulate_nop; 954 + return INSN_GOOD_NO_SLOT; 991 955 } 992 956 993 957 /* BLX(1) : 1111 101x xxxx xxxx xxxx xxxx xxxx xxxx : */ ··· 989 967 return INSN_GOOD_NO_SLOT; 990 968 } 991 969 992 - /* SETEND : 1111 0001 0000 0001 xxxx xxxx 0000 xxxx */ 993 - /* CDP2 : 1111 1110 xxxx xxxx xxxx xxxx xxx0 xxxx */ 994 - if ((insn & 0xffff00f0) == 0xf1010000 || 995 - (insn & 0xff000010) == 0xfe000000) { 996 - asi->insn[0] = insn; 997 - asi->insn_handler = emulate_none; 998 - return INSN_GOOD; 999 - } 970 + /* CPS : 1111 0001 0000 xxx0 xxxx xxxx xx0x xxxx */ 971 + /* SETEND: 1111 0001 0000 0001 xxxx xxxx 0000 xxxx */ 1000 972 973 + /* SRS : 1111 100x x1x0 xxxx xxxx xxxx xxxx xxxx */ 974 + /* RFE : 1111 100x x0x1 xxxx xxxx xxxx xxxx xxxx */ 975 + 976 + /* Coprocessor instructions... */ 1001 977 /* MCRR2 : 1111 1100 0100 xxxx xxxx xxxx xxxx xxxx : (Rd != Rn) */ 1002 978 /* MRRC2 : 1111 1100 0101 xxxx xxxx xxxx xxxx xxxx : (Rd != Rn) */ 1003 - if ((insn & 0xffe00000) == 0xfc400000) { 1004 - insn &= 0xfff00fff; /* Rn = r0 */ 1005 - insn |= 0x00001000; /* Rd = r1 */ 1006 - asi->insn[0] = insn; 1007 - asi->insn_handler = 1008 - (insn & (1 << 20)) ? 
emulate_mrrc : emulate_mcrr; 1009 - return INSN_GOOD; 1010 - } 979 + /* LDC2 : 1111 110x xxx1 xxxx xxxx xxxx xxxx xxxx */ 980 + /* STC2 : 1111 110x xxx0 xxxx xxxx xxxx xxxx xxxx */ 981 + /* CDP2 : 1111 1110 xxxx xxxx xxxx xxxx xxx0 xxxx */ 982 + /* MCR2 : 1111 1110 xxx0 xxxx xxxx xxxx xxx1 xxxx */ 983 + /* MRC2 : 1111 1110 xxx1 xxxx xxxx xxxx xxx1 xxxx */ 1011 984 1012 - /* LDC2 : 1111 110x xxx1 xxxx xxxx xxxx xxxx xxxx */ 1013 - /* STC2 : 1111 110x xxx0 xxxx xxxx xxxx xxxx xxxx */ 1014 - if ((insn & 0xfe000000) == 0xfc000000) { 1015 - insn &= 0xfff0ffff; /* Rn = r0 */ 1016 - asi->insn[0] = insn; 1017 - asi->insn_handler = emulate_ldcstc; 1018 - return INSN_GOOD; 1019 - } 1020 - 1021 - /* MCR2 : 1111 1110 xxx0 xxxx xxxx xxxx xxx1 xxxx */ 1022 - /* MRC2 : 1111 1110 xxx1 xxxx xxxx xxxx xxx1 xxxx */ 1023 - insn &= 0xffff0fff; /* Rd = r0 */ 1024 - asi->insn[0] = insn; 1025 - asi->insn_handler = (insn & (1 << 20)) ? emulate_rd12 : emulate_ird12; 1026 - return INSN_GOOD; 985 + return INSN_REJECTED; 1027 986 } 1028 987 1029 988 static enum kprobe_insn __kprobes ··· 1013 1010 /* cccc 0001 0xx0 xxxx xxxx xxxx xxxx xxx0 xxxx */ 1014 1011 if ((insn & 0x0f900010) == 0x01000000) { 1015 1012 1016 - /* BXJ : cccc 0001 0010 xxxx xxxx xxxx 0010 xxxx */ 1017 - /* MSR : cccc 0001 0x10 xxxx xxxx xxxx 0000 xxxx */ 1018 - if ((insn & 0x0ff000f0) == 0x01200020 || 1019 - (insn & 0x0fb000f0) == 0x01200000) 1020 - return INSN_REJECTED; 1021 - 1022 - /* MRS : cccc 0001 0x00 xxxx xxxx xxxx 0000 xxxx */ 1023 - if ((insn & 0x0fb00010) == 0x01000000) 1024 - return prep_emulate_rd12(insn, asi); 1013 + /* MRS cpsr : cccc 0001 0000 xxxx xxxx xxxx 0000 xxxx */ 1014 + if ((insn & 0x0ff000f0) == 0x01000000) { 1015 + if (is_r15(insn, 12)) 1016 + return INSN_REJECTED; /* Rd is PC */ 1017 + asi->insn_handler = simulate_mrs; 1018 + return INSN_GOOD_NO_SLOT; 1019 + } 1025 1020 1026 1021 /* SMLALxy : cccc 0001 0100 xxxx xxxx xxxx 1xx0 xxxx */ 1027 1022 if ((insn & 0x0ff00090) == 0x01400080) 1028 - return 
prep_emulate_rdhi16rdlo12rs8rm0_wflags(insn, asi); 1023 + return prep_emulate_rdhi16rdlo12rs8rm0_wflags(insn, 1024 + asi); 1029 1025 1030 1026 /* SMULWy : cccc 0001 0010 xxxx xxxx xxxx 1x10 xxxx */ 1031 1027 /* SMULxy : cccc 0001 0110 xxxx xxxx xxxx 1xx0 xxxx */ ··· 1033 1031 return prep_emulate_rd16rs8rm0_wflags(insn, asi); 1034 1032 1035 1033 /* SMLAxy : cccc 0001 0000 xxxx xxxx xxxx 1xx0 xxxx : Q */ 1036 - /* SMLAWy : cccc 0001 0010 xxxx xxxx xxxx 0x00 xxxx : Q */ 1037 - return prep_emulate_rd16rn12rs8rm0_wflags(insn, asi); 1034 + /* SMLAWy : cccc 0001 0010 xxxx xxxx xxxx 1x00 xxxx : Q */ 1035 + if ((insn & 0x0ff00090) == 0x01000080 || 1036 + (insn & 0x0ff000b0) == 0x01200080) 1037 + return prep_emulate_rd16rn12rs8rm0_wflags(insn, asi); 1038 1038 1039 + /* BXJ : cccc 0001 0010 xxxx xxxx xxxx 0010 xxxx */ 1040 + /* MSR : cccc 0001 0x10 xxxx xxxx xxxx 0000 xxxx */ 1041 + /* MRS spsr : cccc 0001 0100 xxxx xxxx xxxx 0000 xxxx */ 1042 + 1043 + /* Other instruction encodings aren't yet defined */ 1044 + return INSN_REJECTED; 1039 1045 } 1040 1046 1041 1047 /* cccc 0001 0xx0 xxxx xxxx xxxx xxxx 0xx1 xxxx */ 1042 1048 else if ((insn & 0x0f900090) == 0x01000010) { 1043 1049 1044 - /* BKPT : 1110 0001 0010 xxxx xxxx xxxx 0111 xxxx */ 1045 - if ((insn & 0xfff000f0) == 0xe1200070) 1046 - return INSN_REJECTED; 1047 - 1048 1050 /* BLX(2) : cccc 0001 0010 xxxx xxxx xxxx 0011 xxxx */ 1049 1051 /* BX : cccc 0001 0010 xxxx xxxx xxxx 0001 xxxx */ 1050 1052 if ((insn & 0x0ff000d0) == 0x01200010) { 1051 - asi->insn[0] = truecc_insn(insn); 1053 + if ((insn & 0x0ff000ff) == 0x0120003f) 1054 + return INSN_REJECTED; /* BLX pc */ 1052 1055 asi->insn_handler = simulate_blx2bx; 1053 - return INSN_GOOD; 1056 + return INSN_GOOD_NO_SLOT; 1054 1057 } 1055 1058 1056 1059 /* CLZ : cccc 0001 0110 xxxx xxxx xxxx 0001 xxxx */ ··· 1066 1059 /* QSUB : cccc 0001 0010 xxxx xxxx xxxx 0101 xxxx :Q */ 1067 1060 /* QDADD : cccc 0001 0100 xxxx xxxx xxxx 0101 xxxx :Q */ 1068 1061 /* QDSUB : cccc 0001 0110 
xxxx xxxx xxxx 0101 xxxx :Q */ 1069 - return prep_emulate_rd12rn16rm0_wflags(insn, asi); 1062 + if ((insn & 0x0f9000f0) == 0x01000050) 1063 + return prep_emulate_rd12rn16rm0_wflags(insn, asi); 1064 + 1065 + /* BKPT : 1110 0001 0010 xxxx xxxx xxxx 0111 xxxx */ 1066 + /* SMC : cccc 0001 0110 xxxx xxxx xxxx 0111 xxxx */ 1067 + 1068 + /* Other instruction encodings aren't yet defined */ 1069 + return INSN_REJECTED; 1070 1070 } 1071 1071 1072 1072 /* cccc 0000 xxxx xxxx xxxx xxxx xxxx 1001 xxxx */ 1073 - else if ((insn & 0x0f000090) == 0x00000090) { 1073 + else if ((insn & 0x0f0000f0) == 0x00000090) { 1074 1074 1075 1075 /* MUL : cccc 0000 0000 xxxx xxxx xxxx 1001 xxxx : */ 1076 1076 /* MULS : cccc 0000 0001 xxxx xxxx xxxx 1001 xxxx :cc */ 1077 1077 /* MLA : cccc 0000 0010 xxxx xxxx xxxx 1001 xxxx : */ 1078 1078 /* MLAS : cccc 0000 0011 xxxx xxxx xxxx 1001 xxxx :cc */ 1079 1079 /* UMAAL : cccc 0000 0100 xxxx xxxx xxxx 1001 xxxx : */ 1080 + /* undef : cccc 0000 0101 xxxx xxxx xxxx 1001 xxxx : */ 1081 + /* MLS : cccc 0000 0110 xxxx xxxx xxxx 1001 xxxx : */ 1082 + /* undef : cccc 0000 0111 xxxx xxxx xxxx 1001 xxxx : */ 1080 1083 /* UMULL : cccc 0000 1000 xxxx xxxx xxxx 1001 xxxx : */ 1081 1084 /* UMULLS : cccc 0000 1001 xxxx xxxx xxxx 1001 xxxx :cc */ 1082 1085 /* UMLAL : cccc 0000 1010 xxxx xxxx xxxx 1001 xxxx : */ ··· 1095 1078 /* SMULLS : cccc 0000 1101 xxxx xxxx xxxx 1001 xxxx :cc */ 1096 1079 /* SMLAL : cccc 0000 1110 xxxx xxxx xxxx 1001 xxxx : */ 1097 1080 /* SMLALS : cccc 0000 1111 xxxx xxxx xxxx 1001 xxxx :cc */ 1098 - if ((insn & 0x0fe000f0) == 0x00000090) { 1099 - return prep_emulate_rd16rs8rm0_wflags(insn, asi); 1100 - } else if ((insn & 0x0fe000f0) == 0x00200090) { 1101 - return prep_emulate_rd16rn12rs8rm0_wflags(insn, asi); 1102 - } else { 1103 - return prep_emulate_rdhi16rdlo12rs8rm0_wflags(insn, asi); 1104 - } 1081 + if ((insn & 0x00d00000) == 0x00500000) 1082 + return INSN_REJECTED; 1083 + else if ((insn & 0x00e00000) == 0x00000000) 1084 + return 
prep_emulate_rd16rs8rm0_wflags(insn, asi); 1085 + else if ((insn & 0x00a00000) == 0x00200000) 1086 + return prep_emulate_rd16rn12rs8rm0_wflags(insn, asi); 1087 + else 1088 + return prep_emulate_rdhi16rdlo12rs8rm0_wflags(insn, 1089 + asi); 1105 1090 } 1106 1091 1107 1092 /* cccc 000x xxxx xxxx xxxx xxxx xxxx 1xx1 xxxx */ ··· 1111 1092 1112 1093 /* SWP : cccc 0001 0000 xxxx xxxx xxxx 1001 xxxx */ 1113 1094 /* SWPB : cccc 0001 0100 xxxx xxxx xxxx 1001 xxxx */ 1114 - /* LDRD : cccc 000x xxx0 xxxx xxxx xxxx 1101 xxxx */ 1115 - /* STRD : cccc 000x xxx0 xxxx xxxx xxxx 1111 xxxx */ 1095 + /* ??? : cccc 0001 0x01 xxxx xxxx xxxx 1001 xxxx */ 1096 + /* ??? : cccc 0001 0x10 xxxx xxxx xxxx 1001 xxxx */ 1097 + /* ??? : cccc 0001 0x11 xxxx xxxx xxxx 1001 xxxx */ 1116 1098 /* STREX : cccc 0001 1000 xxxx xxxx xxxx 1001 xxxx */ 1117 1099 /* LDREX : cccc 0001 1001 xxxx xxxx xxxx 1001 xxxx */ 1100 + /* STREXD: cccc 0001 1010 xxxx xxxx xxxx 1001 xxxx */ 1101 + /* LDREXD: cccc 0001 1011 xxxx xxxx xxxx 1001 xxxx */ 1102 + /* STREXB: cccc 0001 1100 xxxx xxxx xxxx 1001 xxxx */ 1103 + /* LDREXB: cccc 0001 1101 xxxx xxxx xxxx 1001 xxxx */ 1104 + /* STREXH: cccc 0001 1110 xxxx xxxx xxxx 1001 xxxx */ 1105 + /* LDREXH: cccc 0001 1111 xxxx xxxx xxxx 1001 xxxx */ 1106 + 1107 + /* LDRD : cccc 000x xxx0 xxxx xxxx xxxx 1101 xxxx */ 1108 + /* STRD : cccc 000x xxx0 xxxx xxxx xxxx 1111 xxxx */ 1118 1109 /* LDRH : cccc 000x xxx1 xxxx xxxx xxxx 1011 xxxx */ 1119 1110 /* STRH : cccc 000x xxx0 xxxx xxxx xxxx 1011 xxxx */ 1120 1111 /* LDRSB : cccc 000x xxx1 xxxx xxxx xxxx 1101 xxxx */ 1121 1112 /* LDRSH : cccc 000x xxx1 xxxx xxxx xxxx 1111 xxxx */ 1122 - if ((insn & 0x0fb000f0) == 0x01000090) { 1123 - /* SWP/SWPB */ 1124 - return prep_emulate_rd12rn16rm0_wflags(insn, asi); 1113 + if ((insn & 0x0f0000f0) == 0x01000090) { 1114 + if ((insn & 0x0fb000f0) == 0x01000090) { 1115 + /* SWP/SWPB */ 1116 + return prep_emulate_rd12rn16rm0_wflags(insn, 1117 + asi); 1118 + } else { 1119 + /* STREX/LDREX variants and 
unallocaed space */ 1120 + return INSN_REJECTED; 1121 + } 1122 + 1125 1123 } else if ((insn & 0x0e1000d0) == 0x00000d0) { 1126 1124 /* STRD/LDRD */ 1125 + if ((insn & 0x0000e000) == 0x0000e000) 1126 + return INSN_REJECTED; /* Rd is LR or PC */ 1127 + if (is_writeback(insn) && is_r15(insn, 16)) 1128 + return INSN_REJECTED; /* Writeback to PC */ 1129 + 1127 1130 insn &= 0xfff00fff; 1128 1131 insn |= 0x00002000; /* Rn = r0, Rd = r2 */ 1129 - if (insn & (1 << 22)) { 1130 - /* I bit */ 1132 + if (!(insn & (1 << 22))) { 1133 + /* Register index */ 1131 1134 insn &= ~0xf; 1132 1135 insn |= 1; /* Rm = r1 */ 1133 1136 } ··· 1159 1118 return INSN_GOOD; 1160 1119 } 1161 1120 1121 + /* LDRH/STRH/LDRSB/LDRSH */ 1122 + if (is_r15(insn, 12)) 1123 + return INSN_REJECTED; /* Rd is PC */ 1162 1124 return prep_emulate_ldr_str(insn, asi); 1163 1125 } 1164 1126 ··· 1169 1125 1170 1126 /* 1171 1127 * ALU op with S bit and Rd == 15 : 1172 - * cccc 000x xxx1 xxxx 1111 xxxx xxxx xxxx 1128 + * cccc 000x xxx1 xxxx 1111 xxxx xxxx xxxx 1173 1129 */ 1174 1130 if ((insn & 0x0e10f000) == 0x0010f000) 1175 1131 return INSN_REJECTED; ··· 1198 1154 insn |= 0x00000200; /* Rs = r2 */ 1199 1155 } 1200 1156 asi->insn[0] = insn; 1201 - asi->insn_handler = (insn & (1 << 20)) ? /* S-bit */ 1157 + 1158 + if ((insn & 0x0f900000) == 0x01100000) { 1159 + /* 1160 + * TST : cccc 0001 0001 xxxx xxxx xxxx xxxx xxxx 1161 + * TEQ : cccc 0001 0011 xxxx xxxx xxxx xxxx xxxx 1162 + * CMP : cccc 0001 0101 xxxx xxxx xxxx xxxx xxxx 1163 + * CMN : cccc 0001 0111 xxxx xxxx xxxx xxxx xxxx 1164 + */ 1165 + asi->insn_handler = emulate_alu_tests; 1166 + } else { 1167 + /* ALU ops which write to Rd */ 1168 + asi->insn_handler = (insn & (1 << 20)) ? 
/* S-bit */ 1202 1169 emulate_alu_rwflags : emulate_alu_rflags; 1170 + } 1203 1171 return INSN_GOOD; 1204 1172 } 1205 1173 1206 1174 static enum kprobe_insn __kprobes 1207 1175 space_cccc_001x(kprobe_opcode_t insn, struct arch_specific_insn *asi) 1208 1176 { 1177 + /* MOVW : cccc 0011 0000 xxxx xxxx xxxx xxxx xxxx */ 1178 + /* MOVT : cccc 0011 0100 xxxx xxxx xxxx xxxx xxxx */ 1179 + if ((insn & 0x0fb00000) == 0x03000000) 1180 + return prep_emulate_rd12_modify(insn, asi); 1181 + 1182 + /* hints : cccc 0011 0010 0000 xxxx xxxx xxxx xxxx */ 1183 + if ((insn & 0x0fff0000) == 0x03200000) { 1184 + unsigned op2 = insn & 0x000000ff; 1185 + if (op2 == 0x01 || op2 == 0x04) { 1186 + /* YIELD : cccc 0011 0010 0000 xxxx xxxx 0000 0001 */ 1187 + /* SEV : cccc 0011 0010 0000 xxxx xxxx 0000 0100 */ 1188 + asi->insn[0] = insn; 1189 + asi->insn_handler = emulate_none; 1190 + return INSN_GOOD; 1191 + } else if (op2 <= 0x03) { 1192 + /* NOP : cccc 0011 0010 0000 xxxx xxxx 0000 0000 */ 1193 + /* WFE : cccc 0011 0010 0000 xxxx xxxx 0000 0010 */ 1194 + /* WFI : cccc 0011 0010 0000 xxxx xxxx 0000 0011 */ 1195 + /* 1196 + * We make WFE and WFI true NOPs to avoid stalls due 1197 + * to missing events whilst processing the probe. 
1198 + */ 1199 + asi->insn_handler = emulate_nop; 1200 + return INSN_GOOD_NO_SLOT; 1201 + } 1202 + /* For DBG and unallocated hints it's safest to reject them */ 1203 + return INSN_REJECTED; 1204 + } 1205 + 1209 1206 /* 1210 1207 * MSR : cccc 0011 0x10 xxxx xxxx xxxx xxxx xxxx 1211 - * Undef : cccc 0011 0100 xxxx xxxx xxxx xxxx xxxx 1212 1208 * ALU op with S bit and Rd == 15 : 1213 1209 * cccc 001x xxx1 xxxx 1111 xxxx xxxx xxxx 1214 1210 */ 1215 1211 if ((insn & 0x0fb00000) == 0x03200000 || /* MSR */ 1216 - (insn & 0x0ff00000) == 0x03400000 || /* Undef */ 1217 1212 (insn & 0x0e10f000) == 0x0210f000) /* ALU s-bit, R15 */ 1218 1213 return INSN_REJECTED; 1219 1214 ··· 1263 1180 * *S (bit 20) updates condition codes 1264 1181 * ADC/SBC/RSC reads the C flag 1265 1182 */ 1266 - insn &= 0xffff0fff; /* Rd = r0 */ 1183 + insn &= 0xfff00fff; /* Rn = r0 and Rd = r0 */ 1267 1184 asi->insn[0] = insn; 1268 - asi->insn_handler = (insn & (1 << 20)) ? /* S-bit */ 1185 + 1186 + if ((insn & 0x0f900000) == 0x03100000) { 1187 + /* 1188 + * TST : cccc 0011 0001 xxxx xxxx xxxx xxxx xxxx 1189 + * TEQ : cccc 0011 0011 xxxx xxxx xxxx xxxx xxxx 1190 + * CMP : cccc 0011 0101 xxxx xxxx xxxx xxxx xxxx 1191 + * CMN : cccc 0011 0111 xxxx xxxx xxxx xxxx xxxx 1192 + */ 1193 + asi->insn_handler = emulate_alu_tests_imm; 1194 + } else { 1195 + /* ALU ops which write to Rd */ 1196 + asi->insn_handler = (insn & (1 << 20)) ? /* S-bit */ 1269 1197 emulate_alu_imm_rwflags : emulate_alu_imm_rflags; 1198 + } 1270 1199 return INSN_GOOD; 1271 1200 } 1272 1201 ··· 1287 1192 { 1288 1193 /* SEL : cccc 0110 1000 xxxx xxxx xxxx 1011 xxxx GE: !!! 
*/ 1289 1194 if ((insn & 0x0ff000f0) == 0x068000b0) { 1195 + if (is_r15(insn, 12)) 1196 + return INSN_REJECTED; /* Rd is PC */ 1290 1197 insn &= 0xfff00ff0; /* Rd = r0, Rn = r0 */ 1291 1198 insn |= 0x00000001; /* Rm = r1 */ 1292 1199 asi->insn[0] = insn; ··· 1302 1205 /* USAT16 : cccc 0110 1110 xxxx xxxx xxxx 0011 xxxx :Q */ 1303 1206 if ((insn & 0x0fa00030) == 0x06a00010 || 1304 1207 (insn & 0x0fb000f0) == 0x06a00030) { 1208 + if (is_r15(insn, 12)) 1209 + return INSN_REJECTED; /* Rd is PC */ 1305 1210 insn &= 0xffff0ff0; /* Rd = r0, Rm = r0 */ 1306 1211 asi->insn[0] = insn; 1307 1212 asi->insn_handler = emulate_sat; ··· 1312 1213 1313 1214 /* REV : cccc 0110 1011 xxxx xxxx xxxx 0011 xxxx */ 1314 1215 /* REV16 : cccc 0110 1011 xxxx xxxx xxxx 1011 xxxx */ 1216 + /* RBIT : cccc 0110 1111 xxxx xxxx xxxx 0011 xxxx */ 1315 1217 /* REVSH : cccc 0110 1111 xxxx xxxx xxxx 1011 xxxx */ 1316 1218 if ((insn & 0x0ff00070) == 0x06b00030 || 1317 - (insn & 0x0ff000f0) == 0x06f000b0) 1219 + (insn & 0x0ff00070) == 0x06f00030) 1318 1220 return prep_emulate_rd12rm0(insn, asi); 1319 1221 1222 + /* ??? : cccc 0110 0000 xxxx xxxx xxxx xxx1 xxxx : */ 1320 1223 /* SADD16 : cccc 0110 0001 xxxx xxxx xxxx 0001 xxxx :GE */ 1321 1224 /* SADDSUBX : cccc 0110 0001 xxxx xxxx xxxx 0011 xxxx :GE */ 1322 1225 /* SSUBADDX : cccc 0110 0001 xxxx xxxx xxxx 0101 xxxx :GE */ 1323 1226 /* SSUB16 : cccc 0110 0001 xxxx xxxx xxxx 0111 xxxx :GE */ 1324 1227 /* SADD8 : cccc 0110 0001 xxxx xxxx xxxx 1001 xxxx :GE */ 1228 + /* ??? : cccc 0110 0001 xxxx xxxx xxxx 1011 xxxx : */ 1229 + /* ??? 
: cccc 0110 0001 xxxx xxxx xxxx 1101 xxxx : */ 1325 1230 /* SSUB8 : cccc 0110 0001 xxxx xxxx xxxx 1111 xxxx :GE */ 1326 1231 /* QADD16 : cccc 0110 0010 xxxx xxxx xxxx 0001 xxxx : */ 1327 1232 /* QADDSUBX : cccc 0110 0010 xxxx xxxx xxxx 0011 xxxx : */ 1328 1233 /* QSUBADDX : cccc 0110 0010 xxxx xxxx xxxx 0101 xxxx : */ 1329 1234 /* QSUB16 : cccc 0110 0010 xxxx xxxx xxxx 0111 xxxx : */ 1330 1235 /* QADD8 : cccc 0110 0010 xxxx xxxx xxxx 1001 xxxx : */ 1236 + /* ??? : cccc 0110 0010 xxxx xxxx xxxx 1011 xxxx : */ 1237 + /* ??? : cccc 0110 0010 xxxx xxxx xxxx 1101 xxxx : */ 1331 1238 /* QSUB8 : cccc 0110 0010 xxxx xxxx xxxx 1111 xxxx : */ 1332 1239 /* SHADD16 : cccc 0110 0011 xxxx xxxx xxxx 0001 xxxx : */ 1333 1240 /* SHADDSUBX : cccc 0110 0011 xxxx xxxx xxxx 0011 xxxx : */ 1334 1241 /* SHSUBADDX : cccc 0110 0011 xxxx xxxx xxxx 0101 xxxx : */ 1335 1242 /* SHSUB16 : cccc 0110 0011 xxxx xxxx xxxx 0111 xxxx : */ 1336 1243 /* SHADD8 : cccc 0110 0011 xxxx xxxx xxxx 1001 xxxx : */ 1244 + /* ??? : cccc 0110 0011 xxxx xxxx xxxx 1011 xxxx : */ 1245 + /* ??? : cccc 0110 0011 xxxx xxxx xxxx 1101 xxxx : */ 1337 1246 /* SHSUB8 : cccc 0110 0011 xxxx xxxx xxxx 1111 xxxx : */ 1247 + /* ??? : cccc 0110 0100 xxxx xxxx xxxx xxx1 xxxx : */ 1338 1248 /* UADD16 : cccc 0110 0101 xxxx xxxx xxxx 0001 xxxx :GE */ 1339 1249 /* UADDSUBX : cccc 0110 0101 xxxx xxxx xxxx 0011 xxxx :GE */ 1340 1250 /* USUBADDX : cccc 0110 0101 xxxx xxxx xxxx 0101 xxxx :GE */ 1341 1251 /* USUB16 : cccc 0110 0101 xxxx xxxx xxxx 0111 xxxx :GE */ 1342 1252 /* UADD8 : cccc 0110 0101 xxxx xxxx xxxx 1001 xxxx :GE */ 1253 + /* ??? : cccc 0110 0101 xxxx xxxx xxxx 1011 xxxx : */ 1254 + /* ??? 
: cccc 0110 0101 xxxx xxxx xxxx 1101 xxxx : */ 1343 1255 /* USUB8 : cccc 0110 0101 xxxx xxxx xxxx 1111 xxxx :GE */ 1344 1256 /* UQADD16 : cccc 0110 0110 xxxx xxxx xxxx 0001 xxxx : */ 1345 1257 /* UQADDSUBX : cccc 0110 0110 xxxx xxxx xxxx 0011 xxxx : */ 1346 1258 /* UQSUBADDX : cccc 0110 0110 xxxx xxxx xxxx 0101 xxxx : */ 1347 1259 /* UQSUB16 : cccc 0110 0110 xxxx xxxx xxxx 0111 xxxx : */ 1348 1260 /* UQADD8 : cccc 0110 0110 xxxx xxxx xxxx 1001 xxxx : */ 1261 + /* ??? : cccc 0110 0110 xxxx xxxx xxxx 1011 xxxx : */ 1262 + /* ??? : cccc 0110 0110 xxxx xxxx xxxx 1101 xxxx : */ 1349 1263 /* UQSUB8 : cccc 0110 0110 xxxx xxxx xxxx 1111 xxxx : */ 1350 1264 /* UHADD16 : cccc 0110 0111 xxxx xxxx xxxx 0001 xxxx : */ 1351 1265 /* UHADDSUBX : cccc 0110 0111 xxxx xxxx xxxx 0011 xxxx : */ 1352 1266 /* UHSUBADDX : cccc 0110 0111 xxxx xxxx xxxx 0101 xxxx : */ 1353 1267 /* UHSUB16 : cccc 0110 0111 xxxx xxxx xxxx 0111 xxxx : */ 1354 1268 /* UHADD8 : cccc 0110 0111 xxxx xxxx xxxx 1001 xxxx : */ 1269 + /* ??? : cccc 0110 0111 xxxx xxxx xxxx 1011 xxxx : */ 1270 + /* ??? : cccc 0110 0111 xxxx xxxx xxxx 1101 xxxx : */ 1355 1271 /* UHSUB8 : cccc 0110 0111 xxxx xxxx xxxx 1111 xxxx : */ 1272 + if ((insn & 0x0f800010) == 0x06000010) { 1273 + if ((insn & 0x00300000) == 0x00000000 || 1274 + (insn & 0x000000e0) == 0x000000a0 || 1275 + (insn & 0x000000e0) == 0x000000c0) 1276 + return INSN_REJECTED; /* Unallocated space */ 1277 + return prep_emulate_rd12rn16rm0_wflags(insn, asi); 1278 + } 1279 + 1356 1280 /* PKHBT : cccc 0110 1000 xxxx xxxx xxxx x001 xxxx : */ 1357 1281 /* PKHTB : cccc 0110 1000 xxxx xxxx xxxx x101 xxxx : */ 1282 + if ((insn & 0x0ff00030) == 0x06800010) 1283 + return prep_emulate_rd12rn16rm0_wflags(insn, asi); 1284 + 1358 1285 /* SXTAB16 : cccc 0110 1000 xxxx xxxx xxxx 0111 xxxx : */ 1359 - /* SXTB : cccc 0110 1010 xxxx xxxx xxxx 0111 xxxx : */ 1286 + /* SXTB16 : cccc 0110 1000 1111 xxxx xxxx 0111 xxxx : */ 1287 + /* ??? 
: cccc 0110 1001 xxxx xxxx xxxx 0111 xxxx : */ 1360 1288 /* SXTAB : cccc 0110 1010 xxxx xxxx xxxx 0111 xxxx : */ 1289 + /* SXTB : cccc 0110 1010 1111 xxxx xxxx 0111 xxxx : */ 1361 1290 /* SXTAH : cccc 0110 1011 xxxx xxxx xxxx 0111 xxxx : */ 1291 + /* SXTH : cccc 0110 1011 1111 xxxx xxxx 0111 xxxx : */ 1362 1292 /* UXTAB16 : cccc 0110 1100 xxxx xxxx xxxx 0111 xxxx : */ 1293 + /* UXTB16 : cccc 0110 1100 1111 xxxx xxxx 0111 xxxx : */ 1294 + /* ??? : cccc 0110 1101 xxxx xxxx xxxx 0111 xxxx : */ 1363 1295 /* UXTAB : cccc 0110 1110 xxxx xxxx xxxx 0111 xxxx : */ 1296 + /* UXTB : cccc 0110 1110 1111 xxxx xxxx 0111 xxxx : */ 1364 1297 /* UXTAH : cccc 0110 1111 xxxx xxxx xxxx 0111 xxxx : */ 1365 - return prep_emulate_rd12rn16rm0_wflags(insn, asi); 1298 + /* UXTH : cccc 0110 1111 1111 xxxx xxxx 0111 xxxx : */ 1299 + if ((insn & 0x0f8000f0) == 0x06800070) { 1300 + if ((insn & 0x00300000) == 0x00100000) 1301 + return INSN_REJECTED; /* Unallocated space */ 1302 + 1303 + if ((insn & 0x000f0000) == 0x000f0000) 1304 + return prep_emulate_rd12rm0(insn, asi); 1305 + else 1306 + return prep_emulate_rd12rn16rm0_wflags(insn, asi); 1307 + } 1308 + 1309 + /* Other instruction encodings aren't yet defined */ 1310 + return INSN_REJECTED; 1366 1311 } 1367 1312 1368 1313 static enum kprobe_insn __kprobes ··· 1416 1273 if ((insn & 0x0ff000f0) == 0x03f000f0) 1417 1274 return INSN_REJECTED; 1418 1275 1419 - /* USADA8 : cccc 0111 1000 xxxx xxxx xxxx 0001 xxxx */ 1420 - /* USAD8 : cccc 0111 1000 xxxx 1111 xxxx 0001 xxxx */ 1421 - if ((insn & 0x0ff000f0) == 0x07800010) 1422 - return prep_emulate_rd16rn12rs8rm0_wflags(insn, asi); 1423 - 1424 1276 /* SMLALD : cccc 0111 0100 xxxx xxxx xxxx 00x1 xxxx */ 1425 1277 /* SMLSLD : cccc 0111 0100 xxxx xxxx xxxx 01x1 xxxx */ 1426 1278 if ((insn & 0x0ff00090) == 0x07400010) 1427 1279 return prep_emulate_rdhi16rdlo12rs8rm0_wflags(insn, asi); 1428 1280 1429 1281 /* SMLAD : cccc 0111 0000 xxxx xxxx xxxx 00x1 xxxx :Q */ 1282 + /* SMUAD : cccc 0111 0000 xxxx 1111 
xxxx 00x1 xxxx :Q */ 1430 1283 /* SMLSD : cccc 0111 0000 xxxx xxxx xxxx 01x1 xxxx :Q */ 1284 + /* SMUSD : cccc 0111 0000 xxxx 1111 xxxx 01x1 xxxx : */ 1431 1285 /* SMMLA : cccc 0111 0101 xxxx xxxx xxxx 00x1 xxxx : */ 1432 - /* SMMLS : cccc 0111 0101 xxxx xxxx xxxx 11x1 xxxx : */ 1286 + /* SMMUL : cccc 0111 0101 xxxx 1111 xxxx 00x1 xxxx : */ 1287 + /* USADA8 : cccc 0111 1000 xxxx xxxx xxxx 0001 xxxx : */ 1288 + /* USAD8 : cccc 0111 1000 xxxx 1111 xxxx 0001 xxxx : */ 1433 1289 if ((insn & 0x0ff00090) == 0x07000010 || 1434 1290 (insn & 0x0ff000d0) == 0x07500010 || 1435 - (insn & 0x0ff000d0) == 0x075000d0) 1291 + (insn & 0x0ff000f0) == 0x07800010) { 1292 + 1293 + if ((insn & 0x0000f000) == 0x0000f000) 1294 + return prep_emulate_rd16rs8rm0_wflags(insn, asi); 1295 + else 1296 + return prep_emulate_rd16rn12rs8rm0_wflags(insn, asi); 1297 + } 1298 + 1299 + /* SMMLS : cccc 0111 0101 xxxx xxxx xxxx 11x1 xxxx : */ 1300 + if ((insn & 0x0ff000d0) == 0x075000d0) 1436 1301 return prep_emulate_rd16rn12rs8rm0_wflags(insn, asi); 1437 1302 1438 - /* SMUSD : cccc 0111 0000 xxxx xxxx xxxx 01x1 xxxx : */ 1439 - /* SMUAD : cccc 0111 0000 xxxx 1111 xxxx 00x1 xxxx :Q */ 1440 - /* SMMUL : cccc 0111 0101 xxxx 1111 xxxx 00x1 xxxx : */ 1441 - return prep_emulate_rd16rs8rm0_wflags(insn, asi); 1303 + /* SBFX : cccc 0111 101x xxxx xxxx xxxx x101 xxxx : */ 1304 + /* UBFX : cccc 0111 111x xxxx xxxx xxxx x101 xxxx : */ 1305 + if ((insn & 0x0fa00070) == 0x07a00050) 1306 + return prep_emulate_rd12rm0(insn, asi); 1307 + 1308 + /* BFI : cccc 0111 110x xxxx xxxx xxxx x001 xxxx : */ 1309 + /* BFC : cccc 0111 110x xxxx xxxx xxxx x001 1111 : */ 1310 + if ((insn & 0x0fe00070) == 0x07c00010) { 1311 + 1312 + if ((insn & 0x0000000f) == 0x0000000f) 1313 + return prep_emulate_rd12_modify(insn, asi); 1314 + else 1315 + return prep_emulate_rd12rn0_modify(insn, asi); 1316 + } 1317 + 1318 + return INSN_REJECTED; 1442 1319 } 1443 1320 1444 1321 static enum kprobe_insn __kprobes ··· 1472 1309 /* STRB : cccc 01xx x1x0 
xxxx xxxx xxxx xxxx xxxx */ 1473 1310 /* STRBT : cccc 01x0 x110 xxxx xxxx xxxx xxxx xxxx */ 1474 1311 /* STRT : cccc 01x0 x010 xxxx xxxx xxxx xxxx xxxx */ 1312 + 1313 + if ((insn & 0x00500000) == 0x00500000 && is_r15(insn, 12)) 1314 + return INSN_REJECTED; /* LDRB into PC */ 1315 + 1475 1316 return prep_emulate_ldr_str(insn, asi); 1476 1317 } 1477 1318 ··· 1490 1323 1491 1324 /* LDM(1) : cccc 100x x0x1 xxxx xxxx xxxx xxxx xxxx */ 1492 1325 /* STM(1) : cccc 100x x0x0 xxxx xxxx xxxx xxxx xxxx */ 1493 - asi->insn[0] = truecc_insn(insn); 1494 1326 asi->insn_handler = ((insn & 0x108000) == 0x008000) ? /* STM & R15 */ 1495 1327 simulate_stm1_pc : simulate_ldm1stm1; 1496 - return INSN_GOOD; 1328 + return INSN_GOOD_NO_SLOT; 1497 1329 } 1498 1330 1499 1331 static enum kprobe_insn __kprobes ··· 1500 1334 { 1501 1335 /* B : cccc 1010 xxxx xxxx xxxx xxxx xxxx xxxx */ 1502 1336 /* BL : cccc 1011 xxxx xxxx xxxx xxxx xxxx xxxx */ 1503 - asi->insn[0] = truecc_insn(insn); 1504 1337 asi->insn_handler = simulate_bbl; 1505 - return INSN_GOOD; 1338 + return INSN_GOOD_NO_SLOT; 1506 1339 } 1507 1340 1508 1341 static enum kprobe_insn __kprobes 1509 - space_cccc_1100_010x(kprobe_opcode_t insn, struct arch_specific_insn *asi) 1342 + space_cccc_11xx(kprobe_opcode_t insn, struct arch_specific_insn *asi) 1510 1343 { 1344 + /* Coprocessor instructions... */ 1511 1345 /* MCRR : cccc 1100 0100 xxxx xxxx xxxx xxxx xxxx : (Rd!=Rn) */ 1512 1346 /* MRRC : cccc 1100 0101 xxxx xxxx xxxx xxxx xxxx : (Rd!=Rn) */ 1513 - insn &= 0xfff00fff; 1514 - insn |= 0x00001000; /* Rn = r0, Rd = r1 */ 1515 - asi->insn[0] = insn; 1516 - asi->insn_handler = (insn & (1 << 20)) ? 
emulate_mrrc : emulate_mcrr; 1517 - return INSN_GOOD; 1347 + /* LDC : cccc 110x xxx1 xxxx xxxx xxxx xxxx xxxx */ 1348 + /* STC : cccc 110x xxx0 xxxx xxxx xxxx xxxx xxxx */ 1349 + /* CDP : cccc 1110 xxxx xxxx xxxx xxxx xxx0 xxxx */ 1350 + /* MCR : cccc 1110 xxx0 xxxx xxxx xxxx xxx1 xxxx */ 1351 + /* MRC : cccc 1110 xxx1 xxxx xxxx xxxx xxx1 xxxx */ 1352 + 1353 + /* SVC : cccc 1111 xxxx xxxx xxxx xxxx xxxx xxxx */ 1354 + 1355 + return INSN_REJECTED; 1518 1356 } 1519 1357 1520 - static enum kprobe_insn __kprobes 1521 - space_cccc_110x(kprobe_opcode_t insn, struct arch_specific_insn *asi) 1358 + static unsigned long __kprobes __check_eq(unsigned long cpsr) 1522 1359 { 1523 - /* LDC : cccc 110x xxx1 xxxx xxxx xxxx xxxx xxxx */ 1524 - /* STC : cccc 110x xxx0 xxxx xxxx xxxx xxxx xxxx */ 1525 - insn &= 0xfff0ffff; /* Rn = r0 */ 1526 - asi->insn[0] = insn; 1527 - asi->insn_handler = emulate_ldcstc; 1528 - return INSN_GOOD; 1360 + return cpsr & PSR_Z_BIT; 1529 1361 } 1530 1362 1531 - static enum kprobe_insn __kprobes 1532 - space_cccc_111x(kprobe_opcode_t insn, struct arch_specific_insn *asi) 1363 + static unsigned long __kprobes __check_ne(unsigned long cpsr) 1533 1364 { 1534 - /* BKPT : 1110 0001 0010 xxxx xxxx xxxx 0111 xxxx */ 1535 - /* SWI : cccc 1111 xxxx xxxx xxxx xxxx xxxx xxxx */ 1536 - if ((insn & 0xfff000f0) == 0xe1200070 || 1537 - (insn & 0x0f000000) == 0x0f000000) 1538 - return INSN_REJECTED; 1539 - 1540 - /* CDP : cccc 1110 xxxx xxxx xxxx xxxx xxx0 xxxx */ 1541 - if ((insn & 0x0f000010) == 0x0e000000) { 1542 - asi->insn[0] = insn; 1543 - asi->insn_handler = emulate_none; 1544 - return INSN_GOOD; 1545 - } 1546 - 1547 - /* MCR : cccc 1110 xxx0 xxxx xxxx xxxx xxx1 xxxx */ 1548 - /* MRC : cccc 1110 xxx1 xxxx xxxx xxxx xxx1 xxxx */ 1549 - insn &= 0xffff0fff; /* Rd = r0 */ 1550 - asi->insn[0] = insn; 1551 - asi->insn_handler = (insn & (1 << 20)) ? 
emulate_rd12 : emulate_ird12; 1552 - return INSN_GOOD; 1365 + return (~cpsr) & PSR_Z_BIT; 1553 1366 } 1367 + 1368 + static unsigned long __kprobes __check_cs(unsigned long cpsr) 1369 + { 1370 + return cpsr & PSR_C_BIT; 1371 + } 1372 + 1373 + static unsigned long __kprobes __check_cc(unsigned long cpsr) 1374 + { 1375 + return (~cpsr) & PSR_C_BIT; 1376 + } 1377 + 1378 + static unsigned long __kprobes __check_mi(unsigned long cpsr) 1379 + { 1380 + return cpsr & PSR_N_BIT; 1381 + } 1382 + 1383 + static unsigned long __kprobes __check_pl(unsigned long cpsr) 1384 + { 1385 + return (~cpsr) & PSR_N_BIT; 1386 + } 1387 + 1388 + static unsigned long __kprobes __check_vs(unsigned long cpsr) 1389 + { 1390 + return cpsr & PSR_V_BIT; 1391 + } 1392 + 1393 + static unsigned long __kprobes __check_vc(unsigned long cpsr) 1394 + { 1395 + return (~cpsr) & PSR_V_BIT; 1396 + } 1397 + 1398 + static unsigned long __kprobes __check_hi(unsigned long cpsr) 1399 + { 1400 + cpsr &= ~(cpsr >> 1); /* PSR_C_BIT &= ~PSR_Z_BIT */ 1401 + return cpsr & PSR_C_BIT; 1402 + } 1403 + 1404 + static unsigned long __kprobes __check_ls(unsigned long cpsr) 1405 + { 1406 + cpsr &= ~(cpsr >> 1); /* PSR_C_BIT &= ~PSR_Z_BIT */ 1407 + return (~cpsr) & PSR_C_BIT; 1408 + } 1409 + 1410 + static unsigned long __kprobes __check_ge(unsigned long cpsr) 1411 + { 1412 + cpsr ^= (cpsr << 3); /* PSR_N_BIT ^= PSR_V_BIT */ 1413 + return (~cpsr) & PSR_N_BIT; 1414 + } 1415 + 1416 + static unsigned long __kprobes __check_lt(unsigned long cpsr) 1417 + { 1418 + cpsr ^= (cpsr << 3); /* PSR_N_BIT ^= PSR_V_BIT */ 1419 + return cpsr & PSR_N_BIT; 1420 + } 1421 + 1422 + static unsigned long __kprobes __check_gt(unsigned long cpsr) 1423 + { 1424 + unsigned long temp = cpsr ^ (cpsr << 3); /* PSR_N_BIT ^= PSR_V_BIT */ 1425 + temp |= (cpsr << 1); /* PSR_N_BIT |= PSR_Z_BIT */ 1426 + return (~temp) & PSR_N_BIT; 1427 + } 1428 + 1429 + static unsigned long __kprobes __check_le(unsigned long cpsr) 1430 + { 1431 + unsigned long temp = cpsr ^ (cpsr 
<< 3); /* PSR_N_BIT ^= PSR_V_BIT */ 1432 + temp |= (cpsr << 1); /* PSR_N_BIT |= PSR_Z_BIT */ 1433 + return temp & PSR_N_BIT; 1434 + } 1435 + 1436 + static unsigned long __kprobes __check_al(unsigned long cpsr) 1437 + { 1438 + return true; 1439 + } 1440 + 1441 + static kprobe_check_cc * const condition_checks[16] = { 1442 + &__check_eq, &__check_ne, &__check_cs, &__check_cc, 1443 + &__check_mi, &__check_pl, &__check_vs, &__check_vc, 1444 + &__check_hi, &__check_ls, &__check_ge, &__check_lt, 1445 + &__check_gt, &__check_le, &__check_al, &__check_al 1446 + }; 1554 1447 1555 1448 /* Return: 1556 1449 * INSN_REJECTED If instruction is one not allowed to kprobe, ··· 1626 1401 enum kprobe_insn __kprobes 1627 1402 arm_kprobe_decode_insn(kprobe_opcode_t insn, struct arch_specific_insn *asi) 1628 1403 { 1404 + asi->insn_check_cc = condition_checks[insn>>28]; 1629 1405 asi->insn[1] = KPROBE_RETURN_INSTRUCTION; 1630 1406 1631 - if ((insn & 0xf0000000) == 0xf0000000) { 1407 + if ((insn & 0xf0000000) == 0xf0000000) 1632 1408 1633 1409 return space_1111(insn, asi); 1634 1410 1635 - } else if ((insn & 0x0e000000) == 0x00000000) { 1411 + else if ((insn & 0x0e000000) == 0x00000000) 1636 1412 1637 1413 return space_cccc_000x(insn, asi); 1638 1414 1639 - } else if ((insn & 0x0e000000) == 0x02000000) { 1415 + else if ((insn & 0x0e000000) == 0x02000000) 1640 1416 1641 1417 return space_cccc_001x(insn, asi); 1642 1418 1643 - } else if ((insn & 0x0f000010) == 0x06000010) { 1419 + else if ((insn & 0x0f000010) == 0x06000010) 1644 1420 1645 1421 return space_cccc_0110__1(insn, asi); 1646 1422 1647 - } else if ((insn & 0x0f000010) == 0x07000010) { 1423 + else if ((insn & 0x0f000010) == 0x07000010) 1648 1424 1649 1425 return space_cccc_0111__1(insn, asi); 1650 1426 1651 - } else if ((insn & 0x0c000000) == 0x04000000) { 1427 + else if ((insn & 0x0c000000) == 0x04000000) 1652 1428 1653 1429 return space_cccc_01xx(insn, asi); 1654 1430 1655 - } else if ((insn & 0x0e000000) == 0x08000000) { 1431 + 
else if ((insn & 0x0e000000) == 0x08000000) 1656 1432 1657 1433 return space_cccc_100x(insn, asi); 1658 1434 1659 - } else if ((insn & 0x0e000000) == 0x0a000000) { 1435 + else if ((insn & 0x0e000000) == 0x0a000000) 1660 1436 1661 1437 return space_cccc_101x(insn, asi); 1662 1438 1663 - } else if ((insn & 0x0fe00000) == 0x0c400000) { 1664 - 1665 - return space_cccc_1100_010x(insn, asi); 1666 - 1667 - } else if ((insn & 0x0e000000) == 0x0c000000) { 1668 - 1669 - return space_cccc_110x(insn, asi); 1670 - 1671 - } 1672 - 1673 - return space_cccc_111x(insn, asi); 1439 + return space_cccc_11xx(insn, asi); 1674 1440 } 1675 1441 1676 1442 void __init arm_kprobe_decode_init(void) 1677 1443 { 1678 1444 find_str_pc_offset(); 1679 1445 } 1680 - 1681 - 1682 - /* 1683 - * All ARM instructions listed below. 1684 - * 1685 - * Instructions and their general purpose registers are given. 1686 - * If a particular register may not use R15, it is prefixed with a "!". 1687 - * If marked with a "*" means the value returned by reading R15 1688 - * is implementation defined. 
1689 - * 1690 - * ADC/ADD/AND/BIC/CMN/CMP/EOR/MOV/MVN/ORR/RSB/RSC/SBC/SUB/TEQ 1691 - * TST: Rd, Rn, Rm, !Rs 1692 - * BX: Rm 1693 - * BLX(2): !Rm 1694 - * BX: Rm (R15 legal, but discouraged) 1695 - * BXJ: !Rm, 1696 - * CLZ: !Rd, !Rm 1697 - * CPY: Rd, Rm 1698 - * LDC/2,STC/2 immediate offset & unindex: Rn 1699 - * LDC/2,STC/2 immediate pre/post-indexed: !Rn 1700 - * LDM(1/3): !Rn, register_list 1701 - * LDM(2): !Rn, !register_list 1702 - * LDR,STR,PLD immediate offset: Rd, Rn 1703 - * LDR,STR,PLD register offset: Rd, Rn, !Rm 1704 - * LDR,STR,PLD scaled register offset: Rd, !Rn, !Rm 1705 - * LDR,STR immediate pre/post-indexed: Rd, !Rn 1706 - * LDR,STR register pre/post-indexed: Rd, !Rn, !Rm 1707 - * LDR,STR scaled register pre/post-indexed: Rd, !Rn, !Rm 1708 - * LDRB,STRB immediate offset: !Rd, Rn 1709 - * LDRB,STRB register offset: !Rd, Rn, !Rm 1710 - * LDRB,STRB scaled register offset: !Rd, !Rn, !Rm 1711 - * LDRB,STRB immediate pre/post-indexed: !Rd, !Rn 1712 - * LDRB,STRB register pre/post-indexed: !Rd, !Rn, !Rm 1713 - * LDRB,STRB scaled register pre/post-indexed: !Rd, !Rn, !Rm 1714 - * LDRT,LDRBT,STRBT immediate pre/post-indexed: !Rd, !Rn 1715 - * LDRT,LDRBT,STRBT register pre/post-indexed: !Rd, !Rn, !Rm 1716 - * LDRT,LDRBT,STRBT scaled register pre/post-indexed: !Rd, !Rn, !Rm 1717 - * LDRH/SH/SB/D,STRH/SH/SB/D immediate offset: !Rd, Rn 1718 - * LDRH/SH/SB/D,STRH/SH/SB/D register offset: !Rd, Rn, !Rm 1719 - * LDRH/SH/SB/D,STRH/SH/SB/D immediate pre/post-indexed: !Rd, !Rn 1720 - * LDRH/SH/SB/D,STRH/SH/SB/D register pre/post-indexed: !Rd, !Rn, !Rm 1721 - * LDREX: !Rd, !Rn 1722 - * MCR/2: !Rd 1723 - * MCRR/2,MRRC/2: !Rd, !Rn 1724 - * MLA: !Rd, !Rn, !Rm, !Rs 1725 - * MOV: Rd 1726 - * MRC/2: !Rd (if Rd==15, only changes cond codes, not the register) 1727 - * MRS,MSR: !Rd 1728 - * MUL: !Rd, !Rm, !Rs 1729 - * PKH{BT,TB}: !Rd, !Rn, !Rm 1730 - * QDADD,[U]QADD/16/8/SUBX: !Rd, !Rm, !Rn 1731 - * QDSUB,[U]QSUB/16/8/ADDX: !Rd, !Rm, !Rn 1732 - * REV/16/SH: !Rd, !Rm 1733 - * RFE: 
!Rn 1734 - * {S,U}[H]ADD{16,8,SUBX},{S,U}[H]SUB{16,8,ADDX}: !Rd, !Rn, !Rm 1735 - * SEL: !Rd, !Rn, !Rm 1736 - * SMLA<x><y>,SMLA{D,W<y>},SMLSD,SMML{A,S}: !Rd, !Rn, !Rm, !Rs 1737 - * SMLAL<x><y>,SMLA{D,LD},SMLSLD,SMMULL,SMULW<y>: !RdHi, !RdLo, !Rm, !Rs 1738 - * SMMUL,SMUAD,SMUL<x><y>,SMUSD: !Rd, !Rm, !Rs 1739 - * SSAT/16: !Rd, !Rm 1740 - * STM(1/2): !Rn, register_list* (R15 in reg list not recommended) 1741 - * STRT immediate pre/post-indexed: Rd*, !Rn 1742 - * STRT register pre/post-indexed: Rd*, !Rn, !Rm 1743 - * STRT scaled register pre/post-indexed: Rd*, !Rn, !Rm 1744 - * STREX: !Rd, !Rn, !Rm 1745 - * SWP/B: !Rd, !Rn, !Rm 1746 - * {S,U}XTA{B,B16,H}: !Rd, !Rn, !Rm 1747 - * {S,U}XT{B,B16,H}: !Rd, !Rm 1748 - * UM{AA,LA,UL}L: !RdHi, !RdLo, !Rm, !Rs 1749 - * USA{D8,A8,T,T16}: !Rd, !Rm, !Rs 1750 - * 1751 - * May transfer control by writing R15 (possible mode changes or alternate 1752 - * mode accesses marked by "*"): 1753 - * ALU op (* with s-bit), B, BL, BKPT, BLX(1/2), BX, BXJ, CPS*, CPY, 1754 - * LDM(1), LDM(2/3)*, LDR, MOV, RFE*, SWI* 1755 - * 1756 - * Instructions that do not take general registers, nor transfer control: 1757 - * CDP/2, SETEND, SRS* 1758 - */
+2 -1
arch/arm/kernel/kprobes.c
··· 134 134 struct kprobe_ctlblk *kcb) 135 135 { 136 136 regs->ARM_pc += 4; 137 - p->ainsn.insn_handler(p, regs); 137 + if (p->ainsn.insn_check_cc(regs->ARM_cpsr)) 138 + p->ainsn.insn_handler(p, regs); 138 139 } 139 140 140 141 /*
+2 -1
arch/arm/kernel/perf_event.c
··· 746 746 747 747 tail = (struct frame_tail __user *)regs->ARM_fp - 1; 748 748 749 - while (tail && !((unsigned long)tail & 0x3)) 749 + while ((entry->nr < PERF_MAX_STACK_DEPTH) && 750 + tail && !((unsigned long)tail & 0x3)) 750 751 tail = user_backtrace(tail, entry); 751 752 } 752 753
+8
arch/arm/kernel/ptrace.c
··· 767 767 768 768 #ifdef CONFIG_HAVE_HW_BREAKPOINT 769 769 case PTRACE_GETHBPREGS: 770 + if (ptrace_get_breakpoints(child) < 0) 771 + return -ESRCH; 772 + 770 773 ret = ptrace_gethbpregs(child, addr, 771 774 (unsigned long __user *)data); 775 + ptrace_put_breakpoints(child); 772 776 break; 773 777 case PTRACE_SETHBPREGS: 778 + if (ptrace_get_breakpoints(child) < 0) 779 + return -ESRCH; 780 + 774 781 ret = ptrace_sethbpregs(child, addr, 775 782 (unsigned long __user *)data); 783 + ptrace_put_breakpoints(child); 776 784 break; 777 785 #endif 778 786
+53 -37
arch/arm/kernel/signal.c
··· 597 597 return err; 598 598 } 599 599 600 - static inline void setup_syscall_restart(struct pt_regs *regs) 601 - { 602 - regs->ARM_r0 = regs->ARM_ORIG_r0; 603 - regs->ARM_pc -= thumb_mode(regs) ? 2 : 4; 604 - } 605 - 606 600 /* 607 601 * OK, we're invoking a handler 608 602 */ 609 603 static int 610 604 handle_signal(unsigned long sig, struct k_sigaction *ka, 611 605 siginfo_t *info, sigset_t *oldset, 612 - struct pt_regs * regs, int syscall) 606 + struct pt_regs * regs) 613 607 { 614 608 struct thread_info *thread = current_thread_info(); 615 609 struct task_struct *tsk = current; 616 610 int usig = sig; 617 611 int ret; 618 - 619 - /* 620 - * If we were from a system call, check for system call restarting... 621 - */ 622 - if (syscall) { 623 - switch (regs->ARM_r0) { 624 - case -ERESTART_RESTARTBLOCK: 625 - case -ERESTARTNOHAND: 626 - regs->ARM_r0 = -EINTR; 627 - break; 628 - case -ERESTARTSYS: 629 - if (!(ka->sa.sa_flags & SA_RESTART)) { 630 - regs->ARM_r0 = -EINTR; 631 - break; 632 - } 633 - /* fallthrough */ 634 - case -ERESTARTNOINTR: 635 - setup_syscall_restart(regs); 636 - } 637 - } 638 612 639 613 /* 640 614 * translate the signal ··· 659 685 */ 660 686 static void do_signal(struct pt_regs *regs, int syscall) 661 687 { 688 + unsigned int retval = 0, continue_addr = 0, restart_addr = 0; 662 689 struct k_sigaction ka; 663 690 siginfo_t info; 664 691 int signr; ··· 673 698 if (!user_mode(regs)) 674 699 return; 675 700 701 + /* 702 + * If we were from a system call, check for system call restarting... 703 + */ 704 + if (syscall) { 705 + continue_addr = regs->ARM_pc; 706 + restart_addr = continue_addr - (thumb_mode(regs) ? 2 : 4); 707 + retval = regs->ARM_r0; 708 + 709 + /* 710 + * Prepare for system call restart. We do this here so that a 711 + * debugger will see the already changed PSW. 
712 + */ 713 + switch (retval) { 714 + case -ERESTARTNOHAND: 715 + case -ERESTARTSYS: 716 + case -ERESTARTNOINTR: 717 + regs->ARM_r0 = regs->ARM_ORIG_r0; 718 + regs->ARM_pc = restart_addr; 719 + break; 720 + case -ERESTART_RESTARTBLOCK: 721 + regs->ARM_r0 = -EINTR; 722 + break; 723 + } 724 + } 725 + 676 726 if (try_to_freeze()) 677 727 goto no_signal; 678 728 729 + /* 730 + * Get the signal to deliver. When running under ptrace, at this 731 + * point the debugger may change all our registers ... 732 + */ 679 733 signr = get_signal_to_deliver(&info, &ka, regs, NULL); 680 734 if (signr > 0) { 681 735 sigset_t *oldset; 736 + 737 + /* 738 + * Depending on the signal settings we may need to revert the 739 + * decision to restart the system call. But skip this if a 740 + * debugger has chosen to restart at a different PC. 741 + */ 742 + if (regs->ARM_pc == restart_addr) { 743 + if (retval == -ERESTARTNOHAND 744 + || (retval == -ERESTARTSYS 745 + && !(ka.sa.sa_flags & SA_RESTART))) { 746 + regs->ARM_r0 = -EINTR; 747 + regs->ARM_pc = continue_addr; 748 + } 749 + } 682 750 683 751 if (test_thread_flag(TIF_RESTORE_SIGMASK)) 684 752 oldset = &current->saved_sigmask; 685 753 else 686 754 oldset = &current->blocked; 687 - if (handle_signal(signr, &ka, &info, oldset, regs, syscall) == 0) { 755 + if (handle_signal(signr, &ka, &info, oldset, regs) == 0) { 688 756 /* 689 757 * A signal was successfully delivered; the saved 690 758 * sigmask will have been stored in the signal frame, ··· 741 723 } 742 724 743 725 no_signal: 744 - /* 745 - * No signal to deliver to the process - restart the syscall. 746 - */ 747 726 if (syscall) { 748 - if (regs->ARM_r0 == -ERESTART_RESTARTBLOCK) { 727 + /* 728 + * Handle restarting a different system call. As above, 729 + * if a debugger has chosen to restart at a different PC, 730 + * ignore the restart. 
731 + */ 732 + if (retval == -ERESTART_RESTARTBLOCK 733 + && regs->ARM_pc == continue_addr) { 749 734 if (thumb_mode(regs)) { 750 735 regs->ARM_r7 = __NR_restart_syscall - __NR_SYSCALL_BASE; 751 736 regs->ARM_pc -= 2; ··· 770 749 } 771 750 #endif 772 751 } 773 - } 774 - if (regs->ARM_r0 == -ERESTARTNOHAND || 775 - regs->ARM_r0 == -ERESTARTSYS || 776 - regs->ARM_r0 == -ERESTARTNOINTR) { 777 - setup_syscall_restart(regs); 778 752 } 779 753 780 754 /* If there's no signal to deliver, we just put the saved sigmask
+1 -1
arch/arm/kernel/smp.c
··· 479 479 { 480 480 } 481 481 482 - static void broadcast_timer_setup(struct clock_event_device *evt) 482 + static void __cpuinit broadcast_timer_setup(struct clock_event_device *evt) 483 483 { 484 484 evt->name = "dummy_timer"; 485 485 evt->features = CLOCK_EVT_FEAT_ONESHOT |
+1 -1
arch/arm/kernel/sys_oabi-compat.c
··· 311 311 long err; 312 312 int i; 313 313 314 - if (nsops < 1) 314 + if (nsops < 1 || nsops > SEMOPM) 315 315 return -EINVAL; 316 316 sops = kmalloc(sizeof(*sops) * nsops, GFP_KERNEL); 317 317 if (!sops)
+1
arch/arm/mach-at91/Kconfig
··· 83 83 select CPU_ARM926T 84 84 select GENERIC_CLOCKEVENTS 85 85 select HAVE_FB_ATMEL 86 + select HAVE_NET_MACB 86 87 87 88 config ARCH_AT572D940HF 88 89 bool "AT572D940HF"
+6 -1
arch/arm/mach-at91/board-eb01.c
··· 30 30 #include <mach/board.h> 31 31 #include "generic.h" 32 32 33 + static void __init at91eb01_init_irq(void) 34 + { 35 + at91x40_init_interrupts(NULL); 36 + } 37 + 33 38 static void __init at91eb01_map_io(void) 34 39 { 35 40 at91x40_initialize(40000000); ··· 43 38 MACHINE_START(AT91EB01, "Atmel AT91 EB01") 44 39 /* Maintainer: Greg Ungerer <gerg@snapgear.com> */ 45 40 .timer = &at91x40_timer, 46 - .init_irq = at91x40_init_interrupts, 41 + .init_irq = at91eb01_init_irq, 47 42 .map_io = at91eb01_map_io, 48 43 MACHINE_END 49 44
+28
arch/arm/mach-at91/include/mach/cpu.h
··· 27 27 #define ARCH_ID_AT91SAM9G45 0x819b05a0 28 28 #define ARCH_ID_AT91SAM9G45MRL 0x819b05a2 /* aka 9G45-ES2 & non ES lots */ 29 29 #define ARCH_ID_AT91SAM9G45ES 0x819b05a1 /* 9G45-ES (Engineering Sample) */ 30 + #define ARCH_ID_AT91SAM9X5 0x819a05a0 30 31 #define ARCH_ID_AT91CAP9 0x039A03A0 31 32 32 33 #define ARCH_ID_AT91SAM9XE128 0x329973a0 ··· 55 54 #define ARCH_EXID_AT91SAM9M10 0x00000002 56 55 #define ARCH_EXID_AT91SAM9G46 0x00000003 57 56 #define ARCH_EXID_AT91SAM9G45 0x00000004 57 + 58 + #define ARCH_EXID_AT91SAM9G15 0x00000000 59 + #define ARCH_EXID_AT91SAM9G35 0x00000001 60 + #define ARCH_EXID_AT91SAM9X35 0x00000002 61 + #define ARCH_EXID_AT91SAM9G25 0x00000003 62 + #define ARCH_EXID_AT91SAM9X25 0x00000004 58 63 59 64 static inline unsigned long at91_exid_identify(void) 60 65 { ··· 148 141 #define cpu_is_at91sam9m10() (0) 149 142 #define cpu_is_at91sam9g46() (0) 150 143 #define cpu_is_at91sam9m11() (0) 144 + #endif 145 + 146 + #ifdef CONFIG_ARCH_AT91SAM9X5 147 + #define cpu_is_at91sam9x5() (at91_cpu_identify() == ARCH_ID_AT91SAM9X5) 148 + #define cpu_is_at91sam9g15() (cpu_is_at91sam9x5() && \ 149 + (at91_exid_identify() == ARCH_EXID_AT91SAM9G15)) 150 + #define cpu_is_at91sam9g35() (cpu_is_at91sam9x5() && \ 151 + (at91_exid_identify() == ARCH_EXID_AT91SAM9G35)) 152 + #define cpu_is_at91sam9x35() (cpu_is_at91sam9x5() && \ 153 + (at91_exid_identify() == ARCH_EXID_AT91SAM9X35)) 154 + #define cpu_is_at91sam9g25() (cpu_is_at91sam9x5() && \ 155 + (at91_exid_identify() == ARCH_EXID_AT91SAM9G25)) 156 + #define cpu_is_at91sam9x25() (cpu_is_at91sam9x5() && \ 157 + (at91_exid_identify() == ARCH_EXID_AT91SAM9X25)) 158 + #else 159 + #define cpu_is_at91sam9x5() (0) 160 + #define cpu_is_at91sam9g15() (0) 161 + #define cpu_is_at91sam9g35() (0) 162 + #define cpu_is_at91sam9x35() (0) 163 + #define cpu_is_at91sam9g25() (0) 164 + #define cpu_is_at91sam9x25() (0) 151 165 #endif 152 166 153 167 #ifdef CONFIG_ARCH_AT91CAP9
+6
arch/arm/mach-davinci/Kconfig
··· 63 63 depends on ARCH_DAVINCI_DM644x 64 64 select MISC_DEVICES 65 65 select EEPROM_AT24 66 + select I2C 66 67 help 67 68 Configure this option to specify the whether the board used 68 69 for development is a DM644x EVM ··· 73 72 depends on ARCH_DAVINCI_DM644x 74 73 select MISC_DEVICES 75 74 select EEPROM_AT24 75 + select I2C 76 76 help 77 77 Say Y here to select the Lyrtech Small Form Factor 78 78 Software Defined Radio (SFFSDR) board. ··· 107 105 select MACH_DAVINCI_DM6467TEVM 108 106 select MISC_DEVICES 109 107 select EEPROM_AT24 108 + select I2C 110 109 help 111 110 Configure this option to specify the whether the board used 112 111 for development is a DM6467 EVM ··· 121 118 depends on ARCH_DAVINCI_DM365 122 119 select MISC_DEVICES 123 120 select EEPROM_AT24 121 + select I2C 124 122 help 125 123 Configure this option to specify whether the board used 126 124 for development is a DM365 EVM ··· 133 129 select GPIO_PCF857X 134 130 select MISC_DEVICES 135 131 select EEPROM_AT24 132 + select I2C 136 133 help 137 134 Say Y here to select the TI DA830/OMAP-L137/AM17x Evaluation Module. 138 135 ··· 210 205 depends on ARCH_DAVINCI_DA850 211 206 select MISC_DEVICES 212 207 select EEPROM_AT24 208 + select I2C 213 209 help 214 210 Say Y here to select the Critical Link MityDSP-L138/MityARM-1808 215 211 System on Module. Information on this SoM may be found at
+2 -2
arch/arm/mach-davinci/board-mityomapl138.c
··· 29 29 #include <mach/mux.h> 30 30 #include <mach/spi.h> 31 31 32 - #define MITYOMAPL138_PHY_ID "0:03" 32 + #define MITYOMAPL138_PHY_ID "" 33 33 34 34 #define FACTORY_CONFIG_MAGIC 0x012C0138 35 35 #define FACTORY_CONFIG_VERSION 0x00010001 ··· 414 414 415 415 static struct platform_device mityomapl138_nandflash_device = { 416 416 .name = "davinci_nand", 417 - .id = 0, 417 + .id = 1, 418 418 .dev = { 419 419 .platform_data = &mityomapl138_nandflash_data, 420 420 },
+9 -3
arch/arm/mach-davinci/devices-da8xx.c
··· 39 39 #define DA8XX_GPIO_BASE 0x01e26000 40 40 #define DA8XX_I2C1_BASE 0x01e28000 41 41 #define DA8XX_SPI0_BASE 0x01c41000 42 - #define DA8XX_SPI1_BASE 0x01f0e000 42 + #define DA830_SPI1_BASE 0x01e12000 43 + #define DA850_SPI1_BASE 0x01f0e000 43 44 44 45 #define DA8XX_EMAC_CTRL_REG_OFFSET 0x3000 45 46 #define DA8XX_EMAC_MOD_REG_OFFSET 0x2000 ··· 763 762 764 763 static struct resource da8xx_spi1_resources[] = { 765 764 [0] = { 766 - .start = DA8XX_SPI1_BASE, 767 - .end = DA8XX_SPI1_BASE + SZ_4K - 1, 765 + .start = DA830_SPI1_BASE, 766 + .end = DA830_SPI1_BASE + SZ_4K - 1, 768 767 .flags = IORESOURCE_MEM, 769 768 }, 770 769 [1] = { ··· 832 831 " %d\n", __func__, instance, ret); 833 832 834 833 da8xx_spi_pdata[instance].num_chipselect = len; 834 + 835 + if (instance == 1 && cpu_is_davinci_da850()) { 836 + da8xx_spi1_resources[0].start = DA850_SPI1_BASE; 837 + da8xx_spi1_resources[0].end = DA850_SPI1_BASE + SZ_4K - 1; 838 + } 835 839 836 840 return platform_device_register(&da8xx_spi_device[instance]); 837 841 }
+1 -1
arch/arm/mach-davinci/dm355.c
··· 314 314 .name = "timer2", 315 315 .parent = &pll1_aux_clk, 316 316 .lpsc = DAVINCI_LPSC_TIMER2, 317 - .usecount = 1, /* REVISIT: why can't' this be disabled? */ 317 + .usecount = 1, /* REVISIT: why can't this be disabled? */ 318 318 }; 319 319 320 320 static struct clk timer3_clk = {
+1 -1
arch/arm/mach-davinci/dm644x.c
··· 274 274 .name = "timer2", 275 275 .parent = &pll1_aux_clk, 276 276 .lpsc = DAVINCI_LPSC_TIMER2, 277 - .usecount = 1, /* REVISIT: why can't' this be disabled? */ 277 + .usecount = 1, /* REVISIT: why can't this be disabled? */ 278 278 }; 279 279 280 280 static struct clk_lookup dm644x_clks[] = {
+8 -5
arch/arm/mach-davinci/include/mach/debug-macro.S
··· 24 24 25 25 #define UART_SHIFT 2 26 26 27 + #define davinci_uart_v2p(x) ((x) - PAGE_OFFSET + PLAT_PHYS_OFFSET) 28 + #define davinci_uart_p2v(x) ((x) - PLAT_PHYS_OFFSET + PAGE_OFFSET) 29 + 27 30 .pushsection .data 28 31 davinci_uart_phys: .word 0 29 32 davinci_uart_virt: .word 0 ··· 37 34 /* Use davinci_uart_phys/virt if already configured */ 38 35 10: mrc p15, 0, \rp, c1, c0 39 36 tst \rp, #1 @ MMU enabled? 40 - ldreq \rp, =__virt_to_phys(davinci_uart_phys) 37 + ldreq \rp, =davinci_uart_v2p(davinci_uart_phys) 41 38 ldrne \rp, =davinci_uart_phys 42 39 add \rv, \rp, #4 @ davinci_uart_virt 43 40 ldr \rp, [\rp, #0] ··· 51 48 tst \rp, #1 @ MMU enabled? 52 49 53 50 /* Copy uart phys address from decompressor uart info */ 54 - ldreq \rv, =__virt_to_phys(davinci_uart_phys) 51 + ldreq \rv, =davinci_uart_v2p(davinci_uart_phys) 55 52 ldrne \rv, =davinci_uart_phys 56 53 ldreq \rp, =DAVINCI_UART_INFO 57 - ldrne \rp, =__phys_to_virt(DAVINCI_UART_INFO) 54 + ldrne \rp, =davinci_uart_p2v(DAVINCI_UART_INFO) 58 55 ldr \rp, [\rp, #0] 59 56 str \rp, [\rv] 60 57 61 58 /* Copy uart virt address from decompressor uart info */ 62 - ldreq \rv, =__virt_to_phys(davinci_uart_virt) 59 + ldreq \rv, =davinci_uart_v2p(davinci_uart_virt) 63 60 ldrne \rv, =davinci_uart_virt 64 61 ldreq \rp, =DAVINCI_UART_INFO 65 - ldrne \rp, =__phys_to_virt(DAVINCI_UART_INFO) 62 + ldrne \rp, =davinci_uart_p2v(DAVINCI_UART_INFO) 66 63 ldr \rp, [\rp, #4] 67 64 str \rp, [\rv] 68 65
+1 -1
arch/arm/mach-davinci/include/mach/serial.h
··· 22 22 * 23 23 * This area sits just below the page tables (see arch/arm/kernel/head.S). 24 24 */ 25 - #define DAVINCI_UART_INFO (PHYS_OFFSET + 0x3ff8) 25 + #define DAVINCI_UART_INFO (PLAT_PHYS_OFFSET + 0x3ff8) 26 26 27 27 #define DAVINCI_UART0_BASE (IO_PHYS + 0x20000) 28 28 #define DAVINCI_UART1_BASE (IO_PHYS + 0x20400)
+8 -3
arch/arm/mach-mx3/mach-vpr200.c
··· 257 257 .workaround = FLS_USB2_WORKAROUND_ENGCM09152, 258 258 }; 259 259 260 + static int vpr200_usbh_init(struct platform_device *pdev) 261 + { 262 + return mx35_initialize_usb_hw(pdev->id, 263 + MXC_EHCI_INTERFACE_SINGLE_UNI | MXC_EHCI_INTERNAL_PHY); 264 + } 265 + 260 266 /* USB HOST config */ 261 267 static const struct mxc_usbh_platform_data usb_host_pdata __initconst = { 262 - .portsc = MXC_EHCI_MODE_SERIAL, 263 - .flags = MXC_EHCI_INTERFACE_SINGLE_UNI | 264 - MXC_EHCI_INTERNAL_PHY, 268 + .init = vpr200_usbh_init, 269 + .portsc = MXC_EHCI_MODE_SERIAL, 265 270 }; 266 271 267 272 static struct platform_device *devices[] __initdata = {
+1 -1
arch/arm/mach-mx5/board-mx53_loco.c
··· 193 193 .wakeup = wake, \ 194 194 } 195 195 196 - static const struct gpio_keys_button loco_buttons[] __initconst = { 196 + static struct gpio_keys_button loco_buttons[] = { 197 197 GPIO_BUTTON(MX53_LOCO_POWER, KEY_POWER, 1, "power", 0), 198 198 GPIO_BUTTON(MX53_LOCO_UI1, KEY_VOLUMEUP, 1, "volume-up", 0), 199 199 GPIO_BUTTON(MX53_LOCO_UI2, KEY_VOLUMEDOWN, 1, "volume-down", 0),
+6 -1
arch/arm/mach-mxs/clock-mx28.c
··· 295 295 unsigned long diff, parent_rate, calc_rate; \ 296 296 int i; \ 297 297 \ 298 - parent_rate = clk_get_rate(clk->parent); \ 299 298 div_max = BM_CLKCTRL_##dr##_DIV >> BP_CLKCTRL_##dr##_DIV; \ 300 299 bm_busy = BM_CLKCTRL_##dr##_BUSY; \ 301 300 \ 302 301 if (clk->parent == &ref_xtal_clk) { \ 302 + parent_rate = clk_get_rate(clk->parent); \ 303 303 div = DIV_ROUND_UP(parent_rate, rate); \ 304 304 if (clk == &cpu_clk) { \ 305 305 div_max = BM_CLKCTRL_CPU_DIV_XTAL >> \ ··· 309 309 if (div == 0 || div > div_max) \ 310 310 return -EINVAL; \ 311 311 } else { \ 312 + /* \ 313 + * hack alert: this block modifies clk->parent, too, \ 314 + * so the base to use it the grand parent. \ 315 + */ \ 316 + parent_rate = clk_get_rate(clk->parent->parent); \ 312 317 rate >>= PARENT_RATE_SHIFT; \ 313 318 parent_rate >>= PARENT_RATE_SHIFT; \ 314 319 diff = parent_rate; \
+1 -1
arch/arm/mach-omap2/Makefile
··· 68 68 obj-$(CONFIG_OMAP_SMARTREFLEX_CLASS3) += smartreflex-class3.o 69 69 70 70 AFLAGS_sleep24xx.o :=-Wa,-march=armv6 71 - AFLAGS_sleep34xx.o :=-Wa,-march=armv7-a 71 + AFLAGS_sleep34xx.o :=-Wa,-march=armv7-a$(plus_sec) 72 72 73 73 ifeq ($(CONFIG_PM_VERBOSE),y) 74 74 CFLAGS_pm_bus.o += -DDEBUG
+7 -2
arch/arm/mach-omap2/board-rx51.c
··· 141 141 static void __init rx51_map_io(void) 142 142 { 143 143 omap2_set_globals_3xxx(); 144 - rx51_video_mem_init(); 145 144 omap34xx_map_common_io(); 145 + } 146 + 147 + static void __init rx51_reserve(void) 148 + { 149 + rx51_video_mem_init(); 150 + omap_reserve(); 146 151 } 147 152 148 153 MACHINE_START(NOKIA_RX51, "Nokia RX-51 board") 149 154 /* Maintainer: Lauri Leukkunen <lauri.leukkunen@nokia.com> */ 150 155 .boot_params = 0x80000100, 151 - .reserve = omap_reserve, 156 + .reserve = rx51_reserve, 152 157 .map_io = rx51_map_io, 153 158 .init_early = rx51_init_early, 154 159 .init_irq = omap_init_irq,
+1
arch/arm/mach-omap2/clkt34xx_dpll3m2.c
··· 115 115 sdrc_cs0->rfr_ctrl, sdrc_cs0->actim_ctrla, 116 116 sdrc_cs0->actim_ctrlb, sdrc_cs0->mr, 117 117 0, 0, 0, 0); 118 + clk->rate = rate; 118 119 119 120 return 0; 120 121 }
+2 -7
arch/arm/mach-omap2/clock44xx_data.c
··· 3116 3116 CLK(NULL, "dsp_fck", &dsp_fck, CK_443X), 3117 3117 CLK("omapdss_dss", "sys_clk", &dss_sys_clk, CK_443X), 3118 3118 CLK("omapdss_dss", "tv_clk", &dss_tv_clk, CK_443X), 3119 - CLK("omapdss_dss", "dss_clk", &dss_dss_clk, CK_443X), 3120 3119 CLK("omapdss_dss", "video_clk", &dss_48mhz_clk, CK_443X), 3121 - CLK("omapdss_dss", "fck", &dss_fck, CK_443X), 3122 - /* 3123 - * On OMAP4, DSS ick is a dummy clock; this is needed for compatibility 3124 - * with OMAP2/3. 3125 - */ 3126 - CLK("omapdss_dss", "ick", &dummy_ck, CK_443X), 3120 + CLK("omapdss_dss", "fck", &dss_dss_clk, CK_443X), 3121 + CLK("omapdss_dss", "ick", &dss_fck, CK_443X), 3127 3122 CLK(NULL, "efuse_ctrl_cust_fck", &efuse_ctrl_cust_fck, CK_443X), 3128 3123 CLK(NULL, "emif1_fck", &emif1_fck, CK_443X), 3129 3124 CLK(NULL, "emif2_fck", &emif2_fck, CK_443X),
+17
arch/arm/mach-omap2/cm2xxx_3xxx.c
··· 247 247 u32 per_cm_clksel; 248 248 u32 emu_cm_clksel; 249 249 u32 emu_cm_clkstctrl; 250 + u32 pll_cm_autoidle; 250 251 u32 pll_cm_autoidle2; 251 252 u32 pll_cm_clksel4; 252 253 u32 pll_cm_clksel5; ··· 320 319 omap2_cm_read_mod_reg(OMAP3430_EMU_MOD, CM_CLKSEL1); 321 320 cm_context.emu_cm_clkstctrl = 322 321 omap2_cm_read_mod_reg(OMAP3430_EMU_MOD, OMAP2_CM_CLKSTCTRL); 322 + /* 323 + * As per erratum i671, ROM code does not respect the PER DPLL 324 + * programming scheme if CM_AUTOIDLE_PLL.AUTO_PERIPH_DPLL == 1. 325 + * In this case, even though this register has been saved in 326 + * scratchpad contents, we need to restore AUTO_PERIPH_DPLL 327 + * by ourselves. So, we need to save it anyway. 328 + */ 329 + cm_context.pll_cm_autoidle = 330 + omap2_cm_read_mod_reg(PLL_MOD, CM_AUTOIDLE); 323 331 cm_context.pll_cm_autoidle2 = 324 332 omap2_cm_read_mod_reg(PLL_MOD, CM_AUTOIDLE2); 325 333 cm_context.pll_cm_clksel4 = ··· 451 441 CM_CLKSEL1); 452 442 omap2_cm_write_mod_reg(cm_context.emu_cm_clkstctrl, OMAP3430_EMU_MOD, 453 443 OMAP2_CM_CLKSTCTRL); 444 + /* 445 + * As per erratum i671, ROM code does not respect the PER DPLL 446 + * programming scheme if CM_AUTOIDLE_PLL.AUTO_PERIPH_DPLL == 1. 447 + * In this case, we need to restore AUTO_PERIPH_DPLL by ourselves. 448 + */ 449 + omap2_cm_write_mod_reg(cm_context.pll_cm_autoidle, PLL_MOD, 450 + CM_AUTOIDLE); 454 451 omap2_cm_write_mod_reg(cm_context.pll_cm_autoidle2, PLL_MOD, 455 452 CM_AUTOIDLE2); 456 453 omap2_cm_write_mod_reg(cm_context.pll_cm_clksel4, PLL_MOD,
+7 -1
arch/arm/mach-omap2/control.c
··· 316 316 omap2_cm_read_mod_reg(WKUP_MOD, CM_CLKSEL); 317 317 prcm_block_contents.cm_clken_pll = 318 318 omap2_cm_read_mod_reg(PLL_MOD, CM_CLKEN); 319 + /* 320 + * As per erratum i671, ROM code does not respect the PER DPLL 321 + * programming scheme if CM_AUTOIDLE_PLL..AUTO_PERIPH_DPLL == 1. 322 + * Then, in anycase, clear these bits to avoid extra latencies. 323 + */ 319 324 prcm_block_contents.cm_autoidle_pll = 320 - omap2_cm_read_mod_reg(PLL_MOD, OMAP3430_CM_AUTOIDLE_PLL); 325 + omap2_cm_read_mod_reg(PLL_MOD, CM_AUTOIDLE) & 326 + ~OMAP3430_AUTO_PERIPH_DPLL_MASK; 321 327 prcm_block_contents.cm_clksel1_pll = 322 328 omap2_cm_read_mod_reg(PLL_MOD, OMAP3430_CM_CLKSEL1_PLL); 323 329 prcm_block_contents.cm_clksel2_pll =
+5 -1
arch/arm/mach-omap2/omap_hwmod_2420_data.c
··· 1639 1639 1640 1640 static struct omap_hwmod omap2420_gpio1_hwmod = { 1641 1641 .name = "gpio1", 1642 + .flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET, 1642 1643 .mpu_irqs = omap242x_gpio1_irqs, 1643 1644 .mpu_irqs_cnt = ARRAY_SIZE(omap242x_gpio1_irqs), 1644 1645 .main_clk = "gpios_fck", ··· 1670 1669 1671 1670 static struct omap_hwmod omap2420_gpio2_hwmod = { 1672 1671 .name = "gpio2", 1672 + .flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET, 1673 1673 .mpu_irqs = omap242x_gpio2_irqs, 1674 1674 .mpu_irqs_cnt = ARRAY_SIZE(omap242x_gpio2_irqs), 1675 1675 .main_clk = "gpios_fck", ··· 1701 1699 1702 1700 static struct omap_hwmod omap2420_gpio3_hwmod = { 1703 1701 .name = "gpio3", 1702 + .flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET, 1704 1703 .mpu_irqs = omap242x_gpio3_irqs, 1705 1704 .mpu_irqs_cnt = ARRAY_SIZE(omap242x_gpio3_irqs), 1706 1705 .main_clk = "gpios_fck", ··· 1732 1729 1733 1730 static struct omap_hwmod omap2420_gpio4_hwmod = { 1734 1731 .name = "gpio4", 1732 + .flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET, 1735 1733 .mpu_irqs = omap242x_gpio4_irqs, 1736 1734 .mpu_irqs_cnt = ARRAY_SIZE(omap242x_gpio4_irqs), 1737 1735 .main_clk = "gpios_fck", ··· 1786 1782 static struct omap_hwmod_addr_space omap2420_dma_system_addrs[] = { 1787 1783 { 1788 1784 .pa_start = 0x48056000, 1789 - .pa_end = 0x4a0560ff, 1785 + .pa_end = 0x48056fff, 1790 1786 .flags = ADDR_TYPE_RT 1791 1787 }, 1792 1788 };
+6 -1
arch/arm/mach-omap2/omap_hwmod_2430_data.c
··· 1742 1742 1743 1743 static struct omap_hwmod omap2430_gpio1_hwmod = { 1744 1744 .name = "gpio1", 1745 + .flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET, 1745 1746 .mpu_irqs = omap243x_gpio1_irqs, 1746 1747 .mpu_irqs_cnt = ARRAY_SIZE(omap243x_gpio1_irqs), 1747 1748 .main_clk = "gpios_fck", ··· 1773 1772 1774 1773 static struct omap_hwmod omap2430_gpio2_hwmod = { 1775 1774 .name = "gpio2", 1775 + .flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET, 1776 1776 .mpu_irqs = omap243x_gpio2_irqs, 1777 1777 .mpu_irqs_cnt = ARRAY_SIZE(omap243x_gpio2_irqs), 1778 1778 .main_clk = "gpios_fck", ··· 1804 1802 1805 1803 static struct omap_hwmod omap2430_gpio3_hwmod = { 1806 1804 .name = "gpio3", 1805 + .flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET, 1807 1806 .mpu_irqs = omap243x_gpio3_irqs, 1808 1807 .mpu_irqs_cnt = ARRAY_SIZE(omap243x_gpio3_irqs), 1809 1808 .main_clk = "gpios_fck", ··· 1835 1832 1836 1833 static struct omap_hwmod omap2430_gpio4_hwmod = { 1837 1834 .name = "gpio4", 1835 + .flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET, 1838 1836 .mpu_irqs = omap243x_gpio4_irqs, 1839 1837 .mpu_irqs_cnt = ARRAY_SIZE(omap243x_gpio4_irqs), 1840 1838 .main_clk = "gpios_fck", ··· 1866 1862 1867 1863 static struct omap_hwmod omap2430_gpio5_hwmod = { 1868 1864 .name = "gpio5", 1865 + .flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET, 1869 1866 .mpu_irqs = omap243x_gpio5_irqs, 1870 1867 .mpu_irqs_cnt = ARRAY_SIZE(omap243x_gpio5_irqs), 1871 1868 .main_clk = "gpio5_fck", ··· 1920 1915 static struct omap_hwmod_addr_space omap2430_dma_system_addrs[] = { 1921 1916 { 1922 1917 .pa_start = 0x48056000, 1923 - .pa_end = 0x4a0560ff, 1918 + .pa_end = 0x48056fff, 1924 1919 .flags = ADDR_TYPE_RT 1925 1920 }, 1926 1921 };
+7 -1
arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
··· 2141 2141 2142 2142 static struct omap_hwmod omap3xxx_gpio1_hwmod = { 2143 2143 .name = "gpio1", 2144 + .flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET, 2144 2145 .mpu_irqs = omap3xxx_gpio1_irqs, 2145 2146 .mpu_irqs_cnt = ARRAY_SIZE(omap3xxx_gpio1_irqs), 2146 2147 .main_clk = "gpio1_ick", ··· 2178 2177 2179 2178 static struct omap_hwmod omap3xxx_gpio2_hwmod = { 2180 2179 .name = "gpio2", 2180 + .flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET, 2181 2181 .mpu_irqs = omap3xxx_gpio2_irqs, 2182 2182 .mpu_irqs_cnt = ARRAY_SIZE(omap3xxx_gpio2_irqs), 2183 2183 .main_clk = "gpio2_ick", ··· 2215 2213 2216 2214 static struct omap_hwmod omap3xxx_gpio3_hwmod = { 2217 2215 .name = "gpio3", 2216 + .flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET, 2218 2217 .mpu_irqs = omap3xxx_gpio3_irqs, 2219 2218 .mpu_irqs_cnt = ARRAY_SIZE(omap3xxx_gpio3_irqs), 2220 2219 .main_clk = "gpio3_ick", ··· 2252 2249 2253 2250 static struct omap_hwmod omap3xxx_gpio4_hwmod = { 2254 2251 .name = "gpio4", 2252 + .flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET, 2255 2253 .mpu_irqs = omap3xxx_gpio4_irqs, 2256 2254 .mpu_irqs_cnt = ARRAY_SIZE(omap3xxx_gpio4_irqs), 2257 2255 .main_clk = "gpio4_ick", ··· 2289 2285 2290 2286 static struct omap_hwmod omap3xxx_gpio5_hwmod = { 2291 2287 .name = "gpio5", 2288 + .flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET, 2292 2289 .mpu_irqs = omap3xxx_gpio5_irqs, 2293 2290 .mpu_irqs_cnt = ARRAY_SIZE(omap3xxx_gpio5_irqs), 2294 2291 .main_clk = "gpio5_ick", ··· 2326 2321 2327 2322 static struct omap_hwmod omap3xxx_gpio6_hwmod = { 2328 2323 .name = "gpio6", 2324 + .flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET, 2329 2325 .mpu_irqs = omap3xxx_gpio6_irqs, 2330 2326 .mpu_irqs_cnt = ARRAY_SIZE(omap3xxx_gpio6_irqs), 2331 2327 .main_clk = "gpio6_ick", ··· 2392 2386 static struct omap_hwmod_addr_space omap3xxx_dma_system_addrs[] = { 2393 2387 { 2394 2388 .pa_start = 0x48056000, 2395 - .pa_end = 0x4a0560ff, 2389 + .pa_end = 0x48056fff, 2396 2390 .flags = ADDR_TYPE_RT 2397 2391 }, 2398 2392 };
+1 -1
arch/arm/mach-omap2/omap_hwmod_44xx_data.c
··· 885 885 static struct omap_hwmod_addr_space omap44xx_dma_system_addrs[] = { 886 886 { 887 887 .pa_start = 0x4a056000, 888 - .pa_end = 0x4a0560ff, 888 + .pa_end = 0x4a056fff, 889 889 .flags = ADDR_TYPE_RT 890 890 }, 891 891 };
+2 -2
arch/arm/mach-omap2/omap_l3_smx.c
··· 196 196 /* No timeout error for debug sources */ 197 197 } 198 198 199 - base = ((l3->rt) + (*(omap3_l3_bases[int_type] + err_source))); 200 - 201 199 /* identify the error source */ 202 200 for (err_source = 0; !(status & (1 << err_source)); err_source++) 203 201 ; 202 + 203 + base = l3->rt + *(omap3_l3_bases[int_type] + err_source); 204 204 error = omap3_l3_readll(base, L3_ERROR_LOG); 205 205 206 206 if (error) {
+1
arch/arm/mach-omap2/pm.c
··· 89 89 if (cpu_is_omap44xx()) { 90 90 _init_omap_device("l3_main_1", &l3_dev); 91 91 _init_omap_device("dsp", &dsp_dev); 92 + _init_omap_device("iva", &iva_dev); 92 93 } else { 93 94 _init_omap_device("l3_main", &l3_dev); 94 95 }
-1
arch/arm/mach-omap2/voltage.c
··· 114 114 sys_clk_speed /= 1000; 115 115 116 116 /* Generic voltage parameters */ 117 - vdd->curr_volt = 1200000; 118 117 vdd->volt_scale = vp_forceupdate_scale_voltage; 119 118 vdd->vp_enabled = false; 120 119
+1 -1
arch/arm/mach-pxa/hx4700.c
··· 711 711 static struct regulator_init_data bq24022_init_data = { 712 712 .constraints = { 713 713 .max_uA = 500000, 714 - .valid_ops_mask = REGULATOR_CHANGE_CURRENT, 714 + .valid_ops_mask = REGULATOR_CHANGE_CURRENT|REGULATOR_CHANGE_STATUS, 715 715 }, 716 716 .num_consumer_supplies = ARRAY_SIZE(bq24022_consumers), 717 717 .consumer_supplies = bq24022_consumers,
+1 -1
arch/arm/mach-pxa/magician.c
··· 599 599 static struct regulator_init_data bq24022_init_data = { 600 600 .constraints = { 601 601 .max_uA = 500000, 602 - .valid_ops_mask = REGULATOR_CHANGE_CURRENT, 602 + .valid_ops_mask = REGULATOR_CHANGE_CURRENT | REGULATOR_CHANGE_STATUS, 603 603 }, 604 604 .num_consumer_supplies = ARRAY_SIZE(bq24022_consumers), 605 605 .consumer_supplies = bq24022_consumers,
+1 -1
arch/arm/mach-realview/include/mach/barriers.h
··· 4 4 * operation to deadlock the system. 5 5 */ 6 6 #define mb() dsb() 7 - #define rmb() dmb() 7 + #define rmb() dsb() 8 8 #define wmb() mb()
+5
arch/arm/mach-s3c2440/mach-gta02.c
··· 409 409 .num_resources = 0, 410 410 }; 411 411 412 + static struct platform_device gta02_dfbmcs320_device = { 413 + .name = "dfbmcs320", 414 + }; 415 + 412 416 static struct i2c_board_info gta02_i2c_devs[] __initdata = { 413 417 { 414 418 I2C_BOARD_INFO("pcf50633", 0x73), ··· 527 523 &s3c_device_iis, 528 524 &samsung_asoc_dma, 529 525 &s3c_device_i2c0, 526 + &gta02_dfbmcs320_device, 530 527 &gta02_buttons_device, 531 528 &s3c_device_adc, 532 529 &s3c_device_ts,
+1 -1
arch/arm/mach-tegra/include/mach/barriers.h
··· 23 23 24 24 #include <asm/outercache.h> 25 25 26 - #define rmb() dmb() 26 + #define rmb() dsb() 27 27 #define wmb() do { dsb(); outer_sync(); } while (0) 28 28 #define mb() wmb() 29 29
+10 -9
arch/arm/mach-ux500/board-mop500.c
··· 178 178 .irq = NOMADIK_GPIO_TO_IRQ(217), 179 179 .platform_data = &mop500_tc35892_data, 180 180 }, 181 - }; 182 - 183 - /* I2C0 devices only available prior to HREFv60 */ 184 - static struct i2c_board_info __initdata mop500_i2c0_old_devices[] = { 181 + /* I2C0 devices only available prior to HREFv60 */ 185 182 { 186 183 I2C_BOARD_INFO("tps61052", 0x33), 187 184 .platform_data = &mop500_tps61052_data, 188 185 }, 189 186 }; 187 + 188 + #define NUM_PRE_V60_I2C0_DEVICES 1 190 189 191 190 static struct i2c_board_info __initdata mop500_i2c2_devices[] = { 192 191 { ··· 424 425 425 426 static void __init mop500_init_machine(void) 426 427 { 428 + int i2c0_devs; 429 + 427 430 /* 428 431 * The HREFv60 board removed a GPIO expander and routed 429 432 * all these GPIO pins to the internal GPIO controller ··· 449 448 450 449 platform_device_register(&ab8500_device); 451 450 452 - i2c_register_board_info(0, mop500_i2c0_devices, 453 - ARRAY_SIZE(mop500_i2c0_devices)); 454 - if (!machine_is_hrefv60()) 455 - i2c_register_board_info(0, mop500_i2c0_old_devices, 456 - ARRAY_SIZE(mop500_i2c0_old_devices)); 451 + i2c0_devs = ARRAY_SIZE(mop500_i2c0_devices); 452 + if (machine_is_hrefv60()) 453 + i2c0_devs -= NUM_PRE_V60_I2C0_DEVICES; 454 + 455 + i2c_register_board_info(0, mop500_i2c0_devices, i2c0_devs); 457 456 i2c_register_board_info(2, mop500_i2c2_devices, 458 457 ARRAY_SIZE(mop500_i2c2_devices)); 459 458 }
+15 -1
arch/arm/mm/init.c
··· 392 392 * Convert start_pfn/end_pfn to a struct page pointer. 393 393 */ 394 394 start_pg = pfn_to_page(start_pfn - 1) + 1; 395 - end_pg = pfn_to_page(end_pfn); 395 + end_pg = pfn_to_page(end_pfn - 1) + 1; 396 396 397 397 /* 398 398 * Convert to physical addresses, and ··· 426 426 427 427 bank_start = bank_pfn_start(bank); 428 428 429 + #ifdef CONFIG_SPARSEMEM 430 + /* 431 + * Take care not to free memmap entries that don't exist 432 + * due to SPARSEMEM sections which aren't present. 433 + */ 434 + bank_start = min(bank_start, 435 + ALIGN(prev_bank_end, PAGES_PER_SECTION)); 436 + #endif 429 437 /* 430 438 * If we had a previous bank, and there is a space 431 439 * between the current bank and the previous, free it. ··· 448 440 */ 449 441 prev_bank_end = ALIGN(bank_pfn_end(bank), MAX_ORDER_NR_PAGES); 450 442 } 443 + 444 + #ifdef CONFIG_SPARSEMEM 445 + if (!IS_ALIGNED(prev_bank_end, PAGES_PER_SECTION)) 446 + free_memmap(prev_bank_end, 447 + ALIGN(prev_bank_end, PAGES_PER_SECTION)); 448 + #endif 451 449 } 452 450 453 451 static void __init free_highpages(void)
+1 -1
arch/arm/mm/proc-xscale.S
··· 395 395 teq r2, #DMA_TO_DEVICE 396 396 beq xscale_dma_clean_range 397 397 b xscale_dma_flush_range 398 - ENDPROC(xscsale_dma_a0_map_area) 398 + ENDPROC(xscale_dma_a0_map_area) 399 399 400 400 /* 401 401 * dma_unmap_area(start, size, dir)
+7
arch/arm/plat-mxc/gpio.c
··· 295 295 return 0; 296 296 } 297 297 298 + /* 299 + * This lock class tells lockdep that GPIO irqs are in a different 300 + * category than their parents, so it won't report false recursion. 301 + */ 302 + static struct lock_class_key gpio_lock_class; 303 + 298 304 int __init mxc_gpio_init(struct mxc_gpio_port *port, int cnt) 299 305 { 300 306 int i, j; ··· 317 311 __raw_writel(~0, port[i].base + GPIO_ISR); 318 312 for (j = port[i].virtual_irq_start; 319 313 j < port[i].virtual_irq_start + 32; j++) { 314 + irq_set_lockdep_class(j, &gpio_lock_class); 320 315 irq_set_chip_and_handler(j, &gpio_irq_chip, 321 316 handle_level_irq); 322 317 set_irq_flags(j, IRQF_VALID);
+2
arch/arm/plat-mxc/ssi-fiq.S
··· 124 124 1: 125 125 @ return from FIQ 126 126 subs pc, lr, #4 127 + 128 + .align 127 129 imx_ssi_fiq_base: 128 130 .word 0x0 129 131 imx_ssi_fiq_rx_buffer:
+2
arch/arm/plat-omap/iommu.c
··· 793 793 clk_enable(obj->clk); 794 794 errs = iommu_report_fault(obj, &da); 795 795 clk_disable(obj->clk); 796 + if (errs == 0) 797 + return IRQ_HANDLED; 796 798 797 799 /* Fault callback or TLB/PTE Dynamic loading */ 798 800 if (obj->isr && !obj->isr(obj, da, errs, obj->isr_priv))
+2
arch/m68k/mm/motorola.c
··· 300 300 zones_size[ZONE_DMA] = m68k_memory[i].size >> PAGE_SHIFT; 301 301 free_area_init_node(i, zones_size, 302 302 m68k_memory[i].addr >> PAGE_SHIFT, NULL); 303 + if (node_present_pages(i)) 304 + node_set_state(i, N_NORMAL_MEMORY); 303 305 } 304 306 } 305 307
-5
arch/mips/Kconfig
··· 997 997 config IRQ_GIC 998 998 bool 999 999 1000 - config IRQ_CPU_OCTEON 1001 - bool 1002 - 1003 1000 config MIPS_BOARDS_GEN 1004 1001 bool 1005 1002 ··· 1356 1359 config CPU_CAVIUM_OCTEON 1357 1360 bool "Cavium Octeon processor" 1358 1361 depends on SYS_HAS_CPU_CAVIUM_OCTEON 1359 - select IRQ_CPU 1360 - select IRQ_CPU_OCTEON 1361 1362 select CPU_HAS_PREFETCH 1362 1363 select CPU_SUPPORTS_64BIT_KERNEL 1363 1364 select SYS_SUPPORTS_SMP
+33 -28
arch/mips/alchemy/devboards/db1x00/board_setup.c
··· 127 127 void __init board_setup(void) 128 128 { 129 129 unsigned long bcsr1, bcsr2; 130 - u32 pin_func; 131 130 132 131 bcsr1 = DB1000_BCSR_PHYS_ADDR; 133 132 bcsr2 = DB1000_BCSR_PHYS_ADDR + DB1000_BCSR_HEXLED_OFS; 134 - 135 - pin_func = 0; 136 133 137 134 #ifdef CONFIG_MIPS_DB1000 138 135 printk(KERN_INFO "AMD Alchemy Au1000/Db1000 Board\n"); ··· 161 164 /* Not valid for Au1550 */ 162 165 #if defined(CONFIG_IRDA) && \ 163 166 (defined(CONFIG_SOC_AU1000) || defined(CONFIG_SOC_AU1100)) 164 - /* Set IRFIRSEL instead of GPIO15 */ 165 - pin_func = au_readl(SYS_PINFUNC) | SYS_PF_IRF; 166 - au_writel(pin_func, SYS_PINFUNC); 167 - /* Power off until the driver is in use */ 168 - bcsr_mod(BCSR_RESETS, BCSR_RESETS_IRDA_MODE_MASK, 169 - BCSR_RESETS_IRDA_MODE_OFF); 167 + { 168 + u32 pin_func; 169 + 170 + /* Set IRFIRSEL instead of GPIO15 */ 171 + pin_func = au_readl(SYS_PINFUNC) | SYS_PF_IRF; 172 + au_writel(pin_func, SYS_PINFUNC); 173 + /* Power off until the driver is in use */ 174 + bcsr_mod(BCSR_RESETS, BCSR_RESETS_IRDA_MODE_MASK, 175 + BCSR_RESETS_IRDA_MODE_OFF); 176 + } 170 177 #endif 171 178 bcsr_write(BCSR_PCMCIA, 0); /* turn off PCMCIA power */ 172 179 ··· 178 177 alchemy_gpio1_input_enable(); 179 178 180 179 #ifdef CONFIG_MIPS_MIRAGE 181 - /* GPIO[20] is output */ 182 - alchemy_gpio_direction_output(20, 0); 180 + { 181 + u32 pin_func; 183 182 184 - /* Set GPIO[210:208] instead of SSI_0 */ 185 - pin_func = au_readl(SYS_PINFUNC) | SYS_PF_S0; 183 + /* GPIO[20] is output */ 184 + alchemy_gpio_direction_output(20, 0); 186 185 187 - /* Set GPIO[215:211] for LEDs */ 188 - pin_func |= 5 << 2; 186 + /* Set GPIO[210:208] instead of SSI_0 */ 187 + pin_func = au_readl(SYS_PINFUNC) | SYS_PF_S0; 189 188 190 - /* Set GPIO[214:213] for more LEDs */ 191 - pin_func |= 5 << 12; 189 + /* Set GPIO[215:211] for LEDs */ 190 + pin_func |= 5 << 2; 192 191 193 - /* Set GPIO[207:200] instead of PCMCIA/LCD */ 194 - pin_func |= SYS_PF_LCD | SYS_PF_PC; 195 - au_writel(pin_func, SYS_PINFUNC); 
192 + /* Set GPIO[214:213] for more LEDs */ 193 + pin_func |= 5 << 12; 196 194 197 - /* 198 - * Enable speaker amplifier. This should 199 - * be part of the audio driver. 200 - */ 201 - alchemy_gpio_direction_output(209, 1); 195 + /* Set GPIO[207:200] instead of PCMCIA/LCD */ 196 + pin_func |= SYS_PF_LCD | SYS_PF_PC; 197 + au_writel(pin_func, SYS_PINFUNC); 202 198 203 - pm_power_off = mirage_power_off; 204 - _machine_halt = mirage_power_off; 205 - _machine_restart = (void(*)(char *))mips_softreset; 199 + /* 200 + * Enable speaker amplifier. This should 201 + * be part of the audio driver. 202 + */ 203 + alchemy_gpio_direction_output(209, 1); 204 + 205 + pm_power_off = mirage_power_off; 206 + _machine_halt = mirage_power_off; 207 + _machine_restart = (void(*)(char *))mips_softreset; 208 + } 206 209 #endif 207 210 208 211 #ifdef CONFIG_MIPS_BOSPORUS
+2 -3
arch/mips/alchemy/xxs1500/init.c
··· 51 51 prom_init_cmdline(); 52 52 53 53 memsize_str = prom_getenv("memsize"); 54 - if (!memsize_str) 54 + if (!memsize_str || strict_strtoul(memsize_str, 0, &memsize)) 55 55 memsize = 0x04000000; 56 - else 57 - strict_strtoul(memsize_str, 0, &memsize); 56 + 58 57 add_memory_region(0, memsize, BOOT_MEM_RAM); 59 58 } 60 59
+1 -3
arch/mips/ar7/gpio.c
··· 325 325 size = 0x1f; 326 326 } 327 327 328 - gpch->regs = ioremap_nocache(AR7_REGS_GPIO, 329 - AR7_REGS_GPIO + 0x10); 330 - 328 + gpch->regs = ioremap_nocache(AR7_REGS_GPIO, size); 331 329 if (!gpch->regs) { 332 330 printk(KERN_ERR "%s: failed to ioremap regs\n", 333 331 gpch->chip.label);
+1 -1
arch/mips/boot/compressed/calc_vmlinuz_load_addr.c
··· 16 16 17 17 int main(int argc, char *argv[]) 18 18 { 19 + unsigned long long vmlinux_size, vmlinux_load_addr, vmlinuz_load_addr; 19 20 struct stat sb; 20 - uint64_t vmlinux_size, vmlinux_load_addr, vmlinuz_load_addr; 21 21 22 22 if (argc != 3) { 23 23 fprintf(stderr, "Usage: %s <pathname> <vmlinux_load_addr>\n",
+4 -11
arch/mips/cavium-octeon/Kconfig
··· 1 - config CAVIUM_OCTEON_SPECIFIC_OPTIONS 2 - bool "Enable Octeon specific options" 3 - depends on CPU_CAVIUM_OCTEON 4 - default "y" 1 + if CPU_CAVIUM_OCTEON 5 2 6 3 config CAVIUM_CN63XXP1 7 4 bool "Enable CN63XXP1 errata worarounds" 8 - depends on CAVIUM_OCTEON_SPECIFIC_OPTIONS 9 5 default "n" 10 6 help 11 7 The CN63XXP1 chip requires build time workarounds to ··· 12 16 13 17 config CAVIUM_OCTEON_2ND_KERNEL 14 18 bool "Build the kernel to be used as a 2nd kernel on the same chip" 15 - depends on CAVIUM_OCTEON_SPECIFIC_OPTIONS 16 19 default "n" 17 20 help 18 21 This option configures this kernel to be linked at a different ··· 21 26 22 27 config CAVIUM_OCTEON_HW_FIX_UNALIGNED 23 28 bool "Enable hardware fixups of unaligned loads and stores" 24 - depends on CAVIUM_OCTEON_SPECIFIC_OPTIONS 25 29 default "y" 26 30 help 27 31 Configure the Octeon hardware to automatically fix unaligned loads ··· 32 38 33 39 config CAVIUM_OCTEON_CVMSEG_SIZE 34 40 int "Number of L1 cache lines reserved for CVMSEG memory" 35 - depends on CAVIUM_OCTEON_SPECIFIC_OPTIONS 36 41 range 0 54 37 42 default 1 38 43 help ··· 43 50 44 51 config CAVIUM_OCTEON_LOCK_L2 45 52 bool "Lock often used kernel code in the L2" 46 - depends on CAVIUM_OCTEON_SPECIFIC_OPTIONS 47 53 default "y" 48 54 help 49 55 Enable locking parts of the kernel into the L2 cache. ··· 85 93 config ARCH_SPARSEMEM_ENABLE 86 94 def_bool y 87 95 select SPARSEMEM_STATIC 88 - depends on CPU_CAVIUM_OCTEON 89 96 90 97 config CAVIUM_OCTEON_HELPER 91 98 def_bool y ··· 98 107 99 108 config SWIOTLB 100 109 def_bool y 101 - depends on CPU_CAVIUM_OCTEON 102 110 select IOMMU_HELPER 103 111 select NEED_SG_DMA_LENGTH 112 + 113 + 114 + endif # CPU_CAVIUM_OCTEON
+1 -1
arch/mips/include/asm/cache.h
··· 17 17 #define SMP_CACHE_SHIFT L1_CACHE_SHIFT 18 18 #define SMP_CACHE_BYTES L1_CACHE_BYTES 19 19 20 - #define __read_mostly __attribute__((__section__(".data.read_mostly"))) 20 + #define __read_mostly __attribute__((__section__(".data..read_mostly"))) 21 21 22 22 #endif /* _ASM_CACHE_H */
+3
arch/mips/include/asm/cevt-r4k.h
··· 14 14 #ifndef __ASM_CEVT_R4K_H 15 15 #define __ASM_CEVT_R4K_H 16 16 17 + #include <linux/clockchips.h> 18 + #include <asm/time.h> 19 + 17 20 DECLARE_PER_CPU(struct clock_event_device, mips_clockevent_device); 18 21 19 22 void mips_event_handler(struct clock_event_device *dev);
+2
arch/mips/include/asm/dma-mapping.h
··· 5 5 #include <asm/cache.h> 6 6 #include <asm-generic/dma-coherent.h> 7 7 8 + #ifndef CONFIG_SGI_IP27 /* Kludge to fix 2.6.39 build for IP27 */ 8 9 #include <dma-coherence.h> 10 + #endif 9 11 10 12 extern struct dma_map_ops *mips_dma_map_ops; 11 13
+1
arch/mips/include/asm/hugetlb.h
··· 70 70 static inline void huge_ptep_clear_flush(struct vm_area_struct *vma, 71 71 unsigned long addr, pte_t *ptep) 72 72 { 73 + flush_tlb_mm(vma->vm_mm); 73 74 } 74 75 75 76 static inline int huge_pte_none(pte_t pte)
+1 -1
arch/mips/include/asm/mach-bcm63xx/bcm963xx_tag.h
··· 88 88 char kernel_crc[CRC_LEN]; 89 89 /* 228-235: Unused at present */ 90 90 char reserved1[8]; 91 - /* 236-239: CRC32 of header excluding tagVersion */ 91 + /* 236-239: CRC32 of header excluding last 20 bytes */ 92 92 char header_crc[CRC_LEN]; 93 93 /* 240-255: Unused at present */ 94 94 char reserved2[16];
+2 -3
arch/mips/jazz/jazzdma.c
··· 211 211 */ 212 212 int vdma_remap(unsigned long laddr, unsigned long paddr, unsigned long size) 213 213 { 214 - int first, pages, npages; 214 + int first, pages; 215 215 216 216 if (laddr > 0xffffff) { 217 217 if (vdma_debug) ··· 228 228 return -EINVAL; /* invalid physical address */ 229 229 } 230 230 231 - npages = pages = 232 - (((paddr & (VDMA_PAGESIZE - 1)) + size) >> 12) + 1; 231 + pages = (((paddr & (VDMA_PAGESIZE - 1)) + size) >> 12) + 1; 233 232 first = laddr >> 12; 234 233 if (vdma_debug) 235 234 printk("vdma_remap: first=%x, pages=%x\n", first, pages);
+1 -3
arch/mips/jz4740/dma.c
··· 242 242 243 243 static void jz4740_dma_chan_irq(struct jz4740_dma_chan *dma) 244 244 { 245 - uint32_t status; 246 - 247 - status = jz4740_dma_read(JZ_REG_DMA_STATUS_CTRL(dma->id)); 245 + (void) jz4740_dma_read(JZ_REG_DMA_STATUS_CTRL(dma->id)); 248 246 249 247 jz4740_dma_write_mask(JZ_REG_DMA_STATUS_CTRL(dma->id), 0, 250 248 JZ_DMA_STATUS_CTRL_ENABLE | JZ_DMA_STATUS_CTRL_TRANSFER_DONE);
+1 -1
arch/mips/jz4740/time.c
··· 89 89 90 90 static struct clock_event_device jz4740_clockevent = { 91 91 .name = "jz4740-timer", 92 - .features = CLOCK_EVT_FEAT_PERIODIC, 92 + .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT, 93 93 .set_next_event = jz4740_clockevent_set_next, 94 94 .set_mode = jz4740_clockevent_set_mode, 95 95 .rating = 200,
+2
arch/mips/jz4740/timer.c
··· 27 27 { 28 28 writel(BIT(16), jz4740_timer_base + JZ_REG_TIMER_STOP_CLEAR); 29 29 } 30 + EXPORT_SYMBOL_GPL(jz4740_timer_enable_watchdog); 30 31 31 32 void jz4740_timer_disable_watchdog(void) 32 33 { 33 34 writel(BIT(16), jz4740_timer_base + JZ_REG_TIMER_STOP_SET); 34 35 } 36 + EXPORT_SYMBOL_GPL(jz4740_timer_disable_watchdog); 35 37 36 38 void __init jz4740_timer_init(void) 37 39 {
+3 -2
arch/mips/kernel/ftrace.c
··· 23 23 24 24 #define JAL 0x0c000000 /* jump & link: ip --> ra, jump to target */ 25 25 #define ADDR_MASK 0x03ffffff /* op_code|addr : 31...26|25 ....0 */ 26 + #define JUMP_RANGE_MASK ((1UL << 28) - 1) 26 27 27 28 #define INSN_NOP 0x00000000 /* nop */ 28 29 #define INSN_JAL(addr) \ ··· 45 44 46 45 /* jal (ftrace_caller + 8), jump over the first two instruction */ 47 46 buf = (u32 *)&insn_jal_ftrace_caller; 48 - uasm_i_jal(&buf, (FTRACE_ADDR + 8)); 47 + uasm_i_jal(&buf, (FTRACE_ADDR + 8) & JUMP_RANGE_MASK); 49 48 50 49 #ifdef CONFIG_FUNCTION_GRAPH_TRACER 51 50 /* j ftrace_graph_caller */ 52 51 buf = (u32 *)&insn_j_ftrace_graph_caller; 53 - uasm_i_j(&buf, (unsigned long)ftrace_graph_caller); 52 + uasm_i_j(&buf, (unsigned long)ftrace_graph_caller & JUMP_RANGE_MASK); 54 53 #endif 55 54 } 56 55
+2 -2
arch/mips/kernel/ptrace.c
··· 540 540 secure_computing(regs->regs[2]); 541 541 542 542 if (unlikely(current->audit_context) && entryexit) 543 - audit_syscall_exit(AUDITSC_RESULT(regs->regs[2]), 544 - regs->regs[2]); 543 + audit_syscall_exit(AUDITSC_RESULT(regs->regs[7]), 544 + -regs->regs[2]); 545 545 546 546 if (!(current->ptrace & PT_PTRACED)) 547 547 goto out;
+1 -1
arch/mips/kernel/scall32-o32.S
··· 565 565 sys sys_ioprio_get 2 /* 4315 */ 566 566 sys sys_utimensat 4 567 567 sys sys_signalfd 3 568 - sys sys_ni_syscall 0 568 + sys sys_ni_syscall 0 /* was timerfd */ 569 569 sys sys_eventfd 1 570 570 sys sys_fallocate 6 /* 4320 */ 571 571 sys sys_timerfd_create 2
+1 -1
arch/mips/kernel/scall64-64.S
··· 404 404 PTR sys_ioprio_get 405 405 PTR sys_utimensat /* 5275 */ 406 406 PTR sys_signalfd 407 - PTR sys_ni_syscall 407 + PTR sys_ni_syscall /* was timerfd */ 408 408 PTR sys_eventfd 409 409 PTR sys_fallocate 410 410 PTR sys_timerfd_create /* 5280 */
+1 -1
arch/mips/kernel/scall64-n32.S
··· 403 403 PTR sys_ioprio_get 404 404 PTR compat_sys_utimensat 405 405 PTR compat_sys_signalfd /* 6280 */ 406 - PTR sys_ni_syscall 406 + PTR sys_ni_syscall /* was timerfd */ 407 407 PTR sys_eventfd 408 408 PTR sys_fallocate 409 409 PTR sys_timerfd_create
+1 -1
arch/mips/kernel/scall64-o32.S
··· 522 522 PTR sys_ioprio_get /* 4315 */ 523 523 PTR compat_sys_utimensat 524 524 PTR compat_sys_signalfd 525 - PTR sys_ni_syscall 525 + PTR sys_ni_syscall /* was timerfd */ 526 526 PTR sys_eventfd 527 527 PTR sys32_fallocate /* 4320 */ 528 528 PTR sys_timerfd_create
+2 -4
arch/mips/kernel/traps.c
··· 374 374 unsigned long dvpret = dvpe(); 375 375 #endif /* CONFIG_MIPS_MT_SMTC */ 376 376 377 - notify_die(DIE_OOPS, str, regs, 0, regs_to_trapnr(regs), SIGSEGV); 377 + if (notify_die(DIE_OOPS, str, regs, 0, regs_to_trapnr(regs), SIGSEGV) == NOTIFY_STOP) 378 + sig = 0; 378 379 379 380 console_verbose(); 380 381 spin_lock_irq(&die_lock); ··· 383 382 #ifdef CONFIG_MIPS_MT_SMTC 384 383 mips_mt_regdump(dvpret); 385 384 #endif /* CONFIG_MIPS_MT_SMTC */ 386 - 387 - if (notify_die(DIE_OOPS, str, regs, 0, regs_to_trapnr(regs), SIGSEGV) == NOTIFY_STOP) 388 - sig = 0; 389 385 390 386 printk("%s[#%d]:\n", str, ++die_counter); 391 387 show_registers(regs);
+1
arch/mips/kernel/vmlinux.lds.S
··· 74 74 INIT_TASK_DATA(PAGE_SIZE) 75 75 NOSAVE_DATA 76 76 CACHELINE_ALIGNED_DATA(1 << CONFIG_MIPS_L1_CACHE_SHIFT) 77 + READ_MOSTLY_DATA(1 << CONFIG_MIPS_L1_CACHE_SHIFT) 77 78 DATA_DATA 78 79 CONSTRUCTORS 79 80 }
+3 -2
arch/mips/loongson/common/env.c
··· 29 29 30 30 #define parse_even_earlier(res, option, p) \ 31 31 do { \ 32 - int ret; \ 32 + unsigned int tmp __maybe_unused; \ 33 + \ 33 34 if (strncmp(option, (char *)p, strlen(option)) == 0) \ 34 - ret = strict_strtol((char *)p + strlen(option"="), 10, &res); \ 35 + tmp = strict_strtol((char *)p + strlen(option"="), 10, &res); \ 35 36 } while (0) 36 37 37 38 void __init prom_init_env(void)
-2
arch/mips/mm/c-r4k.c
··· 1075 1075 unsigned long flags, addr, begin, end, pow2; 1076 1076 unsigned int config = read_c0_config(); 1077 1077 struct cpuinfo_mips *c = &current_cpu_data; 1078 - int tmp; 1079 1078 1080 1079 if (config & CONF_SC) 1081 1080 return 0; ··· 1107 1108 1108 1109 /* Now search for the wrap around point. */ 1109 1110 pow2 = (128 * 1024); 1110 - tmp = 0; 1111 1111 for (addr = begin + (128 * 1024); addr < end; addr = begin + pow2) { 1112 1112 cache_op(Index_Load_Tag_SD, addr); 1113 1113 __asm__ __volatile__("nop; nop; nop; nop;"); /* hazard... */
+2 -2
arch/mips/mm/tlbex.c
··· 1151 1151 struct uasm_reloc *r = relocs; 1152 1152 u32 *f; 1153 1153 unsigned int final_len; 1154 - struct mips_huge_tlb_info htlb_info; 1155 - enum vmalloc64_mode vmalloc_mode; 1154 + struct mips_huge_tlb_info htlb_info __maybe_unused; 1155 + enum vmalloc64_mode vmalloc_mode __maybe_unused; 1156 1156 1157 1157 memset(tlb_handler, 0, sizeof(tlb_handler)); 1158 1158 memset(labels, 0, sizeof(labels));
+3 -11
arch/mips/mti-malta/malta-init.c
··· 193 193 194 194 void __init prom_init(void) 195 195 { 196 - int result; 197 - 198 196 prom_argc = fw_arg0; 199 197 _prom_argv = (int *) fw_arg1; 200 198 _prom_envp = (int *) fw_arg2; ··· 358 360 #ifdef CONFIG_SERIAL_8250_CONSOLE 359 361 console_config(); 360 362 #endif 361 - /* Early detection of CMP support */ 362 - result = gcmp_probe(GCMP_BASE_ADDR, GCMP_ADDRSPACE_SZ); 363 - 364 363 #ifdef CONFIG_MIPS_CMP 365 - if (result) 364 + /* Early detection of CMP support */ 365 + if (gcmp_probe(GCMP_BASE_ADDR, GCMP_ADDRSPACE_SZ)) 366 366 register_smp_ops(&cmp_smp_ops); 367 + else 367 368 #endif 368 369 #ifdef CONFIG_MIPS_MT_SMP 369 - #ifdef CONFIG_MIPS_CMP 370 - if (!result) 371 370 register_smp_ops(&vsmp_smp_ops); 372 - #else 373 - register_smp_ops(&vsmp_smp_ops); 374 - #endif 375 371 #endif 376 372 #ifdef CONFIG_MIPS_MT_SMTC 377 373 register_smp_ops(&msmtc_smp_ops);
+1 -2
arch/mips/mti-malta/malta-int.c
··· 56 56 static inline int mips_pcibios_iack(void) 57 57 { 58 58 int irq; 59 - u32 dummy; 60 59 61 60 /* 62 61 * Determine highest priority pending interrupt by performing ··· 82 83 BONITO_PCIMAP_CFG = 0x20000; 83 84 84 85 /* Flush Bonito register block */ 85 - dummy = BONITO_PCIMAP_CFG; 86 + (void) BONITO_PCIMAP_CFG; 86 87 iob(); /* sync */ 87 88 88 89 irq = __raw_readl((u32 *)_pcictrl_bonito_pcicfg);
+1 -1
arch/mips/pmc-sierra/msp71xx/msp_irq_per.c
··· 97 97 98 98 static struct irq_chip msp_per_irq_controller = { 99 99 .name = "MSP_PER", 100 - .irq_enable = unmask_per_irq. 100 + .irq_enable = unmask_per_irq, 101 101 .irq_disable = mask_per_irq, 102 102 .irq_ack = msp_per_irq_ack, 103 103 #ifdef CONFIG_SMP
+1 -1
arch/mips/power/hibernate.S
··· 35 35 0: 36 36 PTR_L t1, PBE_ADDRESS(t0) /* source */ 37 37 PTR_L t2, PBE_ORIG_ADDRESS(t0) /* destination */ 38 - PTR_ADDIU t3, t1, PAGE_SIZE 38 + PTR_ADDU t3, t1, PAGE_SIZE 39 39 1: 40 40 REG_L t8, (t1) 41 41 REG_S t8, (t2)
+1 -1
arch/mips/rb532/gpio.c
··· 185 185 struct resource *r; 186 186 187 187 r = rb532_gpio_reg0_res; 188 - rb532_gpio_chip->regbase = ioremap_nocache(r->start, r->end - r->start); 188 + rb532_gpio_chip->regbase = ioremap_nocache(r->start, resource_size(r)); 189 189 190 190 if (!rb532_gpio_chip->regbase) { 191 191 printk(KERN_ERR "rb532: cannot remap GPIO register 0\n");
+2 -2
arch/mips/sgi-ip22/ip22-platform.c
··· 132 132 */ 133 133 static int __init sgiseeq_devinit(void) 134 134 { 135 - unsigned int tmp; 135 + unsigned int pbdma __maybe_unused; 136 136 int res, i; 137 137 138 138 eth0_pd.hpc = hpc3c0; ··· 151 151 152 152 /* Second HPC is missing? */ 153 153 if (ip22_is_fullhouse() || 154 - get_dbe(tmp, (unsigned int *)&hpc3c1->pbdma[1])) 154 + get_dbe(pbdma, (unsigned int *)&hpc3c1->pbdma[1])) 155 155 return 0; 156 156 157 157 sgimc->giopar |= SGIMC_GIOPAR_MASTEREXP1 | SGIMC_GIOPAR_EXP164 |
+2 -2
arch/mips/sgi-ip22/ip22-time.c
··· 32 32 static unsigned long dosample(void) 33 33 { 34 34 u32 ct0, ct1; 35 - u8 msb, lsb; 35 + u8 msb; 36 36 37 37 /* Start the counter. */ 38 38 sgint->tcword = (SGINT_TCWORD_CNT2 | SGINT_TCWORD_CALL | ··· 46 46 /* Latch and spin until top byte of counter2 is zero */ 47 47 do { 48 48 writeb(SGINT_TCWORD_CNT2 | SGINT_TCWORD_CLAT, &sgint->tcword); 49 - lsb = readb(&sgint->tcnt2); 49 + (void) readb(&sgint->tcnt2); 50 50 msb = readb(&sgint->tcnt2); 51 51 ct1 = read_c0_count(); 52 52 } while (msb);
+1 -2
arch/mips/sgi-ip27/ip27-hubio.c
··· 29 29 unsigned long xtalk_addr, size_t size) 30 30 { 31 31 nasid_t nasid = COMPACT_TO_NASID_NODEID(cnode); 32 - volatile hubreg_t junk; 33 32 unsigned i; 34 33 35 34 /* use small-window mapping if possible */ ··· 63 64 * after we write it. 64 65 */ 65 66 IIO_ITTE_PUT(nasid, i, HUB_PIO_MAP_TO_MEM, widget, xtalk_addr); 66 - junk = HUB_L(IIO_ITTE_GET(nasid, i)); 67 + (void) HUB_L(IIO_ITTE_GET(nasid, i)); 67 68 68 69 return NODE_BWIN_BASE(nasid, widget) + (xtalk_addr % BWIN_SIZE); 69 70 }
-3
arch/mips/sgi-ip27/ip27-klnuma.c
··· 54 54 55 55 static __init void set_ktext_source(nasid_t client_nasid, nasid_t server_nasid) 56 56 { 57 - cnodeid_t client_cnode; 58 57 kern_vars_t *kvp; 59 - 60 - client_cnode = NASID_TO_COMPACT_NODEID(client_nasid); 61 58 62 59 kvp = &hub_data(client_nasid)->kern_vars; 63 60
+2 -2
arch/mips/sni/time.c
··· 95 95 static __init unsigned long dosample(void) 96 96 { 97 97 u32 ct0, ct1; 98 - volatile u8 msb, lsb; 98 + volatile u8 msb; 99 99 100 100 /* Start the counter. */ 101 101 outb_p(0x34, 0x43); ··· 108 108 /* Latch and spin until top byte of counter0 is zero */ 109 109 do { 110 110 outb(0x00, 0x43); 111 - lsb = inb(0x40); 111 + (void) inb(0x40); 112 112 msb = inb(0x40); 113 113 ct1 = read_c0_count(); 114 114 } while (msb);
+3 -1
arch/parisc/mm/init.c
··· 266 266 } 267 267 memset(pfnnid_map, 0xff, sizeof(pfnnid_map)); 268 268 269 - for (i = 0; i < npmem_ranges; i++) 269 + for (i = 0; i < npmem_ranges; i++) { 270 + node_set_state(i, N_NORMAL_MEMORY); 270 271 node_set_online(i); 272 + } 271 273 #endif 272 274 273 275 /*
+2 -2
arch/powerpc/include/asm/8xx_immap.h
··· 393 393 uint fec_addr_low; /* lower 32 bits of station address */ 394 394 ushort fec_addr_high; /* upper 16 bits of station address */ 395 395 ushort res1; /* reserved */ 396 - uint fec_hash_table_high; /* upper 32-bits of hash table */ 397 - uint fec_hash_table_low; /* lower 32-bits of hash table */ 396 + uint fec_grp_hash_table_high; /* upper 32-bits of hash table */ 397 + uint fec_grp_hash_table_low; /* lower 32-bits of hash table */ 398 398 uint fec_r_des_start; /* beginning of Rx descriptor ring */ 399 399 uint fec_x_des_start; /* beginning of Tx descriptor ring */ 400 400 uint fec_r_buff_size; /* Rx buffer size */
+1 -1
arch/powerpc/include/asm/uninorth.h
··· 60 60 * 61 61 * Obviously, the GART is not cache coherent and so any change to it 62 62 * must be flushed to memory (or maybe just make the GART space non 63 - * cachable). AGP memory itself does't seem to be cache coherent neither. 63 + * cachable). AGP memory itself doesn't seem to be cache coherent neither. 64 64 * 65 65 * In order to invalidate the GART (which is probably necessary to inval 66 66 * the bridge internal TLBs), the following sequence has to be written,
+11 -1
arch/powerpc/kernel/ptrace.c
··· 933 933 if (data && !(data & DABR_TRANSLATION)) 934 934 return -EIO; 935 935 #ifdef CONFIG_HAVE_HW_BREAKPOINT 936 + if (ptrace_get_breakpoints(task) < 0) 937 + return -ESRCH; 938 + 936 939 bp = thread->ptrace_bps[0]; 937 940 if ((!data) || !(data & (DABR_DATA_WRITE | DABR_DATA_READ))) { 938 941 if (bp) { 939 942 unregister_hw_breakpoint(bp); 940 943 thread->ptrace_bps[0] = NULL; 941 944 } 945 + ptrace_put_breakpoints(task); 942 946 return 0; 943 947 } 944 948 if (bp) { ··· 952 948 (DABR_DATA_WRITE | DABR_DATA_READ), 953 949 &attr.bp_type); 954 950 ret = modify_user_hw_breakpoint(bp, &attr); 955 - if (ret) 951 + if (ret) { 952 + ptrace_put_breakpoints(task); 956 953 return ret; 954 + } 957 955 thread->ptrace_bps[0] = bp; 956 + ptrace_put_breakpoints(task); 958 957 thread->dabr = data; 959 958 return 0; 960 959 } ··· 972 965 ptrace_triggered, task); 973 966 if (IS_ERR(bp)) { 974 967 thread->ptrace_bps[0] = NULL; 968 + ptrace_put_breakpoints(task); 975 969 return PTR_ERR(bp); 976 970 } 971 + 972 + ptrace_put_breakpoints(task); 977 973 978 974 #endif /* CONFIG_HAVE_HW_BREAKPOINT */ 979 975
+5 -2
arch/powerpc/platforms/83xx/suspend.c
··· 318 318 .end = mpc83xx_suspend_end, 319 319 }; 320 320 321 + static struct of_device_id pmc_match[]; 321 322 static int pmc_probe(struct platform_device *ofdev) 322 323 { 324 + const struct of_device_id *match; 323 325 struct device_node *np = ofdev->dev.of_node; 324 326 struct resource res; 325 327 struct pmc_type *type; 326 328 int ret = 0; 327 329 328 - if (!ofdev->dev.of_match) 330 + match = of_match_device(pmc_match, &ofdev->dev); 331 + if (!match) 329 332 return -EINVAL; 330 333 331 - type = ofdev->dev.of_match->data; 334 + type = match->data; 332 335 333 336 if (!of_device_is_available(np)) 334 337 return -ENODEV;
+5 -2
arch/powerpc/sysdev/fsl_msi.c
··· 304 304 return 0; 305 305 } 306 306 307 + static const struct of_device_id fsl_of_msi_ids[]; 307 308 static int __devinit fsl_of_msi_probe(struct platform_device *dev) 308 309 { 310 + const struct of_device_id *match; 309 311 struct fsl_msi *msi; 310 312 struct resource res; 311 313 int err, i, j, irq_index, count; ··· 318 316 u32 offset; 319 317 static const u32 all_avail[] = { 0, NR_MSI_IRQS }; 320 318 321 - if (!dev->dev.of_match) 319 + match = of_match_device(fsl_of_msi_ids, &dev->dev); 320 + if (!match) 322 321 return -EINVAL; 323 - features = dev->dev.of_match->data; 322 + features = match->data; 324 323 325 324 printk(KERN_DEBUG "Setting up Freescale MSI support\n"); 326 325
+1 -1
arch/s390/crypto/prng.c
··· 76 76 77 77 /* Add the entropy */ 78 78 while (nbytes >= 8) { 79 - *((__u64 *)parm_block) ^= *((__u64 *)buf+i*8); 79 + *((__u64 *)parm_block) ^= *((__u64 *)(buf+i)); 80 80 prng_add_entropy(); 81 81 i += 8; 82 82 nbytes -= 8;
+15 -2
arch/s390/include/asm/diag.h
··· 9 9 #define _ASM_S390_DIAG_H 10 10 11 11 /* 12 - * Diagnose 10: Release pages 12 + * Diagnose 10: Release page range 13 13 */ 14 - extern void diag10(unsigned long addr); 14 + static inline void diag10_range(unsigned long start_pfn, unsigned long num_pfn) 15 + { 16 + unsigned long start_addr, end_addr; 17 + 18 + start_addr = start_pfn << PAGE_SHIFT; 19 + end_addr = (start_pfn + num_pfn - 1) << PAGE_SHIFT; 20 + 21 + asm volatile( 22 + "0: diag %0,%1,0x10\n" 23 + "1:\n" 24 + EX_TABLE(0b, 1b) 25 + EX_TABLE(1b, 1b) 26 + : : "a" (start_addr), "a" (end_addr)); 27 + } 15 28 16 29 /* 17 30 * Diagnose 14: Input spool file manipulation
+1 -1
arch/s390/include/asm/mmu_context.h
··· 23 23 #ifdef CONFIG_64BIT 24 24 mm->context.asce_bits |= _ASCE_TYPE_REGION3; 25 25 #endif 26 - if (current->mm->context.alloc_pgste) { 26 + if (current->mm && current->mm->context.alloc_pgste) { 27 27 /* 28 28 * alloc_pgste indicates, that any NEW context will be created 29 29 * with extended page tables. The old context is unchanged. The
-21
arch/s390/kernel/diag.c
··· 9 9 #include <asm/diag.h> 10 10 11 11 /* 12 - * Diagnose 10: Release pages 13 - */ 14 - void diag10(unsigned long addr) 15 - { 16 - if (addr >= 0x7ff00000) 17 - return; 18 - asm volatile( 19 - #ifdef CONFIG_64BIT 20 - " sam31\n" 21 - " diag %0,%0,0x10\n" 22 - "0: sam64\n" 23 - #else 24 - " diag %0,%0,0x10\n" 25 - "0:\n" 26 - #endif 27 - EX_TABLE(0b, 0b) 28 - : : "a" (addr)); 29 - } 30 - EXPORT_SYMBOL(diag10); 31 - 32 - /* 33 12 * Diagnose 14: Input spool file manipulation 34 13 */ 35 14 int diag14(unsigned long rx, unsigned long ry1, unsigned long subcode)
+1
arch/s390/kernel/dis.c
··· 672 672 { "rp", 0x77, INSTR_S_RD }, 673 673 { "stcke", 0x78, INSTR_S_RD }, 674 674 { "sacf", 0x79, INSTR_S_RD }, 675 + { "spp", 0x80, INSTR_S_RD }, 675 676 { "stsi", 0x7d, INSTR_S_RD }, 676 677 { "srnm", 0x99, INSTR_S_RD }, 677 678 { "stfpc", 0x9c, INSTR_S_RD },
+1 -1
arch/s390/kernel/entry.S
··· 836 836 stosm __SF_EMPTY(%r15),0x04 # now we can turn dat on 837 837 basr %r14,0 838 838 l %r14,restart_addr-.(%r14) 839 - br %r14 # branch to start_secondary 839 + basr %r14,%r14 # branch to start_secondary 840 840 restart_addr: 841 841 .long start_secondary 842 842 .align 8
+1 -1
arch/s390/kernel/entry64.S
··· 841 841 mvc __LC_SYSTEM_TIMER(8),__TI_system_timer(%r1) 842 842 xc __LC_STEAL_TIMER(8),__LC_STEAL_TIMER 843 843 stosm __SF_EMPTY(%r15),0x04 # now we can turn dat on 844 - jg start_secondary 844 + brasl %r14,start_secondary 845 845 .align 8 846 846 restart_vtime: 847 847 .long 0x7fffffff,0xffffffff
+2 -2
arch/s390/kvm/sie64a.S
··· 48 48 tm __TI_flags+7(%r2),_TIF_EXIT_SIE 49 49 jz 0f 50 50 larl %r2,sie_exit # work pending, leave sie 51 - stg %r2,__LC_RETURN_PSW+8 51 + stg %r2,SPI_PSW+8(0,%r15) 52 52 br %r14 53 53 0: larl %r2,sie_reenter # re-enter with guest id 54 - stg %r2,__LC_RETURN_PSW+8 54 + stg %r2,SPI_PSW+8(0,%r15) 55 55 1: br %r14 56 56 57 57 /*
+1 -1
arch/s390/mm/cmm.c
··· 91 91 } else 92 92 free_page((unsigned long) npa); 93 93 } 94 - diag10(addr); 94 + diag10_range(addr >> PAGE_SHIFT, 1); 95 95 pa->pages[pa->index++] = addr; 96 96 (*counter)++; 97 97 spin_unlock(&cmm_lock);
+3 -3
arch/s390/mm/fault.c
··· 543 543 struct task_struct *tsk; 544 544 __u16 subcode; 545 545 546 - kstat_cpu(smp_processor_id()).irqs[EXTINT_PFL]++; 547 546 /* 548 547 * Get the external interruption subcode & pfault 549 548 * initial/completion signal bit. VM stores this ··· 552 553 subcode = ext_int_code >> 16; 553 554 if ((subcode & 0xff00) != __SUBCODE_MASK) 554 555 return; 556 + kstat_cpu(smp_processor_id()).irqs[EXTINT_PFL]++; 555 557 556 558 /* 557 559 * Get the token (= address of the task structure of the affected task). 558 560 */ 559 561 #ifdef CONFIG_64BIT 560 - tsk = *(struct task_struct **) param64; 562 + tsk = (struct task_struct *) param64; 561 563 #else 562 - tsk = *(struct task_struct **) param32; 564 + tsk = (struct task_struct *) param32; 563 565 #endif 564 566 565 567 if (subcode & 0x0080) {
+3 -2
arch/s390/mm/pageattr.c
··· 24 24 WARN_ON_ONCE(1); 25 25 continue; 26 26 } 27 - ptep = pte_offset_kernel(pmdp, addr + i * PAGE_SIZE); 27 + ptep = pte_offset_kernel(pmdp, addr); 28 28 29 29 pte = *ptep; 30 30 pte = set(pte); 31 - ptep_invalidate(&init_mm, addr + i * PAGE_SIZE, ptep); 31 + ptep_invalidate(&init_mm, addr, ptep); 32 32 *ptep = pte; 33 + addr += PAGE_SIZE; 33 34 } 34 35 } 35 36
+4 -10
arch/s390/oprofile/hwsampler.c
··· 1021 1021 return rc; 1022 1022 } 1023 1023 1024 - long hwsampler_query_min_interval(void) 1024 + unsigned long hwsampler_query_min_interval(void) 1025 1025 { 1026 - if (min_sampler_rate) 1027 - return min_sampler_rate; 1028 - else 1029 - return -EINVAL; 1026 + return min_sampler_rate; 1030 1027 } 1031 1028 1032 - long hwsampler_query_max_interval(void) 1029 + unsigned long hwsampler_query_max_interval(void) 1033 1030 { 1034 - if (max_sampler_rate) 1035 - return max_sampler_rate; 1036 - else 1037 - return -EINVAL; 1031 + return max_sampler_rate; 1038 1032 } 1039 1033 1040 1034 unsigned long hwsampler_get_sample_overflow_count(unsigned int cpu)
+2 -2
arch/s390/oprofile/hwsampler.h
··· 102 102 int hwsampler_shutdown(void); 103 103 int hwsampler_allocate(unsigned long sdbt, unsigned long sdb); 104 104 int hwsampler_deallocate(void); 105 - long hwsampler_query_min_interval(void); 106 - long hwsampler_query_max_interval(void); 105 + unsigned long hwsampler_query_min_interval(void); 106 + unsigned long hwsampler_query_max_interval(void); 107 107 int hwsampler_start_all(unsigned long interval); 108 108 int hwsampler_stop_all(void); 109 109 int hwsampler_deactivate(unsigned int cpu);
+2 -6
arch/s390/oprofile/init.c
··· 145 145 * create hwsampler files only if hwsampler_setup() succeeds. 146 146 */ 147 147 oprofile_min_interval = hwsampler_query_min_interval(); 148 - if (oprofile_min_interval < 0) { 149 - oprofile_min_interval = 0; 148 + if (oprofile_min_interval == 0) 150 149 return -ENODEV; 151 - } 152 150 oprofile_max_interval = hwsampler_query_max_interval(); 153 - if (oprofile_max_interval < 0) { 154 - oprofile_max_interval = 0; 151 + if (oprofile_max_interval == 0) 155 152 return -ENODEV; 156 - } 157 153 158 154 if (oprofile_timer_init(ops)) 159 155 return -ENODEV;
+4
arch/sh/kernel/ptrace_32.c
··· 117 117 118 118 set_tsk_thread_flag(child, TIF_SINGLESTEP); 119 119 120 + if (ptrace_get_breakpoints(child) < 0) 121 + return; 122 + 120 123 set_single_step(child, pc); 124 + ptrace_put_breakpoints(child); 121 125 } 122 126 123 127 void user_disable_single_step(struct task_struct *child)
+1 -1
arch/sparc/kernel/apc.c
··· 165 165 return 0; 166 166 } 167 167 168 - static struct of_device_id __initdata apc_match[] = { 168 + static struct of_device_id apc_match[] = { 169 169 { 170 170 .name = APC_OBPNAME, 171 171 },
+4 -1
arch/sparc/kernel/pci_sabre.c
··· 452 452 sabre_scan_bus(pbm, &op->dev); 453 453 } 454 454 455 + static const struct of_device_id sabre_match[]; 455 456 static int __devinit sabre_probe(struct platform_device *op) 456 457 { 458 + const struct of_device_id *match; 457 459 const struct linux_prom64_registers *pr_regs; 458 460 struct device_node *dp = op->dev.of_node; 459 461 struct pci_pbm_info *pbm; ··· 465 463 const u32 *vdma; 466 464 u64 clear_irq; 467 465 468 - hummingbird_p = op->dev.of_match && (op->dev.of_match->data != NULL); 466 + match = of_match_device(sabre_match, &op->dev); 467 + hummingbird_p = match && (match->data != NULL); 469 468 if (!hummingbird_p) { 470 469 struct device_node *cpu_dp; 471 470
+6 -2
arch/sparc/kernel/pci_schizo.c
··· 1458 1458 return err; 1459 1459 } 1460 1460 1461 + static const struct of_device_id schizo_match[]; 1461 1462 static int __devinit schizo_probe(struct platform_device *op) 1462 1463 { 1463 - if (!op->dev.of_match) 1464 + const struct of_device_id *match; 1465 + 1466 + match = of_match_device(schizo_match, &op->dev); 1467 + if (!match) 1464 1468 return -EINVAL; 1465 - return __schizo_init(op, (unsigned long) op->dev.of_match->data); 1469 + return __schizo_init(op, (unsigned long)match->data); 1466 1470 } 1467 1471 1468 1472 /* The ordering of this table is very important. Some Tomatillo
+1 -1
arch/sparc/kernel/pmc.c
··· 69 69 return 0; 70 70 } 71 71 72 - static struct of_device_id __initdata pmc_match[] = { 72 + static struct of_device_id pmc_match[] = { 73 73 { 74 74 .name = PMC_OBPNAME, 75 75 },
+7 -3
arch/sparc/kernel/smp_32.c
··· 53 53 void __cpuinit smp_store_cpu_info(int id) 54 54 { 55 55 int cpu_node; 56 + int mid; 56 57 57 58 cpu_data(id).udelay_val = loops_per_jiffy; 58 59 ··· 61 60 cpu_data(id).clock_tick = prom_getintdefault(cpu_node, 62 61 "clock-frequency", 0); 63 62 cpu_data(id).prom_node = cpu_node; 64 - cpu_data(id).mid = cpu_get_hwmid(cpu_node); 63 + mid = cpu_get_hwmid(cpu_node); 65 64 66 - if (cpu_data(id).mid < 0) 67 - panic("No MID found for CPU%d at node 0x%08d", id, cpu_node); 65 + if (mid < 0) { 66 + printk(KERN_NOTICE "No MID found for CPU%d at node 0x%08d", id, cpu_node); 67 + mid = 0; 68 + } 69 + cpu_data(id).mid = mid; 68 70 } 69 71 70 72 void __init smp_cpus_done(unsigned int max_cpus)
+1 -1
arch/sparc/kernel/time_32.c
··· 168 168 return 0; 169 169 } 170 170 171 - static struct of_device_id __initdata clock_match[] = { 171 + static struct of_device_id clock_match[] = { 172 172 { 173 173 .name = "eeprom", 174 174 },
+9 -3
arch/sparc/lib/checksum_32.S
··· 289 289 290 290 /* Also, handle the alignment code out of band. */ 291 291 cc_dword_align: 292 - cmp %g1, 6 293 - bl,a ccte 292 + cmp %g1, 16 293 + bge 1f 294 + srl %g1, 1, %o3 295 + 2: cmp %o3, 0 296 + be,a ccte 294 297 andcc %g1, 0xf, %o3 295 - andcc %o0, 0x1, %g0 298 + andcc %o3, %o0, %g0 ! Check %o0 only (%o1 has the same last 2 bits) 299 + be,a 2b 300 + srl %o3, 1, %o3 301 + 1: andcc %o0, 0x1, %g0 296 302 bne ccslow 297 303 andcc %o0, 0x2, %g0 298 304 be 1f
+1 -1
arch/um/Kconfig.um
··· 47 47 48 48 config HPPFS 49 49 tristate "HoneyPot ProcFS (EXPERIMENTAL)" 50 - depends on EXPERIMENTAL 50 + depends on EXPERIMENTAL && PROC_FS 51 51 help 52 52 hppfs (HoneyPot ProcFS) is a filesystem which allows UML /proc 53 53 entries to be overridden, removed, or fabricated from the host.
+4 -1
arch/um/include/asm/thread_info.h
··· 49 49 { 50 50 struct thread_info *ti; 51 51 unsigned long mask = THREAD_SIZE - 1; 52 - ti = (struct thread_info *) (((unsigned long) &ti) & ~mask); 52 + void *p; 53 + 54 + asm volatile ("" : "=r" (p) : "0" (&ti)); 55 + ti = (struct thread_info *) (((unsigned long)p) & ~mask); 53 56 return ti; 54 57 } 55 58
+22 -1
arch/um/os-Linux/util.c
··· 5 5 6 6 #include <stdio.h> 7 7 #include <stdlib.h> 8 + #include <unistd.h> 8 9 #include <errno.h> 9 10 #include <signal.h> 10 11 #include <string.h> ··· 76 75 host.release, host.version, host.machine); 77 76 } 78 77 78 + /* 79 + * We cannot use glibc's abort(). It makes use of tgkill() which 80 + * has no effect within UML's kernel threads. 81 + * After that glibc would execute an invalid instruction to kill 82 + * the calling process and UML crashes with SIGSEGV. 83 + */ 84 + static inline void __attribute__ ((noreturn)) uml_abort(void) 85 + { 86 + sigset_t sig; 87 + 88 + fflush(NULL); 89 + 90 + if (!sigemptyset(&sig) && !sigaddset(&sig, SIGABRT)) 91 + sigprocmask(SIG_UNBLOCK, &sig, 0); 92 + 93 + for (;;) 94 + if (kill(getpid(), SIGABRT) < 0) 95 + exit(127); 96 + } 97 + 79 98 void os_dump_core(void) 80 99 { 81 100 int pid; ··· 137 116 while ((pid = waitpid(-1, NULL, WNOHANG | __WALL)) > 0) 138 117 os_kill_ptraced_process(pid, 0); 139 118 140 - abort(); 119 + uml_abort(); 141 120 }
+1 -1
arch/um/sys-i386/Makefile
··· 4 4 5 5 obj-y = bug.o bugs.o checksum.o delay.o fault.o ksyms.o ldt.o ptrace.o \ 6 6 ptrace_user.o setjmp.o signal.o stub.o stub_segv.o syscalls.o sysrq.o \ 7 - sys_call_table.o tls.o 7 + sys_call_table.o tls.o atomic64_cx8_32.o 8 8 9 9 obj-$(CONFIG_BINFMT_ELF) += elfcore.o 10 10
+225
arch/um/sys-i386/atomic64_cx8_32.S
··· 1 + /* 2 + * atomic64_t for 586+ 3 + * 4 + * Copied from arch/x86/lib/atomic64_cx8_32.S 5 + * 6 + * Copyright © 2010 Luca Barbieri 7 + * 8 + * This program is free software; you can redistribute it and/or modify 9 + * it under the terms of the GNU General Public License as published by 10 + * the Free Software Foundation; either version 2 of the License, or 11 + * (at your option) any later version. 12 + * 13 + */ 14 + 15 + #include <linux/linkage.h> 16 + #include <asm/alternative-asm.h> 17 + #include <asm/dwarf2.h> 18 + 19 + .macro SAVE reg 20 + pushl_cfi %\reg 21 + CFI_REL_OFFSET \reg, 0 22 + .endm 23 + 24 + .macro RESTORE reg 25 + popl_cfi %\reg 26 + CFI_RESTORE \reg 27 + .endm 28 + 29 + .macro read64 reg 30 + movl %ebx, %eax 31 + movl %ecx, %edx 32 + /* we need LOCK_PREFIX since otherwise cmpxchg8b always does the write */ 33 + LOCK_PREFIX 34 + cmpxchg8b (\reg) 35 + .endm 36 + 37 + ENTRY(atomic64_read_cx8) 38 + CFI_STARTPROC 39 + 40 + read64 %ecx 41 + ret 42 + CFI_ENDPROC 43 + ENDPROC(atomic64_read_cx8) 44 + 45 + ENTRY(atomic64_set_cx8) 46 + CFI_STARTPROC 47 + 48 + 1: 49 + /* we don't need LOCK_PREFIX since aligned 64-bit writes 50 + * are atomic on 586 and newer */ 51 + cmpxchg8b (%esi) 52 + jne 1b 53 + 54 + ret 55 + CFI_ENDPROC 56 + ENDPROC(atomic64_set_cx8) 57 + 58 + ENTRY(atomic64_xchg_cx8) 59 + CFI_STARTPROC 60 + 61 + movl %ebx, %eax 62 + movl %ecx, %edx 63 + 1: 64 + LOCK_PREFIX 65 + cmpxchg8b (%esi) 66 + jne 1b 67 + 68 + ret 69 + CFI_ENDPROC 70 + ENDPROC(atomic64_xchg_cx8) 71 + 72 + .macro addsub_return func ins insc 73 + ENTRY(atomic64_\func\()_return_cx8) 74 + CFI_STARTPROC 75 + SAVE ebp 76 + SAVE ebx 77 + SAVE esi 78 + SAVE edi 79 + 80 + movl %eax, %esi 81 + movl %edx, %edi 82 + movl %ecx, %ebp 83 + 84 + read64 %ebp 85 + 1: 86 + movl %eax, %ebx 87 + movl %edx, %ecx 88 + \ins\()l %esi, %ebx 89 + \insc\()l %edi, %ecx 90 + LOCK_PREFIX 91 + cmpxchg8b (%ebp) 92 + jne 1b 93 + 94 + 10: 95 + movl %ebx, %eax 96 + movl %ecx, %edx 97 + RESTORE edi 98 + 
RESTORE esi 99 + RESTORE ebx 100 + RESTORE ebp 101 + ret 102 + CFI_ENDPROC 103 + ENDPROC(atomic64_\func\()_return_cx8) 104 + .endm 105 + 106 + addsub_return add add adc 107 + addsub_return sub sub sbb 108 + 109 + .macro incdec_return func ins insc 110 + ENTRY(atomic64_\func\()_return_cx8) 111 + CFI_STARTPROC 112 + SAVE ebx 113 + 114 + read64 %esi 115 + 1: 116 + movl %eax, %ebx 117 + movl %edx, %ecx 118 + \ins\()l $1, %ebx 119 + \insc\()l $0, %ecx 120 + LOCK_PREFIX 121 + cmpxchg8b (%esi) 122 + jne 1b 123 + 124 + 10: 125 + movl %ebx, %eax 126 + movl %ecx, %edx 127 + RESTORE ebx 128 + ret 129 + CFI_ENDPROC 130 + ENDPROC(atomic64_\func\()_return_cx8) 131 + .endm 132 + 133 + incdec_return inc add adc 134 + incdec_return dec sub sbb 135 + 136 + ENTRY(atomic64_dec_if_positive_cx8) 137 + CFI_STARTPROC 138 + SAVE ebx 139 + 140 + read64 %esi 141 + 1: 142 + movl %eax, %ebx 143 + movl %edx, %ecx 144 + subl $1, %ebx 145 + sbb $0, %ecx 146 + js 2f 147 + LOCK_PREFIX 148 + cmpxchg8b (%esi) 149 + jne 1b 150 + 151 + 2: 152 + movl %ebx, %eax 153 + movl %ecx, %edx 154 + RESTORE ebx 155 + ret 156 + CFI_ENDPROC 157 + ENDPROC(atomic64_dec_if_positive_cx8) 158 + 159 + ENTRY(atomic64_add_unless_cx8) 160 + CFI_STARTPROC 161 + SAVE ebp 162 + SAVE ebx 163 + /* these just push these two parameters on the stack */ 164 + SAVE edi 165 + SAVE esi 166 + 167 + movl %ecx, %ebp 168 + movl %eax, %esi 169 + movl %edx, %edi 170 + 171 + read64 %ebp 172 + 1: 173 + cmpl %eax, 0(%esp) 174 + je 4f 175 + 2: 176 + movl %eax, %ebx 177 + movl %edx, %ecx 178 + addl %esi, %ebx 179 + adcl %edi, %ecx 180 + LOCK_PREFIX 181 + cmpxchg8b (%ebp) 182 + jne 1b 183 + 184 + movl $1, %eax 185 + 3: 186 + addl $8, %esp 187 + CFI_ADJUST_CFA_OFFSET -8 188 + RESTORE ebx 189 + RESTORE ebp 190 + ret 191 + 4: 192 + cmpl %edx, 4(%esp) 193 + jne 2b 194 + xorl %eax, %eax 195 + jmp 3b 196 + CFI_ENDPROC 197 + ENDPROC(atomic64_add_unless_cx8) 198 + 199 + ENTRY(atomic64_inc_not_zero_cx8) 200 + CFI_STARTPROC 201 + SAVE ebx 202 + 203 + read64 
%esi 204 + 1: 205 + testl %eax, %eax 206 + je 4f 207 + 2: 208 + movl %eax, %ebx 209 + movl %edx, %ecx 210 + addl $1, %ebx 211 + adcl $0, %ecx 212 + LOCK_PREFIX 213 + cmpxchg8b (%esi) 214 + jne 1b 215 + 216 + movl $1, %eax 217 + 3: 218 + RESTORE ebx 219 + ret 220 + 4: 221 + testl %edx, %edx 222 + jne 2b 223 + jmp 3b 224 + CFI_ENDPROC 225 + ENDPROC(atomic64_inc_not_zero_cx8)
+1 -1
arch/x86/boot/memory.c
··· 91 91 if (oreg.ax > 15*1024) { 92 92 return -1; /* Bogus! */ 93 93 } else if (oreg.ax == 15*1024) { 94 - boot_params.alt_mem_k = (oreg.dx << 6) + oreg.ax; 94 + boot_params.alt_mem_k = (oreg.bx << 6) + oreg.ax; 95 95 } else { 96 96 /* 97 97 * This ignores memory above 16MB if we have a memory
+1
arch/x86/include/asm/apicdef.h
··· 78 78 #define APIC_DEST_LOGICAL 0x00800 79 79 #define APIC_DEST_PHYSICAL 0x00000 80 80 #define APIC_DM_FIXED 0x00000 81 + #define APIC_DM_FIXED_MASK 0x00700 81 82 #define APIC_DM_LOWEST 0x00100 82 83 #define APIC_DM_SMI 0x00200 83 84 #define APIC_DM_REMRD 0x00300
+11 -11
arch/x86/include/asm/gart.h
··· 66 66 * Don't enable translation but enable GART IO and CPU accesses. 67 67 * Also, set DISTLBWALKPRB since GART tables memory is UC. 68 68 */ 69 - ctl = DISTLBWALKPRB | order << 1; 69 + ctl = order << 1; 70 70 71 71 pci_write_config_dword(dev, AMD64_GARTAPERTURECTL, ctl); 72 72 } ··· 75 75 { 76 76 u32 tmp, ctl; 77 77 78 - /* address of the mappings table */ 79 - addr >>= 12; 80 - tmp = (u32) addr<<4; 81 - tmp &= ~0xf; 82 - pci_write_config_dword(dev, AMD64_GARTTABLEBASE, tmp); 78 + /* address of the mappings table */ 79 + addr >>= 12; 80 + tmp = (u32) addr<<4; 81 + tmp &= ~0xf; 82 + pci_write_config_dword(dev, AMD64_GARTTABLEBASE, tmp); 83 83 84 - /* Enable GART translation for this hammer. */ 85 - pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &ctl); 86 - ctl |= GARTEN; 87 - ctl &= ~(DISGARTCPU | DISGARTIO); 88 - pci_write_config_dword(dev, AMD64_GARTAPERTURECTL, ctl); 84 + /* Enable GART translation for this hammer. */ 85 + pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &ctl); 86 + ctl |= GARTEN | DISTLBWALKPRB; 87 + ctl &= ~(DISGARTCPU | DISGARTIO); 88 + pci_write_config_dword(dev, AMD64_GARTAPERTURECTL, ctl); 89 89 } 90 90 91 91 static inline int aperture_valid(u64 aper_base, u32 aper_size, u32 min_size)
+1 -1
arch/x86/include/asm/io_apic.h
··· 150 150 extern void ioapic_and_gsi_init(void); 151 151 extern void ioapic_insert_resources(void); 152 152 153 - int io_apic_setup_irq_pin(unsigned int irq, int node, struct io_apic_irq_attr *attr); 153 + int io_apic_setup_irq_pin_once(unsigned int irq, int node, struct io_apic_irq_attr *attr); 154 154 155 155 extern struct IO_APIC_route_entry **alloc_ioapic_entries(void); 156 156 extern void free_ioapic_entries(struct IO_APIC_route_entry **ioapic_entries);
+1 -1
arch/x86/include/asm/numa.h
··· 51 51 #endif /* CONFIG_NUMA */ 52 52 53 53 #ifdef CONFIG_DEBUG_PER_CPU_MAPS 54 - struct cpumask __cpuinit *debug_cpumask_set_cpu(int cpu, int enable); 54 + void debug_cpumask_set_cpu(int cpu, int node, bool enable); 55 55 #endif 56 56 57 57 #endif /* _ASM_X86_NUMA_H */
+1
arch/x86/include/asm/pgtable_types.h
··· 299 299 /* Install a pte for a particular vaddr in kernel space. */ 300 300 void set_pte_vaddr(unsigned long vaddr, pte_t pte); 301 301 302 + extern void native_pagetable_reserve(u64 start, u64 end); 302 303 #ifdef CONFIG_X86_32 303 304 extern void native_pagetable_setup_start(pgd_t *base); 304 305 extern void native_pagetable_setup_done(pgd_t *base);
+13 -4
arch/x86/include/asm/uv/uv_bau.h
··· 94 94 /* after this # consecutive successes, bump up the throttle if it was lowered */ 95 95 #define COMPLETE_THRESHOLD 5 96 96 97 + #define UV_LB_SUBNODEID 0x10 98 + 97 99 /* 98 100 * number of entries in the destination side payload queue 99 101 */ ··· 126 124 * The distribution specification (32 bytes) is interpreted as a 256-bit 127 125 * distribution vector. Adjacent bits correspond to consecutive even numbered 128 126 * nodeIDs. The result of adding the index of a given bit to the 15-bit 129 - * 'base_dest_nodeid' field of the header corresponds to the 127 + * 'base_dest_nasid' field of the header corresponds to the 130 128 * destination nodeID associated with that specified bit. 131 129 */ 132 130 struct bau_target_uvhubmask { ··· 178 176 struct bau_msg_header { 179 177 unsigned int dest_subnodeid:6; /* must be 0x10, for the LB */ 180 178 /* bits 5:0 */ 181 - unsigned int base_dest_nodeid:15; /* nasid of the */ 179 + unsigned int base_dest_nasid:15; /* nasid of the */ 182 180 /* bits 20:6 */ /* first bit in uvhub map */ 183 181 unsigned int command:8; /* message type */ 184 182 /* bits 28:21 */ ··· 380 378 unsigned long d_rcanceled; /* number of messages canceled by resets */ 381 379 }; 382 380 381 + struct hub_and_pnode { 382 + short uvhub; 383 + short pnode; 384 + }; 383 385 /* 384 386 * one per-cpu; to locate the software tables 385 387 */ ··· 405 399 int baudisabled; 406 400 int set_bau_off; 407 401 short cpu; 402 + short osnode; 408 403 short uvhub_cpu; 409 404 short uvhub; 410 405 short cpus_in_socket; 411 406 short cpus_in_uvhub; 407 + short partition_base_pnode; 412 408 unsigned short message_number; 413 409 unsigned short uvhub_quiesce; 414 410 short socket_acknowledge_count[DEST_Q_SIZE]; ··· 430 422 int congested_period; 431 423 cycles_t period_time; 432 424 long period_requests; 425 + struct hub_and_pnode *target_hub_and_pnode; 433 426 }; 434 427 435 428 static inline int bau_uvhub_isset(int uvhub, struct bau_target_uvhubmask *dstp) 436 429 { 
437 430 return constant_test_bit(uvhub, &dstp->bits[0]); 438 431 } 439 - static inline void bau_uvhub_set(int uvhub, struct bau_target_uvhubmask *dstp) 432 + static inline void bau_uvhub_set(int pnode, struct bau_target_uvhubmask *dstp) 440 433 { 441 - __set_bit(uvhub, &dstp->bits[0]); 434 + __set_bit(pnode, &dstp->bits[0]); 442 435 } 443 436 static inline void bau_uvhubs_clear(struct bau_target_uvhubmask *dstp, 444 437 int nbits)
+2
arch/x86/include/asm/uv/uv_hub.h
··· 398 398 unsigned short nr_online_cpus; 399 399 unsigned short pnode; 400 400 short memory_nid; 401 + spinlock_t nmi_lock; 402 + unsigned long nmi_count; 401 403 }; 402 404 extern struct uv_blade_info *uv_blade_info; 403 405 extern short *uv_node_to_blade;
+15 -1
arch/x86/include/asm/uv/uv_mmrs.h
··· 5 5 * 6 6 * SGI UV MMR definitions 7 7 * 8 - * Copyright (C) 2007-2010 Silicon Graphics, Inc. All rights reserved. 8 + * Copyright (C) 2007-2011 Silicon Graphics, Inc. All rights reserved. 9 9 */ 10 10 11 11 #ifndef _ASM_X86_UV_UV_MMRS_H ··· 1099 1099 } s; 1100 1100 }; 1101 1101 1102 + /* ========================================================================= */ 1103 + /* UVH_SCRATCH5 */ 1104 + /* ========================================================================= */ 1105 + #define UVH_SCRATCH5 0x2d0200UL 1106 + #define UVH_SCRATCH5_32 0x00778 1107 + 1108 + #define UVH_SCRATCH5_SCRATCH5_SHFT 0 1109 + #define UVH_SCRATCH5_SCRATCH5_MASK 0xffffffffffffffffUL 1110 + union uvh_scratch5_u { 1111 + unsigned long v; 1112 + struct uvh_scratch5_s { 1113 + unsigned long scratch5 : 64; /* RW, W1CS */ 1114 + } s; 1115 + }; 1102 1116 1103 1117 #endif /* __ASM_UV_MMRS_X86_H__ */
+12
arch/x86/include/asm/x86_init.h
··· 68 68 }; 69 69 70 70 /** 71 + * struct x86_init_mapping - platform specific initial kernel pagetable setup 72 + * @pagetable_reserve: reserve a range of addresses for kernel pagetable usage 73 + * 74 + * For more details on the purpose of this hook, look in 75 + * init_memory_mapping and the commit that added it. 76 + */ 77 + struct x86_init_mapping { 78 + void (*pagetable_reserve)(u64 start, u64 end); 79 + }; 80 + 81 + /** 71 82 * struct x86_init_paging - platform specific paging functions 72 83 * @pagetable_setup_start: platform specific pre paging_init() call 73 84 * @pagetable_setup_done: platform specific post paging_init() call ··· 134 123 struct x86_init_mpparse mpparse; 135 124 struct x86_init_irqs irqs; 136 125 struct x86_init_oem oem; 126 + struct x86_init_mapping mapping; 137 127 struct x86_init_paging paging; 138 128 struct x86_init_timers timers; 139 129 struct x86_init_iommu iommu;
+1 -1
arch/x86/kernel/aperture_64.c
··· 499 499 * Don't enable translation yet but enable GART IO and CPU 500 500 * accesses and set DISTLBWALKPRB since GART table memory is UC. 501 501 */ 502 - u32 ctl = DISTLBWALKPRB | aper_order << 1; 502 + u32 ctl = aper_order << 1; 503 503 504 504 bus = amd_nb_bus_dev_ranges[i].bus; 505 505 dev_base = amd_nb_bus_dev_ranges[i].dev_base;
+5 -5
arch/x86/kernel/apic/io_apic.c
··· 128 128 } 129 129 early_param("noapic", parse_noapic); 130 130 131 - static int io_apic_setup_irq_pin_once(unsigned int irq, int node, 132 - struct io_apic_irq_attr *attr); 131 + static int io_apic_setup_irq_pin(unsigned int irq, int node, 132 + struct io_apic_irq_attr *attr); 133 133 134 134 /* Will be called in mpparse/acpi/sfi codes for saving IRQ info */ 135 135 void mp_save_irq(struct mpc_intsrc *m) ··· 3570 3570 } 3571 3571 #endif /* CONFIG_HT_IRQ */ 3572 3572 3573 - int 3573 + static int 3574 3574 io_apic_setup_irq_pin(unsigned int irq, int node, struct io_apic_irq_attr *attr) 3575 3575 { 3576 3576 struct irq_cfg *cfg = alloc_irq_and_cfg_at(irq, node); ··· 3585 3585 return ret; 3586 3586 } 3587 3587 3588 - static int io_apic_setup_irq_pin_once(unsigned int irq, int node, 3589 - struct io_apic_irq_attr *attr) 3588 + int io_apic_setup_irq_pin_once(unsigned int irq, int node, 3589 + struct io_apic_irq_attr *attr) 3590 3590 { 3591 3591 unsigned int id = attr->ioapic, pin = attr->ioapic_pin; 3592 3592 int ret;
+43 -5
arch/x86/kernel/apic/x2apic_uv_x.c
··· 37 37 #include <asm/smp.h> 38 38 #include <asm/x86_init.h> 39 39 #include <asm/emergency-restart.h> 40 + #include <asm/nmi.h> 41 + 42 + /* BMC sets a bit this MMR non-zero before sending an NMI */ 43 + #define UVH_NMI_MMR UVH_SCRATCH5 44 + #define UVH_NMI_MMR_CLEAR (UVH_NMI_MMR + 8) 45 + #define UV_NMI_PENDING_MASK (1UL << 63) 46 + DEFINE_PER_CPU(unsigned long, cpu_last_nmi_count); 40 47 41 48 DEFINE_PER_CPU(int, x2apic_extra_bits); 42 49 ··· 649 642 */ 650 643 int uv_handle_nmi(struct notifier_block *self, unsigned long reason, void *data) 651 644 { 645 + unsigned long real_uv_nmi; 646 + int bid; 647 + 652 648 if (reason != DIE_NMIUNKNOWN) 653 649 return NOTIFY_OK; 654 650 655 651 if (in_crash_kexec) 656 652 /* do nothing if entering the crash kernel */ 657 653 return NOTIFY_OK; 654 + 658 655 /* 659 - * Use a lock so only one cpu prints at a time 660 - * to prevent intermixed output. 656 + * Each blade has an MMR that indicates when an NMI has been sent 657 + * to cpus on the blade. If an NMI is detected, atomically 658 + * clear the MMR and update a per-blade NMI count used to 659 + * cause each cpu on the blade to notice a new NMI. 660 + */ 661 + bid = uv_numa_blade_id(); 662 + real_uv_nmi = (uv_read_local_mmr(UVH_NMI_MMR) & UV_NMI_PENDING_MASK); 663 + 664 + if (unlikely(real_uv_nmi)) { 665 + spin_lock(&uv_blade_info[bid].nmi_lock); 666 + real_uv_nmi = (uv_read_local_mmr(UVH_NMI_MMR) & UV_NMI_PENDING_MASK); 667 + if (real_uv_nmi) { 668 + uv_blade_info[bid].nmi_count++; 669 + uv_write_local_mmr(UVH_NMI_MMR_CLEAR, UV_NMI_PENDING_MASK); 670 + } 671 + spin_unlock(&uv_blade_info[bid].nmi_lock); 672 + } 673 + 674 + if (likely(__get_cpu_var(cpu_last_nmi_count) == uv_blade_info[bid].nmi_count)) 675 + return NOTIFY_DONE; 676 + 677 + __get_cpu_var(cpu_last_nmi_count) = uv_blade_info[bid].nmi_count; 678 + 679 + /* 680 + * Use a lock so only one cpu prints at a time. 681 + * This prevents intermixed output. 
661 682 */ 662 683 spin_lock(&uv_nmi_lock); 663 - pr_info("NMI stack dump cpu %u:\n", smp_processor_id()); 684 + pr_info("UV NMI stack dump cpu %u:\n", smp_processor_id()); 664 685 dump_stack(); 665 686 spin_unlock(&uv_nmi_lock); 666 687 ··· 696 661 } 697 662 698 663 static struct notifier_block uv_dump_stack_nmi_nb = { 699 - .notifier_call = uv_handle_nmi 664 + .notifier_call = uv_handle_nmi, 665 + .priority = NMI_LOCAL_LOW_PRIOR - 1, 700 666 }; 701 667 702 668 void uv_register_nmi_notifier(void) ··· 756 720 printk(KERN_DEBUG "UV: Found %d blades\n", uv_num_possible_blades()); 757 721 758 722 bytes = sizeof(struct uv_blade_info) * uv_num_possible_blades(); 759 - uv_blade_info = kmalloc(bytes, GFP_KERNEL); 723 + uv_blade_info = kzalloc(bytes, GFP_KERNEL); 760 724 BUG_ON(!uv_blade_info); 725 + 761 726 for (blade = 0; blade < uv_num_possible_blades(); blade++) 762 727 uv_blade_info[blade].memory_nid = -1; 763 728 ··· 784 747 uv_blade_info[blade].pnode = pnode; 785 748 uv_blade_info[blade].nr_possible_cpus = 0; 786 749 uv_blade_info[blade].nr_online_cpus = 0; 750 + spin_lock_init(&uv_blade_info[blade].nmi_lock); 787 751 max_pnode = max(pnode, max_pnode); 788 752 blade++; 789 753 }
+5
arch/x86/kernel/apm_32.c
··· 228 228 #include <linux/kthread.h> 229 229 #include <linux/jiffies.h> 230 230 #include <linux/acpi.h> 231 + #include <linux/syscore_ops.h> 231 232 232 233 #include <asm/system.h> 233 234 #include <asm/uaccess.h> ··· 1239 1238 1240 1239 local_irq_disable(); 1241 1240 sysdev_suspend(PMSG_SUSPEND); 1241 + syscore_suspend(); 1242 1242 1243 1243 local_irq_enable(); 1244 1244 ··· 1257 1255 apm_error("suspend", err); 1258 1256 err = (err == APM_SUCCESS) ? 0 : -EIO; 1259 1257 1258 + syscore_resume(); 1260 1259 sysdev_resume(); 1261 1260 local_irq_enable(); 1262 1261 ··· 1283 1280 1284 1281 local_irq_disable(); 1285 1282 sysdev_suspend(PMSG_SUSPEND); 1283 + syscore_suspend(); 1286 1284 local_irq_enable(); 1287 1285 1288 1286 err = set_system_power_state(APM_STATE_STANDBY); ··· 1291 1287 apm_error("standby", err); 1292 1288 1293 1289 local_irq_disable(); 1290 + syscore_resume(); 1294 1291 sysdev_resume(); 1295 1292 local_irq_enable(); 1296 1293
+1 -1
arch/x86/kernel/cpu/amd.c
··· 613 613 #endif 614 614 615 615 /* As a rule processors have APIC timer running in deep C states */ 616 - if (c->x86 >= 0xf && !cpu_has_amd_erratum(amd_erratum_400)) 616 + if (c->x86 > 0xf && !cpu_has_amd_erratum(amd_erratum_400)) 617 617 set_cpu_cap(c, X86_FEATURE_ARAT); 618 618 619 619 /*
+1
arch/x86/kernel/cpu/mcheck/mce_amd.c
··· 509 509 out_free: 510 510 if (b) { 511 511 kobject_put(&b->kobj); 512 + list_del(&b->miscj); 512 513 kfree(b); 513 514 } 514 515 return err;
+7 -5
arch/x86/kernel/cpu/mcheck/therm_throt.c
··· 446 446 */ 447 447 rdmsr(MSR_IA32_MISC_ENABLE, l, h); 448 448 449 + h = lvtthmr_init; 449 450 /* 450 451 * The initial value of thermal LVT entries on all APs always reads 451 452 * 0x10000 because APs are woken up by BSP issuing INIT-SIPI-SIPI 452 453 * sequence to them and LVT registers are reset to 0s except for 453 454 * the mask bits which are set to 1s when APs receive INIT IPI. 454 - * Always restore the value that BIOS has programmed on AP based on 455 - * BSP's info we saved since BIOS is always setting the same value 456 - * for all threads/cores 455 + * If BIOS takes over the thermal interrupt and sets its interrupt 456 + * delivery mode to SMI (not fixed), it restores the value that the 457 + * BIOS has programmed on AP based on BSP's info we saved since BIOS 458 + * is always setting the same value for all threads/cores. 457 459 */ 458 - apic_write(APIC_LVTTHMR, lvtthmr_init); 460 + if ((h & APIC_DM_FIXED_MASK) != APIC_DM_FIXED) 461 + apic_write(APIC_LVTTHMR, lvtthmr_init); 459 462 460 - h = lvtthmr_init; 461 463 462 464 if ((l & MSR_IA32_MISC_ENABLE_TM1) && (h & APIC_DM_SMI)) { 463 465 printk(KERN_DEBUG
+17 -5
arch/x86/kernel/cpu/perf_event.c
··· 586 586 return -EOPNOTSUPP; 587 587 } 588 588 589 + /* 590 + * Do not allow config1 (extended registers) to propagate, 591 + * there's no sane user-space generalization yet: 592 + */ 589 593 if (attr->type == PERF_TYPE_RAW) 590 - return x86_pmu_extra_regs(event->attr.config, event); 594 + return 0; 591 595 592 596 if (attr->type == PERF_TYPE_HW_CACHE) 593 597 return set_ext_hw_attr(hwc, event); ··· 613 609 /* 614 610 * Branch tracing: 615 611 */ 616 - if ((attr->config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS) && 617 - (hwc->sample_period == 1)) { 612 + if (attr->config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS && 613 + !attr->freq && hwc->sample_period == 1) { 618 614 /* BTS is not supported by this architecture. */ 619 615 if (!x86_pmu.bts_active) 620 616 return -EOPNOTSUPP; ··· 1288 1284 1289 1285 cpuc = &__get_cpu_var(cpu_hw_events); 1290 1286 1287 + /* 1288 + * Some chipsets need to unmask the LVTPC in a particular spot 1289 + * inside the nmi handler. As a result, the unmasking was pushed 1290 + * into all the nmi handlers. 1291 + * 1292 + * This generic handler doesn't seem to have any issues where the 1293 + * unmasking occurs so it was left at the top. 1294 + */ 1295 + apic_write(APIC_LVTPC, APIC_DM_NMI); 1296 + 1291 1297 for (idx = 0; idx < x86_pmu.num_counters; idx++) { 1292 1298 if (!test_bit(idx, cpuc->active_mask)) { 1293 1299 /* ··· 1383 1369 default: 1384 1370 return NOTIFY_DONE; 1385 1371 } 1386 - 1387 - apic_write(APIC_LVTPC, APIC_DM_NMI); 1388 1372 1389 1373 handled = x86_pmu.handle_irq(args->regs); 1390 1374 if (!handled)
+18 -4
arch/x86/kernel/cpu/perf_event_amd.c
··· 8 8 [ C(L1D) ] = { 9 9 [ C(OP_READ) ] = { 10 10 [ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses */ 11 - [ C(RESULT_MISS) ] = 0x0041, /* Data Cache Misses */ 11 + [ C(RESULT_MISS) ] = 0x0141, /* Data Cache Misses */ 12 12 }, 13 13 [ C(OP_WRITE) ] = { 14 14 [ C(RESULT_ACCESS) ] = 0x0142, /* Data Cache Refills :system */ ··· 427 427 * 428 428 * Exceptions: 429 429 * 430 + * 0x000 FP PERF_CTL[3], PERF_CTL[5:3] (*) 430 431 * 0x003 FP PERF_CTL[3] 432 + * 0x004 FP PERF_CTL[3], PERF_CTL[5:3] (*) 431 433 * 0x00B FP PERF_CTL[3] 432 434 * 0x00D FP PERF_CTL[3] 433 435 * 0x023 DE PERF_CTL[2:0] ··· 450 448 * 0x0DF LS PERF_CTL[5:0] 451 449 * 0x1D6 EX PERF_CTL[5:0] 452 450 * 0x1D8 EX PERF_CTL[5:0] 451 + * 452 + * (*) depending on the umask all FPU counters may be used 453 453 */ 454 454 455 455 static struct event_constraint amd_f15_PMC0 = EVENT_CONSTRAINT(0, 0x01, 0); ··· 464 460 static struct event_constraint * 465 461 amd_get_event_constraints_f15h(struct cpu_hw_events *cpuc, struct perf_event *event) 466 462 { 467 - unsigned int event_code = amd_get_event_code(&event->hw); 463 + struct hw_perf_event *hwc = &event->hw; 464 + unsigned int event_code = amd_get_event_code(hwc); 468 465 469 466 switch (event_code & AMD_EVENT_TYPE_MASK) { 470 467 case AMD_EVENT_FP: 471 468 switch (event_code) { 469 + case 0x000: 470 + if (!(hwc->config & 0x0000F000ULL)) 471 + break; 472 + if (!(hwc->config & 0x00000F00ULL)) 473 + break; 474 + return &amd_f15_PMC3; 475 + case 0x004: 476 + if (hweight_long(hwc->config & ARCH_PERFMON_EVENTSEL_UMASK) <= 1) 477 + break; 478 + return &amd_f15_PMC3; 472 479 case 0x003: 473 480 case 0x00B: 474 481 case 0x00D: 475 482 return &amd_f15_PMC3; 476 - default: 477 - return &amd_f15_PMC53; 478 483 } 484 + return &amd_f15_PMC53; 479 485 case AMD_EVENT_LS: 480 486 case AMD_EVENT_DC: 481 487 case AMD_EVENT_EX_LS:
+84 -41
arch/x86/kernel/cpu/perf_event_intel.c
··· 25 25 /* 26 26 * Intel PerfMon, used on Core and later. 27 27 */ 28 - static const u64 intel_perfmon_event_map[] = 28 + static u64 intel_perfmon_event_map[PERF_COUNT_HW_MAX] __read_mostly = 29 29 { 30 30 [PERF_COUNT_HW_CPU_CYCLES] = 0x003c, 31 31 [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0, ··· 184 184 }, 185 185 }, 186 186 [ C(LL ) ] = { 187 - /* 188 - * TBD: Need Off-core Response Performance Monitoring support 189 - */ 190 187 [ C(OP_READ) ] = { 191 - /* OFFCORE_RESPONSE_0.ANY_DATA.LOCAL_CACHE */ 188 + /* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */ 192 189 [ C(RESULT_ACCESS) ] = 0x01b7, 193 - /* OFFCORE_RESPONSE_1.ANY_DATA.ANY_LLC_MISS */ 194 - [ C(RESULT_MISS) ] = 0x01bb, 190 + /* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */ 191 + [ C(RESULT_MISS) ] = 0x01b7, 195 192 }, 196 193 [ C(OP_WRITE) ] = { 197 - /* OFFCORE_RESPONSE_0.ANY_RFO.LOCAL_CACHE */ 194 + /* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */ 198 195 [ C(RESULT_ACCESS) ] = 0x01b7, 199 - /* OFFCORE_RESPONSE_1.ANY_RFO.ANY_LLC_MISS */ 200 - [ C(RESULT_MISS) ] = 0x01bb, 196 + /* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */ 197 + [ C(RESULT_MISS) ] = 0x01b7, 201 198 }, 202 199 [ C(OP_PREFETCH) ] = { 203 - /* OFFCORE_RESPONSE_0.PREFETCH.LOCAL_CACHE */ 200 + /* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */ 204 201 [ C(RESULT_ACCESS) ] = 0x01b7, 205 - /* OFFCORE_RESPONSE_1.PREFETCH.ANY_LLC_MISS */ 206 - [ C(RESULT_MISS) ] = 0x01bb, 202 + /* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */ 203 + [ C(RESULT_MISS) ] = 0x01b7, 207 204 }, 208 205 }, 209 206 [ C(DTLB) ] = { ··· 282 285 }, 283 286 [ C(LL ) ] = { 284 287 [ C(OP_READ) ] = { 285 - /* OFFCORE_RESPONSE_0.ANY_DATA.LOCAL_CACHE */ 288 + /* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */ 286 289 [ C(RESULT_ACCESS) ] = 0x01b7, 287 - /* OFFCORE_RESPONSE_1.ANY_DATA.ANY_LLC_MISS */ 288 - [ C(RESULT_MISS) ] = 0x01bb, 290 + /* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */ 291 + [ C(RESULT_MISS) ] = 0x01b7, 289 292 }, 290 293 /* 291 294 * Use RFO, not WRITEBACK, because a write miss would typically occur 
292 295 * on RFO. 293 296 */ 294 297 [ C(OP_WRITE) ] = { 295 - /* OFFCORE_RESPONSE_1.ANY_RFO.LOCAL_CACHE */ 296 - [ C(RESULT_ACCESS) ] = 0x01bb, 297 - /* OFFCORE_RESPONSE_0.ANY_RFO.ANY_LLC_MISS */ 298 + /* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */ 299 + [ C(RESULT_ACCESS) ] = 0x01b7, 300 + /* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */ 298 301 [ C(RESULT_MISS) ] = 0x01b7, 299 302 }, 300 303 [ C(OP_PREFETCH) ] = { 301 - /* OFFCORE_RESPONSE_0.PREFETCH.LOCAL_CACHE */ 304 + /* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */ 302 305 [ C(RESULT_ACCESS) ] = 0x01b7, 303 - /* OFFCORE_RESPONSE_1.PREFETCH.ANY_LLC_MISS */ 304 - [ C(RESULT_MISS) ] = 0x01bb, 306 + /* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */ 307 + [ C(RESULT_MISS) ] = 0x01b7, 305 308 }, 306 309 }, 307 310 [ C(DTLB) ] = { ··· 349 352 }; 350 353 351 354 /* 352 - * OFFCORE_RESPONSE MSR bits (subset), See IA32 SDM Vol 3 30.6.1.3 355 + * Nehalem/Westmere MSR_OFFCORE_RESPONSE bits; 356 + * See IA32 SDM Vol 3B 30.6.1.3 353 357 */ 354 358 355 - #define DMND_DATA_RD (1 << 0) 356 - #define DMND_RFO (1 << 1) 357 - #define DMND_WB (1 << 3) 358 - #define PF_DATA_RD (1 << 4) 359 - #define PF_DATA_RFO (1 << 5) 360 - #define RESP_UNCORE_HIT (1 << 8) 361 - #define RESP_MISS (0xf600) /* non uncore hit */ 359 + #define NHM_DMND_DATA_RD (1 << 0) 360 + #define NHM_DMND_RFO (1 << 1) 361 + #define NHM_DMND_IFETCH (1 << 2) 362 + #define NHM_DMND_WB (1 << 3) 363 + #define NHM_PF_DATA_RD (1 << 4) 364 + #define NHM_PF_DATA_RFO (1 << 5) 365 + #define NHM_PF_IFETCH (1 << 6) 366 + #define NHM_OFFCORE_OTHER (1 << 7) 367 + #define NHM_UNCORE_HIT (1 << 8) 368 + #define NHM_OTHER_CORE_HIT_SNP (1 << 9) 369 + #define NHM_OTHER_CORE_HITM (1 << 10) 370 + /* reserved */ 371 + #define NHM_REMOTE_CACHE_FWD (1 << 12) 372 + #define NHM_REMOTE_DRAM (1 << 13) 373 + #define NHM_LOCAL_DRAM (1 << 14) 374 + #define NHM_NON_DRAM (1 << 15) 375 + 376 + #define NHM_ALL_DRAM (NHM_REMOTE_DRAM|NHM_LOCAL_DRAM) 377 + 378 + #define NHM_DMND_READ (NHM_DMND_DATA_RD) 379 + #define 
NHM_DMND_WRITE (NHM_DMND_RFO|NHM_DMND_WB) 380 + #define NHM_DMND_PREFETCH (NHM_PF_DATA_RD|NHM_PF_DATA_RFO) 381 + 382 + #define NHM_L3_HIT (NHM_UNCORE_HIT|NHM_OTHER_CORE_HIT_SNP|NHM_OTHER_CORE_HITM) 383 + #define NHM_L3_MISS (NHM_NON_DRAM|NHM_ALL_DRAM|NHM_REMOTE_CACHE_FWD) 384 + #define NHM_L3_ACCESS (NHM_L3_HIT|NHM_L3_MISS) 362 385 363 386 static __initconst const u64 nehalem_hw_cache_extra_regs 364 387 [PERF_COUNT_HW_CACHE_MAX] ··· 387 370 { 388 371 [ C(LL ) ] = { 389 372 [ C(OP_READ) ] = { 390 - [ C(RESULT_ACCESS) ] = DMND_DATA_RD|RESP_UNCORE_HIT, 391 - [ C(RESULT_MISS) ] = DMND_DATA_RD|RESP_MISS, 373 + [ C(RESULT_ACCESS) ] = NHM_DMND_READ|NHM_L3_ACCESS, 374 + [ C(RESULT_MISS) ] = NHM_DMND_READ|NHM_L3_MISS, 392 375 }, 393 376 [ C(OP_WRITE) ] = { 394 - [ C(RESULT_ACCESS) ] = DMND_RFO|DMND_WB|RESP_UNCORE_HIT, 395 - [ C(RESULT_MISS) ] = DMND_RFO|DMND_WB|RESP_MISS, 377 + [ C(RESULT_ACCESS) ] = NHM_DMND_WRITE|NHM_L3_ACCESS, 378 + [ C(RESULT_MISS) ] = NHM_DMND_WRITE|NHM_L3_MISS, 396 379 }, 397 380 [ C(OP_PREFETCH) ] = { 398 - [ C(RESULT_ACCESS) ] = PF_DATA_RD|PF_DATA_RFO|RESP_UNCORE_HIT, 399 - [ C(RESULT_MISS) ] = PF_DATA_RD|PF_DATA_RFO|RESP_MISS, 381 + [ C(RESULT_ACCESS) ] = NHM_DMND_PREFETCH|NHM_L3_ACCESS, 382 + [ C(RESULT_MISS) ] = NHM_DMND_PREFETCH|NHM_L3_MISS, 400 383 }, 401 384 } 402 385 }; ··· 408 391 { 409 392 [ C(L1D) ] = { 410 393 [ C(OP_READ) ] = { 411 - [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI */ 412 - [ C(RESULT_MISS) ] = 0x0140, /* L1D_CACHE_LD.I_STATE */ 394 + [ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */ 395 + [ C(RESULT_MISS) ] = 0x0151, /* L1D.REPL */ 413 396 }, 414 397 [ C(OP_WRITE) ] = { 415 - [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI */ 416 - [ C(RESULT_MISS) ] = 0x0141, /* L1D_CACHE_ST.I_STATE */ 398 + [ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETURED.STORES */ 399 + [ C(RESULT_MISS) ] = 0x0251, /* L1D.M_REPL */ 417 400 }, 418 401 [ C(OP_PREFETCH) ] = { 419 402 [ C(RESULT_ACCESS) ] = 0x014e, /* 
L1D_PREFETCH.REQUESTS */ ··· 950 933 951 934 cpuc = &__get_cpu_var(cpu_hw_events); 952 935 936 + /* 937 + * Some chipsets need to unmask the LVTPC in a particular spot 938 + * inside the nmi handler. As a result, the unmasking was pushed 939 + * into all the nmi handlers. 940 + * 941 + * This handler doesn't seem to have any issues with the unmasking 942 + * so it was left at the top. 943 + */ 944 + apic_write(APIC_LVTPC, APIC_DM_NMI); 945 + 953 946 intel_pmu_disable_all(); 954 947 handled = intel_pmu_drain_bts_buffer(); 955 948 status = intel_pmu_get_status(); ··· 1024 997 { 1025 998 struct hw_perf_event *hwc = &event->hw; 1026 999 unsigned int hw_event, bts_event; 1000 + 1001 + if (event->attr.freq) 1002 + return NULL; 1027 1003 1028 1004 hw_event = hwc->config & INTEL_ARCH_EVENT_MASK; 1029 1005 bts_event = x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS); ··· 1335 1305 * AJ106 could possibly be worked around by not allowing LBR 1336 1306 * usage from PEBS, including the fixup. 1337 1307 * AJ68 could possibly be worked around by always programming 1338 - * a pebs_event_reset[0] value and coping with the lost events. 1308 + * a pebs_event_reset[0] value and coping with the lost events. 1339 1309 * 1340 1310 * But taken together it might just make sense to not enable PEBS on 1341 1311 * these chips. ··· 1439 1409 x86_pmu.percore_constraints = intel_nehalem_percore_constraints; 1440 1410 x86_pmu.enable_all = intel_pmu_nhm_enable_all; 1441 1411 x86_pmu.extra_regs = intel_nehalem_extra_regs; 1412 + 1413 + if (ebx & 0x40) { 1414 + /* 1415 + * Erratum AAJ80 detected, we work it around by using 1416 + * the BR_MISP_EXEC.ANY event. 
This will over-count 1417 + * branch-misses, but it's still much better than the 1418 + * architectural event which is often completely bogus: 1419 + */ 1420 + intel_perfmon_event_map[PERF_COUNT_HW_BRANCH_MISSES] = 0x7f89; 1421 + 1422 + pr_cont("erratum AAJ80 worked around, "); 1423 + } 1442 1424 pr_cont("Nehalem events, "); 1443 1425 break; 1444 1426 ··· 1467 1425 1468 1426 case 37: /* 32 nm nehalem, "Clarkdale" */ 1469 1427 case 44: /* 32 nm nehalem, "Gulftown" */ 1428 + case 47: /* 32 nm Xeon E7 */ 1470 1429 memcpy(hw_cache_event_ids, westmere_hw_cache_event_ids, 1471 1430 sizeof(hw_cache_event_ids)); 1472 1431 memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,
+14 -5
arch/x86/kernel/cpu/perf_event_p4.c
··· 947 947 if (!x86_perf_event_set_period(event)) 948 948 continue; 949 949 if (perf_event_overflow(event, 1, &data, regs)) 950 - p4_pmu_disable_event(event); 950 + x86_pmu_stop(event, 0); 951 951 } 952 952 953 - if (handled) { 954 - /* p4 quirk: unmask it again */ 955 - apic_write(APIC_LVTPC, apic_read(APIC_LVTPC) & ~APIC_LVT_MASKED); 953 + if (handled) 956 954 inc_irq_stat(apic_perf_irqs); 957 - } 955 + 956 + /* 957 + * When dealing with the unmasking of the LVTPC on P4 perf hw, it has 958 + * been observed that the OVF bit flag has to be cleared first _before_ 959 + * the LVTPC can be unmasked. 960 + * 961 + * The reason is the NMI line will continue to be asserted while the OVF 962 + * bit is set. This causes a second NMI to generate if the LVTPC is 963 + * unmasked before the OVF bit is cleared, leading to unknown NMI 964 + * messages. 965 + */ 966 + apic_write(APIC_LVTPC, APIC_DM_NMI); 958 967 959 968 return handled; 960 969 }
+1 -1
arch/x86/kernel/devicetree.c
··· 391 391 392 392 set_io_apic_irq_attr(&attr, idx, line, it->trigger, it->polarity); 393 393 394 - return io_apic_setup_irq_pin(*out_hwirq, cpu_to_node(0), &attr); 394 + return io_apic_setup_irq_pin_once(*out_hwirq, cpu_to_node(0), &attr); 395 395 } 396 396 397 397 static void __init ioapic_add_ofnode(struct device_node *np)
+3 -2
arch/x86/kernel/kprobes.c
··· 1183 1183 struct pt_regs *regs) 1184 1184 { 1185 1185 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); 1186 + unsigned long flags; 1186 1187 1187 1188 /* This is possible if op is under delayed unoptimizing */ 1188 1189 if (kprobe_disabled(&op->kp)) 1189 1190 return; 1190 1191 1191 - preempt_disable(); 1192 + local_irq_save(flags); 1192 1193 if (kprobe_running()) { 1193 1194 kprobes_inc_nmissed_count(&op->kp); 1194 1195 } else { ··· 1208 1207 opt_pre_handler(&op->kp, regs); 1209 1208 __this_cpu_write(current_kprobe, NULL); 1210 1209 } 1211 - preempt_enable_no_resched(); 1210 + local_irq_restore(flags); 1212 1211 } 1213 1212 1214 1213 static int __kprobes copy_optimized_instructions(u8 *dest, u8 *src)
+8 -1
arch/x86/kernel/pci-gart_64.c
··· 81 81 #define AGPEXTERN 82 82 #endif 83 83 84 + /* GART can only remap to physical addresses < 1TB */ 85 + #define GART_MAX_PHYS_ADDR (1ULL << 40) 86 + 84 87 /* backdoor interface to AGP driver */ 85 88 AGPEXTERN int agp_memory_reserved; 86 89 AGPEXTERN __u32 *agp_gatt_table; ··· 215 212 size_t size, int dir, unsigned long align_mask) 216 213 { 217 214 unsigned long npages = iommu_num_pages(phys_mem, size, PAGE_SIZE); 218 - unsigned long iommu_page = alloc_iommu(dev, npages, align_mask); 215 + unsigned long iommu_page; 219 216 int i; 220 217 218 + if (unlikely(phys_mem + size > GART_MAX_PHYS_ADDR)) 219 + return bad_dma_addr; 220 + 221 + iommu_page = alloc_iommu(dev, npages, align_mask); 221 222 if (iommu_page == -1) { 222 223 if (!nonforced_iommu(dev, phys_mem, size)) 223 224 return phys_mem;
+26 -10
arch/x86/kernel/ptrace.c
··· 608 608 unsigned len, type; 609 609 struct perf_event *bp; 610 610 611 + if (ptrace_get_breakpoints(tsk) < 0) 612 + return -ESRCH; 613 + 611 614 data &= ~DR_CONTROL_RESERVED; 612 615 old_dr7 = ptrace_get_dr7(thread->ptrace_bps); 613 616 restore: ··· 658 655 } 659 656 goto restore; 660 657 } 658 + 659 + ptrace_put_breakpoints(tsk); 660 + 661 661 return ((orig_ret < 0) ? orig_ret : rc); 662 662 } 663 663 ··· 674 668 675 669 if (n < HBP_NUM) { 676 670 struct perf_event *bp; 671 + 672 + if (ptrace_get_breakpoints(tsk) < 0) 673 + return -ESRCH; 674 + 677 675 bp = thread->ptrace_bps[n]; 678 676 if (!bp) 679 - return 0; 680 - val = bp->hw.info.address; 677 + val = 0; 678 + else 679 + val = bp->hw.info.address; 680 + 681 + ptrace_put_breakpoints(tsk); 681 682 } else if (n == 6) { 682 683 val = thread->debugreg6; 683 684 } else if (n == 7) { ··· 699 686 struct perf_event *bp; 700 687 struct thread_struct *t = &tsk->thread; 701 688 struct perf_event_attr attr; 689 + int err = 0; 690 + 691 + if (ptrace_get_breakpoints(tsk) < 0) 692 + return -ESRCH; 702 693 703 694 if (!t->ptrace_bps[nr]) { 704 695 ptrace_breakpoint_init(&attr); ··· 726 709 * writing for the user. And anyway this is the previous 727 710 * behaviour. 728 711 */ 729 - if (IS_ERR(bp)) 730 - return PTR_ERR(bp); 712 + if (IS_ERR(bp)) { 713 + err = PTR_ERR(bp); 714 + goto put; 715 + } 731 716 732 717 t->ptrace_bps[nr] = bp; 733 718 } else { 734 - int err; 735 - 736 719 bp = t->ptrace_bps[nr]; 737 720 738 721 attr = bp->attr; 739 722 attr.bp_addr = addr; 740 723 err = modify_user_hw_breakpoint(bp, &attr); 741 - if (err) 742 - return err; 743 724 } 744 725 745 - 746 - return 0; 726 + put: 727 + ptrace_put_breakpoints(tsk); 728 + return err; 747 729 } 748 730 749 731 /*
+6 -6
arch/x86/kernel/reboot_32.S
··· 21 21 /* Get our own relocated address */ 22 22 call 1f 23 23 1: popl %ebx 24 - subl $1b, %ebx 24 + subl $(1b - r_base), %ebx 25 25 26 26 /* Compute the equivalent real-mode segment */ 27 27 movl %ebx, %ecx 28 28 shrl $4, %ecx 29 29 30 30 /* Patch post-real-mode segment jump */ 31 - movw dispatch_table(%ebx,%eax,2),%ax 32 - movw %ax, 101f(%ebx) 33 - movw %cx, 102f(%ebx) 31 + movw (dispatch_table - r_base)(%ebx,%eax,2),%ax 32 + movw %ax, (101f - r_base)(%ebx) 33 + movw %cx, (102f - r_base)(%ebx) 34 34 35 35 /* Set up the IDT for real mode. */ 36 - lidtl machine_real_restart_idt(%ebx) 36 + lidtl (machine_real_restart_idt - r_base)(%ebx) 37 37 38 38 /* 39 39 * Set up a GDT from which we can load segment descriptors for real 40 40 * mode. The GDT is not used in real mode; it is just needed here to 41 41 * prepare the descriptors. 42 42 */ 43 - lgdtl machine_real_restart_gdt(%ebx) 43 + lgdtl (machine_real_restart_gdt - r_base)(%ebx) 44 44 45 45 /* 46 46 * Load the data segment registers with 16-bit compatible values
-23
arch/x86/kernel/smpboot.c
··· 312 312 identify_secondary_cpu(c); 313 313 } 314 314 315 - static void __cpuinit check_cpu_siblings_on_same_node(int cpu1, int cpu2) 316 - { 317 - int node1 = early_cpu_to_node(cpu1); 318 - int node2 = early_cpu_to_node(cpu2); 319 - 320 - /* 321 - * Our CPU scheduler assumes all logical cpus in the same physical cpu 322 - * share the same node. But, buggy ACPI or NUMA emulation might assign 323 - * them to different node. Fix it. 324 - */ 325 - if (node1 != node2) { 326 - pr_warning("CPU %d in node %d and CPU %d in node %d are in the same physical CPU. forcing same node %d\n", 327 - cpu1, node1, cpu2, node2, node2); 328 - 329 - numa_remove_cpu(cpu1); 330 - numa_set_node(cpu1, node2); 331 - numa_add_cpu(cpu1); 332 - } 333 - } 334 - 335 315 static void __cpuinit link_thread_siblings(int cpu1, int cpu2) 336 316 { 337 317 cpumask_set_cpu(cpu1, cpu_sibling_mask(cpu2)); ··· 320 340 cpumask_set_cpu(cpu2, cpu_core_mask(cpu1)); 321 341 cpumask_set_cpu(cpu1, cpu_llc_shared_mask(cpu2)); 322 342 cpumask_set_cpu(cpu2, cpu_llc_shared_mask(cpu1)); 323 - check_cpu_siblings_on_same_node(cpu1, cpu2); 324 343 } 325 344 326 345 ··· 361 382 per_cpu(cpu_llc_id, cpu) == per_cpu(cpu_llc_id, i)) { 362 383 cpumask_set_cpu(i, cpu_llc_shared_mask(cpu)); 363 384 cpumask_set_cpu(cpu, cpu_llc_shared_mask(i)); 364 - check_cpu_siblings_on_same_node(cpu, i); 365 385 } 366 386 if (c->phys_proc_id == cpu_data(i).phys_proc_id) { 367 387 cpumask_set_cpu(i, cpu_core_mask(cpu)); 368 388 cpumask_set_cpu(cpu, cpu_core_mask(i)); 369 - check_cpu_siblings_on_same_node(cpu, i); 370 389 /* 371 390 * Does this new cpu bringup a new core? 372 391 */
+4
arch/x86/kernel/x86_init.c
··· 61 61 .banner = default_banner, 62 62 }, 63 63 64 + .mapping = { 65 + .pagetable_reserve = native_pagetable_reserve, 66 + }, 67 + 64 68 .paging = { 65 69 .pagetable_setup_start = native_pagetable_setup_start, 66 70 .pagetable_setup_done = native_pagetable_setup_done,
+22 -2
arch/x86/mm/init.c
··· 81 81 end, pgt_buf_start << PAGE_SHIFT, pgt_buf_top << PAGE_SHIFT); 82 82 } 83 83 84 + void __init native_pagetable_reserve(u64 start, u64 end) 85 + { 86 + memblock_x86_reserve_range(start, end, "PGTABLE"); 87 + } 88 + 84 89 struct map_range { 85 90 unsigned long start; 86 91 unsigned long end; ··· 277 272 278 273 __flush_tlb_all(); 279 274 275 + /* 276 + * Reserve the kernel pagetable pages we used (pgt_buf_start - 277 + * pgt_buf_end) and free the other ones (pgt_buf_end - pgt_buf_top) 278 + * so that they can be reused for other purposes. 279 + * 280 + * On native it just means calling memblock_x86_reserve_range, on Xen it 281 + * also means marking RW the pagetable pages that we allocated before 282 + * but that haven't been used. 283 + * 284 + * In fact on xen we mark RO the whole range pgt_buf_start - 285 + * pgt_buf_top, because we have to make sure that when 286 + * init_memory_mapping reaches the pagetable pages area, it maps 287 + * RO all the pagetable pages, including the ones that are beyond 288 + * pgt_buf_end at that time. 289 + */ 280 290 if (!after_bootmem && pgt_buf_end > pgt_buf_start) 281 - memblock_x86_reserve_range(pgt_buf_start << PAGE_SHIFT, 282 - pgt_buf_end << PAGE_SHIFT, "PGTABLE"); 291 + x86_init.mapping.pagetable_reserve(PFN_PHYS(pgt_buf_start), 292 + PFN_PHYS(pgt_buf_end)); 283 293 284 294 if (!after_bootmem) 285 295 early_memtest(start, end);
+17 -22
arch/x86/mm/numa.c
··· 213 213 return per_cpu(x86_cpu_to_node_map, cpu); 214 214 } 215 215 216 - struct cpumask __cpuinit *debug_cpumask_set_cpu(int cpu, int enable) 216 + void debug_cpumask_set_cpu(int cpu, int node, bool enable) 217 217 { 218 - int node = early_cpu_to_node(cpu); 219 218 struct cpumask *mask; 220 219 char buf[64]; 221 220 222 221 if (node == NUMA_NO_NODE) { 223 222 /* early_cpu_to_node() already emits a warning and trace */ 224 - return NULL; 223 + return; 225 224 } 226 225 mask = node_to_cpumask_map[node]; 227 226 if (!mask) { 228 227 pr_err("node_to_cpumask_map[%i] NULL\n", node); 229 228 dump_stack(); 230 - return NULL; 231 - } 232 - 233 - cpulist_scnprintf(buf, sizeof(buf), mask); 234 - printk(KERN_DEBUG "%s cpu %d node %d: mask now %s\n", 235 - enable ? "numa_add_cpu" : "numa_remove_cpu", 236 - cpu, node, buf); 237 - return mask; 238 - } 239 - 240 - # ifndef CONFIG_NUMA_EMU 241 - static void __cpuinit numa_set_cpumask(int cpu, int enable) 242 - { 243 - struct cpumask *mask; 244 - 245 - mask = debug_cpumask_set_cpu(cpu, enable); 246 - if (!mask) 247 229 return; 230 + } 248 231 249 232 if (enable) 250 233 cpumask_set_cpu(cpu, mask); 251 234 else 252 235 cpumask_clear_cpu(cpu, mask); 236 + 237 + cpulist_scnprintf(buf, sizeof(buf), mask); 238 + printk(KERN_DEBUG "%s cpu %d node %d: mask now %s\n", 239 + enable ? "numa_add_cpu" : "numa_remove_cpu", 240 + cpu, node, buf); 241 + return; 242 + } 243 + 244 + # ifndef CONFIG_NUMA_EMU 245 + static void __cpuinit numa_set_cpumask(int cpu, bool enable) 246 + { 247 + debug_cpumask_set_cpu(cpu, early_cpu_to_node(cpu), enable); 253 248 } 254 249 255 250 void __cpuinit numa_add_cpu(int cpu) 256 251 { 257 - numa_set_cpumask(cpu, 1); 252 + numa_set_cpumask(cpu, true); 258 253 } 259 254 260 255 void __cpuinit numa_remove_cpu(int cpu) 261 256 { 262 - numa_set_cpumask(cpu, 0); 257 + numa_set_cpumask(cpu, false); 263 258 } 264 259 # endif /* !CONFIG_NUMA_EMU */ 265 260
+1 -1
arch/x86/mm/numa_64.c
··· 306 306 bi->end = min(bi->end, high); 307 307 308 308 /* and there's no empty block */ 309 - if (bi->start == bi->end) { 309 + if (bi->start >= bi->end) { 310 310 numa_remove_memblk_from(i--, mi); 311 311 continue; 312 312 }
+6 -14
arch/x86/mm/numa_emulation.c
··· 454 454 cpumask_clear_cpu(cpu, node_to_cpumask_map[i]); 455 455 } 456 456 #else /* !CONFIG_DEBUG_PER_CPU_MAPS */ 457 - static void __cpuinit numa_set_cpumask(int cpu, int enable) 457 + static void __cpuinit numa_set_cpumask(int cpu, bool enable) 458 458 { 459 - struct cpumask *mask; 460 - int nid, physnid, i; 459 + int nid, physnid; 461 460 462 461 nid = early_cpu_to_node(cpu); 463 462 if (nid == NUMA_NO_NODE) { ··· 466 467 467 468 physnid = emu_nid_to_phys[nid]; 468 469 469 - for_each_online_node(i) { 470 + for_each_online_node(nid) { 470 471 if (emu_nid_to_phys[nid] != physnid) 471 472 continue; 472 473 473 - mask = debug_cpumask_set_cpu(cpu, enable); 474 - if (!mask) 475 - return; 476 - 477 - if (enable) 478 - cpumask_set_cpu(cpu, mask); 479 - else 480 - cpumask_clear_cpu(cpu, mask); 474 + debug_cpumask_set_cpu(cpu, nid, enable); 481 475 } 482 476 } 483 477 484 478 void __cpuinit numa_add_cpu(int cpu) 485 479 { 486 - numa_set_cpumask(cpu, 1); 480 + numa_set_cpumask(cpu, true); 487 481 } 488 482 489 483 void __cpuinit numa_remove_cpu(int cpu) 490 484 { 491 - numa_set_cpumask(cpu, 0); 485 + numa_set_cpumask(cpu, false); 492 486 } 493 487 #endif /* !CONFIG_DEBUG_PER_CPU_MAPS */
+3 -3
arch/x86/platform/ce4100/falconfalls.dts
··· 347 347 "pciclass0c03"; 348 348 349 349 reg = <0x16800 0x0 0x0 0x0 0x0>; 350 - interrupts = <22 3>; 350 + interrupts = <22 1>; 351 351 }; 352 352 353 353 usb@d,1 { ··· 357 357 "pciclass0c03"; 358 358 359 359 reg = <0x16900 0x0 0x0 0x0 0x0>; 360 - interrupts = <22 3>; 360 + interrupts = <22 1>; 361 361 }; 362 362 363 363 sata@e,0 { ··· 367 367 "pciclass0106"; 368 368 369 369 reg = <0x17000 0x0 0x0 0x0 0x0>; 370 - interrupts = <23 3>; 370 + interrupts = <23 1>; 371 371 }; 372 372 373 373 flash@f,0 {
+64 -30
arch/x86/platform/uv/tlb_uv.c
··· 699 699 struct mm_struct *mm, 700 700 unsigned long va, unsigned int cpu) 701 701 { 702 - int tcpu; 703 - int uvhub; 704 702 int locals = 0; 705 703 int remotes = 0; 706 704 int hubs = 0; 705 + int tcpu; 706 + int tpnode; 707 707 struct bau_desc *bau_desc; 708 708 struct cpumask *flush_mask; 709 709 struct ptc_stats *stat; 710 710 struct bau_control *bcp; 711 711 struct bau_control *tbcp; 712 + struct hub_and_pnode *hpp; 712 713 713 714 /* kernel was booted 'nobau' */ 714 715 if (nobau) ··· 751 750 bau_desc += UV_ITEMS_PER_DESCRIPTOR * bcp->uvhub_cpu; 752 751 bau_uvhubs_clear(&bau_desc->distribution, UV_DISTRIBUTION_SIZE); 753 752 754 - /* cpu statistics */ 755 753 for_each_cpu(tcpu, flush_mask) { 756 - uvhub = uv_cpu_to_blade_id(tcpu); 757 - bau_uvhub_set(uvhub, &bau_desc->distribution); 758 - if (uvhub == bcp->uvhub) 754 + /* 755 + * The distribution vector is a bit map of pnodes, relative 756 + * to the partition base pnode (and the partition base nasid 757 + * in the header). 758 + * Translate cpu to pnode and hub using an array stored 759 + * in local memory. 760 + */ 761 + hpp = &bcp->socket_master->target_hub_and_pnode[tcpu]; 762 + tpnode = hpp->pnode - bcp->partition_base_pnode; 763 + bau_uvhub_set(tpnode, &bau_desc->distribution); 764 + if (hpp->uvhub == bcp->uvhub) 759 765 locals++; 760 766 else 761 767 remotes++; ··· 863 855 * an interrupt, but causes an error message to be returned to 864 856 * the sender. 865 857 */ 866 - static void uv_enable_timeouts(void) 858 + static void __init uv_enable_timeouts(void) 867 859 { 868 860 int uvhub; 869 861 int nuvhubs; ··· 1334 1326 } 1335 1327 1336 1328 /* 1337 - * initialize the sending side's sending buffers 1329 + * Initialize the sending side's sending buffers. 
1338 1330 */ 1339 1331 static void 1340 - uv_activation_descriptor_init(int node, int pnode) 1332 + uv_activation_descriptor_init(int node, int pnode, int base_pnode) 1341 1333 { 1342 1334 int i; 1343 1335 int cpu; ··· 1360 1352 n = pa >> uv_nshift; 1361 1353 m = pa & uv_mmask; 1362 1354 1355 + /* the 14-bit pnode */ 1363 1356 uv_write_global_mmr64(pnode, UVH_LB_BAU_SB_DESCRIPTOR_BASE, 1364 1357 (n << UV_DESC_BASE_PNODE_SHIFT | m)); 1365 - 1366 1358 /* 1367 - * initializing all 8 (UV_ITEMS_PER_DESCRIPTOR) descriptors for each 1359 + * Initializing all 8 (UV_ITEMS_PER_DESCRIPTOR) descriptors for each 1368 1360 * cpu even though we only use the first one; one descriptor can 1369 1361 * describe a broadcast to 256 uv hubs. 1370 1362 */ ··· 1373 1365 memset(bd2, 0, sizeof(struct bau_desc)); 1374 1366 bd2->header.sw_ack_flag = 1; 1375 1367 /* 1376 - * base_dest_nodeid is the nasid of the first uvhub 1377 - * in the partition. The bit map will indicate uvhub numbers, 1378 - * which are 0-N in a partition. Pnodes are unique system-wide. 1368 + * The base_dest_nasid set in the message header is the nasid 1369 + * of the first uvhub in the partition. The bit map will 1370 + * indicate destination pnode numbers relative to that base. 1371 + * They may not be consecutive if nasid striding is being used. 
1379 1372 */ 1380 - bd2->header.base_dest_nodeid = UV_PNODE_TO_NASID(uv_partition_base_pnode); 1381 - bd2->header.dest_subnodeid = 0x10; /* the LB */ 1373 + bd2->header.base_dest_nasid = UV_PNODE_TO_NASID(base_pnode); 1374 + bd2->header.dest_subnodeid = UV_LB_SUBNODEID; 1382 1375 bd2->header.command = UV_NET_ENDPOINT_INTD; 1383 1376 bd2->header.int_both = 1; 1384 1377 /* ··· 1451 1442 /* 1452 1443 * Initialization of each UV hub's structures 1453 1444 */ 1454 - static void __init uv_init_uvhub(int uvhub, int vector) 1445 + static void __init uv_init_uvhub(int uvhub, int vector, int base_pnode) 1455 1446 { 1456 1447 int node; 1457 1448 int pnode; ··· 1459 1450 1460 1451 node = uvhub_to_first_node(uvhub); 1461 1452 pnode = uv_blade_to_pnode(uvhub); 1462 - uv_activation_descriptor_init(node, pnode); 1453 + uv_activation_descriptor_init(node, pnode, base_pnode); 1463 1454 uv_payload_queue_init(node, pnode); 1464 1455 /* 1465 - * the below initialization can't be in firmware because the 1466 - * messaging IRQ will be determined by the OS 1456 + * The below initialization can't be in firmware because the 1457 + * messaging IRQ will be determined by the OS. 
1467 1458 */ 1468 1459 apicid = uvhub_to_first_apicid(uvhub) | uv_apicid_hibits; 1469 1460 uv_write_global_mmr64(pnode, UVH_BAU_DATA_CONFIG, ··· 1500 1491 /* 1501 1492 * initialize the bau_control structure for each cpu 1502 1493 */ 1503 - static int __init uv_init_per_cpu(int nuvhubs) 1494 + static int __init uv_init_per_cpu(int nuvhubs, int base_part_pnode) 1504 1495 { 1505 1496 int i; 1506 1497 int cpu; 1498 + int tcpu; 1507 1499 int pnode; 1508 1500 int uvhub; 1509 1501 int have_hmaster; ··· 1538 1528 bcp = &per_cpu(bau_control, cpu); 1539 1529 memset(bcp, 0, sizeof(struct bau_control)); 1540 1530 pnode = uv_cpu_hub_info(cpu)->pnode; 1531 + if ((pnode - base_part_pnode) >= UV_DISTRIBUTION_SIZE) { 1532 + printk(KERN_EMERG 1533 + "cpu %d pnode %d-%d beyond %d; BAU disabled\n", 1534 + cpu, pnode, base_part_pnode, 1535 + UV_DISTRIBUTION_SIZE); 1536 + return 1; 1537 + } 1538 + bcp->osnode = cpu_to_node(cpu); 1539 + bcp->partition_base_pnode = uv_partition_base_pnode; 1541 1540 uvhub = uv_cpu_hub_info(cpu)->numa_blade_id; 1542 1541 *(uvhub_mask + (uvhub/8)) |= (1 << (uvhub%8)); 1543 1542 bdp = &uvhub_descs[uvhub]; ··· 1555 1536 bdp->pnode = pnode; 1556 1537 /* kludge: 'assuming' one node per socket, and assuming that 1557 1538 disabling a socket just leaves a gap in node numbers */ 1558 - socket = (cpu_to_node(cpu) & 1); 1539 + socket = bcp->osnode & 1; 1559 1540 bdp->socket_mask |= (1 << socket); 1560 1541 sdp = &bdp->socket[socket]; 1561 1542 sdp->cpu_number[sdp->num_cpus] = cpu; ··· 1604 1585 nextsocket: 1605 1586 socket++; 1606 1587 socket_mask = (socket_mask >> 1); 1588 + /* each socket gets a local array of pnodes/hubs */ 1589 + bcp = smaster; 1590 + bcp->target_hub_and_pnode = kmalloc_node( 1591 + sizeof(struct hub_and_pnode) * 1592 + num_possible_cpus(), GFP_KERNEL, bcp->osnode); 1593 + memset(bcp->target_hub_and_pnode, 0, 1594 + sizeof(struct hub_and_pnode) * 1595 + num_possible_cpus()); 1596 + for_each_present_cpu(tcpu) { 1597 + 
bcp->target_hub_and_pnode[tcpu].pnode = 1598 + uv_cpu_hub_info(tcpu)->pnode; 1599 + bcp->target_hub_and_pnode[tcpu].uvhub = 1600 + uv_cpu_hub_info(tcpu)->numa_blade_id; 1601 + } 1607 1602 } 1608 1603 } 1609 1604 kfree(uvhub_descs); ··· 1670 1637 spin_lock_init(&disable_lock); 1671 1638 congested_cycles = microsec_2_cycles(congested_response_us); 1672 1639 1673 - if (uv_init_per_cpu(nuvhubs)) { 1640 + uv_partition_base_pnode = 0x7fffffff; 1641 + for (uvhub = 0; uvhub < nuvhubs; uvhub++) { 1642 + if (uv_blade_nr_possible_cpus(uvhub) && 1643 + (uv_blade_to_pnode(uvhub) < uv_partition_base_pnode)) 1644 + uv_partition_base_pnode = uv_blade_to_pnode(uvhub); 1645 + } 1646 + 1647 + if (uv_init_per_cpu(nuvhubs, uv_partition_base_pnode)) { 1674 1648 nobau = 1; 1675 1649 return 0; 1676 1650 } 1677 1651 1678 - uv_partition_base_pnode = 0x7fffffff; 1679 - for (uvhub = 0; uvhub < nuvhubs; uvhub++) 1680 - if (uv_blade_nr_possible_cpus(uvhub) && 1681 - (uv_blade_to_pnode(uvhub) < uv_partition_base_pnode)) 1682 - uv_partition_base_pnode = uv_blade_to_pnode(uvhub); 1683 - 1684 1652 vector = UV_BAU_MESSAGE; 1685 1653 for_each_possible_blade(uvhub) 1686 1654 if (uv_blade_nr_possible_cpus(uvhub)) 1687 - uv_init_uvhub(uvhub, vector); 1655 + uv_init_uvhub(uvhub, vector, uv_partition_base_pnode); 1688 1656 1689 1657 uv_enable_timeouts(); 1690 1658 alloc_intr_gate(vector, uv_bau_message_intr1);
+25 -5
arch/x86/xen/mmu.c
··· 1275 1275 { 1276 1276 } 1277 1277 1278 + static __init void xen_mapping_pagetable_reserve(u64 start, u64 end) 1279 + { 1280 + /* reserve the range used */ 1281 + native_pagetable_reserve(start, end); 1282 + 1283 + /* set as RW the rest */ 1284 + printk(KERN_DEBUG "xen: setting RW the range %llx - %llx\n", end, 1285 + PFN_PHYS(pgt_buf_top)); 1286 + while (end < PFN_PHYS(pgt_buf_top)) { 1287 + make_lowmem_page_readwrite(__va(end)); 1288 + end += PAGE_SIZE; 1289 + } 1290 + } 1291 + 1278 1292 static void xen_post_allocator_init(void); 1279 1293 1280 1294 static __init void xen_pagetable_setup_done(pgd_t *base) ··· 1487 1473 #endif 1488 1474 } 1489 1475 1476 + #ifdef CONFIG_X86_32 1490 1477 static __init pte_t mask_rw_pte(pte_t *ptep, pte_t pte) 1491 1478 { 1492 - unsigned long pfn = pte_pfn(pte); 1493 - 1494 - #ifdef CONFIG_X86_32 1495 1479 /* If there's an existing pte, then don't allow _PAGE_RW to be set */ 1496 1480 if (pte_val_ma(*ptep) & _PAGE_PRESENT) 1497 1481 pte = __pte_ma(((pte_val_ma(*ptep) & _PAGE_RW) | ~_PAGE_RW) & 1498 1482 pte_val_ma(pte)); 1499 - #endif 1483 + 1484 + return pte; 1485 + } 1486 + #else /* CONFIG_X86_64 */ 1487 + static __init pte_t mask_rw_pte(pte_t *ptep, pte_t pte) 1488 + { 1489 + unsigned long pfn = pte_pfn(pte); 1500 1490 1501 1491 /* 1502 1492 * If the new pfn is within the range of the newly allocated ··· 1509 1491 * it is RO. 
1510 1492 */ 1511 1493 if (((!is_early_ioremap_ptep(ptep) && 1512 - pfn >= pgt_buf_start && pfn < pgt_buf_end)) || 1494 + pfn >= pgt_buf_start && pfn < pgt_buf_top)) || 1513 1495 (is_early_ioremap_ptep(ptep) && pfn != (pgt_buf_end - 1))) 1514 1496 pte = pte_wrprotect(pte); 1515 1497 1516 1498 return pte; 1517 1499 } 1500 + #endif /* CONFIG_X86_64 */ 1518 1501 1519 1502 /* Init-time set_pte while constructing initial pagetables, which 1520 1503 doesn't allow RO pagetable pages to be remapped RW */ ··· 2119 2100 2120 2101 void __init xen_init_mmu_ops(void) 2121 2102 { 2103 + x86_init.mapping.pagetable_reserve = xen_mapping_pagetable_reserve; 2122 2104 x86_init.paging.pagetable_setup_start = xen_pagetable_setup_start; 2123 2105 x86_init.paging.pagetable_setup_done = xen_pagetable_setup_done; 2124 2106 pv_mmu_ops = xen_mmu_ops;
+1 -1
arch/x86/xen/setup.c
··· 227 227 228 228 memcpy(map_raw, map, sizeof(map)); 229 229 e820.nr_map = 0; 230 - xen_extra_mem_start = mem_end; 230 + xen_extra_mem_start = max((1ULL << 32), mem_end); 231 231 for (i = 0; i < memmap.nr_entries; i++) { 232 232 unsigned long long end; 233 233
+6 -12
arch/xtensa/kernel/irq.c
··· 64 64 65 65 int arch_show_interrupts(struct seq_file *p, int prec) 66 66 { 67 - int j; 68 - 69 - seq_printf(p, "%*s: ", prec, "NMI"); 70 - for_each_online_cpu(j) 71 - seq_printf(p, "%10u ", nmi_count(j)); 72 - seq_putc(p, '\n'); 73 67 seq_printf(p, "%*s: ", prec, "ERR"); 74 68 seq_printf(p, "%10u\n", atomic_read(&irq_err_count)); 75 69 return 0; 76 70 } 77 71 78 - static void xtensa_irq_mask(struct irq_chip *d) 72 + static void xtensa_irq_mask(struct irq_data *d) 79 73 { 80 74 cached_irq_mask &= ~(1 << d->irq); 81 75 set_sr (cached_irq_mask, INTENABLE); 82 76 } 83 77 84 - static void xtensa_irq_unmask(struct irq_chip *d) 78 + static void xtensa_irq_unmask(struct irq_data *d) 85 79 { 86 80 cached_irq_mask |= 1 << d->irq; 87 81 set_sr (cached_irq_mask, INTENABLE); 88 82 } 89 83 90 - static void xtensa_irq_enable(struct irq_chip *d) 84 + static void xtensa_irq_enable(struct irq_data *d) 91 85 { 92 86 variant_irq_enable(d->irq); 93 87 xtensa_irq_unmask(d->irq); 94 88 } 95 89 96 - static void xtensa_irq_disable(struct irq_chip *d) 90 + static void xtensa_irq_disable(struct irq_data *d) 97 91 { 98 92 xtensa_irq_mask(d->irq); 99 93 variant_irq_disable(d->irq); 100 94 } 101 95 102 - static void xtensa_irq_ack(struct irq_chip *d) 96 + static void xtensa_irq_ack(struct irq_data *d) 103 97 { 104 98 set_sr(1 << d->irq, INTCLEAR); 105 99 } 106 100 107 - static int xtensa_irq_retrigger(struct irq_chip *d) 101 + static int xtensa_irq_retrigger(struct irq_data *d) 108 102 { 109 103 set_sr (1 << d->irq, INTSET); 110 104 return 1;
+7
block/blk-cgroup.c
··· 114 114 } 115 115 EXPORT_SYMBOL_GPL(cgroup_to_blkio_cgroup); 116 116 117 + struct blkio_cgroup *task_blkio_cgroup(struct task_struct *tsk) 118 + { 119 + return container_of(task_subsys_state(tsk, blkio_subsys_id), 120 + struct blkio_cgroup, css); 121 + } 122 + EXPORT_SYMBOL_GPL(task_blkio_cgroup); 123 + 117 124 static inline void 118 125 blkio_update_group_weight(struct blkio_group *blkg, unsigned int weight) 119 126 {
+3
block/blk-cgroup.h
··· 291 291 #if defined(CONFIG_BLK_CGROUP) || defined(CONFIG_BLK_CGROUP_MODULE) 292 292 extern struct blkio_cgroup blkio_root_cgroup; 293 293 extern struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup); 294 + extern struct blkio_cgroup *task_blkio_cgroup(struct task_struct *tsk); 294 295 extern void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg, 295 296 struct blkio_group *blkg, void *key, dev_t dev, 296 297 enum blkio_policy_id plid); ··· 315 314 struct cgroup; 316 315 static inline struct blkio_cgroup * 317 316 cgroup_to_blkio_cgroup(struct cgroup *cgroup) { return NULL; } 317 + static inline struct blkio_cgroup * 318 + task_blkio_cgroup(struct task_struct *tsk) { return NULL; } 318 319 319 320 static inline void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg, 320 321 struct blkio_group *blkg, void *key, dev_t dev,
+5 -12
block/blk-core.c
··· 292 292 /** 293 293 * __blk_run_queue - run a single device queue 294 294 * @q: The queue to run 295 - * @force_kblockd: Don't run @q->request_fn directly. Use kblockd. 296 295 * 297 296 * Description: 298 297 * See @blk_run_queue. This variant must be called with the queue lock ··· 302 303 if (unlikely(blk_queue_stopped(q))) 303 304 return; 304 305 305 - /* 306 - * Only recurse once to avoid overrunning the stack, let the unplug 307 - * handling reinvoke the handler shortly if we already got there. 308 - */ 309 - if (!queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) { 310 - q->request_fn(q); 311 - queue_flag_clear(QUEUE_FLAG_REENTER, q); 312 - } else 313 - queue_delayed_work(kblockd_workqueue, &q->delay_work, 0); 306 + q->request_fn(q); 314 307 } 315 308 EXPORT_SYMBOL(__blk_run_queue); 316 309 ··· 316 325 */ 317 326 void blk_run_queue_async(struct request_queue *q) 318 327 { 319 - if (likely(!blk_queue_stopped(q))) 328 + if (likely(!blk_queue_stopped(q))) { 329 + __cancel_delayed_work(&q->delay_work); 320 330 queue_delayed_work(kblockd_workqueue, &q->delay_work, 0); 331 + } 321 332 } 333 + EXPORT_SYMBOL(blk_run_queue_async); 322 334 323 335 /** 324 336 * blk_run_queue - run a single device queue ··· 2781 2787 2782 2788 local_irq_restore(flags); 2783 2789 } 2784 - EXPORT_SYMBOL(blk_flush_plug_list); 2785 2790 2786 2791 void blk_finish_plug(struct blk_plug *plug) 2787 2792 {
+5 -3
block/blk-sysfs.c
··· 66 66 67 67 if (rl->count[BLK_RW_SYNC] >= q->nr_requests) { 68 68 blk_set_queue_full(q, BLK_RW_SYNC); 69 - } else if (rl->count[BLK_RW_SYNC]+1 <= q->nr_requests) { 69 + } else { 70 70 blk_clear_queue_full(q, BLK_RW_SYNC); 71 71 wake_up(&rl->wait[BLK_RW_SYNC]); 72 72 } 73 73 74 74 if (rl->count[BLK_RW_ASYNC] >= q->nr_requests) { 75 75 blk_set_queue_full(q, BLK_RW_ASYNC); 76 - } else if (rl->count[BLK_RW_ASYNC]+1 <= q->nr_requests) { 76 + } else { 77 77 blk_clear_queue_full(q, BLK_RW_ASYNC); 78 78 wake_up(&rl->wait[BLK_RW_ASYNC]); 79 79 } ··· 509 509 return ret; 510 510 511 511 ret = kobject_add(&q->kobj, kobject_get(&dev->kobj), "%s", "queue"); 512 - if (ret < 0) 512 + if (ret < 0) { 513 + blk_trace_remove_sysfs(dev); 513 514 return ret; 515 + } 514 516 515 517 kobject_uevent(&q->kobj, KOBJ_ADD); 516 518
+4 -5
block/blk-throttle.c
··· 160 160 } 161 161 162 162 static struct throtl_grp * throtl_find_alloc_tg(struct throtl_data *td, 163 - struct cgroup *cgroup) 163 + struct blkio_cgroup *blkcg) 164 164 { 165 - struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup); 166 165 struct throtl_grp *tg = NULL; 167 166 void *key = td; 168 167 struct backing_dev_info *bdi = &td->queue->backing_dev_info; ··· 228 229 229 230 static struct throtl_grp * throtl_get_tg(struct throtl_data *td) 230 231 { 231 - struct cgroup *cgroup; 232 232 struct throtl_grp *tg = NULL; 233 + struct blkio_cgroup *blkcg; 233 234 234 235 rcu_read_lock(); 235 - cgroup = task_cgroup(current, blkio_subsys_id); 236 - tg = throtl_find_alloc_tg(td, cgroup); 236 + blkcg = task_blkio_cgroup(current); 237 + tg = throtl_find_alloc_tg(td, blkcg); 237 238 if (!tg) 238 239 tg = &td->root_tg; 239 240 rcu_read_unlock();
-1
block/blk.h
··· 22 22 void blk_delete_timer(struct request *); 23 23 void blk_add_timer(struct request *); 24 24 void __generic_unplug_device(struct request_queue *); 25 - void blk_run_queue_async(struct request_queue *q); 26 25 27 26 /* 28 27 * Internal atomic flags for request handling
+13 -22
block/cfq-iosched.c
··· 1014 1014 cfqg->needs_update = true; 1015 1015 } 1016 1016 1017 - static struct cfq_group * 1018 - cfq_find_alloc_cfqg(struct cfq_data *cfqd, struct cgroup *cgroup, int create) 1017 + static struct cfq_group * cfq_find_alloc_cfqg(struct cfq_data *cfqd, 1018 + struct blkio_cgroup *blkcg, int create) 1019 1019 { 1020 - struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup); 1021 1020 struct cfq_group *cfqg = NULL; 1022 1021 void *key = cfqd; 1023 1022 int i, j; ··· 1078 1079 */ 1079 1080 static struct cfq_group *cfq_get_cfqg(struct cfq_data *cfqd, int create) 1080 1081 { 1081 - struct cgroup *cgroup; 1082 + struct blkio_cgroup *blkcg; 1082 1083 struct cfq_group *cfqg = NULL; 1083 1084 1084 1085 rcu_read_lock(); 1085 - cgroup = task_cgroup(current, blkio_subsys_id); 1086 - cfqg = cfq_find_alloc_cfqg(cfqd, cgroup, create); 1086 + blkcg = task_blkio_cgroup(current); 1087 + cfqg = cfq_find_alloc_cfqg(cfqd, blkcg, create); 1087 1088 if (!cfqg && create) 1088 1089 cfqg = &cfqd->root_group; 1089 1090 rcu_read_unlock(); ··· 2581 2582 } 2582 2583 2583 2584 /* 2584 - * Must always be called with the rcu_read_lock() held 2585 - */ 2586 - static void 2587 - __call_for_each_cic(struct io_context *ioc, 2588 - void (*func)(struct io_context *, struct cfq_io_context *)) 2589 - { 2590 - struct cfq_io_context *cic; 2591 - struct hlist_node *n; 2592 - 2593 - hlist_for_each_entry_rcu(cic, n, &ioc->cic_list, cic_list) 2594 - func(ioc, cic); 2595 - } 2596 - 2597 - /* 2598 2585 * Call func for each cic attached to this ioc. 
2599 2586 */ 2600 2587 static void 2601 2588 call_for_each_cic(struct io_context *ioc, 2602 2589 void (*func)(struct io_context *, struct cfq_io_context *)) 2603 2590 { 2591 + struct cfq_io_context *cic; 2592 + struct hlist_node *n; 2593 + 2604 2594 rcu_read_lock(); 2605 - __call_for_each_cic(ioc, func); 2595 + 2596 + hlist_for_each_entry_rcu(cic, n, &ioc->cic_list, cic_list) 2597 + func(ioc, cic); 2598 + 2606 2599 rcu_read_unlock(); 2607 2600 } 2608 2601 ··· 2655 2664 * should be ok to iterate over the known list, we will see all cic's 2656 2665 * since no new ones are added. 2657 2666 */ 2658 - __call_for_each_cic(ioc, cic_free_func); 2667 + call_for_each_cic(ioc, cic_free_func); 2659 2668 } 2660 2669 2661 2670 static void cfq_put_cooperator(struct cfq_queue *cfqq)
+2 -1
block/elevator.c
··· 666 666 q->boundary_rq = rq; 667 667 } 668 668 } else if (!(rq->cmd_flags & REQ_ELVPRIV) && 669 - where == ELEVATOR_INSERT_SORT) 669 + (where == ELEVATOR_INSERT_SORT || 670 + where == ELEVATOR_INSERT_SORT_MERGE)) 670 671 where = ELEVATOR_INSERT_BACK; 671 672 672 673 switch (where) {
+6 -2
block/genhd.c
··· 1588 1588 1589 1589 spin_unlock_irq(&ev->lock); 1590 1590 1591 - /* tell userland about new events */ 1591 + /* 1592 + * Tell userland about new events. Only the events listed in 1593 + * @disk->events are reported. Unlisted events are processed the 1594 + * same internally but never get reported to userland. 1595 + */ 1592 1596 for (i = 0; i < ARRAY_SIZE(disk_uevents); i++) 1593 - if (events & (1 << i)) 1597 + if (events & disk->events & (1 << i)) 1594 1598 envp[nr_events++] = disk_uevents[i]; 1595 1599 1596 1600 if (nr_events)
+4
drivers/acpi/scan.c
··· 943 943 if (ACPI_SUCCESS(status)) 944 944 device->flags.lockable = 1; 945 945 946 + /* Power resources cannot be power manageable. */ 947 + if (device->device_type == ACPI_BUS_TYPE_POWER) 948 + return 0; 949 + 946 950 /* Presence of _PS0|_PR0 indicates 'power manageable' */ 947 951 status = acpi_get_handle(device->handle, "_PS0", &temp); 948 952 if (ACPI_FAILURE(status))
+7 -1
drivers/ata/ahci.c
··· 150 150 { 151 151 AHCI_HFLAGS (AHCI_HFLAG_NO_FPDMA_AA | AHCI_HFLAG_NO_PMP | 152 152 AHCI_HFLAG_YES_NCQ), 153 - .flags = AHCI_FLAG_COMMON, 153 + .flags = AHCI_FLAG_COMMON | ATA_FLAG_NO_DIPM, 154 154 .pio_mask = ATA_PIO4, 155 155 .udma_mask = ATA_UDMA6, 156 156 .port_ops = &ahci_ops, ··· 261 261 { PCI_VDEVICE(INTEL, 0x1d06), board_ahci }, /* PBG RAID */ 262 262 { PCI_VDEVICE(INTEL, 0x2826), board_ahci }, /* PBG RAID */ 263 263 { PCI_VDEVICE(INTEL, 0x2323), board_ahci }, /* DH89xxCC AHCI */ 264 + { PCI_VDEVICE(INTEL, 0x1e02), board_ahci }, /* Panther Point AHCI */ 265 + { PCI_VDEVICE(INTEL, 0x1e03), board_ahci }, /* Panther Point AHCI */ 266 + { PCI_VDEVICE(INTEL, 0x1e04), board_ahci }, /* Panther Point RAID */ 267 + { PCI_VDEVICE(INTEL, 0x1e05), board_ahci }, /* Panther Point RAID */ 268 + { PCI_VDEVICE(INTEL, 0x1e06), board_ahci }, /* Panther Point RAID */ 269 + { PCI_VDEVICE(INTEL, 0x1e07), board_ahci }, /* Panther Point RAID */ 264 270 265 271 /* JMicron 360/1/3/5/6, match class to avoid IDE function */ 266 272 { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
+4
drivers/ata/ahci.h
··· 229 229 EM_CTL_ALHD = (1 << 26), /* Activity LED */ 230 230 EM_CTL_XMT = (1 << 25), /* Transmit Only */ 231 231 EM_CTL_SMB = (1 << 24), /* Single Message Buffer */ 232 + EM_CTL_SGPIO = (1 << 19), /* SGPIO messages supported */ 233 + EM_CTL_SES = (1 << 18), /* SES-2 messages supported */ 234 + EM_CTL_SAFTE = (1 << 17), /* SAF-TE messages supported */ 235 + EM_CTL_LED = (1 << 16), /* LED messages supported */ 232 236 233 237 /* em message type */ 234 238 EM_MSG_TYPE_LED = (1 << 0), /* LED */
+8
drivers/ata/ata_piix.c
··· 309 309 { 0x8086, 0x1d00, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata }, 310 310 /* SATA Controller IDE (PBG) */ 311 311 { 0x8086, 0x1d08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, 312 + /* SATA Controller IDE (Panther Point) */ 313 + { 0x8086, 0x1e00, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata }, 314 + /* SATA Controller IDE (Panther Point) */ 315 + { 0x8086, 0x1e01, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata }, 316 + /* SATA Controller IDE (Panther Point) */ 317 + { 0x8086, 0x1e08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, 318 + /* SATA Controller IDE (Panther Point) */ 319 + { 0x8086, 0x1e09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, 312 320 { } /* terminate list */ 313 321 }; 314 322
+37 -2
drivers/ata/libahci.c
··· 109 109 static ssize_t ahci_store_em_buffer(struct device *dev, 110 110 struct device_attribute *attr, 111 111 const char *buf, size_t size); 112 + static ssize_t ahci_show_em_supported(struct device *dev, 113 + struct device_attribute *attr, char *buf); 112 114 113 115 static DEVICE_ATTR(ahci_host_caps, S_IRUGO, ahci_show_host_caps, NULL); 114 116 static DEVICE_ATTR(ahci_host_cap2, S_IRUGO, ahci_show_host_cap2, NULL); ··· 118 116 static DEVICE_ATTR(ahci_port_cmd, S_IRUGO, ahci_show_port_cmd, NULL); 119 117 static DEVICE_ATTR(em_buffer, S_IWUSR | S_IRUGO, 120 118 ahci_read_em_buffer, ahci_store_em_buffer); 119 + static DEVICE_ATTR(em_message_supported, S_IRUGO, ahci_show_em_supported, NULL); 121 120 122 121 struct device_attribute *ahci_shost_attrs[] = { 123 122 &dev_attr_link_power_management_policy, ··· 129 126 &dev_attr_ahci_host_version, 130 127 &dev_attr_ahci_port_cmd, 131 128 &dev_attr_em_buffer, 129 + &dev_attr_em_message_supported, 132 130 NULL 133 131 }; 134 132 EXPORT_SYMBOL_GPL(ahci_shost_attrs); ··· 345 341 spin_unlock_irqrestore(ap->lock, flags); 346 342 347 343 return size; 344 + } 345 + 346 + static ssize_t ahci_show_em_supported(struct device *dev, 347 + struct device_attribute *attr, char *buf) 348 + { 349 + struct Scsi_Host *shost = class_to_shost(dev); 350 + struct ata_port *ap = ata_shost_to_port(shost); 351 + struct ahci_host_priv *hpriv = ap->host->private_data; 352 + void __iomem *mmio = hpriv->mmio; 353 + u32 em_ctl; 354 + 355 + em_ctl = readl(mmio + HOST_EM_CTL); 356 + 357 + return sprintf(buf, "%s%s%s%s\n", 358 + em_ctl & EM_CTL_LED ? "led " : "", 359 + em_ctl & EM_CTL_SAFTE ? "saf-te " : "", 360 + em_ctl & EM_CTL_SES ? "ses-2 " : "", 361 + em_ctl & EM_CTL_SGPIO ? 
"sgpio " : ""); 348 362 } 349 363 350 364 /** ··· 1919 1897 ahci_enable_fbs(ap); 1920 1898 1921 1899 pp->intr_mask |= PORT_IRQ_BAD_PMP; 1922 - writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK); 1900 + 1901 + /* 1902 + * We must not change the port interrupt mask register if the 1903 + * port is marked frozen, the value in pp->intr_mask will be 1904 + * restored later when the port is thawed. 1905 + * 1906 + * Note that during initialization, the port is marked as 1907 + * frozen since the irq handler is not yet registered. 1908 + */ 1909 + if (!(ap->pflags & ATA_PFLAG_FROZEN)) 1910 + writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK); 1923 1911 } 1924 1912 1925 1913 static void ahci_pmp_detach(struct ata_port *ap) ··· 1945 1913 writel(cmd, port_mmio + PORT_CMD); 1946 1914 1947 1915 pp->intr_mask &= ~PORT_IRQ_BAD_PMP; 1948 - writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK); 1916 + 1917 + /* see comment above in ahci_pmp_attach() */ 1918 + if (!(ap->pflags & ATA_PFLAG_FROZEN)) 1919 + writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK); 1949 1920 } 1950 1921 1951 1922 int ahci_port_resume(struct ata_port *ap)
+2 -1
drivers/ata/libata-core.c
··· 4139 4139 */ 4140 4140 { "PIONEER DVD-RW DVRTD08", "1.00", ATA_HORKAGE_NOSETXFER }, 4141 4141 { "PIONEER DVD-RW DVR-212D", "1.28", ATA_HORKAGE_NOSETXFER }, 4142 + { "PIONEER DVD-RW DVR-216D", "1.08", ATA_HORKAGE_NOSETXFER }, 4142 4143 4143 4144 /* End Marker */ 4144 4145 { } ··· 5481 5480 if (!ap) 5482 5481 return NULL; 5483 5482 5484 - ap->pflags |= ATA_PFLAG_INITIALIZING; 5483 + ap->pflags |= ATA_PFLAG_INITIALIZING | ATA_PFLAG_FROZEN; 5485 5484 ap->lock = &host->lock; 5486 5485 ap->print_id = -1; 5487 5486 ap->host = host;
+4 -2
drivers/ata/libata-eh.c
··· 3316 3316 struct ata_eh_context *ehc = &link->eh_context; 3317 3317 struct ata_device *dev, *link_dev = NULL, *lpm_dev = NULL; 3318 3318 enum ata_lpm_policy old_policy = link->lpm_policy; 3319 + bool no_dipm = link->ap->flags & ATA_FLAG_NO_DIPM; 3319 3320 unsigned int hints = ATA_LPM_EMPTY | ATA_LPM_HIPM; 3320 3321 unsigned int err_mask; 3321 3322 int rc; ··· 3333 3332 */ 3334 3333 ata_for_each_dev(dev, link, ENABLED) { 3335 3334 bool hipm = ata_id_has_hipm(dev->id); 3336 - bool dipm = ata_id_has_dipm(dev->id); 3335 + bool dipm = ata_id_has_dipm(dev->id) && !no_dipm; 3337 3336 3338 3337 /* find the first enabled and LPM enabled devices */ 3339 3338 if (!link_dev) ··· 3390 3389 3391 3390 /* host config updated, enable DIPM if transitioning to MIN_POWER */ 3392 3391 ata_for_each_dev(dev, link, ENABLED) { 3393 - if (policy == ATA_LPM_MIN_POWER && ata_id_has_dipm(dev->id)) { 3392 + if (policy == ATA_LPM_MIN_POWER && !no_dipm && 3393 + ata_id_has_dipm(dev->id)) { 3394 3394 err_mask = ata_dev_set_feature(dev, 3395 3395 SETFEATURES_SATA_ENABLE, SATA_DIPM); 3396 3396 if (err_mask && err_mask != AC_ERR_DEV) {
+19 -3
drivers/ata/pata_at91.c
··· 33 33 34 34 35 35 #define DRV_NAME "pata_at91" 36 - #define DRV_VERSION "0.1" 36 + #define DRV_VERSION "0.2" 37 37 38 38 #define CF_IDE_OFFSET 0x00c00000 39 39 #define CF_ALT_IDE_OFFSET 0x00e00000 40 40 #define CF_IDE_RES_SIZE 0x08 41 + #define NCS_RD_PULSE_LIMIT 0x3f /* maximal value for pulse bitfields */ 41 42 42 43 struct at91_ide_info { 43 44 unsigned long mode; ··· 50 49 void __iomem *alt_addr; 51 50 }; 52 51 53 - static const struct ata_timing initial_timing = 54 - {XFER_PIO_0, 70, 290, 240, 600, 165, 150, 600, 0}; 52 + static const struct ata_timing initial_timing = { 53 + .mode = XFER_PIO_0, 54 + .setup = 70, 55 + .act8b = 290, 56 + .rec8b = 240, 57 + .cyc8b = 600, 58 + .active = 165, 59 + .recover = 150, 60 + .dmack_hold = 0, 61 + .cycle = 600, 62 + .udma = 0 63 + }; 55 64 56 65 static unsigned long calc_mck_cycles(unsigned long ns, unsigned long mck_hz) 57 66 { ··· 120 109 /* (CS0, CS1, DIR, OE) <= (CFCE1, CFCE2, CFRNW, NCSX) timings */ 121 110 ncs_read_setup = 1; 122 111 ncs_read_pulse = read_cycle - 2; 112 + if (ncs_read_pulse > NCS_RD_PULSE_LIMIT) { 113 + ncs_read_pulse = NCS_RD_PULSE_LIMIT; 114 + dev_warn(dev, "ncs_read_pulse limited to maximal value %lu\n", 115 + ncs_read_pulse); 116 + } 123 117 124 118 /* Write timings same as read timings */ 125 119 write_cycle = read_cycle;
+5 -2
drivers/atm/fore200e.c
··· 2643 2643 } 2644 2644 2645 2645 #ifdef CONFIG_SBUS 2646 + static const struct of_device_id fore200e_sba_match[]; 2646 2647 static int __devinit fore200e_sba_probe(struct platform_device *op) 2647 2648 { 2649 + const struct of_device_id *match; 2648 2650 const struct fore200e_bus *bus; 2649 2651 struct fore200e *fore200e; 2650 2652 static int index = 0; 2651 2653 int err; 2652 2654 2653 - if (!op->dev.of_match) 2655 + match = of_match_device(fore200e_sba_match, &op->dev); 2656 + if (!match) 2654 2657 return -EINVAL; 2655 - bus = op->dev.of_match->data; 2658 + bus = match->data; 2656 2659 2657 2660 fore200e = kzalloc(sizeof(struct fore200e), GFP_KERNEL); 2658 2661 if (!fore200e)
+1
drivers/base/power/main.c
··· 63 63 dev->power.wakeup = NULL; 64 64 spin_lock_init(&dev->power.lock); 65 65 pm_runtime_init(dev); 66 + INIT_LIST_HEAD(&dev->power.entry); 66 67 } 67 68 68 69 /**
+1 -1
drivers/base/power/wakeup.c
··· 258 258 if (!!dev->power.can_wakeup == !!capable) 259 259 return; 260 260 261 - if (device_is_registered(dev)) { 261 + if (device_is_registered(dev) && !list_empty(&dev->power.entry)) { 262 262 if (capable) { 263 263 if (wakeup_sysfs_add(dev)) 264 264 return;
+2
drivers/base/syscore.c
··· 73 73 74 74 return ret; 75 75 } 76 + EXPORT_SYMBOL_GPL(syscore_suspend); 76 77 77 78 /** 78 79 * syscore_resume - Execute all the registered system core resume callbacks. ··· 96 95 "Interrupts enabled after %pF\n", ops->resume); 97 96 } 98 97 } 98 + EXPORT_SYMBOL_GPL(syscore_resume); 99 99 #endif /* CONFIG_PM_SLEEP */ 100 100 101 101 /**
-1
drivers/block/DAC960.c
··· 2547 2547 disk->major = MajorNumber; 2548 2548 disk->first_minor = n << DAC960_MaxPartitionsBits; 2549 2549 disk->fops = &DAC960_BlockDeviceOperations; 2550 - disk->events = DISK_EVENT_MEDIA_CHANGE; 2551 2550 } 2552 2551 /* 2553 2552 Indicate the Block Device Registration completed successfully,
-1
drivers/block/amiflop.c
··· 1736 1736 disk->major = FLOPPY_MAJOR; 1737 1737 disk->first_minor = drive; 1738 1738 disk->fops = &floppy_fops; 1739 - disk->events = DISK_EVENT_MEDIA_CHANGE; 1740 1739 sprintf(disk->disk_name, "fd%d", drive); 1741 1740 disk->private_data = &unit[drive]; 1742 1741 set_capacity(disk, 880*2);
-1
drivers/block/ataflop.c
··· 1964 1964 unit[i].disk->first_minor = i; 1965 1965 sprintf(unit[i].disk->disk_name, "fd%d", i); 1966 1966 unit[i].disk->fops = &floppy_fops; 1967 - unit[i].disk->events = DISK_EVENT_MEDIA_CHANGE; 1968 1967 unit[i].disk->private_data = &unit[i]; 1969 1968 unit[i].disk->queue = blk_init_queue(do_fd_request, 1970 1969 &ataflop_lock);
-1
drivers/block/floppy.c
··· 4205 4205 disks[dr]->major = FLOPPY_MAJOR; 4206 4206 disks[dr]->first_minor = TOMINOR(dr); 4207 4207 disks[dr]->fops = &floppy_fops; 4208 - disks[dr]->events = DISK_EVENT_MEDIA_CHANGE; 4209 4208 sprintf(disks[dr]->disk_name, "fd%d", dr); 4210 4209 4211 4210 init_timer(&motor_off_timer[dr]);
-1
drivers/block/paride/pd.c
··· 837 837 p->fops = &pd_fops; 838 838 p->major = major; 839 839 p->first_minor = (disk - pd) << PD_BITS; 840 - p->events = DISK_EVENT_MEDIA_CHANGE; 841 840 disk->gd = p; 842 841 p->private_data = disk; 843 842 p->queue = pd_queue;
-1
drivers/block/paride/pf.c
··· 294 294 disk->first_minor = unit; 295 295 strcpy(disk->disk_name, pf->name); 296 296 disk->fops = &pf_fops; 297 - disk->events = DISK_EVENT_MEDIA_CHANGE; 298 297 if (!(*drives[unit])[D_PRT]) 299 298 pf_drive_count++; 300 299 }
+158 -23
drivers/block/rbd.c
··· 92 92 struct list_head node; 93 93 }; 94 94 95 + struct rbd_req_coll; 96 + 95 97 /* 96 98 * a single io request 97 99 */ ··· 102 100 struct bio *bio; /* cloned bio */ 103 101 struct page **pages; /* list of used pages */ 104 102 u64 len; 103 + int coll_index; 104 + struct rbd_req_coll *coll; 105 + }; 106 + 107 + struct rbd_req_status { 108 + int done; 109 + int rc; 110 + u64 bytes; 111 + }; 112 + 113 + /* 114 + * a collection of requests 115 + */ 116 + struct rbd_req_coll { 117 + int total; 118 + int num_done; 119 + struct kref kref; 120 + struct rbd_req_status status[0]; 105 121 }; 106 122 107 123 struct rbd_snap { ··· 436 416 rbd_dev->client = NULL; 437 417 } 438 418 419 + /* 420 + * Destroy requests collection 421 + */ 422 + static void rbd_coll_release(struct kref *kref) 423 + { 424 + struct rbd_req_coll *coll = 425 + container_of(kref, struct rbd_req_coll, kref); 426 + 427 + dout("rbd_coll_release %p\n", coll); 428 + kfree(coll); 429 + } 439 430 440 431 /* 441 432 * Create a new header structure, translate header format from the on-disk ··· 621 590 return len; 622 591 } 623 592 593 + static int rbd_get_num_segments(struct rbd_image_header *header, 594 + u64 ofs, u64 len) 595 + { 596 + u64 start_seg = ofs >> header->obj_order; 597 + u64 end_seg = (ofs + len - 1) >> header->obj_order; 598 + return end_seg - start_seg + 1; 599 + } 600 + 624 601 /* 625 602 * bio helpers 626 603 */ ··· 774 735 kfree(ops); 775 736 } 776 737 738 + static void rbd_coll_end_req_index(struct request *rq, 739 + struct rbd_req_coll *coll, 740 + int index, 741 + int ret, u64 len) 742 + { 743 + struct request_queue *q; 744 + int min, max, i; 745 + 746 + dout("rbd_coll_end_req_index %p index %d ret %d len %lld\n", 747 + coll, index, ret, len); 748 + 749 + if (!rq) 750 + return; 751 + 752 + if (!coll) { 753 + blk_end_request(rq, ret, len); 754 + return; 755 + } 756 + 757 + q = rq->q; 758 + 759 + spin_lock_irq(q->queue_lock); 760 + coll->status[index].done = 1; 761 + coll->status[index].rc 
= ret; 762 + coll->status[index].bytes = len; 763 + max = min = coll->num_done; 764 + while (max < coll->total && coll->status[max].done) 765 + max++; 766 + 767 + for (i = min; i<max; i++) { 768 + __blk_end_request(rq, coll->status[i].rc, 769 + coll->status[i].bytes); 770 + coll->num_done++; 771 + kref_put(&coll->kref, rbd_coll_release); 772 + } 773 + spin_unlock_irq(q->queue_lock); 774 + } 775 + 776 + static void rbd_coll_end_req(struct rbd_request *req, 777 + int ret, u64 len) 778 + { 779 + rbd_coll_end_req_index(req->rq, req->coll, req->coll_index, ret, len); 780 + } 781 + 777 782 /* 778 783 * Send ceph osd request 779 784 */ ··· 832 749 int flags, 833 750 struct ceph_osd_req_op *ops, 834 751 int num_reply, 752 + struct rbd_req_coll *coll, 753 + int coll_index, 835 754 void (*rbd_cb)(struct ceph_osd_request *req, 836 755 struct ceph_msg *msg), 837 756 struct ceph_osd_request **linger_req, ··· 848 763 struct ceph_osd_request_head *reqhead; 849 764 struct rbd_image_header *header = &dev->header; 850 765 851 - ret = -ENOMEM; 852 766 req_data = kzalloc(sizeof(*req_data), GFP_NOIO); 853 - if (!req_data) 854 - goto done; 767 + if (!req_data) { 768 + if (coll) 769 + rbd_coll_end_req_index(rq, coll, coll_index, 770 + -ENOMEM, len); 771 + return -ENOMEM; 772 + } 855 773 856 - dout("rbd_do_request len=%lld ofs=%lld\n", len, ofs); 774 + if (coll) { 775 + req_data->coll = coll; 776 + req_data->coll_index = coll_index; 777 + } 778 + 779 + dout("rbd_do_request obj=%s ofs=%lld len=%lld\n", obj, len, ofs); 857 780 858 781 down_read(&header->snap_rwsem); 859 782 ··· 870 777 ops, 871 778 false, 872 779 GFP_NOIO, pages, bio); 873 - if (IS_ERR(req)) { 780 + if (!req) { 874 781 up_read(&header->snap_rwsem); 875 - ret = PTR_ERR(req); 782 + ret = -ENOMEM; 876 783 goto done_pages; 877 784 } 878 785 ··· 921 828 ret = ceph_osdc_wait_request(&dev->client->osdc, req); 922 829 if (ver) 923 830 *ver = le64_to_cpu(req->r_reassert_version.version); 924 - dout("reassert_ver=%lld\n", 
le64_to_cpu(req->r_reassert_version.version)); 831 + dout("reassert_ver=%lld\n", 832 + le64_to_cpu(req->r_reassert_version.version)); 925 833 ceph_osdc_put_request(req); 926 834 } 927 835 return ret; ··· 931 837 bio_chain_put(req_data->bio); 932 838 ceph_osdc_put_request(req); 933 839 done_pages: 840 + rbd_coll_end_req(req_data, ret, len); 934 841 kfree(req_data); 935 - done: 936 - if (rq) 937 - blk_end_request(rq, ret, len); 938 842 return ret; 939 843 } 940 844 ··· 966 874 bytes = req_data->len; 967 875 } 968 876 969 - blk_end_request(req_data->rq, rc, bytes); 877 + rbd_coll_end_req(req_data, rc, bytes); 970 878 971 879 if (req_data->bio) 972 880 bio_chain_put(req_data->bio); ··· 1026 934 flags, 1027 935 ops, 1028 936 2, 937 + NULL, 0, 1029 938 NULL, 1030 939 linger_req, ver); 1031 940 if (ret < 0) ··· 1052 959 u64 snapid, 1053 960 int opcode, int flags, int num_reply, 1054 961 u64 ofs, u64 len, 1055 - struct bio *bio) 962 + struct bio *bio, 963 + struct rbd_req_coll *coll, 964 + int coll_index) 1056 965 { 1057 966 char *seg_name; 1058 967 u64 seg_ofs; ··· 1090 995 flags, 1091 996 ops, 1092 997 num_reply, 998 + coll, coll_index, 1093 999 rbd_req_cb, 0, NULL); 1000 + 1001 + rbd_destroy_ops(ops); 1094 1002 done: 1095 1003 kfree(seg_name); 1096 1004 return ret; ··· 1106 1008 struct rbd_device *rbd_dev, 1107 1009 struct ceph_snap_context *snapc, 1108 1010 u64 ofs, u64 len, 1109 - struct bio *bio) 1011 + struct bio *bio, 1012 + struct rbd_req_coll *coll, 1013 + int coll_index) 1110 1014 { 1111 1015 return rbd_do_op(rq, rbd_dev, snapc, CEPH_NOSNAP, 1112 1016 CEPH_OSD_OP_WRITE, 1113 1017 CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK, 1114 1018 2, 1115 - ofs, len, bio); 1019 + ofs, len, bio, coll, coll_index); 1116 1020 } 1117 1021 1118 1022 /* ··· 1124 1024 struct rbd_device *rbd_dev, 1125 1025 u64 snapid, 1126 1026 u64 ofs, u64 len, 1127 - struct bio *bio) 1027 + struct bio *bio, 1028 + struct rbd_req_coll *coll, 1029 + int coll_index) 1128 1030 { 1129 1031 return 
rbd_do_op(rq, rbd_dev, NULL, 1130 1032 (snapid ? snapid : CEPH_NOSNAP), 1131 1033 CEPH_OSD_OP_READ, 1132 1034 CEPH_OSD_FLAG_READ, 1133 1035 2, 1134 - ofs, len, bio); 1036 + ofs, len, bio, coll, coll_index); 1135 1037 } 1136 1038 1137 1039 /* ··· 1165 1063 { 1166 1064 struct ceph_osd_req_op *ops; 1167 1065 struct page **pages = NULL; 1168 - int ret = rbd_create_rw_ops(&ops, 1, CEPH_OSD_OP_NOTIFY_ACK, 0); 1066 + int ret; 1067 + 1068 + ret = rbd_create_rw_ops(&ops, 1, CEPH_OSD_OP_NOTIFY_ACK, 0); 1169 1069 if (ret < 0) 1170 1070 return ret; 1171 1071 ··· 1181 1077 CEPH_OSD_FLAG_READ, 1182 1078 ops, 1183 1079 1, 1080 + NULL, 0, 1184 1081 rbd_simple_req_cb, 0, NULL); 1185 1082 1186 1083 rbd_destroy_ops(ops); ··· 1379 1274 return ret; 1380 1275 } 1381 1276 1277 + static struct rbd_req_coll *rbd_alloc_coll(int num_reqs) 1278 + { 1279 + struct rbd_req_coll *coll = 1280 + kzalloc(sizeof(struct rbd_req_coll) + 1281 + sizeof(struct rbd_req_status) * num_reqs, 1282 + GFP_ATOMIC); 1283 + 1284 + if (!coll) 1285 + return NULL; 1286 + coll->total = num_reqs; 1287 + kref_init(&coll->kref); 1288 + return coll; 1289 + } 1290 + 1382 1291 /* 1383 1292 * block device queue callback 1384 1293 */ ··· 1410 1291 bool do_write; 1411 1292 int size, op_size = 0; 1412 1293 u64 ofs; 1294 + int num_segs, cur_seg = 0; 1295 + struct rbd_req_coll *coll; 1413 1296 1414 1297 /* peek at request from block layer */ 1415 1298 if (!rq) ··· 1442 1321 do_write ? 
"write" : "read", 1443 1322 size, blk_rq_pos(rq) * 512ULL); 1444 1323 1324 + num_segs = rbd_get_num_segments(&rbd_dev->header, ofs, size); 1325 + coll = rbd_alloc_coll(num_segs); 1326 + if (!coll) { 1327 + spin_lock_irq(q->queue_lock); 1328 + __blk_end_request_all(rq, -ENOMEM); 1329 + goto next; 1330 + } 1331 + 1445 1332 do { 1446 1333 /* a bio clone to be passed down to OSD req */ 1447 1334 dout("rq->bio->bi_vcnt=%d\n", rq->bio->bi_vcnt); ··· 1457 1328 rbd_dev->header.block_name, 1458 1329 ofs, size, 1459 1330 NULL, NULL); 1331 + kref_get(&coll->kref); 1460 1332 bio = bio_chain_clone(&rq_bio, &next_bio, &bp, 1461 1333 op_size, GFP_ATOMIC); 1462 1334 if (!bio) { 1463 - spin_lock_irq(q->queue_lock); 1464 - __blk_end_request_all(rq, -ENOMEM); 1465 - goto next; 1335 + rbd_coll_end_req_index(rq, coll, cur_seg, 1336 + -ENOMEM, op_size); 1337 + goto next_seg; 1466 1338 } 1339 + 1467 1340 1468 1341 /* init OSD command: write or read */ 1469 1342 if (do_write) 1470 1343 rbd_req_write(rq, rbd_dev, 1471 1344 rbd_dev->header.snapc, 1472 1345 ofs, 1473 - op_size, bio); 1346 + op_size, bio, 1347 + coll, cur_seg); 1474 1348 else 1475 1349 rbd_req_read(rq, rbd_dev, 1476 1350 cur_snap_id(rbd_dev), 1477 1351 ofs, 1478 - op_size, bio); 1352 + op_size, bio, 1353 + coll, cur_seg); 1479 1354 1355 + next_seg: 1480 1356 size -= op_size; 1481 1357 ofs += op_size; 1482 1358 1359 + cur_seg++; 1483 1360 rq_bio = next_bio; 1484 1361 } while (size > 0); 1362 + kref_put(&coll->kref, rbd_coll_release); 1485 1363 1486 1364 if (bp) 1487 1365 bio_pair_release(bp); 1488 - 1489 1366 spin_lock_irq(q->queue_lock); 1490 1367 next: 1491 1368 rq = blk_fetch_request(q);
-1
drivers/block/swim.c
··· 858 858 swd->unit[drive].disk->first_minor = drive; 859 859 sprintf(swd->unit[drive].disk->disk_name, "fd%d", drive); 860 860 swd->unit[drive].disk->fops = &floppy_fops; 861 - swd->unit[drive].disk->events = DISK_EVENT_MEDIA_CHANGE; 862 861 swd->unit[drive].disk->private_data = &swd->unit[drive]; 863 862 swd->unit[drive].disk->queue = swd->queue; 864 863 set_capacity(swd->unit[drive].disk, 2880);
-1
drivers/block/swim3.c
··· 1163 1163 disk->major = FLOPPY_MAJOR; 1164 1164 disk->first_minor = i; 1165 1165 disk->fops = &floppy_fops; 1166 - disk->events = DISK_EVENT_MEDIA_CHANGE; 1167 1166 disk->private_data = &floppy_states[i]; 1168 1167 disk->queue = swim3_queue; 1169 1168 disk->flags |= GENHD_FL_REMOVABLE;
-1
drivers/block/ub.c
··· 2334 2334 disk->major = UB_MAJOR; 2335 2335 disk->first_minor = lun->id * UB_PARTS_PER_LUN; 2336 2336 disk->fops = &ub_bd_fops; 2337 - disk->events = DISK_EVENT_MEDIA_CHANGE; 2338 2337 disk->private_data = lun; 2339 2338 disk->driverfs_dev = &sc->intf->dev; 2340 2339
-1
drivers/block/xsysace.c
··· 1005 1005 ace->gd->major = ace_major; 1006 1006 ace->gd->first_minor = ace->id * ACE_NUM_MINORS; 1007 1007 ace->gd->fops = &ace_fops; 1008 - ace->gd->events = DISK_EVENT_MEDIA_CHANGE; 1009 1008 ace->gd->queue = ace->queue; 1010 1009 ace->gd->private_data = ace; 1011 1010 snprintf(ace->gd->disk_name, 32, "xs%c", ace->id + 'a');
-1
drivers/cdrom/gdrom.c
··· 803 803 goto probe_fail_cdrom_register; 804 804 } 805 805 gd.disk->fops = &gdrom_bdops; 806 - gd.disk->events = DISK_EVENT_MEDIA_CHANGE; 807 806 /* latch on to the interrupt */ 808 807 err = gdrom_set_interrupt_handlers(); 809 808 if (err)
+15 -4
drivers/char/agp/generic.c
··· 115 115 struct agp_memory *new; 116 116 unsigned long alloc_size = num_agp_pages*sizeof(struct page *); 117 117 118 + if (INT_MAX/sizeof(struct page *) < num_agp_pages) 119 + return NULL; 120 + 118 121 new = kzalloc(sizeof(struct agp_memory), GFP_KERNEL); 119 122 if (new == NULL) 120 123 return NULL; ··· 237 234 int scratch_pages; 238 235 struct agp_memory *new; 239 236 size_t i; 237 + int cur_memory; 240 238 241 239 if (!bridge) 242 240 return NULL; 243 241 244 - if ((atomic_read(&bridge->current_memory_agp) + page_count) > bridge->max_memory_agp) 242 + cur_memory = atomic_read(&bridge->current_memory_agp); 243 + if ((cur_memory + page_count > bridge->max_memory_agp) || 244 + (cur_memory + page_count < page_count)) 245 245 return NULL; 246 246 247 247 if (type >= AGP_USER_TYPES) { ··· 1095 1089 return -EINVAL; 1096 1090 } 1097 1091 1098 - /* AK: could wrap */ 1099 - if ((pg_start + mem->page_count) > num_entries) 1092 + if (((pg_start + mem->page_count) > num_entries) || 1093 + ((pg_start + mem->page_count) < pg_start)) 1100 1094 return -EINVAL; 1101 1095 1102 1096 j = pg_start; ··· 1130 1124 { 1131 1125 size_t i; 1132 1126 struct agp_bridge_data *bridge; 1133 - int mask_type; 1127 + int mask_type, num_entries; 1134 1128 1135 1129 bridge = mem->bridge; 1136 1130 if (!bridge) ··· 1140 1134 return 0; 1141 1135 1142 1136 if (type != mem->type) 1137 + return -EINVAL; 1138 + 1139 + num_entries = agp_num_entries(); 1140 + if (((pg_start + mem->page_count) > num_entries) || 1141 + ((pg_start + mem->page_count) < pg_start)) 1143 1142 return -EINVAL; 1144 1143 1145 1144 mask_type = bridge->driver->agp_type_to_mask_type(bridge, type);
+5 -2
drivers/char/hw_random/n2-drv.c
··· 619 619 pr_info("%s", version); 620 620 } 621 621 622 + static const struct of_device_id n2rng_match[]; 622 623 static int __devinit n2rng_probe(struct platform_device *op) 623 624 { 625 + const struct of_device_id *match; 624 626 int victoria_falls; 625 627 int err = -ENOMEM; 626 628 struct n2rng *np; 627 629 628 - if (!op->dev.of_match) 630 + match = of_match_device(n2rng_match, &op->dev); 631 + if (!match) 629 632 return -EINVAL; 630 - victoria_falls = (op->dev.of_match->data != NULL); 633 + victoria_falls = (match->data != NULL); 631 634 632 635 n2rng_driver_version(); 633 636 np = kzalloc(sizeof(*np), GFP_KERNEL);
+5 -2
drivers/char/ipmi/ipmi_si_intf.c
··· 2554 2554 }; 2555 2555 #endif /* CONFIG_PCI */ 2556 2556 2557 + static struct of_device_id ipmi_match[]; 2557 2558 static int __devinit ipmi_probe(struct platform_device *dev) 2558 2559 { 2559 2560 #ifdef CONFIG_OF 2561 + const struct of_device_id *match; 2560 2562 struct smi_info *info; 2561 2563 struct resource resource; 2562 2564 const __be32 *regsize, *regspacing, *regshift; ··· 2568 2566 2569 2567 dev_info(&dev->dev, "probing via device tree\n"); 2570 2568 2571 - if (!dev->dev.of_match) 2569 + match = of_match_device(ipmi_match, &dev->dev); 2570 + if (!match) 2572 2571 return -EINVAL; 2573 2572 2574 2573 ret = of_address_to_resource(np, 0, &resource); ··· 2604 2601 return -ENOMEM; 2605 2602 } 2606 2603 2607 - info->si_type = (enum si_type) dev->dev.of_match->data; 2604 + info->si_type = (enum si_type) match->data; 2608 2605 info->addr_source = SI_DEVICETREE; 2609 2606 info->irq_setup = std_irq_setup; 2610 2607
-11
drivers/char/virtio_console.c
··· 1280 1280 spin_lock_irq(&pdrvdata_lock); 1281 1281 list_del(&port->cons.list); 1282 1282 spin_unlock_irq(&pdrvdata_lock); 1283 - #if 0 1284 - /* 1285 - * hvc_remove() not called as removing one hvc port 1286 - * results in other hvc ports getting frozen. 1287 - * 1288 - * Once this is resolved in hvc, this functionality 1289 - * will be enabled. Till that is done, the -EPIPE 1290 - * return from get_chars() above will help 1291 - * hvc_console.c to clean up on ports we remove here. 1292 - */ 1293 1283 hvc_remove(port->cons.hvc); 1294 - #endif 1295 1284 } 1296 1285 1297 1286 /* Remove unused data this port might have received. */
+9 -5
drivers/char/xilinx_hwicap/xilinx_hwicap.c
··· 715 715 } 716 716 717 717 #ifdef CONFIG_OF 718 - static int __devinit hwicap_of_probe(struct platform_device *op) 718 + static int __devinit hwicap_of_probe(struct platform_device *op, 719 + const struct hwicap_driver_config *config) 719 720 { 720 721 struct resource res; 721 722 const unsigned int *id; 722 723 const char *family; 723 724 int rc; 724 - const struct hwicap_driver_config *config = op->dev.of_match->data; 725 725 const struct config_registers *regs; 726 726 727 727 ··· 751 751 regs); 752 752 } 753 753 #else 754 - static inline int hwicap_of_probe(struct platform_device *op) 754 + static inline int hwicap_of_probe(struct platform_device *op, 755 + const struct hwicap_driver_config *config) 755 756 { 756 757 return -EINVAL; 757 758 } 758 759 #endif /* CONFIG_OF */ 759 760 761 + static const struct of_device_id __devinitconst hwicap_of_match[]; 760 762 static int __devinit hwicap_drv_probe(struct platform_device *pdev) 761 763 { 764 + const struct of_device_id *match; 762 765 struct resource *res; 763 766 const struct config_registers *regs; 764 767 const char *family; 765 768 766 - if (pdev->dev.of_match) 767 - return hwicap_of_probe(pdev); 769 + match = of_match_device(hwicap_of_match, &pdev->dev); 770 + if (match) 771 + return hwicap_of_probe(pdev, match->data); 768 772 769 773 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 770 774 if (!res)
+9 -10
drivers/clk/clkdev.c
··· 32 32 * Then we take the most specific entry - with the following 33 33 * order of precedence: dev+con > dev only > con only. 34 34 */ 35 - static struct clk *clk_find(const char *dev_id, const char *con_id) 35 + static struct clk_lookup *clk_find(const char *dev_id, const char *con_id) 36 36 { 37 - struct clk_lookup *p; 38 - struct clk *clk = NULL; 37 + struct clk_lookup *p, *cl = NULL; 39 38 int match, best = 0; 40 39 41 40 list_for_each_entry(p, &clocks, node) { ··· 51 52 } 52 53 53 54 if (match > best) { 54 - clk = p->clk; 55 + cl = p; 55 56 if (match != 3) 56 57 best = match; 57 58 else 58 59 break; 59 60 } 60 61 } 61 - return clk; 62 + return cl; 62 63 } 63 64 64 65 struct clk *clk_get_sys(const char *dev_id, const char *con_id) 65 66 { 66 - struct clk *clk; 67 + struct clk_lookup *cl; 67 68 68 69 mutex_lock(&clocks_mutex); 69 - clk = clk_find(dev_id, con_id); 70 - if (clk && !__clk_get(clk)) 71 - clk = NULL; 70 + cl = clk_find(dev_id, con_id); 71 + if (cl && !__clk_get(cl->clk)) 72 + cl = NULL; 72 73 mutex_unlock(&clocks_mutex); 73 74 74 - return clk ? clk : ERR_PTR(-ENOENT); 75 + return cl ? cl->clk : ERR_PTR(-ENOENT); 75 76 } 76 77 EXPORT_SYMBOL(clk_get_sys); 77 78
+1
drivers/connector/connector.c
··· 142 142 cbq->callback(msg, nsp); 143 143 kfree_skb(skb); 144 144 cn_queue_release_callback(cbq); 145 + err = 0; 145 146 } 146 147 147 148 return err;
+78 -10
drivers/edac/amd64_edac.c
··· 211 211 212 212 scrubval = scrubval & 0x001F; 213 213 214 - amd64_debug("pci-read, sdram scrub control value: %d\n", scrubval); 215 - 216 214 for (i = 0; i < ARRAY_SIZE(scrubrates); i++) { 217 215 if (scrubrates[i].scrubval == scrubval) { 218 216 retval = scrubrates[i].bandwidth; ··· 931 933 /* On F10h and later ErrAddr is MC4_ADDR[47:1] */ 932 934 static u64 get_error_address(struct mce *m) 933 935 { 936 + struct cpuinfo_x86 *c = &boot_cpu_data; 937 + u64 addr; 934 938 u8 start_bit = 1; 935 939 u8 end_bit = 47; 936 940 937 - if (boot_cpu_data.x86 == 0xf) { 941 + if (c->x86 == 0xf) { 938 942 start_bit = 3; 939 943 end_bit = 39; 940 944 } 941 945 942 - return m->addr & GENMASK(start_bit, end_bit); 946 + addr = m->addr & GENMASK(start_bit, end_bit); 947 + 948 + /* 949 + * Erratum 637 workaround 950 + */ 951 + if (c->x86 == 0x15) { 952 + struct amd64_pvt *pvt; 953 + u64 cc6_base, tmp_addr; 954 + u32 tmp; 955 + u8 mce_nid, intlv_en; 956 + 957 + if ((addr & GENMASK(24, 47)) >> 24 != 0x00fdf7) 958 + return addr; 959 + 960 + mce_nid = amd_get_nb_id(m->extcpu); 961 + pvt = mcis[mce_nid]->pvt_info; 962 + 963 + amd64_read_pci_cfg(pvt->F1, DRAM_LOCAL_NODE_LIM, &tmp); 964 + intlv_en = tmp >> 21 & 0x7; 965 + 966 + /* add [47:27] + 3 trailing bits */ 967 + cc6_base = (tmp & GENMASK(0, 20)) << 3; 968 + 969 + /* reverse and add DramIntlvEn */ 970 + cc6_base |= intlv_en ^ 0x7; 971 + 972 + /* pin at [47:24] */ 973 + cc6_base <<= 24; 974 + 975 + if (!intlv_en) 976 + return cc6_base | (addr & GENMASK(0, 23)); 977 + 978 + amd64_read_pci_cfg(pvt->F1, DRAM_LOCAL_NODE_BASE, &tmp); 979 + 980 + /* faster log2 */ 981 + tmp_addr = (addr & GENMASK(12, 23)) << __fls(intlv_en + 1); 982 + 983 + /* OR DramIntlvSel into bits [14:12] */ 984 + tmp_addr |= (tmp & GENMASK(21, 23)) >> 9; 985 + 986 + /* add remaining [11:0] bits from original MC4_ADDR */ 987 + tmp_addr |= addr & GENMASK(0, 11); 988 + 989 + return cc6_base | tmp_addr; 990 + } 991 + 992 + return addr; 943 993 } 944 994 945 995 static 
void read_dram_base_limit_regs(struct amd64_pvt *pvt, unsigned range) 946 996 { 997 + struct cpuinfo_x86 *c = &boot_cpu_data; 947 998 int off = range << 3; 948 999 949 1000 amd64_read_pci_cfg(pvt->F1, DRAM_BASE_LO + off, &pvt->ranges[range].base.lo); 950 1001 amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_LO + off, &pvt->ranges[range].lim.lo); 951 1002 952 - if (boot_cpu_data.x86 == 0xf) 1003 + if (c->x86 == 0xf) 953 1004 return; 954 1005 955 1006 if (!dram_rw(pvt, range)) ··· 1006 959 1007 960 amd64_read_pci_cfg(pvt->F1, DRAM_BASE_HI + off, &pvt->ranges[range].base.hi); 1008 961 amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_HI + off, &pvt->ranges[range].lim.hi); 962 + 963 + /* Factor in CC6 save area by reading dst node's limit reg */ 964 + if (c->x86 == 0x15) { 965 + struct pci_dev *f1 = NULL; 966 + u8 nid = dram_dst_node(pvt, range); 967 + u32 llim; 968 + 969 + f1 = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(0x18 + nid, 1)); 970 + if (WARN_ON(!f1)) 971 + return; 972 + 973 + amd64_read_pci_cfg(f1, DRAM_LOCAL_NODE_LIM, &llim); 974 + 975 + pvt->ranges[range].lim.lo &= GENMASK(0, 15); 976 + 977 + /* {[39:27],111b} */ 978 + pvt->ranges[range].lim.lo |= ((llim & 0x1fff) << 3 | 0x7) << 16; 979 + 980 + pvt->ranges[range].lim.hi &= GENMASK(0, 7); 981 + 982 + /* [47:40] */ 983 + pvt->ranges[range].lim.hi |= llim >> 13; 984 + 985 + pci_dev_put(f1); 986 + } 1009 987 } 1010 988 1011 989 static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr, ··· 1475 1403 return -EINVAL; 1476 1404 } 1477 1405 1478 - if (intlv_en && 1479 - (intlv_sel != ((sys_addr >> 12) & intlv_en))) { 1480 - amd64_warn("Botched intlv bits, en: 0x%x, sel: 0x%x\n", 1481 - intlv_en, intlv_sel); 1406 + if (intlv_en && (intlv_sel != ((sys_addr >> 12) & intlv_en))) 1482 1407 return -EINVAL; 1483 - } 1484 1408 1485 1409 sys_addr = f1x_swap_interleaved_region(pvt, sys_addr); 1486 1410
+3
drivers/edac/amd64_edac.h
··· 196 196 197 197 #define DCT_CFG_SEL 0x10C 198 198 199 + #define DRAM_LOCAL_NODE_BASE 0x120 200 + #define DRAM_LOCAL_NODE_LIM 0x124 201 + 199 202 #define DRAM_BASE_HI 0x140 200 203 #define DRAM_LIMIT_HI 0x144 201 204
+5 -6
drivers/edac/edac_mc_sysfs.c
··· 458 458 return -EINVAL; 459 459 460 460 new_bw = mci->set_sdram_scrub_rate(mci, bandwidth); 461 - if (new_bw >= 0) { 462 - edac_printk(KERN_DEBUG, EDAC_MC, "Scrub rate set to %d\n", new_bw); 463 - return count; 461 + if (new_bw < 0) { 462 + edac_printk(KERN_WARNING, EDAC_MC, 463 + "Error setting scrub rate to: %lu\n", bandwidth); 464 + return -EINVAL; 464 465 } 465 466 466 - edac_printk(KERN_DEBUG, EDAC_MC, "Error setting scrub rate to: %lu\n", bandwidth); 467 - return -EINVAL; 467 + return count; 468 468 } 469 469 470 470 /* ··· 483 483 return bandwidth; 484 484 } 485 485 486 - edac_printk(KERN_DEBUG, EDAC_MC, "Read scrub rate: %d\n", bandwidth); 487 486 return sprintf(data, "%d\n", bandwidth); 488 487 } 489 488
+1 -1
drivers/edac/ppc4xx_edac.c
··· 1019 1019 struct ppc4xx_edac_pdata *pdata = NULL; 1020 1020 const struct device_node *np = op->dev.of_node; 1021 1021 1022 - if (op->dev.of_match == NULL) 1022 + if (of_match_device(ppc4xx_edac_match, &op->dev) == NULL) 1023 1023 return -EINVAL; 1024 1024 1025 1025 /* Initial driver pointers and private data */
+27 -16
drivers/firewire/ohci.c
··· 2199 2199 { 2200 2200 struct fw_ohci *ohci; 2201 2201 unsigned long flags; 2202 - int ret = -EBUSY; 2203 2202 __be32 *next_config_rom; 2204 2203 dma_addr_t uninitialized_var(next_config_rom_bus); 2205 2204 ··· 2239 2240 2240 2241 spin_lock_irqsave(&ohci->lock, flags); 2241 2242 2243 + /* 2244 + * If there is not an already pending config_rom update, 2245 + * push our new allocation into the ohci->next_config_rom 2246 + * and then mark the local variable as null so that we 2247 + * won't deallocate the new buffer. 2248 + * 2249 + * OTOH, if there is a pending config_rom update, just 2250 + * use that buffer with the new config_rom data, and 2251 + * let this routine free the unused DMA allocation. 2252 + */ 2253 + 2242 2254 if (ohci->next_config_rom == NULL) { 2243 2255 ohci->next_config_rom = next_config_rom; 2244 2256 ohci->next_config_rom_bus = next_config_rom_bus; 2245 - 2246 - copy_config_rom(ohci->next_config_rom, config_rom, length); 2247 - 2248 - ohci->next_header = config_rom[0]; 2249 - ohci->next_config_rom[0] = 0; 2250 - 2251 - reg_write(ohci, OHCI1394_ConfigROMmap, 2252 - ohci->next_config_rom_bus); 2253 - ret = 0; 2257 + next_config_rom = NULL; 2254 2258 } 2255 2259 2260 + copy_config_rom(ohci->next_config_rom, config_rom, length); 2261 + 2262 + ohci->next_header = config_rom[0]; 2263 + ohci->next_config_rom[0] = 0; 2264 + 2265 + reg_write(ohci, OHCI1394_ConfigROMmap, ohci->next_config_rom_bus); 2266 + 2256 2267 spin_unlock_irqrestore(&ohci->lock, flags); 2268 + 2269 + /* If we didn't use the DMA allocation, delete it. */ 2270 + if (next_config_rom != NULL) 2271 + dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE, 2272 + next_config_rom, next_config_rom_bus); 2257 2273 2258 2274 /* 2259 2275 * Now initiate a bus reset to have the changes take ··· 2277 2263 * controller could need to access it before the bus reset 2278 2264 * takes effect. 
2279 2265 */ 2280 - if (ret == 0) 2281 - fw_schedule_bus_reset(&ohci->card, true, true); 2282 - else 2283 - dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE, 2284 - next_config_rom, next_config_rom_bus); 2285 2266 2286 - return ret; 2267 + fw_schedule_bus_reset(&ohci->card, true, true); 2268 + 2269 + return 0; 2287 2270 } 2288 2271 2289 2272 static void ohci_send_request(struct fw_card *card, struct fw_packet *packet)
+1
drivers/gpu/drm/Kconfig
··· 24 24 depends on DRM 25 25 select FB 26 26 select FRAMEBUFFER_CONSOLE if !EXPERT 27 + select FRAMEBUFFER_CONSOLE_DETECT_PRIMARY if FRAMEBUFFER_CONSOLE 27 28 help 28 29 FB and CRTC helpers for KMS drivers. 29 30
+42 -11
drivers/gpu/drm/drm_fb_helper.c
··· 342 342 } 343 343 EXPORT_SYMBOL(drm_fb_helper_debug_leave); 344 344 345 + bool drm_fb_helper_restore_fbdev_mode(struct drm_fb_helper *fb_helper) 346 + { 347 + bool error = false; 348 + int i, ret; 349 + for (i = 0; i < fb_helper->crtc_count; i++) { 350 + struct drm_mode_set *mode_set = &fb_helper->crtc_info[i].mode_set; 351 + ret = drm_crtc_helper_set_config(mode_set); 352 + if (ret) 353 + error = true; 354 + } 355 + return error; 356 + } 357 + EXPORT_SYMBOL(drm_fb_helper_restore_fbdev_mode); 358 + 345 359 bool drm_fb_helper_force_kernel_mode(void) 346 360 { 347 - int i = 0; 348 361 bool ret, error = false; 349 362 struct drm_fb_helper *helper; 350 363 ··· 365 352 return false; 366 353 367 354 list_for_each_entry(helper, &kernel_fb_helper_list, kernel_fb_list) { 368 - for (i = 0; i < helper->crtc_count; i++) { 369 - struct drm_mode_set *mode_set = &helper->crtc_info[i].mode_set; 370 - ret = drm_crtc_helper_set_config(mode_set); 371 - if (ret) 372 - error = true; 373 - } 355 + if (helper->dev->switch_power_state == DRM_SWITCH_POWER_OFF) 356 + continue; 357 + 358 + ret = drm_fb_helper_restore_fbdev_mode(helper); 359 + if (ret) 360 + error = true; 374 361 } 375 362 return error; 376 363 } ··· 1516 1503 } 1517 1504 EXPORT_SYMBOL(drm_fb_helper_initial_config); 1518 1505 1519 - bool drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper) 1506 + /** 1507 + * drm_fb_helper_hotplug_event - respond to a hotplug notification by 1508 + * probing all the outputs attached to the fb. 1509 + * @fb_helper: the drm_fb_helper 1510 + * 1511 + * LOCKING: 1512 + * Called at runtime, must take mode config lock. 1513 + * 1514 + * Scan the connectors attached to the fb_helper and try to put together a 1515 + * setup after *notification of a change in output configuration. 1516 + * 1517 + * RETURNS: 1518 + * 0 on success and a non-zero error code otherwise. 
1519 + */ 1520 + int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper) 1520 1521 { 1522 + struct drm_device *dev = fb_helper->dev; 1521 1523 int count = 0; 1522 1524 u32 max_width, max_height, bpp_sel; 1523 1525 bool bound = false, crtcs_bound = false; 1524 1526 struct drm_crtc *crtc; 1525 1527 1526 1528 if (!fb_helper->fb) 1527 - return false; 1529 + return 0; 1528 1530 1529 - list_for_each_entry(crtc, &fb_helper->dev->mode_config.crtc_list, head) { 1531 + mutex_lock(&dev->mode_config.mutex); 1532 + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 1530 1533 if (crtc->fb) 1531 1534 crtcs_bound = true; 1532 1535 if (crtc->fb == fb_helper->fb) ··· 1551 1522 1552 1523 if (!bound && crtcs_bound) { 1553 1524 fb_helper->delayed_hotplug = true; 1554 - return false; 1525 + mutex_unlock(&dev->mode_config.mutex); 1526 + return 0; 1555 1527 } 1556 1528 DRM_DEBUG_KMS("\n"); 1557 1529 ··· 1563 1533 count = drm_fb_helper_probe_connector_modes(fb_helper, max_width, 1564 1534 max_height); 1565 1535 drm_setup_crtcs(fb_helper); 1536 + mutex_unlock(&dev->mode_config.mutex); 1566 1537 1567 1538 return drm_fb_helper_single_fb_probe(fb_helper, bpp_sel); 1568 1539 }
+23
drivers/gpu/drm/drm_irq.c
··· 932 932 933 933 void drm_vblank_off(struct drm_device *dev, int crtc) 934 934 { 935 + struct drm_pending_vblank_event *e, *t; 936 + struct timeval now; 935 937 unsigned long irqflags; 938 + unsigned int seq; 936 939 937 940 spin_lock_irqsave(&dev->vbl_lock, irqflags); 938 941 vblank_disable_and_save(dev, crtc); 939 942 DRM_WAKEUP(&dev->vbl_queue[crtc]); 943 + 944 + /* Send any queued vblank events, lest the natives grow disquiet */ 945 + seq = drm_vblank_count_and_time(dev, crtc, &now); 946 + list_for_each_entry_safe(e, t, &dev->vblank_event_list, base.link) { 947 + if (e->pipe != crtc) 948 + continue; 949 + DRM_DEBUG("Sending premature vblank event on disable: \ 950 + wanted %d, current %d\n", 951 + e->event.sequence, seq); 952 + 953 + e->event.sequence = seq; 954 + e->event.tv_sec = now.tv_sec; 955 + e->event.tv_usec = now.tv_usec; 956 + drm_vblank_put(dev, e->pipe); 957 + list_move_tail(&e->base.link, &e->base.file_priv->event_list); 958 + wake_up_interruptible(&e->base.file_priv->event_wait); 959 + trace_drm_vblank_event_delivered(e->base.pid, e->pipe, 960 + e->event.sequence); 961 + } 962 + 940 963 spin_unlock_irqrestore(&dev->vbl_lock, irqflags); 941 964 } 942 965 EXPORT_SYMBOL(drm_vblank_off);
+3 -3
drivers/gpu/drm/drm_mm.c
··· 431 431 void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new) 432 432 { 433 433 list_replace(&old->node_list, &new->node_list); 434 - list_replace(&old->node_list, &new->hole_stack); 434 + list_replace(&old->hole_stack, &new->hole_stack); 435 435 new->hole_follows = old->hole_follows; 436 436 new->mm = old->mm; 437 437 new->start = old->start; ··· 699 699 entry->size); 700 700 total_used += entry->size; 701 701 if (entry->hole_follows) { 702 - hole_start = drm_mm_hole_node_start(&mm->head_node); 703 - hole_end = drm_mm_hole_node_end(&mm->head_node); 702 + hole_start = drm_mm_hole_node_start(entry); 703 + hole_end = drm_mm_hole_node_end(entry); 704 704 hole_size = hole_end - hole_start; 705 705 seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: free\n", 706 706 hole_start, hole_end, hole_size);
+1 -1
drivers/gpu/drm/i915/i915_dma.c
··· 2207 2207 drm_i915_private_t *dev_priv = dev->dev_private; 2208 2208 2209 2209 if (!dev_priv || drm_core_check_feature(dev, DRIVER_MODESET)) { 2210 - drm_fb_helper_restore(); 2210 + intel_fb_restore_mode(dev); 2211 2211 vga_switcheroo_process_delayed_switch(); 2212 2212 return; 2213 2213 }
+1 -1
drivers/gpu/drm/i915/i915_drv.c
··· 49 49 unsigned int i915_powersave = 1; 50 50 module_param_named(powersave, i915_powersave, int, 0600); 51 51 52 - unsigned int i915_semaphores = 1; 52 + unsigned int i915_semaphores = 0; 53 53 module_param_named(semaphores, i915_semaphores, int, 0600); 54 54 55 55 unsigned int i915_enable_rc6 = 0;
+45 -36
drivers/gpu/drm/i915/intel_display.c
··· 3771 3771 int entries, tlb_miss; 3772 3772 3773 3773 crtc = intel_get_crtc_for_plane(dev, plane); 3774 - if (crtc->fb == NULL || !crtc->enabled) 3774 + if (crtc->fb == NULL || !crtc->enabled) { 3775 + *cursor_wm = cursor->guard_size; 3776 + *plane_wm = display->guard_size; 3775 3777 return false; 3778 + } 3776 3779 3777 3780 htotal = crtc->mode.htotal; 3778 3781 hdisplay = crtc->mode.hdisplay; ··· 5605 5602 intel_clock_t clock; 5606 5603 5607 5604 if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0) 5608 - fp = FP0(pipe); 5605 + fp = I915_READ(FP0(pipe)); 5609 5606 else 5610 - fp = FP1(pipe); 5607 + fp = I915_READ(FP1(pipe)); 5611 5608 5612 5609 clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT; 5613 5610 if (IS_PINEVIEW(dev)) { ··· 6218 6215 return ret; 6219 6216 } 6220 6217 6221 - static void intel_crtc_reset(struct drm_crtc *crtc) 6222 - { 6223 - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 6224 - 6225 - /* Reset flags back to the 'unknown' status so that they 6226 - * will be correctly set on the initial modeset. 
6227 - */ 6228 - intel_crtc->dpms_mode = -1; 6229 - } 6230 - 6231 - static struct drm_crtc_helper_funcs intel_helper_funcs = { 6232 - .dpms = intel_crtc_dpms, 6233 - .mode_fixup = intel_crtc_mode_fixup, 6234 - .mode_set = intel_crtc_mode_set, 6235 - .mode_set_base = intel_pipe_set_base, 6236 - .mode_set_base_atomic = intel_pipe_set_base_atomic, 6237 - .load_lut = intel_crtc_load_lut, 6238 - .disable = intel_crtc_disable, 6239 - }; 6240 - 6241 - static const struct drm_crtc_funcs intel_crtc_funcs = { 6242 - .reset = intel_crtc_reset, 6243 - .cursor_set = intel_crtc_cursor_set, 6244 - .cursor_move = intel_crtc_cursor_move, 6245 - .gamma_set = intel_crtc_gamma_set, 6246 - .set_config = drm_crtc_helper_set_config, 6247 - .destroy = intel_crtc_destroy, 6248 - .page_flip = intel_crtc_page_flip, 6249 - }; 6250 - 6251 6218 static void intel_sanitize_modesetting(struct drm_device *dev, 6252 6219 int pipe, int plane) 6253 6220 { ··· 6253 6280 intel_disable_plane(dev_priv, plane, pipe); 6254 6281 intel_disable_pipe(dev_priv, pipe); 6255 6282 } 6283 + 6284 + static void intel_crtc_reset(struct drm_crtc *crtc) 6285 + { 6286 + struct drm_device *dev = crtc->dev; 6287 + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 6288 + 6289 + /* Reset flags back to the 'unknown' status so that they 6290 + * will be correctly set on the initial modeset. 6291 + */ 6292 + intel_crtc->dpms_mode = -1; 6293 + 6294 + /* We need to fix up any BIOS configuration that conflicts with 6295 + * our expectations. 
6296 + */ 6297 + intel_sanitize_modesetting(dev, intel_crtc->pipe, intel_crtc->plane); 6298 + } 6299 + 6300 + static struct drm_crtc_helper_funcs intel_helper_funcs = { 6301 + .dpms = intel_crtc_dpms, 6302 + .mode_fixup = intel_crtc_mode_fixup, 6303 + .mode_set = intel_crtc_mode_set, 6304 + .mode_set_base = intel_pipe_set_base, 6305 + .mode_set_base_atomic = intel_pipe_set_base_atomic, 6306 + .load_lut = intel_crtc_load_lut, 6307 + .disable = intel_crtc_disable, 6308 + }; 6309 + 6310 + static const struct drm_crtc_funcs intel_crtc_funcs = { 6311 + .reset = intel_crtc_reset, 6312 + .cursor_set = intel_crtc_cursor_set, 6313 + .cursor_move = intel_crtc_cursor_move, 6314 + .gamma_set = intel_crtc_gamma_set, 6315 + .set_config = drm_crtc_helper_set_config, 6316 + .destroy = intel_crtc_destroy, 6317 + .page_flip = intel_crtc_page_flip, 6318 + }; 6256 6319 6257 6320 static void intel_crtc_init(struct drm_device *dev, int pipe) 6258 6321 { ··· 6339 6330 6340 6331 setup_timer(&intel_crtc->idle_timer, intel_crtc_idle_timer, 6341 6332 (unsigned long)intel_crtc); 6342 - 6343 - intel_sanitize_modesetting(dev, intel_crtc->pipe, intel_crtc->plane); 6344 6333 } 6345 6334 6346 6335 int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, ··· 6579 6572 return ERR_PTR(-ENOENT); 6580 6573 6581 6574 intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL); 6582 - if (!intel_fb) 6575 + if (!intel_fb) { 6576 + drm_gem_object_unreference_unlocked(&obj->base); 6583 6577 return ERR_PTR(-ENOMEM); 6578 + } 6584 6579 6585 6580 ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj); 6586 6581 if (ret) {
+15 -2
drivers/gpu/drm/i915/intel_dp.c
··· 1470 1470 1471 1471 if (!HAS_PCH_CPT(dev) && 1472 1472 I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) { 1473 - struct intel_crtc *intel_crtc = to_intel_crtc(intel_dp->base.base.crtc); 1473 + struct drm_crtc *crtc = intel_dp->base.base.crtc; 1474 + 1474 1475 /* Hardware workaround: leaving our transcoder select 1475 1476 * set to transcoder B while it's off will prevent the 1476 1477 * corresponding HDMI output on transcoder A. ··· 1486 1485 /* Changes to enable or select take place the vblank 1487 1486 * after being written. 1488 1487 */ 1489 - intel_wait_for_vblank(dev, intel_crtc->pipe); 1488 + if (crtc == NULL) { 1489 + /* We can arrive here never having been attached 1490 + * to a CRTC, for instance, due to inheriting 1491 + * random state from the BIOS. 1492 + * 1493 + * If the pipe is not running, play safe and 1494 + * wait for the clocks to stabilise before 1495 + * continuing. 1496 + */ 1497 + POSTING_READ(intel_dp->output_reg); 1498 + msleep(50); 1499 + } else 1500 + intel_wait_for_vblank(dev, to_intel_crtc(crtc)->pipe); 1490 1501 } 1491 1502 1492 1503 I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
+1
drivers/gpu/drm/i915/intel_drv.h
··· 338 338 struct drm_file *file_priv); 339 339 340 340 extern void intel_fb_output_poll_changed(struct drm_device *dev); 341 + extern void intel_fb_restore_mode(struct drm_device *dev); 341 342 #endif /* __INTEL_DRV_H__ */
+10
drivers/gpu/drm/i915/intel_fb.c
··· 264 264 drm_i915_private_t *dev_priv = dev->dev_private; 265 265 drm_fb_helper_hotplug_event(&dev_priv->fbdev->helper); 266 266 } 267 + 268 + void intel_fb_restore_mode(struct drm_device *dev) 269 + { 270 + int ret; 271 + drm_i915_private_t *dev_priv = dev->dev_private; 272 + 273 + ret = drm_fb_helper_restore_fbdev_mode(&dev_priv->fbdev->helper); 274 + if (ret) 275 + DRM_DEBUG("failed to restore crtc mode\n"); 276 + }
+3
drivers/gpu/drm/i915/intel_lvds.c
··· 539 539 struct drm_device *dev = dev_priv->dev; 540 540 struct drm_connector *connector = dev_priv->int_lvds_connector; 541 541 542 + if (dev->switch_power_state != DRM_SWITCH_POWER_ON) 543 + return NOTIFY_OK; 544 + 542 545 /* 543 546 * check and update the status of LVDS connector after receiving 544 547 * the LID nofication event.
+5 -4
drivers/gpu/drm/i915/intel_tv.c
··· 1151 1151 (video_levels->blank << TV_BLANK_LEVEL_SHIFT))); 1152 1152 { 1153 1153 int pipeconf_reg = PIPECONF(pipe); 1154 - int dspcntr_reg = DSPCNTR(pipe); 1154 + int dspcntr_reg = DSPCNTR(intel_crtc->plane); 1155 1155 int pipeconf = I915_READ(pipeconf_reg); 1156 1156 int dspcntr = I915_READ(dspcntr_reg); 1157 - int dspbase_reg = DSPADDR(pipe); 1157 + int dspbase_reg = DSPADDR(intel_crtc->plane); 1158 1158 int xpos = 0x0, ypos = 0x0; 1159 1159 unsigned int xsize, ysize; 1160 1160 /* Pipe must be off here */ ··· 1378 1378 if (type < 0) 1379 1379 return connector_status_disconnected; 1380 1380 1381 + intel_tv->type = type; 1381 1382 intel_tv_find_better_format(connector); 1383 + 1382 1384 return connector_status_connected; 1383 1385 } 1384 1386 ··· 1672 1670 * 1673 1671 * More recent chipsets favour HDMI rather than integrated S-Video. 1674 1672 */ 1675 - connector->polled = 1676 - DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT; 1673 + connector->polled = DRM_CONNECTOR_POLL_CONNECT; 1677 1674 1678 1675 drm_connector_init(dev, connector, &intel_tv_connector_funcs, 1679 1676 DRM_MODE_CONNECTOR_SVIDEO);
+1 -1
drivers/gpu/drm/nouveau/nouveau_dma.c
··· 83 83 return ret; 84 84 85 85 /* NV_MEMORY_TO_MEMORY_FORMAT requires a notifier object */ 86 - ret = nouveau_notifier_alloc(chan, NvNotify0, 32, 0xfd0, 0x1000, 86 + ret = nouveau_notifier_alloc(chan, NvNotify0, 32, 0xfe0, 0x1000, 87 87 &chan->m2mf_ntfy); 88 88 if (ret) 89 89 return ret;
+3
drivers/gpu/drm/nouveau/nouveau_drv.h
··· 682 682 /* For PFIFO and PGRAPH. */ 683 683 spinlock_t context_switch_lock; 684 684 685 + /* VM/PRAMIN flush, legacy PRAMIN aperture */ 686 + spinlock_t vm_lock; 687 + 685 688 /* RAMIN configuration, RAMFC, RAMHT and RAMRO offsets */ 686 689 struct nouveau_ramht *ramht; 687 690 struct nouveau_gpuobj *ramfc;
+2 -2
drivers/gpu/drm/nouveau/nouveau_fbcon.c
··· 181 181 OUT_RING (chan, 0); 182 182 } 183 183 184 - nouveau_bo_wr32(chan->notifier_bo, chan->m2mf_ntfy + 3, 0xffffffff); 184 + nouveau_bo_wr32(chan->notifier_bo, chan->m2mf_ntfy/4 + 3, 0xffffffff); 185 185 FIRE_RING(chan); 186 186 mutex_unlock(&chan->mutex); 187 187 188 188 ret = -EBUSY; 189 189 for (i = 0; i < 100000; i++) { 190 - if (!nouveau_bo_rd32(chan->notifier_bo, chan->m2mf_ntfy + 3)) { 190 + if (!nouveau_bo_rd32(chan->notifier_bo, chan->m2mf_ntfy/4 + 3)) { 191 191 ret = 0; 192 192 break; 193 193 }
+1 -3
drivers/gpu/drm/nouveau/nouveau_mem.c
··· 152 152 { 153 153 struct drm_nouveau_private *dev_priv = dev->dev_private; 154 154 155 - nouveau_bo_ref(NULL, &dev_priv->vga_ram); 156 - 157 155 ttm_bo_device_release(&dev_priv->ttm.bdev); 158 156 159 157 nouveau_ttm_global_release(dev_priv); ··· 396 398 dma_bits = 40; 397 399 } else 398 400 if (drm_pci_device_is_pcie(dev) && 399 - dev_priv->chipset != 0x40 && 401 + dev_priv->chipset > 0x40 && 400 402 dev_priv->chipset != 0x45) { 401 403 if (pci_dma_supported(dev->pdev, DMA_BIT_MASK(39))) 402 404 dma_bits = 39;
+7 -4
drivers/gpu/drm/nouveau/nouveau_notifier.c
··· 35 35 { 36 36 struct drm_device *dev = chan->dev; 37 37 struct nouveau_bo *ntfy = NULL; 38 - uint32_t flags; 38 + uint32_t flags, ttmpl; 39 39 int ret; 40 40 41 - if (nouveau_vram_notify) 41 + if (nouveau_vram_notify) { 42 42 flags = NOUVEAU_GEM_DOMAIN_VRAM; 43 - else 43 + ttmpl = TTM_PL_FLAG_VRAM; 44 + } else { 44 45 flags = NOUVEAU_GEM_DOMAIN_GART; 46 + ttmpl = TTM_PL_FLAG_TT; 47 + } 45 48 46 49 ret = nouveau_gem_new(dev, NULL, PAGE_SIZE, 0, flags, 0, 0, &ntfy); 47 50 if (ret) 48 51 return ret; 49 52 50 - ret = nouveau_bo_pin(ntfy, flags); 53 + ret = nouveau_bo_pin(ntfy, ttmpl); 51 54 if (ret) 52 55 goto out_err; 53 56
+6 -4
drivers/gpu/drm/nouveau/nouveau_object.c
··· 1039 1039 { 1040 1040 struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private; 1041 1041 struct drm_device *dev = gpuobj->dev; 1042 + unsigned long flags; 1042 1043 1043 1044 if (gpuobj->pinst == ~0 || !dev_priv->ramin_available) { 1044 1045 u64 ptr = gpuobj->vinst + offset; 1045 1046 u32 base = ptr >> 16; 1046 1047 u32 val; 1047 1048 1048 - spin_lock(&dev_priv->ramin_lock); 1049 + spin_lock_irqsave(&dev_priv->vm_lock, flags); 1049 1050 if (dev_priv->ramin_base != base) { 1050 1051 dev_priv->ramin_base = base; 1051 1052 nv_wr32(dev, 0x001700, dev_priv->ramin_base); 1052 1053 } 1053 1054 val = nv_rd32(dev, 0x700000 + (ptr & 0xffff)); 1054 - spin_unlock(&dev_priv->ramin_lock); 1055 + spin_unlock_irqrestore(&dev_priv->vm_lock, flags); 1055 1056 return val; 1056 1057 } 1057 1058 ··· 1064 1063 { 1065 1064 struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private; 1066 1065 struct drm_device *dev = gpuobj->dev; 1066 + unsigned long flags; 1067 1067 1068 1068 if (gpuobj->pinst == ~0 || !dev_priv->ramin_available) { 1069 1069 u64 ptr = gpuobj->vinst + offset; 1070 1070 u32 base = ptr >> 16; 1071 1071 1072 - spin_lock(&dev_priv->ramin_lock); 1072 + spin_lock_irqsave(&dev_priv->vm_lock, flags); 1073 1073 if (dev_priv->ramin_base != base) { 1074 1074 dev_priv->ramin_base = base; 1075 1075 nv_wr32(dev, 0x001700, dev_priv->ramin_base); 1076 1076 } 1077 1077 nv_wr32(dev, 0x700000 + (ptr & 0xffff), val); 1078 - spin_unlock(&dev_priv->ramin_lock); 1078 + spin_unlock_irqrestore(&dev_priv->vm_lock, flags); 1079 1079 return; 1080 1080 } 1081 1081
+5 -3
drivers/gpu/drm/nouveau/nouveau_sgdma.c
··· 42 42 43 43 nvbe->nr_pages = 0; 44 44 while (num_pages--) { 45 - if (dma_addrs[nvbe->nr_pages] != DMA_ERROR_CODE) { 45 + /* this code path isn't called and is incorrect anyways */ 46 + if (0) { /*dma_addrs[nvbe->nr_pages] != DMA_ERROR_CODE)*/ 46 47 nvbe->pages[nvbe->nr_pages] = 47 48 dma_addrs[nvbe->nr_pages]; 48 49 nvbe->ttm_alloced[nvbe->nr_pages] = true; ··· 56 55 be->func->clear(be); 57 56 return -EFAULT; 58 57 } 58 + nvbe->ttm_alloced[nvbe->nr_pages] = false; 59 59 } 60 60 61 61 nvbe->nr_pages++; ··· 429 427 u32 aper_size, align; 430 428 int ret; 431 429 432 - if (dev_priv->card_type >= NV_50 || drm_pci_device_is_pcie(dev)) 430 + if (dev_priv->card_type >= NV_40 && drm_pci_device_is_pcie(dev)) 433 431 aper_size = 512 * 1024 * 1024; 434 432 else 435 433 aper_size = 64 * 1024 * 1024; ··· 459 457 dev_priv->gart_info.func = &nv50_sgdma_backend; 460 458 } else 461 459 if (drm_pci_device_is_pcie(dev) && 462 - dev_priv->chipset != 0x40 && dev_priv->chipset != 0x45) { 460 + dev_priv->chipset > 0x40 && dev_priv->chipset != 0x45) { 463 461 if (nv44_graph_class(dev)) { 464 462 dev_priv->gart_info.func = &nv44_sgdma_backend; 465 463 align = 512 * 1024;
+6
drivers/gpu/drm/nouveau/nouveau_state.c
··· 608 608 spin_lock_init(&dev_priv->channels.lock); 609 609 spin_lock_init(&dev_priv->tile.lock); 610 610 spin_lock_init(&dev_priv->context_switch_lock); 611 + spin_lock_init(&dev_priv->vm_lock); 611 612 612 613 /* Make the CRTCs and I2C buses accessible */ 613 614 ret = engine->display.early_init(dev); ··· 767 766 engine->gpio.takedown(dev); 768 767 engine->mc.takedown(dev); 769 768 engine->display.late_takedown(dev); 769 + 770 + if (dev_priv->vga_ram) { 771 + nouveau_bo_unpin(dev_priv->vga_ram); 772 + nouveau_bo_ref(NULL, &dev_priv->vga_ram); 773 + } 770 774 771 775 mutex_lock(&dev->struct_mutex); 772 776 ttm_bo_clean_mm(&dev_priv->ttm.bdev, TTM_PL_VRAM);
+6 -4
drivers/gpu/drm/nouveau/nv50_instmem.c
··· 404 404 nv50_instmem_flush(struct drm_device *dev) 405 405 { 406 406 struct drm_nouveau_private *dev_priv = dev->dev_private; 407 + unsigned long flags; 407 408 408 - spin_lock(&dev_priv->ramin_lock); 409 + spin_lock_irqsave(&dev_priv->vm_lock, flags); 409 410 nv_wr32(dev, 0x00330c, 0x00000001); 410 411 if (!nv_wait(dev, 0x00330c, 0x00000002, 0x00000000)) 411 412 NV_ERROR(dev, "PRAMIN flush timeout\n"); 412 - spin_unlock(&dev_priv->ramin_lock); 413 + spin_unlock_irqrestore(&dev_priv->vm_lock, flags); 413 414 } 414 415 415 416 void 416 417 nv84_instmem_flush(struct drm_device *dev) 417 418 { 418 419 struct drm_nouveau_private *dev_priv = dev->dev_private; 420 + unsigned long flags; 419 421 420 - spin_lock(&dev_priv->ramin_lock); 422 + spin_lock_irqsave(&dev_priv->vm_lock, flags); 421 423 nv_wr32(dev, 0x070000, 0x00000001); 422 424 if (!nv_wait(dev, 0x070000, 0x00000002, 0x00000000)) 423 425 NV_ERROR(dev, "PRAMIN flush timeout\n"); 424 - spin_unlock(&dev_priv->ramin_lock); 426 + spin_unlock_irqrestore(&dev_priv->vm_lock, flags); 425 427 } 426 428
+3 -2
drivers/gpu/drm/nouveau/nv50_vm.c
··· 174 174 nv50_vm_flush_engine(struct drm_device *dev, int engine) 175 175 { 176 176 struct drm_nouveau_private *dev_priv = dev->dev_private; 177 + unsigned long flags; 177 178 178 - spin_lock(&dev_priv->ramin_lock); 179 + spin_lock_irqsave(&dev_priv->vm_lock, flags); 179 180 nv_wr32(dev, 0x100c80, (engine << 16) | 1); 180 181 if (!nv_wait(dev, 0x100c80, 0x00000001, 0x00000000)) 181 182 NV_ERROR(dev, "vm flush timeout: engine %d\n", engine); 182 - spin_unlock(&dev_priv->ramin_lock); 183 + spin_unlock_irqrestore(&dev_priv->vm_lock, flags); 183 184 }
+3 -2
drivers/gpu/drm/nouveau/nvc0_vm.c
··· 104 104 struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem; 105 105 struct drm_device *dev = vm->dev; 106 106 struct nouveau_vm_pgd *vpgd; 107 + unsigned long flags; 107 108 u32 engine = (dev_priv->chan_vm == vm) ? 1 : 5; 108 109 109 110 pinstmem->flush(vm->dev); 110 111 111 - spin_lock(&dev_priv->ramin_lock); 112 + spin_lock_irqsave(&dev_priv->vm_lock, flags); 112 113 list_for_each_entry(vpgd, &vm->pgd_list, head) { 113 114 /* looks like maybe a "free flush slots" counter, the 114 115 * faster you write to 0x100cbc to more it decreases ··· 126 125 nv_rd32(dev, 0x100c80), engine); 127 126 } 128 127 } 129 - spin_unlock(&dev_priv->ramin_lock); 128 + spin_unlock_irqrestore(&dev_priv->vm_lock, flags); 130 129 }
+3 -3
drivers/gpu/drm/radeon/atom.c
··· 135 135 case ATOM_IIO_MOVE_INDEX: 136 136 temp &= 137 137 ~((0xFFFFFFFF >> (32 - CU8(base + 1))) << 138 - CU8(base + 2)); 138 + CU8(base + 3)); 139 139 temp |= 140 140 ((index >> CU8(base + 2)) & 141 141 (0xFFFFFFFF >> (32 - CU8(base + 1)))) << CU8(base + ··· 145 145 case ATOM_IIO_MOVE_DATA: 146 146 temp &= 147 147 ~((0xFFFFFFFF >> (32 - CU8(base + 1))) << 148 - CU8(base + 2)); 148 + CU8(base + 3)); 149 149 temp |= 150 150 ((data >> CU8(base + 2)) & 151 151 (0xFFFFFFFF >> (32 - CU8(base + 1)))) << CU8(base + ··· 155 155 case ATOM_IIO_MOVE_ATTR: 156 156 temp &= 157 157 ~((0xFFFFFFFF >> (32 - CU8(base + 1))) << 158 - CU8(base + 2)); 158 + CU8(base + 3)); 159 159 temp |= 160 160 ((ctx-> 161 161 io_attr >> CU8(base + 2)) & (0xFFFFFFFF >> (32 -
+1 -5
drivers/gpu/drm/radeon/atombios_crtc.c
··· 532 532 else 533 533 pll->flags |= RADEON_PLL_PREFER_LOW_REF_DIV; 534 534 535 - if ((rdev->family == CHIP_R600) || 536 - (rdev->family == CHIP_RV610) || 537 - (rdev->family == CHIP_RV630) || 538 - (rdev->family == CHIP_RV670)) 535 + if (rdev->family < CHIP_RV770) 539 536 pll->flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP; 540 537 } else { 541 538 pll->flags |= RADEON_PLL_LEGACY; ··· 562 565 if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { 563 566 if (ss_enabled) { 564 567 if (ss->refdiv) { 565 - pll->flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP; 566 568 pll->flags |= RADEON_PLL_USE_REF_DIV; 567 569 pll->reference_div = ss->refdiv; 568 570 if (ASIC_IS_AVIVO(rdev))
+58 -55
drivers/gpu/drm/radeon/evergreen.c
··· 353 353 struct drm_display_mode *mode, 354 354 struct drm_display_mode *other_mode) 355 355 { 356 - u32 tmp = 0; 356 + u32 tmp; 357 357 /* 358 358 * Line Buffer Setup 359 359 * There are 3 line buffers, each one shared by 2 display controllers. ··· 363 363 * first display controller 364 364 * 0 - first half of lb (3840 * 2) 365 365 * 1 - first 3/4 of lb (5760 * 2) 366 - * 2 - whole lb (7680 * 2) 366 + * 2 - whole lb (7680 * 2), other crtc must be disabled 367 367 * 3 - first 1/4 of lb (1920 * 2) 368 368 * second display controller 369 369 * 4 - second half of lb (3840 * 2) 370 370 * 5 - second 3/4 of lb (5760 * 2) 371 - * 6 - whole lb (7680 * 2) 371 + * 6 - whole lb (7680 * 2), other crtc must be disabled 372 372 * 7 - last 1/4 of lb (1920 * 2) 373 373 */ 374 - if (mode && other_mode) { 375 - if (mode->hdisplay > other_mode->hdisplay) { 376 - if (mode->hdisplay > 2560) 377 - tmp = 1; /* 3/4 */ 378 - else 379 - tmp = 0; /* 1/2 */ 380 - } else if (other_mode->hdisplay > mode->hdisplay) { 381 - if (other_mode->hdisplay > 2560) 382 - tmp = 3; /* 1/4 */ 383 - else 384 - tmp = 0; /* 1/2 */ 385 - } else 374 + /* this can get tricky if we have two large displays on a paired group 375 + * of crtcs. Ideally for multiple large displays we'd assign them to 376 + * non-linked crtcs for maximum line buffer allocation. 
377 + */ 378 + if (radeon_crtc->base.enabled && mode) { 379 + if (other_mode) 386 380 tmp = 0; /* 1/2 */ 387 - } else if (mode) 388 - tmp = 2; /* whole */ 389 - else if (other_mode) 390 - tmp = 3; /* 1/4 */ 381 + else 382 + tmp = 2; /* whole */ 383 + } else 384 + tmp = 0; 391 385 392 386 /* second controller of the pair uses second half of the lb */ 393 387 if (radeon_crtc->crtc_id % 2) 394 388 tmp += 4; 395 389 WREG32(DC_LB_MEMORY_SPLIT + radeon_crtc->crtc_offset, tmp); 396 390 397 - switch (tmp) { 398 - case 0: 399 - case 4: 400 - default: 401 - if (ASIC_IS_DCE5(rdev)) 402 - return 4096 * 2; 403 - else 404 - return 3840 * 2; 405 - case 1: 406 - case 5: 407 - if (ASIC_IS_DCE5(rdev)) 408 - return 6144 * 2; 409 - else 410 - return 5760 * 2; 411 - case 2: 412 - case 6: 413 - if (ASIC_IS_DCE5(rdev)) 414 - return 8192 * 2; 415 - else 416 - return 7680 * 2; 417 - case 3: 418 - case 7: 419 - if (ASIC_IS_DCE5(rdev)) 420 - return 2048 * 2; 421 - else 422 - return 1920 * 2; 391 + if (radeon_crtc->base.enabled && mode) { 392 + switch (tmp) { 393 + case 0: 394 + case 4: 395 + default: 396 + if (ASIC_IS_DCE5(rdev)) 397 + return 4096 * 2; 398 + else 399 + return 3840 * 2; 400 + case 1: 401 + case 5: 402 + if (ASIC_IS_DCE5(rdev)) 403 + return 6144 * 2; 404 + else 405 + return 5760 * 2; 406 + case 2: 407 + case 6: 408 + if (ASIC_IS_DCE5(rdev)) 409 + return 8192 * 2; 410 + else 411 + return 7680 * 2; 412 + case 3: 413 + case 7: 414 + if (ASIC_IS_DCE5(rdev)) 415 + return 2048 * 2; 416 + else 417 + return 1920 * 2; 418 + } 423 419 } 420 + 421 + /* controller not enabled, so no lb used */ 422 + return 0; 424 423 } 425 424 426 425 static u32 evergreen_get_number_of_dram_channels(struct radeon_device *rdev) ··· 862 863 SYSTEM_ACCESS_MODE_NOT_IN_SYS | 863 864 SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU | 864 865 EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5); 865 - WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp); 866 - WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp); 867 - WREG32(MC_VM_MD_L1_TLB2_CNTL, 
tmp); 866 + if (rdev->flags & RADEON_IS_IGP) { 867 + WREG32(FUS_MC_VM_MD_L1_TLB0_CNTL, tmp); 868 + WREG32(FUS_MC_VM_MD_L1_TLB1_CNTL, tmp); 869 + WREG32(FUS_MC_VM_MD_L1_TLB2_CNTL, tmp); 870 + } else { 871 + WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp); 872 + WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp); 873 + WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp); 874 + } 868 875 WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp); 869 876 WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp); 870 877 WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp); ··· 1780 1775 1781 1776 1782 1777 mc_shared_chmap = RREG32(MC_SHARED_CHMAP); 1783 - mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG); 1778 + if (rdev->flags & RADEON_IS_IGP) 1779 + mc_arb_ramcfg = RREG32(FUS_MC_ARB_RAMCFG); 1780 + else 1781 + mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG); 1784 1782 1785 1783 switch (rdev->config.evergreen.max_tile_pipes) { 1786 1784 case 1: ··· 2589 2581 u32 wptr, tmp; 2590 2582 2591 2583 if (rdev->wb.enabled) 2592 - wptr = rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4]; 2584 + wptr = le32_to_cpu(rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4]); 2593 2585 else 2594 2586 wptr = RREG32(IH_RB_WPTR); 2595 2587 ··· 2931 2923 evergreen_blit_fini(rdev); 2932 2924 rdev->asic->copy = NULL; 2933 2925 dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r); 2934 - } 2935 - /* XXX: ontario has problems blitting to gart at the moment */ 2936 - if (rdev->family == CHIP_PALM) { 2937 - rdev->asic->copy = NULL; 2938 - radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size); 2939 2926 } 2940 2927 2941 2928 /* allocate wb buffer */
+6
drivers/gpu/drm/radeon/evergreend.h
··· 200 200 #define BURSTLENGTH_SHIFT 9 201 201 #define BURSTLENGTH_MASK 0x00000200 202 202 #define CHANSIZE_OVERRIDE (1 << 11) 203 + #define FUS_MC_ARB_RAMCFG 0x2768 203 204 #define MC_VM_AGP_TOP 0x2028 204 205 #define MC_VM_AGP_BOT 0x202C 205 206 #define MC_VM_AGP_BASE 0x2030 ··· 222 221 #define MC_VM_MD_L1_TLB0_CNTL 0x2654 223 222 #define MC_VM_MD_L1_TLB1_CNTL 0x2658 224 223 #define MC_VM_MD_L1_TLB2_CNTL 0x265C 224 + 225 + #define FUS_MC_VM_MD_L1_TLB0_CNTL 0x265C 226 + #define FUS_MC_VM_MD_L1_TLB1_CNTL 0x2660 227 + #define FUS_MC_VM_MD_L1_TLB2_CNTL 0x2664 228 + 225 229 #define MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR 0x203C 226 230 #define MC_VM_SYSTEM_APERTURE_HIGH_ADDR 0x2038 227 231 #define MC_VM_SYSTEM_APERTURE_LOW_ADDR 0x2034
+9 -9
drivers/gpu/drm/radeon/ni.c
··· 674 674 675 675 cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE); 676 676 cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG); 677 - cgts_tcc_disable = RREG32(CGTS_TCC_DISABLE); 677 + cgts_tcc_disable = 0xff000000; 678 678 gc_user_rb_backend_disable = RREG32(GC_USER_RB_BACKEND_DISABLE); 679 679 gc_user_shader_pipe_config = RREG32(GC_USER_SHADER_PIPE_CONFIG); 680 680 cgts_user_tcc_disable = RREG32(CGTS_USER_TCC_DISABLE); ··· 871 871 872 872 smx_dc_ctl0 = RREG32(SMX_DC_CTL0); 873 873 smx_dc_ctl0 &= ~NUMBER_OF_SETS(0x1ff); 874 - smx_dc_ctl0 |= NUMBER_OF_SETS(rdev->config.evergreen.sx_num_of_sets); 874 + smx_dc_ctl0 |= NUMBER_OF_SETS(rdev->config.cayman.sx_num_of_sets); 875 875 WREG32(SMX_DC_CTL0, smx_dc_ctl0); 876 876 877 877 WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(4) | CRC_SIMD_ID_WADDR_DISABLE); ··· 887 887 888 888 WREG32(TA_CNTL_AUX, DISABLE_CUBE_ANISO); 889 889 890 - WREG32(SX_EXPORT_BUFFER_SIZES, (COLOR_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_size / 4) - 1) | 891 - POSITION_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_pos_size / 4) - 1) | 892 - SMX_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_smx_size / 4) - 1))); 890 + WREG32(SX_EXPORT_BUFFER_SIZES, (COLOR_BUFFER_SIZE((rdev->config.cayman.sx_max_export_size / 4) - 1) | 891 + POSITION_BUFFER_SIZE((rdev->config.cayman.sx_max_export_pos_size / 4) - 1) | 892 + SMX_BUFFER_SIZE((rdev->config.cayman.sx_max_export_smx_size / 4) - 1))); 893 893 894 - WREG32(PA_SC_FIFO_SIZE, (SC_PRIM_FIFO_SIZE(rdev->config.evergreen.sc_prim_fifo_size) | 895 - SC_HIZ_TILE_FIFO_SIZE(rdev->config.evergreen.sc_hiz_tile_fifo_size) | 896 - SC_EARLYZ_TILE_FIFO_SIZE(rdev->config.evergreen.sc_earlyz_tile_fifo_size))); 894 + WREG32(PA_SC_FIFO_SIZE, (SC_PRIM_FIFO_SIZE(rdev->config.cayman.sc_prim_fifo_size) | 895 + SC_HIZ_TILE_FIFO_SIZE(rdev->config.cayman.sc_hiz_tile_fifo_size) | 896 + SC_EARLYZ_TILE_FIFO_SIZE(rdev->config.cayman.sc_earlyz_tile_fifo_size))); 897 897 898 898 899 899 WREG32(VGT_NUM_INSTANCES, 1); 
900 900 901 901 WREG32(CP_PERFMON_CNTL, 0); 902 902 903 - WREG32(SQ_MS_FIFO_SIZES, (CACHE_FIFO_SIZE(16 * rdev->config.evergreen.sq_num_cf_insts) | 903 + WREG32(SQ_MS_FIFO_SIZES, (CACHE_FIFO_SIZE(16 * rdev->config.cayman.sq_num_cf_insts) | 904 904 FETCH_FIFO_HIWATER(0x4) | 905 905 DONE_FIFO_HIWATER(0xe0) | 906 906 ALU_UPDATE_FIFO_HIWATER(0x8)));
+1 -1
drivers/gpu/drm/radeon/r600.c
··· 3231 3231 u32 wptr, tmp; 3232 3232 3233 3233 if (rdev->wb.enabled) 3234 - wptr = rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4]; 3234 + wptr = le32_to_cpu(rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4]); 3235 3235 else 3236 3236 wptr = RREG32(IH_RB_WPTR); 3237 3237
+16 -7
drivers/gpu/drm/radeon/radeon_atombios.c
··· 431 431 } 432 432 } 433 433 434 - /* Acer laptop (Acer TravelMate 5730G) has an HDMI port 434 + /* Acer laptop (Acer TravelMate 5730/5730G) has an HDMI port 435 435 * on the laptop and a DVI port on the docking station and 436 436 * both share the same encoder, hpd pin, and ddc line. 437 437 * So while the bios table is technically correct, ··· 440 440 * with different crtcs which isn't possible on the hardware 441 441 * side and leaves no crtcs for LVDS or VGA. 442 442 */ 443 - if ((dev->pdev->device == 0x95c4) && 443 + if (((dev->pdev->device == 0x95c4) || (dev->pdev->device == 0x9591)) && 444 444 (dev->pdev->subsystem_vendor == 0x1025) && 445 445 (dev->pdev->subsystem_device == 0x013c)) { 446 446 if ((*connector_type == DRM_MODE_CONNECTOR_DVII) && ··· 1574 1574 ATOM_FAKE_EDID_PATCH_RECORD *fake_edid_record; 1575 1575 ATOM_PANEL_RESOLUTION_PATCH_RECORD *panel_res_record; 1576 1576 bool bad_record = false; 1577 - u8 *record = (u8 *)(mode_info->atom_context->bios + 1578 - data_offset + 1579 - le16_to_cpu(lvds_info->info.usModePatchTableOffset)); 1577 + u8 *record; 1578 + 1579 + if ((frev == 1) && (crev < 2)) 1580 + /* absolute */ 1581 + record = (u8 *)(mode_info->atom_context->bios + 1582 + le16_to_cpu(lvds_info->info.usModePatchTableOffset)); 1583 + else 1584 + /* relative */ 1585 + record = (u8 *)(mode_info->atom_context->bios + 1586 + data_offset + 1587 + le16_to_cpu(lvds_info->info.usModePatchTableOffset)); 1580 1588 while (*record != ATOM_RECORD_END_TYPE) { 1581 1589 switch (*record) { 1582 1590 case LCD_MODE_PATCH_RECORD_MODE_TYPE: ··· 1607 1599 memcpy((u8 *)edid, (u8 *)&fake_edid_record->ucFakeEDIDString[0], 1608 1600 fake_edid_record->ucFakeEDIDLength); 1609 1601 1610 - if (drm_edid_is_valid(edid)) 1602 + if (drm_edid_is_valid(edid)) { 1611 1603 rdev->mode_info.bios_hardcoded_edid = edid; 1612 - else 1604 + rdev->mode_info.bios_hardcoded_edid_size = edid_size; 1605 + } else 1613 1606 kfree(edid); 1614 1607 } 1615 1608 }
+27 -2
drivers/gpu/drm/radeon/radeon_atpx_handler.c
··· 15 15 #define ATPX_VERSION 0 16 16 #define ATPX_GPU_PWR 2 17 17 #define ATPX_MUX_SELECT 3 18 + #define ATPX_I2C_MUX_SELECT 4 19 + #define ATPX_SWITCH_START 5 20 + #define ATPX_SWITCH_END 6 18 21 19 22 #define ATPX_INTEGRATED 0 20 23 #define ATPX_DISCRETE 1 ··· 152 149 return radeon_atpx_execute(handle, ATPX_MUX_SELECT, mux_id); 153 150 } 154 151 152 + static int radeon_atpx_switch_i2c_mux(acpi_handle handle, int mux_id) 153 + { 154 + return radeon_atpx_execute(handle, ATPX_I2C_MUX_SELECT, mux_id); 155 + } 156 + 157 + static int radeon_atpx_switch_start(acpi_handle handle, int gpu_id) 158 + { 159 + return radeon_atpx_execute(handle, ATPX_SWITCH_START, gpu_id); 160 + } 161 + 162 + static int radeon_atpx_switch_end(acpi_handle handle, int gpu_id) 163 + { 164 + return radeon_atpx_execute(handle, ATPX_SWITCH_END, gpu_id); 165 + } 155 166 156 167 static int radeon_atpx_switchto(enum vga_switcheroo_client_id id) 157 168 { 169 + int gpu_id; 170 + 158 171 if (id == VGA_SWITCHEROO_IGD) 159 - radeon_atpx_switch_mux(radeon_atpx_priv.atpx_handle, 0); 172 + gpu_id = ATPX_INTEGRATED; 160 173 else 161 - radeon_atpx_switch_mux(radeon_atpx_priv.atpx_handle, 1); 174 + gpu_id = ATPX_DISCRETE; 175 + 176 + radeon_atpx_switch_start(radeon_atpx_priv.atpx_handle, gpu_id); 177 + radeon_atpx_switch_mux(radeon_atpx_priv.atpx_handle, gpu_id); 178 + radeon_atpx_switch_i2c_mux(radeon_atpx_priv.atpx_handle, gpu_id); 179 + radeon_atpx_switch_end(radeon_atpx_priv.atpx_handle, gpu_id); 180 + 162 181 return 0; 163 182 } 164 183
+12 -17
drivers/gpu/drm/radeon/radeon_connectors.c
··· 1199 1199 if (router->ddc_valid || router->cd_valid) { 1200 1200 radeon_connector->router_bus = radeon_i2c_lookup(rdev, &router->i2c_info); 1201 1201 if (!radeon_connector->router_bus) 1202 - goto failed; 1202 + DRM_ERROR("Failed to assign router i2c bus! Check dmesg for i2c errors.\n"); 1203 1203 } 1204 1204 switch (connector_type) { 1205 1205 case DRM_MODE_CONNECTOR_VGA: ··· 1208 1208 if (i2c_bus->valid) { 1209 1209 radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus); 1210 1210 if (!radeon_connector->ddc_bus) 1211 - goto failed; 1211 + DRM_ERROR("VGA: Failed to assign ddc bus! Check dmesg for i2c errors.\n"); 1212 1212 } 1213 1213 radeon_connector->dac_load_detect = true; 1214 1214 drm_connector_attach_property(&radeon_connector->base, ··· 1226 1226 if (i2c_bus->valid) { 1227 1227 radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus); 1228 1228 if (!radeon_connector->ddc_bus) 1229 - goto failed; 1229 + DRM_ERROR("DVIA: Failed to assign ddc bus! Check dmesg for i2c errors.\n"); 1230 1230 } 1231 1231 radeon_connector->dac_load_detect = true; 1232 1232 drm_connector_attach_property(&radeon_connector->base, ··· 1249 1249 if (i2c_bus->valid) { 1250 1250 radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus); 1251 1251 if (!radeon_connector->ddc_bus) 1252 - goto failed; 1252 + DRM_ERROR("DVI: Failed to assign ddc bus! Check dmesg for i2c errors.\n"); 1253 1253 } 1254 1254 subpixel_order = SubPixelHorizontalRGB; 1255 1255 drm_connector_attach_property(&radeon_connector->base, ··· 1290 1290 if (i2c_bus->valid) { 1291 1291 radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus); 1292 1292 if (!radeon_connector->ddc_bus) 1293 - goto failed; 1293 + DRM_ERROR("HDMI: Failed to assign ddc bus! 
Check dmesg for i2c errors.\n"); 1294 1294 } 1295 1295 drm_connector_attach_property(&radeon_connector->base, 1296 1296 rdev->mode_info.coherent_mode_property, ··· 1329 1329 else 1330 1330 radeon_dig_connector->dp_i2c_bus = radeon_i2c_create_dp(dev, i2c_bus, "DP-auxch"); 1331 1331 if (!radeon_dig_connector->dp_i2c_bus) 1332 - goto failed; 1332 + DRM_ERROR("DP: Failed to assign dp ddc bus! Check dmesg for i2c errors.\n"); 1333 1333 radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus); 1334 1334 if (!radeon_connector->ddc_bus) 1335 - goto failed; 1335 + DRM_ERROR("DP: Failed to assign ddc bus! Check dmesg for i2c errors.\n"); 1336 1336 } 1337 1337 subpixel_order = SubPixelHorizontalRGB; 1338 1338 drm_connector_attach_property(&radeon_connector->base, ··· 1381 1381 if (i2c_bus->valid) { 1382 1382 radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus); 1383 1383 if (!radeon_connector->ddc_bus) 1384 - goto failed; 1384 + DRM_ERROR("LVDS: Failed to assign ddc bus! Check dmesg for i2c errors.\n"); 1385 1385 } 1386 1386 drm_connector_attach_property(&radeon_connector->base, 1387 1387 dev->mode_config.scaling_mode_property, ··· 1457 1457 if (i2c_bus->valid) { 1458 1458 radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus); 1459 1459 if (!radeon_connector->ddc_bus) 1460 - goto failed; 1460 + DRM_ERROR("VGA: Failed to assign ddc bus! Check dmesg for i2c errors.\n"); 1461 1461 } 1462 1462 radeon_connector->dac_load_detect = true; 1463 1463 drm_connector_attach_property(&radeon_connector->base, ··· 1475 1475 if (i2c_bus->valid) { 1476 1476 radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus); 1477 1477 if (!radeon_connector->ddc_bus) 1478 - goto failed; 1478 + DRM_ERROR("DVIA: Failed to assign ddc bus! 
Check dmesg for i2c errors.\n"); 1479 1479 } 1480 1480 radeon_connector->dac_load_detect = true; 1481 1481 drm_connector_attach_property(&radeon_connector->base, ··· 1493 1493 if (i2c_bus->valid) { 1494 1494 radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus); 1495 1495 if (!radeon_connector->ddc_bus) 1496 - goto failed; 1496 + DRM_ERROR("DVI: Failed to assign ddc bus! Check dmesg for i2c errors.\n"); 1497 1497 } 1498 1498 if (connector_type == DRM_MODE_CONNECTOR_DVII) { 1499 1499 radeon_connector->dac_load_detect = true; ··· 1538 1538 if (i2c_bus->valid) { 1539 1539 radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus); 1540 1540 if (!radeon_connector->ddc_bus) 1541 - goto failed; 1541 + DRM_ERROR("LVDS: Failed to assign ddc bus! Check dmesg for i2c errors.\n"); 1542 1542 } 1543 1543 drm_connector_attach_property(&radeon_connector->base, 1544 1544 dev->mode_config.scaling_mode_property, ··· 1567 1567 radeon_legacy_backlight_init(radeon_encoder, connector); 1568 1568 } 1569 1569 } 1570 - return; 1571 - 1572 - failed: 1573 - drm_connector_cleanup(connector); 1574 - kfree(connector); 1575 1570 }
+3 -3
drivers/gpu/drm/radeon/radeon_cursor.c
··· 167 167 return -EINVAL; 168 168 } 169 169 170 - radeon_crtc->cursor_width = width; 171 - radeon_crtc->cursor_height = height; 172 - 173 170 obj = drm_gem_object_lookup(crtc->dev, file_priv, handle); 174 171 if (!obj) { 175 172 DRM_ERROR("Cannot find cursor object %x for crtc %d\n", handle, radeon_crtc->crtc_id); ··· 176 179 ret = radeon_gem_object_pin(obj, RADEON_GEM_DOMAIN_VRAM, &gpu_addr); 177 180 if (ret) 178 181 goto fail; 182 + 183 + radeon_crtc->cursor_width = width; 184 + radeon_crtc->cursor_height = height; 179 185 180 186 radeon_lock_cursor(crtc, true); 181 187 /* XXX only 27 bit offset for legacy cursor */
+3 -3
drivers/gpu/drm/radeon/radeon_gart.c
··· 181 181 p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); 182 182 183 183 for (i = 0; i < pages; i++, p++) { 184 - /* On TTM path, we only use the DMA API if TTM_PAGE_FLAG_DMA32 185 - * is requested. */ 186 - if (dma_addr[i] != DMA_ERROR_CODE) { 184 + /* we reverted the patch using dma_addr in TTM for now but this 185 + * code stops building on alpha so just comment it out for now */ 186 + if (0) { /*dma_addr[i] != DMA_ERROR_CODE) */ 187 187 rdev->gart.ttm_alloced[p] = true; 188 188 rdev->gart.pages_addr[p] = dma_addr[i]; 189 189 } else {
+6
drivers/gpu/drm/radeon/radeon_i2c.c
··· 1096 1096 if (!radeon_connector->router.ddc_valid) 1097 1097 return; 1098 1098 1099 + if (!radeon_connector->router_bus) 1100 + return; 1101 + 1099 1102 radeon_i2c_get_byte(radeon_connector->router_bus, 1100 1103 radeon_connector->router.i2c_addr, 1101 1104 0x3, &val); ··· 1122 1119 u8 val; 1123 1120 1124 1121 if (!radeon_connector->router.cd_valid) 1122 + return; 1123 + 1124 + if (!radeon_connector->router_bus) 1125 1125 return; 1126 1126 1127 1127 radeon_i2c_get_byte(radeon_connector->router_bus,
+16
drivers/gpu/drm/radeon/radeon_kms.c
··· 221 221 return -EINVAL; 222 222 } 223 223 break; 224 + case RADEON_INFO_NUM_TILE_PIPES: 225 + if (rdev->family >= CHIP_CAYMAN) 226 + value = rdev->config.cayman.max_tile_pipes; 227 + else if (rdev->family >= CHIP_CEDAR) 228 + value = rdev->config.evergreen.max_tile_pipes; 229 + else if (rdev->family >= CHIP_RV770) 230 + value = rdev->config.rv770.max_tile_pipes; 231 + else if (rdev->family >= CHIP_R600) 232 + value = rdev->config.r600.max_tile_pipes; 233 + else { 234 + return -EINVAL; 235 + } 236 + break; 237 + case RADEON_INFO_FUSION_GART_WORKING: 238 + value = 1; 239 + break; 224 240 default: 225 241 DRM_DEBUG_KMS("Invalid request %d\n", info->request); 226 242 return -EINVAL;
+1
drivers/gpu/drm/radeon/reg_srcs/cayman
··· 33 33 0x00008E48 SQ_EX_ALLOC_TABLE_SLOTS 34 34 0x00009100 SPI_CONFIG_CNTL 35 35 0x0000913C SPI_CONFIG_CNTL_1 36 + 0x00009508 TA_CNTL_AUX 36 37 0x00009830 DB_DEBUG 37 38 0x00009834 DB_DEBUG2 38 39 0x00009838 DB_DEBUG3
+1
drivers/gpu/drm/radeon/reg_srcs/evergreen
··· 46 46 0x00008E48 SQ_EX_ALLOC_TABLE_SLOTS 47 47 0x00009100 SPI_CONFIG_CNTL 48 48 0x0000913C SPI_CONFIG_CNTL_1 49 + 0x00009508 TA_CNTL_AUX 49 50 0x00009700 VC_CNTL 50 51 0x00009714 VC_ENHANCE 51 52 0x00009830 DB_DEBUG
+1
drivers/gpu/drm/radeon/reg_srcs/r600
··· 708 708 0x00028D0C DB_RENDER_CONTROL 709 709 0x00028D10 DB_RENDER_OVERRIDE 710 710 0x0002880C DB_SHADER_CONTROL 711 + 0x00028D28 DB_SRESULTS_COMPARE_STATE0 711 712 0x00028D2C DB_SRESULTS_COMPARE_STATE1 712 713 0x00028430 DB_STENCILREFMASK 713 714 0x00028434 DB_STENCILREFMASK_BF
+3 -3
drivers/gpu/vga/vga_switcheroo.c
··· 219 219 int i; 220 220 struct vga_switcheroo_client *active = NULL; 221 221 222 - if (new_client->active == true) 223 - return 0; 224 - 225 222 for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) { 226 223 if (vgasr_priv.clients[i].active == true) { 227 224 active = &vgasr_priv.clients[i]; ··· 368 371 ret = vgasr_priv.handler->switchto(client_id); 369 372 goto out; 370 373 } 374 + 375 + if (client->active == true) 376 + goto out; 371 377 372 378 /* okay we want a switch - test if devices are willing to switch */ 373 379 can_switch = true;
+5 -6
drivers/hwmon/Kconfig
··· 110 110 help 111 111 If you say yes here you get support for Analog Devices ADM1021 112 112 and ADM1023 sensor chips and clones: Maxim MAX1617 and MAX1617A, 113 - Genesys Logic GL523SM, National Semiconductor LM84, TI THMC10, 114 - and the XEON processor built-in sensor. 113 + Genesys Logic GL523SM, National Semiconductor LM84 and TI THMC10. 115 114 116 115 This driver can also be built as a module. If so, the module 117 116 will be called adm1021. ··· 617 618 depends on I2C 618 619 help 619 620 If you say yes here you get support for National Semiconductor LM90, 620 - LM86, LM89 and LM99, Analog Devices ADM1032 and ADT7461, Maxim 621 - MAX6646, MAX6647, MAX6648, MAX6649, MAX6657, MAX6658, MAX6659, 622 - MAX6680, MAX6681, MAX6692, MAX6695, MAX6696, and Winbond/Nuvoton 623 - W83L771W/G/AWG/ASG sensor chips. 621 + LM86, LM89 and LM99, Analog Devices ADM1032, ADT7461, and ADT7461A, 622 + Maxim MAX6646, MAX6647, MAX6648, MAX6649, MAX6657, MAX6658, MAX6659, 623 + MAX6680, MAX6681, MAX6692, MAX6695, MAX6696, ON Semiconductor NCT1008, 624 + and Winbond/Nuvoton W83L771W/G/AWG/ASG sensor chips. 624 625 625 626 This driver can also be built as a module. If so, the module 626 627 will be called lm90.
+4 -2
drivers/hwmon/lm85.c
··· 1094 1094 &sensor_dev_attr_pwm1_auto_pwm_minctl.dev_attr.attr, 1095 1095 &sensor_dev_attr_pwm2_auto_pwm_minctl.dev_attr.attr, 1096 1096 &sensor_dev_attr_pwm3_auto_pwm_minctl.dev_attr.attr, 1097 + NULL 1097 1098 }; 1098 1099 1099 1100 static const struct attribute_group lm85_group_minctl = { ··· 1105 1104 &sensor_dev_attr_temp1_auto_temp_off.dev_attr.attr, 1106 1105 &sensor_dev_attr_temp2_auto_temp_off.dev_attr.attr, 1107 1106 &sensor_dev_attr_temp3_auto_temp_off.dev_attr.attr, 1107 + NULL 1108 1108 }; 1109 1109 1110 1110 static const struct attribute_group lm85_group_temp_off = { ··· 1331 1329 if (data->type != emc6d103s) { 1332 1330 err = sysfs_create_group(&client->dev.kobj, &lm85_group_minctl); 1333 1331 if (err) 1334 - goto err_kfree; 1332 + goto err_remove_files; 1335 1333 err = sysfs_create_group(&client->dev.kobj, 1336 1334 &lm85_group_temp_off); 1337 1335 if (err) 1338 - goto err_kfree; 1336 + goto err_remove_files; 1339 1337 } 1340 1338 1341 1339 /* The ADT7463/68 have an optional VRM 10 mode where pin 21 is used
+15 -7
drivers/hwmon/lm90.c
··· 49 49 * chips, but support three temperature sensors instead of two. MAX6695 50 50 * and MAX6696 only differ in the pinout so they can be treated identically. 51 51 * 52 - * This driver also supports the ADT7461 chip from Analog Devices. 53 - * It's supported in both compatibility and extended mode. It is mostly 54 - * compatible with LM90 except for a data format difference for the 55 - * temperature value registers. 52 + * This driver also supports ADT7461 and ADT7461A from Analog Devices as well as 53 + * NCT1008 from ON Semiconductor. The chips are supported in both compatibility 54 + * and extended mode. They are mostly compatible with LM90 except for a data 55 + * format difference for the temperature value registers. 56 56 * 57 57 * Since the LM90 was the first chipset supported by this driver, most 58 58 * comments will refer to this chipset, but are actually general and ··· 88 88 * Addresses to scan 89 89 * Address is fully defined internally and cannot be changed except for 90 90 * MAX6659, MAX6680 and MAX6681. 91 - * LM86, LM89, LM90, LM99, ADM1032, ADM1032-1, ADT7461, MAX6649, MAX6657, 92 - * MAX6658 and W83L771 have address 0x4c. 93 - * ADM1032-2, ADT7461-2, LM89-1, LM99-1 and MAX6646 have address 0x4d. 91 + * LM86, LM89, LM90, LM99, ADM1032, ADM1032-1, ADT7461, ADT7461A, MAX6649, 92 + * MAX6657, MAX6658, NCT1008 and W83L771 have address 0x4c. 93 + * ADM1032-2, ADT7461-2, ADT7461A-2, LM89-1, LM99-1, MAX6646, and NCT1008D 94 + * have address 0x4d. 94 95 * MAX6647 has address 0x4e. 95 96 * MAX6659 can have address 0x4c, 0x4d or 0x4e. 
96 97 * MAX6680 and MAX6681 can have address 0x18, 0x19, 0x1a, 0x29, 0x2a, 0x2b, ··· 175 174 static const struct i2c_device_id lm90_id[] = { 176 175 { "adm1032", adm1032 }, 177 176 { "adt7461", adt7461 }, 177 + { "adt7461a", adt7461 }, 178 178 { "lm90", lm90 }, 179 179 { "lm86", lm86 }, 180 180 { "lm89", lm86 }, ··· 190 188 { "max6681", max6680 }, 191 189 { "max6695", max6696 }, 192 190 { "max6696", max6696 }, 191 + { "nct1008", adt7461 }, 193 192 { "w83l771", w83l771 }, 194 193 { } 195 194 }; ··· 1156 1153 && (reg_config1 & 0x1B) == 0x00 1157 1154 && reg_convrate <= 0x0A) { 1158 1155 name = "adt7461"; 1156 + } else 1157 + if (chip_id == 0x57 /* ADT7461A, NCT1008 */ 1158 + && (reg_config1 & 0x1B) == 0x00 1159 + && reg_convrate <= 0x0A) { 1160 + name = "adt7461a"; 1159 1161 } 1160 1162 } else 1161 1163 if (man_id == 0x4D) { /* Maxim */
-1
drivers/hwmon/pmbus_core.c
··· 139 139 * A single status register covers multiple attributes, 140 140 * so we keep them all together. 141 141 */ 142 - u8 status_bits; 143 142 u8 status[PB_NUM_STATUS_REG]; 144 143 145 144 u8 currpage;
+1 -2
drivers/hwmon/twl4030-madc-hwmon.c
··· 98 98 static int __devinit twl4030_madc_hwmon_probe(struct platform_device *pdev) 99 99 { 100 100 int ret; 101 - int status; 102 101 struct device *hwmon; 103 102 104 103 ret = sysfs_create_group(&pdev->dev.kobj, &twl4030_madc_group); ··· 106 107 hwmon = hwmon_device_register(&pdev->dev); 107 108 if (IS_ERR(hwmon)) { 108 109 dev_err(&pdev->dev, "hwmon_device_register failed.\n"); 109 - status = PTR_ERR(hwmon); 110 + ret = PTR_ERR(hwmon); 110 111 goto err_reg; 111 112 } 112 113
+5
drivers/i2c/busses/i2c-i801.c
··· 134 134 SMBHSTSTS_BUS_ERR | SMBHSTSTS_DEV_ERR | \ 135 135 SMBHSTSTS_INTR) 136 136 137 + /* Older devices have their ID defined in <linux/pci_ids.h> */ 138 + #define PCI_DEVICE_ID_INTEL_COUGARPOINT_SMBUS 0x1c22 139 + #define PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS 0x1d22 137 140 /* Patsburg also has three 'Integrated Device Function' SMBus controllers */ 138 141 #define PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS_IDF0 0x1d70 139 142 #define PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS_IDF1 0x1d71 140 143 #define PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS_IDF2 0x1d72 144 + #define PCI_DEVICE_ID_INTEL_DH89XXCC_SMBUS 0x2330 145 + #define PCI_DEVICE_ID_INTEL_5_3400_SERIES_SMBUS 0x3b30 141 146 142 147 struct i801_priv { 143 148 struct i2c_adapter adapter;
+6 -3
drivers/i2c/busses/i2c-mpc.c
··· 560 560 .timeout = HZ, 561 561 }; 562 562 563 + static const struct of_device_id mpc_i2c_of_match[]; 563 564 static int __devinit fsl_i2c_probe(struct platform_device *op) 564 565 { 566 + const struct of_device_id *match; 565 567 struct mpc_i2c *i2c; 566 568 const u32 *prop; 567 569 u32 clock = MPC_I2C_CLOCK_LEGACY; 568 570 int result = 0; 569 571 int plen; 570 572 571 - if (!op->dev.of_match) 573 + match = of_match_device(mpc_i2c_of_match, &op->dev); 574 + if (!match) 572 575 return -EINVAL; 573 576 574 577 i2c = kzalloc(sizeof(*i2c), GFP_KERNEL); ··· 608 605 clock = *prop; 609 606 } 610 607 611 - if (op->dev.of_match->data) { 612 - struct mpc_i2c_data *data = op->dev.of_match->data; 608 + if (match->data) { 609 + struct mpc_i2c_data *data = match->data; 613 610 data->setup(op->dev.of_node, i2c, clock, data->prescaler); 614 611 } else { 615 612 /* Backwards compatibility */
+14 -13
drivers/i2c/busses/i2c-parport.c
··· 1 1 /* ------------------------------------------------------------------------ * 2 2 * i2c-parport.c I2C bus over parallel port * 3 3 * ------------------------------------------------------------------------ * 4 - Copyright (C) 2003-2010 Jean Delvare <khali@linux-fr.org> 4 + Copyright (C) 2003-2011 Jean Delvare <khali@linux-fr.org> 5 5 6 6 Based on older i2c-philips-par.c driver 7 7 Copyright (C) 1995-2000 Simon G. Vogl ··· 33 33 #include <linux/i2c-algo-bit.h> 34 34 #include <linux/i2c-smbus.h> 35 35 #include <linux/slab.h> 36 + #include <linux/list.h> 37 + #include <linux/mutex.h> 36 38 #include "i2c-parport.h" 37 39 38 40 /* ----- Device list ------------------------------------------------------ */ ··· 45 43 struct i2c_algo_bit_data algo_data; 46 44 struct i2c_smbus_alert_setup alert_data; 47 45 struct i2c_client *ara; 48 - struct i2c_par *next; 46 + struct list_head node; 49 47 }; 50 48 51 - static struct i2c_par *adapter_list; 49 + static LIST_HEAD(adapter_list); 50 + static DEFINE_MUTEX(adapter_list_lock); 52 51 53 52 /* ----- Low-level parallel port access ----------------------------------- */ 54 53 ··· 231 228 } 232 229 233 230 /* Add the new adapter to the list */ 234 - adapter->next = adapter_list; 235 - adapter_list = adapter; 231 + mutex_lock(&adapter_list_lock); 232 + list_add_tail(&adapter->node, &adapter_list); 233 + mutex_unlock(&adapter_list_lock); 236 234 return; 237 235 238 236 ERROR1: ··· 245 241 246 242 static void i2c_parport_detach (struct parport *port) 247 243 { 248 - struct i2c_par *adapter, *prev; 244 + struct i2c_par *adapter, *_n; 249 245 250 246 /* Walk the list */ 251 - for (prev = NULL, adapter = adapter_list; adapter; 252 - prev = adapter, adapter = adapter->next) { 247 + mutex_lock(&adapter_list_lock); 248 + list_for_each_entry_safe(adapter, _n, &adapter_list, node) { 253 249 if (adapter->pdev->port == port) { 254 250 if (adapter->ara) { 255 251 parport_disable_irq(port); ··· 263 259 264 260 parport_release(adapter->pdev); 
265 261 parport_unregister_device(adapter->pdev); 266 - if (prev) 267 - prev->next = adapter->next; 268 - else 269 - adapter_list = adapter->next; 262 + list_del(&adapter->node); 270 263 kfree(adapter); 271 - return; 272 264 } 273 265 } 266 + mutex_unlock(&adapter_list_lock); 274 267 } 275 268 276 269 static struct parport_driver i2c_parport_driver = {
+1 -1
drivers/i2c/busses/i2c-pnx.c
··· 65 65 jiffies, expires); 66 66 67 67 timer->expires = jiffies + expires; 68 - timer->data = (unsigned long)&alg_data; 68 + timer->data = (unsigned long)alg_data; 69 69 70 70 add_timer(timer); 71 71 }
+6
drivers/ide/ide-cd_ioctl.c
··· 79 79 return CDS_DRIVE_NOT_READY; 80 80 } 81 81 82 + /* 83 + * ide-cd always generates media changed event if media is missing, which 84 + * makes it impossible to use for proper event reporting, so disk->events 85 + * is cleared to 0 and the following function is used only to trigger 86 + * revalidation and never propagated to userland. 87 + */ 82 88 unsigned int ide_cdrom_check_events_real(struct cdrom_device_info *cdi, 83 89 unsigned int clearing, int slot_nr) 84 90 {
+6 -1
drivers/ide/ide-gd.c
··· 298 298 return 0; 299 299 } 300 300 301 + /* 302 + * The following is used to force revalidation on the first open on 303 + * removeable devices, and never gets reported to userland as 304 + * genhd->events is 0. This is intended as removeable ide disk 305 + * can't really detect MEDIA_CHANGE events. 306 + */ 301 307 ret = drive->dev_flags & IDE_DFLAG_MEDIA_CHANGED; 302 308 drive->dev_flags &= ~IDE_DFLAG_MEDIA_CHANGED; 303 309 ··· 419 413 if (drive->dev_flags & IDE_DFLAG_REMOVABLE) 420 414 g->flags = GENHD_FL_REMOVABLE; 421 415 g->fops = &ide_gd_ops; 422 - g->events = DISK_EVENT_MEDIA_CHANGE; 423 416 add_disk(g); 424 417 return 0; 425 418
+1 -1
drivers/infiniband/hw/qib/qib_iba6120.c
··· 1799 1799 /* 1800 1800 * Keep chip from being accessed until we are ready. Use 1801 1801 * writeq() directly, to allow the write even though QIB_PRESENT 1802 - * isn't' set. 1802 + * isn't set. 1803 1803 */ 1804 1804 dd->flags &= ~(QIB_INITTED | QIB_PRESENT); 1805 1805 dd->int_counter = 0; /* so we check interrupts work again */
+1 -1
drivers/infiniband/hw/qib/qib_iba7220.c
··· 2111 2111 /* 2112 2112 * Keep chip from being accessed until we are ready. Use 2113 2113 * writeq() directly, to allow the write even though QIB_PRESENT 2114 - * isn't' set. 2114 + * isn't set. 2115 2115 */ 2116 2116 dd->flags &= ~(QIB_INITTED | QIB_PRESENT); 2117 2117 dd->int_counter = 0; /* so we check interrupts work again */
+1 -1
drivers/infiniband/hw/qib/qib_iba7322.c
··· 3299 3299 /* 3300 3300 * Keep chip from being accessed until we are ready. Use 3301 3301 * writeq() directly, to allow the write even though QIB_PRESENT 3302 - * isn't' set. 3302 + * isn't set. 3303 3303 */ 3304 3304 dd->flags &= ~(QIB_INITTED | QIB_PRESENT | QIB_BADINTR); 3305 3305 dd->flags |= QIB_DOING_RESET;
+10 -3
drivers/input/touchscreen/ads7846.c
··· 281 281 u8 command; 282 282 u8 ref_off; 283 283 u16 scratch; 284 - __be16 sample; 285 284 struct spi_message msg; 286 285 struct spi_transfer xfer[6]; 286 + /* 287 + * DMA (thus cache coherency maintenance) requires the 288 + * transfer buffers to live in their own cache lines. 289 + */ 290 + __be16 sample ____cacheline_aligned; 287 291 }; 288 292 289 293 struct ads7845_ser_req { 290 294 u8 command[3]; 291 - u8 pwrdown[3]; 292 - u8 sample[3]; 293 295 struct spi_message msg; 294 296 struct spi_transfer xfer[2]; 297 + /* 298 + * DMA (thus cache coherency maintenance) requires the 299 + * transfer buffers to live in their own cache lines. 300 + */ 301 + u8 sample[3] ____cacheline_aligned; 295 302 }; 296 303 297 304 static int ads7846_read12_ser(struct device *dev, unsigned command)
+64 -11
drivers/input/touchscreen/wm831x-ts.c
··· 68 68 unsigned int pd_irq; 69 69 bool pressure; 70 70 bool pen_down; 71 + struct work_struct pd_data_work; 71 72 }; 73 + 74 + static void wm831x_pd_data_work(struct work_struct *work) 75 + { 76 + struct wm831x_ts *wm831x_ts = 77 + container_of(work, struct wm831x_ts, pd_data_work); 78 + 79 + if (wm831x_ts->pen_down) { 80 + enable_irq(wm831x_ts->data_irq); 81 + dev_dbg(wm831x_ts->wm831x->dev, "IRQ PD->DATA done\n"); 82 + } else { 83 + enable_irq(wm831x_ts->pd_irq); 84 + dev_dbg(wm831x_ts->wm831x->dev, "IRQ DATA->PD done\n"); 85 + } 86 + } 72 87 73 88 static irqreturn_t wm831x_ts_data_irq(int irq, void *irq_data) 74 89 { ··· 125 110 } 126 111 127 112 if (!wm831x_ts->pen_down) { 113 + /* Switch from data to pen down */ 114 + dev_dbg(wm831x->dev, "IRQ DATA->PD\n"); 115 + 128 116 disable_irq_nosync(wm831x_ts->data_irq); 129 117 130 118 /* Don't need data any more */ ··· 146 128 ABS_PRESSURE, 0); 147 129 148 130 input_report_key(wm831x_ts->input_dev, BTN_TOUCH, 0); 131 + 132 + schedule_work(&wm831x_ts->pd_data_work); 133 + } else { 134 + input_report_key(wm831x_ts->input_dev, BTN_TOUCH, 1); 149 135 } 150 136 151 137 input_sync(wm831x_ts->input_dev); ··· 163 141 struct wm831x *wm831x = wm831x_ts->wm831x; 164 142 int ena = 0; 165 143 144 + if (wm831x_ts->pen_down) 145 + return IRQ_HANDLED; 146 + 147 + disable_irq_nosync(wm831x_ts->pd_irq); 148 + 166 149 /* Start collecting data */ 167 150 if (wm831x_ts->pressure) 168 151 ena |= WM831X_TCH_Z_ENA; ··· 176 149 WM831X_TCH_X_ENA | WM831X_TCH_Y_ENA | WM831X_TCH_Z_ENA, 177 150 WM831X_TCH_X_ENA | WM831X_TCH_Y_ENA | ena); 178 151 179 - input_report_key(wm831x_ts->input_dev, BTN_TOUCH, 1); 180 - input_sync(wm831x_ts->input_dev); 181 - 182 152 wm831x_set_bits(wm831x, WM831X_INTERRUPT_STATUS_1, 183 153 WM831X_TCHPD_EINT, WM831X_TCHPD_EINT); 184 154 185 155 wm831x_ts->pen_down = true; 186 - enable_irq(wm831x_ts->data_irq); 156 + 157 + /* Switch from pen down to data */ 158 + dev_dbg(wm831x->dev, "IRQ PD->DATA\n"); 159 + 
schedule_work(&wm831x_ts->pd_data_work); 187 160 188 161 return IRQ_HANDLED; 189 162 } ··· 209 182 struct wm831x_ts *wm831x_ts = input_get_drvdata(idev); 210 183 struct wm831x *wm831x = wm831x_ts->wm831x; 211 184 185 + /* Shut the controller down, disabling all other functionality too */ 212 186 wm831x_set_bits(wm831x, WM831X_TOUCH_CONTROL_1, 213 - WM831X_TCH_ENA | WM831X_TCH_CVT_ENA | 214 - WM831X_TCH_X_ENA | WM831X_TCH_Y_ENA | 215 - WM831X_TCH_Z_ENA, 0); 187 + WM831X_TCH_ENA | WM831X_TCH_X_ENA | 188 + WM831X_TCH_Y_ENA | WM831X_TCH_Z_ENA, 0); 216 189 217 - if (wm831x_ts->pen_down) 190 + /* Make sure any pending IRQs are done, the above will prevent 191 + * new ones firing. 192 + */ 193 + synchronize_irq(wm831x_ts->data_irq); 194 + synchronize_irq(wm831x_ts->pd_irq); 195 + 196 + /* Make sure the IRQ completion work is quiesced */ 197 + flush_work_sync(&wm831x_ts->pd_data_work); 198 + 199 + /* If we ended up with the pen down then make sure we revert back 200 + * to pen detection state for the next time we start up. 
201 + */ 202 + if (wm831x_ts->pen_down) { 218 203 disable_irq(wm831x_ts->data_irq); 204 + enable_irq(wm831x_ts->pd_irq); 205 + wm831x_ts->pen_down = false; 206 + } 219 207 } 220 208 221 209 static __devinit int wm831x_ts_probe(struct platform_device *pdev) ··· 240 198 struct wm831x_pdata *core_pdata = dev_get_platdata(pdev->dev.parent); 241 199 struct wm831x_touch_pdata *pdata = NULL; 242 200 struct input_dev *input_dev; 243 - int error; 201 + int error, irqf; 244 202 245 203 if (core_pdata) 246 204 pdata = core_pdata->touch; ··· 254 212 255 213 wm831x_ts->wm831x = wm831x; 256 214 wm831x_ts->input_dev = input_dev; 215 + INIT_WORK(&wm831x_ts->pd_data_work, wm831x_pd_data_work); 257 216 258 217 /* 259 218 * If we have a direct IRQ use it, otherwise use the interrupt ··· 313 270 wm831x_set_bits(wm831x, WM831X_TOUCH_CONTROL_1, 314 271 WM831X_TCH_RATE_MASK, 6); 315 272 273 + if (pdata && pdata->data_irqf) 274 + irqf = pdata->data_irqf; 275 + else 276 + irqf = IRQF_TRIGGER_HIGH; 277 + 316 278 error = request_threaded_irq(wm831x_ts->data_irq, 317 279 NULL, wm831x_ts_data_irq, 318 - IRQF_ONESHOT, 280 + irqf | IRQF_ONESHOT, 319 281 "Touchscreen data", wm831x_ts); 320 282 if (error) { 321 283 dev_err(&pdev->dev, "Failed to request data IRQ %d: %d\n", ··· 329 281 } 330 282 disable_irq(wm831x_ts->data_irq); 331 283 284 + if (pdata && pdata->pd_irqf) 285 + irqf = pdata->pd_irqf; 286 + else 287 + irqf = IRQF_TRIGGER_HIGH; 288 + 332 289 error = request_threaded_irq(wm831x_ts->pd_irq, 333 290 NULL, wm831x_ts_pen_down_irq, 334 - IRQF_ONESHOT, 291 + irqf | IRQF_ONESHOT, 335 292 "Touchscreen pen down", wm831x_ts); 336 293 if (error) { 337 294 dev_err(&pdev->dev, "Failed to request pen down IRQ %d: %d\n",
+1
drivers/leds/leds-lm3530.c
··· 349 349 {LM3530_NAME, 0}, 350 350 {} 351 351 }; 352 + MODULE_DEVICE_TABLE(i2c, lm3530_id); 352 353 353 354 static struct i2c_driver lm3530_i2c_driver = { 354 355 .probe = lm3530_probe,
+1
drivers/md/md.c
··· 3170 3170 mddev->layout = mddev->new_layout; 3171 3171 mddev->chunk_sectors = mddev->new_chunk_sectors; 3172 3172 mddev->delta_disks = 0; 3173 + mddev->degraded = 0; 3173 3174 if (mddev->pers->sync_request == NULL) { 3174 3175 /* this is now an array without redundancy, so 3175 3176 * it must always be in_sync
+4 -1
drivers/md/raid5.c
··· 5151 5151 5152 5152 mddev->queue->backing_dev_info.congested_data = mddev; 5153 5153 mddev->queue->backing_dev_info.congested_fn = raid5_congested; 5154 - mddev->queue->queue_lock = &conf->device_lock; 5155 5154 5156 5155 chunk_size = mddev->chunk_sectors << 9; 5157 5156 blk_queue_io_min(mddev->queue, chunk_size); ··· 5678 5679 static void *raid45_takeover_raid0(mddev_t *mddev, int level) 5679 5680 { 5680 5681 struct raid0_private_data *raid0_priv = mddev->private; 5682 + sector_t sectors; 5681 5683 5682 5684 /* for raid0 takeover only one zone is supported */ 5683 5685 if (raid0_priv->nr_strip_zones > 1) { ··· 5687 5687 return ERR_PTR(-EINVAL); 5688 5688 } 5689 5689 5690 + sectors = raid0_priv->strip_zone[0].zone_end; 5691 + sector_div(sectors, raid0_priv->strip_zone[0].nb_dev); 5692 + mddev->dev_sectors = sectors; 5690 5693 mddev->new_level = level; 5691 5694 mddev->new_layout = ALGORITHM_PARITY_N; 5692 5695 mddev->new_chunk_sectors = mddev->chunk_sectors;
+1 -10
drivers/media/common/tuners/tda18271-common.c
··· 533 533 if (tda_fail(ret)) 534 534 goto fail; 535 535 536 - regs[R_MPD] = (0x77 & pd); 537 - 538 - switch (priv->mode) { 539 - case TDA18271_ANALOG: 540 - regs[R_MPD] &= ~0x08; 541 - break; 542 - case TDA18271_DIGITAL: 543 - regs[R_MPD] |= 0x08; 544 - break; 545 - } 536 + regs[R_MPD] = (0x7f & pd); 546 537 547 538 div = ((d * (freq / 1000)) << 7) / 125; 548 539
+11 -10
drivers/media/common/tuners/tda18271-fe.c
··· 579 579 #define RF3 2 580 580 u32 rf_default[3]; 581 581 u32 rf_freq[3]; 582 - u8 prog_cal[3]; 583 - u8 prog_tab[3]; 582 + s32 prog_cal[3]; 583 + s32 prog_tab[3]; 584 584 585 585 i = tda18271_lookup_rf_band(fe, &freq, NULL); 586 586 ··· 602 602 return bcal; 603 603 604 604 tda18271_calc_rf_cal(fe, &rf_freq[rf]); 605 - prog_tab[rf] = regs[R_EB14]; 605 + prog_tab[rf] = (s32)regs[R_EB14]; 606 606 607 607 if (1 == bcal) 608 - prog_cal[rf] = tda18271_calibrate_rf(fe, rf_freq[rf]); 608 + prog_cal[rf] = 609 + (s32)tda18271_calibrate_rf(fe, rf_freq[rf]); 609 610 else 610 611 prog_cal[rf] = prog_tab[rf]; 611 612 612 613 switch (rf) { 613 614 case RF1: 614 615 map[i].rf_a1 = 0; 615 - map[i].rf_b1 = (s32)(prog_cal[RF1] - prog_tab[RF1]); 616 + map[i].rf_b1 = (prog_cal[RF1] - prog_tab[RF1]); 616 617 map[i].rf1 = rf_freq[RF1] / 1000; 617 618 break; 618 619 case RF2: 619 - dividend = (s32)(prog_cal[RF2] - prog_tab[RF2]) - 620 - (s32)(prog_cal[RF1] + prog_tab[RF1]); 620 + dividend = (prog_cal[RF2] - prog_tab[RF2] - 621 + prog_cal[RF1] + prog_tab[RF1]); 621 622 divisor = (s32)(rf_freq[RF2] - rf_freq[RF1]) / 1000; 622 623 map[i].rf_a1 = (dividend / divisor); 623 624 map[i].rf2 = rf_freq[RF2] / 1000; 624 625 break; 625 626 case RF3: 626 - dividend = (s32)(prog_cal[RF3] - prog_tab[RF3]) - 627 - (s32)(prog_cal[RF2] + prog_tab[RF2]); 627 + dividend = (prog_cal[RF3] - prog_tab[RF3] - 628 + prog_cal[RF2] + prog_tab[RF2]); 628 629 divisor = (s32)(rf_freq[RF3] - rf_freq[RF2]) / 1000; 629 630 map[i].rf_a2 = (dividend / divisor); 630 - map[i].rf_b2 = (s32)(prog_cal[RF2] - prog_tab[RF2]); 631 + map[i].rf_b2 = (prog_cal[RF2] - prog_tab[RF2]); 631 632 map[i].rf3 = rf_freq[RF3] / 1000; 632 633 break; 633 634 default:
+6 -6
drivers/media/common/tuners/tda18271-maps.c
··· 229 229 static struct tda18271_map tda18271_rf_band[] = { 230 230 { .rfmax = 47900, .val = 0x00 }, 231 231 { .rfmax = 61100, .val = 0x01 }, 232 - /* { .rfmax = 152600, .val = 0x02 }, */ 233 - { .rfmax = 121200, .val = 0x02 }, 232 + { .rfmax = 152600, .val = 0x02 }, 234 233 { .rfmax = 164700, .val = 0x03 }, 235 234 { .rfmax = 203500, .val = 0x04 }, 236 235 { .rfmax = 457800, .val = 0x05 }, ··· 447 448 { .rfmax = 150000, .val = 0xb0 }, 448 449 { .rfmax = 151000, .val = 0xb1 }, 449 450 { .rfmax = 152000, .val = 0xb7 }, 450 - { .rfmax = 153000, .val = 0xbd }, 451 + { .rfmax = 152600, .val = 0xbd }, 451 452 { .rfmax = 154000, .val = 0x20 }, 452 453 { .rfmax = 155000, .val = 0x22 }, 453 454 { .rfmax = 156000, .val = 0x24 }, ··· 458 459 { .rfmax = 161000, .val = 0x2d }, 459 460 { .rfmax = 163000, .val = 0x2e }, 460 461 { .rfmax = 164000, .val = 0x2f }, 461 - { .rfmax = 165000, .val = 0x30 }, 462 + { .rfmax = 164700, .val = 0x30 }, 462 463 { .rfmax = 166000, .val = 0x11 }, 463 464 { .rfmax = 167000, .val = 0x12 }, 464 465 { .rfmax = 168000, .val = 0x13 }, ··· 509 510 { .rfmax = 236000, .val = 0x1b }, 510 511 { .rfmax = 237000, .val = 0x1c }, 511 512 { .rfmax = 240000, .val = 0x1d }, 512 - { .rfmax = 242000, .val = 0x1f }, 513 + { .rfmax = 242000, .val = 0x1e }, 514 + { .rfmax = 244000, .val = 0x1f }, 513 515 { .rfmax = 247000, .val = 0x20 }, 514 516 { .rfmax = 249000, .val = 0x21 }, 515 517 { .rfmax = 252000, .val = 0x22 }, ··· 624 624 { .rfmax = 453000, .val = 0x93 }, 625 625 { .rfmax = 454000, .val = 0x94 }, 626 626 { .rfmax = 456000, .val = 0x96 }, 627 - { .rfmax = 457000, .val = 0x98 }, 627 + { .rfmax = 457800, .val = 0x98 }, 628 628 { .rfmax = 461000, .val = 0x11 }, 629 629 { .rfmax = 468000, .val = 0x12 }, 630 630 { .rfmax = 472000, .val = 0x13 },
+1 -1
drivers/media/dvb/b2c2/flexcop-pci.c
··· 38 38 DEBSTATUS); 39 39 40 40 #define DRIVER_VERSION "0.1" 41 - #define DRIVER_NAME "Technisat/B2C2 FlexCop II/IIb/III Digital TV PCI Driver" 41 + #define DRIVER_NAME "flexcop-pci" 42 42 #define DRIVER_AUTHOR "Patrick Boettcher <patrick.boettcher@desy.de>" 43 43 44 44 struct flexcop_pci {
+4 -2
drivers/media/dvb/dvb-usb/Kconfig
··· 356 356 select DVB_TDA826X if !DVB_FE_CUSTOMISE 357 357 select DVB_STV0288 if !DVB_FE_CUSTOMISE 358 358 select DVB_IX2505V if !DVB_FE_CUSTOMISE 359 + select DVB_STV0299 if !DVB_FE_CUSTOMISE 360 + select DVB_PLL if !DVB_FE_CUSTOMISE 359 361 help 360 362 Say Y here to support the LME DM04/QQBOX DVB-S USB2.0 . 361 363 362 364 config DVB_USB_TECHNISAT_USB2 363 365 tristate "Technisat DVB-S/S2 USB2.0 support" 364 366 depends on DVB_USB 365 - select DVB_STB0899 if !DVB_FE_CUSTOMISE 366 - select DVB_STB6100 if !DVB_FE_CUSTOMISE 367 + select DVB_STV090x if !DVB_FE_CUSTOMISE 368 + select DVB_STV6110x if !DVB_FE_CUSTOMISE 367 369 help 368 370 Say Y here to support the Technisat USB2 DVB-S/S2 device
+3 -3
drivers/media/dvb/dvb-usb/dib0700_devices.c
··· 2162 2162 .agc1_pt3 = 98, 2163 2163 .agc1_slope1 = 0, 2164 2164 .agc1_slope2 = 167, 2165 - .agc1_pt1 = 98, 2165 + .agc2_pt1 = 98, 2166 2166 .agc2_pt2 = 255, 2167 2167 .agc2_slope1 = 104, 2168 2168 .agc2_slope2 = 0, ··· 2440 2440 dib0700_set_i2c_speed(adap->dev, 340); 2441 2441 adap->fe = dvb_attach(dib7000p_attach, &adap->dev->i2c_adap, 0x90, &tfe7090pvr_dib7000p_config[0]); 2442 2442 2443 - dib7090_slave_reset(adap->fe); 2444 - 2445 2443 if (adap->fe == NULL) 2446 2444 return -ENODEV; 2445 + 2446 + dib7090_slave_reset(adap->fe); 2447 2447 2448 2448 return 0; 2449 2449 }
+1
drivers/media/dvb/ngene/ngene-core.c
··· 1520 1520 if (dev->ci.en && (io & NGENE_IO_TSOUT)) { 1521 1521 dvb_ca_en50221_init(adapter, dev->ci.en, 0, 1); 1522 1522 set_transfer(chan, 1); 1523 + chan->dev->channel[2].DataFormatFlags = DF_SWAP32; 1523 1524 set_transfer(&chan->dev->channel[2], 1); 1524 1525 dvb_register_device(adapter, &chan->ci_dev, 1525 1526 &ngene_dvbdev_ci, (void *) chan,
+6 -2
drivers/media/media-entity.c
··· 378 378 379 379 static int __media_entity_setup_link_notify(struct media_link *link, u32 flags) 380 380 { 381 - const u32 mask = MEDIA_LNK_FL_ENABLED; 382 381 int ret; 383 382 384 383 /* Notify both entities. */ ··· 394 395 return ret; 395 396 } 396 397 397 - link->flags = (link->flags & ~mask) | (flags & mask); 398 + link->flags = flags; 398 399 link->reverse->flags = link->flags; 399 400 400 401 return 0; ··· 416 417 */ 417 418 int __media_entity_setup_link(struct media_link *link, u32 flags) 418 419 { 420 + const u32 mask = MEDIA_LNK_FL_ENABLED; 419 421 struct media_device *mdev; 420 422 struct media_entity *source, *sink; 421 423 int ret = -EBUSY; 422 424 423 425 if (link == NULL) 426 + return -EINVAL; 427 + 428 + /* The non-modifiable link flags must not be modified. */ 429 + if ((link->flags & ~mask) != (flags & ~mask)) 424 430 return -EINVAL; 425 431 426 432 if (link->flags & MEDIA_LNK_FL_IMMUTABLE)
+1 -1
drivers/media/radio/radio-sf16fmr2.c
··· 170 170 return 0; 171 171 } 172 172 173 - /* !!! not tested, in my card this does't work !!! */ 173 + /* !!! not tested, in my card this doesn't work !!! */ 174 174 static int fmr2_setvolume(struct fmr2 *dev) 175 175 { 176 176 int vol[16] = { 0x021, 0x084, 0x090, 0x104,
+1 -1
drivers/media/radio/saa7706h.c
··· 376 376 v4l_info(client, "chip found @ 0x%02x (%s)\n", 377 377 client->addr << 1, client->adapter->name); 378 378 379 - state = kmalloc(sizeof(struct saa7706h_state), GFP_KERNEL); 379 + state = kzalloc(sizeof(struct saa7706h_state), GFP_KERNEL); 380 380 if (state == NULL) 381 381 return -ENOMEM; 382 382 sd = &state->sd;
+1 -1
drivers/media/radio/tef6862.c
··· 176 176 v4l_info(client, "chip found @ 0x%02x (%s)\n", 177 177 client->addr << 1, client->adapter->name); 178 178 179 - state = kmalloc(sizeof(struct tef6862_state), GFP_KERNEL); 179 + state = kzalloc(sizeof(struct tef6862_state), GFP_KERNEL); 180 180 if (state == NULL) 181 181 return -ENOMEM; 182 182 state->freq = TEF6862_LO_FREQ;
+27 -4
drivers/media/rc/imon.c
··· 46 46 #define MOD_AUTHOR "Jarod Wilson <jarod@wilsonet.com>" 47 47 #define MOD_DESC "Driver for SoundGraph iMON MultiMedia IR/Display" 48 48 #define MOD_NAME "imon" 49 - #define MOD_VERSION "0.9.2" 49 + #define MOD_VERSION "0.9.3" 50 50 51 51 #define DISPLAY_MINOR_BASE 144 52 52 #define DEVICE_NAME "lcd%d" ··· 460 460 } 461 461 462 462 /** 463 - * Sends a packet to the device -- this function must be called 464 - * with ictx->lock held. 463 + * Sends a packet to the device -- this function must be called with 464 + * ictx->lock held, or its unlock/lock sequence while waiting for tx 465 + * to complete can/will lead to a deadlock. 465 466 */ 466 467 static int send_packet(struct imon_context *ictx) 467 468 { ··· 992 991 * the iMON remotes, and those used by the Windows MCE remotes (which is 993 992 * really just RC-6), but only one or the other at a time, as the signals 994 993 * are decoded onboard the receiver. 994 + * 995 + * This function gets called two different ways, one way is from 996 + * rc_register_device, for initial protocol selection/setup, and the other is 997 + * via a userspace-initiated protocol change request, either by direct sysfs 998 + * prodding or by something like ir-keytable. In the rc_register_device case, 999 + * the imon context lock is already held, but when initiated from userspace, 1000 + * it is not, so we must acquire it prior to calling send_packet, which 1001 + * requires that the lock is held. 
995 1002 */ 996 1003 static int imon_ir_change_protocol(struct rc_dev *rc, u64 rc_type) 997 1004 { 998 1005 int retval; 999 1006 struct imon_context *ictx = rc->priv; 1000 1007 struct device *dev = ictx->dev; 1008 + bool unlock = false; 1001 1009 unsigned char ir_proto_packet[] = { 1002 1010 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x86 }; 1003 1011 ··· 1039 1029 1040 1030 memcpy(ictx->usb_tx_buf, &ir_proto_packet, sizeof(ir_proto_packet)); 1041 1031 1032 + if (!mutex_is_locked(&ictx->lock)) { 1033 + unlock = true; 1034 + mutex_lock(&ictx->lock); 1035 + } 1036 + 1042 1037 retval = send_packet(ictx); 1043 1038 if (retval) 1044 1039 goto out; ··· 1052 1037 ictx->pad_mouse = false; 1053 1038 1054 1039 out: 1040 + if (unlock) 1041 + mutex_unlock(&ictx->lock); 1042 + 1055 1043 return retval; 1056 1044 } 1057 1045 ··· 2152 2134 goto rdev_setup_failed; 2153 2135 } 2154 2136 2137 + mutex_unlock(&ictx->lock); 2155 2138 return ictx; 2156 2139 2157 2140 rdev_setup_failed: ··· 2224 2205 goto urb_submit_failed; 2225 2206 } 2226 2207 2208 + mutex_unlock(&ictx->lock); 2227 2209 return ictx; 2228 2210 2229 2211 urb_submit_failed: ··· 2319 2299 usb_set_intfdata(interface, ictx); 2320 2300 2321 2301 if (ifnum == 0) { 2302 + mutex_lock(&ictx->lock); 2303 + 2322 2304 if (product == 0xffdc && ictx->rf_device) { 2323 2305 sysfs_err = sysfs_create_group(&interface->dev.kobj, 2324 2306 &imon_rf_attr_group); ··· 2331 2309 2332 2310 if (ictx->display_supported) 2333 2311 imon_init_display(ictx, interface); 2312 + 2313 + mutex_unlock(&ictx->lock); 2334 2314 } 2335 2315 2336 2316 dev_info(dev, "iMON device (%04x:%04x, intf%d) on " 2337 2317 "usb<%d:%d> initialized\n", vendor, product, ifnum, 2338 2318 usbdev->bus->busnum, usbdev->devnum); 2339 2319 2340 - mutex_unlock(&ictx->lock); 2341 2320 mutex_unlock(&driver_lock); 2342 2321 2343 2322 return 0;
+1
drivers/media/rc/ite-cir.c
··· 36 36 #include <linux/io.h> 37 37 #include <linux/interrupt.h> 38 38 #include <linux/sched.h> 39 + #include <linux/delay.h> 39 40 #include <linux/slab.h> 40 41 #include <linux/input.h> 41 42 #include <linux/bitops.h>
+2
drivers/media/rc/mceusb.c
··· 220 220 { USB_DEVICE(VENDOR_PHILIPS, 0x206c) }, 221 221 /* Philips/Spinel plus IR transceiver for ASUS */ 222 222 { USB_DEVICE(VENDOR_PHILIPS, 0x2088) }, 223 + /* Philips IR transceiver (Dell branded) */ 224 + { USB_DEVICE(VENDOR_PHILIPS, 0x2093) }, 223 225 /* Realtek MCE IR Receiver and card reader */ 224 226 { USB_DEVICE(VENDOR_REALTEK, 0x0161), 225 227 .driver_info = MULTIFUNCTION },
+3 -1
drivers/media/rc/rc-main.c
··· 707 707 { 708 708 struct rc_dev *rdev = input_get_drvdata(idev); 709 709 710 - rdev->close(rdev); 710 + if (rdev) 711 + rdev->close(rdev); 711 712 } 712 713 713 714 /* class for /sys/class/rc */ ··· 734 733 { RC_TYPE_SONY, "sony" }, 735 734 { RC_TYPE_RC5_SZ, "rc-5-sz" }, 736 735 { RC_TYPE_LIRC, "lirc" }, 736 + { RC_TYPE_OTHER, "other" }, 737 737 }; 738 738 739 739 #define PROTO_NONE "none"
+1 -1
drivers/media/video/Kconfig
··· 875 875 config VIDEO_MX3 876 876 tristate "i.MX3x Camera Sensor Interface driver" 877 877 depends on VIDEO_DEV && MX3_IPU && SOC_CAMERA 878 - select VIDEOBUF_DMA_CONTIG 878 + select VIDEOBUF2_DMA_CONTIG 879 879 select MX3_VIDEO 880 880 ---help--- 881 881 This is a v4l2 driver for the i.MX3x Camera Sensor Interface
+9 -1
drivers/media/video/cx18/cx18-streams.c
··· 350 350 351 351 /* No struct video_device, but can have buffers allocated */ 352 352 if (type == CX18_ENC_STREAM_TYPE_IDX) { 353 + /* If the module params didn't inhibit IDX ... */ 353 354 if (cx->stream_buffers[type] != 0) { 354 355 cx->stream_buffers[type] = 0; 355 - cx18_stream_free(&cx->streams[type]); 356 + /* 357 + * Before calling cx18_stream_free(), 358 + * check if the IDX stream was actually set up. 359 + * Needed, since the cx18_probe() error path 360 + * exits through here as well as normal clean up 361 + */ 362 + if (cx->streams[type].buffers != 0) 363 + cx18_stream_free(&cx->streams[type]); 356 364 } 357 365 continue; 358 366 }
+1
drivers/media/video/cx23885/Kconfig
··· 22 22 select DVB_CX24116 if !DVB_FE_CUSTOMISE 23 23 select DVB_STV0900 if !DVB_FE_CUSTOMISE 24 24 select DVB_DS3000 if !DVB_FE_CUSTOMISE 25 + select DVB_STV0367 if !DVB_FE_CUSTOMISE 25 26 select MEDIA_TUNER_MT2131 if !MEDIA_TUNER_CUSTOMISE 26 27 select MEDIA_TUNER_XC2028 if !MEDIA_TUNER_CUSTOMISE 27 28 select MEDIA_TUNER_TDA8290 if !MEDIA_TUNER_CUSTOMISE
+1 -1
drivers/media/video/cx88/cx88-input.c
··· 524 524 for (todo = 32; todo > 0; todo -= bits) { 525 525 ev.pulse = samples & 0x80000000 ? false : true; 526 526 bits = min(todo, 32U - fls(ev.pulse ? samples : ~samples)); 527 - ev.duration = (bits * NSEC_PER_SEC) / (1000 * ir_samplerate); 527 + ev.duration = (bits * (NSEC_PER_SEC / 1000)) / ir_samplerate; 528 528 ir_raw_event_store_with_filter(ir->dev, &ev); 529 529 samples <<= bits; 530 530 }
+1 -1
drivers/media/video/imx074.c
··· 298 298 static int imx074_set_bus_param(struct soc_camera_device *icd, 299 299 unsigned long flags) 300 300 { 301 - return -1; 301 + return -EINVAL; 302 302 } 303 303 304 304 static struct soc_camera_ops imx074_ops = {
+1 -1
drivers/media/video/m52790.c
··· 174 174 v4l_info(client, "chip found @ 0x%x (%s)\n", 175 175 client->addr << 1, client->adapter->name); 176 176 177 - state = kmalloc(sizeof(struct m52790_state), GFP_KERNEL); 177 + state = kzalloc(sizeof(struct m52790_state), GFP_KERNEL); 178 178 if (state == NULL) 179 179 return -ENOMEM; 180 180
+25 -9
drivers/media/video/omap3isp/isp.c
··· 215 215 } 216 216 217 217 switch (xclksel) { 218 - case 0: 218 + case ISP_XCLK_A: 219 219 isp_reg_clr_set(isp, OMAP3_ISP_IOMEM_MAIN, ISP_TCTRL_CTRL, 220 220 ISPTCTRL_CTRL_DIVA_MASK, 221 221 divisor << ISPTCTRL_CTRL_DIVA_SHIFT); 222 222 dev_dbg(isp->dev, "isp_set_xclk(): cam_xclka set to %d Hz\n", 223 223 currentxclk); 224 224 break; 225 - case 1: 225 + case ISP_XCLK_B: 226 226 isp_reg_clr_set(isp, OMAP3_ISP_IOMEM_MAIN, ISP_TCTRL_CTRL, 227 227 ISPTCTRL_CTRL_DIVB_MASK, 228 228 divisor << ISPTCTRL_CTRL_DIVB_SHIFT); 229 229 dev_dbg(isp->dev, "isp_set_xclk(): cam_xclkb set to %d Hz\n", 230 230 currentxclk); 231 231 break; 232 + case ISP_XCLK_NONE: 232 233 default: 233 234 omap3isp_put(isp); 234 235 dev_dbg(isp->dev, "ISP_ERR: isp_set_xclk(): Invalid requested " ··· 238 237 } 239 238 240 239 /* Do we go from stable whatever to clock? */ 241 - if (divisor >= 2 && isp->xclk_divisor[xclksel] < 2) 240 + if (divisor >= 2 && isp->xclk_divisor[xclksel - 1] < 2) 242 241 omap3isp_get(isp); 243 242 /* Stopping the clock. 
*/ 244 - else if (divisor < 2 && isp->xclk_divisor[xclksel] >= 2) 243 + else if (divisor < 2 && isp->xclk_divisor[xclksel - 1] >= 2) 245 244 omap3isp_put(isp); 246 245 247 - isp->xclk_divisor[xclksel] = divisor; 246 + isp->xclk_divisor[xclksel - 1] = divisor; 248 247 249 248 omap3isp_put(isp); 250 249 ··· 286 285 */ 287 286 void omap3isp_configure_bridge(struct isp_device *isp, 288 287 enum ccdc_input_entity input, 289 - const struct isp_parallel_platform_data *pdata) 288 + const struct isp_parallel_platform_data *pdata, 289 + unsigned int shift) 290 290 { 291 291 u32 ispctrl_val; 292 292 ··· 300 298 switch (input) { 301 299 case CCDC_INPUT_PARALLEL: 302 300 ispctrl_val |= ISPCTRL_PAR_SER_CLK_SEL_PARALLEL; 303 - ispctrl_val |= pdata->data_lane_shift << ISPCTRL_SHIFT_SHIFT; 304 301 ispctrl_val |= pdata->clk_pol << ISPCTRL_PAR_CLK_POL_SHIFT; 305 302 ispctrl_val |= pdata->bridge << ISPCTRL_PAR_BRIDGE_SHIFT; 303 + shift += pdata->data_lane_shift * 2; 306 304 break; 307 305 308 306 case CCDC_INPUT_CSI2A: ··· 320 318 default: 321 319 return; 322 320 } 321 + 322 + ispctrl_val |= ((shift/2) << ISPCTRL_SHIFT_SHIFT) & ISPCTRL_SHIFT_MASK; 323 323 324 324 ispctrl_val &= ~ISPCTRL_SYNC_DETECT_MASK; 325 325 ispctrl_val |= ISPCTRL_SYNC_DETECT_VSRISE; ··· 662 658 663 659 /* Apply power change to connected non-nodes. */ 664 660 ret = isp_pipeline_pm_power(entity, change); 661 + if (ret < 0) 662 + entity->use_count -= change; 665 663 666 664 mutex_unlock(&entity->parent->graph_mutex); 667 665 ··· 878 872 } 879 873 } 880 874 875 + if (failure < 0) 876 + isp->needs_reset = true; 877 + 881 878 return failure; 882 879 } 883 880 ··· 893 884 * single-shot or continuous mode. 894 885 * 895 886 * Return 0 if successful, or the return value of the failed video::s_stream 896 - * operation otherwise. 887 + * operation otherwise. The pipeline state is not updated when the operation 888 + * fails, except when stopping the pipeline. 
897 889 */ 898 890 int omap3isp_pipeline_set_stream(struct isp_pipeline *pipe, 899 891 enum isp_pipeline_stream_state state) ··· 905 895 ret = isp_pipeline_disable(pipe); 906 896 else 907 897 ret = isp_pipeline_enable(pipe, state); 908 - pipe->stream_state = state; 898 + 899 + if (ret == 0 || state == ISP_PIPELINE_STREAM_STOPPED) 900 + pipe->stream_state = state; 909 901 910 902 return ret; 911 903 } ··· 1493 1481 if (--isp->ref_count == 0) { 1494 1482 isp_disable_interrupts(isp); 1495 1483 isp_save_ctx(isp); 1484 + if (isp->needs_reset) { 1485 + isp_reset(isp); 1486 + isp->needs_reset = false; 1487 + } 1496 1488 isp_disable_clocks(isp); 1497 1489 } 1498 1490 mutex_unlock(&isp->isp_mutex);
+6 -6
drivers/media/video/omap3isp/isp.h
··· 132 132 133 133 /** 134 134 * struct isp_parallel_platform_data - Parallel interface platform data 135 - * @width: Parallel bus width in bits (8, 10, 11 or 12) 136 135 * @data_lane_shift: Data lane shifter 137 136 * 0 - CAMEXT[13:0] -> CAM[13:0] 138 137 * 1 - CAMEXT[13:2] -> CAM[11:0] ··· 145 146 * ISPCTRL_PAR_BRIDGE_BENDIAN - Big endian 146 147 */ 147 148 struct isp_parallel_platform_data { 148 - unsigned int width; 149 149 unsigned int data_lane_shift:2; 150 150 unsigned int clk_pol:1; 151 151 unsigned int bridge:4; ··· 260 262 /* ISP Obj */ 261 263 spinlock_t stat_lock; /* common lock for statistic drivers */ 262 264 struct mutex isp_mutex; /* For handling ref_count field */ 265 + bool needs_reset; 263 266 int has_context; 264 267 int ref_count; 265 268 unsigned int autoidle; ··· 310 311 enum isp_pipeline_stream_state state); 311 312 void omap3isp_configure_bridge(struct isp_device *isp, 312 313 enum ccdc_input_entity input, 313 - const struct isp_parallel_platform_data *pdata); 314 + const struct isp_parallel_platform_data *pdata, 315 + unsigned int shift); 314 316 315 - #define ISP_XCLK_NONE -1 316 - #define ISP_XCLK_A 0 317 - #define ISP_XCLK_B 1 317 + #define ISP_XCLK_NONE 0 318 + #define ISP_XCLK_A 1 319 + #define ISP_XCLK_B 2 318 320 319 321 struct isp_device *omap3isp_get(struct isp_device *isp); 320 322 void omap3isp_put(struct isp_device *isp);
+30 -7
drivers/media/video/omap3isp/ispccdc.c
··· 43 43 44 44 static const unsigned int ccdc_fmts[] = { 45 45 V4L2_MBUS_FMT_Y8_1X8, 46 + V4L2_MBUS_FMT_Y10_1X10, 47 + V4L2_MBUS_FMT_Y12_1X12, 48 + V4L2_MBUS_FMT_SGRBG8_1X8, 49 + V4L2_MBUS_FMT_SRGGB8_1X8, 50 + V4L2_MBUS_FMT_SBGGR8_1X8, 51 + V4L2_MBUS_FMT_SGBRG8_1X8, 46 52 V4L2_MBUS_FMT_SGRBG10_1X10, 47 53 V4L2_MBUS_FMT_SRGGB10_1X10, 48 54 V4L2_MBUS_FMT_SBGGR10_1X10, ··· 1116 1110 struct isp_parallel_platform_data *pdata = NULL; 1117 1111 struct v4l2_subdev *sensor; 1118 1112 struct v4l2_mbus_framefmt *format; 1113 + const struct isp_format_info *fmt_info; 1114 + struct v4l2_subdev_format fmt_src; 1115 + unsigned int depth_out; 1116 + unsigned int depth_in = 0; 1119 1117 struct media_pad *pad; 1120 1118 unsigned long flags; 1119 + unsigned int shift; 1121 1120 u32 syn_mode; 1122 1121 u32 ccdc_pattern; 1123 1122 1124 - if (ccdc->input == CCDC_INPUT_PARALLEL) { 1125 - pad = media_entity_remote_source(&ccdc->pads[CCDC_PAD_SINK]); 1126 - sensor = media_entity_to_v4l2_subdev(pad->entity); 1123 + pad = media_entity_remote_source(&ccdc->pads[CCDC_PAD_SINK]); 1124 + sensor = media_entity_to_v4l2_subdev(pad->entity); 1125 + if (ccdc->input == CCDC_INPUT_PARALLEL) 1127 1126 pdata = &((struct isp_v4l2_subdevs_group *)sensor->host_priv) 1128 1127 ->bus.parallel; 1128 + 1129 + /* Compute shift value for lane shifter to configure the bridge. */ 1130 + fmt_src.pad = pad->index; 1131 + fmt_src.which = V4L2_SUBDEV_FORMAT_ACTIVE; 1132 + if (!v4l2_subdev_call(sensor, pad, get_fmt, NULL, &fmt_src)) { 1133 + fmt_info = omap3isp_video_format_info(fmt_src.format.code); 1134 + depth_in = fmt_info->bpp; 1129 1135 } 1130 1136 1131 - omap3isp_configure_bridge(isp, ccdc->input, pdata); 1137 + fmt_info = omap3isp_video_format_info 1138 + (isp->isp_ccdc.formats[CCDC_PAD_SINK].code); 1139 + depth_out = fmt_info->bpp; 1132 1140 1133 - ccdc->syncif.datsz = pdata ? 
pdata->width : 10; 1141 + shift = depth_in - depth_out; 1142 + omap3isp_configure_bridge(isp, ccdc->input, pdata, shift); 1143 + 1144 + ccdc->syncif.datsz = depth_out; 1134 1145 ccdc_config_sync_if(ccdc, &ccdc->syncif); 1135 1146 1136 1147 /* CCDC_PAD_SINK */ ··· 1361 1338 * @ccdc: Pointer to ISP CCDC device. 1362 1339 * @event: Pointing which event trigger handler 1363 1340 * 1364 - * Return 1 when the event and stopping request combination is satisfyied, 1341 + * Return 1 when the event and stopping request combination is satisfied, 1365 1342 * zero otherwise. 1366 1343 */ 1367 1344 static int __ccdc_handle_stopping(struct isp_ccdc_device *ccdc, u32 event) ··· 1641 1618 1642 1619 ccdc_set_outaddr(ccdc, buffer->isp_addr); 1643 1620 1644 - /* We now have a buffer queued on the output, restart the pipeline in 1621 + /* We now have a buffer queued on the output, restart the pipeline 1645 1622 * on the next CCDC interrupt if running in continuous mode (or when 1646 1623 * starting the stream). 1647 1624 */
+1 -1
drivers/media/video/omap3isp/isppreview.c
··· 755 755 * @configs - pointer to update config structure. 756 756 * @config - return pointer to appropriate structure field. 757 757 * @bit - for which feature to return pointers. 758 - * Return size of coresponding prev_params member 758 + * Return size of corresponding prev_params member 759 759 */ 760 760 static u32 761 761 __preview_get_ptrs(struct prev_params *params, void **param,
+3 -3
drivers/media/video/omap3isp/ispqueue.c
··· 339 339 up_read(&current->mm->mmap_sem); 340 340 341 341 if (ret != buf->npages) { 342 - buf->npages = ret; 342 + buf->npages = ret < 0 ? 0 : ret; 343 343 isp_video_buffer_cleanup(buf); 344 344 return -EFAULT; 345 345 } ··· 408 408 * isp_video_buffer_prepare_vm_flags - Get VMA flags for a userspace address 409 409 * 410 410 * This function locates the VMAs for the buffer's userspace address and checks 411 - * that their flags match. The onlflag that we need to care for at the moment is 412 - * VM_PFNMAP. 411 + * that their flags match. The only flag that we need to care for at the moment 412 + * is VM_PFNMAP. 413 413 * 414 414 * The buffer vm_flags field is set to the first VMA flags. 415 415 *
+60 -15
drivers/media/video/omap3isp/ispresizer.c
··· 714 714 * iw and ih are the input width and height after cropping. Those equations need 715 715 * to be satisfied exactly for the resizer to work correctly. 716 716 * 717 - * Reverting the equations, we can compute the resizing ratios with 717 + * The equations can't be easily reverted, as the >> 8 operation is not linear. 718 + * In addition, not all input sizes can be achieved for a given output size. To 719 + * get the highest input size lower than or equal to the requested input size, 720 + * we need to compute the highest resizing ratio that satisfies the following 721 + * inequality (taking the 4-tap mode width equation as an example) 722 + * 723 + * iw >= (32 * sph + (ow - 1) * hrsz + 16) >> 8 - 7 724 + * 725 + * (where iw is the requested input width) which can be rewritten as 726 + * 727 + * iw - 7 >= (32 * sph + (ow - 1) * hrsz + 16) >> 8 728 + * (iw - 7) << 8 >= 32 * sph + (ow - 1) * hrsz + 16 - b 729 + * ((iw - 7) << 8) + b >= 32 * sph + (ow - 1) * hrsz + 16 730 + * 731 + * where b is the value of the 8 least significant bits of the right hand side 732 + * expression of the last inequality. The highest resizing ratio value will be 733 + * achieved when b is equal to its maximum value of 255. That resizing ratio 734 + * value will still satisfy the original inequality, as b will disappear when 735 + * the expression will be shifted right by 8. 
736 + * 737 + * The reverted the equations thus become 718 738 * 719 739 * - 8-phase, 4-tap mode 720 - * hrsz = ((iw - 7) * 256 - 16 - 32 * sph) / (ow - 1) 721 - * vrsz = ((ih - 4) * 256 - 16 - 32 * spv) / (oh - 1) 740 + * hrsz = ((iw - 7) * 256 + 255 - 16 - 32 * sph) / (ow - 1) 741 + * vrsz = ((ih - 4) * 256 + 255 - 16 - 32 * spv) / (oh - 1) 722 742 * - 4-phase, 7-tap mode 723 - * hrsz = ((iw - 7) * 256 - 32 - 64 * sph) / (ow - 1) 724 - * vrsz = ((ih - 7) * 256 - 32 - 64 * spv) / (oh - 1) 743 + * hrsz = ((iw - 7) * 256 + 255 - 32 - 64 * sph) / (ow - 1) 744 + * vrsz = ((ih - 7) * 256 + 255 - 32 - 64 * spv) / (oh - 1) 725 745 * 726 - * The ratios are integer values, and must be rounded down to ensure that the 727 - * cropped input size is not bigger than the uncropped input size. As the ratio 728 - * in 7-tap mode is always smaller than the ratio in 4-tap mode, we can use the 729 - * 7-tap mode equations to compute a ratio approximation. 746 + * The ratios are integer values, and are rounded down to ensure that the 747 + * cropped input size is not bigger than the uncropped input size. 748 + * 749 + * As the number of phases/taps, used to select the correct equations to compute 750 + * the ratio, depends on the ratio, we start with the 4-tap mode equations to 751 + * compute an approximation of the ratio, and switch to the 7-tap mode equations 752 + * if the approximation is higher than the ratio threshold. 753 + * 754 + * As the 7-tap mode equations will return a ratio smaller than or equal to the 755 + * 4-tap mode equations, the resulting ratio could become lower than or equal to 756 + * the ratio threshold. This 'equations loop' isn't an issue as long as the 757 + * correct equations are used to compute the final input size. Starting with the 758 + * 4-tap mode equations ensure that, in case of values resulting in a 'ratio 759 + * loop', the smallest of the ratio values will be used, never exceeding the 760 + * requested input size. 
730 761 * 731 762 * We first clamp the output size according to the hardware capabilitie to avoid 732 763 * auto-cropping the input more than required to satisfy the TRM equations. The ··· 806 775 unsigned int max_width; 807 776 unsigned int max_height; 808 777 unsigned int width_alignment; 778 + unsigned int width; 779 + unsigned int height; 809 780 810 781 /* 811 782 * Clamp the output height based on the hardware capabilities and ··· 819 786 max_height = min_t(unsigned int, max_height, MAX_OUT_HEIGHT); 820 787 output->height = clamp(output->height, min_height, max_height); 821 788 822 - ratio->vert = ((input->height - 7) * 256 - 32 - 64 * spv) 789 + ratio->vert = ((input->height - 4) * 256 + 255 - 16 - 32 * spv) 823 790 / (output->height - 1); 791 + if (ratio->vert > MID_RESIZE_VALUE) 792 + ratio->vert = ((input->height - 7) * 256 + 255 - 32 - 64 * spv) 793 + / (output->height - 1); 824 794 ratio->vert = clamp_t(unsigned int, ratio->vert, 825 795 MIN_RESIZE_VALUE, MAX_RESIZE_VALUE); 826 796 827 797 if (ratio->vert <= MID_RESIZE_VALUE) { 828 798 upscaled_height = (output->height - 1) * ratio->vert 829 799 + 32 * spv + 16; 830 - input->height = (upscaled_height >> 8) + 4; 800 + height = (upscaled_height >> 8) + 4; 831 801 } else { 832 802 upscaled_height = (output->height - 1) * ratio->vert 833 803 + 64 * spv + 32; 834 - input->height = (upscaled_height >> 8) + 7; 804 + height = (upscaled_height >> 8) + 7; 835 805 } 836 806 837 807 /* ··· 890 854 max_width & ~(width_alignment - 1)); 891 855 output->width = ALIGN(output->width, width_alignment); 892 856 893 - ratio->horz = ((input->width - 7) * 256 - 32 - 64 * sph) 857 + ratio->horz = ((input->width - 7) * 256 + 255 - 16 - 32 * sph) 894 858 / (output->width - 1); 859 + if (ratio->horz > MID_RESIZE_VALUE) 860 + ratio->horz = ((input->width - 7) * 256 + 255 - 32 - 64 * sph) 861 + / (output->width - 1); 895 862 ratio->horz = clamp_t(unsigned int, ratio->horz, 896 863 MIN_RESIZE_VALUE, MAX_RESIZE_VALUE); 897 864 898 
865 if (ratio->horz <= MID_RESIZE_VALUE) { 899 866 upscaled_width = (output->width - 1) * ratio->horz 900 867 + 32 * sph + 16; 901 - input->width = (upscaled_width >> 8) + 7; 868 + width = (upscaled_width >> 8) + 7; 902 869 } else { 903 870 upscaled_width = (output->width - 1) * ratio->horz 904 871 + 64 * sph + 32; 905 - input->width = (upscaled_width >> 8) + 7; 872 + width = (upscaled_width >> 8) + 7; 906 873 } 874 + 875 + /* Center the new crop rectangle. */ 876 + input->left += (input->width - width) / 2; 877 + input->top += (input->height - height) / 2; 878 + input->width = width; 879 + input->height = height; 907 880 } 908 881 909 882 /*
+3 -3
drivers/media/video/omap3isp/ispstat.h
··· 131 131 struct ispstat_generic_config { 132 132 /* 133 133 * Fields must be in the same order as in: 134 - * - isph3a_aewb_config 135 - * - isph3a_af_config 136 - * - isphist_config 134 + * - omap3isp_h3a_aewb_config 135 + * - omap3isp_h3a_af_config 136 + * - omap3isp_hist_config 137 137 */ 138 138 u32 buf_size; 139 139 u16 config_counter;
+94 -14
drivers/media/video/omap3isp/ispvideo.c
··· 47 47 48 48 static struct isp_format_info formats[] = { 49 49 { V4L2_MBUS_FMT_Y8_1X8, V4L2_MBUS_FMT_Y8_1X8, 50 - V4L2_MBUS_FMT_Y8_1X8, V4L2_PIX_FMT_GREY, 8, }, 50 + V4L2_MBUS_FMT_Y8_1X8, V4L2_MBUS_FMT_Y8_1X8, 51 + V4L2_PIX_FMT_GREY, 8, }, 52 + { V4L2_MBUS_FMT_Y10_1X10, V4L2_MBUS_FMT_Y10_1X10, 53 + V4L2_MBUS_FMT_Y10_1X10, V4L2_MBUS_FMT_Y8_1X8, 54 + V4L2_PIX_FMT_Y10, 10, }, 55 + { V4L2_MBUS_FMT_Y12_1X12, V4L2_MBUS_FMT_Y10_1X10, 56 + V4L2_MBUS_FMT_Y12_1X12, V4L2_MBUS_FMT_Y8_1X8, 57 + V4L2_PIX_FMT_Y12, 12, }, 58 + { V4L2_MBUS_FMT_SBGGR8_1X8, V4L2_MBUS_FMT_SBGGR8_1X8, 59 + V4L2_MBUS_FMT_SBGGR8_1X8, V4L2_MBUS_FMT_SBGGR8_1X8, 60 + V4L2_PIX_FMT_SBGGR8, 8, }, 61 + { V4L2_MBUS_FMT_SGBRG8_1X8, V4L2_MBUS_FMT_SGBRG8_1X8, 62 + V4L2_MBUS_FMT_SGBRG8_1X8, V4L2_MBUS_FMT_SGBRG8_1X8, 63 + V4L2_PIX_FMT_SGBRG8, 8, }, 64 + { V4L2_MBUS_FMT_SGRBG8_1X8, V4L2_MBUS_FMT_SGRBG8_1X8, 65 + V4L2_MBUS_FMT_SGRBG8_1X8, V4L2_MBUS_FMT_SGRBG8_1X8, 66 + V4L2_PIX_FMT_SGRBG8, 8, }, 67 + { V4L2_MBUS_FMT_SRGGB8_1X8, V4L2_MBUS_FMT_SRGGB8_1X8, 68 + V4L2_MBUS_FMT_SRGGB8_1X8, V4L2_MBUS_FMT_SRGGB8_1X8, 69 + V4L2_PIX_FMT_SRGGB8, 8, }, 51 70 { V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8, V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8, 52 - V4L2_MBUS_FMT_SGRBG10_1X10, V4L2_PIX_FMT_SGRBG10DPCM8, 8, }, 71 + V4L2_MBUS_FMT_SGRBG10_1X10, 0, 72 + V4L2_PIX_FMT_SGRBG10DPCM8, 8, }, 53 73 { V4L2_MBUS_FMT_SBGGR10_1X10, V4L2_MBUS_FMT_SBGGR10_1X10, 54 - V4L2_MBUS_FMT_SBGGR10_1X10, V4L2_PIX_FMT_SBGGR10, 10, }, 74 + V4L2_MBUS_FMT_SBGGR10_1X10, V4L2_MBUS_FMT_SBGGR8_1X8, 75 + V4L2_PIX_FMT_SBGGR10, 10, }, 55 76 { V4L2_MBUS_FMT_SGBRG10_1X10, V4L2_MBUS_FMT_SGBRG10_1X10, 56 - V4L2_MBUS_FMT_SGBRG10_1X10, V4L2_PIX_FMT_SGBRG10, 10, }, 77 + V4L2_MBUS_FMT_SGBRG10_1X10, V4L2_MBUS_FMT_SGBRG8_1X8, 78 + V4L2_PIX_FMT_SGBRG10, 10, }, 57 79 { V4L2_MBUS_FMT_SGRBG10_1X10, V4L2_MBUS_FMT_SGRBG10_1X10, 58 - V4L2_MBUS_FMT_SGRBG10_1X10, V4L2_PIX_FMT_SGRBG10, 10, }, 80 + V4L2_MBUS_FMT_SGRBG10_1X10, V4L2_MBUS_FMT_SGRBG8_1X8, 81 + V4L2_PIX_FMT_SGRBG10, 10, }, 59 82 { 
V4L2_MBUS_FMT_SRGGB10_1X10, V4L2_MBUS_FMT_SRGGB10_1X10, 60 - V4L2_MBUS_FMT_SRGGB10_1X10, V4L2_PIX_FMT_SRGGB10, 10, }, 83 + V4L2_MBUS_FMT_SRGGB10_1X10, V4L2_MBUS_FMT_SRGGB8_1X8, 84 + V4L2_PIX_FMT_SRGGB10, 10, }, 61 85 { V4L2_MBUS_FMT_SBGGR12_1X12, V4L2_MBUS_FMT_SBGGR10_1X10, 62 - V4L2_MBUS_FMT_SBGGR12_1X12, V4L2_PIX_FMT_SBGGR12, 12, }, 86 + V4L2_MBUS_FMT_SBGGR12_1X12, V4L2_MBUS_FMT_SBGGR8_1X8, 87 + V4L2_PIX_FMT_SBGGR12, 12, }, 63 88 { V4L2_MBUS_FMT_SGBRG12_1X12, V4L2_MBUS_FMT_SGBRG10_1X10, 64 - V4L2_MBUS_FMT_SGBRG12_1X12, V4L2_PIX_FMT_SGBRG12, 12, }, 89 + V4L2_MBUS_FMT_SGBRG12_1X12, V4L2_MBUS_FMT_SGBRG8_1X8, 90 + V4L2_PIX_FMT_SGBRG12, 12, }, 65 91 { V4L2_MBUS_FMT_SGRBG12_1X12, V4L2_MBUS_FMT_SGRBG10_1X10, 66 - V4L2_MBUS_FMT_SGRBG12_1X12, V4L2_PIX_FMT_SGRBG12, 12, }, 92 + V4L2_MBUS_FMT_SGRBG12_1X12, V4L2_MBUS_FMT_SGRBG8_1X8, 93 + V4L2_PIX_FMT_SGRBG12, 12, }, 67 94 { V4L2_MBUS_FMT_SRGGB12_1X12, V4L2_MBUS_FMT_SRGGB10_1X10, 68 - V4L2_MBUS_FMT_SRGGB12_1X12, V4L2_PIX_FMT_SRGGB12, 12, }, 95 + V4L2_MBUS_FMT_SRGGB12_1X12, V4L2_MBUS_FMT_SRGGB8_1X8, 96 + V4L2_PIX_FMT_SRGGB12, 12, }, 69 97 { V4L2_MBUS_FMT_UYVY8_1X16, V4L2_MBUS_FMT_UYVY8_1X16, 70 - V4L2_MBUS_FMT_UYVY8_1X16, V4L2_PIX_FMT_UYVY, 16, }, 98 + V4L2_MBUS_FMT_UYVY8_1X16, 0, 99 + V4L2_PIX_FMT_UYVY, 16, }, 71 100 { V4L2_MBUS_FMT_YUYV8_1X16, V4L2_MBUS_FMT_YUYV8_1X16, 72 - V4L2_MBUS_FMT_YUYV8_1X16, V4L2_PIX_FMT_YUYV, 16, }, 101 + V4L2_MBUS_FMT_YUYV8_1X16, 0, 102 + V4L2_PIX_FMT_YUYV, 16, }, 73 103 }; 74 104 75 105 const struct isp_format_info * ··· 113 83 } 114 84 115 85 return NULL; 86 + } 87 + 88 + /* 89 + * Decide whether desired output pixel code can be obtained with 90 + * the lane shifter by shifting the input pixel code. 
91 + * @in: input pixelcode to shifter 92 + * @out: output pixelcode from shifter 93 + * @additional_shift: # of bits the sensor's LSB is offset from CAMEXT[0] 94 + * 95 + * return true if the combination is possible 96 + * return false otherwise 97 + */ 98 + static bool isp_video_is_shiftable(enum v4l2_mbus_pixelcode in, 99 + enum v4l2_mbus_pixelcode out, 100 + unsigned int additional_shift) 101 + { 102 + const struct isp_format_info *in_info, *out_info; 103 + 104 + if (in == out) 105 + return true; 106 + 107 + in_info = omap3isp_video_format_info(in); 108 + out_info = omap3isp_video_format_info(out); 109 + 110 + if ((in_info->flavor == 0) || (out_info->flavor == 0)) 111 + return false; 112 + 113 + if (in_info->flavor != out_info->flavor) 114 + return false; 115 + 116 + return in_info->bpp - out_info->bpp + additional_shift <= 6; 116 117 } 117 118 118 119 /* ··· 296 235 return -EPIPE; 297 236 298 237 while (1) { 238 + unsigned int shifter_link; 299 239 /* Retrieve the sink format */ 300 240 pad = &subdev->entity.pads[0]; 301 241 if (!(pad->flags & MEDIA_PAD_FL_SINK)) ··· 325 263 return -ENOSPC; 326 264 } 327 265 266 + /* If sink pad is on CCDC, the link has the lane shifter 267 + * in the middle of it. 
*/ 268 + shifter_link = subdev == &isp->isp_ccdc.subdev; 269 + 328 270 /* Retrieve the source format */ 329 271 pad = media_entity_remote_source(pad); 330 272 if (pad == NULL || ··· 344 278 return -EPIPE; 345 279 346 280 /* Check if the two ends match */ 347 - if (fmt_source.format.code != fmt_sink.format.code || 348 - fmt_source.format.width != fmt_sink.format.width || 281 + if (fmt_source.format.width != fmt_sink.format.width || 349 282 fmt_source.format.height != fmt_sink.format.height) 283 + return -EPIPE; 284 + 285 + if (shifter_link) { 286 + unsigned int parallel_shift = 0; 287 + if (isp->isp_ccdc.input == CCDC_INPUT_PARALLEL) { 288 + struct isp_parallel_platform_data *pdata = 289 + &((struct isp_v4l2_subdevs_group *) 290 + subdev->host_priv)->bus.parallel; 291 + parallel_shift = pdata->data_lane_shift * 2; 292 + } 293 + if (!isp_video_is_shiftable(fmt_source.format.code, 294 + fmt_sink.format.code, 295 + parallel_shift)) 296 + return -EPIPE; 297 + } else if (fmt_source.format.code != fmt_sink.format.code) 350 298 return -EPIPE; 351 299 } 352 300
+3
drivers/media/video/omap3isp/ispvideo.h
··· 49 49 * bits. Identical to @code if the format is 10 bits wide or less. 50 50 * @uncompressed: V4L2 media bus format code for the corresponding uncompressed 51 51 * format. Identical to @code if the format is not DPCM compressed. 52 + * @flavor: V4L2 media bus format code for the same pixel layout but 53 + * shifted to be 8 bits per pixel. =0 if format is not shiftable. 52 54 * @pixelformat: V4L2 pixel format FCC identifier 53 55 * @bpp: Bits per pixel 54 56 */ ··· 58 56 enum v4l2_mbus_pixelcode code; 59 57 enum v4l2_mbus_pixelcode truncated; 60 58 enum v4l2_mbus_pixelcode uncompressed; 59 + enum v4l2_mbus_pixelcode flavor; 61 60 u32 pixelformat; 62 61 unsigned int bpp; 63 62 };
+5 -3
drivers/media/video/s5p-fimc/fimc-capture.c
··· 527 527 if (ret) 528 528 return ret; 529 529 530 - if (vb2_is_streaming(&fimc->vid_cap.vbq) || fimc_capture_active(fimc)) 530 + if (vb2_is_busy(&fimc->vid_cap.vbq) || fimc_capture_active(fimc)) 531 531 return -EBUSY; 532 532 533 533 frame = &ctx->d_frame; ··· 539 539 return -EINVAL; 540 540 } 541 541 542 - for (i = 0; i < frame->fmt->colplanes; i++) 543 - frame->payload[i] = pix->plane_fmt[i].bytesperline * pix->height; 542 + for (i = 0; i < frame->fmt->colplanes; i++) { 543 + frame->payload[i] = 544 + (pix->width * pix->height * frame->fmt->depth[i]) >> 3; 545 + } 544 546 545 547 /* Output DMA frame pixel size and offsets. */ 546 548 frame->f_width = pix->plane_fmt[0].bytesperline * 8
+47 -27
drivers/media/video/s5p-fimc/fimc-core.c
··· 361 361 { 362 362 struct fimc_vid_cap *cap = &fimc->vid_cap; 363 363 struct fimc_vid_buffer *v_buf; 364 + struct timeval *tv; 365 + struct timespec ts; 364 366 365 367 if (!list_empty(&cap->active_buf_q) && 366 368 test_bit(ST_CAPT_RUN, &fimc->state)) { 369 + ktime_get_real_ts(&ts); 370 + 367 371 v_buf = active_queue_pop(cap); 372 + 373 + tv = &v_buf->vb.v4l2_buf.timestamp; 374 + tv->tv_sec = ts.tv_sec; 375 + tv->tv_usec = ts.tv_nsec / NSEC_PER_USEC; 376 + v_buf->vb.v4l2_buf.sequence = cap->frame_count++; 377 + 368 378 vb2_buffer_done(&v_buf->vb, VB2_BUF_STATE_DONE); 369 379 } 370 380 ··· 768 758 mutex_unlock(&ctx->fimc_dev->lock); 769 759 } 770 760 771 - struct vb2_ops fimc_qops = { 761 + static struct vb2_ops fimc_qops = { 772 762 .queue_setup = fimc_queue_setup, 773 763 .buf_prepare = fimc_buf_prepare, 774 764 .buf_queue = fimc_buf_queue, ··· 937 927 pix->num_planes = fmt->memplanes; 938 928 pix->colorspace = V4L2_COLORSPACE_JPEG; 939 929 930 + 940 931 for (i = 0; i < pix->num_planes; ++i) { 941 - int bpl = pix->plane_fmt[i].bytesperline; 932 + u32 bpl = pix->plane_fmt[i].bytesperline; 933 + u32 *sizeimage = &pix->plane_fmt[i].sizeimage; 942 934 943 - dbg("[%d] bpl: %d, depth: %d, w: %d, h: %d", 944 - i, bpl, fmt->depth[i], pix->width, pix->height); 935 + if (fmt->colplanes > 1 && (bpl == 0 || bpl < pix->width)) 936 + bpl = pix->width; /* Planar */ 945 937 946 - if (!bpl || (bpl * 8 / fmt->depth[i]) > pix->width) 947 - bpl = (pix->width * fmt->depth[0]) >> 3; 938 + if (fmt->colplanes == 1 && /* Packed */ 939 + (bpl == 0 || ((bpl * 8) / fmt->depth[i]) < pix->width)) 940 + bpl = (pix->width * fmt->depth[0]) / 8; 948 941 949 - if (!pix->plane_fmt[i].sizeimage) 950 - pix->plane_fmt[i].sizeimage = pix->height * bpl; 942 + if (i == 0) /* Same bytesperline for each plane. 
*/ 943 + mod_x = bpl; 951 944 952 - pix->plane_fmt[i].bytesperline = bpl; 953 - 954 - dbg("[%d]: bpl: %d, sizeimage: %d", 955 - i, pix->plane_fmt[i].bytesperline, 956 - pix->plane_fmt[i].sizeimage); 945 + pix->plane_fmt[i].bytesperline = mod_x; 946 + *sizeimage = (pix->width * pix->height * fmt->depth[i]) / 8; 957 947 } 958 948 959 949 return 0; ··· 975 965 976 966 vq = v4l2_m2m_get_vq(ctx->m2m_ctx, f->type); 977 967 978 - if (vb2_is_streaming(vq)) { 968 + if (vb2_is_busy(vq)) { 979 969 v4l2_err(&fimc->m2m.v4l2_dev, "queue (%d) busy\n", f->type); 980 970 return -EBUSY; 981 971 } ··· 995 985 if (!frame->fmt) 996 986 return -EINVAL; 997 987 998 - for (i = 0; i < frame->fmt->colplanes; i++) 999 - frame->payload[i] = pix->plane_fmt[i].bytesperline * pix->height; 988 + for (i = 0; i < frame->fmt->colplanes; i++) { 989 + frame->payload[i] = 990 + (pix->width * pix->height * frame->fmt->depth[i]) / 8; 991 + } 1000 992 1001 993 frame->f_width = pix->plane_fmt[0].bytesperline * 8 / 1002 994 frame->fmt->depth[0]; ··· 1762 1750 } 1763 1751 1764 1752 /* Image pixel limits, similar across several FIMC HW revisions. 
*/ 1765 - static struct fimc_pix_limit s5p_pix_limit[3] = { 1753 + static struct fimc_pix_limit s5p_pix_limit[4] = { 1766 1754 [0] = { 1767 1755 .scaler_en_w = 3264, 1768 1756 .scaler_dis_w = 8192, ··· 1785 1773 .in_rot_en_h = 1280, 1786 1774 .in_rot_dis_w = 8192, 1787 1775 .out_rot_en_w = 1280, 1776 + .out_rot_dis_w = 1920, 1777 + }, 1778 + [3] = { 1779 + .scaler_en_w = 1920, 1780 + .scaler_dis_w = 8192, 1781 + .in_rot_en_h = 1366, 1782 + .in_rot_dis_w = 8192, 1783 + .out_rot_en_w = 1366, 1788 1784 .out_rot_dis_w = 1920, 1789 1785 }, 1790 1786 }; ··· 1847 1827 .pix_limit = &s5p_pix_limit[2], 1848 1828 }; 1849 1829 1850 - static struct samsung_fimc_variant fimc0_variant_s5pv310 = { 1830 + static struct samsung_fimc_variant fimc0_variant_exynos4 = { 1851 1831 .pix_hoff = 1, 1852 1832 .has_inp_rot = 1, 1853 1833 .has_out_rot = 1, ··· 1860 1840 .pix_limit = &s5p_pix_limit[1], 1861 1841 }; 1862 1842 1863 - static struct samsung_fimc_variant fimc2_variant_s5pv310 = { 1843 + static struct samsung_fimc_variant fimc2_variant_exynos4 = { 1864 1844 .pix_hoff = 1, 1865 1845 .has_cistatus2 = 1, 1866 1846 .has_mainscaler_ext = 1, ··· 1868 1848 .min_out_pixsize = 16, 1869 1849 .hor_offs_align = 1, 1870 1850 .out_buf_count = 32, 1871 - .pix_limit = &s5p_pix_limit[2], 1851 + .pix_limit = &s5p_pix_limit[3], 1872 1852 }; 1873 1853 1874 1854 /* S5PC100 */ ··· 1894 1874 }; 1895 1875 1896 1876 /* S5PV310, S5PC210 */ 1897 - static struct samsung_fimc_driverdata fimc_drvdata_s5pv310 = { 1877 + static struct samsung_fimc_driverdata fimc_drvdata_exynos4 = { 1898 1878 .variant = { 1899 - [0] = &fimc0_variant_s5pv310, 1900 - [1] = &fimc0_variant_s5pv310, 1901 - [2] = &fimc0_variant_s5pv310, 1902 - [3] = &fimc2_variant_s5pv310, 1879 + [0] = &fimc0_variant_exynos4, 1880 + [1] = &fimc0_variant_exynos4, 1881 + [2] = &fimc0_variant_exynos4, 1882 + [3] = &fimc2_variant_exynos4, 1903 1883 }, 1904 1884 .num_entities = 4, 1905 1885 .lclk_frequency = 166000000UL, ··· 1913 1893 .name = "s5pv210-fimc", 
1914 1894 .driver_data = (unsigned long)&fimc_drvdata_s5pv210, 1915 1895 }, { 1916 - .name = "s5pv310-fimc", 1917 - .driver_data = (unsigned long)&fimc_drvdata_s5pv310, 1896 + .name = "exynos4-fimc", 1897 + .driver_data = (unsigned long)&fimc_drvdata_exynos4, 1918 1898 }, 1919 1899 {}, 1920 1900 };
+5 -5
drivers/media/video/sh_mobile_ceu_camera.c
··· 922 922 /* Try 2560x1920, 1280x960, 640x480, 320x240 */ 923 923 mf.width = 2560 >> shift; 924 924 mf.height = 1920 >> shift; 925 - ret = v4l2_device_call_until_err(sd->v4l2_dev, 0, video, 925 + ret = v4l2_device_call_until_err(sd->v4l2_dev, (long)icd, video, 926 926 s_mbus_fmt, &mf); 927 927 if (ret < 0) 928 928 return ret; ··· 1224 1224 struct v4l2_cropcap cap; 1225 1225 int ret; 1226 1226 1227 - ret = v4l2_device_call_until_err(sd->v4l2_dev, 0, video, 1227 + ret = v4l2_device_call_until_err(sd->v4l2_dev, (long)icd, video, 1228 1228 s_mbus_fmt, mf); 1229 1229 if (ret < 0) 1230 1230 return ret; ··· 1254 1254 tmp_h = min(2 * tmp_h, max_height); 1255 1255 mf->width = tmp_w; 1256 1256 mf->height = tmp_h; 1257 - ret = v4l2_device_call_until_err(sd->v4l2_dev, 0, video, 1257 + ret = v4l2_device_call_until_err(sd->v4l2_dev, (long)icd, video, 1258 1258 s_mbus_fmt, mf); 1259 1259 dev_geo(dev, "Camera scaled to %ux%u\n", 1260 1260 mf->width, mf->height); ··· 1658 1658 mf.code = xlate->code; 1659 1659 mf.colorspace = pix->colorspace; 1660 1660 1661 - ret = v4l2_device_call_until_err(sd->v4l2_dev, 0, video, try_mbus_fmt, &mf); 1661 + ret = v4l2_device_call_until_err(sd->v4l2_dev, (long)icd, video, try_mbus_fmt, &mf); 1662 1662 if (ret < 0) 1663 1663 return ret; 1664 1664 ··· 1682 1682 */ 1683 1683 mf.width = 2560; 1684 1684 mf.height = 1920; 1685 - ret = v4l2_device_call_until_err(sd->v4l2_dev, 0, video, 1685 + ret = v4l2_device_call_until_err(sd->v4l2_dev, (long)icd, video, 1686 1686 try_mbus_fmt, &mf); 1687 1687 if (ret < 0) { 1688 1688 /* Shouldn't actually happen... */
+9 -2
drivers/media/video/sh_mobile_csi2.c
··· 38 38 void __iomem *base; 39 39 struct platform_device *pdev; 40 40 struct sh_csi2_client_config *client; 41 + unsigned long (*query_bus_param)(struct soc_camera_device *); 42 + int (*set_bus_param)(struct soc_camera_device *, unsigned long); 41 43 }; 42 44 43 45 static int sh_csi2_try_fmt(struct v4l2_subdev *sd, ··· 210 208 case BUS_NOTIFY_BOUND_DRIVER: 211 209 snprintf(priv->subdev.name, V4L2_SUBDEV_NAME_SIZE, "%s%s", 212 210 dev_name(v4l2_dev->dev), ".mipi-csi"); 211 + priv->subdev.grp_id = (long)icd; 213 212 ret = v4l2_device_register_subdev(v4l2_dev, &priv->subdev); 214 213 dev_dbg(dev, "%s(%p): ret(register_subdev) = %d\n", __func__, priv, ret); 215 214 if (ret < 0) ··· 218 215 219 216 priv->client = pdata->clients + i; 220 217 218 + priv->set_bus_param = icd->ops->set_bus_param; 219 + priv->query_bus_param = icd->ops->query_bus_param; 221 220 icd->ops->set_bus_param = sh_csi2_set_bus_param; 222 221 icd->ops->query_bus_param = sh_csi2_query_bus_param; 223 222 ··· 231 226 priv->client = NULL; 232 227 233 228 /* Driver is about to be unbound */ 234 - icd->ops->set_bus_param = NULL; 235 - icd->ops->query_bus_param = NULL; 229 + icd->ops->set_bus_param = priv->set_bus_param; 230 + icd->ops->query_bus_param = priv->query_bus_param; 231 + priv->set_bus_param = NULL; 232 + priv->query_bus_param = NULL; 236 233 237 234 v4l2_device_unregister_subdev(&priv->subdev); 238 235
+47 -8
drivers/media/video/soc_camera.c
··· 136 136 } 137 137 EXPORT_SYMBOL(soc_camera_apply_sensor_flags); 138 138 139 + #define pixfmtstr(x) (x) & 0xff, ((x) >> 8) & 0xff, ((x) >> 16) & 0xff, \ 140 + ((x) >> 24) & 0xff 141 + 142 + static int soc_camera_try_fmt(struct soc_camera_device *icd, 143 + struct v4l2_format *f) 144 + { 145 + struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); 146 + struct v4l2_pix_format *pix = &f->fmt.pix; 147 + int ret; 148 + 149 + dev_dbg(&icd->dev, "TRY_FMT(%c%c%c%c, %ux%u)\n", 150 + pixfmtstr(pix->pixelformat), pix->width, pix->height); 151 + 152 + pix->bytesperline = 0; 153 + pix->sizeimage = 0; 154 + 155 + ret = ici->ops->try_fmt(icd, f); 156 + if (ret < 0) 157 + return ret; 158 + 159 + if (!pix->sizeimage) { 160 + if (!pix->bytesperline) { 161 + const struct soc_camera_format_xlate *xlate; 162 + 163 + xlate = soc_camera_xlate_by_fourcc(icd, pix->pixelformat); 164 + if (!xlate) 165 + return -EINVAL; 166 + 167 + ret = soc_mbus_bytes_per_line(pix->width, 168 + xlate->host_fmt); 169 + if (ret > 0) 170 + pix->bytesperline = ret; 171 + } 172 + if (pix->bytesperline) 173 + pix->sizeimage = pix->bytesperline * pix->height; 174 + } 175 + 176 + return 0; 177 + } 178 + 139 179 static int soc_camera_try_fmt_vid_cap(struct file *file, void *priv, 140 180 struct v4l2_format *f) 141 181 { 142 182 struct soc_camera_device *icd = file->private_data; 143 - struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); 144 183 145 184 WARN_ON(priv != file->private_data); 146 185 ··· 188 149 return -EINVAL; 189 150 190 151 /* limit format to hardware capabilities */ 191 - return ici->ops->try_fmt(icd, f); 152 + return soc_camera_try_fmt(icd, f); 192 153 } 193 154 194 155 static int soc_camera_enum_input(struct file *file, void *priv, ··· 401 362 icd->user_formats = NULL; 402 363 } 403 364 404 - #define pixfmtstr(x) (x) & 0xff, ((x) >> 8) & 0xff, ((x) >> 16) & 0xff, \ 405 - ((x) >> 24) & 0xff 406 - 407 365 /* Called with .vb_lock held, or from the first open(2), see 
comment there */ 408 366 static int soc_camera_set_fmt(struct soc_camera_device *icd, 409 367 struct v4l2_format *f) ··· 413 377 pixfmtstr(pix->pixelformat), pix->width, pix->height); 414 378 415 379 /* We always call try_fmt() before set_fmt() or set_crop() */ 416 - ret = ici->ops->try_fmt(icd, f); 380 + ret = soc_camera_try_fmt(icd, f); 417 381 if (ret < 0) 418 382 return ret; 419 383 ··· 1032 996 { 1033 997 struct i2c_client *client = 1034 998 to_i2c_client(to_soc_camera_control(icd)); 999 + struct i2c_adapter *adap = client->adapter; 1035 1000 dev_set_drvdata(&icd->dev, NULL); 1036 1001 v4l2_device_unregister_subdev(i2c_get_clientdata(client)); 1037 1002 i2c_unregister_device(client); 1038 - i2c_put_adapter(client->adapter); 1003 + i2c_put_adapter(adap); 1039 1004 } 1040 1005 #else 1041 1006 #define soc_camera_init_i2c(icd, icl) (-ENODEV) ··· 1108 1071 } 1109 1072 } 1110 1073 1074 + sd = soc_camera_to_subdev(icd); 1075 + sd->grp_id = (long)icd; 1076 + 1111 1077 /* At this point client .probe() should have run already */ 1112 1078 ret = soc_camera_init_user_formats(icd); 1113 1079 if (ret < 0) ··· 1132 1092 goto evidstart; 1133 1093 1134 1094 /* Try to improve our guess of a reasonable window format */ 1135 - sd = soc_camera_to_subdev(icd); 1136 1095 if (!v4l2_subdev_call(sd, video, g_mbus_fmt, &mf)) { 1137 1096 icd->user_width = mf.width; 1138 1097 icd->user_height = mf.height;
+1 -1
drivers/media/video/tda9840.c
··· 171 171 v4l_info(client, "chip found @ 0x%x (%s)\n", 172 172 client->addr << 1, client->adapter->name); 173 173 174 - sd = kmalloc(sizeof(struct v4l2_subdev), GFP_KERNEL); 174 + sd = kzalloc(sizeof(struct v4l2_subdev), GFP_KERNEL); 175 175 if (sd == NULL) 176 176 return -ENOMEM; 177 177 v4l2_i2c_subdev_init(sd, client, &tda9840_ops);
+1 -1
drivers/media/video/tea6415c.c
··· 152 152 153 153 v4l_info(client, "chip found @ 0x%x (%s)\n", 154 154 client->addr << 1, client->adapter->name); 155 - sd = kmalloc(sizeof(struct v4l2_subdev), GFP_KERNEL); 155 + sd = kzalloc(sizeof(struct v4l2_subdev), GFP_KERNEL); 156 156 if (sd == NULL) 157 157 return -ENOMEM; 158 158 v4l2_i2c_subdev_init(sd, client, &tea6415c_ops);
+1 -1
drivers/media/video/tea6420.c
··· 125 125 v4l_info(client, "chip found @ 0x%x (%s)\n", 126 126 client->addr << 1, client->adapter->name); 127 127 128 - sd = kmalloc(sizeof(struct v4l2_subdev), GFP_KERNEL); 128 + sd = kzalloc(sizeof(struct v4l2_subdev), GFP_KERNEL); 129 129 if (sd == NULL) 130 130 return -ENOMEM; 131 131 v4l2_i2c_subdev_init(sd, client, &tea6420_ops);
+1 -1
drivers/media/video/upd64031a.c
··· 230 230 v4l_info(client, "chip found @ 0x%x (%s)\n", 231 231 client->addr << 1, client->adapter->name); 232 232 233 - state = kmalloc(sizeof(struct upd64031a_state), GFP_KERNEL); 233 + state = kzalloc(sizeof(struct upd64031a_state), GFP_KERNEL); 234 234 if (state == NULL) 235 235 return -ENOMEM; 236 236 sd = &state->sd;
+1 -1
drivers/media/video/upd64083.c
··· 202 202 v4l_info(client, "chip found @ 0x%x (%s)\n", 203 203 client->addr << 1, client->adapter->name); 204 204 205 - state = kmalloc(sizeof(struct upd64083_state), GFP_KERNEL); 205 + state = kzalloc(sizeof(struct upd64083_state), GFP_KERNEL); 206 206 if (state == NULL) 207 207 return -ENOMEM; 208 208 sd = &state->sd;
+10 -5
drivers/media/video/v4l2-dev.c
··· 389 389 video_get(vdev); 390 390 mutex_unlock(&videodev_lock); 391 391 #if defined(CONFIG_MEDIA_CONTROLLER) 392 - if (vdev->v4l2_dev && vdev->v4l2_dev->mdev) { 392 + if (vdev->v4l2_dev && vdev->v4l2_dev->mdev && 393 + vdev->vfl_type != VFL_TYPE_SUBDEV) { 393 394 entity = media_entity_get(&vdev->entity); 394 395 if (!entity) { 395 396 ret = -EBUSY; ··· 416 415 /* decrease the refcount in case of an error */ 417 416 if (ret) { 418 417 #if defined(CONFIG_MEDIA_CONTROLLER) 419 - if (vdev->v4l2_dev && vdev->v4l2_dev->mdev) 418 + if (vdev->v4l2_dev && vdev->v4l2_dev->mdev && 419 + vdev->vfl_type != VFL_TYPE_SUBDEV) 420 420 media_entity_put(entity); 421 421 #endif 422 422 video_put(vdev); ··· 439 437 mutex_unlock(vdev->lock); 440 438 } 441 439 #if defined(CONFIG_MEDIA_CONTROLLER) 442 - if (vdev->v4l2_dev && vdev->v4l2_dev->mdev) 440 + if (vdev->v4l2_dev && vdev->v4l2_dev->mdev && 441 + vdev->vfl_type != VFL_TYPE_SUBDEV) 443 442 media_entity_put(&vdev->entity); 444 443 #endif 445 444 /* decrease the refcount unconditionally since the release() ··· 689 686 690 687 #if defined(CONFIG_MEDIA_CONTROLLER) 691 688 /* Part 5: Register the entity. */ 692 - if (vdev->v4l2_dev && vdev->v4l2_dev->mdev) { 689 + if (vdev->v4l2_dev && vdev->v4l2_dev->mdev && 690 + vdev->vfl_type != VFL_TYPE_SUBDEV) { 693 691 vdev->entity.type = MEDIA_ENT_T_DEVNODE_V4L; 694 692 vdev->entity.name = vdev->name; 695 693 vdev->entity.v4l.major = VIDEO_MAJOR; ··· 737 733 return; 738 734 739 735 #if defined(CONFIG_MEDIA_CONTROLLER) 740 - if (vdev->v4l2_dev && vdev->v4l2_dev->mdev) 736 + if (vdev->v4l2_dev && vdev->v4l2_dev->mdev && 737 + vdev->vfl_type != VFL_TYPE_SUBDEV) 741 738 media_device_unregister_entity(&vdev->entity); 742 739 #endif 743 740
+4 -1
drivers/media/video/v4l2-device.c
··· 155 155 sd->v4l2_dev = v4l2_dev; 156 156 if (sd->internal_ops && sd->internal_ops->registered) { 157 157 err = sd->internal_ops->registered(sd); 158 - if (err) 158 + if (err) { 159 + module_put(sd->owner); 159 160 return err; 161 + } 160 162 } 161 163 162 164 /* This just returns 0 if either of the two args is NULL */ ··· 166 164 if (err) { 167 165 if (sd->internal_ops && sd->internal_ops->unregistered) 168 166 sd->internal_ops->unregistered(sd); 167 + module_put(sd->owner); 169 168 return err; 170 169 } 171 170
+7 -7
drivers/media/video/v4l2-subdev.c
··· 155 155 156 156 switch (cmd) { 157 157 case VIDIOC_QUERYCTRL: 158 - return v4l2_subdev_queryctrl(sd, arg); 158 + return v4l2_queryctrl(sd->ctrl_handler, arg); 159 159 160 160 case VIDIOC_QUERYMENU: 161 - return v4l2_subdev_querymenu(sd, arg); 161 + return v4l2_querymenu(sd->ctrl_handler, arg); 162 162 163 163 case VIDIOC_G_CTRL: 164 - return v4l2_subdev_g_ctrl(sd, arg); 164 + return v4l2_g_ctrl(sd->ctrl_handler, arg); 165 165 166 166 case VIDIOC_S_CTRL: 167 - return v4l2_subdev_s_ctrl(sd, arg); 167 + return v4l2_s_ctrl(sd->ctrl_handler, arg); 168 168 169 169 case VIDIOC_G_EXT_CTRLS: 170 - return v4l2_subdev_g_ext_ctrls(sd, arg); 170 + return v4l2_g_ext_ctrls(sd->ctrl_handler, arg); 171 171 172 172 case VIDIOC_S_EXT_CTRLS: 173 - return v4l2_subdev_s_ext_ctrls(sd, arg); 173 + return v4l2_s_ext_ctrls(sd->ctrl_handler, arg); 174 174 175 175 case VIDIOC_TRY_EXT_CTRLS: 176 - return v4l2_subdev_try_ext_ctrls(sd, arg); 176 + return v4l2_try_ext_ctrls(sd->ctrl_handler, arg); 177 177 178 178 case VIDIOC_DQEVENT: 179 179 if (!(sd->flags & V4L2_SUBDEV_FL_HAS_EVENTS))
+1 -1
drivers/media/video/videobuf-dma-contig.c
··· 300 300 301 301 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); 302 302 retval = remap_pfn_range(vma, vma->vm_start, 303 - PFN_DOWN(virt_to_phys(mem->vaddr)), 303 + mem->dma_handle >> PAGE_SHIFT, 304 304 size, vma->vm_page_prot); 305 305 if (retval) { 306 306 dev_err(q->dev, "mmap: remap failed with error %d. ", retval);
+12 -5
drivers/media/video/videobuf2-core.c
··· 37 37 #define call_qop(q, op, args...) \ 38 38 (((q)->ops->op) ? ((q)->ops->op(args)) : 0) 39 39 40 + #define V4L2_BUFFER_STATE_FLAGS (V4L2_BUF_FLAG_MAPPED | V4L2_BUF_FLAG_QUEUED | \ 41 + V4L2_BUF_FLAG_DONE | V4L2_BUF_FLAG_ERROR) 42 + 40 43 /** 41 44 * __vb2_buf_mem_alloc() - allocate video memory for the given buffer 42 45 */ ··· 54 51 for (plane = 0; plane < vb->num_planes; ++plane) { 55 52 mem_priv = call_memop(q, plane, alloc, q->alloc_ctx[plane], 56 53 plane_sizes[plane]); 57 - if (!mem_priv) 54 + if (IS_ERR_OR_NULL(mem_priv)) 58 55 goto free; 59 56 60 57 /* Associate allocator private data with this plane */ ··· 287 284 struct vb2_queue *q = vb->vb2_queue; 288 285 int ret = 0; 289 286 290 - /* Copy back data such as timestamp, input, etc. */ 287 + /* Copy back data such as timestamp, flags, input, etc. */ 291 288 memcpy(b, &vb->v4l2_buf, offsetof(struct v4l2_buffer, m)); 292 289 b->input = vb->v4l2_buf.input; 293 290 b->reserved = vb->v4l2_buf.reserved; ··· 316 313 b->m.userptr = vb->v4l2_planes[0].m.userptr; 317 314 } 318 315 319 - b->flags = 0; 316 + /* 317 + * Clear any buffer state related flags. 318 + */ 319 + b->flags &= ~V4L2_BUFFER_STATE_FLAGS; 320 320 321 321 switch (vb->state) { 322 322 case VB2_BUF_STATE_QUEUED: ··· 525 519 num_buffers = min_t(unsigned int, req->count, VIDEO_MAX_FRAME); 526 520 memset(plane_sizes, 0, sizeof(plane_sizes)); 527 521 memset(q->alloc_ctx, 0, sizeof(q->alloc_ctx)); 522 + q->memory = req->memory; 528 523 529 524 /* 530 525 * Ask the driver how many buffers and planes per buffer it requires. ··· 566 559 */ 567 560 ret = num_buffers; 568 561 } 569 - 570 - q->memory = req->memory; 571 562 572 563 /* 573 564 * Return the number of successfully allocated buffers ··· 720 715 721 716 vb->v4l2_buf.field = b->field; 722 717 vb->v4l2_buf.timestamp = b->timestamp; 718 + vb->v4l2_buf.input = b->input; 719 + vb->v4l2_buf.flags = b->flags & ~V4L2_BUFFER_STATE_FLAGS; 723 720 724 721 return 0; 725 722 }
+1 -1
drivers/media/video/videobuf2-dma-contig.c
··· 46 46 GFP_KERNEL); 47 47 if (!buf->vaddr) { 48 48 dev_err(conf->dev, "dma_alloc_coherent of size %ld failed\n", 49 - buf->size); 49 + size); 50 50 kfree(buf); 51 51 return ERR_PTR(-ENOMEM); 52 52 }
-1
drivers/message/i2o/i2o_block.c
··· 1000 1000 gd->major = I2O_MAJOR; 1001 1001 gd->queue = queue; 1002 1002 gd->fops = &i2o_block_fops; 1003 - gd->events = DISK_EVENT_MEDIA_CHANGE; 1004 1003 gd->private_data = dev; 1005 1004 1006 1005 dev->gd = gd;
+1 -1
drivers/mfd/asic3.c
··· 144 144 int iter, i; 145 145 unsigned long flags; 146 146 147 - data->chip->irq_ack(irq_data); 147 + data->chip->irq_ack(data); 148 148 149 149 for (iter = 0 ; iter < MAX_ASIC_ISR_LOOPS; iter++) { 150 150 u32 status;
+7 -10
drivers/mfd/omap-usb-host.c
··· 25 25 #include <linux/dma-mapping.h> 26 26 #include <linux/spinlock.h> 27 27 #include <linux/gpio.h> 28 - #include <linux/regulator/consumer.h> 29 28 #include <plat/usb.h> 30 29 31 30 #define USBHS_DRIVER_NAME "usbhs-omap" ··· 699 700 dev_dbg(dev, "starting TI HSUSB Controller\n"); 700 701 if (!pdata) { 701 702 dev_dbg(dev, "missing platform_data\n"); 702 - ret = -ENODEV; 703 - goto end_enable; 703 + return -ENODEV; 704 704 } 705 705 706 706 spin_lock_irqsave(&omap->lock, flags); ··· 717 719 gpio_request(pdata->ehci_data->reset_gpio_port[0], 718 720 "USB1 PHY reset"); 719 721 gpio_direction_output 720 - (pdata->ehci_data->reset_gpio_port[0], 1); 722 + (pdata->ehci_data->reset_gpio_port[0], 0); 721 723 } 722 724 723 725 if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[1])) { 724 726 gpio_request(pdata->ehci_data->reset_gpio_port[1], 725 727 "USB2 PHY reset"); 726 728 gpio_direction_output 727 - (pdata->ehci_data->reset_gpio_port[1], 1); 729 + (pdata->ehci_data->reset_gpio_port[1], 0); 728 730 } 729 731 730 732 /* Hold the PHY in RESET for enough time till DIR is high */ ··· 904 906 905 907 if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[0])) 906 908 gpio_set_value 907 - (pdata->ehci_data->reset_gpio_port[0], 0); 909 + (pdata->ehci_data->reset_gpio_port[0], 1); 908 910 909 911 if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[1])) 910 912 gpio_set_value 911 - (pdata->ehci_data->reset_gpio_port[1], 0); 913 + (pdata->ehci_data->reset_gpio_port[1], 1); 912 914 } 913 915 914 916 end_count: 915 917 omap->count++; 916 - goto end_enable; 918 + spin_unlock_irqrestore(&omap->lock, flags); 919 + return 0; 917 920 918 921 err_tll: 919 922 if (pdata->ehci_data->phy_reset) { ··· 930 931 clk_disable(omap->usbhost_fs_fck); 931 932 clk_disable(omap->usbhost_hs_fck); 932 933 clk_disable(omap->usbhost_ick); 933 - 934 - end_enable: 935 934 spin_unlock_irqrestore(&omap->lock, flags); 936 935 return ret; 937 936 }
+2 -1
drivers/mfd/twl4030-power.c
··· 447 447 if (err) 448 448 goto out; 449 449 } 450 - if (tscript->flags & TWL4030_SLEEP_SCRIPT) 450 + if (tscript->flags & TWL4030_SLEEP_SCRIPT) { 451 451 if (order) 452 452 pr_warning("TWL4030: Bad order of scripts (sleep "\ 453 453 "script before wakeup) Leads to boot"\ 454 454 "failure on some boards\n"); 455 455 err = twl4030_config_sleep_sequence(address); 456 + } 456 457 out: 457 458 return err; 458 459 }
+1
drivers/mmc/core/bus.c
··· 284 284 type = "SD-combo"; 285 285 if (mmc_card_blockaddr(card)) 286 286 type = "SDHC-combo"; 287 + break; 287 288 default: 288 289 type = "?"; 289 290 break;
+1 -1
drivers/mmc/host/omap.c
··· 832 832 return IRQ_HANDLED; 833 833 } 834 834 835 - if (end_command) 835 + if (end_command && host->cmd) 836 836 mmc_omap_cmd_done(host, host->cmd); 837 837 if (host->data != NULL) { 838 838 if (transfer_error)
+5 -2
drivers/mmc/host/sdhci-of-core.c
··· 124 124 #endif 125 125 } 126 126 127 + static const struct of_device_id sdhci_of_match[]; 127 128 static int __devinit sdhci_of_probe(struct platform_device *ofdev) 128 129 { 130 + const struct of_device_id *match; 129 131 struct device_node *np = ofdev->dev.of_node; 130 132 struct sdhci_of_data *sdhci_of_data; 131 133 struct sdhci_host *host; ··· 136 134 int size; 137 135 int ret; 138 136 139 - if (!ofdev->dev.of_match) 137 + match = of_match_device(sdhci_of_match, &ofdev->dev); 138 + if (!match) 140 139 return -EINVAL; 141 - sdhci_of_data = ofdev->dev.of_match->data; 140 + sdhci_of_data = match->data; 142 141 143 142 if (!of_device_is_available(np)) 144 143 return -ENODEV;
+1
drivers/mmc/host/sdhci-pci.c
··· 957 957 host->ioaddr = pci_ioremap_bar(pdev, bar); 958 958 if (!host->ioaddr) { 959 959 dev_err(&pdev->dev, "failed to remap registers\n"); 960 + ret = -ENOMEM; 960 961 goto release; 961 962 } 962 963
+8 -1
drivers/mmc/host/sdhci.c
··· 1334 1334 1335 1335 host = (struct sdhci_host*)param; 1336 1336 1337 + /* 1338 + * If this tasklet gets rescheduled while running, it will 1339 + * be run again afterwards but without any active request. 1340 + */ 1341 + if (!host->mrq) 1342 + return; 1343 + 1337 1344 spin_lock_irqsave(&host->lock, flags); 1338 1345 1339 1346 del_timer(&host->timer); ··· 1352 1345 * upon error conditions. 1353 1346 */ 1354 1347 if (!(host->flags & SDHCI_DEVICE_DEAD) && 1355 - (mrq->cmd->error || 1348 + ((mrq->cmd && mrq->cmd->error) || 1356 1349 (mrq->data && (mrq->data->error || 1357 1350 (mrq->data->stop && mrq->data->stop->error))) || 1358 1351 (host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST))) {
+5 -5
drivers/mmc/host/tmio_mmc_pio.c
··· 728 728 tmio_mmc_set_clock(host, ios->clock); 729 729 730 730 /* Power sequence - OFF -> UP -> ON */ 731 - if (ios->power_mode == MMC_POWER_OFF || !ios->clock) { 731 + if (ios->power_mode == MMC_POWER_UP) { 732 + /* power up SD bus */ 733 + if (host->set_pwr) 734 + host->set_pwr(host->pdev, 1); 735 + } else if (ios->power_mode == MMC_POWER_OFF || !ios->clock) { 732 736 /* power down SD bus */ 733 737 if (ios->power_mode == MMC_POWER_OFF && host->set_pwr) 734 738 host->set_pwr(host->pdev, 0); 735 739 tmio_mmc_clk_stop(host); 736 - } else if (ios->power_mode == MMC_POWER_UP) { 737 - /* power up SD bus */ 738 - if (host->set_pwr) 739 - host->set_pwr(host->pdev, 1); 740 740 } else { 741 741 /* start bus clock */ 742 742 tmio_mmc_clk_start(host);
+5 -2
drivers/mtd/maps/physmap_of.c
··· 214 214 } 215 215 #endif 216 216 217 + static struct of_device_id of_flash_match[]; 217 218 static int __devinit of_flash_probe(struct platform_device *dev) 218 219 { 219 220 #ifdef CONFIG_MTD_PARTITIONS 220 221 const char **part_probe_types; 221 222 #endif 223 + const struct of_device_id *match; 222 224 struct device_node *dp = dev->dev.of_node; 223 225 struct resource res; 224 226 struct of_flash *info; ··· 234 232 struct mtd_info **mtd_list = NULL; 235 233 resource_size_t res_size; 236 234 237 - if (!dev->dev.of_match) 235 + match = of_match_device(of_flash_match, &dev->dev); 236 + if (!match) 238 237 return -EINVAL; 239 - probe_type = dev->dev.of_match->data; 238 + probe_type = match->data; 240 239 241 240 reg_tuple_size = (of_n_addr_cells(dp) + of_n_size_cells(dp)) * sizeof(u32); 242 241
+1 -1
drivers/mtd/nand/diskonchip.c
··· 400 400 doc200x_hwcontrol(mtd, 0, NAND_CTRL_ALE | NAND_CTRL_CHANGE); 401 401 doc200x_hwcontrol(mtd, NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE); 402 402 403 - /* We can't' use dev_ready here, but at least we wait for the 403 + /* We can't use dev_ready here, but at least we wait for the 404 404 * command to complete 405 405 */ 406 406 udelay(50);
+7 -1
drivers/net/Kconfig
··· 2536 2536 source "drivers/net/stmmac/Kconfig" 2537 2537 2538 2538 config PCH_GBE 2539 - tristate "PCH Gigabit Ethernet" 2539 + tristate "Intel EG20T PCH / OKI SEMICONDUCTOR ML7223 IOH GbE" 2540 2540 depends on PCI 2541 2541 select MII 2542 2542 ---help--- ··· 2547 2547 Using this interface, it is able to access system devices connected 2548 2548 to Gigabit Ethernet. 2549 2549 This driver enables Gigabit Ethernet function. 2550 + 2551 + This driver also can be used for OKI SEMICONDUCTOR IOH(Input/ 2552 + Output Hub), ML7223. 2553 + ML7223 IOH is for MP(Media Phone) use. 2554 + ML7223 is companion chip for Intel Atom E6xx series. 2555 + ML7223 is completely compatible for Intel EG20T PCH. 2550 2556 2551 2557 endif # NETDEV_1000 2552 2558
+3 -3
drivers/net/Makefile
··· 144 144 obj-$(CONFIG_SB1250_MAC) += sb1250-mac.o 145 145 obj-$(CONFIG_B44) += b44.o 146 146 obj-$(CONFIG_FORCEDETH) += forcedeth.o 147 - obj-$(CONFIG_NE_H8300) += ne-h8300.o 8390.o 147 + obj-$(CONFIG_NE_H8300) += ne-h8300.o 148 148 obj-$(CONFIG_AX88796) += ax88796.o 149 149 obj-$(CONFIG_BCM63XX_ENET) += bcm63xx_enet.o 150 150 obj-$(CONFIG_FTMAC100) += ftmac100.o ··· 219 219 obj-$(CONFIG_LP486E) += lp486e.o 220 220 221 221 obj-$(CONFIG_ETH16I) += eth16i.o 222 - obj-$(CONFIG_ZORRO8390) += zorro8390.o 8390.o 222 + obj-$(CONFIG_ZORRO8390) += zorro8390.o 223 223 obj-$(CONFIG_HPLANCE) += hplance.o 7990.o 224 224 obj-$(CONFIG_MVME147_NET) += mvme147.o 7990.o 225 225 obj-$(CONFIG_EQUALIZER) += eql.o ··· 231 231 obj-$(CONFIG_DECLANCE) += declance.o 232 232 obj-$(CONFIG_ATARILANCE) += atarilance.o 233 233 obj-$(CONFIG_A2065) += a2065.o 234 - obj-$(CONFIG_HYDRA) += hydra.o 8390.o 234 + obj-$(CONFIG_HYDRA) += hydra.o 235 235 obj-$(CONFIG_ARIADNE) += ariadne.o 236 236 obj-$(CONFIG_CS89x0) += cs89x0.o 237 237 obj-$(CONFIG_MACSONIC) += macsonic.o
+1 -1
drivers/net/amd8111e.c
··· 106 106 MODULE_LICENSE("GPL"); 107 107 MODULE_DEVICE_TABLE(pci, amd8111e_pci_tbl); 108 108 module_param_array(speed_duplex, int, NULL, 0); 109 - MODULE_PARM_DESC(speed_duplex, "Set device speed and duplex modes, 0: Auto Negotitate, 1: 10Mbps Half Duplex, 2: 10Mbps Full Duplex, 3: 100Mbps Half Duplex, 4: 100Mbps Full Duplex"); 109 + MODULE_PARM_DESC(speed_duplex, "Set device speed and duplex modes, 0: Auto Negotiate, 1: 10Mbps Half Duplex, 2: 10Mbps Full Duplex, 3: 100Mbps Half Duplex, 4: 100Mbps Full Duplex"); 110 110 module_param_array(coalesce, bool, NULL, 0); 111 111 MODULE_PARM_DESC(coalesce, "Enable or Disable interrupt coalescing, 1: Enable, 0: Disable"); 112 112 module_param_array(dynamic_ipg, bool, NULL, 0);
+2 -2
drivers/net/arm/etherh.c
··· 527 527 * Read the ethernet address string from the on board rom. 528 528 * This is an ascii string... 529 529 */ 530 - static int __init etherh_addr(char *addr, struct expansion_card *ec) 530 + static int __devinit etherh_addr(char *addr, struct expansion_card *ec) 531 531 { 532 532 struct in_chunk_dir cd; 533 533 char *s; ··· 655 655 static u32 etherh_regoffsets[16]; 656 656 static u32 etherm_regoffsets[16]; 657 657 658 - static int __init 658 + static int __devinit 659 659 etherh_probe(struct expansion_card *ec, const struct ecard_id *id) 660 660 { 661 661 const struct etherh_data *data = id->data;
+3 -3
drivers/net/atl1c/atl1c.h
··· 566 566 #define __AT_TESTING 0x0001 567 567 #define __AT_RESETTING 0x0002 568 568 #define __AT_DOWN 0x0003 569 - u8 work_event; 570 - #define ATL1C_WORK_EVENT_RESET 0x01 571 - #define ATL1C_WORK_EVENT_LINK_CHANGE 0x02 569 + unsigned long work_event; 570 + #define ATL1C_WORK_EVENT_RESET 0 571 + #define ATL1C_WORK_EVENT_LINK_CHANGE 1 572 572 u32 msg_enable; 573 573 574 574 bool have_msi;
+5 -9
drivers/net/atl1c/atl1c_main.c
··· 325 325 } 326 326 } 327 327 328 - adapter->work_event |= ATL1C_WORK_EVENT_LINK_CHANGE; 328 + set_bit(ATL1C_WORK_EVENT_LINK_CHANGE, &adapter->work_event); 329 329 schedule_work(&adapter->common_task); 330 330 } 331 331 ··· 337 337 adapter = container_of(work, struct atl1c_adapter, common_task); 338 338 netdev = adapter->netdev; 339 339 340 - if (adapter->work_event & ATL1C_WORK_EVENT_RESET) { 341 - adapter->work_event &= ~ATL1C_WORK_EVENT_RESET; 340 + if (test_and_clear_bit(ATL1C_WORK_EVENT_RESET, &adapter->work_event)) { 342 341 netif_device_detach(netdev); 343 342 atl1c_down(adapter); 344 343 atl1c_up(adapter); 345 344 netif_device_attach(netdev); 346 - return; 347 345 } 348 346 349 - if (adapter->work_event & ATL1C_WORK_EVENT_LINK_CHANGE) { 350 - adapter->work_event &= ~ATL1C_WORK_EVENT_LINK_CHANGE; 347 + if (test_and_clear_bit(ATL1C_WORK_EVENT_LINK_CHANGE, 348 + &adapter->work_event)) 351 349 atl1c_check_link_status(adapter); 352 - } 353 - return; 354 350 } 355 351 356 352 ··· 365 369 struct atl1c_adapter *adapter = netdev_priv(netdev); 366 370 367 371 /* Do the reset outside of interrupt context */ 368 - adapter->work_event |= ATL1C_WORK_EVENT_RESET; 372 + set_bit(ATL1C_WORK_EVENT_RESET, &adapter->work_event); 369 373 schedule_work(&adapter->common_task); 370 374 } 371 375
+1 -1
drivers/net/benet/be.h
··· 213 213 214 214 struct be_rx_compl_info { 215 215 u32 rss_hash; 216 - u16 vid; 216 + u16 vlan_tag; 217 217 u16 pkt_size; 218 218 u16 rxq_idx; 219 219 u16 mac_id;
+1 -1
drivers/net/benet/be_cmds.c
··· 132 132 struct be_async_event_grp5_pvid_state *evt) 133 133 { 134 134 if (evt->enabled) 135 - adapter->pvid = evt->tag; 135 + adapter->pvid = le16_to_cpu(evt->tag); 136 136 else 137 137 adapter->pvid = 0; 138 138 }
+13 -6
drivers/net/benet/be_main.c
··· 1018 1018 kfree_skb(skb); 1019 1019 return; 1020 1020 } 1021 - vlan_hwaccel_receive_skb(skb, adapter->vlan_grp, rxcp->vid); 1021 + vlan_hwaccel_receive_skb(skb, adapter->vlan_grp, 1022 + rxcp->vlan_tag); 1022 1023 } else { 1023 1024 netif_receive_skb(skb); 1024 1025 } ··· 1077 1076 if (likely(!rxcp->vlanf)) 1078 1077 napi_gro_frags(&eq_obj->napi); 1079 1078 else 1080 - vlan_gro_frags(&eq_obj->napi, adapter->vlan_grp, rxcp->vid); 1079 + vlan_gro_frags(&eq_obj->napi, adapter->vlan_grp, 1080 + rxcp->vlan_tag); 1081 1081 } 1082 1082 1083 1083 static void be_parse_rx_compl_v1(struct be_adapter *adapter, ··· 1104 1102 rxcp->pkt_type = 1105 1103 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl); 1106 1104 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm, compl); 1107 - rxcp->vid = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag, compl); 1105 + rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag, 1106 + compl); 1108 1107 } 1109 1108 1110 1109 static void be_parse_rx_compl_v0(struct be_adapter *adapter, ··· 1131 1128 rxcp->pkt_type = 1132 1129 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl); 1133 1130 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm, compl); 1134 - rxcp->vid = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag, compl); 1131 + rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag, 1132 + compl); 1135 1133 } 1136 1134 1137 1135 static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo) ··· 1159 1155 rxcp->vlanf = 0; 1160 1156 1161 1157 if (!lancer_chip(adapter)) 1162 - rxcp->vid = swab16(rxcp->vid); 1158 + rxcp->vlan_tag = swab16(rxcp->vlan_tag); 1163 1159 1164 - if ((adapter->pvid == rxcp->vid) && !adapter->vlan_tag[rxcp->vid]) 1160 + if (((adapter->pvid & VLAN_VID_MASK) == 1161 + (rxcp->vlan_tag & VLAN_VID_MASK)) && 1162 + !adapter->vlan_tag[rxcp->vlan_tag]) 1165 1163 rxcp->vlanf = 0; 1166 1164 1167 1165 /* As the compl has been parsed, reset it; we wont touch it 
again */ ··· 1879 1873 be_detect_dump_ue(adapter); 1880 1874 1881 1875 reschedule: 1876 + adapter->work_counter++; 1882 1877 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000)); 1883 1878 } 1884 1879
+18 -13
drivers/net/bna/bfa_ioc.c
··· 38 38 #define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc)) 39 39 #define bfa_ioc_notify_fail(__ioc) \ 40 40 ((__ioc)->ioc_hwif->ioc_notify_fail(__ioc)) 41 + #define bfa_ioc_sync_start(__ioc) \ 42 + ((__ioc)->ioc_hwif->ioc_sync_start(__ioc)) 41 43 #define bfa_ioc_sync_join(__ioc) \ 42 44 ((__ioc)->ioc_hwif->ioc_sync_join(__ioc)) 43 45 #define bfa_ioc_sync_leave(__ioc) \ ··· 604 602 switch (event) { 605 603 case IOCPF_E_SEMLOCKED: 606 604 if (bfa_ioc_firmware_lock(ioc)) { 607 - if (bfa_ioc_sync_complete(ioc)) { 605 + if (bfa_ioc_sync_start(ioc)) { 608 606 iocpf->retry_count = 0; 609 607 bfa_ioc_sync_join(ioc); 610 608 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit); ··· 1316 1314 * execution context (driver/bios) must match. 1317 1315 */ 1318 1316 static bool 1319 - bfa_ioc_fwver_valid(struct bfa_ioc *ioc) 1317 + bfa_ioc_fwver_valid(struct bfa_ioc *ioc, u32 boot_env) 1320 1318 { 1321 1319 struct bfi_ioc_image_hdr fwhdr, *drv_fwhdr; 1322 1320 ··· 1327 1325 if (fwhdr.signature != drv_fwhdr->signature) 1328 1326 return false; 1329 1327 1330 - if (fwhdr.exec != drv_fwhdr->exec) 1328 + if (swab32(fwhdr.param) != boot_env) 1331 1329 return false; 1332 1330 1333 1331 return bfa_nw_ioc_fwver_cmp(ioc, &fwhdr); ··· 1354 1352 { 1355 1353 enum bfi_ioc_state ioc_fwstate; 1356 1354 bool fwvalid; 1355 + u32 boot_env; 1357 1356 1358 1357 ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate); 1358 + 1359 + boot_env = BFI_BOOT_LOADER_OS; 1359 1360 1360 1361 if (force) 1361 1362 ioc_fwstate = BFI_IOC_UNINIT; ··· 1367 1362 * check if firmware is valid 1368 1363 */ 1369 1364 fwvalid = (ioc_fwstate == BFI_IOC_UNINIT) ? 
1370 - false : bfa_ioc_fwver_valid(ioc); 1365 + false : bfa_ioc_fwver_valid(ioc, boot_env); 1371 1366 1372 1367 if (!fwvalid) { 1373 - bfa_ioc_boot(ioc, BFI_BOOT_TYPE_NORMAL, ioc->pcidev.device_id); 1368 + bfa_ioc_boot(ioc, BFI_BOOT_TYPE_NORMAL, boot_env); 1374 1369 return; 1375 1370 } 1376 1371 ··· 1401 1396 /** 1402 1397 * Initialize the h/w for any other states. 1403 1398 */ 1404 - bfa_ioc_boot(ioc, BFI_BOOT_TYPE_NORMAL, ioc->pcidev.device_id); 1399 + bfa_ioc_boot(ioc, BFI_BOOT_TYPE_NORMAL, boot_env); 1405 1400 } 1406 1401 1407 1402 void ··· 1511 1506 */ 1512 1507 static void 1513 1508 bfa_ioc_download_fw(struct bfa_ioc *ioc, u32 boot_type, 1514 - u32 boot_param) 1509 + u32 boot_env) 1515 1510 { 1516 1511 u32 *fwimg; 1517 1512 u32 pgnum, pgoff; ··· 1563 1558 /* 1564 1559 * Set boot type and boot param at the end. 1565 1560 */ 1566 - writel((swab32(swab32(boot_type))), ((ioc->ioc_regs.smem_page_start) 1561 + writel(boot_type, ((ioc->ioc_regs.smem_page_start) 1567 1562 + (BFI_BOOT_TYPE_OFF))); 1568 - writel((swab32(swab32(boot_param))), ((ioc->ioc_regs.smem_page_start) 1569 - + (BFI_BOOT_PARAM_OFF))); 1563 + writel(boot_env, ((ioc->ioc_regs.smem_page_start) 1564 + + (BFI_BOOT_LOADER_OFF))); 1570 1565 } 1571 1566 1572 1567 static void ··· 1726 1721 * as the entry vector. 1727 1722 */ 1728 1723 static void 1729 - bfa_ioc_boot(struct bfa_ioc *ioc, u32 boot_type, u32 boot_param) 1724 + bfa_ioc_boot(struct bfa_ioc *ioc, u32 boot_type, u32 boot_env) 1730 1725 { 1731 1726 void __iomem *rb; 1732 1727 ··· 1739 1734 * Initialize IOC state of all functions on a chip reset. 
1740 1735 */ 1741 1736 rb = ioc->pcidev.pci_bar_kva; 1742 - if (boot_param == BFI_BOOT_TYPE_MEMTEST) { 1737 + if (boot_type == BFI_BOOT_TYPE_MEMTEST) { 1743 1738 writel(BFI_IOC_MEMTEST, (rb + BFA_IOC0_STATE_REG)); 1744 1739 writel(BFI_IOC_MEMTEST, (rb + BFA_IOC1_STATE_REG)); 1745 1740 } else { ··· 1748 1743 } 1749 1744 1750 1745 bfa_ioc_msgflush(ioc); 1751 - bfa_ioc_download_fw(ioc, boot_type, boot_param); 1746 + bfa_ioc_download_fw(ioc, boot_type, boot_env); 1752 1747 1753 1748 /** 1754 1749 * Enable interrupts just before starting LPU
+1
drivers/net/bna/bfa_ioc.h
··· 194 194 bool msix); 195 195 void (*ioc_notify_fail) (struct bfa_ioc *ioc); 196 196 void (*ioc_ownership_reset) (struct bfa_ioc *ioc); 197 + bool (*ioc_sync_start) (struct bfa_ioc *ioc); 197 198 void (*ioc_sync_join) (struct bfa_ioc *ioc); 198 199 void (*ioc_sync_leave) (struct bfa_ioc *ioc); 199 200 void (*ioc_sync_ack) (struct bfa_ioc *ioc);
+28
drivers/net/bna/bfa_ioc_ct.c
··· 41 41 static void bfa_ioc_ct_isr_mode_set(struct bfa_ioc *ioc, bool msix); 42 42 static void bfa_ioc_ct_notify_fail(struct bfa_ioc *ioc); 43 43 static void bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc); 44 + static bool bfa_ioc_ct_sync_start(struct bfa_ioc *ioc); 44 45 static void bfa_ioc_ct_sync_join(struct bfa_ioc *ioc); 45 46 static void bfa_ioc_ct_sync_leave(struct bfa_ioc *ioc); 46 47 static void bfa_ioc_ct_sync_ack(struct bfa_ioc *ioc); ··· 64 63 nw_hwif_ct.ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set; 65 64 nw_hwif_ct.ioc_notify_fail = bfa_ioc_ct_notify_fail; 66 65 nw_hwif_ct.ioc_ownership_reset = bfa_ioc_ct_ownership_reset; 66 + nw_hwif_ct.ioc_sync_start = bfa_ioc_ct_sync_start; 67 67 nw_hwif_ct.ioc_sync_join = bfa_ioc_ct_sync_join; 68 68 nw_hwif_ct.ioc_sync_leave = bfa_ioc_ct_sync_leave; 69 69 nw_hwif_ct.ioc_sync_ack = bfa_ioc_ct_sync_ack; ··· 344 342 bfa_nw_ioc_hw_sem_release(ioc); 345 343 } 346 344 345 + /** 346 + * Synchronized IOC failure processing routines 347 + */ 348 + static bool 349 + bfa_ioc_ct_sync_start(struct bfa_ioc *ioc) 350 + { 351 + u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync); 352 + u32 sync_reqd = bfa_ioc_ct_get_sync_reqd(r32); 353 + 354 + /* 355 + * Driver load time. If the sync required bit for this PCI fn 356 + * is set, it is due to an unclean exit by the driver for this 357 + * PCI fn in the previous incarnation. Whoever comes here first 358 + * should clean it up, no matter which PCI fn. 359 + */ 360 + 361 + if (sync_reqd & bfa_ioc_ct_sync_pos(ioc)) { 362 + writel(0, ioc->ioc_regs.ioc_fail_sync); 363 + writel(1, ioc->ioc_regs.ioc_usage_reg); 364 + writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate); 365 + writel(BFI_IOC_UNINIT, ioc->ioc_regs.alt_ioc_fwstate); 366 + return true; 367 + } 368 + 369 + return bfa_ioc_ct_sync_complete(ioc); 370 + } 347 371 /** 348 372 * Synchronized IOC failure processing routines 349 373 */
+4 -2
drivers/net/bna/bfi.h
··· 184 184 #define BFI_IOC_MSGLEN_MAX 32 /* 32 bytes */ 185 185 186 186 #define BFI_BOOT_TYPE_OFF 8 187 - #define BFI_BOOT_PARAM_OFF 12 187 + #define BFI_BOOT_LOADER_OFF 12 188 188 189 - #define BFI_BOOT_TYPE_NORMAL 0 /* param is device id */ 189 + #define BFI_BOOT_TYPE_NORMAL 0 190 190 #define BFI_BOOT_TYPE_FLASH 1 191 191 #define BFI_BOOT_TYPE_MEMTEST 2 192 + 193 + #define BFI_BOOT_LOADER_OS 0 192 194 193 195 #define BFI_BOOT_MEMTEST_RES_ADDR 0x900 194 196 #define BFI_BOOT_MEMTEST_RES_SIG 0xA0A1A2A3
-1
drivers/net/bna/bnad.c
··· 1837 1837 /* Initialize the Rx event handlers */ 1838 1838 rx_cbfn.rcb_setup_cbfn = bnad_cb_rcb_setup; 1839 1839 rx_cbfn.rcb_destroy_cbfn = bnad_cb_rcb_destroy; 1840 - rx_cbfn.rcb_destroy_cbfn = NULL; 1841 1840 rx_cbfn.ccb_setup_cbfn = bnad_cb_ccb_setup; 1842 1841 rx_cbfn.ccb_destroy_cbfn = bnad_cb_ccb_destroy; 1843 1842 rx_cbfn.rx_cleanup_cbfn = bnad_cb_rx_cleanup;
+2
drivers/net/bnx2.c
··· 8413 8413 8414 8414 unregister_netdev(dev); 8415 8415 8416 + del_timer_sync(&bp->timer); 8417 + 8416 8418 if (bp->mips_firmware) 8417 8419 release_firmware(bp->mips_firmware); 8418 8420 if (bp->rv2p_firmware)
+24 -10
drivers/net/bnx2x/bnx2x_cmn.c
··· 2019 2019 static inline u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb, 2020 2020 u32 *parsing_data, u32 xmit_type) 2021 2021 { 2022 - *parsing_data |= ((tcp_hdrlen(skb)/4) << 2023 - ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) & 2024 - ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW; 2022 + *parsing_data |= 2023 + ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) << 2024 + ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT) & 2025 + ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W; 2025 2026 2026 - *parsing_data |= ((((u8 *)tcp_hdr(skb) - skb->data) / 2) << 2027 - ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT) & 2028 - ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W; 2027 + if (xmit_type & XMIT_CSUM_TCP) { 2028 + *parsing_data |= ((tcp_hdrlen(skb) / 4) << 2029 + ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) & 2030 + ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW; 2029 2031 2030 - return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data; 2032 + return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data; 2033 + } else 2034 + /* We support checksum offload for TCP and UDP only. 2035 + * No need to pass the UDP header length - it's a constant. 
2036 + */ 2037 + return skb_transport_header(skb) + 2038 + sizeof(struct udphdr) - skb->data; 2031 2039 } 2032 2040 2033 2041 /** ··· 2051 2043 struct eth_tx_parse_bd_e1x *pbd, 2052 2044 u32 xmit_type) 2053 2045 { 2054 - u8 hlen = (skb_network_header(skb) - skb->data) / 2; 2046 + u8 hlen = (skb_network_header(skb) - skb->data) >> 1; 2055 2047 2056 2048 /* for now NS flag is not used in Linux */ 2057 2049 pbd->global_data = ··· 2059 2051 ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT)); 2060 2052 2061 2053 pbd->ip_hlen_w = (skb_transport_header(skb) - 2062 - skb_network_header(skb)) / 2; 2054 + skb_network_header(skb)) >> 1; 2063 2055 2064 - hlen += pbd->ip_hlen_w + tcp_hdrlen(skb) / 2; 2056 + hlen += pbd->ip_hlen_w; 2057 + 2058 + /* We support checksum offload for TCP and UDP only */ 2059 + if (xmit_type & XMIT_CSUM_TCP) 2060 + hlen += tcp_hdrlen(skb) / 2; 2061 + else 2062 + hlen += sizeof(struct udphdr) / 2; 2065 2063 2066 2064 pbd->total_hlen_w = cpu_to_le16(hlen); 2067 2065 hlen = hlen*2;
+4 -5
drivers/net/bnx2x/bnx2x_ethtool.c
··· 2114 2114 for (i = 0; i < (data * 2); i++) { 2115 2115 if ((i % 2) == 0) 2116 2116 bnx2x_set_led(&bp->link_params, &bp->link_vars, 2117 - LED_MODE_OPER, SPEED_1000); 2117 + LED_MODE_ON, SPEED_1000); 2118 2118 else 2119 2119 bnx2x_set_led(&bp->link_params, &bp->link_vars, 2120 - LED_MODE_OFF, 0); 2120 + LED_MODE_FRONT_PANEL_OFF, 0); 2121 2121 2122 2122 msleep_interruptible(500); 2123 2123 if (signal_pending(current)) 2124 2124 break; 2125 2125 } 2126 2126 2127 - if (bp->link_vars.link_up) 2128 - bnx2x_set_led(&bp->link_params, &bp->link_vars, LED_MODE_OPER, 2129 - bp->link_vars.line_speed); 2127 + bnx2x_set_led(&bp->link_params, &bp->link_vars, 2128 + LED_MODE_OPER, bp->link_vars.line_speed); 2130 2129 2131 2130 return 0; 2132 2131 }
+5 -2
drivers/net/bonding/bond_3ad.c
··· 1482 1482 1483 1483 static int agg_device_up(const struct aggregator *agg) 1484 1484 { 1485 - return (netif_running(agg->slave->dev) && 1486 - netif_carrier_ok(agg->slave->dev)); 1485 + struct port *port = agg->lag_ports; 1486 + if (!port) 1487 + return 0; 1488 + return (netif_running(port->slave->dev) && 1489 + netif_carrier_ok(port->slave->dev)); 1487 1490 } 1488 1491 1489 1492 /**
+5 -5
drivers/net/bonding/bond_3ad.h
··· 39 39 40 40 typedef struct mac_addr { 41 41 u8 mac_addr_value[ETH_ALEN]; 42 - } mac_addr_t; 42 + } __packed mac_addr_t; 43 43 44 44 enum { 45 45 BOND_AD_STABLE = 0, ··· 134 134 u8 tlv_type_terminator; // = terminator 135 135 u8 terminator_length; // = 0 136 136 u8 reserved_50[50]; // = 0 137 - } lacpdu_t; 137 + } __packed lacpdu_t; 138 138 139 139 typedef struct lacpdu_header { 140 140 struct ethhdr hdr; 141 141 struct lacpdu lacpdu; 142 - } lacpdu_header_t; 142 + } __packed lacpdu_header_t; 143 143 144 144 // Marker Protocol Data Unit(PDU) structure(43.5.3.2 in the 802.3ad standard) 145 145 typedef struct bond_marker { ··· 155 155 u8 tlv_type_terminator; // = 0x00 156 156 u8 terminator_length; // = 0x00 157 157 u8 reserved_90[90]; // = 0 158 - } bond_marker_t; 158 + } __packed bond_marker_t; 159 159 160 160 typedef struct bond_marker_header { 161 161 struct ethhdr hdr; 162 162 struct bond_marker marker; 163 - } bond_marker_header_t; 163 + } __packed bond_marker_header_t; 164 164 165 165 #pragma pack() 166 166
+3 -3
drivers/net/bonding/bond_alb.c
··· 176 176 bond_info->tx_hashtbl = new_hashtbl; 177 177 178 178 for (i = 0; i < TLB_HASH_TABLE_SIZE; i++) { 179 - tlb_init_table_entry(&bond_info->tx_hashtbl[i], 1); 179 + tlb_init_table_entry(&bond_info->tx_hashtbl[i], 0); 180 180 } 181 181 182 182 _unlock_tx_hashtbl(bond); ··· 701 701 */ 702 702 rlb_choose_channel(skb, bond); 703 703 704 - /* The ARP relpy packets must be delayed so that 704 + /* The ARP reply packets must be delayed so that 705 705 * they can cancel out the influence of the ARP request. 706 706 */ 707 707 bond->alb_info.rlb_update_delay_counter = RLB_UPDATE_DELAY; ··· 1042 1042 * 1043 1043 * If the permanent hw address of @slave is @bond's hw address, we need to 1044 1044 * find a different hw address to give @slave, that isn't in use by any other 1045 - * slave in the bond. This address must be, of course, one of the premanent 1045 + * slave in the bond. This address must be, of course, one of the permanent 1046 1046 * addresses of the other slaves. 1047 1047 * 1048 1048 * We go over the slave list, and for each slave there we compare its
+1 -3
drivers/net/bonding/bond_alb.h
··· 75 75 * gave this entry index. 76 76 */ 77 77 u32 tx_bytes; /* Each Client accumulates the BytesTx that 78 - * were tranmitted to it, and after each 78 + * were transmitted to it, and after each 79 79 * CallBack the LoadHistory is divided 80 80 * by the balance interval 81 81 */ ··· 122 122 }; 123 123 124 124 struct alb_bond_info { 125 - struct timer_list alb_timer; 126 125 struct tlb_client_info *tx_hashtbl; /* Dynamically allocated */ 127 126 spinlock_t tx_hashtbl_lock; 128 127 u32 unbalanced_load; ··· 139 140 struct slave *next_rx_slave;/* next slave to be assigned 140 141 * to a new rx client for 141 142 */ 142 - u32 rlb_interval_counter; 143 143 u8 primary_is_promisc; /* boolean */ 144 144 u32 rlb_promisc_timeout_counter;/* counts primary 145 145 * promiscuity time
+5 -2
drivers/net/can/mscan/mpc5xxx_can.c
··· 247 247 } 248 248 #endif /* CONFIG_PPC_MPC512x */ 249 249 250 + static struct of_device_id mpc5xxx_can_table[]; 250 251 static int __devinit mpc5xxx_can_probe(struct platform_device *ofdev) 251 252 { 253 + const struct of_device_id *match; 252 254 struct mpc5xxx_can_data *data; 253 255 struct device_node *np = ofdev->dev.of_node; 254 256 struct net_device *dev; ··· 260 258 int irq, mscan_clksrc = 0; 261 259 int err = -ENOMEM; 262 260 263 - if (!ofdev->dev.of_match) 261 + match = of_match_device(mpc5xxx_can_table, &ofdev->dev); 262 + if (!match) 264 263 return -EINVAL; 265 - data = (struct mpc5xxx_can_data *)of_dev->dev.of_match->data; 264 + data = match->data; 266 265 267 266 base = of_iomap(np, 0); 268 267 if (!base) {
+1 -1
drivers/net/can/sja1000/sja1000.c
··· 346 346 | (priv->read_reg(priv, REG_ID2) >> 5); 347 347 } 348 348 349 + cf->can_dlc = get_can_dlc(fi & 0x0F); 349 350 if (fi & FI_RTR) { 350 351 id |= CAN_RTR_FLAG; 351 352 } else { 352 - cf->can_dlc = get_can_dlc(fi & 0x0F); 353 353 for (i = 0; i < cf->can_dlc; i++) 354 354 cf->data[i] = priv->read_reg(priv, dreg++); 355 355 }
+3 -1
drivers/net/can/slcan.c
··· 583 583 /* Done. We have linked the TTY line to a channel. */ 584 584 rtnl_unlock(); 585 585 tty->receive_room = 65536; /* We don't flow control */ 586 - return sl->dev->base_addr; 586 + 587 + /* TTY layer expects 0 on success */ 588 + return 0; 587 589 588 590 err_free_chan: 589 591 sl->tty = NULL;
+13 -8
drivers/net/ehea/ehea_ethtool.c
··· 55 55 cmd->duplex = -1; 56 56 } 57 57 58 - cmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_1000baseT_Full 59 - | SUPPORTED_100baseT_Full | SUPPORTED_100baseT_Half 60 - | SUPPORTED_10baseT_Full | SUPPORTED_10baseT_Half 61 - | SUPPORTED_Autoneg | SUPPORTED_FIBRE); 58 + if (cmd->speed == SPEED_10000) { 59 + cmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE); 60 + cmd->advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE); 61 + cmd->port = PORT_FIBRE; 62 + } else { 63 + cmd->supported = (SUPPORTED_1000baseT_Full | SUPPORTED_100baseT_Full 64 + | SUPPORTED_100baseT_Half | SUPPORTED_10baseT_Full 65 + | SUPPORTED_10baseT_Half | SUPPORTED_Autoneg 66 + | SUPPORTED_TP); 67 + cmd->advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg 68 + | ADVERTISED_TP); 69 + cmd->port = PORT_TP; 70 + } 62 71 63 - cmd->advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_Autoneg 64 - | ADVERTISED_FIBRE); 65 - 66 - cmd->port = PORT_FIBRE; 67 72 cmd->autoneg = port->autoneg == 1 ? AUTONEG_ENABLE : AUTONEG_DISABLE; 68 73 69 74 return 0;
+9 -6
drivers/net/ehea/ehea_main.c
··· 2688 2688 netif_start_queue(dev); 2689 2689 } 2690 2690 2691 - init_waitqueue_head(&port->swqe_avail_wq); 2692 - init_waitqueue_head(&port->restart_wq); 2693 - 2694 2691 mutex_unlock(&port->port_lock); 2695 2692 2696 2693 return ret; ··· 3037 3040 3038 3041 if (dev->flags & IFF_UP) { 3039 3042 mutex_lock(&port->port_lock); 3040 - port_napi_enable(port); 3041 3043 ret = ehea_restart_qps(dev); 3042 - check_sqs(port); 3043 - if (!ret) 3044 + if (!ret) { 3045 + check_sqs(port); 3046 + port_napi_enable(port); 3044 3047 netif_wake_queue(dev); 3048 + } else { 3049 + netdev_err(dev, "Unable to restart QPS\n"); 3050 + } 3045 3051 mutex_unlock(&port->port_lock); 3046 3052 } 3047 3053 } ··· 3272 3272 dev->features |= NETIF_F_LRO; 3273 3273 3274 3274 INIT_WORK(&port->reset_task, ehea_reset_port); 3275 + 3276 + init_waitqueue_head(&port->swqe_avail_wq); 3277 + init_waitqueue_head(&port->restart_wq); 3275 3278 3276 3279 ret = register_netdev(dev); 3277 3280 if (ret) {
+6 -3
drivers/net/fs_enet/fs_enet-main.c
··· 998 998 #endif 999 999 }; 1000 1000 1001 + static struct of_device_id fs_enet_match[]; 1001 1002 static int __devinit fs_enet_probe(struct platform_device *ofdev) 1002 1003 { 1004 + const struct of_device_id *match; 1003 1005 struct net_device *ndev; 1004 1006 struct fs_enet_private *fep; 1005 1007 struct fs_platform_info *fpi; ··· 1009 1007 const u8 *mac_addr; 1010 1008 int privsize, len, ret = -ENODEV; 1011 1009 1012 - if (!ofdev->dev.of_match) 1010 + match = of_match_device(fs_enet_match, &ofdev->dev); 1011 + if (!match) 1013 1012 return -EINVAL; 1014 1013 1015 1014 fpi = kzalloc(sizeof(*fpi), GFP_KERNEL); 1016 1015 if (!fpi) 1017 1016 return -ENOMEM; 1018 1017 1019 - if (!IS_FEC(ofdev->dev.of_match)) { 1018 + if (!IS_FEC(match)) { 1020 1019 data = of_get_property(ofdev->dev.of_node, "fsl,cpm-command", &len); 1021 1020 if (!data || len != 4) 1022 1021 goto out_free_fpi; ··· 1052 1049 fep->dev = &ofdev->dev; 1053 1050 fep->ndev = ndev; 1054 1051 fep->fpi = fpi; 1055 - fep->ops = ofdev->dev.of_match->data; 1052 + fep->ops = match->data; 1056 1053 1057 1054 ret = fep->ops->setup_data(ndev); 1058 1055 if (ret)
+4 -4
drivers/net/fs_enet/mac-fec.c
··· 226 226 } 227 227 228 228 FC(fecp, r_cntrl, FEC_RCNTRL_PROM); 229 - FW(fecp, hash_table_high, fep->fec.hthi); 230 - FW(fecp, hash_table_low, fep->fec.htlo); 229 + FW(fecp, grp_hash_table_high, fep->fec.hthi); 230 + FW(fecp, grp_hash_table_low, fep->fec.htlo); 231 231 } 232 232 233 233 static void set_multicast_list(struct net_device *dev) ··· 273 273 /* 274 274 * Reset all multicast. 275 275 */ 276 - FW(fecp, hash_table_high, fep->fec.hthi); 277 - FW(fecp, hash_table_low, fep->fec.htlo); 276 + FW(fecp, grp_hash_table_high, fep->fec.hthi); 277 + FW(fecp, grp_hash_table_low, fep->fec.htlo); 278 278 279 279 /* 280 280 * Set maximum receive buffer size.
+5 -2
drivers/net/fs_enet/mii-fec.c
··· 101 101 return 0; 102 102 } 103 103 104 + static struct of_device_id fs_enet_mdio_fec_match[]; 104 105 static int __devinit fs_enet_mdio_probe(struct platform_device *ofdev) 105 106 { 107 + const struct of_device_id *match; 106 108 struct resource res; 107 109 struct mii_bus *new_bus; 108 110 struct fec_info *fec; 109 111 int (*get_bus_freq)(struct device_node *); 110 112 int ret = -ENOMEM, clock, speed; 111 113 112 - if (!ofdev->dev.of_match) 114 + match = of_match_device(fs_enet_mdio_fec_match, &ofdev->dev); 115 + if (!match) 113 116 return -EINVAL; 114 - get_bus_freq = ofdev->dev.of_match->data; 117 + get_bus_freq = match->data; 115 118 116 119 new_bus = mdiobus_alloc(); 117 120 if (!new_bus)
+4 -4
drivers/net/ftmac100.c
··· 139 139 * that hardware reset completed (what the f*ck). 140 140 * We still need to wait for a while. 141 141 */ 142 - usleep_range(500, 1000); 142 + udelay(500); 143 143 return 0; 144 144 } 145 145 146 - usleep_range(1000, 10000); 146 + udelay(1000); 147 147 } 148 148 149 149 netdev_err(netdev, "software reset failed\n"); ··· 772 772 if ((phycr & FTMAC100_PHYCR_MIIRD) == 0) 773 773 return phycr & FTMAC100_PHYCR_MIIRDATA; 774 774 775 - usleep_range(100, 1000); 775 + udelay(100); 776 776 } 777 777 778 778 netdev_err(netdev, "mdio read timed out\n"); ··· 801 801 if ((phycr & FTMAC100_PHYCR_MIIWR) == 0) 802 802 return; 803 803 804 - usleep_range(100, 1000); 804 + udelay(100); 805 805 } 806 806 807 807 netdev_err(netdev, "mdio write timed out\n");
+7 -7
drivers/net/hydra.c
··· 98 98 .ndo_open = hydra_open, 99 99 .ndo_stop = hydra_close, 100 100 101 - .ndo_start_xmit = ei_start_xmit, 102 - .ndo_tx_timeout = ei_tx_timeout, 103 - .ndo_get_stats = ei_get_stats, 104 - .ndo_set_multicast_list = ei_set_multicast_list, 101 + .ndo_start_xmit = __ei_start_xmit, 102 + .ndo_tx_timeout = __ei_tx_timeout, 103 + .ndo_get_stats = __ei_get_stats, 104 + .ndo_set_multicast_list = __ei_set_multicast_list, 105 105 .ndo_validate_addr = eth_validate_addr, 106 - .ndo_set_mac_address = eth_mac_addr, 106 + .ndo_set_mac_address = eth_mac_addr, 107 107 .ndo_change_mtu = eth_change_mtu, 108 108 #ifdef CONFIG_NET_POLL_CONTROLLER 109 - .ndo_poll_controller = ei_poll, 109 + .ndo_poll_controller = __ei_poll, 110 110 #endif 111 111 }; 112 112 ··· 125 125 0x10, 0x12, 0x14, 0x16, 0x18, 0x1a, 0x1c, 0x1e, 126 126 }; 127 127 128 - dev = alloc_ei_netdev(); 128 + dev = ____alloc_ei_netdev(0); 129 129 if (!dev) 130 130 return -ENOMEM; 131 131
+2 -1
drivers/net/loopback.c
··· 173 173 | NETIF_F_RXCSUM 174 174 | NETIF_F_HIGHDMA 175 175 | NETIF_F_LLTX 176 - | NETIF_F_NETNS_LOCAL; 176 + | NETIF_F_NETNS_LOCAL 177 + | NETIF_F_VLAN_CHALLENGED; 177 178 dev->ethtool_ops = &loopback_ethtool_ops; 178 179 dev->header_ops = &eth_header_ops; 179 180 dev->netdev_ops = &loopback_ops;
+4
drivers/net/mii.c
··· 49 49 result |= ADVERTISED_100baseT_Half; 50 50 if (advert & ADVERTISE_100FULL) 51 51 result |= ADVERTISED_100baseT_Full; 52 + if (advert & ADVERTISE_PAUSE_CAP) 53 + result |= ADVERTISED_Pause; 54 + if (advert & ADVERTISE_PAUSE_ASYM) 55 + result |= ADVERTISED_Asym_Pause; 52 56 53 57 return result; 54 58 }
+3
drivers/net/natsemi.c
··· 860 860 prev_eedata = eedata; 861 861 } 862 862 863 + /* Store MAC Address in perm_addr */ 864 + memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN); 865 + 863 866 dev->base_addr = (unsigned long __force) ioaddr; 864 867 dev->irq = irq; 865 868
+8 -8
drivers/net/ne-h8300.c
··· 167 167 #ifndef MODULE 168 168 struct net_device * __init ne_probe(int unit) 169 169 { 170 - struct net_device *dev = alloc_ei_netdev(); 170 + struct net_device *dev = ____alloc_ei_netdev(0); 171 171 int err; 172 172 173 173 if (!dev) ··· 197 197 .ndo_open = ne_open, 198 198 .ndo_stop = ne_close, 199 199 200 - .ndo_start_xmit = ei_start_xmit, 201 - .ndo_tx_timeout = ei_tx_timeout, 202 - .ndo_get_stats = ei_get_stats, 203 - .ndo_set_multicast_list = ei_set_multicast_list, 200 + .ndo_start_xmit = __ei_start_xmit, 201 + .ndo_tx_timeout = __ei_tx_timeout, 202 + .ndo_get_stats = __ei_get_stats, 203 + .ndo_set_multicast_list = __ei_set_multicast_list, 204 204 .ndo_validate_addr = eth_validate_addr, 205 - .ndo_set_mac_address = eth_mac_addr, 205 + .ndo_set_mac_address = eth_mac_addr, 206 206 .ndo_change_mtu = eth_change_mtu, 207 207 #ifdef CONFIG_NET_POLL_CONTROLLER 208 - .ndo_poll_controller = ei_poll, 208 + .ndo_poll_controller = __ei_poll, 209 209 #endif 210 210 }; 211 211 ··· 637 637 int err; 638 638 639 639 for (this_dev = 0; this_dev < MAX_NE_CARDS; this_dev++) { 640 - struct net_device *dev = alloc_ei_netdev(); 640 + struct net_device *dev = ____alloc_ei_netdev(0); 641 641 if (!dev) 642 642 break; 643 643 if (io[this_dev]) {
+8
drivers/net/netconsole.c
··· 671 671 goto done; 672 672 673 673 spin_lock_irqsave(&target_list_lock, flags); 674 + restart: 674 675 list_for_each_entry(nt, &target_list, list) { 675 676 netconsole_target_get(nt); 676 677 if (nt->np.dev == dev) { ··· 684 683 * rtnl_lock already held 685 684 */ 686 685 if (nt->np.dev) { 686 + spin_unlock_irqrestore( 687 + &target_list_lock, 688 + flags); 687 689 __netpoll_cleanup(&nt->np); 690 + spin_lock_irqsave(&target_list_lock, 691 + flags); 688 692 dev_put(nt->np.dev); 689 693 nt->np.dev = NULL; 694 + netconsole_target_put(nt); 695 + goto restart; 690 696 } 691 697 /* Fall through */ 692 698 case NETDEV_GOING_DOWN:
+2 -2
drivers/net/netxen/netxen_nic.h
··· 174 174 175 175 #define MAX_NUM_CARDS 4 176 176 177 - #define MAX_BUFFERS_PER_CMD 32 177 + #define NETXEN_MAX_FRAGS_PER_TX 14 178 178 #define MAX_TSO_HEADER_DESC 2 179 179 #define MGMT_CMD_DESC_RESV 4 180 180 #define TX_STOP_THRESH ((MAX_SKB_FRAGS >> 2) + MAX_TSO_HEADER_DESC \ ··· 558 558 */ 559 559 struct netxen_cmd_buffer { 560 560 struct sk_buff *skb; 561 - struct netxen_skb_frag frag_array[MAX_BUFFERS_PER_CMD + 1]; 561 + struct netxen_skb_frag frag_array[MAX_SKB_FRAGS + 1]; 562 562 u32 frag_count; 563 563 }; 564 564
+17
drivers/net/netxen/netxen_nic_main.c
··· 1844 1844 struct cmd_desc_type0 *hwdesc, *first_desc; 1845 1845 struct pci_dev *pdev; 1846 1846 int i, k; 1847 + int delta = 0; 1848 + struct skb_frag_struct *frag; 1847 1849 1848 1850 u32 producer; 1849 1851 int frag_count, no_of_desc; ··· 1853 1851 1854 1852 frag_count = skb_shinfo(skb)->nr_frags + 1; 1855 1853 1854 + /* 14 frags supported for normal packet and 1855 + * 32 frags supported for TSO packet 1856 + */ 1857 + if (!skb_is_gso(skb) && frag_count > NETXEN_MAX_FRAGS_PER_TX) { 1858 + 1859 + for (i = 0; i < (frag_count - NETXEN_MAX_FRAGS_PER_TX); i++) { 1860 + frag = &skb_shinfo(skb)->frags[i]; 1861 + delta += frag->size; 1862 + } 1863 + 1864 + if (!__pskb_pull_tail(skb, delta)) 1865 + goto drop_packet; 1866 + 1867 + frag_count = 1 + skb_shinfo(skb)->nr_frags; 1868 + } 1856 1869 /* 4 fragments per cmd des */ 1857 1870 no_of_desc = (frag_count + 3) >> 2; 1858 1871
+16 -7
drivers/net/pch_gbe/pch_gbe_main.c
··· 34 34 #define PCH_GBE_COPYBREAK_DEFAULT 256 35 35 #define PCH_GBE_PCI_BAR 1 36 36 37 + /* Macros for ML7223 */ 38 + #define PCI_VENDOR_ID_ROHM 0x10db 39 + #define PCI_DEVICE_ID_ROHM_ML7223_GBE 0x8013 40 + 37 41 #define PCH_GBE_TX_WEIGHT 64 38 42 #define PCH_GBE_RX_WEIGHT 64 39 43 #define PCH_GBE_RX_BUFFER_WRITE 16 ··· 47 43 48 44 #define PCH_GBE_MAC_RGMII_CTRL_SETTING ( \ 49 45 PCH_GBE_CHIP_TYPE_INTERNAL | \ 50 - PCH_GBE_RGMII_MODE_RGMII | \ 51 - PCH_GBE_CRS_SEL \ 46 + PCH_GBE_RGMII_MODE_RGMII \ 52 47 ) 53 48 54 49 /* Ethertype field values */ ··· 1497 1494 /* Write meta date of skb */ 1498 1495 skb_put(skb, length); 1499 1496 skb->protocol = eth_type_trans(skb, netdev); 1500 - if ((tcp_ip_status & PCH_GBE_RXD_ACC_STAT_TCPIPOK) == 1501 - PCH_GBE_RXD_ACC_STAT_TCPIPOK) { 1502 - skb->ip_summed = CHECKSUM_UNNECESSARY; 1503 - } else { 1497 + if (tcp_ip_status & PCH_GBE_RXD_ACC_STAT_TCPIPOK) 1504 1498 skb->ip_summed = CHECKSUM_NONE; 1505 - } 1499 + else 1500 + skb->ip_summed = CHECKSUM_UNNECESSARY; 1501 + 1506 1502 napi_gro_receive(&adapter->napi, skb); 1507 1503 (*work_done)++; 1508 1504 pr_debug("Receive skb->ip_summed: %d length: %d\n", ··· 2417 2415 static DEFINE_PCI_DEVICE_TABLE(pch_gbe_pcidev_id) = { 2418 2416 {.vendor = PCI_VENDOR_ID_INTEL, 2419 2417 .device = PCI_DEVICE_ID_INTEL_IOH1_GBE, 2418 + .subvendor = PCI_ANY_ID, 2419 + .subdevice = PCI_ANY_ID, 2420 + .class = (PCI_CLASS_NETWORK_ETHERNET << 8), 2421 + .class_mask = (0xFFFF00) 2422 + }, 2423 + {.vendor = PCI_VENDOR_ID_ROHM, 2424 + .device = PCI_DEVICE_ID_ROHM_ML7223_GBE, 2420 2425 .subvendor = PCI_ANY_ID, 2421 2426 .subdevice = PCI_ANY_ID, 2422 2427 .class = (PCI_CLASS_NETWORK_ETHERNET << 8),
+1
drivers/net/qlcnic/qlcnic.h
··· 99 99 #define TX_UDPV6_PKT 0x0c 100 100 101 101 /* Tx defines */ 102 + #define QLCNIC_MAX_FRAGS_PER_TX 14 102 103 #define MAX_TSO_HEADER_DESC 2 103 104 #define MGMT_CMD_DESC_RESV 4 104 105 #define TX_STOP_THRESH ((MAX_SKB_FRAGS >> 2) + MAX_TSO_HEADER_DESC \
+14
drivers/net/qlcnic/qlcnic_main.c
··· 2099 2099 struct cmd_desc_type0 *hwdesc, *first_desc; 2100 2100 struct pci_dev *pdev; 2101 2101 struct ethhdr *phdr; 2102 + int delta = 0; 2102 2103 int i, k; 2103 2104 2104 2105 u32 producer; ··· 2119 2118 } 2120 2119 2121 2120 frag_count = skb_shinfo(skb)->nr_frags + 1; 2121 + /* 14 frags supported for normal packet and 2122 + * 32 frags supported for TSO packet 2123 + */ 2124 + if (!skb_is_gso(skb) && frag_count > QLCNIC_MAX_FRAGS_PER_TX) { 2125 + 2126 + for (i = 0; i < (frag_count - QLCNIC_MAX_FRAGS_PER_TX); i++) 2127 + delta += skb_shinfo(skb)->frags[i].size; 2128 + 2129 + if (!__pskb_pull_tail(skb, delta)) 2130 + goto drop_packet; 2131 + 2132 + frag_count = 1 + skb_shinfo(skb)->nr_frags; 2133 + } 2122 2134 2123 2135 /* 4 fragments per cmd des */ 2124 2136 no_of_desc = (frag_count + 3) >> 2;
+71 -28
drivers/net/r8169.c
··· 170 170 }; 171 171 #undef _R 172 172 173 + static const struct rtl_firmware_info { 174 + int mac_version; 175 + const char *fw_name; 176 + } rtl_firmware_infos[] = { 177 + { .mac_version = RTL_GIGA_MAC_VER_25, .fw_name = FIRMWARE_8168D_1 }, 178 + { .mac_version = RTL_GIGA_MAC_VER_26, .fw_name = FIRMWARE_8168D_2 }, 179 + { .mac_version = RTL_GIGA_MAC_VER_29, .fw_name = FIRMWARE_8105E_1 }, 180 + { .mac_version = RTL_GIGA_MAC_VER_30, .fw_name = FIRMWARE_8105E_1 } 181 + }; 182 + 173 183 enum cfg_version { 174 184 RTL_CFG_0 = 0x00, 175 185 RTL_CFG_1, ··· 575 565 u32 saved_wolopts; 576 566 577 567 const struct firmware *fw; 568 + #define RTL_FIRMWARE_UNKNOWN ERR_PTR(-EAGAIN); 578 569 }; 579 570 580 571 MODULE_AUTHOR("Realtek and the Linux r8169 crew <netdev@vger.kernel.org>"); ··· 1800 1789 1801 1790 static void rtl_release_firmware(struct rtl8169_private *tp) 1802 1791 { 1803 - release_firmware(tp->fw); 1804 - tp->fw = NULL; 1792 + if (!IS_ERR_OR_NULL(tp->fw)) 1793 + release_firmware(tp->fw); 1794 + tp->fw = RTL_FIRMWARE_UNKNOWN; 1805 1795 } 1806 1796 1807 - static int rtl_apply_firmware(struct rtl8169_private *tp, const char *fw_name) 1797 + static void rtl_apply_firmware(struct rtl8169_private *tp) 1808 1798 { 1809 - const struct firmware **fw = &tp->fw; 1810 - int rc = !*fw; 1811 - 1812 - if (rc) { 1813 - rc = request_firmware(fw, fw_name, &tp->pci_dev->dev); 1814 - if (rc < 0) 1815 - goto out; 1816 - } 1799 + const struct firmware *fw = tp->fw; 1817 1800 1818 1801 /* TODO: release firmware once rtl_phy_write_fw signals failures. 
*/ 1819 - rtl_phy_write_fw(tp, *fw); 1820 - out: 1821 - return rc; 1802 + if (!IS_ERR_OR_NULL(fw)) 1803 + rtl_phy_write_fw(tp, fw); 1804 + } 1805 + 1806 + static void rtl_apply_firmware_cond(struct rtl8169_private *tp, u8 reg, u16 val) 1807 + { 1808 + if (rtl_readphy(tp, reg) != val) 1809 + netif_warn(tp, hw, tp->dev, "chipset not ready for firmware\n"); 1810 + else 1811 + rtl_apply_firmware(tp); 1822 1812 } 1823 1813 1824 1814 static void rtl8169s_hw_phy_config(struct rtl8169_private *tp) ··· 2258 2246 2259 2247 rtl_writephy(tp, 0x1f, 0x0005); 2260 2248 rtl_writephy(tp, 0x05, 0x001b); 2261 - if ((rtl_readphy(tp, 0x06) != 0xbf00) || 2262 - (rtl_apply_firmware(tp, FIRMWARE_8168D_1) < 0)) { 2263 - netif_warn(tp, probe, tp->dev, "unable to apply firmware patch\n"); 2264 - } 2249 + 2250 + rtl_apply_firmware_cond(tp, MII_EXPANSION, 0xbf00); 2265 2251 2266 2252 rtl_writephy(tp, 0x1f, 0x0000); 2267 2253 } ··· 2361 2351 2362 2352 rtl_writephy(tp, 0x1f, 0x0005); 2363 2353 rtl_writephy(tp, 0x05, 0x001b); 2364 - if ((rtl_readphy(tp, 0x06) != 0xb300) || 2365 - (rtl_apply_firmware(tp, FIRMWARE_8168D_2) < 0)) { 2366 - netif_warn(tp, probe, tp->dev, "unable to apply firmware patch\n"); 2367 - } 2354 + 2355 + rtl_apply_firmware_cond(tp, MII_EXPANSION, 0xb300); 2368 2356 2369 2357 rtl_writephy(tp, 0x1f, 0x0000); 2370 2358 } ··· 2482 2474 rtl_writephy(tp, 0x18, 0x0310); 2483 2475 msleep(100); 2484 2476 2485 - if (rtl_apply_firmware(tp, FIRMWARE_8105E_1) < 0) 2486 - netif_warn(tp, probe, tp->dev, "unable to apply firmware patch\n"); 2477 + rtl_apply_firmware(tp); 2487 2478 2488 2479 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); 2489 2480 } ··· 3244 3237 tp->timer.data = (unsigned long) dev; 3245 3238 tp->timer.function = rtl8169_phy_timer; 3246 3239 3240 + tp->fw = RTL_FIRMWARE_UNKNOWN; 3241 + 3247 3242 rc = register_netdev(dev); 3248 3243 if (rc < 0) 3249 3244 goto err_out_msi_4; ··· 3297 3288 3298 3289 cancel_delayed_work_sync(&tp->task); 3299 3290 3300 - 
rtl_release_firmware(tp); 3301 - 3302 3291 unregister_netdev(dev); 3292 + 3293 + rtl_release_firmware(tp); 3303 3294 3304 3295 if (pci_dev_run_wake(pdev)) 3305 3296 pm_runtime_get_noresume(&pdev->dev); ··· 3310 3301 rtl_disable_msi(pdev, tp); 3311 3302 rtl8169_release_board(pdev, dev, tp->mmio_addr); 3312 3303 pci_set_drvdata(pdev, NULL); 3304 + } 3305 + 3306 + static void rtl_request_firmware(struct rtl8169_private *tp) 3307 + { 3308 + int i; 3309 + 3310 + /* Return early if the firmware is already loaded / cached. */ 3311 + if (!IS_ERR(tp->fw)) 3312 + goto out; 3313 + 3314 + for (i = 0; i < ARRAY_SIZE(rtl_firmware_infos); i++) { 3315 + const struct rtl_firmware_info *info = rtl_firmware_infos + i; 3316 + 3317 + if (info->mac_version == tp->mac_version) { 3318 + const char *name = info->fw_name; 3319 + int rc; 3320 + 3321 + rc = request_firmware(&tp->fw, name, &tp->pci_dev->dev); 3322 + if (rc < 0) { 3323 + netif_warn(tp, ifup, tp->dev, "unable to load " 3324 + "firmware patch %s (%d)\n", name, rc); 3325 + goto out_disable_request_firmware; 3326 + } 3327 + goto out; 3328 + } 3329 + } 3330 + 3331 + out_disable_request_firmware: 3332 + tp->fw = NULL; 3333 + out: 3334 + return; 3313 3335 } 3314 3336 3315 3337 static int rtl8169_open(struct net_device *dev) ··· 3374 3334 3375 3335 smp_mb(); 3376 3336 3337 + rtl_request_firmware(tp); 3338 + 3377 3339 retval = request_irq(dev->irq, rtl8169_interrupt, 3378 3340 (tp->features & RTL_FEATURE_MSI) ? 0 : IRQF_SHARED, 3379 3341 dev->name, dev); 3380 3342 if (retval < 0) 3381 - goto err_release_ring_2; 3343 + goto err_release_fw_2; 3382 3344 3383 3345 napi_enable(&tp->napi); 3384 3346 ··· 3401 3359 out: 3402 3360 return retval; 3403 3361 3404 - err_release_ring_2: 3362 + err_release_fw_2: 3363 + rtl_release_firmware(tp); 3405 3364 rtl8169_rx_clear(tp); 3406 3365 err_free_rx_1: 3407 3366 dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray,
+4 -2
drivers/net/sfc/efx.c
··· 328 328 * processing to finish, then directly poll (and ack ) the eventq. 329 329 * Finally reenable NAPI and interrupts. 330 330 * 331 - * Since we are touching interrupts the caller should hold the suspend lock 331 + * This is for use only during a loopback self-test. It must not 332 + * deliver any packets up the stack as this can result in deadlock. 332 333 */ 333 334 void efx_process_channel_now(struct efx_channel *channel) 334 335 { ··· 337 336 338 337 BUG_ON(channel->channel >= efx->n_channels); 339 338 BUG_ON(!channel->enabled); 339 + BUG_ON(!efx->loopback_selftest); 340 340 341 341 /* Disable interrupts and wait for ISRs to complete */ 342 342 efx_nic_disable_interrupts(efx); ··· 1438 1436 * restart the transmit interface early so the watchdog timer stops */ 1439 1437 efx_start_port(efx); 1440 1438 1441 - if (efx_dev_registered(efx)) 1439 + if (efx_dev_registered(efx) && !efx->port_inhibited) 1442 1440 netif_tx_wake_all_queues(efx->net_dev); 1443 1441 1444 1442 efx_for_each_channel(channel, efx)
+2
drivers/net/sfc/io.h
··· 152 152 153 153 spin_lock_irqsave(&efx->biu_lock, flags); 154 154 value->u32[0] = _efx_readd(efx, reg + 0); 155 + rmb(); 155 156 value->u32[1] = _efx_readd(efx, reg + 4); 156 157 value->u32[2] = _efx_readd(efx, reg + 8); 157 158 value->u32[3] = _efx_readd(efx, reg + 12); ··· 175 174 value->u64[0] = (__force __le64)__raw_readq(membase + addr); 176 175 #else 177 176 value->u32[0] = (__force __le32)__raw_readl(membase + addr); 177 + rmb(); 178 178 value->u32[1] = (__force __le32)__raw_readl(membase + addr + 4); 179 179 #endif 180 180 spin_unlock_irqrestore(&efx->biu_lock, flags);
+30 -19
drivers/net/sfc/mcdi.c
··· 50 50 return &nic_data->mcdi; 51 51 } 52 52 53 + static inline void 54 + efx_mcdi_readd(struct efx_nic *efx, efx_dword_t *value, unsigned reg) 55 + { 56 + struct siena_nic_data *nic_data = efx->nic_data; 57 + value->u32[0] = (__force __le32)__raw_readl(nic_data->mcdi_smem + reg); 58 + } 59 + 60 + static inline void 61 + efx_mcdi_writed(struct efx_nic *efx, const efx_dword_t *value, unsigned reg) 62 + { 63 + struct siena_nic_data *nic_data = efx->nic_data; 64 + __raw_writel((__force u32)value->u32[0], nic_data->mcdi_smem + reg); 65 + } 66 + 53 67 void efx_mcdi_init(struct efx_nic *efx) 54 68 { 55 69 struct efx_mcdi_iface *mcdi; ··· 84 70 const u8 *inbuf, size_t inlen) 85 71 { 86 72 struct efx_mcdi_iface *mcdi = efx_mcdi(efx); 87 - unsigned pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx); 88 - unsigned doorbell = FR_CZ_MC_TREG_SMEM + MCDI_DOORBELL(efx); 73 + unsigned pdu = MCDI_PDU(efx); 74 + unsigned doorbell = MCDI_DOORBELL(efx); 89 75 unsigned int i; 90 76 efx_dword_t hdr; 91 77 u32 xflags, seqno; ··· 106 92 MCDI_HEADER_SEQ, seqno, 107 93 MCDI_HEADER_XFLAGS, xflags); 108 94 109 - efx_writed(efx, &hdr, pdu); 95 + efx_mcdi_writed(efx, &hdr, pdu); 110 96 111 - for (i = 0; i < inlen; i += 4) { 112 - _efx_writed(efx, *((__le32 *)(inbuf + i)), pdu + 4 + i); 113 - /* use wmb() within loop to inhibit write combining */ 114 - wmb(); 115 - } 97 + for (i = 0; i < inlen; i += 4) 98 + efx_mcdi_writed(efx, (const efx_dword_t *)(inbuf + i), 99 + pdu + 4 + i); 116 100 117 101 /* ring the doorbell with a distinctive value */ 118 - _efx_writed(efx, (__force __le32) 0x45789abc, doorbell); 119 - wmb(); 102 + EFX_POPULATE_DWORD_1(hdr, EFX_DWORD_0, 0x45789abc); 103 + efx_mcdi_writed(efx, &hdr, doorbell); 120 104 } 121 105 122 106 static void efx_mcdi_copyout(struct efx_nic *efx, u8 *outbuf, size_t outlen) 123 107 { 124 108 struct efx_mcdi_iface *mcdi = efx_mcdi(efx); 125 - unsigned int pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx); 109 + unsigned int pdu = MCDI_PDU(efx); 126 110 int i; 127 111 
128 112 BUG_ON(atomic_read(&mcdi->state) == MCDI_STATE_QUIESCENT); 129 113 BUG_ON(outlen & 3 || outlen >= 0x100); 130 114 131 115 for (i = 0; i < outlen; i += 4) 132 - *((__le32 *)(outbuf + i)) = _efx_readd(efx, pdu + 4 + i); 116 + efx_mcdi_readd(efx, (efx_dword_t *)(outbuf + i), pdu + 4 + i); 133 117 } 134 118 135 119 static int efx_mcdi_poll(struct efx_nic *efx) ··· 135 123 struct efx_mcdi_iface *mcdi = efx_mcdi(efx); 136 124 unsigned int time, finish; 137 125 unsigned int respseq, respcmd, error; 138 - unsigned int pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx); 126 + unsigned int pdu = MCDI_PDU(efx); 139 127 unsigned int rc, spins; 140 128 efx_dword_t reg; 141 129 ··· 161 149 162 150 time = get_seconds(); 163 151 164 - rmb(); 165 - efx_readd(efx, &reg, pdu); 152 + efx_mcdi_readd(efx, &reg, pdu); 166 153 167 154 /* All 1's indicates that shared memory is in reset (and is 168 155 * not a valid header). Wait for it to come out reset before ··· 188 177 respseq, mcdi->seqno); 189 178 rc = EIO; 190 179 } else if (error) { 191 - efx_readd(efx, &reg, pdu + 4); 180 + efx_mcdi_readd(efx, &reg, pdu + 4); 192 181 switch (EFX_DWORD_FIELD(reg, EFX_DWORD_0)) { 193 182 #define TRANSLATE_ERROR(name) \ 194 183 case MC_CMD_ERR_ ## name: \ ··· 222 211 /* Test and clear MC-rebooted flag for this port/function */ 223 212 int efx_mcdi_poll_reboot(struct efx_nic *efx) 224 213 { 225 - unsigned int addr = FR_CZ_MC_TREG_SMEM + MCDI_REBOOT_FLAG(efx); 214 + unsigned int addr = MCDI_REBOOT_FLAG(efx); 226 215 efx_dword_t reg; 227 216 uint32_t value; 228 217 229 218 if (efx_nic_rev(efx) < EFX_REV_SIENA_A0) 230 219 return false; 231 220 232 - efx_readd(efx, &reg, addr); 221 + efx_mcdi_readd(efx, &reg, addr); 233 222 value = EFX_DWORD_FIELD(reg, EFX_DWORD_0); 234 223 235 224 if (value == 0) 236 225 return 0; 237 226 238 227 EFX_ZERO_DWORD(reg); 239 - efx_writed(efx, &reg, addr); 228 + efx_mcdi_writed(efx, &reg, addr); 240 229 241 230 if (value == MC_STATUS_DWORD_ASSERT) 242 231 return -EINTR;
-2
drivers/net/sfc/net_driver.h
··· 330 330 * @eventq_mask: Event queue pointer mask 331 331 * @eventq_read_ptr: Event queue read pointer 332 332 * @last_eventq_read_ptr: Last event queue read pointer value. 333 - * @magic_count: Event queue test event count 334 333 * @irq_count: Number of IRQs since last adaptive moderation decision 335 334 * @irq_mod_score: IRQ moderation score 336 335 * @rx_alloc_level: Watermark based heuristic counter for pushing descriptors ··· 359 360 unsigned int eventq_mask; 360 361 unsigned int eventq_read_ptr; 361 362 unsigned int last_eventq_read_ptr; 362 - unsigned int magic_count; 363 363 364 364 unsigned int irq_count; 365 365 unsigned int irq_mod_score;
+22 -7
drivers/net/sfc/nic.c
··· 84 84 static inline efx_qword_t *efx_event(struct efx_channel *channel, 85 85 unsigned int index) 86 86 { 87 - return ((efx_qword_t *) (channel->eventq.addr)) + index; 87 + return ((efx_qword_t *) (channel->eventq.addr)) + 88 + (index & channel->eventq_mask); 88 89 } 89 90 90 91 /* See if an event is present ··· 674 673 efx_dword_t reg; 675 674 struct efx_nic *efx = channel->efx; 676 675 677 - EFX_POPULATE_DWORD_1(reg, FRF_AZ_EVQ_RPTR, channel->eventq_read_ptr); 676 + EFX_POPULATE_DWORD_1(reg, FRF_AZ_EVQ_RPTR, 677 + channel->eventq_read_ptr & channel->eventq_mask); 678 678 efx_writed_table(efx, &reg, efx->type->evq_rptr_tbl_base, 679 679 channel->channel); 680 680 } ··· 910 908 911 909 code = EFX_QWORD_FIELD(*event, FSF_AZ_DRV_GEN_EV_MAGIC); 912 910 if (code == EFX_CHANNEL_MAGIC_TEST(channel)) 913 - ++channel->magic_count; 911 + ; /* ignore */ 914 912 else if (code == EFX_CHANNEL_MAGIC_FILL(channel)) 915 913 /* The queue must be empty, so we won't receive any rx 916 914 * events, so efx_process_channel() won't refill the ··· 1017 1015 /* Clear this event by marking it all ones */ 1018 1016 EFX_SET_QWORD(*p_event); 1019 1017 1020 - /* Increment read pointer */ 1021 - read_ptr = (read_ptr + 1) & channel->eventq_mask; 1018 + ++read_ptr; 1022 1019 1023 1020 ev_code = EFX_QWORD_FIELD(event, FSF_AZ_EV_CODE); 1024 1021 ··· 1061 1060 return spent; 1062 1061 } 1063 1062 1063 + /* Check whether an event is present in the eventq at the current 1064 + * read pointer. Only useful for self-test. 
1065 + */ 1066 + bool efx_nic_event_present(struct efx_channel *channel) 1067 + { 1068 + return efx_event_present(efx_event(channel, channel->eventq_read_ptr)); 1069 + } 1064 1070 1065 1071 /* Allocate buffer table entries for event queue */ 1066 1072 int efx_nic_probe_eventq(struct efx_channel *channel) ··· 1173 1165 struct efx_tx_queue *tx_queue; 1174 1166 struct efx_rx_queue *rx_queue; 1175 1167 unsigned int read_ptr = channel->eventq_read_ptr; 1176 - unsigned int end_ptr = (read_ptr - 1) & channel->eventq_mask; 1168 + unsigned int end_ptr = read_ptr + channel->eventq_mask - 1; 1177 1169 1178 1170 do { 1179 1171 efx_qword_t *event = efx_event(channel, read_ptr); ··· 1213 1205 * it's ok to throw away every non-flush event */ 1214 1206 EFX_SET_QWORD(*event); 1215 1207 1216 - read_ptr = (read_ptr + 1) & channel->eventq_mask; 1208 + ++read_ptr; 1217 1209 } while (read_ptr != end_ptr); 1218 1210 1219 1211 channel->eventq_read_ptr = read_ptr; ··· 1936 1928 continue; 1937 1929 1938 1930 size = min_t(size_t, table->step, 16); 1931 + 1932 + if (table->offset >= efx->type->mem_map_size) { 1933 + /* No longer mapped; return dummy data */ 1934 + memcpy(buf, "\xde\xc0\xad\xde", 4); 1935 + buf += table->rows * size; 1936 + continue; 1937 + } 1939 1938 1940 1939 for (i = 0; i < table->rows; i++) { 1941 1940 switch (table->step) {
+3
drivers/net/sfc/nic.h
··· 143 143 /** 144 144 * struct siena_nic_data - Siena NIC state 145 145 * @mcdi: Management-Controller-to-Driver Interface 146 + * @mcdi_smem: MCDI shared memory mapping. The mapping is always uncacheable. 146 147 * @wol_filter_id: Wake-on-LAN packet filter id 147 148 */ 148 149 struct siena_nic_data { 149 150 struct efx_mcdi_iface mcdi; 151 + void __iomem *mcdi_smem; 150 152 int wol_filter_id; 151 153 }; 152 154 ··· 186 184 extern void efx_nic_remove_eventq(struct efx_channel *channel); 187 185 extern int efx_nic_process_eventq(struct efx_channel *channel, int rx_quota); 188 186 extern void efx_nic_eventq_read_ack(struct efx_channel *channel); 187 + extern bool efx_nic_event_present(struct efx_channel *channel); 189 188 190 189 /* MAC/PHY */ 191 190 extern void falcon_drain_tx_fifo(struct efx_nic *efx);
+6 -19
drivers/net/sfc/selftest.c
··· 131 131 static int efx_test_interrupts(struct efx_nic *efx, 132 132 struct efx_self_tests *tests) 133 133 { 134 - struct efx_channel *channel; 135 - 136 134 netif_dbg(efx, drv, efx->net_dev, "testing interrupts\n"); 137 135 tests->interrupt = -1; 138 136 139 137 /* Reset interrupt flag */ 140 138 efx->last_irq_cpu = -1; 141 139 smp_wmb(); 142 - 143 - /* ACK each interrupting event queue. Receiving an interrupt due to 144 - * traffic before a test event is raised is considered a pass */ 145 - efx_for_each_channel(channel, efx) { 146 - if (channel->work_pending) 147 - efx_process_channel_now(channel); 148 - if (efx->last_irq_cpu >= 0) 149 - goto success; 150 - } 151 140 152 141 efx_nic_generate_interrupt(efx); 153 142 ··· 162 173 struct efx_self_tests *tests) 163 174 { 164 175 struct efx_nic *efx = channel->efx; 165 - unsigned int magic_count, count; 176 + unsigned int read_ptr, count; 166 177 167 178 tests->eventq_dma[channel->channel] = -1; 168 179 tests->eventq_int[channel->channel] = -1; 169 180 tests->eventq_poll[channel->channel] = -1; 170 181 171 - magic_count = channel->magic_count; 182 + read_ptr = channel->eventq_read_ptr; 172 183 channel->efx->last_irq_cpu = -1; 173 184 smp_wmb(); 174 185 ··· 179 190 do { 180 191 schedule_timeout_uninterruptible(HZ / 100); 181 192 182 - if (channel->work_pending) 183 - efx_process_channel_now(channel); 184 - 185 - if (channel->magic_count != magic_count) 193 + if (ACCESS_ONCE(channel->eventq_read_ptr) != read_ptr) 186 194 goto eventq_ok; 187 195 } while (++count < 2); 188 196 ··· 197 211 } 198 212 199 213 /* Check to see if event was received even if interrupt wasn't */ 200 - efx_process_channel_now(channel); 201 - if (channel->magic_count != magic_count) { 214 + if (efx_nic_event_present(channel)) { 202 215 netif_err(efx, drv, efx->net_dev, 203 216 "channel %d event was generated, but " 204 217 "failed to trigger an interrupt\n", channel->channel); ··· 754 769 efx->loopback_mode = loopback_mode; 755 770 
__efx_reconfigure_port(efx); 756 771 mutex_unlock(&efx->mac_lock); 772 + 773 + netif_tx_wake_all_queues(efx->net_dev); 757 774 758 775 return rc_test; 759 776 }
+21 -4
drivers/net/sfc/siena.c
··· 220 220 efx_reado(efx, &reg, FR_AZ_CS_DEBUG); 221 221 efx->net_dev->dev_id = EFX_OWORD_FIELD(reg, FRF_CZ_CS_PORT_NUM) - 1; 222 222 223 + /* Initialise MCDI */ 224 + nic_data->mcdi_smem = ioremap_nocache(efx->membase_phys + 225 + FR_CZ_MC_TREG_SMEM, 226 + FR_CZ_MC_TREG_SMEM_STEP * 227 + FR_CZ_MC_TREG_SMEM_ROWS); 228 + if (!nic_data->mcdi_smem) { 229 + netif_err(efx, probe, efx->net_dev, 230 + "could not map MCDI at %llx+%x\n", 231 + (unsigned long long)efx->membase_phys + 232 + FR_CZ_MC_TREG_SMEM, 233 + FR_CZ_MC_TREG_SMEM_STEP * FR_CZ_MC_TREG_SMEM_ROWS); 234 + rc = -ENOMEM; 235 + goto fail1; 236 + } 223 237 efx_mcdi_init(efx); 224 238 225 239 /* Recover from a failed assertion before probing */ 226 240 rc = efx_mcdi_handle_assertion(efx); 227 241 if (rc) 228 - goto fail1; 242 + goto fail2; 229 243 230 244 /* Let the BMC know that the driver is now in charge of link and 231 245 * filter settings. We must do this before we reset the NIC */ ··· 294 280 fail3: 295 281 efx_mcdi_drv_attach(efx, false, NULL); 296 282 fail2: 283 + iounmap(nic_data->mcdi_smem); 297 284 fail1: 298 285 kfree(efx->nic_data); 299 286 return rc; ··· 374 359 375 360 static void siena_remove_nic(struct efx_nic *efx) 376 361 { 362 + struct siena_nic_data *nic_data = efx->nic_data; 363 + 377 364 efx_nic_free_buffer(efx, &efx->irq_status); 378 365 379 366 siena_reset_hw(efx, RESET_TYPE_ALL); ··· 385 368 efx_mcdi_drv_attach(efx, false, NULL); 386 369 387 370 /* Tear down the private nic state */ 388 - kfree(efx->nic_data); 371 + iounmap(nic_data->mcdi_smem); 372 + kfree(nic_data); 389 373 efx->nic_data = NULL; 390 374 } 391 375 ··· 624 606 .default_mac_ops = &efx_mcdi_mac_operations, 625 607 626 608 .revision = EFX_REV_SIENA_A0, 627 - .mem_map_size = (FR_CZ_MC_TREG_SMEM + 628 - FR_CZ_MC_TREG_SMEM_STEP * FR_CZ_MC_TREG_SMEM_ROWS), 609 + .mem_map_size = FR_CZ_MC_TREG_SMEM, /* MC_TREG_SMEM mapped separately */ 629 610 .txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL, 630 611 .rxd_ptr_tbl_base = 
FR_BZ_RX_DESC_PTR_TBL, 631 612 .buf_tbl_base = FR_BZ_BUF_FULL_TBL,
+2 -1
drivers/net/sfc/tx.c
··· 435 435 * queue state. */ 436 436 smp_mb(); 437 437 if (unlikely(netif_tx_queue_stopped(tx_queue->core_txq)) && 438 - likely(efx->port_enabled)) { 438 + likely(efx->port_enabled) && 439 + likely(!efx->port_inhibited)) { 439 440 fill_level = tx_queue->insert_count - tx_queue->read_count; 440 441 if (fill_level < EFX_TXQ_THRESHOLD(efx)) { 441 442 EFX_BUG_ON_PARANOID(!efx_dev_registered(efx));
+19 -4
drivers/net/sis900.c
··· 240 240 * @net_dev: the net device to get address for 241 241 * 242 242 * Older SiS900 and friends, use EEPROM to store MAC address. 243 - * MAC address is read from read_eeprom() into @net_dev->dev_addr. 243 + * MAC address is read from read_eeprom() into @net_dev->dev_addr and 244 + * @net_dev->perm_addr. 244 245 */ 245 246 246 247 static int __devinit sis900_get_mac_addr(struct pci_dev * pci_dev, struct net_device *net_dev) ··· 262 261 for (i = 0; i < 3; i++) 263 262 ((u16 *)(net_dev->dev_addr))[i] = read_eeprom(ioaddr, i+EEPROMMACAddr); 264 263 264 + /* Store MAC Address in perm_addr */ 265 + memcpy(net_dev->perm_addr, net_dev->dev_addr, ETH_ALEN); 266 + 265 267 return 1; 266 268 } 267 269 ··· 275 271 * 276 272 * SiS630E model, use APC CMOS RAM to store MAC address. 277 273 * APC CMOS RAM is accessed through ISA bridge. 278 - * MAC address is read into @net_dev->dev_addr. 274 + * MAC address is read into @net_dev->dev_addr and 275 + * @net_dev->perm_addr. 279 276 */ 280 277 281 278 static int __devinit sis630e_get_mac_addr(struct pci_dev * pci_dev, ··· 301 296 outb(0x09 + i, 0x70); 302 297 ((u8 *)(net_dev->dev_addr))[i] = inb(0x71); 303 298 } 299 + 300 + /* Store MAC Address in perm_addr */ 301 + memcpy(net_dev->perm_addr, net_dev->dev_addr, ETH_ALEN); 302 + 304 303 pci_write_config_byte(isa_bridge, 0x48, reg & ~0x40); 305 304 pci_dev_put(isa_bridge); 306 305 ··· 319 310 * 320 311 * SiS635 model, set MAC Reload Bit to load Mac address from APC 321 312 * to rfdr. rfdr is accessed through rfcr. MAC address is read into 322 - * @net_dev->dev_addr. 313 + * @net_dev->dev_addr and @net_dev->perm_addr. 
323 314 */ 324 315 325 316 static int __devinit sis635_get_mac_addr(struct pci_dev * pci_dev, ··· 343 334 *( ((u16 *)net_dev->dev_addr) + i) = inw(ioaddr + rfdr); 344 335 } 345 336 337 + /* Store MAC Address in perm_addr */ 338 + memcpy(net_dev->perm_addr, net_dev->dev_addr, ETH_ALEN); 339 + 346 340 /* enable packet filtering */ 347 341 outl(rfcrSave | RFEN, rfcr + ioaddr); 348 342 ··· 365 353 * EEDONE signal to refuse EEPROM access by LAN. 366 354 * The EEPROM map of SiS962 or SiS963 is different to SiS900. 367 355 * The signature field in SiS962 or SiS963 spec is meaningless. 368 - * MAC address is read into @net_dev->dev_addr. 356 + * MAC address is read into @net_dev->dev_addr and @net_dev->perm_addr. 369 357 */ 370 358 371 359 static int __devinit sis96x_get_mac_addr(struct pci_dev * pci_dev, ··· 383 371 /* get MAC address from EEPROM */ 384 372 for (i = 0; i < 3; i++) 385 373 ((u16 *)(net_dev->dev_addr))[i] = read_eeprom(ioaddr, i+EEPROMMACAddr); 374 + 375 + /* Store MAC Address in perm_addr */ 376 + memcpy(net_dev->perm_addr, net_dev->dev_addr, ETH_ALEN); 386 377 387 378 outl(EEDONE, ee_addr); 388 379 return 1;
+3 -1
drivers/net/slip.c
··· 853 853 /* Done. We have linked the TTY line to a channel. */ 854 854 rtnl_unlock(); 855 855 tty->receive_room = 65536; /* We don't flow control */ 856 - return sl->dev->base_addr; 856 + 857 + /* TTY layer expects 0 on success */ 858 + return 0; 857 859 858 860 err_free_bufs: 859 861 sl_free_bufs(sl);
+14 -14
drivers/net/stmmac/dwmac_lib.c
··· 26 26 27 27 #undef DWMAC_DMA_DEBUG 28 28 #ifdef DWMAC_DMA_DEBUG 29 - #define DBG(fmt, args...) printk(fmt, ## args) 29 + #define DWMAC_LIB_DBG(fmt, args...) printk(fmt, ## args) 30 30 #else 31 - #define DBG(fmt, args...) do { } while (0) 31 + #define DWMAC_LIB_DBG(fmt, args...) do { } while (0) 32 32 #endif 33 33 34 34 /* CSR1 enables the transmit DMA to check for new descriptor */ ··· 152 152 /* read the status register (CSR5) */ 153 153 u32 intr_status = readl(ioaddr + DMA_STATUS); 154 154 155 - DBG(INFO, "%s: [CSR5: 0x%08x]\n", __func__, intr_status); 155 + DWMAC_LIB_DBG(KERN_INFO "%s: [CSR5: 0x%08x]\n", __func__, intr_status); 156 156 #ifdef DWMAC_DMA_DEBUG 157 157 /* It displays the DMA process states (CSR5 register) */ 158 158 show_tx_process_state(intr_status); ··· 160 160 #endif 161 161 /* ABNORMAL interrupts */ 162 162 if (unlikely(intr_status & DMA_STATUS_AIS)) { 163 - DBG(INFO, "CSR5[15] DMA ABNORMAL IRQ: "); 163 + DWMAC_LIB_DBG(KERN_INFO "CSR5[15] DMA ABNORMAL IRQ: "); 164 164 if (unlikely(intr_status & DMA_STATUS_UNF)) { 165 - DBG(INFO, "transmit underflow\n"); 165 + DWMAC_LIB_DBG(KERN_INFO "transmit underflow\n"); 166 166 ret = tx_hard_error_bump_tc; 167 167 x->tx_undeflow_irq++; 168 168 } 169 169 if (unlikely(intr_status & DMA_STATUS_TJT)) { 170 - DBG(INFO, "transmit jabber\n"); 170 + DWMAC_LIB_DBG(KERN_INFO "transmit jabber\n"); 171 171 x->tx_jabber_irq++; 172 172 } 173 173 if (unlikely(intr_status & DMA_STATUS_OVF)) { 174 - DBG(INFO, "recv overflow\n"); 174 + DWMAC_LIB_DBG(KERN_INFO "recv overflow\n"); 175 175 x->rx_overflow_irq++; 176 176 } 177 177 if (unlikely(intr_status & DMA_STATUS_RU)) { 178 - DBG(INFO, "receive buffer unavailable\n"); 178 + DWMAC_LIB_DBG(KERN_INFO "receive buffer unavailable\n"); 179 179 x->rx_buf_unav_irq++; 180 180 } 181 181 if (unlikely(intr_status & DMA_STATUS_RPS)) { 182 - DBG(INFO, "receive process stopped\n"); 182 + DWMAC_LIB_DBG(KERN_INFO "receive process stopped\n"); 183 183 x->rx_process_stopped_irq++; 184 184 
} 185 185 if (unlikely(intr_status & DMA_STATUS_RWT)) { 186 - DBG(INFO, "receive watchdog\n"); 186 + DWMAC_LIB_DBG(KERN_INFO "receive watchdog\n"); 187 187 x->rx_watchdog_irq++; 188 188 } 189 189 if (unlikely(intr_status & DMA_STATUS_ETI)) { 190 - DBG(INFO, "transmit early interrupt\n"); 190 + DWMAC_LIB_DBG(KERN_INFO "transmit early interrupt\n"); 191 191 x->tx_early_irq++; 192 192 } 193 193 if (unlikely(intr_status & DMA_STATUS_TPS)) { 194 - DBG(INFO, "transmit process stopped\n"); 194 + DWMAC_LIB_DBG(KERN_INFO "transmit process stopped\n"); 195 195 x->tx_process_stopped_irq++; 196 196 ret = tx_hard_error; 197 197 } 198 198 if (unlikely(intr_status & DMA_STATUS_FBI)) { 199 - DBG(INFO, "fatal bus error\n"); 199 + DWMAC_LIB_DBG(KERN_INFO "fatal bus error\n"); 200 200 x->fatal_bus_error_irq++; 201 201 ret = tx_hard_error; 202 202 } ··· 215 215 /* Clear the interrupt by writing a logic 1 to the CSR5[15-0] */ 216 216 writel((intr_status & 0x1ffff), ioaddr + DMA_STATUS); 217 217 218 - DBG(INFO, "\n\n"); 218 + DWMAC_LIB_DBG(KERN_INFO "\n\n"); 219 219 return ret; 220 220 } 221 221
+28 -21
drivers/net/stmmac/stmmac_main.c
··· 750 750 priv->hw->dma->dma_mode(priv->ioaddr, tc, SF_DMA_MODE); 751 751 priv->xstats.threshold = tc; 752 752 } 753 - stmmac_tx_err(priv); 754 753 } else if (unlikely(status == tx_hard_error)) 755 754 stmmac_tx_err(priv); 756 755 } ··· 780 781 781 782 stmmac_verify_args(); 782 783 783 - ret = stmmac_init_phy(dev); 784 - if (unlikely(ret)) { 785 - pr_err("%s: Cannot attach to PHY (error: %d)\n", __func__, ret); 786 - return ret; 787 - } 788 - 789 - /* Request the IRQ lines */ 790 - ret = request_irq(dev->irq, stmmac_interrupt, 791 - IRQF_SHARED, dev->name, dev); 792 - if (unlikely(ret < 0)) { 793 - pr_err("%s: ERROR: allocating the IRQ %d (error: %d)\n", 794 - __func__, dev->irq, ret); 795 - return ret; 796 - } 797 - 798 784 #ifdef CONFIG_STMMAC_TIMER 799 785 priv->tm = kzalloc(sizeof(struct stmmac_timer *), GFP_KERNEL); 800 786 if (unlikely(priv->tm == NULL)) { ··· 798 814 } else 799 815 priv->tm->enable = 1; 800 816 #endif 817 + ret = stmmac_init_phy(dev); 818 + if (unlikely(ret)) { 819 + pr_err("%s: Cannot attach to PHY (error: %d)\n", __func__, ret); 820 + goto open_error; 821 + } 801 822 802 823 /* Create and initialize the TX/RX descriptors chains. */ 803 824 priv->dma_tx_size = STMMAC_ALIGN(dma_txsize); ··· 811 822 init_dma_desc_rings(dev); 812 823 813 824 /* DMA initialization and SW reset */ 814 - if (unlikely(priv->hw->dma->init(priv->ioaddr, priv->plat->pbl, 815 - priv->dma_tx_phy, 816 - priv->dma_rx_phy) < 0)) { 817 - 825 + ret = priv->hw->dma->init(priv->ioaddr, priv->plat->pbl, 826 + priv->dma_tx_phy, priv->dma_rx_phy); 827 + if (ret < 0) { 818 828 pr_err("%s: DMA initialization failed\n", __func__); 819 - return -1; 829 + goto open_error; 820 830 } 821 831 822 832 /* Copy the MAC addr into the HW */ ··· 835 847 /* Initialise the MMC (if present) to disable all interrupts. 
*/ 836 848 writel(0xffffffff, priv->ioaddr + MMC_HIGH_INTR_MASK); 837 849 writel(0xffffffff, priv->ioaddr + MMC_LOW_INTR_MASK); 850 + 851 + /* Request the IRQ lines */ 852 + ret = request_irq(dev->irq, stmmac_interrupt, 853 + IRQF_SHARED, dev->name, dev); 854 + if (unlikely(ret < 0)) { 855 + pr_err("%s: ERROR: allocating the IRQ %d (error: %d)\n", 856 + __func__, dev->irq, ret); 857 + goto open_error; 858 + } 838 859 839 860 /* Enable the MAC Rx/Tx */ 840 861 stmmac_enable_mac(priv->ioaddr); ··· 875 878 napi_enable(&priv->napi); 876 879 skb_queue_head_init(&priv->rx_recycle); 877 880 netif_start_queue(dev); 881 + 878 882 return 0; 883 + 884 + open_error: 885 + #ifdef CONFIG_STMMAC_TIMER 886 + kfree(priv->tm); 887 + #endif 888 + if (priv->phydev) 889 + phy_disconnect(priv->phydev); 890 + 891 + return ret; 879 892 } 880 893 881 894 /**
+5 -2
drivers/net/sunhme.c
··· 3237 3237 #endif 3238 3238 3239 3239 #ifdef CONFIG_SBUS 3240 + static const struct of_device_id hme_sbus_match[]; 3240 3241 static int __devinit hme_sbus_probe(struct platform_device *op) 3241 3242 { 3243 + const struct of_device_id *match; 3242 3244 struct device_node *dp = op->dev.of_node; 3243 3245 const char *model = of_get_property(dp, "model", NULL); 3244 3246 int is_qfe; 3245 3247 3246 - if (!op->dev.of_match) 3248 + match = of_match_device(hme_sbus_match, &op->dev); 3249 + if (!match) 3247 3250 return -EINVAL; 3248 - is_qfe = (op->dev.of_match->data != NULL); 3251 + is_qfe = (match->data != NULL); 3249 3252 3250 3253 if (!is_qfe && model && !strcmp(model, "SUNW,sbus-qfe")) 3251 3254 is_qfe = 1;
+6 -2
drivers/net/tg3.c
··· 12327 12327 if (val & VCPU_CFGSHDW_ASPM_DBNC) 12328 12328 tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND; 12329 12329 if ((val & VCPU_CFGSHDW_WOL_ENABLE) && 12330 - (val & VCPU_CFGSHDW_WOL_MAGPKT)) 12330 + (val & VCPU_CFGSHDW_WOL_MAGPKT)) { 12331 12331 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE; 12332 + device_set_wakeup_enable(&tp->pdev->dev, true); 12333 + } 12332 12334 goto done; 12333 12335 } 12334 12336 ··· 12463 12461 tp->tg3_flags &= ~TG3_FLAG_WOL_CAP; 12464 12462 12465 12463 if ((tp->tg3_flags & TG3_FLAG_WOL_CAP) && 12466 - (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) 12464 + (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) { 12467 12465 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE; 12466 + device_set_wakeup_enable(&tp->pdev->dev, true); 12467 + } 12468 12468 12469 12469 if (cfg2 & (1 << 17)) 12470 12470 tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
+2 -2
drivers/net/tokenring/3c359.c
··· 1251 1251 /* 1252 1252 * The NIC has told us that a packet has been downloaded onto the card, we must 1253 1253 * find out which packet it has done, clear the skb and information for the packet 1254 - * then advance around the ring for all tranmitted packets 1254 + * then advance around the ring for all transmitted packets 1255 1255 */ 1256 1256 1257 1257 static void xl_dn_comp(struct net_device *dev) ··· 1568 1568 if (lan_status_diff & LSC_SOFT_ERR) 1569 1569 printk(KERN_WARNING "%s: Adapter transmitted Soft Error Report Mac Frame\n",dev->name); 1570 1570 if (lan_status_diff & LSC_TRAN_BCN) 1571 - printk(KERN_INFO "%s: We are tranmitting the beacon, aaah\n",dev->name); 1571 + printk(KERN_INFO "%s: We are transmitting the beacon, aaah\n",dev->name); 1572 1572 if (lan_status_diff & LSC_SS) 1573 1573 printk(KERN_INFO "%s: Single Station on the ring\n", dev->name); 1574 1574 if (lan_status_diff & LSC_RING_REC)
+1 -1
drivers/net/tokenring/lanstreamer.c
··· 1675 1675 if (lan_status_diff & LSC_SOFT_ERR) 1676 1676 printk(KERN_WARNING "%s: Adapter transmitted Soft Error Report Mac Frame\n", dev->name); 1677 1677 if (lan_status_diff & LSC_TRAN_BCN) 1678 - printk(KERN_INFO "%s: We are tranmitting the beacon, aaah\n", dev->name); 1678 + printk(KERN_INFO "%s: We are transmitting the beacon, aaah\n", dev->name); 1679 1679 if (lan_status_diff & LSC_SS) 1680 1680 printk(KERN_INFO "%s: Single Station on the ring\n", dev->name); 1681 1681 if (lan_status_diff & LSC_RING_REC)
+1 -1
drivers/net/tokenring/olympic.c
··· 1500 1500 if (lan_status_diff & LSC_SOFT_ERR) 1501 1501 printk(KERN_WARNING "%s: Adapter transmitted Soft Error Report Mac Frame\n",dev->name); 1502 1502 if (lan_status_diff & LSC_TRAN_BCN) 1503 - printk(KERN_INFO "%s: We are tranmitting the beacon, aaah\n",dev->name); 1503 + printk(KERN_INFO "%s: We are transmitting the beacon, aaah\n",dev->name); 1504 1504 if (lan_status_diff & LSC_SS) 1505 1505 printk(KERN_INFO "%s: Single Station on the ring\n", dev->name); 1506 1506 if (lan_status_diff & LSC_RING_REC)
+13 -3
drivers/net/usb/cdc_ether.c
··· 460 460 .manage_power = cdc_manage_power, 461 461 }; 462 462 463 - static const struct driver_info mbm_info = { 463 + static const struct driver_info wwan_info = { 464 464 .description = "Mobile Broadband Network Device", 465 465 .flags = FLAG_WWAN, 466 466 .bind = usbnet_cdc_bind, ··· 471 471 472 472 /*-------------------------------------------------------------------------*/ 473 473 474 + #define HUAWEI_VENDOR_ID 0x12D1 474 475 475 476 static const struct usb_device_id products [] = { 476 477 /* ··· 567 566 { 568 567 USB_DEVICE_AND_INTERFACE_INFO(0x1004, 0x61aa, USB_CLASS_COMM, 569 568 USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), 570 - .driver_info = 0, 569 + .driver_info = (unsigned long)&wwan_info, 571 570 }, 572 571 573 572 /* ··· 588 587 }, { 589 588 USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_MDLM, 590 589 USB_CDC_PROTO_NONE), 591 - .driver_info = (unsigned long)&mbm_info, 590 + .driver_info = (unsigned long)&wwan_info, 592 591 592 + }, { 593 + /* Various Huawei modems with a network port like the UMG1831 */ 594 + .match_flags = USB_DEVICE_ID_MATCH_VENDOR 595 + | USB_DEVICE_ID_MATCH_INT_INFO, 596 + .idVendor = HUAWEI_VENDOR_ID, 597 + .bInterfaceClass = USB_CLASS_COMM, 598 + .bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET, 599 + .bInterfaceProtocol = 255, 600 + .driver_info = (unsigned long)&wwan_info, 593 601 }, 594 602 { }, // END 595 603 };
+2 -2
drivers/net/usb/cdc_ncm.c
··· 54 54 #include <linux/usb/usbnet.h> 55 55 #include <linux/usb/cdc.h> 56 56 57 - #define DRIVER_VERSION "7-Feb-2011" 57 + #define DRIVER_VERSION "23-Apr-2011" 58 58 59 59 /* CDC NCM subclass 3.2.1 */ 60 60 #define USB_CDC_NCM_NDP16_LENGTH_MIN 0x10 61 61 62 62 /* Maximum NTB length */ 63 - #define CDC_NCM_NTB_MAX_SIZE_TX 16384 /* bytes */ 63 + #define CDC_NCM_NTB_MAX_SIZE_TX (16384 + 4) /* bytes, must be short terminated */ 64 64 #define CDC_NCM_NTB_MAX_SIZE_RX 16384 /* bytes */ 65 65 66 66 /* Minimum value for MaxDatagramSize, ch. 6.2.9 */
+9 -5
drivers/net/usb/ipheth.c
··· 65 65 #define IPHETH_USBINTF_PROTO 1 66 66 67 67 #define IPHETH_BUF_SIZE 1516 68 + #define IPHETH_IP_ALIGN 2 /* padding at front of URB */ 68 69 #define IPHETH_TX_TIMEOUT (5 * HZ) 69 70 70 71 #define IPHETH_INTFNUM 2 ··· 203 202 return; 204 203 } 205 204 206 - len = urb->actual_length; 207 - buf = urb->transfer_buffer; 205 + if (urb->actual_length <= IPHETH_IP_ALIGN) { 206 + dev->net->stats.rx_length_errors++; 207 + return; 208 + } 209 + len = urb->actual_length - IPHETH_IP_ALIGN; 210 + buf = urb->transfer_buffer + IPHETH_IP_ALIGN; 208 211 209 - skb = dev_alloc_skb(NET_IP_ALIGN + len); 212 + skb = dev_alloc_skb(len); 210 213 if (!skb) { 211 214 err("%s: dev_alloc_skb: -ENOMEM", __func__); 212 215 dev->net->stats.rx_dropped++; 213 216 return; 214 217 } 215 218 216 - skb_reserve(skb, NET_IP_ALIGN); 217 - memcpy(skb_put(skb, len), buf + NET_IP_ALIGN, len - NET_IP_ALIGN); 219 + memcpy(skb_put(skb, len), buf, len); 218 220 skb->dev = dev->net; 219 221 skb->protocol = eth_type_trans(skb, dev->net); 220 222
+1 -1
drivers/net/usb/smsc95xx.c
··· 730 730 msleep(10); 731 731 bmcr = smsc95xx_mdio_read(dev->net, dev->mii.phy_id, MII_BMCR); 732 732 timeout++; 733 - } while ((bmcr & MII_BMCR) && (timeout < 100)); 733 + } while ((bmcr & BMCR_RESET) && (timeout < 100)); 734 734 735 735 if (timeout >= 100) { 736 736 netdev_warn(dev->net, "timeout on PHY Reset");
+15 -3
drivers/net/usb/usbnet.c
··· 645 645 struct driver_info *info = dev->driver_info; 646 646 int retval; 647 647 648 + clear_bit(EVENT_DEV_OPEN, &dev->flags); 648 649 netif_stop_queue (net); 649 650 650 651 netif_info(dev, ifdown, dev->net, ··· 737 736 } 738 737 } 739 738 739 + set_bit(EVENT_DEV_OPEN, &dev->flags); 740 740 netif_start_queue (net); 741 741 netif_info(dev, ifup, dev->net, 742 742 "open: enable queueing (rx %d, tx %d) mtu %d %s framing\n", ··· 1261 1259 if (dev->driver_info->unbind) 1262 1260 dev->driver_info->unbind (dev, intf); 1263 1261 1262 + usb_kill_urb(dev->interrupt); 1263 + usb_free_urb(dev->interrupt); 1264 + 1264 1265 free_netdev(net); 1265 1266 usb_put_dev (xdev); 1266 1267 } ··· 1503 1498 int retval; 1504 1499 1505 1500 if (!--dev->suspend_count) { 1501 + /* resume interrupt URBs */ 1502 + if (dev->interrupt && test_bit(EVENT_DEV_OPEN, &dev->flags)) 1503 + usb_submit_urb(dev->interrupt, GFP_NOIO); 1504 + 1506 1505 spin_lock_irq(&dev->txq.lock); 1507 1506 while ((res = usb_get_from_anchor(&dev->deferred))) { 1508 1507 ··· 1525 1516 smp_mb(); 1526 1517 clear_bit(EVENT_DEV_ASLEEP, &dev->flags); 1527 1518 spin_unlock_irq(&dev->txq.lock); 1528 - if (!(dev->txq.qlen >= TX_QLEN(dev))) 1529 - netif_start_queue(dev->net); 1530 - tasklet_schedule (&dev->bh); 1519 + 1520 + if (test_bit(EVENT_DEV_OPEN, &dev->flags)) { 1521 + if (!(dev->txq.qlen >= TX_QLEN(dev))) 1522 + netif_start_queue(dev->net); 1523 + tasklet_schedule (&dev->bh); 1524 + } 1531 1525 } 1532 1526 return 0; 1533 1527 }
+12
drivers/net/veth.c
··· 403 403 if (tb[IFLA_ADDRESS] == NULL) 404 404 random_ether_addr(dev->dev_addr); 405 405 406 + if (tb[IFLA_IFNAME]) 407 + nla_strlcpy(dev->name, tb[IFLA_IFNAME], IFNAMSIZ); 408 + else 409 + snprintf(dev->name, IFNAMSIZ, DRV_NAME "%%d"); 410 + 411 + if (strchr(dev->name, '%')) { 412 + err = dev_alloc_name(dev, dev->name); 413 + if (err < 0) 414 + goto err_alloc_name; 415 + } 416 + 406 417 err = register_netdevice(dev); 407 418 if (err < 0) 408 419 goto err_register_dev; ··· 433 422 434 423 err_register_dev: 435 424 /* nothing to do */ 425 + err_alloc_name: 436 426 err_configure_peer: 437 427 unregister_netdevice(peer); 438 428 return err;
+6 -4
drivers/net/vmxnet3/vmxnet3_drv.c
··· 178 178 vmxnet3_process_events(struct vmxnet3_adapter *adapter) 179 179 { 180 180 int i; 181 + unsigned long flags; 181 182 u32 events = le32_to_cpu(adapter->shared->ecr); 182 183 if (!events) 183 184 return; ··· 191 190 192 191 /* Check if there is an error on xmit/recv queues */ 193 192 if (events & (VMXNET3_ECR_TQERR | VMXNET3_ECR_RQERR)) { 194 - spin_lock(&adapter->cmd_lock); 193 + spin_lock_irqsave(&adapter->cmd_lock, flags); 195 194 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, 196 195 VMXNET3_CMD_GET_QUEUE_STATUS); 197 - spin_unlock(&adapter->cmd_lock); 196 + spin_unlock_irqrestore(&adapter->cmd_lock, flags); 198 197 199 198 for (i = 0; i < adapter->num_tx_queues; i++) 200 199 if (adapter->tqd_start[i].status.stopped) ··· 2734 2733 vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter) 2735 2734 { 2736 2735 u32 cfg; 2736 + unsigned long flags; 2737 2737 2738 2738 /* intr settings */ 2739 - spin_lock(&adapter->cmd_lock); 2739 + spin_lock_irqsave(&adapter->cmd_lock, flags); 2740 2740 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, 2741 2741 VMXNET3_CMD_GET_CONF_INTR); 2742 2742 cfg = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD); 2743 - spin_unlock(&adapter->cmd_lock); 2743 + spin_unlock_irqrestore(&adapter->cmd_lock, flags); 2744 2744 adapter->intr.type = cfg & 0x3; 2745 2745 adapter->intr.mask_mode = (cfg >> 2) & 0x3; 2746 2746
+3
drivers/net/vmxnet3/vmxnet3_ethtool.c
··· 311 311 /* toggle the LRO feature*/ 312 312 netdev->features ^= NETIF_F_LRO; 313 313 314 + /* Update private LRO flag */ 315 + adapter->lro = lro_requested; 316 + 314 317 /* update harware LRO capability accordingly */ 315 318 if (lro_requested) 316 319 adapter->shared->devRead.misc.uptFeatures |=
+2 -2
drivers/net/wireless/ath/ath9k/hif_usb.c
··· 1040 1040 } 1041 1041 1042 1042 ret = ath9k_htc_hw_init(hif_dev->htc_handle, 1043 - &hif_dev->udev->dev, hif_dev->device_id, 1043 + &interface->dev, hif_dev->device_id, 1044 1044 hif_dev->udev->product, id->driver_info); 1045 1045 if (ret) { 1046 1046 ret = -EINVAL; ··· 1158 1158 #endif 1159 1159 1160 1160 static struct usb_driver ath9k_hif_usb_driver = { 1161 - .name = "ath9k_hif_usb", 1161 + .name = KBUILD_MODNAME, 1162 1162 .probe = ath9k_hif_usb_probe, 1163 1163 .disconnect = ath9k_hif_usb_disconnect, 1164 1164 #ifdef CONFIG_PM
-9
drivers/net/wireless/ath/ath9k/hw.c
··· 1254 1254 ah->txchainmask = common->tx_chainmask; 1255 1255 ah->rxchainmask = common->rx_chainmask; 1256 1256 1257 - if ((common->bus_ops->ath_bus_type != ATH_USB) && !ah->chip_fullsleep) { 1258 - ath9k_hw_abortpcurecv(ah); 1259 - if (!ath9k_hw_stopdmarecv(ah)) { 1260 - ath_dbg(common, ATH_DBG_XMIT, 1261 - "Failed to stop receive dma\n"); 1262 - bChannelChange = false; 1263 - } 1264 - } 1265 - 1266 1257 if (!ath9k_hw_setpower(ah, ATH9K_PM_AWAKE)) 1267 1258 return -EIO; 1268 1259
+22 -3
drivers/net/wireless/ath/ath9k/mac.c
··· 751 751 } 752 752 EXPORT_SYMBOL(ath9k_hw_abortpcurecv); 753 753 754 - bool ath9k_hw_stopdmarecv(struct ath_hw *ah) 754 + bool ath9k_hw_stopdmarecv(struct ath_hw *ah, bool *reset) 755 755 { 756 756 #define AH_RX_STOP_DMA_TIMEOUT 10000 /* usec */ 757 757 #define AH_RX_TIME_QUANTUM 100 /* usec */ 758 758 struct ath_common *common = ath9k_hw_common(ah); 759 + u32 mac_status, last_mac_status = 0; 759 760 int i; 761 + 762 + /* Enable access to the DMA observation bus */ 763 + REG_WRITE(ah, AR_MACMISC, 764 + ((AR_MACMISC_DMA_OBS_LINE_8 << AR_MACMISC_DMA_OBS_S) | 765 + (AR_MACMISC_MISC_OBS_BUS_1 << 766 + AR_MACMISC_MISC_OBS_BUS_MSB_S))); 760 767 761 768 REG_WRITE(ah, AR_CR, AR_CR_RXD); 762 769 ··· 771 764 for (i = AH_RX_STOP_DMA_TIMEOUT / AH_TIME_QUANTUM; i != 0; i--) { 772 765 if ((REG_READ(ah, AR_CR) & AR_CR_RXE) == 0) 773 766 break; 767 + 768 + if (!AR_SREV_9300_20_OR_LATER(ah)) { 769 + mac_status = REG_READ(ah, AR_DMADBG_7) & 0x7f0; 770 + if (mac_status == 0x1c0 && mac_status == last_mac_status) { 771 + *reset = true; 772 + break; 773 + } 774 + 775 + last_mac_status = mac_status; 776 + } 777 + 774 778 udelay(AH_TIME_QUANTUM); 775 779 } 776 780 777 781 if (i == 0) { 778 782 ath_err(common, 779 - "DMA failed to stop in %d ms AR_CR=0x%08x AR_DIAG_SW=0x%08x\n", 783 + "DMA failed to stop in %d ms AR_CR=0x%08x AR_DIAG_SW=0x%08x DMADBG_7=0x%08x\n", 780 784 AH_RX_STOP_DMA_TIMEOUT / 1000, 781 785 REG_READ(ah, AR_CR), 782 - REG_READ(ah, AR_DIAG_SW)); 786 + REG_READ(ah, AR_DIAG_SW), 787 + REG_READ(ah, AR_DMADBG_7)); 783 788 return false; 784 789 } else { 785 790 return true;
+1 -1
drivers/net/wireless/ath/ath9k/mac.h
··· 695 695 void ath9k_hw_putrxbuf(struct ath_hw *ah, u32 rxdp); 696 696 void ath9k_hw_startpcureceive(struct ath_hw *ah, bool is_scanning); 697 697 void ath9k_hw_abortpcurecv(struct ath_hw *ah); 698 - bool ath9k_hw_stopdmarecv(struct ath_hw *ah); 698 + bool ath9k_hw_stopdmarecv(struct ath_hw *ah, bool *reset); 699 699 int ath9k_hw_beaconq_setup(struct ath_hw *ah); 700 700 701 701 /* Interrupt Handling */
+18 -2
drivers/net/wireless/ath/ath9k/main.c
··· 1376 1376 1377 1377 ath9k_calculate_iter_data(hw, vif, &iter_data); 1378 1378 1379 - ath9k_ps_wakeup(sc); 1380 1379 /* Set BSSID mask. */ 1381 1380 memcpy(common->bssidmask, iter_data.mask, ETH_ALEN); 1382 1381 ath_hw_setbssidmask(common); ··· 1410 1411 } 1411 1412 1412 1413 ath9k_hw_set_interrupts(ah, ah->imask); 1413 - ath9k_ps_restore(sc); 1414 1414 1415 1415 /* Set up ANI */ 1416 1416 if ((iter_data.naps + iter_data.nadhocs) > 0) { ··· 1455 1457 struct ath_vif *avp = (void *)vif->drv_priv; 1456 1458 int ret = 0; 1457 1459 1460 + ath9k_ps_wakeup(sc); 1458 1461 mutex_lock(&sc->mutex); 1459 1462 1460 1463 switch (vif->type) { ··· 1502 1503 ath9k_do_vif_add_setup(hw, vif); 1503 1504 out: 1504 1505 mutex_unlock(&sc->mutex); 1506 + ath9k_ps_restore(sc); 1505 1507 return ret; 1506 1508 } 1507 1509 ··· 1517 1517 1518 1518 ath_dbg(common, ATH_DBG_CONFIG, "Change Interface\n"); 1519 1519 mutex_lock(&sc->mutex); 1520 + ath9k_ps_wakeup(sc); 1520 1521 1521 1522 /* See if new interface type is valid. 
*/ 1522 1523 if ((new_type == NL80211_IFTYPE_ADHOC) && ··· 1547 1546 1548 1547 ath9k_do_vif_add_setup(hw, vif); 1549 1548 out: 1549 + ath9k_ps_restore(sc); 1550 1550 mutex_unlock(&sc->mutex); 1551 1551 return ret; 1552 1552 } ··· 1560 1558 1561 1559 ath_dbg(common, ATH_DBG_CONFIG, "Detach Interface\n"); 1562 1560 1561 + ath9k_ps_wakeup(sc); 1563 1562 mutex_lock(&sc->mutex); 1564 1563 1565 1564 sc->nvifs--; ··· 1572 1569 ath9k_calculate_summary_state(hw, NULL); 1573 1570 1574 1571 mutex_unlock(&sc->mutex); 1572 + ath9k_ps_restore(sc); 1575 1573 } 1576 1574 1577 1575 static void ath9k_enable_ps(struct ath_softc *sc) ··· 1813 1809 1814 1810 txq = sc->tx.txq_map[queue]; 1815 1811 1812 + ath9k_ps_wakeup(sc); 1816 1813 mutex_lock(&sc->mutex); 1817 1814 1818 1815 memset(&qi, 0, sizeof(struct ath9k_tx_queue_info)); ··· 1837 1832 ath_beaconq_config(sc); 1838 1833 1839 1834 mutex_unlock(&sc->mutex); 1835 + ath9k_ps_restore(sc); 1840 1836 1841 1837 return ret; 1842 1838 } ··· 1900 1894 int slottime; 1901 1895 int error; 1902 1896 1897 + ath9k_ps_wakeup(sc); 1903 1898 mutex_lock(&sc->mutex); 1904 1899 1905 1900 if (changed & BSS_CHANGED_BSSID) { ··· 2001 1994 } 2002 1995 2003 1996 mutex_unlock(&sc->mutex); 1997 + ath9k_ps_restore(sc); 2004 1998 } 2005 1999 2006 2000 static u64 ath9k_get_tsf(struct ieee80211_hw *hw) ··· 2141 2133 static void ath9k_flush(struct ieee80211_hw *hw, bool drop) 2142 2134 { 2143 2135 struct ath_softc *sc = hw->priv; 2136 + struct ath_hw *ah = sc->sc_ah; 2137 + struct ath_common *common = ath9k_hw_common(ah); 2144 2138 int timeout = 200; /* ms */ 2145 2139 int i, j; 2146 2140 ··· 2150 2140 mutex_lock(&sc->mutex); 2151 2141 2152 2142 cancel_delayed_work_sync(&sc->tx_complete_work); 2143 + 2144 + if (sc->sc_flags & SC_OP_INVALID) { 2145 + ath_dbg(common, ATH_DBG_ANY, "Device not present\n"); 2146 + mutex_unlock(&sc->mutex); 2147 + return; 2148 + } 2153 2149 2154 2150 if (drop) 2155 2151 timeout = 1;
+3 -3
drivers/net/wireless/ath/ath9k/recv.c
··· 486 486 bool ath_stoprecv(struct ath_softc *sc) 487 487 { 488 488 struct ath_hw *ah = sc->sc_ah; 489 - bool stopped; 489 + bool stopped, reset = false; 490 490 491 491 spin_lock_bh(&sc->rx.rxbuflock); 492 492 ath9k_hw_abortpcurecv(ah); 493 493 ath9k_hw_setrxfilter(ah, 0); 494 - stopped = ath9k_hw_stopdmarecv(ah); 494 + stopped = ath9k_hw_stopdmarecv(ah, &reset); 495 495 496 496 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) 497 497 ath_edma_stop_recv(sc); ··· 506 506 "confusing the DMA engine when we start RX up\n"); 507 507 ATH_DBG_WARN_ON_ONCE(!stopped); 508 508 } 509 - return stopped; 509 + return stopped && !reset; 510 510 } 511 511 512 512 void ath_flushrecv(struct ath_softc *sc)
+1
drivers/net/wireless/ath/regd_common.h
··· 195 195 {APL9_WORLD, CTL_ETSI, CTL_ETSI}, 196 196 197 197 {APL3_FCCA, CTL_FCC, CTL_FCC}, 198 + {APL7_FCCA, CTL_FCC, CTL_FCC}, 198 199 {APL1_ETSIC, CTL_FCC, CTL_ETSI}, 199 200 {APL2_ETSIC, CTL_FCC, CTL_ETSI}, 200 201 {APL2_APLD, CTL_FCC, NO_CTL},
+1
drivers/net/wireless/b43/main.c
··· 72 72 MODULE_FIRMWARE("b43/ucode13.fw"); 73 73 MODULE_FIRMWARE("b43/ucode14.fw"); 74 74 MODULE_FIRMWARE("b43/ucode15.fw"); 75 + MODULE_FIRMWARE("b43/ucode16_mimo.fw"); 75 76 MODULE_FIRMWARE("b43/ucode5.fw"); 76 77 MODULE_FIRMWARE("b43/ucode9.fw"); 77 78
+5 -4
drivers/net/wireless/iwlegacy/Kconfig
··· 1 1 config IWLWIFI_LEGACY 2 - tristate "Intel Wireless Wifi legacy devices" 3 - depends on PCI && MAC80211 2 + tristate 4 3 select FW_LOADER 5 4 select NEW_LEDS 6 5 select LEDS_CLASS ··· 64 65 65 66 config IWL4965 66 67 tristate "Intel Wireless WiFi 4965AGN (iwl4965)" 67 - depends on IWLWIFI_LEGACY 68 + depends on PCI && MAC80211 69 + select IWLWIFI_LEGACY 68 70 ---help--- 69 71 This option enables support for 70 72 ··· 92 92 93 93 config IWL3945 94 94 tristate "Intel PRO/Wireless 3945ABG/BG Network Connection (iwl3945)" 95 - depends on IWLWIFI_LEGACY 95 + depends on PCI && MAC80211 96 + select IWLWIFI_LEGACY 96 97 ---help--- 97 98 Select to build the driver supporting the: 98 99
-2
drivers/net/wireless/iwlegacy/iwl-3945-hw.h
··· 74 74 /* RSSI to dBm */ 75 75 #define IWL39_RSSI_OFFSET 95 76 76 77 - #define IWL_DEFAULT_TX_POWER 0x0F 78 - 79 77 /* 80 78 * EEPROM related constants, enums, and structures. 81 79 */
-3
drivers/net/wireless/iwlegacy/iwl-4965-hw.h
··· 804 804 805 805 #define IWL4965_DEFAULT_TX_RETRY 15 806 806 807 - /* Limit range of txpower output target to be between these values */ 808 - #define IWL4965_TX_POWER_TARGET_POWER_MIN (0) /* 0 dBm: 1 milliwatt */ 809 - 810 807 /* EEPROM */ 811 808 #define IWL4965_FIRST_AMPDU_QUEUE 10 812 809
+19 -9
drivers/net/wireless/iwlegacy/iwl-4965-tx.c
··· 316 316 317 317 hdr_len = ieee80211_hdrlen(fc); 318 318 319 - /* Find index into station table for destination station */ 320 - sta_id = iwl_legacy_sta_id_or_broadcast(priv, ctx, info->control.sta); 321 - if (sta_id == IWL_INVALID_STATION) { 322 - IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n", 323 - hdr->addr1); 324 - goto drop_unlock; 319 + /* For management frames use broadcast id to do not break aggregation */ 320 + if (!ieee80211_is_data(fc)) 321 + sta_id = ctx->bcast_sta_id; 322 + else { 323 + /* Find index into station table for destination station */ 324 + sta_id = iwl_legacy_sta_id_or_broadcast(priv, ctx, info->control.sta); 325 + 326 + if (sta_id == IWL_INVALID_STATION) { 327 + IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n", 328 + hdr->addr1); 329 + goto drop_unlock; 330 + } 325 331 } 326 332 327 333 IWL_DEBUG_TX(priv, "station Id %d\n", sta_id); ··· 1133 1127 q->read_ptr = iwl_legacy_queue_inc_wrap(q->read_ptr, q->n_bd)) { 1134 1128 1135 1129 tx_info = &txq->txb[txq->q.read_ptr]; 1136 - iwl4965_tx_status(priv, tx_info, 1137 - txq_id >= IWL4965_FIRST_AMPDU_QUEUE); 1130 + 1131 + if (WARN_ON_ONCE(tx_info->skb == NULL)) 1132 + continue; 1138 1133 1139 1134 hdr = (struct ieee80211_hdr *)tx_info->skb->data; 1140 - if (hdr && ieee80211_is_data_qos(hdr->frame_control)) 1135 + if (ieee80211_is_data_qos(hdr->frame_control)) 1141 1136 nfreed++; 1137 + 1138 + iwl4965_tx_status(priv, tx_info, 1139 + txq_id >= IWL4965_FIRST_AMPDU_QUEUE); 1142 1140 tx_info->skb = NULL; 1143 1141 1144 1142 priv->cfg->ops->lib->txq_free_tfd(priv, txq);
+18 -6
drivers/net/wireless/iwlegacy/iwl-core.c
··· 160 160 struct ieee80211_channel *geo_ch; 161 161 struct ieee80211_rate *rates; 162 162 int i = 0; 163 + s8 max_tx_power = 0; 163 164 164 165 if (priv->bands[IEEE80211_BAND_2GHZ].n_bitrates || 165 166 priv->bands[IEEE80211_BAND_5GHZ].n_bitrates) { ··· 236 235 237 236 geo_ch->flags |= ch->ht40_extension_channel; 238 237 239 - if (ch->max_power_avg > priv->tx_power_device_lmt) 240 - priv->tx_power_device_lmt = ch->max_power_avg; 238 + if (ch->max_power_avg > max_tx_power) 239 + max_tx_power = ch->max_power_avg; 241 240 } else { 242 241 geo_ch->flags |= IEEE80211_CHAN_DISABLED; 243 242 } ··· 249 248 "restricted" : "valid", 250 249 geo_ch->flags); 251 250 } 251 + 252 + priv->tx_power_device_lmt = max_tx_power; 253 + priv->tx_power_user_lmt = max_tx_power; 254 + priv->tx_power_next = max_tx_power; 252 255 253 256 if ((priv->bands[IEEE80211_BAND_5GHZ].n_channels == 0) && 254 257 priv->cfg->sku & IWL_SKU_A) { ··· 1129 1124 if (!priv->cfg->ops->lib->send_tx_power) 1130 1125 return -EOPNOTSUPP; 1131 1126 1132 - if (tx_power < IWL4965_TX_POWER_TARGET_POWER_MIN) { 1127 + /* 0 dBm mean 1 milliwatt */ 1128 + if (tx_power < 0) { 1133 1129 IWL_WARN(priv, 1134 - "Requested user TXPOWER %d below lower limit %d.\n", 1135 - tx_power, 1136 - IWL4965_TX_POWER_TARGET_POWER_MIN); 1130 + "Requested user TXPOWER %d below 1 mW.\n", 1131 + tx_power); 1137 1132 return -EINVAL; 1138 1133 } 1139 1134 ··· 2151 2146 ch_info = iwl_legacy_get_channel_info(priv, channel->band, ch); 2152 2147 if (!iwl_legacy_is_channel_valid(ch_info)) { 2153 2148 IWL_DEBUG_MAC80211(priv, "leave - invalid channel\n"); 2149 + ret = -EINVAL; 2150 + goto set_ch_out; 2151 + } 2152 + 2153 + if (priv->iw_mode == NL80211_IFTYPE_ADHOC && 2154 + !iwl_legacy_is_channel_ibss(ch_info)) { 2155 + IWL_DEBUG_MAC80211(priv, "leave - not IBSS channel\n"); 2154 2156 ret = -EINVAL; 2155 2157 goto set_ch_out; 2156 2158 }
+6
drivers/net/wireless/iwlegacy/iwl-dev.h
··· 1411 1411 return (!(ch->flags & EEPROM_CHANNEL_ACTIVE)) ? 1 : 0; 1412 1412 } 1413 1413 1414 + static inline int 1415 + iwl_legacy_is_channel_ibss(const struct iwl_channel_info *ch) 1416 + { 1417 + return (ch->flags & EEPROM_CHANNEL_IBSS) ? 1 : 0; 1418 + } 1419 + 1414 1420 static inline void 1415 1421 __iwl_legacy_free_pages(struct iwl_priv *priv, struct page *page) 1416 1422 {
-7
drivers/net/wireless/iwlegacy/iwl-eeprom.c
··· 471 471 flags & EEPROM_CHANNEL_RADAR)) 472 472 ? "" : "not "); 473 473 474 - /* Set the tx_power_user_lmt to the highest power 475 - * supported by any channel */ 476 - if (eeprom_ch_info[ch].max_power_avg > 477 - priv->tx_power_user_lmt) 478 - priv->tx_power_user_lmt = 479 - eeprom_ch_info[ch].max_power_avg; 480 - 481 474 ch_info++; 482 475 } 483 476 }
+19 -1
drivers/net/wireless/iwlegacy/iwl-led.c
··· 48 48 MODULE_PARM_DESC(led_mode, "0=system default, " 49 49 "1=On(RF On)/Off(RF Off), 2=blinking"); 50 50 51 + /* Throughput OFF time(ms) ON time (ms) 52 + * >300 25 25 53 + * >200 to 300 40 40 54 + * >100 to 200 55 55 55 + * >70 to 100 65 65 56 + * >50 to 70 75 75 57 + * >20 to 50 85 85 58 + * >10 to 20 95 95 59 + * >5 to 10 110 110 60 + * >1 to 5 130 130 61 + * >0 to 1 167 167 62 + * <=0 SOLID ON 63 + */ 51 64 static const struct ieee80211_tpt_blink iwl_blink[] = { 52 - { .throughput = 0 * 1024 - 1, .blink_time = 334 }, 65 + { .throughput = 0, .blink_time = 334 }, 53 66 { .throughput = 1 * 1024 - 1, .blink_time = 260 }, 54 67 { .throughput = 5 * 1024 - 1, .blink_time = 220 }, 55 68 { .throughput = 10 * 1024 - 1, .blink_time = 190 }, ··· 113 100 114 101 if (priv->blink_on == on && priv->blink_off == off) 115 102 return 0; 103 + 104 + if (off == 0) { 105 + /* led is SOLID_ON */ 106 + on = IWL_LED_SOLID; 107 + } 116 108 117 109 IWL_DEBUG_LED(priv, "Led blink time compensation=%u\n", 118 110 priv->cfg->base_params->led_compensation);
-4
drivers/net/wireless/iwlegacy/iwl3945-base.c
··· 3825 3825 priv->force_reset[IWL_FW_RESET].reset_duration = 3826 3826 IWL_DELAY_NEXT_FORCE_FW_RELOAD; 3827 3827 3828 - 3829 - priv->tx_power_user_lmt = IWL_DEFAULT_TX_POWER; 3830 - priv->tx_power_next = IWL_DEFAULT_TX_POWER; 3831 - 3832 3828 if (eeprom->version < EEPROM_3945_EEPROM_VERSION) { 3833 3829 IWL_WARN(priv, "Unsupported EEPROM version: 0x%04X\n", 3834 3830 eeprom->version);
+4 -10
drivers/net/wireless/iwlegacy/iwl4965-base.c
··· 2984 2984 struct iwl_priv *priv = container_of(work, struct iwl_priv, 2985 2985 txpower_work); 2986 2986 2987 + mutex_lock(&priv->mutex); 2988 + 2987 2989 /* If a scan happened to start before we got here 2988 2990 * then just return; the statistics notification will 2989 2991 * kick off another scheduled work to compensate for 2990 2992 * any temperature delta we missed here. */ 2991 2993 if (test_bit(STATUS_EXIT_PENDING, &priv->status) || 2992 2994 test_bit(STATUS_SCANNING, &priv->status)) 2993 - return; 2994 - 2995 - mutex_lock(&priv->mutex); 2995 + goto out; 2996 2996 2997 2997 /* Regardless of if we are associated, we must reconfigure the 2998 2998 * TX power since frames can be sent on non-radar channels while ··· 3002 3002 /* Update last_temperature to keep is_calib_needed from running 3003 3003 * when it isn't needed... */ 3004 3004 priv->last_temperature = priv->temperature; 3005 - 3005 + out: 3006 3006 mutex_unlock(&priv->mutex); 3007 3007 } 3008 3008 ··· 3139 3139 &priv->contexts[IWL_RXON_CTX_BSS]); 3140 3140 3141 3141 iwl_legacy_init_scan_params(priv); 3142 - 3143 - /* Set the tx_power_user_lmt to the lowest power level 3144 - * this value will get overwritten by channel max power avg 3145 - * from eeprom */ 3146 - priv->tx_power_user_lmt = IWL4965_TX_POWER_TARGET_POWER_MIN; 3147 - priv->tx_power_next = IWL4965_TX_POWER_TARGET_POWER_MIN; 3148 3142 3149 3143 ret = iwl_legacy_init_channel_map(priv); 3150 3144 if (ret) {
+3
drivers/net/wireless/iwlwifi/iwl-5000.c
··· 530 530 struct iwl_cfg iwl5300_agn_cfg = { 531 531 .name = "Intel(R) Ultimate N WiFi Link 5300 AGN", 532 532 IWL_DEVICE_5000, 533 + /* at least EEPROM 0x11A has wrong info */ 534 + .valid_tx_ant = ANT_ABC, /* .cfg overwrite */ 535 + .valid_rx_ant = ANT_ABC, /* .cfg overwrite */ 533 536 .ht_params = &iwl5000_ht_params, 534 537 }; 535 538
+1 -6
drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
··· 335 335 struct ieee80211_channel *channel = conf->channel; 336 336 const struct iwl_channel_info *ch_info; 337 337 int ret = 0; 338 - bool ht_changed[NUM_IWL_RXON_CTX] = {}; 339 338 340 339 IWL_DEBUG_MAC80211(priv, "changed %#x", changed); 341 340 ··· 382 383 383 384 for_each_context(priv, ctx) { 384 385 /* Configure HT40 channels */ 385 - if (ctx->ht.enabled != conf_is_ht(conf)) { 386 + if (ctx->ht.enabled != conf_is_ht(conf)) 386 387 ctx->ht.enabled = conf_is_ht(conf); 387 - ht_changed[ctx->ctxid] = true; 388 - } 389 388 390 389 if (ctx->ht.enabled) { 391 390 if (conf_is_ht40_minus(conf)) { ··· 452 455 if (!memcmp(&ctx->staging, &ctx->active, sizeof(ctx->staging))) 453 456 continue; 454 457 iwlagn_commit_rxon(priv, ctx); 455 - if (ht_changed[ctx->ctxid]) 456 - iwlagn_update_qos(priv, ctx); 457 458 } 458 459 out: 459 460 mutex_unlock(&priv->mutex);
+18 -9
drivers/net/wireless/iwlwifi/iwl-agn-tx.c
··· 568 568 569 569 hdr_len = ieee80211_hdrlen(fc); 570 570 571 - /* Find index into station table for destination station */ 572 - sta_id = iwl_sta_id_or_broadcast(priv, ctx, info->control.sta); 573 - if (sta_id == IWL_INVALID_STATION) { 574 - IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n", 575 - hdr->addr1); 576 - goto drop_unlock; 571 + /* For management frames use broadcast id to do not break aggregation */ 572 + if (!ieee80211_is_data(fc)) 573 + sta_id = ctx->bcast_sta_id; 574 + else { 575 + /* Find index into station table for destination station */ 576 + sta_id = iwl_sta_id_or_broadcast(priv, ctx, info->control.sta); 577 + if (sta_id == IWL_INVALID_STATION) { 578 + IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n", 579 + hdr->addr1); 580 + goto drop_unlock; 581 + } 577 582 } 578 583 579 584 IWL_DEBUG_TX(priv, "station Id %d\n", sta_id); ··· 1229 1224 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) { 1230 1225 1231 1226 tx_info = &txq->txb[txq->q.read_ptr]; 1232 - iwlagn_tx_status(priv, tx_info, 1233 - txq_id >= IWLAGN_FIRST_AMPDU_QUEUE); 1227 + 1228 + if (WARN_ON_ONCE(tx_info->skb == NULL)) 1229 + continue; 1234 1230 1235 1231 hdr = (struct ieee80211_hdr *)tx_info->skb->data; 1236 - if (hdr && ieee80211_is_data_qos(hdr->frame_control)) 1232 + if (ieee80211_is_data_qos(hdr->frame_control)) 1237 1233 nfreed++; 1234 + 1235 + iwlagn_tx_status(priv, tx_info, 1236 + txq_id >= IWLAGN_FIRST_AMPDU_QUEUE); 1238 1237 tx_info->skb = NULL; 1239 1238 1240 1239 if (priv->cfg->ops->lib->txq_inval_byte_cnt_tbl)
+4 -2
drivers/net/wireless/libertas/cmd.c
··· 1339 1339 cpu_to_le16(PS_MODE_ACTION_EXIT_PS)) { 1340 1340 lbs_deb_host( 1341 1341 "EXEC_NEXT_CMD: ignore ENTER_PS cmd\n"); 1342 - list_del(&cmdnode->list); 1343 1342 spin_lock_irqsave(&priv->driver_lock, flags); 1343 + list_del(&cmdnode->list); 1344 1344 lbs_complete_command(priv, cmdnode, 0); 1345 1345 spin_unlock_irqrestore(&priv->driver_lock, flags); 1346 1346 ··· 1352 1352 (priv->psstate == PS_STATE_PRE_SLEEP)) { 1353 1353 lbs_deb_host( 1354 1354 "EXEC_NEXT_CMD: ignore EXIT_PS cmd in sleep\n"); 1355 - list_del(&cmdnode->list); 1356 1355 spin_lock_irqsave(&priv->driver_lock, flags); 1356 + list_del(&cmdnode->list); 1357 1357 lbs_complete_command(priv, cmdnode, 0); 1358 1358 spin_unlock_irqrestore(&priv->driver_lock, flags); 1359 1359 priv->needtowakeup = 1; ··· 1366 1366 "EXEC_NEXT_CMD: sending EXIT_PS\n"); 1367 1367 } 1368 1368 } 1369 + spin_lock_irqsave(&priv->driver_lock, flags); 1369 1370 list_del(&cmdnode->list); 1371 + spin_unlock_irqrestore(&priv->driver_lock, flags); 1370 1372 lbs_deb_host("EXEC_NEXT_CMD: sending command 0x%04x\n", 1371 1373 le16_to_cpu(cmd->command)); 1372 1374 lbs_submit_command(priv, cmdnode);
+8 -1
drivers/net/wireless/mwl8k.c
··· 137 137 struct mwl8k_priv { 138 138 struct ieee80211_hw *hw; 139 139 struct pci_dev *pdev; 140 + int irq; 140 141 141 142 struct mwl8k_device_info *device_info; 142 143 ··· 3762 3761 rc = request_irq(priv->pdev->irq, mwl8k_interrupt, 3763 3762 IRQF_SHARED, MWL8K_NAME, hw); 3764 3763 if (rc) { 3764 + priv->irq = -1; 3765 3765 wiphy_err(hw->wiphy, "failed to register IRQ handler\n"); 3766 3766 return -EIO; 3767 3767 } 3768 + priv->irq = priv->pdev->irq; 3768 3769 3769 3770 /* Enable TX reclaim and RX tasklets. */ 3770 3771 tasklet_enable(&priv->poll_tx_task); ··· 3803 3800 if (rc) { 3804 3801 iowrite32(0, priv->regs + MWL8K_HIU_A2H_INTERRUPT_MASK); 3805 3802 free_irq(priv->pdev->irq, hw); 3803 + priv->irq = -1; 3806 3804 tasklet_disable(&priv->poll_tx_task); 3807 3805 tasklet_disable(&priv->poll_rx_task); 3808 3806 } ··· 3822 3818 3823 3819 /* Disable interrupts */ 3824 3820 iowrite32(0, priv->regs + MWL8K_HIU_A2H_INTERRUPT_MASK); 3825 - free_irq(priv->pdev->irq, hw); 3821 + if (priv->irq != -1) { 3822 + free_irq(priv->pdev->irq, hw); 3823 + priv->irq = -1; 3824 + } 3826 3825 3827 3826 /* Stop finalize join worker */ 3828 3827 cancel_work_sync(&priv->finalize_join_worker);
+1 -1
drivers/net/wireless/p54/txrx.c
··· 703 703 struct p54_tx_info *p54info; 704 704 struct p54_hdr *hdr; 705 705 struct p54_tx_data *txhdr; 706 - unsigned int padding, len, extra_len; 706 + unsigned int padding, len, extra_len = 0; 707 707 int i, j, ridx; 708 708 u16 hdr_flags = 0, aid = 0; 709 709 u8 rate, queue = 0, crypt_offset = 0;
+6 -6
drivers/net/zorro8390.c
··· 126 126 127 127 board = z->resource.start; 128 128 ioaddr = board+cards[i].offset; 129 - dev = alloc_ei_netdev(); 129 + dev = ____alloc_ei_netdev(0); 130 130 if (!dev) 131 131 return -ENOMEM; 132 132 if (!request_mem_region(ioaddr, NE_IO_EXTENT*2, DRV_NAME)) { ··· 146 146 static const struct net_device_ops zorro8390_netdev_ops = { 147 147 .ndo_open = zorro8390_open, 148 148 .ndo_stop = zorro8390_close, 149 - .ndo_start_xmit = ei_start_xmit, 150 - .ndo_tx_timeout = ei_tx_timeout, 151 - .ndo_get_stats = ei_get_stats, 152 - .ndo_set_multicast_list = ei_set_multicast_list, 149 + .ndo_start_xmit = __ei_start_xmit, 150 + .ndo_tx_timeout = __ei_tx_timeout, 151 + .ndo_get_stats = __ei_get_stats, 152 + .ndo_set_multicast_list = __ei_set_multicast_list, 153 153 .ndo_validate_addr = eth_validate_addr, 154 154 .ndo_set_mac_address = eth_mac_addr, 155 155 .ndo_change_mtu = eth_change_mtu, 156 156 #ifdef CONFIG_NET_POLL_CONTROLLER 157 - .ndo_poll_controller = ei_poll, 157 + .ndo_poll_controller = __ei_poll, 158 158 #endif 159 159 }; 160 160
+3 -5
drivers/parport/parport_pc.c
··· 2550 2550 const struct parport_pc_via_data *via) 2551 2551 { 2552 2552 short inta_addr[6] = { 0x2A0, 0x2C0, 0x220, 0x240, 0x1E0 }; 2553 - struct resource *base_res; 2554 2553 u32 ite8872set; 2555 2554 u32 ite8872_lpt, ite8872_lpthi; 2556 2555 u8 ite8872_irq, type; ··· 2560 2561 2561 2562 /* make sure which one chip */ 2562 2563 for (i = 0; i < 5; i++) { 2563 - base_res = request_region(inta_addr[i], 32, "it887x"); 2564 - if (base_res) { 2564 + if (request_region(inta_addr[i], 32, "it887x")) { 2565 2565 int test; 2566 2566 pci_write_config_dword(pdev, 0x60, 2567 2567 0xe5000000 | inta_addr[i]); ··· 2569 2571 test = inb(inta_addr[i]); 2570 2572 if (test != 0xff) 2571 2573 break; 2572 - release_region(inta_addr[i], 0x8); 2574 + release_region(inta_addr[i], 32); 2573 2575 } 2574 2576 } 2575 2577 if (i >= 5) { ··· 2633 2635 /* 2634 2636 * Release the resource so that parport_pc_probe_port can get it. 2635 2637 */ 2636 - release_resource(base_res); 2638 + release_region(inta_addr[i], 32); 2637 2639 if (parport_pc_probe_port(ite8872_lpt, ite8872_lpthi, 2638 2640 irq, PARPORT_DMA_NONE, &pdev->dev, 0)) { 2639 2641 printk(KERN_INFO
+3 -1
drivers/pci/Kconfig
··· 88 88 depends on HOTPLUG 89 89 default y 90 90 91 - select NLS if (DMI || ACPI) 91 + config PCI_LABEL 92 + def_bool y if (DMI || ACPI) 93 + select NLS
+2 -2
drivers/pci/Makefile
··· 56 56 # ACPI Related PCI FW Functions 57 57 # ACPI _DSM provided firmware instance and string name 58 58 # 59 - obj-$(CONFIG_ACPI) += pci-acpi.o pci-label.o 59 + obj-$(CONFIG_ACPI) += pci-acpi.o 60 60 61 61 # SMBIOS provided firmware instance and labels 62 - obj-$(CONFIG_DMI) += pci-label.o 62 + obj-$(CONFIG_PCI_LABEL) += pci-label.o 63 63 64 64 # Cardbus & CompactPCI use setup-bus 65 65 obj-$(CONFIG_HOTPLUG) += setup-bus.o
+43 -12
drivers/pci/intel-iommu.c
··· 1299 1299 static struct iova_domain reserved_iova_list; 1300 1300 static struct lock_class_key reserved_rbtree_key; 1301 1301 1302 - static void dmar_init_reserved_ranges(void) 1302 + static int dmar_init_reserved_ranges(void) 1303 1303 { 1304 1304 struct pci_dev *pdev = NULL; 1305 1305 struct iova *iova; ··· 1313 1313 /* IOAPIC ranges shouldn't be accessed by DMA */ 1314 1314 iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START), 1315 1315 IOVA_PFN(IOAPIC_RANGE_END)); 1316 - if (!iova) 1316 + if (!iova) { 1317 1317 printk(KERN_ERR "Reserve IOAPIC range failed\n"); 1318 + return -ENODEV; 1319 + } 1318 1320 1319 1321 /* Reserve all PCI MMIO to avoid peer-to-peer access */ 1320 1322 for_each_pci_dev(pdev) { ··· 1329 1327 iova = reserve_iova(&reserved_iova_list, 1330 1328 IOVA_PFN(r->start), 1331 1329 IOVA_PFN(r->end)); 1332 - if (!iova) 1330 + if (!iova) { 1333 1331 printk(KERN_ERR "Reserve iova failed\n"); 1332 + return -ENODEV; 1333 + } 1334 1334 } 1335 1335 } 1336 - 1336 + return 0; 1337 1337 } 1338 1338 1339 1339 static void domain_reserve_special_ranges(struct dmar_domain *domain) ··· 1839 1835 1840 1836 ret = iommu_attach_domain(domain, iommu); 1841 1837 if (ret) { 1842 - domain_exit(domain); 1838 + free_domain_mem(domain); 1843 1839 goto error; 1844 1840 } 1845 1841 ··· 2217 2213 return 0; 2218 2214 } 2219 2215 2220 - int __init init_dmars(void) 2216 + static int __init init_dmars(int force_on) 2221 2217 { 2222 2218 struct dmar_drhd_unit *drhd; 2223 2219 struct dmar_rmrr_unit *rmrr; ··· 2397 2393 * enable translation 2398 2394 */ 2399 2395 for_each_drhd_unit(drhd) { 2400 - if (drhd->ignored) 2396 + if (drhd->ignored) { 2397 + /* 2398 + * we always have to disable PMRs or DMA may fail on 2399 + * this device 2400 + */ 2401 + if (force_on) 2402 + iommu_disable_protect_mem_regions(drhd->iommu); 2401 2403 continue; 2404 + } 2402 2405 iommu = drhd->iommu; 2403 2406 2404 2407 iommu_flush_write_buffer(iommu); ··· 3251 3240 if (!domain) 3252 3241 
return 0; 3253 3242 3254 - if (action == BUS_NOTIFY_UNBOUND_DRIVER && !iommu_pass_through) 3243 + if (action == BUS_NOTIFY_UNBOUND_DRIVER && !iommu_pass_through) { 3255 3244 domain_remove_one_dev_info(domain, pdev); 3245 + 3246 + if (!(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) && 3247 + !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) && 3248 + list_empty(&domain->devices)) 3249 + domain_exit(domain); 3250 + } 3256 3251 3257 3252 return 0; 3258 3253 } ··· 3294 3277 if (no_iommu || dmar_disabled) 3295 3278 return -ENODEV; 3296 3279 3297 - iommu_init_mempool(); 3298 - dmar_init_reserved_ranges(); 3280 + if (iommu_init_mempool()) { 3281 + if (force_on) 3282 + panic("tboot: Failed to initialize iommu memory\n"); 3283 + return -ENODEV; 3284 + } 3285 + 3286 + if (dmar_init_reserved_ranges()) { 3287 + if (force_on) 3288 + panic("tboot: Failed to reserve iommu ranges\n"); 3289 + return -ENODEV; 3290 + } 3299 3291 3300 3292 init_no_remapping_devices(); 3301 3293 3302 - ret = init_dmars(); 3294 + ret = init_dmars(force_on); 3303 3295 if (ret) { 3304 3296 if (force_on) 3305 3297 panic("tboot: Failed to initialize DMARs\n"); ··· 3417 3391 domain->iommu_count--; 3418 3392 domain_update_iommu_cap(domain); 3419 3393 spin_unlock_irqrestore(&domain->iommu_lock, tmp_flags); 3394 + 3395 + spin_lock_irqsave(&iommu->lock, tmp_flags); 3396 + clear_bit(domain->id, iommu->domain_ids); 3397 + iommu->domains[domain->id] = NULL; 3398 + spin_unlock_irqrestore(&iommu->lock, tmp_flags); 3420 3399 } 3421 3400 3422 3401 spin_unlock_irqrestore(&device_domain_lock, flags); ··· 3638 3607 3639 3608 pte = dmar_domain->pgd; 3640 3609 if (dma_pte_present(pte)) { 3641 - free_pgtable_page(dmar_domain->pgd); 3642 3610 dmar_domain->pgd = (struct dma_pte *) 3643 3611 phys_to_virt(dma_pte_addr(pte)); 3612 + free_pgtable_page(pte); 3644 3613 } 3645 3614 dmar_domain->agaw--; 3646 3615 }
+2 -2
drivers/pci/setup-bus.c
··· 579 579 } 580 580 size0 = calculate_iosize(size, min_size, size1, 581 581 resource_size(b_res), 4096); 582 - size1 = !add_size? size0: 582 + size1 = (!add_head || (add_head && !add_size)) ? size0 : 583 583 calculate_iosize(size, min_size+add_size, size1, 584 584 resource_size(b_res), 4096); 585 585 if (!size0 && !size1) { ··· 677 677 align += aligns[order]; 678 678 } 679 679 size0 = calculate_memsize(size, min_size, 0, resource_size(b_res), min_align); 680 - size1 = !add_size ? size : 680 + size1 = (!add_head || (add_head && !add_size)) ? size0 : 681 681 calculate_memsize(size, min_size+add_size, 0, 682 682 resource_size(b_res), min_align); 683 683 if (!size0 && !size1) {
+1 -1
drivers/pcmcia/pcmcia_resource.c
··· 173 173 c = p_dev->function_config; 174 174 175 175 if (!(c->state & CONFIG_LOCKED)) { 176 - dev_dbg(&p_dev->dev, "Configuration isn't't locked\n"); 176 + dev_dbg(&p_dev->dev, "Configuration isn't locked\n"); 177 177 mutex_unlock(&s->ops_mutex); 178 178 return -EACCES; 179 179 }
+41 -16
drivers/platform/x86/eeepc-laptop.c
··· 585 585 return true; 586 586 } 587 587 588 - static void eeepc_rfkill_hotplug(struct eeepc_laptop *eeepc) 588 + static void eeepc_rfkill_hotplug(struct eeepc_laptop *eeepc, acpi_handle handle) 589 589 { 590 + struct pci_dev *port; 590 591 struct pci_dev *dev; 591 592 struct pci_bus *bus; 592 593 bool blocked = eeepc_wlan_rfkill_blocked(eeepc); ··· 600 599 mutex_lock(&eeepc->hotplug_lock); 601 600 602 601 if (eeepc->hotplug_slot) { 603 - bus = pci_find_bus(0, 1); 602 + port = acpi_get_pci_dev(handle); 603 + if (!port) { 604 + pr_warning("Unable to find port\n"); 605 + goto out_unlock; 606 + } 607 + 608 + bus = port->subordinate; 609 + 604 610 if (!bus) { 605 - pr_warning("Unable to find PCI bus 1?\n"); 611 + pr_warning("Unable to find PCI bus?\n"); 606 612 goto out_unlock; 607 613 } 608 614 ··· 617 609 pr_err("Unable to read PCI config space?\n"); 618 610 goto out_unlock; 619 611 } 612 + 620 613 absent = (l == 0xffffffff); 621 614 622 615 if (blocked != absent) { ··· 656 647 mutex_unlock(&eeepc->hotplug_lock); 657 648 } 658 649 650 + static void eeepc_rfkill_hotplug_update(struct eeepc_laptop *eeepc, char *node) 651 + { 652 + acpi_status status = AE_OK; 653 + acpi_handle handle; 654 + 655 + status = acpi_get_handle(NULL, node, &handle); 656 + 657 + if (ACPI_SUCCESS(status)) 658 + eeepc_rfkill_hotplug(eeepc, handle); 659 + } 660 + 659 661 static void eeepc_rfkill_notify(acpi_handle handle, u32 event, void *data) 660 662 { 661 663 struct eeepc_laptop *eeepc = data; ··· 674 654 if (event != ACPI_NOTIFY_BUS_CHECK) 675 655 return; 676 656 677 - eeepc_rfkill_hotplug(eeepc); 657 + eeepc_rfkill_hotplug(eeepc, handle); 678 658 } 679 659 680 660 static int eeepc_register_rfkill_notifier(struct eeepc_laptop *eeepc, ··· 692 672 eeepc); 693 673 if (ACPI_FAILURE(status)) 694 674 pr_warning("Failed to register notify on %s\n", node); 675 + /* 676 + * Refresh pci hotplug in case the rfkill state was 677 + * changed during setup. 
678 + */ 679 + eeepc_rfkill_hotplug(eeepc, handle); 695 680 } else 696 681 return -ENODEV; 697 682 ··· 718 693 if (ACPI_FAILURE(status)) 719 694 pr_err("Error removing rfkill notify handler %s\n", 720 695 node); 696 + /* 697 + * Refresh pci hotplug in case the rfkill 698 + * state was changed after 699 + * eeepc_unregister_rfkill_notifier() 700 + */ 701 + eeepc_rfkill_hotplug(eeepc, handle); 721 702 } 722 703 } 723 704 ··· 847 816 rfkill_destroy(eeepc->wlan_rfkill); 848 817 eeepc->wlan_rfkill = NULL; 849 818 } 850 - /* 851 - * Refresh pci hotplug in case the rfkill state was changed after 852 - * eeepc_unregister_rfkill_notifier() 853 - */ 854 - eeepc_rfkill_hotplug(eeepc); 819 + 855 820 if (eeepc->hotplug_slot) 856 821 pci_hp_deregister(eeepc->hotplug_slot); 857 822 ··· 916 889 eeepc_register_rfkill_notifier(eeepc, "\\_SB.PCI0.P0P5"); 917 890 eeepc_register_rfkill_notifier(eeepc, "\\_SB.PCI0.P0P6"); 918 891 eeepc_register_rfkill_notifier(eeepc, "\\_SB.PCI0.P0P7"); 919 - /* 920 - * Refresh pci hotplug in case the rfkill state was changed during 921 - * setup. 922 - */ 923 - eeepc_rfkill_hotplug(eeepc); 924 892 925 893 exit: 926 894 if (result && result != -ENODEV) ··· 950 928 struct eeepc_laptop *eeepc = dev_get_drvdata(device); 951 929 952 930 /* Refresh both wlan rfkill state and pci hotplug */ 953 - if (eeepc->wlan_rfkill) 954 - eeepc_rfkill_hotplug(eeepc); 931 + if (eeepc->wlan_rfkill) { 932 + eeepc_rfkill_hotplug_update(eeepc, "\\_SB.PCI0.P0P5"); 933 + eeepc_rfkill_hotplug_update(eeepc, "\\_SB.PCI0.P0P6"); 934 + eeepc_rfkill_hotplug_update(eeepc, "\\_SB.PCI0.P0P7"); 935 + } 955 936 956 937 if (eeepc->bluetooth_rfkill) 957 938 rfkill_set_sw_state(eeepc->bluetooth_rfkill,
+105 -25
drivers/platform/x86/sony-laptop.c
··· 934 934 /* 935 935 * Backlight device 936 936 */ 937 + struct sony_backlight_props { 938 + struct backlight_device *dev; 939 + int handle; 940 + u8 offset; 941 + u8 maxlvl; 942 + }; 943 + struct sony_backlight_props sony_bl_props; 944 + 937 945 static int sony_backlight_update_status(struct backlight_device *bd) 938 946 { 939 947 return acpi_callsetfunc(sony_nc_acpi_handle, "SBRT", ··· 962 954 { 963 955 int result; 964 956 int *handle = (int *)bl_get_data(bd); 957 + struct sony_backlight_props *sdev = 958 + (struct sony_backlight_props *)bl_get_data(bd); 965 959 966 - sony_call_snc_handle(*handle, 0x0200, &result); 960 + sony_call_snc_handle(sdev->handle, 0x0200, &result); 967 961 968 - return result & 0xff; 962 + return (result & 0xff) - sdev->offset; 969 963 } 970 964 971 965 static int sony_nc_update_status_ng(struct backlight_device *bd) 972 966 { 973 967 int value, result; 974 968 int *handle = (int *)bl_get_data(bd); 969 + struct sony_backlight_props *sdev = 970 + (struct sony_backlight_props *)bl_get_data(bd); 975 971 976 - value = bd->props.brightness; 977 - sony_call_snc_handle(*handle, 0x0100 | (value << 16), &result); 972 + value = bd->props.brightness + sdev->offset; 973 + if (sony_call_snc_handle(sdev->handle, 0x0100 | (value << 16), &result)) 974 + return -EIO; 978 975 979 - return sony_nc_get_brightness_ng(bd); 976 + return value; 980 977 } 981 978 982 979 static const struct backlight_ops sony_backlight_ops = { ··· 994 981 .update_status = sony_nc_update_status_ng, 995 982 .get_brightness = sony_nc_get_brightness_ng, 996 983 }; 997 - static int backlight_ng_handle; 998 - static struct backlight_device *sony_backlight_device; 999 984 1000 985 /* 1001 986 * New SNC-only Vaios event mapping to driver known keys ··· 1560 1549 &ignore); 1561 1550 } 1562 1551 1552 + static void sony_nc_backlight_ng_read_limits(int handle, 1553 + struct sony_backlight_props *props) 1554 + { 1555 + int offset; 1556 + acpi_status status; 1557 + u8 brlvl, i; 1558 + u8 min 
= 0xff, max = 0x00; 1559 + struct acpi_object_list params; 1560 + union acpi_object in_obj; 1561 + union acpi_object *lvl_enum; 1562 + struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; 1563 + 1564 + props->handle = handle; 1565 + props->offset = 0; 1566 + props->maxlvl = 0xff; 1567 + 1568 + offset = sony_find_snc_handle(handle); 1569 + if (offset < 0) 1570 + return; 1571 + 1572 + /* try to read the boundaries from ACPI tables, if we fail the above 1573 + * defaults should be reasonable 1574 + */ 1575 + params.count = 1; 1576 + params.pointer = &in_obj; 1577 + in_obj.type = ACPI_TYPE_INTEGER; 1578 + in_obj.integer.value = offset; 1579 + status = acpi_evaluate_object(sony_nc_acpi_handle, "SN06", &params, 1580 + &buffer); 1581 + if (ACPI_FAILURE(status)) 1582 + return; 1583 + 1584 + lvl_enum = (union acpi_object *) buffer.pointer; 1585 + if (!lvl_enum) { 1586 + pr_err("No SN06 return object."); 1587 + return; 1588 + } 1589 + if (lvl_enum->type != ACPI_TYPE_BUFFER) { 1590 + pr_err("Invalid SN06 return object 0x%.2x\n", 1591 + lvl_enum->type); 1592 + goto out_invalid; 1593 + } 1594 + 1595 + /* the buffer lists brightness levels available, brightness levels are 1596 + * from 0 to 8 in the array, other values are used by ALS control. 
1597 + */ 1598 + for (i = 0; i < 9 && i < lvl_enum->buffer.length; i++) { 1599 + 1600 + brlvl = *(lvl_enum->buffer.pointer + i); 1601 + dprintk("Brightness level: %d\n", brlvl); 1602 + 1603 + if (!brlvl) 1604 + break; 1605 + 1606 + if (brlvl > max) 1607 + max = brlvl; 1608 + if (brlvl < min) 1609 + min = brlvl; 1610 + } 1611 + props->offset = min; 1612 + props->maxlvl = max; 1613 + dprintk("Brightness levels: min=%d max=%d\n", props->offset, 1614 + props->maxlvl); 1615 + 1616 + out_invalid: 1617 + kfree(buffer.pointer); 1618 + return; 1619 + } 1620 + 1563 1621 static void sony_nc_backlight_setup(void) 1564 1622 { 1565 1623 acpi_handle unused; ··· 1637 1557 struct backlight_properties props; 1638 1558 1639 1559 if (sony_find_snc_handle(0x12f) != -1) { 1640 - backlight_ng_handle = 0x12f; 1641 1560 ops = &sony_backlight_ng_ops; 1642 - max_brightness = 0xff; 1561 + sony_nc_backlight_ng_read_limits(0x12f, &sony_bl_props); 1562 + max_brightness = sony_bl_props.maxlvl - sony_bl_props.offset; 1643 1563 1644 1564 } else if (sony_find_snc_handle(0x137) != -1) { 1645 - backlight_ng_handle = 0x137; 1646 1565 ops = &sony_backlight_ng_ops; 1647 - max_brightness = 0xff; 1566 + sony_nc_backlight_ng_read_limits(0x137, &sony_bl_props); 1567 + max_brightness = sony_bl_props.maxlvl - sony_bl_props.offset; 1648 1568 1649 1569 } else if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle, "GBRT", 1650 1570 &unused))) { ··· 1657 1577 memset(&props, 0, sizeof(struct backlight_properties)); 1658 1578 props.type = BACKLIGHT_PLATFORM; 1659 1579 props.max_brightness = max_brightness; 1660 - sony_backlight_device = backlight_device_register("sony", NULL, 1661 - &backlight_ng_handle, 1662 - ops, &props); 1580 + sony_bl_props.dev = backlight_device_register("sony", NULL, 1581 + &sony_bl_props, 1582 + ops, &props); 1663 1583 1664 - if (IS_ERR(sony_backlight_device)) { 1665 - pr_warning(DRV_PFX "unable to register backlight device\n"); 1666 - sony_backlight_device = NULL; 1584 + if 
(IS_ERR(sony_bl_props.dev)) { 1585 + pr_warn(DRV_PFX "unable to register backlight device\n"); 1586 + sony_bl_props.dev = NULL; 1667 1587 } else 1668 - sony_backlight_device->props.brightness = 1669 - ops->get_brightness(sony_backlight_device); 1588 + sony_bl_props.dev->props.brightness = 1589 + ops->get_brightness(sony_bl_props.dev); 1670 1590 } 1671 1591 1672 1592 static void sony_nc_backlight_cleanup(void) 1673 1593 { 1674 - if (sony_backlight_device) 1675 - backlight_device_unregister(sony_backlight_device); 1594 + if (sony_bl_props.dev) 1595 + backlight_device_unregister(sony_bl_props.dev); 1676 1596 } 1677 1597 1678 1598 static int sony_nc_add(struct acpi_device *device) ··· 2670 2590 mutex_lock(&spic_dev.lock); 2671 2591 switch (cmd) { 2672 2592 case SONYPI_IOCGBRT: 2673 - if (sony_backlight_device == NULL) { 2593 + if (sony_bl_props.dev == NULL) { 2674 2594 ret = -EIO; 2675 2595 break; 2676 2596 } ··· 2683 2603 ret = -EFAULT; 2684 2604 break; 2685 2605 case SONYPI_IOCSBRT: 2686 - if (sony_backlight_device == NULL) { 2606 + if (sony_bl_props.dev == NULL) { 2687 2607 ret = -EIO; 2688 2608 break; 2689 2609 } ··· 2697 2617 break; 2698 2618 } 2699 2619 /* sync the backlight device status */ 2700 - sony_backlight_device->props.brightness = 2701 - sony_backlight_get_brightness(sony_backlight_device); 2620 + sony_bl_props.dev->props.brightness = 2621 + sony_backlight_get_brightness(sony_bl_props.dev); 2702 2622 break; 2703 2623 case SONYPI_IOCGBAT1CAP: 2704 2624 if (ec_read16(SONYPI_BAT1_FULL, &val16)) {
+4 -2
drivers/platform/x86/thinkpad_acpi.c
··· 128 128 }; 129 129 130 130 /* ACPI HIDs */ 131 - #define TPACPI_ACPI_HKEY_HID "IBM0068" 131 + #define TPACPI_ACPI_IBM_HKEY_HID "IBM0068" 132 + #define TPACPI_ACPI_LENOVO_HKEY_HID "LEN0068" 132 133 #define TPACPI_ACPI_EC_HID "PNP0C09" 133 134 134 135 /* Input IDs */ ··· 3880 3879 } 3881 3880 3882 3881 static const struct acpi_device_id ibm_htk_device_ids[] = { 3883 - {TPACPI_ACPI_HKEY_HID, 0}, 3882 + {TPACPI_ACPI_IBM_HKEY_HID, 0}, 3883 + {TPACPI_ACPI_LENOVO_HKEY_HID, 0}, 3884 3884 {"", 0}, 3885 3885 }; 3886 3886
+9
drivers/rapidio/switches/idt_gen2.c
··· 95 95 else 96 96 table++; 97 97 98 + if (route_port == RIO_INVALID_ROUTE) 99 + route_port = IDT_DEFAULT_ROUTE; 100 + 98 101 rio_mport_write_config_32(mport, destid, hopcount, 99 102 LOCAL_RTE_CONF_DESTID_SEL, table); 100 103 ··· 413 410 rdev->rswitch->em_init = idtg2_em_init; 414 411 rdev->rswitch->em_handle = idtg2_em_handler; 415 412 rdev->rswitch->sw_sysfs = idtg2_sysfs; 413 + 414 + if (do_enum) { 415 + /* Ensure that default routing is disabled on startup */ 416 + rio_write_config_32(rdev, 417 + RIO_STD_RTE_DEFAULT_PORT, IDT_NO_ROUTE); 418 + } 416 419 417 420 return 0; 418 421 }
+6
drivers/rapidio/switches/idtcps.c
··· 26 26 { 27 27 u32 result; 28 28 29 + if (route_port == RIO_INVALID_ROUTE) 30 + route_port = CPS_DEFAULT_ROUTE; 31 + 29 32 if (table == RIO_GLOBAL_TABLE) { 30 33 rio_mport_write_config_32(mport, destid, hopcount, 31 34 RIO_STD_RTE_CONF_DESTID_SEL_CSR, route_destid); ··· 133 130 /* set TVAL = ~50us */ 134 131 rio_write_config_32(rdev, 135 132 rdev->phys_efptr + RIO_PORT_LINKTO_CTL_CSR, 0x8e << 8); 133 + /* Ensure that default routing is disabled on startup */ 134 + rio_write_config_32(rdev, 135 + RIO_STD_RTE_DEFAULT_PORT, CPS_NO_ROUTE); 136 136 } 137 137 138 138 return 0;
+6
drivers/rapidio/switches/tsi57x.c
··· 303 303 rdev->rswitch->em_init = tsi57x_em_init; 304 304 rdev->rswitch->em_handle = tsi57x_em_handler; 305 305 306 + if (do_enum) { 307 + /* Ensure that default routing is disabled on startup */ 308 + rio_write_config_32(rdev, RIO_STD_RTE_DEFAULT_PORT, 309 + RIO_INVALID_ROUTE); 310 + } 311 + 306 312 return 0; 307 313 } 308 314
+2 -2
drivers/rtc/rtc-coh901331.c
··· 220 220 } 221 221 clk_disable(rtap->clk); 222 222 223 + platform_set_drvdata(pdev, rtap); 223 224 rtap->rtc = rtc_device_register("coh901331", &pdev->dev, &coh901331_ops, 224 225 THIS_MODULE); 225 226 if (IS_ERR(rtap->rtc)) { ··· 228 227 goto out_no_rtc; 229 228 } 230 229 231 - platform_set_drvdata(pdev, rtap); 232 - 233 230 return 0; 234 231 235 232 out_no_rtc: 233 + platform_set_drvdata(pdev, NULL); 236 234 out_no_clk_enable: 237 235 clk_put(rtap->clk); 238 236 out_no_clk:
+3 -2
drivers/rtc/rtc-davinci.c
··· 524 524 goto fail2; 525 525 } 526 526 527 + platform_set_drvdata(pdev, davinci_rtc); 528 + 527 529 davinci_rtc->rtc = rtc_device_register(pdev->name, &pdev->dev, 528 530 &davinci_rtc_ops, THIS_MODULE); 529 531 if (IS_ERR(davinci_rtc->rtc)) { ··· 555 553 556 554 rtcss_write(davinci_rtc, PRTCSS_RTC_CCTRL_CAEN, PRTCSS_RTC_CCTRL); 557 555 558 - platform_set_drvdata(pdev, davinci_rtc); 559 - 560 556 device_init_wakeup(&pdev->dev, 0); 561 557 562 558 return 0; ··· 562 562 fail4: 563 563 rtc_device_unregister(davinci_rtc->rtc); 564 564 fail3: 565 + platform_set_drvdata(pdev, NULL); 565 566 iounmap(davinci_rtc->base); 566 567 fail2: 567 568 release_mem_region(davinci_rtc->pbase, davinci_rtc->base_size);
+1 -1
drivers/rtc/rtc-ds1286.c
··· 355 355 goto out; 356 356 } 357 357 spin_lock_init(&priv->lock); 358 + platform_set_drvdata(pdev, priv); 358 359 rtc = rtc_device_register("ds1286", &pdev->dev, 359 360 &ds1286_ops, THIS_MODULE); 360 361 if (IS_ERR(rtc)) { ··· 363 362 goto out; 364 363 } 365 364 priv->rtc = rtc; 366 - platform_set_drvdata(pdev, priv); 367 365 return 0; 368 366 369 367 out:
+2 -3
drivers/rtc/rtc-ep93xx.c
··· 151 151 return -ENXIO; 152 152 153 153 pdev->dev.platform_data = ep93xx_rtc; 154 + platform_set_drvdata(pdev, rtc); 154 155 155 156 rtc = rtc_device_register(pdev->name, 156 157 &pdev->dev, &ep93xx_rtc_ops, THIS_MODULE); ··· 160 159 goto exit; 161 160 } 162 161 163 - platform_set_drvdata(pdev, rtc); 164 - 165 162 err = sysfs_create_group(&pdev->dev.kobj, &ep93xx_rtc_sysfs_files); 166 163 if (err) 167 164 goto fail; ··· 167 168 return 0; 168 169 169 170 fail: 170 - platform_set_drvdata(pdev, NULL); 171 171 rtc_device_unregister(rtc); 172 172 exit: 173 + platform_set_drvdata(pdev, NULL); 173 174 pdev->dev.platform_data = NULL; 174 175 return err; 175 176 }
+3 -2
drivers/rtc/rtc-m41t80.c
··· 783 783 goto exit; 784 784 } 785 785 786 + clientdata->features = id->driver_data; 787 + i2c_set_clientdata(client, clientdata); 788 + 786 789 rtc = rtc_device_register(client->name, &client->dev, 787 790 &m41t80_rtc_ops, THIS_MODULE); 788 791 if (IS_ERR(rtc)) { ··· 795 792 } 796 793 797 794 clientdata->rtc = rtc; 798 - clientdata->features = id->driver_data; 799 - i2c_set_clientdata(client, clientdata); 800 795 801 796 /* Make sure HT (Halt Update) bit is cleared */ 802 797 rc = i2c_smbus_read_byte_data(client, M41T80_REG_ALARM_HOUR);
+5 -3
drivers/rtc/rtc-max8925.c
··· 257 257 goto out_irq; 258 258 } 259 259 260 + dev_set_drvdata(&pdev->dev, info); 261 + /* XXX - isn't this redundant? */ 262 + platform_set_drvdata(pdev, info); 263 + 260 264 info->rtc_dev = rtc_device_register("max8925-rtc", &pdev->dev, 261 265 &max8925_rtc_ops, THIS_MODULE); 262 266 ret = PTR_ERR(info->rtc_dev); ··· 269 265 goto out_rtc; 270 266 } 271 267 272 - dev_set_drvdata(&pdev->dev, info); 273 - platform_set_drvdata(pdev, info); 274 - 275 268 return 0; 276 269 out_rtc: 270 + platform_set_drvdata(pdev, NULL); 277 271 free_irq(chip->irq_base + MAX8925_IRQ_RTC_ALARM0, info); 278 272 out_irq: 279 273 kfree(info);
+3 -2
drivers/rtc/rtc-max8998.c
··· 265 265 info->rtc = max8998->rtc; 266 266 info->irq = max8998->irq_base + MAX8998_IRQ_ALARM0; 267 267 268 + platform_set_drvdata(pdev, info); 269 + 268 270 info->rtc_dev = rtc_device_register("max8998-rtc", &pdev->dev, 269 271 &max8998_rtc_ops, THIS_MODULE); 270 272 ··· 275 273 dev_err(&pdev->dev, "Failed to register RTC device: %d\n", ret); 276 274 goto out_rtc; 277 275 } 278 - 279 - platform_set_drvdata(pdev, info); 280 276 281 277 ret = request_threaded_irq(info->irq, NULL, max8998_rtc_alarm_irq, 0, 282 278 "rtc-alarm0", info); ··· 293 293 return 0; 294 294 295 295 out_rtc: 296 + platform_set_drvdata(pdev, NULL); 296 297 kfree(info); 297 298 return ret; 298 299 }
+6 -2
drivers/rtc/rtc-mc13xxx.c
··· 349 349 if (ret) 350 350 goto err_alarm_irq_request; 351 351 352 + mc13xxx_unlock(mc13xxx); 353 + 352 354 priv->rtc = rtc_device_register(pdev->name, 353 355 &pdev->dev, &mc13xxx_rtc_ops, THIS_MODULE); 354 356 if (IS_ERR(priv->rtc)) { 355 357 ret = PTR_ERR(priv->rtc); 358 + 359 + mc13xxx_lock(mc13xxx); 356 360 357 361 mc13xxx_irq_free(mc13xxx, MC13XXX_IRQ_TODA, priv); 358 362 err_alarm_irq_request: ··· 369 365 mc13xxx_irq_free(mc13xxx, MC13XXX_IRQ_RTCRST, priv); 370 366 err_reset_irq_request: 371 367 368 + mc13xxx_unlock(mc13xxx); 369 + 372 370 platform_set_drvdata(pdev, NULL); 373 371 kfree(priv); 374 372 } 375 - 376 - mc13xxx_unlock(mc13xxx); 377 373 378 374 return ret; 379 375 }
+2 -1
drivers/rtc/rtc-msm6242.c
··· 214 214 error = -ENOMEM; 215 215 goto out_free_priv; 216 216 } 217 + platform_set_drvdata(dev, priv); 217 218 218 219 rtc = rtc_device_register("rtc-msm6242", &dev->dev, &msm6242_rtc_ops, 219 220 THIS_MODULE); ··· 224 223 } 225 224 226 225 priv->rtc = rtc; 227 - platform_set_drvdata(dev, priv); 228 226 return 0; 229 227 230 228 out_unmap: 229 + platform_set_drvdata(dev, NULL); 231 230 iounmap(priv->regs); 232 231 out_free_priv: 233 232 kfree(priv);
+11 -8
drivers/rtc/rtc-mxc.c
··· 418 418 goto exit_put_clk; 419 419 } 420 420 421 - rtc = rtc_device_register(pdev->name, &pdev->dev, &mxc_rtc_ops, 422 - THIS_MODULE); 423 - if (IS_ERR(rtc)) { 424 - ret = PTR_ERR(rtc); 425 - goto exit_put_clk; 426 - } 427 - 428 - pdata->rtc = rtc; 429 421 platform_set_drvdata(pdev, pdata); 430 422 431 423 /* Configure and enable the RTC */ ··· 430 438 pdata->irq = -1; 431 439 } 432 440 441 + rtc = rtc_device_register(pdev->name, &pdev->dev, &mxc_rtc_ops, 442 + THIS_MODULE); 443 + if (IS_ERR(rtc)) { 444 + ret = PTR_ERR(rtc); 445 + goto exit_clr_drvdata; 446 + } 447 + 448 + pdata->rtc = rtc; 449 + 433 450 return 0; 434 451 452 + exit_clr_drvdata: 453 + platform_set_drvdata(pdev, NULL); 435 454 exit_put_clk: 436 455 clk_disable(pdata->clk); 437 456 clk_put(pdata->clk);
+1 -1
drivers/rtc/rtc-omap.c
··· 394 394 return 0; 395 395 396 396 fail2: 397 - free_irq(omap_rtc_timer, NULL); 397 + free_irq(omap_rtc_timer, rtc); 398 398 fail1: 399 399 rtc_device_unregister(rtc); 400 400 fail0:
+3 -1
drivers/rtc/rtc-pcap.c
··· 151 151 152 152 pcap_rtc->pcap = dev_get_drvdata(pdev->dev.parent); 153 153 154 + platform_set_drvdata(pdev, pcap_rtc); 155 + 154 156 pcap_rtc->rtc = rtc_device_register("pcap", &pdev->dev, 155 157 &pcap_rtc_ops, THIS_MODULE); 156 158 if (IS_ERR(pcap_rtc->rtc)) { ··· 160 158 goto fail_rtc; 161 159 } 162 160 163 - platform_set_drvdata(pdev, pcap_rtc); 164 161 165 162 timer_irq = pcap_to_irq(pcap_rtc->pcap, PCAP_IRQ_1HZ); 166 163 alarm_irq = pcap_to_irq(pcap_rtc->pcap, PCAP_IRQ_TODA); ··· 178 177 fail_timer: 179 178 rtc_device_unregister(pcap_rtc->rtc); 180 179 fail_rtc: 180 + platform_set_drvdata(pdev, NULL); 181 181 kfree(pcap_rtc); 182 182 return err; 183 183 }
+3 -2
drivers/rtc/rtc-rp5c01.c
··· 249 249 250 250 spin_lock_init(&priv->lock); 251 251 252 + platform_set_drvdata(dev, priv); 253 + 252 254 rtc = rtc_device_register("rtc-rp5c01", &dev->dev, &rp5c01_rtc_ops, 253 255 THIS_MODULE); 254 256 if (IS_ERR(rtc)) { 255 257 error = PTR_ERR(rtc); 256 258 goto out_unmap; 257 259 } 258 - 259 260 priv->rtc = rtc; 260 - platform_set_drvdata(dev, priv); 261 261 262 262 error = sysfs_create_bin_file(&dev->dev.kobj, &priv->nvram_attr); 263 263 if (error) ··· 268 268 out_unregister: 269 269 rtc_device_unregister(rtc); 270 270 out_unmap: 271 + platform_set_drvdata(dev, NULL); 271 272 iounmap(priv->regs); 272 273 out_free_priv: 273 274 kfree(priv);
+10 -3
drivers/rtc/rtc-s3c.c
··· 46 46 static void __iomem *s3c_rtc_base; 47 47 static int s3c_rtc_alarmno = NO_IRQ; 48 48 static int s3c_rtc_tickno = NO_IRQ; 49 + static bool wake_en; 49 50 static enum s3c_cpu_type s3c_rtc_cpu_type; 50 51 51 52 static DEFINE_SPINLOCK(s3c_rtc_pie_lock); ··· 563 562 } 564 563 s3c_rtc_enable(pdev, 0); 565 564 566 - if (device_may_wakeup(&pdev->dev)) 567 - enable_irq_wake(s3c_rtc_alarmno); 565 + if (device_may_wakeup(&pdev->dev) && !wake_en) { 566 + if (enable_irq_wake(s3c_rtc_alarmno) == 0) 567 + wake_en = true; 568 + else 569 + dev_err(&pdev->dev, "enable_irq_wake failed\n"); 570 + } 568 571 569 572 return 0; 570 573 } ··· 584 579 writew(tmp | ticnt_en_save, s3c_rtc_base + S3C2410_RTCCON); 585 580 } 586 581 587 - if (device_may_wakeup(&pdev->dev)) 582 + if (device_may_wakeup(&pdev->dev) && wake_en) { 588 583 disable_irq_wake(s3c_rtc_alarmno); 584 + wake_en = false; 585 + } 589 586 590 587 return 0; 591 588 }
+32 -19
drivers/s390/block/dasd.c
··· 1742 1742 static inline int _dasd_term_running_cqr(struct dasd_device *device) 1743 1743 { 1744 1744 struct dasd_ccw_req *cqr; 1745 + int rc; 1745 1746 1746 1747 if (list_empty(&device->ccw_queue)) 1747 1748 return 0; 1748 1749 cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist); 1749 - return device->discipline->term_IO(cqr); 1750 + rc = device->discipline->term_IO(cqr); 1751 + if (!rc) 1752 + /* 1753 + * CQR terminated because a more important request is pending. 1754 + * Undo decreasing of retry counter because this is 1755 + * not an error case. 1756 + */ 1757 + cqr->retries++; 1758 + return rc; 1750 1759 } 1751 1760 1752 1761 int dasd_sleep_on_immediatly(struct dasd_ccw_req *cqr) ··· 2323 2314 2324 2315 static int dasd_open(struct block_device *bdev, fmode_t mode) 2325 2316 { 2326 - struct dasd_block *block = bdev->bd_disk->private_data; 2327 2317 struct dasd_device *base; 2328 2318 int rc; 2329 2319 2330 - if (!block) 2320 + base = dasd_device_from_gendisk(bdev->bd_disk); 2321 + if (!base) 2331 2322 return -ENODEV; 2332 2323 2333 - base = block->base; 2334 - atomic_inc(&block->open_count); 2324 + atomic_inc(&base->block->open_count); 2335 2325 if (test_bit(DASD_FLAG_OFFLINE, &base->flags)) { 2336 2326 rc = -ENODEV; 2337 2327 goto unlock; ··· 2363 2355 goto out; 2364 2356 } 2365 2357 2358 + dasd_put_device(base); 2366 2359 return 0; 2367 2360 2368 2361 out: 2369 2362 module_put(base->discipline->owner); 2370 2363 unlock: 2371 - atomic_dec(&block->open_count); 2364 + atomic_dec(&base->block->open_count); 2365 + dasd_put_device(base); 2372 2366 return rc; 2373 2367 } 2374 2368 2375 2369 static int dasd_release(struct gendisk *disk, fmode_t mode) 2376 2370 { 2377 - struct dasd_block *block = disk->private_data; 2371 + struct dasd_device *base; 2378 2372 2379 - atomic_dec(&block->open_count); 2380 - module_put(block->base->discipline->owner); 2373 + base = dasd_device_from_gendisk(disk); 2374 + if (!base) 2375 + return -ENODEV; 2376 + 2377 + 
atomic_dec(&base->block->open_count); 2378 + module_put(base->discipline->owner); 2379 + dasd_put_device(base); 2381 2380 return 0; 2382 2381 } 2383 2382 ··· 2393 2378 */ 2394 2379 static int dasd_getgeo(struct block_device *bdev, struct hd_geometry *geo) 2395 2380 { 2396 - struct dasd_block *block; 2397 2381 struct dasd_device *base; 2398 2382 2399 - block = bdev->bd_disk->private_data; 2400 - if (!block) 2383 + base = dasd_device_from_gendisk(bdev->bd_disk); 2384 + if (!base) 2401 2385 return -ENODEV; 2402 - base = block->base; 2403 2386 2404 2387 if (!base->discipline || 2405 - !base->discipline->fill_geometry) 2388 + !base->discipline->fill_geometry) { 2389 + dasd_put_device(base); 2406 2390 return -EINVAL; 2407 - 2408 - base->discipline->fill_geometry(block, geo); 2409 - geo->start = get_start_sect(bdev) >> block->s2b_shift; 2391 + } 2392 + base->discipline->fill_geometry(base->block, geo); 2393 + geo->start = get_start_sect(bdev) >> base->block->s2b_shift; 2394 + dasd_put_device(base); 2410 2395 return 0; 2411 2396 } 2412 2397 ··· 2543 2528 dasd_set_target_state(device, DASD_STATE_NEW); 2544 2529 /* dasd_delete_device destroys the device reference. */ 2545 2530 block = device->block; 2546 - device->block = NULL; 2547 2531 dasd_delete_device(device); 2548 2532 /* 2549 2533 * life cycle of block is bound to device, so delete it after ··· 2664 2650 dasd_set_target_state(device, DASD_STATE_NEW); 2665 2651 /* dasd_delete_device destroys the device reference. */ 2666 2652 block = device->block; 2667 - device->block = NULL; 2668 2653 dasd_delete_device(device); 2669 2654 /* 2670 2655 * life cycle of block is bound to device, so delete it after
+30
drivers/s390/block/dasd_devmap.c
··· 674 674 return device; 675 675 } 676 676 677 + void dasd_add_link_to_gendisk(struct gendisk *gdp, struct dasd_device *device) 678 + { 679 + struct dasd_devmap *devmap; 680 + 681 + devmap = dasd_find_busid(dev_name(&device->cdev->dev)); 682 + if (IS_ERR(devmap)) 683 + return; 684 + spin_lock(&dasd_devmap_lock); 685 + gdp->private_data = devmap; 686 + spin_unlock(&dasd_devmap_lock); 687 + } 688 + 689 + struct dasd_device *dasd_device_from_gendisk(struct gendisk *gdp) 690 + { 691 + struct dasd_device *device; 692 + struct dasd_devmap *devmap; 693 + 694 + if (!gdp->private_data) 695 + return NULL; 696 + device = NULL; 697 + spin_lock(&dasd_devmap_lock); 698 + devmap = gdp->private_data; 699 + if (devmap && devmap->device) { 700 + device = devmap->device; 701 + dasd_get_device(device); 702 + } 703 + spin_unlock(&dasd_devmap_lock); 704 + return device; 705 + } 706 + 677 707 /* 678 708 * SECTION: files in sysfs 679 709 */
+1 -1
drivers/s390/block/dasd_diag.c
··· 239 239 addr_t ip; 240 240 int rc; 241 241 242 - kstat_cpu(smp_processor_id()).irqs[EXTINT_DSD]++; 243 242 switch (ext_int_code >> 24) { 244 243 case DASD_DIAG_CODE_31BIT: 245 244 ip = (addr_t) param32; ··· 249 250 default: 250 251 return; 251 252 } 253 + kstat_cpu(smp_processor_id()).irqs[EXTINT_DSD]++; 252 254 if (!ip) { /* no intparm: unsolicited interrupt */ 253 255 DBF_EVENT(DBF_NOTICE, "%s", "caught unsolicited " 254 256 "interrupt");
+3 -2
drivers/s390/block/dasd_eckd.c
··· 2037 2037 return; 2038 2038 2039 2039 /* summary unit check */ 2040 - if ((sense[7] == 0x0D) && 2040 + if ((sense[27] & DASD_SENSE_BIT_0) && (sense[7] == 0x0D) && 2041 2041 (scsw_dstat(&irb->scsw) & DEV_STAT_UNIT_CHECK)) { 2042 2042 dasd_alias_handle_summary_unit_check(device, irb); 2043 2043 return; ··· 2053 2053 /* loss of device reservation is handled via base devices only 2054 2054 * as alias devices may be used with several bases 2055 2055 */ 2056 - if (device->block && (sense[7] == 0x3F) && 2056 + if (device->block && (sense[27] & DASD_SENSE_BIT_0) && 2057 + (sense[7] == 0x3F) && 2057 2058 (scsw_dstat(&irb->scsw) & DEV_STAT_UNIT_CHECK) && 2058 2059 test_bit(DASD_FLAG_IS_RESERVED, &device->flags)) { 2059 2060 if (device->features & DASD_FEATURE_FAILONSLCK)
+1 -1
drivers/s390/block/dasd_genhd.c
··· 73 73 if (base->features & DASD_FEATURE_READONLY || 74 74 test_bit(DASD_FLAG_DEVICE_RO, &base->flags)) 75 75 set_disk_ro(gdp, 1); 76 - gdp->private_data = block; 76 + dasd_add_link_to_gendisk(gdp, base); 77 77 gdp->queue = block->request_queue; 78 78 block->gdp = gdp; 79 79 set_capacity(block->gdp, 0);
+3
drivers/s390/block/dasd_int.h
··· 686 686 struct dasd_device *dasd_device_from_cdev_locked(struct ccw_device *); 687 687 struct dasd_device *dasd_device_from_devindex(int); 688 688 689 + void dasd_add_link_to_gendisk(struct gendisk *, struct dasd_device *); 690 + struct dasd_device *dasd_device_from_gendisk(struct gendisk *); 691 + 689 692 int dasd_parse(void); 690 693 int dasd_busid_known(const char *); 691 694
+87 -41
drivers/s390/block/dasd_ioctl.c
··· 42 42 static int 43 43 dasd_ioctl_enable(struct block_device *bdev) 44 44 { 45 - struct dasd_block *block = bdev->bd_disk->private_data; 45 + struct dasd_device *base; 46 46 47 47 if (!capable(CAP_SYS_ADMIN)) 48 48 return -EACCES; 49 49 50 - dasd_enable_device(block->base); 50 + base = dasd_device_from_gendisk(bdev->bd_disk); 51 + if (!base) 52 + return -ENODEV; 53 + 54 + dasd_enable_device(base); 51 55 /* Formatting the dasd device can change the capacity. */ 52 56 mutex_lock(&bdev->bd_mutex); 53 - i_size_write(bdev->bd_inode, (loff_t)get_capacity(block->gdp) << 9); 57 + i_size_write(bdev->bd_inode, 58 + (loff_t)get_capacity(base->block->gdp) << 9); 54 59 mutex_unlock(&bdev->bd_mutex); 60 + dasd_put_device(base); 55 61 return 0; 56 62 } 57 63 ··· 68 62 static int 69 63 dasd_ioctl_disable(struct block_device *bdev) 70 64 { 71 - struct dasd_block *block = bdev->bd_disk->private_data; 65 + struct dasd_device *base; 72 66 73 67 if (!capable(CAP_SYS_ADMIN)) 74 68 return -EACCES; 75 69 70 + base = dasd_device_from_gendisk(bdev->bd_disk); 71 + if (!base) 72 + return -ENODEV; 76 73 /* 77 74 * Man this is sick. We don't do a real disable but only downgrade 78 75 * the device to DASD_STATE_BASIC. The reason is that dasdfmt uses ··· 84 75 * using the BIODASDFMT ioctl. Therefore the correct state for the 85 76 * device is DASD_STATE_BASIC that allows to do basic i/o. 86 77 */ 87 - dasd_set_target_state(block->base, DASD_STATE_BASIC); 78 + dasd_set_target_state(base, DASD_STATE_BASIC); 88 79 /* 89 80 * Set i_size to zero, since read, write, etc. check against this 90 81 * value. 
··· 92 83 mutex_lock(&bdev->bd_mutex); 93 84 i_size_write(bdev->bd_inode, 0); 94 85 mutex_unlock(&bdev->bd_mutex); 86 + dasd_put_device(base); 95 87 return 0; 96 88 } 97 89 ··· 201 191 static int 202 192 dasd_ioctl_format(struct block_device *bdev, void __user *argp) 203 193 { 204 - struct dasd_block *block = bdev->bd_disk->private_data; 194 + struct dasd_device *base; 205 195 struct format_data_t fdata; 196 + int rc; 206 197 207 198 if (!capable(CAP_SYS_ADMIN)) 208 199 return -EACCES; 209 200 if (!argp) 210 201 return -EINVAL; 211 - 212 - if (block->base->features & DASD_FEATURE_READONLY || 213 - test_bit(DASD_FLAG_DEVICE_RO, &block->base->flags)) 202 + base = dasd_device_from_gendisk(bdev->bd_disk); 203 + if (!base) 204 + return -ENODEV; 205 + if (base->features & DASD_FEATURE_READONLY || 206 + test_bit(DASD_FLAG_DEVICE_RO, &base->flags)) { 207 + dasd_put_device(base); 214 208 return -EROFS; 215 - if (copy_from_user(&fdata, argp, sizeof(struct format_data_t))) 209 + } 210 + if (copy_from_user(&fdata, argp, sizeof(struct format_data_t))) { 211 + dasd_put_device(base); 216 212 return -EFAULT; 213 + } 217 214 if (bdev != bdev->bd_contains) { 218 215 pr_warning("%s: The specified DASD is a partition and cannot " 219 216 "be formatted\n", 220 - dev_name(&block->base->cdev->dev)); 217 + dev_name(&base->cdev->dev)); 218 + dasd_put_device(base); 221 219 return -EINVAL; 222 220 } 223 - return dasd_format(block, &fdata); 221 + rc = dasd_format(base->block, &fdata); 222 + dasd_put_device(base); 223 + return rc; 224 224 } 225 225 226 226 #ifdef CONFIG_DASD_PROFILE ··· 360 340 static int 361 341 dasd_ioctl_set_ro(struct block_device *bdev, void __user *argp) 362 342 { 363 - struct dasd_block *block = bdev->bd_disk->private_data; 364 - int intval; 343 + struct dasd_device *base; 344 + int intval, rc; 365 345 366 346 if (!capable(CAP_SYS_ADMIN)) 367 347 return -EACCES; ··· 370 350 return -EINVAL; 371 351 if (get_user(intval, (int __user *)argp)) 372 352 return -EFAULT; 373 - if 
(!intval && test_bit(DASD_FLAG_DEVICE_RO, &block->base->flags)) 353 + base = dasd_device_from_gendisk(bdev->bd_disk); 354 + if (!base) 355 + return -ENODEV; 356 + if (!intval && test_bit(DASD_FLAG_DEVICE_RO, &base->flags)) { 357 + dasd_put_device(base); 374 358 return -EROFS; 359 + } 375 360 set_disk_ro(bdev->bd_disk, intval); 376 - return dasd_set_feature(block->base->cdev, DASD_FEATURE_READONLY, intval); 361 + rc = dasd_set_feature(base->cdev, DASD_FEATURE_READONLY, intval); 362 + dasd_put_device(base); 363 + return rc; 377 364 } 378 365 379 366 static int dasd_ioctl_readall_cmb(struct dasd_block *block, unsigned int cmd, ··· 399 372 int dasd_ioctl(struct block_device *bdev, fmode_t mode, 400 373 unsigned int cmd, unsigned long arg) 401 374 { 402 - struct dasd_block *block = bdev->bd_disk->private_data; 375 + struct dasd_block *block; 376 + struct dasd_device *base; 403 377 void __user *argp; 378 + int rc; 404 379 405 380 if (is_compat_task()) 406 381 argp = compat_ptr(arg); 407 382 else 408 383 argp = (void __user *)arg; 409 384 410 - if (!block) 411 - return -ENODEV; 412 - 413 385 if ((_IOC_DIR(cmd) != _IOC_NONE) && !arg) { 414 386 PRINT_DEBUG("empty data ptr"); 415 387 return -EINVAL; 416 388 } 417 389 390 + base = dasd_device_from_gendisk(bdev->bd_disk); 391 + if (!base) 392 + return -ENODEV; 393 + block = base->block; 394 + rc = 0; 418 395 switch (cmd) { 419 396 case BIODASDDISABLE: 420 - return dasd_ioctl_disable(bdev); 397 + rc = dasd_ioctl_disable(bdev); 398 + break; 421 399 case BIODASDENABLE: 422 - return dasd_ioctl_enable(bdev); 400 + rc = dasd_ioctl_enable(bdev); 401 + break; 423 402 case BIODASDQUIESCE: 424 - return dasd_ioctl_quiesce(block); 403 + rc = dasd_ioctl_quiesce(block); 404 + break; 425 405 case BIODASDRESUME: 426 - return dasd_ioctl_resume(block); 406 + rc = dasd_ioctl_resume(block); 407 + break; 427 408 case BIODASDFMT: 428 - return dasd_ioctl_format(bdev, argp); 409 + rc = dasd_ioctl_format(bdev, argp); 410 + break; 429 411 case 
BIODASDINFO: 430 - return dasd_ioctl_information(block, cmd, argp); 412 + rc = dasd_ioctl_information(block, cmd, argp); 413 + break; 431 414 case BIODASDINFO2: 432 - return dasd_ioctl_information(block, cmd, argp); 415 + rc = dasd_ioctl_information(block, cmd, argp); 416 + break; 433 417 case BIODASDPRRD: 434 - return dasd_ioctl_read_profile(block, argp); 418 + rc = dasd_ioctl_read_profile(block, argp); 419 + break; 435 420 case BIODASDPRRST: 436 - return dasd_ioctl_reset_profile(block); 421 + rc = dasd_ioctl_reset_profile(block); 422 + break; 437 423 case BLKROSET: 438 - return dasd_ioctl_set_ro(bdev, argp); 424 + rc = dasd_ioctl_set_ro(bdev, argp); 425 + break; 439 426 case DASDAPIVER: 440 - return dasd_ioctl_api_version(argp); 427 + rc = dasd_ioctl_api_version(argp); 428 + break; 441 429 case BIODASDCMFENABLE: 442 - return enable_cmf(block->base->cdev); 430 + rc = enable_cmf(base->cdev); 431 + break; 443 432 case BIODASDCMFDISABLE: 444 - return disable_cmf(block->base->cdev); 433 + rc = disable_cmf(base->cdev); 434 + break; 445 435 case BIODASDREADALLCMB: 446 - return dasd_ioctl_readall_cmb(block, cmd, argp); 436 + rc = dasd_ioctl_readall_cmb(block, cmd, argp); 437 + break; 447 438 default: 448 439 /* if the discipline has an ioctl method try it. */ 449 - if (block->base->discipline->ioctl) { 450 - int rval = block->base->discipline->ioctl(block, cmd, argp); 451 - if (rval != -ENOIOCTLCMD) 452 - return rval; 453 - } 454 - 455 - return -EINVAL; 440 + if (base->discipline->ioctl) { 441 + rc = base->discipline->ioctl(block, cmd, argp); 442 + if (rc == -ENOIOCTLCMD) 443 + rc = -EINVAL; 444 + } else 445 + rc = -EINVAL; 456 446 } 447 + dasd_put_device(base); 448 + return rc; 457 449 }
+2
drivers/s390/char/sclp_cmd.c
··· 518 518 return; 519 519 new_incr->rn = rn; 520 520 new_incr->standby = standby; 521 + if (!standby) 522 + new_incr->usecount = 1; 521 523 last_rn = 0; 522 524 prev = &sclp_mem_list; 523 525 list_for_each_entry(incr, &sclp_mem_list, list) {
-1
drivers/s390/char/tape_block.c
··· 236 236 disk->major = tapeblock_major; 237 237 disk->first_minor = device->first_minor; 238 238 disk->fops = &tapeblock_fops; 239 - disk->events = DISK_EVENT_MEDIA_CHANGE; 240 239 disk->private_data = tape_get_device(device); 241 240 disk->queue = blkdat->request_queue; 242 241 set_capacity(disk, 0);
+12 -5
drivers/s390/cio/qdio_main.c
··· 407 407 q->q_stats.nr_sbals[pos]++; 408 408 } 409 409 410 - static void announce_buffer_error(struct qdio_q *q, int count) 410 + static void process_buffer_error(struct qdio_q *q, int count) 411 411 { 412 + unsigned char state = (q->is_input_q) ? SLSB_P_INPUT_NOT_INIT : 413 + SLSB_P_OUTPUT_NOT_INIT; 414 + 412 415 q->qdio_error |= QDIO_ERROR_SLSB_STATE; 413 416 414 417 /* special handling for no target buffer empty */ ··· 429 426 DBF_ERROR("F14:%2x F15:%2x", 430 427 q->sbal[q->first_to_check]->element[14].flags & 0xff, 431 428 q->sbal[q->first_to_check]->element[15].flags & 0xff); 429 + 430 + /* 431 + * Interrupts may be avoided as long as the error is present 432 + * so change the buffer state immediately to avoid starvation. 433 + */ 434 + set_buf_states(q, q->first_to_check, state, count); 432 435 } 433 436 434 437 static inline void inbound_primed(struct qdio_q *q, int count) ··· 515 506 account_sbals(q, count); 516 507 break; 517 508 case SLSB_P_INPUT_ERROR: 518 - announce_buffer_error(q, count); 519 - /* process the buffer, the upper layer will take care of it */ 509 + process_buffer_error(q, count); 520 510 q->first_to_check = add_buf(q->first_to_check, count); 521 511 atomic_sub(count, &q->nr_buf_used); 522 512 if (q->irq_ptr->perf_stat_enabled) ··· 685 677 account_sbals(q, count); 686 678 break; 687 679 case SLSB_P_OUTPUT_ERROR: 688 - announce_buffer_error(q, count); 689 - /* process the buffer, the upper layer will take care of it */ 680 + process_buffer_error(q, count); 690 681 q->first_to_check = add_buf(q->first_to_check, count); 691 682 atomic_sub(count, &q->nr_buf_used); 692 683 if (q->irq_ptr->perf_stat_enabled)
+1 -1
drivers/s390/kvm/kvm_virtio.c
··· 381 381 u16 subcode; 382 382 u32 param; 383 383 384 - kstat_cpu(smp_processor_id()).irqs[EXTINT_VRT]++; 385 384 subcode = ext_int_code >> 16; 386 385 if ((subcode & 0xff00) != VIRTIO_SUBCODE_64) 387 386 return; 387 + kstat_cpu(smp_processor_id()).irqs[EXTINT_VRT]++; 388 388 389 389 /* The LSB might be overloaded, we have to mask it */ 390 390 vq = (struct virtqueue *)(param64 & ~1UL);
+6 -3
drivers/scsi/device_handler/scsi_dh.c
··· 394 394 unsigned long flags; 395 395 struct scsi_device *sdev; 396 396 struct scsi_device_handler *scsi_dh = NULL; 397 + struct device *dev = NULL; 397 398 398 399 spin_lock_irqsave(q->queue_lock, flags); 399 400 sdev = q->queuedata; 400 401 if (sdev && sdev->scsi_dh_data) 401 402 scsi_dh = sdev->scsi_dh_data->scsi_dh; 402 - if (!scsi_dh || !get_device(&sdev->sdev_gendev) || 403 + dev = get_device(&sdev->sdev_gendev); 404 + if (!scsi_dh || !dev || 403 405 sdev->sdev_state == SDEV_CANCEL || 404 406 sdev->sdev_state == SDEV_DEL) 405 407 err = SCSI_DH_NOSYS; ··· 412 410 if (err) { 413 411 if (fn) 414 412 fn(data, err); 415 - return err; 413 + goto out; 416 414 } 417 415 418 416 if (scsi_dh->activate) 419 417 err = scsi_dh->activate(sdev, fn, data); 420 - put_device(&sdev->sdev_gendev); 418 + out: 419 + put_device(dev); 421 420 return err; 422 421 } 423 422 EXPORT_SYMBOL_GPL(scsi_dh_activate);
+21 -2
drivers/scsi/mpt2sas/mpt2sas_ctl.c
··· 688 688 goto out; 689 689 } 690 690 691 + /* Check for overflow and wraparound */ 692 + if (karg.data_sge_offset * 4 > ioc->request_sz || 693 + karg.data_sge_offset > (UINT_MAX / 4)) { 694 + ret = -EINVAL; 695 + goto out; 696 + } 697 + 691 698 /* copy in request message frame from user */ 692 699 if (copy_from_user(mpi_request, mf, karg.data_sge_offset*4)) { 693 700 printk(KERN_ERR "failure at %s:%d/%s()!\n", __FILE__, __LINE__, ··· 1970 1963 Mpi2DiagBufferPostReply_t *mpi_reply; 1971 1964 int rc, i; 1972 1965 u8 buffer_type; 1973 - unsigned long timeleft; 1966 + unsigned long timeleft, request_size, copy_size; 1974 1967 u16 smid; 1975 1968 u16 ioc_status; 1976 1969 u8 issue_reset = 0; ··· 2006 1999 return -ENOMEM; 2007 2000 } 2008 2001 2002 + request_size = ioc->diag_buffer_sz[buffer_type]; 2003 + 2009 2004 if ((karg.starting_offset % 4) || (karg.bytes_to_read % 4)) { 2010 2005 printk(MPT2SAS_ERR_FMT "%s: either the starting_offset " 2011 2006 "or bytes_to_read are not 4 byte aligned\n", ioc->name, ··· 2015 2006 return -EINVAL; 2016 2007 } 2017 2008 2009 + if (karg.starting_offset > request_size) 2010 + return -EINVAL; 2011 + 2018 2012 diag_data = (void *)(request_data + karg.starting_offset); 2019 2013 dctlprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: diag_buffer(%p), " 2020 2014 "offset(%d), sz(%d)\n", ioc->name, __func__, 2021 2015 diag_data, karg.starting_offset, karg.bytes_to_read)); 2022 2016 2017 + /* Truncate data on requests that are too large */ 2018 + if ((diag_data + karg.bytes_to_read < diag_data) || 2019 + (diag_data + karg.bytes_to_read > request_data + request_size)) 2020 + copy_size = request_size - karg.starting_offset; 2021 + else 2022 + copy_size = karg.bytes_to_read; 2023 + 2023 2024 if (copy_to_user((void __user *)uarg->diagnostic_data, 2024 - diag_data, karg.bytes_to_read)) { 2025 + diag_data, copy_size)) { 2025 2026 printk(MPT2SAS_ERR_FMT "%s: Unable to write " 2026 2027 "mpt_diag_read_buffer_t data @ %p\n", ioc->name, 2027 2028 __func__, 
diag_data);
+3
drivers/scsi/pmcraid.c
··· 3814 3814 rc = -EFAULT; 3815 3815 goto out_free_buffer; 3816 3816 } 3817 + } else if (request_size < 0) { 3818 + rc = -EINVAL; 3819 + goto out_free_buffer; 3817 3820 } 3818 3821 3819 3822 /* check if we have any additional command parameters */
+5 -2
drivers/scsi/qlogicpti.c
··· 1292 1292 .use_clustering = ENABLE_CLUSTERING, 1293 1293 }; 1294 1294 1295 + static const struct of_device_id qpti_match[]; 1295 1296 static int __devinit qpti_sbus_probe(struct platform_device *op) 1296 1297 { 1298 + const struct of_device_id *match; 1297 1299 struct scsi_host_template *tpnt; 1298 1300 struct device_node *dp = op->dev.of_node; 1299 1301 struct Scsi_Host *host; ··· 1303 1301 static int nqptis; 1304 1302 const char *fcode; 1305 1303 1306 - if (!op->dev.of_match) 1304 + match = of_match_device(qpti_match, &op->dev); 1305 + if (!match) 1307 1306 return -EINVAL; 1308 - tpnt = op->dev.of_match->data; 1307 + tpnt = match->data; 1309 1308 1310 1309 /* Sometimes Antares cards come up not completely 1311 1310 * setup, and we get a report of a zero IRQ.
+17 -15
drivers/scsi/scsi_lib.c
··· 74 74 */ 75 75 #define SCSI_QUEUE_DELAY 3 76 76 77 - static void scsi_run_queue(struct request_queue *q); 78 - 79 77 /* 80 78 * Function: scsi_unprep_request() 81 79 * ··· 159 161 blk_requeue_request(q, cmd->request); 160 162 spin_unlock_irqrestore(q->queue_lock, flags); 161 163 162 - scsi_run_queue(q); 164 + kblockd_schedule_work(q, &device->requeue_work); 163 165 164 166 return 0; 165 167 } ··· 398 400 static void scsi_run_queue(struct request_queue *q) 399 401 { 400 402 struct scsi_device *sdev = q->queuedata; 401 - struct Scsi_Host *shost = sdev->host; 403 + struct Scsi_Host *shost; 402 404 LIST_HEAD(starved_list); 403 405 unsigned long flags; 404 406 407 + /* if the device is dead, sdev will be NULL, so no queue to run */ 408 + if (!sdev) 409 + return; 410 + 411 + shost = sdev->host; 405 412 if (scsi_target(sdev)->single_lun) 406 413 scsi_single_lun_run(sdev); 407 414 ··· 414 411 list_splice_init(&shost->starved_list, &starved_list); 415 412 416 413 while (!list_empty(&starved_list)) { 417 - int flagset; 418 - 419 414 /* 420 415 * As long as shost is accepting commands and we have 421 416 * starved queues, call blk_run_queue. 
scsi_request_fn ··· 437 436 } 438 437 439 438 spin_unlock(shost->host_lock); 440 - 441 439 spin_lock(sdev->request_queue->queue_lock); 442 - flagset = test_bit(QUEUE_FLAG_REENTER, &q->queue_flags) && 443 - !test_bit(QUEUE_FLAG_REENTER, 444 - &sdev->request_queue->queue_flags); 445 - if (flagset) 446 - queue_flag_set(QUEUE_FLAG_REENTER, sdev->request_queue); 447 440 __blk_run_queue(sdev->request_queue); 448 - if (flagset) 449 - queue_flag_clear(QUEUE_FLAG_REENTER, sdev->request_queue); 450 441 spin_unlock(sdev->request_queue->queue_lock); 451 - 452 442 spin_lock(shost->host_lock); 453 443 } 454 444 /* put any unprocessed entries back */ ··· 447 455 spin_unlock_irqrestore(shost->host_lock, flags); 448 456 449 457 blk_run_queue(q); 458 + } 459 + 460 + void scsi_requeue_run_queue(struct work_struct *work) 461 + { 462 + struct scsi_device *sdev; 463 + struct request_queue *q; 464 + 465 + sdev = container_of(work, struct scsi_device, requeue_work); 466 + q = sdev->request_queue; 467 + scsi_run_queue(q); 450 468 } 451 469 452 470 /*
+2
drivers/scsi/scsi_scan.c
··· 242 242 int display_failure_msg = 1, ret; 243 243 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); 244 244 extern void scsi_evt_thread(struct work_struct *work); 245 + extern void scsi_requeue_run_queue(struct work_struct *work); 245 246 246 247 sdev = kzalloc(sizeof(*sdev) + shost->transportt->device_size, 247 248 GFP_ATOMIC); ··· 265 264 INIT_LIST_HEAD(&sdev->event_list); 266 265 spin_lock_init(&sdev->list_lock); 267 266 INIT_WORK(&sdev->event_work, scsi_evt_thread); 267 + INIT_WORK(&sdev->requeue_work, scsi_requeue_run_queue); 268 268 269 269 sdev->sdev_gendev.parent = get_device(&starget->dev); 270 270 sdev->sdev_target = starget;
+8 -8
drivers/scsi/scsi_sysfs.c
··· 322 322 kfree(evt); 323 323 } 324 324 325 - if (sdev->request_queue) { 326 - sdev->request_queue->queuedata = NULL; 327 - /* user context needed to free queue */ 328 - scsi_free_queue(sdev->request_queue); 329 - /* temporary expedient, try to catch use of queue lock 330 - * after free of sdev */ 331 - sdev->request_queue = NULL; 332 - } 325 + /* NULL queue means the device can't be used */ 326 + sdev->request_queue = NULL; 333 327 334 328 scsi_target_reap(scsi_target(sdev)); 335 329 ··· 931 937 if (sdev->host->hostt->slave_destroy) 932 938 sdev->host->hostt->slave_destroy(sdev); 933 939 transport_destroy_device(dev); 940 + 941 + /* cause the request function to reject all I/O requests */ 942 + sdev->request_queue->queuedata = NULL; 943 + 944 + /* Freeing the queue signals to block that we're done */ 945 + scsi_free_queue(sdev->request_queue); 934 946 put_device(dev); 935 947 } 936 948
+4 -15
drivers/scsi/scsi_transport_fc.c
··· 3816 3816 static void 3817 3817 fc_bsg_goose_queue(struct fc_rport *rport) 3818 3818 { 3819 - int flagset; 3820 - unsigned long flags; 3821 - 3822 3819 if (!rport->rqst_q) 3823 3820 return; 3824 3821 3822 + /* 3823 + * This get/put dance makes no sense 3824 + */ 3825 3825 get_device(&rport->dev); 3826 - 3827 - spin_lock_irqsave(rport->rqst_q->queue_lock, flags); 3828 - flagset = test_bit(QUEUE_FLAG_REENTER, &rport->rqst_q->queue_flags) && 3829 - !test_bit(QUEUE_FLAG_REENTER, &rport->rqst_q->queue_flags); 3830 - if (flagset) 3831 - queue_flag_set(QUEUE_FLAG_REENTER, rport->rqst_q); 3832 - __blk_run_queue(rport->rqst_q); 3833 - if (flagset) 3834 - queue_flag_clear(QUEUE_FLAG_REENTER, rport->rqst_q); 3835 - spin_unlock_irqrestore(rport->rqst_q->queue_lock, flags); 3836 - 3826 + blk_run_queue_async(rport->rqst_q); 3837 3827 put_device(&rport->dev); 3838 3828 } 3839 - 3840 3829 3841 3830 /** 3842 3831 * fc_bsg_rport_dispatch - process rport bsg requests and dispatch to LLDD
-4
drivers/staging/ft1000/ft1000-pcmcia/ft1000_hw.c
··· 2288 2288 free_netdev(dev); 2289 2289 return NULL; 2290 2290 } 2291 - 2292 - EXPORT_SYMBOL(init_ft1000_card); 2293 - EXPORT_SYMBOL(stop_ft1000_card); 2294 - EXPORT_SYMBOL(flarion_ft1000_cnt);
-3
drivers/staging/ft1000/ft1000-pcmcia/ft1000_proc.c
··· 214 214 remove_proc_entry(FT1000_PROC, init_net.proc_net); 215 215 unregister_netdevice_notifier(&ft1000_netdev_notifier); 216 216 } 217 - 218 - EXPORT_SYMBOL(ft1000InitProc); 219 - EXPORT_SYMBOL(ft1000CleanupProc);
+1 -1
drivers/staging/gma500/Kconfig
··· 1 1 config DRM_PSB 2 2 tristate "Intel GMA500 KMS Framebuffer" 3 - depends on DRM && PCI 3 + depends on DRM && PCI && X86 4 4 select FB_CFB_COPYAREA 5 5 select FB_CFB_FILLRECT 6 6 select FB_CFB_IMAGEBLIT
+1
drivers/staging/intel_sst/intelmid_v1_control.c
··· 28 28 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 29 29 30 30 #include <linux/pci.h> 31 + #include <linux/delay.h> 31 32 #include <linux/file.h> 32 33 #include <asm/mrst.h> 33 34 #include <sound/pcm.h>
+1
drivers/staging/intel_sst/intelmid_v2_control.c
··· 29 29 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 30 30 31 31 #include <linux/pci.h> 32 + #include <linux/delay.h> 32 33 #include <linux/file.h> 33 34 #include "intel_sst.h" 34 35 #include "intelmid_snd_control.h"
+1
drivers/staging/olpc_dcon/olpc_dcon_xo_1.c
··· 12 12 */ 13 13 #include <linux/cs5535.h> 14 14 #include <linux/gpio.h> 15 + #include <linux/delay.h> 15 16 #include <asm/olpc.h> 16 17 17 18 #include "olpc_dcon.h"
+1 -1
drivers/staging/rt2860/common/cmm_data_pci.c
··· 444 444 return (NDIS_STATUS_FAILURE); 445 445 } 446 446 } 447 - /* Drop not U2M frames, can't's drop here because we will drop beacon in this case */ 447 + /* Drop not U2M frames, can't drop here because we will drop beacon in this case */ 448 448 /* I am kind of doubting the U2M bit operation */ 449 449 /* if (pRxD->U2M == 0) */ 450 450 /* return(NDIS_STATUS_FAILURE); */
+1 -1
drivers/staging/rt2860/common/cmm_data_usb.c
··· 860 860 DBGPRINT_RAW(RT_DEBUG_ERROR, ("received packet too long\n")); 861 861 return NDIS_STATUS_FAILURE; 862 862 } 863 - /* Drop not U2M frames, can't's drop here because we will drop beacon in this case */ 863 + /* Drop not U2M frames, can't drop here because we will drop beacon in this case */ 864 864 /* I am kind of doubting the U2M bit operation */ 865 865 /* if (pRxD->U2M == 0) */ 866 866 /* return(NDIS_STATUS_FAILURE); */
+1 -1
drivers/staging/rts_pstor/debug.h
··· 28 28 29 29 #define RTSX_STOR "rts_pstor: " 30 30 31 - #if CONFIG_RTS_PSTOR_DEBUG 31 + #ifdef CONFIG_RTS_PSTOR_DEBUG 32 32 #define RTSX_DEBUGP(x...) printk(KERN_DEBUG RTSX_STOR x) 33 33 #define RTSX_DEBUGPN(x...) printk(KERN_DEBUG x) 34 34 #define RTSX_DEBUGPX(x...) printk(x)
+1
drivers/staging/rts_pstor/ms.c
··· 23 23 #include <linux/blkdev.h> 24 24 #include <linux/kthread.h> 25 25 #include <linux/sched.h> 26 + #include <linux/vmalloc.h> 26 27 27 28 #include "rtsx.h" 28 29 #include "rtsx_transport.h"
+3 -2
drivers/staging/rts_pstor/rtsx_chip.c
··· 24 24 #include <linux/kthread.h> 25 25 #include <linux/sched.h> 26 26 #include <linux/workqueue.h> 27 + #include <linux/vmalloc.h> 27 28 28 29 #include "rtsx.h" 29 30 #include "rtsx_transport.h" ··· 1312 1311 1313 1312 #ifdef SUPPORT_OCP 1314 1313 if (CHECK_LUN_MODE(chip, SD_MS_2LUN)) { 1315 - #if CONFIG_RTS_PSTOR_DEBUG 1314 + #ifdef CONFIG_RTS_PSTOR_DEBUG 1316 1315 if (chip->ocp_stat & (SD_OC_NOW | SD_OC_EVER | MS_OC_NOW | MS_OC_EVER)) { 1317 1316 RTSX_DEBUGP("Over current, OCPSTAT is 0x%x\n", chip->ocp_stat); 1318 1317 } 1319 - #endif 1318 + #endif 1320 1319 1321 1320 if (chip->ocp_stat & (SD_OC_NOW | SD_OC_EVER)) { 1322 1321 if (chip->card_exist & SD_CARD) {
+1
drivers/staging/rts_pstor/rtsx_scsi.c
··· 23 23 #include <linux/blkdev.h> 24 24 #include <linux/kthread.h> 25 25 #include <linux/sched.h> 26 + #include <linux/vmalloc.h> 26 27 27 28 #include "rtsx.h" 28 29 #include "rtsx_transport.h"
+2 -2
drivers/staging/rts_pstor/sd.c
··· 909 909 RTSX_WRITE_REG(chip, SD_VPCLK0_CTL, PHASE_NOT_RESET, PHASE_NOT_RESET); 910 910 RTSX_WRITE_REG(chip, CLK_CTL, CHANGE_CLK, 0); 911 911 } else { 912 - #if CONFIG_RTS_PSTOR_DEBUG 912 + #ifdef CONFIG_RTS_PSTOR_DEBUG 913 913 rtsx_read_register(chip, SD_VP_CTL, &val); 914 914 RTSX_DEBUGP("SD_VP_CTL: 0x%x\n", val); 915 915 rtsx_read_register(chip, SD_DCMPS_CTL, &val); ··· 958 958 return STATUS_SUCCESS; 959 959 960 960 Fail: 961 - #if CONFIG_RTS_PSTOR_DEBUG 961 + #ifdef CONFIG_RTS_PSTOR_DEBUG 962 962 rtsx_read_register(chip, SD_VP_CTL, &val); 963 963 RTSX_DEBUGP("SD_VP_CTL: 0x%x\n", val); 964 964 rtsx_read_register(chip, SD_DCMPS_CTL, &val);
+1 -1
drivers/staging/rts_pstor/trace.h
··· 82 82 #define TRACE_GOTO(chip, label) goto label 83 83 #endif 84 84 85 - #if CONFIG_RTS_PSTOR_DEBUG 85 + #ifdef CONFIG_RTS_PSTOR_DEBUG 86 86 static inline void rtsx_dump(u8 *buf, int buf_len) 87 87 { 88 88 int i;
+1
drivers/staging/rts_pstor/xd.c
··· 23 23 #include <linux/blkdev.h> 24 24 #include <linux/kthread.h> 25 25 #include <linux/sched.h> 26 + #include <linux/vmalloc.h> 26 27 27 28 #include "rtsx.h" 28 29 #include "rtsx_transport.h"
+1
drivers/staging/solo6x10/Kconfig
··· 2 2 tristate "Softlogic 6x10 MPEG codec cards" 3 3 depends on PCI && VIDEO_DEV && SND && I2C 4 4 select VIDEOBUF_DMA_SG 5 + select SND_PCM 5 6 ---help--- 6 7 This driver supports the Softlogic based MPEG-4 and h.264 codec 7 8 codec cards.
+1 -1
drivers/staging/spectra/ffsport.c
··· 653 653 } 654 654 dev->queue->queuedata = dev; 655 655 656 - /* As Linux block layer does't support >4KB hardware sector, */ 656 + /* As Linux block layer doesn't support >4KB hardware sector, */ 657 657 /* Here we force report 512 byte hardware sector size to Kernel */ 658 658 blk_queue_logical_block_size(dev->queue, 512); 659 659
+1 -1
drivers/staging/tidspbridge/dynload/cload.c
··· 718 718 * as a temporary for .dllview record construction. 719 719 * Allocate storage for the whole table. Add 1 to the section count 720 720 * in case a trampoline section is auto-generated as well as the 721 - * size of the trampoline section name so DLLView does't get lost. 721 + * size of the trampoline section name so DLLView doesn't get lost. 722 722 */ 723 723 724 724 siz = sym_count * sizeof(struct local_symbol);
+1 -1
drivers/staging/tty/specialix.c
··· 978 978 spin_lock_irqsave(&bp->lock, flags); 979 979 sx_out(bp, CD186x_CAR, port_No(port)); 980 980 981 - /* The Specialix board does't implement the RTS lines. 981 + /* The Specialix board doesn't implement the RTS lines. 982 982 They are used to set the IRQ level. Don't touch them. */ 983 983 if (sx_crtscts(tty)) 984 984 port->MSVR = MSVR_DTR | (sx_in(bp, CD186x_MSVR) & MSVR_RTS);
+5 -6
drivers/staging/usbip/vhci_hcd.c
··· 876 876 } 877 877 878 878 /* kill threads related to this sdev, if v.c. exists */ 879 - kthread_stop(vdev->ud.tcp_rx); 880 - kthread_stop(vdev->ud.tcp_tx); 879 + if (vdev->ud.tcp_rx) 880 + kthread_stop(vdev->ud.tcp_rx); 881 + if (vdev->ud.tcp_tx) 882 + kthread_stop(vdev->ud.tcp_tx); 881 883 882 884 usbip_uinfo("stop threads\n"); 883 885 ··· 950 948 static void vhci_device_init(struct vhci_device *vdev) 951 949 { 952 950 memset(vdev, 0, sizeof(*vdev)); 953 - 954 - vdev->ud.tcp_rx = kthread_create(vhci_rx_loop, &vdev->ud, "vhci_rx"); 955 - vdev->ud.tcp_tx = kthread_create(vhci_tx_loop, &vdev->ud, "vhci_tx"); 956 951 957 952 vdev->ud.side = USBIP_VHCI; 958 953 vdev->ud.status = VDEV_ST_NULL; ··· 1138 1139 usbip_uerr("create hcd failed\n"); 1139 1140 return -ENOMEM; 1140 1141 } 1141 - 1142 + hcd->has_tt = 1; 1142 1143 1143 1144 /* this is private data for vhci_hcd */ 1144 1145 the_controller = hcd_to_vhci(hcd);
+4 -3
drivers/staging/usbip/vhci_sysfs.c
··· 21 21 #include "vhci.h" 22 22 23 23 #include <linux/in.h> 24 + #include <linux/kthread.h> 24 25 25 26 /* TODO: refine locking ?*/ 26 27 ··· 221 220 vdev->ud.tcp_socket = socket; 222 221 vdev->ud.status = VDEV_ST_NOTASSIGNED; 223 222 224 - wake_up_process(vdev->ud.tcp_rx); 225 - wake_up_process(vdev->ud.tcp_tx); 226 - 227 223 spin_unlock(&vdev->ud.lock); 228 224 spin_unlock(&the_controller->lock); 229 225 /* end the lock */ 226 + 227 + vdev->ud.tcp_rx = kthread_run(vhci_rx_loop, &vdev->ud, "vhci_rx"); 228 + vdev->ud.tcp_tx = kthread_run(vhci_tx_loop, &vdev->ud, "vhci_tx"); 230 229 231 230 rh_port_connect(rhport, speed); 232 231
+1 -1
drivers/staging/wlan-ng/cfg80211.c
··· 273 273 } 274 274 275 275 int prism2_set_default_key(struct wiphy *wiphy, struct net_device *dev, 276 - u8 key_index) 276 + u8 key_index, bool unicast, bool multicast) 277 277 { 278 278 wlandevice_t *wlandev = dev->ml_priv; 279 279
+6 -2
drivers/tty/n_gsm.c
··· 1658 1658 1659 1659 if ((gsm->control & ~PF) == UI) 1660 1660 gsm->fcs = gsm_fcs_add_block(gsm->fcs, gsm->buf, gsm->len); 1661 - /* generate final CRC with received FCS */ 1662 - gsm->fcs = gsm_fcs_add(gsm->fcs, gsm->received_fcs); 1661 + if (gsm->encoding == 0){ 1662 + /* WARNING: gsm->received_fcs is used for gsm->encoding = 0 only. 1663 + In this case it contain the last piece of data 1664 + required to generate final CRC */ 1665 + gsm->fcs = gsm_fcs_add(gsm->fcs, gsm->received_fcs); 1666 + } 1663 1667 if (gsm->fcs != GOOD_FCS) { 1664 1668 gsm->bad_fcs++; 1665 1669 if (debug & 4)
+2 -1
drivers/tty/serial/imx.c
··· 382 382 static irqreturn_t imx_rtsint(int irq, void *dev_id) 383 383 { 384 384 struct imx_port *sport = dev_id; 385 - unsigned int val = readl(sport->port.membase + USR1) & USR1_RTSS; 385 + unsigned int val; 386 386 unsigned long flags; 387 387 388 388 spin_lock_irqsave(&sport->port.lock, flags); 389 389 390 390 writel(USR1_RTSD, sport->port.membase + USR1); 391 + val = readl(sport->port.membase + USR1) & USR1_RTSS; 391 392 uart_handle_cts_change(&sport->port, !!val); 392 393 wake_up_interruptible(&sport->port.state->port.delta_msr_wait); 393 394
+5 -2
drivers/tty/serial/of_serial.c
··· 80 80 /* 81 81 * Try to register a serial port 82 82 */ 83 + static struct of_device_id of_platform_serial_table[]; 83 84 static int __devinit of_platform_serial_probe(struct platform_device *ofdev) 84 85 { 86 + const struct of_device_id *match; 85 87 struct of_serial_info *info; 86 88 struct uart_port port; 87 89 int port_type; 88 90 int ret; 89 91 90 - if (!ofdev->dev.of_match) 92 + match = of_match_device(of_platform_serial_table, &ofdev->dev); 93 + if (!match) 91 94 return -EINVAL; 92 95 93 96 if (of_find_property(ofdev->dev.of_node, "used-by-rtas", NULL)) ··· 100 97 if (info == NULL) 101 98 return -ENOMEM; 102 99 103 - port_type = (unsigned long)ofdev->dev.of_match->data; 100 + port_type = (unsigned long)match->data; 104 101 ret = of_platform_serial_setup(ofdev, port_type, &port); 105 102 if (ret) 106 103 goto out;
+5 -2
drivers/usb/gadget/fsl_qe_udc.c
··· 2539 2539 } 2540 2540 2541 2541 /* Driver probe functions */ 2542 + static const struct of_device_id qe_udc_match[]; 2542 2543 static int __devinit qe_udc_probe(struct platform_device *ofdev) 2543 2544 { 2545 + const struct of_device_id *match; 2544 2546 struct device_node *np = ofdev->dev.of_node; 2545 2547 struct qe_ep *ep; 2546 2548 unsigned int ret = 0; 2547 2549 unsigned int i; 2548 2550 const void *prop; 2549 2551 2550 - if (!ofdev->dev.of_match) 2552 + match = of_match_device(qe_udc_match, &ofdev->dev); 2553 + if (!match) 2551 2554 return -EINVAL; 2552 2555 2553 2556 prop = of_get_property(np, "mode", NULL); ··· 2564 2561 return -ENOMEM; 2565 2562 } 2566 2563 2567 - udc_controller->soc_type = (unsigned long)ofdev->dev.of_match->data; 2564 + udc_controller->soc_type = (unsigned long)match->data; 2568 2565 udc_controller->usb_regs = of_iomap(np, 0); 2569 2566 if (!udc_controller->usb_regs) { 2570 2567 ret = -ENOMEM;
+20
drivers/usb/host/ehci-omap.c
··· 40 40 #include <linux/slab.h> 41 41 #include <linux/usb/ulpi.h> 42 42 #include <plat/usb.h> 43 + #include <linux/regulator/consumer.h> 43 44 44 45 /* EHCI Register Set */ 45 46 #define EHCI_INSNREG04 (0xA0) ··· 119 118 struct ehci_hcd *omap_ehci; 120 119 int ret = -ENODEV; 121 120 int irq; 121 + int i; 122 + char supply[7]; 122 123 123 124 if (usb_disabled()) 124 125 return -ENODEV; ··· 160 157 hcd->rsrc_start = res->start; 161 158 hcd->rsrc_len = resource_size(res); 162 159 hcd->regs = regs; 160 + 161 + /* get ehci regulator and enable */ 162 + for (i = 0 ; i < OMAP3_HS_USB_PORTS ; i++) { 163 + if (pdata->port_mode[i] != OMAP_EHCI_PORT_MODE_PHY) { 164 + pdata->regulator[i] = NULL; 165 + continue; 166 + } 167 + snprintf(supply, sizeof(supply), "hsusb%d", i); 168 + pdata->regulator[i] = regulator_get(dev, supply); 169 + if (IS_ERR(pdata->regulator[i])) { 170 + pdata->regulator[i] = NULL; 171 + dev_dbg(dev, 172 + "failed to get ehci port%d regulator\n", i); 173 + } else { 174 + regulator_enable(pdata->regulator[i]); 175 + } 176 + } 163 177 164 178 ret = omap_usbhs_enable(dev); 165 179 if (ret) {
+1
drivers/usb/host/isp1760-hcd.c
··· 1633 1633 ints[i].qh = NULL; 1634 1634 ints[i].qtd = NULL; 1635 1635 1636 + urb->status = status; 1636 1637 isp1760_urb_done(hcd, urb); 1637 1638 if (qtd) 1638 1639 pe(hcd, qh, qtd);
+17 -2
drivers/usb/host/xhci-hub.c
··· 777 777 if (t1 != t2) 778 778 xhci_writel(xhci, t2, port_array[port_index]); 779 779 780 - if (DEV_HIGHSPEED(t1)) { 780 + if (hcd->speed != HCD_USB3) { 781 781 /* enable remote wake up for USB 2.0 */ 782 782 u32 __iomem *addr; 783 783 u32 tmp; ··· 866 866 temp |= PORT_LINK_STROBE | XDEV_U0; 867 867 xhci_writel(xhci, temp, port_array[port_index]); 868 868 } 869 + /* wait for the port to enter U0 and report port link 870 + * state change. 871 + */ 872 + spin_unlock_irqrestore(&xhci->lock, flags); 873 + msleep(20); 874 + spin_lock_irqsave(&xhci->lock, flags); 875 + 876 + /* Clear PLC */ 877 + temp = xhci_readl(xhci, port_array[port_index]); 878 + if (temp & PORT_PLC) { 879 + temp = xhci_port_state_to_neutral(temp); 880 + temp |= PORT_PLC; 881 + xhci_writel(xhci, temp, port_array[port_index]); 882 + } 883 + 869 884 slot_id = xhci_find_slot_id_by_port(hcd, 870 885 xhci, port_index + 1); 871 886 if (slot_id) ··· 888 873 } else 889 874 xhci_writel(xhci, temp, port_array[port_index]); 890 875 891 - if (DEV_HIGHSPEED(temp)) { 876 + if (hcd->speed != HCD_USB3) { 892 877 /* disable remote wake up for USB 2.0 */ 893 878 u32 __iomem *addr; 894 879 u32 tmp;
+2 -4
drivers/usb/musb/musb_gadget.c
··· 1887 1887 otg_set_vbus(musb->xceiv, 1); 1888 1888 1889 1889 hcd->self.uses_pio_for_control = 1; 1890 - 1891 - if (musb->xceiv->last_event == USB_EVENT_NONE) 1892 - pm_runtime_put(musb->controller); 1893 - 1894 1890 } 1891 + if (musb->xceiv->last_event == USB_EVENT_NONE) 1892 + pm_runtime_put(musb->controller); 1895 1893 1896 1894 return 0; 1897 1895
+1 -1
drivers/usb/musb/omap2430.c
··· 270 270 DBG(4, "VBUS Disconnect\n"); 271 271 272 272 #ifdef CONFIG_USB_GADGET_MUSB_HDRC 273 - if (is_otg_enabled(musb)) 273 + if (is_otg_enabled(musb) || is_peripheral_enabled(musb)) 274 274 if (musb->gadget_driver) 275 275 #endif 276 276 {
+10 -16
drivers/video/acornfb.c
··· 66 66 * have. Allow 1% either way on the nominal for TVs. 67 67 */ 68 68 #define NR_MONTYPES 6 69 - static struct fb_monspecs monspecs[NR_MONTYPES] __initdata = { 69 + static struct fb_monspecs monspecs[NR_MONTYPES] __devinitdata = { 70 70 { /* TV */ 71 71 .hfmin = 15469, 72 72 .hfmax = 15781, ··· 873 873 /* 874 874 * Everything after here is initialisation!!! 875 875 */ 876 - static struct fb_videomode modedb[] __initdata = { 876 + static struct fb_videomode modedb[] __devinitdata = { 877 877 { /* 320x256 @ 50Hz */ 878 878 NULL, 50, 320, 256, 125000, 92, 62, 35, 19, 38, 2, 879 879 FB_SYNC_COMP_HIGH_ACT, ··· 925 925 } 926 926 }; 927 927 928 - static struct fb_videomode __initdata 929 - acornfb_default_mode = { 928 + static struct fb_videomode acornfb_default_mode __devinitdata = { 930 929 .name = NULL, 931 930 .refresh = 60, 932 931 .xres = 640, ··· 941 942 .vmode = FB_VMODE_NONINTERLACED 942 943 }; 943 944 944 - static void __init acornfb_init_fbinfo(void) 945 + static void __devinit acornfb_init_fbinfo(void) 945 946 { 946 947 static int first = 1; 947 948 ··· 1017 1018 * size can optionally be followed by 'M' or 'K' for 1018 1019 * MB or KB respectively. 
1019 1020 */ 1020 - static void __init 1021 - acornfb_parse_mon(char *opt) 1021 + static void __devinit acornfb_parse_mon(char *opt) 1022 1022 { 1023 1023 char *p = opt; 1024 1024 ··· 1064 1066 current_par.montype = -1; 1065 1067 } 1066 1068 1067 - static void __init 1068 - acornfb_parse_montype(char *opt) 1069 + static void __devinit acornfb_parse_montype(char *opt) 1069 1070 { 1070 1071 current_par.montype = -2; 1071 1072 ··· 1105 1108 } 1106 1109 } 1107 1110 1108 - static void __init 1109 - acornfb_parse_dram(char *opt) 1111 + static void __devinit acornfb_parse_dram(char *opt) 1110 1112 { 1111 1113 unsigned int size; 1112 1114 ··· 1130 1134 static struct options { 1131 1135 char *name; 1132 1136 void (*parse)(char *opt); 1133 - } opt_table[] __initdata = { 1137 + } opt_table[] __devinitdata = { 1134 1138 { "mon", acornfb_parse_mon }, 1135 1139 { "montype", acornfb_parse_montype }, 1136 1140 { "dram", acornfb_parse_dram }, 1137 1141 { NULL, NULL } 1138 1142 }; 1139 1143 1140 - int __init 1141 - acornfb_setup(char *options) 1144 + static int __devinit acornfb_setup(char *options) 1142 1145 { 1143 1146 struct options *optp; 1144 1147 char *opt; ··· 1174 1179 * Detect type of monitor connected 1175 1180 * For now, we just assume SVGA 1176 1181 */ 1177 - static int __init 1178 - acornfb_detect_monitortype(void) 1182 + static int __devinit acornfb_detect_monitortype(void) 1179 1183 { 1180 1184 return 4; 1181 1185 }
+150 -77
drivers/video/fbmem.c
··· 42 42 43 43 #define FBPIXMAPSIZE (1024 * 8) 44 44 45 + static DEFINE_MUTEX(registration_lock); 45 46 struct fb_info *registered_fb[FB_MAX] __read_mostly; 46 47 int num_registered_fb __read_mostly; 48 + 49 + static struct fb_info *get_fb_info(unsigned int idx) 50 + { 51 + struct fb_info *fb_info; 52 + 53 + if (idx >= FB_MAX) 54 + return ERR_PTR(-ENODEV); 55 + 56 + mutex_lock(&registration_lock); 57 + fb_info = registered_fb[idx]; 58 + if (fb_info) 59 + atomic_inc(&fb_info->count); 60 + mutex_unlock(&registration_lock); 61 + 62 + return fb_info; 63 + } 64 + 65 + static void put_fb_info(struct fb_info *fb_info) 66 + { 67 + if (!atomic_dec_and_test(&fb_info->count)) 68 + return; 69 + if (fb_info->fbops->fb_destroy) 70 + fb_info->fbops->fb_destroy(fb_info); 71 + } 47 72 48 73 int lock_fb_info(struct fb_info *info) 49 74 { ··· 672 647 673 648 static void *fb_seq_start(struct seq_file *m, loff_t *pos) 674 649 { 650 + mutex_lock(&registration_lock); 675 651 return (*pos < FB_MAX) ? pos : NULL; 676 652 } 677 653 ··· 684 658 685 659 static void fb_seq_stop(struct seq_file *m, void *v) 686 660 { 661 + mutex_unlock(&registration_lock); 687 662 } 688 663 689 664 static int fb_seq_show(struct seq_file *m, void *v) ··· 717 690 .release = seq_release, 718 691 }; 719 692 693 + /* 694 + * We hold a reference to the fb_info in file->private_data, 695 + * but if the current registered fb has changed, we don't 696 + * actually want to use it. 697 + * 698 + * So look up the fb_info using the inode minor number, 699 + * and just verify it against the reference we have. 
700 + */ 701 + static struct fb_info *file_fb_info(struct file *file) 702 + { 703 + struct inode *inode = file->f_path.dentry->d_inode; 704 + int fbidx = iminor(inode); 705 + struct fb_info *info = registered_fb[fbidx]; 706 + 707 + if (info != file->private_data) 708 + info = NULL; 709 + return info; 710 + } 711 + 720 712 static ssize_t 721 713 fb_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) 722 714 { 723 715 unsigned long p = *ppos; 724 - struct inode *inode = file->f_path.dentry->d_inode; 725 - int fbidx = iminor(inode); 726 - struct fb_info *info = registered_fb[fbidx]; 716 + struct fb_info *info = file_fb_info(file); 727 717 u8 *buffer, *dst; 728 718 u8 __iomem *src; 729 719 int c, cnt = 0, err = 0; ··· 805 761 fb_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) 806 762 { 807 763 unsigned long p = *ppos; 808 - struct inode *inode = file->f_path.dentry->d_inode; 809 - int fbidx = iminor(inode); 810 - struct fb_info *info = registered_fb[fbidx]; 764 + struct fb_info *info = file_fb_info(file); 811 765 u8 *buffer, *src; 812 766 u8 __iomem *dst; 813 767 int c, cnt = 0, err = 0; ··· 1183 1141 1184 1142 static long fb_ioctl(struct file *file, unsigned int cmd, unsigned long arg) 1185 1143 { 1186 - struct inode *inode = file->f_path.dentry->d_inode; 1187 - int fbidx = iminor(inode); 1188 - struct fb_info *info = registered_fb[fbidx]; 1144 + struct fb_info *info = file_fb_info(file); 1189 1145 1146 + if (!info) 1147 + return -ENODEV; 1190 1148 return do_fb_ioctl(info, cmd, arg); 1191 1149 } 1192 1150 ··· 1307 1265 static long fb_compat_ioctl(struct file *file, unsigned int cmd, 1308 1266 unsigned long arg) 1309 1267 { 1310 - struct inode *inode = file->f_path.dentry->d_inode; 1311 - int fbidx = iminor(inode); 1312 - struct fb_info *info = registered_fb[fbidx]; 1313 - struct fb_ops *fb = info->fbops; 1268 + struct fb_info *info = file_fb_info(file); 1269 + struct fb_ops *fb; 1314 1270 long ret = -ENOIOCTLCMD; 1315 
1271 1272 + if (!info) 1273 + return -ENODEV; 1274 + fb = info->fbops; 1316 1275 switch(cmd) { 1317 1276 case FBIOGET_VSCREENINFO: 1318 1277 case FBIOPUT_VSCREENINFO: ··· 1346 1303 static int 1347 1304 fb_mmap(struct file *file, struct vm_area_struct * vma) 1348 1305 { 1349 - int fbidx = iminor(file->f_path.dentry->d_inode); 1350 - struct fb_info *info = registered_fb[fbidx]; 1351 - struct fb_ops *fb = info->fbops; 1306 + struct fb_info *info = file_fb_info(file); 1307 + struct fb_ops *fb; 1352 1308 unsigned long off; 1353 1309 unsigned long start; 1354 1310 u32 len; 1355 1311 1312 + if (!info) 1313 + return -ENODEV; 1356 1314 if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT)) 1357 1315 return -EINVAL; 1358 1316 off = vma->vm_pgoff << PAGE_SHIFT; 1317 + fb = info->fbops; 1359 1318 if (!fb) 1360 1319 return -ENODEV; 1361 1320 mutex_lock(&info->mm_lock); ··· 1406 1361 struct fb_info *info; 1407 1362 int res = 0; 1408 1363 1409 - if (fbidx >= FB_MAX) 1410 - return -ENODEV; 1411 - info = registered_fb[fbidx]; 1412 - if (!info) 1364 + info = get_fb_info(fbidx); 1365 + if (!info) { 1413 1366 request_module("fb%d", fbidx); 1414 - info = registered_fb[fbidx]; 1415 - if (!info) 1416 - return -ENODEV; 1367 + info = get_fb_info(fbidx); 1368 + if (!info) 1369 + return -ENODEV; 1370 + } 1371 + if (IS_ERR(info)) 1372 + return PTR_ERR(info); 1373 + 1417 1374 mutex_lock(&info->lock); 1418 1375 if (!try_module_get(info->fbops->owner)) { 1419 1376 res = -ENODEV; ··· 1433 1386 #endif 1434 1387 out: 1435 1388 mutex_unlock(&info->lock); 1389 + if (res) 1390 + put_fb_info(info); 1436 1391 return res; 1437 1392 } 1438 1393 ··· 1450 1401 info->fbops->fb_release(info,1); 1451 1402 module_put(info->fbops->owner); 1452 1403 mutex_unlock(&info->lock); 1404 + put_fb_info(info); 1453 1405 return 0; 1454 1406 } 1455 1407 ··· 1537 1487 return false; 1538 1488 } 1539 1489 1490 + static int do_unregister_framebuffer(struct fb_info *fb_info); 1491 + 1540 1492 #define VGA_FB_PHYS 0xA0000 1541 - void 
remove_conflicting_framebuffers(struct apertures_struct *a, 1493 + static void do_remove_conflicting_framebuffers(struct apertures_struct *a, 1542 1494 const char *name, bool primary) 1543 1495 { 1544 1496 int i; ··· 1562 1510 printk(KERN_INFO "fb: conflicting fb hw usage " 1563 1511 "%s vs %s - removing generic driver\n", 1564 1512 name, registered_fb[i]->fix.id); 1565 - unregister_framebuffer(registered_fb[i]); 1513 + do_unregister_framebuffer(registered_fb[i]); 1566 1514 } 1567 1515 } 1568 1516 } 1569 - EXPORT_SYMBOL(remove_conflicting_framebuffers); 1570 1517 1571 - /** 1572 - * register_framebuffer - registers a frame buffer device 1573 - * @fb_info: frame buffer info structure 1574 - * 1575 - * Registers a frame buffer device @fb_info. 1576 - * 1577 - * Returns negative errno on error, or zero for success. 1578 - * 1579 - */ 1580 - 1581 - int 1582 - register_framebuffer(struct fb_info *fb_info) 1518 + static int do_register_framebuffer(struct fb_info *fb_info) 1583 1519 { 1584 1520 int i; 1585 1521 struct fb_event event; 1586 1522 struct fb_videomode mode; 1587 1523 1588 - if (num_registered_fb == FB_MAX) 1589 - return -ENXIO; 1590 - 1591 1524 if (fb_check_foreignness(fb_info)) 1592 1525 return -ENOSYS; 1593 1526 1594 - remove_conflicting_framebuffers(fb_info->apertures, fb_info->fix.id, 1527 + do_remove_conflicting_framebuffers(fb_info->apertures, fb_info->fix.id, 1595 1528 fb_is_primary_device(fb_info)); 1529 + 1530 + if (num_registered_fb == FB_MAX) 1531 + return -ENXIO; 1596 1532 1597 1533 num_registered_fb++; 1598 1534 for (i = 0 ; i < FB_MAX; i++) 1599 1535 if (!registered_fb[i]) 1600 1536 break; 1601 1537 fb_info->node = i; 1538 + atomic_set(&fb_info->count, 1); 1602 1539 mutex_init(&fb_info->lock); 1603 1540 mutex_init(&fb_info->mm_lock); 1604 1541 ··· 1633 1592 return 0; 1634 1593 } 1635 1594 1595 + static int do_unregister_framebuffer(struct fb_info *fb_info) 1596 + { 1597 + struct fb_event event; 1598 + int i, ret = 0; 1599 + 1600 + i = 
fb_info->node; 1601 + if (i < 0 || i >= FB_MAX || registered_fb[i] != fb_info) 1602 + return -EINVAL; 1603 + 1604 + if (!lock_fb_info(fb_info)) 1605 + return -ENODEV; 1606 + event.info = fb_info; 1607 + ret = fb_notifier_call_chain(FB_EVENT_FB_UNBIND, &event); 1608 + unlock_fb_info(fb_info); 1609 + 1610 + if (ret) 1611 + return -EINVAL; 1612 + 1613 + if (fb_info->pixmap.addr && 1614 + (fb_info->pixmap.flags & FB_PIXMAP_DEFAULT)) 1615 + kfree(fb_info->pixmap.addr); 1616 + fb_destroy_modelist(&fb_info->modelist); 1617 + registered_fb[i] = NULL; 1618 + num_registered_fb--; 1619 + fb_cleanup_device(fb_info); 1620 + device_destroy(fb_class, MKDEV(FB_MAJOR, i)); 1621 + event.info = fb_info; 1622 + fb_notifier_call_chain(FB_EVENT_FB_UNREGISTERED, &event); 1623 + 1624 + /* this may free fb info */ 1625 + put_fb_info(fb_info); 1626 + return 0; 1627 + } 1628 + 1629 + void remove_conflicting_framebuffers(struct apertures_struct *a, 1630 + const char *name, bool primary) 1631 + { 1632 + mutex_lock(&registration_lock); 1633 + do_remove_conflicting_framebuffers(a, name, primary); 1634 + mutex_unlock(&registration_lock); 1635 + } 1636 + EXPORT_SYMBOL(remove_conflicting_framebuffers); 1637 + 1638 + /** 1639 + * register_framebuffer - registers a frame buffer device 1640 + * @fb_info: frame buffer info structure 1641 + * 1642 + * Registers a frame buffer device @fb_info. 1643 + * 1644 + * Returns negative errno on error, or zero for success. 1645 + * 1646 + */ 1647 + int 1648 + register_framebuffer(struct fb_info *fb_info) 1649 + { 1650 + int ret; 1651 + 1652 + mutex_lock(&registration_lock); 1653 + ret = do_register_framebuffer(fb_info); 1654 + mutex_unlock(&registration_lock); 1655 + 1656 + return ret; 1657 + } 1636 1658 1637 1659 /** 1638 1660 * unregister_framebuffer - releases a frame buffer device ··· 1713 1609 * that the driver implements fb_open() and fb_release() to 1714 1610 * check that no processes are using the device. 
1715 1611 */ 1716 - 1717 1612 int 1718 1613 unregister_framebuffer(struct fb_info *fb_info) 1719 1614 { 1720 - struct fb_event event; 1721 - int i, ret = 0; 1615 + int ret; 1722 1616 1723 - i = fb_info->node; 1724 - if (!registered_fb[i]) { 1725 - ret = -EINVAL; 1726 - goto done; 1727 - } 1617 + mutex_lock(&registration_lock); 1618 + ret = do_unregister_framebuffer(fb_info); 1619 + mutex_unlock(&registration_lock); 1728 1620 1729 - 1730 - if (!lock_fb_info(fb_info)) 1731 - return -ENODEV; 1732 - event.info = fb_info; 1733 - ret = fb_notifier_call_chain(FB_EVENT_FB_UNBIND, &event); 1734 - unlock_fb_info(fb_info); 1735 - 1736 - if (ret) { 1737 - ret = -EINVAL; 1738 - goto done; 1739 - } 1740 - 1741 - if (fb_info->pixmap.addr && 1742 - (fb_info->pixmap.flags & FB_PIXMAP_DEFAULT)) 1743 - kfree(fb_info->pixmap.addr); 1744 - fb_destroy_modelist(&fb_info->modelist); 1745 - registered_fb[i]=NULL; 1746 - num_registered_fb--; 1747 - fb_cleanup_device(fb_info); 1748 - device_destroy(fb_class, MKDEV(FB_MAJOR, i)); 1749 - event.info = fb_info; 1750 - fb_notifier_call_chain(FB_EVENT_FB_UNREGISTERED, &event); 1751 - 1752 - /* this may free fb info */ 1753 - if (fb_info->fbops->fb_destroy) 1754 - fb_info->fbops->fb_destroy(fb_info); 1755 - done: 1756 1621 return ret; 1757 1622 } 1758 1623
+8 -7
drivers/virtio/virtio_pci.c
··· 590 590 591 591 static void virtio_pci_release_dev(struct device *_d) 592 592 { 593 - struct virtio_device *dev = container_of(_d, struct virtio_device, dev); 593 + struct virtio_device *dev = container_of(_d, struct virtio_device, 594 + dev); 594 595 struct virtio_pci_device *vp_dev = to_vp_device(dev); 595 - struct pci_dev *pci_dev = vp_dev->pci_dev; 596 596 597 - vp_del_vqs(dev); 598 - pci_set_drvdata(pci_dev, NULL); 599 - pci_iounmap(pci_dev, vp_dev->ioaddr); 600 - pci_release_regions(pci_dev); 601 - pci_disable_device(pci_dev); 602 597 kfree(vp_dev); 603 598 } 604 599 ··· 676 681 struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev); 677 682 678 683 unregister_virtio_device(&vp_dev->vdev); 684 + 685 + vp_del_vqs(&vp_dev->vdev); 686 + pci_set_drvdata(pci_dev, NULL); 687 + pci_iounmap(pci_dev, vp_dev->ioaddr); 688 + pci_release_regions(pci_dev); 689 + pci_disable_device(pci_dev); 679 690 } 680 691 681 692 #ifdef CONFIG_PM
+1
drivers/virtio/virtio_ring.c
··· 371 371 /* detach_buf clears data, so grab it now. */ 372 372 buf = vq->data[i]; 373 373 detach_buf(vq, i); 374 + vq->vring.avail->idx--; 374 375 END_USE(vq); 375 376 return buf; 376 377 }
+97
drivers/watchdog/iTCO_wdt.c
··· 35 35 * document number 324645-001, 324646-001: Cougar Point (CPT) 36 36 * document number TBD : Patsburg (PBG) 37 37 * document number TBD : DH89xxCC 38 + * document number TBD : Panther Point 38 39 */ 39 40 40 41 /* ··· 154 153 TCO_PBG1, /* Patsburg */ 155 154 TCO_PBG2, /* Patsburg */ 156 155 TCO_DH89XXCC, /* DH89xxCC */ 156 + TCO_PPT0, /* Panther Point */ 157 + TCO_PPT1, /* Panther Point */ 158 + TCO_PPT2, /* Panther Point */ 159 + TCO_PPT3, /* Panther Point */ 160 + TCO_PPT4, /* Panther Point */ 161 + TCO_PPT5, /* Panther Point */ 162 + TCO_PPT6, /* Panther Point */ 163 + TCO_PPT7, /* Panther Point */ 164 + TCO_PPT8, /* Panther Point */ 165 + TCO_PPT9, /* Panther Point */ 166 + TCO_PPT10, /* Panther Point */ 167 + TCO_PPT11, /* Panther Point */ 168 + TCO_PPT12, /* Panther Point */ 169 + TCO_PPT13, /* Panther Point */ 170 + TCO_PPT14, /* Panther Point */ 171 + TCO_PPT15, /* Panther Point */ 172 + TCO_PPT16, /* Panther Point */ 173 + TCO_PPT17, /* Panther Point */ 174 + TCO_PPT18, /* Panther Point */ 175 + TCO_PPT19, /* Panther Point */ 176 + TCO_PPT20, /* Panther Point */ 177 + TCO_PPT21, /* Panther Point */ 178 + TCO_PPT22, /* Panther Point */ 179 + TCO_PPT23, /* Panther Point */ 180 + TCO_PPT24, /* Panther Point */ 181 + TCO_PPT25, /* Panther Point */ 182 + TCO_PPT26, /* Panther Point */ 183 + TCO_PPT27, /* Panther Point */ 184 + TCO_PPT28, /* Panther Point */ 185 + TCO_PPT29, /* Panther Point */ 186 + TCO_PPT30, /* Panther Point */ 187 + TCO_PPT31, /* Panther Point */ 157 188 }; 158 189 159 190 static struct { ··· 277 244 {"Patsburg", 2}, 278 245 {"Patsburg", 2}, 279 246 {"DH89xxCC", 2}, 247 + {"Panther Point", 2}, 248 + {"Panther Point", 2}, 249 + {"Panther Point", 2}, 250 + {"Panther Point", 2}, 251 + {"Panther Point", 2}, 252 + {"Panther Point", 2}, 253 + {"Panther Point", 2}, 254 + {"Panther Point", 2}, 255 + {"Panther Point", 2}, 256 + {"Panther Point", 2}, 257 + {"Panther Point", 2}, 258 + {"Panther Point", 2}, 259 + {"Panther Point", 2}, 260 + 
{"Panther Point", 2}, 261 + {"Panther Point", 2}, 262 + {"Panther Point", 2}, 263 + {"Panther Point", 2}, 264 + {"Panther Point", 2}, 265 + {"Panther Point", 2}, 266 + {"Panther Point", 2}, 267 + {"Panther Point", 2}, 268 + {"Panther Point", 2}, 269 + {"Panther Point", 2}, 270 + {"Panther Point", 2}, 271 + {"Panther Point", 2}, 272 + {"Panther Point", 2}, 273 + {"Panther Point", 2}, 274 + {"Panther Point", 2}, 275 + {"Panther Point", 2}, 276 + {"Panther Point", 2}, 277 + {"Panther Point", 2}, 278 + {"Panther Point", 2}, 280 279 {NULL, 0} 281 280 }; 282 281 ··· 428 363 { ITCO_PCI_DEVICE(0x1d40, TCO_PBG1)}, 429 364 { ITCO_PCI_DEVICE(0x1d41, TCO_PBG2)}, 430 365 { ITCO_PCI_DEVICE(0x2310, TCO_DH89XXCC)}, 366 + { ITCO_PCI_DEVICE(0x1e40, TCO_PPT0)}, 367 + { ITCO_PCI_DEVICE(0x1e41, TCO_PPT1)}, 368 + { ITCO_PCI_DEVICE(0x1e42, TCO_PPT2)}, 369 + { ITCO_PCI_DEVICE(0x1e43, TCO_PPT3)}, 370 + { ITCO_PCI_DEVICE(0x1e44, TCO_PPT4)}, 371 + { ITCO_PCI_DEVICE(0x1e45, TCO_PPT5)}, 372 + { ITCO_PCI_DEVICE(0x1e46, TCO_PPT6)}, 373 + { ITCO_PCI_DEVICE(0x1e47, TCO_PPT7)}, 374 + { ITCO_PCI_DEVICE(0x1e48, TCO_PPT8)}, 375 + { ITCO_PCI_DEVICE(0x1e49, TCO_PPT9)}, 376 + { ITCO_PCI_DEVICE(0x1e4a, TCO_PPT10)}, 377 + { ITCO_PCI_DEVICE(0x1e4b, TCO_PPT11)}, 378 + { ITCO_PCI_DEVICE(0x1e4c, TCO_PPT12)}, 379 + { ITCO_PCI_DEVICE(0x1e4d, TCO_PPT13)}, 380 + { ITCO_PCI_DEVICE(0x1e4e, TCO_PPT14)}, 381 + { ITCO_PCI_DEVICE(0x1e4f, TCO_PPT15)}, 382 + { ITCO_PCI_DEVICE(0x1e50, TCO_PPT16)}, 383 + { ITCO_PCI_DEVICE(0x1e51, TCO_PPT17)}, 384 + { ITCO_PCI_DEVICE(0x1e52, TCO_PPT18)}, 385 + { ITCO_PCI_DEVICE(0x1e53, TCO_PPT19)}, 386 + { ITCO_PCI_DEVICE(0x1e54, TCO_PPT20)}, 387 + { ITCO_PCI_DEVICE(0x1e55, TCO_PPT21)}, 388 + { ITCO_PCI_DEVICE(0x1e56, TCO_PPT22)}, 389 + { ITCO_PCI_DEVICE(0x1e57, TCO_PPT23)}, 390 + { ITCO_PCI_DEVICE(0x1e58, TCO_PPT24)}, 391 + { ITCO_PCI_DEVICE(0x1e59, TCO_PPT25)}, 392 + { ITCO_PCI_DEVICE(0x1e5a, TCO_PPT26)}, 393 + { ITCO_PCI_DEVICE(0x1e5b, TCO_PPT27)}, 394 + { ITCO_PCI_DEVICE(0x1e5c, 
TCO_PPT28)}, 395 + { ITCO_PCI_DEVICE(0x1e5d, TCO_PPT29)}, 396 + { ITCO_PCI_DEVICE(0x1e5e, TCO_PPT30)}, 397 + { ITCO_PCI_DEVICE(0x1e5f, TCO_PPT31)}, 431 398 { 0, }, /* End of list */ 432 399 }; 433 400 MODULE_DEVICE_TABLE(pci, iTCO_wdt_pci_tbl);
+5 -2
drivers/watchdog/mpc8xxx_wdt.c
··· 185 185 .fops = &mpc8xxx_wdt_fops, 186 186 }; 187 187 188 + static const struct of_device_id mpc8xxx_wdt_match[]; 188 189 static int __devinit mpc8xxx_wdt_probe(struct platform_device *ofdev) 189 190 { 190 191 int ret; 192 + const struct of_device_id *match; 191 193 struct device_node *np = ofdev->dev.of_node; 192 194 struct mpc8xxx_wdt_type *wdt_type; 193 195 u32 freq = fsl_get_sys_freq(); 194 196 bool enabled; 195 197 196 - if (!ofdev->dev.of_match) 198 + match = of_match_device(mpc8xxx_wdt_match, &ofdev->dev); 199 + if (!match) 197 200 return -EINVAL; 198 - wdt_type = ofdev->dev.of_match->data; 201 + wdt_type = match->data; 199 202 200 203 if (!freq || freq == -1) 201 204 return -EINVAL;
+8 -1
drivers/xen/manage.c
··· 8 8 #include <linux/sysrq.h> 9 9 #include <linux/stop_machine.h> 10 10 #include <linux/freezer.h> 11 + #include <linux/syscore_ops.h> 11 12 12 13 #include <xen/xen.h> 13 14 #include <xen/xenbus.h> ··· 71 70 BUG_ON(!irqs_disabled()); 72 71 73 72 err = sysdev_suspend(PMSG_FREEZE); 73 + if (!err) { 74 + err = syscore_suspend(); 75 + if (err) 76 + sysdev_resume(); 77 + } 74 78 if (err) { 75 - printk(KERN_ERR "xen_suspend: sysdev_suspend failed: %d\n", 79 + printk(KERN_ERR "xen_suspend: system core suspend failed: %d\n", 76 80 err); 77 81 return err; 78 82 } ··· 101 95 xen_timer_resume(); 102 96 } 103 97 98 + syscore_resume(); 104 99 sysdev_resume(); 105 100 106 101 return 0;
+3 -2
fs/btrfs/acl.c
··· 178 178 179 179 if (value) { 180 180 acl = posix_acl_from_xattr(value, size); 181 + if (IS_ERR(acl)) 182 + return PTR_ERR(acl); 183 + 181 184 if (acl) { 182 185 ret = posix_acl_valid(acl); 183 186 if (ret) 184 187 goto out; 185 - } else if (IS_ERR(acl)) { 186 - return PTR_ERR(acl); 187 188 } 188 189 } 189 190
+1 -1
fs/btrfs/ctree.h
··· 718 718 u64 total_bytes; /* total bytes in the space, 719 719 this doesn't take mirrors into account */ 720 720 u64 bytes_used; /* total bytes used, 721 - this does't take mirrors into account */ 721 + this doesn't take mirrors into account */ 722 722 u64 bytes_pinned; /* total bytes pinned, will be freed when the 723 723 transaction finishes */ 724 724 u64 bytes_reserved; /* total bytes the allocator has reserved for
+1
fs/btrfs/disk-io.c
··· 2824 2824 2825 2825 spin_lock(&delayed_refs->lock); 2826 2826 if (delayed_refs->num_entries == 0) { 2827 + spin_unlock(&delayed_refs->lock); 2827 2828 printk(KERN_INFO "delayed_refs has NO entry\n"); 2828 2829 return ret; 2829 2830 }
+30 -11
fs/btrfs/extent-tree.c
··· 8059 8059 u64 group_start = group->key.objectid; 8060 8060 new_extents = kmalloc(sizeof(*new_extents), 8061 8061 GFP_NOFS); 8062 + if (!new_extents) { 8063 + ret = -ENOMEM; 8064 + goto out; 8065 + } 8062 8066 nr_extents = 1; 8063 8067 ret = get_new_locations(reloc_inode, 8064 8068 extent_key, ··· 8856 8852 int btrfs_init_space_info(struct btrfs_fs_info *fs_info) 8857 8853 { 8858 8854 struct btrfs_space_info *space_info; 8855 + struct btrfs_super_block *disk_super; 8856 + u64 features; 8857 + u64 flags; 8858 + int mixed = 0; 8859 8859 int ret; 8860 8860 8861 - ret = update_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM, 0, 0, 8862 - &space_info); 8863 - if (ret) 8864 - return ret; 8861 + disk_super = &fs_info->super_copy; 8862 + if (!btrfs_super_root(disk_super)) 8863 + return 1; 8865 8864 8866 - ret = update_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA, 0, 0, 8867 - &space_info); 8868 - if (ret) 8869 - return ret; 8865 + features = btrfs_super_incompat_flags(disk_super); 8866 + if (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS) 8867 + mixed = 1; 8870 8868 8871 - ret = update_space_info(fs_info, BTRFS_BLOCK_GROUP_DATA, 0, 0, 8872 - &space_info); 8869 + flags = BTRFS_BLOCK_GROUP_SYSTEM; 8870 + ret = update_space_info(fs_info, flags, 0, 0, &space_info); 8873 8871 if (ret) 8874 - return ret; 8872 + goto out; 8875 8873 8874 + if (mixed) { 8875 + flags = BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA; 8876 + ret = update_space_info(fs_info, flags, 0, 0, &space_info); 8877 + } else { 8878 + flags = BTRFS_BLOCK_GROUP_METADATA; 8879 + ret = update_space_info(fs_info, flags, 0, 0, &space_info); 8880 + if (ret) 8881 + goto out; 8882 + 8883 + flags = BTRFS_BLOCK_GROUP_DATA; 8884 + ret = update_space_info(fs_info, flags, 0, 0, &space_info); 8885 + } 8886 + out: 8876 8887 return ret; 8877 8888 } 8878 8889
+1 -1
fs/btrfs/extent_io.c
··· 2681 2681 prefetchw(&page->flags); 2682 2682 list_del(&page->lru); 2683 2683 if (!add_to_page_cache_lru(page, mapping, 2684 - page->index, GFP_KERNEL)) { 2684 + page->index, GFP_NOFS)) { 2685 2685 __extent_read_full_page(tree, page, get_extent, 2686 2686 &bio, 0, &bio_flags); 2687 2687 }
+8 -5
fs/btrfs/free-space-cache.c
··· 1768 1768 1769 1769 while ((node = rb_last(&block_group->free_space_offset)) != NULL) { 1770 1770 info = rb_entry(node, struct btrfs_free_space, offset_index); 1771 - unlink_free_space(block_group, info); 1772 - if (info->bitmap) 1773 - kfree(info->bitmap); 1774 - kmem_cache_free(btrfs_free_space_cachep, info); 1771 + if (!info->bitmap) { 1772 + unlink_free_space(block_group, info); 1773 + kmem_cache_free(btrfs_free_space_cachep, info); 1774 + } else { 1775 + free_bitmap(block_group, info); 1776 + } 1777 + 1775 1778 if (need_resched()) { 1776 1779 spin_unlock(&block_group->tree_lock); 1777 1780 cond_resched(); ··· 2304 2301 start = entry->offset; 2305 2302 bytes = min(entry->bytes, end - start); 2306 2303 unlink_free_space(block_group, entry); 2307 - kfree(entry); 2304 + kmem_cache_free(btrfs_free_space_cachep, entry); 2308 2305 } 2309 2306 2310 2307 spin_unlock(&block_group->tree_lock);
+13 -7
fs/btrfs/inode.c
··· 954 954 1, 0, NULL, GFP_NOFS); 955 955 while (start < end) { 956 956 async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS); 957 + BUG_ON(!async_cow); 957 958 async_cow->inode = inode; 958 959 async_cow->root = root; 959 960 async_cow->locked_page = locked_page; ··· 4732 4731 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name, 4733 4732 dentry->d_name.len, dir->i_ino, objectid, 4734 4733 BTRFS_I(dir)->block_group, mode, &index); 4735 - err = PTR_ERR(inode); 4736 - if (IS_ERR(inode)) 4734 + if (IS_ERR(inode)) { 4735 + err = PTR_ERR(inode); 4737 4736 goto out_unlock; 4737 + } 4738 4738 4739 4739 err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name); 4740 4740 if (err) { ··· 4794 4792 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name, 4795 4793 dentry->d_name.len, dir->i_ino, objectid, 4796 4794 BTRFS_I(dir)->block_group, mode, &index); 4797 - err = PTR_ERR(inode); 4798 - if (IS_ERR(inode)) 4795 + if (IS_ERR(inode)) { 4796 + err = PTR_ERR(inode); 4799 4797 goto out_unlock; 4798 + } 4800 4799 4801 4800 err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name); 4802 4801 if (err) { ··· 5002 4999 inline_size = btrfs_file_extent_inline_item_len(leaf, 5003 5000 btrfs_item_nr(leaf, path->slots[0])); 5004 5001 tmp = kmalloc(inline_size, GFP_NOFS); 5002 + if (!tmp) 5003 + return -ENOMEM; 5005 5004 ptr = btrfs_file_extent_inline_start(item); 5006 5005 5007 5006 read_extent_buffer(leaf, tmp, ptr, inline_size); ··· 6041 6036 ret = btrfs_map_block(map_tree, READ, start_sector << 9, 6042 6037 &map_length, NULL, 0); 6043 6038 if (ret) { 6044 - bio_put(bio); 6039 + bio_put(orig_bio); 6045 6040 return -EIO; 6046 6041 } 6047 6042 ··· 7278 7273 dentry->d_name.len, dir->i_ino, objectid, 7279 7274 BTRFS_I(dir)->block_group, S_IFLNK|S_IRWXUGO, 7280 7275 &index); 7281 - err = PTR_ERR(inode); 7282 - if (IS_ERR(inode)) 7276 + if (IS_ERR(inode)) { 7277 + err = PTR_ERR(inode); 7283 7278 goto out_unlock; 7279 + } 7284 7280 7285 7281 err = 
btrfs_init_inode_security(trans, inode, dir, &dentry->d_name); 7286 7282 if (err) {
+15 -9
fs/btrfs/ioctl.c
··· 81 81 iflags |= FS_NOATIME_FL; 82 82 if (flags & BTRFS_INODE_DIRSYNC) 83 83 iflags |= FS_DIRSYNC_FL; 84 + if (flags & BTRFS_INODE_NODATACOW) 85 + iflags |= FS_NOCOW_FL; 86 + 87 + if ((flags & BTRFS_INODE_COMPRESS) && !(flags & BTRFS_INODE_NOCOMPRESS)) 88 + iflags |= FS_COMPR_FL; 89 + else if (flags & BTRFS_INODE_NOCOMPRESS) 90 + iflags |= FS_NOCOMP_FL; 84 91 85 92 return iflags; 86 93 } ··· 151 144 if (flags & ~(FS_IMMUTABLE_FL | FS_APPEND_FL | \ 152 145 FS_NOATIME_FL | FS_NODUMP_FL | \ 153 146 FS_SYNC_FL | FS_DIRSYNC_FL | \ 154 - FS_NOCOMP_FL | FS_COMPR_FL | \ 155 - FS_NOCOW_FL | FS_COW_FL)) 147 + FS_NOCOMP_FL | FS_COMPR_FL | 148 + FS_NOCOW_FL)) 156 149 return -EOPNOTSUPP; 157 150 158 151 if ((flags & FS_NOCOMP_FL) && (flags & FS_COMPR_FL)) 159 - return -EINVAL; 160 - 161 - if ((flags & FS_NOCOW_FL) && (flags & FS_COW_FL)) 162 152 return -EINVAL; 163 153 164 154 return 0; ··· 222 218 ip->flags |= BTRFS_INODE_DIRSYNC; 223 219 else 224 220 ip->flags &= ~BTRFS_INODE_DIRSYNC; 221 + if (flags & FS_NOCOW_FL) 222 + ip->flags |= BTRFS_INODE_NODATACOW; 223 + else 224 + ip->flags &= ~BTRFS_INODE_NODATACOW; 225 225 226 226 /* 227 227 * The COMPRESS flag can only be changed by users, while the NOCOMPRESS ··· 238 230 } else if (flags & FS_COMPR_FL) { 239 231 ip->flags |= BTRFS_INODE_COMPRESS; 240 232 ip->flags &= ~BTRFS_INODE_NOCOMPRESS; 233 + } else { 234 + ip->flags &= ~(BTRFS_INODE_COMPRESS | BTRFS_INODE_NOCOMPRESS); 241 235 } 242 - if (flags & FS_NOCOW_FL) 243 - ip->flags |= BTRFS_INODE_NODATACOW; 244 - else if (flags & FS_COW_FL) 245 - ip->flags &= ~BTRFS_INODE_NODATACOW; 246 236 247 237 trans = btrfs_join_transaction(root, 1); 248 238 BUG_ON(IS_ERR(trans));
+5 -2
fs/btrfs/tree-log.c
··· 2209 2209 2210 2210 log = root->log_root; 2211 2211 path = btrfs_alloc_path(); 2212 - if (!path) 2213 - return -ENOMEM; 2212 + if (!path) { 2213 + err = -ENOMEM; 2214 + goto out_unlock; 2215 + } 2214 2216 2215 2217 di = btrfs_lookup_dir_item(trans, log, path, dir->i_ino, 2216 2218 name, name_len, -1); ··· 2273 2271 } 2274 2272 fail: 2275 2273 btrfs_free_path(path); 2274 + out_unlock: 2276 2275 mutex_unlock(&BTRFS_I(dir)->log_mutex); 2277 2276 if (ret == -ENOSPC) { 2278 2277 root->fs_info->last_trans_log_full_commit = trans->transid;
+10
fs/btrfs/volumes.c
··· 155 155 unsigned long limit; 156 156 unsigned long last_waited = 0; 157 157 int force_reg = 0; 158 + struct blk_plug plug; 159 + 160 + /* 161 + * this function runs all the bios we've collected for 162 + * a particular device. We don't want to wander off to 163 + * another device without first sending all of these down. 164 + * So, setup a plug here and finish it off before we return 165 + */ 166 + blk_start_plug(&plug); 158 167 159 168 bdi = blk_get_backing_dev_info(device->bdev); 160 169 fs_info = device->dev_root->fs_info; ··· 303 294 spin_unlock(&device->io_lock); 304 295 305 296 done: 297 + blk_finish_plug(&plug); 306 298 return 0; 307 299 } 308 300
+7
fs/ceph/addr.c
··· 775 775 ci->i_truncate_seq, 776 776 ci->i_truncate_size, 777 777 &inode->i_mtime, true, 1, 0); 778 + 779 + if (!req) { 780 + rc = -ENOMEM; 781 + unlock_page(page); 782 + break; 783 + } 784 + 778 785 max_pages = req->r_num_pages; 779 786 780 787 alloc_page_vec(fsc, req);
+15 -15
fs/ceph/caps.c
··· 819 819 used |= CEPH_CAP_FILE_CACHE; 820 820 if (ci->i_wr_ref) 821 821 used |= CEPH_CAP_FILE_WR; 822 - if (ci->i_wrbuffer_ref) 822 + if (ci->i_wb_ref || ci->i_wrbuffer_ref) 823 823 used |= CEPH_CAP_FILE_BUFFER; 824 824 return used; 825 825 } ··· 1331 1331 } 1332 1332 1333 1333 /* 1334 - * Mark caps dirty. If inode is newly dirty, add to the global dirty 1335 - * list. 1334 + * Mark caps dirty. If inode is newly dirty, return the dirty flags. 1335 + * Caller is then responsible for calling __mark_inode_dirty with the 1336 + * returned flags value. 1336 1337 */ 1337 - void __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask) 1338 + int __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask) 1338 1339 { 1339 1340 struct ceph_mds_client *mdsc = 1340 1341 ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc; ··· 1358 1357 list_add(&ci->i_dirty_item, &mdsc->cap_dirty); 1359 1358 spin_unlock(&mdsc->cap_dirty_lock); 1360 1359 if (ci->i_flushing_caps == 0) { 1361 - igrab(inode); 1360 + ihold(inode); 1362 1361 dirty |= I_DIRTY_SYNC; 1363 1362 } 1364 1363 } ··· 1366 1365 if (((was | ci->i_flushing_caps) & CEPH_CAP_FILE_BUFFER) && 1367 1366 (mask & CEPH_CAP_FILE_BUFFER)) 1368 1367 dirty |= I_DIRTY_DATASYNC; 1369 - if (dirty) 1370 - __mark_inode_dirty(inode, dirty); 1371 1368 __cap_delay_requeue(mdsc, ci); 1369 + return dirty; 1372 1370 } 1373 1371 1374 1372 /* ··· 1990 1990 if (got & CEPH_CAP_FILE_WR) 1991 1991 ci->i_wr_ref++; 1992 1992 if (got & CEPH_CAP_FILE_BUFFER) { 1993 - if (ci->i_wrbuffer_ref == 0) 1994 - igrab(&ci->vfs_inode); 1995 - ci->i_wrbuffer_ref++; 1996 - dout("__take_cap_refs %p wrbuffer %d -> %d (?)\n", 1997 - &ci->vfs_inode, ci->i_wrbuffer_ref-1, ci->i_wrbuffer_ref); 1993 + if (ci->i_wb_ref == 0) 1994 + ihold(&ci->vfs_inode); 1995 + ci->i_wb_ref++; 1996 + dout("__take_cap_refs %p wb %d -> %d (?)\n", 1997 + &ci->vfs_inode, ci->i_wb_ref-1, ci->i_wb_ref); 1998 1998 } 1999 1999 } 2000 2000 ··· 2169 2169 if (--ci->i_rdcache_ref == 0) 2170 2170 last++; 2171 
2171 if (had & CEPH_CAP_FILE_BUFFER) { 2172 - if (--ci->i_wrbuffer_ref == 0) { 2172 + if (--ci->i_wb_ref == 0) { 2173 2173 last++; 2174 2174 put++; 2175 2175 } 2176 - dout("put_cap_refs %p wrbuffer %d -> %d (?)\n", 2177 - inode, ci->i_wrbuffer_ref+1, ci->i_wrbuffer_ref); 2176 + dout("put_cap_refs %p wb %d -> %d (?)\n", 2177 + inode, ci->i_wb_ref+1, ci->i_wb_ref); 2178 2178 } 2179 2179 if (had & CEPH_CAP_FILE_WR) 2180 2180 if (--ci->i_wr_ref == 0) {
+4 -1
fs/ceph/file.c
··· 734 734 } 735 735 } 736 736 if (ret >= 0) { 737 + int dirty; 737 738 spin_lock(&inode->i_lock); 738 - __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR); 739 + dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR); 739 740 spin_unlock(&inode->i_lock); 741 + if (dirty) 742 + __mark_inode_dirty(inode, dirty); 740 743 } 741 744 742 745 out:
+6 -1
fs/ceph/inode.c
··· 355 355 ci->i_rd_ref = 0; 356 356 ci->i_rdcache_ref = 0; 357 357 ci->i_wr_ref = 0; 358 + ci->i_wb_ref = 0; 358 359 ci->i_wrbuffer_ref = 0; 359 360 ci->i_wrbuffer_ref_head = 0; 360 361 ci->i_shared_gen = 0; ··· 1568 1567 int release = 0, dirtied = 0; 1569 1568 int mask = 0; 1570 1569 int err = 0; 1570 + int inode_dirty_flags = 0; 1571 1571 1572 1572 if (ceph_snap(inode) != CEPH_NOSNAP) 1573 1573 return -EROFS; ··· 1727 1725 dout("setattr %p ATTR_FILE ... hrm!\n", inode); 1728 1726 1729 1727 if (dirtied) { 1730 - __ceph_mark_dirty_caps(ci, dirtied); 1728 + inode_dirty_flags = __ceph_mark_dirty_caps(ci, dirtied); 1731 1729 inode->i_ctime = CURRENT_TIME; 1732 1730 } 1733 1731 1734 1732 release &= issued; 1735 1733 spin_unlock(&inode->i_lock); 1734 + 1735 + if (inode_dirty_flags) 1736 + __mark_inode_dirty(inode, inode_dirty_flags); 1736 1737 1737 1738 if (mask) { 1738 1739 req->r_inode = igrab(inode);
+1 -1
fs/ceph/mds_client.c
··· 3304 3304 { 3305 3305 struct ceph_mds_session *s = con->private; 3306 3306 3307 + dout("mdsc con_put %p (%d)\n", s, atomic_read(&s->s_ref) - 1); 3307 3308 ceph_put_mds_session(s); 3308 - dout("mdsc con_put %p (%d)\n", s, atomic_read(&s->s_ref)); 3309 3309 } 3310 3310 3311 3311 /*
+1 -1
fs/ceph/snap.c
··· 206 206 up_write(&mdsc->snap_rwsem); 207 207 } else { 208 208 spin_lock(&mdsc->snap_empty_lock); 209 - list_add(&mdsc->snap_empty, &realm->empty_item); 209 + list_add(&realm->empty_item, &mdsc->snap_empty); 210 210 spin_unlock(&mdsc->snap_empty_lock); 211 211 } 212 212 }
+2 -2
fs/ceph/super.h
··· 293 293 294 294 /* held references to caps */ 295 295 int i_pin_ref; 296 - int i_rd_ref, i_rdcache_ref, i_wr_ref; 296 + int i_rd_ref, i_rdcache_ref, i_wr_ref, i_wb_ref; 297 297 int i_wrbuffer_ref, i_wrbuffer_ref_head; 298 298 u32 i_shared_gen; /* increment each time we get FILE_SHARED */ 299 299 u32 i_rdcache_gen; /* incremented each time we get FILE_CACHE. */ ··· 506 506 { 507 507 return ci->i_dirty_caps | ci->i_flushing_caps; 508 508 } 509 - extern void __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask); 509 + extern int __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask); 510 510 511 511 extern int ceph_caps_revoking(struct ceph_inode_info *ci, int mask); 512 512 extern int __ceph_caps_used(struct ceph_inode_info *ci);
+8 -4
fs/ceph/xattr.c
··· 703 703 struct ceph_inode_xattr *xattr = NULL; 704 704 int issued; 705 705 int required_blob_size; 706 + int dirty; 706 707 707 708 if (ceph_snap(inode) != CEPH_NOSNAP) 708 709 return -EROFS; ··· 764 763 dout("setxattr %p issued %s\n", inode, ceph_cap_string(issued)); 765 764 err = __set_xattr(ci, newname, name_len, newval, 766 765 val_len, 1, 1, 1, &xattr); 767 - __ceph_mark_dirty_caps(ci, CEPH_CAP_XATTR_EXCL); 766 + dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_XATTR_EXCL); 768 767 ci->i_xattrs.dirty = true; 769 768 inode->i_ctime = CURRENT_TIME; 770 769 spin_unlock(&inode->i_lock); 771 - 770 + if (dirty) 771 + __mark_inode_dirty(inode, dirty); 772 772 return err; 773 773 774 774 do_sync: ··· 812 810 struct ceph_vxattr_cb *vxattrs = ceph_inode_vxattrs(inode); 813 811 int issued; 814 812 int err; 813 + int dirty; 815 814 816 815 if (ceph_snap(inode) != CEPH_NOSNAP) 817 816 return -EROFS; ··· 836 833 goto do_sync; 837 834 838 835 err = __remove_xattr_by_name(ceph_inode(inode), name); 839 - __ceph_mark_dirty_caps(ci, CEPH_CAP_XATTR_EXCL); 836 + dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_XATTR_EXCL); 840 837 ci->i_xattrs.dirty = true; 841 838 inode->i_ctime = CURRENT_TIME; 842 839 843 840 spin_unlock(&inode->i_lock); 844 - 841 + if (dirty) 842 + __mark_inode_dirty(inode, dirty); 845 843 return err; 846 844 do_sync: 847 845 spin_unlock(&inode->i_lock);
+6 -8
fs/cifs/cifs_unicode.c
··· 277 277 278 278 for (i = 0, j = 0; i < srclen; j++) { 279 279 src_char = source[i]; 280 + charlen = 1; 280 281 switch (src_char) { 281 282 case 0: 282 283 put_unaligned(0, &target[j]); ··· 317 316 dst_char = cpu_to_le16(0x003f); 318 317 charlen = 1; 319 318 } 320 - /* 321 - * character may take more than one byte in the source 322 - * string, but will take exactly two bytes in the 323 - * target string 324 - */ 325 - i += charlen; 326 - continue; 327 319 } 320 + /* 321 + * character may take more than one byte in the source string, 322 + * but will take exactly two bytes in the target string 323 + */ 324 + i += charlen; 328 325 put_unaligned(dst_char, &target[j]); 329 - i++; /* move to next char in source string */ 330 326 } 331 327 332 328 ctoUCS_out:
+76 -56
fs/cifs/connect.c
··· 274 274 char *data_area_of_target; 275 275 char *data_area_of_buf2; 276 276 int remaining; 277 - __u16 byte_count, total_data_size, total_in_buf, total_in_buf2; 277 + unsigned int byte_count, total_in_buf; 278 + __u16 total_data_size, total_in_buf2; 278 279 279 280 total_data_size = get_unaligned_le16(&pSMBt->t2_rsp.TotalDataCount); 280 281 ··· 288 287 remaining = total_data_size - total_in_buf; 289 288 290 289 if (remaining < 0) 291 - return -EINVAL; 290 + return -EPROTO; 292 291 293 292 if (remaining == 0) /* nothing to do, ignore */ 294 293 return 0; ··· 309 308 data_area_of_target += total_in_buf; 310 309 311 310 /* copy second buffer into end of first buffer */ 312 - memcpy(data_area_of_target, data_area_of_buf2, total_in_buf2); 313 311 total_in_buf += total_in_buf2; 312 + /* is the result too big for the field? */ 313 + if (total_in_buf > USHRT_MAX) 314 + return -EPROTO; 314 315 put_unaligned_le16(total_in_buf, &pSMBt->t2_rsp.DataCount); 316 + 317 + /* fix up the BCC */ 315 318 byte_count = get_bcc_le(pTargetSMB); 316 319 byte_count += total_in_buf2; 320 + /* is the result too big for the field? 
*/ 321 + if (byte_count > USHRT_MAX) 322 + return -EPROTO; 317 323 put_bcc_le(byte_count, pTargetSMB); 318 324 319 325 byte_count = pTargetSMB->smb_buf_length; 320 326 byte_count += total_in_buf2; 321 - 322 - /* BB also add check that we are not beyond maximum buffer size */ 323 - 327 + /* don't allow buffer to overflow */ 328 + if (byte_count > CIFSMaxBufSize) 329 + return -ENOBUFS; 324 330 pTargetSMB->smb_buf_length = byte_count; 331 + 332 + memcpy(data_area_of_target, data_area_of_buf2, total_in_buf2); 325 333 326 334 if (remaining == total_in_buf2) { 327 335 cFYI(1, "found the last secondary response"); ··· 617 607 list_for_each_safe(tmp, tmp2, &server->pending_mid_q) { 618 608 mid_entry = list_entry(tmp, struct mid_q_entry, qhead); 619 609 620 - if ((mid_entry->mid == smb_buffer->Mid) && 621 - (mid_entry->midState == MID_REQUEST_SUBMITTED) && 622 - (mid_entry->command == smb_buffer->Command)) { 623 - if (length == 0 && 624 - check2ndT2(smb_buffer, server->maxBuf) > 0) { 625 - /* We have a multipart transact2 resp */ 626 - isMultiRsp = true; 627 - if (mid_entry->resp_buf) { 628 - /* merge response - fix up 1st*/ 629 - if (coalesce_t2(smb_buffer, 630 - mid_entry->resp_buf)) { 631 - mid_entry->multiRsp = 632 - true; 633 - break; 634 - } else { 635 - /* all parts received */ 636 - mid_entry->multiEnd = 637 - true; 638 - goto multi_t2_fnd; 639 - } 610 + if (mid_entry->mid != smb_buffer->Mid || 611 + mid_entry->midState != MID_REQUEST_SUBMITTED || 612 + mid_entry->command != smb_buffer->Command) { 613 + mid_entry = NULL; 614 + continue; 615 + } 616 + 617 + if (length == 0 && 618 + check2ndT2(smb_buffer, server->maxBuf) > 0) { 619 + /* We have a multipart transact2 resp */ 620 + isMultiRsp = true; 621 + if (mid_entry->resp_buf) { 622 + /* merge response - fix up 1st*/ 623 + length = coalesce_t2(smb_buffer, 624 + mid_entry->resp_buf); 625 + if (length > 0) { 626 + length = 0; 627 + mid_entry->multiRsp = true; 628 + break; 640 629 } else { 641 - if (!isLargeBuf) { 642 
- cERROR(1, "1st trans2 resp needs bigbuf"); 643 - /* BB maybe we can fix this up, switch 644 - to already allocated large buffer? */ 645 - } else { 646 - /* Have first buffer */ 647 - mid_entry->resp_buf = 648 - smb_buffer; 649 - mid_entry->largeBuf = 650 - true; 651 - bigbuf = NULL; 652 - } 630 + /* all parts received or 631 + * packet is malformed 632 + */ 633 + mid_entry->multiEnd = true; 634 + goto multi_t2_fnd; 653 635 } 654 - break; 636 + } else { 637 + if (!isLargeBuf) { 638 + /* 639 + * FIXME: switch to already 640 + * allocated largebuf? 641 + */ 642 + cERROR(1, "1st trans2 resp " 643 + "needs bigbuf"); 644 + } else { 645 + /* Have first buffer */ 646 + mid_entry->resp_buf = 647 + smb_buffer; 648 + mid_entry->largeBuf = true; 649 + bigbuf = NULL; 650 + } 655 651 } 656 - mid_entry->resp_buf = smb_buffer; 657 - mid_entry->largeBuf = isLargeBuf; 658 - multi_t2_fnd: 659 - if (length == 0) 660 - mid_entry->midState = 661 - MID_RESPONSE_RECEIVED; 662 - else 663 - mid_entry->midState = 664 - MID_RESPONSE_MALFORMED; 665 - #ifdef CONFIG_CIFS_STATS2 666 - mid_entry->when_received = jiffies; 667 - #endif 668 - list_del_init(&mid_entry->qhead); 669 - mid_entry->callback(mid_entry); 670 652 break; 671 653 } 672 - mid_entry = NULL; 654 + mid_entry->resp_buf = smb_buffer; 655 + mid_entry->largeBuf = isLargeBuf; 656 + multi_t2_fnd: 657 + if (length == 0) 658 + mid_entry->midState = MID_RESPONSE_RECEIVED; 659 + else 660 + mid_entry->midState = MID_RESPONSE_MALFORMED; 661 + #ifdef CONFIG_CIFS_STATS2 662 + mid_entry->when_received = jiffies; 663 + #endif 664 + list_del_init(&mid_entry->qhead); 665 + mid_entry->callback(mid_entry); 666 + break; 673 667 } 674 668 spin_unlock(&GlobalMid_Lock); 675 669 ··· 821 807 cifs_parse_mount_options(char *options, const char *devname, 822 808 struct smb_vol *vol) 823 809 { 824 - char *value; 825 - char *data; 810 + char *value, *data, *end; 826 811 unsigned int temp_len, i, j; 827 812 char separator[2]; 828 813 short int override_uid = 
-1; ··· 864 851 if (!options) 865 852 return 1; 866 853 854 + end = options + strlen(options); 867 855 if (strncmp(options, "sep=", 4) == 0) { 868 856 if (options[4] != 0) { 869 857 separator[0] = options[4]; ··· 930 916 the only illegal character in a password is null */ 931 917 932 918 if ((value[temp_len] == 0) && 919 + (value + temp_len < end) && 933 920 (value[temp_len+1] == separator[0])) { 934 921 /* reinsert comma */ 935 922 value[temp_len] = separator[0]; ··· 2673 2658 0 /* not legacy */, cifs_sb->local_nls, 2674 2659 cifs_sb->mnt_cifs_flags & 2675 2660 CIFS_MOUNT_MAP_SPECIAL_CHR); 2661 + 2662 + if (rc == -EOPNOTSUPP || rc == -EINVAL) 2663 + rc = SMBQueryInformation(xid, tcon, full_path, pfile_info, 2664 + cifs_sb->local_nls, cifs_sb->mnt_cifs_flags & 2665 + CIFS_MOUNT_MAP_SPECIAL_CHR); 2676 2666 kfree(pfile_info); 2677 2667 return rc; 2678 2668 }
+4 -15
fs/cifs/sess.c
··· 276 276 } 277 277 278 278 static void 279 - decode_unicode_ssetup(char **pbcc_area, __u16 bleft, struct cifsSesInfo *ses, 279 + decode_unicode_ssetup(char **pbcc_area, int bleft, struct cifsSesInfo *ses, 280 280 const struct nls_table *nls_cp) 281 281 { 282 282 int len; 283 283 char *data = *pbcc_area; 284 284 285 285 cFYI(1, "bleft %d", bleft); 286 - 287 - /* 288 - * Windows servers do not always double null terminate their final 289 - * Unicode string. Check to see if there are an uneven number of bytes 290 - * left. If so, then add an extra NULL pad byte to the end of the 291 - * response. 292 - * 293 - * See section 2.7.2 in "Implementing CIFS" for details 294 - */ 295 - if (bleft % 2) { 296 - data[bleft] = 0; 297 - ++bleft; 298 - } 299 286 300 287 kfree(ses->serverOS); 301 288 ses->serverOS = cifs_strndup_from_ucs(data, bleft, true, nls_cp); ··· 916 929 } 917 930 918 931 /* BB check if Unicode and decode strings */ 919 - if (smb_buf->Flags2 & SMBFLG2_UNICODE) { 932 + if (bytes_remaining == 0) { 933 + /* no string area to decode, do nothing */ 934 + } else if (smb_buf->Flags2 & SMBFLG2_UNICODE) { 920 935 /* unicode string area must be word-aligned */ 921 936 if (((unsigned long) bcc_ptr - (unsigned long) smb_buf) % 2) { 922 937 ++bcc_ptr;
+32 -7
fs/configfs/dir.c
··· 53 53 static void configfs_d_iput(struct dentry * dentry, 54 54 struct inode * inode) 55 55 { 56 - struct configfs_dirent * sd = dentry->d_fsdata; 56 + struct configfs_dirent *sd = dentry->d_fsdata; 57 57 58 58 if (sd) { 59 59 BUG_ON(sd->s_dentry != dentry); 60 + /* Coordinate with configfs_readdir */ 61 + spin_lock(&configfs_dirent_lock); 60 62 sd->s_dentry = NULL; 63 + spin_unlock(&configfs_dirent_lock); 61 64 configfs_put(sd); 62 65 } 63 66 iput(inode); ··· 692 689 sd = child->d_fsdata; 693 690 sd->s_type |= CONFIGFS_USET_DEFAULT; 694 691 } else { 695 - d_delete(child); 692 + BUG_ON(child->d_inode); 693 + d_drop(child); 696 694 dput(child); 697 695 } 698 696 } ··· 1549 1545 struct configfs_dirent * parent_sd = dentry->d_fsdata; 1550 1546 struct configfs_dirent *cursor = filp->private_data; 1551 1547 struct list_head *p, *q = &cursor->s_sibling; 1552 - ino_t ino; 1548 + ino_t ino = 0; 1553 1549 int i = filp->f_pos; 1554 1550 1555 1551 switch (i) { ··· 1577 1573 struct configfs_dirent *next; 1578 1574 const char * name; 1579 1575 int len; 1576 + struct inode *inode = NULL; 1580 1577 1581 1578 next = list_entry(p, struct configfs_dirent, 1582 1579 s_sibling); ··· 1586 1581 1587 1582 name = configfs_get_name(next); 1588 1583 len = strlen(name); 1589 - if (next->s_dentry) 1590 - ino = next->s_dentry->d_inode->i_ino; 1591 - else 1584 + 1585 + /* 1586 + * We'll have a dentry and an inode for 1587 + * PINNED items and for open attribute 1588 + * files. We lock here to prevent a race 1589 + * with configfs_d_iput() clearing 1590 + * s_dentry before calling iput(). 1591 + * 1592 + * Why do we go to the trouble? If 1593 + * someone has an attribute file open, 1594 + * the inode number should match until 1595 + * they close it. Beyond that, we don't 1596 + * care. 
1597 + */ 1598 + spin_lock(&configfs_dirent_lock); 1599 + dentry = next->s_dentry; 1600 + if (dentry) 1601 + inode = dentry->d_inode; 1602 + if (inode) 1603 + ino = inode->i_ino; 1604 + spin_unlock(&configfs_dirent_lock); 1605 + if (!inode) 1592 1606 ino = iunique(configfs_sb, 2); 1593 1607 1594 1608 if (filldir(dirent, name, len, filp->f_pos, ino, ··· 1707 1683 err = configfs_attach_group(sd->s_element, &group->cg_item, 1708 1684 dentry); 1709 1685 if (err) { 1710 - d_delete(dentry); 1686 + BUG_ON(dentry->d_inode); 1687 + d_drop(dentry); 1711 1688 dput(dentry); 1712 1689 } else { 1713 1690 spin_lock(&configfs_dirent_lock);
+32 -55
fs/dcache.c
··· 99 99 static unsigned int d_hash_mask __read_mostly; 100 100 static unsigned int d_hash_shift __read_mostly; 101 101 102 - struct dcache_hash_bucket { 103 - struct hlist_bl_head head; 104 - }; 105 - static struct dcache_hash_bucket *dentry_hashtable __read_mostly; 102 + static struct hlist_bl_head *dentry_hashtable __read_mostly; 106 103 107 - static inline struct dcache_hash_bucket *d_hash(struct dentry *parent, 104 + static inline struct hlist_bl_head *d_hash(struct dentry *parent, 108 105 unsigned long hash) 109 106 { 110 107 hash += ((unsigned long) parent ^ GOLDEN_RATIO_PRIME) / L1_CACHE_BYTES; 111 108 hash = hash ^ ((hash ^ GOLDEN_RATIO_PRIME) >> D_HASHBITS); 112 109 return dentry_hashtable + (hash & D_HASHMASK); 113 - } 114 - 115 - static inline void spin_lock_bucket(struct dcache_hash_bucket *b) 116 - { 117 - bit_spin_lock(0, (unsigned long *)&b->head.first); 118 - } 119 - 120 - static inline void spin_unlock_bucket(struct dcache_hash_bucket *b) 121 - { 122 - __bit_spin_unlock(0, (unsigned long *)&b->head.first); 123 110 } 124 111 125 112 /* Statistics gathering. 
*/ ··· 154 167 if (dentry->d_op && dentry->d_op->d_release) 155 168 dentry->d_op->d_release(dentry); 156 169 157 - /* if dentry was never inserted into hash, immediate free is OK */ 158 - if (hlist_bl_unhashed(&dentry->d_hash)) 170 + /* if dentry was never visible to RCU, immediate free is OK */ 171 + if (!(dentry->d_flags & DCACHE_RCUACCESS)) 159 172 __d_free(&dentry->d_u.d_rcu); 160 173 else 161 174 call_rcu(&dentry->d_u.d_rcu, __d_free); ··· 317 330 */ 318 331 void __d_drop(struct dentry *dentry) 319 332 { 320 - if (!(dentry->d_flags & DCACHE_UNHASHED)) { 321 - if (unlikely(dentry->d_flags & DCACHE_DISCONNECTED)) { 322 - bit_spin_lock(0, 323 - (unsigned long *)&dentry->d_sb->s_anon.first); 324 - dentry->d_flags |= DCACHE_UNHASHED; 325 - hlist_bl_del_init(&dentry->d_hash); 326 - __bit_spin_unlock(0, 327 - (unsigned long *)&dentry->d_sb->s_anon.first); 328 - } else { 329 - struct dcache_hash_bucket *b; 333 + if (!d_unhashed(dentry)) { 334 + struct hlist_bl_head *b; 335 + if (unlikely(dentry->d_flags & DCACHE_DISCONNECTED)) 336 + b = &dentry->d_sb->s_anon; 337 + else 330 338 b = d_hash(dentry->d_parent, dentry->d_name.hash); 331 - spin_lock_bucket(b); 332 - /* 333 - * We may not actually need to put DCACHE_UNHASHED 334 - * manipulations under the hash lock, but follow 335 - * the principle of least surprise. 
336 - */ 337 - dentry->d_flags |= DCACHE_UNHASHED; 338 - hlist_bl_del_rcu(&dentry->d_hash); 339 - spin_unlock_bucket(b); 340 - dentry_rcuwalk_barrier(dentry); 341 - } 339 + 340 + hlist_bl_lock(b); 341 + __hlist_bl_del(&dentry->d_hash); 342 + dentry->d_hash.pprev = NULL; 343 + hlist_bl_unlock(b); 344 + 345 + dentry_rcuwalk_barrier(dentry); 342 346 } 343 347 } 344 348 EXPORT_SYMBOL(__d_drop); ··· 1282 1304 dname[name->len] = 0; 1283 1305 1284 1306 dentry->d_count = 1; 1285 - dentry->d_flags = DCACHE_UNHASHED; 1307 + dentry->d_flags = 0; 1286 1308 spin_lock_init(&dentry->d_lock); 1287 1309 seqcount_init(&dentry->d_seq); 1288 1310 dentry->d_inode = NULL; ··· 1584 1606 tmp->d_inode = inode; 1585 1607 tmp->d_flags |= DCACHE_DISCONNECTED; 1586 1608 list_add(&tmp->d_alias, &inode->i_dentry); 1587 - bit_spin_lock(0, (unsigned long *)&tmp->d_sb->s_anon.first); 1588 - tmp->d_flags &= ~DCACHE_UNHASHED; 1609 + hlist_bl_lock(&tmp->d_sb->s_anon); 1589 1610 hlist_bl_add_head(&tmp->d_hash, &tmp->d_sb->s_anon); 1590 - __bit_spin_unlock(0, (unsigned long *)&tmp->d_sb->s_anon.first); 1611 + hlist_bl_unlock(&tmp->d_sb->s_anon); 1591 1612 spin_unlock(&tmp->d_lock); 1592 1613 spin_unlock(&inode->i_lock); 1593 1614 security_d_instantiate(tmp, inode); ··· 1766 1789 unsigned int len = name->len; 1767 1790 unsigned int hash = name->hash; 1768 1791 const unsigned char *str = name->name; 1769 - struct dcache_hash_bucket *b = d_hash(parent, hash); 1792 + struct hlist_bl_head *b = d_hash(parent, hash); 1770 1793 struct hlist_bl_node *node; 1771 1794 struct dentry *dentry; 1772 1795 ··· 1790 1813 * 1791 1814 * See Documentation/filesystems/path-lookup.txt for more details. 
1792 1815 */ 1793 - hlist_bl_for_each_entry_rcu(dentry, node, &b->head, d_hash) { 1816 + hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) { 1794 1817 struct inode *i; 1795 1818 const char *tname; 1796 1819 int tlen; ··· 1885 1908 unsigned int len = name->len; 1886 1909 unsigned int hash = name->hash; 1887 1910 const unsigned char *str = name->name; 1888 - struct dcache_hash_bucket *b = d_hash(parent, hash); 1911 + struct hlist_bl_head *b = d_hash(parent, hash); 1889 1912 struct hlist_bl_node *node; 1890 1913 struct dentry *found = NULL; 1891 1914 struct dentry *dentry; ··· 1912 1935 */ 1913 1936 rcu_read_lock(); 1914 1937 1915 - hlist_bl_for_each_entry_rcu(dentry, node, &b->head, d_hash) { 1938 + hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) { 1916 1939 const char *tname; 1917 1940 int tlen; 1918 1941 ··· 2063 2086 } 2064 2087 EXPORT_SYMBOL(d_delete); 2065 2088 2066 - static void __d_rehash(struct dentry * entry, struct dcache_hash_bucket *b) 2089 + static void __d_rehash(struct dentry * entry, struct hlist_bl_head *b) 2067 2090 { 2068 2091 BUG_ON(!d_unhashed(entry)); 2069 - spin_lock_bucket(b); 2070 - entry->d_flags &= ~DCACHE_UNHASHED; 2071 - hlist_bl_add_head_rcu(&entry->d_hash, &b->head); 2072 - spin_unlock_bucket(b); 2092 + hlist_bl_lock(b); 2093 + entry->d_flags |= DCACHE_RCUACCESS; 2094 + hlist_bl_add_head_rcu(&entry->d_hash, b); 2095 + hlist_bl_unlock(b); 2073 2096 } 2074 2097 2075 2098 static void _d_rehash(struct dentry * entry) ··· 3002 3025 3003 3026 dentry_hashtable = 3004 3027 alloc_large_system_hash("Dentry cache", 3005 - sizeof(struct dcache_hash_bucket), 3028 + sizeof(struct hlist_bl_head), 3006 3029 dhash_entries, 3007 3030 13, 3008 3031 HASH_EARLY, ··· 3011 3034 0); 3012 3035 3013 3036 for (loop = 0; loop < (1 << d_hash_shift); loop++) 3014 - INIT_HLIST_BL_HEAD(&dentry_hashtable[loop].head); 3037 + INIT_HLIST_BL_HEAD(dentry_hashtable + loop); 3015 3038 } 3016 3039 3017 3040 static void __init dcache_init(void) ··· 3034 3057 3035 3058 
dentry_hashtable = 3036 3059 alloc_large_system_hash("Dentry cache", 3037 - sizeof(struct dcache_hash_bucket), 3060 + sizeof(struct hlist_bl_head), 3038 3061 dhash_entries, 3039 3062 13, 3040 3063 0, ··· 3043 3066 0); 3044 3067 3045 3068 for (loop = 0; loop < (1 << d_hash_shift); loop++) 3046 - INIT_HLIST_BL_HEAD(&dentry_hashtable[loop].head); 3069 + INIT_HLIST_BL_HEAD(dentry_hashtable + loop); 3047 3070 } 3048 3071 3049 3072 /* SLAB cache for __getname() consumers */
+21
fs/ecryptfs/crypto.c
··· 1452 1452 crypt_stat->metadata_size = ECRYPTFS_MINIMUM_HEADER_EXTENT_SIZE; 1453 1453 } 1454 1454 1455 + void ecryptfs_i_size_init(const char *page_virt, struct inode *inode) 1456 + { 1457 + struct ecryptfs_mount_crypt_stat *mount_crypt_stat; 1458 + struct ecryptfs_crypt_stat *crypt_stat; 1459 + u64 file_size; 1460 + 1461 + crypt_stat = &ecryptfs_inode_to_private(inode)->crypt_stat; 1462 + mount_crypt_stat = 1463 + &ecryptfs_superblock_to_private(inode->i_sb)->mount_crypt_stat; 1464 + if (mount_crypt_stat->flags & ECRYPTFS_ENCRYPTED_VIEW_ENABLED) { 1465 + file_size = i_size_read(ecryptfs_inode_to_lower(inode)); 1466 + if (crypt_stat->flags & ECRYPTFS_METADATA_IN_XATTR) 1467 + file_size += crypt_stat->metadata_size; 1468 + } else 1469 + file_size = get_unaligned_be64(page_virt); 1470 + i_size_write(inode, (loff_t)file_size); 1471 + crypt_stat->flags |= ECRYPTFS_I_SIZE_INITIALIZED; 1472 + } 1473 + 1455 1474 /** 1456 1475 * ecryptfs_read_headers_virt 1457 1476 * @page_virt: The virtual address into which to read the headers ··· 1501 1482 rc = -EINVAL; 1502 1483 goto out; 1503 1484 } 1485 + if (!(crypt_stat->flags & ECRYPTFS_I_SIZE_INITIALIZED)) 1486 + ecryptfs_i_size_init(page_virt, ecryptfs_dentry->d_inode); 1504 1487 offset += MAGIC_ECRYPTFS_MARKER_SIZE_BYTES; 1505 1488 rc = ecryptfs_process_flags(crypt_stat, (page_virt + offset), 1506 1489 &bytes_read);
+6 -1
fs/ecryptfs/ecryptfs_kernel.h
··· 269 269 #define ECRYPTFS_ENCFN_USE_MOUNT_FNEK 0x00000800 270 270 #define ECRYPTFS_ENCFN_USE_FEK 0x00001000 271 271 #define ECRYPTFS_UNLINK_SIGS 0x00002000 272 + #define ECRYPTFS_I_SIZE_INITIALIZED 0x00004000 272 273 u32 flags; 273 274 unsigned int file_version; 274 275 size_t iv_bytes; ··· 296 295 struct ecryptfs_inode_info { 297 296 struct inode vfs_inode; 298 297 struct inode *wii_inode; 298 + struct mutex lower_file_mutex; 299 + atomic_t lower_file_count; 299 300 struct file *lower_file; 300 301 struct ecryptfs_crypt_stat crypt_stat; 301 302 }; ··· 629 626 int ecryptfs_interpose(struct dentry *hidden_dentry, 630 627 struct dentry *this_dentry, struct super_block *sb, 631 628 u32 flags); 629 + void ecryptfs_i_size_init(const char *page_virt, struct inode *inode); 632 630 int ecryptfs_lookup_and_interpose_lower(struct dentry *ecryptfs_dentry, 633 631 struct dentry *lower_dentry, 634 632 struct inode *ecryptfs_dir_inode); ··· 761 757 struct dentry *lower_dentry, 762 758 struct vfsmount *lower_mnt, 763 759 const struct cred *cred); 764 - int ecryptfs_init_persistent_file(struct dentry *ecryptfs_dentry); 760 + int ecryptfs_get_lower_file(struct dentry *ecryptfs_dentry); 761 + void ecryptfs_put_lower_file(struct inode *inode); 765 762 int 766 763 ecryptfs_write_tag_70_packet(char *dest, size_t *remaining_bytes, 767 764 size_t *packet_size,
+12 -13
fs/ecryptfs/file.c
··· 191 191 | ECRYPTFS_ENCRYPTED); 192 192 } 193 193 mutex_unlock(&crypt_stat->cs_mutex); 194 - rc = ecryptfs_init_persistent_file(ecryptfs_dentry); 194 + rc = ecryptfs_get_lower_file(ecryptfs_dentry); 195 195 if (rc) { 196 196 printk(KERN_ERR "%s: Error attempting to initialize " 197 - "the persistent file for the dentry with name " 197 + "the lower file for the dentry with name " 198 198 "[%s]; rc = [%d]\n", __func__, 199 199 ecryptfs_dentry->d_name.name, rc); 200 200 goto out_free; ··· 202 202 if ((ecryptfs_inode_to_private(inode)->lower_file->f_flags & O_ACCMODE) 203 203 == O_RDONLY && (file->f_flags & O_ACCMODE) != O_RDONLY) { 204 204 rc = -EPERM; 205 - printk(KERN_WARNING "%s: Lower persistent file is RO; eCryptfs " 205 + printk(KERN_WARNING "%s: Lower file is RO; eCryptfs " 206 206 "file must hence be opened RO\n", __func__); 207 - goto out_free; 207 + goto out_put; 208 208 } 209 209 ecryptfs_set_file_lower( 210 210 file, ecryptfs_inode_to_private(inode)->lower_file); ··· 232 232 "Plaintext passthrough mode is not " 233 233 "enabled; returning -EIO\n"); 234 234 mutex_unlock(&crypt_stat->cs_mutex); 235 - goto out_free; 235 + goto out_put; 236 236 } 237 237 rc = 0; 238 - crypt_stat->flags &= ~(ECRYPTFS_ENCRYPTED); 238 + crypt_stat->flags &= ~(ECRYPTFS_I_SIZE_INITIALIZED 239 + | ECRYPTFS_ENCRYPTED); 239 240 mutex_unlock(&crypt_stat->cs_mutex); 240 241 goto out; 241 242 } ··· 246 245 "[0x%.16lx] size: [0x%.16llx]\n", inode, inode->i_ino, 247 246 (unsigned long long)i_size_read(inode)); 248 247 goto out; 248 + out_put: 249 + ecryptfs_put_lower_file(inode); 249 250 out_free: 250 251 kmem_cache_free(ecryptfs_file_info_cache, 251 252 ecryptfs_file_to_private(file)); ··· 257 254 258 255 static int ecryptfs_flush(struct file *file, fl_owner_t td) 259 256 { 260 - int rc = 0; 261 - struct file *lower_file = NULL; 262 - 263 - lower_file = ecryptfs_file_to_lower(file); 264 - if (lower_file->f_op && lower_file->f_op->flush) 265 - rc = lower_file->f_op->flush(lower_file, 
td); 266 - return rc; 257 + return file->f_mode & FMODE_WRITE 258 + ? filemap_write_and_wait(file->f_mapping) : 0; 267 259 } 268 260 269 261 static int ecryptfs_release(struct inode *inode, struct file *file) 270 262 { 263 + ecryptfs_put_lower_file(inode); 271 264 kmem_cache_free(ecryptfs_file_info_cache, 272 265 ecryptfs_file_to_private(file)); 273 266 return 0;
+32 -28
fs/ecryptfs/inode.c
··· 168 168 "context; rc = [%d]\n", rc); 169 169 goto out; 170 170 } 171 - rc = ecryptfs_init_persistent_file(ecryptfs_dentry); 171 + rc = ecryptfs_get_lower_file(ecryptfs_dentry); 172 172 if (rc) { 173 173 printk(KERN_ERR "%s: Error attempting to initialize " 174 - "the persistent file for the dentry with name " 174 + "the lower file for the dentry with name " 175 175 "[%s]; rc = [%d]\n", __func__, 176 176 ecryptfs_dentry->d_name.name, rc); 177 177 goto out; 178 178 } 179 179 rc = ecryptfs_write_metadata(ecryptfs_dentry); 180 - if (rc) { 180 + if (rc) 181 181 printk(KERN_ERR "Error writing headers; rc = [%d]\n", rc); 182 - goto out; 183 - } 182 + ecryptfs_put_lower_file(ecryptfs_dentry->d_inode); 184 183 out: 185 184 return rc; 186 185 } ··· 225 226 struct dentry *lower_dir_dentry; 226 227 struct vfsmount *lower_mnt; 227 228 struct inode *lower_inode; 228 - struct ecryptfs_mount_crypt_stat *mount_crypt_stat; 229 229 struct ecryptfs_crypt_stat *crypt_stat; 230 230 char *page_virt = NULL; 231 - u64 file_size; 232 - int rc = 0; 231 + int put_lower = 0, rc = 0; 233 232 234 233 lower_dir_dentry = lower_dentry->d_parent; 235 234 lower_mnt = mntget(ecryptfs_dentry_to_lower_mnt( ··· 274 277 rc = -ENOMEM; 275 278 goto out; 276 279 } 277 - rc = ecryptfs_init_persistent_file(ecryptfs_dentry); 280 + rc = ecryptfs_get_lower_file(ecryptfs_dentry); 278 281 if (rc) { 279 282 printk(KERN_ERR "%s: Error attempting to initialize " 280 - "the persistent file for the dentry with name " 283 + "the lower file for the dentry with name " 281 284 "[%s]; rc = [%d]\n", __func__, 282 285 ecryptfs_dentry->d_name.name, rc); 283 286 goto out_free_kmem; 284 287 } 288 + put_lower = 1; 285 289 crypt_stat = &ecryptfs_inode_to_private( 286 290 ecryptfs_dentry->d_inode)->crypt_stat; 287 291 /* TODO: lock for crypt_stat comparison */ ··· 300 302 } 301 303 crypt_stat->flags |= ECRYPTFS_METADATA_IN_XATTR; 302 304 } 303 - mount_crypt_stat = &ecryptfs_superblock_to_private( 304 - 
ecryptfs_dentry->d_sb)->mount_crypt_stat; 305 - if (mount_crypt_stat->flags & ECRYPTFS_ENCRYPTED_VIEW_ENABLED) { 306 - if (crypt_stat->flags & ECRYPTFS_METADATA_IN_XATTR) 307 - file_size = (crypt_stat->metadata_size 308 - + i_size_read(lower_dentry->d_inode)); 309 - else 310 - file_size = i_size_read(lower_dentry->d_inode); 311 - } else { 312 - file_size = get_unaligned_be64(page_virt); 313 - } 314 - i_size_write(ecryptfs_dentry->d_inode, (loff_t)file_size); 305 + ecryptfs_i_size_init(page_virt, ecryptfs_dentry->d_inode); 315 306 out_free_kmem: 316 307 kmem_cache_free(ecryptfs_header_cache_2, page_virt); 317 308 goto out; ··· 309 322 mntput(lower_mnt); 310 323 d_drop(ecryptfs_dentry); 311 324 out: 325 + if (put_lower) 326 + ecryptfs_put_lower_file(ecryptfs_dentry->d_inode); 312 327 return rc; 313 328 } 314 329 ··· 527 538 dget(lower_dentry); 528 539 rc = vfs_rmdir(lower_dir_dentry->d_inode, lower_dentry); 529 540 dput(lower_dentry); 530 - if (!rc) 531 - d_delete(lower_dentry); 532 541 fsstack_copy_attr_times(dir, lower_dir_dentry->d_inode); 533 542 dir->i_nlink = lower_dir_dentry->d_inode->i_nlink; 534 543 unlock_dir(lower_dir_dentry); ··· 597 610 fsstack_copy_attr_all(old_dir, lower_old_dir_dentry->d_inode); 598 611 out_lock: 599 612 unlock_rename(lower_old_dir_dentry, lower_new_dir_dentry); 600 - dput(lower_new_dentry->d_parent); 601 - dput(lower_old_dentry->d_parent); 613 + dput(lower_new_dir_dentry); 614 + dput(lower_old_dir_dentry); 602 615 dput(lower_new_dentry); 603 616 dput(lower_old_dentry); 604 617 return rc; ··· 746 759 747 760 if (unlikely((ia->ia_size == i_size))) { 748 761 lower_ia->ia_valid &= ~ATTR_SIZE; 749 - goto out; 762 + return 0; 750 763 } 764 + rc = ecryptfs_get_lower_file(dentry); 765 + if (rc) 766 + return rc; 751 767 crypt_stat = &ecryptfs_inode_to_private(dentry->d_inode)->crypt_stat; 752 768 /* Switch on growing or shrinking file */ 753 769 if (ia->ia_size > i_size) { ··· 828 838 lower_ia->ia_valid &= ~ATTR_SIZE; 829 839 } 830 840 out: 
841 + ecryptfs_put_lower_file(inode); 831 842 return rc; 832 843 } 833 844 ··· 904 913 905 914 mount_crypt_stat = &ecryptfs_superblock_to_private( 906 915 dentry->d_sb)->mount_crypt_stat; 916 + rc = ecryptfs_get_lower_file(dentry); 917 + if (rc) { 918 + mutex_unlock(&crypt_stat->cs_mutex); 919 + goto out; 920 + } 907 921 rc = ecryptfs_read_metadata(dentry); 922 + ecryptfs_put_lower_file(inode); 908 923 if (rc) { 909 924 if (!(mount_crypt_stat->flags 910 925 & ECRYPTFS_PLAINTEXT_PASSTHROUGH_ENABLED)) { ··· 924 927 goto out; 925 928 } 926 929 rc = 0; 927 - crypt_stat->flags &= ~(ECRYPTFS_ENCRYPTED); 930 + crypt_stat->flags &= ~(ECRYPTFS_I_SIZE_INITIALIZED 931 + | ECRYPTFS_ENCRYPTED); 928 932 } 929 933 } 930 934 mutex_unlock(&crypt_stat->cs_mutex); 935 + if (S_ISREG(inode->i_mode)) { 936 + rc = filemap_write_and_wait(inode->i_mapping); 937 + if (rc) 938 + goto out; 939 + fsstack_copy_attr_all(inode, lower_inode); 940 + } 931 941 memcpy(&lower_ia, ia, sizeof(lower_ia)); 932 942 if (ia->ia_valid & ATTR_FILE) 933 943 lower_ia.ia_file = ecryptfs_file_to_lower(ia->ia_file);
+3 -3
fs/ecryptfs/kthread.c
··· 44 44 * @ignored: ignored 45 45 * 46 46 * The eCryptfs kernel thread that has the responsibility of getting 47 - * the lower persistent file with RW permissions. 47 + * the lower file with RW permissions. 48 48 * 49 49 * Returns zero on success; non-zero otherwise 50 50 */ ··· 141 141 int rc = 0; 142 142 143 143 /* Corresponding dput() and mntput() are done when the 144 - * persistent file is fput() when the eCryptfs inode is 145 - * destroyed. */ 144 + * lower file is fput() when all eCryptfs files for the inode are 145 + * released. */ 146 146 dget(lower_dentry); 147 147 mntget(lower_mnt); 148 148 flags |= IS_RDONLY(lower_dentry->d_inode) ? O_RDONLY : O_RDWR;
+51 -23
fs/ecryptfs/main.c
··· 96 96 } 97 97 98 98 /** 99 - * ecryptfs_init_persistent_file 99 + * ecryptfs_init_lower_file 100 100 * @ecryptfs_dentry: Fully initialized eCryptfs dentry object, with 101 101 * the lower dentry and the lower mount set 102 102 * ··· 104 104 * inode. All I/O operations to the lower inode occur through that 105 105 * file. When the first eCryptfs dentry that interposes with the first 106 106 * lower dentry for that inode is created, this function creates the 107 - * persistent file struct and associates it with the eCryptfs 108 - * inode. When the eCryptfs inode is destroyed, the file is closed. 107 + * lower file struct and associates it with the eCryptfs 108 + * inode. When all eCryptfs files associated with the inode are released, the 109 + * file is closed. 109 110 * 110 - * The persistent file will be opened with read/write permissions, if 111 + * The lower file will be opened with read/write permissions, if 111 112 * possible. Otherwise, it is opened read-only. 112 113 * 113 - * This function does nothing if a lower persistent file is already 114 + * This function does nothing if a lower file is already 114 115 * associated with the eCryptfs inode. 
115 116 * 116 117 * Returns zero on success; non-zero otherwise 117 118 */ 118 - int ecryptfs_init_persistent_file(struct dentry *ecryptfs_dentry) 119 + static int ecryptfs_init_lower_file(struct dentry *dentry, 120 + struct file **lower_file) 119 121 { 120 122 const struct cred *cred = current_cred(); 121 - struct ecryptfs_inode_info *inode_info = 122 - ecryptfs_inode_to_private(ecryptfs_dentry->d_inode); 123 - int rc = 0; 123 + struct dentry *lower_dentry = ecryptfs_dentry_to_lower(dentry); 124 + struct vfsmount *lower_mnt = ecryptfs_dentry_to_lower_mnt(dentry); 125 + int rc; 124 126 125 - if (!inode_info->lower_file) { 126 - struct dentry *lower_dentry; 127 - struct vfsmount *lower_mnt = 128 - ecryptfs_dentry_to_lower_mnt(ecryptfs_dentry); 129 - 130 - lower_dentry = ecryptfs_dentry_to_lower(ecryptfs_dentry); 131 - rc = ecryptfs_privileged_open(&inode_info->lower_file, 132 - lower_dentry, lower_mnt, cred); 133 - if (rc) { 134 - printk(KERN_ERR "Error opening lower persistent file " 135 - "for lower_dentry [0x%p] and lower_mnt [0x%p]; " 136 - "rc = [%d]\n", lower_dentry, lower_mnt, rc); 137 - inode_info->lower_file = NULL; 138 - } 127 + rc = ecryptfs_privileged_open(lower_file, lower_dentry, lower_mnt, 128 + cred); 129 + if (rc) { 130 + printk(KERN_ERR "Error opening lower file " 131 + "for lower_dentry [0x%p] and lower_mnt [0x%p]; " 132 + "rc = [%d]\n", lower_dentry, lower_mnt, rc); 133 + (*lower_file) = NULL; 139 134 } 140 135 return rc; 136 + } 137 + 138 + int ecryptfs_get_lower_file(struct dentry *dentry) 139 + { 140 + struct ecryptfs_inode_info *inode_info = 141 + ecryptfs_inode_to_private(dentry->d_inode); 142 + int count, rc = 0; 143 + 144 + mutex_lock(&inode_info->lower_file_mutex); 145 + count = atomic_inc_return(&inode_info->lower_file_count); 146 + if (WARN_ON_ONCE(count < 1)) 147 + rc = -EINVAL; 148 + else if (count == 1) { 149 + rc = ecryptfs_init_lower_file(dentry, 150 + &inode_info->lower_file); 151 + if (rc) 152 + 
atomic_set(&inode_info->lower_file_count, 0); 153 + } 154 + mutex_unlock(&inode_info->lower_file_mutex); 155 + return rc; 156 + } 157 + 158 + void ecryptfs_put_lower_file(struct inode *inode) 159 + { 160 + struct ecryptfs_inode_info *inode_info; 161 + 162 + inode_info = ecryptfs_inode_to_private(inode); 163 + if (atomic_dec_and_mutex_lock(&inode_info->lower_file_count, 164 + &inode_info->lower_file_mutex)) { 165 + fput(inode_info->lower_file); 166 + inode_info->lower_file = NULL; 167 + mutex_unlock(&inode_info->lower_file_mutex); 168 + } 141 169 } 142 170 143 171 static struct inode *ecryptfs_get_inode(struct inode *lower_inode,
+4 -12
fs/ecryptfs/super.c
··· 55 55 if (unlikely(!inode_info)) 56 56 goto out; 57 57 ecryptfs_init_crypt_stat(&inode_info->crypt_stat); 58 + mutex_init(&inode_info->lower_file_mutex); 59 + atomic_set(&inode_info->lower_file_count, 0); 58 60 inode_info->lower_file = NULL; 59 61 inode = &inode_info->vfs_inode; 60 62 out: ··· 79 77 * 80 78 * This is used during the final destruction of the inode. All 81 79 * allocation of memory related to the inode, including allocated 82 - * memory in the crypt_stat struct, will be released here. This 83 - * function also fput()'s the persistent file for the lower inode. 80 + * memory in the crypt_stat struct, will be released here. 84 81 * There should be no chance that this deallocation will be missed. 85 82 */ 86 83 static void ecryptfs_destroy_inode(struct inode *inode) ··· 87 86 struct ecryptfs_inode_info *inode_info; 88 87 89 88 inode_info = ecryptfs_inode_to_private(inode); 90 - if (inode_info->lower_file) { 91 - struct dentry *lower_dentry = 92 - inode_info->lower_file->f_dentry; 93 - 94 - BUG_ON(!lower_dentry); 95 - if (lower_dentry->d_inode) { 96 - fput(inode_info->lower_file); 97 - inode_info->lower_file = NULL; 98 - } 99 - } 89 + BUG_ON(inode_info->lower_file); 100 90 ecryptfs_destroy_crypt_stat(&inode_info->crypt_stat); 101 91 call_rcu(&inode->i_rcu, ecryptfs_i_callback); 102 92 }
+11 -7
fs/file.c
··· 9 9 #include <linux/module.h> 10 10 #include <linux/fs.h> 11 11 #include <linux/mm.h> 12 + #include <linux/mmzone.h> 12 13 #include <linux/time.h> 13 14 #include <linux/sched.h> 14 15 #include <linux/slab.h> ··· 40 39 */ 41 40 static DEFINE_PER_CPU(struct fdtable_defer, fdtable_defer_list); 42 41 43 - static inline void *alloc_fdmem(unsigned int size) 42 + static void *alloc_fdmem(unsigned int size) 44 43 { 45 - void *data; 46 - 47 - data = kmalloc(size, GFP_KERNEL|__GFP_NOWARN); 48 - if (data != NULL) 49 - return data; 50 - 44 + /* 45 + * Very large allocations can stress page reclaim, so fall back to 46 + * vmalloc() if the allocation size will be considered "large" by the VM. 47 + */ 48 + if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) { 49 + void *data = kmalloc(size, GFP_KERNEL|__GFP_NOWARN); 50 + if (data != NULL) 51 + return data; 52 + } 51 53 return vmalloc(size); 52 54 } 53 55
+1 -1
fs/fuse/dir.c
··· 174 174 if (!inode) 175 175 return 0; 176 176 177 - if (nd->flags & LOOKUP_RCU) 177 + if (nd && (nd->flags & LOOKUP_RCU)) 178 178 return -ECHILD; 179 179 180 180 fc = get_fuse_conn(inode);
+1 -1
fs/gfs2/aops.c
··· 884 884 } 885 885 886 886 brelse(dibh); 887 - gfs2_trans_end(sdp); 888 887 failed: 888 + gfs2_trans_end(sdp); 889 889 if (al) { 890 890 gfs2_inplace_release(ip); 891 891 gfs2_quota_unlock(ip);
+1 -1
fs/gfs2/dir.c
··· 1506 1506 inode = gfs2_inode_lookup(dir->i_sb, 1507 1507 be16_to_cpu(dent->de_type), 1508 1508 be64_to_cpu(dent->de_inum.no_addr), 1509 - be64_to_cpu(dent->de_inum.no_formal_ino)); 1509 + be64_to_cpu(dent->de_inum.no_formal_ino), 0); 1510 1510 brelse(bh); 1511 1511 return inode; 1512 1512 }
+48 -10
fs/gfs2/file.c
··· 617 617 return generic_file_aio_write(iocb, iov, nr_segs, pos); 618 618 } 619 619 620 - static void empty_write_end(struct page *page, unsigned from, 621 - unsigned to) 620 + static int empty_write_end(struct page *page, unsigned from, 621 + unsigned to, int mode) 622 622 { 623 - struct gfs2_inode *ip = GFS2_I(page->mapping->host); 623 + struct inode *inode = page->mapping->host; 624 + struct gfs2_inode *ip = GFS2_I(inode); 625 + struct buffer_head *bh; 626 + unsigned offset, blksize = 1 << inode->i_blkbits; 627 + pgoff_t end_index = i_size_read(inode) >> PAGE_CACHE_SHIFT; 624 628 625 629 zero_user(page, from, to-from); 626 630 mark_page_accessed(page); 627 631 628 - if (!gfs2_is_writeback(ip)) 629 - gfs2_page_add_databufs(ip, page, from, to); 632 + if (page->index < end_index || !(mode & FALLOC_FL_KEEP_SIZE)) { 633 + if (!gfs2_is_writeback(ip)) 634 + gfs2_page_add_databufs(ip, page, from, to); 630 635 631 - block_commit_write(page, from, to); 636 + block_commit_write(page, from, to); 637 + return 0; 638 + } 639 + 640 + offset = 0; 641 + bh = page_buffers(page); 642 + while (offset < to) { 643 + if (offset >= from) { 644 + set_buffer_uptodate(bh); 645 + mark_buffer_dirty(bh); 646 + clear_buffer_new(bh); 647 + write_dirty_buffer(bh, WRITE); 648 + } 649 + offset += blksize; 650 + bh = bh->b_this_page; 651 + } 652 + 653 + offset = 0; 654 + bh = page_buffers(page); 655 + while (offset < to) { 656 + if (offset >= from) { 657 + wait_on_buffer(bh); 658 + if (!buffer_uptodate(bh)) 659 + return -EIO; 660 + } 661 + offset += blksize; 662 + bh = bh->b_this_page; 663 + } 664 + return 0; 632 665 } 633 666 634 667 static int needs_empty_write(sector_t block, struct inode *inode) ··· 676 643 return !buffer_mapped(&bh_map); 677 644 } 678 645 679 - static int write_empty_blocks(struct page *page, unsigned from, unsigned to) 646 + static int write_empty_blocks(struct page *page, unsigned from, unsigned to, 647 + int mode) 680 648 { 681 649 struct inode *inode = 
page->mapping->host; 682 650 unsigned start, end, next, blksize; ··· 702 668 gfs2_block_map); 703 669 if (unlikely(ret)) 704 670 return ret; 705 - empty_write_end(page, start, end); 671 + ret = empty_write_end(page, start, end, mode); 672 + if (unlikely(ret)) 673 + return ret; 706 674 end = 0; 707 675 } 708 676 start = next; ··· 718 682 ret = __block_write_begin(page, start, end - start, gfs2_block_map); 719 683 if (unlikely(ret)) 720 684 return ret; 721 - empty_write_end(page, start, end); 685 + ret = empty_write_end(page, start, end, mode); 686 + if (unlikely(ret)) 687 + return ret; 722 688 } 723 689 724 690 return 0; ··· 769 731 770 732 if (curr == end) 771 733 to = end_offset; 772 - error = write_empty_blocks(page, from, to); 734 + error = write_empty_blocks(page, from, to, mode); 773 735 if (!error && offset + to > inode->i_size && 774 736 !(mode & FALLOC_FL_KEEP_SIZE)) { 775 737 i_size_write(inode, offset + to);
+2 -4
fs/gfs2/glock.c
··· 93 93 94 94 static inline void spin_lock_bucket(unsigned int hash) 95 95 { 96 - struct hlist_bl_head *bl = &gl_hash_table[hash]; 97 - bit_spin_lock(0, (unsigned long *)bl); 96 + hlist_bl_lock(&gl_hash_table[hash]); 98 97 } 99 98 100 99 static inline void spin_unlock_bucket(unsigned int hash) 101 100 { 102 - struct hlist_bl_head *bl = &gl_hash_table[hash]; 103 - __bit_spin_unlock(0, (unsigned long *)bl); 101 + hlist_bl_unlock(&gl_hash_table[hash]); 104 102 } 105 103 106 104 static void gfs2_glock_dealloc(struct rcu_head *rcu)
+4
fs/gfs2/glops.c
··· 385 385 static void iopen_go_callback(struct gfs2_glock *gl) 386 386 { 387 387 struct gfs2_inode *ip = (struct gfs2_inode *)gl->gl_object; 388 + struct gfs2_sbd *sdp = gl->gl_sbd; 389 + 390 + if (sdp->sd_vfs->s_flags & MS_RDONLY) 391 + return; 388 392 389 393 if (gl->gl_demote_state == LM_ST_UNLOCKED && 390 394 gl->gl_state == LM_ST_SHARED && ip) {
+41 -15
fs/gfs2/inode.c
··· 40 40 u64 ir_length; 41 41 }; 42 42 43 + struct gfs2_skip_data { 44 + u64 no_addr; 45 + int skipped; 46 + int non_block; 47 + }; 48 + 43 49 static int iget_test(struct inode *inode, void *opaque) 44 50 { 45 51 struct gfs2_inode *ip = GFS2_I(inode); 46 - u64 *no_addr = opaque; 52 + struct gfs2_skip_data *data = opaque; 47 53 48 - if (ip->i_no_addr == *no_addr) 54 + if (ip->i_no_addr == data->no_addr) { 55 + if (data->non_block && 56 + inode->i_state & (I_FREEING|I_CLEAR|I_WILL_FREE)) { 57 + data->skipped = 1; 58 + return 0; 59 + } 49 60 return 1; 50 - 61 + } 51 62 return 0; 52 63 } 53 64 54 65 static int iget_set(struct inode *inode, void *opaque) 55 66 { 56 67 struct gfs2_inode *ip = GFS2_I(inode); 57 - u64 *no_addr = opaque; 68 + struct gfs2_skip_data *data = opaque; 58 69 59 - inode->i_ino = (unsigned long)*no_addr; 60 - ip->i_no_addr = *no_addr; 70 + if (data->skipped) 71 + return -ENOENT; 72 + inode->i_ino = (unsigned long)(data->no_addr); 73 + ip->i_no_addr = data->no_addr; 61 74 return 0; 62 75 } 63 76 64 77 struct inode *gfs2_ilookup(struct super_block *sb, u64 no_addr) 65 78 { 66 79 unsigned long hash = (unsigned long)no_addr; 67 - return ilookup5(sb, hash, iget_test, &no_addr); 80 + struct gfs2_skip_data data; 81 + 82 + data.no_addr = no_addr; 83 + data.skipped = 0; 84 + data.non_block = 0; 85 + return ilookup5(sb, hash, iget_test, &data); 68 86 } 69 87 70 - static struct inode *gfs2_iget(struct super_block *sb, u64 no_addr) 88 + static struct inode *gfs2_iget(struct super_block *sb, u64 no_addr, 89 + int non_block) 71 90 { 91 + struct gfs2_skip_data data; 72 92 unsigned long hash = (unsigned long)no_addr; 73 - return iget5_locked(sb, hash, iget_test, iget_set, &no_addr); 93 + 94 + data.no_addr = no_addr; 95 + data.skipped = 0; 96 + data.non_block = non_block; 97 + return iget5_locked(sb, hash, iget_test, iget_set, &data); 74 98 } 75 99 76 100 /** ··· 135 111 * @sb: The super block 136 112 * @no_addr: The inode number 137 113 * @type: The type of the 
inode 114 + * non_block: Can we block on inodes that are being freed? 138 115 * 139 116 * Returns: A VFS inode, or an error 140 117 */ 141 118 142 119 struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned int type, 143 - u64 no_addr, u64 no_formal_ino) 120 + u64 no_addr, u64 no_formal_ino, int non_block) 144 121 { 145 122 struct inode *inode; 146 123 struct gfs2_inode *ip; 147 124 struct gfs2_glock *io_gl = NULL; 148 125 int error; 149 126 150 - inode = gfs2_iget(sb, no_addr); 127 + inode = gfs2_iget(sb, no_addr, non_block); 151 128 ip = GFS2_I(inode); 152 129 153 130 if (!inode) ··· 210 185 { 211 186 struct super_block *sb = sdp->sd_vfs; 212 187 struct gfs2_holder i_gh; 213 - struct inode *inode; 188 + struct inode *inode = NULL; 214 189 int error; 215 190 191 + /* Must not read in block until block type is verified */ 216 192 error = gfs2_glock_nq_num(sdp, no_addr, &gfs2_inode_glops, 217 - LM_ST_SHARED, LM_FLAG_ANY, &i_gh); 193 + LM_ST_EXCLUSIVE, GL_SKIP, &i_gh); 218 194 if (error) 219 195 return ERR_PTR(error); 220 196 ··· 223 197 if (error) 224 198 goto fail; 225 199 226 - inode = gfs2_inode_lookup(sb, DT_UNKNOWN, no_addr, 0); 200 + inode = gfs2_inode_lookup(sb, DT_UNKNOWN, no_addr, 0, 1); 227 201 if (IS_ERR(inode)) 228 202 goto fail; 229 203 ··· 869 843 goto fail_gunlock2; 870 844 871 845 inode = gfs2_inode_lookup(dir->i_sb, IF2DT(mode), inum.no_addr, 872 - inum.no_formal_ino); 846 + inum.no_formal_ino, 0); 873 847 if (IS_ERR(inode)) 874 848 goto fail_gunlock2; 875 849
+2 -1
fs/gfs2/inode.h
··· 97 97 } 98 98 99 99 extern struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned type, 100 - u64 no_addr, u64 no_formal_ino); 100 + u64 no_addr, u64 no_formal_ino, 101 + int non_block); 101 102 extern struct inode *gfs2_lookup_by_inum(struct gfs2_sbd *sdp, u64 no_addr, 102 103 u64 *no_formal_ino, 103 104 unsigned int blktype);
+1 -1
fs/gfs2/ops_fstype.c
··· 430 430 struct dentry *dentry; 431 431 struct inode *inode; 432 432 433 - inode = gfs2_inode_lookup(sb, DT_DIR, no_addr, 0); 433 + inode = gfs2_inode_lookup(sb, DT_DIR, no_addr, 0, 0); 434 434 if (IS_ERR(inode)) { 435 435 fs_err(sdp, "can't read in %s inode: %ld\n", name, PTR_ERR(inode)); 436 436 return PTR_ERR(inode);
+2 -2
fs/gfs2/rgrp.c
··· 945 945 /* rgblk_search can return a block < goal, so we need to 946 946 keep it marching forward. */ 947 947 no_addr = block + rgd->rd_data0; 948 - goal++; 948 + goal = max(block + 1, goal + 1); 949 949 if (*last_unlinked != NO_BLOCK && no_addr <= *last_unlinked) 950 950 continue; 951 951 if (no_addr == skip) ··· 971 971 found++; 972 972 973 973 /* Limit reclaim to sensible number of tasks */ 974 - if (found > 2*NR_CPUS) 974 + if (found > NR_CPUS) 975 975 return; 976 976 } 977 977
+11 -3
fs/gfs2/super.c
··· 1318 1318 1319 1319 static void gfs2_evict_inode(struct inode *inode) 1320 1320 { 1321 - struct gfs2_sbd *sdp = inode->i_sb->s_fs_info; 1321 + struct super_block *sb = inode->i_sb; 1322 + struct gfs2_sbd *sdp = sb->s_fs_info; 1322 1323 struct gfs2_inode *ip = GFS2_I(inode); 1323 1324 struct gfs2_holder gh; 1324 1325 int error; 1325 1326 1326 - if (inode->i_nlink) 1327 + if (inode->i_nlink || (sb->s_flags & MS_RDONLY)) 1327 1328 goto out; 1328 1329 1329 - error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh); 1330 + /* Must not read inode block until block type has been verified */ 1331 + error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, GL_SKIP, &gh); 1330 1332 if (unlikely(error)) { 1331 1333 gfs2_glock_dq_uninit(&ip->i_iopen_gh); 1332 1334 goto out; ··· 1337 1335 error = gfs2_check_blk_type(sdp, ip->i_no_addr, GFS2_BLKST_UNLINKED); 1338 1336 if (error) 1339 1337 goto out_truncate; 1338 + 1339 + if (test_bit(GIF_INVALID, &ip->i_flags)) { 1340 + error = gfs2_inode_refresh(ip); 1341 + if (error) 1342 + goto out_truncate; 1343 + } 1340 1344 1341 1345 ip->i_iopen_gh.gh_flags |= GL_NOCACHE; 1342 1346 gfs2_glock_dq_wait(&ip->i_iopen_gh);
-1
fs/hpfs/Kconfig
··· 1 1 config HPFS_FS 2 2 tristate "OS/2 HPFS file system support" 3 3 depends on BLOCK 4 - depends on BROKEN || !PREEMPT 5 4 help 6 5 OS/2 is IBM's operating system for PC's, the same as Warp, and HPFS 7 6 is the file system used for organizing files on OS/2 hard disk
+43 -75
fs/hpfs/alloc.c
··· 8 8 9 9 #include "hpfs_fn.h" 10 10 11 - static int hpfs_alloc_if_possible_nolock(struct super_block *s, secno sec); 12 - 13 11 /* 14 12 * Check if a sector is allocated in bitmap 15 13 * This is really slow. Turned on only if chk==2 ··· 16 18 static int chk_if_allocated(struct super_block *s, secno sec, char *msg) 17 19 { 18 20 struct quad_buffer_head qbh; 19 - unsigned *bmp; 21 + u32 *bmp; 20 22 if (!(bmp = hpfs_map_bitmap(s, sec >> 14, &qbh, "chk"))) goto fail; 21 - if ((bmp[(sec & 0x3fff) >> 5] >> (sec & 0x1f)) & 1) { 23 + if ((cpu_to_le32(bmp[(sec & 0x3fff) >> 5]) >> (sec & 0x1f)) & 1) { 22 24 hpfs_error(s, "sector '%s' - %08x not allocated in bitmap", msg, sec); 23 25 goto fail1; 24 26 } ··· 26 28 if (sec >= hpfs_sb(s)->sb_dirband_start && sec < hpfs_sb(s)->sb_dirband_start + hpfs_sb(s)->sb_dirband_size) { 27 29 unsigned ssec = (sec - hpfs_sb(s)->sb_dirband_start) / 4; 28 30 if (!(bmp = hpfs_map_dnode_bitmap(s, &qbh))) goto fail; 29 - if ((bmp[ssec >> 5] >> (ssec & 0x1f)) & 1) { 31 + if ((le32_to_cpu(bmp[ssec >> 5]) >> (ssec & 0x1f)) & 1) { 30 32 hpfs_error(s, "sector '%s' - %08x not allocated in directory bitmap", msg, sec); 31 33 goto fail1; 32 34 } ··· 73 75 hpfs_error(s, "Bad allocation size: %d", n); 74 76 return 0; 75 77 } 76 - lock_super(s); 77 78 if (bs != ~0x3fff) { 78 79 if (!(bmp = hpfs_map_bitmap(s, near >> 14, &qbh, "aib"))) goto uls; 79 80 } else { ··· 82 85 ret = bs + nr; 83 86 goto rt; 84 87 } 85 - /*if (!tstbits(bmp, nr + n, n + forward)) { 86 - ret = bs + nr + n; 87 - goto rt; 88 - }*/ 89 88 q = nr + n; b = 0; 90 89 while ((a = tstbits(bmp, q, n + forward)) != 0) { 91 90 q += a; ··· 98 105 goto rt; 99 106 } 100 107 nr >>= 5; 101 - /*for (i = nr + 1; i != nr; i++, i &= 0x1ff) {*/ 108 + /*for (i = nr + 1; i != nr; i++, i &= 0x1ff) */ 102 109 i = nr; 103 110 do { 104 - if (!bmp[i]) goto cont; 105 - if (n + forward >= 0x3f && bmp[i] != -1) goto cont; 111 + if (!le32_to_cpu(bmp[i])) goto cont; 112 + if (n + forward >= 0x3f && 
le32_to_cpu(bmp[i]) != 0xffffffff) goto cont; 106 113 q = i<<5; 107 114 if (i > 0) { 108 - unsigned k = bmp[i-1]; 115 + unsigned k = le32_to_cpu(bmp[i-1]); 109 116 while (k & 0x80000000) { 110 117 q--; k <<= 1; 111 118 } ··· 125 132 } while (i != nr); 126 133 rt: 127 134 if (ret) { 128 - if (hpfs_sb(s)->sb_chk && ((ret >> 14) != (bs >> 14) || (bmp[(ret & 0x3fff) >> 5] | ~(((1 << n) - 1) << (ret & 0x1f))) != 0xffffffff)) { 135 + if (hpfs_sb(s)->sb_chk && ((ret >> 14) != (bs >> 14) || (le32_to_cpu(bmp[(ret & 0x3fff) >> 5]) | ~(((1 << n) - 1) << (ret & 0x1f))) != 0xffffffff)) { 129 136 hpfs_error(s, "Allocation doesn't work! Wanted %d, allocated at %08x", n, ret); 130 137 ret = 0; 131 138 goto b; 132 139 } 133 - bmp[(ret & 0x3fff) >> 5] &= ~(((1 << n) - 1) << (ret & 0x1f)); 140 + bmp[(ret & 0x3fff) >> 5] &= cpu_to_le32(~(((1 << n) - 1) << (ret & 0x1f))); 134 141 hpfs_mark_4buffers_dirty(&qbh); 135 142 } 136 143 b: 137 144 hpfs_brelse4(&qbh); 138 145 uls: 139 - unlock_super(s); 140 146 return ret; 141 147 } 142 148 ··· 147 155 * sectors 148 156 */ 149 157 150 - secno hpfs_alloc_sector(struct super_block *s, secno near, unsigned n, int forward, int lock) 158 + secno hpfs_alloc_sector(struct super_block *s, secno near, unsigned n, int forward) 151 159 { 152 160 secno sec; 153 161 int i; ··· 159 167 forward = -forward; 160 168 f_p = 1; 161 169 } 162 - if (lock) hpfs_lock_creation(s); 163 170 n_bmps = (sbi->sb_fs_size + 0x4000 - 1) >> 14; 164 171 if (near && near < sbi->sb_fs_size) { 165 172 if ((sec = alloc_in_bmp(s, near, n, f_p ? forward : forward/4))) goto ret; ··· 205 214 ret: 206 215 if (sec && f_p) { 207 216 for (i = 0; i < forward; i++) { 208 - if (!hpfs_alloc_if_possible_nolock(s, sec + i + 1)) { 217 + if (!hpfs_alloc_if_possible(s, sec + i + 1)) { 209 218 hpfs_error(s, "Prealloc doesn't work! 
Wanted %d, allocated at %08x, can't allocate %d", forward, sec, i); 210 219 sec = 0; 211 220 break; 212 221 } 213 222 } 214 223 } 215 - if (lock) hpfs_unlock_creation(s); 216 224 return sec; 217 225 } 218 226 219 - static secno alloc_in_dirband(struct super_block *s, secno near, int lock) 227 + static secno alloc_in_dirband(struct super_block *s, secno near) 220 228 { 221 229 unsigned nr = near; 222 230 secno sec; ··· 226 236 nr = sbi->sb_dirband_start + sbi->sb_dirband_size - 4; 227 237 nr -= sbi->sb_dirband_start; 228 238 nr >>= 2; 229 - if (lock) hpfs_lock_creation(s); 230 239 sec = alloc_in_bmp(s, (~0x3fff) | nr, 1, 0); 231 - if (lock) hpfs_unlock_creation(s); 232 240 if (!sec) return 0; 233 241 return ((sec & 0x3fff) << 2) + sbi->sb_dirband_start; 234 242 } 235 243 236 244 /* Alloc sector if it's free */ 237 245 238 - static int hpfs_alloc_if_possible_nolock(struct super_block *s, secno sec) 246 + int hpfs_alloc_if_possible(struct super_block *s, secno sec) 239 247 { 240 248 struct quad_buffer_head qbh; 241 - unsigned *bmp; 242 - lock_super(s); 249 + u32 *bmp; 243 250 if (!(bmp = hpfs_map_bitmap(s, sec >> 14, &qbh, "aip"))) goto end; 244 - if (bmp[(sec & 0x3fff) >> 5] & (1 << (sec & 0x1f))) { 245 - bmp[(sec & 0x3fff) >> 5] &= ~(1 << (sec & 0x1f)); 251 + if (le32_to_cpu(bmp[(sec & 0x3fff) >> 5]) & (1 << (sec & 0x1f))) { 252 + bmp[(sec & 0x3fff) >> 5] &= cpu_to_le32(~(1 << (sec & 0x1f))); 246 253 hpfs_mark_4buffers_dirty(&qbh); 247 254 hpfs_brelse4(&qbh); 248 - unlock_super(s); 249 255 return 1; 250 256 } 251 257 hpfs_brelse4(&qbh); 252 258 end: 253 - unlock_super(s); 254 259 return 0; 255 - } 256 - 257 - int hpfs_alloc_if_possible(struct super_block *s, secno sec) 258 - { 259 - int r; 260 - hpfs_lock_creation(s); 261 - r = hpfs_alloc_if_possible_nolock(s, sec); 262 - hpfs_unlock_creation(s); 263 - return r; 264 260 } 265 261 266 262 /* Free sectors in bitmaps */ ··· 254 278 void hpfs_free_sectors(struct super_block *s, secno sec, unsigned n) 255 279 { 256 280 
struct quad_buffer_head qbh; 257 - unsigned *bmp; 281 + u32 *bmp; 258 282 struct hpfs_sb_info *sbi = hpfs_sb(s); 259 283 /*printk("2 - ");*/ 260 284 if (!n) return; ··· 262 286 hpfs_error(s, "Trying to free reserved sector %08x", sec); 263 287 return; 264 288 } 265 - lock_super(s); 266 289 sbi->sb_max_fwd_alloc += n > 0xffff ? 0xffff : n; 267 290 if (sbi->sb_max_fwd_alloc > 0xffffff) sbi->sb_max_fwd_alloc = 0xffffff; 268 291 new_map: 269 292 if (!(bmp = hpfs_map_bitmap(s, sec >> 14, &qbh, "free"))) { 270 - unlock_super(s); 271 293 return; 272 294 } 273 295 new_tst: 274 - if ((bmp[(sec & 0x3fff) >> 5] >> (sec & 0x1f) & 1)) { 296 + if ((le32_to_cpu(bmp[(sec & 0x3fff) >> 5]) >> (sec & 0x1f) & 1)) { 275 297 hpfs_error(s, "sector %08x not allocated", sec); 276 298 hpfs_brelse4(&qbh); 277 - unlock_super(s); 278 299 return; 279 300 } 280 - bmp[(sec & 0x3fff) >> 5] |= 1 << (sec & 0x1f); 301 + bmp[(sec & 0x3fff) >> 5] |= cpu_to_le32(1 << (sec & 0x1f)); 281 302 if (!--n) { 282 303 hpfs_mark_4buffers_dirty(&qbh); 283 304 hpfs_brelse4(&qbh); 284 - unlock_super(s); 285 305 return; 286 306 } 287 307 if (!(++sec & 0x3fff)) { ··· 299 327 int n_bmps = (hpfs_sb(s)->sb_fs_size + 0x4000 - 1) >> 14; 300 328 int b = hpfs_sb(s)->sb_c_bitmap & 0x0fffffff; 301 329 int i, j; 302 - unsigned *bmp; 330 + u32 *bmp; 303 331 struct quad_buffer_head qbh; 304 332 if ((bmp = hpfs_map_dnode_bitmap(s, &qbh))) { 305 333 for (j = 0; j < 512; j++) { 306 334 unsigned k; 307 - if (!bmp[j]) continue; 308 - for (k = bmp[j]; k; k >>= 1) if (k & 1) if (!--n) { 335 + if (!le32_to_cpu(bmp[j])) continue; 336 + for (k = le32_to_cpu(bmp[j]); k; k >>= 1) if (k & 1) if (!--n) { 309 337 hpfs_brelse4(&qbh); 310 338 return 0; 311 339 } ··· 324 352 chk_bmp: 325 353 if (bmp) { 326 354 for (j = 0; j < 512; j++) { 327 - unsigned k; 328 - if (!bmp[j]) continue; 355 + u32 k; 356 + if (!le32_to_cpu(bmp[j])) continue; 329 357 for (k = 0xf; k; k <<= 4) 330 - if ((bmp[j] & k) == k) { 358 + if ((le32_to_cpu(bmp[j]) & k) == k) { 
331 359 if (!--n) { 332 360 hpfs_brelse4(&qbh); 333 361 return 0; ··· 351 379 hpfs_free_sectors(s, dno, 4); 352 380 } else { 353 381 struct quad_buffer_head qbh; 354 - unsigned *bmp; 382 + u32 *bmp; 355 383 unsigned ssec = (dno - hpfs_sb(s)->sb_dirband_start) / 4; 356 - lock_super(s); 357 384 if (!(bmp = hpfs_map_dnode_bitmap(s, &qbh))) { 358 - unlock_super(s); 359 385 return; 360 386 } 361 - bmp[ssec >> 5] |= 1 << (ssec & 0x1f); 387 + bmp[ssec >> 5] |= cpu_to_le32(1 << (ssec & 0x1f)); 362 388 hpfs_mark_4buffers_dirty(&qbh); 363 389 hpfs_brelse4(&qbh); 364 - unlock_super(s); 365 390 } 366 391 } 367 392 368 393 struct dnode *hpfs_alloc_dnode(struct super_block *s, secno near, 369 - dnode_secno *dno, struct quad_buffer_head *qbh, 370 - int lock) 394 + dnode_secno *dno, struct quad_buffer_head *qbh) 371 395 { 372 396 struct dnode *d; 373 397 if (hpfs_count_one_bitmap(s, hpfs_sb(s)->sb_dmap) > FREE_DNODES_ADD) { 374 - if (!(*dno = alloc_in_dirband(s, near, lock))) 375 - if (!(*dno = hpfs_alloc_sector(s, near, 4, 0, lock))) return NULL; 398 + if (!(*dno = alloc_in_dirband(s, near))) 399 + if (!(*dno = hpfs_alloc_sector(s, near, 4, 0))) return NULL; 376 400 } else { 377 - if (!(*dno = hpfs_alloc_sector(s, near, 4, 0, lock))) 378 - if (!(*dno = alloc_in_dirband(s, near, lock))) return NULL; 401 + if (!(*dno = hpfs_alloc_sector(s, near, 4, 0))) 402 + if (!(*dno = alloc_in_dirband(s, near))) return NULL; 379 403 } 380 404 if (!(d = hpfs_get_4sectors(s, *dno, qbh))) { 381 405 hpfs_free_dnode(s, *dno); 382 406 return NULL; 383 407 } 384 408 memset(d, 0, 2048); 385 - d->magic = DNODE_MAGIC; 386 - d->first_free = 52; 409 + d->magic = cpu_to_le32(DNODE_MAGIC); 410 + d->first_free = cpu_to_le32(52); 387 411 d->dirent[0] = 32; 388 412 d->dirent[2] = 8; 389 413 d->dirent[30] = 1; 390 414 d->dirent[31] = 255; 391 - d->self = *dno; 415 + d->self = cpu_to_le32(*dno); 392 416 return d; 393 417 } 394 418 ··· 392 424 struct buffer_head **bh) 393 425 { 394 426 struct fnode *f; 395 - if 
(!(*fno = hpfs_alloc_sector(s, near, 1, FNODE_ALLOC_FWD, 1))) return NULL; 427 + if (!(*fno = hpfs_alloc_sector(s, near, 1, FNODE_ALLOC_FWD))) return NULL; 396 428 if (!(f = hpfs_get_sector(s, *fno, bh))) { 397 429 hpfs_free_sectors(s, *fno, 1); 398 430 return NULL; 399 431 } 400 432 memset(f, 0, 512); 401 - f->magic = FNODE_MAGIC; 402 - f->ea_offs = 0xc4; 433 + f->magic = cpu_to_le32(FNODE_MAGIC); 434 + f->ea_offs = cpu_to_le16(0xc4); 403 435 f->btree.n_free_nodes = 8; 404 - f->btree.first_free = 8; 436 + f->btree.first_free = cpu_to_le16(8); 405 437 return f; 406 438 } 407 439 ··· 409 441 struct buffer_head **bh) 410 442 { 411 443 struct anode *a; 412 - if (!(*ano = hpfs_alloc_sector(s, near, 1, ANODE_ALLOC_FWD, 1))) return NULL; 444 + if (!(*ano = hpfs_alloc_sector(s, near, 1, ANODE_ALLOC_FWD))) return NULL; 413 445 if (!(a = hpfs_get_sector(s, *ano, bh))) { 414 446 hpfs_free_sectors(s, *ano, 1); 415 447 return NULL; 416 448 } 417 449 memset(a, 0, 512); 418 - a->magic = ANODE_MAGIC; 419 - a->self = *ano; 450 + a->magic = cpu_to_le32(ANODE_MAGIC); 451 + a->self = cpu_to_le32(*ano); 420 452 a->btree.n_free_nodes = 40; 421 453 a->btree.n_used_nodes = 0; 422 - a->btree.first_free = 8; 454 + a->btree.first_free = cpu_to_le16(8); 423 455 return a; 424 456 }
+69 -69
fs/hpfs/anode.c
··· 22 22 if (hpfs_sb(s)->sb_chk) if (hpfs_stop_cycles(s, a, &c1, &c2, "hpfs_bplus_lookup")) return -1; 23 23 if (btree->internal) { 24 24 for (i = 0; i < btree->n_used_nodes; i++) 25 - if (btree->u.internal[i].file_secno > sec) { 26 - a = btree->u.internal[i].down; 25 + if (le32_to_cpu(btree->u.internal[i].file_secno) > sec) { 26 + a = le32_to_cpu(btree->u.internal[i].down); 27 27 brelse(bh); 28 28 if (!(anode = hpfs_map_anode(s, a, &bh))) return -1; 29 29 btree = &anode->btree; ··· 34 34 return -1; 35 35 } 36 36 for (i = 0; i < btree->n_used_nodes; i++) 37 - if (btree->u.external[i].file_secno <= sec && 38 - btree->u.external[i].file_secno + btree->u.external[i].length > sec) { 39 - a = btree->u.external[i].disk_secno + sec - btree->u.external[i].file_secno; 37 + if (le32_to_cpu(btree->u.external[i].file_secno) <= sec && 38 + le32_to_cpu(btree->u.external[i].file_secno) + le32_to_cpu(btree->u.external[i].length) > sec) { 39 + a = le32_to_cpu(btree->u.external[i].disk_secno) + sec - le32_to_cpu(btree->u.external[i].file_secno); 40 40 if (hpfs_sb(s)->sb_chk) if (hpfs_chk_sectors(s, a, 1, "data")) { 41 41 brelse(bh); 42 42 return -1; 43 43 } 44 44 if (inode) { 45 45 struct hpfs_inode_info *hpfs_inode = hpfs_i(inode); 46 - hpfs_inode->i_file_sec = btree->u.external[i].file_secno; 47 - hpfs_inode->i_disk_sec = btree->u.external[i].disk_secno; 48 - hpfs_inode->i_n_secs = btree->u.external[i].length; 46 + hpfs_inode->i_file_sec = le32_to_cpu(btree->u.external[i].file_secno); 47 + hpfs_inode->i_disk_sec = le32_to_cpu(btree->u.external[i].disk_secno); 48 + hpfs_inode->i_n_secs = le32_to_cpu(btree->u.external[i].length); 49 49 } 50 50 brelse(bh); 51 51 return a; ··· 83 83 return -1; 84 84 } 85 85 if (btree->internal) { 86 - a = btree->u.internal[n].down; 87 - btree->u.internal[n].file_secno = -1; 86 + a = le32_to_cpu(btree->u.internal[n].down); 87 + btree->u.internal[n].file_secno = cpu_to_le32(-1); 88 88 mark_buffer_dirty(bh); 89 89 brelse(bh); 90 90 if 
(hpfs_sb(s)->sb_chk) ··· 94 94 goto go_down; 95 95 } 96 96 if (n >= 0) { 97 - if (btree->u.external[n].file_secno + btree->u.external[n].length != fsecno) { 97 + if (le32_to_cpu(btree->u.external[n].file_secno) + le32_to_cpu(btree->u.external[n].length) != fsecno) { 98 98 hpfs_error(s, "allocated size %08x, trying to add sector %08x, %cnode %08x", 99 - btree->u.external[n].file_secno + btree->u.external[n].length, fsecno, 99 + le32_to_cpu(btree->u.external[n].file_secno) + le32_to_cpu(btree->u.external[n].length), fsecno, 100 100 fnod?'f':'a', node); 101 101 brelse(bh); 102 102 return -1; 103 103 } 104 - if (hpfs_alloc_if_possible(s, se = btree->u.external[n].disk_secno + btree->u.external[n].length)) { 105 - btree->u.external[n].length++; 104 + if (hpfs_alloc_if_possible(s, se = le32_to_cpu(btree->u.external[n].disk_secno) + le32_to_cpu(btree->u.external[n].length))) { 105 + btree->u.external[n].length = cpu_to_le32(le32_to_cpu(btree->u.external[n].length) + 1); 106 106 mark_buffer_dirty(bh); 107 107 brelse(bh); 108 108 return se; ··· 115 115 } 116 116 se = !fnod ? node : (node + 16384) & ~16383; 117 117 } 118 - if (!(se = hpfs_alloc_sector(s, se, 1, fsecno*ALLOC_M>ALLOC_FWD_MAX ? ALLOC_FWD_MAX : fsecno*ALLOC_M<ALLOC_FWD_MIN ? ALLOC_FWD_MIN : fsecno*ALLOC_M, 1))) { 118 + if (!(se = hpfs_alloc_sector(s, se, 1, fsecno*ALLOC_M>ALLOC_FWD_MAX ? ALLOC_FWD_MAX : fsecno*ALLOC_M<ALLOC_FWD_MIN ? ALLOC_FWD_MIN : fsecno*ALLOC_M))) { 119 119 brelse(bh); 120 120 return -1; 121 121 } 122 - fs = n < 0 ? 0 : btree->u.external[n].file_secno + btree->u.external[n].length; 122 + fs = n < 0 ? 0 : le32_to_cpu(btree->u.external[n].file_secno) + le32_to_cpu(btree->u.external[n].length); 123 123 if (!btree->n_free_nodes) { 124 - up = a != node ? anode->up : -1; 124 + up = a != node ? 
le32_to_cpu(anode->up) : -1; 125 125 if (!(anode = hpfs_alloc_anode(s, a, &na, &bh1))) { 126 126 brelse(bh); 127 127 hpfs_free_sectors(s, se, 1); 128 128 return -1; 129 129 } 130 130 if (a == node && fnod) { 131 - anode->up = node; 131 + anode->up = cpu_to_le32(node); 132 132 anode->btree.fnode_parent = 1; 133 133 anode->btree.n_used_nodes = btree->n_used_nodes; 134 134 anode->btree.first_free = btree->first_free; ··· 137 137 btree->internal = 1; 138 138 btree->n_free_nodes = 11; 139 139 btree->n_used_nodes = 1; 140 - btree->first_free = (char *)&(btree->u.internal[1]) - (char *)btree; 141 - btree->u.internal[0].file_secno = -1; 142 - btree->u.internal[0].down = na; 140 + btree->first_free = cpu_to_le16((char *)&(btree->u.internal[1]) - (char *)btree); 141 + btree->u.internal[0].file_secno = cpu_to_le32(-1); 142 + btree->u.internal[0].down = cpu_to_le32(na); 143 143 mark_buffer_dirty(bh); 144 144 } else if (!(ranode = hpfs_alloc_anode(s, /*a*/0, &ra, &bh2))) { 145 145 brelse(bh); ··· 153 153 btree = &anode->btree; 154 154 } 155 155 btree->n_free_nodes--; n = btree->n_used_nodes++; 156 - btree->first_free += 12; 157 - btree->u.external[n].disk_secno = se; 158 - btree->u.external[n].file_secno = fs; 159 - btree->u.external[n].length = 1; 156 + btree->first_free = cpu_to_le16(le16_to_cpu(btree->first_free) + 12); 157 + btree->u.external[n].disk_secno = cpu_to_le32(se); 158 + btree->u.external[n].file_secno = cpu_to_le32(fs); 159 + btree->u.external[n].length = cpu_to_le32(1); 160 160 mark_buffer_dirty(bh); 161 161 brelse(bh); 162 162 if ((a == node && fnod) || na == -1) return se; 163 163 c2 = 0; 164 - while (up != -1) { 164 + while (up != (anode_secno)-1) { 165 165 struct anode *new_anode; 166 166 if (hpfs_sb(s)->sb_chk) 167 167 if (hpfs_stop_cycles(s, up, &c1, &c2, "hpfs_add_sector_to_btree #2")) return -1; ··· 174 174 } 175 175 if (btree->n_free_nodes) { 176 176 btree->n_free_nodes--; n = btree->n_used_nodes++; 177 - btree->first_free += 8; 178 - 
btree->u.internal[n].file_secno = -1; 179 - btree->u.internal[n].down = na; 180 - btree->u.internal[n-1].file_secno = fs; 177 + btree->first_free = cpu_to_le16(le16_to_cpu(btree->first_free) + 8); 178 + btree->u.internal[n].file_secno = cpu_to_le32(-1); 179 + btree->u.internal[n].down = cpu_to_le32(na); 180 + btree->u.internal[n-1].file_secno = cpu_to_le32(fs); 181 181 mark_buffer_dirty(bh); 182 182 brelse(bh); 183 183 brelse(bh2); 184 184 hpfs_free_sectors(s, ra, 1); 185 185 if ((anode = hpfs_map_anode(s, na, &bh))) { 186 - anode->up = up; 186 + anode->up = cpu_to_le32(up); 187 187 anode->btree.fnode_parent = up == node && fnod; 188 188 mark_buffer_dirty(bh); 189 189 brelse(bh); 190 190 } 191 191 return se; 192 192 } 193 - up = up != node ? anode->up : -1; 194 - btree->u.internal[btree->n_used_nodes - 1].file_secno = /*fs*/-1; 193 + up = up != node ? le32_to_cpu(anode->up) : -1; 194 + btree->u.internal[btree->n_used_nodes - 1].file_secno = cpu_to_le32(/*fs*/-1); 195 195 mark_buffer_dirty(bh); 196 196 brelse(bh); 197 197 a = na; 198 198 if ((new_anode = hpfs_alloc_anode(s, a, &na, &bh))) { 199 199 anode = new_anode; 200 - /*anode->up = up != -1 ? up : ra;*/ 200 + /*anode->up = cpu_to_le32(up != -1 ? 
up : ra);*/ 201 201 anode->btree.internal = 1; 202 202 anode->btree.n_used_nodes = 1; 203 203 anode->btree.n_free_nodes = 59; 204 - anode->btree.first_free = 16; 205 - anode->btree.u.internal[0].down = a; 206 - anode->btree.u.internal[0].file_secno = -1; 204 + anode->btree.first_free = cpu_to_le16(16); 205 + anode->btree.u.internal[0].down = cpu_to_le32(a); 206 + anode->btree.u.internal[0].file_secno = cpu_to_le32(-1); 207 207 mark_buffer_dirty(bh); 208 208 brelse(bh); 209 209 if ((anode = hpfs_map_anode(s, a, &bh))) { 210 - anode->up = na; 210 + anode->up = cpu_to_le32(na); 211 211 mark_buffer_dirty(bh); 212 212 brelse(bh); 213 213 } 214 214 } else na = a; 215 215 } 216 216 if ((anode = hpfs_map_anode(s, na, &bh))) { 217 - anode->up = node; 217 + anode->up = cpu_to_le32(node); 218 218 if (fnod) anode->btree.fnode_parent = 1; 219 219 mark_buffer_dirty(bh); 220 220 brelse(bh); ··· 232 232 } 233 233 btree = &fnode->btree; 234 234 } 235 - ranode->up = node; 236 - memcpy(&ranode->btree, btree, btree->first_free); 235 + ranode->up = cpu_to_le32(node); 236 + memcpy(&ranode->btree, btree, le16_to_cpu(btree->first_free)); 237 237 if (fnod) ranode->btree.fnode_parent = 1; 238 238 ranode->btree.n_free_nodes = (ranode->btree.internal ? 60 : 40) - ranode->btree.n_used_nodes; 239 239 if (ranode->btree.internal) for (n = 0; n < ranode->btree.n_used_nodes; n++) { 240 240 struct anode *unode; 241 - if ((unode = hpfs_map_anode(s, ranode->u.internal[n].down, &bh1))) { 242 - unode->up = ra; 241 + if ((unode = hpfs_map_anode(s, le32_to_cpu(ranode->u.internal[n].down), &bh1))) { 242 + unode->up = cpu_to_le32(ra); 243 243 unode->btree.fnode_parent = 0; 244 244 mark_buffer_dirty(bh1); 245 245 brelse(bh1); ··· 248 248 btree->internal = 1; 249 249 btree->n_free_nodes = fnod ? 
10 : 58; 250 250 btree->n_used_nodes = 2; 251 - btree->first_free = (char *)&btree->u.internal[2] - (char *)btree; 252 - btree->u.internal[0].file_secno = fs; 253 - btree->u.internal[0].down = ra; 254 - btree->u.internal[1].file_secno = -1; 255 - btree->u.internal[1].down = na; 251 + btree->first_free = cpu_to_le16((char *)&btree->u.internal[2] - (char *)btree); 252 + btree->u.internal[0].file_secno = cpu_to_le32(fs); 253 + btree->u.internal[0].down = cpu_to_le32(ra); 254 + btree->u.internal[1].file_secno = cpu_to_le32(-1); 255 + btree->u.internal[1].down = cpu_to_le32(na); 256 256 mark_buffer_dirty(bh); 257 257 brelse(bh); 258 258 mark_buffer_dirty(bh2); ··· 279 279 go_down: 280 280 d2 = 0; 281 281 while (btree1->internal) { 282 - ano = btree1->u.internal[pos].down; 282 + ano = le32_to_cpu(btree1->u.internal[pos].down); 283 283 if (level) brelse(bh); 284 284 if (hpfs_sb(s)->sb_chk) 285 285 if (hpfs_stop_cycles(s, ano, &d1, &d2, "hpfs_remove_btree #1")) ··· 290 290 pos = 0; 291 291 } 292 292 for (i = 0; i < btree1->n_used_nodes; i++) 293 - hpfs_free_sectors(s, btree1->u.external[i].disk_secno, btree1->u.external[i].length); 293 + hpfs_free_sectors(s, le32_to_cpu(btree1->u.external[i].disk_secno), le32_to_cpu(btree1->u.external[i].length)); 294 294 go_up: 295 295 if (!level) return; 296 296 brelse(bh); ··· 298 298 if (hpfs_stop_cycles(s, ano, &c1, &c2, "hpfs_remove_btree #2")) return; 299 299 hpfs_free_sectors(s, ano, 1); 300 300 oano = ano; 301 - ano = anode->up; 301 + ano = le32_to_cpu(anode->up); 302 302 if (--level) { 303 303 if (!(anode = hpfs_map_anode(s, ano, &bh))) return; 304 304 btree1 = &anode->btree; 305 305 } else btree1 = btree; 306 306 for (i = 0; i < btree1->n_used_nodes; i++) { 307 - if (btree1->u.internal[i].down == oano) { 307 + if (le32_to_cpu(btree1->u.internal[i].down) == oano) { 308 308 if ((pos = i + 1) < btree1->n_used_nodes) 309 309 goto go_down; 310 310 else ··· 411 411 if (fno) { 412 412 btree->n_free_nodes = 8; 413 413 
btree->n_used_nodes = 0; 414 - btree->first_free = 8; 414 + btree->first_free = cpu_to_le16(8); 415 415 btree->internal = 0; 416 416 mark_buffer_dirty(bh); 417 417 } else hpfs_free_sectors(s, f, 1); ··· 421 421 while (btree->internal) { 422 422 nodes = btree->n_used_nodes + btree->n_free_nodes; 423 423 for (i = 0; i < btree->n_used_nodes; i++) 424 - if (btree->u.internal[i].file_secno >= secs) goto f; 424 + if (le32_to_cpu(btree->u.internal[i].file_secno) >= secs) goto f; 425 425 brelse(bh); 426 426 hpfs_error(s, "internal btree %08x doesn't end with -1", node); 427 427 return; 428 428 f: 429 429 for (j = i + 1; j < btree->n_used_nodes; j++) 430 - hpfs_ea_remove(s, btree->u.internal[j].down, 1, 0); 430 + hpfs_ea_remove(s, le32_to_cpu(btree->u.internal[j].down), 1, 0); 431 431 btree->n_used_nodes = i + 1; 432 432 btree->n_free_nodes = nodes - btree->n_used_nodes; 433 - btree->first_free = 8 + 8 * btree->n_used_nodes; 433 + btree->first_free = cpu_to_le16(8 + 8 * btree->n_used_nodes); 434 434 mark_buffer_dirty(bh); 435 - if (btree->u.internal[i].file_secno == secs) { 435 + if (btree->u.internal[i].file_secno == cpu_to_le32(secs)) { 436 436 brelse(bh); 437 437 return; 438 438 } 439 - node = btree->u.internal[i].down; 439 + node = le32_to_cpu(btree->u.internal[i].down); 440 440 brelse(bh); 441 441 if (hpfs_sb(s)->sb_chk) 442 442 if (hpfs_stop_cycles(s, node, &c1, &c2, "hpfs_truncate_btree")) ··· 446 446 } 447 447 nodes = btree->n_used_nodes + btree->n_free_nodes; 448 448 for (i = 0; i < btree->n_used_nodes; i++) 449 - if (btree->u.external[i].file_secno + btree->u.external[i].length >= secs) goto ff; 449 + if (le32_to_cpu(btree->u.external[i].file_secno) + le32_to_cpu(btree->u.external[i].length) >= secs) goto ff; 450 450 brelse(bh); 451 451 return; 452 452 ff: 453 - if (secs <= btree->u.external[i].file_secno) { 453 + if (secs <= le32_to_cpu(btree->u.external[i].file_secno)) { 454 454 hpfs_error(s, "there is an allocation error in file %08x, sector %08x", f, secs); 
455 455 if (i) i--; 456 456 } 457 - else if (btree->u.external[i].file_secno + btree->u.external[i].length > secs) { 458 - hpfs_free_sectors(s, btree->u.external[i].disk_secno + secs - 459 - btree->u.external[i].file_secno, btree->u.external[i].length 460 - - secs + btree->u.external[i].file_secno); /* I hope gcc optimizes this :-) */ 461 - btree->u.external[i].length = secs - btree->u.external[i].file_secno; 457 + else if (le32_to_cpu(btree->u.external[i].file_secno) + le32_to_cpu(btree->u.external[i].length) > secs) { 458 + hpfs_free_sectors(s, le32_to_cpu(btree->u.external[i].disk_secno) + secs - 459 + le32_to_cpu(btree->u.external[i].file_secno), le32_to_cpu(btree->u.external[i].length) 460 + - secs + le32_to_cpu(btree->u.external[i].file_secno)); /* I hope gcc optimizes this :-) */ 461 + btree->u.external[i].length = cpu_to_le32(secs - le32_to_cpu(btree->u.external[i].file_secno)); 462 462 } 463 463 for (j = i + 1; j < btree->n_used_nodes; j++) 464 - hpfs_free_sectors(s, btree->u.external[j].disk_secno, btree->u.external[j].length); 464 + hpfs_free_sectors(s, le32_to_cpu(btree->u.external[j].disk_secno), le32_to_cpu(btree->u.external[j].length)); 465 465 btree->n_used_nodes = i + 1; 466 466 btree->n_free_nodes = nodes - btree->n_used_nodes; 467 - btree->first_free = 8 + 12 * btree->n_used_nodes; 467 + btree->first_free = cpu_to_le16(8 + 12 * btree->n_used_nodes); 468 468 mark_buffer_dirty(bh); 469 469 brelse(bh); 470 470 } ··· 480 480 struct extended_attribute *ea_end; 481 481 if (!(fnode = hpfs_map_fnode(s, fno, &bh))) return; 482 482 if (!fnode->dirflag) hpfs_remove_btree(s, &fnode->btree); 483 - else hpfs_remove_dtree(s, fnode->u.external[0].disk_secno); 483 + else hpfs_remove_dtree(s, le32_to_cpu(fnode->u.external[0].disk_secno)); 484 484 ea_end = fnode_end_ea(fnode); 485 485 for (ea = fnode_ea(fnode); ea < ea_end; ea = next_ea(ea)) 486 486 if (ea->indirect) 487 487 hpfs_ea_remove(s, ea_sec(ea), ea->anode, ea_len(ea)); 488 - hpfs_ea_ext_remove(s, 
fnode->ea_secno, fnode->ea_anode, fnode->ea_size_l); 488 + hpfs_ea_ext_remove(s, le32_to_cpu(fnode->ea_secno), fnode->ea_anode, le32_to_cpu(fnode->ea_size_l)); 489 489 brelse(bh); 490 490 hpfs_free_sectors(s, fno, 1); 491 491 }
+8 -16
fs/hpfs/buffer.c
··· 9 9 #include <linux/slab.h> 10 10 #include "hpfs_fn.h" 11 11 12 - void hpfs_lock_creation(struct super_block *s) 13 - { 14 - #ifdef DEBUG_LOCKS 15 - printk("lock creation\n"); 16 - #endif 17 - mutex_lock(&hpfs_sb(s)->hpfs_creation_de); 18 - } 19 - 20 - void hpfs_unlock_creation(struct super_block *s) 21 - { 22 - #ifdef DEBUG_LOCKS 23 - printk("unlock creation\n"); 24 - #endif 25 - mutex_unlock(&hpfs_sb(s)->hpfs_creation_de); 26 - } 27 - 28 12 /* Map a sector into a buffer and return pointers to it and to the buffer. */ 29 13 30 14 void *hpfs_map_sector(struct super_block *s, unsigned secno, struct buffer_head **bhp, 31 15 int ahead) 32 16 { 33 17 struct buffer_head *bh; 18 + 19 + hpfs_lock_assert(s); 34 20 35 21 cond_resched(); 36 22 ··· 35 49 { 36 50 struct buffer_head *bh; 37 51 /*return hpfs_map_sector(s, secno, bhp, 0);*/ 52 + 53 + hpfs_lock_assert(s); 38 54 39 55 cond_resched(); 40 56 ··· 57 69 { 58 70 struct buffer_head *bh; 59 71 char *data; 72 + 73 + hpfs_lock_assert(s); 60 74 61 75 cond_resched(); 62 76 ··· 114 124 struct quad_buffer_head *qbh) 115 125 { 116 126 cond_resched(); 127 + 128 + hpfs_lock_assert(s); 117 129 118 130 if (secno & 3) { 119 131 printk("HPFS: hpfs_get_4sectors: unaligned read\n");
+10 -12
fs/hpfs/dir.c
··· 88 88 hpfs_error(inode->i_sb, "not a directory, fnode %08lx", 89 89 (unsigned long)inode->i_ino); 90 90 } 91 - if (hpfs_inode->i_dno != fno->u.external[0].disk_secno) { 91 + if (hpfs_inode->i_dno != le32_to_cpu(fno->u.external[0].disk_secno)) { 92 92 e = 1; 93 - hpfs_error(inode->i_sb, "corrupted inode: i_dno == %08x, fnode -> dnode == %08x", hpfs_inode->i_dno, fno->u.external[0].disk_secno); 93 + hpfs_error(inode->i_sb, "corrupted inode: i_dno == %08x, fnode -> dnode == %08x", hpfs_inode->i_dno, le32_to_cpu(fno->u.external[0].disk_secno)); 94 94 } 95 95 brelse(bh); 96 96 if (e) { ··· 156 156 goto again; 157 157 } 158 158 tempname = hpfs_translate_name(inode->i_sb, de->name, de->namelen, lc, de->not_8x3); 159 - if (filldir(dirent, tempname, de->namelen, old_pos, de->fnode, DT_UNKNOWN) < 0) { 159 + if (filldir(dirent, tempname, de->namelen, old_pos, le32_to_cpu(de->fnode), DT_UNKNOWN) < 0) { 160 160 filp->f_pos = old_pos; 161 161 if (tempname != de->name) kfree(tempname); 162 162 hpfs_brelse4(&qbh); ··· 221 221 * Get inode number, what we're after. 222 222 */ 223 223 224 - ino = de->fnode; 224 + ino = le32_to_cpu(de->fnode); 225 225 226 226 /* 227 227 * Go find or make an inode. ··· 236 236 hpfs_init_inode(result); 237 237 if (de->directory) 238 238 hpfs_read_inode(result); 239 - else if (de->ea_size && hpfs_sb(dir->i_sb)->sb_eas) 239 + else if (le32_to_cpu(de->ea_size) && hpfs_sb(dir->i_sb)->sb_eas) 240 240 hpfs_read_inode(result); 241 241 else { 242 242 result->i_mode |= S_IFREG; ··· 250 250 hpfs_result = hpfs_i(result); 251 251 if (!de->directory) hpfs_result->i_parent_dir = dir->i_ino; 252 252 253 - hpfs_decide_conv(result, name, len); 254 - 255 253 if (de->has_acl || de->has_xtd_perm) if (!(dir->i_sb->s_flags & MS_RDONLY)) { 256 254 hpfs_error(result->i_sb, "ACLs or XPERM found. This is probably HPFS386. This driver doesn't support it now. 
Send me some info on these structures"); 257 255 goto bail1; ··· 261 263 */ 262 264 263 265 if (!result->i_ctime.tv_sec) { 264 - if (!(result->i_ctime.tv_sec = local_to_gmt(dir->i_sb, de->creation_date))) 266 + if (!(result->i_ctime.tv_sec = local_to_gmt(dir->i_sb, le32_to_cpu(de->creation_date)))) 265 267 result->i_ctime.tv_sec = 1; 266 268 result->i_ctime.tv_nsec = 0; 267 - result->i_mtime.tv_sec = local_to_gmt(dir->i_sb, de->write_date); 269 + result->i_mtime.tv_sec = local_to_gmt(dir->i_sb, le32_to_cpu(de->write_date)); 268 270 result->i_mtime.tv_nsec = 0; 269 - result->i_atime.tv_sec = local_to_gmt(dir->i_sb, de->read_date); 271 + result->i_atime.tv_sec = local_to_gmt(dir->i_sb, le32_to_cpu(de->read_date)); 270 272 result->i_atime.tv_nsec = 0; 271 - hpfs_result->i_ea_size = de->ea_size; 273 + hpfs_result->i_ea_size = le32_to_cpu(de->ea_size); 272 274 if (!hpfs_result->i_ea_mode && de->read_only) 273 275 result->i_mode &= ~0222; 274 276 if (!de->directory) { 275 277 if (result->i_size == -1) { 276 - result->i_size = de->file_size; 278 + result->i_size = le32_to_cpu(de->file_size); 277 279 result->i_data.a_ops = &hpfs_aops; 278 280 hpfs_i(result)->mmu_private = result->i_size; 279 281 /*
+84 -90
fs/hpfs/dnode.c
··· 14 14 struct hpfs_dirent *de_end = dnode_end_de(d); 15 15 int i = 1; 16 16 for (de = dnode_first_de(d); de < de_end; de = de_next_de(de)) { 17 - if (de == fde) return ((loff_t) d->self << 4) | (loff_t)i; 17 + if (de == fde) return ((loff_t) le32_to_cpu(d->self) << 4) | (loff_t)i; 18 18 i++; 19 19 } 20 20 printk("HPFS: get_pos: not_found\n"); 21 - return ((loff_t)d->self << 4) | (loff_t)1; 21 + return ((loff_t)le32_to_cpu(d->self) << 4) | (loff_t)1; 22 22 } 23 23 24 24 void hpfs_add_pos(struct inode *inode, loff_t *pos) ··· 130 130 { 131 131 struct hpfs_dirent *de; 132 132 if (!(de = dnode_last_de(d))) { 133 - hpfs_error(s, "set_last_pointer: empty dnode %08x", d->self); 133 + hpfs_error(s, "set_last_pointer: empty dnode %08x", le32_to_cpu(d->self)); 134 134 return; 135 135 } 136 136 if (hpfs_sb(s)->sb_chk) { 137 137 if (de->down) { 138 138 hpfs_error(s, "set_last_pointer: dnode %08x has already last pointer %08x", 139 - d->self, de_down_pointer(de)); 139 + le32_to_cpu(d->self), de_down_pointer(de)); 140 140 return; 141 141 } 142 - if (de->length != 32) { 143 - hpfs_error(s, "set_last_pointer: bad last dirent in dnode %08x", d->self); 142 + if (le16_to_cpu(de->length) != 32) { 143 + hpfs_error(s, "set_last_pointer: bad last dirent in dnode %08x", le32_to_cpu(d->self)); 144 144 return; 145 145 } 146 146 } 147 147 if (ptr) { 148 - if ((d->first_free += 4) > 2048) { 149 - hpfs_error(s,"set_last_pointer: too long dnode %08x", d->self); 150 - d->first_free -= 4; 148 + d->first_free = cpu_to_le32(le32_to_cpu(d->first_free) + 4); 149 + if (le32_to_cpu(d->first_free) > 2048) { 150 + hpfs_error(s, "set_last_pointer: too long dnode %08x", le32_to_cpu(d->self)); 151 + d->first_free = cpu_to_le32(le32_to_cpu(d->first_free) - 4); 151 152 return; 152 153 } 153 - de->length = 36; 154 + de->length = cpu_to_le16(36); 154 155 de->down = 1; 155 - *(dnode_secno *)((char *)de + 32) = ptr; 156 + *(dnode_secno *)((char *)de + 32) = cpu_to_le32(ptr); 156 157 } 157 158 } 158 159 ··· 169 
168 for (de = dnode_first_de(d); de < de_end; de = de_next_de(de)) { 170 169 int c = hpfs_compare_names(s, name, namelen, de->name, de->namelen, de->last); 171 170 if (!c) { 172 - hpfs_error(s, "name (%c,%d) already exists in dnode %08x", *name, namelen, d->self); 171 + hpfs_error(s, "name (%c,%d) already exists in dnode %08x", *name, namelen, le32_to_cpu(d->self)); 173 172 return NULL; 174 173 } 175 174 if (c < 0) break; ··· 177 176 memmove((char *)de + d_size, de, (char *)de_end - (char *)de); 178 177 memset(de, 0, d_size); 179 178 if (down_ptr) { 180 - *(int *)((char *)de + d_size - 4) = down_ptr; 179 + *(dnode_secno *)((char *)de + d_size - 4) = cpu_to_le32(down_ptr); 181 180 de->down = 1; 182 181 } 183 - de->length = d_size; 184 - if (down_ptr) de->down = 1; 182 + de->length = cpu_to_le16(d_size); 185 183 de->not_8x3 = hpfs_is_name_long(name, namelen); 186 184 de->namelen = namelen; 187 185 memcpy(de->name, name, namelen); 188 - d->first_free += d_size; 186 + d->first_free = cpu_to_le32(le32_to_cpu(d->first_free) + d_size); 189 187 return de; 190 188 } 191 189 ··· 194 194 struct hpfs_dirent *de) 195 195 { 196 196 if (de->last) { 197 - hpfs_error(s, "attempt to delete last dirent in dnode %08x", d->self); 197 + hpfs_error(s, "attempt to delete last dirent in dnode %08x", le32_to_cpu(d->self)); 198 198 return; 199 199 } 200 - d->first_free -= de->length; 201 - memmove(de, de_next_de(de), d->first_free + (char *)d - (char *)de); 200 + d->first_free = cpu_to_le32(le32_to_cpu(d->first_free) - le16_to_cpu(de->length)); 201 + memmove(de, de_next_de(de), le32_to_cpu(d->first_free) + (char *)d - (char *)de); 202 202 } 203 203 204 204 static void fix_up_ptrs(struct super_block *s, struct dnode *d) 205 205 { 206 206 struct hpfs_dirent *de; 207 207 struct hpfs_dirent *de_end = dnode_end_de(d); 208 - dnode_secno dno = d->self; 208 + dnode_secno dno = le32_to_cpu(d->self); 209 209 for (de = dnode_first_de(d); de < de_end; de = de_next_de(de)) 210 210 if (de->down) { 211 211 
struct quad_buffer_head qbh; 212 212 struct dnode *dd; 213 213 if ((dd = hpfs_map_dnode(s, de_down_pointer(de), &qbh))) { 214 - if (dd->up != dno || dd->root_dnode) { 215 - dd->up = dno; 214 + if (le32_to_cpu(dd->up) != dno || dd->root_dnode) { 215 + dd->up = cpu_to_le32(dno); 216 216 dd->root_dnode = 0; 217 217 hpfs_mark_4buffers_dirty(&qbh); 218 218 } ··· 262 262 kfree(nname); 263 263 return 1; 264 264 } 265 - if (d->first_free + de_size(namelen, down_ptr) <= 2048) { 265 + if (le32_to_cpu(d->first_free) + de_size(namelen, down_ptr) <= 2048) { 266 266 loff_t t; 267 267 copy_de(de=hpfs_add_de(i->i_sb, d, name, namelen, down_ptr), new_de); 268 268 t = get_pos(d, de); ··· 286 286 kfree(nname); 287 287 return 1; 288 288 } 289 - memcpy(nd, d, d->first_free); 289 + memcpy(nd, d, le32_to_cpu(d->first_free)); 290 290 copy_de(de = hpfs_add_de(i->i_sb, nd, name, namelen, down_ptr), new_de); 291 291 for_all_poss(i, hpfs_pos_ins, get_pos(nd, de), 1); 292 292 h = ((char *)dnode_last_de(nd) - (char *)nd) / 2 + 10; 293 - if (!(ad = hpfs_alloc_dnode(i->i_sb, d->up, &adno, &qbh1, 0))) { 293 + if (!(ad = hpfs_alloc_dnode(i->i_sb, le32_to_cpu(d->up), &adno, &qbh1))) { 294 294 hpfs_error(i->i_sb, "unable to alloc dnode - dnode tree will be corrupted"); 295 295 hpfs_brelse4(&qbh); 296 296 kfree(nd); ··· 313 313 down_ptr = adno; 314 314 set_last_pointer(i->i_sb, ad, de->down ? 
de_down_pointer(de) : 0); 315 315 de = de_next_de(de); 316 - memmove((char *)nd + 20, de, nd->first_free + (char *)nd - (char *)de); 317 - nd->first_free -= (char *)de - (char *)nd - 20; 318 - memcpy(d, nd, nd->first_free); 316 + memmove((char *)nd + 20, de, le32_to_cpu(nd->first_free) + (char *)nd - (char *)de); 317 + nd->first_free = cpu_to_le32(le32_to_cpu(nd->first_free) - ((char *)de - (char *)nd - 20)); 318 + memcpy(d, nd, le32_to_cpu(nd->first_free)); 319 319 for_all_poss(i, hpfs_pos_del, (loff_t)dno << 4, pos); 320 320 fix_up_ptrs(i->i_sb, ad); 321 321 if (!d->root_dnode) { 322 - dno = ad->up = d->up; 322 + ad->up = d->up; 323 + dno = le32_to_cpu(ad->up); 323 324 hpfs_mark_4buffers_dirty(&qbh); 324 325 hpfs_brelse4(&qbh); 325 326 hpfs_mark_4buffers_dirty(&qbh1); 326 327 hpfs_brelse4(&qbh1); 327 328 goto go_up; 328 329 } 329 - if (!(rd = hpfs_alloc_dnode(i->i_sb, d->up, &rdno, &qbh2, 0))) { 330 + if (!(rd = hpfs_alloc_dnode(i->i_sb, le32_to_cpu(d->up), &rdno, &qbh2))) { 330 331 hpfs_error(i->i_sb, "unable to alloc dnode - dnode tree will be corrupted"); 331 332 hpfs_brelse4(&qbh); 332 333 hpfs_brelse4(&qbh1); ··· 339 338 i->i_blocks += 4; 340 339 rd->root_dnode = 1; 341 340 rd->up = d->up; 342 - if (!(fnode = hpfs_map_fnode(i->i_sb, d->up, &bh))) { 341 + if (!(fnode = hpfs_map_fnode(i->i_sb, le32_to_cpu(d->up), &bh))) { 343 342 hpfs_free_dnode(i->i_sb, rdno); 344 343 hpfs_brelse4(&qbh); 345 344 hpfs_brelse4(&qbh1); ··· 348 347 kfree(nname); 349 348 return 1; 350 349 } 351 - fnode->u.external[0].disk_secno = rdno; 350 + fnode->u.external[0].disk_secno = cpu_to_le32(rdno); 352 351 mark_buffer_dirty(bh); 353 352 brelse(bh); 354 - d->up = ad->up = hpfs_i(i)->i_dno = rdno; 353 + hpfs_i(i)->i_dno = rdno; 354 + d->up = ad->up = cpu_to_le32(rdno); 355 355 d->root_dnode = ad->root_dnode = 0; 356 356 hpfs_mark_4buffers_dirty(&qbh); 357 357 hpfs_brelse4(&qbh); ··· 375 373 376 374 int hpfs_add_dirent(struct inode *i, 377 375 const unsigned char *name, unsigned namelen, 
378 - struct hpfs_dirent *new_de, int cdepth) 376 + struct hpfs_dirent *new_de) 379 377 { 380 378 struct hpfs_inode_info *hpfs_inode = hpfs_i(i); 381 379 struct dnode *d; ··· 405 403 } 406 404 } 407 405 hpfs_brelse4(&qbh); 408 - if (!cdepth) hpfs_lock_creation(i->i_sb); 409 406 if (hpfs_check_free_dnodes(i->i_sb, FREE_DNODES_ADD)) { 410 407 c = 1; 411 408 goto ret; ··· 412 411 i->i_version++; 413 412 c = hpfs_add_to_dnode(i, dno, name, namelen, new_de, 0); 414 413 ret: 415 - if (!cdepth) hpfs_unlock_creation(i->i_sb); 416 414 return c; 417 415 } 418 416 ··· 437 437 return 0; 438 438 if (!(dnode = hpfs_map_dnode(i->i_sb, dno, &qbh))) return 0; 439 439 if (hpfs_sb(i->i_sb)->sb_chk) { 440 - if (dnode->up != chk_up) { 440 + if (le32_to_cpu(dnode->up) != chk_up) { 441 441 hpfs_error(i->i_sb, "move_to_top: up pointer from %08x should be %08x, is %08x", 442 - dno, chk_up, dnode->up); 442 + dno, chk_up, le32_to_cpu(dnode->up)); 443 443 hpfs_brelse4(&qbh); 444 444 return 0; 445 445 } ··· 455 455 hpfs_brelse4(&qbh); 456 456 } 457 457 while (!(de = dnode_pre_last_de(dnode))) { 458 - dnode_secno up = dnode->up; 458 + dnode_secno up = le32_to_cpu(dnode->up); 459 459 hpfs_brelse4(&qbh); 460 460 hpfs_free_dnode(i->i_sb, dno); 461 461 i->i_size -= 2048; ··· 474 474 hpfs_brelse4(&qbh); 475 475 return 0; 476 476 } 477 - dnode->first_free -= 4; 478 - de->length -= 4; 477 + dnode->first_free = cpu_to_le32(le32_to_cpu(dnode->first_free) - 4); 478 + de->length = cpu_to_le16(le16_to_cpu(de->length) - 4); 479 479 de->down = 0; 480 480 hpfs_mark_4buffers_dirty(&qbh); 481 481 dno = up; ··· 483 483 t = get_pos(dnode, de); 484 484 for_all_poss(i, hpfs_pos_subst, t, 4); 485 485 for_all_poss(i, hpfs_pos_subst, t + 1, 5); 486 - if (!(nde = kmalloc(de->length, GFP_NOFS))) { 486 + if (!(nde = kmalloc(le16_to_cpu(de->length), GFP_NOFS))) { 487 487 hpfs_error(i->i_sb, "out of memory for dirent - directory will be corrupted"); 488 488 hpfs_brelse4(&qbh); 489 489 return 0; 490 490 } 491 - memcpy(nde, 
de, de->length); 491 + memcpy(nde, de, le16_to_cpu(de->length)); 492 492 ddno = de->down ? de_down_pointer(de) : 0; 493 493 hpfs_delete_de(i->i_sb, dnode, de); 494 494 set_last_pointer(i->i_sb, dnode, ddno); ··· 517 517 try_it_again: 518 518 if (hpfs_stop_cycles(i->i_sb, dno, &c1, &c2, "delete_empty_dnode")) return; 519 519 if (!(dnode = hpfs_map_dnode(i->i_sb, dno, &qbh))) return; 520 - if (dnode->first_free > 56) goto end; 521 - if (dnode->first_free == 52 || dnode->first_free == 56) { 520 + if (le32_to_cpu(dnode->first_free) > 56) goto end; 521 + if (le32_to_cpu(dnode->first_free) == 52 || le32_to_cpu(dnode->first_free) == 56) { 522 522 struct hpfs_dirent *de_end; 523 523 int root = dnode->root_dnode; 524 - up = dnode->up; 524 + up = le32_to_cpu(dnode->up); 525 525 de = dnode_first_de(dnode); 526 526 down = de->down ? de_down_pointer(de) : 0; 527 527 if (hpfs_sb(i->i_sb)->sb_chk) if (root && !down) { ··· 545 545 return; 546 546 } 547 547 if ((d1 = hpfs_map_dnode(i->i_sb, down, &qbh1))) { 548 - d1->up = up; 548 + d1->up = cpu_to_le32(up); 549 549 d1->root_dnode = 1; 550 550 hpfs_mark_4buffers_dirty(&qbh1); 551 551 hpfs_brelse4(&qbh1); 552 552 } 553 553 if ((fnode = hpfs_map_fnode(i->i_sb, up, &bh))) { 554 - fnode->u.external[0].disk_secno = down; 554 + fnode->u.external[0].disk_secno = cpu_to_le32(down); 555 555 mark_buffer_dirty(bh); 556 556 brelse(bh); 557 557 } ··· 570 570 for_all_poss(i, hpfs_pos_subst, ((loff_t)dno << 4) | 1, ((loff_t)up << 4) | p); 571 571 if (!down) { 572 572 de->down = 0; 573 - de->length -= 4; 574 - dnode->first_free -= 4; 573 + de->length = cpu_to_le16(le16_to_cpu(de->length) - 4); 574 + dnode->first_free = cpu_to_le32(le32_to_cpu(dnode->first_free) - 4); 575 575 memmove(de_next_de(de), (char *)de_next_de(de) + 4, 576 - (char *)dnode + dnode->first_free - (char *)de_next_de(de)); 576 + (char *)dnode + le32_to_cpu(dnode->first_free) - (char *)de_next_de(de)); 577 577 } else { 578 578 struct dnode *d1; 579 579 struct quad_buffer_head 
qbh1; 580 - *(dnode_secno *) ((void *) de + de->length - 4) = down; 580 + *(dnode_secno *) ((void *) de + le16_to_cpu(de->length) - 4) = down; 581 581 if ((d1 = hpfs_map_dnode(i->i_sb, down, &qbh1))) { 582 - d1->up = up; 582 + d1->up = cpu_to_le32(up); 583 583 hpfs_mark_4buffers_dirty(&qbh1); 584 584 hpfs_brelse4(&qbh1); 585 585 } 586 586 } 587 587 } else { 588 - hpfs_error(i->i_sb, "delete_empty_dnode: dnode %08x, first_free == %03x", dno, dnode->first_free); 588 + hpfs_error(i->i_sb, "delete_empty_dnode: dnode %08x, first_free == %03x", dno, le32_to_cpu(dnode->first_free)); 589 589 goto end; 590 590 } 591 591 ··· 596 596 struct quad_buffer_head qbh1; 597 597 if (!de_next->down) goto endm; 598 598 ndown = de_down_pointer(de_next); 599 - if (!(de_cp = kmalloc(de->length, GFP_NOFS))) { 599 + if (!(de_cp = kmalloc(le16_to_cpu(de->length), GFP_NOFS))) { 600 600 printk("HPFS: out of memory for dtree balancing\n"); 601 601 goto endm; 602 602 } 603 - memcpy(de_cp, de, de->length); 603 + memcpy(de_cp, de, le16_to_cpu(de->length)); 604 604 hpfs_delete_de(i->i_sb, dnode, de); 605 605 hpfs_mark_4buffers_dirty(&qbh); 606 606 hpfs_brelse4(&qbh); 607 607 for_all_poss(i, hpfs_pos_subst, ((loff_t)up << 4) | p, 4); 608 608 for_all_poss(i, hpfs_pos_del, ((loff_t)up << 4) | p, 1); 609 609 if (de_cp->down) if ((d1 = hpfs_map_dnode(i->i_sb, de_down_pointer(de_cp), &qbh1))) { 610 - d1->up = ndown; 610 + d1->up = cpu_to_le32(ndown); 611 611 hpfs_mark_4buffers_dirty(&qbh1); 612 612 hpfs_brelse4(&qbh1); 613 613 } ··· 635 635 struct hpfs_dirent *del = dnode_last_de(d1); 636 636 dlp = del->down ? 
de_down_pointer(del) : 0; 637 637 if (!dlp && down) { 638 - if (d1->first_free > 2044) { 638 + if (le32_to_cpu(d1->first_free) > 2044) { 639 639 if (hpfs_sb(i->i_sb)->sb_chk >= 2) { 640 640 printk("HPFS: warning: unbalanced dnode tree, see hpfs.txt 4 more info\n"); 641 641 printk("HPFS: warning: terminating balancing operation\n"); ··· 647 647 printk("HPFS: warning: unbalanced dnode tree, see hpfs.txt 4 more info\n"); 648 648 printk("HPFS: warning: goin'on\n"); 649 649 } 650 - del->length += 4; 650 + del->length = cpu_to_le16(le16_to_cpu(del->length) + 4); 651 651 del->down = 1; 652 - d1->first_free += 4; 652 + d1->first_free = cpu_to_le32(le32_to_cpu(d1->first_free) + 4); 653 653 } 654 654 if (dlp && !down) { 655 - del->length -= 4; 655 + del->length = cpu_to_le16(le16_to_cpu(del->length) - 4); 656 656 del->down = 0; 657 - d1->first_free -= 4; 657 + d1->first_free = cpu_to_le32(le32_to_cpu(d1->first_free) - 4); 658 658 } else if (down) 659 - *(dnode_secno *) ((void *) del + del->length - 4) = down; 659 + *(dnode_secno *) ((void *) del + le16_to_cpu(del->length) - 4) = cpu_to_le32(down); 660 660 } else goto endm; 661 - if (!(de_cp = kmalloc(de_prev->length, GFP_NOFS))) { 661 + if (!(de_cp = kmalloc(le16_to_cpu(de_prev->length), GFP_NOFS))) { 662 662 printk("HPFS: out of memory for dtree balancing\n"); 663 663 hpfs_brelse4(&qbh1); 664 664 goto endm; 665 665 } 666 666 hpfs_mark_4buffers_dirty(&qbh1); 667 667 hpfs_brelse4(&qbh1); 668 - memcpy(de_cp, de_prev, de_prev->length); 668 + memcpy(de_cp, de_prev, le16_to_cpu(de_prev->length)); 669 669 hpfs_delete_de(i->i_sb, dnode, de_prev); 670 670 if (!de_prev->down) { 671 - de_prev->length += 4; 671 + de_prev->length = cpu_to_le16(le16_to_cpu(de_prev->length) + 4); 672 672 de_prev->down = 1; 673 - dnode->first_free += 4; 673 + dnode->first_free = cpu_to_le32(le32_to_cpu(dnode->first_free) + 4); 674 674 } 675 - *(dnode_secno *) ((void *) de_prev + de_prev->length - 4) = ndown; 675 + *(dnode_secno *) ((void *) de_prev + 
le16_to_cpu(de_prev->length) - 4) = cpu_to_le32(ndown); 676 676 hpfs_mark_4buffers_dirty(&qbh); 677 677 hpfs_brelse4(&qbh); 678 678 for_all_poss(i, hpfs_pos_subst, ((loff_t)up << 4) | (p - 1), 4); 679 679 for_all_poss(i, hpfs_pos_subst, ((loff_t)up << 4) | p, ((loff_t)up << 4) | (p - 1)); 680 680 if (down) if ((d1 = hpfs_map_dnode(i->i_sb, de_down_pointer(de), &qbh1))) { 681 - d1->up = ndown; 681 + d1->up = cpu_to_le32(ndown); 682 682 hpfs_mark_4buffers_dirty(&qbh1); 683 683 hpfs_brelse4(&qbh1); 684 684 } ··· 701 701 { 702 702 struct dnode *dnode = qbh->data; 703 703 dnode_secno down = 0; 704 - int lock = 0; 705 704 loff_t t; 706 705 if (de->first || de->last) { 707 706 hpfs_error(i->i_sb, "hpfs_remove_dirent: attempt to delete first or last dirent in dnode %08x", dno); ··· 709 710 } 710 711 if (de->down) down = de_down_pointer(de); 711 712 if (depth && (de->down || (de == dnode_first_de(dnode) && de_next_de(de)->last))) { 712 - lock = 1; 713 - hpfs_lock_creation(i->i_sb); 714 713 if (hpfs_check_free_dnodes(i->i_sb, FREE_DNODES_DEL)) { 715 714 hpfs_brelse4(qbh); 716 - hpfs_unlock_creation(i->i_sb); 717 715 return 2; 718 716 } 719 717 } ··· 723 727 dnode_secno a = move_to_top(i, down, dno); 724 728 for_all_poss(i, hpfs_pos_subst, 5, t); 725 729 if (a) delete_empty_dnode(i, a); 726 - if (lock) hpfs_unlock_creation(i->i_sb); 727 730 return !a; 728 731 } 729 732 delete_empty_dnode(i, dno); 730 - if (lock) hpfs_unlock_creation(i->i_sb); 731 733 return 0; 732 734 } 733 735 ··· 745 751 ptr = 0; 746 752 go_up: 747 753 if (!(dnode = hpfs_map_dnode(s, dno, &qbh))) return; 748 - if (hpfs_sb(s)->sb_chk) if (odno && odno != -1 && dnode->up != odno) 749 - hpfs_error(s, "hpfs_count_dnodes: bad up pointer; dnode %08x, down %08x points to %08x", odno, dno, dnode->up); 754 + if (hpfs_sb(s)->sb_chk) if (odno && odno != -1 && le32_to_cpu(dnode->up) != odno) 755 + hpfs_error(s, "hpfs_count_dnodes: bad up pointer; dnode %08x, down %08x points to %08x", odno, dno, 
le32_to_cpu(dnode->up)); 750 756 de = dnode_first_de(dnode); 751 757 if (ptr) while(1) { 752 758 if (de->down) if (de_down_pointer(de) == ptr) goto process_de; ··· 770 776 if (!de->first && !de->last && n_items) (*n_items)++; 771 777 if ((de = de_next_de(de)) < dnode_end_de(dnode)) goto next_de; 772 778 ptr = dno; 773 - dno = dnode->up; 779 + dno = le32_to_cpu(dnode->up); 774 780 if (dnode->root_dnode) { 775 781 hpfs_brelse4(&qbh); 776 782 return; ··· 818 824 return d; 819 825 if (!(de = map_nth_dirent(s, d, 1, &qbh, NULL))) return dno; 820 826 if (hpfs_sb(s)->sb_chk) 821 - if (up && ((struct dnode *)qbh.data)->up != up) 822 - hpfs_error(s, "hpfs_de_as_down_as_possible: bad up pointer; dnode %08x, down %08x points to %08x", up, d, ((struct dnode *)qbh.data)->up); 827 + if (up && le32_to_cpu(((struct dnode *)qbh.data)->up) != up) 828 + hpfs_error(s, "hpfs_de_as_down_as_possible: bad up pointer; dnode %08x, down %08x points to %08x", up, d, le32_to_cpu(((struct dnode *)qbh.data)->up)); 823 829 if (!de->down) { 824 830 hpfs_brelse4(&qbh); 825 831 return d; ··· 868 874 /* Going up */ 869 875 if (dnode->root_dnode) goto bail; 870 876 871 - if (!(up_dnode = hpfs_map_dnode(inode->i_sb, dnode->up, &qbh0))) 877 + if (!(up_dnode = hpfs_map_dnode(inode->i_sb, le32_to_cpu(dnode->up), &qbh0))) 872 878 goto bail; 873 879 874 880 end_up_de = dnode_end_de(up_dnode); ··· 876 882 for (up_de = dnode_first_de(up_dnode); up_de < end_up_de; 877 883 up_de = de_next_de(up_de)) { 878 884 if (!(++c & 077)) hpfs_error(inode->i_sb, 879 - "map_pos_dirent: pos crossed dnode boundary; dnode = %08x", dnode->up); 885 + "map_pos_dirent: pos crossed dnode boundary; dnode = %08x", le32_to_cpu(dnode->up)); 880 886 if (up_de->down && de_down_pointer(up_de) == dno) { 881 - *posp = ((loff_t) dnode->up << 4) + c; 887 + *posp = ((loff_t) le32_to_cpu(dnode->up) << 4) + c; 882 888 hpfs_brelse4(&qbh0); 883 889 return de; 884 890 } 885 891 } 886 892 887 893 hpfs_error(inode->i_sb, "map_pos_dirent: pointer to 
dnode %08x not found in parent dnode %08x", 888 - dno, dnode->up); 894 + dno, le32_to_cpu(dnode->up)); 889 895 hpfs_brelse4(&qbh0); 890 896 891 897 bail: ··· 1011 1017 /*name2[15] = 0xff;*/ 1012 1018 name1len = 15; name2len = 256; 1013 1019 } 1014 - if (!(upf = hpfs_map_fnode(s, f->up, &bh))) { 1020 + if (!(upf = hpfs_map_fnode(s, le32_to_cpu(f->up), &bh))) { 1015 1021 kfree(name2); 1016 1022 return NULL; 1017 1023 } 1018 1024 if (!upf->dirflag) { 1019 1025 brelse(bh); 1020 - hpfs_error(s, "fnode %08x has non-directory parent %08x", fno, f->up); 1026 + hpfs_error(s, "fnode %08x has non-directory parent %08x", fno, le32_to_cpu(f->up)); 1021 1027 kfree(name2); 1022 1028 return NULL; 1023 1029 } 1024 - dno = upf->u.external[0].disk_secno; 1030 + dno = le32_to_cpu(upf->u.external[0].disk_secno); 1025 1031 brelse(bh); 1026 1032 go_down: 1027 1033 downd = 0; ··· 1043 1049 return NULL; 1044 1050 } 1045 1051 next_de: 1046 - if (de->fnode == fno) { 1052 + if (le32_to_cpu(de->fnode) == fno) { 1047 1053 kfree(name2); 1048 1054 return de; 1049 1055 } ··· 1059 1065 goto go_down; 1060 1066 } 1061 1067 f: 1062 - if (de->fnode == fno) { 1068 + if (le32_to_cpu(de->fnode) == fno) { 1063 1069 kfree(name2); 1064 1070 return de; 1065 1071 } ··· 1068 1074 if ((de = de_next_de(de)) < de_end) goto next_de; 1069 1075 if (d->root_dnode) goto not_found; 1070 1076 downd = dno; 1071 - dno = d->up; 1077 + dno = le32_to_cpu(d->up); 1072 1078 hpfs_brelse4(qbh); 1073 1079 if (hpfs_sb(s)->sb_chk) 1074 1080 if (hpfs_stop_cycles(s, downd, &d1, &d2, "map_fnode_dirent #2")) {
+69 -67
fs/hpfs/ea.c
··· 24 24 } 25 25 if (hpfs_ea_read(s, a, ano, pos, 4, ex)) return; 26 26 if (ea->indirect) { 27 - if (ea->valuelen != 8) { 27 + if (ea_valuelen(ea) != 8) { 28 28 hpfs_error(s, "ea->indirect set while ea->valuelen!=8, %s %08x, pos %08x", 29 29 ano ? "anode" : "sectors", a, pos); 30 30 return; ··· 33 33 return; 34 34 hpfs_ea_remove(s, ea_sec(ea), ea->anode, ea_len(ea)); 35 35 } 36 - pos += ea->namelen + ea->valuelen + 5; 36 + pos += ea->namelen + ea_valuelen(ea) + 5; 37 37 } 38 38 if (!ano) hpfs_free_sectors(s, a, (len+511) >> 9); 39 39 else { ··· 76 76 unsigned pos; 77 77 int ano, len; 78 78 secno a; 79 + char ex[4 + 255 + 1 + 8]; 79 80 struct extended_attribute *ea; 80 81 struct extended_attribute *ea_end = fnode_end_ea(fnode); 81 82 for (ea = fnode_ea(fnode); ea < ea_end; ea = next_ea(ea)) 82 83 if (!strcmp(ea->name, key)) { 83 84 if (ea->indirect) 84 85 goto indirect; 85 - if (ea->valuelen >= size) 86 + if (ea_valuelen(ea) >= size) 86 87 return -EINVAL; 87 - memcpy(buf, ea_data(ea), ea->valuelen); 88 - buf[ea->valuelen] = 0; 88 + memcpy(buf, ea_data(ea), ea_valuelen(ea)); 89 + buf[ea_valuelen(ea)] = 0; 89 90 return 0; 90 91 } 91 - a = fnode->ea_secno; 92 - len = fnode->ea_size_l; 92 + a = le32_to_cpu(fnode->ea_secno); 93 + len = le32_to_cpu(fnode->ea_size_l); 93 94 ano = fnode->ea_anode; 94 95 pos = 0; 95 96 while (pos < len) { 96 - char ex[4 + 255 + 1 + 8]; 97 97 ea = (struct extended_attribute *)ex; 98 98 if (pos + 4 > len) { 99 99 hpfs_error(s, "EAs don't end correctly, %s %08x, len %08x", ··· 106 106 if (!strcmp(ea->name, key)) { 107 107 if (ea->indirect) 108 108 goto indirect; 109 - if (ea->valuelen >= size) 109 + if (ea_valuelen(ea) >= size) 110 110 return -EINVAL; 111 - if (hpfs_ea_read(s, a, ano, pos + 4 + ea->namelen + 1, ea->valuelen, buf)) 111 + if (hpfs_ea_read(s, a, ano, pos + 4 + ea->namelen + 1, ea_valuelen(ea), buf)) 112 112 return -EIO; 113 - buf[ea->valuelen] = 0; 113 + buf[ea_valuelen(ea)] = 0; 114 114 return 0; 115 115 } 116 - pos += 
ea->namelen + ea->valuelen + 5; 116 + pos += ea->namelen + ea_valuelen(ea) + 5; 117 117 } 118 118 return -ENOENT; 119 119 indirect: ··· 138 138 if (!strcmp(ea->name, key)) { 139 139 if (ea->indirect) 140 140 return get_indirect_ea(s, ea->anode, ea_sec(ea), *size = ea_len(ea)); 141 - if (!(ret = kmalloc((*size = ea->valuelen) + 1, GFP_NOFS))) { 141 + if (!(ret = kmalloc((*size = ea_valuelen(ea)) + 1, GFP_NOFS))) { 142 142 printk("HPFS: out of memory for EA\n"); 143 143 return NULL; 144 144 } 145 - memcpy(ret, ea_data(ea), ea->valuelen); 146 - ret[ea->valuelen] = 0; 145 + memcpy(ret, ea_data(ea), ea_valuelen(ea)); 146 + ret[ea_valuelen(ea)] = 0; 147 147 return ret; 148 148 } 149 - a = fnode->ea_secno; 150 - len = fnode->ea_size_l; 149 + a = le32_to_cpu(fnode->ea_secno); 150 + len = le32_to_cpu(fnode->ea_size_l); 151 151 ano = fnode->ea_anode; 152 152 pos = 0; 153 153 while (pos < len) { ··· 164 164 if (!strcmp(ea->name, key)) { 165 165 if (ea->indirect) 166 166 return get_indirect_ea(s, ea->anode, ea_sec(ea), *size = ea_len(ea)); 167 - if (!(ret = kmalloc((*size = ea->valuelen) + 1, GFP_NOFS))) { 167 + if (!(ret = kmalloc((*size = ea_valuelen(ea)) + 1, GFP_NOFS))) { 168 168 printk("HPFS: out of memory for EA\n"); 169 169 return NULL; 170 170 } 171 - if (hpfs_ea_read(s, a, ano, pos + 4 + ea->namelen + 1, ea->valuelen, ret)) { 171 + if (hpfs_ea_read(s, a, ano, pos + 4 + ea->namelen + 1, ea_valuelen(ea), ret)) { 172 172 kfree(ret); 173 173 return NULL; 174 174 } 175 - ret[ea->valuelen] = 0; 175 + ret[ea_valuelen(ea)] = 0; 176 176 return ret; 177 177 } 178 - pos += ea->namelen + ea->valuelen + 5; 178 + pos += ea->namelen + ea_valuelen(ea) + 5; 179 179 } 180 180 return NULL; 181 181 } ··· 202 202 if (ea->indirect) { 203 203 if (ea_len(ea) == size) 204 204 set_indirect_ea(s, ea->anode, ea_sec(ea), data, size); 205 - } else if (ea->valuelen == size) { 205 + } else if (ea_valuelen(ea) == size) { 206 206 memcpy(ea_data(ea), data, size); 207 207 } 208 208 return; 209 209 } 210 
- a = fnode->ea_secno; 211 - len = fnode->ea_size_l; 210 + a = le32_to_cpu(fnode->ea_secno); 211 + len = le32_to_cpu(fnode->ea_size_l); 212 212 ano = fnode->ea_anode; 213 213 pos = 0; 214 214 while (pos < len) { ··· 228 228 set_indirect_ea(s, ea->anode, ea_sec(ea), data, size); 229 229 } 230 230 else { 231 - if (ea->valuelen == size) 231 + if (ea_valuelen(ea) == size) 232 232 hpfs_ea_write(s, a, ano, pos + 4 + ea->namelen + 1, size, data); 233 233 } 234 234 return; 235 235 } 236 - pos += ea->namelen + ea->valuelen + 5; 236 + pos += ea->namelen + ea_valuelen(ea) + 5; 237 237 } 238 - if (!fnode->ea_offs) { 239 - /*if (fnode->ea_size_s) { 238 + if (!le16_to_cpu(fnode->ea_offs)) { 239 + /*if (le16_to_cpu(fnode->ea_size_s)) { 240 240 hpfs_error(s, "fnode %08x: ea_size_s == %03x, ea_offs == 0", 241 - inode->i_ino, fnode->ea_size_s); 241 + inode->i_ino, le16_to_cpu(fnode->ea_size_s)); 242 242 return; 243 243 }*/ 244 - fnode->ea_offs = 0xc4; 244 + fnode->ea_offs = cpu_to_le16(0xc4); 245 245 } 246 - if (fnode->ea_offs < 0xc4 || fnode->ea_offs + fnode->acl_size_s + fnode->ea_size_s > 0x200) { 246 + if (le16_to_cpu(fnode->ea_offs) < 0xc4 || le16_to_cpu(fnode->ea_offs) + le16_to_cpu(fnode->acl_size_s) + le16_to_cpu(fnode->ea_size_s) > 0x200) { 247 247 hpfs_error(s, "fnode %08lx: ea_offs == %03x, ea_size_s == %03x", 248 248 (unsigned long)inode->i_ino, 249 - fnode->ea_offs, fnode->ea_size_s); 249 + le32_to_cpu(fnode->ea_offs), le16_to_cpu(fnode->ea_size_s)); 250 250 return; 251 251 } 252 - if ((fnode->ea_size_s || !fnode->ea_size_l) && 253 - fnode->ea_offs + fnode->acl_size_s + fnode->ea_size_s + strlen(key) + size + 5 <= 0x200) { 252 + if ((le16_to_cpu(fnode->ea_size_s) || !le32_to_cpu(fnode->ea_size_l)) && 253 + le16_to_cpu(fnode->ea_offs) + le16_to_cpu(fnode->acl_size_s) + le16_to_cpu(fnode->ea_size_s) + strlen(key) + size + 5 <= 0x200) { 254 254 ea = fnode_end_ea(fnode); 255 255 *(char *)ea = 0; 256 256 ea->namelen = strlen(key); 257 - ea->valuelen = size; 257 + 
ea->valuelen_lo = size; 258 + ea->valuelen_hi = size >> 8; 258 259 strcpy(ea->name, key); 259 260 memcpy(ea_data(ea), data, size); 260 - fnode->ea_size_s += strlen(key) + size + 5; 261 + fnode->ea_size_s = cpu_to_le16(le16_to_cpu(fnode->ea_size_s) + strlen(key) + size + 5); 261 262 goto ret; 262 263 } 263 264 /* Most the code here is 99.9993422% unused. I hope there are no bugs. 264 265 But what .. HPFS.IFS has also bugs in ea management. */ 265 - if (fnode->ea_size_s && !fnode->ea_size_l) { 266 + if (le16_to_cpu(fnode->ea_size_s) && !le32_to_cpu(fnode->ea_size_l)) { 266 267 secno n; 267 268 struct buffer_head *bh; 268 269 char *data; 269 - if (!(n = hpfs_alloc_sector(s, fno, 1, 0, 1))) return; 270 + if (!(n = hpfs_alloc_sector(s, fno, 1, 0))) return; 270 271 if (!(data = hpfs_get_sector(s, n, &bh))) { 271 272 hpfs_free_sectors(s, n, 1); 272 273 return; 273 274 } 274 - memcpy(data, fnode_ea(fnode), fnode->ea_size_s); 275 - fnode->ea_size_l = fnode->ea_size_s; 276 - fnode->ea_size_s = 0; 277 - fnode->ea_secno = n; 278 - fnode->ea_anode = 0; 275 + memcpy(data, fnode_ea(fnode), le16_to_cpu(fnode->ea_size_s)); 276 + fnode->ea_size_l = cpu_to_le32(le16_to_cpu(fnode->ea_size_s)); 277 + fnode->ea_size_s = cpu_to_le16(0); 278 + fnode->ea_secno = cpu_to_le32(n); 279 + fnode->ea_anode = cpu_to_le32(0); 279 280 mark_buffer_dirty(bh); 280 281 brelse(bh); 281 282 } 282 - pos = fnode->ea_size_l + 5 + strlen(key) + size; 283 - len = (fnode->ea_size_l + 511) >> 9; 283 + pos = le32_to_cpu(fnode->ea_size_l) + 5 + strlen(key) + size; 284 + len = (le32_to_cpu(fnode->ea_size_l) + 511) >> 9; 284 285 if (pos >= 30000) goto bail; 285 286 while (((pos + 511) >> 9) > len) { 286 287 if (!len) { 287 - if (!(fnode->ea_secno = hpfs_alloc_sector(s, fno, 1, 0, 1))) 288 - goto bail; 288 + secno q = hpfs_alloc_sector(s, fno, 1, 0); 289 + if (!q) goto bail; 290 + fnode->ea_secno = cpu_to_le32(q); 289 291 fnode->ea_anode = 0; 290 292 len++; 291 293 } else if (!fnode->ea_anode) { 292 - if 
(hpfs_alloc_if_possible(s, fnode->ea_secno + len)) { 294 + if (hpfs_alloc_if_possible(s, le32_to_cpu(fnode->ea_secno) + len)) { 293 295 len++; 294 296 } else { 295 297 /* Aargh... don't know how to create ea anodes :-( */ ··· 300 298 anode_secno a_s; 301 299 if (!(anode = hpfs_alloc_anode(s, fno, &a_s, &bh))) 302 300 goto bail; 303 - anode->up = fno; 301 + anode->up = cpu_to_le32(fno); 304 302 anode->btree.fnode_parent = 1; 305 303 anode->btree.n_free_nodes--; 306 304 anode->btree.n_used_nodes++; 307 - anode->btree.first_free += 12; 308 - anode->u.external[0].disk_secno = fnode->ea_secno; 309 - anode->u.external[0].file_secno = 0; 310 - anode->u.external[0].length = len; 305 + anode->btree.first_free = cpu_to_le16(le16_to_cpu(anode->btree.first_free) + 12); 306 + anode->u.external[0].disk_secno = cpu_to_le32(le32_to_cpu(fnode->ea_secno)); 307 + anode->u.external[0].file_secno = cpu_to_le32(0); 308 + anode->u.external[0].length = cpu_to_le32(len); 311 309 mark_buffer_dirty(bh); 312 310 brelse(bh); 313 311 fnode->ea_anode = 1; 314 - fnode->ea_secno = a_s;*/ 312 + fnode->ea_secno = cpu_to_le32(a_s);*/ 315 313 secno new_sec; 316 314 int i; 317 - if (!(new_sec = hpfs_alloc_sector(s, fno, 1, 1 - ((pos + 511) >> 9), 1))) 315 + if (!(new_sec = hpfs_alloc_sector(s, fno, 1, 1 - ((pos + 511) >> 9)))) 318 316 goto bail; 319 317 for (i = 0; i < len; i++) { 320 318 struct buffer_head *bh1, *bh2; 321 319 void *b1, *b2; 322 - if (!(b1 = hpfs_map_sector(s, fnode->ea_secno + i, &bh1, len - i - 1))) { 320 + if (!(b1 = hpfs_map_sector(s, le32_to_cpu(fnode->ea_secno) + i, &bh1, len - i - 1))) { 323 321 hpfs_free_sectors(s, new_sec, (pos + 511) >> 9); 324 322 goto bail; 325 323 } ··· 333 331 mark_buffer_dirty(bh2); 334 332 brelse(bh2); 335 333 } 336 - hpfs_free_sectors(s, fnode->ea_secno, len); 337 - fnode->ea_secno = new_sec; 334 + hpfs_free_sectors(s, le32_to_cpu(fnode->ea_secno), len); 335 + fnode->ea_secno = cpu_to_le32(new_sec); 338 336 len = (pos + 511) >> 9; 339 337 } 340 338 } 
341 339 if (fnode->ea_anode) { 342 - if (hpfs_add_sector_to_btree(s, fnode->ea_secno, 340 + if (hpfs_add_sector_to_btree(s, le32_to_cpu(fnode->ea_secno), 343 341 0, len) != -1) { 344 342 len++; 345 343 } else { ··· 351 349 h[1] = strlen(key); 352 350 h[2] = size & 0xff; 353 351 h[3] = size >> 8; 354 - if (hpfs_ea_write(s, fnode->ea_secno, fnode->ea_anode, fnode->ea_size_l, 4, h)) goto bail; 355 - if (hpfs_ea_write(s, fnode->ea_secno, fnode->ea_anode, fnode->ea_size_l + 4, h[1] + 1, key)) goto bail; 356 - if (hpfs_ea_write(s, fnode->ea_secno, fnode->ea_anode, fnode->ea_size_l + 5 + h[1], size, data)) goto bail; 357 - fnode->ea_size_l = pos; 352 + if (hpfs_ea_write(s, le32_to_cpu(fnode->ea_secno), fnode->ea_anode, le32_to_cpu(fnode->ea_size_l), 4, h)) goto bail; 353 + if (hpfs_ea_write(s, le32_to_cpu(fnode->ea_secno), fnode->ea_anode, le32_to_cpu(fnode->ea_size_l) + 4, h[1] + 1, key)) goto bail; 354 + if (hpfs_ea_write(s, le32_to_cpu(fnode->ea_secno), fnode->ea_anode, le32_to_cpu(fnode->ea_size_l) + 5 + h[1], size, data)) goto bail; 355 + fnode->ea_size_l = cpu_to_le32(pos); 358 356 ret: 359 357 hpfs_i(inode)->i_ea_size += 5 + strlen(key) + size; 360 358 return; 361 359 bail: 362 - if (fnode->ea_secno) 363 - if (fnode->ea_anode) hpfs_truncate_btree(s, fnode->ea_secno, 1, (fnode->ea_size_l + 511) >> 9); 364 - else hpfs_free_sectors(s, fnode->ea_secno + ((fnode->ea_size_l + 511) >> 9), len - ((fnode->ea_size_l + 511) >> 9)); 365 - else fnode->ea_secno = fnode->ea_size_l = 0; 360 + if (le32_to_cpu(fnode->ea_secno)) 361 + if (fnode->ea_anode) hpfs_truncate_btree(s, le32_to_cpu(fnode->ea_secno), 1, (le32_to_cpu(fnode->ea_size_l) + 511) >> 9); 362 + else hpfs_free_sectors(s, le32_to_cpu(fnode->ea_secno) + ((le32_to_cpu(fnode->ea_size_l) + 511) >> 9), len - ((le32_to_cpu(fnode->ea_size_l) + 511) >> 9)); 363 + else fnode->ea_secno = fnode->ea_size_l = cpu_to_le32(0); 366 364 } 367 365
+21 -10
fs/hpfs/file.c
··· 20 20 21 21 int hpfs_file_fsync(struct file *file, int datasync) 22 22 { 23 - /*return file_fsync(file, datasync);*/ 24 - return 0; /* Don't fsync :-) */ 23 + struct inode *inode = file->f_mapping->host; 24 + return sync_blockdev(inode->i_sb->s_bdev); 25 25 } 26 26 27 27 /* ··· 48 48 static void hpfs_truncate(struct inode *i) 49 49 { 50 50 if (IS_IMMUTABLE(i)) return /*-EPERM*/; 51 - hpfs_lock(i->i_sb); 51 + hpfs_lock_assert(i->i_sb); 52 + 52 53 hpfs_i(i)->i_n_secs = 0; 53 54 i->i_blocks = 1 + ((i->i_size + 511) >> 9); 54 55 hpfs_i(i)->mmu_private = i->i_size; 55 56 hpfs_truncate_btree(i->i_sb, i->i_ino, 1, ((i->i_size + 511) >> 9)); 56 57 hpfs_write_inode(i); 57 58 hpfs_i(i)->i_n_secs = 0; 58 - hpfs_unlock(i->i_sb); 59 59 } 60 60 61 61 static int hpfs_get_block(struct inode *inode, sector_t iblock, struct buffer_head *bh_result, int create) 62 62 { 63 + int r; 63 64 secno s; 65 + hpfs_lock(inode->i_sb); 64 66 s = hpfs_bmap(inode, iblock); 65 67 if (s) { 66 68 map_bh(bh_result, inode->i_sb, s); 67 - return 0; 69 + goto ret_0; 68 70 } 69 - if (!create) return 0; 71 + if (!create) goto ret_0; 70 72 if (iblock<<9 != hpfs_i(inode)->mmu_private) { 71 73 BUG(); 72 - return -EIO; 74 + r = -EIO; 75 + goto ret_r; 73 76 } 74 77 if ((s = hpfs_add_sector_to_btree(inode->i_sb, inode->i_ino, 1, inode->i_blocks - 1)) == -1) { 75 78 hpfs_truncate_btree(inode->i_sb, inode->i_ino, 1, inode->i_blocks - 1); 76 - return -ENOSPC; 79 + r = -ENOSPC; 80 + goto ret_r; 77 81 } 78 82 inode->i_blocks++; 79 83 hpfs_i(inode)->mmu_private += 512; 80 84 set_buffer_new(bh_result); 81 85 map_bh(bh_result, inode->i_sb, s); 82 - return 0; 86 + ret_0: 87 + r = 0; 88 + ret_r: 89 + hpfs_unlock(inode->i_sb); 90 + return r; 83 91 } 84 92 85 93 static int hpfs_writepage(struct page *page, struct writeback_control *wbc) ··· 138 130 ssize_t retval; 139 131 140 132 retval = do_sync_write(file, buf, count, ppos); 141 - if (retval > 0) 133 + if (retval > 0) { 134 + hpfs_lock(file->f_path.dentry->d_sb); 142 
135 hpfs_i(file->f_path.dentry->d_inode)->i_dirty = 1; 136 + hpfs_unlock(file->f_path.dentry->d_sb); 137 + } 143 138 return retval; 144 139 } 145 140
+255 -180
fs/hpfs/hpfs.h
··· 19 19 For definitive information on HPFS, ask somebody else -- this is guesswork. 20 20 There are certain to be many mistakes. */ 21 21 22 + #if !defined(__LITTLE_ENDIAN) && !defined(__BIG_ENDIAN) 23 + #error unknown endian 24 + #endif 25 + 22 26 /* Notation */ 23 27 24 - typedef unsigned secno; /* sector number, partition relative */ 28 + typedef u32 secno; /* sector number, partition relative */ 25 29 26 30 typedef secno dnode_secno; /* sector number of a dnode */ 27 31 typedef secno fnode_secno; /* sector number of an fnode */ ··· 42 38 43 39 struct hpfs_boot_block 44 40 { 45 - unsigned char jmp[3]; 46 - unsigned char oem_id[8]; 47 - unsigned char bytes_per_sector[2]; /* 512 */ 48 - unsigned char sectors_per_cluster; 49 - unsigned char n_reserved_sectors[2]; 50 - unsigned char n_fats; 51 - unsigned char n_rootdir_entries[2]; 52 - unsigned char n_sectors_s[2]; 53 - unsigned char media_byte; 54 - unsigned short sectors_per_fat; 55 - unsigned short sectors_per_track; 56 - unsigned short heads_per_cyl; 57 - unsigned int n_hidden_sectors; 58 - unsigned int n_sectors_l; /* size of partition */ 59 - unsigned char drive_number; 60 - unsigned char mbz; 61 - unsigned char sig_28h; /* 28h */ 62 - unsigned char vol_serno[4]; 63 - unsigned char vol_label[11]; 64 - unsigned char sig_hpfs[8]; /* "HPFS " */ 65 - unsigned char pad[448]; 66 - unsigned short magic; /* aa55 */ 41 + u8 jmp[3]; 42 + u8 oem_id[8]; 43 + u8 bytes_per_sector[2]; /* 512 */ 44 + u8 sectors_per_cluster; 45 + u8 n_reserved_sectors[2]; 46 + u8 n_fats; 47 + u8 n_rootdir_entries[2]; 48 + u8 n_sectors_s[2]; 49 + u8 media_byte; 50 + u16 sectors_per_fat; 51 + u16 sectors_per_track; 52 + u16 heads_per_cyl; 53 + u32 n_hidden_sectors; 54 + u32 n_sectors_l; /* size of partition */ 55 + u8 drive_number; 56 + u8 mbz; 57 + u8 sig_28h; /* 28h */ 58 + u8 vol_serno[4]; 59 + u8 vol_label[11]; 60 + u8 sig_hpfs[8]; /* "HPFS " */ 61 + u8 pad[448]; 62 + u16 magic; /* aa55 */ 67 63 }; 68 64 69 65 ··· 75 71 76 72 struct 
hpfs_super_block 77 73 { 78 - unsigned magic; /* f995 e849 */ 79 - unsigned magic1; /* fa53 e9c5, more magic? */ 80 - /*unsigned huh202;*/ /* ?? 202 = N. of B. in 1.00390625 S.*/ 81 - char version; /* version of a filesystem usually 2 */ 82 - char funcversion; /* functional version - oldest version 74 + u32 magic; /* f995 e849 */ 75 + u32 magic1; /* fa53 e9c5, more magic? */ 76 + u8 version; /* version of a filesystem usually 2 */ 77 + u8 funcversion; /* functional version - oldest version 83 78 of filesystem that can understand 84 79 this disk */ 85 - unsigned short int zero; /* 0 */ 80 + u16 zero; /* 0 */ 86 81 fnode_secno root; /* fnode of root directory */ 87 82 secno n_sectors; /* size of filesystem */ 88 - unsigned n_badblocks; /* number of bad blocks */ 83 + u32 n_badblocks; /* number of bad blocks */ 89 84 secno bitmaps; /* pointers to free space bit maps */ 90 - unsigned zero1; /* 0 */ 85 + u32 zero1; /* 0 */ 91 86 secno badblocks; /* bad block list */ 92 - unsigned zero3; /* 0 */ 87 + u32 zero3; /* 0 */ 93 88 time32_t last_chkdsk; /* date last checked, 0 if never */ 94 - /*unsigned zero4;*/ /* 0 */ 95 - time32_t last_optimize; /* date last optimized, 0 if never */ 89 + time32_t last_optimize; /* date last optimized, 0 if never */ 96 90 secno n_dir_band; /* number of sectors in dir band */ 97 91 secno dir_band_start; /* first sector in dir band */ 98 92 secno dir_band_end; /* last sector in dir band */ 99 93 secno dir_band_bitmap; /* free space map, 1 dnode per bit */ 100 - char volume_name[32]; /* not used */ 94 + u8 volume_name[32]; /* not used */ 101 95 secno user_id_table; /* 8 preallocated sectors - user id */ 102 - unsigned zero6[103]; /* 0 */ 96 + u32 zero6[103]; /* 0 */ 103 97 }; 104 98 105 99 ··· 109 107 110 108 struct hpfs_spare_block 111 109 { 112 - unsigned magic; /* f991 1849 */ 113 - unsigned magic1; /* fa52 29c5, more magic? */ 110 + u32 magic; /* f991 1849 */ 111 + u32 magic1; /* fa52 29c5, more magic? 
*/ 114 112 115 - unsigned dirty: 1; /* 0 clean, 1 "improperly stopped" */ 116 - /*unsigned flag1234: 4;*/ /* unknown flags */ 117 - unsigned sparedir_used: 1; /* spare dirblks used */ 118 - unsigned hotfixes_used: 1; /* hotfixes used */ 119 - unsigned bad_sector: 1; /* bad sector, corrupted disk (???) */ 120 - unsigned bad_bitmap: 1; /* bad bitmap */ 121 - unsigned fast: 1; /* partition was fast formatted */ 122 - unsigned old_wrote: 1; /* old version wrote to partion */ 123 - unsigned old_wrote_1: 1; /* old version wrote to partion (?) */ 124 - unsigned install_dasd_limits: 1; /* HPFS386 flags */ 125 - unsigned resynch_dasd_limits: 1; 126 - unsigned dasd_limits_operational: 1; 127 - unsigned multimedia_active: 1; 128 - unsigned dce_acls_active: 1; 129 - unsigned dasd_limits_dirty: 1; 130 - unsigned flag67: 2; 131 - unsigned char mm_contlgulty; 132 - unsigned char unused; 113 + #ifdef __LITTLE_ENDIAN 114 + u8 dirty: 1; /* 0 clean, 1 "improperly stopped" */ 115 + u8 sparedir_used: 1; /* spare dirblks used */ 116 + u8 hotfixes_used: 1; /* hotfixes used */ 117 + u8 bad_sector: 1; /* bad sector, corrupted disk (???) */ 118 + u8 bad_bitmap: 1; /* bad bitmap */ 119 + u8 fast: 1; /* partition was fast formatted */ 120 + u8 old_wrote: 1; /* old version wrote to partion */ 121 + u8 old_wrote_1: 1; /* old version wrote to partion (?) */ 122 + #else 123 + u8 old_wrote_1: 1; /* old version wrote to partion (?) */ 124 + u8 old_wrote: 1; /* old version wrote to partion */ 125 + u8 fast: 1; /* partition was fast formatted */ 126 + u8 bad_bitmap: 1; /* bad bitmap */ 127 + u8 bad_sector: 1; /* bad sector, corrupted disk (???) 
*/ 128 + u8 hotfixes_used: 1; /* hotfixes used */ 129 + u8 sparedir_used: 1; /* spare dirblks used */ 130 + u8 dirty: 1; /* 0 clean, 1 "improperly stopped" */ 131 + #endif 132 + 133 + #ifdef __LITTLE_ENDIAN 134 + u8 install_dasd_limits: 1; /* HPFS386 flags */ 135 + u8 resynch_dasd_limits: 1; 136 + u8 dasd_limits_operational: 1; 137 + u8 multimedia_active: 1; 138 + u8 dce_acls_active: 1; 139 + u8 dasd_limits_dirty: 1; 140 + u8 flag67: 2; 141 + #else 142 + u8 flag67: 2; 143 + u8 dasd_limits_dirty: 1; 144 + u8 dce_acls_active: 1; 145 + u8 multimedia_active: 1; 146 + u8 dasd_limits_operational: 1; 147 + u8 resynch_dasd_limits: 1; 148 + u8 install_dasd_limits: 1; /* HPFS386 flags */ 149 + #endif 150 + 151 + u8 mm_contlgulty; 152 + u8 unused; 133 153 134 154 secno hotfix_map; /* info about remapped bad sectors */ 135 - unsigned n_spares_used; /* number of hotfixes */ 136 - unsigned n_spares; /* number of spares in hotfix map */ 137 - unsigned n_dnode_spares_free; /* spare dnodes unused */ 138 - unsigned n_dnode_spares; /* length of spare_dnodes[] list, 155 + u32 n_spares_used; /* number of hotfixes */ 156 + u32 n_spares; /* number of spares in hotfix map */ 157 + u32 n_dnode_spares_free; /* spare dnodes unused */ 158 + u32 n_dnode_spares; /* length of spare_dnodes[] list, 139 159 follows in this block*/ 140 160 secno code_page_dir; /* code page directory block */ 141 - unsigned n_code_pages; /* number of code pages */ 142 - /*unsigned large_numbers[2];*/ /* ?? 
*/ 143 - unsigned super_crc; /* on HPFS386 and LAN Server this is 161 + u32 n_code_pages; /* number of code pages */ 162 + u32 super_crc; /* on HPFS386 and LAN Server this is 144 163 checksum of superblock, on normal 145 164 OS/2 unused */ 146 - unsigned spare_crc; /* on HPFS386 checksum of spareblock */ 147 - unsigned zero1[15]; /* unused */ 165 + u32 spare_crc; /* on HPFS386 checksum of spareblock */ 166 + u32 zero1[15]; /* unused */ 148 167 dnode_secno spare_dnodes[100]; /* emergency free dnode list */ 149 - unsigned zero2[1]; /* room for more? */ 168 + u32 zero2[1]; /* room for more? */ 150 169 }; 151 170 152 171 /* The bad block list is 4 sectors long. The first word must be zero, ··· 202 179 203 180 struct code_page_directory 204 181 { 205 - unsigned magic; /* 4945 21f7 */ 206 - unsigned n_code_pages; /* number of pointers following */ 207 - unsigned zero1[2]; 182 + u32 magic; /* 4945 21f7 */ 183 + u32 n_code_pages; /* number of pointers following */ 184 + u32 zero1[2]; 208 185 struct { 209 - unsigned short ix; /* index */ 210 - unsigned short code_page_number; /* code page number */ 211 - unsigned bounds; /* matches corresponding word 186 + u16 ix; /* index */ 187 + u16 code_page_number; /* code page number */ 188 + u32 bounds; /* matches corresponding word 212 189 in data block */ 213 190 secno code_page_data; /* sector number of a code_page_data 214 191 containing c.p. array */ 215 - unsigned short index; /* index in c.p. array in that sector*/ 216 - unsigned short unknown; /* some unknown value; usually 0; 192 + u16 index; /* index in c.p. 
array in that sector*/ 193 + u16 unknown; /* some unknown value; usually 0; 217 194 2 in Japanese version */ 218 195 } array[31]; /* unknown length */ 219 196 }; ··· 224 201 225 202 struct code_page_data 226 203 { 227 - unsigned magic; /* 8945 21f7 */ 228 - unsigned n_used; /* # elements used in c_p_data[] */ 229 - unsigned bounds[3]; /* looks a bit like 204 + u32 magic; /* 8945 21f7 */ 205 + u32 n_used; /* # elements used in c_p_data[] */ 206 + u32 bounds[3]; /* looks a bit like 230 207 (beg1,end1), (beg2,end2) 231 208 one byte each */ 232 - unsigned short offs[3]; /* offsets from start of sector 209 + u16 offs[3]; /* offsets from start of sector 233 210 to start of c_p_data[ix] */ 234 211 struct { 235 - unsigned short ix; /* index */ 236 - unsigned short code_page_number; /* code page number */ 237 - unsigned short unknown; /* the same as in cp directory */ 238 - unsigned char map[128]; /* upcase table for chars 80..ff */ 239 - unsigned short zero2; 212 + u16 ix; /* index */ 213 + u16 code_page_number; /* code page number */ 214 + u16 unknown; /* the same as in cp directory */ 215 + u8 map[128]; /* upcase table for chars 80..ff */ 216 + u16 zero2; 240 217 } code_page[3]; 241 - unsigned char incognita[78]; 218 + u8 incognita[78]; 242 219 }; 243 220 244 221 ··· 278 255 #define DNODE_MAGIC 0x77e40aae 279 256 280 257 struct dnode { 281 - unsigned magic; /* 77e4 0aae */ 282 - unsigned first_free; /* offset from start of dnode to 258 + u32 magic; /* 77e4 0aae */ 259 + u32 first_free; /* offset from start of dnode to 283 260 first free dir entry */ 284 - unsigned root_dnode:1; /* Is it root dnode? */ 285 - unsigned increment_me:31; /* some kind of activity counter? 286 - Neither HPFS.IFS nor CHKDSK cares 261 + #ifdef __LITTLE_ENDIAN 262 + u8 root_dnode: 1; /* Is it root dnode? */ 263 + u8 increment_me: 7; /* some kind of activity counter? 
*/ 264 + /* Neither HPFS.IFS nor CHKDSK cares 287 265 if you change this word */ 266 + #else 267 + u8 increment_me: 7; /* some kind of activity counter? */ 268 + /* Neither HPFS.IFS nor CHKDSK cares 269 + if you change this word */ 270 + u8 root_dnode: 1; /* Is it root dnode? */ 271 + #endif 272 + u8 increment_me2[3]; 288 273 secno up; /* (root dnode) directory's fnode 289 274 (nonroot) parent dnode */ 290 275 dnode_secno self; /* pointer to this dnode */ 291 - unsigned char dirent[2028]; /* one or more dirents */ 276 + u8 dirent[2028]; /* one or more dirents */ 292 277 }; 293 278 294 279 struct hpfs_dirent { 295 - unsigned short length; /* offset to next dirent */ 296 - unsigned first: 1; /* set on phony ^A^A (".") entry */ 297 - unsigned has_acl: 1; 298 - unsigned down: 1; /* down pointer present (after name) */ 299 - unsigned last: 1; /* set on phony \377 entry */ 300 - unsigned has_ea: 1; /* entry has EA */ 301 - unsigned has_xtd_perm: 1; /* has extended perm list (???) */ 302 - unsigned has_explicit_acl: 1; 303 - unsigned has_needea: 1; /* ?? some EA has NEEDEA set 280 + u16 length; /* offset to next dirent */ 281 + 282 + #ifdef __LITTLE_ENDIAN 283 + u8 first: 1; /* set on phony ^A^A (".") entry */ 284 + u8 has_acl: 1; 285 + u8 down: 1; /* down pointer present (after name) */ 286 + u8 last: 1; /* set on phony \377 entry */ 287 + u8 has_ea: 1; /* entry has EA */ 288 + u8 has_xtd_perm: 1; /* has extended perm list (???) */ 289 + u8 has_explicit_acl: 1; 290 + u8 has_needea: 1; /* ?? 
some EA has NEEDEA set 304 291 I have no idea why this is 305 292 interesting in a dir entry */ 306 - unsigned read_only: 1; /* dos attrib */ 307 - unsigned hidden: 1; /* dos attrib */ 308 - unsigned system: 1; /* dos attrib */ 309 - unsigned flag11: 1; /* would be volume label dos attrib */ 310 - unsigned directory: 1; /* dos attrib */ 311 - unsigned archive: 1; /* dos attrib */ 312 - unsigned not_8x3: 1; /* name is not 8.3 */ 313 - unsigned flag15: 1; 293 + #else 294 + u8 has_needea: 1; /* ?? some EA has NEEDEA set 295 + I have no idea why this is 296 + interesting in a dir entry */ 297 + u8 has_explicit_acl: 1; 298 + u8 has_xtd_perm: 1; /* has extended perm list (???) */ 299 + u8 has_ea: 1; /* entry has EA */ 300 + u8 last: 1; /* set on phony \377 entry */ 301 + u8 down: 1; /* down pointer present (after name) */ 302 + u8 has_acl: 1; 303 + u8 first: 1; /* set on phony ^A^A (".") entry */ 304 + #endif 305 + 306 + #ifdef __LITTLE_ENDIAN 307 + u8 read_only: 1; /* dos attrib */ 308 + u8 hidden: 1; /* dos attrib */ 309 + u8 system: 1; /* dos attrib */ 310 + u8 flag11: 1; /* would be volume label dos attrib */ 311 + u8 directory: 1; /* dos attrib */ 312 + u8 archive: 1; /* dos attrib */ 313 + u8 not_8x3: 1; /* name is not 8.3 */ 314 + u8 flag15: 1; 315 + #else 316 + u8 flag15: 1; 317 + u8 not_8x3: 1; /* name is not 8.3 */ 318 + u8 archive: 1; /* dos attrib */ 319 + u8 directory: 1; /* dos attrib */ 320 + u8 flag11: 1; /* would be volume label dos attrib */ 321 + u8 system: 1; /* dos attrib */ 322 + u8 hidden: 1; /* dos attrib */ 323 + u8 read_only: 1; /* dos attrib */ 324 + #endif 325 + 314 326 fnode_secno fnode; /* fnode giving allocation info */ 315 327 time32_t write_date; /* mtime */ 316 - unsigned file_size; /* file length, bytes */ 328 + u32 file_size; /* file length, bytes */ 317 329 time32_t read_date; /* atime */ 318 330 time32_t creation_date; /* ctime */ 319 - unsigned ea_size; /* total EA length, bytes */ 320 - unsigned char no_of_acls : 3; /* number of 
ACL's */ 321 - unsigned char reserver : 5; 322 - unsigned char ix; /* code page index (of filename), see 331 + u32 ea_size; /* total EA length, bytes */ 332 + u8 no_of_acls; /* number of ACL's (low 3 bits) */ 333 + u8 ix; /* code page index (of filename), see 323 334 struct code_page_data */ 324 - unsigned char namelen, name[1]; /* file name */ 335 + u8 namelen, name[1]; /* file name */ 325 336 /* dnode_secno down; btree down pointer, if present, 326 337 follows name on next word boundary, or maybe it 327 338 precedes next dirent, which is on a word boundary. */ ··· 375 318 376 319 struct bplus_leaf_node 377 320 { 378 - unsigned file_secno; /* first file sector in extent */ 379 - unsigned length; /* length, sectors */ 321 + u32 file_secno; /* first file sector in extent */ 322 + u32 length; /* length, sectors */ 380 323 secno disk_secno; /* first corresponding disk sector */ 381 324 }; 382 325 383 326 struct bplus_internal_node 384 327 { 385 - unsigned file_secno; /* subtree maps sectors < this */ 328 + u32 file_secno; /* subtree maps sectors < this */ 386 329 anode_secno down; /* pointer to subtree */ 387 330 }; 388 331 389 332 struct bplus_header 390 333 { 391 - unsigned hbff: 1; /* high bit of first free entry offset */ 392 - unsigned flag1: 1; 393 - unsigned flag2: 1; 394 - unsigned flag3: 1; 395 - unsigned flag4: 1; 396 - unsigned fnode_parent: 1; /* ? we're pointed to by an fnode, 334 + #ifdef __LITTLE_ENDIAN 335 + u8 hbff: 1; /* high bit of first free entry offset */ 336 + u8 flag1234: 4; 337 + u8 fnode_parent: 1; /* ? 
we're pointed to by an fnode, 397 338 the data btree or some ea or the 398 339 main ea bootage pointer ea_secno */ 399 340 /* also can get set in fnodes, which 400 341 may be a chkdsk glitch or may mean 401 342 this bit is irrelevant in fnodes, 402 343 or this interpretation is all wet */ 403 - unsigned binary_search: 1; /* suggest binary search (unused) */ 404 - unsigned internal: 1; /* 1 -> (internal) tree of anodes 344 + u8 binary_search: 1; /* suggest binary search (unused) */ 345 + u8 internal: 1; /* 1 -> (internal) tree of anodes 405 346 0 -> (leaf) list of extents */ 406 - unsigned char fill[3]; 407 - unsigned char n_free_nodes; /* free nodes in following array */ 408 - unsigned char n_used_nodes; /* used nodes in following array */ 409 - unsigned short first_free; /* offset from start of header to 347 + #else 348 + u8 internal: 1; /* 1 -> (internal) tree of anodes 349 + 0 -> (leaf) list of extents */ 350 + u8 binary_search: 1; /* suggest binary search (unused) */ 351 + u8 fnode_parent: 1; /* ? 
we're pointed to by an fnode, 352 + the data btree or some ea or the 353 + main ea bootage pointer ea_secno */ 354 + /* also can get set in fnodes, which 355 + may be a chkdsk glitch or may mean 356 + this bit is irrelevant in fnodes, 357 + or this interpretation is all wet */ 358 + u8 flag1234: 4; 359 + u8 hbff: 1; /* high bit of first free entry offset */ 360 + #endif 361 + u8 fill[3]; 362 + u8 n_free_nodes; /* free nodes in following array */ 363 + u8 n_used_nodes; /* used nodes in following array */ 364 + u16 first_free; /* offset from start of header to 410 365 first free node in array */ 411 366 union { 412 367 struct bplus_internal_node internal[0]; /* (internal) 2-word entries giving ··· 438 369 439 370 struct fnode 440 371 { 441 - unsigned magic; /* f7e4 0aae */ 442 - unsigned zero1[2]; /* read history */ 443 - unsigned char len, name[15]; /* true length, truncated name */ 372 + u32 magic; /* f7e4 0aae */ 373 + u32 zero1[2]; /* read history */ 374 + u8 len, name[15]; /* true length, truncated name */ 444 375 fnode_secno up; /* pointer to file's directory fnode */ 445 - /*unsigned zero2[3];*/ 446 376 secno acl_size_l; 447 377 secno acl_secno; 448 - unsigned short acl_size_s; 449 - char acl_anode; 450 - char zero2; /* history bit count */ 451 - unsigned ea_size_l; /* length of disk-resident ea's */ 378 + u16 acl_size_s; 379 + u8 acl_anode; 380 + u8 zero2; /* history bit count */ 381 + u32 ea_size_l; /* length of disk-resident ea's */ 452 382 secno ea_secno; /* first sector of disk-resident ea's*/ 453 - unsigned short ea_size_s; /* length of fnode-resident ea's */ 383 + u16 ea_size_s; /* length of fnode-resident ea's */ 454 384 455 - unsigned flag0: 1; 456 - unsigned ea_anode: 1; /* 1 -> ea_secno is an anode */ 457 - unsigned flag2: 1; 458 - unsigned flag3: 1; 459 - unsigned flag4: 1; 460 - unsigned flag5: 1; 461 - unsigned flag6: 1; 462 - unsigned flag7: 1; 463 - unsigned dirflag: 1; /* 1 -> directory. 
first & only extent 385 + #ifdef __LITTLE_ENDIAN 386 + u8 flag0: 1; 387 + u8 ea_anode: 1; /* 1 -> ea_secno is an anode */ 388 + u8 flag234567: 6; 389 + #else 390 + u8 flag234567: 6; 391 + u8 ea_anode: 1; /* 1 -> ea_secno is an anode */ 392 + u8 flag0: 1; 393 + #endif 394 + 395 + #ifdef __LITTLE_ENDIAN 396 + u8 dirflag: 1; /* 1 -> directory. first & only extent 464 397 points to dnode. */ 465 - unsigned flag9: 1; 466 - unsigned flag10: 1; 467 - unsigned flag11: 1; 468 - unsigned flag12: 1; 469 - unsigned flag13: 1; 470 - unsigned flag14: 1; 471 - unsigned flag15: 1; 398 + u8 flag9012345: 7; 399 + #else 400 + u8 flag9012345: 7; 401 + u8 dirflag: 1; /* 1 -> directory. first & only extent 402 + points to dnode. */ 403 + #endif 472 404 473 405 struct bplus_header btree; /* b+ tree, 8 extents or 12 subtrees */ 474 406 union { ··· 477 407 struct bplus_internal_node internal[12]; 478 408 } u; 479 409 480 - unsigned file_size; /* file length, bytes */ 481 - unsigned n_needea; /* number of EA's with NEEDEA set */ 482 - char user_id[16]; /* unused */ 483 - unsigned short ea_offs; /* offset from start of fnode 410 + u32 file_size; /* file length, bytes */ 411 + u32 n_needea; /* number of EA's with NEEDEA set */ 412 + u8 user_id[16]; /* unused */ 413 + u16 ea_offs; /* offset from start of fnode 484 414 to first fnode-resident ea */ 485 - char dasd_limit_treshhold; 486 - char dasd_limit_delta; 487 - unsigned dasd_limit; 488 - unsigned dasd_usage; 489 - /*unsigned zero5[2];*/ 490 - unsigned char ea[316]; /* zero or more EA's, packed together 415 + u8 dasd_limit_treshhold; 416 + u8 dasd_limit_delta; 417 + u32 dasd_limit; 418 + u32 dasd_usage; 419 + u8 ea[316]; /* zero or more EA's, packed together 491 420 with no alignment padding. 492 421 (Do not use this name, get here 493 422 via fnode + ea_offs. I think.) 
*/ ··· 499 430 500 431 struct anode 501 432 { 502 - unsigned magic; /* 37e4 0aae */ 433 + u32 magic; /* 37e4 0aae */ 503 434 anode_secno self; /* pointer to this anode */ 504 435 secno up; /* parent anode or fnode */ 505 436 ··· 509 440 struct bplus_internal_node internal[60]; 510 441 } u; 511 442 512 - unsigned fill[3]; /* unused */ 443 + u32 fill[3]; /* unused */ 513 444 }; 514 445 515 446 ··· 530 461 531 462 struct extended_attribute 532 463 { 533 - unsigned indirect: 1; /* 1 -> value gives sector number 464 + #ifdef __LITTLE_ENDIAN 465 + u8 indirect: 1; /* 1 -> value gives sector number 534 466 where real value starts */ 535 - unsigned anode: 1; /* 1 -> sector is an anode 467 + u8 anode: 1; /* 1 -> sector is an anode 536 468 that points to fragmented value */ 537 - unsigned flag2: 1; 538 - unsigned flag3: 1; 539 - unsigned flag4: 1; 540 - unsigned flag5: 1; 541 - unsigned flag6: 1; 542 - unsigned needea: 1; /* required ea */ 543 - unsigned char namelen; /* length of name, bytes */ 544 - unsigned short valuelen; /* length of value, bytes */ 545 - unsigned char name[0]; 469 + u8 flag23456: 5; 470 + u8 needea: 1; /* required ea */ 471 + #else 472 + u8 needea: 1; /* required ea */ 473 + u8 flag23456: 5; 474 + u8 anode: 1; /* 1 -> sector is an anode 475 + that points to fragmented value */ 476 + u8 indirect: 1; /* 1 -> value gives sector number 477 + where real value starts */ 478 + #endif 479 + u8 namelen; /* length of name, bytes */ 480 + u8 valuelen_lo; /* length of value, bytes */ 481 + u8 valuelen_hi; /* length of value, bytes */ 482 + u8 name[0]; 546 483 /* 547 - unsigned char name[namelen]; ascii attrib name 548 - unsigned char nul; terminating '\0', not counted 549 - unsigned char value[valuelen]; value, arbitrary 484 + u8 name[namelen]; ascii attrib name 485 + u8 nul; terminating '\0', not counted 486 + u8 value[valuelen]; value, arbitrary 550 487 if this.indirect, valuelen is 8 and the value is 551 - unsigned length; real length of value, bytes 488 + u32 
length; real length of value, bytes 552 489 secno secno; sector address where it starts 553 490 if this.anode, the above sector number is the root of an anode tree 554 491 which points to the value.
+37 -43
fs/hpfs/hpfs_fn.h
··· 13 13 #include <linux/pagemap.h> 14 14 #include <linux/buffer_head.h> 15 15 #include <linux/slab.h> 16 + #include <asm/unaligned.h> 16 17 17 18 #include "hpfs.h" 18 19 ··· 52 51 unsigned i_disk_sec; /* (files) minimalist cache of alloc info */ 53 52 unsigned i_n_secs; /* (files) minimalist cache of alloc info */ 54 53 unsigned i_ea_size; /* size of extended attributes */ 55 - unsigned i_conv : 2; /* (files) crlf->newline hackery */ 56 54 unsigned i_ea_mode : 1; /* file's permission is stored in ea */ 57 55 unsigned i_ea_uid : 1; /* file's uid is stored in ea */ 58 56 unsigned i_ea_gid : 1; /* file's gid is stored in ea */ 59 57 unsigned i_dirty : 1; 60 - struct mutex i_mutex; 61 - struct mutex i_parent_mutex; 62 58 loff_t **i_rddir_off; 63 59 struct inode vfs_inode; 64 60 }; 65 61 66 62 struct hpfs_sb_info { 63 + struct mutex hpfs_mutex; /* global hpfs lock */ 67 64 ino_t sb_root; /* inode number of root dir */ 68 65 unsigned sb_fs_size; /* file system size, sectors */ 69 66 unsigned sb_bitmaps; /* sector number of bitmap list */ ··· 73 74 uid_t sb_uid; /* uid from mount options */ 74 75 gid_t sb_gid; /* gid from mount options */ 75 76 umode_t sb_mode; /* mode from mount options */ 76 - unsigned sb_conv : 2; /* crlf->newline hackery */ 77 77 unsigned sb_eas : 2; /* eas: 0-ignore, 1-ro, 2-rw */ 78 78 unsigned sb_err : 2; /* on errs: 0-cont, 1-ro, 2-panic */ 79 79 unsigned sb_chk : 2; /* checks: 0-no, 1-normal, 2-strict */ ··· 85 87 unsigned *sb_bmp_dir; /* main bitmap directory */ 86 88 unsigned sb_c_bitmap; /* current bitmap */ 87 89 unsigned sb_max_fwd_alloc; /* max forwad allocation */ 88 - struct mutex hpfs_creation_de; /* when creating dirents, nobody else 89 - can alloc blocks */ 90 - /*unsigned sb_mounting : 1;*/ 91 90 int sb_timeshift; 92 91 }; 93 - 94 - /* 95 - * conv= options 96 - */ 97 - 98 - #define CONV_BINARY 0 /* no conversion */ 99 - #define CONV_TEXT 1 /* crlf->newline */ 100 - #define CONV_AUTO 2 /* decide based on file contents */ 101 92 102 
93 /* Four 512-byte buffers and the 2k block obtained by concatenating them */ 103 94 ··· 100 113 static inline dnode_secno de_down_pointer (struct hpfs_dirent *de) 101 114 { 102 115 CHKCOND(de->down,("HPFS: de_down_pointer: !de->down\n")); 103 - return *(dnode_secno *) ((void *) de + de->length - 4); 116 + return le32_to_cpu(*(dnode_secno *) ((void *) de + le16_to_cpu(de->length) - 4)); 104 117 } 105 118 106 119 /* The first dir entry in a dnode */ ··· 114 127 115 128 static inline struct hpfs_dirent *dnode_end_de (struct dnode *dnode) 116 129 { 117 - CHKCOND(dnode->first_free>=0x14 && dnode->first_free<=0xa00,("HPFS: dnode_end_de: dnode->first_free = %d\n",(int)dnode->first_free)); 118 - return (void *) dnode + dnode->first_free; 130 + CHKCOND(le32_to_cpu(dnode->first_free)>=0x14 && le32_to_cpu(dnode->first_free)<=0xa00,("HPFS: dnode_end_de: dnode->first_free = %x\n",(unsigned)le32_to_cpu(dnode->first_free))); 131 + return (void *) dnode + le32_to_cpu(dnode->first_free); 119 132 } 120 133 121 134 /* The dir entry after dir entry de */ 122 135 123 136 static inline struct hpfs_dirent *de_next_de (struct hpfs_dirent *de) 124 137 { 125 - CHKCOND(de->length>=0x20 && de->length<0x800,("HPFS: de_next_de: de->length = %d\n",(int)de->length)); 126 - return (void *) de + de->length; 138 + CHKCOND(le16_to_cpu(de->length)>=0x20 && le16_to_cpu(de->length)<0x800,("HPFS: de_next_de: de->length = %x\n",(unsigned)le16_to_cpu(de->length))); 139 + return (void *) de + le16_to_cpu(de->length); 127 140 } 128 141 129 142 static inline struct extended_attribute *fnode_ea(struct fnode *fnode) 130 143 { 131 - return (struct extended_attribute *)((char *)fnode + fnode->ea_offs + fnode->acl_size_s); 144 + return (struct extended_attribute *)((char *)fnode + le16_to_cpu(fnode->ea_offs) + le16_to_cpu(fnode->acl_size_s)); 132 145 } 133 146 134 147 static inline struct extended_attribute *fnode_end_ea(struct fnode *fnode) 135 148 { 136 - return (struct extended_attribute *)((char *)fnode + 
fnode->ea_offs + fnode->acl_size_s + fnode->ea_size_s); 149 + return (struct extended_attribute *)((char *)fnode + le16_to_cpu(fnode->ea_offs) + le16_to_cpu(fnode->acl_size_s) + le16_to_cpu(fnode->ea_size_s)); 150 + } 151 + 152 + static unsigned ea_valuelen(struct extended_attribute *ea) 153 + { 154 + return ea->valuelen_lo + 256 * ea->valuelen_hi; 137 155 } 138 156 139 157 static inline struct extended_attribute *next_ea(struct extended_attribute *ea) 140 158 { 141 - return (struct extended_attribute *)((char *)ea + 5 + ea->namelen + ea->valuelen); 159 + return (struct extended_attribute *)((char *)ea + 5 + ea->namelen + ea_valuelen(ea)); 142 160 } 143 161 144 162 static inline secno ea_sec(struct extended_attribute *ea) 145 163 { 146 - return *(secno *)((char *)ea + 9 + ea->namelen); 164 + return le32_to_cpu(get_unaligned((secno *)((char *)ea + 9 + ea->namelen))); 147 165 } 148 166 149 167 static inline secno ea_len(struct extended_attribute *ea) 150 168 { 151 - return *(secno *)((char *)ea + 5 + ea->namelen); 169 + return le32_to_cpu(get_unaligned((secno *)((char *)ea + 5 + ea->namelen))); 152 170 } 153 171 154 172 static inline char *ea_data(struct extended_attribute *ea) ··· 178 186 dst->not_8x3 = n; 179 187 } 180 188 181 - static inline unsigned tstbits(unsigned *bmp, unsigned b, unsigned n) 189 + static inline unsigned tstbits(u32 *bmp, unsigned b, unsigned n) 182 190 { 183 191 int i; 184 192 if ((b >= 0x4000) || (b + n - 1 >= 0x4000)) return n; 185 - if (!((bmp[(b & 0x3fff) >> 5] >> (b & 0x1f)) & 1)) return 1; 193 + if (!((le32_to_cpu(bmp[(b & 0x3fff) >> 5]) >> (b & 0x1f)) & 1)) return 1; 186 194 for (i = 1; i < n; i++) 187 - if (/*b+i < 0x4000 &&*/ !((bmp[((b+i) & 0x3fff) >> 5] >> ((b+i) & 0x1f)) & 1)) 195 + if (!((le32_to_cpu(bmp[((b+i) & 0x3fff) >> 5]) >> ((b+i) & 0x1f)) & 1)) 188 196 return i + 1; 189 197 return 0; 190 198 } ··· 192 200 /* alloc.c */ 193 201 194 202 int hpfs_chk_sectors(struct super_block *, secno, int, char *); 195 - secno 
hpfs_alloc_sector(struct super_block *, secno, unsigned, int, int); 203 + secno hpfs_alloc_sector(struct super_block *, secno, unsigned, int); 196 204 int hpfs_alloc_if_possible(struct super_block *, secno); 197 205 void hpfs_free_sectors(struct super_block *, secno, unsigned); 198 206 int hpfs_check_free_dnodes(struct super_block *, int); 199 207 void hpfs_free_dnode(struct super_block *, secno); 200 - struct dnode *hpfs_alloc_dnode(struct super_block *, secno, dnode_secno *, struct quad_buffer_head *, int); 208 + struct dnode *hpfs_alloc_dnode(struct super_block *, secno, dnode_secno *, struct quad_buffer_head *); 201 209 struct fnode *hpfs_alloc_fnode(struct super_block *, secno, fnode_secno *, struct buffer_head **); 202 210 struct anode *hpfs_alloc_anode(struct super_block *, secno, anode_secno *, struct buffer_head **); 203 211 ··· 214 222 215 223 /* buffer.c */ 216 224 217 - void hpfs_lock_creation(struct super_block *); 218 - void hpfs_unlock_creation(struct super_block *); 219 225 void *hpfs_map_sector(struct super_block *, unsigned, struct buffer_head **, int); 220 226 void *hpfs_get_sector(struct super_block *, unsigned, struct buffer_head **); 221 227 void *hpfs_map_4sectors(struct super_block *, unsigned, struct quad_buffer_head *, int); ··· 237 247 struct hpfs_dirent *hpfs_add_de(struct super_block *, struct dnode *, 238 248 const unsigned char *, unsigned, secno); 239 249 int hpfs_add_dirent(struct inode *, const unsigned char *, unsigned, 240 - struct hpfs_dirent *, int); 250 + struct hpfs_dirent *); 241 251 int hpfs_remove_dirent(struct inode *, dnode_secno, struct hpfs_dirent *, struct quad_buffer_head *, int); 242 252 void hpfs_count_dnodes(struct super_block *, dnode_secno, int *, int *, int *); 243 253 dnode_secno hpfs_de_as_down_as_possible(struct super_block *, dnode_secno dno); ··· 293 303 const unsigned char *, unsigned, int); 294 304 int hpfs_is_name_long(const unsigned char *, unsigned); 295 305 void hpfs_adjust_length(const unsigned char 
*, unsigned *); 296 - void hpfs_decide_conv(struct inode *, const unsigned char *, unsigned); 297 306 298 307 /* namei.c */ 299 308 ··· 335 346 /* 336 347 * Locking: 337 348 * 338 - * hpfs_lock() is a leftover from the big kernel lock. 339 - * Right now, these functions are empty and only left 340 - * for documentation purposes. The file system no longer 341 - * works on SMP systems, so the lock is not needed 342 - * any more. 349 + * hpfs_lock() locks the whole filesystem. It must be taken 350 + * on any method called by the VFS. 343 351 * 344 - * If someone is interested in making it work again, this 345 - * would be the place to start by adding a per-superblock 346 - * mutex and fixing all the bugs and performance issues 347 - * caused by that. 352 + * We don't do any per-file locking anymore, it is hard to 353 + * review and HPFS is not performance-sensitive anyway. 348 354 */ 349 355 static inline void hpfs_lock(struct super_block *s) 350 356 { 357 + struct hpfs_sb_info *sbi = hpfs_sb(s); 358 + mutex_lock(&sbi->hpfs_mutex); 351 359 } 352 360 353 361 static inline void hpfs_unlock(struct super_block *s) 354 362 { 363 + struct hpfs_sb_info *sbi = hpfs_sb(s); 364 + mutex_unlock(&sbi->hpfs_mutex); 365 + } 366 + 367 + static inline void hpfs_lock_assert(struct super_block *s) 368 + { 369 + struct hpfs_sb_info *sbi = hpfs_sb(s); 370 + WARN_ON(!mutex_is_locked(&sbi->hpfs_mutex)); 355 371 }
+21 -26
fs/hpfs/inode.c
··· 17 17 i->i_uid = hpfs_sb(sb)->sb_uid; 18 18 i->i_gid = hpfs_sb(sb)->sb_gid; 19 19 i->i_mode = hpfs_sb(sb)->sb_mode; 20 - hpfs_inode->i_conv = hpfs_sb(sb)->sb_conv; 21 20 i->i_size = -1; 22 21 i->i_blocks = -1; 23 22 ··· 115 116 i->i_mode |= S_IFDIR; 116 117 i->i_op = &hpfs_dir_iops; 117 118 i->i_fop = &hpfs_dir_ops; 118 - hpfs_inode->i_parent_dir = fnode->up; 119 - hpfs_inode->i_dno = fnode->u.external[0].disk_secno; 119 + hpfs_inode->i_parent_dir = le32_to_cpu(fnode->up); 120 + hpfs_inode->i_dno = le32_to_cpu(fnode->u.external[0].disk_secno); 120 121 if (hpfs_sb(sb)->sb_chk >= 2) { 121 122 struct buffer_head *bh0; 122 123 if (hpfs_map_fnode(sb, hpfs_inode->i_parent_dir, &bh0)) brelse(bh0); ··· 132 133 i->i_op = &hpfs_file_iops; 133 134 i->i_fop = &hpfs_file_ops; 134 135 i->i_nlink = 1; 135 - i->i_size = fnode->file_size; 136 + i->i_size = le32_to_cpu(fnode->file_size); 136 137 i->i_blocks = ((i->i_size + 511) >> 9) + 1; 137 138 i->i_data.a_ops = &hpfs_aops; 138 139 hpfs_i(i)->mmu_private = i->i_size; ··· 143 144 static void hpfs_write_inode_ea(struct inode *i, struct fnode *fnode) 144 145 { 145 146 struct hpfs_inode_info *hpfs_inode = hpfs_i(i); 146 - /*if (fnode->acl_size_l || fnode->acl_size_s) { 147 + /*if (le32_to_cpu(fnode->acl_size_l) || le16_to_cpu(fnode->acl_size_s)) { 147 148 Some unknown structures like ACL may be in fnode, 148 149 we'd better not overwrite them 149 150 hpfs_error(i->i_sb, "fnode %08x has some unknown HPFS386 stuctures", i->i_ino); ··· 186 187 kfree(hpfs_inode->i_rddir_off); 187 188 hpfs_inode->i_rddir_off = NULL; 188 189 } 189 - mutex_lock(&hpfs_inode->i_parent_mutex); 190 190 if (!i->i_nlink) { 191 - mutex_unlock(&hpfs_inode->i_parent_mutex); 192 191 return; 193 192 } 194 193 parent = iget_locked(i->i_sb, hpfs_inode->i_parent_dir); ··· 197 200 hpfs_read_inode(parent); 198 201 unlock_new_inode(parent); 199 202 } 200 - mutex_lock(&hpfs_inode->i_mutex); 201 203 hpfs_write_inode_nolock(i); 202 - mutex_unlock(&hpfs_inode->i_mutex); 203 
204 iput(parent); 204 - } else { 205 - mark_inode_dirty(i); 206 205 } 207 - mutex_unlock(&hpfs_inode->i_parent_mutex); 208 206 } 209 207 210 208 void hpfs_write_inode_nolock(struct inode *i) ··· 218 226 } 219 227 } else de = NULL; 220 228 if (S_ISREG(i->i_mode)) { 221 - fnode->file_size = i->i_size; 222 - if (de) de->file_size = i->i_size; 229 + fnode->file_size = cpu_to_le32(i->i_size); 230 + if (de) de->file_size = cpu_to_le32(i->i_size); 223 231 } else if (S_ISDIR(i->i_mode)) { 224 - fnode->file_size = 0; 225 - if (de) de->file_size = 0; 232 + fnode->file_size = cpu_to_le32(0); 233 + if (de) de->file_size = cpu_to_le32(0); 226 234 } 227 235 hpfs_write_inode_ea(i, fnode); 228 236 if (de) { 229 - de->write_date = gmt_to_local(i->i_sb, i->i_mtime.tv_sec); 230 - de->read_date = gmt_to_local(i->i_sb, i->i_atime.tv_sec); 231 - de->creation_date = gmt_to_local(i->i_sb, i->i_ctime.tv_sec); 237 + de->write_date = cpu_to_le32(gmt_to_local(i->i_sb, i->i_mtime.tv_sec)); 238 + de->read_date = cpu_to_le32(gmt_to_local(i->i_sb, i->i_atime.tv_sec)); 239 + de->creation_date = cpu_to_le32(gmt_to_local(i->i_sb, i->i_ctime.tv_sec)); 232 240 de->read_only = !(i->i_mode & 0222); 233 - de->ea_size = hpfs_inode->i_ea_size; 241 + de->ea_size = cpu_to_le32(hpfs_inode->i_ea_size); 234 242 hpfs_mark_4buffers_dirty(&qbh); 235 243 hpfs_brelse4(&qbh); 236 244 } 237 245 if (S_ISDIR(i->i_mode)) { 238 246 if ((de = map_dirent(i, hpfs_inode->i_dno, "\001\001", 2, NULL, &qbh))) { 239 - de->write_date = gmt_to_local(i->i_sb, i->i_mtime.tv_sec); 240 - de->read_date = gmt_to_local(i->i_sb, i->i_atime.tv_sec); 241 - de->creation_date = gmt_to_local(i->i_sb, i->i_ctime.tv_sec); 247 + de->write_date = cpu_to_le32(gmt_to_local(i->i_sb, i->i_mtime.tv_sec)); 248 + de->read_date = cpu_to_le32(gmt_to_local(i->i_sb, i->i_atime.tv_sec)); 249 + de->creation_date = cpu_to_le32(gmt_to_local(i->i_sb, i->i_ctime.tv_sec)); 242 250 de->read_only = !(i->i_mode & 0222); 243 - de->ea_size = /*hpfs_inode->i_ea_size*/0; 
244 - de->file_size = 0; 251 + de->ea_size = cpu_to_le32(/*hpfs_inode->i_ea_size*/0); 252 + de->file_size = cpu_to_le32(0); 245 253 hpfs_mark_4buffers_dirty(&qbh); 246 254 hpfs_brelse4(&qbh); 247 255 } else ··· 261 269 hpfs_lock(inode->i_sb); 262 270 if (inode->i_ino == hpfs_sb(inode->i_sb)->sb_root) 263 271 goto out_unlock; 272 + if ((attr->ia_valid & ATTR_UID) && attr->ia_uid >= 0x10000) 273 + goto out_unlock; 274 + if ((attr->ia_valid & ATTR_GID) && attr->ia_gid >= 0x10000) 275 + goto out_unlock; 264 276 if ((attr->ia_valid & ATTR_SIZE) && attr->ia_size > inode->i_size) 265 277 goto out_unlock; 266 278 ··· 280 284 } 281 285 282 286 setattr_copy(inode, attr); 283 - mark_inode_dirty(inode); 284 287 285 288 hpfs_write_inode(inode); 286 289
+30 -26
fs/hpfs/map.c
··· 21 21 hpfs_error(s, "hpfs_map_bitmap called with bad parameter: %08x at %s", bmp_block, id); 22 22 return NULL; 23 23 } 24 - sec = hpfs_sb(s)->sb_bmp_dir[bmp_block]; 24 + sec = le32_to_cpu(hpfs_sb(s)->sb_bmp_dir[bmp_block]); 25 25 if (!sec || sec > hpfs_sb(s)->sb_fs_size-4) { 26 26 hpfs_error(s, "invalid bitmap block pointer %08x -> %08x at %s", bmp_block, sec, id); 27 27 return NULL; ··· 46 46 struct code_page_data *cpd; 47 47 struct code_page_directory *cp = hpfs_map_sector(s, cps, &bh, 0); 48 48 if (!cp) return NULL; 49 - if (cp->magic != CP_DIR_MAGIC) { 50 - printk("HPFS: Code page directory magic doesn't match (magic = %08x)\n", cp->magic); 49 + if (le32_to_cpu(cp->magic) != CP_DIR_MAGIC) { 50 + printk("HPFS: Code page directory magic doesn't match (magic = %08x)\n", le32_to_cpu(cp->magic)); 51 51 brelse(bh); 52 52 return NULL; 53 53 } 54 - if (!cp->n_code_pages) { 54 + if (!le32_to_cpu(cp->n_code_pages)) { 55 55 printk("HPFS: n_code_pages == 0\n"); 56 56 brelse(bh); 57 57 return NULL; 58 58 } 59 - cpds = cp->array[0].code_page_data; 60 - cpi = cp->array[0].index; 59 + cpds = le32_to_cpu(cp->array[0].code_page_data); 60 + cpi = le16_to_cpu(cp->array[0].index); 61 61 brelse(bh); 62 62 63 63 if (cpi >= 3) { ··· 66 66 } 67 67 68 68 if (!(cpd = hpfs_map_sector(s, cpds, &bh, 0))) return NULL; 69 - if ((unsigned)cpd->offs[cpi] > 0x178) { 69 + if (le16_to_cpu(cpd->offs[cpi]) > 0x178) { 70 70 printk("HPFS: Code page index out of sector\n"); 71 71 brelse(bh); 72 72 return NULL; 73 73 } 74 - ptr = (unsigned char *)cpd + cpd->offs[cpi] + 6; 74 + ptr = (unsigned char *)cpd + le16_to_cpu(cpd->offs[cpi]) + 6; 75 75 if (!(cp_table = kmalloc(256, GFP_KERNEL))) { 76 76 printk("HPFS: out of memory for code page table\n"); 77 77 brelse(bh); ··· 125 125 if (hpfs_sb(s)->sb_chk) { 126 126 struct extended_attribute *ea; 127 127 struct extended_attribute *ea_end; 128 - if (fnode->magic != FNODE_MAGIC) { 128 + if (le32_to_cpu(fnode->magic) != FNODE_MAGIC) { 129 129 hpfs_error(s, 
"bad magic on fnode %08lx", 130 130 (unsigned long)ino); 131 131 goto bail; ··· 138 138 (unsigned long)ino); 139 139 goto bail; 140 140 } 141 - if (fnode->btree.first_free != 141 + if (le16_to_cpu(fnode->btree.first_free) != 142 142 8 + fnode->btree.n_used_nodes * (fnode->btree.internal ? 8 : 12)) { 143 143 hpfs_error(s, 144 144 "bad first_free pointer in fnode %08lx", ··· 146 146 goto bail; 147 147 } 148 148 } 149 - if (fnode->ea_size_s && ((signed int)fnode->ea_offs < 0xc4 || 150 - (signed int)fnode->ea_offs + fnode->acl_size_s + fnode->ea_size_s > 0x200)) { 149 + if (le16_to_cpu(fnode->ea_size_s) && (le16_to_cpu(fnode->ea_offs) < 0xc4 || 150 + le16_to_cpu(fnode->ea_offs) + le16_to_cpu(fnode->acl_size_s) + le16_to_cpu(fnode->ea_size_s) > 0x200)) { 151 151 hpfs_error(s, 152 152 "bad EA info in fnode %08lx: ea_offs == %04x ea_size_s == %04x", 153 153 (unsigned long)ino, 154 - fnode->ea_offs, fnode->ea_size_s); 154 + le16_to_cpu(fnode->ea_offs), le16_to_cpu(fnode->ea_size_s)); 155 155 goto bail; 156 156 } 157 157 ea = fnode_ea(fnode); ··· 178 178 if (hpfs_sb(s)->sb_chk) if (hpfs_chk_sectors(s, ano, 1, "anode")) return NULL; 179 179 if ((anode = hpfs_map_sector(s, ano, bhp, ANODE_RD_AHEAD))) 180 180 if (hpfs_sb(s)->sb_chk) { 181 - if (anode->magic != ANODE_MAGIC || anode->self != ano) { 181 + if (le32_to_cpu(anode->magic) != ANODE_MAGIC) { 182 182 hpfs_error(s, "bad magic on anode %08x", ano); 183 + goto bail; 184 + } 185 + if (le32_to_cpu(anode->self) != ano) { 186 + hpfs_error(s, "self pointer invalid on anode %08x", ano); 183 187 goto bail; 184 188 } 185 189 if ((unsigned)anode->btree.n_used_nodes + (unsigned)anode->btree.n_free_nodes != ··· 191 187 hpfs_error(s, "bad number of nodes in anode %08x", ano); 192 188 goto bail; 193 189 } 194 - if (anode->btree.first_free != 190 + if (le16_to_cpu(anode->btree.first_free) != 195 191 8 + anode->btree.n_used_nodes * (anode->btree.internal ? 
8 : 12)) { 196 192 hpfs_error(s, "bad first_free pointer in anode %08x", ano); 197 193 goto bail; ··· 223 219 unsigned p, pp = 0; 224 220 unsigned char *d = (unsigned char *)dnode; 225 221 int b = 0; 226 - if (dnode->magic != DNODE_MAGIC) { 222 + if (le32_to_cpu(dnode->magic) != DNODE_MAGIC) { 227 223 hpfs_error(s, "bad magic on dnode %08x", secno); 228 224 goto bail; 229 225 } 230 - if (dnode->self != secno) 231 - hpfs_error(s, "bad self pointer on dnode %08x self = %08x", secno, dnode->self); 226 + if (le32_to_cpu(dnode->self) != secno) 227 + hpfs_error(s, "bad self pointer on dnode %08x self = %08x", secno, le32_to_cpu(dnode->self)); 232 228 /* Check dirents - bad dirents would cause infinite 233 229 loops or shooting to memory */ 234 - if (dnode->first_free > 2048/* || dnode->first_free < 84*/) { 235 - hpfs_error(s, "dnode %08x has first_free == %08x", secno, dnode->first_free); 230 + if (le32_to_cpu(dnode->first_free) > 2048) { 231 + hpfs_error(s, "dnode %08x has first_free == %08x", secno, le32_to_cpu(dnode->first_free)); 236 232 goto bail; 237 233 } 238 - for (p = 20; p < dnode->first_free; p += d[p] + (d[p+1] << 8)) { 234 + for (p = 20; p < le32_to_cpu(dnode->first_free); p += d[p] + (d[p+1] << 8)) { 239 235 struct hpfs_dirent *de = (struct hpfs_dirent *)((char *)dnode + p); 240 - if (de->length > 292 || (de->length < 32) || (de->length & 3) || p + de->length > 2048) { 236 + if (le16_to_cpu(de->length) > 292 || (le16_to_cpu(de->length) < 32) || (le16_to_cpu(de->length) & 3) || p + le16_to_cpu(de->length) > 2048) { 241 237 hpfs_error(s, "bad dirent size in dnode %08x, dirent %03x, last %03x", secno, p, pp); 242 238 goto bail; 243 239 } 244 - if (((31 + de->namelen + de->down*4 + 3) & ~3) != de->length) { 245 - if (((31 + de->namelen + de->down*4 + 3) & ~3) < de->length && s->s_flags & MS_RDONLY) goto ok; 240 + if (((31 + de->namelen + de->down*4 + 3) & ~3) != le16_to_cpu(de->length)) { 241 + if (((31 + de->namelen + de->down*4 + 3) & ~3) < 
le16_to_cpu(de->length) && s->s_flags & MS_RDONLY) goto ok; 246 242 hpfs_error(s, "namelen does not match dirent size in dnode %08x, dirent %03x, last %03x", secno, p, pp); 247 243 goto bail; 248 244 } ··· 255 251 pp = p; 256 252 257 253 } 258 - if (p != dnode->first_free) { 254 + if (p != le32_to_cpu(dnode->first_free)) { 259 255 hpfs_error(s, "size on last dirent does not match first_free; dnode %08x", secno); 260 256 goto bail; 261 257 } ··· 281 277 if (!fnode) 282 278 return 0; 283 279 284 - dno = fnode->u.external[0].disk_secno; 280 + dno = le32_to_cpu(fnode->u.external[0].disk_secno); 285 281 brelse(bh); 286 282 return dno; 287 283 }
-33
fs/hpfs/name.c
··· 8 8 9 9 #include "hpfs_fn.h" 10 10 11 - static const char *text_postfix[]={ 12 - ".ASM", ".BAS", ".BAT", ".C", ".CC", ".CFG", ".CMD", ".CON", ".CPP", ".DEF", 13 - ".DOC", ".DPR", ".ERX", ".H", ".HPP", ".HTM", ".HTML", ".JAVA", ".LOG", ".PAS", 14 - ".RC", ".TEX", ".TXT", ".Y", ""}; 15 - 16 - static const char *text_prefix[]={ 17 - "AUTOEXEC.", "CHANGES", "COPYING", "CONFIG.", "CREDITS", "FAQ", "FILE_ID.DIZ", 18 - "MAKEFILE", "READ.ME", "README", "TERMCAP", ""}; 19 - 20 - void hpfs_decide_conv(struct inode *inode, const unsigned char *name, unsigned len) 21 - { 22 - struct hpfs_inode_info *hpfs_inode = hpfs_i(inode); 23 - int i; 24 - if (hpfs_inode->i_conv != CONV_AUTO) return; 25 - for (i = 0; *text_postfix[i]; i++) { 26 - int l = strlen(text_postfix[i]); 27 - if (l <= len) 28 - if (!hpfs_compare_names(inode->i_sb, text_postfix[i], l, name + len - l, l, 0)) 29 - goto text; 30 - } 31 - for (i = 0; *text_prefix[i]; i++) { 32 - int l = strlen(text_prefix[i]); 33 - if (l <= len) 34 - if (!hpfs_compare_names(inode->i_sb, text_prefix[i], l, name, l, 0)) 35 - goto text; 36 - } 37 - hpfs_inode->i_conv = CONV_BINARY; 38 - return; 39 - text: 40 - hpfs_inode->i_conv = CONV_TEXT; 41 - return; 42 - } 43 - 44 11 static inline int not_allowed_char(unsigned char c) 45 12 { 46 13 return c<' ' || c=='"' || c=='*' || c=='/' || c==':' || c=='<' ||
+30 -76
fs/hpfs/namei.c
··· 29 29 fnode = hpfs_alloc_fnode(dir->i_sb, hpfs_i(dir)->i_dno, &fno, &bh); 30 30 if (!fnode) 31 31 goto bail; 32 - dnode = hpfs_alloc_dnode(dir->i_sb, fno, &dno, &qbh0, 1); 32 + dnode = hpfs_alloc_dnode(dir->i_sb, fno, &dno, &qbh0); 33 33 if (!dnode) 34 34 goto bail1; 35 35 memset(&dee, 0, sizeof dee); ··· 37 37 if (!(mode & 0222)) dee.read_only = 1; 38 38 /*dee.archive = 0;*/ 39 39 dee.hidden = name[0] == '.'; 40 - dee.fnode = fno; 41 - dee.creation_date = dee.write_date = dee.read_date = gmt_to_local(dir->i_sb, get_seconds()); 40 + dee.fnode = cpu_to_le32(fno); 41 + dee.creation_date = dee.write_date = dee.read_date = cpu_to_le32(gmt_to_local(dir->i_sb, get_seconds())); 42 42 result = new_inode(dir->i_sb); 43 43 if (!result) 44 44 goto bail2; ··· 46 46 result->i_ino = fno; 47 47 hpfs_i(result)->i_parent_dir = dir->i_ino; 48 48 hpfs_i(result)->i_dno = dno; 49 - result->i_ctime.tv_sec = result->i_mtime.tv_sec = result->i_atime.tv_sec = local_to_gmt(dir->i_sb, dee.creation_date); 49 + result->i_ctime.tv_sec = result->i_mtime.tv_sec = result->i_atime.tv_sec = local_to_gmt(dir->i_sb, le32_to_cpu(dee.creation_date)); 50 50 result->i_ctime.tv_nsec = 0; 51 51 result->i_mtime.tv_nsec = 0; 52 52 result->i_atime.tv_nsec = 0; ··· 60 60 if (dee.read_only) 61 61 result->i_mode &= ~0222; 62 62 63 - mutex_lock(&hpfs_i(dir)->i_mutex); 64 - r = hpfs_add_dirent(dir, name, len, &dee, 0); 63 + r = hpfs_add_dirent(dir, name, len, &dee); 65 64 if (r == 1) 66 65 goto bail3; 67 66 if (r == -1) { ··· 69 70 } 70 71 fnode->len = len; 71 72 memcpy(fnode->name, name, len > 15 ? 
15 : len); 72 - fnode->up = dir->i_ino; 73 + fnode->up = cpu_to_le32(dir->i_ino); 73 74 fnode->dirflag = 1; 74 75 fnode->btree.n_free_nodes = 7; 75 76 fnode->btree.n_used_nodes = 1; 76 - fnode->btree.first_free = 0x14; 77 - fnode->u.external[0].disk_secno = dno; 78 - fnode->u.external[0].file_secno = -1; 77 + fnode->btree.first_free = cpu_to_le16(0x14); 78 + fnode->u.external[0].disk_secno = cpu_to_le32(dno); 79 + fnode->u.external[0].file_secno = cpu_to_le32(-1); 79 80 dnode->root_dnode = 1; 80 - dnode->up = fno; 81 + dnode->up = cpu_to_le32(fno); 81 82 de = hpfs_add_de(dir->i_sb, dnode, "\001\001", 2, 0); 82 - de->creation_date = de->write_date = de->read_date = gmt_to_local(dir->i_sb, get_seconds()); 83 + de->creation_date = de->write_date = de->read_date = cpu_to_le32(gmt_to_local(dir->i_sb, get_seconds())); 83 84 if (!(mode & 0222)) de->read_only = 1; 84 85 de->first = de->directory = 1; 85 86 /*de->hidden = de->system = 0;*/ 86 - de->fnode = fno; 87 + de->fnode = cpu_to_le32(fno); 87 88 mark_buffer_dirty(bh); 88 89 brelse(bh); 89 90 hpfs_mark_4buffers_dirty(&qbh0); ··· 100 101 hpfs_write_inode_nolock(result); 101 102 } 102 103 d_instantiate(dentry, result); 103 - mutex_unlock(&hpfs_i(dir)->i_mutex); 104 104 hpfs_unlock(dir->i_sb); 105 105 return 0; 106 106 bail3: 107 - mutex_unlock(&hpfs_i(dir)->i_mutex); 108 107 iput(result); 109 108 bail2: 110 109 hpfs_brelse4(&qbh0); ··· 137 140 if (!(mode & 0222)) dee.read_only = 1; 138 141 dee.archive = 1; 139 142 dee.hidden = name[0] == '.'; 140 - dee.fnode = fno; 141 - dee.creation_date = dee.write_date = dee.read_date = gmt_to_local(dir->i_sb, get_seconds()); 143 + dee.fnode = cpu_to_le32(fno); 144 + dee.creation_date = dee.write_date = dee.read_date = cpu_to_le32(gmt_to_local(dir->i_sb, get_seconds())); 142 145 143 146 result = new_inode(dir->i_sb); 144 147 if (!result) ··· 151 154 result->i_op = &hpfs_file_iops; 152 155 result->i_fop = &hpfs_file_ops; 153 156 result->i_nlink = 1; 154 - hpfs_decide_conv(result, name, 
len); 155 157 hpfs_i(result)->i_parent_dir = dir->i_ino; 156 - result->i_ctime.tv_sec = result->i_mtime.tv_sec = result->i_atime.tv_sec = local_to_gmt(dir->i_sb, dee.creation_date); 158 + result->i_ctime.tv_sec = result->i_mtime.tv_sec = result->i_atime.tv_sec = local_to_gmt(dir->i_sb, le32_to_cpu(dee.creation_date)); 157 159 result->i_ctime.tv_nsec = 0; 158 160 result->i_mtime.tv_nsec = 0; 159 161 result->i_atime.tv_nsec = 0; ··· 164 168 result->i_data.a_ops = &hpfs_aops; 165 169 hpfs_i(result)->mmu_private = 0; 166 170 167 - mutex_lock(&hpfs_i(dir)->i_mutex); 168 - r = hpfs_add_dirent(dir, name, len, &dee, 0); 171 + r = hpfs_add_dirent(dir, name, len, &dee); 169 172 if (r == 1) 170 173 goto bail2; 171 174 if (r == -1) { ··· 173 178 } 174 179 fnode->len = len; 175 180 memcpy(fnode->name, name, len > 15 ? 15 : len); 176 - fnode->up = dir->i_ino; 181 + fnode->up = cpu_to_le32(dir->i_ino); 177 182 mark_buffer_dirty(bh); 178 183 brelse(bh); 179 184 ··· 188 193 hpfs_write_inode_nolock(result); 189 194 } 190 195 d_instantiate(dentry, result); 191 - mutex_unlock(&hpfs_i(dir)->i_mutex); 192 196 hpfs_unlock(dir->i_sb); 193 197 return 0; 194 198 195 199 bail2: 196 - mutex_unlock(&hpfs_i(dir)->i_mutex); 197 200 iput(result); 198 201 bail1: 199 202 brelse(bh); ··· 225 232 if (!(mode & 0222)) dee.read_only = 1; 226 233 dee.archive = 1; 227 234 dee.hidden = name[0] == '.'; 228 - dee.fnode = fno; 229 - dee.creation_date = dee.write_date = dee.read_date = gmt_to_local(dir->i_sb, get_seconds()); 235 + dee.fnode = cpu_to_le32(fno); 236 + dee.creation_date = dee.write_date = dee.read_date = cpu_to_le32(gmt_to_local(dir->i_sb, get_seconds())); 230 237 231 238 result = new_inode(dir->i_sb); 232 239 if (!result) ··· 235 242 hpfs_init_inode(result); 236 243 result->i_ino = fno; 237 244 hpfs_i(result)->i_parent_dir = dir->i_ino; 238 - result->i_ctime.tv_sec = result->i_mtime.tv_sec = result->i_atime.tv_sec = local_to_gmt(dir->i_sb, dee.creation_date); 245 + result->i_ctime.tv_sec = 
result->i_mtime.tv_sec = result->i_atime.tv_sec = local_to_gmt(dir->i_sb, le32_to_cpu(dee.creation_date)); 239 246 result->i_ctime.tv_nsec = 0; 240 247 result->i_mtime.tv_nsec = 0; 241 248 result->i_atime.tv_nsec = 0; ··· 247 254 result->i_blocks = 1; 248 255 init_special_inode(result, mode, rdev); 249 256 250 - mutex_lock(&hpfs_i(dir)->i_mutex); 251 - r = hpfs_add_dirent(dir, name, len, &dee, 0); 257 + r = hpfs_add_dirent(dir, name, len, &dee); 252 258 if (r == 1) 253 259 goto bail2; 254 260 if (r == -1) { ··· 256 264 } 257 265 fnode->len = len; 258 266 memcpy(fnode->name, name, len > 15 ? 15 : len); 259 - fnode->up = dir->i_ino; 267 + fnode->up = cpu_to_le32(dir->i_ino); 260 268 mark_buffer_dirty(bh); 261 269 262 270 insert_inode_hash(result); 263 271 264 272 hpfs_write_inode_nolock(result); 265 273 d_instantiate(dentry, result); 266 - mutex_unlock(&hpfs_i(dir)->i_mutex); 267 274 brelse(bh); 268 275 hpfs_unlock(dir->i_sb); 269 276 return 0; 270 277 bail2: 271 - mutex_unlock(&hpfs_i(dir)->i_mutex); 272 278 iput(result); 273 279 bail1: 274 280 brelse(bh); ··· 300 310 memset(&dee, 0, sizeof dee); 301 311 dee.archive = 1; 302 312 dee.hidden = name[0] == '.'; 303 - dee.fnode = fno; 304 - dee.creation_date = dee.write_date = dee.read_date = gmt_to_local(dir->i_sb, get_seconds()); 313 + dee.fnode = cpu_to_le32(fno); 314 + dee.creation_date = dee.write_date = dee.read_date = cpu_to_le32(gmt_to_local(dir->i_sb, get_seconds())); 305 315 306 316 result = new_inode(dir->i_sb); 307 317 if (!result) ··· 309 319 result->i_ino = fno; 310 320 hpfs_init_inode(result); 311 321 hpfs_i(result)->i_parent_dir = dir->i_ino; 312 - result->i_ctime.tv_sec = result->i_mtime.tv_sec = result->i_atime.tv_sec = local_to_gmt(dir->i_sb, dee.creation_date); 322 + result->i_ctime.tv_sec = result->i_mtime.tv_sec = result->i_atime.tv_sec = local_to_gmt(dir->i_sb, le32_to_cpu(dee.creation_date)); 313 323 result->i_ctime.tv_nsec = 0; 314 324 result->i_mtime.tv_nsec = 0; 315 325 result->i_atime.tv_nsec 
= 0; ··· 323 333 result->i_op = &page_symlink_inode_operations; 324 334 result->i_data.a_ops = &hpfs_symlink_aops; 325 335 326 - mutex_lock(&hpfs_i(dir)->i_mutex); 327 - r = hpfs_add_dirent(dir, name, len, &dee, 0); 336 + r = hpfs_add_dirent(dir, name, len, &dee); 328 337 if (r == 1) 329 338 goto bail2; 330 339 if (r == -1) { ··· 332 343 } 333 344 fnode->len = len; 334 345 memcpy(fnode->name, name, len > 15 ? 15 : len); 335 - fnode->up = dir->i_ino; 346 + fnode->up = cpu_to_le32(dir->i_ino); 336 347 hpfs_set_ea(result, fnode, "SYMLINK", symlink, strlen(symlink)); 337 348 mark_buffer_dirty(bh); 338 349 brelse(bh); ··· 341 352 342 353 hpfs_write_inode_nolock(result); 343 354 d_instantiate(dentry, result); 344 - mutex_unlock(&hpfs_i(dir)->i_mutex); 345 355 hpfs_unlock(dir->i_sb); 346 356 return 0; 347 357 bail2: 348 - mutex_unlock(&hpfs_i(dir)->i_mutex); 349 358 iput(result); 350 359 bail1: 351 360 brelse(bh); ··· 361 374 struct hpfs_dirent *de; 362 375 struct inode *inode = dentry->d_inode; 363 376 dnode_secno dno; 364 - fnode_secno fno; 365 377 int r; 366 378 int rep = 0; 367 379 int err; ··· 368 382 hpfs_lock(dir->i_sb); 369 383 hpfs_adjust_length(name, &len); 370 384 again: 371 - mutex_lock(&hpfs_i(inode)->i_parent_mutex); 372 - mutex_lock(&hpfs_i(dir)->i_mutex); 373 385 err = -ENOENT; 374 386 de = map_dirent(dir, hpfs_i(dir)->i_dno, name, len, &dno, &qbh); 375 387 if (!de) ··· 381 397 if (de->directory) 382 398 goto out1; 383 399 384 - fno = de->fnode; 385 400 r = hpfs_remove_dirent(dir, dno, de, &qbh, 1); 386 401 switch (r) { 387 402 case 1: ··· 393 410 if (rep++) 394 411 break; 395 412 396 - mutex_unlock(&hpfs_i(dir)->i_mutex); 397 - mutex_unlock(&hpfs_i(inode)->i_parent_mutex); 398 413 dentry_unhash(dentry); 399 414 if (!d_unhashed(dentry)) { 400 415 dput(dentry); ··· 426 445 out1: 427 446 hpfs_brelse4(&qbh); 428 447 out: 429 - mutex_unlock(&hpfs_i(dir)->i_mutex); 430 - mutex_unlock(&hpfs_i(inode)->i_parent_mutex); 431 448 hpfs_unlock(dir->i_sb); 432 449 
return err; 433 450 } ··· 438 459 struct hpfs_dirent *de; 439 460 struct inode *inode = dentry->d_inode; 440 461 dnode_secno dno; 441 - fnode_secno fno; 442 462 int n_items = 0; 443 463 int err; 444 464 int r; 445 465 446 466 hpfs_adjust_length(name, &len); 447 467 hpfs_lock(dir->i_sb); 448 - mutex_lock(&hpfs_i(inode)->i_parent_mutex); 449 - mutex_lock(&hpfs_i(dir)->i_mutex); 450 468 err = -ENOENT; 451 469 de = map_dirent(dir, hpfs_i(dir)->i_dno, name, len, &dno, &qbh); 452 470 if (!de) ··· 462 486 if (n_items) 463 487 goto out1; 464 488 465 - fno = de->fnode; 466 489 r = hpfs_remove_dirent(dir, dno, de, &qbh, 1); 467 490 switch (r) { 468 491 case 1: ··· 480 505 out1: 481 506 hpfs_brelse4(&qbh); 482 507 out: 483 - mutex_unlock(&hpfs_i(dir)->i_mutex); 484 - mutex_unlock(&hpfs_i(inode)->i_parent_mutex); 485 508 hpfs_unlock(dir->i_sb); 486 509 return err; 487 510 } ··· 541 568 542 569 hpfs_lock(i->i_sb); 543 570 /* order doesn't matter, due to VFS exclusion */ 544 - mutex_lock(&hpfs_i(i)->i_parent_mutex); 545 - if (new_inode) 546 - mutex_lock(&hpfs_i(new_inode)->i_parent_mutex); 547 - mutex_lock(&hpfs_i(old_dir)->i_mutex); 548 - if (new_dir != old_dir) 549 - mutex_lock(&hpfs_i(new_dir)->i_mutex); 550 571 551 572 /* Erm? Moving over the empty non-busy directory is perfectly legal */ 552 573 if (new_inode && S_ISDIR(new_inode->i_mode)) { ··· 577 610 578 611 if (new_dir == old_dir) hpfs_brelse4(&qbh); 579 612 580 - hpfs_lock_creation(i->i_sb); 581 - if ((r = hpfs_add_dirent(new_dir, new_name, new_len, &de, 1))) { 582 - hpfs_unlock_creation(i->i_sb); 613 + if ((r = hpfs_add_dirent(new_dir, new_name, new_len, &de))) { 583 614 if (r == -1) hpfs_error(new_dir->i_sb, "hpfs_rename: dirent already exists!"); 584 615 err = r == 1 ? 
-ENOSPC : -EFSERROR; 585 616 if (new_dir != old_dir) hpfs_brelse4(&qbh); ··· 586 621 587 622 if (new_dir == old_dir) 588 623 if (!(dep = map_dirent(old_dir, hpfs_i(old_dir)->i_dno, old_name, old_len, &dno, &qbh))) { 589 - hpfs_unlock_creation(i->i_sb); 590 624 hpfs_error(i->i_sb, "lookup succeeded but map dirent failed at #2"); 591 625 err = -ENOENT; 592 626 goto end1; 593 627 } 594 628 595 629 if ((r = hpfs_remove_dirent(old_dir, dno, dep, &qbh, 0))) { 596 - hpfs_unlock_creation(i->i_sb); 597 630 hpfs_error(i->i_sb, "hpfs_rename: could not remove dirent"); 598 631 err = r == 2 ? -ENOSPC : -EFSERROR; 599 632 goto end1; 600 633 } 601 - hpfs_unlock_creation(i->i_sb); 602 - 634 + 603 635 end: 604 636 hpfs_i(i)->i_parent_dir = new_dir->i_ino; 605 637 if (S_ISDIR(i->i_mode)) { ··· 604 642 drop_nlink(old_dir); 605 643 } 606 644 if ((fnode = hpfs_map_fnode(i->i_sb, i->i_ino, &bh))) { 607 - fnode->up = new_dir->i_ino; 645 + fnode->up = cpu_to_le32(new_dir->i_ino); 608 646 fnode->len = new_len; 609 647 memcpy(fnode->name, new_name, new_len>15?15:new_len); 610 648 if (new_len < 15) memset(&fnode->name[new_len], 0, 15 - new_len); 611 649 mark_buffer_dirty(bh); 612 650 brelse(bh); 613 651 } 614 - hpfs_i(i)->i_conv = hpfs_sb(i->i_sb)->sb_conv; 615 - hpfs_decide_conv(i, new_name, new_len); 616 652 end1: 617 - if (old_dir != new_dir) 618 - mutex_unlock(&hpfs_i(new_dir)->i_mutex); 619 - mutex_unlock(&hpfs_i(old_dir)->i_mutex); 620 - mutex_unlock(&hpfs_i(i)->i_parent_mutex); 621 - if (new_inode) 622 - mutex_unlock(&hpfs_i(new_inode)->i_parent_mutex); 623 653 hpfs_unlock(i->i_sb); 624 654 return err; 625 655 }
+51 -67
fs/hpfs/super.c
··· 18 18 19 19 /* Mark the filesystem dirty, so that chkdsk checks it when os/2 booted */ 20 20 21 - static void mark_dirty(struct super_block *s) 21 + static void mark_dirty(struct super_block *s, int remount) 22 22 { 23 - if (hpfs_sb(s)->sb_chkdsk && !(s->s_flags & MS_RDONLY)) { 23 + if (hpfs_sb(s)->sb_chkdsk && (remount || !(s->s_flags & MS_RDONLY))) { 24 24 struct buffer_head *bh; 25 25 struct hpfs_spare_block *sb; 26 26 if ((sb = hpfs_map_sector(s, 17, &bh, 0))) { 27 27 sb->dirty = 1; 28 28 sb->old_wrote = 0; 29 29 mark_buffer_dirty(bh); 30 + sync_dirty_buffer(bh); 30 31 brelse(bh); 31 32 } 32 33 } ··· 41 40 struct buffer_head *bh; 42 41 struct hpfs_spare_block *sb; 43 42 if (s->s_flags & MS_RDONLY) return; 43 + sync_blockdev(s->s_bdev); 44 44 if ((sb = hpfs_map_sector(s, 17, &bh, 0))) { 45 45 sb->dirty = hpfs_sb(s)->sb_chkdsk > 1 - hpfs_sb(s)->sb_was_error; 46 46 sb->old_wrote = hpfs_sb(s)->sb_chkdsk >= 2 && !hpfs_sb(s)->sb_was_error; 47 47 mark_buffer_dirty(bh); 48 + sync_dirty_buffer(bh); 48 49 brelse(bh); 49 50 } 50 51 } ··· 66 63 if (!hpfs_sb(s)->sb_was_error) { 67 64 if (hpfs_sb(s)->sb_err == 2) { 68 65 printk("; crashing the system because you wanted it\n"); 69 - mark_dirty(s); 66 + mark_dirty(s, 0); 70 67 panic("HPFS panic"); 71 68 } else if (hpfs_sb(s)->sb_err == 1) { 72 69 if (s->s_flags & MS_RDONLY) printk("; already mounted read-only\n"); 73 70 else { 74 71 printk("; remounting read-only\n"); 75 - mark_dirty(s); 72 + mark_dirty(s, 0); 76 73 s->s_flags |= MS_RDONLY; 77 74 } 78 75 } else if (s->s_flags & MS_RDONLY) printk("; going on - but anything won't be destroyed because it's read-only\n"); ··· 105 102 { 106 103 struct hpfs_sb_info *sbi = hpfs_sb(s); 107 104 105 + hpfs_lock(s); 106 + unmark_dirty(s); 107 + hpfs_unlock(s); 108 + 108 109 kfree(sbi->sb_cp_table); 109 110 kfree(sbi->sb_bmp_dir); 110 - unmark_dirty(s); 111 111 s->s_fs_info = NULL; 112 112 kfree(sbi); 113 113 } ··· 135 129 n_bands = (hpfs_sb(s)->sb_fs_size + 0x3fff) >> 14; 136 130 
count = 0; 137 131 for (n = 0; n < n_bands; n++) 138 - count += hpfs_count_one_bitmap(s, hpfs_sb(s)->sb_bmp_dir[n]); 132 + count += hpfs_count_one_bitmap(s, le32_to_cpu(hpfs_sb(s)->sb_bmp_dir[n])); 139 133 return count; 140 134 } 141 135 ··· 194 188 { 195 189 struct hpfs_inode_info *ei = (struct hpfs_inode_info *) foo; 196 190 197 - mutex_init(&ei->i_mutex); 198 - mutex_init(&ei->i_parent_mutex); 199 191 inode_init_once(&ei->vfs_inode); 200 192 } 201 193 ··· 222 218 223 219 enum { 224 220 Opt_help, Opt_uid, Opt_gid, Opt_umask, Opt_case_lower, Opt_case_asis, 225 - Opt_conv_binary, Opt_conv_text, Opt_conv_auto, 226 221 Opt_check_none, Opt_check_normal, Opt_check_strict, 227 222 Opt_err_cont, Opt_err_ro, Opt_err_panic, 228 223 Opt_eas_no, Opt_eas_ro, Opt_eas_rw, ··· 236 233 {Opt_umask, "umask=%o"}, 237 234 {Opt_case_lower, "case=lower"}, 238 235 {Opt_case_asis, "case=asis"}, 239 - {Opt_conv_binary, "conv=binary"}, 240 - {Opt_conv_text, "conv=text"}, 241 - {Opt_conv_auto, "conv=auto"}, 242 236 {Opt_check_none, "check=none"}, 243 237 {Opt_check_normal, "check=normal"}, 244 238 {Opt_check_strict, "check=strict"}, ··· 253 253 }; 254 254 255 255 static int parse_opts(char *opts, uid_t *uid, gid_t *gid, umode_t *umask, 256 - int *lowercase, int *conv, int *eas, int *chk, int *errs, 256 + int *lowercase, int *eas, int *chk, int *errs, 257 257 int *chkdsk, int *timeshift) 258 258 { 259 259 char *p; ··· 294 294 break; 295 295 case Opt_case_asis: 296 296 *lowercase = 0; 297 - break; 298 - case Opt_conv_binary: 299 - *conv = CONV_BINARY; 300 - break; 301 - case Opt_conv_text: 302 - *conv = CONV_TEXT; 303 - break; 304 - case Opt_conv_auto: 305 - *conv = CONV_AUTO; 306 297 break; 307 298 case Opt_check_none: 308 299 *chk = 0; ··· 361 370 umask=xxx set mode of files that don't have mode specified in eas\n\ 362 371 case=lower lowercase all files\n\ 363 372 case=asis do not lowercase files (default)\n\ 364 - conv=binary do not convert CR/LF -> LF (default)\n\ 365 - conv=auto convert 
only files with known text extensions\n\ 366 - conv=text convert all files\n\ 367 373 check=none no fs checks - kernel may crash on corrupted filesystem\n\ 368 374 check=normal do some checks - it should not crash (default)\n\ 369 375 check=strict do extra time-consuming checks, used for debugging\n\ ··· 382 394 uid_t uid; 383 395 gid_t gid; 384 396 umode_t umask; 385 - int lowercase, conv, eas, chk, errs, chkdsk, timeshift; 397 + int lowercase, eas, chk, errs, chkdsk, timeshift; 386 398 int o; 387 399 struct hpfs_sb_info *sbi = hpfs_sb(s); 388 400 char *new_opts = kstrdup(data, GFP_KERNEL); ··· 393 405 lock_super(s); 394 406 uid = sbi->sb_uid; gid = sbi->sb_gid; 395 407 umask = 0777 & ~sbi->sb_mode; 396 - lowercase = sbi->sb_lowercase; conv = sbi->sb_conv; 408 + lowercase = sbi->sb_lowercase; 397 409 eas = sbi->sb_eas; chk = sbi->sb_chk; chkdsk = sbi->sb_chkdsk; 398 410 errs = sbi->sb_err; timeshift = sbi->sb_timeshift; 399 411 400 - if (!(o = parse_opts(data, &uid, &gid, &umask, &lowercase, &conv, 412 + if (!(o = parse_opts(data, &uid, &gid, &umask, &lowercase, 401 413 &eas, &chk, &errs, &chkdsk, &timeshift))) { 402 414 printk("HPFS: bad mount options.\n"); 403 415 goto out_err; ··· 415 427 416 428 sbi->sb_uid = uid; sbi->sb_gid = gid; 417 429 sbi->sb_mode = 0777 & ~umask; 418 - sbi->sb_lowercase = lowercase; sbi->sb_conv = conv; 430 + sbi->sb_lowercase = lowercase; 419 431 sbi->sb_eas = eas; sbi->sb_chk = chk; sbi->sb_chkdsk = chkdsk; 420 432 sbi->sb_err = errs; sbi->sb_timeshift = timeshift; 421 433 422 - if (!(*flags & MS_RDONLY)) mark_dirty(s); 434 + if (!(*flags & MS_RDONLY)) mark_dirty(s, 1); 423 435 424 436 replace_mount_options(s, new_opts); 425 437 ··· 459 471 uid_t uid; 460 472 gid_t gid; 461 473 umode_t umask; 462 - int lowercase, conv, eas, chk, errs, chkdsk, timeshift; 474 + int lowercase, eas, chk, errs, chkdsk, timeshift; 463 475 464 476 dnode_secno root_dno; 465 477 struct hpfs_dirent *de = NULL; 466 478 struct quad_buffer_head qbh; 467 479 468 
480 int o; 469 - 470 - if (num_possible_cpus() > 1) { 471 - printk(KERN_ERR "HPFS is not SMP safe\n"); 472 - return -EINVAL; 473 - } 474 481 475 482 save_mount_options(s, options); 476 483 ··· 478 495 sbi->sb_bmp_dir = NULL; 479 496 sbi->sb_cp_table = NULL; 480 497 481 - mutex_init(&sbi->hpfs_creation_de); 498 + mutex_init(&sbi->hpfs_mutex); 499 + hpfs_lock(s); 482 500 483 501 uid = current_uid(); 484 502 gid = current_gid(); 485 503 umask = current_umask(); 486 504 lowercase = 0; 487 - conv = CONV_BINARY; 488 505 eas = 2; 489 506 chk = 1; 490 507 errs = 1; 491 508 chkdsk = 1; 492 509 timeshift = 0; 493 510 494 - if (!(o = parse_opts(options, &uid, &gid, &umask, &lowercase, &conv, 511 + if (!(o = parse_opts(options, &uid, &gid, &umask, &lowercase, 495 512 &eas, &chk, &errs, &chkdsk, &timeshift))) { 496 513 printk("HPFS: bad mount options.\n"); 497 514 goto bail0; ··· 509 526 if (!(spareblock = hpfs_map_sector(s, 17, &bh2, 0))) goto bail3; 510 527 511 528 /* Check magics */ 512 - if (/*bootblock->magic != BB_MAGIC 513 - ||*/ superblock->magic != SB_MAGIC 514 - || spareblock->magic != SP_MAGIC) { 529 + if (/*le16_to_cpu(bootblock->magic) != BB_MAGIC 530 + ||*/ le32_to_cpu(superblock->magic) != SB_MAGIC 531 + || le32_to_cpu(spareblock->magic) != SP_MAGIC) { 515 532 if (!silent) printk("HPFS: Bad magic ... 
probably not HPFS\n"); 516 533 goto bail4; 517 534 } ··· 532 549 s->s_op = &hpfs_sops; 533 550 s->s_d_op = &hpfs_dentry_operations; 534 551 535 - sbi->sb_root = superblock->root; 536 - sbi->sb_fs_size = superblock->n_sectors; 537 - sbi->sb_bitmaps = superblock->bitmaps; 538 - sbi->sb_dirband_start = superblock->dir_band_start; 539 - sbi->sb_dirband_size = superblock->n_dir_band; 540 - sbi->sb_dmap = superblock->dir_band_bitmap; 552 + sbi->sb_root = le32_to_cpu(superblock->root); 553 + sbi->sb_fs_size = le32_to_cpu(superblock->n_sectors); 554 + sbi->sb_bitmaps = le32_to_cpu(superblock->bitmaps); 555 + sbi->sb_dirband_start = le32_to_cpu(superblock->dir_band_start); 556 + sbi->sb_dirband_size = le32_to_cpu(superblock->n_dir_band); 557 + sbi->sb_dmap = le32_to_cpu(superblock->dir_band_bitmap); 541 558 sbi->sb_uid = uid; 542 559 sbi->sb_gid = gid; 543 560 sbi->sb_mode = 0777 & ~umask; 544 561 sbi->sb_n_free = -1; 545 562 sbi->sb_n_free_dnodes = -1; 546 563 sbi->sb_lowercase = lowercase; 547 - sbi->sb_conv = conv; 548 564 sbi->sb_eas = eas; 549 565 sbi->sb_chk = chk; 550 566 sbi->sb_chkdsk = chkdsk; ··· 555 573 sbi->sb_max_fwd_alloc = 0xffffff; 556 574 557 575 /* Load bitmap directory */ 558 - if (!(sbi->sb_bmp_dir = hpfs_load_bitmap_directory(s, superblock->bitmaps))) 576 + if (!(sbi->sb_bmp_dir = hpfs_load_bitmap_directory(s, le32_to_cpu(superblock->bitmaps)))) 559 577 goto bail4; 560 578 561 579 /* Check for general fs errors*/ ··· 573 591 mark_buffer_dirty(bh2); 574 592 } 575 593 576 - if (spareblock->hotfixes_used || spareblock->n_spares_used) { 594 + if (le32_to_cpu(spareblock->hotfixes_used) || le32_to_cpu(spareblock->n_spares_used)) { 577 595 if (errs >= 2) { 578 596 printk("HPFS: Hotfixes not supported here, try chkdsk\n"); 579 - mark_dirty(s); 597 + mark_dirty(s, 0); 580 598 goto bail4; 581 599 } 582 600 hpfs_error(s, "hotfixes not supported here, try chkdsk"); 583 601 if (errs == 0) printk("HPFS: Proceeding, but your filesystem will be probably corrupted by 
this driver...\n"); 584 602 else printk("HPFS: This driver may read bad files or crash when operating on disk with hotfixes.\n"); 585 603 } 586 - if (spareblock->n_dnode_spares != spareblock->n_dnode_spares_free) { 604 + if (le32_to_cpu(spareblock->n_dnode_spares) != le32_to_cpu(spareblock->n_dnode_spares_free)) { 587 605 if (errs >= 2) { 588 606 printk("HPFS: Spare dnodes used, try chkdsk\n"); 589 - mark_dirty(s); 607 + mark_dirty(s, 0); 590 608 goto bail4; 591 609 } 592 610 hpfs_error(s, "warning: spare dnodes used, try chkdsk"); ··· 594 612 } 595 613 if (chk) { 596 614 unsigned a; 597 - if (superblock->dir_band_end - superblock->dir_band_start + 1 != superblock->n_dir_band || 598 - superblock->dir_band_end < superblock->dir_band_start || superblock->n_dir_band > 0x4000) { 615 + if (le32_to_cpu(superblock->dir_band_end) - le32_to_cpu(superblock->dir_band_start) + 1 != le32_to_cpu(superblock->n_dir_band) || 616 + le32_to_cpu(superblock->dir_band_end) < le32_to_cpu(superblock->dir_band_start) || le32_to_cpu(superblock->n_dir_band) > 0x4000) { 599 617 hpfs_error(s, "dir band size mismatch: dir_band_start==%08x, dir_band_end==%08x, n_dir_band==%08x", 600 - superblock->dir_band_start, superblock->dir_band_end, superblock->n_dir_band); 618 + le32_to_cpu(superblock->dir_band_start), le32_to_cpu(superblock->dir_band_end), le32_to_cpu(superblock->n_dir_band)); 601 619 goto bail4; 602 620 } 603 621 a = sbi->sb_dirband_size; 604 622 sbi->sb_dirband_size = 0; 605 - if (hpfs_chk_sectors(s, superblock->dir_band_start, superblock->n_dir_band, "dir_band") || 606 - hpfs_chk_sectors(s, superblock->dir_band_bitmap, 4, "dir_band_bitmap") || 607 - hpfs_chk_sectors(s, superblock->bitmaps, 4, "bitmaps")) { 608 - mark_dirty(s); 623 + if (hpfs_chk_sectors(s, le32_to_cpu(superblock->dir_band_start), le32_to_cpu(superblock->n_dir_band), "dir_band") || 624 + hpfs_chk_sectors(s, le32_to_cpu(superblock->dir_band_bitmap), 4, "dir_band_bitmap") || 625 + hpfs_chk_sectors(s, 
le32_to_cpu(superblock->bitmaps), 4, "bitmaps")) { 626 + mark_dirty(s, 0); 609 627 goto bail4; 610 628 } 611 629 sbi->sb_dirband_size = a; 612 630 } else printk("HPFS: You really don't want any checks? You are crazy...\n"); 613 631 614 632 /* Load code page table */ 615 - if (spareblock->n_code_pages) 616 - if (!(sbi->sb_cp_table = hpfs_load_code_page(s, spareblock->code_page_dir))) 633 + if (le32_to_cpu(spareblock->n_code_pages)) 634 + if (!(sbi->sb_cp_table = hpfs_load_code_page(s, le32_to_cpu(spareblock->code_page_dir)))) 617 635 printk("HPFS: Warning: code page support is disabled\n"); 618 636 619 637 brelse(bh2); ··· 642 660 if (!de) 643 661 hpfs_error(s, "unable to find root dir"); 644 662 else { 645 - root->i_atime.tv_sec = local_to_gmt(s, de->read_date); 663 + root->i_atime.tv_sec = local_to_gmt(s, le32_to_cpu(de->read_date)); 646 664 root->i_atime.tv_nsec = 0; 647 - root->i_mtime.tv_sec = local_to_gmt(s, de->write_date); 665 + root->i_mtime.tv_sec = local_to_gmt(s, le32_to_cpu(de->write_date)); 648 666 root->i_mtime.tv_nsec = 0; 649 - root->i_ctime.tv_sec = local_to_gmt(s, de->creation_date); 667 + root->i_ctime.tv_sec = local_to_gmt(s, le32_to_cpu(de->creation_date)); 650 668 root->i_ctime.tv_nsec = 0; 651 - hpfs_i(root)->i_ea_size = de->ea_size; 669 + hpfs_i(root)->i_ea_size = le16_to_cpu(de->ea_size); 652 670 hpfs_i(root)->i_parent_dir = root->i_ino; 653 671 if (root->i_size == -1) 654 672 root->i_size = 2048; ··· 656 674 root->i_blocks = 5; 657 675 hpfs_brelse4(&qbh); 658 676 } 677 + hpfs_unlock(s); 659 678 return 0; 660 679 661 680 bail4: brelse(bh2); ··· 664 681 bail2: brelse(bh0); 665 682 bail1: 666 683 bail0: 684 + hpfs_unlock(s); 667 685 kfree(sbi->sb_bmp_dir); 668 686 kfree(sbi->sb_cp_table); 669 687 s->s_fs_info = NULL;
+4 -4
fs/logfs/super.c
··· 480 480 !read_only) 481 481 return -EIO; 482 482 483 - mutex_init(&super->s_dirop_mutex); 484 - mutex_init(&super->s_object_alias_mutex); 485 - INIT_LIST_HEAD(&super->s_freeing_list); 486 - 487 483 ret = logfs_init_rw(sb); 488 484 if (ret) 489 485 return ret; ··· 596 600 super = kzalloc(sizeof(*super), GFP_KERNEL); 597 601 if (!super) 598 602 return ERR_PTR(-ENOMEM); 603 + 604 + mutex_init(&super->s_dirop_mutex); 605 + mutex_init(&super->s_object_alias_mutex); 606 + INIT_LIST_HEAD(&super->s_freeing_list); 599 607 600 608 if (!devname) 601 609 err = logfs_get_sb_bdev(super, type, devname);
+1 -1
fs/namei.c
··· 179 179 static int acl_permission_check(struct inode *inode, int mask, unsigned int flags, 180 180 int (*check_acl)(struct inode *inode, int mask, unsigned int flags)) 181 181 { 182 - umode_t mode = inode->i_mode; 182 + unsigned int mode = inode->i_mode; 183 183 184 184 mask &= MAY_READ | MAY_WRITE | MAY_EXEC; 185 185
+2 -2
fs/nfs/namespace.c
··· 119 119 } 120 120 121 121 #ifdef CONFIG_NFS_V4 122 - static rpc_authflavor_t nfs_find_best_sec(struct nfs4_secinfo_flavors *flavors, struct inode *inode) 122 + static rpc_authflavor_t nfs_find_best_sec(struct nfs4_secinfo_flavors *flavors) 123 123 { 124 124 struct gss_api_mech *mech; 125 125 struct xdr_netobj oid; ··· 166 166 } 167 167 flavors = page_address(page); 168 168 ret = secinfo(parent->d_inode, &dentry->d_name, flavors); 169 - *flavor = nfs_find_best_sec(flavors, dentry->d_inode); 169 + *flavor = nfs_find_best_sec(flavors); 170 170 put_page(page); 171 171 } 172 172
+1
fs/nfs/nfs4_fs.h
··· 47 47 NFS4CLNT_LAYOUTRECALL, 48 48 NFS4CLNT_SESSION_RESET, 49 49 NFS4CLNT_RECALL_SLOT, 50 + NFS4CLNT_LEASE_CONFIRM, 50 51 }; 51 52 52 53 enum nfs4_session_state {
+16 -11
fs/nfs/nfs4filelayout.c
··· 117 117 case -EKEYEXPIRED: 118 118 rpc_delay(task, FILELAYOUT_POLL_RETRY_MAX); 119 119 break; 120 + case -NFS4ERR_RETRY_UNCACHED_REP: 121 + break; 120 122 default: 121 123 dprintk("%s DS error. Retry through MDS %d\n", __func__, 122 124 task->tk_status); ··· 418 416 filelayout_check_layout(struct pnfs_layout_hdr *lo, 419 417 struct nfs4_filelayout_segment *fl, 420 418 struct nfs4_layoutget_res *lgr, 421 - struct nfs4_deviceid *id) 419 + struct nfs4_deviceid *id, 420 + gfp_t gfp_flags) 422 421 { 423 422 struct nfs4_file_layout_dsaddr *dsaddr; 424 423 int status = -EINVAL; ··· 442 439 /* find and reference the deviceid */ 443 440 dsaddr = nfs4_fl_find_get_deviceid(id); 444 441 if (dsaddr == NULL) { 445 - dsaddr = get_device_info(lo->plh_inode, id); 442 + dsaddr = get_device_info(lo->plh_inode, id, gfp_flags); 446 443 if (dsaddr == NULL) 447 444 goto out; 448 445 } ··· 503 500 filelayout_decode_layout(struct pnfs_layout_hdr *flo, 504 501 struct nfs4_filelayout_segment *fl, 505 502 struct nfs4_layoutget_res *lgr, 506 - struct nfs4_deviceid *id) 503 + struct nfs4_deviceid *id, 504 + gfp_t gfp_flags) 507 505 { 508 506 struct xdr_stream stream; 509 507 struct xdr_buf buf = { ··· 520 516 521 517 dprintk("%s: set_layout_map Begin\n", __func__); 522 518 523 - scratch = alloc_page(GFP_KERNEL); 519 + scratch = alloc_page(gfp_flags); 524 520 if (!scratch) 525 521 return -ENOMEM; 526 522 ··· 558 554 goto out_err; 559 555 560 556 fl->fh_array = kzalloc(fl->num_fh * sizeof(struct nfs_fh *), 561 - GFP_KERNEL); 557 + gfp_flags); 562 558 if (!fl->fh_array) 563 559 goto out_err; 564 560 565 561 for (i = 0; i < fl->num_fh; i++) { 566 562 /* Do we want to use a mempool here? 
*/ 567 - fl->fh_array[i] = kmalloc(sizeof(struct nfs_fh), GFP_KERNEL); 563 + fl->fh_array[i] = kmalloc(sizeof(struct nfs_fh), gfp_flags); 568 564 if (!fl->fh_array[i]) 569 565 goto out_err_free; 570 566 ··· 609 605 610 606 static struct pnfs_layout_segment * 611 607 filelayout_alloc_lseg(struct pnfs_layout_hdr *layoutid, 612 - struct nfs4_layoutget_res *lgr) 608 + struct nfs4_layoutget_res *lgr, 609 + gfp_t gfp_flags) 613 610 { 614 611 struct nfs4_filelayout_segment *fl; 615 612 int rc; 616 613 struct nfs4_deviceid id; 617 614 618 615 dprintk("--> %s\n", __func__); 619 - fl = kzalloc(sizeof(*fl), GFP_KERNEL); 616 + fl = kzalloc(sizeof(*fl), gfp_flags); 620 617 if (!fl) 621 618 return NULL; 622 619 623 - rc = filelayout_decode_layout(layoutid, fl, lgr, &id); 624 - if (rc != 0 || filelayout_check_layout(layoutid, fl, lgr, &id)) { 620 + rc = filelayout_decode_layout(layoutid, fl, lgr, &id, gfp_flags); 621 + if (rc != 0 || filelayout_check_layout(layoutid, fl, lgr, &id, gfp_flags)) { 625 622 _filelayout_free_lseg(fl); 626 623 return NULL; 627 624 } ··· 638 633 int size = (fl->stripe_type == STRIPE_SPARSE) ? 639 634 fl->dsaddr->ds_num : fl->dsaddr->stripe_count; 640 635 641 - fl->commit_buckets = kcalloc(size, sizeof(struct list_head), GFP_KERNEL); 636 + fl->commit_buckets = kcalloc(size, sizeof(struct list_head), gfp_flags); 642 637 if (!fl->commit_buckets) { 643 638 filelayout_free_lseg(&fl->generic_hdr); 644 639 return NULL;
+1 -1
fs/nfs/nfs4filelayout.h
··· 104 104 nfs4_fl_find_get_deviceid(struct nfs4_deviceid *dev_id); 105 105 extern void nfs4_fl_put_deviceid(struct nfs4_file_layout_dsaddr *dsaddr); 106 106 struct nfs4_file_layout_dsaddr * 107 - get_device_info(struct inode *inode, struct nfs4_deviceid *dev_id); 107 + get_device_info(struct inode *inode, struct nfs4_deviceid *dev_id, gfp_t gfp_flags); 108 108 109 109 #endif /* FS_NFS_NFS4FILELAYOUT_H */
+17 -17
fs/nfs/nfs4filelayoutdev.c
··· 225 225 } 226 226 227 227 static struct nfs4_pnfs_ds * 228 - nfs4_pnfs_ds_add(struct inode *inode, u32 ip_addr, u32 port) 228 + nfs4_pnfs_ds_add(struct inode *inode, u32 ip_addr, u32 port, gfp_t gfp_flags) 229 229 { 230 230 struct nfs4_pnfs_ds *tmp_ds, *ds; 231 231 232 - ds = kzalloc(sizeof(*tmp_ds), GFP_KERNEL); 232 + ds = kzalloc(sizeof(*tmp_ds), gfp_flags); 233 233 if (!ds) 234 234 goto out; 235 235 ··· 261 261 * Currently only support ipv4, and one multi-path address. 262 262 */ 263 263 static struct nfs4_pnfs_ds * 264 - decode_and_add_ds(struct xdr_stream *streamp, struct inode *inode) 264 + decode_and_add_ds(struct xdr_stream *streamp, struct inode *inode, gfp_t gfp_flags) 265 265 { 266 266 struct nfs4_pnfs_ds *ds = NULL; 267 267 char *buf; ··· 303 303 rlen); 304 304 goto out_err; 305 305 } 306 - buf = kmalloc(rlen + 1, GFP_KERNEL); 306 + buf = kmalloc(rlen + 1, gfp_flags); 307 307 if (!buf) { 308 308 dprintk("%s: Not enough memory\n", __func__); 309 309 goto out_err; ··· 333 333 sscanf(pstr, "-%d-%d", &tmp[0], &tmp[1]); 334 334 port = htons((tmp[0] << 8) | (tmp[1])); 335 335 336 - ds = nfs4_pnfs_ds_add(inode, ip_addr, port); 336 + ds = nfs4_pnfs_ds_add(inode, ip_addr, port, gfp_flags); 337 337 dprintk("%s: Decoded address and port %s\n", __func__, buf); 338 338 out_free: 339 339 kfree(buf); ··· 343 343 344 344 /* Decode opaque device data and return the result */ 345 345 static struct nfs4_file_layout_dsaddr* 346 - decode_device(struct inode *ino, struct pnfs_device *pdev) 346 + decode_device(struct inode *ino, struct pnfs_device *pdev, gfp_t gfp_flags) 347 347 { 348 348 int i; 349 349 u32 cnt, num; ··· 362 362 struct page *scratch; 363 363 364 364 /* set up xdr stream */ 365 - scratch = alloc_page(GFP_KERNEL); 365 + scratch = alloc_page(gfp_flags); 366 366 if (!scratch) 367 367 goto out_err; 368 368 ··· 384 384 } 385 385 386 386 /* read stripe indices */ 387 - stripe_indices = kcalloc(cnt, sizeof(u8), GFP_KERNEL); 387 + stripe_indices = kcalloc(cnt, 
sizeof(u8), gfp_flags); 388 388 if (!stripe_indices) 389 389 goto out_err_free_scratch; 390 390 ··· 423 423 424 424 dsaddr = kzalloc(sizeof(*dsaddr) + 425 425 (sizeof(struct nfs4_pnfs_ds *) * (num - 1)), 426 - GFP_KERNEL); 426 + gfp_flags); 427 427 if (!dsaddr) 428 428 goto out_err_free_stripe_indices; 429 429 ··· 452 452 for (j = 0; j < mp_count; j++) { 453 453 if (j == 0) { 454 454 dsaddr->ds_list[i] = decode_and_add_ds(&stream, 455 - ino); 455 + ino, gfp_flags); 456 456 if (dsaddr->ds_list[i] == NULL) 457 457 goto out_err_free_deviceid; 458 458 } else { ··· 503 503 * available devices. 504 504 */ 505 505 static struct nfs4_file_layout_dsaddr * 506 - decode_and_add_device(struct inode *inode, struct pnfs_device *dev) 506 + decode_and_add_device(struct inode *inode, struct pnfs_device *dev, gfp_t gfp_flags) 507 507 { 508 508 struct nfs4_file_layout_dsaddr *d, *new; 509 509 long hash; 510 510 511 - new = decode_device(inode, dev); 511 + new = decode_device(inode, dev, gfp_flags); 512 512 if (!new) { 513 513 printk(KERN_WARNING "%s: Could not decode or add device\n", 514 514 __func__); ··· 537 537 * of available devices, and return it. 
538 538 */ 539 539 struct nfs4_file_layout_dsaddr * 540 - get_device_info(struct inode *inode, struct nfs4_deviceid *dev_id) 540 + get_device_info(struct inode *inode, struct nfs4_deviceid *dev_id, gfp_t gfp_flags) 541 541 { 542 542 struct pnfs_device *pdev = NULL; 543 543 u32 max_resp_sz; ··· 556 556 dprintk("%s inode %p max_resp_sz %u max_pages %d\n", 557 557 __func__, inode, max_resp_sz, max_pages); 558 558 559 - pdev = kzalloc(sizeof(struct pnfs_device), GFP_KERNEL); 559 + pdev = kzalloc(sizeof(struct pnfs_device), gfp_flags); 560 560 if (pdev == NULL) 561 561 return NULL; 562 562 563 - pages = kzalloc(max_pages * sizeof(struct page *), GFP_KERNEL); 563 + pages = kzalloc(max_pages * sizeof(struct page *), gfp_flags); 564 564 if (pages == NULL) { 565 565 kfree(pdev); 566 566 return NULL; 567 567 } 568 568 for (i = 0; i < max_pages; i++) { 569 - pages[i] = alloc_page(GFP_KERNEL); 569 + pages[i] = alloc_page(gfp_flags); 570 570 if (!pages[i]) 571 571 goto out_free; 572 572 } ··· 587 587 * Found new device, need to decode it and then add it to the 588 588 * list of known devices for this mountpoint. 589 589 */ 590 - dsaddr = decode_and_add_device(inode, pdev); 590 + dsaddr = decode_and_add_device(inode, pdev, gfp_flags); 591 591 out_free: 592 592 for (i = 0; i < max_pages; i++) 593 593 __free_page(pages[i]);
+66 -62
fs/nfs/nfs4proc.c
··· 46 46 #include <linux/nfs4.h> 47 47 #include <linux/nfs_fs.h> 48 48 #include <linux/nfs_page.h> 49 + #include <linux/nfs_mount.h> 49 50 #include <linux/namei.h> 50 51 #include <linux/mount.h> 51 52 #include <linux/module.h> ··· 300 299 ret = nfs4_delay(server->client, &exception->timeout); 301 300 if (ret != 0) 302 301 break; 302 + case -NFS4ERR_RETRY_UNCACHED_REP: 303 303 case -NFS4ERR_OLD_STATEID: 304 304 exception->retry = 1; 305 305 break; ··· 445 443 if (res->sr_status == 1) 446 444 res->sr_status = NFS_OK; 447 445 448 - /* -ERESTARTSYS can result in skipping nfs41_sequence_setup */ 449 - if (!res->sr_slot) 446 + /* don't increment the sequence number if the task wasn't sent */ 447 + if (!RPC_WAS_SENT(task)) 450 448 goto out; 451 449 452 450 /* Check the SEQUENCE operation status */ ··· 2187 2185 struct nfs4_exception exception = { }; 2188 2186 int err; 2189 2187 do { 2190 - err = nfs4_handle_exception(server, 2191 - _nfs4_lookup_root(server, fhandle, info), 2192 - &exception); 2188 + err = _nfs4_lookup_root(server, fhandle, info); 2189 + switch (err) { 2190 + case 0: 2191 + case -NFS4ERR_WRONGSEC: 2192 + break; 2193 + default: 2194 + err = nfs4_handle_exception(server, err, &exception); 2195 + } 2193 2196 } while (exception.retry); 2194 2197 return err; 2195 2198 } ··· 2215 2208 return ret; 2216 2209 } 2217 2210 2211 + static int nfs4_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle, 2212 + struct nfs_fsinfo *info) 2213 + { 2214 + int i, len, status = 0; 2215 + rpc_authflavor_t flav_array[NFS_MAX_SECFLAVORS]; 2216 + 2217 + len = gss_mech_list_pseudoflavors(&flav_array[0]); 2218 + flav_array[len] = RPC_AUTH_NULL; 2219 + len += 1; 2220 + 2221 + for (i = 0; i < len; i++) { 2222 + status = nfs4_lookup_root_sec(server, fhandle, info, flav_array[i]); 2223 + if (status == -NFS4ERR_WRONGSEC || status == -EACCES) 2224 + continue; 2225 + break; 2226 + } 2227 + /* 2228 + * -EACCESS could mean that the user doesn't have correct permissions 2229 + * to 
access the mount. It could also mean that we tried to mount 2230 + * with a gss auth flavor, but rpc.gssd isn't running. Either way, 2231 + * existing mount programs don't handle -EACCES very well so it should 2232 + * be mapped to -EPERM instead. 2233 + */ 2234 + if (status == -EACCES) 2235 + status = -EPERM; 2236 + return status; 2237 + } 2238 + 2218 2239 /* 2219 2240 * get the file handle for the "/" directory on the server 2220 2241 */ 2221 2242 static int nfs4_proc_get_root(struct nfs_server *server, struct nfs_fh *fhandle, 2222 2243 struct nfs_fsinfo *info) 2223 2244 { 2224 - int i, len, status = 0; 2225 - rpc_authflavor_t flav_array[NFS_MAX_SECFLAVORS + 2]; 2226 - 2227 - flav_array[0] = RPC_AUTH_UNIX; 2228 - len = gss_mech_list_pseudoflavors(&flav_array[1]); 2229 - flav_array[1+len] = RPC_AUTH_NULL; 2230 - len += 2; 2231 - 2232 - for (i = 0; i < len; i++) { 2233 - status = nfs4_lookup_root_sec(server, fhandle, info, flav_array[i]); 2234 - if (status != -EPERM) 2235 - break; 2236 - } 2245 + int status = nfs4_lookup_root(server, fhandle, info); 2246 + if ((status == -NFS4ERR_WRONGSEC) && !(server->flags & NFS_MOUNT_SECFLAVOUR)) 2247 + /* 2248 + * A status of -NFS4ERR_WRONGSEC will be mapped to -EPERM 2249 + * by nfs4_map_errors() as this function exits. 
2250 + */ 2251 + status = nfs4_find_root_sec(server, fhandle, info); 2237 2252 if (status == 0) 2238 2253 status = nfs4_server_capabilities(server, fhandle); 2239 2254 if (status == 0) ··· 3696 3667 rpc_delay(task, NFS4_POLL_RETRY_MAX); 3697 3668 task->tk_status = 0; 3698 3669 return -EAGAIN; 3670 + case -NFS4ERR_RETRY_UNCACHED_REP: 3699 3671 case -NFS4ERR_OLD_STATEID: 3700 3672 task->tk_status = 0; 3701 3673 return -EAGAIN; ··· 3753 3723 sizeof(setclientid.sc_uaddr), "%s.%u.%u", 3754 3724 clp->cl_ipaddr, port >> 8, port & 255); 3755 3725 3756 - status = rpc_call_sync(clp->cl_rpcclient, &msg, 0); 3726 + status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT); 3757 3727 if (status != -NFS4ERR_CLID_INUSE) 3758 3728 break; 3759 - if (signalled()) 3729 + if (loop != 0) { 3730 + ++clp->cl_id_uniquifier; 3760 3731 break; 3761 - if (loop++ & 1) 3762 - ssleep(clp->cl_lease_time / HZ + 1); 3763 - else 3764 - if (++clp->cl_id_uniquifier == 0) 3765 - break; 3732 + } 3733 + ++loop; 3734 + ssleep(clp->cl_lease_time / HZ + 1); 3766 3735 } 3767 3736 return status; 3768 3737 } 3769 3738 3770 - static int _nfs4_proc_setclientid_confirm(struct nfs_client *clp, 3739 + int nfs4_proc_setclientid_confirm(struct nfs_client *clp, 3771 3740 struct nfs4_setclientid_res *arg, 3772 3741 struct rpc_cred *cred) 3773 3742 { ··· 3781 3752 int status; 3782 3753 3783 3754 now = jiffies; 3784 - status = rpc_call_sync(clp->cl_rpcclient, &msg, 0); 3755 + status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT); 3785 3756 if (status == 0) { 3786 3757 spin_lock(&clp->cl_lock); 3787 3758 clp->cl_lease_time = fsinfo.lease_time * HZ; ··· 3789 3760 spin_unlock(&clp->cl_lock); 3790 3761 } 3791 3762 return status; 3792 - } 3793 - 3794 - int nfs4_proc_setclientid_confirm(struct nfs_client *clp, 3795 - struct nfs4_setclientid_res *arg, 3796 - struct rpc_cred *cred) 3797 - { 3798 - long timeout = 0; 3799 - int err; 3800 - do { 3801 - err = _nfs4_proc_setclientid_confirm(clp, arg, cred); 3802 - 
switch (err) { 3803 - case 0: 3804 - return err; 3805 - case -NFS4ERR_RESOURCE: 3806 - /* The IBM lawyers misread another document! */ 3807 - case -NFS4ERR_DELAY: 3808 - err = nfs4_delay(clp->cl_rpcclient, &timeout); 3809 - } 3810 - } while (err == 0); 3811 - return err; 3812 3763 } 3813 3764 3814 3765 struct nfs4_delegreturndata { ··· 4795 4786 init_utsname()->domainname, 4796 4787 clp->cl_rpcclient->cl_auth->au_flavor); 4797 4788 4798 - status = rpc_call_sync(clp->cl_rpcclient, &msg, 0); 4789 + status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT); 4799 4790 if (!status) 4800 4791 status = nfs4_check_cl_exchange_flags(clp->cl_exchange_flags); 4801 4792 dprintk("<-- %s status= %d\n", __func__, status); ··· 4846 4837 dprintk("%s Retry: tk_status %d\n", __func__, task->tk_status); 4847 4838 rpc_delay(task, NFS4_POLL_RETRY_MIN); 4848 4839 task->tk_status = 0; 4840 + /* fall through */ 4841 + case -NFS4ERR_RETRY_UNCACHED_REP: 4849 4842 nfs_restart_rpc(task, data->clp); 4850 4843 return; 4851 4844 } ··· 4880 4869 .rpc_client = clp->cl_rpcclient, 4881 4870 .rpc_message = &msg, 4882 4871 .callback_ops = &nfs4_get_lease_time_ops, 4883 - .callback_data = &data 4872 + .callback_data = &data, 4873 + .flags = RPC_TASK_TIMEOUT, 4884 4874 }; 4885 4875 int status; 4886 4876 ··· 5183 5171 nfs4_init_channel_attrs(&args); 5184 5172 args.flags = (SESSION4_PERSIST | SESSION4_BACK_CHAN); 5185 5173 5186 - status = rpc_call_sync(session->clp->cl_rpcclient, &msg, 0); 5174 + status = rpc_call_sync(session->clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT); 5187 5175 5188 5176 if (!status) 5189 5177 /* Verify the session's negotiated channel_attrs values */ ··· 5206 5194 int status; 5207 5195 unsigned *ptr; 5208 5196 struct nfs4_session *session = clp->cl_session; 5209 - long timeout = 0; 5210 - int err; 5211 5197 5212 5198 dprintk("--> %s clp=%p session=%p\n", __func__, clp, session); 5213 5199 5214 - do { 5215 - status = _nfs4_proc_create_session(clp); 5216 - if (status == 
-NFS4ERR_DELAY) { 5217 - err = nfs4_delay(clp->cl_rpcclient, &timeout); 5218 - if (err) 5219 - status = err; 5220 - } 5221 - } while (status == -NFS4ERR_DELAY); 5222 - 5200 + status = _nfs4_proc_create_session(clp); 5223 5201 if (status) 5224 5202 goto out; 5225 5203 ··· 5250 5248 msg.rpc_argp = session; 5251 5249 msg.rpc_resp = NULL; 5252 5250 msg.rpc_cred = NULL; 5253 - status = rpc_call_sync(session->clp->cl_rpcclient, &msg, 0); 5251 + status = rpc_call_sync(session->clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT); 5254 5252 5255 5253 if (status) 5256 5254 printk(KERN_WARNING ··· 5483 5481 break; 5484 5482 case -NFS4ERR_DELAY: 5485 5483 rpc_delay(task, NFS4_POLL_RETRY_MAX); 5484 + /* fall through */ 5485 + case -NFS4ERR_RETRY_UNCACHED_REP: 5486 5486 return -EAGAIN; 5487 5487 default: 5488 5488 nfs4_schedule_lease_recovery(clp);
+34 -17
fs/nfs/nfs4state.c
··· 64 64 65 65 int nfs4_init_clientid(struct nfs_client *clp, struct rpc_cred *cred) 66 66 { 67 - struct nfs4_setclientid_res clid; 67 + struct nfs4_setclientid_res clid = { 68 + .clientid = clp->cl_clientid, 69 + .confirm = clp->cl_confirm, 70 + }; 68 71 unsigned short port; 69 72 int status; 70 73 74 + if (test_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state)) 75 + goto do_confirm; 71 76 port = nfs_callback_tcpport; 72 77 if (clp->cl_addr.ss_family == AF_INET6) 73 78 port = nfs_callback_tcpport6; ··· 80 75 status = nfs4_proc_setclientid(clp, NFS4_CALLBACK, port, cred, &clid); 81 76 if (status != 0) 82 77 goto out; 78 + clp->cl_clientid = clid.clientid; 79 + clp->cl_confirm = clid.confirm; 80 + set_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state); 81 + do_confirm: 83 82 status = nfs4_proc_setclientid_confirm(clp, &clid, cred); 84 83 if (status != 0) 85 84 goto out; 86 - clp->cl_clientid = clid.clientid; 85 + clear_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state); 87 86 nfs4_schedule_state_renewal(clp); 88 87 out: 89 88 return status; ··· 239 230 { 240 231 int status; 241 232 233 + if (test_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state)) 234 + goto do_confirm; 242 235 nfs4_begin_drain_session(clp); 243 236 status = nfs4_proc_exchange_id(clp, cred); 244 237 if (status != 0) 245 238 goto out; 239 + set_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state); 240 + do_confirm: 246 241 status = nfs4_proc_create_session(clp); 247 242 if (status != 0) 248 243 goto out; 244 + clear_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state); 249 245 nfs41_setup_state_renewal(clp); 250 246 nfs_mark_client_ready(clp, NFS_CS_READY); 251 247 out: ··· 1598 1584 */ 1599 1585 static void nfs4_set_lease_expired(struct nfs_client *clp, int status) 1600 1586 { 1601 - if (nfs4_has_session(clp)) { 1602 - switch (status) { 1603 - case -NFS4ERR_DELAY: 1604 - case -NFS4ERR_CLID_INUSE: 1605 - case -EAGAIN: 1606 - break; 1587 + switch (status) { 1588 + case -NFS4ERR_CLID_INUSE: 1589 + case -NFS4ERR_STALE_CLIENTID: 1590 + 
clear_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state); 1591 + break; 1592 + case -NFS4ERR_DELAY: 1593 + case -ETIMEDOUT: 1594 + case -EAGAIN: 1595 + ssleep(1); 1596 + break; 1607 1597 1608 - case -EKEYEXPIRED: 1609 - nfs4_warn_keyexpired(clp->cl_hostname); 1610 - case -NFS4ERR_NOT_SAME: /* FixMe: implement recovery 1611 - * in nfs4_exchange_id */ 1612 - default: 1613 - return; 1614 - } 1598 + case -EKEYEXPIRED: 1599 + nfs4_warn_keyexpired(clp->cl_hostname); 1600 + case -NFS4ERR_NOT_SAME: /* FixMe: implement recovery 1601 + * in nfs4_exchange_id */ 1602 + default: 1603 + return; 1615 1604 } 1616 1605 set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state); 1617 1606 } ··· 1624 1607 int status = 0; 1625 1608 1626 1609 /* Ensure exclusive access to NFSv4 state */ 1627 - for(;;) { 1610 + do { 1628 1611 if (test_and_clear_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state)) { 1629 1612 /* We're going to have to re-establish a clientid */ 1630 1613 status = nfs4_reclaim_lease(clp); ··· 1708 1691 break; 1709 1692 if (test_and_set_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) != 0) 1710 1693 break; 1711 - } 1694 + } while (atomic_read(&clp->cl_count) > 1); 1712 1695 return; 1713 1696 out_error: 1714 1697 printk(KERN_WARNING "Error: state manager failed on NFSv4 server %s"
+28 -25
fs/nfs/nfs4xdr.c
··· 1452 1452 1453 1453 static void encode_readdir(struct xdr_stream *xdr, const struct nfs4_readdir_arg *readdir, struct rpc_rqst *req, struct compound_hdr *hdr) 1454 1454 { 1455 - uint32_t attrs[2] = {0, 0}; 1455 + uint32_t attrs[2] = { 1456 + FATTR4_WORD0_RDATTR_ERROR, 1457 + FATTR4_WORD1_MOUNTED_ON_FILEID, 1458 + }; 1456 1459 uint32_t dircount = readdir->count >> 1; 1457 1460 __be32 *p; 1458 1461 1459 1462 if (readdir->plus) { 1460 1463 attrs[0] |= FATTR4_WORD0_TYPE|FATTR4_WORD0_CHANGE|FATTR4_WORD0_SIZE| 1461 - FATTR4_WORD0_FSID|FATTR4_WORD0_FILEHANDLE; 1464 + FATTR4_WORD0_FSID|FATTR4_WORD0_FILEHANDLE|FATTR4_WORD0_FILEID; 1462 1465 attrs[1] |= FATTR4_WORD1_MODE|FATTR4_WORD1_NUMLINKS|FATTR4_WORD1_OWNER| 1463 1466 FATTR4_WORD1_OWNER_GROUP|FATTR4_WORD1_RAWDEV| 1464 1467 FATTR4_WORD1_SPACE_USED|FATTR4_WORD1_TIME_ACCESS| 1465 1468 FATTR4_WORD1_TIME_METADATA|FATTR4_WORD1_TIME_MODIFY; 1466 1469 dircount >>= 1; 1467 1470 } 1468 - attrs[0] |= FATTR4_WORD0_RDATTR_ERROR|FATTR4_WORD0_FILEID; 1469 - attrs[1] |= FATTR4_WORD1_MOUNTED_ON_FILEID; 1470 - /* Switch to mounted_on_fileid if the server supports it */ 1471 - if (readdir->bitmask[1] & FATTR4_WORD1_MOUNTED_ON_FILEID) 1472 - attrs[0] &= ~FATTR4_WORD0_FILEID; 1473 - else 1474 - attrs[1] &= ~FATTR4_WORD1_MOUNTED_ON_FILEID; 1471 + /* Use mounted_on_fileid only if the server supports it */ 1472 + if (!(readdir->bitmask[1] & FATTR4_WORD1_MOUNTED_ON_FILEID)) 1473 + attrs[0] |= FATTR4_WORD0_FILEID; 1475 1474 1476 1475 p = reserve_space(xdr, 12+NFS4_VERIFIER_SIZE+20); 1477 1476 *p++ = cpu_to_be32(OP_READDIR); ··· 3139 3140 goto out_overflow; 3140 3141 xdr_decode_hyper(p, fileid); 3141 3142 bitmap[1] &= ~FATTR4_WORD1_MOUNTED_ON_FILEID; 3142 - ret = NFS_ATTR_FATTR_FILEID; 3143 + ret = NFS_ATTR_FATTR_MOUNTED_ON_FILEID; 3143 3144 } 3144 3145 dprintk("%s: fileid=%Lu\n", __func__, (unsigned long long)*fileid); 3145 3146 return ret; ··· 4001 4002 { 4002 4003 int status; 4003 4004 umode_t fmode = 0; 4004 - uint64_t fileid; 4005 4005 
uint32_t type; 4006 4006 4007 4007 status = decode_attr_type(xdr, bitmap, &type); ··· 4099 4101 goto xdr_error; 4100 4102 fattr->valid |= status; 4101 4103 4102 - status = decode_attr_mounted_on_fileid(xdr, bitmap, &fileid); 4104 + status = decode_attr_mounted_on_fileid(xdr, bitmap, &fattr->mounted_on_fileid); 4103 4105 if (status < 0) 4104 4106 goto xdr_error; 4105 - if (status != 0 && !(fattr->valid & status)) { 4106 - fattr->fileid = fileid; 4107 - fattr->valid |= status; 4108 - } 4107 + fattr->valid |= status; 4109 4108 4110 4109 xdr_error: 4111 4110 dprintk("%s: xdr returned %d\n", __func__, -status); ··· 4833 4838 struct nfs4_secinfo_flavor *sec_flavor; 4834 4839 int status; 4835 4840 __be32 *p; 4836 - int i; 4841 + int i, num_flavors; 4837 4842 4838 4843 status = decode_op_hdr(xdr, OP_SECINFO); 4844 + if (status) 4845 + goto out; 4839 4846 p = xdr_inline_decode(xdr, 4); 4840 4847 if (unlikely(!p)) 4841 4848 goto out_overflow; 4842 - res->flavors->num_flavors = be32_to_cpup(p); 4843 4849 4844 - for (i = 0; i < res->flavors->num_flavors; i++) { 4850 + res->flavors->num_flavors = 0; 4851 + num_flavors = be32_to_cpup(p); 4852 + 4853 + for (i = 0; i < num_flavors; i++) { 4845 4854 sec_flavor = &res->flavors->flavors[i]; 4846 - if ((char *)&sec_flavor[1] - (char *)res > PAGE_SIZE) 4855 + if ((char *)&sec_flavor[1] - (char *)res->flavors > PAGE_SIZE) 4847 4856 break; 4848 4857 4849 4858 p = xdr_inline_decode(xdr, 4); ··· 4856 4857 sec_flavor->flavor = be32_to_cpup(p); 4857 4858 4858 4859 if (sec_flavor->flavor == RPC_AUTH_GSS) { 4859 - if (decode_secinfo_gss(xdr, sec_flavor)) 4860 - break; 4860 + status = decode_secinfo_gss(xdr, sec_flavor); 4861 + if (status) 4862 + goto out; 4861 4863 } 4864 + res->flavors->num_flavors++; 4862 4865 } 4863 4866 4864 - return 0; 4865 - 4867 + out: 4868 + return status; 4866 4869 out_overflow: 4867 4870 print_overflow_msg(__func__, xdr); 4868 4871 return -EIO; ··· 6409 6408 if (decode_getfattr_attrs(xdr, bitmap, entry->fattr, 
entry->fh, 6410 6409 entry->server, 1) < 0) 6411 6410 goto out_overflow; 6412 - if (entry->fattr->valid & NFS_ATTR_FATTR_FILEID) 6411 + if (entry->fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID) 6412 + entry->ino = entry->fattr->mounted_on_fileid; 6413 + else if (entry->fattr->valid & NFS_ATTR_FATTR_FILEID) 6413 6414 entry->ino = entry->fattr->fileid; 6414 6415 6415 6416 entry->d_type = DT_UNKNOWN;
+27 -15
fs/nfs/pnfs.c
··· 383 383 plh_layouts); 384 384 dprintk("%s freeing layout for inode %lu\n", __func__, 385 385 lo->plh_inode->i_ino); 386 + list_del_init(&lo->plh_layouts); 386 387 pnfs_destroy_layout(NFS_I(lo->plh_inode)); 387 388 } 388 389 } ··· 467 466 static struct pnfs_layout_segment * 468 467 send_layoutget(struct pnfs_layout_hdr *lo, 469 468 struct nfs_open_context *ctx, 470 - u32 iomode) 469 + u32 iomode, 470 + gfp_t gfp_flags) 471 471 { 472 472 struct inode *ino = lo->plh_inode; 473 473 struct nfs_server *server = NFS_SERVER(ino); ··· 481 479 dprintk("--> %s\n", __func__); 482 480 483 481 BUG_ON(ctx == NULL); 484 - lgp = kzalloc(sizeof(*lgp), GFP_KERNEL); 482 + lgp = kzalloc(sizeof(*lgp), gfp_flags); 485 483 if (lgp == NULL) 486 484 return NULL; 487 485 ··· 489 487 max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz; 490 488 max_pages = max_resp_sz >> PAGE_SHIFT; 491 489 492 - pages = kzalloc(max_pages * sizeof(struct page *), GFP_KERNEL); 490 + pages = kzalloc(max_pages * sizeof(struct page *), gfp_flags); 493 491 if (!pages) 494 492 goto out_err_free; 495 493 496 494 for (i = 0; i < max_pages; i++) { 497 - pages[i] = alloc_page(GFP_KERNEL); 495 + pages[i] = alloc_page(gfp_flags); 498 496 if (!pages[i]) 499 497 goto out_err_free; 500 498 } ··· 510 508 lgp->args.layout.pages = pages; 511 509 lgp->args.layout.pglen = max_pages * PAGE_SIZE; 512 510 lgp->lsegpp = &lseg; 511 + lgp->gfp_flags = gfp_flags; 513 512 514 513 /* Synchronously retrieve layout information from server and 515 514 * store in lseg. 
··· 668 665 } 669 666 670 667 static struct pnfs_layout_hdr * 671 - alloc_init_layout_hdr(struct inode *ino) 668 + alloc_init_layout_hdr(struct inode *ino, gfp_t gfp_flags) 672 669 { 673 670 struct pnfs_layout_hdr *lo; 674 671 675 - lo = kzalloc(sizeof(struct pnfs_layout_hdr), GFP_KERNEL); 672 + lo = kzalloc(sizeof(struct pnfs_layout_hdr), gfp_flags); 676 673 if (!lo) 677 674 return NULL; 678 675 atomic_set(&lo->plh_refcount, 1); ··· 684 681 } 685 682 686 683 static struct pnfs_layout_hdr * 687 - pnfs_find_alloc_layout(struct inode *ino) 684 + pnfs_find_alloc_layout(struct inode *ino, gfp_t gfp_flags) 688 685 { 689 686 struct nfs_inode *nfsi = NFS_I(ino); 690 687 struct pnfs_layout_hdr *new = NULL; ··· 699 696 return nfsi->layout; 700 697 } 701 698 spin_unlock(&ino->i_lock); 702 - new = alloc_init_layout_hdr(ino); 699 + new = alloc_init_layout_hdr(ino, gfp_flags); 703 700 spin_lock(&ino->i_lock); 704 701 705 702 if (likely(nfsi->layout == NULL)) /* Won the race? */ ··· 759 756 struct pnfs_layout_segment * 760 757 pnfs_update_layout(struct inode *ino, 761 758 struct nfs_open_context *ctx, 762 - enum pnfs_iomode iomode) 759 + enum pnfs_iomode iomode, 760 + gfp_t gfp_flags) 763 761 { 764 762 struct nfs_inode *nfsi = NFS_I(ino); 765 763 struct nfs_client *clp = NFS_SERVER(ino)->nfs_client; ··· 771 767 if (!pnfs_enabled_sb(NFS_SERVER(ino))) 772 768 return NULL; 773 769 spin_lock(&ino->i_lock); 774 - lo = pnfs_find_alloc_layout(ino); 770 + lo = pnfs_find_alloc_layout(ino, gfp_flags); 775 771 if (lo == NULL) { 776 772 dprintk("%s ERROR: can't get pnfs_layout_hdr\n", __func__); 777 773 goto out_unlock; ··· 811 807 spin_unlock(&clp->cl_lock); 812 808 } 813 809 814 - lseg = send_layoutget(lo, ctx, iomode); 810 + lseg = send_layoutget(lo, ctx, iomode, gfp_flags); 815 811 if (!lseg && first) { 816 812 spin_lock(&clp->cl_lock); 817 813 list_del_init(&lo->plh_layouts); ··· 850 846 goto out; 851 847 } 852 848 /* Inject layout blob into I/O device driver */ 853 - lseg = 
NFS_SERVER(ino)->pnfs_curr_ld->alloc_lseg(lo, res); 849 + lseg = NFS_SERVER(ino)->pnfs_curr_ld->alloc_lseg(lo, res, lgp->gfp_flags); 854 850 if (!lseg || IS_ERR(lseg)) { 855 851 if (!lseg) 856 852 status = -ENOMEM; ··· 903 899 /* This is first coelesce call for a series of nfs_pages */ 904 900 pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode, 905 901 prev->wb_context, 906 - IOMODE_READ); 902 + IOMODE_READ, 903 + GFP_KERNEL); 907 904 } 908 905 return NFS_SERVER(pgio->pg_inode)->pnfs_curr_ld->pg_test(pgio, prev, req); 909 906 } ··· 926 921 /* This is first coelesce call for a series of nfs_pages */ 927 922 pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode, 928 923 prev->wb_context, 929 - IOMODE_RW); 924 + IOMODE_RW, 925 + GFP_NOFS); 930 926 } 931 927 return NFS_SERVER(pgio->pg_inode)->pnfs_curr_ld->pg_test(pgio, prev, req); 932 928 } ··· 1010 1004 { 1011 1005 struct nfs_inode *nfsi = NFS_I(wdata->inode); 1012 1006 loff_t end_pos = wdata->args.offset + wdata->res.count; 1007 + bool mark_as_dirty = false; 1013 1008 1014 1009 spin_lock(&nfsi->vfs_inode.i_lock); 1015 1010 if (!test_and_set_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags)) { ··· 1018 1011 get_lseg(wdata->lseg); 1019 1012 wdata->lseg->pls_lc_cred = 1020 1013 get_rpccred(wdata->args.context->state->owner->so_cred); 1021 - mark_inode_dirty_sync(wdata->inode); 1014 + mark_as_dirty = true; 1022 1015 dprintk("%s: Set layoutcommit for inode %lu ", 1023 1016 __func__, wdata->inode->i_ino); 1024 1017 } 1025 1018 if (end_pos > wdata->lseg->pls_end_pos) 1026 1019 wdata->lseg->pls_end_pos = end_pos; 1027 1020 spin_unlock(&nfsi->vfs_inode.i_lock); 1021 + 1022 + /* if pnfs_layoutcommit_inode() runs between inode locks, the next one 1023 + * will be a noop because NFS_INO_LAYOUTCOMMIT will not be set */ 1024 + if (mark_as_dirty) 1025 + mark_inode_dirty_sync(wdata->inode); 1028 1026 } 1029 1027 EXPORT_SYMBOL_GPL(pnfs_set_layoutcommit); 1030 1028
+3 -3
fs/nfs/pnfs.h
··· 70 70 const u32 id; 71 71 const char *name; 72 72 struct module *owner; 73 - struct pnfs_layout_segment * (*alloc_lseg) (struct pnfs_layout_hdr *layoutid, struct nfs4_layoutget_res *lgr); 73 + struct pnfs_layout_segment * (*alloc_lseg) (struct pnfs_layout_hdr *layoutid, struct nfs4_layoutget_res *lgr, gfp_t gfp_flags); 74 74 void (*free_lseg) (struct pnfs_layout_segment *lseg); 75 75 76 76 /* test for nfs page cache coalescing */ ··· 126 126 void put_lseg(struct pnfs_layout_segment *lseg); 127 127 struct pnfs_layout_segment * 128 128 pnfs_update_layout(struct inode *ino, struct nfs_open_context *ctx, 129 - enum pnfs_iomode access_type); 129 + enum pnfs_iomode access_type, gfp_t gfp_flags); 130 130 void set_pnfs_layoutdriver(struct nfs_server *, u32 id); 131 131 void unset_pnfs_layoutdriver(struct nfs_server *); 132 132 enum pnfs_try_status pnfs_try_to_write_data(struct nfs_write_data *, ··· 245 245 246 246 static inline struct pnfs_layout_segment * 247 247 pnfs_update_layout(struct inode *ino, struct nfs_open_context *ctx, 248 - enum pnfs_iomode access_type) 248 + enum pnfs_iomode access_type, gfp_t gfp_flags) 249 249 { 250 250 return NULL; 251 251 }
+2 -2
fs/nfs/read.c
··· 288 288 atomic_set(&req->wb_complete, requests); 289 289 290 290 BUG_ON(desc->pg_lseg != NULL); 291 - lseg = pnfs_update_layout(desc->pg_inode, req->wb_context, IOMODE_READ); 291 + lseg = pnfs_update_layout(desc->pg_inode, req->wb_context, IOMODE_READ, GFP_KERNEL); 292 292 ClearPageError(page); 293 293 offset = 0; 294 294 nbytes = desc->pg_count; ··· 351 351 } 352 352 req = nfs_list_entry(data->pages.next); 353 353 if ((!lseg) && list_is_singular(&data->pages)) 354 - lseg = pnfs_update_layout(desc->pg_inode, req->wb_context, IOMODE_READ); 354 + lseg = pnfs_update_layout(desc->pg_inode, req->wb_context, IOMODE_READ, GFP_KERNEL); 355 355 356 356 ret = nfs_read_rpcsetup(req, data, &nfs_read_full_ops, desc->pg_count, 357 357 0, lseg);
+11 -2
fs/nfs/super.c
··· 1004 1004 return 0; 1005 1005 } 1006 1006 1007 + mnt->flags |= NFS_MOUNT_SECFLAVOUR; 1007 1008 mnt->auth_flavor_len = 1; 1008 1009 return 1; 1009 1010 } ··· 1977 1976 if (error < 0) 1978 1977 goto out; 1979 1978 1979 + /* 1980 + * noac is a special case. It implies -o sync, but that's not 1981 + * necessarily reflected in the mtab options. do_remount_sb 1982 + * will clear MS_SYNCHRONOUS if -o sync wasn't specified in the 1983 + * remount options, so we have to explicitly reset it. 1984 + */ 1985 + if (data->flags & NFS_MOUNT_NOAC) 1986 + *flags |= MS_SYNCHRONOUS; 1987 + 1980 1988 /* compare new mount options with old ones */ 1981 1989 error = nfs_compare_remount_data(nfss, data); 1982 1990 out: ··· 2245 2235 if (!s->s_root) { 2246 2236 /* initial superblock/root creation */ 2247 2237 nfs_fill_super(s, data); 2248 - nfs_fscache_get_super_cookie( 2249 - s, data ? data->fscache_uniq : NULL, NULL); 2238 + nfs_fscache_get_super_cookie(s, data->fscache_uniq, NULL); 2250 2239 } 2251 2240 2252 2241 mntroot = nfs_get_root(s, mntfh, dev_name);
+3 -5
fs/nfs/write.c
··· 680 680 req = nfs_setup_write_request(ctx, page, offset, count); 681 681 if (IS_ERR(req)) 682 682 return PTR_ERR(req); 683 - nfs_mark_request_dirty(req); 684 683 /* Update file length */ 685 684 nfs_grow_file(page, offset, count); 686 685 nfs_mark_uptodate(page, req->wb_pgbase, req->wb_bytes); ··· 939 940 atomic_set(&req->wb_complete, requests); 940 941 941 942 BUG_ON(desc->pg_lseg); 942 - lseg = pnfs_update_layout(desc->pg_inode, req->wb_context, IOMODE_RW); 943 + lseg = pnfs_update_layout(desc->pg_inode, req->wb_context, IOMODE_RW, GFP_NOFS); 943 944 ClearPageError(page); 944 945 offset = 0; 945 946 nbytes = desc->pg_count; ··· 1013 1014 } 1014 1015 req = nfs_list_entry(data->pages.next); 1015 1016 if ((!lseg) && list_is_singular(&data->pages)) 1016 - lseg = pnfs_update_layout(desc->pg_inode, req->wb_context, IOMODE_RW); 1017 + lseg = pnfs_update_layout(desc->pg_inode, req->wb_context, IOMODE_RW, GFP_NOFS); 1017 1018 1018 1019 if ((desc->pg_ioflags & FLUSH_COND_STABLE) && 1019 1020 (desc->pg_moreio || NFS_I(desc->pg_inode)->ncommit)) ··· 1417 1418 task->tk_pid, task->tk_status); 1418 1419 1419 1420 /* Call the NFS version-specific code */ 1420 - if (NFS_PROTO(data->inode)->commit_done(task, data) != 0) 1421 - return; 1421 + NFS_PROTO(data->inode)->commit_done(task, data); 1422 1422 } 1423 1423 1424 1424 void nfs_commit_release_pages(struct nfs_write_data *data)
+2 -1
fs/nfsd/nfs4state.c
··· 258 258 if (atomic_dec_and_test(&fp->fi_delegees)) { 259 259 vfs_setlease(fp->fi_deleg_file, F_UNLCK, &fp->fi_lease); 260 260 fp->fi_lease = NULL; 261 + fput(fp->fi_deleg_file); 261 262 fp->fi_deleg_file = NULL; 262 263 } 263 264 } ··· 403 402 if (stp->st_access_bmap) { 404 403 oflag = nfs4_access_bmap_to_omode(stp); 405 404 nfs4_file_put_access(stp->st_file, oflag); 406 - put_nfs4_file(stp->st_file); 407 405 } 406 + put_nfs4_file(stp->st_file); 408 407 kmem_cache_free(stateid_slab, stp); 409 408 } 410 409
+8 -1
fs/nfsd/vfs.c
··· 1363 1363 goto out; 1364 1364 if (!(iap->ia_valid & ATTR_MODE)) 1365 1365 iap->ia_mode = 0; 1366 - err = fh_verify(rqstp, fhp, S_IFDIR, NFSD_MAY_CREATE); 1366 + err = fh_verify(rqstp, fhp, S_IFDIR, NFSD_MAY_EXEC); 1367 1367 if (err) 1368 1368 goto out; 1369 1369 ··· 1384 1384 host_err = PTR_ERR(dchild); 1385 1385 if (IS_ERR(dchild)) 1386 1386 goto out_nfserr; 1387 + 1388 + /* If file doesn't exist, check for permissions to create one */ 1389 + if (!dchild->d_inode) { 1390 + err = fh_verify(rqstp, fhp, S_IFDIR, NFSD_MAY_CREATE); 1391 + if (err) 1392 + goto out; 1393 + } 1387 1394 1388 1395 err = fh_compose(resfhp, fhp->fh_export, dchild, fhp); 1389 1396 if (err)
+1 -1
fs/nilfs2/alloc.c
··· 646 646 unsigned long group, group_offset; 647 647 int i, j, n, ret; 648 648 649 - for (i = 0; i < nitems; i += n) { 649 + for (i = 0; i < nitems; i = j) { 650 650 group = nilfs_palloc_group(inode, entry_nrs[i], &group_offset); 651 651 ret = nilfs_palloc_get_desc_block(inode, group, 0, &desc_bh); 652 652 if (ret < 0)
+43 -18
fs/ocfs2/cluster/heartbeat.c
··· 539 539 540 540 /* We want to make sure that nobody is heartbeating on top of us -- 541 541 * this will help detect an invalid configuration. */ 542 - static int o2hb_check_last_timestamp(struct o2hb_region *reg) 542 + static void o2hb_check_last_timestamp(struct o2hb_region *reg) 543 543 { 544 - int node_num, ret; 545 544 struct o2hb_disk_slot *slot; 546 545 struct o2hb_disk_heartbeat_block *hb_block; 546 + char *errstr; 547 547 548 - node_num = o2nm_this_node(); 549 - 550 - ret = 1; 551 - slot = &reg->hr_slots[node_num]; 548 + slot = &reg->hr_slots[o2nm_this_node()]; 552 549 /* Don't check on our 1st timestamp */ 553 - if (slot->ds_last_time) { 554 - hb_block = slot->ds_raw_block; 550 + if (!slot->ds_last_time) 551 + return; 555 552 556 - if (le64_to_cpu(hb_block->hb_seq) != slot->ds_last_time) 557 - ret = 0; 558 - } 553 + hb_block = slot->ds_raw_block; 554 + if (le64_to_cpu(hb_block->hb_seq) == slot->ds_last_time && 555 + le64_to_cpu(hb_block->hb_generation) == slot->ds_last_generation && 556 + hb_block->hb_node == slot->ds_node_num) 557 + return; 559 558 560 - return ret; 559 + #define ERRSTR1 "Another node is heartbeating on device" 560 + #define ERRSTR2 "Heartbeat generation mismatch on device" 561 + #define ERRSTR3 "Heartbeat sequence mismatch on device" 562 + 563 + if (hb_block->hb_node != slot->ds_node_num) 564 + errstr = ERRSTR1; 565 + else if (le64_to_cpu(hb_block->hb_generation) != 566 + slot->ds_last_generation) 567 + errstr = ERRSTR2; 568 + else 569 + errstr = ERRSTR3; 570 + 571 + mlog(ML_ERROR, "%s (%s): expected(%u:0x%llx, 0x%llx), " 572 + "ondisk(%u:0x%llx, 0x%llx)\n", errstr, reg->hr_dev_name, 573 + slot->ds_node_num, (unsigned long long)slot->ds_last_generation, 574 + (unsigned long long)slot->ds_last_time, hb_block->hb_node, 575 + (unsigned long long)le64_to_cpu(hb_block->hb_generation), 576 + (unsigned long long)le64_to_cpu(hb_block->hb_seq)); 561 577 } 562 578 563 579 static inline void o2hb_prepare_block(struct o2hb_region *reg, ··· 999 
983 /* With an up to date view of the slots, we can check that no 1000 984 * other node has been improperly configured to heartbeat in 1001 985 * our slot. */ 1002 - if (!o2hb_check_last_timestamp(reg)) 1003 - mlog(ML_ERROR, "Device \"%s\": another node is heartbeating " 1004 - "in our slot!\n", reg->hr_dev_name); 986 + o2hb_check_last_timestamp(reg); 1005 987 1006 988 /* fill in the proper info for our next heartbeat */ 1007 989 o2hb_prepare_block(reg, reg->hr_generation); ··· 1013 999 } 1014 1000 1015 1001 i = -1; 1016 - while((i = find_next_bit(configured_nodes, O2NM_MAX_NODES, i + 1)) < O2NM_MAX_NODES) { 1017 - 1002 + while((i = find_next_bit(configured_nodes, 1003 + O2NM_MAX_NODES, i + 1)) < O2NM_MAX_NODES) { 1018 1004 change |= o2hb_check_slot(reg, &reg->hr_slots[i]); 1019 1005 } 1020 1006 ··· 1704 1690 struct file *filp = NULL; 1705 1691 struct inode *inode = NULL; 1706 1692 ssize_t ret = -EINVAL; 1693 + int live_threshold; 1707 1694 1708 1695 if (reg->hr_bdev) 1709 1696 goto out; ··· 1781 1766 * A node is considered live after it has beat LIVE_THRESHOLD 1782 1767 * times. We're not steady until we've given them a chance 1783 1768 * _after_ our first read. 1769 + * The default threshold is bare minimum so as to limit the delay 1770 + * during mounts. For global heartbeat, the threshold doubled for the 1771 + * first region. 1784 1772 */ 1785 - atomic_set(&reg->hr_steady_iterations, O2HB_LIVE_THRESHOLD + 1); 1773 + live_threshold = O2HB_LIVE_THRESHOLD; 1774 + if (o2hb_global_heartbeat_active()) { 1775 + spin_lock(&o2hb_live_lock); 1776 + if (o2hb_pop_count(&o2hb_region_bitmap, O2NM_MAX_REGIONS) == 1) 1777 + live_threshold <<= 1; 1778 + spin_unlock(&o2hb_live_lock); 1779 + } 1780 + atomic_set(&reg->hr_steady_iterations, live_threshold + 1); 1786 1781 1787 1782 hb_task = kthread_run(o2hb_thread, reg, "o2hb-%s", 1788 1783 reg->hr_item.ci_name);
+1 -1
fs/ocfs2/dir.c
··· 2868 2868 bytes = blocks_wanted << sb->s_blocksize_bits; 2869 2869 struct ocfs2_super *osb = OCFS2_SB(dir->i_sb); 2870 2870 struct ocfs2_inode_info *oi = OCFS2_I(dir); 2871 - struct ocfs2_alloc_context *data_ac; 2871 + struct ocfs2_alloc_context *data_ac = NULL; 2872 2872 struct ocfs2_alloc_context *meta_ac = NULL; 2873 2873 struct buffer_head *dirdata_bh = NULL; 2874 2874 struct buffer_head *dx_root_bh = NULL;
+2 -1
fs/ocfs2/dlm/dlmdomain.c
··· 1614 1614 spin_unlock(&dlm->spinlock); 1615 1615 1616 1616 /* Support for global heartbeat and node info was added in 1.1 */ 1617 - if (dlm_protocol.pv_major > 1 || dlm_protocol.pv_minor > 0) { 1617 + if (dlm->dlm_locking_proto.pv_major > 1 || 1618 + dlm->dlm_locking_proto.pv_minor > 0) { 1618 1619 status = dlm_send_nodeinfo(dlm, ctxt->yes_resp_map); 1619 1620 if (status) { 1620 1621 mlog_errno(status);
+3
fs/ocfs2/dlm/dlmmaster.c
··· 2574 2574 res->state &= ~DLM_LOCK_RES_MIGRATING; 2575 2575 wake = 1; 2576 2576 spin_unlock(&res->spinlock); 2577 + if (dlm_is_host_down(ret)) 2578 + dlm_wait_for_node_death(dlm, target, 2579 + DLM_NODE_DEATH_WAIT_MAX); 2577 2580 goto leave; 2578 2581 } 2579 2582
+12
fs/ocfs2/file.c
··· 1607 1607 range = le32_to_cpu(rec->e_cpos) + ocfs2_rec_clusters(el, rec); 1608 1608 1609 1609 if (le32_to_cpu(rec->e_cpos) >= trunc_start) { 1610 + /* 1611 + * remove an entire extent record. 1612 + */ 1610 1613 *trunc_cpos = le32_to_cpu(rec->e_cpos); 1611 1614 /* 1612 1615 * Skip holes if any. ··· 1620 1617 *blkno = le64_to_cpu(rec->e_blkno); 1621 1618 *trunc_end = le32_to_cpu(rec->e_cpos); 1622 1619 } else if (range > trunc_start) { 1620 + /* 1621 + * remove a partial extent record, which means we're 1622 + * removing the last extent record. 1623 + */ 1623 1624 *trunc_cpos = trunc_start; 1625 + /* 1626 + * skip hole if any. 1627 + */ 1628 + if (range < *trunc_end) 1629 + *trunc_end = range; 1624 1630 *trunc_len = *trunc_end - trunc_start; 1625 1631 coff = trunc_start - le32_to_cpu(rec->e_cpos); 1626 1632 *blkno = le64_to_cpu(rec->e_blkno) +
+3
fs/ocfs2/journal.c
··· 1260 1260 { 1261 1261 struct ocfs2_journal *journal = osb->journal; 1262 1262 1263 + if (ocfs2_is_hard_readonly(osb)) 1264 + return; 1265 + 1263 1266 /* No need to queue up our truncate_log as regular cleanup will catch 1264 1267 * that */ 1265 1268 ocfs2_queue_recovery_completion(journal, osb->slot_num,
+1 -1
fs/ocfs2/ocfs2_fs.h
··· 1019 1019 __le16 xe_name_offset; /* byte offset from the 1st entry in the 1020 1020 local xattr storage(inode, xattr block or 1021 1021 xattr bucket). */ 1022 - __u8 xe_name_len; /* xattr name len, does't include prefix. */ 1022 + __u8 xe_name_len; /* xattr name len, doesn't include prefix. */ 1023 1023 __u8 xe_type; /* the low 7 bits indicate the name prefix 1024 1024 * type and the highest bit indicates whether 1025 1025 * the EA is stored in the local storage. */
+6
fs/partitions/efi.c
··· 348 348 goto fail; 349 349 } 350 350 351 + /* Check that sizeof_partition_entry has the correct value */ 352 + if (le32_to_cpu((*gpt)->sizeof_partition_entry) != sizeof(gpt_entry)) { 353 + pr_debug("GUID Partitition Entry Size check failed.\n"); 354 + goto fail; 355 + } 356 + 351 357 if (!(*ptes = alloc_read_gpt_entries(state, *gpt))) 352 358 goto fail; 353 359
+7 -5
fs/proc/task_mmu.c
··· 214 214 int flags = vma->vm_flags; 215 215 unsigned long ino = 0; 216 216 unsigned long long pgoff = 0; 217 - unsigned long start; 217 + unsigned long start, end; 218 218 dev_t dev = 0; 219 219 int len; 220 220 ··· 227 227 228 228 /* We don't show the stack guard page in /proc/maps */ 229 229 start = vma->vm_start; 230 - if (vma->vm_flags & VM_GROWSDOWN) 231 - if (!vma_stack_continue(vma->vm_prev, vma->vm_start)) 232 - start += PAGE_SIZE; 230 + if (stack_guard_page_start(vma, start)) 231 + start += PAGE_SIZE; 232 + end = vma->vm_end; 233 + if (stack_guard_page_end(vma, end)) 234 + end -= PAGE_SIZE; 233 235 234 236 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n", 235 237 start, 236 - vma->vm_end, 238 + end, 237 239 flags & VM_READ ? 'r' : '-', 238 240 flags & VM_WRITE ? 'w' : '-', 239 241 flags & VM_EXEC ? 'x' : '-',
-20
fs/ubifs/log.c
··· 175 175 } 176 176 177 177 /** 178 - * ubifs_create_buds_lists - create journal head buds lists for remount rw. 179 - * @c: UBIFS file-system description object 180 - */ 181 - void ubifs_create_buds_lists(struct ubifs_info *c) 182 - { 183 - struct rb_node *p; 184 - 185 - spin_lock(&c->buds_lock); 186 - p = rb_first(&c->buds); 187 - while (p) { 188 - struct ubifs_bud *bud = rb_entry(p, struct ubifs_bud, rb); 189 - struct ubifs_jhead *jhead = &c->jheads[bud->jhead]; 190 - 191 - list_add_tail(&bud->list, &jhead->buds_list); 192 - p = rb_next(p); 193 - } 194 - spin_unlock(&c->buds_lock); 195 - } 196 - 197 - /** 198 178 * ubifs_add_bud_to_log - add a new bud to the log. 199 179 * @c: UBIFS file-system description object 200 180 * @jhead: journal head the bud belongs to
+26
fs/ubifs/recovery.c
··· 317 317 goto out_free; 318 318 } 319 319 memcpy(c->rcvrd_mst_node, c->mst_node, UBIFS_MST_NODE_SZ); 320 + 321 + /* 322 + * We had to recover the master node, which means there was an 323 + * unclean reboot. However, it is possible that the master node 324 + * is clean at this point, i.e., %UBIFS_MST_DIRTY is not set. 325 + * E.g., consider the following chain of events: 326 + * 327 + * 1. UBIFS was cleanly unmounted, so the master node is clean 328 + * 2. UBIFS is being mounted R/W and starts changing the master 329 + * node in the first (%UBIFS_MST_LNUM). A power cut happens, 330 + * so this LEB ends up with some amount of garbage at the 331 + * end. 332 + * 3. UBIFS is being mounted R/O. We reach this place and 333 + * recover the master node from the second LEB 334 + * (%UBIFS_MST_LNUM + 1). But we cannot update the media 335 + * because we are being mounted R/O. We have to defer the 336 + * operation. 337 + * 4. However, this master node (@c->mst_node) is marked as 338 + * clean (since the step 1). And if we just return, the 339 + * mount code will be confused and won't recover the master 340 + * node when it is re-mounter R/W later. 341 + * 342 + * Thus, to force the recovery by marking the master node as 343 + * dirty. 344 + */ 345 + c->mst_node->flags |= cpu_to_le32(UBIFS_MST_DIRTY); 320 346 } else { 321 347 /* Write the recovered master node */ 322 348 c->max_sqnum = le64_to_cpu(mst->ch.sqnum) - 1;
+12 -6
fs/ubifs/replay.c
··· 59 59 * @new_size: truncation new size 60 60 * @free: amount of free space in a bud 61 61 * @dirty: amount of dirty space in a bud from padding and deletion nodes 62 + * @jhead: journal head number of the bud 62 63 * 63 64 * UBIFS journal replay must compare node sequence numbers, which means it must 64 65 * build a tree of node information to insert into the TNC. ··· 81 80 struct { 82 81 int free; 83 82 int dirty; 83 + int jhead; 84 84 }; 85 85 }; 86 86 }; ··· 161 159 err = PTR_ERR(lp); 162 160 goto out; 163 161 } 162 + 163 + /* Make sure the journal head points to the latest bud */ 164 + err = ubifs_wbuf_seek_nolock(&c->jheads[r->jhead].wbuf, r->lnum, 165 + c->leb_size - r->free, UBI_SHORTTERM); 166 + 164 167 out: 165 168 ubifs_release_lprops(c); 166 169 return err; ··· 634 627 ubifs_assert(sleb->endpt - offs >= used); 635 628 ubifs_assert(sleb->endpt % c->min_io_size == 0); 636 629 637 - if (sleb->endpt + c->min_io_size <= c->leb_size && !c->ro_mount) 638 - err = ubifs_wbuf_seek_nolock(&c->jheads[jhead].wbuf, lnum, 639 - sleb->endpt, UBI_SHORTTERM); 640 - 641 630 *dirty = sleb->endpt - offs - used; 642 631 *free = c->leb_size - sleb->endpt; 643 632 ··· 656 653 * @sqnum: sequence number 657 654 * @free: amount of free space in bud 658 655 * @dirty: amount of dirty space from padding and deletion nodes 656 + * @jhead: journal head number for the bud 659 657 * 660 658 * This function inserts a reference node to the replay tree and returns zero 661 659 * in case of success or a negative error code in case of failure. 
662 660 */ 663 661 static int insert_ref_node(struct ubifs_info *c, int lnum, int offs, 664 - unsigned long long sqnum, int free, int dirty) 662 + unsigned long long sqnum, int free, int dirty, 663 + int jhead) 665 664 { 666 665 struct rb_node **p = &c->replay_tree.rb_node, *parent = NULL; 667 666 struct replay_entry *r; ··· 693 688 r->flags = REPLAY_REF; 694 689 r->free = free; 695 690 r->dirty = dirty; 691 + r->jhead = jhead; 696 692 697 693 rb_link_node(&r->rb, parent, p); 698 694 rb_insert_color(&r->rb, &c->replay_tree); ··· 718 712 if (err) 719 713 return err; 720 714 err = insert_ref_node(c, b->bud->lnum, b->bud->start, b->sqnum, 721 - free, dirty); 715 + free, dirty, b->bud->jhead); 722 716 if (err) 723 717 return err; 724 718 }
+25 -19
fs/ubifs/super.c
··· 1257 1257 goto out_free; 1258 1258 } 1259 1259 1260 + err = alloc_wbufs(c); 1261 + if (err) 1262 + goto out_cbuf; 1263 + 1260 1264 sprintf(c->bgt_name, BGT_NAME_PATTERN, c->vi.ubi_num, c->vi.vol_id); 1261 1265 if (!c->ro_mount) { 1262 - err = alloc_wbufs(c); 1263 - if (err) 1264 - goto out_cbuf; 1265 - 1266 1266 /* Create background thread */ 1267 1267 c->bgt = kthread_create(ubifs_bg_thread, c, "%s", c->bgt_name); 1268 1268 if (IS_ERR(c->bgt)) { ··· 1631 1631 if (err) 1632 1632 goto out; 1633 1633 1634 - err = alloc_wbufs(c); 1635 - if (err) 1636 - goto out; 1637 - 1638 - ubifs_create_buds_lists(c); 1639 - 1640 1634 /* Create background thread */ 1641 1635 c->bgt = kthread_create(ubifs_bg_thread, c, "%s", c->bgt_name); 1642 1636 if (IS_ERR(c->bgt)) { ··· 1665 1671 if (err) 1666 1672 goto out; 1667 1673 1674 + dbg_gen("re-mounted read-write"); 1675 + c->remounting_rw = 0; 1676 + 1668 1677 if (c->need_recovery) { 1669 1678 c->need_recovery = 0; 1670 1679 ubifs_msg("deferred recovery completed"); 1680 + } else { 1681 + /* 1682 + * Do not run the debugging space check if the were doing 1683 + * recovery, because when we saved the information we had the 1684 + * file-system in a state where the TNC and lprops has been 1685 + * modified in memory, but all the I/O operations (including a 1686 + * commit) were deferred. So the file-system was in 1687 + * "non-committed" state. Now the file-system is in committed 1688 + * state, and of course the amount of free space will change 1689 + * because, for example, the old index size was imprecise. 
1690 + */ 1691 + err = dbg_check_space_info(c); 1671 1692 } 1672 - 1673 - dbg_gen("re-mounted read-write"); 1674 - c->remounting_rw = 0; 1675 - err = dbg_check_space_info(c); 1676 1693 mutex_unlock(&c->umount_mutex); 1677 1694 return err; 1678 1695 ··· 1738 1733 if (err) 1739 1734 ubifs_ro_mode(c, err); 1740 1735 1741 - free_wbufs(c); 1742 1736 vfree(c->orph_buf); 1743 1737 c->orph_buf = NULL; 1744 1738 kfree(c->write_reserve_buf); ··· 1765 1761 * of the media. For example, there will be dirty inodes if we failed 1766 1762 * to write them back because of I/O errors. 1767 1763 */ 1768 - ubifs_assert(atomic_long_read(&c->dirty_pg_cnt) == 0); 1769 - ubifs_assert(c->budg_idx_growth == 0); 1770 - ubifs_assert(c->budg_dd_growth == 0); 1771 - ubifs_assert(c->budg_data_growth == 0); 1764 + if (!c->ro_error) { 1765 + ubifs_assert(atomic_long_read(&c->dirty_pg_cnt) == 0); 1766 + ubifs_assert(c->budg_idx_growth == 0); 1767 + ubifs_assert(c->budg_dd_growth == 0); 1768 + ubifs_assert(c->budg_data_growth == 0); 1769 + } 1772 1770 1773 1771 /* 1774 1772 * The 'c->umount_lock' prevents races between UBIFS memory shrinker
+1 -1
fs/xattr.c
··· 666 666 handler = xattr_resolve_name(dentry->d_sb->s_xattr, &name); 667 667 if (!handler) 668 668 return -EOPNOTSUPP; 669 - return handler->set(dentry, name, value, size, 0, handler->flags); 669 + return handler->set(dentry, name, value, size, flags, handler->flags); 670 670 } 671 671 672 672 /*
+3 -1
fs/xfs/linux-2.6/xfs_message.c
··· 34 34 const struct xfs_mount *mp, 35 35 struct va_format *vaf) 36 36 { 37 - if (mp && mp->m_fsname) 37 + if (mp && mp->m_fsname) { 38 38 printk("%sXFS (%s): %pV\n", level, mp->m_fsname, vaf); 39 + return; 40 + } 39 41 printk("%sXFS: %pV\n", level, vaf); 40 42 } 41 43
+1
fs/xfs/linux-2.6/xfs_sync.c
··· 926 926 XFS_LOOKUP_BATCH, 927 927 XFS_ICI_RECLAIM_TAG); 928 928 if (!nr_found) { 929 + done = 1; 929 930 rcu_read_unlock(); 930 931 break; 931 932 }
+26 -21
fs/xfs/xfs_trans_ail.c
··· 346 346 */ 347 347 STATIC void 348 348 xfs_ail_worker( 349 - struct work_struct *work) 349 + struct work_struct *work) 350 350 { 351 - struct xfs_ail *ailp = container_of(to_delayed_work(work), 351 + struct xfs_ail *ailp = container_of(to_delayed_work(work), 352 352 struct xfs_ail, xa_work); 353 - long tout; 354 - xfs_lsn_t target = ailp->xa_target; 355 - xfs_lsn_t lsn; 356 - xfs_log_item_t *lip; 357 - int flush_log, count, stuck; 358 - xfs_mount_t *mp = ailp->xa_mount; 353 + xfs_mount_t *mp = ailp->xa_mount; 359 354 struct xfs_ail_cursor *cur = &ailp->xa_cursors; 360 - int push_xfsbufd = 0; 355 + xfs_log_item_t *lip; 356 + xfs_lsn_t lsn; 357 + xfs_lsn_t target; 358 + long tout = 10; 359 + int flush_log = 0; 360 + int stuck = 0; 361 + int count = 0; 362 + int push_xfsbufd = 0; 361 363 362 364 spin_lock(&ailp->xa_lock); 365 + target = ailp->xa_target; 363 366 xfs_trans_ail_cursor_init(ailp, cur); 364 367 lip = xfs_trans_ail_cursor_first(ailp, cur, ailp->xa_last_pushed_lsn); 365 368 if (!lip || XFS_FORCED_SHUTDOWN(mp)) { ··· 371 368 */ 372 369 xfs_trans_ail_cursor_done(ailp, cur); 373 370 spin_unlock(&ailp->xa_lock); 374 - ailp->xa_last_pushed_lsn = 0; 375 - return; 371 + goto out_done; 376 372 } 377 373 378 374 XFS_STATS_INC(xs_push_ail); ··· 388 386 * lots of contention on the AIL lists. 389 387 */ 390 388 lsn = lip->li_lsn; 391 - flush_log = stuck = count = 0; 392 - while ((XFS_LSN_CMP(lip->li_lsn, target) < 0)) { 389 + while ((XFS_LSN_CMP(lip->li_lsn, target) <= 0)) { 393 390 int lock_result; 394 391 /* 395 392 * If we can lock the item without sleeping, unlock the AIL ··· 481 480 } 482 481 483 482 /* assume we have more work to do in a short while */ 484 - tout = 10; 483 + out_done: 485 484 if (!count) { 486 485 /* We're past our target or empty, so idle */ 487 486 ailp->xa_last_pushed_lsn = 0; 488 487 489 488 /* 490 - * Check for an updated push target before clearing the 491 - * XFS_AIL_PUSHING_BIT. If the target changed, we've got more 492 - * work to do. 
Wait a bit longer before starting that work. 489 + * We clear the XFS_AIL_PUSHING_BIT first before checking 490 + * whether the target has changed. If the target has changed, 491 + * this pushes the requeue race directly onto the result of the 492 + * atomic test/set bit, so we are guaranteed that either the 493 + * the pusher that changed the target or ourselves will requeue 494 + * the work (but not both). 493 495 */ 496 + clear_bit(XFS_AIL_PUSHING_BIT, &ailp->xa_flags); 494 497 smp_rmb(); 495 - if (ailp->xa_target == target) { 496 - clear_bit(XFS_AIL_PUSHING_BIT, &ailp->xa_flags); 498 + if (XFS_LSN_CMP(ailp->xa_target, target) == 0 || 499 + test_and_set_bit(XFS_AIL_PUSHING_BIT, &ailp->xa_flags)) 497 500 return; 498 - } 501 + 499 502 tout = 50; 500 503 } else if (XFS_LSN_CMP(lsn, target) >= 0) { 501 504 /* ··· 558 553 * the XFS_AIL_PUSHING_BIT. 559 554 */ 560 555 smp_wmb(); 561 - ailp->xa_target = threshold_lsn; 556 + xfs_trans_ail_copy_lsn(ailp, &ailp->xa_target, &threshold_lsn); 562 557 if (!test_and_set_bit(XFS_AIL_PUSHING_BIT, &ailp->xa_flags)) 563 558 queue_delayed_work(xfs_syncd_wq, &ailp->xa_work, 0); 564 559 }
+2 -1
include/drm/drm_fb_helper.h
··· 118 118 unsigned transp, 119 119 struct fb_info *info); 120 120 121 + bool drm_fb_helper_restore_fbdev_mode(struct drm_fb_helper *fb_helper); 121 122 void drm_fb_helper_restore(void); 122 123 void drm_fb_helper_fill_var(struct fb_info *info, struct drm_fb_helper *fb_helper, 123 124 uint32_t fb_width, uint32_t fb_height); ··· 127 126 128 127 int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info); 129 128 130 - bool drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper); 129 + int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper); 131 130 bool drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper, int bpp_sel); 132 131 int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper); 133 132 int drm_fb_helper_debug_enter(struct fb_info *info);
+1 -1
include/drm/drm_mm.h
··· 86 86 } 87 87 #define drm_mm_for_each_node(entry, mm) list_for_each_entry(entry, \ 88 88 &(mm)->head_node.node_list, \ 89 - node_list); 89 + node_list) 90 90 #define drm_mm_for_each_scanned_node_reverse(entry, n, mm) \ 91 91 for (entry = (mm)->prev_scanned_node, \ 92 92 next = entry ? list_entry(entry->node_list.next, \
+5
include/drm/drm_pciids.h
··· 155 155 {0x1002, 0x6719, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAYMAN|RADEON_NEW_MEMMAP}, \ 156 156 {0x1002, 0x671c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAYMAN|RADEON_NEW_MEMMAP}, \ 157 157 {0x1002, 0x671d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAYMAN|RADEON_NEW_MEMMAP}, \ 158 + {0x1002, 0x671f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAYMAN|RADEON_NEW_MEMMAP}, \ 158 159 {0x1002, 0x6720, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ 159 160 {0x1002, 0x6721, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ 160 161 {0x1002, 0x6722, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_NEW_MEMMAP}, \ ··· 168 167 {0x1002, 0x6729, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_NEW_MEMMAP}, \ 169 168 {0x1002, 0x6738, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_NEW_MEMMAP}, \ 170 169 {0x1002, 0x6739, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_NEW_MEMMAP}, \ 170 + {0x1002, 0x673e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_NEW_MEMMAP}, \ 171 171 {0x1002, 0x6740, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ 172 172 {0x1002, 0x6741, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ 173 173 {0x1002, 0x6742, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ ··· 201 199 {0x1002, 0x688D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \ 202 200 {0x1002, 0x6898, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \ 203 201 {0x1002, 0x6899, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \ 202 + {0x1002, 0x689b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \ 204 203 {0x1002, 0x689c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HEMLOCK|RADEON_NEW_MEMMAP}, \ 205 204 {0x1002, 0x689d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HEMLOCK|RADEON_NEW_MEMMAP}, \ 206 205 {0x1002, 0x689e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \ ··· 212 209 {0x1002, 0x68b0, 
PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_JUNIPER|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ 213 210 {0x1002, 0x68b8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_JUNIPER|RADEON_NEW_MEMMAP}, \ 214 211 {0x1002, 0x68b9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_JUNIPER|RADEON_NEW_MEMMAP}, \ 212 + {0x1002, 0x68ba, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_JUNIPER|RADEON_NEW_MEMMAP}, \ 215 213 {0x1002, 0x68be, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_JUNIPER|RADEON_NEW_MEMMAP}, \ 214 + {0x1002, 0x68bf, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_JUNIPER|RADEON_NEW_MEMMAP}, \ 216 215 {0x1002, 0x68c0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_REDWOOD|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ 217 216 {0x1002, 0x68c1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_REDWOOD|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ 218 217 {0x1002, 0x68c7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_REDWOOD|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+2
include/drm/radeon_drm.h
··· 909 909 #define RADEON_INFO_WANT_CMASK 0x08 /* get access to CMASK on r300 */ 910 910 #define RADEON_INFO_CLOCK_CRYSTAL_FREQ 0x09 /* clock crystal frequency */ 911 911 #define RADEON_INFO_NUM_BACKENDS 0x0a /* DB/backends for r600+ - need for OQ */ 912 + #define RADEON_INFO_NUM_TILE_PIPES 0x0b /* tile pipes for r600+ */ 913 + #define RADEON_INFO_FUSION_GART_WORKING 0x0c /* fusion writes to GTT were broken before this */ 912 914 913 915 struct drm_radeon_info { 914 916 uint32_t request;
+4 -4
include/linux/bit_spinlock.h
··· 23 23 preempt_disable(); 24 24 #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) 25 25 while (unlikely(test_and_set_bit_lock(bitnum, addr))) { 26 - while (test_bit(bitnum, addr)) { 27 - preempt_enable(); 26 + preempt_enable(); 27 + do { 28 28 cpu_relax(); 29 - preempt_disable(); 30 - } 29 + } while (test_bit(bitnum, addr)); 30 + preempt_disable(); 31 31 } 32 32 #endif 33 33 __acquire(bitlock);
+13 -13
include/linux/blkdev.h
··· 390 390 #define QUEUE_FLAG_SYNCFULL 3 /* read queue has been filled */ 391 391 #define QUEUE_FLAG_ASYNCFULL 4 /* write queue has been filled */ 392 392 #define QUEUE_FLAG_DEAD 5 /* queue being torn down */ 393 - #define QUEUE_FLAG_REENTER 6 /* Re-entrancy avoidance */ 394 - #define QUEUE_FLAG_ELVSWITCH 7 /* don't use elevator, just do FIFO */ 395 - #define QUEUE_FLAG_BIDI 8 /* queue supports bidi requests */ 396 - #define QUEUE_FLAG_NOMERGES 9 /* disable merge attempts */ 397 - #define QUEUE_FLAG_SAME_COMP 10 /* force complete on same CPU */ 398 - #define QUEUE_FLAG_FAIL_IO 11 /* fake timeout */ 399 - #define QUEUE_FLAG_STACKABLE 12 /* supports request stacking */ 400 - #define QUEUE_FLAG_NONROT 13 /* non-rotational device (SSD) */ 393 + #define QUEUE_FLAG_ELVSWITCH 6 /* don't use elevator, just do FIFO */ 394 + #define QUEUE_FLAG_BIDI 7 /* queue supports bidi requests */ 395 + #define QUEUE_FLAG_NOMERGES 8 /* disable merge attempts */ 396 + #define QUEUE_FLAG_SAME_COMP 9 /* force complete on same CPU */ 397 + #define QUEUE_FLAG_FAIL_IO 10 /* fake timeout */ 398 + #define QUEUE_FLAG_STACKABLE 11 /* supports request stacking */ 399 + #define QUEUE_FLAG_NONROT 12 /* non-rotational device (SSD) */ 401 400 #define QUEUE_FLAG_VIRT QUEUE_FLAG_NONROT /* paravirt device */ 402 - #define QUEUE_FLAG_IO_STAT 15 /* do IO stats */ 403 - #define QUEUE_FLAG_DISCARD 16 /* supports DISCARD */ 404 - #define QUEUE_FLAG_NOXMERGES 17 /* No extended merges */ 405 - #define QUEUE_FLAG_ADD_RANDOM 18 /* Contributes to random pool */ 406 - #define QUEUE_FLAG_SECDISCARD 19 /* supports SECDISCARD */ 401 + #define QUEUE_FLAG_IO_STAT 13 /* do IO stats */ 402 + #define QUEUE_FLAG_DISCARD 14 /* supports DISCARD */ 403 + #define QUEUE_FLAG_NOXMERGES 15 /* No extended merges */ 404 + #define QUEUE_FLAG_ADD_RANDOM 16 /* Contributes to random pool */ 405 + #define QUEUE_FLAG_SECDISCARD 17 /* supports SECDISCARD */ 407 406 408 407 #define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \ 409 408 
(1 << QUEUE_FLAG_STACKABLE) | \ ··· 700 701 extern void __blk_stop_queue(struct request_queue *q); 701 702 extern void __blk_run_queue(struct request_queue *q); 702 703 extern void blk_run_queue(struct request_queue *); 704 + extern void blk_run_queue_async(struct request_queue *q); 703 705 extern int blk_rq_map_user(struct request_queue *, struct request *, 704 706 struct rq_map_data *, void __user *, unsigned long, 705 707 gfp_t);
+2
include/linux/bootmem.h
··· 111 111 __alloc_bootmem_nopanic(x, PAGE_SIZE, __pa(MAX_DMA_ADDRESS)) 112 112 #define alloc_bootmem_node(pgdat, x) \ 113 113 __alloc_bootmem_node(pgdat, x, SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS)) 114 + #define alloc_bootmem_node_nopanic(pgdat, x) \ 115 + __alloc_bootmem_node_nopanic(pgdat, x, SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS)) 114 116 #define alloc_bootmem_pages_node(pgdat, x) \ 115 117 __alloc_bootmem_node(pgdat, x, PAGE_SIZE, __pa(MAX_DMA_ADDRESS)) 116 118 #define alloc_bootmem_pages_node_nopanic(pgdat, x) \
+1 -12
include/linux/capability.h
··· 546 546 extern bool capable(int cap); 547 547 extern bool ns_capable(struct user_namespace *ns, int cap); 548 548 extern bool task_ns_capable(struct task_struct *t, int cap); 549 - 550 - /** 551 - * nsown_capable - Check superior capability to one's own user_ns 552 - * @cap: The capability in question 553 - * 554 - * Return true if the current task has the given superior capability 555 - * targeted at its own user namespace. 556 - */ 557 - static inline bool nsown_capable(int cap) 558 - { 559 - return ns_capable(current_user_ns(), cap); 560 - } 549 + extern bool nsown_capable(int cap); 561 550 562 551 /* audit system wants to get cap info from files as well */ 563 552 extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
+8 -2
include/linux/cred.h
··· 146 146 void *security; /* subjective LSM security */ 147 147 #endif 148 148 struct user_struct *user; /* real user ID subscription */ 149 + struct user_namespace *user_ns; /* cached user->user_ns */ 149 150 struct group_info *group_info; /* supplementary groups for euid/fsgid */ 150 151 struct rcu_head rcu; /* RCU deletion hook */ 151 152 }; ··· 355 354 #define current_fsgid() (current_cred_xxx(fsgid)) 356 355 #define current_cap() (current_cred_xxx(cap_effective)) 357 356 #define current_user() (current_cred_xxx(user)) 358 - #define _current_user_ns() (current_cred_xxx(user)->user_ns) 359 357 #define current_security() (current_cred_xxx(security)) 360 358 361 - extern struct user_namespace *current_user_ns(void); 359 + #ifdef CONFIG_USER_NS 360 + #define current_user_ns() (current_cred_xxx(user_ns)) 361 + #else 362 + extern struct user_namespace init_user_ns; 363 + #define current_user_ns() (&init_user_ns) 364 + #endif 365 + 362 366 363 367 #define current_uid_gid(_uid, _gid) \ 364 368 do { \
+2 -2
include/linux/dcache.h
··· 197 197 * typically using d_splice_alias. */ 198 198 199 199 #define DCACHE_REFERENCED 0x0008 /* Recently used, don't discard. */ 200 - #define DCACHE_UNHASHED 0x0010 200 + #define DCACHE_RCUACCESS 0x0010 /* Entry has ever been RCU-visible */ 201 201 #define DCACHE_INOTIFY_PARENT_WATCHED 0x0020 202 202 /* Parent inode is watched by inotify */ 203 203 ··· 384 384 385 385 static inline int d_unhashed(struct dentry *dentry) 386 386 { 387 - return (dentry->d_flags & DCACHE_UNHASHED); 387 + return hlist_bl_unhashed(&dentry->d_hash); 388 388 } 389 389 390 390 static inline int d_unlinked(struct dentry *dentry)
-1
include/linux/device.h
··· 442 442 struct dev_archdata archdata; 443 443 444 444 struct device_node *of_node; /* associated device tree node */ 445 - const struct of_device_id *of_match; /* matching of_device_id from driver */ 446 445 447 446 dev_t devt; /* dev_t, creates the sysfs "dev" */ 448 447
+1
include/linux/fb.h
··· 832 832 #define FBINFO_CAN_FORCE_OUTPUT 0x200000 833 833 834 834 struct fb_info { 835 + atomic_t count; 835 836 int node; 836 837 int flags; 837 838 struct mutex lock; /* Lock for open/release/ioctl funcs */
+1 -1
include/linux/flex_array.h
··· 61 61 struct flex_array *flex_array_alloc(int element_size, unsigned int total, 62 62 gfp_t flags); 63 63 int flex_array_prealloc(struct flex_array *fa, unsigned int start, 64 - unsigned int end, gfp_t flags); 64 + unsigned int nr_elements, gfp_t flags); 65 65 void flex_array_free(struct flex_array *fa); 66 66 void flex_array_free_parts(struct flex_array *fa); 67 67 int flex_array_put(struct flex_array *fa, unsigned int element_nr, void *src,
-1
include/linux/fs.h
··· 358 358 #define FS_EXTENT_FL 0x00080000 /* Extents */ 359 359 #define FS_DIRECTIO_FL 0x00100000 /* Use direct i/o */ 360 360 #define FS_NOCOW_FL 0x00800000 /* Do not cow file */ 361 - #define FS_COW_FL 0x02000000 /* Cow file */ 362 361 #define FS_RESERVED_FL 0x80000000 /* reserved for ext2 lib */ 363 362 364 363 #define FS_FL_USER_VISIBLE 0x0003DFFF /* User visible flags */
+1
include/linux/ftrace_event.h
··· 37 37 unsigned char flags; 38 38 unsigned char preempt_count; 39 39 int pid; 40 + int padding; 40 41 }; 41 42 42 43 #define FTRACE_MAX_EVENT \
+2
include/linux/gfp.h
··· 353 353 354 354 void *alloc_pages_exact(size_t size, gfp_t gfp_mask); 355 355 void free_pages_exact(void *virt, size_t size); 356 + /* This is different from alloc_pages_exact_node !!! */ 357 + void *alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask); 356 358 357 359 #define __get_free_page(gfp_mask) \ 358 360 __get_free_pages((gfp_mask), 0)
+1 -1
include/linux/huge_mm.h
··· 117 117 unsigned long end, 118 118 long adjust_next) 119 119 { 120 - if (!vma->anon_vma || vma->vm_ops || vma->vm_file) 120 + if (!vma->anon_vma || vma->vm_ops) 121 121 return; 122 122 __vma_adjust_trans_huge(vma, start, end, adjust_next); 123 123 }
+1 -2
include/linux/libata.h
··· 137 137 ATA_DFLAG_ACPI_PENDING = (1 << 5), /* ACPI resume action pending */ 138 138 ATA_DFLAG_ACPI_FAILED = (1 << 6), /* ACPI on devcfg has failed */ 139 139 ATA_DFLAG_AN = (1 << 7), /* AN configured */ 140 - ATA_DFLAG_HIPM = (1 << 8), /* device supports HIPM */ 141 - ATA_DFLAG_DIPM = (1 << 9), /* device supports DIPM */ 142 140 ATA_DFLAG_DMADIR = (1 << 10), /* device requires DMADIR */ 143 141 ATA_DFLAG_CFG_MASK = (1 << 12) - 1, 144 142 ··· 196 198 * management */ 197 199 ATA_FLAG_SW_ACTIVITY = (1 << 22), /* driver supports sw activity 198 200 * led */ 201 + ATA_FLAG_NO_DIPM = (1 << 23), /* host not happy with DIPM */ 199 202 200 203 /* bits 24:31 of ap->flags are reserved for LLD specific flags */ 201 204
+11
include/linux/list_bl.h
··· 2 2 #define _LINUX_LIST_BL_H 3 3 4 4 #include <linux/list.h> 5 + #include <linux/bit_spinlock.h> 5 6 6 7 /* 7 8 * Special version of lists, where head of the list has a lock in the lowest ··· 113 112 __hlist_bl_del(n); 114 113 INIT_HLIST_BL_NODE(n); 115 114 } 115 + } 116 + 117 + static inline void hlist_bl_lock(struct hlist_bl_head *b) 118 + { 119 + bit_spin_lock(0, (unsigned long *)b); 120 + } 121 + 122 + static inline void hlist_bl_unlock(struct hlist_bl_head *b) 123 + { 124 + __bit_spin_unlock(0, (unsigned long *)b); 116 125 } 117 126 118 127 /**
+2
include/linux/mfd/wm831x/pdata.h
··· 81 81 int rpu; /** Pen down sensitivity resistor divider */ 82 82 int pressure; /** Report pressure (boolean) */ 83 83 unsigned int data_irq; /** Touch data ready IRQ */ 84 + int data_irqf; /** IRQ flags for data ready IRQ */ 84 85 unsigned int pd_irq; /** Touch pendown detect IRQ */ 86 + int pd_irqf; /** IRQ flags for pen down IRQ */ 85 87 }; 86 88 87 89 enum wm831x_watchdog_action {
+25 -2
include/linux/mm.h
··· 137 137 #define VM_RandomReadHint(v) ((v)->vm_flags & VM_RAND_READ) 138 138 139 139 /* 140 - * special vmas that are non-mergable, non-mlock()able 140 + * Special vmas that are non-mergable, non-mlock()able. 141 + * Note: mm/huge_memory.c VM_NO_THP depends on this definition. 141 142 */ 142 143 #define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_RESERVED | VM_PFNMAP) 143 144 ··· 1011 1010 int clear_page_dirty_for_io(struct page *page); 1012 1011 1013 1012 /* Is the vma a continuation of the stack vma above it? */ 1014 - static inline int vma_stack_continue(struct vm_area_struct *vma, unsigned long addr) 1013 + static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr) 1015 1014 { 1016 1015 return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN); 1016 + } 1017 + 1018 + static inline int stack_guard_page_start(struct vm_area_struct *vma, 1019 + unsigned long addr) 1020 + { 1021 + return (vma->vm_flags & VM_GROWSDOWN) && 1022 + (vma->vm_start == addr) && 1023 + !vma_growsdown(vma->vm_prev, addr); 1024 + } 1025 + 1026 + /* Is the vma a continuation of the stack vma below it? */ 1027 + static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr) 1028 + { 1029 + return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP); 1030 + } 1031 + 1032 + static inline int stack_guard_page_end(struct vm_area_struct *vma, 1033 + unsigned long addr) 1034 + { 1035 + return (vma->vm_flags & VM_GROWSUP) && 1036 + (vma->vm_end == addr) && 1037 + !vma_growsup(vma->vm_next, addr); 1017 1038 } 1018 1039 1019 1040 extern unsigned long move_page_tables(struct vm_area_struct *vma,
+1
include/linux/nfs_fs_sb.h
··· 47 47 48 48 #ifdef CONFIG_NFS_V4 49 49 u64 cl_clientid; /* constant */ 50 + nfs4_verifier cl_confirm; /* Clientid verifier */ 50 51 unsigned long cl_state; 51 52 52 53 spinlock_t cl_lock;
+3
include/linux/nfs_xdr.h
··· 50 50 } du; 51 51 struct nfs_fsid fsid; 52 52 __u64 fileid; 53 + __u64 mounted_on_fileid; 53 54 struct timespec atime; 54 55 struct timespec mtime; 55 56 struct timespec ctime; ··· 84 83 #define NFS_ATTR_FATTR_PRECHANGE (1U << 18) 85 84 #define NFS_ATTR_FATTR_V4_REFERRAL (1U << 19) /* NFSv4 referral */ 86 85 #define NFS_ATTR_FATTR_MOUNTPOINT (1U << 20) /* Treat as mountpoint */ 86 + #define NFS_ATTR_FATTR_MOUNTED_ON_FILEID (1U << 21) 87 87 88 88 #define NFS_ATTR_FATTR (NFS_ATTR_FATTR_TYPE \ 89 89 | NFS_ATTR_FATTR_MODE \ ··· 233 231 struct nfs4_layoutget_args args; 234 232 struct nfs4_layoutget_res res; 235 233 struct pnfs_layout_segment **lsegpp; 234 + gfp_t gfp_flags; 236 235 }; 237 236 238 237 struct nfs4_getdeviceinfo_args {
+6 -2
include/linux/of_device.h
··· 21 21 static inline int of_driver_match_device(struct device *dev, 22 22 const struct device_driver *drv) 23 23 { 24 - dev->of_match = of_match_device(drv->of_match_table, dev); 25 - return dev->of_match != NULL; 24 + return of_match_device(drv->of_match_table, dev) != NULL; 26 25 } 27 26 28 27 extern struct platform_device *of_dev_get(struct platform_device *dev); ··· 57 58 58 59 static inline void of_device_node_put(struct device *dev) { } 59 60 61 + static inline const struct of_device_id *of_match_device( 62 + const struct of_device_id *matches, const struct device *dev) 63 + { 64 + return NULL; 65 + } 60 66 #endif /* CONFIG_OF_DEVICE */ 61 67 62 68 #endif /* _LINUX_OF_DEVICE_H */
-4
include/linux/pci_ids.h
··· 2477 2477 #define PCI_DEVICE_ID_INTEL_82840_HB 0x1a21 2478 2478 #define PCI_DEVICE_ID_INTEL_82845_HB 0x1a30 2479 2479 #define PCI_DEVICE_ID_INTEL_IOAT 0x1a38 2480 - #define PCI_DEVICE_ID_INTEL_COUGARPOINT_SMBUS 0x1c22 2481 2480 #define PCI_DEVICE_ID_INTEL_COUGARPOINT_LPC_MIN 0x1c41 2482 2481 #define PCI_DEVICE_ID_INTEL_COUGARPOINT_LPC_MAX 0x1c5f 2483 - #define PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS 0x1d22 2484 2482 #define PCI_DEVICE_ID_INTEL_PATSBURG_LPC_0 0x1d40 2485 2483 #define PCI_DEVICE_ID_INTEL_PATSBURG_LPC_1 0x1d41 2486 2484 #define PCI_DEVICE_ID_INTEL_DH89XXCC_LPC_MIN 0x2310 2487 2485 #define PCI_DEVICE_ID_INTEL_DH89XXCC_LPC_MAX 0x231f 2488 - #define PCI_DEVICE_ID_INTEL_DH89XXCC_SMBUS 0x2330 2489 2486 #define PCI_DEVICE_ID_INTEL_82801AA_0 0x2410 2490 2487 #define PCI_DEVICE_ID_INTEL_82801AA_1 0x2411 2491 2488 #define PCI_DEVICE_ID_INTEL_82801AA_3 0x2413 ··· 2693 2696 #define PCI_DEVICE_ID_INTEL_ICH10_5 0x3a60 2694 2697 #define PCI_DEVICE_ID_INTEL_5_3400_SERIES_LPC_MIN 0x3b00 2695 2698 #define PCI_DEVICE_ID_INTEL_5_3400_SERIES_LPC_MAX 0x3b1f 2696 - #define PCI_DEVICE_ID_INTEL_5_3400_SERIES_SMBUS 0x3b30 2697 2699 #define PCI_DEVICE_ID_INTEL_IOAT_SNB 0x402f 2698 2700 #define PCI_DEVICE_ID_INTEL_5100_16 0x65f0 2699 2701 #define PCI_DEVICE_ID_INTEL_5100_21 0x65f5
+1 -1
include/linux/percpu.h
··· 948 948 irqsafe_generic_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) 949 949 # endif 950 950 # define irqsafe_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \ 951 - __pcpu_double_call_return_int(irqsafe_cpu_cmpxchg_double_, (pcp1), (pcp2), (oval1), (oval2), (nval1), (nval2)) 951 + __pcpu_double_call_return_bool(irqsafe_cpu_cmpxchg_double_, (pcp1), (pcp2), (oval1), (oval2), (nval1), (nval2)) 952 952 #endif 953 953 954 954 #endif /* __LINUX_PERCPU_H */
+3 -2
include/linux/posix-clock.h
··· 24 24 #include <linux/fs.h> 25 25 #include <linux/poll.h> 26 26 #include <linux/posix-timers.h> 27 + #include <linux/rwsem.h> 27 28 28 29 struct posix_clock; 29 30 ··· 105 104 * @ops: Functional interface to the clock 106 105 * @cdev: Character device instance for this clock 107 106 * @kref: Reference count. 108 - * @mutex: Protects the 'zombie' field from concurrent access. 107 + * @rwsem: Protects the 'zombie' field from concurrent access. 109 108 * @zombie: If 'zombie' is true, then the hardware has disappeared. 110 109 * @release: A function to free the structure when the reference count reaches 111 110 * zero. May be NULL if structure is statically allocated. ··· 118 117 struct posix_clock_operations ops; 119 118 struct cdev cdev; 120 119 struct kref kref; 121 - struct mutex mutex; 120 + struct rw_semaphore rwsem; 122 121 bool zombie; 123 122 void (*release)(struct posix_clock *clk); 124 123 };
+2
include/linux/proc_fs.h
··· 208 208 struct proc_dir_entry *parent,const char *dest) {return NULL;} 209 209 static inline struct proc_dir_entry *proc_mkdir(const char *name, 210 210 struct proc_dir_entry *parent) {return NULL;} 211 + static inline struct proc_dir_entry *proc_mkdir_mode(const char *name, 212 + mode_t mode, struct proc_dir_entry *parent) { return NULL; } 211 213 212 214 static inline struct proc_dir_entry *create_proc_read_entry(const char *name, 213 215 mode_t mode, struct proc_dir_entry *base,
+12 -1
include/linux/ptrace.h
··· 189 189 child->ptrace = current->ptrace; 190 190 __ptrace_link(child, current->parent); 191 191 } 192 + 193 + #ifdef CONFIG_HAVE_HW_BREAKPOINT 194 + atomic_set(&child->ptrace_bp_refcnt, 1); 195 + #endif 192 196 } 193 197 194 198 /** ··· 354 350 unsigned long args[6], unsigned int maxargs, 355 351 unsigned long *sp, unsigned long *pc); 356 352 357 - #endif 353 + #ifdef CONFIG_HAVE_HW_BREAKPOINT 354 + extern int ptrace_get_breakpoints(struct task_struct *tsk); 355 + extern void ptrace_put_breakpoints(struct task_struct *tsk); 356 + #else 357 + static inline void ptrace_put_breakpoints(struct task_struct *tsk) { } 358 + #endif /* CONFIG_HAVE_HW_BREAKPOINT */ 359 + 360 + #endif /* __KERNEL */ 358 361 359 362 #endif
+3
include/linux/sched.h
··· 1537 1537 unsigned long memsw_nr_pages; /* uncharged mem+swap usage */ 1538 1538 } memcg_batch; 1539 1539 #endif 1540 + #ifdef CONFIG_HAVE_HW_BREAKPOINT 1541 + atomic_t ptrace_bp_refcnt; 1542 + #endif 1540 1543 }; 1541 1544 1542 1545 /* Future-safe accessor for struct task_struct's cpus_allowed. */
+1 -1
include/linux/security.h
··· 1456 1456 struct inode *new_dir, struct dentry *new_dentry); 1457 1457 int (*inode_readlink) (struct dentry *dentry); 1458 1458 int (*inode_follow_link) (struct dentry *dentry, struct nameidata *nd); 1459 - int (*inode_permission) (struct inode *inode, int mask); 1459 + int (*inode_permission) (struct inode *inode, int mask, unsigned flags); 1460 1460 int (*inode_setattr) (struct dentry *dentry, struct iattr *attr); 1461 1461 int (*inode_getattr) (struct vfsmount *mnt, struct dentry *dentry); 1462 1462 int (*inode_setxattr) (struct dentry *dentry, const char *name,
+4 -1
include/linux/sunrpc/sched.h
··· 127 127 #define RPC_TASK_KILLED 0x0100 /* task was killed */ 128 128 #define RPC_TASK_SOFT 0x0200 /* Use soft timeouts */ 129 129 #define RPC_TASK_SOFTCONN 0x0400 /* Fail if can't connect */ 130 + #define RPC_TASK_SENT 0x0800 /* message was sent */ 131 + #define RPC_TASK_TIMEOUT 0x1000 /* fail with ETIMEDOUT on timeout */ 130 132 131 133 #define RPC_IS_ASYNC(t) ((t)->tk_flags & RPC_TASK_ASYNC) 132 134 #define RPC_IS_SWAPPER(t) ((t)->tk_flags & RPC_TASK_SWAPPER) 133 135 #define RPC_DO_ROOTOVERRIDE(t) ((t)->tk_flags & RPC_TASK_ROOTCREDS) 134 136 #define RPC_ASSASSINATED(t) ((t)->tk_flags & RPC_TASK_KILLED) 135 - #define RPC_IS_SOFT(t) ((t)->tk_flags & RPC_TASK_SOFT) 137 + #define RPC_IS_SOFT(t) ((t)->tk_flags & (RPC_TASK_SOFT|RPC_TASK_TIMEOUT)) 136 138 #define RPC_IS_SOFTCONN(t) ((t)->tk_flags & RPC_TASK_SOFTCONN) 139 + #define RPC_WAS_SENT(t) ((t)->tk_flags & RPC_TASK_SENT) 137 140 138 141 #define RPC_TASK_RUNNING 0 139 142 #define RPC_TASK_QUEUED 1
+3 -2
include/linux/usb/usbnet.h
··· 68 68 # define EVENT_RX_PAUSED 5 69 69 # define EVENT_DEV_WAKING 6 70 70 # define EVENT_DEV_ASLEEP 7 71 + # define EVENT_DEV_OPEN 8 71 72 }; 72 73 73 74 static inline struct usb_driver *driver_of(struct usb_interface *intf) ··· 104 103 * Indicates to usbnet, that USB driver accumulates multiple IP packets. 105 104 * Affects statistic (counters) and short packet handling. 106 105 */ 107 - #define FLAG_MULTI_PACKET 0x1000 108 - #define FLAG_RX_ASSEMBLE 0x2000 /* rx packets may span >1 frames */ 106 + #define FLAG_MULTI_PACKET 0x2000 107 + #define FLAG_RX_ASSEMBLE 0x4000 /* rx packets may span >1 frames */ 109 108 110 109 /* init device ... can sleep, or cause probe() failure */ 111 110 int (*bind)(struct usbnet *, struct usb_interface *);
+5 -2
include/linux/v4l2-mediabus.h
··· 47 47 V4L2_MBUS_FMT_RGB565_2X8_BE = 0x1007, 48 48 V4L2_MBUS_FMT_RGB565_2X8_LE = 0x1008, 49 49 50 - /* YUV (including grey) - next is 0x2013 */ 50 + /* YUV (including grey) - next is 0x2014 */ 51 51 V4L2_MBUS_FMT_Y8_1X8 = 0x2001, 52 52 V4L2_MBUS_FMT_UYVY8_1_5X8 = 0x2002, 53 53 V4L2_MBUS_FMT_VYUY8_1_5X8 = 0x2003, ··· 60 60 V4L2_MBUS_FMT_Y10_1X10 = 0x200a, 61 61 V4L2_MBUS_FMT_YUYV10_2X10 = 0x200b, 62 62 V4L2_MBUS_FMT_YVYU10_2X10 = 0x200c, 63 + V4L2_MBUS_FMT_Y12_1X12 = 0x2013, 63 64 V4L2_MBUS_FMT_UYVY8_1X16 = 0x200f, 64 65 V4L2_MBUS_FMT_VYUY8_1X16 = 0x2010, 65 66 V4L2_MBUS_FMT_YUYV8_1X16 = 0x2011, ··· 68 67 V4L2_MBUS_FMT_YUYV10_1X20 = 0x200d, 69 68 V4L2_MBUS_FMT_YVYU10_1X20 = 0x200e, 70 69 71 - /* Bayer - next is 0x3013 */ 70 + /* Bayer - next is 0x3015 */ 72 71 V4L2_MBUS_FMT_SBGGR8_1X8 = 0x3001, 72 + V4L2_MBUS_FMT_SGBRG8_1X8 = 0x3013, 73 73 V4L2_MBUS_FMT_SGRBG8_1X8 = 0x3002, 74 + V4L2_MBUS_FMT_SRGGB8_1X8 = 0x3014, 74 75 V4L2_MBUS_FMT_SBGGR10_DPCM8_1X8 = 0x300b, 75 76 V4L2_MBUS_FMT_SGBRG10_DPCM8_1X8 = 0x300c, 76 77 V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8 = 0x3009,
+1
include/linux/videodev2.h
··· 308 308 #define V4L2_PIX_FMT_Y4 v4l2_fourcc('Y', '0', '4', ' ') /* 4 Greyscale */ 309 309 #define V4L2_PIX_FMT_Y6 v4l2_fourcc('Y', '0', '6', ' ') /* 6 Greyscale */ 310 310 #define V4L2_PIX_FMT_Y10 v4l2_fourcc('Y', '1', '0', ' ') /* 10 Greyscale */ 311 + #define V4L2_PIX_FMT_Y12 v4l2_fourcc('Y', '1', '2', ' ') /* 12 Greyscale */ 311 312 #define V4L2_PIX_FMT_Y16 v4l2_fourcc('Y', '1', '6', ' ') /* 16 Greyscale */ 312 313 313 314 /* Palette formats */
+1 -1
include/media/v4l2-device.h
··· 163 163 ({ \ 164 164 struct v4l2_subdev *__sd; \ 165 165 __v4l2_device_call_subdevs_until_err_p(v4l2_dev, __sd, cond, o, \ 166 - f, args...); \ 166 + f , ##args); \ 167 167 }) 168 168 169 169 /* Call the specified callback for all subdevs matching grp_id (if 0, then
+13 -3
include/net/inet_ecn.h
··· 38 38 return outer; 39 39 } 40 40 41 - #define INET_ECN_xmit(sk) do { inet_sk(sk)->tos |= INET_ECN_ECT_0; } while (0) 42 - #define INET_ECN_dontxmit(sk) \ 43 - do { inet_sk(sk)->tos &= ~INET_ECN_MASK; } while (0) 41 + static inline void INET_ECN_xmit(struct sock *sk) 42 + { 43 + inet_sk(sk)->tos |= INET_ECN_ECT_0; 44 + if (inet6_sk(sk) != NULL) 45 + inet6_sk(sk)->tclass |= INET_ECN_ECT_0; 46 + } 47 + 48 + static inline void INET_ECN_dontxmit(struct sock *sk) 49 + { 50 + inet_sk(sk)->tos &= ~INET_ECN_MASK; 51 + if (inet6_sk(sk) != NULL) 52 + inet6_sk(sk)->tclass &= ~INET_ECN_MASK; 53 + } 44 54 45 55 #define IP6_ECN_flow_init(label) do { \ 46 56 (label) &= ~htonl(INET_ECN_MASK << 20); \
+17
include/net/ip_vs.h
··· 791 791 /* IPVS in network namespace */ 792 792 struct netns_ipvs { 793 793 int gen; /* Generation */ 794 + int enable; /* enable like nf_hooks do */ 794 795 /* 795 796 * Hash table: for real service lookups 796 797 */ ··· 1090 1089 atomic_inc(&ctl_cp->n_control); 1091 1090 } 1092 1091 1092 + /* 1093 + * IPVS netns init & cleanup functions 1094 + */ 1095 + extern int __ip_vs_estimator_init(struct net *net); 1096 + extern int __ip_vs_control_init(struct net *net); 1097 + extern int __ip_vs_protocol_init(struct net *net); 1098 + extern int __ip_vs_app_init(struct net *net); 1099 + extern int __ip_vs_conn_init(struct net *net); 1100 + extern int __ip_vs_sync_init(struct net *net); 1101 + extern void __ip_vs_conn_cleanup(struct net *net); 1102 + extern void __ip_vs_app_cleanup(struct net *net); 1103 + extern void __ip_vs_protocol_cleanup(struct net *net); 1104 + extern void __ip_vs_control_cleanup(struct net *net); 1105 + extern void __ip_vs_estimator_cleanup(struct net *net); 1106 + extern void __ip_vs_sync_cleanup(struct net *net); 1107 + extern void __ip_vs_service_cleanup(struct net *net); 1093 1108 1094 1109 /* 1095 1110 * IPVS application functions
+4 -4
include/net/llc_pdu.h
··· 199 199 u8 ssap; 200 200 u8 ctrl_1; 201 201 u8 ctrl_2; 202 - }; 202 + } __packed; 203 203 204 204 static inline struct llc_pdu_sn *llc_pdu_sn_hdr(struct sk_buff *skb) 205 205 { ··· 211 211 u8 dsap; 212 212 u8 ssap; 213 213 u8 ctrl_1; 214 - }; 214 + } __packed; 215 215 216 216 static inline struct llc_pdu_un *llc_pdu_un_hdr(struct sk_buff *skb) 217 217 { ··· 359 359 u8 fmt_id; /* always 0x81 for LLC */ 360 360 u8 type; /* different if NULL/non-NULL LSAP */ 361 361 u8 rw; /* sender receive window */ 362 - }; 362 + } __packed; 363 363 364 364 /** 365 365 * llc_pdu_init_as_xid_cmd - sets bytes 3, 4 & 5 of LLC header as XID ··· 415 415 u8 curr_ssv; /* current send state variable val */ 416 416 u8 curr_rsv; /* current receive state variable */ 417 417 u8 ind_bits; /* indicator bits set with macro */ 418 - }; 418 + } __packed; 419 419 420 420 extern void llc_pdu_set_cmd_rsp(struct sk_buff *skb, u8 type); 421 421 extern void llc_pdu_set_pf_bit(struct sk_buff *skb, u8 bit_value);
+3
include/net/xfrm.h
··· 324 324 int (*tmpl_sort)(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n); 325 325 int (*state_sort)(struct xfrm_state **dst, struct xfrm_state **src, int n); 326 326 int (*output)(struct sk_buff *skb); 327 + int (*output_finish)(struct sk_buff *skb); 327 328 int (*extract_input)(struct xfrm_state *x, 328 329 struct sk_buff *skb); 329 330 int (*extract_output)(struct xfrm_state *x, ··· 1455 1454 extern int xfrm4_extract_output(struct xfrm_state *x, struct sk_buff *skb); 1456 1455 extern int xfrm4_prepare_output(struct xfrm_state *x, struct sk_buff *skb); 1457 1456 extern int xfrm4_output(struct sk_buff *skb); 1457 + extern int xfrm4_output_finish(struct sk_buff *skb); 1458 1458 extern int xfrm4_tunnel_register(struct xfrm_tunnel *handler, unsigned short family); 1459 1459 extern int xfrm4_tunnel_deregister(struct xfrm_tunnel *handler, unsigned short family); 1460 1460 extern int xfrm6_extract_header(struct sk_buff *skb); ··· 1472 1470 extern int xfrm6_extract_output(struct xfrm_state *x, struct sk_buff *skb); 1473 1471 extern int xfrm6_prepare_output(struct xfrm_state *x, struct sk_buff *skb); 1474 1472 extern int xfrm6_output(struct sk_buff *skb); 1473 + extern int xfrm6_output_finish(struct sk_buff *skb); 1475 1474 extern int xfrm6_find_1stfragopt(struct xfrm_state *x, struct sk_buff *skb, 1476 1475 u8 **prevhdr); 1477 1476
+1
include/scsi/scsi_device.h
··· 169 169 sdev_dev; 170 170 171 171 struct execute_work ew; /* used to get process context on put */ 172 + struct work_struct requeue_work; 172 173 173 174 struct scsi_dh_data *scsi_dh_data; 174 175 enum scsi_device_state sdev_state;
+5 -1
include/trace/events/gfpflags.h
··· 10 10 */ 11 11 #define show_gfp_flags(flags) \ 12 12 (flags) ? __print_flags(flags, "|", \ 13 + {(unsigned long)GFP_TRANSHUGE, "GFP_TRANSHUGE"}, \ 13 14 {(unsigned long)GFP_HIGHUSER_MOVABLE, "GFP_HIGHUSER_MOVABLE"}, \ 14 15 {(unsigned long)GFP_HIGHUSER, "GFP_HIGHUSER"}, \ 15 16 {(unsigned long)GFP_USER, "GFP_USER"}, \ ··· 33 32 {(unsigned long)__GFP_HARDWALL, "GFP_HARDWALL"}, \ 34 33 {(unsigned long)__GFP_THISNODE, "GFP_THISNODE"}, \ 35 34 {(unsigned long)__GFP_RECLAIMABLE, "GFP_RECLAIMABLE"}, \ 36 - {(unsigned long)__GFP_MOVABLE, "GFP_MOVABLE"} \ 35 + {(unsigned long)__GFP_MOVABLE, "GFP_MOVABLE"}, \ 36 + {(unsigned long)__GFP_NOTRACK, "GFP_NOTRACK"}, \ 37 + {(unsigned long)__GFP_NO_KSWAPD, "GFP_NO_KSWAPD"}, \ 38 + {(unsigned long)__GFP_OTHER_NODE, "GFP_OTHER_NODE"} \ 37 39 ) : "GFP_NOWAIT" 38 40
+8 -8
init/Kconfig
··· 924 924 environments which can tolerate a "non-standard" kernel. 925 925 Only use this if you really know what you are doing. 926 926 927 - config EMBEDDED 928 - bool "Embedded system" 929 - select EXPERT 930 - help 931 - This option should be enabled if compiling the kernel for 932 - an embedded system so certain expert options are available 933 - for configuration. 934 - 935 927 config UID16 936 928 bool "Enable 16-bit UID system calls" if EXPERT 937 929 depends on ARM || BLACKFIN || CRIS || FRV || H8300 || X86_32 || M68K || (S390 && !64BIT) || SUPERH || SPARC32 || (SPARC64 && COMPAT) || UML || (X86_64 && IA32_EMULATION) ··· 1095 1103 This option enables POSIX asynchronous I/O which may by used 1096 1104 by some high performance threaded applications. Disabling 1097 1105 this option saves about 7k. 1106 + 1107 + config EMBEDDED 1108 + bool "Embedded system" 1109 + select EXPERT 1110 + help 1111 + This option should be enabled if compiling the kernel for 1112 + an embedded system so certain expert options are available 1113 + for configuration. 1098 1114 1099 1115 config HAVE_PERF_EVENTS 1100 1116 bool
+12
kernel/capability.c
··· 399 399 return ns_capable(task_cred_xxx(t, user)->user_ns, cap); 400 400 } 401 401 EXPORT_SYMBOL(task_ns_capable); 402 + 403 + /** 404 + * nsown_capable - Check superior capability to one's own user_ns 405 + * @cap: The capability in question 406 + * 407 + * Return true if the current task has the given superior capability 408 + * targeted at its own user namespace. 409 + */ 410 + bool nsown_capable(int cap) 411 + { 412 + return ns_capable(current_user_ns(), cap); 413 + }
+6 -6
kernel/cred.c
··· 54 54 .cap_effective = CAP_INIT_EFF_SET, 55 55 .cap_bset = CAP_INIT_BSET, 56 56 .user = INIT_USER, 57 + .user_ns = &init_user_ns, 57 58 .group_info = &init_groups, 58 59 #ifdef CONFIG_KEYS 59 60 .tgcred = &init_tgcred, ··· 411 410 goto error_put; 412 411 } 413 412 413 + /* cache user_ns in cred. Doesn't need a refcount because it will 414 + * stay pinned by cred->user 415 + */ 416 + new->user_ns = new->user->user_ns; 417 + 414 418 #ifdef CONFIG_KEYS 415 419 /* new threads get their own thread keyrings if their parent already 416 420 * had one */ ··· 746 740 return security_kernel_create_files_as(new, inode); 747 741 } 748 742 EXPORT_SYMBOL(set_create_files_as); 749 - 750 - struct user_namespace *current_user_ns(void) 751 - { 752 - return _current_user_ns(); 753 - } 754 - EXPORT_SYMBOL(current_user_ns); 755 743 756 744 #ifdef CONFIG_DEBUG_CREDENTIALS 757 745
+1 -1
kernel/exit.c
··· 1016 1016 /* 1017 1017 * FIXME: do that only when needed, using sched_exit tracepoint 1018 1018 */ 1019 - flush_ptrace_hw_breakpoint(tsk); 1019 + ptrace_put_breakpoints(tsk); 1020 1020 1021 1021 exit_notify(tsk, group_dead); 1022 1022 #ifdef CONFIG_NUMA
+5 -5
kernel/hrtimer.c
··· 81 81 } 82 82 }; 83 83 84 - static int hrtimer_clock_to_base_table[MAX_CLOCKS]; 84 + static int hrtimer_clock_to_base_table[MAX_CLOCKS] = { 85 + [CLOCK_REALTIME] = HRTIMER_BASE_REALTIME, 86 + [CLOCK_MONOTONIC] = HRTIMER_BASE_MONOTONIC, 87 + [CLOCK_BOOTTIME] = HRTIMER_BASE_BOOTTIME, 88 + }; 85 89 86 90 static inline int hrtimer_clockid_to_base(clockid_t clock_id) 87 91 { ··· 1726 1722 1727 1723 void __init hrtimers_init(void) 1728 1724 { 1729 - hrtimer_clock_to_base_table[CLOCK_REALTIME] = HRTIMER_BASE_REALTIME; 1730 - hrtimer_clock_to_base_table[CLOCK_MONOTONIC] = HRTIMER_BASE_MONOTONIC; 1731 - hrtimer_clock_to_base_table[CLOCK_BOOTTIME] = HRTIMER_BASE_BOOTTIME; 1732 - 1733 1725 hrtimer_cpu_notify(&hrtimers_nb, (unsigned long)CPU_UP_PREPARE, 1734 1726 (void *)(long)smp_processor_id()); 1735 1727 register_cpu_notifier(&hrtimers_nb);
+1 -1
kernel/irq/proc.c
··· 419 419 } else { 420 420 seq_printf(p, " %8s", "None"); 421 421 } 422 - #ifdef CONFIG_GENIRC_IRQ_SHOW_LEVEL 422 + #ifdef CONFIG_GENERIC_IRQ_SHOW_LEVEL 423 423 seq_printf(p, " %-8s", irqd_is_level_type(&desc->irq_data) ? "Level" : "Edge"); 424 424 #endif 425 425 if (desc->name)
+7
kernel/kexec.c
··· 33 33 #include <linux/vmalloc.h> 34 34 #include <linux/swap.h> 35 35 #include <linux/kmsg_dump.h> 36 + #include <linux/syscore_ops.h> 36 37 37 38 #include <asm/page.h> 38 39 #include <asm/uaccess.h> ··· 1533 1532 local_irq_disable(); 1534 1533 /* Suspend system devices */ 1535 1534 error = sysdev_suspend(PMSG_FREEZE); 1535 + if (!error) { 1536 + error = syscore_suspend(); 1537 + if (error) 1538 + sysdev_resume(); 1539 + } 1536 1540 if (error) 1537 1541 goto Enable_irqs; 1538 1542 } else ··· 1552 1546 1553 1547 #ifdef CONFIG_KEXEC_JUMP 1554 1548 if (kexec_image->preserve_context) { 1549 + syscore_resume(); 1555 1550 sysdev_resume(); 1556 1551 Enable_irqs: 1557 1552 local_irq_enable();
+8 -2
kernel/power/hibernate.c
··· 273 273 local_irq_disable(); 274 274 275 275 error = sysdev_suspend(PMSG_FREEZE); 276 - if (!error) 276 + if (!error) { 277 277 error = syscore_suspend(); 278 + if (error) 279 + sysdev_resume(); 280 + } 278 281 if (error) { 279 282 printk(KERN_ERR "PM: Some system devices failed to power down, " 280 283 "aborting hibernation\n"); ··· 410 407 local_irq_disable(); 411 408 412 409 error = sysdev_suspend(PMSG_QUIESCE); 413 - if (!error) 410 + if (!error) { 414 411 error = syscore_suspend(); 412 + if (error) 413 + sysdev_resume(); 414 + } 415 415 if (error) 416 416 goto Enable_irqs; 417 417
+6 -3
kernel/power/suspend.c
··· 164 164 BUG_ON(!irqs_disabled()); 165 165 166 166 error = sysdev_suspend(PMSG_SUSPEND); 167 - if (!error) 167 + if (!error) { 168 168 error = syscore_suspend(); 169 + if (error) 170 + sysdev_resume(); 171 + } 169 172 if (!error) { 170 173 if (!(suspend_test(TEST_CORE) || pm_wakeup_pending())) { 171 174 error = suspend_ops->enter(state); ··· 216 213 goto Close; 217 214 } 218 215 suspend_console(); 219 - pm_restrict_gfp_mask(); 220 216 suspend_test_start(); 221 217 error = dpm_suspend_start(PMSG_SUSPEND); 222 218 if (error) { ··· 232 230 suspend_test_start(); 233 231 dpm_resume_end(PMSG_RESUME); 234 232 suspend_test_finish("resume devices"); 235 - pm_restore_gfp_mask(); 236 233 resume_console(); 237 234 Close: 238 235 if (suspend_ops->end) ··· 292 291 goto Finish; 293 292 294 293 pr_debug("PM: Entering %s sleep\n", pm_states[state]); 294 + pm_restrict_gfp_mask(); 295 295 error = suspend_devices_and_enter(state); 296 + pm_restore_gfp_mask(); 296 297 297 298 Finish: 298 299 pr_debug("PM: Finishing wakeup.\n");
+4 -1
kernel/power/user.c
··· 135 135 free_basic_memory_bitmaps(); 136 136 data = filp->private_data; 137 137 free_all_swap_pages(data->swap); 138 - if (data->frozen) 138 + if (data->frozen) { 139 + pm_restore_gfp_mask(); 139 140 thaw_processes(); 141 + } 140 142 pm_notifier_call_chain(data->mode == O_RDONLY ? 141 143 PM_POST_HIBERNATION : PM_POST_RESTORE); 142 144 atomic_inc(&snapshot_device_available); ··· 381 379 * PM_HIBERNATION_PREPARE 382 380 */ 383 381 error = suspend_devices_and_enter(PM_SUSPEND_MEM); 382 + data->ready = 0; 384 383 break; 385 384 386 385 case SNAPSHOT_PLATFORM_SUPPORT:
+17
kernel/ptrace.c
··· 22 22 #include <linux/syscalls.h> 23 23 #include <linux/uaccess.h> 24 24 #include <linux/regset.h> 25 + #include <linux/hw_breakpoint.h> 25 26 26 27 27 28 /* ··· 880 879 return ret; 881 880 } 882 881 #endif /* CONFIG_COMPAT */ 882 + 883 + #ifdef CONFIG_HAVE_HW_BREAKPOINT 884 + int ptrace_get_breakpoints(struct task_struct *tsk) 885 + { 886 + if (atomic_inc_not_zero(&tsk->ptrace_bp_refcnt)) 887 + return 0; 888 + 889 + return -1; 890 + } 891 + 892 + void ptrace_put_breakpoints(struct task_struct *tsk) 893 + { 894 + if (atomic_dec_and_test(&tsk->ptrace_bp_refcnt)) 895 + flush_ptrace_hw_breakpoint(tsk); 896 + } 897 + #endif /* CONFIG_HAVE_HW_BREAKPOINT */
+2 -2
kernel/time/clocksource.c
··· 685 685 /* Add clocksource to the clcoksource list */ 686 686 mutex_lock(&clocksource_mutex); 687 687 clocksource_enqueue(cs); 688 - clocksource_select(); 689 688 clocksource_enqueue_watchdog(cs); 689 + clocksource_select(); 690 690 mutex_unlock(&clocksource_mutex); 691 691 return 0; 692 692 } ··· 706 706 707 707 mutex_lock(&clocksource_mutex); 708 708 clocksource_enqueue(cs); 709 - clocksource_select(); 710 709 clocksource_enqueue_watchdog(cs); 710 + clocksource_select(); 711 711 mutex_unlock(&clocksource_mutex); 712 712 return 0; 713 713 }
+9 -15
kernel/time/posix-clock.c
··· 19 19 */ 20 20 #include <linux/device.h> 21 21 #include <linux/file.h> 22 - #include <linux/mutex.h> 23 22 #include <linux/posix-clock.h> 24 23 #include <linux/slab.h> 25 24 #include <linux/syscalls.h> ··· 33 34 { 34 35 struct posix_clock *clk = fp->private_data; 35 36 36 - mutex_lock(&clk->mutex); 37 + down_read(&clk->rwsem); 37 38 38 39 if (!clk->zombie) 39 40 return clk; 40 41 41 - mutex_unlock(&clk->mutex); 42 + up_read(&clk->rwsem); 42 43 43 44 return NULL; 44 45 } 45 46 46 47 static void put_posix_clock(struct posix_clock *clk) 47 48 { 48 - mutex_unlock(&clk->mutex); 49 + up_read(&clk->rwsem); 49 50 } 50 51 51 52 static ssize_t posix_clock_read(struct file *fp, char __user *buf, ··· 155 156 struct posix_clock *clk = 156 157 container_of(inode->i_cdev, struct posix_clock, cdev); 157 158 158 - mutex_lock(&clk->mutex); 159 + down_read(&clk->rwsem); 159 160 160 161 if (clk->zombie) { 161 162 err = -ENODEV; ··· 171 172 fp->private_data = clk; 172 173 } 173 174 out: 174 - mutex_unlock(&clk->mutex); 175 + up_read(&clk->rwsem); 175 176 return err; 176 177 } 177 178 ··· 210 211 int err; 211 212 212 213 kref_init(&clk->kref); 213 - mutex_init(&clk->mutex); 214 + init_rwsem(&clk->rwsem); 214 215 215 216 cdev_init(&clk->cdev, &posix_clock_file_operations); 216 217 clk->cdev.owner = clk->ops.owner; 217 218 err = cdev_add(&clk->cdev, devid, 1); 218 - if (err) 219 - goto no_cdev; 220 219 221 - return err; 222 - no_cdev: 223 - mutex_destroy(&clk->mutex); 224 220 return err; 225 221 } 226 222 EXPORT_SYMBOL_GPL(posix_clock_register); ··· 223 229 static void delete_clock(struct kref *kref) 224 230 { 225 231 struct posix_clock *clk = container_of(kref, struct posix_clock, kref); 226 - mutex_destroy(&clk->mutex); 232 + 227 233 if (clk->release) 228 234 clk->release(clk); 229 235 } ··· 232 238 { 233 239 cdev_del(&clk->cdev); 234 240 235 - mutex_lock(&clk->mutex); 241 + down_write(&clk->rwsem); 236 242 clk->zombie = true; 237 - mutex_unlock(&clk->mutex); 243 + 
up_write(&clk->rwsem); 238 244 239 245 kref_put(&clk->kref, delete_clock); 240 246 }
+11 -1
kernel/time/tick-broadcast.c
··· 522 522 */ 523 523 void tick_broadcast_setup_oneshot(struct clock_event_device *bc) 524 524 { 525 + int cpu = smp_processor_id(); 526 + 525 527 /* Set it up only once ! */ 526 528 if (bc->event_handler != tick_handle_oneshot_broadcast) { 527 529 int was_periodic = bc->mode == CLOCK_EVT_MODE_PERIODIC; 528 - int cpu = smp_processor_id(); 529 530 530 531 bc->event_handler = tick_handle_oneshot_broadcast; 531 532 clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT); ··· 552 551 tick_broadcast_set_event(tick_next_period, 1); 553 552 } else 554 553 bc->next_event.tv64 = KTIME_MAX; 554 + } else { 555 + /* 556 + * The first cpu which switches to oneshot mode sets 557 + * the bit for all other cpus which are in the general 558 + * (periodic) broadcast mask. So the bit is set and 559 + * would prevent the first broadcast enter after this 560 + * to program the bc device. 561 + */ 562 + tick_broadcast_clear_oneshot(cpu); 555 563 } 556 564 } 557 565
+1 -1
kernel/trace/Kconfig
··· 141 141 config FUNCTION_TRACER 142 142 bool "Kernel Function Tracer" 143 143 depends on HAVE_FUNCTION_TRACER 144 - select FRAME_POINTER if !ARM_UNWIND && !S390 144 + select FRAME_POINTER if !ARM_UNWIND && !S390 && !MICROBLAZE 145 145 select KALLSYMS 146 146 select GENERIC_TRACER 147 147 select CONTEXT_SWITCH_TRACER
+1
kernel/trace/trace.c
··· 1110 1110 1111 1111 entry->preempt_count = pc & 0xff; 1112 1112 entry->pid = (tsk) ? tsk->pid : 0; 1113 + entry->padding = 0; 1113 1114 entry->flags = 1114 1115 #ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT 1115 1116 (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
+1
kernel/trace/trace_events.c
··· 116 116 __common_field(unsigned char, flags); 117 117 __common_field(unsigned char, preempt_count); 118 118 __common_field(int, pid); 119 + __common_field(int, padding); 119 120 120 121 return ret; 121 122 }
+4 -1
kernel/watchdog.c
··· 430 430 p = kthread_create(watchdog, (void *)(unsigned long)cpu, "watchdog/%d", cpu); 431 431 if (IS_ERR(p)) { 432 432 printk(KERN_ERR "softlockup watchdog for %i failed\n", cpu); 433 - if (!err) 433 + if (!err) { 434 434 /* if hardlockup hasn't already set this */ 435 435 err = PTR_ERR(p); 436 + /* and disable the perf event */ 437 + watchdog_nmi_disable(cpu); 438 + } 436 439 goto out; 437 440 } 438 441 kthread_bind(p, cpu);
+7 -1
kernel/workqueue.c
··· 1291 1291 return true; 1292 1292 spin_unlock_irq(&gcwq->lock); 1293 1293 1294 - /* CPU has come up in between, retry migration */ 1294 + /* 1295 + * We've raced with CPU hot[un]plug. Give it a breather 1296 + * and retry migration. cond_resched() is required here; 1297 + * otherwise, we might deadlock against cpu_stop trying to 1298 + * bring down the CPU on non-preemptive kernel. 1299 + */ 1295 1300 cpu_relax(); 1301 + cond_resched(); 1296 1302 } 1297 1303 } 1298 1304
+18 -6
lib/flex_array.c
··· 232 232 233 233 /** 234 234 * flex_array_prealloc - guarantee that array space exists 235 - * @fa: the flex array for which to preallocate parts 236 - * @start: index of first array element for which space is allocated 237 - * @end: index of last (inclusive) element for which space is allocated 238 - * @flags: page allocation flags 235 + * @fa: the flex array for which to preallocate parts 236 + * @start: index of first array element for which space is allocated 237 + * @nr_elements: number of elements for which space is allocated 238 + * @flags: page allocation flags 239 239 * 240 240 * This will guarantee that no future calls to flex_array_put() 241 241 * will allocate memory. It can be used if you are expecting to ··· 245 245 * Locking must be provided by the caller. 246 246 */ 247 247 int flex_array_prealloc(struct flex_array *fa, unsigned int start, 248 - unsigned int end, gfp_t flags) 248 + unsigned int nr_elements, gfp_t flags) 249 249 { 250 250 int start_part; 251 251 int end_part; 252 252 int part_nr; 253 + unsigned int end; 253 254 struct flex_array_part *part; 254 255 255 - if (start >= fa->total_nr_elements || end >= fa->total_nr_elements) 256 + if (!start && !nr_elements) 257 + return 0; 258 + if (start >= fa->total_nr_elements) 259 + return -ENOSPC; 260 + if (!nr_elements) 261 + return 0; 262 + 263 + end = start + nr_elements - 1; 264 + 265 + if (end >= fa->total_nr_elements) 256 266 return -ENOSPC; 257 267 if (elements_fit_in_base(fa)) 258 268 return 0; ··· 353 343 int part_nr; 354 344 int ret = 0; 355 345 346 + if (!fa->total_nr_elements) 347 + return 0; 356 348 if (elements_fit_in_base(fa)) 357 349 return ret; 358 350 for (part_nr = 0; part_nr < FLEX_ARRAY_NR_BASE_PTRS; part_nr++) {
+1 -1
lib/vsprintf.c
··· 797 797 return string(buf, end, uuid, spec); 798 798 } 799 799 800 - int kptr_restrict = 1; 800 + int kptr_restrict __read_mostly; 801 801 802 802 /* 803 803 * Show a '%p' thing. A kernel extension is that the '%p' is followed
+3 -3
lib/xz/xz_dec_lzma2.c
··· 969 969 */ 970 970 tmp = b->in[b->in_pos++]; 971 971 972 + if (tmp == 0x00) 973 + return XZ_STREAM_END; 974 + 972 975 if (tmp >= 0xE0 || tmp == 0x01) { 973 976 s->lzma2.need_props = true; 974 977 s->lzma2.need_dict_reset = false; ··· 1004 1001 lzma_reset(s); 1005 1002 } 1006 1003 } else { 1007 - if (tmp == 0x00) 1008 - return XZ_STREAM_END; 1009 - 1010 1004 if (tmp > 0x02) 1011 1005 return XZ_DATA_ERROR; 1012 1006
+24 -19
mm/huge_memory.c
··· 1408 1408 return ret; 1409 1409 } 1410 1410 1411 + #define VM_NO_THP (VM_SPECIAL|VM_INSERTPAGE|VM_MIXEDMAP|VM_SAO| \ 1412 + VM_HUGETLB|VM_SHARED|VM_MAYSHARE) 1413 + 1411 1414 int hugepage_madvise(struct vm_area_struct *vma, 1412 1415 unsigned long *vm_flags, int advice) 1413 1416 { ··· 1419 1416 /* 1420 1417 * Be somewhat over-protective like KSM for now! 1421 1418 */ 1422 - if (*vm_flags & (VM_HUGEPAGE | 1423 - VM_SHARED | VM_MAYSHARE | 1424 - VM_PFNMAP | VM_IO | VM_DONTEXPAND | 1425 - VM_RESERVED | VM_HUGETLB | VM_INSERTPAGE | 1426 - VM_MIXEDMAP | VM_SAO)) 1419 + if (*vm_flags & (VM_HUGEPAGE | VM_NO_THP)) 1427 1420 return -EINVAL; 1428 1421 *vm_flags &= ~VM_NOHUGEPAGE; 1429 1422 *vm_flags |= VM_HUGEPAGE; ··· 1435 1436 /* 1436 1437 * Be somewhat over-protective like KSM for now! 1437 1438 */ 1438 - if (*vm_flags & (VM_NOHUGEPAGE | 1439 - VM_SHARED | VM_MAYSHARE | 1440 - VM_PFNMAP | VM_IO | VM_DONTEXPAND | 1441 - VM_RESERVED | VM_HUGETLB | VM_INSERTPAGE | 1442 - VM_MIXEDMAP | VM_SAO)) 1439 + if (*vm_flags & (VM_NOHUGEPAGE | VM_NO_THP)) 1443 1440 return -EINVAL; 1444 1441 *vm_flags &= ~VM_HUGEPAGE; 1445 1442 *vm_flags |= VM_NOHUGEPAGE; ··· 1569 1574 * page fault if needed. 1570 1575 */ 1571 1576 return 0; 1572 - if (vma->vm_file || vma->vm_ops) 1577 + if (vma->vm_ops) 1573 1578 /* khugepaged not yet working on file or special mappings */ 1574 1579 return 0; 1575 - VM_BUG_ON(is_linear_pfn_mapping(vma) || is_pfn_mapping(vma)); 1580 + /* 1581 + * If is_pfn_mapping() is true is_learn_pfn_mapping() must be 1582 + * true too, verify it here. 
1583 + */ 1584 + VM_BUG_ON(is_linear_pfn_mapping(vma) || vma->vm_flags & VM_NO_THP); 1576 1585 hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK; 1577 1586 hend = vma->vm_end & HPAGE_PMD_MASK; 1578 1587 if (hstart < hend) ··· 1827 1828 (vma->vm_flags & VM_NOHUGEPAGE)) 1828 1829 goto out; 1829 1830 1830 - /* VM_PFNMAP vmas may have vm_ops null but vm_file set */ 1831 - if (!vma->anon_vma || vma->vm_ops || vma->vm_file) 1831 + if (!vma->anon_vma || vma->vm_ops) 1832 1832 goto out; 1833 1833 if (is_vma_temporary_stack(vma)) 1834 1834 goto out; 1835 - VM_BUG_ON(is_linear_pfn_mapping(vma) || is_pfn_mapping(vma)); 1835 + /* 1836 + * If is_pfn_mapping() is true is_learn_pfn_mapping() must be 1837 + * true too, verify it here. 1838 + */ 1839 + VM_BUG_ON(is_linear_pfn_mapping(vma) || vma->vm_flags & VM_NO_THP); 1836 1840 1837 1841 pgd = pgd_offset(mm, address); 1838 1842 if (!pgd_present(*pgd)) ··· 2068 2066 progress++; 2069 2067 continue; 2070 2068 } 2071 - /* VM_PFNMAP vmas may have vm_ops null but vm_file set */ 2072 - if (!vma->anon_vma || vma->vm_ops || vma->vm_file) 2069 + if (!vma->anon_vma || vma->vm_ops) 2073 2070 goto skip; 2074 2071 if (is_vma_temporary_stack(vma)) 2075 2072 goto skip; 2076 - 2077 - VM_BUG_ON(is_linear_pfn_mapping(vma) || is_pfn_mapping(vma)); 2073 + /* 2074 + * If is_pfn_mapping() is true is_learn_pfn_mapping() 2075 + * must be true too, verify it here. 2076 + */ 2077 + VM_BUG_ON(is_linear_pfn_mapping(vma) || 2078 + vma->vm_flags & VM_NO_THP); 2078 2079 2079 2080 hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK; 2080 2081 hend = vma->vm_end & HPAGE_PMD_MASK;
+9 -12
mm/memory.c
··· 1359 1359 */ 1360 1360 mark_page_accessed(page); 1361 1361 } 1362 - if (flags & FOLL_MLOCK) { 1362 + if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) { 1363 1363 /* 1364 1364 * The preliminary mapping check is mainly to avoid the 1365 1365 * pointless overhead of lock_page on the ZERO_PAGE ··· 1412 1412 1413 1413 static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr) 1414 1414 { 1415 - return (vma->vm_flags & VM_GROWSDOWN) && 1416 - (vma->vm_start == addr) && 1417 - !vma_stack_continue(vma->vm_prev, addr); 1415 + return stack_guard_page_start(vma, addr) || 1416 + stack_guard_page_end(vma, addr+PAGE_SIZE); 1418 1417 } 1419 1418 1420 1419 /** ··· 1550 1551 continue; 1551 1552 } 1552 1553 1553 - /* 1554 - * If we don't actually want the page itself, 1555 - * and it's the stack guard page, just skip it. 1556 - */ 1557 - if (!pages && stack_guard_page(vma, start)) 1558 - goto next_page; 1559 - 1560 1554 do { 1561 1555 struct page *page; 1562 1556 unsigned int foll_flags = gup_flags; ··· 1566 1574 int ret; 1567 1575 unsigned int fault_flags = 0; 1568 1576 1577 + /* For mlock, just skip the stack guard page. */ 1578 + if (foll_flags & FOLL_MLOCK) { 1579 + if (stack_guard_page(vma, start)) 1580 + goto next_page; 1581 + } 1569 1582 if (foll_flags & FOLL_WRITE) 1570 1583 fault_flags |= FAULT_FLAG_WRITE; 1571 1584 if (nonblocking) ··· 3393 3396 * run pte_offset_map on the pmd, if an huge pmd could 3394 3397 * materialize from under us from a different thread. 3395 3398 */ 3396 - if (unlikely(__pte_alloc(mm, vma, pmd, address))) 3399 + if (unlikely(pmd_none(*pmd)) && __pte_alloc(mm, vma, pmd, address)) 3397 3400 return VM_FAULT_OOM; 3398 3401 /* if an huge pmd materialized from under us just retry later */ 3399 3402 if (unlikely(pmd_trans_huge(*pmd)))
+1 -4
mm/mlock.c
··· 162 162 VM_BUG_ON(end > vma->vm_end); 163 163 VM_BUG_ON(!rwsem_is_locked(&mm->mmap_sem)); 164 164 165 - gup_flags = FOLL_TOUCH; 165 + gup_flags = FOLL_TOUCH | FOLL_MLOCK; 166 166 /* 167 167 * We want to touch writable mappings with a write fault in order 168 168 * to break COW, except for shared mappings because these don't COW ··· 177 177 */ 178 178 if (vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) 179 179 gup_flags |= FOLL_FORCE; 180 - 181 - if (vma->vm_flags & VM_LOCKED) 182 - gup_flags |= FOLL_MLOCK; 183 180 184 181 return __get_user_pages(current, mm, addr, nr_pages, gup_flags, 185 182 NULL, NULL, nonblocking);
+7 -4
mm/mmap.c
··· 1767 1767 size = address - vma->vm_start; 1768 1768 grow = (address - vma->vm_end) >> PAGE_SHIFT; 1769 1769 1770 - error = acct_stack_growth(vma, size, grow); 1771 - if (!error) { 1772 - vma->vm_end = address; 1773 - perf_event_mmap(vma); 1770 + error = -ENOMEM; 1771 + if (vma->vm_pgoff + (size >> PAGE_SHIFT) >= vma->vm_pgoff) { 1772 + error = acct_stack_growth(vma, size, grow); 1773 + if (!error) { 1774 + vma->vm_end = address; 1775 + perf_event_mmap(vma); 1776 + } 1774 1777 } 1775 1778 } 1776 1779 vma_unlock_anon_vma(vma);
+6 -3
mm/oom_kill.c
··· 172 172 173 173 /* 174 174 * The baseline for the badness score is the proportion of RAM that each 175 - * task's rss and swap space use. 175 + * task's rss, pagetable and swap space use. 176 176 */ 177 - points = (get_mm_rss(p->mm) + get_mm_counter(p->mm, MM_SWAPENTS)) * 1000 / 178 - totalpages; 177 + points = get_mm_rss(p->mm) + p->mm->nr_ptes; 178 + points += get_mm_counter(p->mm, MM_SWAPENTS); 179 + 180 + points *= 1000; 181 + points /= totalpages; 179 182 task_unlock(p); 180 183 181 184 /*
+42 -15
mm/page_alloc.c
··· 2317 2317 2318 2318 EXPORT_SYMBOL(free_pages); 2319 2319 2320 + static void *make_alloc_exact(unsigned long addr, unsigned order, size_t size) 2321 + { 2322 + if (addr) { 2323 + unsigned long alloc_end = addr + (PAGE_SIZE << order); 2324 + unsigned long used = addr + PAGE_ALIGN(size); 2325 + 2326 + split_page(virt_to_page((void *)addr), order); 2327 + while (used < alloc_end) { 2328 + free_page(used); 2329 + used += PAGE_SIZE; 2330 + } 2331 + } 2332 + return (void *)addr; 2333 + } 2334 + 2320 2335 /** 2321 2336 * alloc_pages_exact - allocate an exact number physically-contiguous pages. 2322 2337 * @size: the number of bytes to allocate ··· 2351 2336 unsigned long addr; 2352 2337 2353 2338 addr = __get_free_pages(gfp_mask, order); 2354 - if (addr) { 2355 - unsigned long alloc_end = addr + (PAGE_SIZE << order); 2356 - unsigned long used = addr + PAGE_ALIGN(size); 2357 - 2358 - split_page(virt_to_page((void *)addr), order); 2359 - while (used < alloc_end) { 2360 - free_page(used); 2361 - used += PAGE_SIZE; 2362 - } 2363 - } 2364 - 2365 - return (void *)addr; 2339 + return make_alloc_exact(addr, order, size); 2366 2340 } 2367 2341 EXPORT_SYMBOL(alloc_pages_exact); 2342 + 2343 + /** 2344 + * alloc_pages_exact_nid - allocate an exact number of physically-contiguous 2345 + * pages on a node. 2346 + * @nid: the preferred node ID where memory should be allocated 2347 + * @size: the number of bytes to allocate 2348 + * @gfp_mask: GFP flags for the allocation 2349 + * 2350 + * Like alloc_pages_exact(), but try to allocate on node nid first before falling 2351 + * back. 2352 + * Note this is not alloc_pages_exact_node() which allocates on a specific node, 2353 + * but is not exact. 
2354 + */ 2355 + void *alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask) 2356 + { 2357 + unsigned order = get_order(size); 2358 + struct page *p = alloc_pages_node(nid, gfp_mask, order); 2359 + if (!p) 2360 + return NULL; 2361 + return make_alloc_exact((unsigned long)page_address(p), order, size); 2362 + } 2363 + EXPORT_SYMBOL(alloc_pages_exact_nid); 2368 2364 2369 2365 /** 2370 2366 * free_pages_exact - release memory allocated via alloc_pages_exact() ··· 3590 3564 3591 3565 if (!slab_is_available()) { 3592 3566 zone->wait_table = (wait_queue_head_t *) 3593 - alloc_bootmem_node(pgdat, alloc_size); 3567 + alloc_bootmem_node_nopanic(pgdat, alloc_size); 3594 3568 } else { 3595 3569 /* 3596 3570 * This case means that a zone whose size was 0 gets new memory ··· 4167 4141 unsigned long usemapsize = usemap_size(zonesize); 4168 4142 zone->pageblock_flags = NULL; 4169 4143 if (usemapsize) 4170 - zone->pageblock_flags = alloc_bootmem_node(pgdat, usemapsize); 4144 + zone->pageblock_flags = alloc_bootmem_node_nopanic(pgdat, 4145 + usemapsize); 4171 4146 } 4172 4147 #else 4173 4148 static inline void setup_usemap(struct pglist_data *pgdat, ··· 4334 4307 size = (end - start) * sizeof(struct page); 4335 4308 map = alloc_remap(pgdat->node_id, size); 4336 4309 if (!map) 4337 - map = alloc_bootmem_node(pgdat, size); 4310 + map = alloc_bootmem_node_nopanic(pgdat, size); 4338 4311 pgdat->node_mem_map = map + (pgdat->node_start_pfn - start); 4339 4312 } 4340 4313 #ifndef CONFIG_NEED_MULTIPLE_NODES
+1 -1
mm/page_cgroup.c
··· 134 134 { 135 135 void *addr = NULL; 136 136 137 - addr = alloc_pages_exact(size, GFP_KERNEL | __GFP_NOWARN); 137 + addr = alloc_pages_exact_nid(nid, size, GFP_KERNEL | __GFP_NOWARN); 138 138 if (addr) 139 139 return addr; 140 140
+83 -66
mm/shmem.c
··· 852 852 853 853 static int shmem_unuse_inode(struct shmem_inode_info *info, swp_entry_t entry, struct page *page) 854 854 { 855 - struct inode *inode; 855 + struct address_space *mapping; 856 856 unsigned long idx; 857 857 unsigned long size; 858 858 unsigned long limit; ··· 875 875 if (size > SHMEM_NR_DIRECT) 876 876 size = SHMEM_NR_DIRECT; 877 877 offset = shmem_find_swp(entry, ptr, ptr+size); 878 - if (offset >= 0) 878 + if (offset >= 0) { 879 + shmem_swp_balance_unmap(); 879 880 goto found; 881 + } 880 882 if (!info->i_indirect) 881 883 goto lost2; 882 884 ··· 916 914 if (size > ENTRIES_PER_PAGE) 917 915 size = ENTRIES_PER_PAGE; 918 916 offset = shmem_find_swp(entry, ptr, ptr+size); 919 - shmem_swp_unmap(ptr); 920 917 if (offset >= 0) { 921 918 shmem_dir_unmap(dir); 922 919 goto found; 923 920 } 921 + shmem_swp_unmap(ptr); 924 922 } 925 923 } 926 924 lost1: ··· 930 928 return 0; 931 929 found: 932 930 idx += offset; 933 - inode = igrab(&info->vfs_inode); 934 - spin_unlock(&info->lock); 931 + ptr += offset; 935 932 936 933 /* 937 934 * Move _head_ to start search for next from here. ··· 941 940 */ 942 941 if (shmem_swaplist.next != &info->swaplist) 943 942 list_move_tail(&shmem_swaplist, &info->swaplist); 944 - mutex_unlock(&shmem_swaplist_mutex); 945 943 946 - error = 1; 947 - if (!inode) 948 - goto out; 949 944 /* 950 - * Charge page using GFP_KERNEL while we can wait. 951 - * Charged back to the user(not to caller) when swap account is used. 952 - * add_to_page_cache() will be called with GFP_NOWAIT. 945 + * We rely on shmem_swaplist_mutex, not only to protect the swaplist, 946 + * but also to hold up shmem_evict_inode(): so inode cannot be freed 947 + * beneath us (pagelock doesn't help until the page is in pagecache). 
953 948 */ 954 - error = mem_cgroup_cache_charge(page, current->mm, GFP_KERNEL); 955 - if (error) 956 - goto out; 957 - error = radix_tree_preload(GFP_KERNEL); 958 - if (error) { 959 - mem_cgroup_uncharge_cache_page(page); 960 - goto out; 961 - } 962 - error = 1; 963 - 964 - spin_lock(&info->lock); 965 - ptr = shmem_swp_entry(info, idx, NULL); 966 - if (ptr && ptr->val == entry.val) { 967 - error = add_to_page_cache_locked(page, inode->i_mapping, 968 - idx, GFP_NOWAIT); 969 - /* does mem_cgroup_uncharge_cache_page on error */ 970 - } else /* we must compensate for our precharge above */ 971 - mem_cgroup_uncharge_cache_page(page); 949 + mapping = info->vfs_inode.i_mapping; 950 + error = add_to_page_cache_locked(page, mapping, idx, GFP_NOWAIT); 951 + /* which does mem_cgroup_uncharge_cache_page on error */ 972 952 973 953 if (error == -EEXIST) { 974 - struct page *filepage = find_get_page(inode->i_mapping, idx); 954 + struct page *filepage = find_get_page(mapping, idx); 975 955 error = 1; 976 956 if (filepage) { 977 957 /* ··· 972 990 swap_free(entry); 973 991 error = 1; /* not an error, but entry was found */ 974 992 } 975 - if (ptr) 976 - shmem_swp_unmap(ptr); 993 + shmem_swp_unmap(ptr); 977 994 spin_unlock(&info->lock); 978 - radix_tree_preload_end(); 979 - out: 980 - unlock_page(page); 981 - page_cache_release(page); 982 - iput(inode); /* allows for NULL */ 983 995 return error; 984 996 } 985 997 ··· 985 1009 struct list_head *p, *next; 986 1010 struct shmem_inode_info *info; 987 1011 int found = 0; 1012 + int error; 1013 + 1014 + /* 1015 + * Charge page using GFP_KERNEL while we can wait, before taking 1016 + * the shmem_swaplist_mutex which might hold up shmem_writepage(). 1017 + * Charged back to the user (not to caller) when swap account is used. 1018 + * add_to_page_cache() will be called with GFP_NOWAIT. 
1019 + */ 1020 + error = mem_cgroup_cache_charge(page, current->mm, GFP_KERNEL); 1021 + if (error) 1022 + goto out; 1023 + /* 1024 + * Try to preload while we can wait, to not make a habit of 1025 + * draining atomic reserves; but don't latch on to this cpu, 1026 + * it's okay if sometimes we get rescheduled after this. 1027 + */ 1028 + error = radix_tree_preload(GFP_KERNEL); 1029 + if (error) 1030 + goto uncharge; 1031 + radix_tree_preload_end(); 988 1032 989 1033 mutex_lock(&shmem_swaplist_mutex); 990 1034 list_for_each_safe(p, next, &shmem_swaplist) { ··· 1012 1016 found = shmem_unuse_inode(info, entry, page); 1013 1017 cond_resched(); 1014 1018 if (found) 1015 - goto out; 1019 + break; 1016 1020 } 1017 1021 mutex_unlock(&shmem_swaplist_mutex); 1018 - /* 1019 - * Can some race bring us here? We've been holding page lock, 1020 - * so I think not; but would rather try again later than BUG() 1021 - */ 1022 + 1023 + uncharge: 1024 + if (!found) 1025 + mem_cgroup_uncharge_cache_page(page); 1026 + if (found < 0) 1027 + error = found; 1028 + out: 1022 1029 unlock_page(page); 1023 1030 page_cache_release(page); 1024 - out: 1025 - return (found < 0) ? found : 0; 1031 + return error; 1026 1032 } 1027 1033 1028 1034 /* ··· 1062 1064 else 1063 1065 swap.val = 0; 1064 1066 1067 + /* 1068 + * Add inode to shmem_unuse()'s list of swapped-out inodes, 1069 + * if it's not already there. Do it now because we cannot take 1070 + * mutex while holding spinlock, and must do so before the page 1071 + * is moved to swap cache, when its pagelock no longer protects 1072 + * the inode from eviction. But don't unlock the mutex until 1073 + * we've taken the spinlock, because shmem_unuse_inode() will 1074 + * prune a !swapped inode from the swaplist under both locks. 
1075 + */ 1076 + if (swap.val) { 1077 + mutex_lock(&shmem_swaplist_mutex); 1078 + if (list_empty(&info->swaplist)) 1079 + list_add_tail(&info->swaplist, &shmem_swaplist); 1080 + } 1081 + 1065 1082 spin_lock(&info->lock); 1083 + if (swap.val) 1084 + mutex_unlock(&shmem_swaplist_mutex); 1085 + 1066 1086 if (index >= info->next_index) { 1067 1087 BUG_ON(!(info->flags & SHMEM_TRUNCATE)); 1068 1088 goto unlock; ··· 1100 1084 delete_from_page_cache(page); 1101 1085 shmem_swp_set(info, entry, swap.val); 1102 1086 shmem_swp_unmap(entry); 1103 - if (list_empty(&info->swaplist)) 1104 - inode = igrab(inode); 1105 - else 1106 - inode = NULL; 1107 1087 spin_unlock(&info->lock); 1108 1088 swap_shmem_alloc(swap); 1109 1089 BUG_ON(page_mapped(page)); 1110 1090 swap_writepage(page, wbc); 1111 - if (inode) { 1112 - mutex_lock(&shmem_swaplist_mutex); 1113 - /* move instead of add in case we're racing */ 1114 - list_move_tail(&info->swaplist, &shmem_swaplist); 1115 - mutex_unlock(&shmem_swaplist_mutex); 1116 - iput(inode); 1117 - } 1118 1091 return 0; 1119 1092 } 1120 1093 ··· 1405 1400 if (sbinfo->max_blocks) { 1406 1401 if (percpu_counter_compare(&sbinfo->used_blocks, 1407 1402 sbinfo->max_blocks) >= 0 || 1408 - shmem_acct_block(info->flags)) { 1409 - spin_unlock(&info->lock); 1410 - error = -ENOSPC; 1411 - goto failed; 1412 - } 1403 + shmem_acct_block(info->flags)) 1404 + goto nospace; 1413 1405 percpu_counter_inc(&sbinfo->used_blocks); 1414 1406 spin_lock(&inode->i_lock); 1415 1407 inode->i_blocks += BLOCKS_PER_PAGE; 1416 1408 spin_unlock(&inode->i_lock); 1417 - } else if (shmem_acct_block(info->flags)) { 1418 - spin_unlock(&info->lock); 1419 - error = -ENOSPC; 1420 - goto failed; 1421 - } 1409 + } else if (shmem_acct_block(info->flags)) 1410 + goto nospace; 1422 1411 1423 1412 if (!filepage) { 1424 1413 int ret; ··· 1492 1493 error = 0; 1493 1494 goto out; 1494 1495 1496 + nospace: 1497 + /* 1498 + * Perhaps the page was brought in from swap between find_lock_page 1499 + * and 
taking info->lock? We allow for that at add_to_page_cache_lru, 1500 + * but must also avoid reporting a spurious ENOSPC while working on a 1501 + * full tmpfs. (When filepage has been passed in to shmem_getpage, it 1502 + * is already in page cache, which prevents this race from occurring.) 1503 + */ 1504 + if (!filepage) { 1505 + struct page *page = find_get_page(mapping, idx); 1506 + if (page) { 1507 + spin_unlock(&info->lock); 1508 + page_cache_release(page); 1509 + goto repeat; 1510 + } 1511 + } 1512 + spin_unlock(&info->lock); 1513 + error = -ENOSPC; 1495 1514 failed: 1496 1515 if (*pagep != filepage) { 1497 1516 unlock_page(filepage);
+2 -2
mm/slub.c
··· 1940 1940 * Since this is without lock semantics the protection is only against 1941 1941 * code executing on this cpu *not* from access by other cpus. 1942 1942 */ 1943 - if (unlikely(!this_cpu_cmpxchg_double( 1943 + if (unlikely(!irqsafe_cpu_cmpxchg_double( 1944 1944 s->cpu_slab->freelist, s->cpu_slab->tid, 1945 1945 object, tid, 1946 1946 get_freepointer(s, object), next_tid(tid)))) { ··· 2145 2145 set_freepointer(s, object, c->freelist); 2146 2146 2147 2147 #ifdef CONFIG_CMPXCHG_LOCAL 2148 - if (unlikely(!this_cpu_cmpxchg_double( 2148 + if (unlikely(!irqsafe_cpu_cmpxchg_double( 2149 2149 s->cpu_slab->freelist, s->cpu_slab->tid, 2150 2150 c->freelist, tid, 2151 2151 object, next_tid(tid)))) {
+3
mm/swap.c
··· 396 396 if (!PageLRU(page)) 397 397 return; 398 398 399 + if (PageUnevictable(page)) 400 + return; 401 + 399 402 /* Some processes are using the page */ 400 403 if (page_mapped(page)) 401 404 return;
+1 -1
mm/vmscan.c
··· 937 937 * back off and wait for congestion to clear because further reclaim 938 938 * will encounter the same problem 939 939 */ 940 - if (nr_dirty == nr_congested && nr_dirty != 0) 940 + if (nr_dirty && nr_dirty == nr_congested && scanning_global_lru(sc)) 941 941 zone_set_flag(zone, ZONE_CONGESTED); 942 942 943 943 free_page_list(&free_pages);
+3
net/8021q/vlan.c
··· 124 124 125 125 grp->nr_vlans--; 126 126 127 + if (vlan->flags & VLAN_FLAG_GVRP) 128 + vlan_gvrp_request_leave(dev); 129 + 127 130 vlan_group_set_device(grp, vlan_id, NULL); 128 131 if (!grp->killall) 129 132 synchronize_net();
-3
net/8021q/vlan_dev.c
··· 487 487 struct vlan_dev_info *vlan = vlan_dev_info(dev); 488 488 struct net_device *real_dev = vlan->real_dev; 489 489 490 - if (vlan->flags & VLAN_FLAG_GVRP) 491 - vlan_gvrp_request_leave(dev); 492 - 493 490 dev_mc_unsync(real_dev, dev); 494 491 dev_uc_unsync(real_dev, dev); 495 492 if (dev->flags & IFF_ALLMULTI)
+1 -1
net/9p/client.c
··· 614 614 615 615 err = c->trans_mod->request(c, req); 616 616 if (err < 0) { 617 - if (err != -ERESTARTSYS) 617 + if (err != -ERESTARTSYS && err != -EFAULT) 618 618 c->status = Disconnected; 619 619 goto reterr; 620 620 }
+1
net/9p/protocol.c
··· 674 674 } 675 675 676 676 strcpy(dirent->d_name, nameptr); 677 + kfree(nameptr); 677 678 678 679 out: 679 680 return fake_pdu.offset;
+3 -8
net/9p/trans_common.c
··· 63 63 int nr_pages, u8 rw) 64 64 { 65 65 uint32_t first_page_bytes = 0; 66 - uint32_t pdata_mapped_pages; 66 + int32_t pdata_mapped_pages; 67 67 struct trans_rpage_info *rpinfo; 68 68 69 69 *pdata_off = (__force size_t)req->tc->pubuf & (PAGE_SIZE-1); ··· 75 75 rpinfo = req->tc->private; 76 76 pdata_mapped_pages = get_user_pages_fast((unsigned long)req->tc->pubuf, 77 77 nr_pages, rw, &rpinfo->rp_data[0]); 78 + if (pdata_mapped_pages <= 0) 79 + return pdata_mapped_pages; 78 80 79 - if (pdata_mapped_pages < 0) { 80 - printk(KERN_ERR "get_user_pages_fast failed:%d udata:%p" 81 - "nr_pages:%d\n", pdata_mapped_pages, 82 - req->tc->pubuf, nr_pages); 83 - pdata_mapped_pages = 0; 84 - return -EIO; 85 - } 86 81 rpinfo->rp_nr_pages = pdata_mapped_pages; 87 82 if (*pdata_off) { 88 83 *pdata_len = first_page_bytes;
+2 -3
net/bluetooth/hci_core.c
··· 587 587 hci_req_cancel(hdev, ENODEV); 588 588 hci_req_lock(hdev); 589 589 590 - /* Stop timer, it might be running */ 591 - del_timer_sync(&hdev->cmd_timer); 592 - 593 590 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) { 591 + del_timer_sync(&hdev->cmd_timer); 594 592 hci_req_unlock(hdev); 595 593 return 0; 596 594 } ··· 627 629 628 630 /* Drop last sent command */ 629 631 if (hdev->sent_cmd) { 632 + del_timer_sync(&hdev->cmd_timer); 630 633 kfree_skb(hdev->sent_cmd); 631 634 hdev->sent_cmd = NULL; 632 635 }
-2
net/bluetooth/hci_event.c
··· 2387 2387 if (!conn) 2388 2388 goto unlock; 2389 2389 2390 - hci_conn_hold(conn); 2391 - 2392 2390 conn->remote_cap = ev->capability; 2393 2391 conn->remote_oob = ev->oob_data; 2394 2392 conn->remote_auth = ev->authentication;
+1
net/bluetooth/l2cap_core.c
··· 1051 1051 tx_skb = skb_clone(skb, GFP_ATOMIC); 1052 1052 bt_cb(skb)->retries++; 1053 1053 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE); 1054 + control &= L2CAP_CTRL_SAR; 1054 1055 1055 1056 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) { 1056 1057 control |= L2CAP_CTRL_FINAL;
+1 -1
net/bridge/br_input.c
··· 164 164 goto drop; 165 165 166 166 /* If STP is turned off, then forward */ 167 - if (p->br->stp_enabled == BR_NO_STP) 167 + if (p->br->stp_enabled == BR_NO_STP && dest[5] == 0) 168 168 goto forward; 169 169 170 170 if (NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_IN, skb, skb->dev,
+3 -5
net/bridge/br_netfilter.c
··· 249 249 goto drop; 250 250 } 251 251 252 - /* Zero out the CB buffer if no options present */ 253 - if (iph->ihl == 5) { 254 - memset(IPCB(skb), 0, sizeof(struct inet_skb_parm)); 252 + memset(IPCB(skb), 0, sizeof(struct inet_skb_parm)); 253 + if (iph->ihl == 5) 255 254 return 0; 256 - } 257 255 258 256 opt->optlen = iph->ihl*4 - sizeof(struct iphdr); 259 257 if (ip_options_compile(dev_net(dev), opt, skb)) ··· 737 739 nf_bridge->mask |= BRNF_PKT_TYPE; 738 740 } 739 741 740 - if (br_parse_ip_options(skb)) 742 + if (pf == PF_INET && br_parse_ip_options(skb)) 741 743 return NF_DROP; 742 744 743 745 /* The physdev module checks on this */
+11 -53
net/bridge/netfilter/ebtables.c
··· 1766 1766 1767 1767 newinfo->entries_size = size; 1768 1768 1769 - xt_compat_init_offsets(AF_INET, info->nentries); 1769 + xt_compat_init_offsets(NFPROTO_BRIDGE, info->nentries); 1770 1770 return EBT_ENTRY_ITERATE(entries, size, compat_calc_entry, info, 1771 1771 entries, newinfo); 1772 1772 } ··· 1882 1882 struct xt_match *match; 1883 1883 struct xt_target *wt; 1884 1884 void *dst = NULL; 1885 - int off, pad = 0, ret = 0; 1885 + int off, pad = 0; 1886 1886 unsigned int size_kern, entry_offset, match_size = mwt->match_size; 1887 1887 1888 1888 strlcpy(name, mwt->u.name, sizeof(name)); ··· 1933 1933 size_kern = wt->targetsize; 1934 1934 module_put(wt->me); 1935 1935 break; 1936 - } 1937 - 1938 - if (!dst) { 1939 - ret = xt_compat_add_offset(NFPROTO_BRIDGE, entry_offset, 1940 - off + ebt_compat_entry_padsize()); 1941 - if (ret < 0) 1942 - return ret; 1943 1936 } 1944 1937 1945 1938 state->buf_kern_offset += match_size + off; ··· 2009 2016 return growth; 2010 2017 } 2011 2018 2012 - #define EBT_COMPAT_WATCHER_ITERATE(e, fn, args...) \ 2013 - ({ \ 2014 - unsigned int __i; \ 2015 - int __ret = 0; \ 2016 - struct compat_ebt_entry_mwt *__watcher; \ 2017 - \ 2018 - for (__i = e->watchers_offset; \ 2019 - __i < (e)->target_offset; \ 2020 - __i += __watcher->watcher_size + \ 2021 - sizeof(struct compat_ebt_entry_mwt)) { \ 2022 - __watcher = (void *)(e) + __i; \ 2023 - __ret = fn(__watcher , ## args); \ 2024 - if (__ret != 0) \ 2025 - break; \ 2026 - } \ 2027 - if (__ret == 0) { \ 2028 - if (__i != (e)->target_offset) \ 2029 - __ret = -EINVAL; \ 2030 - } \ 2031 - __ret; \ 2032 - }) 2033 - 2034 - #define EBT_COMPAT_MATCH_ITERATE(e, fn, args...) 
\ 2035 - ({ \ 2036 - unsigned int __i; \ 2037 - int __ret = 0; \ 2038 - struct compat_ebt_entry_mwt *__match; \ 2039 - \ 2040 - for (__i = sizeof(struct ebt_entry); \ 2041 - __i < (e)->watchers_offset; \ 2042 - __i += __match->match_size + \ 2043 - sizeof(struct compat_ebt_entry_mwt)) { \ 2044 - __match = (void *)(e) + __i; \ 2045 - __ret = fn(__match , ## args); \ 2046 - if (__ret != 0) \ 2047 - break; \ 2048 - } \ 2049 - if (__ret == 0) { \ 2050 - if (__i != (e)->watchers_offset) \ 2051 - __ret = -EINVAL; \ 2052 - } \ 2053 - __ret; \ 2054 - }) 2055 - 2056 2019 /* called for all ebt_entry structures. */ 2057 2020 static int size_entry_mwt(struct ebt_entry *entry, const unsigned char *base, 2058 2021 unsigned int *total, ··· 2079 2130 offsets_update[i], offsets[j] + new_offset); 2080 2131 offsets_update[i] = offsets[j] + new_offset; 2081 2132 } 2133 + } 2134 + 2135 + if (state->buf_kern_start == NULL) { 2136 + unsigned int offset = buf_start - (char *) base; 2137 + 2138 + ret = xt_compat_add_offset(NFPROTO_BRIDGE, offset, new_offset); 2139 + if (ret < 0) 2140 + return ret; 2082 2141 } 2083 2142 2084 2143 startoff = state->buf_user_offset - startoff; ··· 2197 2240 2198 2241 xt_compat_lock(NFPROTO_BRIDGE); 2199 2242 2243 + xt_compat_init_offsets(NFPROTO_BRIDGE, tmp.nentries); 2200 2244 ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state); 2201 2245 if (ret < 0) 2202 2246 goto out_unlock;
+5 -1
net/caif/cfdgml.c
··· 13 13 #include <net/caif/cfsrvl.h> 14 14 #include <net/caif/cfpkt.h> 15 15 16 + 16 17 #define container_obj(layr) ((struct cfsrvl *) layr) 17 18 18 19 #define DGM_CMD_BIT 0x80 ··· 84 83 85 84 static int cfdgml_transmit(struct cflayer *layr, struct cfpkt *pkt) 86 85 { 86 + u8 packet_type; 87 87 u32 zero = 0; 88 88 struct caif_payload_info *info; 89 89 struct cfsrvl *service = container_obj(layr); ··· 96 94 if (cfpkt_getlen(pkt) > DGM_MTU) 97 95 return -EMSGSIZE; 98 96 99 - cfpkt_add_head(pkt, &zero, 4); 97 + cfpkt_add_head(pkt, &zero, 3); 98 + packet_type = 0x08; /* B9 set - UNCLASSIFIED */ 99 + cfpkt_add_head(pkt, &packet_type, 1); 100 100 101 101 /* Add info for MUX-layer to route the packet out. */ 102 102 info = cfpkt_info(pkt);
+2 -2
net/caif/cfmuxl.c
··· 244 244 int phyid) 245 245 { 246 246 struct cfmuxl *muxl = container_obj(layr); 247 - struct list_head *node; 247 + struct list_head *node, *next; 248 248 struct cflayer *layer; 249 - list_for_each(node, &muxl->srvl_list) { 249 + list_for_each_safe(node, next, &muxl->srvl_list) { 250 250 layer = list_entry(node, struct cflayer, node); 251 251 if (cfsrvl_phyid_match(layer, phyid)) 252 252 layer->ctrlcmd(layer, ctrl, phyid);
+6 -1
net/can/bcm.c
··· 1427 1427 static int bcm_release(struct socket *sock) 1428 1428 { 1429 1429 struct sock *sk = sock->sk; 1430 - struct bcm_sock *bo = bcm_sk(sk); 1430 + struct bcm_sock *bo; 1431 1431 struct bcm_op *op, *next; 1432 + 1433 + if (sk == NULL) 1434 + return 0; 1435 + 1436 + bo = bcm_sk(sk); 1432 1437 1433 1438 /* remove bcm_ops, timer, rx_unregister(), etc. */ 1434 1439
+6 -1
net/can/raw.c
··· 305 305 static int raw_release(struct socket *sock) 306 306 { 307 307 struct sock *sk = sock->sk; 308 - struct raw_sock *ro = raw_sk(sk); 308 + struct raw_sock *ro; 309 + 310 + if (!sk) 311 + return 0; 312 + 313 + ro = raw_sk(sk); 309 314 310 315 unregister_netdevice_notifier(&ro->notifier); 311 316
+13 -13
net/ceph/messenger.c
··· 2267 2267 m->more_to_follow = false; 2268 2268 m->pool = NULL; 2269 2269 2270 + /* middle */ 2271 + m->middle = NULL; 2272 + 2273 + /* data */ 2274 + m->nr_pages = 0; 2275 + m->page_alignment = 0; 2276 + m->pages = NULL; 2277 + m->pagelist = NULL; 2278 + m->bio = NULL; 2279 + m->bio_iter = NULL; 2280 + m->bio_seg = 0; 2281 + m->trail = NULL; 2282 + 2270 2283 /* front */ 2271 2284 if (front_len) { 2272 2285 if (front_len > PAGE_CACHE_SIZE) { ··· 2298 2285 m->front.iov_base = NULL; 2299 2286 } 2300 2287 m->front.iov_len = front_len; 2301 - 2302 - /* middle */ 2303 - m->middle = NULL; 2304 - 2305 - /* data */ 2306 - m->nr_pages = 0; 2307 - m->page_alignment = 0; 2308 - m->pages = NULL; 2309 - m->pagelist = NULL; 2310 - m->bio = NULL; 2311 - m->bio_iter = NULL; 2312 - m->bio_seg = 0; 2313 - m->trail = NULL; 2314 2288 2315 2289 dout("ceph_msg_new %p front %d\n", m, front_len); 2316 2290 return m;
+2 -2
net/ceph/osd_client.c
··· 470 470 snapc, ops, 471 471 use_mempool, 472 472 GFP_NOFS, NULL, NULL); 473 - if (IS_ERR(req)) 474 - return req; 473 + if (!req) 474 + return NULL; 475 475 476 476 /* calculate max write size */ 477 477 calc_layout(osdc, vino, layout, off, plen, req, ops);
+23 -23
net/core/dev.c
··· 1284 1284 */ 1285 1285 int dev_close(struct net_device *dev) 1286 1286 { 1287 - LIST_HEAD(single); 1287 + if (dev->flags & IFF_UP) { 1288 + LIST_HEAD(single); 1288 1289 1289 - list_add(&dev->unreg_list, &single); 1290 - dev_close_many(&single); 1291 - list_del(&single); 1290 + list_add(&dev->unreg_list, &single); 1291 + dev_close_many(&single); 1292 + list_del(&single); 1293 + } 1292 1294 return 0; 1293 1295 } 1294 1296 EXPORT_SYMBOL(dev_close); ··· 4775 4773 * is never reached 4776 4774 */ 4777 4775 WARN_ON(1); 4778 - err = -EINVAL; 4776 + err = -ENOTTY; 4779 4777 break; 4780 4778 4781 4779 } ··· 5043 5041 /* Set the per device memory buffer space. 5044 5042 * Not applicable in our case */ 5045 5043 case SIOCSIFLINK: 5046 - return -EINVAL; 5044 + return -ENOTTY; 5047 5045 5048 5046 /* 5049 5047 * Unknown or private ioctl. ··· 5064 5062 /* Take care of Wireless Extensions */ 5065 5063 if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST) 5066 5064 return wext_handle_ioctl(net, &ifr, cmd, arg); 5067 - return -EINVAL; 5065 + return -ENOTTY; 5068 5066 } 5069 5067 } 5070 5068 ··· 5186 5184 /* Fix illegal checksum combinations */ 5187 5185 if ((features & NETIF_F_HW_CSUM) && 5188 5186 (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) { 5189 - netdev_info(dev, "mixed HW and IP checksum settings.\n"); 5187 + netdev_warn(dev, "mixed HW and IP checksum settings.\n"); 5190 5188 features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM); 5191 5189 } 5192 5190 5193 5191 if ((features & NETIF_F_NO_CSUM) && 5194 5192 (features & (NETIF_F_HW_CSUM|NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) { 5195 - netdev_info(dev, "mixed no checksumming and other settings.\n"); 5193 + netdev_warn(dev, "mixed no checksumming and other settings.\n"); 5196 5194 features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM|NETIF_F_HW_CSUM); 5197 5195 } 5198 5196 5199 5197 /* Fix illegal SG+CSUM combinations. 
*/ 5200 5198 if ((features & NETIF_F_SG) && 5201 5199 !(features & NETIF_F_ALL_CSUM)) { 5202 - netdev_info(dev, 5203 - "Dropping NETIF_F_SG since no checksum feature.\n"); 5200 + netdev_dbg(dev, 5201 + "Dropping NETIF_F_SG since no checksum feature.\n"); 5204 5202 features &= ~NETIF_F_SG; 5205 5203 } 5206 5204 5207 5205 /* TSO requires that SG is present as well. */ 5208 - if ((features & NETIF_F_TSO) && !(features & NETIF_F_SG)) { 5209 - netdev_info(dev, "Dropping NETIF_F_TSO since no SG feature.\n"); 5210 - features &= ~NETIF_F_TSO; 5206 + if ((features & NETIF_F_ALL_TSO) && !(features & NETIF_F_SG)) { 5207 + netdev_dbg(dev, "Dropping TSO features since no SG feature.\n"); 5208 + features &= ~NETIF_F_ALL_TSO; 5211 5209 } 5210 + 5211 + /* TSO ECN requires that TSO is present as well. */ 5212 + if ((features & NETIF_F_ALL_TSO) == NETIF_F_TSO_ECN) 5213 + features &= ~NETIF_F_TSO_ECN; 5212 5214 5213 5215 /* Software GSO depends on SG. */ 5214 5216 if ((features & NETIF_F_GSO) && !(features & NETIF_F_SG)) { 5215 - netdev_info(dev, "Dropping NETIF_F_GSO since no SG feature.\n"); 5217 + netdev_dbg(dev, "Dropping NETIF_F_GSO since no SG feature.\n"); 5216 5218 features &= ~NETIF_F_GSO; 5217 5219 } 5218 5220 ··· 5226 5220 if (!((features & NETIF_F_GEN_CSUM) || 5227 5221 (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM)) 5228 5222 == (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) { 5229 - netdev_info(dev, 5223 + netdev_dbg(dev, 5230 5224 "Dropping NETIF_F_UFO since no checksum offload features.\n"); 5231 5225 features &= ~NETIF_F_UFO; 5232 5226 } 5233 5227 5234 5228 if (!(features & NETIF_F_SG)) { 5235 - netdev_info(dev, 5229 + netdev_dbg(dev, 5236 5230 "Dropping NETIF_F_UFO since no NETIF_F_SG feature.\n"); 5237 5231 features &= ~NETIF_F_UFO; 5238 5232 } ··· 5413 5407 dev->hw_features |= NETIF_F_SOFT_FEATURES; 5414 5408 dev->features |= NETIF_F_SOFT_FEATURES; 5415 5409 dev->wanted_features = dev->features & dev->hw_features; 5416 - 5417 - /* Avoid warning from netdev_fix_features() 
for GSO without SG */ 5418 - if (!(dev->wanted_features & NETIF_F_SG)) { 5419 - dev->wanted_features &= ~NETIF_F_GSO; 5420 - dev->features &= ~NETIF_F_GSO; 5421 - } 5422 5410 5423 5411 /* Enable GRO and NETIF_F_HIGHDMA for vlans by default, 5424 5412 * vlan_dev_init() will do the dev->features check, so these features
+2
net/dccp/options.c
··· 123 123 case DCCPO_CHANGE_L ... DCCPO_CONFIRM_R: 124 124 if (pkt_type == DCCP_PKT_DATA) /* RFC 4340, 6 */ 125 125 break; 126 + if (len == 0) 127 + goto out_invalid_option; 126 128 rc = dccp_feat_parse_options(sk, dreq, mandatory, opt, 127 129 *value, value + 1, len - 1); 128 130 if (rc)
+2 -2
net/dsa/Kconfig
··· 41 41 default n 42 42 43 43 config NET_DSA_MV88E6131 44 - bool "Marvell 88E6095/6095F/6131 ethernet switch chip support" 44 + bool "Marvell 88E6085/6095/6095F/6131 ethernet switch chip support" 45 45 select NET_DSA_MV88E6XXX 46 46 select NET_DSA_MV88E6XXX_NEED_PPU 47 47 select NET_DSA_TAG_DSA 48 48 ---help--- 49 - This enables support for the Marvell 88E6095/6095F/6131 49 + This enables support for the Marvell 88E6085/6095/6095F/6131 50 50 ethernet switch chips. 51 51 52 52 config NET_DSA_MV88E6123_61_65
+21 -5
net/dsa/mv88e6131.c
··· 207 207 * mode, but do not enable forwarding of unknown unicasts. 208 208 */ 209 209 val = 0x0433; 210 - if (p == dsa_upstream_port(ds)) 210 + if (p == dsa_upstream_port(ds)) { 211 211 val |= 0x0104; 212 + /* 213 + * On 6085, unknown multicast forward is controlled 214 + * here rather than in Port Control 2 register. 215 + */ 216 + if (ps->id == ID_6085) 217 + val |= 0x0008; 218 + } 212 219 if (ds->dsa_port_mask & (1 << p)) 213 220 val |= 0x0100; 214 221 REG_WRITE(addr, 0x04, val); ··· 258 251 * If this is the upstream port for this switch, enable 259 252 * forwarding of unknown multicast addresses. 260 253 */ 261 - val = 0x0080 | dsa_upstream_port(ds); 262 - if (p == dsa_upstream_port(ds)) 263 - val |= 0x0040; 264 - REG_WRITE(addr, 0x08, val); 254 + if (ps->id == ID_6085) 255 + /* 256 + * on 6085, bits 3:0 are reserved, bit 6 control ARP 257 + * mirroring, and multicast forward is handled in 258 + * Port Control register. 259 + */ 260 + REG_WRITE(addr, 0x08, 0x0080); 261 + else { 262 + val = 0x0080 | dsa_upstream_port(ds); 263 + if (p == dsa_upstream_port(ds)) 264 + val |= 0x0040; 265 + REG_WRITE(addr, 0x08, val); 266 + } 265 267 266 268 /* 267 269 * Rate Control: disable ingress rate limiting.
-2
net/ieee802154/Makefile
··· 1 1 obj-$(CONFIG_IEEE802154) += ieee802154.o af_802154.o 2 2 ieee802154-y := netlink.o nl-mac.o nl-phy.o nl_policy.o wpan-class.o 3 3 af_802154-y := af_ieee802154.o raw.o dgram.o 4 - 5 - ccflags-y += -Wall -DDEBUG
+1 -1
net/ipv4/devinet.c
··· 1680 1680 return; 1681 1681 1682 1682 cnf->sysctl = NULL; 1683 - unregister_sysctl_table(t->sysctl_header); 1683 + unregister_net_sysctl_table(t->sysctl_header); 1684 1684 kfree(t->dev_name); 1685 1685 kfree(t); 1686 1686 }
-3
net/ipv4/fib_trie.c
··· 1978 1978 t = (struct trie *) tb->tb_data; 1979 1979 memset(t, 0, sizeof(*t)); 1980 1980 1981 - if (id == RT_TABLE_LOCAL) 1982 - pr_info("IPv4 FIB: Using LC-trie version %s\n", VERSION); 1983 - 1984 1981 return tb; 1985 1982 } 1986 1983
+2 -3
net/ipv4/inet_connection_sock.c
··· 73 73 !sk2->sk_bound_dev_if || 74 74 sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) { 75 75 if (!reuse || !sk2->sk_reuse || 76 - ((1 << sk2->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))) { 76 + sk2->sk_state == TCP_LISTEN) { 77 77 const __be32 sk2_rcv_saddr = sk_rcv_saddr(sk2); 78 78 if (!sk2_rcv_saddr || !sk_rcv_saddr(sk) || 79 79 sk2_rcv_saddr == sk_rcv_saddr(sk)) ··· 122 122 (tb->num_owners < smallest_size || smallest_size == -1)) { 123 123 smallest_size = tb->num_owners; 124 124 smallest_rover = rover; 125 - if (atomic_read(&hashinfo->bsockets) > (high - low) + 1 && 126 - !inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb)) { 125 + if (atomic_read(&hashinfo->bsockets) > (high - low) + 1) { 127 126 spin_unlock(&head->lock); 128 127 snum = smallest_rover; 129 128 goto have_snum;
+7 -6
net/ipv4/inetpeer.c
··· 354 354 } 355 355 356 356 /* May be called with local BH enabled. */ 357 - static void unlink_from_pool(struct inet_peer *p, struct inet_peer_base *base) 357 + static void unlink_from_pool(struct inet_peer *p, struct inet_peer_base *base, 358 + struct inet_peer __rcu **stack[PEER_MAXDEPTH]) 358 359 { 359 360 int do_free; 360 361 ··· 369 368 * We use refcnt=-1 to alert lockless readers this entry is deleted. 370 369 */ 371 370 if (atomic_cmpxchg(&p->refcnt, 1, -1) == 1) { 372 - struct inet_peer __rcu **stack[PEER_MAXDEPTH]; 373 371 struct inet_peer __rcu ***stackptr, ***delp; 374 372 if (lookup(&p->daddr, stack, base) != p) 375 373 BUG(); ··· 422 422 } 423 423 424 424 /* May be called with local BH enabled. */ 425 - static int cleanup_once(unsigned long ttl) 425 + static int cleanup_once(unsigned long ttl, struct inet_peer __rcu **stack[PEER_MAXDEPTH]) 426 426 { 427 427 struct inet_peer *p = NULL; 428 428 ··· 454 454 * happen because of entry limits in route cache. */ 455 455 return -1; 456 456 457 - unlink_from_pool(p, peer_to_base(p)); 457 + unlink_from_pool(p, peer_to_base(p), stack); 458 458 return 0; 459 459 } 460 460 ··· 524 524 525 525 if (base->total >= inet_peer_threshold) 526 526 /* Remove one less-recently-used entry. */ 527 - cleanup_once(0); 527 + cleanup_once(0, stack); 528 528 529 529 return p; 530 530 } ··· 540 540 { 541 541 unsigned long now = jiffies; 542 542 int ttl, total; 543 + struct inet_peer __rcu **stack[PEER_MAXDEPTH]; 543 544 544 545 total = compute_total(); 545 546 if (total >= inet_peer_threshold) ··· 549 548 ttl = inet_peer_maxttl 550 549 - (inet_peer_maxttl - inet_peer_minttl) / HZ * 551 550 total / inet_peer_threshold * HZ; 552 - while (!cleanup_once(ttl)) { 551 + while (!cleanup_once(ttl, stack)) { 553 552 if (jiffies != now) 554 553 break; 555 554 }
+15 -16
net/ipv4/ip_fragment.c
··· 223 223 224 224 if ((qp->q.last_in & INET_FRAG_FIRST_IN) && qp->q.fragments != NULL) { 225 225 struct sk_buff *head = qp->q.fragments; 226 + const struct iphdr *iph; 227 + int err; 226 228 227 229 rcu_read_lock(); 228 230 head->dev = dev_get_by_index_rcu(net, qp->iif); 229 231 if (!head->dev) 230 232 goto out_rcu_unlock; 231 233 234 + /* skb dst is stale, drop it, and perform route lookup again */ 235 + skb_dst_drop(head); 236 + iph = ip_hdr(head); 237 + err = ip_route_input_noref(head, iph->daddr, iph->saddr, 238 + iph->tos, head->dev); 239 + if (err) 240 + goto out_rcu_unlock; 241 + 232 242 /* 233 - * Only search router table for the head fragment, 234 - * when defraging timeout at PRE_ROUTING HOOK. 243 + * Only an end host needs to send an ICMP 244 + * "Fragment Reassembly Timeout" message, per RFC792. 235 245 */ 236 - if (qp->user == IP_DEFRAG_CONNTRACK_IN && !skb_dst(head)) { 237 - const struct iphdr *iph = ip_hdr(head); 238 - int err = ip_route_input(head, iph->daddr, iph->saddr, 239 - iph->tos, head->dev); 240 - if (unlikely(err)) 241 - goto out_rcu_unlock; 246 + if (qp->user == IP_DEFRAG_CONNTRACK_IN && 247 + skb_rtable(head)->rt_type != RTN_LOCAL) 248 + goto out_rcu_unlock; 242 249 243 - /* 244 - * Only an end host needs to send an ICMP 245 - * "Fragment Reassembly Timeout" message, per RFC792. 246 - */ 247 - if (skb_rtable(head)->rt_type != RTN_LOCAL) 248 - goto out_rcu_unlock; 249 - 250 - } 251 250 252 251 /* Send an ICMP "Fragment Reassembly Timeout" message. */ 253 252 icmp_send(head, ICMP_TIME_EXCEEDED, ICMP_EXC_FRAGTIME, 0);
+3 -3
net/ipv4/ip_options.c
··· 329 329 pp_ptr = optptr + 2; 330 330 goto error; 331 331 } 332 - if (skb) { 332 + if (rt) { 333 333 memcpy(&optptr[optptr[2]-1], &rt->rt_spec_dst, 4); 334 334 opt->is_changed = 1; 335 335 } ··· 371 371 goto error; 372 372 } 373 373 opt->ts = optptr - iph; 374 - if (skb) { 374 + if (rt) { 375 375 memcpy(&optptr[optptr[2]-1], &rt->rt_spec_dst, 4); 376 376 timeptr = (__be32*)&optptr[optptr[2]+3]; 377 377 } ··· 603 603 unsigned long orefdst; 604 604 int err; 605 605 606 - if (!opt->srr) 606 + if (!opt->srr || !rt) 607 607 return 0; 608 608 609 609 if (skb->pkt_type != PACKET_HOST)
+7
net/ipv4/route.c
··· 2690 2690 { 2691 2691 } 2692 2692 2693 + static u32 *ipv4_rt_blackhole_cow_metrics(struct dst_entry *dst, 2694 + unsigned long old) 2695 + { 2696 + return NULL; 2697 + } 2698 + 2693 2699 static struct dst_ops ipv4_dst_blackhole_ops = { 2694 2700 .family = AF_INET, 2695 2701 .protocol = cpu_to_be16(ETH_P_IP), ··· 2704 2698 .default_mtu = ipv4_blackhole_default_mtu, 2705 2699 .default_advmss = ipv4_default_advmss, 2706 2700 .update_pmtu = ipv4_rt_blackhole_update_pmtu, 2701 + .cow_metrics = ipv4_rt_blackhole_cow_metrics, 2707 2702 }; 2708 2703 2709 2704 struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_orig)
-3
net/ipv4/sysctl_net_ipv4.c
··· 311 311 .mode = 0644, 312 312 .proc_handler = proc_do_large_bitmap, 313 313 }, 314 - #ifdef CONFIG_IP_MULTICAST 315 314 { 316 315 .procname = "igmp_max_memberships", 317 316 .data = &sysctl_igmp_max_memberships, ··· 318 319 .mode = 0644, 319 320 .proc_handler = proc_dointvec 320 321 }, 321 - 322 - #endif 323 322 { 324 323 .procname = "igmp_max_msf", 325 324 .data = &sysctl_igmp_max_msf,
+7 -2
net/ipv4/tcp_cubic.c
··· 93 93 u32 ack_cnt; /* number of acks */ 94 94 u32 tcp_cwnd; /* estimated tcp cwnd */ 95 95 #define ACK_RATIO_SHIFT 4 96 + #define ACK_RATIO_LIMIT (32u << ACK_RATIO_SHIFT) 96 97 u16 delayed_ack; /* estimate the ratio of Packets/ACKs << 4 */ 97 98 u8 sample_cnt; /* number of samples to decide curr_rtt */ 98 99 u8 found; /* the exit point is found? */ ··· 399 398 u32 delay; 400 399 401 400 if (icsk->icsk_ca_state == TCP_CA_Open) { 402 - cnt -= ca->delayed_ack >> ACK_RATIO_SHIFT; 403 - ca->delayed_ack += cnt; 401 + u32 ratio = ca->delayed_ack; 402 + 403 + ratio -= ca->delayed_ack >> ACK_RATIO_SHIFT; 404 + ratio += cnt; 405 + 406 + ca->delayed_ack = min(ratio, ACK_RATIO_LIMIT); 404 407 } 405 408 406 409 /* Some calls are for duplicates without timetamps */
+6 -2
net/ipv4/xfrm4_output.c
··· 69 69 } 70 70 EXPORT_SYMBOL(xfrm4_prepare_output); 71 71 72 - static int xfrm4_output_finish(struct sk_buff *skb) 72 + int xfrm4_output_finish(struct sk_buff *skb) 73 73 { 74 74 #ifdef CONFIG_NETFILTER 75 75 if (!skb_dst(skb)->xfrm) { ··· 86 86 87 87 int xfrm4_output(struct sk_buff *skb) 88 88 { 89 + struct dst_entry *dst = skb_dst(skb); 90 + struct xfrm_state *x = dst->xfrm; 91 + 89 92 return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING, skb, 90 - NULL, skb_dst(skb)->dev, xfrm4_output_finish, 93 + NULL, dst->dev, 94 + x->outer_mode->afinfo->output_finish, 91 95 !(IPCB(skb)->flags & IPSKB_REROUTED)); 92 96 }
+1
net/ipv4/xfrm4_state.c
··· 78 78 .init_tempsel = __xfrm4_init_tempsel, 79 79 .init_temprop = xfrm4_init_temprop, 80 80 .output = xfrm4_output, 81 + .output_finish = xfrm4_output_finish, 81 82 .extract_input = xfrm4_extract_input, 82 83 .extract_output = xfrm4_extract_output, 83 84 .transport_finish = xfrm4_transport_finish,
+1 -1
net/ipv6/addrconf.c
··· 4537 4537 4538 4538 t = p->sysctl; 4539 4539 p->sysctl = NULL; 4540 - unregister_sysctl_table(t->sysctl_header); 4540 + unregister_net_sysctl_table(t->sysctl_header); 4541 4541 kfree(t->dev_name); 4542 4542 kfree(t); 4543 4543 }
+1 -1
net/ipv6/esp6.c
··· 371 371 iv = esp_tmp_iv(aead, tmp, seqhilen); 372 372 req = esp_tmp_req(aead, iv); 373 373 asg = esp_req_sg(aead, req); 374 - sg = asg + 1; 374 + sg = asg + sglists; 375 375 376 376 skb->ip_summed = CHECKSUM_NONE; 377 377
+1 -1
net/ipv6/inet6_connection_sock.c
··· 44 44 !sk2->sk_bound_dev_if || 45 45 sk->sk_bound_dev_if == sk2->sk_bound_dev_if) && 46 46 (!sk->sk_reuse || !sk2->sk_reuse || 47 - ((1 << sk2->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))) && 47 + sk2->sk_state == TCP_LISTEN) && 48 48 ipv6_rcv_saddr_equal(sk, sk2)) 49 49 break; 50 50 }
+3 -1
net/ipv6/netfilter/ip6t_REJECT.c
··· 45 45 int tcphoff, needs_ack; 46 46 const struct ipv6hdr *oip6h = ipv6_hdr(oldskb); 47 47 struct ipv6hdr *ip6h; 48 + #define DEFAULT_TOS_VALUE 0x0U 49 + const __u8 tclass = DEFAULT_TOS_VALUE; 48 50 struct dst_entry *dst = NULL; 49 51 u8 proto; 50 52 struct flowi6 fl6; ··· 126 124 skb_put(nskb, sizeof(struct ipv6hdr)); 127 125 skb_reset_network_header(nskb); 128 126 ip6h = ipv6_hdr(nskb); 129 - ip6h->version = 6; 127 + *(__be32 *)ip6h = htonl(0x60000000 | (tclass << 20)); 130 128 ip6h->hop_limit = ip6_dst_hoplimit(dst); 131 129 ip6h->nexthdr = IPPROTO_TCP; 132 130 ipv6_addr_copy(&ip6h->saddr, &oip6h->daddr);
+7 -1
net/ipv6/route.c
··· 153 153 { 154 154 } 155 155 156 + static u32 *ip6_rt_blackhole_cow_metrics(struct dst_entry *dst, 157 + unsigned long old) 158 + { 159 + return NULL; 160 + } 161 + 156 162 static struct dst_ops ip6_dst_blackhole_ops = { 157 163 .family = AF_INET6, 158 164 .protocol = cpu_to_be16(ETH_P_IPV6), ··· 167 161 .default_mtu = ip6_blackhole_default_mtu, 168 162 .default_advmss = ip6_default_advmss, 169 163 .update_pmtu = ip6_rt_blackhole_update_pmtu, 164 + .cow_metrics = ip6_rt_blackhole_cow_metrics, 170 165 }; 171 166 172 167 static const u32 ip6_template_metrics[RTAX_MAX] = { ··· 2019 2012 rt->dst.output = ip6_output; 2020 2013 rt->rt6i_dev = net->loopback_dev; 2021 2014 rt->rt6i_idev = idev; 2022 - dst_metric_set(&rt->dst, RTAX_HOPLIMIT, -1); 2023 2015 rt->dst.obsolete = -1; 2024 2016 2025 2017 rt->rt6i_flags = RTF_UP | RTF_NONEXTHOP;
+1 -1
net/ipv6/udp.c
··· 1335 1335 skb->ip_summed = CHECKSUM_NONE; 1336 1336 1337 1337 /* Check if there is enough headroom to insert fragment header. */ 1338 - if ((skb_headroom(skb) < frag_hdr_sz) && 1338 + if ((skb_mac_header(skb) < skb->head + frag_hdr_sz) && 1339 1339 pskb_expand_head(skb, frag_hdr_sz, 0, GFP_ATOMIC)) 1340 1340 goto out; 1341 1341
+3 -3
net/ipv6/xfrm6_output.c
··· 79 79 } 80 80 EXPORT_SYMBOL(xfrm6_prepare_output); 81 81 82 - static int xfrm6_output_finish(struct sk_buff *skb) 82 + int xfrm6_output_finish(struct sk_buff *skb) 83 83 { 84 84 #ifdef CONFIG_NETFILTER 85 85 IP6CB(skb)->flags |= IP6SKB_XFRM_TRANSFORMED; ··· 97 97 if ((x && x->props.mode == XFRM_MODE_TUNNEL) && 98 98 ((skb->len > ip6_skb_dst_mtu(skb) && !skb_is_gso(skb)) || 99 99 dst_allfrag(skb_dst(skb)))) { 100 - return ip6_fragment(skb, xfrm6_output_finish); 100 + return ip6_fragment(skb, x->outer_mode->afinfo->output_finish); 101 101 } 102 - return xfrm6_output_finish(skb); 102 + return x->outer_mode->afinfo->output_finish(skb); 103 103 } 104 104 105 105 int xfrm6_output(struct sk_buff *skb)
+1
net/ipv6/xfrm6_state.c
··· 178 178 .tmpl_sort = __xfrm6_tmpl_sort, 179 179 .state_sort = __xfrm6_state_sort, 180 180 .output = xfrm6_output, 181 + .output_finish = xfrm6_output_finish, 181 182 .extract_input = xfrm6_extract_input, 182 183 .extract_output = xfrm6_extract_output, 183 184 .transport_finish = xfrm6_transport_finish,
+1 -2
net/irda/af_irda.c
··· 1297 1297 /* Note : socket.c set MSG_EOR on SEQPACKET sockets */ 1298 1298 if (msg->msg_flags & ~(MSG_DONTWAIT | MSG_EOR | MSG_CMSG_COMPAT | 1299 1299 MSG_NOSIGNAL)) { 1300 - err = -EINVAL; 1301 - goto out; 1300 + return -EINVAL; 1302 1301 } 1303 1302 1304 1303 lock_sock(sk);
+1 -1
net/l2tp/l2tp_ip.c
··· 667 667 MODULE_DESCRIPTION("L2TP over IP"); 668 668 MODULE_VERSION("1.0"); 669 669 670 - /* Use the value of SOCK_DGRAM (2) directory, because __stringify does't like 670 + /* Use the value of SOCK_DGRAM (2) directory, because __stringify doesn't like 671 671 * enums 672 672 */ 673 673 MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET, 2, IPPROTO_L2TP);
+1 -2
net/llc/llc_input.c
··· 121 121 s32 data_size = ntohs(pdulen) - llc_len; 122 122 123 123 if (data_size < 0 || 124 - ((skb_tail_pointer(skb) - 125 - (u8 *)pdu) - llc_len) < data_size) 124 + !pskb_may_pull(skb, data_size)) 126 125 return 0; 127 126 if (unlikely(pskb_trim_rcsum(skb, data_size))) 128 127 return 0;
+2
net/mac80211/cfg.c
··· 1504 1504 enum ieee80211_smps_mode old_req; 1505 1505 int err; 1506 1506 1507 + lockdep_assert_held(&sdata->u.mgd.mtx); 1508 + 1507 1509 old_req = sdata->u.mgd.req_smps; 1508 1510 sdata->u.mgd.req_smps = smps_mode; 1509 1511
+2 -2
net/mac80211/debugfs_netdev.c
··· 177 177 if (sdata->vif.type != NL80211_IFTYPE_STATION) 178 178 return -EOPNOTSUPP; 179 179 180 - mutex_lock(&local->iflist_mtx); 180 + mutex_lock(&sdata->u.mgd.mtx); 181 181 err = __ieee80211_request_smps(sdata, smps_mode); 182 - mutex_unlock(&local->iflist_mtx); 182 + mutex_unlock(&sdata->u.mgd.mtx); 183 183 184 184 return err; 185 185 }
+4
net/mac80211/tx.c
··· 237 237 &local->dynamic_ps_disable_work); 238 238 } 239 239 240 + /* Don't restart the timer if we're not disassociated */ 241 + if (!ifmgd->associated) 242 + return TX_CONTINUE; 243 + 240 244 mod_timer(&local->dynamic_ps_timer, jiffies + 241 245 msecs_to_jiffies(local->hw.conf.dynamic_ps_timeout)); 242 246
+4
net/netfilter/ipset/ip_set_bitmap_ipmac.c
··· 343 343 ipset_adtfn adtfn = set->variant->adt[adt]; 344 344 struct ipmac data; 345 345 346 + /* MAC can be src only */ 347 + if (!(flags & IPSET_DIM_TWO_SRC)) 348 + return 0; 349 + 346 350 data.id = ntohl(ip4addr(skb, flags & IPSET_DIM_ONE_SRC)); 347 351 if (data.id < map->first_ip || data.id > map->last_ip) 348 352 return -IPSET_ERR_BITMAP_RANGE;
+10 -8
net/netfilter/ipset/ip_set_core.c
··· 1022 1022 if (cb->args[1] >= ip_set_max) 1023 1023 goto out; 1024 1024 1025 - pr_debug("args[0]: %ld args[1]: %ld\n", cb->args[0], cb->args[1]); 1026 1025 max = cb->args[0] == DUMP_ONE ? cb->args[1] + 1 : ip_set_max; 1026 + dump_last: 1027 + pr_debug("args[0]: %ld args[1]: %ld\n", cb->args[0], cb->args[1]); 1027 1028 for (; cb->args[1] < max; cb->args[1]++) { 1028 1029 index = (ip_set_id_t) cb->args[1]; 1029 1030 set = ip_set_list[index]; ··· 1039 1038 * so that lists (unions of sets) are dumped last. 1040 1039 */ 1041 1040 if (cb->args[0] != DUMP_ONE && 1042 - !((cb->args[0] == DUMP_ALL) ^ 1043 - (set->type->features & IPSET_DUMP_LAST))) 1041 + ((cb->args[0] == DUMP_ALL) == 1042 + !!(set->type->features & IPSET_DUMP_LAST))) 1044 1043 continue; 1045 1044 pr_debug("List set: %s\n", set->name); 1046 1045 if (!cb->args[2]) { ··· 1084 1083 goto release_refcount; 1085 1084 } 1086 1085 } 1086 + /* If we dump all sets, continue with dumping last ones */ 1087 + if (cb->args[0] == DUMP_ALL) { 1088 + cb->args[0] = DUMP_LAST; 1089 + cb->args[1] = 0; 1090 + goto dump_last; 1091 + } 1087 1092 goto out; 1088 1093 1089 1094 nla_put_failure: ··· 1100 1093 pr_debug("release set %s\n", ip_set_list[index]->name); 1101 1094 ip_set_put_byindex(index); 1102 1095 } 1103 - 1104 - /* If we dump all sets, continue with dumping last ones */ 1105 - if (cb->args[0] == DUMP_ALL && cb->args[1] >= max && !cb->args[2]) 1106 - cb->args[0] = DUMP_LAST; 1107 - 1108 1096 out: 1109 1097 if (nlh) { 1110 1098 nlmsg_end(skb, nlh);
+4 -13
net/netfilter/ipvs/ip_vs_app.c
··· 572 572 .open = ip_vs_app_open, 573 573 .read = seq_read, 574 574 .llseek = seq_lseek, 575 - .release = seq_release, 575 + .release = seq_release_net, 576 576 }; 577 577 #endif 578 578 579 - static int __net_init __ip_vs_app_init(struct net *net) 579 + int __net_init __ip_vs_app_init(struct net *net) 580 580 { 581 581 struct netns_ipvs *ipvs = net_ipvs(net); 582 582 ··· 585 585 return 0; 586 586 } 587 587 588 - static void __net_exit __ip_vs_app_cleanup(struct net *net) 588 + void __net_exit __ip_vs_app_cleanup(struct net *net) 589 589 { 590 590 proc_net_remove(net, "ip_vs_app"); 591 591 } 592 592 593 - static struct pernet_operations ip_vs_app_ops = { 594 - .init = __ip_vs_app_init, 595 - .exit = __ip_vs_app_cleanup, 596 - }; 597 - 598 593 int __init ip_vs_app_init(void) 599 594 { 600 - int rv; 601 - 602 - rv = register_pernet_subsys(&ip_vs_app_ops); 603 - return rv; 595 + return 0; 604 596 } 605 597 606 598 607 599 void ip_vs_app_cleanup(void) 608 600 { 609 - unregister_pernet_subsys(&ip_vs_app_ops); 610 601 }
+4 -12
net/netfilter/ipvs/ip_vs_conn.c
··· 1046 1046 .open = ip_vs_conn_open, 1047 1047 .read = seq_read, 1048 1048 .llseek = seq_lseek, 1049 - .release = seq_release, 1049 + .release = seq_release_net, 1050 1050 }; 1051 1051 1052 1052 static const char *ip_vs_origin_name(unsigned flags) ··· 1114 1114 .open = ip_vs_conn_sync_open, 1115 1115 .read = seq_read, 1116 1116 .llseek = seq_lseek, 1117 - .release = seq_release, 1117 + .release = seq_release_net, 1118 1118 }; 1119 1119 1120 1120 #endif ··· 1258 1258 return 0; 1259 1259 } 1260 1260 1261 - static void __net_exit __ip_vs_conn_cleanup(struct net *net) 1261 + void __net_exit __ip_vs_conn_cleanup(struct net *net) 1262 1262 { 1263 1263 /* flush all the connection entries first */ 1264 1264 ip_vs_conn_flush(net); 1265 1265 proc_net_remove(net, "ip_vs_conn"); 1266 1266 proc_net_remove(net, "ip_vs_conn_sync"); 1267 1267 } 1268 - static struct pernet_operations ipvs_conn_ops = { 1269 - .init = __ip_vs_conn_init, 1270 - .exit = __ip_vs_conn_cleanup, 1271 - }; 1272 1268 1273 1269 int __init ip_vs_conn_init(void) 1274 1270 { 1275 1271 int idx; 1276 - int retc; 1277 1272 1278 1273 /* Compute size and mask */ 1279 1274 ip_vs_conn_tab_size = 1 << ip_vs_conn_tab_bits; ··· 1304 1309 rwlock_init(&__ip_vs_conntbl_lock_array[idx].l); 1305 1310 } 1306 1311 1307 - retc = register_pernet_subsys(&ipvs_conn_ops); 1308 - 1309 1312 /* calculate the random value for connection hash */ 1310 1313 get_random_bytes(&ip_vs_conn_rnd, sizeof(ip_vs_conn_rnd)); 1311 1314 1312 - return retc; 1315 + return 0; 1313 1316 } 1314 1317 1315 1318 void ip_vs_conn_cleanup(void) 1316 1319 { 1317 - unregister_pernet_subsys(&ipvs_conn_ops); 1318 1320 /* Release the empty cache */ 1319 1321 kmem_cache_destroy(ip_vs_conn_cachep); 1320 1322 vfree(ip_vs_conn_tab);
+93 -10
net/netfilter/ipvs/ip_vs_core.c
··· 1113 1113 return NF_ACCEPT; 1114 1114 1115 1115 net = skb_net(skb); 1116 + if (!net_ipvs(net)->enable) 1117 + return NF_ACCEPT; 1118 + 1116 1119 ip_vs_fill_iphdr(af, skb_network_header(skb), &iph); 1117 1120 #ifdef CONFIG_IP_VS_IPV6 1118 1121 if (af == AF_INET6) { ··· 1346 1343 return NF_ACCEPT; /* The packet looks wrong, ignore */ 1347 1344 1348 1345 net = skb_net(skb); 1346 + 1349 1347 pd = ip_vs_proto_data_get(net, cih->protocol); 1350 1348 if (!pd) 1351 1349 return NF_ACCEPT; ··· 1533 1529 IP_VS_DBG_ADDR(af, &iph.daddr), hooknum); 1534 1530 return NF_ACCEPT; 1535 1531 } 1532 + /* ipvs enabled in this netns ? */ 1533 + net = skb_net(skb); 1534 + if (!net_ipvs(net)->enable) 1535 + return NF_ACCEPT; 1536 + 1536 1537 ip_vs_fill_iphdr(af, skb_network_header(skb), &iph); 1537 1538 1538 1539 /* Bad... Do not break raw sockets */ ··· 1571 1562 ip_vs_fill_iphdr(af, skb_network_header(skb), &iph); 1572 1563 } 1573 1564 1574 - net = skb_net(skb); 1575 1565 /* Protocol supported? */ 1576 1566 pd = ip_vs_proto_data_get(net, iph.protocol); 1577 1567 if (unlikely(!pd)) ··· 1596 1588 } 1597 1589 1598 1590 IP_VS_DBG_PKT(11, af, pp, skb, 0, "Incoming packet"); 1599 - net = skb_net(skb); 1600 1591 ipvs = net_ipvs(net); 1601 1592 /* Check the server status */ 1602 1593 if (cp->dest && !(cp->dest->flags & IP_VS_DEST_F_AVAILABLE)) { ··· 1750 1743 int (*okfn)(struct sk_buff *)) 1751 1744 { 1752 1745 int r; 1746 + struct net *net; 1753 1747 1754 1748 if (ip_hdr(skb)->protocol != IPPROTO_ICMP) 1749 + return NF_ACCEPT; 1750 + 1751 + /* ipvs enabled in this netns ? */ 1752 + net = skb_net(skb); 1753 + if (!net_ipvs(net)->enable) 1755 1754 return NF_ACCEPT; 1756 1755 1757 1756 return ip_vs_in_icmp(skb, &r, hooknum); ··· 1770 1757 int (*okfn)(struct sk_buff *)) 1771 1758 { 1772 1759 int r; 1760 + struct net *net; 1773 1761 1774 1762 if (ipv6_hdr(skb)->nexthdr != IPPROTO_ICMPV6) 1763 + return NF_ACCEPT; 1764 + 1765 + /* ipvs enabled in this netns ? 
*/ 1766 + net = skb_net(skb); 1767 + if (!net_ipvs(net)->enable) 1775 1768 return NF_ACCEPT; 1776 1769 1777 1770 return ip_vs_in_icmp_v6(skb, &r, hooknum); ··· 1903 1884 pr_err("%s(): no memory.\n", __func__); 1904 1885 return -ENOMEM; 1905 1886 } 1887 + /* Hold the beast until a service is registerd */ 1888 + ipvs->enable = 0; 1906 1889 ipvs->net = net; 1907 1890 /* Counters used for creating unique names */ 1908 1891 ipvs->gen = atomic_read(&ipvs_netns_cnt); 1909 1892 atomic_inc(&ipvs_netns_cnt); 1910 1893 net->ipvs = ipvs; 1894 + 1895 + if (__ip_vs_estimator_init(net) < 0) 1896 + goto estimator_fail; 1897 + 1898 + if (__ip_vs_control_init(net) < 0) 1899 + goto control_fail; 1900 + 1901 + if (__ip_vs_protocol_init(net) < 0) 1902 + goto protocol_fail; 1903 + 1904 + if (__ip_vs_app_init(net) < 0) 1905 + goto app_fail; 1906 + 1907 + if (__ip_vs_conn_init(net) < 0) 1908 + goto conn_fail; 1909 + 1910 + if (__ip_vs_sync_init(net) < 0) 1911 + goto sync_fail; 1912 + 1911 1913 printk(KERN_INFO "IPVS: Creating netns size=%zu id=%d\n", 1912 1914 sizeof(struct netns_ipvs), ipvs->gen); 1913 1915 return 0; 1916 + /* 1917 + * Error handling 1918 + */ 1919 + 1920 + sync_fail: 1921 + __ip_vs_conn_cleanup(net); 1922 + conn_fail: 1923 + __ip_vs_app_cleanup(net); 1924 + app_fail: 1925 + __ip_vs_protocol_cleanup(net); 1926 + protocol_fail: 1927 + __ip_vs_control_cleanup(net); 1928 + control_fail: 1929 + __ip_vs_estimator_cleanup(net); 1930 + estimator_fail: 1931 + return -ENOMEM; 1914 1932 } 1915 1933 1916 1934 static void __net_exit __ip_vs_cleanup(struct net *net) 1917 1935 { 1918 - IP_VS_DBG(10, "ipvs netns %d released\n", net_ipvs(net)->gen); 1936 + __ip_vs_service_cleanup(net); /* ip_vs_flush() with locks */ 1937 + __ip_vs_conn_cleanup(net); 1938 + __ip_vs_app_cleanup(net); 1939 + __ip_vs_protocol_cleanup(net); 1940 + __ip_vs_control_cleanup(net); 1941 + __ip_vs_estimator_cleanup(net); 1942 + IP_VS_DBG(2, "ipvs netns %d released\n", net_ipvs(net)->gen); 1943 + } 1944 + 1945 + 
static void __net_exit __ip_vs_dev_cleanup(struct net *net) 1946 + { 1947 + EnterFunction(2); 1948 + net_ipvs(net)->enable = 0; /* Disable packet reception */ 1949 + __ip_vs_sync_cleanup(net); 1950 + LeaveFunction(2); 1919 1951 } 1920 1952 1921 1953 static struct pernet_operations ipvs_core_ops = { ··· 1976 1906 .size = sizeof(struct netns_ipvs), 1977 1907 }; 1978 1908 1909 + static struct pernet_operations ipvs_core_dev_ops = { 1910 + .exit = __ip_vs_dev_cleanup, 1911 + }; 1912 + 1979 1913 /* 1980 1914 * Initialize IP Virtual Server 1981 1915 */ 1982 1916 static int __init ip_vs_init(void) 1983 1917 { 1984 1918 int ret; 1985 - 1986 - ret = register_pernet_subsys(&ipvs_core_ops); /* Alloc ip_vs struct */ 1987 - if (ret < 0) 1988 - return ret; 1989 1919 1990 1920 ip_vs_estimator_init(); 1991 1921 ret = ip_vs_control_init(); ··· 2014 1944 goto cleanup_conn; 2015 1945 } 2016 1946 1947 + ret = register_pernet_subsys(&ipvs_core_ops); /* Alloc ip_vs struct */ 1948 + if (ret < 0) 1949 + goto cleanup_sync; 1950 + 1951 + ret = register_pernet_device(&ipvs_core_dev_ops); 1952 + if (ret < 0) 1953 + goto cleanup_sub; 1954 + 2017 1955 ret = nf_register_hooks(ip_vs_ops, ARRAY_SIZE(ip_vs_ops)); 2018 1956 if (ret < 0) { 2019 1957 pr_err("can't register hooks.\n"); 2020 - goto cleanup_sync; 1958 + goto cleanup_dev; 2021 1959 } 2022 1960 2023 1961 pr_info("ipvs loaded.\n"); 1962 + 2024 1963 return ret; 2025 1964 1965 + cleanup_dev: 1966 + unregister_pernet_device(&ipvs_core_dev_ops); 1967 + cleanup_sub: 1968 + unregister_pernet_subsys(&ipvs_core_ops); 2026 1969 cleanup_sync: 2027 1970 ip_vs_sync_cleanup(); 2028 1971 cleanup_conn: ··· 2047 1964 ip_vs_control_cleanup(); 2048 1965 cleanup_estimator: 2049 1966 ip_vs_estimator_cleanup(); 2050 - unregister_pernet_subsys(&ipvs_core_ops); /* free ip_vs struct */ 2051 1967 return ret; 2052 1968 } 2053 1969 2054 1970 static void __exit ip_vs_cleanup(void) 2055 1971 { 2056 1972 nf_unregister_hooks(ip_vs_ops, ARRAY_SIZE(ip_vs_ops)); 1973 + 
unregister_pernet_device(&ipvs_core_dev_ops); 1974 + unregister_pernet_subsys(&ipvs_core_ops); /* free ip_vs struct */ 2057 1975 ip_vs_sync_cleanup(); 2058 1976 ip_vs_conn_cleanup(); 2059 1977 ip_vs_app_cleanup(); 2060 1978 ip_vs_protocol_cleanup(); 2061 1979 ip_vs_control_cleanup(); 2062 1980 ip_vs_estimator_cleanup(); 2063 - unregister_pernet_subsys(&ipvs_core_ops); /* free ip_vs struct */ 2064 1981 pr_info("ipvs unloaded.\n"); 2065 1982 } 2066 1983
+104 -22
net/netfilter/ipvs/ip_vs_ctl.c
··· 69 69 } 70 70 #endif 71 71 72 + 73 + /* Protos */ 74 + static void __ip_vs_del_service(struct ip_vs_service *svc); 75 + 76 + 72 77 #ifdef CONFIG_IP_VS_IPV6 73 78 /* Taken from rt6_fill_node() in net/ipv6/route.c, is there a better way? */ 74 79 static int __ip_vs_addr_is_local_v6(struct net *net, ··· 1219 1214 write_unlock_bh(&__ip_vs_svc_lock); 1220 1215 1221 1216 *svc_p = svc; 1217 + /* Now there is a service - full throttle */ 1218 + ipvs->enable = 1; 1222 1219 return 0; 1223 1220 1224 1221 ··· 1479 1472 return 0; 1480 1473 } 1481 1474 1475 + /* 1476 + * Delete service by {netns} in the service table. 1477 + * Called by __ip_vs_cleanup() 1478 + */ 1479 + void __ip_vs_service_cleanup(struct net *net) 1480 + { 1481 + EnterFunction(2); 1482 + /* Check for "full" addressed entries */ 1483 + mutex_lock(&__ip_vs_mutex); 1484 + ip_vs_flush(net); 1485 + mutex_unlock(&__ip_vs_mutex); 1486 + LeaveFunction(2); 1487 + } 1488 + /* 1489 + * Release dst hold by dst_cache 1490 + */ 1491 + static inline void 1492 + __ip_vs_dev_reset(struct ip_vs_dest *dest, struct net_device *dev) 1493 + { 1494 + spin_lock_bh(&dest->dst_lock); 1495 + if (dest->dst_cache && dest->dst_cache->dev == dev) { 1496 + IP_VS_DBG_BUF(3, "Reset dev:%s dest %s:%u ,dest->refcnt=%d\n", 1497 + dev->name, 1498 + IP_VS_DBG_ADDR(dest->af, &dest->addr), 1499 + ntohs(dest->port), 1500 + atomic_read(&dest->refcnt)); 1501 + ip_vs_dst_reset(dest); 1502 + } 1503 + spin_unlock_bh(&dest->dst_lock); 1504 + 1505 + } 1506 + /* 1507 + * Netdev event receiver 1508 + * Currently only NETDEV_UNREGISTER is handled, i.e. if we hold a reference to 1509 + * a device that is "unregister" it must be released. 
1510 + */ 1511 + static int ip_vs_dst_event(struct notifier_block *this, unsigned long event, 1512 + void *ptr) 1513 + { 1514 + struct net_device *dev = ptr; 1515 + struct net *net = dev_net(dev); 1516 + struct ip_vs_service *svc; 1517 + struct ip_vs_dest *dest; 1518 + unsigned int idx; 1519 + 1520 + if (event != NETDEV_UNREGISTER) 1521 + return NOTIFY_DONE; 1522 + IP_VS_DBG(3, "%s() dev=%s\n", __func__, dev->name); 1523 + EnterFunction(2); 1524 + mutex_lock(&__ip_vs_mutex); 1525 + for (idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) { 1526 + list_for_each_entry(svc, &ip_vs_svc_table[idx], s_list) { 1527 + if (net_eq(svc->net, net)) { 1528 + list_for_each_entry(dest, &svc->destinations, 1529 + n_list) { 1530 + __ip_vs_dev_reset(dest, dev); 1531 + } 1532 + } 1533 + } 1534 + 1535 + list_for_each_entry(svc, &ip_vs_svc_fwm_table[idx], f_list) { 1536 + if (net_eq(svc->net, net)) { 1537 + list_for_each_entry(dest, &svc->destinations, 1538 + n_list) { 1539 + __ip_vs_dev_reset(dest, dev); 1540 + } 1541 + } 1542 + 1543 + } 1544 + } 1545 + 1546 + list_for_each_entry(dest, &net_ipvs(net)->dest_trash, n_list) { 1547 + __ip_vs_dev_reset(dest, dev); 1548 + } 1549 + mutex_unlock(&__ip_vs_mutex); 1550 + LeaveFunction(2); 1551 + return NOTIFY_DONE; 1552 + } 1482 1553 1483 1554 /* 1484 1555 * Zero counters in a service or all services ··· 2066 1981 .open = ip_vs_info_open, 2067 1982 .read = seq_read, 2068 1983 .llseek = seq_lseek, 2069 - .release = seq_release_private, 1984 + .release = seq_release_net, 2070 1985 }; 2071 1986 2072 1987 #endif ··· 2109 2024 .open = ip_vs_stats_seq_open, 2110 2025 .read = seq_read, 2111 2026 .llseek = seq_lseek, 2112 - .release = single_release, 2027 + .release = single_release_net, 2113 2028 }; 2114 2029 2115 2030 static int ip_vs_stats_percpu_show(struct seq_file *seq, void *v) ··· 2178 2093 .open = ip_vs_stats_percpu_seq_open, 2179 2094 .read = seq_read, 2180 2095 .llseek = seq_lseek, 2181 - .release = single_release, 2096 + .release = 
single_release_net, 2182 2097 }; 2183 2098 #endif 2184 2099 ··· 3673 3588 3674 3589 #endif 3675 3590 3591 + static struct notifier_block ip_vs_dst_notifier = { 3592 + .notifier_call = ip_vs_dst_event, 3593 + }; 3594 + 3676 3595 int __net_init __ip_vs_control_init(struct net *net) 3677 3596 { 3678 3597 int idx; ··· 3715 3626 return -ENOMEM; 3716 3627 } 3717 3628 3718 - static void __net_exit __ip_vs_control_cleanup(struct net *net) 3629 + void __net_exit __ip_vs_control_cleanup(struct net *net) 3719 3630 { 3720 3631 struct netns_ipvs *ipvs = net_ipvs(net); 3721 3632 ··· 3727 3638 proc_net_remove(net, "ip_vs"); 3728 3639 free_percpu(ipvs->tot_stats.cpustats); 3729 3640 } 3730 - 3731 - static struct pernet_operations ipvs_control_ops = { 3732 - .init = __ip_vs_control_init, 3733 - .exit = __ip_vs_control_cleanup, 3734 - }; 3735 3641 3736 3642 int __init ip_vs_control_init(void) 3737 3643 { ··· 3741 3657 INIT_LIST_HEAD(&ip_vs_svc_fwm_table[idx]); 3742 3658 } 3743 3659 3744 - ret = register_pernet_subsys(&ipvs_control_ops); 3745 - if (ret) { 3746 - pr_err("cannot register namespace.\n"); 3747 - goto err; 3748 - } 3749 - 3750 3660 smp_wmb(); /* Do we really need it now ? 
*/ 3751 3661 3752 3662 ret = nf_register_sockopt(&ip_vs_sockopts); 3753 3663 if (ret) { 3754 3664 pr_err("cannot register sockopt.\n"); 3755 - goto err_net; 3665 + goto err_sock; 3756 3666 } 3757 3667 3758 3668 ret = ip_vs_genl_register(); 3759 3669 if (ret) { 3760 3670 pr_err("cannot register Generic Netlink interface.\n"); 3761 - nf_unregister_sockopt(&ip_vs_sockopts); 3762 - goto err_net; 3671 + goto err_genl; 3763 3672 } 3673 + 3674 + ret = register_netdevice_notifier(&ip_vs_dst_notifier); 3675 + if (ret < 0) 3676 + goto err_notf; 3764 3677 3765 3678 LeaveFunction(2); 3766 3679 return 0; 3767 3680 3768 - err_net: 3769 - unregister_pernet_subsys(&ipvs_control_ops); 3770 - err: 3681 + err_notf: 3682 + ip_vs_genl_unregister(); 3683 + err_genl: 3684 + nf_unregister_sockopt(&ip_vs_sockopts); 3685 + err_sock: 3771 3686 return ret; 3772 3687 } 3773 3688 ··· 3774 3691 void ip_vs_control_cleanup(void) 3775 3692 { 3776 3693 EnterFunction(2); 3777 - unregister_pernet_subsys(&ipvs_control_ops); 3778 3694 ip_vs_genl_unregister(); 3779 3695 nf_unregister_sockopt(&ip_vs_sockopts); 3780 3696 LeaveFunction(2);
+3 -11
net/netfilter/ipvs/ip_vs_est.c
··· 192 192 dst->outbps = (e->outbps + 0xF) >> 5; 193 193 } 194 194 195 - static int __net_init __ip_vs_estimator_init(struct net *net) 195 + int __net_init __ip_vs_estimator_init(struct net *net) 196 196 { 197 197 struct netns_ipvs *ipvs = net_ipvs(net); 198 198 ··· 203 203 return 0; 204 204 } 205 205 206 - static void __net_exit __ip_vs_estimator_exit(struct net *net) 206 + void __net_exit __ip_vs_estimator_cleanup(struct net *net) 207 207 { 208 208 del_timer_sync(&net_ipvs(net)->est_timer); 209 209 } 210 - static struct pernet_operations ip_vs_app_ops = { 211 - .init = __ip_vs_estimator_init, 212 - .exit = __ip_vs_estimator_exit, 213 - }; 214 210 215 211 int __init ip_vs_estimator_init(void) 216 212 { 217 - int rv; 218 - 219 - rv = register_pernet_subsys(&ip_vs_app_ops); 220 - return rv; 213 + return 0; 221 214 } 222 215 223 216 void ip_vs_estimator_cleanup(void) 224 217 { 225 - unregister_pernet_subsys(&ip_vs_app_ops); 226 218 }
+2 -9
net/netfilter/ipvs/ip_vs_proto.c
··· 316 316 /* 317 317 * per network name-space init 318 318 */ 319 - static int __net_init __ip_vs_protocol_init(struct net *net) 319 + int __net_init __ip_vs_protocol_init(struct net *net) 320 320 { 321 321 #ifdef CONFIG_IP_VS_PROTO_TCP 322 322 register_ip_vs_proto_netns(net, &ip_vs_protocol_tcp); ··· 336 336 return 0; 337 337 } 338 338 339 - static void __net_exit __ip_vs_protocol_cleanup(struct net *net) 339 + void __net_exit __ip_vs_protocol_cleanup(struct net *net) 340 340 { 341 341 struct netns_ipvs *ipvs = net_ipvs(net); 342 342 struct ip_vs_proto_data *pd; ··· 348 348 unregister_ip_vs_proto_netns(net, pd); 349 349 } 350 350 } 351 - 352 - static struct pernet_operations ipvs_proto_ops = { 353 - .init = __ip_vs_protocol_init, 354 - .exit = __ip_vs_protocol_cleanup, 355 - }; 356 351 357 352 int __init ip_vs_protocol_init(void) 358 353 { ··· 377 382 REGISTER_PROTOCOL(&ip_vs_protocol_esp); 378 383 #endif 379 384 pr_info("Registered protocols (%s)\n", &protocols[2]); 380 - return register_pernet_subsys(&ipvs_proto_ops); 381 385 382 386 return 0; 383 387 } ··· 387 393 struct ip_vs_protocol *pp; 388 394 int i; 389 395 390 - unregister_pernet_subsys(&ipvs_proto_ops); 391 396 /* unregister all the ipvs protocols */ 392 397 for (i = 0; i < IP_VS_PROTO_TAB_SIZE; i++) { 393 398 while ((pp = ip_vs_proto_table[i]) != NULL)
+38 -29
net/netfilter/ipvs/ip_vs_sync.c
··· 1303 1303 struct socket *sock; 1304 1304 int result; 1305 1305 1306 - /* First create a socket */ 1307 - result = __sock_create(net, PF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock, 1); 1306 + /* First create a socket move it to right name space later */ 1307 + result = sock_create_kern(PF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock); 1308 1308 if (result < 0) { 1309 1309 pr_err("Error during creation of socket; terminating\n"); 1310 1310 return ERR_PTR(result); 1311 1311 } 1312 - 1312 + /* 1313 + * Kernel sockets that are a part of a namespace, should not 1314 + * hold a reference to a namespace in order to allow to stop it. 1315 + * After sk_change_net should be released using sk_release_kernel. 1316 + */ 1317 + sk_change_net(sock->sk, net); 1313 1318 result = set_mcast_if(sock->sk, ipvs->master_mcast_ifn); 1314 1319 if (result < 0) { 1315 1320 pr_err("Error setting outbound mcast interface\n"); ··· 1339 1334 1340 1335 return sock; 1341 1336 1342 - error: 1343 - sock_release(sock); 1337 + error: 1338 + sk_release_kernel(sock->sk); 1344 1339 return ERR_PTR(result); 1345 1340 } 1346 1341 ··· 1355 1350 int result; 1356 1351 1357 1352 /* First create a socket */ 1358 - result = __sock_create(net, PF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock, 1); 1353 + result = sock_create_kern(PF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock); 1359 1354 if (result < 0) { 1360 1355 pr_err("Error during creation of socket; terminating\n"); 1361 1356 return ERR_PTR(result); 1362 1357 } 1363 - 1358 + /* 1359 + * Kernel sockets that are a part of a namespace, should not 1360 + * hold a reference to a namespace in order to allow to stop it. 1361 + * After sk_change_net should be released using sk_release_kernel. 
1362 + */ 1363 + sk_change_net(sock->sk, net); 1364 1364 /* it is equivalent to the REUSEADDR option in user-space */ 1365 1365 sock->sk->sk_reuse = 1; 1366 1366 ··· 1387 1377 1388 1378 return sock; 1389 1379 1390 - error: 1391 - sock_release(sock); 1380 + error: 1381 + sk_release_kernel(sock->sk); 1392 1382 return ERR_PTR(result); 1393 1383 } 1394 1384 ··· 1483 1473 ip_vs_sync_buff_release(sb); 1484 1474 1485 1475 /* release the sending multicast socket */ 1486 - sock_release(tinfo->sock); 1476 + sk_release_kernel(tinfo->sock->sk); 1487 1477 kfree(tinfo); 1488 1478 1489 1479 return 0; ··· 1523 1513 } 1524 1514 1525 1515 /* release the sending multicast socket */ 1526 - sock_release(tinfo->sock); 1516 + sk_release_kernel(tinfo->sock->sk); 1527 1517 kfree(tinfo->buf); 1528 1518 kfree(tinfo); 1529 1519 ··· 1611 1601 outbuf: 1612 1602 kfree(buf); 1613 1603 outsocket: 1614 - sock_release(sock); 1604 + sk_release_kernel(sock->sk); 1615 1605 out: 1616 1606 return result; 1617 1607 } ··· 1620 1610 int stop_sync_thread(struct net *net, int state) 1621 1611 { 1622 1612 struct netns_ipvs *ipvs = net_ipvs(net); 1613 + int retc = -EINVAL; 1623 1614 1624 1615 IP_VS_DBG(7, "%s(): pid %d\n", __func__, task_pid_nr(current)); 1625 1616 ··· 1640 1629 spin_lock_bh(&ipvs->sync_lock); 1641 1630 ipvs->sync_state &= ~IP_VS_STATE_MASTER; 1642 1631 spin_unlock_bh(&ipvs->sync_lock); 1643 - kthread_stop(ipvs->master_thread); 1632 + retc = kthread_stop(ipvs->master_thread); 1644 1633 ipvs->master_thread = NULL; 1645 1634 } else if (state == IP_VS_STATE_BACKUP) { 1646 1635 if (!ipvs->backup_thread) ··· 1650 1639 task_pid_nr(ipvs->backup_thread)); 1651 1640 1652 1641 ipvs->sync_state &= ~IP_VS_STATE_BACKUP; 1653 - kthread_stop(ipvs->backup_thread); 1642 + retc = kthread_stop(ipvs->backup_thread); 1654 1643 ipvs->backup_thread = NULL; 1655 - } else { 1656 - return -EINVAL; 1657 1644 } 1658 1645 1659 1646 /* decrease the module use count */ 1660 1647 ip_vs_use_count_dec(); 1661 1648 1662 - return 
0; 1649 + return retc; 1663 1650 } 1664 1651 1665 1652 /* 1666 1653 * Initialize data struct for each netns 1667 1654 */ 1668 - static int __net_init __ip_vs_sync_init(struct net *net) 1655 + int __net_init __ip_vs_sync_init(struct net *net) 1669 1656 { 1670 1657 struct netns_ipvs *ipvs = net_ipvs(net); 1671 1658 ··· 1677 1668 return 0; 1678 1669 } 1679 1670 1680 - static void __ip_vs_sync_cleanup(struct net *net) 1671 + void __ip_vs_sync_cleanup(struct net *net) 1681 1672 { 1682 - stop_sync_thread(net, IP_VS_STATE_MASTER); 1683 - stop_sync_thread(net, IP_VS_STATE_BACKUP); 1673 + int retc; 1674 + 1675 + retc = stop_sync_thread(net, IP_VS_STATE_MASTER); 1676 + if (retc && retc != -ESRCH) 1677 + pr_err("Failed to stop Master Daemon\n"); 1678 + 1679 + retc = stop_sync_thread(net, IP_VS_STATE_BACKUP); 1680 + if (retc && retc != -ESRCH) 1681 + pr_err("Failed to stop Backup Daemon\n"); 1684 1682 } 1685 - 1686 - static struct pernet_operations ipvs_sync_ops = { 1687 - .init = __ip_vs_sync_init, 1688 - .exit = __ip_vs_sync_cleanup, 1689 - }; 1690 - 1691 1683 1692 1684 int __init ip_vs_sync_init(void) 1693 1685 { 1694 - return register_pernet_subsys(&ipvs_sync_ops); 1686 + return 0; 1695 1687 } 1696 1688 1697 1689 void ip_vs_sync_cleanup(void) 1698 1690 { 1699 - unregister_pernet_subsys(&ipvs_sync_ops); 1700 1691 }
+4
net/netfilter/nf_conntrack_netlink.c
··· 1334 1334 struct nf_conn *ct; 1335 1335 int err = -EINVAL; 1336 1336 struct nf_conntrack_helper *helper; 1337 + struct nf_conn_tstamp *tstamp; 1337 1338 1338 1339 ct = nf_conntrack_alloc(net, zone, otuple, rtuple, GFP_ATOMIC); 1339 1340 if (IS_ERR(ct)) ··· 1452 1451 __set_bit(IPS_EXPECTED_BIT, &ct->status); 1453 1452 ct->master = master_ct; 1454 1453 } 1454 + tstamp = nf_conn_tstamp_find(ct); 1455 + if (tstamp) 1456 + tstamp->start = ktime_to_ns(ktime_get_real()); 1455 1457 1456 1458 add_timer(&ct->timeout); 1457 1459 nf_conntrack_hash_insert(ct);
+2 -2
net/netfilter/x_tables.c
··· 455 455 vfree(xt[af].compat_tab); 456 456 xt[af].compat_tab = NULL; 457 457 xt[af].number = 0; 458 + xt[af].cur = 0; 458 459 } 459 460 } 460 461 EXPORT_SYMBOL_GPL(xt_compat_flush_offsets); ··· 474 473 else 475 474 return mid ? tmp[mid - 1].delta : 0; 476 475 } 477 - WARN_ON_ONCE(1); 478 - return 0; 476 + return left ? tmp[left - 1].delta : 0; 479 477 } 480 478 EXPORT_SYMBOL_GPL(xt_compat_calc_jump); 481 479
+1 -1
net/netfilter/xt_DSCP.c
··· 99 99 u_int8_t orig, nv; 100 100 101 101 orig = ipv6_get_dsfield(iph); 102 - nv = (orig & info->tos_mask) ^ info->tos_value; 102 + nv = (orig & ~info->tos_mask) ^ info->tos_value; 103 103 104 104 if (orig != nv) { 105 105 if (!skb_make_writable(skb, sizeof(struct iphdr)))
-5
net/netfilter/xt_conntrack.c
··· 272 272 { 273 273 int ret; 274 274 275 - if (strcmp(par->table, "raw") == 0) { 276 - pr_info("state is undetermined at the time of raw table\n"); 277 - return -EINVAL; 278 - } 279 - 280 275 ret = nf_ct_l3proto_try_module_get(par->family); 281 276 if (ret < 0) 282 277 pr_info("cannot load conntrack support for proto=%u\n",
+16 -2
net/netfilter/xt_set.c
··· 81 81 if (info->match_set.u.flags[IPSET_DIM_MAX-1] != 0) { 82 82 pr_warning("Protocol error: set match dimension " 83 83 "is over the limit!\n"); 84 + ip_set_nfnl_put(info->match_set.index); 84 85 return -ERANGE; 85 86 } 86 87 ··· 136 135 if (index == IPSET_INVALID_ID) { 137 136 pr_warning("Cannot find del_set index %u as target\n", 138 137 info->del_set.index); 138 + if (info->add_set.index != IPSET_INVALID_ID) 139 + ip_set_nfnl_put(info->add_set.index); 139 140 return -ENOENT; 140 141 } 141 142 } ··· 145 142 info->del_set.u.flags[IPSET_DIM_MAX-1] != 0) { 146 143 pr_warning("Protocol error: SET target dimension " 147 144 "is over the limit!\n"); 145 + if (info->add_set.index != IPSET_INVALID_ID) 146 + ip_set_nfnl_put(info->add_set.index); 147 + if (info->del_set.index != IPSET_INVALID_ID) 148 + ip_set_nfnl_put(info->del_set.index); 148 149 return -ERANGE; 149 150 } 150 151 ··· 199 192 if (info->match_set.dim > IPSET_DIM_MAX) { 200 193 pr_warning("Protocol error: set match dimension " 201 194 "is over the limit!\n"); 195 + ip_set_nfnl_put(info->match_set.index); 202 196 return -ERANGE; 203 197 } 204 198 ··· 227 219 if (info->del_set.index != IPSET_INVALID_ID) 228 220 ip_set_del(info->del_set.index, 229 221 skb, par->family, 230 - info->add_set.dim, 222 + info->del_set.dim, 231 223 info->del_set.flags); 232 224 233 225 return XT_CONTINUE; ··· 253 245 if (index == IPSET_INVALID_ID) { 254 246 pr_warning("Cannot find del_set index %u as target\n", 255 247 info->del_set.index); 248 + if (info->add_set.index != IPSET_INVALID_ID) 249 + ip_set_nfnl_put(info->add_set.index); 256 250 return -ENOENT; 257 251 } 258 252 } 259 253 if (info->add_set.dim > IPSET_DIM_MAX || 260 - info->del_set.flags > IPSET_DIM_MAX) { 254 + info->del_set.dim > IPSET_DIM_MAX) { 261 255 pr_warning("Protocol error: SET target dimension " 262 256 "is over the limit!\n"); 257 + if (info->add_set.index != IPSET_INVALID_ID) 258 + ip_set_nfnl_put(info->add_set.index); 259 + if (info->del_set.index != 
IPSET_INVALID_ID) 260 + ip_set_nfnl_put(info->del_set.index); 263 261 return -ERANGE; 264 262 } 265 263
+4
net/sctp/associola.c
··· 569 569 sctp_assoc_set_primary(asoc, transport); 570 570 if (asoc->peer.active_path == peer) 571 571 asoc->peer.active_path = transport; 572 + if (asoc->peer.retran_path == peer) 573 + asoc->peer.retran_path = transport; 572 574 if (asoc->peer.last_data_from == peer) 573 575 asoc->peer.last_data_from = transport; 574 576 ··· 1325 1323 1326 1324 if (t) 1327 1325 asoc->peer.retran_path = t; 1326 + else 1327 + t = asoc->peer.retran_path; 1328 1328 1329 1329 SCTP_DEBUG_PRINTK_IPADDR("sctp_assoc_update_retran_path:association" 1330 1330 " %p addr: ",
+1 -1
net/sctp/ulpevent.c
··· 554 554 memcpy(&ssf->ssf_info, &chunk->sinfo, sizeof(struct sctp_sndrcvinfo)); 555 555 556 556 /* Per TSVWG discussion with Randy. Allow the application to 557 - * resemble a fragmented message. 557 + * reassemble a fragmented message. 558 558 */ 559 559 ssf->ssf_info.sinfo_flags = chunk->chunk_hdr->flags; 560 560
+4 -5
net/sunrpc/Kconfig
··· 18 18 If unsure, say N. 19 19 20 20 config RPCSEC_GSS_KRB5 21 - tristate 21 + tristate "Secure RPC: Kerberos V mechanism" 22 22 depends on SUNRPC && CRYPTO 23 - prompt "Secure RPC: Kerberos V mechanism" if !(NFS_V4 || NFSD_V4) 23 + depends on CRYPTO_MD5 && CRYPTO_DES && CRYPTO_CBC && CRYPTO_CTS 24 + depends on CRYPTO_ECB && CRYPTO_HMAC && CRYPTO_SHA1 && CRYPTO_AES 25 + depends on CRYPTO_ARC4 24 26 default y 25 27 select SUNRPC_GSS 26 - select CRYPTO_MD5 27 - select CRYPTO_DES 28 - select CRYPTO_CBC 29 28 help 30 29 Choose Y here to enable Secure RPC using the Kerberos version 5 31 30 GSS-API mechanism (RFC 1964).
+5 -3
net/sunrpc/auth_gss/auth_gss.c
··· 520 520 warn_gssd(); 521 521 task->tk_timeout = 15*HZ; 522 522 rpc_sleep_on(&pipe_version_rpc_waitqueue, task, NULL); 523 - return 0; 523 + return -EAGAIN; 524 524 } 525 525 if (IS_ERR(gss_msg)) { 526 526 err = PTR_ERR(gss_msg); ··· 563 563 if (PTR_ERR(gss_msg) == -EAGAIN) { 564 564 err = wait_event_interruptible_timeout(pipe_version_waitqueue, 565 565 pipe_version >= 0, 15*HZ); 566 + if (pipe_version < 0) { 567 + warn_gssd(); 568 + err = -EACCES; 569 + } 566 570 if (err) 567 571 goto out; 568 - if (pipe_version < 0) 569 - warn_gssd(); 570 572 goto retry; 571 573 } 572 574 if (IS_ERR(gss_msg)) {
+4 -1
net/sunrpc/clnt.c
··· 1508 1508 if (clnt->cl_chatty) 1509 1509 printk(KERN_NOTICE "%s: server %s not responding, timed out\n", 1510 1510 clnt->cl_protname, clnt->cl_server); 1511 - rpc_exit(task, -EIO); 1511 + if (task->tk_flags & RPC_TASK_TIMEOUT) 1512 + rpc_exit(task, -ETIMEDOUT); 1513 + else 1514 + rpc_exit(task, -EIO); 1512 1515 return; 1513 1516 } 1514 1517
+1
net/sunrpc/xprt.c
··· 906 906 } 907 907 908 908 dprintk("RPC: %5u xmit complete\n", task->tk_pid); 909 + task->tk_flags |= RPC_TASK_SENT; 909 910 spin_lock_bh(&xprt->transport_lock); 910 911 911 912 xprt->ops->set_retrans_timeout(task);
+15 -1
net/unix/af_unix.c
··· 524 524 int, int); 525 525 static int unix_seqpacket_sendmsg(struct kiocb *, struct socket *, 526 526 struct msghdr *, size_t); 527 + static int unix_seqpacket_recvmsg(struct kiocb *, struct socket *, 528 + struct msghdr *, size_t, int); 527 529 528 530 static const struct proto_ops unix_stream_ops = { 529 531 .family = PF_UNIX, ··· 585 583 .setsockopt = sock_no_setsockopt, 586 584 .getsockopt = sock_no_getsockopt, 587 585 .sendmsg = unix_seqpacket_sendmsg, 588 - .recvmsg = unix_dgram_recvmsg, 586 + .recvmsg = unix_seqpacket_recvmsg, 589 587 .mmap = sock_no_mmap, 590 588 .sendpage = sock_no_sendpage, 591 589 }; ··· 1699 1697 msg->msg_namelen = 0; 1700 1698 1701 1699 return unix_dgram_sendmsg(kiocb, sock, msg, len); 1700 + } 1701 + 1702 + static int unix_seqpacket_recvmsg(struct kiocb *iocb, struct socket *sock, 1703 + struct msghdr *msg, size_t size, 1704 + int flags) 1705 + { 1706 + struct sock *sk = sock->sk; 1707 + 1708 + if (sk->sk_state != TCP_ESTABLISHED) 1709 + return -ENOTCONN; 1710 + 1711 + return unix_dgram_recvmsg(iocb, sock, msg, size, flags); 1702 1712 } 1703 1713 1704 1714 static void unix_copy_addr(struct msghdr *msg, struct sock *sk)
+13 -1
net/xfrm/xfrm_policy.c
··· 1406 1406 struct net *net = xp_net(policy); 1407 1407 unsigned long now = jiffies; 1408 1408 struct net_device *dev; 1409 + struct xfrm_mode *inner_mode; 1409 1410 struct dst_entry *dst_prev = NULL; 1410 1411 struct dst_entry *dst0 = NULL; 1411 1412 int i = 0; ··· 1437 1436 goto put_states; 1438 1437 } 1439 1438 1439 + if (xfrm[i]->sel.family == AF_UNSPEC) { 1440 + inner_mode = xfrm_ip2inner_mode(xfrm[i], 1441 + xfrm_af2proto(family)); 1442 + if (!inner_mode) { 1443 + err = -EAFNOSUPPORT; 1444 + dst_release(dst); 1445 + goto put_states; 1446 + } 1447 + } else 1448 + inner_mode = xfrm[i]->inner_mode; 1449 + 1440 1450 if (!dst_prev) 1441 1451 dst0 = dst1; 1442 1452 else { ··· 1476 1464 dst1->lastuse = now; 1477 1465 1478 1466 dst1->input = dst_discard; 1479 - dst1->output = xfrm[i]->outer_mode->afinfo->output; 1467 + dst1->output = inner_mode->afinfo->output; 1480 1468 1481 1469 dst1->next = dst_prev; 1482 1470 dst_prev = dst1;
+4 -1
net/xfrm/xfrm_replay.c
··· 532 532 533 533 if (replay_esn) { 534 534 if (replay_esn->replay_window > 535 - replay_esn->bmp_len * sizeof(__u32)) 535 + replay_esn->bmp_len * sizeof(__u32) * 8) 536 536 return -EINVAL; 537 + 538 + if ((x->props.flags & XFRM_STATE_ESN) && replay_esn->replay_window == 0) 539 + return -EINVAL; 537 540 538 541 if ((x->props.flags & XFRM_STATE_ESN) && x->replay_esn) 539 542 x->repl = &xfrm_replay_esn;
+3
net/xfrm/xfrm_user.c
··· 124 124 { 125 125 struct nlattr *rt = attrs[XFRMA_REPLAY_ESN_VAL]; 126 126 127 + if ((p->flags & XFRM_STATE_ESN) && !rt) 128 + return -EINVAL; 129 + 127 130 if (!rt) 128 131 return 0; 129 132
+1 -1
scripts/kconfig/conf.c
··· 332 332 } 333 333 if (!child) 334 334 continue; 335 - if (line[strlen(line) - 1] == '?') { 335 + if (line[0] && line[strlen(line) - 1] == '?') { 336 336 print_help(child); 337 337 continue; 338 338 }
+1 -1
security/capability.c
··· 181 181 return 0; 182 182 } 183 183 184 - static int cap_inode_permission(struct inode *inode, int mask) 184 + static int cap_inode_permission(struct inode *inode, int mask, unsigned flags) 185 185 { 186 186 return 0; 187 187 }
+2 -4
security/security.c
··· 518 518 { 519 519 if (unlikely(IS_PRIVATE(inode))) 520 520 return 0; 521 - return security_ops->inode_permission(inode, mask); 521 + return security_ops->inode_permission(inode, mask, 0); 522 522 } 523 523 524 524 int security_inode_exec_permission(struct inode *inode, unsigned int flags) 525 525 { 526 526 if (unlikely(IS_PRIVATE(inode))) 527 527 return 0; 528 - if (flags) 529 - return -ECHILD; 530 - return security_ops->inode_permission(inode, MAY_EXEC); 528 + return security_ops->inode_permission(inode, MAY_EXEC, flags); 531 529 } 532 530 533 531 int security_inode_setattr(struct dentry *dentry, struct iattr *attr)
+29 -7
security/selinux/avc.c
··· 471 471 * @avd: access vector decisions 472 472 * @result: result from avc_has_perm_noaudit 473 473 * @a: auxiliary audit data 474 + * @flags: VFS walk flags 474 475 * 475 476 * Audit the granting or denial of permissions in accordance 476 477 * with the policy. This function is typically called by ··· 482 481 * be performed under a lock, to allow the lock to be released 483 482 * before calling the auditing code. 484 483 */ 485 - void avc_audit(u32 ssid, u32 tsid, 484 + int avc_audit(u32 ssid, u32 tsid, 486 485 u16 tclass, u32 requested, 487 - struct av_decision *avd, int result, struct common_audit_data *a) 486 + struct av_decision *avd, int result, struct common_audit_data *a, 487 + unsigned flags) 488 488 { 489 489 struct common_audit_data stack_data; 490 490 u32 denied, audited; ··· 517 515 else 518 516 audited = requested & avd->auditallow; 519 517 if (!audited) 520 - return; 518 + return 0; 519 + 521 520 if (!a) { 522 521 a = &stack_data; 523 522 COMMON_AUDIT_DATA_INIT(a, NONE); 524 523 } 524 + 525 + /* 526 + * When in a RCU walk do the audit on the RCU retry. This is because 527 + * the collection of the dname in an inode audit message is not RCU 528 + * safe. Note this may drop some audits when the situation changes 529 + * during retry. However this is logically just as if the operation 530 + * happened a little later. 
531 + */ 532 + if ((a->type == LSM_AUDIT_DATA_FS) && 533 + (flags & IPERM_FLAG_RCU)) 534 + return -ECHILD; 535 + 525 536 a->selinux_audit_data.tclass = tclass; 526 537 a->selinux_audit_data.requested = requested; 527 538 a->selinux_audit_data.ssid = ssid; ··· 544 529 a->lsm_pre_audit = avc_audit_pre_callback; 545 530 a->lsm_post_audit = avc_audit_post_callback; 546 531 common_lsm_audit(a); 532 + return 0; 547 533 } 548 534 549 535 /** ··· 809 793 * @tclass: target security class 810 794 * @requested: requested permissions, interpreted based on @tclass 811 795 * @auditdata: auxiliary audit data 796 + * @flags: VFS walk flags 812 797 * 813 798 * Check the AVC to determine whether the @requested permissions are granted 814 799 * for the SID pair (@ssid, @tsid), interpreting the permissions ··· 819 802 * permissions are granted, -%EACCES if any permissions are denied, or 820 803 * another -errno upon other errors. 821 804 */ 822 - int avc_has_perm(u32 ssid, u32 tsid, u16 tclass, 823 - u32 requested, struct common_audit_data *auditdata) 805 + int avc_has_perm_flags(u32 ssid, u32 tsid, u16 tclass, 806 + u32 requested, struct common_audit_data *auditdata, 807 + unsigned flags) 824 808 { 825 809 struct av_decision avd; 826 - int rc; 810 + int rc, rc2; 827 811 828 812 rc = avc_has_perm_noaudit(ssid, tsid, tclass, requested, 0, &avd); 829 - avc_audit(ssid, tsid, tclass, requested, &avd, rc, auditdata); 813 + 814 + rc2 = avc_audit(ssid, tsid, tclass, requested, &avd, rc, auditdata, 815 + flags); 816 + if (rc2) 817 + return rc2; 830 818 return rc; 831 819 } 832 820
+16 -11
security/selinux/hooks.c
··· 1446 1446 } 1447 1447 1448 1448 rc = avc_has_perm_noaudit(sid, sid, sclass, av, 0, &avd); 1449 - if (audit == SECURITY_CAP_AUDIT) 1450 - avc_audit(sid, sid, sclass, av, &avd, rc, &ad); 1449 + if (audit == SECURITY_CAP_AUDIT) { 1450 + int rc2 = avc_audit(sid, sid, sclass, av, &avd, rc, &ad, 0); 1451 + if (rc2) 1452 + return rc2; 1453 + } 1451 1454 return rc; 1452 1455 } 1453 1456 ··· 1470 1467 static int inode_has_perm(const struct cred *cred, 1471 1468 struct inode *inode, 1472 1469 u32 perms, 1473 - struct common_audit_data *adp) 1470 + struct common_audit_data *adp, 1471 + unsigned flags) 1474 1472 { 1475 1473 struct inode_security_struct *isec; 1476 1474 struct common_audit_data ad; ··· 1491 1487 ad.u.fs.inode = inode; 1492 1488 } 1493 1489 1494 - return avc_has_perm(sid, isec->sid, isec->sclass, perms, adp); 1490 + return avc_has_perm_flags(sid, isec->sid, isec->sclass, perms, adp, flags); 1495 1491 } 1496 1492 1497 1493 /* Same as inode_has_perm, but pass explicit audit data containing ··· 1508 1504 COMMON_AUDIT_DATA_INIT(&ad, FS); 1509 1505 ad.u.fs.path.mnt = mnt; 1510 1506 ad.u.fs.path.dentry = dentry; 1511 - return inode_has_perm(cred, inode, av, &ad); 1507 + return inode_has_perm(cred, inode, av, &ad, 0); 1512 1508 } 1513 1509 1514 1510 /* Check whether a task can use an open file descriptor to ··· 1544 1540 /* av is zero if only checking access to the descriptor. 
*/ 1545 1541 rc = 0; 1546 1542 if (av) 1547 - rc = inode_has_perm(cred, inode, av, &ad); 1543 + rc = inode_has_perm(cred, inode, av, &ad, 0); 1548 1544 1549 1545 out: 1550 1546 return rc; ··· 1578 1574 return rc; 1579 1575 1580 1576 if (!newsid || !(sbsec->flags & SE_SBLABELSUPP)) { 1581 - rc = security_transition_sid(sid, dsec->sid, tclass, NULL, &newsid); 1577 + rc = security_transition_sid(sid, dsec->sid, tclass, 1578 + &dentry->d_name, &newsid); 1582 1579 if (rc) 1583 1580 return rc; 1584 1581 } ··· 2108 2103 file = file_priv->file; 2109 2104 inode = file->f_path.dentry->d_inode; 2110 2105 if (inode_has_perm(cred, inode, 2111 - FILE__READ | FILE__WRITE, NULL)) { 2106 + FILE__READ | FILE__WRITE, NULL, 0)) { 2112 2107 drop_tty = 1; 2113 2108 } 2114 2109 } ··· 2640 2635 return dentry_has_perm(cred, NULL, dentry, FILE__READ); 2641 2636 } 2642 2637 2643 - static int selinux_inode_permission(struct inode *inode, int mask) 2638 + static int selinux_inode_permission(struct inode *inode, int mask, unsigned flags) 2644 2639 { 2645 2640 const struct cred *cred = current_cred(); 2646 2641 struct common_audit_data ad; ··· 2662 2657 2663 2658 perms = file_mask_to_av(inode->i_mode, mask); 2664 2659 2665 - return inode_has_perm(cred, inode, perms, &ad); 2660 + return inode_has_perm(cred, inode, perms, &ad, flags); 2666 2661 } 2667 2662 2668 2663 static int selinux_inode_setattr(struct dentry *dentry, struct iattr *iattr) ··· 3210 3205 * new inode label or new policy. 3211 3206 * This check is not redundant - do not remove. 3212 3207 */ 3213 - return inode_has_perm(cred, inode, open_file_to_av(file), NULL); 3208 + return inode_has_perm(cred, inode, open_file_to_av(file), NULL, 0); 3214 3209 } 3215 3210 3216 3211 /* task security operations */
+13 -5
security/selinux/include/avc.h
··· 54 54 55 55 void __init avc_init(void); 56 56 57 - void avc_audit(u32 ssid, u32 tsid, 57 + int avc_audit(u32 ssid, u32 tsid, 58 58 u16 tclass, u32 requested, 59 59 struct av_decision *avd, 60 60 int result, 61 - struct common_audit_data *a); 61 + struct common_audit_data *a, unsigned flags); 62 62 63 63 #define AVC_STRICT 1 /* Ignore permissive mode. */ 64 64 int avc_has_perm_noaudit(u32 ssid, u32 tsid, ··· 66 66 unsigned flags, 67 67 struct av_decision *avd); 68 68 69 - int avc_has_perm(u32 ssid, u32 tsid, 70 - u16 tclass, u32 requested, 71 - struct common_audit_data *auditdata); 69 + int avc_has_perm_flags(u32 ssid, u32 tsid, 70 + u16 tclass, u32 requested, 71 + struct common_audit_data *auditdata, 72 + unsigned); 73 + 74 + static inline int avc_has_perm(u32 ssid, u32 tsid, 75 + u16 tclass, u32 requested, 76 + struct common_audit_data *auditdata) 77 + { 78 + return avc_has_perm_flags(ssid, tsid, tclass, requested, auditdata, 0); 79 + } 72 80 73 81 u32 avc_policy_seqno(void); 74 82
+3 -7
security/selinux/ss/policydb.c
··· 502 502 goto out; 503 503 504 504 rc = flex_array_prealloc(p->type_val_to_struct_array, 0, 505 - p->p_types.nprim - 1, GFP_KERNEL | __GFP_ZERO); 505 + p->p_types.nprim, GFP_KERNEL | __GFP_ZERO); 506 506 if (rc) 507 507 goto out; 508 508 ··· 519 519 goto out; 520 520 521 521 rc = flex_array_prealloc(p->sym_val_to_name[i], 522 - 0, p->symtab[i].nprim - 1, 522 + 0, p->symtab[i].nprim, 523 523 GFP_KERNEL | __GFP_ZERO); 524 524 if (rc) 525 525 goto out; ··· 1819 1819 goto out; 1820 1820 nel = le32_to_cpu(buf[0]); 1821 1821 1822 - printk(KERN_ERR "%s: nel=%d\n", __func__, nel); 1823 - 1824 1822 last = p->filename_trans; 1825 1823 while (last && last->next) 1826 1824 last = last->next; ··· 1854 1856 if (rc) 1855 1857 goto out; 1856 1858 name[len] = 0; 1857 - 1858 - printk(KERN_ERR "%s: ft=%p ft->name=%p ft->name=%s\n", __func__, ft, ft->name, ft->name); 1859 1859 1860 1860 rc = next_entry(buf, fp, sizeof(u32) * 4); 1861 1861 if (rc) ··· 2371 2375 goto bad; 2372 2376 2373 2377 /* preallocate so we don't have to worry about the put ever failing */ 2374 - rc = flex_array_prealloc(p->type_attr_map_array, 0, p->p_types.nprim - 1, 2378 + rc = flex_array_prealloc(p->type_attr_map_array, 0, p->p_types.nprim, 2375 2379 GFP_KERNEL | __GFP_ZERO); 2376 2380 if (rc) 2377 2381 goto bad;
+5 -1
security/smack/smack_lsm.c
··· 686 686 * 687 687 * Returns 0 if access is permitted, -EACCES otherwise 688 688 */ 689 - static int smack_inode_permission(struct inode *inode, int mask) 689 + static int smack_inode_permission(struct inode *inode, int mask, unsigned flags) 690 690 { 691 691 struct smk_audit_info ad; 692 692 ··· 696 696 */ 697 697 if (mask == 0) 698 698 return 0; 699 + 700 + /* May be droppable after audit */ 701 + if (flags & IPERM_FLAG_RCU) 702 + return -ECHILD; 699 703 smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_FS); 700 704 smk_ad_setfield_u_fs_inode(&ad, inode); 701 705 return smk_curacc(smk_of_inode(inode), mask, &ad);
+1 -1
sound/aoa/codecs/tas.c
··· 170 170 /* analysing the volume and mixer tables shows 171 171 * that they are similar enough when we shift 172 172 * the mixer table down by 4 bits. The error 173 - * is minuscule, in just one item the error 173 + * is miniscule, in just one item the error 174 174 * is 1, at a value of 0x07f17b (mixer table 175 175 * value is 0x07f17a) */ 176 176 tmp = tas_gaintable[left];
+5 -2
sound/pci/au88x0/au88x0_pcm.c
··· 44 44 .channels_min = 1, 45 45 .channels_max = 2, 46 46 .buffer_bytes_max = 0x10000, 47 - .period_bytes_min = 0x1, 47 + .period_bytes_min = 0x20, 48 48 .period_bytes_max = 0x1000, 49 49 .periods_min = 2, 50 - .periods_max = 32, 50 + .periods_max = 1024, 51 51 }; 52 52 53 53 #ifndef CHIP_AU8820 ··· 139 139 snd_pcm_hw_constraint_pow2(runtime, 0, 140 140 SNDRV_PCM_HW_PARAM_PERIOD_BYTES)) < 0) 141 141 return err; 142 + 143 + snd_pcm_hw_constraint_step(runtime, 0, 144 + SNDRV_PCM_HW_PARAM_BUFFER_BYTES, 64); 142 145 143 146 if (VORTEX_PCM_TYPE(substream->pcm) != VORTEX_PCM_WT) { 144 147 #ifndef CHIP_AU8820
+4
sound/pci/hda/hda_codec.c
··· 937 937 } 938 938 EXPORT_SYMBOL_HDA(snd_hda_shutup_pins); 939 939 940 + #ifdef SND_HDA_NEEDS_RESUME 940 941 /* Restore the pin controls cleared previously via snd_hda_shutup_pins() */ 941 942 static void restore_shutup_pins(struct hda_codec *codec) 942 943 { ··· 954 953 } 955 954 codec->pins_shutup = 0; 956 955 } 956 + #endif 957 957 958 958 static void init_hda_cache(struct hda_cache_rec *cache, 959 959 unsigned int record_size); ··· 1331 1329 } 1332 1330 } 1333 1331 1332 + #ifdef SND_HDA_NEEDS_RESUME 1334 1333 /* clean up all streams; called from suspend */ 1335 1334 static void hda_cleanup_all_streams(struct hda_codec *codec) 1336 1335 { ··· 1343 1340 really_cleanup_stream(codec, p); 1344 1341 } 1345 1342 } 1343 + #endif 1346 1344 1347 1345 /* 1348 1346 * amp access functions
+35 -20
sound/pci/hda/patch_realtek.c
··· 1704 1704 codec->chip_name, fix->type); 1705 1705 break; 1706 1706 } 1707 - if (!fix[id].chained) 1707 + if (!fix->chained) 1708 1708 break; 1709 1709 if (++depth > 10) 1710 1710 break; 1711 - id = fix[id].chain_id; 1711 + id = fix->chain_id; 1712 1712 } 1713 1713 } 1714 1714 ··· 5645 5645 static struct snd_pci_quirk beep_white_list[] = { 5646 5646 SND_PCI_QUIRK(0x1043, 0x829f, "ASUS", 1), 5647 5647 SND_PCI_QUIRK(0x1043, 0x83ce, "EeePC", 1), 5648 + SND_PCI_QUIRK(0x1043, 0x831a, "EeePC", 1), 5648 5649 SND_PCI_QUIRK(0x8086, 0xd613, "Intel", 1), 5649 5650 {} 5650 5651 }; ··· 9864 9863 SND_PCI_QUIRK(0x1071, 0x8258, "Evesham Voyaeger", ALC883_LAPTOP_EAPD), 9865 9864 SND_PCI_QUIRK(0x10f1, 0x2350, "TYAN-S2350", ALC888_6ST_DELL), 9866 9865 SND_PCI_QUIRK(0x108e, 0x534d, NULL, ALC883_3ST_6ch), 9866 + SND_PCI_QUIRK(0x1458, 0xa002, "Gigabyte P35 DS3R", ALC882_6ST_DIG), 9867 9867 9868 9868 SND_PCI_QUIRK(0x1462, 0x0349, "MSI", ALC883_TARGA_2ch_DIG), 9869 9869 SND_PCI_QUIRK(0x1462, 0x040d, "MSI", ALC883_TARGA_2ch_DIG), ··· 10701 10699 PINFIX_LENOVO_Y530, 10702 10700 PINFIX_PB_M5210, 10703 10701 PINFIX_ACER_ASPIRE_7736, 10704 - PINFIX_GIGABYTE_880GM, 10705 10702 }; 10706 10703 10707 10704 static const struct alc_fixup alc882_fixups[] = { ··· 10732 10731 .type = ALC_FIXUP_SKU, 10733 10732 .v.sku = ALC_FIXUP_SKU_IGNORE, 10734 10733 }, 10735 - [PINFIX_GIGABYTE_880GM] = { 10736 - .type = ALC_FIXUP_PINS, 10737 - .v.pins = (const struct alc_pincfg[]) { 10738 - { 0x14, 0x1114410 }, /* set as speaker */ 10739 - { } 10740 - } 10741 - }, 10742 10734 }; 10743 10735 10744 10736 static struct snd_pci_quirk alc882_fixup_tbl[] = { ··· 10739 10745 SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo Y530", PINFIX_LENOVO_Y530), 10740 10746 SND_PCI_QUIRK(0x147b, 0x107a, "Abit AW9D-MAX", PINFIX_ABIT_AW9D_MAX), 10741 10747 SND_PCI_QUIRK(0x1025, 0x0296, "Acer Aspire 7736z", PINFIX_ACER_ASPIRE_7736), 10742 - SND_PCI_QUIRK(0x1458, 0xa002, "Gigabyte", PINFIX_GIGABYTE_880GM), 10743 10748 {} 10744 10749 }; 10745 
10750 ··· 14861 14868 alc_write_coef_idx(codec, 0x1e, coef | 0x80); 14862 14869 } 14863 14870 14871 + static void alc271_fixup_dmic(struct hda_codec *codec, 14872 + const struct alc_fixup *fix, int action) 14873 + { 14874 + static struct hda_verb verbs[] = { 14875 + {0x20, AC_VERB_SET_COEF_INDEX, 0x0d}, 14876 + {0x20, AC_VERB_SET_PROC_COEF, 0x4000}, 14877 + {} 14878 + }; 14879 + unsigned int cfg; 14880 + 14881 + if (strcmp(codec->chip_name, "ALC271X")) 14882 + return; 14883 + cfg = snd_hda_codec_get_pincfg(codec, 0x12); 14884 + if (get_defcfg_connect(cfg) == AC_JACK_PORT_FIXED) 14885 + snd_hda_sequence_write(codec, verbs); 14886 + } 14887 + 14864 14888 enum { 14865 14889 ALC269_FIXUP_SONY_VAIO, 14866 14890 ALC275_FIXUP_SONY_VAIO_GPIO2, ··· 14886 14876 ALC269_FIXUP_ASUS_G73JW, 14887 14877 ALC269_FIXUP_LENOVO_EAPD, 14888 14878 ALC275_FIXUP_SONY_HWEQ, 14879 + ALC271_FIXUP_DMIC, 14889 14880 }; 14890 14881 14891 14882 static const struct alc_fixup alc269_fixups[] = { ··· 14940 14929 .v.func = alc269_fixup_hweq, 14941 14930 .chained = true, 14942 14931 .chain_id = ALC275_FIXUP_SONY_VAIO_GPIO2 14943 - } 14932 + }, 14933 + [ALC271_FIXUP_DMIC] = { 14934 + .type = ALC_FIXUP_FUNC, 14935 + .v.func = alc271_fixup_dmic, 14936 + }, 14944 14937 }; 14945 14938 14946 14939 static struct snd_pci_quirk alc269_fixup_tbl[] = { ··· 14953 14938 SND_PCI_QUIRK(0x104d, 0x9084, "Sony VAIO", ALC275_FIXUP_SONY_HWEQ), 14954 14939 SND_PCI_QUIRK_VENDOR(0x104d, "Sony VAIO", ALC269_FIXUP_SONY_VAIO), 14955 14940 SND_PCI_QUIRK(0x1028, 0x0470, "Dell M101z", ALC269_FIXUP_DELL_M101Z), 14941 + SND_PCI_QUIRK_VENDOR(0x1025, "Acer Aspire", ALC271_FIXUP_DMIC), 14956 14942 SND_PCI_QUIRK(0x17aa, 0x20f2, "Thinkpad SL410/510", ALC269_FIXUP_SKU_IGNORE), 14957 14943 SND_PCI_QUIRK(0x17aa, 0x215e, "Thinkpad L512", ALC269_FIXUP_SKU_IGNORE), 14958 14944 SND_PCI_QUIRK(0x17aa, 0x21b8, "Thinkpad Edge 14", ALC269_FIXUP_SKU_IGNORE), ··· 18798 18782 ALC662_3ST_6ch_DIG), 18799 18783 SND_PCI_QUIRK(0x1179, 0xff6e, "Toshiba 
NB20x", ALC662_AUTO), 18800 18784 SND_PCI_QUIRK(0x144d, 0xca00, "Samsung NC10", ALC272_SAMSUNG_NC10), 18785 + SND_PCI_QUIRK(0x1458, 0xa002, "Gigabyte 945GCM-S2L", 18786 + ALC662_3ST_6ch_DIG), 18801 18787 SND_PCI_QUIRK(0x152d, 0x2304, "Quanta WH1", ALC663_ASUS_H13), 18802 18788 SND_PCI_QUIRK(0x1565, 0x820f, "Biostar TA780G M2+", ALC662_3ST_6ch_DIG), 18803 18789 SND_PCI_QUIRK(0x1631, 0xc10c, "PB RS65", ALC663_ASUS_M51VA), ··· 19473 19455 ALC662_FIXUP_IDEAPAD, 19474 19456 ALC272_FIXUP_MARIO, 19475 19457 ALC662_FIXUP_CZC_P10T, 19476 - ALC662_FIXUP_GIGABYTE, 19458 + ALC662_FIXUP_SKU_IGNORE, 19477 19459 }; 19478 19460 19479 19461 static const struct alc_fixup alc662_fixups[] = { ··· 19502 19484 {} 19503 19485 } 19504 19486 }, 19505 - [ALC662_FIXUP_GIGABYTE] = { 19506 - .type = ALC_FIXUP_PINS, 19507 - .v.pins = (const struct alc_pincfg[]) { 19508 - { 0x14, 0x1114410 }, /* set as speaker */ 19509 - { } 19510 - } 19487 + [ALC662_FIXUP_SKU_IGNORE] = { 19488 + .type = ALC_FIXUP_SKU, 19489 + .v.sku = ALC_FIXUP_SKU_IGNORE, 19511 19490 }, 19512 19491 }; 19513 19492 19514 19493 static struct snd_pci_quirk alc662_fixup_tbl[] = { 19515 19494 SND_PCI_QUIRK(0x1025, 0x0308, "Acer Aspire 8942G", ALC662_FIXUP_ASPIRE), 19495 + SND_PCI_QUIRK(0x1025, 0x031c, "Gateway NV79", ALC662_FIXUP_SKU_IGNORE), 19516 19496 SND_PCI_QUIRK(0x1025, 0x038b, "Acer Aspire 8943G", ALC662_FIXUP_ASPIRE), 19517 19497 SND_PCI_QUIRK(0x144d, 0xc051, "Samsung R720", ALC662_FIXUP_IDEAPAD), 19518 - SND_PCI_QUIRK(0x1458, 0xa002, "Gigabyte", ALC662_FIXUP_GIGABYTE), 19519 19498 SND_PCI_QUIRK(0x17aa, 0x38af, "Lenovo Ideapad Y550P", ALC662_FIXUP_IDEAPAD), 19520 19499 SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo Ideapad Y550", ALC662_FIXUP_IDEAPAD), 19521 19500 SND_PCI_QUIRK(0x1b35, 0x2206, "CZC P10T", ALC662_FIXUP_CZC_P10T),
+7 -3
sound/pci/hda/patch_via.c
··· 1292 1292 { 1293 1293 int i; 1294 1294 struct snd_ctl_elem_id id; 1295 - const char *labels[] = {"Mic", "Front Mic", "Line"}; 1295 + const char *labels[] = {"Mic", "Front Mic", "Line", "Rear Mic"}; 1296 + struct snd_kcontrol *ctl; 1296 1297 1297 1298 memset(&id, 0, sizeof(id)); 1298 1299 id.iface = SNDRV_CTL_ELEM_IFACE_MIXER; 1299 1300 for (i = 0; i < ARRAY_SIZE(labels); i++) { 1300 1301 sprintf(id.name, "%s Playback Volume", labels[i]); 1301 - snd_ctl_notify(codec->bus->card, SNDRV_CTL_EVENT_MASK_VALUE, 1302 - &id); 1302 + ctl = snd_hda_find_mixer_ctl(codec, id.name); 1303 + if (ctl) 1304 + snd_ctl_notify(codec->bus->card, 1305 + SNDRV_CTL_EVENT_MASK_VALUE, 1306 + &ctl->id); 1303 1307 } 1304 1308 } 1305 1309
-2
sound/soc/codecs/jz4740.c
··· 308 308 snd_soc_dapm_add_routes(dapm, jz4740_codec_dapm_routes, 309 309 ARRAY_SIZE(jz4740_codec_dapm_routes)); 310 310 311 - snd_soc_dapm_new_widgets(codec); 312 - 313 311 jz4740_codec_set_bias_level(codec, SND_SOC_BIAS_STANDBY); 314 312 315 313 return 0;
+1 -1
sound/soc/codecs/sn95031.c
··· 927 927 .owner = THIS_MODULE, 928 928 }, 929 929 .probe = sn95031_device_probe, 930 - .remove = sn95031_device_remove, 930 + .remove = __devexit_p(sn95031_device_remove), 931 931 }; 932 932 933 933 static int __init sn95031_init(void)
+5 -5
sound/soc/codecs/ssm2602.c
··· 139 139 SOC_DOUBLE_R("Capture Switch", SSM2602_LINVOL, SSM2602_RINVOL, 7, 1, 1), 140 140 141 141 SOC_SINGLE("Mic Boost (+20dB)", SSM2602_APANA, 0, 1, 0), 142 - SOC_SINGLE("Mic Boost2 (+20dB)", SSM2602_APANA, 7, 1, 0), 142 + SOC_SINGLE("Mic Boost2 (+20dB)", SSM2602_APANA, 8, 1, 0), 143 143 SOC_SINGLE("Mic Switch", SSM2602_APANA, 1, 1, 1), 144 144 145 145 SOC_SINGLE("Sidetone Playback Volume", SSM2602_APANA, 6, 3, 1), ··· 602 602 .read = ssm2602_read_reg_cache, 603 603 .write = ssm2602_write, 604 604 .set_bias_level = ssm2602_set_bias_level, 605 - .reg_cache_size = sizeof(ssm2602_reg), 605 + .reg_cache_size = ARRAY_SIZE(ssm2602_reg), 606 606 .reg_word_size = sizeof(u16), 607 607 .reg_cache_default = ssm2602_reg, 608 608 }; ··· 614 614 * low = 0x1a 615 615 * high = 0x1b 616 616 */ 617 - static int ssm2602_i2c_probe(struct i2c_client *i2c, 617 + static int __devinit ssm2602_i2c_probe(struct i2c_client *i2c, 618 618 const struct i2c_device_id *id) 619 619 { 620 620 struct ssm2602_priv *ssm2602; ··· 635 635 return ret; 636 636 } 637 637 638 - static int ssm2602_i2c_remove(struct i2c_client *client) 638 + static int __devexit ssm2602_i2c_remove(struct i2c_client *client) 639 639 { 640 640 snd_soc_unregister_codec(&client->dev); 641 641 kfree(i2c_get_clientdata(client)); ··· 655 655 .owner = THIS_MODULE, 656 656 }, 657 657 .probe = ssm2602_i2c_probe, 658 - .remove = ssm2602_i2c_remove, 658 + .remove = __devexit_p(ssm2602_i2c_remove), 659 659 .id_table = ssm2602_i2c_id, 660 660 }; 661 661 #endif
-2
sound/soc/codecs/uda134x.c
··· 601 601 .reg_cache_step = 1, 602 602 .read = uda134x_read_reg_cache, 603 603 .write = uda134x_write, 604 - #ifdef POWER_OFF_ON_STANDBY 605 604 .set_bias_level = uda134x_set_bias_level, 606 - #endif 607 605 }; 608 606 609 607 static int __devinit uda134x_codec_probe(struct platform_device *pdev)
+24 -16
sound/soc/codecs/wm8903.c
··· 247 247 case WM8903_REVISION_NUMBER: 248 248 case WM8903_INTERRUPT_STATUS_1: 249 249 case WM8903_WRITE_SEQUENCER_4: 250 - case WM8903_POWER_MANAGEMENT_3: 251 - case WM8903_POWER_MANAGEMENT_2: 252 250 case WM8903_DC_SERVO_READBACK_1: 253 251 case WM8903_DC_SERVO_READBACK_2: 254 252 case WM8903_DC_SERVO_READBACK_3: ··· 692 694 SOC_SINGLE_TLV("DRC Startup Volume", WM8903_DRC_0, 6, 18, 0, drc_tlv_startup), 693 695 694 696 SOC_DOUBLE_R_TLV("Digital Capture Volume", WM8903_ADC_DIGITAL_VOLUME_LEFT, 695 - WM8903_ADC_DIGITAL_VOLUME_RIGHT, 1, 96, 0, digital_tlv), 697 + WM8903_ADC_DIGITAL_VOLUME_RIGHT, 1, 120, 0, digital_tlv), 696 698 SOC_ENUM("ADC Companding Mode", adc_companding), 697 699 SOC_SINGLE("ADC Companding Switch", WM8903_AUDIO_INTERFACE_0, 3, 1, 0), 698 700 ··· 873 875 SND_SOC_DAPM_MIXER("Right Speaker Mixer", WM8903_POWER_MANAGEMENT_4, 0, 0, 874 876 right_speaker_mixer, ARRAY_SIZE(right_speaker_mixer)), 875 877 876 - SND_SOC_DAPM_PGA_S("Left Headphone Output PGA", 0, WM8903_ANALOGUE_HP_0, 877 - 4, 0, NULL, 0), 878 - SND_SOC_DAPM_PGA_S("Right Headphone Output PGA", 0, WM8903_ANALOGUE_HP_0, 878 + SND_SOC_DAPM_PGA_S("Left Headphone Output PGA", 0, WM8903_POWER_MANAGEMENT_2, 879 + 1, 0, NULL, 0), 880 + SND_SOC_DAPM_PGA_S("Right Headphone Output PGA", 0, WM8903_POWER_MANAGEMENT_2, 879 881 0, 0, NULL, 0), 880 882 881 - SND_SOC_DAPM_PGA_S("Left Line Output PGA", 0, WM8903_ANALOGUE_LINEOUT_0, 4, 0, 883 + SND_SOC_DAPM_PGA_S("Left Line Output PGA", 0, WM8903_POWER_MANAGEMENT_3, 1, 0, 882 884 NULL, 0), 883 - SND_SOC_DAPM_PGA_S("Right Line Output PGA", 0, WM8903_ANALOGUE_LINEOUT_0, 0, 0, 885 + SND_SOC_DAPM_PGA_S("Right Line Output PGA", 0, WM8903_POWER_MANAGEMENT_3, 0, 0, 884 886 NULL, 0), 885 887 886 888 SND_SOC_DAPM_PGA_S("HPL_RMV_SHORT", 4, WM8903_ANALOGUE_HP_0, 7, 0, NULL, 0), 887 889 SND_SOC_DAPM_PGA_S("HPL_ENA_OUTP", 3, WM8903_ANALOGUE_HP_0, 6, 0, NULL, 0), 888 - SND_SOC_DAPM_PGA_S("HPL_ENA_DLY", 1, WM8903_ANALOGUE_HP_0, 5, 0, NULL, 0), 890 + 
SND_SOC_DAPM_PGA_S("HPL_ENA_DLY", 2, WM8903_ANALOGUE_HP_0, 5, 0, NULL, 0), 891 + SND_SOC_DAPM_PGA_S("HPL_ENA", 1, WM8903_ANALOGUE_HP_0, 4, 0, NULL, 0), 889 892 SND_SOC_DAPM_PGA_S("HPR_RMV_SHORT", 4, WM8903_ANALOGUE_HP_0, 3, 0, NULL, 0), 890 893 SND_SOC_DAPM_PGA_S("HPR_ENA_OUTP", 3, WM8903_ANALOGUE_HP_0, 2, 0, NULL, 0), 891 - SND_SOC_DAPM_PGA_S("HPR_ENA_DLY", 1, WM8903_ANALOGUE_HP_0, 1, 0, NULL, 0), 894 + SND_SOC_DAPM_PGA_S("HPR_ENA_DLY", 2, WM8903_ANALOGUE_HP_0, 1, 0, NULL, 0), 895 + SND_SOC_DAPM_PGA_S("HPR_ENA", 1, WM8903_ANALOGUE_HP_0, 0, 0, NULL, 0), 892 896 893 897 SND_SOC_DAPM_PGA_S("LINEOUTL_RMV_SHORT", 4, WM8903_ANALOGUE_LINEOUT_0, 7, 0, 894 898 NULL, 0), 895 899 SND_SOC_DAPM_PGA_S("LINEOUTL_ENA_OUTP", 3, WM8903_ANALOGUE_LINEOUT_0, 6, 0, 896 900 NULL, 0), 897 - SND_SOC_DAPM_PGA_S("LINEOUTL_ENA_DLY", 1, WM8903_ANALOGUE_LINEOUT_0, 5, 0, 901 + SND_SOC_DAPM_PGA_S("LINEOUTL_ENA_DLY", 2, WM8903_ANALOGUE_LINEOUT_0, 5, 0, 902 + NULL, 0), 903 + SND_SOC_DAPM_PGA_S("LINEOUTL_ENA", 1, WM8903_ANALOGUE_LINEOUT_0, 4, 0, 898 904 NULL, 0), 899 905 SND_SOC_DAPM_PGA_S("LINEOUTR_RMV_SHORT", 4, WM8903_ANALOGUE_LINEOUT_0, 3, 0, 900 906 NULL, 0), 901 907 SND_SOC_DAPM_PGA_S("LINEOUTR_ENA_OUTP", 3, WM8903_ANALOGUE_LINEOUT_0, 2, 0, 902 908 NULL, 0), 903 - SND_SOC_DAPM_PGA_S("LINEOUTR_ENA_DLY", 1, WM8903_ANALOGUE_LINEOUT_0, 1, 0, 909 + SND_SOC_DAPM_PGA_S("LINEOUTR_ENA_DLY", 2, WM8903_ANALOGUE_LINEOUT_0, 1, 0, 910 + NULL, 0), 911 + SND_SOC_DAPM_PGA_S("LINEOUTR_ENA", 1, WM8903_ANALOGUE_LINEOUT_0, 0, 0, 904 912 NULL, 0), 905 913 906 914 SND_SOC_DAPM_SUPPLY("DCS Master", WM8903_DC_SERVO_0, 4, 0, NULL, 0), ··· 1041 1037 { "Left Speaker PGA", NULL, "Left Speaker Mixer" }, 1042 1038 { "Right Speaker PGA", NULL, "Right Speaker Mixer" }, 1043 1039 1044 - { "HPL_ENA_DLY", NULL, "Left Headphone Output PGA" }, 1045 - { "HPR_ENA_DLY", NULL, "Right Headphone Output PGA" }, 1046 - { "LINEOUTL_ENA_DLY", NULL, "Left Line Output PGA" }, 1047 - { "LINEOUTR_ENA_DLY", NULL, "Right Line Output PGA" }, 1040 
+ { "HPL_ENA", NULL, "Left Headphone Output PGA" }, 1041 + { "HPR_ENA", NULL, "Right Headphone Output PGA" }, 1042 + { "HPL_ENA_DLY", NULL, "HPL_ENA" }, 1043 + { "HPR_ENA_DLY", NULL, "HPR_ENA" }, 1044 + { "LINEOUTL_ENA", NULL, "Left Line Output PGA" }, 1045 + { "LINEOUTR_ENA", NULL, "Right Line Output PGA" }, 1046 + { "LINEOUTL_ENA_DLY", NULL, "LINEOUTL_ENA" }, 1047 + { "LINEOUTR_ENA_DLY", NULL, "LINEOUTR_ENA" }, 1048 1048 1049 1049 { "HPL_DCS", NULL, "DCS Master" }, 1050 1050 { "HPR_DCS", NULL, "DCS Master" },
+16
sound/soc/codecs/wm8994.c
··· 3261 3261 wm8994_set_bias_level(codec, SND_SOC_BIAS_STANDBY); 3262 3262 3263 3263 /* Latch volume updates (right only; we always do left then right). */ 3264 + snd_soc_update_bits(codec, WM8994_AIF1_DAC1_LEFT_VOLUME, 3265 + WM8994_AIF1DAC1_VU, WM8994_AIF1DAC1_VU); 3264 3266 snd_soc_update_bits(codec, WM8994_AIF1_DAC1_RIGHT_VOLUME, 3265 3267 WM8994_AIF1DAC1_VU, WM8994_AIF1DAC1_VU); 3268 + snd_soc_update_bits(codec, WM8994_AIF1_DAC2_LEFT_VOLUME, 3269 + WM8994_AIF1DAC2_VU, WM8994_AIF1DAC2_VU); 3266 3270 snd_soc_update_bits(codec, WM8994_AIF1_DAC2_RIGHT_VOLUME, 3267 3271 WM8994_AIF1DAC2_VU, WM8994_AIF1DAC2_VU); 3272 + snd_soc_update_bits(codec, WM8994_AIF2_DAC_LEFT_VOLUME, 3273 + WM8994_AIF2DAC_VU, WM8994_AIF2DAC_VU); 3268 3274 snd_soc_update_bits(codec, WM8994_AIF2_DAC_RIGHT_VOLUME, 3269 3275 WM8994_AIF2DAC_VU, WM8994_AIF2DAC_VU); 3276 + snd_soc_update_bits(codec, WM8994_AIF1_ADC1_LEFT_VOLUME, 3277 + WM8994_AIF1ADC1_VU, WM8994_AIF1ADC1_VU); 3270 3278 snd_soc_update_bits(codec, WM8994_AIF1_ADC1_RIGHT_VOLUME, 3271 3279 WM8994_AIF1ADC1_VU, WM8994_AIF1ADC1_VU); 3280 + snd_soc_update_bits(codec, WM8994_AIF1_ADC2_LEFT_VOLUME, 3281 + WM8994_AIF1ADC2_VU, WM8994_AIF1ADC2_VU); 3272 3282 snd_soc_update_bits(codec, WM8994_AIF1_ADC2_RIGHT_VOLUME, 3273 3283 WM8994_AIF1ADC2_VU, WM8994_AIF1ADC2_VU); 3284 + snd_soc_update_bits(codec, WM8994_AIF2_ADC_LEFT_VOLUME, 3285 + WM8994_AIF2ADC_VU, WM8994_AIF1ADC2_VU); 3274 3286 snd_soc_update_bits(codec, WM8994_AIF2_ADC_RIGHT_VOLUME, 3275 3287 WM8994_AIF2ADC_VU, WM8994_AIF1ADC2_VU); 3288 + snd_soc_update_bits(codec, WM8994_DAC1_LEFT_VOLUME, 3289 + WM8994_DAC1_VU, WM8994_DAC1_VU); 3276 3290 snd_soc_update_bits(codec, WM8994_DAC1_RIGHT_VOLUME, 3277 3291 WM8994_DAC1_VU, WM8994_DAC1_VU); 3292 + snd_soc_update_bits(codec, WM8994_DAC2_LEFT_VOLUME, 3293 + WM8994_DAC2_VU, WM8994_DAC2_VU); 3278 3294 snd_soc_update_bits(codec, WM8994_DAC2_RIGHT_VOLUME, 3279 3295 WM8994_DAC2_VU, WM8994_DAC2_VU); 3280 3296
+4 -4
sound/soc/codecs/wm_hubs.c
··· 740 740 741 741 { "SPKL", "Input Switch", "MIXINL" }, 742 742 { "SPKL", "IN1LP Switch", "IN1LP" }, 743 - { "SPKL", "Output Switch", "Left Output Mixer" }, 743 + { "SPKL", "Output Switch", "Left Output PGA" }, 744 744 { "SPKL", NULL, "TOCLK" }, 745 745 746 746 { "SPKR", "Input Switch", "MIXINR" }, 747 747 { "SPKR", "IN1RP Switch", "IN1RP" }, 748 - { "SPKR", "Output Switch", "Right Output Mixer" }, 748 + { "SPKR", "Output Switch", "Right Output PGA" }, 749 749 { "SPKR", NULL, "TOCLK" }, 750 750 751 751 { "SPKL Boost", "Direct Voice Switch", "Direct Voice" }, ··· 767 767 { "SPKOUTRP", NULL, "SPKR Driver" }, 768 768 { "SPKOUTRN", NULL, "SPKR Driver" }, 769 769 770 - { "Left Headphone Mux", "Mixer", "Left Output Mixer" }, 771 - { "Right Headphone Mux", "Mixer", "Right Output Mixer" }, 770 + { "Left Headphone Mux", "Mixer", "Left Output PGA" }, 771 + { "Right Headphone Mux", "Mixer", "Right Output PGA" }, 772 772 773 773 { "Headphone PGA", NULL, "Left Headphone Mux" }, 774 774 { "Headphone PGA", NULL, "Right Headphone Mux" },
+12 -7
sound/soc/davinci/davinci-mcasp.c
··· 434 434 mcasp_set_bits(base + DAVINCI_MCASP_ACLKRCTL_REG, ACLKRE); 435 435 mcasp_set_bits(base + DAVINCI_MCASP_RXFMCTL_REG, AFSRE); 436 436 437 - mcasp_set_bits(base + DAVINCI_MCASP_PDIR_REG, (0x7 << 26)); 437 + mcasp_set_bits(base + DAVINCI_MCASP_PDIR_REG, 438 + ACLKX | AHCLKX | AFSX); 438 439 break; 439 440 case SND_SOC_DAIFMT_CBM_CFS: 440 441 /* codec is clock master and frame slave */ 441 - mcasp_set_bits(base + DAVINCI_MCASP_ACLKXCTL_REG, ACLKXE); 442 + mcasp_clr_bits(base + DAVINCI_MCASP_ACLKXCTL_REG, ACLKXE); 442 443 mcasp_set_bits(base + DAVINCI_MCASP_TXFMCTL_REG, AFSXE); 443 444 444 - mcasp_set_bits(base + DAVINCI_MCASP_ACLKRCTL_REG, ACLKRE); 445 + mcasp_clr_bits(base + DAVINCI_MCASP_ACLKRCTL_REG, ACLKRE); 445 446 mcasp_set_bits(base + DAVINCI_MCASP_RXFMCTL_REG, AFSRE); 446 447 447 - mcasp_set_bits(base + DAVINCI_MCASP_PDIR_REG, (0x2d << 26)); 448 + mcasp_clr_bits(base + DAVINCI_MCASP_PDIR_REG, 449 + ACLKX | ACLKR); 450 + mcasp_set_bits(base + DAVINCI_MCASP_PDIR_REG, 451 + AFSX | AFSR); 448 452 break; 449 453 case SND_SOC_DAIFMT_CBM_CFM: 450 454 /* codec is clock and frame master */ ··· 458 454 mcasp_clr_bits(base + DAVINCI_MCASP_ACLKRCTL_REG, ACLKRE); 459 455 mcasp_clr_bits(base + DAVINCI_MCASP_RXFMCTL_REG, AFSRE); 460 456 461 - mcasp_clr_bits(base + DAVINCI_MCASP_PDIR_REG, (0x3f << 26)); 457 + mcasp_clr_bits(base + DAVINCI_MCASP_PDIR_REG, 458 + ACLKX | AHCLKX | AFSX | ACLKR | AHCLKR | AFSR); 462 459 break; 463 460 464 461 default: ··· 649 644 mcasp_set_reg(dev->base + DAVINCI_MCASP_TXTDM_REG, mask); 650 645 mcasp_set_bits(dev->base + DAVINCI_MCASP_TXFMT_REG, TXORD); 651 646 652 - if ((dev->tdm_slots >= 2) || (dev->tdm_slots <= 32)) 647 + if ((dev->tdm_slots >= 2) && (dev->tdm_slots <= 32)) 653 648 mcasp_mod_bits(dev->base + DAVINCI_MCASP_TXFMCTL_REG, 654 649 FSXMOD(dev->tdm_slots), FSXMOD(0x1FF)); 655 650 else ··· 665 660 AHCLKRE); 666 661 mcasp_set_reg(dev->base + DAVINCI_MCASP_RXTDM_REG, mask); 667 662 668 - if ((dev->tdm_slots >= 2) || 
(dev->tdm_slots <= 32)) 663 + if ((dev->tdm_slots >= 2) && (dev->tdm_slots <= 32)) 669 664 mcasp_mod_bits(dev->base + DAVINCI_MCASP_RXFMCTL_REG, 670 665 FSRMOD(dev->tdm_slots), FSRMOD(0x1FF)); 671 666 else
+1 -1
sound/soc/jz4740/jz4740-i2s.c
··· 133 133 struct jz4740_i2s *i2s = snd_soc_dai_get_drvdata(dai); 134 134 uint32_t conf; 135 135 136 - if (!dai->active) 136 + if (dai->active) 137 137 return; 138 138 139 139 conf = jz4740_i2s_read(i2s, JZ_REG_AIC_CONF);
+12 -4
sound/soc/mid-x86/sst_platform.c
··· 116 116 static inline void sst_set_stream_status(struct sst_runtime_stream *stream, 117 117 int state) 118 118 { 119 - spin_lock(&stream->status_lock); 119 + unsigned long flags; 120 + spin_lock_irqsave(&stream->status_lock, flags); 120 121 stream->stream_status = state; 121 - spin_unlock(&stream->status_lock); 122 + spin_unlock_irqrestore(&stream->status_lock, flags); 122 123 } 123 124 124 125 static inline int sst_get_stream_status(struct sst_runtime_stream *stream) 125 126 { 126 127 int state; 128 + unsigned long flags; 127 129 128 - spin_lock(&stream->status_lock); 130 + spin_lock_irqsave(&stream->status_lock, flags); 129 131 state = stream->stream_status; 130 - spin_unlock(&stream->status_lock); 132 + spin_unlock_irqrestore(&stream->status_lock, flags); 131 133 return state; 132 134 } 133 135 ··· 376 374 return 0; 377 375 } 378 376 377 + static int sst_platform_pcm_hw_free(struct snd_pcm_substream *substream) 378 + { 379 + return snd_pcm_lib_free_pages(substream); 380 + } 381 + 379 382 static struct snd_pcm_ops sst_platform_ops = { 380 383 .open = sst_platform_open, 381 384 .close = sst_platform_close, ··· 389 382 .trigger = sst_platform_pcm_trigger, 390 383 .pointer = sst_platform_pcm_pointer, 391 384 .hw_params = sst_platform_pcm_hw_params, 385 + .hw_free = sst_platform_pcm_hw_free, 392 386 }; 393 387 394 388 static void sst_pcm_free(struct snd_pcm *pcm)
+4 -4
sound/soc/samsung/goni_wm8994.c
··· 236 236 .name = "WM8994", 237 237 .stream_name = "WM8994 HiFi", 238 238 .cpu_dai_name = "samsung-i2s.0", 239 - .codec_dai_name = "wm8994-hifi", 239 + .codec_dai_name = "wm8994-aif1", 240 240 .platform_name = "samsung-audio", 241 - .codec_name = "wm8994-codec.0-0x1a", 241 + .codec_name = "wm8994-codec.0-001a", 242 242 .init = goni_wm8994_init, 243 243 .ops = &goni_hifi_ops, 244 244 }, { 245 245 .name = "WM8994 Voice", 246 246 .stream_name = "Voice", 247 247 .cpu_dai_name = "goni-voice-dai", 248 - .codec_dai_name = "wm8994-voice", 248 + .codec_dai_name = "wm8994-aif2", 249 249 .platform_name = "samsung-audio", 250 - .codec_name = "wm8994-codec.0-0x1a", 250 + .codec_name = "wm8994-codec.0-001a", 251 251 .ops = &goni_voice_ops, 252 252 }, 253 253 };
+2 -2
sound/soc/samsung/pcm.c
··· 350 350 ctl = readl(regs + S3C_PCM_CTL); 351 351 352 352 switch (fmt & SND_SOC_DAIFMT_INV_MASK) { 353 - case SND_SOC_DAIFMT_NB_NF: 354 - /* Nothing to do, NB_NF by default */ 353 + case SND_SOC_DAIFMT_IB_NF: 354 + /* Nothing to do, IB_NF by default */ 355 355 break; 356 356 default: 357 357 dev_err(pcm->dev, "Unsupported clock inversion!\n");
+16 -6
sound/soc/sh/fsi.c
··· 1200 1200 master->fsib.master = master; 1201 1201 1202 1202 pm_runtime_enable(&pdev->dev); 1203 - pm_runtime_resume(&pdev->dev); 1204 1203 dev_set_drvdata(&pdev->dev, master); 1205 1204 1205 + pm_runtime_get_sync(&pdev->dev); 1206 1206 fsi_soft_all_reset(master); 1207 + pm_runtime_put_sync(&pdev->dev); 1207 1208 1208 1209 ret = request_irq(irq, &fsi_interrupt, IRQF_DISABLED, 1209 1210 id_entry->name, master); ··· 1219 1218 goto exit_free_irq; 1220 1219 } 1221 1220 1222 - return snd_soc_register_dais(&pdev->dev, fsi_soc_dai, ARRAY_SIZE(fsi_soc_dai)); 1221 + ret = snd_soc_register_dais(&pdev->dev, fsi_soc_dai, 1222 + ARRAY_SIZE(fsi_soc_dai)); 1223 + if (ret < 0) { 1224 + dev_err(&pdev->dev, "cannot snd dai register\n"); 1225 + goto exit_snd_soc; 1226 + } 1223 1227 1228 + return ret; 1229 + 1230 + exit_snd_soc: 1231 + snd_soc_unregister_platform(&pdev->dev); 1224 1232 exit_free_irq: 1225 1233 free_irq(irq, master); 1226 1234 exit_iounmap: ··· 1248 1238 1249 1239 master = dev_get_drvdata(&pdev->dev); 1250 1240 1251 - snd_soc_unregister_dais(&pdev->dev, ARRAY_SIZE(fsi_soc_dai)); 1252 - snd_soc_unregister_platform(&pdev->dev); 1253 - 1241 + free_irq(master->irq, master); 1254 1242 pm_runtime_disable(&pdev->dev); 1255 1243 1256 - free_irq(master->irq, master); 1244 + snd_soc_unregister_dais(&pdev->dev, ARRAY_SIZE(fsi_soc_dai)); 1245 + snd_soc_unregister_platform(&pdev->dev); 1257 1246 1258 1247 iounmap(master->base); 1259 1248 kfree(master); ··· 1330 1321 MODULE_LICENSE("GPL"); 1331 1322 MODULE_DESCRIPTION("SuperH onchip FSI audio driver"); 1332 1323 MODULE_AUTHOR("Kuninori Morimoto <morimoto.kuninori@renesas.com>"); 1324 + MODULE_ALIAS("platform:fsi-pcm-audio");
+6 -1
sound/soc/soc-core.c
··· 629 629 runtime->hw.rates |= codec_dai_drv->capture.rates; 630 630 } 631 631 632 + ret = -EINVAL; 632 633 snd_pcm_limit_hw_rates(runtime); 633 634 if (!runtime->hw.rates) { 634 635 printk(KERN_ERR "asoc: %s <-> %s No matching rates\n", ··· 641 640 codec_dai->name, cpu_dai->name); 642 641 goto config_err; 643 642 } 644 - if (!runtime->hw.channels_min || !runtime->hw.channels_max) { 643 + if (!runtime->hw.channels_min || !runtime->hw.channels_max || 644 + runtime->hw.channels_min > runtime->hw.channels_max) { 645 645 printk(KERN_ERR "asoc: %s <-> %s No matching channels\n", 646 646 codec_dai->name, cpu_dai->name); 647 647 goto config_err; ··· 2062 2060 .resume = snd_soc_resume, 2063 2061 .poweroff = snd_soc_poweroff, 2064 2062 }; 2063 + EXPORT_SYMBOL_GPL(snd_soc_pm_ops); 2065 2064 2066 2065 /* ASoC platform driver */ 2067 2066 static struct platform_driver soc_driver = { ··· 3290 3287 3291 3288 if (!card->name || !card->dev) 3292 3289 return -EINVAL; 3290 + 3291 + dev_set_drvdata(card->dev, card); 3293 3292 3294 3293 snd_soc_initialize_card_lists(card); 3295 3294
+1
sound/soc/tegra/harmony.c
··· 370 370 .driver = { 371 371 .name = DRV_NAME, 372 372 .owner = THIS_MODULE, 373 + .pm = &snd_soc_pm_ops, 373 374 }, 374 375 .probe = tegra_snd_harmony_probe, 375 376 .remove = __devexit_p(tegra_snd_harmony_remove),
+3 -1
sound/usb/format.c
··· 176 176 if (!rate) 177 177 continue; 178 178 /* C-Media CM6501 mislabels its 96 kHz altsetting */ 179 + /* Terratec Aureon 7.1 USB C-Media 6206, too */ 179 180 if (rate == 48000 && nr_rates == 1 && 180 181 (chip->usb_id == USB_ID(0x0d8c, 0x0201) || 181 - chip->usb_id == USB_ID(0x0d8c, 0x0102)) && 182 + chip->usb_id == USB_ID(0x0d8c, 0x0102) || 183 + chip->usb_id == USB_ID(0x0ccd, 0x00b1)) && 182 184 fp->altsetting == 5 && fp->maxpacksize == 392) 183 185 rate = 96000; 184 186 /* Creative VF0470 Live Cam reports 16 kHz instead of 8kHz */
+1
sound/usb/quirks.c
··· 533 533 534 534 case USB_ID(0x0d8c, 0x0102): 535 535 /* C-Media CM6206 / CM106-Like Sound Device */ 536 + case USB_ID(0x0ccd, 0x00b1): /* Terratec Aureon 7.1 USB */ 536 537 return snd_usb_cm6206_boot_quirk(dev); 537 538 538 539 case USB_ID(0x133e, 0x0815):
+10 -6
tools/perf/Makefile
··· 35 35 -e s/ppc.*/powerpc/ -e s/mips.*/mips/ \ 36 36 -e s/sh[234].*/sh/ ) 37 37 38 + CC = $(CROSS_COMPILE)gcc 39 + AR = $(CROSS_COMPILE)ar 40 + 38 41 # Additional ARCH settings for x86 39 42 ifeq ($(ARCH),i386) 40 43 ARCH := x86 41 44 endif 42 45 ifeq ($(ARCH),x86_64) 43 - RAW_ARCH := x86_64 44 - ARCH := x86 45 - ARCH_CFLAGS := -DARCH_X86_64 46 - ARCH_INCLUDE = ../../arch/x86/lib/memcpy_64.S 46 + ARCH := x86 47 + IS_X86_64 := $(shell echo __x86_64__ | ${CC} -E -xc - | tail -n 1) 48 + ifeq (${IS_X86_64}, 1) 49 + RAW_ARCH := x86_64 50 + ARCH_CFLAGS := -DARCH_X86_64 51 + ARCH_INCLUDE = ../../arch/x86/lib/memcpy_64.S 52 + endif 47 53 endif 48 54 49 55 # ··· 125 119 126 120 export prefix bindir sharedir sysconfdir 127 121 128 - CC = $(CROSS_COMPILE)gcc 129 - AR = $(CROSS_COMPILE)ar 130 122 RM = rm -f 131 123 MKDIR = mkdir 132 124 FIND = find
+6 -3
tools/perf/builtin-record.c
··· 163 163 struct perf_event_attr *attr = &evsel->attr; 164 164 int track = !evsel->idx; /* only the first counter needs these */ 165 165 166 + attr->inherit = !no_inherit; 166 167 attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED | 167 168 PERF_FORMAT_TOTAL_TIME_RUNNING | 168 169 PERF_FORMAT_ID; ··· 252 251 { 253 252 struct perf_evsel *pos; 254 253 254 + if (evlist->cpus->map[0] < 0) 255 + no_inherit = true; 256 + 255 257 list_for_each_entry(pos, &evlist->entries, node) { 256 258 struct perf_event_attr *attr = &pos->attr; 257 259 /* ··· 275 271 retry_sample_id: 276 272 attr->sample_id_all = sample_id_all_avail ? 1 : 0; 277 273 try_again: 278 - if (perf_evsel__open(pos, evlist->cpus, evlist->threads, group, 279 - !no_inherit) < 0) { 274 + if (perf_evsel__open(pos, evlist->cpus, evlist->threads, group) < 0) { 280 275 int err = errno; 281 276 282 277 if (err == EPERM || err == EACCES) { ··· 427 424 { 428 425 int i; 429 426 430 - for (i = 0; i < evsel_list->cpus->nr; i++) { 427 + for (i = 0; i < evsel_list->nr_mmaps; i++) { 431 428 if (evsel_list->mmap[i].base) 432 429 mmap_read(&evsel_list->mmap[i]); 433 430 }
+5 -4
tools/perf/builtin-stat.c
··· 167 167 attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED | 168 168 PERF_FORMAT_TOTAL_TIME_RUNNING; 169 169 170 - if (system_wide) 171 - return perf_evsel__open_per_cpu(evsel, evsel_list->cpus, false, false); 172 - 173 170 attr->inherit = !no_inherit; 171 + 172 + if (system_wide) 173 + return perf_evsel__open_per_cpu(evsel, evsel_list->cpus, false); 174 + 174 175 if (target_pid == -1 && target_tid == -1) { 175 176 attr->disabled = 1; 176 177 attr->enable_on_exec = 1; 177 178 } 178 179 179 - return perf_evsel__open_per_thread(evsel, evsel_list->threads, false, false); 180 + return perf_evsel__open_per_thread(evsel, evsel_list->threads, false); 180 181 } 181 182 182 183 /*
+6 -6
tools/perf/builtin-test.c
··· 290 290 goto out_thread_map_delete; 291 291 } 292 292 293 - if (perf_evsel__open_per_thread(evsel, threads, false, false) < 0) { 293 + if (perf_evsel__open_per_thread(evsel, threads, false) < 0) { 294 294 pr_debug("failed to open counter: %s, " 295 295 "tweak /proc/sys/kernel/perf_event_paranoid?\n", 296 296 strerror(errno)); ··· 303 303 } 304 304 305 305 if (perf_evsel__read_on_cpu(evsel, 0, 0) < 0) { 306 - pr_debug("perf_evsel__open_read_on_cpu\n"); 306 + pr_debug("perf_evsel__read_on_cpu\n"); 307 307 goto out_close_fd; 308 308 } 309 309 ··· 365 365 goto out_thread_map_delete; 366 366 } 367 367 368 - if (perf_evsel__open(evsel, cpus, threads, false, false) < 0) { 368 + if (perf_evsel__open(evsel, cpus, threads, false) < 0) { 369 369 pr_debug("failed to open counter: %s, " 370 370 "tweak /proc/sys/kernel/perf_event_paranoid?\n", 371 371 strerror(errno)); ··· 418 418 continue; 419 419 420 420 if (perf_evsel__read_on_cpu(evsel, cpu, 0) < 0) { 421 - pr_debug("perf_evsel__open_read_on_cpu\n"); 421 + pr_debug("perf_evsel__read_on_cpu\n"); 422 422 err = -1; 423 423 break; 424 424 } ··· 529 529 530 530 perf_evlist__add(evlist, evsels[i]); 531 531 532 - if (perf_evsel__open(evsels[i], cpus, threads, false, false) < 0) { 532 + if (perf_evsel__open(evsels[i], cpus, threads, false) < 0) { 533 533 pr_debug("failed to open counter: %s, " 534 534 "tweak /proc/sys/kernel/perf_event_paranoid?\n", 535 535 strerror(errno)); ··· 549 549 ++foo; 550 550 } 551 551 552 - while ((event = perf_evlist__read_on_cpu(evlist, 0)) != NULL) { 552 + while ((event = perf_evlist__mmap_read(evlist, 0)) != NULL) { 553 553 struct perf_sample sample; 554 554 555 555 if (event->header.type != PERF_RECORD_SAMPLE) {
+6 -5
tools/perf/builtin-top.c
··· 801 801 } 802 802 } 803 803 804 - static void perf_session__mmap_read_cpu(struct perf_session *self, int cpu) 804 + static void perf_session__mmap_read_idx(struct perf_session *self, int idx) 805 805 { 806 806 struct perf_sample sample; 807 807 union perf_event *event; 808 808 809 - while ((event = perf_evlist__read_on_cpu(top.evlist, cpu)) != NULL) { 809 + while ((event = perf_evlist__mmap_read(top.evlist, idx)) != NULL) { 810 810 perf_session__parse_sample(self, event, &sample); 811 811 812 812 if (event->header.type == PERF_RECORD_SAMPLE) ··· 820 820 { 821 821 int i; 822 822 823 - for (i = 0; i < top.evlist->cpus->nr; i++) 824 - perf_session__mmap_read_cpu(self, i); 823 + for (i = 0; i < top.evlist->nr_mmaps; i++) 824 + perf_session__mmap_read_idx(self, i); 825 825 } 826 826 827 827 static void start_counters(struct perf_evlist *evlist) ··· 845 845 } 846 846 847 847 attr->mmap = 1; 848 + attr->inherit = inherit; 848 849 try_again: 849 850 if (perf_evsel__open(counter, top.evlist->cpus, 850 - top.evlist->threads, group, inherit) < 0) { 851 + top.evlist->threads, group) < 0) { 851 852 int err = errno; 852 853 853 854 if (err == EPERM || err == EACCES) {
+115 -48
tools/perf/util/evlist.c
··· 12 12 #include "evlist.h" 13 13 #include "evsel.h" 14 14 #include "util.h" 15 + #include "debug.h" 15 16 16 17 #include <sys/mman.h> 17 18 ··· 166 165 return NULL; 167 166 } 168 167 169 - union perf_event *perf_evlist__read_on_cpu(struct perf_evlist *evlist, int cpu) 168 + union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx) 170 169 { 171 170 /* XXX Move this to perf.c, making it generally available */ 172 171 unsigned int page_size = sysconf(_SC_PAGE_SIZE); 173 - struct perf_mmap *md = &evlist->mmap[cpu]; 172 + struct perf_mmap *md = &evlist->mmap[idx]; 174 173 unsigned int head = perf_mmap__read_head(md); 175 174 unsigned int old = md->prev; 176 175 unsigned char *data = md->base + page_size; ··· 235 234 236 235 void perf_evlist__munmap(struct perf_evlist *evlist) 237 236 { 238 - int cpu; 237 + int i; 239 238 239 + for (i = 0; i < evlist->nr_mmaps; i++) { 240 + if (evlist->mmap[i].base != NULL) { 241 + munmap(evlist->mmap[i].base, evlist->mmap_len); 242 + evlist->mmap[i].base = NULL; 243 + } 244 + } 245 + 246 + free(evlist->mmap); 247 + evlist->mmap = NULL; 248 + } 249 + 250 + int perf_evlist__alloc_mmap(struct perf_evlist *evlist) 251 + { 252 + evlist->nr_mmaps = evlist->cpus->nr; 253 + if (evlist->cpus->map[0] == -1) 254 + evlist->nr_mmaps = evlist->threads->nr; 255 + evlist->mmap = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap)); 256 + return evlist->mmap != NULL ? 
0 : -ENOMEM; 257 + } 258 + 259 + static int __perf_evlist__mmap(struct perf_evlist *evlist, struct perf_evsel *evsel, 260 + int idx, int prot, int mask, int fd) 261 + { 262 + evlist->mmap[idx].prev = 0; 263 + evlist->mmap[idx].mask = mask; 264 + evlist->mmap[idx].base = mmap(NULL, evlist->mmap_len, prot, 265 + MAP_SHARED, fd, 0); 266 + if (evlist->mmap[idx].base == MAP_FAILED) { 267 + if (evlist->cpus->map[idx] == -1 && evsel->attr.inherit) 268 + ui__warning("Inherit is not allowed on per-task " 269 + "events using mmap.\n"); 270 + return -1; 271 + } 272 + 273 + perf_evlist__add_pollfd(evlist, fd); 274 + return 0; 275 + } 276 + 277 + static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist, int prot, int mask) 278 + { 279 + struct perf_evsel *evsel; 280 + int cpu, thread; 281 + 282 + for (cpu = 0; cpu < evlist->cpus->nr; cpu++) { 283 + int output = -1; 284 + 285 + for (thread = 0; thread < evlist->threads->nr; thread++) { 286 + list_for_each_entry(evsel, &evlist->entries, node) { 287 + int fd = FD(evsel, cpu, thread); 288 + 289 + if (output == -1) { 290 + output = fd; 291 + if (__perf_evlist__mmap(evlist, evsel, cpu, 292 + prot, mask, output) < 0) 293 + goto out_unmap; 294 + } else { 295 + if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, output) != 0) 296 + goto out_unmap; 297 + } 298 + 299 + if ((evsel->attr.read_format & PERF_FORMAT_ID) && 300 + perf_evlist__id_add_fd(evlist, evsel, cpu, thread, fd) < 0) 301 + goto out_unmap; 302 + } 303 + } 304 + } 305 + 306 + return 0; 307 + 308 + out_unmap: 240 309 for (cpu = 0; cpu < evlist->cpus->nr; cpu++) { 241 310 if (evlist->mmap[cpu].base != NULL) { 242 311 munmap(evlist->mmap[cpu].base, evlist->mmap_len); 243 312 evlist->mmap[cpu].base = NULL; 244 313 } 245 314 } 315 + return -1; 246 316 } 247 317 248 - int perf_evlist__alloc_mmap(struct perf_evlist *evlist) 318 + static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist, int prot, int mask) 249 319 { 250 - evlist->mmap = zalloc(evlist->cpus->nr * sizeof(struct 
perf_mmap)); 251 - return evlist->mmap != NULL ? 0 : -ENOMEM; 252 - } 320 + struct perf_evsel *evsel; 321 + int thread; 253 322 254 - static int __perf_evlist__mmap(struct perf_evlist *evlist, int cpu, int prot, 255 - int mask, int fd) 256 - { 257 - evlist->mmap[cpu].prev = 0; 258 - evlist->mmap[cpu].mask = mask; 259 - evlist->mmap[cpu].base = mmap(NULL, evlist->mmap_len, prot, 260 - MAP_SHARED, fd, 0); 261 - if (evlist->mmap[cpu].base == MAP_FAILED) 262 - return -1; 323 + for (thread = 0; thread < evlist->threads->nr; thread++) { 324 + int output = -1; 263 325 264 - perf_evlist__add_pollfd(evlist, fd); 326 + list_for_each_entry(evsel, &evlist->entries, node) { 327 + int fd = FD(evsel, 0, thread); 328 + 329 + if (output == -1) { 330 + output = fd; 331 + if (__perf_evlist__mmap(evlist, evsel, thread, 332 + prot, mask, output) < 0) 333 + goto out_unmap; 334 + } else { 335 + if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, output) != 0) 336 + goto out_unmap; 337 + } 338 + 339 + if ((evsel->attr.read_format & PERF_FORMAT_ID) && 340 + perf_evlist__id_add_fd(evlist, evsel, 0, thread, fd) < 0) 341 + goto out_unmap; 342 + } 343 + } 344 + 265 345 return 0; 346 + 347 + out_unmap: 348 + for (thread = 0; thread < evlist->threads->nr; thread++) { 349 + if (evlist->mmap[thread].base != NULL) { 350 + munmap(evlist->mmap[thread].base, evlist->mmap_len); 351 + evlist->mmap[thread].base = NULL; 352 + } 353 + } 354 + return -1; 266 355 } 267 356 268 357 /** perf_evlist__mmap - Create per cpu maps to receive events ··· 373 282 int perf_evlist__mmap(struct perf_evlist *evlist, int pages, bool overwrite) 374 283 { 375 284 unsigned int page_size = sysconf(_SC_PAGE_SIZE); 376 - int mask = pages * page_size - 1, cpu; 377 - struct perf_evsel *first_evsel, *evsel; 285 + int mask = pages * page_size - 1; 286 + struct perf_evsel *evsel; 378 287 const struct cpu_map *cpus = evlist->cpus; 379 288 const struct thread_map *threads = evlist->threads; 380 - int thread, prot = PROT_READ | (overwrite ? 
0 : PROT_WRITE); 289 + int prot = PROT_READ | (overwrite ? 0 : PROT_WRITE); 381 290 382 291 if (evlist->mmap == NULL && perf_evlist__alloc_mmap(evlist) < 0) 383 292 return -ENOMEM; ··· 387 296 388 297 evlist->overwrite = overwrite; 389 298 evlist->mmap_len = (pages + 1) * page_size; 390 - first_evsel = list_entry(evlist->entries.next, struct perf_evsel, node); 391 299 392 300 list_for_each_entry(evsel, &evlist->entries, node) { 393 301 if ((evsel->attr.read_format & PERF_FORMAT_ID) && 394 302 evsel->sample_id == NULL && 395 303 perf_evsel__alloc_id(evsel, cpus->nr, threads->nr) < 0) 396 304 return -ENOMEM; 397 - 398 - for (cpu = 0; cpu < cpus->nr; cpu++) { 399 - for (thread = 0; thread < threads->nr; thread++) { 400 - int fd = FD(evsel, cpu, thread); 401 - 402 - if (evsel->idx || thread) { 403 - if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, 404 - FD(first_evsel, cpu, 0)) != 0) 405 - goto out_unmap; 406 - } else if (__perf_evlist__mmap(evlist, cpu, prot, mask, fd) < 0) 407 - goto out_unmap; 408 - 409 - if ((evsel->attr.read_format & PERF_FORMAT_ID) && 410 - perf_evlist__id_add_fd(evlist, evsel, cpu, thread, fd) < 0) 411 - goto out_unmap; 412 - } 413 - } 414 305 } 415 306 416 - return 0; 307 + if (evlist->cpus->map[0] == -1) 308 + return perf_evlist__mmap_per_thread(evlist, prot, mask); 417 309 418 - out_unmap: 419 - for (cpu = 0; cpu < cpus->nr; cpu++) { 420 - if (evlist->mmap[cpu].base != NULL) { 421 - munmap(evlist->mmap[cpu].base, evlist->mmap_len); 422 - evlist->mmap[cpu].base = NULL; 423 - } 424 - } 425 - return -1; 310 + return perf_evlist__mmap_per_cpu(evlist, prot, mask); 426 311 } 427 312 428 313 int perf_evlist__create_maps(struct perf_evlist *evlist, pid_t target_pid, ··· 409 342 if (evlist->threads == NULL) 410 343 return -1; 411 344 412 - if (target_tid != -1) 345 + if (cpu_list == NULL && target_tid != -1) 413 346 evlist->cpus = cpu_map__dummy_new(); 414 347 else 415 348 evlist->cpus = cpu_map__new(cpu_list);
+2 -1
tools/perf/util/evlist.h
··· 17 17 struct hlist_head heads[PERF_EVLIST__HLIST_SIZE]; 18 18 int nr_entries; 19 19 int nr_fds; 20 + int nr_mmaps; 20 21 int mmap_len; 21 22 bool overwrite; 22 23 union perf_event event_copy; ··· 47 46 48 47 struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id); 49 48 50 - union perf_event *perf_evlist__read_on_cpu(struct perf_evlist *self, int cpu); 49 + union perf_event *perf_evlist__mmap_read(struct perf_evlist *self, int idx); 51 50 52 51 int perf_evlist__alloc_mmap(struct perf_evlist *evlist); 53 52 int perf_evlist__mmap(struct perf_evlist *evlist, int pages, bool overwrite);
+7 -20
tools/perf/util/evsel.c
··· 175 175 } 176 176 177 177 static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus, 178 - struct thread_map *threads, bool group, bool inherit) 178 + struct thread_map *threads, bool group) 179 179 { 180 180 int cpu, thread; 181 181 unsigned long flags = 0; ··· 192 192 193 193 for (cpu = 0; cpu < cpus->nr; cpu++) { 194 194 int group_fd = -1; 195 - /* 196 - * Don't allow mmap() of inherited per-task counters. This 197 - * would create a performance issue due to all children writing 198 - * to the same buffer. 199 - * 200 - * FIXME: 201 - * Proper fix is not to pass 'inherit' to perf_evsel__open*, 202 - * but a 'flags' parameter, with 'group' folded there as well, 203 - * then introduce a PERF_O_{MMAP,GROUP,INHERIT} enum, and if 204 - * O_MMAP is set, emit a warning if cpu < 0 and O_INHERIT is 205 - * set. Lets go for the minimal fix first tho. 206 - */ 207 - evsel->attr.inherit = (cpus->map[cpu] >= 0) && inherit; 208 195 209 196 for (thread = 0; thread < threads->nr; thread++) { 210 197 ··· 240 253 }; 241 254 242 255 int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus, 243 - struct thread_map *threads, bool group, bool inherit) 256 + struct thread_map *threads, bool group) 244 257 { 245 258 if (cpus == NULL) { 246 259 /* Work around old compiler warnings about strict aliasing */ ··· 250 263 if (threads == NULL) 251 264 threads = &empty_thread_map.map; 252 265 253 - return __perf_evsel__open(evsel, cpus, threads, group, inherit); 266 + return __perf_evsel__open(evsel, cpus, threads, group); 254 267 } 255 268 256 269 int perf_evsel__open_per_cpu(struct perf_evsel *evsel, 257 - struct cpu_map *cpus, bool group, bool inherit) 270 + struct cpu_map *cpus, bool group) 258 271 { 259 - return __perf_evsel__open(evsel, cpus, &empty_thread_map.map, group, inherit); 272 + return __perf_evsel__open(evsel, cpus, &empty_thread_map.map, group); 260 273 } 261 274 262 275 int perf_evsel__open_per_thread(struct perf_evsel *evsel, 263 - struct 
thread_map *threads, bool group, bool inherit) 276 + struct thread_map *threads, bool group) 264 277 { 265 - return __perf_evsel__open(evsel, &empty_cpu_map.map, threads, group, inherit); 278 + return __perf_evsel__open(evsel, &empty_cpu_map.map, threads, group); 266 279 } 267 280 268 281 static int perf_event__parse_id_sample(const union perf_event *event, u64 type,
+3 -3
tools/perf/util/evsel.h
··· 81 81 void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads); 82 82 83 83 int perf_evsel__open_per_cpu(struct perf_evsel *evsel, 84 - struct cpu_map *cpus, bool group, bool inherit); 84 + struct cpu_map *cpus, bool group); 85 85 int perf_evsel__open_per_thread(struct perf_evsel *evsel, 86 - struct thread_map *threads, bool group, bool inherit); 86 + struct thread_map *threads, bool group); 87 87 int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus, 88 - struct thread_map *threads, bool group, bool inherit); 88 + struct thread_map *threads, bool group); 89 89 90 90 #define perf_evsel__match(evsel, t, c) \ 91 91 (evsel->attr.type == PERF_TYPE_##t && \
+6 -5
tools/perf/util/python.c
··· 498 498 struct cpu_map *cpus = NULL; 499 499 struct thread_map *threads = NULL; 500 500 PyObject *pcpus = NULL, *pthreads = NULL; 501 - int group = 0, overwrite = 0; 502 - static char *kwlist[] = {"cpus", "threads", "group", "overwrite", NULL, NULL}; 501 + int group = 0, inherit = 0; 502 + static char *kwlist[] = {"cpus", "threads", "group", "inherit", NULL, NULL}; 503 503 504 504 if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|OOii", kwlist, 505 - &pcpus, &pthreads, &group, &overwrite)) 505 + &pcpus, &pthreads, &group, &inherit)) 506 506 return NULL; 507 507 508 508 if (pthreads != NULL) ··· 511 511 if (pcpus != NULL) 512 512 cpus = ((struct pyrf_cpu_map *)pcpus)->cpus; 513 513 514 - if (perf_evsel__open(evsel, cpus, threads, group, overwrite) < 0) { 514 + evsel->attr.inherit = inherit; 515 + if (perf_evsel__open(evsel, cpus, threads, group) < 0) { 515 516 PyErr_SetFromErrno(PyExc_OSError); 516 517 return NULL; 517 518 } ··· 680 679 &cpu, &sample_id_all)) 681 680 return NULL; 682 681 683 - event = perf_evlist__read_on_cpu(evlist, cpu); 682 + event = perf_evlist__mmap_read(evlist, cpu); 684 683 if (event != NULL) { 685 684 struct perf_evsel *first; 686 685 PyObject *pyevent = pyrf_event__new(event);
+4 -2
tools/perf/util/ui/browsers/annotate.c
··· 256 256 int refresh) 257 257 { 258 258 struct objdump_line *pos, *n; 259 - struct annotation *notes = symbol__annotation(sym); 259 + struct annotation *notes; 260 260 struct annotate_browser browser = { 261 261 .b = { 262 - .entries = &notes->src->source, 263 262 .refresh = ui_browser__list_head_refresh, 264 263 .seek = ui_browser__list_head_seek, 265 264 .write = annotate_browser__write, ··· 280 281 281 282 ui_helpline__push("Press <- or ESC to exit"); 282 283 284 + notes = symbol__annotation(sym); 285 + 283 286 list_for_each_entry(pos, &notes->src->source, node) { 284 287 struct objdump_line_rb_node *rbpos; 285 288 size_t line_len = strlen(pos->line); ··· 292 291 rbpos->idx = browser.b.nr_entries++; 293 292 } 294 293 294 + browser.b.entries = &notes->src->source, 295 295 browser.b.width += 18; /* Percentage */ 296 296 ret = annotate_browser__run(&browser, evidx, refresh); 297 297 list_for_each_entry_safe(pos, n, &notes->src->source, node) {
+1 -1
tools/perf/util/ui/browsers/hists.c
··· 851 851 goto out_free_stack; 852 852 case 'a': 853 853 if (browser->selection == NULL || 854 - browser->selection->map == NULL || 854 + browser->selection->sym == NULL || 855 855 browser->selection->map->dso->annotate_warned) 856 856 continue; 857 857 goto do_annotate;