Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branches 'ib-mfd-iio-input-5.1', 'ib-mfd-input-watchdog-5.1' and 'ib-mfd-platform-5.1' into ibs-for-mfd-merged

+10232 -4861
+42 -1
.clang-format
··· 72 72 - 'apei_estatus_for_each_section' 73 73 - 'ata_for_each_dev' 74 74 - 'ata_for_each_link' 75 + - '__ata_qc_for_each' 76 + - 'ata_qc_for_each' 77 + - 'ata_qc_for_each_raw' 78 + - 'ata_qc_for_each_with_internal' 75 79 - 'ax25_for_each' 76 80 - 'ax25_uid_for_each' 77 81 - 'bio_for_each_integrity_vec' ··· 89 85 - 'blk_queue_for_each_rl' 90 86 - 'bond_for_each_slave' 91 87 - 'bond_for_each_slave_rcu' 88 + - 'bpf_for_each_spilled_reg' 92 89 - 'btree_for_each_safe128' 93 90 - 'btree_for_each_safe32' 94 91 - 'btree_for_each_safe64' ··· 108 103 - 'drm_atomic_crtc_for_each_plane' 109 104 - 'drm_atomic_crtc_state_for_each_plane' 110 105 - 'drm_atomic_crtc_state_for_each_plane_state' 106 + - 'drm_atomic_for_each_plane_damage' 107 + - 'drm_connector_for_each_possible_encoder' 111 108 - 'drm_for_each_connector_iter' 112 109 - 'drm_for_each_crtc' 113 110 - 'drm_for_each_encoder' ··· 128 121 - 'for_each_bio' 129 122 - 'for_each_board_func_rsrc' 130 123 - 'for_each_bvec' 124 + - 'for_each_card_components' 125 + - 'for_each_card_links' 126 + - 'for_each_card_links_safe' 127 + - 'for_each_card_prelinks' 128 + - 'for_each_card_rtds' 129 + - 'for_each_card_rtds_safe' 130 + - 'for_each_cgroup_storage_type' 131 131 - 'for_each_child_of_node' 132 132 - 'for_each_clear_bit' 133 133 - 'for_each_clear_bit_from' 134 134 - 'for_each_cmsghdr' 135 135 - 'for_each_compatible_node' 136 + - 'for_each_component_dais' 137 + - 'for_each_component_dais_safe' 138 + - 'for_each_comp_order' 136 139 - 'for_each_console' 137 140 - 'for_each_cpu' 138 141 - 'for_each_cpu_and' ··· 150 133 - 'for_each_cpu_wrap' 151 134 - 'for_each_dev_addr' 152 135 - 'for_each_dma_cap_mask' 136 + - 'for_each_dpcm_be' 137 + - 'for_each_dpcm_be_rollback' 138 + - 'for_each_dpcm_be_safe' 139 + - 'for_each_dpcm_fe' 153 140 - 'for_each_drhd_unit' 154 141 - 'for_each_dss_dev' 155 142 - 'for_each_efi_memory_desc' ··· 170 149 - 'for_each_iommu' 171 150 - 'for_each_ip_tunnel_rcu' 172 151 - 'for_each_irq_nr' 152 + - 'for_each_link_codecs' 173 153 - 'for_each_lru' 174 154 - 'for_each_matching_node' 175 155 - 'for_each_matching_node_and_match' ··· 182 160 - 'for_each_mem_range_rev' 183 161 - 'for_each_migratetype_order' 184 162 - 'for_each_msi_entry' 163 + - 'for_each_msi_entry_safe' 185 164 - 'for_each_net' 186 165 - 'for_each_netdev' 187 166 - 'for_each_netdev_continue' ··· 206 183 - 'for_each_node_with_property' 207 184 - 'for_each_of_allnodes' 208 185 - 'for_each_of_allnodes_from' 186 + - 'for_each_of_cpu_node' 209 187 - 'for_each_of_pci_range' 210 188 - 'for_each_old_connector_in_state' 211 189 - 'for_each_old_crtc_in_state' 212 190 - 'for_each_oldnew_connector_in_state' 213 191 - 'for_each_oldnew_crtc_in_state' 214 192 - 'for_each_oldnew_plane_in_state' 193 + - 'for_each_oldnew_plane_in_state_reverse' 215 194 - 'for_each_oldnew_private_obj_in_state' 216 195 - 'for_each_old_plane_in_state' 217 196 - 'for_each_old_private_obj_in_state' ··· 231 206 - 'for_each_process' 232 207 - 'for_each_process_thread' 233 208 - 'for_each_property_of_node' 209 + - 'for_each_registered_fb' 234 210 - 'for_each_reserved_mem_region' 235 - - 'for_each_resv_unavail_range' 211 + - 'for_each_rtd_codec_dai' 212 + - 'for_each_rtd_codec_dai_rollback' 236 213 - 'for_each_rtdcom' 237 214 - 'for_each_rtdcom_safe' 238 215 - 'for_each_set_bit' 239 216 - 'for_each_set_bit_from' 240 217 - 'for_each_sg' 241 218 - 'for_each_sg_page' 219 + - 'for_each_sibling_event' 242 220 - '__for_each_thread' 243 221 - 'for_each_thread' 244 222 - 'for_each_zone' ··· 279 251 - 
'hlist_nulls_for_each_entry_from' 280 252 - 'hlist_nulls_for_each_entry_rcu' 281 253 - 'hlist_nulls_for_each_entry_safe' 254 + - 'i3c_bus_for_each_i2cdev' 255 + - 'i3c_bus_for_each_i3cdev' 282 256 - 'ide_host_for_each_port' 283 257 - 'ide_port_for_each_dev' 284 258 - 'ide_port_for_each_present_dev' ··· 297 267 - 'kvm_for_each_memslot' 298 268 - 'kvm_for_each_vcpu' 299 269 - 'list_for_each' 270 + - 'list_for_each_codec' 271 + - 'list_for_each_codec_safe' 300 272 - 'list_for_each_entry' 301 273 - 'list_for_each_entry_continue' 302 274 - 'list_for_each_entry_continue_rcu' 303 275 - 'list_for_each_entry_continue_reverse' 304 276 - 'list_for_each_entry_from' 277 + - 'list_for_each_entry_from_rcu' 305 278 - 'list_for_each_entry_from_reverse' 306 279 - 'list_for_each_entry_lockless' 307 280 - 'list_for_each_entry_rcu' ··· 324 291 - 'media_device_for_each_intf' 325 292 - 'media_device_for_each_link' 326 293 - 'media_device_for_each_pad' 294 + - 'nanddev_io_for_each_page' 327 295 - 'netdev_for_each_lower_dev' 328 296 - 'netdev_for_each_lower_private' 329 297 - 'netdev_for_each_lower_private_rcu' ··· 391 357 - 'sk_nulls_for_each' 392 358 - 'sk_nulls_for_each_from' 393 359 - 'sk_nulls_for_each_rcu' 360 + - 'snd_array_for_each' 394 361 - 'snd_pcm_group_for_each_entry' 395 362 - 'snd_soc_dapm_widget_for_each_path' 396 363 - 'snd_soc_dapm_widget_for_each_path_safe' 397 364 - 'snd_soc_dapm_widget_for_each_sink_path' 398 365 - 'snd_soc_dapm_widget_for_each_source_path' 399 366 - 'tb_property_for_each' 367 + - 'tcf_exts_for_each_action' 400 368 - 'udp_portaddr_for_each_entry' 401 369 - 'udp_portaddr_for_each_entry_rcu' 402 370 - 'usb_hub_for_each_child' ··· 407 371 - 'v4l2_m2m_for_each_dst_buf_safe' 408 372 - 'v4l2_m2m_for_each_src_buf' 409 373 - 'v4l2_m2m_for_each_src_buf_safe' 374 + - 'virtio_device_for_each_vq' 375 + - 'xa_for_each' 376 + - 'xas_for_each' 377 + - 'xas_for_each_conflict' 378 + - 'xas_for_each_marked' 410 379 - 'zorro_for_each_dev' 411 380 412 381 #IncludeBlocks: Preserve # Unknown to clang-format-5.0
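The entries in ForEachMacros tell clang-format to lay out the named macros as control-flow loops rather than ordinary function calls. A minimal sketch of the effect, using the real list_for_each_entry macro with a hypothetical list head and handler:

  struct my_dev *dev;             /* hypothetical element type */

  /* Because 'list_for_each_entry' appears in ForEachMacros, clang-format
   * keeps the brace on the macro line and indents the body like a for
   * loop instead of wrapping it as a long function call. */
  list_for_each_entry(dev, &dev_list, node) {
          handle_dev(dev);        /* hypothetical per-device handler */
  }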
+9
Documentation/ABI/testing/sysfs-block
··· 279 279 size in 512B sectors of the zones of the device, with 280 280 the eventual exception of the last zone of the device 281 281 which may be smaller. 282 + 283 + What: /sys/block/<disk>/queue/io_timeout 284 + Date: November 2018 285 + Contact: Weiping Zhang <zhangweiping@didiglobal.com> 286 + Description: 287 + io_timeout is the request timeout in milliseconds. If a request 288 + does not complete in this time then the block driver timeout 289 + handler is invoked. That timeout handler can decide to retry 290 + the request, to fail it or to start a device recovery strategy.
+9 -2
Documentation/ABI/testing/sysfs-block-zram
··· 122 122 statistics (bd_count, bd_reads, bd_writes) in a format 123 123 similar to block layer statistics file format. 124 124 125 + What: /sys/block/zram<id>/writeback_limit_enable 126 + Date: November 2018 127 + Contact: Minchan Kim <minchan@kernel.org> 128 + Description: 129 + The writeback_limit_enable file is read-write and specifies 130 + whether the writeback_limit feature is enabled. "1" means enable 131 + the feature. "0" (no limit) is the initial state. 132 + 125 133 What: /sys/block/zram<id>/writeback_limit 126 134 Date: November 2018 127 135 Contact: Minchan Kim <minchan@kernel.org> 128 136 Description: 129 137 The writeback_limit file is read-write and specifies the maximum 130 138 amount of writeback ZRAM can do. The limit could be changed 131 - in run time and "0" means disable the limit. 132 - No limit is the initial state. 139 + in run time.
+32
Documentation/ABI/testing/sysfs-class-chromeos
··· 1 + What: /sys/class/chromeos/<ec-device-name>/flashinfo 2 + Date: August 2015 3 + KernelVersion: 4.2 4 + Description: 5 + Show the EC flash information. 6 + 7 + What: /sys/class/chromeos/<ec-device-name>/kb_wake_angle 8 + Date: March 2018 9 + KernelVersion: 4.17 10 + Description: 11 + Control the keyboard wake lid angle. Values are between 12 + 0 and 360. This file will also show the keyboard wake lid 13 + angle by querying the hardware. 14 + 15 + What: /sys/class/chromeos/<ec-device-name>/reboot 16 + Date: August 2015 17 + KernelVersion: 4.2 18 + Description: 19 + Tell the EC to reboot in various ways. Options are: 20 + "cancel": Cancel a pending reboot. 21 + "ro": Jump to RO without rebooting. 22 + "rw": Jump to RW without rebooting. 23 + "cold": Cold reboot. 24 + "disable-jump": Disable jump until next reboot. 25 + "hibernate": Hibernate the EC. 26 + "at-shutdown": Reboot after an AP shutdown. 27 + 28 + What: /sys/class/chromeos/<ec-device-name>/version 29 + Date: August 2015 30 + KernelVersion: 4.2 31 + Description: 32 + Show the information about the EC software and hardware.
+74
Documentation/ABI/testing/sysfs-class-chromeos-driver-cros-ec-lightbar
··· 1 + What: /sys/class/chromeos/<ec-device-name>/lightbar/brightness 2 + Date: August 2015 3 + KernelVersion: 4.2 4 + Description: 5 + Writing to this file adjusts the overall brightness of 6 + the lightbar, separate from any color intensity. The 7 + valid range is 0 (off) to 255 (maximum brightness). 8 + 9 + What: /sys/class/chromeos/<ec-device-name>/lightbar/interval_msec 10 + Date: August 2015 11 + KernelVersion: 4.2 12 + Description: 13 + The lightbar is controlled by an embedded controller (EC), 14 + which also manages the keyboard, battery charging, fans, 15 + and other system hardware. To prevent unprivileged users 16 + from interfering with the other EC functions, the rate at 17 + which the lightbar control files can be read or written is 18 + limited. 19 + 20 + Reading this file will return the number of milliseconds 21 + that must elapse between accessing any of the lightbar 22 + functions through this interface. Going faster will simply 23 + block until the necessary interval has elapsed. The interval 24 + applies uniformly to all accesses of any kind by any user. 25 + 26 + What: /sys/class/chromeos/<ec-device-name>/lightbar/led_rgb 27 + Date: August 2015 28 + KernelVersion: 4.2 29 + Description: 30 + This allows you to control each LED segment. If the 31 + lightbar is already running one of the automatic 32 + sequences, you probably won’t see anything change because 33 + your color setting will be almost immediately replaced. 34 + To get useful results, you should stop the lightbar 35 + sequence first. 36 + 37 + The values written to this file are sets of four integers, 38 + indicating LED, RED, GREEN, BLUE. The LED number is 0 to 3 39 + to select a single segment, or 4 to set all four segments 40 + to the same value at once. The RED, GREEN, and BLUE 41 + numbers should be in the range 0 (off) to 255 (maximum). 42 + You can update more than one segment at a time by writing 43 + more than one set of four integers. 44 + 45 + What: /sys/class/chromeos/<ec-device-name>/lightbar/program 46 + Date: August 2015 47 + KernelVersion: 4.2 48 + Description: 49 + This allows you to upload and run custom lightbar sequences. 50 + 51 + What: /sys/class/chromeos/<ec-device-name>/lightbar/sequence 52 + Date: August 2015 53 + KernelVersion: 4.2 54 + Description: 55 + The Pixel lightbar has a number of built-in sequences 56 + that it displays under various conditions, such as at 57 + power on, shut down, or while running. Reading from this 58 + file displays the current sequence that the lightbar is 59 + displaying. Writing to this file allows you to change the 60 + sequence. 61 + 62 + What: /sys/class/chromeos/<ec-device-name>/lightbar/userspace_control 63 + Date: August 2015 64 + KernelVersion: 4.2 65 + Description: 66 + This allows you to take control of the lightbar. This 67 + prevents the kernel from going through its normal 68 + sequences. 69 + 70 + What: /sys/class/chromeos/<ec-device-name>/lightbar/version 71 + Date: August 2015 72 + KernelVersion: 4.2 73 + Description: 74 + Show the information about the lightbar version.
+6
Documentation/ABI/testing/sysfs-class-chromeos-driver-cros-ec-vbc
··· 1 + What: /sys/class/chromeos/<ec-device-name>/vbc/vboot_context 2 + Date: October 2015 3 + KernelVersion: 4.4 4 + Description: 5 + Read/write the verified boot context data included on a 6 + small nvram space on some EC implementations.
+7
Documentation/block/bfq-iosched.txt
··· 357 357 than maximum throughput. In these cases, consider setting the 358 358 strict_guarantees parameter. 359 359 360 + slice_idle_us 361 + ------------- 362 + 363 + Controls the same tuning parameter as slice_idle, but in microseconds. 364 + Either tunable can be used to set idling behavior. Afterwards, the 365 + other tunable will reflect the newly set value in sysfs. 366 + 360 367 strict_guarantees 361 368 ----------------- 362 369
+2 -1
Documentation/block/null_blk.txt
··· 88 88 89 89 zoned=[0/1]: Default: 0 90 90 0: Block device is exposed as a random-access block device. 91 - 1: Block device is exposed as a host-managed zoned block device. 91 + 1: Block device is exposed as a host-managed zoned block device. Requires 92 + CONFIG_BLK_DEV_ZONED. 92 93 93 94 zone_size=[MB]: Default: 256 94 95 Per zone size when exposed as a zoned block device. Must be a power of two.
+7
Documentation/block/queue-sysfs.txt
··· 67 67 IO to sleep for this amount of microseconds before entering classic 68 68 polling. 69 69 70 + io_timeout (RW) 71 + --------------- 72 + io_timeout is the request timeout in milliseconds. If a request does not 73 + complete in this time then the block driver timeout handler is invoked. 74 + That timeout handler can decide to retry the request, to fail it or to start 75 + a device recovery strategy. 76 + 70 77 iostats (RW) 71 78 ------------- 72 79 This file is used to control (on/off) the iostats accounting of the
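Like the other attributes in this file, io_timeout holds a plain decimal value, so it can be tuned programmatically as well as from a shell. A minimal userspace sketch, assuming a disk named sda and sufficient privileges (both illustrative):

  #include <stdio.h>

  int main(void)
  {
          /* Raise the block-layer request timeout to 30 seconds;
           * the attribute is expressed in milliseconds. */
          FILE *f = fopen("/sys/block/sda/queue/io_timeout", "w");

          if (!f) {
                  perror("io_timeout");
                  return 1;
          }
          fprintf(f, "30000\n");
          return fclose(f) ? 1 : 0;
  }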
+47 -27
Documentation/blockdev/zram.txt
··· 156 156 A brief description of exported device attributes. For more details please 157 157 read Documentation/ABI/testing/sysfs-block-zram. 158 158 159 - Name access description 160 - ---- ------ ----------- 161 - disksize RW show and set the device's disk size 162 - initstate RO shows the initialization state of the device 163 - reset WO trigger device reset 164 - mem_used_max WO reset the `mem_used_max' counter (see later) 165 - mem_limit WO specifies the maximum amount of memory ZRAM can use 166 - to store the compressed data 167 - writeback_limit WO specifies the maximum amount of write IO zram can 168 - write out to backing device as 4KB unit 169 - max_comp_streams RW the number of possible concurrent compress operations 170 - comp_algorithm RW show and change the compression algorithm 171 - compact WO trigger memory compaction 172 - debug_stat RO this file is used for zram debugging purposes 173 - backing_dev RW set up backend storage for zram to write out 174 - idle WO mark allocated slot as idle 159 + Name access description 160 + ---- ------ ----------- 161 + disksize RW show and set the device's disk size 162 + initstate RO shows the initialization state of the device 163 + reset WO trigger device reset 164 + mem_used_max WO reset the `mem_used_max' counter (see later) 165 + mem_limit WO specifies the maximum amount of memory ZRAM can use 166 + to store the compressed data 167 + writeback_limit WO specifies the maximum amount of write IO zram can 168 + write out to backing device as 4KB unit 169 + writeback_limit_enable RW show and set writeback_limit feature 170 + max_comp_streams RW the number of possible concurrent compress operations 171 + comp_algorithm RW show and change the compression algorithm 172 + compact WO trigger memory compaction 173 + debug_stat RO this file is used for zram debugging purposes 174 + backing_dev RW set up backend storage for zram to write out 175 + idle WO mark allocated slot as idle 175 176 176 177 177 178 User space is advised to use the following files to read the device statistics. ··· 281 280 If there is a lot of write IO to a flash device, it can potentially suffer 282 281 flash wearout, so the admin needs to design a write limitation 283 282 to guarantee storage health for the entire product life. 284 - To overcome the concern, zram supports "writeback_limit". 285 - The "writeback_limit"'s default value is 0 so that it doesn't limit 286 - any writeback. If admin want to measure writeback count in a certain 287 - period, he could know it via /sys/block/zram0/bd_stat's 3rd column. 283 + 284 + To overcome the concern, zram supports the "writeback_limit" feature. 285 + The "writeback_limit_enable"'s default value is 0 so that it doesn't limit 286 + any writeback. IOW, if the admin wants to apply a writeback budget, he should 287 + enable writeback_limit_enable via 288 + 289 + $ echo 1 > /sys/block/zramX/writeback_limit_enable 290 + 291 + Once writeback_limit_enable is set, zram doesn't allow any writeback 292 + until the admin sets the budget via /sys/block/zramX/writeback_limit. 293 + 294 + (If the admin doesn't enable writeback_limit_enable, the writeback_limit value 295 + assigned via /sys/block/zramX/writeback_limit is meaningless.) 288 296 289 297 If the admin wants to limit writeback to 400M per day, he could do it 290 298 like below. 291 299 292 - MB_SHIFT=20 293 - 4K_SHIFT=12 294 - echo $((400<<MB_SHIFT>>4K_SHIFT)) > \ 295 - /sys/block/zram0/writeback_limit. 
300 + $ MB_SHIFT=20 301 + $ PAGE_SHIFT=12 302 + $ echo $((400<<MB_SHIFT>>PAGE_SHIFT)) > \ 303 + /sys/block/zram0/writeback_limit 304 + $ echo 1 > /sys/block/zram0/writeback_limit_enable 296 305 297 - If admin want to allow further write again 306 + If the admin wants to allow further writes again once the budget is 307 + exhausted, he could do it like below 298 308 299 - echo 0 > /sys/block/zram0/writeback_limit 309 + $ echo $((400<<MB_SHIFT>>PAGE_SHIFT)) > \ 310 + /sys/block/zram0/writeback_limit 300 311 301 312 If the admin wants to see the remaining writeback budget since it was set, 302 313 303 - cat /sys/block/zram0/writeback_limit 314 + $ cat /sys/block/zramX/writeback_limit 315 + 316 + If the admin wants to disable the writeback limit, he could do 317 + 318 + $ echo 0 > /sys/block/zramX/writeback_limit_enable 304 319 305 320 The writeback_limit count will reset whenever you reset zram (e.g., 306 321 system reboot, echo 1 > /sys/block/zramX/reset), so keeping track of how much 307 322 writeback happened until you reset the zram, to allocate an extra writeback 308 323 budget at the next setting, is the user's job. 324 + 325 + If the admin wants to measure the writeback count in a certain period, he 326 + could read it via /sys/block/zram0/bd_stat's 3rd column.
+5 -6
Documentation/bpf/bpf_design_QA.rst
··· 157 157 ------------------------------ 158 158 A: YES. BPF instructions, arguments to BPF programs, set of helper 159 159 functions and their arguments, recognized return codes are all part 160 - of ABI. However when tracing programs are using bpf_probe_read() helper 161 - to walk kernel internal datastructures and compile with kernel 162 - internal headers these accesses can and will break with newer 163 - kernels. The union bpf_attr -> kern_version is checked at load time 164 - to prevent accidentally loading kprobe-based bpf programs written 165 - for a different kernel. Networking programs don't do kern_version check. 160 + of ABI. However, there is one specific exception for tracing programs 161 + that use helpers like bpf_probe_read() to walk kernel internal 162 + data structures and compile against kernel internal headers. Both of these 163 + kernel internals are subject to change and can break with newer kernels 164 + such that the program needs to be adapted accordingly. 166 165 167 166 Q: How much stack space a BPF program uses? 168 167 -------------------------------------------
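The fragile pattern the answer describes looks like the following sketch: a kprobe program compiled against kernel internal headers that walks a task_struct with bpf_probe_read(). The attach point, helper header name, and field are illustrative; the point is that the field offset is baked in at compile time and is not ABI:

  /* Sketch only: built with clang -target bpf against kernel headers;
   * "bpf_helpers.h" stands for the helper header shipped with the
   * kernel's BPF samples/selftests. */
  #include <linux/ptrace.h>
  #include <linux/sched.h>
  #include "bpf_helpers.h"

  SEC("kprobe/do_sys_open")
  int trace_open(struct pt_regs *ctx)
  {
          struct task_struct *task =
                  (struct task_struct *)bpf_get_current_task();
          char comm[TASK_COMM_LEN];

          /* The offset of task->comm comes from the headers used at
           * build time; a newer kernel may move it, which is exactly
           * the breakage described above. */
          bpf_probe_read(&comm, sizeof(comm), &task->comm);
          return 0;
  }

  char _license[] SEC("license") = "GPL";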
+1 -1
Documentation/devicetree/bindings/arm/cpu-capacity.txt
··· 235 235 =========================================== 236 236 237 237 [1] ARM Linux Kernel documentation - CPUs bindings 238 - Documentation/devicetree/bindings/arm/cpus.txt 238 + Documentation/devicetree/bindings/arm/cpus.yaml
+1 -1
Documentation/devicetree/bindings/arm/idle-states.txt
··· 684 684 =========================================== 685 685 686 686 [1] ARM Linux Kernel documentation - CPUs bindings 687 - Documentation/devicetree/bindings/arm/cpus.txt 687 + Documentation/devicetree/bindings/arm/cpus.yaml 688 688 689 689 [2] ARM Linux Kernel documentation - PSCI bindings 690 690 Documentation/devicetree/bindings/arm/psci.txt
+1 -1
Documentation/devicetree/bindings/arm/sp810.txt
··· 4 4 Required properties: 5 5 6 6 - compatible: standard compatible string for a Primecell peripheral, 7 - see Documentation/devicetree/bindings/arm/primecell.txt 7 + see Documentation/devicetree/bindings/arm/primecell.yaml 8 8 for more details 9 9 should be: "arm,sp810", "arm,primecell" 10 10
+1 -1
Documentation/devicetree/bindings/arm/topology.txt
··· 472 472 473 473 =============================================================================== 474 474 [1] ARM Linux kernel documentation 475 - Documentation/devicetree/bindings/arm/cpus.txt 475 + Documentation/devicetree/bindings/arm/cpus.yaml
+1 -1
Documentation/devicetree/bindings/clock/marvell,mmp2.txt
··· 18 18 Each clock is assigned an identifier and client nodes use this identifier 19 19 to specify the clock which they consume. 20 20 21 - All these identifier could be found in <dt-bindings/clock/marvell-mmp2.h>. 21 + All these identifiers could be found in <dt-bindings/clock/marvell,mmp2.h>.
+1 -1
Documentation/devicetree/bindings/display/arm,pl11x.txt
··· 1 1 * ARM PrimeCell Color LCD Controller PL110/PL111 2 2 3 - See also Documentation/devicetree/bindings/arm/primecell.txt 3 + See also Documentation/devicetree/bindings/arm/primecell.yaml 4 4 5 5 Required properties: 6 6
-2
Documentation/devicetree/bindings/gpio/gpio-mvebu.txt
··· 14 14 15 15 "marvell,armada-8k-gpio" should be used for the Armada 7K and 8K 16 16 SoCs (either from AP or CP), see 17 - Documentation/devicetree/bindings/arm/marvell/cp110-system-controller0.txt 18 - and 19 17 Documentation/devicetree/bindings/arm/marvell/ap806-system-controller.txt 20 18 for specific details about the offset property. 21 19
+28
Documentation/devicetree/bindings/input/st,stpmic1-onkey.txt
··· 1 + STMicroelectronics STPMIC1 Onkey 2 + 3 + Required properties: 4 + 5 + - compatible = "st,stpmic1-onkey"; 6 + - interrupts: interrupt line to use 7 + - interrupt-names = "onkey-falling", "onkey-rising" 8 + onkey-falling: happens when onkey is pressed; IT_PONKEY_F of pmic 9 + onkey-rising: happens when onkey is released; IT_PONKEY_R of pmic 10 + 11 + Optional properties: 12 + 13 + - st,onkey-clear-cc-flag: onkey is able to power on after an 14 + over-current shutdown event. 15 + - st,onkey-pu-inactive: onkey pull up is not active 16 + - power-off-time-sec: Duration in seconds for which the key should be kept 17 + pressed for the device to power off automatically (from 1 to 16 seconds). 18 + See Documentation/devicetree/bindings/input/keys.txt 19 + 20 + Example: 21 + 22 + onkey { 23 + compatible = "st,stpmic1-onkey"; 24 + interrupt-parent = <&pmic>; 25 + interrupts = <IT_PONKEY_F 0>,<IT_PONKEY_R 1>; 26 + interrupt-names = "onkey-falling", "onkey-rising"; 27 + power-off-time-sec = <10>; 28 + };
+1 -1
Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.txt
··· 78 78 PPI affinity can be expressed as a single "ppi-partitions" node, 79 79 containing a set of sub-nodes, each with the following property: 80 80 - affinity: Should be a list of phandles to CPU nodes (as described in 81 - Documentation/devicetree/bindings/arm/cpus.txt). 81 + Documentation/devicetree/bindings/arm/cpus.yaml). 82 82 83 83 GICv3 has one or more Interrupt Translation Services (ITS) that are 84 84 used to route Message Signalled Interrupts (MSI) to the CPUs.
+61
Documentation/devicetree/bindings/mfd/st,stpmic1.txt
··· 1 + * STMicroelectronics STPMIC1 Power Management IC 2 + 3 + Required properties: 4 + - compatible: : "st,stpmic1" 5 + - reg: : The I2C slave address for the STPMIC1 chip. 6 + - interrupts: : The interrupt line the device is connected to. 7 + - #interrupt-cells: : Should be 2. 8 + - interrupt-controller: : Marks the device node as an interrupt controller. 9 + Interrupt numbers are defined at 10 + dt-bindings/mfd/st,stpmic1.h. 11 + 12 + STPMIC1 consists of a varied group of sub-devices. 13 + Each sub-device binding is described in its own documentation file. 14 + 15 + Device Description 16 + ------ ------------ 17 + st,stpmic1-onkey : Power on key, see ../input/st,stpmic1-onkey.txt 18 + st,stpmic1-regulators : Regulators, see ../regulator/st,stpmic1-regulator.txt 19 + st,stpmic1-wdt : Watchdog, see ../watchdog/st,stpmic1-wdt.txt 20 + 21 + Example: 22 + 23 + #include <dt-bindings/mfd/st,stpmic1.h> 24 + 25 + pmic: pmic@33 { 26 + compatible = "st,stpmic1"; 27 + reg = <0x33>; 28 + interrupt-parent = <&gpioa>; 29 + interrupts = <0 2>; 30 + 31 + interrupt-controller; 32 + #interrupt-cells = <2>; 33 + 34 + onkey { 35 + compatible = "st,stpmic1-onkey"; 36 + interrupts = <IT_PONKEY_F 0>,<IT_PONKEY_R 1>; 37 + interrupt-names = "onkey-falling", "onkey-rising"; 38 + power-off-time-sec = <10>; 39 + }; 40 + 41 + watchdog { 42 + compatible = "st,stpmic1-wdt"; 43 + }; 44 + 45 + regulators { 46 + compatible = "st,stpmic1-regulators"; 47 + 48 + vdd_core: buck1 { 49 + regulator-name = "vdd_core"; 50 + regulator-boot-on; 51 + regulator-min-microvolt = <700000>; 52 + regulator-max-microvolt = <1200000>; 53 + }; 54 + vdd: buck3 { 55 + regulator-name = "vdd"; 56 + regulator-min-microvolt = <3300000>; 57 + regulator-max-microvolt = <3300000>; 58 + regulator-boot-on; 59 + regulator-pull-down; 60 + }; 61 + };
+2 -1
Documentation/devicetree/bindings/reset/socfpga-reset.txt
··· 1 1 Altera SOCFPGA Reset Manager 2 2 3 3 Required properties: 4 - - compatible : "altr,rst-mgr" 4 + - compatible : "altr,rst-mgr" for Cyclone5/Arria5/Arria10 5 + "altr,stratix10-rst-mgr","altr,rst-mgr" for Stratix10 ARM64 SoC 5 6 - reg : Should contain 1 register range (address and length) 6 7 - altr,modrst-offset : Should contain the offset of the first modrst register. 7 8 - #reset-cells: 1
+14 -11
Documentation/devicetree/bindings/reset/uniphier-reset.txt
··· 120 120 }; 121 121 122 122 123 - USB3 core reset 124 - --------------- 123 + Peripheral core reset in glue layer 124 + ----------------------------------- 125 125 126 - USB3 core reset belongs to USB3 glue layer. Before using the core reset, 127 - it is necessary to control the clocks and resets to enable this layer. 128 - These clocks and resets should be described in each property. 126 + Some peripheral core resets belong to their own glue layer. Before using 127 + such a core reset, it is necessary to control the clocks and resets to enable 128 + this layer. These clocks and resets should be described in each property. 129 129 130 130 Required properties: 131 131 - compatible: Should be 132 - "socionext,uniphier-pro4-usb3-reset" - for Pro4 SoC 133 - "socionext,uniphier-pxs2-usb3-reset" - for PXs2 SoC 134 - "socionext,uniphier-ld20-usb3-reset" - for LD20 SoC 135 - "socionext,uniphier-pxs3-usb3-reset" - for PXs3 SoC 132 + "socionext,uniphier-pro4-usb3-reset" - for Pro4 SoC USB3 133 + "socionext,uniphier-pxs2-usb3-reset" - for PXs2 SoC USB3 134 + "socionext,uniphier-ld20-usb3-reset" - for LD20 SoC USB3 135 + "socionext,uniphier-pxs3-usb3-reset" - for PXs3 SoC USB3 136 + "socionext,uniphier-pro4-ahci-reset" - for Pro4 SoC AHCI 137 + "socionext,uniphier-pxs2-ahci-reset" - for PXs2 SoC AHCI 138 + "socionext,uniphier-pxs3-ahci-reset" - for PXs3 SoC AHCI 136 139 - #reset-cells: Should be 1. 137 140 - reg: Specifies offset and length of the register set for the device. 138 - - clocks: A list of phandles to the clock gate for USB3 glue layer. 141 + - clocks: A list of phandles to the clock gate for the glue layer. 139 142 According to the clock-names, appropriate clocks are required. 140 143 - clock-names: Should contain 141 144 "gio", "link" - for Pro4 SoC 142 145 "link" - for others 143 - - resets: A list of phandles to the reset control for USB3 glue layer. 146 + - resets: A list of phandles to the reset control for the glue layer. 144 147 According to the reset-names, appropriate resets are required. 145 148 - reset-names: Should contain 146 149 "gio", "link" - for Pro4 SoC
+1 -1
Documentation/devicetree/bindings/soc/qcom/qcom,glink.txt
··· 55 55 = EXAMPLE 56 56 The following example represents the GLINK RPM node on a MSM8996 device, with 57 57 the function for the "rpm_request" channel defined, which is used for 58 - regualtors and root clocks. 58 + regulators and root clocks. 59 59 60 60 apcs_glb: mailbox@9820000 { 61 61 compatible = "qcom,msm8996-apcs-hmss-global";
+2 -2
Documentation/devicetree/bindings/soc/qcom/qcom,smp2p.txt
··· 41 41 - qcom,local-pid: 42 42 Usage: required 43 43 Value type: <u32> 44 - Definition: specifies the identfier of the local endpoint of this edge 44 + Definition: specifies the identifier of the local endpoint of this edge 45 45 46 46 - qcom,remote-pid: 47 47 Usage: required 48 48 Value type: <u32> 49 - Definition: specifies the identfier of the remote endpoint of this edge 49 + Definition: specifies the identifier of the remote endpoint of this edge 50 50 51 51 = SUBNODES 52 52 Each SMP2P pair contain a set of inbound and outbound entries, these are
+11
Documentation/devicetree/bindings/watchdog/st,stpmic1-wdt.txt
··· 1 + STMicroelectronics STPMIC1 Watchdog 2 + 3 + Required properties: 4 + 5 + - compatible : should be "st,stpmic1-wdt" 6 + 7 + Example: 8 + 9 + watchdog { 10 + compatible = "st,stpmic1-wdt"; 11 + };
+4 -4
Documentation/driver-model/bus.txt
··· 124 124 ssize_t (*store)(struct bus_type *, const char * buf, size_t count); 125 125 }; 126 126 127 - Bus drivers can export attributes using the BUS_ATTR macro that works 128 - similarly to the DEVICE_ATTR macro for devices. For example, a definition 129 - like this: 127 + Bus drivers can export attributes using the BUS_ATTR_RW macro that works 128 + similarly to the DEVICE_ATTR_RW macro for devices. For example, a 129 + definition like this: 130 130 131 - static BUS_ATTR(debug,0644,show_debug,store_debug); 131 + static BUS_ATTR_RW(debug); 132 132 133 133 is equivalent to declaring: 134 134
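The equivalence rests on a naming convention: BUS_ATTR_RW(debug) expects callbacks named debug_show() and debug_store() with the bus_attribute signatures shown above. A minimal sketch of the pair the macro wires up (the debug flag itself is illustrative):

  #include <linux/device.h>
  #include <linux/kernel.h>

  static long debug_flag;         /* illustrative bus-wide setting */

  static ssize_t debug_show(struct bus_type *bus, char *buf)
  {
          return sprintf(buf, "%ld\n", debug_flag);
  }

  static ssize_t debug_store(struct bus_type *bus, const char *buf,
                             size_t count)
  {
          if (kstrtol(buf, 0, &debug_flag))
                  return -EINVAL;
          return count;
  }
  static BUS_ATTR_RW(debug);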
+8
Documentation/fb/fbcon.txt
··· 163 163 be preserved until some text is actually output to the console. 164 164 This option causes fbcon to bind immediately to the fbdev device. 165 165 166 + 7. fbcon=logo-pos:<location> 167 + 168 + The only possible 'location' is 'center' (without quotes), and when 169 + given, the bootup logo is moved from the default top-left corner 170 + location to the center of the framebuffer. If more than one logo is 171 + displayed due to multiple CPUs, the collected line of logos is moved 172 + as a whole. 173 + 166 174 C. Attaching, Detaching and Unloading 167 175 168 176 Before going on to how to attach, detach and unload the framebuffer console, an
+1
Documentation/features/core/cBPF-JIT/arch-support.txt
··· 11 11 | arm: | TODO | 12 12 | arm64: | TODO | 13 13 | c6x: | TODO | 14 + | csky: | TODO | 14 15 | h8300: | TODO | 15 16 | hexagon: | TODO | 16 17 | ia64: | TODO |
+1
Documentation/features/core/eBPF-JIT/arch-support.txt
··· 11 11 | arm: | ok | 12 12 | arm64: | ok | 13 13 | c6x: | TODO | 14 + | csky: | TODO | 14 15 | h8300: | TODO | 15 16 | hexagon: | TODO | 16 17 | ia64: | TODO |
+1
Documentation/features/core/generic-idle-thread/arch-support.txt
··· 11 11 | arm: | ok | 12 12 | arm64: | ok | 13 13 | c6x: | TODO | 14 + | csky: | ok | 14 15 | h8300: | TODO | 15 16 | hexagon: | ok | 16 17 | ia64: | ok |
+1
Documentation/features/core/jump-labels/arch-support.txt
··· 11 11 | arm: | ok | 12 12 | arm64: | ok | 13 13 | c6x: | TODO | 14 + | csky: | TODO | 14 15 | h8300: | TODO | 15 16 | hexagon: | TODO | 16 17 | ia64: | TODO |
+1
Documentation/features/core/tracehook/arch-support.txt
··· 11 11 | arm: | ok | 12 12 | arm64: | ok | 13 13 | c6x: | ok | 14 + | csky: | ok | 14 15 | h8300: | TODO | 15 16 | hexagon: | ok | 16 17 | ia64: | ok |
+1
Documentation/features/debug/KASAN/arch-support.txt
··· 11 11 | arm: | TODO | 12 12 | arm64: | ok | 13 13 | c6x: | TODO | 14 + | csky: | TODO | 14 15 | h8300: | TODO | 15 16 | hexagon: | TODO | 16 17 | ia64: | TODO |
+1
Documentation/features/debug/gcov-profile-all/arch-support.txt
··· 11 11 | arm: | ok | 12 12 | arm64: | ok | 13 13 | c6x: | TODO | 14 + | csky: | TODO | 14 15 | h8300: | TODO | 15 16 | hexagon: | TODO | 16 17 | ia64: | TODO |
+1
Documentation/features/debug/kgdb/arch-support.txt
··· 11 11 | arm: | ok | 12 12 | arm64: | ok | 13 13 | c6x: | TODO | 14 + | csky: | TODO | 14 15 | h8300: | ok | 15 16 | hexagon: | ok | 16 17 | ia64: | TODO |
+1
Documentation/features/debug/kprobes-on-ftrace/arch-support.txt
··· 11 11 | arm: | TODO | 12 12 | arm64: | TODO | 13 13 | c6x: | TODO | 14 + | csky: | TODO | 14 15 | h8300: | TODO | 15 16 | hexagon: | TODO | 16 17 | ia64: | TODO |
+1
Documentation/features/debug/kprobes/arch-support.txt
··· 11 11 | arm: | ok | 12 12 | arm64: | ok | 13 13 | c6x: | TODO | 14 + | csky: | TODO | 14 15 | h8300: | TODO | 15 16 | hexagon: | TODO | 16 17 | ia64: | ok |
+1
Documentation/features/debug/kretprobes/arch-support.txt
··· 11 11 | arm: | ok | 12 12 | arm64: | ok | 13 13 | c6x: | TODO | 14 + | csky: | TODO | 14 15 | h8300: | TODO | 15 16 | hexagon: | TODO | 16 17 | ia64: | ok |
+1
Documentation/features/debug/optprobes/arch-support.txt
··· 11 11 | arm: | ok | 12 12 | arm64: | TODO | 13 13 | c6x: | TODO | 14 + | csky: | TODO | 14 15 | h8300: | TODO | 15 16 | hexagon: | TODO | 16 17 | ia64: | TODO |
+1
Documentation/features/debug/stackprotector/arch-support.txt
··· 11 11 | arm: | ok | 12 12 | arm64: | ok | 13 13 | c6x: | TODO | 14 + | csky: | TODO | 14 15 | h8300: | TODO | 15 16 | hexagon: | TODO | 16 17 | ia64: | TODO |
+1
Documentation/features/debug/uprobes/arch-support.txt
··· 11 11 | arm: | ok | 12 12 | arm64: | ok | 13 13 | c6x: | TODO | 14 + | csky: | TODO | 14 15 | h8300: | TODO | 15 16 | hexagon: | TODO | 16 17 | ia64: | TODO |
+1
Documentation/features/debug/user-ret-profiler/arch-support.txt
··· 11 11 | arm: | TODO | 12 12 | arm64: | TODO | 13 13 | c6x: | TODO | 14 + | csky: | TODO | 14 15 | h8300: | TODO | 15 16 | hexagon: | TODO | 16 17 | ia64: | TODO |
+1
Documentation/features/io/dma-contiguous/arch-support.txt
··· 11 11 | arm: | ok | 12 12 | arm64: | ok | 13 13 | c6x: | TODO | 14 + | csky: | ok | 14 15 | h8300: | TODO | 15 16 | hexagon: | TODO | 16 17 | ia64: | TODO |
+1
Documentation/features/locking/cmpxchg-local/arch-support.txt
··· 11 11 | arm: | TODO | 12 12 | arm64: | ok | 13 13 | c6x: | TODO | 14 + | csky: | TODO | 14 15 | h8300: | TODO | 15 16 | hexagon: | TODO | 16 17 | ia64: | TODO |
+1
Documentation/features/locking/lockdep/arch-support.txt
··· 11 11 | arm: | ok | 12 12 | arm64: | ok | 13 13 | c6x: | TODO | 14 + | csky: | TODO | 14 15 | h8300: | TODO | 15 16 | hexagon: | ok | 16 17 | ia64: | TODO |
+1
Documentation/features/locking/queued-rwlocks/arch-support.txt
··· 11 11 | arm: | TODO | 12 12 | arm64: | ok | 13 13 | c6x: | TODO | 14 + | csky: | ok | 14 15 | h8300: | TODO | 15 16 | hexagon: | TODO | 16 17 | ia64: | TODO |
+1
Documentation/features/locking/queued-spinlocks/arch-support.txt
··· 11 11 | arm: | TODO | 12 12 | arm64: | TODO | 13 13 | c6x: | TODO | 14 + | csky: | TODO | 14 15 | h8300: | TODO | 15 16 | hexagon: | TODO | 16 17 | ia64: | TODO |
+1
Documentation/features/locking/rwsem-optimized/arch-support.txt
··· 11 11 | arm: | ok | 12 12 | arm64: | ok | 13 13 | c6x: | TODO | 14 + | csky: | TODO | 14 15 | h8300: | TODO | 15 16 | hexagon: | TODO | 16 17 | ia64: | ok |
+1
Documentation/features/perf/kprobes-event/arch-support.txt
··· 11 11 | arm: | ok | 12 12 | arm64: | ok | 13 13 | c6x: | TODO | 14 + | csky: | TODO | 14 15 | h8300: | TODO | 15 16 | hexagon: | ok | 16 17 | ia64: | TODO |
+1
Documentation/features/perf/perf-regs/arch-support.txt
··· 11 11 | arm: | ok | 12 12 | arm64: | ok | 13 13 | c6x: | TODO | 14 + | csky: | TODO | 14 15 | h8300: | TODO | 15 16 | hexagon: | TODO | 16 17 | ia64: | TODO |
+1
Documentation/features/perf/perf-stackdump/arch-support.txt
··· 11 11 | arm: | ok | 12 12 | arm64: | ok | 13 13 | c6x: | TODO | 14 + | csky: | TODO | 14 15 | h8300: | TODO | 15 16 | hexagon: | TODO | 16 17 | ia64: | TODO |
+1
Documentation/features/sched/membarrier-sync-core/arch-support.txt
··· 34 34 | arm: | ok | 35 35 | arm64: | ok | 36 36 | c6x: | TODO | 37 + | csky: | TODO | 37 38 | h8300: | TODO | 38 39 | hexagon: | TODO | 39 40 | ia64: | TODO |
+1
Documentation/features/sched/numa-balancing/arch-support.txt
··· 11 11 | arm: | .. | 12 12 | arm64: | ok | 13 13 | c6x: | .. | 14 + | csky: | .. | 14 15 | h8300: | .. | 15 16 | hexagon: | .. | 16 17 | ia64: | TODO |
+1
Documentation/features/seccomp/seccomp-filter/arch-support.txt
··· 11 11 | arm: | ok | 12 12 | arm64: | ok | 13 13 | c6x: | TODO | 14 + | csky: | TODO | 14 15 | h8300: | TODO | 15 16 | hexagon: | TODO | 16 17 | ia64: | TODO |
+1
Documentation/features/time/arch-tick-broadcast/arch-support.txt
··· 11 11 | arm: | ok | 12 12 | arm64: | ok | 13 13 | c6x: | TODO | 14 + | csky: | TODO | 14 15 | h8300: | TODO | 15 16 | hexagon: | TODO | 16 17 | ia64: | TODO |
+1
Documentation/features/time/clockevents/arch-support.txt
··· 11 11 | arm: | ok | 12 12 | arm64: | ok | 13 13 | c6x: | ok | 14 + | csky: | ok | 14 15 | h8300: | ok | 15 16 | hexagon: | ok | 16 17 | ia64: | TODO |
+1
Documentation/features/time/context-tracking/arch-support.txt
··· 11 11 | arm: | ok | 12 12 | arm64: | ok | 13 13 | c6x: | TODO | 14 + | csky: | TODO | 14 15 | h8300: | TODO | 15 16 | hexagon: | TODO | 16 17 | ia64: | TODO |
+1
Documentation/features/time/irq-time-acct/arch-support.txt
··· 11 11 | arm: | ok | 12 12 | arm64: | ok | 13 13 | c6x: | TODO | 14 + | csky: | TODO | 14 15 | h8300: | TODO | 15 16 | hexagon: | TODO | 16 17 | ia64: | .. |
+1
Documentation/features/time/modern-timekeeping/arch-support.txt
··· 11 11 | arm: | TODO | 12 12 | arm64: | ok | 13 13 | c6x: | ok | 14 + | csky: | ok | 14 15 | h8300: | ok | 15 16 | hexagon: | ok | 16 17 | ia64: | ok |
+1
Documentation/features/time/virt-cpuacct/arch-support.txt
··· 11 11 | arm: | ok | 12 12 | arm64: | ok | 13 13 | c6x: | TODO | 14 + | csky: | TODO | 14 15 | h8300: | TODO | 15 16 | hexagon: | TODO | 16 17 | ia64: | ok |
+1
Documentation/features/vm/ELF-ASLR/arch-support.txt
··· 11 11 | arm: | ok | 12 12 | arm64: | ok | 13 13 | c6x: | TODO | 14 + | csky: | TODO | 14 15 | h8300: | TODO | 15 16 | hexagon: | TODO | 16 17 | ia64: | TODO |
+1
Documentation/features/vm/PG_uncached/arch-support.txt
··· 11 11 | arm: | TODO | 12 12 | arm64: | TODO | 13 13 | c6x: | TODO | 14 + | csky: | TODO | 14 15 | h8300: | TODO | 15 16 | hexagon: | TODO | 16 17 | ia64: | ok |
+1
Documentation/features/vm/THP/arch-support.txt
··· 11 11 | arm: | ok | 12 12 | arm64: | ok | 13 13 | c6x: | .. | 14 + | csky: | .. | 14 15 | h8300: | .. | 15 16 | hexagon: | .. | 16 17 | ia64: | TODO |
+1
Documentation/features/vm/TLB/arch-support.txt
··· 11 11 | arm: | TODO | 12 12 | arm64: | TODO | 13 13 | c6x: | .. | 14 + | csky: | TODO | 14 15 | h8300: | .. | 15 16 | hexagon: | TODO | 16 17 | ia64: | TODO |
+1
Documentation/features/vm/huge-vmap/arch-support.txt
··· 11 11 | arm: | TODO | 12 12 | arm64: | ok | 13 13 | c6x: | TODO | 14 + | csky: | TODO | 14 15 | h8300: | TODO | 15 16 | hexagon: | TODO | 16 17 | ia64: | TODO |
+1
Documentation/features/vm/ioremap_prot/arch-support.txt
··· 11 11 | arm: | TODO | 12 12 | arm64: | TODO | 13 13 | c6x: | TODO | 14 + | csky: | TODO | 14 15 | h8300: | TODO | 15 16 | hexagon: | TODO | 16 17 | ia64: | TODO |
+1
Documentation/features/vm/numa-memblock/arch-support.txt
··· 11 11 | arm: | .. | 12 12 | arm64: | ok | 13 13 | c6x: | .. | 14 + | csky: | .. | 14 15 | h8300: | .. | 15 16 | hexagon: | .. | 16 17 | ia64: | ok |
+1
Documentation/features/vm/pte_special/arch-support.txt
··· 11 11 | arm: | ok | 12 12 | arm64: | ok | 13 13 | c6x: | TODO | 14 + | csky: | TODO | 14 15 | h8300: | TODO | 15 16 | hexagon: | TODO | 16 17 | ia64: | TODO |
+3 -1
Documentation/filesystems/sysfs.txt
··· 344 344 345 345 Declaring: 346 346 347 - BUS_ATTR(_name, _mode, _show, _store) 347 + static BUS_ATTR_RW(name); 348 + static BUS_ATTR_RO(name); 349 + static BUS_ATTR_WO(name); 348 350 349 351 Creation/Removal: 350 352
+13 -13
Documentation/networking/index.rst
··· 11 11 batman-adv 12 12 can 13 13 can_ucan_protocol 14 - dpaa2/index 15 - e100 16 - e1000 17 - e1000e 18 - fm10k 19 - igb 20 - igbvf 21 - ixgb 22 - ixgbe 23 - ixgbevf 24 - i40e 25 - iavf 26 - ice 14 + device_drivers/freescale/dpaa2/index 15 + device_drivers/intel/e100 16 + device_drivers/intel/e1000 17 + device_drivers/intel/e1000e 18 + device_drivers/intel/fm10k 19 + device_drivers/intel/igb 20 + device_drivers/intel/igbvf 21 + device_drivers/intel/ixgb 22 + device_drivers/intel/ixgbe 23 + device_drivers/intel/ixgbevf 24 + device_drivers/intel/i40e 25 + device_drivers/intel/iavf 26 + device_drivers/intel/ice 27 27 kapi 28 28 z8530book 29 29 msg_zerocopy
-45
Documentation/networking/rxrpc.txt
··· 1000 1000 size should be set when the call is begun. tx_total_len may not be less 1001 1001 than zero. 1002 1002 1003 - (*) Check to see the completion state of a call so that the caller can assess 1004 - whether it needs to be retried. 1005 - 1006 - enum rxrpc_call_completion { 1007 - RXRPC_CALL_SUCCEEDED, 1008 - RXRPC_CALL_REMOTELY_ABORTED, 1009 - RXRPC_CALL_LOCALLY_ABORTED, 1010 - RXRPC_CALL_LOCAL_ERROR, 1011 - RXRPC_CALL_NETWORK_ERROR, 1012 - }; 1013 - 1014 - int rxrpc_kernel_check_call(struct socket *sock, struct rxrpc_call *call, 1015 - enum rxrpc_call_completion *_compl, 1016 - u32 *_abort_code); 1017 - 1018 - On return, -EINPROGRESS will be returned if the call is still ongoing; if 1019 - it is finished, *_compl will be set to indicate the manner of completion, 1020 - *_abort_code will be set to any abort code that occurred. 0 will be 1021 - returned on a successful completion, -ECONNABORTED will be returned if the 1022 - client failed due to a remote abort and anything else will return an 1023 - appropriate error code. 1024 - 1025 - The caller should look at this information to decide if it's worth 1026 - retrying the call. 1027 - 1028 - (*) Retry a client call. 1029 - 1030 - int rxrpc_kernel_retry_call(struct socket *sock, 1031 - struct rxrpc_call *call, 1032 - struct sockaddr_rxrpc *srx, 1033 - struct key *key); 1034 - 1035 - This attempts to partially reinitialise a call and submit it again while 1036 - reusing the original call's Tx queue to avoid the need to repackage and 1037 - re-encrypt the data to be sent. call indicates the call to retry, srx the 1038 - new address to send it to and key the encryption key to use for signing or 1039 - encrypting the packets. 1040 - 1041 - For this to work, the first Tx data packet must still be in the transmit 1042 - queue, and currently this is only permitted for local and network errors 1043 - and the call must not have been aborted. Any partially constructed Tx 1044 - packet is left as is and can continue being filled afterwards. 1045 - 1046 - It returns 0 if the call was requeued and an error otherwise. 1047 - 1048 1003 (*) Get call RTT. 1049 1004 1050 1005 u64 rxrpc_kernel_get_rtt(struct socket *sock, struct rxrpc_call *call);
+125 -5
Documentation/networking/snmp_counter.rst
··· 336 336 to the accept queue. 337 337 338 338 339 - TCP Fast Open 339 + * TcpEstabResets 340 + Defined in `RFC1213 tcpEstabResets`_. 341 + 342 + .. _RFC1213 tcpEstabResets: https://tools.ietf.org/html/rfc1213#page-48 343 + 344 + * TcpAttemptFails 345 + Defined in `RFC1213 tcpAttemptFails`_. 346 + 347 + .. _RFC1213 tcpAttemptFails: https://tools.ietf.org/html/rfc1213#page-48 348 + 349 + * TcpOutRsts 350 + Defined in `RFC1213 tcpOutRsts`_. The RFC says this counter indicates 351 + the 'segments sent containing the RST flag', but in the linux kernel, this 352 + counter indicates the segments the kernel tried to send. The sending 353 + process might fail due to some errors (e.g. memory alloc failed). 354 + 355 + .. _RFC1213 tcpOutRsts: https://tools.ietf.org/html/rfc1213#page-52 356 + 357 + 358 + TCP Fast Path 340 359 ============= 341 360 When the kernel receives a TCP packet, it has two paths to handle the 342 361 packet: one is the fast path, the other is the slow path. The comment in kernel ··· 402 383 403 384 TCP abort 404 385 ========= 405 - 406 - 407 386 * TcpExtTCPAbortOnData 408 387 It means the TCP layer has data in flight, but needs to close the 409 388 connection. So the TCP layer sends a RST to the other side, indicating the ··· 562 545 stack of kernel will increase TcpExtTCPSACKReorder for both of the 563 546 above scenarios. 564 547 565 - 566 548 DSACK 567 549 ===== 568 550 The DSACK is defined in `RFC2883`_. The receiver uses DSACK to report ··· 582 566 DSACK to the sender. 583 567 584 568 * TcpExtTCPDSACKRecv 585 - The TCP stack receives a DSACK, which indicate an acknowledged 569 + The TCP stack receives a DSACK, which indicates an acknowledged 586 570 duplicate packet is received. 587 571 588 572 * TcpExtTCPDSACKOfoRecv 589 573 The TCP stack receives a DSACK, which indicates an out of order 590 574 duplicate packet is received. 575 + 576 + invalid SACK and DSACK 577 + ====================== 578 + When a SACK (or DSACK) block is invalid, a corresponding counter would 579 + be updated. The validation method is based on the start/end sequence 580 + number of the SACK block. For more details, please refer to the comment 581 + of the function tcp_is_sackblock_valid in the kernel source code. A 582 + SACK option could have up to 4 blocks; they are checked 583 + individually. E.g., if 3 blocks of a SACK are invalid, the 584 + corresponding counter would be updated 3 times. The comment of the 585 + `Add counters for discarded SACK blocks`_ patch has additional 586 + explanation: 587 + 588 + .. _Add counters for discarded SACK blocks: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=18f02545a9a16c9a89778b91a162ad16d510bb32 589 + 590 + * TcpExtTCPSACKDiscard 591 + This counter indicates how many SACK blocks are invalid. If the invalid 592 + SACK block is caused by ACK recording, the TCP stack will only ignore 593 + it and won't update this counter. 594 + 595 + * TcpExtTCPDSACKIgnoredOld and TcpExtTCPDSACKIgnoredNoUndo 596 + When a DSACK block is invalid, one of these two counters would be 597 + updated. Which counter will be updated depends on the undo_marker flag 598 + of the TCP socket. If the undo_marker is not set, the TCP stack isn't 599 + likely to have re-transmitted any packets; if we still receive an invalid 600 + DSACK block, the reason might be that the packet was duplicated in the 601 + middle of the network. In such a scenario, TcpExtTCPDSACKIgnoredNoUndo 602 + will be updated. If the undo_marker is set, TcpExtTCPDSACKIgnoredOld 603 + will be updated.
As implied in its name, it might be an old packet. 604 + 605 + SACK shift 606 + ========== 607 + The linux networking stack stores data in the sk_buff struct (skb for 608 + short). If a SACK block crosses multiple skbs, the TCP stack will try 609 + to re-arrange the data in these skbs. E.g. if a SACK block acknowledges seq 610 + 10 to 15, skb1 has seq 10 to 13, skb2 has seq 14 to 20. The seq 14 and 611 + 15 in skb2 would be moved to skb1. This operation is 'shift'. If a 612 + SACK block acknowledges seq 10 to 20, skb1 has seq 10 to 13, skb2 has 613 + seq 14 to 20. All data in skb2 will be moved to skb1, and skb2 will be 614 + discarded; this operation is 'merge'. 615 + 616 + * TcpExtTCPSackShifted 617 + A skb is shifted. 618 + 619 + * TcpExtTCPSackMerged 620 + A skb is merged. 621 + 622 + * TcpExtTCPSackShiftFallback 623 + A skb should be shifted or merged, but the TCP stack doesn't do it for 624 + some reason. 591 625 592 626 TCP out of order 593 627 =============== ··· 728 662 .. _RFC 5961 section 4.2: https://tools.ietf.org/html/rfc5961#page-9 729 663 .. _RFC 5961 section 5.2: https://tools.ietf.org/html/rfc5961#page-11 730 664 665 + TCP receive window 666 + ================== 667 + * TcpExtTCPWantZeroWindowAdv 668 + Depending on current memory usage, the TCP stack tries to set the receive 669 + window to zero. But the receive window might still be a non-zero 670 + value. For example, if the previous window size is 10, and the TCP 671 + stack receives 3 bytes, the current window size would be 7 even if the 672 + window size calculated from the memory usage is zero. 673 + 674 + * TcpExtTCPToZeroWindowAdv 675 + The TCP receive window is set to zero from a non-zero value. 676 + 677 + * TcpExtTCPFromZeroWindowAdv 678 + The TCP receive window is set to a non-zero value from zero. 679 + 680 + 681 + Delayed ACK 682 + =========== 683 + The TCP Delayed ACK is a technique which is used for reducing the 684 + packet count in the network. For more details, please refer to the 685 + `Delayed ACK wiki`_ 686 + 687 + .. _Delayed ACK wiki: https://en.wikipedia.org/wiki/TCP_delayed_acknowledgment 688 + 689 + * TcpExtDelayedACKs 690 + A delayed ACK timer expires. The TCP stack will send a pure ACK packet 691 + and exit the delayed ACK mode. 692 + 693 + * TcpExtDelayedACKLocked 694 + A delayed ACK timer expires, but the TCP stack can't send an ACK 695 + immediately because the socket is locked by a userspace program. The 696 + TCP stack will send a pure ACK later (after the userspace program 697 + unlocks the socket). When the TCP stack sends the pure ACK later, the 698 + TCP stack will also update TcpExtDelayedACKs and exit the delayed ACK 699 + mode. 700 + 701 + * TcpExtDelayedACKLost 702 + It will be updated when the TCP stack receives a packet which has already 703 + been ACKed. A Delayed ACK loss might cause this issue, but it would also be 704 + triggered by other reasons, such as a packet being duplicated in the 705 + network. 706 + 707 + Tail Loss Probe (TLP) 708 + ===================== 709 + TLP is an algorithm which is used to detect TCP packet loss. For more 710 + details, please refer to the `TLP paper`_. 711 + 712 + .. _TLP paper: https://tools.ietf.org/html/draft-dukkipati-tcpm-tcp-loss-probe-01 713 + 714 + * TcpExtTCPLossProbes 715 + A TLP probe packet is sent. 716 + 717 + * TcpExtTCPLossProbeRecovery 718 + A packet loss is detected and recovered by TLP. 731 719 732 720 examples 733 721 =======
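All of the TcpExt counters described in this document surface in /proc/net/netstat as paired lines: one line of counter names followed by one line of matching values. A small reader sketch; the counter it looks up is arbitrary and nothing here is specific to a kernel version:

  #include <stdio.h>
  #include <string.h>

  int main(void)
  {
          const char *want = "TCPLossProbes"; /* counter to look up */
          char names[4096], values[4096];
          char *n, *v, *sn, *sv;
          FILE *f = fopen("/proc/net/netstat", "r");

          if (!f)
                  return 1;
          /* Lines alternate "TcpExt: name ..." / "TcpExt: value ...";
           * walk each pair in lockstep. */
          while (fgets(names, sizeof(names), f) &&
                 fgets(values, sizeof(values), f)) {
                  if (strncmp(names, "TcpExt:", 7))
                          continue;
                  n = strtok_r(names, " \n", &sn);
                  v = strtok_r(values, " \n", &sv);
                  while (n && v) {
                          if (!strcmp(n, want))
                                  printf("%s = %s\n", n, v);
                          n = strtok_r(NULL, " \n", &sn);
                          v = strtok_r(NULL, " \n", &sv);
                  }
          }
          fclose(f);
          return 0;
  }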
+2 -2
Documentation/networking/timestamping.txt
··· 417 417 418 418 Hardware time stamping must also be initialized for each device driver 419 419 that is expected to do hardware time stamping. The parameter is defined in 420 - /include/linux/net_tstamp.h as: 420 + include/uapi/linux/net_tstamp.h as: 421 421 422 422 struct hwtstamp_config { 423 423 int flags; /* no flags defined right now, must be zero */ ··· 487 487 HWTSTAMP_FILTER_PTP_V1_L4_EVENT, 488 488 489 489 /* for the complete list of values, please check 490 - * the include file /include/linux/net_tstamp.h 490 + * the include file include/uapi/linux/net_tstamp.h 491 491 */ 492 492 }; 493 493
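For context around the corrected header path: the hwtstamp_config struct reaches a driver through the SIOCSHWTSTAMP ioctl, carried in a struct ifreq. A minimal sketch enabling transmit and all-receive hardware timestamps; the interface name and filter choice are illustrative, and the driver may rewrite the config to the nearest setting it supports:

  #include <string.h>
  #include <sys/ioctl.h>
  #include <net/if.h>
  #include <linux/net_tstamp.h>
  #include <linux/sockios.h>

  int enable_hw_timestamping(int sock)
  {
          struct hwtstamp_config cfg;
          struct ifreq ifr;

          memset(&cfg, 0, sizeof(cfg));
          cfg.tx_type = HWTSTAMP_TX_ON;
          cfg.rx_filter = HWTSTAMP_FILTER_ALL;

          memset(&ifr, 0, sizeof(ifr));
          strncpy(ifr.ifr_name, "eth0", sizeof(ifr.ifr_name) - 1);
          ifr.ifr_data = (char *)&cfg;

          /* On success the driver may have adjusted cfg to what the
           * hardware actually supports. */
          return ioctl(sock, SIOCSHWTSTAMP, &ifr);
  }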
+1 -1
Documentation/trace/coresight-cpu-debug.txt
··· 165 165 The same can also be done from an application program. 166 166 167 167 Disable specific CPU's specific idle state from cpuidle sysfs (see 168 - Documentation/cpuidle/sysfs.txt): 168 + Documentation/admin-guide/pm/cpuidle.rst): 169 169 # echo 1 > /sys/devices/system/cpu/cpu$cpu/cpuidle/state$state/disable 170 170 171 171
+1 -1
Documentation/virtual/kvm/amd-memory-encryption.rst
··· 242 242 ========== 243 243 244 244 .. [white-paper] http://amd-dev.wpengine.netdna-cdn.com/wordpress/media/2013/12/AMD_Memory_Encryption_Whitepaper_v7-Public.pdf 245 - .. [api-spec] http://support.amd.com/TechDocs/55766_SEV-KM%20API_Specification.pdf 245 + .. [api-spec] http://support.amd.com/TechDocs/55766_SEV-KM_API_Specification.pdf 246 246 .. [amd-apm] http://support.amd.com/TechDocs/24593.pdf (section 15.34) 247 247 .. [kvm-forum] http://www.linux-kvm.org/images/7/74/02x08A-Thomas_Lendacky-AMDs_Virtualizatoin_Memory_Encryption_Technology.pdf
+1 -1
Documentation/x86/resctrl_ui.txt
··· 9 9 Tony Luck <tony.luck@intel.com> 10 10 Vikas Shivappa <vikas.shivappa@intel.com> 11 11 12 - This feature is enabled by the CONFIG_RESCTRL and the X86 /proc/cpuinfo 12 + This feature is enabled by the CONFIG_X86_RESCTRL and the x86 /proc/cpuinfo 13 13 flag bits: 14 14 RDT (Resource Director Technology) Allocation - "rdt_a" 15 15 CAT (Cache Allocation Technology) - "cat_l3", "cat_l2"
+15 -8
MAINTAINERS
··· 3471 3471 F: drivers/i2c/busses/i2c-thunderx* 3472 3472 3473 3473 CAVIUM LIQUIDIO NETWORK DRIVER 3474 - M: Derek Chickles <derek.chickles@caviumnetworks.com> 3475 - M: Satanand Burla <satananda.burla@caviumnetworks.com> 3476 - M: Felix Manlunas <felix.manlunas@caviumnetworks.com> 3477 - M: Raghu Vatsavayi <raghu.vatsavayi@caviumnetworks.com> 3474 + M: Derek Chickles <dchickles@marvell.com> 3475 + M: Satanand Burla <sburla@marvell.com> 3476 + M: Felix Manlunas <fmanlunas@marvell.com> 3478 3477 L: netdev@vger.kernel.org 3479 3478 W: http://www.cavium.com 3480 3479 S: Supported ··· 3950 3951 S: Maintained 3951 3952 F: drivers/net/ethernet/ti/cpmac.c 3952 3953 3953 - CPU FREQUENCY DRIVERS 3954 + CPU FREQUENCY SCALING FRAMEWORK 3954 3955 M: "Rafael J. Wysocki" <rjw@rjwysocki.net> 3955 3956 M: Viresh Kumar <viresh.kumar@linaro.org> 3956 3957 L: linux-pm@vger.kernel.org ··· 3958 3959 T: git git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm.git 3959 3960 T: git git://git.linaro.org/people/vireshk/linux.git (For ARM Updates) 3960 3961 B: https://bugzilla.kernel.org 3962 + F: Documentation/admin-guide/pm/cpufreq.rst 3963 + F: Documentation/admin-guide/pm/intel_pstate.rst 3961 3964 F: Documentation/cpu-freq/ 3962 3965 F: Documentation/devicetree/bindings/cpufreq/ 3963 3966 F: drivers/cpufreq/ ··· 4007 4006 F: drivers/cpuidle/cpuidle-exynos.c 4008 4007 F: arch/arm/mach-exynos/pm.c 4009 4008 4010 - CPUIDLE DRIVERS 4009 + CPU IDLE TIME MANAGEMENT FRAMEWORK 4011 4010 M: "Rafael J. Wysocki" <rjw@rjwysocki.net> 4012 4011 M: Daniel Lezcano <daniel.lezcano@linaro.org> 4013 4012 L: linux-pm@vger.kernel.org 4014 4013 S: Maintained 4015 4014 T: git git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm.git 4016 4015 B: https://bugzilla.kernel.org 4016 + F: Documentation/admin-guide/pm/cpuidle.rst 4017 4017 F: drivers/cpuidle/* 4018 4018 F: include/linux/cpuidle.h 4019 4019 ··· 13822 13820 13823 13821 SIFIVE DRIVERS 13824 13822 M: Palmer Dabbelt <palmer@sifive.com> 13823 + M: Paul Walmsley <paul.walmsley@sifive.com> 13825 13824 L: linux-riscv@lists.infradead.org 13826 - T: git git://git.kernel.org/pub/scm/linux/kernel/git/palmer/riscv-linux.git 13825 + T: git git://github.com/sifive/riscv-linux.git 13827 13826 S: Supported 13828 13827 K: sifive 13829 13828 N: sifive ··· 14434 14431 M: Florian Schilhabel <florian.c.schilhabel@googlemail.com>. 14435 14432 S: Odd Fixes 14436 14433 F: drivers/staging/rtl8712/ 14434 + 14435 + STAGING - REALTEK RTL8188EU DRIVERS 14436 + M: Larry Finger <Larry.Finger@lwfinger.net> 14437 + S: Odd Fixes 14438 + F: drivers/staging/rtl8188eu/ 14437 14439 14438 14440 STAGING - SILICON MOTION SM750 FRAME BUFFER DRIVER 14439 14441 M: Sudip Mukherjee <sudipm.mukherjee@gmail.com> ··· 15810 15802 L: linux-usb@vger.kernel.org 15811 15803 L: usb-storage@lists.one-eyed-alien.net 15812 15804 S: Maintained 15813 - W: http://www.one-eyed-alien.net/~mdharm/linux-usb/ 15814 15805 F: drivers/usb/storage/ 15815 15806 15816 15807 USB MIDI DRIVER
+5 -5
Makefile
··· 2 2 VERSION = 5 3 3 PATCHLEVEL = 0 4 4 SUBLEVEL = 0 5 - EXTRAVERSION = -rc1 5 + EXTRAVERSION = -rc3 6 6 NAME = Shy Crocodile 7 7 8 8 # *DOCUMENTATION* ··· 955 955 endif 956 956 endif 957 957 958 + PHONY += prepare0 958 959 959 960 ifeq ($(KBUILD_EXTMOD),) 960 961 core-y += kernel/ certs/ mm/ fs/ ipc/ security/ crypto/ block/ ··· 1062 1061 # archprepare is used in arch Makefiles and when processed asm symlink, 1063 1062 # version.h and scripts_basic is processed / created. 1064 1063 1065 - # Listed in dependency order 1066 - PHONY += prepare archprepare prepare0 prepare1 prepare2 prepare3 1064 + PHONY += prepare archprepare prepare1 prepare2 prepare3 1067 1065 1068 1066 # prepare3 is used to check if we are building in a separate output directory, 1069 1067 # and if so do: ··· 1360 1360 mrproper: rm-files := $(wildcard $(MRPROPER_FILES)) 1361 1361 mrproper-dirs := $(addprefix _mrproper_,scripts) 1362 1362 1363 - PHONY += $(mrproper-dirs) mrproper archmrproper 1363 + PHONY += $(mrproper-dirs) mrproper 1364 1364 $(mrproper-dirs): 1365 1365 $(Q)$(MAKE) $(clean)=$(patsubst _mrproper_%,%,$@) 1366 1366 1367 - mrproper: clean archmrproper $(mrproper-dirs) 1367 + mrproper: clean $(mrproper-dirs) 1368 1368 $(call cmd,rmdirs) 1369 1369 $(call cmd,rmfiles) 1370 1370
+26 -5
arch/arm/boot/dts/da850-evm.dts
··· 94 94 regulator-boot-on; 95 95 }; 96 96 97 + baseboard_3v3: fixedregulator-3v3 { 98 + /* TPS73701DCQ */ 99 + compatible = "regulator-fixed"; 100 + regulator-name = "baseboard_3v3"; 101 + regulator-min-microvolt = <3300000>; 102 + regulator-max-microvolt = <3300000>; 103 + vin-supply = <&vbat>; 104 + regulator-always-on; 105 + regulator-boot-on; 106 + }; 107 + 108 + baseboard_1v8: fixedregulator-1v8 { 109 + /* TPS73701DCQ */ 110 + compatible = "regulator-fixed"; 111 + regulator-name = "baseboard_1v8"; 112 + regulator-min-microvolt = <1800000>; 113 + regulator-max-microvolt = <1800000>; 114 + vin-supply = <&vbat>; 115 + regulator-always-on; 116 + regulator-boot-on; 117 + }; 118 + 97 119 backlight_lcd: backlight-regulator { 98 120 compatible = "regulator-fixed"; 99 121 regulator-name = "lcd_backlight_pwr"; ··· 127 105 128 106 sound { 129 107 compatible = "simple-audio-card"; 130 - simple-audio-card,name = "DA850/OMAP-L138 EVM"; 108 + simple-audio-card,name = "DA850-OMAPL138 EVM"; 131 109 simple-audio-card,widgets = 132 110 "Line", "Line In", 133 111 "Line", "Line Out"; ··· 232 210 233 211 /* Regulators */ 234 212 IOVDD-supply = <&vdcdc2_reg>; 235 - /* Derived from VBAT: Baseboard 3.3V / 1.8V */ 236 - AVDD-supply = <&vbat>; 237 - DRVDD-supply = <&vbat>; 238 - DVDD-supply = <&vbat>; 213 + AVDD-supply = <&baseboard_3v3>; 214 + DRVDD-supply = <&baseboard_3v3>; 215 + DVDD-supply = <&baseboard_1v8>; 239 216 }; 240 217 tca6416: gpio@20 { 241 218 compatible = "ti,tca6416";
+37 -1
arch/arm/boot/dts/da850-lcdk.dts
··· 39 39 }; 40 40 }; 41 41 42 + vcc_5vd: fixedregulator-vcc_5vd { 43 + compatible = "regulator-fixed"; 44 + regulator-name = "vcc_5vd"; 45 + regulator-min-microvolt = <5000000>; 46 + regulator-max-microvolt = <5000000>; 47 + regulator-boot-on; 48 + }; 49 + 50 + vcc_3v3d: fixedregulator-vcc_3v3d { 51 + /* TPS650250 - VDCDC1 */ 52 + compatible = "regulator-fixed"; 53 + regulator-name = "vcc_3v3d"; 54 + regulator-min-microvolt = <3300000>; 55 + regulator-max-microvolt = <3300000>; 56 + vin-supply = <&vcc_5vd>; 57 + regulator-always-on; 58 + regulator-boot-on; 59 + }; 60 + 61 + vcc_1v8d: fixedregulator-vcc_1v8d { 62 + /* TPS650250 - VDCDC2 */ 63 + compatible = "regulator-fixed"; 64 + regulator-name = "vcc_1v8d"; 65 + regulator-min-microvolt = <1800000>; 66 + regulator-max-microvolt = <1800000>; 67 + vin-supply = <&vcc_5vd>; 68 + regulator-always-on; 69 + regulator-boot-on; 70 + }; 71 + 42 72 sound { 43 73 compatible = "simple-audio-card"; 44 - simple-audio-card,name = "DA850/OMAP-L138 LCDK"; 74 + simple-audio-card,name = "DA850-OMAPL138 LCDK"; 45 75 simple-audio-card,widgets = 46 76 "Line", "Line In", 47 77 "Line", "Line Out"; ··· 251 221 compatible = "ti,tlv320aic3106"; 252 222 reg = <0x18>; 253 223 status = "okay"; 224 + 225 + /* Regulators */ 226 + IOVDD-supply = <&vcc_3v3d>; 227 + AVDD-supply = <&vcc_3v3d>; 228 + DRVDD-supply = <&vcc_3v3d>; 229 + DVDD-supply = <&vcc_1v8d>; 254 230 }; 255 231 }; 256 232
+2 -2
arch/arm/boot/dts/kirkwood-dnskw.dtsi
··· 36 36 compatible = "gpio-fan"; 37 37 pinctrl-0 = <&pmx_fan_high_speed &pmx_fan_low_speed>; 38 38 pinctrl-names = "default"; 39 - gpios = <&gpio1 14 GPIO_ACTIVE_LOW 40 - &gpio1 13 GPIO_ACTIVE_LOW>; 39 + gpios = <&gpio1 14 GPIO_ACTIVE_HIGH 40 + &gpio1 13 GPIO_ACTIVE_HIGH>; 41 41 gpio-fan,speed-map = <0 0 42 42 3000 1 43 43 6000 2>;
+2 -2
arch/arm/mach-davinci/board-da830-evm.c
··· 208 208 .dev_id = "da830-mmc.0", 209 209 .table = { 210 210 /* gpio chip 1 contains gpio range 32-63 */ 211 - GPIO_LOOKUP("davinci_gpio.0", DA830_MMCSD_CD_PIN, "cd", 211 + GPIO_LOOKUP("davinci_gpio", DA830_MMCSD_CD_PIN, "cd", 212 212 GPIO_ACTIVE_LOW), 213 - GPIO_LOOKUP("davinci_gpio.0", DA830_MMCSD_WP_PIN, "wp", 213 + GPIO_LOOKUP("davinci_gpio", DA830_MMCSD_WP_PIN, "wp", 214 214 GPIO_ACTIVE_LOW), 215 215 }, 216 216 };
+2 -2
arch/arm/mach-davinci/board-da850-evm.c
··· 805 805 .dev_id = "da830-mmc.0", 806 806 .table = { 807 807 /* gpio chip 2 contains gpio range 64-95 */ 808 - GPIO_LOOKUP("davinci_gpio.0", DA850_MMCSD_CD_PIN, "cd", 808 + GPIO_LOOKUP("davinci_gpio", DA850_MMCSD_CD_PIN, "cd", 809 809 GPIO_ACTIVE_LOW), 810 - GPIO_LOOKUP("davinci_gpio.0", DA850_MMCSD_WP_PIN, "wp", 810 + GPIO_LOOKUP("davinci_gpio", DA850_MMCSD_WP_PIN, "wp", 811 811 GPIO_ACTIVE_HIGH), 812 812 }, 813 813 };
+2 -2
arch/arm/mach-davinci/board-dm355-evm.c
··· 117 117 static struct gpiod_lookup_table i2c_recovery_gpiod_table = { 118 118 .dev_id = "i2c_davinci.1", 119 119 .table = { 120 - GPIO_LOOKUP("davinci_gpio.0", DM355_I2C_SDA_PIN, "sda", 120 + GPIO_LOOKUP("davinci_gpio", DM355_I2C_SDA_PIN, "sda", 121 121 GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN), 122 - GPIO_LOOKUP("davinci_gpio.0", DM355_I2C_SCL_PIN, "scl", 122 + GPIO_LOOKUP("davinci_gpio", DM355_I2C_SCL_PIN, "scl", 123 123 GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN), 124 124 }, 125 125 };
+2 -2
arch/arm/mach-davinci/board-dm644x-evm.c
··· 660 660 static struct gpiod_lookup_table i2c_recovery_gpiod_table = { 661 661 .dev_id = "i2c_davinci.1", 662 662 .table = { 663 - GPIO_LOOKUP("davinci_gpio.0", DM644X_I2C_SDA_PIN, "sda", 663 + GPIO_LOOKUP("davinci_gpio", DM644X_I2C_SDA_PIN, "sda", 664 664 GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN), 665 - GPIO_LOOKUP("davinci_gpio.0", DM644X_I2C_SCL_PIN, "scl", 665 + GPIO_LOOKUP("davinci_gpio", DM644X_I2C_SCL_PIN, "scl", 666 666 GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN), 667 667 }, 668 668 };
+2 -2
arch/arm/mach-davinci/board-omapl138-hawk.c
··· 134 134 static struct gpiod_lookup_table mmc_gpios_table = { 135 135 .dev_id = "da830-mmc.0", 136 136 .table = { 137 - GPIO_LOOKUP("davinci_gpio.0", DA850_HAWK_MMCSD_CD_PIN, "cd", 137 + GPIO_LOOKUP("davinci_gpio", DA850_HAWK_MMCSD_CD_PIN, "cd", 138 138 GPIO_ACTIVE_LOW), 139 - GPIO_LOOKUP("davinci_gpio.0", DA850_HAWK_MMCSD_WP_PIN, "wp", 139 + GPIO_LOOKUP("davinci_gpio", DA850_HAWK_MMCSD_WP_PIN, "wp", 140 140 GPIO_ACTIVE_LOW), 141 141 }, 142 142 };
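All five DaVinci board fixes above correct the same bug: the first argument of GPIO_LOOKUP() must match the gpiochip label, which the davinci GPIO driver registers as "davinci_gpio", not the platform-device name "davinci_gpio.0". With a stale label the lookup silently finds nothing and the consumer's probe fails. A minimal sketch of the pattern, with an illustrative pin number:

#include <linux/err.h>
#include <linux/gpio/consumer.h>
#include <linux/gpio/machine.h>

/* Board code: bind pin 31 of the chip labelled "davinci_gpio" to the
 * "cd" (card-detect) function of the consumer device "da830-mmc.0".
 * The pin number is made up for the example. */
static struct gpiod_lookup_table example_mmc_gpios = {
	.dev_id = "da830-mmc.0",
	.table = {
		GPIO_LOOKUP("davinci_gpio", 31, "cd", GPIO_ACTIVE_LOW),
		{ },
	},
};

/* Driver code: the descriptor is resolved by function name through the
 * table above, so a mismatched chip label shows up here as an error. */
static int example_probe(struct device *dev)
{
	struct gpio_desc *cd = devm_gpiod_get(dev, "cd", GPIOD_IN);

	return PTR_ERR_OR_ZERO(cd);
}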
+6 -2
arch/arm/mach-integrator/impd1.c
··· 390 390 char *mmciname; 391 391 392 392 lookup = devm_kzalloc(&dev->dev, 393 - sizeof(*lookup) + 3 * sizeof(struct gpiod_lookup), 393 + struct_size(lookup, table, 3), 394 394 GFP_KERNEL); 395 395 chipname = devm_kstrdup(&dev->dev, devname, GFP_KERNEL); 396 - mmciname = kasprintf(GFP_KERNEL, "lm%x:00700", dev->id); 396 + mmciname = devm_kasprintf(&dev->dev, GFP_KERNEL, 397 + "lm%x:00700", dev->id); 398 + if (!lookup || !chipname || !mmciname) 399 + return -ENOMEM; 400 + 397 401 lookup->dev_id = mmciname; 398 402 /* 399 403 * Offsets on GPIO block 1:
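Alongside the new allocation-failure checks and the move to devm_kasprintf() (so the name is released with the device), this impd1.c change swaps the open-coded size computation for struct_size(), which sizes a structure ending in a flexible array while saturating on arithmetic overflow instead of wrapping. A sketch of the idiom with an illustrative struct:

#include <linux/overflow.h>
#include <linux/slab.h>

struct example_tbl {
	int count;
	int vals[];			/* flexible array member */
};

static struct example_tbl *example_alloc(int n)
{
	struct example_tbl *t;

	/* Same as sizeof(*t) + n * sizeof(t->vals[0]), except that an
	 * overflowing n yields SIZE_MAX and the allocation fails cleanly. */
	t = kzalloc(struct_size(t, vals, n), GFP_KERNEL);
	if (t)
		t->count = n;
	return t;
}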
+4
arch/arm/mach-socfpga/socfpga.c
··· 32 32 void __iomem *sdr_ctl_base_addr; 33 33 unsigned long socfpga_cpu1start_addr; 34 34 35 + extern void __init socfpga_reset_init(void); 36 + 35 37 static void __init socfpga_sysmgr_init(void) 36 38 { 37 39 struct device_node *np; ··· 66 64 67 65 if (IS_ENABLED(CONFIG_EDAC_ALTERA_OCRAM)) 68 66 socfpga_init_ocram_ecc(); 67 + socfpga_reset_init(); 69 68 } 70 69 71 70 static void __init socfpga_arria10_init_irq(void) ··· 77 74 socfpga_init_arria10_l2_ecc(); 78 75 if (IS_ENABLED(CONFIG_EDAC_ALTERA_OCRAM)) 79 76 socfpga_init_arria10_ocram_ecc(); 77 + socfpga_reset_init(); 80 78 } 81 79 82 80 static void socfpga_cyclone5_restart(enum reboot_mode mode, const char *cmd)
+1 -1
arch/arm64/boot/dts/marvell/armada-8040-mcbin.dtsi
··· 183 183 pinctrl-0 = <&cp0_pcie_pins>; 184 184 num-lanes = <4>; 185 185 num-viewport = <8>; 186 - reset-gpio = <&cp0_gpio1 20 GPIO_ACTIVE_LOW>; 186 + reset-gpios = <&cp0_gpio2 20 GPIO_ACTIVE_LOW>; 187 187 status = "okay"; 188 188 }; 189 189
+17
arch/arm64/boot/dts/marvell/armada-ap806.dtsi
··· 28 28 method = "smc"; 29 29 }; 30 30 31 + reserved-memory { 32 + #address-cells = <2>; 33 + #size-cells = <2>; 34 + ranges; 35 + 36 + /* 37 + * This area matches the mapping done with a 38 + * mainline U-Boot, and should be updated by the 39 + * bootloader. 40 + */ 41 + 42 + psci-area@4000000 { 43 + reg = <0x0 0x4000000 0x0 0x200000>; 44 + no-map; 45 + }; 46 + }; 47 + 31 48 ap806 { 32 49 #address-cells = <2>; 33 50 #size-cells = <2>;
+4
arch/arm64/configs/defconfig
··· 506 506 CONFIG_SND_SOC_ROCKCHIP_SPDIF=m 507 507 CONFIG_SND_SOC_ROCKCHIP_RT5645=m 508 508 CONFIG_SND_SOC_RK3399_GRU_SOUND=m 509 + CONFIG_SND_MESON_AXG_SOUND_CARD=m 509 510 CONFIG_SND_SOC_SAMSUNG=y 510 511 CONFIG_SND_SOC_RCAR=m 511 512 CONFIG_SND_SOC_AK4613=m 512 513 CONFIG_SND_SIMPLE_CARD=m 513 514 CONFIG_SND_AUDIO_GRAPH_CARD=m 515 + CONFIG_SND_SOC_ES7134=m 516 + CONFIG_SND_SOC_ES7241=m 517 + CONFIG_SND_SOC_TAS571X=m 514 518 CONFIG_I2C_HID=m 515 519 CONFIG_USB=y 516 520 CONFIG_USB_OTG=y
+1 -1
arch/arm64/include/asm/asm-prototypes.h
··· 2 2 #ifndef __ASM_PROTOTYPES_H 3 3 #define __ASM_PROTOTYPES_H 4 4 /* 5 - * CONFIG_MODEVERIONS requires a C declaration to generate the appropriate CRC 5 + * CONFIG_MODVERSIONS requires a C declaration to generate the appropriate CRC 6 6 * for each symbol. Since commit: 7 7 * 8 8 * 4efca4ed05cbdfd1 ("kbuild: modversions for EXPORT_SYMBOL() for asm")
+4
arch/arm64/include/asm/cache.h
··· 58 58 */ 59 59 #define ARCH_DMA_MINALIGN (128) 60 60 61 + #ifdef CONFIG_KASAN_SW_TAGS 62 + #define ARCH_SLAB_MINALIGN (1ULL << KASAN_SHADOW_SCALE_SHIFT) 63 + #endif 64 + 61 65 #ifndef __ASSEMBLY__ 62 66 63 67 #include <linux/bitops.h>
+44
arch/arm64/include/asm/mmu.h
··· 16 16 #ifndef __ASM_MMU_H 17 17 #define __ASM_MMU_H 18 18 19 + #include <asm/cputype.h> 20 + 19 21 #define MMCF_AARCH32 0x1 /* mm context flag for AArch32 executables */ 20 22 #define USER_ASID_BIT 48 21 23 #define USER_ASID_FLAG (UL(1) << USER_ASID_BIT) ··· 44 42 { 45 43 return IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0) && 46 44 cpus_have_const_cap(ARM64_UNMAP_KERNEL_AT_EL0); 45 + } 46 + 47 + static inline bool arm64_kernel_use_ng_mappings(void) 48 + { 49 + bool tx1_bug; 50 + 51 + /* What's a kpti? Use global mappings if we don't know. */ 52 + if (!IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0)) 53 + return false; 54 + 55 + /* 56 + * Note: this function is called before the CPU capabilities have 57 + * been configured, so our early mappings will be global. If we 58 + * later determine that kpti is required, then 59 + * kpti_install_ng_mappings() will make them non-global. 60 + */ 61 + if (arm64_kernel_unmapped_at_el0()) 62 + return true; 63 + 64 + if (!IS_ENABLED(CONFIG_RANDOMIZE_BASE)) 65 + return false; 66 + 67 + /* 68 + * KASLR is enabled so we're going to be enabling kpti on non-broken 69 + * CPUs regardless of their susceptibility to Meltdown. Rather 70 + * than force everybody to go through the G -> nG dance later on, 71 + * just put down non-global mappings from the beginning. 72 + */ 73 + if (!IS_ENABLED(CONFIG_CAVIUM_ERRATUM_27456)) { 74 + tx1_bug = false; 75 + #ifndef MODULE 76 + } else if (!static_branch_likely(&arm64_const_caps_ready)) { 77 + extern const struct midr_range cavium_erratum_27456_cpus[]; 78 + 79 + tx1_bug = is_midr_in_range_list(read_cpuid_id(), 80 + cavium_erratum_27456_cpus); 81 + #endif 82 + } else { 83 + tx1_bug = __cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_27456); 84 + } 85 + 86 + return !tx1_bug && kaslr_offset() > 0; 47 87 } 48 88 49 89 typedef void (*bp_hardening_cb_t)(void);
+2 -2
arch/arm64/include/asm/pgtable-prot.h
··· 37 37 #define _PROT_DEFAULT (PTE_TYPE_PAGE | PTE_AF | PTE_SHARED) 38 38 #define _PROT_SECT_DEFAULT (PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S) 39 39 40 - #define PTE_MAYBE_NG (arm64_kernel_unmapped_at_el0() ? PTE_NG : 0) 41 - #define PMD_MAYBE_NG (arm64_kernel_unmapped_at_el0() ? PMD_SECT_NG : 0) 40 + #define PTE_MAYBE_NG (arm64_kernel_use_ng_mappings() ? PTE_NG : 0) 41 + #define PMD_MAYBE_NG (arm64_kernel_use_ng_mappings() ? PMD_SECT_NG : 0) 42 42 43 43 #define PROT_DEFAULT (_PROT_DEFAULT | PTE_MAYBE_NG) 44 44 #define PROT_SECT_DEFAULT (_PROT_SECT_DEFAULT | PMD_MAYBE_NG)
+1 -1
arch/arm64/kernel/cpu_errata.c
··· 553 553 #endif 554 554 555 555 #ifdef CONFIG_CAVIUM_ERRATUM_27456 556 - static const struct midr_range cavium_erratum_27456_cpus[] = { 556 + const struct midr_range cavium_erratum_27456_cpus[] = { 557 557 /* Cavium ThunderX, T88 pass 1.x - 2.1 */ 558 558 MIDR_RANGE(MIDR_THUNDERX, 0, 0, 1, 1), 559 559 /* Cavium ThunderX, T81 pass 1.0 */
+7 -2
arch/arm64/kernel/cpufeature.c
··· 983 983 984 984 /* Useful for KASLR robustness */ 985 985 if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) 986 - return true; 986 + return kaslr_offset() > 0; 987 987 988 988 /* Don't force KPTI for CPUs that are not vulnerable */ 989 989 if (is_midr_in_range_list(read_cpuid_id(), kpti_safe_list)) ··· 1003 1003 static bool kpti_applied = false; 1004 1004 int cpu = smp_processor_id(); 1005 1005 1006 - if (kpti_applied) 1006 + /* 1007 + * We don't need to rewrite the page-tables if either we've done 1008 + * it already or we have KASLR enabled and therefore have not 1009 + * created any global mappings at all. 1010 + */ 1011 + if (kpti_applied || kaslr_offset() > 0) 1007 1012 return; 1008 1013 1009 1014 remap_fn = (void *)__pa_symbol(idmap_kpti_install_ng_mappings);
+1
arch/arm64/kernel/head.S
··· 475 475 476 476 ENTRY(kimage_vaddr) 477 477 .quad _text - TEXT_OFFSET 478 + EXPORT_SYMBOL(kimage_vaddr) 478 479 479 480 /* 480 481 * If we're fortunate enough to boot at EL2, ensure that the world is
+6 -2
arch/arm64/kernel/kaslr.c
··· 14 14 #include <linux/sched.h> 15 15 #include <linux/types.h> 16 16 17 + #include <asm/cacheflush.h> 17 18 #include <asm/fixmap.h> 18 19 #include <asm/kernel-pgtable.h> 19 20 #include <asm/memory.h> ··· 44 43 return ret; 45 44 } 46 45 47 - static __init const u8 *get_cmdline(void *fdt) 46 + static __init const u8 *kaslr_get_cmdline(void *fdt) 48 47 { 49 48 static __initconst const u8 default_cmdline[] = CONFIG_CMDLINE; 50 49 ··· 110 109 * Check if 'nokaslr' appears on the command line, and 111 110 * return 0 if that is the case. 112 111 */ 113 - cmdline = get_cmdline(fdt); 112 + cmdline = kaslr_get_cmdline(fdt); 114 113 str = strstr(cmdline, "nokaslr"); 115 114 if (str == cmdline || (str > cmdline && *(str - 1) == ' ')) 116 115 return 0; ··· 169 168 /* use the lower 21 bits to randomize the base of the module region */ 170 169 module_alloc_base += (module_range * (seed & ((1 << 21) - 1))) >> 21; 171 170 module_alloc_base &= PAGE_MASK; 171 + 172 + __flush_dcache_area(&module_alloc_base, sizeof(module_alloc_base)); 173 + __flush_dcache_area(&memstart_offset_seed, sizeof(memstart_offset_seed)); 172 174 173 175 return offset; 174 176 }
+3 -1
arch/arm64/kernel/machine_kexec_file.c
··· 87 87 88 88 /* add kaslr-seed */ 89 89 ret = fdt_delprop(dtb, off, FDT_PROP_KASLR_SEED); 90 - if (ret && (ret != -FDT_ERR_NOTFOUND)) 90 + if (ret == -FDT_ERR_NOTFOUND) 91 + ret = 0; 92 + else if (ret) 91 93 goto out; 92 94 93 95 if (rng_is_initialized()) {
+1
arch/c6x/include/uapi/asm/Kbuild
··· 1 1 include include/uapi/asm-generic/Kbuild.asm 2 2 3 3 generic-y += kvm_para.h 4 + generic-y += shmparam.h 4 5 generic-y += ucontext.h
+25
arch/csky/include/asm/io.h
··· 15 15 extern int remap_area_pages(unsigned long address, phys_addr_t phys_addr, 16 16 size_t size, unsigned long flags); 17 17 18 + /* 19 + * I/O memory access primitives. Reads are ordered relative to any 20 + * following Normal memory access. Writes are ordered relative to any prior 21 + * Normal memory access. 22 + * 23 + * For CACHEV1 (807, 810), store instruction could fast retire, so we need 24 + * another mb() to prevent st fast retire. 25 + * 26 + * For CACHEV2 (860), store instruction with PAGE_ATTR_NO_BUFFERABLE won't 27 + * fast retire. 28 + */ 29 + #define readb(c) ({ u8 __v = readb_relaxed(c); rmb(); __v; }) 30 + #define readw(c) ({ u16 __v = readw_relaxed(c); rmb(); __v; }) 31 + #define readl(c) ({ u32 __v = readl_relaxed(c); rmb(); __v; }) 32 + 33 + #ifdef CONFIG_CPU_HAS_CACHEV2 34 + #define writeb(v,c) ({ wmb(); writeb_relaxed((v),(c)); }) 35 + #define writew(v,c) ({ wmb(); writew_relaxed((v),(c)); }) 36 + #define writel(v,c) ({ wmb(); writel_relaxed((v),(c)); }) 37 + #else 38 + #define writeb(v,c) ({ wmb(); writeb_relaxed((v),(c)); mb(); }) 39 + #define writew(v,c) ({ wmb(); writew_relaxed((v),(c)); mb(); }) 40 + #define writel(v,c) ({ wmb(); writel_relaxed((v),(c)); mb(); }) 41 + #endif 42 + 18 43 #define ioremap_nocache(phy, sz) ioremap(phy, sz) 19 44 #define ioremap_wc ioremap_nocache 20 45 #define ioremap_wt ioremap_nocache
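These csky accessors give plain readl()/writel() the usual Linux MMIO ordering semantics by pairing each relaxed access with the appropriate barrier. A hypothetical driver fragment that depends on exactly this guarantee: the DONE flag must be observed before the CPU loads from a buffer the device has just written by DMA. The register offset and bit position are invented for the sketch:

#include <linux/bits.h>
#include <linux/errno.h>
#include <linux/io.h>

#define EXAMPLE_STATUS		0x04	/* hypothetical register */
#define EXAMPLE_STATUS_DONE	BIT(0)	/* hypothetical flag */

static int example_read_result(void __iomem *regs, const u32 *dma_buf,
			       u32 *out)
{
	if (!(readl(regs + EXAMPLE_STATUS) & EXAMPLE_STATUS_DONE))
		return -EAGAIN;

	/* Safe: the rmb() folded into readl() orders the status read
	 * before this load from normal memory. */
	*out = *dma_buf;
	return 0;
}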
+17 -24
arch/csky/include/asm/pgalloc.h
··· 24 24 25 25 extern void pgd_init(unsigned long *p); 26 26 27 - static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, 28 - unsigned long address) 27 + static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm) 29 28 { 30 29 pte_t *pte; 31 - unsigned long *kaddr, i; 30 + unsigned long i; 32 31 33 - pte = (pte_t *) __get_free_pages(GFP_KERNEL | __GFP_RETRY_MAYFAIL, 34 - PTE_ORDER); 35 - kaddr = (unsigned long *)pte; 36 - if (address & 0x80000000) 37 - for (i = 0; i < (PAGE_SIZE/4); i++) 38 - *(kaddr + i) = 0x1; 39 - else 40 - clear_page(kaddr); 32 + pte = (pte_t *) __get_free_page(GFP_KERNEL); 33 + if (!pte) 34 + return NULL; 35 + 36 + for (i = 0; i < PAGE_SIZE/sizeof(pte_t); i++) 37 + (pte + i)->pte_low = _PAGE_GLOBAL; 41 38 42 39 return pte; 43 40 } 44 41 45 - static inline struct page *pte_alloc_one(struct mm_struct *mm, 46 - unsigned long address) 42 + static inline struct page *pte_alloc_one(struct mm_struct *mm) 47 43 { 48 44 struct page *pte; 49 - unsigned long *kaddr, i; 50 45 51 - pte = alloc_pages(GFP_KERNEL | __GFP_RETRY_MAYFAIL, PTE_ORDER); 52 - if (pte) { 53 - kaddr = kmap_atomic(pte); 54 - if (address & 0x80000000) { 55 - for (i = 0; i < (PAGE_SIZE/4); i++) 56 - *(kaddr + i) = 0x1; 57 - } else 58 - clear_page(kaddr); 59 - kunmap_atomic(kaddr); 60 - pgtable_page_ctor(pte); 46 + pte = alloc_pages(GFP_KERNEL | __GFP_ZERO, 0); 47 + if (!pte) 48 + return NULL; 49 + 50 + if (!pgtable_page_ctor(pte)) { 51 + __free_page(pte); 52 + return NULL; 61 53 } 54 + 62 55 return pte; 63 56 } 64 57
+22 -16
arch/csky/kernel/module.c
··· 12 12 #include <linux/spinlock.h> 13 13 #include <asm/pgtable.h> 14 14 15 - #if defined(__CSKYABIV2__) 15 + #ifdef CONFIG_CPU_CK810 16 16 #define IS_BSR32(hi16, lo16) (((hi16) & 0xFC00) == 0xE000) 17 17 #define IS_JSRI32(hi16, lo16) ((hi16) == 0xEAE0) 18 18 ··· 25 25 *(uint16_t *)(addr) = 0xE8Fa; \ 26 26 *((uint16_t *)(addr) + 1) = 0x0000; \ 27 27 } while (0) 28 + 29 + static void jsri_2_lrw_jsr(uint32_t *location) 30 + { 31 + uint16_t *location_tmp = (uint16_t *)location; 32 + 33 + if (IS_BSR32(*location_tmp, *(location_tmp + 1))) 34 + return; 35 + 36 + if (IS_JSRI32(*location_tmp, *(location_tmp + 1))) { 37 + /* jsri 0x... --> lrw r26, 0x... */ 38 + CHANGE_JSRI_TO_LRW(location); 39 + /* lsli r0, r0 --> jsr r26 */ 40 + SET_JSR32_R26(location + 1); 41 + } 42 + } 43 + #else 44 + static void inline jsri_2_lrw_jsr(uint32_t *location) 45 + { 46 + return; 47 + } 28 48 #endif 29 49 30 50 int apply_relocate_add(Elf32_Shdr *sechdrs, const char *strtab, ··· 55 35 Elf32_Sym *sym; 56 36 uint32_t *location; 57 37 short *temp; 58 - #if defined(__CSKYABIV2__) 59 - uint16_t *location_tmp; 60 - #endif 61 38 62 39 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) { 63 40 /* This is where to make the change */ ··· 76 59 case R_CSKY_PCRELJSR_IMM11BY2: 77 60 break; 78 61 case R_CSKY_PCRELJSR_IMM26BY2: 79 - #if defined(__CSKYABIV2__) 80 - location_tmp = (uint16_t *)location; 81 - if (IS_BSR32(*location_tmp, *(location_tmp + 1))) 82 - break; 83 - 84 - if (IS_JSRI32(*location_tmp, *(location_tmp + 1))) { 85 - /* jsri 0x... --> lrw r26, 0x... */ 86 - CHANGE_JSRI_TO_LRW(location); 87 - /* lsli r0, r0 --> jsr r26 */ 88 - SET_JSR32_R26(location + 1); 89 - } 90 - #endif 62 + jsri_2_lrw_jsr(location); 91 63 break; 92 64 case R_CSKY_ADDR_HI16: 93 65 temp = ((short *)location) + 1;
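The module.c change is behaviour-preserving on CK810: the fixup moves into jsri_2_lrw_jsr() and an empty static inline stands in when CONFIG_CPU_CK810 is unset, so the relocation loop in apply_relocate_add() no longer needs an #ifdef of its own. The general shape of this common kernel idiom, with illustrative names:

#ifdef CONFIG_EXAMPLE_QUIRK
static void example_fixup(uint32_t *location)
{
	/* real fixup work, only built when the quirk is configured */
}
#else
static inline void example_fixup(uint32_t *location)
{
	/* no-op stub: callers stay free of conditional compilation */
}
#endif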
-2
arch/h8300/Makefile
··· 37 37 38 38 boot := arch/h8300/boot 39 39 40 - archmrproper: 41 - 42 40 archclean: 43 41 $(Q)$(MAKE) $(clean)=$(boot) 44 42
+1
arch/h8300/include/uapi/asm/Kbuild
··· 1 1 include include/uapi/asm-generic/Kbuild.asm 2 2 3 3 generic-y += kvm_para.h 4 + generic-y += shmparam.h 4 5 generic-y += ucontext.h
+1
arch/hexagon/include/uapi/asm/Kbuild
··· 1 1 include include/uapi/asm-generic/Kbuild.asm 2 2 3 + generic-y += shmparam.h 3 4 generic-y += ucontext.h
-2
arch/ia64/Makefile
··· 16 16 NM := $(CROSS_COMPILE)nm -B 17 17 READELF := $(CROSS_COMPILE)readelf 18 18 19 - export AWK 20 - 21 19 CHECKFLAGS += -D__ia64=1 -D__ia64__=1 -D_LP64 -D__LP64__ 22 20 23 21 OBJCOPYFLAGS := --strip-all
+1
arch/m68k/include/uapi/asm/Kbuild
··· 2 2 3 3 generated-y += unistd_32.h 4 4 generic-y += kvm_para.h 5 + generic-y += shmparam.h
+1
arch/microblaze/include/uapi/asm/Kbuild
··· 2 2 3 3 generated-y += unistd_32.h 4 4 generic-y += kvm_para.h 5 + generic-y += shmparam.h 5 6 generic-y += ucontext.h
+1
arch/mips/Kconfig
··· 3155 3155 config MIPS32_N32 3156 3156 bool "Kernel support for n32 binaries" 3157 3157 depends on 64BIT 3158 + select ARCH_WANT_COMPAT_IPC_PARSE_VERSION 3158 3159 select COMPAT 3159 3160 select MIPS32_COMPAT 3160 3161 select SYSVIPC_COMPAT if SYSVIPC
+31
arch/mips/bcm47xx/setup.c
··· 173 173 pm_power_off = bcm47xx_machine_halt; 174 174 } 175 175 176 + #ifdef CONFIG_BCM47XX_BCMA 177 + static struct device * __init bcm47xx_setup_device(void) 178 + { 179 + struct device *dev; 180 + int err; 181 + 182 + dev = kzalloc(sizeof(*dev), GFP_KERNEL); 183 + if (!dev) 184 + return NULL; 185 + 186 + err = dev_set_name(dev, "bcm47xx_soc"); 187 + if (err) { 188 + pr_err("Failed to set SoC device name: %d\n", err); 189 + kfree(dev); 190 + return NULL; 191 + } 192 + 193 + err = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(32)); 194 + if (err) 195 + pr_err("Failed to set SoC DMA mask: %d\n", err); 196 + 197 + return dev; 198 + } 199 + #endif 200 + 176 201 /* 177 202 * This finishes bus initialization doing things that were not possible without 178 203 * kmalloc. Make sure to call it late enough (after mm_init). ··· 207 182 #ifdef CONFIG_BCM47XX_BCMA 208 183 if (bcm47xx_bus_type == BCM47XX_BUS_TYPE_BCMA) { 209 184 int err; 185 + 186 + bcm47xx_bus.bcma.dev = bcm47xx_setup_device(); 187 + if (!bcm47xx_bus.bcma.dev) 188 + panic("Failed to setup SoC device\n"); 210 189 211 190 err = bcma_host_soc_init(&bcm47xx_bus.bcma); 212 191 if (err) ··· 264 235 #endif 265 236 #ifdef CONFIG_BCM47XX_BCMA 266 237 case BCM47XX_BUS_TYPE_BCMA: 238 + if (device_register(bcm47xx_bus.bcma.dev)) 239 + pr_err("Failed to register SoC device\n"); 267 240 bcma_bus_register(&bcm47xx_bus.bcma.bus); 268 241 break; 269 242 #endif
+1 -1
arch/mips/cavium-octeon/setup.c
··· 98 98 " sync \n" 99 99 " synci ($0) \n"); 100 100 101 - relocated_kexec_smp_wait(NULL); 101 + kexec_reboot(); 102 102 } 103 103 #endif 104 104
+1
arch/mips/configs/ath79_defconfig
··· 66 66 # CONFIG_SERIAL_8250_PCI is not set 67 67 CONFIG_SERIAL_8250_NR_UARTS=1 68 68 CONFIG_SERIAL_8250_RUNTIME_UARTS=1 69 + CONFIG_SERIAL_OF_PLATFORM=y 69 70 CONFIG_SERIAL_AR933X=y 70 71 CONFIG_SERIAL_AR933X_CONSOLE=y 71 72 # CONFIG_HW_RANDOM is not set
-2
arch/mips/include/asm/mach-lantiq/falcon/falcon_irq.h
··· 18 18 #define INT_NUM_EXTRA_START (INT_NUM_IM4_IRL0 + 32) 19 19 #define INT_NUM_IM_OFFSET (INT_NUM_IM1_IRL0 - INT_NUM_IM0_IRL0) 20 20 21 - #define MIPS_CPU_TIMER_IRQ 7 22 - 23 21 #define MAX_IM 5 24 22 25 23 #endif /* _FALCON_IRQ__ */
-2
arch/mips/include/asm/mach-lantiq/xway/lantiq_irq.h
··· 19 19 20 20 #define LTQ_DMA_CH0_INT (INT_NUM_IM2_IRL0) 21 21 22 - #define MIPS_CPU_TIMER_IRQ 7 23 - 24 22 #define MAX_IM 5 25 23 26 24 #endif
+3 -2
arch/mips/jazz/jazzdma.c
··· 74 74 get_order(VDMA_PGTBL_SIZE)); 75 75 BUG_ON(!pgtbl); 76 76 dma_cache_wback_inv((unsigned long)pgtbl, VDMA_PGTBL_SIZE); 77 - pgtbl = (VDMA_PGTBL_ENTRY *)KSEG1ADDR(pgtbl); 77 + pgtbl = (VDMA_PGTBL_ENTRY *)CKSEG1ADDR((unsigned long)pgtbl); 78 78 79 79 /* 80 80 * Clear the R4030 translation table 81 81 */ 82 82 vdma_pgtbl_init(); 83 83 84 - r4030_write_reg32(JAZZ_R4030_TRSTBL_BASE, CPHYSADDR(pgtbl)); 84 + r4030_write_reg32(JAZZ_R4030_TRSTBL_BASE, 85 + CPHYSADDR((unsigned long)pgtbl)); 85 86 r4030_write_reg32(JAZZ_R4030_TRSTBL_LIM, VDMA_PGTBL_SIZE); 86 87 r4030_write_reg32(JAZZ_R4030_TRSTBL_INV, 0); 87 88
+6 -71
arch/mips/lantiq/irq.c
··· 224 224 .irq_set_type = ltq_eiu_settype, 225 225 }; 226 226 227 - static void ltq_hw_irqdispatch(int module) 227 + static void ltq_hw_irq_handler(struct irq_desc *desc) 228 228 { 229 + int module = irq_desc_get_irq(desc) - 2; 229 230 u32 irq; 231 + int hwirq; 230 232 231 233 irq = ltq_icu_r32(module, LTQ_ICU_IM0_IOSR); 232 234 if (irq == 0) ··· 239 237 * other bits might be bogus 240 238 */ 241 239 irq = __fls(irq); 242 - do_IRQ((int)irq + MIPS_CPU_IRQ_CASCADE + (INT_NUM_IM_OFFSET * module)); 240 + hwirq = irq + MIPS_CPU_IRQ_CASCADE + (INT_NUM_IM_OFFSET * module); 241 + generic_handle_irq(irq_linear_revmap(ltq_domain, hwirq)); 243 242 244 243 /* if this is a EBU irq, we need to ack it or get a deadlock */ 245 244 if ((irq == LTQ_ICU_EBU_IRQ) && (module == 0) && LTQ_EBU_PCC_ISTAT) 246 245 ltq_ebu_w32(ltq_ebu_r32(LTQ_EBU_PCC_ISTAT) | 0x10, 247 246 LTQ_EBU_PCC_ISTAT); 248 - } 249 - 250 - #define DEFINE_HWx_IRQDISPATCH(x) \ 251 - static void ltq_hw ## x ## _irqdispatch(void) \ 252 - { \ 253 - ltq_hw_irqdispatch(x); \ 254 - } 255 - DEFINE_HWx_IRQDISPATCH(0) 256 - DEFINE_HWx_IRQDISPATCH(1) 257 - DEFINE_HWx_IRQDISPATCH(2) 258 - DEFINE_HWx_IRQDISPATCH(3) 259 - DEFINE_HWx_IRQDISPATCH(4) 260 - 261 - #if MIPS_CPU_TIMER_IRQ == 7 262 - static void ltq_hw5_irqdispatch(void) 263 - { 264 - do_IRQ(MIPS_CPU_TIMER_IRQ); 265 - } 266 - #else 267 - DEFINE_HWx_IRQDISPATCH(5) 268 - #endif 269 - 270 - static void ltq_hw_irq_handler(struct irq_desc *desc) 271 - { 272 - ltq_hw_irqdispatch(irq_desc_get_irq(desc) - 2); 273 - } 274 - 275 - asmlinkage void plat_irq_dispatch(void) 276 - { 277 - unsigned int pending = read_c0_status() & read_c0_cause() & ST0_IM; 278 - int irq; 279 - 280 - if (!pending) { 281 - spurious_interrupt(); 282 - return; 283 - } 284 - 285 - pending >>= CAUSEB_IP; 286 - while (pending) { 287 - irq = fls(pending) - 1; 288 - do_IRQ(MIPS_CPU_IRQ_BASE + irq); 289 - pending &= ~BIT(irq); 290 - } 291 247 } 292 248 293 249 static int icu_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw) ··· 303 343 for (i = 0; i < MAX_IM; i++) 304 344 irq_set_chained_handler(i + 2, ltq_hw_irq_handler); 305 345 306 - if (cpu_has_vint) { 307 - pr_info("Setting up vectored interrupts\n"); 308 - set_vi_handler(2, ltq_hw0_irqdispatch); 309 - set_vi_handler(3, ltq_hw1_irqdispatch); 310 - set_vi_handler(4, ltq_hw2_irqdispatch); 311 - set_vi_handler(5, ltq_hw3_irqdispatch); 312 - set_vi_handler(6, ltq_hw4_irqdispatch); 313 - set_vi_handler(7, ltq_hw5_irqdispatch); 314 - } 315 - 316 346 ltq_domain = irq_domain_add_linear(node, 317 347 (MAX_IM * INT_NUM_IM_OFFSET) + MIPS_CPU_IRQ_CASCADE, 318 348 &irq_domain_ops, 0); 319 349 320 - #ifndef CONFIG_MIPS_MT_SMP 321 - set_c0_status(IE_IRQ0 | IE_IRQ1 | IE_IRQ2 | 322 - IE_IRQ3 | IE_IRQ4 | IE_IRQ5); 323 - #else 324 - set_c0_status(IE_SW0 | IE_SW1 | IE_IRQ0 | IE_IRQ1 | 325 - IE_IRQ2 | IE_IRQ3 | IE_IRQ4 | IE_IRQ5); 326 - #endif 327 - 328 350 /* tell oprofile which irq to use */ 329 351 ltq_perfcount_irq = irq_create_mapping(ltq_domain, LTQ_PERF_IRQ); 330 - 331 - /* 332 - * if the timer irq is not one of the mips irqs we need to 333 - * create a mapping 334 - */ 335 - if (MIPS_CPU_TIMER_IRQ != 7) 336 - irq_create_mapping(ltq_domain, MIPS_CPU_TIMER_IRQ); 337 352 338 353 /* the external interrupts are optional and xway only */ 339 354 eiu_node = of_find_compatible_node(NULL, NULL, "lantiq,eiu-xway"); ··· 346 411 347 412 unsigned int get_c0_compare_int(void) 348 413 { 349 - return MIPS_CPU_TIMER_IRQ; 414 + return CP0_LEGACY_COMPARE_IRQ; 350 415 } 351 416 352 417 static struct of_device_id __initdata of_irq_ids[] = {
+3 -3
arch/mips/lantiq/xway/dma.c
··· 129 129 unsigned long flags; 130 130 131 131 ch->desc = 0; 132 - ch->desc_base = dma_zalloc_coherent(ch->dev, 133 - LTQ_DESC_NUM * LTQ_DESC_SIZE, 134 - &ch->phys, GFP_ATOMIC); 132 + ch->desc_base = dma_alloc_coherent(ch->dev, 133 + LTQ_DESC_NUM * LTQ_DESC_SIZE, 134 + &ch->phys, GFP_ATOMIC); 135 135 136 136 spin_lock_irqsave(&ltq_dma_lock, flags); 137 137 ltq_dma_w32(ch->nr, LTQ_DMA_CS);
+3 -1
arch/mips/pci/msi-octeon.c
··· 369 369 int irq; 370 370 struct irq_chip *msi; 371 371 372 - if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_PCIE) { 372 + if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_INVALID) { 373 + return 0; 374 + } else if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_PCIE) { 373 375 msi_rcv_reg[0] = CVMX_PEXP_NPEI_MSI_RCV0; 374 376 msi_rcv_reg[1] = CVMX_PEXP_NPEI_MSI_RCV1; 375 377 msi_rcv_reg[2] = CVMX_PEXP_NPEI_MSI_RCV2;
-8
arch/nds32/Makefile
··· 3 3 4 4 KBUILD_DEFCONFIG := defconfig 5 5 6 - comma = , 7 - 8 - 9 6 ifdef CONFIG_FUNCTION_TRACER 10 7 arch-y += -malways-save-lp -mno-relax 11 8 endif ··· 51 54 boot := arch/nds32/boot 52 55 core-y += $(boot)/dts/ 53 56 54 - .PHONY: FORCE 55 - 56 57 Image: vmlinux 57 58 $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@ 58 59 ··· 63 68 vdso_prepare: prepare0 64 69 $(Q)$(MAKE) $(build)=arch/nds32/kernel/vdso include/generated/vdso-offsets.h 65 70 66 - CLEAN_FILES += include/asm-nds32/constants.h* 67 - 68 - # We use MRPROPER_FILES and CLEAN_FILES now 69 71 archclean: 70 72 $(Q)$(MAKE) $(clean)=$(boot) 71 73
-3
arch/openrisc/Makefile
··· 20 20 KBUILD_DEFCONFIG := or1ksim_defconfig 21 21 22 22 OBJCOPYFLAGS := -O binary -R .note -R .comment -S 23 - LDFLAGS_vmlinux := 24 23 LIBGCC := $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name) 25 24 26 25 KBUILD_CFLAGS += -pipe -ffixed-r10 -D__linux__ ··· 49 50 BUILTIN_DTB := n 50 51 endif 51 52 core-$(BUILTIN_DTB) += arch/openrisc/boot/dts/ 52 - 53 - all: vmlinux
+6 -2
arch/openrisc/include/asm/uaccess.h
··· 58 58 /* Ensure that addr is below task's addr_limit */ 59 59 #define __addr_ok(addr) ((unsigned long) addr < get_fs()) 60 60 61 - #define access_ok(addr, size) \ 62 - __range_ok((unsigned long)addr, (unsigned long)size) 61 + #define access_ok(addr, size) \ 62 + ({ \ 63 + unsigned long __ao_addr = (unsigned long)(addr); \ 64 + unsigned long __ao_size = (unsigned long)(size); \ 65 + __range_ok(__ao_addr, __ao_size); \ 66 + }) 63 67 64 68 /* 65 69 * These are the main single-value transfer routines. They automatically
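The openrisc access_ok() rewrite is macro hygiene: the statement-expression form evaluates addr and size exactly once before they reach __range_ok(), so an argument with side effects can no longer be expanded (and executed) more than once. A stripped-down illustration of the difference, with the range check itself simplified:

/* Naive form: "addr" appears twice, so bad_ok(p++, len) would
 * increment p twice when the macro is expanded. */
#define bad_ok(addr, size) \
	((unsigned long)(addr) + (size) >= (unsigned long)(addr))

/* Hygienic form: each argument is evaluated exactly once. */
#define good_ok(addr, size)				\
({							\
	unsigned long __a = (unsigned long)(addr);	\
	unsigned long __s = (unsigned long)(size);	\
	__a + __s >= __a;				\
})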
+1
arch/openrisc/include/uapi/asm/Kbuild
··· 1 1 include include/uapi/asm-generic/Kbuild.asm 2 2 3 3 generic-y += kvm_para.h 4 + generic-y += shmparam.h 4 5 generic-y += ucontext.h
+1
arch/powerpc/include/uapi/asm/perf_regs.h
··· 47 47 PERF_REG_POWERPC_DAR, 48 48 PERF_REG_POWERPC_DSISR, 49 49 PERF_REG_POWERPC_SIER, 50 + PERF_REG_POWERPC_MMCRA, 50 51 PERF_REG_POWERPC_MAX, 51 52 }; 52 53 #endif /* _UAPI_ASM_POWERPC_PERF_REGS_H */
+2 -1
arch/powerpc/kernel/head_8xx.S
··· 852 852 853 853 /* set up the PTE pointers for the Abatron bdiGDB. 854 854 */ 855 - tovirt(r6,r6) 856 855 lis r5, abatron_pteptrs@h 857 856 ori r5, r5, abatron_pteptrs@l 858 857 stw r5, 0xf0(0) /* Must match your Abatron config file */ 859 858 tophys(r5,r5) 859 + lis r6, swapper_pg_dir@h 860 + ori r6, r6, swapper_pg_dir@l 860 861 stw r6, 0(r5) 861 862 862 863 /* Now turn on the MMU for real! */
+4 -3
arch/powerpc/kernel/signal_64.c
··· 755 755 if (restore_tm_sigcontexts(current, &uc->uc_mcontext, 756 756 &uc_transact->uc_mcontext)) 757 757 goto badframe; 758 - } 758 + } else 759 759 #endif 760 - /* Fall through, for non-TM restore */ 761 - if (!MSR_TM_ACTIVE(msr)) { 760 + { 762 761 /* 762 + * Fall through, for non-TM restore 763 + * 763 764 * Unset MSR[TS] on the thread regs since MSR from user 764 765 * context does not have MSR active, and recheckpoint was 765 766 * not called since restore_tm_sigcontexts() was not called
-7
arch/powerpc/kernel/trace/ftrace.c
··· 967 967 } 968 968 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ 969 969 970 - #if defined(CONFIG_FTRACE_SYSCALLS) && defined(CONFIG_PPC64) 971 - unsigned long __init arch_syscall_addr(int nr) 972 - { 973 - return sys_call_table[nr*2]; 974 - } 975 - #endif /* CONFIG_FTRACE_SYSCALLS && CONFIG_PPC64 */ 976 - 977 970 #ifdef PPC64_ELF_ABI_v1 978 971 char *arch_ftrace_match_adjust(char *str, const char *search) 979 972 {
+6
arch/powerpc/perf/perf_regs.c
··· 70 70 PT_REGS_OFFSET(PERF_REG_POWERPC_DAR, dar), 71 71 PT_REGS_OFFSET(PERF_REG_POWERPC_DSISR, dsisr), 72 72 PT_REGS_OFFSET(PERF_REG_POWERPC_SIER, dar), 73 + PT_REGS_OFFSET(PERF_REG_POWERPC_MMCRA, dsisr), 73 74 }; 74 75 75 76 u64 perf_reg_value(struct pt_regs *regs, int idx) ··· 82 81 (IS_ENABLED(CONFIG_FSL_EMB_PERF_EVENT) || 83 82 IS_ENABLED(CONFIG_PPC32) || 84 83 !is_sier_available())) 84 + return 0; 85 + 86 + if (idx == PERF_REG_POWERPC_MMCRA && 87 + (IS_ENABLED(CONFIG_FSL_EMB_PERF_EVENT) || 88 + IS_ENABLED(CONFIG_PPC32))) 85 89 return 0; 86 90 87 91 return regs_get_register(regs, pt_regs_offset[idx]);
+3 -3
arch/powerpc/platforms/4xx/ocm.c
··· 237 237 continue; 238 238 239 239 seq_printf(m, "PPC4XX OCM : %d\n", ocm->index); 240 - seq_printf(m, "PhysAddr : %pa[p]\n", &(ocm->phys)); 240 + seq_printf(m, "PhysAddr : %pa\n", &(ocm->phys)); 241 241 seq_printf(m, "MemTotal : %d Bytes\n", ocm->memtotal); 242 242 seq_printf(m, "MemTotal(NC) : %d Bytes\n", ocm->nc.memtotal); 243 243 seq_printf(m, "MemTotal(C) : %d Bytes\n\n", ocm->c.memtotal); 244 244 245 - seq_printf(m, "NC.PhysAddr : %pa[p]\n", &(ocm->nc.phys)); 245 + seq_printf(m, "NC.PhysAddr : %pa\n", &(ocm->nc.phys)); 246 246 seq_printf(m, "NC.VirtAddr : 0x%p\n", ocm->nc.virt); 247 247 seq_printf(m, "NC.MemTotal : %d Bytes\n", ocm->nc.memtotal); 248 248 seq_printf(m, "NC.MemFree : %d Bytes\n", ocm->nc.memfree); ··· 252 252 blk->size, blk->owner); 253 253 } 254 254 255 - seq_printf(m, "\nC.PhysAddr : %pa[p]\n", &(ocm->c.phys)); 255 + seq_printf(m, "\nC.PhysAddr : %pa\n", &(ocm->c.phys)); 256 256 seq_printf(m, "C.VirtAddr : 0x%p\n", ocm->c.virt); 257 257 seq_printf(m, "C.MemTotal : %d Bytes\n", ocm->c.memtotal); 258 258 seq_printf(m, "C.MemFree : %d Bytes\n", ocm->c.memfree);
+1 -2
arch/powerpc/platforms/chrp/setup.c
··· 538 538 /* see if there is a keyboard in the device tree 539 539 with a parent of type "adb" */ 540 540 for_each_node_by_name(kbd, "keyboard") 541 - if (kbd->parent && kbd->parent->type 542 - && strcmp(kbd->parent->type, "adb") == 0) 541 + if (of_node_is_type(kbd->parent, "adb")) 543 542 break; 544 543 of_node_put(kbd); 545 544 if (kbd)
+1 -1
arch/powerpc/platforms/pasemi/dma_lib.c
··· 255 255 256 256 chan->ring_size = ring_size; 257 257 258 - chan->ring_virt = dma_zalloc_coherent(&dma_pdev->dev, 258 + chan->ring_virt = dma_alloc_coherent(&dma_pdev->dev, 259 259 ring_size * sizeof(u64), 260 260 &chan->ring_dma, GFP_KERNEL); 261 261
+1 -1
arch/powerpc/platforms/powernv/npu-dma.c
··· 564 564 } 565 565 } else { 566 566 /* Create a group for 1 GPU and attached NPUs for POWER8 */ 567 - pe->npucomp = kzalloc(sizeof(pe->npucomp), GFP_KERNEL); 567 + pe->npucomp = kzalloc(sizeof(*pe->npucomp), GFP_KERNEL); 568 568 table_group = &pe->npucomp->table_group; 569 569 table_group->ops = &pnv_npu_peers_ops; 570 570 iommu_register_group(table_group, hose->global_number,
+2 -1
arch/powerpc/platforms/powernv/pci-ioda.c
··· 2681 2681 list_for_each_entry(hose, &hose_list, list_node) { 2682 2682 phb = hose->private_data; 2683 2683 2684 - if (phb->type == PNV_PHB_NPU_NVLINK) 2684 + if (phb->type == PNV_PHB_NPU_NVLINK || 2685 + phb->type == PNV_PHB_NPU_OCAPI) 2685 2686 continue; 2686 2687 2687 2688 list_for_each_entry(pe, &phb->ioda.pe_list, list) {
+2
arch/powerpc/platforms/pseries/pci.c
··· 264 264 if (!of_device_is_compatible(nvdn->parent, 265 265 "ibm,power9-npu")) 266 266 continue; 267 + #ifdef CONFIG_PPC_POWERNV 267 268 WARN_ON_ONCE(pnv_npu2_init(hose)); 269 + #endif 268 270 break; 269 271 } 270 272 }
+4 -3
arch/powerpc/sysdev/fsl_rmu.c
··· 756 756 } 757 757 758 758 /* Initialize outbound message descriptor ring */ 759 - rmu->msg_tx_ring.virt = dma_zalloc_coherent(priv->dev, 760 - rmu->msg_tx_ring.size * RIO_MSG_DESC_SIZE, 761 - &rmu->msg_tx_ring.phys, GFP_KERNEL); 759 + rmu->msg_tx_ring.virt = dma_alloc_coherent(priv->dev, 760 + rmu->msg_tx_ring.size * RIO_MSG_DESC_SIZE, 761 + &rmu->msg_tx_ring.phys, 762 + GFP_KERNEL); 762 763 if (!rmu->msg_tx_ring.virt) { 763 764 rc = -ENOMEM; 764 765 goto out_dma;
+3 -1
arch/riscv/Kconfig
··· 28 28 select GENERIC_STRNLEN_USER 29 29 select GENERIC_SMP_IDLE_THREAD 30 30 select GENERIC_ATOMIC64 if !64BIT || !RISCV_ISA_A 31 + select HAVE_ARCH_AUDITSYSCALL 31 32 select HAVE_MEMBLOCK_NODE_MAP 32 33 select HAVE_DMA_CONTIGUOUS 33 34 select HAVE_FUTEX_CMPXCHG if FUTEX 34 35 select HAVE_GENERIC_DMA_COHERENT 35 36 select HAVE_PERF_EVENTS 37 + select HAVE_SYSCALL_TRACEPOINTS 36 38 select IRQ_DOMAIN 37 39 select RISCV_ISA_A if SMP 38 40 select SPARSE_IRQ ··· 42 40 select HAVE_ARCH_TRACEHOOK 43 41 select HAVE_PCI 44 42 select MODULES_USE_ELF_RELA if MODULES 43 + select MODULE_SECTIONS if MODULES 45 44 select THREAD_INFO_IN_TASK 46 45 select PCI_DOMAINS_GENERIC if PCI 47 46 select PCI_MSI if PCI ··· 155 152 bool "2GiB" 156 153 config MAXPHYSMEM_128GB 157 154 depends on 64BIT && CMODEL_MEDANY 158 - select MODULE_SECTIONS if MODULES 159 155 bool "128GiB" 160 156 endchoice 161 157
+15 -13
arch/riscv/include/asm/module.h
··· 9 9 #define MODULE_ARCH_VERMAGIC "riscv" 10 10 11 11 struct module; 12 - u64 module_emit_got_entry(struct module *mod, u64 val); 13 - u64 module_emit_plt_entry(struct module *mod, u64 val); 12 + unsigned long module_emit_got_entry(struct module *mod, unsigned long val); 13 + unsigned long module_emit_plt_entry(struct module *mod, unsigned long val); 14 14 15 15 #ifdef CONFIG_MODULE_SECTIONS 16 16 struct mod_section { 17 - struct elf64_shdr *shdr; 17 + Elf_Shdr *shdr; 18 18 int num_entries; 19 19 int max_entries; 20 20 }; ··· 26 26 }; 27 27 28 28 struct got_entry { 29 - u64 symbol_addr; /* the real variable address */ 29 + unsigned long symbol_addr; /* the real variable address */ 30 30 }; 31 31 32 - static inline struct got_entry emit_got_entry(u64 val) 32 + static inline struct got_entry emit_got_entry(unsigned long val) 33 33 { 34 34 return (struct got_entry) {val}; 35 35 } 36 36 37 - static inline struct got_entry *get_got_entry(u64 val, 37 + static inline struct got_entry *get_got_entry(unsigned long val, 38 38 const struct mod_section *sec) 39 39 { 40 - struct got_entry *got = (struct got_entry *)sec->shdr->sh_addr; 40 + struct got_entry *got = (struct got_entry *)(sec->shdr->sh_addr); 41 41 int i; 42 42 for (i = 0; i < sec->num_entries; i++) { 43 43 if (got[i].symbol_addr == val) ··· 62 62 #define REG_T0 0x5 63 63 #define REG_T1 0x6 64 64 65 - static inline struct plt_entry emit_plt_entry(u64 val, u64 plt, u64 got_plt) 65 + static inline struct plt_entry emit_plt_entry(unsigned long val, 66 + unsigned long plt, 67 + unsigned long got_plt) 66 68 { 67 69 /* 68 70 * U-Type encoding: ··· 78 76 * +------------+------------+--------+----------+----------+ 79 77 * 80 78 */ 81 - u64 offset = got_plt - plt; 79 + unsigned long offset = got_plt - plt; 82 80 u32 hi20 = (offset + 0x800) & 0xfffff000; 83 81 u32 lo12 = (offset - hi20); 84 82 return (struct plt_entry) { ··· 88 86 }; 89 87 } 90 88 91 - static inline int get_got_plt_idx(u64 val, const struct mod_section *sec) 89 + static inline int get_got_plt_idx(unsigned long val, const struct mod_section *sec) 92 90 { 93 91 struct got_entry *got_plt = (struct got_entry *)sec->shdr->sh_addr; 94 92 int i; ··· 99 97 return -1; 100 98 } 101 99 102 - static inline struct plt_entry *get_plt_entry(u64 val, 103 - const struct mod_section *sec_plt, 104 - const struct mod_section *sec_got_plt) 100 + static inline struct plt_entry *get_plt_entry(unsigned long val, 101 + const struct mod_section *sec_plt, 102 + const struct mod_section *sec_got_plt) 105 103 { 106 104 struct plt_entry *plt = (struct plt_entry *)sec_plt->shdr->sh_addr; 107 105 int got_plt_idx = get_got_plt_idx(val, sec_got_plt);
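The u64 to unsigned long switch is what lets this PLT/GOT code build on RV32 as well as RV64. The hi20/lo12 split in emit_plt_entry() deserves a worked example: adding 0x800 before masking rounds the offset to the nearest 4 KiB boundary, so the low part always fits the signed 12-bit I-type immediate. With an illustrative offset:

unsigned long offset = 0x1fff;
u32 hi20 = (offset + 0x800) & 0xfffff000;	/* 0x27ff & ~0xfff = 0x2000 */
u32 lo12 = (offset - hi20);			/* 0x1fff - 0x2000 = -1 */

/* auipc materialises hi20; the following load adds the sign-extended
 * lo12: 0x2000 + (-1) = 0x1fff, recovering the original offset. Without
 * the +0x800 bias, lo12 could fall outside the -2048..2047 range. */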
+5
arch/riscv/include/asm/ptrace.h
··· 113 113 SET_FP(regs, val); 114 114 } 115 115 116 + static inline unsigned long regs_return_value(struct pt_regs *regs) 117 + { 118 + return regs->a0; 119 + } 120 + 116 121 #endif /* __ASSEMBLY__ */ 117 122 118 123 #endif /* _ASM_RISCV_PTRACE_H */
+10
arch/riscv/include/asm/syscall.h
··· 18 18 #ifndef _ASM_RISCV_SYSCALL_H 19 19 #define _ASM_RISCV_SYSCALL_H 20 20 21 + #include <uapi/linux/audit.h> 21 22 #include <linux/sched.h> 22 23 #include <linux/err.h> 23 24 ··· 98 97 n--; 99 98 } 100 99 memcpy(&regs->a1 + i * sizeof(regs->a1), args, n * sizeof(regs->a0)); 100 + } 101 + 102 + static inline int syscall_get_arch(void) 103 + { 104 + #ifdef CONFIG_64BIT 105 + return AUDIT_ARCH_RISCV64; 106 + #else 107 + return AUDIT_ARCH_RISCV32; 108 + #endif 101 109 } 102 110 103 111 #endif /* _ASM_RISCV_SYSCALL_H */
+6
arch/riscv/include/asm/thread_info.h
··· 80 80 #define TIF_RESTORE_SIGMASK 4 /* restore signal mask in do_signal() */ 81 81 #define TIF_MEMDIE 5 /* is terminating due to OOM killer */ 82 82 #define TIF_SYSCALL_TRACEPOINT 6 /* syscall tracepoint instrumentation */ 83 + #define TIF_SYSCALL_AUDIT 7 /* syscall auditing */ 83 84 84 85 #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) 85 86 #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME) 86 87 #define _TIF_SIGPENDING (1 << TIF_SIGPENDING) 87 88 #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) 89 + #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT) 90 + #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT) 88 91 89 92 #define _TIF_WORK_MASK \ 90 93 (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING | _TIF_NEED_RESCHED) 94 + 95 + #define _TIF_SYSCALL_WORK \ 96 + (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_TRACEPOINT | _TIF_SYSCALL_AUDIT) 91 97 92 98 #endif /* _ASM_RISCV_THREAD_INFO_H */
+2
arch/riscv/include/asm/unistd.h
··· 19 19 #define __ARCH_WANT_SYS_CLONE 20 20 21 21 #include <uapi/asm/unistd.h> 22 + 23 + #define NR_syscalls (__NR_syscalls)
+2 -2
arch/riscv/kernel/entry.S
··· 201 201 REG_S s2, PT_SEPC(sp) 202 202 /* Trace syscalls, but only if requested by the user. */ 203 203 REG_L t0, TASK_TI_FLAGS(tp) 204 - andi t0, t0, _TIF_SYSCALL_TRACE 204 + andi t0, t0, _TIF_SYSCALL_WORK 205 205 bnez t0, handle_syscall_trace_enter 206 206 check_syscall_nr: 207 207 /* Check to make sure we don't jump to a bogus syscall number. */ ··· 221 221 REG_S a0, PT_A0(sp) 222 222 /* Trace syscalls, but only if requested by the user. */ 223 223 REG_L t0, TASK_TI_FLAGS(tp) 224 - andi t0, t0, _TIF_SYSCALL_TRACE 224 + andi t0, t0, _TIF_SYSCALL_WORK 225 225 bnez t0, handle_syscall_trace_exit 226 226 227 227 ret_from_exception:
+16 -14
arch/riscv/kernel/module-sections.c
··· 9 9 #include <linux/kernel.h> 10 10 #include <linux/module.h> 11 11 12 - u64 module_emit_got_entry(struct module *mod, u64 val) 12 + unsigned long module_emit_got_entry(struct module *mod, unsigned long val) 13 13 { 14 14 struct mod_section *got_sec = &mod->arch.got; 15 15 int i = got_sec->num_entries; 16 16 struct got_entry *got = get_got_entry(val, got_sec); 17 17 18 18 if (got) 19 - return (u64)got; 19 + return (unsigned long)got; 20 20 21 21 /* There is no duplicate entry, create a new one */ 22 22 got = (struct got_entry *)got_sec->shdr->sh_addr; ··· 25 25 got_sec->num_entries++; 26 26 BUG_ON(got_sec->num_entries > got_sec->max_entries); 27 27 28 - return (u64)&got[i]; 28 + return (unsigned long)&got[i]; 29 29 } 30 30 31 - u64 module_emit_plt_entry(struct module *mod, u64 val) 31 + unsigned long module_emit_plt_entry(struct module *mod, unsigned long val) 32 32 { 33 33 struct mod_section *got_plt_sec = &mod->arch.got_plt; 34 34 struct got_entry *got_plt; ··· 37 37 int i = plt_sec->num_entries; 38 38 39 39 if (plt) 40 - return (u64)plt; 40 + return (unsigned long)plt; 41 41 42 42 /* There is no duplicate entry, create a new one */ 43 43 got_plt = (struct got_entry *)got_plt_sec->shdr->sh_addr; 44 44 got_plt[i] = emit_got_entry(val); 45 45 plt = (struct plt_entry *)plt_sec->shdr->sh_addr; 46 - plt[i] = emit_plt_entry(val, (u64)&plt[i], (u64)&got_plt[i]); 46 + plt[i] = emit_plt_entry(val, 47 + (unsigned long)&plt[i], 48 + (unsigned long)&got_plt[i]); 47 49 48 50 plt_sec->num_entries++; 49 51 got_plt_sec->num_entries++; 50 52 BUG_ON(plt_sec->num_entries > plt_sec->max_entries); 51 53 52 54 return (unsigned long)&plt[i]; 53 55 } 54 56 55 - static int is_rela_equal(const Elf64_Rela *x, const Elf64_Rela *y) 57 + static int is_rela_equal(const Elf_Rela *x, const Elf_Rela *y) 56 58 { 57 59 return x->r_info == y->r_info && x->r_addend == y->r_addend; 58 60 } 59 61 60 - static bool duplicate_rela(const Elf64_Rela *rela, int idx) 62 + static bool duplicate_rela(const Elf_Rela *rela, int idx) 61 63 { 62 64 int i; 63 65 for (i = 0; i < idx; i++) { ··· 69 67 return false; 70 68 } 71 69 72 - static void count_max_entries(Elf64_Rela *relas, int num, 70 + static void count_max_entries(Elf_Rela *relas, int num, 73 71 unsigned int *plts, unsigned int *gots) 74 72 { 75 73 unsigned int type, i; 76 74 77 75 for (i = 0; i < num; i++) { 78 - type = ELF64_R_TYPE(relas[i].r_info); 76 + type = ELF_RISCV_R_TYPE(relas[i].r_info); 79 77 if (type == R_RISCV_CALL_PLT) { 80 78 if (!duplicate_rela(relas, i)) 81 79 (*plts)++; ··· 120 118 121 119 /* Calculate the maximum number of entries */ 122 120 for (i = 0; i < ehdr->e_shnum; i++) { 123 121 Elf_Rela *relas = (void *)ehdr + sechdrs[i].sh_offset; 124 122 int num_rela = sechdrs[i].sh_size / sizeof(Elf_Rela); 125 123 Elf_Shdr *dst_sec = sechdrs + sechdrs[i].sh_info; 126 124 127 125 if (sechdrs[i].sh_type != SHT_RELA) 128 126 continue;
+8 -1
arch/riscv/kernel/ptrace.c
··· 18 18 #include <asm/ptrace.h> 19 19 #include <asm/syscall.h> 20 20 #include <asm/thread_info.h> 21 + #include <linux/audit.h> 21 22 #include <linux/ptrace.h> 22 23 #include <linux/elf.h> 23 24 #include <linux/regset.h> 24 25 #include <linux/sched.h> 25 26 #include <linux/sched/task_stack.h> 26 27 #include <linux/tracehook.h> 28 + 29 + #define CREATE_TRACE_POINTS 27 30 #include <trace/events/syscalls.h> 28 31 29 32 enum riscv_regset { ··· 166 163 if (test_thread_flag(TIF_SYSCALL_TRACEPOINT)) 167 164 trace_sys_enter(regs, syscall_get_nr(current, regs)); 168 165 #endif 166 + 167 + audit_syscall_entry(regs->a7, regs->a0, regs->a1, regs->a2, regs->a3); 169 168 } 170 169 171 170 void do_syscall_trace_exit(struct pt_regs *regs) 172 171 { 172 + audit_syscall_exit(regs); 173 + 173 174 if (test_thread_flag(TIF_SYSCALL_TRACE)) 174 175 tracehook_report_syscall_exit(regs, 0); 175 176 176 177 #ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS 177 178 if (test_thread_flag(TIF_SYSCALL_TRACEPOINT)) 178 - trace_sys_exit(regs, regs->regs[0]); 179 + trace_sys_exit(regs, regs_return_value(regs)); 179 180 #endif 180 181 }
+8 -1
arch/riscv/kernel/setup.c
··· 149 149 150 150 void __init parse_dtb(unsigned int hartid, void *dtb) 151 151 { 152 - early_init_dt_scan(__va(dtb)); 152 + if (!early_init_dt_scan(__va(dtb))) 153 + return; 154 + 155 + pr_err("No DTB passed to the kernel\n"); 156 + #ifdef CONFIG_CMDLINE_FORCE 157 + strlcpy(boot_command_line, CONFIG_CMDLINE, COMMAND_LINE_SIZE); 158 + pr_info("Forcing kernel command line to: %s\n", boot_command_line); 159 + #endif 153 160 } 154 161 155 162 static void __init setup_bootmem(void)
+36 -7
arch/riscv/kernel/smp.c
··· 23 23 #include <linux/smp.h> 24 24 #include <linux/sched.h> 25 25 #include <linux/seq_file.h> 26 + #include <linux/delay.h> 26 27 27 28 #include <asm/sbi.h> 28 29 #include <asm/tlbflush.h> ··· 32 31 enum ipi_message_type { 33 32 IPI_RESCHEDULE, 34 33 IPI_CALL_FUNC, 34 + IPI_CPU_STOP, 35 35 IPI_MAX 36 36 }; 37 37 ··· 68 66 return -EINVAL; 69 67 } 70 68 69 + static void ipi_stop(void) 70 + { 71 + set_cpu_online(smp_processor_id(), false); 72 + while (1) 73 + wait_for_interrupt(); 74 + } 75 + 71 76 void riscv_software_interrupt(void) 72 77 { 73 78 unsigned long *pending_ipis = &ipi_data[smp_processor_id()].bits; ··· 101 92 if (ops & (1 << IPI_CALL_FUNC)) { 102 93 stats[IPI_CALL_FUNC]++; 103 94 generic_smp_call_function_interrupt(); 95 + } 96 + 97 + if (ops & (1 << IPI_CPU_STOP)) { 98 + stats[IPI_CPU_STOP]++; 99 + ipi_stop(); 104 100 } 105 101 106 102 BUG_ON((ops >> IPI_MAX) != 0); ··· 135 121 static const char * const ipi_names[] = { 136 122 [IPI_RESCHEDULE] = "Rescheduling interrupts", 137 123 [IPI_CALL_FUNC] = "Function call interrupts", 124 + [IPI_CPU_STOP] = "CPU stop interrupts", 138 125 }; 139 126 140 127 void show_ipi_stats(struct seq_file *p, int prec) ··· 161 146 send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC); 162 147 } 163 148 164 - static void ipi_stop(void *unused) 165 - { 166 - while (1) 167 - wait_for_interrupt(); 168 - } 169 - 170 149 void smp_send_stop(void) 171 150 { 172 - on_each_cpu(ipi_stop, NULL, 1); 151 + unsigned long timeout; 152 + 153 + if (num_online_cpus() > 1) { 154 + cpumask_t mask; 155 + 156 + cpumask_copy(&mask, cpu_online_mask); 157 + cpumask_clear_cpu(smp_processor_id(), &mask); 158 + 159 + if (system_state <= SYSTEM_RUNNING) 160 + pr_crit("SMP: stopping secondary CPUs\n"); 161 + send_ipi_message(&mask, IPI_CPU_STOP); 162 + } 163 + 164 + /* Wait up to one second for other CPUs to stop */ 165 + timeout = USEC_PER_SEC; 166 + while (num_online_cpus() > 1 && timeout--) 167 + udelay(1); 168 + 169 + if (num_online_cpus() > 1) 170 + pr_warn("SMP: failed to stop secondary CPUs %*pbl\n", 171 + cpumask_pr_args(cpu_online_mask)); 173 172 } 174 173 175 174 void smp_send_reschedule(int cpu)
+6 -2
arch/riscv/kernel/vmlinux.lds.S
··· 18 18 #include <asm/cache.h> 19 19 #include <asm/thread_info.h> 20 20 21 + #define MAX_BYTES_PER_LONG 0x10 22 + 21 23 OUTPUT_ARCH(riscv) 22 24 ENTRY(_start) 23 25 ··· 76 74 *(.sbss*) 77 75 } 78 76 79 - BSS_SECTION(PAGE_SIZE, PAGE_SIZE, 0) 80 - 81 77 EXCEPTION_TABLE(0x10) 82 78 NOTES 83 79 84 80 .rel.dyn : { 85 81 *(.rel.dyn*) 86 82 } 83 + 84 + BSS_SECTION(MAX_BYTES_PER_LONG, 85 + MAX_BYTES_PER_LONG, 86 + MAX_BYTES_PER_LONG) 87 87 88 88 _end = .; 89 89
+1
arch/unicore32/include/uapi/asm/Kbuild
··· 1 1 include include/uapi/asm-generic/Kbuild.asm 2 2 3 3 generic-y += kvm_para.h 4 + generic-y += shmparam.h 4 5 generic-y += ucontext.h
+2 -2
arch/x86/Kconfig
··· 446 446 branches. Requires a compiler with -mindirect-branch=thunk-extern 447 447 support for full protection. The kernel may run slower. 448 448 449 - config RESCTRL 449 + config X86_RESCTRL 450 450 bool "Resource Control support" 451 451 depends on X86 && (CPU_SUP_INTEL || CPU_SUP_AMD) 452 452 select KERNFS ··· 617 617 618 618 config X86_INTEL_LPSS 619 619 bool "Intel Low Power Subsystem Support" 620 - depends on X86 && ACPI 620 + depends on X86 && ACPI && PCI 621 621 select COMMON_CLK 622 622 select PINCTRL 623 623 select IOSF_MBI
+2 -2
arch/x86/include/asm/resctrl_sched.h
··· 2 2 #ifndef _ASM_X86_RESCTRL_SCHED_H 3 3 #define _ASM_X86_RESCTRL_SCHED_H 4 4 5 - #ifdef CONFIG_RESCTRL 5 + #ifdef CONFIG_X86_RESCTRL 6 6 7 7 #include <linux/sched.h> 8 8 #include <linux/jump_label.h> ··· 88 88 89 89 static inline void resctrl_sched_in(void) {} 90 90 91 - #endif /* CONFIG_RESCTRL */ 91 + #endif /* CONFIG_X86_RESCTRL */ 92 92 93 93 #endif /* _ASM_X86_RESCTRL_SCHED_H */
+1 -1
arch/x86/include/asm/uaccess.h
··· 711 711 { 712 712 if (unlikely(!access_ok(ptr,len))) 713 713 return 0; 714 - __uaccess_begin(); 714 + __uaccess_begin_nospec(); 715 715 return 1; 716 716 } 717 717 #define user_access_begin(a,b) user_access_begin(a,b)
+1 -1
arch/x86/kernel/cpu/Makefile
··· 39 39 obj-$(CONFIG_X86_MCE) += mce/ 40 40 obj-$(CONFIG_MTRR) += mtrr/ 41 41 obj-$(CONFIG_MICROCODE) += microcode/ 42 - obj-$(CONFIG_RESCTRL) += resctrl/ 42 + obj-$(CONFIG_X86_RESCTRL) += resctrl/ 43 43 44 44 obj-$(CONFIG_X86_LOCAL_APIC) += perfctr-watchdog.o 45 45
+1 -1
arch/x86/kernel/cpu/bugs.c
··· 215 215 static enum spectre_v2_user_mitigation spectre_v2_user __ro_after_init = 216 216 SPECTRE_V2_USER_NONE; 217 217 218 - #ifdef RETPOLINE 218 + #ifdef CONFIG_RETPOLINE 219 219 static bool spectre_v2_bad_module; 220 220 221 221 bool retpoline_module_ok(bool has_retpoline)
+2 -2
arch/x86/kernel/cpu/resctrl/Makefile
··· 1 1 # SPDX-License-Identifier: GPL-2.0 2 - obj-$(CONFIG_RESCTRL) += core.o rdtgroup.o monitor.o 3 - obj-$(CONFIG_RESCTRL) += ctrlmondata.o pseudo_lock.o 2 + obj-$(CONFIG_X86_RESCTRL) += core.o rdtgroup.o monitor.o 3 + obj-$(CONFIG_X86_RESCTRL) += ctrlmondata.o pseudo_lock.o 4 4 CFLAGS_pseudo_lock.o = -I$(src)
+3
arch/x86/kvm/svm.c
··· 6278 6278 int asid, ret; 6279 6279 6280 6280 ret = -EBUSY; 6281 + if (unlikely(sev->active)) 6282 + return ret; 6283 + 6281 6284 asid = sev_asid_new(); 6282 6285 if (asid < 0) 6283 6286 return ret;
+1 -2
arch/x86/kvm/vmx/nested.c
··· 4540 4540 * given physical address won't match the required 4541 4541 * VMCS12_REVISION identifier. 4542 4542 */ 4543 - nested_vmx_failValid(vcpu, 4543 + return nested_vmx_failValid(vcpu, 4544 4544 VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID); 4545 - return kvm_skip_emulated_instruction(vcpu); 4546 4545 } 4547 4546 new_vmcs12 = kmap(page); 4548 4547 if (new_vmcs12->hdr.revision_id != VMCS12_REVISION ||
+2 -2
arch/x86/kvm/vmx/vmx.c
··· 453 453 struct kvm_tlb_range *range) 454 454 { 455 455 struct kvm_vcpu *vcpu; 456 - int ret = -ENOTSUPP, i; 456 + int ret = 0, i; 457 457 458 458 spin_lock(&to_kvm_vmx(kvm)->ept_pointer_lock); 459 459 ··· 7044 7044 7045 7045 /* unmask address range configure area */ 7046 7046 for (i = 0; i < vmx->pt_desc.addr_range; i++) 7047 - vmx->pt_desc.ctl_bitmask &= ~(0xf << (32 + i * 4)); 7047 + vmx->pt_desc.ctl_bitmask &= ~(0xfULL << (32 + i * 4)); 7048 7048 } 7049 7049 7050 7050 static void vmx_cpuid_update(struct kvm_vcpu *vcpu)
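The second vmx.c hunk fixes a classic C shift bug: 0xf has type int, so "0xf << (32 + i * 4)" shifts a 32-bit value by 32 or more bits, which is undefined behaviour, and the intended high bits of the 64-bit ctl_bitmask were never cleared. The ULL suffix widens the constant before the shift happens. A self-contained illustration:

#include <stdint.h>

uint64_t example_mask(void)
{
	uint64_t mask = ~0ULL;

	/* mask &= ~(0xf << 32);	undefined: 32-bit operand, shift >= 32 */
	mask &= ~(0xfULL << 32);	/* well-defined 64-bit shift */

	return mask;			/* 0xfffffff0ffffffff */
}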
+1 -4
arch/x86/xen/enlighten_pv.c
··· 898 898 val = native_read_msr_safe(msr, err); 899 899 switch (msr) { 900 900 case MSR_IA32_APICBASE: 901 - #ifdef CONFIG_X86_X2APIC 902 - if (!(cpuid_ecx(1) & (1 << (X86_FEATURE_X2APIC & 31)))) 903 - #endif 904 - val &= ~X2APIC_ENABLE; 901 + val &= ~X2APIC_ENABLE; 905 902 break; 906 903 } 907 904 return val;
+9 -3
arch/x86/xen/time.c
··· 361 361 { 362 362 int cpu; 363 363 364 - pvclock_resume(); 365 - 366 364 if (xen_clockevent != &xen_vcpuop_clockevent) 367 365 return; 368 366 ··· 377 379 }; 378 380 379 381 static struct pvclock_vsyscall_time_info *xen_clock __read_mostly; 382 + static u64 xen_clock_value_saved; 380 383 381 384 void xen_save_time_memory_area(void) 382 385 { 383 386 struct vcpu_register_time_memory_area t; 384 387 int ret; 388 + 389 + xen_clock_value_saved = xen_clocksource_read() - xen_sched_clock_offset; 385 390 386 391 if (!xen_clock) 387 392 return; ··· 405 404 int ret; 406 405 407 406 if (!xen_clock) 408 - return; 407 + goto out; 409 408 410 409 t.addr.v = &xen_clock->pvti; 411 410 ··· 422 421 if (ret != 0) 423 422 pr_notice("Cannot restore secondary vcpu_time_info (err %d)", 424 423 ret); 424 + 425 + out: 426 + /* Need pvclock_resume() before using xen_clocksource_read(). */ 427 + pvclock_resume(); 428 + xen_sched_clock_offset = xen_clocksource_read() - xen_clock_value_saved; 425 429 } 426 430 427 431 static void xen_setup_vsyscall_time_info(void)
+5 -6
block/bfq-wf2q.c
··· 1154 1154 } 1155 1155 1156 1156 /** 1157 - * __bfq_deactivate_entity - deactivate an entity from its service tree. 1158 - * @entity: the entity to deactivate. 1157 + * __bfq_deactivate_entity - update sched_data and service trees for 1158 + * entity, so as to represent entity as inactive 1159 + * @entity: the entity being deactivated. 1159 1160 * @ins_into_idle_tree: if false, the entity will not be put into the 1160 1161 * idle tree. 1161 1162 * 1162 - * Deactivates an entity, independently of its previous state. Must 1163 - * be invoked only if entity is on a service tree. Extracts the entity 1164 - * from that tree, and if necessary and allowed, puts it into the idle 1165 - * tree. 1163 + * If necessary and allowed, puts entity into the idle tree. NOTE: 1164 + * entity may be on no tree if in service. 1166 1165 */ 1167 1166 bool __bfq_deactivate_entity(struct bfq_entity *entity, bool ins_into_idle_tree) 1168 1167 {
+19 -1
block/blk-core.c
··· 661 661 * blk_attempt_plug_merge - try to merge with %current's plugged list 662 662 * @q: request_queue new bio is being queued at 663 663 * @bio: new bio being queued 664 - * @request_count: out parameter for number of traversed plugged requests 665 664 * @same_queue_rq: pointer to &struct request that gets filled in when 666 665 * another request associated with @q is found on the plug list 667 666 * (optional, may be %NULL) ··· 1682 1683 * @plug: The &struct blk_plug that needs to be initialized 1683 1684 * 1684 1685 * Description: 1686 + * blk_start_plug() indicates to the block layer an intent by the caller 1687 + * to submit multiple I/O requests in a batch. The block layer may use 1688 + * this hint to defer submitting I/Os from the caller until blk_finish_plug() 1689 + * is called. However, the block layer may choose to submit requests 1690 + * before a call to blk_finish_plug() if the number of queued I/Os 1691 + * exceeds %BLK_MAX_REQUEST_COUNT, or if the size of the I/O is larger than 1692 + * %BLK_PLUG_FLUSH_SIZE. The queued I/Os may also be submitted early if 1693 + * the task schedules (see below). 1694 + * 1685 1695 * Tracking blk_plug inside the task_struct will help with auto-flushing the 1686 1696 * pending I/O should the task end up blocking between blk_start_plug() and 1687 1697 * blk_finish_plug(). This is important from a performance perspective, but ··· 1773 1765 blk_mq_flush_plug_list(plug, from_schedule); 1774 1766 } 1775 1767 1768 + /** 1769 + * blk_finish_plug - mark the end of a batch of submitted I/O 1770 + * @plug: The &struct blk_plug passed to blk_start_plug() 1771 + * 1772 + * Description: 1773 + * Indicate that a batch of I/O submissions is complete. This function 1774 + * must be paired with an initial call to blk_start_plug(). The intent 1775 + * is to allow the block layer to optimize I/O submission. See the 1776 + * documentation for blk_start_plug() for more information. 1777 + */ 1776 1778 void blk_finish_plug(struct blk_plug *plug) 1777 1779 { 1778 1780 if (plug != current->plug)
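The expanded kernel-doc spells out when a plug may drain early: more than BLK_MAX_REQUEST_COUNT queued requests, a request larger than BLK_PLUG_FLUSH_SIZE, or the task scheduling. A sketch of the pairing the documentation describes (the batching loop is illustrative):

#include <linux/bio.h>
#include <linux/blkdev.h>

static void example_submit_batch(struct bio *bios[], int n)
{
	struct blk_plug plug;
	int i;

	blk_start_plug(&plug);
	for (i = 0; i < n; i++)
		submit_bio(bios[i]);	/* may be held back in the plug */
	blk_finish_plug(&plug);		/* anything still queued goes out */
}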
-2
block/blk-mq-debugfs-zoned.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 2 2 /* 3 3 * Copyright (C) 2017 Western Digital Corporation or its affiliates. 4 - * 5 - * This file is released under the GPL. 6 4 */ 7 5 8 6 #include <linux/blkdev.h>
+2 -1
block/blk-mq.c
··· 1906 1906 { 1907 1907 const int is_sync = op_is_sync(bio->bi_opf); 1908 1908 const int is_flush_fua = op_is_flush(bio->bi_opf); 1909 - struct blk_mq_alloc_data data = { .flags = 0, .cmd_flags = bio->bi_opf }; 1909 + struct blk_mq_alloc_data data = { .flags = 0}; 1910 1910 struct request *rq; 1911 1911 struct blk_plug *plug; 1912 1912 struct request *same_queue_rq = NULL; ··· 1928 1928 1929 1929 rq_qos_throttle(q, bio); 1930 1930 1931 + data.cmd_flags = bio->bi_opf; 1931 1932 rq = blk_mq_get_request(q, bio, &data); 1932 1933 if (unlikely(!rq)) { 1933 1934 rq_qos_cleanup(q, bio);
+4
crypto/adiantum.c
··· 539 539 ictx = skcipher_instance_ctx(inst); 540 540 541 541 /* Stream cipher, e.g. "xchacha12" */ 542 + crypto_set_skcipher_spawn(&ictx->streamcipher_spawn, 543 + skcipher_crypto_instance(inst)); 542 544 err = crypto_grab_skcipher(&ictx->streamcipher_spawn, streamcipher_name, 543 545 0, crypto_requires_sync(algt->type, 544 546 algt->mask)); ··· 549 547 streamcipher_alg = crypto_spawn_skcipher_alg(&ictx->streamcipher_spawn); 550 548 551 549 /* Block cipher, e.g. "aes" */ 550 + crypto_set_spawn(&ictx->blockcipher_spawn, 551 + skcipher_crypto_instance(inst)); 552 552 err = crypto_grab_spawn(&ictx->blockcipher_spawn, blockcipher_name, 553 553 CRYPTO_ALG_TYPE_CIPHER, CRYPTO_ALG_TYPE_MASK); 554 554 if (err)
+11 -3
crypto/authenc.c
··· 58 58 return -EINVAL; 59 59 if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM) 60 60 return -EINVAL; 61 - if (RTA_PAYLOAD(rta) < sizeof(*param)) 61 + 62 + /* 63 + * RTA_OK() didn't align the rtattr's payload when validating that it 64 + * fits in the buffer. Yet, the keys should start on the next 4-byte 65 + * aligned boundary. To avoid confusion, require that the rtattr 66 + * payload be exactly the param struct, which has a 4-byte aligned size. 67 + */ 68 + if (RTA_PAYLOAD(rta) != sizeof(*param)) 62 69 return -EINVAL; 70 + BUILD_BUG_ON(sizeof(*param) % RTA_ALIGNTO); 63 71 64 72 param = RTA_DATA(rta); 65 73 keys->enckeylen = be32_to_cpu(param->enckeylen); 66 74 67 - key += RTA_ALIGN(rta->rta_len); 68 - keylen -= RTA_ALIGN(rta->rta_len); 75 + key += rta->rta_len; 76 + keylen -= rta->rta_len; 69 77 70 78 if (keylen < keys->enckeylen) 71 79 return -EINVAL;
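For reference, the blob format this parser consumes, and that the stricter check above now enforces, looks like this when built by hand (build_authenc_key() is a hypothetical helper, shown only to illustrate the layout):

	#include <linux/rtnetlink.h>
	#include <linux/string.h>
	#include <crypto/authenc.h>

	/*
	 * authenc() key blob: an rtattr header, a 4-byte param struct carrying
	 * the big-endian enc key length, then the auth key immediately followed
	 * by the enc key. Because the payload is exactly sizeof(*param), whose
	 * size is a multiple of RTA_ALIGNTO, rta->rta_len already points at the
	 * keys and no further RTA_ALIGN() is needed -- the point of the fix.
	 */
	static unsigned int build_authenc_key(u8 *buf,
					      const u8 *authkey, unsigned int authkeylen,
					      const u8 *enckey, unsigned int enckeylen)
	{
		struct rtattr *rta = (struct rtattr *)buf;
		struct crypto_authenc_key_param *param;

		rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
		rta->rta_len = RTA_LENGTH(sizeof(*param));
		param = RTA_DATA(rta);
		param->enckeylen = cpu_to_be32(enckeylen);

		memcpy(buf + rta->rta_len, authkey, authkeylen);
		memcpy(buf + rta->rta_len + authkeylen, enckey, enckeylen);

		return rta->rta_len + authkeylen + enckeylen;
	}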
+1 -1
crypto/authencesn.c
··· 279 279 struct aead_request *req = areq->data; 280 280 281 281 err = err ?: crypto_authenc_esn_decrypt_tail(req, 0); 282 - aead_request_complete(req, err); 282 + authenc_esn_request_complete(req, err); 283 283 } 284 284 285 285 static int crypto_authenc_esn_decrypt(struct aead_request *req)
+1 -1
crypto/sm3_generic.c
··· 100 100 101 101 for (i = 0; i <= 63; i++) { 102 102 103 - ss1 = rol32((rol32(a, 12) + e + rol32(t(i), i)), 7); 103 + ss1 = rol32((rol32(a, 12) + e + rol32(t(i), i & 31)), 7); 104 104 105 105 ss2 = ss1 ^ rol32(a, 12); 106 106
+1
drivers/acpi/Kconfig
··· 10 10 bool "ACPI (Advanced Configuration and Power Interface) Support" 11 11 depends on ARCH_SUPPORTS_ACPI 12 12 select PNP 13 + select NLS 13 14 default y if X86 14 15 help 15 16 Advanced Configuration and Power Interface (ACPI) support for
+2 -1
drivers/acpi/Makefile
··· 41 41 acpi-$(CONFIG_ACPI_DOCK) += dock.o 42 42 acpi-$(CONFIG_PCI) += pci_root.o pci_link.o pci_irq.o 43 43 obj-$(CONFIG_ACPI_MCFG) += pci_mcfg.o 44 - acpi-y += acpi_lpss.o acpi_apd.o 44 + acpi-$(CONFIG_PCI) += acpi_lpss.o 45 + acpi-y += acpi_apd.o 45 46 acpi-y += acpi_platform.o 46 47 acpi-y += acpi_pnp.o 47 48 acpi-$(CONFIG_ARM_AMBA) += acpi_amba.o
+3 -2
drivers/acpi/arm64/iort.c
··· 876 876 return (resv == its->its_count) ? resv : -ENODEV; 877 877 } 878 878 #else 879 - static inline const struct iommu_ops *iort_fwspec_iommu_ops(struct device *dev); 879 + static inline const struct iommu_ops *iort_fwspec_iommu_ops(struct device *dev) 880 880 { return NULL; } 881 881 static inline int iort_add_device_replay(const struct iommu_ops *ops, 882 882 struct device *dev) ··· 952 952 { 953 953 struct acpi_iort_node *node; 954 954 struct acpi_iort_root_complex *rc; 955 + struct pci_bus *pbus = to_pci_dev(dev)->bus; 955 956 956 957 node = iort_scan_node(ACPI_IORT_NODE_PCI_ROOT_COMPLEX, 957 - iort_match_node_callback, dev); 958 + iort_match_node_callback, &pbus->dev); 958 959 if (!node || node->revision < 1) 959 960 return -ENODEV; 960 961
+12 -12
drivers/acpi/bus.c
··· 1054 1054 goto error0; 1055 1055 } 1056 1056 1057 - /* 1058 - * ACPI 2.0 requires the EC driver to be loaded and work before 1059 - * the EC device is found in the namespace (i.e. before 1060 - * acpi_load_tables() is called). 1061 - * 1062 - * This is accomplished by looking for the ECDT table, and getting 1063 - * the EC parameters out of that. 1064 - * 1065 - * Ignore the result. Not having an ECDT is not fatal. 1066 - */ 1067 - status = acpi_ec_ecdt_probe(); 1068 - 1069 1057 #ifdef CONFIG_X86 1070 1058 if (!acpi_ioapic) { 1071 1059 /* compatible (0) means level (3) */ ··· 1129 1141 "Unable to load the System Description Tables\n"); 1130 1142 goto error1; 1131 1143 } 1144 + 1145 + /* 1146 + * ACPI 2.0 requires the EC driver to be loaded and work before the EC 1147 + * device is found in the namespace. 1148 + * 1149 + * This is accomplished by looking for the ECDT table and getting the EC 1150 + * parameters out of that. 1151 + * 1152 + * Do that before calling acpi_initialize_objects() which may trigger EC 1153 + * address space accesses. 1154 + */ 1155 + acpi_ec_ecdt_probe(); 1132 1156 1133 1157 status = acpi_enable_subsystem(ACPI_NO_ACPI_ENABLE); 1134 1158 if (ACPI_FAILURE(status)) {
+4
drivers/acpi/internal.h
··· 81 81 #else 82 82 static inline void acpi_debugfs_init(void) { return; } 83 83 #endif 84 + #ifdef CONFIG_PCI 84 85 void acpi_lpss_init(void); 86 + #else 87 + static inline void acpi_lpss_init(void) {} 88 + #endif 85 89 86 90 void acpi_apd_init(void); 87 91
+7 -13
drivers/acpi/nfit/core.c
··· 26 26 #include <acpi/nfit.h> 27 27 #include "intel.h" 28 28 #include "nfit.h" 29 - #include "intel.h" 30 29 31 30 /* 32 31 * For readq() and writeq() on 32-bit builds, the hi-lo, lo-hi order is ··· 76 77 return &nfit_uuid[id]; 77 78 } 78 79 EXPORT_SYMBOL(to_nfit_uuid); 79 - 80 - static struct acpi_nfit_desc *to_acpi_nfit_desc( 81 - struct nvdimm_bus_descriptor *nd_desc) 82 - { 83 - return container_of(nd_desc, struct acpi_nfit_desc, nd_desc); 84 - } 85 80 86 81 static struct acpi_device *to_acpi_dev(struct acpi_nfit_desc *acpi_desc) 87 82 { ··· 412 419 int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm, 413 420 unsigned int cmd, void *buf, unsigned int buf_len, int *cmd_rc) 414 421 { 415 - struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc); 422 + struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc); 416 423 struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); 417 424 union acpi_object in_obj, in_buf, *out_obj; 418 425 const struct nd_cmd_desc *desc = NULL; ··· 714 721 struct acpi_nfit_memory_map *memdev; 715 722 struct acpi_nfit_desc *acpi_desc; 716 723 struct nfit_mem *nfit_mem; 724 + u16 physical_id; 717 725 718 726 mutex_lock(&acpi_desc_lock); 719 727 list_for_each_entry(acpi_desc, &acpi_descs, list) { ··· 722 728 list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) { 723 729 memdev = __to_nfit_memdev(nfit_mem); 724 730 if (memdev->device_handle == device_handle) { 731 + *flags = memdev->flags; 732 + physical_id = memdev->physical_id; 725 733 mutex_unlock(&acpi_desc->init_mutex); 726 734 mutex_unlock(&acpi_desc_lock); 727 - *flags = memdev->flags; 728 - return memdev->physical_id; 735 + return physical_id; 729 736 } 730 737 } 731 738 mutex_unlock(&acpi_desc->init_mutex); ··· 2226 2231 nd_set = devm_kzalloc(dev, sizeof(*nd_set), GFP_KERNEL); 2227 2232 if (!nd_set) 2228 2233 return -ENOMEM; 2229 - ndr_desc->nd_set = nd_set; 2230 2234 guid_copy(&nd_set->type_guid, (guid_t *) spa->range_guid); 2231 2235 2232 2236 info = devm_kzalloc(dev, sizeof_nfit_set_info(nr), GFP_KERNEL); ··· 3361 3367 3362 3368 static int acpi_nfit_flush_probe(struct nvdimm_bus_descriptor *nd_desc) 3363 3369 { 3364 - struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc); 3370 + struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc); 3365 3371 struct device *dev = acpi_desc->dev; 3366 3372 3367 3373 /* Bounce the device lock to flush acpi_nfit_add / acpi_nfit_notify */ ··· 3378 3384 static int __acpi_nfit_clear_to_send(struct nvdimm_bus_descriptor *nd_desc, 3379 3385 struct nvdimm *nvdimm, unsigned int cmd) 3380 3386 { 3381 - struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc); 3387 + struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc); 3382 3388 3383 3389 if (nvdimm) 3384 3390 return 0;
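The flags/physical_id reorder above is a copy-before-unlock fix: memdev points into a list that is only stable while init_mutex is held, and the old code read it after dropping the locks. The general shape (a sketch of the pattern, not new API):

	mutex_lock(&acpi_desc->init_mutex);
	/* copy everything needed while the list cannot change under us */
	*flags = memdev->flags;
	physical_id = memdev->physical_id;
	mutex_unlock(&acpi_desc->init_mutex);
	/* from here on, touch only the local copies */
	return physical_id;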
+4 -4
drivers/acpi/nfit/intel.c
··· 146 146 147 147 static void nvdimm_invalidate_cache(void); 148 148 149 - static int intel_security_unlock(struct nvdimm *nvdimm, 149 + static int __maybe_unused intel_security_unlock(struct nvdimm *nvdimm, 150 150 const struct nvdimm_key_data *key_data) 151 151 { 152 152 struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); ··· 227 227 return 0; 228 228 } 229 229 230 - static int intel_security_erase(struct nvdimm *nvdimm, 230 + static int __maybe_unused intel_security_erase(struct nvdimm *nvdimm, 231 231 const struct nvdimm_key_data *key, 232 232 enum nvdimm_passphrase_type ptype) 233 233 { ··· 276 276 return 0; 277 277 } 278 278 279 - static int intel_security_query_overwrite(struct nvdimm *nvdimm) 279 + static int __maybe_unused intel_security_query_overwrite(struct nvdimm *nvdimm) 280 280 { 281 281 int rc; 282 282 struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); ··· 313 313 return 0; 314 314 } 315 315 316 - static int intel_security_overwrite(struct nvdimm *nvdimm, 316 + static int __maybe_unused intel_security_overwrite(struct nvdimm *nvdimm, 317 317 const struct nvdimm_key_data *nkey) 318 318 { 319 319 int rc;
+3 -3
drivers/acpi/numa.c
··· 146 146 { 147 147 struct acpi_srat_mem_affinity *p = 148 148 (struct acpi_srat_mem_affinity *)header; 149 - pr_debug("SRAT Memory (0x%lx length 0x%lx) in proximity domain %d %s%s%s\n", 150 - (unsigned long)p->base_address, 151 - (unsigned long)p->length, 149 + pr_debug("SRAT Memory (0x%llx length 0x%llx) in proximity domain %d %s%s%s\n", 150 + (unsigned long long)p->base_address, 151 + (unsigned long long)p->length, 152 152 p->proximity_domain, 153 153 (p->flags & ACPI_SRAT_MEM_ENABLED) ? 154 154 "enabled" : "disabled",
+33 -8
drivers/acpi/pmic/intel_pmic_xpower.c
··· 20 20 #define GPI1_LDO_ON (3 << 0) 21 21 #define GPI1_LDO_OFF (4 << 0) 22 22 23 - #define AXP288_ADC_TS_PIN_GPADC 0xf2 24 - #define AXP288_ADC_TS_PIN_ON 0xf3 23 + #define AXP288_ADC_TS_CURRENT_ON_OFF_MASK GENMASK(1, 0) 24 + #define AXP288_ADC_TS_CURRENT_OFF (0 << 0) 25 + #define AXP288_ADC_TS_CURRENT_ON_WHEN_CHARGING (1 << 0) 26 + #define AXP288_ADC_TS_CURRENT_ON_ONDEMAND (2 << 0) 27 + #define AXP288_ADC_TS_CURRENT_ON (3 << 0) 25 28 26 29 static struct pmic_table power_table[] = { 27 30 { ··· 215 212 */ 216 213 static int intel_xpower_pmic_get_raw_temp(struct regmap *regmap, int reg) 217 214 { 215 + int ret, adc_ts_pin_ctrl; 218 216 u8 buf[2]; 219 - int ret; 220 217 221 - ret = regmap_write(regmap, AXP288_ADC_TS_PIN_CTRL, 222 - AXP288_ADC_TS_PIN_GPADC); 218 + /* 219 + * The current-source used for the battery temp-sensor (TS) is shared 220 + * with the GPADC. For proper fuel-gauge and charger operation the TS 221 + * current-source needs to be permanently on. But to read the GPADC we 222 + * need to temporarily switch the TS current-source to ondemand, so that 223 + * the GPADC can use it; otherwise we will always read an all 0 value. 224 + * 225 + * Note that the switching from on to on-ondemand is not necessary 226 + * when the TS current-source is off (this happens on devices which 227 + * do not use the TS-pin). 228 + */ 229 + ret = regmap_read(regmap, AXP288_ADC_TS_PIN_CTRL, &adc_ts_pin_ctrl); 223 230 if (ret) 224 231 return ret; 225 232 226 - /* After switching to the GPADC pin give things some time to settle */ 227 - usleep_range(6000, 10000); 233 + if (adc_ts_pin_ctrl & AXP288_ADC_TS_CURRENT_ON_OFF_MASK) { 234 + ret = regmap_update_bits(regmap, AXP288_ADC_TS_PIN_CTRL, 235 + AXP288_ADC_TS_CURRENT_ON_OFF_MASK, 236 + AXP288_ADC_TS_CURRENT_ON_ONDEMAND); 237 + if (ret) 238 + return ret; 239 + 240 + /* Wait a bit after switching the current-source */ 241 + usleep_range(6000, 10000); 242 + } 228 243 229 244 ret = regmap_bulk_read(regmap, AXP288_GP_ADC_H, buf, 2); 230 245 if (ret == 0) 231 246 ret = (buf[0] << 4) + ((buf[1] >> 4) & 0x0f); 232 247 233 - regmap_write(regmap, AXP288_ADC_TS_PIN_CTRL, AXP288_ADC_TS_PIN_ON); 248 + if (adc_ts_pin_ctrl & AXP288_ADC_TS_CURRENT_ON_OFF_MASK) { 249 + regmap_update_bits(regmap, AXP288_ADC_TS_PIN_CTRL, 250 + AXP288_ADC_TS_CURRENT_ON_OFF_MASK, 251 + AXP288_ADC_TS_CURRENT_ON); 252 + } 234 253 235 254 return ret; 236 255 }
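Switching from regmap_write() to regmap_update_bits() is what preserves the other bits of AXP288_ADC_TS_PIN_CTRL. Ignoring regmap's internal locking and cache, the call is equivalent to this open-coded read-modify-write (sketch):

	unsigned int old, new;
	int ret;

	ret = regmap_read(regmap, AXP288_ADC_TS_PIN_CTRL, &old);
	if (ret)
		return ret;

	new = (old & ~AXP288_ADC_TS_CURRENT_ON_OFF_MASK) |
	      AXP288_ADC_TS_CURRENT_ON_ONDEMAND;
	if (new != old)		/* regmap suppresses no-op writes too */
		ret = regmap_write(regmap, AXP288_ADC_TS_PIN_CTRL, new);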
+22
drivers/acpi/power.c
··· 131 131 } 132 132 } 133 133 134 + static bool acpi_power_resource_is_dup(union acpi_object *package, 135 + unsigned int start, unsigned int i) 136 + { 137 + acpi_handle rhandle, dup; 138 + unsigned int j; 139 + 140 + /* The caller is expected to check the package element types */ 141 + rhandle = package->package.elements[i].reference.handle; 142 + for (j = start; j < i; j++) { 143 + dup = package->package.elements[j].reference.handle; 144 + if (dup == rhandle) 145 + return true; 146 + } 147 + 148 + return false; 149 + } 150 + 134 151 int acpi_extract_power_resources(union acpi_object *package, unsigned int start, 135 152 struct list_head *list) 136 153 { ··· 167 150 err = -ENODEV; 168 151 break; 169 152 } 153 + 154 + /* Some ACPI tables contain duplicate power resource references */ 155 + if (acpi_power_resource_is_dup(package, start, i)) 156 + continue; 157 + 170 158 err = acpi_add_power_resource(rhandle); 171 159 if (err) 172 160 break;
+1 -1
drivers/ata/Kconfig
··· 1091 1091 1092 1092 config PATA_ACPI 1093 1093 tristate "ACPI firmware driver for PATA" 1094 - depends on ATA_ACPI && ATA_BMDMA 1094 + depends on ATA_ACPI && ATA_BMDMA && PCI 1095 1095 help 1096 1096 This option enables an ACPI method driver which drives 1097 1097 motherboard PATA controller interfaces through the ACPI
+2
drivers/ata/ahci.h
··· 254 254 AHCI_HFLAG_IS_MOBILE = (1 << 25), /* mobile chipset, use 255 255 SATA_MOBILE_LPM_POLICY 256 256 as default lpm_policy */ 257 + AHCI_HFLAG_SUSPEND_PHYS = (1 << 26), /* handle PHYs during 258 + suspend/resume */ 257 259 258 260 /* ap->flags bits */ 259 261
+64 -23
drivers/ata/ahci_mvebu.c
··· 28 28 #define AHCI_WINDOW_BASE(win) (0x64 + ((win) << 4)) 29 29 #define AHCI_WINDOW_SIZE(win) (0x68 + ((win) << 4)) 30 30 31 + struct ahci_mvebu_plat_data { 32 + int (*plat_config)(struct ahci_host_priv *hpriv); 33 + unsigned int flags; 34 + }; 35 + 31 36 static void ahci_mvebu_mbus_config(struct ahci_host_priv *hpriv, 32 37 const struct mbus_dram_target_info *dram) 33 38 { ··· 65 60 */ 66 61 writel(0x4, hpriv->mmio + AHCI_VENDOR_SPECIFIC_0_ADDR); 67 62 writel(0x80, hpriv->mmio + AHCI_VENDOR_SPECIFIC_0_DATA); 63 + } 64 + 65 + static int ahci_mvebu_armada_380_config(struct ahci_host_priv *hpriv) 66 + { 67 + const struct mbus_dram_target_info *dram; 68 + int rc = 0; 69 + 70 + dram = mv_mbus_dram_info(); 71 + if (dram) 72 + ahci_mvebu_mbus_config(hpriv, dram); 73 + else 74 + rc = -ENODEV; 75 + 76 + ahci_mvebu_regret_option(hpriv); 77 + 78 + return rc; 79 + } 80 + 81 + static int ahci_mvebu_armada_3700_config(struct ahci_host_priv *hpriv) 82 + { 83 + u32 reg; 84 + 85 + writel(0, hpriv->mmio + AHCI_VENDOR_SPECIFIC_0_ADDR); 86 + 87 + reg = readl(hpriv->mmio + AHCI_VENDOR_SPECIFIC_0_DATA); 88 + reg |= BIT(6); 89 + writel(reg, hpriv->mmio + AHCI_VENDOR_SPECIFIC_0_DATA); 90 + 91 + return 0; 68 92 } 69 93 70 94 /** ··· 160 126 { 161 127 struct ata_host *host = platform_get_drvdata(pdev); 162 128 struct ahci_host_priv *hpriv = host->private_data; 163 - const struct mbus_dram_target_info *dram; 129 + const struct ahci_mvebu_plat_data *pdata = hpriv->plat_data; 164 130 165 - dram = mv_mbus_dram_info(); 166 - if (dram) 167 - ahci_mvebu_mbus_config(hpriv, dram); 168 - 169 - ahci_mvebu_regret_option(hpriv); 131 + pdata->plat_config(hpriv); 170 132 171 133 return ahci_platform_resume_host(&pdev->dev); 172 134 } ··· 184 154 185 155 static int ahci_mvebu_probe(struct platform_device *pdev) 186 156 { 157 + const struct ahci_mvebu_plat_data *pdata; 187 158 struct ahci_host_priv *hpriv; 188 - const struct mbus_dram_target_info *dram; 189 159 int rc; 160 + 161 + pdata = of_device_get_match_data(&pdev->dev); 162 + if (!pdata) 163 + return -EINVAL; 190 164 191 165 hpriv = ahci_platform_get_resources(pdev, 0); 192 166 if (IS_ERR(hpriv)) 193 167 return PTR_ERR(hpriv); 168 + 169 + hpriv->flags |= pdata->flags; 170 + hpriv->plat_data = (void *)pdata; 194 171 195 172 rc = ahci_platform_enable_resources(hpriv); 196 173 if (rc) ··· 205 168 206 169 hpriv->stop_engine = ahci_mvebu_stop_engine; 207 170 208 - if (of_device_is_compatible(pdev->dev.of_node, 209 - "marvell,armada-380-ahci")) { 210 - dram = mv_mbus_dram_info(); 211 - if (!dram) 212 - return -ENODEV; 213 - 214 - ahci_mvebu_mbus_config(hpriv, dram); 215 - ahci_mvebu_regret_option(hpriv); 216 - } 171 + rc = pdata->plat_config(hpriv); 172 + if (rc) 173 + goto disable_resources; 217 174 218 175 rc = ahci_platform_init_host(pdev, hpriv, &ahci_mvebu_port_info, 219 176 &ahci_platform_sht); ··· 221 190 return rc; 222 191 } 223 192 193 + static const struct ahci_mvebu_plat_data ahci_mvebu_armada_380_plat_data = { 194 + .plat_config = ahci_mvebu_armada_380_config, 195 + }; 196 + 197 + static const struct ahci_mvebu_plat_data ahci_mvebu_armada_3700_plat_data = { 198 + .plat_config = ahci_mvebu_armada_3700_config, 199 + .flags = AHCI_HFLAG_SUSPEND_PHYS, 200 + }; 201 + 224 202 static const struct of_device_id ahci_mvebu_of_match[] = { 225 - { .compatible = "marvell,armada-380-ahci", }, 226 - { .compatible = "marvell,armada-3700-ahci", }, 203 + { 204 + .compatible = "marvell,armada-380-ahci", 205 + .data = &ahci_mvebu_armada_380_plat_data, 206 + }, 207 + { 208 + .compatible = "marvell,armada-3700-ahci", 209 + .data = &ahci_mvebu_armada_3700_plat_data, 210 + }, 227 211 { }, 228 212 }; 229 213 MODULE_DEVICE_TABLE(of, ahci_mvebu_of_match); 230 214 231 - /* 232 - * We currently don't provide power management related operations, 233 - * since there is no suspend/resume support at the platform level for 234 - * Armada 38x for the moment. 235 - */ 236 215 static struct platform_driver ahci_mvebu_driver = { 237 216 .probe = ahci_mvebu_probe, 238 217 .remove = ata_platform_remove_one,
+13
drivers/ata/libahci_platform.c
··· 56 56 if (rc) 57 57 goto disable_phys; 58 58 59 + rc = phy_set_mode(hpriv->phys[i], PHY_MODE_SATA); 60 + if (rc) { 61 + phy_exit(hpriv->phys[i]); 62 + goto disable_phys; 63 + } 64 + 59 65 rc = phy_power_on(hpriv->phys[i]); 60 66 if (rc) { 61 67 phy_exit(hpriv->phys[i]); ··· 744 738 writel(ctl, mmio + HOST_CTL); 745 739 readl(mmio + HOST_CTL); /* flush */ 746 740 741 + if (hpriv->flags & AHCI_HFLAG_SUSPEND_PHYS) 742 + ahci_platform_disable_phys(hpriv); 743 + 747 744 return ata_host_suspend(host, PMSG_SUSPEND); 748 745 } 749 746 EXPORT_SYMBOL_GPL(ahci_platform_suspend_host); ··· 765 756 int ahci_platform_resume_host(struct device *dev) 766 757 { 767 758 struct ata_host *host = dev_get_drvdata(dev); 759 + struct ahci_host_priv *hpriv = host->private_data; 768 760 int rc; 769 761 770 762 if (dev->power.power_state.event == PM_EVENT_SUSPEND) { ··· 775 765 776 766 ahci_init_controller(host); 777 767 } 768 + 769 + if (hpriv->flags & AHCI_HFLAG_SUSPEND_PHYS) 770 + ahci_platform_enable_phys(hpriv); 778 771 779 772 ata_host_resume(host); 780 773
+2 -2
drivers/ata/sata_fsl.c
··· 729 729 if (!pp) 730 730 return -ENOMEM; 731 731 732 - mem = dma_zalloc_coherent(dev, SATA_FSL_PORT_PRIV_DMA_SZ, &mem_dma, 733 - GFP_KERNEL); 732 + mem = dma_alloc_coherent(dev, SATA_FSL_PORT_PRIV_DMA_SZ, &mem_dma, 733 + GFP_KERNEL); 734 734 if (!mem) { 735 735 kfree(pp); 736 736 return -ENOMEM;
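This hunk and the many like it below are mechanical: dma_alloc_coherent() now returns zeroed memory, which made dma_zalloc_coherent() a redundant wrapper slated for removal. The conversion pattern, in the abstract:

	/* before: explicit zeroing wrapper */
	mem = dma_zalloc_coherent(dev, size, &mem_dma, GFP_KERNEL);

	/* after: dma_alloc_coherent() already zeroes the buffer */
	mem = dma_alloc_coherent(dev, size, &mem_dma, GFP_KERNEL);

No memset() needs to be added at any call site.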
+20 -21
drivers/atm/he.c
··· 533 533 534 534 static int he_init_tpdrq(struct he_dev *he_dev) 535 535 { 536 - he_dev->tpdrq_base = dma_zalloc_coherent(&he_dev->pci_dev->dev, 537 - CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq), 538 - &he_dev->tpdrq_phys, GFP_KERNEL); 536 + he_dev->tpdrq_base = dma_alloc_coherent(&he_dev->pci_dev->dev, 537 + CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq), 538 + &he_dev->tpdrq_phys, 539 + GFP_KERNEL); 539 540 if (he_dev->tpdrq_base == NULL) { 540 541 hprintk("failed to alloc tpdrq\n"); 541 542 return -ENOMEM; ··· 718 717 instead of '/ 512', use '>> 9' to prevent a call 719 718 to divdu3 on x86 platforms 720 719 */ 721 - rate_cps = (unsigned long long) (1 << exp) * (man + 512) >> 9; 720 + rate_cps = (unsigned long long) (1UL << exp) * (man + 512) >> 9; 722 721 723 722 if (rate_cps < 10) 724 723 rate_cps = 10; /* 2.2.1 minimum payload rate is 10 cps */ ··· 806 805 goto out_free_rbpl_virt; 807 806 } 808 807 809 - he_dev->rbpl_base = dma_zalloc_coherent(&he_dev->pci_dev->dev, 810 - CONFIG_RBPL_SIZE * sizeof(struct he_rbp), 811 - &he_dev->rbpl_phys, GFP_KERNEL); 808 + he_dev->rbpl_base = dma_alloc_coherent(&he_dev->pci_dev->dev, 809 + CONFIG_RBPL_SIZE * sizeof(struct he_rbp), 810 + &he_dev->rbpl_phys, GFP_KERNEL); 812 811 if (he_dev->rbpl_base == NULL) { 813 812 hprintk("failed to alloc rbpl_base\n"); 814 813 goto out_destroy_rbpl_pool; ··· 845 844 846 845 /* rx buffer ready queue */ 847 846 848 - he_dev->rbrq_base = dma_zalloc_coherent(&he_dev->pci_dev->dev, 849 - CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq), 850 - &he_dev->rbrq_phys, GFP_KERNEL); 847 + he_dev->rbrq_base = dma_alloc_coherent(&he_dev->pci_dev->dev, 848 + CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq), 849 + &he_dev->rbrq_phys, GFP_KERNEL); 851 850 if (he_dev->rbrq_base == NULL) { 852 851 hprintk("failed to allocate rbrq\n"); 853 852 goto out_free_rbpl; ··· 869 868 870 869 /* tx buffer ready queue */ 871 870 872 - he_dev->tbrq_base = dma_zalloc_coherent(&he_dev->pci_dev->dev, 873 - CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq), 874 - &he_dev->tbrq_phys, GFP_KERNEL); 871 + he_dev->tbrq_base = dma_alloc_coherent(&he_dev->pci_dev->dev, 872 + CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq), 873 + &he_dev->tbrq_phys, GFP_KERNEL); 875 874 if (he_dev->tbrq_base == NULL) { 876 875 hprintk("failed to allocate tbrq\n"); 877 876 goto out_free_rbpq_base; ··· 914 913 /* 2.9.3.5 tail offset for each interrupt queue is located after the 915 914 end of the interrupt queue */ 916 915 917 - he_dev->irq_base = dma_zalloc_coherent(&he_dev->pci_dev->dev, 918 - (CONFIG_IRQ_SIZE + 1) 919 - * sizeof(struct he_irq), 920 - &he_dev->irq_phys, 921 - GFP_KERNEL); 916 + he_dev->irq_base = dma_alloc_coherent(&he_dev->pci_dev->dev, 917 + (CONFIG_IRQ_SIZE + 1) * sizeof(struct he_irq), 918 + &he_dev->irq_phys, GFP_KERNEL); 922 919 if (he_dev->irq_base == NULL) { 923 920 hprintk("failed to allocate irq\n"); 924 921 return -ENOMEM; ··· 1463 1464 1464 1465 /* host status page */ 1465 1466 1466 - he_dev->hsp = dma_zalloc_coherent(&he_dev->pci_dev->dev, 1467 - sizeof(struct he_hsp), 1468 - &he_dev->hsp_phys, GFP_KERNEL); 1467 + he_dev->hsp = dma_alloc_coherent(&he_dev->pci_dev->dev, 1468 + sizeof(struct he_hsp), 1469 + &he_dev->hsp_phys, GFP_KERNEL); 1469 1470 if (he_dev->hsp == NULL) { 1470 1471 hprintk("failed to allocate host status page\n"); 1471 1472 return -ENOMEM;
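The `1UL << exp` change in the rate computation is an undefined-behaviour fix, separate from the dma_alloc_coherent() conversions: `1 << exp` is evaluated in plain int, so for exp == 31 the shift overflows before the cast to unsigned long long ever applies. In isolation (sketch):

	unsigned int man = 0;	/* mantissa from the traffic descriptor */
	int exp = 31;		/* worst-case exponent */
	unsigned long long rate_cps;

	/* broken: the shift itself is done -- and overflows -- in int */
	rate_cps = (unsigned long long) (1 << exp) * (man + 512) >> 9;

	/* fixed: unsigned long arithmetic is well defined for exp <= 31 */
	rate_cps = (unsigned long long) (1UL << exp) * (man + 512) >> 9;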
+8 -8
drivers/atm/idt77252.c
··· 641 641 scq = kzalloc(sizeof(struct scq_info), GFP_KERNEL); 642 642 if (!scq) 643 643 return NULL; 644 - scq->base = dma_zalloc_coherent(&card->pcidev->dev, SCQ_SIZE, 645 - &scq->paddr, GFP_KERNEL); 644 + scq->base = dma_alloc_coherent(&card->pcidev->dev, SCQ_SIZE, 645 + &scq->paddr, GFP_KERNEL); 646 646 if (scq->base == NULL) { 647 647 kfree(scq); 648 648 return NULL; ··· 971 971 { 972 972 struct rsq_entry *rsqe; 973 973 974 - card->rsq.base = dma_zalloc_coherent(&card->pcidev->dev, RSQSIZE, 975 - &card->rsq.paddr, GFP_KERNEL); 974 + card->rsq.base = dma_alloc_coherent(&card->pcidev->dev, RSQSIZE, 975 + &card->rsq.paddr, GFP_KERNEL); 976 976 if (card->rsq.base == NULL) { 977 977 printk("%s: can't allocate RSQ.\n", card->name); 978 978 return -1; ··· 3390 3390 writel(0, SAR_REG_GP); 3391 3391 3392 3392 /* Initialize RAW Cell Handle Register */ 3393 - card->raw_cell_hnd = dma_zalloc_coherent(&card->pcidev->dev, 3394 - 2 * sizeof(u32), 3395 - &card->raw_cell_paddr, 3396 - GFP_KERNEL); 3393 + card->raw_cell_hnd = dma_alloc_coherent(&card->pcidev->dev, 3394 + 2 * sizeof(u32), 3395 + &card->raw_cell_paddr, 3396 + GFP_KERNEL); 3397 3397 if (!card->raw_cell_hnd) { 3398 3398 printk("%s: memory allocation failure.\n", card->name); 3399 3399 deinit_card(card);
+3
drivers/base/power/main.c
··· 32 32 #include <trace/events/power.h> 33 33 #include <linux/cpufreq.h> 34 34 #include <linux/cpuidle.h> 35 + #include <linux/devfreq.h> 35 36 #include <linux/timer.h> 36 37 37 38 #include "../base.h" ··· 1079 1078 dpm_show_time(starttime, state, 0, NULL); 1080 1079 1081 1080 cpufreq_resume(); 1081 + devfreq_resume(); 1082 1082 trace_suspend_resume(TPS("dpm_resume"), state.event, false); 1083 1083 } 1084 1084 ··· 1854 1852 trace_suspend_resume(TPS("dpm_suspend"), state.event, true); 1855 1853 might_sleep(); 1856 1854 1855 + devfreq_suspend(); 1857 1856 cpufreq_suspend(); 1858 1857 1859 1858 mutex_lock(&dpm_list_mtx);
+7 -4
drivers/base/power/runtime.c
··· 121 121 * Compute the autosuspend-delay expiration time based on the device's 122 122 * power.last_busy time. If the delay has already expired or is disabled 123 123 * (negative) or the power.use_autosuspend flag isn't set, return 0. 124 - * Otherwise return the expiration time in jiffies (adjusted to be nonzero). 124 + * Otherwise return the expiration time in nanoseconds (adjusted to be nonzero). 125 125 * 126 126 * This function may be called either with or without dev->power.lock held. 127 127 * Either way it can be racy, since power.last_busy may be updated at any time. ··· 141 141 142 142 last_busy = READ_ONCE(dev->power.last_busy); 143 143 144 - expires = last_busy + autosuspend_delay * NSEC_PER_MSEC; 144 + expires = last_busy + (u64)autosuspend_delay * NSEC_PER_MSEC; 145 145 if (expires <= now) 146 146 expires = 0; /* Already expired. */ 147 147 ··· 525 525 * We add a slack of 25% to gather wakeups 526 526 * without sacrificing the granularity. 527 527 */ 528 - u64 slack = READ_ONCE(dev->power.autosuspend_delay) * 528 + u64 slack = (u64)READ_ONCE(dev->power.autosuspend_delay) * 529 529 (NSEC_PER_MSEC >> 2); 530 530 531 531 dev->power.timer_expires = expires; ··· 905 905 spin_lock_irqsave(&dev->power.lock, flags); 906 906 907 907 expires = dev->power.timer_expires; 908 - /* If 'expire' is after 'jiffies' we've been called too early. */ 908 + /* 909 + * If 'expires' is after the current time, we've been called 910 + * too early. 911 + */ 909 912 if (expires > 0 && expires < ktime_to_ns(ktime_get())) { 910 913 dev->power.timer_expires = 0; 911 914 rpm_suspend(dev, dev->power.timer_autosuspends ?
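Both (u64) casts above guard the same 32-bit trap: power.autosuspend_delay is an int, and on 32-bit architectures int * NSEC_PER_MSEC wraps once the delay passes roughly 2.1 seconds (2^31 ns). Widening one operand first forces the whole multiply into 64 bits (sketch):

	int delay_ms = 5000;	/* a 5 s autosuspend delay */
	u64 bad, good;

	bad  = delay_ms * NSEC_PER_MSEC;	/* 32-bit multiply, wraps */
	good = (u64)delay_ms * NSEC_PER_MSEC;	/* 64-bit multiply */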
+7 -1
drivers/base/regmap/regmap-irq.c
··· 108 108 * suppress pointless writes. 109 109 */ 110 110 for (i = 0; i < d->chip->num_regs; i++) { 111 + if (!d->chip->mask_base) 112 + continue; 113 + 111 114 reg = d->chip->mask_base + 112 115 (i * map->reg_stride * d->irq_reg_stride); 113 116 if (d->chip->mask_invert) { ··· 261 258 const struct regmap_irq_type *t = &irq_data->type; 262 259 263 260 if ((t->types_supported & type) != type) 264 - return -ENOTSUPP; 261 + return 0; 265 262 266 263 reg = t->type_reg_offset / map->reg_stride; 267 264 ··· 591 588 /* Mask all the interrupts by default */ 592 589 for (i = 0; i < chip->num_regs; i++) { 593 590 d->mask_buf[i] = d->mask_buf_def[i]; 591 + if (!chip->mask_base) 592 + continue; 593 + 594 594 reg = chip->mask_base + 595 595 (i * map->reg_stride * d->irq_reg_stride); 596 596 if (chip->mask_invert)
+33 -2
drivers/block/loop.c
··· 1190 1190 goto out_unlock; 1191 1191 } 1192 1192 1193 + if (lo->lo_offset != info->lo_offset || 1194 + lo->lo_sizelimit != info->lo_sizelimit) { 1195 + sync_blockdev(lo->lo_device); 1196 + kill_bdev(lo->lo_device); 1197 + } 1198 + 1193 1199 /* I/O needs to be drained during transfer transition */ 1194 1200 blk_mq_freeze_queue(lo->lo_queue); 1195 1201 ··· 1224 1218 1225 1219 if (lo->lo_offset != info->lo_offset || 1226 1220 lo->lo_sizelimit != info->lo_sizelimit) { 1221 + /* kill_bdev should have truncated all the pages */ 1222 + if (lo->lo_device->bd_inode->i_mapping->nrpages) { 1223 + err = -EAGAIN; 1224 + pr_warn("%s: loop%d (%s) still has dirty pages (nrpages=%lu)\n", 1225 + __func__, lo->lo_number, lo->lo_file_name, 1226 + lo->lo_device->bd_inode->i_mapping->nrpages); 1227 + goto out_unfreeze; 1228 + } 1227 1229 if (figure_loop_size(lo, info->lo_offset, info->lo_sizelimit)) { 1228 1230 err = -EFBIG; 1229 1231 goto out_unfreeze; ··· 1457 1443 1458 1444 static int loop_set_block_size(struct loop_device *lo, unsigned long arg) 1459 1445 { 1446 + int err = 0; 1447 + 1460 1448 if (lo->lo_state != Lo_bound) 1461 1449 return -ENXIO; 1462 1450 1463 1451 if (arg < 512 || arg > PAGE_SIZE || !is_power_of_2(arg)) 1464 1452 return -EINVAL; 1465 1453 1454 + if (lo->lo_queue->limits.logical_block_size != arg) { 1455 + sync_blockdev(lo->lo_device); 1456 + kill_bdev(lo->lo_device); 1457 + } 1458 + 1466 1459 blk_mq_freeze_queue(lo->lo_queue); 1460 + 1461 + /* kill_bdev should have truncated all the pages */ 1462 + if (lo->lo_queue->limits.logical_block_size != arg && 1463 + lo->lo_device->bd_inode->i_mapping->nrpages) { 1464 + err = -EAGAIN; 1465 + pr_warn("%s: loop%d (%s) still has dirty pages (nrpages=%lu)\n", 1466 + __func__, lo->lo_number, lo->lo_file_name, 1467 + lo->lo_device->bd_inode->i_mapping->nrpages); 1468 + goto out_unfreeze; 1469 + } 1470 + 1467 1471 blk_queue_logical_block_size(lo->lo_queue, arg); 1468 1472 blk_queue_physical_block_size(lo->lo_queue, arg); 1469 1473 blk_queue_io_min(lo->lo_queue, arg); 1470 1474 loop_update_dio(lo); 1471 1475 1472 - 1475 + out_unfreeze: 1473 1476 blk_mq_unfreeze_queue(lo->lo_queue); 1474 1477 1475 - return 0; 1478 + return err; 1476 1479 } 1477 1480 1478 1481 static int lo_simple_ioctl(struct loop_device *lo, unsigned int cmd,
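Both loop.c paths now follow the same quiesce-then-reconfigure shape; distilled (a sketch of the pattern, not a literal excerpt):

	sync_blockdev(lo->lo_device);		/* write back dirty pages */
	kill_bdev(lo->lo_device);		/* drop the stale page cache */

	blk_mq_freeze_queue(lo->lo_queue);	/* no new I/O during the change */
	if (lo->lo_device->bd_inode->i_mapping->nrpages) {
		err = -EAGAIN;			/* pages came back; refuse to race */
	} else {
		/* ...update offset, sizelimit or logical block size... */
	}
	blk_mq_unfreeze_queue(lo->lo_queue);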
+3 -2
drivers/block/nbd.c
··· 288 288 blk_queue_physical_block_size(nbd->disk->queue, config->blksize); 289 289 set_capacity(nbd->disk, config->bytesize >> 9); 290 290 if (bdev) { 291 - if (bdev->bd_disk) 291 + if (bdev->bd_disk) { 292 292 bd_set_size(bdev, config->bytesize); 293 - else 293 + set_blocksize(bdev, config->blksize); 294 + } else 294 295 bdev->bd_invalidated = 1; 295 296 bdput(bdev); 296 297 }
+1
drivers/block/null_blk.h
··· 97 97 #else 98 98 static inline int null_zone_init(struct nullb_device *dev) 99 99 { 100 + pr_err("null_blk: CONFIG_BLK_DEV_ZONED not enabled\n"); 100 101 return -EINVAL; 101 102 } 102 103 static inline void null_zone_exit(struct nullb_device *dev) {}
+4 -5
drivers/block/rbd.c
··· 5986 5986 struct list_head *tmp; 5987 5987 int dev_id; 5988 5988 char opt_buf[6]; 5989 - bool already = false; 5990 5989 bool force = false; 5991 5990 int ret; 5992 5991 ··· 6018 6019 spin_lock_irq(&rbd_dev->lock); 6019 6020 if (rbd_dev->open_count && !force) 6020 6021 ret = -EBUSY; 6021 - else 6022 - already = test_and_set_bit(RBD_DEV_FLAG_REMOVING, 6023 - &rbd_dev->flags); 6022 + else if (test_and_set_bit(RBD_DEV_FLAG_REMOVING, 6023 + &rbd_dev->flags)) 6024 + ret = -EINPROGRESS; 6024 6025 spin_unlock_irq(&rbd_dev->lock); 6025 6026 } 6026 6027 spin_unlock(&rbd_dev_list_lock); 6027 - if (ret < 0 || already) 6028 + if (ret) 6028 6029 return ret; 6029 6030 6030 6031 if (force) {
+2 -2
drivers/block/skd_main.c
··· 2641 2641 "comp pci_alloc, total bytes %zd entries %d\n", 2642 2642 SKD_SKCOMP_SIZE, SKD_N_COMPLETION_ENTRY); 2643 2643 2644 - skcomp = dma_zalloc_coherent(&skdev->pdev->dev, SKD_SKCOMP_SIZE, 2645 - &skdev->cq_dma_address, GFP_KERNEL); 2644 + skcomp = dma_alloc_coherent(&skdev->pdev->dev, SKD_SKCOMP_SIZE, 2645 + &skdev->cq_dma_address, GFP_KERNEL); 2646 2646 2647 2647 if (skcomp == NULL) { 2648 2648 rc = -ENOMEM;
+66 -24
drivers/block/zram/zram_drv.c
··· 316 316 * See the comment in writeback_store. 317 317 */ 318 318 zram_slot_lock(zram, index); 319 - if (!zram_allocated(zram, index) || 320 - zram_test_flag(zram, index, ZRAM_UNDER_WB)) 321 - goto next; 322 - zram_set_flag(zram, index, ZRAM_IDLE); 323 - next: 319 + if (zram_allocated(zram, index) && 320 + !zram_test_flag(zram, index, ZRAM_UNDER_WB)) 321 + zram_set_flag(zram, index, ZRAM_IDLE); 324 322 zram_slot_unlock(zram, index); 325 323 } 326 324 ··· 328 330 } 329 331 330 332 #ifdef CONFIG_ZRAM_WRITEBACK 333 + static ssize_t writeback_limit_enable_store(struct device *dev, 334 + struct device_attribute *attr, const char *buf, size_t len) 335 + { 336 + struct zram *zram = dev_to_zram(dev); 337 + u64 val; 338 + ssize_t ret = -EINVAL; 339 + 340 + if (kstrtoull(buf, 10, &val)) 341 + return ret; 342 + 343 + down_read(&zram->init_lock); 344 + spin_lock(&zram->wb_limit_lock); 345 + zram->wb_limit_enable = val; 346 + spin_unlock(&zram->wb_limit_lock); 347 + up_read(&zram->init_lock); 348 + ret = len; 349 + 350 + return ret; 351 + } 352 + 353 + static ssize_t writeback_limit_enable_show(struct device *dev, 354 + struct device_attribute *attr, char *buf) 355 + { 356 + bool val; 357 + struct zram *zram = dev_to_zram(dev); 358 + 359 + down_read(&zram->init_lock); 360 + spin_lock(&zram->wb_limit_lock); 361 + val = zram->wb_limit_enable; 362 + spin_unlock(&zram->wb_limit_lock); 363 + up_read(&zram->init_lock); 364 + 365 + return scnprintf(buf, PAGE_SIZE, "%d\n", val); 366 + } 367 + 331 368 static ssize_t writeback_limit_store(struct device *dev, 332 369 struct device_attribute *attr, const char *buf, size_t len) 333 370 { ··· 374 341 return ret; 375 342 376 343 down_read(&zram->init_lock); 377 - atomic64_set(&zram->stats.bd_wb_limit, val); 378 - if (val == 0) 379 - zram->stop_writeback = false; 344 + spin_lock(&zram->wb_limit_lock); 345 + zram->bd_wb_limit = val; 346 + spin_unlock(&zram->wb_limit_lock); 380 347 up_read(&zram->init_lock); 381 348 ret = len; 382 349 ··· 390 357 struct zram *zram = dev_to_zram(dev); 391 358 392 359 down_read(&zram->init_lock); 393 - val = atomic64_read(&zram->stats.bd_wb_limit); 360 + spin_lock(&zram->wb_limit_lock); 361 + val = zram->bd_wb_limit; 362 + spin_unlock(&zram->wb_limit_lock); 394 363 up_read(&zram->init_lock); 395 364 396 365 return scnprintf(buf, PAGE_SIZE, "%llu\n", val); ··· 623 588 return 1; 624 589 } 625 590 626 - #define HUGE_WRITEBACK 0x1 627 - #define IDLE_WRITEBACK 0x2 591 + #define HUGE_WRITEBACK 1 592 + #define IDLE_WRITEBACK 2 628 593 629 594 static ssize_t writeback_store(struct device *dev, 630 595 struct device_attribute *attr, const char *buf, size_t len) ··· 637 602 struct page *page; 638 603 ssize_t ret, sz; 639 604 char mode_buf[8]; 640 - unsigned long mode = -1UL; 605 + int mode = -1; 641 606 unsigned long blk_idx = 0; 642 607 643 608 sz = strscpy(mode_buf, buf, sizeof(mode_buf)); ··· 653 618 else if (!strcmp(mode_buf, "huge")) 654 619 mode = HUGE_WRITEBACK; 655 620 656 - if (mode == -1UL) 621 + if (mode == -1) 657 622 return -EINVAL; 658 623 659 624 down_read(&zram->init_lock); ··· 680 645 bvec.bv_len = PAGE_SIZE; 681 646 bvec.bv_offset = 0; 682 647 683 - if (zram->stop_writeback) { 648 + spin_lock(&zram->wb_limit_lock); 649 + if (zram->wb_limit_enable && !zram->bd_wb_limit) { 650 + spin_unlock(&zram->wb_limit_lock); 684 651 ret = -EIO; 685 652 break; 686 653 } 654 + spin_unlock(&zram->wb_limit_lock); 687 655 688 656 if (!blk_idx) { 689 657 blk_idx = alloc_block_bdev(zram); ··· 705 667 zram_test_flag(zram, index, ZRAM_UNDER_WB)) 706 668 goto next; 707 669 708 - if ((mode & IDLE_WRITEBACK && 709 - !zram_test_flag(zram, index, ZRAM_IDLE)) && 710 - (mode & HUGE_WRITEBACK && 711 - !zram_test_flag(zram, index, ZRAM_HUGE))) 670 + if (mode == IDLE_WRITEBACK && 671 + !zram_test_flag(zram, index, ZRAM_IDLE)) 672 + goto next; 673 + if (mode == HUGE_WRITEBACK && 674 + !zram_test_flag(zram, index, ZRAM_HUGE)) 712 675 goto next; 713 676 /* 714 677 * Clearing ZRAM_UNDER_WB is the duty of the caller. ··· 771 732 zram_set_element(zram, index, blk_idx); 772 733 blk_idx = 0; 773 734 atomic64_inc(&zram->stats.pages_stored); 774 - if (atomic64_add_unless(&zram->stats.bd_wb_limit, 775 - -1 << (PAGE_SHIFT - 12), 0)) { 776 - if (atomic64_read(&zram->stats.bd_wb_limit) == 0) 777 - zram->stop_writeback = true; 778 - } 735 + spin_lock(&zram->wb_limit_lock); 736 + if (zram->wb_limit_enable && zram->bd_wb_limit > 0) 737 + zram->bd_wb_limit -= 1UL << (PAGE_SHIFT - 12); 738 + spin_unlock(&zram->wb_limit_lock); 779 739 next: 780 740 zram_slot_unlock(zram, index); 781 741 } ··· 1850 1812 static DEVICE_ATTR_RW(backing_dev); 1851 1813 static DEVICE_ATTR_WO(writeback); 1852 1814 static DEVICE_ATTR_RW(writeback_limit); 1815 + static DEVICE_ATTR_RW(writeback_limit_enable); 1853 1816 #endif 1854 1817 1855 1818 static struct attribute *zram_disk_attrs[] = { ··· 1867 1828 &dev_attr_backing_dev.attr, 1868 1829 &dev_attr_writeback.attr, 1869 1830 &dev_attr_writeback_limit.attr, 1831 + &dev_attr_writeback_limit_enable.attr, 1870 1832 #endif 1871 1833 &dev_attr_io_stat.attr, 1872 1834 &dev_attr_mm_stat.attr, ··· 1907 1867 device_id = ret; 1908 1868 1909 1869 init_rwsem(&zram->init_lock); 1910 - 1870 + #ifdef CONFIG_ZRAM_WRITEBACK 1871 + spin_lock_init(&zram->wb_limit_lock); 1872 + #endif 1911 1873 queue = blk_alloc_queue(GFP_KERNEL); 1912 1874 if (!queue) { 1913 1875 pr_err("Error allocating disk queue for device %d\n",
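With the atomic64 counter gone, the writeback budget is a plain u64 guarded by wb_limit_lock, and every check and charge takes the lock explicitly. The resulting pattern, distilled from the hunk above:

	spin_lock(&zram->wb_limit_lock);
	if (zram->wb_limit_enable && !zram->bd_wb_limit) {
		spin_unlock(&zram->wb_limit_lock);
		return -EIO;			/* budget exhausted */
	}
	spin_unlock(&zram->wb_limit_lock);

	/* ...write one page to the backing device... */

	spin_lock(&zram->wb_limit_lock);
	if (zram->wb_limit_enable && zram->bd_wb_limit > 0)
		zram->bd_wb_limit -= 1UL << (PAGE_SHIFT - 12);	/* charge in 4K units */
	spin_unlock(&zram->wb_limit_lock);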
+3 -2
drivers/block/zram/zram_drv.h
··· 86 86 atomic64_t bd_count; /* no. of pages in backing device */ 87 87 atomic64_t bd_reads; /* no. of reads from backing device */ 88 88 atomic64_t bd_writes; /* no. of writes from backing device */ 89 - atomic64_t bd_wb_limit; /* writeback limit of backing device */ 90 89 #endif 91 90 }; 92 91 ··· 113 114 */ 114 115 bool claim; /* Protected by bdev->bd_mutex */ 115 116 struct file *backing_dev; 116 - bool stop_writeback; 117 117 #ifdef CONFIG_ZRAM_WRITEBACK 118 + spinlock_t wb_limit_lock; 119 + bool wb_limit_enable; 120 + u64 bd_wb_limit; 118 121 struct block_device *bdev; 119 122 unsigned int old_block_size; 120 123 unsigned long *bitmap;
+4 -8
drivers/cpufreq/cpufreq.c
··· 1530 1530 { 1531 1531 unsigned int ret_freq = 0; 1532 1532 1533 - if (!cpufreq_driver->get) 1533 + if (unlikely(policy_is_inactive(policy)) || !cpufreq_driver->get) 1534 1534 return ret_freq; 1535 1535 1536 1536 ret_freq = cpufreq_driver->get(policy->cpu); 1537 1537 1538 1538 /* 1539 - * Updating inactive policies is invalid, so avoid doing that. Also 1540 - * if fast frequency switching is used with the given policy, the check 1539 + * If fast frequency switching is used with the given policy, the check 1541 1540 * against policy->cur is pointless, so skip it in that case too. 1542 1541 */ 1543 - if (unlikely(policy_is_inactive(policy)) || policy->fast_switch_enabled) 1542 + if (policy->fast_switch_enabled) 1544 1543 return ret_freq; 1545 1544 1546 1545 if (ret_freq && policy->cur && ··· 1568 1569 1569 1570 if (policy) { 1570 1571 down_read(&policy->rwsem); 1571 - 1572 - if (!policy_is_inactive(policy)) 1573 - ret_freq = __cpufreq_get(policy); 1574 - 1572 + ret_freq = __cpufreq_get(policy); 1575 1573 up_read(&policy->rwsem); 1576 1574 1577 1575 cpufreq_cpu_put(policy);
+4 -4
drivers/cpufreq/scmi-cpufreq.c
··· 52 52 int ret; 53 53 struct scmi_data *priv = policy->driver_data; 54 54 struct scmi_perf_ops *perf_ops = handle->perf_ops; 55 - u64 freq = policy->freq_table[index].frequency * 1000; 55 + u64 freq = policy->freq_table[index].frequency; 56 56 57 - ret = perf_ops->freq_set(handle, priv->domain_id, freq, false); 57 + ret = perf_ops->freq_set(handle, priv->domain_id, freq * 1000, false); 58 58 if (!ret) 59 59 arch_set_freq_scale(policy->related_cpus, freq, 60 60 policy->cpuinfo.max_freq); ··· 176 176 out_free_priv: 177 177 kfree(priv); 178 178 out_free_opp: 179 - dev_pm_opp_cpumask_remove_table(policy->cpus); 179 + dev_pm_opp_remove_all_dynamic(cpu_dev); 180 180 181 181 return ret; 182 182 } ··· 188 188 cpufreq_cooling_unregister(priv->cdev); 189 189 dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table); 190 190 kfree(priv); 191 - dev_pm_opp_cpumask_remove_table(policy->related_cpus); 191 + dev_pm_opp_remove_all_dynamic(priv->cpu_dev); 192 192 193 193 return 0; 194 194 }
+2 -2
drivers/cpufreq/scpi-cpufreq.c
··· 177 177 out_free_priv: 178 178 kfree(priv); 179 179 out_free_opp: 180 - dev_pm_opp_cpumask_remove_table(policy->cpus); 180 + dev_pm_opp_remove_all_dynamic(cpu_dev); 181 181 182 182 return ret; 183 183 } ··· 190 190 clk_put(priv->clk); 191 191 dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table); 192 192 kfree(priv); 193 - dev_pm_opp_cpumask_remove_table(policy->related_cpus); 193 + dev_pm_opp_remove_all_dynamic(priv->cpu_dev); 194 194 195 195 return 0; 196 196 }
+1
drivers/crypto/Kconfig
··· 692 692 depends on ARCH_BCM_IPROC 693 693 depends on MAILBOX 694 694 default m 695 + select CRYPTO_AUTHENC 695 696 select CRYPTO_DES 696 697 select CRYPTO_MD5 697 698 select CRYPTO_SHA1
+3 -3
drivers/crypto/amcc/crypto4xx_core.c
··· 283 283 */ 284 284 static u32 crypto4xx_build_gdr(struct crypto4xx_device *dev) 285 285 { 286 - dev->gdr = dma_zalloc_coherent(dev->core_dev->device, 287 - sizeof(struct ce_gd) * PPC4XX_NUM_GD, 288 - &dev->gdr_pa, GFP_ATOMIC); 286 + dev->gdr = dma_alloc_coherent(dev->core_dev->device, 287 + sizeof(struct ce_gd) * PPC4XX_NUM_GD, 288 + &dev->gdr_pa, GFP_ATOMIC); 289 289 if (!dev->gdr) 290 290 return -ENOMEM; 291 291
+13 -31
drivers/crypto/bcm/cipher.c
··· 2845 2845 struct spu_hw *spu = &iproc_priv.spu; 2846 2846 struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher); 2847 2847 struct crypto_tfm *tfm = crypto_aead_tfm(cipher); 2848 - struct rtattr *rta = (void *)key; 2849 - struct crypto_authenc_key_param *param; 2850 - const u8 *origkey = key; 2851 - const unsigned int origkeylen = keylen; 2852 - 2853 - int ret = 0; 2848 + struct crypto_authenc_keys keys; 2849 + int ret; 2854 2850 2855 2851 flow_log("%s() aead:%p key:%p keylen:%u\n", __func__, cipher, key, 2856 2852 keylen); 2857 2853 flow_dump(" key: ", key, keylen); 2858 2854 2859 - if (!RTA_OK(rta, keylen)) 2860 - goto badkey; 2861 - if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM) 2862 - goto badkey; 2863 - if (RTA_PAYLOAD(rta) < sizeof(*param)) 2855 + ret = crypto_authenc_extractkeys(&keys, key, keylen); 2856 + if (ret) 2864 2857 goto badkey; 2865 2858 2866 - param = RTA_DATA(rta); 2867 - ctx->enckeylen = be32_to_cpu(param->enckeylen); 2868 - 2869 - key += RTA_ALIGN(rta->rta_len); 2870 - keylen -= RTA_ALIGN(rta->rta_len); 2871 - 2872 - if (keylen < ctx->enckeylen) 2873 - goto badkey; 2874 - if (ctx->enckeylen > MAX_KEY_SIZE) 2859 + if (keys.enckeylen > MAX_KEY_SIZE || 2860 + keys.authkeylen > MAX_KEY_SIZE) 2875 2861 goto badkey; 2876 2862 2877 - ctx->authkeylen = keylen - ctx->enckeylen; 2863 + ctx->enckeylen = keys.enckeylen; 2864 + ctx->authkeylen = keys.authkeylen; 2878 2865 2879 - if (ctx->authkeylen > MAX_KEY_SIZE) 2880 - goto badkey; 2881 - 2882 - memcpy(ctx->enckey, key + ctx->authkeylen, ctx->enckeylen); 2866 + memcpy(ctx->enckey, keys.enckey, keys.enckeylen); 2883 2867 /* May end up padding auth key. So make sure it's zeroed. */ 2884 2868 memset(ctx->authkey, 0, sizeof(ctx->authkey)); 2885 - memcpy(ctx->authkey, key, ctx->authkeylen); 2869 + memcpy(ctx->authkey, keys.authkey, keys.authkeylen); 2886 2870 2887 2871 switch (ctx->alg->cipher_info.alg) { 2888 2872 case CIPHER_ALG_DES: ··· 2874 2890 u32 tmp[DES_EXPKEY_WORDS]; 2875 2891 u32 flags = CRYPTO_TFM_RES_WEAK_KEY; 2876 2892 2877 - if (des_ekey(tmp, key) == 0) { 2893 + if (des_ekey(tmp, keys.enckey) == 0) { 2878 2894 if (crypto_aead_get_flags(cipher) & 2879 2895 CRYPTO_TFM_REQ_WEAK_KEY) { 2880 2896 crypto_aead_set_flags(cipher, flags); ··· 2889 2905 break; 2890 2906 case CIPHER_ALG_3DES: 2891 2907 if (ctx->enckeylen == (DES_KEY_SIZE * 3)) { 2892 - const u32 *K = (const u32 *)key; 2908 + const u32 *K = (const u32 *)keys.enckey; 2893 2909 u32 flags = CRYPTO_TFM_RES_BAD_KEY_SCHED; 2894 2910 2895 2911 if (!((K[0] ^ K[2]) | (K[1] ^ K[3])) || ··· 2940 2956 ctx->fallback_cipher->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK; 2941 2957 ctx->fallback_cipher->base.crt_flags |= 2942 2958 tfm->crt_flags & CRYPTO_TFM_REQ_MASK; 2943 - ret = 2944 - crypto_aead_setkey(ctx->fallback_cipher, origkey, 2945 - origkeylen); 2959 + ret = crypto_aead_setkey(ctx->fallback_cipher, key, keylen); 2946 2960 if (ret) { 2947 2961 flow_log(" fallback setkey() returned:%d\n", ret); 2948 2962 tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
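crypto_authenc_extractkeys(), declared in <crypto/authenc.h>, replaces all of the hand-rolled rtattr parsing here (and in ccree's cc_aead_setkey() below). It validates the blob and hands back pointers into it:

	struct crypto_authenc_keys {
		const u8 *authkey;
		const u8 *enckey;
		unsigned int authkeylen;
		unsigned int enckeylen;
	};

so a driver is left with only its own bounds checks and copies, as the new code above shows.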
+1 -1
drivers/crypto/caam/caamalg.c
··· 3476 3476 * Skip algorithms requiring message digests 3477 3477 * if MD or MD size is not supported by device. 3478 3478 */ 3479 - if ((c2_alg_sel & ~OP_ALG_ALGSEL_SUBMASK) == 0x40 && 3479 + if (is_mdha(c2_alg_sel) && 3480 3480 (!md_inst || t_alg->aead.maxauthsize > md_limit)) 3481 3481 continue; 3482 3482
+9 -6
drivers/crypto/caam/caamhash.c
··· 1072 1072 1073 1073 desc = edesc->hw_desc; 1074 1074 1075 - state->buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE); 1076 - if (dma_mapping_error(jrdev, state->buf_dma)) { 1077 - dev_err(jrdev, "unable to map src\n"); 1078 - goto unmap; 1079 - } 1075 + if (buflen) { 1076 + state->buf_dma = dma_map_single(jrdev, buf, buflen, 1077 + DMA_TO_DEVICE); 1078 + if (dma_mapping_error(jrdev, state->buf_dma)) { 1079 + dev_err(jrdev, "unable to map src\n"); 1080 + goto unmap; 1081 + } 1080 1082 1081 - append_seq_in_ptr(desc, state->buf_dma, buflen, 0); 1083 + append_seq_in_ptr(desc, state->buf_dma, buflen, 0); 1084 + } 1082 1085 1083 1086 edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result, 1084 1087 digestsize);
+1
drivers/crypto/caam/desc.h
··· 1155 1155 #define OP_ALG_ALGSEL_DES (0x20 << OP_ALG_ALGSEL_SHIFT) 1156 1156 #define OP_ALG_ALGSEL_3DES (0x21 << OP_ALG_ALGSEL_SHIFT) 1157 1157 #define OP_ALG_ALGSEL_ARC4 (0x30 << OP_ALG_ALGSEL_SHIFT) 1158 + #define OP_ALG_CHA_MDHA (0x40 << OP_ALG_ALGSEL_SHIFT) 1158 1159 #define OP_ALG_ALGSEL_MD5 (0x40 << OP_ALG_ALGSEL_SHIFT) 1159 1160 #define OP_ALG_ALGSEL_SHA1 (0x41 << OP_ALG_ALGSEL_SHIFT) 1160 1161 #define OP_ALG_ALGSEL_SHA224 (0x42 << OP_ALG_ALGSEL_SHIFT)
+9
drivers/crypto/caam/error.h
··· 7 7 8 8 #ifndef CAAM_ERROR_H 9 9 #define CAAM_ERROR_H 10 + 11 + #include "desc.h" 12 + 10 13 #define CAAM_ERROR_STR_MAX 302 11 14 12 15 void caam_strstatus(struct device *dev, u32 status, bool qi_v2); ··· 20 17 void caam_dump_sg(const char *level, const char *prefix_str, int prefix_type, 21 18 int rowsize, int groupsize, struct scatterlist *sg, 22 19 size_t tlen, bool ascii); 20 + 21 + static inline bool is_mdha(u32 algtype) 22 + { 23 + return (algtype & OP_ALG_ALGSEL_MASK & ~OP_ALG_ALGSEL_SUBMASK) == 24 + OP_ALG_CHA_MDHA; 25 + } 23 26 #endif /* CAAM_ERROR_H */
+2 -2
drivers/crypto/cavium/cpt/cptpf_main.c
··· 278 278 mcode->num_cores = is_ae ? 6 : 10; 279 279 280 280 /* Allocate DMAable space */ 281 - mcode->code = dma_zalloc_coherent(&cpt->pdev->dev, mcode->code_size, 282 - &mcode->phys_base, GFP_KERNEL); 281 + mcode->code = dma_alloc_coherent(&cpt->pdev->dev, mcode->code_size, 282 + &mcode->phys_base, GFP_KERNEL); 283 283 if (!mcode->code) { 284 284 dev_err(dev, "Unable to allocate space for microcode"); 285 285 ret = -ENOMEM;
+4 -3
drivers/crypto/cavium/cpt/cptvf_main.c
··· 236 236 237 237 c_size = (rem_q_size > qcsize_bytes) ? qcsize_bytes : 238 238 rem_q_size; 239 - curr->head = (u8 *)dma_zalloc_coherent(&pdev->dev, 240 - c_size + CPT_NEXT_CHUNK_PTR_SIZE, 241 - &curr->dma_addr, GFP_KERNEL); 239 + curr->head = (u8 *)dma_alloc_coherent(&pdev->dev, 240 + c_size + CPT_NEXT_CHUNK_PTR_SIZE, 241 + &curr->dma_addr, 242 + GFP_KERNEL); 242 243 if (!curr->head) { 243 244 dev_err(&pdev->dev, "Command Q (%d) chunk (%d) allocation failed\n", 244 245 i, queue->nchunks);
+3 -3
drivers/crypto/cavium/nitrox/nitrox_lib.c
··· 25 25 struct nitrox_device *ndev = cmdq->ndev; 26 26 27 27 cmdq->qsize = (ndev->qlen * cmdq->instr_size) + align_bytes; 28 - cmdq->unalign_base = dma_zalloc_coherent(DEV(ndev), cmdq->qsize, 29 - &cmdq->unalign_dma, 30 - GFP_KERNEL); 28 + cmdq->unalign_base = dma_alloc_coherent(DEV(ndev), cmdq->qsize, 29 + &cmdq->unalign_dma, 30 + GFP_KERNEL); 31 31 if (!cmdq->unalign_base) 32 32 return -ENOMEM; 33 33
+1 -1
drivers/crypto/cavium/nitrox/nitrox_reqmgr.c
··· 567 567 568 568 /* ORH error code */ 569 569 err = READ_ONCE(*sr->resp.orh) & 0xff; 570 - softreq_destroy(sr); 571 570 572 571 if (sr->callback) 573 572 sr->callback(sr->cb_arg, err); 573 + softreq_destroy(sr); 574 574 575 575 req_completed++; 576 576 }
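The two-line swap in nitrox is a use-after-free fix: softreq_destroy() frees sr, and the old code read sr->callback and sr->cb_arg afterwards. The bug and fix in schematic form:

	/* before: freed, then dereferenced */
	softreq_destroy(sr);
	if (sr->callback)			/* use-after-free */
		sr->callback(sr->cb_arg, err);

	/* after: consume every field, then free */
	err = READ_ONCE(*sr->resp.orh) & 0xff;
	if (sr->callback)
		sr->callback(sr->cb_arg, err);
	softreq_destroy(sr);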
+3 -3
drivers/crypto/ccp/ccp-dev-v5.c
··· 822 822 /* Page alignment satisfies our needs for N <= 128 */ 823 823 BUILD_BUG_ON(COMMANDS_PER_QUEUE > 128); 824 824 cmd_q->qsize = Q_SIZE(Q_DESC_SIZE); 825 - cmd_q->qbase = dma_zalloc_coherent(dev, cmd_q->qsize, 826 - &cmd_q->qbase_dma, 827 - GFP_KERNEL); 825 + cmd_q->qbase = dma_alloc_coherent(dev, cmd_q->qsize, 826 + &cmd_q->qbase_dma, 827 + GFP_KERNEL); 828 828 if (!cmd_q->qbase) { 829 829 dev_err(dev, "unable to allocate command queue\n"); 830 830 ret = -ENOMEM;
+19 -21
drivers/crypto/ccree/cc_aead.c
··· 549 549 unsigned int keylen) 550 550 { 551 551 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm); 552 - struct rtattr *rta = (struct rtattr *)key; 553 552 struct cc_crypto_req cc_req = {}; 554 - struct crypto_authenc_key_param *param; 555 553 struct cc_hw_desc desc[MAX_AEAD_SETKEY_SEQ]; 556 - int rc = -EINVAL; 557 554 unsigned int seq_len = 0; 558 555 struct device *dev = drvdata_to_dev(ctx->drvdata); 556 + const u8 *enckey, *authkey; 557 + int rc; 559 558 560 559 dev_dbg(dev, "Setting key in context @%p for %s. key=%p keylen=%u\n", 561 560 ctx, crypto_tfm_alg_name(crypto_aead_tfm(tfm)), key, keylen); ··· 562 563 /* STAT_PHASE_0: Init and sanity checks */ 563 564 564 565 if (ctx->auth_mode != DRV_HASH_NULL) { /* authenc() alg. */ 565 - if (!RTA_OK(rta, keylen)) 566 + struct crypto_authenc_keys keys; 567 + 568 + rc = crypto_authenc_extractkeys(&keys, key, keylen); 569 + if (rc) 566 570 goto badkey; 567 - if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM) 568 - goto badkey; 569 - if (RTA_PAYLOAD(rta) < sizeof(*param)) 570 - goto badkey; 571 - param = RTA_DATA(rta); 572 - ctx->enc_keylen = be32_to_cpu(param->enckeylen); 573 - key += RTA_ALIGN(rta->rta_len); 574 - keylen -= RTA_ALIGN(rta->rta_len); 575 - if (keylen < ctx->enc_keylen) 576 - goto badkey; 577 - ctx->auth_keylen = keylen - ctx->enc_keylen; 571 + enckey = keys.enckey; 572 + authkey = keys.authkey; 573 + ctx->enc_keylen = keys.enckeylen; 574 + ctx->auth_keylen = keys.authkeylen; 578 575 579 576 if (ctx->cipher_mode == DRV_CIPHER_CTR) { 580 577 /* the nonce is stored in bytes at end of key */ 578 + rc = -EINVAL; 581 579 if (ctx->enc_keylen < 582 580 (AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE)) 583 581 goto badkey; 584 582 /* Copy nonce from last 4 bytes in CTR key to 585 583 * first 4 bytes in CTR IV 586 584 */ 587 - memcpy(ctx->ctr_nonce, key + ctx->auth_keylen + 588 - ctx->enc_keylen - CTR_RFC3686_NONCE_SIZE, 589 - CTR_RFC3686_NONCE_SIZE); 585 + memcpy(ctx->ctr_nonce, enckey + ctx->enc_keylen - 586 + CTR_RFC3686_NONCE_SIZE, CTR_RFC3686_NONCE_SIZE); 590 587 /* Set CTR key size */ 591 588 ctx->enc_keylen -= CTR_RFC3686_NONCE_SIZE; 592 589 } 593 590 } else { /* non-authenc - has just one key */ 591 + enckey = key; 592 + authkey = NULL; 594 593 ctx->enc_keylen = keylen; 595 594 ctx->auth_keylen = 0; 596 595 } ··· 600 603 /* STAT_PHASE_1: Copy key to ctx */ 601 604 602 605 /* Get key material */ 603 - memcpy(ctx->enckey, key + ctx->auth_keylen, ctx->enc_keylen); 606 + memcpy(ctx->enckey, enckey, ctx->enc_keylen); 604 607 if (ctx->enc_keylen == 24) 605 608 memset(ctx->enckey + 24, 0, CC_AES_KEY_SIZE_MAX - 24); 606 609 if (ctx->auth_mode == DRV_HASH_XCBC_MAC) { 607 - memcpy(ctx->auth_state.xcbc.xcbc_keys, key, ctx->auth_keylen); 610 + memcpy(ctx->auth_state.xcbc.xcbc_keys, authkey, 611 + ctx->auth_keylen); 608 612 } else if (ctx->auth_mode != DRV_HASH_NULL) { /* HMAC */ 609 - rc = cc_get_plain_hmac_key(tfm, key, ctx->auth_keylen); 613 + rc = cc_get_plain_hmac_key(tfm, authkey, ctx->auth_keylen); 610 614 if (rc) 611 615 goto badkey; 612 616 }
+2 -2
drivers/crypto/hisilicon/sec/sec_algs.c
··· 241 241 memset(ctx->key, 0, SEC_MAX_CIPHER_KEY); 242 242 } else { 243 243 /* new key */ 244 - ctx->key = dma_zalloc_coherent(dev, SEC_MAX_CIPHER_KEY, 245 - &ctx->pkey, GFP_KERNEL); 244 + ctx->key = dma_alloc_coherent(dev, SEC_MAX_CIPHER_KEY, 245 + &ctx->pkey, GFP_KERNEL); 246 246 if (!ctx->key) { 247 247 mutex_unlock(&ctx->lock); 248 248 return -ENOMEM;
+6 -9
drivers/crypto/hisilicon/sec/sec_drv.c
··· 1082 1082 struct sec_queue_ring_db *ring_db = &queue->ring_db; 1083 1083 int ret; 1084 1084 1085 - ring_cmd->vaddr = dma_zalloc_coherent(dev, SEC_Q_CMD_SIZE, 1086 - &ring_cmd->paddr, 1087 - GFP_KERNEL); 1085 + ring_cmd->vaddr = dma_alloc_coherent(dev, SEC_Q_CMD_SIZE, 1086 + &ring_cmd->paddr, GFP_KERNEL); 1088 1087 if (!ring_cmd->vaddr) 1089 1088 return -ENOMEM; 1090 1089 ··· 1091 1092 mutex_init(&ring_cmd->lock); 1092 1093 ring_cmd->callback = sec_alg_callback; 1093 1094 1094 - ring_cq->vaddr = dma_zalloc_coherent(dev, SEC_Q_CQ_SIZE, 1095 - &ring_cq->paddr, 1096 - GFP_KERNEL); 1095 + ring_cq->vaddr = dma_alloc_coherent(dev, SEC_Q_CQ_SIZE, 1096 + &ring_cq->paddr, GFP_KERNEL); 1097 1097 if (!ring_cq->vaddr) { 1098 1098 ret = -ENOMEM; 1099 1099 goto err_free_ring_cmd; 1100 1100 } 1101 1101 1102 - ring_db->vaddr = dma_zalloc_coherent(dev, SEC_Q_DB_SIZE, 1103 - &ring_db->paddr, 1104 - GFP_KERNEL); 1102 + ring_db->vaddr = dma_alloc_coherent(dev, SEC_Q_DB_SIZE, 1103 + &ring_db->paddr, GFP_KERNEL); 1105 1104 if (!ring_db->vaddr) { 1106 1105 ret = -ENOMEM; 1107 1106 goto err_free_ring_cq;
+3 -3
drivers/crypto/ixp4xx_crypto.c
··· 260 260 { 261 261 struct device *dev = &pdev->dev; 262 262 BUILD_BUG_ON(sizeof(struct crypt_ctl) != 64); 263 - crypt_virt = dma_zalloc_coherent(dev, 264 - NPE_QLEN * sizeof(struct crypt_ctl), 265 - &crypt_phys, GFP_ATOMIC); 263 + crypt_virt = dma_alloc_coherent(dev, 264 + NPE_QLEN * sizeof(struct crypt_ctl), 265 + &crypt_phys, GFP_ATOMIC); 266 266 if (!crypt_virt) 267 267 return -ENOMEM; 268 268 return 0;
+8 -8
drivers/crypto/mediatek/mtk-platform.c
··· 453 453 if (!ring[i]) 454 454 goto err_cleanup; 455 455 456 - ring[i]->cmd_base = dma_zalloc_coherent(cryp->dev, 457 - MTK_DESC_RING_SZ, 458 - &ring[i]->cmd_dma, 459 - GFP_KERNEL); 456 + ring[i]->cmd_base = dma_alloc_coherent(cryp->dev, 457 + MTK_DESC_RING_SZ, 458 + &ring[i]->cmd_dma, 459 + GFP_KERNEL); 460 460 if (!ring[i]->cmd_base) 461 461 goto err_cleanup; 462 462 463 - ring[i]->res_base = dma_zalloc_coherent(cryp->dev, 464 - MTK_DESC_RING_SZ, 465 - &ring[i]->res_dma, 466 - GFP_KERNEL); 463 + ring[i]->res_base = dma_alloc_coherent(cryp->dev, 464 + MTK_DESC_RING_SZ, 465 + &ring[i]->res_dma, 466 + GFP_KERNEL); 467 467 if (!ring[i]->res_base) 468 468 goto err_cleanup; 469 469
+6 -6
drivers/crypto/qat/qat_common/adf_admin.c
··· 244 244 dev_to_node(&GET_DEV(accel_dev))); 245 245 if (!admin) 246 246 return -ENOMEM; 247 - admin->virt_addr = dma_zalloc_coherent(&GET_DEV(accel_dev), PAGE_SIZE, 248 - &admin->phy_addr, GFP_KERNEL); 247 + admin->virt_addr = dma_alloc_coherent(&GET_DEV(accel_dev), PAGE_SIZE, 248 + &admin->phy_addr, GFP_KERNEL); 249 249 if (!admin->virt_addr) { 250 250 dev_err(&GET_DEV(accel_dev), "Failed to allocate dma buff\n"); 251 251 kfree(admin); 252 252 return -ENOMEM; 253 253 } 254 254 255 - admin->virt_tbl_addr = dma_zalloc_coherent(&GET_DEV(accel_dev), 256 - PAGE_SIZE, 257 - &admin->const_tbl_addr, 258 - GFP_KERNEL); 255 + admin->virt_tbl_addr = dma_alloc_coherent(&GET_DEV(accel_dev), 256 + PAGE_SIZE, 257 + &admin->const_tbl_addr, 258 + GFP_KERNEL); 259 259 if (!admin->virt_tbl_addr) { 260 260 dev_err(&GET_DEV(accel_dev), "Failed to allocate const_tbl\n"); 261 261 dma_free_coherent(&GET_DEV(accel_dev), PAGE_SIZE,
+12 -12
drivers/crypto/qat/qat_common/qat_algs.c
··· 601 601 602 602 dev = &GET_DEV(inst->accel_dev); 603 603 ctx->inst = inst; 604 - ctx->enc_cd = dma_zalloc_coherent(dev, sizeof(*ctx->enc_cd), 605 - &ctx->enc_cd_paddr, 606 - GFP_ATOMIC); 604 + ctx->enc_cd = dma_alloc_coherent(dev, sizeof(*ctx->enc_cd), 605 + &ctx->enc_cd_paddr, 606 + GFP_ATOMIC); 607 607 if (!ctx->enc_cd) { 608 608 return -ENOMEM; 609 609 } 610 - ctx->dec_cd = dma_zalloc_coherent(dev, sizeof(*ctx->dec_cd), 611 - &ctx->dec_cd_paddr, 612 - GFP_ATOMIC); 610 + ctx->dec_cd = dma_alloc_coherent(dev, sizeof(*ctx->dec_cd), 611 + &ctx->dec_cd_paddr, 612 + GFP_ATOMIC); 613 613 if (!ctx->dec_cd) { 614 614 goto out_free_enc; 615 615 } ··· 933 933 934 934 dev = &GET_DEV(inst->accel_dev); 935 935 ctx->inst = inst; 936 - ctx->enc_cd = dma_zalloc_coherent(dev, sizeof(*ctx->enc_cd), 937 - &ctx->enc_cd_paddr, 938 - GFP_ATOMIC); 936 + ctx->enc_cd = dma_alloc_coherent(dev, sizeof(*ctx->enc_cd), 937 + &ctx->enc_cd_paddr, 938 + GFP_ATOMIC); 939 939 if (!ctx->enc_cd) { 940 940 spin_unlock(&ctx->lock); 941 941 return -ENOMEM; 942 942 } 943 - ctx->dec_cd = dma_zalloc_coherent(dev, sizeof(*ctx->dec_cd), 944 - &ctx->dec_cd_paddr, 945 - GFP_ATOMIC); 943 + ctx->dec_cd = dma_alloc_coherent(dev, sizeof(*ctx->dec_cd), 944 + &ctx->dec_cd_paddr, 945 + GFP_ATOMIC); 946 946 if (!ctx->dec_cd) { 947 947 spin_unlock(&ctx->lock); 948 948 goto out_free_enc;
+34 -34
drivers/crypto/qat/qat_common/qat_asym_algs.c
··· 332 332 } else { 333 333 int shift = ctx->p_size - req->src_len; 334 334 335 - qat_req->src_align = dma_zalloc_coherent(dev, 336 - ctx->p_size, 337 - &qat_req->in.dh.in.b, 338 - GFP_KERNEL); 335 + qat_req->src_align = dma_alloc_coherent(dev, 336 + ctx->p_size, 337 + &qat_req->in.dh.in.b, 338 + GFP_KERNEL); 339 339 if (unlikely(!qat_req->src_align)) 340 340 return ret; 341 341 ··· 360 360 goto unmap_src; 361 361 362 362 } else { 363 - qat_req->dst_align = dma_zalloc_coherent(dev, ctx->p_size, 364 - &qat_req->out.dh.r, 365 - GFP_KERNEL); 363 + qat_req->dst_align = dma_alloc_coherent(dev, ctx->p_size, 364 + &qat_req->out.dh.r, 365 + GFP_KERNEL); 366 366 if (unlikely(!qat_req->dst_align)) 367 367 goto unmap_src; 368 368 } ··· 447 447 return -EINVAL; 448 448 449 449 ctx->p_size = params->p_size; 450 - ctx->p = dma_zalloc_coherent(dev, ctx->p_size, &ctx->dma_p, GFP_KERNEL); 450 + ctx->p = dma_alloc_coherent(dev, ctx->p_size, &ctx->dma_p, GFP_KERNEL); 451 451 if (!ctx->p) 452 452 return -ENOMEM; 453 453 memcpy(ctx->p, params->p, ctx->p_size); ··· 458 458 return 0; 459 459 } 460 460 461 - ctx->g = dma_zalloc_coherent(dev, ctx->p_size, &ctx->dma_g, GFP_KERNEL); 461 + ctx->g = dma_alloc_coherent(dev, ctx->p_size, &ctx->dma_g, GFP_KERNEL); 462 462 if (!ctx->g) 463 463 return -ENOMEM; 464 464 memcpy(ctx->g + (ctx->p_size - params->g_size), params->g, ··· 503 503 if (ret < 0) 504 504 goto err_clear_ctx; 505 505 506 - ctx->xa = dma_zalloc_coherent(dev, ctx->p_size, &ctx->dma_xa, 507 - GFP_KERNEL); 506 + ctx->xa = dma_alloc_coherent(dev, ctx->p_size, &ctx->dma_xa, 507 + GFP_KERNEL); 508 508 if (!ctx->xa) { 509 509 ret = -ENOMEM; 510 510 goto err_clear_ctx; ··· 737 737 } else { 738 738 int shift = ctx->key_sz - req->src_len; 739 739 740 - qat_req->src_align = dma_zalloc_coherent(dev, ctx->key_sz, 741 - &qat_req->in.rsa.enc.m, 742 - GFP_KERNEL); 740 + qat_req->src_align = dma_alloc_coherent(dev, ctx->key_sz, 741 + &qat_req->in.rsa.enc.m, 742 + GFP_KERNEL); 743 743 if (unlikely(!qat_req->src_align)) 744 744 return ret; 745 745 ··· 756 756 goto unmap_src; 757 757 758 758 } else { 759 - qat_req->dst_align = dma_zalloc_coherent(dev, ctx->key_sz, 760 - &qat_req->out.rsa.enc.c, 761 - GFP_KERNEL); 759 + qat_req->dst_align = dma_alloc_coherent(dev, ctx->key_sz, 760 + &qat_req->out.rsa.enc.c, 761 + GFP_KERNEL); 762 762 if (unlikely(!qat_req->dst_align)) 763 763 goto unmap_src; 764 764 ··· 881 881 } else { 882 882 int shift = ctx->key_sz - req->src_len; 883 883 884 - qat_req->src_align = dma_zalloc_coherent(dev, ctx->key_sz, 885 - &qat_req->in.rsa.dec.c, 886 - GFP_KERNEL); 884 + qat_req->src_align = dma_alloc_coherent(dev, ctx->key_sz, 885 + &qat_req->in.rsa.dec.c, 886 + GFP_KERNEL); 887 887 if (unlikely(!qat_req->src_align)) 888 888 return ret; 889 889 ··· 900 900 goto unmap_src; 901 901 902 902 } else { 903 - qat_req->dst_align = dma_zalloc_coherent(dev, ctx->key_sz, 904 - &qat_req->out.rsa.dec.m, 905 - GFP_KERNEL); 903 + qat_req->dst_align = dma_alloc_coherent(dev, ctx->key_sz, 904 + &qat_req->out.rsa.dec.m, 905 + GFP_KERNEL); 906 906 if (unlikely(!qat_req->dst_align)) 907 907 goto unmap_src; 908 908 ··· 989 989 goto err; 990 990 991 991 ret = -ENOMEM; 992 - ctx->n = dma_zalloc_coherent(dev, ctx->key_sz, &ctx->dma_n, GFP_KERNEL); 992 + ctx->n = dma_alloc_coherent(dev, ctx->key_sz, &ctx->dma_n, GFP_KERNEL); 993 993 if (!ctx->n) 994 994 goto err; 995 995 ··· 1018 1018 return -EINVAL; 1019 1019 } 1020 1020 1021 - ctx->e = dma_zalloc_coherent(dev, ctx->key_sz, &ctx->dma_e, GFP_KERNEL); 1021 + ctx->e = dma_alloc_coherent(dev, ctx->key_sz, &ctx->dma_e, GFP_KERNEL); 1022 1022 if (!ctx->e) 1023 1023 return -ENOMEM; 1024 1024 ··· 1044 1044 goto err; 1045 1045 1046 1046 ret = -ENOMEM; 1047 - ctx->d = dma_zalloc_coherent(dev, ctx->key_sz, &ctx->dma_d, GFP_KERNEL); 1047 + ctx->d = dma_alloc_coherent(dev, ctx->key_sz, &ctx->dma_d, GFP_KERNEL); 1048 1048 if (!ctx->d) 1049 1049 goto err; 1050 1050 ··· 1077 1077 qat_rsa_drop_leading_zeros(&ptr, &len); 1078 1078 if (!len) 1079 1079 goto err; 1080 - ctx->p = dma_zalloc_coherent(dev, half_key_sz, &ctx->dma_p, GFP_KERNEL); 1080 + ctx->p = dma_alloc_coherent(dev, half_key_sz, &ctx->dma_p, GFP_KERNEL); 1081 1081 if (!ctx->p) 1082 1082 goto err; 1083 1083 memcpy(ctx->p + (half_key_sz - len), ptr, len); ··· 1088 1088 qat_rsa_drop_leading_zeros(&ptr, &len); 1089 1089 if (!len) 1090 1090 goto free_p; 1091 - ctx->q = dma_zalloc_coherent(dev, half_key_sz, &ctx->dma_q, GFP_KERNEL); 1091 + ctx->q = dma_alloc_coherent(dev, half_key_sz, &ctx->dma_q, GFP_KERNEL); 1092 1092 if (!ctx->q) 1093 1093 goto free_p; 1094 1094 memcpy(ctx->q + (half_key_sz - len), ptr, len); ··· 1099 1099 qat_rsa_drop_leading_zeros(&ptr, &len); 1100 1100 if (!len) 1101 1101 goto free_q; 1102 - ctx->dp = dma_zalloc_coherent(dev, half_key_sz, &ctx->dma_dp, 1103 - GFP_KERNEL); 1102 + ctx->dp = dma_alloc_coherent(dev, half_key_sz, &ctx->dma_dp, 1103 + GFP_KERNEL); 1104 1104 if (!ctx->dp) 1105 1105 goto free_q; 1106 1106 memcpy(ctx->dp + (half_key_sz - len), ptr, len); ··· 1111 1111 qat_rsa_drop_leading_zeros(&ptr, &len); 1112 1112 if (!len) 1113 1113 goto free_dp; 1114 - ctx->dq = dma_zalloc_coherent(dev, half_key_sz, &ctx->dma_dq, 1115 - GFP_KERNEL); 1114 + ctx->dq = dma_alloc_coherent(dev, half_key_sz, &ctx->dma_dq, 1115 + GFP_KERNEL); 1116 1116 if (!ctx->dq) 1117 1117 goto free_dp; 1118 1118 memcpy(ctx->dq + (half_key_sz - len), ptr, len); ··· 1123 1123 qat_rsa_drop_leading_zeros(&ptr, &len); 1124 1124 if (!len) 1125 1125 goto free_dq; 1126 - ctx->qinv = dma_zalloc_coherent(dev, half_key_sz, &ctx->dma_qinv, 1127 - GFP_KERNEL); 1126 + ctx->qinv = dma_alloc_coherent(dev, half_key_sz, &ctx->dma_qinv, 1127 + GFP_KERNEL); 1128 1128 if (!ctx->qinv) 1129 1129 goto free_dq; 1130 1130 memcpy(ctx->qinv + (half_key_sz - len), ptr, len);
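The qat hunks above, like the dma engine hunks that follow, belong to a tree-wide cleanup: dma_alloc_coherent() now returns zeroed memory, so the dma_zalloc_coherent() wrapper is redundant and the conversion is a drop-in rename. A minimal sketch of the shape (dev, size and the handle are placeholders, not this driver's names):

    /* dma_alloc_coherent() zeroes the buffer itself, so no memset()
     * (and no dma_zalloc_coherent() wrapper) is needed afterwards. */
    buf = dma_alloc_coherent(dev, size, &dma_handle, GFP_KERNEL);
    if (!buf)
        return -ENOMEM;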
+9 -17
drivers/crypto/talitos.c
··· 1361 1361 struct talitos_private *priv = dev_get_drvdata(dev); 1362 1362 bool is_sec1 = has_ftr_sec1(priv); 1363 1363 int max_len = is_sec1 ? TALITOS1_MAX_DATA_LEN : TALITOS2_MAX_DATA_LEN; 1364 - void *err; 1365 1364 1366 1365 if (cryptlen + authsize > max_len) { 1367 1366 dev_err(dev, "length exceeds h/w max limit\n"); 1368 1367 return ERR_PTR(-EINVAL); 1369 1368 } 1370 1369 1371 - if (ivsize) 1372 - iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE); 1373 - 1374 1370 if (!dst || dst == src) { 1375 1371 src_len = assoclen + cryptlen + authsize; 1376 1372 src_nents = sg_nents_for_len(src, src_len); 1377 1373 if (src_nents < 0) { 1378 1374 dev_err(dev, "Invalid number of src SG.\n"); 1379 - err = ERR_PTR(-EINVAL); 1380 - goto error_sg; 1375 + return ERR_PTR(-EINVAL); 1381 1376 } 1382 1377 src_nents = (src_nents == 1) ? 0 : src_nents; 1383 1378 dst_nents = dst ? src_nents : 0; ··· 1382 1387 src_nents = sg_nents_for_len(src, src_len); 1383 1388 if (src_nents < 0) { 1384 1389 dev_err(dev, "Invalid number of src SG.\n"); 1385 - err = ERR_PTR(-EINVAL); 1386 - goto error_sg; 1390 + return ERR_PTR(-EINVAL); 1387 1391 } 1388 1392 src_nents = (src_nents == 1) ? 0 : src_nents; 1389 1393 dst_len = assoclen + cryptlen + (encrypt ? authsize : 0); 1390 1394 dst_nents = sg_nents_for_len(dst, dst_len); 1391 1395 if (dst_nents < 0) { 1392 1396 dev_err(dev, "Invalid number of dst SG.\n"); 1393 - err = ERR_PTR(-EINVAL); 1394 - goto error_sg; 1397 + return ERR_PTR(-EINVAL); 1395 1398 } 1396 1399 dst_nents = (dst_nents == 1) ? 0 : dst_nents; 1397 1400 } ··· 1416 1423 /* if its a ahash, add space for a second desc next to the first one */ 1417 1424 if (is_sec1 && !dst) 1418 1425 alloc_len += sizeof(struct talitos_desc); 1426 + alloc_len += ivsize; 1419 1427 1420 1428 edesc = kmalloc(alloc_len, GFP_DMA | flags); 1421 - if (!edesc) { 1422 - err = ERR_PTR(-ENOMEM); 1423 - goto error_sg; 1429 + if (!edesc) 1430 + return ERR_PTR(-ENOMEM); 1431 + if (ivsize) { 1432 + iv = memcpy(((u8 *)edesc) + alloc_len - ivsize, iv, ivsize); 1433 + iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE); 1424 1434 } 1425 1435 memset(&edesc->desc, 0, sizeof(edesc->desc)); 1426 1436 ··· 1441 1445 DMA_BIDIRECTIONAL); 1442 1446 } 1443 1447 return edesc; 1444 - error_sg: 1445 - if (iv_dma) 1446 - dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE); 1447 - return err; 1448 1448 } 1449 1449 1450 1450 static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv,
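The talitos rework reserves ivsize extra bytes at the tail of the edesc allocation and copies the IV there before DMA-mapping it, so dma_map_single() never sees the caller-provided (possibly stack-resident) IV; it also lets the early -EINVAL paths return directly instead of unwinding a mapping that no longer exists at that point. Reduced to a sketch, with the surrounding locals assumed:

    alloc_len += ivsize;                 /* room for a private IV copy */
    edesc = kmalloc(alloc_len, GFP_DMA | flags);
    if (!edesc)
        return ERR_PTR(-ENOMEM);         /* nothing mapped yet */
    if (ivsize) {
        iv = memcpy((u8 *)edesc + alloc_len - ivsize, iv, ivsize);
        iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
    }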
+4 -4
drivers/dma/imx-sdma.c
··· 1182 1182 { 1183 1183 int ret = -EBUSY; 1184 1184 1185 - sdma->bd0 = dma_zalloc_coherent(NULL, PAGE_SIZE, &sdma->bd0_phys, 1186 - GFP_NOWAIT); 1185 + sdma->bd0 = dma_alloc_coherent(NULL, PAGE_SIZE, &sdma->bd0_phys, 1186 + GFP_NOWAIT); 1187 1187 if (!sdma->bd0) { 1188 1188 ret = -ENOMEM; 1189 1189 goto out; ··· 1205 1205 u32 bd_size = desc->num_bd * sizeof(struct sdma_buffer_descriptor); 1206 1206 int ret = 0; 1207 1207 1208 - desc->bd = dma_zalloc_coherent(NULL, bd_size, &desc->bd_phys, 1209 - GFP_NOWAIT); 1208 + desc->bd = dma_alloc_coherent(NULL, bd_size, &desc->bd_phys, 1209 + GFP_NOWAIT); 1210 1210 if (!desc->bd) { 1211 1211 ret = -ENOMEM; 1212 1212 goto out;
+2 -2
drivers/dma/mediatek/mtk-hsdma.c
··· 325 325 * and [MTK_DMA_SIZE ... 2 * MTK_DMA_SIZE - 1] is for RX ring. 326 326 */ 327 327 pc->sz_ring = 2 * MTK_DMA_SIZE * sizeof(*ring->txd); 328 - ring->txd = dma_zalloc_coherent(hsdma2dev(hsdma), pc->sz_ring, 329 - &ring->tphys, GFP_NOWAIT); 328 + ring->txd = dma_alloc_coherent(hsdma2dev(hsdma), pc->sz_ring, 329 + &ring->tphys, GFP_NOWAIT); 330 330 if (!ring->txd) 331 331 return -ENOMEM; 332 332
+3 -3
drivers/dma/mxs-dma.c
··· 416 416 struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; 417 417 int ret; 418 418 419 - mxs_chan->ccw = dma_zalloc_coherent(mxs_dma->dma_device.dev, 420 - CCW_BLOCK_SIZE, 421 - &mxs_chan->ccw_phys, GFP_KERNEL); 419 + mxs_chan->ccw = dma_alloc_coherent(mxs_dma->dma_device.dev, 420 + CCW_BLOCK_SIZE, 421 + &mxs_chan->ccw_phys, GFP_KERNEL); 422 422 if (!mxs_chan->ccw) { 423 423 ret = -ENOMEM; 424 424 goto err_alloc;
+2 -2
drivers/dma/xgene-dma.c
··· 1208 1208 ring->size = ret; 1209 1209 1210 1210 /* Allocate memory for DMA ring descriptor */ 1211 - ring->desc_vaddr = dma_zalloc_coherent(chan->dev, ring->size, 1212 - &ring->desc_paddr, GFP_KERNEL); 1211 + ring->desc_vaddr = dma_alloc_coherent(chan->dev, ring->size, 1212 + &ring->desc_paddr, GFP_KERNEL); 1213 1213 if (!ring->desc_vaddr) { 1214 1214 chan_err(chan, "Failed to allocate ring desc\n"); 1215 1215 return -ENOMEM;
+7 -7
drivers/dma/xilinx/xilinx_dma.c
··· 879 879 */ 880 880 if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) { 881 881 /* Allocate the buffer descriptors. */ 882 - chan->seg_v = dma_zalloc_coherent(chan->dev, 883 - sizeof(*chan->seg_v) * 884 - XILINX_DMA_NUM_DESCS, 885 - &chan->seg_p, GFP_KERNEL); 882 + chan->seg_v = dma_alloc_coherent(chan->dev, 883 + sizeof(*chan->seg_v) * XILINX_DMA_NUM_DESCS, 884 + &chan->seg_p, GFP_KERNEL); 886 885 if (!chan->seg_v) { 887 886 dev_err(chan->dev, 888 887 "unable to allocate channel %d descriptors\n", ··· 894 895 * so allocating a desc segment during channel allocation for 895 896 * programming tail descriptor. 896 897 */ 897 - chan->cyclic_seg_v = dma_zalloc_coherent(chan->dev, 898 - sizeof(*chan->cyclic_seg_v), 899 - &chan->cyclic_seg_p, GFP_KERNEL); 898 + chan->cyclic_seg_v = dma_alloc_coherent(chan->dev, 899 + sizeof(*chan->cyclic_seg_v), 900 + &chan->cyclic_seg_p, 901 + GFP_KERNEL); 900 902 if (!chan->cyclic_seg_v) { 901 903 dev_err(chan->dev, 902 904 "unable to allocate desc segment for cyclic DMA\n");
+3 -3
drivers/dma/xilinx/zynqmp_dma.c
··· 490 490 list_add_tail(&desc->node, &chan->free_list); 491 491 } 492 492 493 - chan->desc_pool_v = dma_zalloc_coherent(chan->dev, 494 - (2 * chan->desc_size * ZYNQMP_DMA_NUM_DESCS), 495 - &chan->desc_pool_p, GFP_KERNEL); 493 + chan->desc_pool_v = dma_alloc_coherent(chan->dev, 494 + (2 * chan->desc_size * ZYNQMP_DMA_NUM_DESCS), 495 + &chan->desc_pool_p, GFP_KERNEL); 496 496 if (!chan->desc_pool_v) 497 497 return -ENOMEM; 498 498
+1 -1
drivers/gpio/gpio-pca953x.c
··· 289 289 return pca953x_check_register(chip, reg, bank); 290 290 } 291 291 292 - const struct regmap_config pca953x_i2c_regmap = { 292 + static const struct regmap_config pca953x_i2c_regmap = { 293 293 .reg_bits = 8, 294 294 .val_bits = 8, 295 295
+1 -6
drivers/gpio/gpiolib-acpi.c
··· 357 357 mutex_unlock(&acpi_gpio_deferred_req_irqs_lock); 358 358 359 359 list_for_each_entry_safe_reverse(event, ep, &acpi_gpio->events, node) { 360 - struct gpio_desc *desc; 361 - 362 360 if (event->irq_requested) { 363 361 if (event->irq_is_wake) 364 362 disable_irq_wake(event->irq); ··· 364 366 free_irq(event->irq, event); 365 367 } 366 368 367 - desc = event->desc; 368 - if (WARN_ON(IS_ERR(desc))) 369 - continue; 370 369 gpiochip_unlock_as_irq(chip, event->pin); 371 - gpiochip_free_own_desc(desc); 370 + gpiochip_free_own_desc(event->desc); 372 371 list_del(&event->node); 373 372 kfree(event); 374 373 }
+6 -6
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
··· 1701 1701 amdgpu_xgmi_add_device(adev); 1702 1702 amdgpu_amdkfd_device_init(adev); 1703 1703 1704 - if (amdgpu_sriov_vf(adev)) 1704 + if (amdgpu_sriov_vf(adev)) { 1705 + amdgpu_virt_init_data_exchange(adev); 1705 1706 amdgpu_virt_release_full_gpu(adev, true); 1707 + } 1706 1708 1707 1709 return 0; 1708 1710 } ··· 2634 2632 goto failed; 2635 2633 } 2636 2634 2637 - if (amdgpu_sriov_vf(adev)) 2638 - amdgpu_virt_init_data_exchange(adev); 2639 - 2640 2635 amdgpu_fbdev_init(adev); 2641 2636 2642 2637 r = amdgpu_pm_sysfs_init(adev); ··· 2797 2798 struct drm_framebuffer *fb = crtc->primary->fb; 2798 2799 struct amdgpu_bo *robj; 2799 2800 2800 - if (amdgpu_crtc->cursor_bo) { 2801 + if (amdgpu_crtc->cursor_bo && !adev->enable_virtual_display) { 2801 2802 struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo); 2802 2803 r = amdgpu_bo_reserve(aobj, true); 2803 2804 if (r == 0) { ··· 2905 2906 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 2906 2907 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2907 2908 2908 - if (amdgpu_crtc->cursor_bo) { 2909 + if (amdgpu_crtc->cursor_bo && !adev->enable_virtual_display) { 2909 2910 struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo); 2910 2911 r = amdgpu_bo_reserve(aobj, true); 2911 2912 if (r == 0) { ··· 3225 3226 r = amdgpu_ib_ring_tests(adev); 3226 3227 3227 3228 error: 3229 + amdgpu_virt_init_data_exchange(adev); 3228 3230 amdgpu_virt_release_full_gpu(adev, true); 3229 3231 if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) { 3230 3232 atomic_inc(&adev->vram_lost_counter);
+12 -8
drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
··· 188 188 goto cleanup; 189 189 } 190 190 191 - r = amdgpu_bo_pin(new_abo, amdgpu_display_supported_domains(adev)); 192 - if (unlikely(r != 0)) { 193 - DRM_ERROR("failed to pin new abo buffer before flip\n"); 194 - goto unreserve; 191 + if (!adev->enable_virtual_display) { 192 + r = amdgpu_bo_pin(new_abo, amdgpu_display_supported_domains(adev)); 193 + if (unlikely(r != 0)) { 194 + DRM_ERROR("failed to pin new abo buffer before flip\n"); 195 + goto unreserve; 196 + } 195 197 } 196 198 197 199 r = amdgpu_ttm_alloc_gart(&new_abo->tbo); ··· 213 211 amdgpu_bo_get_tiling_flags(new_abo, &tiling_flags); 214 212 amdgpu_bo_unreserve(new_abo); 215 213 216 - work->base = amdgpu_bo_gpu_offset(new_abo); 214 + if (!adev->enable_virtual_display) 215 + work->base = amdgpu_bo_gpu_offset(new_abo); 217 216 work->target_vblank = target - (uint32_t)drm_crtc_vblank_count(crtc) + 218 217 amdgpu_get_vblank_counter_kms(dev, work->crtc_id); 219 218 ··· 245 242 goto cleanup; 246 243 } 247 244 unpin: 248 - if (unlikely(amdgpu_bo_unpin(new_abo) != 0)) { 249 - DRM_ERROR("failed to unpin new abo in error path\n"); 250 - } 245 + if (!adev->enable_virtual_display) 246 + if (unlikely(amdgpu_bo_unpin(new_abo) != 0)) 247 + DRM_ERROR("failed to unpin new abo in error path\n"); 248 + 251 249 unreserve: 252 250 amdgpu_bo_unreserve(new_abo); 253 251
+14 -8
drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
··· 2008 2008 2009 2009 int amdgpu_pm_sysfs_init(struct amdgpu_device *adev) 2010 2010 { 2011 + struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle; 2011 2012 int ret; 2012 2013 2013 2014 if (adev->pm.sysfs_initialized) ··· 2092 2091 "pp_power_profile_mode\n"); 2093 2092 return ret; 2094 2093 } 2095 - ret = device_create_file(adev->dev, 2096 - &dev_attr_pp_od_clk_voltage); 2097 - if (ret) { 2098 - DRM_ERROR("failed to create device file " 2099 - "pp_od_clk_voltage\n"); 2100 - return ret; 2094 + if (hwmgr->od_enabled) { 2095 + ret = device_create_file(adev->dev, 2096 + &dev_attr_pp_od_clk_voltage); 2097 + if (ret) { 2098 + DRM_ERROR("failed to create device file " 2099 + "pp_od_clk_voltage\n"); 2100 + return ret; 2101 + } 2101 2102 } 2102 2103 ret = device_create_file(adev->dev, 2103 2104 &dev_attr_gpu_busy_percent); ··· 2121 2118 2122 2119 void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev) 2123 2120 { 2121 + struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle; 2122 + 2124 2123 if (adev->pm.dpm_enabled == 0) 2125 2124 return; 2126 2125 ··· 2143 2138 device_remove_file(adev->dev, &dev_attr_pp_mclk_od); 2144 2139 device_remove_file(adev->dev, 2145 2140 &dev_attr_pp_power_profile_mode); 2146 - device_remove_file(adev->dev, 2147 - &dev_attr_pp_od_clk_voltage); 2141 + if (hwmgr->od_enabled) 2142 + device_remove_file(adev->dev, 2143 + &dev_attr_pp_od_clk_voltage); 2148 2144 device_remove_file(adev->dev, &dev_attr_gpu_busy_percent); 2149 2145 } 2150 2146
-3
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
··· 847 847 bp->size = amdgpu_vm_bo_size(adev, level); 848 848 bp->byte_align = AMDGPU_GPU_PAGE_SIZE; 849 849 bp->domain = AMDGPU_GEM_DOMAIN_VRAM; 850 - if (bp->size <= PAGE_SIZE && adev->asic_type >= CHIP_VEGA10 && 851 - adev->flags & AMD_IS_APU) 852 - bp->domain |= AMDGPU_GEM_DOMAIN_GTT; 853 850 bp->domain = amdgpu_bo_get_preferred_pin_domain(adev, bp->domain); 854 851 bp->flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS | 855 852 AMDGPU_GEM_CREATE_CPU_GTT_USWC;
+3 -14
drivers/gpu/drm/amd/amdgpu/dce_virtual.c
··· 167 167 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 168 168 169 169 dce_virtual_crtc_dpms(crtc, DRM_MODE_DPMS_OFF); 170 - if (crtc->primary->fb) { 171 - int r; 172 - struct amdgpu_bo *abo; 173 - 174 - abo = gem_to_amdgpu_bo(crtc->primary->fb->obj[0]); 175 - r = amdgpu_bo_reserve(abo, true); 176 - if (unlikely(r)) 177 - DRM_ERROR("failed to reserve abo before unpin\n"); 178 - else { 179 - amdgpu_bo_unpin(abo); 180 - amdgpu_bo_unreserve(abo); 181 - } 182 - } 183 170 184 171 amdgpu_crtc->pll_id = ATOM_PPLL_INVALID; 185 172 amdgpu_crtc->encoder = NULL; ··· 679 692 spin_unlock_irqrestore(&adev->ddev->event_lock, flags); 680 693 681 694 drm_crtc_vblank_put(&amdgpu_crtc->base); 682 - schedule_work(&works->unpin_work); 695 + amdgpu_bo_unref(&works->old_abo); 696 + kfree(works->shared); 697 + kfree(works); 683 698 684 699 return 0; 685 700 }
+35 -15
drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
··· 4233 4233 u32 tmp; 4234 4234 u32 rb_bufsz; 4235 4235 u64 rb_addr, rptr_addr, wptr_gpu_addr; 4236 - int r; 4237 4236 4238 4237 /* Set the write pointer delay */ 4239 4238 WREG32(mmCP_RB_WPTR_DELAY, 0); ··· 4277 4278 amdgpu_ring_clear_ring(ring); 4278 4279 gfx_v8_0_cp_gfx_start(adev); 4279 4280 ring->sched.ready = true; 4280 - r = amdgpu_ring_test_helper(ring); 4281 4281 4282 - return r; 4282 + return 0; 4283 4283 } 4284 4284 4285 4285 static void gfx_v8_0_cp_compute_enable(struct amdgpu_device *adev, bool enable) ··· 4367 4369 amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr)); 4368 4370 } 4369 4371 4370 - r = amdgpu_ring_test_helper(kiq_ring); 4371 - if (r) 4372 - DRM_ERROR("KCQ enable failed\n"); 4373 - return r; 4372 + amdgpu_ring_commit(kiq_ring); 4373 + 4374 + return 0; 4374 4375 } 4375 4376 4376 4377 static int gfx_v8_0_deactivate_hqd(struct amdgpu_device *adev, u32 req) ··· 4706 4709 if (r) 4707 4710 goto done; 4708 4711 4709 - /* Test KCQs - reversing the order of rings seems to fix ring test failure 4710 - * after GPU reset 4711 - */ 4712 - for (i = adev->gfx.num_compute_rings - 1; i >= 0; i--) { 4713 - ring = &adev->gfx.compute_ring[i]; 4714 - r = amdgpu_ring_test_helper(ring); 4715 - } 4716 - 4717 4712 done: 4718 4713 return r; 4714 + } 4715 + 4716 + static int gfx_v8_0_cp_test_all_rings(struct amdgpu_device *adev) 4717 + { 4718 + int r, i; 4719 + struct amdgpu_ring *ring; 4720 + 4721 + /* collect all the ring_tests here, gfx, kiq, compute */ 4722 + ring = &adev->gfx.gfx_ring[0]; 4723 + r = amdgpu_ring_test_helper(ring); 4724 + if (r) 4725 + return r; 4726 + 4727 + ring = &adev->gfx.kiq.ring; 4728 + r = amdgpu_ring_test_helper(ring); 4729 + if (r) 4730 + return r; 4731 + 4732 + for (i = 0; i < adev->gfx.num_compute_rings; i++) { 4733 + ring = &adev->gfx.compute_ring[i]; 4734 + amdgpu_ring_test_helper(ring); 4735 + } 4736 + 4737 + return 0; 4719 4738 } 4720 4739 4721 4740 static int gfx_v8_0_cp_resume(struct amdgpu_device *adev) ··· 4752 4739 r = gfx_v8_0_kcq_resume(adev); 4753 4740 if (r) 4754 4741 return r; 4742 + 4743 + r = gfx_v8_0_cp_test_all_rings(adev); 4744 + if (r) 4745 + return r; 4746 + 4755 4747 gfx_v8_0_enable_gui_idle_interrupt(adev, true); 4756 4748 4757 4749 return 0; ··· 5103 5085 if (REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CP) || 5104 5086 REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_GFX)) 5105 5087 gfx_v8_0_cp_gfx_resume(adev); 5088 + 5089 + gfx_v8_0_cp_test_all_rings(adev); 5106 5090 5107 5091 adev->gfx.rlc.funcs->start(adev); 5108 5092
+9 -5
drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
··· 113 113 SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x4a2c0e68), 114 114 SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0xb5d3f197), 115 115 SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_CACHE_INVALIDATION, 0x3fff3af3, 0x19200000), 116 - SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000003ff) 116 + SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000003ff), 117 + SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000000, 0x00000800), 118 + SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000000, 0x00000800), 119 + SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00000000, 0x00008000) 117 120 }; 118 121 119 122 static const struct soc15_reg_golden golden_settings_gc_9_0_vg10[] = ··· 138 135 SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_UTCL1_CNTL2, 0x00030000, 0x00020000), 139 136 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0x0000000f, 0x01000107), 140 137 SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x00001800, 0x00000800), 141 - SOC15_REG_GOLDEN_VALUE(GC, 0, mmWD_UTCL1_CNTL, 0x08000000, 0x08000080), 142 - SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000000, 0x00000800), 143 - SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000000, 0x00000800), 144 - SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00000000, 0x00008000) 138 + SOC15_REG_GOLDEN_VALUE(GC, 0, mmWD_UTCL1_CNTL, 0x08000000, 0x08000080) 145 139 }; 146 140 147 141 static const struct soc15_reg_golden golden_settings_gc_9_0_vg20[] = ··· 3587 3587 { 3588 3588 uint32_t data, def; 3589 3589 3590 + amdgpu_gfx_rlc_enter_safe_mode(adev); 3591 + 3590 3592 /* It is disabled by HW by default */ 3591 3593 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) { 3592 3594 /* 1 - RLC_CGTT_MGCG_OVERRIDE */ ··· 3653 3651 WREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL, data); 3654 3652 } 3655 3653 } 3654 + 3655 + amdgpu_gfx_rlc_exit_safe_mode(adev); 3656 3656 } 3657 3657 3658 3658 static void gfx_v9_0_update_3d_clock_gating(struct amdgpu_device *adev,
+1 -1
drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
··· 174 174 return r; 175 175 } 176 176 /* Retrieve checksum from mailbox2 */ 177 - if (req == IDH_REQ_GPU_INIT_ACCESS) { 177 + if (req == IDH_REQ_GPU_INIT_ACCESS || req == IDH_REQ_GPU_RESET_ACCESS) { 178 178 adev->virt.fw_reserve.checksum_key = 179 179 RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, 180 180 mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW2));
+2 -1
drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
··· 78 78 SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000), 79 79 SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003c0), 80 80 SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_WATERMK, 0xfc000000, 0x00000000), 81 - SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CHICKEN_BITS, 0xfe931f07, 0x02831f07), 82 81 SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CLK_CTRL, 0xffffffff, 0x3f000100), 83 82 SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GFX_IB_CNTL, 0x800f0100, 0x00000100), 84 83 SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GFX_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000), ··· 95 96 static const struct soc15_reg_golden golden_settings_sdma_vg10[] = { 96 97 SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG, 0x0018773f, 0x00104002), 97 98 SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104002), 99 + SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CHICKEN_BITS, 0xfe931f07, 0x02831d07), 98 100 SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG, 0x0018773f, 0x00104002), 99 101 SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104002) 100 102 }; ··· 103 103 static const struct soc15_reg_golden golden_settings_sdma_vg12[] = { 104 104 SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG, 0x0018773f, 0x00104001), 105 105 SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104001), 106 + SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CHICKEN_BITS, 0xfe931f07, 0x02831d07), 106 107 SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG, 0x0018773f, 0x00104001), 107 108 SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104001) 108 109 };
+2 -2
drivers/gpu/drm/amd/amdkfd/Kconfig
··· 4 4 5 5 config HSA_AMD 6 6 bool "HSA kernel driver for AMD GPU devices" 7 - depends on DRM_AMDGPU && X86_64 8 - imply AMD_IOMMU_V2 7 + depends on DRM_AMDGPU && (X86_64 || ARM64) 8 + imply AMD_IOMMU_V2 if X86_64 9 9 select MMU_NOTIFIER 10 10 help 11 11 Enable this if you want to use HSA features on AMD GPU devices.
+8
drivers/gpu/drm/amd/amdkfd/kfd_crat.c
··· 863 863 return 0; 864 864 } 865 865 866 + #if CONFIG_X86_64 866 867 static int kfd_fill_iolink_info_for_cpu(int numa_node_id, int *avail_size, 867 868 uint32_t *num_entries, 868 869 struct crat_subtype_iolink *sub_type_hdr) ··· 906 905 907 906 return 0; 908 907 } 908 + #endif 909 909 910 910 /* kfd_create_vcrat_image_cpu - Create Virtual CRAT for CPU 911 911 * ··· 922 920 struct crat_subtype_generic *sub_type_hdr; 923 921 int avail_size = *size; 924 922 int numa_node_id; 923 + #ifdef CONFIG_X86_64 925 924 uint32_t entries = 0; 925 + #endif 926 926 int ret = 0; 927 927 928 928 if (!pcrat_image || avail_size < VCRAT_SIZE_FOR_CPU) ··· 986 982 sub_type_hdr->length); 987 983 988 984 /* Fill in Subtype: IO Link */ 985 + #ifdef CONFIG_X86_64 989 986 ret = kfd_fill_iolink_info_for_cpu(numa_node_id, &avail_size, 990 987 &entries, 991 988 (struct crat_subtype_iolink *)sub_type_hdr); ··· 997 992 998 993 sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr + 999 994 sub_type_hdr->length * entries); 995 + #else 996 + pr_info("IO link not available for non x86 platforms\n"); 997 + #endif 1000 998 1001 999 crat_table->num_domains++; 1002 1000 }
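The kfd_crat.c hunks fence the IO-link CRAT generation off for non-x86 builds, matching the Kconfig change that allows ARM64. One detail worth noting when reading them: the first guard is spelled #if CONFIG_X86_64 while the later ones use #ifdef CONFIG_X86_64. For a set bool symbol both behave the same (the macro is defined as 1); for an unset one the #if form still evaluates to false, because an undefined macro is 0 in #if, but it trips -Wundef, which is presumably why such guards are conventionally written with #ifdef:

    #ifdef CONFIG_X86_64    /* skipped cleanly when the symbol is unset */
    #endif
    #if CONFIG_X86_64       /* also skipped (undefined evaluates to 0),
                             * but warns under -Wundef */
    #endif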
+14 -7
drivers/gpu/drm/amd/amdkfd/kfd_topology.c
··· 1093 1093 * the GPU device is not already present in the topology device 1094 1094 * list then return NULL. This means a new topology device has to 1095 1095 * be created for this GPU. 1096 - * TODO: Rather than assiging @gpu to first topology device withtout 1097 - * gpu attached, it will better to have more stringent check. 1098 1096 */ 1099 1097 static struct kfd_topology_device *kfd_assign_gpu(struct kfd_dev *gpu) 1100 1098 { ··· 1100 1102 struct kfd_topology_device *out_dev = NULL; 1101 1103 1102 1104 down_write(&topology_lock); 1103 - list_for_each_entry(dev, &topology_device_list, list) 1105 + list_for_each_entry(dev, &topology_device_list, list) { 1106 + /* Discrete GPUs need their own topology device list 1107 + * entries. Don't assign them to CPU/APU nodes. 1108 + */ 1109 + if (!gpu->device_info->needs_iommu_device && 1110 + dev->node_props.cpu_cores_count) 1111 + continue; 1112 + 1104 1113 if (!dev->gpu && (dev->node_props.simd_count > 0)) { 1105 1114 dev->gpu = gpu; 1106 1115 out_dev = dev; 1107 1116 break; 1108 1117 } 1118 + } 1109 1119 up_write(&topology_lock); 1110 1120 return out_dev; 1111 1121 } ··· 1398 1392 1399 1393 static int kfd_cpumask_to_apic_id(const struct cpumask *cpumask) 1400 1394 { 1401 - const struct cpuinfo_x86 *cpuinfo; 1402 1395 int first_cpu_of_numa_node; 1403 1396 1404 1397 if (!cpumask || cpumask == cpu_none_mask) ··· 1405 1400 first_cpu_of_numa_node = cpumask_first(cpumask); 1406 1401 if (first_cpu_of_numa_node >= nr_cpu_ids) 1407 1402 return -1; 1408 - cpuinfo = &cpu_data(first_cpu_of_numa_node); 1409 - 1410 - return cpuinfo->apicid; 1403 + #ifdef CONFIG_X86_64 1404 + return cpu_data(first_cpu_of_numa_node).apicid; 1405 + #else 1406 + return first_cpu_of_numa_node; 1407 + #endif 1411 1408 } 1412 1409 1413 1410 /* kfd_numa_node_to_apic_id - Returns the APIC ID of the first logical processor
+27 -14
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
··· 699 699 { 700 700 struct amdgpu_dm_connector *aconnector; 701 701 struct drm_connector *connector; 702 + struct drm_dp_mst_topology_mgr *mgr; 703 + int ret; 704 + bool need_hotplug = false; 702 705 703 706 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); 704 707 705 - list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 706 - aconnector = to_amdgpu_dm_connector(connector); 707 - if (aconnector->dc_link->type == dc_connection_mst_branch && 708 - !aconnector->mst_port) { 708 + list_for_each_entry(connector, &dev->mode_config.connector_list, 709 + head) { 710 + aconnector = to_amdgpu_dm_connector(connector); 711 + if (aconnector->dc_link->type != dc_connection_mst_branch || 712 + aconnector->mst_port) 713 + continue; 709 714 710 - if (suspend) 711 - drm_dp_mst_topology_mgr_suspend(&aconnector->mst_mgr); 712 - else 713 - drm_dp_mst_topology_mgr_resume(&aconnector->mst_mgr); 714 - } 715 + mgr = &aconnector->mst_mgr; 716 + 717 + if (suspend) { 718 + drm_dp_mst_topology_mgr_suspend(mgr); 719 + } else { 720 + ret = drm_dp_mst_topology_mgr_resume(mgr); 721 + if (ret < 0) { 722 + drm_dp_mst_topology_mgr_set_mst(mgr, false); 723 + need_hotplug = true; 724 + } 725 + } 715 726 } 716 727 717 728 drm_modeset_unlock(&dev->mode_config.connection_mutex); 729 + 730 + if (need_hotplug) 731 + drm_kms_helper_hotplug_event(dev); 718 732 } 719 733 720 734 /** ··· 912 898 struct drm_plane_state *new_plane_state; 913 899 struct dm_plane_state *dm_new_plane_state; 914 900 enum dc_connection_type new_connection_type = dc_connection_none; 915 - int ret; 916 901 int i; 917 902 918 903 /* power on hardware */ ··· 984 971 } 985 972 } 986 973 987 - ret = drm_atomic_helper_resume(ddev, dm->cached_state); 974 + drm_atomic_helper_resume(ddev, dm->cached_state); 988 975 989 976 dm->cached_state = NULL; 990 977 991 978 amdgpu_dm_irq_resume_late(adev); 992 979 993 - return ret; 980 + return 0; 994 981 } 995 982 996 983 /** ··· 1772 1759 + caps.min_input_signal * 0x101; 1773 1760 1774 1761 if (dc_link_set_backlight_level(dm->backlight_link, 1775 - brightness, 0, 0)) 1762 + brightness, 0)) 1776 1763 return 0; 1777 1764 else 1778 1765 return 1; ··· 5933 5920 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { 5934 5921 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) && 5935 5922 !new_crtc_state->color_mgmt_changed && 5936 - !new_crtc_state->vrr_enabled) 5923 + old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled) 5937 5924 continue; 5938 5925 5939 5926 if (!new_crtc_state->enable)
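The MST suspend/resume rework above shows a common locking shape: drm_dp_mst_topology_mgr_resume() failures are only recorded in need_hotplug while connection_mutex is held, and drm_kms_helper_hotplug_event() fires after the unlock, since the hotplug helper walks connector state itself and is better not re-entered under the same lock. Schematically:

    bool need_hotplug = false;

    drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
    /* resume each MST manager; on failure tear MST down and set
     * need_hotplug = true, but do not notify from under the lock */
    drm_modeset_unlock(&dev->mode_config.connection_mutex);

    if (need_hotplug)
        drm_kms_helper_hotplug_event(dev);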
+1 -11
drivers/gpu/drm/amd/display/dc/core/dc_link.c
··· 2190 2190 2191 2191 bool dc_link_set_backlight_level(const struct dc_link *link, 2192 2192 uint32_t backlight_pwm_u16_16, 2193 - uint32_t frame_ramp, 2194 - const struct dc_stream_state *stream) 2193 + uint32_t frame_ramp) 2195 2194 { 2196 2195 struct dc *core_dc = link->ctx->dc; 2197 2196 struct abm *abm = core_dc->res_pool->abm; ··· 2204 2205 (abm == NULL) || 2205 2206 (abm->funcs->set_backlight_level_pwm == NULL)) 2206 2207 return false; 2207 - 2208 - if (stream) 2209 - ((struct dc_stream_state *)stream)->bl_pwm_level = 2210 - backlight_pwm_u16_16; 2211 2208 2212 2209 use_smooth_brightness = dmcu->funcs->is_dmcu_initialized(dmcu); 2213 2210 ··· 2632 2637 2633 2638 if (dc_is_dp_signal(pipe_ctx->stream->signal)) 2634 2639 enable_stream_features(pipe_ctx); 2635 - 2636 - dc_link_set_backlight_level(pipe_ctx->stream->sink->link, 2637 - pipe_ctx->stream->bl_pwm_level, 2638 - 0, 2639 - pipe_ctx->stream); 2640 2640 } 2641 2641 2642 2642 }
+1 -2
drivers/gpu/drm/amd/display/dc/dc_link.h
··· 146 146 */ 147 147 bool dc_link_set_backlight_level(const struct dc_link *dc_link, 148 148 uint32_t backlight_pwm_u16_16, 149 - uint32_t frame_ramp, 150 - const struct dc_stream_state *stream); 149 + uint32_t frame_ramp); 151 150 152 151 int dc_link_get_backlight_level(const struct dc_link *dc_link); 153 152
-1
drivers/gpu/drm/amd/display/dc/dc_stream.h
··· 91 91 92 92 /* DMCU info */ 93 93 unsigned int abm_level; 94 - unsigned int bl_pwm_level; 95 94 96 95 /* from core_stream struct */ 97 96 struct dc_context *ctx;
+6 -1
drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
··· 1000 1000 1001 1001 pipe_ctx->stream_res.audio->funcs->az_enable(pipe_ctx->stream_res.audio); 1002 1002 1003 - if (num_audio == 1 && pp_smu != NULL && pp_smu->set_pme_wa_enable != NULL) 1003 + if (num_audio >= 1 && pp_smu != NULL && pp_smu->set_pme_wa_enable != NULL) 1004 1004 /*this is the first audio. apply the PME w/a in order to wake AZ from D3*/ 1005 1005 pp_smu->set_pme_wa_enable(&pp_smu->pp_smu); 1006 1006 /* un-mute audio */ ··· 1017 1017 pipe_ctx->stream_res.stream_enc->funcs->audio_mute_control( 1018 1018 pipe_ctx->stream_res.stream_enc, true); 1019 1019 if (pipe_ctx->stream_res.audio) { 1020 + struct pp_smu_funcs_rv *pp_smu = dc->res_pool->pp_smu; 1021 + 1020 1022 if (option != KEEP_ACQUIRED_RESOURCE || 1021 1023 !dc->debug.az_endpoint_mute_only) { 1022 1024 /*only disalbe az_endpoint if power down or free*/ ··· 1038 1036 update_audio_usage(&dc->current_state->res_ctx, dc->res_pool, pipe_ctx->stream_res.audio, false); 1039 1037 pipe_ctx->stream_res.audio = NULL; 1040 1038 } 1039 + if (pp_smu != NULL && pp_smu->set_pme_wa_enable != NULL) 1040 + /*this is the first audio. apply the PME w/a in order to wake AZ from D3*/ 1041 + pp_smu->set_pme_wa_enable(&pp_smu->pp_smu); 1041 1042 1042 1043 /* TODO: notify audio driver for if audio modes list changed 1043 1044 * add audio mode list change flag */
+1 -1
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
··· 463 463 if (src_y_offset >= (int)param->viewport.height) 464 464 cur_en = 0; /* not visible beyond bottom edge*/ 465 465 466 - if (src_y_offset < 0) 466 + if (src_y_offset + (int)height <= 0) 467 467 cur_en = 0; /* not visible beyond top edge*/ 468 468 469 469 REG_UPDATE(CURSOR0_CONTROL,
+1 -1
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
··· 1140 1140 if (src_y_offset >= (int)param->viewport.height) 1141 1141 cur_en = 0; /* not visible beyond bottom edge*/ 1142 1142 1143 - if (src_y_offset < 0) //+ (int)hubp->curs_attr.height 1143 + if (src_y_offset + (int)hubp->curs_attr.height <= 0) 1144 1144 cur_en = 0; /* not visible beyond top edge*/ 1145 1145 1146 1146 if (cur_en && REG_READ(CURSOR_SURFACE_ADDRESS) == 0)
+6 -13
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
··· 2355 2355 top_pipe_to_program->plane_state->update_flags.bits.full_update) 2356 2356 for (i = 0; i < dc->res_pool->pipe_count; i++) { 2357 2357 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; 2358 - 2358 + tg = pipe_ctx->stream_res.tg; 2359 2359 /* Skip inactive pipes and ones already updated */ 2360 2360 if (!pipe_ctx->stream || pipe_ctx->stream == stream 2361 - || !pipe_ctx->plane_state) 2361 + || !pipe_ctx->plane_state 2362 + || !tg->funcs->is_tg_enabled(tg)) 2362 2363 continue; 2363 2364 2364 - pipe_ctx->stream_res.tg->funcs->lock(pipe_ctx->stream_res.tg); 2365 + tg->funcs->lock(tg); 2365 2366 2366 2367 pipe_ctx->plane_res.hubp->funcs->hubp_setup_interdependent( 2367 2368 pipe_ctx->plane_res.hubp, 2368 2369 &pipe_ctx->dlg_regs, 2369 2370 &pipe_ctx->ttu_regs); 2371 + 2372 + tg->funcs->unlock(tg); 2370 2373 } 2371 - 2372 - for (i = 0; i < dc->res_pool->pipe_count; i++) { 2373 - struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; 2374 - 2375 - if (!pipe_ctx->stream || pipe_ctx->stream == stream 2376 - || !pipe_ctx->plane_state) 2377 - continue; 2378 - 2379 - dcn10_pipe_control_lock(dc, pipe_ctx, false); 2380 - } 2381 2374 2382 2375 if (num_planes == 0) 2383 2376 false_optc_underflow_wa(dc, stream, tg);
+2
drivers/gpu/drm/amd/display/modules/power/power_helpers.c
··· 57 57 #define NUM_POWER_FN_SEGS 8 58 58 #define NUM_BL_CURVE_SEGS 16 59 59 60 + #pragma pack(push, 1) 60 61 /* NOTE: iRAM is 256B in size */ 61 62 struct iram_table_v_2 { 62 63 /* flags */ ··· 101 100 uint8_t dummy8; /* 0xfe */ 102 101 uint8_t dummy9; /* 0xff */ 103 102 }; 103 + #pragma pack(pop) 104 104 105 105 static uint16_t backlight_8_to_16(unsigned int backlight_8bit) 106 106 {
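The #pragma pack(push, 1)/pack(pop) pair matters here because iram_table_v_2 mirrors a 256-byte firmware iRAM image byte for byte; without it the compiler may pad members for alignment and the table uploaded to the DMCU would be skewed. A self-contained illustration (hypothetical struct, plain C):

    #include <stdio.h>

    struct loose { unsigned char a; unsigned short b; };

    #pragma pack(push, 1)
    struct tight { unsigned char a; unsigned short b; };
    #pragma pack(pop)

    int main(void)
    {
        /* Typically prints "4 3": 'loose' gains a pad byte so 'b'
         * starts on a 2-byte boundary; 'tight' does not. */
        printf("%zu %zu\n", sizeof(struct loose), sizeof(struct tight));
        return 0;
    }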
+7 -6
drivers/gpu/drm/amd/include/kgd_pp_interface.h
··· 127 127 }; 128 128 129 129 enum PP_SMC_POWER_PROFILE { 130 - PP_SMC_POWER_PROFILE_FULLSCREEN3D = 0x0, 131 - PP_SMC_POWER_PROFILE_POWERSAVING = 0x1, 132 - PP_SMC_POWER_PROFILE_VIDEO = 0x2, 133 - PP_SMC_POWER_PROFILE_VR = 0x3, 134 - PP_SMC_POWER_PROFILE_COMPUTE = 0x4, 135 - PP_SMC_POWER_PROFILE_CUSTOM = 0x5, 130 + PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT = 0x0, 131 + PP_SMC_POWER_PROFILE_FULLSCREEN3D = 0x1, 132 + PP_SMC_POWER_PROFILE_POWERSAVING = 0x2, 133 + PP_SMC_POWER_PROFILE_VIDEO = 0x3, 134 + PP_SMC_POWER_PROFILE_VR = 0x4, 135 + PP_SMC_POWER_PROFILE_COMPUTE = 0x5, 136 + PP_SMC_POWER_PROFILE_CUSTOM = 0x6, 136 137 }; 137 138 138 139 enum {
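Inserting PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT at 0x0 renumbers every profile after it, which is why the hwmgr, smu7 and vega hunks below all grow their name tables, priority maps and Workload_Policy_Max in lock-step: any array indexed by this enum must be resized with it. Designated initializers are one way to keep such tables from skewing silently (a sketch, not the driver's style):

    static const char *profile_name[] = {
        [PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT] = "BOOTUP_DEFAULT",
        [PP_SMC_POWER_PROFILE_FULLSCREEN3D]   = "3D_FULL_SCREEN",
        [PP_SMC_POWER_PROFILE_CUSTOM]         = "CUSTOM",
    };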
+12 -10
drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
··· 64 64 65 65 static void hwmgr_init_workload_prority(struct pp_hwmgr *hwmgr) 66 66 { 67 - hwmgr->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D] = 2; 68 - hwmgr->workload_prority[PP_SMC_POWER_PROFILE_POWERSAVING] = 0; 69 - hwmgr->workload_prority[PP_SMC_POWER_PROFILE_VIDEO] = 1; 70 - hwmgr->workload_prority[PP_SMC_POWER_PROFILE_VR] = 3; 71 - hwmgr->workload_prority[PP_SMC_POWER_PROFILE_COMPUTE] = 4; 67 + hwmgr->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT] = 0; 68 + hwmgr->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D] = 1; 69 + hwmgr->workload_prority[PP_SMC_POWER_PROFILE_POWERSAVING] = 2; 70 + hwmgr->workload_prority[PP_SMC_POWER_PROFILE_VIDEO] = 3; 71 + hwmgr->workload_prority[PP_SMC_POWER_PROFILE_VR] = 4; 72 + hwmgr->workload_prority[PP_SMC_POWER_PROFILE_COMPUTE] = 5; 72 73 73 - hwmgr->workload_setting[0] = PP_SMC_POWER_PROFILE_POWERSAVING; 74 - hwmgr->workload_setting[1] = PP_SMC_POWER_PROFILE_VIDEO; 75 - hwmgr->workload_setting[2] = PP_SMC_POWER_PROFILE_FULLSCREEN3D; 76 - hwmgr->workload_setting[3] = PP_SMC_POWER_PROFILE_VR; 77 - hwmgr->workload_setting[4] = PP_SMC_POWER_PROFILE_COMPUTE; 74 + hwmgr->workload_setting[0] = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT; 75 + hwmgr->workload_setting[1] = PP_SMC_POWER_PROFILE_FULLSCREEN3D; 76 + hwmgr->workload_setting[2] = PP_SMC_POWER_PROFILE_POWERSAVING; 77 + hwmgr->workload_setting[3] = PP_SMC_POWER_PROFILE_VIDEO; 78 + hwmgr->workload_setting[4] = PP_SMC_POWER_PROFILE_VR; 79 + hwmgr->workload_setting[5] = PP_SMC_POWER_PROFILE_COMPUTE; 78 80 } 79 81 80 82 int hwmgr_early_init(struct pp_hwmgr *hwmgr)
+5 -3
drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
··· 77 77 #define PCIE_BUS_CLK 10000 78 78 #define TCLK (PCIE_BUS_CLK / 10) 79 79 80 - static const struct profile_mode_setting smu7_profiling[6] = 81 - {{1, 0, 100, 30, 1, 0, 100, 10}, 80 + static const struct profile_mode_setting smu7_profiling[7] = 81 + {{0, 0, 0, 0, 0, 0, 0, 0}, 82 + {1, 0, 100, 30, 1, 0, 100, 10}, 82 83 {1, 10, 0, 30, 0, 0, 0, 0}, 83 84 {0, 0, 0, 0, 1, 10, 16, 31}, 84 85 {1, 0, 11, 50, 1, 0, 100, 10}, ··· 4890 4889 uint32_t i, size = 0; 4891 4890 uint32_t len; 4892 4891 4893 - static const char *profile_name[6] = {"3D_FULL_SCREEN", 4892 + static const char *profile_name[7] = {"BOOTUP_DEFAULT", 4893 + "3D_FULL_SCREEN", 4894 4894 "POWER_SAVING", 4895 4895 "VIDEO", 4896 4896 "VR",
+7 -5
drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
··· 804 804 805 805 hwmgr->backend = data; 806 806 807 - hwmgr->workload_mask = 1 << hwmgr->workload_prority[PP_SMC_POWER_PROFILE_VIDEO]; 808 - hwmgr->power_profile_mode = PP_SMC_POWER_PROFILE_VIDEO; 809 - hwmgr->default_power_profile_mode = PP_SMC_POWER_PROFILE_VIDEO; 807 + hwmgr->workload_mask = 1 << hwmgr->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT]; 808 + hwmgr->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT; 809 + hwmgr->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT; 810 810 811 811 vega10_set_default_registry_data(hwmgr); 812 812 data->disable_dpm_mask = 0xff; ··· 4668 4668 { 4669 4669 struct vega10_hwmgr *data = hwmgr->backend; 4670 4670 uint32_t i, size = 0; 4671 - static const uint8_t profile_mode_setting[5][4] = {{70, 60, 1, 3,}, 4671 + static const uint8_t profile_mode_setting[6][4] = {{70, 60, 0, 0,}, 4672 + {70, 60, 1, 3,}, 4672 4673 {90, 60, 0, 0,}, 4673 4674 {70, 60, 0, 0,}, 4674 4675 {70, 90, 0, 0,}, 4675 4676 {30, 60, 0, 6,}, 4676 4677 }; 4677 - static const char *profile_name[6] = {"3D_FULL_SCREEN", 4678 + static const char *profile_name[7] = {"BOOTUP_DEFAULT", 4679 + "3D_FULL_SCREEN", 4678 4680 "POWER_SAVING", 4679 4681 "VIDEO", 4680 4682 "VR",
+21
drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
··· 753 753 return 0; 754 754 } 755 755 756 + static int vega12_run_acg_btc(struct pp_hwmgr *hwmgr) 757 + { 758 + uint32_t result; 759 + 760 + PP_ASSERT_WITH_CODE( 761 + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAcgBtc) == 0, 762 + "[Run_ACG_BTC] Attempt to run ACG BTC failed!", 763 + return -EINVAL); 764 + 765 + result = smum_get_argument(hwmgr); 766 + PP_ASSERT_WITH_CODE(result == 1, 767 + "Failed to run ACG BTC!", return -EINVAL); 768 + 769 + return 0; 770 + } 771 + 756 772 static int vega12_set_allowed_featuresmask(struct pp_hwmgr *hwmgr) 757 773 { 758 774 struct vega12_hwmgr *data = ··· 945 929 tmp_result = vega12_init_smc_table(hwmgr); 946 930 PP_ASSERT_WITH_CODE(!tmp_result, 947 931 "Failed to initialize SMC table!", 932 + result = tmp_result); 933 + 934 + tmp_result = vega12_run_acg_btc(hwmgr); 935 + PP_ASSERT_WITH_CODE(!tmp_result, 936 + "Failed to run ACG BTC!", 948 937 result = tmp_result); 949 938 950 939 result = vega12_enable_all_smu_features(hwmgr);
+24 -10
drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
··· 390 390 391 391 hwmgr->backend = data; 392 392 393 - hwmgr->workload_mask = 1 << hwmgr->workload_prority[PP_SMC_POWER_PROFILE_VIDEO]; 394 - hwmgr->power_profile_mode = PP_SMC_POWER_PROFILE_VIDEO; 395 - hwmgr->default_power_profile_mode = PP_SMC_POWER_PROFILE_VIDEO; 393 + hwmgr->workload_mask = 1 << hwmgr->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT]; 394 + hwmgr->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT; 395 + hwmgr->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT; 396 396 397 397 vega20_set_default_registry_data(hwmgr); 398 398 ··· 979 979 if (pptable_information->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_FAN_ZERO_RPM_CONTROL] && 980 980 pp_table->FanZeroRpmEnable) 981 981 od_settings->overdrive8_capabilities |= OD8_FAN_ZERO_RPM_CONTROL; 982 + 983 + if (!od_settings->overdrive8_capabilities) 984 + hwmgr->od_enabled = false; 982 985 983 986 return 0; 984 987 } ··· 1692 1689 (PPCLK_UCLK << 16) | (min_freq & 0xffff))), 1693 1690 "Failed to set soft min memclk !", 1694 1691 return ret); 1695 - 1696 - min_freq = data->dpm_table.mem_table.dpm_state.hard_min_level; 1697 - PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( 1698 - hwmgr, PPSMC_MSG_SetHardMinByFreq, 1699 - (PPCLK_UCLK << 16) | (min_freq & 0xffff))), 1700 - "Failed to set hard min memclk !", 1701 - return ret); 1702 1692 } 1703 1693 1704 1694 if (data->smu_features[GNLD_DPM_UVD].enabled && ··· 2244 2248 soft_min_level = mask ? (ffs(mask) - 1) : 0; 2245 2249 soft_max_level = mask ? (fls(mask) - 1) : 0; 2246 2250 2251 + if (soft_max_level >= data->dpm_table.gfx_table.count) { 2252 + pr_err("Clock level specified %d is over max allowed %d\n", 2253 + soft_max_level, 2254 + data->dpm_table.gfx_table.count - 1); 2255 + return -EINVAL; 2256 + } 2257 + 2247 2258 data->dpm_table.gfx_table.dpm_state.soft_min_level = 2248 2259 data->dpm_table.gfx_table.dpm_levels[soft_min_level].value; 2249 2260 data->dpm_table.gfx_table.dpm_state.soft_max_level = ··· 2270 2267 case PP_MCLK: 2271 2268 soft_min_level = mask ? (ffs(mask) - 1) : 0; 2272 2269 soft_max_level = mask ? (fls(mask) - 1) : 0; 2270 + 2271 + if (soft_max_level >= data->dpm_table.mem_table.count) { 2272 + pr_err("Clock level specified %d is over max allowed %d\n", 2273 + soft_max_level, 2274 + data->dpm_table.mem_table.count - 1); 2275 + return -EINVAL; 2276 + } 2273 2277 2274 2278 data->dpm_table.mem_table.dpm_state.soft_min_level = 2275 2279 data->dpm_table.mem_table.dpm_levels[soft_min_level].value; ··· 3271 3261 int pplib_workload = 0; 3272 3262 3273 3263 switch (power_profile) { 3264 + case PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT: 3265 + pplib_workload = WORKLOAD_DEFAULT_BIT; 3266 + break; 3274 3267 case PP_SMC_POWER_PROFILE_FULLSCREEN3D: 3275 3268 pplib_workload = WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT; 3276 3269 break; ··· 3303 3290 uint32_t i, size = 0; 3304 3291 uint16_t workload_type = 0; 3305 3292 static const char *profile_name[] = { 3293 + "BOOTUP_DEFAULT", 3306 3294 "3D_FULL_SCREEN", 3307 3295 "POWER_SAVING", 3308 3296 "VIDEO",
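The new PP_SCLK/PP_MCLK checks in vega20_hwmgr.c stop a user-supplied level mask from indexing past the end of dpm_levels[]. The ffs()/fls() pair maps the mask to its lowest and highest selected level, so only the upper bound needs validating; worked through for mask 0x6 (levels 1 and 2 selected):

    soft_min_level = ffs(0x6) - 1;       /* -> 1 */
    soft_max_level = fls(0x6) - 1;       /* -> 2 */
    if (soft_max_level >= table_count)   /* reject out-of-range input */
        return -EINVAL;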
+1 -1
drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
··· 705 705 /** 706 706 * The main hardware manager structure. 707 707 */ 708 - #define Workload_Policy_Max 5 708 + #define Workload_Policy_Max 6 709 709 710 710 struct pp_hwmgr { 711 711 void *adev;
+38 -10
drivers/gpu/drm/bridge/tc358767.c
··· 98 98 #define DP0_STARTVAL 0x064c 99 99 #define DP0_ACTIVEVAL 0x0650 100 100 #define DP0_SYNCVAL 0x0654 101 + #define SYNCVAL_HS_POL_ACTIVE_LOW (1 << 15) 102 + #define SYNCVAL_VS_POL_ACTIVE_LOW (1 << 31) 101 103 #define DP0_MISC 0x0658 102 104 #define TU_SIZE_RECOMMENDED (63) /* LSCLK cycles per TU */ 103 105 #define BPC_6 (0 << 5) ··· 144 142 #define DP0_LTLOOPCTRL 0x06d8 145 143 #define DP0_SNKLTCTRL 0x06e4 146 144 145 + #define DP1_SRCCTRL 0x07a0 146 + 147 147 /* PHY */ 148 148 #define DP_PHY_CTRL 0x0800 149 149 #define DP_PHY_RST BIT(28) /* DP PHY Global Soft Reset */ ··· 154 150 #define PHY_M1_RST BIT(12) /* Reset PHY1 Main Channel */ 155 151 #define PHY_RDY BIT(16) /* PHY Main Channels Ready */ 156 152 #define PHY_M0_RST BIT(8) /* Reset PHY0 Main Channel */ 153 + #define PHY_2LANE BIT(2) /* PHY Enable 2 lanes */ 157 154 #define PHY_A0_EN BIT(1) /* PHY Aux Channel0 Enable */ 158 155 #define PHY_M0_EN BIT(0) /* PHY Main Channel0 Enable */ ··· 545 540 unsigned long rate; 546 541 u32 value; 547 542 int ret; 543 + u32 dp_phy_ctrl; 548 544 549 545 rate = clk_get_rate(tc->refclk); 550 546 switch (rate) { ··· 570 564 value |= SYSCLK_SEL_LSCLK | LSCLK_DIV_2; 571 565 tc_write(SYS_PLLPARAM, value); 572 566 573 - tc_write(DP_PHY_CTRL, BGREN | PWR_SW_EN | BIT(2) | PHY_A0_EN); 567 + dp_phy_ctrl = BGREN | PWR_SW_EN | PHY_A0_EN; 568 + if (tc->link.base.num_lanes == 2) 569 + dp_phy_ctrl |= PHY_2LANE; 570 + tc_write(DP_PHY_CTRL, dp_phy_ctrl); 574 571 575 572 /* 576 573 * Initially PLLs are in bypass. Force PLL parameter update, ··· 728 719 729 720 tc_write(DP0_ACTIVEVAL, (mode->vdisplay << 16) | (mode->hdisplay)); 730 721 731 - tc_write(DP0_SYNCVAL, (vsync_len << 16) | (hsync_len << 0)); 722 + tc_write(DP0_SYNCVAL, (vsync_len << 16) | (hsync_len << 0) | 723 + ((mode->flags & DRM_MODE_FLAG_NHSYNC) ? SYNCVAL_HS_POL_ACTIVE_LOW : 0) | 724 + ((mode->flags & DRM_MODE_FLAG_NVSYNC) ? SYNCVAL_VS_POL_ACTIVE_LOW : 0)); 732 725 733 726 tc_write(DPIPXLFMT, VS_POL_ACTIVE_LOW | HS_POL_ACTIVE_LOW | 734 727 DE_POL_ACTIVE_HIGH | SUB_CFG_TYPE_CONFIG1 | DPI_BPP_RGB888); ··· 840 829 if (!tc->mode) 841 830 return -EINVAL; 842 831 843 - /* from excel file - DP0_SrcCtrl */ 844 - tc_write(DP0_SRCCTRL, DP0_SRCCTRL_SCRMBLDIS | DP0_SRCCTRL_EN810B | 845 - DP0_SRCCTRL_LANESKEW | DP0_SRCCTRL_LANES_2 | 846 - DP0_SRCCTRL_BW27 | DP0_SRCCTRL_AUTOCORRECT); 847 - /* from excel file - DP1_SrcCtrl */ 848 - tc_write(0x07a0, 0x00003083); 832 + tc_write(DP0_SRCCTRL, tc_srcctrl(tc)); 833 + /* SSCG and BW27 on DP1 must be set to the same as on DP0 */ 834 + tc_write(DP1_SRCCTRL, 835 + (tc->link.spread ? DP0_SRCCTRL_SSCG : 0) | 836 + ((tc->link.base.rate != 162000) ? DP0_SRCCTRL_BW27 : 0)); 849 837 850 838 rate = clk_get_rate(tc->refclk); 851 839 switch (rate) { ··· 865 855 } 866 856 value |= SYSCLK_SEL_LSCLK | LSCLK_DIV_2; 867 857 tc_write(SYS_PLLPARAM, value); 858 + 868 859 /* Setup Main Link */ 869 - dp_phy_ctrl = BGREN | PWR_SW_EN | BIT(2) | PHY_A0_EN | PHY_M0_EN; 860 + dp_phy_ctrl = BGREN | PWR_SW_EN | PHY_A0_EN | PHY_M0_EN; 861 + if (tc->link.base.num_lanes == 2) 862 + dp_phy_ctrl |= PHY_2LANE; 870 863 tc_write(DP_PHY_CTRL, dp_phy_ctrl); 871 864 msleep(100); ··· 1118 1105 static enum drm_mode_status tc_connector_mode_valid(struct drm_connector *connector, 1119 1106 struct drm_display_mode *mode) 1120 1107 { 1108 + struct tc_data *tc = connector_to_tc(connector); 1109 + u32 req, avail; 1110 + u32 bits_per_pixel = 24; 1111 + 1121 1112 /* DPI interface clock limitation: upto 154 MHz */ 1122 1113 if (mode->clock > 154000) 1123 1114 return MODE_CLOCK_HIGH; 1115 + 1116 + req = mode->clock * bits_per_pixel / 8; 1117 + avail = tc->link.base.num_lanes * tc->link.base.rate; 1118 + 1119 + if (req > avail) 1120 + return MODE_BAD; 1124 1121 1125 1122 return MODE_OK; 1126 1123 } ··· 1209 1186 /* Create eDP connector */ 1210 1187 drm_connector_helper_add(&tc->connector, &tc_connector_helper_funcs); 1211 1188 ret = drm_connector_init(drm, &tc->connector, &tc_connector_funcs, 1212 - DRM_MODE_CONNECTOR_eDP); 1189 + tc->panel ? DRM_MODE_CONNECTOR_eDP : 1190 + DRM_MODE_CONNECTOR_DisplayPort); 1213 1191 if (ret) 1214 1192 return ret; ··· 1219 1195 1220 1196 drm_display_info_set_bus_formats(&tc->connector.display_info, 1221 1197 &bus_format, 1); 1198 + tc->connector.display_info.bus_flags = 1199 + DRM_BUS_FLAG_DE_HIGH | 1200 + DRM_BUS_FLAG_PIXDATA_NEGEDGE | 1201 + DRM_BUS_FLAG_SYNC_NEGEDGE; 1222 1202 drm_connector_attach_encoder(&tc->connector, tc->bridge.encoder); 1223 1203 1224 1204 return 0;
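The new tc_connector_mode_valid() check is plain bandwidth accounting: a 24 bpp mode needs mode->clock (kHz) * 24 / 8 = 3 * clock kB/s of pixel data, while an 8b/10b DP link carries rate kB/s of payload per lane (162000 for RBR, 270000 for HBR). Worked through for 1080p60, assuming those units:

    req   = 148500 * 24 / 8;    /* 1920x1080@60 -> 445500 kB/s         */
    avail = 2 * 162000;         /* 2 lanes at RBR = 324000 -> MODE_BAD */
    avail = 2 * 270000;         /* 2 lanes at HBR = 540000 -> MODE_OK  */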
+1 -2
drivers/gpu/drm/drm_atomic_uapi.c
··· 1296 1296 (arg->flags & DRM_MODE_PAGE_FLIP_EVENT)) 1297 1297 return -EINVAL; 1298 1298 1299 - drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE); 1300 - 1301 1299 state = drm_atomic_state_alloc(dev); 1302 1300 if (!state) 1303 1301 return -ENOMEM; 1304 1302 1303 + drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE); 1305 1304 state->acquire_ctx = &ctx; 1306 1305 state->allow_modeset = !!(arg->flags & DRM_MODE_ATOMIC_ALLOW_MODESET); 1307 1306
+2
drivers/gpu/drm/drm_dp_helper.c
··· 1273 1273 { OUI(0x00, 0x22, 0xb9), DEVICE_ID_ANY, true, BIT(DP_DPCD_QUIRK_CONSTANT_N) }, 1274 1274 /* LG LP140WF6-SPM1 eDP panel */ 1275 1275 { OUI(0x00, 0x22, 0xb9), DEVICE_ID('s', 'i', 'v', 'a', 'r', 'T'), false, BIT(DP_DPCD_QUIRK_CONSTANT_N) }, 1276 + /* Apple panels need some additional handling to support PSR */ 1277 + { OUI(0x00, 0x10, 0xfa), DEVICE_ID_ANY, false, BIT(DP_DPCD_QUIRK_NO_PSR) } 1276 1278 }; 1277 1279 1278 1280 #undef OUI
+79 -54
drivers/gpu/drm/drm_fb_helper.c
··· 1621 1621 var_1->transp.msb_right == var_2->transp.msb_right; 1622 1622 } 1623 1623 1624 + static void drm_fb_helper_fill_pixel_fmt(struct fb_var_screeninfo *var, 1625 + u8 depth) 1626 + { 1627 + switch (depth) { 1628 + case 8: 1629 + var->red.offset = 0; 1630 + var->green.offset = 0; 1631 + var->blue.offset = 0; 1632 + var->red.length = 8; /* 8bit DAC */ 1633 + var->green.length = 8; 1634 + var->blue.length = 8; 1635 + var->transp.offset = 0; 1636 + var->transp.length = 0; 1637 + break; 1638 + case 15: 1639 + var->red.offset = 10; 1640 + var->green.offset = 5; 1641 + var->blue.offset = 0; 1642 + var->red.length = 5; 1643 + var->green.length = 5; 1644 + var->blue.length = 5; 1645 + var->transp.offset = 15; 1646 + var->transp.length = 1; 1647 + break; 1648 + case 16: 1649 + var->red.offset = 11; 1650 + var->green.offset = 5; 1651 + var->blue.offset = 0; 1652 + var->red.length = 5; 1653 + var->green.length = 6; 1654 + var->blue.length = 5; 1655 + var->transp.offset = 0; 1656 + break; 1657 + case 24: 1658 + var->red.offset = 16; 1659 + var->green.offset = 8; 1660 + var->blue.offset = 0; 1661 + var->red.length = 8; 1662 + var->green.length = 8; 1663 + var->blue.length = 8; 1664 + var->transp.offset = 0; 1665 + var->transp.length = 0; 1666 + break; 1667 + case 32: 1668 + var->red.offset = 16; 1669 + var->green.offset = 8; 1670 + var->blue.offset = 0; 1671 + var->red.length = 8; 1672 + var->green.length = 8; 1673 + var->blue.length = 8; 1674 + var->transp.offset = 24; 1675 + var->transp.length = 8; 1676 + break; 1677 + default: 1678 + break; 1679 + } 1680 + } 1681 + 1624 1682 /** 1625 1683 * drm_fb_helper_check_var - implementation for &fb_ops.fb_check_var 1626 1684 * @var: screeninfo to check ··· 1690 1632 struct drm_fb_helper *fb_helper = info->par; 1691 1633 struct drm_framebuffer *fb = fb_helper->fb; 1692 1634 1693 - if (var->pixclock != 0 || in_dbg_master()) 1635 + if (in_dbg_master()) 1694 1636 return -EINVAL; 1637 + 1638 + if (var->pixclock != 0) { 1639 + DRM_DEBUG("fbdev emulation doesn't support changing the pixel clock, value of pixclock is ignored\n"); 1640 + var->pixclock = 0; 1641 + } 1695 1642 1696 1643 if ((drm_format_info_block_width(fb->format, 0) > 1) || 1697 1644 (drm_format_info_block_height(fb->format, 0) > 1)) ··· 1715 1652 var->xres_virtual, var->yres_virtual, 1716 1653 fb->width, fb->height, fb->format->cpp[0] * 8); 1717 1654 return -EINVAL; 1655 + } 1656 + 1657 + /* 1658 + * Workaround for SDL 1.2, which is known to be setting all pixel format 1659 + * fields values to zero in some cases. We treat this situation as a 1660 + * kind of "use some reasonable autodetected values". 1661 + */ 1662 + if (!var->red.offset && !var->green.offset && 1663 + !var->blue.offset && !var->transp.offset && 1664 + !var->red.length && !var->green.length && 1665 + !var->blue.length && !var->transp.length && 1666 + !var->red.msb_right && !var->green.msb_right && 1667 + !var->blue.msb_right && !var->transp.msb_right) { 1668 + drm_fb_helper_fill_pixel_fmt(var, fb->format->depth); 1718 1669 } 1719 1670 1720 1671 /* ··· 2044 1967 info->var.yoffset = 0; 2045 1968 info->var.activate = FB_ACTIVATE_NOW; 2046 1969 2047 - switch (fb->format->depth) { 2048 - case 8: 2049 - info->var.red.offset = 0; 2050 - info->var.green.offset = 0; 2051 - info->var.blue.offset = 0; 2052 - info->var.red.length = 8; /* 8bit DAC */ 2053 - info->var.green.length = 8; 2054 - info->var.blue.length = 8; 2055 - info->var.transp.offset = 0; 2056 - info->var.transp.length = 0; 2057 - break; 2058 - case 15: 2059 - info->var.red.offset = 10; 2060 - info->var.green.offset = 5; 2061 - info->var.blue.offset = 0; 2062 - info->var.red.length = 5; 2063 - info->var.green.length = 5; 2064 - info->var.blue.length = 5; 2065 - info->var.transp.offset = 15; 2066 - info->var.transp.length = 1; 2067 - break; 2068 - case 16: 2069 - info->var.red.offset = 11; 2070 - info->var.green.offset = 5; 2071 - info->var.blue.offset = 0; 2072 - info->var.red.length = 5; 2073 - info->var.green.length = 6; 2074 - info->var.blue.length = 5; 2075 - info->var.transp.offset = 0; 2076 - break; 2077 - case 24: 2078 - info->var.red.offset = 16; 2079 - info->var.green.offset = 8; 2080 - info->var.blue.offset = 0; 2081 - info->var.red.length = 8; 2082 - info->var.green.length = 8; 2083 - info->var.blue.length = 8; 2084 - info->var.transp.offset = 0; 2085 - info->var.transp.length = 0; 2086 - break; 2087 - case 32: 2088 - info->var.red.offset = 16; 2089 - info->var.green.offset = 8; 2090 - info->var.blue.offset = 0; 2091 - info->var.red.length = 8; 2092 - info->var.green.length = 8; 2093 - info->var.blue.length = 8; 2094 - info->var.transp.offset = 24; 2095 - info->var.transp.length = 8; 2096 - break; 2097 - default: 2098 - break; 2099 - } 1970 + drm_fb_helper_fill_pixel_fmt(&info->var, fb->format->depth); 2100 1971 2101 1972 info->var.xres = fb_width; 2102 1973 info->var.yres = fb_height;
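Both fb helper call sites now funnel through the new drm_fb_helper_fill_pixel_fmt(), and check_var additionally uses it to service clients such as SDL 1.2 that submit an all-zero pixel format meaning "pick something sane for this depth". In sketch form (the all-zero test is the long condition shown in the hunk, abbreviated here with a hypothetical predicate):

    if (pixel_format_is_all_zero(var))
        drm_fb_helper_fill_pixel_fmt(var, fb->format->depth);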
+2 -2
drivers/gpu/drm/drm_mode_object.c
··· 459 459 struct drm_modeset_acquire_ctx ctx; 460 460 int ret; 461 461 462 - drm_modeset_acquire_init(&ctx, 0); 463 - 464 462 state = drm_atomic_state_alloc(dev); 465 463 if (!state) 466 464 return -ENOMEM; 465 + 466 + drm_modeset_acquire_init(&ctx, 0); 467 467 state->acquire_ctx = &ctx; 468 468 retry: 469 469 if (prop == state->dev->mode_config.dpms_property) {
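This hunk and the drm_atomic_uapi.c one above make the same ordering fix: drm_modeset_acquire_init() moves below the drm_atomic_state_alloc() check, so the early -ENOMEM return can no longer leave behind an initialized acquire context that nothing will ever drm_modeset_acquire_fini(). The general shape:

    state = drm_atomic_state_alloc(dev);
    if (!state)
        return -ENOMEM;                 /* nothing to unwind yet */

    drm_modeset_acquire_init(&ctx, 0);  /* from here on, every exit path
                                         * must reach the matching
                                         * drm_modeset_acquire_fini() */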
+3 -2
drivers/gpu/drm/drm_pci.c
··· 61 61 return NULL; 62 62 63 63 dmah->size = size; 64 - dmah->vaddr = dma_zalloc_coherent(&dev->pdev->dev, size, &dmah->busaddr, 65 - GFP_KERNEL | __GFP_COMP); 64 + dmah->vaddr = dma_alloc_coherent(&dev->pdev->dev, size, 65 + &dmah->busaddr, 66 + GFP_KERNEL | __GFP_COMP); 66 67 67 68 if (dmah->vaddr == NULL) { 68 69 kfree(dmah);
+1
drivers/gpu/drm/i915/gvt/handlers.c
··· 2799 2799 MMIO_DFH(_MMIO(0xe2a0), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); 2800 2800 MMIO_DFH(_MMIO(0xe2b0), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); 2801 2801 MMIO_DFH(_MMIO(0xe2c0), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); 2802 + MMIO_DFH(_MMIO(0x21f0), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); 2802 2803 return 0; 2803 2804 } 2804 2805
+1 -1
drivers/gpu/drm/i915/gvt/hypercall.h
··· 41 41 int (*host_init)(struct device *dev, void *gvt, const void *ops); 42 42 void (*host_exit)(struct device *dev, void *gvt); 43 43 int (*attach_vgpu)(void *vgpu, unsigned long *handle); 44 - void (*detach_vgpu)(unsigned long handle); 44 + void (*detach_vgpu)(void *vgpu); 45 45 int (*inject_msi)(unsigned long handle, u32 addr, u16 data); 46 46 unsigned long (*from_virt_to_mfn)(void *p); 47 47 int (*enable_page_track)(unsigned long handle, u64 gfn);
+26 -4
drivers/gpu/drm/i915/gvt/kvmgt.c
··· 996 996 { 997 997 unsigned int index; 998 998 u64 virtaddr; 999 - unsigned long req_size, pgoff = 0; 999 + unsigned long req_size, pgoff, req_start; 1000 1000 pgprot_t pg_prot; 1001 1001 struct intel_vgpu *vgpu = mdev_get_drvdata(mdev); 1002 1002 ··· 1014 1014 pg_prot = vma->vm_page_prot; 1015 1015 virtaddr = vma->vm_start; 1016 1016 req_size = vma->vm_end - vma->vm_start; 1017 - pgoff = vgpu_aperture_pa_base(vgpu) >> PAGE_SHIFT; 1017 + pgoff = vma->vm_pgoff & 1018 + ((1U << (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT)) - 1); 1019 + req_start = pgoff << PAGE_SHIFT; 1020 + 1021 + if (!intel_vgpu_in_aperture(vgpu, req_start)) 1022 + return -EINVAL; 1023 + if (req_start + req_size > 1024 + vgpu_aperture_offset(vgpu) + vgpu_aperture_sz(vgpu)) 1025 + return -EINVAL; 1026 + 1027 + pgoff = (gvt_aperture_pa_base(vgpu->gvt) >> PAGE_SHIFT) + pgoff; 1018 1028 1019 1029 return remap_pfn_range(vma, virtaddr, pgoff, req_size, pg_prot); 1020 1030 } ··· 1672 1662 return 0; 1673 1663 } 1674 1664 1675 - static void kvmgt_detach_vgpu(unsigned long handle) 1665 + static void kvmgt_detach_vgpu(void *p_vgpu) 1676 1666 { 1677 - /* nothing to do here */ 1667 + int i; 1668 + struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu; 1669 + 1670 + if (!vgpu->vdev.region) 1671 + return; 1672 + 1673 + for (i = 0; i < vgpu->vdev.num_regions; i++) 1674 + if (vgpu->vdev.region[i].ops->release) 1675 + vgpu->vdev.region[i].ops->release(vgpu, 1676 + &vgpu->vdev.region[i]); 1677 + vgpu->vdev.num_regions = 0; 1678 + kfree(vgpu->vdev.region); 1679 + vgpu->vdev.region = NULL; 1678 1680 } 1679 1681 1680 1682 static int kvmgt_inject_msi(unsigned long handle, u32 addr, u16 data)
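The kvmgt mmap fix is a standard bounds-validation pattern: the old handler ignored vma->vm_pgoff entirely and always mapped from the aperture base, so a userspace-chosen offset and length were never checked against the vGPU's aperture window. The new code derives the requested start from vm_pgoff, rejects anything outside the window, and only then translates to a physical pgoff. In essence (window_start, window_size and in_window() stand in for the vgpu_aperture_* accessors):

    req_start = pgoff << PAGE_SHIFT;
    if (!in_window(req_start) ||                 /* start inside?     */
        req_start + req_size > window_start + window_size)
        return -EINVAL;                          /* refuse to map it  */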
+1 -1
drivers/gpu/drm/i915/gvt/mpt.h
··· 101 101 if (!intel_gvt_host.mpt->detach_vgpu) 102 102 return; 103 103 104 - intel_gvt_host.mpt->detach_vgpu(vgpu->handle); 104 + intel_gvt_host.mpt->detach_vgpu(vgpu); 105 105 } 106 106 107 107 #define MSI_CAP_CONTROL(offset) (offset + 2)
+42 -22
drivers/gpu/drm/i915/gvt/scheduler.c
··· 356 356 return 0; 357 357 } 358 358 359 + static int 360 + intel_gvt_workload_req_alloc(struct intel_vgpu_workload *workload) 361 + { 362 + struct intel_vgpu *vgpu = workload->vgpu; 363 + struct intel_vgpu_submission *s = &vgpu->submission; 364 + struct i915_gem_context *shadow_ctx = s->shadow_ctx; 365 + struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; 366 + struct intel_engine_cs *engine = dev_priv->engine[workload->ring_id]; 367 + struct i915_request *rq; 368 + int ret = 0; 369 + 370 + lockdep_assert_held(&dev_priv->drm.struct_mutex); 371 + 372 + if (workload->req) 373 + goto out; 374 + 375 + rq = i915_request_alloc(engine, shadow_ctx); 376 + if (IS_ERR(rq)) { 377 + gvt_vgpu_err("fail to allocate gem request\n"); 378 + ret = PTR_ERR(rq); 379 + goto out; 380 + } 381 + workload->req = i915_request_get(rq); 382 + out: 383 + return ret; 384 + } 385 + 359 386 /** 360 387 * intel_gvt_scan_and_shadow_workload - audit the workload by scanning and 361 388 * shadow it as well, include ringbuffer,wa_ctx and ctx. ··· 399 372 struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; 400 373 struct intel_engine_cs *engine = dev_priv->engine[workload->ring_id]; 401 374 struct intel_context *ce; 402 - struct i915_request *rq; 403 375 int ret; 404 376 405 377 lockdep_assert_held(&dev_priv->drm.struct_mutex); 406 378 407 - if (workload->req) 379 + if (workload->shadow) 408 380 return 0; 409 381 410 382 ret = set_context_ppgtt_from_shadow(workload, shadow_ctx); ··· 443 417 goto err_shadow; 444 418 } 445 419 446 - rq = i915_request_alloc(engine, shadow_ctx); 447 - if (IS_ERR(rq)) { 448 - gvt_vgpu_err("fail to allocate gem request\n"); 449 - ret = PTR_ERR(rq); 450 - goto err_shadow; 451 - } 452 - workload->req = i915_request_get(rq); 453 - 454 - ret = populate_shadow_context(workload); 455 - if (ret) 456 - goto err_req; 457 - 420 + workload->shadow = true; 458 421 return 0; 459 - err_req: 460 - rq = fetch_and_zero(&workload->req); 461 - i915_request_put(rq); 462 422 err_shadow: 463 423 release_shadow_wa_ctx(&workload->wa_ctx); 464 424 err_unpin: ··· 683 671 mutex_lock(&vgpu->vgpu_lock); 684 672 mutex_lock(&dev_priv->drm.struct_mutex); 685 673 674 + ret = intel_gvt_workload_req_alloc(workload); 675 + if (ret) 676 + goto err_req; 677 + 686 678 ret = intel_gvt_scan_and_shadow_workload(workload); 687 679 if (ret) 688 680 goto out; 689 681 682 + ret = populate_shadow_context(workload); 683 + if (ret) { 684 + release_shadow_wa_ctx(&workload->wa_ctx); 685 + goto out; 686 + } 687 + 690 688 ret = prepare_workload(workload); 691 - 692 689 out: 693 - if (ret) 694 - workload->status = ret; 695 - 696 690 if (!IS_ERR_OR_NULL(workload->req)) { 697 691 gvt_dbg_sched("ring id %d submit workload to i915 %p\n", 698 692 ring_id, workload->req); 699 693 i915_request_add(workload->req); 700 694 workload->dispatched = true; 701 695 } 702 - 696 + err_req: 697 + if (ret) 698 + workload->status = ret; 703 699 mutex_unlock(&dev_priv->drm.struct_mutex); 704 700 mutex_unlock(&vgpu->vgpu_lock); 705 701 return ret;
+1
drivers/gpu/drm/i915/gvt/scheduler.h
··· 83 83 struct i915_request *req; 84 84 /* if this workload has been dispatched to i915? */ 85 85 bool dispatched; 86 + bool shadow; /* if workload has done shadow of guest request */ 86 87 int status; 87 88 88 89 struct intel_vgpu_mm *shadow_mm;
+9 -3
drivers/gpu/drm/i915/i915_debugfs.c
··· 984 984 intel_runtime_pm_get(i915); 985 985 gpu = i915_capture_gpu_state(i915); 986 986 intel_runtime_pm_put(i915); 987 - if (!gpu) 988 - return -ENOMEM; 987 + if (IS_ERR(gpu)) 988 + return PTR_ERR(gpu); 989 989 990 990 file->private_data = gpu; 991 991 return 0; ··· 1018 1018 1019 1019 static int i915_error_state_open(struct inode *inode, struct file *file) 1020 1020 { 1021 - file->private_data = i915_first_error_state(inode->i_private); 1021 + struct i915_gpu_state *error; 1022 + 1023 + error = i915_first_error_state(inode->i_private); 1024 + if (IS_ERR(error)) 1025 + return PTR_ERR(error); 1026 + 1027 + file->private_data = error; 1022 1028 return 0; 1023 1029 } 1024 1030
+12 -3
drivers/gpu/drm/i915/i915_gem_gtt.c
··· 2075 2075 int gen6_ppgtt_pin(struct i915_hw_ppgtt *base) 2076 2076 { 2077 2077 struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(base); 2078 + int err; 2078 2079 2079 2080 /* 2080 2081 * Workaround the limited maximum vma->pin_count and the aliasing_ppgtt ··· 2091 2090 * allocator works in address space sizes, so it's multiplied by page 2092 2091 * size. We allocate at the top of the GTT to avoid fragmentation. 2093 2092 */ 2094 - return i915_vma_pin(ppgtt->vma, 2095 - 0, GEN6_PD_ALIGN, 2096 - PIN_GLOBAL | PIN_HIGH); 2093 + err = i915_vma_pin(ppgtt->vma, 2094 + 0, GEN6_PD_ALIGN, 2095 + PIN_GLOBAL | PIN_HIGH); 2096 + if (err) 2097 + goto unpin; 2098 + 2099 + return 0; 2100 + 2101 + unpin: 2102 + ppgtt->pin_count = 0; 2103 + return err; 2097 2104 } 2098 2105 2099 2106 void gen6_ppgtt_unpin(struct i915_hw_ppgtt *base)
+14 -9
drivers/gpu/drm/i915/i915_gpu_error.c
··· 1907 1907 { 1908 1908 struct i915_gpu_state *error; 1909 1909 1910 + /* Check if GPU capture has been disabled */ 1911 + error = READ_ONCE(i915->gpu_error.first_error); 1912 + if (IS_ERR(error)) 1913 + return error; 1914 + 1910 1915 error = kzalloc(sizeof(*error), GFP_ATOMIC); 1911 - if (!error) 1912 - return NULL; 1916 + if (!error) { 1917 + i915_disable_error_state(i915, -ENOMEM); 1918 + return ERR_PTR(-ENOMEM); 1919 + } 1913 1920 1914 1921 kref_init(&error->ref); 1915 1922 error->i915 = i915; ··· 1952 1945 return; 1953 1946 1954 1947 error = i915_capture_gpu_state(i915); 1955 - if (!error) { 1956 - DRM_DEBUG_DRIVER("out of memory, not capturing error state\n"); 1957 - i915_disable_error_state(i915, -ENOMEM); 1948 + if (IS_ERR(error)) 1958 1949 return; 1959 - } 1960 1950 1961 1951 i915_error_capture_msg(i915, error, engine_mask, error_msg); 1962 1952 DRM_INFO("%s\n", error->error_msg); ··· 1991 1987 1992 1988 spin_lock_irq(&i915->gpu_error.lock); 1993 1989 error = i915->gpu_error.first_error; 1994 - if (error) 1990 + if (!IS_ERR_OR_NULL(error)) 1995 1991 i915_gpu_state_get(error); 1996 1992 spin_unlock_irq(&i915->gpu_error.lock); 1997 1993 ··· 2004 2000 2005 2001 spin_lock_irq(&i915->gpu_error.lock); 2006 2002 error = i915->gpu_error.first_error; 2007 - i915->gpu_error.first_error = NULL; 2003 + if (error != ERR_PTR(-ENODEV)) /* if disabled, always disabled */ 2004 + i915->gpu_error.first_error = NULL; 2008 2005 spin_unlock_irq(&i915->gpu_error.lock); 2009 2006 2010 - if (!IS_ERR(error)) 2007 + if (!IS_ERR_OR_NULL(error)) 2011 2008 i915_gpu_state_put(error); 2012 2009 } 2013 2010
+3 -1
drivers/gpu/drm/i915/i915_sysfs.c
··· 521 521 ssize_t ret; 522 522 523 523 gpu = i915_first_error_state(i915); 524 - if (gpu) { 524 + if (IS_ERR(gpu)) { 525 + ret = PTR_ERR(gpu); 526 + } else if (gpu) { 525 527 ret = i915_gpu_state_copy_to_buffer(gpu, buf, off, count); 526 528 i915_gpu_state_put(gpu); 527 529 } else {
+2 -1
drivers/gpu/drm/i915/intel_lrc.c
··· 2244 2244 if (ret) 2245 2245 return ret; 2246 2246 2247 + intel_engine_init_workarounds(engine); 2248 + 2247 2249 if (HAS_LOGICAL_RING_ELSQ(i915)) { 2248 2250 execlists->submit_reg = i915->regs + 2249 2251 i915_mmio_reg_offset(RING_EXECLIST_SQ_CONTENTS(engine)); ··· 2312 2310 } 2313 2311 2314 2312 intel_engine_init_whitelist(engine); 2315 - intel_engine_init_workarounds(engine); 2316 2313 2317 2314 return 0; 2318 2315 }
+6
drivers/gpu/drm/i915/intel_psr.c
··· 274 274 DRM_DEBUG_KMS("eDP panel supports PSR version %x\n", 275 275 intel_dp->psr_dpcd[0]); 276 276 277 + if (drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_NO_PSR)) { 278 + DRM_DEBUG_KMS("PSR support not currently available for this panel\n"); 279 + return; 280 + } 281 + 277 282 if (!(intel_dp->edp_dpcd[1] & DP_EDP_SET_POWER_CAP)) { 278 283 DRM_DEBUG_KMS("Panel lacks power state control, PSR cannot be enabled\n"); 279 284 return; 280 285 } 286 + 281 287 dev_priv->psr.sink_support = true; 282 288 dev_priv->psr.sink_sync_latency = 283 289 intel_dp_get_sink_sync_latency(intel_dp);
+2 -21
drivers/gpu/drm/meson/meson_crtc.c
··· 46 46 struct drm_crtc base; 47 47 struct drm_pending_vblank_event *event; 48 48 struct meson_drm *priv; 49 - bool enabled; 50 49 }; 51 50 #define to_meson_crtc(x) container_of(x, struct meson_crtc, base) 52 51 ··· 81 82 82 83 }; 83 84 84 - static void meson_crtc_enable(struct drm_crtc *crtc) 85 + static void meson_crtc_atomic_enable(struct drm_crtc *crtc, 86 + struct drm_crtc_state *old_state) 85 87 { 86 88 struct meson_crtc *meson_crtc = to_meson_crtc(crtc); 87 89 struct drm_crtc_state *crtc_state = crtc->state; ··· 107 107 priv->io_base + _REG(VPP_MISC)); 108 108 109 109 drm_crtc_vblank_on(crtc); 110 - 111 - meson_crtc->enabled = true; 112 - } 113 - 114 - static void meson_crtc_atomic_enable(struct drm_crtc *crtc, 115 - struct drm_crtc_state *old_state) 116 - { 117 - struct meson_crtc *meson_crtc = to_meson_crtc(crtc); 118 - struct meson_drm *priv = meson_crtc->priv; 119 - 120 - DRM_DEBUG_DRIVER("\n"); 121 - 122 - if (!meson_crtc->enabled) 123 - meson_crtc_enable(crtc); 124 110 125 111 priv->viu.osd1_enabled = true; 126 112 } ··· 139 153 140 154 crtc->state->event = NULL; 141 155 } 142 - 143 - meson_crtc->enabled = false; 144 156 } 145 157 146 158 static void meson_crtc_atomic_begin(struct drm_crtc *crtc, ··· 146 162 { 147 163 struct meson_crtc *meson_crtc = to_meson_crtc(crtc); 148 164 unsigned long flags; 149 - 150 - if (crtc->state->enable && !meson_crtc->enabled) 151 - meson_crtc_enable(crtc); 152 165 153 166 if (crtc->state->event) { 154 167 WARN_ON(drm_crtc_vblank_get(crtc) != 0);
+12 -2
drivers/gpu/drm/meson/meson_drv.c
··· 75 75 .fb_create = drm_gem_fb_create, 76 76 }; 77 77 78 + static const struct drm_mode_config_helper_funcs meson_mode_config_helpers = { 79 + .atomic_commit_tail = drm_atomic_helper_commit_tail_rpm, 80 + }; 81 + 78 82 static irqreturn_t meson_irq(int irq, void *arg) 79 83 { 80 84 struct drm_device *dev = arg; ··· 270 266 drm->mode_config.max_width = 3840; 271 267 drm->mode_config.max_height = 2160; 272 268 drm->mode_config.funcs = &meson_mode_config_funcs; 269 + drm->mode_config.helper_private = &meson_mode_config_helpers; 273 270 274 271 /* Hardware Initialization */ 275 272 ··· 393 388 remote_node = of_graph_get_remote_port_parent(ep); 394 389 if (!remote_node || 395 390 remote_node == parent || /* Ignore parent endpoint */ 396 - !of_device_is_available(remote_node)) 391 + !of_device_is_available(remote_node)) { 392 + of_node_put(remote_node); 397 393 continue; 394 + } 398 395 399 396 count += meson_probe_remote(pdev, match, remote, remote_node); 400 397 ··· 415 408 416 409 for_each_endpoint_of_node(np, ep) { 417 410 remote = of_graph_get_remote_port_parent(ep); 418 - if (!remote || !of_device_is_available(remote)) 411 + if (!remote || !of_device_is_available(remote)) { 412 + of_node_put(remote); 419 413 continue; 414 + } 420 415 421 416 count += meson_probe_remote(pdev, &match, np, remote); 417 + of_node_put(remote); 422 418 } 423 419 424 420 if (count && !match)
+3
drivers/gpu/drm/nouveau/nouveau_backlight.c
··· 253 253 case NV_DEVICE_INFO_V0_FERMI: 254 254 case NV_DEVICE_INFO_V0_KEPLER: 255 255 case NV_DEVICE_INFO_V0_MAXWELL: 256 + case NV_DEVICE_INFO_V0_PASCAL: 257 + case NV_DEVICE_INFO_V0_VOLTA: 258 + case NV_DEVICE_INFO_V0_TURING: 256 259 ret = nv50_backlight_init(nv_encoder, &props, &ops); 257 260 break; 258 261 default:
+33
drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
··· 2435 2435 }; 2436 2436 2437 2437 static const struct nvkm_device_chip 2438 + nv162_chipset = { 2439 + .name = "TU102", 2440 + .bar = tu104_bar_new, 2441 + .bios = nvkm_bios_new, 2442 + .bus = gf100_bus_new, 2443 + .devinit = tu104_devinit_new, 2444 + .fault = tu104_fault_new, 2445 + .fb = gv100_fb_new, 2446 + .fuse = gm107_fuse_new, 2447 + .gpio = gk104_gpio_new, 2448 + .i2c = gm200_i2c_new, 2449 + .ibus = gm200_ibus_new, 2450 + .imem = nv50_instmem_new, 2451 + .ltc = gp102_ltc_new, 2452 + .mc = tu104_mc_new, 2453 + .mmu = tu104_mmu_new, 2454 + .pci = gp100_pci_new, 2455 + .pmu = gp102_pmu_new, 2456 + .therm = gp100_therm_new, 2457 + .timer = gk20a_timer_new, 2458 + .top = gk104_top_new, 2459 + .ce[0] = tu104_ce_new, 2460 + .ce[1] = tu104_ce_new, 2461 + .ce[2] = tu104_ce_new, 2462 + .ce[3] = tu104_ce_new, 2463 + .ce[4] = tu104_ce_new, 2464 + .disp = tu104_disp_new, 2465 + .dma = gv100_dma_new, 2466 + .fifo = tu104_fifo_new, 2467 + }; 2468 + 2469 + static const struct nvkm_device_chip 2438 2470 nv164_chipset = { 2439 2471 .name = "TU104", 2440 2472 .bar = tu104_bar_new, ··· 2982 2950 case 0x138: device->chip = &nv138_chipset; break; 2983 2951 case 0x13b: device->chip = &nv13b_chipset; break; 2984 2952 case 0x140: device->chip = &nv140_chipset; break; 2953 + case 0x162: device->chip = &nv162_chipset; break; 2985 2954 case 0x164: device->chip = &nv164_chipset; break; 2986 2955 case 0x166: device->chip = &nv166_chipset; break; 2987 2956 default:
+5 -2
drivers/gpu/drm/nouveau/nvkm/engine/falcon.c
··· 22 22 #include <engine/falcon.h> 23 23 24 24 #include <core/gpuobj.h> 25 + #include <subdev/mc.h> 25 26 #include <subdev/timer.h> 26 27 #include <engine/fifo.h> 27 28 ··· 108 107 } 109 108 } 110 109 111 - nvkm_mask(device, base + 0x048, 0x00000003, 0x00000000); 112 - nvkm_wr32(device, base + 0x014, 0xffffffff); 110 + if (nvkm_mc_enabled(device, engine->subdev.index)) { 111 + nvkm_mask(device, base + 0x048, 0x00000003, 0x00000000); 112 + nvkm_wr32(device, base + 0x014, 0xffffffff); 113 + } 113 114 return 0; 114 115 } 115 116
+4 -3
drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c
··· 132 132 duty = nvkm_therm_update_linear(therm); 133 133 break; 134 134 case NVBIOS_THERM_FAN_OTHER: 135 - if (therm->cstate) 135 + if (therm->cstate) { 136 136 duty = therm->cstate; 137 - else 137 + poll = false; 138 + } else { 138 139 duty = nvkm_therm_update_linear_fallback(therm); 139 - poll = false; 140 + } 140 141 break; 141 142 } 142 143 immd = false;
-4
drivers/gpu/drm/qxl/qxl_drv.c
··· 250 250 #if defined(CONFIG_DEBUG_FS) 251 251 .debugfs_init = qxl_debugfs_init, 252 252 #endif 253 - .prime_handle_to_fd = drm_gem_prime_handle_to_fd, 254 - .prime_fd_to_handle = drm_gem_prime_fd_to_handle, 255 253 .gem_prime_export = drm_gem_prime_export, 256 254 .gem_prime_import = drm_gem_prime_import, 257 255 .gem_prime_pin = qxl_gem_prime_pin, 258 256 .gem_prime_unpin = qxl_gem_prime_unpin, 259 - .gem_prime_get_sg_table = qxl_gem_prime_get_sg_table, 260 - .gem_prime_import_sg_table = qxl_gem_prime_import_sg_table, 261 257 .gem_prime_vmap = qxl_gem_prime_vmap, 262 258 .gem_prime_vunmap = qxl_gem_prime_vunmap, 263 259 .gem_prime_mmap = qxl_gem_prime_mmap,
-14
drivers/gpu/drm/qxl/qxl_prime.c
··· 38 38 WARN_ONCE(1, "not implemented"); 39 39 } 40 40 41 - struct sg_table *qxl_gem_prime_get_sg_table(struct drm_gem_object *obj) 42 - { 43 - WARN_ONCE(1, "not implemented"); 44 - return ERR_PTR(-ENOSYS); 45 - } 46 - 47 - struct drm_gem_object *qxl_gem_prime_import_sg_table( 48 - struct drm_device *dev, struct dma_buf_attachment *attach, 49 - struct sg_table *table) 50 - { 51 - WARN_ONCE(1, "not implemented"); 52 - return ERR_PTR(-ENOSYS); 53 - } 54 - 55 41 void *qxl_gem_prime_vmap(struct drm_gem_object *obj) 56 42 { 57 43 WARN_ONCE(1, "not implemented");
+3 -1
drivers/gpu/drm/rockchip/rockchip_rgb.c
··· 113 113 child_count++; 114 114 ret = drm_of_find_panel_or_bridge(dev->of_node, 0, endpoint_id, 115 115 &panel, &bridge); 116 - if (!ret) 116 + if (!ret) { 117 + of_node_put(endpoint); 117 118 break; 119 + } 118 120 } 119 121 120 122 of_node_put(port);
+3 -2
drivers/gpu/drm/sun4i/sun4i_backend.c
··· 786 786 remote = of_graph_get_remote_port_parent(ep); 787 787 if (!remote) 788 788 continue; 789 + of_node_put(remote); 789 790 790 791 /* does this node match any registered engines? */ 791 792 list_for_each_entry(frontend, &drv->frontend_list, list) { 792 793 if (remote == frontend->node) { 793 - of_node_put(remote); 794 794 of_node_put(port); 795 + of_node_put(ep); 795 796 return frontend; 796 797 } 797 798 } 798 799 } 799 - 800 + of_node_put(port); 800 801 return ERR_PTR(-EINVAL); 801 802 } 802 803
-4
drivers/gpu/drm/virtio/virtgpu_drv.c
··· 127 127 #if defined(CONFIG_DEBUG_FS) 128 128 .debugfs_init = virtio_gpu_debugfs_init, 129 129 #endif 130 - .prime_handle_to_fd = drm_gem_prime_handle_to_fd, 131 - .prime_fd_to_handle = drm_gem_prime_fd_to_handle, 132 130 .gem_prime_export = drm_gem_prime_export, 133 131 .gem_prime_import = drm_gem_prime_import, 134 132 .gem_prime_pin = virtgpu_gem_prime_pin, 135 133 .gem_prime_unpin = virtgpu_gem_prime_unpin, 136 - .gem_prime_get_sg_table = virtgpu_gem_prime_get_sg_table, 137 - .gem_prime_import_sg_table = virtgpu_gem_prime_import_sg_table, 138 134 .gem_prime_vmap = virtgpu_gem_prime_vmap, 139 135 .gem_prime_vunmap = virtgpu_gem_prime_vunmap, 140 136 .gem_prime_mmap = virtgpu_gem_prime_mmap,
-4
drivers/gpu/drm/virtio/virtgpu_drv.h
··· 372 372 /* virtgpu_prime.c */ 373 373 int virtgpu_gem_prime_pin(struct drm_gem_object *obj); 374 374 void virtgpu_gem_prime_unpin(struct drm_gem_object *obj); 375 - struct sg_table *virtgpu_gem_prime_get_sg_table(struct drm_gem_object *obj); 376 - struct drm_gem_object *virtgpu_gem_prime_import_sg_table( 377 - struct drm_device *dev, struct dma_buf_attachment *attach, 378 - struct sg_table *sgt); 379 375 void *virtgpu_gem_prime_vmap(struct drm_gem_object *obj); 380 376 void virtgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr); 381 377 int virtgpu_gem_prime_mmap(struct drm_gem_object *obj,
-14
drivers/gpu/drm/virtio/virtgpu_prime.c
··· 39 39 WARN_ONCE(1, "not implemented"); 40 40 } 41 41 42 - struct sg_table *virtgpu_gem_prime_get_sg_table(struct drm_gem_object *obj) 43 - { 44 - WARN_ONCE(1, "not implemented"); 45 - return ERR_PTR(-ENODEV); 46 - } 47 - 48 - struct drm_gem_object *virtgpu_gem_prime_import_sg_table( 49 - struct drm_device *dev, struct dma_buf_attachment *attach, 50 - struct sg_table *table) 51 - { 52 - WARN_ONCE(1, "not implemented"); 53 - return ERR_PTR(-ENODEV); 54 - } 55 - 56 42 void *virtgpu_gem_prime_vmap(struct drm_gem_object *obj) 57 43 { 58 44 struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
+1
drivers/gpu/vga/Kconfig
··· 21 21 bool "Laptop Hybrid Graphics - GPU switching support" 22 22 depends on X86 23 23 depends on ACPI 24 + depends on PCI 24 25 select VGA_ARB 25 26 help 26 27 Many laptops released in 2008/9/10 have two GPUs with a multiplexer
+3 -1
drivers/hwmon/lm80.c
··· 393 393 } 394 394 395 395 rv = lm80_read_value(client, LM80_REG_FANDIV); 396 - if (rv < 0) 396 + if (rv < 0) { 397 + mutex_unlock(&data->update_lock); 397 398 return rv; 399 + } 398 400 reg = (rv & ~(3 << (2 * (nr + 1)))) 399 401 | (data->fan_div[nr] << (2 * (nr + 1))); 400 402 lm80_write_value(client, LM80_REG_FANDIV, reg);
+7 -5
drivers/hwmon/nct6775.c
··· 44 44 * nct6796d 14 7 7 2+6 0xd420 0xc1 0x5ca3 45 45 * nct6797d 14 7 7 2+6 0xd450 0xc1 0x5ca3 46 46 * (0xd451) 47 - * nct6798d 14 7 7 2+6 0xd458 0xc1 0x5ca3 48 - * (0xd459) 47 + * nct6798d 14 7 7 2+6 0xd428 0xc1 0x5ca3 48 + * (0xd429) 49 49 * 50 50 * #temp lists the number of monitored temperature sources (first value) plus 51 51 * the number of directly connectable temperature sensors (second value). ··· 138 138 #define SIO_NCT6795_ID 0xd350 139 139 #define SIO_NCT6796_ID 0xd420 140 140 #define SIO_NCT6797_ID 0xd450 141 - #define SIO_NCT6798_ID 0xd458 141 + #define SIO_NCT6798_ID 0xd428 142 142 #define SIO_ID_MASK 0xFFF8 143 143 144 144 enum pwm_enable { off, manual, thermal_cruise, speed_cruise, sf3, sf4 }; ··· 4508 4508 4509 4509 if (data->kind == nct6791 || data->kind == nct6792 || 4510 4510 data->kind == nct6793 || data->kind == nct6795 || 4511 - data->kind == nct6796) 4511 + data->kind == nct6796 || data->kind == nct6797 || 4512 + data->kind == nct6798) 4512 4513 nct6791_enable_io_mapping(sioreg); 4513 4514 4514 4515 superio_exit(sioreg); ··· 4645 4644 4646 4645 if (sio_data->kind == nct6791 || sio_data->kind == nct6792 || 4647 4646 sio_data->kind == nct6793 || sio_data->kind == nct6795 || 4648 - sio_data->kind == nct6796) 4647 + sio_data->kind == nct6796 || sio_data->kind == nct6797 || 4648 + sio_data->kind == nct6798) 4649 4649 nct6791_enable_io_mapping(sioaddr); 4650 4650 4651 4651 superio_exit(sioaddr);
+12 -12
drivers/hwmon/occ/common.c
··· 380 380 val *= 1000000ULL; 381 381 break; 382 382 case 2: 383 - val = get_unaligned_be32(&power->update_tag) * 384 - occ->powr_sample_time_us; 383 + val = (u64)get_unaligned_be32(&power->update_tag) * 384 + occ->powr_sample_time_us; 385 385 break; 386 386 case 3: 387 387 val = get_unaligned_be16(&power->value) * 1000000ULL; ··· 425 425 &power->update_tag); 426 426 break; 427 427 case 2: 428 - val = get_unaligned_be32(&power->update_tag) * 429 - occ->powr_sample_time_us; 428 + val = (u64)get_unaligned_be32(&power->update_tag) * 429 + occ->powr_sample_time_us; 430 430 break; 431 431 case 3: 432 432 val = get_unaligned_be16(&power->value) * 1000000ULL; ··· 463 463 &power->system.update_tag); 464 464 break; 465 465 case 2: 466 - val = get_unaligned_be32(&power->system.update_tag) * 467 - occ->powr_sample_time_us; 466 + val = (u64)get_unaligned_be32(&power->system.update_tag) * 467 + occ->powr_sample_time_us; 468 468 break; 469 469 case 3: 470 470 val = get_unaligned_be16(&power->system.value) * 1000000ULL; ··· 477 477 &power->proc.update_tag); 478 478 break; 479 479 case 6: 480 - val = get_unaligned_be32(&power->proc.update_tag) * 481 - occ->powr_sample_time_us; 480 + val = (u64)get_unaligned_be32(&power->proc.update_tag) * 481 + occ->powr_sample_time_us; 482 482 break; 483 483 case 7: 484 484 val = get_unaligned_be16(&power->proc.value) * 1000000ULL; ··· 491 491 &power->vdd.update_tag); 492 492 break; 493 493 case 10: 494 - val = get_unaligned_be32(&power->vdd.update_tag) * 495 - occ->powr_sample_time_us; 494 + val = (u64)get_unaligned_be32(&power->vdd.update_tag) * 495 + occ->powr_sample_time_us; 496 496 break; 497 497 case 11: 498 498 val = get_unaligned_be16(&power->vdd.value) * 1000000ULL; ··· 505 505 &power->vdn.update_tag); 506 506 break; 507 507 case 14: 508 - val = get_unaligned_be32(&power->vdn.update_tag) * 509 - occ->powr_sample_time_us; 508 + val = (u64)get_unaligned_be32(&power->vdn.update_tag) * 509 + occ->powr_sample_time_us; 510 510 break; 511 511 case 15: 512 512 val = get_unaligned_be16(&power->vdn.value) * 1000000ULL;
+1 -1
drivers/hwmon/tmp421.c
··· 88 88 .data = (void *)2 89 89 }, 90 90 { 91 - .compatible = "ti,tmp422", 91 + .compatible = "ti,tmp442", 92 92 .data = (void *)3 93 93 }, 94 94 { },
+14 -1
drivers/i2c/busses/i2c-tegra.c
··· 155 155 * @has_mst_fifo: The I2C controller contains the new MST FIFO interface that 156 156 * provides additional features and allows for longer messages to 157 157 * be transferred in one go. 158 + * @quirks: i2c adapter quirks for limiting write/read transfer size and not 159 + * allowing 0 length transfers. 158 160 */ 159 161 struct tegra_i2c_hw_feature { 160 162 bool has_continue_xfer_support; ··· 169 167 bool has_multi_master_mode; 170 168 bool has_slcg_override_reg; 171 169 bool has_mst_fifo; 170 + const struct i2c_adapter_quirks *quirks; 172 171 }; 173 172 174 173 /** ··· 840 837 .max_write_len = 4096, 841 838 }; 842 839 840 + static const struct i2c_adapter_quirks tegra194_i2c_quirks = { 841 + .flags = I2C_AQ_NO_ZERO_LEN, 842 + }; 843 + 843 844 static const struct tegra_i2c_hw_feature tegra20_i2c_hw = { 844 845 .has_continue_xfer_support = false, 845 846 .has_per_pkt_xfer_complete_irq = false, ··· 855 848 .has_multi_master_mode = false, 856 849 .has_slcg_override_reg = false, 857 850 .has_mst_fifo = false, 851 + .quirks = &tegra_i2c_quirks, 858 852 }; 859 853 860 854 static const struct tegra_i2c_hw_feature tegra30_i2c_hw = { ··· 869 861 .has_multi_master_mode = false, 870 862 .has_slcg_override_reg = false, 871 863 .has_mst_fifo = false, 864 + .quirks = &tegra_i2c_quirks, 872 865 }; 873 866 874 867 static const struct tegra_i2c_hw_feature tegra114_i2c_hw = { ··· 883 874 .has_multi_master_mode = false, 884 875 .has_slcg_override_reg = false, 885 876 .has_mst_fifo = false, 877 + .quirks = &tegra_i2c_quirks, 886 878 }; 887 879 888 880 static const struct tegra_i2c_hw_feature tegra124_i2c_hw = { ··· 897 887 .has_multi_master_mode = false, 898 888 .has_slcg_override_reg = true, 899 889 .has_mst_fifo = false, 890 + .quirks = &tegra_i2c_quirks, 900 891 }; 901 892 902 893 static const struct tegra_i2c_hw_feature tegra210_i2c_hw = { ··· 911 900 .has_multi_master_mode = true, 912 901 .has_slcg_override_reg = true, 913 902 .has_mst_fifo = false, 903 + .quirks = &tegra_i2c_quirks, 914 904 }; 915 905 916 906 static const struct tegra_i2c_hw_feature tegra194_i2c_hw = { ··· 925 913 .has_multi_master_mode = true, 926 914 .has_slcg_override_reg = true, 927 915 .has_mst_fifo = true, 916 + .quirks = &tegra194_i2c_quirks, 928 917 }; 929 918 930 919 /* Match table for of_platform binding */ ··· 977 964 i2c_dev->base = base; 978 965 i2c_dev->div_clk = div_clk; 979 966 i2c_dev->adapter.algo = &tegra_i2c_algo; 980 - i2c_dev->adapter.quirks = &tegra_i2c_quirks; 981 967 i2c_dev->irq = irq; 982 968 i2c_dev->cont_id = pdev->id; 983 969 i2c_dev->dev = &pdev->dev; ··· 992 980 i2c_dev->hw = of_device_get_match_data(&pdev->dev); 993 981 i2c_dev->is_dvc = of_device_is_compatible(pdev->dev.of_node, 994 982 "nvidia,tegra20-i2c-dvc"); 983 + i2c_dev->adapter.quirks = i2c_dev->hw->quirks; 995 984 init_completion(&i2c_dev->msg_complete); 996 985 spin_lock_init(&i2c_dev->xfer_lock); 997 986
+6
drivers/i2c/i2c-dev.c
··· 470 470 data_arg.data); 471 471 } 472 472 case I2C_RETRIES: 473 + if (arg > INT_MAX) 474 + return -EINVAL; 475 + 473 476 client->adapter->retries = arg; 474 477 break; 475 478 case I2C_TIMEOUT: 479 + if (arg > INT_MAX) 480 + return -EINVAL; 481 + 476 482 /* For historical reasons, user-space sets the timeout 477 483 * value in units of 10 ms. 478 484 */
+2 -5
drivers/i3c/master/dw-i3c-master.c
··· 901 901 master->regs + 902 902 DEV_ADDR_TABLE_LOC(master->datstartaddr, data->index)); 903 903 904 - if (!old_dyn_addr) 905 - return 0; 906 - 907 904 master->addrs[data->index] = dev->info.dyn_addr; 908 905 909 906 return 0; ··· 922 925 return -ENOMEM; 923 926 924 927 data->index = pos; 925 - master->addrs[pos] = dev->info.dyn_addr; 928 + master->addrs[pos] = dev->info.dyn_addr ? : dev->info.static_addr; 926 929 master->free_pos &= ~BIT(pos); 927 930 i3c_dev_set_master_data(dev, data); 928 931 929 - writel(DEV_ADDR_TABLE_DYNAMIC_ADDR(dev->info.dyn_addr), 932 + writel(DEV_ADDR_TABLE_DYNAMIC_ADDR(master->addrs[pos]), 930 933 master->regs + 931 934 DEV_ADDR_TABLE_LOC(master->datstartaddr, data->index)); 932 935
+2 -2
drivers/i3c/master/i3c-master-cdns.c
··· 1556 1556 return PTR_ERR(master->pclk); 1557 1557 1558 1558 master->sysclk = devm_clk_get(&pdev->dev, "sysclk"); 1559 - if (IS_ERR(master->pclk)) 1560 - return PTR_ERR(master->pclk); 1559 + if (IS_ERR(master->sysclk)) 1560 + return PTR_ERR(master->sysclk); 1561 1561 1562 1562 irq = platform_get_irq(pdev, 0); 1563 1563 if (irq < 0)
+4 -1
drivers/iio/adc/ti_am335x_adc.c
··· 142 142 stepconfig |= STEPCONFIG_MODE_SWCNT; 143 143 144 144 tiadc_writel(adc_dev, REG_STEPCONFIG(steps), 145 - stepconfig | STEPCONFIG_INP(chan)); 145 + stepconfig | STEPCONFIG_INP(chan) | 146 + STEPCONFIG_INM_ADCREFM | 147 + STEPCONFIG_RFP_VREFP | 148 + STEPCONFIG_RFM_VREFN); 146 149 147 150 if (adc_dev->open_delay[i] > STEPDELAY_OPEN_MASK) { 148 151 dev_warn(dev, "chan %d open delay truncating to 0x3FFFF\n",
+4 -1
drivers/infiniband/core/cma.c
··· 494 494 id_priv->id.route.addr.dev_addr.transport = 495 495 rdma_node_get_transport(cma_dev->device->node_type); 496 496 list_add_tail(&id_priv->list, &cma_dev->id_list); 497 - rdma_restrack_kadd(&id_priv->res); 497 + if (id_priv->res.kern_name) 498 + rdma_restrack_kadd(&id_priv->res); 499 + else 500 + rdma_restrack_uadd(&id_priv->res); 498 501 } 499 502 500 503 static void cma_attach_to_dev(struct rdma_id_private *id_priv,
-4
drivers/infiniband/core/nldev.c
··· 584 584 if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_USECNT, 585 585 atomic_read(&pd->usecnt), RDMA_NLDEV_ATTR_PAD)) 586 586 goto err; 587 - if ((pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY) && 588 - nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_UNSAFE_GLOBAL_RKEY, 589 - pd->unsafe_global_rkey)) 590 - goto err; 591 587 592 588 if (fill_res_name_pid(msg, res)) 593 589 goto err;
+2
drivers/infiniband/core/rdma_core.h
··· 106 106 enum uverbs_obj_access access, 107 107 bool commit); 108 108 109 + int uverbs_output_written(const struct uverbs_attr_bundle *bundle, size_t idx); 110 + 109 111 void setup_ufile_idr_uobject(struct ib_uverbs_file *ufile); 110 112 void release_ufile_idr_uobject(struct ib_uverbs_file *ufile); 111 113
+10 -1
drivers/infiniband/core/uverbs_cmd.c
··· 60 60 { 61 61 int ret; 62 62 63 + if (uverbs_attr_is_valid(attrs, UVERBS_ATTR_CORE_OUT)) 64 + return uverbs_copy_to_struct_or_zero( 65 + attrs, UVERBS_ATTR_CORE_OUT, resp, resp_len); 66 + 63 67 if (copy_to_user(attrs->ucore.outbuf, resp, 64 68 min(attrs->ucore.outlen, resp_len))) 65 69 return -EFAULT; ··· 1185 1181 goto out_put; 1186 1182 } 1187 1183 1184 + if (uverbs_attr_is_valid(attrs, UVERBS_ATTR_CORE_OUT)) 1185 + ret = uverbs_output_written(attrs, UVERBS_ATTR_CORE_OUT); 1186 + 1188 1187 ret = 0; 1189 1188 1190 1189 out_put: ··· 2019 2012 return -ENOMEM; 2020 2013 2021 2014 qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, attrs); 2022 - if (!qp) 2015 + if (!qp) { 2016 + ret = -EINVAL; 2023 2017 goto out; 2018 + } 2024 2019 2025 2020 is_ud = qp->qp_type == IB_QPT_UD; 2026 2021 sg_ind = 0;
+49 -13
drivers/infiniband/core/uverbs_ioctl.c
··· 144 144 0, uattr->len - len); 145 145 } 146 146 147 + static int uverbs_set_output(const struct uverbs_attr_bundle *bundle, 148 + const struct uverbs_attr *attr) 149 + { 150 + struct bundle_priv *pbundle = 151 + container_of(bundle, struct bundle_priv, bundle); 152 + u16 flags; 153 + 154 + flags = pbundle->uattrs[attr->ptr_attr.uattr_idx].flags | 155 + UVERBS_ATTR_F_VALID_OUTPUT; 156 + if (put_user(flags, 157 + &pbundle->user_attrs[attr->ptr_attr.uattr_idx].flags)) 158 + return -EFAULT; 159 + return 0; 160 + } 161 + 147 162 static int uverbs_process_idrs_array(struct bundle_priv *pbundle, 148 163 const struct uverbs_api_attr *attr_uapi, 149 164 struct uverbs_objs_arr_attr *attr, ··· 471 456 } 472 457 473 458 /* 459 + * Until the drivers are revised to use the bundle directly we have to 460 + * assume that the driver wrote to its UHW_OUT and flag userspace 461 + * appropriately. 462 + */ 463 + if (!ret && pbundle->method_elm->has_udata) { 464 + const struct uverbs_attr *attr = 465 + uverbs_attr_get(&pbundle->bundle, UVERBS_ATTR_UHW_OUT); 466 + 467 + if (!IS_ERR(attr)) 468 + ret = uverbs_set_output(&pbundle->bundle, attr); 469 + } 470 + 471 + /* 474 472 * EPROTONOSUPPORT is ONLY to be returned if the ioctl framework can 475 473 * not invoke the method because the request is not supported. No 476 474 * other cases should return this code. ··· 734 706 int uverbs_copy_to(const struct uverbs_attr_bundle *bundle, size_t idx, 735 707 const void *from, size_t size) 736 708 { 737 - struct bundle_priv *pbundle = 738 - container_of(bundle, struct bundle_priv, bundle); 739 709 const struct uverbs_attr *attr = uverbs_attr_get(bundle, idx); 740 - u16 flags; 741 710 size_t min_size; 742 711 743 712 if (IS_ERR(attr)) ··· 744 719 if (copy_to_user(u64_to_user_ptr(attr->ptr_attr.data), from, min_size)) 745 720 return -EFAULT; 746 721 747 - flags = pbundle->uattrs[attr->ptr_attr.uattr_idx].flags | 748 - UVERBS_ATTR_F_VALID_OUTPUT; 749 - if (put_user(flags, 750 - &pbundle->user_attrs[attr->ptr_attr.uattr_idx].flags)) 751 - return -EFAULT; 752 - 753 - return 0; 722 + return uverbs_set_output(bundle, attr); 754 723 } 755 724 EXPORT_SYMBOL(uverbs_copy_to); 725 + 726 + 727 + /* 728 + * This is only used if the caller has directly used copy_to_use to write the 729 + * data. It signals to user space that the buffer is filled in. 730 + */ 731 + int uverbs_output_written(const struct uverbs_attr_bundle *bundle, size_t idx) 732 + { 733 + const struct uverbs_attr *attr = uverbs_attr_get(bundle, idx); 734 + 735 + if (IS_ERR(attr)) 736 + return PTR_ERR(attr); 737 + 738 + return uverbs_set_output(bundle, attr); 739 + } 756 740 757 741 int _uverbs_get_const(s64 *to, const struct uverbs_attr_bundle *attrs_bundle, 758 742 size_t idx, s64 lower_bound, u64 upper_bound, ··· 791 757 { 792 758 const struct uverbs_attr *attr = uverbs_attr_get(bundle, idx); 793 759 794 - if (clear_user(u64_to_user_ptr(attr->ptr_attr.data), 795 - attr->ptr_attr.len)) 796 - return -EFAULT; 760 + if (size < attr->ptr_attr.len) { 761 + if (clear_user(u64_to_user_ptr(attr->ptr_attr.data) + size, 762 + attr->ptr_attr.len - size)) 763 + return -EFAULT; 764 + } 797 765 return uverbs_copy_to(bundle, idx, from, size); 798 766 }
+1
drivers/infiniband/core/uverbs_main.c
··· 690 690 691 691 buf += sizeof(hdr); 692 692 693 + memset(bundle.attr_present, 0, sizeof(bundle.attr_present)); 693 694 bundle.ufile = file; 694 695 if (!method_elm->is_ex) { 695 696 size_t in_len = hdr.in_words * 4 - sizeof(hdr);
+2 -2
drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
··· 766 766 return NULL; 767 767 768 768 sbuf->size = size; 769 - sbuf->sb = dma_zalloc_coherent(&rcfw->pdev->dev, sbuf->size, 770 - &sbuf->dma_addr, GFP_ATOMIC); 769 + sbuf->sb = dma_alloc_coherent(&rcfw->pdev->dev, sbuf->size, 770 + &sbuf->dma_addr, GFP_ATOMIC); 771 771 if (!sbuf->sb) 772 772 goto bail; 773 773
+4 -4
drivers/infiniband/hw/bnxt_re/qplib_res.c
··· 105 105 106 106 if (!sghead) { 107 107 for (i = 0; i < pages; i++) { 108 - pbl->pg_arr[i] = dma_zalloc_coherent(&pdev->dev, 109 - pbl->pg_size, 110 - &pbl->pg_map_arr[i], 111 - GFP_KERNEL); 108 + pbl->pg_arr[i] = dma_alloc_coherent(&pdev->dev, 109 + pbl->pg_size, 110 + &pbl->pg_map_arr[i], 111 + GFP_KERNEL); 112 112 if (!pbl->pg_arr[i]) 113 113 goto fail; 114 114 pbl->pg_count++;
+3 -3
drivers/infiniband/hw/cxgb3/cxio_hal.c
··· 291 291 if (!wq->sq) 292 292 goto err3; 293 293 294 - wq->queue = dma_zalloc_coherent(&(rdev_p->rnic_info.pdev->dev), 295 - depth * sizeof(union t3_wr), 296 - &(wq->dma_addr), GFP_KERNEL); 294 + wq->queue = dma_alloc_coherent(&(rdev_p->rnic_info.pdev->dev), 295 + depth * sizeof(union t3_wr), 296 + &(wq->dma_addr), GFP_KERNEL); 297 297 if (!wq->queue) 298 298 goto err4; 299 299
+2 -3
drivers/infiniband/hw/cxgb4/qp.c
··· 2564 2564 wq->rqt_abs_idx = (wq->rqt_hwaddr - rdev->lldi.vr->rq.start) >> 2565 2565 T4_RQT_ENTRY_SHIFT; 2566 2566 2567 - wq->queue = dma_zalloc_coherent(&rdev->lldi.pdev->dev, 2568 - wq->memsize, &wq->dma_addr, 2569 - GFP_KERNEL); 2567 + wq->queue = dma_alloc_coherent(&rdev->lldi.pdev->dev, wq->memsize, 2568 + &wq->dma_addr, GFP_KERNEL); 2570 2569 if (!wq->queue) 2571 2570 goto err_free_rqtpool; 2572 2571
+15 -14
drivers/infiniband/hw/hfi1/init.c
··· 899 899 goto done; 900 900 901 901 /* allocate dummy tail memory for all receive contexts */ 902 - dd->rcvhdrtail_dummy_kvaddr = dma_zalloc_coherent( 903 - &dd->pcidev->dev, sizeof(u64), 904 - &dd->rcvhdrtail_dummy_dma, 905 - GFP_KERNEL); 902 + dd->rcvhdrtail_dummy_kvaddr = dma_alloc_coherent(&dd->pcidev->dev, 903 + sizeof(u64), 904 + &dd->rcvhdrtail_dummy_dma, 905 + GFP_KERNEL); 906 906 907 907 if (!dd->rcvhdrtail_dummy_kvaddr) { 908 908 dd_dev_err(dd, "cannot allocate dummy tail memory\n"); ··· 1863 1863 gfp_flags = GFP_KERNEL; 1864 1864 else 1865 1865 gfp_flags = GFP_USER; 1866 - rcd->rcvhdrq = dma_zalloc_coherent( 1867 - &dd->pcidev->dev, amt, &rcd->rcvhdrq_dma, 1868 - gfp_flags | __GFP_COMP); 1866 + rcd->rcvhdrq = dma_alloc_coherent(&dd->pcidev->dev, amt, 1867 + &rcd->rcvhdrq_dma, 1868 + gfp_flags | __GFP_COMP); 1869 1869 1870 1870 if (!rcd->rcvhdrq) { 1871 1871 dd_dev_err(dd, ··· 1876 1876 1877 1877 if (HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL) || 1878 1878 HFI1_CAP_UGET_MASK(rcd->flags, DMA_RTAIL)) { 1879 - rcd->rcvhdrtail_kvaddr = dma_zalloc_coherent( 1880 - &dd->pcidev->dev, PAGE_SIZE, 1881 - &rcd->rcvhdrqtailaddr_dma, gfp_flags); 1879 + rcd->rcvhdrtail_kvaddr = dma_alloc_coherent(&dd->pcidev->dev, 1880 + PAGE_SIZE, 1881 + &rcd->rcvhdrqtailaddr_dma, 1882 + gfp_flags); 1882 1883 if (!rcd->rcvhdrtail_kvaddr) 1883 1884 goto bail_free; 1884 1885 } ··· 1975 1974 while (alloced_bytes < rcd->egrbufs.size && 1976 1975 rcd->egrbufs.alloced < rcd->egrbufs.count) { 1977 1976 rcd->egrbufs.buffers[idx].addr = 1978 - dma_zalloc_coherent(&dd->pcidev->dev, 1979 - rcd->egrbufs.rcvtid_size, 1980 - &rcd->egrbufs.buffers[idx].dma, 1981 - gfp_flags); 1977 + dma_alloc_coherent(&dd->pcidev->dev, 1978 + rcd->egrbufs.rcvtid_size, 1979 + &rcd->egrbufs.buffers[idx].dma, 1980 + gfp_flags); 1982 1981 if (rcd->egrbufs.buffers[idx].addr) { 1983 1982 rcd->egrbufs.buffers[idx].len = 1984 1983 rcd->egrbufs.rcvtid_size;
+4 -5
drivers/infiniband/hw/hfi1/pio.c
··· 2098 2098 int bytes = TXE_NUM_CONTEXTS * sizeof(struct credit_return); 2099 2099 2100 2100 set_dev_node(&dd->pcidev->dev, i); 2101 - dd->cr_base[i].va = dma_zalloc_coherent( 2102 - &dd->pcidev->dev, 2103 - bytes, 2104 - &dd->cr_base[i].dma, 2105 - GFP_KERNEL); 2101 + dd->cr_base[i].va = dma_alloc_coherent(&dd->pcidev->dev, 2102 + bytes, 2103 + &dd->cr_base[i].dma, 2104 + GFP_KERNEL); 2106 2105 if (!dd->cr_base[i].va) { 2107 2106 set_dev_node(&dd->pcidev->dev, dd->node); 2108 2107 dd_dev_err(dd,
+9 -18
drivers/infiniband/hw/hfi1/sdma.c
··· 1453 1453 timer_setup(&sde->err_progress_check_timer, 1454 1454 sdma_err_progress_check, 0); 1455 1455 1456 - sde->descq = dma_zalloc_coherent( 1457 - &dd->pcidev->dev, 1458 - descq_cnt * sizeof(u64[2]), 1459 - &sde->descq_phys, 1460 - GFP_KERNEL 1461 - ); 1456 + sde->descq = dma_alloc_coherent(&dd->pcidev->dev, 1457 + descq_cnt * sizeof(u64[2]), 1458 + &sde->descq_phys, GFP_KERNEL); 1462 1459 if (!sde->descq) 1463 1460 goto bail; 1464 1461 sde->tx_ring = ··· 1468 1471 1469 1472 dd->sdma_heads_size = L1_CACHE_BYTES * num_engines; 1470 1473 /* Allocate memory for DMA of head registers to memory */ 1471 - dd->sdma_heads_dma = dma_zalloc_coherent( 1472 - &dd->pcidev->dev, 1473 - dd->sdma_heads_size, 1474 - &dd->sdma_heads_phys, 1475 - GFP_KERNEL 1476 - ); 1474 + dd->sdma_heads_dma = dma_alloc_coherent(&dd->pcidev->dev, 1475 + dd->sdma_heads_size, 1476 + &dd->sdma_heads_phys, 1477 + GFP_KERNEL); 1477 1478 if (!dd->sdma_heads_dma) { 1478 1479 dd_dev_err(dd, "failed to allocate SendDMA head memory\n"); 1479 1480 goto bail; 1480 1481 } 1481 1482 1482 1483 /* Allocate memory for pad */ 1483 - dd->sdma_pad_dma = dma_zalloc_coherent( 1484 - &dd->pcidev->dev, 1485 - sizeof(u32), 1486 - &dd->sdma_pad_phys, 1487 - GFP_KERNEL 1488 - ); 1484 + dd->sdma_pad_dma = dma_alloc_coherent(&dd->pcidev->dev, sizeof(u32), 1485 + &dd->sdma_pad_phys, GFP_KERNEL); 1489 1486 if (!dd->sdma_pad_dma) { 1490 1487 dd_dev_err(dd, "failed to allocate SendDMA pad memory\n"); 1491 1488 goto bail;
+6 -5
drivers/infiniband/hw/hns/hns_roce_alloc.c
··· 197 197 buf->npages = 1 << order; 198 198 buf->page_shift = page_shift; 199 199 /* MTT PA must be recorded in 4k alignment, t is 4k aligned */ 200 - buf->direct.buf = dma_zalloc_coherent(dev, 201 - size, &t, GFP_KERNEL); 200 + buf->direct.buf = dma_alloc_coherent(dev, size, &t, 201 + GFP_KERNEL); 202 202 if (!buf->direct.buf) 203 203 return -ENOMEM; 204 204 ··· 219 219 return -ENOMEM; 220 220 221 221 for (i = 0; i < buf->nbufs; ++i) { 222 - buf->page_list[i].buf = dma_zalloc_coherent(dev, 223 - page_size, &t, 224 - GFP_KERNEL); 222 + buf->page_list[i].buf = dma_alloc_coherent(dev, 223 + page_size, 224 + &t, 225 + GFP_KERNEL); 225 226 226 227 if (!buf->page_list[i].buf) 227 228 goto err_free;
+5 -5
drivers/infiniband/hw/hns/hns_roce_hw_v2.c
··· 5091 5091 eqe_alloc = i * (buf_chk_sz / eq->eqe_size); 5092 5092 size = (eq->entries - eqe_alloc) * eq->eqe_size; 5093 5093 } 5094 - eq->buf[i] = dma_zalloc_coherent(dev, size, 5094 + eq->buf[i] = dma_alloc_coherent(dev, size, 5095 5095 &(eq->buf_dma[i]), 5096 5096 GFP_KERNEL); 5097 5097 if (!eq->buf[i]) ··· 5126 5126 size = (eq->entries - eqe_alloc) 5127 5127 * eq->eqe_size; 5128 5128 } 5129 - eq->buf[idx] = dma_zalloc_coherent(dev, size, 5130 - &(eq->buf_dma[idx]), 5131 - GFP_KERNEL); 5129 + eq->buf[idx] = dma_alloc_coherent(dev, size, 5130 + &(eq->buf_dma[idx]), 5131 + GFP_KERNEL); 5132 5132 if (!eq->buf[idx]) 5133 5133 goto err_dma_alloc_buf; 5134 5134 ··· 5241 5241 goto free_cmd_mbox; 5242 5242 } 5243 5243 5244 - eq->buf_list->buf = dma_zalloc_coherent(dev, buf_chk_sz, 5244 + eq->buf_list->buf = dma_alloc_coherent(dev, buf_chk_sz, 5245 5245 &(eq->buf_list->map), 5246 5246 GFP_KERNEL); 5247 5247 if (!eq->buf_list->buf) {
+2 -2
drivers/infiniband/hw/i40iw/i40iw_utils.c
··· 745 745 if (!mem) 746 746 return I40IW_ERR_PARAM; 747 747 mem->size = ALIGN(size, alignment); 748 - mem->va = dma_zalloc_coherent(&pcidev->dev, mem->size, 749 - (dma_addr_t *)&mem->pa, GFP_KERNEL); 748 + mem->va = dma_alloc_coherent(&pcidev->dev, mem->size, 749 + (dma_addr_t *)&mem->pa, GFP_KERNEL); 750 750 if (!mem->va) 751 751 return I40IW_ERR_NO_MEMORY; 752 752 return 0;
+3 -2
drivers/infiniband/hw/mthca/mthca_memfree.c
··· 623 623 page = dev->db_tab->page + end; 624 624 625 625 alloc: 626 - page->db_rec = dma_zalloc_coherent(&dev->pdev->dev, MTHCA_ICM_PAGE_SIZE, 627 - &page->mapping, GFP_KERNEL); 626 + page->db_rec = dma_alloc_coherent(&dev->pdev->dev, 627 + MTHCA_ICM_PAGE_SIZE, &page->mapping, 628 + GFP_KERNEL); 628 629 if (!page->db_rec) { 629 630 ret = -ENOMEM; 630 631 goto out;
+2 -2
drivers/infiniband/hw/mthca/mthca_provider.c
··· 534 534 { 535 535 struct mthca_ucontext *context; 536 536 537 - qp = kmalloc(sizeof *qp, GFP_KERNEL); 537 + qp = kzalloc(sizeof(*qp), GFP_KERNEL); 538 538 if (!qp) 539 539 return ERR_PTR(-ENOMEM); 540 540 ··· 600 600 if (udata) 601 601 return ERR_PTR(-EINVAL); 602 602 603 - qp = kmalloc(sizeof (struct mthca_sqp), GFP_KERNEL); 603 + qp = kzalloc(sizeof(struct mthca_sqp), GFP_KERNEL); 604 604 if (!qp) 605 605 return ERR_PTR(-ENOMEM); 606 606
+7 -7
drivers/infiniband/hw/ocrdma/ocrdma_hw.c
··· 380 380 q->len = len; 381 381 q->entry_size = entry_size; 382 382 q->size = len * entry_size; 383 - q->va = dma_zalloc_coherent(&dev->nic_info.pdev->dev, q->size, 384 - &q->dma, GFP_KERNEL); 383 + q->va = dma_alloc_coherent(&dev->nic_info.pdev->dev, q->size, &q->dma, 384 + GFP_KERNEL); 385 385 if (!q->va) 386 386 return -ENOMEM; 387 387 return 0; ··· 1819 1819 return -ENOMEM; 1820 1820 ocrdma_init_mch(&cmd->cmd.req, OCRDMA_CMD_CREATE_CQ, 1821 1821 OCRDMA_SUBSYS_COMMON, sizeof(*cmd)); 1822 - cq->va = dma_zalloc_coherent(&pdev->dev, cq->len, &cq->pa, GFP_KERNEL); 1822 + cq->va = dma_alloc_coherent(&pdev->dev, cq->len, &cq->pa, GFP_KERNEL); 1823 1823 if (!cq->va) { 1824 1824 status = -ENOMEM; 1825 1825 goto mem_err; ··· 2209 2209 qp->sq.max_cnt = max_wqe_allocated; 2210 2210 len = (hw_pages * hw_page_size); 2211 2211 2212 - qp->sq.va = dma_zalloc_coherent(&pdev->dev, len, &pa, GFP_KERNEL); 2212 + qp->sq.va = dma_alloc_coherent(&pdev->dev, len, &pa, GFP_KERNEL); 2213 2213 if (!qp->sq.va) 2214 2214 return -EINVAL; 2215 2215 qp->sq.len = len; ··· 2259 2259 qp->rq.max_cnt = max_rqe_allocated; 2260 2260 len = (hw_pages * hw_page_size); 2261 2261 2262 - qp->rq.va = dma_zalloc_coherent(&pdev->dev, len, &pa, GFP_KERNEL); 2262 + qp->rq.va = dma_alloc_coherent(&pdev->dev, len, &pa, GFP_KERNEL); 2263 2263 if (!qp->rq.va) 2264 2264 return -ENOMEM; 2265 2265 qp->rq.pa = pa; ··· 2315 2315 if (dev->attr.ird == 0) 2316 2316 return 0; 2317 2317 2318 - qp->ird_q_va = dma_zalloc_coherent(&pdev->dev, ird_q_len, &pa, 2319 - GFP_KERNEL); 2318 + qp->ird_q_va = dma_alloc_coherent(&pdev->dev, ird_q_len, &pa, 2319 + GFP_KERNEL); 2320 2320 if (!qp->ird_q_va) 2321 2321 return -ENOMEM; 2322 2322 ocrdma_build_q_pages(&cmd->ird_addr[0], dev->attr.num_ird_pages,
+2 -2
drivers/infiniband/hw/ocrdma/ocrdma_stats.c
··· 73 73 mem->size = max_t(u32, sizeof(struct ocrdma_rdma_stats_req), 74 74 sizeof(struct ocrdma_rdma_stats_resp)); 75 75 76 - mem->va = dma_zalloc_coherent(&dev->nic_info.pdev->dev, mem->size, 77 - &mem->pa, GFP_KERNEL); 76 + mem->va = dma_alloc_coherent(&dev->nic_info.pdev->dev, mem->size, 77 + &mem->pa, GFP_KERNEL); 78 78 if (!mem->va) { 79 79 pr_err("%s: stats mbox allocation failed\n", __func__); 80 80 return false;
+3 -3
drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
··· 504 504 INIT_LIST_HEAD(&ctx->mm_head); 505 505 mutex_init(&ctx->mm_list_lock); 506 506 507 - ctx->ah_tbl.va = dma_zalloc_coherent(&pdev->dev, map_len, 508 - &ctx->ah_tbl.pa, GFP_KERNEL); 507 + ctx->ah_tbl.va = dma_alloc_coherent(&pdev->dev, map_len, 508 + &ctx->ah_tbl.pa, GFP_KERNEL); 509 509 if (!ctx->ah_tbl.va) { 510 510 kfree(ctx); 511 511 return ERR_PTR(-ENOMEM); ··· 838 838 return -ENOMEM; 839 839 840 840 for (i = 0; i < mr->num_pbls; i++) { 841 - va = dma_zalloc_coherent(&pdev->dev, dma_len, &pa, GFP_KERNEL); 841 + va = dma_alloc_coherent(&pdev->dev, dma_len, &pa, GFP_KERNEL); 842 842 if (!va) { 843 843 ocrdma_free_mr_pbl_tbl(dev, mr); 844 844 status = -ENOMEM;
+2 -2
drivers/infiniband/hw/qedr/verbs.c
··· 556 556 return ERR_PTR(-ENOMEM); 557 557 558 558 for (i = 0; i < pbl_info->num_pbls; i++) { 559 - va = dma_zalloc_coherent(&pdev->dev, pbl_info->pbl_size, 560 - &pa, flags); 559 + va = dma_alloc_coherent(&pdev->dev, pbl_info->pbl_size, &pa, 560 + flags); 561 561 if (!va) 562 562 goto err; 563 563
+34 -1
drivers/infiniband/hw/vmw_pvrdma/pvrdma.h
··· 427 427 428 428 static inline enum pvrdma_wr_opcode ib_wr_opcode_to_pvrdma(enum ib_wr_opcode op) 429 429 { 430 - return (enum pvrdma_wr_opcode)op; 430 + switch (op) { 431 + case IB_WR_RDMA_WRITE: 432 + return PVRDMA_WR_RDMA_WRITE; 433 + case IB_WR_RDMA_WRITE_WITH_IMM: 434 + return PVRDMA_WR_RDMA_WRITE_WITH_IMM; 435 + case IB_WR_SEND: 436 + return PVRDMA_WR_SEND; 437 + case IB_WR_SEND_WITH_IMM: 438 + return PVRDMA_WR_SEND_WITH_IMM; 439 + case IB_WR_RDMA_READ: 440 + return PVRDMA_WR_RDMA_READ; 441 + case IB_WR_ATOMIC_CMP_AND_SWP: 442 + return PVRDMA_WR_ATOMIC_CMP_AND_SWP; 443 + case IB_WR_ATOMIC_FETCH_AND_ADD: 444 + return PVRDMA_WR_ATOMIC_FETCH_AND_ADD; 445 + case IB_WR_LSO: 446 + return PVRDMA_WR_LSO; 447 + case IB_WR_SEND_WITH_INV: 448 + return PVRDMA_WR_SEND_WITH_INV; 449 + case IB_WR_RDMA_READ_WITH_INV: 450 + return PVRDMA_WR_RDMA_READ_WITH_INV; 451 + case IB_WR_LOCAL_INV: 452 + return PVRDMA_WR_LOCAL_INV; 453 + case IB_WR_REG_MR: 454 + return PVRDMA_WR_FAST_REG_MR; 455 + case IB_WR_MASKED_ATOMIC_CMP_AND_SWP: 456 + return PVRDMA_WR_MASKED_ATOMIC_CMP_AND_SWP; 457 + case IB_WR_MASKED_ATOMIC_FETCH_AND_ADD: 458 + return PVRDMA_WR_MASKED_ATOMIC_FETCH_AND_ADD; 459 + case IB_WR_REG_SIG_MR: 460 + return PVRDMA_WR_REG_SIG_MR; 461 + default: 462 + return PVRDMA_WR_ERROR; 463 + } 431 464 } 432 465 433 466 static inline enum ib_wc_status pvrdma_wc_status_to_ib(
+2 -2
drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
··· 890 890 dev_info(&pdev->dev, "device version %d, driver version %d\n", 891 891 dev->dsr_version, PVRDMA_VERSION); 892 892 893 - dev->dsr = dma_zalloc_coherent(&pdev->dev, sizeof(*dev->dsr), 894 - &dev->dsrbase, GFP_KERNEL); 893 + dev->dsr = dma_alloc_coherent(&pdev->dev, sizeof(*dev->dsr), 894 + &dev->dsrbase, GFP_KERNEL); 895 895 if (!dev->dsr) { 896 896 dev_err(&pdev->dev, "failed to allocate shared region\n"); 897 897 ret = -ENOMEM;
+6
drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
··· 721 721 wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) 722 722 wqe_hdr->ex.imm_data = wr->ex.imm_data; 723 723 724 + if (unlikely(wqe_hdr->opcode == PVRDMA_WR_ERROR)) { 725 + *bad_wr = wr; 726 + ret = -EINVAL; 727 + goto out; 728 + } 729 + 724 730 switch (qp->ibqp.qp_type) { 725 731 case IB_QPT_GSI: 726 732 case IB_QPT_UD:
+11
drivers/input/misc/Kconfig
··· 851 851 To compile this driver as a module, choose M here. The module will 852 852 be called sc27xx_vibra. 853 853 854 + config INPUT_STPMIC1_ONKEY 855 + tristate "STPMIC1 PMIC Onkey support" 856 + depends on MFD_STPMIC1 857 + help 858 + Say Y to enable support of onkey embedded into STPMIC1 PMIC. onkey 859 + can be used to wakeup from low power modes and force a shut-down on 860 + long press. 861 + 862 + To compile this driver as a module, choose M here: the 863 + module will be called stpmic1_onkey. 864 + 854 865 endif
+2
drivers/input/misc/Makefile
··· 71 71 obj-$(CONFIG_INPUT_SIRFSOC_ONKEY) += sirfsoc-onkey.o 72 72 obj-$(CONFIG_INPUT_SOC_BUTTON_ARRAY) += soc_button_array.o 73 73 obj-$(CONFIG_INPUT_SPARCSPKR) += sparcspkr.o 74 + obj-$(CONFIG_INPUT_STPMIC1_ONKEY) += stpmic1_onkey.o 74 75 obj-$(CONFIG_INPUT_TPS65218_PWRBUTTON) += tps65218-pwrbutton.o 75 76 obj-$(CONFIG_INPUT_TWL4030_PWRBUTTON) += twl4030-pwrbutton.o 76 77 obj-$(CONFIG_INPUT_TWL4030_VIBRA) += twl4030-vibra.o ··· 82 81 obj-$(CONFIG_INPUT_XEN_KBDDEV_FRONTEND) += xen-kbdfront.o 83 82 obj-$(CONFIG_INPUT_YEALINK) += yealink.o 84 83 obj-$(CONFIG_INPUT_IDEAPAD_SLIDEBAR) += ideapad_slidebar.o 84 +
+198
drivers/input/misc/stpmic1_onkey.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + // Copyright (C) STMicroelectronics 2018 3 + // Author: Pascal Paillet <p.paillet@st.com> for STMicroelectronics. 4 + 5 + #include <linux/input.h> 6 + #include <linux/interrupt.h> 7 + #include <linux/mfd/stpmic1.h> 8 + #include <linux/module.h> 9 + #include <linux/of.h> 10 + #include <linux/platform_device.h> 11 + #include <linux/property.h> 12 + #include <linux/regmap.h> 13 + 14 + /** 15 + * struct stpmic1_onkey - OnKey data 16 + * @input_dev: pointer to input device 17 + * @irq_falling: irq that we are hooked on to 18 + * @irq_rising: irq that we are hooked on to 19 + */ 20 + struct stpmic1_onkey { 21 + struct input_dev *input_dev; 22 + int irq_falling; 23 + int irq_rising; 24 + }; 25 + 26 + static irqreturn_t onkey_falling_irq(int irq, void *ponkey) 27 + { 28 + struct stpmic1_onkey *onkey = ponkey; 29 + struct input_dev *input_dev = onkey->input_dev; 30 + 31 + input_report_key(input_dev, KEY_POWER, 1); 32 + pm_wakeup_event(input_dev->dev.parent, 0); 33 + input_sync(input_dev); 34 + 35 + return IRQ_HANDLED; 36 + } 37 + 38 + static irqreturn_t onkey_rising_irq(int irq, void *ponkey) 39 + { 40 + struct stpmic1_onkey *onkey = ponkey; 41 + struct input_dev *input_dev = onkey->input_dev; 42 + 43 + input_report_key(input_dev, KEY_POWER, 0); 44 + pm_wakeup_event(input_dev->dev.parent, 0); 45 + input_sync(input_dev); 46 + 47 + return IRQ_HANDLED; 48 + } 49 + 50 + static int stpmic1_onkey_probe(struct platform_device *pdev) 51 + { 52 + struct stpmic1 *pmic = dev_get_drvdata(pdev->dev.parent); 53 + struct device *dev = &pdev->dev; 54 + struct input_dev *input_dev; 55 + struct stpmic1_onkey *onkey; 56 + unsigned int val, reg = 0; 57 + int error; 58 + 59 + onkey = devm_kzalloc(dev, sizeof(*onkey), GFP_KERNEL); 60 + if (!onkey) 61 + return -ENOMEM; 62 + 63 + onkey->irq_falling = platform_get_irq_byname(pdev, "onkey-falling"); 64 + if (onkey->irq_falling < 0) { 65 + dev_err(dev, "failed: request IRQ onkey-falling %d\n", 66 + onkey->irq_falling); 67 + return onkey->irq_falling; 68 + } 69 + 70 + onkey->irq_rising = platform_get_irq_byname(pdev, "onkey-rising"); 71 + if (onkey->irq_rising < 0) { 72 + dev_err(dev, "failed: request IRQ onkey-rising %d\n", 73 + onkey->irq_rising); 74 + return onkey->irq_rising; 75 + } 76 + 77 + if (!device_property_read_u32(dev, "power-off-time-sec", &val)) { 78 + if (val > 0 && val <= 16) { 79 + dev_dbg(dev, "power-off-time=%d seconds\n", val); 80 + reg |= PONKEY_PWR_OFF; 81 + reg |= ((16 - val) & PONKEY_TURNOFF_TIMER_MASK); 82 + } else { 83 + dev_err(dev, "power-off-time-sec out of range\n"); 84 + return -EINVAL; 85 + } 86 + } 87 + 88 + if (device_property_present(dev, "st,onkey-clear-cc-flag")) 89 + reg |= PONKEY_CC_FLAG_CLEAR; 90 + 91 + error = regmap_update_bits(pmic->regmap, PKEY_TURNOFF_CR, 92 + PONKEY_TURNOFF_MASK, reg); 93 + if (error) { 94 + dev_err(dev, "PKEY_TURNOFF_CR write failed: %d\n", error); 95 + return error; 96 + } 97 + 98 + if (device_property_present(dev, "st,onkey-pu-inactive")) { 99 + error = regmap_update_bits(pmic->regmap, PADS_PULL_CR, 100 + PONKEY_PU_INACTIVE, 101 + PONKEY_PU_INACTIVE); 102 + if (error) { 103 + dev_err(dev, "ONKEY Pads configuration failed: %d\n", 104 + error); 105 + return error; 106 + } 107 + } 108 + 109 + input_dev = devm_input_allocate_device(dev); 110 + if (!input_dev) { 111 + dev_err(dev, "Can't allocate Pwr Onkey Input Device\n"); 112 + return -ENOMEM; 113 + } 114 + 115 + input_dev->name = "pmic_onkey"; 116 + input_dev->phys = "pmic_onkey/input0"; 117 + 118 + input_set_capability(input_dev, EV_KEY, KEY_POWER); 119 + 120 + onkey->input_dev = input_dev; 121 + 122 + /* interrupt is nested in a thread */ 123 + error = devm_request_threaded_irq(dev, onkey->irq_falling, NULL, 124 + onkey_falling_irq, IRQF_ONESHOT, 125 + dev_name(dev), onkey); 126 + if (error) { 127 + dev_err(dev, "Can't get IRQ Onkey Falling: %d\n", error); 128 + return error; 129 + } 130 + 131 + error = devm_request_threaded_irq(dev, onkey->irq_rising, NULL, 132 + onkey_rising_irq, IRQF_ONESHOT, 133 + dev_name(dev), onkey); 134 + if (error) { 135 + dev_err(dev, "Can't get IRQ Onkey Rising: %d\n", error); 136 + return error; 137 + } 138 + 139 + error = input_register_device(input_dev); 140 + if (error) { 141 + dev_err(dev, "Can't register power button: %d\n", error); 142 + return error; 143 + } 144 + 145 + platform_set_drvdata(pdev, onkey); 146 + device_init_wakeup(dev, true); 147 + 148 + return 0; 149 + } 150 + 151 + static int __maybe_unused stpmic1_onkey_suspend(struct device *dev) 152 + { 153 + struct platform_device *pdev = to_platform_device(dev); 154 + struct stpmic1_onkey *onkey = platform_get_drvdata(pdev); 155 + 156 + if (device_may_wakeup(dev)) { 157 + enable_irq_wake(onkey->irq_falling); 158 + enable_irq_wake(onkey->irq_rising); 159 + } 160 + return 0; 161 + } 162 + 163 + static int __maybe_unused stpmic1_onkey_resume(struct device *dev) 164 + { 165 + struct platform_device *pdev = to_platform_device(dev); 166 + struct stpmic1_onkey *onkey = platform_get_drvdata(pdev); 167 + 168 + if (device_may_wakeup(dev)) { 169 + disable_irq_wake(onkey->irq_falling); 170 + disable_irq_wake(onkey->irq_rising); 171 + } 172 + return 0; 173 + } 174 + 175 + static SIMPLE_DEV_PM_OPS(stpmic1_onkey_pm, 176 + stpmic1_onkey_suspend, 177 + stpmic1_onkey_resume); 178 + 179 + static const struct of_device_id of_stpmic1_onkey_match[] = { 180 + { .compatible = "st,stpmic1-onkey" }, 181 + { }, 182 + }; 183 + 184 + MODULE_DEVICE_TABLE(of, of_stpmic1_onkey_match); 185 + 186 + static struct platform_driver stpmic1_onkey_driver = { 187 + .probe = stpmic1_onkey_probe, 188 + .driver = { 189 + .name = "stpmic1_onkey", 190 + .of_match_table = of_match_ptr(of_stpmic1_onkey_match), 191 + .pm = &stpmic1_onkey_pm, 192 + }, 193 + }; 194 + module_platform_driver(stpmic1_onkey_driver); 195 + 196 + MODULE_DESCRIPTION("Onkey driver for STPMIC1"); 197 + MODULE_AUTHOR("Pascal Paillet <p.paillet@st.com>"); 198 + MODULE_LICENSE("GPL v2");
+2 -2
drivers/input/touchscreen/raspberrypi-ts.c
··· 147 147 return -ENOMEM; 148 148 ts->pdev = pdev; 149 149 150 - ts->fw_regs_va = dma_zalloc_coherent(dev, PAGE_SIZE, &ts->fw_regs_phys, 151 - GFP_KERNEL); 150 + ts->fw_regs_va = dma_alloc_coherent(dev, PAGE_SIZE, &ts->fw_regs_phys, 151 + GFP_KERNEL); 152 152 if (!ts->fw_regs_va) { 153 153 dev_err(dev, "failed to dma_alloc_coherent\n"); 154 154 return -ENOMEM;
+2 -3
drivers/iommu/mtk_iommu_v1.c
··· 232 232 233 233 spin_lock_init(&dom->pgtlock); 234 234 235 - dom->pgt_va = dma_zalloc_coherent(data->dev, 236 - M2701_IOMMU_PGT_SIZE, 237 - &dom->pgt_pa, GFP_KERNEL); 235 + dom->pgt_va = dma_alloc_coherent(data->dev, M2701_IOMMU_PGT_SIZE, 236 + &dom->pgt_pa, GFP_KERNEL); 238 237 if (!dom->pgt_va) 239 238 return -ENOMEM; 240 239
+42 -35
drivers/irqchip/irq-csky-apb-intc.c
··· 95 95 96 96 /* Setup 64 channel slots */ 97 97 for (i = 0; i < INTC_IRQS; i += 4) 98 - writel_relaxed(build_channel_val(i, magic), reg_addr + i); 98 + writel(build_channel_val(i, magic), reg_addr + i); 99 99 } 100 100 101 101 static int __init ··· 135 135 static inline bool handle_irq_perbit(struct pt_regs *regs, u32 hwirq, 136 136 u32 irq_base) 137 137 { 138 - u32 irq; 139 - 140 138 if (hwirq == 0) 141 139 return 0; 142 140 143 - while (hwirq) { 144 - irq = __ffs(hwirq); 145 - hwirq &= ~BIT(irq); 146 - handle_domain_irq(root_domain, irq_base + irq, regs); 147 - } 141 + handle_domain_irq(root_domain, irq_base + __fls(hwirq), regs); 148 142 149 143 return 1; 150 144 } ··· 148 154 { 149 155 bool ret; 150 156 151 - do { 152 - ret = handle_irq_perbit(regs, 153 - readl_relaxed(reg_base + GX_INTC_PEN31_00), 0); 154 - ret |= handle_irq_perbit(regs, 155 - readl_relaxed(reg_base + GX_INTC_PEN63_32), 32); 156 - } while (ret); 157 + retry: 158 + ret = handle_irq_perbit(regs, 159 + readl(reg_base + GX_INTC_PEN63_32), 32); 160 + if (ret) 161 + goto retry; 162 + 163 + ret = handle_irq_perbit(regs, 164 + readl(reg_base + GX_INTC_PEN31_00), 0); 165 + if (ret) 166 + goto retry; 157 167 } 158 168 159 169 static int __init ··· 172 174 /* 173 175 * Initial enable reg to disable all interrupts 174 176 */ 175 - writel_relaxed(0x0, reg_base + GX_INTC_NEN31_00); 176 - writel_relaxed(0x0, reg_base + GX_INTC_NEN63_32); 177 + writel(0x0, reg_base + GX_INTC_NEN31_00); 178 + writel(0x0, reg_base + GX_INTC_NEN63_32); 177 179 178 180 /* 179 181 * Initial mask reg with all unmasked, because we only use enalbe reg 180 182 */ 181 - writel_relaxed(0x0, reg_base + GX_INTC_NMASK31_00); 182 - writel_relaxed(0x0, reg_base + GX_INTC_NMASK63_32); 183 + writel(0x0, reg_base + GX_INTC_NMASK31_00); 184 + writel(0x0, reg_base + GX_INTC_NMASK63_32); 183 185 184 186 setup_irq_channel(0x03020100, reg_base + GX_INTC_SOURCE); 185 187 ··· 202 204 void __iomem *reg_pen_lo = reg_base + CK_INTC_PEN31_00; 203 205 void __iomem *reg_pen_hi = reg_base + CK_INTC_PEN63_32; 204 206 205 - do { 206 - /* handle 0 - 31 irqs */ 207 - ret = handle_irq_perbit(regs, readl_relaxed(reg_pen_lo), 0); 208 - ret |= handle_irq_perbit(regs, readl_relaxed(reg_pen_hi), 32); 207 + retry: 208 + /* handle 0 - 63 irqs */ 209 + ret = handle_irq_perbit(regs, readl(reg_pen_hi), 32); 210 + if (ret) 211 + goto retry; 209 212 210 - if (nr_irq == INTC_IRQS) 211 - continue; 213 + ret = handle_irq_perbit(regs, readl(reg_pen_lo), 0); 214 + if (ret) 215 + goto retry; 212 216 213 - /* handle 64 - 127 irqs */ 214 - ret |= handle_irq_perbit(regs, 215 - readl_relaxed(reg_pen_lo + CK_INTC_DUAL_BASE), 64); 216 - ret |= handle_irq_perbit(regs, 217 - readl_relaxed(reg_pen_hi + CK_INTC_DUAL_BASE), 96); 218 - } while (ret); 217 + if (nr_irq == INTC_IRQS) 218 + return; 219 + 220 + /* handle 64 - 127 irqs */ 221 + ret = handle_irq_perbit(regs, 222 + readl(reg_pen_hi + CK_INTC_DUAL_BASE), 96); 223 + if (ret) 224 + goto retry; 225 + 226 + ret = handle_irq_perbit(regs, 227 + readl(reg_pen_lo + CK_INTC_DUAL_BASE), 64); 228 + if (ret) 229 + goto retry; 219 230 } 220 231 221 232 static int __init ··· 237 230 return ret; 238 231 239 232 /* Initial enable reg to disable all interrupts */ 240 - writel_relaxed(0, reg_base + CK_INTC_NEN31_00); 241 - writel_relaxed(0, reg_base + CK_INTC_NEN63_32); 233 + writel(0, reg_base + CK_INTC_NEN31_00); 234 + writel(0, reg_base + CK_INTC_NEN63_32); 242 235 243 236 /* Enable irq intc */ 244 - writel_relaxed(BIT(31), reg_base + CK_INTC_ICR); 237 + writel(BIT(31), reg_base + CK_INTC_ICR); 245 238 246 239 ck_set_gc(node, reg_base, CK_INTC_NEN31_00, 0); 247 240 ck_set_gc(node, reg_base, CK_INTC_NEN63_32, 32); ··· 267 260 return ret; 268 261 269 262 /* Initial enable reg to disable all interrupts */ 270 - writel_relaxed(0, reg_base + CK_INTC_NEN31_00 + CK_INTC_DUAL_BASE); 271 - writel_relaxed(0, reg_base + CK_INTC_NEN63_32 + CK_INTC_DUAL_BASE); 263 + writel(0, reg_base + CK_INTC_NEN31_00 + CK_INTC_DUAL_BASE); 264 + writel(0, reg_base + CK_INTC_NEN63_32 + CK_INTC_DUAL_BASE); 272 265 273 266 ck_set_gc(node, reg_base + CK_INTC_DUAL_BASE, CK_INTC_NEN31_00, 64); 274 267 ck_set_gc(node, reg_base + CK_INTC_DUAL_BASE, CK_INTC_NEN63_32, 96);
+1 -1
drivers/isdn/hardware/avm/b1.c
··· 423 423 int i, j; 424 424 425 425 for (j = 0; j < AVM_MAXVERSION; j++) 426 - cinfo->version[j] = "\0\0" + 1; 426 + cinfo->version[j] = ""; 427 427 for (i = 0, j = 0; 428 428 j < AVM_MAXVERSION && i < cinfo->versionlen; 429 429 j++, i += cinfo->versionbuf[i] + 1)
+1 -2
drivers/isdn/hardware/mISDN/hfcsusb.c
··· 262 262 struct dchannel *dch = &hw->dch; 263 263 int i; 264 264 265 - phi = kzalloc(sizeof(struct ph_info) + 266 - dch->dev.nrbchan * sizeof(struct ph_info_ch), GFP_ATOMIC); 265 + phi = kzalloc(struct_size(phi, bch, dch->dev.nrbchan), GFP_ATOMIC); 267 266 phi->dch.ch.protocol = hw->protocol; 268 267 phi->dch.ch.Flags = dch->Flags; 269 268 phi->dch.state = dch->state;
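hfcsusb now sizes its trailing-array allocation with struct_size(), which does the multiply-and-add with overflow checking instead of open-coded arithmetic. A rough userspace model of what the macro computes, with stand-in types rather than the real mISDN structures:

#include <stdio.h>
#include <stdlib.h>

struct ch_info { int protocol; };			/* stand-in, not the mISDN type */
struct dev_info { int state; struct ch_info bch[]; };	/* flexible array member */

int main(void)
{
	size_t nrbchan = 4;
	/* struct_size(phi, bch, nrbchan) evaluates to roughly this,
	 * plus saturation if the arithmetic would overflow: */
	size_t bytes = sizeof(struct dev_info) + nrbchan * sizeof(struct ch_info);
	struct dev_info *phi = calloc(1, bytes);

	if (!phi)
		return 1;
	printf("allocated %zu bytes for %zu channels\n", bytes, nrbchan);
	free(phi);
	return 0;
}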
+5 -1
drivers/isdn/i4l/isdn_tty.c
··· 1437 1437 { 1438 1438 modem_info *info = (modem_info *) tty->driver_data; 1439 1439 1440 + mutex_lock(&modem_info_mutex); 1440 1441 if (!old_termios) 1441 1442 isdn_tty_change_speed(info); 1442 1443 else { 1443 1444 if (tty->termios.c_cflag == old_termios->c_cflag && 1444 1445 tty->termios.c_ispeed == old_termios->c_ispeed && 1445 - tty->termios.c_ospeed == old_termios->c_ospeed) 1446 + tty->termios.c_ospeed == old_termios->c_ospeed) { 1447 + mutex_unlock(&modem_info_mutex); 1446 1448 return; 1449 + } 1447 1450 isdn_tty_change_speed(info); 1448 1451 } 1452 + mutex_unlock(&modem_info_mutex); 1449 1453 } 1450 1454 1451 1455 /*
+3 -1
drivers/leds/leds-lp5523.c
··· 318 318 319 319 /* Let the programs run for couple of ms and check the engine status */ 320 320 usleep_range(3000, 6000); 321 - lp55xx_read(chip, LP5523_REG_STATUS, &status); 321 + ret = lp55xx_read(chip, LP5523_REG_STATUS, &status); 322 + if (ret) 323 + return ret; 322 324 status &= LP5523_ENG_STATUS_MASK; 323 325 324 326 if (status != LP5523_ENG_STATUS_MASK) {
+1 -6
drivers/md/md.c
··· 207 207 struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs, 208 208 struct mddev *mddev) 209 209 { 210 - struct bio *b; 211 - 212 210 if (!mddev || !bioset_initialized(&mddev->bio_set)) 213 211 return bio_alloc(gfp_mask, nr_iovecs); 214 212 215 - b = bio_alloc_bioset(gfp_mask, nr_iovecs, &mddev->bio_set); 216 - if (!b) 217 - return NULL; 218 - return b; 213 + return bio_alloc_bioset(gfp_mask, nr_iovecs, &mddev->bio_set); 219 214 } 220 215 EXPORT_SYMBOL_GPL(bio_alloc_mddev); 221 216
+2 -2
drivers/media/pci/intel/ipu3/ipu3-cio2.c
··· 218 218 { 219 219 struct device *dev = &cio2->pci_dev->dev; 220 220 221 - q->fbpt = dma_zalloc_coherent(dev, CIO2_FBPT_SIZE, &q->fbpt_bus_addr, 222 - GFP_KERNEL); 221 + q->fbpt = dma_alloc_coherent(dev, CIO2_FBPT_SIZE, &q->fbpt_bus_addr, 222 + GFP_KERNEL); 223 223 if (!q->fbpt) 224 224 return -ENOMEM; 225 225
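This is the first of several hunks in this merge (mtk_vcodec, genwqe, sdhci, greth, slicoss, ena_com and xgene below) converting dma_zalloc_coherent() to dma_alloc_coherent(): the base allocator now returns zeroed memory, so the zeroing wrapper is redundant and callers need no memset afterwards. A loose userspace analogy for retiring such a wrapper, with made-up function names:

#include <stdio.h>
#include <stdlib.h>

/* Analogy only: once the base allocator zeroes, a zalloc wrapper adds nothing. */
static void *base_alloc(size_t size)
{
	return calloc(1, size);		/* already zeroed, like dma_alloc_coherent now */
}

int main(void)
{
	unsigned char *buf = base_alloc(64);

	if (!buf)
		return 1;
	printf("first byte: %u (zeroed by the allocator)\n", buf[0]);
	free(buf);
	return 0;
}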
+1 -1
drivers/media/platform/mtk-vcodec/mtk_vcodec_util.c
··· 49 49 struct mtk_vcodec_ctx *ctx = (struct mtk_vcodec_ctx *)data; 50 50 struct device *dev = &ctx->dev->plat_dev->dev; 51 51 52 - mem->va = dma_zalloc_coherent(dev, size, &mem->dma_addr, GFP_KERNEL); 52 + mem->va = dma_alloc_coherent(dev, size, &mem->dma_addr, GFP_KERNEL); 53 53 if (!mem->va) { 54 54 mtk_v4l2_err("%s dma_alloc size=%ld failed!", dev_name(dev), 55 55 size);
+3 -1
drivers/media/platform/vim2m.c
··· 807 807 struct vb2_v4l2_buffer *vbuf; 808 808 unsigned long flags; 809 809 810 - cancel_delayed_work_sync(&dev->work_run); 810 + if (v4l2_m2m_get_curr_priv(dev->m2m_dev) == ctx) 811 + cancel_delayed_work_sync(&dev->work_run); 812 + 811 813 for (;;) { 812 814 if (V4L2_TYPE_IS_OUTPUT(q->type)) 813 815 vbuf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
+19 -5
drivers/media/v4l2-core/v4l2-ioctl.c
··· 287 287 const struct v4l2_window *win; 288 288 const struct v4l2_sdr_format *sdr; 289 289 const struct v4l2_meta_format *meta; 290 + u32 planes; 290 291 unsigned i; 291 292 292 293 pr_cont("type=%s", prt_names(p->type, v4l2_type_names)); ··· 318 317 prt_names(mp->field, v4l2_field_names), 319 318 mp->colorspace, mp->num_planes, mp->flags, 320 319 mp->ycbcr_enc, mp->quantization, mp->xfer_func); 321 - for (i = 0; i < mp->num_planes; i++) 320 + planes = min_t(u32, mp->num_planes, VIDEO_MAX_PLANES); 321 + for (i = 0; i < planes; i++) 322 322 printk(KERN_DEBUG "plane %u: bytesperline=%u sizeimage=%u\n", i, 323 323 mp->plane_fmt[i].bytesperline, 324 324 mp->plane_fmt[i].sizeimage); ··· 1553 1551 if (unlikely(!ops->vidioc_s_fmt_vid_cap_mplane)) 1554 1552 break; 1555 1553 CLEAR_AFTER_FIELD(p, fmt.pix_mp.xfer_func); 1554 + if (p->fmt.pix_mp.num_planes > VIDEO_MAX_PLANES) 1555 + break; 1556 1556 for (i = 0; i < p->fmt.pix_mp.num_planes; i++) 1557 - CLEAR_AFTER_FIELD(p, fmt.pix_mp.plane_fmt[i].bytesperline); 1557 + CLEAR_AFTER_FIELD(&p->fmt.pix_mp.plane_fmt[i], 1558 + bytesperline); 1558 1559 return ops->vidioc_s_fmt_vid_cap_mplane(file, fh, arg); 1559 1560 case V4L2_BUF_TYPE_VIDEO_OVERLAY: 1560 1561 if (unlikely(!ops->vidioc_s_fmt_vid_overlay)) ··· 1586 1581 if (unlikely(!ops->vidioc_s_fmt_vid_out_mplane)) 1587 1582 break; 1588 1583 CLEAR_AFTER_FIELD(p, fmt.pix_mp.xfer_func); 1584 + if (p->fmt.pix_mp.num_planes > VIDEO_MAX_PLANES) 1585 + break; 1589 1586 for (i = 0; i < p->fmt.pix_mp.num_planes; i++) 1590 - CLEAR_AFTER_FIELD(p, fmt.pix_mp.plane_fmt[i].bytesperline); 1587 + CLEAR_AFTER_FIELD(&p->fmt.pix_mp.plane_fmt[i], 1588 + bytesperline); 1591 1589 return ops->vidioc_s_fmt_vid_out_mplane(file, fh, arg); 1592 1590 case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY: 1593 1591 if (unlikely(!ops->vidioc_s_fmt_vid_out_overlay)) ··· 1656 1648 if (unlikely(!ops->vidioc_try_fmt_vid_cap_mplane)) 1657 1649 break; 1658 1650 CLEAR_AFTER_FIELD(p, fmt.pix_mp.xfer_func); 1651 + if (p->fmt.pix_mp.num_planes > VIDEO_MAX_PLANES) 1652 + break; 1659 1653 for (i = 0; i < p->fmt.pix_mp.num_planes; i++) 1660 - CLEAR_AFTER_FIELD(p, fmt.pix_mp.plane_fmt[i].bytesperline); 1654 + CLEAR_AFTER_FIELD(&p->fmt.pix_mp.plane_fmt[i], 1655 + bytesperline); 1661 1656 return ops->vidioc_try_fmt_vid_cap_mplane(file, fh, arg); 1662 1657 case V4L2_BUF_TYPE_VIDEO_OVERLAY: 1663 1658 if (unlikely(!ops->vidioc_try_fmt_vid_overlay)) ··· 1689 1678 if (unlikely(!ops->vidioc_try_fmt_vid_out_mplane)) 1690 1679 break; 1691 1680 CLEAR_AFTER_FIELD(p, fmt.pix_mp.xfer_func); 1681 + if (p->fmt.pix_mp.num_planes > VIDEO_MAX_PLANES) 1682 + break; 1692 1683 for (i = 0; i < p->fmt.pix_mp.num_planes; i++) 1693 - CLEAR_AFTER_FIELD(p, fmt.pix_mp.plane_fmt[i].bytesperline); 1684 + CLEAR_AFTER_FIELD(&p->fmt.pix_mp.plane_fmt[i], 1685 + bytesperline); 1694 1686 return ops->vidioc_try_fmt_vid_out_mplane(file, fh, arg); 1695 1687 case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY: 1696 1688 if (unlikely(!ops->vidioc_try_fmt_vid_out_overlay))
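The v4l2-ioctl hunks bound a userspace-supplied num_planes before it is used to index the fixed-size plane_fmt[] array, both in the debug printer (min_t against VIDEO_MAX_PLANES) and in the s_fmt/try_fmt paths; the CLEAR_AFTER_FIELD calls are likewise retargeted at the individual plane entry. A minimal sketch of the clamp, with MAX_PLANES standing in for VIDEO_MAX_PLANES:

#include <stdio.h>

#define MAX_PLANES 8	/* stand-in for VIDEO_MAX_PLANES */

struct pix_mp {
	unsigned int num_planes;
	unsigned int bytesperline[MAX_PLANES];
};

static void dump_planes(const struct pix_mp *mp)
{
	/* Never trust the caller-supplied count when indexing a fixed array. */
	unsigned int n = mp->num_planes < MAX_PLANES ? mp->num_planes : MAX_PLANES;

	for (unsigned int i = 0; i < n; i++)
		printf("plane %u: bytesperline=%u\n", i, mp->bytesperline[i]);
}

int main(void)
{
	struct pix_mp hostile = { .num_planes = 1000u };

	dump_planes(&hostile);	/* touches at most MAX_PLANES entries */
	return 0;
}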
+17 -1
drivers/mfd/Kconfig
··· 102 102 config MFD_AT91_USART 103 103 tristate "AT91 USART Driver" 104 104 select MFD_CORE 105 + depends on ARCH_AT91 || COMPILE_TEST 105 106 help 106 107 Select this to get support for AT91 USART IP. This is a wrapper 107 108 over at91-usart-serial driver and usart-spi-driver. Only one function ··· 215 214 config MFD_CROS_EC_CHARDEV 216 215 tristate "Chrome OS Embedded Controller userspace device interface" 217 216 depends on MFD_CROS_EC 218 - select CROS_EC_CTL 219 217 ---help--- 220 218 This driver adds support to talk with the ChromeOS EC from userspace. 221 219 ··· 1871 1871 Select this option to enable STM32 timers driver used 1872 1872 for PWM and IIO Timer. This driver allow to share the 1873 1873 registers between the others drivers. 1874 + 1875 + config MFD_STPMIC1 1876 + tristate "Support for STPMIC1 PMIC" 1877 + depends on (I2C=y && OF) 1878 + select REGMAP_I2C 1879 + select REGMAP_IRQ 1880 + select MFD_CORE 1881 + help 1882 + Support for ST Microelectronics STPMIC1 PMIC. STPMIC1 has power on 1883 + key, watchdog and regulator functionalities which are supported via 1884 + the relevant subsystems. This driver provides core support for the 1885 + STPMIC1. In order to use the actual functionality of the device other 1886 + drivers must be enabled. 1887 + 1888 + To compile this driver as a module, choose M here: the 1889 + module will be called stpmic1. 1874 1890 1875 1891 menu "Multimedia Capabilities Port drivers" 1876 1892 depends on ARCH_SA1100
+1
drivers/mfd/Makefile
··· 233 233 obj-$(CONFIG_MFD_MT6397) += mt6397-core.o 234 234 235 235 obj-$(CONFIG_MFD_ALTERA_A10SR) += altera-a10sr.o 236 + obj-$(CONFIG_MFD_STPMIC1) += stpmic1.o 236 237 obj-$(CONFIG_MFD_SUN4I_GPADC) += sun4i-gpadc.o 237 238 238 239 obj-$(CONFIG_MFD_STM32_LPTIMER) += stm32-lptimer.o
+1 -1
drivers/mfd/ab8500-core.c
··· 261 261 mutex_unlock(&ab8500->lock); 262 262 dev_vdbg(ab8500->dev, "rd: addr %#x => data %#x\n", addr, ret); 263 263 264 - return ret; 264 + return (ret < 0) ? ret : 0; 265 265 } 266 266 267 267 static int ab8500_get_register(struct device *dev, u8 bank,
+70 -56
drivers/mfd/axp20x.c
··· 641 641 642 642 static const struct mfd_cell axp223_cells[] = { 643 643 { 644 - .name = "axp221-pek", 645 - .num_resources = ARRAY_SIZE(axp22x_pek_resources), 646 - .resources = axp22x_pek_resources, 644 + .name = "axp221-pek", 645 + .num_resources = ARRAY_SIZE(axp22x_pek_resources), 646 + .resources = axp22x_pek_resources, 647 647 }, { 648 648 .name = "axp22x-adc", 649 649 .of_compatible = "x-powers,axp221-adc", ··· 651 651 .name = "axp20x-battery-power-supply", 652 652 .of_compatible = "x-powers,axp221-battery-power-supply", 653 653 }, { 654 - .name = "axp20x-regulator", 654 + .name = "axp20x-regulator", 655 655 }, { 656 656 .name = "axp20x-ac-power-supply", 657 657 .of_compatible = "x-powers,axp221-ac-power-supply", ··· 667 667 668 668 static const struct mfd_cell axp152_cells[] = { 669 669 { 670 - .name = "axp20x-pek", 671 - .num_resources = ARRAY_SIZE(axp152_pek_resources), 672 - .resources = axp152_pek_resources, 670 + .name = "axp20x-pek", 671 + .num_resources = ARRAY_SIZE(axp152_pek_resources), 672 + .resources = axp152_pek_resources, 673 673 }, 674 674 }; 675 675 ··· 698 698 699 699 static const struct mfd_cell axp288_cells[] = { 700 700 { 701 - .name = "axp288_adc", 702 - .num_resources = ARRAY_SIZE(axp288_adc_resources), 703 - .resources = axp288_adc_resources, 704 - }, 705 - { 706 - .name = "axp288_extcon", 707 - .num_resources = ARRAY_SIZE(axp288_extcon_resources), 708 - .resources = axp288_extcon_resources, 709 - }, 710 - { 711 - .name = "axp288_charger", 712 - .num_resources = ARRAY_SIZE(axp288_charger_resources), 713 - .resources = axp288_charger_resources, 714 - }, 715 - { 716 - .name = "axp288_fuel_gauge", 717 - .num_resources = ARRAY_SIZE(axp288_fuel_gauge_resources), 718 - .resources = axp288_fuel_gauge_resources, 719 - }, 720 - { 721 - .name = "axp221-pek", 722 - .num_resources = ARRAY_SIZE(axp288_power_button_resources), 723 - .resources = axp288_power_button_resources, 724 - }, 725 - { 726 - .name = "axp288_pmic_acpi", 701 + .name = "axp288_adc", 702 + .num_resources = ARRAY_SIZE(axp288_adc_resources), 703 + .resources = axp288_adc_resources, 704 + }, { 705 + .name = "axp288_extcon", 706 + .num_resources = ARRAY_SIZE(axp288_extcon_resources), 707 + .resources = axp288_extcon_resources, 708 + }, { 709 + .name = "axp288_charger", 710 + .num_resources = ARRAY_SIZE(axp288_charger_resources), 711 + .resources = axp288_charger_resources, 712 + }, { 713 + .name = "axp288_fuel_gauge", 714 + .num_resources = ARRAY_SIZE(axp288_fuel_gauge_resources), 715 + .resources = axp288_fuel_gauge_resources, 716 + }, { 717 + .name = "axp221-pek", 718 + .num_resources = ARRAY_SIZE(axp288_power_button_resources), 719 + .resources = axp288_power_button_resources, 720 + }, { 721 + .name = "axp288_pmic_acpi", 727 722 }, 728 723 }; 729 724 730 725 static const struct mfd_cell axp803_cells[] = { 731 726 { 732 - .name = "axp221-pek", 733 - .num_resources = ARRAY_SIZE(axp803_pek_resources), 734 - .resources = axp803_pek_resources, 727 + .name = "axp221-pek", 728 + .num_resources = ARRAY_SIZE(axp803_pek_resources), 729 + .resources = axp803_pek_resources, 730 + }, { 731 + .name = "axp20x-gpio", 732 + .of_compatible = "x-powers,axp813-gpio", 733 + }, { 734 + .name = "axp813-adc", 735 + .of_compatible = "x-powers,axp813-adc", 736 + }, { 737 + .name = "axp20x-battery-power-supply", 738 + .of_compatible = "x-powers,axp813-battery-power-supply", 739 + }, { 740 + .name = "axp20x-ac-power-supply", 741 + .of_compatible = "x-powers,axp813-ac-power-supply", 742 + .num_resources = 
ARRAY_SIZE(axp20x_ac_power_supply_resources), 743 + .resources = axp20x_ac_power_supply_resources, 735 744 }, 736 - { .name = "axp20x-regulator" }, 745 + { .name = "axp20x-regulator" }, 737 746 }; 738 747 739 748 static const struct mfd_cell axp806_self_working_cells[] = { 740 749 { 741 - .name = "axp221-pek", 742 - .num_resources = ARRAY_SIZE(axp806_pek_resources), 743 - .resources = axp806_pek_resources, 750 + .name = "axp221-pek", 751 + .num_resources = ARRAY_SIZE(axp806_pek_resources), 752 + .resources = axp806_pek_resources, 744 753 }, 745 - { .name = "axp20x-regulator" }, 754 + { .name = "axp20x-regulator" }, 746 755 }; 747 756 748 757 static const struct mfd_cell axp806_cells[] = { 749 758 { 750 - .id = 2, 751 - .name = "axp20x-regulator", 759 + .id = 2, 760 + .name = "axp20x-regulator", 752 761 }, 753 762 }; 754 763 755 764 static const struct mfd_cell axp809_cells[] = { 756 765 { 757 - .name = "axp221-pek", 758 - .num_resources = ARRAY_SIZE(axp809_pek_resources), 759 - .resources = axp809_pek_resources, 766 + .name = "axp221-pek", 767 + .num_resources = ARRAY_SIZE(axp809_pek_resources), 768 + .resources = axp809_pek_resources, 760 769 }, { 761 - .id = 1, 762 - .name = "axp20x-regulator", 770 + .id = 1, 771 + .name = "axp20x-regulator", 763 772 }, 764 773 }; 765 774 766 775 static const struct mfd_cell axp813_cells[] = { 767 776 { 768 - .name = "axp221-pek", 769 - .num_resources = ARRAY_SIZE(axp803_pek_resources), 770 - .resources = axp803_pek_resources, 777 + .name = "axp221-pek", 778 + .num_resources = ARRAY_SIZE(axp803_pek_resources), 779 + .resources = axp803_pek_resources, 771 780 }, { 772 - .name = "axp20x-regulator", 781 + .name = "axp20x-regulator", 773 782 }, { 774 - .name = "axp20x-gpio", 775 - .of_compatible = "x-powers,axp813-gpio", 783 + .name = "axp20x-gpio", 784 + .of_compatible = "x-powers,axp813-gpio", 776 785 }, { 777 - .name = "axp813-adc", 778 - .of_compatible = "x-powers,axp813-adc", 786 + .name = "axp813-adc", 787 + .of_compatible = "x-powers,axp813-adc", 779 788 }, { 780 789 .name = "axp20x-battery-power-supply", 781 790 .of_compatible = "x-powers,axp813-battery-power-supply", 791 + }, { 792 + .name = "axp20x-ac-power-supply", 793 + .of_compatible = "x-powers,axp813-ac-power-supply", 794 + .num_resources = ARRAY_SIZE(axp20x_ac_power_supply_resources), 795 + .resources = axp20x_ac_power_supply_resources, 782 796 }, 783 797 }; 784 798
+1
drivers/mfd/bd9571mwv.c
··· 59 59 }; 60 60 61 61 static const struct regmap_range bd9571mwv_volatile_yes_ranges[] = { 62 + regmap_reg_range(BD9571MWV_DVFS_MONIVDAC, BD9571MWV_DVFS_MONIVDAC), 62 63 regmap_reg_range(BD9571MWV_GPIO_IN, BD9571MWV_GPIO_IN), 63 64 regmap_reg_range(BD9571MWV_GPIO_INT, BD9571MWV_GPIO_INT), 64 65 regmap_reg_range(BD9571MWV_INT_INTREQ, BD9571MWV_INT_INTREQ),
+3 -11
drivers/mfd/cros_ec.c
··· 129 129 } 130 130 } 131 131 132 - err = mfd_add_devices(ec_dev->dev, PLATFORM_DEVID_AUTO, &ec_cell, 1, 133 - NULL, ec_dev->irq, NULL); 132 + err = devm_mfd_add_devices(ec_dev->dev, PLATFORM_DEVID_AUTO, &ec_cell, 133 + 1, NULL, ec_dev->irq, NULL); 134 134 if (err) { 135 135 dev_err(dev, 136 136 "Failed to register Embedded Controller subdevice %d\n", ··· 147 147 * - the EC is responsive at init time (it is not true for a 148 148 * sensor hub. 149 149 */ 150 - err = mfd_add_devices(ec_dev->dev, PLATFORM_DEVID_AUTO, 150 + err = devm_mfd_add_devices(ec_dev->dev, PLATFORM_DEVID_AUTO, 151 151 &ec_pd_cell, 1, NULL, ec_dev->irq, NULL); 152 152 if (err) { 153 153 dev_err(dev, ··· 180 180 return 0; 181 181 } 182 182 EXPORT_SYMBOL(cros_ec_register); 183 - 184 - int cros_ec_remove(struct cros_ec_device *ec_dev) 185 - { 186 - mfd_remove_devices(ec_dev->dev); 187 - 188 - return 0; 189 - } 190 - EXPORT_SYMBOL(cros_ec_remove); 191 183 192 184 #ifdef CONFIG_PM_SLEEP 193 185 int cros_ec_suspend(struct cros_ec_device *ec_dev)
+33 -57
drivers/mfd/cros_ec_dev.c
··· 21 21 #include <linux/mfd/core.h> 22 22 #include <linux/module.h> 23 23 #include <linux/mod_devicetable.h> 24 + #include <linux/of_platform.h> 24 25 #include <linux/platform_device.h> 25 26 #include <linux/pm.h> 26 27 #include <linux/slab.h> ··· 35 34 #define CROS_MAX_DEV 128 36 35 static int ec_major; 37 36 38 - static const struct attribute_group *cros_ec_groups[] = { 39 - &cros_ec_attr_group, 40 - &cros_ec_lightbar_attr_group, 41 - &cros_ec_vbc_attr_group, 42 - NULL, 43 - }; 44 - 45 37 static struct class cros_class = { 46 38 .owner = THIS_MODULE, 47 39 .name = "chromeos", 48 - .dev_groups = cros_ec_groups, 49 40 }; 50 41 51 42 /* Basic communication */ ··· 388 395 { .name = "cros-usbpd-charger" } 389 396 }; 390 397 398 + static const struct mfd_cell cros_ec_platform_cells[] = { 399 + { .name = "cros-ec-debugfs" }, 400 + { .name = "cros-ec-lightbar" }, 401 + { .name = "cros-ec-sysfs" }, 402 + }; 403 + 404 + static const struct mfd_cell cros_ec_vbc_cells[] = { 405 + { .name = "cros-ec-vbc" } 406 + }; 407 + 391 408 static int ec_device_probe(struct platform_device *pdev) 392 409 { 393 410 int retval = -ENOMEM; 411 + struct device_node *node; 394 412 struct device *dev = &pdev->dev; 395 413 struct cros_ec_platform *ec_platform = dev_get_platdata(dev); 396 414 struct cros_ec_dev *ec = kzalloc(sizeof(*ec), GFP_KERNEL); ··· 474 470 retval); 475 471 } 476 472 477 - /* Take control of the lightbar from the EC. */ 478 - lb_manual_suspend_ctrl(ec, 1); 479 - 480 473 /* We can now add the sysfs class, we know which parameter to show */ 481 474 retval = cdev_device_add(&ec->cdev, &ec->class_dev); 482 475 if (retval) { ··· 481 480 goto failed; 482 481 } 483 482 484 - if (cros_ec_debugfs_init(ec)) 485 - dev_warn(dev, "failed to create debugfs directory\n"); 483 + retval = mfd_add_devices(ec->dev, PLATFORM_DEVID_AUTO, 484 + cros_ec_platform_cells, 485 + ARRAY_SIZE(cros_ec_platform_cells), 486 + NULL, 0, NULL); 487 + if (retval) 488 + dev_warn(ec->dev, 489 + "failed to add cros-ec platform devices: %d\n", 490 + retval); 491 + 492 + /* Check whether this EC instance has a VBC NVRAM */ 493 + node = ec->ec_dev->dev->of_node; 494 + if (of_property_read_bool(node, "google,has-vbc-nvram")) { 495 + retval = mfd_add_devices(ec->dev, PLATFORM_DEVID_AUTO, 496 + cros_ec_vbc_cells, 497 + ARRAY_SIZE(cros_ec_vbc_cells), 498 + NULL, 0, NULL); 499 + if (retval) 500 + dev_warn(ec->dev, "failed to add VBC devices: %d\n", 501 + retval); 502 + } 486 503 487 504 return 0; 488 505 ··· 513 494 { 514 495 struct cros_ec_dev *ec = dev_get_drvdata(&pdev->dev); 515 496 516 - /* Let the EC take over the lightbar again. 
*/ 517 - lb_manual_suspend_ctrl(ec, 0); 518 - 519 - cros_ec_debugfs_remove(ec); 520 - 497 + mfd_remove_devices(ec->dev); 521 498 cdev_del(&ec->cdev); 522 499 device_unregister(&ec->class_dev); 523 500 return 0; 524 - } 525 - 526 - static void ec_device_shutdown(struct platform_device *pdev) 527 - { 528 - struct cros_ec_dev *ec = dev_get_drvdata(&pdev->dev); 529 - 530 - /* Be sure to clear up debugfs delayed works */ 531 - cros_ec_debugfs_remove(ec); 532 501 } 533 502 534 503 static const struct platform_device_id cros_ec_id[] = { ··· 525 518 }; 526 519 MODULE_DEVICE_TABLE(platform, cros_ec_id); 527 520 528 - static __maybe_unused int ec_device_suspend(struct device *dev) 529 - { 530 - struct cros_ec_dev *ec = dev_get_drvdata(dev); 531 - 532 - cros_ec_debugfs_suspend(ec); 533 - 534 - lb_suspend(ec); 535 - 536 - return 0; 537 - } 538 - 539 - static __maybe_unused int ec_device_resume(struct device *dev) 540 - { 541 - struct cros_ec_dev *ec = dev_get_drvdata(dev); 542 - 543 - cros_ec_debugfs_resume(ec); 544 - 545 - lb_resume(ec); 546 - 547 - return 0; 548 - } 549 - 550 - static const struct dev_pm_ops cros_ec_dev_pm_ops = { 551 - #ifdef CONFIG_PM_SLEEP 552 - .suspend = ec_device_suspend, 553 - .resume = ec_device_resume, 554 - #endif 555 - }; 556 - 557 521 static struct platform_driver cros_ec_dev_driver = { 558 522 .driver = { 559 523 .name = DRV_NAME, 560 - .pm = &cros_ec_dev_pm_ops, 561 524 }, 562 525 .id_table = cros_ec_id, 563 526 .probe = ec_device_probe, 564 527 .remove = ec_device_remove, 565 - .shutdown = ec_device_shutdown, 566 528 }; 567 529 568 530 static int __init cros_ec_dev_init(void)
-6
drivers/mfd/cros_ec_dev.h
··· 44 44 #define CROS_EC_DEV_IOCXCMD _IOWR(CROS_EC_DEV_IOC, 0, struct cros_ec_command) 45 45 #define CROS_EC_DEV_IOCRDMEM _IOWR(CROS_EC_DEV_IOC, 1, struct cros_ec_readmem) 46 46 47 - /* Lightbar utilities */ 48 - extern bool ec_has_lightbar(struct cros_ec_dev *ec); 49 - extern int lb_manual_suspend_ctrl(struct cros_ec_dev *ec, uint8_t enable); 50 - extern int lb_suspend(struct cros_ec_dev *ec); 51 - extern int lb_resume(struct cros_ec_dev *ec); 52 - 53 47 #endif /* _CROS_EC_DEV_H_ */
+2 -2
drivers/mfd/db8500-prcmu.c
··· 2584 2584 .irq_unmask = prcmu_irq_unmask, 2585 2585 }; 2586 2586 2587 - static __init char *fw_project_name(u32 project) 2587 + static char *fw_project_name(u32 project) 2588 2588 { 2589 2589 switch (project) { 2590 2590 case PRCMU_FW_PROJECT_U8500: ··· 2732 2732 INIT_WORK(&mb0_transfer.mask_work, prcmu_mask_work); 2733 2733 } 2734 2734 2735 - static void __init init_prcm_registers(void) 2735 + static void init_prcm_registers(void) 2736 2736 { 2737 2737 u32 val; 2738 2738
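db8500-prcmu (and twl-core later in this merge) drop __init from functions that stay reachable after boot: __init code lives in a section the kernel frees once initialization finishes, so calling it later jumps into released memory. A userspace analogy of the hazard, using init-only data in place of init-only code:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Analogy: "init-only" state released after boot must not be used later. */
static char *init_only;

static void boot_setup(void)
{
	init_only = strdup("PRCMU firmware info");
}

static void free_initmem(void)		/* the kernel does this once, after boot */
{
	free(init_only);
	init_only = NULL;
}

static const char *late_query(void)	/* may run long after init */
{
	return init_only ? init_only : "init data is gone; keep this non-__init";
}

int main(void)
{
	boot_setup();
	free_initmem();
	puts(late_query());
	return 0;
}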
+3 -1
drivers/mfd/exynos-lpass.c
··· 82 82 LPASS_INTR_SFR | LPASS_INTR_DMA | LPASS_INTR_I2S); 83 83 84 84 regmap_write(lpass->top, SFR_LPASS_INTR_CPU_MASK, 85 - LPASS_INTR_SFR | LPASS_INTR_DMA | LPASS_INTR_I2S); 85 + LPASS_INTR_SFR | LPASS_INTR_DMA | LPASS_INTR_I2S | 86 + LPASS_INTR_UART); 86 87 87 88 exynos_lpass_core_sw_reset(lpass, LPASS_I2S_SW_RESET); 88 89 exynos_lpass_core_sw_reset(lpass, LPASS_DMA_SW_RESET); 89 90 exynos_lpass_core_sw_reset(lpass, LPASS_MEM_SW_RESET); 91 + exynos_lpass_core_sw_reset(lpass, LPASS_UART_SW_RESET); 90 92 } 91 93 92 94 static void exynos_lpass_disable(struct exynos_lpass *lpass)
+4 -1
drivers/mfd/madera-core.c
··· 15 15 #include <linux/gpio.h> 16 16 #include <linux/mfd/core.h> 17 17 #include <linux/module.h> 18 + #include <linux/mutex.h> 18 19 #include <linux/notifier.h> 19 20 #include <linux/of.h> 20 21 #include <linux/of_gpio.h> ··· 156 155 usleep_range(MADERA_BOOT_POLL_INTERVAL_USEC / 2, 157 156 MADERA_BOOT_POLL_INTERVAL_USEC); 158 157 regmap_read(madera->regmap, MADERA_IRQ1_RAW_STATUS_1, &val); 159 - }; 158 + } 160 159 161 160 if (!(val & MADERA_BOOT_DONE_STS1)) { 162 161 dev_err(madera->dev, "Polling BOOT_DONE_STS timed out\n"); ··· 358 357 359 358 dev_set_drvdata(madera->dev, madera); 360 359 BLOCKING_INIT_NOTIFIER_HEAD(&madera->notifier); 360 + mutex_init(&madera->dapm_ptr_lock); 361 + 361 362 madera_set_micbias_info(madera); 362 363 363 364 /*
+1 -1
drivers/mfd/max77620.c
··· 280 280 281 281 for (fps_id = 0; fps_id < MAX77620_FPS_COUNT; fps_id++) { 282 282 sprintf(fps_name, "fps%d", fps_id); 283 - if (!strcmp(fps_np->name, fps_name)) 283 + if (of_node_name_eq(fps_np, fps_name)) 284 284 break; 285 285 } 286 286
+3 -1
drivers/mfd/mc13xxx-core.c
··· 274 274 275 275 mc13xxx->adcflags |= MC13XXX_ADC_WORKING; 276 276 277 - mc13xxx_reg_read(mc13xxx, MC13XXX_ADC0, &old_adc0); 277 + ret = mc13xxx_reg_read(mc13xxx, MC13XXX_ADC0, &old_adc0); 278 + if (ret) 279 + goto out; 278 280 279 281 adc0 = MC13XXX_ADC0_ADINC1 | MC13XXX_ADC0_ADINC2 | 280 282 MC13XXX_ADC0_CHRGRAWDIV;
+1 -2
drivers/mfd/mt6397-core.c
··· 329 329 330 330 default: 331 331 dev_err(&pdev->dev, "unsupported chip: %d\n", id); 332 - ret = -ENODEV; 333 - break; 332 + return -ENODEV; 334 333 } 335 334 336 335 if (ret) {
+4
drivers/mfd/qcom_rpm.c
··· 638 638 return -EFAULT; 639 639 } 640 640 641 + writel(fw_version[0], RPM_CTRL_REG(rpm, 0)); 642 + writel(fw_version[1], RPM_CTRL_REG(rpm, 1)); 643 + writel(fw_version[2], RPM_CTRL_REG(rpm, 2)); 644 + 641 645 dev_info(&pdev->dev, "RPM firmware %u.%u.%u\n", fw_version[0], 642 646 fw_version[1], 643 647 fw_version[2]);
+1 -1
drivers/mfd/rave-sp.c
··· 109 109 /** 110 110 * struct rave_sp_checksum - Variant specific checksum implementation details 111 111 * 112 - * @length: Caculated checksum length 112 + * @length: Calculated checksum length 113 113 * @subroutine: Utilized checksum algorithm implementation 114 114 */ 115 115 struct rave_sp_checksum {
+6 -6
drivers/mfd/stmpe.c
··· 1358 1358 pdata->autosleep = (pdata->autosleep_timeout) ? true : false; 1359 1359 1360 1360 for_each_child_of_node(np, child) { 1361 - if (!strcmp(child->name, "stmpe_gpio")) { 1361 + if (of_node_name_eq(child, "stmpe_gpio")) { 1362 1362 pdata->blocks |= STMPE_BLOCK_GPIO; 1363 - } else if (!strcmp(child->name, "stmpe_keypad")) { 1363 + } else if (of_node_name_eq(child, "stmpe_keypad")) { 1364 1364 pdata->blocks |= STMPE_BLOCK_KEYPAD; 1365 - } else if (!strcmp(child->name, "stmpe_touchscreen")) { 1365 + } else if (of_node_name_eq(child, "stmpe_touchscreen")) { 1366 1366 pdata->blocks |= STMPE_BLOCK_TOUCHSCREEN; 1367 - } else if (!strcmp(child->name, "stmpe_adc")) { 1367 + } else if (of_node_name_eq(child, "stmpe_adc")) { 1368 1368 pdata->blocks |= STMPE_BLOCK_ADC; 1369 - } else if (!strcmp(child->name, "stmpe_pwm")) { 1369 + } else if (of_node_name_eq(child, "stmpe_pwm")) { 1370 1370 pdata->blocks |= STMPE_BLOCK_PWM; 1371 - } else if (!strcmp(child->name, "stmpe_rotator")) { 1371 + } else if (of_node_name_eq(child, "stmpe_rotator")) { 1372 1372 pdata->blocks |= STMPE_BLOCK_ROTATOR; 1373 1373 } 1374 1374 }
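max77620 above and stmpe here switch from strcmp() on the raw node name to of_node_name_eq(), which matches the name portion of a device-tree node while ignoring any "@unit-address" suffix. A rough model of the comparison:

#include <stdio.h>
#include <string.h>

/* Rough model of of_node_name_eq(): compare up to any '@' in the full name. */
static int node_name_eq(const char *full_name, const char *name)
{
	size_t len = strcspn(full_name, "@");

	return strlen(name) == len && strncmp(full_name, name, len) == 0;
}

int main(void)
{
	printf("%d %d %d\n",
	       node_name_eq("stmpe_gpio", "stmpe_gpio"),	/* 1 */
	       node_name_eq("stmpe_gpio@0", "stmpe_gpio"),	/* 1 */
	       node_name_eq("stmpe_gpio", "stmpe"));		/* 0 */
	return 0;
}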
+213
drivers/mfd/stpmic1.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + // Copyright (C) STMicroelectronics 2018 3 + // Author: Pascal Paillet <p.paillet@st.com> 4 + 5 + #include <linux/i2c.h> 6 + #include <linux/interrupt.h> 7 + #include <linux/mfd/core.h> 8 + #include <linux/mfd/stpmic1.h> 9 + #include <linux/module.h> 10 + #include <linux/of.h> 11 + #include <linux/of_irq.h> 12 + #include <linux/of_platform.h> 13 + #include <linux/pm_wakeirq.h> 14 + #include <linux/regmap.h> 15 + 16 + #include <dt-bindings/mfd/st,stpmic1.h> 17 + 18 + #define STPMIC1_MAIN_IRQ 0 19 + 20 + static const struct regmap_range stpmic1_readable_ranges[] = { 21 + regmap_reg_range(TURN_ON_SR, VERSION_SR), 22 + regmap_reg_range(SWOFF_PWRCTRL_CR, LDO6_STDBY_CR), 23 + regmap_reg_range(BST_SW_CR, BST_SW_CR), 24 + regmap_reg_range(INT_PENDING_R1, INT_PENDING_R4), 25 + regmap_reg_range(INT_CLEAR_R1, INT_CLEAR_R4), 26 + regmap_reg_range(INT_MASK_R1, INT_MASK_R4), 27 + regmap_reg_range(INT_SET_MASK_R1, INT_SET_MASK_R4), 28 + regmap_reg_range(INT_CLEAR_MASK_R1, INT_CLEAR_MASK_R4), 29 + regmap_reg_range(INT_SRC_R1, INT_SRC_R1), 30 + }; 31 + 32 + static const struct regmap_range stpmic1_writeable_ranges[] = { 33 + regmap_reg_range(SWOFF_PWRCTRL_CR, LDO6_STDBY_CR), 34 + regmap_reg_range(BST_SW_CR, BST_SW_CR), 35 + regmap_reg_range(INT_CLEAR_R1, INT_CLEAR_R4), 36 + regmap_reg_range(INT_SET_MASK_R1, INT_SET_MASK_R4), 37 + regmap_reg_range(INT_CLEAR_MASK_R1, INT_CLEAR_MASK_R4), 38 + }; 39 + 40 + static const struct regmap_range stpmic1_volatile_ranges[] = { 41 + regmap_reg_range(TURN_ON_SR, VERSION_SR), 42 + regmap_reg_range(WCHDG_CR, WCHDG_CR), 43 + regmap_reg_range(INT_PENDING_R1, INT_PENDING_R4), 44 + regmap_reg_range(INT_SRC_R1, INT_SRC_R4), 45 + }; 46 + 47 + static const struct regmap_access_table stpmic1_readable_table = { 48 + .yes_ranges = stpmic1_readable_ranges, 49 + .n_yes_ranges = ARRAY_SIZE(stpmic1_readable_ranges), 50 + }; 51 + 52 + static const struct regmap_access_table stpmic1_writeable_table = { 53 + .yes_ranges = stpmic1_writeable_ranges, 54 + .n_yes_ranges = ARRAY_SIZE(stpmic1_writeable_ranges), 55 + }; 56 + 57 + static const struct regmap_access_table stpmic1_volatile_table = { 58 + .yes_ranges = stpmic1_volatile_ranges, 59 + .n_yes_ranges = ARRAY_SIZE(stpmic1_volatile_ranges), 60 + }; 61 + 62 + const struct regmap_config stpmic1_regmap_config = { 63 + .reg_bits = 8, 64 + .val_bits = 8, 65 + .cache_type = REGCACHE_RBTREE, 66 + .max_register = PMIC_MAX_REGISTER_ADDRESS, 67 + .rd_table = &stpmic1_readable_table, 68 + .wr_table = &stpmic1_writeable_table, 69 + .volatile_table = &stpmic1_volatile_table, 70 + }; 71 + 72 + static const struct regmap_irq stpmic1_irqs[] = { 73 + REGMAP_IRQ_REG(IT_PONKEY_F, 0, 0x01), 74 + REGMAP_IRQ_REG(IT_PONKEY_R, 0, 0x02), 75 + REGMAP_IRQ_REG(IT_WAKEUP_F, 0, 0x04), 76 + REGMAP_IRQ_REG(IT_WAKEUP_R, 0, 0x08), 77 + REGMAP_IRQ_REG(IT_VBUS_OTG_F, 0, 0x10), 78 + REGMAP_IRQ_REG(IT_VBUS_OTG_R, 0, 0x20), 79 + REGMAP_IRQ_REG(IT_SWOUT_F, 0, 0x40), 80 + REGMAP_IRQ_REG(IT_SWOUT_R, 0, 0x80), 81 + 82 + REGMAP_IRQ_REG(IT_CURLIM_BUCK1, 1, 0x01), 83 + REGMAP_IRQ_REG(IT_CURLIM_BUCK2, 1, 0x02), 84 + REGMAP_IRQ_REG(IT_CURLIM_BUCK3, 1, 0x04), 85 + REGMAP_IRQ_REG(IT_CURLIM_BUCK4, 1, 0x08), 86 + REGMAP_IRQ_REG(IT_OCP_OTG, 1, 0x10), 87 + REGMAP_IRQ_REG(IT_OCP_SWOUT, 1, 0x20), 88 + REGMAP_IRQ_REG(IT_OCP_BOOST, 1, 0x40), 89 + REGMAP_IRQ_REG(IT_OVP_BOOST, 1, 0x80), 90 + 91 + REGMAP_IRQ_REG(IT_CURLIM_LDO1, 2, 0x01), 92 + REGMAP_IRQ_REG(IT_CURLIM_LDO2, 2, 0x02), 93 + REGMAP_IRQ_REG(IT_CURLIM_LDO3, 2, 0x04), 94 + 
REGMAP_IRQ_REG(IT_CURLIM_LDO4, 2, 0x08), 95 + REGMAP_IRQ_REG(IT_CURLIM_LDO5, 2, 0x10), 96 + REGMAP_IRQ_REG(IT_CURLIM_LDO6, 2, 0x20), 97 + REGMAP_IRQ_REG(IT_SHORT_SWOTG, 2, 0x40), 98 + REGMAP_IRQ_REG(IT_SHORT_SWOUT, 2, 0x80), 99 + 100 + REGMAP_IRQ_REG(IT_TWARN_F, 3, 0x01), 101 + REGMAP_IRQ_REG(IT_TWARN_R, 3, 0x02), 102 + REGMAP_IRQ_REG(IT_VINLOW_F, 3, 0x04), 103 + REGMAP_IRQ_REG(IT_VINLOW_R, 3, 0x08), 104 + REGMAP_IRQ_REG(IT_SWIN_F, 3, 0x40), 105 + REGMAP_IRQ_REG(IT_SWIN_R, 3, 0x80), 106 + }; 107 + 108 + static const struct regmap_irq_chip stpmic1_regmap_irq_chip = { 109 + .name = "pmic_irq", 110 + .status_base = INT_PENDING_R1, 111 + .mask_base = INT_CLEAR_MASK_R1, 112 + .unmask_base = INT_SET_MASK_R1, 113 + .ack_base = INT_CLEAR_R1, 114 + .num_regs = STPMIC1_PMIC_NUM_IRQ_REGS, 115 + .irqs = stpmic1_irqs, 116 + .num_irqs = ARRAY_SIZE(stpmic1_irqs), 117 + }; 118 + 119 + static int stpmic1_probe(struct i2c_client *i2c, 120 + const struct i2c_device_id *id) 121 + { 122 + struct stpmic1 *ddata; 123 + struct device *dev = &i2c->dev; 124 + int ret; 125 + struct device_node *np = dev->of_node; 126 + u32 reg; 127 + 128 + ddata = devm_kzalloc(dev, sizeof(struct stpmic1), GFP_KERNEL); 129 + if (!ddata) 130 + return -ENOMEM; 131 + 132 + i2c_set_clientdata(i2c, ddata); 133 + ddata->dev = dev; 134 + 135 + ddata->regmap = devm_regmap_init_i2c(i2c, &stpmic1_regmap_config); 136 + if (IS_ERR(ddata->regmap)) 137 + return PTR_ERR(ddata->regmap); 138 + 139 + ddata->irq = of_irq_get(np, STPMIC1_MAIN_IRQ); 140 + if (ddata->irq < 0) { 141 + dev_err(dev, "Failed to get main IRQ: %d\n", ddata->irq); 142 + return ddata->irq; 143 + } 144 + 145 + ret = regmap_read(ddata->regmap, VERSION_SR, &reg); 146 + if (ret) { 147 + dev_err(dev, "Unable to read PMIC version\n"); 148 + return ret; 149 + } 150 + dev_info(dev, "PMIC Chip Version: 0x%x\n", reg); 151 + 152 + /* Initialize PMIC IRQ Chip & associated IRQ domains */ 153 + ret = devm_regmap_add_irq_chip(dev, ddata->regmap, ddata->irq, 154 + IRQF_ONESHOT | IRQF_SHARED, 155 + 0, &stpmic1_regmap_irq_chip, 156 + &ddata->irq_data); 157 + if (ret) { 158 + dev_err(dev, "IRQ Chip registration failed: %d\n", ret); 159 + return ret; 160 + } 161 + 162 + return devm_of_platform_populate(dev); 163 + } 164 + 165 + #ifdef CONFIG_PM_SLEEP 166 + static int stpmic1_suspend(struct device *dev) 167 + { 168 + struct i2c_client *i2c = to_i2c_client(dev); 169 + struct stpmic1 *pmic_dev = i2c_get_clientdata(i2c); 170 + 171 + disable_irq(pmic_dev->irq); 172 + 173 + return 0; 174 + } 175 + 176 + static int stpmic1_resume(struct device *dev) 177 + { 178 + struct i2c_client *i2c = to_i2c_client(dev); 179 + struct stpmic1 *pmic_dev = i2c_get_clientdata(i2c); 180 + int ret; 181 + 182 + ret = regcache_sync(pmic_dev->regmap); 183 + if (ret) 184 + return ret; 185 + 186 + enable_irq(pmic_dev->irq); 187 + 188 + return 0; 189 + } 190 + #endif 191 + 192 + static SIMPLE_DEV_PM_OPS(stpmic1_pm, stpmic1_suspend, stpmic1_resume); 193 + 194 + static const struct of_device_id stpmic1_of_match[] = { 195 + { .compatible = "st,stpmic1", }, 196 + {}, 197 + }; 198 + MODULE_DEVICE_TABLE(of, stpmic1_of_match); 199 + 200 + static struct i2c_driver stpmic1_driver = { 201 + .driver = { 202 + .name = "stpmic1", 203 + .of_match_table = of_match_ptr(stpmic1_of_match), 204 + .pm = &stpmic1_pm, 205 + }, 206 + .probe = stpmic1_probe, 207 + }; 208 + 209 + module_i2c_driver(stpmic1_driver); 210 + 211 + MODULE_DESCRIPTION("STPMIC1 PMIC Driver"); 212 + MODULE_AUTHOR("Pascal Paillet <p.paillet@st.com>"); 213 + MODULE_LICENSE("GPL v2");
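The new stpmic1 driver declares which registers are readable, writable and volatile as regmap_reg_range tables and lets regmap enforce them (and skip the cache for volatile ones). A simplified, self-contained model of range-based access checking; the register addresses here are invented for the example:

#include <stdio.h>

/* Simplified model of a regmap access table: an access is allowed when the
 * register falls inside one of the yes-ranges. Addresses are made up. */
struct reg_range { unsigned int lo, hi; };

static const struct reg_range readable_yes[] = {
	{ 0x00, 0x06 },		/* status block */
	{ 0x10, 0x1f },		/* control block */
};

static int reg_is_readable(unsigned int reg)
{
	for (size_t i = 0; i < sizeof(readable_yes) / sizeof(readable_yes[0]); i++)
		if (reg >= readable_yes[i].lo && reg <= readable_yes[i].hi)
			return 1;
	return 0;
}

int main(void)
{
	printf("0x04 readable: %d, 0x08 readable: %d\n",
	       reg_is_readable(0x04), reg_is_readable(0x08));
	return 0;
}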
+3 -2
drivers/mfd/ti_am335x_tscadc.c
··· 264 264 cell->pdata_size = sizeof(tscadc); 265 265 } 266 266 267 - err = mfd_add_devices(&pdev->dev, pdev->id, tscadc->cells, 268 - tscadc->used_cells, NULL, 0, NULL); 267 + err = mfd_add_devices(&pdev->dev, PLATFORM_DEVID_AUTO, 268 + tscadc->cells, tscadc->used_cells, NULL, 269 + 0, NULL); 269 270 if (err < 0) 270 271 goto err_disable_clk; 271 272
+3 -21
drivers/mfd/tps65218.c
··· 235 235 236 236 mutex_init(&tps->tps_lock); 237 237 238 - ret = regmap_add_irq_chip(tps->regmap, tps->irq, 239 - IRQF_ONESHOT, 0, &tps65218_irq_chip, 240 - &tps->irq_data); 238 + ret = devm_regmap_add_irq_chip(&client->dev, tps->regmap, tps->irq, 239 + IRQF_ONESHOT, 0, &tps65218_irq_chip, 240 + &tps->irq_data); 241 241 if (ret < 0) 242 242 return ret; 243 243 ··· 253 253 ARRAY_SIZE(tps65218_cells), NULL, 0, 254 254 regmap_irq_get_domain(tps->irq_data)); 255 255 256 - if (ret < 0) 257 - goto err_irq; 258 - 259 - return 0; 260 - 261 - err_irq: 262 - regmap_del_irq_chip(tps->irq, tps->irq_data); 263 - 264 256 return ret; 265 - } 266 - 267 - static int tps65218_remove(struct i2c_client *client) 268 - { 269 - struct tps65218 *tps = i2c_get_clientdata(client); 270 - 271 - regmap_del_irq_chip(tps->irq, tps->irq_data); 272 - 273 - return 0; 274 257 } 275 258 276 259 static const struct i2c_device_id tps65218_id_table[] = { ··· 268 285 .of_match_table = of_tps65218_match_table, 269 286 }, 270 287 .probe = tps65218_probe, 271 - .remove = tps65218_remove, 272 288 .id_table = tps65218_id_table, 273 289 }; 274 290
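tps65218 (like the cros_ec conversion earlier in this merge) moves to devm_regmap_add_irq_chip(), so the IRQ chip is torn down automatically when the device unbinds and the hand-written remove() plus its error unwinding disappear. A loose userspace analogy of device-managed cleanups, with invented names:

#include <stdio.h>

/* Analogy for devm_*: cleanups registered against an owner run in reverse
 * order when the owner is released, so drivers need no explicit remove(). */
typedef void (*cleanup_fn)(void *arg);

struct owner {
	cleanup_fn fn[8];
	void *arg[8];
	int count;
};

static void owner_add_cleanup(struct owner *o, cleanup_fn fn, void *arg)
{
	o->fn[o->count] = fn;
	o->arg[o->count] = arg;
	o->count++;
}

static void owner_release(struct owner *o)
{
	while (o->count--)
		o->fn[o->count](o->arg[o->count]);
}

static void del_irq_chip(void *arg)
{
	printf("removing irq chip for %s\n", (const char *)arg);
}

int main(void)
{
	struct owner dev = { .count = 0 };

	owner_add_cleanup(&dev, del_irq_chip, "tps65218");	/* devm_..._add_irq_chip */
	owner_release(&dev);					/* driver detach */
	return 0;
}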
+24
drivers/mfd/tps6586x.c
··· 592 592 return 0; 593 593 } 594 594 595 + static int __maybe_unused tps6586x_i2c_suspend(struct device *dev) 596 + { 597 + struct tps6586x *tps6586x = dev_get_drvdata(dev); 598 + 599 + if (tps6586x->client->irq) 600 + disable_irq(tps6586x->client->irq); 601 + 602 + return 0; 603 + } 604 + 605 + static int __maybe_unused tps6586x_i2c_resume(struct device *dev) 606 + { 607 + struct tps6586x *tps6586x = dev_get_drvdata(dev); 608 + 609 + if (tps6586x->client->irq) 610 + enable_irq(tps6586x->client->irq); 611 + 612 + return 0; 613 + } 614 + 615 + static SIMPLE_DEV_PM_OPS(tps6586x_pm_ops, tps6586x_i2c_suspend, 616 + tps6586x_i2c_resume); 617 + 595 618 static const struct i2c_device_id tps6586x_id_table[] = { 596 619 { "tps6586x", 0 }, 597 620 { }, ··· 625 602 .driver = { 626 603 .name = "tps6586x", 627 604 .of_match_table = of_match_ptr(tps6586x_of_match), 605 + .pm = &tps6586x_pm_ops, 628 606 }, 629 607 .probe = tps6586x_i2c_probe, 630 608 .remove = tps6586x_i2c_remove,
+2 -2
drivers/mfd/twl-core.c
··· 979 979 * letting it generate the right frequencies for USB, MADC, and 980 980 * other purposes. 981 981 */ 982 - static inline int __init protect_pm_master(void) 982 + static inline int protect_pm_master(void) 983 983 { 984 984 int e = 0; 985 985 ··· 988 988 return e; 989 989 } 990 990 991 - static inline int __init unprotect_pm_master(void) 991 + static inline int unprotect_pm_master(void) 992 992 { 993 993 int e = 0; 994 994
+2
drivers/mfd/wm5110-tables.c
··· 1618 1618 { 0x00000ECD, 0x0000 }, /* R3789 - HPLPF4_2 */ 1619 1619 { 0x00000EE0, 0x0000 }, /* R3808 - ASRC_ENABLE */ 1620 1620 { 0x00000EE2, 0x0000 }, /* R3810 - ASRC_RATE1 */ 1621 + { 0x00000EE3, 0x4000 }, /* R3811 - ASRC_RATE2 */ 1621 1622 { 0x00000EF0, 0x0000 }, /* R3824 - ISRC 1 CTRL 1 */ 1622 1623 { 0x00000EF1, 0x0000 }, /* R3825 - ISRC 1 CTRL 2 */ 1623 1624 { 0x00000EF2, 0x0000 }, /* R3826 - ISRC 1 CTRL 3 */ ··· 2870 2869 case ARIZONA_ASRC_ENABLE: 2871 2870 case ARIZONA_ASRC_STATUS: 2872 2871 case ARIZONA_ASRC_RATE1: 2872 + case ARIZONA_ASRC_RATE2: 2873 2873 case ARIZONA_ISRC_1_CTRL_1: 2874 2874 case ARIZONA_ISRC_1_CTRL_2: 2875 2875 case ARIZONA_ISRC_1_CTRL_3:
+2 -2
drivers/misc/genwqe/card_utils.c
··· 218 218 if (get_order(size) >= MAX_ORDER) 219 219 return NULL; 220 220 221 - return dma_zalloc_coherent(&cd->pci_dev->dev, size, dma_handle, 222 - GFP_KERNEL); 221 + return dma_alloc_coherent(&cd->pci_dev->dev, size, dma_handle, 222 + GFP_KERNEL); 223 223 } 224 224 225 225 void __genwqe_free_consistent(struct genwqe_dev *cd, size_t size,
+7 -2
drivers/misc/mic/vop/vop_main.c
··· 394 394 struct _vop_vdev *vdev = to_vopvdev(dev); 395 395 struct vop_device *vpdev = vdev->vpdev; 396 396 struct mic_device_ctrl __iomem *dc = vdev->dc; 397 - int i, err, retry; 397 + int i, err, retry, queue_idx = 0; 398 398 399 399 /* We must have this many virtqueues. */ 400 400 if (nvqs > ioread8(&vdev->desc->num_vq)) 401 401 return -ENOENT; 402 402 403 403 for (i = 0; i < nvqs; ++i) { 404 + if (!names[i]) { 405 + vqs[i] = NULL; 406 + continue; 407 + } 408 + 404 409 dev_dbg(_vop_dev(vdev), "%s: %d: %s\n", 405 410 __func__, i, names[i]); 406 - vqs[i] = vop_find_vq(dev, i, callbacks[i], names[i], 411 + vqs[i] = vop_find_vq(dev, queue_idx++, callbacks[i], names[i], 407 412 ctx ? ctx[i] : false); 408 413 if (IS_ERR(vqs[i])) { 409 414 err = PTR_ERR(vqs[i]);
+1 -1
drivers/mmc/core/host.c
··· 234 234 if (device_property_read_bool(dev, "broken-cd")) 235 235 host->caps |= MMC_CAP_NEEDS_POLL; 236 236 237 - ret = mmc_gpiod_request_cd(host, "cd", 0, true, 237 + ret = mmc_gpiod_request_cd(host, "cd", 0, false, 238 238 cd_debounce_delay_ms * 1000, 239 239 &cd_gpio_invert); 240 240 if (!ret)
+3 -2
drivers/mmc/host/sdhci.c
··· 3763 3763 * Use zalloc to zero the reserved high 32-bits of 128-bit 3764 3764 * descriptors so that they never need to be written. 3765 3765 */ 3766 - buf = dma_zalloc_coherent(mmc_dev(mmc), host->align_buffer_sz + 3767 - host->adma_table_sz, &dma, GFP_KERNEL); 3766 + buf = dma_alloc_coherent(mmc_dev(mmc), 3767 + host->align_buffer_sz + host->adma_table_sz, 3768 + &dma, GFP_KERNEL); 3768 3769 if (!buf) { 3769 3770 pr_warn("%s: Unable to allocate ADMA buffers - falling back to standard DMA\n", 3770 3771 mmc_hostname(mmc));
+1 -1
drivers/mtd/mtdcore.c
··· 522 522 mtd->nvmem = nvmem_register(&config); 523 523 if (IS_ERR(mtd->nvmem)) { 524 524 /* Just ignore if there is no NVMEM support in the kernel */ 525 - if (PTR_ERR(mtd->nvmem) == -ENOSYS) { 525 + if (PTR_ERR(mtd->nvmem) == -EOPNOTSUPP) { 526 526 mtd->nvmem = NULL; 527 527 } else { 528 528 dev_err(&mtd->dev, "Failed to register NVMEM device\n");
+1 -1
drivers/mtd/mtdcore.h
··· 7 7 extern struct mutex mtd_table_mutex; 8 8 9 9 struct mtd_info *__mtd_next_device(int i); 10 - int add_mtd_device(struct mtd_info *mtd); 10 + int __must_check add_mtd_device(struct mtd_info *mtd); 11 11 int del_mtd_device(struct mtd_info *mtd); 12 12 int add_mtd_partitions(struct mtd_info *, const struct mtd_partition *, int); 13 13 int del_mtd_partitions(struct mtd_info *);
+31 -5
drivers/mtd/mtdpart.c
··· 618 618 list_add(&new->list, &mtd_partitions); 619 619 mutex_unlock(&mtd_partitions_mutex); 620 620 621 - add_mtd_device(&new->mtd); 621 + ret = add_mtd_device(&new->mtd); 622 + if (ret) 623 + goto err_remove_part; 622 624 623 625 mtd_add_partition_attrs(new); 626 + 627 + return 0; 628 + 629 + err_remove_part: 630 + mutex_lock(&mtd_partitions_mutex); 631 + list_del(&new->list); 632 + mutex_unlock(&mtd_partitions_mutex); 633 + 634 + free_partition(new); 635 + pr_info("%s:%i\n", __func__, __LINE__); 624 636 625 637 return ret; 626 638 } ··· 724 712 { 725 713 struct mtd_part *slave; 726 714 uint64_t cur_offset = 0; 727 - int i; 715 + int i, ret; 728 716 729 717 printk(KERN_NOTICE "Creating %d MTD partitions on \"%s\":\n", nbparts, master->name); 730 718 731 719 for (i = 0; i < nbparts; i++) { 732 720 slave = allocate_partition(master, parts + i, i, cur_offset); 733 721 if (IS_ERR(slave)) { 734 - del_mtd_partitions(master); 735 - return PTR_ERR(slave); 722 + ret = PTR_ERR(slave); 723 + goto err_del_partitions; 736 724 } 737 725 738 726 mutex_lock(&mtd_partitions_mutex); 739 727 list_add(&slave->list, &mtd_partitions); 740 728 mutex_unlock(&mtd_partitions_mutex); 741 729 742 - add_mtd_device(&slave->mtd); 730 + ret = add_mtd_device(&slave->mtd); 731 + if (ret) { 732 + mutex_lock(&mtd_partitions_mutex); 733 + list_del(&slave->list); 734 + mutex_unlock(&mtd_partitions_mutex); 735 + 736 + free_partition(slave); 737 + goto err_del_partitions; 738 + } 739 + 743 740 mtd_add_partition_attrs(slave); 744 741 /* Look for subpartitions */ 745 742 parse_mtd_partitions(&slave->mtd, parts[i].types, NULL); ··· 757 736 } 758 737 759 738 return 0; 739 + 740 + err_del_partitions: 741 + del_mtd_partitions(master); 742 + 743 + return ret; 760 744 } 761 745 762 746 static DEFINE_SPINLOCK(part_parser_lock);
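The mtdpart changes make add_mtd_device() failures unwind the partial state (list_del plus free_partition) instead of leaving a half-registered partition behind. A compact sketch of that register-then-unwind shape:

#include <stdio.h>

/* Sketch of the unwind the diff adds: if the later registration step fails,
 * undo the earlier list insertion before returning the error. */
static int register_device(int should_fail)
{
	return should_fail ? -1 : 0;
}

static int add_partition(int should_fail)
{
	printf("list_add\n");
	if (register_device(should_fail)) {
		printf("list_del + free_partition (unwind)\n");
		return -1;
	}
	printf("partition registered\n");
	return 0;
}

int main(void)
{
	add_partition(0);
	add_partition(1);
	return 0;
}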
+1 -1
drivers/mtd/nand/raw/denali.c
··· 1322 1322 } 1323 1323 1324 1324 /* clk rate info is needed for setup_data_interface */ 1325 - if (denali->clk_rate && denali->clk_x_rate) 1325 + if (!denali->clk_rate || !denali->clk_x_rate) 1326 1326 chip->options |= NAND_KEEP_TIMINGS; 1327 1327 1328 1328 chip->legacy.dummy_controller.ops = &denali_controller_ops;
-21
drivers/mtd/nand/raw/fsmc_nand.c
··· 593 593 dma_xfer(host, (void *)buf, len, DMA_TO_DEVICE); 594 594 } 595 595 596 - /* fsmc_select_chip - assert or deassert nCE */ 597 - static void fsmc_ce_ctrl(struct fsmc_nand_data *host, bool assert) 598 - { 599 - u32 pc = readl(host->regs_va + FSMC_PC); 600 - 601 - if (!assert) 602 - writel_relaxed(pc & ~FSMC_ENABLE, host->regs_va + FSMC_PC); 603 - else 604 - writel_relaxed(pc | FSMC_ENABLE, host->regs_va + FSMC_PC); 605 - 606 - /* 607 - * nCE line changes must be applied before returning from this 608 - * function. 609 - */ 610 - mb(); 611 - } 612 - 613 596 /* 614 597 * fsmc_exec_op - hook called by the core to execute NAND operations 615 598 * ··· 609 626 int i; 610 627 611 628 pr_debug("Executing operation [%d instructions]:\n", op->ninstrs); 612 - 613 - fsmc_ce_ctrl(host, true); 614 629 615 630 for (op_id = 0; op_id < op->ninstrs; op_id++) { 616 631 instr = &op->instrs[op_id]; ··· 666 685 break; 667 686 } 668 687 } 669 - 670 - fsmc_ce_ctrl(host, false); 671 688 672 689 return ret; 673 690 }
+1 -1
drivers/mtd/nand/raw/jz4740_nand.c
··· 260 260 } 261 261 262 262 static int jz_nand_ioremap_resource(struct platform_device *pdev, 263 - const char *name, struct resource **res, void *__iomem *base) 263 + const char *name, struct resource **res, void __iomem **base) 264 264 { 265 265 int ret; 266 266
+10 -10
drivers/mtd/nand/raw/qcom_nandc.c
··· 2833 2833 if (ret) 2834 2834 return ret; 2835 2835 2836 + if (nandc->props->is_bam) { 2837 + free_bam_transaction(nandc); 2838 + nandc->bam_txn = alloc_bam_transaction(nandc); 2839 + if (!nandc->bam_txn) { 2840 + dev_err(nandc->dev, 2841 + "failed to allocate bam transaction\n"); 2842 + return -ENOMEM; 2843 + } 2844 + } 2845 + 2836 2846 ret = mtd_device_register(mtd, NULL, 0); 2837 2847 if (ret) 2838 2848 nand_cleanup(chip); ··· 2856 2846 struct device_node *dn = dev->of_node, *child; 2857 2847 struct qcom_nand_host *host; 2858 2848 int ret; 2859 - 2860 - if (nandc->props->is_bam) { 2861 - free_bam_transaction(nandc); 2862 - nandc->bam_txn = alloc_bam_transaction(nandc); 2863 - if (!nandc->bam_txn) { 2864 - dev_err(nandc->dev, 2865 - "failed to allocate bam transaction\n"); 2866 - return -ENOMEM; 2867 - } 2868 - } 2869 2849 2870 2850 for_each_available_child_of_node(dn, child) { 2871 2851 host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
+1 -1
drivers/net/Kconfig
··· 519 519 and destroy a failover master netdev and manages a primary and 520 520 standby slave netdevs that get registered via the generic failover 521 521 infrastructure. This can be used by paravirtual drivers to enable 522 - an alternate low latency datapath. It alsoenables live migration of 522 + an alternate low latency datapath. It also enables live migration of 523 523 a VM with direct attached VF by failing over to the paravirtual 524 524 datapath when the VF is unplugged. 525 525
+3
drivers/net/bonding/bond_main.c
··· 1963 1963 if (!bond_has_slaves(bond)) { 1964 1964 bond_set_carrier(bond); 1965 1965 eth_hw_addr_random(bond_dev); 1966 + bond->nest_level = SINGLE_DEPTH_NESTING; 1967 + } else { 1968 + bond->nest_level = dev_get_nest_level(bond_dev) + 1; 1966 1969 } 1967 1970 1968 1971 unblock_netpoll_tx();
-2
drivers/net/dsa/microchip/ksz_common.c
··· 7 7 8 8 #include <linux/delay.h> 9 9 #include <linux/export.h> 10 - #include <linux/gpio.h> 11 10 #include <linux/gpio/consumer.h> 12 11 #include <linux/kernel.h> 13 12 #include <linux/module.h> ··· 14 15 #include <linux/phy.h> 15 16 #include <linux/etherdevice.h> 16 17 #include <linux/if_bridge.h> 17 - #include <linux/of_gpio.h> 18 18 #include <linux/of_net.h> 19 19 #include <net/dsa.h> 20 20 #include <net/switchdev.h>
-1
drivers/net/dsa/mt7530.c
··· 18 18 #include <linux/mfd/syscon.h> 19 19 #include <linux/module.h> 20 20 #include <linux/netdevice.h> 21 - #include <linux/of_gpio.h> 22 21 #include <linux/of_mdio.h> 23 22 #include <linux/of_net.h> 24 23 #include <linux/of_platform.h>
+113
drivers/net/dsa/mv88e6xxx/chip.c
··· 2403 2403 return mv88e6xxx_g1_stats_clear(chip); 2404 2404 } 2405 2405 2406 + /* The mv88e6390 has some hidden registers used for debug and 2407 + * development. The errata also makes use of them. 2408 + */ 2409 + static int mv88e6390_hidden_write(struct mv88e6xxx_chip *chip, int port, 2410 + int reg, u16 val) 2411 + { 2412 + u16 ctrl; 2413 + int err; 2414 + 2415 + err = mv88e6xxx_port_write(chip, PORT_RESERVED_1A_DATA_PORT, 2416 + PORT_RESERVED_1A, val); 2417 + if (err) 2418 + return err; 2419 + 2420 + ctrl = PORT_RESERVED_1A_BUSY | PORT_RESERVED_1A_WRITE | 2421 + PORT_RESERVED_1A_BLOCK | port << PORT_RESERVED_1A_PORT_SHIFT | 2422 + reg; 2423 + 2424 + return mv88e6xxx_port_write(chip, PORT_RESERVED_1A_CTRL_PORT, 2425 + PORT_RESERVED_1A, ctrl); 2426 + } 2427 + 2428 + static int mv88e6390_hidden_wait(struct mv88e6xxx_chip *chip) 2429 + { 2430 + return mv88e6xxx_wait(chip, PORT_RESERVED_1A_CTRL_PORT, 2431 + PORT_RESERVED_1A, PORT_RESERVED_1A_BUSY); 2432 + } 2433 + 2434 + 2435 + static int mv88e6390_hidden_read(struct mv88e6xxx_chip *chip, int port, 2436 + int reg, u16 *val) 2437 + { 2438 + u16 ctrl; 2439 + int err; 2440 + 2441 + ctrl = PORT_RESERVED_1A_BUSY | PORT_RESERVED_1A_READ | 2442 + PORT_RESERVED_1A_BLOCK | port << PORT_RESERVED_1A_PORT_SHIFT | 2443 + reg; 2444 + 2445 + err = mv88e6xxx_port_write(chip, PORT_RESERVED_1A_CTRL_PORT, 2446 + PORT_RESERVED_1A, ctrl); 2447 + if (err) 2448 + return err; 2449 + 2450 + err = mv88e6390_hidden_wait(chip); 2451 + if (err) 2452 + return err; 2453 + 2454 + return mv88e6xxx_port_read(chip, PORT_RESERVED_1A_DATA_PORT, 2455 + PORT_RESERVED_1A, val); 2456 + } 2457 + 2458 + /* Check if the errata has already been applied. */ 2459 + static bool mv88e6390_setup_errata_applied(struct mv88e6xxx_chip *chip) 2460 + { 2461 + int port; 2462 + int err; 2463 + u16 val; 2464 + 2465 + for (port = 0; port < mv88e6xxx_num_ports(chip); port++) { 2466 + err = mv88e6390_hidden_read(chip, port, 0, &val); 2467 + if (err) { 2468 + dev_err(chip->dev, 2469 + "Error reading hidden register: %d\n", err); 2470 + return false; 2471 + } 2472 + if (val != 0x01c0) 2473 + return false; 2474 + } 2475 + 2476 + return true; 2477 + } 2478 + 2479 + /* The 6390 copper ports have an errata which require poking magic 2480 + * values into undocumented hidden registers and then performing a 2481 + * software reset. 2482 + */ 2483 + static int mv88e6390_setup_errata(struct mv88e6xxx_chip *chip) 2484 + { 2485 + int port; 2486 + int err; 2487 + 2488 + if (mv88e6390_setup_errata_applied(chip)) 2489 + return 0; 2490 + 2491 + /* Set the ports into blocking mode */ 2492 + for (port = 0; port < mv88e6xxx_num_ports(chip); port++) { 2493 + err = mv88e6xxx_port_set_state(chip, port, BR_STATE_DISABLED); 2494 + if (err) 2495 + return err; 2496 + } 2497 + 2498 + for (port = 0; port < mv88e6xxx_num_ports(chip); port++) { 2499 + err = mv88e6390_hidden_write(chip, port, 0, 0x01c0); 2500 + if (err) 2501 + return err; 2502 + } 2503 + 2504 + return mv88e6xxx_software_reset(chip); 2505 + } 2506 + 2406 2507 static int mv88e6xxx_setup(struct dsa_switch *ds) 2407 2508 { 2408 2509 struct mv88e6xxx_chip *chip = ds->priv; ··· 2515 2414 ds->slave_mii_bus = mv88e6xxx_default_mdio_bus(chip); 2516 2415 2517 2416 mutex_lock(&chip->reg_lock); 2417 + 2418 + if (chip->info->ops->setup_errata) { 2419 + err = chip->info->ops->setup_errata(chip); 2420 + if (err) 2421 + goto unlock; 2422 + } 2518 2423 2519 2424 /* Cache the cmode of each port. 
*/ 2520 2425 for (i = 0; i < mv88e6xxx_num_ports(chip); i++) { ··· 3333 3226 3334 3227 static const struct mv88e6xxx_ops mv88e6190_ops = { 3335 3228 /* MV88E6XXX_FAMILY_6390 */ 3229 + .setup_errata = mv88e6390_setup_errata, 3336 3230 .irl_init_all = mv88e6390_g2_irl_init_all, 3337 3231 .get_eeprom = mv88e6xxx_g2_get_eeprom8, 3338 3232 .set_eeprom = mv88e6xxx_g2_set_eeprom8, ··· 3377 3269 3378 3270 static const struct mv88e6xxx_ops mv88e6190x_ops = { 3379 3271 /* MV88E6XXX_FAMILY_6390 */ 3272 + .setup_errata = mv88e6390_setup_errata, 3380 3273 .irl_init_all = mv88e6390_g2_irl_init_all, 3381 3274 .get_eeprom = mv88e6xxx_g2_get_eeprom8, 3382 3275 .set_eeprom = mv88e6xxx_g2_set_eeprom8, ··· 3421 3312 3422 3313 static const struct mv88e6xxx_ops mv88e6191_ops = { 3423 3314 /* MV88E6XXX_FAMILY_6390 */ 3315 + .setup_errata = mv88e6390_setup_errata, 3424 3316 .irl_init_all = mv88e6390_g2_irl_init_all, 3425 3317 .get_eeprom = mv88e6xxx_g2_get_eeprom8, 3426 3318 .set_eeprom = mv88e6xxx_g2_set_eeprom8, ··· 3514 3404 3515 3405 static const struct mv88e6xxx_ops mv88e6290_ops = { 3516 3406 /* MV88E6XXX_FAMILY_6390 */ 3407 + .setup_errata = mv88e6390_setup_errata, 3517 3408 .irl_init_all = mv88e6390_g2_irl_init_all, 3518 3409 .get_eeprom = mv88e6xxx_g2_get_eeprom8, 3519 3410 .set_eeprom = mv88e6xxx_g2_set_eeprom8, ··· 3820 3709 3821 3710 static const struct mv88e6xxx_ops mv88e6390_ops = { 3822 3711 /* MV88E6XXX_FAMILY_6390 */ 3712 + .setup_errata = mv88e6390_setup_errata, 3823 3713 .irl_init_all = mv88e6390_g2_irl_init_all, 3824 3714 .get_eeprom = mv88e6xxx_g2_get_eeprom8, 3825 3715 .set_eeprom = mv88e6xxx_g2_set_eeprom8, ··· 3868 3756 3869 3757 static const struct mv88e6xxx_ops mv88e6390x_ops = { 3870 3758 /* MV88E6XXX_FAMILY_6390 */ 3759 + .setup_errata = mv88e6390_setup_errata, 3871 3760 .irl_init_all = mv88e6390_g2_irl_init_all, 3872 3761 .get_eeprom = mv88e6xxx_g2_get_eeprom8, 3873 3762 .set_eeprom = mv88e6xxx_g2_set_eeprom8,
+5
drivers/net/dsa/mv88e6xxx/chip.h
··· 300 300 }; 301 301 302 302 struct mv88e6xxx_ops { 303 + /* Switch Setup Errata, called early in the switch setup to 304 + * allow any errata actions to be performed 305 + */ 306 + int (*setup_errata)(struct mv88e6xxx_chip *chip); 307 + 303 308 int (*ieee_pri_map)(struct mv88e6xxx_chip *chip); 304 309 int (*ip_pri_map)(struct mv88e6xxx_chip *chip); 305 310
+10
drivers/net/dsa/mv88e6xxx/port.h
··· 251 251 /* Offset 0x19: Port IEEE Priority Remapping Registers (4-7) */ 252 252 #define MV88E6095_PORT_IEEE_PRIO_REMAP_4567 0x19 253 253 254 + /* Offset 0x1a: Magic undocumented errata register */ 255 + #define PORT_RESERVED_1A 0x1a 256 + #define PORT_RESERVED_1A_BUSY BIT(15) 257 + #define PORT_RESERVED_1A_WRITE BIT(14) 258 + #define PORT_RESERVED_1A_READ 0 259 + #define PORT_RESERVED_1A_PORT_SHIFT 5 260 + #define PORT_RESERVED_1A_BLOCK (0xf << 10) 261 + #define PORT_RESERVED_1A_CTRL_PORT 4 262 + #define PORT_RESERVED_1A_DATA_PORT 5 263 + 254 264 int mv88e6xxx_port_read(struct mv88e6xxx_chip *chip, int port, int reg, 255 265 u16 *val); 256 266 int mv88e6xxx_port_write(struct mv88e6xxx_chip *chip, int port, int reg,
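The mv88e6390 errata workaround drives the undocumented "hidden" registers through a control word whose fields are defined in the port.h hunk above. Recomputing that word for, say, port 2 and register 0 (the write case the errata uses):

#include <stdio.h>

/* Field layout copied from the PORT_RESERVED_1A defines in the hunk above. */
#define RES_1A_BUSY		(1u << 15)
#define RES_1A_WRITE		(1u << 14)
#define RES_1A_BLOCK		(0xfu << 10)
#define RES_1A_PORT_SHIFT	5

int main(void)
{
	unsigned int port = 2, reg = 0;
	unsigned int ctrl = RES_1A_BUSY | RES_1A_WRITE | RES_1A_BLOCK |
			    port << RES_1A_PORT_SHIFT | reg;

	printf("hidden-register ctrl word: 0x%04x\n", ctrl);	/* 0xfc40 */
	return 0;
}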
+13 -5
drivers/net/dsa/realtek-smi.c
··· 347 347 struct device_node *mdio_np; 348 348 int ret; 349 349 350 - mdio_np = of_find_compatible_node(smi->dev->of_node, NULL, 351 - "realtek,smi-mdio"); 350 + mdio_np = of_get_compatible_child(smi->dev->of_node, "realtek,smi-mdio"); 352 351 if (!mdio_np) { 353 352 dev_err(smi->dev, "no MDIO bus node\n"); 354 353 return -ENODEV; 355 354 } 356 355 357 356 smi->slave_mii_bus = devm_mdiobus_alloc(smi->dev); 358 - if (!smi->slave_mii_bus) 359 - return -ENOMEM; 357 + if (!smi->slave_mii_bus) { 358 + ret = -ENOMEM; 359 + goto err_put_node; 360 + } 360 361 smi->slave_mii_bus->priv = smi; 361 362 smi->slave_mii_bus->name = "SMI slave MII"; 362 363 smi->slave_mii_bus->read = realtek_smi_mdio_read; ··· 372 371 if (ret) { 373 372 dev_err(smi->dev, "unable to register MDIO bus %s\n", 374 373 smi->slave_mii_bus->id); 375 - of_node_put(mdio_np); 374 + goto err_put_node; 376 375 } 377 376 378 377 return 0; 378 + 379 + err_put_node: 380 + of_node_put(mdio_np); 381 + 382 + return ret; 379 383 } 380 384 381 385 static int realtek_smi_probe(struct platform_device *pdev) ··· 463 457 struct realtek_smi *smi = dev_get_drvdata(&pdev->dev); 464 458 465 459 dsa_unregister_switch(smi->ds); 460 + if (smi->slave_mii_bus) 461 + of_node_put(smi->slave_mii_bus->dev.of_node); 466 462 gpiod_set_value(smi->reset, 1); 467 463 468 464 return 0;
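realtek-smi switches to of_get_compatible_child(), whose return carries a node reference that must be dropped on every exit path; the new err_put_node label and the of_node_put() in remove() keep the count balanced. A small analogy with an explicit counter:

#include <stdio.h>

/* Analogy: a "get" returns a counted reference; every path out of the
 * function, including the failure ones, must balance it exactly once. */
struct node {
	int refcount;
};

static void node_get(struct node *n) { n->refcount++; }
static void node_put(struct node *n) { n->refcount--; }

static int setup_mdio(struct node *mdio_np, int alloc_fails)
{
	node_get(mdio_np);		/* what of_get_compatible_child() did */
	if (alloc_fails) {
		node_put(mdio_np);	/* the err_put_node: label */
		return -1;
	}
	return 0;			/* reference kept until remove() */
}

int main(void)
{
	struct node n = { .refcount = 1 };

	setup_mdio(&n, 1);
	printf("refcount after failed setup: %d\n", n.refcount);	/* 1 */
	return 0;
}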
+6 -6
drivers/net/ethernet/aeroflex/greth.c
··· 1433 1433 } 1434 1434 1435 1435 /* Allocate TX descriptor ring in coherent memory */ 1436 - greth->tx_bd_base = dma_zalloc_coherent(greth->dev, 1024, 1437 - &greth->tx_bd_base_phys, 1438 - GFP_KERNEL); 1436 + greth->tx_bd_base = dma_alloc_coherent(greth->dev, 1024, 1437 + &greth->tx_bd_base_phys, 1438 + GFP_KERNEL); 1439 1439 if (!greth->tx_bd_base) { 1440 1440 err = -ENOMEM; 1441 1441 goto error3; 1442 1442 } 1443 1443 1444 1444 /* Allocate RX descriptor ring in coherent memory */ 1445 - greth->rx_bd_base = dma_zalloc_coherent(greth->dev, 1024, 1446 - &greth->rx_bd_base_phys, 1447 - GFP_KERNEL); 1445 + greth->rx_bd_base = dma_alloc_coherent(greth->dev, 1024, 1446 + &greth->rx_bd_base_phys, 1447 + GFP_KERNEL); 1448 1448 if (!greth->rx_bd_base) { 1449 1449 err = -ENOMEM; 1450 1450 goto error4;
+6 -6
drivers/net/ethernet/alacritech/slicoss.c
··· 795 795 size = stq->len * sizeof(*descs) + DESC_ALIGN_MASK; 796 796 797 797 for (i = 0; i < SLIC_NUM_STAT_DESC_ARRAYS; i++) { 798 - descs = dma_zalloc_coherent(&sdev->pdev->dev, size, &paddr, 799 - GFP_KERNEL); 798 + descs = dma_alloc_coherent(&sdev->pdev->dev, size, &paddr, 799 + GFP_KERNEL); 800 800 if (!descs) { 801 801 netdev_err(sdev->netdev, 802 802 "failed to allocate status descriptors\n"); ··· 1240 1240 struct slic_shmem_data *sm_data; 1241 1241 dma_addr_t paddr; 1242 1242 1243 - sm_data = dma_zalloc_coherent(&sdev->pdev->dev, sizeof(*sm_data), 1244 - &paddr, GFP_KERNEL); 1243 + sm_data = dma_alloc_coherent(&sdev->pdev->dev, sizeof(*sm_data), 1244 + &paddr, GFP_KERNEL); 1245 1245 if (!sm_data) { 1246 1246 dev_err(&sdev->pdev->dev, "failed to allocate shared memory\n"); 1247 1247 return -ENOMEM; ··· 1621 1621 int err = 0; 1622 1622 u8 *mac[2]; 1623 1623 1624 - eeprom = dma_zalloc_coherent(&sdev->pdev->dev, SLIC_EEPROM_SIZE, 1625 - &paddr, GFP_KERNEL); 1624 + eeprom = dma_alloc_coherent(&sdev->pdev->dev, SLIC_EEPROM_SIZE, 1625 + &paddr, GFP_KERNEL); 1626 1626 if (!eeprom) 1627 1627 return -ENOMEM; 1628 1628
+31 -30
drivers/net/ethernet/amazon/ena/ena_com.c
··· 111 111 struct ena_com_admin_sq *sq = &queue->sq; 112 112 u16 size = ADMIN_SQ_SIZE(queue->q_depth); 113 113 114 - sq->entries = dma_zalloc_coherent(queue->q_dmadev, size, &sq->dma_addr, 115 - GFP_KERNEL); 114 + sq->entries = dma_alloc_coherent(queue->q_dmadev, size, &sq->dma_addr, 115 + GFP_KERNEL); 116 116 117 117 if (!sq->entries) { 118 118 pr_err("memory allocation failed"); ··· 133 133 struct ena_com_admin_cq *cq = &queue->cq; 134 134 u16 size = ADMIN_CQ_SIZE(queue->q_depth); 135 135 136 - cq->entries = dma_zalloc_coherent(queue->q_dmadev, size, &cq->dma_addr, 137 - GFP_KERNEL); 136 + cq->entries = dma_alloc_coherent(queue->q_dmadev, size, &cq->dma_addr, 137 + GFP_KERNEL); 138 138 139 139 if (!cq->entries) { 140 140 pr_err("memory allocation failed"); ··· 156 156 157 157 dev->aenq.q_depth = ENA_ASYNC_QUEUE_DEPTH; 158 158 size = ADMIN_AENQ_SIZE(ENA_ASYNC_QUEUE_DEPTH); 159 - aenq->entries = dma_zalloc_coherent(dev->dmadev, size, &aenq->dma_addr, 160 - GFP_KERNEL); 159 + aenq->entries = dma_alloc_coherent(dev->dmadev, size, &aenq->dma_addr, 160 + GFP_KERNEL); 161 161 162 162 if (!aenq->entries) { 163 163 pr_err("memory allocation failed"); ··· 344 344 dev_node = dev_to_node(ena_dev->dmadev); 345 345 set_dev_node(ena_dev->dmadev, ctx->numa_node); 346 346 io_sq->desc_addr.virt_addr = 347 - dma_zalloc_coherent(ena_dev->dmadev, size, 348 - &io_sq->desc_addr.phys_addr, 349 - GFP_KERNEL); 347 + dma_alloc_coherent(ena_dev->dmadev, size, 348 + &io_sq->desc_addr.phys_addr, 349 + GFP_KERNEL); 350 350 set_dev_node(ena_dev->dmadev, dev_node); 351 351 if (!io_sq->desc_addr.virt_addr) { 352 352 io_sq->desc_addr.virt_addr = 353 - dma_zalloc_coherent(ena_dev->dmadev, size, 354 - &io_sq->desc_addr.phys_addr, 355 - GFP_KERNEL); 353 + dma_alloc_coherent(ena_dev->dmadev, size, 354 + &io_sq->desc_addr.phys_addr, 355 + GFP_KERNEL); 356 356 } 357 357 358 358 if (!io_sq->desc_addr.virt_addr) { ··· 425 425 prev_node = dev_to_node(ena_dev->dmadev); 426 426 set_dev_node(ena_dev->dmadev, ctx->numa_node); 427 427 io_cq->cdesc_addr.virt_addr = 428 - dma_zalloc_coherent(ena_dev->dmadev, size, 429 - &io_cq->cdesc_addr.phys_addr, GFP_KERNEL); 428 + dma_alloc_coherent(ena_dev->dmadev, size, 429 + &io_cq->cdesc_addr.phys_addr, GFP_KERNEL); 430 430 set_dev_node(ena_dev->dmadev, prev_node); 431 431 if (!io_cq->cdesc_addr.virt_addr) { 432 432 io_cq->cdesc_addr.virt_addr = 433 - dma_zalloc_coherent(ena_dev->dmadev, size, 434 - &io_cq->cdesc_addr.phys_addr, 435 - GFP_KERNEL); 433 + dma_alloc_coherent(ena_dev->dmadev, size, 434 + &io_cq->cdesc_addr.phys_addr, 435 + GFP_KERNEL); 436 436 } 437 437 438 438 if (!io_cq->cdesc_addr.virt_addr) { ··· 1026 1026 struct ena_rss *rss = &ena_dev->rss; 1027 1027 1028 1028 rss->hash_key = 1029 - dma_zalloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_key), 1030 - &rss->hash_key_dma_addr, GFP_KERNEL); 1029 + dma_alloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_key), 1030 + &rss->hash_key_dma_addr, GFP_KERNEL); 1031 1031 1032 1032 if (unlikely(!rss->hash_key)) 1033 1033 return -ENOMEM; ··· 1050 1050 struct ena_rss *rss = &ena_dev->rss; 1051 1051 1052 1052 rss->hash_ctrl = 1053 - dma_zalloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl), 1054 - &rss->hash_ctrl_dma_addr, GFP_KERNEL); 1053 + dma_alloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl), 1054 + &rss->hash_ctrl_dma_addr, GFP_KERNEL); 1055 1055 1056 1056 if (unlikely(!rss->hash_ctrl)) 1057 1057 return -ENOMEM; ··· 1094 1094 sizeof(struct ena_admin_rss_ind_table_entry); 1095 1095 1096 1096 rss->rss_ind_tbl = 1097 - 
dma_zalloc_coherent(ena_dev->dmadev, tbl_size, 1098 - &rss->rss_ind_tbl_dma_addr, GFP_KERNEL); 1097 + dma_alloc_coherent(ena_dev->dmadev, tbl_size, 1098 + &rss->rss_ind_tbl_dma_addr, GFP_KERNEL); 1099 1099 if (unlikely(!rss->rss_ind_tbl)) 1100 1100 goto mem_err1; 1101 1101 ··· 1649 1649 1650 1650 spin_lock_init(&mmio_read->lock); 1651 1651 mmio_read->read_resp = 1652 - dma_zalloc_coherent(ena_dev->dmadev, 1653 - sizeof(*mmio_read->read_resp), 1654 - &mmio_read->read_resp_dma_addr, GFP_KERNEL); 1652 + dma_alloc_coherent(ena_dev->dmadev, 1653 + sizeof(*mmio_read->read_resp), 1654 + &mmio_read->read_resp_dma_addr, GFP_KERNEL); 1655 1655 if (unlikely(!mmio_read->read_resp)) 1656 1656 goto err; 1657 1657 ··· 2623 2623 struct ena_host_attribute *host_attr = &ena_dev->host_attr; 2624 2624 2625 2625 host_attr->host_info = 2626 - dma_zalloc_coherent(ena_dev->dmadev, SZ_4K, 2627 - &host_attr->host_info_dma_addr, GFP_KERNEL); 2626 + dma_alloc_coherent(ena_dev->dmadev, SZ_4K, 2627 + &host_attr->host_info_dma_addr, GFP_KERNEL); 2628 2628 if (unlikely(!host_attr->host_info)) 2629 2629 return -ENOMEM; 2630 2630 ··· 2641 2641 struct ena_host_attribute *host_attr = &ena_dev->host_attr; 2642 2642 2643 2643 host_attr->debug_area_virt_addr = 2644 - dma_zalloc_coherent(ena_dev->dmadev, debug_area_size, 2645 - &host_attr->debug_area_dma_addr, GFP_KERNEL); 2644 + dma_alloc_coherent(ena_dev->dmadev, debug_area_size, 2645 + &host_attr->debug_area_dma_addr, 2646 + GFP_KERNEL); 2646 2647 if (unlikely(!host_attr->debug_area_virt_addr)) { 2647 2648 host_attr->debug_area_size = 0; 2648 2649 return -ENOMEM;
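The bulk of this section is the tree-wide retirement of dma_zalloc_coherent(): once dma_alloc_coherent() itself started returning zeroed memory, the wrapper became a pure rename, which is why these hunks (and the many similar ones below) only swap the function name and re-indent continuation lines. For reference, the helper being removed was roughly the following — a sketch of the historical include/linux/dma-mapping.h definition, not part of this diff:

```c
static inline void *dma_zalloc_coherent(struct device *dev, size_t size,
					dma_addr_t *dma_handle, gfp_t flag)
{
	/* dma_alloc_coherent() now zeroes the buffer itself, so
	 * forcing __GFP_ZERO here became redundant */
	return dma_alloc_coherent(dev, size, dma_handle,
				  flag | __GFP_ZERO);
}
```

Callers can therefore substitute the two 1:1, with no behavioral change for code that relied on zeroed buffers.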
-2
drivers/net/ethernet/amd/xgbe/xgbe-common.h
··· 431 431 #define MAC_MDIOSCAR_PA_WIDTH 5 432 432 #define MAC_MDIOSCAR_RA_INDEX 0 433 433 #define MAC_MDIOSCAR_RA_WIDTH 16 434 - #define MAC_MDIOSCAR_REG_INDEX 0 435 - #define MAC_MDIOSCAR_REG_WIDTH 21 436 434 #define MAC_MDIOSCCDR_BUSY_INDEX 22 437 435 #define MAC_MDIOSCCDR_BUSY_WIDTH 1 438 436 #define MAC_MDIOSCCDR_CMD_INDEX 16
+16 -6
drivers/net/ethernet/amd/xgbe/xgbe-dev.c
··· 1284 1284 } 1285 1285 } 1286 1286 1287 + static unsigned int xgbe_create_mdio_sca(int port, int reg) 1288 + { 1289 + unsigned int mdio_sca, da; 1290 + 1291 + da = (reg & MII_ADDR_C45) ? reg >> 16 : 0; 1292 + 1293 + mdio_sca = 0; 1294 + XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, RA, reg); 1295 + XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, PA, port); 1296 + XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, DA, da); 1297 + 1298 + return mdio_sca; 1299 + } 1300 + 1287 1301 static int xgbe_write_ext_mii_regs(struct xgbe_prv_data *pdata, int addr, 1288 1302 int reg, u16 val) 1289 1303 { ··· 1305 1291 1306 1292 reinit_completion(&pdata->mdio_complete); 1307 1293 1308 - mdio_sca = 0; 1309 - XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, REG, reg); 1310 - XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, DA, addr); 1294 + mdio_sca = xgbe_create_mdio_sca(addr, reg); 1311 1295 XGMAC_IOWRITE(pdata, MAC_MDIOSCAR, mdio_sca); 1312 1296 1313 1297 mdio_sccd = 0; ··· 1329 1317 1330 1318 reinit_completion(&pdata->mdio_complete); 1331 1319 1332 - mdio_sca = 0; 1333 - XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, REG, reg); 1334 - XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, DA, addr); 1320 + mdio_sca = xgbe_create_mdio_sca(addr, reg); 1335 1321 XGMAC_IOWRITE(pdata, MAC_MDIOSCAR, mdio_sca); 1336 1322 1337 1323 mdio_sccd = 0;
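The new xgbe_create_mdio_sca() helper exists because clause-45 register numbers arrive from the MDIO core with the device address packed into the upper bits, while clause-22 numbers do not. A sketch of the kernel packing convention it decodes (MII_ADDR_C45 is bit 30; the device address sits at bit 16):

```c
#include <linux/mdio.h>		/* MII_ADDR_C45 */

/* Illustration of how a clause-45 regnum is packed before it reaches
 * xgbe_create_mdio_sca(); devad and regnum are caller-supplied values.
 */
static int pack_c45_regnum(int devad, int regnum)
{
	return MII_ADDR_C45 | (devad << 16) | (regnum & 0xffff);
}
```

`reg >> 16` in the helper thus recovers the device address; the stray C45 flag bit carried along in the high bits is harmless because the XGMAC_SET_BITS() field macros mask the value down to the field width.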
+4 -4
drivers/net/ethernet/apm/xgene-v2/main.c
··· 206 206 } 207 207 208 208 /* Packet buffers should be 64B aligned */ 209 - pkt_buf = dma_zalloc_coherent(dev, XGENE_ENET_STD_MTU, &dma_addr, 210 - GFP_ATOMIC); 209 + pkt_buf = dma_alloc_coherent(dev, XGENE_ENET_STD_MTU, &dma_addr, 210 + GFP_ATOMIC); 211 211 if (unlikely(!pkt_buf)) { 212 212 dev_kfree_skb_any(skb); 213 213 return NETDEV_TX_OK; ··· 428 428 ring->ndev = ndev; 429 429 430 430 size = XGENE_ENET_DESC_SIZE * XGENE_ENET_NUM_DESC; 431 - ring->desc_addr = dma_zalloc_coherent(dev, size, &ring->dma_addr, 432 - GFP_KERNEL); 431 + ring->desc_addr = dma_alloc_coherent(dev, size, &ring->dma_addr, 432 + GFP_KERNEL); 433 433 if (!ring->desc_addr) 434 434 goto err; 435 435
+3 -4
drivers/net/ethernet/atheros/alx/main.c
··· 660 660 alx->num_txq + 661 661 sizeof(struct alx_rrd) * alx->rx_ringsz + 662 662 sizeof(struct alx_rfd) * alx->rx_ringsz; 663 - alx->descmem.virt = dma_zalloc_coherent(&alx->hw.pdev->dev, 664 - alx->descmem.size, 665 - &alx->descmem.dma, 666 - GFP_KERNEL); 663 + alx->descmem.virt = dma_alloc_coherent(&alx->hw.pdev->dev, 664 + alx->descmem.size, 665 + &alx->descmem.dma, GFP_KERNEL); 667 666 if (!alx->descmem.virt) 668 667 return -ENOMEM; 669 668
+2 -2
drivers/net/ethernet/atheros/atl1c/atl1c_main.c
··· 1019 1019 sizeof(struct atl1c_recv_ret_status) * rx_desc_count + 1020 1020 8 * 4; 1021 1021 1022 - ring_header->desc = dma_zalloc_coherent(&pdev->dev, ring_header->size, 1023 - &ring_header->dma, GFP_KERNEL); 1022 + ring_header->desc = dma_alloc_coherent(&pdev->dev, ring_header->size, 1023 + &ring_header->dma, GFP_KERNEL); 1024 1024 if (unlikely(!ring_header->desc)) { 1025 1025 dev_err(&pdev->dev, "could not get memory for DMA buffer\n"); 1026 1026 goto err_nomem;
+4 -4
drivers/net/ethernet/broadcom/bcm63xx_enet.c
··· 936 936 937 937 /* allocate rx dma ring */ 938 938 size = priv->rx_ring_size * sizeof(struct bcm_enet_desc); 939 - p = dma_zalloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL); 939 + p = dma_alloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL); 940 940 if (!p) { 941 941 ret = -ENOMEM; 942 942 goto out_freeirq_tx; ··· 947 947 948 948 /* allocate tx dma ring */ 949 949 size = priv->tx_ring_size * sizeof(struct bcm_enet_desc); 950 - p = dma_zalloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL); 950 + p = dma_alloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL); 951 951 if (!p) { 952 952 ret = -ENOMEM; 953 953 goto out_free_rx_ring; ··· 2120 2120 2121 2121 /* allocate rx dma ring */ 2122 2122 size = priv->rx_ring_size * sizeof(struct bcm_enet_desc); 2123 - p = dma_zalloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL); 2123 + p = dma_alloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL); 2124 2124 if (!p) { 2125 2125 dev_err(kdev, "cannot allocate rx ring %u\n", size); 2126 2126 ret = -ENOMEM; ··· 2132 2132 2133 2133 /* allocate tx dma ring */ 2134 2134 size = priv->tx_ring_size * sizeof(struct bcm_enet_desc); 2135 - p = dma_zalloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL); 2135 + p = dma_alloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL); 2136 2136 if (!p) { 2137 2137 dev_err(kdev, "cannot allocate tx ring\n"); 2138 2138 ret = -ENOMEM;
+2 -2
drivers/net/ethernet/broadcom/bcmsysport.c
··· 1506 1506 /* We just need one DMA descriptor which is DMA-able, since writing to 1507 1507 * the port will allocate a new descriptor in its internal linked-list 1508 1508 */ 1509 - p = dma_zalloc_coherent(kdev, sizeof(struct dma_desc), &ring->desc_dma, 1510 - GFP_KERNEL); 1509 + p = dma_alloc_coherent(kdev, sizeof(struct dma_desc), &ring->desc_dma, 1510 + GFP_KERNEL); 1511 1511 if (!p) { 1512 1512 netif_err(priv, hw, priv->netdev, "DMA alloc failed\n"); 1513 1513 return -ENOMEM;
+6 -6
drivers/net/ethernet/broadcom/bgmac.c
··· 634 634 635 635 /* Alloc ring of descriptors */ 636 636 size = BGMAC_TX_RING_SLOTS * sizeof(struct bgmac_dma_desc); 637 - ring->cpu_base = dma_zalloc_coherent(dma_dev, size, 638 - &ring->dma_base, 639 - GFP_KERNEL); 637 + ring->cpu_base = dma_alloc_coherent(dma_dev, size, 638 + &ring->dma_base, 639 + GFP_KERNEL); 640 640 if (!ring->cpu_base) { 641 641 dev_err(bgmac->dev, "Allocation of TX ring 0x%X failed\n", 642 642 ring->mmio_base); ··· 659 659 660 660 /* Alloc ring of descriptors */ 661 661 size = BGMAC_RX_RING_SLOTS * sizeof(struct bgmac_dma_desc); 662 - ring->cpu_base = dma_zalloc_coherent(dma_dev, size, 663 - &ring->dma_base, 664 - GFP_KERNEL); 662 + ring->cpu_base = dma_alloc_coherent(dma_dev, size, 663 + &ring->dma_base, 664 + GFP_KERNEL); 665 665 if (!ring->cpu_base) { 666 666 dev_err(bgmac->dev, "Allocation of RX ring 0x%X failed\n", 667 667 ring->mmio_base);
+2 -2
drivers/net/ethernet/broadcom/bnx2.c
··· 844 844 BNX2_SBLK_MSIX_ALIGN_SIZE); 845 845 bp->status_stats_size = status_blk_size + 846 846 sizeof(struct statistics_block); 847 - status_blk = dma_zalloc_coherent(&bp->pdev->dev, bp->status_stats_size, 848 - &bp->status_blk_mapping, GFP_KERNEL); 847 + status_blk = dma_alloc_coherent(&bp->pdev->dev, bp->status_stats_size, 848 + &bp->status_blk_mapping, GFP_KERNEL); 849 849 if (!status_blk) 850 850 return -ENOMEM; 851 851
+1 -1
drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
··· 2081 2081 bool is_pf); 2082 2082 2083 2083 #define BNX2X_ILT_ZALLOC(x, y, size) \ 2084 - x = dma_zalloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL) 2084 + x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL) 2085 2085 2086 2086 #define BNX2X_ILT_FREE(x, y, size) \ 2087 2087 do { \
+1 -1
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
··· 52 52 53 53 #define BNX2X_PCI_ALLOC(y, size) \ 54 54 ({ \ 55 - void *x = dma_zalloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \ 55 + void *x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \ 56 56 if (x) \ 57 57 DP(NETIF_MSG_HW, \ 58 58 "BNX2X_PCI_ALLOC: Physical %Lx Virtual %p\n", \
+16 -12
drivers/net/ethernet/broadcom/bnxt/bnxt.c
··· 3449 3449 goto alloc_tx_ext_stats; 3450 3450 3451 3451 bp->hw_rx_port_stats_ext = 3452 - dma_zalloc_coherent(&pdev->dev, 3453 - sizeof(struct rx_port_stats_ext), 3454 - &bp->hw_rx_port_stats_ext_map, 3455 - GFP_KERNEL); 3452 + dma_alloc_coherent(&pdev->dev, 3453 + sizeof(struct rx_port_stats_ext), 3454 + &bp->hw_rx_port_stats_ext_map, 3455 + GFP_KERNEL); 3456 3456 if (!bp->hw_rx_port_stats_ext) 3457 3457 return 0; 3458 3458 ··· 3462 3462 3463 3463 if (bp->hwrm_spec_code >= 0x10902) { 3464 3464 bp->hw_tx_port_stats_ext = 3465 - dma_zalloc_coherent(&pdev->dev, 3466 - sizeof(struct tx_port_stats_ext), 3467 - &bp->hw_tx_port_stats_ext_map, 3468 - GFP_KERNEL); 3465 + dma_alloc_coherent(&pdev->dev, 3466 + sizeof(struct tx_port_stats_ext), 3467 + &bp->hw_tx_port_stats_ext_map, 3468 + GFP_KERNEL); 3469 3469 } 3470 3470 bp->flags |= BNXT_FLAG_PORT_STATS_EXT; 3471 3471 } ··· 5601 5601 FUNC_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST | 5602 5602 FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST; 5603 5603 if (bp->flags & BNXT_FLAG_CHIP_P5) 5604 - flags |= FUNC_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST; 5604 + flags |= FUNC_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST | 5605 + FUNC_CFG_REQ_FLAGS_NQ_ASSETS_TEST; 5605 5606 else 5606 5607 flags |= FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST; 5607 5608 } ··· 6222 6221 rmem->pg_tbl_map = ctx_pg->ctx_dma_arr[i]; 6223 6222 rmem->depth = 1; 6224 6223 rmem->nr_pages = MAX_CTX_PAGES; 6225 - if (i == (nr_tbls - 1)) 6226 - rmem->nr_pages = ctx_pg->nr_pages % 6227 - MAX_CTX_PAGES; 6224 + if (i == (nr_tbls - 1)) { 6225 + int rem = ctx_pg->nr_pages % MAX_CTX_PAGES; 6226 + 6227 + if (rem) 6228 + rmem->nr_pages = rem; 6229 + } 6228 6230 rc = bnxt_alloc_ctx_mem_blk(bp, pg_tbl); 6229 6231 if (rc) 6230 6232 break;
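Besides the allocator rename, the bnxt hunk fixes a sizing bug in the context-memory code: when ctx_pg->nr_pages is an exact multiple of MAX_CTX_PAGES, the old unconditional `nr_pages % MAX_CTX_PAGES` left the last page table with zero pages. A worked example (MAX_CTX_PAGES shown as 512 purely for illustration; its real value depends on BNXT_PAGE_SIZE):

```c
#include <stdio.h>

#define MAX_CTX_PAGES	512	/* illustrative value only */

int main(void)
{
	int nr_pages = 1024;	/* exact multiple of MAX_CTX_PAGES */
	int rem = nr_pages % MAX_CTX_PAGES;

	int old_last = rem;				/* 0: empty last table */
	int new_last = rem ? rem : MAX_CTX_PAGES;	/* 512, as intended  */

	printf("old=%d new=%d\n", old_last, new_last);
	return 0;
}
```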
+2 -2
drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
··· 316 316 317 317 n = IEEE_8021QAZ_MAX_TCS; 318 318 data_len = sizeof(*data) + sizeof(*fw_app) * n; 319 - data = dma_zalloc_coherent(&bp->pdev->dev, data_len, &mapping, 320 - GFP_KERNEL); 319 + data = dma_alloc_coherent(&bp->pdev->dev, data_len, &mapping, 320 + GFP_KERNEL); 321 321 if (!data) 322 322 return -ENOMEM; 323 323
+2 -2
drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
··· 85 85 return -EFAULT; 86 86 } 87 87 88 - data_addr = dma_zalloc_coherent(&bp->pdev->dev, bytesize, 89 - &data_dma_addr, GFP_KERNEL); 88 + data_addr = dma_alloc_coherent(&bp->pdev->dev, bytesize, 89 + &data_dma_addr, GFP_KERNEL); 90 90 if (!data_addr) 91 91 return -ENOMEM; 92 92
+3 -2
drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
··· 386 386 #define HWRM_VERSION_MAJOR 1 387 387 #define HWRM_VERSION_MINOR 10 388 388 #define HWRM_VERSION_UPDATE 0 389 - #define HWRM_VERSION_RSVD 33 390 - #define HWRM_VERSION_STR "1.10.0.33" 389 + #define HWRM_VERSION_RSVD 35 390 + #define HWRM_VERSION_STR "1.10.0.35" 391 391 392 392 /* hwrm_ver_get_input (size:192b/24B) */ 393 393 struct hwrm_ver_get_input { ··· 1184 1184 #define FUNC_CFG_REQ_FLAGS_L2_CTX_ASSETS_TEST 0x100000UL 1185 1185 #define FUNC_CFG_REQ_FLAGS_TRUSTED_VF_ENABLE 0x200000UL 1186 1186 #define FUNC_CFG_REQ_FLAGS_DYNAMIC_TX_RING_ALLOC 0x400000UL 1187 + #define FUNC_CFG_REQ_FLAGS_NQ_ASSETS_TEST 0x800000UL 1187 1188 __le32 enables; 1188 1189 #define FUNC_CFG_REQ_ENABLES_MTU 0x1UL 1189 1190 #define FUNC_CFG_REQ_ENABLES_MRU 0x2UL
+11 -11
drivers/net/ethernet/broadcom/tg3.c
··· 8712 8712 if (!i && tg3_flag(tp, ENABLE_RSS)) 8713 8713 continue; 8714 8714 8715 - tnapi->rx_rcb = dma_zalloc_coherent(&tp->pdev->dev, 8716 - TG3_RX_RCB_RING_BYTES(tp), 8717 - &tnapi->rx_rcb_mapping, 8718 - GFP_KERNEL); 8715 + tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev, 8716 + TG3_RX_RCB_RING_BYTES(tp), 8717 + &tnapi->rx_rcb_mapping, 8718 + GFP_KERNEL); 8719 8719 if (!tnapi->rx_rcb) 8720 8720 goto err_out; 8721 8721 } ··· 8768 8768 { 8769 8769 int i; 8770 8770 8771 - tp->hw_stats = dma_zalloc_coherent(&tp->pdev->dev, 8772 - sizeof(struct tg3_hw_stats), 8773 - &tp->stats_mapping, GFP_KERNEL); 8771 + tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev, 8772 + sizeof(struct tg3_hw_stats), 8773 + &tp->stats_mapping, GFP_KERNEL); 8774 8774 if (!tp->hw_stats) 8775 8775 goto err_out; 8776 8776 ··· 8778 8778 struct tg3_napi *tnapi = &tp->napi[i]; 8779 8779 struct tg3_hw_status *sblk; 8780 8780 8781 - tnapi->hw_status = dma_zalloc_coherent(&tp->pdev->dev, 8782 - TG3_HW_STATUS_SIZE, 8783 - &tnapi->status_mapping, 8784 - GFP_KERNEL); 8781 + tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev, 8782 + TG3_HW_STATUS_SIZE, 8783 + &tnapi->status_mapping, 8784 + GFP_KERNEL); 8785 8785 if (!tnapi->hw_status) 8786 8786 goto err_out; 8787 8787
+2 -6
drivers/net/ethernet/cadence/macb_main.c
··· 1738 1738 *skb = nskb; 1739 1739 } 1740 1740 1741 - if (padlen) { 1742 - if (padlen >= ETH_FCS_LEN) 1743 - skb_put_zero(*skb, padlen - ETH_FCS_LEN); 1744 - else 1745 - skb_trim(*skb, ETH_FCS_LEN - padlen); 1746 - } 1741 + if (padlen > ETH_FCS_LEN) 1742 + skb_put_zero(*skb, padlen - ETH_FCS_LEN); 1747 1743 1748 1744 add_fcs: 1749 1745 /* set FCS to packet */
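The macb change is a correctness fix rather than cleanup. A worked example, assuming padlen was computed earlier in this function as the bytes needed to reach ETH_ZLEN plus room for the 4-byte FCS:

```c
/*   skb->len = 59:  padlen = (60 - 59) + 4 = 5
 *                   -> zero-fill 5 - ETH_FCS_LEN = 1 pad byte; the
 *                      remaining 4 bytes become the FCS at add_fcs.
 *
 *   padlen == ETH_FCS_LEN (frame already >= 60 bytes):
 *                   -> nothing to zero-fill, hence the new strict
 *                      `padlen > ETH_FCS_LEN` test; the deleted
 *                      skb_trim() branch shortened the buffer
 *                      instead of padding it.
 */
```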
+1 -1
drivers/net/ethernet/cavium/thunder/nicvf_queues.c
··· 59 59 dmem->q_len = q_len; 60 60 dmem->size = (desc_size * q_len) + align_bytes; 61 61 /* Save address, need it while freeing */ 62 - dmem->unalign_base = dma_zalloc_coherent(&nic->pdev->dev, dmem->size, 62 + dmem->unalign_base = dma_alloc_coherent(&nic->pdev->dev, dmem->size, 63 63 &dmem->dma, GFP_KERNEL); 64 64 if (!dmem->unalign_base) 65 65 return -ENOMEM;
+8 -6
drivers/net/ethernet/chelsio/cxgb3/sge.c
··· 620 620 { 621 621 size_t len = nelem * elem_size; 622 622 void *s = NULL; 623 - void *p = dma_zalloc_coherent(&pdev->dev, len, phys, GFP_KERNEL); 623 + void *p = dma_alloc_coherent(&pdev->dev, len, phys, GFP_KERNEL); 624 624 625 625 if (!p) 626 626 return NULL; ··· 2381 2381 lro_add_page(adap, qs, fl, 2382 2382 G_RSPD_LEN(len), 2383 2383 flags & F_RSPD_EOP); 2384 - goto next_fl; 2384 + goto next_fl; 2385 2385 } 2386 2386 2387 2387 skb = get_packet_pg(adap, fl, q, ··· 3214 3214 for (i = 0; i < SGE_QSETS; ++i) { 3215 3215 struct sge_qset *q = &adap->sge.qs[i]; 3216 3216 3217 - if (q->tx_reclaim_timer.function) 3218 - mod_timer(&q->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD); 3217 + if (q->tx_reclaim_timer.function) 3218 + mod_timer(&q->tx_reclaim_timer, 3219 + jiffies + TX_RECLAIM_PERIOD); 3219 3220 3220 - if (q->rx_reclaim_timer.function) 3221 - mod_timer(&q->rx_reclaim_timer, jiffies + RX_RECLAIM_PERIOD); 3221 + if (q->rx_reclaim_timer.function) 3222 + mod_timer(&q->rx_reclaim_timer, 3223 + jiffies + RX_RECLAIM_PERIOD); 3222 3224 } 3223 3225 } 3224 3226
+3 -3
drivers/net/ethernet/chelsio/cxgb3/t3_hw.c
··· 1082 1082 CH_WARN(adapter, "found newer FW version(%u.%u), " 1083 1083 "driver compiled for version %u.%u\n", major, minor, 1084 1084 FW_VERSION_MAJOR, FW_VERSION_MINOR); 1085 - return 0; 1085 + return 0; 1086 1086 } 1087 1087 return -EINVAL; 1088 1088 } ··· 3619 3619 3620 3620 static int init_parity(struct adapter *adap) 3621 3621 { 3622 - int i, err, addr; 3622 + int i, err, addr; 3623 3623 3624 3624 if (t3_read_reg(adap, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY) 3625 3625 return -EBUSY; ··· 3806 3806 p->phy.ops->power_down(&p->phy, 1); 3807 3807 } 3808 3808 3809 - return 0; 3809 + return 0; 3810 3810 } 3811 3811
+4 -4
drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c
··· 378 378 int err; 379 379 380 380 memset(&c, 0, sizeof(c)); 381 - c.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PTP_CMD) | 382 - FW_CMD_REQUEST_F | 383 - FW_CMD_WRITE_F | 384 - FW_PTP_CMD_PORTID_V(0)); 381 + c.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PTP_CMD) | 382 + FW_CMD_REQUEST_F | 383 + FW_CMD_WRITE_F | 384 + FW_PTP_CMD_PORTID_V(0)); 385 385 c.retval_len16 = cpu_to_be32(FW_CMD_LEN16_V(sizeof(c) / 16)); 386 386 c.u.scmd.sc = FW_PTP_SC_INIT_TIMER; 387 387
+1 -1
drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
··· 78 78 unsigned long flags; 79 79 80 80 spin_lock_irqsave(&bmap->lock, flags); 81 - __clear_bit(msix_idx, bmap->msix_bmap); 81 + __clear_bit(msix_idx, bmap->msix_bmap); 82 82 spin_unlock_irqrestore(&bmap->lock, flags); 83 83 } 84 84
+1 -1
drivers/net/ethernet/chelsio/cxgb4/sge.c
··· 694 694 { 695 695 size_t len = nelem * elem_size + stat_size; 696 696 void *s = NULL; 697 - void *p = dma_zalloc_coherent(dev, len, phys, GFP_KERNEL); 697 + void *p = dma_alloc_coherent(dev, len, phys, GFP_KERNEL); 698 698 699 699 if (!p) 700 700 return NULL;
+1 -1
drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
··· 3794 3794 /* If we have version number support, then check to see if the adapter 3795 3795 * already has up-to-date PHY firmware loaded. 3796 3796 */ 3797 - if (phy_fw_version) { 3797 + if (phy_fw_version) { 3798 3798 new_phy_fw_vers = phy_fw_version(phy_fw_data, phy_fw_size); 3799 3799 ret = t4_phy_fw_ver(adap, &cur_phy_fw_ver); 3800 3800 if (ret < 0)
+1 -1
drivers/net/ethernet/chelsio/cxgb4vf/sge.c
··· 756 756 * Allocate the hardware ring and PCI DMA bus address space for said. 757 757 */ 758 758 size_t hwlen = nelem * hwsize + stat_size; 759 - void *hwring = dma_zalloc_coherent(dev, hwlen, busaddrp, GFP_KERNEL); 759 + void *hwring = dma_alloc_coherent(dev, hwlen, busaddrp, GFP_KERNEL); 760 760 761 761 if (!hwring) 762 762 return NULL;
+34 -34
drivers/net/ethernet/emulex/benet/be_cmds.c
··· 1808 1808 total_size = buf_len; 1809 1809 1810 1810 get_fat_cmd.size = sizeof(struct be_cmd_req_get_fat) + 60*1024; 1811 - get_fat_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, 1812 - get_fat_cmd.size, 1813 - &get_fat_cmd.dma, GFP_ATOMIC); 1811 + get_fat_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, 1812 + get_fat_cmd.size, 1813 + &get_fat_cmd.dma, GFP_ATOMIC); 1814 1814 if (!get_fat_cmd.va) 1815 1815 return -ENOMEM; 1816 1816 ··· 2302 2302 return -EINVAL; 2303 2303 2304 2304 cmd.size = sizeof(struct be_cmd_resp_port_type); 2305 - cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma, 2306 - GFP_ATOMIC); 2305 + cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma, 2306 + GFP_ATOMIC); 2307 2307 if (!cmd.va) { 2308 2308 dev_err(&adapter->pdev->dev, "Memory allocation failed\n"); 2309 2309 return -ENOMEM; ··· 3066 3066 3067 3067 flash_cmd.size = sizeof(struct lancer_cmd_req_write_object) 3068 3068 + LANCER_FW_DOWNLOAD_CHUNK; 3069 - flash_cmd.va = dma_zalloc_coherent(dev, flash_cmd.size, 3070 - &flash_cmd.dma, GFP_KERNEL); 3069 + flash_cmd.va = dma_alloc_coherent(dev, flash_cmd.size, &flash_cmd.dma, 3070 + GFP_KERNEL); 3071 3071 if (!flash_cmd.va) 3072 3072 return -ENOMEM; 3073 3073 ··· 3184 3184 } 3185 3185 3186 3186 flash_cmd.size = sizeof(struct be_cmd_write_flashrom); 3187 - flash_cmd.va = dma_zalloc_coherent(dev, flash_cmd.size, &flash_cmd.dma, 3188 - GFP_KERNEL); 3187 + flash_cmd.va = dma_alloc_coherent(dev, flash_cmd.size, &flash_cmd.dma, 3188 + GFP_KERNEL); 3189 3189 if (!flash_cmd.va) 3190 3190 return -ENOMEM; 3191 3191 ··· 3435 3435 goto err; 3436 3436 } 3437 3437 cmd.size = sizeof(struct be_cmd_req_get_phy_info); 3438 - cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma, 3439 - GFP_ATOMIC); 3438 + cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma, 3439 + GFP_ATOMIC); 3440 3440 if (!cmd.va) { 3441 3441 dev_err(&adapter->pdev->dev, "Memory alloc failure\n"); 3442 3442 status = -ENOMEM; ··· 3522 3522 3523 3523 memset(&attribs_cmd, 0, sizeof(struct be_dma_mem)); 3524 3524 attribs_cmd.size = sizeof(struct be_cmd_resp_cntl_attribs); 3525 - attribs_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, 3526 - attribs_cmd.size, 3527 - &attribs_cmd.dma, GFP_ATOMIC); 3525 + attribs_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, 3526 + attribs_cmd.size, 3527 + &attribs_cmd.dma, GFP_ATOMIC); 3528 3528 if (!attribs_cmd.va) { 3529 3529 dev_err(&adapter->pdev->dev, "Memory allocation failure\n"); 3530 3530 status = -ENOMEM; ··· 3699 3699 3700 3700 memset(&get_mac_list_cmd, 0, sizeof(struct be_dma_mem)); 3701 3701 get_mac_list_cmd.size = sizeof(struct be_cmd_resp_get_mac_list); 3702 - get_mac_list_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, 3703 - get_mac_list_cmd.size, 3704 - &get_mac_list_cmd.dma, 3705 - GFP_ATOMIC); 3702 + get_mac_list_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, 3703 + get_mac_list_cmd.size, 3704 + &get_mac_list_cmd.dma, 3705 + GFP_ATOMIC); 3706 3706 3707 3707 if (!get_mac_list_cmd.va) { 3708 3708 dev_err(&adapter->pdev->dev, ··· 3829 3829 3830 3830 memset(&cmd, 0, sizeof(struct be_dma_mem)); 3831 3831 cmd.size = sizeof(struct be_cmd_req_set_mac_list); 3832 - cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma, 3833 - GFP_KERNEL); 3832 + cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma, 3833 + GFP_KERNEL); 3834 3834 if (!cmd.va) 3835 3835 return -ENOMEM; 3836 3836 ··· 4035 4035 4036 4036 memset(&cmd, 0, sizeof(struct be_dma_mem)); 4037 4037 cmd.size = 
sizeof(struct be_cmd_resp_acpi_wol_magic_config_v1); 4038 - cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma, 4039 - GFP_ATOMIC); 4038 + cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma, 4039 + GFP_ATOMIC); 4040 4040 if (!cmd.va) { 4041 4041 dev_err(&adapter->pdev->dev, "Memory allocation failure\n"); 4042 4042 status = -ENOMEM; ··· 4089 4089 4090 4090 memset(&extfat_cmd, 0, sizeof(struct be_dma_mem)); 4091 4091 extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps); 4092 - extfat_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, 4093 - extfat_cmd.size, &extfat_cmd.dma, 4094 - GFP_ATOMIC); 4092 + extfat_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, 4093 + extfat_cmd.size, &extfat_cmd.dma, 4094 + GFP_ATOMIC); 4095 4095 if (!extfat_cmd.va) 4096 4096 return -ENOMEM; 4097 4097 ··· 4127 4127 4128 4128 memset(&extfat_cmd, 0, sizeof(struct be_dma_mem)); 4129 4129 extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps); 4130 - extfat_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, 4131 - extfat_cmd.size, &extfat_cmd.dma, 4132 - GFP_ATOMIC); 4130 + extfat_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, 4131 + extfat_cmd.size, &extfat_cmd.dma, 4132 + GFP_ATOMIC); 4133 4133 4134 4134 if (!extfat_cmd.va) { 4135 4135 dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n", ··· 4354 4354 4355 4355 memset(&cmd, 0, sizeof(struct be_dma_mem)); 4356 4356 cmd.size = sizeof(struct be_cmd_resp_get_func_config); 4357 - cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma, 4358 - GFP_ATOMIC); 4357 + cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma, 4358 + GFP_ATOMIC); 4359 4359 if (!cmd.va) { 4360 4360 dev_err(&adapter->pdev->dev, "Memory alloc failure\n"); 4361 4361 status = -ENOMEM; ··· 4452 4452 4453 4453 memset(&cmd, 0, sizeof(struct be_dma_mem)); 4454 4454 cmd.size = sizeof(struct be_cmd_resp_get_profile_config); 4455 - cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma, 4456 - GFP_ATOMIC); 4455 + cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma, 4456 + GFP_ATOMIC); 4457 4457 if (!cmd.va) 4458 4458 return -ENOMEM; 4459 4459 ··· 4539 4539 4540 4540 memset(&cmd, 0, sizeof(struct be_dma_mem)); 4541 4541 cmd.size = sizeof(struct be_cmd_req_set_profile_config); 4542 - cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma, 4543 - GFP_ATOMIC); 4542 + cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma, 4543 + GFP_ATOMIC); 4544 4544 if (!cmd.va) 4545 4545 return -ENOMEM; 4546 4546
+9 -9
drivers/net/ethernet/emulex/benet/be_ethtool.c
··· 274 274 int status = 0; 275 275 276 276 read_cmd.size = LANCER_READ_FILE_CHUNK; 277 - read_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, read_cmd.size, 278 - &read_cmd.dma, GFP_ATOMIC); 277 + read_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, read_cmd.size, 278 + &read_cmd.dma, GFP_ATOMIC); 279 279 280 280 if (!read_cmd.va) { 281 281 dev_err(&adapter->pdev->dev, ··· 815 815 } 816 816 817 817 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config); 818 - cmd.va = dma_zalloc_coherent(dev, cmd.size, &cmd.dma, GFP_KERNEL); 818 + cmd.va = dma_alloc_coherent(dev, cmd.size, &cmd.dma, GFP_KERNEL); 819 819 if (!cmd.va) 820 820 return -ENOMEM; 821 821 ··· 851 851 }; 852 852 853 853 ddrdma_cmd.size = sizeof(struct be_cmd_req_ddrdma_test); 854 - ddrdma_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, 855 - ddrdma_cmd.size, &ddrdma_cmd.dma, 856 - GFP_KERNEL); 854 + ddrdma_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, 855 + ddrdma_cmd.size, &ddrdma_cmd.dma, 856 + GFP_KERNEL); 857 857 if (!ddrdma_cmd.va) 858 858 return -ENOMEM; 859 859 ··· 1014 1014 1015 1015 memset(&eeprom_cmd, 0, sizeof(struct be_dma_mem)); 1016 1016 eeprom_cmd.size = sizeof(struct be_cmd_req_seeprom_read); 1017 - eeprom_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, 1018 - eeprom_cmd.size, &eeprom_cmd.dma, 1019 - GFP_KERNEL); 1017 + eeprom_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, 1018 + eeprom_cmd.size, &eeprom_cmd.dma, 1019 + GFP_KERNEL); 1020 1020 1021 1021 if (!eeprom_cmd.va) 1022 1022 return -ENOMEM;
+9 -9
drivers/net/ethernet/emulex/benet/be_main.c
··· 167 167 q->len = len; 168 168 q->entry_size = entry_size; 169 169 mem->size = len * entry_size; 170 - mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma, 171 - GFP_KERNEL); 170 + mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, 171 + &mem->dma, GFP_KERNEL); 172 172 if (!mem->va) 173 173 return -ENOMEM; 174 174 return 0; ··· 5766 5766 int status = 0; 5767 5767 5768 5768 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16; 5769 - mbox_mem_alloc->va = dma_zalloc_coherent(dev, mbox_mem_alloc->size, 5770 - &mbox_mem_alloc->dma, 5771 - GFP_KERNEL); 5769 + mbox_mem_alloc->va = dma_alloc_coherent(dev, mbox_mem_alloc->size, 5770 + &mbox_mem_alloc->dma, 5771 + GFP_KERNEL); 5772 5772 if (!mbox_mem_alloc->va) 5773 5773 return -ENOMEM; 5774 5774 ··· 5777 5777 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16); 5778 5778 5779 5779 rx_filter->size = sizeof(struct be_cmd_req_rx_filter); 5780 - rx_filter->va = dma_zalloc_coherent(dev, rx_filter->size, 5781 - &rx_filter->dma, GFP_KERNEL); 5780 + rx_filter->va = dma_alloc_coherent(dev, rx_filter->size, 5781 + &rx_filter->dma, GFP_KERNEL); 5782 5782 if (!rx_filter->va) { 5783 5783 status = -ENOMEM; 5784 5784 goto free_mbox; ··· 5792 5792 stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v1); 5793 5793 else 5794 5794 stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v2); 5795 - stats_cmd->va = dma_zalloc_coherent(dev, stats_cmd->size, 5796 - &stats_cmd->dma, GFP_KERNEL); 5795 + stats_cmd->va = dma_alloc_coherent(dev, stats_cmd->size, 5796 + &stats_cmd->dma, GFP_KERNEL); 5797 5797 if (!stats_cmd->va) { 5798 5798 status = -ENOMEM; 5799 5799 goto free_rx_filter;
+6 -8
drivers/net/ethernet/faraday/ftgmac100.c
··· 935 935 return -ENOMEM; 936 936 937 937 /* Allocate descriptors */ 938 - priv->rxdes = dma_zalloc_coherent(priv->dev, 939 - MAX_RX_QUEUE_ENTRIES * 940 - sizeof(struct ftgmac100_rxdes), 941 - &priv->rxdes_dma, GFP_KERNEL); 938 + priv->rxdes = dma_alloc_coherent(priv->dev, 939 + MAX_RX_QUEUE_ENTRIES * sizeof(struct ftgmac100_rxdes), 940 + &priv->rxdes_dma, GFP_KERNEL); 942 941 if (!priv->rxdes) 943 942 return -ENOMEM; 944 - priv->txdes = dma_zalloc_coherent(priv->dev, 945 - MAX_TX_QUEUE_ENTRIES * 946 - sizeof(struct ftgmac100_txdes), 947 - &priv->txdes_dma, GFP_KERNEL); 943 + priv->txdes = dma_alloc_coherent(priv->dev, 944 + MAX_TX_QUEUE_ENTRIES * sizeof(struct ftgmac100_txdes), 945 + &priv->txdes_dma, GFP_KERNEL); 948 946 if (!priv->txdes) 949 947 return -ENOMEM; 950 948
+3 -4
drivers/net/ethernet/faraday/ftmac100.c
··· 734 734 { 735 735 int i; 736 736 737 - priv->descs = dma_zalloc_coherent(priv->dev, 738 - sizeof(struct ftmac100_descs), 739 - &priv->descs_dma_addr, 740 - GFP_KERNEL); 737 + priv->descs = dma_alloc_coherent(priv->dev, 738 + sizeof(struct ftmac100_descs), 739 + &priv->descs_dma_addr, GFP_KERNEL); 741 740 if (!priv->descs) 742 741 return -ENOMEM; 743 742
+6
drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
··· 2051 2051 bool nonlinear = skb_is_nonlinear(skb); 2052 2052 struct rtnl_link_stats64 *percpu_stats; 2053 2053 struct dpaa_percpu_priv *percpu_priv; 2054 + struct netdev_queue *txq; 2054 2055 struct dpaa_priv *priv; 2055 2056 struct qm_fd fd; 2056 2057 int offset = 0; ··· 2100 2099 } 2101 2100 if (unlikely(err < 0)) 2102 2101 goto skb_to_fd_failed; 2102 + 2103 + txq = netdev_get_tx_queue(net_dev, queue_mapping); 2104 + 2105 + /* LLTX requires to do our own update of trans_start */ 2106 + txq->trans_start = jiffies; 2103 2107 2104 2108 if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) { 2105 2109 fd.cmd |= cpu_to_be32(FM_FD_CMD_UPD);
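As the new comment in the hunk says, dpaa sets NETIF_F_LLTX, and LLTX drivers bypass the core transmit path that normally refreshes txq->trans_start; left stale, dev_watchdog() compares it against jiffies and can fire spurious TX-timeout resets. The generic shape of the fix, sketched with hypothetical foo_* names (netdev_get_tx_queue(), skb_get_queue_mapping() and trans_start are the real kernel interfaces):

```c
#include <linux/netdevice.h>
#include <linux/skbuff.h>

static netdev_tx_t foo_start_xmit(struct sk_buff *skb,
				  struct net_device *dev)
{
	struct netdev_queue *txq;

	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
	txq->trans_start = jiffies;	/* LLTX: the core won't do this */

	/* ... hand the frame to hardware ... */
	return NETDEV_TX_OK;
}
```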
+2 -2
drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
··· 1006 1006 1007 1007 for (i = 0; i < QUEUE_NUMS; i++) { 1008 1008 size = priv->pool[i].count * sizeof(struct hix5hd2_desc); 1009 - virt_addr = dma_zalloc_coherent(dev, size, &phys_addr, 1010 - GFP_KERNEL); 1009 + virt_addr = dma_alloc_coherent(dev, size, &phys_addr, 1010 + GFP_KERNEL); 1011 1011 if (virt_addr == NULL) 1012 1012 goto error_free_pool; 1013 1013
+2 -4
drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
··· 147 147 struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle); 148 148 int i; 149 149 150 - vf_cb->mac_cb = NULL; 151 - 152 - kfree(vf_cb); 153 - 154 150 for (i = 0; i < handle->q_num; i++) 155 151 hns_ae_get_ring_pair(handle->qs[i])->used_by_vf = 0; 152 + 153 + kfree(vf_cb); 156 154 } 157 155 158 156 static int hns_ae_wait_flow_down(struct hnae_handle *handle)
+7 -10
drivers/net/ethernet/hisilicon/hns/hns_enet.c
··· 1170 1170 if (!h->phy_dev) 1171 1171 return 0; 1172 1172 1173 + ethtool_convert_legacy_u32_to_link_mode(supported, h->if_support); 1174 + linkmode_and(phy_dev->supported, phy_dev->supported, supported); 1175 + linkmode_copy(phy_dev->advertising, phy_dev->supported); 1176 + 1177 + if (h->phy_if == PHY_INTERFACE_MODE_XGMII) 1178 + phy_dev->autoneg = false; 1179 + 1173 1180 if (h->phy_if != PHY_INTERFACE_MODE_XGMII) { 1174 1181 phy_dev->dev_flags = 0; 1175 1182 ··· 1187 1180 } 1188 1181 if (unlikely(ret)) 1189 1182 return -ENODEV; 1190 - 1191 - ethtool_convert_legacy_u32_to_link_mode(supported, h->if_support); 1192 - linkmode_and(phy_dev->supported, phy_dev->supported, supported); 1193 - linkmode_copy(phy_dev->advertising, phy_dev->supported); 1194 - 1195 - if (h->phy_if == PHY_INTERFACE_MODE_XGMII) 1196 - phy_dev->autoneg = false; 1197 - 1198 - if (h->phy_if == PHY_INTERFACE_MODE_SGMII) 1199 - phy_stop(phy_dev); 1200 1183 1201 1184 return 0; 1202 1185 }
+2 -3
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
··· 2041 2041 { 2042 2042 int size = ring->desc_num * sizeof(ring->desc[0]); 2043 2043 2044 - ring->desc = dma_zalloc_coherent(ring_to_dev(ring), size, 2045 - &ring->desc_dma_addr, 2046 - GFP_KERNEL); 2044 + ring->desc = dma_alloc_coherent(ring_to_dev(ring), size, 2045 + &ring->desc_dma_addr, GFP_KERNEL); 2047 2046 if (!ring->desc) 2048 2047 return -ENOMEM; 2049 2048
+2 -3
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
··· 39 39 { 40 40 int size = ring->desc_num * sizeof(struct hclge_desc); 41 41 42 - ring->desc = dma_zalloc_coherent(cmq_ring_to_dev(ring), 43 - size, &ring->desc_dma_addr, 44 - GFP_KERNEL); 42 + ring->desc = dma_alloc_coherent(cmq_ring_to_dev(ring), size, 43 + &ring->desc_dma_addr, GFP_KERNEL); 45 44 if (!ring->desc) 46 45 return -ENOMEM; 47 46
+2 -3
drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
··· 115 115 { 116 116 int size = ring->desc_num * sizeof(struct hclgevf_desc); 117 117 118 - ring->desc = dma_zalloc_coherent(cmq_ring_to_dev(ring), 119 - size, &ring->desc_dma_addr, 120 - GFP_KERNEL); 118 + ring->desc = dma_alloc_coherent(cmq_ring_to_dev(ring), size, 119 + &ring->desc_dma_addr, GFP_KERNEL); 121 120 if (!ring->desc) 122 121 return -ENOMEM; 123 122
+8 -8
drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c
··· 613 613 u8 *cmd_vaddr; 614 614 int err = 0; 615 615 616 - cmd_vaddr = dma_zalloc_coherent(&pdev->dev, API_CMD_BUF_SIZE, 617 - &cmd_paddr, GFP_KERNEL); 616 + cmd_vaddr = dma_alloc_coherent(&pdev->dev, API_CMD_BUF_SIZE, 617 + &cmd_paddr, GFP_KERNEL); 618 618 if (!cmd_vaddr) { 619 619 dev_err(&pdev->dev, "Failed to allocate API CMD DMA memory\n"); 620 620 return -ENOMEM; ··· 663 663 dma_addr_t node_paddr; 664 664 int err; 665 665 666 - node = dma_zalloc_coherent(&pdev->dev, chain->cell_size, 667 - &node_paddr, GFP_KERNEL); 666 + node = dma_alloc_coherent(&pdev->dev, chain->cell_size, &node_paddr, 667 + GFP_KERNEL); 668 668 if (!node) { 669 669 dev_err(&pdev->dev, "Failed to allocate dma API CMD cell\n"); 670 670 return -ENOMEM; ··· 821 821 if (!chain->cell_ctxt) 822 822 return -ENOMEM; 823 823 824 - chain->wb_status = dma_zalloc_coherent(&pdev->dev, 825 - sizeof(*chain->wb_status), 826 - &chain->wb_status_paddr, 827 - GFP_KERNEL); 824 + chain->wb_status = dma_alloc_coherent(&pdev->dev, 825 + sizeof(*chain->wb_status), 826 + &chain->wb_status_paddr, 827 + GFP_KERNEL); 828 828 if (!chain->wb_status) { 829 829 dev_err(&pdev->dev, "Failed to allocate DMA wb status\n"); 830 830 return -ENOMEM;
+4 -4
drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c
··· 593 593 } 594 594 595 595 for (pg = 0; pg < eq->num_pages; pg++) { 596 - eq->virt_addr[pg] = dma_zalloc_coherent(&pdev->dev, 597 - eq->page_size, 598 - &eq->dma_addr[pg], 599 - GFP_KERNEL); 596 + eq->virt_addr[pg] = dma_alloc_coherent(&pdev->dev, 597 + eq->page_size, 598 + &eq->dma_addr[pg], 599 + GFP_KERNEL); 600 600 if (!eq->virt_addr[pg]) { 601 601 err = -ENOMEM; 602 602 goto err_dma_alloc;
+3 -3
drivers/net/ethernet/huawei/hinic/hinic_hw_io.c
··· 355 355 goto err_sq_db; 356 356 } 357 357 358 - ci_addr_base = dma_zalloc_coherent(&pdev->dev, CI_TABLE_SIZE(num_qps), 359 - &func_to_io->ci_dma_base, 360 - GFP_KERNEL); 358 + ci_addr_base = dma_alloc_coherent(&pdev->dev, CI_TABLE_SIZE(num_qps), 359 + &func_to_io->ci_dma_base, 360 + GFP_KERNEL); 361 361 if (!ci_addr_base) { 362 362 dev_err(&pdev->dev, "Failed to allocate CI area\n"); 363 363 err = -ENOMEM;
+5 -5
drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
··· 336 336 goto err_cqe_dma_arr_alloc; 337 337 338 338 for (i = 0; i < wq->q_depth; i++) { 339 - rq->cqe[i] = dma_zalloc_coherent(&pdev->dev, 340 - sizeof(*rq->cqe[i]), 341 - &rq->cqe_dma[i], GFP_KERNEL); 339 + rq->cqe[i] = dma_alloc_coherent(&pdev->dev, 340 + sizeof(*rq->cqe[i]), 341 + &rq->cqe_dma[i], GFP_KERNEL); 342 342 if (!rq->cqe[i]) 343 343 goto err_cqe_alloc; 344 344 } ··· 415 415 416 416 /* HW requirements: Must be at least 32 bit */ 417 417 pi_size = ALIGN(sizeof(*rq->pi_virt_addr), sizeof(u32)); 418 - rq->pi_virt_addr = dma_zalloc_coherent(&pdev->dev, pi_size, 419 - &rq->pi_dma_addr, GFP_KERNEL); 418 + rq->pi_virt_addr = dma_alloc_coherent(&pdev->dev, pi_size, 419 + &rq->pi_dma_addr, GFP_KERNEL); 420 420 if (!rq->pi_virt_addr) { 421 421 dev_err(&pdev->dev, "Failed to allocate PI address\n"); 422 422 err = -ENOMEM;
+4 -4
drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c
··· 114 114 struct pci_dev *pdev = hwif->pdev; 115 115 dma_addr_t dma_addr; 116 116 117 - *vaddr = dma_zalloc_coherent(&pdev->dev, page_sz, &dma_addr, 118 - GFP_KERNEL); 117 + *vaddr = dma_alloc_coherent(&pdev->dev, page_sz, &dma_addr, 118 + GFP_KERNEL); 119 119 if (!*vaddr) { 120 120 dev_err(&pdev->dev, "Failed to allocate dma for wqs page\n"); 121 121 return -ENOMEM; ··· 482 482 u64 *paddr = &wq->block_vaddr[i]; 483 483 dma_addr_t dma_addr; 484 484 485 - *vaddr = dma_zalloc_coherent(&pdev->dev, wq->wq_page_size, 486 - &dma_addr, GFP_KERNEL); 485 + *vaddr = dma_alloc_coherent(&pdev->dev, wq->wq_page_size, 486 + &dma_addr, GFP_KERNEL); 487 487 if (!*vaddr) { 488 488 dev_err(&pdev->dev, "Failed to allocate wq page\n"); 489 489 goto err_alloc_wq_pages;
+2 -2
drivers/net/ethernet/ibm/emac/mal.c
··· 636 636 bd_size = sizeof(struct mal_descriptor) * 637 637 (NUM_TX_BUFF * mal->num_tx_chans + 638 638 NUM_RX_BUFF * mal->num_rx_chans); 639 - mal->bd_virt = dma_zalloc_coherent(&ofdev->dev, bd_size, &mal->bd_dma, 640 - GFP_KERNEL); 639 + mal->bd_virt = dma_alloc_coherent(&ofdev->dev, bd_size, &mal->bd_dma, 640 + GFP_KERNEL); 641 641 if (mal->bd_virt == NULL) { 642 642 err = -ENOMEM; 643 643 goto fail_unmap;
+1 -1
drivers/net/ethernet/intel/Kconfig
··· 159 159 tristate "Intel(R) 10GbE PCI Express adapters support" 160 160 depends on PCI 161 161 select MDIO 162 - select MDIO_DEVICE 162 + select PHYLIB 163 163 imply PTP_1588_CLOCK 164 164 ---help--- 165 165 This driver supports Intel(R) 10GbE PCI Express family of
+4 -4
drivers/net/ethernet/intel/e1000/e1000_ethtool.c
··· 993 993 994 994 txdr->size = txdr->count * sizeof(struct e1000_tx_desc); 995 995 txdr->size = ALIGN(txdr->size, 4096); 996 - txdr->desc = dma_zalloc_coherent(&pdev->dev, txdr->size, &txdr->dma, 997 - GFP_KERNEL); 996 + txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma, 997 + GFP_KERNEL); 998 998 if (!txdr->desc) { 999 999 ret_val = 2; 1000 1000 goto err_nomem; ··· 1051 1051 } 1052 1052 1053 1053 rxdr->size = rxdr->count * sizeof(struct e1000_rx_desc); 1054 - rxdr->desc = dma_zalloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma, 1055 - GFP_KERNEL); 1054 + rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma, 1055 + GFP_KERNEL); 1056 1056 if (!rxdr->desc) { 1057 1057 ret_val = 6; 1058 1058 goto err_nomem;
+2 -2
drivers/net/ethernet/intel/e1000e/netdev.c
··· 2305 2305 { 2306 2306 struct pci_dev *pdev = adapter->pdev; 2307 2307 2308 - ring->desc = dma_zalloc_coherent(&pdev->dev, ring->size, &ring->dma, 2309 - GFP_KERNEL); 2308 + ring->desc = dma_alloc_coherent(&pdev->dev, ring->size, &ring->dma, 2309 + GFP_KERNEL); 2310 2310 if (!ring->desc) 2311 2311 return -ENOMEM; 2312 2312
+2 -2
drivers/net/ethernet/intel/i40e/i40e_main.c
··· 109 109 struct i40e_pf *pf = (struct i40e_pf *)hw->back; 110 110 111 111 mem->size = ALIGN(size, alignment); 112 - mem->va = dma_zalloc_coherent(&pf->pdev->dev, mem->size, 113 - &mem->pa, GFP_KERNEL); 112 + mem->va = dma_alloc_coherent(&pf->pdev->dev, mem->size, &mem->pa, 113 + GFP_KERNEL); 114 114 if (!mem->va) 115 115 return -ENOMEM; 116 116
+1 -1
drivers/net/ethernet/intel/igb/igb.h
··· 515 515 /* OS defined structs */ 516 516 struct pci_dev *pdev; 517 517 518 - struct mutex stats64_lock; 518 + spinlock_t stats64_lock; 519 519 struct rtnl_link_stats64 stats64; 520 520 521 521 /* structs defined in e1000_hw.h */
+2 -2
drivers/net/ethernet/intel/igb/igb_ethtool.c
··· 2295 2295 int i, j; 2296 2296 char *p; 2297 2297 2298 - mutex_lock(&adapter->stats64_lock); 2298 + spin_lock(&adapter->stats64_lock); 2299 2299 igb_update_stats(adapter); 2300 2300 2301 2301 for (i = 0; i < IGB_GLOBAL_STATS_LEN; i++) { ··· 2338 2338 } while (u64_stats_fetch_retry_irq(&ring->rx_syncp, start)); 2339 2339 i += IGB_RX_QUEUE_STATS_LEN; 2340 2340 } 2341 - mutex_unlock(&adapter->stats64_lock); 2341 + spin_unlock(&adapter->stats64_lock); 2342 2342 } 2343 2343 2344 2344 static void igb_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
+7 -7
drivers/net/ethernet/intel/igb/igb_main.c
··· 2203 2203 del_timer_sync(&adapter->phy_info_timer); 2204 2204 2205 2205 /* record the stats before reset*/ 2206 - mutex_lock(&adapter->stats64_lock); 2206 + spin_lock(&adapter->stats64_lock); 2207 2207 igb_update_stats(adapter); 2208 - mutex_unlock(&adapter->stats64_lock); 2208 + spin_unlock(&adapter->stats64_lock); 2209 2209 2210 2210 adapter->link_speed = 0; 2211 2211 adapter->link_duplex = 0; ··· 3840 3840 adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN; 3841 3841 3842 3842 spin_lock_init(&adapter->nfc_lock); 3843 - mutex_init(&adapter->stats64_lock); 3843 + spin_lock_init(&adapter->stats64_lock); 3844 3844 #ifdef CONFIG_PCI_IOV 3845 3845 switch (hw->mac.type) { 3846 3846 case e1000_82576: ··· 5406 5406 } 5407 5407 } 5408 5408 5409 - mutex_lock(&adapter->stats64_lock); 5409 + spin_lock(&adapter->stats64_lock); 5410 5410 igb_update_stats(adapter); 5411 - mutex_unlock(&adapter->stats64_lock); 5411 + spin_unlock(&adapter->stats64_lock); 5412 5412 5413 5413 for (i = 0; i < adapter->num_tx_queues; i++) { 5414 5414 struct igb_ring *tx_ring = adapter->tx_ring[i]; ··· 6235 6235 { 6236 6236 struct igb_adapter *adapter = netdev_priv(netdev); 6237 6237 6238 - mutex_lock(&adapter->stats64_lock); 6238 + spin_lock(&adapter->stats64_lock); 6239 6239 igb_update_stats(adapter); 6240 6240 memcpy(stats, &adapter->stats64, sizeof(*stats)); 6241 - mutex_unlock(&adapter->stats64_lock); 6241 + spin_unlock(&adapter->stats64_lock); 6242 6242 } 6243 6243 6244 6244 /**
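The igb hunks (together with the igb.h and igb_ethtool.c changes above) convert stats64_lock from a mutex to a spinlock because the stats path must be usable from contexts that cannot sleep: .ndo_get_stats64 is required to be non-sleeping (dev_get_stats() can run under RCU), and mutex_lock() may sleep while spin_lock() never does. The resulting pattern, sketched with hypothetical foo_* names:

```c
#include <linux/netdevice.h>
#include <linux/spinlock.h>

struct foo_adapter {
	spinlock_t stats64_lock;
	struct rtnl_link_stats64 stats64;
};

void foo_update_stats(struct foo_adapter *adapter);	/* must not sleep */

static void foo_get_stats64(struct net_device *netdev,
			    struct rtnl_link_stats64 *stats)
{
	struct foo_adapter *adapter = netdev_priv(netdev);

	spin_lock(&adapter->stats64_lock);	/* was mutex_lock() */
	foo_update_stats(adapter);
	memcpy(stats, &adapter->stats64, sizeof(*stats));
	spin_unlock(&adapter->stats64_lock);
}
```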
+4 -4
drivers/net/ethernet/intel/ixgb/ixgb_main.c
··· 680 680 txdr->size = txdr->count * sizeof(struct ixgb_tx_desc); 681 681 txdr->size = ALIGN(txdr->size, 4096); 682 682 683 - txdr->desc = dma_zalloc_coherent(&pdev->dev, txdr->size, &txdr->dma, 684 - GFP_KERNEL); 683 + txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma, 684 + GFP_KERNEL); 685 685 if (!txdr->desc) { 686 686 vfree(txdr->buffer_info); 687 687 return -ENOMEM; ··· 763 763 rxdr->size = rxdr->count * sizeof(struct ixgb_rx_desc); 764 764 rxdr->size = ALIGN(rxdr->size, 4096); 765 765 766 - rxdr->desc = dma_zalloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma, 767 - GFP_KERNEL); 766 + rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma, 767 + GFP_KERNEL); 768 768 769 769 if (!rxdr->desc) { 770 770 vfree(rxdr->buffer_info);
+3 -3
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
··· 2044 2044 u32 txq_dma; 2045 2045 2046 2046 /* Allocate memory for TX descriptors */ 2047 - aggr_txq->descs = dma_zalloc_coherent(&pdev->dev, 2048 - MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE, 2049 - &aggr_txq->descs_dma, GFP_KERNEL); 2047 + aggr_txq->descs = dma_alloc_coherent(&pdev->dev, 2048 + MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE, 2049 + &aggr_txq->descs_dma, GFP_KERNEL); 2050 2050 if (!aggr_txq->descs) 2051 2051 return -ENOMEM; 2052 2052
+3 -1
drivers/net/ethernet/marvell/octeontx2/af/cgx.c
··· 825 825 if (!cgx->cgx_cmd_workq) { 826 826 dev_err(dev, "alloc workqueue failed for cgx cmd"); 827 827 err = -ENOMEM; 828 - goto err_release_regions; 828 + goto err_free_irq_vectors; 829 829 } 830 830 831 831 list_add(&cgx->cgx_list, &cgx_list); ··· 841 841 err_release_lmac: 842 842 cgx_lmac_exit(cgx); 843 843 list_del(&cgx->cgx_list); 844 + err_free_irq_vectors: 845 + pci_free_irq_vectors(pdev); 844 846 err_release_regions: 845 847 pci_release_regions(pdev); 846 848 err_disable_device:
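The cgx change is an error-unwind ordering fix: when workqueue creation failed, the old path jumped to err_release_regions and leaked the MSI-X vectors allocated just before it. Kernel probe functions conventionally keep one label per successful step, unwinding in strict reverse order; a reduced sketch with hypothetical foo_* names (the PCI and workqueue calls are real APIs):

```c
#include <linux/pci.h>
#include <linux/workqueue.h>

static int foo_probe(struct pci_dev *pdev)
{
	struct workqueue_struct *wq;
	int err;

	err = pci_enable_device(pdev);
	if (err)
		return err;
	err = pci_request_regions(pdev, "foo");
	if (err)
		goto err_disable_device;
	err = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSIX);
	if (err < 0)
		goto err_release_regions;
	wq = alloc_workqueue("foo_cmd", 0, 0);
	if (!wq) {
		err = -ENOMEM;
		goto err_free_irq_vectors;	/* the step this fix adds */
	}
	/* ... stash wq in driver state and finish setup ... */
	return 0;

	/* unwind strictly mirrors the setup order */
err_free_irq_vectors:
	pci_free_irq_vectors(pdev);
err_release_regions:
	pci_release_regions(pdev);
err_disable_device:
	pci_disable_device(pdev);
	return err;
}
```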
+1 -1
drivers/net/ethernet/marvell/octeontx2/af/common.h
··· 64 64 65 65 qmem->entry_sz = entry_sz; 66 66 qmem->alloc_sz = (qsize * entry_sz) + OTX2_ALIGN; 67 - qmem->base = dma_zalloc_coherent(dev, qmem->alloc_sz, 67 + qmem->base = dma_alloc_coherent(dev, qmem->alloc_sz, 68 68 &qmem->iova, GFP_KERNEL); 69 69 if (!qmem->base) 70 70 return -ENOMEM;
+9 -9
drivers/net/ethernet/marvell/pxa168_eth.c
··· 557 557 * table is full. 558 558 */ 559 559 if (!pep->htpr) { 560 - pep->htpr = dma_zalloc_coherent(pep->dev->dev.parent, 561 - HASH_ADDR_TABLE_SIZE, 562 - &pep->htpr_dma, GFP_KERNEL); 560 + pep->htpr = dma_alloc_coherent(pep->dev->dev.parent, 561 + HASH_ADDR_TABLE_SIZE, 562 + &pep->htpr_dma, GFP_KERNEL); 563 563 if (!pep->htpr) 564 564 return -ENOMEM; 565 565 } else { ··· 1044 1044 pep->rx_desc_count = 0; 1045 1045 size = pep->rx_ring_size * sizeof(struct rx_desc); 1046 1046 pep->rx_desc_area_size = size; 1047 - pep->p_rx_desc_area = dma_zalloc_coherent(pep->dev->dev.parent, size, 1048 - &pep->rx_desc_dma, 1049 - GFP_KERNEL); 1047 + pep->p_rx_desc_area = dma_alloc_coherent(pep->dev->dev.parent, size, 1048 + &pep->rx_desc_dma, 1049 + GFP_KERNEL); 1050 1050 if (!pep->p_rx_desc_area) 1051 1051 goto out; 1052 1052 ··· 1103 1103 pep->tx_desc_count = 0; 1104 1104 size = pep->tx_ring_size * sizeof(struct tx_desc); 1105 1105 pep->tx_desc_area_size = size; 1106 - pep->p_tx_desc_area = dma_zalloc_coherent(pep->dev->dev.parent, size, 1107 - &pep->tx_desc_dma, 1108 - GFP_KERNEL); 1106 + pep->p_tx_desc_area = dma_alloc_coherent(pep->dev->dev.parent, size, 1107 + &pep->tx_desc_dma, 1108 + GFP_KERNEL); 1109 1109 if (!pep->p_tx_desc_area) 1110 1110 goto out; 1111 1111 /* Initialize the next_desc_ptr links in the Tx descriptors ring */
+9 -25
drivers/net/ethernet/mediatek/mtk_eth_soc.c
··· 258 258 259 259 mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id)); 260 260 261 - if (dev->phydev->link) 262 - netif_carrier_on(dev); 263 - else 264 - netif_carrier_off(dev); 265 - 266 261 if (!of_phy_is_fixed_link(mac->of_node)) 267 262 phy_print_status(dev->phydev); 268 263 } ··· 341 346 /* couple phydev to net_device */ 342 347 if (mtk_phy_connect_node(eth, mac, np)) 343 348 goto err_phy; 344 - 345 - dev->phydev->autoneg = AUTONEG_ENABLE; 346 - dev->phydev->speed = 0; 347 - dev->phydev->duplex = 0; 348 - 349 - phy_set_max_speed(dev->phydev, SPEED_1000); 350 - phy_support_asym_pause(dev->phydev); 351 - linkmode_copy(dev->phydev->advertising, dev->phydev->supported); 352 - linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, 353 - dev->phydev->advertising); 354 - phy_start_aneg(dev->phydev); 355 349 356 350 of_node_put(np); 357 351 ··· 582 598 dma_addr_t dma_addr; 583 599 int i; 584 600 585 - eth->scratch_ring = dma_zalloc_coherent(eth->dev, 586 - cnt * sizeof(struct mtk_tx_dma), 587 - &eth->phy_scratch_ring, 588 - GFP_ATOMIC); 601 + eth->scratch_ring = dma_alloc_coherent(eth->dev, 602 + cnt * sizeof(struct mtk_tx_dma), 603 + &eth->phy_scratch_ring, 604 + GFP_ATOMIC); 589 605 if (unlikely(!eth->scratch_ring)) 590 606 return -ENOMEM; 591 607 ··· 1197 1213 if (!ring->buf) 1198 1214 goto no_tx_mem; 1199 1215 1200 - ring->dma = dma_zalloc_coherent(eth->dev, MTK_DMA_SIZE * sz, 1201 - &ring->phys, GFP_ATOMIC); 1216 + ring->dma = dma_alloc_coherent(eth->dev, MTK_DMA_SIZE * sz, 1217 + &ring->phys, GFP_ATOMIC); 1202 1218 if (!ring->dma) 1203 1219 goto no_tx_mem; 1204 1220 ··· 1294 1310 return -ENOMEM; 1295 1311 } 1296 1312 1297 - ring->dma = dma_zalloc_coherent(eth->dev, 1298 - rx_dma_size * sizeof(*ring->dma), 1299 - &ring->phys, GFP_ATOMIC); 1313 + ring->dma = dma_alloc_coherent(eth->dev, 1314 + rx_dma_size * sizeof(*ring->dma), 1315 + &ring->phys, GFP_ATOMIC); 1300 1316 if (!ring->dma) 1301 1317 return -ENOMEM; 1302 1318
+4 -4
drivers/net/ethernet/mellanox/mlx4/alloc.c
··· 584 584 buf->npages = 1; 585 585 buf->page_shift = get_order(size) + PAGE_SHIFT; 586 586 buf->direct.buf = 587 - dma_zalloc_coherent(&dev->persist->pdev->dev, 588 - size, &t, GFP_KERNEL); 587 + dma_alloc_coherent(&dev->persist->pdev->dev, size, &t, 588 + GFP_KERNEL); 589 589 if (!buf->direct.buf) 590 590 return -ENOMEM; 591 591 ··· 624 624 625 625 for (i = 0; i < buf->nbufs; ++i) { 626 626 buf->page_list[i].buf = 627 - dma_zalloc_coherent(&dev->persist->pdev->dev, 628 - PAGE_SIZE, &t, GFP_KERNEL); 627 + dma_alloc_coherent(&dev->persist->pdev->dev, 628 + PAGE_SIZE, &t, GFP_KERNEL); 629 629 if (!buf->page_list[i].buf) 630 630 goto err_free; 631 631
+61 -42
drivers/net/ethernet/mellanox/mlx4/icm.c
··· 57 57 int i; 58 58 59 59 if (chunk->nsg > 0) 60 - pci_unmap_sg(dev->persist->pdev, chunk->mem, chunk->npages, 61 - PCI_DMA_BIDIRECTIONAL); 60 + dma_unmap_sg(&dev->persist->pdev->dev, chunk->sg, chunk->npages, 61 + DMA_BIDIRECTIONAL); 62 62 63 63 for (i = 0; i < chunk->npages; ++i) 64 - __free_pages(sg_page(&chunk->mem[i]), 65 - get_order(chunk->mem[i].length)); 64 + __free_pages(sg_page(&chunk->sg[i]), 65 + get_order(chunk->sg[i].length)); 66 66 } 67 67 68 68 static void mlx4_free_icm_coherent(struct mlx4_dev *dev, struct mlx4_icm_chunk *chunk) ··· 71 71 72 72 for (i = 0; i < chunk->npages; ++i) 73 73 dma_free_coherent(&dev->persist->pdev->dev, 74 - chunk->mem[i].length, 75 - lowmem_page_address(sg_page(&chunk->mem[i])), 76 - sg_dma_address(&chunk->mem[i])); 74 + chunk->buf[i].size, 75 + chunk->buf[i].addr, 76 + chunk->buf[i].dma_addr); 77 77 } 78 78 79 79 void mlx4_free_icm(struct mlx4_dev *dev, struct mlx4_icm *icm, int coherent) ··· 111 111 return 0; 112 112 } 113 113 114 - static int mlx4_alloc_icm_coherent(struct device *dev, struct scatterlist *mem, 115 - int order, gfp_t gfp_mask) 114 + static int mlx4_alloc_icm_coherent(struct device *dev, struct mlx4_icm_buf *buf, 115 + int order, gfp_t gfp_mask) 116 116 { 117 - void *buf = dma_alloc_coherent(dev, PAGE_SIZE << order, 118 - &sg_dma_address(mem), gfp_mask); 119 - if (!buf) 117 + buf->addr = dma_alloc_coherent(dev, PAGE_SIZE << order, 118 + &buf->dma_addr, gfp_mask); 119 + if (!buf->addr) 120 120 return -ENOMEM; 121 121 122 - if (offset_in_page(buf)) { 123 - dma_free_coherent(dev, PAGE_SIZE << order, 124 - buf, sg_dma_address(mem)); 122 + if (offset_in_page(buf->addr)) { 123 + dma_free_coherent(dev, PAGE_SIZE << order, buf->addr, 124 + buf->dma_addr); 125 125 return -ENOMEM; 126 126 } 127 127 128 - sg_set_buf(mem, buf, PAGE_SIZE << order); 129 - sg_dma_len(mem) = PAGE_SIZE << order; 128 + buf->size = PAGE_SIZE << order; 130 129 return 0; 131 130 } 132 131 ··· 158 159 159 160 while (npages > 0) { 160 161 if (!chunk) { 161 - chunk = kmalloc_node(sizeof(*chunk), 162 + chunk = kzalloc_node(sizeof(*chunk), 162 163 gfp_mask & ~(__GFP_HIGHMEM | 163 164 __GFP_NOWARN), 164 165 dev->numa_node); 165 166 if (!chunk) { 166 - chunk = kmalloc(sizeof(*chunk), 167 + chunk = kzalloc(sizeof(*chunk), 167 168 gfp_mask & ~(__GFP_HIGHMEM | 168 169 __GFP_NOWARN)); 169 170 if (!chunk) 170 171 goto fail; 171 172 } 173 + chunk->coherent = coherent; 172 174 173 - sg_init_table(chunk->mem, MLX4_ICM_CHUNK_LEN); 174 - chunk->npages = 0; 175 - chunk->nsg = 0; 175 + if (!coherent) 176 + sg_init_table(chunk->sg, MLX4_ICM_CHUNK_LEN); 176 177 list_add_tail(&chunk->list, &icm->chunk_list); 177 178 } 178 179 ··· 185 186 186 187 if (coherent) 187 188 ret = mlx4_alloc_icm_coherent(&dev->persist->pdev->dev, 188 - &chunk->mem[chunk->npages], 189 - cur_order, mask); 189 + &chunk->buf[chunk->npages], 190 + cur_order, mask); 190 191 else 191 - ret = mlx4_alloc_icm_pages(&chunk->mem[chunk->npages], 192 + ret = mlx4_alloc_icm_pages(&chunk->sg[chunk->npages], 192 193 cur_order, mask, 193 194 dev->numa_node); 194 195 ··· 204 205 if (coherent) 205 206 ++chunk->nsg; 206 207 else if (chunk->npages == MLX4_ICM_CHUNK_LEN) { 207 - chunk->nsg = pci_map_sg(dev->persist->pdev, chunk->mem, 208 - chunk->npages, 209 - PCI_DMA_BIDIRECTIONAL); 208 + chunk->nsg = dma_map_sg(&dev->persist->pdev->dev, 209 + chunk->sg, chunk->npages, 210 + DMA_BIDIRECTIONAL); 210 211 211 212 if (chunk->nsg <= 0) 212 213 goto fail; ··· 219 220 } 220 221 221 222 if (!coherent && chunk) { 222 - chunk->nsg = 
pci_map_sg(dev->persist->pdev, chunk->mem, 223 - chunk->npages, 224 - PCI_DMA_BIDIRECTIONAL); 223 + chunk->nsg = dma_map_sg(&dev->persist->pdev->dev, chunk->sg, 224 + chunk->npages, DMA_BIDIRECTIONAL); 225 225 226 226 if (chunk->nsg <= 0) 227 227 goto fail; ··· 318 320 u64 idx; 319 321 struct mlx4_icm_chunk *chunk; 320 322 struct mlx4_icm *icm; 321 - struct page *page = NULL; 323 + void *addr = NULL; 322 324 323 325 if (!table->lowmem) 324 326 return NULL; ··· 334 336 335 337 list_for_each_entry(chunk, &icm->chunk_list, list) { 336 338 for (i = 0; i < chunk->npages; ++i) { 337 - if (dma_handle && dma_offset >= 0) { 338 - if (sg_dma_len(&chunk->mem[i]) > dma_offset) 339 - *dma_handle = sg_dma_address(&chunk->mem[i]) + 340 - dma_offset; 341 - dma_offset -= sg_dma_len(&chunk->mem[i]); 339 + dma_addr_t dma_addr; 340 + size_t len; 341 + 342 + if (table->coherent) { 343 + len = chunk->buf[i].size; 344 + dma_addr = chunk->buf[i].dma_addr; 345 + addr = chunk->buf[i].addr; 346 + } else { 347 + struct page *page; 348 + 349 + len = sg_dma_len(&chunk->sg[i]); 350 + dma_addr = sg_dma_address(&chunk->sg[i]); 351 + 352 + /* XXX: we should never do this for highmem 353 + * allocation. This function either needs 354 + * to be split, or the kernel virtual address 355 + * return needs to be made optional. 356 + */ 357 + page = sg_page(&chunk->sg[i]); 358 + addr = lowmem_page_address(page); 342 359 } 360 + 361 + if (dma_handle && dma_offset >= 0) { 362 + if (len > dma_offset) 363 + *dma_handle = dma_addr + dma_offset; 364 + dma_offset -= len; 365 + } 366 + 343 367 /* 344 368 * DMA mapping can merge pages but not split them, 345 369 * so if we found the page, dma_handle has already 346 370 * been assigned to. 347 371 */ 348 - if (chunk->mem[i].length > offset) { 349 - page = sg_page(&chunk->mem[i]); 372 + if (len > offset) 350 373 goto out; 351 - } 352 - offset -= chunk->mem[i].length; 374 + offset -= len; 353 375 } 354 376 } 355 377 378 + addr = NULL; 356 379 out: 357 380 mutex_unlock(&table->mutex); 358 - return page ? lowmem_page_address(page) + offset : NULL; 381 + return addr ? addr + offset : NULL; 359 382 } 360 383 361 384 int mlx4_table_get_range(struct mlx4_dev *dev, struct mlx4_icm_table *table,
+19 -3
drivers/net/ethernet/mellanox/mlx4/icm.h
··· 47 47 MLX4_ICM_PAGE_SIZE = 1 << MLX4_ICM_PAGE_SHIFT, 48 48 }; 49 49 50 + struct mlx4_icm_buf { 51 + void *addr; 52 + size_t size; 53 + dma_addr_t dma_addr; 54 + }; 55 + 50 56 struct mlx4_icm_chunk { 51 57 struct list_head list; 52 58 int npages; 53 59 int nsg; 54 - struct scatterlist mem[MLX4_ICM_CHUNK_LEN]; 60 + bool coherent; 61 + union { 62 + struct scatterlist sg[MLX4_ICM_CHUNK_LEN]; 63 + struct mlx4_icm_buf buf[MLX4_ICM_CHUNK_LEN]; 64 + }; 55 65 }; 56 66 57 67 struct mlx4_icm { ··· 124 114 125 115 static inline dma_addr_t mlx4_icm_addr(struct mlx4_icm_iter *iter) 126 116 { 127 - return sg_dma_address(&iter->chunk->mem[iter->page_idx]); 117 + if (iter->chunk->coherent) 118 + return iter->chunk->buf[iter->page_idx].dma_addr; 119 + else 120 + return sg_dma_address(&iter->chunk->sg[iter->page_idx]); 128 121 } 129 122 130 123 static inline unsigned long mlx4_icm_size(struct mlx4_icm_iter *iter) 131 124 { 132 - return sg_dma_len(&iter->chunk->mem[iter->page_idx]); 125 + if (iter->chunk->coherent) 126 + return iter->chunk->buf[iter->page_idx].size; 127 + else 128 + return sg_dma_len(&iter->chunk->sg[iter->page_idx]); 133 129 } 134 130 135 131 int mlx4_MAP_ICM_AUX(struct mlx4_dev *dev, struct mlx4_icm *icm);
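The two mlx4 ICM files stop storing coherent allocations inside struct scatterlist entries: the old code recovered their kernel virtual address via lowmem_page_address(sg_page(...)), which is invalid when dma_alloc_coherent() hands back memory outside the linear map (the new XXX comment in icm.c flags exactly this), so coherent chunks now record the returned address directly and the chunk becomes a tagged union. A reduced sketch of the pattern (array length and names abbreviated from the real structures):

```c
#include <linux/scatterlist.h>
#include <linux/types.h>

struct icm_chunk {
	bool coherent;
	union {
		struct scatterlist sg[8];	/* streaming-DMA pages */
		struct {			/* dma_alloc_coherent() bufs */
			void		*addr;
			size_t		size;
			dma_addr_t	dma_addr;
		} buf[8];
	};
};

/* every accessor dispatches on the tag, as mlx4_icm_addr() now does */
static dma_addr_t icm_chunk_dma(struct icm_chunk *c, int i)
{
	return c->coherent ? c->buf[i].dma_addr : sg_dma_address(&c->sg[i]);
}
```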
+2 -2
drivers/net/ethernet/mellanox/mlx5/core/alloc.c
··· 63 63 mutex_lock(&priv->alloc_mutex); 64 64 original_node = dev_to_node(&dev->pdev->dev); 65 65 set_dev_node(&dev->pdev->dev, node); 66 - cpu_handle = dma_zalloc_coherent(&dev->pdev->dev, size, 67 - dma_handle, GFP_KERNEL); 66 + cpu_handle = dma_alloc_coherent(&dev->pdev->dev, size, dma_handle, 67 + GFP_KERNEL); 68 68 set_dev_node(&dev->pdev->dev, original_node); 69 69 mutex_unlock(&priv->alloc_mutex); 70 70 return cpu_handle;
+5 -5
drivers/net/ethernet/mellanox/mlx5/core/cmd.c
··· 1789 1789 { 1790 1790 struct device *ddev = &dev->pdev->dev; 1791 1791 1792 - cmd->cmd_alloc_buf = dma_zalloc_coherent(ddev, MLX5_ADAPTER_PAGE_SIZE, 1793 - &cmd->alloc_dma, GFP_KERNEL); 1792 + cmd->cmd_alloc_buf = dma_alloc_coherent(ddev, MLX5_ADAPTER_PAGE_SIZE, 1793 + &cmd->alloc_dma, GFP_KERNEL); 1794 1794 if (!cmd->cmd_alloc_buf) 1795 1795 return -ENOMEM; 1796 1796 ··· 1804 1804 1805 1805 dma_free_coherent(ddev, MLX5_ADAPTER_PAGE_SIZE, cmd->cmd_alloc_buf, 1806 1806 cmd->alloc_dma); 1807 - cmd->cmd_alloc_buf = dma_zalloc_coherent(ddev, 1808 - 2 * MLX5_ADAPTER_PAGE_SIZE - 1, 1809 - &cmd->alloc_dma, GFP_KERNEL); 1807 + cmd->cmd_alloc_buf = dma_alloc_coherent(ddev, 1808 + 2 * MLX5_ADAPTER_PAGE_SIZE - 1, 1809 + &cmd->alloc_dma, GFP_KERNEL); 1810 1810 if (!cmd->cmd_alloc_buf) 1811 1811 return -ENOMEM; 1812 1812
+4 -1
drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
··· 844 844 ethtool_link_ksettings_add_link_mode(link_ksettings, supported, 845 845 Autoneg); 846 846 847 - if (get_fec_supported_advertised(mdev, link_ksettings)) 847 + err = get_fec_supported_advertised(mdev, link_ksettings); 848 + if (err) { 848 849 netdev_dbg(priv->netdev, "%s: FEC caps query failed: %d\n", 849 850 __func__, err); 851 + err = 0; /* don't fail caps query because of FEC error */ 852 + } 850 853 851 854 if (!an_disable_admin) 852 855 ethtool_link_ksettings_add_link_mode(link_ksettings,
+17 -13
drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
··· 58 58 struct list_head list; 59 59 }; 60 60 61 - static void mlx5e_rep_indr_unregister_block(struct net_device *netdev); 61 + static void mlx5e_rep_indr_unregister_block(struct mlx5e_rep_priv *rpriv, 62 + struct net_device *netdev); 62 63 63 64 static void mlx5e_rep_get_drvinfo(struct net_device *dev, 64 65 struct ethtool_drvinfo *drvinfo) ··· 180 179 181 180 s->tx_packets += sq_stats->packets; 182 181 s->tx_bytes += sq_stats->bytes; 182 + s->tx_queue_dropped += sq_stats->dropped; 183 183 } 184 184 } 185 185 } ··· 665 663 struct list_head *head = &rpriv->uplink_priv.tc_indr_block_priv_list; 666 664 667 665 list_for_each_entry_safe(cb_priv, temp, head, list) { 668 - mlx5e_rep_indr_unregister_block(cb_priv->netdev); 666 + mlx5e_rep_indr_unregister_block(rpriv, cb_priv->netdev); 669 667 kfree(cb_priv); 670 668 } 671 669 } ··· 737 735 738 736 err = tcf_block_cb_register(f->block, 739 737 mlx5e_rep_indr_setup_block_cb, 740 - netdev, indr_priv, f->extack); 738 + indr_priv, indr_priv, f->extack); 741 739 if (err) { 742 740 list_del(&indr_priv->list); 743 741 kfree(indr_priv); ··· 745 743 746 744 return err; 747 745 case TC_BLOCK_UNBIND: 746 + indr_priv = mlx5e_rep_indr_block_priv_lookup(rpriv, netdev); 747 + if (!indr_priv) 748 + return -ENOENT; 749 + 748 750 tcf_block_cb_unregister(f->block, 749 751 mlx5e_rep_indr_setup_block_cb, 750 - netdev); 751 - indr_priv = mlx5e_rep_indr_block_priv_lookup(rpriv, netdev); 752 - if (indr_priv) { 753 - list_del(&indr_priv->list); 754 - kfree(indr_priv); 755 - } 752 + indr_priv); 753 + list_del(&indr_priv->list); 754 + kfree(indr_priv); 756 755 757 756 return 0; 758 757 default: ··· 782 779 783 780 err = __tc_indr_block_cb_register(netdev, rpriv, 784 781 mlx5e_rep_indr_setup_tc_cb, 785 - netdev); 782 + rpriv); 786 783 if (err) { 787 784 struct mlx5e_priv *priv = netdev_priv(rpriv->netdev); 788 785 ··· 792 789 return err; 793 790 } 794 791 795 - static void mlx5e_rep_indr_unregister_block(struct net_device *netdev) 792 + static void mlx5e_rep_indr_unregister_block(struct mlx5e_rep_priv *rpriv, 793 + struct net_device *netdev) 796 794 { 797 795 __tc_indr_block_cb_unregister(netdev, mlx5e_rep_indr_setup_tc_cb, 798 - netdev); 796 + rpriv); 799 797 } 800 798 801 799 static int mlx5e_nic_rep_netdevice_event(struct notifier_block *nb, ··· 815 811 mlx5e_rep_indr_register_block(rpriv, netdev); 816 812 break; 817 813 case NETDEV_UNREGISTER: 818 - mlx5e_rep_indr_unregister_block(netdev); 814 + mlx5e_rep_indr_unregister_block(rpriv, netdev); 819 815 break; 820 816 } 821 817 return NOTIFY_OK;
+13
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
··· 732 732 ((struct ipv6hdr *)ip_p)->nexthdr; 733 733 } 734 734 735 + #define short_frame(size) ((size) <= ETH_ZLEN + ETH_FCS_LEN) 736 + 735 737 static inline void mlx5e_handle_csum(struct net_device *netdev, 736 738 struct mlx5_cqe64 *cqe, 737 739 struct mlx5e_rq *rq, ··· 754 752 } 755 753 756 754 if (unlikely(test_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &rq->state))) 755 + goto csum_unnecessary; 756 + 757 + /* CQE csum doesn't cover padding octets in short ethernet 758 + * frames. And the pad field is appended prior to calculating 759 + * and appending the FCS field. 760 + * 761 + * Detecting these padded frames requires to verify and parse 762 + * IP headers, so we simply force all those small frames to be 763 + * CHECKSUM_UNNECESSARY even if they are not padded. 764 + */ 765 + if (short_frame(skb->len)) 757 766 goto csum_unnecessary; 758 767 759 768 if (likely(is_last_ethertype_ip(skb, &network_depth, &proto))) {
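For context on the short_frame() test added above: the minimum Ethernet frame is 64 octets on the wire, ETH_ZLEN (60) bytes of header plus payload padded up by the sender, followed by the 4-byte FCS. The CQE checksum does not cover those pad octets, so CHECKSUM_COMPLETE would disagree with a checksum over the padded frame; every frame at or below that size is conservatively steered to CHECKSUM_UNNECESSARY instead. A standalone restatement of the check (illustrative helper name):

    #include <linux/if_ether.h>
    #include <linux/skbuff.h>

    static inline bool example_may_be_padded(const struct sk_buff *skb)
    {
            return skb->len <= ETH_ZLEN + ETH_FCS_LEN;  /* 60 + 4 = 64 */
    }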
+1
drivers/net/ethernet/mellanox/mlxsw/Kconfig
··· 78 78 depends on IPV6 || IPV6=n 79 79 depends on NET_IPGRE || NET_IPGRE=n 80 80 depends on IPV6_GRE || IPV6_GRE=n 81 + depends on VXLAN || VXLAN=n 81 82 select GENERIC_ALLOCATOR 82 83 select PARMAN 83 84 select OBJAGG
+9 -7
drivers/net/ethernet/mellanox/mlxsw/pci.c
··· 604 604 u16 wqe_counter = mlxsw_pci_cqe_wqe_counter_get(cqe); 605 605 u8 sendq = mlxsw_pci_cqe_sr_get(q->u.cq.v, cqe); 606 606 u8 dqn = mlxsw_pci_cqe_dqn_get(q->u.cq.v, cqe); 607 + char ncqe[MLXSW_PCI_CQE_SIZE_MAX]; 608 + 609 + memcpy(ncqe, cqe, q->elem_size); 610 + mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q); 607 611 608 612 if (sendq) { 609 613 struct mlxsw_pci_queue *sdq; 610 614 611 615 sdq = mlxsw_pci_sdq_get(mlxsw_pci, dqn); 612 616 mlxsw_pci_cqe_sdq_handle(mlxsw_pci, sdq, 613 - wqe_counter, cqe); 617 + wqe_counter, ncqe); 614 618 q->u.cq.comp_sdq_count++; 615 619 } else { 616 620 struct mlxsw_pci_queue *rdq; 617 621 618 622 rdq = mlxsw_pci_rdq_get(mlxsw_pci, dqn); 619 623 mlxsw_pci_cqe_rdq_handle(mlxsw_pci, rdq, 620 - wqe_counter, q->u.cq.v, cqe); 624 + wqe_counter, q->u.cq.v, ncqe); 621 625 q->u.cq.comp_rdq_count++; 622 626 } 623 627 if (++items == credits) 624 628 break; 625 629 } 626 - if (items) { 627 - mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q); 630 + if (items) 628 631 mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q); 629 - } 630 632 } 631 633 632 634 static u16 mlxsw_pci_cq_elem_count(const struct mlxsw_pci_queue *q) ··· 1367 1365 u32 val = mlxsw_pci_read32(mlxsw_pci, FW_READY); 1368 1366 1369 1367 if ((val & MLXSW_PCI_FW_READY_MASK) == MLXSW_PCI_FW_READY_MAGIC) 1370 - break; 1368 + return 0; 1371 1369 cond_resched(); 1372 1370 } while (time_before(jiffies, end)); 1373 - return 0; 1371 + return -EBUSY; 1374 1372 } 1375 1373 1376 1374 static int mlxsw_pci_alloc_irq_vectors(struct mlxsw_pci *mlxsw_pci)
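The last hunk above fixes an inverted poll: the loop used to fall through and return 0 whether or not the FW_READY magic ever appeared. The corrected shape, success returned from inside the loop and the timeout after it, is the standard pattern; a generic sketch with a hypothetical read_ready() callback:

    #include <linux/jiffies.h>
    #include <linux/sched.h>
    #include <linux/errno.h>

    static int example_wait_ready(unsigned long timeout_ms,
                                  bool (*read_ready)(void))
    {
            unsigned long end = jiffies + msecs_to_jiffies(timeout_ms);

            do {
                    if (read_ready())
                            return 0;   /* only success exits early */
                    cond_resched();
            } while (time_before(jiffies, end));

            return -EBUSY;              /* fell out of the loop: timeout */
    }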
+2 -1
drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
··· 27 27 28 28 #define MLXSW_PCI_SW_RESET 0xF0010 29 29 #define MLXSW_PCI_SW_RESET_RST_BIT BIT(0) 30 - #define MLXSW_PCI_SW_RESET_TIMEOUT_MSECS 5000 30 + #define MLXSW_PCI_SW_RESET_TIMEOUT_MSECS 13000 31 31 #define MLXSW_PCI_SW_RESET_WAIT_MSECS 100 32 32 #define MLXSW_PCI_FW_READY 0xA1844 33 33 #define MLXSW_PCI_FW_READY_MASK 0xFFFF ··· 53 53 #define MLXSW_PCI_WQE_SIZE 32 /* 32 bytes per element */ 54 54 #define MLXSW_PCI_CQE01_SIZE 16 /* 16 bytes per element */ 55 55 #define MLXSW_PCI_CQE2_SIZE 32 /* 32 bytes per element */ 56 + #define MLXSW_PCI_CQE_SIZE_MAX MLXSW_PCI_CQE2_SIZE 56 57 #define MLXSW_PCI_EQE_SIZE 16 /* 16 bytes per element */ 57 58 #define MLXSW_PCI_WQE_COUNT (MLXSW_PCI_AQ_SIZE / MLXSW_PCI_WQE_SIZE) 58 59 #define MLXSW_PCI_CQE01_COUNT (MLXSW_PCI_AQ_SIZE / MLXSW_PCI_CQE01_SIZE)
+5 -2
drivers/net/ethernet/mellanox/mlxsw/spectrum.c
··· 5005 5005 lower_dev, 5006 5006 upper_dev); 5007 5007 } else if (netif_is_lag_master(upper_dev)) { 5008 - if (info->linking) 5008 + if (info->linking) { 5009 5009 err = mlxsw_sp_port_lag_join(mlxsw_sp_port, 5010 5010 upper_dev); 5011 - else 5011 + } else { 5012 + mlxsw_sp_port_lag_tx_en_set(mlxsw_sp_port, 5013 + false); 5012 5014 mlxsw_sp_port_lag_leave(mlxsw_sp_port, 5013 5015 upper_dev); 5016 + } 5014 5017 } else if (netif_is_ovs_master(upper_dev)) { 5015 5018 if (info->linking) 5016 5019 err = mlxsw_sp_port_ovs_join(mlxsw_sp_port);
+9 -1
drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c
··· 72 72 act_set = mlxsw_afa_block_first_set(rulei->act_block); 73 73 mlxsw_reg_ptce2_flex_action_set_memcpy_to(ptce2_pl, act_set); 74 74 75 - return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptce2), ptce2_pl); 75 + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptce2), ptce2_pl); 76 + if (err) 77 + goto err_ptce2_write; 78 + 79 + return 0; 80 + 81 + err_ptce2_write: 82 + cregion->ops->entry_remove(cregion, centry); 83 + return err; 76 84 } 77 85 78 86 static void
-2
drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c
··· 1022 1022 { 1023 1023 struct objagg_obj *objagg_obj = (struct objagg_obj *) erp_mask; 1024 1024 1025 - ASSERT_RTNL(); 1026 1025 objagg_obj_put(aregion->erp_table->objagg, objagg_obj); 1027 1026 } 1028 1027 ··· 1053 1054 const struct mlxsw_sp_acl_erp *erp = objagg_obj_root_priv(objagg_obj); 1054 1055 unsigned int erp_bank; 1055 1056 1056 - ASSERT_RTNL(); 1057 1057 if (!mlxsw_sp_acl_erp_table_is_used(erp->erp_table)) 1058 1058 return; 1059 1059
+2 -2
drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c
··· 997 997 static const struct mlxsw_sp_fid_family mlxsw_sp_fid_dummy_family = { 998 998 .type = MLXSW_SP_FID_TYPE_DUMMY, 999 999 .fid_size = sizeof(struct mlxsw_sp_fid), 1000 - .start_index = MLXSW_SP_RFID_BASE - 1, 1001 - .end_index = MLXSW_SP_RFID_BASE - 1, 1000 + .start_index = VLAN_N_VID - 1, 1001 + .end_index = VLAN_N_VID - 1, 1002 1002 .ops = &mlxsw_sp_fid_dummy_ops, 1003 1003 }; 1004 1004
+2 -2
drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c
··· 816 816 ops = nve->nve_ops_arr[params->type]; 817 817 818 818 if (!ops->can_offload(nve, params->dev, extack)) 819 - return -EOPNOTSUPP; 819 + return -EINVAL; 820 820 821 821 memset(&config, 0, sizeof(config)); 822 822 ops->nve_config(nve, params->dev, &config); 823 823 if (nve->num_nve_tunnels && 824 824 memcmp(&config, &nve->config, sizeof(config))) { 825 825 NL_SET_ERR_MSG_MOD(extack, "Conflicting NVE tunnels configuration"); 826 - return -EOPNOTSUPP; 826 + return -EINVAL; 827 827 } 828 828 829 829 err = mlxsw_sp_nve_tunnel_init(mlxsw_sp, &config);
+16 -19
drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
··· 1078 1078 mlxsw_sp_bridge_port_vlan_add(struct mlxsw_sp_port *mlxsw_sp_port, 1079 1079 struct mlxsw_sp_bridge_port *bridge_port, 1080 1080 u16 vid, bool is_untagged, bool is_pvid, 1081 - struct netlink_ext_ack *extack, 1082 - struct switchdev_trans *trans) 1081 + struct netlink_ext_ack *extack) 1083 1082 { 1084 1083 u16 pvid = mlxsw_sp_port_pvid_determine(mlxsw_sp_port, vid, is_pvid); 1085 1084 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan; ··· 1093 1094 if (mlxsw_sp_port_vlan && 1094 1095 mlxsw_sp_port_vlan->bridge_port != bridge_port) 1095 1096 return -EEXIST; 1096 - 1097 - if (switchdev_trans_ph_prepare(trans)) 1098 - return 0; 1099 1097 1100 1098 if (!mlxsw_sp_port_vlan) { 1101 1099 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_create(mlxsw_sp_port, ··· 1184 1188 return err; 1185 1189 } 1186 1190 1191 + if (switchdev_trans_ph_commit(trans)) 1192 + return 0; 1193 + 1187 1194 bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev); 1188 1195 if (WARN_ON(!bridge_port)) 1189 1196 return -EINVAL; ··· 1199 1200 1200 1201 err = mlxsw_sp_bridge_port_vlan_add(mlxsw_sp_port, bridge_port, 1201 1202 vid, flag_untagged, 1202 - flag_pvid, extack, trans); 1203 + flag_pvid, extack); 1203 1204 if (err) 1204 1205 return err; 1205 1206 } ··· 1233 1234 static enum mlxsw_reg_sfd_rec_policy mlxsw_sp_sfd_rec_policy(bool dynamic) 1234 1235 { 1235 1236 return dynamic ? MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_INGRESS : 1236 - MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY; 1237 + MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_MLAG; 1237 1238 } 1238 1239 1239 1240 static enum mlxsw_reg_sfd_op mlxsw_sp_sfd_op(bool adding) ··· 1290 1291 static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port, 1291 1292 const char *mac, u16 fid, bool adding, 1292 1293 enum mlxsw_reg_sfd_rec_action action, 1293 - bool dynamic) 1294 + enum mlxsw_reg_sfd_rec_policy policy) 1294 1295 { 1295 1296 char *sfd_pl; 1296 1297 u8 num_rec; ··· 1301 1302 return -ENOMEM; 1302 1303 1303 1304 mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0); 1304 - mlxsw_reg_sfd_uc_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic), 1305 - mac, fid, action, local_port); 1305 + mlxsw_reg_sfd_uc_pack(sfd_pl, 0, policy, mac, fid, action, local_port); 1306 1306 num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl); 1307 1307 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl); 1308 1308 if (err) ··· 1320 1322 bool dynamic) 1321 1323 { 1322 1324 return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid, adding, 1323 - MLXSW_REG_SFD_REC_ACTION_NOP, dynamic); 1325 + MLXSW_REG_SFD_REC_ACTION_NOP, 1326 + mlxsw_sp_sfd_rec_policy(dynamic)); 1324 1327 } 1325 1328 1326 1329 int mlxsw_sp_rif_fdb_op(struct mlxsw_sp *mlxsw_sp, const char *mac, u16 fid, ··· 1329 1330 { 1330 1331 return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, 0, mac, fid, adding, 1331 1332 MLXSW_REG_SFD_REC_ACTION_FORWARD_IP_ROUTER, 1332 - false); 1333 + MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY); 1333 1334 } 1334 1335 1335 1336 static int mlxsw_sp_port_fdb_uc_lag_op(struct mlxsw_sp *mlxsw_sp, u16 lag_id, ··· 1807 1808 mlxsw_sp_bridge_port_vlan_del(struct mlxsw_sp_port *mlxsw_sp_port, 1808 1809 struct mlxsw_sp_bridge_port *bridge_port, u16 vid) 1809 1810 { 1810 - u16 pvid = mlxsw_sp_port->pvid == vid ? 0 : vid; 1811 + u16 pvid = mlxsw_sp_port->pvid == vid ? 
0 : vid; 1811 + u16 pvid = mlxsw_sp_port->pvid == vid ? 0 : mlxsw_sp_port->pvid; 1811 1812 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan; 1812 1813 1813 1814 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid); ··· 3206 3207 struct mlxsw_sp_bridge_device *bridge_device, 3207 3208 const struct net_device *vxlan_dev, u16 vid, 3208 3209 bool flag_untagged, bool flag_pvid, 3209 - struct switchdev_trans *trans, 3210 3210 struct netlink_ext_ack *extack) 3211 3211 { 3212 3212 struct vxlan_dev *vxlan = netdev_priv(vxlan_dev); ··· 3222 3224 if (flag_untagged && flag_pvid && 3223 3225 mlxsw_sp_bridge_8021q_vxlan_dev_find(bridge_device->dev, vid)) 3224 3226 return -EINVAL; 3225 - 3226 - if (switchdev_trans_ph_prepare(trans)) 3227 - return 0; 3228 3227 3229 3228 if (!netif_running(vxlan_dev)) 3230 3229 return 0; ··· 3340 3345 3341 3346 port_obj_info->handled = true; 3342 3347 3348 + if (switchdev_trans_ph_commit(trans)) 3349 + return 0; 3350 + 3343 3351 bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev); 3344 3352 if (!bridge_device) 3345 3353 return -EINVAL; ··· 3356 3358 err = mlxsw_sp_switchdev_vxlan_vlan_add(mlxsw_sp, bridge_device, 3357 3359 vxlan_dev, vid, 3358 3360 flag_untagged, 3359 - flag_pvid, trans, 3360 - extack); 3361 + flag_pvid, extack); 3362 3363 if (err) 3363 3364 return err; 3364 3365 }
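The prepare/commit hunks above all follow from the same switchdev rule: the transaction's prepare phase may fail, the commit phase must not. The driver therefore now does the fallible work during prepare and turns commit into a no-op, instead of bailing out of prepare and failing late. Reduced to a skeleton (do_add() is a hypothetical helper):

    #include <net/switchdev.h>

    static int example_port_obj_add(struct switchdev_trans *trans,
                                    int (*do_add)(void))
    {
            if (switchdev_trans_ph_commit(trans))
                    return 0;   /* commit: work already done in prepare */

            return do_add();    /* prepare: errors still propagate */
    }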
+4 -7
drivers/net/ethernet/microchip/lan743x_main.c
··· 962 962 963 963 memset(&ksettings, 0, sizeof(ksettings)); 964 964 phy_ethtool_get_link_ksettings(netdev, &ksettings); 965 - local_advertisement = phy_read(phydev, MII_ADVERTISE); 966 - if (local_advertisement < 0) 967 - return; 968 - 969 - remote_advertisement = phy_read(phydev, MII_LPA); 970 - if (remote_advertisement < 0) 971 - return; 965 + local_advertisement = 966 + linkmode_adv_to_mii_adv_t(phydev->advertising); 967 + remote_advertisement = 968 + linkmode_adv_to_mii_adv_t(phydev->lp_advertising); 972 969 973 970 lan743x_phy_update_flowcontrol(adapter, 974 971 ksettings.base.duplex,
+3 -3
drivers/net/ethernet/myricom/myri10ge/myri10ge.c
··· 3604 3604 for (i = 0; i < mgp->num_slices; i++) { 3605 3605 ss = &mgp->ss[i]; 3606 3606 bytes = mgp->max_intr_slots * sizeof(*ss->rx_done.entry); 3607 - ss->rx_done.entry = dma_zalloc_coherent(&pdev->dev, bytes, 3608 - &ss->rx_done.bus, 3609 - GFP_KERNEL); 3607 + ss->rx_done.entry = dma_alloc_coherent(&pdev->dev, bytes, 3608 + &ss->rx_done.bus, 3609 + GFP_KERNEL); 3610 3610 if (ss->rx_done.entry == NULL) 3611 3611 goto abort; 3612 3612 bytes = sizeof(*ss->fw_stats);
+6 -6
drivers/net/ethernet/netronome/nfp/nfp_net_common.c
··· 2170 2170 tx_ring->cnt = dp->txd_cnt; 2171 2171 2172 2172 tx_ring->size = array_size(tx_ring->cnt, sizeof(*tx_ring->txds)); 2173 - tx_ring->txds = dma_zalloc_coherent(dp->dev, tx_ring->size, 2174 - &tx_ring->dma, 2175 - GFP_KERNEL | __GFP_NOWARN); 2173 + tx_ring->txds = dma_alloc_coherent(dp->dev, tx_ring->size, 2174 + &tx_ring->dma, 2175 + GFP_KERNEL | __GFP_NOWARN); 2176 2176 if (!tx_ring->txds) { 2177 2177 netdev_warn(dp->netdev, "failed to allocate TX descriptor ring memory, requested descriptor count: %d, consider lowering descriptor count\n", 2178 2178 tx_ring->cnt); ··· 2328 2328 2329 2329 rx_ring->cnt = dp->rxd_cnt; 2330 2330 rx_ring->size = array_size(rx_ring->cnt, sizeof(*rx_ring->rxds)); 2331 - rx_ring->rxds = dma_zalloc_coherent(dp->dev, rx_ring->size, 2332 - &rx_ring->dma, 2333 - GFP_KERNEL | __GFP_NOWARN); 2331 + rx_ring->rxds = dma_alloc_coherent(dp->dev, rx_ring->size, 2332 + &rx_ring->dma, 2333 + GFP_KERNEL | __GFP_NOWARN); 2334 2334 if (!rx_ring->rxds) { 2335 2335 netdev_warn(dp->netdev, "failed to allocate RX descriptor ring memory, requested descriptor count: %d, consider lowering descriptor count\n", 2336 2336 rx_ring->cnt);
+6 -6
drivers/net/ethernet/ni/nixge.c
··· 287 287 priv->rx_bd_ci = 0; 288 288 289 289 /* Allocate the Tx and Rx buffer descriptors. */ 290 - priv->tx_bd_v = dma_zalloc_coherent(ndev->dev.parent, 291 - sizeof(*priv->tx_bd_v) * TX_BD_NUM, 292 - &priv->tx_bd_p, GFP_KERNEL); 290 + priv->tx_bd_v = dma_alloc_coherent(ndev->dev.parent, 291 + sizeof(*priv->tx_bd_v) * TX_BD_NUM, 292 + &priv->tx_bd_p, GFP_KERNEL); 293 293 if (!priv->tx_bd_v) 294 294 goto out; 295 295 ··· 299 299 if (!priv->tx_skb) 300 300 goto out; 301 301 302 - priv->rx_bd_v = dma_zalloc_coherent(ndev->dev.parent, 303 - sizeof(*priv->rx_bd_v) * RX_BD_NUM, 304 - &priv->rx_bd_p, GFP_KERNEL); 302 + priv->rx_bd_v = dma_alloc_coherent(ndev->dev.parent, 303 + sizeof(*priv->rx_bd_v) * RX_BD_NUM, 304 + &priv->rx_bd_p, GFP_KERNEL); 305 305 if (!priv->rx_bd_v) 306 306 goto out; 307 307
+6 -6
drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
··· 1440 1440 1441 1441 size = rx_ring->count * bufsz + PCH_GBE_RESERVE_MEMORY; 1442 1442 rx_ring->rx_buff_pool = 1443 - dma_zalloc_coherent(&pdev->dev, size, 1444 - &rx_ring->rx_buff_pool_logic, GFP_KERNEL); 1443 + dma_alloc_coherent(&pdev->dev, size, 1444 + &rx_ring->rx_buff_pool_logic, GFP_KERNEL); 1445 1445 if (!rx_ring->rx_buff_pool) 1446 1446 return -ENOMEM; 1447 1447 ··· 1755 1755 1756 1756 tx_ring->size = tx_ring->count * (int)sizeof(struct pch_gbe_tx_desc); 1757 1757 1758 - tx_ring->desc = dma_zalloc_coherent(&pdev->dev, tx_ring->size, 1759 - &tx_ring->dma, GFP_KERNEL); 1758 + tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size, 1759 + &tx_ring->dma, GFP_KERNEL); 1760 1760 if (!tx_ring->desc) { 1761 1761 vfree(tx_ring->buffer_info); 1762 1762 return -ENOMEM; ··· 1798 1798 return -ENOMEM; 1799 1799 1800 1800 rx_ring->size = rx_ring->count * (int)sizeof(struct pch_gbe_rx_desc); 1801 - rx_ring->desc = dma_zalloc_coherent(&pdev->dev, rx_ring->size, 1802 - &rx_ring->dma, GFP_KERNEL); 1801 + rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size, 1802 + &rx_ring->dma, GFP_KERNEL); 1803 1803 if (!rx_ring->desc) { 1804 1804 vfree(rx_ring->buffer_info); 1805 1805 return -ENOMEM;
+3 -3
drivers/net/ethernet/pasemi/pasemi_mac.c
··· 401 401 if (pasemi_dma_alloc_ring(&ring->chan, RX_RING_SIZE)) 402 402 goto out_ring_desc; 403 403 404 - ring->buffers = dma_zalloc_coherent(&mac->dma_pdev->dev, 405 - RX_RING_SIZE * sizeof(u64), 406 - &ring->buf_dma, GFP_KERNEL); 404 + ring->buffers = dma_alloc_coherent(&mac->dma_pdev->dev, 405 + RX_RING_SIZE * sizeof(u64), 406 + &ring->buf_dma, GFP_KERNEL); 407 407 if (!ring->buffers) 408 408 goto out_ring_desc; 409 409
+8 -8
drivers/net/ethernet/qlogic/qed/qed_cxt.c
··· 936 936 u32 size = min_t(u32, total_size, psz); 937 937 void **p_virt = &p_mngr->t2[i].p_virt; 938 938 939 - *p_virt = dma_zalloc_coherent(&p_hwfn->cdev->pdev->dev, 940 - size, &p_mngr->t2[i].p_phys, 941 - GFP_KERNEL); 939 + *p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, size, 940 + &p_mngr->t2[i].p_phys, 941 + GFP_KERNEL); 942 942 if (!p_mngr->t2[i].p_virt) { 943 943 rc = -ENOMEM; 944 944 goto t2_fail; ··· 1054 1054 u32 size; 1055 1055 1056 1056 size = min_t(u32, sz_left, p_blk->real_size_in_page); 1057 - p_virt = dma_zalloc_coherent(&p_hwfn->cdev->pdev->dev, size, 1058 - &p_phys, GFP_KERNEL); 1057 + p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, size, 1058 + &p_phys, GFP_KERNEL); 1059 1059 if (!p_virt) 1060 1060 return -ENOMEM; 1061 1061 ··· 2306 2306 goto out0; 2307 2307 } 2308 2308 2309 - p_virt = dma_zalloc_coherent(&p_hwfn->cdev->pdev->dev, 2310 - p_blk->real_size_in_page, &p_phys, 2311 - GFP_KERNEL); 2309 + p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, 2310 + p_blk->real_size_in_page, &p_phys, 2311 + GFP_KERNEL); 2312 2312 if (!p_virt) { 2313 2313 rc = -ENOMEM; 2314 2314 goto out1;
+4
drivers/net/ethernet/qlogic/qed/qed_ll2.c
··· 1619 1619 cq_prod = qed_chain_get_prod_idx(&p_rx->rcq_chain); 1620 1620 rx_prod.bd_prod = cpu_to_le16(bd_prod); 1621 1621 rx_prod.cqe_prod = cpu_to_le16(cq_prod); 1622 + 1623 + /* Make sure chain element is updated before ringing the doorbell */ 1624 + dma_wmb(); 1625 + 1622 1626 DIRECT_REG_WR(p_rx->set_prod_addr, *((u32 *)&rx_prod)); 1623 1627 } 1624 1628
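The dma_wmb() added above orders the descriptor writes against the doorbell: without it, the device could observe the new producer value before the chain element contents reach coherent memory. The generic producer pattern (types and names illustrative):

    #include <linux/types.h>
    #include <linux/io.h>

    struct example_desc {
            __le64 addr;
            __le16 len;
    };

    static void example_post(struct example_desc *desc, dma_addr_t buf,
                             u16 len, u32 prod, void __iomem *doorbell)
    {
            desc->addr = cpu_to_le64(buf);  /* descriptor writes ... */
            desc->len = cpu_to_le16(len);

            dma_wmb();                      /* ... must be visible first */

            writel(prod, doorbell);         /* device may fetch from here on */
    }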
+14 -14
drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
··· 434 434 *(tx_ring->hw_consumer) = 0; 435 435 436 436 rq_size = SIZEOF_HOSTRQ_TX(struct qlcnic_hostrq_tx_ctx); 437 - rq_addr = dma_zalloc_coherent(&adapter->pdev->dev, rq_size, 438 - &rq_phys_addr, GFP_KERNEL); 437 + rq_addr = dma_alloc_coherent(&adapter->pdev->dev, rq_size, 438 + &rq_phys_addr, GFP_KERNEL); 439 439 if (!rq_addr) 440 440 return -ENOMEM; 441 441 442 442 rsp_size = SIZEOF_CARDRSP_TX(struct qlcnic_cardrsp_tx_ctx); 443 - rsp_addr = dma_zalloc_coherent(&adapter->pdev->dev, rsp_size, 444 - &rsp_phys_addr, GFP_KERNEL); 443 + rsp_addr = dma_alloc_coherent(&adapter->pdev->dev, rsp_size, 444 + &rsp_phys_addr, GFP_KERNEL); 445 445 if (!rsp_addr) { 446 446 err = -ENOMEM; 447 447 goto out_free_rq; ··· 855 855 struct qlcnic_cmd_args cmd; 856 856 size_t nic_size = sizeof(struct qlcnic_info_le); 857 857 858 - nic_info_addr = dma_zalloc_coherent(&adapter->pdev->dev, nic_size, 859 - &nic_dma_t, GFP_KERNEL); 858 + nic_info_addr = dma_alloc_coherent(&adapter->pdev->dev, nic_size, 859 + &nic_dma_t, GFP_KERNEL); 860 860 if (!nic_info_addr) 861 861 return -ENOMEM; 862 862 ··· 909 909 if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC) 910 910 return err; 911 911 912 - nic_info_addr = dma_zalloc_coherent(&adapter->pdev->dev, nic_size, 913 - &nic_dma_t, GFP_KERNEL); 912 + nic_info_addr = dma_alloc_coherent(&adapter->pdev->dev, nic_size, 913 + &nic_dma_t, GFP_KERNEL); 914 914 if (!nic_info_addr) 915 915 return -ENOMEM; 916 916 ··· 964 964 void *pci_info_addr; 965 965 int err = 0, i; 966 966 967 - pci_info_addr = dma_zalloc_coherent(&adapter->pdev->dev, pci_size, 968 - &pci_info_dma_t, GFP_KERNEL); 967 + pci_info_addr = dma_alloc_coherent(&adapter->pdev->dev, pci_size, 968 + &pci_info_dma_t, GFP_KERNEL); 969 969 if (!pci_info_addr) 970 970 return -ENOMEM; 971 971 ··· 1078 1078 return -EIO; 1079 1079 } 1080 1080 1081 - stats_addr = dma_zalloc_coherent(&adapter->pdev->dev, stats_size, 1082 - &stats_dma_t, GFP_KERNEL); 1081 + stats_addr = dma_alloc_coherent(&adapter->pdev->dev, stats_size, 1082 + &stats_dma_t, GFP_KERNEL); 1083 1083 if (!stats_addr) 1084 1084 return -ENOMEM; 1085 1085 ··· 1134 1134 if (mac_stats == NULL) 1135 1135 return -ENOMEM; 1136 1136 1137 - stats_addr = dma_zalloc_coherent(&adapter->pdev->dev, stats_size, 1138 - &stats_dma_t, GFP_KERNEL); 1137 + stats_addr = dma_alloc_coherent(&adapter->pdev->dev, stats_size, 1138 + &stats_dma_t, GFP_KERNEL); 1139 1139 if (!stats_addr) 1140 1140 return -ENOMEM; 1141 1141
+1 -1
drivers/net/ethernet/qualcomm/emac/emac-mac.c
··· 776 776 8 + 2 * 8; /* 8 byte per one Tx and two Rx rings */ 777 777 778 778 ring_header->used = 0; 779 - ring_header->v_addr = dma_zalloc_coherent(dev, ring_header->size, 779 + ring_header->v_addr = dma_alloc_coherent(dev, ring_header->size, 780 780 &ring_header->dma_addr, 781 781 GFP_KERNEL); 782 782 if (!ring_header->v_addr)
+7 -2
drivers/net/ethernet/realtek/r8169.c
··· 205 205 }; 206 206 207 207 static const struct pci_device_id rtl8169_pci_tbl[] = { 208 + { PCI_VDEVICE(REALTEK, 0x2502), RTL_CFG_1 }, 209 + { PCI_VDEVICE(REALTEK, 0x2600), RTL_CFG_1 }, 208 210 { PCI_VDEVICE(REALTEK, 0x8129), RTL_CFG_0 }, 209 211 { PCI_VDEVICE(REALTEK, 0x8136), RTL_CFG_2 }, 210 212 { PCI_VDEVICE(REALTEK, 0x8161), RTL_CFG_1 }, ··· 708 706 MODULE_PARM_DESC(use_dac, "Enable PCI DAC. Unsafe on 32 bit PCI slot."); 709 707 module_param_named(debug, debug.msg_enable, int, 0); 710 708 MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 16=all)"); 709 + MODULE_SOFTDEP("pre: realtek"); 711 710 MODULE_LICENSE("GPL"); 712 711 MODULE_FIRMWARE(FIRMWARE_8168D_1); 713 712 MODULE_FIRMWARE(FIRMWARE_8168D_2); ··· 1682 1679 1683 1680 static bool rtl8169_update_counters(struct rtl8169_private *tp) 1684 1681 { 1682 + u8 val = RTL_R8(tp, ChipCmd); 1683 + 1685 1684 /* 1686 1685 * Some chips are unable to dump tally counters when the receiver 1687 - * is disabled. 1686 + * is disabled. If 0xff chip may be in a PCI power-save state. 1688 1687 */ 1689 - if ((RTL_R8(tp, ChipCmd) & CmdRxEnb) == 0) 1688 + if (!(val & CmdRxEnb) || val == 0xff) 1690 1689 return true; 1691 1690 1692 1691 return rtl8169_do_counters(tp, CounterDump);
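The val == 0xff test above is a stock PCI idiom: reads from a device that has fallen off the bus or dropped into a deep power-save state complete as all-ones, so a register that can never legitimately read 0xff doubles as a liveness probe. In isolation (illustrative):

    #include <linux/io.h>

    static bool example_device_present(void __iomem *reg)
    {
            /* all-ones readback: master abort or D3, not real bits */
            return readb(reg) != 0xff;
    }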
+6 -6
drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
··· 400 400 } 401 401 402 402 /* allocate memory for TX descriptors */ 403 - tx_ring->dma_tx = dma_zalloc_coherent(dev, 404 - tx_rsize * sizeof(struct sxgbe_tx_norm_desc), 405 - &tx_ring->dma_tx_phy, GFP_KERNEL); 403 + tx_ring->dma_tx = dma_alloc_coherent(dev, 404 + tx_rsize * sizeof(struct sxgbe_tx_norm_desc), 405 + &tx_ring->dma_tx_phy, GFP_KERNEL); 406 406 if (!tx_ring->dma_tx) 407 407 return -ENOMEM; 408 408 ··· 479 479 rx_ring->queue_no = queue_no; 480 480 481 481 /* allocate memory for RX descriptors */ 482 - rx_ring->dma_rx = dma_zalloc_coherent(priv->device, 483 - rx_rsize * sizeof(struct sxgbe_rx_norm_desc), 484 - &rx_ring->dma_rx_phy, GFP_KERNEL); 482 + rx_ring->dma_rx = dma_alloc_coherent(priv->device, 483 + rx_rsize * sizeof(struct sxgbe_rx_norm_desc), 484 + &rx_ring->dma_rx_phy, GFP_KERNEL); 485 485 486 486 if (rx_ring->dma_rx == NULL) 487 487 return -ENOMEM;
+2 -2
drivers/net/ethernet/sfc/falcon/nic.c
··· 33 33 int ef4_nic_alloc_buffer(struct ef4_nic *efx, struct ef4_buffer *buffer, 34 34 unsigned int len, gfp_t gfp_flags) 35 35 { 36 - buffer->addr = dma_zalloc_coherent(&efx->pci_dev->dev, len, 37 - &buffer->dma_addr, gfp_flags); 36 + buffer->addr = dma_alloc_coherent(&efx->pci_dev->dev, len, 37 + &buffer->dma_addr, gfp_flags); 38 38 if (!buffer->addr) 39 39 return -ENOMEM; 40 40 buffer->len = len;
+2 -2
drivers/net/ethernet/sfc/nic.c
··· 34 34 int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer, 35 35 unsigned int len, gfp_t gfp_flags) 36 36 { 37 - buffer->addr = dma_zalloc_coherent(&efx->pci_dev->dev, len, 38 - &buffer->dma_addr, gfp_flags); 37 + buffer->addr = dma_alloc_coherent(&efx->pci_dev->dev, len, 38 + &buffer->dma_addr, gfp_flags); 39 39 if (!buffer->addr) 40 40 return -ENOMEM; 41 41 buffer->len = len;
+2 -2
drivers/net/ethernet/sgi/meth.c
··· 211 211 static int meth_init_tx_ring(struct meth_private *priv) 212 212 { 213 213 /* Init TX ring */ 214 - priv->tx_ring = dma_zalloc_coherent(NULL, TX_RING_BUFFER_SIZE, 215 - &priv->tx_ring_dma, GFP_ATOMIC); 214 + priv->tx_ring = dma_alloc_coherent(NULL, TX_RING_BUFFER_SIZE, 215 + &priv->tx_ring_dma, GFP_ATOMIC); 216 216 if (!priv->tx_ring) 217 217 return -ENOMEM; 218 218
+2 -2
drivers/net/ethernet/socionext/netsec.c
··· 1029 1029 struct netsec_desc_ring *dring = &priv->desc_ring[id]; 1030 1030 int i; 1031 1031 1032 - dring->vaddr = dma_zalloc_coherent(priv->dev, DESC_SZ * DESC_NUM, 1033 - &dring->desc_dma, GFP_KERNEL); 1032 + dring->vaddr = dma_alloc_coherent(priv->dev, DESC_SZ * DESC_NUM, 1033 + &dring->desc_dma, GFP_KERNEL); 1034 1034 if (!dring->vaddr) 1035 1035 goto err; 1036 1036
+3 -3
drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
··· 263 263 struct stmmac_extra_stats *x, u32 chan) 264 264 { 265 265 u32 intr_status = readl(ioaddr + XGMAC_DMA_CH_STATUS(chan)); 266 + u32 intr_en = readl(ioaddr + XGMAC_DMA_CH_INT_EN(chan)); 266 267 int ret = 0; 267 268 268 269 /* ABNORMAL interrupts */ ··· 283 282 x->normal_irq_n++; 284 283 285 284 if (likely(intr_status & XGMAC_RI)) { 286 - u32 value = readl(ioaddr + XGMAC_DMA_CH_INT_EN(chan)); 287 - if (likely(value & XGMAC_RIE)) { 285 + if (likely(intr_en & XGMAC_RIE)) { 288 286 x->rx_normal_irq_n++; 289 287 ret |= handle_rx; 290 288 } ··· 295 295 } 296 296 297 297 /* Clear interrupts */ 298 - writel(~0x0, ioaddr + XGMAC_DMA_CH_STATUS(chan)); 298 + writel(intr_en & intr_status, ioaddr + XGMAC_DMA_CH_STATUS(chan)); 299 299 300 300 return ret; 301 301 }
+42 -49
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
··· 1549 1549 goto err_dma; 1550 1550 1551 1551 if (priv->extend_desc) { 1552 - rx_q->dma_erx = dma_zalloc_coherent(priv->device, 1553 - DMA_RX_SIZE * 1554 - sizeof(struct 1555 - dma_extended_desc), 1556 - &rx_q->dma_rx_phy, 1557 - GFP_KERNEL); 1552 + rx_q->dma_erx = dma_alloc_coherent(priv->device, 1553 + DMA_RX_SIZE * sizeof(struct dma_extended_desc), 1554 + &rx_q->dma_rx_phy, 1555 + GFP_KERNEL); 1558 1556 if (!rx_q->dma_erx) 1559 1557 goto err_dma; 1560 1558 1561 1559 } else { 1562 - rx_q->dma_rx = dma_zalloc_coherent(priv->device, 1563 - DMA_RX_SIZE * 1564 - sizeof(struct 1565 - dma_desc), 1566 - &rx_q->dma_rx_phy, 1567 - GFP_KERNEL); 1560 + rx_q->dma_rx = dma_alloc_coherent(priv->device, 1561 + DMA_RX_SIZE * sizeof(struct dma_desc), 1562 + &rx_q->dma_rx_phy, 1563 + GFP_KERNEL); 1568 1564 if (!rx_q->dma_rx) 1569 1565 goto err_dma; 1570 1566 } ··· 1608 1612 goto err_dma; 1609 1613 1610 1614 if (priv->extend_desc) { 1611 - tx_q->dma_etx = dma_zalloc_coherent(priv->device, 1612 - DMA_TX_SIZE * 1613 - sizeof(struct 1614 - dma_extended_desc), 1615 - &tx_q->dma_tx_phy, 1616 - GFP_KERNEL); 1615 + tx_q->dma_etx = dma_alloc_coherent(priv->device, 1616 + DMA_TX_SIZE * sizeof(struct dma_extended_desc), 1617 + &tx_q->dma_tx_phy, 1618 + GFP_KERNEL); 1617 1619 if (!tx_q->dma_etx) 1618 1620 goto err_dma; 1619 1621 } else { 1620 - tx_q->dma_tx = dma_zalloc_coherent(priv->device, 1621 - DMA_TX_SIZE * 1622 - sizeof(struct 1623 - dma_desc), 1624 - &tx_q->dma_tx_phy, 1625 - GFP_KERNEL); 1622 + tx_q->dma_tx = dma_alloc_coherent(priv->device, 1623 + DMA_TX_SIZE * sizeof(struct dma_desc), 1624 + &tx_q->dma_tx_phy, 1625 + GFP_KERNEL); 1626 1626 if (!tx_q->dma_tx) 1627 1627 goto err_dma; 1628 1628 } ··· 3517 3525 struct stmmac_channel *ch = 3518 3526 container_of(napi, struct stmmac_channel, napi); 3519 3527 struct stmmac_priv *priv = ch->priv_data; 3520 - int work_done = 0, work_rem = budget; 3528 + int work_done, rx_done = 0, tx_done = 0; 3521 3529 u32 chan = ch->index; 3522 3530 3523 3531 priv->xstats.napi_poll++; 3524 3532 3525 - if (ch->has_tx) { 3526 - int done = stmmac_tx_clean(priv, work_rem, chan); 3533 + if (ch->has_tx) 3534 + tx_done = stmmac_tx_clean(priv, budget, chan); 3535 + if (ch->has_rx) 3536 + rx_done = stmmac_rx(priv, budget, chan); 3527 3537 3528 - work_done += done; 3529 - work_rem -= done; 3530 - } 3538 + work_done = max(rx_done, tx_done); 3539 + work_done = min(work_done, budget); 3531 3540 3532 - if (ch->has_rx) { 3533 - int done = stmmac_rx(priv, work_rem, chan); 3541 + if (work_done < budget && napi_complete_done(napi, work_done)) { 3542 + int stat; 3534 3543 3535 - work_done += done; 3536 - work_rem -= done; 3537 - } 3538 - 3539 - if (work_done < budget && napi_complete_done(napi, work_done)) 3540 3544 stmmac_enable_dma_irq(priv, priv->ioaddr, chan); 3545 + stat = stmmac_dma_interrupt_status(priv, priv->ioaddr, 3546 + &priv->xstats, chan); 3547 + if (stat && napi_reschedule(napi)) 3548 + stmmac_disable_dma_irq(priv, priv->ioaddr, chan); 3549 + } 3541 3550 3542 3551 return work_done; 3543 3552 } ··· 4161 4168 return ret; 4162 4169 } 4163 4170 4171 + /* Rx Watchdog is available in the COREs newer than the 3.40. 4172 + * In some case, for example on bugged HW this feature 4173 + * has to be disable and this can be done by passing the 4174 + * riwt_off field from the platform. 
4175 + */ 4176 + if (((priv->synopsys_id >= DWMAC_CORE_3_50) || 4177 + (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) { 4178 + priv->use_riwt = 1; 4179 + dev_info(priv->device, 4180 + "Enable RX Mitigation via HW Watchdog Timer\n"); 4181 + } 4182 + 4164 4183 return 0; 4165 4184 } 4166 4185 ··· 4304 4299 4305 4300 if (flow_ctrl) 4306 4301 priv->flow_ctrl = FLOW_AUTO; /* RX/TX pause on */ 4307 - 4308 - /* Rx Watchdog is available in the COREs newer than the 3.40. 4309 - * In some case, for example on bugged HW this feature 4310 - * has to be disable and this can be done by passing the 4311 - * riwt_off field from the platform. 4312 - */ 4313 - if (((priv->synopsys_id >= DWMAC_CORE_3_50) || 4314 - (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) { 4315 - priv->use_riwt = 1; 4316 - dev_info(priv->device, 4317 - "Enable RX Mitigation via HW Watchdog Timer\n"); 4318 - } 4319 4302 4320 4303 /* Setup channels NAPI */ 4321 4304 maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
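The reworked poller above completes NAPI only when under budget, then re-reads the DMA interrupt status and reschedules itself if an event raced with the interrupt re-enable, closing the stall window left by the old budget-splitting logic. The tail of that loop, sketched with hypothetical channel callbacks:

    #include <linux/netdevice.h>

    struct example_chan_ops {           /* stand-ins, not stmmac API */
            void (*irq_enable)(u32 chan);
            void (*irq_disable)(u32 chan);
            bool (*status_pending)(u32 chan);
    };

    static int example_poll_tail(struct napi_struct *napi,
                                 const struct example_chan_ops *ops,
                                 u32 chan, int work_done, int budget)
    {
            if (work_done < budget && napi_complete_done(napi, work_done)) {
                    ops->irq_enable(chan);
                    if (ops->status_pending(chan) && napi_reschedule(napi))
                            ops->irq_disable(chan); /* poll again shortly */
            }
            return work_done;
    }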
+10
drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
··· 299 299 */ 300 300 static void stmmac_pci_remove(struct pci_dev *pdev) 301 301 { 302 + int i; 303 + 302 304 stmmac_dvr_remove(&pdev->dev); 305 + 306 + for (i = 0; i <= PCI_STD_RESOURCE_END; i++) { 307 + if (pci_resource_len(pdev, i) == 0) 308 + continue; 309 + pcim_iounmap_regions(pdev, BIT(i)); 310 + break; 311 + } 312 + 303 313 pci_disable_device(pdev); 304 314 } 305 315
+2
drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
··· 301 301 /* Queue 0 is not AVB capable */ 302 302 if (queue <= 0 || queue >= tx_queues_count) 303 303 return -EINVAL; 304 + if (!priv->dma_cap.av) 305 + return -EOPNOTSUPP; 304 306 if (priv->speed != SPEED_100 && priv->speed != SPEED_1000) 305 307 return -EOPNOTSUPP; 306 308
+4 -4
drivers/net/ethernet/tundra/tsi108_eth.c
··· 1311 1311 data->id, dev->irq, dev->name); 1312 1312 } 1313 1313 1314 - data->rxring = dma_zalloc_coherent(&data->pdev->dev, rxring_size, 1315 - &data->rxdma, GFP_KERNEL); 1314 + data->rxring = dma_alloc_coherent(&data->pdev->dev, rxring_size, 1315 + &data->rxdma, GFP_KERNEL); 1316 1316 if (!data->rxring) 1317 1317 return -ENOMEM; 1318 1318 1319 - data->txring = dma_zalloc_coherent(&data->pdev->dev, txring_size, 1320 - &data->txdma, GFP_KERNEL); 1319 + data->txring = dma_alloc_coherent(&data->pdev->dev, txring_size, 1320 + &data->txdma, GFP_KERNEL); 1321 1321 if (!data->txring) { 1322 1322 dma_free_coherent(&data->pdev->dev, rxring_size, data->rxring, 1323 1323 data->rxdma);
+6 -6
drivers/net/ethernet/xilinx/ll_temac_main.c
··· 243 243 244 244 /* allocate the tx and rx ring buffer descriptors. */ 245 245 /* returns a virtual address and a physical address. */ 246 - lp->tx_bd_v = dma_zalloc_coherent(ndev->dev.parent, 247 - sizeof(*lp->tx_bd_v) * TX_BD_NUM, 248 - &lp->tx_bd_p, GFP_KERNEL); 246 + lp->tx_bd_v = dma_alloc_coherent(ndev->dev.parent, 247 + sizeof(*lp->tx_bd_v) * TX_BD_NUM, 248 + &lp->tx_bd_p, GFP_KERNEL); 249 249 if (!lp->tx_bd_v) 250 250 goto out; 251 251 252 - lp->rx_bd_v = dma_zalloc_coherent(ndev->dev.parent, 253 - sizeof(*lp->rx_bd_v) * RX_BD_NUM, 254 - &lp->rx_bd_p, GFP_KERNEL); 252 + lp->rx_bd_v = dma_alloc_coherent(ndev->dev.parent, 253 + sizeof(*lp->rx_bd_v) * RX_BD_NUM, 254 + &lp->rx_bd_p, GFP_KERNEL); 255 255 if (!lp->rx_bd_v) 256 256 goto out; 257 257
+6 -6
drivers/net/ethernet/xilinx/xilinx_axienet_main.c
··· 199 199 lp->rx_bd_ci = 0; 200 200 201 201 /* Allocate the Tx and Rx buffer descriptors. */ 202 - lp->tx_bd_v = dma_zalloc_coherent(ndev->dev.parent, 203 - sizeof(*lp->tx_bd_v) * TX_BD_NUM, 204 - &lp->tx_bd_p, GFP_KERNEL); 202 + lp->tx_bd_v = dma_alloc_coherent(ndev->dev.parent, 203 + sizeof(*lp->tx_bd_v) * TX_BD_NUM, 204 + &lp->tx_bd_p, GFP_KERNEL); 205 205 if (!lp->tx_bd_v) 206 206 goto out; 207 207 208 - lp->rx_bd_v = dma_zalloc_coherent(ndev->dev.parent, 209 - sizeof(*lp->rx_bd_v) * RX_BD_NUM, 210 - &lp->rx_bd_p, GFP_KERNEL); 208 + lp->rx_bd_v = dma_alloc_coherent(ndev->dev.parent, 209 + sizeof(*lp->rx_bd_v) * RX_BD_NUM, 210 + &lp->rx_bd_p, GFP_KERNEL); 211 211 if (!lp->rx_bd_v) 212 212 goto out; 213 213
+3 -3
drivers/net/fddi/defxx.c
··· 1139 1139 #endif 1140 1140 sizeof(PI_CONSUMER_BLOCK) + 1141 1141 (PI_ALIGN_K_DESC_BLK - 1); 1142 - bp->kmalloced = top_v = dma_zalloc_coherent(bp->bus_dev, alloc_size, 1143 - &bp->kmalloced_dma, 1144 - GFP_ATOMIC); 1142 + bp->kmalloced = top_v = dma_alloc_coherent(bp->bus_dev, alloc_size, 1143 + &bp->kmalloced_dma, 1144 + GFP_ATOMIC); 1145 1145 if (top_v == NULL) 1146 1146 return DFX_K_FAILURE; 1147 1147
+4 -4
drivers/net/fddi/skfp/skfddi.c
··· 409 409 if (bp->SharedMemSize > 0) { 410 410 bp->SharedMemSize += 16; // for descriptor alignment 411 411 412 - bp->SharedMemAddr = dma_zalloc_coherent(&bp->pdev.dev, 413 - bp->SharedMemSize, 414 - &bp->SharedMemDMA, 415 - GFP_ATOMIC); 412 + bp->SharedMemAddr = dma_alloc_coherent(&bp->pdev.dev, 413 + bp->SharedMemSize, 414 + &bp->SharedMemDMA, 415 + GFP_ATOMIC); 416 416 if (!bp->SharedMemAddr) { 417 417 printk("could not allocate mem for "); 418 418 printk("hardware module: %ld byte\n",
+1 -1
drivers/net/macvlan.c
··· 337 337 338 338 if (src) 339 339 dev_put(src->dev); 340 - kfree_skb(skb); 340 + consume_skb(skb); 341 341 } 342 342 } 343 343
+2
drivers/net/phy/bcm87xx.c
··· 197 197 .phy_id = PHY_ID_BCM8706, 198 198 .phy_id_mask = 0xffffffff, 199 199 .name = "Broadcom BCM8706", 200 + .features = PHY_10GBIT_FEC_FEATURES, 200 201 .config_init = bcm87xx_config_init, 201 202 .config_aneg = bcm87xx_config_aneg, 202 203 .read_status = bcm87xx_read_status, ··· 209 208 .phy_id = PHY_ID_BCM8727, 210 209 .phy_id_mask = 0xffffffff, 211 210 .name = "Broadcom BCM8727", 211 + .features = PHY_10GBIT_FEC_FEATURES, 212 212 .config_init = bcm87xx_config_init, 213 213 .config_aneg = bcm87xx_config_aneg, 214 214 .read_status = bcm87xx_read_status,
+1
drivers/net/phy/cortina.c
··· 88 88 .phy_id = PHY_ID_CS4340, 89 89 .phy_id_mask = 0xffffffff, 90 90 .name = "Cortina CS4340", 91 + .features = PHY_10GBIT_FEATURES, 91 92 .config_init = gen10g_config_init, 92 93 .config_aneg = gen10g_config_aneg, 93 94 .read_status = cortina_read_status,
+35 -2
drivers/net/phy/marvell.c
··· 1046 1046 return 0; 1047 1047 } 1048 1048 1049 + /* The VOD can be out of specification on link up. Poke an 1050 + * undocumented register, in an undocumented page, with a magic value 1051 + * to fix this. 1052 + */ 1053 + static int m88e6390_errata(struct phy_device *phydev) 1054 + { 1055 + int err; 1056 + 1057 + err = phy_write(phydev, MII_BMCR, 1058 + BMCR_ANENABLE | BMCR_SPEED1000 | BMCR_FULLDPLX); 1059 + if (err) 1060 + return err; 1061 + 1062 + usleep_range(300, 400); 1063 + 1064 + err = phy_write_paged(phydev, 0xf8, 0x08, 0x36); 1065 + if (err) 1066 + return err; 1067 + 1068 + return genphy_soft_reset(phydev); 1069 + } 1070 + 1071 + static int m88e6390_config_aneg(struct phy_device *phydev) 1072 + { 1073 + int err; 1074 + 1075 + err = m88e6390_errata(phydev); 1076 + if (err) 1077 + return err; 1078 + 1079 + return m88e1510_config_aneg(phydev); 1080 + } 1081 + 1049 1082 /** 1050 1083 * fiber_lpa_mod_linkmode_lpa_t 1051 1084 * @advertising: the linkmode advertisement settings ··· 1435 1402 * before enabling it if !phy_interrupt_is_valid() 1436 1403 */ 1437 1404 if (!phy_interrupt_is_valid(phydev)) 1438 - phy_read(phydev, MII_M1011_IEVENT); 1405 + __phy_read(phydev, MII_M1011_IEVENT); 1439 1406 1440 1407 /* Enable the WOL interrupt */ 1441 1408 err = __phy_modify(phydev, MII_88E1318S_PHY_CSIER, 0, ··· 2316 2283 .features = PHY_GBIT_FEATURES, 2317 2284 .probe = m88e6390_probe, 2318 2285 .config_init = &marvell_config_init, 2319 - .config_aneg = &m88e1510_config_aneg, 2286 + .config_aneg = &m88e6390_config_aneg, 2320 2287 .read_status = &marvell_read_status, 2321 2288 .ack_interrupt = &marvell_ack_interrupt, 2322 2289 .config_intr = &marvell_config_intr,
+1
drivers/net/phy/mdio_bus.c
··· 390 390 if (IS_ERR(gpiod)) { 391 391 dev_err(&bus->dev, "mii_bus %s couldn't get reset GPIO\n", 392 392 bus->id); 393 + device_del(&bus->dev); 393 394 return PTR_ERR(gpiod); 394 395 } else if (gpiod) { 395 396 bus->reset_gpiod = gpiod;
+1
drivers/net/phy/meson-gxl.c
··· 233 233 .name = "Meson GXL Internal PHY", 234 234 .features = PHY_BASIC_FEATURES, 235 235 .flags = PHY_IS_INTERNAL, 236 + .soft_reset = genphy_soft_reset, 236 237 .config_init = meson_gxl_config_init, 237 238 .aneg_done = genphy_aneg_done, 238 239 .read_status = meson_gxl_read_status,
+2
drivers/net/phy/micrel.c
··· 1070 1070 .driver_data = &ksz9021_type, 1071 1071 .probe = kszphy_probe, 1072 1072 .config_init = ksz9031_config_init, 1073 + .soft_reset = genphy_soft_reset, 1073 1074 .read_status = ksz9031_read_status, 1074 1075 .ack_interrupt = kszphy_ack_interrupt, 1075 1076 .config_intr = kszphy_config_intr, ··· 1099 1098 .phy_id = PHY_ID_KSZ8873MLL, 1100 1099 .phy_id_mask = MICREL_PHY_ID_MASK, 1101 1100 .name = "Micrel KSZ8873MLL Switch", 1101 + .features = PHY_BASIC_FEATURES, 1102 1102 .config_init = kszphy_config_init, 1103 1103 .config_aneg = ksz8873mll_config_aneg, 1104 1104 .read_status = ksz8873mll_read_status,
+7 -12
drivers/net/phy/phy.c
··· 543 543 544 544 mutex_lock(&phydev->lock); 545 545 546 - if (!__phy_is_started(phydev)) { 547 - WARN(1, "called from state %s\n", 548 - phy_state_to_str(phydev->state)); 549 - err = -EBUSY; 550 - goto out_unlock; 551 - } 552 - 553 546 if (AUTONEG_DISABLE == phydev->autoneg) 554 547 phy_sanitize_settings(phydev); 555 548 ··· 553 560 if (err < 0) 554 561 goto out_unlock; 555 562 556 - if (phydev->autoneg == AUTONEG_ENABLE) { 557 - err = phy_check_link_status(phydev); 558 - } else { 559 - phydev->state = PHY_FORCING; 560 - phydev->link_timeout = PHY_FORCE_TIMEOUT; 563 + if (__phy_is_started(phydev)) { 564 + if (phydev->autoneg == AUTONEG_ENABLE) { 565 + err = phy_check_link_status(phydev); 566 + } else { 567 + phydev->state = PHY_FORCING; 568 + phydev->link_timeout = PHY_FORCE_TIMEOUT; 569 + } 561 570 } 562 571 563 572 out_unlock:
+17
drivers/net/phy/phy_device.c
··· 61 61 __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_features) __ro_after_init; 62 62 EXPORT_SYMBOL_GPL(phy_10gbit_features); 63 63 64 + __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_fec_features) __ro_after_init; 65 + EXPORT_SYMBOL_GPL(phy_10gbit_fec_features); 66 + 64 67 static const int phy_basic_ports_array[] = { 65 68 ETHTOOL_LINK_MODE_Autoneg_BIT, 66 69 ETHTOOL_LINK_MODE_TP_BIT, ··· 111 108 ETHTOOL_LINK_MODE_10000baseT_Full_BIT, 112 109 }; 113 110 EXPORT_SYMBOL_GPL(phy_10gbit_features_array); 111 + 112 + const int phy_10gbit_fec_features_array[1] = { 113 + ETHTOOL_LINK_MODE_10000baseR_FEC_BIT, 114 + }; 115 + EXPORT_SYMBOL_GPL(phy_10gbit_fec_features_array); 114 116 115 117 __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_full_features) __ro_after_init; 116 118 EXPORT_SYMBOL_GPL(phy_10gbit_full_features); ··· 199 191 linkmode_set_bit_array(phy_10gbit_full_features_array, 200 192 ARRAY_SIZE(phy_10gbit_full_features_array), 201 193 phy_10gbit_full_features); 194 + /* 10G FEC only */ 195 + linkmode_set_bit_array(phy_10gbit_fec_features_array, 196 + ARRAY_SIZE(phy_10gbit_fec_features_array), 197 + phy_10gbit_fec_features); 202 198 } 203 199 204 200 void phy_device_free(struct phy_device *phydev) ··· 2254 2242 int phy_driver_register(struct phy_driver *new_driver, struct module *owner) 2255 2243 { 2256 2244 int retval; 2245 + 2246 + if (WARN_ON(!new_driver->features)) { 2247 + pr_err("%s: Driver features are missing\n", new_driver->name); 2248 + return -EINVAL; 2249 + } 2257 2250 2258 2251 new_driver->mdiodrv.flags |= MDIO_DEVICE_IS_PHY; 2259 2252 new_driver->mdiodrv.driver.name = new_driver->name;
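With the check added to phy_driver_register() above, a PHY driver that fails to declare its supported link modes is now rejected at registration time; the .features additions to bcm87xx, cortina, micrel and teranetics elsewhere in this series exist to satisfy it. A minimal conforming driver skeleton (IDs illustrative):

    #include <linux/phy.h>

    static struct phy_driver example_phy_driver = {
            .phy_id         = 0x00112233,           /* illustrative */
            .phy_id_mask    = 0xffffffff,
            .name           = "Example PHY",
            .features       = PHY_BASIC_FEATURES,   /* now mandatory */
            .config_aneg    = genphy_config_aneg,
            .read_status    = genphy_read_status,
    };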
+1
drivers/net/phy/teranetics.c
··· 80 80 .phy_id = PHY_ID_TN2020, 81 81 .phy_id_mask = 0xffffffff, 82 82 .name = "Teranetics TN2020", 83 + .features = PHY_10GBIT_FEATURES, 83 84 .soft_reset = gen10g_no_soft_reset, 84 85 .aneg_done = teranetics_aneg_done, 85 86 .config_init = gen10g_config_init,
+1
drivers/net/ppp/pppoe.c
··· 445 445 if (pskb_trim_rcsum(skb, len)) 446 446 goto drop; 447 447 448 + ph = pppoe_hdr(skb); 448 449 pn = pppoe_pernet(dev_net(dev)); 449 450 450 451 /* Note that get_item does a sock_hold(), so sk_pppox(po)
+7 -4
drivers/net/tun.c
··· 856 856 err = 0; 857 857 } 858 858 859 - rcu_assign_pointer(tfile->tun, tun); 860 - rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile); 861 - tun->numqueues++; 862 - 863 859 if (tfile->detached) { 864 860 tun_enable_queue(tfile); 865 861 } else { ··· 872 876 * refcnt. 873 877 */ 874 878 879 + /* Publish tfile->tun and tun->tfiles only after we've fully 880 + * initialized tfile; otherwise we risk using half-initialized 881 + * object. 882 + */ 883 + rcu_assign_pointer(tfile->tun, tun); 884 + rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile); 885 + tun->numqueues++; 875 886 out: 876 887 return err; 877 888 }
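The tun reordering above is the classic RCU publication rule: rcu_assign_pointer() includes a release barrier, so every store made to the object before it is guaranteed visible to any reader that later dereferences the pointer. In miniature:

    #include <linux/rcupdate.h>

    struct example_item {
            int a, b;
    };

    static struct example_item __rcu *example_slot;

    static void example_publish(struct example_item *it)
    {
            it->a = 1;                              /* initialise fully ... */
            it->b = 2;
            rcu_assign_pointer(example_slot, it);   /* ... publish last */
    }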
+15
drivers/net/usb/aqc111.c
··· 1287 1287 1288 1288 #undef ASIX112_DESC 1289 1289 1290 + static const struct driver_info trendnet_info = { 1291 + .description = "USB-C 3.1 to 5GBASE-T Ethernet Adapter", 1292 + .bind = aqc111_bind, 1293 + .unbind = aqc111_unbind, 1294 + .status = aqc111_status, 1295 + .link_reset = aqc111_link_reset, 1296 + .reset = aqc111_reset, 1297 + .stop = aqc111_stop, 1298 + .flags = FLAG_ETHER | FLAG_FRAMING_AX | 1299 + FLAG_AVOID_UNLINK_URBS | FLAG_MULTI_PACKET, 1300 + .rx_fixup = aqc111_rx_fixup, 1301 + .tx_fixup = aqc111_tx_fixup, 1302 + }; 1303 + 1290 1304 static int aqc111_suspend(struct usb_interface *intf, pm_message_t message) 1291 1305 { 1292 1306 struct usbnet *dev = usb_get_intfdata(intf); ··· 1454 1440 {AQC111_USB_ETH_DEV(0x2eca, 0xc101, aqc111_info)}, 1455 1441 {AQC111_USB_ETH_DEV(0x0b95, 0x2790, asix111_info)}, 1456 1442 {AQC111_USB_ETH_DEV(0x0b95, 0x2791, asix112_info)}, 1443 + {AQC111_USB_ETH_DEV(0x20f4, 0xe05a, trendnet_info)}, 1457 1444 { },/* END */ 1458 1445 }; 1459 1446 MODULE_DEVICE_TABLE(usb, products);
+19 -15
drivers/net/usb/cdc_ether.c
··· 179 179 * probed with) and a slave/data interface; union 180 180 * descriptors sort this all out. 181 181 */ 182 - info->control = usb_ifnum_to_if(dev->udev, 183 - info->u->bMasterInterface0); 184 - info->data = usb_ifnum_to_if(dev->udev, 185 - info->u->bSlaveInterface0); 182 + info->control = usb_ifnum_to_if(dev->udev, info->u->bMasterInterface0); 183 + info->data = usb_ifnum_to_if(dev->udev, info->u->bSlaveInterface0); 186 184 if (!info->control || !info->data) { 187 185 dev_dbg(&intf->dev, 188 186 "master #%u/%p slave #%u/%p\n", ··· 214 216 /* a data interface altsetting does the real i/o */ 215 217 d = &info->data->cur_altsetting->desc; 216 218 if (d->bInterfaceClass != USB_CLASS_CDC_DATA) { 217 - dev_dbg(&intf->dev, "slave class %u\n", 218 - d->bInterfaceClass); 219 + dev_dbg(&intf->dev, "slave class %u\n", d->bInterfaceClass); 219 220 goto bad_desc; 220 221 } 221 222 skip: 222 - if ( rndis && 223 - header.usb_cdc_acm_descriptor && 224 - header.usb_cdc_acm_descriptor->bmCapabilities) { 225 - dev_dbg(&intf->dev, 226 - "ACM capabilities %02x, not really RNDIS?\n", 227 - header.usb_cdc_acm_descriptor->bmCapabilities); 228 - goto bad_desc; 223 + if (rndis && header.usb_cdc_acm_descriptor && 224 + header.usb_cdc_acm_descriptor->bmCapabilities) { 225 + dev_dbg(&intf->dev, 226 + "ACM capabilities %02x, not really RNDIS?\n", 227 + header.usb_cdc_acm_descriptor->bmCapabilities); 228 + goto bad_desc; 229 229 } 230 230 231 231 if (header.usb_cdc_ether_desc && info->ether->wMaxSegmentSize) { ··· 234 238 } 235 239 236 240 if (header.usb_cdc_mdlm_desc && 237 - memcmp(header.usb_cdc_mdlm_desc->bGUID, mbm_guid, 16)) { 241 + memcmp(header.usb_cdc_mdlm_desc->bGUID, mbm_guid, 16)) { 238 242 dev_dbg(&intf->dev, "GUID doesn't match\n"); 239 243 goto bad_desc; 240 244 } ··· 298 302 if (info->control->cur_altsetting->desc.bNumEndpoints == 1) { 299 303 struct usb_endpoint_descriptor *desc; 300 304 301 - dev->status = &info->control->cur_altsetting->endpoint [0]; 305 + dev->status = &info->control->cur_altsetting->endpoint[0]; 302 306 desc = &dev->status->desc; 303 307 if (!usb_endpoint_is_int_in(desc) || 304 308 (le16_to_cpu(desc->wMaxPacketSize) ··· 838 842 /* ASIX USB 3.1 Gen1 to 2.5G Multi-Gigabit Ethernet Adapter(based on AQC112U) */ 839 843 { 840 844 USB_DEVICE_AND_INTERFACE_INFO(ASIX_VENDOR_ID, 0x2791, USB_CLASS_COMM, 845 + USB_CDC_SUBCLASS_ETHERNET, 846 + USB_CDC_PROTO_NONE), 847 + .driver_info = 0, 848 + }, 849 + 850 + /* USB-C 3.1 to 5GBASE-T Ethernet Adapter (based on AQC111U) */ 851 + { 852 + USB_DEVICE_AND_INTERFACE_INFO(0x20f4, 0xe05a, USB_CLASS_COMM, 841 853 USB_CDC_SUBCLASS_ETHERNET, 842 854 USB_CDC_PROTO_NONE), 843 855 .driver_info = 0,
+1
drivers/net/usb/qmi_wwan.c
··· 123 123 dev->addr_len = 0; 124 124 dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST; 125 125 dev->netdev_ops = &qmimux_netdev_ops; 126 + dev->mtu = 1500; 126 127 dev->needs_free_netdev = true; 127 128 } 128 129
+6 -6
drivers/net/virtio_net.c
··· 1330 1330 return stats.packets; 1331 1331 } 1332 1332 1333 - static void free_old_xmit_skbs(struct send_queue *sq) 1333 + static void free_old_xmit_skbs(struct send_queue *sq, bool in_napi) 1334 1334 { 1335 1335 struct sk_buff *skb; 1336 1336 unsigned int len; ··· 1343 1343 bytes += skb->len; 1344 1344 packets++; 1345 1345 1346 - dev_consume_skb_any(skb); 1346 + napi_consume_skb(skb, in_napi); 1347 1347 } 1348 1348 1349 1349 /* Avoid overhead when no packets have been processed ··· 1369 1369 return; 1370 1370 1371 1371 if (__netif_tx_trylock(txq)) { 1372 - free_old_xmit_skbs(sq); 1372 + free_old_xmit_skbs(sq, true); 1373 1373 __netif_tx_unlock(txq); 1374 1374 } 1375 1375 ··· 1445 1445 struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, vq2txq(sq->vq)); 1446 1446 1447 1447 __netif_tx_lock(txq, raw_smp_processor_id()); 1448 - free_old_xmit_skbs(sq); 1448 + free_old_xmit_skbs(sq, true); 1449 1449 __netif_tx_unlock(txq); 1450 1450 1451 1451 virtqueue_napi_complete(napi, sq->vq, 0); ··· 1514 1514 bool use_napi = sq->napi.weight; 1515 1515 1516 1516 /* Free up any pending old buffers before queueing new ones. */ 1517 - free_old_xmit_skbs(sq); 1517 + free_old_xmit_skbs(sq, false); 1518 1518 1519 1519 if (use_napi && kick) 1520 1520 virtqueue_enable_cb_delayed(sq->vq); ··· 1557 1557 if (!use_napi && 1558 1558 unlikely(!virtqueue_enable_cb_delayed(sq->vq))) { 1559 1559 /* More just got used, free them then recheck. */ 1560 - free_old_xmit_skbs(sq); 1560 + free_old_xmit_skbs(sq, false); 1561 1561 if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) { 1562 1562 netif_start_subqueue(dev, qnum); 1563 1563 virtqueue_disable_cb(sq->vq);
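napi_consume_skb() can recycle skbs through the per-CPU NAPI cache, but only when the caller really is inside its NAPI poll; with a zero/false second argument it falls back to dev_consume_skb_any(). That context bit is exactly what the new in_napi parameter threads through above, which is why the start_xmit paths keep passing false. A one-line illustration:

    #include <linux/skbuff.h>

    static void example_free_xmitted(struct sk_buff *skb, bool in_napi)
    {
            /* in_napi may only be true from within a NAPI poll */
            napi_consume_skb(skb, in_napi);
    }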
+4 -4
drivers/net/vmxnet3/vmxnet3_drv.c
··· 535 535 } 536 536 537 537 sz = tq->tx_ring.size * sizeof(tq->buf_info[0]); 538 - tq->buf_info = dma_zalloc_coherent(&adapter->pdev->dev, sz, 539 - &tq->buf_info_pa, GFP_KERNEL); 538 + tq->buf_info = dma_alloc_coherent(&adapter->pdev->dev, sz, 539 + &tq->buf_info_pa, GFP_KERNEL); 540 540 if (!tq->buf_info) 541 541 goto err; 542 542 ··· 1815 1815 1816 1816 sz = sizeof(struct vmxnet3_rx_buf_info) * (rq->rx_ring[0].size + 1817 1817 rq->rx_ring[1].size); 1818 - bi = dma_zalloc_coherent(&adapter->pdev->dev, sz, &rq->buf_info_pa, 1819 - GFP_KERNEL); 1818 + bi = dma_alloc_coherent(&adapter->pdev->dev, sz, &rq->buf_info_pa, 1819 + GFP_KERNEL); 1820 1820 if (!bi) 1821 1821 goto err; 1822 1822
+64 -5
drivers/net/wan/fsl_ucc_hdlc.c
··· 279 279 iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr4); 280 280 281 281 /* Get BD buffer */ 282 - bd_buffer = dma_zalloc_coherent(priv->dev, 283 - (RX_BD_RING_LEN + TX_BD_RING_LEN) * 284 - MAX_RX_BUF_LENGTH, 285 - &bd_dma_addr, GFP_KERNEL); 282 + bd_buffer = dma_alloc_coherent(priv->dev, 283 + (RX_BD_RING_LEN + TX_BD_RING_LEN) * MAX_RX_BUF_LENGTH, 284 + &bd_dma_addr, GFP_KERNEL); 286 285 287 286 if (!bd_buffer) { 288 287 dev_err(priv->dev, "Could not allocate buffer descriptors\n"); ··· 1056 1057 .ndo_tx_timeout = uhdlc_tx_timeout, 1057 1058 }; 1058 1059 1060 + static int hdlc_map_iomem(char *name, int init_flag, void __iomem **ptr) 1061 + { 1062 + struct device_node *np; 1063 + struct platform_device *pdev; 1064 + struct resource *res; 1065 + static int siram_init_flag; 1066 + int ret = 0; 1067 + 1068 + np = of_find_compatible_node(NULL, NULL, name); 1069 + if (!np) 1070 + return -EINVAL; 1071 + 1072 + pdev = of_find_device_by_node(np); 1073 + if (!pdev) { 1074 + pr_err("%pOFn: failed to lookup pdev\n", np); 1075 + of_node_put(np); 1076 + return -EINVAL; 1077 + } 1078 + 1079 + of_node_put(np); 1080 + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1081 + if (!res) { 1082 + ret = -EINVAL; 1083 + goto error_put_device; 1084 + } 1085 + *ptr = ioremap(res->start, resource_size(res)); 1086 + if (!*ptr) { 1087 + ret = -ENOMEM; 1088 + goto error_put_device; 1089 + } 1090 + 1091 + /* We've remapped the addresses, and we don't need the device any 1092 + * more, so we should release it. 1093 + */ 1094 + put_device(&pdev->dev); 1095 + 1096 + if (init_flag && siram_init_flag == 0) { 1097 + memset_io(*ptr, 0, resource_size(res)); 1098 + siram_init_flag = 1; 1099 + } 1100 + return 0; 1101 + 1102 + error_put_device: 1103 + put_device(&pdev->dev); 1104 + 1105 + return ret; 1106 + } 1107 + 1059 1108 static int ucc_hdlc_probe(struct platform_device *pdev) 1060 1109 { 1061 1110 struct device_node *np = pdev->dev.of_node; ··· 1198 1151 ret = ucc_of_parse_tdm(np, utdm, ut_info); 1199 1152 if (ret) 1200 1153 goto free_utdm; 1154 + 1155 + ret = hdlc_map_iomem("fsl,t1040-qe-si", 0, 1156 + (void __iomem **)&utdm->si_regs); 1157 + if (ret) 1158 + goto free_utdm; 1159 + ret = hdlc_map_iomem("fsl,t1040-qe-siram", 1, 1160 + (void __iomem **)&utdm->siram); 1161 + if (ret) 1162 + goto unmap_si_regs; 1201 1163 } 1202 1164 1203 1165 if (of_property_read_u16(np, "fsl,hmask", &uhdlc_priv->hmask)) ··· 1215 1159 ret = uhdlc_init(uhdlc_priv); 1216 1160 if (ret) { 1217 1161 dev_err(&pdev->dev, "Failed to init uhdlc\n"); 1218 - goto free_utdm; 1162 + goto undo_uhdlc_init; 1219 1163 } 1220 1164 1221 1165 dev = alloc_hdlcdev(uhdlc_priv); ··· 1244 1188 free_dev: 1245 1189 free_netdev(dev); 1246 1190 undo_uhdlc_init: 1191 + iounmap(utdm->siram); 1192 + unmap_si_regs: 1193 + iounmap(utdm->si_regs); 1247 1194 free_utdm: 1248 1195 if (uhdlc_priv->tsa) 1249 1196 kfree(utdm);
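hdlc_map_iomem() above also shows the usual OF lookup discipline: of_find_compatible_node() and of_find_device_by_node() each take a reference, and both must be dropped on every path, success included, since only the ioremap()ed mapping is kept. The same skeleton in isolation (illustrative, minus the one-time siram memset):

    #include <linux/of.h>
    #include <linux/of_platform.h>
    #include <linux/platform_device.h>
    #include <linux/io.h>

    static void __iomem *example_map_compatible(const char *compat)
    {
            struct device_node *np;
            struct platform_device *pdev;
            struct resource *res;
            void __iomem *base = NULL;

            np = of_find_compatible_node(NULL, NULL, compat); /* +node ref */
            if (!np)
                    return NULL;

            pdev = of_find_device_by_node(np);      /* +device ref */
            of_node_put(np);                        /* node ref done */
            if (!pdev)
                    return NULL;

            res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
            if (res)
                    base = ioremap(res->start, resource_size(res));

            put_device(&pdev->dev);                 /* drop on every outcome */
            return base;
    }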
+3 -4
drivers/net/wireless/ath/ath10k/ce.c
··· 1553 1553 * coherent DMA are unsupported 1554 1554 */ 1555 1555 dest_ring->base_addr_owner_space_unaligned = 1556 - dma_zalloc_coherent(ar->dev, 1557 - (nentries * sizeof(struct ce_desc) + 1558 - CE_DESC_RING_ALIGN), 1559 - &base_addr, GFP_KERNEL); 1556 + dma_alloc_coherent(ar->dev, 1557 + (nentries * sizeof(struct ce_desc) + CE_DESC_RING_ALIGN), 1558 + &base_addr, GFP_KERNEL); 1560 1559 if (!dest_ring->base_addr_owner_space_unaligned) { 1561 1560 kfree(dest_ring); 1562 1561 return ERR_PTR(-ENOMEM);
+4 -4
drivers/net/wireless/ath/ath10k/mac.c
··· 5169 5169 if (vif->type == NL80211_IFTYPE_ADHOC || 5170 5170 vif->type == NL80211_IFTYPE_MESH_POINT || 5171 5171 vif->type == NL80211_IFTYPE_AP) { 5172 - arvif->beacon_buf = dma_zalloc_coherent(ar->dev, 5173 - IEEE80211_MAX_FRAME_LEN, 5174 - &arvif->beacon_paddr, 5175 - GFP_ATOMIC); 5172 + arvif->beacon_buf = dma_alloc_coherent(ar->dev, 5173 + IEEE80211_MAX_FRAME_LEN, 5174 + &arvif->beacon_paddr, 5175 + GFP_ATOMIC); 5176 5176 if (!arvif->beacon_buf) { 5177 5177 ret = -ENOMEM; 5178 5178 ath10k_warn(ar, "failed to allocate beacon buffer: %d\n",
+1 -2
drivers/net/wireless/ath/ath10k/pci.c
··· 936 936 */ 937 937 alloc_nbytes = min_t(unsigned int, nbytes, DIAG_TRANSFER_LIMIT); 938 938 939 - data_buf = (unsigned char *)dma_zalloc_coherent(ar->dev, 940 - alloc_nbytes, 939 + data_buf = (unsigned char *)dma_alloc_coherent(ar->dev, alloc_nbytes, 941 940 &ce_data_base, 942 941 GFP_ATOMIC); 943 942
+1 -1
drivers/net/wireless/ath/ath10k/wmi.c
··· 5193 5193 void *vaddr; 5194 5194 5195 5195 pool_size = num_units * round_up(unit_len, 4); 5196 - vaddr = dma_zalloc_coherent(ar->dev, pool_size, &paddr, GFP_KERNEL); 5196 + vaddr = dma_alloc_coherent(ar->dev, pool_size, &paddr, GFP_KERNEL); 5197 5197 5198 5198 if (!vaddr) 5199 5199 return -ENOMEM;
+8 -9
drivers/net/wireless/ath/wcn36xx/dxe.c
··· 174 174 int i; 175 175 176 176 size = wcn_ch->desc_num * sizeof(struct wcn36xx_dxe_desc); 177 - wcn_ch->cpu_addr = dma_zalloc_coherent(dev, size, 178 - &wcn_ch->dma_addr, 179 - GFP_KERNEL); 177 + wcn_ch->cpu_addr = dma_alloc_coherent(dev, size, &wcn_ch->dma_addr, 178 + GFP_KERNEL); 180 179 if (!wcn_ch->cpu_addr) 181 180 return -ENOMEM; 182 181 ··· 626 627 16 - (WCN36XX_BD_CHUNK_SIZE % 8); 627 628 628 629 s = wcn->mgmt_mem_pool.chunk_size * WCN36XX_DXE_CH_DESC_NUMB_TX_H; 629 - cpu_addr = dma_zalloc_coherent(wcn->dev, s, 630 - &wcn->mgmt_mem_pool.phy_addr, 631 - GFP_KERNEL); 630 + cpu_addr = dma_alloc_coherent(wcn->dev, s, 631 + &wcn->mgmt_mem_pool.phy_addr, 632 + GFP_KERNEL); 632 633 if (!cpu_addr) 633 634 goto out_err; 634 635 ··· 641 642 16 - (WCN36XX_BD_CHUNK_SIZE % 8); 642 643 643 644 s = wcn->data_mem_pool.chunk_size * WCN36XX_DXE_CH_DESC_NUMB_TX_L; 644 - cpu_addr = dma_zalloc_coherent(wcn->dev, s, 645 - &wcn->data_mem_pool.phy_addr, 646 - GFP_KERNEL); 645 + cpu_addr = dma_alloc_coherent(wcn->dev, s, 646 + &wcn->data_mem_pool.phy_addr, 647 + GFP_KERNEL); 647 648 if (!cpu_addr) 648 649 goto out_err; 649 650
+4 -4
drivers/net/wireless/ath/wil6210/txrx_edma.c
··· 99 99 /* Status messages are allocated and initialized to 0. This is necessary 100 100 * since DR bit should be initialized to 0. 101 101 */ 102 - sring->va = dma_zalloc_coherent(dev, sz, &sring->pa, GFP_KERNEL); 102 + sring->va = dma_alloc_coherent(dev, sz, &sring->pa, GFP_KERNEL); 103 103 if (!sring->va) 104 104 return -ENOMEM; 105 105 ··· 381 381 if (!ring->ctx) 382 382 goto err; 383 383 384 - ring->va = dma_zalloc_coherent(dev, sz, &ring->pa, GFP_KERNEL); 384 + ring->va = dma_alloc_coherent(dev, sz, &ring->pa, GFP_KERNEL); 385 385 if (!ring->va) 386 386 goto err_free_ctx; 387 387 388 388 if (ring->is_rx) { 389 389 sz = sizeof(*ring->edma_rx_swtail.va); 390 390 ring->edma_rx_swtail.va = 391 - dma_zalloc_coherent(dev, sz, &ring->edma_rx_swtail.pa, 392 - GFP_KERNEL); 391 + dma_alloc_coherent(dev, sz, &ring->edma_rx_swtail.pa, 392 + GFP_KERNEL); 393 393 if (!ring->edma_rx_swtail.va) 394 394 goto err_free_va; 395 395 }
+3 -3
drivers/net/wireless/broadcom/b43/dma.c
··· 431 431 u16 ring_mem_size = (ring->type == B43_DMA_64BIT) ? 432 432 B43_DMA64_RINGMEMSIZE : B43_DMA32_RINGMEMSIZE; 433 433 434 - ring->descbase = dma_zalloc_coherent(ring->dev->dev->dma_dev, 435 - ring_mem_size, &(ring->dmabase), 436 - GFP_KERNEL); 434 + ring->descbase = dma_alloc_coherent(ring->dev->dev->dma_dev, 435 + ring_mem_size, &(ring->dmabase), 436 + GFP_KERNEL); 437 437 if (!ring->descbase) 438 438 return -ENOMEM; 439 439
+3 -3
drivers/net/wireless/broadcom/b43legacy/dma.c
··· 331 331 static int alloc_ringmemory(struct b43legacy_dmaring *ring) 332 332 { 333 333 /* GFP flags must match the flags in free_ringmemory()! */ 334 - ring->descbase = dma_zalloc_coherent(ring->dev->dev->dma_dev, 335 - B43legacy_DMA_RINGMEMSIZE, 336 - &(ring->dmabase), GFP_KERNEL); 334 + ring->descbase = dma_alloc_coherent(ring->dev->dev->dma_dev, 335 + B43legacy_DMA_RINGMEMSIZE, 336 + &(ring->dmabase), GFP_KERNEL); 337 337 if (!ring->descbase) 338 338 return -ENOMEM; 339 339
+8 -8
drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
··· 1281 1281 u32 addr; 1282 1282 1283 1283 devinfo->shared.scratch = 1284 - dma_zalloc_coherent(&devinfo->pdev->dev, 1285 - BRCMF_DMA_D2H_SCRATCH_BUF_LEN, 1286 - &devinfo->shared.scratch_dmahandle, 1287 - GFP_KERNEL); 1284 + dma_alloc_coherent(&devinfo->pdev->dev, 1285 + BRCMF_DMA_D2H_SCRATCH_BUF_LEN, 1286 + &devinfo->shared.scratch_dmahandle, 1287 + GFP_KERNEL); 1288 1288 if (!devinfo->shared.scratch) 1289 1289 goto fail; 1290 1290 ··· 1298 1298 brcmf_pcie_write_tcm32(devinfo, addr, BRCMF_DMA_D2H_SCRATCH_BUF_LEN); 1299 1299 1300 1300 devinfo->shared.ringupd = 1301 - dma_zalloc_coherent(&devinfo->pdev->dev, 1302 - BRCMF_DMA_D2H_RINGUPD_BUF_LEN, 1303 - &devinfo->shared.ringupd_dmahandle, 1304 - GFP_KERNEL); 1301 + dma_alloc_coherent(&devinfo->pdev->dev, 1302 + BRCMF_DMA_D2H_RINGUPD_BUF_LEN, 1303 + &devinfo->shared.ringupd_dmahandle, 1304 + GFP_KERNEL); 1305 1305 if (!devinfo->shared.ringupd) 1306 1306 goto fail; 1307 1307
+15 -24
drivers/net/wireless/intel/iwlwifi/pcie/rx.c
··· 711 711 * Allocate the circular buffer of Read Buffer Descriptors 712 712 * (RBDs) 713 713 */ 714 - rxq->bd = dma_zalloc_coherent(dev, 715 - free_size * rxq->queue_size, 716 - &rxq->bd_dma, GFP_KERNEL); 714 + rxq->bd = dma_alloc_coherent(dev, free_size * rxq->queue_size, 715 + &rxq->bd_dma, GFP_KERNEL); 717 716 if (!rxq->bd) 718 717 goto err; 719 718 720 719 if (trans->cfg->mq_rx_supported) { 721 - rxq->used_bd = dma_zalloc_coherent(dev, 722 - (use_rx_td ? 723 - sizeof(*rxq->cd) : 724 - sizeof(__le32)) * 725 - rxq->queue_size, 726 - &rxq->used_bd_dma, 727 - GFP_KERNEL); 720 + rxq->used_bd = dma_alloc_coherent(dev, 721 + (use_rx_td ? sizeof(*rxq->cd) : sizeof(__le32)) * rxq->queue_size, 722 + &rxq->used_bd_dma, 723 + GFP_KERNEL); 728 724 if (!rxq->used_bd) 729 725 goto err; 730 726 } 731 727 732 728 /* Allocate the driver's pointer to receive buffer status */ 733 - rxq->rb_stts = dma_zalloc_coherent(dev, use_rx_td ? 734 - sizeof(__le16) : 735 - sizeof(struct iwl_rb_status), 736 - &rxq->rb_stts_dma, 737 - GFP_KERNEL); 729 + rxq->rb_stts = dma_alloc_coherent(dev, 730 + use_rx_td ? sizeof(__le16) : sizeof(struct iwl_rb_status), 731 + &rxq->rb_stts_dma, GFP_KERNEL); 738 732 if (!rxq->rb_stts) 739 733 goto err; 740 734 ··· 736 742 return 0; 737 743 738 744 /* Allocate the driver's pointer to TR tail */ 739 - rxq->tr_tail = dma_zalloc_coherent(dev, sizeof(__le16), 740 - &rxq->tr_tail_dma, 741 - GFP_KERNEL); 745 + rxq->tr_tail = dma_alloc_coherent(dev, sizeof(__le16), 746 + &rxq->tr_tail_dma, GFP_KERNEL); 742 747 if (!rxq->tr_tail) 743 748 goto err; 744 749 745 750 /* Allocate the driver's pointer to CR tail */ 746 - rxq->cr_tail = dma_zalloc_coherent(dev, sizeof(__le16), 747 - &rxq->cr_tail_dma, 748 - GFP_KERNEL); 751 + rxq->cr_tail = dma_alloc_coherent(dev, sizeof(__le16), 752 + &rxq->cr_tail_dma, GFP_KERNEL); 749 753 if (!rxq->cr_tail) 750 754 goto err; 751 755 /* ··· 1939 1947 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1940 1948 1941 1949 trans_pcie->ict_tbl = 1942 - dma_zalloc_coherent(trans->dev, ICT_SIZE, 1943 - &trans_pcie->ict_tbl_dma, 1944 - GFP_KERNEL); 1950 + dma_alloc_coherent(trans->dev, ICT_SIZE, 1951 + &trans_pcie->ict_tbl_dma, GFP_KERNEL); 1945 1952 if (!trans_pcie->ict_tbl) 1946 1953 return -ENOMEM; 1947 1954
+3 -3
drivers/net/wireless/ralink/rt2x00/rt2x00mmio.c
··· 119 119 /* 120 120 * Allocate DMA memory for descriptor and buffer. 121 121 */ 122 - addr = dma_zalloc_coherent(rt2x00dev->dev, 123 - queue->limit * queue->desc_size, &dma, 124 - GFP_KERNEL); 122 + addr = dma_alloc_coherent(rt2x00dev->dev, 123 + queue->limit * queue->desc_size, &dma, 124 + GFP_KERNEL); 125 125 if (!addr) 126 126 return -ENOMEM; 127 127
+4 -4
drivers/ntb/hw/mscc/ntb_hw_switchtec.c
··· 1339 1339 int rc; 1340 1340 1341 1341 sndev->nr_rsvd_luts++; 1342 - sndev->self_shared = dma_zalloc_coherent(&sndev->stdev->pdev->dev, 1343 - LUT_SIZE, 1344 - &sndev->self_shared_dma, 1345 - GFP_KERNEL); 1342 + sndev->self_shared = dma_alloc_coherent(&sndev->stdev->pdev->dev, 1343 + LUT_SIZE, 1344 + &sndev->self_shared_dma, 1345 + GFP_KERNEL); 1346 1346 if (!sndev->self_shared) { 1347 1347 dev_err(&sndev->stdev->dev, 1348 1348 "unable to allocate memory for shared mw\n");
+2 -2
drivers/nvdimm/nd-core.h
··· 54 54 }; 55 55 56 56 static inline enum nvdimm_security_state nvdimm_security_state( 57 - struct nvdimm *nvdimm, bool master) 57 + struct nvdimm *nvdimm, enum nvdimm_passphrase_type ptype) 58 58 { 59 59 if (!nvdimm->sec.ops) 60 60 return -ENXIO; 61 61 62 - return nvdimm->sec.ops->state(nvdimm, master); 62 + return nvdimm->sec.ops->state(nvdimm, ptype); 63 63 } 64 64 int nvdimm_security_freeze(struct nvdimm *nvdimm); 65 65 #if IS_ENABLED(CONFIG_NVDIMM_KEYS)
+10 -9
drivers/nvme/host/core.c
··· 2173 2173 size_t nqnlen; 2174 2174 int off; 2175 2175 2176 - nqnlen = strnlen(id->subnqn, NVMF_NQN_SIZE); 2177 - if (nqnlen > 0 && nqnlen < NVMF_NQN_SIZE) { 2178 - strlcpy(subsys->subnqn, id->subnqn, NVMF_NQN_SIZE); 2179 - return; 2180 - } 2176 + if (!(ctrl->quirks & NVME_QUIRK_IGNORE_DEV_SUBNQN)) { 2177 + nqnlen = strnlen(id->subnqn, NVMF_NQN_SIZE); 2178 + if (nqnlen > 0 && nqnlen < NVMF_NQN_SIZE) { 2179 + strlcpy(subsys->subnqn, id->subnqn, NVMF_NQN_SIZE); 2180 + return; 2181 + } 2181 2182 2182 2183 if (ctrl->vs >= NVME_VS(1, 2, 1)) 2183 2184 dev_warn(ctrl->device, "missing or invalid SUBNQN field.\n"); 2185 + } 2184 2186 2185 2187 /* Generate a "fake" NQN per Figure 254 in NVMe 1.3 + ECN 001 */ 2186 2188 off = snprintf(subsys->subnqn, NVMF_NQN_SIZE, 2187 - "nqn.2014.08.org.nvmexpress:%4x%4x", 2189 + "nqn.2014.08.org.nvmexpress:%04x%04x", 2188 2190 le16_to_cpu(id->vid), le16_to_cpu(id->ssvid)); 2189 2191 memcpy(subsys->subnqn + off, id->sn, sizeof(id->sn)); 2190 2192 off += sizeof(id->sn); ··· 2502 2500 ctrl->oaes = le32_to_cpu(id->oaes); 2503 2501 atomic_set(&ctrl->abort_limit, id->acl + 1); 2504 2502 ctrl->vwc = id->vwc; 2505 - ctrl->cntlid = le16_to_cpup(&id->cntlid); 2506 2503 if (id->mdts) 2507 2504 max_hw_sectors = 1 << (id->mdts + page_shift - 9); 2508 2505 else
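The format-string change matters for the generated NQN: "%4x" pads with spaces, so small vendor and subsystem IDs would embed spaces in the name, while "%04x" zero-pads to a fixed width. A minimal illustration with made-up IDs:

        char nqn[16];

        snprintf(nqn, sizeof(nqn), "%4x%4x", 0x1, 0x2);   /* "   1   2" - spaces leak into the NQN */
        snprintf(nqn, sizeof(nqn), "%04x%04x", 0x1, 0x2); /* "00010002" - fixed-width, no spaces */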
+2
drivers/nvme/host/fabrics.c
··· 874 874 if (opts->discovery_nqn) { 875 875 opts->kato = 0; 876 876 opts->nr_io_queues = 0; 877 + opts->nr_write_queues = 0; 878 + opts->nr_poll_queues = 0; 877 879 opts->duplicate_connect = true; 878 880 } 879 881 if (ctrl_loss_tmo < 0)
+2
drivers/nvme/host/multipath.c
··· 570 570 return 0; 571 571 out_free_ana_log_buf: 572 572 kfree(ctrl->ana_log_buf); 573 + ctrl->ana_log_buf = NULL; 573 574 out: 574 575 return error; 575 576 } ··· 578 577 void nvme_mpath_uninit(struct nvme_ctrl *ctrl) 579 578 { 580 579 kfree(ctrl->ana_log_buf); 580 + ctrl->ana_log_buf = NULL; 581 581 } 582 582
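Clearing ana_log_buf after each kfree() makes the init/uninit pair safe to reach in any order: if initialization fails and teardown later frees again, or uninit runs twice, the second free sees a NULL pointer and is a no-op instead of a double free. The pattern, as a sketch:

        kfree(ctrl->ana_log_buf);
        ctrl->ana_log_buf = NULL;       /* kfree(NULL) is a no-op, so a repeated free is harmless */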
+5
drivers/nvme/host/nvme.h
··· 90 90 * Set MEDIUM priority on SQ creation 91 91 */ 92 92 NVME_QUIRK_MEDIUM_PRIO_SQ = (1 << 7), 93 + 94 + /* 95 + * Ignore device provided subnqn. 96 + */ 97 + NVME_QUIRK_IGNORE_DEV_SUBNQN = (1 << 8), 93 98 }; 94 99 95 100 /*
+64 -32
drivers/nvme/host/pci.c
··· 95 95 struct nvme_queue; 96 96 97 97 static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown); 98 + static bool __nvme_disable_io_queues(struct nvme_dev *dev, u8 opcode); 98 99 99 100 /* 100 101 * Represents an NVM Express device. Each nvme_dev is a PCI function. ··· 1020 1019 1021 1020 static inline void nvme_update_cq_head(struct nvme_queue *nvmeq) 1022 1021 { 1023 - if (++nvmeq->cq_head == nvmeq->q_depth) { 1022 + if (nvmeq->cq_head == nvmeq->q_depth - 1) { 1024 1023 nvmeq->cq_head = 0; 1025 1024 nvmeq->cq_phase = !nvmeq->cq_phase; 1025 + } else { 1026 + nvmeq->cq_head++; 1026 1027 } 1027 1028 } 1028 1029 ··· 1423 1420 return 0; 1424 1421 } 1425 1422 1423 + static void nvme_suspend_io_queues(struct nvme_dev *dev) 1424 + { 1425 + int i; 1426 + 1427 + for (i = dev->ctrl.queue_count - 1; i > 0; i--) 1428 + nvme_suspend_queue(&dev->queues[i]); 1429 + } 1430 + 1426 1431 static void nvme_disable_admin_queue(struct nvme_dev *dev, bool shutdown) 1427 1432 { 1428 1433 struct nvme_queue *nvmeq = &dev->queues[0]; ··· 1496 1485 if (dev->ctrl.queue_count > qid) 1497 1486 return 0; 1498 1487 1499 - nvmeq->cqes = dma_zalloc_coherent(dev->dev, CQ_SIZE(depth), 1500 - &nvmeq->cq_dma_addr, GFP_KERNEL); 1488 + nvmeq->cqes = dma_alloc_coherent(dev->dev, CQ_SIZE(depth), 1489 + &nvmeq->cq_dma_addr, GFP_KERNEL); 1501 1490 if (!nvmeq->cqes) 1502 1491 goto free_nvmeq; 1503 1492 ··· 1896 1885 struct nvme_host_mem_buf_desc *desc = &dev->host_mem_descs[i]; 1897 1886 size_t size = le32_to_cpu(desc->size) * dev->ctrl.page_size; 1898 1887 1899 - dma_free_coherent(dev->dev, size, dev->host_mem_desc_bufs[i], 1900 - le64_to_cpu(desc->addr)); 1888 + dma_free_attrs(dev->dev, size, dev->host_mem_desc_bufs[i], 1889 + le64_to_cpu(desc->addr), 1890 + DMA_ATTR_NO_KERNEL_MAPPING | DMA_ATTR_NO_WARN); 1901 1891 } 1902 1892 1903 1893 kfree(dev->host_mem_desc_bufs); ··· 1927 1915 if (dev->ctrl.hmmaxd && dev->ctrl.hmmaxd < max_entries) 1928 1916 max_entries = dev->ctrl.hmmaxd; 1929 1917 1930 - descs = dma_zalloc_coherent(dev->dev, max_entries * sizeof(*descs), 1931 - &descs_dma, GFP_KERNEL); 1918 + descs = dma_alloc_coherent(dev->dev, max_entries * sizeof(*descs), 1919 + &descs_dma, GFP_KERNEL); 1932 1920 if (!descs) 1933 1921 goto out; 1934 1922 ··· 1964 1952 while (--i >= 0) { 1965 1953 size_t size = le32_to_cpu(descs[i].size) * dev->ctrl.page_size; 1966 1954 1967 - dma_free_coherent(dev->dev, size, bufs[i], 1968 - le64_to_cpu(descs[i].addr)); 1955 + dma_free_attrs(dev->dev, size, bufs[i], 1956 + le64_to_cpu(descs[i].addr), 1957 + DMA_ATTR_NO_KERNEL_MAPPING | DMA_ATTR_NO_WARN); 1969 1958 } 1970 1959 1971 1960 kfree(bufs); ··· 2041 2028 return ret; 2042 2029 } 2043 2030 2031 + /* irq_queues covers admin queue */ 2044 2032 static void nvme_calc_io_queues(struct nvme_dev *dev, unsigned int irq_queues) 2045 2033 { 2046 2034 unsigned int this_w_queues = write_queues; 2047 2035 2036 + WARN_ON(!irq_queues); 2037 + 2048 2038 /* 2049 - * Setup read/write queue split 2039 + * Setup read/write queue split, assign admin queue one independent 2040 + * irq vector if irq_queues is > 1. 2050 2041 */ 2051 - if (irq_queues == 1) { 2042 + if (irq_queues <= 2) { 2052 2043 dev->io_queues[HCTX_TYPE_DEFAULT] = 1; 2053 2044 dev->io_queues[HCTX_TYPE_READ] = 0; 2054 2045 return; ··· 2060 2043 2061 2044 /* 2062 2045 * If 'write_queues' is set, ensure it leaves room for at least 2063 - * one read queue 2046 + * one read queue and one admin queue 2064 2047 */ 2065 2048 if (this_w_queues >= irq_queues) 2066 - this_w_queues = irq_queues - 1; 2049 + this_w_queues = irq_queues - 2; 2067 2050 2068 2051 /* 2069 2052 * If 'write_queues' is set to zero, reads and writes will share 2070 2053 * a queue set. 2071 2054 */ 2072 2055 if (!this_w_queues) { 2073 - dev->io_queues[HCTX_TYPE_DEFAULT] = irq_queues; 2056 + dev->io_queues[HCTX_TYPE_DEFAULT] = irq_queues - 1; 2074 2057 dev->io_queues[HCTX_TYPE_READ] = 0; 2075 2058 } else { 2076 2059 dev->io_queues[HCTX_TYPE_DEFAULT] = this_w_queues; 2077 - dev->io_queues[HCTX_TYPE_READ] = irq_queues - this_w_queues; 2060 + dev->io_queues[HCTX_TYPE_READ] = irq_queues - this_w_queues - 1; 2078 2061 } 2079 2062 } 2080 2063 ··· 2099 2082 this_p_queues = nr_io_queues - 1; 2100 2083 irq_queues = 1; 2101 2084 } else { 2102 - irq_queues = nr_io_queues - this_p_queues; 2085 + irq_queues = nr_io_queues - this_p_queues + 1; 2103 2086 } 2104 2087 dev->io_queues[HCTX_TYPE_POLL] = this_p_queues; 2105 2088 ··· 2119 2102 * If we got a failure and we're down to asking for just 2120 2103 * 1 + 1 queues, just ask for a single vector. We'll share 2121 2104 * that between the single IO queue and the admin queue. 2105 + * Otherwise, we assign one independent vector to admin queue. 2122 2106 */ 2123 - if (result >= 0 && irq_queues > 1) 2107 + if (irq_queues > 1) 2124 2108 irq_queues = irq_sets[0] + irq_sets[1] + 1; 2125 2109 2126 2110 result = pci_alloc_irq_vectors_affinity(pdev, irq_queues, ··· 2148 2130 } while (1); 2149 2131 2150 2132 return result; 2133 + } 2134 + 2135 + static void nvme_disable_io_queues(struct nvme_dev *dev) 2136 + { 2137 + if (__nvme_disable_io_queues(dev, nvme_admin_delete_sq)) 2138 + __nvme_disable_io_queues(dev, nvme_admin_delete_cq); 2151 2139 } 2152 2140 2153 2141 static int nvme_setup_io_queues(struct nvme_dev *dev) ··· 2192 2168 } while (1); 2193 2169 adminq->q_db = dev->dbs; 2194 2170 2171 + retry: 2195 2172 /* Deregister the admin queue's interrupt */ 2196 2173 pci_free_irq(pdev, 0, adminq); 2197 2174 ··· 2210 2185 result = max(result - 1, 1); 2211 2186 dev->max_qid = result + dev->io_queues[HCTX_TYPE_POLL]; 2212 2187 2213 - dev_info(dev->ctrl.device, "%d/%d/%d default/read/poll queues\n", 2214 - dev->io_queues[HCTX_TYPE_DEFAULT], 2215 - dev->io_queues[HCTX_TYPE_READ], 2216 - dev->io_queues[HCTX_TYPE_POLL]); 2217 - 2218 2188 /* 2219 2189 * Should investigate if there's a performance win from allocating 2220 2190 * more queues than interrupt vectors; it might allow the submission 2221 2191 * path to scale better, even if the receive path is limited by the 2222 2192 * number of interrupts. 2223 2193 */ 2224 - 2225 2194 result = queue_request_irq(adminq); 2226 2195 if (result) { 2227 2196 adminq->cq_vector = -1; 2228 2197 return result; 2229 2198 } 2230 2199 set_bit(NVMEQ_ENABLED, &adminq->flags); 2231 - return nvme_create_io_queues(dev); 2200 + 2201 + result = nvme_create_io_queues(dev); 2202 + if (result || dev->online_queues < 2) 2203 + return result; 2204 + 2205 + if (dev->online_queues - 1 < dev->max_qid) { 2206 + nr_io_queues = dev->online_queues - 1; 2207 + nvme_disable_io_queues(dev); 2208 + nvme_suspend_io_queues(dev); 2209 + goto retry; 2210 + } 2211 + dev_info(dev->ctrl.device, "%d/%d/%d default/read/poll queues\n", 2212 + dev->io_queues[HCTX_TYPE_DEFAULT], 2213 + dev->io_queues[HCTX_TYPE_READ], 2214 + dev->io_queues[HCTX_TYPE_POLL]); 2215 + return 0; 2232 2216 } 2233 2217 2234 2218 static void nvme_del_queue_end(struct request *req, blk_status_t error) ··· 2282 2248 return 0; 2283 2249 } 2284 2250 2285 - static bool nvme_disable_io_queues(struct nvme_dev *dev, u8 opcode) 2251 + static bool __nvme_disable_io_queues(struct nvme_dev *dev, u8 opcode) 2286 2252 { 2287 2253 int nr_queues = dev->online_queues - 1, sent = 0; 2288 2254 unsigned long timeout; ··· 2328 2294 dev->tagset.nr_maps = 2; /* default + read */ 2329 2295 if (dev->io_queues[HCTX_TYPE_POLL]) 2330 2296 dev->tagset.nr_maps++; 2331 - dev->tagset.nr_maps = HCTX_MAX_TYPES; 2332 2297 dev->tagset.timeout = NVME_IO_TIMEOUT; 2333 2298 dev->tagset.numa_node = dev_to_node(dev->dev); 2334 2299 dev->tagset.queue_depth = ··· 2443 2410 2444 2411 static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown) 2445 2412 { 2446 - int i; 2447 2413 bool dead = true; 2448 2414 struct pci_dev *pdev = to_pci_dev(dev->dev); 2449 2415 ··· 2469 2437 nvme_stop_queues(&dev->ctrl); 2470 2438 2471 2439 if (!dead && dev->ctrl.queue_count > 0) { 2472 - if (nvme_disable_io_queues(dev, nvme_admin_delete_sq)) 2473 - nvme_disable_io_queues(dev, nvme_admin_delete_cq); 2440 + nvme_disable_io_queues(dev); 2474 2441 nvme_disable_admin_queue(dev, shutdown); 2475 2442 } 2476 - for (i = dev->ctrl.queue_count - 1; i >= 0; i--) 2477 - nvme_suspend_queue(&dev->queues[i]); 2478 - 2443 + nvme_suspend_io_queues(dev); 2444 + nvme_suspend_queue(&dev->queues[0]); 2479 2445 nvme_pci_disable(dev); 2480 2446 2481 2447 blk_mq_tagset_busy_iter(&dev->tagset, nvme_cancel_request, &dev->ctrl); ··· 2976 2946 { PCI_VDEVICE(INTEL, 0xf1a5), /* Intel 600P/P3100 */ 2977 2947 .driver_data = NVME_QUIRK_NO_DEEPEST_PS | 2978 2948 NVME_QUIRK_MEDIUM_PRIO_SQ }, 2949 + { PCI_VDEVICE(INTEL, 0xf1a6), /* Intel 760p/Pro 7600p */ 2950 + .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN, }, 2979 2951 { PCI_VDEVICE(INTEL, 0x5845), /* Qemu emulated controller */ 2980 2952 .driver_data = NVME_QUIRK_IDENTIFY_CNS, }, 2981 2953 { PCI_DEVICE(0x1bb1, 0x0100), /* Seagate Nytro Flash Storage */
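The rewritten nvme_update_cq_head() tests for the wrap before incrementing, so cq_head never transiently holds the out-of-range value q_depth; the old pre-increment form stored q_depth into cq_head and only then reset it. Side by side, condensed from the hunk above:

        /* old: cq_head briefly equals q_depth between increment and reset */
        if (++nvmeq->cq_head == nvmeq->q_depth) {
                nvmeq->cq_head = 0;
                nvmeq->cq_phase = !nvmeq->cq_phase;
        }

        /* new: cq_head always stays within [0, q_depth - 1] */
        if (nvmeq->cq_head == nvmeq->q_depth - 1) {
                nvmeq->cq_head = 0;
                nvmeq->cq_phase = !nvmeq->cq_phase;
        } else {
                nvmeq->cq_head++;
        }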
+6 -10
drivers/nvme/host/tcp.c
··· 1565 1565 { 1566 1566 nvme_tcp_stop_io_queues(ctrl); 1567 1567 if (remove) { 1568 - if (ctrl->ops->flags & NVME_F_FABRICS) 1569 - blk_cleanup_queue(ctrl->connect_q); 1568 + blk_cleanup_queue(ctrl->connect_q); 1570 1569 blk_mq_free_tag_set(ctrl->tagset); 1571 1570 } 1572 1571 nvme_tcp_free_io_queues(ctrl); ··· 1586 1587 goto out_free_io_queues; 1587 1588 } 1588 1589 1589 - if (ctrl->ops->flags & NVME_F_FABRICS) { 1590 - ctrl->connect_q = blk_mq_init_queue(ctrl->tagset); 1591 - if (IS_ERR(ctrl->connect_q)) { 1592 - ret = PTR_ERR(ctrl->connect_q); 1593 - goto out_free_tag_set; 1594 - } 1590 + ctrl->connect_q = blk_mq_init_queue(ctrl->tagset); 1591 + if (IS_ERR(ctrl->connect_q)) { 1592 + ret = PTR_ERR(ctrl->connect_q); 1593 + goto out_free_tag_set; 1595 1594 } 1596 1595 } else { 1597 1596 blk_mq_update_nr_hw_queues(ctrl->tagset, ··· 1603 1606 return 0; 1604 1607 1605 1608 out_cleanup_connect_q: 1606 - if (new && (ctrl->ops->flags & NVME_F_FABRICS)) 1609 + if (new) 1607 1610 blk_cleanup_queue(ctrl->connect_q); 1608 1611 out_free_tag_set: 1609 1612 if (new) ··· 1617 1620 { 1618 1621 nvme_tcp_stop_queue(ctrl, 0); 1619 1622 if (remove) { 1620 - free_opal_dev(ctrl->opal_dev); 1621 1623 blk_cleanup_queue(ctrl->admin_q); 1622 1624 blk_mq_free_tag_set(ctrl->admin_tagset); 1623 1625 }
+1 -1
drivers/nvme/target/tcp.c
··· 1089 1089 1090 1090 static int nvmet_tcp_try_recv_one(struct nvmet_tcp_queue *queue) 1091 1091 { 1092 - int result; 1092 + int result = 0; 1093 1093 1094 1094 if (unlikely(queue->rcv_state == NVMET_TCP_RECV_ERR)) 1095 1095 return 0;
-3
drivers/of/dynamic.c
··· 207 207 208 208 if (!of_node_check_flag(np, OF_OVERLAY)) { 209 209 np->name = __of_get_property(np, "name", NULL); 210 - np->type = __of_get_property(np, "device_type", NULL); 211 210 if (!np->name) 212 211 np->name = "<NULL>"; 213 - if (!np->type) 214 - np->type = "<NULL>"; 215 212 216 213 phandle = __of_get_property(np, "phandle", &sz); 217 214 if (!phandle)
-4
drivers/of/fdt.c
··· 314 314 populate_properties(blob, offset, mem, np, pathp, dryrun); 315 315 if (!dryrun) { 316 316 np->name = of_get_property(np, "name", NULL); 317 - np->type = of_get_property(np, "device_type", NULL); 318 - 319 317 if (!np->name) 320 318 np->name = "<NULL>"; 321 - if (!np->type) 322 - np->type = "<NULL>"; 323 319 } 324 320 325 321 *pnp = np;
-3
drivers/of/overlay.c
··· 423 423 424 424 tchild->parent = target->np; 425 425 tchild->name = __of_get_property(node, "name", NULL); 426 - tchild->type = __of_get_property(node, "device_type", NULL); 427 426 428 427 if (!tchild->name) 429 428 tchild->name = "<NULL>"; 430 - if (!tchild->type) 431 - tchild->type = "<NULL>"; 432 429 433 430 /* ignore obsolete "linux,phandle" */ 434 431 phandle = __of_get_property(node, "phandle", &size);
-1
drivers/of/pdt.c
··· 155 155 dp->parent = parent; 156 156 157 157 dp->name = of_pdt_get_one_property(node, "name"); 158 - dp->type = of_pdt_get_one_property(node, "device_type"); 159 158 dp->phandle = node; 160 159 161 160 dp->properties = of_pdt_build_prop_list(node);
+1
drivers/of/property.c
··· 806 806 807 807 if (!of_device_is_available(remote)) { 808 808 pr_debug("not available for remote node\n"); 809 + of_node_put(remote); 809 810 return NULL; 810 811 } 811 812
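The added of_node_put() plugs a device-node refcount leak: the graph helper takes a reference on the remote node before this availability check, and the early return previously bailed out without dropping it. The shape of the fixed error path, in outline:

        if (!of_device_is_available(remote)) {
                pr_debug("not available for remote node\n");
                of_node_put(remote);    /* balance the reference taken when 'remote' was resolved */
                return NULL;
        }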
+58 -5
drivers/opp/core.c
··· 988 988 kfree(opp); 989 989 } 990 990 991 - static void _opp_kref_release(struct kref *kref) 991 + static void _opp_kref_release(struct dev_pm_opp *opp, 992 + struct opp_table *opp_table) 992 993 { 993 - struct dev_pm_opp *opp = container_of(kref, struct dev_pm_opp, kref); 994 - struct opp_table *opp_table = opp->opp_table; 995 - 996 994 /* 997 995 * Notify the changes in the availability of the operable 998 996 * frequency/voltage list. ··· 1000 1002 opp_debug_remove_one(opp); 1001 1003 list_del(&opp->node); 1002 1004 kfree(opp); 1005 + } 1003 1006 1007 + static void _opp_kref_release_unlocked(struct kref *kref) 1008 + { 1009 + struct dev_pm_opp *opp = container_of(kref, struct dev_pm_opp, kref); 1010 + struct opp_table *opp_table = opp->opp_table; 1011 + 1012 + _opp_kref_release(opp, opp_table); 1013 + } 1014 + 1015 + static void _opp_kref_release_locked(struct kref *kref) 1016 + { 1017 + struct dev_pm_opp *opp = container_of(kref, struct dev_pm_opp, kref); 1018 + struct opp_table *opp_table = opp->opp_table; 1019 + 1020 + _opp_kref_release(opp, opp_table); 1004 1021 mutex_unlock(&opp_table->lock); 1005 1022 } 1006 1023 ··· 1026 1013 1027 1014 void dev_pm_opp_put(struct dev_pm_opp *opp) 1028 1015 { 1029 - kref_put_mutex(&opp->kref, _opp_kref_release, &opp->opp_table->lock); 1016 + kref_put_mutex(&opp->kref, _opp_kref_release_locked, 1017 + &opp->opp_table->lock); 1030 1018 } 1031 1019 EXPORT_SYMBOL_GPL(dev_pm_opp_put); 1020 + 1021 + static void dev_pm_opp_put_unlocked(struct dev_pm_opp *opp) 1022 + { 1023 + kref_put(&opp->kref, _opp_kref_release_unlocked); 1024 + } 1032 1025 1033 1026 /** 1034 1027 * dev_pm_opp_remove() - Remove an OPP from OPP table ··· 1078 1059 dev_pm_opp_put_opp_table(opp_table); 1079 1060 } 1080 1061 EXPORT_SYMBOL_GPL(dev_pm_opp_remove); 1062 + 1063 + /** 1064 + * dev_pm_opp_remove_all_dynamic() - Remove all dynamically created OPPs 1065 + * @dev: device for which we do this operation 1066 + * 1067 + * This function removes all dynamically created OPPs from the opp table. 1068 + */ 1069 + void dev_pm_opp_remove_all_dynamic(struct device *dev) 1070 + { 1071 + struct opp_table *opp_table; 1072 + struct dev_pm_opp *opp, *temp; 1073 + int count = 0; 1074 + 1075 + opp_table = _find_opp_table(dev); 1076 + if (IS_ERR(opp_table)) 1077 + return; 1078 + 1079 + mutex_lock(&opp_table->lock); 1080 + list_for_each_entry_safe(opp, temp, &opp_table->opp_list, node) { 1081 + if (opp->dynamic) { 1082 + dev_pm_opp_put_unlocked(opp); 1083 + count++; 1084 + } 1085 + } 1086 + mutex_unlock(&opp_table->lock); 1087 + 1088 + /* Drop the references taken by dev_pm_opp_add() */ 1089 + while (count--) 1090 + dev_pm_opp_put_opp_table(opp_table); 1091 + 1092 + /* Drop the reference taken by _find_opp_table() */ 1093 + dev_pm_opp_put_opp_table(opp_table); 1094 + } 1095 + EXPORT_SYMBOL_GPL(dev_pm_opp_remove_all_dynamic); 1081 1096 1082 1097 struct dev_pm_opp *_opp_allocate(struct opp_table *table) 1083 1098 {
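Splitting _opp_kref_release() into locked and unlocked final-put paths lets dev_pm_opp_remove_all_dynamic() drop OPP references while it already holds opp_table->lock for the opp_list walk, whereas dev_pm_opp_put() keeps taking the lock itself. In outline, condensed from the hunk:

        /* caller does not hold opp_table->lock: the final put acquires it */
        kref_put_mutex(&opp->kref, _opp_kref_release_locked, &opp->opp_table->lock);

        /* caller already holds opp_table->lock, e.g. inside the opp_list iteration */
        kref_put(&opp->kref, _opp_kref_release_unlocked);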
+8 -14
drivers/pci/Kconfig
··· 21 21 support for PCI-X and the foundations for PCI Express support. 22 22 Say 'Y' here unless you know what you are doing. 23 23 24 + if PCI 25 + 24 26 config PCI_DOMAINS 25 27 bool 26 28 depends on PCI 27 29 28 30 config PCI_DOMAINS_GENERIC 29 31 bool 30 - depends on PCI 31 32 select PCI_DOMAINS 32 33 33 34 config PCI_SYSCALL ··· 38 37 39 38 config PCI_MSI 40 39 bool "Message Signaled Interrupts (MSI and MSI-X)" 41 - depends on PCI 42 40 select GENERIC_MSI_IRQ 43 41 help 44 42 This allows device drivers to enable MSI (Message Signaled ··· 59 59 config PCI_QUIRKS 60 60 default y 61 61 bool "Enable PCI quirk workarounds" if EXPERT 62 - depends on PCI 63 62 help 64 63 This enables workarounds for various PCI chipset bugs/quirks. 65 64 Disable this only if your target machine is unaffected by PCI ··· 66 67 67 68 config PCI_DEBUG 68 69 bool "PCI Debugging" 69 - depends on PCI && DEBUG_KERNEL 70 + depends on DEBUG_KERNEL 70 71 help 71 72 Say Y here if you want the PCI core to produce a bunch of debug 72 73 messages to the system log. Select this if you are having a ··· 76 77 77 78 config PCI_REALLOC_ENABLE_AUTO 78 79 bool "Enable PCI resource re-allocation detection" 79 - depends on PCI 80 80 depends on PCI_IOV 81 81 help 82 82 Say Y here if you want the PCI core to detect if PCI resource ··· 88 90 89 91 config PCI_STUB 90 92 tristate "PCI Stub driver" 91 - depends on PCI 92 93 help 93 94 Say Y or M here if you want to be able to reserve a PCI device 94 95 when it is going to be assigned to a guest operating system. ··· 96 99 97 100 config PCI_PF_STUB 98 101 tristate "PCI PF Stub driver" 99 - depends on PCI 100 102 depends on PCI_IOV 101 103 help 102 104 Say Y or M here if you want to enable support for devices that ··· 107 111 108 112 config XEN_PCIDEV_FRONTEND 109 113 tristate "Xen PCI Frontend" 110 - depends on PCI && X86 && XEN 114 + depends on X86 && XEN 111 115 select PCI_XEN 112 116 select XEN_XENBUS_FRONTEND 113 117 default y ··· 129 133 130 134 config PCI_IOV 131 135 bool "PCI IOV support" 132 - depends on PCI 133 136 select PCI_ATS 134 137 help 135 138 I/O Virtualization is a PCI feature supported by some devices ··· 139 144 140 145 config PCI_PRI 141 146 bool "PCI PRI support" 142 - depends on PCI 143 147 select PCI_ATS 144 148 help 145 149 PRI is the PCI Page Request Interface. It allows PCI devices that are ··· 148 154 149 155 config PCI_PASID 150 156 bool "PCI PASID support" 151 - depends on PCI 152 157 select PCI_ATS 153 158 help 154 159 Process Address Space Identifiers (PASIDs) can be used by PCI devices ··· 160 167 161 168 config PCI_P2PDMA 162 169 bool "PCI peer-to-peer transfer support" 163 - depends on PCI && ZONE_DEVICE 170 + depends on ZONE_DEVICE 164 171 select GENERIC_ALLOCATOR 165 172 help 166 173 Enables drivers to do PCI peer-to-peer transactions to and from ··· 177 184 178 185 config PCI_LABEL 179 186 def_bool y if (DMI || ACPI) 180 - depends on PCI 181 187 select NLS 182 188 183 189 config PCI_HYPERV 184 190 tristate "Hyper-V PCI Frontend" 185 - depends on PCI && X86 && HYPERV && PCI_MSI && PCI_MSI_IRQ_DOMAIN && X86_64 191 + depends on X86 && HYPERV && PCI_MSI && PCI_MSI_IRQ_DOMAIN && X86_64 186 192 help 187 193 The PCI device frontend driver allows the kernel to import arbitrary 188 194 PCI devices from a PCI backend to support PCI driver domains. ··· 190 198 source "drivers/pci/controller/Kconfig" 191 199 source "drivers/pci/endpoint/Kconfig" 192 200 source "drivers/pci/switch/Kconfig" 201 + 202 + endif
+1
drivers/pci/controller/dwc/pci-meson.c
··· 8 8 9 9 #include <linux/clk.h> 10 10 #include <linux/delay.h> 11 + #include <linux/gpio/consumer.h> 11 12 #include <linux/of_device.h> 12 13 #include <linux/of_gpio.h> 13 14 #include <linux/pci.h>
+3 -3
drivers/pci/controller/pcie-iproc-msi.c
··· 602 602 } 603 603 604 604 /* Reserve memory for event queue and make sure memories are zeroed */ 605 - msi->eq_cpu = dma_zalloc_coherent(pcie->dev, 606 - msi->nr_eq_region * EQ_MEM_REGION_SIZE, 607 - &msi->eq_dma, GFP_KERNEL); 605 + msi->eq_cpu = dma_alloc_coherent(pcie->dev, 606 + msi->nr_eq_region * EQ_MEM_REGION_SIZE, 607 + &msi->eq_dma, GFP_KERNEL); 608 608 if (!msi->eq_cpu) { 609 609 ret = -ENOMEM; 610 610 goto free_irqs;
+13 -9
drivers/pci/msi.c
··· 1168 1168 const struct irq_affinity *affd) 1169 1169 { 1170 1170 static const struct irq_affinity msi_default_affd; 1171 - int vecs = -ENOSPC; 1171 + int msix_vecs = -ENOSPC; 1172 + int msi_vecs = -ENOSPC; 1172 1173 1173 1174 if (flags & PCI_IRQ_AFFINITY) { 1174 1175 if (!affd) ··· 1180 1179 } 1181 1180 1182 1181 if (flags & PCI_IRQ_MSIX) { 1183 - vecs = __pci_enable_msix_range(dev, NULL, min_vecs, max_vecs, 1184 - affd); 1185 - if (vecs > 0) 1186 - return vecs; 1182 + msix_vecs = __pci_enable_msix_range(dev, NULL, min_vecs, 1183 + max_vecs, affd); 1184 + if (msix_vecs > 0) 1185 + return msix_vecs; 1187 1186 } 1188 1187 1189 1188 if (flags & PCI_IRQ_MSI) { 1190 - vecs = __pci_enable_msi_range(dev, min_vecs, max_vecs, affd); 1191 - if (vecs > 0) 1192 - return vecs; 1189 + msi_vecs = __pci_enable_msi_range(dev, min_vecs, max_vecs, 1190 + affd); 1191 + if (msi_vecs > 0) 1192 + return msi_vecs; 1193 1193 } 1194 1194 1195 1195 /* use legacy irq if allowed */ ··· 1201 1199 } 1202 1200 } 1203 1201 1204 - return vecs; 1202 + if (msix_vecs == -ENOSPC) 1203 + return -ENOSPC; 1204 + return msi_vecs; 1205 1205 } 1206 1206 EXPORT_SYMBOL(pci_alloc_irq_vectors_affinity); 1207 1207
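Tracking the MSI-X and MSI results separately preserves -ENOSPC for callers: previously an -ENOSPC from the MSI-X attempt could be overwritten by a different errno from the MSI fallback, and drivers key their retry logic off -ENOSPC specifically. A hedged caller sketch (the vector counts are made up):

        int nvec;

        nvec = pci_alloc_irq_vectors_affinity(pdev, 2, 32,
                                              PCI_IRQ_MSIX | PCI_IRQ_MSI, NULL);
        if (nvec == -ENOSPC)
                /* not enough vectors for the full request: shrink and retry */
                nvec = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);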
+2 -1
drivers/pci/pci.c
··· 6195 6195 } else if (!strncmp(str, "pcie_scan_all", 13)) { 6196 6196 pci_add_flags(PCI_SCAN_ALL_PCIE_DEVS); 6197 6197 } else if (!strncmp(str, "disable_acs_redir=", 18)) { 6198 - disable_acs_redir_param = str + 18; 6198 + disable_acs_redir_param = 6199 + kstrdup(str + 18, GFP_KERNEL); 6199 6200 } else { 6200 6201 printk(KERN_ERR "PCI: Unknown option `%s'\n", 6201 6202 str);
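The kstrdup() fixes the lifetime of the parameter string: pci_setup() parses the early command line, whose storage is init memory released after boot, so stashing the raw pointer would leave disable_acs_redir_param dangling by the time hot-added devices consult it. The same pattern applies to any __setup() handler that keeps its argument, sketched here with hypothetical names:

        static char *example_param;     /* consulted long after boot */

        static int __init example_setup(char *str)
        {
                /* 'str' points into init memory; duplicate anything kept past boot */
                example_param = kstrdup(str, GFP_KERNEL);
                return 1;
        }
        __setup("example_param=", example_setup);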
+4 -4
drivers/pci/switch/switchtec.c
··· 1373 1373 if (ioread32(&stdev->mmio_mrpc->dma_ver) == 0) 1374 1374 return 0; 1375 1375 1376 - stdev->dma_mrpc = dma_zalloc_coherent(&stdev->pdev->dev, 1377 - sizeof(*stdev->dma_mrpc), 1378 - &stdev->dma_mrpc_dma_addr, 1379 - GFP_KERNEL); 1376 + stdev->dma_mrpc = dma_alloc_coherent(&stdev->pdev->dev, 1377 + sizeof(*stdev->dma_mrpc), 1378 + &stdev->dma_mrpc_dma_addr, 1379 + GFP_KERNEL); 1380 1380 if (stdev->dma_mrpc == NULL) 1381 1381 return -ENOMEM; 1382 1382
+3 -2
drivers/phy/marvell/phy-berlin-sata.c
··· 32 32 33 33 /* register 0x01 */ 34 34 #define REF_FREF_SEL_25 BIT(0) 35 - #define PHY_MODE_SATA (0x0 << 5) 35 + #define PHY_BERLIN_MODE_SATA (0x0 << 5) 36 36 37 37 /* register 0x02 */ 38 38 #define USE_MAX_PLL_RATE BIT(12) ··· 102 102 103 103 /* set PHY mode and ref freq to 25 MHz */ 104 104 phy_berlin_sata_reg_setbits(ctrl_reg, priv->phy_base, 0x01, 105 - 0x00ff, REF_FREF_SEL_25 | PHY_MODE_SATA); 105 + 0x00ff, 106 + REF_FREF_SEL_25 | PHY_BERLIN_MODE_SATA); 106 107 107 108 /* set PHY up to 6 Gbps */ 108 109 phy_berlin_sata_reg_setbits(ctrl_reg, priv->phy_base, 0x25,
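The PHY_MODE_SATA rename is a namespace fix rather than a behavioural change: the generic PHY framework's enum phy_mode gained a PHY_MODE_SATA enumerator, which would collide with the driver-local macro once both definitions are in scope. Roughly (the enum excerpt is illustrative):

        /* driver-local register value, now carrying a driver prefix */
        #define PHY_BERLIN_MODE_SATA    (0x0 << 5)

        /* generic enumerator it would otherwise clash with (include/linux/phy/phy.h) */
        enum phy_mode {
                /* ... */
                PHY_MODE_SATA,
        };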
+1
drivers/phy/ti/Kconfig
··· 82 82 default y if TI_CPSW=y 83 83 depends on TI_CPSW || COMPILE_TEST 84 84 select GENERIC_PHY 85 + select REGMAP 85 86 default m 86 87 help 87 88 This driver supports configuring of the TI CPSW Port mode depending on
+44 -3
drivers/platform/chrome/Kconfig
··· 49 49 To compile this driver as a module, choose M here: the 50 50 module will be called chromeos_tbmc. 51 51 52 - config CROS_EC_CTL 53 - tristate 54 - 55 52 config CROS_EC_I2C 56 53 tristate "ChromeOS Embedded Controller (I2C)" 57 54 depends on MFD_CROS_EC && I2C ··· 107 110 108 111 To compile this driver as a module, choose M here: the 109 112 module will be called cros_kbd_led_backlight. 113 + 114 + config CROS_EC_LIGHTBAR 115 + tristate "Chromebook Pixel's lightbar support" 116 + depends on MFD_CROS_EC_CHARDEV 117 + default MFD_CROS_EC_CHARDEV 118 + help 119 + This option exposes the Chromebook Pixel's lightbar to 120 + userspace. 121 + 122 + To compile this driver as a module, choose M here: the 123 + module will be called cros_ec_lightbar. 124 + 125 + config CROS_EC_VBC 126 + tristate "ChromeOS EC vboot context support" 127 + depends on MFD_CROS_EC_CHARDEV && OF 128 + default MFD_CROS_EC_CHARDEV 129 + help 130 + This option exposes the ChromeOS EC vboot context nvram to 131 + userspace. 132 + 133 + To compile this driver as a module, choose M here: the 134 + module will be called cros_ec_vbc. 135 + 136 + config CROS_EC_DEBUGFS 137 + tristate "Export ChromeOS EC internals in DebugFS" 138 + depends on MFD_CROS_EC_CHARDEV && DEBUG_FS 139 + default MFD_CROS_EC_CHARDEV 140 + help 141 + This option exposes the ChromeOS EC device internals to 142 + userspace. 143 + 144 + To compile this driver as a module, choose M here: the 145 + module will be called cros_ec_debugfs. 146 + 147 + config CROS_EC_SYSFS 148 + tristate "ChromeOS EC control and information through sysfs" 149 + depends on MFD_CROS_EC_CHARDEV && SYSFS 150 + default MFD_CROS_EC_CHARDEV 151 + help 152 + This option exposes some sysfs attributes to control and get 153 + information from ChromeOS EC. 154 + 155 + To compile this driver as a module, choose M here: the 156 + module will be called cros_ec_sysfs. 110 157 111 158 endif # CHROMEOS_PLATFORMS
+4 -3
drivers/platform/chrome/Makefile
··· 3 3 obj-$(CONFIG_CHROMEOS_LAPTOP) += chromeos_laptop.o 4 4 obj-$(CONFIG_CHROMEOS_PSTORE) += chromeos_pstore.o 5 5 obj-$(CONFIG_CHROMEOS_TBMC) += chromeos_tbmc.o 6 - cros_ec_ctl-objs := cros_ec_sysfs.o cros_ec_lightbar.o \ 7 - cros_ec_vbc.o cros_ec_debugfs.o 8 - obj-$(CONFIG_CROS_EC_CTL) += cros_ec_ctl.o 9 6 obj-$(CONFIG_CROS_EC_I2C) += cros_ec_i2c.o 10 7 obj-$(CONFIG_CROS_EC_SPI) += cros_ec_spi.o 11 8 cros_ec_lpcs-objs := cros_ec_lpc.o cros_ec_lpc_reg.o ··· 10 13 obj-$(CONFIG_CROS_EC_LPC) += cros_ec_lpcs.o 11 14 obj-$(CONFIG_CROS_EC_PROTO) += cros_ec_proto.o 12 15 obj-$(CONFIG_CROS_KBD_LED_BACKLIGHT) += cros_kbd_led_backlight.o 16 + obj-$(CONFIG_CROS_EC_LIGHTBAR) += cros_ec_lightbar.o 17 + obj-$(CONFIG_CROS_EC_VBC) += cros_ec_vbc.o 18 + obj-$(CONFIG_CROS_EC_DEBUGFS) += cros_ec_debugfs.o 19 + obj-$(CONFIG_CROS_EC_SYSFS) += cros_ec_sysfs.o
+44 -22
drivers/platform/chrome/cros_ec_debugfs.c
··· 23 23 #include <linux/fs.h> 24 24 #include <linux/mfd/cros_ec.h> 25 25 #include <linux/mfd/cros_ec_commands.h> 26 + #include <linux/module.h> 26 27 #include <linux/mutex.h> 28 + #include <linux/platform_device.h> 27 29 #include <linux/poll.h> 28 30 #include <linux/sched.h> 29 31 #include <linux/slab.h> 30 32 #include <linux/wait.h> 33 + 34 + #define DRV_NAME "cros-ec-debugfs" 31 35 32 36 #define LOG_SHIFT 14 33 37 #define LOG_SIZE (1 << LOG_SHIFT) ··· 427 423 return 0; 428 424 } 429 425 430 - int cros_ec_debugfs_init(struct cros_ec_dev *ec) 426 + static int cros_ec_debugfs_probe(struct platform_device *pd) 431 427 { 428 + struct cros_ec_dev *ec = dev_get_drvdata(pd->dev.parent); 432 429 struct cros_ec_platform *ec_platform = dev_get_platdata(ec->dev); 433 430 const char *name = ec_platform->ec_name; 434 431 struct cros_ec_debugfs *debug_info; ··· 458 453 459 454 ec->debug_info = debug_info; 460 455 456 + dev_set_drvdata(&pd->dev, ec); 457 + 461 458 return 0; 462 459 463 460 remove_debugfs: 464 461 debugfs_remove_recursive(debug_info->dir); 465 462 return ret; 466 463 } 467 - EXPORT_SYMBOL(cros_ec_debugfs_init); 468 464 469 - void cros_ec_debugfs_remove(struct cros_ec_dev *ec) 465 + static int cros_ec_debugfs_remove(struct platform_device *pd) 470 466 { 471 - if (!ec->debug_info) 472 - return; 467 + struct cros_ec_dev *ec = dev_get_drvdata(pd->dev.parent); 473 468 474 469 debugfs_remove_recursive(ec->debug_info->dir); 475 470 cros_ec_cleanup_console_log(ec->debug_info); 476 - } 477 - EXPORT_SYMBOL(cros_ec_debugfs_remove); 478 471 479 - void cros_ec_debugfs_suspend(struct cros_ec_dev *ec) 480 - { 481 - /* 482 - * cros_ec_debugfs_init() failures are non-fatal; it's also possible 483 - * that we initted things but decided that console log wasn't supported. 484 - * We'll use the same set of checks that cros_ec_debugfs_remove() + 485 - * cros_ec_cleanup_console_log() end up using to handle those cases. 486 - */ 487 - if (ec->debug_info && ec->debug_info->log_buffer.buf) 488 - cancel_delayed_work_sync(&ec->debug_info->log_poll_work); 472 + return 0; 489 473 } 490 - EXPORT_SYMBOL(cros_ec_debugfs_suspend); 491 474 492 - void cros_ec_debugfs_resume(struct cros_ec_dev *ec) 475 + static int __maybe_unused cros_ec_debugfs_suspend(struct device *dev) 493 476 { 494 - if (ec->debug_info && ec->debug_info->log_buffer.buf) 495 - schedule_delayed_work(&ec->debug_info->log_poll_work, 0); 477 + struct cros_ec_dev *ec = dev_get_drvdata(dev); 478 + 479 + cancel_delayed_work_sync(&ec->debug_info->log_poll_work); 480 + 481 + return 0; 496 482 } 497 - EXPORT_SYMBOL(cros_ec_debugfs_resume); 483 + 484 + static int __maybe_unused cros_ec_debugfs_resume(struct device *dev) 485 + { 486 + struct cros_ec_dev *ec = dev_get_drvdata(dev); 487 + 488 + schedule_delayed_work(&ec->debug_info->log_poll_work, 0); 489 + 490 + return 0; 491 + } 492 + 493 + static SIMPLE_DEV_PM_OPS(cros_ec_debugfs_pm_ops, 494 + cros_ec_debugfs_suspend, cros_ec_debugfs_resume); 495 + 496 + static struct platform_driver cros_ec_debugfs_driver = { 497 + .driver = { 498 + .name = DRV_NAME, 499 + .pm = &cros_ec_debugfs_pm_ops, 500 + }, 501 + .probe = cros_ec_debugfs_probe, 502 + .remove = cros_ec_debugfs_remove, 503 + }; 504 + 505 + module_platform_driver(cros_ec_debugfs_driver); 506 + 507 + MODULE_LICENSE("GPL"); 508 + MODULE_DESCRIPTION("Debug logs for ChromeOS EC"); 509 + MODULE_ALIAS("platform:" DRV_NAME);
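The debugfs support moves from exported init/remove/suspend/resume helpers driven by the MFD core to a self-registering platform driver whose PM callbacks come from dev_pm_ops; the parent cros_ec_dev now only instantiates a "cros-ec-debugfs" platform device. The lightbar, vbc, and sysfs conversions below follow the same skeleton (the foo_* names are placeholders):

        static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

        static struct platform_driver foo_driver = {
                .driver = {
                        .name = "foo",          /* must match the MFD cell name */
                        .pm = &foo_pm_ops,
                },
                .probe = foo_probe,
                .remove = foo_remove,
        };
        module_platform_driver(foo_driver);     /* generates the module init/exit boilerplate */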
-10
drivers/platform/chrome/cros_ec_i2c.c
··· 317 317 return 0; 318 318 } 319 319 320 - static int cros_ec_i2c_remove(struct i2c_client *client) 321 - { 322 - struct cros_ec_device *ec_dev = i2c_get_clientdata(client); 323 - 324 - cros_ec_remove(ec_dev); 325 - 326 - return 0; 327 - } 328 - 329 320 #ifdef CONFIG_PM_SLEEP 330 321 static int cros_ec_i2c_suspend(struct device *dev) 331 322 { ··· 367 376 .pm = &cros_ec_i2c_pm_ops, 368 377 }, 369 378 .probe = cros_ec_i2c_probe, 370 - .remove = cros_ec_i2c_remove, 371 379 .id_table = cros_ec_i2c_id, 372 380 }; 373 381
+87 -53
drivers/platform/chrome/cros_ec_lightbar.c
··· 33 33 #include <linux/uaccess.h> 34 34 #include <linux/slab.h> 35 35 36 + #define DRV_NAME "cros-ec-lightbar" 37 + 36 38 /* Rate-limit the lightbar interface to prevent DoS. */ 37 39 static unsigned long lb_interval_jiffies = 50 * HZ / 1000; 38 40 ··· 43 41 * If this is true, we won't do anything during suspend/resume. 44 42 */ 45 43 static bool userspace_control; 46 - static struct cros_ec_dev *ec_with_lightbar; 47 44 48 45 static ssize_t interval_msec_show(struct device *dev, 49 46 struct device_attribute *attr, char *buf) ··· 374 373 return ret; 375 374 } 376 375 377 - int lb_manual_suspend_ctrl(struct cros_ec_dev *ec, uint8_t enable) 376 + static int lb_manual_suspend_ctrl(struct cros_ec_dev *ec, uint8_t enable) 378 377 { 379 378 struct ec_params_lightbar *param; 380 379 struct cros_ec_command *msg; 381 380 int ret; 382 - 383 - if (ec != ec_with_lightbar) 384 - return 0; 385 381 386 382 msg = alloc_lightbar_cmd_msg(ec); 387 383 if (!msg) ··· 406 408 407 409 return ret; 408 410 } 409 - EXPORT_SYMBOL(lb_manual_suspend_ctrl); 410 - 411 - int lb_suspend(struct cros_ec_dev *ec) 412 - { 413 - if (userspace_control || ec != ec_with_lightbar) 414 - return 0; 415 - 416 - return lb_send_empty_cmd(ec, LIGHTBAR_CMD_SUSPEND); 417 - } 418 - EXPORT_SYMBOL(lb_suspend); 419 - 420 - int lb_resume(struct cros_ec_dev *ec) 421 - { 422 - if (userspace_control || ec != ec_with_lightbar) 423 - return 0; 424 - 425 - return lb_send_empty_cmd(ec, LIGHTBAR_CMD_RESUME); 426 - } 427 - EXPORT_SYMBOL(lb_resume); 428 411 429 412 static ssize_t sequence_store(struct device *dev, struct device_attribute *attr, 430 413 const char *buf, size_t count) ··· 563 584 NULL, 564 585 }; 565 586 566 - bool ec_has_lightbar(struct cros_ec_dev *ec) 567 - { 568 - return !!get_lightbar_version(ec, NULL, NULL); 569 - } 570 - 571 - static umode_t cros_ec_lightbar_attrs_are_visible(struct kobject *kobj, 572 - struct attribute *a, int n) 573 - { 574 - struct device *dev = container_of(kobj, struct device, kobj); 575 - struct cros_ec_dev *ec = to_cros_ec_dev(dev); 576 - struct platform_device *pdev = to_platform_device(ec->dev); 577 - struct cros_ec_platform *pdata = pdev->dev.platform_data; 578 - int is_cros_ec; 579 - 580 - is_cros_ec = strcmp(pdata->ec_name, CROS_EC_DEV_NAME); 581 - 582 - if (is_cros_ec != 0) 583 - return 0; 584 - 585 - /* Only instantiate this stuff if the EC has a lightbar */ 586 - if (ec_has_lightbar(ec)) { 587 - ec_with_lightbar = ec; 588 - return a->mode; 589 - } 590 - return 0; 591 - } 592 - 593 587 struct attribute_group cros_ec_lightbar_attr_group = { 594 588 .name = "lightbar", 595 589 .attrs = __lb_cmds_attrs, 596 - .is_visible = cros_ec_lightbar_attrs_are_visible, 597 590 }; 598 - EXPORT_SYMBOL(cros_ec_lightbar_attr_group); 591 + 592 + static int cros_ec_lightbar_probe(struct platform_device *pd) 593 + { 594 + struct cros_ec_dev *ec_dev = dev_get_drvdata(pd->dev.parent); 595 + struct cros_ec_platform *pdata = dev_get_platdata(ec_dev->dev); 596 + struct device *dev = &pd->dev; 597 + int ret; 598 + 599 + /* 600 + * Only instantiate the lightbar if the EC name is 'cros_ec'. Other EC 601 + * devices like 'cros_pd' don't have a lightbar. 602 + */ 603 + if (strcmp(pdata->ec_name, CROS_EC_DEV_NAME) != 0) 604 + return -ENODEV; 605 + 606 + /* 607 + * Then ask for the lightbar version; if it's 0, this 'cros_ec' 608 + * doesn't have a lightbar. 609 + */ 610 + if (!get_lightbar_version(ec_dev, NULL, NULL)) 611 + return -ENODEV; 612 + 613 + /* Take control of the lightbar from the EC. */ 614 + lb_manual_suspend_ctrl(ec_dev, 1); 615 + 616 + ret = sysfs_create_group(&ec_dev->class_dev.kobj, 617 + &cros_ec_lightbar_attr_group); 618 + if (ret < 0) 619 + dev_err(dev, "failed to create %s attributes. err=%d\n", 620 + cros_ec_lightbar_attr_group.name, ret); 621 + 622 + return ret; 623 + } 624 + 625 + static int cros_ec_lightbar_remove(struct platform_device *pd) 626 + { 627 + struct cros_ec_dev *ec_dev = dev_get_drvdata(pd->dev.parent); 628 + 629 + sysfs_remove_group(&ec_dev->class_dev.kobj, 630 + &cros_ec_lightbar_attr_group); 631 + 632 + /* Let the EC take over the lightbar again. */ 633 + lb_manual_suspend_ctrl(ec_dev, 0); 634 + 635 + return 0; 636 + } 637 + 638 + static int __maybe_unused cros_ec_lightbar_resume(struct device *dev) 639 + { 640 + struct cros_ec_dev *ec_dev = dev_get_drvdata(dev); 641 + 642 + if (userspace_control) 643 + return 0; 644 + 645 + return lb_send_empty_cmd(ec_dev, LIGHTBAR_CMD_RESUME); 646 + } 647 + 648 + static int __maybe_unused cros_ec_lightbar_suspend(struct device *dev) 649 + { 650 + struct cros_ec_dev *ec_dev = dev_get_drvdata(dev); 651 + 652 + if (userspace_control) 653 + return 0; 654 + 655 + return lb_send_empty_cmd(ec_dev, LIGHTBAR_CMD_SUSPEND); 656 + } 657 + 658 + static SIMPLE_DEV_PM_OPS(cros_ec_lightbar_pm_ops, 659 + cros_ec_lightbar_suspend, cros_ec_lightbar_resume); 660 + 661 + static struct platform_driver cros_ec_lightbar_driver = { 662 + .driver = { 663 + .name = DRV_NAME, 664 + .pm = &cros_ec_lightbar_pm_ops, 665 + }, 666 + .probe = cros_ec_lightbar_probe, 667 + .remove = cros_ec_lightbar_remove, 668 + }; 669 + 670 + module_platform_driver(cros_ec_lightbar_driver); 671 + 672 + MODULE_LICENSE("GPL"); 673 + MODULE_DESCRIPTION("Expose the Chromebook Pixel's lightbar to userspace"); 674 + MODULE_ALIAS("platform:" DRV_NAME);
-4
drivers/platform/chrome/cros_ec_lpc.c
··· 327 327 328 328 static int cros_ec_lpc_remove(struct platform_device *pdev) 329 329 { 330 - struct cros_ec_device *ec_dev; 331 330 struct acpi_device *adev; 332 331 333 332 adev = ACPI_COMPANION(&pdev->dev); 334 333 if (adev) 335 334 acpi_remove_notify_handler(adev->handle, ACPI_ALL_NOTIFY, 336 335 cros_ec_lpc_acpi_notify); 337 - 338 - ec_dev = platform_get_drvdata(pdev); 339 - cros_ec_remove(ec_dev); 340 336 341 337 return 0; 342 338 }
-11
drivers/platform/chrome/cros_ec_spi.c
··· 685 685 return 0; 686 686 } 687 687 688 - static int cros_ec_spi_remove(struct spi_device *spi) 689 - { 690 - struct cros_ec_device *ec_dev; 691 - 692 - ec_dev = spi_get_drvdata(spi); 693 - cros_ec_remove(ec_dev); 694 - 695 - return 0; 696 - } 697 - 698 688 #ifdef CONFIG_PM_SLEEP 699 689 static int cros_ec_spi_suspend(struct device *dev) 700 690 { ··· 723 733 .pm = &cros_ec_spi_pm_ops, 724 734 }, 725 735 .probe = cros_ec_spi_probe, 726 - .remove = cros_ec_spi_remove, 727 736 .id_table = cros_ec_spi_id, 728 737 }; 729 738
+35 -1
drivers/platform/chrome/cros_ec_sysfs.c
··· 34 34 #include <linux/types.h> 35 35 #include <linux/uaccess.h> 36 36 37 + #define DRV_NAME "cros-ec-sysfs" 38 + 37 39 /* Accessor functions */ 38 40 39 41 static ssize_t reboot_show(struct device *dev, ··· 355 353 .attrs = __ec_attrs, 356 354 .is_visible = cros_ec_ctrl_visible, 357 355 }; 358 - EXPORT_SYMBOL(cros_ec_attr_group); 356 + 357 + static int cros_ec_sysfs_probe(struct platform_device *pd) 358 + { 359 + struct cros_ec_dev *ec_dev = dev_get_drvdata(pd->dev.parent); 360 + struct device *dev = &pd->dev; 361 + int ret; 362 + 363 + ret = sysfs_create_group(&ec_dev->class_dev.kobj, &cros_ec_attr_group); 364 + if (ret < 0) 365 + dev_err(dev, "failed to create attributes. err=%d\n", ret); 366 + 367 + return ret; 368 + } 369 + 370 + static int cros_ec_sysfs_remove(struct platform_device *pd) 371 + { 372 + struct cros_ec_dev *ec_dev = dev_get_drvdata(pd->dev.parent); 373 + 374 + sysfs_remove_group(&ec_dev->class_dev.kobj, &cros_ec_attr_group); 375 + 376 + return 0; 377 + } 378 + 379 + static struct platform_driver cros_ec_sysfs_driver = { 380 + .driver = { 381 + .name = DRV_NAME, 382 + }, 383 + .probe = cros_ec_sysfs_probe, 384 + .remove = cros_ec_sysfs_remove, 385 + }; 386 + 387 + module_platform_driver(cros_ec_sysfs_driver); 359 388 360 389 MODULE_LICENSE("GPL"); 361 390 MODULE_DESCRIPTION("ChromeOS EC control driver"); 391 + MODULE_ALIAS("platform:" DRV_NAME);
+42 -17
drivers/platform/chrome/cros_ec_vbc.c
··· 22 22 #include <linux/platform_device.h> 23 23 #include <linux/mfd/cros_ec.h> 24 24 #include <linux/mfd/cros_ec_commands.h> 25 + #include <linux/module.h> 25 26 #include <linux/slab.h> 27 + 28 + #define DRV_NAME "cros-ec-vbc" 26 29 27 30 static ssize_t vboot_context_read(struct file *filp, struct kobject *kobj, 28 31 struct bin_attribute *att, char *buf, ··· 108 105 return data_sz; 109 106 } 110 107 111 - static umode_t cros_ec_vbc_is_visible(struct kobject *kobj, 112 - struct bin_attribute *a, int n) 113 - { 114 - struct device *dev = container_of(kobj, struct device, kobj); 115 - struct cros_ec_dev *ec = to_cros_ec_dev(dev); 116 - struct device_node *np = ec->ec_dev->dev->of_node; 117 - 118 - if (IS_ENABLED(CONFIG_OF) && np) { 119 - if (of_property_read_bool(np, "google,has-vbc-nvram")) 120 - return a->attr.mode; 121 - } 122 - 123 - return 0; 124 - } 125 - 126 108 static BIN_ATTR_RW(vboot_context, 16); 127 109 128 110 static struct bin_attribute *cros_ec_vbc_bin_attrs[] = { ··· 118 130 struct attribute_group cros_ec_vbc_attr_group = { 119 131 .name = "vbc", 120 132 .bin_attrs = cros_ec_vbc_bin_attrs, 121 - .is_bin_visible = cros_ec_vbc_is_visible, 122 133 }; 123 - EXPORT_SYMBOL(cros_ec_vbc_attr_group); 134 + 135 + static int cros_ec_vbc_probe(struct platform_device *pd) 136 + { 137 + struct cros_ec_dev *ec_dev = dev_get_drvdata(pd->dev.parent); 138 + struct device *dev = &pd->dev; 139 + int ret; 140 + 141 + ret = sysfs_create_group(&ec_dev->class_dev.kobj, 142 + &cros_ec_vbc_attr_group); 143 + if (ret < 0) 144 + dev_err(dev, "failed to create %s attributes. err=%d\n", 145 + cros_ec_vbc_attr_group.name, ret); 146 + 147 + return ret; 148 + } 149 + 150 + static int cros_ec_vbc_remove(struct platform_device *pd) 151 + { 152 + struct cros_ec_dev *ec_dev = dev_get_drvdata(pd->dev.parent); 153 + 154 + sysfs_remove_group(&ec_dev->class_dev.kobj, 155 + &cros_ec_vbc_attr_group); 156 + 157 + return 0; 158 + } 159 + 160 + static struct platform_driver cros_ec_vbc_driver = { 161 + .driver = { 162 + .name = DRV_NAME, 163 + }, 164 + .probe = cros_ec_vbc_probe, 165 + .remove = cros_ec_vbc_remove, 166 + }; 167 + 168 + module_platform_driver(cros_ec_vbc_driver); 169 + 170 + MODULE_LICENSE("GPL"); 171 + MODULE_DESCRIPTION("Expose the vboot context nvram to userspace"); 172 + MODULE_ALIAS("platform:" DRV_NAME);
+3 -3
drivers/platform/x86/Kconfig
··· 1009 1009 1010 1010 config INTEL_IPS 1011 1011 tristate "Intel Intelligent Power Sharing" 1012 - depends on ACPI 1012 + depends on ACPI && PCI 1013 1013 ---help--- 1014 1014 Intel Calpella platforms support dynamic power sharing between the 1015 1015 CPU and GPU, maximizing performance in a given TDP. This driver, ··· 1135 1135 1136 1136 config APPLE_GMUX 1137 1137 tristate "Apple Gmux Driver" 1138 - depends on ACPI 1138 + depends on ACPI && PCI 1139 1139 depends on PNP 1140 1140 depends on BACKLIGHT_CLASS_DEVICE 1141 1141 depends on BACKLIGHT_APPLE=n || BACKLIGHT_APPLE ··· 1174 1174 1175 1175 config INTEL_PMC_IPC 1176 1176 tristate "Intel PMC IPC Driver" 1177 - depends on ACPI 1177 + depends on ACPI && PCI 1178 1178 ---help--- 1179 1179 This driver provides support for PMC control on some Intel platforms. 1180 1180 The PMC is an ARC processor which defines IPC commands for communication
+2 -1
drivers/ptp/ptp_chardev.c
··· 224 224 extoff = NULL; 225 225 break; 226 226 } 227 - if (extoff->n_samples > PTP_MAX_SAMPLES) { 227 + if (extoff->n_samples > PTP_MAX_SAMPLES 228 + || extoff->rsv[0] || extoff->rsv[1] || extoff->rsv[2]) { 228 229 err = -EINVAL; 229 230 break; 230 231 }
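Requiring the reserved words to be zero at introduction time is what lets the ioctl grow later: since userspace must clear rsv[] now, a future kernel can assign meaning to those fields without breaking existing binaries. From the caller's side it looks roughly like this (a sketch; the sample count is arbitrary, fd is an open /dev/ptpN descriptor):

        struct ptp_sys_offset_extended req = {
                .n_samples = 5,
                /* .rsv[] stays zero-initialized; nonzero values now earn -EINVAL */
        };

        if (ioctl(fd, PTP_SYS_OFFSET_EXTENDED, &req) < 0)
                perror("PTP_SYS_OFFSET_EXTENDED");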
+11 -11
drivers/rapidio/devices/tsi721.c
··· 1382 1382 INIT_WORK(&priv->idb_work, tsi721_db_dpc); 1383 1383 1384 1384 /* Allocate buffer for inbound doorbells queue */ 1385 - priv->idb_base = dma_zalloc_coherent(&priv->pdev->dev, 1386 - IDB_QSIZE * TSI721_IDB_ENTRY_SIZE, 1387 - &priv->idb_dma, GFP_KERNEL); 1385 + priv->idb_base = dma_alloc_coherent(&priv->pdev->dev, 1386 + IDB_QSIZE * TSI721_IDB_ENTRY_SIZE, 1387 + &priv->idb_dma, GFP_KERNEL); 1388 1388 if (!priv->idb_base) 1389 1389 return -ENOMEM; 1390 1390 ··· 1447 1447 regs = priv->regs + TSI721_DMAC_BASE(TSI721_DMACH_MAINT); 1448 1448 1449 1449 /* Allocate space for DMA descriptors */ 1450 - bd_ptr = dma_zalloc_coherent(&priv->pdev->dev, 1451 - bd_num * sizeof(struct tsi721_dma_desc), 1452 - &bd_phys, GFP_KERNEL); 1450 + bd_ptr = dma_alloc_coherent(&priv->pdev->dev, 1451 + bd_num * sizeof(struct tsi721_dma_desc), 1452 + &bd_phys, GFP_KERNEL); 1453 1453 if (!bd_ptr) 1454 1454 return -ENOMEM; 1455 1455 ··· 1464 1464 sts_size = (bd_num >= TSI721_DMA_MINSTSSZ) ? 1465 1465 bd_num : TSI721_DMA_MINSTSSZ; 1466 1466 sts_size = roundup_pow_of_two(sts_size); 1467 - sts_ptr = dma_zalloc_coherent(&priv->pdev->dev, 1467 + sts_ptr = dma_alloc_coherent(&priv->pdev->dev, 1468 1468 sts_size * sizeof(struct tsi721_dma_sts), 1469 1469 &sts_phys, GFP_KERNEL); 1470 1470 if (!sts_ptr) { ··· 1939 1939 1940 1940 /* Outbound message descriptor status FIFO allocation */ 1941 1941 priv->omsg_ring[mbox].sts_size = roundup_pow_of_two(entries + 1); 1942 - priv->omsg_ring[mbox].sts_base = dma_zalloc_coherent(&priv->pdev->dev, 1943 - priv->omsg_ring[mbox].sts_size * 1944 - sizeof(struct tsi721_dma_sts), 1945 - &priv->omsg_ring[mbox].sts_phys, GFP_KERNEL); 1942 + priv->omsg_ring[mbox].sts_base = dma_alloc_coherent(&priv->pdev->dev, 1943 + priv->omsg_ring[mbox].sts_size * sizeof(struct tsi721_dma_sts), 1944 + &priv->omsg_ring[mbox].sts_phys, 1945 + GFP_KERNEL); 1946 1946 if (priv->omsg_ring[mbox].sts_base == NULL) { 1947 1947 tsi_debug(OMSG, &priv->pdev->dev, 1948 1948 "ENOMEM for OB_MSG_%d status FIFO", mbox);
+4 -4
drivers/rapidio/devices/tsi721_dma.c
··· 90 90 * Allocate space for DMA descriptors 91 91 * (add an extra element for link descriptor) 92 92 */ 93 - bd_ptr = dma_zalloc_coherent(dev, 94 - (bd_num + 1) * sizeof(struct tsi721_dma_desc), 95 - &bd_phys, GFP_ATOMIC); 93 + bd_ptr = dma_alloc_coherent(dev, 94 + (bd_num + 1) * sizeof(struct tsi721_dma_desc), 95 + &bd_phys, GFP_ATOMIC); 96 96 if (!bd_ptr) 97 97 return -ENOMEM; 98 98 ··· 108 108 sts_size = ((bd_num + 1) >= TSI721_DMA_MINSTSSZ) ? 109 109 (bd_num + 1) : TSI721_DMA_MINSTSSZ; 110 110 sts_size = roundup_pow_of_two(sts_size); 111 - sts_ptr = dma_zalloc_coherent(dev, 111 + sts_ptr = dma_alloc_coherent(dev, 112 112 sts_size * sizeof(struct tsi721_dma_sts), 113 113 &sts_phys, GFP_ATOMIC); 114 114 if (!sts_ptr) {
+7 -2
drivers/remoteproc/remoteproc_virtio.c
··· 153 153 const bool * ctx, 154 154 struct irq_affinity *desc) 155 155 { 156 - int i, ret; 156 + int i, ret, queue_idx = 0; 157 157 158 158 for (i = 0; i < nvqs; ++i) { 159 - vqs[i] = rp_find_vq(vdev, i, callbacks[i], names[i], 159 + if (!names[i]) { 160 + vqs[i] = NULL; 161 + continue; 162 + } 163 + 164 + vqs[i] = rp_find_vq(vdev, queue_idx++, callbacks[i], names[i], 160 165 ctx ? ctx[i] : false); 161 166 if (IS_ERR(vqs[i])) { 162 167 ret = PTR_ERR(vqs[i]);
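Skipping NULL entries in names[] brings the remoteproc transport in line with the virtio core convention that a NULL name means the driver does not want that queue; queue_idx advances only for queues actually created, keeping the vring numbering dense. The pattern from the hunk:

        for (i = 0; i < nvqs; ++i) {
                if (!names[i]) {
                        vqs[i] = NULL;          /* driver opted out of this virtqueue */
                        continue;
                }
                vqs[i] = rp_find_vq(vdev, queue_idx++, callbacks[i], names[i],
                                    ctx ? ctx[i] : false);
        }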
+14 -6
drivers/reset/Kconfig
··· 109 109 110 110 config RESET_SIMPLE 111 111 bool "Simple Reset Controller Driver" if COMPILE_TEST 112 - default ARCH_SOCFPGA || ARCH_STM32 || ARCH_STRATIX10 || ARCH_SUNXI || ARCH_ZX || ARCH_ASPEED 112 + default ARCH_STM32 || ARCH_STRATIX10 || ARCH_SUNXI || ARCH_ZX || ARCH_ASPEED 113 113 help 114 114 This enables a simple reset controller driver for reset lines 115 115 that can be asserted and deasserted by toggling bits in a contiguous, ··· 127 127 default MACH_STM32MP157 128 128 help 129 129 This enables the RCC reset controller driver for STM32 MPUs. 130 + 131 + config RESET_SOCFPGA 132 + bool "SoCFPGA Reset Driver" if COMPILE_TEST && !ARCH_SOCFPGA 133 + default ARCH_SOCFPGA 134 + select RESET_SIMPLE 135 + help 136 + This enables the reset driver for the SoCFPGA ARMv7 platforms. This 137 + driver gets initialized early during platform init calls. 130 138 131 139 config RESET_SUNXI 132 140 bool "Allwinner SoCs Reset Driver" if COMPILE_TEST && !ARCH_SUNXI ··· 171 163 Say Y if you want to control reset signals provided by System Control 172 164 block, Media I/O block, Peripheral Block. 173 165 174 - config RESET_UNIPHIER_USB3 175 - tristate "USB3 reset driver for UniPhier SoCs" 166 + config RESET_UNIPHIER_GLUE 167 + tristate "Reset driver in glue layer for UniPhier SoCs" 176 168 depends on (ARCH_UNIPHIER || COMPILE_TEST) && OF 177 169 default ARCH_UNIPHIER 178 170 select RESET_SIMPLE 179 171 help 180 - Support for the USB3 core reset on UniPhier SoCs. 181 - Say Y if you want to control reset signals provided by 182 - USB3 glue layer. 172 + Support for peripheral core reset included in its own glue layer 173 + on UniPhier SoCs. Say Y if you want to control reset signals 174 + provided by the glue layer. 183 175 184 176 config RESET_ZYNQ 185 177 bool "ZYNQ Reset Driver" if COMPILE_TEST
+2 -1
drivers/reset/Makefile
··· 19 19 obj-$(CONFIG_RESET_QCOM_PDC) += reset-qcom-pdc.o 20 20 obj-$(CONFIG_RESET_SIMPLE) += reset-simple.o 21 21 obj-$(CONFIG_RESET_STM32MP157) += reset-stm32mp1.o 22 + obj-$(CONFIG_RESET_SOCFPGA) += reset-socfpga.o 22 23 obj-$(CONFIG_RESET_SUNXI) += reset-sunxi.o 23 24 obj-$(CONFIG_RESET_TI_SCI) += reset-ti-sci.o 24 25 obj-$(CONFIG_RESET_TI_SYSCON) += reset-ti-syscon.o 25 26 obj-$(CONFIG_RESET_UNIPHIER) += reset-uniphier.o 26 - obj-$(CONFIG_RESET_UNIPHIER_USB3) += reset-uniphier-usb3.o 27 + obj-$(CONFIG_RESET_UNIPHIER_GLUE) += reset-uniphier-glue.o 27 28 obj-$(CONFIG_RESET_ZYNQ) += reset-zynq.o 28 29
+42
drivers/reset/core.c
··· 795 795 return rstc; 796 796 } 797 797 EXPORT_SYMBOL_GPL(devm_reset_control_array_get); 798 + 799 + static int reset_control_get_count_from_lookup(struct device *dev) 800 + { 801 + const struct reset_control_lookup *lookup; 802 + const char *dev_id; 803 + int count = 0; 804 + 805 + if (!dev) 806 + return -EINVAL; 807 + 808 + dev_id = dev_name(dev); 809 + mutex_lock(&reset_lookup_mutex); 810 + 811 + list_for_each_entry(lookup, &reset_lookup_list, list) { 812 + if (!strcmp(lookup->dev_id, dev_id)) 813 + count++; 814 + } 815 + 816 + mutex_unlock(&reset_lookup_mutex); 817 + 818 + if (count == 0) 819 + count = -ENOENT; 820 + 821 + return count; 822 + } 823 + 824 + /** 825 + * reset_control_get_count - Count number of resets available with a device 826 + * 827 + * @dev: device for which to return the number of resets 828 + * 829 + * Returns positive reset count on success, or error number on failure and 830 + * on count being zero. 831 + */ 832 + int reset_control_get_count(struct device *dev) 833 + { 834 + if (dev->of_node) 835 + return of_reset_control_get_count(dev->of_node); 836 + 837 + return reset_control_get_count_from_lookup(dev); 838 + } 839 + EXPORT_SYMBOL_GPL(reset_control_get_count);
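reset_control_get_count() gives consumers a firmware-agnostic way to size reset arrays: with a device tree it counts the "resets" phandles, otherwise it walks the board-file lookup list populated via reset_controller_add_lookup(). Typical driver use, sketched:

        int nr_resets = reset_control_get_count(&pdev->dev);

        if (nr_resets < 0)
                return nr_resets;       /* -ENOENT when no reset lines are wired up */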
+1
drivers/reset/reset-hsdk.c
··· 86 86 87 87 static const struct reset_control_ops hsdk_reset_ops = { 88 88 .reset = hsdk_reset_reset, 89 + .deassert = hsdk_reset_reset, 89 90 }; 90 91 91 92 static int hsdk_reset_probe(struct platform_device *pdev)
+3 -10
drivers/reset/reset-simple.c
··· 109 109 #define SOCFPGA_NR_BANKS 8 110 110 111 111 static const struct reset_simple_devdata reset_simple_socfpga = { 112 - .reg_offset = 0x10, 112 + .reg_offset = 0x20, 113 113 .nr_resets = SOCFPGA_NR_BANKS * 32, 114 114 .status_active_low = true, 115 115 }; ··· 120 120 }; 121 121 122 122 static const struct of_device_id reset_simple_dt_ids[] = { 123 - { .compatible = "altr,rst-mgr", .data = &reset_simple_socfpga }, 123 + { .compatible = "altr,stratix10-rst-mgr", 124 + .data = &reset_simple_socfpga }, 124 125 { .compatible = "st,stm32-rcc", }, 125 126 { .compatible = "allwinner,sun6i-a31-clock-reset", 126 127 .data = &reset_simple_active_low }, ··· 165 164 data->rcdev.nr_resets = devdata->nr_resets; 166 165 data->active_low = devdata->active_low; 167 166 data->status_active_low = devdata->status_active_low; 168 - } 169 - 170 - if (of_device_is_compatible(dev->of_node, "altr,rst-mgr") && 171 - of_property_read_u32(dev->of_node, "altr,modrst-offset", 172 - &reg_offset)) { 173 - dev_warn(dev, 174 - "missing altr,modrst-offset property, assuming 0x%x!\n", 175 - reg_offset); 176 167 } 177 168 178 169 data->membase += reg_offset;
+88
drivers/reset/reset-socfpga.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* 3 + * Copyright (C) 2018, Intel Corporation 4 + * Copied from reset-sunxi.c 5 + */ 6 + 7 + #include <linux/err.h> 8 + #include <linux/io.h> 9 + #include <linux/init.h> 10 + #include <linux/of.h> 11 + #include <linux/of_address.h> 12 + #include <linux/platform_device.h> 13 + #include <linux/reset-controller.h> 14 + #include <linux/slab.h> 15 + #include <linux/spinlock.h> 16 + #include <linux/types.h> 17 + 18 + #include "reset-simple.h" 19 + 20 + #define SOCFPGA_NR_BANKS 8 21 + void __init socfpga_reset_init(void); 22 + 23 + static int a10_reset_init(struct device_node *np) 24 + { 25 + struct reset_simple_data *data; 26 + struct resource res; 27 + resource_size_t size; 28 + int ret; 29 + u32 reg_offset = 0x10; 30 + 31 + data = kzalloc(sizeof(*data), GFP_KERNEL); 32 + if (!data) 33 + return -ENOMEM; 34 + 35 + ret = of_address_to_resource(np, 0, &res); 36 + if (ret) 37 + goto err_alloc; 38 + 39 + size = resource_size(&res); 40 + if (!request_mem_region(res.start, size, np->name)) { 41 + ret = -EBUSY; 42 + goto err_alloc; 43 + } 44 + 45 + data->membase = ioremap(res.start, size); 46 + if (!data->membase) { 47 + ret = -ENOMEM; 48 + goto err_alloc; 49 + } 50 + 51 + if (of_property_read_u32(np, "altr,modrst-offset", &reg_offset)) 52 + pr_warn("missing altr,modrst-offset property, assuming 0x10\n"); 53 + data->membase += reg_offset; 54 + 55 + spin_lock_init(&data->lock); 56 + 57 + data->rcdev.owner = THIS_MODULE; 58 + data->rcdev.nr_resets = SOCFPGA_NR_BANKS * 32; 59 + data->rcdev.ops = &reset_simple_ops; 60 + data->rcdev.of_node = np; 61 + data->status_active_low = true; 62 + 63 + return reset_controller_register(&data->rcdev); 64 + 65 + err_alloc: 66 + kfree(data); 67 + return ret; 68 + }; 69 + 70 + /* 71 + * These are the reset controller we need to initialize early on in 72 + * our system, before we can even think of using a regular device 73 + * driver for it. 74 + * The controllers that we can register through the regular device 75 + * model are handled by the simple reset driver directly. 76 + */ 77 + static const struct of_device_id socfpga_early_reset_dt_ids[] __initconst = { 78 + { .compatible = "altr,rst-mgr", }, 79 + { /* sentinel */ }, 80 + }; 81 + 82 + void __init socfpga_reset_init(void) 83 + { 84 + struct device_node *np; 85 + 86 + for_each_matching_node(np, socfpga_early_reset_dt_ids) 87 + a10_reset_init(np); 88 + }
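Because probe-time registration is too late for the SoCFPGA ARMv7 platforms, the driver exposes socfpga_reset_init() for machine code to call during early platform init. A minimal sketch of such a caller; the hook name below is an assumption, not part of this commit:

	/* arch/arm machine code, illustrative only */
	static void __init socfpga_early_init(void)
	{
		/* ... other early platform setup ... */
		socfpga_reset_init();	/* register "altr,rst-mgr" before drivers probe */
	}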
+31 -19
drivers/reset/reset-uniphier-usb3.c drivers/reset/reset-uniphier-glue.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 2 2 // 3 - // reset-uniphier-usb3.c - USB3 reset driver for UniPhier 3 + // reset-uniphier-glue.c - Glue layer reset driver for UniPhier 4 4 // Copyright 2018 Socionext Inc. 5 5 // Author: Kunihiko Hayashi <hayashi.kunihiko@socionext.com> 6 6 ··· 15 15 #define MAX_CLKS 2 16 16 #define MAX_RSTS 2 17 17 18 - struct uniphier_usb3_reset_soc_data { 18 + struct uniphier_glue_reset_soc_data { 19 19 int nclks; 20 20 const char * const *clock_names; 21 21 int nrsts; 22 22 const char * const *reset_names; 23 23 }; 24 24 25 - struct uniphier_usb3_reset_priv { 25 + struct uniphier_glue_reset_priv { 26 26 struct clk_bulk_data clk[MAX_CLKS]; 27 27 struct reset_control *rst[MAX_RSTS]; 28 28 struct reset_simple_data rdata; 29 - const struct uniphier_usb3_reset_soc_data *data; 29 + const struct uniphier_glue_reset_soc_data *data; 30 30 }; 31 31 32 - static int uniphier_usb3_reset_probe(struct platform_device *pdev) 32 + static int uniphier_glue_reset_probe(struct platform_device *pdev) 33 33 { 34 34 struct device *dev = &pdev->dev; 35 - struct uniphier_usb3_reset_priv *priv; 35 + struct uniphier_glue_reset_priv *priv; 36 36 struct resource *res; 37 37 resource_size_t size; 38 38 const char *name; ··· 100 100 return ret; 101 101 } 102 102 103 - static int uniphier_usb3_reset_remove(struct platform_device *pdev) 103 + static int uniphier_glue_reset_remove(struct platform_device *pdev) 104 104 { 105 - struct uniphier_usb3_reset_priv *priv = platform_get_drvdata(pdev); 105 + struct uniphier_glue_reset_priv *priv = platform_get_drvdata(pdev); 106 106 int i; 107 107 108 108 for (i = 0; i < priv->data->nrsts; i++) ··· 117 117 "gio", "link", 118 118 }; 119 119 120 - static const struct uniphier_usb3_reset_soc_data uniphier_pro4_data = { 120 + static const struct uniphier_glue_reset_soc_data uniphier_pro4_data = { 121 121 .nclks = ARRAY_SIZE(uniphier_pro4_clock_reset_names), 122 122 .clock_names = uniphier_pro4_clock_reset_names, 123 123 .nrsts = ARRAY_SIZE(uniphier_pro4_clock_reset_names), ··· 128 128 "link", 129 129 }; 130 130 131 - static const struct uniphier_usb3_reset_soc_data uniphier_pxs2_data = { 131 + static const struct uniphier_glue_reset_soc_data uniphier_pxs2_data = { 132 132 .nclks = ARRAY_SIZE(uniphier_pxs2_clock_reset_names), 133 133 .clock_names = uniphier_pxs2_clock_reset_names, 134 134 .nrsts = ARRAY_SIZE(uniphier_pxs2_clock_reset_names), 135 135 .reset_names = uniphier_pxs2_clock_reset_names, 136 136 }; 137 137 138 - static const struct of_device_id uniphier_usb3_reset_match[] = { 138 + static const struct of_device_id uniphier_glue_reset_match[] = { 139 139 { 140 140 .compatible = "socionext,uniphier-pro4-usb3-reset", 141 141 .data = &uniphier_pro4_data, ··· 152 152 .compatible = "socionext,uniphier-pxs3-usb3-reset", 153 153 .data = &uniphier_pxs2_data, 154 154 }, 155 + { 156 + .compatible = "socionext,uniphier-pro4-ahci-reset", 157 + .data = &uniphier_pro4_data, 158 + }, 159 + { 160 + .compatible = "socionext,uniphier-pxs2-ahci-reset", 161 + .data = &uniphier_pxs2_data, 162 + }, 163 + { 164 + .compatible = "socionext,uniphier-pxs3-ahci-reset", 165 + .data = &uniphier_pxs2_data, 166 + }, 155 167 { /* Sentinel */ } 156 168 }; 157 - MODULE_DEVICE_TABLE(of, uniphier_usb3_reset_match); 169 + MODULE_DEVICE_TABLE(of, uniphier_glue_reset_match); 158 170 159 - static struct platform_driver uniphier_usb3_reset_driver = { 160 - .probe = uniphier_usb3_reset_probe, 161 - .remove = uniphier_usb3_reset_remove, 171 + static struct platform_driver uniphier_glue_reset_driver = { 172 + .probe = uniphier_glue_reset_probe, 173 + .remove = uniphier_glue_reset_remove, 162 174 .driver = { 163 - .name = "uniphier-usb3-reset", 164 - .of_match_table = uniphier_usb3_reset_match, 175 + .name = "uniphier-glue-reset", 176 + .of_match_table = uniphier_glue_reset_match, 165 177 }, 166 178 }; 167 - module_platform_driver(uniphier_usb3_reset_driver); 179 + module_platform_driver(uniphier_glue_reset_driver); 168 180 169 181 MODULE_AUTHOR("Kunihiko Hayashi <hayashi.kunihiko@socionext.com>"); 170 - MODULE_DESCRIPTION("UniPhier USB3 Reset Driver"); 182 + MODULE_DESCRIPTION("UniPhier Glue layer reset driver"); 171 183 MODULE_LICENSE("GPL");
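The three new AHCI compatibles reuse the existing pro4/pxs2 soc_data, so probe only needs to pick the matched data and bounds-check it against the fixed-size arrays. A sketch consistent with the driver's structure (not a verbatim excerpt of this file):

	priv->data = of_device_get_match_data(dev);
	if (WARN_ON(!priv->data || priv->data->nclks > MAX_CLKS ||
		    priv->data->nrsts > MAX_RSTS))
		return -EINVAL;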
+7 -8
drivers/s390/net/ism_drv.c
··· 89 89 dma_addr_t dma_handle; 90 90 struct ism_sba *sba; 91 91 92 - sba = dma_zalloc_coherent(&ism->pdev->dev, PAGE_SIZE, 93 - &dma_handle, GFP_KERNEL); 92 + sba = dma_alloc_coherent(&ism->pdev->dev, PAGE_SIZE, &dma_handle, 93 + GFP_KERNEL); 94 94 if (!sba) 95 95 return -ENOMEM; 96 96 ··· 116 116 dma_addr_t dma_handle; 117 117 struct ism_eq *ieq; 118 118 119 - ieq = dma_zalloc_coherent(&ism->pdev->dev, PAGE_SIZE, 120 - &dma_handle, GFP_KERNEL); 119 + ieq = dma_alloc_coherent(&ism->pdev->dev, PAGE_SIZE, &dma_handle, 120 + GFP_KERNEL); 121 121 if (!ieq) 122 122 return -ENOMEM; 123 123 ··· 234 234 test_and_set_bit(dmb->sba_idx, ism->sba_bitmap)) 235 235 return -EINVAL; 236 236 237 - dmb->cpu_addr = dma_zalloc_coherent(&ism->pdev->dev, dmb->dmb_len, 238 - &dmb->dma_addr, GFP_KERNEL | 239 - __GFP_NOWARN | __GFP_NOMEMALLOC | 240 - __GFP_COMP | __GFP_NORETRY); 237 + dmb->cpu_addr = dma_alloc_coherent(&ism->pdev->dev, dmb->dmb_len, 238 + &dmb->dma_addr, 239 + GFP_KERNEL | __GFP_NOWARN | __GFP_NOMEMALLOC | __GFP_COMP | __GFP_NORETRY); 241 240 if (!dmb->cpu_addr) 242 241 clear_bit(dmb->sba_idx, ism->sba_bitmap); 243 242
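This hunk is the first of many identical conversions in this merge: dma_alloc_coherent() now returns zeroed memory, so the dma_zalloc_coherent() wrapper is redundant and is being removed tree-wide. The shape of every one of these changes, as a sketch:

	/* before */
	buf = dma_zalloc_coherent(dev, size, &handle, GFP_KERNEL);

	/* after -- same semantics, the buffer is already zeroed */
	buf = dma_alloc_coherent(dev, size, &handle, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;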
+9 -3
drivers/s390/virtio/virtio_ccw.c
··· 635 635 { 636 636 struct virtio_ccw_device *vcdev = to_vc_device(vdev); 637 637 unsigned long *indicatorp = NULL; 638 - int ret, i; 638 + int ret, i, queue_idx = 0; 639 639 struct ccw1 *ccw; 640 640 641 641 ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL); ··· 643 643 return -ENOMEM; 644 644 645 645 for (i = 0; i < nvqs; ++i) { 646 - vqs[i] = virtio_ccw_setup_vq(vdev, i, callbacks[i], names[i], 647 - ctx ? ctx[i] : false, ccw); 646 + if (!names[i]) { 647 + vqs[i] = NULL; 648 + continue; 649 + } 650 + 651 + vqs[i] = virtio_ccw_setup_vq(vdev, queue_idx++, callbacks[i], 652 + names[i], ctx ? ctx[i] : false, 653 + ccw); 648 654 if (IS_ERR(vqs[i])) { 649 655 ret = PTR_ERR(vqs[i]); 650 656 vqs[i] = NULL;
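Skipping NULL names keeps the per-device queue indexes dense even when a driver leaves holes in its names[] array; only named queues consume a hardware index. The pattern in isolation (a sketch; setup_one_vq stands in for virtio_ccw_setup_vq):

	for (i = 0; i < nvqs; ++i) {
		if (!names[i]) {	/* hole: caller wants no virtqueue here */
			vqs[i] = NULL;
			continue;
		}
		vqs[i] = setup_one_vq(vdev, queue_idx++, callbacks[i], names[i]);
	}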
+3 -2
drivers/scsi/3w-sas.c
··· 646 646 unsigned long *cpu_addr; 647 647 int retval = 1; 648 648 649 - cpu_addr = dma_zalloc_coherent(&tw_dev->tw_pci_dev->dev, 650 - size * TW_Q_LENGTH, &dma_handle, GFP_KERNEL); 649 + cpu_addr = dma_alloc_coherent(&tw_dev->tw_pci_dev->dev, 650 + size * TW_Q_LENGTH, &dma_handle, 651 + GFP_KERNEL); 651 652 if (!cpu_addr) { 652 653 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x5, "Memory allocation failed"); 653 654 goto out;
+4 -4
drivers/scsi/a100u2w.c
··· 1123 1123 1124 1124 /* Get total memory needed for SCB */ 1125 1125 sz = ORC_MAXQUEUE * sizeof(struct orc_scb); 1126 - host->scb_virt = dma_zalloc_coherent(&pdev->dev, sz, &host->scb_phys, 1127 - GFP_KERNEL); 1126 + host->scb_virt = dma_alloc_coherent(&pdev->dev, sz, &host->scb_phys, 1127 + GFP_KERNEL); 1128 1128 if (!host->scb_virt) { 1129 1129 printk("inia100: SCB memory allocation error\n"); 1130 1130 goto out_host_put; ··· 1132 1132 1133 1133 /* Get total memory needed for ESCB */ 1134 1134 sz = ORC_MAXQUEUE * sizeof(struct orc_extended_scb); 1135 - host->escb_virt = dma_zalloc_coherent(&pdev->dev, sz, &host->escb_phys, 1136 - GFP_KERNEL); 1135 + host->escb_virt = dma_alloc_coherent(&pdev->dev, sz, &host->escb_phys, 1136 + GFP_KERNEL); 1137 1137 if (!host->escb_virt) { 1138 1138 printk("inia100: ESCB memory allocation error\n"); 1139 1139 goto out_free_scb_array;
+12 -6
drivers/scsi/arcmsr/arcmsr_hba.c
··· 587 587 case ACB_ADAPTER_TYPE_B: { 588 588 struct MessageUnit_B *reg; 589 589 acb->roundup_ccbsize = roundup(sizeof(struct MessageUnit_B), 32); 590 - dma_coherent = dma_zalloc_coherent(&pdev->dev, acb->roundup_ccbsize, 591 - &dma_coherent_handle, GFP_KERNEL); 590 + dma_coherent = dma_alloc_coherent(&pdev->dev, 591 + acb->roundup_ccbsize, 592 + &dma_coherent_handle, 593 + GFP_KERNEL); 592 594 if (!dma_coherent) { 593 595 pr_notice("arcmsr%d: DMA allocation failed\n", acb->host->host_no); 594 596 return false; ··· 619 617 struct MessageUnit_D *reg; 620 618 621 619 acb->roundup_ccbsize = roundup(sizeof(struct MessageUnit_D), 32); 622 - dma_coherent = dma_zalloc_coherent(&pdev->dev, acb->roundup_ccbsize, 623 - &dma_coherent_handle, GFP_KERNEL); 620 + dma_coherent = dma_alloc_coherent(&pdev->dev, 621 + acb->roundup_ccbsize, 622 + &dma_coherent_handle, 623 + GFP_KERNEL); 624 624 if (!dma_coherent) { 625 625 pr_notice("arcmsr%d: DMA allocation failed\n", acb->host->host_no); 626 626 return false; ··· 663 659 uint32_t completeQ_size; 664 660 completeQ_size = sizeof(struct deliver_completeQ) * ARCMSR_MAX_HBE_DONEQUEUE + 128; 665 661 acb->roundup_ccbsize = roundup(completeQ_size, 32); 666 - dma_coherent = dma_zalloc_coherent(&pdev->dev, acb->roundup_ccbsize, 667 - &dma_coherent_handle, GFP_KERNEL); 662 + dma_coherent = dma_alloc_coherent(&pdev->dev, 663 + acb->roundup_ccbsize, 664 + &dma_coherent_handle, 665 + GFP_KERNEL); 668 666 if (!dma_coherent){ 669 667 pr_notice("arcmsr%d: DMA allocation failed\n", acb->host->host_no); 670 668 return false;
+2 -2
drivers/scsi/be2iscsi/be_main.c
··· 3321 3321 q->len = len; 3322 3322 q->entry_size = entry_size; 3323 3323 mem->size = len * entry_size; 3324 - mem->va = dma_zalloc_coherent(&phba->pcidev->dev, mem->size, &mem->dma, 3325 - GFP_KERNEL); 3324 + mem->va = dma_alloc_coherent(&phba->pcidev->dev, mem->size, &mem->dma, 3325 + GFP_KERNEL); 3326 3326 if (!mem->va) 3327 3327 return -ENOMEM; 3328 3328 return 0;
+5 -6
drivers/scsi/be2iscsi/be_mgmt.c
··· 293 293 struct be_dma_mem *cmd, 294 294 u8 subsystem, u8 opcode, u32 size) 295 295 { 296 - cmd->va = dma_zalloc_coherent(&phba->ctrl.pdev->dev, size, &cmd->dma, 297 - GFP_KERNEL); 296 + cmd->va = dma_alloc_coherent(&phba->ctrl.pdev->dev, size, &cmd->dma, 297 + GFP_KERNEL); 298 298 if (!cmd->va) { 299 299 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, 300 300 "BG_%d : Failed to allocate memory for if info\n"); ··· 1510 1510 return -EINVAL; 1511 1511 1512 1512 nonemb_cmd.size = sizeof(union be_invldt_cmds_params); 1513 - nonemb_cmd.va = dma_zalloc_coherent(&phba->ctrl.pdev->dev, 1514 - nonemb_cmd.size, 1515 - &nonemb_cmd.dma, 1516 - GFP_KERNEL); 1513 + nonemb_cmd.va = dma_alloc_coherent(&phba->ctrl.pdev->dev, 1514 + nonemb_cmd.size, &nonemb_cmd.dma, 1515 + GFP_KERNEL); 1517 1516 if (!nonemb_cmd.va) { 1518 1517 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_EH, 1519 1518 "BM_%d : invldt_cmds_params alloc failed\n");
+3 -3
drivers/scsi/bfa/bfad_bsg.c
··· 3264 3264 /* Allocate dma coherent memory */ 3265 3265 buf_info = buf_base; 3266 3266 buf_info->size = payload_len; 3267 - buf_info->virt = dma_zalloc_coherent(&bfad->pcidev->dev, 3268 - buf_info->size, &buf_info->phys, 3269 - GFP_KERNEL); 3267 + buf_info->virt = dma_alloc_coherent(&bfad->pcidev->dev, 3268 + buf_info->size, &buf_info->phys, 3269 + GFP_KERNEL); 3270 3270 if (!buf_info->virt) 3271 3271 goto out_free_mem; 3272 3272
+24 -25
drivers/scsi/bnx2fc/bnx2fc_hwi.c
··· 1857 1857 * entries. Hence the limit with one page is 8192 task context 1858 1858 * entries. 1859 1859 */ 1860 - hba->task_ctx_bd_tbl = dma_zalloc_coherent(&hba->pcidev->dev, 1861 - PAGE_SIZE, 1862 - &hba->task_ctx_bd_dma, 1863 - GFP_KERNEL); 1860 + hba->task_ctx_bd_tbl = dma_alloc_coherent(&hba->pcidev->dev, 1861 + PAGE_SIZE, 1862 + &hba->task_ctx_bd_dma, 1863 + GFP_KERNEL); 1864 1864 if (!hba->task_ctx_bd_tbl) { 1865 1865 printk(KERN_ERR PFX "unable to allocate task context BDT\n"); 1866 1866 rc = -1; ··· 1894 1894 task_ctx_bdt = (struct regpair *)hba->task_ctx_bd_tbl; 1895 1895 for (i = 0; i < task_ctx_arr_sz; i++) { 1896 1896 1897 - hba->task_ctx[i] = dma_zalloc_coherent(&hba->pcidev->dev, 1898 - PAGE_SIZE, 1899 - &hba->task_ctx_dma[i], 1900 - GFP_KERNEL); 1897 + hba->task_ctx[i] = dma_alloc_coherent(&hba->pcidev->dev, 1898 + PAGE_SIZE, 1899 + &hba->task_ctx_dma[i], 1900 + GFP_KERNEL); 1901 1901 if (!hba->task_ctx[i]) { 1902 1902 printk(KERN_ERR PFX "unable to alloc task context\n"); 1903 1903 rc = -1; ··· 2031 2031 } 2032 2032 2033 2033 for (i = 0; i < segment_count; ++i) { 2034 - hba->hash_tbl_segments[i] = dma_zalloc_coherent(&hba->pcidev->dev, 2035 - BNX2FC_HASH_TBL_CHUNK_SIZE, 2036 - &dma_segment_array[i], 2037 - GFP_KERNEL); 2034 + hba->hash_tbl_segments[i] = dma_alloc_coherent(&hba->pcidev->dev, 2035 + BNX2FC_HASH_TBL_CHUNK_SIZE, 2036 + &dma_segment_array[i], 2037 + GFP_KERNEL); 2038 2038 if (!hba->hash_tbl_segments[i]) { 2039 2039 printk(KERN_ERR PFX "hash segment alloc failed\n"); 2040 2040 goto cleanup_dma; 2041 2041 } 2042 2042 } 2043 2043 2044 - hba->hash_tbl_pbl = dma_zalloc_coherent(&hba->pcidev->dev, PAGE_SIZE, 2045 - &hba->hash_tbl_pbl_dma, 2046 - GFP_KERNEL); 2044 + hba->hash_tbl_pbl = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE, 2045 + &hba->hash_tbl_pbl_dma, 2046 + GFP_KERNEL); 2047 2047 if (!hba->hash_tbl_pbl) { 2048 2048 printk(KERN_ERR PFX "hash table pbl alloc failed\n"); 2049 2049 goto cleanup_dma; ··· 2104 2104 return -ENOMEM; 2105 2105 2106 2106 mem_size = BNX2FC_NUM_MAX_SESS * sizeof(struct regpair); 2107 - hba->t2_hash_tbl_ptr = dma_zalloc_coherent(&hba->pcidev->dev, 2108 - mem_size, 2109 - &hba->t2_hash_tbl_ptr_dma, 2110 - GFP_KERNEL); 2107 + hba->t2_hash_tbl_ptr = dma_alloc_coherent(&hba->pcidev->dev, mem_size, 2108 + &hba->t2_hash_tbl_ptr_dma, 2109 + GFP_KERNEL); 2111 2110 if (!hba->t2_hash_tbl_ptr) { 2112 2111 printk(KERN_ERR PFX "unable to allocate t2 hash table ptr\n"); 2113 2112 bnx2fc_free_fw_resc(hba); ··· 2115 2116 2116 2117 mem_size = BNX2FC_NUM_MAX_SESS * 2117 2118 sizeof(struct fcoe_t2_hash_table_entry); 2118 - hba->t2_hash_tbl = dma_zalloc_coherent(&hba->pcidev->dev, mem_size, 2119 - &hba->t2_hash_tbl_dma, 2120 - GFP_KERNEL); 2119 + hba->t2_hash_tbl = dma_alloc_coherent(&hba->pcidev->dev, mem_size, 2120 + &hba->t2_hash_tbl_dma, 2121 + GFP_KERNEL); 2121 2122 if (!hba->t2_hash_tbl) { 2122 2123 printk(KERN_ERR PFX "unable to allocate t2 hash table\n"); 2123 2124 bnx2fc_free_fw_resc(hba); ··· 2139 2140 return -ENOMEM; 2140 2141 } 2141 2142 2142 - hba->stats_buffer = dma_zalloc_coherent(&hba->pcidev->dev, PAGE_SIZE, 2143 - &hba->stats_buf_dma, 2144 - GFP_KERNEL); 2143 + hba->stats_buffer = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE, 2144 + &hba->stats_buf_dma, 2145 + GFP_KERNEL); 2145 2146 if (!hba->stats_buffer) { 2146 2147 printk(KERN_ERR PFX "unable to alloc Stats Buffer\n"); 2147 2148 bnx2fc_free_fw_resc(hba);
+22 -22
drivers/scsi/bnx2fc/bnx2fc_tgt.c
··· 672 672 tgt->sq_mem_size = (tgt->sq_mem_size + (CNIC_PAGE_SIZE - 1)) & 673 673 CNIC_PAGE_MASK; 674 674 675 - tgt->sq = dma_zalloc_coherent(&hba->pcidev->dev, tgt->sq_mem_size, 676 - &tgt->sq_dma, GFP_KERNEL); 675 + tgt->sq = dma_alloc_coherent(&hba->pcidev->dev, tgt->sq_mem_size, 676 + &tgt->sq_dma, GFP_KERNEL); 677 677 if (!tgt->sq) { 678 678 printk(KERN_ERR PFX "unable to allocate SQ memory %d\n", 679 679 tgt->sq_mem_size); ··· 685 685 tgt->cq_mem_size = (tgt->cq_mem_size + (CNIC_PAGE_SIZE - 1)) & 686 686 CNIC_PAGE_MASK; 687 687 688 - tgt->cq = dma_zalloc_coherent(&hba->pcidev->dev, tgt->cq_mem_size, 689 - &tgt->cq_dma, GFP_KERNEL); 688 + tgt->cq = dma_alloc_coherent(&hba->pcidev->dev, tgt->cq_mem_size, 689 + &tgt->cq_dma, GFP_KERNEL); 690 690 if (!tgt->cq) { 691 691 printk(KERN_ERR PFX "unable to allocate CQ memory %d\n", 692 692 tgt->cq_mem_size); ··· 698 698 tgt->rq_mem_size = (tgt->rq_mem_size + (CNIC_PAGE_SIZE - 1)) & 699 699 CNIC_PAGE_MASK; 700 700 701 - tgt->rq = dma_zalloc_coherent(&hba->pcidev->dev, tgt->rq_mem_size, 702 - &tgt->rq_dma, GFP_KERNEL); 701 + tgt->rq = dma_alloc_coherent(&hba->pcidev->dev, tgt->rq_mem_size, 702 + &tgt->rq_dma, GFP_KERNEL); 703 703 if (!tgt->rq) { 704 704 printk(KERN_ERR PFX "unable to allocate RQ memory %d\n", 705 705 tgt->rq_mem_size); ··· 710 710 tgt->rq_pbl_size = (tgt->rq_pbl_size + (CNIC_PAGE_SIZE - 1)) & 711 711 CNIC_PAGE_MASK; 712 712 713 - tgt->rq_pbl = dma_zalloc_coherent(&hba->pcidev->dev, tgt->rq_pbl_size, 714 - &tgt->rq_pbl_dma, GFP_KERNEL); 713 + tgt->rq_pbl = dma_alloc_coherent(&hba->pcidev->dev, tgt->rq_pbl_size, 714 + &tgt->rq_pbl_dma, GFP_KERNEL); 715 715 if (!tgt->rq_pbl) { 716 716 printk(KERN_ERR PFX "unable to allocate RQ PBL %d\n", 717 717 tgt->rq_pbl_size); ··· 735 735 tgt->xferq_mem_size = (tgt->xferq_mem_size + (CNIC_PAGE_SIZE - 1)) & 736 736 CNIC_PAGE_MASK; 737 737 738 - tgt->xferq = dma_zalloc_coherent(&hba->pcidev->dev, 739 - tgt->xferq_mem_size, &tgt->xferq_dma, 740 - GFP_KERNEL); 738 + tgt->xferq = dma_alloc_coherent(&hba->pcidev->dev, 739 + tgt->xferq_mem_size, &tgt->xferq_dma, 740 + GFP_KERNEL); 741 741 if (!tgt->xferq) { 742 742 printk(KERN_ERR PFX "unable to allocate XFERQ %d\n", 743 743 tgt->xferq_mem_size); ··· 749 749 tgt->confq_mem_size = (tgt->confq_mem_size + (CNIC_PAGE_SIZE - 1)) & 750 750 CNIC_PAGE_MASK; 751 751 752 - tgt->confq = dma_zalloc_coherent(&hba->pcidev->dev, 753 - tgt->confq_mem_size, &tgt->confq_dma, 754 - GFP_KERNEL); 752 + tgt->confq = dma_alloc_coherent(&hba->pcidev->dev, 753 + tgt->confq_mem_size, &tgt->confq_dma, 754 + GFP_KERNEL); 755 755 if (!tgt->confq) { 756 756 printk(KERN_ERR PFX "unable to allocate CONFQ %d\n", 757 757 tgt->confq_mem_size); ··· 763 763 tgt->confq_pbl_size = 764 764 (tgt->confq_pbl_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK; 765 765 766 - tgt->confq_pbl = dma_zalloc_coherent(&hba->pcidev->dev, 767 - tgt->confq_pbl_size, 768 - &tgt->confq_pbl_dma, GFP_KERNEL); 766 + tgt->confq_pbl = dma_alloc_coherent(&hba->pcidev->dev, 767 + tgt->confq_pbl_size, 768 + &tgt->confq_pbl_dma, GFP_KERNEL); 769 769 if (!tgt->confq_pbl) { 770 770 printk(KERN_ERR PFX "unable to allocate CONFQ PBL %d\n", 771 771 tgt->confq_pbl_size); ··· 787 787 /* Allocate and map ConnDB */ 788 788 tgt->conn_db_mem_size = sizeof(struct fcoe_conn_db); 789 789 790 - tgt->conn_db = dma_zalloc_coherent(&hba->pcidev->dev, 791 - tgt->conn_db_mem_size, 792 - &tgt->conn_db_dma, GFP_KERNEL); 790 + tgt->conn_db = dma_alloc_coherent(&hba->pcidev->dev, 791 + tgt->conn_db_mem_size, 792 + &tgt->conn_db_dma, GFP_KERNEL); 793 793 if (!tgt->conn_db) { 794 794 printk(KERN_ERR PFX "unable to allocate conn_db %d\n", 795 795 tgt->conn_db_mem_size); ··· 802 802 tgt->lcq_mem_size = (tgt->lcq_mem_size + (CNIC_PAGE_SIZE - 1)) & 803 803 CNIC_PAGE_MASK; 804 804 805 - tgt->lcq = dma_zalloc_coherent(&hba->pcidev->dev, tgt->lcq_mem_size, 806 - &tgt->lcq_dma, GFP_KERNEL); 805 + tgt->lcq = dma_alloc_coherent(&hba->pcidev->dev, tgt->lcq_mem_size, 806 + &tgt->lcq_dma, GFP_KERNEL); 807 807 808 808 if (!tgt->lcq) { 809 809 printk(KERN_ERR PFX "unable to allocate lcq %d\n",
+4 -4
drivers/scsi/bnx2i/bnx2i_hwi.c
··· 1070 1070 1071 1071 /* Allocate memory area for actual SQ element */ 1072 1072 ep->qp.sq_virt = 1073 - dma_zalloc_coherent(&hba->pcidev->dev, ep->qp.sq_mem_size, 1074 - &ep->qp.sq_phys, GFP_KERNEL); 1073 + dma_alloc_coherent(&hba->pcidev->dev, ep->qp.sq_mem_size, 1074 + &ep->qp.sq_phys, GFP_KERNEL); 1075 1075 if (!ep->qp.sq_virt) { 1076 1076 printk(KERN_ALERT "bnx2i: unable to alloc SQ BD memory %d\n", 1077 1077 ep->qp.sq_mem_size); ··· 1106 1106 1107 1107 /* Allocate memory area for actual CQ element */ 1108 1108 ep->qp.cq_virt = 1109 - dma_zalloc_coherent(&hba->pcidev->dev, ep->qp.cq_mem_size, 1110 - &ep->qp.cq_phys, GFP_KERNEL); 1109 + dma_alloc_coherent(&hba->pcidev->dev, ep->qp.cq_mem_size, 1110 + &ep->qp.cq_phys, GFP_KERNEL); 1111 1111 if (!ep->qp.cq_virt) { 1112 1112 printk(KERN_ALERT "bnx2i: unable to alloc CQ BD memory %d\n", 1113 1113 ep->qp.cq_mem_size);
+2 -2
drivers/scsi/csiostor/csio_wr.c
··· 233 233 234 234 q = wrm->q_arr[free_idx]; 235 235 236 - q->vstart = dma_zalloc_coherent(&hw->pdev->dev, qsz, &q->pstart, 237 - GFP_KERNEL); 236 + q->vstart = dma_alloc_coherent(&hw->pdev->dev, qsz, &q->pstart, 237 + GFP_KERNEL); 238 238 if (!q->vstart) { 239 239 csio_err(hw, 240 240 "Failed to allocate DMA memory for "
+4 -5
drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
··· 1144 1144 } 1145 1145 1146 1146 static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk, 1147 - unsigned int tid, int pg_idx, bool reply) 1147 + unsigned int tid, int pg_idx) 1148 1148 { 1149 1149 struct sk_buff *skb = alloc_wr(sizeof(struct cpl_set_tcb_field), 0, 1150 1150 GFP_KERNEL); ··· 1160 1160 req = (struct cpl_set_tcb_field *)skb->head; 1161 1161 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); 1162 1162 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid)); 1163 - req->reply = V_NO_REPLY(reply ? 0 : 1); 1163 + req->reply = V_NO_REPLY(1); 1164 1164 req->cpu_idx = 0; 1165 1165 req->word = htons(31); 1166 1166 req->mask = cpu_to_be64(0xF0000000); ··· 1177 1177 * @tid: connection id 1178 1178 * @hcrc: header digest enabled 1179 1179 * @dcrc: data digest enabled 1180 - * @reply: request reply from h/w 1181 1180 * set up the iscsi digest settings for a connection identified by tid 1182 1181 */ 1183 1182 static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid, 1184 - int hcrc, int dcrc, int reply) 1183 + int hcrc, int dcrc) 1185 1184 { 1186 1185 struct sk_buff *skb = alloc_wr(sizeof(struct cpl_set_tcb_field), 0, 1187 1186 GFP_KERNEL); ··· 1196 1197 req = (struct cpl_set_tcb_field *)skb->head; 1197 1198 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); 1198 1199 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid)); 1199 - req->reply = V_NO_REPLY(reply ? 0 : 1); 1200 + req->reply = V_NO_REPLY(1); 1200 1201 req->cpu_idx = 0; 1201 1202 req->word = htons(31); 1202 1203 req->mask = cpu_to_be64(0x0F000000);
+20 -8
drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
··· 1548 1548 struct cxgbi_sock *csk; 1549 1549 1550 1550 csk = lookup_tid(t, tid); 1551 - if (!csk) 1551 + if (!csk) { 1552 1552 pr_err("can't find conn. for tid %u.\n", tid); 1553 + return; 1554 + } 1553 1555 1554 1556 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, 1555 1557 "csk 0x%p,%u,%lx,%u, status 0x%x.\n", 1556 1558 csk, csk->state, csk->flags, csk->tid, rpl->status); 1557 1559 1558 - if (rpl->status != CPL_ERR_NONE) 1560 + if (rpl->status != CPL_ERR_NONE) { 1559 1561 pr_err("csk 0x%p,%u, SET_TCB_RPL status %u.\n", 1560 1562 csk, tid, rpl->status); 1563 + csk->err = -EINVAL; 1564 + } 1565 + 1566 + complete(&csk->cmpl); 1561 1567 1562 1568 __kfree_skb(skb); 1563 1569 } ··· 1989 1983 } 1990 1984 1991 1985 static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk, unsigned int tid, 1992 - int pg_idx, bool reply) 1986 + int pg_idx) 1993 1987 { 1994 1988 struct sk_buff *skb; 1995 1989 struct cpl_set_tcb_field *req; ··· 2005 1999 req = (struct cpl_set_tcb_field *)skb->head; 2006 2000 INIT_TP_WR(req, csk->tid); 2007 2001 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, csk->tid)); 2008 - req->reply_ctrl = htons(NO_REPLY_V(reply) | QUEUENO_V(csk->rss_qid)); 2002 + req->reply_ctrl = htons(NO_REPLY_V(0) | QUEUENO_V(csk->rss_qid)); 2009 2003 req->word_cookie = htons(0); 2010 2004 req->mask = cpu_to_be64(0x3 << 8); 2011 2005 req->val = cpu_to_be64(pg_idx << 8); ··· 2014 2008 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, 2015 2009 "csk 0x%p, tid 0x%x, pg_idx %u.\n", csk, csk->tid, pg_idx); 2016 2010 2011 + reinit_completion(&csk->cmpl); 2017 2012 cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb); 2018 - return 0; 2013 + wait_for_completion(&csk->cmpl); 2014 + 2015 + return csk->err; 2019 2016 } 2020 2017 2021 2018 static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid, 2022 - int hcrc, int dcrc, int reply) 2019 + int hcrc, int dcrc) 2023 2020 { 2024 2021 struct sk_buff *skb; 2025 2022 struct cpl_set_tcb_field *req; ··· 2040 2031 req = (struct cpl_set_tcb_field *)skb->head; 2041 2032 INIT_TP_WR(req, tid); 2042 2033 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid)); 2043 - req->reply_ctrl = htons(NO_REPLY_V(reply) | QUEUENO_V(csk->rss_qid)); 2034 + req->reply_ctrl = htons(NO_REPLY_V(0) | QUEUENO_V(csk->rss_qid)); 2044 2035 req->word_cookie = htons(0); 2045 2036 req->mask = cpu_to_be64(0x3 << 4); 2046 2037 req->val = cpu_to_be64(((hcrc ? ULP_CRC_HEADER : 0) | ··· 2050 2041 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, 2051 2042 "csk 0x%p, tid 0x%x, crc %d,%d.\n", csk, csk->tid, hcrc, dcrc); 2052 2043 2044 + reinit_completion(&csk->cmpl); 2053 2045 cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb); 2054 - return 0; 2046 + wait_for_completion(&csk->cmpl); 2047 + 2048 + return csk->err; 2055 2049 } 2056 2050 2057 2051 static struct cxgbi_ppm *cdev2ppm(struct cxgbi_device *cdev)
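Together with the init_completion() added in libcxgbi.c below, this turns the page-index and digest setup into synchronous calls: the sender parks on csk->cmpl and the SET_TCB_RPL handler records the status and wakes it. The three pieces of the pattern, condensed into one sketch:

	init_completion(&csk->cmpl);		/* once, when the socket is created */

	/* sender side */
	reinit_completion(&csk->cmpl);
	cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
	wait_for_completion(&csk->cmpl);
	return csk->err;			/* filled in by the reply handler */

	/* reply handler side */
	if (rpl->status != CPL_ERR_NONE)
		csk->err = -EINVAL;
	complete(&csk->cmpl);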
+4 -3
drivers/scsi/cxgbi/libcxgbi.c
··· 573 573 skb_queue_head_init(&csk->receive_queue); 574 574 skb_queue_head_init(&csk->write_queue); 575 575 timer_setup(&csk->retry_timer, NULL, 0); 576 + init_completion(&csk->cmpl); 576 577 rwlock_init(&csk->callback_lock); 577 578 csk->cdev = cdev; 578 579 csk->flags = 0; ··· 2252 2251 if (!err && conn->hdrdgst_en) 2253 2252 err = csk->cdev->csk_ddp_setup_digest(csk, csk->tid, 2254 2253 conn->hdrdgst_en, 2255 - conn->datadgst_en, 0); 2254 + conn->datadgst_en); 2256 2255 break; 2257 2256 case ISCSI_PARAM_DATADGST_EN: 2258 2257 err = iscsi_set_param(cls_conn, param, buf, buflen); 2259 2258 if (!err && conn->datadgst_en) 2260 2259 err = csk->cdev->csk_ddp_setup_digest(csk, csk->tid, 2261 2260 conn->hdrdgst_en, 2262 - conn->datadgst_en, 0); 2261 + conn->datadgst_en); 2263 2262 break; 2264 2263 case ISCSI_PARAM_MAX_R2T: 2265 2264 return iscsi_tcp_set_max_r2t(conn, buf); ··· 2385 2384 2386 2385 ppm = csk->cdev->cdev2ppm(csk->cdev); 2387 2386 err = csk->cdev->csk_ddp_setup_pgidx(csk, csk->tid, 2388 - ppm->tformat.pgsz_idx_dflt, 0); 2387 + ppm->tformat.pgsz_idx_dflt); 2389 2388 if (err < 0) 2390 2389 return err; 2391 2390
+3 -2
drivers/scsi/cxgbi/libcxgbi.h
··· 149 149 struct sk_buff_head receive_queue; 150 150 struct sk_buff_head write_queue; 151 151 struct timer_list retry_timer; 152 + struct completion cmpl; 152 153 int err; 153 154 rwlock_t callback_lock; 154 155 void *user_data; ··· 491 490 struct cxgbi_ppm *, 492 491 struct cxgbi_task_tag_info *); 493 492 int (*csk_ddp_setup_digest)(struct cxgbi_sock *, 494 - unsigned int, int, int, int); 493 + unsigned int, int, int); 495 494 int (*csk_ddp_setup_pgidx)(struct cxgbi_sock *, 496 - unsigned int, int, bool); 495 + unsigned int, int); 497 496 498 497 void (*csk_release_offload_resources)(struct cxgbi_sock *); 499 498 int (*csk_rx_pdu_ready)(struct cxgbi_sock *, struct sk_buff *);
+6 -6
drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
··· 2507 2507 sha->sas_port[i] = &hisi_hba->port[i].sas_port; 2508 2508 } 2509 2509 2510 + if (hisi_hba->prot_mask) { 2511 + dev_info(dev, "Registering for DIF/DIX prot_mask=0x%x\n", 2512 + prot_mask); 2513 + scsi_host_set_prot(hisi_hba->shost, prot_mask); 2514 + } 2515 + 2510 2516 rc = scsi_add_host(shost, dev); 2511 2517 if (rc) 2512 2518 goto err_out_ha; ··· 2524 2518 rc = hisi_hba->hw->hw_init(hisi_hba); 2525 2519 if (rc) 2526 2520 goto err_out_register_ha; 2527 - 2528 - if (hisi_hba->prot_mask) { 2529 - dev_info(dev, "Registering for DIF/DIX prot_mask=0x%x\n", 2530 - prot_mask); 2531 - scsi_host_set_prot(hisi_hba->shost, prot_mask); 2532 - } 2533 2521 2534 2522 scsi_scan_host(shost); 2535 2523
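Moving scsi_host_set_prot() ahead of scsi_add_host() matters because the SCSI midlayer consults the host's protection capabilities while registering it; declaring them afterwards is too late. The required ordering, reduced to a sketch (the isci hunk below makes the same fix):

	scsi_host_set_prot(shost, prot_mask);	/* declare DIF/DIX support first */
	rc = scsi_add_host(shost, dev);		/* midlayer reads the flags here */
	if (rc)
		goto err_out;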
+7 -7
drivers/scsi/isci/init.c
··· 576 576 shost->max_lun = ~0; 577 577 shost->max_cmd_len = MAX_COMMAND_SIZE; 578 578 579 + /* turn on DIF support */ 580 + scsi_host_set_prot(shost, 581 + SHOST_DIF_TYPE1_PROTECTION | 582 + SHOST_DIF_TYPE2_PROTECTION | 583 + SHOST_DIF_TYPE3_PROTECTION); 584 + scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC); 585 + 579 586 err = scsi_add_host(shost, &pdev->dev); 580 587 if (err) 581 588 goto err_shost; ··· 670 663 goto err_host_alloc; 671 664 } 672 665 pci_info->hosts[i] = h; 673 - 674 - /* turn on DIF support */ 675 - scsi_host_set_prot(to_shost(h), 676 - SHOST_DIF_TYPE1_PROTECTION | 677 - SHOST_DIF_TYPE2_PROTECTION | 678 - SHOST_DIF_TYPE3_PROTECTION); 679 - scsi_host_set_guard(to_shost(h), SHOST_DIX_GUARD_CRC); 680 666 } 681 667 682 668 err = isci_setup_interrupts(pdev);
+2 -2
drivers/scsi/lpfc/lpfc_bsg.c
··· 2730 2730 INIT_LIST_HEAD(&dmabuf->list); 2731 2731 2732 2732 /* now, allocate dma buffer */ 2733 - dmabuf->virt = dma_zalloc_coherent(&pcidev->dev, BSG_MBOX_SIZE, 2734 - &(dmabuf->phys), GFP_KERNEL); 2733 + dmabuf->virt = dma_alloc_coherent(&pcidev->dev, BSG_MBOX_SIZE, 2734 + &(dmabuf->phys), GFP_KERNEL); 2735 2735 2736 2736 if (!dmabuf->virt) { 2737 2737 kfree(dmabuf);
+7 -7
drivers/scsi/lpfc/lpfc_init.c
··· 6973 6973 if (!dmabuf) 6974 6974 return NULL; 6975 6975 6976 - dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev, 6977 - LPFC_HDR_TEMPLATE_SIZE, 6978 - &dmabuf->phys, GFP_KERNEL); 6976 + dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, 6977 + LPFC_HDR_TEMPLATE_SIZE, 6978 + &dmabuf->phys, GFP_KERNEL); 6979 6979 if (!dmabuf->virt) { 6980 6980 rpi_hdr = NULL; 6981 6981 goto err_free_dmabuf; ··· 7397 7397 } 7398 7398 7399 7399 /* Allocate memory for SLI-2 structures */ 7400 - phba->slim2p.virt = dma_zalloc_coherent(&pdev->dev, SLI2_SLIM_SIZE, 7401 - &phba->slim2p.phys, GFP_KERNEL); 7400 + phba->slim2p.virt = dma_alloc_coherent(&pdev->dev, SLI2_SLIM_SIZE, 7401 + &phba->slim2p.phys, GFP_KERNEL); 7402 7402 if (!phba->slim2p.virt) 7403 7403 goto out_iounmap; 7404 7404 ··· 7816 7816 * plus an alignment restriction of 16 bytes. 7817 7817 */ 7818 7818 bmbx_size = sizeof(struct lpfc_bmbx_create) + (LPFC_ALIGN_16_BYTE - 1); 7819 - dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev, bmbx_size, 7820 - &dmabuf->phys, GFP_KERNEL); 7819 + dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, bmbx_size, 7820 + &dmabuf->phys, GFP_KERNEL); 7821 7821 if (!dmabuf->virt) { 7822 7822 kfree(dmabuf); 7823 7823 return -ENOMEM;
+3 -3
drivers/scsi/lpfc/lpfc_mbox.c
··· 1827 1827 * page, this is used as a priori size of SLI4_PAGE_SIZE for 1828 1828 * the later DMA memory free. 1829 1829 */ 1830 - viraddr = dma_zalloc_coherent(&phba->pcidev->dev, 1831 - SLI4_PAGE_SIZE, &phyaddr, 1832 - GFP_KERNEL); 1830 + viraddr = dma_alloc_coherent(&phba->pcidev->dev, 1831 + SLI4_PAGE_SIZE, &phyaddr, 1832 + GFP_KERNEL); 1833 1833 /* In case of malloc fails, proceed with whatever we have */ 1834 1834 if (!viraddr) 1835 1835 break;
+18 -17
drivers/scsi/lpfc/lpfc_sli.c
··· 5362 5362 * mailbox command. 5363 5363 */ 5364 5364 dma_size = *vpd_size; 5365 - dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev, dma_size, 5366 - &dmabuf->phys, GFP_KERNEL); 5365 + dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, dma_size, 5366 + &dmabuf->phys, GFP_KERNEL); 5367 5367 if (!dmabuf->virt) { 5368 5368 kfree(dmabuf); 5369 5369 return -ENOMEM; ··· 6300 6300 goto free_mem; 6301 6301 } 6302 6302 6303 - dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev, 6303 + dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, 6304 6304 LPFC_RAS_MAX_ENTRY_SIZE, 6305 - &dmabuf->phys, 6306 - GFP_KERNEL); 6305 + &dmabuf->phys, GFP_KERNEL); 6307 6306 if (!dmabuf->virt) { 6308 6307 kfree(dmabuf); 6309 6308 rc = -ENOMEM; ··· 9407 9408 cmnd = CMD_XMIT_SEQUENCE64_CR; 9408 9409 if (phba->link_flag & LS_LOOPBACK_MODE) 9409 9410 bf_set(wqe_xo, &wqe->xmit_sequence.wge_ctl, 1); 9411 + /* fall through */ 9410 9412 case CMD_XMIT_SEQUENCE64_CR: 9411 9413 /* word3 iocb=io_tag32 wqe=reserved */ 9412 9414 wqe->xmit_sequence.rsvd3 = 0; ··· 13529 13529 case FC_STATUS_RQ_BUF_LEN_EXCEEDED: 13530 13530 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13531 13531 "2537 Receive Frame Truncated!!\n"); 13532 + /* fall through */ 13532 13533 case FC_STATUS_RQ_SUCCESS: 13533 13534 spin_lock_irqsave(&phba->hbalock, iflags); 13534 13535 lpfc_sli4_rq_release(hrq, drq); ··· 13939 13938 case FC_STATUS_RQ_BUF_LEN_EXCEEDED: 13940 13939 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13941 13940 "6126 Receive Frame Truncated!!\n"); 13942 - /* Drop thru */ 13941 + /* fall through */ 13943 13942 case FC_STATUS_RQ_SUCCESS: 13944 13943 spin_lock_irqsave(&phba->hbalock, iflags); 13945 13944 lpfc_sli4_rq_release(hrq, drq); ··· 14614 14613 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 14615 14614 if (!dmabuf) 14616 14615 goto out_fail; 14617 - dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev, 14618 - hw_page_size, &dmabuf->phys, 14619 - GFP_KERNEL); 14616 + dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, 14617 + hw_page_size, &dmabuf->phys, 14618 + GFP_KERNEL); 14620 14619 if (!dmabuf->virt) { 14621 14620 kfree(dmabuf); 14622 14621 goto out_fail; ··· 14851 14850 eq->entry_count); 14852 14851 if (eq->entry_count < 256) 14853 14852 return -EINVAL; 14854 - /* otherwise default to smallest count (drop through) */ 14853 + /* fall through - otherwise default to smallest count */ 14855 14854 case 256: 14856 14855 bf_set(lpfc_eq_context_count, &eq_create->u.request.context, 14857 14856 LPFC_EQ_CNT_256); ··· 14982 14981 LPFC_CQ_CNT_WORD7); 14983 14982 break; 14984 14983 } 14985 - /* Fall Thru */ 14984 + /* fall through */ 14986 14985 default: 14987 14986 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 14988 14987 "0361 Unsupported CQ count: " ··· 14993 14992 status = -EINVAL; 14994 14993 goto out; 14995 14994 } 14996 - /* otherwise default to smallest count (drop through) */ 14995 + /* fall through - otherwise default to smallest count */ 14997 14996 case 256: 14998 14997 bf_set(lpfc_cq_context_count, &cq_create->u.request.context, 14999 14998 LPFC_CQ_CNT_256); ··· 15153 15152 LPFC_CQ_CNT_WORD7); 15154 15153 break; 15155 15154 } 15156 - /* Fall Thru */ 15155 + /* fall through */ 15157 15156 default: 15158 15157 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 15159 15158 "3118 Bad CQ count. (%d)\n", ··· 15162 15161 status = -EINVAL; 15163 15162 goto out; 15164 15163 } 15165 - /* otherwise default to smallest (drop thru) */ 15164 + /* fall through - otherwise default to smallest */ 15166 15165 case 256: 15167 15166 bf_set(lpfc_mbx_cq_create_set_cqe_cnt, 15168 15167 &cq_set->u.request, LPFC_CQ_CNT_256); ··· 15434 15433 status = -EINVAL; 15435 15434 goto out; 15436 15435 } 15437 - /* otherwise default to smallest count (drop through) */ 15436 + /* fall through - otherwise default to smallest count */ 15438 15437 case 16: 15439 15438 bf_set(lpfc_mq_context_ring_size, 15440 15439 &mq_create_ext->u.request.context, ··· 15853 15852 status = -EINVAL; 15854 15853 goto out; 15855 15854 } 15856 - /* otherwise default to smallest count (drop through) */ 15855 + /* fall through - otherwise default to smallest count */ 15857 15856 case 512: 15858 15857 bf_set(lpfc_rq_context_rqe_count, 15859 15858 &rq_create->u.request.context, ··· 15990 15989 status = -EINVAL; 15991 15990 goto out; 15992 15991 } 15993 - /* otherwise default to smallest count (drop through) */ 15992 + /* fall through - otherwise default to smallest count */ 15994 15993 case 512: 15995 15994 bf_set(lpfc_rq_context_rqe_count, 15996 15995 &rq_create->u.request.context,
+8 -7
drivers/scsi/megaraid/megaraid_mbox.c
··· 967 967 * Allocate the common 16-byte aligned memory for the handshake 968 968 * mailbox. 969 969 */ 970 - raid_dev->una_mbox64 = dma_zalloc_coherent(&adapter->pdev->dev, 971 - sizeof(mbox64_t), &raid_dev->una_mbox64_dma, 972 - GFP_KERNEL); 970 + raid_dev->una_mbox64 = dma_alloc_coherent(&adapter->pdev->dev, 971 + sizeof(mbox64_t), 972 + &raid_dev->una_mbox64_dma, 973 + GFP_KERNEL); 973 974 974 975 if (!raid_dev->una_mbox64) { 975 976 con_log(CL_ANN, (KERN_WARNING ··· 996 995 align; 997 996 998 997 // Allocate memory for commands issued internally 999 - adapter->ibuf = dma_zalloc_coherent(&pdev->dev, MBOX_IBUF_SIZE, 1000 - &adapter->ibuf_dma_h, GFP_KERNEL); 998 + adapter->ibuf = dma_alloc_coherent(&pdev->dev, MBOX_IBUF_SIZE, 999 + &adapter->ibuf_dma_h, GFP_KERNEL); 1001 1000 if (!adapter->ibuf) { 1002 1001 1003 1002 con_log(CL_ANN, (KERN_WARNING ··· 2898 2897 * Issue an ENQUIRY3 command to find out certain adapter parameters, 2899 2898 * e.g., max channels, max commands etc. 2900 2899 */ 2901 - pinfo = dma_zalloc_coherent(&adapter->pdev->dev, sizeof(mraid_pinfo_t), 2902 - &pinfo_dma_h, GFP_KERNEL); 2900 + pinfo = dma_alloc_coherent(&adapter->pdev->dev, sizeof(mraid_pinfo_t), 2901 + &pinfo_dma_h, GFP_KERNEL); 2903 2902 if (pinfo == NULL) { 2904 2903 con_log(CL_ANN, (KERN_WARNING 2905 2904 "megaraid: out of memory, %s %d\n", __func__,
+14 -14
drivers/scsi/megaraid/megaraid_sas_base.c
··· 2273 2273 sizeof(struct MR_LD_VF_AFFILIATION_111)); 2274 2274 else { 2275 2275 new_affiliation_111 = 2276 - dma_zalloc_coherent(&instance->pdev->dev, 2277 - sizeof(struct MR_LD_VF_AFFILIATION_111), 2278 - &new_affiliation_111_h, GFP_KERNEL); 2276 + dma_alloc_coherent(&instance->pdev->dev, 2277 + sizeof(struct MR_LD_VF_AFFILIATION_111), 2278 + &new_affiliation_111_h, GFP_KERNEL); 2279 2279 if (!new_affiliation_111) { 2280 2280 dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate " 2281 2281 "memory for new affiliation for scsi%d\n", ··· 2380 2380 sizeof(struct MR_LD_VF_AFFILIATION)); 2381 2381 else { 2382 2382 new_affiliation = 2383 - dma_zalloc_coherent(&instance->pdev->dev, 2384 - (MAX_LOGICAL_DRIVES + 1) * 2385 - sizeof(struct MR_LD_VF_AFFILIATION), 2386 - &new_affiliation_h, GFP_KERNEL); 2383 + dma_alloc_coherent(&instance->pdev->dev, 2384 + (MAX_LOGICAL_DRIVES + 1) * sizeof(struct MR_LD_VF_AFFILIATION), 2385 + &new_affiliation_h, GFP_KERNEL); 2387 2386 if (!new_affiliation) { 2388 2387 dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate " 2389 2388 "memory for new affiliation for scsi%d\n", ··· 2545 2546 2546 2547 if (initial) { 2547 2548 instance->hb_host_mem = 2548 - dma_zalloc_coherent(&instance->pdev->dev, 2549 - sizeof(struct MR_CTRL_HB_HOST_MEM), 2550 - &instance->hb_host_mem_h, GFP_KERNEL); 2549 + dma_alloc_coherent(&instance->pdev->dev, 2550 + sizeof(struct MR_CTRL_HB_HOST_MEM), 2551 + &instance->hb_host_mem_h, 2552 + GFP_KERNEL); 2551 2553 if (!instance->hb_host_mem) { 2552 2554 dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate" 2553 2555 " memory for heartbeat host memory for scsi%d\n", ··· 5816 5816 } 5817 5817 5818 5818 dcmd = &cmd->frame->dcmd; 5819 - el_info = dma_zalloc_coherent(&instance->pdev->dev, 5820 - sizeof(struct megasas_evt_log_info), &el_info_h, 5821 - GFP_KERNEL); 5819 + el_info = dma_alloc_coherent(&instance->pdev->dev, 5820 + sizeof(struct megasas_evt_log_info), 5821 + &el_info_h, GFP_KERNEL); 5822 5822 if (!el_info) { 5823 5823 megasas_return_cmd(instance, cmd); 5824 5824 return -ENOMEM; ··· 6236 6236 instance->consistent_mask_64bit = true; 6237 6237 6238 6238 dev_info(&pdev->dev, "%s bit DMA mask and %s bit consistent mask\n", 6239 - ((*pdev->dev.dma_mask == DMA_BIT_MASK(64)) ? "63" : "32"), 6239 + ((*pdev->dev.dma_mask == DMA_BIT_MASK(63)) ? "63" : "32"), 6240 6240 (instance->consistent_mask_64bit ? "63" : "32")); 6241 6241 6242 6242 return 0;
+5 -3
drivers/scsi/megaraid/megaraid_sas_fusion.c
··· 175 175 /* 176 176 * Check if it is our interrupt 177 177 */ 178 - status = readl(&regs->outbound_intr_status); 178 + status = megasas_readl(instance, 179 + &regs->outbound_intr_status); 179 180 180 181 if (status & 1) { 181 182 writel(status, &regs->outbound_intr_status); ··· 690 689 array_size = sizeof(struct MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY) * 691 690 MAX_MSIX_QUEUES_FUSION; 692 691 693 - fusion->rdpq_virt = dma_zalloc_coherent(&instance->pdev->dev, 694 - array_size, &fusion->rdpq_phys, GFP_KERNEL); 692 + fusion->rdpq_virt = dma_alloc_coherent(&instance->pdev->dev, 693 + array_size, &fusion->rdpq_phys, 694 + GFP_KERNEL); 695 695 if (!fusion->rdpq_virt) { 696 696 dev_err(&instance->pdev->dev, 697 697 "Failed from %s %d\n", __func__, __LINE__);
+3 -2
drivers/scsi/mesh.c
··· 1915 1915 /* We use the PCI APIs for now until the generic one gets fixed 1916 1916 * enough or until we get some macio-specific versions 1917 1917 */ 1918 - dma_cmd_space = dma_zalloc_coherent(&macio_get_pci_dev(mdev)->dev, 1919 - ms->dma_cmd_size, &dma_cmd_bus, GFP_KERNEL); 1918 + dma_cmd_space = dma_alloc_coherent(&macio_get_pci_dev(mdev)->dev, 1919 + ms->dma_cmd_size, &dma_cmd_bus, 1920 + GFP_KERNEL); 1920 1921 if (dma_cmd_space == NULL) { 1921 1922 printk(KERN_ERR "mesh: can't allocate DMA table\n"); 1922 1923 goto out_unmap;
+5 -4
drivers/scsi/mvumi.c
··· 143 143 144 144 case RESOURCE_UNCACHED_MEMORY: 145 145 size = round_up(size, 8); 146 - res->virt_addr = dma_zalloc_coherent(&mhba->pdev->dev, size, 147 - &res->bus_addr, GFP_KERNEL); 146 + res->virt_addr = dma_alloc_coherent(&mhba->pdev->dev, size, 147 + &res->bus_addr, 148 + GFP_KERNEL); 148 149 if (!res->virt_addr) { 149 150 dev_err(&mhba->pdev->dev, 150 151 "unable to allocate consistent mem," ··· 247 246 if (size == 0) 248 247 return 0; 249 248 250 - virt_addr = dma_zalloc_coherent(&mhba->pdev->dev, size, &phy_addr, 251 - GFP_KERNEL); 249 + virt_addr = dma_alloc_coherent(&mhba->pdev->dev, size, &phy_addr, 250 + GFP_KERNEL); 252 251 if (!virt_addr) 253 252 return -1; 254 253
+3 -3
drivers/scsi/pm8001/pm8001_sas.c
··· 116 116 u64 align_offset = 0; 117 117 if (align) 118 118 align_offset = (dma_addr_t)align - 1; 119 - mem_virt_alloc = dma_zalloc_coherent(&pdev->dev, mem_size + align, 120 - &mem_dma_handle, GFP_KERNEL); 119 + mem_virt_alloc = dma_alloc_coherent(&pdev->dev, mem_size + align, 120 + &mem_dma_handle, GFP_KERNEL); 121 121 if (!mem_virt_alloc) { 122 122 pm8001_printk("memory allocation error\n"); 123 123 return -1; ··· 657 657 if (dev->dev_type == SAS_SATA_DEV) { 658 658 pm8001_device->attached_phy = 659 659 dev->rphy->identify.phy_identifier; 660 - flag = 1; /* directly sata*/ 660 + flag = 1; /* directly sata */ 661 661 } 662 662 } /*register this device to HBA*/ 663 663 PM8001_DISC_DBG(pm8001_ha, pm8001_printk("Found device\n"));
+17 -12
drivers/scsi/qedf/qedf_main.c
··· 1050 1050 sizeof(void *); 1051 1051 fcport->sq_pbl_size = fcport->sq_pbl_size + QEDF_PAGE_SIZE; 1052 1052 1053 - fcport->sq = dma_zalloc_coherent(&qedf->pdev->dev, 1054 - fcport->sq_mem_size, &fcport->sq_dma, GFP_KERNEL); 1053 + fcport->sq = dma_alloc_coherent(&qedf->pdev->dev, fcport->sq_mem_size, 1054 + &fcport->sq_dma, GFP_KERNEL); 1055 1055 if (!fcport->sq) { 1056 1056 QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate send queue.\n"); 1057 1057 rval = 1; 1058 1058 goto out; 1059 1059 } 1060 1060 1061 - fcport->sq_pbl = dma_zalloc_coherent(&qedf->pdev->dev, 1062 - fcport->sq_pbl_size, &fcport->sq_pbl_dma, GFP_KERNEL); 1061 + fcport->sq_pbl = dma_alloc_coherent(&qedf->pdev->dev, 1062 + fcport->sq_pbl_size, 1063 + &fcport->sq_pbl_dma, GFP_KERNEL); 1063 1064 if (!fcport->sq_pbl) { 1064 1065 QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate send queue PBL.\n"); 1065 1066 rval = 1; ··· 2681 2680 } 2682 2681 2683 2682 /* Allocate list of PBL pages */ 2684 - qedf->bdq_pbl_list = dma_zalloc_coherent(&qedf->pdev->dev, 2685 - QEDF_PAGE_SIZE, &qedf->bdq_pbl_list_dma, GFP_KERNEL); 2683 + qedf->bdq_pbl_list = dma_alloc_coherent(&qedf->pdev->dev, 2684 + QEDF_PAGE_SIZE, 2685 + &qedf->bdq_pbl_list_dma, 2686 + GFP_KERNEL); 2686 2687 if (!qedf->bdq_pbl_list) { 2687 2688 QEDF_ERR(&(qedf->dbg_ctx), "Could not allocate list of PBL pages.\n"); 2688 2689 return -ENOMEM; ··· 2773 2770 ALIGN(qedf->global_queues[i]->cq_pbl_size, QEDF_PAGE_SIZE); 2774 2771 2775 2772 qedf->global_queues[i]->cq = 2776 - dma_zalloc_coherent(&qedf->pdev->dev, 2777 - qedf->global_queues[i]->cq_mem_size, 2778 - &qedf->global_queues[i]->cq_dma, GFP_KERNEL); 2773 + dma_alloc_coherent(&qedf->pdev->dev, 2774 + qedf->global_queues[i]->cq_mem_size, 2775 + &qedf->global_queues[i]->cq_dma, 2776 + GFP_KERNEL); 2779 2777 2780 2778 if (!qedf->global_queues[i]->cq) { 2781 2779 QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate cq.\n"); ··· 2785 2781 } 2786 2782 2787 2783 qedf->global_queues[i]->cq_pbl = 2788 - dma_zalloc_coherent(&qedf->pdev->dev, 2789 - qedf->global_queues[i]->cq_pbl_size, 2790 - &qedf->global_queues[i]->cq_pbl_dma, GFP_KERNEL); 2784 + dma_alloc_coherent(&qedf->pdev->dev, 2785 + qedf->global_queues[i]->cq_pbl_size, 2786 + &qedf->global_queues[i]->cq_pbl_dma, 2787 + GFP_KERNEL); 2791 2788 2792 2789 if (!qedf->global_queues[i]->cq_pbl) { 2793 2790 QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate cq PBL.\n");
+3
drivers/scsi/qedi/qedi_iscsi.c
··· 953 953 954 954 qedi_ep = ep->dd_data; 955 955 if (qedi_ep->state == EP_STATE_IDLE || 956 + qedi_ep->state == EP_STATE_OFLDCONN_NONE || 956 957 qedi_ep->state == EP_STATE_OFLDCONN_FAILED) 957 958 return -1; 958 959 ··· 1036 1035 1037 1036 switch (qedi_ep->state) { 1038 1037 case EP_STATE_OFLDCONN_START: 1038 + case EP_STATE_OFLDCONN_NONE: 1039 1039 goto ep_release_conn; 1040 1040 case EP_STATE_OFLDCONN_FAILED: 1041 1041 break; ··· 1227 1225 1228 1226 if (!is_valid_ether_addr(&path_data->mac_addr[0])) { 1229 1227 QEDI_NOTICE(&qedi->dbg_ctx, "dst mac NOT VALID\n"); 1228 + qedi_ep->state = EP_STATE_OFLDCONN_NONE; 1230 1229 ret = -EIO; 1231 1230 goto set_path_exit; 1232 1231 }
+1
drivers/scsi/qedi/qedi_iscsi.h
··· 59 59 EP_STATE_OFLDCONN_FAILED = 0x2000, 60 60 EP_STATE_CONNECT_FAILED = 0x4000, 61 61 EP_STATE_DISCONN_TIMEDOUT = 0x8000, 62 + EP_STATE_OFLDCONN_NONE = 0x10000, 62 63 }; 63 64 64 65 struct qedi_conn;
+19 -20
drivers/scsi/qedi/qedi_main.c
··· 1394 1394 { 1395 1395 struct qedi_nvm_iscsi_image nvm_image; 1396 1396 1397 - qedi->iscsi_image = dma_zalloc_coherent(&qedi->pdev->dev, 1398 - sizeof(nvm_image), 1399 - &qedi->nvm_buf_dma, 1400 - GFP_KERNEL); 1397 + qedi->iscsi_image = dma_alloc_coherent(&qedi->pdev->dev, 1398 + sizeof(nvm_image), 1399 + &qedi->nvm_buf_dma, GFP_KERNEL); 1401 1400 if (!qedi->iscsi_image) { 1402 1401 QEDI_ERR(&qedi->dbg_ctx, "Could not allocate NVM BUF.\n"); 1403 1402 return -ENOMEM; ··· 1509 1510 } 1510 1511 1511 1512 /* Allocate list of PBL pages */ 1512 - qedi->bdq_pbl_list = dma_zalloc_coherent(&qedi->pdev->dev, 1513 - QEDI_PAGE_SIZE, 1514 - &qedi->bdq_pbl_list_dma, 1515 - GFP_KERNEL); 1513 + qedi->bdq_pbl_list = dma_alloc_coherent(&qedi->pdev->dev, 1514 + QEDI_PAGE_SIZE, 1515 + &qedi->bdq_pbl_list_dma, 1516 + GFP_KERNEL); 1516 1517 if (!qedi->bdq_pbl_list) { 1517 1518 QEDI_ERR(&qedi->dbg_ctx, 1518 1519 "Could not allocate list of PBL pages.\n"); ··· 1608 1609 (qedi->global_queues[i]->cq_pbl_size + 1609 1610 (QEDI_PAGE_SIZE - 1)); 1610 1611 1611 - qedi->global_queues[i]->cq = dma_zalloc_coherent(&qedi->pdev->dev, 1612 - qedi->global_queues[i]->cq_mem_size, 1613 - &qedi->global_queues[i]->cq_dma, 1614 - GFP_KERNEL); 1612 + qedi->global_queues[i]->cq = dma_alloc_coherent(&qedi->pdev->dev, 1613 + qedi->global_queues[i]->cq_mem_size, 1614 + &qedi->global_queues[i]->cq_dma, 1615 + GFP_KERNEL); 1615 1616 1616 1617 if (!qedi->global_queues[i]->cq) { 1617 1618 QEDI_WARN(&qedi->dbg_ctx, ··· 1619 1620 status = -ENOMEM; 1620 1621 goto mem_alloc_failure; 1621 1622 } 1622 - qedi->global_queues[i]->cq_pbl = dma_zalloc_coherent(&qedi->pdev->dev, 1623 - qedi->global_queues[i]->cq_pbl_size, 1624 - &qedi->global_queues[i]->cq_pbl_dma, 1625 - GFP_KERNEL); 1623 + qedi->global_queues[i]->cq_pbl = dma_alloc_coherent(&qedi->pdev->dev, 1624 + qedi->global_queues[i]->cq_pbl_size, 1625 + &qedi->global_queues[i]->cq_pbl_dma, 1626 + GFP_KERNEL); 1626 1627 1627 1628 if (!qedi->global_queues[i]->cq_pbl) { 1628 1629 QEDI_WARN(&qedi->dbg_ctx, ··· 1690 1691 ep->sq_pbl_size = (ep->sq_mem_size / QEDI_PAGE_SIZE) * sizeof(void *); 1691 1692 ep->sq_pbl_size = ep->sq_pbl_size + QEDI_PAGE_SIZE; 1692 1693 1693 - ep->sq = dma_zalloc_coherent(&qedi->pdev->dev, ep->sq_mem_size, 1694 - &ep->sq_dma, GFP_KERNEL); 1694 + ep->sq = dma_alloc_coherent(&qedi->pdev->dev, ep->sq_mem_size, 1695 + &ep->sq_dma, GFP_KERNEL); 1695 1696 if (!ep->sq) { 1696 1697 QEDI_WARN(&qedi->dbg_ctx, 1697 1698 "Could not allocate send queue.\n"); 1698 1699 rval = -ENOMEM; 1699 1700 goto out; 1700 1701 } 1701 - ep->sq_pbl = dma_zalloc_coherent(&qedi->pdev->dev, ep->sq_pbl_size, 1702 - &ep->sq_pbl_dma, GFP_KERNEL); 1702 + ep->sq_pbl = dma_alloc_coherent(&qedi->pdev->dev, ep->sq_pbl_size, 1703 + &ep->sq_pbl_dma, GFP_KERNEL); 1703 1704 if (!ep->sq_pbl) { 1704 1705 QEDI_WARN(&qedi->dbg_ctx, 1705 1706 "Could not allocate send queue PBL.\n");
+1 -1
drivers/scsi/qla1280.c
··· 4248 4248 ha->devnum = devnum; /* specifies microcode load address */ 4249 4249 4250 4250 #ifdef QLA_64BIT_PTR 4251 - if (dma_set_mask(&ha->pdev->dev, DMA_BIT_MASK(64))) { 4251 + if (dma_set_mask_and_coherent(&ha->pdev->dev, DMA_BIT_MASK(64))) { 4252 4252 if (dma_set_mask(&ha->pdev->dev, DMA_BIT_MASK(32))) { 4253 4253 printk(KERN_WARNING "scsi(%li): Unable to set a " 4254 4254 "suitable DMA mask - aborting\n", ha->host_no);
+2 -2
drivers/scsi/qla2xxx/qla_attr.c
··· 2415 2415 if (qla2x00_chip_is_down(vha)) 2416 2416 goto done; 2417 2417 2418 - stats = dma_zalloc_coherent(&ha->pdev->dev, sizeof(*stats), 2419 - &stats_dma, GFP_KERNEL); 2418 + stats = dma_alloc_coherent(&ha->pdev->dev, sizeof(*stats), &stats_dma, 2419 + GFP_KERNEL); 2420 2420 if (!stats) { 2421 2421 ql_log(ql_log_warn, vha, 0x707d, 2422 2422 "Failed to allocate memory for stats.\n");
+2 -2
drivers/scsi/qla2xxx/qla_bsg.c
··· 2312 2312 if (!IS_FWI2_CAPABLE(ha)) 2313 2313 return -EPERM; 2314 2314 2315 - stats = dma_zalloc_coherent(&ha->pdev->dev, sizeof(*stats), 2316 - &stats_dma, GFP_KERNEL); 2315 + stats = dma_alloc_coherent(&ha->pdev->dev, sizeof(*stats), &stats_dma, 2316 + GFP_KERNEL); 2317 2317 if (!stats) { 2318 2318 ql_log(ql_log_warn, vha, 0x70e2, 2319 2319 "Failed to allocate memory for stats.\n");
+2
drivers/scsi/qla2xxx/qla_def.h
··· 4394 4394 uint16_t n2n_id; 4395 4395 struct list_head gpnid_list; 4396 4396 struct fab_scan scan; 4397 + 4398 + unsigned int irq_offset; 4397 4399 } scsi_qla_host_t; 4398 4400 4399 4401 struct qla27xx_image_status {
+8 -6
drivers/scsi/qla2xxx/qla_gs.c
··· 4147 4147 return rval; 4148 4148 } 4149 4149 4150 - sp->u.iocb_cmd.u.ctarg.req = dma_zalloc_coherent( 4151 - &vha->hw->pdev->dev, sizeof(struct ct_sns_pkt), 4152 - &sp->u.iocb_cmd.u.ctarg.req_dma, GFP_KERNEL); 4150 + sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev, 4151 + sizeof(struct ct_sns_pkt), 4152 + &sp->u.iocb_cmd.u.ctarg.req_dma, 4153 + GFP_KERNEL); 4153 4154 sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt); 4154 4155 if (!sp->u.iocb_cmd.u.ctarg.req) { 4155 4156 ql_log(ql_log_warn, vha, 0xffff, ··· 4166 4165 ((vha->hw->max_fibre_devices - 1) * 4167 4166 sizeof(struct ct_sns_gpn_ft_data)); 4168 4167 4169 - sp->u.iocb_cmd.u.ctarg.rsp = dma_zalloc_coherent( 4170 - &vha->hw->pdev->dev, rspsz, 4171 - &sp->u.iocb_cmd.u.ctarg.rsp_dma, GFP_KERNEL); 4168 + sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev, 4169 + rspsz, 4170 + &sp->u.iocb_cmd.u.ctarg.rsp_dma, 4171 + GFP_KERNEL); 4172 4172 sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt); 4173 4173 if (!sp->u.iocb_cmd.u.ctarg.rsp) { 4174 4174 ql_log(ql_log_warn, vha, 0xffff,
+4 -4
drivers/scsi/qla2xxx/qla_init.c
··· 3099 3099 FCE_SIZE, ha->fce, ha->fce_dma); 3100 3100 3101 3101 /* Allocate memory for Fibre Channel Event Buffer. */ 3102 - tc = dma_zalloc_coherent(&ha->pdev->dev, FCE_SIZE, &tc_dma, 3103 - GFP_KERNEL); 3102 + tc = dma_alloc_coherent(&ha->pdev->dev, FCE_SIZE, &tc_dma, 3103 + GFP_KERNEL); 3104 3104 if (!tc) { 3105 3105 ql_log(ql_log_warn, vha, 0x00be, 3106 3106 "Unable to allocate (%d KB) for FCE.\n", ··· 3131 3131 EFT_SIZE, ha->eft, ha->eft_dma); 3132 3132 3133 3133 /* Allocate memory for Extended Trace Buffer. */ 3134 - tc = dma_zalloc_coherent(&ha->pdev->dev, EFT_SIZE, &tc_dma, 3135 - GFP_KERNEL); 3134 + tc = dma_alloc_coherent(&ha->pdev->dev, EFT_SIZE, &tc_dma, 3135 + GFP_KERNEL); 3136 3136 if (!tc) { 3137 3137 ql_log(ql_log_warn, vha, 0x00c1, 3138 3138 "Unable to allocate (%d KB) for EFT.\n",
+1
drivers/scsi/qla2xxx/qla_isr.c
··· 3446 3446 "Adjusted Max no of queues pairs: %d.\n", ha->max_qpairs); 3447 3447 } 3448 3448 } 3449 + vha->irq_offset = desc.pre_vectors; 3449 3450 ha->msix_entries = kcalloc(ha->msix_count, 3450 3451 sizeof(struct qla_msix_entry), 3451 3452 GFP_KERNEL);
+1 -1
drivers/scsi/qla2xxx/qla_os.c
··· 6939 6939 if (USER_CTRL_IRQ(vha->hw)) 6940 6940 rc = blk_mq_map_queues(qmap); 6941 6941 else 6942 - rc = blk_mq_pci_map_queues(qmap, vha->hw->pdev, 0); 6942 + rc = blk_mq_pci_map_queues(qmap, vha->hw->pdev, vha->irq_offset); 6943 6943 return rc; 6944 6944 } 6945 6945
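vha->irq_offset records how many MSI-X vectors were reserved through irq_affinity.pre_vectors for non-queue work; passing it to blk_mq_pci_map_queues() keeps the hctx-to-vector mapping from landing on those reserved vectors. Condensed as a sketch (QLA_BASE_VECTORS stands in for the driver's actual reservation count):

	struct irq_affinity desc = { .pre_vectors = QLA_BASE_VECTORS };

	/* at MSI-X setup time (qla_isr.c hunk above) */
	vha->irq_offset = desc.pre_vectors;

	/* at blk-mq map time */
	rc = blk_mq_pci_map_queues(qmap, vha->hw->pdev, vha->irq_offset);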
+2 -2
drivers/scsi/qla4xxx/ql4_init.c
··· 153 153 dma_addr_t sys_info_dma; 154 154 int status = QLA_ERROR; 155 155 156 - sys_info = dma_zalloc_coherent(&ha->pdev->dev, sizeof(*sys_info), 157 - &sys_info_dma, GFP_KERNEL); 156 + sys_info = dma_alloc_coherent(&ha->pdev->dev, sizeof(*sys_info), 157 + &sys_info_dma, GFP_KERNEL); 158 158 if (sys_info == NULL) { 159 159 DEBUG2(printk("scsi%ld: %s: Unable to allocate dma buffer.\n", 160 160 ha->host_no, __func__));
+9 -9
drivers/scsi/qla4xxx/ql4_mbx.c
··· 625 625 uint32_t mbox_sts[MBOX_REG_COUNT]; 626 626 int status = QLA_ERROR; 627 627 628 - init_fw_cb = dma_zalloc_coherent(&ha->pdev->dev, 629 - sizeof(struct addr_ctrl_blk), 630 - &init_fw_cb_dma, GFP_KERNEL); 628 + init_fw_cb = dma_alloc_coherent(&ha->pdev->dev, 629 + sizeof(struct addr_ctrl_blk), 630 + &init_fw_cb_dma, GFP_KERNEL); 631 631 if (init_fw_cb == NULL) { 632 632 DEBUG2(printk("scsi%ld: %s: Unable to alloc init_cb\n", 633 633 ha->host_no, __func__)); ··· 709 709 uint32_t mbox_cmd[MBOX_REG_COUNT]; 710 710 uint32_t mbox_sts[MBOX_REG_COUNT]; 711 711 712 - init_fw_cb = dma_zalloc_coherent(&ha->pdev->dev, 713 - sizeof(struct addr_ctrl_blk), 714 - &init_fw_cb_dma, GFP_KERNEL); 712 + init_fw_cb = dma_alloc_coherent(&ha->pdev->dev, 713 + sizeof(struct addr_ctrl_blk), 714 + &init_fw_cb_dma, GFP_KERNEL); 715 715 if (init_fw_cb == NULL) { 716 716 printk("scsi%ld: %s: Unable to alloc init_cb\n", ha->host_no, 717 717 __func__); ··· 1340 1340 uint32_t mbox_sts[MBOX_REG_COUNT]; 1341 1341 int status = QLA_ERROR; 1342 1342 1343 - about_fw = dma_zalloc_coherent(&ha->pdev->dev, 1344 - sizeof(struct about_fw_info), 1345 - &about_fw_dma, GFP_KERNEL); 1343 + about_fw = dma_alloc_coherent(&ha->pdev->dev, 1344 + sizeof(struct about_fw_info), 1345 + &about_fw_dma, GFP_KERNEL); 1346 1346 if (!about_fw) { 1347 1347 DEBUG2(ql4_printk(KERN_ERR, ha, "%s: Unable to alloc memory " 1348 1348 "for about_fw\n", __func__));
+2 -2
drivers/scsi/qla4xxx/ql4_nx.c
··· 4052 4052 dma_addr_t sys_info_dma; 4053 4053 int status = QLA_ERROR; 4054 4054 4055 - sys_info = dma_zalloc_coherent(&ha->pdev->dev, sizeof(*sys_info), 4056 - &sys_info_dma, GFP_KERNEL); 4055 + sys_info = dma_alloc_coherent(&ha->pdev->dev, sizeof(*sys_info), 4056 + &sys_info_dma, GFP_KERNEL); 4057 4057 if (sys_info == NULL) { 4058 4058 DEBUG2(printk("scsi%ld: %s: Unable to allocate dma buffer.\n", 4059 4059 ha->host_no, __func__));
+7 -5
drivers/scsi/qla4xxx/ql4_os.c
··· 2704 2704 uint32_t rem = len; 2705 2705 struct nlattr *attr; 2706 2706 2707 - init_fw_cb = dma_zalloc_coherent(&ha->pdev->dev, 2708 - sizeof(struct addr_ctrl_blk), 2709 - &init_fw_cb_dma, GFP_KERNEL); 2707 + init_fw_cb = dma_alloc_coherent(&ha->pdev->dev, 2708 + sizeof(struct addr_ctrl_blk), 2709 + &init_fw_cb_dma, GFP_KERNEL); 2710 2710 if (!init_fw_cb) { 2711 2711 ql4_printk(KERN_ERR, ha, "%s: Unable to alloc init_cb\n", 2712 2712 __func__); ··· 4206 4206 sizeof(struct shadow_regs) + 4207 4207 MEM_ALIGN_VALUE + 4208 4208 (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1); 4209 - ha->queues = dma_zalloc_coherent(&ha->pdev->dev, ha->queues_len, 4210 - &ha->queues_dma, GFP_KERNEL); 4209 + ha->queues = dma_alloc_coherent(&ha->pdev->dev, ha->queues_len, 4210 + &ha->queues_dma, GFP_KERNEL); 4211 4211 if (ha->queues == NULL) { 4212 4212 ql4_printk(KERN_WARNING, ha, 4213 4213 "Memory Allocation failed - queues.\n"); ··· 7232 7232 7233 7233 rc = qla4xxx_copy_from_fwddb_param(fnode_sess, fnode_conn, 7234 7234 fw_ddb_entry); 7235 + if (rc) 7236 + goto free_sess; 7235 7237 7236 7238 ql4_printk(KERN_INFO, ha, "%s: sysfs entry %s created\n", 7237 7239 __func__, fnode_sess->dev.kobj.name);
+15 -11
drivers/scsi/scsi_pm.c
··· 80 80 81 81 if (err == 0) { 82 82 pm_runtime_disable(dev); 83 - pm_runtime_set_active(dev); 83 + err = pm_runtime_set_active(dev); 84 84 pm_runtime_enable(dev); 85 + 86 + /* 87 + * Forcibly set runtime PM status of request queue to "active" 88 + * to make sure we can again get requests from the queue 89 + * (see also blk_pm_peek_request()). 90 + * 91 + * The resume hook will correct runtime PM status of the disk. 92 + */ 93 + if (!err && scsi_is_sdev_device(dev)) { 94 + struct scsi_device *sdev = to_scsi_device(dev); 95 + 96 + if (sdev->request_queue->dev) 97 + blk_set_runtime_active(sdev->request_queue); 98 + } 85 99 } 86 100 87 101 return err; ··· 153 139 fn = async_sdev_restore; 154 140 else 155 141 fn = NULL; 156 - 157 - /* 158 - * Forcibly set runtime PM status of request queue to "active" to 159 - * make sure we can again get requests from the queue (see also 160 - * blk_pm_peek_request()). 161 - * 162 - * The resume hook will correct runtime PM status of the disk. 163 - */ 164 - if (scsi_is_sdev_device(dev) && pm_runtime_suspended(dev)) 165 - blk_set_runtime_active(to_scsi_device(dev)->request_queue); 166 142 167 143 if (fn) { 168 144 async_schedule_domain(fn, dev, &scsi_sd_pm_domain);
+6
drivers/scsi/sd.c
··· 206 206 sp = buffer_data[0] & 0x80 ? 1 : 0; 207 207 buffer_data[0] &= ~0x80; 208 208 209 + /* 210 + * Ensure WP, DPOFUA, and RESERVED fields are cleared in the 211 + * received mode parameter buffer before doing MODE SELECT. 212 + */ 213 + data.device_specific = 0; 214 + 209 215 if (scsi_mode_select(sdp, 1, sp, 8, buffer_data, len, SD_TIMEOUT, 210 216 SD_MAX_RETRIES, &data, &sshdr)) { 211 217 if (scsi_sense_valid(&sshdr))
+17 -17
drivers/scsi/smartpqi/smartpqi_init.c
··· 323 323 static inline bool pqi_device_in_remove(struct pqi_ctrl_info *ctrl_info, 324 324 struct pqi_scsi_dev *device) 325 325 { 326 - return device->in_remove & !ctrl_info->in_shutdown; 326 + return device->in_remove && !ctrl_info->in_shutdown; 327 327 } 328 328 329 329 static inline void pqi_schedule_rescan_worker_with_delay( ··· 3576 3576 alloc_length += PQI_EXTRA_SGL_MEMORY; 3577 3577 3578 3578 ctrl_info->queue_memory_base = 3579 - dma_zalloc_coherent(&ctrl_info->pci_dev->dev, 3580 - alloc_length, 3581 - &ctrl_info->queue_memory_base_dma_handle, GFP_KERNEL); 3579 + dma_alloc_coherent(&ctrl_info->pci_dev->dev, alloc_length, 3580 + &ctrl_info->queue_memory_base_dma_handle, 3581 + GFP_KERNEL); 3582 3582 3583 3583 if (!ctrl_info->queue_memory_base) 3584 3584 return -ENOMEM; ··· 3715 3715 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT; 3716 3716 3717 3717 ctrl_info->admin_queue_memory_base = 3718 - dma_zalloc_coherent(&ctrl_info->pci_dev->dev, 3719 - alloc_length, 3720 - &ctrl_info->admin_queue_memory_base_dma_handle, 3721 - GFP_KERNEL); 3718 + dma_alloc_coherent(&ctrl_info->pci_dev->dev, alloc_length, 3719 + &ctrl_info->admin_queue_memory_base_dma_handle, 3720 + GFP_KERNEL); 3722 3721 3723 3722 if (!ctrl_info->admin_queue_memory_base) 3724 3723 return -ENOMEM; ··· 4601 4602 4602 4603 static inline int pqi_alloc_error_buffer(struct pqi_ctrl_info *ctrl_info) 4603 4604 { 4604 - ctrl_info->error_buffer = dma_zalloc_coherent(&ctrl_info->pci_dev->dev, 4605 - ctrl_info->error_buffer_length, 4606 - &ctrl_info->error_buffer_dma_handle, GFP_KERNEL); 4605 + ctrl_info->error_buffer = dma_alloc_coherent(&ctrl_info->pci_dev->dev, 4606 + ctrl_info->error_buffer_length, 4607 + &ctrl_info->error_buffer_dma_handle, 4608 + GFP_KERNEL); 4607 4609 4608 4610 if (!ctrl_info->error_buffer) 4609 4611 return -ENOMEM; ··· 7487 7487 dma_addr_t dma_handle; 7488 7488 7489 7489 ctrl_info->pqi_ofa_chunk_virt_addr[i] = 7490 - dma_zalloc_coherent(dev, chunk_size, &dma_handle, 7491 - GFP_KERNEL); 7490 + dma_alloc_coherent(dev, chunk_size, &dma_handle, 7491 + GFP_KERNEL); 7492 7492 7493 7493 if (!ctrl_info->pqi_ofa_chunk_virt_addr[i]) 7494 7494 break; ··· 7545 7545 struct device *dev; 7546 7546 7547 7547 dev = &ctrl_info->pci_dev->dev; 7548 - pqi_ofa_memory = dma_zalloc_coherent(dev, 7549 - PQI_OFA_MEMORY_DESCRIPTOR_LENGTH, 7550 - &ctrl_info->pqi_ofa_mem_dma_handle, 7551 - GFP_KERNEL); 7548 + pqi_ofa_memory = dma_alloc_coherent(dev, 7549 + PQI_OFA_MEMORY_DESCRIPTOR_LENGTH, 7550 + &ctrl_info->pqi_ofa_mem_dma_handle, 7551 + GFP_KERNEL); 7552 7552 7553 7553 if (!pqi_ofa_memory) 7554 7554 return;
+1 -1
drivers/scsi/ufs/ufs.h
··· 195 195 QUERY_DESC_CONFIGURATION_DEF_SIZE = 0x90, 196 196 QUERY_DESC_UNIT_DEF_SIZE = 0x23, 197 197 QUERY_DESC_INTERCONNECT_DEF_SIZE = 0x06, 198 - QUERY_DESC_GEOMETRY_DEF_SIZE = 0x44, 198 + QUERY_DESC_GEOMETRY_DEF_SIZE = 0x48, 199 199 QUERY_DESC_POWER_DEF_SIZE = 0x62, 200 200 QUERY_DESC_HEALTH_DEF_SIZE = 0x25, 201 201 };
+2
drivers/scsi/ufs/ufshcd.c
··· 8001 8001 trace_ufshcd_system_resume(dev_name(hba->dev), ret, 8002 8002 ktime_to_us(ktime_sub(ktime_get(), start)), 8003 8003 hba->curr_dev_pwr_mode, hba->uic_link_state); 8004 + if (!ret) 8005 + hba->is_sys_suspended = false; 8004 8006 return ret; 8005 8007 } 8006 8008 EXPORT_SYMBOL(ufshcd_system_resume);
+1 -1
drivers/soc/fsl/qbman/dpaa_sys.c
··· 62 62 return -ENODEV; 63 63 } 64 64 65 - if (!dma_zalloc_coherent(dev, *size, addr, 0)) { 65 + if (!dma_alloc_coherent(dev, *size, addr, 0)) { 66 66 dev_err(dev, "DMA Alloc memory failed\n"); 67 67 return -ENODEV; 68 68 }
-55
drivers/soc/fsl/qe/qe_tdm.c
··· 44 44 const char *sprop; 45 45 int ret = 0; 46 46 u32 val; 47 - struct resource *res; 48 - struct device_node *np2; 49 - static int siram_init_flag; 50 - struct platform_device *pdev; 51 47 52 48 sprop = of_get_property(np, "fsl,rx-sync-clock", NULL); 53 49 if (sprop) { ··· 120 124 utdm->siram_entry_id = val; 121 125 122 126 set_si_param(utdm, ut_info); 123 - 124 - np2 = of_find_compatible_node(NULL, NULL, "fsl,t1040-qe-si"); 125 - if (!np2) 126 - return -EINVAL; 127 - 128 - pdev = of_find_device_by_node(np2); 129 - if (!pdev) { 130 - pr_err("%pOFn: failed to lookup pdev\n", np2); 131 - of_node_put(np2); 132 - return -EINVAL; 133 - } 134 - 135 - of_node_put(np2); 136 - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 137 - utdm->si_regs = devm_ioremap_resource(&pdev->dev, res); 138 - if (IS_ERR(utdm->si_regs)) { 139 - ret = PTR_ERR(utdm->si_regs); 140 - goto err_miss_siram_property; 141 - } 142 - 143 - np2 = of_find_compatible_node(NULL, NULL, "fsl,t1040-qe-siram"); 144 - if (!np2) { 145 - ret = -EINVAL; 146 - goto err_miss_siram_property; 147 - } 148 - 149 - pdev = of_find_device_by_node(np2); 150 - if (!pdev) { 151 - ret = -EINVAL; 152 - pr_err("%pOFn: failed to lookup pdev\n", np2); 153 - of_node_put(np2); 154 - goto err_miss_siram_property; 155 - } 156 - 157 - of_node_put(np2); 158 - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 159 - utdm->siram = devm_ioremap_resource(&pdev->dev, res); 160 - if (IS_ERR(utdm->siram)) { 161 - ret = PTR_ERR(utdm->siram); 162 - goto err_miss_siram_property; 163 - } 164 - 165 - if (siram_init_flag == 0) { 166 - memset_io(utdm->siram, 0, resource_size(res)); 167 - siram_init_flag = 1; 168 - } 169 - 170 - return ret; 171 - 172 - err_miss_siram_property: 173 - devm_iounmap(&pdev->dev, utdm->si_regs); 174 127 return ret; 175 128 } 176 129 EXPORT_SYMBOL(ucc_of_parse_tdm);
+1 -1
drivers/soc/renesas/Kconfig
··· 44 44 bool 45 45 select ARM_AMBA 46 46 47 - if ARM 47 + if ARM && ARCH_RENESAS 48 48 49 49 #comment "Renesas ARM SoCs System Type" 50 50
+4 -19
drivers/soc/renesas/r8a774c0-sysc.c
··· 28 28 { "3dg-b", 0x100, 1, R8A774C0_PD_3DG_B, R8A774C0_PD_3DG_A }, 29 29 }; 30 30 31 - static void __init rcar_sysc_fix_parent(struct rcar_sysc_area *areas, 32 - unsigned int num_areas, u8 id, 33 - int new_parent) 34 - { 35 - unsigned int i; 36 - 37 - for (i = 0; i < num_areas; i++) 38 - if (areas[i].isr_bit == id) { 39 - areas[i].parent = new_parent; 40 - return; 41 - } 42 - } 43 - 44 31 /* Fixups for RZ/G2E ES1.0 revision */ 45 32 static const struct soc_device_attribute r8a774c0[] __initconst = { 46 33 { .soc_id = "r8a774c0", .revision = "ES1.0" }, ··· 37 50 static int __init r8a774c0_sysc_init(void) 38 51 { 39 52 if (soc_device_match(r8a774c0)) { 40 - rcar_sysc_fix_parent(r8a774c0_areas, 41 - ARRAY_SIZE(r8a774c0_areas), 42 - R8A774C0_PD_3DG_A, R8A774C0_PD_3DG_B); 43 - rcar_sysc_fix_parent(r8a774c0_areas, 44 - ARRAY_SIZE(r8a774c0_areas), 45 - R8A774C0_PD_3DG_B, R8A774C0_PD_ALWAYS_ON); 53 + /* Fix incorrect 3DG hierarchy */ 54 + swap(r8a774c0_areas[6], r8a774c0_areas[7]); 55 + r8a774c0_areas[6].parent = R8A774C0_PD_ALWAYS_ON; 56 + r8a774c0_areas[7].parent = R8A774C0_PD_3DG_B; 46 57 } 47 58 48 59 return 0;
+3 -3
drivers/spi/spi-pic32-sqi.c
··· 466 466 int i; 467 467 468 468 /* allocate coherent DMAable memory for hardware buffer descriptors. */ 469 - sqi->bd = dma_zalloc_coherent(&sqi->master->dev, 470 - sizeof(*bd) * PESQI_BD_COUNT, 471 - &sqi->bd_dma, GFP_KERNEL); 469 + sqi->bd = dma_alloc_coherent(&sqi->master->dev, 470 + sizeof(*bd) * PESQI_BD_COUNT, 471 + &sqi->bd_dma, GFP_KERNEL); 472 472 if (!sqi->bd) { 473 473 dev_err(&sqi->master->dev, "failed allocating dma buffer\n"); 474 474 return -ENOMEM;
+1 -2
drivers/staging/mt7621-eth/mtk_eth_soc.c
··· 1396 1396 if (!ring->tx_buf) 1397 1397 goto no_tx_mem; 1398 1398 1399 - ring->tx_dma = dma_zalloc_coherent(eth->dev, 1400 - ring->tx_ring_size * sz, 1399 + ring->tx_dma = dma_alloc_coherent(eth->dev, ring->tx_ring_size * sz, 1401 1400 &ring->tx_phys, 1402 1401 GFP_ATOMIC | __GFP_ZERO); 1403 1402 if (!ring->tx_dma)
+3 -3
drivers/staging/rtl8188eu/core/rtw_security.c
··· 154 154 155 155 pframe = ((struct xmit_frame *)pxmitframe)->buf_addr + hw_hdr_offset; 156 156 157 - crypto_ops = try_then_request_module(lib80211_get_crypto_ops("WEP"), "lib80211_crypt_wep"); 157 + crypto_ops = lib80211_get_crypto_ops("WEP"); 158 158 159 159 if (!crypto_ops) 160 160 return; ··· 210 210 void *crypto_private = NULL; 211 211 int status = _SUCCESS; 212 212 const int keyindex = prxattrib->key_index; 213 - struct lib80211_crypto_ops *crypto_ops = try_then_request_module(lib80211_get_crypto_ops("WEP"), "lib80211_crypt_wep"); 213 + struct lib80211_crypto_ops *crypto_ops = lib80211_get_crypto_ops("WEP"); 214 214 char iv[4], icv[4]; 215 215 216 216 if (!crypto_ops) { ··· 1291 1291 struct sk_buff *skb = ((struct recv_frame *)precvframe)->pkt; 1292 1292 void *crypto_private = NULL; 1293 1293 u8 *key, *pframe = skb->data; 1294 - struct lib80211_crypto_ops *crypto_ops = try_then_request_module(lib80211_get_crypto_ops("CCMP"), "lib80211_crypt_ccmp"); 1294 + struct lib80211_crypto_ops *crypto_ops = lib80211_get_crypto_ops("CCMP"); 1295 1295 struct security_priv *psecuritypriv = &padapter->securitypriv; 1296 1296 char iv[8], icv[8]; 1297 1297
+1 -1
drivers/staging/rtl8723bs/os_dep/sdio_intf.c
··· 22 22 { SDIO_DEVICE(0x024c, 0xb723), }, 23 23 { /* end: all zeroes */ }, 24 24 }; 25 - static const struct acpi_device_id acpi_ids[] __used = { 25 + static const struct acpi_device_id acpi_ids[] = { 26 26 {"OBDA8723", 0x0000}, 27 27 {} 28 28 };
+2 -4
drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
··· 407 407 /* Allocate enough storage to hold the page pointers and the page 408 408 * list 409 409 */ 410 - pagelist = dma_zalloc_coherent(g_dev, 411 - pagelist_size, 412 - &dma_addr, 413 - GFP_KERNEL); 410 + pagelist = dma_alloc_coherent(g_dev, pagelist_size, &dma_addr, 411 + GFP_KERNEL); 414 412 415 413 vchiq_log_trace(vchiq_arm_log_level, "%s - %pK", __func__, pagelist); 416 414
+6 -13
drivers/staging/vt6655/device_main.c
··· 440 440 void *vir_pool; 441 441 442 442 /*allocate all RD/TD rings a single pool*/ 443 - vir_pool = dma_zalloc_coherent(&priv->pcid->dev, 444 - priv->opts.rx_descs0 * sizeof(struct vnt_rx_desc) + 445 - priv->opts.rx_descs1 * sizeof(struct vnt_rx_desc) + 446 - priv->opts.tx_descs[0] * sizeof(struct vnt_tx_desc) + 447 - priv->opts.tx_descs[1] * sizeof(struct vnt_tx_desc), 448 - &priv->pool_dma, GFP_ATOMIC); 443 + vir_pool = dma_alloc_coherent(&priv->pcid->dev, 444 + priv->opts.rx_descs0 * sizeof(struct vnt_rx_desc) + priv->opts.rx_descs1 * sizeof(struct vnt_rx_desc) + priv->opts.tx_descs[0] * sizeof(struct vnt_tx_desc) + priv->opts.tx_descs[1] * sizeof(struct vnt_tx_desc), 445 + &priv->pool_dma, GFP_ATOMIC); 449 446 if (!vir_pool) { 450 447 dev_err(&priv->pcid->dev, "allocate desc dma memory failed\n"); 451 448 return false; ··· 456 459 priv->rd1_pool_dma = priv->rd0_pool_dma + 457 460 priv->opts.rx_descs0 * sizeof(struct vnt_rx_desc); 458 461 459 - priv->tx0_bufs = dma_zalloc_coherent(&priv->pcid->dev, 460 - priv->opts.tx_descs[0] * PKT_BUF_SZ + 461 - priv->opts.tx_descs[1] * PKT_BUF_SZ + 462 - CB_BEACON_BUF_SIZE + 463 - CB_MAX_BUF_SIZE, 464 - &priv->tx_bufs_dma0, 465 - GFP_ATOMIC); 462 + priv->tx0_bufs = dma_alloc_coherent(&priv->pcid->dev, 463 + priv->opts.tx_descs[0] * PKT_BUF_SZ + priv->opts.tx_descs[1] * PKT_BUF_SZ + CB_BEACON_BUF_SIZE + CB_MAX_BUF_SIZE, 464 + &priv->tx_bufs_dma0, GFP_ATOMIC); 466 465 if (!priv->tx0_bufs) { 467 466 dev_err(&priv->pcid->dev, "allocate buf dma memory failed\n"); 468 467
+1 -1
drivers/target/iscsi/iscsi_target.c
··· 714 714 sizeof(struct iscsi_queue_req), 715 715 __alignof__(struct iscsi_queue_req), 0, NULL); 716 716 if (!lio_qr_cache) { 717 - pr_err("nable to kmem_cache_create() for" 717 + pr_err("Unable to kmem_cache_create() for" 718 718 " lio_qr_cache\n"); 719 719 goto bitmap_out; 720 720 }
+61 -27
drivers/target/target_core_user.c
··· 148 148 size_t ring_size; 149 149 150 150 struct mutex cmdr_lock; 151 - struct list_head cmdr_queue; 151 + struct list_head qfull_queue; 152 152 153 153 uint32_t dbi_max; 154 154 uint32_t dbi_thresh; ··· 159 159 160 160 struct timer_list cmd_timer; 161 161 unsigned int cmd_time_out; 162 + struct list_head inflight_queue; 162 163 163 164 struct timer_list qfull_timer; 164 165 int qfull_time_out; ··· 180 179 struct tcmu_cmd { 181 180 struct se_cmd *se_cmd; 182 181 struct tcmu_dev *tcmu_dev; 183 - struct list_head cmdr_queue_entry; 182 + struct list_head queue_entry; 184 183 185 184 uint16_t cmd_id; 186 185 ··· 193 192 unsigned long deadline; 194 193 195 194 #define TCMU_CMD_BIT_EXPIRED 0 195 + #define TCMU_CMD_BIT_INFLIGHT 1 196 196 unsigned long flags; 197 197 }; 198 198 /* ··· 588 586 if (!tcmu_cmd) 589 587 return NULL; 590 588 591 - INIT_LIST_HEAD(&tcmu_cmd->cmdr_queue_entry); 589 + INIT_LIST_HEAD(&tcmu_cmd->queue_entry); 592 590 tcmu_cmd->se_cmd = se_cmd; 593 591 tcmu_cmd->tcmu_dev = udev; 594 592 ··· 917 915 return 0; 918 916 919 917 tcmu_cmd->deadline = round_jiffies_up(jiffies + msecs_to_jiffies(tmo)); 920 - mod_timer(timer, tcmu_cmd->deadline); 918 + if (!timer_pending(timer)) 919 + mod_timer(timer, tcmu_cmd->deadline); 920 + 921 921 return 0; 922 922 } 923 923 924 - static int add_to_cmdr_queue(struct tcmu_cmd *tcmu_cmd) 924 + static int add_to_qfull_queue(struct tcmu_cmd *tcmu_cmd) 925 925 { 926 926 struct tcmu_dev *udev = tcmu_cmd->tcmu_dev; 927 927 unsigned int tmo; ··· 946 942 if (ret) 947 943 return ret; 948 944 949 - list_add_tail(&tcmu_cmd->cmdr_queue_entry, &udev->cmdr_queue); 945 + list_add_tail(&tcmu_cmd->queue_entry, &udev->qfull_queue); 950 946 pr_debug("adding cmd %u on dev %s to ring space wait queue\n", 951 947 tcmu_cmd->cmd_id, udev->name); 952 948 return 0; ··· 1003 999 base_command_size = tcmu_cmd_get_base_cmd_size(tcmu_cmd->dbi_cnt); 1004 1000 command_size = tcmu_cmd_get_cmd_size(tcmu_cmd, base_command_size); 1005 1001 1006 - if (!list_empty(&udev->cmdr_queue)) 1002 + if (!list_empty(&udev->qfull_queue)) 1007 1003 goto queue; 1008 1004 1009 1005 mb = udev->mb_addr; ··· 1100 1096 UPDATE_HEAD(mb->cmd_head, command_size, udev->cmdr_size); 1101 1097 tcmu_flush_dcache_range(mb, sizeof(*mb)); 1102 1098 1099 + list_add_tail(&tcmu_cmd->queue_entry, &udev->inflight_queue); 1100 + set_bit(TCMU_CMD_BIT_INFLIGHT, &tcmu_cmd->flags); 1101 + 1103 1102 /* TODO: only if FLUSH and FUA? 
*/ 1104 1103 uio_event_notify(&udev->uio_info); 1105 1104 1106 1105 return 0; 1107 1106 1108 1107 queue: 1109 - if (add_to_cmdr_queue(tcmu_cmd)) { 1108 + if (add_to_qfull_queue(tcmu_cmd)) { 1110 1109 *scsi_err = TCM_OUT_OF_RESOURCES; 1111 1110 return -1; 1112 1111 } ··· 1151 1144 */ 1152 1145 if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) 1153 1146 goto out; 1147 + 1148 + list_del_init(&cmd->queue_entry); 1154 1149 1155 1150 tcmu_cmd_reset_dbi_cur(cmd); 1156 1151 ··· 1203 1194 tcmu_free_cmd(cmd); 1204 1195 } 1205 1196 1197 + static void tcmu_set_next_deadline(struct list_head *queue, 1198 + struct timer_list *timer) 1199 + { 1200 + struct tcmu_cmd *tcmu_cmd, *tmp_cmd; 1201 + unsigned long deadline = 0; 1202 + 1203 + list_for_each_entry_safe(tcmu_cmd, tmp_cmd, queue, queue_entry) { 1204 + if (!time_after(jiffies, tcmu_cmd->deadline)) { 1205 + deadline = tcmu_cmd->deadline; 1206 + break; 1207 + } 1208 + } 1209 + 1210 + if (deadline) 1211 + mod_timer(timer, deadline); 1212 + else 1213 + del_timer(timer); 1214 + } 1215 + 1206 1216 static unsigned int tcmu_handle_completions(struct tcmu_dev *udev) 1207 1217 { 1208 1218 struct tcmu_mailbox *mb; 1219 + struct tcmu_cmd *cmd; 1209 1220 int handled = 0; 1210 1221 1211 1222 if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags)) { ··· 1239 1210 while (udev->cmdr_last_cleaned != READ_ONCE(mb->cmd_tail)) { 1240 1211 1241 1212 struct tcmu_cmd_entry *entry = (void *) mb + CMDR_OFF + udev->cmdr_last_cleaned; 1242 - struct tcmu_cmd *cmd; 1243 1213 1244 1214 tcmu_flush_dcache_range(entry, sizeof(*entry)); 1245 1215 ··· 1271 1243 /* no more pending commands */ 1272 1244 del_timer(&udev->cmd_timer); 1273 1245 1274 - if (list_empty(&udev->cmdr_queue)) { 1246 + if (list_empty(&udev->qfull_queue)) { 1275 1247 /* 1276 1248 * no more pending or waiting commands so try to 1277 1249 * reclaim blocks if needed. ··· 1280 1252 tcmu_global_max_blocks) 1281 1253 schedule_delayed_work(&tcmu_unmap_work, 0); 1282 1254 } 1255 + } else if (udev->cmd_time_out) { 1256 + tcmu_set_next_deadline(&udev->inflight_queue, &udev->cmd_timer); 1283 1257 } 1284 1258 1285 1259 return handled; ··· 1301 1271 if (!time_after(jiffies, cmd->deadline)) 1302 1272 return 0; 1303 1273 1304 - is_running = list_empty(&cmd->cmdr_queue_entry); 1274 + is_running = test_bit(TCMU_CMD_BIT_INFLIGHT, &cmd->flags); 1305 1275 se_cmd = cmd->se_cmd; 1306 1276 1307 1277 if (is_running) { ··· 1318 1288 */ 1319 1289 scsi_status = SAM_STAT_CHECK_CONDITION; 1320 1290 } else { 1321 - list_del_init(&cmd->cmdr_queue_entry); 1322 - 1323 1291 idr_remove(&udev->commands, id); 1324 1292 tcmu_free_cmd(cmd); 1325 1293 scsi_status = SAM_STAT_TASK_SET_FULL; 1326 1294 } 1295 + list_del_init(&cmd->queue_entry); 1327 1296 1328 1297 pr_debug("Timing out cmd %u on dev %s that is %s.\n", 1329 1298 id, udev->name, is_running ? 
"inflight" : "queued"); ··· 1401 1372 1402 1373 INIT_LIST_HEAD(&udev->node); 1403 1374 INIT_LIST_HEAD(&udev->timedout_entry); 1404 - INIT_LIST_HEAD(&udev->cmdr_queue); 1375 + INIT_LIST_HEAD(&udev->qfull_queue); 1376 + INIT_LIST_HEAD(&udev->inflight_queue); 1405 1377 idr_init(&udev->commands); 1406 1378 1407 1379 timer_setup(&udev->qfull_timer, tcmu_qfull_timedout, 0); ··· 1413 1383 return &udev->se_dev; 1414 1384 } 1415 1385 1416 - static bool run_cmdr_queue(struct tcmu_dev *udev, bool fail) 1386 + static bool run_qfull_queue(struct tcmu_dev *udev, bool fail) 1417 1387 { 1418 1388 struct tcmu_cmd *tcmu_cmd, *tmp_cmd; 1419 1389 LIST_HEAD(cmds); ··· 1421 1391 sense_reason_t scsi_ret; 1422 1392 int ret; 1423 1393 1424 - if (list_empty(&udev->cmdr_queue)) 1394 + if (list_empty(&udev->qfull_queue)) 1425 1395 return true; 1426 1396 1427 1397 pr_debug("running %s's cmdr queue forcefail %d\n", udev->name, fail); 1428 1398 1429 - list_splice_init(&udev->cmdr_queue, &cmds); 1399 + list_splice_init(&udev->qfull_queue, &cmds); 1430 1400 1431 - list_for_each_entry_safe(tcmu_cmd, tmp_cmd, &cmds, cmdr_queue_entry) { 1432 - list_del_init(&tcmu_cmd->cmdr_queue_entry); 1401 + list_for_each_entry_safe(tcmu_cmd, tmp_cmd, &cmds, queue_entry) { 1402 + list_del_init(&tcmu_cmd->queue_entry); 1433 1403 1434 1404 pr_debug("removing cmd %u on dev %s from queue\n", 1435 1405 tcmu_cmd->cmd_id, udev->name); ··· 1467 1437 * cmd was requeued, so just put all cmds back in 1468 1438 * the queue 1469 1439 */ 1470 - list_splice_tail(&cmds, &udev->cmdr_queue); 1440 + list_splice_tail(&cmds, &udev->qfull_queue); 1471 1441 drained = false; 1472 - goto done; 1442 + break; 1473 1443 } 1474 1444 } 1475 - if (list_empty(&udev->cmdr_queue)) 1476 - del_timer(&udev->qfull_timer); 1477 - done: 1445 + 1446 + tcmu_set_next_deadline(&udev->qfull_queue, &udev->qfull_timer); 1478 1447 return drained; 1479 1448 } 1480 1449 ··· 1483 1454 1484 1455 mutex_lock(&udev->cmdr_lock); 1485 1456 tcmu_handle_completions(udev); 1486 - run_cmdr_queue(udev, false); 1457 + run_qfull_queue(udev, false); 1487 1458 mutex_unlock(&udev->cmdr_lock); 1488 1459 1489 1460 return 0; ··· 2011 1982 /* complete IO that has executed successfully */ 2012 1983 tcmu_handle_completions(udev); 2013 1984 /* fail IO waiting to be queued */ 2014 - run_cmdr_queue(udev, true); 1985 + run_qfull_queue(udev, true); 2015 1986 2016 1987 unlock: 2017 1988 mutex_unlock(&udev->cmdr_lock); ··· 2026 1997 mutex_lock(&udev->cmdr_lock); 2027 1998 2028 1999 idr_for_each_entry(&udev->commands, cmd, i) { 2029 - if (!list_empty(&cmd->cmdr_queue_entry)) 2000 + if (!test_bit(TCMU_CMD_BIT_INFLIGHT, &cmd->flags)) 2030 2001 continue; 2031 2002 2032 2003 pr_debug("removing cmd %u on dev %s from ring (is expired %d)\n", ··· 2035 2006 2036 2007 idr_remove(&udev->commands, i); 2037 2008 if (!test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) { 2009 + list_del_init(&cmd->queue_entry); 2038 2010 if (err_level == 1) { 2039 2011 /* 2040 2012 * Userspace was not able to start the ··· 2696 2666 2697 2667 mutex_lock(&udev->cmdr_lock); 2698 2668 idr_for_each(&udev->commands, tcmu_check_expired_cmd, NULL); 2669 + 2670 + tcmu_set_next_deadline(&udev->inflight_queue, &udev->cmd_timer); 2671 + tcmu_set_next_deadline(&udev->qfull_queue, &udev->qfull_timer); 2672 + 2699 2673 mutex_unlock(&udev->cmdr_lock); 2700 2674 2701 2675 spin_lock_bh(&timed_out_udevs_lock);
+1 -1
drivers/thermal/intel/int340x_thermal/Kconfig
··· 4 4 5 5 config INT340X_THERMAL 6 6 tristate "ACPI INT340X thermal drivers" 7 - depends on X86 && ACPI 7 + depends on X86 && ACPI && PCI 8 8 select THERMAL_GOV_USER_SPACE 9 9 select ACPI_THERMAL_REL 10 10 select ACPI_FAN
+12
drivers/tty/serial/Kconfig
··· 85 85 with "earlycon=smh" on the kernel command line. The console is 86 86 enabled when early_param is processed. 87 87 88 + config SERIAL_EARLYCON_RISCV_SBI 89 + bool "Early console using RISC-V SBI" 90 + depends on RISCV 91 + select SERIAL_CORE 92 + select SERIAL_CORE_CONSOLE 93 + select SERIAL_EARLYCON 94 + help 95 + Support for an early debug console using RISC-V SBI. This enables 96 + the console before the standard serial driver is probed. This is enabled 97 + with "earlycon=sbi" on the kernel command line. The console is 98 + enabled when early_param is processed. 99 + 88 100 config SERIAL_SB1250_DUART 89 101 tristate "BCM1xxx on-chip DUART serial support" 90 102 depends on SIBYTE_SB1xxx_SOC=y
+1
drivers/tty/serial/Makefile
··· 7 7 8 8 obj-$(CONFIG_SERIAL_EARLYCON) += earlycon.o 9 9 obj-$(CONFIG_SERIAL_EARLYCON_ARM_SEMIHOST) += earlycon-arm-semihost.o 10 + obj-$(CONFIG_SERIAL_EARLYCON_RISCV_SBI) += earlycon-riscv-sbi.o 10 11 11 12 # These Sparc drivers have to appear before others such as 8250 12 13 # which share ttySx minor node space. Otherwise console device
+28
drivers/tty/serial/earlycon-riscv-sbi.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* 3 + * RISC-V SBI based earlycon 4 + * 5 + * Copyright (C) 2018 Anup Patel <anup@brainfault.org> 6 + */ 7 + #include <linux/kernel.h> 8 + #include <linux/console.h> 9 + #include <linux/init.h> 10 + #include <linux/serial_core.h> 11 + #include <asm/sbi.h> 12 + 13 + static void sbi_console_write(struct console *con, 14 + const char *s, unsigned int n) 15 + { 16 + int i; 17 + 18 + for (i = 0; i < n; ++i) 19 + sbi_console_putchar(s[i]); 20 + } 21 + 22 + static int __init early_sbi_setup(struct earlycon_device *device, 23 + const char *opt) 24 + { 25 + device->con->write = sbi_console_write; 26 + return 0; 27 + } 28 + EARLYCON_DECLARE(sbi, early_sbi_setup);
+19 -17
drivers/tty/serial/lantiq.c
··· 114 114 115 115 static inline void asc_update_bits(u32 clear, u32 set, void __iomem *reg) 116 116 { 117 - u32 tmp = readl(reg); 117 + u32 tmp = __raw_readl(reg); 118 118 119 - writel((tmp & ~clear) | set, reg); 119 + __raw_writel((tmp & ~clear) | set, reg); 120 120 } 121 121 122 122 static inline struct ··· 144 144 static void 145 145 lqasc_stop_rx(struct uart_port *port) 146 146 { 147 - writel(ASCWHBSTATE_CLRREN, port->membase + LTQ_ASC_WHBSTATE); 147 + __raw_writel(ASCWHBSTATE_CLRREN, port->membase + LTQ_ASC_WHBSTATE); 148 148 } 149 149 150 150 static int ··· 153 153 struct tty_port *tport = &port->state->port; 154 154 unsigned int ch = 0, rsr = 0, fifocnt; 155 155 156 - fifocnt = readl(port->membase + LTQ_ASC_FSTAT) & ASCFSTAT_RXFFLMASK; 156 + fifocnt = __raw_readl(port->membase + LTQ_ASC_FSTAT) & 157 + ASCFSTAT_RXFFLMASK; 157 158 while (fifocnt--) { 158 159 u8 flag = TTY_NORMAL; 159 160 ch = readb(port->membase + LTQ_ASC_RBUF); 160 - rsr = (readl(port->membase + LTQ_ASC_STATE) 161 + rsr = (__raw_readl(port->membase + LTQ_ASC_STATE) 161 162 & ASCSTATE_ANY) | UART_DUMMY_UER_RX; 162 163 tty_flip_buffer_push(tport); 163 164 port->icount.rx++; ··· 218 217 return; 219 218 } 220 219 221 - while (((readl(port->membase + LTQ_ASC_FSTAT) & 220 + while (((__raw_readl(port->membase + LTQ_ASC_FSTAT) & 222 221 ASCFSTAT_TXFREEMASK) >> ASCFSTAT_TXFREEOFF) != 0) { 223 222 if (port->x_char) { 224 223 writeb(port->x_char, port->membase + LTQ_ASC_TBUF); ··· 246 245 unsigned long flags; 247 246 struct uart_port *port = (struct uart_port *)_port; 248 247 spin_lock_irqsave(&ltq_asc_lock, flags); 249 - writel(ASC_IRNCR_TIR, port->membase + LTQ_ASC_IRNCR); 248 + __raw_writel(ASC_IRNCR_TIR, port->membase + LTQ_ASC_IRNCR); 250 249 spin_unlock_irqrestore(&ltq_asc_lock, flags); 251 250 lqasc_start_tx(port); 252 251 return IRQ_HANDLED; ··· 271 270 unsigned long flags; 272 271 struct uart_port *port = (struct uart_port *)_port; 273 272 spin_lock_irqsave(&ltq_asc_lock, flags); 274 - writel(ASC_IRNCR_RIR, port->membase + LTQ_ASC_IRNCR); 273 + __raw_writel(ASC_IRNCR_RIR, port->membase + LTQ_ASC_IRNCR); 275 274 lqasc_rx_chars(port); 276 275 spin_unlock_irqrestore(&ltq_asc_lock, flags); 277 276 return IRQ_HANDLED; ··· 281 280 lqasc_tx_empty(struct uart_port *port) 282 281 { 283 282 int status; 284 - status = readl(port->membase + LTQ_ASC_FSTAT) & ASCFSTAT_TXFFLMASK; 283 + status = __raw_readl(port->membase + LTQ_ASC_FSTAT) & 284 + ASCFSTAT_TXFFLMASK; 285 285 return status ? 
0 : TIOCSER_TEMT; 286 286 } 287 287 ··· 315 313 asc_update_bits(ASCCLC_DISS | ASCCLC_RMCMASK, (1 << ASCCLC_RMCOFFSET), 316 314 port->membase + LTQ_ASC_CLC); 317 315 318 - writel(0, port->membase + LTQ_ASC_PISEL); 319 - writel( 316 + __raw_writel(0, port->membase + LTQ_ASC_PISEL); 317 + __raw_writel( 320 318 ((TXFIFO_FL << ASCTXFCON_TXFITLOFF) & ASCTXFCON_TXFITLMASK) | 321 319 ASCTXFCON_TXFEN | ASCTXFCON_TXFFLU, 322 320 port->membase + LTQ_ASC_TXFCON); 323 - writel( 321 + __raw_writel( 324 322 ((RXFIFO_FL << ASCRXFCON_RXFITLOFF) & ASCRXFCON_RXFITLMASK) 325 323 | ASCRXFCON_RXFEN | ASCRXFCON_RXFFLU, 326 324 port->membase + LTQ_ASC_RXFCON); ··· 352 350 goto err2; 353 351 } 354 352 355 - writel(ASC_IRNREN_RX | ASC_IRNREN_ERR | ASC_IRNREN_TX, 353 + __raw_writel(ASC_IRNREN_RX | ASC_IRNREN_ERR | ASC_IRNREN_TX, 356 354 port->membase + LTQ_ASC_IRNREN); 357 355 return 0; 358 356 ··· 371 369 free_irq(ltq_port->rx_irq, port); 372 370 free_irq(ltq_port->err_irq, port); 373 371 374 - writel(0, port->membase + LTQ_ASC_CON); 372 + __raw_writel(0, port->membase + LTQ_ASC_CON); 375 373 asc_update_bits(ASCRXFCON_RXFEN, ASCRXFCON_RXFFLU, 376 374 port->membase + LTQ_ASC_RXFCON); 377 375 asc_update_bits(ASCTXFCON_TXFEN, ASCTXFCON_TXFFLU, ··· 463 461 asc_update_bits(ASCCON_BRS, 0, port->membase + LTQ_ASC_CON); 464 462 465 463 /* now we can write the new baudrate into the register */ 466 - writel(divisor, port->membase + LTQ_ASC_BG); 464 + __raw_writel(divisor, port->membase + LTQ_ASC_BG); 467 465 468 466 /* turn the baudrate generator back on */ 469 467 asc_update_bits(0, ASCCON_R, port->membase + LTQ_ASC_CON); 470 468 471 469 /* enable rx */ 472 - writel(ASCWHBSTATE_SETREN, port->membase + LTQ_ASC_WHBSTATE); 470 + __raw_writel(ASCWHBSTATE_SETREN, port->membase + LTQ_ASC_WHBSTATE); 473 471 474 472 spin_unlock_irqrestore(&ltq_asc_lock, flags); 475 473 ··· 580 578 return; 581 579 582 580 do { 583 - fifofree = (readl(port->membase + LTQ_ASC_FSTAT) 581 + fifofree = (__raw_readl(port->membase + LTQ_ASC_FSTAT) 584 582 & ASCFSTAT_TXFREEMASK) >> ASCFSTAT_TXFREEOFF; 585 583 } while (fifofree == 0); 586 584 writeb(ch, port->membase + LTQ_ASC_TBUF);
+13 -7
drivers/tty/tty_io.c
··· 1256 1256 static int tty_reopen(struct tty_struct *tty) 1257 1257 { 1258 1258 struct tty_driver *driver = tty->driver; 1259 - int retval; 1259 + struct tty_ldisc *ld; 1260 + int retval = 0; 1260 1261 1261 1262 if (driver->type == TTY_DRIVER_TYPE_PTY && 1262 1263 driver->subtype == PTY_TYPE_MASTER) ··· 1269 1268 if (test_bit(TTY_EXCLUSIVE, &tty->flags) && !capable(CAP_SYS_ADMIN)) 1270 1269 return -EBUSY; 1271 1270 1272 - retval = tty_ldisc_lock(tty, 5 * HZ); 1273 - if (retval) 1274 - return retval; 1271 + ld = tty_ldisc_ref_wait(tty); 1272 + if (ld) { 1273 + tty_ldisc_deref(ld); 1274 + } else { 1275 + retval = tty_ldisc_lock(tty, 5 * HZ); 1276 + if (retval) 1277 + return retval; 1275 1278 1276 - if (!tty->ldisc) 1277 - retval = tty_ldisc_reinit(tty, tty->termios.c_line); 1278 - tty_ldisc_unlock(tty); 1279 + if (!tty->ldisc) 1280 + retval = tty_ldisc_reinit(tty, tty->termios.c_line); 1281 + tty_ldisc_unlock(tty); 1282 + } 1279 1283 1280 1284 if (retval == 0) 1281 1285 tty->count++;
+7
drivers/usb/class/cdc-acm.c
··· 1865 1865 .driver_info = IGNORE_DEVICE, 1866 1866 }, 1867 1867 1868 + { USB_DEVICE(0x1bc7, 0x0021), /* Telit 3G ACM only composition */ 1869 + .driver_info = SEND_ZERO_PACKET, 1870 + }, 1871 + { USB_DEVICE(0x1bc7, 0x0023), /* Telit 3G ACM + ECM composition */ 1872 + .driver_info = SEND_ZERO_PACKET, 1873 + }, 1874 + 1868 1875 /* control interfaces without any protocol set */ 1869 1876 { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM, 1870 1877 USB_CDC_PROTO_NONE) },
+6 -3
drivers/usb/core/generic.c
··· 143 143 continue; 144 144 } 145 145 146 - if (i > 0 && desc && is_audio(desc) && is_uac3_config(desc)) { 147 - best = c; 148 - break; 146 + if (i > 0 && desc && is_audio(desc)) { 147 + if (is_uac3_config(desc)) { 148 + best = c; 149 + break; 150 + } 151 + continue; 149 152 } 150 153 151 154 /* From the remaining configs, choose the first one whose
+2 -1
drivers/usb/core/quirks.c
··· 394 394 { USB_DEVICE(0x1a40, 0x0101), .driver_info = USB_QUIRK_HUB_SLOW_RESET }, 395 395 396 396 /* Corsair K70 RGB */ 397 - { USB_DEVICE(0x1b1c, 0x1b13), .driver_info = USB_QUIRK_DELAY_INIT }, 397 + { USB_DEVICE(0x1b1c, 0x1b13), .driver_info = USB_QUIRK_DELAY_INIT | 398 + USB_QUIRK_DELAY_CTRL_MSG }, 398 399 399 400 /* Corsair Strafe */ 400 401 { USB_DEVICE(0x1b1c, 0x1b15), .driver_info = USB_QUIRK_DELAY_INIT |
+6 -7
drivers/usb/gadget/udc/bdc/bdc_core.c
··· 172 172 /* Refer to BDC spec, Table 4 for description of SPB */ 173 173 sp_buff_size = 1 << (sp_buff_size + 5); 174 174 dev_dbg(bdc->dev, "Allocating %d bytes for scratchpad\n", sp_buff_size); 175 - bdc->scratchpad.buff = dma_zalloc_coherent(bdc->dev, sp_buff_size, 176 - &bdc->scratchpad.sp_dma, GFP_KERNEL); 175 + bdc->scratchpad.buff = dma_alloc_coherent(bdc->dev, sp_buff_size, 176 + &bdc->scratchpad.sp_dma, 177 + GFP_KERNEL); 177 178 178 179 if (!bdc->scratchpad.buff) 179 180 goto fail; ··· 203 202 bdc_writel(bdc->regs, BDC_SRRINT(0), BDC_SRR_RWS | BDC_SRR_RST); 204 203 bdc->srr.dqp_index = 0; 205 204 /* allocate the status report descriptors */ 206 - bdc->srr.sr_bds = dma_zalloc_coherent( 207 - bdc->dev, 208 - NUM_SR_ENTRIES * sizeof(struct bdc_bd), 209 - &bdc->srr.dma_addr, 210 - GFP_KERNEL); 205 + bdc->srr.sr_bds = dma_alloc_coherent(bdc->dev, 206 + NUM_SR_ENTRIES * sizeof(struct bdc_bd), 207 + &bdc->srr.dma_addr, GFP_KERNEL); 211 208 if (!bdc->srr.sr_bds) 212 209 return -ENOMEM; 213 210
+3 -3
drivers/usb/host/uhci-hcd.c
··· 596 596 &uhci_debug_operations); 597 597 #endif 598 598 599 - uhci->frame = dma_zalloc_coherent(uhci_dev(uhci), 600 - UHCI_NUMFRAMES * sizeof(*uhci->frame), 601 - &uhci->frame_dma_handle, GFP_KERNEL); 599 + uhci->frame = dma_alloc_coherent(uhci_dev(uhci), 600 + UHCI_NUMFRAMES * sizeof(*uhci->frame), 601 + &uhci->frame_dma_handle, GFP_KERNEL); 602 602 if (!uhci->frame) { 603 603 dev_err(uhci_dev(uhci), 604 604 "unable to allocate consistent memory for frame list\n");
+4 -4
drivers/usb/host/xhci-mem.c
··· 1672 1672 xhci->dcbaa->dev_context_ptrs[0] = cpu_to_le64(xhci->scratchpad->sp_dma); 1673 1673 for (i = 0; i < num_sp; i++) { 1674 1674 dma_addr_t dma; 1675 - void *buf = dma_zalloc_coherent(dev, xhci->page_size, &dma, 1676 - flags); 1675 + void *buf = dma_alloc_coherent(dev, xhci->page_size, &dma, 1676 + flags); 1677 1677 if (!buf) 1678 1678 goto fail_sp4; 1679 1679 ··· 1799 1799 struct xhci_erst_entry *entry; 1800 1800 1801 1801 size = sizeof(struct xhci_erst_entry) * evt_ring->num_segs; 1802 - erst->entries = dma_zalloc_coherent(xhci_to_hcd(xhci)->self.sysdev, 1803 - size, &erst->erst_dma_addr, flags); 1802 + erst->entries = dma_alloc_coherent(xhci_to_hcd(xhci)->self.sysdev, 1803 + size, &erst->erst_dma_addr, flags); 1804 1804 if (!erst->entries) 1805 1805 return -ENOMEM; 1806 1806
+6 -2
drivers/usb/storage/scsiglue.c
··· 235 235 if (!(us->fflags & US_FL_NEEDS_CAP16)) 236 236 sdev->try_rc_10_first = 1; 237 237 238 - /* assume SPC3 or latter devices support sense size > 18 */ 239 - if (sdev->scsi_level > SCSI_SPC_2) 238 + /* 239 + * assume SPC3 or later devices support sense size > 18 240 + * unless the US_FL_BAD_SENSE quirk is specified. 241 + */ 242 + if (sdev->scsi_level > SCSI_SPC_2 && 243 + !(us->fflags & US_FL_BAD_SENSE)) 240 244 us->fflags |= US_FL_SANE_SENSE; 241 245 242 246 /*
+12
drivers/usb/storage/unusual_devs.h
··· 1266 1266 US_FL_FIX_CAPACITY ), 1267 1267 1268 1268 /* 1269 + * Reported by Icenowy Zheng <icenowy@aosc.io> 1270 + * The SMI SM3350 USB-UFS bridge controller will enter a wrong state 1271 + * in which it does not process read/write commands if a long sense is 1272 + * requested, so force it to use an 18-byte sense. 1273 + */ 1274 + UNUSUAL_DEV( 0x090c, 0x3350, 0x0000, 0xffff, 1275 + "SMI", 1276 + "SM3350 UFS-to-USB-Mass-Storage bridge", 1277 + USB_SC_DEVICE, USB_PR_DEVICE, NULL, 1278 + US_FL_BAD_SENSE ), 1279 + 1280 + /* 1269 1281 * Reported by Paul Hartman <paul.hartman+linux@gmail.com> 1270 1282 * This card reader returns "Illegal Request, Logical Block Address 1271 1283 * Out of Range" for the first READ(10) after a new card is inserted.
+1 -1
drivers/vfio/pci/trace.h
··· 94 94 #endif /* _TRACE_VFIO_PCI_H */ 95 95 96 96 #undef TRACE_INCLUDE_PATH 97 - #define TRACE_INCLUDE_PATH . 97 + #define TRACE_INCLUDE_PATH ../../drivers/vfio/pci 98 98 #undef TRACE_INCLUDE_FILE 99 99 #define TRACE_INCLUDE_FILE trace 100 100
+1 -1
drivers/vfio/vfio_iommu_type1.c
··· 878 878 return -EINVAL; 879 879 if (!unmap->size || unmap->size & mask) 880 880 return -EINVAL; 881 - if (unmap->iova + unmap->size < unmap->iova || 881 + if (unmap->iova + unmap->size - 1 < unmap->iova || 882 882 unmap->size > SIZE_MAX) 883 883 return -EINVAL; 884 884
+2 -1
drivers/vhost/net.c
··· 1236 1236 if (nvq->done_idx > VHOST_NET_BATCH) 1237 1237 vhost_net_signal_used(nvq); 1238 1238 if (unlikely(vq_log)) 1239 - vhost_log_write(vq, vq_log, log, vhost_len); 1239 + vhost_log_write(vq, vq_log, log, vhost_len, 1240 + vq->iov, in); 1240 1241 total_len += vhost_len; 1241 1242 if (unlikely(vhost_exceeds_weight(++recv_pkts, total_len))) { 1242 1243 vhost_poll_queue(&vq->poll);
+12 -8
drivers/vhost/scsi.c
··· 1127 1127 struct vhost_virtqueue *vq, 1128 1128 struct vhost_scsi_ctx *vc) 1129 1129 { 1130 - struct virtio_scsi_ctrl_tmf_resp __user *resp; 1131 1130 struct virtio_scsi_ctrl_tmf_resp rsp; 1131 + struct iov_iter iov_iter; 1132 1132 int ret; 1133 1133 1134 1134 pr_debug("%s\n", __func__); 1135 1135 memset(&rsp, 0, sizeof(rsp)); 1136 1136 rsp.response = VIRTIO_SCSI_S_FUNCTION_REJECTED; 1137 - resp = vq->iov[vc->out].iov_base; 1138 - ret = __copy_to_user(resp, &rsp, sizeof(rsp)); 1139 - if (!ret) 1137 + 1138 + iov_iter_init(&iov_iter, READ, &vq->iov[vc->out], vc->in, sizeof(rsp)); 1139 + 1140 + ret = copy_to_iter(&rsp, sizeof(rsp), &iov_iter); 1141 + if (likely(ret == sizeof(rsp))) 1140 1142 vhost_add_used_and_signal(&vs->dev, vq, vc->head, 0); 1141 1143 else 1142 1144 pr_err("Faulted on virtio_scsi_ctrl_tmf_resp\n"); ··· 1149 1147 struct vhost_virtqueue *vq, 1150 1148 struct vhost_scsi_ctx *vc) 1151 1149 { 1152 - struct virtio_scsi_ctrl_an_resp __user *resp; 1153 1150 struct virtio_scsi_ctrl_an_resp rsp; 1151 + struct iov_iter iov_iter; 1154 1152 int ret; 1155 1153 1156 1154 pr_debug("%s\n", __func__); 1157 1155 memset(&rsp, 0, sizeof(rsp)); /* event_actual = 0 */ 1158 1156 rsp.response = VIRTIO_SCSI_S_OK; 1159 - resp = vq->iov[vc->out].iov_base; 1160 - ret = __copy_to_user(resp, &rsp, sizeof(rsp)); 1161 - if (!ret) 1157 + 1158 + iov_iter_init(&iov_iter, READ, &vq->iov[vc->out], vc->in, sizeof(rsp)); 1159 + 1160 + ret = copy_to_iter(&rsp, sizeof(rsp), &iov_iter); 1161 + if (likely(ret == sizeof(rsp))) 1162 1162 vhost_add_used_and_signal(&vs->dev, vq, vc->head, 0); 1163 1163 else 1164 1164 pr_err("Faulted on virtio_scsi_ctrl_an_resp\n");
+89 -16
drivers/vhost/vhost.c
··· 1034 1034 int type, ret; 1035 1035 1036 1036 ret = copy_from_iter(&type, sizeof(type), from); 1037 - if (ret != sizeof(type)) 1037 + if (ret != sizeof(type)) { 1038 + ret = -EINVAL; 1038 1039 goto done; 1040 + } 1039 1041 1040 1042 switch (type) { 1041 1043 case VHOST_IOTLB_MSG: ··· 1056 1054 1057 1055 iov_iter_advance(from, offset); 1058 1056 ret = copy_from_iter(&msg, sizeof(msg), from); 1059 - if (ret != sizeof(msg)) 1057 + if (ret != sizeof(msg)) { 1058 + ret = -EINVAL; 1060 1059 goto done; 1060 + } 1061 1061 if (vhost_process_iotlb_msg(dev, &msg)) { 1062 1062 ret = -EFAULT; 1063 1063 goto done; ··· 1737 1733 return r; 1738 1734 } 1739 1735 1736 + static int log_write_hva(struct vhost_virtqueue *vq, u64 hva, u64 len) 1737 + { 1738 + struct vhost_umem *umem = vq->umem; 1739 + struct vhost_umem_node *u; 1740 + u64 start, end, l, min; 1741 + int r; 1742 + bool hit = false; 1743 + 1744 + while (len) { 1745 + min = len; 1746 + /* More than one GPAs can be mapped into a single HVA. So 1747 + * iterate all possible umems here to be safe. 1748 + */ 1749 + list_for_each_entry(u, &umem->umem_list, link) { 1750 + if (u->userspace_addr > hva - 1 + len || 1751 + u->userspace_addr - 1 + u->size < hva) 1752 + continue; 1753 + start = max(u->userspace_addr, hva); 1754 + end = min(u->userspace_addr - 1 + u->size, 1755 + hva - 1 + len); 1756 + l = end - start + 1; 1757 + r = log_write(vq->log_base, 1758 + u->start + start - u->userspace_addr, 1759 + l); 1760 + if (r < 0) 1761 + return r; 1762 + hit = true; 1763 + min = min(l, min); 1764 + } 1765 + 1766 + if (!hit) 1767 + return -EFAULT; 1768 + 1769 + len -= min; 1770 + hva += min; 1771 + } 1772 + 1773 + return 0; 1774 + } 1775 + 1776 + static int log_used(struct vhost_virtqueue *vq, u64 used_offset, u64 len) 1777 + { 1778 + struct iovec iov[64]; 1779 + int i, ret; 1780 + 1781 + if (!vq->iotlb) 1782 + return log_write(vq->log_base, vq->log_addr + used_offset, len); 1783 + 1784 + ret = translate_desc(vq, (uintptr_t)vq->used + used_offset, 1785 + len, iov, 64, VHOST_ACCESS_WO); 1786 + if (ret) 1787 + return ret; 1788 + 1789 + for (i = 0; i < ret; i++) { 1790 + ret = log_write_hva(vq, (uintptr_t)iov[i].iov_base, 1791 + iov[i].iov_len); 1792 + if (ret) 1793 + return ret; 1794 + } 1795 + 1796 + return 0; 1797 + } 1798 + 1740 1799 int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log, 1741 - unsigned int log_num, u64 len) 1800 + unsigned int log_num, u64 len, struct iovec *iov, int count) 1742 1801 { 1743 1802 int i, r; 1744 1803 1745 1804 /* Make sure data written is seen before log. */ 1746 1805 smp_wmb(); 1806 + 1807 + if (vq->iotlb) { 1808 + for (i = 0; i < count; i++) { 1809 + r = log_write_hva(vq, (uintptr_t)iov[i].iov_base, 1810 + iov[i].iov_len); 1811 + if (r < 0) 1812 + return r; 1813 + } 1814 + return 0; 1815 + } 1816 + 1747 1817 for (i = 0; i < log_num; ++i) { 1748 1818 u64 l = min(log[i].len, len); 1749 1819 r = log_write(vq->log_base, log[i].addr, l); ··· 1847 1769 smp_wmb(); 1848 1770 /* Log used flag write. 
*/ 1849 1771 used = &vq->used->flags; 1850 - log_write(vq->log_base, vq->log_addr + 1851 - (used - (void __user *)vq->used), 1852 - sizeof vq->used->flags); 1772 + log_used(vq, (used - (void __user *)vq->used), 1773 + sizeof vq->used->flags); 1853 1774 if (vq->log_ctx) 1854 1775 eventfd_signal(vq->log_ctx, 1); 1855 1776 } ··· 1866 1789 smp_wmb(); 1867 1790 /* Log avail event write */ 1868 1791 used = vhost_avail_event(vq); 1869 - log_write(vq->log_base, vq->log_addr + 1870 - (used - (void __user *)vq->used), 1871 - sizeof *vhost_avail_event(vq)); 1792 + log_used(vq, (used - (void __user *)vq->used), 1793 + sizeof *vhost_avail_event(vq)); 1872 1794 if (vq->log_ctx) 1873 1795 eventfd_signal(vq->log_ctx, 1); 1874 1796 } ··· 2267 2191 /* Make sure data is seen before log. */ 2268 2192 smp_wmb(); 2269 2193 /* Log used ring entry write. */ 2270 - log_write(vq->log_base, 2271 - vq->log_addr + 2272 - ((void __user *)used - (void __user *)vq->used), 2273 - count * sizeof *used); 2194 + log_used(vq, ((void __user *)used - (void __user *)vq->used), 2195 + count * sizeof *used); 2274 2196 } 2275 2197 old = vq->last_used_idx; 2276 2198 new = (vq->last_used_idx += count); ··· 2310 2236 /* Make sure used idx is seen before log. */ 2311 2237 smp_wmb(); 2312 2238 /* Log used index update. */ 2313 - log_write(vq->log_base, 2314 - vq->log_addr + offsetof(struct vring_used, idx), 2315 - sizeof vq->used->idx); 2239 + log_used(vq, offsetof(struct vring_used, idx), 2240 + sizeof vq->used->idx); 2316 2241 if (vq->log_ctx) 2317 2242 eventfd_signal(vq->log_ctx, 1); 2318 2243 }
+2 -1
drivers/vhost/vhost.h
··· 205 205 bool vhost_enable_notify(struct vhost_dev *, struct vhost_virtqueue *); 206 206 207 207 int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log, 208 - unsigned int log_num, u64 len); 208 + unsigned int log_num, u64 len, 209 + struct iovec *iov, int count); 209 210 int vq_iotlb_prefetch(struct vhost_virtqueue *vq); 210 211 211 212 struct vhost_msg_node *vhost_new_msg(struct vhost_virtqueue *vq, int type);
+1 -1
drivers/vhost/vsock.c
··· 642 642 hash_del_rcu(&vsock->hash); 643 643 644 644 vsock->guest_cid = guest_cid; 645 - hash_add_rcu(vhost_vsock_hash, &vsock->hash, guest_cid); 645 + hash_add_rcu(vhost_vsock_hash, &vsock->hash, vsock->guest_cid); 646 646 mutex_unlock(&vhost_vsock_mutex); 647 647 648 648 return 0;
+1 -1
drivers/video/backlight/88pm860x_bl.c
··· 174 174 return -ENODEV; 175 175 } 176 176 for_each_child_of_node(nproot, np) { 177 - if (!of_node_cmp(np->name, name)) { 177 + if (of_node_name_eq(np, name)) { 178 178 of_property_read_u32(np, "marvell,88pm860x-iset", 179 179 &iset); 180 180 data->iset = PM8606_WLED_CURRENT(iset);
+17 -11
drivers/video/backlight/pwm_bl.c
··· 30 30 struct device *dev; 31 31 unsigned int lth_brightness; 32 32 unsigned int *levels; 33 + bool enabled; 33 34 struct regulator *power_supply; 34 35 struct gpio_desc *enable_gpio; 35 36 unsigned int scale; ··· 51 50 int err; 52 51 53 52 pwm_get_state(pb->pwm, &state); 54 - if (state.enabled) 53 + if (pb->enabled) 55 54 return; 56 55 57 56 err = regulator_enable(pb->power_supply); ··· 66 65 67 66 if (pb->enable_gpio) 68 67 gpiod_set_value_cansleep(pb->enable_gpio, 1); 68 + 69 + pb->enabled = true; 69 70 } 70 71 71 72 static void pwm_backlight_power_off(struct pwm_bl_data *pb) ··· 75 72 struct pwm_state state; 76 73 77 74 pwm_get_state(pb->pwm, &state); 78 - if (!state.enabled) 75 + if (!pb->enabled) 79 76 return; 80 77 81 78 if (pb->enable_gpio) ··· 89 86 pwm_apply_state(pb->pwm, &state); 90 87 91 88 regulator_disable(pb->power_supply); 89 + pb->enabled = false; 92 90 } 93 91 94 92 static int compute_duty_cycle(struct pwm_bl_data *pb, int brightness) ··· 273 269 memset(data, 0, sizeof(*data)); 274 270 275 271 /* 272 + * These values are optional and set as 0 by default, the out values 273 + * are modified only if a valid u32 value can be decoded. 274 + */ 275 + of_property_read_u32(node, "post-pwm-on-delay-ms", 276 + &data->post_pwm_on_delay); 277 + of_property_read_u32(node, "pwm-off-delay-ms", &data->pwm_off_delay); 278 + 279 + data->enable_gpio = -EINVAL; 280 + 281 + /* 276 282 * Determine the number of brightness levels, if this property is not 277 283 * set a default table of brightness levels will be used. 278 284 */ ··· 394 380 data->max_brightness--; 395 381 } 396 382 397 - /* 398 - * These values are optional and set as 0 by default, the out values 399 - * are modified only if a valid u32 value can be decoded. 400 - */ 401 - of_property_read_u32(node, "post-pwm-on-delay-ms", 402 - &data->post_pwm_on_delay); 403 - of_property_read_u32(node, "pwm-off-delay-ms", &data->pwm_off_delay); 404 - 405 - data->enable_gpio = -EINVAL; 406 383 return 0; 407 384 } 408 385 ··· 488 483 pb->check_fb = data->check_fb; 489 484 pb->exit = data->exit; 490 485 pb->dev = &pdev->dev; 486 + pb->enabled = false; 491 487 pb->post_pwm_on_delay = data->post_pwm_on_delay; 492 488 pb->pwm_off_delay = data->pwm_off_delay; 493 489
+7
drivers/video/fbdev/core/fbcon.c
··· 510 510 continue; 511 511 } 512 512 #endif 513 + 514 + if (!strncmp(options, "logo-pos:", 9)) { 515 + options += 9; 516 + if (!strcmp(options, "center")) 517 + fb_center_logo = true; 518 + continue; 519 + } 513 520 } 514 521 return 1; 515 522 }
+10 -9
drivers/video/fbdev/core/fbmem.c
··· 53 53 int num_registered_fb __read_mostly; 54 54 EXPORT_SYMBOL(num_registered_fb); 55 55 56 + bool fb_center_logo __read_mostly; 57 + EXPORT_SYMBOL(fb_center_logo); 58 + 56 59 static struct fb_info *get_fb_info(unsigned int idx) 57 60 { 58 61 struct fb_info *fb_info; ··· 509 506 fb_set_logo(info, logo, logo_new, fb_logo.depth); 510 507 } 511 508 512 - #ifdef CONFIG_FB_LOGO_CENTER 513 - { 509 + if (fb_center_logo) { 514 510 int xres = info->var.xres; 515 511 int yres = info->var.yres; 516 512 ··· 522 520 --n; 523 521 image.dx = (xres - n * (logo->width + 8) - 8) / 2; 524 522 image.dy = y ?: (yres - logo->height) / 2; 523 + } else { 524 + image.dx = 0; 525 + image.dy = y; 525 526 } 526 - #else 527 - image.dx = 0; 528 - image.dy = y; 529 - #endif 527 + 530 528 image.width = logo->width; 531 529 image.height = logo->height; 532 530 ··· 686 684 } 687 685 688 686 height = fb_logo.logo->height; 689 - #ifdef CONFIG_FB_LOGO_CENTER 690 - height += (yres - fb_logo.logo->height) / 2; 691 - #endif 687 + if (fb_center_logo) 688 + height += (yres - fb_logo.logo->height) / 2; 692 689 693 690 return fb_prepare_extra_logos(info, height, yres); 694 691 }
+3 -3
drivers/video/fbdev/da8xx-fb.c
··· 1446 1446 da8xx_fb_fix.line_length - 1; 1447 1447 1448 1448 /* allocate palette buffer */ 1449 - par->v_palette_base = dma_zalloc_coherent(NULL, PALETTE_SIZE, 1450 - &par->p_palette_base, 1451 - GFP_KERNEL | GFP_DMA); 1449 + par->v_palette_base = dma_alloc_coherent(NULL, PALETTE_SIZE, 1450 + &par->p_palette_base, 1451 + GFP_KERNEL | GFP_DMA); 1452 1452 if (!par->v_palette_base) { 1453 1453 dev_err(&device->dev, 1454 1454 "GLCD: kmalloc for palette buffer failed\n");
+9 -9
drivers/video/fbdev/offb.c
··· 318 318 } 319 319 320 320 static void offb_init_palette_hacks(struct fb_info *info, struct device_node *dp, 321 - const char *name, unsigned long address) 321 + unsigned long address) 322 322 { 323 323 struct offb_par *par = (struct offb_par *) info->par; 324 324 325 - if (dp && !strncmp(name, "ATY,Rage128", 11)) { 325 + if (of_node_name_prefix(dp, "ATY,Rage128")) { 326 326 par->cmap_adr = offb_map_reg(dp, 2, 0, 0x1fff); 327 327 if (par->cmap_adr) 328 328 par->cmap_type = cmap_r128; 329 - } else if (dp && (!strncmp(name, "ATY,RageM3pA", 12) 330 - || !strncmp(name, "ATY,RageM3p12A", 14))) { 329 + } else if (of_node_name_prefix(dp, "ATY,RageM3pA") || 330 + of_node_name_prefix(dp, "ATY,RageM3p12A")) { 331 331 par->cmap_adr = offb_map_reg(dp, 2, 0, 0x1fff); 332 332 if (par->cmap_adr) 333 333 par->cmap_type = cmap_M3A; 334 - } else if (dp && !strncmp(name, "ATY,RageM3pB", 12)) { 334 + } else if (of_node_name_prefix(dp, "ATY,RageM3pB")) { 335 335 par->cmap_adr = offb_map_reg(dp, 2, 0, 0x1fff); 336 336 if (par->cmap_adr) 337 337 par->cmap_type = cmap_M3B; 338 - } else if (dp && !strncmp(name, "ATY,Rage6", 9)) { 338 + } else if (of_node_name_prefix(dp, "ATY,Rage6")) { 339 339 par->cmap_adr = offb_map_reg(dp, 1, 0, 0x1fff); 340 340 if (par->cmap_adr) 341 341 par->cmap_type = cmap_radeon; 342 - } else if (!strncmp(name, "ATY,", 4)) { 342 + } else if (of_node_name_prefix(dp, "ATY,")) { 343 343 unsigned long base = address & 0xff000000UL; 344 344 par->cmap_adr = 345 345 ioremap(base + 0x7ff000, 0x1000) + 0xcc0; ··· 350 350 par->cmap_adr = offb_map_reg(dp, 0, 0x6000, 0x1000); 351 351 if (par->cmap_adr) 352 352 par->cmap_type = cmap_gxt2000; 353 - } else if (dp && !strncmp(name, "vga,Display-", 12)) { 353 + } else if (of_node_name_prefix(dp, "vga,Display-")) { 354 354 /* Look for AVIVO initialized by SLOF */ 355 355 struct device_node *pciparent = of_get_parent(dp); 356 356 const u32 *vid, *did; ··· 438 438 439 439 par->cmap_type = cmap_unknown; 440 440 if (depth == 8) 441 - offb_init_palette_hacks(info, dp, name, address); 441 + offb_init_palette_hacks(info, dp, address); 442 442 else 443 443 fix->visual = FB_VISUAL_TRUECOLOR; 444 444
+2
drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c
··· 609 609 610 610 int r = 0; 611 611 612 + memset(&p, 0, sizeof(p)); 613 + 612 614 switch (cmd) { 613 615 case OMAPFB_SYNC_GFX: 614 616 DBG("ioctl SYNC_GFX\n");
-9
drivers/video/logo/Kconfig
··· 10 10 11 11 if LOGO 12 12 13 - config FB_LOGO_CENTER 14 - bool "Center the logo" 15 - depends on FB=y 16 - help 17 - When this option is selected, the bootup logo is centered both 18 - horizontally and vertically. If more than one logo is displayed 19 - due to multiple CPUs, the collected line of logos is centered 20 - as a whole. 21 - 22 13 config FB_LOGO_EXTRA 23 14 bool 24 15 depends on FB=y
+65 -33
drivers/virtio/virtio_balloon.c
··· 61 61 VIRTIO_BALLOON_VQ_MAX 62 62 }; 63 63 64 + enum virtio_balloon_config_read { 65 + VIRTIO_BALLOON_CONFIG_READ_CMD_ID = 0, 66 + }; 67 + 64 68 struct virtio_balloon { 65 69 struct virtio_device *vdev; 66 70 struct virtqueue *inflate_vq, *deflate_vq, *stats_vq, *free_page_vq; ··· 81 77 /* Prevent updating balloon when it is being canceled. */ 82 78 spinlock_t stop_update_lock; 83 79 bool stop_update; 80 + /* Bitmap to indicate if reading the related config fields are needed */ 81 + unsigned long config_read_bitmap; 84 82 85 83 /* The list of allocated free pages, waiting to be given back to mm */ 86 84 struct list_head free_page_list; 87 85 spinlock_t free_page_list_lock; 88 86 /* The number of free page blocks on the above list */ 89 87 unsigned long num_free_page_blocks; 90 - /* The cmd id received from host */ 91 - u32 cmd_id_received; 88 + /* 89 + * The cmd id received from host. 90 + * Read it via virtio_balloon_cmd_id_received to get the latest value 91 + * sent from host. 92 + */ 93 + u32 cmd_id_received_cache; 92 94 /* The cmd id that is actively in use */ 93 95 __virtio32 cmd_id_active; 94 96 /* Buffer to store the stop sign */ ··· 400 390 return num_returned; 401 391 } 402 392 393 + static void virtio_balloon_queue_free_page_work(struct virtio_balloon *vb) 394 + { 395 + if (!virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT)) 396 + return; 397 + 398 + /* No need to queue the work if the bit was already set. */ 399 + if (test_and_set_bit(VIRTIO_BALLOON_CONFIG_READ_CMD_ID, 400 + &vb->config_read_bitmap)) 401 + return; 402 + 403 + queue_work(vb->balloon_wq, &vb->report_free_page_work); 404 + } 405 + 403 406 static void virtballoon_changed(struct virtio_device *vdev) 404 407 { 405 408 struct virtio_balloon *vb = vdev->priv; 406 409 unsigned long flags; 407 - s64 diff = towards_target(vb); 408 410 409 - if (diff) { 410 - spin_lock_irqsave(&vb->stop_update_lock, flags); 411 - if (!vb->stop_update) 412 - queue_work(system_freezable_wq, 413 - &vb->update_balloon_size_work); 414 - spin_unlock_irqrestore(&vb->stop_update_lock, flags); 411 + spin_lock_irqsave(&vb->stop_update_lock, flags); 412 + if (!vb->stop_update) { 413 + queue_work(system_freezable_wq, 414 + &vb->update_balloon_size_work); 415 + virtio_balloon_queue_free_page_work(vb); 415 416 } 416 - 417 - if (virtio_has_feature(vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT)) { 418 - virtio_cread(vdev, struct virtio_balloon_config, 419 - free_page_report_cmd_id, &vb->cmd_id_received); 420 - if (vb->cmd_id_received == VIRTIO_BALLOON_CMD_ID_DONE) { 421 - /* Pass ULONG_MAX to give back all the free pages */ 422 - return_free_pages_to_mm(vb, ULONG_MAX); 423 - } else if (vb->cmd_id_received != VIRTIO_BALLOON_CMD_ID_STOP && 424 - vb->cmd_id_received != 425 - virtio32_to_cpu(vdev, vb->cmd_id_active)) { 426 - spin_lock_irqsave(&vb->stop_update_lock, flags); 427 - if (!vb->stop_update) { 428 - queue_work(vb->balloon_wq, 429 - &vb->report_free_page_work); 430 - } 431 - spin_unlock_irqrestore(&vb->stop_update_lock, flags); 432 - } 433 - } 417 + spin_unlock_irqrestore(&vb->stop_update_lock, flags); 434 418 } 435 419 436 420 static void update_balloon_size(struct virtio_balloon *vb) ··· 531 527 return 0; 532 528 } 533 529 530 + static u32 virtio_balloon_cmd_id_received(struct virtio_balloon *vb) 531 + { 532 + if (test_and_clear_bit(VIRTIO_BALLOON_CONFIG_READ_CMD_ID, 533 + &vb->config_read_bitmap)) 534 + virtio_cread(vb->vdev, struct virtio_balloon_config, 535 + free_page_report_cmd_id, 536 + &vb->cmd_id_received_cache); 537 + 538 + return 
vb->cmd_id_received_cache; 539 + } 540 + 534 541 static int send_cmd_id_start(struct virtio_balloon *vb) 535 542 { 536 543 struct scatterlist sg; ··· 552 537 while (virtqueue_get_buf(vq, &unused)) 553 538 ; 554 539 555 - vb->cmd_id_active = cpu_to_virtio32(vb->vdev, vb->cmd_id_received); 540 + vb->cmd_id_active = virtio32_to_cpu(vb->vdev, 541 + virtio_balloon_cmd_id_received(vb)); 556 542 sg_init_one(&sg, &vb->cmd_id_active, sizeof(vb->cmd_id_active)); 557 543 err = virtqueue_add_outbuf(vq, &sg, 1, &vb->cmd_id_active, GFP_KERNEL); 558 544 if (!err) ··· 636 620 * stop the reporting. 637 621 */ 638 622 cmd_id_active = virtio32_to_cpu(vb->vdev, vb->cmd_id_active); 639 - if (cmd_id_active != vb->cmd_id_received) 623 + if (unlikely(cmd_id_active != 624 + virtio_balloon_cmd_id_received(vb))) 640 625 break; 641 626 642 627 /* ··· 654 637 return 0; 655 638 } 656 639 657 - static void report_free_page_func(struct work_struct *work) 640 + static void virtio_balloon_report_free_page(struct virtio_balloon *vb) 658 641 { 659 642 int err; 660 - struct virtio_balloon *vb = container_of(work, struct virtio_balloon, 661 - report_free_page_work); 662 643 struct device *dev = &vb->vdev->dev; 663 644 664 645 /* Start by sending the received cmd id to host with an outbuf. */ ··· 672 657 err = send_cmd_id_stop(vb); 673 658 if (unlikely(err)) 674 659 dev_err(dev, "Failed to send a stop id, err = %d\n", err); 660 + } 661 + 662 + static void report_free_page_func(struct work_struct *work) 663 + { 664 + struct virtio_balloon *vb = container_of(work, struct virtio_balloon, 665 + report_free_page_work); 666 + u32 cmd_id_received; 667 + 668 + cmd_id_received = virtio_balloon_cmd_id_received(vb); 669 + if (cmd_id_received == VIRTIO_BALLOON_CMD_ID_DONE) { 670 + /* Pass ULONG_MAX to give back all the free pages */ 671 + return_free_pages_to_mm(vb, ULONG_MAX); 672 + } else if (cmd_id_received != VIRTIO_BALLOON_CMD_ID_STOP && 673 + cmd_id_received != 674 + virtio32_to_cpu(vb->vdev, vb->cmd_id_active)) { 675 + virtio_balloon_report_free_page(vb); 676 + } 675 677 } 676 678 677 679 #ifdef CONFIG_BALLOON_COMPACTION ··· 917 885 goto out_del_vqs; 918 886 } 919 887 INIT_WORK(&vb->report_free_page_work, report_free_page_func); 920 - vb->cmd_id_received = VIRTIO_BALLOON_CMD_ID_STOP; 888 + vb->cmd_id_received_cache = VIRTIO_BALLOON_CMD_ID_STOP; 921 889 vb->cmd_id_active = cpu_to_virtio32(vb->vdev, 922 890 VIRTIO_BALLOON_CMD_ID_STOP); 923 891 vb->cmd_id_stop = cpu_to_virtio32(vb->vdev,
+7 -2
drivers/virtio/virtio_mmio.c
··· 468 468 { 469 469 struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev); 470 470 unsigned int irq = platform_get_irq(vm_dev->pdev, 0); 471 - int i, err; 471 + int i, err, queue_idx = 0; 472 472 473 473 err = request_irq(irq, vm_interrupt, IRQF_SHARED, 474 474 dev_name(&vdev->dev), vm_dev); ··· 476 476 return err; 477 477 478 478 for (i = 0; i < nvqs; ++i) { 479 - vqs[i] = vm_setup_vq(vdev, i, callbacks[i], names[i], 479 + if (!names[i]) { 480 + vqs[i] = NULL; 481 + continue; 482 + } 483 + 484 + vqs[i] = vm_setup_vq(vdev, queue_idx++, callbacks[i], names[i], 480 485 ctx ? ctx[i] : false); 481 486 if (IS_ERR(vqs[i])) { 482 487 vm_del_vqs(vdev);
+4 -4
drivers/virtio/virtio_pci_common.c
··· 285 285 { 286 286 struct virtio_pci_device *vp_dev = to_vp_device(vdev); 287 287 u16 msix_vec; 288 - int i, err, nvectors, allocated_vectors; 288 + int i, err, nvectors, allocated_vectors, queue_idx = 0; 289 289 290 290 vp_dev->vqs = kcalloc(nvqs, sizeof(*vp_dev->vqs), GFP_KERNEL); 291 291 if (!vp_dev->vqs) ··· 321 321 msix_vec = allocated_vectors++; 322 322 else 323 323 msix_vec = VP_MSIX_VQ_VECTOR; 324 - vqs[i] = vp_setup_vq(vdev, i, callbacks[i], names[i], 324 + vqs[i] = vp_setup_vq(vdev, queue_idx++, callbacks[i], names[i], 325 325 ctx ? ctx[i] : false, 326 326 msix_vec); 327 327 if (IS_ERR(vqs[i])) { ··· 356 356 const char * const names[], const bool *ctx) 357 357 { 358 358 struct virtio_pci_device *vp_dev = to_vp_device(vdev); 359 - int i, err; 359 + int i, err, queue_idx = 0; 360 360 361 361 vp_dev->vqs = kcalloc(nvqs, sizeof(*vp_dev->vqs), GFP_KERNEL); 362 362 if (!vp_dev->vqs) ··· 374 374 vqs[i] = NULL; 375 375 continue; 376 376 } 377 - vqs[i] = vp_setup_vq(vdev, i, callbacks[i], names[i], 377 + vqs[i] = vp_setup_vq(vdev, queue_idx++, callbacks[i], names[i], 378 378 ctx ? ctx[i] : false, 379 379 VIRTIO_MSI_NO_VECTOR); 380 380 if (IS_ERR(vqs[i])) {
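Editor's note: the virtio_mmio and virtio_pci hunks above are the same fix applied to two transports. A NULL entry in names[] means the caller does not want that virtqueue, so the loop must skip the slot without consuming a hardware queue index. Distilled (setup_vq() stands in for vm_setup_vq()/vp_setup_vq()):

int i, queue_idx = 0;

for (i = 0; i < nvqs; ++i) {
        if (!names[i]) {
                vqs[i] = NULL;  /* slot intentionally unused by the driver */
                continue;       /* crucially, queue_idx is not incremented */
        }
        vqs[i] = setup_vq(vdev, queue_idx++, callbacks[i], names[i]);
}

Without the separate queue_idx counter, every virtqueue after a NULL name would be bound to the wrong device queue.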
+12
drivers/watchdog/Kconfig
··· 817 817 To compile this driver as a module, choose M here: the 818 818 module will be called stm32_iwdg. 819 819 820 + config STPMIC1_WATCHDOG 821 + tristate "STPMIC1 PMIC watchdog support" 822 + depends on MFD_STPMIC1 823 + select WATCHDOG_CORE 824 + help 825 + Say Y here to include watchdog support embedded into STPMIC1 PMIC. 826 + If the watchdog timer expires, stpmic1 will shut down all its power 827 + supplies. 828 + 829 + To compile this driver as a module, choose M here: the 830 + module will be called stpmic1_wdt. 831 + 820 832 config UNIPHIER_WATCHDOG 821 833 tristate "UniPhier watchdog support" 822 834 depends on ARCH_UNIPHIER || COMPILE_TEST
+1
drivers/watchdog/Makefile
··· 220 220 obj-$(CONFIG_MENF21BMC_WATCHDOG) += menf21bmc_wdt.o 221 221 obj-$(CONFIG_MENZ069_WATCHDOG) += menz69_wdt.o 222 222 obj-$(CONFIG_RAVE_SP_WATCHDOG) += rave-sp-wdt.o 223 + obj-$(CONFIG_STPMIC1_WATCHDOG) += stpmic1_wdt.o
+1
drivers/watchdog/mt7621_wdt.c
··· 17 17 #include <linux/watchdog.h> 18 18 #include <linux/moduleparam.h> 19 19 #include <linux/platform_device.h> 20 + #include <linux/mod_devicetable.h> 20 21 21 22 #include <asm/mach-ralink/ralink_regs.h> 22 23
+1
drivers/watchdog/rt2880_wdt.c
··· 18 18 #include <linux/watchdog.h> 19 19 #include <linux/moduleparam.h> 20 20 #include <linux/platform_device.h> 21 + #include <linux/mod_devicetable.h> 21 22 22 23 #include <asm/mach-ralink/ralink_regs.h> 23 24
+139
drivers/watchdog/stpmic1_wdt.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + // Copyright (C) STMicroelectronics 2018 3 + // Author: Pascal Paillet <p.paillet@st.com> for STMicroelectronics. 4 + 5 + #include <linux/kernel.h> 6 + #include <linux/mfd/stpmic1.h> 7 + #include <linux/module.h> 8 + #include <linux/platform_device.h> 9 + #include <linux/of.h> 10 + #include <linux/regmap.h> 11 + #include <linux/slab.h> 12 + #include <linux/watchdog.h> 13 + 14 + /* WATCHDOG CONTROL REGISTER bit */ 15 + #define WDT_START BIT(0) 16 + #define WDT_PING BIT(1) 17 + #define WDT_START_MASK BIT(0) 18 + #define WDT_PING_MASK BIT(1) 19 + #define WDT_STOP 0 20 + 21 + #define PMIC_WDT_MIN_TIMEOUT 1 22 + #define PMIC_WDT_MAX_TIMEOUT 256 23 + #define PMIC_WDT_DEFAULT_TIMEOUT 30 24 + 25 + static bool nowayout = WATCHDOG_NOWAYOUT; 26 + module_param(nowayout, bool, 0); 27 + MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=" 28 + __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); 29 + 30 + struct stpmic1_wdt { 31 + struct stpmic1 *pmic; 32 + struct watchdog_device wdtdev; 33 + }; 34 + 35 + static int pmic_wdt_start(struct watchdog_device *wdd) 36 + { 37 + struct stpmic1_wdt *wdt = watchdog_get_drvdata(wdd); 38 + 39 + return regmap_update_bits(wdt->pmic->regmap, 40 + WCHDG_CR, WDT_START_MASK, WDT_START); 41 + } 42 + 43 + static int pmic_wdt_stop(struct watchdog_device *wdd) 44 + { 45 + struct stpmic1_wdt *wdt = watchdog_get_drvdata(wdd); 46 + 47 + return regmap_update_bits(wdt->pmic->regmap, 48 + WCHDG_CR, WDT_START_MASK, WDT_STOP); 49 + } 50 + 51 + static int pmic_wdt_ping(struct watchdog_device *wdd) 52 + { 53 + struct stpmic1_wdt *wdt = watchdog_get_drvdata(wdd); 54 + 55 + return regmap_update_bits(wdt->pmic->regmap, 56 + WCHDG_CR, WDT_PING_MASK, WDT_PING); 57 + } 58 + 59 + static int pmic_wdt_set_timeout(struct watchdog_device *wdd, 60 + unsigned int timeout) 61 + { 62 + struct stpmic1_wdt *wdt = watchdog_get_drvdata(wdd); 63 + 64 + wdd->timeout = timeout; 65 + /* timeout is equal to register value + 1 */ 66 + return regmap_write(wdt->pmic->regmap, WCHDG_TIMER_CR, timeout - 1); 67 + } 68 + 69 + static const struct watchdog_info pmic_watchdog_info = { 70 + .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE, 71 + .identity = "STPMIC1 PMIC Watchdog", 72 + }; 73 + 74 + static const struct watchdog_ops pmic_watchdog_ops = { 75 + .owner = THIS_MODULE, 76 + .start = pmic_wdt_start, 77 + .stop = pmic_wdt_stop, 78 + .ping = pmic_wdt_ping, 79 + .set_timeout = pmic_wdt_set_timeout, 80 + }; 81 + 82 + static int pmic_wdt_probe(struct platform_device *pdev) 83 + { 84 + int ret; 85 + struct stpmic1 *pmic; 86 + struct stpmic1_wdt *wdt; 87 + 88 + if (!pdev->dev.parent) 89 + return -EINVAL; 90 + 91 + pmic = dev_get_drvdata(pdev->dev.parent); 92 + if (!pmic) 93 + return -EINVAL; 94 + 95 + wdt = devm_kzalloc(&pdev->dev, sizeof(struct stpmic1_wdt), GFP_KERNEL); 96 + if (!wdt) 97 + return -ENOMEM; 98 + 99 + wdt->pmic = pmic; 100 + 101 + wdt->wdtdev.info = &pmic_watchdog_info; 102 + wdt->wdtdev.ops = &pmic_watchdog_ops; 103 + wdt->wdtdev.min_timeout = PMIC_WDT_MIN_TIMEOUT; 104 + wdt->wdtdev.max_timeout = PMIC_WDT_MAX_TIMEOUT; 105 + wdt->wdtdev.parent = &pdev->dev; 106 + 107 + wdt->wdtdev.timeout = PMIC_WDT_DEFAULT_TIMEOUT; 108 + watchdog_init_timeout(&wdt->wdtdev, 0, &pdev->dev); 109 + 110 + watchdog_set_nowayout(&wdt->wdtdev, nowayout); 111 + watchdog_set_drvdata(&wdt->wdtdev, wdt); 112 + 113 + ret = devm_watchdog_register_device(&pdev->dev, &wdt->wdtdev); 114 + if (ret) 115 + return ret; 116 + 117 + dev_dbg(wdt->pmic->dev, 
"PMIC Watchdog driver probed\n"); 118 + return 0; 119 + } 120 + 121 + static const struct of_device_id of_pmic_wdt_match[] = { 122 + { .compatible = "st,stpmic1-wdt" }, 123 + { }, 124 + }; 125 + 126 + MODULE_DEVICE_TABLE(of, of_pmic_wdt_match); 127 + 128 + static struct platform_driver stpmic1_wdt_driver = { 129 + .probe = pmic_wdt_probe, 130 + .driver = { 131 + .name = "stpmic1-wdt", 132 + .of_match_table = of_pmic_wdt_match, 133 + }, 134 + }; 135 + module_platform_driver(stpmic1_wdt_driver); 136 + 137 + MODULE_DESCRIPTION("Watchdog driver for STPMIC1 device"); 138 + MODULE_AUTHOR("Pascal Paillet <p.paillet@st.com>"); 139 + MODULE_LICENSE("GPL v2");
+4 -4
drivers/watchdog/tqmx86_wdt.c
··· 79 79 return -ENOMEM; 80 80 81 81 res = platform_get_resource(pdev, IORESOURCE_IO, 0); 82 - if (IS_ERR(res)) 83 - return PTR_ERR(res); 82 + if (!res) 83 + return -ENODEV; 84 84 85 85 priv->io_base = devm_ioport_map(&pdev->dev, res->start, 86 86 resource_size(res)); 87 - if (IS_ERR(priv->io_base)) 88 - return PTR_ERR(priv->io_base); 87 + if (!priv->io_base) 88 + return -ENOMEM; 89 89 90 90 watchdog_set_drvdata(&priv->wdd, priv); 91 91
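Editor's note: the tqmx86 fix above corrects a common API mixup. platform_get_resource() and devm_ioport_map() report failure by returning NULL, never an ERR_PTR() value, so the old IS_ERR() checks could never fire and a missing resource would have been dereferenced later. The corrected shape, for reference:

res = platform_get_resource(pdev, IORESOURCE_IO, 0);
if (!res)               /* NULL means "no such resource", not an ERR_PTR */
        return -ENODEV;

priv->io_base = devm_ioport_map(&pdev->dev, res->start,
                                resource_size(res));
if (!priv->io_base)     /* NULL means the mapping failed */
        return -ENOMEM;

IS_ERR()/PTR_ERR() are only meaningful for APIs documented to encode an errno in the returned pointer.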
+1 -1
drivers/xen/events/events_base.c
··· 1650 1650 xen_have_vector_callback = 0; 1651 1651 return; 1652 1652 } 1653 - pr_info("Xen HVM callback vector for event delivery is enabled\n"); 1653 + pr_info_once("Xen HVM callback vector for event delivery is enabled\n"); 1654 1654 alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR, 1655 1655 xen_hvm_callback_vector); 1656 1656 }
+4 -5
drivers/xen/pvcalls-back.c
··· 160 160 161 161 /* write the data, then modify the indexes */ 162 162 virt_wmb(); 163 - if (ret < 0) 163 + if (ret < 0) { 164 + atomic_set(&map->read, 0); 164 165 intf->in_error = ret; 165 - else 166 + } else 166 167 intf->in_prod = prod + ret; 167 168 /* update the indexes, then notify the other end */ 168 169 virt_wmb(); ··· 283 282 static void pvcalls_sk_state_change(struct sock *sock) 284 283 { 285 284 struct sock_mapping *map = sock->sk_user_data; 286 - struct pvcalls_data_intf *intf; 287 285 288 286 if (map == NULL) 289 287 return; 290 288 291 - intf = map->ring; 292 - intf->in_error = -ENOTCONN; 289 + atomic_inc(&map->read); 293 290 notify_remote_via_irq(map->irq); 294 291 } 295 292
+76 -30
drivers/xen/pvcalls-front.c
··· 31 31 #define PVCALLS_NR_RSP_PER_RING __CONST_RING_SIZE(xen_pvcalls, XEN_PAGE_SIZE) 32 32 #define PVCALLS_FRONT_MAX_SPIN 5000 33 33 34 + static struct proto pvcalls_proto = { 35 + .name = "PVCalls", 36 + .owner = THIS_MODULE, 37 + .obj_size = sizeof(struct sock), 38 + }; 39 + 34 40 struct pvcalls_bedata { 35 41 struct xen_pvcalls_front_ring ring; 36 42 grant_ref_t ref; ··· 341 335 return ret; 342 336 } 343 337 338 + static void free_active_ring(struct sock_mapping *map) 339 + { 340 + if (!map->active.ring) 341 + return; 342 + 343 + free_pages((unsigned long)map->active.data.in, 344 + map->active.ring->ring_order); 345 + free_page((unsigned long)map->active.ring); 346 + } 347 + 348 + static int alloc_active_ring(struct sock_mapping *map) 349 + { 350 + void *bytes; 351 + 352 + map->active.ring = (struct pvcalls_data_intf *) 353 + get_zeroed_page(GFP_KERNEL); 354 + if (!map->active.ring) 355 + goto out; 356 + 357 + map->active.ring->ring_order = PVCALLS_RING_ORDER; 358 + bytes = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 359 + PVCALLS_RING_ORDER); 360 + if (!bytes) 361 + goto out; 362 + 363 + map->active.data.in = bytes; 364 + map->active.data.out = bytes + 365 + XEN_FLEX_RING_SIZE(PVCALLS_RING_ORDER); 366 + 367 + return 0; 368 + 369 + out: 370 + free_active_ring(map); 371 + return -ENOMEM; 372 + } 373 + 344 374 static int create_active(struct sock_mapping *map, int *evtchn) 345 375 { 346 376 void *bytes; ··· 385 343 *evtchn = -1; 386 344 init_waitqueue_head(&map->active.inflight_conn_req); 387 345 388 - map->active.ring = (struct pvcalls_data_intf *) 389 - __get_free_page(GFP_KERNEL | __GFP_ZERO); 390 - if (map->active.ring == NULL) 391 - goto out_error; 392 - map->active.ring->ring_order = PVCALLS_RING_ORDER; 393 - bytes = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 394 - PVCALLS_RING_ORDER); 395 - if (bytes == NULL) 396 - goto out_error; 346 + bytes = map->active.data.in; 397 347 for (i = 0; i < (1 << PVCALLS_RING_ORDER); i++) 398 348 map->active.ring->ref[i] = gnttab_grant_foreign_access( 399 349 pvcalls_front_dev->otherend_id, ··· 394 360 map->active.ref = gnttab_grant_foreign_access( 395 361 pvcalls_front_dev->otherend_id, 396 362 pfn_to_gfn(virt_to_pfn((void *)map->active.ring)), 0); 397 - 398 - map->active.data.in = bytes; 399 - map->active.data.out = bytes + 400 - XEN_FLEX_RING_SIZE(PVCALLS_RING_ORDER); 401 363 402 364 ret = xenbus_alloc_evtchn(pvcalls_front_dev, evtchn); 403 365 if (ret) ··· 415 385 out_error: 416 386 if (*evtchn >= 0) 417 387 xenbus_free_evtchn(pvcalls_front_dev, *evtchn); 418 - free_pages((unsigned long)map->active.data.in, PVCALLS_RING_ORDER); 419 - free_page((unsigned long)map->active.ring); 420 388 return ret; 421 389 } 422 390 ··· 434 406 return PTR_ERR(map); 435 407 436 408 bedata = dev_get_drvdata(&pvcalls_front_dev->dev); 409 + ret = alloc_active_ring(map); 410 + if (ret < 0) { 411 + pvcalls_exit_sock(sock); 412 + return ret; 413 + } 437 414 438 415 spin_lock(&bedata->socket_lock); 439 416 ret = get_request(bedata, &req_id); 440 417 if (ret < 0) { 441 418 spin_unlock(&bedata->socket_lock); 419 + free_active_ring(map); 442 420 pvcalls_exit_sock(sock); 443 421 return ret; 444 422 } 445 423 ret = create_active(map, &evtchn); 446 424 if (ret < 0) { 447 425 spin_unlock(&bedata->socket_lock); 426 + free_active_ring(map); 448 427 pvcalls_exit_sock(sock); 449 428 return ret; 450 429 } ··· 504 469 virt_mb(); 505 470 506 471 size = pvcalls_queued(prod, cons, array_size); 507 - if (size >= array_size) 472 + if (size > array_size) 508 473 return 
-EINVAL; 474 + if (size == array_size) 475 + return 0; 509 476 if (len > array_size - size) 510 477 len = array_size - size; 511 478 ··· 597 560 error = intf->in_error; 598 561 /* get pointers before reading from the ring */ 599 562 virt_rmb(); 600 - if (error < 0) 601 - return error; 602 563 603 564 size = pvcalls_queued(prod, cons, array_size); 604 565 masked_prod = pvcalls_mask(prod, array_size); 605 566 masked_cons = pvcalls_mask(cons, array_size); 606 567 607 568 if (size == 0) 608 - return 0; 569 + return error ?: size; 609 570 610 571 if (len > size) 611 572 len = size; ··· 815 780 } 816 781 } 817 782 783 + map2 = kzalloc(sizeof(*map2), GFP_KERNEL); 784 + if (map2 == NULL) { 785 + clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT, 786 + (void *)&map->passive.flags); 787 + pvcalls_exit_sock(sock); 788 + return -ENOMEM; 789 + } 790 + ret = alloc_active_ring(map2); 791 + if (ret < 0) { 792 + clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT, 793 + (void *)&map->passive.flags); 794 + kfree(map2); 795 + pvcalls_exit_sock(sock); 796 + return ret; 797 + } 818 798 spin_lock(&bedata->socket_lock); 819 799 ret = get_request(bedata, &req_id); 820 800 if (ret < 0) { 821 801 clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT, 822 802 (void *)&map->passive.flags); 823 803 spin_unlock(&bedata->socket_lock); 804 + free_active_ring(map2); 805 + kfree(map2); 824 806 pvcalls_exit_sock(sock); 825 807 return ret; 826 808 } 827 - map2 = kzalloc(sizeof(*map2), GFP_ATOMIC); 828 - if (map2 == NULL) { 829 - clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT, 830 - (void *)&map->passive.flags); 831 - spin_unlock(&bedata->socket_lock); 832 - pvcalls_exit_sock(sock); 833 - return -ENOMEM; 834 - } 809 + 835 810 ret = create_active(map2, &evtchn); 836 811 if (ret < 0) { 812 + free_active_ring(map2); 837 813 kfree(map2); 838 814 clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT, 839 815 (void *)&map->passive.flags); ··· 885 839 886 840 received: 887 841 map2->sock = newsock; 888 - newsock->sk = kzalloc(sizeof(*newsock->sk), GFP_KERNEL); 842 + newsock->sk = sk_alloc(sock_net(sock->sk), PF_INET, GFP_KERNEL, &pvcalls_proto, false); 889 843 if (!newsock->sk) { 890 844 bedata->rsp[req_id].req_id = PVCALLS_INVALID_ID; 891 845 map->passive.inflight_req_id = PVCALLS_INVALID_ID; ··· 1078 1032 spin_lock(&bedata->socket_lock); 1079 1033 list_del(&map->list); 1080 1034 spin_unlock(&bedata->socket_lock); 1081 - if (READ_ONCE(map->passive.inflight_req_id) != 1082 - PVCALLS_INVALID_ID) { 1035 + if (READ_ONCE(map->passive.inflight_req_id) != PVCALLS_INVALID_ID && 1036 + READ_ONCE(map->passive.inflight_req_id) != 0) { 1083 1037 pvcalls_front_free_map(bedata, 1084 1038 map->passive.accept_map); 1085 1039 }
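Editor's note: the __write_ring change above separates two conditions the old size >= array_size test conflated. With free-running producer/consumer counters, prod - cons == array_size is a legitimately full ring (write nothing), while prod - cons > array_size can only mean corrupted indexes. A sketch of the arithmetic, assuming a power-of-two ring size and 32-bit counters:

static int ring_used(u32 prod, u32 cons, u32 array_size)
{
        u32 used = prod - cons; /* unsigned subtraction handles wrap */

        if (used > array_size)  /* impossible unless the peer is broken */
                return -EINVAL;
        return used;            /* used == array_size simply means full */
}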
+2 -2
fs/afs/flock.c
··· 208 208 /* The new front of the queue now owns the state variables. */ 209 209 next = list_entry(vnode->pending_locks.next, 210 210 struct file_lock, fl_u.afs.link); 211 - vnode->lock_key = afs_file_key(next->fl_file); 211 + vnode->lock_key = key_get(afs_file_key(next->fl_file)); 212 212 vnode->lock_type = (next->fl_type == F_RDLCK) ? AFS_LOCK_READ : AFS_LOCK_WRITE; 213 213 vnode->lock_state = AFS_VNODE_LOCK_WAITING_FOR_CB; 214 214 goto again; ··· 413 413 /* The new front of the queue now owns the state variables. */ 414 414 next = list_entry(vnode->pending_locks.next, 415 415 struct file_lock, fl_u.afs.link); 416 - vnode->lock_key = afs_file_key(next->fl_file); 416 + vnode->lock_key = key_get(afs_file_key(next->fl_file)); 417 417 vnode->lock_type = (next->fl_type == F_RDLCK) ? AFS_LOCK_READ : AFS_LOCK_WRITE; 418 418 vnode->lock_state = AFS_VNODE_LOCK_WAITING_FOR_CB; 419 419 afs_lock_may_be_available(vnode);
+2 -1
fs/afs/inode.c
··· 414 414 } else if (test_bit(AFS_VNODE_DELETED, &vnode->flags)) { 415 415 valid = true; 416 416 } else { 417 - vnode->cb_s_break = vnode->cb_interest->server->cb_s_break; 418 417 vnode->cb_v_break = vnode->volume->cb_v_break; 419 418 valid = false; 420 419 } ··· 545 546 #endif 546 547 547 548 afs_put_permits(rcu_access_pointer(vnode->permit_cache)); 549 + key_put(vnode->lock_key); 550 + vnode->lock_key = NULL; 548 551 _leave(""); 549 552 } 550 553
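Editor's note: the flock.c and inode.c hunks above are two halves of one lifetime fix. vnode->lock_key now owns a reference, taken with key_get() at each assignment, and the inode teardown path drops it with key_put(). The general shape of the pattern, with generic names (struct obj is hypothetical, not the AFS code's exact flow):

static void cache_key(struct obj *o, struct key *k)
{
        key_put(o->cached_key);         /* key_put(NULL) is a no-op */
        o->cached_key = key_get(k);     /* take our ref before publishing */
}

static void obj_teardown(struct obj *o)
{
        key_put(o->cached_key);
        o->cached_key = NULL;
}

Caching a pointer without taking a reference, as the old code did, only works until the original holder drops theirs.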
+11
fs/afs/protocol_yfs.h
··· 161 161 struct yfs_xdr_u64 max_quota; 162 162 struct yfs_xdr_u64 file_quota; 163 163 } __packed; 164 + 165 + enum yfs_lock_type { 166 + yfs_LockNone = -1, 167 + yfs_LockRead = 0, 168 + yfs_LockWrite = 1, 169 + yfs_LockExtend = 2, 170 + yfs_LockRelease = 3, 171 + yfs_LockMandatoryRead = 0x100, 172 + yfs_LockMandatoryWrite = 0x101, 173 + yfs_LockMandatoryExtend = 0x102, 174 + };
+42 -11
fs/afs/rxrpc.c
··· 23 23 static void afs_wake_up_call_waiter(struct sock *, struct rxrpc_call *, unsigned long); 24 24 static long afs_wait_for_call_to_complete(struct afs_call *, struct afs_addr_cursor *); 25 25 static void afs_wake_up_async_call(struct sock *, struct rxrpc_call *, unsigned long); 26 + static void afs_delete_async_call(struct work_struct *); 26 27 static void afs_process_async_call(struct work_struct *); 27 28 static void afs_rx_new_call(struct sock *, struct rxrpc_call *, unsigned long); 28 29 static void afs_rx_discard_new_call(struct rxrpc_call *, unsigned long); ··· 204 203 } 205 204 } 206 205 206 + static struct afs_call *afs_get_call(struct afs_call *call, 207 + enum afs_call_trace why) 208 + { 209 + int u = atomic_inc_return(&call->usage); 210 + 211 + trace_afs_call(call, why, u, 212 + atomic_read(&call->net->nr_outstanding_calls), 213 + __builtin_return_address(0)); 214 + return call; 215 + } 216 + 207 217 /* 208 218 * Queue the call for actual work. 209 219 */ 210 220 static void afs_queue_call_work(struct afs_call *call) 211 221 { 212 222 if (call->type->work) { 213 - int u = atomic_inc_return(&call->usage); 214 - 215 - trace_afs_call(call, afs_call_trace_work, u, 216 - atomic_read(&call->net->nr_outstanding_calls), 217 - __builtin_return_address(0)); 218 - 219 223 INIT_WORK(&call->work, call->type->work); 220 224 225 + afs_get_call(call, afs_call_trace_work); 221 226 if (!queue_work(afs_wq, &call->work)) 222 227 afs_put_call(call); 223 228 } ··· 405 398 } 406 399 } 407 400 401 + /* If the call is going to be asynchronous, we need an extra ref for 402 + * the call to hold itself so the caller need not hang on to its ref. 403 + */ 404 + if (call->async) 405 + afs_get_call(call, afs_call_trace_get); 406 + 408 407 /* create a call */ 409 408 rxcall = rxrpc_kernel_begin_call(call->net->socket, srx, call->key, 410 409 (unsigned long)call, ··· 451 438 goto error_do_abort; 452 439 } 453 440 454 - /* at this point, an async call may no longer exist as it may have 455 - * already completed */ 456 - if (call->async) 441 + /* Note that at this point, we may have received the reply or an abort 442 + * - and an asynchronous call may already have completed. 443 + */ 444 + if (call->async) { 445 + afs_put_call(call); 457 446 return -EINPROGRESS; 447 + } 458 448 459 449 return afs_wait_for_call_to_complete(call, ac); 460 450 461 451 error_do_abort: 462 - call->state = AFS_CALL_COMPLETE; 463 452 if (ret != -ECONNABORTED) { 464 453 rxrpc_kernel_abort_call(call->net->socket, rxcall, 465 454 RX_USER_ABORT, ret, "KSD"); ··· 478 463 error_kill_call: 479 464 if (call->type->done) 480 465 call->type->done(call); 481 - afs_put_call(call); 466 + 467 + /* We need to dispose of the extra ref we grabbed for an async call. 468 + * The call, however, might be queued on afs_async_calls and we need to 469 + * make sure we don't get any more notifications that might requeue it. 470 + */ 471 + if (call->rxcall) { 472 + rxrpc_kernel_end_call(call->net->socket, call->rxcall); 473 + call->rxcall = NULL; 474 + } 475 + if (call->async) { 476 + if (cancel_work_sync(&call->async_work)) 477 + afs_put_call(call); 478 + afs_put_call(call); 479 + } 480 + 482 481 ac->error = ret; 482 + call->state = AFS_CALL_COMPLETE; 483 + afs_put_call(call); 483 484 _leave(" = %d", ret); 484 485 return ret; 485 486 }
+1 -3
fs/afs/server_list.c
··· 42 42 if (vldb->fs_mask[i] & type_mask) 43 43 nr_servers++; 44 44 45 - slist = kzalloc(sizeof(struct afs_server_list) + 46 - sizeof(struct afs_server_entry) * nr_servers, 47 - GFP_KERNEL); 45 + slist = kzalloc(struct_size(slist, servers, nr_servers), GFP_KERNEL); 48 46 if (!slist) 49 47 goto error; 50 48
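Editor's note: struct_size() replaces the open-coded sizeof(struct ...) + n * sizeof(elem) arithmetic for structures ending in a flexible array member. Beyond brevity, it checks the multiplication and addition for overflow and saturates to SIZE_MAX, so an oversized count produces a failed allocation rather than an undersized buffer:

#include <linux/overflow.h>

/* Equivalent to sizeof(*slist) + nr_servers * sizeof(slist->servers[0]),
 * but saturating on overflow; kzalloc(SIZE_MAX, ...) then fails cleanly. */
slist = kzalloc(struct_size(slist, servers, nr_servers), GFP_KERNEL);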
+1 -1
fs/afs/yfsclient.c
··· 803 803 bp = xdr_encode_YFSFid(bp, &vnode->fid); 804 804 bp = xdr_encode_string(bp, name, namesz); 805 805 bp = xdr_encode_YFSStoreStatus_mode(bp, mode); 806 - bp = xdr_encode_u32(bp, 0); /* ViceLockType */ 806 + bp = xdr_encode_u32(bp, yfs_LockNone); /* ViceLockType */ 807 807 yfs_check_req(call, bp); 808 808 809 809 afs_use_fs_server(call, fc->cbi);
+18 -10
fs/block_dev.c
··· 104 104 } 105 105 EXPORT_SYMBOL(invalidate_bdev); 106 106 107 + static void set_init_blocksize(struct block_device *bdev) 108 + { 109 + unsigned bsize = bdev_logical_block_size(bdev); 110 + loff_t size = i_size_read(bdev->bd_inode); 111 + 112 + while (bsize < PAGE_SIZE) { 113 + if (size & bsize) 114 + break; 115 + bsize <<= 1; 116 + } 117 + bdev->bd_block_size = bsize; 118 + bdev->bd_inode->i_blkbits = blksize_bits(bsize); 119 + } 120 + 107 121 int set_blocksize(struct block_device *bdev, int size) 108 122 { 109 123 /* Size must be a power of two, and between 512 and PAGE_SIZE */ ··· 1445 1431 1446 1432 void bd_set_size(struct block_device *bdev, loff_t size) 1447 1433 { 1448 - unsigned bsize = bdev_logical_block_size(bdev); 1449 - 1450 1434 inode_lock(bdev->bd_inode); 1451 1435 i_size_write(bdev->bd_inode, size); 1452 1436 inode_unlock(bdev->bd_inode); 1453 - while (bsize < PAGE_SIZE) { 1454 - if (size & bsize) 1455 - break; 1456 - bsize <<= 1; 1457 - } 1458 - bdev->bd_block_size = bsize; 1459 - bdev->bd_inode->i_blkbits = blksize_bits(bsize); 1460 1437 } 1461 1438 EXPORT_SYMBOL(bd_set_size); 1462 1439 ··· 1524 1519 } 1525 1520 } 1526 1521 1527 - if (!ret) 1522 + if (!ret) { 1528 1523 bd_set_size(bdev,(loff_t)get_capacity(disk)<<9); 1524 + set_init_blocksize(bdev); 1525 + } 1529 1526 1530 1527 /* 1531 1528 * If the device is invalidated, rescan partition ··· 1562 1555 goto out_clear; 1563 1556 } 1564 1557 bd_set_size(bdev, (loff_t)bdev->bd_part->nr_sects << 9); 1558 + set_init_blocksize(bdev); 1565 1559 } 1566 1560 1567 1561 if (bdev->bd_bdi == &noop_backing_dev_info)
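Editor's note: set_init_blocksize() above keeps doubling the block size while it still divides the device capacity, capped at one page. The size & bsize test works because the loop clears candidates from the bottom up: once every bit below bsize is known to be clear, a set bsize bit means the capacity is an odd multiple of bsize and cannot be a multiple of 2 * bsize. A worked example:

/* Capacity 6144 bytes (0x1800), logical block 512, PAGE_SIZE 4096:
 *   bsize  512: 6144 & 512  == 0 -> double to 1024
 *   bsize 1024: 6144 & 1024 == 0 -> double to 2048
 *   bsize 2048: 6144 & 2048 != 0 -> stop, keep 2048
 * 6144 = 3 * 2048, so 2048 divides the capacity but 4096 would not. */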
+9 -7
fs/btrfs/ctree.c
··· 1016 1016 parent_start = parent->start; 1017 1017 1018 1018 /* 1019 - * If we are COWing a node/leaf from the extent, chunk or device trees, 1020 - * make sure that we do not finish block group creation of pending block 1021 - * groups. We do this to avoid a deadlock. 1019 + * If we are COWing a node/leaf from the extent, chunk, device or free 1020 + * space trees, make sure that we do not finish block group creation of 1021 + * pending block groups. We do this to avoid a deadlock. 1022 1022 * COWing can result in allocation of a new chunk, and flushing pending 1023 1023 * block groups (btrfs_create_pending_block_groups()) can be triggered 1024 1024 * when finishing allocation of a new chunk. Creation of a pending block 1025 - * group modifies the extent, chunk and device trees, therefore we could 1026 - * deadlock with ourselves since we are holding a lock on an extent 1027 - * buffer that btrfs_create_pending_block_groups() may try to COW later. 1025 + * group modifies the extent, chunk, device and free space trees, 1026 + * therefore we could deadlock with ourselves since we are holding a 1027 + * lock on an extent buffer that btrfs_create_pending_block_groups() may 1028 + * try to COW later. 1028 1029 */ 1029 1030 if (root == fs_info->extent_root || 1030 1031 root == fs_info->chunk_root || 1031 - root == fs_info->dev_root) 1032 + root == fs_info->dev_root || 1033 + root == fs_info->free_space_root) 1032 1034 trans->can_flush_pending_bgs = false; 1033 1035 1034 1036 cow = btrfs_alloc_tree_block(trans, root, parent_start,
+7
fs/btrfs/ctree.h
··· 35 35 struct btrfs_trans_handle; 36 36 struct btrfs_transaction; 37 37 struct btrfs_pending_snapshot; 38 + struct btrfs_delayed_ref_root; 38 39 extern struct kmem_cache *btrfs_trans_handle_cachep; 39 40 extern struct kmem_cache *btrfs_bit_radix_cachep; 40 41 extern struct kmem_cache *btrfs_path_cachep; ··· 787 786 * main phase. The fs_info::balance_ctl is initialized. 788 787 */ 789 788 BTRFS_FS_BALANCE_RUNNING, 789 + 790 + /* Indicate that the cleaner thread is awake and doing something. */ 791 + BTRFS_FS_CLEANER_RUNNING, 790 792 }; 791 793 792 794 struct btrfs_fs_info { ··· 2665 2661 unsigned long count); 2666 2662 int btrfs_async_run_delayed_refs(struct btrfs_fs_info *fs_info, 2667 2663 unsigned long count, u64 transid, int wait); 2664 + void btrfs_cleanup_ref_head_accounting(struct btrfs_fs_info *fs_info, 2665 + struct btrfs_delayed_ref_root *delayed_refs, 2666 + struct btrfs_delayed_ref_head *head); 2668 2667 int btrfs_lookup_data_extent(struct btrfs_fs_info *fs_info, u64 start, u64 len); 2669 2668 int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans, 2670 2669 struct btrfs_fs_info *fs_info, u64 bytenr,
+12
fs/btrfs/disk-io.c
··· 1682 1682 while (1) { 1683 1683 again = 0; 1684 1684 1685 + set_bit(BTRFS_FS_CLEANER_RUNNING, &fs_info->flags); 1686 + 1685 1687 /* Make the cleaner go to sleep early. */ 1686 1688 if (btrfs_need_cleaner_sleep(fs_info)) 1687 1689 goto sleep; ··· 1730 1728 */ 1731 1729 btrfs_delete_unused_bgs(fs_info); 1732 1730 sleep: 1731 + clear_bit(BTRFS_FS_CLEANER_RUNNING, &fs_info->flags); 1733 1732 if (kthread_should_park()) 1734 1733 kthread_parkme(); 1735 1734 if (kthread_should_stop()) ··· 4204 4201 spin_lock(&fs_info->ordered_root_lock); 4205 4202 } 4206 4203 spin_unlock(&fs_info->ordered_root_lock); 4204 + 4205 + /* 4206 + * We need this here because if we've been flipped read-only we won't 4207 + * get sync() from the umount, so we need to make sure any ordered 4208 + * extents that haven't had their dirty pages IO start writeout yet 4209 + * actually get run and error out properly. 4210 + */ 4211 + btrfs_wait_ordered_roots(fs_info, U64_MAX, 0, (u64)-1); 4207 4212 } 4208 4213 4209 4214 static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans, ··· 4276 4265 if (pin_bytes) 4277 4266 btrfs_pin_extent(fs_info, head->bytenr, 4278 4267 head->num_bytes, 1); 4268 + btrfs_cleanup_ref_head_accounting(fs_info, delayed_refs, head); 4279 4269 btrfs_put_delayed_ref_head(head); 4280 4270 cond_resched(); 4281 4271 spin_lock(&delayed_refs->lock);
+14 -7
fs/btrfs/extent-tree.c
··· 2456 2456 return ret ? ret : 1; 2457 2457 } 2458 2458 2459 - static void cleanup_ref_head_accounting(struct btrfs_trans_handle *trans, 2460 - struct btrfs_delayed_ref_head *head) 2459 + void btrfs_cleanup_ref_head_accounting(struct btrfs_fs_info *fs_info, 2460 + struct btrfs_delayed_ref_root *delayed_refs, 2461 + struct btrfs_delayed_ref_head *head) 2461 2462 { 2462 - struct btrfs_fs_info *fs_info = trans->fs_info; 2463 - struct btrfs_delayed_ref_root *delayed_refs = 2464 - &trans->transaction->delayed_refs; 2465 2463 int nr_items = 1; /* Dropping this ref head update. */ 2466 2464 2467 2465 if (head->total_ref_mod < 0) { ··· 2542 2544 } 2543 2545 } 2544 2546 2545 - cleanup_ref_head_accounting(trans, head); 2547 + btrfs_cleanup_ref_head_accounting(fs_info, delayed_refs, head); 2546 2548 2547 2549 trace_run_delayed_ref_head(fs_info, head, 0); 2548 2550 btrfs_delayed_ref_unlock(head); ··· 4952 4954 ret = 0; 4953 4955 break; 4954 4956 case COMMIT_TRANS: 4957 + /* 4958 + * If we have pending delayed iputs then we could free up a 4959 + * bunch of pinned space, so make sure we run the iputs before 4960 + * we do our pinned bytes check below. 4961 + */ 4962 + mutex_lock(&fs_info->cleaner_delayed_iput_mutex); 4963 + btrfs_run_delayed_iputs(fs_info); 4964 + mutex_unlock(&fs_info->cleaner_delayed_iput_mutex); 4965 + 4955 4966 ret = may_commit_transaction(fs_info, space_info); 4956 4967 break; 4957 4968 default: ··· 7195 7188 if (head->must_insert_reserved) 7196 7189 ret = 1; 7197 7190 7198 - cleanup_ref_head_accounting(trans, head); 7191 + btrfs_cleanup_ref_head_accounting(trans->fs_info, delayed_refs, head); 7199 7192 mutex_unlock(&head->mutex); 7200 7193 btrfs_put_delayed_ref_head(head); 7201 7194 return ret;
+2 -3
fs/btrfs/inode.c
··· 3129 3129 /* once for the tree */ 3130 3130 btrfs_put_ordered_extent(ordered_extent); 3131 3131 3132 - /* Try to release some metadata so we don't get an OOM but don't wait */ 3133 - btrfs_btree_balance_dirty_nodelay(fs_info); 3134 - 3135 3132 return ret; 3136 3133 } 3137 3134 ··· 3251 3254 ASSERT(list_empty(&binode->delayed_iput)); 3252 3255 list_add_tail(&binode->delayed_iput, &fs_info->delayed_iputs); 3253 3256 spin_unlock(&fs_info->delayed_iput_lock); 3257 + if (!test_bit(BTRFS_FS_CLEANER_RUNNING, &fs_info->flags)) 3258 + wake_up_process(fs_info->cleaner_kthread); 3254 3259 } 3255 3260 3256 3261 void btrfs_run_delayed_iputs(struct btrfs_fs_info *fs_info)
+43 -6
fs/btrfs/ioctl.c
··· 3221 3221 inode_lock_nested(inode2, I_MUTEX_CHILD); 3222 3222 } 3223 3223 3224 + static void btrfs_double_extent_unlock(struct inode *inode1, u64 loff1, 3225 + struct inode *inode2, u64 loff2, u64 len) 3226 + { 3227 + unlock_extent(&BTRFS_I(inode1)->io_tree, loff1, loff1 + len - 1); 3228 + unlock_extent(&BTRFS_I(inode2)->io_tree, loff2, loff2 + len - 1); 3229 + } 3230 + 3231 + static void btrfs_double_extent_lock(struct inode *inode1, u64 loff1, 3232 + struct inode *inode2, u64 loff2, u64 len) 3233 + { 3234 + if (inode1 < inode2) { 3235 + swap(inode1, inode2); 3236 + swap(loff1, loff2); 3237 + } else if (inode1 == inode2 && loff2 < loff1) { 3238 + swap(loff1, loff2); 3239 + } 3240 + lock_extent(&BTRFS_I(inode1)->io_tree, loff1, loff1 + len - 1); 3241 + lock_extent(&BTRFS_I(inode2)->io_tree, loff2, loff2 + len - 1); 3242 + } 3243 + 3224 3244 static int btrfs_extent_same_range(struct inode *src, u64 loff, u64 olen, 3225 3245 struct inode *dst, u64 dst_loff) 3226 3246 { ··· 3262 3242 return -EINVAL; 3263 3243 3264 3244 /* 3265 - * Lock destination range to serialize with concurrent readpages(). 3245 + * Lock destination range to serialize with concurrent readpages() and 3246 + * source range to serialize with relocation. 3266 3247 */ 3267 - lock_extent(&BTRFS_I(dst)->io_tree, dst_loff, dst_loff + len - 1); 3248 + btrfs_double_extent_lock(src, loff, dst, dst_loff, len); 3268 3249 ret = btrfs_clone(src, dst, loff, olen, len, dst_loff, 1); 3269 - unlock_extent(&BTRFS_I(dst)->io_tree, dst_loff, dst_loff + len - 1); 3250 + btrfs_double_extent_unlock(src, loff, dst, dst_loff, len); 3270 3251 3271 3252 return ret; 3272 3253 } ··· 3926 3905 len = ALIGN(src->i_size, bs) - off; 3927 3906 3928 3907 if (destoff > inode->i_size) { 3908 + const u64 wb_start = ALIGN_DOWN(inode->i_size, bs); 3909 + 3929 3910 ret = btrfs_cont_expand(inode, inode->i_size, destoff); 3911 + if (ret) 3912 + return ret; 3913 + /* 3914 + * We may have truncated the last block if the inode's size is 3915 + * not sector size aligned, so we need to wait for writeback to 3916 + * complete before proceeding further, otherwise we can race 3917 + * with cloning and attempt to increment a reference to an 3918 + * extent that no longer exists (writeback completed right after 3919 + * we found the previous extent covering eof and before we 3920 + * attempted to increment its reference count). 3921 + */ 3922 + ret = btrfs_wait_ordered_range(inode, wb_start, 3923 + destoff - wb_start); 3930 3924 if (ret) 3931 3925 return ret; 3932 3926 } 3933 3927 3934 3928 /* 3935 - * Lock destination range to serialize with concurrent readpages(). 3929 + * Lock destination range to serialize with concurrent readpages() and 3930 + * source range to serialize with relocation. 3936 3931 */ 3937 - lock_extent(&BTRFS_I(inode)->io_tree, destoff, destoff + len - 1); 3932 + btrfs_double_extent_lock(src, off, inode, destoff, len); 3938 3933 ret = btrfs_clone(src, inode, off, olen, len, destoff, 0); 3939 - unlock_extent(&BTRFS_I(inode)->io_tree, destoff, destoff + len - 1); 3934 + btrfs_double_extent_unlock(src, off, inode, destoff, len); 3940 3935 /* 3941 3936 * Truncate page cache pages so that future reads will see the cloned 3942 3937 * data immediately and not the previous data.
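Editor's note: btrfs_double_extent_lock() above avoids ABBA deadlocks by imposing a total order on the pair before locking: the two inodes are ordered by pointer value (and, for same-inode dedupe, the ranges by offset), so any two tasks locking the same pair always acquire in the same sequence. The direction of the order is arbitrary; only its consistency matters. A generic sketch with plain mutexes:

static void lock_two(struct mutex *a, struct mutex *b)
{
        if (a == b) {
                mutex_lock(a);
                return;
        }
        if (a > b)
                swap(a, b);     /* pick one fixed order for every caller */
        mutex_lock(a);
        mutex_lock_nested(b, SINGLE_DEPTH_NESTING);
}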
+12
fs/btrfs/volumes.c
··· 7825 7825 ret = -EUCLEAN; 7826 7826 goto out; 7827 7827 } 7828 + 7829 + /* It's possible this device is a dummy for seed device */ 7830 + if (dev->disk_total_bytes == 0) { 7831 + dev = find_device(fs_info->fs_devices->seed, devid, NULL); 7832 + if (!dev) { 7833 + btrfs_err(fs_info, "failed to find seed devid %llu", 7834 + devid); 7835 + ret = -EUCLEAN; 7836 + goto out; 7837 + } 7838 + } 7839 + 7828 7840 if (physical_offset + physical_len > dev->disk_total_bytes) { 7829 7841 btrfs_err(fs_info, 7830 7842 "dev extent devid %llu physical offset %llu len %llu is beyond device boundary %llu",
+1 -4
fs/ceph/addr.c
··· 1494 1494 if (err < 0 || off >= i_size_read(inode)) { 1495 1495 unlock_page(page); 1496 1496 put_page(page); 1497 - if (err == -ENOMEM) 1498 - ret = VM_FAULT_OOM; 1499 - else 1500 - ret = VM_FAULT_SIGBUS; 1497 + ret = vmf_error(err); 1501 1498 goto out_inline; 1502 1499 } 1503 1500 if (err < PAGE_SIZE)
+2 -2
fs/ceph/super.c
··· 530 530 seq_putc(m, ','); 531 531 pos = m->count; 532 532 533 - ret = ceph_print_client_options(m, fsc->client); 533 + ret = ceph_print_client_options(m, fsc->client, false); 534 534 if (ret) 535 535 return ret; 536 536 ··· 640 640 opt = NULL; /* fsc->client now owns this */ 641 641 642 642 fsc->client->extra_mon_dispatch = extra_mon_dispatch; 643 - fsc->client->osdc.abort_on_full = true; 643 + ceph_set_opt(fsc->client, ABORT_ON_FULL); 644 644 645 645 if (!fsopt->mds_namespace) { 646 646 ceph_monc_want_map(&fsc->client->monc, CEPH_SUB_MDSMAP,
+1 -1
fs/cifs/cifsfs.h
··· 150 150 extern const struct export_operations cifs_export_ops; 151 151 #endif /* CONFIG_CIFS_NFSD_EXPORT */ 152 152 153 - #define CIFS_VERSION "2.15" 153 + #define CIFS_VERSION "2.16" 154 154 #endif /* _CIFSFS_H */
+20
fs/cifs/cifsglob.h
··· 1438 1438 int mid_state; /* wish this were enum but can not pass to wait_event */ 1439 1439 unsigned int mid_flags; 1440 1440 __le16 command; /* smb command code */ 1441 + unsigned int optype; /* operation type */ 1441 1442 bool large_buf:1; /* if valid response, is pointer to large buf */ 1442 1443 bool multiRsp:1; /* multiple trans2 responses for one request */ 1443 1444 bool multiEnd:1; /* both received */ ··· 1573 1572 kfree(param[i].node_name); 1574 1573 } 1575 1574 kfree(param); 1575 + } 1576 + 1577 + static inline bool is_interrupt_error(int error) 1578 + { 1579 + switch (error) { 1580 + case -EINTR: 1581 + case -ERESTARTSYS: 1582 + case -ERESTARTNOHAND: 1583 + case -ERESTARTNOINTR: 1584 + return true; 1585 + } 1586 + return false; 1587 + } 1588 + 1589 + static inline bool is_retryable_error(int error) 1590 + { 1591 + if (is_interrupt_error(error) || error == -EAGAIN) 1592 + return true; 1593 + return false; 1576 1594 } 1577 1595 1578 1596 #define MID_FREE 0
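Editor's note: is_interrupt_error() and is_retryable_error() give the write paths a single place to classify failures: a pending signal or -EAGAIN means the data is still intact and the operation should simply be retried later, while anything else is a hard error that must be made visible through the mapping. The fs/cifs/file.c hunks later in this section apply exactly that split; in miniature (write_one() is a hypothetical writer):

rc = write_one(page);
if (is_retryable_error(rc))
        redirty_page_for_writepage(wbc, page);  /* try again later */
else if (rc != 0)
        mapping_set_error(page->mapping, rc);   /* sticky, user-visible */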
+20 -10
fs/cifs/cifssmb.c
··· 128 128 int rc; 129 129 struct dfs_cache_tgt_list tl; 130 130 struct dfs_cache_tgt_iterator *it = NULL; 131 - char tree[MAX_TREE_SIZE + 1]; 131 + char *tree; 132 132 const char *tcp_host; 133 133 size_t tcp_host_len; 134 134 const char *dfs_host; 135 135 size_t dfs_host_len; 136 136 137 + tree = kzalloc(MAX_TREE_SIZE, GFP_KERNEL); 138 + if (!tree) 139 + return -ENOMEM; 140 + 137 141 if (tcon->ipc) { 138 - snprintf(tree, sizeof(tree), "\\\\%s\\IPC$", 142 + snprintf(tree, MAX_TREE_SIZE, "\\\\%s\\IPC$", 139 143 tcon->ses->server->hostname); 140 - return CIFSTCon(0, tcon->ses, tree, tcon, nlsc); 144 + rc = CIFSTCon(0, tcon->ses, tree, tcon, nlsc); 145 + goto out; 141 146 } 142 147 143 - if (!tcon->dfs_path) 144 - return CIFSTCon(0, tcon->ses, tcon->treeName, tcon, nlsc); 148 + if (!tcon->dfs_path) { 149 + rc = CIFSTCon(0, tcon->ses, tcon->treeName, tcon, nlsc); 150 + goto out; 151 + } 145 152 146 153 rc = dfs_cache_noreq_find(tcon->dfs_path + 1, NULL, &tl); 147 154 if (rc) 148 - return rc; 155 + goto out; 149 156 150 157 extract_unc_hostname(tcon->ses->server->hostname, &tcp_host, 151 158 &tcp_host_len); ··· 172 165 continue; 173 166 } 174 167 175 - snprintf(tree, sizeof(tree), "\\%s", tgt); 168 + snprintf(tree, MAX_TREE_SIZE, "\\%s", tgt); 176 169 177 170 rc = CIFSTCon(0, tcon->ses, tree, tcon, nlsc); 178 171 if (!rc) ··· 189 182 rc = -ENOENT; 190 183 } 191 184 dfs_cache_free_tgts(&tl); 185 + out: 186 + kfree(tree); 192 187 return rc; 193 188 } 194 189 #else ··· 2123 2114 2124 2115 for (j = 0; j < nr_pages; j++) { 2125 2116 unlock_page(wdata2->pages[j]); 2126 - if (rc != 0 && rc != -EAGAIN) { 2117 + if (rc != 0 && !is_retryable_error(rc)) { 2127 2118 SetPageError(wdata2->pages[j]); 2128 2119 end_page_writeback(wdata2->pages[j]); 2129 2120 put_page(wdata2->pages[j]); ··· 2132 2123 2133 2124 if (rc) { 2134 2125 kref_put(&wdata2->refcount, cifs_writedata_release); 2135 - if (rc == -EAGAIN) 2126 + if (is_retryable_error(rc)) 2136 2127 continue; 2137 2128 break; 2138 2129 } ··· 2141 2132 i += nr_pages; 2142 2133 } while (i < wdata->nr_pages); 2143 2134 2144 - mapping_set_error(inode->i_mapping, rc); 2135 + if (rc != 0 && !is_retryable_error(rc)) 2136 + mapping_set_error(inode->i_mapping, rc); 2145 2137 kref_put(&wdata->refcount, cifs_writedata_release); 2146 2138 } 2147 2139
+4 -3
fs/cifs/connect.c
··· 433 433 kfree(server->hostname); 434 434 435 435 server->hostname = extract_hostname(name); 436 - if (!server->hostname) { 437 - cifs_dbg(FYI, "%s: failed to extract hostname from target: %d\n", 438 - __func__, -ENOMEM); 436 + if (IS_ERR(server->hostname)) { 437 + cifs_dbg(FYI, 438 + "%s: failed to extract hostname from target: %ld\n", 439 + __func__, PTR_ERR(server->hostname)); 439 440 } 440 441 } 441 442
+1
fs/cifs/dfs_cache.c
··· 776 776 it->it_name = kstrndup(t->t_name, strlen(t->t_name), 777 777 GFP_KERNEL); 778 778 if (!it->it_name) { 779 + kfree(it); 779 780 rc = -ENOMEM; 780 781 goto err_free_it; 781 782 }
+35 -10
fs/cifs/file.c
··· 733 733 734 734 if (can_flush) { 735 735 rc = filemap_write_and_wait(inode->i_mapping); 736 - mapping_set_error(inode->i_mapping, rc); 736 + if (!is_interrupt_error(rc)) 737 + mapping_set_error(inode->i_mapping, rc); 737 738 738 739 if (tcon->unix_ext) 739 740 rc = cifs_get_inode_info_unix(&inode, full_path, ··· 1133 1132 1134 1133 /* 1135 1134 * Accessing maxBuf is racy with cifs_reconnect - need to store value 1136 - * and check it for zero before using. 1135 + * and check it before using. 1137 1136 */ 1138 1137 max_buf = tcon->ses->server->maxBuf; 1139 - if (!max_buf) { 1138 + if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE))) { 1140 1139 free_xid(xid); 1141 1140 return -EINVAL; 1142 1141 } 1143 1142 1143 + BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) > 1144 + PAGE_SIZE); 1145 + max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr), 1146 + PAGE_SIZE); 1144 1147 max_num = (max_buf - sizeof(struct smb_hdr)) / 1145 1148 sizeof(LOCKING_ANDX_RANGE); 1146 1149 buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL); ··· 1477 1472 1478 1473 /* 1479 1474 * Accessing maxBuf is racy with cifs_reconnect - need to store value 1480 - * and check it for zero before using. 1475 + * and check it before using. 1481 1476 */ 1482 1477 max_buf = tcon->ses->server->maxBuf; 1483 - if (!max_buf) 1478 + if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE))) 1484 1479 return -EINVAL; 1485 1480 1481 + BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) > 1482 + PAGE_SIZE); 1483 + max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr), 1484 + PAGE_SIZE); 1486 1485 max_num = (max_buf - sizeof(struct smb_hdr)) / 1487 1486 sizeof(LOCKING_ANDX_RANGE); 1488 1487 buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL); ··· 2119 2110 pgoff_t end, index; 2120 2111 struct cifs_writedata *wdata; 2121 2112 int rc = 0; 2113 + int saved_rc = 0; 2122 2114 unsigned int xid; 2123 2115 2124 2116 /* ··· 2148 2138 2149 2139 rc = server->ops->wait_mtu_credits(server, cifs_sb->wsize, 2150 2140 &wsize, &credits); 2151 - if (rc) 2141 + if (rc != 0) { 2142 + done = true; 2152 2143 break; 2144 + } 2153 2145 2154 2146 tofind = min((wsize / PAGE_SIZE) - 1, end - index) + 1; 2155 2147 ··· 2159 2147 &found_pages); 2160 2148 if (!wdata) { 2161 2149 rc = -ENOMEM; 2150 + done = true; 2162 2151 add_credits_and_wake_if(server, credits, 0); 2163 2152 break; 2164 2153 } ··· 2188 2175 if (rc != 0) { 2189 2176 add_credits_and_wake_if(server, wdata->credits, 0); 2190 2177 for (i = 0; i < nr_pages; ++i) { 2191 - if (rc == -EAGAIN) 2178 + if (is_retryable_error(rc)) 2192 2179 redirty_page_for_writepage(wbc, 2193 2180 wdata->pages[i]); 2194 2181 else ··· 2196 2183 end_page_writeback(wdata->pages[i]); 2197 2184 put_page(wdata->pages[i]); 2198 2185 } 2199 - if (rc != -EAGAIN) 2186 + if (!is_retryable_error(rc)) 2200 2187 mapping_set_error(mapping, rc); 2201 2188 } 2202 2189 kref_put(&wdata->refcount, cifs_writedata_release); ··· 2205 2192 index = saved_index; 2206 2193 continue; 2207 2194 } 2195 + 2196 + /* Return immediately if we received a signal during writing */ 2197 + if (is_interrupt_error(rc)) { 2198 + done = true; 2199 + break; 2200 + } 2201 + 2202 + if (rc != 0 && saved_rc == 0) 2203 + saved_rc = rc; 2208 2204 2209 2205 wbc->nr_to_write -= nr_pages; 2210 2206 if (wbc->nr_to_write <= 0) ··· 2231 2209 index = 0; 2232 2210 goto retry; 2233 2211 } 2212 + 2213 + if (saved_rc != 0) 2214 + rc = saved_rc; 2234 2215 2235 2216 if (wbc->range_cyclic || 
(range_whole && wbc->nr_to_write > 0)) 2236 2217 mapping->writeback_index = index; ··· 2267 2242 set_page_writeback(page); 2268 2243 retry_write: 2269 2244 rc = cifs_partialpagewrite(page, 0, PAGE_SIZE); 2270 - if (rc == -EAGAIN) { 2271 - if (wbc->sync_mode == WB_SYNC_ALL) 2245 + if (is_retryable_error(rc)) { 2246 + if (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN) 2272 2247 goto retry_write; 2273 2248 redirty_page_for_writepage(wbc, page); 2274 2249 } else if (rc != 0) {
+10
fs/cifs/inode.c
··· 2257 2257 * the flush returns error? 2258 2258 */ 2259 2259 rc = filemap_write_and_wait(inode->i_mapping); 2260 + if (is_interrupt_error(rc)) { 2261 + rc = -ERESTARTSYS; 2262 + goto out; 2263 + } 2264 + 2260 2265 mapping_set_error(inode->i_mapping, rc); 2261 2266 rc = 0; 2262 2267 ··· 2405 2400 * the flush returns error? 2406 2401 */ 2407 2402 rc = filemap_write_and_wait(inode->i_mapping); 2403 + if (is_interrupt_error(rc)) { 2404 + rc = -ERESTARTSYS; 2405 + goto cifs_setattr_exit; 2406 + } 2407 + 2408 2408 mapping_set_error(inode->i_mapping, rc); 2409 2409 rc = 0; 2410 2410
+6 -2
fs/cifs/smb2file.c
··· 122 122 123 123 /* 124 124 * Accessing maxBuf is racy with cifs_reconnect - need to store value 125 - * and check it for zero before using. 125 + * and check it before using. 126 126 */ 127 127 max_buf = tcon->ses->server->maxBuf; 128 - if (!max_buf) 128 + if (max_buf < sizeof(struct smb2_lock_element)) 129 129 return -EINVAL; 130 130 131 + BUILD_BUG_ON(sizeof(struct smb2_lock_element) > PAGE_SIZE); 132 + max_buf = min_t(unsigned int, max_buf, PAGE_SIZE); 131 133 max_num = max_buf / sizeof(struct smb2_lock_element); 132 134 buf = kcalloc(max_num, sizeof(struct smb2_lock_element), GFP_KERNEL); 133 135 if (!buf) ··· 266 264 return -EINVAL; 267 265 } 268 266 267 + BUILD_BUG_ON(sizeof(struct smb2_lock_element) > PAGE_SIZE); 268 + max_buf = min_t(unsigned int, max_buf, PAGE_SIZE); 269 269 max_num = max_buf / sizeof(struct smb2_lock_element); 270 270 buf = kcalloc(max_num, sizeof(struct smb2_lock_element), GFP_KERNEL); 271 271 if (!buf) {
+22 -9
fs/cifs/smb2pdu.c
··· 162 162 int rc; 163 163 struct dfs_cache_tgt_list tl; 164 164 struct dfs_cache_tgt_iterator *it = NULL; 165 - char tree[MAX_TREE_SIZE + 1]; 165 + char *tree; 166 166 const char *tcp_host; 167 167 size_t tcp_host_len; 168 168 const char *dfs_host; 169 169 size_t dfs_host_len; 170 170 171 + tree = kzalloc(MAX_TREE_SIZE, GFP_KERNEL); 172 + if (!tree) 173 + return -ENOMEM; 174 + 171 175 if (tcon->ipc) { 172 - snprintf(tree, sizeof(tree), "\\\\%s\\IPC$", 176 + snprintf(tree, MAX_TREE_SIZE, "\\\\%s\\IPC$", 173 177 tcon->ses->server->hostname); 174 - return SMB2_tcon(0, tcon->ses, tree, tcon, nlsc); 178 + rc = SMB2_tcon(0, tcon->ses, tree, tcon, nlsc); 179 + goto out; 175 180 } 176 181 177 - if (!tcon->dfs_path) 178 - return SMB2_tcon(0, tcon->ses, tcon->treeName, tcon, nlsc); 182 + if (!tcon->dfs_path) { 183 + rc = SMB2_tcon(0, tcon->ses, tcon->treeName, tcon, nlsc); 184 + goto out; 185 + } 179 186 180 187 rc = dfs_cache_noreq_find(tcon->dfs_path + 1, NULL, &tl); 181 188 if (rc) 182 - return rc; 189 + goto out; 183 190 184 191 extract_unc_hostname(tcon->ses->server->hostname, &tcp_host, 185 192 &tcp_host_len); ··· 206 199 continue; 207 200 } 208 201 209 - snprintf(tree, sizeof(tree), "\\%s", tgt); 202 + snprintf(tree, MAX_TREE_SIZE, "\\%s", tgt); 210 203 211 204 rc = SMB2_tcon(0, tcon->ses, tree, tcon, nlsc); 212 205 if (!rc) ··· 223 216 rc = -ENOENT; 224 217 } 225 218 dfs_cache_free_tgts(&tl); 219 + out: 220 + kfree(tree); 226 221 return rc; 227 222 } 228 223 #else ··· 3287 3278 if (rdata->credits) { 3288 3279 shdr->CreditCharge = cpu_to_le16(DIV_ROUND_UP(rdata->bytes, 3289 3280 SMB2_MAX_BUFFER_SIZE)); 3290 - shdr->CreditRequest = shdr->CreditCharge; 3281 + shdr->CreditRequest = 3282 + cpu_to_le16(le16_to_cpu(shdr->CreditCharge) + 1); 3291 3283 spin_lock(&server->req_lock); 3292 3284 server->credits += rdata->credits - 3293 3285 le16_to_cpu(shdr->CreditCharge); 3294 3286 spin_unlock(&server->req_lock); 3295 3287 wake_up(&server->request_q); 3288 + rdata->credits = le16_to_cpu(shdr->CreditCharge); 3296 3289 flags |= CIFS_HAS_CREDITS; 3297 3290 } 3298 3291 ··· 3566 3555 if (wdata->credits) { 3567 3556 shdr->CreditCharge = cpu_to_le16(DIV_ROUND_UP(wdata->bytes, 3568 3557 SMB2_MAX_BUFFER_SIZE)); 3569 - shdr->CreditRequest = shdr->CreditCharge; 3558 + shdr->CreditRequest = 3559 + cpu_to_le16(le16_to_cpu(shdr->CreditCharge) + 1); 3570 3560 spin_lock(&server->req_lock); 3571 3561 server->credits += wdata->credits - 3572 3562 le16_to_cpu(shdr->CreditCharge); 3573 3563 spin_unlock(&server->req_lock); 3574 3564 wake_up(&server->request_q); 3565 + wdata->credits = le16_to_cpu(shdr->CreditCharge); 3575 3566 flags |= CIFS_HAS_CREDITS; 3576 3567 } 3577 3568
+92 -30
fs/cifs/transport.c
··· 387 387 if (rc < 0 && rc != -EINTR) 388 388 cifs_dbg(VFS, "Error %d sending data on socket to server\n", 389 389 rc); 390 - else 390 + else if (rc > 0) 391 391 rc = 0; 392 392 393 393 return rc; ··· 783 783 } 784 784 785 785 static void 786 - cifs_noop_callback(struct mid_q_entry *mid) 786 + cifs_compound_callback(struct mid_q_entry *mid) 787 787 { 788 + struct TCP_Server_Info *server = mid->server; 789 + unsigned int optype = mid->optype; 790 + unsigned int credits_received = 0; 791 + 792 + if (mid->mid_state == MID_RESPONSE_RECEIVED) { 793 + if (mid->resp_buf) 794 + credits_received = server->ops->get_credits(mid); 795 + else 796 + cifs_dbg(FYI, "Bad state for cancelled MID\n"); 797 + } 798 + 799 + add_credits(server, credits_received, optype); 800 + } 801 + 802 + static void 803 + cifs_compound_last_callback(struct mid_q_entry *mid) 804 + { 805 + cifs_compound_callback(mid); 806 + cifs_wake_up_task(mid); 807 + } 808 + 809 + static void 810 + cifs_cancelled_callback(struct mid_q_entry *mid) 811 + { 812 + cifs_compound_callback(mid); 813 + DeleteMidQEntry(mid); 788 814 } 789 815 790 816 int ··· 821 795 int i, j, rc = 0; 822 796 int timeout, optype; 823 797 struct mid_q_entry *midQ[MAX_COMPOUND]; 824 - unsigned int credits = 0; 798 + bool cancelled_mid[MAX_COMPOUND] = {false}; 799 + unsigned int credits[MAX_COMPOUND] = {0}; 825 800 char *buf; 826 801 827 802 timeout = flags & CIFS_TIMEOUT_MASK; ··· 840 813 return -ENOENT; 841 814 842 815 /* 843 - * Ensure that we do not send more than 50 overlapping requests 844 - * to the same server. We may make this configurable later or 845 - * use ses->maxReq. 816 + * Ensure we obtain 1 credit per request in the compound chain. 817 + * It can be optimized further by waiting for all the credits 818 + * at once but this can wait long enough if we don't have enough 819 + * credits due to some heavy operations in progress or the server 820 + * not granting us much, so a fallback to the current approach is 821 + * needed anyway. 846 822 */ 847 - rc = wait_for_free_request(ses->server, timeout, optype); 848 - if (rc) 849 - return rc; 823 + for (i = 0; i < num_rqst; i++) { 824 + rc = wait_for_free_request(ses->server, timeout, optype); 825 + if (rc) { 826 + /* 827 + * We haven't sent an SMB packet to the server yet but 828 + * we already obtained credits for i requests in the 829 + * compound chain - need to return those credits back 830 + * for future use. Note that we need to call add_credits 831 + * multiple times to match the way we obtained credits 832 + * in the first place and to account for in flight 833 + * requests correctly. 834 + */ 835 + for (j = 0; j < i; j++) 836 + add_credits(ses->server, 1, optype); 837 + return rc; 838 + } 839 + credits[i] = 1; 840 + } 850 841 851 842 /* 852 843 * Make sure that we sign in the same order that we send on this socket ··· 880 835 for (j = 0; j < i; j++) 881 836 cifs_delete_mid(midQ[j]); 882 837 mutex_unlock(&ses->server->srv_mutex); 838 + 883 839 /* Update # of requests on wire to server */ 884 - add_credits(ses->server, 1, optype); 840 + for (j = 0; j < num_rqst; j++) 841 + add_credits(ses->server, credits[j], optype); 885 842 return PTR_ERR(midQ[i]); 886 843 } 887 844 888 845 midQ[i]->mid_state = MID_REQUEST_SUBMITTED; 846 + midQ[i]->optype = optype; 889 847 /* 890 - * We don't invoke the callback compounds unless it is the last 891 - * request. 848 + * Invoke callback for every part of the compound chain 849 + * to calculate credits properly. 
Wake up this thread only when 850 + * the last element is received. 892 851 */ 893 852 if (i < num_rqst - 1) 894 - midQ[i]->callback = cifs_noop_callback; 853 + midQ[i]->callback = cifs_compound_callback; 854 + else 855 + midQ[i]->callback = cifs_compound_last_callback; 895 856 } 896 857 cifs_in_send_inc(ses->server); 897 858 rc = smb_send_rqst(ses->server, num_rqst, rqst, flags); ··· 911 860 912 861 mutex_unlock(&ses->server->srv_mutex); 913 862 914 - if (rc < 0) 863 + if (rc < 0) { 864 + /* Sending failed for some reason - return credits back */ 865 + for (i = 0; i < num_rqst; i++) 866 + add_credits(ses->server, credits[i], optype); 915 867 goto out; 868 + } 869 + 870 + /* 871 + * At this point the request is passed to the network stack - we assume 872 + * that any credits taken from the server structure on the client have 873 + * been spent and we can't return them back. Once we receive responses 874 + * we will collect credits granted by the server in the mid callbacks 875 + * and add those credits to the server structure. 876 + */ 916 877 917 878 /* 918 879 * Compounding is never used during session establish. ··· 938 875 939 876 for (i = 0; i < num_rqst; i++) { 940 877 rc = wait_for_response(ses->server, midQ[i]); 941 - if (rc != 0) { 878 + if (rc != 0) 879 + break; 880 + } 881 + if (rc != 0) { 882 + for (; i < num_rqst; i++) { 942 883 cifs_dbg(VFS, "Cancelling wait for mid %llu cmd: %d\n", 943 884 midQ[i]->mid, le16_to_cpu(midQ[i]->command)); 944 885 send_cancel(ses->server, &rqst[i], midQ[i]); 945 886 spin_lock(&GlobalMid_Lock); 946 887 if (midQ[i]->mid_state == MID_REQUEST_SUBMITTED) { 947 888 midQ[i]->mid_flags |= MID_WAIT_CANCELLED; 948 - midQ[i]->callback = DeleteMidQEntry; 949 - spin_unlock(&GlobalMid_Lock); 950 - add_credits(ses->server, 1, optype); 951 - return rc; 889 + midQ[i]->callback = cifs_cancelled_callback; 890 + cancelled_mid[i] = true; 891 + credits[i] = 0; 952 892 } 953 893 spin_unlock(&GlobalMid_Lock); 954 894 } 955 895 } 956 - 957 - for (i = 0; i < num_rqst; i++) 958 - if (midQ[i]->resp_buf) 959 - credits += ses->server->ops->get_credits(midQ[i]); 960 - if (!credits) 961 - credits = 1; 962 896 963 897 for (i = 0; i < num_rqst; i++) { 964 898 if (rc < 0) ··· 963 903 964 904 rc = cifs_sync_mid_result(midQ[i], ses->server); 965 905 if (rc != 0) { 966 - add_credits(ses->server, credits, optype); 967 - return rc; 906 + /* mark this mid as cancelled to not free it below */ 907 + cancelled_mid[i] = true; 908 + goto out; 968 909 } 969 910 970 911 if (!midQ[i]->resp_buf || ··· 1012 951 * This is prevented above by using a noop callback that will not 1013 952 * wake this thread except for the very last PDU. 1014 953 */ 1015 - for (i = 0; i < num_rqst; i++) 1016 - cifs_delete_mid(midQ[i]); 1017 - add_credits(ses->server, credits, optype); 954 + for (i = 0; i < num_rqst; i++) { 955 + if (!cancelled_mid[i]) 956 + cifs_delete_mid(midQ[i]); 957 + } 1018 958 1019 959 return rc; 1020 960 }
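Editor's note: the transport rework above makes SMB credit accounting strictly per-request. One credit is obtained for each PDU in a compound chain before anything is sent, every partially acquired credit is returned if acquisition or sending fails, and once the chain is on the wire the per-mid compound callbacks collect whatever the server grants back. The acquire/undo half, distilled from the hunk:

for (i = 0; i < num_rqst; i++) {
        rc = wait_for_free_request(server, timeout, optype);
        if (rc) {
                /* Return the i credits already taken, one call per
                 * credit to match how they were obtained. */
                for (j = 0; j < i; j++)
                        add_credits(server, 1, optype);
                return rc;
        }
        credits[i] = 1;
}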
+33 -28
fs/hugetlbfs/inode.c
··· 383 383 * truncation is indicated by end of range being LLONG_MAX 384 384 * In this case, we first scan the range and release found pages. 385 385 * After releasing pages, hugetlb_unreserve_pages cleans up region/reserv 386 - * maps and global counts. 386 + * maps and global counts. Page faults can not race with truncation 387 + * in this routine. hugetlb_no_page() prevents page faults in the 388 + * truncated range. It checks i_size before allocation, and again after 389 + * with the page table lock for the page held. The same lock must be 390 + * acquired to unmap a page. 387 391 * hole punch is indicated if end is not LLONG_MAX 388 392 * In the hole punch case we scan the range and release found pages. 389 393 * Only when releasing a page is the associated region/reserv map 390 394 * deleted. The region/reserv map for ranges without associated 391 - * pages are not modified. 392 - * 393 - * Callers of this routine must hold the i_mmap_rwsem in write mode to prevent 394 - * races with page faults. 395 - * 395 + * pages are not modified. Page faults can race with hole punch. 396 + * This is indicated if we find a mapped page. 396 397 * Note: If the passed end of range value is beyond the end of file, but 397 398 * not LLONG_MAX this routine still performs a hole punch operation. 398 399 */ ··· 423 422 424 423 for (i = 0; i < pagevec_count(&pvec); ++i) { 425 424 struct page *page = pvec.pages[i]; 425 + u32 hash; 426 426 427 427 index = page->index; 428 + hash = hugetlb_fault_mutex_hash(h, current->mm, 429 + &pseudo_vma, 430 + mapping, index, 0); 431 + mutex_lock(&hugetlb_fault_mutex_table[hash]); 432 + 428 433 /* 429 - * A mapped page is impossible as callers should unmap 430 - * all references before calling. And, i_mmap_rwsem 431 - * prevents the creation of additional mappings. 434 + * If page is mapped, it was faulted in after being 435 + * unmapped in caller. Unmap (again) now after taking 436 + * the fault mutex. The mutex will prevent faults 437 + * until we finish removing the page. 438 + * 439 + * This race can only happen in the hole punch case. 440 + * Getting here in a truncate operation is a bug. 432 441 */ 433 - VM_BUG_ON(page_mapped(page)); 442 + if (unlikely(page_mapped(page))) { 443 + BUG_ON(truncate_op); 444 + 445 + i_mmap_lock_write(mapping); 446 + hugetlb_vmdelete_list(&mapping->i_mmap, 447 + index * pages_per_huge_page(h), 448 + (index + 1) * pages_per_huge_page(h)); 449 + i_mmap_unlock_write(mapping); 450 + } 434 451 435 452 lock_page(page); 436 453 /* ··· 470 451 } 471 452 472 453 unlock_page(page); 454 + mutex_unlock(&hugetlb_fault_mutex_table[hash]); 473 455 } 474 456 huge_pagevec_release(&pvec); 475 457 cond_resched(); ··· 482 462 483 463 static void hugetlbfs_evict_inode(struct inode *inode) 484 464 { 485 - struct address_space *mapping = inode->i_mapping; 486 465 struct resv_map *resv_map; 487 466 488 - /* 489 - * The vfs layer guarantees that there are no other users of this 490 - * inode. Therefore, it would be safe to call remove_inode_hugepages 491 - * without holding i_mmap_rwsem. We acquire and hold here to be 492 - * consistent with other callers. Since there will be no contention 493 - * on the semaphore, overhead is negligible. 
494 - */ 495 - i_mmap_lock_write(mapping); 496 467 remove_inode_hugepages(inode, 0, LLONG_MAX); 497 - i_mmap_unlock_write(mapping); 498 - 499 468 resv_map = (struct resv_map *)inode->i_mapping->private_data; 500 469 /* root inode doesn't have the resv_map, so we should check it */ 501 470 if (resv_map) ··· 505 496 i_mmap_lock_write(mapping); 506 497 if (!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root)) 507 498 hugetlb_vmdelete_list(&mapping->i_mmap, pgoff, 0); 508 - remove_inode_hugepages(inode, offset, LLONG_MAX); 509 499 i_mmap_unlock_write(mapping); 500 + remove_inode_hugepages(inode, offset, LLONG_MAX); 510 501 return 0; 511 502 } 512 503 ··· 540 531 hugetlb_vmdelete_list(&mapping->i_mmap, 541 532 hole_start >> PAGE_SHIFT, 542 533 hole_end >> PAGE_SHIFT); 543 - remove_inode_hugepages(inode, hole_start, hole_end); 544 534 i_mmap_unlock_write(mapping); 535 + remove_inode_hugepages(inode, hole_start, hole_end); 545 536 inode_unlock(inode); 546 537 } 547 538 ··· 624 615 /* addr is the offset within the file (zero based) */ 625 616 addr = index * hpage_size; 626 617 627 - /* 628 - * fault mutex taken here, protects against fault path 629 - * and hole punch. inode_lock previously taken protects 630 - * against truncation. 631 - */ 618 + /* mutex taken here, fault path and hole punch */ 632 619 hash = hugetlb_fault_mutex_hash(h, mm, &pseudo_vma, mapping, 633 620 index, addr); 634 621 mutex_lock(&hugetlb_fault_mutex_table[hash]);
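Editor's note: the hugetlbfs change above re-keys hole-punch correctness on hugetlb_fault_mutex_table: remove_inode_hugepages() now takes the same hashed mutex a page fault would take for that (mapping, index), so finding a mapped page can only mean a racing fault (legal for hole punch, a BUG for truncate). A sketch of hashed-lock serialization under stated assumptions; the real table is sized at boot and hashes more inputs (mm, vma, address) with jhash2, not like this:

#include <linux/hash.h>
#include <linux/mutex.h>

#define SHARD_BITS      8       /* 256 shards, hypothetical sizing */
static struct mutex shard_lock[1 << SHARD_BITS];

/* Any two paths working on the same (mapping, index) hash to the same
 * mutex, so at most one of them manipulates that huge page at a time. */
static struct mutex *shard_for(struct address_space *mapping, pgoff_t idx)
{
        return &shard_lock[hash_long((unsigned long)mapping ^ idx,
                                     SHARD_BITS)];
}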
+1 -7
fs/nfs/nfs4file.c
··· 133 133 struct file *file_out, loff_t pos_out, 134 134 size_t count, unsigned int flags) 135 135 { 136 - ssize_t ret; 137 - 138 136 if (file_inode(file_in) == file_inode(file_out)) 139 137 return -EINVAL; 140 - retry: 141 - ret = nfs42_proc_copy(file_in, pos_in, file_out, pos_out, count); 142 - if (ret == -EAGAIN) 143 - goto retry; 144 - return ret; 138 + return nfs42_proc_copy(file_in, pos_in, file_out, pos_out, count); 145 139 } 146 140 147 141 static loff_t nfs4_file_llseek(struct file *filep, loff_t offset, int whence)
+4 -8
fs/pstore/ram.c
··· 128 128 struct pstore_record *record) 129 129 { 130 130 struct persistent_ram_zone *prz; 131 - bool update = (record->type == PSTORE_TYPE_DMESG); 132 131 133 132 /* Give up if we never existed or have hit the end. */ 134 133 if (!przs) ··· 138 139 return NULL; 139 140 140 141 /* Update old/shadowed buffer. */ 141 - if (update) 142 + if (prz->type == PSTORE_TYPE_DMESG) 142 143 persistent_ram_save_old(prz); 143 144 144 145 if (!persistent_ram_old_size(prz)) ··· 710 711 { 711 712 struct device *dev = &pdev->dev; 712 713 struct ramoops_platform_data *pdata = dev->platform_data; 714 + struct ramoops_platform_data pdata_local; 713 715 struct ramoops_context *cxt = &oops_cxt; 714 716 size_t dump_mem_sz; 715 717 phys_addr_t paddr; 716 718 int err = -EINVAL; 717 719 718 720 if (dev_of_node(dev) && !pdata) { 719 - pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); 720 - if (!pdata) { 721 - pr_err("cannot allocate platform data buffer\n"); 722 - err = -ENOMEM; 723 - goto fail_out; 724 - } 721 + pdata = &pdata_local; 722 + memset(pdata, 0, sizeof(*pdata)); 725 723 726 724 err = ramoops_parse_dt(pdev, pdata); 727 725 if (err < 0)
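In the ramoops probe hunk, the device-managed allocation for the scratch platform data is replaced by a zeroed stack local: the parsed values are consumed only inside ramoops_probe() itself, so the heap copy bought nothing but an extra failure path. A tiny sketch of the pattern, under assumed names:

#include <string.h>

struct cfg { unsigned long mem_size; int max_reason; };

/* Stub standing in for firmware/device-tree parsing. */
static int parse_dt(struct cfg *out) { out->mem_size = 0x10000; return 0; }

int probe(const struct cfg *pdata)
{
        struct cfg local;

        if (!pdata) {
                /* Parse into a stack local instead of allocating: nothing
                 * here outlives the probe call. */
                memset(&local, 0, sizeof(local));
                if (parse_dt(&local) < 0)
                        return -1;
                pdata = &local;
        }
        /* ... use *pdata only within this frame ... */
        return 0;
}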
+2 -1
fs/sysfs/dir.c
··· 43 43 kuid_t uid; 44 44 kgid_t gid; 45 45 46 - BUG_ON(!kobj); 46 + if (WARN_ON(!kobj)) 47 + return -EINVAL; 47 48 48 49 if (kobj->parent) 49 50 parent = kobj->parent->sd;
+4 -2
fs/sysfs/file.c
··· 325 325 kuid_t uid; 326 326 kgid_t gid; 327 327 328 - BUG_ON(!kobj || !kobj->sd || !attr); 328 + if (WARN_ON(!kobj || !kobj->sd || !attr)) 329 + return -EINVAL; 329 330 330 331 kobject_get_ownership(kobj, &uid, &gid); 331 332 return sysfs_add_file_mode_ns(kobj->sd, attr, false, attr->mode, ··· 538 537 kuid_t uid; 539 538 kgid_t gid; 540 539 541 - BUG_ON(!kobj || !kobj->sd || !attr); 540 + if (WARN_ON(!kobj || !kobj->sd || !attr)) 541 + return -EINVAL; 542 542 543 543 kobject_get_ownership(kobj, &uid, &gid); 544 544 return sysfs_add_file_mode_ns(kobj->sd, &attr->attr, true,
+2 -1
fs/sysfs/group.c
··· 112 112 kgid_t gid; 113 113 int error; 114 114 115 - BUG_ON(!kobj || (!update && !kobj->sd)); 115 + if (WARN_ON(!kobj || (!update && !kobj->sd))) 116 + return -EINVAL; 116 117 117 118 /* Updates may happen before the object has been instantiated */ 118 119 if (unlikely(update && !kobj->sd))
+2 -1
fs/sysfs/symlink.c
··· 23 23 { 24 24 struct kernfs_node *kn, *target = NULL; 25 25 26 - BUG_ON(!name || !parent); 26 + if (WARN_ON(!name || !parent)) 27 + return -EINVAL; 27 28 28 29 /* 29 30 * We don't own @target_kobj and it may be removed at any time.
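The four sysfs hunks above (dir.c, file.c, group.c, symlink.c) all make the same conversion: argument sanity checks that used to BUG_ON(), taking the whole machine down, now WARN_ON() and fail the call with -EINVAL, so a buggy caller produces a stack trace and a recoverable error instead of a panic. The one-liner works because WARN_ON() evaluates to its condition; a simplified userspace stand-in:

#include <errno.h>
#include <stdio.h>

/* Simplified stand-in: print a warning with location info and, like the
 * kernel's WARN_ON(), evaluate to the condition so it can gate a return. */
#define WARN_ON(cond) \
        ({ int __c = !!(cond); \
           if (__c) \
                fprintf(stderr, "WARNING: %s:%d: %s\n", \
                        __FILE__, __LINE__, #cond); \
           __c; })

int sysfs_create_file_like(void *kobj, void *attr)
{
        if (WARN_ON(!kobj || !attr))
                return -EINVAL;         /* recoverable error instead of BUG() */
        return 0;
}

int main(void)
{
        return sysfs_create_file_like(NULL, NULL) ? 1 : 0;
}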
+7
include/drm/drm_dp_helper.h
··· 1365 1365 * to 16 bits. So will give a constant value (0x8000) for compatability. 1366 1366 */ 1367 1367 DP_DPCD_QUIRK_CONSTANT_N, 1368 + /** 1369 + * @DP_DPCD_QUIRK_NO_PSR: 1370 + * 1371 + * The device does not support PSR even if reports that it supports or 1372 + * driver still need to implement proper handling for such device. 1373 + */ 1374 + DP_DPCD_QUIRK_NO_PSR, 1368 1375 }; 1369 1376 1370 1377 /**
+2 -1
include/drm/drm_dp_mst_helper.h
··· 616 616 struct drm_dp_mst_topology_mgr *mgr); 617 617 618 618 void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr); 619 - int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr); 619 + int __must_check 620 + drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr); 620 621 struct drm_dp_mst_topology_state *drm_atomic_get_mst_topology_state(struct drm_atomic_state *state, 621 622 struct drm_dp_mst_topology_mgr *mgr); 622 623 int drm_dp_atomic_find_vcpi_slots(struct drm_atomic_state *state,
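Annotating drm_dp_mst_topology_mgr_resume() with __must_check makes the compiler flag any caller that ignores the return value; resume can fail, and callers are expected to act on that (for example by re-probing the topology). In the kernel the macro expands to the warn_unused_result attribute; a minimal demonstration:

/* In the kernel, __must_check is __attribute__((__warn_unused_result__)). */
#define __must_check __attribute__((warn_unused_result))

static __must_check int resume_topology(void)
{
        return -1;      /* e.g. the device vanished during suspend */
}

int main(void)
{
        resume_topology();  /* gcc/clang: warning: ignoring return value */
        return 0;
}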
+50
include/dt-bindings/mfd/st,stpmic1.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + /* 3 + * Copyright (C) STMicroelectronics 2018 - All Rights Reserved 4 + * Author: Philippe Peurichard <philippe.peurichard@st.com>, 5 + * Pascal Paillet <p.paillet@st.com> for STMicroelectronics. 6 + */ 7 + 8 + #ifndef __DT_BINDINGS_STPMIC1_H__ 9 + #define __DT_BINDINGS_STPMIC1_H__ 10 + 11 + /* IRQ definitions */ 12 + #define IT_PONKEY_F 0 13 + #define IT_PONKEY_R 1 14 + #define IT_WAKEUP_F 2 15 + #define IT_WAKEUP_R 3 16 + #define IT_VBUS_OTG_F 4 17 + #define IT_VBUS_OTG_R 5 18 + #define IT_SWOUT_F 6 19 + #define IT_SWOUT_R 7 20 + 21 + #define IT_CURLIM_BUCK1 8 22 + #define IT_CURLIM_BUCK2 9 23 + #define IT_CURLIM_BUCK3 10 24 + #define IT_CURLIM_BUCK4 11 25 + #define IT_OCP_OTG 12 26 + #define IT_OCP_SWOUT 13 27 + #define IT_OCP_BOOST 14 28 + #define IT_OVP_BOOST 15 29 + 30 + #define IT_CURLIM_LDO1 16 31 + #define IT_CURLIM_LDO2 17 32 + #define IT_CURLIM_LDO3 18 33 + #define IT_CURLIM_LDO4 19 34 + #define IT_CURLIM_LDO5 20 35 + #define IT_CURLIM_LDO6 21 36 + #define IT_SHORT_SWOTG 22 37 + #define IT_SHORT_SWOUT 23 38 + 39 + #define IT_TWARN_F 24 40 + #define IT_TWARN_R 25 41 + #define IT_VINLOW_F 26 42 + #define IT_VINLOW_R 27 43 + #define IT_SWIN_F 30 44 + #define IT_SWIN_R 31 45 + 46 + /* BUCK MODES definitions */ 47 + #define STPMIC1_BUCK_MODE_NORMAL 0 48 + #define STPMIC1_BUCK_MODE_LP 2 49 + 50 + #endif /* __DT_BINDINGS_STPMIC1_H__ */
+1 -2
include/dt-bindings/reset/amlogic,meson-axg-reset.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause */ 1 2 /* 2 - * 3 3 * Copyright (c) 2016 BayLibre, SAS. 4 4 * Author: Neil Armstrong <narmstrong@baylibre.com> 5 5 * 6 6 * Copyright (c) 2017 Amlogic, inc. 7 7 * Author: Yixun Lan <yixun.lan@amlogic.com> 8 8 * 9 - * SPDX-License-Identifier: (GPL-2.0+ OR BSD) 10 9 */ 11 10 12 11 #ifndef _DT_BINDINGS_AMLOGIC_MESON_AXG_RESET_H
+1
include/linux/bcma/bcma_soc.h
··· 6 6 7 7 struct bcma_soc { 8 8 struct bcma_bus bus; 9 + struct device *dev; 9 10 }; 10 11 11 12 int __init bcma_host_soc_register(struct bcma_soc *soc);
+1
include/linux/bpf_verifier.h
··· 172 172 #define BPF_ALU_SANITIZE_SRC 1U 173 173 #define BPF_ALU_SANITIZE_DST 2U 174 174 #define BPF_ALU_NEG_VALUE (1U << 2) 175 + #define BPF_ALU_NON_POINTER (1U << 3) 175 176 #define BPF_ALU_SANITIZE (BPF_ALU_SANITIZE_SRC | \ 176 177 BPF_ALU_SANITIZE_DST) 177 178
+12 -3
include/linux/bpfilter.h
··· 3 3 #define _LINUX_BPFILTER_H 4 4 5 5 #include <uapi/linux/bpfilter.h> 6 + #include <linux/umh.h> 6 7 7 8 struct sock; 8 9 int bpfilter_ip_set_sockopt(struct sock *sk, int optname, char __user *optval, 9 10 unsigned int optlen); 10 11 int bpfilter_ip_get_sockopt(struct sock *sk, int optname, char __user *optval, 11 12 int __user *optlen); 12 - extern int (*bpfilter_process_sockopt)(struct sock *sk, int optname, 13 - char __user *optval, 14 - unsigned int optlen, bool is_set); 13 + struct bpfilter_umh_ops { 14 + struct umh_info info; 15 + /* since ip_getsockopt() can run in parallel, serialize access to umh */ 16 + struct mutex lock; 17 + int (*sockopt)(struct sock *sk, int optname, 18 + char __user *optval, 19 + unsigned int optlen, bool is_set); 20 + int (*start)(void); 21 + bool stop; 22 + }; 23 + extern struct bpfilter_umh_ops bpfilter_ops; 15 24 #endif
+4 -2
include/linux/ceph/libceph.h
··· 35 35 #define CEPH_OPT_NOMSGAUTH (1<<4) /* don't require msg signing feat */ 36 36 #define CEPH_OPT_TCP_NODELAY (1<<5) /* TCP_NODELAY on TCP sockets */ 37 37 #define CEPH_OPT_NOMSGSIGN (1<<6) /* don't sign msgs */ 38 + #define CEPH_OPT_ABORT_ON_FULL (1<<7) /* abort w/ ENOSPC when full */ 38 39 39 40 #define CEPH_OPT_DEFAULT (CEPH_OPT_TCP_NODELAY) 40 41 ··· 54 53 unsigned long osd_request_timeout; /* jiffies */ 55 54 56 55 /* 57 - * any type that can't be simply compared or doesn't need need 56 + * any type that can't be simply compared or doesn't need 58 57 * to be compared should go beyond this point, 59 58 * ceph_compare_options() should be updated accordingly 60 59 */ ··· 282 281 const char *dev_name, const char *dev_name_end, 283 282 int (*parse_extra_token)(char *c, void *private), 284 283 void *private); 285 - int ceph_print_client_options(struct seq_file *m, struct ceph_client *client); 284 + int ceph_print_client_options(struct seq_file *m, struct ceph_client *client, 285 + bool show_all); 286 286 extern void ceph_destroy_options(struct ceph_options *opt); 287 287 extern int ceph_compare_options(struct ceph_options *new_opt, 288 288 struct ceph_client *client);
-1
include/linux/ceph/osd_client.h
··· 354 354 struct rb_root linger_map_checks; 355 355 atomic_t num_requests; 356 356 atomic_t num_homeless; 357 - bool abort_on_full; /* abort w/ ENOSPC when full */ 358 357 int abort_err; 359 358 struct delayed_work timeout_work; 360 359 struct delayed_work osds_timeout_work;
+2 -3
include/linux/compiler-clang.h
··· 3 3 #error "Please don't include <linux/compiler-clang.h> directly, include <linux/compiler.h> instead." 4 4 #endif 5 5 6 - /* Some compiler specific definitions are overwritten here 7 - * for Clang compiler 8 - */ 6 + /* Compiler specific definitions for Clang compiler */ 7 + 9 8 #define uninitialized_var(x) x = *(&(x)) 10 9 11 10 /* same as gcc, this was present in clang-2.6 so we can assume it works
+1 -5
include/linux/compiler-gcc.h
··· 58 58 (typeof(ptr)) (__ptr + (off)); \ 59 59 }) 60 60 61 - /* Make the optimizer believe the variable can be manipulated arbitrarily. */ 62 - #define OPTIMIZER_HIDE_VAR(var) \ 63 - __asm__ ("" : "=r" (var) : "0" (var)) 64 - 65 61 /* 66 62 * A trick to suppress uninitialized variable warning without generating any 67 63 * code 68 64 */ 69 65 #define uninitialized_var(x) x = x 70 66 71 - #ifdef RETPOLINE 67 + #ifdef CONFIG_RETPOLINE 72 68 #define __noretpoline __attribute__((__indirect_branch__("keep"))) 73 69 #endif 74 70
+1 -3
include/linux/compiler-intel.h
··· 5 5 6 6 #ifdef __ECC 7 7 8 - /* Some compiler specific definitions are overwritten here 9 - * for Intel ECC compiler 10 - */ 8 + /* Compiler specific definitions for Intel ECC compiler */ 11 9 12 10 #include <asm/intrinsics.h> 13 11
+3 -1
include/linux/compiler.h
··· 161 161 #endif 162 162 163 163 #ifndef OPTIMIZER_HIDE_VAR 164 - #define OPTIMIZER_HIDE_VAR(var) barrier() 164 + /* Make the optimizer believe the variable can be manipulated arbitrarily. */ 165 + #define OPTIMIZER_HIDE_VAR(var) \ 166 + __asm__ ("" : "=r" (var) : "0" (var)) 165 167 #endif 166 168 167 169 /* Not-quite-unique ID. */
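With the gcc-specific copy deleted in the compiler-gcc.h hunk above, the generic OPTIMIZER_HIDE_VAR() in compiler.h is promoted from a bare barrier() to the empty asm: the "=r"/"0" constraint pair routes var through a register the compiler must assume the asm may have changed, which defeats constant folding on that variable (the barrier() version did not actually hide the value). A small standalone demo:

#include <stdio.h>

#define OPTIMIZER_HIDE_VAR(var) \
        __asm__ ("" : "=r" (var) : "0" (var))

int main(void)
{
        int x = 42;

        OPTIMIZER_HIDE_VAR(x);
        /* Without the asm, the compiler folds this whole branch away at
         * -O2; with it, x is an opaque run-time value and the comparison
         * is actually emitted (inspect the output of -O2 -S). */
        if (x == 42)
                printf("equal\n");
        return 0;
}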
-9
include/linux/dma-mapping.h
··· 717 717 } 718 718 #endif 719 719 720 - /* 721 - * Please always use dma_alloc_coherent instead as it already zeroes the memory! 722 - */ 723 - static inline void *dma_zalloc_coherent(struct device *dev, size_t size, 724 - dma_addr_t *dma_handle, gfp_t flag) 725 - { 726 - return dma_alloc_coherent(dev, size, dma_handle, flag); 727 - } 728 - 729 720 static inline int dma_get_cache_alignment(void) 730 721 { 731 722 #ifdef ARCH_DMA_MINALIGN
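dma_zalloc_coherent() is removed because dma_alloc_coherent() already returns zeroed memory, making the wrapper pure noise; the pci-dma-compat.h hunk below is exactly the resulting one-for-one caller conversion. Illustrative fragment (hypothetical driver context, declarations elided):

/* Before: */
buf = dma_zalloc_coherent(dev, size, &dma_handle, GFP_KERNEL);
/* After - identical semantics, the memory is already zeroed: */
buf = dma_alloc_coherent(dev, size, &dma_handle, GFP_KERNEL);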
+1
include/linux/fb.h
··· 653 653 654 654 extern struct fb_info *registered_fb[FB_MAX]; 655 655 extern int num_registered_fb; 656 + extern bool fb_center_logo; 656 657 extern struct class *fb_class; 657 658 658 659 #define for_each_registered_fb(i) \
+1
include/linux/libnvdimm.h
··· 160 160 } 161 161 162 162 enum nvdimm_security_state { 163 + NVDIMM_SECURITY_ERROR = -1, 163 164 NVDIMM_SECURITY_DISABLED, 164 165 NVDIMM_SECURITY_UNLOCKED, 165 166 NVDIMM_SECURITY_LOCKED,
-21
include/linux/mfd/cros_ec.h
··· 282 282 struct cros_ec_command *msg); 283 283 284 284 /** 285 - * cros_ec_remove() - Remove a ChromeOS EC. 286 - * @ec_dev: Device to register. 287 - * 288 - * Call this to deregister a ChromeOS EC, then clean up any private data. 289 - * 290 - * Return: 0 on success or negative error code. 291 - */ 292 - int cros_ec_remove(struct cros_ec_device *ec_dev); 293 - 294 - /** 295 285 * cros_ec_register() - Register a new ChromeOS EC, using the provided info. 296 286 * @ec_dev: Device to register. 297 287 * ··· 324 334 * Return: 0 on error or non-zero bitmask of one or more EC_HOST_EVENT_*. 325 335 */ 326 336 u32 cros_ec_get_host_event(struct cros_ec_device *ec_dev); 327 - 328 - /* sysfs stuff */ 329 - extern struct attribute_group cros_ec_attr_group; 330 - extern struct attribute_group cros_ec_lightbar_attr_group; 331 - extern struct attribute_group cros_ec_vbc_attr_group; 332 - 333 - /* debugfs stuff */ 334 - int cros_ec_debugfs_init(struct cros_ec_dev *ec); 335 - void cros_ec_debugfs_remove(struct cros_ec_dev *ec); 336 - void cros_ec_debugfs_suspend(struct cros_ec_dev *ec); 337 - void cros_ec_debugfs_resume(struct cros_ec_dev *ec); 338 337 339 338 #endif /* __LINUX_MFD_CROS_EC_H */
+94
include/linux/mfd/cros_ec_commands.h
··· 2791 2791 } __packed; 2792 2792 2793 2793 /*****************************************************************************/ 2794 + /* Commands for I2S recording on audio codec. */ 2795 + 2796 + #define EC_CMD_CODEC_I2S 0x00BC 2797 + 2798 + enum ec_codec_i2s_subcmd { 2799 + EC_CODEC_SET_SAMPLE_DEPTH = 0x0, 2800 + EC_CODEC_SET_GAIN = 0x1, 2801 + EC_CODEC_GET_GAIN = 0x2, 2802 + EC_CODEC_I2S_ENABLE = 0x3, 2803 + EC_CODEC_I2S_SET_CONFIG = 0x4, 2804 + EC_CODEC_I2S_SET_TDM_CONFIG = 0x5, 2805 + EC_CODEC_I2S_SET_BCLK = 0x6, 2806 + }; 2807 + 2808 + enum ec_sample_depth_value { 2809 + EC_CODEC_SAMPLE_DEPTH_16 = 0, 2810 + EC_CODEC_SAMPLE_DEPTH_24 = 1, 2811 + }; 2812 + 2813 + enum ec_i2s_config { 2814 + EC_DAI_FMT_I2S = 0, 2815 + EC_DAI_FMT_RIGHT_J = 1, 2816 + EC_DAI_FMT_LEFT_J = 2, 2817 + EC_DAI_FMT_PCM_A = 3, 2818 + EC_DAI_FMT_PCM_B = 4, 2819 + EC_DAI_FMT_PCM_TDM = 5, 2820 + }; 2821 + 2822 + struct ec_param_codec_i2s { 2823 + /* 2824 + * enum ec_codec_i2s_subcmd 2825 + */ 2826 + uint8_t cmd; 2827 + union { 2828 + /* 2829 + * EC_CODEC_SET_SAMPLE_DEPTH 2830 + * Value should be one of ec_sample_depth_value. 2831 + */ 2832 + uint8_t depth; 2833 + 2834 + /* 2835 + * EC_CODEC_SET_GAIN 2836 + * Value should be 0~43 for both channels. 2837 + */ 2838 + struct ec_param_codec_i2s_set_gain { 2839 + uint8_t left; 2840 + uint8_t right; 2841 + } __packed gain; 2842 + 2843 + /* 2844 + * EC_CODEC_I2S_ENABLE 2845 + * 1 to enable, 0 to disable. 2846 + */ 2847 + uint8_t i2s_enable; 2848 + 2849 + /* 2850 + * EC_CODEC_I2S_SET_COFNIG 2851 + * Value should be one of ec_i2s_config. 2852 + */ 2853 + uint8_t i2s_config; 2854 + 2855 + /* 2856 + * EC_CODEC_I2S_SET_TDM_CONFIG 2857 + * Value should be one of ec_i2s_config. 2858 + */ 2859 + struct ec_param_codec_i2s_tdm { 2860 + /* 2861 + * 0 to 496 2862 + */ 2863 + int16_t ch0_delay; 2864 + /* 2865 + * -1 to 496 2866 + */ 2867 + int16_t ch1_delay; 2868 + uint8_t adjacent_to_ch0; 2869 + uint8_t adjacent_to_ch1; 2870 + } __packed tdm_param; 2871 + 2872 + /* 2873 + * EC_CODEC_I2S_SET_BCLK 2874 + */ 2875 + uint32_t bclk; 2876 + }; 2877 + } __packed; 2878 + 2879 + /* 2880 + * For subcommand EC_CODEC_GET_GAIN. 2881 + */ 2882 + struct ec_response_codec_gain { 2883 + uint8_t left; 2884 + uint8_t right; 2885 + } __packed; 2886 + 2887 + /*****************************************************************************/ 2794 2888 /* System commands */ 2795 2889 2796 2890 /*
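The new EC_CMD_CODEC_I2S host command multiplexes its sub-commands through a tagged union. A sketch of building the SET_GAIN request, re-declaring only the fields that sub-command touches from the structures above (how the block is then shipped to the EC depends on the host transport and is omitted):

#include <stdint.h>
#include <string.h>

#define EC_CODEC_SET_GAIN 0x1

struct ec_param_codec_i2s_set_gain {
        uint8_t left;
        uint8_t right;
} __attribute__((packed));

struct ec_param_codec_i2s {
        uint8_t cmd;            /* enum ec_codec_i2s_subcmd */
        union {
                uint8_t depth;
                struct ec_param_codec_i2s_set_gain gain;
                uint8_t i2s_enable;
                uint32_t bclk;
        };
} __attribute__((packed));

int main(void)
{
        struct ec_param_codec_i2s p;

        memset(&p, 0, sizeof(p));
        p.cmd = EC_CODEC_SET_GAIN;
        p.gain.left = 20;       /* valid range is 0..43 per the header */
        p.gain.right = 20;
        /* ... hand &p to the EC transport as the EC_CMD_CODEC_I2S payload ... */
        return 0;
}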
+1 -1
include/linux/mfd/ingenic-tcu.h
··· 41 41 #define TCU_TCSR_PRESCALE_LSB 3 42 42 #define TCU_TCSR_PRESCALE_MASK 0x38 43 43 44 - #define TCU_TCSR_PWM_SD BIT(9) /* 0: Shutdown abruptly 1: gracefully */ 44 + #define TCU_TCSR_PWM_SD BIT(9) /* 0: Shutdown gracefully 1: abruptly */ 45 45 #define TCU_TCSR_PWM_INITL_HIGH BIT(8) /* Sets the initial output level */ 46 46 #define TCU_TCSR_PWM_EN BIT(7) /* PWM pin output enable */ 47 47
+7
include/linux/mfd/madera/core.h
··· 15 15 #include <linux/gpio/consumer.h> 16 16 #include <linux/interrupt.h> 17 17 #include <linux/mfd/madera/pdata.h> 18 + #include <linux/mutex.h> 18 19 #include <linux/notifier.h> 19 20 #include <linux/regmap.h> 20 21 #include <linux/regulator/consumer.h> ··· 37 36 #define CS47L90_NUM_GPIOS 38 38 37 39 38 #define MADERA_MAX_MICBIAS 4 39 + 40 + #define MADERA_MAX_HP_OUTPUT 3 40 41 41 42 /* Notifier events */ 42 43 #define MADERA_NOTIFY_VOICE_TRIGGER 0x1 ··· 186 183 unsigned int num_childbias[MADERA_MAX_MICBIAS]; 187 184 188 185 struct snd_soc_dapm_context *dapm; 186 + struct mutex dapm_ptr_lock; 187 + unsigned int hp_ena; 188 + bool out_clamp[MADERA_MAX_HP_OUTPUT]; 189 + bool out_shorted[MADERA_MAX_HP_OUTPUT]; 189 190 190 191 struct blocking_notifier_head notifier; 191 192 };
+212
include/linux/mfd/stpmic1.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + /* 3 + * Copyright (C) STMicroelectronics 2018 - All Rights Reserved 4 + * Author: Philippe Peurichard <philippe.peurichard@st.com>, 5 + * Pascal Paillet <p.paillet@st.com> for STMicroelectronics. 6 + */ 7 + 8 + #ifndef __LINUX_MFD_STPMIC1_H 9 + #define __LINUX_MFD_STPMIC1_H 10 + 11 + #define TURN_ON_SR 0x1 12 + #define TURN_OFF_SR 0x2 13 + #define ICC_LDO_TURN_OFF_SR 0x3 14 + #define ICC_BUCK_TURN_OFF_SR 0x4 15 + #define RREQ_STATE_SR 0x5 16 + #define VERSION_SR 0x6 17 + 18 + #define SWOFF_PWRCTRL_CR 0x10 19 + #define PADS_PULL_CR 0x11 20 + #define BUCKS_PD_CR 0x12 21 + #define LDO14_PD_CR 0x13 22 + #define LDO56_VREF_PD_CR 0x14 23 + #define VBUS_DET_VIN_CR 0x15 24 + #define PKEY_TURNOFF_CR 0x16 25 + #define BUCKS_MASK_RANK_CR 0x17 26 + #define BUCKS_MASK_RESET_CR 0x18 27 + #define LDOS_MASK_RANK_CR 0x19 28 + #define LDOS_MASK_RESET_CR 0x1A 29 + #define WCHDG_CR 0x1B 30 + #define WCHDG_TIMER_CR 0x1C 31 + #define BUCKS_ICCTO_CR 0x1D 32 + #define LDOS_ICCTO_CR 0x1E 33 + 34 + #define BUCK1_ACTIVE_CR 0x20 35 + #define BUCK2_ACTIVE_CR 0x21 36 + #define BUCK3_ACTIVE_CR 0x22 37 + #define BUCK4_ACTIVE_CR 0x23 38 + #define VREF_DDR_ACTIVE_CR 0x24 39 + #define LDO1_ACTIVE_CR 0x25 40 + #define LDO2_ACTIVE_CR 0x26 41 + #define LDO3_ACTIVE_CR 0x27 42 + #define LDO4_ACTIVE_CR 0x28 43 + #define LDO5_ACTIVE_CR 0x29 44 + #define LDO6_ACTIVE_CR 0x2A 45 + 46 + #define BUCK1_STDBY_CR 0x30 47 + #define BUCK2_STDBY_CR 0x31 48 + #define BUCK3_STDBY_CR 0x32 49 + #define BUCK4_STDBY_CR 0x33 50 + #define VREF_DDR_STDBY_CR 0x34 51 + #define LDO1_STDBY_CR 0x35 52 + #define LDO2_STDBY_CR 0x36 53 + #define LDO3_STDBY_CR 0x37 54 + #define LDO4_STDBY_CR 0x38 55 + #define LDO5_STDBY_CR 0x39 56 + #define LDO6_STDBY_CR 0x3A 57 + 58 + #define BST_SW_CR 0x40 59 + 60 + #define INT_PENDING_R1 0x50 61 + #define INT_PENDING_R2 0x51 62 + #define INT_PENDING_R3 0x52 63 + #define INT_PENDING_R4 0x53 64 + 65 + #define INT_DBG_LATCH_R1 0x60 66 + #define INT_DBG_LATCH_R2 0x61 67 + #define INT_DBG_LATCH_R3 0x62 68 + #define INT_DBG_LATCH_R4 0x63 69 + 70 + #define INT_CLEAR_R1 0x70 71 + #define INT_CLEAR_R2 0x71 72 + #define INT_CLEAR_R3 0x72 73 + #define INT_CLEAR_R4 0x73 74 + 75 + #define INT_MASK_R1 0x80 76 + #define INT_MASK_R2 0x81 77 + #define INT_MASK_R3 0x82 78 + #define INT_MASK_R4 0x83 79 + 80 + #define INT_SET_MASK_R1 0x90 81 + #define INT_SET_MASK_R2 0x91 82 + #define INT_SET_MASK_R3 0x92 83 + #define INT_SET_MASK_R4 0x93 84 + 85 + #define INT_CLEAR_MASK_R1 0xA0 86 + #define INT_CLEAR_MASK_R2 0xA1 87 + #define INT_CLEAR_MASK_R3 0xA2 88 + #define INT_CLEAR_MASK_R4 0xA3 89 + 90 + #define INT_SRC_R1 0xB0 91 + #define INT_SRC_R2 0xB1 92 + #define INT_SRC_R3 0xB2 93 + #define INT_SRC_R4 0xB3 94 + 95 + #define PMIC_MAX_REGISTER_ADDRESS INT_SRC_R4 96 + 97 + #define STPMIC1_PMIC_NUM_IRQ_REGS 4 98 + 99 + #define TURN_OFF_SR_ICC_EVENT 0x08 100 + 101 + #define LDO_VOLTAGE_MASK GENMASK(6, 2) 102 + #define BUCK_VOLTAGE_MASK GENMASK(7, 2) 103 + #define LDO_BUCK_VOLTAGE_SHIFT 2 104 + 105 + #define LDO_ENABLE_MASK BIT(0) 106 + #define BUCK_ENABLE_MASK BIT(0) 107 + 108 + #define BUCK_HPLP_ENABLE_MASK BIT(1) 109 + #define BUCK_HPLP_SHIFT 1 110 + 111 + #define STDBY_ENABLE_MASK BIT(0) 112 + 113 + #define BUCKS_PD_CR_REG_MASK GENMASK(7, 0) 114 + #define BUCK_MASK_RANK_REGISTER_MASK GENMASK(3, 0) 115 + #define BUCK_MASK_RESET_REGISTER_MASK GENMASK(3, 0) 116 + #define LDO1234_PULL_DOWN_REGISTER_MASK GENMASK(7, 0) 117 + #define LDO56_VREF_PD_CR_REG_MASK GENMASK(5, 0) 118 + #define 
LDO_MASK_RANK_REGISTER_MASK GENMASK(5, 0) 119 + #define LDO_MASK_RESET_REGISTER_MASK GENMASK(5, 0) 120 + 121 + #define BUCK1_PULL_DOWN_REG BUCKS_PD_CR 122 + #define BUCK1_PULL_DOWN_MASK BIT(0) 123 + #define BUCK2_PULL_DOWN_REG BUCKS_PD_CR 124 + #define BUCK2_PULL_DOWN_MASK BIT(2) 125 + #define BUCK3_PULL_DOWN_REG BUCKS_PD_CR 126 + #define BUCK3_PULL_DOWN_MASK BIT(4) 127 + #define BUCK4_PULL_DOWN_REG BUCKS_PD_CR 128 + #define BUCK4_PULL_DOWN_MASK BIT(6) 129 + 130 + #define LDO1_PULL_DOWN_REG LDO14_PD_CR 131 + #define LDO1_PULL_DOWN_MASK BIT(0) 132 + #define LDO2_PULL_DOWN_REG LDO14_PD_CR 133 + #define LDO2_PULL_DOWN_MASK BIT(2) 134 + #define LDO3_PULL_DOWN_REG LDO14_PD_CR 135 + #define LDO3_PULL_DOWN_MASK BIT(4) 136 + #define LDO4_PULL_DOWN_REG LDO14_PD_CR 137 + #define LDO4_PULL_DOWN_MASK BIT(6) 138 + #define LDO5_PULL_DOWN_REG LDO56_VREF_PD_CR 139 + #define LDO5_PULL_DOWN_MASK BIT(0) 140 + #define LDO6_PULL_DOWN_REG LDO56_VREF_PD_CR 141 + #define LDO6_PULL_DOWN_MASK BIT(2) 142 + #define VREF_DDR_PULL_DOWN_REG LDO56_VREF_PD_CR 143 + #define VREF_DDR_PULL_DOWN_MASK BIT(4) 144 + 145 + #define BUCKS_ICCTO_CR_REG_MASK GENMASK(6, 0) 146 + #define LDOS_ICCTO_CR_REG_MASK GENMASK(5, 0) 147 + 148 + #define LDO_BYPASS_MASK BIT(7) 149 + 150 + /* Main PMIC Control Register 151 + * SWOFF_PWRCTRL_CR 152 + * Address : 0x10 153 + */ 154 + #define ICC_EVENT_ENABLED BIT(4) 155 + #define PWRCTRL_POLARITY_HIGH BIT(3) 156 + #define PWRCTRL_PIN_VALID BIT(2) 157 + #define RESTART_REQUEST_ENABLED BIT(1) 158 + #define SOFTWARE_SWITCH_OFF_ENABLED BIT(0) 159 + 160 + /* Main PMIC PADS Control Register 161 + * PADS_PULL_CR 162 + * Address : 0x11 163 + */ 164 + #define WAKEUP_DETECTOR_DISABLED BIT(4) 165 + #define PWRCTRL_PD_ACTIVE BIT(3) 166 + #define PWRCTRL_PU_ACTIVE BIT(2) 167 + #define WAKEUP_PD_ACTIVE BIT(1) 168 + #define PONKEY_PU_INACTIVE BIT(0) 169 + 170 + /* Main PMIC VINLOW Control Register 171 + * VBUS_DET_VIN_CRC DMSC 172 + * Address : 0x15 173 + */ 174 + #define SWIN_DETECTOR_ENABLED BIT(7) 175 + #define SWOUT_DETECTOR_ENABLED BIT(6) 176 + #define VINLOW_ENABLED BIT(0) 177 + #define VINLOW_CTRL_REG_MASK GENMASK(7, 0) 178 + 179 + /* USB Control Register 180 + * Address : 0x40 181 + */ 182 + #define BOOST_OVP_DISABLED BIT(7) 183 + #define VBUS_OTG_DETECTION_DISABLED BIT(6) 184 + #define SW_OUT_DISCHARGE BIT(5) 185 + #define VBUS_OTG_DISCHARGE BIT(4) 186 + #define OCP_LIMIT_HIGH BIT(3) 187 + #define SWIN_SWOUT_ENABLED BIT(2) 188 + #define USBSW_OTG_SWITCH_ENABLED BIT(1) 189 + #define BOOST_ENABLED BIT(0) 190 + 191 + /* PKEY_TURNOFF_CR 192 + * Address : 0x16 193 + */ 194 + #define PONKEY_PWR_OFF BIT(7) 195 + #define PONKEY_CC_FLAG_CLEAR BIT(6) 196 + #define PONKEY_TURNOFF_TIMER_MASK GENMASK(3, 0) 197 + #define PONKEY_TURNOFF_MASK GENMASK(7, 0) 198 + 199 + /* 200 + * struct stpmic1 - stpmic1 master device for sub-drivers 201 + * @dev: master device of the chip (can be used to access platform data) 202 + * @irq: main IRQ number 203 + * @regmap_irq_chip_data: irq chip data 204 + */ 205 + struct stpmic1 { 206 + struct device *dev; 207 + struct regmap *regmap; 208 + int irq; 209 + struct regmap_irq_chip_data *irq_data; 210 + }; 211 + 212 + #endif /* __LINUX_MFD_STPMIC1_H */
+4
include/linux/mfd/ti_am335x_tscadc.h
··· 78 78 #define STEPCONFIG_YNN BIT(8) 79 79 #define STEPCONFIG_XNP BIT(9) 80 80 #define STEPCONFIG_YPN BIT(10) 81 + #define STEPCONFIG_RFP(val) ((val) << 12) 82 + #define STEPCONFIG_RFP_VREFP (0x3 << 12) 81 83 #define STEPCONFIG_INM_MASK (0xF << 15) 82 84 #define STEPCONFIG_INM(val) ((val) << 15) 83 85 #define STEPCONFIG_INM_ADCREFM STEPCONFIG_INM(8) ··· 88 86 #define STEPCONFIG_INP_AN4 STEPCONFIG_INP(4) 89 87 #define STEPCONFIG_INP_ADCREFM STEPCONFIG_INP(8) 90 88 #define STEPCONFIG_FIFO1 BIT(26) 89 + #define STEPCONFIG_RFM(val) ((val) << 23) 90 + #define STEPCONFIG_RFM_VREFN (0x3 << 23) 91 91 92 92 /* Delay register */ 93 93 #define STEPDELAY_OPEN_MASK (0x3FFFF << 0)
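The new STEPCONFIG_RFP/RFM macros select the positive and negative ADC reference source for a step, with 0x3 selecting the dedicated VREFP/VREFN pins. Composing a step-config word from exactly the values defined in the hunk:

#include <stdint.h>
#include <stdio.h>

#define STEPCONFIG_RFP(val)     ((val) << 12)
#define STEPCONFIG_RFP_VREFP    (0x3 << 12)
#define STEPCONFIG_RFM(val)     ((val) << 23)
#define STEPCONFIG_RFM_VREFN    (0x3 << 23)

int main(void)
{
        /* Reference the step against the external VREFP/VREFN pin pair. */
        uint32_t stepconfig = STEPCONFIG_RFP_VREFP | STEPCONFIG_RFM_VREFN;

        printf("STEPCONFIG = 0x%08x\n", stepconfig);    /* 0x01803000 */
        return 0;
}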
+1 -1
include/linux/mfd/tmio.h
··· 79 79 /* Some controllers have a CBSY bit */ 80 80 #define TMIO_MMC_HAVE_CBSY BIT(11) 81 81 82 - /* Some controllers that support HS400 use use 4 taps while others use 8. */ 82 + /* Some controllers that support HS400 use 4 taps while others use 8. */ 83 83 #define TMIO_MMC_HAVE_4TAP_HS400 BIT(13) 84 84 85 85 int tmio_core_mmc_enable(void __iomem *cnf, int shift, unsigned long base);
+6
include/linux/mmzone.h
··· 520 520 PGDAT_RECLAIM_LOCKED, /* prevents concurrent reclaim */ 521 521 }; 522 522 523 + enum zone_flags { 524 + ZONE_BOOSTED_WATERMARK, /* zone recently boosted watermarks. 525 + * Cleared when kswapd is woken. 526 + */ 527 + }; 528 + 523 529 static inline unsigned long zone_managed_pages(struct zone *zone) 524 530 { 525 531 return (unsigned long)atomic_long_read(&zone->managed_pages);
+1 -1
include/linux/module.h
··· 828 828 static inline void module_bug_cleanup(struct module *mod) {} 829 829 #endif /* CONFIG_GENERIC_BUG */ 830 830 831 - #ifdef RETPOLINE 831 + #ifdef CONFIG_RETPOLINE 832 832 extern bool retpoline_module_ok(bool has_retpoline); 833 833 #else 834 834 static inline bool retpoline_module_ok(bool has_retpoline)
-1
include/linux/of.h
··· 50 50 51 51 struct device_node { 52 52 const char *name; 53 - const char *type; 54 53 phandle phandle; 55 54 const char *full_name; 56 55 struct fwnode_handle fwnode;
+1 -1
include/linux/pci-dma-compat.h
··· 24 24 pci_zalloc_consistent(struct pci_dev *hwdev, size_t size, 25 25 dma_addr_t *dma_handle) 26 26 { 27 - return dma_zalloc_coherent(&hwdev->dev, size, dma_handle, GFP_ATOMIC); 27 + return dma_alloc_coherent(&hwdev->dev, size, dma_handle, GFP_ATOMIC); 28 28 } 29 29 30 30 static inline void
+4 -2
include/linux/phy.h
··· 48 48 extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_gbit_fibre_features) __ro_after_init; 49 49 extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_gbit_all_ports_features) __ro_after_init; 50 50 extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_features) __ro_after_init; 51 + extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_fec_features) __ro_after_init; 51 52 extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_full_features) __ro_after_init; 52 53 53 54 #define PHY_BASIC_FEATURES ((unsigned long *)&phy_basic_features) ··· 57 56 #define PHY_GBIT_FIBRE_FEATURES ((unsigned long *)&phy_gbit_fibre_features) 58 57 #define PHY_GBIT_ALL_PORTS_FEATURES ((unsigned long *)&phy_gbit_all_ports_features) 59 58 #define PHY_10GBIT_FEATURES ((unsigned long *)&phy_10gbit_features) 59 + #define PHY_10GBIT_FEC_FEATURES ((unsigned long *)&phy_10gbit_fec_features) 60 60 #define PHY_10GBIT_FULL_FEATURES ((unsigned long *)&phy_10gbit_full_features) 61 61 62 62 extern const int phy_10_100_features_array[4]; ··· 469 467 * only works for PHYs with IDs which match this field 470 468 * name: The friendly name of this PHY type 471 469 * phy_id_mask: Defines the important bits of the phy_id 472 - * features: A list of features (speed, duplex, etc) supported 473 - * by this PHY 470 + * features: A mandatory list of features (speed, duplex, etc) 471 + * supported by this PHY 474 472 * flags: A bitfield defining certain other features this PHY 475 473 * supports (like interrupts) 476 474 *
+1
include/linux/phy/phy.h
··· 42 42 PHY_MODE_PCIE, 43 43 PHY_MODE_ETHERNET, 44 44 PHY_MODE_MIPI_DPHY, 45 + PHY_MODE_SATA 45 46 }; 46 47 47 48 /**
+5
include/linux/pm_opp.h
··· 108 108 int dev_pm_opp_add(struct device *dev, unsigned long freq, 109 109 unsigned long u_volt); 110 110 void dev_pm_opp_remove(struct device *dev, unsigned long freq); 111 + void dev_pm_opp_remove_all_dynamic(struct device *dev); 111 112 112 113 int dev_pm_opp_enable(struct device *dev, unsigned long freq); 113 114 ··· 215 214 } 216 215 217 216 static inline void dev_pm_opp_remove(struct device *dev, unsigned long freq) 217 + { 218 + } 219 + 220 + static inline void dev_pm_opp_remove_all_dynamic(struct device *dev) 218 221 { 219 222 } 220 223
+1
include/linux/qcom_scm.h
··· 13 13 #ifndef __QCOM_SCM_H 14 14 #define __QCOM_SCM_H 15 15 16 + #include <linux/err.h> 16 17 #include <linux/types.h> 17 18 #include <linux/cpumask.h> 18 19
+31
include/linux/qed/qed_chain.h
··· 663 663 static inline void qed_chain_set_prod(struct qed_chain *p_chain, 664 664 u32 prod_idx, void *p_prod_elem) 665 665 { 666 + if (p_chain->mode == QED_CHAIN_MODE_PBL) { 667 + u32 cur_prod, page_mask, page_cnt, page_diff; 668 + 669 + cur_prod = is_chain_u16(p_chain) ? p_chain->u.chain16.prod_idx : 670 + p_chain->u.chain32.prod_idx; 671 + 672 + /* Assume that number of elements in a page is power of 2 */ 673 + page_mask = ~p_chain->elem_per_page_mask; 674 + 675 + /* Use "cur_prod - 1" and "prod_idx - 1" since producer index 676 + * reaches the first element of next page before the page index 677 + * is incremented. See qed_chain_produce(). 678 + * Index wrap around is not a problem because the difference 679 + * between current and given producer indices is always 680 + * positive and lower than the chain's capacity. 681 + */ 682 + page_diff = (((cur_prod - 1) & page_mask) - 683 + ((prod_idx - 1) & page_mask)) / 684 + p_chain->elem_per_page; 685 + 686 + page_cnt = qed_chain_get_page_cnt(p_chain); 687 + if (is_chain_u16(p_chain)) 688 + p_chain->pbl.c.u16.prod_page_idx = 689 + (p_chain->pbl.c.u16.prod_page_idx - 690 + page_diff + page_cnt) % page_cnt; 691 + else 692 + p_chain->pbl.c.u32.prod_page_idx = 693 + (p_chain->pbl.c.u32.prod_page_idx - 694 + page_diff + page_cnt) % page_cnt; 695 + } 696 + 666 697 if (is_chain_u16(p_chain)) 667 698 p_chain->u.chain16.prod_idx = (u16) prod_idx; 668 699 else
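qed_chain_set_prod() now rewinds the PBL producer page index along with the producer index. The "- 1" in the comment is the subtle part: the page index lags by one element, so the page that producer index i belongs to is (i - 1) / elem_per_page, not i / elem_per_page. A standalone check of the arithmetic with 16 elements per page and 8 pages:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t elem_per_page = 16;
        uint32_t elem_per_page_mask = elem_per_page - 1;
        uint32_t page_mask = ~elem_per_page_mask;
        uint32_t page_cnt = 8;

        uint32_t cur_prod = 33;         /* sits on page (33 - 1) / 16 = 2 */
        uint32_t prod_idx = 20;         /* rewind to page (20 - 1) / 16 = 1 */
        uint32_t prod_page_idx = 2;

        uint32_t page_diff = (((cur_prod - 1) & page_mask) -
                              ((prod_idx - 1) & page_mask)) / elem_per_page;

        /* Same modular rewind as the hunk above. */
        prod_page_idx = (prod_page_idx - page_diff + page_cnt) % page_cnt;
        printf("page_diff=%u new prod_page_idx=%u\n",
               page_diff, prod_page_idx);               /* prints 1 and 1 */
        return 0;
}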
+11 -4
include/linux/reset.h
··· 32 32 struct reset_control *of_reset_control_array_get(struct device_node *np, 33 33 bool shared, bool optional); 34 34 35 + int reset_control_get_count(struct device *dev); 36 + 35 37 #else 36 38 37 39 static inline int reset_control_reset(struct reset_control *rstc) ··· 99 97 return optional ? NULL : ERR_PTR(-ENOTSUPP); 100 98 } 101 99 100 + static inline int reset_control_get_count(struct device *dev) 101 + { 102 + return -ENOENT; 103 + } 104 + 102 105 #endif /* CONFIG_RESET_CONTROLLER */ 103 106 104 107 static inline int __must_check device_reset(struct device *dev) ··· 145 138 * 146 139 * Returns a struct reset_control or IS_ERR() condition containing errno. 147 140 * This function is intended for use with reset-controls which are shared 148 - * between hardware-blocks. 141 + * between hardware blocks. 149 142 * 150 143 * When a reset-control is shared, the behavior of reset_control_assert / 151 144 * deassert is changed, the reset-core will keep track of a deassert_count ··· 194 187 } 195 188 196 189 /** 197 - * of_reset_control_get_shared - Lookup and obtain an shared reference 190 + * of_reset_control_get_shared - Lookup and obtain a shared reference 198 191 * to a reset controller. 199 192 * @node: device to be reset by the controller 200 193 * @id: reset line name ··· 236 229 } 237 230 238 231 /** 239 - * of_reset_control_get_shared_by_index - Lookup and obtain an shared 232 + * of_reset_control_get_shared_by_index - Lookup and obtain a shared 240 233 * reference to a reset controller 241 234 * by index. 242 235 * @node: device to be reset by the controller ··· 329 322 330 323 /** 331 324 * devm_reset_control_get_shared_by_index - resource managed 332 - * reset_control_get_shared 325 + * reset_control_get_shared 333 326 * @dev: device to be reset by the controller 334 327 * @index: index of the reset controller 335 328 *
+10 -1
include/linux/sched.h
··· 995 995 /* cg_list protected by css_set_lock and tsk->alloc_lock: */ 996 996 struct list_head cg_list; 997 997 #endif 998 - #ifdef CONFIG_RESCTRL 998 + #ifdef CONFIG_X86_RESCTRL 999 999 u32 closid; 1000 1000 u32 rmid; 1001 1001 #endif ··· 1406 1406 #define PF_RANDOMIZE 0x00400000 /* Randomize virtual address space */ 1407 1407 #define PF_SWAPWRITE 0x00800000 /* Allowed to write to swap */ 1408 1408 #define PF_MEMSTALL 0x01000000 /* Stalled due to lack of memory */ 1409 + #define PF_UMH 0x02000000 /* I'm an Usermodehelper process */ 1409 1410 #define PF_NO_SETAFFINITY 0x04000000 /* Userland is not allowed to meddle with cpus_allowed */ 1410 1411 #define PF_MCE_EARLY 0x08000000 /* Early kill for mce process policy */ 1411 1412 #define PF_MUTEX_TESTER 0x20000000 /* Thread belongs to the rt mutex tester */ ··· 1904 1903 } 1905 1904 1906 1905 #endif 1906 + 1907 + void __exit_umh(struct task_struct *tsk); 1908 + 1909 + static inline void exit_umh(struct task_struct *tsk) 1910 + { 1911 + if (unlikely(tsk->flags & PF_UMH)) 1912 + __exit_umh(tsk); 1913 + } 1907 1914 1908 1915 #ifdef CONFIG_DEBUG_RSEQ 1909 1916
+1
include/linux/skbuff.h
··· 3218 3218 * 3219 3219 * This is exactly the same as pskb_trim except that it ensures the 3220 3220 * checksum of received packets are still valid after the operation. 3221 + * It can change skb pointers. 3221 3222 */ 3222 3223 3223 3224 static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len)
+2
include/linux/umh.h
··· 47 47 const char *cmdline; 48 48 struct file *pipe_to_umh; 49 49 struct file *pipe_from_umh; 50 + struct list_head list; 51 + void (*cleanup)(struct umh_info *info); 50 52 pid_t pid; 51 53 }; 52 54 int fork_usermode_blob(void *data, size_t len, struct umh_info *info);
+9 -4
include/linux/virtio_config.h
··· 12 12 13 13 /** 14 14 * virtio_config_ops - operations for configuring a virtio device 15 + * Note: Do not assume that a transport implements all of the operations 16 + * getting/setting a value as a simple read/write! Generally speaking, 17 + * any of @get/@set, @get_status/@set_status, or @get_features/ 18 + * @finalize_features are NOT safe to be called from an atomic 19 + * context. 15 20 * @get: read the value of a configuration field 16 21 * vdev: the virtio_device 17 22 * offset: the offset of the configuration field ··· 27 22 * offset: the offset of the configuration field 28 23 * buf: the buffer to read the field value from. 29 24 * len: the length of the buffer 30 - * @generation: config generation counter 25 + * @generation: config generation counter (optional) 31 26 * vdev: the virtio_device 32 27 * Returns the config generation counter 33 28 * @get_status: read the status byte ··· 53 48 * @del_vqs: free virtqueues found by find_vqs(). 54 49 * @get_features: get the array of feature bits for this device. 55 50 * vdev: the virtio_device 56 - * Returns the first 32 feature bits (all we currently need). 51 + * Returns the first 64 feature bits (all we currently need). 57 52 * @finalize_features: confirm what device features we'll be using. 58 53 * vdev: the virtio_device 59 54 * This gives the final feature bits for the device: it can change 60 55 * the dev->feature bits if it wants. 61 56 * Returns 0 on success or error status 62 - * @bus_name: return the bus name associated with the device 57 + * @bus_name: return the bus name associated with the device (optional) 63 58 * vdev: the virtio_device 64 59 * This returns a pointer to the bus name a la pci_name from which 65 60 * the caller can then copy. 66 - * @set_vq_affinity: set the affinity for a virtqueue. 61 + * @set_vq_affinity: set the affinity for a virtqueue (optional). 67 62 * @get_vq_affinity: get the affinity for a virtqueue (optional). 68 63 */ 69 64 typedef void vq_callback_t(struct virtqueue *);
-16
include/net/af_rxrpc.h
··· 21 21 struct rxrpc_call; 22 22 23 23 /* 24 - * Call completion condition (state == RXRPC_CALL_COMPLETE). 25 - */ 26 - enum rxrpc_call_completion { 27 - RXRPC_CALL_SUCCEEDED, /* - Normal termination */ 28 - RXRPC_CALL_REMOTELY_ABORTED, /* - call aborted by peer */ 29 - RXRPC_CALL_LOCALLY_ABORTED, /* - call aborted locally on error or close */ 30 - RXRPC_CALL_LOCAL_ERROR, /* - call failed due to local error */ 31 - RXRPC_CALL_NETWORK_ERROR, /* - call terminated by network error */ 32 - NR__RXRPC_CALL_COMPLETIONS 33 - }; 34 - 35 - /* 36 24 * Debug ID counter for tracing. 37 25 */ 38 26 extern atomic_t rxrpc_debug_id; ··· 61 73 rxrpc_user_attach_call_t, unsigned long, gfp_t, 62 74 unsigned int); 63 75 void rxrpc_kernel_set_tx_length(struct socket *, struct rxrpc_call *, s64); 64 - int rxrpc_kernel_retry_call(struct socket *, struct rxrpc_call *, 65 - struct sockaddr_rxrpc *, struct key *); 66 - int rxrpc_kernel_check_call(struct socket *, struct rxrpc_call *, 67 - enum rxrpc_call_completion *, u32 *); 68 76 u32 rxrpc_kernel_check_life(const struct socket *, const struct rxrpc_call *); 69 77 void rxrpc_kernel_probe_life(struct socket *, struct rxrpc_call *); 70 78 u32 rxrpc_kernel_get_epoch(struct socket *, struct rxrpc_call *);
+1 -1
include/net/ip_fib.h
··· 241 241 struct netlink_ext_ack *extack); 242 242 int fib_table_dump(struct fib_table *table, struct sk_buff *skb, 243 243 struct netlink_callback *cb, struct fib_dump_filter *filter); 244 - int fib_table_flush(struct net *net, struct fib_table *table); 244 + int fib_table_flush(struct net *net, struct fib_table *table, bool flush_all); 245 245 struct fib_table *fib_trie_unmerge(struct fib_table *main_tb); 246 246 void fib_table_flush_external(struct fib_table *table); 247 247 void fib_free_table(struct fib_table *tb);
-1
include/net/netfilter/nf_flow_table.h
··· 84 84 struct nf_flow_route { 85 85 struct { 86 86 struct dst_entry *dst; 87 - int ifindex; 88 87 } tuple[FLOW_OFFLOAD_DIR_MAX]; 89 88 }; 90 89
+2
include/trace/events/afs.h
··· 25 25 enum afs_call_trace { 26 26 afs_call_trace_alloc, 27 27 afs_call_trace_free, 28 + afs_call_trace_get, 28 29 afs_call_trace_put, 29 30 afs_call_trace_wake, 30 31 afs_call_trace_work, ··· 160 159 #define afs_call_traces \ 161 160 EM(afs_call_trace_alloc, "ALLOC") \ 162 161 EM(afs_call_trace_free, "FREE ") \ 162 + EM(afs_call_trace_get, "GET ") \ 163 163 EM(afs_call_trace_put, "PUT ") \ 164 164 EM(afs_call_trace_wake, "WAKE ") \ 165 165 E_(afs_call_trace_work, "WORK ")
+2
include/uapi/linux/audit.h
··· 400 400 /* do not define AUDIT_ARCH_PPCLE since it is not supported by audit */ 401 401 #define AUDIT_ARCH_PPC64 (EM_PPC64|__AUDIT_ARCH_64BIT) 402 402 #define AUDIT_ARCH_PPC64LE (EM_PPC64|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE) 403 + #define AUDIT_ARCH_RISCV32 (EM_RISCV|__AUDIT_ARCH_LE) 404 + #define AUDIT_ARCH_RISCV64 (EM_RISCV|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE) 403 405 #define AUDIT_ARCH_S390 (EM_S390) 404 406 #define AUDIT_ARCH_S390X (EM_S390|__AUDIT_ARCH_64BIT) 405 407 #define AUDIT_ARCH_SH (EM_SH)
+1 -1
include/uapi/linux/in.h
··· 268 268 #define IN_MULTICAST(a) IN_CLASSD(a) 269 269 #define IN_MULTICAST_NET 0xe0000000 270 270 271 - #define IN_BADCLASS(a) ((((long int) (a) ) == 0xffffffff) 271 + #define IN_BADCLASS(a) (((long int) (a) ) == (long int)0xffffffff) 272 272 #define IN_EXPERIMENTAL(a) IN_BADCLASS((a)) 273 273 274 274 #define IN_CLASSE(a) ((((long int) (a)) & 0xf0000000) == 0xf0000000)
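The old IN_BADCLASS() had one more opening parenthesis than closing, so any code that actually expanded the macro failed to compile; the rewrite balances the parentheses and casts the literal so both operands compare as long int regardless of word size. Quick check of the fixed macro:

#include <stdio.h>

#define IN_BADCLASS(a)  (((long int) (a) ) == (long int)0xffffffff)

int main(void)
{
        unsigned int bcast = 0xffffffffu;  /* 255.255.255.255 in host order */
        unsigned int mcast = 0xe0000001u;  /* 224.0.0.1 */

        printf("%d %d\n", IN_BADCLASS(bcast), IN_BADCLASS(mcast));  /* 1 0 */
        return 0;
}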
+1 -1
include/uapi/linux/ptp_clock.h
··· 147 147 #define PTP_SYS_OFFSET_PRECISE \ 148 148 _IOWR(PTP_CLK_MAGIC, 8, struct ptp_sys_offset_precise) 149 149 #define PTP_SYS_OFFSET_EXTENDED \ 150 - _IOW(PTP_CLK_MAGIC, 9, struct ptp_sys_offset_extended) 150 + _IOWR(PTP_CLK_MAGIC, 9, struct ptp_sys_offset_extended) 151 151 152 152 struct ptp_extts_event { 153 153 struct ptp_clock_time t; /* Time event occured. */
+1
include/uapi/rdma/vmw_pvrdma-abi.h
··· 78 78 PVRDMA_WR_MASKED_ATOMIC_FETCH_AND_ADD, 79 79 PVRDMA_WR_BIND_MW, 80 80 PVRDMA_WR_REG_SIG_MR, 81 + PVRDMA_WR_ERROR, 81 82 }; 82 83 83 84 enum pvrdma_wc_status {
+1
init/Kconfig
··· 1124 1124 bool "Dead code and data elimination (EXPERIMENTAL)" 1125 1125 depends on HAVE_LD_DEAD_CODE_DATA_ELIMINATION 1126 1126 depends on EXPERT 1127 + depends on !(FUNCTION_TRACER && CC_IS_GCC && GCC_VERSION < 40800) 1127 1128 depends on $(cc-option,-ffunction-sections -fdata-sections) 1128 1129 depends on $(ld-option,--gc-sections) 1129 1130 help
+7 -7
kernel/bpf/btf.c
··· 467 467 return kind_ops[BTF_INFO_KIND(t->info)]; 468 468 } 469 469 470 - bool btf_name_offset_valid(const struct btf *btf, u32 offset) 470 + static bool btf_name_offset_valid(const struct btf *btf, u32 offset) 471 471 { 472 472 return BTF_STR_OFFSET_VALID(offset) && 473 473 offset < btf->hdr.str_len; ··· 1219 1219 u8 nr_copy_bits; 1220 1220 u64 print_num; 1221 1221 1222 - data += BITS_ROUNDDOWN_BYTES(bits_offset); 1223 - bits_offset = BITS_PER_BYTE_MASKED(bits_offset); 1224 1222 nr_copy_bits = nr_bits + bits_offset; 1225 1223 nr_copy_bytes = BITS_ROUNDUP_BYTES(nr_copy_bits); 1226 1224 ··· 1253 1255 * BTF_INT_OFFSET() cannot exceed 64 bits. 1254 1256 */ 1255 1257 total_bits_offset = bits_offset + BTF_INT_OFFSET(int_data); 1256 - btf_bitfield_seq_show(data, total_bits_offset, nr_bits, m); 1258 + data += BITS_ROUNDDOWN_BYTES(total_bits_offset); 1259 + bits_offset = BITS_PER_BYTE_MASKED(total_bits_offset); 1260 + btf_bitfield_seq_show(data, bits_offset, nr_bits, m); 1257 1261 } 1258 1262 1259 1263 static void btf_int_seq_show(const struct btf *btf, const struct btf_type *t, ··· 2001 2001 2002 2002 member_offset = btf_member_bit_offset(t, member); 2003 2003 bitfield_size = btf_member_bitfield_size(t, member); 2004 + bytes_offset = BITS_ROUNDDOWN_BYTES(member_offset); 2005 + bits8_offset = BITS_PER_BYTE_MASKED(member_offset); 2004 2006 if (bitfield_size) { 2005 - btf_bitfield_seq_show(data, member_offset, 2007 + btf_bitfield_seq_show(data + bytes_offset, bits8_offset, 2006 2008 bitfield_size, m); 2007 2009 } else { 2008 - bytes_offset = BITS_ROUNDDOWN_BYTES(member_offset); 2009 - bits8_offset = BITS_PER_BYTE_MASKED(member_offset); 2010 2010 ops = btf_type_ops(member_type); 2011 2011 ops->seq_show(btf, member_type, member->type, 2012 2012 data + bytes_offset, bits8_offset, m);
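The btf.c change fixes bitfield pretty-printing for members deep inside a struct: btf_bitfield_seq_show() takes its bit offset as a u8, so handing it a whole-struct bit offset (which exceeds 255 once a member sits past the first 32 bytes) silently truncated it. The fix splits the total offset into a byte-pointer adjustment plus a 0-7 residual before the call, so only the residual goes through the narrow parameter. The split helpers are plain shifts and masks; a worked value:

#include <stdio.h>

#define BITS_PER_BYTE_MASKED(b)   ((b) & 7)
#define BITS_ROUNDDOWN_BYTES(b)   ((b) >> 3)

int main(void)
{
        unsigned int member_off = 260;  /* bit offset of a member deep in a struct */

        /* Fixed scheme: advance the data pointer by whole bytes first, then
         * pass only the 0-7 residual, which safely fits the callee's u8. */
        printf("bytes=%u bits=%u\n",
               BITS_ROUNDDOWN_BYTES(member_off),
               BITS_PER_BYTE_MASKED(member_off));       /* bytes=32 bits=4 */

        /* Old scheme: 260 squeezed through a u8 parameter becomes 4, so the
         * callee split that into byte 0, bit 4 - the wrong location. */
        printf("truncated=%u\n", (unsigned char)member_off);
        return 0;
}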
+1
kernel/bpf/cgroup.c
··· 718 718 case BPF_FUNC_trace_printk: 719 719 if (capable(CAP_SYS_ADMIN)) 720 720 return bpf_get_trace_printk_proto(); 721 + /* fall through */ 721 722 default: 722 723 return NULL; 723 724 }
+15 -2
kernel/bpf/map_in_map.c
··· 12 12 struct bpf_map *bpf_map_meta_alloc(int inner_map_ufd) 13 13 { 14 14 struct bpf_map *inner_map, *inner_map_meta; 15 + u32 inner_map_meta_size; 15 16 struct fd f; 16 17 17 18 f = fdget(inner_map_ufd); ··· 37 36 return ERR_PTR(-EINVAL); 38 37 } 39 38 40 - inner_map_meta = kzalloc(sizeof(*inner_map_meta), GFP_USER); 39 + inner_map_meta_size = sizeof(*inner_map_meta); 40 + /* In some cases verifier needs to access beyond just base map. */ 41 + if (inner_map->ops == &array_map_ops) 42 + inner_map_meta_size = sizeof(struct bpf_array); 43 + 44 + inner_map_meta = kzalloc(inner_map_meta_size, GFP_USER); 41 45 if (!inner_map_meta) { 42 46 fdput(f); 43 47 return ERR_PTR(-ENOMEM); ··· 52 46 inner_map_meta->key_size = inner_map->key_size; 53 47 inner_map_meta->value_size = inner_map->value_size; 54 48 inner_map_meta->map_flags = inner_map->map_flags; 55 - inner_map_meta->ops = inner_map->ops; 56 49 inner_map_meta->max_entries = inner_map->max_entries; 50 + 51 + /* Misc members not needed in bpf_map_meta_equal() check. */ 52 + inner_map_meta->ops = inner_map->ops; 53 + if (inner_map->ops == &array_map_ops) { 54 + inner_map_meta->unpriv_array = inner_map->unpriv_array; 55 + container_of(inner_map_meta, struct bpf_array, map)->index_mask = 56 + container_of(inner_map, struct bpf_array, map)->index_mask; 57 + } 57 58 58 59 fdput(f); 59 60 return inner_map_meta;
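Copying unpriv_array and index_mask into the inner-map meta matters because the verifier uses them for Spectre-v1 hardening of array lookups: index_mask is the capacity rounded up to a power of two minus one (with the array over-allocated to match), and unprivileged accesses are ANDed with it so even a mispredicted bounds check stays inside the map. The masking idea in isolation, as a sketch:

#include <stdint.h>
#include <stdio.h>

/* Round max_entries up to a power of two, minus one: the bpf_array
 * index_mask. Any index ANDed with it stays inside the (over-allocated)
 * array even under branch misprediction. */
static uint32_t index_mask(uint32_t max_entries)
{
        uint32_t n = max_entries - 1;

        n |= n >> 1; n |= n >> 2; n |= n >> 4; n |= n >> 8; n |= n >> 16;
        return n;
}

int main(void)
{
        uint32_t mask = index_mask(100);        /* 0x7f */
        uint32_t idx = 200;                     /* out of bounds */

        printf("mask=0x%x clamped=%u\n", mask, idx & mask);  /* 0x7f, 72 */
        return 0;
}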
+9 -3
kernel/bpf/stackmap.c
··· 180 180 181 181 if (nhdr->n_type == BPF_BUILD_ID && 182 182 nhdr->n_namesz == sizeof("GNU") && 183 - nhdr->n_descsz == BPF_BUILD_ID_SIZE) { 183 + nhdr->n_descsz > 0 && 184 + nhdr->n_descsz <= BPF_BUILD_ID_SIZE) { 184 185 memcpy(build_id, 185 186 note_start + note_offs + 186 187 ALIGN(sizeof("GNU"), 4) + sizeof(Elf32_Nhdr), 187 - BPF_BUILD_ID_SIZE); 188 + nhdr->n_descsz); 189 + memset(build_id + nhdr->n_descsz, 0, 190 + BPF_BUILD_ID_SIZE - nhdr->n_descsz); 188 191 return 0; 189 192 } 190 193 new_offs = note_offs + sizeof(Elf32_Nhdr) + ··· 263 260 return -EFAULT; /* page not mapped */ 264 261 265 262 ret = -EINVAL; 266 - page_addr = page_address(page); 263 + page_addr = kmap_atomic(page); 267 264 ehdr = (Elf32_Ehdr *)page_addr; 268 265 269 266 /* compare magic x7f "ELF" */ ··· 279 276 else if (ehdr->e_ident[EI_CLASS] == ELFCLASS64) 280 277 ret = stack_map_get_build_id_64(page_addr, build_id); 281 278 out: 279 + kunmap_atomic(page_addr); 282 280 put_page(page); 283 281 return ret; 284 282 } ··· 314 310 for (i = 0; i < trace_nr; i++) { 315 311 id_offs[i].status = BPF_STACK_BUILD_ID_IP; 316 312 id_offs[i].ip = ips[i]; 313 + memset(id_offs[i].build_id, 0, BPF_BUILD_ID_SIZE); 317 314 } 318 315 return; 319 316 } ··· 325 320 /* per entry fall back to ips */ 326 321 id_offs[i].status = BPF_STACK_BUILD_ID_IP; 327 322 id_offs[i].ip = ips[i]; 323 + memset(id_offs[i].build_id, 0, BPF_BUILD_ID_SIZE); 328 324 continue; 329 325 } 330 326 id_offs[i].offset = (vma->vm_pgoff << PAGE_SHIFT) + ips[i]
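The stackmap changes accept GNU build-id notes shorter than the 20-byte maximum (rejecting only empty or oversized descriptors), copy n_descsz bytes, and zero-pad the tail so consumers always see a fixed-width field; the added memsets on the ip-fallback paths likewise keep stale bytes out of the exposed build_id field. The copy-and-pad idiom on its own:

#include <stdio.h>
#include <string.h>

#define BUILD_ID_SIZE 20

/* Copy a variable-length descriptor into a fixed-width, zero-padded
 * buffer; reject empty or oversized input, as the kernel check does. */
static int copy_build_id(unsigned char *dst, const unsigned char *desc,
                         size_t descsz)
{
        if (descsz == 0 || descsz > BUILD_ID_SIZE)
                return -1;
        memcpy(dst, desc, descsz);
        memset(dst + descsz, 0, BUILD_ID_SIZE - descsz);
        return 0;
}

int main(void)
{
        unsigned char id[BUILD_ID_SIZE];
        const unsigned char desc[16] = "0123456789abcdef"; /* 16-byte note */

        if (copy_build_id(id, desc, sizeof(desc)) == 0)
                printf("last byte padded to %d\n", id[BUILD_ID_SIZE - 1]);
        return 0;
}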
+48 -13
kernel/bpf/verifier.c
··· 3103 3103 } 3104 3104 } 3105 3105 3106 + static bool can_skip_alu_sanitation(const struct bpf_verifier_env *env, 3107 + const struct bpf_insn *insn) 3108 + { 3109 + return env->allow_ptr_leaks || BPF_SRC(insn->code) == BPF_K; 3110 + } 3111 + 3112 + static int update_alu_sanitation_state(struct bpf_insn_aux_data *aux, 3113 + u32 alu_state, u32 alu_limit) 3114 + { 3115 + /* If we arrived here from different branches with different 3116 + * state or limits to sanitize, then this won't work. 3117 + */ 3118 + if (aux->alu_state && 3119 + (aux->alu_state != alu_state || 3120 + aux->alu_limit != alu_limit)) 3121 + return -EACCES; 3122 + 3123 + /* Corresponding fixup done in fixup_bpf_calls(). */ 3124 + aux->alu_state = alu_state; 3125 + aux->alu_limit = alu_limit; 3126 + return 0; 3127 + } 3128 + 3129 + static int sanitize_val_alu(struct bpf_verifier_env *env, 3130 + struct bpf_insn *insn) 3131 + { 3132 + struct bpf_insn_aux_data *aux = cur_aux(env); 3133 + 3134 + if (can_skip_alu_sanitation(env, insn)) 3135 + return 0; 3136 + 3137 + return update_alu_sanitation_state(aux, BPF_ALU_NON_POINTER, 0); 3138 + } 3139 + 3106 3140 static int sanitize_ptr_alu(struct bpf_verifier_env *env, 3107 3141 struct bpf_insn *insn, 3108 3142 const struct bpf_reg_state *ptr_reg, ··· 3151 3117 struct bpf_reg_state tmp; 3152 3118 bool ret; 3153 3119 3154 - if (env->allow_ptr_leaks || BPF_SRC(insn->code) == BPF_K) 3120 + if (can_skip_alu_sanitation(env, insn)) 3155 3121 return 0; 3156 3122 3157 3123 /* We already marked aux for masking from non-speculative ··· 3167 3133 3168 3134 if (retrieve_ptr_limit(ptr_reg, &alu_limit, opcode, off_is_neg)) 3169 3135 return 0; 3170 - 3171 - /* If we arrived here from different branches with different 3172 - * limits to sanitize, then this won't work. 3173 - */ 3174 - if (aux->alu_state && 3175 - (aux->alu_state != alu_state || 3176 - aux->alu_limit != alu_limit)) 3136 + if (update_alu_sanitation_state(aux, alu_state, alu_limit)) 3177 3137 return -EACCES; 3178 - 3179 - /* Corresponding fixup done in fixup_bpf_calls(). */ 3180 - aux->alu_state = alu_state; 3181 - aux->alu_limit = alu_limit; 3182 - 3183 3138 do_sim: 3184 3139 /* Simulate and find potential out-of-bounds access under 3185 3140 * speculative execution from truncation as a result of ··· 3441 3418 s64 smin_val, smax_val; 3442 3419 u64 umin_val, umax_val; 3443 3420 u64 insn_bitness = (BPF_CLASS(insn->code) == BPF_ALU64) ? 64 : 32; 3421 + u32 dst = insn->dst_reg; 3422 + int ret; 3444 3423 3445 3424 if (insn_bitness == 32) { 3446 3425 /* Relevant for 32-bit RSH: Information can propagate towards ··· 3477 3452 3478 3453 switch (opcode) { 3479 3454 case BPF_ADD: 3455 + ret = sanitize_val_alu(env, insn); 3456 + if (ret < 0) { 3457 + verbose(env, "R%d tried to add from different pointers or scalars\n", dst); 3458 + return ret; 3459 + } 3480 3460 if (signed_add_overflows(dst_reg->smin_value, smin_val) || 3481 3461 signed_add_overflows(dst_reg->smax_value, smax_val)) { 3482 3462 dst_reg->smin_value = S64_MIN; ··· 3501 3471 dst_reg->var_off = tnum_add(dst_reg->var_off, src_reg.var_off); 3502 3472 break; 3503 3473 case BPF_SUB: 3474 + ret = sanitize_val_alu(env, insn); 3475 + if (ret < 0) { 3476 + verbose(env, "R%d tried to sub from different pointers or scalars\n", dst); 3477 + return ret; 3478 + } 3504 3479 if (signed_sub_overflows(dst_reg->smin_value, smax_val) || 3505 3480 signed_sub_overflows(dst_reg->smax_value, smin_val)) { 3506 3481 /* Overflow possible, we know nothing */
+2
kernel/dma/swiotlb.c
··· 378 378 memblock_free_late(io_tlb_start, 379 379 PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT)); 380 380 } 381 + io_tlb_start = 0; 382 + io_tlb_end = 0; 381 383 io_tlb_nslabs = 0; 382 384 max_segment = 0; 383 385 }
+1
kernel/exit.c
··· 866 866 exit_task_namespaces(tsk); 867 867 exit_task_work(tsk); 868 868 exit_thread(tsk); 869 + exit_umh(tsk); 869 870 870 871 /* 871 872 * Flush inherited counters to the parent - before the parent
+12 -2
kernel/fork.c
··· 217 217 memset(s->addr, 0, THREAD_SIZE); 218 218 219 219 tsk->stack_vm_area = s; 220 + tsk->stack = s->addr; 220 221 return s->addr; 221 222 } 222 223 ··· 1834 1833 1835 1834 posix_cpu_timers_init(p); 1836 1835 1837 - p->start_time = ktime_get_ns(); 1838 - p->real_start_time = ktime_get_boot_ns(); 1839 1836 p->io_context = NULL; 1840 1837 audit_set_context(p, NULL); 1841 1838 cgroup_fork(p); ··· 1998 1999 retval = cgroup_can_fork(p); 1999 2000 if (retval) 2000 2001 goto bad_fork_free_pid; 2002 + 2003 + /* 2004 + * From this point on we must avoid any synchronous user-space 2005 + * communication until we take the tasklist-lock. In particular, we do 2006 + * not want user-space to be able to predict the process start-time by 2007 + * stalling fork(2) after we recorded the start_time but before it is 2008 + * visible to the system. 2009 + */ 2010 + 2011 + p->start_time = ktime_get_ns(); 2012 + p->real_start_time = ktime_get_boot_ns(); 2001 2013 2002 2014 /* 2003 2015 * Make it visible to the rest of the system, but dont wake it up yet.
+4
kernel/seccomp.c
··· 976 976 struct seccomp_filter *filter = file->private_data; 977 977 struct seccomp_knotif *knotif; 978 978 979 + if (!filter) 980 + return 0; 981 + 979 982 mutex_lock(&filter->notify_lock); 980 983 981 984 /* ··· 1303 1300 out_put_fd: 1304 1301 if (flags & SECCOMP_FILTER_FLAG_NEW_LISTENER) { 1305 1302 if (ret < 0) { 1303 + listener_f->private_data = NULL; 1306 1304 fput(listener_f); 1307 1305 put_unused_fd(listener); 1308 1306 } else {
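The two seccomp hunks work as a pair: the error path now detaches private_data before the fput(), and seccomp_notify_release() bails out on a NULL filter, so the release callback that fput() may trigger cannot drop a filter reference the error path is about to put itself. The defensive shape in miniature, with hypothetical names:

#include <stdio.h>

struct filter { int refs; };
struct file { void *private_data; };

static void release(struct file *f)
{
        struct filter *flt = f->private_data;

        if (!flt)               /* half-initialized file: nothing to drop */
                return;
        flt->refs--;
}

int main(void)
{
        struct filter flt = { .refs = 1 };
        struct file f = { .private_data = &flt };

        /* Error path: detach before triggering release, then drop our own
         * reference exactly once. */
        f.private_data = NULL;
        release(&f);            /* no-op thanks to the NULL check */
        flt.refs--;             /* the error path's own put */

        printf("refs=%d\n", flt.refs);  /* 0, not -1 */
        return 0;
}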
+2 -1
kernel/sys.c
··· 1207 1207 /* 1208 1208 * Work around broken programs that cannot handle "Linux 3.0". 1209 1209 * Instead we map 3.x to 2.6.40+x, so e.g. 3.0 would be 2.6.40 1210 - * And we map 4.x to 2.6.60+x, so 4.0 would be 2.6.60. 1210 + * And we map 4.x and later versions to 2.6.60+x, so 4.0/5.0/6.0/... would be 1211 + * 2.6.60. 1211 1212 */ 1212 1213 static int override_release(char __user *release, size_t len) 1213 1214 {
+9 -3
kernel/trace/trace_kprobe.c
··· 607 607 char buf[MAX_EVENT_NAME_LEN]; 608 608 unsigned int flags = TPARG_FL_KERNEL; 609 609 610 - /* argc must be >= 1 */ 611 - if (argv[0][0] == 'r') { 610 + switch (argv[0][0]) { 611 + case 'r': 612 612 is_return = true; 613 613 flags |= TPARG_FL_RETURN; 614 - } else if (argv[0][0] != 'p' || argc < 2) 614 + break; 615 + case 'p': 616 + break; 617 + default: 618 + return -ECANCELED; 619 + } 620 + if (argc < 2) 615 621 return -ECANCELED; 616 622 617 623 event = strchr(&argv[0][1], ':');
+31 -2
kernel/umh.c
··· 37 37 static kernel_cap_t usermodehelper_inheritable = CAP_FULL_SET; 38 38 static DEFINE_SPINLOCK(umh_sysctl_lock); 39 39 static DECLARE_RWSEM(umhelper_sem); 40 + static LIST_HEAD(umh_list); 41 + static DEFINE_MUTEX(umh_list_lock); 40 42 41 43 static void call_usermodehelper_freeinfo(struct subprocess_info *info) 42 44 { ··· 102 100 commit_creds(new); 103 101 104 102 sub_info->pid = task_pid_nr(current); 105 - if (sub_info->file) 103 + if (sub_info->file) { 106 104 retval = do_execve_file(sub_info->file, 107 105 sub_info->argv, sub_info->envp); 108 - else 106 + if (!retval) 107 + current->flags |= PF_UMH; 108 + } else 109 109 retval = do_execve(getname_kernel(sub_info->path), 110 110 (const char __user *const __user *)sub_info->argv, 111 111 (const char __user *const __user *)sub_info->envp); ··· 521 517 goto out; 522 518 523 519 err = call_usermodehelper_exec(sub_info, UMH_WAIT_EXEC); 520 + if (!err) { 521 + mutex_lock(&umh_list_lock); 522 + list_add(&info->list, &umh_list); 523 + mutex_unlock(&umh_list_lock); 524 + } 524 525 out: 525 526 fput(file); 526 527 return err; ··· 686 677 } 687 678 688 679 return 0; 680 + } 681 + 682 + void __exit_umh(struct task_struct *tsk) 683 + { 684 + struct umh_info *info; 685 + pid_t pid = tsk->pid; 686 + 687 + mutex_lock(&umh_list_lock); 688 + list_for_each_entry(info, &umh_list, list) { 689 + if (info->pid == pid) { 690 + list_del(&info->list); 691 + mutex_unlock(&umh_list_lock); 692 + goto out; 693 + } 694 + } 695 + mutex_unlock(&umh_list_lock); 696 + return; 697 + out: 698 + if (info->cleanup) 699 + info->cleanup(info); 689 700 } 690 701 691 702 struct ctl_table usermodehelper_table[] = {
+1 -1
lib/int_sqrt.c
··· 52 52 if (x <= ULONG_MAX) 53 53 return int_sqrt((unsigned long) x); 54 54 55 - m = 1ULL << (fls64(x) & ~1ULL); 55 + m = 1ULL << ((fls64(x) - 1) & ~1ULL); 56 56 while (m != 0) { 57 57 b = y + m; 58 58 y >>= 1;
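The int_sqrt64() fix addresses inputs with bit 63 set: fls64(x) is then 64, and the old "1ULL << (fls64(x) & ~1ULL)" shifted a 64-bit value by 64, which is undefined behavior and in practice yielded m = 0, making the function return 0. Using the zero-based top-bit index, "(fls64(x) - 1) & ~1", starts m at the largest power of four not exceeding x for every input. A standalone version of the fixed helper:

#include <stdint.h>
#include <stdio.h>

static int fls64(uint64_t x)    /* 1-based index of the top set bit */
{
        return x ? 64 - __builtin_clzll(x) : 0;
}

static uint32_t int_sqrt64(uint64_t x)
{
        uint64_t b, m, y = 0;

        if (x == 0)
                return 0;
        /* Fixed: largest power of four <= x, valid even when bit 63 is set. */
        m = 1ULL << ((fls64(x) - 1) & ~1ULL);
        while (m != 0) {
                b = y + m;
                y >>= 1;
                if (x >= b) {
                        x -= b;
                        y += m;
                }
                m >>= 2;
        }
        return y;
}

int main(void)
{
        uint64_t x = 1ULL << 63;        /* fls64 = 64: the old code broke here */

        printf("%u\n", int_sqrt64(x));  /* 3037000499 == floor(sqrt(2^63)) */
        return 0;
}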
+3 -10
lib/sbitmap.c
··· 26 26 static inline bool sbitmap_deferred_clear(struct sbitmap *sb, int index) 27 27 { 28 28 unsigned long mask, val; 29 - unsigned long __maybe_unused flags; 30 29 bool ret = false; 30 + unsigned long flags; 31 31 32 - /* Silence bogus lockdep warning */ 33 - #if defined(CONFIG_LOCKDEP) 34 - local_irq_save(flags); 35 - #endif 36 - spin_lock(&sb->map[index].swap_lock); 32 + spin_lock_irqsave(&sb->map[index].swap_lock, flags); 37 33 38 34 if (!sb->map[index].cleared) 39 35 goto out_unlock; ··· 50 54 51 55 ret = true; 52 56 out_unlock: 53 - spin_unlock(&sb->map[index].swap_lock); 54 - #if defined(CONFIG_LOCKDEP) 55 - local_irq_restore(flags); 56 - #endif 57 + spin_unlock_irqrestore(&sb->map[index].swap_lock, flags); 57 58 return ret; 58 59 } 59 60
+24 -57
mm/hugetlb.c
··· 3238 3238 struct page *ptepage; 3239 3239 unsigned long addr; 3240 3240 int cow; 3241 - struct address_space *mapping = vma->vm_file->f_mapping; 3242 3241 struct hstate *h = hstate_vma(vma); 3243 3242 unsigned long sz = huge_page_size(h); 3244 3243 struct mmu_notifier_range range; ··· 3249 3250 mmu_notifier_range_init(&range, src, vma->vm_start, 3250 3251 vma->vm_end); 3251 3252 mmu_notifier_invalidate_range_start(&range); 3252 - } else { 3253 - /* 3254 - * For shared mappings i_mmap_rwsem must be held to call 3255 - * huge_pte_alloc, otherwise the returned ptep could go 3256 - * away if part of a shared pmd and another thread calls 3257 - * huge_pmd_unshare. 3258 - */ 3259 - i_mmap_lock_read(mapping); 3260 3253 } 3261 3254 3262 3255 for (addr = vma->vm_start; addr < vma->vm_end; addr += sz) { 3263 3256 spinlock_t *src_ptl, *dst_ptl; 3264 - 3265 3257 src_pte = huge_pte_offset(src, addr, sz); 3266 3258 if (!src_pte) 3267 3259 continue; 3268 - 3269 3260 dst_pte = huge_pte_alloc(dst, addr, sz); 3270 3261 if (!dst_pte) { 3271 3262 ret = -ENOMEM; ··· 3326 3337 3327 3338 if (cow) 3328 3339 mmu_notifier_invalidate_range_end(&range); 3329 - else 3330 - i_mmap_unlock_read(mapping); 3331 3340 3332 3341 return ret; 3333 3342 } ··· 3742 3755 } 3743 3756 3744 3757 /* 3745 - * We can not race with truncation due to holding i_mmap_rwsem. 3746 - * Check once here for faults beyond end of file. 3758 + * Use page lock to guard against racing truncation 3759 + * before we get page_table_lock. 3747 3760 */ 3748 - size = i_size_read(mapping->host) >> huge_page_shift(h); 3749 - if (idx >= size) 3750 - goto out; 3751 - 3752 3761 retry: 3753 3762 page = find_lock_page(mapping, idx); 3754 3763 if (!page) { 3764 + size = i_size_read(mapping->host) >> huge_page_shift(h); 3765 + if (idx >= size) 3766 + goto out; 3767 + 3755 3768 /* 3756 3769 * Check for page in userfault range 3757 3770 */ ··· 3771 3784 }; 3772 3785 3773 3786 /* 3774 - * hugetlb_fault_mutex and i_mmap_rwsem must be 3775 - * dropped before handling userfault. Reacquire 3776 - * after handling fault to make calling code simpler. 3787 + * hugetlb_fault_mutex must be dropped before 3788 + * handling userfault. Reacquire after handling 3789 + * fault to make calling code simpler. 3777 3790 */ 3778 3791 hash = hugetlb_fault_mutex_hash(h, mm, vma, mapping, 3779 3792 idx, haddr); 3780 3793 mutex_unlock(&hugetlb_fault_mutex_table[hash]); 3781 - i_mmap_unlock_read(mapping); 3782 - 3783 3794 ret = handle_userfault(&vmf, VM_UFFD_MISSING); 3784 - 3785 - i_mmap_lock_read(mapping); 3786 3795 mutex_lock(&hugetlb_fault_mutex_table[hash]); 3787 3796 goto out; 3788 3797 } ··· 3837 3854 } 3838 3855 3839 3856 ptl = huge_pte_lock(h, mm, ptep); 3857 + size = i_size_read(mapping->host) >> huge_page_shift(h); 3858 + if (idx >= size) 3859 + goto backout; 3840 3860 3841 3861 ret = 0; 3842 3862 if (!huge_pte_none(huge_ptep_get(ptep))) ··· 3926 3940 3927 3941 ptep = huge_pte_offset(mm, haddr, huge_page_size(h)); 3928 3942 if (ptep) { 3929 - /* 3930 - * Since we hold no locks, ptep could be stale. That is 3931 - * OK as we are only making decisions based on content and 3932 - * not actually modifying content here. 
3933 - */ 3934 3943 entry = huge_ptep_get(ptep); 3935 3944 if (unlikely(is_hugetlb_entry_migration(entry))) { 3936 3945 migration_entry_wait_huge(vma, mm, ptep); ··· 3933 3952 } else if (unlikely(is_hugetlb_entry_hwpoisoned(entry))) 3934 3953 return VM_FAULT_HWPOISON_LARGE | 3935 3954 VM_FAULT_SET_HINDEX(hstate_index(h)); 3955 + } else { 3956 + ptep = huge_pte_alloc(mm, haddr, huge_page_size(h)); 3957 + if (!ptep) 3958 + return VM_FAULT_OOM; 3936 3959 } 3937 3960 3938 - /* 3939 - * Acquire i_mmap_rwsem before calling huge_pte_alloc and hold 3940 - * until finished with ptep. This serves two purposes: 3941 - * 1) It prevents huge_pmd_unshare from being called elsewhere 3942 - * and making the ptep no longer valid. 3943 - * 2) It synchronizes us with file truncation. 3944 - * 3945 - * ptep could have already be assigned via huge_pte_offset. That 3946 - * is OK, as huge_pte_alloc will return the same value unless 3947 - * something changed. 3948 - */ 3949 3961 mapping = vma->vm_file->f_mapping; 3950 - i_mmap_lock_read(mapping); 3951 - ptep = huge_pte_alloc(mm, haddr, huge_page_size(h)); 3952 - if (!ptep) { 3953 - i_mmap_unlock_read(mapping); 3954 - return VM_FAULT_OOM; 3955 - } 3962 + idx = vma_hugecache_offset(h, vma, haddr); 3956 3963 3957 3964 /* 3958 3965 * Serialize hugepage allocation and instantiation, so that we don't 3959 3966 * get spurious allocation failures if two CPUs race to instantiate 3960 3967 * the same page in the page cache. 3961 3968 */ 3962 - idx = vma_hugecache_offset(h, vma, haddr); 3963 3969 hash = hugetlb_fault_mutex_hash(h, mm, vma, mapping, idx, haddr); 3964 3970 mutex_lock(&hugetlb_fault_mutex_table[hash]); 3965 3971 ··· 4034 4066 } 4035 4067 out_mutex: 4036 4068 mutex_unlock(&hugetlb_fault_mutex_table[hash]); 4037 - i_mmap_unlock_read(mapping); 4038 4069 /* 4039 4070 * Generally it's safe to hold refcount during waiting page lock. But 4040 4071 * here we just wait to defer the next page fault to avoid busy loop and ··· 4638 4671 * Search for a shareable pmd page for hugetlb. In any case calls pmd_alloc() 4639 4672 * and returns the corresponding pte. While this is not necessary for the 4640 4673 * !shared pmd case because we can allocate the pmd later as well, it makes the 4641 - * code much cleaner. 4642 - * 4643 - * This routine must be called with i_mmap_rwsem held in at least read mode. 4644 - * For hugetlbfs, this prevents removal of any page table entries associated 4645 - * with the address space. This is important as we are setting up sharing 4646 - * based on existing page table entries (mappings). 4674 + * code much cleaner. pmd allocation is essential for the shared case because 4675 + * pud has to be populated inside the same i_mmap_rwsem section - otherwise 4676 + * racing tasks could either miss the sharing (see huge_pte_offset) or select a 4677 + * bad pmd for sharing. 4647 4678 */ 4648 4679 pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud) 4649 4680 { ··· 4658 4693 if (!vma_shareable(vma, addr)) 4659 4694 return (pte_t *)pmd_alloc(mm, pud, addr); 4660 4695 4696 + i_mmap_lock_write(mapping); 4661 4697 vma_interval_tree_foreach(svma, &mapping->i_mmap, idx, idx) { 4662 4698 if (svma == vma) 4663 4699 continue; ··· 4688 4722 spin_unlock(ptl); 4689 4723 out: 4690 4724 pte = (pte_t *)pmd_alloc(mm, pud, addr); 4725 + i_mmap_unlock_write(mapping); 4691 4726 return pte; 4692 4727 } ··· 4699 4732 * indicated by page_count > 1, unmap is achieved by clearing pud and 4700 4733 * decrementing the ref count. If count == 1, the pte page is not shared. 4701 4734 * 4702 - * Called with page table lock held and i_mmap_rwsem held in write mode. 4735 + * called with page table lock held. 4703 4736 * 4704 4737 * returns: 1 successfully unmapped a shared pte page 4705 4738 * 0 the underlying pte page is not shared, or it is the last user
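With the i_mmap_rwsem scheme reverted above, the fault path goes back to the check-lock-recheck pattern: the i_size test taken before find_lock_page() can go stale against a racing truncate, so it is repeated under the page table lock and the fault backs out if it lost the race. A generic userspace sketch of the same idiom (all names illustrative, not the hugetlb code):

    #include <pthread.h>
    #include <stdbool.h>

    static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
    static long nr_slots;                   /* can shrink concurrently, like i_size */

    static bool instantiate(long idx)
    {
        if (idx >= nr_slots)                /* optimistic check: may go stale */
            return false;

        pthread_mutex_lock(&table_lock);
        if (idx >= nr_slots) {              /* authoritative re-check under the lock */
            pthread_mutex_unlock(&table_lock);
            return false;                   /* back out: raced with a shrink */
        }
        /* ... safe to install the entry for idx here ... */
        pthread_mutex_unlock(&table_lock);
        return true;
    }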
+44 -23
mm/kasan/common.c
··· 298 298 return; 299 299 } 300 300 301 - cache->align = round_up(cache->align, KASAN_SHADOW_SCALE_SIZE); 302 - 303 301 *flags |= SLAB_KASAN; 304 302 } 305 303 ··· 347 349 } 348 350 349 351 /* 350 - * Since it's desirable to only call object contructors once during slab 351 - * allocation, we preassign tags to all such objects. Also preassign tags for 352 - * SLAB_TYPESAFE_BY_RCU slabs to avoid use-after-free reports. 353 - * For SLAB allocator we can't preassign tags randomly since the freelist is 354 - * stored as an array of indexes instead of a linked list. Assign tags based 355 - * on objects indexes, so that objects that are next to each other get 356 - * different tags. 357 - * After a tag is assigned, the object always gets allocated with the same tag. 358 - * The reason is that we can't change tags for objects with constructors on 359 - * reallocation (even for non-SLAB_TYPESAFE_BY_RCU), because the constructor 360 - * code can save the pointer to the object somewhere (e.g. in the object 361 - * itself). Then if we retag it, the old saved pointer will become invalid. 352 + * This function assigns a tag to an object considering the following: 353 + * 1. A cache might have a constructor, which might save a pointer to a slab 354 + * object somewhere (e.g. in the object itself). We preassign a tag for 355 + * each object in caches with constructors during slab creation and reuse 356 + * the same tag each time a particular object is allocated. 357 + * 2. A cache might be SLAB_TYPESAFE_BY_RCU, which means objects can be 358 + * accessed after being freed. We preassign tags for objects in these 359 + * caches as well. 360 + * 3. For SLAB allocator we can't preassign tags randomly since the freelist 361 + * is stored as an array of indexes instead of a linked list. Assign tags 362 + * based on objects indexes, so that objects that are next to each other 363 + * get different tags. 362 364 */ 363 - static u8 assign_tag(struct kmem_cache *cache, const void *object, bool new) 365 + static u8 assign_tag(struct kmem_cache *cache, const void *object, 366 + bool init, bool krealloc) 364 367 { 365 - if (!cache->ctor && !(cache->flags & SLAB_TYPESAFE_BY_RCU)) 366 - return new ? KASAN_TAG_KERNEL : random_tag(); 368 + /* Reuse the same tag for krealloc'ed objects. */ 369 + if (krealloc) 370 + return get_tag(object); 367 371 372 + /* 373 + * If the cache neither has a constructor nor has SLAB_TYPESAFE_BY_RCU 374 + * set, assign a tag when the object is being allocated (init == false). 375 + */ 376 + if (!cache->ctor && !(cache->flags & SLAB_TYPESAFE_BY_RCU)) 377 + return init ? KASAN_TAG_KERNEL : random_tag(); 378 + 379 + /* For caches that either have a constructor or SLAB_TYPESAFE_BY_RCU: */ 368 380 #ifdef CONFIG_SLAB 381 + /* For SLAB assign tags based on the object index in the freelist. */ 369 382 return (u8)obj_to_index(cache, virt_to_page(object), (void *)object); 370 383 #else 371 - return new ? random_tag() : get_tag(object); 384 + /* 385 + * For SLUB assign a random tag during slab creation, otherwise reuse 386 + * the already assigned tag. 387 + */ 388 + return init ? random_tag() : get_tag(object); 372 389 #endif 373 390 } 374 391 ··· 399 386 __memset(alloc_info, 0, sizeof(*alloc_info)); 400 387 401 388 if (IS_ENABLED(CONFIG_KASAN_SW_TAGS)) 402 - object = set_tag(object, assign_tag(cache, object, true)); 389 + object = set_tag(object, 390 + assign_tag(cache, object, true, false)); 403 391 404 392 return (void *)object; 405 393 } ··· 466 452 return __kasan_slab_free(cache, object, ip, true); 467 453 } 468 454 469 - void * __must_check kasan_kmalloc(struct kmem_cache *cache, const void *object, 470 - size_t size, gfp_t flags) 455 + static void *__kasan_kmalloc(struct kmem_cache *cache, const void *object, 456 + size_t size, gfp_t flags, bool krealloc) 471 457 { 472 458 unsigned long redzone_start; 473 459 unsigned long redzone_end; ··· 485 471 KASAN_SHADOW_SCALE_SIZE); 486 472 487 473 if (IS_ENABLED(CONFIG_KASAN_SW_TAGS)) 488 - tag = assign_tag(cache, object, false); 474 + tag = assign_tag(cache, object, false, krealloc); 489 475 490 476 /* Tag is ignored in set_tag without CONFIG_KASAN_SW_TAGS */ 491 477 kasan_unpoison_shadow(set_tag(object, tag), size); ··· 496 482 set_track(&get_alloc_info(cache, object)->alloc_track, flags); 497 483 498 484 return set_tag(object, tag); 485 + } 486 + 487 + void * __must_check kasan_kmalloc(struct kmem_cache *cache, const void *object, 488 + size_t size, gfp_t flags) 489 + { 490 + return __kasan_kmalloc(cache, object, size, flags, false); 499 491 } 500 492 EXPORT_SYMBOL(kasan_kmalloc); 501 493 ··· 542 522 if (unlikely(!PageSlab(page))) 543 523 return kasan_kmalloc_large(object, size, flags); 544 524 else 545 - return kasan_kmalloc(page->slab_cache, object, size, flags); 525 + return __kasan_kmalloc(page->slab_cache, object, size, 526 + flags, true); 546 527 } 547 528 548 529 void kasan_poison_kfree(void *ptr, unsigned long ip)
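For context on the get_tag()/set_tag() calls in the hunk above: with CONFIG_KASAN_SW_TAGS the tag travels in the otherwise-unused top byte of the 64-bit pointer (arm64 top-byte-ignore). A simplified sketch of what such helpers do; this is an illustration, not the kernel's exact implementation:

    #include <stdint.h>

    #define TAG_SHIFT 56    /* the tag occupies the top byte of a 64-bit pointer */

    static inline uint8_t get_tag(const void *ptr)
    {
        return (uint8_t)((uint64_t)ptr >> TAG_SHIFT);
    }

    static inline void *set_tag(const void *ptr, uint8_t tag)
    {
        uint64_t p = (uint64_t)ptr & ~((uint64_t)0xff << TAG_SHIFT);

        return (void *)(p | ((uint64_t)tag << TAG_SHIFT));
    }

Loads and stores through a tagged pointer still reach the same memory; KASAN only compares the pointer tag against the shadow-memory tag to catch mismatches.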
+2 -14
mm/memory-failure.c
··· 966 966 enum ttu_flags ttu = TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS; 967 967 struct address_space *mapping; 968 968 LIST_HEAD(tokill); 969 - bool unmap_success = true; 969 + bool unmap_success; 970 970 int kill = 1, forcekill; 971 971 struct page *hpage = *hpagep; 972 972 bool mlocked = PageMlocked(hpage); ··· 1028 1028 if (kill) 1029 1029 collect_procs(hpage, &tokill, flags & MF_ACTION_REQUIRED); 1030 1030 1031 - if (!PageHuge(hpage)) { 1032 - unmap_success = try_to_unmap(hpage, ttu); 1033 - } else if (mapping) { 1034 - /* 1035 - * For hugetlb pages, try_to_unmap could potentially call 1036 - * huge_pmd_unshare. Because of this, take semaphore in 1037 - * write mode here and set TTU_RMAP_LOCKED to indicate we 1038 - * have taken the lock at this higer level. 1039 - */ 1040 - i_mmap_lock_write(mapping); 1041 - unmap_success = try_to_unmap(hpage, ttu|TTU_RMAP_LOCKED); 1042 - i_mmap_unlock_write(mapping); 1043 - } 1031 + unmap_success = try_to_unmap(hpage, ttu); 1044 1032 if (!unmap_success) 1045 1033 pr_err("Memory failure: %#lx: failed to unmap page (mapcount=%d)\n", 1046 1034 pfn, page_mapcount(hpage));
+24 -2
mm/memory.c
··· 2994 2994 struct vm_area_struct *vma = vmf->vma; 2995 2995 vm_fault_t ret; 2996 2996 2997 + /* 2998 + * Preallocate pte before we take page_lock because this might lead to 2999 + * deadlocks for memcg reclaim which waits for pages under writeback: 3000 + * lock_page(A) 3001 + * SetPageWriteback(A) 3002 + * unlock_page(A) 3003 + * lock_page(B) 3004 + * lock_page(B) 3005 + * pte_alloc_pne 3006 + * shrink_page_list 3007 + * wait_on_page_writeback(A) 3008 + * SetPageWriteback(B) 3009 + * unlock_page(B) 3010 + * # flush A, B to clear the writeback 3011 + */ 3012 + if (pmd_none(*vmf->pmd) && !vmf->prealloc_pte) { 3013 + vmf->prealloc_pte = pte_alloc_one(vmf->vma->vm_mm); 3014 + if (!vmf->prealloc_pte) 3015 + return VM_FAULT_OOM; 3016 + smp_wmb(); /* See comment in __pte_alloc() */ 3017 + } 3018 + 2997 3019 ret = vma->vm_ops->fault(vmf); 2998 3020 if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY | 2999 3021 VM_FAULT_DONE_COW))) ··· 4099 4077 goto out; 4100 4078 4101 4079 if (range) { 4102 - range->start = address & PAGE_MASK; 4103 - range->end = range->start + PAGE_SIZE; 4080 + mmu_notifier_range_init(range, mm, address & PAGE_MASK, 4081 + (address & PAGE_MASK) + PAGE_SIZE); 4104 4082 mmu_notifier_invalidate_range_start(range); 4105 4083 } 4106 4084 ptep = pte_offset_map_lock(mm, pmd, address, ptlp);
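The deadlock comment added above is an instance of a general rule: never perform an allocation that can enter reclaim while holding a resource that reclaim itself may wait on (here, a locked page under writeback). The cure is to preallocate first and commit under the lock. A generic sketch of the pattern; struct obj, alloc_blocking() and the lock helpers are illustrative:

    struct obj {
        void *res;      /* set once, under the object lock */
    };

    int obj_prepare(struct obj *o)
    {
        void *new = alloc_blocking();   /* may sleep and enter reclaim */

        if (!new)
            return -ENOMEM;

        lock_obj(o);
        if (!o->res) {                  /* commit only if still unset */
            o->res = new;
            new = NULL;
        }
        unlock_obj(o);

        if (new)
            free_blocking(new);         /* lost the race: drop the spare */
        return 0;
    }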
+1 -12
mm/migrate.c
··· 1324 1324 goto put_anon; 1325 1325 1326 1326 if (page_mapped(hpage)) { 1327 - struct address_space *mapping = page_mapping(hpage); 1328 - 1329 - /* 1330 - * try_to_unmap could potentially call huge_pmd_unshare. 1331 - * Because of this, take semaphore in write mode here and 1332 - * set TTU_RMAP_LOCKED to let lower levels know we have 1333 - * taken the lock. 1334 - */ 1335 - i_mmap_lock_write(mapping); 1336 1327 try_to_unmap(hpage, 1337 - TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS| 1338 - TTU_RMAP_LOCKED); 1339 - i_mmap_unlock_write(mapping); 1328 + TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS); 1340 1329 page_was_mapped = 1; 1341 1330 } 1342 1331
+7 -1
mm/page_alloc.c
··· 2214 2214 */ 2215 2215 boost_watermark(zone); 2216 2216 if (alloc_flags & ALLOC_KSWAPD) 2217 - wakeup_kswapd(zone, 0, 0, zone_idx(zone)); 2217 + set_bit(ZONE_BOOSTED_WATERMARK, &zone->flags); 2218 2218 2219 2219 /* We are not allowed to try stealing from the whole block */ 2220 2220 if (!whole_block) ··· 3102 3102 local_irq_restore(flags); 3103 3103 3104 3104 out: 3105 + /* Separate test+clear to avoid unnecessary atomics */ 3106 + if (test_bit(ZONE_BOOSTED_WATERMARK, &zone->flags)) { 3107 + clear_bit(ZONE_BOOSTED_WATERMARK, &zone->flags); 3108 + wakeup_kswapd(zone, 0, 0, zone_idx(zone)); 3109 + } 3110 + 3105 3111 VM_BUG_ON_PAGE(page && bad_range(zone, page), page); 3106 3112 return page; 3107 3113
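The "separate test+clear" comment above relies on test_bit() being a plain load while clear_bit() is an atomic read-modify-write: checking first keeps the hot allocation path free of atomics whenever the flag is clear. A kernel-context sketch (MY_FLAG and the work function are illustrative):

    /* flags is an unsigned long bitmap. */
    if (test_bit(MY_FLAG, &flags)) {    /* cheap non-atomic read */
        clear_bit(MY_FLAG, &flags);     /* atomic RMW, only when set */
        do_deferred_work();
    }

If two CPUs race between the test and the clear, both may run the work once; that is acceptable here because a kswapd wakeup is idempotent, which is why the single-atomic test_and_clear_bit() is not needed.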
+2 -6
mm/rmap.c
··· 25 25 * page->flags PG_locked (lock_page) 26 26 * hugetlbfs_i_mmap_rwsem_key (in huge_pmd_share) 27 27 * mapping->i_mmap_rwsem 28 - * hugetlb_fault_mutex (hugetlbfs specific page fault mutex) 29 28 * anon_vma->rwsem 30 29 * mm->page_table_lock or pte_lock 31 30 * zone_lru_lock (in mark_page_accessed, isolate_lru_page) ··· 1371 1372 * Note that the page can not be free in this function as call of 1372 1373 * try_to_unmap() must hold a reference on the page. 1373 1374 */ 1374 - mmu_notifier_range_init(&range, vma->vm_mm, vma->vm_start, 1375 - min(vma->vm_end, vma->vm_start + 1375 + mmu_notifier_range_init(&range, vma->vm_mm, address, 1376 + min(vma->vm_end, address + 1376 1377 (PAGE_SIZE << compound_order(page)))); 1377 1378 if (PageHuge(page)) { 1378 1379 /* 1379 1380 * If sharing is possible, start and end will be adjusted 1380 1381 * accordingly. 1381 - * 1382 - * If called for a huge page, caller must hold i_mmap_rwsem 1383 - * in write mode as it is possible to call huge_pmd_unshare. 1384 1382 */ 1385 1383 adjust_range_if_pmd_sharing_possible(vma, &range.start, 1386 1384 &range.end);
+4 -2
mm/slab.c
··· 666 666 struct alien_cache *alc = NULL; 667 667 668 668 alc = kmalloc_node(memsize, gfp, node); 669 - init_arraycache(&alc->ac, entries, batch); 670 - spin_lock_init(&alc->lock); 669 + if (alc) { 670 + init_arraycache(&alc->ac, entries, batch); 671 + spin_lock_init(&alc->lock); 672 + } 671 673 return alc; 672 674 } 673 675
+2
mm/slub.c
··· 3846 3846 unsigned int offset; 3847 3847 size_t object_size; 3848 3848 3849 + ptr = kasan_reset_tag(ptr); 3850 + 3849 3851 /* Find object and usable object size. */ 3850 3852 s = page->slab_cache; 3851 3853
+5 -4
mm/usercopy.c
··· 247 247 /* 248 248 * Validates that the given object is: 249 249 * - not bogus address 250 - * - known-safe heap or stack object 250 + * - fully contained by stack (or stack frame, when available) 251 + * - fully within SLAB object (or object whitelist area, when available) 251 252 * - not in kernel text 252 253 */ 253 254 void __check_object_size(const void *ptr, unsigned long n, bool to_user) ··· 262 261 263 262 /* Check for invalid addresses. */ 264 263 check_bogus_address((const unsigned long)ptr, n, to_user); 265 - 266 - /* Check for bad heap object. */ 267 - check_heap_object(ptr, n, to_user); 268 264 269 265 /* Check for bad stack object. */ 270 266 switch (check_stack_object(ptr, n)) { ··· 279 281 default: 280 282 usercopy_abort("process stack", NULL, to_user, 0, n); 281 283 } 284 + 285 + /* Check for bad heap object. */ 286 + check_heap_object(ptr, n, to_user); 282 287 283 288 /* Check for object in kernel to avoid text exposure. */ 284 289 check_kernel_text_object((const unsigned long)ptr, n, to_user);
+2 -9
mm/userfaultfd.c
··· 267 267 VM_BUG_ON(dst_addr & ~huge_page_mask(h)); 268 268 269 269 /* 270 - * Serialize via i_mmap_rwsem and hugetlb_fault_mutex. 271 - * i_mmap_rwsem ensures the dst_pte remains valid even 272 - * in the case of shared pmds. fault mutex prevents 273 - * races with other faulting threads. 270 + * Serialize via hugetlb_fault_mutex 274 271 */ 275 - mapping = dst_vma->vm_file->f_mapping; 276 - i_mmap_lock_read(mapping); 277 272 idx = linear_page_index(dst_vma, dst_addr); 273 + mapping = dst_vma->vm_file->f_mapping; 278 274 hash = hugetlb_fault_mutex_hash(h, dst_mm, dst_vma, mapping, 279 275 idx, dst_addr); 280 276 mutex_lock(&hugetlb_fault_mutex_table[hash]); ··· 279 283 dst_pte = huge_pte_alloc(dst_mm, dst_addr, huge_page_size(h)); 280 284 if (!dst_pte) { 281 285 mutex_unlock(&hugetlb_fault_mutex_table[hash]); 282 - i_mmap_unlock_read(mapping); 283 286 goto out_unlock; 284 287 } 285 288 ··· 286 291 dst_pteval = huge_ptep_get(dst_pte); 287 292 if (!huge_pte_none(dst_pteval)) { 288 293 mutex_unlock(&hugetlb_fault_mutex_table[hash]); 289 - i_mmap_unlock_read(mapping); 290 294 goto out_unlock; 291 295 } 292 296 ··· 293 299 dst_addr, src_addr, &page); 294 300 295 301 mutex_unlock(&hugetlb_fault_mutex_table[hash]); 296 - i_mmap_unlock_read(mapping); 297 302 vm_alloc_shared = vm_shared; 298 303 299 304 cond_resched();
+1 -1
mm/util.c
··· 478 478 return true; 479 479 if (PageHuge(page)) 480 480 return false; 481 - for (i = 0; i < hpage_nr_pages(page); i++) { 481 + for (i = 0; i < (1 << compound_order(page)); i++) { 482 482 if (atomic_read(&page[i]._mapcount) >= 0) 483 483 return true; 484 484 }
+52 -44
net/bpfilter/bpfilter_kern.c
··· 13 13 extern char bpfilter_umh_start; 14 14 extern char bpfilter_umh_end; 15 15 16 - static struct umh_info info; 17 - /* since ip_getsockopt() can run in parallel, serialize access to umh */ 18 - static DEFINE_MUTEX(bpfilter_lock); 19 - 20 - static void shutdown_umh(struct umh_info *info) 16 + static void shutdown_umh(void) 21 17 { 22 18 struct task_struct *tsk; 23 19 24 - if (!info->pid) 20 + if (bpfilter_ops.stop) 25 21 return; 26 - tsk = get_pid_task(find_vpid(info->pid), PIDTYPE_PID); 22 + 23 + tsk = get_pid_task(find_vpid(bpfilter_ops.info.pid), PIDTYPE_PID); 27 24 if (tsk) { 28 25 force_sig(SIGKILL, tsk); 29 26 put_task_struct(tsk); 30 27 } 31 - fput(info->pipe_to_umh); 32 - fput(info->pipe_from_umh); 33 - info->pid = 0; 34 28 } 35 29 36 30 static void __stop_umh(void) 37 31 { 38 - if (IS_ENABLED(CONFIG_INET)) { 39 - bpfilter_process_sockopt = NULL; 40 - shutdown_umh(&info); 41 - } 42 - } 43 - 44 - static void stop_umh(void) 45 - { 46 - mutex_lock(&bpfilter_lock); 47 - __stop_umh(); 48 - mutex_unlock(&bpfilter_lock); 32 + if (IS_ENABLED(CONFIG_INET)) 33 + shutdown_umh(); 49 34 } 50 35 51 36 static int __bpfilter_process_sockopt(struct sock *sk, int optname, ··· 48 63 req.cmd = optname; 49 64 req.addr = (long __force __user)optval; 50 65 req.len = optlen; 51 - mutex_lock(&bpfilter_lock); 52 - if (!info.pid) 66 + if (!bpfilter_ops.info.pid) 53 67 goto out; 54 - n = __kernel_write(info.pipe_to_umh, &req, sizeof(req), &pos); 68 + n = __kernel_write(bpfilter_ops.info.pipe_to_umh, &req, sizeof(req), 69 + &pos); 55 70 if (n != sizeof(req)) { 56 71 pr_err("write fail %zd\n", n); 57 72 __stop_umh(); ··· 59 74 goto out; 60 75 } 61 76 pos = 0; 62 - n = kernel_read(info.pipe_from_umh, &reply, sizeof(reply), &pos); 77 + n = kernel_read(bpfilter_ops.info.pipe_from_umh, &reply, sizeof(reply), 78 + &pos); 63 79 if (n != sizeof(reply)) { 64 80 pr_err("read fail %zd\n", n); 65 81 __stop_umh(); ··· 69 83 } 70 84 ret = reply.status; 71 85 out: 72 - mutex_unlock(&bpfilter_lock); 73 86 return ret; 87 + } 88 + 89 + static int start_umh(void) 90 + { 91 + int err; 92 + 93 + /* fork usermode process */ 94 + err = fork_usermode_blob(&bpfilter_umh_start, 95 + &bpfilter_umh_end - &bpfilter_umh_start, 96 + &bpfilter_ops.info); 97 + if (err) 98 + return err; 99 + bpfilter_ops.stop = false; 100 + pr_info("Loaded bpfilter_umh pid %d\n", bpfilter_ops.info.pid); 101 + 102 + /* health check that usermode process started correctly */ 103 + if (__bpfilter_process_sockopt(NULL, 0, NULL, 0, 0) != 0) { 104 + shutdown_umh(); 105 + return -EFAULT; 106 + } 107 + 108 + return 0; 74 109 } 75 110 76 111 static int __init load_umh(void) 77 112 { 78 113 int err; 79 114 80 - /* fork usermode process */ 81 - info.cmdline = "bpfilter_umh"; 82 - err = fork_usermode_blob(&bpfilter_umh_start, 83 - &bpfilter_umh_end - &bpfilter_umh_start, 84 - &info); 85 - if (err) 86 - return err; 87 - pr_info("Loaded bpfilter_umh pid %d\n", info.pid); 88 - 89 - /* health check that usermode process started correctly */ 90 - if (__bpfilter_process_sockopt(NULL, 0, NULL, 0, 0) != 0) { 91 - stop_umh(); 92 - return -EFAULT; 115 + mutex_lock(&bpfilter_ops.lock); 116 + if (!bpfilter_ops.stop) { 117 + err = -EFAULT; 118 + goto out; 93 119 } 94 - if (IS_ENABLED(CONFIG_INET)) 95 - bpfilter_process_sockopt = &__bpfilter_process_sockopt; 96 - 97 - return 0; 120 + err = start_umh(); 121 + if (!err && IS_ENABLED(CONFIG_INET)) { 122 + bpfilter_ops.sockopt = &__bpfilter_process_sockopt; 123 + bpfilter_ops.start = &start_umh; 124 + } 125 + out: 126 + mutex_unlock(&bpfilter_ops.lock); 127 + return err; 98 128 } 99 129 100 130 static void __exit fini_umh(void) 101 131 { 102 - stop_umh(); 132 + mutex_lock(&bpfilter_ops.lock); 133 + if (IS_ENABLED(CONFIG_INET)) { 134 + shutdown_umh(); 135 + bpfilter_ops.start = NULL; 136 + bpfilter_ops.sockopt = NULL; 137 + } 138 + mutex_unlock(&bpfilter_ops.lock); 103 139 } 104 140 module_init(load_umh); 105 141 module_exit(fini_umh);
+1 -1
net/bpfilter/bpfilter_umh_blob.S
··· 1 1 /* SPDX-License-Identifier: GPL-2.0 */ 2 - .section .init.rodata, "a" 2 + .section .rodata, "a" 3 3 .global bpfilter_umh_start 4 4 bpfilter_umh_start: 5 5 .incbin "net/bpfilter/bpfilter_umh"
+5
net/bridge/br_fdb.c
··· 1128 1128 err = -ENOMEM; 1129 1129 goto err_unlock; 1130 1130 } 1131 + if (swdev_notify) 1132 + fdb->added_by_user = 1; 1131 1133 fdb->added_by_external_learn = 1; 1132 1134 fdb_notify(br, fdb, RTM_NEWNEIGH, swdev_notify); 1133 1135 } else { ··· 1148 1146 fdb->added_by_external_learn = 1; 1149 1147 modified = true; 1150 1148 } 1149 + 1150 + if (swdev_notify) 1151 + fdb->added_by_user = 1; 1151 1152 1152 1153 if (modified) 1153 1154 fdb_notify(br, fdb, RTM_NEWNEIGH, swdev_notify);
+5 -5
net/bridge/br_forward.c
··· 36 36 37 37 int br_dev_queue_push_xmit(struct net *net, struct sock *sk, struct sk_buff *skb) 38 38 { 39 + skb_push(skb, ETH_HLEN); 39 40 if (!is_skb_forwardable(skb->dev, skb)) 40 41 goto drop; 41 42 42 - skb_push(skb, ETH_HLEN); 43 43 br_drop_fake_rtable(skb); 44 44 45 45 if (skb->ip_summed == CHECKSUM_PARTIAL && ··· 65 65 66 66 int br_forward_finish(struct net *net, struct sock *sk, struct sk_buff *skb) 67 67 { 68 + skb->tstamp = 0; 68 69 return NF_HOOK(NFPROTO_BRIDGE, NF_BR_POST_ROUTING, 69 70 net, sk, skb, NULL, skb->dev, 70 71 br_dev_queue_push_xmit); ··· 98 97 net = dev_net(indev); 99 98 } else { 100 99 if (unlikely(netpoll_tx_running(to->br->dev))) { 101 - if (!is_skb_forwardable(skb->dev, skb)) { 100 + skb_push(skb, ETH_HLEN); 101 + if (!is_skb_forwardable(skb->dev, skb)) 102 102 kfree_skb(skb); 103 - } else { 104 - skb_push(skb, ETH_HLEN); 103 + else 105 104 br_netpoll_send_skb(to, skb); 106 - } 107 105 return; 108 106 } 109 107 br_hook = NF_BR_LOCAL_OUT;
+1 -1
net/bridge/br_netfilter_hooks.c
··· 265 265 struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb); 266 266 int ret; 267 267 268 - if (neigh->hh.hh_len) { 268 + if ((neigh->nud_state & NUD_CONNECTED) && neigh->hh.hh_len) { 269 269 neigh_hh_bridge(&neigh->hh, skb); 270 270 skb->dev = nf_bridge->physindev; 271 271 ret = br_handle_frame_finish(net, sk, skb);
+1
net/bridge/br_netfilter_ipv6.c
··· 131 131 IPSTATS_MIB_INDISCARDS); 132 132 goto drop; 133 133 } 134 + hdr = ipv6_hdr(skb); 134 135 } 135 136 if (hdr->nexthdr == NEXTHDR_HOP && br_nf_check_hbh_len(skb)) 136 137 goto drop;
+1
net/bridge/br_private.h
··· 107 107 /* private vlan flags */ 108 108 enum { 109 109 BR_VLFLAG_PER_PORT_STATS = BIT(0), 110 + BR_VLFLAG_ADDED_BY_SWITCHDEV = BIT(1), 110 111 }; 111 112 112 113 /**
+13 -13
net/bridge/br_vlan.c
··· 80 80 } 81 81 82 82 static int __vlan_vid_add(struct net_device *dev, struct net_bridge *br, 83 - u16 vid, u16 flags, struct netlink_ext_ack *extack) 83 + struct net_bridge_vlan *v, u16 flags, 84 + struct netlink_ext_ack *extack) 84 85 { 85 86 int err; 86 87 87 88 /* Try switchdev op first. In case it is not supported, fallback to 88 89 * 8021q add. 89 90 */ 90 - err = br_switchdev_port_vlan_add(dev, vid, flags, extack); 91 + err = br_switchdev_port_vlan_add(dev, v->vid, flags, extack); 91 92 if (err == -EOPNOTSUPP) 92 - return vlan_vid_add(dev, br->vlan_proto, vid); 93 + return vlan_vid_add(dev, br->vlan_proto, v->vid); 94 + v->priv_flags |= BR_VLFLAG_ADDED_BY_SWITCHDEV; 93 95 return err; 94 96 } 95 97 ··· 123 121 } 124 122 125 123 static int __vlan_vid_del(struct net_device *dev, struct net_bridge *br, 126 - u16 vid) 124 + const struct net_bridge_vlan *v) 127 125 { 128 126 int err; 129 127 130 128 /* Try switchdev op first. In case it is not supported, fallback to 131 129 * 8021q del. 132 130 */ 133 - err = br_switchdev_port_vlan_del(dev, vid); 134 - if (err == -EOPNOTSUPP) { 135 - vlan_vid_del(dev, br->vlan_proto, vid); 136 - return 0; 137 - } 138 - return err; 131 + err = br_switchdev_port_vlan_del(dev, v->vid); 132 + if (!(v->priv_flags & BR_VLFLAG_ADDED_BY_SWITCHDEV)) 133 + vlan_vid_del(dev, br->vlan_proto, v->vid); 134 + return err == -EOPNOTSUPP ? 0 : err; 139 135 } 140 136 141 137 /* Returns a master vlan, if it didn't exist it gets created. In all cases a ··· 242 242 * This ensures tagged traffic enters the bridge when 243 243 * promiscuous mode is disabled by br_manage_promisc(). 244 244 */ 245 - err = __vlan_vid_add(dev, br, v->vid, flags, extack); 245 + err = __vlan_vid_add(dev, br, v, flags, extack); 246 246 if (err) 247 247 goto out; 248 248 ··· 305 305 306 306 out_filt: 307 307 if (p) { 308 - __vlan_vid_del(dev, br, v->vid); 308 + __vlan_vid_del(dev, br, v); 309 309 if (masterv) { 310 310 if (v->stats && masterv->stats != v->stats) 311 311 free_percpu(v->stats); ··· 338 338 339 339 __vlan_delete_pvid(vg, v->vid); 340 340 if (p) { 341 - err = __vlan_vid_del(p->dev, p->br, v->vid); 341 + err = __vlan_vid_del(p->dev, p->br, v); 342 342 if (err) 343 343 goto out; 344 344 } else {
+4 -2
net/bridge/netfilter/ebtables.c
··· 1137 1137 tmp.name[sizeof(tmp.name) - 1] = 0; 1138 1138 1139 1139 countersize = COUNTER_OFFSET(tmp.nentries) * nr_cpu_ids; 1140 - newinfo = vmalloc(sizeof(*newinfo) + countersize); 1140 + newinfo = __vmalloc(sizeof(*newinfo) + countersize, GFP_KERNEL_ACCOUNT, 1141 + PAGE_KERNEL); 1141 1142 if (!newinfo) 1142 1143 return -ENOMEM; 1143 1144 1144 1145 if (countersize) 1145 1146 memset(newinfo->counters, 0, countersize); 1146 1147 1147 - newinfo->entries = vmalloc(tmp.entries_size); 1148 + newinfo->entries = __vmalloc(tmp.entries_size, GFP_KERNEL_ACCOUNT, 1149 + PAGE_KERNEL); 1148 1150 if (!newinfo->entries) { 1149 1151 ret = -ENOMEM; 1150 1152 goto free_newinfo;
+1
net/bridge/netfilter/nft_reject_bridge.c
··· 229 229 pskb_trim_rcsum(skb, ntohs(ip6h->payload_len) + sizeof(*ip6h))) 230 230 return false; 231 231 232 + ip6h = ipv6_hdr(skb); 232 233 thoff = ipv6_skip_exthdr(skb, ((u8*)(ip6h+1) - skb->data), &proto, &fo); 233 234 if (thoff < 0 || thoff >= skb->len || (fo & htons(~0x7)) != 0) 234 235 return false;
+28 -4
net/can/gw.c
··· 416 416 while (modidx < MAX_MODFUNCTIONS && gwj->mod.modfunc[modidx]) 417 417 (*gwj->mod.modfunc[modidx++])(cf, &gwj->mod); 418 418 419 - /* check for checksum updates when the CAN frame has been modified */ 419 + /* Has the CAN frame been modified? */ 420 420 if (modidx) { 421 - if (gwj->mod.csumfunc.crc8) 422 - (*gwj->mod.csumfunc.crc8)(cf, &gwj->mod.csum.crc8); 421 + /* get available space for the processed CAN frame type */ 422 + int max_len = nskb->len - offsetof(struct can_frame, data); 423 423 424 - if (gwj->mod.csumfunc.xor) 424 + /* dlc may have changed, make sure it fits to the CAN frame */ 425 + if (cf->can_dlc > max_len) 426 + goto out_delete; 427 + 428 + /* check for checksum updates in classic CAN length only */ 429 + if (gwj->mod.csumfunc.crc8) { 430 + if (cf->can_dlc > 8) 431 + goto out_delete; 432 + 433 + (*gwj->mod.csumfunc.crc8)(cf, &gwj->mod.csum.crc8); 434 + } 435 + 436 + if (gwj->mod.csumfunc.xor) { 437 + if (cf->can_dlc > 8) 438 + goto out_delete; 439 + 425 440 (*gwj->mod.csumfunc.xor)(cf, &gwj->mod.csum.xor); 441 + } 426 442 } 427 443 428 444 /* clear the skb timestamp if not configured the other way */ ··· 450 434 gwj->dropped_frames++; 451 435 else 452 436 gwj->handled_frames++; 437 + 438 + return; 439 + 440 + out_delete: 441 + /* delete frame due to misconfiguration */ 442 + gwj->deleted_frames++; 443 + kfree_skb(nskb); 444 + return; 453 445 } 454 446 455 447 static inline int cgw_register_filter(struct net *net, struct cgw_job *gwj)
+10 -1
net/ceph/ceph_common.c
··· 255 255 Opt_nocephx_sign_messages, 256 256 Opt_tcp_nodelay, 257 257 Opt_notcp_nodelay, 258 + Opt_abort_on_full, 258 259 }; 259 260 260 261 static match_table_t opt_tokens = { ··· 281 280 {Opt_nocephx_sign_messages, "nocephx_sign_messages"}, 282 281 {Opt_tcp_nodelay, "tcp_nodelay"}, 283 282 {Opt_notcp_nodelay, "notcp_nodelay"}, 283 + {Opt_abort_on_full, "abort_on_full"}, 284 284 {-1, NULL} 285 285 }; 286 286 ··· 537 535 opt->flags &= ~CEPH_OPT_TCP_NODELAY; 538 536 break; 539 537 538 + case Opt_abort_on_full: 539 + opt->flags |= CEPH_OPT_ABORT_ON_FULL; 540 + break; 541 + 540 542 default: 541 543 BUG_ON(token); 542 544 } ··· 555 549 } 556 550 EXPORT_SYMBOL(ceph_parse_options); 557 551 558 - int ceph_print_client_options(struct seq_file *m, struct ceph_client *client) 552 + int ceph_print_client_options(struct seq_file *m, struct ceph_client *client, 553 + bool show_all) 559 554 { 560 555 struct ceph_options *opt = client->options; 561 556 size_t pos = m->count; ··· 581 574 seq_puts(m, "nocephx_sign_messages,"); 582 575 if ((opt->flags & CEPH_OPT_TCP_NODELAY) == 0) 583 576 seq_puts(m, "notcp_nodelay,"); 577 + if (show_all && (opt->flags & CEPH_OPT_ABORT_ON_FULL)) 578 + seq_puts(m, "abort_on_full,"); 584 579 585 580 if (opt->mount_timeout != CEPH_MOUNT_TIMEOUT_DEFAULT) 586 581 seq_printf(m, "mount_timeout=%d,",
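The abort_on_full plumbing above follows the usual <linux/parser.h> pattern: an enum of token ids, a match_table_t mapping option strings to ids, and a switch over match_token(). A minimal kernel-context sketch with an invented "shiny" option (my_tokens and the flag bit are illustrative):

    #include <linux/parser.h>

    enum { Opt_shiny };

    static match_table_t my_tokens = {
        {Opt_shiny, "shiny"},
        {-1, NULL}
    };

    static int my_parse_one(char *p, unsigned long *flags)
    {
        substring_t argstr[MAX_OPT_ARGS];

        switch (match_token(p, my_tokens, argstr)) {
        case Opt_shiny:
            *flags |= BIT(0);       /* illustrative flag bit */
            return 0;
        default:
            return -EINVAL;
        }
    }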
+1 -1
net/ceph/debugfs.c
··· 375 375 struct ceph_client *client = s->private; 376 376 int ret; 377 377 378 - ret = ceph_print_client_options(s, client); 378 + ret = ceph_print_client_options(s, client, true); 379 379 if (ret) 380 380 return ret; 381 381
+2 -2
net/ceph/osd_client.c
··· 2315 2315 (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) || 2316 2316 pool_full(osdc, req->r_t.base_oloc.pool))) { 2317 2317 dout("req %p full/pool_full\n", req); 2318 - if (osdc->abort_on_full) { 2318 + if (ceph_test_opt(osdc->client, ABORT_ON_FULL)) { 2319 2319 err = -ENOSPC; 2320 2320 } else { 2321 2321 pr_warn_ratelimited("FULL or reached pool quota\n"); ··· 2545 2545 { 2546 2546 bool victims = false; 2547 2547 2548 - if (osdc->abort_on_full && 2548 + if (ceph_test_opt(osdc->client, ABORT_ON_FULL) && 2549 2549 (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) || have_pool_full(osdc))) 2550 2550 for_each_request(osdc, abort_on_full_fn, &victims); 2551 2551 }
+21 -13
net/core/filter.c
··· 2020 2020 static int __bpf_redirect_no_mac(struct sk_buff *skb, struct net_device *dev, 2021 2021 u32 flags) 2022 2022 { 2023 - /* skb->mac_len is not set on normal egress */ 2024 - unsigned int mlen = skb->network_header - skb->mac_header; 2023 + unsigned int mlen = skb_network_offset(skb); 2025 2024 2026 - __skb_pull(skb, mlen); 2025 + if (mlen) { 2026 + __skb_pull(skb, mlen); 2027 2027 2028 - /* At ingress, the mac header has already been pulled once. 2029 - * At egress, skb_pospull_rcsum has to be done in case that 2030 - * the skb is originated from ingress (i.e. a forwarded skb) 2031 - * to ensure that rcsum starts at net header. 2032 - */ 2033 - if (!skb_at_tc_ingress(skb)) 2034 - skb_postpull_rcsum(skb, skb_mac_header(skb), mlen); 2028 + /* At ingress, the mac header has already been pulled once. 2029 + * At egress, skb_pospull_rcsum has to be done in case that 2030 + * the skb is originated from ingress (i.e. a forwarded skb) 2031 + * to ensure that rcsum starts at net header. 2032 + */ 2033 + if (!skb_at_tc_ingress(skb)) 2034 + skb_postpull_rcsum(skb, skb_mac_header(skb), mlen); 2035 + } 2035 2036 skb_pop_mac_header(skb); 2036 2037 skb_reset_mac_len(skb); 2037 2038 return flags & BPF_F_INGRESS ? ··· 4120 4119 sk->sk_sndbuf = max_t(int, val * 2, SOCK_MIN_SNDBUF); 4121 4120 break; 4122 4121 case SO_MAX_PACING_RATE: /* 32bit version */ 4122 + if (val != ~0U) 4123 + cmpxchg(&sk->sk_pacing_status, 4124 + SK_PACING_NONE, 4125 + SK_PACING_NEEDED); 4123 4126 sk->sk_max_pacing_rate = (val == ~0U) ? ~0UL : val; 4124 4127 sk->sk_pacing_rate = min(sk->sk_pacing_rate, 4125 4128 sk->sk_max_pacing_rate); ··· 4137 4132 sk->sk_rcvlowat = val ? : 1; 4138 4133 break; 4139 4134 case SO_MARK: 4140 - sk->sk_mark = val; 4135 + if (sk->sk_mark != val) { 4136 + sk->sk_mark = val; 4137 + sk_dst_reset(sk); 4138 + } 4141 4139 break; 4142 4140 default: 4143 4141 ret = -EINVAL; ··· 4211 4203 /* Only some options are supported */ 4212 4204 switch (optname) { 4213 4205 case TCP_BPF_IW: 4214 - if (val <= 0 || tp->data_segs_out > 0) 4206 + if (val <= 0 || tp->data_segs_out > tp->syn_data) 4215 4207 ret = -EINVAL; 4216 4208 else 4217 4209 tp->snd_cwnd = val; ··· 5317 5309 case BPF_FUNC_trace_printk: 5318 5310 if (capable(CAP_SYS_ADMIN)) 5319 5311 return bpf_get_trace_printk_proto(); 5320 - /* else: fall through */ 5312 + /* else, fall through */ 5321 5313 default: 5322 5314 return NULL; 5323 5315 }
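In the SO_MAX_PACING_RATE hunk above, cmpxchg() promotes sk_pacing_status from SK_PACING_NONE to SK_PACING_NEEDED without ever downgrading it: the store happens only if the current value still equals the expected old value. A generic kernel-context sketch of this lock-free one-way transition (names illustrative):

    enum my_state { MY_NONE, MY_NEEDED, MY_ACTIVE };

    static unsigned long state = MY_NONE;

    static void request_feature(void)
    {
        /*
         * Atomically: if state == MY_NONE, set it to MY_NEEDED. If another
         * path already moved it to MY_NEEDED or MY_ACTIVE, this is a no-op,
         * so a stronger state is never overwritten.
         */
        cmpxchg(&state, MY_NONE, MY_NEEDED);
    }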
+1
net/core/lwt_bpf.c
··· 63 63 lwt->name ? : "<unknown>"); 64 64 ret = BPF_OK; 65 65 } else { 66 + skb_reset_mac_header(skb); 66 67 ret = skb_do_redirect(skb); 67 68 if (ret == 0) 68 69 ret = BPF_REDIRECT;
+10 -5
net/core/neighbour.c
··· 18 18 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 19 19 20 20 #include <linux/slab.h> 21 + #include <linux/kmemleak.h> 21 22 #include <linux/types.h> 22 23 #include <linux/kernel.h> 23 24 #include <linux/module.h> ··· 444 443 ret = kmalloc(sizeof(*ret), GFP_ATOMIC); 445 444 if (!ret) 446 445 return NULL; 447 - if (size <= PAGE_SIZE) 446 + if (size <= PAGE_SIZE) { 448 447 buckets = kzalloc(size, GFP_ATOMIC); 449 - else 448 + } else { 450 449 buckets = (struct neighbour __rcu **) 451 450 __get_free_pages(GFP_ATOMIC | __GFP_ZERO, 452 451 get_order(size)); 452 + kmemleak_alloc(buckets, size, 1, GFP_ATOMIC); 453 + } 453 454 if (!buckets) { 454 455 kfree(ret); 455 456 return NULL; ··· 471 468 size_t size = (1 << nht->hash_shift) * sizeof(struct neighbour *); 472 469 struct neighbour __rcu **buckets = nht->hash_buckets; 473 470 474 - if (size <= PAGE_SIZE) 471 + if (size <= PAGE_SIZE) { 475 472 kfree(buckets); 476 - else 473 + } else { 474 + kmemleak_free(buckets); 477 475 free_pages((unsigned long)buckets, get_order(size)); 476 + } 478 477 kfree(nht); 479 478 } 480 479 ··· 1007 1002 if (neigh->ops->solicit) 1008 1003 neigh->ops->solicit(neigh, skb); 1009 1004 atomic_inc(&neigh->probes); 1010 - kfree_skb(skb); 1005 + consume_skb(skb); 1011 1006 } 1012 1007 1013 1008 /* Called when a timer expires for a neighbour entry. */
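The kmemleak calls added above are needed because kmemleak tracks slab allocations automatically but knows nothing about raw page-allocator memory: an unregistered __get_free_pages() buffer is neither scanned for the pointers it holds (causing false leak reports for objects it references) nor leak-checked itself. A kernel-context sketch of the pairing:

    #include <linux/gfp.h>
    #include <linux/kmemleak.h>

    void *buf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);

    if (buf)
        /* min_count = 1: report the block if nothing references it;
         * registering also makes kmemleak scan its contents. */
        kmemleak_alloc(buf, PAGE_SIZE << order, 1, GFP_KERNEL);

    /* ... and symmetrically on teardown: */
    kmemleak_free(buf);
    free_pages((unsigned long)buf, order);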
+1 -6
net/core/skbuff.c
··· 5270 5270 unsigned long chunk; 5271 5271 struct sk_buff *skb; 5272 5272 struct page *page; 5273 - gfp_t gfp_head; 5274 5273 int i; 5275 5274 5276 5275 *errcode = -EMSGSIZE; ··· 5279 5280 if (npages > MAX_SKB_FRAGS) 5280 5281 return NULL; 5281 5282 5282 - gfp_head = gfp_mask; 5283 - if (gfp_head & __GFP_DIRECT_RECLAIM) 5284 - gfp_head |= __GFP_RETRY_MAYFAIL; 5285 - 5286 5283 *errcode = -ENOBUFS; 5287 - skb = alloc_skb(header_len, gfp_head); 5284 + skb = alloc_skb(header_len, gfp_mask); 5288 5285 if (!skb) 5289 5286 return NULL; 5290 5287
+48 -10
net/ipv4/bpfilter/sockopt.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 2 + #include <linux/init.h> 3 + #include <linux/module.h> 2 4 #include <linux/uaccess.h> 3 5 #include <linux/bpfilter.h> 4 6 #include <uapi/linux/bpf.h> 5 7 #include <linux/wait.h> 6 8 #include <linux/kmod.h> 9 + #include <linux/fs.h> 10 + #include <linux/file.h> 7 11 8 - int (*bpfilter_process_sockopt)(struct sock *sk, int optname, 9 - char __user *optval, 10 - unsigned int optlen, bool is_set); 11 - EXPORT_SYMBOL_GPL(bpfilter_process_sockopt); 12 + struct bpfilter_umh_ops bpfilter_ops; 13 + EXPORT_SYMBOL_GPL(bpfilter_ops); 14 + 15 + static void bpfilter_umh_cleanup(struct umh_info *info) 16 + { 17 + mutex_lock(&bpfilter_ops.lock); 18 + bpfilter_ops.stop = true; 19 + fput(info->pipe_to_umh); 20 + fput(info->pipe_from_umh); 21 + info->pid = 0; 22 + mutex_unlock(&bpfilter_ops.lock); 23 + } 12 24 13 25 static int bpfilter_mbox_request(struct sock *sk, int optname, 14 26 char __user *optval, 15 27 unsigned int optlen, bool is_set) 16 28 { 17 - if (!bpfilter_process_sockopt) { 18 - int err = request_module("bpfilter"); 29 + int err; 30 + mutex_lock(&bpfilter_ops.lock); 31 + if (!bpfilter_ops.sockopt) { 32 + mutex_unlock(&bpfilter_ops.lock); 33 + err = request_module("bpfilter"); 34 + mutex_lock(&bpfilter_ops.lock); 19 35 20 36 if (err) 21 - return err; 22 - if (!bpfilter_process_sockopt) 23 - return -ECHILD; 37 + goto out; 38 + if (!bpfilter_ops.sockopt) { 39 + err = -ECHILD; 40 + goto out; 41 + } 24 42 } 25 - return bpfilter_process_sockopt(sk, optname, optval, optlen, is_set); 43 + if (bpfilter_ops.stop) { 44 + err = bpfilter_ops.start(); 45 + if (err) 46 + goto out; 47 + } 48 + err = bpfilter_ops.sockopt(sk, optname, optval, optlen, is_set); 49 + out: 50 + mutex_unlock(&bpfilter_ops.lock); 51 + return err; 26 52 } 27 53 28 54 int bpfilter_ip_set_sockopt(struct sock *sk, int optname, char __user *optval, ··· 67 41 68 42 return bpfilter_mbox_request(sk, optname, optval, len, false); 69 43 } 44 + 45 + static int __init bpfilter_sockopt_init(void) 46 + { 47 + mutex_init(&bpfilter_ops.lock); 48 + bpfilter_ops.stop = true; 49 + bpfilter_ops.info.cmdline = "bpfilter_umh"; 50 + bpfilter_ops.info.cleanup = &bpfilter_umh_cleanup; 51 + 52 + return 0; 53 + } 54 + 55 + module_init(bpfilter_sockopt_init);
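bpfilter_mbox_request() above shows the standard drop-relock dance around request_module(): the mutex cannot be held across the call because the loaded module's own init path takes the same mutex, so the code unlocks, loads, relocks, and then re-validates the state it checked before dropping the lock. A condensed sketch (my_ops and the module name are illustrative):

    static struct {
        struct mutex lock;
        int (*handler)(int arg);
    } my_ops;

    int my_request(int arg)
    {
        int err = 0;

        mutex_lock(&my_ops.lock);
        if (!my_ops.handler) {
            mutex_unlock(&my_ops.lock);
            err = request_module("my_mod"); /* its init takes my_ops.lock */
            mutex_lock(&my_ops.lock);
            if (!err && !my_ops.handler)
                err = -ECHILD;              /* loaded but never registered */
        }
        if (!err)
            err = my_ops.handler(arg);      /* state re-validated under lock */
        mutex_unlock(&my_ops.lock);
        return err;
    }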
+1 -1
net/ipv4/devinet.c
··· 1826 1826 if (fillargs.netnsid >= 0) 1827 1827 put_net(tgt_net); 1828 1828 1829 - return err < 0 ? err : skb->len; 1829 + return skb->len ? : err; 1830 1830 } 1831 1831 1832 1832 static void rtmsg_ifa(int event, struct in_ifaddr *ifa, struct nlmsghdr *nlh,
+2 -2
net/ipv4/fib_frontend.c
··· 203 203 struct fib_table *tb; 204 204 205 205 hlist_for_each_entry_safe(tb, tmp, head, tb_hlist) 206 - flushed += fib_table_flush(net, tb); 206 + flushed += fib_table_flush(net, tb, false); 207 207 } 208 208 209 209 if (flushed) ··· 1463 1463 1464 1464 hlist_for_each_entry_safe(tb, tmp, head, tb_hlist) { 1465 1465 hlist_del(&tb->tb_hlist); 1466 - fib_table_flush(net, tb); 1466 + fib_table_flush(net, tb, true); 1467 1467 fib_free_table(tb); 1468 1468 } 1469 1469 }
+12 -3
net/ipv4/fib_trie.c
··· 1856 1856 } 1857 1857 1858 1858 /* Caller must hold RTNL. */ 1859 - int fib_table_flush(struct net *net, struct fib_table *tb) 1859 + int fib_table_flush(struct net *net, struct fib_table *tb, bool flush_all) 1860 1860 { 1861 1861 struct trie *t = (struct trie *)tb->tb_data; 1862 1862 struct key_vector *pn = t->kv; ··· 1904 1904 hlist_for_each_entry_safe(fa, tmp, &n->leaf, fa_list) { 1905 1905 struct fib_info *fi = fa->fa_info; 1906 1906 1907 - if (!fi || !(fi->fib_flags & RTNH_F_DEAD) || 1908 - tb->tb_id != fa->tb_id) { 1907 + if (!fi || tb->tb_id != fa->tb_id || 1908 + (!(fi->fib_flags & RTNH_F_DEAD) && 1909 + !fib_props[fa->fa_type].error)) { 1910 + slen = fa->fa_slen; 1911 + continue; 1912 + } 1913 + 1914 + /* Do not flush error routes if network namespace is 1915 + * not being dismantled 1916 + */ 1917 + if (!flush_all && fib_props[fa->fa_type].error) { 1909 1918 slen = fa->fa_slen; 1910 1919 continue; 1911 1920 }
+9 -3
net/ipv4/fou.c
··· 1020 1020 { 1021 1021 int transport_offset = skb_transport_offset(skb); 1022 1022 struct guehdr *guehdr; 1023 - size_t optlen; 1023 + size_t len, optlen; 1024 1024 int ret; 1025 1025 1026 - if (skb->len < sizeof(struct udphdr) + sizeof(struct guehdr)) 1026 + len = sizeof(struct udphdr) + sizeof(struct guehdr); 1027 + if (!pskb_may_pull(skb, len)) 1027 1028 return -EINVAL; 1028 1029 1029 1030 guehdr = (struct guehdr *)&udp_hdr(skb)[1]; ··· 1059 1058 1060 1059 optlen = guehdr->hlen << 2; 1061 1060 1061 + if (!pskb_may_pull(skb, len + optlen)) 1062 + return -EINVAL; 1063 + 1064 + guehdr = (struct guehdr *)&udp_hdr(skb)[1]; 1062 1065 if (validate_gue_flags(guehdr, optlen)) 1063 1066 return -EINVAL; 1064 1067 ··· 1070 1065 * recursion. Besides, this kind of encapsulation can't even be 1071 1066 * configured currently. Discard this. 1072 1067 */ 1073 - if (guehdr->proto_ctype == IPPROTO_UDP) 1068 + if (guehdr->proto_ctype == IPPROTO_UDP || 1069 + guehdr->proto_ctype == IPPROTO_UDPLITE) 1074 1070 return -EOPNOTSUPP; 1075 1071 1076 1072 skb_set_transport_header(skb, -(int)sizeof(struct icmphdr));
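The checks added above follow the rule for reading encapsulation headers: pskb_may_pull() guarantees the requested bytes are in the skb's linear area but may reallocate skb->head to achieve that, so any header pointer computed before the call is invalid afterwards and must be rederived. A sketch of the two-step pull (struct myhdr and its optlen field are illustrative):

    struct myhdr {
        __u8 optlen;    /* illustrative option length, in bytes */
    };

    size_t len = sizeof(struct udphdr) + sizeof(struct myhdr);
    struct myhdr *mh;

    if (!pskb_may_pull(skb, len))
        return -EINVAL;
    mh = (struct myhdr *)&udp_hdr(skb)[1];

    if (!pskb_may_pull(skb, len + mh->optlen))
        return -EINVAL;
    /* skb->head may have moved: recompute, never reuse the old pointer. */
    mh = (struct myhdr *)&udp_hdr(skb)[1];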
+14 -9
net/ipv4/ip_gre.c
··· 569 569 dev->stats.tx_dropped++; 570 570 } 571 571 572 - static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev, 573 - __be16 proto) 572 + static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev) 574 573 { 575 574 struct ip_tunnel *tunnel = netdev_priv(dev); 576 575 struct ip_tunnel_info *tun_info; ··· 577 578 struct erspan_metadata *md; 578 579 struct rtable *rt = NULL; 579 580 bool truncate = false; 581 + __be16 df, proto; 580 582 struct flowi4 fl; 581 583 int tunnel_hlen; 582 584 int version; 583 - __be16 df; 584 585 int nhoff; 585 586 int thoff; 586 587 ··· 625 626 if (version == 1) { 626 627 erspan_build_header(skb, ntohl(tunnel_id_to_key32(key->tun_id)), 627 628 ntohl(md->u.index), truncate, true); 629 + proto = htons(ETH_P_ERSPAN); 628 630 } else if (version == 2) { 629 631 erspan_build_header_v2(skb, 630 632 ntohl(tunnel_id_to_key32(key->tun_id)), 631 633 md->u.md2.dir, 632 634 get_hwid(&md->u.md2), 633 635 truncate, true); 636 + proto = htons(ETH_P_ERSPAN2); 634 637 } else { 635 638 goto err_free_rt; 636 639 } 637 640 638 641 gre_build_header(skb, 8, TUNNEL_SEQ, 639 - htons(ETH_P_ERSPAN), 0, htonl(tunnel->o_seqno++)); 642 + proto, 0, htonl(tunnel->o_seqno++)); 640 643 641 644 df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0; 642 645 ··· 722 721 { 723 722 struct ip_tunnel *tunnel = netdev_priv(dev); 724 723 bool truncate = false; 724 + __be16 proto; 725 725 726 726 if (!pskb_inet_may_pull(skb)) 727 727 goto free_skb; 728 728 729 729 if (tunnel->collect_md) { 730 - erspan_fb_xmit(skb, dev, skb->protocol); 730 + erspan_fb_xmit(skb, dev); 731 731 return NETDEV_TX_OK; 732 732 } 733 733 ··· 744 742 } 745 743 746 744 /* Push ERSPAN header */ 747 - if (tunnel->erspan_ver == 1) 745 + if (tunnel->erspan_ver == 1) { 748 746 erspan_build_header(skb, ntohl(tunnel->parms.o_key), 749 747 tunnel->index, 750 748 truncate, true); 751 - else if (tunnel->erspan_ver == 2) 749 + proto = htons(ETH_P_ERSPAN); 750 + } else if (tunnel->erspan_ver == 2) { 752 751 erspan_build_header_v2(skb, ntohl(tunnel->parms.o_key), 753 752 tunnel->dir, tunnel->hwid, 754 753 truncate, true); 755 - else 754 + proto = htons(ETH_P_ERSPAN2); 755 + } else { 756 756 goto free_skb; 757 + } 757 758 758 759 tunnel->parms.o_flags &= ~TUNNEL_KEY; 759 - __gre_xmit(skb, dev, &tunnel->parms.iph, htons(ETH_P_ERSPAN)); 760 + __gre_xmit(skb, dev, &tunnel->parms.iph, proto); 760 761 return NETDEV_TX_OK; 761 762 762 763 free_skb:
+1
net/ipv4/ip_input.c
··· 488 488 goto drop; 489 489 } 490 490 491 + iph = ip_hdr(skb); 491 492 skb->transport_header = skb->network_header + iph->ihl*4; 492 493 493 494 /* Remove any debris in the socket control block */
+5 -7
net/ipv4/ip_sockglue.c
··· 148 148 149 149 static void ip_cmsg_recv_dstaddr(struct msghdr *msg, struct sk_buff *skb) 150 150 { 151 + __be16 _ports[2], *ports; 151 152 struct sockaddr_in sin; 152 - __be16 *ports; 153 - int end; 154 - 155 - end = skb_transport_offset(skb) + 4; 156 - if (end > 0 && !pskb_may_pull(skb, end)) 157 - return; 158 153 159 154 /* All current transport protocols have the port numbers in the 160 155 * first four bytes of the transport header and this function is 161 156 * written with this assumption in mind. 162 157 */ 163 - ports = (__be16 *)skb_transport_header(skb); 158 + ports = skb_header_pointer(skb, skb_transport_offset(skb), 159 + sizeof(_ports), &_ports); 160 + if (!ports) 161 + return; 164 162 165 163 sin.sin_family = AF_INET; 166 164 sin.sin_addr.s_addr = ip_hdr(skb)->daddr;
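skb_header_pointer(), used above in place of pskb_may_pull() plus a raw transport-header dereference, either returns a pointer straight into the skb when the range is linear or copies the bytes into the caller-supplied buffer; it returns NULL only when the packet is too short, and it never reallocates the skb. A kernel-context sketch of the same port read:

    __be16 _ports[2], *ports;

    /* Safe even if the transport header sits in paged (frag) data. */
    ports = skb_header_pointer(skb, skb_transport_offset(skb),
                               sizeof(_ports), &_ports);
    if (!ports)
        return;     /* fewer than 4 bytes of transport header present */

    /* ports[0] / ports[1] are valid either way: direct or copied. */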
+1 -1
net/ipv4/tcp.c
··· 1186 1186 flags = msg->msg_flags; 1187 1187 1188 1188 if (flags & MSG_ZEROCOPY && size && sock_flag(sk, SOCK_ZEROCOPY)) { 1189 - if (sk->sk_state != TCP_ESTABLISHED) { 1189 + if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) { 1190 1190 err = -EINVAL; 1191 1191 goto out_err; 1192 1192 }
+1 -1
net/ipv4/tcp_timer.c
··· 226 226 if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) { 227 227 if (icsk->icsk_retransmits) { 228 228 dst_negative_advice(sk); 229 - } else if (!tp->syn_data && !tp->syn_fastopen) { 229 + } else { 230 230 sk_rethink_txhash(sk); 231 231 } 232 232 retry_until = icsk->icsk_syn_retries ? : net->ipv4.sysctl_tcp_syn_retries;
+13 -5
net/ipv4/udp.c
··· 847 847 const int hlen = skb_network_header_len(skb) + 848 848 sizeof(struct udphdr); 849 849 850 - if (hlen + cork->gso_size > cork->fragsize) 850 + if (hlen + cork->gso_size > cork->fragsize) { 851 + kfree_skb(skb); 851 852 return -EINVAL; 852 - if (skb->len > cork->gso_size * UDP_MAX_SEGMENTS) 853 + } 854 + if (skb->len > cork->gso_size * UDP_MAX_SEGMENTS) { 855 + kfree_skb(skb); 853 856 return -EINVAL; 854 - if (sk->sk_no_check_tx) 857 + } 858 + if (sk->sk_no_check_tx) { 859 + kfree_skb(skb); 855 860 return -EINVAL; 861 + } 856 862 if (skb->ip_summed != CHECKSUM_PARTIAL || is_udplite || 857 - dst_xfrm(skb_dst(skb))) 863 + dst_xfrm(skb_dst(skb))) { 864 + kfree_skb(skb); 858 865 return -EIO; 866 + } 859 867 860 868 skb_shinfo(skb)->gso_size = cork->gso_size; 861 869 skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4; ··· 1926 1918 } 1927 1919 EXPORT_SYMBOL(udp_lib_rehash); 1928 1920 1929 - static void udp_v4_rehash(struct sock *sk) 1921 + void udp_v4_rehash(struct sock *sk) 1930 1922 { 1931 1923 u16 new_hash = ipv4_portaddr_hash(sock_net(sk), 1932 1924 inet_sk(sk)->inet_rcv_saddr,
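The kfree_skb() calls added on each early return above enforce an ownership rule: once a send helper has been handed the skb, every error exit must free it, or the buffer leaks. A generic sketch of the convention (queue_it() is a hypothetical consumer):

    static int take_and_send(struct sk_buff *skb, bool sane)
    {
        if (!sane) {
            kfree_skb(skb); /* we own it: free on the error path too */
            return -EINVAL;
        }
        return queue_it(skb);   /* ownership passes onward on success */
    }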
+1
net/ipv4/udp_impl.h
··· 10 10 int __udp4_lib_err(struct sk_buff *, u32, struct udp_table *); 11 11 12 12 int udp_v4_get_port(struct sock *sk, unsigned short snum); 13 + void udp_v4_rehash(struct sock *sk); 13 14 14 15 int udp_setsockopt(struct sock *sk, int level, int optname, 15 16 char __user *optval, unsigned int optlen);
+1
net/ipv4/udplite.c
··· 53 53 .sendpage = udp_sendpage, 54 54 .hash = udp_lib_hash, 55 55 .unhash = udp_lib_unhash, 56 + .rehash = udp_v4_rehash, 56 57 .get_port = udp_v4_get_port, 57 58 .memory_allocated = &udp_memory_allocated, 58 59 .sysctl_mem = sysctl_udp_mem,
+1 -1
net/ipv6/addrconf.c
··· 5154 5154 if (fillargs.netnsid >= 0) 5155 5155 put_net(tgt_net); 5156 5156 5157 - return err < 0 ? err : skb->len; 5157 + return skb->len ? : err; 5158 5158 } 5159 5159 5160 5160 static int inet6_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
+13 -1
net/ipv6/af_inet6.c
··· 310 310 311 311 /* Check if the address belongs to the host. */ 312 312 if (addr_type == IPV6_ADDR_MAPPED) { 313 + struct net_device *dev = NULL; 313 314 int chk_addr_ret; 314 315 315 316 /* Binding to v4-mapped address on a v6-only socket ··· 321 320 goto out; 322 321 } 323 322 323 + rcu_read_lock(); 324 + if (sk->sk_bound_dev_if) { 325 + dev = dev_get_by_index_rcu(net, sk->sk_bound_dev_if); 326 + if (!dev) { 327 + err = -ENODEV; 328 + goto out_unlock; 329 + } 330 + } 331 + 324 332 /* Reproduce AF_INET checks to make the bindings consistent */ 325 333 v4addr = addr->sin6_addr.s6_addr32[3]; 326 - chk_addr_ret = inet_addr_type(net, v4addr); 334 + chk_addr_ret = inet_addr_type_dev_table(net, dev, v4addr); 335 + rcu_read_unlock(); 336 + 327 337 if (!inet_can_nonlocal_bind(net, inet) && 328 338 v4addr != htonl(INADDR_ANY) && 329 339 chk_addr_ret != RTN_LOCAL &&
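dev_get_by_index_rcu(), used above to honour sk_bound_dev_if during the v4-mapped address check, does not take a reference on the device; the returned pointer is valid only inside the surrounding RCU read-side section. Sketch (use_dev() illustrative):

    struct net_device *dev;

    rcu_read_lock();
    dev = dev_get_by_index_rcu(net, ifindex);
    if (dev)
        use_dev(dev);   /* must complete before rcu_read_unlock() */
    rcu_read_unlock();
    /* 'dev' must not be dereferenced past this point. */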
+5 -6
net/ipv6/datagram.c
··· 341 341 skb_reset_network_header(skb); 342 342 iph = ipv6_hdr(skb); 343 343 iph->daddr = fl6->daddr; 344 + ip6_flow_hdr(iph, 0, 0); 344 345 345 346 serr = SKB_EXT_ERR(skb); 346 347 serr->ee.ee_errno = err; ··· 701 700 } 702 701 if (np->rxopt.bits.rxorigdstaddr) { 703 702 struct sockaddr_in6 sin6; 704 - __be16 *ports; 705 - int end; 703 + __be16 _ports[2], *ports; 706 704 707 - end = skb_transport_offset(skb) + 4; 708 - if (end <= 0 || pskb_may_pull(skb, end)) { 705 + ports = skb_header_pointer(skb, skb_transport_offset(skb), 706 + sizeof(_ports), &_ports); 707 + if (ports) { 709 708 /* All current transport protocols have the port numbers in the 710 709 * first four bytes of the transport header and this function is 711 710 * written with this assumption in mind. 712 711 */ 713 - ports = (__be16 *)skb_transport_header(skb); 714 - 715 712 sin6.sin6_family = AF_INET6; 716 713 sin6.sin6_addr = ipv6_hdr(skb)->daddr; 717 714 sin6.sin6_port = ports[1];
+15 -2
net/ipv6/fou6.c
··· 90 90 { 91 91 int transport_offset = skb_transport_offset(skb); 92 92 struct guehdr *guehdr; 93 - size_t optlen; 93 + size_t len, optlen; 94 94 int ret; 95 95 96 - if (skb->len < sizeof(struct udphdr) + sizeof(struct guehdr)) 96 + len = sizeof(struct udphdr) + sizeof(struct guehdr); 97 + if (!pskb_may_pull(skb, len)) 97 98 return -EINVAL; 98 99 99 100 guehdr = (struct guehdr *)&udp_hdr(skb)[1]; ··· 129 128 130 129 optlen = guehdr->hlen << 2; 131 130 131 + if (!pskb_may_pull(skb, len + optlen)) 132 + return -EINVAL; 133 + 134 + guehdr = (struct guehdr *)&udp_hdr(skb)[1]; 132 135 if (validate_gue_flags(guehdr, optlen)) 133 136 return -EINVAL; 137 + 138 + /* Handling exceptions for direct UDP encapsulation in GUE would lead to 139 + * recursion. Besides, this kind of encapsulation can't even be 140 + * configured currently. Discard this. 141 + */ 142 + if (guehdr->proto_ctype == IPPROTO_UDP || 143 + guehdr->proto_ctype == IPPROTO_UDPLITE) 144 + return -EOPNOTSUPP; 134 145 135 146 skb_set_transport_header(skb, -(int)sizeof(struct icmp6hdr)); 136 147 ret = gue6_err_proto_handler(guehdr->proto_ctype, skb,
+6 -2
net/ipv6/icmp.c
··· 423 423 static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info, 424 424 const struct in6_addr *force_saddr) 425 425 { 426 - struct net *net = dev_net(skb->dev); 427 426 struct inet6_dev *idev = NULL; 428 427 struct ipv6hdr *hdr = ipv6_hdr(skb); 429 428 struct sock *sk; 429 + struct net *net; 430 430 struct ipv6_pinfo *np; 431 431 const struct in6_addr *saddr = NULL; 432 432 struct dst_entry *dst; ··· 437 437 int iif = 0; 438 438 int addr_type = 0; 439 439 int len; 440 - u32 mark = IP6_REPLY_MARK(net, skb->mark); 440 + u32 mark; 441 441 442 442 if ((u8 *)hdr < skb->head || 443 443 (skb_network_header(skb) + sizeof(*hdr)) > skb_tail_pointer(skb)) 444 444 return; 445 445 446 + if (!skb->dev) 447 + return; 448 + net = dev_net(skb->dev); 449 + mark = IP6_REPLY_MARK(net, skb->mark); 446 450 /* 447 451 * Make sure we respect the rules 448 452 * i.e. RFC 1885 2.4(e)
+10 -4
net/ipv6/ip6_gre.c
··· 922 922 __u8 dsfield = false; 923 923 struct flowi6 fl6; 924 924 int err = -EINVAL; 925 + __be16 proto; 925 926 __u32 mtu; 926 927 int nhoff; 927 928 int thoff; ··· 1036 1035 } 1037 1036 1038 1037 /* Push GRE header. */ 1039 - gre_build_header(skb, 8, TUNNEL_SEQ, 1040 - htons(ETH_P_ERSPAN), 0, htonl(t->o_seqno++)); 1038 + proto = (t->parms.erspan_ver == 1) ? htons(ETH_P_ERSPAN) 1039 + : htons(ETH_P_ERSPAN2); 1040 + gre_build_header(skb, 8, TUNNEL_SEQ, proto, 0, htonl(t->o_seqno++)); 1041 1041 1042 1042 /* TooBig packet may have updated dst->dev's mtu */ 1043 1043 if (!t->parms.collect_md && dst && dst_mtu(dst) > dst->dev->mtu) ··· 1171 1169 t->parms.i_flags = p->i_flags; 1172 1170 t->parms.o_flags = p->o_flags; 1173 1171 t->parms.fwmark = p->fwmark; 1172 + t->parms.erspan_ver = p->erspan_ver; 1173 + t->parms.index = p->index; 1174 + t->parms.dir = p->dir; 1175 + t->parms.hwid = p->hwid; 1174 1176 dst_cache_reset(&t->dst_cache); 1175 1177 } 1176 1178 ··· 2031 2025 struct nlattr *data[], 2032 2026 struct netlink_ext_ack *extack) 2033 2027 { 2034 - struct ip6gre_net *ign = net_generic(dev_net(dev), ip6gre_net_id); 2028 + struct ip6_tnl *t = netdev_priv(dev); 2029 + struct ip6gre_net *ign = net_generic(t->net, ip6gre_net_id); 2035 2030 struct __ip6_tnl_parm p; 2036 - struct ip6_tnl *t; 2037 2031 2038 2032 t = ip6gre_changelink_common(dev, tb, data, &p, extack); 2039 2033 if (IS_ERR(t))
+2 -12
net/ipv6/route.c
··· 4251 4251 struct list_head next; 4252 4252 }; 4253 4253 4254 - static void ip6_print_replace_route_err(struct list_head *rt6_nh_list) 4255 - { 4256 - struct rt6_nh *nh; 4257 - 4258 - list_for_each_entry(nh, rt6_nh_list, next) { 4259 - pr_warn("IPV6: multipath route replace failed (check consistency of installed routes): %pI6c nexthop %pI6c ifi %d\n", 4260 - &nh->r_cfg.fc_dst, &nh->r_cfg.fc_gateway, 4261 - nh->r_cfg.fc_ifindex); 4262 - } 4263 - } 4264 - 4265 4254 static int ip6_route_info_append(struct net *net, 4266 4255 struct list_head *rt6_nh_list, 4267 4256 struct fib6_info *rt, ··· 4396 4407 nh->fib6_info = NULL; 4397 4408 if (err) { 4398 4409 if (replace && nhn) 4399 - ip6_print_replace_route_err(&rt6_nh_list); 4410 + NL_SET_ERR_MSG_MOD(extack, 4411 + "multipath route replace failed (check consistency of installed routes)"); 4400 4412 err_nh = nh; 4401 4413 goto add_errout; 4402 4414 }
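NL_SET_ERR_MSG_MOD(), which replaces the pr_warn() loop above, attaches the string to the netlink extended ack so the failing requester sees it directly (e.g. in iproute2 output) rather than it landing only in the kernel log; the _MOD variant prefixes the module name. A kernel-context sketch:

    static int my_validate(int nh_count, struct netlink_ext_ack *extack)
    {
        if (!nh_count) {
            /* delivered to the netlink caller, prefixed "<module>: " */
            NL_SET_ERR_MSG_MOD(extack, "at least one nexthop required");
            return -EINVAL;
        }
        return 0;
    }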
+17 -9
net/ipv6/udp.c
··· 102 102 return udp_lib_get_port(sk, snum, hash2_nulladdr); 103 103 } 104 104 105 - static void udp_v6_rehash(struct sock *sk) 105 + void udp_v6_rehash(struct sock *sk) 106 106 { 107 107 u16 new_hash = ipv6_portaddr_hash(sock_net(sk), 108 108 &sk->sk_v6_rcv_saddr, ··· 1132 1132 const int hlen = skb_network_header_len(skb) + 1133 1133 sizeof(struct udphdr); 1134 1134 1135 - if (hlen + cork->gso_size > cork->fragsize) 1135 + if (hlen + cork->gso_size > cork->fragsize) { 1136 + kfree_skb(skb); 1136 1137 return -EINVAL; 1137 - if (skb->len > cork->gso_size * UDP_MAX_SEGMENTS) 1138 + } 1139 + if (skb->len > cork->gso_size * UDP_MAX_SEGMENTS) { 1140 + kfree_skb(skb); 1138 1141 return -EINVAL; 1139 - if (udp_sk(sk)->no_check6_tx) 1142 + } 1143 + if (udp_sk(sk)->no_check6_tx) { 1144 + kfree_skb(skb); 1140 1145 return -EINVAL; 1146 + } 1141 1147 if (skb->ip_summed != CHECKSUM_PARTIAL || is_udplite || 1142 - dst_xfrm(skb_dst(skb))) 1148 + dst_xfrm(skb_dst(skb))) { 1149 + kfree_skb(skb); 1143 1150 return -EIO; 1151 + } 1144 1152 1145 1153 skb_shinfo(skb)->gso_size = cork->gso_size; 1146 1154 skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4; ··· 1398 1390 ipc6.opt = opt; 1399 1391 1400 1392 fl6.flowi6_proto = sk->sk_protocol; 1401 - if (!ipv6_addr_any(daddr)) 1402 - fl6.daddr = *daddr; 1403 - else 1404 - fl6.daddr.s6_addr[15] = 0x1; /* :: means loopback (BSD'ism) */ 1393 + fl6.daddr = *daddr; 1405 1394 if (ipv6_addr_any(&fl6.saddr) && !ipv6_addr_any(&np->saddr)) 1406 1395 fl6.saddr = np->saddr; 1407 1396 fl6.fl6_sport = inet->inet_sport; ··· 1425 1420 fl6.daddr = sin6->sin6_addr; 1426 1421 } 1427 1422 } 1423 + 1424 + if (ipv6_addr_any(&fl6.daddr)) 1425 + fl6.daddr.s6_addr[15] = 0x1; /* :: means loopback (BSD'ism) */ 1428 1426 1429 1427 final_p = fl6_update_dst(&fl6, opt, &final); 1430 1428 if (final_p)
+1
net/ipv6/udp_impl.h
··· 13 13 __be32, struct udp_table *); 14 14 15 15 int udp_v6_get_port(struct sock *sk, unsigned short snum); 16 + void udp_v6_rehash(struct sock *sk); 16 17 17 18 int udpv6_getsockopt(struct sock *sk, int level, int optname, 18 19 char __user *optval, int __user *optlen);
+1
net/ipv6/udplite.c
··· 49 49 .recvmsg = udpv6_recvmsg, 50 50 .hash = udp_lib_hash, 51 51 .unhash = udp_lib_unhash, 52 + .rehash = udp_v6_rehash, 52 53 .get_port = udp_v6_get_port, 53 54 .memory_allocated = &udp_memory_allocated, 54 55 .sysctl_mem = sysctl_udp_mem,
+3 -2
net/netfilter/nf_flow_table_core.c
··· 28 28 { 29 29 struct flow_offload_tuple *ft = &flow->tuplehash[dir].tuple; 30 30 struct nf_conntrack_tuple *ctt = &ct->tuplehash[dir].tuple; 31 + struct dst_entry *other_dst = route->tuple[!dir].dst; 31 32 struct dst_entry *dst = route->tuple[dir].dst; 32 33 33 34 ft->dir = dir; ··· 51 50 ft->src_port = ctt->src.u.tcp.port; 52 51 ft->dst_port = ctt->dst.u.tcp.port; 53 52 54 - ft->iifidx = route->tuple[dir].ifindex; 55 - ft->oifidx = route->tuple[!dir].ifindex; 53 + ft->iifidx = other_dst->dev->ifindex; 54 + ft->oifidx = dst->dev->ifindex; 56 55 ft->dst_cache = dst; 57 56 } 58 57
+7 -7
net/netfilter/nf_tables_api.c
··· 2304 2304 struct net *net = sock_net(skb->sk); 2305 2305 unsigned int s_idx = cb->args[0]; 2306 2306 const struct nft_rule *rule; 2307 - int rc = 1; 2308 2307 2309 2308 list_for_each_entry_rcu(rule, &chain->rules, list) { 2310 2309 if (!nft_is_active(net, rule)) ··· 2320 2321 NLM_F_MULTI | NLM_F_APPEND, 2321 2322 table->family, 2322 2323 table, chain, rule) < 0) 2323 - goto out_unfinished; 2324 + return 1; 2324 2325 2325 2326 nl_dump_check_consistent(cb, nlmsg_hdr(skb)); 2326 2327 cont: 2327 2328 (*idx)++; 2328 2329 } 2329 - rc = 0; 2330 - out_unfinished: 2331 - cb->args[0] = *idx; 2332 - return rc; 2330 + return 0; 2333 2331 } 2334 2332 2335 2333 static int nf_tables_dump_rules(struct sk_buff *skb, ··· 2350 2354 if (ctx && ctx->table && strcmp(ctx->table, table->name) != 0) 2351 2355 continue; 2352 2356 2353 - if (ctx && ctx->chain) { 2357 + if (ctx && ctx->table && ctx->chain) { 2354 2358 struct rhlist_head *list, *tmp; 2355 2359 2356 2360 list = rhltable_lookup(&table->chains_ht, ctx->chain, ··· 2378 2382 } 2379 2383 done: 2380 2384 rcu_read_unlock(); 2385 + 2386 + cb->args[0] = idx; 2381 2387 return skb->len; 2382 2388 } 2383 2389 ··· 4506 4508 err5: 4507 4509 kfree(trans); 4508 4510 err4: 4511 + if (obj) 4512 + obj->use--; 4509 4513 kfree(elem.priv); 4510 4514 err3: 4511 4515 if (nla[NFTA_SET_ELEM_DATA] != NULL)
+8 -5
net/netfilter/nft_flow_offload.c
··· 12 12 #include <net/netfilter/nf_conntrack_core.h> 13 13 #include <linux/netfilter/nf_conntrack_common.h> 14 14 #include <net/netfilter/nf_flow_table.h> 15 + #include <net/netfilter/nf_conntrack_helper.h> 15 16 16 17 struct nft_flow_offload { 17 18 struct nft_flowtable *flowtable; ··· 30 29 memset(&fl, 0, sizeof(fl)); 31 30 switch (nft_pf(pkt)) { 32 31 case NFPROTO_IPV4: 33 - fl.u.ip4.daddr = ct->tuplehash[!dir].tuple.dst.u3.ip; 32 + fl.u.ip4.daddr = ct->tuplehash[dir].tuple.src.u3.ip; 33 + fl.u.ip4.flowi4_oif = nft_in(pkt)->ifindex; 34 34 break; 35 35 case NFPROTO_IPV6: 36 - fl.u.ip6.daddr = ct->tuplehash[!dir].tuple.dst.u3.in6; 36 + fl.u.ip6.daddr = ct->tuplehash[dir].tuple.src.u3.in6; 37 + fl.u.ip6.flowi6_oif = nft_in(pkt)->ifindex; 37 38 break; 38 39 } 39 40 ··· 44 41 return -ENOENT; 45 42 46 43 route->tuple[dir].dst = this_dst; 47 - route->tuple[dir].ifindex = nft_in(pkt)->ifindex; 48 44 route->tuple[!dir].dst = other_dst; 49 - route->tuple[!dir].ifindex = nft_out(pkt)->ifindex; 50 45 51 46 return 0; 52 47 } ··· 67 66 { 68 67 struct nft_flow_offload *priv = nft_expr_priv(expr); 69 68 struct nf_flowtable *flowtable = &priv->flowtable->data; 69 + const struct nf_conn_help *help; 70 70 enum ip_conntrack_info ctinfo; 71 71 struct nf_flow_route route; 72 72 struct flow_offload *flow; ··· 90 88 goto out; 91 89 } 92 90 93 - if (test_bit(IPS_HELPER_BIT, &ct->status)) 91 + help = nfct_help(ct); 92 + if (help) 94 93 goto out; 95 94 96 95 if (ctinfo == IP_CT_NEW ||
+5 -3
net/openvswitch/flow.c
··· 276 276 277 277 nexthdr = ipv6_find_hdr(skb, &payload_ofs, -1, &frag_off, &flags); 278 278 if (flags & IP6_FH_F_FRAG) { 279 - if (frag_off) 279 + if (frag_off) { 280 280 key->ip.frag = OVS_FRAG_TYPE_LATER; 281 - else 282 - key->ip.frag = OVS_FRAG_TYPE_FIRST; 281 + key->ip.proto = nexthdr; 282 + return 0; 283 + } 284 + key->ip.frag = OVS_FRAG_TYPE_FIRST; 283 285 } else { 284 286 key->ip.frag = OVS_FRAG_TYPE_NONE; 285 287 }
+1 -1
net/openvswitch/flow_netlink.c
··· 500 500 return -EINVAL; 501 501 } 502 502 503 - if (!nz || !is_all_zero(nla_data(nla), expected_len)) { 503 + if (!nz || !is_all_zero(nla_data(nla), nla_len(nla))) { 504 504 attrs |= 1 << type; 505 505 a[type] = nla; 506 506 }
+4 -3
net/packet/af_packet.c
··· 2628 2628 addr = saddr->sll_halen ? saddr->sll_addr : NULL; 2629 2629 dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex); 2630 2630 if (addr && dev && saddr->sll_halen < dev->addr_len) 2631 - goto out; 2631 + goto out_put; 2632 2632 } 2633 2633 2634 2634 err = -ENXIO; ··· 2828 2828 addr = saddr->sll_halen ? saddr->sll_addr : NULL; 2829 2829 dev = dev_get_by_index(sock_net(sk), saddr->sll_ifindex); 2830 2830 if (addr && dev && saddr->sll_halen < dev->addr_len) 2831 - goto out; 2831 + goto out_unlock; 2832 2832 } 2833 2833 2834 2834 err = -ENXIO; ··· 2887 2887 goto out_free; 2888 2888 } else if (reserve) { 2889 2889 skb_reserve(skb, -reserve); 2890 - if (len < reserve) 2890 + if (len < reserve + sizeof(struct ipv6hdr) && 2891 + dev->min_header_len != dev->hard_header_len) 2891 2892 skb_reset_network_header(skb); 2892 2893 } 2893 2894
+2 -2
net/rds/ib_send.c
··· 522 522 if (be32_to_cpu(rm->m_inc.i_hdr.h_len) == 0) 523 523 i = 1; 524 524 else 525 - i = ceil(be32_to_cpu(rm->m_inc.i_hdr.h_len), RDS_FRAG_SIZE); 525 + i = DIV_ROUND_UP(be32_to_cpu(rm->m_inc.i_hdr.h_len), RDS_FRAG_SIZE); 526 526 527 527 work_alloc = rds_ib_ring_alloc(&ic->i_send_ring, i, &pos); 528 528 if (work_alloc == 0) { ··· 879 879 * Instead of knowing how to return a partial rdma read/write we insist that there 880 880 * be enough work requests to send the entire message. 881 881 */ 882 - i = ceil(op->op_count, max_sge); 882 + i = DIV_ROUND_UP(op->op_count, max_sge); 883 883 884 884 work_alloc = rds_ib_ring_alloc(&ic->i_send_ring, i, &pos); 885 885 if (work_alloc != i) {
+2 -2
net/rds/message.c
··· 341 341 { 342 342 struct rds_message *rm; 343 343 unsigned int i; 344 - int num_sgs = ceil(total_len, PAGE_SIZE); 344 + int num_sgs = DIV_ROUND_UP(total_len, PAGE_SIZE); 345 345 int extra_bytes = num_sgs * sizeof(struct scatterlist); 346 346 int ret; 347 347 ··· 351 351 352 352 set_bit(RDS_MSG_PAGEVEC, &rm->m_flags); 353 353 rm->m_inc.i_hdr.h_len = cpu_to_be32(total_len); 354 - rm->data.op_nents = ceil(total_len, PAGE_SIZE); 354 + rm->data.op_nents = DIV_ROUND_UP(total_len, PAGE_SIZE); 355 355 rm->data.op_sg = rds_message_alloc_sgs(rm, num_sgs, &ret); 356 356 if (!rm->data.op_sg) { 357 357 rds_message_put(rm);
-4
net/rds/rds.h
··· 48 48 } 49 49 #endif 50 50 51 - /* XXX is there one of these somewhere? */ 52 - #define ceil(x, y) \ 53 - ({ unsigned long __x = (x), __y = (y); (__x + __y - 1) / __y; }) 54 - 55 51 #define RDS_FRAG_SHIFT 12 56 52 #define RDS_FRAG_SIZE ((unsigned int)(1 << RDS_FRAG_SHIFT)) 57 53
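Note: the rds hunks on either side replace the private ceil() macro (whose own comment, "XXX is there one of these somewhere?", suspected a duplicate) with DIV_ROUND_UP() from <linux/kernel.h>. The two compute the same ceiling division; for reference (len is a placeholder payload length):

    #include <linux/kernel.h>

    /* DIV_ROUND_UP(n, d) expands to ((n) + (d) - 1) / (d), e.g.:
     *   DIV_ROUND_UP(4096, 4096) == 1
     *   DIV_ROUND_UP(4097, 4096) == 2
     * so the number of fragments needed for len bytes is simply: */
    unsigned int nfrags = DIV_ROUND_UP(len, RDS_FRAG_SIZE);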
+1 -1
net/rds/send.c
··· 1107 1107 size_t total_payload_len = payload_len, rdma_payload_len = 0; 1108 1108 bool zcopy = ((msg->msg_flags & MSG_ZEROCOPY) && 1109 1109 sock_flag(rds_rs_to_sk(rs), SOCK_ZEROCOPY)); 1110 - int num_sgs = ceil(payload_len, PAGE_SIZE); 1110 + int num_sgs = DIV_ROUND_UP(payload_len, PAGE_SIZE); 1111 1111 int namelen; 1112 1112 struct rds_iov_vector_arr vct; 1113 1113 int ind;
-70
net/rxrpc/af_rxrpc.c
··· 419 419 EXPORT_SYMBOL(rxrpc_kernel_get_epoch); 420 420 421 421 /** 422 - * rxrpc_kernel_check_call - Check a call's state 423 - * @sock: The socket the call is on 424 - * @call: The call to check 425 - * @_compl: Where to store the completion state 426 - * @_abort_code: Where to store any abort code 427 - * 428 - * Allow a kernel service to query the state of a call and find out the manner 429 - * of its termination if it has completed. Returns -EINPROGRESS if the call is 430 - * still going, 0 if the call finished successfully, -ECONNABORTED if the call 431 - * was aborted and an appropriate error if the call failed in some other way. 432 - */ 433 - int rxrpc_kernel_check_call(struct socket *sock, struct rxrpc_call *call, 434 - enum rxrpc_call_completion *_compl, u32 *_abort_code) 435 - { 436 - if (call->state != RXRPC_CALL_COMPLETE) 437 - return -EINPROGRESS; 438 - smp_rmb(); 439 - *_compl = call->completion; 440 - *_abort_code = call->abort_code; 441 - return call->error; 442 - } 443 - EXPORT_SYMBOL(rxrpc_kernel_check_call); 444 - 445 - /** 446 - * rxrpc_kernel_retry_call - Allow a kernel service to retry a call 447 - * @sock: The socket the call is on 448 - * @call: The call to retry 449 - * @srx: The address of the peer to contact 450 - * @key: The security context to use (defaults to socket setting) 451 - * 452 - * Allow a kernel service to try resending a client call that failed due to a 453 - * network error to a new address. The Tx queue is maintained intact, thereby 454 - * relieving the need to re-encrypt any request data that has already been 455 - * buffered. 456 - */ 457 - int rxrpc_kernel_retry_call(struct socket *sock, struct rxrpc_call *call, 458 - struct sockaddr_rxrpc *srx, struct key *key) 459 - { 460 - struct rxrpc_conn_parameters cp; 461 - struct rxrpc_sock *rx = rxrpc_sk(sock->sk); 462 - int ret; 463 - 464 - _enter("%d{%d}", call->debug_id, atomic_read(&call->usage)); 465 - 466 - if (!key) 467 - key = rx->key; 468 - if (key && !key->payload.data[0]) 469 - key = NULL; /* a no-security key */ 470 - 471 - memset(&cp, 0, sizeof(cp)); 472 - cp.local = rx->local; 473 - cp.key = key; 474 - cp.security_level = 0; 475 - cp.exclusive = false; 476 - cp.service_id = srx->srx_service; 477 - 478 - mutex_lock(&call->user_mutex); 479 - 480 - ret = rxrpc_prepare_call_for_retry(rx, call); 481 - if (ret == 0) 482 - ret = rxrpc_retry_client_call(rx, call, &cp, srx, GFP_KERNEL); 483 - 484 - mutex_unlock(&call->user_mutex); 485 - rxrpc_put_peer(cp.peer); 486 - _leave(" = %d", ret); 487 - return ret; 488 - } 489 - EXPORT_SYMBOL(rxrpc_kernel_retry_call); 490 - 491 - /** 492 422 * rxrpc_kernel_new_call_notification - Get notifications of new calls 493 423 * @sock: The socket to intercept received messages on 494 424 * @notify_new_call: Function to be called when new calls appear
+12 -7
net/rxrpc/ar-internal.h
··· 476 476 RXRPC_CALL_EXPOSED, /* The call was exposed to the world */ 477 477 RXRPC_CALL_RX_LAST, /* Received the last packet (at rxtx_top) */ 478 478 RXRPC_CALL_TX_LAST, /* Last packet in Tx buffer (at rxtx_top) */ 479 - RXRPC_CALL_TX_LASTQ, /* Last packet has been queued */ 480 479 RXRPC_CALL_SEND_PING, /* A ping will need to be sent */ 481 480 RXRPC_CALL_PINGING, /* Ping in process */ 482 481 RXRPC_CALL_RETRANS_TIMEOUT, /* Retransmission due to timeout occurred */ ··· 514 515 RXRPC_CALL_SERVER_AWAIT_ACK, /* - server awaiting final ACK */ 515 516 RXRPC_CALL_COMPLETE, /* - call complete */ 516 517 NR__RXRPC_CALL_STATES 518 + }; 519 + 520 + /* 521 + * Call completion condition (state == RXRPC_CALL_COMPLETE). 522 + */ 523 + enum rxrpc_call_completion { 524 + RXRPC_CALL_SUCCEEDED, /* - Normal termination */ 525 + RXRPC_CALL_REMOTELY_ABORTED, /* - call aborted by peer */ 526 + RXRPC_CALL_LOCALLY_ABORTED, /* - call aborted locally on error or close */ 527 + RXRPC_CALL_LOCAL_ERROR, /* - call failed due to local error */ 528 + RXRPC_CALL_NETWORK_ERROR, /* - call terminated by network error */ 529 + NR__RXRPC_CALL_COMPLETIONS 517 530 }; 518 531 519 532 /* ··· 772 761 struct sockaddr_rxrpc *, 773 762 struct rxrpc_call_params *, gfp_t, 774 763 unsigned int); 775 - int rxrpc_retry_client_call(struct rxrpc_sock *, 776 - struct rxrpc_call *, 777 - struct rxrpc_conn_parameters *, 778 - struct sockaddr_rxrpc *, 779 - gfp_t); 780 764 void rxrpc_incoming_call(struct rxrpc_sock *, struct rxrpc_call *, 781 765 struct sk_buff *); 782 766 void rxrpc_release_call(struct rxrpc_sock *, struct rxrpc_call *); 783 - int rxrpc_prepare_call_for_retry(struct rxrpc_sock *, struct rxrpc_call *); 784 767 void rxrpc_release_calls_on_socket(struct rxrpc_sock *); 785 768 bool __rxrpc_queue_call(struct rxrpc_call *); 786 769 bool rxrpc_queue_call(struct rxrpc_call *);
-97
net/rxrpc/call_object.c
··· 325 325 } 326 326 327 327 /* 328 - * Retry a call to a new address. It is expected that the Tx queue of the call 329 - * will contain data previously packaged for an old call. 330 - */ 331 - int rxrpc_retry_client_call(struct rxrpc_sock *rx, 332 - struct rxrpc_call *call, 333 - struct rxrpc_conn_parameters *cp, 334 - struct sockaddr_rxrpc *srx, 335 - gfp_t gfp) 336 - { 337 - const void *here = __builtin_return_address(0); 338 - int ret; 339 - 340 - /* Set up or get a connection record and set the protocol parameters, 341 - * including channel number and call ID. 342 - */ 343 - ret = rxrpc_connect_call(rx, call, cp, srx, gfp); 344 - if (ret < 0) 345 - goto error; 346 - 347 - trace_rxrpc_call(call, rxrpc_call_connected, atomic_read(&call->usage), 348 - here, NULL); 349 - 350 - rxrpc_start_call_timer(call); 351 - 352 - _net("CALL new %d on CONN %d", call->debug_id, call->conn->debug_id); 353 - 354 - if (!test_and_set_bit(RXRPC_CALL_EV_RESEND, &call->events)) 355 - rxrpc_queue_call(call); 356 - 357 - _leave(" = 0"); 358 - return 0; 359 - 360 - error: 361 - rxrpc_set_call_completion(call, RXRPC_CALL_LOCAL_ERROR, 362 - RX_CALL_DEAD, ret); 363 - trace_rxrpc_call(call, rxrpc_call_error, atomic_read(&call->usage), 364 - here, ERR_PTR(ret)); 365 - _leave(" = %d", ret); 366 - return ret; 367 - } 368 - 369 - /* 370 328 * Set up an incoming call. call->conn points to the connection. 371 329 * This is called in BH context and isn't allowed to fail. 372 330 */ ··· 489 531 } 490 532 491 533 _leave(""); 492 - } 493 - 494 - /* 495 - * Prepare a kernel service call for retry. 496 - */ 497 - int rxrpc_prepare_call_for_retry(struct rxrpc_sock *rx, struct rxrpc_call *call) 498 - { 499 - const void *here = __builtin_return_address(0); 500 - int i; 501 - u8 last = 0; 502 - 503 - _enter("{%d,%d}", call->debug_id, atomic_read(&call->usage)); 504 - 505 - trace_rxrpc_call(call, rxrpc_call_release, atomic_read(&call->usage), 506 - here, (const void *)call->flags); 507 - 508 - ASSERTCMP(call->state, ==, RXRPC_CALL_COMPLETE); 509 - ASSERTCMP(call->completion, !=, RXRPC_CALL_REMOTELY_ABORTED); 510 - ASSERTCMP(call->completion, !=, RXRPC_CALL_LOCALLY_ABORTED); 511 - ASSERT(list_empty(&call->recvmsg_link)); 512 - 513 - del_timer_sync(&call->timer); 514 - 515 - _debug("RELEASE CALL %p (%d CONN %p)", call, call->debug_id, call->conn); 516 - 517 - if (call->conn) 518 - rxrpc_disconnect_call(call); 519 - 520 - if (rxrpc_is_service_call(call) || 521 - !call->tx_phase || 522 - call->tx_hard_ack != 0 || 523 - call->rx_hard_ack != 0 || 524 - call->rx_top != 0) 525 - return -EINVAL; 526 - 527 - call->state = RXRPC_CALL_UNINITIALISED; 528 - call->completion = RXRPC_CALL_SUCCEEDED; 529 - call->call_id = 0; 530 - call->cid = 0; 531 - call->cong_cwnd = 0; 532 - call->cong_extra = 0; 533 - call->cong_ssthresh = 0; 534 - call->cong_mode = 0; 535 - call->cong_dup_acks = 0; 536 - call->cong_cumul_acks = 0; 537 - call->acks_lowest_nak = 0; 538 - 539 - for (i = 0; i < RXRPC_RXTX_BUFF_SIZE; i++) { 540 - last |= call->rxtx_annotations[i]; 541 - call->rxtx_annotations[i] &= RXRPC_TX_ANNO_LAST; 542 - call->rxtx_annotations[i] |= RXRPC_TX_ANNO_RETRANS; 543 - } 544 - 545 - _leave(" = 0"); 546 - return 0; 547 534 } 548 535 549 536 /*
+1 -4
net/rxrpc/conn_client.c
··· 562 562 clear_bit(RXRPC_CONN_FINAL_ACK_0 + channel, &conn->flags); 563 563 564 564 write_lock_bh(&call->state_lock); 565 - if (!test_bit(RXRPC_CALL_TX_LASTQ, &call->flags)) 566 - call->state = RXRPC_CALL_CLIENT_SEND_REQUEST; 567 - else 568 - call->state = RXRPC_CALL_CLIENT_AWAIT_REPLY; 565 + call->state = RXRPC_CALL_CLIENT_SEND_REQUEST; 569 566 write_unlock_bh(&call->state_lock); 570 567 571 568 rxrpc_see_call(call);
+11 -13
net/rxrpc/sendmsg.c
··· 169 169 170 170 ASSERTCMP(seq, ==, call->tx_top + 1); 171 171 172 - if (last) { 172 + if (last) 173 173 annotation |= RXRPC_TX_ANNO_LAST; 174 - set_bit(RXRPC_CALL_TX_LASTQ, &call->flags); 175 - } 176 174 177 175 /* We have to set the timestamp before queueing as the retransmit 178 176 * algorithm can see the packet as soon as we queue it. ··· 384 386 call->tx_total_len -= copy; 385 387 } 386 388 389 + /* check for the far side aborting the call or a network error 390 + * occurring */ 391 + if (call->state == RXRPC_CALL_COMPLETE) 392 + goto call_terminated; 393 + 387 394 /* add the packet to the send queue if it's now full */ 388 395 if (sp->remain <= 0 || 389 396 (msg_data_left(msg) == 0 && !more)) { ··· 428 425 notify_end_tx); 429 426 skb = NULL; 430 427 } 431 - 432 - /* Check for the far side aborting the call or a network error 433 - * occurring. If this happens, save any packet that was under 434 - * construction so that in the case of a network error, the 435 - * call can be retried or redirected. 436 - */ 437 - if (call->state == RXRPC_CALL_COMPLETE) { 438 - ret = call->error; 439 - goto out; 440 - } 441 428 } while (msg_data_left(msg) > 0); 442 429 443 430 success: ··· 436 443 call->tx_pending = skb; 437 444 _leave(" = %d", ret); 438 445 return ret; 446 + 447 + call_terminated: 448 + rxrpc_free_skb(skb, rxrpc_skb_tx_freed); 449 + _leave(" = %d", call->error); 450 + return call->error; 439 451 440 452 maybe_error: 441 453 if (copied)
+11 -8
net/sched/act_tunnel_key.c
··· 197 197 [TCA_TUNNEL_KEY_ENC_TTL] = { .type = NLA_U8 }, 198 198 }; 199 199 200 + static void tunnel_key_release_params(struct tcf_tunnel_key_params *p) 201 + { 202 + if (!p) 203 + return; 204 + if (p->tcft_action == TCA_TUNNEL_KEY_ACT_SET) 205 + dst_release(&p->tcft_enc_metadata->dst); 206 + kfree_rcu(p, rcu); 207 + } 208 + 200 209 static int tunnel_key_init(struct net *net, struct nlattr *nla, 201 210 struct nlattr *est, struct tc_action **a, 202 211 int ovr, int bind, bool rtnl_held, ··· 369 360 rcu_swap_protected(t->params, params_new, 370 361 lockdep_is_held(&t->tcf_lock)); 371 362 spin_unlock_bh(&t->tcf_lock); 372 - if (params_new) 373 - kfree_rcu(params_new, rcu); 363 + tunnel_key_release_params(params_new); 374 364 375 365 if (ret == ACT_P_CREATED) 376 366 tcf_idr_insert(tn, *a); ··· 393 385 struct tcf_tunnel_key_params *params; 394 386 395 387 params = rcu_dereference_protected(t->params, 1); 396 - if (params) { 397 - if (params->tcft_action == TCA_TUNNEL_KEY_ACT_SET) 398 - dst_release(&params->tcft_enc_metadata->dst); 399 - 400 - kfree_rcu(params, rcu); 401 - } 388 + tunnel_key_release_params(params); 402 389 } 403 390 404 391 static int tunnel_key_geneve_opts_dump(struct sk_buff *skb,
+1 -2
net/sched/cls_api.c
··· 1277 1277 int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp, 1278 1278 struct tcf_result *res, bool compat_mode) 1279 1279 { 1280 - __be16 protocol = tc_skb_protocol(skb); 1281 1280 #ifdef CONFIG_NET_CLS_ACT 1282 1281 const int max_reclassify_loop = 4; 1283 1282 const struct tcf_proto *orig_tp = tp; ··· 1286 1287 reclassify: 1287 1288 #endif 1288 1289 for (; tp; tp = rcu_dereference_bh(tp->next)) { 1290 + __be16 protocol = tc_skb_protocol(skb); 1289 1291 int err; 1290 1292 1291 1293 if (tp->protocol != protocol && ··· 1319 1319 } 1320 1320 1321 1321 tp = first_tp; 1322 - protocol = tc_skb_protocol(skb); 1323 1322 goto reclassify; 1324 1323 #endif 1325 1324 }
+14 -5
net/sched/cls_flower.c
··· 1290 1290 struct cls_fl_head *head = rtnl_dereference(tp->root); 1291 1291 struct cls_fl_filter *fold = *arg; 1292 1292 struct cls_fl_filter *fnew; 1293 + struct fl_flow_mask *mask; 1293 1294 struct nlattr **tb; 1294 - struct fl_flow_mask mask = {}; 1295 1295 int err; 1296 1296 1297 1297 if (!tca[TCA_OPTIONS]) 1298 1298 return -EINVAL; 1299 1299 1300 - tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL); 1301 - if (!tb) 1300 + mask = kzalloc(sizeof(struct fl_flow_mask), GFP_KERNEL); 1301 + if (!mask) 1302 1302 return -ENOBUFS; 1303 + 1304 + tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL); 1305 + if (!tb) { 1306 + err = -ENOBUFS; 1307 + goto errout_mask_alloc; 1308 + } 1303 1309 1304 1310 err = nla_parse_nested(tb, TCA_FLOWER_MAX, tca[TCA_OPTIONS], 1305 1311 fl_policy, NULL); ··· 1349 1343 } 1350 1344 } 1351 1345 1352 - err = fl_set_parms(net, tp, fnew, &mask, base, tb, tca[TCA_RATE], ovr, 1346 + err = fl_set_parms(net, tp, fnew, mask, base, tb, tca[TCA_RATE], ovr, 1353 1347 tp->chain->tmplt_priv, extack); 1354 1348 if (err) 1355 1349 goto errout_idr; 1356 1350 1357 - err = fl_check_assign_mask(head, fnew, fold, &mask); 1351 + err = fl_check_assign_mask(head, fnew, fold, mask); 1358 1352 if (err) 1359 1353 goto errout_idr; 1360 1354 ··· 1398 1392 } 1399 1393 1400 1394 kfree(tb); 1395 + kfree(mask); 1401 1396 return 0; 1402 1397 1403 1398 errout_mask: ··· 1412 1405 kfree(fnew); 1413 1406 errout_tb: 1414 1407 kfree(tb); 1408 + errout_mask_alloc: 1409 + kfree(mask); 1415 1410 return err; 1416 1411 } 1417 1412
+3 -2
net/sched/sch_cake.c
··· 1667 1667 if (skb_is_gso(skb) && q->rate_flags & CAKE_FLAG_SPLIT_GSO) { 1668 1668 struct sk_buff *segs, *nskb; 1669 1669 netdev_features_t features = netif_skb_features(skb); 1670 - unsigned int slen = 0; 1670 + unsigned int slen = 0, numsegs = 0; 1671 1671 1672 1672 segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK); 1673 1673 if (IS_ERR_OR_NULL(segs)) ··· 1683 1683 flow_queue_add(flow, segs); 1684 1684 1685 1685 sch->q.qlen++; 1686 + numsegs++; 1686 1687 slen += segs->len; 1687 1688 q->buffer_used += segs->truesize; 1688 1689 b->packets++; ··· 1697 1696 sch->qstats.backlog += slen; 1698 1697 q->avg_window_bytes += slen; 1699 1698 1700 - qdisc_tree_reduce_backlog(sch, 1, len); 1699 + qdisc_tree_reduce_backlog(sch, 1-numsegs, len-slen); 1701 1700 consume_skb(skb); 1702 1701 } else { 1703 1702 /* not splitting */
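Note: qdisc_tree_reduce_backlog() subtracts its packet and byte arguments from every ancestor qdisc. When cake splits one GSO skb into numsegs segments, the ancestors have already accounted the original as one packet of len bytes, so the correction must be the (negative) delta; the old full (1, len) reduction erased the ancestors' accounting of the segments entirely. With hypothetical numbers:

    /* One 3000-byte GSO skb split into numsegs = 2 segments totalling
     * slen = 3028 bytes (link headers are duplicated per segment).
     * Ancestors must end up seeing 2 packets / 3028 bytes, hence: */
    qdisc_tree_reduce_backlog(sch, 1 - numsegs, len - slen);
    /* == qdisc_tree_reduce_backlog(sch, -1, -28): subtracting a
     * negative delta grows the recorded qlen/backlog as required. */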
+2 -1
net/sched/sch_cbs.c
··· 88 88 struct Qdisc *child, 89 89 struct sk_buff **to_free) 90 90 { 91 + unsigned int len = qdisc_pkt_len(skb); 91 92 int err; 92 93 93 94 err = child->ops->enqueue(skb, child, to_free); 94 95 if (err != NET_XMIT_SUCCESS) 95 96 return err; 96 97 97 - qdisc_qstats_backlog_inc(sch, skb); 98 + sch->qstats.backlog += len; 98 99 sch->q.qlen++; 99 100 100 101 return NET_XMIT_SUCCESS;
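Note: sch_cbs opens a series of identical conversions (drr, dsmark, hfsc, htb, prio, qfq and tbf follow below). After a successful child enqueue the skb may already be gone, since a GSO-splitting child consumes the original, so qdisc_qstats_backlog_inc(sch, skb), which reads qdisc_pkt_len(skb), was a potential use-after-free. The length is now captured once, up front. A minimal sketch of the shared pattern (child_qdisc() is a hypothetical accessor):

    #include <net/sch_generic.h>

    static int example_enqueue(struct sk_buff *skb, struct Qdisc *sch,
                               struct sk_buff **to_free)
    {
            unsigned int len = qdisc_pkt_len(skb);  /* read before skb can go away */
            int err;

            err = qdisc_enqueue(skb, child_qdisc(sch), to_free); /* may free skb */
            if (err != NET_XMIT_SUCCESS)
                    return err;

            sch->qstats.backlog += len;     /* safe: uses the saved length */
            sch->q.qlen++;
            return NET_XMIT_SUCCESS;
    }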
+5 -2
net/sched/sch_drr.c
··· 350 350 static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch, 351 351 struct sk_buff **to_free) 352 352 { 353 + unsigned int len = qdisc_pkt_len(skb); 353 354 struct drr_sched *q = qdisc_priv(sch); 354 355 struct drr_class *cl; 355 356 int err = 0; 357 + bool first; 356 358 357 359 cl = drr_classify(skb, sch, &err); 358 360 if (cl == NULL) { ··· 364 362 return err; 365 363 } 366 364 365 + first = !cl->qdisc->q.qlen; 367 366 err = qdisc_enqueue(skb, cl->qdisc, to_free); 368 367 if (unlikely(err != NET_XMIT_SUCCESS)) { 369 368 if (net_xmit_drop_count(err)) { ··· 374 371 return err; 375 372 } 376 373 377 - if (cl->qdisc->q.qlen == 1) { 374 + if (first) { 378 375 list_add_tail(&cl->alist, &q->active); 379 376 cl->deficit = cl->quantum; 380 377 } 381 378 382 - qdisc_qstats_backlog_inc(sch, skb); 379 + sch->qstats.backlog += len; 383 380 sch->q.qlen++; 384 381 return err; 385 382 }
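Note: drr adds the second half of the same defence. A GSO-splitting child can enqueue several packets in one call, so checking cl->qdisc->q.qlen == 1 afterwards can miss the empty-to-busy transition and leave the class off the active list forever. Testing emptiness before the enqueue is immune to that:

    bool first = !cl->qdisc->q.qlen;        /* empty *before* enqueue? */

    err = qdisc_enqueue(skb, cl->qdisc, to_free);
    if (unlikely(err != NET_XMIT_SUCCESS))
            return err;                     /* (drop accounting elided) */
    if (first)                              /* not: cl->qdisc->q.qlen == 1 */
            list_add_tail(&cl->alist, &q->active);

hfsc and qfq below apply the identical before/after test.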
+2 -1
net/sched/sch_dsmark.c
··· 199 199 static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch, 200 200 struct sk_buff **to_free) 201 201 { 202 + unsigned int len = qdisc_pkt_len(skb); 202 203 struct dsmark_qdisc_data *p = qdisc_priv(sch); 203 204 int err; 204 205 ··· 272 271 return err; 273 272 } 274 273 275 - qdisc_qstats_backlog_inc(sch, skb); 274 + sch->qstats.backlog += len; 276 275 sch->q.qlen++; 277 276 278 277 return NET_XMIT_SUCCESS;
+5 -4
net/sched/sch_hfsc.c
··· 1539 1539 static int 1540 1540 hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) 1541 1541 { 1542 + unsigned int len = qdisc_pkt_len(skb); 1542 1543 struct hfsc_class *cl; 1543 1544 int uninitialized_var(err); 1545 + bool first; 1544 1546 1545 1547 cl = hfsc_classify(skb, sch, &err); 1546 1548 if (cl == NULL) { ··· 1552 1550 return err; 1553 1551 } 1554 1552 1553 + first = !cl->qdisc->q.qlen; 1555 1554 err = qdisc_enqueue(skb, cl->qdisc, to_free); 1556 1555 if (unlikely(err != NET_XMIT_SUCCESS)) { 1557 1556 if (net_xmit_drop_count(err)) { ··· 1562 1559 return err; 1563 1560 } 1564 1561 1565 - if (cl->qdisc->q.qlen == 1) { 1566 - unsigned int len = qdisc_pkt_len(skb); 1567 - 1562 + if (first) { 1568 1563 if (cl->cl_flags & HFSC_RSC) 1569 1564 init_ed(cl, len); 1570 1565 if (cl->cl_flags & HFSC_FSC) ··· 1577 1576 1578 1577 } 1579 1578 1580 - qdisc_qstats_backlog_inc(sch, skb); 1579 + sch->qstats.backlog += len; 1581 1580 sch->q.qlen++; 1582 1581 1583 1582 return NET_XMIT_SUCCESS;
+2 -1
net/sched/sch_htb.c
··· 581 581 struct sk_buff **to_free) 582 582 { 583 583 int uninitialized_var(ret); 584 + unsigned int len = qdisc_pkt_len(skb); 584 585 struct htb_sched *q = qdisc_priv(sch); 585 586 struct htb_class *cl = htb_classify(skb, sch, &ret); 586 587 ··· 611 610 htb_activate(q, cl); 612 611 } 613 612 614 - qdisc_qstats_backlog_inc(sch, skb); 613 + sch->qstats.backlog += len; 615 614 sch->q.qlen++; 616 615 return NET_XMIT_SUCCESS; 617 616 }
+2 -1
net/sched/sch_prio.c
··· 72 72 static int 73 73 prio_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) 74 74 { 75 + unsigned int len = qdisc_pkt_len(skb); 75 76 struct Qdisc *qdisc; 76 77 int ret; 77 78 ··· 89 88 90 89 ret = qdisc_enqueue(skb, qdisc, to_free); 91 90 if (ret == NET_XMIT_SUCCESS) { 92 - qdisc_qstats_backlog_inc(sch, skb); 91 + sch->qstats.backlog += len; 93 92 sch->q.qlen++; 94 93 return NET_XMIT_SUCCESS; 95 94 }
+12 -8
net/sched/sch_qfq.c
··· 1210 1210 static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch, 1211 1211 struct sk_buff **to_free) 1212 1212 { 1213 + unsigned int len = qdisc_pkt_len(skb), gso_segs; 1213 1214 struct qfq_sched *q = qdisc_priv(sch); 1214 1215 struct qfq_class *cl; 1215 1216 struct qfq_aggregate *agg; 1216 1217 int err = 0; 1218 + bool first; 1217 1219 1218 1220 cl = qfq_classify(skb, sch, &err); 1219 1221 if (cl == NULL) { ··· 1226 1224 } 1227 1225 pr_debug("qfq_enqueue: cl = %x\n", cl->common.classid); 1228 1226 1229 - if (unlikely(cl->agg->lmax < qdisc_pkt_len(skb))) { 1227 + if (unlikely(cl->agg->lmax < len)) { 1230 1228 pr_debug("qfq: increasing maxpkt from %u to %u for class %u", 1231 - cl->agg->lmax, qdisc_pkt_len(skb), cl->common.classid); 1232 - err = qfq_change_agg(sch, cl, cl->agg->class_weight, 1233 - qdisc_pkt_len(skb)); 1229 + cl->agg->lmax, len, cl->common.classid); 1230 + err = qfq_change_agg(sch, cl, cl->agg->class_weight, len); 1234 1231 if (err) { 1235 1232 cl->qstats.drops++; 1236 1233 return qdisc_drop(skb, sch, to_free); 1237 1234 } 1238 1235 } 1239 1236 1237 + gso_segs = skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1; 1238 + first = !cl->qdisc->q.qlen; 1240 1239 err = qdisc_enqueue(skb, cl->qdisc, to_free); 1241 1240 if (unlikely(err != NET_XMIT_SUCCESS)) { 1242 1241 pr_debug("qfq_enqueue: enqueue failed %d\n", err); ··· 1248 1245 return err; 1249 1246 } 1250 1247 1251 - bstats_update(&cl->bstats, skb); 1252 - qdisc_qstats_backlog_inc(sch, skb); 1248 + cl->bstats.bytes += len; 1249 + cl->bstats.packets += gso_segs; 1250 + sch->qstats.backlog += len; 1253 1251 ++sch->q.qlen; 1254 1252 1255 1253 agg = cl->agg; 1256 1254 /* if the queue was not empty, then done here */ 1257 - if (cl->qdisc->q.qlen != 1) { 1255 + if (!first) { 1258 1256 if (unlikely(skb == cl->qdisc->ops->peek(cl->qdisc)) && 1259 1257 list_first_entry(&agg->active, struct qfq_class, alist) 1260 - == cl && cl->deficit < qdisc_pkt_len(skb)) 1258 + == cl && cl->deficit < len) 1261 1259 list_move_tail(&cl->alist, &agg->active); 1262 1260 1263 1261 return err;
+2 -1
net/sched/sch_tbf.c
··· 185 185 struct sk_buff **to_free) 186 186 { 187 187 struct tbf_sched_data *q = qdisc_priv(sch); 188 + unsigned int len = qdisc_pkt_len(skb); 188 189 int ret; 189 190 190 191 if (qdisc_pkt_len(skb) > q->max_size) { ··· 201 200 return ret; 202 201 } 203 202 204 - qdisc_qstats_backlog_inc(sch, skb); 203 + sch->qstats.backlog += len; 205 204 sch->q.qlen++; 206 205 return NET_XMIT_SUCCESS; 207 206 }
+1 -4
net/sctp/ipv6.c
··· 97 97 98 98 switch (ev) { 99 99 case NETDEV_UP: 100 - addr = kmalloc(sizeof(struct sctp_sockaddr_entry), GFP_ATOMIC); 100 + addr = kzalloc(sizeof(*addr), GFP_ATOMIC); 101 101 if (addr) { 102 102 addr->a.v6.sin6_family = AF_INET6; 103 - addr->a.v6.sin6_port = 0; 104 - addr->a.v6.sin6_flowinfo = 0; 105 103 addr->a.v6.sin6_addr = ifa->addr; 106 104 addr->a.v6.sin6_scope_id = ifa->idev->dev->ifindex; 107 105 addr->valid = 1; ··· 432 434 addr = kzalloc(sizeof(*addr), GFP_ATOMIC); 433 435 if (addr) { 434 436 addr->a.v6.sin6_family = AF_INET6; 435 - addr->a.v6.sin6_port = 0; 436 437 addr->a.v6.sin6_addr = ifp->addr; 437 438 addr->a.v6.sin6_scope_id = dev->ifindex; 438 439 addr->valid = 1;
+1 -3
net/sctp/protocol.c
··· 101 101 addr = kzalloc(sizeof(*addr), GFP_ATOMIC); 102 102 if (addr) { 103 103 addr->a.v4.sin_family = AF_INET; 104 - addr->a.v4.sin_port = 0; 105 104 addr->a.v4.sin_addr.s_addr = ifa->ifa_local; 106 105 addr->valid = 1; 107 106 INIT_LIST_HEAD(&addr->list); ··· 775 776 776 777 switch (ev) { 777 778 case NETDEV_UP: 778 - addr = kmalloc(sizeof(struct sctp_sockaddr_entry), GFP_ATOMIC); 779 + addr = kzalloc(sizeof(*addr), GFP_ATOMIC); 779 780 if (addr) { 780 781 addr->a.v4.sin_family = AF_INET; 781 - addr->a.v4.sin_port = 0; 782 782 addr->a.v4.sin_addr.s_addr = ifa->ifa_local; 783 783 addr->valid = 1; 784 784 spin_lock_bh(&net->sctp.local_addr_lock);
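Note: both sctp hunks replace kmalloc() plus piecemeal field clearing with kzalloc(). The returned memory is pre-zeroed, so sin_port/sin6_port/sin6_flowinfo no longer need explicit stores, and no padding or later-added field can escape initialisation. Equivalent forms, for reference:

    #include <linux/slab.h>
    #include <linux/string.h>

    /* kzalloc() ... */
    addr = kzalloc(sizeof(*addr), GFP_ATOMIC);

    /* ... is kmalloc() + memset(), minus the chance of missing a field: */
    addr = kmalloc(sizeof(*addr), GFP_ATOMIC);
    if (addr)
            memset(addr, 0, sizeof(*addr));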
+3 -1
net/smc/af_smc.c
··· 146 146 sock_set_flag(sk, SOCK_DEAD); 147 147 sk->sk_shutdown |= SHUTDOWN_MASK; 148 148 } 149 + 150 + sk->sk_prot->unhash(sk); 151 + 149 152 if (smc->clcsock) { 150 153 if (smc->use_fallback && sk->sk_state == SMC_LISTEN) { 151 154 /* wake up clcsock accept */ ··· 173 170 smc_conn_free(&smc->conn); 174 171 release_sock(sk); 175 172 176 - sk->sk_prot->unhash(sk); 177 173 sock_put(sk); /* final sock_put */ 178 174 out: 179 175 return rc;
+3
net/sunrpc/auth.c
··· 41 41 42 42 static struct cred machine_cred = { 43 43 .usage = ATOMIC_INIT(1), 44 + #ifdef CONFIG_DEBUG_CREDENTIALS 45 + .magic = CRED_MAGIC, 46 + #endif 44 47 }; 45 48 46 49 /*
+9 -3
net/sunrpc/auth_gss/auth_gss.c
··· 1549 1549 cred_len = p++; 1550 1550 1551 1551 spin_lock(&ctx->gc_seq_lock); 1552 - req->rq_seqno = ctx->gc_seq++; 1552 + req->rq_seqno = (ctx->gc_seq < MAXSEQ) ? ctx->gc_seq++ : MAXSEQ; 1553 1553 spin_unlock(&ctx->gc_seq_lock); 1554 + if (req->rq_seqno == MAXSEQ) 1555 + goto out_expired; 1554 1556 1555 1557 *p++ = htonl((u32) RPC_GSS_VERSION); 1556 1558 *p++ = htonl((u32) ctx->gc_proc); ··· 1574 1572 mic.data = (u8 *)(p + 1); 1575 1573 maj_stat = gss_get_mic(ctx->gc_gss_ctx, &verf_buf, &mic); 1576 1574 if (maj_stat == GSS_S_CONTEXT_EXPIRED) { 1577 - clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags); 1575 + goto out_expired; 1578 1576 } else if (maj_stat != 0) { 1579 - printk("gss_marshal: gss_get_mic FAILED (%d)\n", maj_stat); 1577 + pr_warn("gss_marshal: gss_get_mic FAILED (%d)\n", maj_stat); 1578 + task->tk_status = -EIO; 1580 1579 goto out_put_ctx; 1581 1580 } 1582 1581 p = xdr_encode_opaque(p, NULL, mic.len); 1583 1582 gss_put_ctx(ctx); 1584 1583 return p; 1584 + out_expired: 1585 + clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags); 1586 + task->tk_status = -EKEYEXPIRED; 1585 1587 out_put_ctx: 1586 1588 gss_put_ctx(ctx); 1587 1589 return NULL;
+12 -8
net/sunrpc/clnt.c
··· 1739 1739 xdr_buf_init(&req->rq_rcv_buf, 1740 1740 req->rq_rbuffer, 1741 1741 req->rq_rcvsize); 1742 - req->rq_bytes_sent = 0; 1743 1742 1744 1743 p = rpc_encode_header(task); 1745 - if (p == NULL) { 1746 - printk(KERN_INFO "RPC: couldn't encode RPC header, exit EIO\n"); 1747 - rpc_exit(task, -EIO); 1744 + if (p == NULL) 1748 1745 return; 1749 - } 1750 1746 1751 1747 encode = task->tk_msg.rpc_proc->p_encode; 1752 1748 if (encode == NULL) ··· 1767 1771 /* Did the encode result in an error condition? */ 1768 1772 if (task->tk_status != 0) { 1769 1773 /* Was the error nonfatal? */ 1770 - if (task->tk_status == -EAGAIN || task->tk_status == -ENOMEM) 1774 + switch (task->tk_status) { 1775 + case -EAGAIN: 1776 + case -ENOMEM: 1771 1777 rpc_delay(task, HZ >> 4); 1772 - else 1778 + break; 1779 + case -EKEYEXPIRED: 1780 + task->tk_action = call_refresh; 1781 + break; 1782 + default: 1773 1783 rpc_exit(task, task->tk_status); 1784 + } 1774 1785 return; 1775 1786 } 1776 1787 ··· 2339 2336 *p++ = htonl(clnt->cl_vers); /* program version */ 2340 2337 *p++ = htonl(task->tk_msg.rpc_proc->p_proc); /* procedure */ 2341 2338 p = rpcauth_marshcred(task, p); 2342 - req->rq_slen = xdr_adjust_iovec(&req->rq_svec[0], p); 2339 + if (p) 2340 + req->rq_slen = xdr_adjust_iovec(&req->rq_svec[0], p); 2343 2341 return p; 2344 2342 } 2345 2343
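Note: together with the auth_gss hunk above, this builds a renegotiation loop instead of a hard failure: gss_marshal() now fails the encode with -EKEYEXPIRED once the GSS sequence number would pass MAXSEQ (or the context has expired), and call_encode() treats that status as "refresh the credential and try again" rather than terminating the RPC. Sketched as a comment:

    /* call_encode() sees tk_status == -EKEYEXPIRED
     *   -> task->tk_action = call_refresh
     *   -> credential / GSS context re-established
     *   -> encoding retried with a fresh sequence window
     */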
+2 -1
net/sunrpc/xprt.c
··· 1151 1151 struct rpc_xprt *xprt = req->rq_xprt; 1152 1152 1153 1153 if (xprt_request_need_enqueue_transmit(task, req)) { 1154 + req->rq_bytes_sent = 0; 1154 1155 spin_lock(&xprt->queue_lock); 1155 1156 /* 1156 1157 * Requests that carry congestion control credits are added ··· 1178 1177 INIT_LIST_HEAD(&req->rq_xmit2); 1179 1178 goto out; 1180 1179 } 1181 - } else { 1180 + } else if (!req->rq_seqno) { 1182 1181 list_for_each_entry(pos, &xprt->xmit_queue, rq_xmit) { 1183 1182 if (pos->rq_task->tk_owner != task->tk_owner) 1184 1183 continue;
+4 -6
net/sunrpc/xprtrdma/verbs.c
··· 845 845 for (i = 0; i <= buf->rb_sc_last; i++) { 846 846 sc = rpcrdma_sendctx_create(&r_xprt->rx_ia); 847 847 if (!sc) 848 - goto out_destroy; 848 + return -ENOMEM; 849 849 850 850 sc->sc_xprt = r_xprt; 851 851 buf->rb_sc_ctxs[i] = sc; 852 852 } 853 853 854 854 return 0; 855 - 856 - out_destroy: 857 - rpcrdma_sendctxs_destroy(buf); 858 - return -ENOMEM; 859 855 } 860 856 861 857 /* The sendctx queue is not guaranteed to have a size that is a ··· 1109 1113 WQ_MEM_RECLAIM | WQ_HIGHPRI, 1110 1114 0, 1111 1115 r_xprt->rx_xprt.address_strings[RPC_DISPLAY_ADDR]); 1112 - if (!buf->rb_completion_wq) 1116 + if (!buf->rb_completion_wq) { 1117 + rc = -ENOMEM; 1113 1118 goto out; 1119 + } 1114 1120 1115 1121 return 0; 1116 1122 out:
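Note: the removed rpcrdma_sendctxs_destroy() call fixed a double free. On a mid-loop allocation failure the caller's single unwind path already tears down whatever was built, so destroying it locally as well freed the send contexts twice. A sketch of the convention, assuming (as the surrounding code suggests) that rpcrdma_buffer_create() is the caller and unwinds via rpcrdma_buffer_destroy():

    rc = rpcrdma_sendctxs_create(r_xprt);
    if (rc)
            goto out;       /* no local cleanup on failure */
    /* ... later, on any construction failure: */
    out:
            rpcrdma_buffer_destroy(buf);    /* one unwind frees it all, once */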
+22
net/sunrpc/xprtsock.c
··· 48 48 #include <net/udp.h> 49 49 #include <net/tcp.h> 50 50 #include <linux/bvec.h> 51 + #include <linux/highmem.h> 51 52 #include <linux/uio.h> 52 53 53 54 #include <trace/events/sunrpc.h> ··· 377 376 return sock_recvmsg(sock, msg, flags); 378 377 } 379 378 379 + #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 380 + static void 381 + xs_flush_bvec(const struct bio_vec *bvec, size_t count, size_t seek) 382 + { 383 + struct bvec_iter bi = { 384 + .bi_size = count, 385 + }; 386 + struct bio_vec bv; 387 + 388 + bvec_iter_advance(bvec, &bi, seek & PAGE_MASK); 389 + for_each_bvec(bv, bvec, bi, bi) 390 + flush_dcache_page(bv.bv_page); 391 + } 392 + #else 393 + static inline void 394 + xs_flush_bvec(const struct bio_vec *bvec, size_t count, size_t seek) 395 + { 396 + } 397 + #endif 398 + 380 399 static ssize_t 381 400 xs_read_xdr_buf(struct socket *sock, struct msghdr *msg, int flags, 382 401 struct xdr_buf *buf, size_t count, size_t seek, size_t *read) ··· 430 409 seek + buf->page_base); 431 410 if (ret <= 0) 432 411 goto sock_err; 412 + xs_flush_bvec(buf->bvec, ret, seek + buf->page_base); 433 413 offset += ret - buf->page_base; 434 414 if (offset == count || msg->msg_flags & (MSG_EOR|MSG_TRUNC)) 435 415 goto out;
+1 -1
net/tipc/topsrv.c
··· 398 398 ret = sock_recvmsg(con->sock, &msg, MSG_DONTWAIT); 399 399 if (ret == -EWOULDBLOCK) 400 400 return -EWOULDBLOCK; 401 - if (ret > 0) { 401 + if (ret == sizeof(s)) { 402 402 read_lock_bh(&sk->sk_callback_lock); 403 403 ret = tipc_conn_rcv_sub(srv, con, &s); 404 404 read_unlock_bh(&sk->sk_callback_lock);
+13 -3
net/xdp/xdp_umem.c
··· 41 41 * not know if the device has more tx queues than rx, or the opposite. 42 42 * This might also change during run time. 43 43 */ 44 - static void xdp_reg_umem_at_qid(struct net_device *dev, struct xdp_umem *umem, 45 - u16 queue_id) 44 + static int xdp_reg_umem_at_qid(struct net_device *dev, struct xdp_umem *umem, 45 + u16 queue_id) 46 46 { 47 + if (queue_id >= max_t(unsigned int, 48 + dev->real_num_rx_queues, 49 + dev->real_num_tx_queues)) 50 + return -EINVAL; 51 + 47 52 if (queue_id < dev->real_num_rx_queues) 48 53 dev->_rx[queue_id].umem = umem; 49 54 if (queue_id < dev->real_num_tx_queues) 50 55 dev->_tx[queue_id].umem = umem; 56 + 57 + return 0; 51 58 } 52 59 53 60 struct xdp_umem *xdp_get_umem_from_qid(struct net_device *dev, ··· 95 88 goto out_rtnl_unlock; 96 89 } 97 90 98 - xdp_reg_umem_at_qid(dev, umem, queue_id); 91 + err = xdp_reg_umem_at_qid(dev, umem, queue_id); 92 + if (err) 93 + goto out_rtnl_unlock; 94 + 99 95 umem->dev = dev; 100 96 umem->queue_id = queue_id; 101 97 if (force_copy)
+1
samples/bpf/Makefile
··· 279 279 -Wno-gnu-variable-sized-type-not-at-end \ 280 280 -Wno-address-of-packed-member -Wno-tautological-compare \ 281 281 -Wno-unknown-warning-option $(CLANG_ARCH_ARGS) \ 282 + -I$(srctree)/samples/bpf/ -include asm_goto_workaround.h \ 282 283 -O2 -emit-llvm -c $< -o -| $(LLC) -march=bpf $(LLC_FLAGS) -filetype=obj -o $@ 283 284 ifeq ($(DWARF2BTF),y) 284 285 $(BTF_PAHOLE) -J $@
+16
samples/bpf/asm_goto_workaround.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + /* Copyright (c) 2019 Facebook */ 3 + #ifndef __ASM_GOTO_WORKAROUND_H 4 + #define __ASM_GOTO_WORKAROUND_H 5 + 6 + /* this will bring in asm_volatile_goto macro definition 7 + * if enabled by compiler and config options. 8 + */ 9 + #include <linux/types.h> 10 + 11 + #ifdef asm_volatile_goto 12 + #undef asm_volatile_goto 13 + #define asm_volatile_goto(x...) asm volatile("invalid use of asm_volatile_goto") 14 + #endif 15 + 16 + #endif
+7 -7
samples/bpf/test_cgrp2_attach2.c
··· 77 77 78 78 /* Create cgroup /foo, get fd, and join it */ 79 79 foo = create_and_get_cgroup(FOO); 80 - if (!foo) 80 + if (foo < 0) 81 81 goto err; 82 82 83 83 if (join_cgroup(FOO)) ··· 94 94 95 95 /* Create cgroup /foo/bar, get fd, and join it */ 96 96 bar = create_and_get_cgroup(BAR); 97 - if (!bar) 97 + if (bar < 0) 98 98 goto err; 99 99 100 100 if (join_cgroup(BAR)) ··· 298 298 goto err; 299 299 300 300 cg1 = create_and_get_cgroup("/cg1"); 301 - if (!cg1) 301 + if (cg1 < 0) 302 302 goto err; 303 303 cg2 = create_and_get_cgroup("/cg1/cg2"); 304 - if (!cg2) 304 + if (cg2 < 0) 305 305 goto err; 306 306 cg3 = create_and_get_cgroup("/cg1/cg2/cg3"); 307 - if (!cg3) 307 + if (cg3 < 0) 308 308 goto err; 309 309 cg4 = create_and_get_cgroup("/cg1/cg2/cg3/cg4"); 310 - if (!cg4) 310 + if (cg4 < 0) 311 311 goto err; 312 312 cg5 = create_and_get_cgroup("/cg1/cg2/cg3/cg4/cg5"); 313 - if (!cg5) 313 + if (cg5 < 0) 314 314 goto err; 315 315 316 316 if (join_cgroup("/cg1/cg2/cg3/cg4/cg5"))
+1 -1
samples/bpf/test_current_task_under_cgroup_user.c
··· 32 32 33 33 cg2 = create_and_get_cgroup(CGROUP_PATH); 34 34 35 - if (!cg2) 35 + if (cg2 < 0) 36 36 goto err; 37 37 38 38 if (bpf_map_update_elem(map_fd[0], &idx, &cg2, BPF_ANY)) {
+1 -1
samples/bpf/xdp1_user.c
··· 103 103 return 1; 104 104 } 105 105 106 - ifindex = if_nametoindex(argv[1]); 106 + ifindex = if_nametoindex(argv[optind]); 107 107 if (!ifindex) { 108 108 perror("if_nametoindex"); 109 109 return 1;
+1
samples/seccomp/Makefile
··· 34 34 HOSTCFLAGS_dropper.o += $(MFLAG) 35 35 HOSTCFLAGS_bpf-helper.o += $(MFLAG) 36 36 HOSTCFLAGS_bpf-fancy.o += $(MFLAG) 37 + HOSTCFLAGS_user-trap.o += $(MFLAG) 37 38 HOSTLDLIBS_bpf-direct += $(MFLAG) 38 39 HOSTLDLIBS_bpf-fancy += $(MFLAG) 39 40 HOSTLDLIBS_dropper += $(MFLAG)
-4
scripts/Kbuild.include
··· 24 24 basetarget = $(basename $(notdir $@)) 25 25 26 26 ### 27 - # filename of first prerequisite with directory and extension stripped 28 - baseprereq = $(basename $(notdir $<)) 29 - 30 - ### 31 27 # Escape single quote for use in echo statements 32 28 escsq = $(subst $(squote),'\$(squote)',$1) 33 29
+4 -4
scripts/coccinelle/api/alloc/alloc_cast.cocci
··· 32 32 (T *) 33 33 \(kmalloc\|kzalloc\|kcalloc\|kmem_cache_alloc\|kmem_cache_zalloc\| 34 34 kmem_cache_alloc_node\|kmalloc_node\|kzalloc_node\|vmalloc\|vzalloc\| 35 - dma_alloc_coherent\|dma_zalloc_coherent\|devm_kmalloc\|devm_kzalloc\| 35 + dma_alloc_coherent\|devm_kmalloc\|devm_kzalloc\| 36 36 kvmalloc\|kvzalloc\|kvmalloc_node\|kvzalloc_node\|pci_alloc_consistent\| 37 37 pci_zalloc_consistent\|kmem_alloc\|kmem_zalloc\|kmem_zone_alloc\| 38 38 kmem_zone_zalloc\|vmalloc_node\|vzalloc_node\)(...) ··· 55 55 * (T *) 56 56 \(kmalloc\|kzalloc\|kcalloc\|kmem_cache_alloc\|kmem_cache_zalloc\| 57 57 kmem_cache_alloc_node\|kmalloc_node\|kzalloc_node\|vmalloc\|vzalloc\| 58 - dma_alloc_coherent\|dma_zalloc_coherent\|devm_kmalloc\|devm_kzalloc\| 58 + dma_alloc_coherent\|devm_kmalloc\|devm_kzalloc\| 59 59 kvmalloc\|kvzalloc\|kvmalloc_node\|kvzalloc_node\|pci_alloc_consistent\| 60 60 pci_zalloc_consistent\|kmem_alloc\|kmem_zalloc\|kmem_zone_alloc\| 61 61 kmem_zone_zalloc\|vmalloc_node\|vzalloc_node\)(...) ··· 78 78 - (T *) 79 79 \(kmalloc\|kzalloc\|kcalloc\|kmem_cache_alloc\|kmem_cache_zalloc\| 80 80 kmem_cache_alloc_node\|kmalloc_node\|kzalloc_node\|vmalloc\|vzalloc\| 81 - dma_alloc_coherent\|dma_zalloc_coherent\|devm_kmalloc\|devm_kzalloc\| 81 + dma_alloc_coherent\|devm_kmalloc\|devm_kzalloc\| 82 82 kvmalloc\|kvzalloc\|kvmalloc_node\|kvzalloc_node\|pci_alloc_consistent\| 83 83 pci_zalloc_consistent\|kmem_alloc\|kmem_zalloc\|kmem_zone_alloc\| 84 84 kmem_zone_zalloc\|vmalloc_node\|vzalloc_node\)(...) ··· 95 95 (T@p *) 96 96 \(kmalloc\|kzalloc\|kcalloc\|kmem_cache_alloc\|kmem_cache_zalloc\| 97 97 kmem_cache_alloc_node\|kmalloc_node\|kzalloc_node\|vmalloc\|vzalloc\| 98 - dma_alloc_coherent\|dma_zalloc_coherent\|devm_kmalloc\|devm_kzalloc\| 98 + dma_alloc_coherent\|devm_kmalloc\|devm_kzalloc\| 99 99 kvmalloc\|kvzalloc\|kvmalloc_node\|kvzalloc_node\|pci_alloc_consistent\| 100 100 pci_zalloc_consistent\|kmem_alloc\|kmem_zalloc\|kmem_zone_alloc\| 101 101 kmem_zone_zalloc\|vmalloc_node\|vzalloc_node\)(...)
+1 -10
scripts/coccinelle/api/alloc/zalloc-simple.cocci
··· 69 69 - x = (T)vmalloc(E1); 70 70 + x = (T)vzalloc(E1); 71 71 | 72 - - x = dma_alloc_coherent(E2,E1,E3,E4); 73 - + x = dma_zalloc_coherent(E2,E1,E3,E4); 74 - | 75 - - x = (T *)dma_alloc_coherent(E2,E1,E3,E4); 76 - + x = dma_zalloc_coherent(E2,E1,E3,E4); 77 - | 78 - - x = (T)dma_alloc_coherent(E2,E1,E3,E4); 79 - + x = (T)dma_zalloc_coherent(E2,E1,E3,E4); 80 - | 81 72 - x = kmalloc_node(E1,E2,E3); 82 73 + x = kzalloc_node(E1,E2,E3); 83 74 | ··· 216 225 x << r2.x; 217 226 @@ 218 227 219 - msg="WARNING: dma_zalloc_coherent should be used for %s, instead of dma_alloc_coherent/memset" % (x) 228 + msg="WARNING: dma_alloc_coherent use in %s already zeroes out memory, so memset is not needed" % (x) 220 229 coccilib.report.print_report(p[0], msg) 221 230 222 231 //-----------------------------------------------------------------
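Note: since dma_alloc_coherent() now returns zeroed memory unconditionally, dma_zalloc_coherent() is gone: alloc_cast.cocci drops it from its allocator list above, and zalloc-simple.cocci now reports a memset after dma_alloc_coherent() as redundant instead of suggesting the removed z-variant. The pattern the updated rule flags (dev, size and dma_handle are placeholder names):

    /* Redundant: dma_alloc_coherent() already zeroes the buffer. */
    buf = dma_alloc_coherent(dev, size, &dma_handle, GFP_KERNEL);
    if (buf)
            memset(buf, 0, size);   /* flagged; can simply be deleted */

The i2sbus and dbri hunks further down are straight conversions of the same kind.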
+21 -2
scripts/gcc-plugins/arm_ssp_per_task_plugin.c
··· 13 13 for (insn = get_insns(); insn; insn = NEXT_INSN(insn)) { 14 14 const char *sym; 15 15 rtx body; 16 - rtx masked_sp; 16 + rtx mask, masked_sp; 17 17 18 18 /* 19 19 * Find a SET insn involving a SYMBOL_REF to __stack_chk_guard ··· 33 33 * produces the address of the copy of the stack canary value 34 34 * stored in struct thread_info 35 35 */ 36 + mask = GEN_INT(sext_hwi(sp_mask, GET_MODE_PRECISION(Pmode))); 36 37 masked_sp = gen_reg_rtx(Pmode); 37 38 38 39 emit_insn_before(gen_rtx_SET(masked_sp, 39 40 gen_rtx_AND(Pmode, 40 41 stack_pointer_rtx, 41 - GEN_INT(sp_mask))), 42 + mask)), 42 43 insn); 43 44 44 45 SET_SRC(body) = gen_rtx_PLUS(Pmode, masked_sp, ··· 52 51 53 52 #define NO_GATE 54 53 #include "gcc-generate-rtl-pass.h" 54 + 55 + #if BUILDING_GCC_VERSION >= 9000 56 + static bool no(void) 57 + { 58 + return false; 59 + } 60 + 61 + static void arm_pertask_ssp_start_unit(void *gcc_data, void *user_data) 62 + { 63 + targetm.have_stack_protect_combined_set = no; 64 + targetm.have_stack_protect_combined_test = no; 65 + } 66 + #endif 55 67 56 68 __visible int plugin_init(struct plugin_name_args *plugin_info, 57 69 struct plugin_gcc_version *version) ··· 112 98 113 99 register_callback(plugin_info->base_name, PLUGIN_PASS_MANAGER_SETUP, 114 100 NULL, &arm_pertask_ssp_rtl_pass_info); 101 + 102 + #if BUILDING_GCC_VERSION >= 9000 103 + register_callback(plugin_info->base_name, PLUGIN_START_UNIT, 104 + arm_pertask_ssp_start_unit, NULL); 105 + #endif 115 106 116 107 return 0; 117 108 }
+1 -1
scripts/kconfig/Makefile
··· 206 206 $(obj)/%conf-cfg: $(src)/%conf-cfg.sh FORCE 207 207 $(call filechk,conf_cfg) 208 208 209 - clean-files += conf-cfg 209 + clean-files += *conf-cfg
+1 -1
scripts/mod/modpost.c
··· 2185 2185 /* Cannot check for assembler */ 2186 2186 static void add_retpoline(struct buffer *b) 2187 2187 { 2188 - buf_printf(b, "\n#ifdef RETPOLINE\n"); 2188 + buf_printf(b, "\n#ifdef CONFIG_RETPOLINE\n"); 2189 2189 buf_printf(b, "MODULE_INFO(retpoline, \"Y\");\n"); 2190 2190 buf_printf(b, "#endif\n"); 2191 2191 }
+7
security/security.c
··· 1027 1027 1028 1028 void security_cred_free(struct cred *cred) 1029 1029 { 1030 + /* 1031 + * There is a failure case in prepare_creds() that 1032 + * may result in a call here with ->security being NULL. 1033 + */ 1034 + if (unlikely(cred->security == NULL)) 1035 + return; 1036 + 1030 1037 call_void_hook(cred_free, cred); 1031 1038 } 1032 1039
+2 -1
security/selinux/ss/policydb.c
··· 732 732 kfree(key); 733 733 if (datum) { 734 734 levdatum = datum; 735 - ebitmap_destroy(&levdatum->level->cat); 735 + if (levdatum->level) 736 + ebitmap_destroy(&levdatum->level->cat); 736 737 kfree(levdatum->level); 737 738 } 738 739 kfree(datum);
+3 -1
security/yama/yama_lsm.c
··· 368 368 break; 369 369 case YAMA_SCOPE_RELATIONAL: 370 370 rcu_read_lock(); 371 - if (!task_is_descendant(current, child) && 371 + if (!pid_alive(child)) 372 + rc = -EPERM; 373 + if (!rc && !task_is_descendant(current, child) && 372 374 !ptracer_exception_found(current, child) && 373 375 !ns_capable(__task_cred(child)->user_ns, CAP_SYS_PTRACE)) 374 376 rc = -EPERM;
+2 -2
sound/aoa/soundbus/i2sbus/core.c
··· 47 47 /* We use the PCI APIs for now until the generic one gets fixed 48 48 * enough or until we get some macio-specific versions 49 49 */ 50 - r->space = dma_zalloc_coherent(&macio_get_pci_dev(i2sdev->macio)->dev, 51 - r->size, &r->bus_addr, GFP_KERNEL); 50 + r->space = dma_alloc_coherent(&macio_get_pci_dev(i2sdev->macio)->dev, 51 + r->size, &r->bus_addr, GFP_KERNEL); 52 52 if (!r->space) 53 53 return -ENOMEM; 54 54
+3
sound/pci/cs46xx/dsp_spos.c
··· 903 903 struct dsp_spos_instance * ins = chip->dsp_spos_instance; 904 904 int i; 905 905 906 + if (!ins) 907 + return 0; 908 + 906 909 snd_info_free_entry(ins->proc_sym_info_entry); 907 910 ins->proc_sym_info_entry = NULL; 908 911
+17 -1
sound/pci/hda/patch_realtek.c
··· 4102 4102 case 0x10ec0295: 4103 4103 case 0x10ec0289: 4104 4104 case 0x10ec0299: 4105 + alc_process_coef_fw(codec, alc225_pre_hsmode); 4105 4106 alc_process_coef_fw(codec, coef0225); 4106 4107 break; 4107 4108 case 0x10ec0867: ··· 5441 5440 } 5442 5441 } 5443 5442 5443 + static void alc_fixup_disable_mic_vref(struct hda_codec *codec, 5444 + const struct hda_fixup *fix, int action) 5445 + { 5446 + if (action == HDA_FIXUP_ACT_PRE_PROBE) 5447 + snd_hda_codec_set_pin_target(codec, 0x19, PIN_VREFHIZ); 5448 + } 5449 + 5444 5450 /* for hda_fixup_thinkpad_acpi() */ 5445 5451 #include "thinkpad_helper.c" 5446 5452 ··· 5557 5549 ALC293_FIXUP_LENOVO_SPK_NOISE, 5558 5550 ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY, 5559 5551 ALC255_FIXUP_DELL_SPK_NOISE, 5552 + ALC225_FIXUP_DISABLE_MIC_VREF, 5560 5553 ALC225_FIXUP_DELL1_MIC_NO_PRESENCE, 5561 5554 ALC295_FIXUP_DISABLE_DAC3, 5562 5555 ALC280_FIXUP_HP_HEADSET_MIC, ··· 6277 6268 .chained = true, 6278 6269 .chain_id = ALC255_FIXUP_DELL1_MIC_NO_PRESENCE 6279 6270 }, 6271 + [ALC225_FIXUP_DISABLE_MIC_VREF] = { 6272 + .type = HDA_FIXUP_FUNC, 6273 + .v.func = alc_fixup_disable_mic_vref, 6274 + .chained = true, 6275 + .chain_id = ALC269_FIXUP_DELL1_MIC_NO_PRESENCE 6276 + }, 6280 6277 [ALC225_FIXUP_DELL1_MIC_NO_PRESENCE] = { 6281 6278 .type = HDA_FIXUP_VERBS, 6282 6279 .v.verbs = (const struct hda_verb[]) { ··· 6292 6277 {} 6293 6278 }, 6294 6279 .chained = true, 6295 - .chain_id = ALC269_FIXUP_DELL1_MIC_NO_PRESENCE 6280 + .chain_id = ALC225_FIXUP_DISABLE_MIC_VREF 6296 6281 }, 6297 6282 [ALC280_FIXUP_HP_HEADSET_MIC] = { 6298 6283 .type = HDA_FIXUP_FUNC, ··· 6599 6584 SND_PCI_QUIRK(0x1028, 0x0871, "Dell Precision 3630", ALC255_FIXUP_DELL_HEADSET_MIC), 6600 6585 SND_PCI_QUIRK(0x1028, 0x0872, "Dell Precision 3630", ALC255_FIXUP_DELL_HEADSET_MIC), 6601 6586 SND_PCI_QUIRK(0x1028, 0x0873, "Dell Precision 3930", ALC255_FIXUP_DUMMY_LINEOUT_VERB), 6587 + SND_PCI_QUIRK(0x1028, 0x0935, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB), 6602 6588 SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), 6603 6589 SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), 6604 6590 SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
+2 -2
sound/sparc/dbri.c
··· 2541 2541 dbri->op = op; 2542 2542 dbri->irq = irq; 2543 2543 2544 - dbri->dma = dma_zalloc_coherent(&op->dev, sizeof(struct dbri_dma), 2545 - &dbri->dma_dvma, GFP_KERNEL); 2544 + dbri->dma = dma_alloc_coherent(&op->dev, sizeof(struct dbri_dma), 2545 + &dbri->dma_dvma, GFP_KERNEL); 2546 2546 if (!dbri->dma) 2547 2547 return -ENOMEM; 2548 2548
+1 -1
sound/usb/card.c
··· 246 246 h1 = snd_usb_find_csint_desc(host_iface->extra, 247 247 host_iface->extralen, 248 248 NULL, UAC_HEADER); 249 - if (!h1) { 249 + if (!h1 || h1->bLength < sizeof(*h1)) { 250 250 dev_err(&dev->dev, "cannot find UAC_HEADER\n"); 251 251 return -EINVAL; 252 252 }
+23 -6
sound/usb/mixer.c
··· 753 753 struct uac_mixer_unit_descriptor *desc) 754 754 { 755 755 int mu_channels; 756 + void *c; 756 757 757 - if (desc->bLength < 11) 758 + if (desc->bLength < sizeof(*desc)) 758 759 return -EINVAL; 759 760 if (!desc->bNrInPins) 760 761 return -EINVAL; ··· 764 763 case UAC_VERSION_1: 765 764 case UAC_VERSION_2: 766 765 default: 766 + if (desc->bLength < sizeof(*desc) + desc->bNrInPins + 1) 767 + return 0; /* no bmControls -> skip */ 767 768 mu_channels = uac_mixer_unit_bNrChannels(desc); 768 769 break; 769 770 case UAC_VERSION_3: ··· 775 772 } 776 773 777 774 if (!mu_channels) 778 - return -EINVAL; 775 + return 0; 776 + 777 + c = uac_mixer_unit_bmControls(desc, state->mixer->protocol); 778 + if (c - (void *)desc + (mu_channels - 1) / 8 >= desc->bLength) 779 + return 0; /* no bmControls -> skip */ 779 780 780 781 return mu_channels; 781 782 } ··· 951 944 struct uac_mixer_unit_descriptor *d = p1; 952 945 953 946 err = uac_mixer_unit_get_channels(state, d); 954 - if (err < 0) 947 + if (err <= 0) 955 948 return err; 956 949 957 950 term->channels = err; ··· 2075 2068 2076 2069 if (state->mixer->protocol == UAC_VERSION_2) { 2077 2070 struct uac2_input_terminal_descriptor *d_v2 = raw_desc; 2071 + if (d_v2->bLength < sizeof(*d_v2)) 2072 + return -EINVAL; 2078 2073 control = UAC2_TE_CONNECTOR; 2079 2074 term_id = d_v2->bTerminalID; 2080 2075 bmctls = le16_to_cpu(d_v2->bmControls); 2081 2076 } else if (state->mixer->protocol == UAC_VERSION_3) { 2082 2077 struct uac3_input_terminal_descriptor *d_v3 = raw_desc; 2078 + if (d_v3->bLength < sizeof(*d_v3)) 2079 + return -EINVAL; 2083 2080 control = UAC3_TE_INSERTION; 2084 2081 term_id = d_v3->bTerminalID; 2085 2082 bmctls = le32_to_cpu(d_v3->bmControls); ··· 2129 2118 if (err < 0) 2130 2119 continue; 2131 2120 /* no bmControls field (e.g. Maya44) -> ignore */ 2132 - if (desc->bLength <= 10 + input_pins) 2121 + if (!num_outs) 2133 2122 continue; 2134 2123 err = check_input_term(state, desc->baSourceID[pin], &iterm); 2135 2124 if (err < 0) ··· 2325 2314 char *name) 2326 2315 { 2327 2316 struct uac_processing_unit_descriptor *desc = raw_desc; 2328 - int num_ins = desc->bNrInPins; 2317 + int num_ins; 2329 2318 struct usb_mixer_elem_info *cval; 2330 2319 struct snd_kcontrol *kctl; 2331 2320 int i, err, nameid, type, len; ··· 2340 2329 0, NULL, default_value_info 2341 2330 }; 2342 2331 2343 - if (desc->bLength < 13 || desc->bLength < 13 + num_ins || 2332 + if (desc->bLength < 13) { 2333 + usb_audio_err(state->chip, "invalid %s descriptor (id %d)\n", name, unitid); 2334 + return -EINVAL; 2335 + } 2336 + 2337 + num_ins = desc->bNrInPins; 2338 + if (desc->bLength < 13 + num_ins || 2344 2339 desc->bLength < num_ins + uac_processing_unit_bControlSize(desc, state->mixer->protocol)) { 2345 2340 usb_audio_err(state->chip, "invalid %s descriptor (id %d)\n", name, unitid); 2346 2341 return -EINVAL;
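Note: the mixer hunks all enforce one rule: UAC descriptors are device-supplied, so bLength must prove a field exists before the parser reads it, and a unit that merely omits optional controls (bmControls, as on the Maya44) is skipped with 0 rather than failing the whole mixer with -EINVAL. The idiom, sketched for a mixer unit (raw is a hypothetical pointer into the interface's extra bytes):

    struct uac_mixer_unit_descriptor *d = raw;

    if (d->bLength < sizeof(*d))                    /* fixed header present? */
            return -EINVAL;                         /* malformed: reject */
    if (d->bLength < sizeof(*d) + d->bNrInPins + 1)
            return 0;                               /* no bmControls: skip unit */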
+6
sound/usb/quirks-table.h
··· 3326 3326 } 3327 3327 } 3328 3328 }, 3329 + { 3330 + .ifnum = -1 3331 + }, 3329 3332 } 3330 3333 } 3331 3334 }, ··· 3371 3368 48000 3372 3369 } 3373 3370 } 3371 + }, 3372 + { 3373 + .ifnum = -1 3374 3374 }, 3375 3375 } 3376 3376 }
+1 -1
sound/usb/quirks.c
··· 768 768 * REG1: PLL binary search enable, soft mute enable. 769 769 */ 770 770 CM6206_REG1_PLLBIN_EN | 771 - CM6206_REG1_SOFT_MUTE_EN | 771 + CM6206_REG1_SOFT_MUTE_EN, 772 772 /* 773 773 * REG2: enable output drivers, 774 774 * select front channels to the headphone output,
+25 -11
sound/usb/stream.c
··· 596 596 csep = snd_usb_find_desc(alts->extra, alts->extralen, NULL, USB_DT_CS_ENDPOINT); 597 597 598 598 if (!csep || csep->bLength < 7 || 599 - csep->bDescriptorSubtype != UAC_EP_GENERAL) { 600 - usb_audio_warn(chip, 601 - "%u:%d : no or invalid class specific endpoint descriptor\n", 602 - iface_no, altsd->bAlternateSetting); 603 - return 0; 604 - } 599 + csep->bDescriptorSubtype != UAC_EP_GENERAL) 600 + goto error; 605 601 606 602 if (protocol == UAC_VERSION_1) { 607 603 attributes = csep->bmAttributes; ··· 605 609 struct uac2_iso_endpoint_descriptor *csep2 = 606 610 (struct uac2_iso_endpoint_descriptor *) csep; 607 611 612 + if (csep2->bLength < sizeof(*csep2)) 613 + goto error; 608 614 attributes = csep->bmAttributes & UAC_EP_CS_ATTR_FILL_MAX; 609 615 610 616 /* emulate the endpoint attributes of a v1 device */ ··· 616 618 struct uac3_iso_endpoint_descriptor *csep3 = 617 619 (struct uac3_iso_endpoint_descriptor *) csep; 618 620 621 + if (csep3->bLength < sizeof(*csep3)) 622 + goto error; 619 623 /* emulate the endpoint attributes of a v1 device */ 620 624 if (le32_to_cpu(csep3->bmControls) & UAC2_CONTROL_PITCH) 621 625 attributes |= UAC_EP_CS_ATTR_PITCH_CONTROL; 622 626 } 623 627 624 628 return attributes; 629 + 630 + error: 631 + usb_audio_warn(chip, 632 + "%u:%d : no or invalid class specific endpoint descriptor\n", 633 + iface_no, altsd->bAlternateSetting); 634 + return 0; 625 635 } 626 636 627 637 /* find an input terminal descriptor (either UAC1 or UAC2) with the given ··· 637 631 */ 638 632 static void * 639 633 snd_usb_find_input_terminal_descriptor(struct usb_host_interface *ctrl_iface, 640 - int terminal_id) 634 + int terminal_id, bool uac23) 641 635 { 642 636 struct uac2_input_terminal_descriptor *term = NULL; 637 + size_t minlen = uac23 ? sizeof(struct uac2_input_terminal_descriptor) : 638 + sizeof(struct uac_input_terminal_descriptor); 643 639 644 640 while ((term = snd_usb_find_csint_desc(ctrl_iface->extra, 645 641 ctrl_iface->extralen, 646 642 term, UAC_INPUT_TERMINAL))) { 643 + if (term->bLength < minlen) 644 + continue; 647 645 if (term->bTerminalID == terminal_id) 648 646 return term; 649 647 } ··· 665 655 while ((term = snd_usb_find_csint_desc(ctrl_iface->extra, 666 656 ctrl_iface->extralen, 667 657 term, UAC_OUTPUT_TERMINAL))) { 668 - if (term->bTerminalID == terminal_id) 658 + if (term->bLength >= sizeof(*term) && 659 + term->bTerminalID == terminal_id) 669 660 return term; 670 661 } 671 662 ··· 740 729 format = le16_to_cpu(as->wFormatTag); /* remember the format value */ 741 730 742 731 iterm = snd_usb_find_input_terminal_descriptor(chip->ctrl_intf, 743 - as->bTerminalLink); 732 + as->bTerminalLink, 733 + false); 744 734 if (iterm) { 745 735 num_channels = iterm->bNrChannels; 746 736 chconfig = le16_to_cpu(iterm->wChannelConfig); ··· 776 764 * to extract the clock 777 765 */ 778 766 input_term = snd_usb_find_input_terminal_descriptor(chip->ctrl_intf, 779 - as->bTerminalLink); 767 + as->bTerminalLink, 768 + true); 780 769 if (input_term) { 781 770 clock = input_term->bCSourceID; 782 771 if (!chconfig && (num_channels == input_term->bNrChannels)) ··· 1011 998 * to extract the clock 1012 999 */ 1013 1000 input_term = snd_usb_find_input_terminal_descriptor(chip->ctrl_intf, 1014 - as->bTerminalLink); 1001 + as->bTerminalLink, 1002 + true); 1015 1003 if (input_term) { 1016 1004 clock = input_term->bCSourceID; 1017 1005 goto found_clock;
+1
tools/arch/powerpc/include/uapi/asm/perf_regs.h
··· 47 47 PERF_REG_POWERPC_DAR, 48 48 PERF_REG_POWERPC_DSISR, 49 49 PERF_REG_POWERPC_SIER, 50 + PERF_REG_POWERPC_MMCRA, 50 51 PERF_REG_POWERPC_MAX, 51 52 }; 52 53 #endif /* _UAPI_ASM_POWERPC_PERF_REGS_H */
-404
tools/arch/powerpc/include/uapi/asm/unistd.h
··· 1 - /* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */ 2 - /* 3 - * This file contains the system call numbers. 4 - * 5 - * This program is free software; you can redistribute it and/or 6 - * modify it under the terms of the GNU General Public License 7 - * as published by the Free Software Foundation; either version 8 - * 2 of the License, or (at your option) any later version. 9 - */ 10 - #ifndef _UAPI_ASM_POWERPC_UNISTD_H_ 11 - #define _UAPI_ASM_POWERPC_UNISTD_H_ 12 - 13 - 14 - #define __NR_restart_syscall 0 15 - #define __NR_exit 1 16 - #define __NR_fork 2 17 - #define __NR_read 3 18 - #define __NR_write 4 19 - #define __NR_open 5 20 - #define __NR_close 6 21 - #define __NR_waitpid 7 22 - #define __NR_creat 8 23 - #define __NR_link 9 24 - #define __NR_unlink 10 25 - #define __NR_execve 11 26 - #define __NR_chdir 12 27 - #define __NR_time 13 28 - #define __NR_mknod 14 29 - #define __NR_chmod 15 30 - #define __NR_lchown 16 31 - #define __NR_break 17 32 - #define __NR_oldstat 18 33 - #define __NR_lseek 19 34 - #define __NR_getpid 20 35 - #define __NR_mount 21 36 - #define __NR_umount 22 37 - #define __NR_setuid 23 38 - #define __NR_getuid 24 39 - #define __NR_stime 25 40 - #define __NR_ptrace 26 41 - #define __NR_alarm 27 42 - #define __NR_oldfstat 28 43 - #define __NR_pause 29 44 - #define __NR_utime 30 45 - #define __NR_stty 31 46 - #define __NR_gtty 32 47 - #define __NR_access 33 48 - #define __NR_nice 34 49 - #define __NR_ftime 35 50 - #define __NR_sync 36 51 - #define __NR_kill 37 52 - #define __NR_rename 38 53 - #define __NR_mkdir 39 54 - #define __NR_rmdir 40 55 - #define __NR_dup 41 56 - #define __NR_pipe 42 57 - #define __NR_times 43 58 - #define __NR_prof 44 59 - #define __NR_brk 45 60 - #define __NR_setgid 46 61 - #define __NR_getgid 47 62 - #define __NR_signal 48 63 - #define __NR_geteuid 49 64 - #define __NR_getegid 50 65 - #define __NR_acct 51 66 - #define __NR_umount2 52 67 - #define __NR_lock 53 68 - #define __NR_ioctl 54 69 - #define __NR_fcntl 55 70 - #define __NR_mpx 56 71 - #define __NR_setpgid 57 72 - #define __NR_ulimit 58 73 - #define __NR_oldolduname 59 74 - #define __NR_umask 60 75 - #define __NR_chroot 61 76 - #define __NR_ustat 62 77 - #define __NR_dup2 63 78 - #define __NR_getppid 64 79 - #define __NR_getpgrp 65 80 - #define __NR_setsid 66 81 - #define __NR_sigaction 67 82 - #define __NR_sgetmask 68 83 - #define __NR_ssetmask 69 84 - #define __NR_setreuid 70 85 - #define __NR_setregid 71 86 - #define __NR_sigsuspend 72 87 - #define __NR_sigpending 73 88 - #define __NR_sethostname 74 89 - #define __NR_setrlimit 75 90 - #define __NR_getrlimit 76 91 - #define __NR_getrusage 77 92 - #define __NR_gettimeofday 78 93 - #define __NR_settimeofday 79 94 - #define __NR_getgroups 80 95 - #define __NR_setgroups 81 96 - #define __NR_select 82 97 - #define __NR_symlink 83 98 - #define __NR_oldlstat 84 99 - #define __NR_readlink 85 100 - #define __NR_uselib 86 101 - #define __NR_swapon 87 102 - #define __NR_reboot 88 103 - #define __NR_readdir 89 104 - #define __NR_mmap 90 105 - #define __NR_munmap 91 106 - #define __NR_truncate 92 107 - #define __NR_ftruncate 93 108 - #define __NR_fchmod 94 109 - #define __NR_fchown 95 110 - #define __NR_getpriority 96 111 - #define __NR_setpriority 97 112 - #define __NR_profil 98 113 - #define __NR_statfs 99 114 - #define __NR_fstatfs 100 115 - #define __NR_ioperm 101 116 - #define __NR_socketcall 102 117 - #define __NR_syslog 103 118 - #define __NR_setitimer 104 119 - #define __NR_getitimer 105 120 - #define __NR_stat 106 
121 - #define __NR_lstat 107 122 - #define __NR_fstat 108 123 - #define __NR_olduname 109 124 - #define __NR_iopl 110 125 - #define __NR_vhangup 111 126 - #define __NR_idle 112 127 - #define __NR_vm86 113 128 - #define __NR_wait4 114 129 - #define __NR_swapoff 115 130 - #define __NR_sysinfo 116 131 - #define __NR_ipc 117 132 - #define __NR_fsync 118 133 - #define __NR_sigreturn 119 134 - #define __NR_clone 120 135 - #define __NR_setdomainname 121 136 - #define __NR_uname 122 137 - #define __NR_modify_ldt 123 138 - #define __NR_adjtimex 124 139 - #define __NR_mprotect 125 140 - #define __NR_sigprocmask 126 141 - #define __NR_create_module 127 142 - #define __NR_init_module 128 143 - #define __NR_delete_module 129 144 - #define __NR_get_kernel_syms 130 145 - #define __NR_quotactl 131 146 - #define __NR_getpgid 132 147 - #define __NR_fchdir 133 148 - #define __NR_bdflush 134 149 - #define __NR_sysfs 135 150 - #define __NR_personality 136 151 - #define __NR_afs_syscall 137 /* Syscall for Andrew File System */ 152 - #define __NR_setfsuid 138 153 - #define __NR_setfsgid 139 154 - #define __NR__llseek 140 155 - #define __NR_getdents 141 156 - #define __NR__newselect 142 157 - #define __NR_flock 143 158 - #define __NR_msync 144 159 - #define __NR_readv 145 160 - #define __NR_writev 146 161 - #define __NR_getsid 147 162 - #define __NR_fdatasync 148 163 - #define __NR__sysctl 149 164 - #define __NR_mlock 150 165 - #define __NR_munlock 151 166 - #define __NR_mlockall 152 167 - #define __NR_munlockall 153 168 - #define __NR_sched_setparam 154 169 - #define __NR_sched_getparam 155 170 - #define __NR_sched_setscheduler 156 171 - #define __NR_sched_getscheduler 157 172 - #define __NR_sched_yield 158 173 - #define __NR_sched_get_priority_max 159 174 - #define __NR_sched_get_priority_min 160 175 - #define __NR_sched_rr_get_interval 161 176 - #define __NR_nanosleep 162 177 - #define __NR_mremap 163 178 - #define __NR_setresuid 164 179 - #define __NR_getresuid 165 180 - #define __NR_query_module 166 181 - #define __NR_poll 167 182 - #define __NR_nfsservctl 168 183 - #define __NR_setresgid 169 184 - #define __NR_getresgid 170 185 - #define __NR_prctl 171 186 - #define __NR_rt_sigreturn 172 187 - #define __NR_rt_sigaction 173 188 - #define __NR_rt_sigprocmask 174 189 - #define __NR_rt_sigpending 175 190 - #define __NR_rt_sigtimedwait 176 191 - #define __NR_rt_sigqueueinfo 177 192 - #define __NR_rt_sigsuspend 178 193 - #define __NR_pread64 179 194 - #define __NR_pwrite64 180 195 - #define __NR_chown 181 196 - #define __NR_getcwd 182 197 - #define __NR_capget 183 198 - #define __NR_capset 184 199 - #define __NR_sigaltstack 185 200 - #define __NR_sendfile 186 201 - #define __NR_getpmsg 187 /* some people actually want streams */ 202 - #define __NR_putpmsg 188 /* some people actually want streams */ 203 - #define __NR_vfork 189 204 - #define __NR_ugetrlimit 190 /* SuS compliant getrlimit */ 205 - #define __NR_readahead 191 206 - #ifndef __powerpc64__ /* these are 32-bit only */ 207 - #define __NR_mmap2 192 208 - #define __NR_truncate64 193 209 - #define __NR_ftruncate64 194 210 - #define __NR_stat64 195 211 - #define __NR_lstat64 196 212 - #define __NR_fstat64 197 213 - #endif 214 - #define __NR_pciconfig_read 198 215 - #define __NR_pciconfig_write 199 216 - #define __NR_pciconfig_iobase 200 217 - #define __NR_multiplexer 201 218 - #define __NR_getdents64 202 219 - #define __NR_pivot_root 203 220 - #ifndef __powerpc64__ 221 - #define __NR_fcntl64 204 222 - #endif 223 - #define __NR_madvise 205 224 - #define 
__NR_mincore 206 225 - #define __NR_gettid 207 226 - #define __NR_tkill 208 227 - #define __NR_setxattr 209 228 - #define __NR_lsetxattr 210 229 - #define __NR_fsetxattr 211 230 - #define __NR_getxattr 212 231 - #define __NR_lgetxattr 213 232 - #define __NR_fgetxattr 214 233 - #define __NR_listxattr 215 234 - #define __NR_llistxattr 216 235 - #define __NR_flistxattr 217 236 - #define __NR_removexattr 218 237 - #define __NR_lremovexattr 219 238 - #define __NR_fremovexattr 220 239 - #define __NR_futex 221 240 - #define __NR_sched_setaffinity 222 241 - #define __NR_sched_getaffinity 223 242 - /* 224 currently unused */ 243 - #define __NR_tuxcall 225 244 - #ifndef __powerpc64__ 245 - #define __NR_sendfile64 226 246 - #endif 247 - #define __NR_io_setup 227 248 - #define __NR_io_destroy 228 249 - #define __NR_io_getevents 229 250 - #define __NR_io_submit 230 251 - #define __NR_io_cancel 231 252 - #define __NR_set_tid_address 232 253 - #define __NR_fadvise64 233 254 - #define __NR_exit_group 234 255 - #define __NR_lookup_dcookie 235 256 - #define __NR_epoll_create 236 257 - #define __NR_epoll_ctl 237 258 - #define __NR_epoll_wait 238 259 - #define __NR_remap_file_pages 239 260 - #define __NR_timer_create 240 261 - #define __NR_timer_settime 241 262 - #define __NR_timer_gettime 242 263 - #define __NR_timer_getoverrun 243 264 - #define __NR_timer_delete 244 265 - #define __NR_clock_settime 245 266 - #define __NR_clock_gettime 246 267 - #define __NR_clock_getres 247 268 - #define __NR_clock_nanosleep 248 269 - #define __NR_swapcontext 249 270 - #define __NR_tgkill 250 271 - #define __NR_utimes 251 272 - #define __NR_statfs64 252 273 - #define __NR_fstatfs64 253 274 - #ifndef __powerpc64__ 275 - #define __NR_fadvise64_64 254 276 - #endif 277 - #define __NR_rtas 255 278 - #define __NR_sys_debug_setcontext 256 279 - /* Number 257 is reserved for vserver */ 280 - #define __NR_migrate_pages 258 281 - #define __NR_mbind 259 282 - #define __NR_get_mempolicy 260 283 - #define __NR_set_mempolicy 261 284 - #define __NR_mq_open 262 285 - #define __NR_mq_unlink 263 286 - #define __NR_mq_timedsend 264 287 - #define __NR_mq_timedreceive 265 288 - #define __NR_mq_notify 266 289 - #define __NR_mq_getsetattr 267 290 - #define __NR_kexec_load 268 291 - #define __NR_add_key 269 292 - #define __NR_request_key 270 293 - #define __NR_keyctl 271 294 - #define __NR_waitid 272 295 - #define __NR_ioprio_set 273 296 - #define __NR_ioprio_get 274 297 - #define __NR_inotify_init 275 298 - #define __NR_inotify_add_watch 276 299 - #define __NR_inotify_rm_watch 277 300 - #define __NR_spu_run 278 301 - #define __NR_spu_create 279 302 - #define __NR_pselect6 280 303 - #define __NR_ppoll 281 304 - #define __NR_unshare 282 305 - #define __NR_splice 283 306 - #define __NR_tee 284 307 - #define __NR_vmsplice 285 308 - #define __NR_openat 286 309 - #define __NR_mkdirat 287 310 - #define __NR_mknodat 288 311 - #define __NR_fchownat 289 312 - #define __NR_futimesat 290 313 - #ifdef __powerpc64__ 314 - #define __NR_newfstatat 291 315 - #else 316 - #define __NR_fstatat64 291 317 - #endif 318 - #define __NR_unlinkat 292 319 - #define __NR_renameat 293 320 - #define __NR_linkat 294 321 - #define __NR_symlinkat 295 322 - #define __NR_readlinkat 296 323 - #define __NR_fchmodat 297 324 - #define __NR_faccessat 298 325 - #define __NR_get_robust_list 299 326 - #define __NR_set_robust_list 300 327 - #define __NR_move_pages 301 328 - #define __NR_getcpu 302 329 - #define __NR_epoll_pwait 303 330 - #define __NR_utimensat 304 331 - #define 
__NR_signalfd 305 332 - #define __NR_timerfd_create 306 333 - #define __NR_eventfd 307 334 - #define __NR_sync_file_range2 308 335 - #define __NR_fallocate 309 336 - #define __NR_subpage_prot 310 337 - #define __NR_timerfd_settime 311 338 - #define __NR_timerfd_gettime 312 339 - #define __NR_signalfd4 313 340 - #define __NR_eventfd2 314 341 - #define __NR_epoll_create1 315 342 - #define __NR_dup3 316 343 - #define __NR_pipe2 317 344 - #define __NR_inotify_init1 318 345 - #define __NR_perf_event_open 319 346 - #define __NR_preadv 320 347 - #define __NR_pwritev 321 348 - #define __NR_rt_tgsigqueueinfo 322 349 - #define __NR_fanotify_init 323 350 - #define __NR_fanotify_mark 324 351 - #define __NR_prlimit64 325 352 - #define __NR_socket 326 353 - #define __NR_bind 327 354 - #define __NR_connect 328 355 - #define __NR_listen 329 356 - #define __NR_accept 330 357 - #define __NR_getsockname 331 358 - #define __NR_getpeername 332 359 - #define __NR_socketpair 333 360 - #define __NR_send 334 361 - #define __NR_sendto 335 362 - #define __NR_recv 336 363 - #define __NR_recvfrom 337 364 - #define __NR_shutdown 338 365 - #define __NR_setsockopt 339 366 - #define __NR_getsockopt 340 367 - #define __NR_sendmsg 341 368 - #define __NR_recvmsg 342 369 - #define __NR_recvmmsg 343 370 - #define __NR_accept4 344 371 - #define __NR_name_to_handle_at 345 372 - #define __NR_open_by_handle_at 346 373 - #define __NR_clock_adjtime 347 374 - #define __NR_syncfs 348 375 - #define __NR_sendmmsg 349 376 - #define __NR_setns 350 377 - #define __NR_process_vm_readv 351 378 - #define __NR_process_vm_writev 352 379 - #define __NR_finit_module 353 380 - #define __NR_kcmp 354 381 - #define __NR_sched_setattr 355 382 - #define __NR_sched_getattr 356 383 - #define __NR_renameat2 357 384 - #define __NR_seccomp 358 385 - #define __NR_getrandom 359 386 - #define __NR_memfd_create 360 387 - #define __NR_bpf 361 388 - #define __NR_execveat 362 389 - #define __NR_switch_endian 363 390 - #define __NR_userfaultfd 364 391 - #define __NR_membarrier 365 392 - #define __NR_mlock2 378 393 - #define __NR_copy_file_range 379 394 - #define __NR_preadv2 380 395 - #define __NR_pwritev2 381 396 - #define __NR_kexec_file_load 382 397 - #define __NR_statx 383 398 - #define __NR_pkey_alloc 384 399 - #define __NR_pkey_free 385 400 - #define __NR_pkey_mprotect 386 401 - #define __NR_rseq 387 402 - #define __NR_io_pgetevents 388 403 - 404 - #endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */
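Note: the tools/ snapshot of powerpc's syscall list is dropped entirely here, presumably because the numbers are now generated from powerpc's syscall.tbl rather than mirrored as a header copy. For reference, the __NR_* constants above exist to be fed to syscall(2); a minimal sketch (SYS_gettid comes from libc's <sys/syscall.h>, not from this removed header):

/* Minimal sketch: invoking a system call by number, which is what the
 * __NR_* constants removed above are ultimately for. SYS_gettid
 * resolves to the architecture's __NR_gettid (207 on powerpc, per the
 * table above). */
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	long tid = syscall(SYS_gettid);

	printf("tid = %ld\n", tid);
	return 0;
}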
+25
tools/arch/riscv/include/uapi/asm/bitsperlong.h
··· 1 + /* 2 + * Copyright (C) 2012 ARM Ltd. 3 + * Copyright (C) 2015 Regents of the University of California 4 + * 5 + * This program is free software; you can redistribute it and/or modify 6 + * it under the terms of the GNU General Public License version 2 as 7 + * published by the Free Software Foundation. 8 + * 9 + * This program is distributed in the hope that it will be useful, 10 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 11 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 + * GNU General Public License for more details. 13 + * 14 + * You should have received a copy of the GNU General Public License 15 + * along with this program. If not, see <http://www.gnu.org/licenses/>. 16 + */ 17 + 18 + #ifndef _UAPI_ASM_RISCV_BITSPERLONG_H 19 + #define _UAPI_ASM_RISCV_BITSPERLONG_H 20 + 21 + #define __BITS_PER_LONG (__SIZEOF_POINTER__ * 8) 22 + 23 + #include <asm-generic/bitsperlong.h> 24 + 25 + #endif /* _UAPI_ASM_RISCV_BITSPERLONG_H */
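The new RISC-V header avoids hard-coding the word size: __SIZEOF_POINTER__ is predefined by GCC and Clang, so the same file serves both rv32 and rv64. A small sketch of the same trick outside the kernel:

/* __SIZEOF_POINTER__ is a compiler-predefined macro, so this tracks
 * the target ABI automatically (prints 64 on rv64, 32 on rv32). */
#include <stdio.h>

#define MY_BITS_PER_LONG (__SIZEOF_POINTER__ * 8)

int main(void)
{
	printf("%d bits per long\n", MY_BITS_PER_LONG);
	return 0;
}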
+2
tools/arch/x86/include/asm/cpufeatures.h
··· 281 281 #define X86_FEATURE_CLZERO (13*32+ 0) /* CLZERO instruction */ 282 282 #define X86_FEATURE_IRPERF (13*32+ 1) /* Instructions Retired Count */ 283 283 #define X86_FEATURE_XSAVEERPTR (13*32+ 2) /* Always save/restore FP error pointers */ 284 + #define X86_FEATURE_WBNOINVD (13*32+ 9) /* WBNOINVD instruction */ 284 285 #define X86_FEATURE_AMD_IBPB (13*32+12) /* "" Indirect Branch Prediction Barrier */ 285 286 #define X86_FEATURE_AMD_IBRS (13*32+14) /* "" Indirect Branch Restricted Speculation */ 286 287 #define X86_FEATURE_AMD_STIBP (13*32+15) /* "" Single Thread Indirect Branch Predictors */ 288 + #define X86_FEATURE_AMD_STIBP_ALWAYS_ON (13*32+17) /* "" Single Thread Indirect Branch Predictors always-on preferred */ 287 289 #define X86_FEATURE_AMD_SSBD (13*32+24) /* "" Speculative Store Bypass Disable */ 288 290 #define X86_FEATURE_VIRT_SSBD (13*32+25) /* Virtualized Speculative Store Bypass Disable */ 289 291 #define X86_FEATURE_AMD_SSB_NO (13*32+26) /* "" Speculative Store Bypass is fixed in hardware. */
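Each X86_FEATURE_* value packs a (cpuid word, bit) pair as word*32 + bit; word 13 holds the AMD CPUID 0x80000008 EBX leaf, so WBNOINVD is bit 9 there and AMD_STIBP_ALWAYS_ON bit 17. A sketch of the decoding:

/* Sketch: recovering the word/bit pair from an X86_FEATURE_* value. */
#include <stdio.h>

#define X86_FEATURE_WBNOINVD (13*32 + 9)

int main(void)
{
	unsigned int f = X86_FEATURE_WBNOINVD;

	printf("cpuid word %u, bit %u\n", f / 32, f % 32); /* 13, 9 */
	return 0;
}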
+7 -1
tools/arch/x86/include/asm/disabled-features.h
··· 16 16 # define DISABLE_MPX (1<<(X86_FEATURE_MPX & 31)) 17 17 #endif 18 18 19 + #ifdef CONFIG_X86_SMAP 20 + # define DISABLE_SMAP 0 21 + #else 22 + # define DISABLE_SMAP (1<<(X86_FEATURE_SMAP & 31)) 23 + #endif 24 + 19 25 #ifdef CONFIG_X86_INTEL_UMIP 20 26 # define DISABLE_UMIP 0 21 27 #else ··· 74 68 #define DISABLED_MASK6 0 75 69 #define DISABLED_MASK7 (DISABLE_PTI) 76 70 #define DISABLED_MASK8 0 77 - #define DISABLED_MASK9 (DISABLE_MPX) 71 + #define DISABLED_MASK9 (DISABLE_MPX|DISABLE_SMAP) 78 72 #define DISABLED_MASK10 0 79 73 #define DISABLED_MASK11 0 80 74 #define DISABLED_MASK12 0
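X86_FEATURE_SMAP lives in word 9 (CPUID leaf 7 EBX, bit 20), which is why the new DISABLE_SMAP bit lands in DISABLED_MASK9: with CONFIG_X86_SMAP off, feature checks constant-fold to false and the guarded code is compiled out. A standalone sketch of the pattern, with MY_CONFIG_SMAP standing in for the real config symbol:

/* Sketch of the disabled-features pattern: a config-off feature sets
 * its bit in a compile-time mask so runtime tests become dead code.
 * MY_CONFIG_SMAP is a stand-in for CONFIG_X86_SMAP. */
#include <stdio.h>

#define FEATURE_SMAP 20 /* bit 20 within its 32-bit word */

#ifdef MY_CONFIG_SMAP
# define DISABLE_SMAP 0
#else
# define DISABLE_SMAP (1u << (FEATURE_SMAP & 31))
#endif

static int feature_enabled(int bit, unsigned int hw_mask)
{
	if (DISABLE_SMAP & (1u << (bit & 31)))
		return 0; /* constant-folds away when disabled */
	return !!(hw_mask & (1u << (bit & 31)));
}

int main(void)
{
	printf("smap enabled: %d\n", feature_enabled(FEATURE_SMAP, ~0u));
	return 0;
}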
+8 -1
tools/bpf/bpftool/Makefile
··· 93 93 SRCS = $(filter-out $(BFD_SRCS),$(wildcard *.c)) 94 94 95 95 ifeq ($(feature-libbfd),1) 96 + LIBS += -lbfd -ldl -lopcodes 97 + else ifeq ($(feature-libbfd-liberty),1) 98 + LIBS += -lbfd -ldl -lopcodes -liberty 99 + else ifeq ($(feature-libbfd-liberty-z),1) 100 + LIBS += -lbfd -ldl -lopcodes -liberty -lz 101 + endif 102 + 103 + ifneq ($(filter -lbfd,$(LIBS)),) 96 104 CFLAGS += -DHAVE_LIBBFD_SUPPORT 97 105 SRCS += $(BFD_SRCS) 98 - LIBS += -lbfd -lopcodes 99 106 endif 100 107 101 108 OBJS = $(patsubst %.c,$(OUTPUT)%.o,$(SRCS)) $(OUTPUT)disasm.o
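Depending on how binutils was built, libbfd may additionally require -liberty and -lz, hence the three alternative link recipes; HAVE_LIBBFD_SUPPORT is now defined only when one of the probes actually succeeded. The feature-* probes are tiny C programs tried against each library set; a hedged sketch of that kind of probe (the real ones live under tools/build/feature):

/* Sketch of a libbfd link probe: if this compiles and links against a
 * candidate set of libraries (-lbfd [-liberty [-lz]]), that set wins. */
#include <bfd.h>

int main(void)
{
	bfd_init();
	return 0;
}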
+7 -6
tools/bpf/bpftool/btf_dumper.c
··· 82 82 int bits_to_copy; 83 83 __u64 print_num; 84 84 85 - data += BITS_ROUNDDOWN_BYTES(bit_offset); 86 - bit_offset = BITS_PER_BYTE_MASKED(bit_offset); 87 85 bits_to_copy = bit_offset + nr_bits; 88 86 bytes_to_copy = BITS_ROUNDUP_BYTES(bits_to_copy); 89 87 ··· 116 118 * BTF_INT_OFFSET() cannot exceed 64 bits. 117 119 */ 118 120 total_bits_offset = bit_offset + BTF_INT_OFFSET(int_type); 119 - btf_dumper_bitfield(nr_bits, total_bits_offset, data, jw, 121 + data += BITS_ROUNDDOWN_BYTES(total_bits_offset); 122 + bit_offset = BITS_PER_BYTE_MASKED(total_bits_offset); 123 + btf_dumper_bitfield(nr_bits, bit_offset, data, jw, 120 124 is_plain_text); 121 125 } 122 126 ··· 216 216 } 217 217 218 218 jsonw_name(d->jw, btf__name_by_offset(d->btf, m[i].name_off)); 219 + data_off = data + BITS_ROUNDDOWN_BYTES(bit_offset); 219 220 if (bitfield_size) { 220 - btf_dumper_bitfield(bitfield_size, bit_offset, 221 - data, d->jw, d->is_plain_text); 221 + btf_dumper_bitfield(bitfield_size, 222 + BITS_PER_BYTE_MASKED(bit_offset), 223 + data_off, d->jw, d->is_plain_text); 222 224 } else { 223 - data_off = data + BITS_ROUNDDOWN_BYTES(bit_offset); 224 225 ret = btf_dumper_do_type(d, m[i].type, 225 226 BITS_PER_BYTE_MASKED(bit_offset), 226 227 data_off);
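The bug was double-handling of the byte portion: btf_dumper_bitfield() no longer advances the data pointer itself; instead each caller reduces the total bit offset once, advancing data by the whole bytes and passing down only the 0-7 residual bits. The decomposition in isolation:

/* Sketch: splitting a total bit offset into a byte advance plus an
 * in-byte remainder, as the fixed callers above now do. */
#include <stdio.h>

#define BITS_PER_BYTE		8
#define BITS_ROUNDDOWN_BYTES(b)	((b) / BITS_PER_BYTE)
#define BITS_PER_BYTE_MASKED(b)	((b) & (BITS_PER_BYTE - 1))

int main(void)
{
	unsigned int total_bits_offset = 42;

	/* data advances 5 bytes; the residual bit_offset becomes 2 */
	printf("bytes=%u bits=%u\n",
	       BITS_ROUNDDOWN_BYTES(total_bits_offset),
	       BITS_PER_BYTE_MASKED(total_bits_offset));
	return 0;
}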
+1 -6
tools/bpf/bpftool/json_writer.c
··· 1 - // SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) 1 + // SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-2-Clause) 2 2 /* 3 3 * Simple streaming JSON writer 4 4 * 5 5 * This takes care of the annoying bits of JSON syntax like the commas 6 6 * after elements 7 - * 8 - * This program is free software; you can redistribute it and/or 9 - * modify it under the terms of the GNU General Public License 10 - * as published by the Free Software Foundation; either version 11 - * 2 of the License, or (at your option) any later version. 12 7 * 13 8 * Authors: Stephen Hemminger <stephen@networkplumber.org> 14 9 */
-5
tools/bpf/bpftool/json_writer.h
··· 5 5 * This takes care of the annoying bits of JSON syntax like the commas 6 6 * after elements 7 7 * 8 - * This program is free software; you can redistribute it and/or 9 - * modify it under the terms of the GNU General Public License 10 - * as published by the Free Software Foundation; either version 11 - * 2 of the License, or (at your option) any later version. 12 - * 13 8 * Authors: Stephen Hemminger <stephen@networkplumber.org> 14 9 */ 15 10
+3 -1
tools/include/uapi/asm-generic/unistd.h
··· 738 738 __SC_COMP(__NR_io_pgetevents, sys_io_pgetevents, compat_sys_io_pgetevents) 739 739 #define __NR_rseq 293 740 740 __SYSCALL(__NR_rseq, sys_rseq) 741 + #define __NR_kexec_file_load 294 742 + __SYSCALL(__NR_kexec_file_load, sys_kexec_file_load) 741 743 742 744 #undef __NR_syscalls 743 - #define __NR_syscalls 294 745 + #define __NR_syscalls 295 744 746 745 747 /* 746 748 * 32 bit systems traditionally used different
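kexec_file_load(2) becomes number 294 in the asm-generic table (arm64, riscv and the other asm-generic architectures), with __NR_syscalls bumped to match. A hedged usage sketch; the fds and command line are placeholders and the call requires CAP_SYS_BOOT:

/* Sketch: kexec_file_load(kernel_fd, initrd_fd, cmdline_len, cmdline,
 * flags), invoked by number. Placeholder fds; expect failure as-is. */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>

#ifndef __NR_kexec_file_load
#define __NR_kexec_file_load 294	/* asm-generic, per this update */
#endif

int main(void)
{
	int kernel_fd = -1, initrd_fd = -1;	/* placeholders */
	const char *cmdline = "console=ttyS0";

	if (syscall(__NR_kexec_file_load, kernel_fd, initrd_fd,
		    strlen(cmdline) + 1, cmdline, 0UL) != 0)
		perror("kexec_file_load");
	return 0;
}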
+2
tools/include/uapi/asm/bitsperlong.h
··· 13 13 #include "../../arch/mips/include/uapi/asm/bitsperlong.h" 14 14 #elif defined(__ia64__) 15 15 #include "../../arch/ia64/include/uapi/asm/bitsperlong.h" 16 + #elif defined(__riscv) 17 + #include "../../arch/riscv/include/uapi/asm/bitsperlong.h" 16 18 #else 17 19 #include <asm-generic/bitsperlong.h> 18 20 #endif
+8
tools/include/uapi/drm/i915_drm.h
··· 412 412 int irq_seq; 413 413 } drm_i915_irq_wait_t; 414 414 415 + /* 416 + * Different modes of per-process Graphics Translation Table, 417 + * see I915_PARAM_HAS_ALIASING_PPGTT 418 + */ 419 + #define I915_GEM_PPGTT_NONE 0 420 + #define I915_GEM_PPGTT_ALIASING 1 421 + #define I915_GEM_PPGTT_FULL 2 422 + 415 423 /* Ioctl to query kernel params: 416 424 */ 417 425 #define I915_PARAM_IRQ_ACTIVE 1
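The three I915_GEM_PPGTT_* constants name the values that the I915_PARAM_HAS_ALIASING_PPGTT query can return. A hedged sketch of the query, assuming an already-open i915 DRM fd and the usual libdrm header paths:

/* Sketch: asking i915 which per-process GTT mode is active
 * (0 = none, 1 = aliasing, 2 = full); error handling elided. */
#include <stdio.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static void print_ppgtt_mode(int drm_fd)
{
	int value = 0;
	struct drm_i915_getparam gp = {
		.param = I915_PARAM_HAS_ALIASING_PPGTT,
		.value = &value,
	};

	if (ioctl(drm_fd, DRM_IOCTL_I915_GETPARAM, &gp) == 0)
		printf("ppgtt mode: %d\n", value);
}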
+8 -52
tools/include/uapi/linux/fs.h
··· 14 14 #include <linux/ioctl.h> 15 15 #include <linux/types.h> 16 16 17 + /* Use of MS_* flags within the kernel is restricted to core mount(2) code. */ 18 + #if !defined(__KERNEL__) 19 + #include <linux/mount.h> 20 + #endif 21 + 17 22 /* 18 23 * It's silly to have NR_OPEN bigger than NR_FILE, but you can change 19 24 * the file limit at runtime and only root can increase the per-process ··· 105 100 106 101 107 102 #define NR_FILE 8192 /* this can well be larger on a larger system */ 108 - 109 - 110 - /* 111 - * These are the fs-independent mount-flags: up to 32 flags are supported 112 - */ 113 - #define MS_RDONLY 1 /* Mount read-only */ 114 - #define MS_NOSUID 2 /* Ignore suid and sgid bits */ 115 - #define MS_NODEV 4 /* Disallow access to device special files */ 116 - #define MS_NOEXEC 8 /* Disallow program execution */ 117 - #define MS_SYNCHRONOUS 16 /* Writes are synced at once */ 118 - #define MS_REMOUNT 32 /* Alter flags of a mounted FS */ 119 - #define MS_MANDLOCK 64 /* Allow mandatory locks on an FS */ 120 - #define MS_DIRSYNC 128 /* Directory modifications are synchronous */ 121 - #define MS_NOATIME 1024 /* Do not update access times. */ 122 - #define MS_NODIRATIME 2048 /* Do not update directory access times */ 123 - #define MS_BIND 4096 124 - #define MS_MOVE 8192 125 - #define MS_REC 16384 126 - #define MS_VERBOSE 32768 /* War is peace. Verbosity is silence. 127 - MS_VERBOSE is deprecated. */ 128 - #define MS_SILENT 32768 129 - #define MS_POSIXACL (1<<16) /* VFS does not apply the umask */ 130 - #define MS_UNBINDABLE (1<<17) /* change to unbindable */ 131 - #define MS_PRIVATE (1<<18) /* change to private */ 132 - #define MS_SLAVE (1<<19) /* change to slave */ 133 - #define MS_SHARED (1<<20) /* change to shared */ 134 - #define MS_RELATIME (1<<21) /* Update atime relative to mtime/ctime. */ 135 - #define MS_KERNMOUNT (1<<22) /* this is a kern_mount call */ 136 - #define MS_I_VERSION (1<<23) /* Update inode I_version field */ 137 - #define MS_STRICTATIME (1<<24) /* Always perform atime updates */ 138 - #define MS_LAZYTIME (1<<25) /* Update the on-disk [acm]times lazily */ 139 - 140 - /* These sb flags are internal to the kernel */ 141 - #define MS_SUBMOUNT (1<<26) 142 - #define MS_NOREMOTELOCK (1<<27) 143 - #define MS_NOSEC (1<<28) 144 - #define MS_BORN (1<<29) 145 - #define MS_ACTIVE (1<<30) 146 - #define MS_NOUSER (1<<31) 147 - 148 - /* 149 - * Superblock flags that can be altered by MS_REMOUNT 150 - */ 151 - #define MS_RMT_MASK (MS_RDONLY|MS_SYNCHRONOUS|MS_MANDLOCK|MS_I_VERSION|\ 152 - MS_LAZYTIME) 153 - 154 - /* 155 - * Old magic mount flag and mask 156 - */ 157 - #define MS_MGC_VAL 0xC0ED0000 158 - #define MS_MGC_MSK 0xffff0000 159 103 160 104 /* 161 105 * Structure for FS_IOC_FSGETXATTR[A] and FS_IOC_FSSETXATTR. ··· 223 269 #define FS_POLICY_FLAGS_PAD_16 0x02 224 270 #define FS_POLICY_FLAGS_PAD_32 0x03 225 271 #define FS_POLICY_FLAGS_PAD_MASK 0x03 226 - #define FS_POLICY_FLAGS_VALID 0x03 272 + #define FS_POLICY_FLAG_DIRECT_KEY 0x04 /* use master key directly */ 273 + #define FS_POLICY_FLAGS_VALID 0x07 227 274 228 275 /* Encryption algorithms */ 229 276 #define FS_ENCRYPTION_MODE_INVALID 0 ··· 236 281 #define FS_ENCRYPTION_MODE_AES_128_CTS 6 237 282 #define FS_ENCRYPTION_MODE_SPECK128_256_XTS 7 /* Removed, do not use. */ 238 283 #define FS_ENCRYPTION_MODE_SPECK128_256_CTS 8 /* Removed, do not use. */ 284 + #define FS_ENCRYPTION_MODE_ADIANTUM 9 239 285 240 286 struct fscrypt_policy { 241 287 __u8 version;
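Two separate changes meet here: the MS_* mount flags move to the new <linux/mount.h> (pulled back in for userspace by the #include added above), and fscrypt gains the Adiantum cipher (mode 9) plus the FS_POLICY_FLAG_DIRECT_KEY flag it is designed to be used with. A hedged sketch of setting such a policy on a directory, key setup elided:

/* Sketch: requesting Adiantum with a direct key via
 * FS_IOC_SET_ENCRYPTION_POLICY; dir_fd and the 8-byte key descriptor
 * are assumed to be prepared by the caller. */
#include <string.h>
#include <sys/ioctl.h>
#include <linux/fs.h>

static int set_adiantum_policy(int dir_fd, const __u8 desc[8])
{
	struct fscrypt_policy pol;

	memset(&pol, 0, sizeof(pol));
	pol.contents_encryption_mode  = FS_ENCRYPTION_MODE_ADIANTUM;
	pol.filenames_encryption_mode = FS_ENCRYPTION_MODE_ADIANTUM;
	pol.flags = FS_POLICY_FLAG_DIRECT_KEY;
	memcpy(pol.master_key_descriptor, desc, 8);

	return ioctl(dir_fd, FS_IOC_SET_ENCRYPTION_POLICY, &pol);
}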
+19
tools/include/uapi/linux/if_link.h
··· 288 288 IFLA_BR_MCAST_IGMP_VERSION, 289 289 IFLA_BR_MCAST_MLD_VERSION, 290 290 IFLA_BR_VLAN_STATS_PER_PORT, 291 + IFLA_BR_MULTI_BOOLOPT, 291 292 __IFLA_BR_MAX, 292 293 }; 293 294 ··· 534 533 IFLA_VXLAN_LABEL, 535 534 IFLA_VXLAN_GPE, 536 535 IFLA_VXLAN_TTL_INHERIT, 536 + IFLA_VXLAN_DF, 537 537 __IFLA_VXLAN_MAX 538 538 }; 539 539 #define IFLA_VXLAN_MAX (__IFLA_VXLAN_MAX - 1) ··· 542 540 struct ifla_vxlan_port_range { 543 541 __be16 low; 544 542 __be16 high; 543 + }; 544 + 545 + enum ifla_vxlan_df { 546 + VXLAN_DF_UNSET = 0, 547 + VXLAN_DF_SET, 548 + VXLAN_DF_INHERIT, 549 + __VXLAN_DF_END, 550 + VXLAN_DF_MAX = __VXLAN_DF_END - 1, 545 551 }; 546 552 547 553 /* GENEVE section */ ··· 567 557 IFLA_GENEVE_UDP_ZERO_CSUM6_RX, 568 558 IFLA_GENEVE_LABEL, 569 559 IFLA_GENEVE_TTL_INHERIT, 560 + IFLA_GENEVE_DF, 570 561 __IFLA_GENEVE_MAX 571 562 }; 572 563 #define IFLA_GENEVE_MAX (__IFLA_GENEVE_MAX - 1) 564 + 565 + enum ifla_geneve_df { 566 + GENEVE_DF_UNSET = 0, 567 + GENEVE_DF_SET, 568 + GENEVE_DF_INHERIT, 569 + __GENEVE_DF_END, 570 + GENEVE_DF_MAX = __GENEVE_DF_END - 1, 571 + }; 573 572 574 573 /* PPP section */ 575 574 enum {
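VXLAN and GENEVE each gain a tri-state "don't fragment" attribute for the outer header, with identical semantics. A small sketch of what the new enum values mean:

/* Sketch: values carried by the new IFLA_VXLAN_DF netlink attribute
 * (ifla_geneve_df mirrors it for GENEVE). */
#include <stdio.h>

enum ifla_vxlan_df {
	VXLAN_DF_UNSET = 0,	/* keep the default behaviour */
	VXLAN_DF_SET,		/* always set DF on the outer header */
	VXLAN_DF_INHERIT,	/* copy DF from the inner IPv4 header */
};

int main(void)
{
	static const char * const names[] = { "unset", "set", "inherit" };

	printf("df policy: %s\n", names[VXLAN_DF_INHERIT]);
	return 0;
}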
+7 -3
tools/include/uapi/linux/in.h
··· 266 266 267 267 #define IN_CLASSD(a) ((((long int) (a)) & 0xf0000000) == 0xe0000000) 268 268 #define IN_MULTICAST(a) IN_CLASSD(a) 269 - #define IN_MULTICAST_NET 0xF0000000 269 + #define IN_MULTICAST_NET 0xe0000000 270 270 271 - #define IN_EXPERIMENTAL(a) ((((long int) (a)) & 0xf0000000) == 0xf0000000) 272 - #define IN_BADCLASS(a) IN_EXPERIMENTAL((a)) 271 + #define IN_BADCLASS(a) ((((long int) (a) ) == 0xffffffff) 272 + #define IN_EXPERIMENTAL(a) IN_BADCLASS((a)) 273 + 274 + #define IN_CLASSE(a) ((((long int) (a)) & 0xf0000000) == 0xf0000000) 275 + #define IN_CLASSE_NET 0xffffffff 276 + #define IN_CLASSE_NSHIFT 0 273 277 274 278 /* Address to accept any incoming messages. */ 275 279 #define INADDR_ANY ((unsigned long int) 0x00000000)
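The fix: class D (multicast) starts at 0xe0000000, so the old IN_MULTICAST_NET was simply wrong; IN_BADCLASS is redefined to mean the limited-broadcast address 255.255.255.255 (with IN_EXPERIMENTAL kept as an alias), and explicit class E macros are added. Note the IN_BADCLASS body as merged appears to be short one closing parenthesis; the sketch below uses a balanced form:

/* Sketch: exercising the corrected address-class checks (IN_BADCLASS
 * rewritten here with balanced parentheses). */
#include <stdio.h>

#define IN_CLASSD(a)	((((long int) (a)) & 0xf0000000) == 0xe0000000)
#define IN_CLASSE(a)	((((long int) (a)) & 0xf0000000) == 0xf0000000)
#define IN_BADCLASS(a)	(((long int) (a)) == (long int) 0xffffffff)

int main(void)
{
	printf("224.0.0.1 multicast: %d\n", IN_CLASSD(0xe0000001));    /* 1 */
	printf("240.0.0.1 class E:   %d\n", IN_CLASSE(0xf0000001));    /* 1 */
	printf("255.255.255.255 bad: %d\n", IN_BADCLASS(0xffffffff));  /* 1 */
	return 0;
}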
+19
tools/include/uapi/linux/kvm.h
··· 492 492 }; 493 493 }; 494 494 495 + /* for KVM_CLEAR_DIRTY_LOG */ 496 + struct kvm_clear_dirty_log { 497 + __u32 slot; 498 + __u32 num_pages; 499 + __u64 first_page; 500 + union { 501 + void __user *dirty_bitmap; /* one bit per page */ 502 + __u64 padding2; 503 + }; 504 + }; 505 + 495 506 /* for KVM_SET_SIGNAL_MASK */ 496 507 struct kvm_signal_mask { 497 508 __u32 len; ··· 986 975 #define KVM_CAP_HYPERV_ENLIGHTENED_VMCS 163 987 976 #define KVM_CAP_EXCEPTION_PAYLOAD 164 988 977 #define KVM_CAP_ARM_VM_IPA_SIZE 165 978 + #define KVM_CAP_MANUAL_DIRTY_LOG_PROTECT 166 979 + #define KVM_CAP_HYPERV_CPUID 167 989 980 990 981 #ifdef KVM_CAP_IRQ_ROUTING 991 982 ··· 1433 1420 /* Available with KVM_CAP_NESTED_STATE */ 1434 1421 #define KVM_GET_NESTED_STATE _IOWR(KVMIO, 0xbe, struct kvm_nested_state) 1435 1422 #define KVM_SET_NESTED_STATE _IOW(KVMIO, 0xbf, struct kvm_nested_state) 1423 + 1424 + /* Available with KVM_CAP_MANUAL_DIRTY_LOG_PROTECT */ 1425 + #define KVM_CLEAR_DIRTY_LOG _IOWR(KVMIO, 0xc0, struct kvm_clear_dirty_log) 1426 + 1427 + /* Available with KVM_CAP_HYPERV_CPUID */ 1428 + #define KVM_GET_SUPPORTED_HV_CPUID _IOWR(KVMIO, 0xc1, struct kvm_cpuid2) 1436 1429 1437 1430 /* Secure Encrypted Virtualization command */ 1438 1431 enum sev_cmd_id {
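With KVM_CAP_MANUAL_DIRTY_LOG_PROTECT enabled, KVM_GET_DIRTY_LOG stops write-protecting pages itself; userspace re-arms dirty tracking explicitly, per first_page/num_pages range, with the new ioctl. A hedged sketch assuming a set-up VM fd and a fetched bitmap:

/* Sketch: re-protect ("clear") one 64-page range of memslot 0's dirty
 * log; the capability must have been enabled via KVM_ENABLE_CAP and
 * the bitmap filled by a prior KVM_GET_DIRTY_LOG. */
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int clear_dirty_range(int vm_fd, void *bitmap)
{
	struct kvm_clear_dirty_log clr = {
		.slot = 0,
		.first_page = 0,
		.num_pages = 64,
		.dirty_bitmap = bitmap,
	};

	return ioctl(vm_fd, KVM_CLEAR_DIRTY_LOG, &clr);
}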
+58
tools/include/uapi/linux/mount.h
··· 1 + #ifndef _UAPI_LINUX_MOUNT_H 2 + #define _UAPI_LINUX_MOUNT_H 3 + 4 + /* 5 + * These are the fs-independent mount-flags: up to 32 flags are supported 6 + * 7 + * Usage of these is restricted within the kernel to core mount(2) code and 8 + * callers of sys_mount() only. Filesystems should be using the SB_* 9 + * equivalent instead. 10 + */ 11 + #define MS_RDONLY 1 /* Mount read-only */ 12 + #define MS_NOSUID 2 /* Ignore suid and sgid bits */ 13 + #define MS_NODEV 4 /* Disallow access to device special files */ 14 + #define MS_NOEXEC 8 /* Disallow program execution */ 15 + #define MS_SYNCHRONOUS 16 /* Writes are synced at once */ 16 + #define MS_REMOUNT 32 /* Alter flags of a mounted FS */ 17 + #define MS_MANDLOCK 64 /* Allow mandatory locks on an FS */ 18 + #define MS_DIRSYNC 128 /* Directory modifications are synchronous */ 19 + #define MS_NOATIME 1024 /* Do not update access times. */ 20 + #define MS_NODIRATIME 2048 /* Do not update directory access times */ 21 + #define MS_BIND 4096 22 + #define MS_MOVE 8192 23 + #define MS_REC 16384 24 + #define MS_VERBOSE 32768 /* War is peace. Verbosity is silence. 25 + MS_VERBOSE is deprecated. */ 26 + #define MS_SILENT 32768 27 + #define MS_POSIXACL (1<<16) /* VFS does not apply the umask */ 28 + #define MS_UNBINDABLE (1<<17) /* change to unbindable */ 29 + #define MS_PRIVATE (1<<18) /* change to private */ 30 + #define MS_SLAVE (1<<19) /* change to slave */ 31 + #define MS_SHARED (1<<20) /* change to shared */ 32 + #define MS_RELATIME (1<<21) /* Update atime relative to mtime/ctime. */ 33 + #define MS_KERNMOUNT (1<<22) /* this is a kern_mount call */ 34 + #define MS_I_VERSION (1<<23) /* Update inode I_version field */ 35 + #define MS_STRICTATIME (1<<24) /* Always perform atime updates */ 36 + #define MS_LAZYTIME (1<<25) /* Update the on-disk [acm]times lazily */ 37 + 38 + /* These sb flags are internal to the kernel */ 39 + #define MS_SUBMOUNT (1<<26) 40 + #define MS_NOREMOTELOCK (1<<27) 41 + #define MS_NOSEC (1<<28) 42 + #define MS_BORN (1<<29) 43 + #define MS_ACTIVE (1<<30) 44 + #define MS_NOUSER (1<<31) 45 + 46 + /* 47 + * Superblock flags that can be altered by MS_REMOUNT 48 + */ 49 + #define MS_RMT_MASK (MS_RDONLY|MS_SYNCHRONOUS|MS_MANDLOCK|MS_I_VERSION|\ 50 + MS_LAZYTIME) 51 + 52 + /* 53 + * Old magic mount flag and mask 54 + */ 55 + #define MS_MGC_VAL 0xC0ED0000 56 + #define MS_MGC_MSK 0xffff0000 57 + 58 + #endif /* _UAPI_LINUX_MOUNT_H */
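The MS_* flags now have a uapi home of their own; consumers keep reaching them through <linux/fs.h> (see the include added there) or, in userspace, through libc's <sys/mount.h>. Classic usage for reference:

/* Sketch: MS_* flags as consumed by mount(2). Mounts a nosuid/nodev
 * tmpfs; needs CAP_SYS_ADMIN and an existing target directory. */
#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	if (mount("none", "/mnt/scratch", "tmpfs",
		  MS_NOSUID | MS_NODEV, "size=16m") != 0)
		perror("mount");
	return 0;
}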
+1163
tools/include/uapi/linux/pkt_sched.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ 2 + #ifndef __LINUX_PKT_SCHED_H 3 + #define __LINUX_PKT_SCHED_H 4 + 5 + #include <linux/types.h> 6 + 7 + /* Logical priority bands not depending on specific packet scheduler. 8 + Every scheduler will map them to real traffic classes, if it has 9 + no more precise mechanism to classify packets. 10 + 11 + These numbers have no special meaning, though their coincidence 12 + with obsolete IPv6 values is not occasional :-). New IPv6 drafts 13 + preferred full anarchy inspired by diffserv group. 14 + 15 + Note: TC_PRIO_BESTEFFORT does not mean that it is the most unhappy 16 + class, actually, as rule it will be handled with more care than 17 + filler or even bulk. 18 + */ 19 + 20 + #define TC_PRIO_BESTEFFORT 0 21 + #define TC_PRIO_FILLER 1 22 + #define TC_PRIO_BULK 2 23 + #define TC_PRIO_INTERACTIVE_BULK 4 24 + #define TC_PRIO_INTERACTIVE 6 25 + #define TC_PRIO_CONTROL 7 26 + 27 + #define TC_PRIO_MAX 15 28 + 29 + /* Generic queue statistics, available for all the elements. 30 + Particular schedulers may have also their private records. 31 + */ 32 + 33 + struct tc_stats { 34 + __u64 bytes; /* Number of enqueued bytes */ 35 + __u32 packets; /* Number of enqueued packets */ 36 + __u32 drops; /* Packets dropped because of lack of resources */ 37 + __u32 overlimits; /* Number of throttle events when this 38 + * flow goes out of allocated bandwidth */ 39 + __u32 bps; /* Current flow byte rate */ 40 + __u32 pps; /* Current flow packet rate */ 41 + __u32 qlen; 42 + __u32 backlog; 43 + }; 44 + 45 + struct tc_estimator { 46 + signed char interval; 47 + unsigned char ewma_log; 48 + }; 49 + 50 + /* "Handles" 51 + --------- 52 + 53 + All the traffic control objects have 32bit identifiers, or "handles". 54 + 55 + They can be considered as opaque numbers from user API viewpoint, 56 + but actually they always consist of two fields: major and 57 + minor numbers, which are interpreted by kernel specially, 58 + that may be used by applications, though not recommended. 59 + 60 + F.e. qdisc handles always have minor number equal to zero, 61 + classes (or flows) have major equal to parent qdisc major, and 62 + minor uniquely identifying class inside qdisc. 
63 + 64 + Macros to manipulate handles: 65 + */ 66 + 67 + #define TC_H_MAJ_MASK (0xFFFF0000U) 68 + #define TC_H_MIN_MASK (0x0000FFFFU) 69 + #define TC_H_MAJ(h) ((h)&TC_H_MAJ_MASK) 70 + #define TC_H_MIN(h) ((h)&TC_H_MIN_MASK) 71 + #define TC_H_MAKE(maj,min) (((maj)&TC_H_MAJ_MASK)|((min)&TC_H_MIN_MASK)) 72 + 73 + #define TC_H_UNSPEC (0U) 74 + #define TC_H_ROOT (0xFFFFFFFFU) 75 + #define TC_H_INGRESS (0xFFFFFFF1U) 76 + #define TC_H_CLSACT TC_H_INGRESS 77 + 78 + #define TC_H_MIN_PRIORITY 0xFFE0U 79 + #define TC_H_MIN_INGRESS 0xFFF2U 80 + #define TC_H_MIN_EGRESS 0xFFF3U 81 + 82 + /* Need to corrospond to iproute2 tc/tc_core.h "enum link_layer" */ 83 + enum tc_link_layer { 84 + TC_LINKLAYER_UNAWARE, /* Indicate unaware old iproute2 util */ 85 + TC_LINKLAYER_ETHERNET, 86 + TC_LINKLAYER_ATM, 87 + }; 88 + #define TC_LINKLAYER_MASK 0x0F /* limit use to lower 4 bits */ 89 + 90 + struct tc_ratespec { 91 + unsigned char cell_log; 92 + __u8 linklayer; /* lower 4 bits */ 93 + unsigned short overhead; 94 + short cell_align; 95 + unsigned short mpu; 96 + __u32 rate; 97 + }; 98 + 99 + #define TC_RTAB_SIZE 1024 100 + 101 + struct tc_sizespec { 102 + unsigned char cell_log; 103 + unsigned char size_log; 104 + short cell_align; 105 + int overhead; 106 + unsigned int linklayer; 107 + unsigned int mpu; 108 + unsigned int mtu; 109 + unsigned int tsize; 110 + }; 111 + 112 + enum { 113 + TCA_STAB_UNSPEC, 114 + TCA_STAB_BASE, 115 + TCA_STAB_DATA, 116 + __TCA_STAB_MAX 117 + }; 118 + 119 + #define TCA_STAB_MAX (__TCA_STAB_MAX - 1) 120 + 121 + /* FIFO section */ 122 + 123 + struct tc_fifo_qopt { 124 + __u32 limit; /* Queue length: bytes for bfifo, packets for pfifo */ 125 + }; 126 + 127 + /* SKBPRIO section */ 128 + 129 + /* 130 + * Priorities go from zero to (SKBPRIO_MAX_PRIORITY - 1). 131 + * SKBPRIO_MAX_PRIORITY should be at least 64 in order for skbprio to be able 132 + * to map one to one the DS field of IPV4 and IPV6 headers. 133 + * Memory allocation grows linearly with SKBPRIO_MAX_PRIORITY. 134 + */ 135 + 136 + #define SKBPRIO_MAX_PRIORITY 64 137 + 138 + struct tc_skbprio_qopt { 139 + __u32 limit; /* Queue length in packets. */ 140 + }; 141 + 142 + /* PRIO section */ 143 + 144 + #define TCQ_PRIO_BANDS 16 145 + #define TCQ_MIN_PRIO_BANDS 2 146 + 147 + struct tc_prio_qopt { 148 + int bands; /* Number of bands */ 149 + __u8 priomap[TC_PRIO_MAX+1]; /* Map: logical priority -> PRIO band */ 150 + }; 151 + 152 + /* MULTIQ section */ 153 + 154 + struct tc_multiq_qopt { 155 + __u16 bands; /* Number of bands */ 156 + __u16 max_bands; /* Maximum number of queues */ 157 + }; 158 + 159 + /* PLUG section */ 160 + 161 + #define TCQ_PLUG_BUFFER 0 162 + #define TCQ_PLUG_RELEASE_ONE 1 163 + #define TCQ_PLUG_RELEASE_INDEFINITE 2 164 + #define TCQ_PLUG_LIMIT 3 165 + 166 + struct tc_plug_qopt { 167 + /* TCQ_PLUG_BUFFER: Inset a plug into the queue and 168 + * buffer any incoming packets 169 + * TCQ_PLUG_RELEASE_ONE: Dequeue packets from queue head 170 + * to beginning of the next plug. 171 + * TCQ_PLUG_RELEASE_INDEFINITE: Dequeue all packets from queue. 172 + * Stop buffering packets until the next TCQ_PLUG_BUFFER 173 + * command is received (just act as a pass-thru queue). 
174 + * TCQ_PLUG_LIMIT: Increase/decrease queue size 175 + */ 176 + int action; 177 + __u32 limit; 178 + }; 179 + 180 + /* TBF section */ 181 + 182 + struct tc_tbf_qopt { 183 + struct tc_ratespec rate; 184 + struct tc_ratespec peakrate; 185 + __u32 limit; 186 + __u32 buffer; 187 + __u32 mtu; 188 + }; 189 + 190 + enum { 191 + TCA_TBF_UNSPEC, 192 + TCA_TBF_PARMS, 193 + TCA_TBF_RTAB, 194 + TCA_TBF_PTAB, 195 + TCA_TBF_RATE64, 196 + TCA_TBF_PRATE64, 197 + TCA_TBF_BURST, 198 + TCA_TBF_PBURST, 199 + TCA_TBF_PAD, 200 + __TCA_TBF_MAX, 201 + }; 202 + 203 + #define TCA_TBF_MAX (__TCA_TBF_MAX - 1) 204 + 205 + 206 + /* TEQL section */ 207 + 208 + /* TEQL does not require any parameters */ 209 + 210 + /* SFQ section */ 211 + 212 + struct tc_sfq_qopt { 213 + unsigned quantum; /* Bytes per round allocated to flow */ 214 + int perturb_period; /* Period of hash perturbation */ 215 + __u32 limit; /* Maximal packets in queue */ 216 + unsigned divisor; /* Hash divisor */ 217 + unsigned flows; /* Maximal number of flows */ 218 + }; 219 + 220 + struct tc_sfqred_stats { 221 + __u32 prob_drop; /* Early drops, below max threshold */ 222 + __u32 forced_drop; /* Early drops, after max threshold */ 223 + __u32 prob_mark; /* Marked packets, below max threshold */ 224 + __u32 forced_mark; /* Marked packets, after max threshold */ 225 + __u32 prob_mark_head; /* Marked packets, below max threshold */ 226 + __u32 forced_mark_head;/* Marked packets, after max threshold */ 227 + }; 228 + 229 + struct tc_sfq_qopt_v1 { 230 + struct tc_sfq_qopt v0; 231 + unsigned int depth; /* max number of packets per flow */ 232 + unsigned int headdrop; 233 + /* SFQRED parameters */ 234 + __u32 limit; /* HARD maximal flow queue length (bytes) */ 235 + __u32 qth_min; /* Min average length threshold (bytes) */ 236 + __u32 qth_max; /* Max average length threshold (bytes) */ 237 + unsigned char Wlog; /* log(W) */ 238 + unsigned char Plog; /* log(P_max/(qth_max-qth_min)) */ 239 + unsigned char Scell_log; /* cell size for idle damping */ 240 + unsigned char flags; 241 + __u32 max_P; /* probability, high resolution */ 242 + /* SFQRED stats */ 243 + struct tc_sfqred_stats stats; 244 + }; 245 + 246 + 247 + struct tc_sfq_xstats { 248 + __s32 allot; 249 + }; 250 + 251 + /* RED section */ 252 + 253 + enum { 254 + TCA_RED_UNSPEC, 255 + TCA_RED_PARMS, 256 + TCA_RED_STAB, 257 + TCA_RED_MAX_P, 258 + __TCA_RED_MAX, 259 + }; 260 + 261 + #define TCA_RED_MAX (__TCA_RED_MAX - 1) 262 + 263 + struct tc_red_qopt { 264 + __u32 limit; /* HARD maximal queue length (bytes) */ 265 + __u32 qth_min; /* Min average length threshold (bytes) */ 266 + __u32 qth_max; /* Max average length threshold (bytes) */ 267 + unsigned char Wlog; /* log(W) */ 268 + unsigned char Plog; /* log(P_max/(qth_max-qth_min)) */ 269 + unsigned char Scell_log; /* cell size for idle damping */ 270 + unsigned char flags; 271 + #define TC_RED_ECN 1 272 + #define TC_RED_HARDDROP 2 273 + #define TC_RED_ADAPTATIVE 4 274 + }; 275 + 276 + struct tc_red_xstats { 277 + __u32 early; /* Early drops */ 278 + __u32 pdrop; /* Drops due to queue limits */ 279 + __u32 other; /* Drops due to drop() calls */ 280 + __u32 marked; /* Marked packets */ 281 + }; 282 + 283 + /* GRED section */ 284 + 285 + #define MAX_DPs 16 286 + 287 + enum { 288 + TCA_GRED_UNSPEC, 289 + TCA_GRED_PARMS, 290 + TCA_GRED_STAB, 291 + TCA_GRED_DPS, 292 + TCA_GRED_MAX_P, 293 + TCA_GRED_LIMIT, 294 + TCA_GRED_VQ_LIST, /* nested TCA_GRED_VQ_ENTRY */ 295 + __TCA_GRED_MAX, 296 + }; 297 + 298 + #define TCA_GRED_MAX (__TCA_GRED_MAX - 1) 299 + 300 + enum 
{ 301 + TCA_GRED_VQ_ENTRY_UNSPEC, 302 + TCA_GRED_VQ_ENTRY, /* nested TCA_GRED_VQ_* */ 303 + __TCA_GRED_VQ_ENTRY_MAX, 304 + }; 305 + #define TCA_GRED_VQ_ENTRY_MAX (__TCA_GRED_VQ_ENTRY_MAX - 1) 306 + 307 + enum { 308 + TCA_GRED_VQ_UNSPEC, 309 + TCA_GRED_VQ_PAD, 310 + TCA_GRED_VQ_DP, /* u32 */ 311 + TCA_GRED_VQ_STAT_BYTES, /* u64 */ 312 + TCA_GRED_VQ_STAT_PACKETS, /* u32 */ 313 + TCA_GRED_VQ_STAT_BACKLOG, /* u32 */ 314 + TCA_GRED_VQ_STAT_PROB_DROP, /* u32 */ 315 + TCA_GRED_VQ_STAT_PROB_MARK, /* u32 */ 316 + TCA_GRED_VQ_STAT_FORCED_DROP, /* u32 */ 317 + TCA_GRED_VQ_STAT_FORCED_MARK, /* u32 */ 318 + TCA_GRED_VQ_STAT_PDROP, /* u32 */ 319 + TCA_GRED_VQ_STAT_OTHER, /* u32 */ 320 + TCA_GRED_VQ_FLAGS, /* u32 */ 321 + __TCA_GRED_VQ_MAX 322 + }; 323 + 324 + #define TCA_GRED_VQ_MAX (__TCA_GRED_VQ_MAX - 1) 325 + 326 + struct tc_gred_qopt { 327 + __u32 limit; /* HARD maximal queue length (bytes) */ 328 + __u32 qth_min; /* Min average length threshold (bytes) */ 329 + __u32 qth_max; /* Max average length threshold (bytes) */ 330 + __u32 DP; /* up to 2^32 DPs */ 331 + __u32 backlog; 332 + __u32 qave; 333 + __u32 forced; 334 + __u32 early; 335 + __u32 other; 336 + __u32 pdrop; 337 + __u8 Wlog; /* log(W) */ 338 + __u8 Plog; /* log(P_max/(qth_max-qth_min)) */ 339 + __u8 Scell_log; /* cell size for idle damping */ 340 + __u8 prio; /* prio of this VQ */ 341 + __u32 packets; 342 + __u32 bytesin; 343 + }; 344 + 345 + /* gred setup */ 346 + struct tc_gred_sopt { 347 + __u32 DPs; 348 + __u32 def_DP; 349 + __u8 grio; 350 + __u8 flags; 351 + __u16 pad1; 352 + }; 353 + 354 + /* CHOKe section */ 355 + 356 + enum { 357 + TCA_CHOKE_UNSPEC, 358 + TCA_CHOKE_PARMS, 359 + TCA_CHOKE_STAB, 360 + TCA_CHOKE_MAX_P, 361 + __TCA_CHOKE_MAX, 362 + }; 363 + 364 + #define TCA_CHOKE_MAX (__TCA_CHOKE_MAX - 1) 365 + 366 + struct tc_choke_qopt { 367 + __u32 limit; /* Hard queue length (packets) */ 368 + __u32 qth_min; /* Min average threshold (packets) */ 369 + __u32 qth_max; /* Max average threshold (packets) */ 370 + unsigned char Wlog; /* log(W) */ 371 + unsigned char Plog; /* log(P_max/(qth_max-qth_min)) */ 372 + unsigned char Scell_log; /* cell size for idle damping */ 373 + unsigned char flags; /* see RED flags */ 374 + }; 375 + 376 + struct tc_choke_xstats { 377 + __u32 early; /* Early drops */ 378 + __u32 pdrop; /* Drops due to queue limits */ 379 + __u32 other; /* Drops due to drop() calls */ 380 + __u32 marked; /* Marked packets */ 381 + __u32 matched; /* Drops due to flow match */ 382 + }; 383 + 384 + /* HTB section */ 385 + #define TC_HTB_NUMPRIO 8 386 + #define TC_HTB_MAXDEPTH 8 387 + #define TC_HTB_PROTOVER 3 /* the same as HTB and TC's major */ 388 + 389 + struct tc_htb_opt { 390 + struct tc_ratespec rate; 391 + struct tc_ratespec ceil; 392 + __u32 buffer; 393 + __u32 cbuffer; 394 + __u32 quantum; 395 + __u32 level; /* out only */ 396 + __u32 prio; 397 + }; 398 + struct tc_htb_glob { 399 + __u32 version; /* to match HTB/TC */ 400 + __u32 rate2quantum; /* bps->quantum divisor */ 401 + __u32 defcls; /* default class number */ 402 + __u32 debug; /* debug flags */ 403 + 404 + /* stats */ 405 + __u32 direct_pkts; /* count of non shaped packets */ 406 + }; 407 + enum { 408 + TCA_HTB_UNSPEC, 409 + TCA_HTB_PARMS, 410 + TCA_HTB_INIT, 411 + TCA_HTB_CTAB, 412 + TCA_HTB_RTAB, 413 + TCA_HTB_DIRECT_QLEN, 414 + TCA_HTB_RATE64, 415 + TCA_HTB_CEIL64, 416 + TCA_HTB_PAD, 417 + __TCA_HTB_MAX, 418 + }; 419 + 420 + #define TCA_HTB_MAX (__TCA_HTB_MAX - 1) 421 + 422 + struct tc_htb_xstats { 423 + __u32 lends; 424 + __u32 borrows; 425 + __u32 
giants; /* unused since 'Make HTB scheduler work with TSO.' */ 426 + __s32 tokens; 427 + __s32 ctokens; 428 + }; 429 + 430 + /* HFSC section */ 431 + 432 + struct tc_hfsc_qopt { 433 + __u16 defcls; /* default class */ 434 + }; 435 + 436 + struct tc_service_curve { 437 + __u32 m1; /* slope of the first segment in bps */ 438 + __u32 d; /* x-projection of the first segment in us */ 439 + __u32 m2; /* slope of the second segment in bps */ 440 + }; 441 + 442 + struct tc_hfsc_stats { 443 + __u64 work; /* total work done */ 444 + __u64 rtwork; /* work done by real-time criteria */ 445 + __u32 period; /* current period */ 446 + __u32 level; /* class level in hierarchy */ 447 + }; 448 + 449 + enum { 450 + TCA_HFSC_UNSPEC, 451 + TCA_HFSC_RSC, 452 + TCA_HFSC_FSC, 453 + TCA_HFSC_USC, 454 + __TCA_HFSC_MAX, 455 + }; 456 + 457 + #define TCA_HFSC_MAX (__TCA_HFSC_MAX - 1) 458 + 459 + 460 + /* CBQ section */ 461 + 462 + #define TC_CBQ_MAXPRIO 8 463 + #define TC_CBQ_MAXLEVEL 8 464 + #define TC_CBQ_DEF_EWMA 5 465 + 466 + struct tc_cbq_lssopt { 467 + unsigned char change; 468 + unsigned char flags; 469 + #define TCF_CBQ_LSS_BOUNDED 1 470 + #define TCF_CBQ_LSS_ISOLATED 2 471 + unsigned char ewma_log; 472 + unsigned char level; 473 + #define TCF_CBQ_LSS_FLAGS 1 474 + #define TCF_CBQ_LSS_EWMA 2 475 + #define TCF_CBQ_LSS_MAXIDLE 4 476 + #define TCF_CBQ_LSS_MINIDLE 8 477 + #define TCF_CBQ_LSS_OFFTIME 0x10 478 + #define TCF_CBQ_LSS_AVPKT 0x20 479 + __u32 maxidle; 480 + __u32 minidle; 481 + __u32 offtime; 482 + __u32 avpkt; 483 + }; 484 + 485 + struct tc_cbq_wrropt { 486 + unsigned char flags; 487 + unsigned char priority; 488 + unsigned char cpriority; 489 + unsigned char __reserved; 490 + __u32 allot; 491 + __u32 weight; 492 + }; 493 + 494 + struct tc_cbq_ovl { 495 + unsigned char strategy; 496 + #define TC_CBQ_OVL_CLASSIC 0 497 + #define TC_CBQ_OVL_DELAY 1 498 + #define TC_CBQ_OVL_LOWPRIO 2 499 + #define TC_CBQ_OVL_DROP 3 500 + #define TC_CBQ_OVL_RCLASSIC 4 501 + unsigned char priority2; 502 + __u16 pad; 503 + __u32 penalty; 504 + }; 505 + 506 + struct tc_cbq_police { 507 + unsigned char police; 508 + unsigned char __res1; 509 + unsigned short __res2; 510 + }; 511 + 512 + struct tc_cbq_fopt { 513 + __u32 split; 514 + __u32 defmap; 515 + __u32 defchange; 516 + }; 517 + 518 + struct tc_cbq_xstats { 519 + __u32 borrows; 520 + __u32 overactions; 521 + __s32 avgidle; 522 + __s32 undertime; 523 + }; 524 + 525 + enum { 526 + TCA_CBQ_UNSPEC, 527 + TCA_CBQ_LSSOPT, 528 + TCA_CBQ_WRROPT, 529 + TCA_CBQ_FOPT, 530 + TCA_CBQ_OVL_STRATEGY, 531 + TCA_CBQ_RATE, 532 + TCA_CBQ_RTAB, 533 + TCA_CBQ_POLICE, 534 + __TCA_CBQ_MAX, 535 + }; 536 + 537 + #define TCA_CBQ_MAX (__TCA_CBQ_MAX - 1) 538 + 539 + /* dsmark section */ 540 + 541 + enum { 542 + TCA_DSMARK_UNSPEC, 543 + TCA_DSMARK_INDICES, 544 + TCA_DSMARK_DEFAULT_INDEX, 545 + TCA_DSMARK_SET_TC_INDEX, 546 + TCA_DSMARK_MASK, 547 + TCA_DSMARK_VALUE, 548 + __TCA_DSMARK_MAX, 549 + }; 550 + 551 + #define TCA_DSMARK_MAX (__TCA_DSMARK_MAX - 1) 552 + 553 + /* ATM section */ 554 + 555 + enum { 556 + TCA_ATM_UNSPEC, 557 + TCA_ATM_FD, /* file/socket descriptor */ 558 + TCA_ATM_PTR, /* pointer to descriptor - later */ 559 + TCA_ATM_HDR, /* LL header */ 560 + TCA_ATM_EXCESS, /* excess traffic class (0 for CLP) */ 561 + TCA_ATM_ADDR, /* PVC address (for output only) */ 562 + TCA_ATM_STATE, /* VC state (ATM_VS_*; for output only) */ 563 + __TCA_ATM_MAX, 564 + }; 565 + 566 + #define TCA_ATM_MAX (__TCA_ATM_MAX - 1) 567 + 568 + /* Network emulator */ 569 + 570 + enum { 571 + TCA_NETEM_UNSPEC, 572 + 
TCA_NETEM_CORR, 573 + TCA_NETEM_DELAY_DIST, 574 + TCA_NETEM_REORDER, 575 + TCA_NETEM_CORRUPT, 576 + TCA_NETEM_LOSS, 577 + TCA_NETEM_RATE, 578 + TCA_NETEM_ECN, 579 + TCA_NETEM_RATE64, 580 + TCA_NETEM_PAD, 581 + TCA_NETEM_LATENCY64, 582 + TCA_NETEM_JITTER64, 583 + TCA_NETEM_SLOT, 584 + TCA_NETEM_SLOT_DIST, 585 + __TCA_NETEM_MAX, 586 + }; 587 + 588 + #define TCA_NETEM_MAX (__TCA_NETEM_MAX - 1) 589 + 590 + struct tc_netem_qopt { 591 + __u32 latency; /* added delay (us) */ 592 + __u32 limit; /* fifo limit (packets) */ 593 + __u32 loss; /* random packet loss (0=none ~0=100%) */ 594 + __u32 gap; /* re-ordering gap (0 for none) */ 595 + __u32 duplicate; /* random packet dup (0=none ~0=100%) */ 596 + __u32 jitter; /* random jitter in latency (us) */ 597 + }; 598 + 599 + struct tc_netem_corr { 600 + __u32 delay_corr; /* delay correlation */ 601 + __u32 loss_corr; /* packet loss correlation */ 602 + __u32 dup_corr; /* duplicate correlation */ 603 + }; 604 + 605 + struct tc_netem_reorder { 606 + __u32 probability; 607 + __u32 correlation; 608 + }; 609 + 610 + struct tc_netem_corrupt { 611 + __u32 probability; 612 + __u32 correlation; 613 + }; 614 + 615 + struct tc_netem_rate { 616 + __u32 rate; /* byte/s */ 617 + __s32 packet_overhead; 618 + __u32 cell_size; 619 + __s32 cell_overhead; 620 + }; 621 + 622 + struct tc_netem_slot { 623 + __s64 min_delay; /* nsec */ 624 + __s64 max_delay; 625 + __s32 max_packets; 626 + __s32 max_bytes; 627 + __s64 dist_delay; /* nsec */ 628 + __s64 dist_jitter; /* nsec */ 629 + }; 630 + 631 + enum { 632 + NETEM_LOSS_UNSPEC, 633 + NETEM_LOSS_GI, /* General Intuitive - 4 state model */ 634 + NETEM_LOSS_GE, /* Gilbert Elliot models */ 635 + __NETEM_LOSS_MAX 636 + }; 637 + #define NETEM_LOSS_MAX (__NETEM_LOSS_MAX - 1) 638 + 639 + /* State transition probabilities for 4 state model */ 640 + struct tc_netem_gimodel { 641 + __u32 p13; 642 + __u32 p31; 643 + __u32 p32; 644 + __u32 p14; 645 + __u32 p23; 646 + }; 647 + 648 + /* Gilbert-Elliot models */ 649 + struct tc_netem_gemodel { 650 + __u32 p; 651 + __u32 r; 652 + __u32 h; 653 + __u32 k1; 654 + }; 655 + 656 + #define NETEM_DIST_SCALE 8192 657 + #define NETEM_DIST_MAX 16384 658 + 659 + /* DRR */ 660 + 661 + enum { 662 + TCA_DRR_UNSPEC, 663 + TCA_DRR_QUANTUM, 664 + __TCA_DRR_MAX 665 + }; 666 + 667 + #define TCA_DRR_MAX (__TCA_DRR_MAX - 1) 668 + 669 + struct tc_drr_stats { 670 + __u32 deficit; 671 + }; 672 + 673 + /* MQPRIO */ 674 + #define TC_QOPT_BITMASK 15 675 + #define TC_QOPT_MAX_QUEUE 16 676 + 677 + enum { 678 + TC_MQPRIO_HW_OFFLOAD_NONE, /* no offload requested */ 679 + TC_MQPRIO_HW_OFFLOAD_TCS, /* offload TCs, no queue counts */ 680 + __TC_MQPRIO_HW_OFFLOAD_MAX 681 + }; 682 + 683 + #define TC_MQPRIO_HW_OFFLOAD_MAX (__TC_MQPRIO_HW_OFFLOAD_MAX - 1) 684 + 685 + enum { 686 + TC_MQPRIO_MODE_DCB, 687 + TC_MQPRIO_MODE_CHANNEL, 688 + __TC_MQPRIO_MODE_MAX 689 + }; 690 + 691 + #define __TC_MQPRIO_MODE_MAX (__TC_MQPRIO_MODE_MAX - 1) 692 + 693 + enum { 694 + TC_MQPRIO_SHAPER_DCB, 695 + TC_MQPRIO_SHAPER_BW_RATE, /* Add new shapers below */ 696 + __TC_MQPRIO_SHAPER_MAX 697 + }; 698 + 699 + #define __TC_MQPRIO_SHAPER_MAX (__TC_MQPRIO_SHAPER_MAX - 1) 700 + 701 + struct tc_mqprio_qopt { 702 + __u8 num_tc; 703 + __u8 prio_tc_map[TC_QOPT_BITMASK + 1]; 704 + __u8 hw; 705 + __u16 count[TC_QOPT_MAX_QUEUE]; 706 + __u16 offset[TC_QOPT_MAX_QUEUE]; 707 + }; 708 + 709 + #define TC_MQPRIO_F_MODE 0x1 710 + #define TC_MQPRIO_F_SHAPER 0x2 711 + #define TC_MQPRIO_F_MIN_RATE 0x4 712 + #define TC_MQPRIO_F_MAX_RATE 0x8 713 + 714 + enum { 715 + 
TCA_MQPRIO_UNSPEC, 716 + TCA_MQPRIO_MODE, 717 + TCA_MQPRIO_SHAPER, 718 + TCA_MQPRIO_MIN_RATE64, 719 + TCA_MQPRIO_MAX_RATE64, 720 + __TCA_MQPRIO_MAX, 721 + }; 722 + 723 + #define TCA_MQPRIO_MAX (__TCA_MQPRIO_MAX - 1) 724 + 725 + /* SFB */ 726 + 727 + enum { 728 + TCA_SFB_UNSPEC, 729 + TCA_SFB_PARMS, 730 + __TCA_SFB_MAX, 731 + }; 732 + 733 + #define TCA_SFB_MAX (__TCA_SFB_MAX - 1) 734 + 735 + /* 736 + * Note: increment, decrement are Q0.16 fixed-point values. 737 + */ 738 + struct tc_sfb_qopt { 739 + __u32 rehash_interval; /* delay between hash move, in ms */ 740 + __u32 warmup_time; /* double buffering warmup time in ms (warmup_time < rehash_interval) */ 741 + __u32 max; /* max len of qlen_min */ 742 + __u32 bin_size; /* maximum queue length per bin */ 743 + __u32 increment; /* probability increment, (d1 in Blue) */ 744 + __u32 decrement; /* probability decrement, (d2 in Blue) */ 745 + __u32 limit; /* max SFB queue length */ 746 + __u32 penalty_rate; /* inelastic flows are rate limited to 'rate' pps */ 747 + __u32 penalty_burst; 748 + }; 749 + 750 + struct tc_sfb_xstats { 751 + __u32 earlydrop; 752 + __u32 penaltydrop; 753 + __u32 bucketdrop; 754 + __u32 queuedrop; 755 + __u32 childdrop; /* drops in child qdisc */ 756 + __u32 marked; 757 + __u32 maxqlen; 758 + __u32 maxprob; 759 + __u32 avgprob; 760 + }; 761 + 762 + #define SFB_MAX_PROB 0xFFFF 763 + 764 + /* QFQ */ 765 + enum { 766 + TCA_QFQ_UNSPEC, 767 + TCA_QFQ_WEIGHT, 768 + TCA_QFQ_LMAX, 769 + __TCA_QFQ_MAX 770 + }; 771 + 772 + #define TCA_QFQ_MAX (__TCA_QFQ_MAX - 1) 773 + 774 + struct tc_qfq_stats { 775 + __u32 weight; 776 + __u32 lmax; 777 + }; 778 + 779 + /* CODEL */ 780 + 781 + enum { 782 + TCA_CODEL_UNSPEC, 783 + TCA_CODEL_TARGET, 784 + TCA_CODEL_LIMIT, 785 + TCA_CODEL_INTERVAL, 786 + TCA_CODEL_ECN, 787 + TCA_CODEL_CE_THRESHOLD, 788 + __TCA_CODEL_MAX 789 + }; 790 + 791 + #define TCA_CODEL_MAX (__TCA_CODEL_MAX - 1) 792 + 793 + struct tc_codel_xstats { 794 + __u32 maxpacket; /* largest packet we've seen so far */ 795 + __u32 count; /* how many drops we've done since the last time we 796 + * entered dropping state 797 + */ 798 + __u32 lastcount; /* count at entry to dropping state */ 799 + __u32 ldelay; /* in-queue delay seen by most recently dequeued packet */ 800 + __s32 drop_next; /* time to drop next packet */ 801 + __u32 drop_overlimit; /* number of time max qdisc packet limit was hit */ 802 + __u32 ecn_mark; /* number of packets we ECN marked instead of dropped */ 803 + __u32 dropping; /* are we in dropping state ? 
*/ 804 + __u32 ce_mark; /* number of CE marked packets because of ce_threshold */ 805 + }; 806 + 807 + /* FQ_CODEL */ 808 + 809 + enum { 810 + TCA_FQ_CODEL_UNSPEC, 811 + TCA_FQ_CODEL_TARGET, 812 + TCA_FQ_CODEL_LIMIT, 813 + TCA_FQ_CODEL_INTERVAL, 814 + TCA_FQ_CODEL_ECN, 815 + TCA_FQ_CODEL_FLOWS, 816 + TCA_FQ_CODEL_QUANTUM, 817 + TCA_FQ_CODEL_CE_THRESHOLD, 818 + TCA_FQ_CODEL_DROP_BATCH_SIZE, 819 + TCA_FQ_CODEL_MEMORY_LIMIT, 820 + __TCA_FQ_CODEL_MAX 821 + }; 822 + 823 + #define TCA_FQ_CODEL_MAX (__TCA_FQ_CODEL_MAX - 1) 824 + 825 + enum { 826 + TCA_FQ_CODEL_XSTATS_QDISC, 827 + TCA_FQ_CODEL_XSTATS_CLASS, 828 + }; 829 + 830 + struct tc_fq_codel_qd_stats { 831 + __u32 maxpacket; /* largest packet we've seen so far */ 832 + __u32 drop_overlimit; /* number of time max qdisc 833 + * packet limit was hit 834 + */ 835 + __u32 ecn_mark; /* number of packets we ECN marked 836 + * instead of being dropped 837 + */ 838 + __u32 new_flow_count; /* number of time packets 839 + * created a 'new flow' 840 + */ 841 + __u32 new_flows_len; /* count of flows in new list */ 842 + __u32 old_flows_len; /* count of flows in old list */ 843 + __u32 ce_mark; /* packets above ce_threshold */ 844 + __u32 memory_usage; /* in bytes */ 845 + __u32 drop_overmemory; 846 + }; 847 + 848 + struct tc_fq_codel_cl_stats { 849 + __s32 deficit; 850 + __u32 ldelay; /* in-queue delay seen by most recently 851 + * dequeued packet 852 + */ 853 + __u32 count; 854 + __u32 lastcount; 855 + __u32 dropping; 856 + __s32 drop_next; 857 + }; 858 + 859 + struct tc_fq_codel_xstats { 860 + __u32 type; 861 + union { 862 + struct tc_fq_codel_qd_stats qdisc_stats; 863 + struct tc_fq_codel_cl_stats class_stats; 864 + }; 865 + }; 866 + 867 + /* FQ */ 868 + 869 + enum { 870 + TCA_FQ_UNSPEC, 871 + 872 + TCA_FQ_PLIMIT, /* limit of total number of packets in queue */ 873 + 874 + TCA_FQ_FLOW_PLIMIT, /* limit of packets per flow */ 875 + 876 + TCA_FQ_QUANTUM, /* RR quantum */ 877 + 878 + TCA_FQ_INITIAL_QUANTUM, /* RR quantum for new flow */ 879 + 880 + TCA_FQ_RATE_ENABLE, /* enable/disable rate limiting */ 881 + 882 + TCA_FQ_FLOW_DEFAULT_RATE,/* obsolete, do not use */ 883 + 884 + TCA_FQ_FLOW_MAX_RATE, /* per flow max rate */ 885 + 886 + TCA_FQ_BUCKETS_LOG, /* log2(number of buckets) */ 887 + 888 + TCA_FQ_FLOW_REFILL_DELAY, /* flow credit refill delay in usec */ 889 + 890 + TCA_FQ_ORPHAN_MASK, /* mask applied to orphaned skb hashes */ 891 + 892 + TCA_FQ_LOW_RATE_THRESHOLD, /* per packet delay under this rate */ 893 + 894 + TCA_FQ_CE_THRESHOLD, /* DCTCP-like CE-marking threshold */ 895 + 896 + __TCA_FQ_MAX 897 + }; 898 + 899 + #define TCA_FQ_MAX (__TCA_FQ_MAX - 1) 900 + 901 + struct tc_fq_qd_stats { 902 + __u64 gc_flows; 903 + __u64 highprio_packets; 904 + __u64 tcp_retrans; 905 + __u64 throttled; 906 + __u64 flows_plimit; 907 + __u64 pkts_too_long; 908 + __u64 allocation_errors; 909 + __s64 time_next_delayed_flow; 910 + __u32 flows; 911 + __u32 inactive_flows; 912 + __u32 throttled_flows; 913 + __u32 unthrottle_latency_ns; 914 + __u64 ce_mark; /* packets above ce_threshold */ 915 + }; 916 + 917 + /* Heavy-Hitter Filter */ 918 + 919 + enum { 920 + TCA_HHF_UNSPEC, 921 + TCA_HHF_BACKLOG_LIMIT, 922 + TCA_HHF_QUANTUM, 923 + TCA_HHF_HH_FLOWS_LIMIT, 924 + TCA_HHF_RESET_TIMEOUT, 925 + TCA_HHF_ADMIT_BYTES, 926 + TCA_HHF_EVICT_TIMEOUT, 927 + TCA_HHF_NON_HH_WEIGHT, 928 + __TCA_HHF_MAX 929 + }; 930 + 931 + #define TCA_HHF_MAX (__TCA_HHF_MAX - 1) 932 + 933 + struct tc_hhf_xstats { 934 + __u32 drop_overlimit; /* number of times max qdisc packet limit 935 + * was hit 936 + 
*/ 937 + __u32 hh_overlimit; /* number of times max heavy-hitters was hit */ 938 + __u32 hh_tot_count; /* number of captured heavy-hitters so far */ 939 + __u32 hh_cur_count; /* number of current heavy-hitters */ 940 + }; 941 + 942 + /* PIE */ 943 + enum { 944 + TCA_PIE_UNSPEC, 945 + TCA_PIE_TARGET, 946 + TCA_PIE_LIMIT, 947 + TCA_PIE_TUPDATE, 948 + TCA_PIE_ALPHA, 949 + TCA_PIE_BETA, 950 + TCA_PIE_ECN, 951 + TCA_PIE_BYTEMODE, 952 + __TCA_PIE_MAX 953 + }; 954 + #define TCA_PIE_MAX (__TCA_PIE_MAX - 1) 955 + 956 + struct tc_pie_xstats { 957 + __u32 prob; /* current probability */ 958 + __u32 delay; /* current delay in ms */ 959 + __u32 avg_dq_rate; /* current average dq_rate in bits/pie_time */ 960 + __u32 packets_in; /* total number of packets enqueued */ 961 + __u32 dropped; /* packets dropped due to pie_action */ 962 + __u32 overlimit; /* dropped due to lack of space in queue */ 963 + __u32 maxq; /* maximum queue size */ 964 + __u32 ecn_mark; /* packets marked with ecn*/ 965 + }; 966 + 967 + /* CBS */ 968 + struct tc_cbs_qopt { 969 + __u8 offload; 970 + __u8 _pad[3]; 971 + __s32 hicredit; 972 + __s32 locredit; 973 + __s32 idleslope; 974 + __s32 sendslope; 975 + }; 976 + 977 + enum { 978 + TCA_CBS_UNSPEC, 979 + TCA_CBS_PARMS, 980 + __TCA_CBS_MAX, 981 + }; 982 + 983 + #define TCA_CBS_MAX (__TCA_CBS_MAX - 1) 984 + 985 + 986 + /* ETF */ 987 + struct tc_etf_qopt { 988 + __s32 delta; 989 + __s32 clockid; 990 + __u32 flags; 991 + #define TC_ETF_DEADLINE_MODE_ON BIT(0) 992 + #define TC_ETF_OFFLOAD_ON BIT(1) 993 + }; 994 + 995 + enum { 996 + TCA_ETF_UNSPEC, 997 + TCA_ETF_PARMS, 998 + __TCA_ETF_MAX, 999 + }; 1000 + 1001 + #define TCA_ETF_MAX (__TCA_ETF_MAX - 1) 1002 + 1003 + 1004 + /* CAKE */ 1005 + enum { 1006 + TCA_CAKE_UNSPEC, 1007 + TCA_CAKE_PAD, 1008 + TCA_CAKE_BASE_RATE64, 1009 + TCA_CAKE_DIFFSERV_MODE, 1010 + TCA_CAKE_ATM, 1011 + TCA_CAKE_FLOW_MODE, 1012 + TCA_CAKE_OVERHEAD, 1013 + TCA_CAKE_RTT, 1014 + TCA_CAKE_TARGET, 1015 + TCA_CAKE_AUTORATE, 1016 + TCA_CAKE_MEMORY, 1017 + TCA_CAKE_NAT, 1018 + TCA_CAKE_RAW, 1019 + TCA_CAKE_WASH, 1020 + TCA_CAKE_MPU, 1021 + TCA_CAKE_INGRESS, 1022 + TCA_CAKE_ACK_FILTER, 1023 + TCA_CAKE_SPLIT_GSO, 1024 + __TCA_CAKE_MAX 1025 + }; 1026 + #define TCA_CAKE_MAX (__TCA_CAKE_MAX - 1) 1027 + 1028 + enum { 1029 + __TCA_CAKE_STATS_INVALID, 1030 + TCA_CAKE_STATS_PAD, 1031 + TCA_CAKE_STATS_CAPACITY_ESTIMATE64, 1032 + TCA_CAKE_STATS_MEMORY_LIMIT, 1033 + TCA_CAKE_STATS_MEMORY_USED, 1034 + TCA_CAKE_STATS_AVG_NETOFF, 1035 + TCA_CAKE_STATS_MIN_NETLEN, 1036 + TCA_CAKE_STATS_MAX_NETLEN, 1037 + TCA_CAKE_STATS_MIN_ADJLEN, 1038 + TCA_CAKE_STATS_MAX_ADJLEN, 1039 + TCA_CAKE_STATS_TIN_STATS, 1040 + TCA_CAKE_STATS_DEFICIT, 1041 + TCA_CAKE_STATS_COBALT_COUNT, 1042 + TCA_CAKE_STATS_DROPPING, 1043 + TCA_CAKE_STATS_DROP_NEXT_US, 1044 + TCA_CAKE_STATS_P_DROP, 1045 + TCA_CAKE_STATS_BLUE_TIMER_US, 1046 + __TCA_CAKE_STATS_MAX 1047 + }; 1048 + #define TCA_CAKE_STATS_MAX (__TCA_CAKE_STATS_MAX - 1) 1049 + 1050 + enum { 1051 + __TCA_CAKE_TIN_STATS_INVALID, 1052 + TCA_CAKE_TIN_STATS_PAD, 1053 + TCA_CAKE_TIN_STATS_SENT_PACKETS, 1054 + TCA_CAKE_TIN_STATS_SENT_BYTES64, 1055 + TCA_CAKE_TIN_STATS_DROPPED_PACKETS, 1056 + TCA_CAKE_TIN_STATS_DROPPED_BYTES64, 1057 + TCA_CAKE_TIN_STATS_ACKS_DROPPED_PACKETS, 1058 + TCA_CAKE_TIN_STATS_ACKS_DROPPED_BYTES64, 1059 + TCA_CAKE_TIN_STATS_ECN_MARKED_PACKETS, 1060 + TCA_CAKE_TIN_STATS_ECN_MARKED_BYTES64, 1061 + TCA_CAKE_TIN_STATS_BACKLOG_PACKETS, 1062 + TCA_CAKE_TIN_STATS_BACKLOG_BYTES, 1063 + TCA_CAKE_TIN_STATS_THRESHOLD_RATE64, 1064 + TCA_CAKE_TIN_STATS_TARGET_US, 
1065 + TCA_CAKE_TIN_STATS_INTERVAL_US, 1066 + TCA_CAKE_TIN_STATS_WAY_INDIRECT_HITS, 1067 + TCA_CAKE_TIN_STATS_WAY_MISSES, 1068 + TCA_CAKE_TIN_STATS_WAY_COLLISIONS, 1069 + TCA_CAKE_TIN_STATS_PEAK_DELAY_US, 1070 + TCA_CAKE_TIN_STATS_AVG_DELAY_US, 1071 + TCA_CAKE_TIN_STATS_BASE_DELAY_US, 1072 + TCA_CAKE_TIN_STATS_SPARSE_FLOWS, 1073 + TCA_CAKE_TIN_STATS_BULK_FLOWS, 1074 + TCA_CAKE_TIN_STATS_UNRESPONSIVE_FLOWS, 1075 + TCA_CAKE_TIN_STATS_MAX_SKBLEN, 1076 + TCA_CAKE_TIN_STATS_FLOW_QUANTUM, 1077 + __TCA_CAKE_TIN_STATS_MAX 1078 + }; 1079 + #define TCA_CAKE_TIN_STATS_MAX (__TCA_CAKE_TIN_STATS_MAX - 1) 1080 + #define TC_CAKE_MAX_TINS (8) 1081 + 1082 + enum { 1083 + CAKE_FLOW_NONE = 0, 1084 + CAKE_FLOW_SRC_IP, 1085 + CAKE_FLOW_DST_IP, 1086 + CAKE_FLOW_HOSTS, /* = CAKE_FLOW_SRC_IP | CAKE_FLOW_DST_IP */ 1087 + CAKE_FLOW_FLOWS, 1088 + CAKE_FLOW_DUAL_SRC, /* = CAKE_FLOW_SRC_IP | CAKE_FLOW_FLOWS */ 1089 + CAKE_FLOW_DUAL_DST, /* = CAKE_FLOW_DST_IP | CAKE_FLOW_FLOWS */ 1090 + CAKE_FLOW_TRIPLE, /* = CAKE_FLOW_HOSTS | CAKE_FLOW_FLOWS */ 1091 + CAKE_FLOW_MAX, 1092 + }; 1093 + 1094 + enum { 1095 + CAKE_DIFFSERV_DIFFSERV3 = 0, 1096 + CAKE_DIFFSERV_DIFFSERV4, 1097 + CAKE_DIFFSERV_DIFFSERV8, 1098 + CAKE_DIFFSERV_BESTEFFORT, 1099 + CAKE_DIFFSERV_PRECEDENCE, 1100 + CAKE_DIFFSERV_MAX 1101 + }; 1102 + 1103 + enum { 1104 + CAKE_ACK_NONE = 0, 1105 + CAKE_ACK_FILTER, 1106 + CAKE_ACK_AGGRESSIVE, 1107 + CAKE_ACK_MAX 1108 + }; 1109 + 1110 + enum { 1111 + CAKE_ATM_NONE = 0, 1112 + CAKE_ATM_ATM, 1113 + CAKE_ATM_PTM, 1114 + CAKE_ATM_MAX 1115 + }; 1116 + 1117 + 1118 + /* TAPRIO */ 1119 + enum { 1120 + TC_TAPRIO_CMD_SET_GATES = 0x00, 1121 + TC_TAPRIO_CMD_SET_AND_HOLD = 0x01, 1122 + TC_TAPRIO_CMD_SET_AND_RELEASE = 0x02, 1123 + }; 1124 + 1125 + enum { 1126 + TCA_TAPRIO_SCHED_ENTRY_UNSPEC, 1127 + TCA_TAPRIO_SCHED_ENTRY_INDEX, /* u32 */ 1128 + TCA_TAPRIO_SCHED_ENTRY_CMD, /* u8 */ 1129 + TCA_TAPRIO_SCHED_ENTRY_GATE_MASK, /* u32 */ 1130 + TCA_TAPRIO_SCHED_ENTRY_INTERVAL, /* u32 */ 1131 + __TCA_TAPRIO_SCHED_ENTRY_MAX, 1132 + }; 1133 + #define TCA_TAPRIO_SCHED_ENTRY_MAX (__TCA_TAPRIO_SCHED_ENTRY_MAX - 1) 1134 + 1135 + /* The format for schedule entry list is: 1136 + * [TCA_TAPRIO_SCHED_ENTRY_LIST] 1137 + * [TCA_TAPRIO_SCHED_ENTRY] 1138 + * [TCA_TAPRIO_SCHED_ENTRY_CMD] 1139 + * [TCA_TAPRIO_SCHED_ENTRY_GATES] 1140 + * [TCA_TAPRIO_SCHED_ENTRY_INTERVAL] 1141 + */ 1142 + enum { 1143 + TCA_TAPRIO_SCHED_UNSPEC, 1144 + TCA_TAPRIO_SCHED_ENTRY, 1145 + __TCA_TAPRIO_SCHED_MAX, 1146 + }; 1147 + 1148 + #define TCA_TAPRIO_SCHED_MAX (__TCA_TAPRIO_SCHED_MAX - 1) 1149 + 1150 + enum { 1151 + TCA_TAPRIO_ATTR_UNSPEC, 1152 + TCA_TAPRIO_ATTR_PRIOMAP, /* struct tc_mqprio_qopt */ 1153 + TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST, /* nested of entry */ 1154 + TCA_TAPRIO_ATTR_SCHED_BASE_TIME, /* s64 */ 1155 + TCA_TAPRIO_ATTR_SCHED_SINGLE_ENTRY, /* single entry */ 1156 + TCA_TAPRIO_ATTR_SCHED_CLOCKID, /* s32 */ 1157 + TCA_TAPRIO_PAD, 1158 + __TCA_TAPRIO_ATTR_MAX, 1159 + }; 1160 + 1161 + #define TCA_TAPRIO_ATTR_MAX (__TCA_TAPRIO_ATTR_MAX - 1) 1162 + 1163 + #endif
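This grabs a full snapshot of the packet-scheduler uapi into tools/, following the usual convention of building tools against a pinned copy of kernel headers. Of general use in it are the handle macros near the top; a quick demonstration:

/* Sketch: composing and splitting 32-bit tc handles ("major:minor",
 * e.g. qdisc 1: with class 1:10) using the TC_H_* macros above. */
#include <stdio.h>

#define TC_H_MAJ_MASK	(0xFFFF0000U)
#define TC_H_MIN_MASK	(0x0000FFFFU)
#define TC_H_MAJ(h)	((h) & TC_H_MAJ_MASK)
#define TC_H_MIN(h)	((h) & TC_H_MIN_MASK)
#define TC_H_MAKE(maj, min) (((maj) & TC_H_MAJ_MASK) | ((min) & TC_H_MIN_MASK))

int main(void)
{
	unsigned int h = TC_H_MAKE(1U << 16, 10);	/* handle 1:10 */

	printf("%x:%x\n", TC_H_MAJ(h) >> 16, TC_H_MIN(h));	/* 1:a */
	return 0;
}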
+8
tools/include/uapi/linux/prctl.h
··· 220 220 # define PR_SPEC_DISABLE (1UL << 2) 221 221 # define PR_SPEC_FORCE_DISABLE (1UL << 3) 222 222 223 + /* Reset arm64 pointer authentication keys */ 224 + #define PR_PAC_RESET_KEYS 54 225 + # define PR_PAC_APIAKEY (1UL << 0) 226 + # define PR_PAC_APIBKEY (1UL << 1) 227 + # define PR_PAC_APDAKEY (1UL << 2) 228 + # define PR_PAC_APDBKEY (1UL << 3) 229 + # define PR_PAC_APGAKEY (1UL << 4) 230 + 223 231 #endif /* _LINUX_PRCTL_H */
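The new PR_PAC_* masks pick which arm64 pointer-authentication keys the kernel regenerates; passing a zero mask asks for all of them. A minimal caller sketch (arm64 only; on other architectures or CPUs without the feature the call fails):

#include <stdio.h>
#include <sys/prctl.h>

int main(void)
{
	/* Reset just the two instruction keys; the unused args must be 0. */
	if (prctl(PR_PAC_RESET_KEYS, PR_PAC_APIAKEY | PR_PAC_APIBKEY, 0, 0, 0))
		perror("PR_PAC_RESET_KEYS");
	return 0;
}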
+2 -111
tools/include/uapi/linux/vhost.h
··· 11 11 * device configuration. 12 12 */ 13 13 14 + #include <linux/vhost_types.h> 14 15 #include <linux/types.h> 15 - #include <linux/compiler.h> 16 16 #include <linux/ioctl.h> 17 - #include <linux/virtio_config.h> 18 - #include <linux/virtio_ring.h> 19 - 20 - struct vhost_vring_state { 21 - unsigned int index; 22 - unsigned int num; 23 - }; 24 - 25 - struct vhost_vring_file { 26 - unsigned int index; 27 - int fd; /* Pass -1 to unbind from file. */ 28 - 29 - }; 30 - 31 - struct vhost_vring_addr { 32 - unsigned int index; 33 - /* Option flags. */ 34 - unsigned int flags; 35 - /* Flag values: */ 36 - /* Whether log address is valid. If set enables logging. */ 37 - #define VHOST_VRING_F_LOG 0 38 - 39 - /* Start of array of descriptors (virtually contiguous) */ 40 - __u64 desc_user_addr; 41 - /* Used structure address. Must be 32 bit aligned */ 42 - __u64 used_user_addr; 43 - /* Available structure address. Must be 16 bit aligned */ 44 - __u64 avail_user_addr; 45 - /* Logging support. */ 46 - /* Log writes to used structure, at offset calculated from specified 47 - * address. Address must be 32 bit aligned. */ 48 - __u64 log_guest_addr; 49 - }; 50 - 51 - /* no alignment requirement */ 52 - struct vhost_iotlb_msg { 53 - __u64 iova; 54 - __u64 size; 55 - __u64 uaddr; 56 - #define VHOST_ACCESS_RO 0x1 57 - #define VHOST_ACCESS_WO 0x2 58 - #define VHOST_ACCESS_RW 0x3 59 - __u8 perm; 60 - #define VHOST_IOTLB_MISS 1 61 - #define VHOST_IOTLB_UPDATE 2 62 - #define VHOST_IOTLB_INVALIDATE 3 63 - #define VHOST_IOTLB_ACCESS_FAIL 4 64 - __u8 type; 65 - }; 66 - 67 - #define VHOST_IOTLB_MSG 0x1 68 - #define VHOST_IOTLB_MSG_V2 0x2 69 - 70 - struct vhost_msg { 71 - int type; 72 - union { 73 - struct vhost_iotlb_msg iotlb; 74 - __u8 padding[64]; 75 - }; 76 - }; 77 - 78 - struct vhost_msg_v2 { 79 - __u32 type; 80 - __u32 reserved; 81 - union { 82 - struct vhost_iotlb_msg iotlb; 83 - __u8 padding[64]; 84 - }; 85 - }; 86 - 87 - struct vhost_memory_region { 88 - __u64 guest_phys_addr; 89 - __u64 memory_size; /* bytes */ 90 - __u64 userspace_addr; 91 - __u64 flags_padding; /* No flags are currently specified. */ 92 - }; 93 - 94 - /* All region addresses and sizes must be 4K aligned. */ 95 - #define VHOST_PAGE_SIZE 0x1000 96 - 97 - struct vhost_memory { 98 - __u32 nregions; 99 - __u32 padding; 100 - struct vhost_memory_region regions[0]; 101 - }; 102 17 103 18 /* ioctls */ 104 19 ··· 101 186 * device. This can be used to stop the ring (e.g. for migration). */ 102 187 #define VHOST_NET_SET_BACKEND _IOW(VHOST_VIRTIO, 0x30, struct vhost_vring_file) 103 188 104 - /* Feature bits */ 105 - /* Log all write descriptors. Can be changed while device is active. */ 106 - #define VHOST_F_LOG_ALL 26 107 - /* vhost-net should add virtio_net_hdr for RX, and strip for TX packets. */ 108 - #define VHOST_NET_F_VIRTIO_NET_HDR 27 109 - 110 - /* VHOST_SCSI specific definitions */ 111 - 112 - /* 113 - * Used by QEMU userspace to ensure a consistent vhost-scsi ABI. 114 - * 115 - * ABI Rev 0: July 2012 version starting point for v3.6-rc merge candidate + 116 - * RFC-v2 vhost-scsi userspace. Add GET_ABI_VERSION ioctl usage 117 - * ABI Rev 1: January 2013. Ignore vhost_tpgt filed in struct vhost_scsi_target. 118 - * All the targets under vhost_wwpn can be seen and used by guset. 
119 - */ 120 - 121 - #define VHOST_SCSI_ABI_VERSION 1 122 - 123 - struct vhost_scsi_target { 124 - int abi_version; 125 - char vhost_wwpn[224]; /* TRANSPORT_IQN_LEN */ 126 - unsigned short vhost_tpgt; 127 - unsigned short reserved; 128 - }; 189 + /* VHOST_SCSI specific defines */ 129 190 130 191 #define VHOST_SCSI_SET_ENDPOINT _IOW(VHOST_VIRTIO, 0x40, struct vhost_scsi_target) 131 192 #define VHOST_SCSI_CLEAR_ENDPOINT _IOW(VHOST_VIRTIO, 0x41, struct vhost_scsi_target)
+1
tools/lib/bpf/.gitignore
··· 1 1 libbpf_version.h 2 2 FEATURE-DUMP.libbpf 3 + test_libbpf
+14
tools/lib/bpf/README.rst
··· 132 132 Format of version script and ways to handle ABI changes, including 133 133 incompatible ones, described in details in [1]. 134 134 135 + Stand-alone build 136 + ================= 137 + 138 + Under https://github.com/libbpf/libbpf there is a (semi-)automated 139 + mirror of the mainline's version of libbpf for a stand-alone build. 140 + 141 + However, all changes to libbpf's code base must be upstreamed through 142 + the mainline kernel tree. 143 + 144 + License 145 + ======= 146 + 147 + libbpf is dual-licensed under LGPL 2.1 and BSD 2-Clause. 148 + 135 149 Links 136 150 ===== 137 151
+15 -4
tools/lib/bpf/bpf.c
··· 65 65 return syscall(__NR_bpf, cmd, attr, size); 66 66 } 67 67 68 + static inline int sys_bpf_prog_load(union bpf_attr *attr, unsigned int size) 69 + { 70 + int fd; 71 + 72 + do { 73 + fd = sys_bpf(BPF_PROG_LOAD, attr, size); 74 + } while (fd < 0 && errno == EAGAIN); 75 + 76 + return fd; 77 + } 78 + 68 79 int bpf_create_map_xattr(const struct bpf_create_map_attr *create_attr) 69 80 { 70 81 __u32 name_len = create_attr->name ? strlen(create_attr->name) : 0; ··· 243 232 memcpy(attr.prog_name, load_attr->name, 244 233 min(name_len, BPF_OBJ_NAME_LEN - 1)); 245 234 246 - fd = sys_bpf(BPF_PROG_LOAD, &attr, sizeof(attr)); 235 + fd = sys_bpf_prog_load(&attr, sizeof(attr)); 247 236 if (fd >= 0) 248 237 return fd; 249 238 ··· 280 269 break; 281 270 } 282 271 283 - fd = sys_bpf(BPF_PROG_LOAD, &attr, sizeof(attr)); 272 + fd = sys_bpf_prog_load(&attr, sizeof(attr)); 284 273 285 274 if (fd >= 0) 286 275 goto done; ··· 294 283 attr.log_size = log_buf_sz; 295 284 attr.log_level = 1; 296 285 log_buf[0] = 0; 297 - fd = sys_bpf(BPF_PROG_LOAD, &attr, sizeof(attr)); 286 + fd = sys_bpf_prog_load(&attr, sizeof(attr)); 298 287 done: 299 288 free(finfo); 300 289 free(linfo); ··· 339 328 attr.kern_version = kern_version; 340 329 attr.prog_flags = prog_flags; 341 330 342 - return sys_bpf(BPF_PROG_LOAD, &attr, sizeof(attr)); 331 + return sys_bpf_prog_load(&attr, sizeof(attr)); 343 332 } 344 333 345 334 int bpf_map_update_elem(int fd, const void *key, const void *value,
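With the wrapper in place, every BPF_PROG_LOAD issued through these entry points is retried while the kernel reports EAGAIN, so existing callers need no change. A caller sketch (the program in insns/insns_cnt is a placeholder; assumes libbpf's bpf.h):

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <bpf/bpf.h>

/* Sketch; "insns"/"insns_cnt" stand in for a prepared BPF program. */
static int load_prog(const struct bpf_insn *insns, size_t insns_cnt)
{
	char log_buf[4096];
	int fd;

	fd = bpf_load_program(BPF_PROG_TYPE_SOCKET_FILTER, insns, insns_cnt,
			      "GPL", 0, log_buf, sizeof(log_buf));
	if (fd < 0)	/* EAGAIN has already been retried internally */
		fprintf(stderr, "prog load failed: %s\n", strerror(errno));
	return fd;
}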
+2 -2
tools/lib/traceevent/event-parse-api.c
··· 194 194 } 195 195 196 196 /** 197 - * tep_is_file_bigendian - get if the file is in big endian order 197 + * tep_file_bigendian - get if the file is in big endian order 198 198 * @pevent: a handle to the tep_handle 199 199 * 200 200 * This returns if the file is in big endian order 201 201 * If @pevent is NULL, 0 is returned. 202 202 */ 203 - int tep_is_file_bigendian(struct tep_handle *pevent) 203 + int tep_file_bigendian(struct tep_handle *pevent) 204 204 { 205 205 if(pevent) 206 206 return pevent->file_bigendian;
+2 -2
tools/lib/traceevent/event-parse-local.h
··· 7 7 #ifndef _PARSE_EVENTS_INT_H 8 8 #define _PARSE_EVENTS_INT_H 9 9 10 - struct cmdline; 10 + struct tep_cmdline; 11 11 struct cmdline_list; 12 12 struct func_map; 13 13 struct func_list; ··· 36 36 int long_size; 37 37 int page_size; 38 38 39 - struct cmdline *cmdlines; 39 + struct tep_cmdline *cmdlines; 40 40 struct cmdline_list *cmdlist; 41 41 int cmdline_count; 42 42
+82 -47
tools/lib/traceevent/event-parse.c
··· 124 124 return calloc(1, sizeof(struct tep_print_arg)); 125 125 } 126 126 127 - struct cmdline { 127 + struct tep_cmdline { 128 128 char *comm; 129 129 int pid; 130 130 }; 131 131 132 132 static int cmdline_cmp(const void *a, const void *b) 133 133 { 134 - const struct cmdline *ca = a; 135 - const struct cmdline *cb = b; 134 + const struct tep_cmdline *ca = a; 135 + const struct tep_cmdline *cb = b; 136 136 137 137 if (ca->pid < cb->pid) 138 138 return -1; ··· 152 152 { 153 153 struct cmdline_list *cmdlist = pevent->cmdlist; 154 154 struct cmdline_list *item; 155 - struct cmdline *cmdlines; 155 + struct tep_cmdline *cmdlines; 156 156 int i; 157 157 158 158 cmdlines = malloc(sizeof(*cmdlines) * pevent->cmdline_count); ··· 179 179 180 180 static const char *find_cmdline(struct tep_handle *pevent, int pid) 181 181 { 182 - const struct cmdline *comm; 183 - struct cmdline key; 182 + const struct tep_cmdline *comm; 183 + struct tep_cmdline key; 184 184 185 185 if (!pid) 186 186 return "<idle>"; ··· 208 208 */ 209 209 int tep_pid_is_registered(struct tep_handle *pevent, int pid) 210 210 { 211 - const struct cmdline *comm; 212 - struct cmdline key; 211 + const struct tep_cmdline *comm; 212 + struct tep_cmdline key; 213 213 214 214 if (!pid) 215 215 return 1; ··· 232 232 * we must add this pid. This is much slower than when cmdlines 233 233 * are added before the array is initialized. 234 234 */ 235 - static int add_new_comm(struct tep_handle *pevent, const char *comm, int pid) 235 + static int add_new_comm(struct tep_handle *pevent, 236 + const char *comm, int pid, bool override) 236 237 { 237 - struct cmdline *cmdlines = pevent->cmdlines; 238 - const struct cmdline *cmdline; 239 - struct cmdline key; 238 + struct tep_cmdline *cmdlines = pevent->cmdlines; 239 + struct tep_cmdline *cmdline; 240 + struct tep_cmdline key; 241 + char *new_comm; 240 242 241 243 if (!pid) 242 244 return 0; ··· 249 247 cmdline = bsearch(&key, pevent->cmdlines, pevent->cmdline_count, 250 248 sizeof(*pevent->cmdlines), cmdline_cmp); 251 249 if (cmdline) { 252 - errno = EEXIST; 253 - return -1; 250 + if (!override) { 251 + errno = EEXIST; 252 + return -1; 253 + } 254 + new_comm = strdup(comm); 255 + if (!new_comm) { 256 + errno = ENOMEM; 257 + return -1; 258 + } 259 + free(cmdline->comm); 260 + cmdline->comm = new_comm; 261 + 262 + return 0; 254 263 } 255 264 256 265 cmdlines = realloc(cmdlines, sizeof(*cmdlines) * (pevent->cmdline_count + 1)); ··· 288 275 return 0; 289 276 } 290 277 291 - /** 292 - * tep_register_comm - register a pid / comm mapping 293 - * @pevent: handle for the pevent 294 - * @comm: the command line to register 295 - * @pid: the pid to map the command line to 296 - * 297 - * This adds a mapping to search for command line names with 298 - * a given pid. The comm is duplicated. 
299 - */ 300 - int tep_register_comm(struct tep_handle *pevent, const char *comm, int pid) 278 + static int _tep_register_comm(struct tep_handle *pevent, 279 + const char *comm, int pid, bool override) 301 280 { 302 281 struct cmdline_list *item; 303 282 304 283 if (pevent->cmdlines) 305 - return add_new_comm(pevent, comm, pid); 284 + return add_new_comm(pevent, comm, pid, override); 306 285 307 286 item = malloc(sizeof(*item)); 308 287 if (!item) ··· 315 310 pevent->cmdline_count++; 316 311 317 312 return 0; 313 + } 314 + 315 + /** 316 + * tep_register_comm - register a pid / comm mapping 317 + * @pevent: handle for the pevent 318 + * @comm: the command line to register 319 + * @pid: the pid to map the command line to 320 + * 321 + * This adds a mapping to search for command line names with 322 + * a given pid. The comm is duplicated. If a command with the same pid 323 + * already exists, -1 is returned and errno is set to EEXIST 324 + */ 325 + int tep_register_comm(struct tep_handle *pevent, const char *comm, int pid) 326 + { 327 + return _tep_register_comm(pevent, comm, pid, false); 328 + } 329 + 330 + /** 331 + * tep_override_comm - register a pid / comm mapping 332 + * @pevent: handle for the pevent 333 + * @comm: the command line to register 334 + * @pid: the pid to map the command line to 335 + * 336 + * This adds a mapping to search for command line names with 337 + * a given pid. The comm is duplicated. If a command with the same pid 338 + * already exists, the command string is updated with the new one 339 + */ 340 + int tep_override_comm(struct tep_handle *pevent, const char *comm, int pid) 341 + { 342 + if (!pevent->cmdlines && cmdline_init(pevent)) { 343 + errno = ENOMEM; 344 + return -1; 345 + } 346 + return _tep_register_comm(pevent, comm, pid, true); 318 347 } 319 348 320 349 int tep_register_trace_clock(struct tep_handle *pevent, const char *trace_clock) ··· 5266 5227 } 5267 5228 5268 5229 /** 5269 - * tep_data_event_from_type - find the event by a given type 5270 - * @pevent: a handle to the pevent 5271 - * @type: the type of the event. 5272 - * 5273 - * This returns the event form a given @type; 5274 - */ 5275 - struct tep_event *tep_data_event_from_type(struct tep_handle *pevent, int type) 5276 - { 5277 - return tep_find_event(pevent, type); 5278 - } 5279 - 5280 - /** 5281 5230 * tep_data_pid - parse the PID from record 5282 5231 * @pevent: a handle to the pevent 5283 5232 * @rec: the record to parse ··· 5319 5292 return comm; 5320 5293 } 5321 5294 5322 - static struct cmdline * 5323 - pid_from_cmdlist(struct tep_handle *pevent, const char *comm, struct cmdline *next) 5295 + static struct tep_cmdline * 5296 + pid_from_cmdlist(struct tep_handle *pevent, const char *comm, struct tep_cmdline *next) 5324 5297 { 5325 5298 struct cmdline_list *cmdlist = (struct cmdline_list *)next; 5326 5299 ··· 5332 5305 while (cmdlist && strcmp(cmdlist->comm, comm) != 0) 5333 5306 cmdlist = cmdlist->next; 5334 5307 5335 - return (struct cmdline *)cmdlist; 5308 + return (struct tep_cmdline *)cmdlist; 5336 5309 } 5337 5310 5338 5311 /** ··· 5348 5321 * next pid. 5349 5322 * Also, it does a linear search, so it may be slow. 
5350 5323 */ 5351 - struct cmdline *tep_data_pid_from_comm(struct tep_handle *pevent, const char *comm, 5352 - struct cmdline *next) 5324 + struct tep_cmdline *tep_data_pid_from_comm(struct tep_handle *pevent, const char *comm, 5325 + struct tep_cmdline *next) 5353 5326 { 5354 - struct cmdline *cmdline; 5327 + struct tep_cmdline *cmdline; 5355 5328 5356 5329 /* 5357 5330 * If the cmdlines have not been converted yet, then use ··· 5390 5363 * Returns the pid for a give cmdline. If @cmdline is NULL, then 5391 5364 * -1 is returned. 5392 5365 */ 5393 - int tep_cmdline_pid(struct tep_handle *pevent, struct cmdline *cmdline) 5366 + int tep_cmdline_pid(struct tep_handle *pevent, struct tep_cmdline *cmdline) 5394 5367 { 5395 5368 struct cmdline_list *cmdlist = (struct cmdline_list *)cmdline; 5396 5369 ··· 6620 6593 * 6621 6594 * If @id is >= 0, then it is used to find the event. 6622 6595 * else @sys_name and @event_name are used. 6596 + * 6597 + * Returns: 6598 + * TEP_REGISTER_SUCCESS_OVERWRITE if an existing handler is overwritten 6599 + * TEP_REGISTER_SUCCESS if a new handler is registered successfully 6600 + * negative TEP_ERRNO_... in case of an error 6601 + * 6623 6602 */ 6624 6603 int tep_register_event_handler(struct tep_handle *pevent, int id, 6625 6604 const char *sys_name, const char *event_name, ··· 6643 6610 6644 6611 event->handler = func; 6645 6612 event->context = context; 6646 - return 0; 6613 + return TEP_REGISTER_SUCCESS_OVERWRITE; 6647 6614 6648 6615 not_found: 6649 6616 /* Save for later use. */ ··· 6673 6640 pevent->handlers = handle; 6674 6641 handle->context = context; 6675 6642 6676 - return -1; 6643 + return TEP_REGISTER_SUCCESS; 6677 6644 } 6678 6645 6679 6646 static int handle_matches(struct event_handler *handler, int id, ··· 6756 6723 { 6757 6724 struct tep_handle *pevent = calloc(1, sizeof(*pevent)); 6758 6725 6759 - if (pevent) 6726 + if (pevent) { 6760 6727 pevent->ref_count = 1; 6728 + pevent->host_bigendian = tep_host_bigendian(); 6729 + } 6761 6730 6762 6731 return pevent; 6763 6732 }
+11 -6
tools/lib/traceevent/event-parse.h
··· 432 432 tep_func_resolver_t *func, void *priv); 433 433 void tep_reset_function_resolver(struct tep_handle *pevent); 434 434 int tep_register_comm(struct tep_handle *pevent, const char *comm, int pid); 435 + int tep_override_comm(struct tep_handle *pevent, const char *comm, int pid); 435 436 int tep_register_trace_clock(struct tep_handle *pevent, const char *trace_clock); 436 437 int tep_register_function(struct tep_handle *pevent, char *name, 437 438 unsigned long long addr, char *mod); ··· 485 484 struct tep_event *event, const char *name, 486 485 struct tep_record *record, int err); 487 486 487 + enum tep_reg_handler { 488 + TEP_REGISTER_SUCCESS = 0, 489 + TEP_REGISTER_SUCCESS_OVERWRITE, 490 + }; 491 + 488 492 int tep_register_event_handler(struct tep_handle *pevent, int id, 489 493 const char *sys_name, const char *event_name, 490 494 tep_event_handler_func func, void *context); ··· 526 520 void tep_data_lat_fmt(struct tep_handle *pevent, 527 521 struct trace_seq *s, struct tep_record *record); 528 522 int tep_data_type(struct tep_handle *pevent, struct tep_record *rec); 529 - struct tep_event *tep_data_event_from_type(struct tep_handle *pevent, int type); 530 523 int tep_data_pid(struct tep_handle *pevent, struct tep_record *rec); 531 524 int tep_data_preempt_count(struct tep_handle *pevent, struct tep_record *rec); 532 525 int tep_data_flags(struct tep_handle *pevent, struct tep_record *rec); 533 526 const char *tep_data_comm_from_pid(struct tep_handle *pevent, int pid); 534 - struct cmdline; 535 - struct cmdline *tep_data_pid_from_comm(struct tep_handle *pevent, const char *comm, 536 - struct cmdline *next); 537 - int tep_cmdline_pid(struct tep_handle *pevent, struct cmdline *cmdline); 527 + struct tep_cmdline; 528 + struct tep_cmdline *tep_data_pid_from_comm(struct tep_handle *pevent, const char *comm, 529 + struct tep_cmdline *next); 530 + int tep_cmdline_pid(struct tep_handle *pevent, struct tep_cmdline *cmdline); 538 531 539 532 void tep_print_field(struct trace_seq *s, void *data, 540 533 struct tep_format_field *field); ··· 558 553 void tep_set_long_size(struct tep_handle *pevent, int long_size); 559 554 int tep_get_page_size(struct tep_handle *pevent); 560 555 void tep_set_page_size(struct tep_handle *pevent, int _page_size); 561 - int tep_is_file_bigendian(struct tep_handle *pevent); 556 + int tep_file_bigendian(struct tep_handle *pevent); 562 557 void tep_set_file_bigendian(struct tep_handle *pevent, enum tep_endian endian); 563 558 int tep_is_host_bigendian(struct tep_handle *pevent); 564 559 void tep_set_host_bigendian(struct tep_handle *pevent, enum tep_endian endian);
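A short caller sketch tying the new pieces together ("tep" and "my_handler" are placeholders for an initialized handle and a tep_event_handler_func):

#include <errno.h>
#include <stdio.h>

static void register_bits(struct tep_handle *tep)
{
	int ret;

	/* tep_register_comm() now reports an existing pid via EEXIST;
	 * tep_override_comm() replaces the stored comm instead. */
	if (tep_register_comm(tep, "bash", 1234) < 0 && errno == EEXIST)
		tep_override_comm(tep, "bash", 1234);

	ret = tep_register_event_handler(tep, -1, "sched", "sched_switch",
					 my_handler, NULL);
	if (ret == TEP_REGISTER_SUCCESS_OVERWRITE)
		fprintf(stderr, "replaced an existing sched_switch handler\n");
}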
+1 -1
tools/lib/traceevent/plugin_kvm.c
··· 389 389 * We can only use the structure if file is of the same 390 390 * endianness. 391 391 */ 392 - if (tep_is_file_bigendian(event->pevent) == 392 + if (tep_file_bigendian(event->pevent) == 393 393 tep_is_host_bigendian(event->pevent)) { 394 394 395 395 trace_seq_printf(s, "%u q%u%s %s%s %spae %snxe %swp%s%s%s",
+12 -5
tools/lib/traceevent/trace-seq.c
··· 100 100 * @fmt: printf format string 101 101 * 102 102 * It returns 0 if the trace oversizes the buffer's free 103 - * space, 1 otherwise. 103 + * space, the number of characters printed, or a negative 104 + * value in case of an error. 104 105 * 105 106 * The tracer may use either sequence operations or its own 106 107 * copy to user routines. To simplify formating of a trace ··· 130 129 goto try_again; 131 130 } 132 131 133 - s->len += ret; 132 + if (ret > 0) 133 + s->len += ret; 134 134 135 - return 1; 135 + return ret; 136 136 } 137 137 138 138 /** ··· 141 139 * @s: trace sequence descriptor 142 140 * @fmt: printf format string 143 141 * 142 + * It returns 0 if the trace oversizes the buffer's free 143 + * space, the number of characters printed, or a negative 144 + * value in case of an error. 145 + * * 144 146 * The tracer may use either sequence operations or its own 145 147 * copy to user routines. To simplify formating of a trace 146 148 * trace_seq_printf is used to store strings into a special ··· 169 163 goto try_again; 170 164 } 171 165 172 - s->len += ret; 166 + if (ret > 0) 167 + s->len += ret; 173 168 174 - return len; 169 + return ret; 175 170 } 176 171 177 172 /**
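Both helpers now report truncation and errors to the caller instead of always claiming success. A minimal checking sketch (hypothetical; assumes an initialized trace_seq):

/* Sketch of the new vsnprintf-style return convention. */
static void print_pid(struct trace_seq *s, int pid)
{
	int ret = trace_seq_printf(s, "pid %d", pid);

	if (ret < 0)
		fprintf(stderr, "formatting error\n");
	else if (ret == 0)
		fprintf(stderr, "no room left in the trace_seq buffer\n");
}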
+5 -3
tools/perf/Makefile.perf
··· 524 524 525 525 all: shell_compatibility_test $(ALL_PROGRAMS) $(LANG_BINDINGS) $(OTHER_PROGRAMS) 526 526 527 + # Create python binding output directory if not already present 528 + _dummy := $(shell [ -d '$(OUTPUT)python' ] || mkdir -p '$(OUTPUT)python') 529 + 527 530 $(OUTPUT)python/perf.so: $(PYTHON_EXT_SRCS) $(PYTHON_EXT_DEPS) $(LIBTRACEEVENT_DYNAMIC_LIST) 528 531 $(QUIET_GEN)LDSHARED="$(CC) -pthread -shared" \ 529 532 CFLAGS='$(CFLAGS)' LDFLAGS='$(LDFLAGS) $(LIBTRACEEVENT_DYNAMIC_LIST_LDFLAGS)' \ 530 533 $(PYTHON_WORD) util/setup.py \ 531 534 --quiet build_ext; \ 532 - mkdir -p $(OUTPUT)python && \ 533 535 cp $(PYTHON_EXTBUILD_LIB)perf*.so $(OUTPUT)python/ 534 536 535 537 please_set_SHELL_PATH_to_a_more_modern_shell: ··· 662 660 $(QUIET_LINK)$(CC) $(CFLAGS) -o $@ $(LDFLAGS) $(filter %.o,$^) $(LIBS) 663 661 664 662 ifndef NO_PERF_READ_VDSO32 665 - $(OUTPUT)perf-read-vdso32: perf-read-vdso.c util/find-vdso-map.c 663 + $(OUTPUT)perf-read-vdso32: perf-read-vdso.c util/find-map.c 666 664 $(QUIET_CC)$(CC) -m32 $(filter -static,$(LDFLAGS)) -Wall -Werror -o $@ perf-read-vdso.c 667 665 endif 668 666 669 667 ifndef NO_PERF_READ_VDSOX32 670 - $(OUTPUT)perf-read-vdsox32: perf-read-vdso.c util/find-vdso-map.c 668 + $(OUTPUT)perf-read-vdsox32: perf-read-vdso.c util/find-map.c 671 669 $(QUIET_CC)$(CC) -mx32 $(filter -static,$(LDFLAGS)) -Wall -Werror -o $@ perf-read-vdso.c 672 670 endif 673 671
+1
tools/perf/arch/arm/tests/Build
··· 1 1 libperf-y += regs_load.o 2 2 libperf-y += dwarf-unwind.o 3 + libperf-y += vectors-page.o 3 4 4 5 libperf-y += arch-tests.o
+4
tools/perf/arch/arm/tests/arch-tests.c
··· 11 11 }, 12 12 #endif 13 13 { 14 + .desc = "Vectors page", 15 + .func = test__vectors_page, 16 + }, 17 + { 14 18 .func = NULL, 15 19 }, 16 20 };
+24
tools/perf/arch/arm/tests/vectors-page.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + #include <stdio.h> 3 + #include <string.h> 4 + #include <linux/compiler.h> 5 + 6 + #include "debug.h" 7 + #include "tests/tests.h" 8 + #include "util/find-map.c" 9 + 10 + #define VECTORS__MAP_NAME "[vectors]" 11 + 12 + int test__vectors_page(struct test *test __maybe_unused, 13 + int subtest __maybe_unused) 14 + { 15 + void *start, *end; 16 + 17 + if (find_map(&start, &end, VECTORS__MAP_NAME)) { 18 + pr_err("%s not found, is CONFIG_KUSER_HELPERS enabled?\n", 19 + VECTORS__MAP_NAME); 20 + return TEST_FAIL; 21 + } 22 + 23 + return TEST_OK; 24 + }
+11 -4
tools/perf/arch/powerpc/Makefile
··· 14 14 out := $(OUTPUT)arch/powerpc/include/generated/asm 15 15 header32 := $(out)/syscalls_32.c 16 16 header64 := $(out)/syscalls_64.c 17 - sysdef := $(srctree)/tools/arch/powerpc/include/uapi/asm/unistd.h 18 - sysprf := $(srctree)/tools/perf/arch/powerpc/entry/syscalls/ 17 + syskrn := $(srctree)/arch/powerpc/kernel/syscalls/syscall.tbl 18 + sysprf := $(srctree)/tools/perf/arch/powerpc/entry/syscalls 19 + sysdef := $(sysprf)/syscall.tbl 19 20 systbl := $(sysprf)/mksyscalltbl 20 21 21 22 # Create output directory if not already present 22 23 _dummy := $(shell [ -d '$(out)' ] || mkdir -p '$(out)') 23 24 24 25 $(header64): $(sysdef) $(systbl) 25 - $(Q)$(SHELL) '$(systbl)' '64' '$(CC)' $(sysdef) > $@ 26 + @(test -d ../../kernel -a -d ../../tools -a -d ../perf && ( \ 27 + (diff -B $(sysdef) $(syskrn) >/dev/null) \ 28 + || echo "Warning: Kernel ABI header at '$(sysdef)' differs from latest version at '$(syskrn)'" >&2 )) || true 29 + $(Q)$(SHELL) '$(systbl)' '64' $(sysdef) > $@ 26 30 27 31 $(header32): $(sysdef) $(systbl) 28 - $(Q)$(SHELL) '$(systbl)' '32' '$(CC)' $(sysdef) > $@ 32 + @(test -d ../../kernel -a -d ../../tools -a -d ../perf && ( \ 33 + (diff -B $(sysdef) $(syskrn) >/dev/null) \ 34 + || echo "Warning: Kernel ABI header at '$(sysdef)' differs from latest version at '$(syskrn)'" >&2 )) || true 35 + $(Q)$(SHELL) '$(systbl)' '32' $(sysdef) > $@ 29 36 30 37 clean:: 31 38 $(call QUIET_CLEAN, powerpc) $(RM) $(header32) $(header64)
+12 -10
tools/perf/arch/powerpc/entry/syscalls/mksyscalltbl
··· 9 9 # Changed by: Ravi Bangoria <ravi.bangoria@linux.vnet.ibm.com> 10 10 11 11 wordsize=$1 12 - gcc=$2 13 - input=$3 12 + SYSCALL_TBL=$2 14 13 15 - if ! test -r $input; then 14 + if ! test -r $SYSCALL_TBL; then 16 15 echo "Could not read input file" >&2 17 16 exit 1 18 17 fi ··· 19 20 create_table() 20 21 { 21 22 local wordsize=$1 22 - local max_nr 23 + local max_nr nr abi sc discard 24 + max_nr=-1 25 + nr=0 23 26 24 27 echo "static const char *syscalltbl_powerpc_${wordsize}[] = {" 25 - while read sc nr; do 26 - printf '\t[%d] = "%s",\n' $nr $sc 27 - max_nr=$nr 28 + while read nr abi sc discard; do 29 + if [ "$max_nr" -lt "$nr" ]; then 30 + printf '\t[%d] = "%s",\n' $nr $sc 31 + max_nr=$nr 32 + fi 28 33 done 29 34 echo '};' 30 35 echo "#define SYSCALLTBL_POWERPC_${wordsize}_MAX_ID $max_nr" 31 36 } 32 37 33 - $gcc -m${wordsize} -E -dM -x c $input \ 34 - |sed -ne 's/^#define __NR_//p' \ 35 - |sort -t' ' -k2 -nu \ 38 + grep -E "^[[:digit:]]+[[:space:]]+(common|spu|nospu|${wordsize})" $SYSCALL_TBL \ 39 + |sort -k1 -n \ 36 40 |create_table ${wordsize}
+427
tools/perf/arch/powerpc/entry/syscalls/syscall.tbl
··· 1 + # SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note 2 + # 3 + # system call numbers and entry vectors for powerpc 4 + # 5 + # The format is: 6 + # <number> <abi> <name> <entry point> <compat entry point> 7 + # 8 + # The <abi> can be common, spu, nospu, 64, or 32 for this file. 9 + # 10 + 0 nospu restart_syscall sys_restart_syscall 11 + 1 nospu exit sys_exit 12 + 2 nospu fork ppc_fork 13 + 3 common read sys_read 14 + 4 common write sys_write 15 + 5 common open sys_open compat_sys_open 16 + 6 common close sys_close 17 + 7 common waitpid sys_waitpid 18 + 8 common creat sys_creat 19 + 9 common link sys_link 20 + 10 common unlink sys_unlink 21 + 11 nospu execve sys_execve compat_sys_execve 22 + 12 common chdir sys_chdir 23 + 13 common time sys_time compat_sys_time 24 + 14 common mknod sys_mknod 25 + 15 common chmod sys_chmod 26 + 16 common lchown sys_lchown 27 + 17 common break sys_ni_syscall 28 + 18 32 oldstat sys_stat sys_ni_syscall 29 + 18 64 oldstat sys_ni_syscall 30 + 18 spu oldstat sys_ni_syscall 31 + 19 common lseek sys_lseek compat_sys_lseek 32 + 20 common getpid sys_getpid 33 + 21 nospu mount sys_mount compat_sys_mount 34 + 22 32 umount sys_oldumount 35 + 22 64 umount sys_ni_syscall 36 + 22 spu umount sys_ni_syscall 37 + 23 common setuid sys_setuid 38 + 24 common getuid sys_getuid 39 + 25 common stime sys_stime compat_sys_stime 40 + 26 nospu ptrace sys_ptrace compat_sys_ptrace 41 + 27 common alarm sys_alarm 42 + 28 32 oldfstat sys_fstat sys_ni_syscall 43 + 28 64 oldfstat sys_ni_syscall 44 + 28 spu oldfstat sys_ni_syscall 45 + 29 nospu pause sys_pause 46 + 30 nospu utime sys_utime compat_sys_utime 47 + 31 common stty sys_ni_syscall 48 + 32 common gtty sys_ni_syscall 49 + 33 common access sys_access 50 + 34 common nice sys_nice 51 + 35 common ftime sys_ni_syscall 52 + 36 common sync sys_sync 53 + 37 common kill sys_kill 54 + 38 common rename sys_rename 55 + 39 common mkdir sys_mkdir 56 + 40 common rmdir sys_rmdir 57 + 41 common dup sys_dup 58 + 42 common pipe sys_pipe 59 + 43 common times sys_times compat_sys_times 60 + 44 common prof sys_ni_syscall 61 + 45 common brk sys_brk 62 + 46 common setgid sys_setgid 63 + 47 common getgid sys_getgid 64 + 48 nospu signal sys_signal 65 + 49 common geteuid sys_geteuid 66 + 50 common getegid sys_getegid 67 + 51 nospu acct sys_acct 68 + 52 nospu umount2 sys_umount 69 + 53 common lock sys_ni_syscall 70 + 54 common ioctl sys_ioctl compat_sys_ioctl 71 + 55 common fcntl sys_fcntl compat_sys_fcntl 72 + 56 common mpx sys_ni_syscall 73 + 57 common setpgid sys_setpgid 74 + 58 common ulimit sys_ni_syscall 75 + 59 32 oldolduname sys_olduname 76 + 59 64 oldolduname sys_ni_syscall 77 + 59 spu oldolduname sys_ni_syscall 78 + 60 common umask sys_umask 79 + 61 common chroot sys_chroot 80 + 62 nospu ustat sys_ustat compat_sys_ustat 81 + 63 common dup2 sys_dup2 82 + 64 common getppid sys_getppid 83 + 65 common getpgrp sys_getpgrp 84 + 66 common setsid sys_setsid 85 + 67 32 sigaction sys_sigaction compat_sys_sigaction 86 + 67 64 sigaction sys_ni_syscall 87 + 67 spu sigaction sys_ni_syscall 88 + 68 common sgetmask sys_sgetmask 89 + 69 common ssetmask sys_ssetmask 90 + 70 common setreuid sys_setreuid 91 + 71 common setregid sys_setregid 92 + 72 32 sigsuspend sys_sigsuspend 93 + 72 64 sigsuspend sys_ni_syscall 94 + 72 spu sigsuspend sys_ni_syscall 95 + 73 32 sigpending sys_sigpending compat_sys_sigpending 96 + 73 64 sigpending sys_ni_syscall 97 + 73 spu sigpending sys_ni_syscall 98 + 74 common sethostname sys_sethostname 99 + 75 common setrlimit 
sys_setrlimit compat_sys_setrlimit 100 + 76 32 getrlimit sys_old_getrlimit compat_sys_old_getrlimit 101 + 76 64 getrlimit sys_ni_syscall 102 + 76 spu getrlimit sys_ni_syscall 103 + 77 common getrusage sys_getrusage compat_sys_getrusage 104 + 78 common gettimeofday sys_gettimeofday compat_sys_gettimeofday 105 + 79 common settimeofday sys_settimeofday compat_sys_settimeofday 106 + 80 common getgroups sys_getgroups 107 + 81 common setgroups sys_setgroups 108 + 82 32 select ppc_select sys_ni_syscall 109 + 82 64 select sys_ni_syscall 110 + 82 spu select sys_ni_syscall 111 + 83 common symlink sys_symlink 112 + 84 32 oldlstat sys_lstat sys_ni_syscall 113 + 84 64 oldlstat sys_ni_syscall 114 + 84 spu oldlstat sys_ni_syscall 115 + 85 common readlink sys_readlink 116 + 86 nospu uselib sys_uselib 117 + 87 nospu swapon sys_swapon 118 + 88 nospu reboot sys_reboot 119 + 89 32 readdir sys_old_readdir compat_sys_old_readdir 120 + 89 64 readdir sys_ni_syscall 121 + 89 spu readdir sys_ni_syscall 122 + 90 common mmap sys_mmap 123 + 91 common munmap sys_munmap 124 + 92 common truncate sys_truncate compat_sys_truncate 125 + 93 common ftruncate sys_ftruncate compat_sys_ftruncate 126 + 94 common fchmod sys_fchmod 127 + 95 common fchown sys_fchown 128 + 96 common getpriority sys_getpriority 129 + 97 common setpriority sys_setpriority 130 + 98 common profil sys_ni_syscall 131 + 99 nospu statfs sys_statfs compat_sys_statfs 132 + 100 nospu fstatfs sys_fstatfs compat_sys_fstatfs 133 + 101 common ioperm sys_ni_syscall 134 + 102 common socketcall sys_socketcall compat_sys_socketcall 135 + 103 common syslog sys_syslog 136 + 104 common setitimer sys_setitimer compat_sys_setitimer 137 + 105 common getitimer sys_getitimer compat_sys_getitimer 138 + 106 common stat sys_newstat compat_sys_newstat 139 + 107 common lstat sys_newlstat compat_sys_newlstat 140 + 108 common fstat sys_newfstat compat_sys_newfstat 141 + 109 32 olduname sys_uname 142 + 109 64 olduname sys_ni_syscall 143 + 109 spu olduname sys_ni_syscall 144 + 110 common iopl sys_ni_syscall 145 + 111 common vhangup sys_vhangup 146 + 112 common idle sys_ni_syscall 147 + 113 common vm86 sys_ni_syscall 148 + 114 common wait4 sys_wait4 compat_sys_wait4 149 + 115 nospu swapoff sys_swapoff 150 + 116 common sysinfo sys_sysinfo compat_sys_sysinfo 151 + 117 nospu ipc sys_ipc compat_sys_ipc 152 + 118 common fsync sys_fsync 153 + 119 32 sigreturn sys_sigreturn compat_sys_sigreturn 154 + 119 64 sigreturn sys_ni_syscall 155 + 119 spu sigreturn sys_ni_syscall 156 + 120 nospu clone ppc_clone 157 + 121 common setdomainname sys_setdomainname 158 + 122 common uname sys_newuname 159 + 123 common modify_ldt sys_ni_syscall 160 + 124 common adjtimex sys_adjtimex compat_sys_adjtimex 161 + 125 common mprotect sys_mprotect 162 + 126 32 sigprocmask sys_sigprocmask compat_sys_sigprocmask 163 + 126 64 sigprocmask sys_ni_syscall 164 + 126 spu sigprocmask sys_ni_syscall 165 + 127 common create_module sys_ni_syscall 166 + 128 nospu init_module sys_init_module 167 + 129 nospu delete_module sys_delete_module 168 + 130 common get_kernel_syms sys_ni_syscall 169 + 131 nospu quotactl sys_quotactl 170 + 132 common getpgid sys_getpgid 171 + 133 common fchdir sys_fchdir 172 + 134 common bdflush sys_bdflush 173 + 135 common sysfs sys_sysfs 174 + 136 32 personality sys_personality ppc64_personality 175 + 136 64 personality ppc64_personality 176 + 136 spu personality ppc64_personality 177 + 137 common afs_syscall sys_ni_syscall 178 + 138 common setfsuid sys_setfsuid 179 + 139 common setfsgid sys_setfsgid 180 + 
140 common _llseek sys_llseek 181 + 141 common getdents sys_getdents compat_sys_getdents 182 + 142 common _newselect sys_select compat_sys_select 183 + 143 common flock sys_flock 184 + 144 common msync sys_msync 185 + 145 common readv sys_readv compat_sys_readv 186 + 146 common writev sys_writev compat_sys_writev 187 + 147 common getsid sys_getsid 188 + 148 common fdatasync sys_fdatasync 189 + 149 nospu _sysctl sys_sysctl compat_sys_sysctl 190 + 150 common mlock sys_mlock 191 + 151 common munlock sys_munlock 192 + 152 common mlockall sys_mlockall 193 + 153 common munlockall sys_munlockall 194 + 154 common sched_setparam sys_sched_setparam 195 + 155 common sched_getparam sys_sched_getparam 196 + 156 common sched_setscheduler sys_sched_setscheduler 197 + 157 common sched_getscheduler sys_sched_getscheduler 198 + 158 common sched_yield sys_sched_yield 199 + 159 common sched_get_priority_max sys_sched_get_priority_max 200 + 160 common sched_get_priority_min sys_sched_get_priority_min 201 + 161 common sched_rr_get_interval sys_sched_rr_get_interval compat_sys_sched_rr_get_interval 202 + 162 common nanosleep sys_nanosleep compat_sys_nanosleep 203 + 163 common mremap sys_mremap 204 + 164 common setresuid sys_setresuid 205 + 165 common getresuid sys_getresuid 206 + 166 common query_module sys_ni_syscall 207 + 167 common poll sys_poll 208 + 168 common nfsservctl sys_ni_syscall 209 + 169 common setresgid sys_setresgid 210 + 170 common getresgid sys_getresgid 211 + 171 common prctl sys_prctl 212 + 172 nospu rt_sigreturn sys_rt_sigreturn compat_sys_rt_sigreturn 213 + 173 nospu rt_sigaction sys_rt_sigaction compat_sys_rt_sigaction 214 + 174 nospu rt_sigprocmask sys_rt_sigprocmask compat_sys_rt_sigprocmask 215 + 175 nospu rt_sigpending sys_rt_sigpending compat_sys_rt_sigpending 216 + 176 nospu rt_sigtimedwait sys_rt_sigtimedwait compat_sys_rt_sigtimedwait 217 + 177 nospu rt_sigqueueinfo sys_rt_sigqueueinfo compat_sys_rt_sigqueueinfo 218 + 178 nospu rt_sigsuspend sys_rt_sigsuspend compat_sys_rt_sigsuspend 219 + 179 common pread64 sys_pread64 compat_sys_pread64 220 + 180 common pwrite64 sys_pwrite64 compat_sys_pwrite64 221 + 181 common chown sys_chown 222 + 182 common getcwd sys_getcwd 223 + 183 common capget sys_capget 224 + 184 common capset sys_capset 225 + 185 nospu sigaltstack sys_sigaltstack compat_sys_sigaltstack 226 + 186 32 sendfile sys_sendfile compat_sys_sendfile 227 + 186 64 sendfile sys_sendfile64 228 + 186 spu sendfile sys_sendfile64 229 + 187 common getpmsg sys_ni_syscall 230 + 188 common putpmsg sys_ni_syscall 231 + 189 nospu vfork ppc_vfork 232 + 190 common ugetrlimit sys_getrlimit compat_sys_getrlimit 233 + 191 common readahead sys_readahead compat_sys_readahead 234 + 192 32 mmap2 sys_mmap2 compat_sys_mmap2 235 + 193 32 truncate64 sys_truncate64 compat_sys_truncate64 236 + 194 32 ftruncate64 sys_ftruncate64 compat_sys_ftruncate64 237 + 195 32 stat64 sys_stat64 238 + 196 32 lstat64 sys_lstat64 239 + 197 32 fstat64 sys_fstat64 240 + 198 nospu pciconfig_read sys_pciconfig_read 241 + 199 nospu pciconfig_write sys_pciconfig_write 242 + 200 nospu pciconfig_iobase sys_pciconfig_iobase 243 + 201 common multiplexer sys_ni_syscall 244 + 202 common getdents64 sys_getdents64 245 + 203 common pivot_root sys_pivot_root 246 + 204 32 fcntl64 sys_fcntl64 compat_sys_fcntl64 247 + 205 common madvise sys_madvise 248 + 206 common mincore sys_mincore 249 + 207 common gettid sys_gettid 250 + 208 common tkill sys_tkill 251 + 209 common setxattr sys_setxattr 252 + 210 common lsetxattr sys_lsetxattr 253 + 211 
common fsetxattr sys_fsetxattr 254 + 212 common getxattr sys_getxattr 255 + 213 common lgetxattr sys_lgetxattr 256 + 214 common fgetxattr sys_fgetxattr 257 + 215 common listxattr sys_listxattr 258 + 216 common llistxattr sys_llistxattr 259 + 217 common flistxattr sys_flistxattr 260 + 218 common removexattr sys_removexattr 261 + 219 common lremovexattr sys_lremovexattr 262 + 220 common fremovexattr sys_fremovexattr 263 + 221 common futex sys_futex compat_sys_futex 264 + 222 common sched_setaffinity sys_sched_setaffinity compat_sys_sched_setaffinity 265 + 223 common sched_getaffinity sys_sched_getaffinity compat_sys_sched_getaffinity 266 + # 224 unused 267 + 225 common tuxcall sys_ni_syscall 268 + 226 32 sendfile64 sys_sendfile64 compat_sys_sendfile64 269 + 227 common io_setup sys_io_setup compat_sys_io_setup 270 + 228 common io_destroy sys_io_destroy 271 + 229 common io_getevents sys_io_getevents compat_sys_io_getevents 272 + 230 common io_submit sys_io_submit compat_sys_io_submit 273 + 231 common io_cancel sys_io_cancel 274 + 232 nospu set_tid_address sys_set_tid_address 275 + 233 common fadvise64 sys_fadvise64 ppc32_fadvise64 276 + 234 nospu exit_group sys_exit_group 277 + 235 nospu lookup_dcookie sys_lookup_dcookie compat_sys_lookup_dcookie 278 + 236 common epoll_create sys_epoll_create 279 + 237 common epoll_ctl sys_epoll_ctl 280 + 238 common epoll_wait sys_epoll_wait 281 + 239 common remap_file_pages sys_remap_file_pages 282 + 240 common timer_create sys_timer_create compat_sys_timer_create 283 + 241 common timer_settime sys_timer_settime compat_sys_timer_settime 284 + 242 common timer_gettime sys_timer_gettime compat_sys_timer_gettime 285 + 243 common timer_getoverrun sys_timer_getoverrun 286 + 244 common timer_delete sys_timer_delete 287 + 245 common clock_settime sys_clock_settime compat_sys_clock_settime 288 + 246 common clock_gettime sys_clock_gettime compat_sys_clock_gettime 289 + 247 common clock_getres sys_clock_getres compat_sys_clock_getres 290 + 248 common clock_nanosleep sys_clock_nanosleep compat_sys_clock_nanosleep 291 + 249 32 swapcontext ppc_swapcontext ppc32_swapcontext 292 + 249 64 swapcontext ppc64_swapcontext 293 + 249 spu swapcontext sys_ni_syscall 294 + 250 common tgkill sys_tgkill 295 + 251 common utimes sys_utimes compat_sys_utimes 296 + 252 common statfs64 sys_statfs64 compat_sys_statfs64 297 + 253 common fstatfs64 sys_fstatfs64 compat_sys_fstatfs64 298 + 254 32 fadvise64_64 ppc_fadvise64_64 299 + 254 spu fadvise64_64 sys_ni_syscall 300 + 255 common rtas sys_rtas 301 + 256 32 sys_debug_setcontext sys_debug_setcontext sys_ni_syscall 302 + 256 64 sys_debug_setcontext sys_ni_syscall 303 + 256 spu sys_debug_setcontext sys_ni_syscall 304 + # 257 reserved for vserver 305 + 258 nospu migrate_pages sys_migrate_pages compat_sys_migrate_pages 306 + 259 nospu mbind sys_mbind compat_sys_mbind 307 + 260 nospu get_mempolicy sys_get_mempolicy compat_sys_get_mempolicy 308 + 261 nospu set_mempolicy sys_set_mempolicy compat_sys_set_mempolicy 309 + 262 nospu mq_open sys_mq_open compat_sys_mq_open 310 + 263 nospu mq_unlink sys_mq_unlink 311 + 264 nospu mq_timedsend sys_mq_timedsend compat_sys_mq_timedsend 312 + 265 nospu mq_timedreceive sys_mq_timedreceive compat_sys_mq_timedreceive 313 + 266 nospu mq_notify sys_mq_notify compat_sys_mq_notify 314 + 267 nospu mq_getsetattr sys_mq_getsetattr compat_sys_mq_getsetattr 315 + 268 nospu kexec_load sys_kexec_load compat_sys_kexec_load 316 + 269 nospu add_key sys_add_key 317 + 270 nospu request_key sys_request_key 318 + 271 nospu keyctl 
sys_keyctl compat_sys_keyctl 319 + 272 nospu waitid sys_waitid compat_sys_waitid 320 + 273 nospu ioprio_set sys_ioprio_set 321 + 274 nospu ioprio_get sys_ioprio_get 322 + 275 nospu inotify_init sys_inotify_init 323 + 276 nospu inotify_add_watch sys_inotify_add_watch 324 + 277 nospu inotify_rm_watch sys_inotify_rm_watch 325 + 278 nospu spu_run sys_spu_run 326 + 279 nospu spu_create sys_spu_create 327 + 280 nospu pselect6 sys_pselect6 compat_sys_pselect6 328 + 281 nospu ppoll sys_ppoll compat_sys_ppoll 329 + 282 common unshare sys_unshare 330 + 283 common splice sys_splice 331 + 284 common tee sys_tee 332 + 285 common vmsplice sys_vmsplice compat_sys_vmsplice 333 + 286 common openat sys_openat compat_sys_openat 334 + 287 common mkdirat sys_mkdirat 335 + 288 common mknodat sys_mknodat 336 + 289 common fchownat sys_fchownat 337 + 290 common futimesat sys_futimesat compat_sys_futimesat 338 + 291 32 fstatat64 sys_fstatat64 339 + 291 64 newfstatat sys_newfstatat 340 + 291 spu newfstatat sys_newfstatat 341 + 292 common unlinkat sys_unlinkat 342 + 293 common renameat sys_renameat 343 + 294 common linkat sys_linkat 344 + 295 common symlinkat sys_symlinkat 345 + 296 common readlinkat sys_readlinkat 346 + 297 common fchmodat sys_fchmodat 347 + 298 common faccessat sys_faccessat 348 + 299 common get_robust_list sys_get_robust_list compat_sys_get_robust_list 349 + 300 common set_robust_list sys_set_robust_list compat_sys_set_robust_list 350 + 301 common move_pages sys_move_pages compat_sys_move_pages 351 + 302 common getcpu sys_getcpu 352 + 303 nospu epoll_pwait sys_epoll_pwait compat_sys_epoll_pwait 353 + 304 common utimensat sys_utimensat compat_sys_utimensat 354 + 305 common signalfd sys_signalfd compat_sys_signalfd 355 + 306 common timerfd_create sys_timerfd_create 356 + 307 common eventfd sys_eventfd 357 + 308 common sync_file_range2 sys_sync_file_range2 compat_sys_sync_file_range2 358 + 309 nospu fallocate sys_fallocate compat_sys_fallocate 359 + 310 nospu subpage_prot sys_subpage_prot 360 + 311 common timerfd_settime sys_timerfd_settime compat_sys_timerfd_settime 361 + 312 common timerfd_gettime sys_timerfd_gettime compat_sys_timerfd_gettime 362 + 313 common signalfd4 sys_signalfd4 compat_sys_signalfd4 363 + 314 common eventfd2 sys_eventfd2 364 + 315 common epoll_create1 sys_epoll_create1 365 + 316 common dup3 sys_dup3 366 + 317 common pipe2 sys_pipe2 367 + 318 nospu inotify_init1 sys_inotify_init1 368 + 319 common perf_event_open sys_perf_event_open 369 + 320 common preadv sys_preadv compat_sys_preadv 370 + 321 common pwritev sys_pwritev compat_sys_pwritev 371 + 322 nospu rt_tgsigqueueinfo sys_rt_tgsigqueueinfo compat_sys_rt_tgsigqueueinfo 372 + 323 nospu fanotify_init sys_fanotify_init 373 + 324 nospu fanotify_mark sys_fanotify_mark compat_sys_fanotify_mark 374 + 325 common prlimit64 sys_prlimit64 375 + 326 common socket sys_socket 376 + 327 common bind sys_bind 377 + 328 common connect sys_connect 378 + 329 common listen sys_listen 379 + 330 common accept sys_accept 380 + 331 common getsockname sys_getsockname 381 + 332 common getpeername sys_getpeername 382 + 333 common socketpair sys_socketpair 383 + 334 common send sys_send 384 + 335 common sendto sys_sendto 385 + 336 common recv sys_recv compat_sys_recv 386 + 337 common recvfrom sys_recvfrom compat_sys_recvfrom 387 + 338 common shutdown sys_shutdown 388 + 339 common setsockopt sys_setsockopt compat_sys_setsockopt 389 + 340 common getsockopt sys_getsockopt compat_sys_getsockopt 390 + 341 common sendmsg sys_sendmsg compat_sys_sendmsg 391 + 
342 common recvmsg sys_recvmsg compat_sys_recvmsg 392 + 343 common recvmmsg sys_recvmmsg compat_sys_recvmmsg 393 + 344 common accept4 sys_accept4 394 + 345 common name_to_handle_at sys_name_to_handle_at 395 + 346 common open_by_handle_at sys_open_by_handle_at compat_sys_open_by_handle_at 396 + 347 common clock_adjtime sys_clock_adjtime compat_sys_clock_adjtime 397 + 348 common syncfs sys_syncfs 398 + 349 common sendmmsg sys_sendmmsg compat_sys_sendmmsg 399 + 350 common setns sys_setns 400 + 351 nospu process_vm_readv sys_process_vm_readv compat_sys_process_vm_readv 401 + 352 nospu process_vm_writev sys_process_vm_writev compat_sys_process_vm_writev 402 + 353 nospu finit_module sys_finit_module 403 + 354 nospu kcmp sys_kcmp 404 + 355 common sched_setattr sys_sched_setattr 405 + 356 common sched_getattr sys_sched_getattr 406 + 357 common renameat2 sys_renameat2 407 + 358 common seccomp sys_seccomp 408 + 359 common getrandom sys_getrandom 409 + 360 common memfd_create sys_memfd_create 410 + 361 common bpf sys_bpf 411 + 362 nospu execveat sys_execveat compat_sys_execveat 412 + 363 32 switch_endian sys_ni_syscall 413 + 363 64 switch_endian ppc_switch_endian 414 + 363 spu switch_endian sys_ni_syscall 415 + 364 common userfaultfd sys_userfaultfd 416 + 365 common membarrier sys_membarrier 417 + 378 nospu mlock2 sys_mlock2 418 + 379 nospu copy_file_range sys_copy_file_range 419 + 380 common preadv2 sys_preadv2 compat_sys_preadv2 420 + 381 common pwritev2 sys_pwritev2 compat_sys_pwritev2 421 + 382 nospu kexec_file_load sys_kexec_file_load 422 + 383 nospu statx sys_statx 423 + 384 nospu pkey_alloc sys_pkey_alloc 424 + 385 nospu pkey_free sys_pkey_free 425 + 386 nospu pkey_mprotect sys_pkey_mprotect 426 + 387 nospu rseq sys_rseq 427 + 388 nospu io_pgetevents sys_io_pgetevents compat_sys_io_pgetevents
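For the 64-bit build, mksyscalltbl above keeps the common, spu, nospu and 64 rows of this table and generates an array of roughly the following shape (abridged sketch; exact contents depend on the grep filter):

static const char *syscalltbl_powerpc_64[] = {
	[0] = "restart_syscall",
	[1] = "exit",
	[2] = "fork",
	[3] = "read",
	[4] = "write",
	/* ... */
	[388] = "io_pgetevents",
};
#define SYSCALLTBL_POWERPC_64_MAX_ID 388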
+2 -1
tools/perf/arch/powerpc/include/perf_regs.h
··· 63 63 [PERF_REG_POWERPC_TRAP] = "trap", 64 64 [PERF_REG_POWERPC_DAR] = "dar", 65 65 [PERF_REG_POWERPC_DSISR] = "dsisr", 66 - [PERF_REG_POWERPC_SIER] = "sier" 66 + [PERF_REG_POWERPC_SIER] = "sier", 67 + [PERF_REG_POWERPC_MMCRA] = "mmcra" 67 68 }; 68 69 69 70 static inline const char *perf_reg_name(int id)
+1
tools/perf/arch/powerpc/util/perf_regs.c
··· 53 53 SMPL_REG(dar, PERF_REG_POWERPC_DAR), 54 54 SMPL_REG(dsisr, PERF_REG_POWERPC_DSISR), 55 55 SMPL_REG(sier, PERF_REG_POWERPC_SIER), 56 + SMPL_REG(mmcra, PERF_REG_POWERPC_MMCRA), 56 57 SMPL_REG_END 57 58 }; 58 59
+2 -1
tools/perf/builtin-stat.c
··· 561 561 break; 562 562 } 563 563 } 564 - wait4(child_pid, &status, 0, &stat_config.ru_data); 564 + if (child_pid != -1) 565 + wait4(child_pid, &status, 0, &stat_config.ru_data); 565 566 566 567 if (workload_exec_errno) { 567 568 const char *emsg = str_error_r(workload_exec_errno, msg, sizeof(msg));
+1 -6
tools/perf/builtin-top.c
··· 1028 1028 1029 1029 static int callchain_param__setup_sample_type(struct callchain_param *callchain) 1030 1030 { 1031 - if (!perf_hpp_list.sym) { 1032 - if (callchain->enabled) { 1033 - ui__error("Selected -g but \"sym\" not present in --sort/-s."); 1034 - return -EINVAL; 1035 - } 1036 - } else if (callchain->mode != CHAIN_NONE) { 1031 + if (callchain->mode != CHAIN_NONE) { 1037 1032 if (callchain_register_param(callchain) < 0) { 1038 1033 ui__error("Can't register callchain params.\n"); 1039 1034 return -EINVAL;
+11 -4
tools/perf/builtin-trace.c
··· 1758 1758 { 1759 1759 struct thread_trace *ttrace; 1760 1760 size_t printed; 1761 + int len; 1761 1762 1762 1763 if (trace->failure_only || trace->current == NULL) 1763 1764 return 0; ··· 1769 1768 return 0; 1770 1769 1771 1770 printed = trace__fprintf_entry_head(trace, trace->current, 0, false, ttrace->entry_time, trace->output); 1772 - printed += fprintf(trace->output, ")%-*s ...\n", trace->args_alignment, ttrace->entry_str); 1773 - ttrace->entry_pending = false; 1771 + printed += len = fprintf(trace->output, "%s)", ttrace->entry_str); 1774 1772 1773 + if (len < trace->args_alignment - 4) 1774 + printed += fprintf(trace->output, "%-*s", trace->args_alignment - 4 - len, " "); 1775 + 1776 + printed += fprintf(trace->output, " ...\n"); 1777 + 1778 + ttrace->entry_pending = false; 1775 1779 ++trace->nr_events_printed; 1776 1780 1777 1781 return printed; ··· 2032 2026 if (ttrace->entry_pending) { 2033 2027 printed = fprintf(trace->output, "%s", ttrace->entry_str); 2034 2028 } else { 2035 - fprintf(trace->output, " ... ["); 2029 + printed += fprintf(trace->output, " ... ["); 2036 2030 color_fprintf(trace->output, PERF_COLOR_YELLOW, "continued"); 2037 - fprintf(trace->output, "]: %s()", sc->name); 2031 + printed += 9; 2032 + printed += fprintf(trace->output, "]: %s()", sc->name); 2038 2033 } 2039 2034 2040 2035 printed++; /* the closing ')' */
+1 -1
tools/perf/check-headers.sh
··· 10 10 include/uapi/linux/kcmp.h 11 11 include/uapi/linux/kvm.h 12 12 include/uapi/linux/in.h 13 + include/uapi/linux/mount.h 13 14 include/uapi/linux/perf_event.h 14 15 include/uapi/linux/prctl.h 15 16 include/uapi/linux/sched.h ··· 50 49 arch/powerpc/include/uapi/asm/errno.h 51 50 arch/sparc/include/uapi/asm/errno.h 52 51 arch/x86/include/uapi/asm/errno.h 53 - arch/powerpc/include/uapi/asm/unistd.h 54 52 include/asm-generic/bitops/arch_hweight.h 55 53 include/asm-generic/bitops/const_hweight.h 56 54 include/asm-generic/bitops/__fls.h
+3 -3
tools/perf/perf-read-vdso.c
··· 5 5 #define VDSO__MAP_NAME "[vdso]" 6 6 7 7 /* 8 - * Include definition of find_vdso_map() also used in util/vdso.c for 8 + * Include definition of find_map() also used in util/vdso.c for 9 9 * building perf. 10 10 */ 11 - #include "util/find-vdso-map.c" 11 + #include "util/find-map.c" 12 12 13 13 int main(void) 14 14 { 15 15 void *start, *end; 16 16 size_t size, written; 17 17 18 - if (find_vdso_map(&start, &end)) 18 + if (find_map(&start, &end, VDSO__MAP_NAME)) 19 19 return 1; 20 20 21 21 size = end - start;
+2 -1
tools/perf/tests/shell/lib/probe_vfs_getname.sh
··· 13 13 local verbose=$1 14 14 if [ $had_vfs_getname -eq 1 ] ; then 15 15 line=$(perf probe -L getname_flags 2>&1 | egrep 'result.*=.*filename;' | sed -r 's/[[:space:]]+([[:digit:]]+)[[:space:]]+result->uptr.*/\1/') 16 - perf probe $verbose "vfs_getname=getname_flags:${line} pathname=result->name:string" 16 + perf probe -q "vfs_getname=getname_flags:${line} pathname=result->name:string" || \ 17 + perf probe $verbose "vfs_getname=getname_flags:${line} pathname=filename:string" 17 18 fi 18 19 } 19 20
+5
tools/perf/tests/tests.h
··· 119 119 struct thread *thread); 120 120 #endif 121 121 #endif 122 + 123 + #if defined(__arm__) 124 + int test__vectors_page(struct test *test, int subtest); 125 + #endif 126 + 122 127 #endif /* TESTS_H */
+2 -2
tools/perf/trace/beauty/mount_flags.sh
··· 5 5 6 6 printf "static const char *mount_flags[] = {\n" 7 7 regex='^[[:space:]]*#[[:space:]]*define[[:space:]]+MS_([[:alnum:]_]+)[[:space:]]+([[:digit:]]+)[[:space:]]*.*' 8 - egrep $regex ${header_dir}/fs.h | egrep -v '(MSK|VERBOSE|MGC_VAL)\>' | \ 8 + egrep $regex ${header_dir}/mount.h | egrep -v '(MSK|VERBOSE|MGC_VAL)\>' | \ 9 9 sed -r "s/$regex/\2 \2 \1/g" | sort -n | \ 10 10 xargs printf "\t[%s ? (ilog2(%s) + 1) : 0] = \"%s\",\n" 11 11 regex='^[[:space:]]*#[[:space:]]*define[[:space:]]+MS_([[:alnum:]_]+)[[:space:]]+\(1<<([[:digit:]]+)\)[[:space:]]*.*' 12 - egrep $regex ${header_dir}/fs.h | \ 12 + egrep $regex ${header_dir}/mount.h | \ 13 13 sed -r "s/$regex/\2 \1/g" | \ 14 14 xargs printf "\t[%s + 1] = \"%s\",\n" 15 15 printf "};\n"
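Only the header the values are scraped from changes; the generated table keeps its shape. The first regex maps each power-of-two MS_* value to index ilog2(value) + 1, roughly (abridged sketch, assuming the usual uapi definitions):

static const char *mount_flags[] = {
	[1] = "RDONLY",		/* MS_RDONLY = 1 */
	[2] = "NOSUID",		/* MS_NOSUID = 2 */
	[3] = "NODEV",		/* MS_NODEV  = 4 */
	[4] = "NOEXEC",		/* MS_NOEXEC = 8 */
	/* ... */
};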
+1 -1
tools/perf/trace/beauty/prctl_option.sh
··· 4 4 [ $# -eq 1 ] && header_dir=$1 || header_dir=tools/include/uapi/linux/ 5 5 6 6 printf "static const char *prctl_options[] = {\n" 7 - regex='^#define[[:space:]]+PR_([GS]ET\w+)[[:space:]]*([[:xdigit:]]+).*' 7 + regex='^#define[[:space:]]+PR_(\w+)[[:space:]]*([[:xdigit:]]+).*' 8 8 egrep $regex ${header_dir}/prctl.h | grep -v PR_SET_PTRACER | \ 9 9 sed -r "s/$regex/\2 \1/g" | \ 10 10 sort -n | xargs printf "\t[%s] = \"%s\",\n"
+4 -4
tools/perf/util/annotate.c
··· 1723 1723 err = asprintf(&command, 1724 1724 "%s %s%s --start-address=0x%016" PRIx64 1725 1725 " --stop-address=0x%016" PRIx64 1726 - " -l -d %s %s -C \"%s\" 2>/dev/null|grep -v \"%s:\"|expand", 1726 + " -l -d %s %s -C \"$1\" 2>/dev/null|grep -v \"$1:\"|expand", 1727 1727 opts->objdump_path ?: "objdump", 1728 1728 opts->disassembler_style ? "-M " : "", 1729 1729 opts->disassembler_style ?: "", 1730 1730 map__rip_2objdump(map, sym->start), 1731 1731 map__rip_2objdump(map, sym->end), 1732 1732 opts->show_asm_raw ? "" : "--no-show-raw", 1733 - opts->annotate_src ? "-S" : "", 1734 - symfs_filename, symfs_filename); 1733 + opts->annotate_src ? "-S" : ""); 1735 1734 1736 1735 if (err < 0) { 1737 1736 pr_err("Failure allocating memory for the command to run\n"); ··· 1755 1756 close(stdout_fd[0]); 1756 1757 dup2(stdout_fd[1], 1); 1757 1758 close(stdout_fd[1]); 1758 - execl("/bin/sh", "sh", "-c", command, NULL); 1759 + execl("/bin/sh", "sh", "-c", command, "--", symfs_filename, 1760 + NULL); 1759 1761 perror(command); 1760 1762 exit(-1); 1761 1763 }
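Referencing the path as "$1" works because everything execl() passes after the "-c" string becomes the script's positional parameters ($0, then $1, ...), so the filename is handed over verbatim and never re-parsed by the shell. An illustrative sketch of the mechanism (the path is deliberately hostile and hypothetical):

#include <unistd.h>

int main(void)
{
	/* "--" becomes $0 and the path becomes $1, delivered as-is. */
	execl("/bin/sh", "sh", "-c", "printf '%s\\n' \"$1\"", "--",
	      "/tmp/odd name;$(reboot).ko", NULL);
	return 1;
}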
+20 -12
tools/perf/util/callchain.c
··· 766 766 cnode->cycles_count += node->branch_flags.cycles; 767 767 cnode->iter_count += node->nr_loop_iter; 768 768 cnode->iter_cycles += node->iter_cycles; 769 + cnode->from_count++; 769 770 } 770 771 } 771 772 ··· 1346 1345 static int branch_from_str(char *bf, int bfsize, 1347 1346 u64 branch_count, 1348 1347 u64 cycles_count, u64 iter_count, 1349 - u64 iter_cycles) 1348 + u64 iter_cycles, u64 from_count) 1350 1349 { 1351 1350 int printed = 0, i = 0; 1352 - u64 cycles; 1351 + u64 cycles, v = 0; 1353 1352 1354 1353 cycles = cycles_count / branch_count; 1355 1354 if (cycles) { ··· 1358 1357 bf + printed, bfsize - printed); 1359 1358 } 1360 1359 1361 - if (iter_count) { 1362 - printed += count_pri64_printf(i++, "iter", 1363 - iter_count, 1364 - bf + printed, bfsize - printed); 1360 + if (iter_count && from_count) { 1361 + v = iter_count / from_count; 1362 + if (v) { 1363 + printed += count_pri64_printf(i++, "iter", 1364 + v, bf + printed, bfsize - printed); 1365 1365 1366 - printed += count_pri64_printf(i++, "avg_cycles", 1367 - iter_cycles / iter_count, 1368 - bf + printed, bfsize - printed); 1366 + printed += count_pri64_printf(i++, "avg_cycles", 1367 + iter_cycles / iter_count, 1368 + bf + printed, bfsize - printed); 1369 + } 1369 1370 } 1370 1371 1371 1372 if (i) ··· 1380 1377 u64 branch_count, u64 predicted_count, 1381 1378 u64 abort_count, u64 cycles_count, 1382 1379 u64 iter_count, u64 iter_cycles, 1380 + u64 from_count, 1383 1381 struct branch_type_stat *brtype_stat) 1384 1382 { 1385 1383 int printed; ··· 1393 1389 predicted_count, abort_count, brtype_stat); 1394 1390 } else { 1395 1391 printed = branch_from_str(bf, bfsize, branch_count, 1396 - cycles_count, iter_count, iter_cycles); 1392 + cycles_count, iter_count, iter_cycles, 1393 + from_count); 1397 1394 } 1398 1395 1399 1396 if (!printed) ··· 1407 1402 u64 branch_count, u64 predicted_count, 1408 1403 u64 abort_count, u64 cycles_count, 1409 1404 u64 iter_count, u64 iter_cycles, 1405 + u64 from_count, 1410 1406 struct branch_type_stat *brtype_stat) 1411 1407 { 1412 1408 char str[256]; 1413 1409 1414 1410 counts_str_build(str, sizeof(str), branch_count, 1415 1411 predicted_count, abort_count, cycles_count, 1416 - iter_count, iter_cycles, brtype_stat); 1412 + iter_count, iter_cycles, from_count, brtype_stat); 1417 1413 1418 1414 if (fp) 1419 1415 return fprintf(fp, "%s", str); ··· 1428 1422 u64 branch_count, predicted_count; 1429 1423 u64 abort_count, cycles_count; 1430 1424 u64 iter_count, iter_cycles; 1425 + u64 from_count; 1431 1426 1432 1427 branch_count = clist->branch_count; 1433 1428 predicted_count = clist->predicted_count; ··· 1436 1429 cycles_count = clist->cycles_count; 1437 1430 iter_count = clist->iter_count; 1438 1431 iter_cycles = clist->iter_cycles; 1432 + from_count = clist->from_count; 1439 1433 1440 1434 return callchain_counts_printf(fp, bf, bfsize, branch_count, 1441 1435 predicted_count, abort_count, 1442 1436 cycles_count, iter_count, iter_cycles, 1443 - &clist->brtype_stat); 1437 + from_count, &clist->brtype_stat); 1444 1438 } 1445 1439 1446 1440 static void free_callchain_node(struct callchain_node *node)
+1
tools/perf/util/callchain.h
··· 118 118 bool has_children; 119 119 }; 120 120 u64 branch_count; 121 + u64 from_count; 121 122 u64 predicted_count; 122 123 u64 abort_count; 123 124 u64 cycles_count;
+3 -4
tools/perf/util/find-vdso-map.c tools/perf/util/find-map.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 2 - static int find_vdso_map(void **start, void **end) 2 + static int find_map(void **start, void **end, const char *name) 3 3 { 4 4 FILE *maps; 5 5 char line[128]; ··· 7 7 8 8 maps = fopen("/proc/self/maps", "r"); 9 9 if (!maps) { 10 - fprintf(stderr, "vdso: cannot open maps\n"); 10 + fprintf(stderr, "cannot open maps\n"); 11 11 return -1; 12 12 } 13 13 ··· 21 21 if (m < 0) 22 22 continue; 23 23 24 - if (!strncmp(&line[m], VDSO__MAP_NAME, 25 - sizeof(VDSO__MAP_NAME) - 1)) 24 + if (!strncmp(&line[m], name, strlen(name))) 26 25 found = 1; 27 26 } 28 27
+1 -1
tools/perf/util/machine.c
··· 2005 2005 { 2006 2006 int i; 2007 2007 2008 - iter->nr_loop_iter = nr; 2008 + iter->nr_loop_iter++; 2009 2009 iter->cycles = 0; 2010 2010 2011 2011 for (i = 0; i < nr; i++)
-1
tools/perf/util/strbuf.c
··· 109 109 return ret; 110 110 } 111 111 len = vsnprintf(sb->buf + sb->len, sb->alloc - sb->len, fmt, ap_saved); 112 - va_end(ap_saved); 113 112 if (len > strbuf_avail(sb)) { 114 113 pr_debug("this should not happen, your vsnprintf is broken"); 115 114 va_end(ap_saved);
+1
tools/perf/util/symbol.c
··· 614 614 static bool symbol__is_idle(const char *name) 615 615 { 616 616 const char * const idle_symbols[] = { 617 + "arch_cpu_idle", 617 618 "cpu_idle", 618 619 "cpu_startup_entry", 619 620 "intel_idle",
+3 -3
tools/perf/util/vdso.c
··· 18 18 #include "debug.h" 19 19 20 20 /* 21 - * Include definition of find_vdso_map() also used in perf-read-vdso.c for 21 + * Include definition of find_map() also used in perf-read-vdso.c for 22 22 * building perf-read-vdso32 and perf-read-vdsox32. 23 23 */ 24 - #include "find-vdso-map.c" 24 + #include "find-map.c" 25 25 26 26 #define VDSO__TEMP_FILE_NAME "/tmp/perf-vdso.so-XXXXXX" 27 27 ··· 76 76 if (vdso_file->found) 77 77 return vdso_file->temp_file_name; 78 78 79 - if (vdso_file->error || find_vdso_map(&start, &end)) 79 + if (vdso_file->error || find_map(&start, &end, VDSO__MAP_NAME)) 80 80 return NULL; 81 81 82 82 size = end - start;
+1
tools/testing/selftests/bpf/.gitignore
··· 28 28 test_netcnt 29 29 test_section_names 30 30 test_tcpnotify_user 31 + test_libbpf
+4 -1
tools/testing/selftests/bpf/Makefile
··· 55 55 test_flow_dissector.sh \ 56 56 test_xdp_vlan.sh 57 57 58 - TEST_PROGS_EXTENDED := with_addr.sh 58 + TEST_PROGS_EXTENDED := with_addr.sh \ 59 + with_tunnels.sh \ 60 + tcp_client.py \ 61 + tcp_server.py 59 62 60 63 # Compile but not part of 'make run_tests' 61 64 TEST_GEN_PROGS_EXTENDED = test_libbpf_open test_sock_addr test_skb_cgroup_id_user \
+3 -3
tools/testing/selftests/bpf/cgroup_helpers.c
··· 155 155 * This function creates a cgroup under the top level workdir and returns the 156 156 * file descriptor. It is idempotent. 157 157 * 158 - * On success, it returns the file descriptor. On failure it returns 0. 158 + * On success, it returns the file descriptor. On failure it returns -1. 159 159 * If there is a failure, it prints the error to stderr. 160 160 */ 161 161 int create_and_get_cgroup(const char *path) ··· 166 166 format_cgroup_path(cgroup_path, path); 167 167 if (mkdir(cgroup_path, 0777) && errno != EEXIST) { 168 168 log_err("mkdiring cgroup %s .. %s", path, cgroup_path); 169 - return 0; 169 + return -1; 170 170 } 171 171 172 172 fd = open(cgroup_path, O_RDONLY); 173 173 if (fd < 0) { 174 174 log_err("Opening Cgroup"); 175 - return 0; 175 + return -1; 176 176 } 177 177 178 178 return fd;
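The helper change above matters because 0 is a valid file descriptor: returning 0 on failure forced callers into if (!fd) tests that could never distinguish an error from a legitimate fd 0. With -1 on failure, every caller in the series switches to the conventional negative check, for example:

#include <stdio.h>
#include "cgroup_helpers.h"	/* declares create_and_get_cgroup() */

int main(void)
{
	int cgroup_fd = create_and_get_cgroup("/test-cgroup");

	if (cgroup_fd < 0) {	/* -1 on failure; 0 would be a valid fd */
		fprintf(stderr, "Failed to create test cgroup\n");
		return 1;
	}
	/* ... use cgroup_fd ... */
	return 0;
}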
+21 -8
tools/testing/selftests/bpf/test_btf.c
··· 3526 3526 ENUM_TWO, 3527 3527 ENUM_THREE, 3528 3528 } aenum; 3529 + uint32_t ui32b; 3530 + uint32_t bits2c:2; 3529 3531 }; 3530 3532 3531 3533 static struct btf_raw_test pprint_test_template[] = { ··· 3570 3568 BTF_ENUM_ENC(NAME_TBD, 2), 3571 3569 BTF_ENUM_ENC(NAME_TBD, 3), 3572 3570 /* struct pprint_mapv */ /* [16] */ 3573 - BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 8), 32), 3571 + BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 10), 40), 3574 3572 BTF_MEMBER_ENC(NAME_TBD, 11, 0), /* uint32_t ui32 */ 3575 3573 BTF_MEMBER_ENC(NAME_TBD, 10, 32), /* uint16_t ui16 */ 3576 3574 BTF_MEMBER_ENC(NAME_TBD, 12, 64), /* int32_t si32 */ ··· 3579 3577 BTF_MEMBER_ENC(NAME_TBD, 6, 126), /* unused_bits2b */ 3580 3578 BTF_MEMBER_ENC(0, 14, 128), /* union (anon) */ 3581 3579 BTF_MEMBER_ENC(NAME_TBD, 15, 192), /* aenum */ 3580 + BTF_MEMBER_ENC(NAME_TBD, 11, 224), /* uint32_t ui32b */ 3581 + BTF_MEMBER_ENC(NAME_TBD, 6, 256), /* bits2c */ 3582 3582 BTF_END_RAW, 3583 3583 }, 3584 - BTF_STR_SEC("\0unsigned char\0unsigned short\0unsigned int\0int\0unsigned long long\0uint8_t\0uint16_t\0uint32_t\0int32_t\0uint64_t\0ui64\0ui8a\0ENUM_ZERO\0ENUM_ONE\0ENUM_TWO\0ENUM_THREE\0pprint_mapv\0ui32\0ui16\0si32\0unused_bits2a\0bits28\0unused_bits2b\0aenum"), 3584 + BTF_STR_SEC("\0unsigned char\0unsigned short\0unsigned int\0int\0unsigned long long\0uint8_t\0uint16_t\0uint32_t\0int32_t\0uint64_t\0ui64\0ui8a\0ENUM_ZERO\0ENUM_ONE\0ENUM_TWO\0ENUM_THREE\0pprint_mapv\0ui32\0ui16\0si32\0unused_bits2a\0bits28\0unused_bits2b\0aenum\0ui32b\0bits2c"), 3585 3585 .key_size = sizeof(unsigned int), 3586 3586 .value_size = sizeof(struct pprint_mapv), 3587 3587 .key_type_id = 3, /* unsigned int */ ··· 3632 3628 BTF_ENUM_ENC(NAME_TBD, 2), 3633 3629 BTF_ENUM_ENC(NAME_TBD, 3), 3634 3630 /* struct pprint_mapv */ /* [16] */ 3635 - BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 1, 8), 32), 3631 + BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 1, 10), 40), 3636 3632 BTF_MEMBER_ENC(NAME_TBD, 11, BTF_MEMBER_OFFSET(0, 0)), /* uint32_t ui32 */ 3637 3633 BTF_MEMBER_ENC(NAME_TBD, 10, BTF_MEMBER_OFFSET(0, 32)), /* uint16_t ui16 */ 3638 3634 BTF_MEMBER_ENC(NAME_TBD, 12, BTF_MEMBER_OFFSET(0, 64)), /* int32_t si32 */ ··· 3641 3637 BTF_MEMBER_ENC(NAME_TBD, 6, BTF_MEMBER_OFFSET(2, 126)), /* unused_bits2b */ 3642 3638 BTF_MEMBER_ENC(0, 14, BTF_MEMBER_OFFSET(0, 128)), /* union (anon) */ 3643 3639 BTF_MEMBER_ENC(NAME_TBD, 15, BTF_MEMBER_OFFSET(0, 192)), /* aenum */ 3640 + BTF_MEMBER_ENC(NAME_TBD, 11, BTF_MEMBER_OFFSET(0, 224)), /* uint32_t ui32b */ 3641 + BTF_MEMBER_ENC(NAME_TBD, 6, BTF_MEMBER_OFFSET(2, 256)), /* bits2c */ 3644 3642 BTF_END_RAW, 3645 3643 }, 3646 - BTF_STR_SEC("\0unsigned char\0unsigned short\0unsigned int\0int\0unsigned long long\0uint8_t\0uint16_t\0uint32_t\0int32_t\0uint64_t\0ui64\0ui8a\0ENUM_ZERO\0ENUM_ONE\0ENUM_TWO\0ENUM_THREE\0pprint_mapv\0ui32\0ui16\0si32\0unused_bits2a\0bits28\0unused_bits2b\0aenum"), 3644 + BTF_STR_SEC("\0unsigned char\0unsigned short\0unsigned int\0int\0unsigned long long\0uint8_t\0uint16_t\0uint32_t\0int32_t\0uint64_t\0ui64\0ui8a\0ENUM_ZERO\0ENUM_ONE\0ENUM_TWO\0ENUM_THREE\0pprint_mapv\0ui32\0ui16\0si32\0unused_bits2a\0bits28\0unused_bits2b\0aenum\0ui32b\0bits2c"), 3647 3645 .key_size = sizeof(unsigned int), 3648 3646 .value_size = sizeof(struct pprint_mapv), 3649 3647 .key_type_id = 3, /* unsigned int */ ··· 3696 3690 BTF_ENUM_ENC(NAME_TBD, 2), 3697 3691 BTF_ENUM_ENC(NAME_TBD, 3), 3698 3692 /* struct pprint_mapv */ /* [16] */ 3699 - BTF_TYPE_ENC(NAME_TBD, 
BTF_INFO_ENC(BTF_KIND_STRUCT, 1, 8), 32), 3693 + BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 1, 10), 40), 3700 3694 BTF_MEMBER_ENC(NAME_TBD, 11, BTF_MEMBER_OFFSET(0, 0)), /* uint32_t ui32 */ 3701 3695 BTF_MEMBER_ENC(NAME_TBD, 10, BTF_MEMBER_OFFSET(0, 32)), /* uint16_t ui16 */ 3702 3696 BTF_MEMBER_ENC(NAME_TBD, 12, BTF_MEMBER_OFFSET(0, 64)), /* int32_t si32 */ ··· 3705 3699 BTF_MEMBER_ENC(NAME_TBD, 19, BTF_MEMBER_OFFSET(2, 126)),/* unused_bits2b */ 3706 3700 BTF_MEMBER_ENC(0, 14, BTF_MEMBER_OFFSET(0, 128)), /* union (anon) */ 3707 3701 BTF_MEMBER_ENC(NAME_TBD, 15, BTF_MEMBER_OFFSET(0, 192)), /* aenum */ 3702 + BTF_MEMBER_ENC(NAME_TBD, 11, BTF_MEMBER_OFFSET(0, 224)), /* uint32_t ui32b */ 3703 + BTF_MEMBER_ENC(NAME_TBD, 17, BTF_MEMBER_OFFSET(2, 256)), /* bits2c */ 3708 3704 /* typedef unsigned int ___int */ /* [17] */ 3709 3705 BTF_TYPEDEF_ENC(NAME_TBD, 18), 3710 3706 BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_VOLATILE, 0, 0), 6), /* [18] */ 3711 3707 BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_CONST, 0, 0), 15), /* [19] */ 3712 3708 BTF_END_RAW, 3713 3709 }, 3714 - BTF_STR_SEC("\0unsigned char\0unsigned short\0unsigned int\0int\0unsigned long long\0uint8_t\0uint16_t\0uint32_t\0int32_t\0uint64_t\0ui64\0ui8a\0ENUM_ZERO\0ENUM_ONE\0ENUM_TWO\0ENUM_THREE\0pprint_mapv\0ui32\0ui16\0si32\0unused_bits2a\0bits28\0unused_bits2b\0aenum\0___int"), 3710 + BTF_STR_SEC("\0unsigned char\0unsigned short\0unsigned int\0int\0unsigned long long\0uint8_t\0uint16_t\0uint32_t\0int32_t\0uint64_t\0ui64\0ui8a\0ENUM_ZERO\0ENUM_ONE\0ENUM_TWO\0ENUM_THREE\0pprint_mapv\0ui32\0ui16\0si32\0unused_bits2a\0bits28\0unused_bits2b\0aenum\0ui32b\0bits2c\0___int"), 3715 3711 .key_size = sizeof(unsigned int), 3716 3712 .value_size = sizeof(struct pprint_mapv), 3717 3713 .key_type_id = 3, /* unsigned int */ ··· 3801 3793 v->unused_bits2b = 3; 3802 3794 v->ui64 = i; 3803 3795 v->aenum = i & 0x03; 3796 + v->ui32b = 4; 3797 + v->bits2c = 1; 3804 3798 v = (void *)v + rounded_value_size; 3805 3799 } 3806 3800 } ··· 3965 3955 3966 3956 nexpected_line = snprintf(expected_line, sizeof(expected_line), 3967 3957 "%s%u: {%u,0,%d,0x%x,0x%x,0x%x," 3968 - "{%lu|[%u,%u,%u,%u,%u,%u,%u,%u]},%s}\n", 3958 + "{%lu|[%u,%u,%u,%u,%u,%u,%u,%u]},%s," 3959 + "%u,0x%x}\n", 3969 3960 percpu_map ? "\tcpu" : "", 3970 3961 percpu_map ? cpu : next_key, 3971 3962 cmapv->ui32, cmapv->si32, ··· 3978 3967 cmapv->ui8a[2], cmapv->ui8a[3], 3979 3968 cmapv->ui8a[4], cmapv->ui8a[5], 3980 3969 cmapv->ui8a[6], cmapv->ui8a[7], 3981 - pprint_enum_str[cmapv->aenum]); 3970 + pprint_enum_str[cmapv->aenum], 3971 + cmapv->ui32b, 3972 + cmapv->bits2c); 3982 3973 3983 3974 err = check_line(expected_line, nexpected_line, 3984 3975 sizeof(expected_line), line);
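The raw-BTF edits above all follow from adding two members to struct pprint_mapv: the struct's vlen goes from 8 to 10 and its byte size from 32 to 40, ui32b lands at bit offset 224, bits2c at 256 with a bitfield size of 2 in the kind_flag encodings, both names join the string section, and the expected pretty-print line gains ",%u,0x%x". The layout under test is effectively:

#include <stdint.h>

struct pprint_mapv {
	uint32_t ui32;				/* bit   0 */
	uint16_t ui16;				/* bit  32 */
	int32_t  si32;				/* bit  64 */
	unsigned int unused_bits2a:2,		/* bit  96 */
		     bits28:28,
		     unused_bits2b:2;
	union {					/* bit 128 */
		uint64_t ui64;
		uint8_t  ui8a[8];
	};
	enum {
		ENUM_ZERO, ENUM_ONE, ENUM_TWO, ENUM_THREE,
	} aenum;				/* bit 192 */
	uint32_t ui32b;				/* bit 224, new */
	uint32_t bits2c:2;			/* bit 256, new */
};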
+1 -1
tools/testing/selftests/bpf/test_cgroup_storage.c
··· 81 81 82 82 /* Create a cgroup, get fd, and join it */ 83 83 cgroup_fd = create_and_get_cgroup(TEST_CGROUP); 84 - if (!cgroup_fd) { 84 + if (cgroup_fd < 0) { 85 85 printf("Failed to create test cgroup\n"); 86 86 goto err; 87 87 }
+1 -1
tools/testing/selftests/bpf/test_dev_cgroup.c
··· 43 43 44 44 /* Create a cgroup, get fd, and join it */ 45 45 cgroup_fd = create_and_get_cgroup(TEST_CGROUP); 46 - if (!cgroup_fd) { 46 + if (cgroup_fd < 0) { 47 47 printf("Failed to create test cgroup\n"); 48 48 goto err; 49 49 }
+1 -1
tools/testing/selftests/bpf/test_netcnt.c
··· 65 65 66 66 /* Create a cgroup, get fd, and join it */ 67 67 cgroup_fd = create_and_get_cgroup(TEST_CGROUP); 68 - if (!cgroup_fd) { 68 + if (cgroup_fd < 0) { 69 69 printf("Failed to create test cgroup\n"); 70 70 goto err; 71 71 }
+30
tools/testing/selftests/bpf/test_progs.c
··· 1188 1188 int i, j; 1189 1189 struct bpf_stack_build_id id_offs[PERF_MAX_STACK_DEPTH]; 1190 1190 int build_id_matches = 0; 1191 + int retry = 1; 1191 1192 1193 + retry: 1192 1194 err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd); 1193 1195 if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno)) 1194 1196 goto out; ··· 1303 1301 previous_key = key; 1304 1302 } while (bpf_map_get_next_key(stackmap_fd, &previous_key, &key) == 0); 1305 1303 1304 + /* stack_map_get_build_id_offset() is racy and sometimes can return 1305 + * BPF_STACK_BUILD_ID_IP instead of BPF_STACK_BUILD_ID_VALID; 1306 + * try it one more time. 1307 + */ 1308 + if (build_id_matches < 1 && retry--) { 1309 + ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE); 1310 + close(pmu_fd); 1311 + bpf_object__close(obj); 1312 + printf("%s:WARN:Didn't find expected build ID from the map, retrying\n", 1313 + __func__); 1314 + goto retry; 1315 + } 1316 + 1306 1317 if (CHECK(build_id_matches < 1, "build id match", 1307 1318 "Didn't find expected build ID from the map\n")) 1308 1319 goto disable_pmu; ··· 1356 1341 int i, j; 1357 1342 struct bpf_stack_build_id id_offs[PERF_MAX_STACK_DEPTH]; 1358 1343 int build_id_matches = 0; 1344 + int retry = 1; 1359 1345 1346 + retry: 1360 1347 err = bpf_prog_load(file, BPF_PROG_TYPE_PERF_EVENT, &obj, &prog_fd); 1361 1348 if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno)) 1362 1349 return; ··· 1452 1435 } 1453 1436 previous_key = key; 1454 1437 } while (bpf_map_get_next_key(stackmap_fd, &previous_key, &key) == 0); 1438 + 1439 + /* stack_map_get_build_id_offset() is racy and sometimes can return 1440 + * BPF_STACK_BUILD_ID_IP instead of BPF_STACK_BUILD_ID_VALID; 1441 + * try it one more time. 1442 + */ 1443 + if (build_id_matches < 1 && retry--) { 1444 + ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE); 1445 + close(pmu_fd); 1446 + bpf_object__close(obj); 1447 + printf("%s:WARN:Didn't find expected build ID from the map, retrying\n", 1448 + __func__); 1449 + goto retry; 1450 + } 1455 1451 1456 1452 if (CHECK(build_id_matches < 1, "build id match", 1457 1453 "Didn't find expected build ID from the map\n"))
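The retry scaffolding added to both stackmap tests above exists because stack_map_get_build_id_offset() is racy: it can occasionally return BPF_STACK_BUILD_ID_IP where BPF_STACK_BUILD_ID_VALID was expected, so a single failure tears everything down, warns, and reruns once before being treated as real. The generic shape, with hypothetical stand-ins for the test's setup and teardown:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for the real load/attach/verify steps. */
static bool setup_and_check(void) { return true; }
static void teardown(void) { }

int run_with_one_retry(void)
{
	int retry = 1;			/* at most one extra attempt */

retry:
	if (!setup_and_check() && retry--) {
		teardown();		/* close pmu fd, bpf object, ... */
		fprintf(stderr, "WARN: racy check failed, retrying\n");
		goto retry;
	}
	return 0;
}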
+1 -1
tools/testing/selftests/bpf/test_skb_cgroup_id_user.c
··· 164 164 goto err; 165 165 166 166 cgfd = create_and_get_cgroup(CGROUP_PATH); 167 - if (!cgfd) 167 + if (cgfd < 0) 168 168 goto err; 169 169 170 170 if (join_cgroup(CGROUP_PATH))
+1 -1
tools/testing/selftests/bpf/test_sock.c
··· 458 458 goto err; 459 459 460 460 cgfd = create_and_get_cgroup(CG_PATH); 461 - if (!cgfd) 461 + if (cgfd < 0) 462 462 goto err; 463 463 464 464 if (join_cgroup(CG_PATH))
+51 -4
tools/testing/selftests/bpf/test_sock_addr.c
··· 44 44 #define SERV6_V4MAPPED_IP "::ffff:192.168.0.4" 45 45 #define SRC6_IP "::1" 46 46 #define SRC6_REWRITE_IP "::6" 47 + #define WILDCARD6_IP "::" 47 48 #define SERV6_PORT 6060 48 49 #define SERV6_REWRITE_PORT 6666 49 50 ··· 86 85 static int bind6_prog_load(const struct sock_addr_test *test); 87 86 static int connect4_prog_load(const struct sock_addr_test *test); 88 87 static int connect6_prog_load(const struct sock_addr_test *test); 88 + static int sendmsg_allow_prog_load(const struct sock_addr_test *test); 89 89 static int sendmsg_deny_prog_load(const struct sock_addr_test *test); 90 90 static int sendmsg4_rw_asm_prog_load(const struct sock_addr_test *test); 91 91 static int sendmsg4_rw_c_prog_load(const struct sock_addr_test *test); 92 92 static int sendmsg6_rw_asm_prog_load(const struct sock_addr_test *test); 93 93 static int sendmsg6_rw_c_prog_load(const struct sock_addr_test *test); 94 94 static int sendmsg6_rw_v4mapped_prog_load(const struct sock_addr_test *test); 95 + static int sendmsg6_rw_wildcard_prog_load(const struct sock_addr_test *test); 95 96 96 97 static struct sock_addr_test tests[] = { 97 98 /* bind */ ··· 466 463 SYSCALL_ENOTSUPP, 467 464 }, 468 465 { 466 + "sendmsg6: set dst IP = [::] (BSD'ism)", 467 + sendmsg6_rw_wildcard_prog_load, 468 + BPF_CGROUP_UDP6_SENDMSG, 469 + BPF_CGROUP_UDP6_SENDMSG, 470 + AF_INET6, 471 + SOCK_DGRAM, 472 + SERV6_IP, 473 + SERV6_PORT, 474 + SERV6_REWRITE_IP, 475 + SERV6_REWRITE_PORT, 476 + SRC6_REWRITE_IP, 477 + SUCCESS, 478 + }, 479 + { 480 + "sendmsg6: preserve dst IP = [::] (BSD'ism)", 481 + sendmsg_allow_prog_load, 482 + BPF_CGROUP_UDP6_SENDMSG, 483 + BPF_CGROUP_UDP6_SENDMSG, 484 + AF_INET6, 485 + SOCK_DGRAM, 486 + WILDCARD6_IP, 487 + SERV6_PORT, 488 + SERV6_REWRITE_IP, 489 + SERV6_PORT, 490 + SRC6_IP, 491 + SUCCESS, 492 + }, 493 + { 469 494 "sendmsg6: deny call", 470 495 sendmsg_deny_prog_load, 471 496 BPF_CGROUP_UDP6_SENDMSG, ··· 765 734 return load_path(test, CONNECT6_PROG_PATH); 766 735 } 767 736 768 - static int sendmsg_deny_prog_load(const struct sock_addr_test *test) 737 + static int sendmsg_ret_only_prog_load(const struct sock_addr_test *test, 738 + int32_t rc) 769 739 { 770 740 struct bpf_insn insns[] = { 771 - /* return 0 */ 772 - BPF_MOV64_IMM(BPF_REG_0, 0), 741 + /* return rc */ 742 + BPF_MOV64_IMM(BPF_REG_0, rc), 773 743 BPF_EXIT_INSN(), 774 744 }; 775 745 return load_insns(test, insns, sizeof(insns) / sizeof(struct bpf_insn)); 746 + } 747 + 748 + static int sendmsg_allow_prog_load(const struct sock_addr_test *test) 749 + { 750 + return sendmsg_ret_only_prog_load(test, /*rc*/ 1); 751 + } 752 + 753 + static int sendmsg_deny_prog_load(const struct sock_addr_test *test) 754 + { 755 + return sendmsg_ret_only_prog_load(test, /*rc*/ 0); 776 756 } 777 757 778 758 static int sendmsg4_rw_asm_prog_load(const struct sock_addr_test *test) ··· 904 862 static int sendmsg6_rw_v4mapped_prog_load(const struct sock_addr_test *test) 905 863 { 906 864 return sendmsg6_rw_dst_asm_prog_load(test, SERV6_V4MAPPED_IP); 865 + } 866 + 867 + static int sendmsg6_rw_wildcard_prog_load(const struct sock_addr_test *test) 868 + { 869 + return sendmsg6_rw_dst_asm_prog_load(test, WILDCARD6_IP); 907 870 } 908 871 909 872 static int sendmsg6_rw_c_prog_load(const struct sock_addr_test *test) ··· 1442 1395 goto err; 1443 1396 1444 1397 cgfd = create_and_get_cgroup(CG_PATH); 1445 - if (!cgfd) 1398 + if (cgfd < 0) 1446 1399 goto err; 1447 1400 1448 1401 if (join_cgroup(CG_PATH))
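The two new sendmsg6 tests above pin down a BSD'ism: Linux treats a UDPv6 send to the unspecified address [::] as a send to loopback, so a BPF hook may rewrite the destination to [::] and the datagram still arrives, and conversely an allow-only program (sendmsg_allow_prog_load(), which just returns 1) must leave a user-supplied [::] destination intact. A small sketch of the userspace side of that behavior, under those assumptions:

#include <arpa/inet.h>
#include <netinet/in.h>
#include <sys/socket.h>

/* Send to [::]; with the BSD'ism above, a receiver bound to ::1 on
 * the same port gets the datagram.
 */
ssize_t send_to_wildcard(int sock, unsigned short port)
{
	struct sockaddr_in6 dst = {
		.sin6_family = AF_INET6,
		.sin6_port   = htons(port),
		.sin6_addr   = in6addr_any,	/* :: */
	};

	return sendto(sock, "ping", 4, 0,
		      (struct sockaddr *)&dst, sizeof(dst));
}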
+1 -1
tools/testing/selftests/bpf/test_socket_cookie.c
··· 202 202 goto err; 203 203 204 204 cgfd = create_and_get_cgroup(CG_PATH); 205 - if (!cgfd) 205 + if (cgfd < 0) 206 206 goto err; 207 207 208 208 if (join_cgroup(CG_PATH))
+1 -1
tools/testing/selftests/bpf/test_tcpbpf_user.c
··· 103 103 goto err; 104 104 105 105 cg_fd = create_and_get_cgroup(cg_path); 106 - if (!cg_fd) 106 + if (cg_fd < 0) 107 107 goto err; 108 108 109 109 if (join_cgroup(cg_path))
+1 -1
tools/testing/selftests/bpf/test_tcpnotify_user.c
··· 115 115 goto err; 116 116 117 117 cg_fd = create_and_get_cgroup(cg_path); 118 - if (!cg_fd) 118 + if (cg_fd < 0) 119 119 goto err; 120 120 121 121 if (join_cgroup(cg_path))
+120
tools/testing/selftests/bpf/test_verifier.c
··· 6934 6934 .retval = 1, 6935 6935 }, 6936 6936 { 6937 + "map access: mixing value pointer and scalar, 1", 6938 + .insns = { 6939 + // load map value pointer into r0 and r2 6940 + BPF_MOV64_IMM(BPF_REG_0, 1), 6941 + BPF_LD_MAP_FD(BPF_REG_ARG1, 0), 6942 + BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP), 6943 + BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -16), 6944 + BPF_ST_MEM(BPF_DW, BPF_REG_FP, -16, 0), 6945 + BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), 6946 + BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), 6947 + BPF_EXIT_INSN(), 6948 + // load some number from the map into r1 6949 + BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), 6950 + // depending on r1, branch: 6951 + BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 3), 6952 + // branch A 6953 + BPF_MOV64_REG(BPF_REG_2, BPF_REG_0), 6954 + BPF_MOV64_IMM(BPF_REG_3, 0), 6955 + BPF_JMP_A(2), 6956 + // branch B 6957 + BPF_MOV64_IMM(BPF_REG_2, 0), 6958 + BPF_MOV64_IMM(BPF_REG_3, 0x100000), 6959 + // common instruction 6960 + BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3), 6961 + // depending on r1, branch: 6962 + BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1), 6963 + // branch A 6964 + BPF_JMP_A(4), 6965 + // branch B 6966 + BPF_MOV64_IMM(BPF_REG_0, 0x13371337), 6967 + // verifier follows fall-through 6968 + BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 0x100000, 2), 6969 + BPF_MOV64_IMM(BPF_REG_0, 0), 6970 + BPF_EXIT_INSN(), 6971 + // fake-dead code; targeted from branch A to 6972 + // prevent dead code sanitization 6973 + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0), 6974 + BPF_MOV64_IMM(BPF_REG_0, 0), 6975 + BPF_EXIT_INSN(), 6976 + }, 6977 + .fixup_map_array_48b = { 1 }, 6978 + .result = ACCEPT, 6979 + .result_unpriv = REJECT, 6980 + .errstr_unpriv = "R2 tried to add from different pointers or scalars", 6981 + .retval = 0, 6982 + }, 6983 + { 6984 + "map access: mixing value pointer and scalar, 2", 6985 + .insns = { 6986 + // load map value pointer into r0 and r2 6987 + BPF_MOV64_IMM(BPF_REG_0, 1), 6988 + BPF_LD_MAP_FD(BPF_REG_ARG1, 0), 6989 + BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP), 6990 + BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -16), 6991 + BPF_ST_MEM(BPF_DW, BPF_REG_FP, -16, 0), 6992 + BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), 6993 + BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), 6994 + BPF_EXIT_INSN(), 6995 + // load some number from the map into r1 6996 + BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), 6997 + // depending on r1, branch: 6998 + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3), 6999 + // branch A 7000 + BPF_MOV64_IMM(BPF_REG_2, 0), 7001 + BPF_MOV64_IMM(BPF_REG_3, 0x100000), 7002 + BPF_JMP_A(2), 7003 + // branch B 7004 + BPF_MOV64_REG(BPF_REG_2, BPF_REG_0), 7005 + BPF_MOV64_IMM(BPF_REG_3, 0), 7006 + // common instruction 7007 + BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3), 7008 + // depending on r1, branch: 7009 + BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1), 7010 + // branch A 7011 + BPF_JMP_A(4), 7012 + // branch B 7013 + BPF_MOV64_IMM(BPF_REG_0, 0x13371337), 7014 + // verifier follows fall-through 7015 + BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 0x100000, 2), 7016 + BPF_MOV64_IMM(BPF_REG_0, 0), 7017 + BPF_EXIT_INSN(), 7018 + // fake-dead code; targeted from branch A to 7019 + // prevent dead code sanitization 7020 + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0), 7021 + BPF_MOV64_IMM(BPF_REG_0, 0), 7022 + BPF_EXIT_INSN(), 7023 + }, 7024 + .fixup_map_array_48b = { 1 }, 7025 + .result = ACCEPT, 7026 + .result_unpriv = REJECT, 7027 + .errstr_unpriv = "R2 tried to add from different maps or paths", 7028 + .retval = 0, 7029 + }, 7030 + { 7031 + "sanitation: alu with different scalars", 7032 + .insns = { 7033 + 
BPF_MOV64_IMM(BPF_REG_0, 1), 7034 + BPF_LD_MAP_FD(BPF_REG_ARG1, 0), 7035 + BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP), 7036 + BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -16), 7037 + BPF_ST_MEM(BPF_DW, BPF_REG_FP, -16, 0), 7038 + BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), 7039 + BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), 7040 + BPF_EXIT_INSN(), 7041 + BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), 7042 + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3), 7043 + BPF_MOV64_IMM(BPF_REG_2, 0), 7044 + BPF_MOV64_IMM(BPF_REG_3, 0x100000), 7045 + BPF_JMP_A(2), 7046 + BPF_MOV64_IMM(BPF_REG_2, 42), 7047 + BPF_MOV64_IMM(BPF_REG_3, 0x100001), 7048 + BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3), 7049 + BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), 7050 + BPF_EXIT_INSN(), 7051 + }, 7052 + .fixup_map_array_48b = { 1 }, 7053 + .result = ACCEPT, 7054 + .retval = 0x100000, 7055 + }, 7056 + { 6937 7057 "map access: value_ptr += known scalar, upper oob arith, test 1", 6938 7058 .insns = { 6939 7059 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
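The two "mixing value pointer and scalar" programs above probe the verifier's speculative-execution ALU sanitation: r2 holds a map-value pointer on one path and a plain scalar on the other, r3 holds the matching offset, and the shared BPF_ADD after the branches is accepted for root but must be rejected for unprivileged users, since the verifier cannot sanitize an ALU op whose operand is a pointer on one path and a scalar on another (hence the two unpriv error strings). In C terms, the rejected shape is roughly:

/* Illustrative only: the state the verifier refuses to sanitize. */
long mix(char *map_value, int cond)
{
	char *r2;
	long r3;

	if (cond) {		/* branch A: r2 is a pointer */
		r2 = map_value;
		r3 = 0;
	} else {		/* branch B: r2 is a scalar */
		r2 = (char *)0;
		r3 = 0x100000;
	}
	/* common instruction: one ALU op, pointer on one path,
	 * scalar on the other
	 */
	return (long)(r2 + r3);
}

The third test, "sanitation: alu with different scalars", is the accepted counterpart: both paths leave scalars in r2/r3, so the add sanitizes cleanly and the program returns 0x100000.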
+20
tools/testing/selftests/drivers/net/mlxsw/rtnetlink.sh
··· 25 25 lag_unlink_slaves_test 26 26 lag_dev_deletion_test 27 27 vlan_interface_uppers_test 28 + bridge_extern_learn_test 28 29 devlink_reload_test 29 30 " 30 31 NUM_NETIFS=2 ··· 538 537 ip link del dev br-test 539 538 540 539 log_test "vlan interface uppers" 540 + 541 + ip link del dev br0 542 + } 543 + 544 + bridge_extern_learn_test() 545 + { 546 + # Test that externally learned entries added from user space are 547 + # marked as offloaded 548 + RET=0 549 + 550 + ip link add name br0 type bridge 551 + ip link set dev $swp1 master br0 552 + 553 + bridge fdb add de:ad:be:ef:13:37 dev $swp1 master extern_learn 554 + 555 + bridge fdb show brport $swp1 | grep de:ad:be:ef:13:37 | grep -q offload 556 + check_err $? "fdb entry not marked as offloaded when should" 557 + 558 + log_test "externally learned fdb entry" 541 559 542 560 ip link del dev br0 543 561 }
+18
tools/testing/selftests/drivers/net/mlxsw/vxlan.sh
··· 847 847 848 848 log_test "vlan-aware - failed enslavement to vlan-aware bridge" 849 849 850 + bridge vlan del vid 10 dev vxlan20 851 + bridge vlan add vid 20 dev vxlan20 pvid untagged 852 + 853 + # Test that offloading of an unsupported tunnel fails when it is 854 + # triggered by addition of VLAN to a local port 855 + RET=0 856 + 857 + # TOS must be set to inherit 858 + ip link set dev vxlan10 type vxlan tos 42 859 + 860 + ip link set dev $swp1 master br0 861 + bridge vlan add vid 10 dev $swp1 &> /dev/null 862 + check_fail $? 863 + 864 + log_test "vlan-aware - failed vlan addition to a local port" 865 + 866 + ip link set dev vxlan10 type vxlan tos inherit 867 + 850 868 ip link del dev vxlan20 851 869 ip link del dev vxlan10 852 870 ip link del dev br0
+46 -1
tools/testing/selftests/net/forwarding/bridge_vlan_aware.sh
··· 1 1 #!/bin/bash 2 2 # SPDX-License-Identifier: GPL-2.0 3 3 4 - ALL_TESTS="ping_ipv4 ping_ipv6 learning flooding" 4 + ALL_TESTS="ping_ipv4 ping_ipv6 learning flooding vlan_deletion extern_learn" 5 5 NUM_NETIFS=4 6 6 CHECK_TC="yes" 7 7 source lib.sh ··· 94 94 flooding() 95 95 { 96 96 flood_test $swp2 $h1 $h2 97 + } 98 + 99 + vlan_deletion() 100 + { 101 + # Test that the deletion of a VLAN on a bridge port does not affect 102 + # the PVID VLAN 103 + log_info "Add and delete a VLAN on bridge port $swp1" 104 + 105 + bridge vlan add vid 10 dev $swp1 106 + bridge vlan del vid 10 dev $swp1 107 + 108 + ping_ipv4 109 + ping_ipv6 110 + } 111 + 112 + extern_learn() 113 + { 114 + local mac=de:ad:be:ef:13:37 115 + local ageing_time 116 + 117 + # Test that externally learned FDB entries can roam, but not age out 118 + RET=0 119 + 120 + bridge fdb add de:ad:be:ef:13:37 dev $swp1 master extern_learn vlan 1 121 + 122 + bridge fdb show brport $swp1 | grep -q de:ad:be:ef:13:37 123 + check_err $? "Did not find FDB entry when should" 124 + 125 + # Wait for 10 seconds after the ageing time to make sure the FDB entry 126 + # was not aged out 127 + ageing_time=$(bridge_ageing_time_get br0) 128 + sleep $((ageing_time + 10)) 129 + 130 + bridge fdb show brport $swp1 | grep -q de:ad:be:ef:13:37 131 + check_err $? "FDB entry was aged out when should not" 132 + 133 + $MZ $h2 -c 1 -p 64 -a $mac -t ip -q 134 + 135 + bridge fdb show brport $swp2 | grep -q de:ad:be:ef:13:37 136 + check_err $? "FDB entry did not roam when should" 137 + 138 + log_test "Externally learned FDB entry - ageing & roaming" 139 + 140 + bridge fdb del de:ad:be:ef:13:37 dev $swp2 master vlan 1 &> /dev/null 141 + bridge fdb del de:ad:be:ef:13:37 dev $swp1 master vlan 1 &> /dev/null 97 142 } 98 143 99 144 trap cleanup EXIT
+1 -1
tools/testing/selftests/net/forwarding/vxlan_bridge_1d.sh
··· 629 629 RET=0 630 630 631 631 tc filter add dev $h1 ingress pref 77 prot ip \ 632 - flower ip_tos $decapped_tos action pass 632 + flower ip_tos $decapped_tos action drop 633 633 sleep 1 634 634 vxlan_encapped_ping_test v2 v1 192.0.2.17 \ 635 635 $orig_inner_tos $orig_outer_tos \
+87 -9
tools/testing/selftests/net/ip_defrag.c
··· 203 203 { 204 204 struct ip *iphdr = (struct ip *)ip_frame; 205 205 struct ip6_hdr *ip6hdr = (struct ip6_hdr *)ip_frame; 206 + const bool ipv4 = !ipv6; 206 207 int res; 207 208 int offset; 208 209 int frag_len; ··· 240 239 iphdr->ip_sum = 0; 241 240 } 242 241 242 + /* Occasionally test in-order fragments. */ 243 + if (!cfg_overlap && (rand() % 100 < 15)) { 244 + offset = 0; 245 + while (offset < (UDP_HLEN + payload_len)) { 246 + send_fragment(fd_raw, addr, alen, offset, ipv6); 247 + offset += max_frag_len; 248 + } 249 + return; 250 + } 251 + 252 + /* Occasionally test IPv4 "runs" (see net/ipv4/ip_fragment.c) */ 253 + if (ipv4 && !cfg_overlap && (rand() % 100 < 20) && 254 + (payload_len > 9 * max_frag_len)) { 255 + offset = 6 * max_frag_len; 256 + while (offset < (UDP_HLEN + payload_len)) { 257 + send_fragment(fd_raw, addr, alen, offset, ipv6); 258 + offset += max_frag_len; 259 + } 260 + offset = 3 * max_frag_len; 261 + while (offset < 6 * max_frag_len) { 262 + send_fragment(fd_raw, addr, alen, offset, ipv6); 263 + offset += max_frag_len; 264 + } 265 + offset = 0; 266 + while (offset < 3 * max_frag_len) { 267 + send_fragment(fd_raw, addr, alen, offset, ipv6); 268 + offset += max_frag_len; 269 + } 270 + return; 271 + } 272 + 243 273 /* Odd fragments. */ 244 274 offset = max_frag_len; 245 275 while (offset < (UDP_HLEN + payload_len)) { 246 276 send_fragment(fd_raw, addr, alen, offset, ipv6); 277 + /* IPv4 ignores duplicates, so randomly send a duplicate. */ 278 + if (ipv4 && (1 == rand() % 100)) 279 + send_fragment(fd_raw, addr, alen, offset, ipv6); 247 280 offset += 2 * max_frag_len; 248 281 } 249 282 250 283 if (cfg_overlap) { 251 284 /* Send an extra random fragment. */ 252 - offset = rand() % (UDP_HLEN + payload_len - 1); 253 - /* sendto() returns EINVAL if offset + frag_len is too small. */ 254 285 if (ipv6) { 255 286 struct ip6_frag *fraghdr = (struct ip6_frag *)(ip_frame + IP6_HLEN); 287 + /* sendto() returns EINVAL if offset + frag_len is too small. */ 288 + offset = rand() % (UDP_HLEN + payload_len - 1); 256 289 frag_len = max_frag_len + rand() % 256; 257 290 /* In IPv6 if !!(frag_len % 8), the fragment is dropped. */ 258 291 frag_len &= ~0x7; ··· 294 259 ip6hdr->ip6_plen = htons(frag_len); 295 260 frag_len += IP6_HLEN; 296 261 } else { 297 - frag_len = IP4_HLEN + UDP_HLEN + rand() % 256; 262 + /* In IPv4, duplicates and some fragments completely inside 263 + * previously sent fragments are dropped/ignored. So 264 + * random offset and frag_len can result in a dropped 265 + * fragment instead of a dropped queue/packet. So we 266 + * hard-code offset and frag_len. 267 + * 268 + * See ade446403bfb ("net: ipv4: do not handle duplicate 269 + * fragments as overlapping"). 270 + */ 271 + if (max_frag_len * 4 < payload_len || max_frag_len < 16) { 272 + /* not enough payload to play with random offset and frag_len. 
*/ 273 + offset = 8; 274 + frag_len = IP4_HLEN + UDP_HLEN + max_frag_len; 275 + } else { 276 + offset = rand() % (payload_len / 2); 277 + frag_len = 2 * max_frag_len + 1 + rand() % 256; 278 + } 298 279 iphdr->ip_off = htons(offset / 8 | IP4_MF); 299 280 iphdr->ip_len = htons(frag_len); 300 281 } 301 282 res = sendto(fd_raw, ip_frame, frag_len, 0, addr, alen); 302 283 if (res < 0) 303 - error(1, errno, "sendto overlap"); 284 + error(1, errno, "sendto overlap: %d", frag_len); 304 285 if (res != frag_len) 305 286 error(1, 0, "sendto overlap: %d vs %d", (int)res, frag_len); 306 287 frag_counter++; ··· 326 275 offset = 0; 327 276 while (offset < (UDP_HLEN + payload_len)) { 328 277 send_fragment(fd_raw, addr, alen, offset, ipv6); 278 + /* IPv4 ignores duplicates, so randomly send a duplicate. */ 279 + if (ipv4 && (1 == rand() % 100)) 280 + send_fragment(fd_raw, addr, alen, offset, ipv6); 329 281 offset += 2 * max_frag_len; 330 282 } 331 283 } ··· 336 282 static void run_test(struct sockaddr *addr, socklen_t alen, bool ipv6) 337 283 { 338 284 int fd_tx_raw, fd_rx_udp; 339 - struct timeval tv = { .tv_sec = 0, .tv_usec = 10 * 1000 }; 285 + /* Frag queue timeout is set to one second in the calling script; 286 + * socket timeout should be just a bit longer to avoid tests interfering 287 + * with each other. 288 + */ 289 + struct timeval tv = { .tv_sec = 1, .tv_usec = 10 }; 340 290 int idx; 341 291 int min_frag_len = ipv6 ? 1280 : 8; 342 292 ··· 366 308 payload_len += (rand() % 4096)) { 367 309 if (cfg_verbose) 368 310 printf("payload_len: %d\n", payload_len); 369 - max_frag_len = min_frag_len; 370 - do { 311 + 312 + if (cfg_overlap) { 313 + /* With overlaps, one send/receive pair below takes 314 + * at least one second (== timeout) to run, so there 315 + * is not enough test time to run a nested loop: 316 + * the full overlap test takes 20-30 seconds. 317 + */ 318 + max_frag_len = min_frag_len + 319 + rand() % (1500 - FRAG_HLEN - min_frag_len); 371 320 send_udp_frags(fd_tx_raw, addr, alen, ipv6); 372 321 recv_validate_udp(fd_rx_udp); 373 - max_frag_len += 8 * (rand() % 8); 374 - } while (max_frag_len < (1500 - FRAG_HLEN) && max_frag_len <= payload_len); 322 + } else { 323 + /* Without overlaps, each packet reassembly (== one 324 + * send/receive pair below) takes very little time to 325 + * run, so we can easily afford more thourough testing 326 + * with a nested loop: the full non-overlap test takes 327 + * less than one second). 328 + */ 329 + max_frag_len = min_frag_len; 330 + do { 331 + send_udp_frags(fd_tx_raw, addr, alen, ipv6); 332 + recv_validate_udp(fd_rx_udp); 333 + max_frag_len += 8 * (rand() % 8); 334 + } while (max_frag_len < (1500 - FRAG_HLEN) && 335 + max_frag_len <= payload_len); 336 + } 375 337 } 376 338 377 339 /* Cleanup. */
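Two new transmit orders above stress reassembly beyond the old odd-then-even pattern: a purely in-order walk, and an IPv4 "runs" order that delivers the tail third first, then the middle, then the head, exercising the fragment-run tracking in net/ipv4/ip_fragment.c; random duplicates are sprinkled in since IPv4 must ignore them (see ade446403bfb). The overlap path also hard-codes offset and frag_len when the payload is too small for randomization, and the new 1-second socket timeout is matched by the ipfrag_time/ip6frag_time sysctls in the script below. A compact sketch of the runs ordering, with send_fragment() redeclared as a stand-in for the test's helper:

#include <stdbool.h>
#include <sys/socket.h>

void send_fragment(int fd, struct sockaddr *addr, socklen_t alen,
		   int offset, bool ipv6);	/* defined in the test */

void send_runs(int fd, struct sockaddr *addr, socklen_t alen,
	       int max_frag_len, int end, bool ipv6)
{
	/* Send fragments as three runs: [6n, end), [3n, 6n), [0, 3n). */
	static const int starts[] = { 6, 3, 0 };
	static const int stops[]  = { 0, 6, 3 };	/* 0 means "to end" */

	for (int i = 0; i < 3; i++) {
		int off  = starts[i] * max_frag_len;
		int stop = stops[i] ? stops[i] * max_frag_len : end;

		for (; off < stop; off += max_frag_len)
			send_fragment(fd, addr, alen, off, ipv6);
	}
}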
+8 -1
tools/testing/selftests/net/ip_defrag.sh
··· 11 11 setup() { 12 12 ip netns add "${NETNS}" 13 13 ip -netns "${NETNS}" link set lo up 14 + 14 15 ip netns exec "${NETNS}" sysctl -w net.ipv4.ipfrag_high_thresh=9000000 >/dev/null 2>&1 15 16 ip netns exec "${NETNS}" sysctl -w net.ipv4.ipfrag_low_thresh=7000000 >/dev/null 2>&1 17 + ip netns exec "${NETNS}" sysctl -w net.ipv4.ipfrag_time=1 >/dev/null 2>&1 18 + 16 19 ip netns exec "${NETNS}" sysctl -w net.ipv6.ip6frag_high_thresh=9000000 >/dev/null 2>&1 17 20 ip netns exec "${NETNS}" sysctl -w net.ipv6.ip6frag_low_thresh=7000000 >/dev/null 2>&1 21 + ip netns exec "${NETNS}" sysctl -w net.ipv6.ip6frag_time=1 >/dev/null 2>&1 22 + 23 + # DST cache can get full with a lot of frags, with GC not keeping up with the test. 24 + ip netns exec "${NETNS}" sysctl -w net.ipv6.route.max_size=65536 >/dev/null 2>&1 18 25 } 19 26 20 27 cleanup() { ··· 34 27 echo "ipv4 defrag" 35 28 ip netns exec "${NETNS}" ./ip_defrag -4 36 29 37 - 38 30 echo "ipv4 defrag with overlaps" 39 31 ip netns exec "${NETNS}" ./ip_defrag -4o 40 32 ··· 43 37 echo "ipv6 defrag with overlaps" 44 38 ip netns exec "${NETNS}" ./ip_defrag -6o 45 39 40 + echo "all tests done"
+1 -1
tools/testing/selftests/networking/timestamping/txtimestamp.c
··· 240 240 cm->cmsg_type == IP_RECVERR) || 241 241 (cm->cmsg_level == SOL_IPV6 && 242 242 cm->cmsg_type == IPV6_RECVERR) || 243 - (cm->cmsg_level = SOL_PACKET && 243 + (cm->cmsg_level == SOL_PACKET && 244 244 cm->cmsg_type == PACKET_TX_TIMESTAMP)) { 245 245 serr = (void *) CMSG_DATA(cm); 246 246 if (serr->ee_errno != ENOMSG ||
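The txtimestamp.c change is a one-character bug fix: '=' where '==' was meant. Because '&&' binds tighter than '=', the old code assigned the boolean result of the right-hand expression to cm->cmsg_level, so the real level was never compared (any level with a PACKET_TX_TIMESTAMP type matched) and cmsg_level was clobbered with 0 or 1 as a side effect. In isolation, with stand-in constants:

#include <stdbool.h>

struct cmsg_like { int cmsg_level; int cmsg_type; };
enum { SOL_PACKET_ = 263, PACKET_TX_TIMESTAMP_ = 16 };	/* stand-ins */

/* Buggy shape: parses as cmsg_level = (SOL_PACKET_ && type == ...),
 * ignoring and overwriting the real level. Building with -Wall
 * (-Wparentheses) flags it.
 */
bool match_buggy(struct cmsg_like *cm)
{
	return (cm->cmsg_level = SOL_PACKET_ &&
		cm->cmsg_type == PACKET_TX_TIMESTAMP_);
}

/* Fixed shape: plain comparisons, no side effect. */
bool match_fixed(const struct cmsg_like *cm)
{
	return cm->cmsg_level == SOL_PACKET_ &&
	       cm->cmsg_type == PACKET_TX_TIMESTAMP_;
}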
+44 -44
tools/testing/selftests/tc-testing/tc-tests/actions/ife.json
··· 17 17 "cmdUnderTest": "$TC actions add action ife encode allow mark pass index 2", 18 18 "expExitCode": "0", 19 19 "verifyCmd": "$TC actions get action ife index 2", 20 - "matchPattern": "action order [0-9]*: ife encode action pass.*type 0xED3E.*allow mark.*index 2", 20 + "matchPattern": "action order [0-9]*: ife encode action pass.*type 0[xX]ED3E.*allow mark.*index 2", 21 21 "matchCount": "1", 22 22 "teardown": [ 23 23 "$TC actions flush action ife" ··· 41 41 "cmdUnderTest": "$TC actions add action ife encode use mark 10 pipe index 2", 42 42 "expExitCode": "0", 43 43 "verifyCmd": "$TC actions get action ife index 2", 44 - "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xED3E.*use mark.*index 2", 44 + "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ED3E.*use mark.*index 2", 45 45 "matchCount": "1", 46 46 "teardown": [ 47 47 "$TC actions flush action ife" ··· 65 65 "cmdUnderTest": "$TC actions add action ife encode allow mark continue index 2", 66 66 "expExitCode": "0", 67 67 "verifyCmd": "$TC actions get action ife index 2", 68 - "matchPattern": "action order [0-9]*: ife encode action continue.*type 0xED3E.*allow mark.*index 2", 68 + "matchPattern": "action order [0-9]*: ife encode action continue.*type 0[xX]ED3E.*allow mark.*index 2", 69 69 "matchCount": "1", 70 70 "teardown": [ 71 71 "$TC actions flush action ife" ··· 89 89 "cmdUnderTest": "$TC actions add action ife encode use mark 789 drop index 2", 90 90 "expExitCode": "0", 91 91 "verifyCmd": "$TC actions get action ife index 2", 92 - "matchPattern": "action order [0-9]*: ife encode action drop.*type 0xED3E.*use mark 789.*index 2", 92 + "matchPattern": "action order [0-9]*: ife encode action drop.*type 0[xX]ED3E.*use mark 789.*index 2", 93 93 "matchCount": "1", 94 94 "teardown": [ 95 95 "$TC actions flush action ife" ··· 113 113 "cmdUnderTest": "$TC actions add action ife encode use mark 656768 reclassify index 2", 114 114 "expExitCode": "0", 115 115 "verifyCmd": "$TC actions get action ife index 2", 116 - "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0xED3E.*use mark 656768.*index 2", 116 + "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0[xX]ED3E.*use mark 656768.*index 2", 117 117 "matchCount": "1", 118 118 "teardown": [ 119 119 "$TC actions flush action ife" ··· 137 137 "cmdUnderTest": "$TC actions add action ife encode use mark 65 jump 1 index 2", 138 138 "expExitCode": "0", 139 139 "verifyCmd": "$TC actions get action ife index 2", 140 - "matchPattern": "action order [0-9]*: ife encode action jump 1.*type 0xED3E.*use mark 65.*index 2", 140 + "matchPattern": "action order [0-9]*: ife encode action jump 1.*type 0[xX]ED3E.*use mark 65.*index 2", 141 141 "matchCount": "1", 142 142 "teardown": [ 143 143 "$TC actions flush action ife" ··· 161 161 "cmdUnderTest": "$TC actions add action ife encode use mark 4294967295 reclassify index 90", 162 162 "expExitCode": "0", 163 163 "verifyCmd": "$TC actions get action ife index 90", 164 - "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0xED3E.*use mark 4294967295.*index 90", 164 + "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0[xX]ED3E.*use mark 4294967295.*index 90", 165 165 "matchCount": "1", 166 166 "teardown": [ 167 167 "$TC actions flush action ife" ··· 185 185 "cmdUnderTest": "$TC actions add action ife encode use mark 4294967295999 pipe index 90", 186 186 "expExitCode": "255", 187 187 "verifyCmd": "$TC actions get action ife index 90", 
188 - "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xED3E.*use mark 4294967295999.*index 90", 188 + "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ED3E.*use mark 4294967295999.*index 90", 189 189 "matchCount": "0", 190 190 "teardown": [] 191 191 }, ··· 207 207 "cmdUnderTest": "$TC actions add action ife encode allow prio pass index 9", 208 208 "expExitCode": "0", 209 209 "verifyCmd": "$TC actions get action ife index 9", 210 - "matchPattern": "action order [0-9]*: ife encode action pass.*type 0xED3E.*allow prio.*index 9", 210 + "matchPattern": "action order [0-9]*: ife encode action pass.*type 0[xX]ED3E.*allow prio.*index 9", 211 211 "matchCount": "1", 212 212 "teardown": [ 213 213 "$TC actions flush action ife" ··· 231 231 "cmdUnderTest": "$TC actions add action ife encode use prio 7 pipe index 9", 232 232 "expExitCode": "0", 233 233 "verifyCmd": "$TC actions get action ife index 9", 234 - "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xED3E.*use prio 7.*index 9", 234 + "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ED3E.*use prio 7.*index 9", 235 235 "matchCount": "1", 236 236 "teardown": [ 237 237 "$TC actions flush action ife" ··· 255 255 "cmdUnderTest": "$TC actions add action ife encode use prio 3 continue index 9", 256 256 "expExitCode": "0", 257 257 "verifyCmd": "$TC actions get action ife index 9", 258 - "matchPattern": "action order [0-9]*: ife encode action continue.*type 0xED3E.*use prio 3.*index 9", 258 + "matchPattern": "action order [0-9]*: ife encode action continue.*type 0[xX]ED3E.*use prio 3.*index 9", 259 259 "matchCount": "1", 260 260 "teardown": [ 261 261 "$TC actions flush action ife" ··· 279 279 "cmdUnderTest": "$TC actions add action ife encode allow prio drop index 9", 280 280 "expExitCode": "0", 281 281 "verifyCmd": "$TC actions get action ife index 9", 282 - "matchPattern": "action order [0-9]*: ife encode action drop.*type 0xED3E.*allow prio.*index 9", 282 + "matchPattern": "action order [0-9]*: ife encode action drop.*type 0[xX]ED3E.*allow prio.*index 9", 283 283 "matchCount": "1", 284 284 "teardown": [ 285 285 "$TC actions flush action ife" ··· 303 303 "cmdUnderTest": "$TC actions add action ife encode use prio 998877 reclassify index 9", 304 304 "expExitCode": "0", 305 305 "verifyCmd": "$TC actions get action ife index 9", 306 - "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0xED3E.*use prio 998877.*index 9", 306 + "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0[xX]ED3E.*use prio 998877.*index 9", 307 307 "matchCount": "1", 308 308 "teardown": [ 309 309 "$TC actions flush action ife" ··· 327 327 "cmdUnderTest": "$TC actions add action ife encode use prio 998877 jump 10 index 9", 328 328 "expExitCode": "0", 329 329 "verifyCmd": "$TC actions get action ife index 9", 330 - "matchPattern": "action order [0-9]*: ife encode action jump 10.*type 0xED3E.*use prio 998877.*index 9", 330 + "matchPattern": "action order [0-9]*: ife encode action jump 10.*type 0[xX]ED3E.*use prio 998877.*index 9", 331 331 "matchCount": "1", 332 332 "teardown": [ 333 333 "$TC actions flush action ife" ··· 351 351 "cmdUnderTest": "$TC actions add action ife encode use prio 4294967295 reclassify index 99", 352 352 "expExitCode": "0", 353 353 "verifyCmd": "$TC actions get action ife index 99", 354 - "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0xED3E.*use prio 4294967295.*index 99", 354 + "matchPattern": "action order 
[0-9]*: ife encode action reclassify.*type 0[xX]ED3E.*use prio 4294967295.*index 99", 355 355 "matchCount": "1", 356 356 "teardown": [ 357 357 "$TC actions flush action ife" ··· 375 375 "cmdUnderTest": "$TC actions add action ife encode use prio 4294967298 pipe index 99", 376 376 "expExitCode": "255", 377 377 "verifyCmd": "$TC actions get action ife index 99", 378 - "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xED3E.*use prio 4294967298.*index 99", 378 + "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ED3E.*use prio 4294967298.*index 99", 379 379 "matchCount": "0", 380 380 "teardown": [] 381 381 }, ··· 397 397 "cmdUnderTest": "$TC actions add action ife encode allow tcindex pass index 1", 398 398 "expExitCode": "0", 399 399 "verifyCmd": "$TC actions get action ife index 1", 400 - "matchPattern": "action order [0-9]*: ife encode action pass.*type 0xED3E.*allow tcindex.*index 1", 400 + "matchPattern": "action order [0-9]*: ife encode action pass.*type 0[xX]ED3E.*allow tcindex.*index 1", 401 401 "matchCount": "1", 402 402 "teardown": [ 403 403 "$TC actions flush action ife" ··· 421 421 "cmdUnderTest": "$TC actions add action ife encode use tcindex 111 pipe index 1", 422 422 "expExitCode": "0", 423 423 "verifyCmd": "$TC actions get action ife index 1", 424 - "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xED3E.*use tcindex 111.*index 1", 424 + "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ED3E.*use tcindex 111.*index 1", 425 425 "matchCount": "1", 426 426 "teardown": [ 427 427 "$TC actions flush action ife" ··· 445 445 "cmdUnderTest": "$TC actions add action ife encode use tcindex 1 continue index 1", 446 446 "expExitCode": "0", 447 447 "verifyCmd": "$TC actions get action ife index 1", 448 - "matchPattern": "action order [0-9]*: ife encode action continue.*type 0xED3E.*use tcindex 1.*index 1", 448 + "matchPattern": "action order [0-9]*: ife encode action continue.*type 0[xX]ED3E.*use tcindex 1.*index 1", 449 449 "matchCount": "1", 450 450 "teardown": [ 451 451 "$TC actions flush action ife" ··· 469 469 "cmdUnderTest": "$TC actions add action ife encode use tcindex 1 continue index 1", 470 470 "expExitCode": "0", 471 471 "verifyCmd": "$TC actions get action ife index 1", 472 - "matchPattern": "action order [0-9]*: ife encode action continue.*type 0xED3E.*use tcindex 1.*index 1", 472 + "matchPattern": "action order [0-9]*: ife encode action continue.*type 0[xX]ED3E.*use tcindex 1.*index 1", 473 473 "matchCount": "1", 474 474 "teardown": [ 475 475 "$TC actions flush action ife" ··· 493 493 "cmdUnderTest": "$TC actions add action ife encode allow tcindex drop index 77", 494 494 "expExitCode": "0", 495 495 "verifyCmd": "$TC actions get action ife index 77", 496 - "matchPattern": "action order [0-9]*: ife encode action drop.*type 0xED3E.*allow tcindex.*index 77", 496 + "matchPattern": "action order [0-9]*: ife encode action drop.*type 0[xX]ED3E.*allow tcindex.*index 77", 497 497 "matchCount": "1", 498 498 "teardown": [ 499 499 "$TC actions flush action ife" ··· 517 517 "cmdUnderTest": "$TC actions add action ife encode allow tcindex reclassify index 77", 518 518 "expExitCode": "0", 519 519 "verifyCmd": "$TC actions get action ife index 77", 520 - "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0xED3E.*allow tcindex.*index 77", 520 + "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0[xX]ED3E.*allow tcindex.*index 77", 521 521 "matchCount": "1", 522 522 
"teardown": [ 523 523 "$TC actions flush action ife" ··· 541 541 "cmdUnderTest": "$TC actions add action ife encode allow tcindex jump 999 index 77", 542 542 "expExitCode": "0", 543 543 "verifyCmd": "$TC actions get action ife index 77", 544 - "matchPattern": "action order [0-9]*: ife encode action jump 999.*type 0xED3E.*allow tcindex.*index 77", 544 + "matchPattern": "action order [0-9]*: ife encode action jump 999.*type 0[xX]ED3E.*allow tcindex.*index 77", 545 545 "matchCount": "1", 546 546 "teardown": [ 547 547 "$TC actions flush action ife" ··· 565 565 "cmdUnderTest": "$TC actions add action ife encode use tcindex 65535 pass index 1", 566 566 "expExitCode": "0", 567 567 "verifyCmd": "$TC actions get action ife index 1", 568 - "matchPattern": "action order [0-9]*: ife encode action pass.*type 0xED3E.*use tcindex 65535.*index 1", 568 + "matchPattern": "action order [0-9]*: ife encode action pass.*type 0[xX]ED3E.*use tcindex 65535.*index 1", 569 569 "matchCount": "1", 570 570 "teardown": [ 571 571 "$TC actions flush action ife" ··· 589 589 "cmdUnderTest": "$TC actions add action ife encode use tcindex 65539 pipe index 1", 590 590 "expExitCode": "255", 591 591 "verifyCmd": "$TC actions get action ife index 1", 592 - "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xED3E.*use tcindex 65539.*index 1", 592 + "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ED3E.*use tcindex 65539.*index 1", 593 593 "matchCount": "0", 594 594 "teardown": [] 595 595 }, ··· 611 611 "cmdUnderTest": "$TC actions add action ife encode allow mark src 00:11:22:33:44:55 pipe index 1", 612 612 "expExitCode": "0", 613 613 "verifyCmd": "$TC actions get action ife index 1", 614 - "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xED3E.*allow mark src 00:11:22:33:44:55.*index 1", 614 + "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ED3E.*allow mark src 00:11:22:33:44:55.*index 1", 615 615 "matchCount": "1", 616 616 "teardown": [ 617 617 "$TC actions flush action ife" ··· 635 635 "cmdUnderTest": "$TC actions add action ife encode use prio 9876 dst 00:11:22:33:44:55 reclassify index 1", 636 636 "expExitCode": "0", 637 637 "verifyCmd": "$TC actions get action ife index 1", 638 - "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0xED3E.*use prio 9876 dst 00:11:22:33:44:55.*index 1", 638 + "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0[xX]ED3E.*use prio 9876 dst 00:11:22:33:44:55.*index 1", 639 639 "matchCount": "1", 640 640 "teardown": [ 641 641 "$TC actions flush action ife" ··· 659 659 "cmdUnderTest": "$TC actions add action ife encode allow tcindex src 00:aa:bb:cc:dd:ee dst 00:11:22:33:44:55 pass index 11", 660 660 "expExitCode": "0", 661 661 "verifyCmd": "$TC actions get action ife index 11", 662 - "matchPattern": "action order [0-9]*: ife encode action pass.*type 0xED3E.*allow tcindex dst 00:11:22:33:44:55 src 00:aa:bb:cc:dd:ee .*index 11", 662 + "matchPattern": "action order [0-9]*: ife encode action pass.*type 0[xX]ED3E.*allow tcindex dst 00:11:22:33:44:55 src 00:aa:bb:cc:dd:ee .*index 11", 663 663 "matchCount": "1", 664 664 "teardown": [ 665 665 "$TC actions flush action ife" ··· 683 683 "cmdUnderTest": "$TC actions add action ife encode use mark 7 type 0xfefe pass index 1", 684 684 "expExitCode": "0", 685 685 "verifyCmd": "$TC actions get action ife index 1", 686 - "matchPattern": "action order [0-9]*: ife encode action pass.*type 0xFEFE.*use mark 7.*index 1", 686 + "matchPattern": 
"action order [0-9]*: ife encode action pass.*type 0[xX]FEFE.*use mark 7.*index 1", 687 687 "matchCount": "1", 688 688 "teardown": [ 689 689 "$TC actions flush action ife" ··· 707 707 "cmdUnderTest": "$TC actions add action ife encode use prio 444 type 0xabba pipe index 21", 708 708 "expExitCode": "0", 709 709 "verifyCmd": "$TC actions get action ife index 21", 710 - "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xABBA.*use prio 444.*index 21", 710 + "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ABBA.*use prio 444.*index 21", 711 711 "matchCount": "1", 712 712 "teardown": [ 713 713 "$TC actions flush action ife" ··· 731 731 "cmdUnderTest": "$TC actions add action ife encode use tcindex 5000 type 0xabcd reclassify index 21", 732 732 "expExitCode": "0", 733 733 "verifyCmd": "$TC actions get action ife index 21", 734 - "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0xABCD.*use tcindex 5000.*index 21", 734 + "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0[xX]ABCD.*use tcindex 5000.*index 21", 735 735 "matchCount": "1", 736 736 "teardown": [ 737 737 "$TC actions flush action ife" ··· 739 739 }, 740 740 { 741 741 "id": "fac3", 742 - "name": "Create valid ife encode action with index at 32-bit maximnum", 742 + "name": "Create valid ife encode action with index at 32-bit maximum", 743 743 "category": [ 744 744 "actions", 745 745 "ife" ··· 755 755 "cmdUnderTest": "$TC actions add action ife encode allow mark pass index 4294967295", 756 756 "expExitCode": "0", 757 757 "verifyCmd": "$TC actions get action ife index 4294967295", 758 - "matchPattern": "action order [0-9]*: ife encode action pass.*type 0xED3E.*allow mark.*index 4294967295", 758 + "matchPattern": "action order [0-9]*: ife encode action pass.*type 0[xX]ED3E.*allow mark.*index 4294967295", 759 759 "matchCount": "1", 760 760 "teardown": [ 761 761 "$TC actions flush action ife" ··· 779 779 "cmdUnderTest": "$TC actions add action ife decode pass index 1", 780 780 "expExitCode": "0", 781 781 "verifyCmd": "$TC actions get action ife index 1", 782 - "matchPattern": "action order [0-9]*: ife decode action pass.*type 0x0.*allow mark allow tcindex allow prio.*index 1", 782 + "matchPattern": "action order [0-9]*: ife decode action pass.*type 0(x0)?.*allow mark allow tcindex allow prio.*index 1", 783 783 "matchCount": "1", 784 784 "teardown": [ 785 785 "$TC actions flush action ife" ··· 803 803 "cmdUnderTest": "$TC actions add action ife decode pipe index 1", 804 804 "expExitCode": "0", 805 805 "verifyCmd": "$TC actions get action ife index 1", 806 - "matchPattern": "action order [0-9]*: ife decode action pipe.*type 0x0.*allow mark allow tcindex allow prio.*index 1", 806 + "matchPattern": "action order [0-9]*: ife decode action pipe.*type 0(x0)?.*allow mark allow tcindex allow prio.*index 1", 807 807 "matchCount": "1", 808 808 "teardown": [ 809 809 "$TC actions flush action ife" ··· 827 827 "cmdUnderTest": "$TC actions add action ife decode continue index 1", 828 828 "expExitCode": "0", 829 829 "verifyCmd": "$TC actions get action ife index 1", 830 - "matchPattern": "action order [0-9]*: ife decode action continue.*type 0x0.*allow mark allow tcindex allow prio.*index 1", 830 + "matchPattern": "action order [0-9]*: ife decode action continue.*type 0(x0)?.*allow mark allow tcindex allow prio.*index 1", 831 831 "matchCount": "1", 832 832 "teardown": [ 833 833 "$TC actions flush action ife" ··· 851 851 "cmdUnderTest": "$TC actions add action ife decode 
drop index 1", 852 852 "expExitCode": "0", 853 853 "verifyCmd": "$TC actions get action ife index 1", 854 - "matchPattern": "action order [0-9]*: ife decode action drop.*type 0x0.*allow mark allow tcindex allow prio.*index 1", 854 + "matchPattern": "action order [0-9]*: ife decode action drop.*type 0(x0)?.*allow mark allow tcindex allow prio.*index 1", 855 855 "matchCount": "1", 856 856 "teardown": [ 857 857 "$TC actions flush action ife" ··· 875 875 "cmdUnderTest": "$TC actions add action ife decode reclassify index 1", 876 876 "expExitCode": "0", 877 877 "verifyCmd": "$TC actions get action ife index 1", 878 - "matchPattern": "action order [0-9]*: ife decode action reclassify.*type 0x0.*allow mark allow tcindex allow prio.*index 1", 878 + "matchPattern": "action order [0-9]*: ife decode action reclassify.*type 0(x0)?.*allow mark allow tcindex allow prio.*index 1", 879 879 "matchCount": "1", 880 880 "teardown": [ 881 881 "$TC actions flush action ife" ··· 899 899 "cmdUnderTest": "$TC actions add action ife decode jump 10 index 1", 900 900 "expExitCode": "0", 901 901 "verifyCmd": "$TC actions get action ife index 1", 902 - "matchPattern": "action order [0-9]*: ife decode action jump 10.*type 0x0.*allow mark allow tcindex allow prio.*index 1", 902 + "matchPattern": "action order [0-9]*: ife decode action jump 10.*type 0(x0)?.*allow mark allow tcindex allow prio.*index 1", 903 903 "matchCount": "1", 904 904 "teardown": [ 905 905 "$TC actions flush action ife" ··· 923 923 "cmdUnderTest": "$TC actions add action ife encode allow mark pass index 4294967295999", 924 924 "expExitCode": "255", 925 925 "verifyCmd": "$TC actions get action ife index 4294967295999", 926 - "matchPattern": "action order [0-9]*: ife encode action pass.*type 0xED3E.*allow mark.*index 4294967295999", 926 + "matchPattern": "action order [0-9]*: ife encode action pass.*type 0[xX]ED3E.*allow mark.*index 4294967295999", 927 927 "matchCount": "0", 928 928 "teardown": [] 929 929 }, ··· 945 945 "cmdUnderTest": "$TC actions add action ife encode allow mark kuka index 4", 946 946 "expExitCode": "255", 947 947 "verifyCmd": "$TC actions get action ife index 4", 948 - "matchPattern": "action order [0-9]*: ife encode action kuka.*type 0xED3E.*allow mark.*index 4", 948 + "matchPattern": "action order [0-9]*: ife encode action kuka.*type 0[xX]ED3E.*allow mark.*index 4", 949 949 "matchCount": "0", 950 950 "teardown": [] 951 951 }, ··· 967 967 "cmdUnderTest": "$TC actions add action ife encode allow prio pipe index 4 cookie aabbccddeeff112233445566778800a1", 968 968 "expExitCode": "0", 969 969 "verifyCmd": "$TC actions get action ife index 4", 970 - "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xED3E.*allow prio.*index 4.*cookie aabbccddeeff112233445566778800a1", 970 + "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ED3E.*allow prio.*index 4.*cookie aabbccddeeff112233445566778800a1", 971 971 "matchCount": "1", 972 972 "teardown": [ 973 973 "$TC actions flush action ife" ··· 991 991 "cmdUnderTest": "$TC actions add action ife encode allow foo pipe index 4", 992 992 "expExitCode": "255", 993 993 "verifyCmd": "$TC actions get action ife index 4", 994 - "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xED3E.*allow foo.*index 4", 994 + "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ED3E.*allow foo.*index 4", 995 995 "matchCount": "0", 996 996 "teardown": [] 997 997 }, ··· 1013 1013 "cmdUnderTest": "$TC actions add action ife encode allow prio type 70000 
pipe index 4", 1014 1014 "expExitCode": "255", 1015 1015 "verifyCmd": "$TC actions get action ife index 4", 1016 - "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0x11170.*allow prio.*index 4", 1016 + "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]11170.*allow prio.*index 4", 1017 1017 "matchCount": "0", 1018 1018 "teardown": [] 1019 1019 },
+1 -30
tools/testing/selftests/tc-testing/tc-tests/actions/tunnel_key.json
··· 82 82 ] 83 83 }, 84 84 { 85 - "id": "ba4e", 86 - "name": "Add tunnel_key set action with missing mandatory id parameter", 87 - "category": [ 88 - "actions", 89 - "tunnel_key" 90 - ], 91 - "setup": [ 92 - [ 93 - "$TC actions flush action tunnel_key", 94 - 0, 95 - 1, 96 - 255 97 - ] 98 - ], 99 - "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 10.10.10.1 dst_ip 20.20.20.2", 100 - "expExitCode": "255", 101 - "verifyCmd": "$TC actions list action tunnel_key", 102 - "matchPattern": "action order [0-9]+: tunnel_key set.*src_ip 10.10.10.1.*dst_ip 20.20.20.2", 103 - "matchCount": "0", 104 - "teardown": [ 105 - [ 106 - "$TC actions flush action tunnel_key", 107 - 0, 108 - 1, 109 - 255 110 - ] 111 - ] 112 - }, 113 - { 114 85 "id": "a5e0", 115 86 "name": "Add tunnel_key set action with invalid src_ip parameter", 116 87 "category": [ ··· 605 634 "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 10.10.10.1 dst_ip 10.10.10.2 id 7 index 4 cookie aa11bb22cc33dd44ee55ff66aa11b1b2", 606 635 "expExitCode": "0", 607 636 "verifyCmd": "$TC actions get action tunnel_key index 4", 608 - "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 10.10.10.1.*dst_ip 10.10.10.2.*key_id 7.*dst_port 0.*csum pipe.*index 4 ref.*cookie aa11bb22cc33dd44ee55ff66aa11b1b2", 637 + "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 10.10.10.1.*dst_ip 10.10.10.2.*key_id 7.*csum pipe.*index 4 ref.*cookie aa11bb22cc33dd44ee55ff66aa11b1b2", 609 638 "matchCount": "1", 610 639 "teardown": [ 611 640 "$TC actions flush action tunnel_key"
+1 -1
tools/thermal/tmon/Makefile
··· 6 6 7 7 BINDIR=usr/bin 8 8 WARNFLAGS=-Wall -Wshadow -W -Wformat -Wimplicit-function-declaration -Wimplicit-int 9 - override CFLAGS+= -O1 ${WARNFLAGS} 9 + override CFLAGS+= $(call cc-option,-O3,-O1) ${WARNFLAGS} 10 10 # Add "-fstack-protector" only if toolchain supports it. 11 11 override CFLAGS+= $(call cc-option,-fstack-protector-strong) 12 12 CC?= $(CROSS_COMPILE)gcc
+3 -1
tools/vm/page_owner_sort.c
··· 5 5 * Example use: 6 6 * cat /sys/kernel/debug/page_owner > page_owner_full.txt 7 7 * grep -v ^PFN page_owner_full.txt > page_owner.txt 8 - * ./sort page_owner.txt sorted_page_owner.txt 8 + * ./page_owner_sort page_owner.txt sorted_page_owner.txt 9 + * 10 + * See Documentation/vm/page_owner.rst 9 11 */ 10 12 11 13 #include <stdio.h>
+7 -2
virt/kvm/kvm_main.c
··· 1227 1227 { 1228 1228 struct kvm_memslots *slots; 1229 1229 struct kvm_memory_slot *memslot; 1230 - int as_id, id, n; 1230 + int as_id, id; 1231 1231 gfn_t offset; 1232 - unsigned long i; 1232 + unsigned long i, n; 1233 1233 unsigned long *dirty_bitmap; 1234 1234 unsigned long *dirty_bitmap_buffer; 1235 1235 ··· 1249 1249 return -ENOENT; 1250 1250 1251 1251 n = kvm_dirty_bitmap_bytes(memslot); 1252 + 1253 + if (log->first_page > memslot->npages || 1254 + log->num_pages > memslot->npages - log->first_page) 1255 + return -EINVAL; 1256 + 1252 1257 *flush = false; 1253 1258 dirty_bitmap_buffer = kvm_second_dirty_bitmap(memslot); 1254 1259 if (copy_from_user(dirty_bitmap_buffer, log->dirty_bitmap, n))
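The kvm_main.c hunk widens n to unsigned long to match kvm_dirty_bitmap_bytes() and, more importantly, validates the user-supplied clear-dirty-log range before it is used. Note the check's shape: first > npages || num > npages - first, rather than the naive first + num > npages, which can wrap around for hostile 64-bit inputs. Reduced to its essentials:

#include <stdbool.h>

typedef unsigned long long u64;	/* stand-in */

/* True iff [first, first + count) fits within n pages, written to
 * avoid overflow in 'first + count'.
 */
bool range_ok(u64 first, u64 count, u64 n)
{
	return first <= n && count <= n - first;
}

For example, range_ok(~0ULL, 1, 100) is correctly false, while the naive sum wraps to 0 and would have passed.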