Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

spi: Merge up fixes from Linus' tree

Gets us pine64plus back if nothing else.

+3258 -1859
+96 -1
.mailmap
··· 13 13 Aaron Durbin <adurbin@google.com> 14 14 Abel Vesa <abelvesa@kernel.org> <abel.vesa@nxp.com> 15 15 Abel Vesa <abelvesa@kernel.org> <abelvesa@gmail.com> 16 + Abhijeet Dharmapurikar <quic_adharmap@quicinc.com> <adharmap@codeaurora.org> 16 17 Abhinav Kumar <quic_abhinavk@quicinc.com> <abhinavk@codeaurora.org> 18 + Ahmad Masri <quic_amasri@quicinc.com> <amasri@codeaurora.org> 17 19 Adam Oldham <oldhamca@gmail.com> 18 20 Adam Radford <aradford@gmail.com> 19 21 Adriana Reus <adi.reus@gmail.com> <adriana.reus@intel.com> ··· 32 30 Alexander Mikhalitsyn <alexander@mihalicyn.com> <aleksandr.mikhalitsyn@canonical.com> 33 31 Alexandre Belloni <alexandre.belloni@bootlin.com> <alexandre.belloni@free-electrons.com> 34 32 Alexandre Ghiti <alex@ghiti.fr> <alexandre.ghiti@canonical.com> 33 + Alexei Avshalom Lazar <quic_ailizaro@quicinc.com> <ailizaro@codeaurora.org> 35 34 Alexei Starovoitov <ast@kernel.org> <alexei.starovoitov@gmail.com> 36 35 Alexei Starovoitov <ast@kernel.org> <ast@fb.com> 37 36 Alexei Starovoitov <ast@kernel.org> <ast@plumgrid.com> ··· 40 37 Alex Shi <alexs@kernel.org> <alex.shi@intel.com> 41 38 Alex Shi <alexs@kernel.org> <alex.shi@linaro.org> 42 39 Alex Shi <alexs@kernel.org> <alex.shi@linux.alibaba.com> 40 + Aloka Dixit <quic_alokad@quicinc.com> <alokad@codeaurora.org> 43 41 Al Viro <viro@ftp.linux.org.uk> 44 42 Al Viro <viro@zenIV.linux.org.uk> 43 + Amit Blay <quic_ablay@quicinc.com> <ablay@codeaurora.org> 44 + Amit Nischal <quic_anischal@quicinc.com> <anischal@codeaurora.org> 45 45 Andi Kleen <ak@linux.intel.com> <ak@suse.de> 46 46 Andi Shyti <andi@etezian.org> <andi.shyti@samsung.com> 47 47 Andreas Herrmann <aherrman@de.ibm.com> ··· 60 54 Andrzej Hajda <andrzej.hajda@intel.com> <a.hajda@samsung.com> 61 55 André Almeida <andrealmeid@igalia.com> <andrealmeid@collabora.com> 62 56 Andy Adamson <andros@citi.umich.edu> 57 + Anilkumar Kolli <quic_akolli@quicinc.com> <akolli@codeaurora.org> 58 + Anirudh Ghayal <quic_aghayal@quicinc.com> <aghayal@codeaurora.org> 63 59 Antoine Tenart <atenart@kernel.org> <antoine.tenart@bootlin.com> 64 60 Antoine Tenart <atenart@kernel.org> <antoine.tenart@free-electrons.com> 65 61 Antonio Ospite <ao2@ao2.it> <ao2@amarulasolutions.com> ··· 70 62 Ard Biesheuvel <ardb@kernel.org> <ard.biesheuvel@linaro.org> 71 63 Arnaud Patard <arnaud.patard@rtp-net.org> 72 64 Arnd Bergmann <arnd@arndb.de> 65 + Arun Kumar Neelakantam <quic_aneela@quicinc.com> <aneela@codeaurora.org> 66 + Ashok Raj Nagarajan <quic_arnagara@quicinc.com> <arnagara@codeaurora.org> 67 + Ashwin Chaugule <quic_ashwinc@quicinc.com> <ashwinc@codeaurora.org> 68 + Asutosh Das <quic_asutoshd@quicinc.com> <asutoshd@codeaurora.org> 73 69 Atish Patra <atishp@atishpatra.org> <atish.patra@wdc.com> 70 + Avaneesh Kumar Dwivedi <quic_akdwived@quicinc.com> <akdwived@codeaurora.org> 74 71 Axel Dyks <xl@xlsigned.net> 75 72 Axel Lin <axel.lin@gmail.com> 73 + Balakrishna Godavarthi <quic_bgodavar@quicinc.com> <bgodavar@codeaurora.org> 74 + Banajit Goswami <quic_bgoswami@quicinc.com> <bgoswami@codeaurora.org> 75 + Baochen Qiang <quic_bqiang@quicinc.com> <bqiang@codeaurora.org> 76 76 Baolin Wang <baolin.wang@linux.alibaba.com> <baolin.wang@linaro.org> 77 77 Baolin Wang <baolin.wang@linux.alibaba.com> <baolin.wang@spreadtrum.com> 78 78 Baolin Wang <baolin.wang@linux.alibaba.com> <baolin.wang@unisoc.com> ··· 109 93 Brian King <brking@us.ibm.com> 110 94 Brian Silverman <bsilver16384@gmail.com> <brian.silverman@bluerivertech.com> 111 95 Cai Huoqing <cai.huoqing@linux.dev> <caihuoqing@baidu.com> 96 + Can Guo 
<quic_cang@quicinc.com> <cang@codeaurora.org> 97 + Carl Huang <quic_cjhuang@quicinc.com> <cjhuang@codeaurora.org> 112 98 Changbin Du <changbin.du@intel.com> <changbin.du@gmail.com> 113 99 Changbin Du <changbin.du@intel.com> <changbin.du@intel.com> 114 100 Chao Yu <chao@kernel.org> <chao2.yu@samsung.com> 115 101 Chao Yu <chao@kernel.org> <yuchao0@huawei.com> 116 102 Chris Chiu <chris.chiu@canonical.com> <chiu@endlessm.com> 117 103 Chris Chiu <chris.chiu@canonical.com> <chiu@endlessos.org> 104 + Chris Lew <quic_clew@quicinc.com> <clew@codeaurora.org> 118 105 Christian Borntraeger <borntraeger@linux.ibm.com> <borntraeger@de.ibm.com> 119 106 Christian Borntraeger <borntraeger@linux.ibm.com> <cborntra@de.ibm.com> 120 107 Christian Borntraeger <borntraeger@linux.ibm.com> <borntrae@de.ibm.com> ··· 138 119 Daniel Borkmann <daniel@iogearbox.net> <dborkman@redhat.com> 139 120 Daniel Borkmann <daniel@iogearbox.net> <dxchgb@gmail.com> 140 121 David Brownell <david-b@pacbell.net> 122 + David Collins <quic_collinsd@quicinc.com> <collinsd@codeaurora.org> 141 123 David Woodhouse <dwmw2@shinybook.infradead.org> 124 + Dedy Lansky <quic_dlansky@quicinc.com> <dlansky@codeaurora.org> 125 + Deepak Kumar Singh <quic_deesin@quicinc.com> <deesin@codeaurora.org> 142 126 Dengcheng Zhu <dzhu@wavecomp.com> <dczhu@mips.com> 143 127 Dengcheng Zhu <dzhu@wavecomp.com> <dengcheng.zhu@gmail.com> 144 128 Dengcheng Zhu <dzhu@wavecomp.com> <dengcheng.zhu@imgtec.com> ··· 158 136 Domen Puncer <domen@coderock.org> 159 137 Douglas Gilbert <dougg@torque.net> 160 138 Ed L. Cashin <ecashin@coraid.com> 139 + Elliot Berman <quic_eberman@quicinc.com> <eberman@codeaurora.org> 161 140 Enric Balletbo i Serra <eballetbo@kernel.org> <enric.balletbo@collabora.com> 162 141 Enric Balletbo i Serra <eballetbo@kernel.org> <eballetbo@iseebcn.com> 163 142 Erik Kaneda <erik.kaneda@intel.com> <erik.schmauss@intel.com> ··· 171 148 Felipe W Damasio <felipewd@terra.com.br> 172 149 Felix Kuhling <fxkuehl@gmx.de> 173 150 Felix Moeller <felix@derklecks.de> 151 + Fenglin Wu <quic_fenglinw@quicinc.com> <fenglinw@codeaurora.org> 174 152 Filipe Lautert <filipe@icewall.org> 175 153 Finn Thain <fthain@linux-m68k.org> <fthain@telegraphics.com.au> 176 154 Franck Bui-Huu <vagabon.xyz@gmail.com> ··· 195 171 Gregory CLEMENT <gregory.clement@bootlin.com> <gregory.clement@free-electrons.com> 196 172 Guilherme G. Piccoli <kernel@gpiccoli.net> <gpiccoli@linux.vnet.ibm.com> 197 173 Guilherme G. Piccoli <kernel@gpiccoli.net> <gpiccoli@canonical.com> 174 + Gokul Sriram Palanisamy <quic_gokulsri@quicinc.com> <gokulsri@codeaurora.org> 175 + Govindaraj Saminathan <quic_gsamin@quicinc.com> <gsamin@codeaurora.org> 198 176 Guo Ren <guoren@kernel.org> <guoren@linux.alibaba.com> 199 177 Guo Ren <guoren@kernel.org> <ren_guo@c-sky.com> 178 + Guru Das Srinagesh <quic_gurus@quicinc.com> <gurus@codeaurora.org> 200 179 Gustavo Padovan <gustavo@las.ic.unicamp.br> 201 180 Gustavo Padovan <padovan@profusion.mobi> 202 181 Hanjun Guo <guohanjun@huawei.com> <hanjun.guo@linaro.org> ··· 217 190 J. Bruce Fields <bfields@fieldses.org> <bfields@redhat.com> 218 191 J. 
Bruce Fields <bfields@fieldses.org> <bfields@citi.umich.edu> 219 192 Jacob Shin <Jacob.Shin@amd.com> 193 + Jack Pham <quic_jackp@quicinc.com> <jackp@codeaurora.org> 220 194 Jaegeuk Kim <jaegeuk@kernel.org> <jaegeuk@google.com> 221 195 Jaegeuk Kim <jaegeuk@kernel.org> <jaegeuk.kim@samsung.com> 222 196 Jaegeuk Kim <jaegeuk@kernel.org> <jaegeuk@motorola.com> ··· 245 217 Jayachandran C <c.jayachandran@gmail.com> <jnair@caviumnetworks.com> 246 218 <jean-philippe@linaro.org> <jean-philippe.brucker@arm.com> 247 219 Jean Tourrilhes <jt@hpl.hp.com> 220 + Jeevan Shriram <quic_jshriram@quicinc.com> <jshriram@codeaurora.org> 248 221 Jeff Garzik <jgarzik@pretzel.yyz.us> 249 222 Jeff Layton <jlayton@kernel.org> <jlayton@poochiereds.net> 250 223 Jeff Layton <jlayton@kernel.org> <jlayton@primarydata.com> 251 224 Jeff Layton <jlayton@kernel.org> <jlayton@redhat.com> 225 + Jeffrey Hugo <quic_jhugo@quicinc.com> <jhugo@codeaurora.org> 252 226 Jens Axboe <axboe@kernel.dk> <axboe@suse.de> 253 227 Jens Axboe <axboe@kernel.dk> <jens.axboe@oracle.com> 254 228 Jens Axboe <axboe@kernel.dk> <axboe@fb.com> ··· 258 228 Jens Osterkamp <Jens.Osterkamp@de.ibm.com> 259 229 Jernej Skrabec <jernej.skrabec@gmail.com> <jernej.skrabec@siol.net> 260 230 Jessica Zhang <quic_jesszhan@quicinc.com> <jesszhan@codeaurora.org> 231 + Jilai Wang <quic_jilaiw@quicinc.com> <jilaiw@codeaurora.org> 261 232 Jiri Pirko <jiri@resnulli.us> <jiri@nvidia.com> 262 233 Jiri Pirko <jiri@resnulli.us> <jiri@mellanox.com> 263 234 Jiri Pirko <jiri@resnulli.us> <jpirko@redhat.com> ··· 269 238 Jiri Slaby <jirislaby@kernel.org> <xslaby@fi.muni.cz> 270 239 Jisheng Zhang <jszhang@kernel.org> <jszhang@marvell.com> 271 240 Jisheng Zhang <jszhang@kernel.org> <Jisheng.Zhang@synaptics.com> 241 + Jishnu Prakash <quic_jprakash@quicinc.com> <jprakash@codeaurora.org> 272 242 Johan Hovold <johan@kernel.org> <jhovold@gmail.com> 273 243 Johan Hovold <johan@kernel.org> <johan@hovoldconsulting.com> 274 244 John Crispin <john@phrozen.org> <blogic@openwrt.org> ··· 288 256 <josh@joshtriplett.org> <josht@vnet.ibm.com> 289 257 Josh Poimboeuf <jpoimboe@kernel.org> <jpoimboe@redhat.com> 290 258 Josh Poimboeuf <jpoimboe@kernel.org> <jpoimboe@us.ibm.com> 259 + Jouni Malinen <quic_jouni@quicinc.com> <jouni@codeaurora.org> 291 260 Juha Yrjola <at solidboot.com> 292 261 Juha Yrjola <juha.yrjola@nokia.com> 293 262 Juha Yrjola <juha.yrjola@solidboot.com> ··· 296 263 Iskren Chernev <me@iskren.info> <iskren.chernev@gmail.com> 297 264 Kalle Valo <kvalo@kernel.org> <kvalo@codeaurora.org> 298 265 Kalyan Thota <quic_kalyant@quicinc.com> <kalyan_t@codeaurora.org> 266 + Karthikeyan Periyasamy <quic_periyasa@quicinc.com> <periyasa@codeaurora.org> 267 + Kathiravan T <quic_kathirav@quicinc.com> <kathirav@codeaurora.org> 299 268 Kay Sievers <kay.sievers@vrfy.org> 300 269 Kees Cook <keescook@chromium.org> <kees.cook@canonical.com> 301 270 Kees Cook <keescook@chromium.org> <keescook@google.com> ··· 306 271 Keith Busch <kbusch@kernel.org> <keith.busch@intel.com> 307 272 Keith Busch <kbusch@kernel.org> <keith.busch@linux.intel.com> 308 273 Kenneth W Chen <kenneth.w.chen@intel.com> 274 + Kenneth Westfield <quic_kwestfie@quicinc.com> <kwestfie@codeaurora.org> 275 + Kiran Gunda <quic_kgunda@quicinc.com> <kgunda@codeaurora.org> 309 276 Kirill Tkhai <tkhai@ya.ru> <ktkhai@virtuozzo.com> 310 277 Konstantin Khlebnikov <koct9i@gmail.com> <khlebnikov@yandex-team.ru> 311 278 Konstantin Khlebnikov <koct9i@gmail.com> <k.khlebnikov@samsung.com> ··· 316 279 Krzysztof Kozlowski <krzk@kernel.org> 
<k.kozlowski.k@gmail.com> 317 280 Krzysztof Kozlowski <krzk@kernel.org> <k.kozlowski@samsung.com> 318 281 Krzysztof Kozlowski <krzk@kernel.org> <krzysztof.kozlowski@canonical.com> 282 + Kshitiz Godara <quic_kgodara@quicinc.com> <kgodara@codeaurora.org> 319 283 Kuninori Morimoto <kuninori.morimoto.gx@renesas.com> 320 284 Kuogee Hsieh <quic_khsieh@quicinc.com> <khsieh@codeaurora.org> 321 285 Lee Jones <lee@kernel.org> <joneslee@google.com> ··· 330 292 Leon Romanovsky <leon@kernel.org> <leon@leon.nu> 331 293 Leon Romanovsky <leon@kernel.org> <leonro@mellanox.com> 332 294 Leon Romanovsky <leon@kernel.org> <leonro@nvidia.com> 295 + Liam Mark <quic_lmark@quicinc.com> <lmark@codeaurora.org> 333 296 Linas Vepstas <linas@austin.ibm.com> 334 297 Linus Lüssing <linus.luessing@c0d3.blue> <linus.luessing@ascom.ch> 335 298 Linus Lüssing <linus.luessing@c0d3.blue> <linus.luessing@web.de> 336 299 <linux-hardening@vger.kernel.org> <kernel-hardening@lists.openwall.com> 337 300 Li Yang <leoyang.li@nxp.com> <leoli@freescale.com> 338 301 Li Yang <leoyang.li@nxp.com> <leo@zh-kernel.org> 302 + Lior David <quic_liord@quicinc.com> <liord@codeaurora.org> 339 303 Lorenzo Pieralisi <lpieralisi@kernel.org> <lorenzo.pieralisi@arm.com> 340 304 Luca Ceresoli <luca.ceresoli@bootlin.com> <luca@lucaceresoli.net> 341 305 Lukasz Luba <lukasz.luba@arm.com> <l.luba@partner.samsung.com> 306 + Luo Jie <quic_luoj@quicinc.com> <luoj@codeaurora.org> 342 307 Maciej W. Rozycki <macro@mips.com> <macro@imgtec.com> 343 308 Maciej W. Rozycki <macro@orcam.me.uk> <macro@linux-mips.org> 309 + Maharaja Kennadyrajan <quic_mkenna@quicinc.com> <mkenna@codeaurora.org> 310 + Maheshwar Ajja <quic_majja@quicinc.com> <majja@codeaurora.org> 311 + Malathi Gottam <quic_mgottam@quicinc.com> <mgottam@codeaurora.org> 312 + Manikanta Pubbisetty <quic_mpubbise@quicinc.com> <mpubbise@codeaurora.org> 344 313 Manivannan Sadhasivam <mani@kernel.org> <manivannanece23@gmail.com> 345 314 Manivannan Sadhasivam <mani@kernel.org> <manivannan.sadhasivam@linaro.org> 315 + Manoj Basapathi <quic_manojbm@quicinc.com> <manojbm@codeaurora.org> 346 316 Marcin Nowakowski <marcin.nowakowski@mips.com> <marcin.nowakowski@imgtec.com> 347 317 Marc Zyngier <maz@kernel.org> <marc.zyngier@arm.com> 348 318 Marek Behún <kabel@kernel.org> <marek.behun@nic.cz> ··· 380 334 Matt Ranostay <mranostay@gmail.com> Matthew Ranostay <mranostay@embeddedalley.com> 381 335 Matt Ranostay <mranostay@gmail.com> <matt.ranostay@intel.com> 382 336 Matt Redfearn <matt.redfearn@mips.com> <matt.redfearn@imgtec.com> 337 + Maulik Shah <quic_mkshah@quicinc.com> <mkshah@codeaurora.org> 383 338 Mauro Carvalho Chehab <mchehab@kernel.org> <maurochehab@gmail.com> 384 339 Mauro Carvalho Chehab <mchehab@kernel.org> <mchehab@brturbo.com.br> 385 340 Mauro Carvalho Chehab <mchehab@kernel.org> <mchehab@infradead.org> ··· 393 346 Maxime Ripard <mripard@kernel.org> <maxime@cerno.tech> 394 347 Maxime Ripard <mripard@kernel.org> <maxime.ripard@bootlin.com> 395 348 Maxime Ripard <mripard@kernel.org> <maxime.ripard@free-electrons.com> 349 + Maya Erez <quic_merez@quicinc.com> <merez@codeaurora.org> 396 350 Mayuresh Janorkar <mayur@ti.com> 351 + Md Sadre Alam <quic_mdalam@quicinc.com> <mdalam@codeaurora.org> 352 + Miaoqing Pan <quic_miaoqing@quicinc.com> <miaoqing@codeaurora.org> 397 353 Michael Buesch <m@bues.ch> 398 354 Michal Simek <michal.simek@amd.com> <michal.simek@xilinx.com> 399 355 Michel Dänzer <michel@tungstengraphics.com> ··· 407 357 Mike Rapoport <rppt@kernel.org> <mike@compulab.co.il> 408 358 Mike Rapoport 
<rppt@kernel.org> <mike.rapoport@gmail.com> 409 359 Mike Rapoport <rppt@kernel.org> <rppt@linux.ibm.com> 360 + Mike Tipton <quic_mdtipton@quicinc.com> <mdtipton@codeaurora.org> 410 361 Miodrag Dinic <miodrag.dinic@mips.com> <miodrag.dinic@imgtec.com> 411 362 Miquel Raynal <miquel.raynal@bootlin.com> <miquel.raynal@free-electrons.com> 412 363 Mitesh shah <mshah@teja.com> ··· 416 365 Morten Welinder <welinder@anemone.rentec.com> 417 366 Morten Welinder <welinder@darter.rentec.com> 418 367 Morten Welinder <welinder@troll.com> 368 + Mukesh Ojha <quic_mojha@quicinc.com> <mojha@codeaurora.org> 369 + Muna Sinada <quic_msinada@quicinc.com> <msinada@codeaurora.org> 370 + Murali Nalajala <quic_mnalajal@quicinc.com> <mnalajal@codeaurora.org> 419 371 Mythri P K <mythripk@ti.com> 420 372 Nadia Yvette Chambers <nyc@holomorphy.com> William Lee Irwin III <wli@holomorphy.com> 421 373 Nathan Chancellor <nathan@kernel.org> <natechancellor@gmail.com> 374 + Neeraj Upadhyay <quic_neeraju@quicinc.com> <neeraju@codeaurora.org> 422 375 Neil Armstrong <neil.armstrong@linaro.org> <narmstrong@baylibre.com> 423 376 Nguyen Anh Quynh <aquynh@gmail.com> 424 377 Nicholas Piggin <npiggin@gmail.com> <npiggen@suse.de> ··· 441 386 Nikolay Aleksandrov <razor@blackwall.org> <nikolay@cumulusnetworks.com> 442 387 Nikolay Aleksandrov <razor@blackwall.org> <nikolay@nvidia.com> 443 388 Nikolay Aleksandrov <razor@blackwall.org> <nikolay@isovalent.com> 389 + Odelu Kukatla <quic_okukatla@quicinc.com> <okukatla@codeaurora.org> 444 390 Oleksandr Natalenko <oleksandr@natalenko.name> <oleksandr@redhat.com> 445 391 Oleksij Rempel <linux@rempel-privat.de> <bug-track@fisher-privat.net> 446 392 Oleksij Rempel <linux@rempel-privat.de> <external.Oleksij.Rempel@de.bosch.com> ··· 449 393 Oleksij Rempel <linux@rempel-privat.de> <o.rempel@pengutronix.de> 450 394 Oleksij Rempel <linux@rempel-privat.de> <ore@pengutronix.de> 451 395 Oliver Upton <oliver.upton@linux.dev> <oupton@google.com> 396 + Oza Pawandeep <quic_poza@quicinc.com> <poza@codeaurora.org> 452 397 Pali Rohár <pali@kernel.org> <pali.rohar@gmail.com> 453 398 Paolo 'Blaisorblade' Giarrusso <blaisorblade@yahoo.it> 454 399 Patrick Mochel <mochel@digitalimplant.org> ··· 461 404 Paul E. McKenney <paulmck@kernel.org> <paulmck@us.ibm.com> 462 405 Paul Mackerras <paulus@ozlabs.org> <paulus@samba.org> 463 406 Paul Mackerras <paulus@ozlabs.org> <paulus@au1.ibm.com> 407 + Pavankumar Kondeti <quic_pkondeti@quicinc.com> <pkondeti@codeaurora.org> 464 408 Peter A Jonsson <pj@ludd.ltu.se> 465 409 Peter Oruba <peter.oruba@amd.com> 466 410 Peter Oruba <peter@oruba.de> 467 411 Pratyush Anand <pratyush.anand@gmail.com> <pratyush.anand@st.com> 468 412 Praveen BP <praveenbp@ti.com> 413 + Pradeep Kumar Chitrapu <quic_pradeepc@quicinc.com> <pradeepc@codeaurora.org> 414 + Prasad Sodagudi <quic_psodagud@quicinc.com> <psodagud@codeaurora.org> 469 415 Punit Agrawal <punitagrawal@gmail.com> <punit.agrawal@arm.com> 470 416 Qais Yousef <qyousef@layalina.io> <qais.yousef@imgtec.com> 471 417 Qais Yousef <qyousef@layalina.io> <qais.yousef@arm.com> ··· 477 417 Rafael J. 
Wysocki <rjw@rjwysocki.net> <rjw@sisk.pl> 478 418 Rajeev Nandan <quic_rajeevny@quicinc.com> <rajeevny@codeaurora.org> 479 419 Rajendra Nayak <quic_rjendra@quicinc.com> <rnayak@codeaurora.org> 420 + Rajeshwari Ravindra Kamble <quic_rkambl@quicinc.com> <rkambl@codeaurora.org> 421 + Raju P.L.S.S.S.N <quic_rplsssn@quicinc.com> <rplsssn@codeaurora.org> 480 422 Rajesh Shah <rajesh.shah@intel.com> 423 + Rakesh Pillai <quic_pillair@quicinc.com> <pillair@codeaurora.org> 481 424 Ralf Baechle <ralf@linux-mips.org> 482 425 Ralf Wildenhues <Ralf.Wildenhues@gmx.de> 426 + Ram Chandra Jangir <quic_rjangir@quicinc.com> <rjangir@codeaurora.org> 483 427 Randy Dunlap <rdunlap@infradead.org> <rdunlap@xenotime.net> 428 + Ravi Kumar Bokka <quic_rbokka@quicinc.com> <rbokka@codeaurora.org> 429 + Ravi Kumar Siddojigari <quic_rsiddoji@quicinc.com> <rsiddoji@codeaurora.org> 484 430 Rémi Denis-Courmont <rdenis@simphalempin.com> 485 431 Ricardo Ribalda <ribalda@kernel.org> <ricardo@ribalda.com> 486 432 Ricardo Ribalda <ribalda@kernel.org> Ricardo Ribalda Delgado <ribalda@kernel.org> ··· 495 429 Richard Leitner <richard.leitner@linux.dev> <me@g0hl1n.net> 496 430 Richard Leitner <richard.leitner@linux.dev> <richard.leitner@skidata.com> 497 431 Robert Foss <rfoss@kernel.org> <robert.foss@linaro.org> 432 + Rocky Liao <quic_rjliao@quicinc.com> <rjliao@codeaurora.org> 498 433 Roman Gushchin <roman.gushchin@linux.dev> <guro@fb.com> 499 434 Roman Gushchin <roman.gushchin@linux.dev> <guroan@gmail.com> 500 435 Roman Gushchin <roman.gushchin@linux.dev> <klamm@yandex-team.ru> ··· 513 446 Santosh Shilimkar <ssantosh@kernel.org> 514 447 Sarangdhar Joshi <spjoshi@codeaurora.org> 515 448 Sascha Hauer <s.hauer@pengutronix.de> 449 + Sahitya Tummala <quic_stummala@quicinc.com> <stummala@codeaurora.org> 450 + Sathishkumar Muruganandam <quic_murugana@quicinc.com> <murugana@codeaurora.org> 516 451 Satya Priya <quic_c_skakit@quicinc.com> <skakit@codeaurora.org> 517 452 S.Çağlar Onur <caglar@pardus.org.tr> 453 + Sayali Lokhande <quic_sayalil@quicinc.com> <sayalil@codeaurora.org> 518 454 Sean Christopherson <seanjc@google.com> <sean.j.christopherson@intel.com> 519 455 Sean Nyekjaer <sean@geanix.com> <sean.nyekjaer@prevas.dk> 456 + Sean Tranchetti <quic_stranche@quicinc.com> <stranche@codeaurora.org> 520 457 Sebastian Reichel <sre@kernel.org> <sebastian.reichel@collabora.co.uk> 521 458 Sebastian Reichel <sre@kernel.org> <sre@debian.org> 522 459 Sedat Dilek <sedat.dilek@gmail.com> <sedat.dilek@credativ.de> 460 + Senthilkumar N L <quic_snlakshm@quicinc.com> <snlakshm@codeaurora.org> 523 461 Seth Forshee <sforshee@kernel.org> <seth.forshee@canonical.com> 524 462 Shannon Nelson <shannon.nelson@amd.com> <snelson@pensando.io> 525 463 Shannon Nelson <shannon.nelson@amd.com> <shannon.nelson@intel.com> 526 464 Shannon Nelson <shannon.nelson@amd.com> <shannon.nelson@oracle.com> 465 + Sharath Chandra Vurukala <quic_sharathv@quicinc.com> <sharathv@codeaurora.org> 527 466 Shiraz Hashim <shiraz.linux.kernel@gmail.com> <shiraz.hashim@st.com> 528 467 Shuah Khan <shuah@kernel.org> <shuahkhan@gmail.com> 529 468 Shuah Khan <shuah@kernel.org> <shuah.khan@hp.com> 530 469 Shuah Khan <shuah@kernel.org> <shuahkh@osg.samsung.com> 531 470 Shuah Khan <shuah@kernel.org> <shuah.kh@samsung.com> 471 + Sibi Sankar <quic_sibis@quicinc.com> <sibis@codeaurora.org> 472 + Sid Manning <quic_sidneym@quicinc.com> <sidneym@codeaurora.org> 532 473 Simon Arlott <simon@octiron.net> <simon@fire.lp0.eu> 533 474 Simon Kelley <simon@thekelleys.org.uk> 475 + Sricharan Ramabadhran 
<quic_srichara@quicinc.com> <sricharan@codeaurora.org> 476 + Srinivas Ramana <quic_sramana@quicinc.com> <sramana@codeaurora.org> 477 + Sriram R <quic_srirrama@quicinc.com> <srirrama@codeaurora.org> 534 478 Stéphane Witzmann <stephane.witzmann@ubpmes.univ-bpclermont.fr> 535 479 Stephen Hemminger <stephen@networkplumber.org> <shemminger@linux-foundation.org> 536 480 Stephen Hemminger <stephen@networkplumber.org> <shemminger@osdl.org> ··· 549 471 Stephen Hemminger <stephen@networkplumber.org> <sthemmin@vyatta.com> 550 472 Steve Wise <larrystevenwise@gmail.com> <swise@chelsio.com> 551 473 Steve Wise <larrystevenwise@gmail.com> <swise@opengridcomputing.com> 552 - Subash Abhinov Kasiviswanathan <subashab@codeaurora.org> 474 + Subash Abhinov Kasiviswanathan <quic_subashab@quicinc.com> <subashab@codeaurora.org> 475 + Subbaraman Narayanamurthy <quic_subbaram@quicinc.com> <subbaram@codeaurora.org> 553 476 Subhash Jadavani <subhashj@codeaurora.org> 477 + Sudarshan Rajagopalan <quic_sudaraja@quicinc.com> <sudaraja@codeaurora.org> 554 478 Sudeep Holla <sudeep.holla@arm.com> Sudeep KarkadaNagesha <sudeep.karkadanagesha@arm.com> 555 479 Sumit Semwal <sumit.semwal@ti.com> 480 + Surabhi Vishnoi <quic_svishnoi@quicinc.com> <svishnoi@codeaurora.org> 556 481 Takashi YOSHII <takashi.yoshii.zj@renesas.com> 482 + Tamizh Chelvam Raja <quic_tamizhr@quicinc.com> <tamizhr@codeaurora.org> 483 + Taniya Das <quic_tdas@quicinc.com> <tdas@codeaurora.org> 557 484 Tejun Heo <htejun@gmail.com> 558 485 Thomas Graf <tgraf@suug.ch> 559 486 Thomas Körper <socketcan@esd.eu> <thomas.koerper@esd.eu> 560 487 Thomas Pedersen <twp@codeaurora.org> 561 488 Tiezhu Yang <yangtiezhu@loongson.cn> <kernelpatch@126.com> 489 + Tingwei Zhang <quic_tingwei@quicinc.com> <tingwei@codeaurora.org> 490 + Tirupathi Reddy <quic_tirupath@quicinc.com> <tirupath@codeaurora.org> 562 491 Tobias Klauser <tklauser@distanz.ch> <tobias.klauser@gmail.com> 563 492 Tobias Klauser <tklauser@distanz.ch> <klto@zhaw.ch> 564 493 Tobias Klauser <tklauser@distanz.ch> <tklauser@nuerscht.ch> 565 494 Tobias Klauser <tklauser@distanz.ch> <tklauser@xenon.tklauser.home> 566 495 Todor Tomov <todor.too@gmail.com> <todor.tomov@linaro.org> 567 496 Tony Luck <tony.luck@intel.com> 497 + Trilok Soni <quic_tsoni@quicinc.com> <tsoni@codeaurora.org> 568 498 TripleX Chung <xxx.phy@gmail.com> <triplex@zh-kernel.org> 569 499 TripleX Chung <xxx.phy@gmail.com> <zhongyu@18mail.cn> 570 500 Tsuneo Yoshioka <Tsuneo.Yoshioka@f-secure.com> ··· 585 499 Uwe Kleine-König <ukl@pengutronix.de> 586 500 Uwe Kleine-König <Uwe.Kleine-Koenig@digi.com> 587 501 Valdis Kletnieks <Valdis.Kletnieks@vt.edu> 502 + Vara Reddy <quic_varar@quicinc.com> <varar@codeaurora.org> 503 + Varadarajan Narayanan <quic_varada@quicinc.com> <varada@codeaurora.org> 504 + Vasanthakumar Thiagarajan <quic_vthiagar@quicinc.com> <vthiagar@codeaurora.org> 588 505 Vasily Averin <vasily.averin@linux.dev> <vvs@virtuozzo.com> 589 506 Vasily Averin <vasily.averin@linux.dev> <vvs@openvz.org> 590 507 Vasily Averin <vasily.averin@linux.dev> <vvs@parallels.com> 591 508 Vasily Averin <vasily.averin@linux.dev> <vvs@sw.ru> 592 509 Valentin Schneider <vschneid@redhat.com> <valentin.schneider@arm.com> 510 + Veera Sundaram Sankaran <quic_veeras@quicinc.com> <veeras@codeaurora.org> 511 + Veerabhadrarao Badiganti <quic_vbadigan@quicinc.com> <vbadigan@codeaurora.org> 512 + Venkateswara Naralasetty <quic_vnaralas@quicinc.com> <vnaralas@codeaurora.org> 593 513 Vikash Garodia <quic_vgarodia@quicinc.com> <vgarodia@codeaurora.org> 594 514 Vinod Koul 
<vkoul@kernel.org> <vinod.koul@intel.com> 595 515 Vinod Koul <vkoul@kernel.org> <vinod.koul@linux.intel.com> ··· 605 513 Viresh Kumar <vireshk@kernel.org> <viresh.linux@gmail.com> 606 514 Viresh Kumar <viresh.kumar@linaro.org> <viresh.kumar@linaro.org> 607 515 Viresh Kumar <viresh.kumar@linaro.org> <viresh.kumar@linaro.com> 516 + Vivek Aknurwar <quic_viveka@quicinc.com> <viveka@codeaurora.org> 608 517 Vivien Didelot <vivien.didelot@gmail.com> <vivien.didelot@savoirfairelinux.com> 609 518 Vlad Dogaru <ddvlad@gmail.com> <vlad.dogaru@intel.com> 610 519 Vladimir Davydov <vdavydov.dev@gmail.com> <vdavydov@parallels.com> 611 520 Vladimir Davydov <vdavydov.dev@gmail.com> <vdavydov@virtuozzo.com> 612 521 WeiXiong Liao <gmpy.liaowx@gmail.com> <liaoweixiong@allwinnertech.com> 522 + Wen Gong <quic_wgong@quicinc.com> <wgong@codeaurora.org> 523 + Wesley Cheng <quic_wcheng@quicinc.com> <wcheng@codeaurora.org> 613 524 Will Deacon <will@kernel.org> <will.deacon@arm.com> 614 525 Wolfram Sang <wsa@kernel.org> <w.sang@pengutronix.de> 615 526 Wolfram Sang <wsa@kernel.org> <wsa@the-dreams.de>
+11
Documentation/ABI/testing/sysfs-module
··· 60 60 C staging driver module 61 61 E unsigned module 62 62 == ===================== 63 + 64 + What: /sys/module/grant_table/parameters/free_per_iteration 65 + Date: July 2023 66 + KernelVersion: 6.5 but backported to all supported stable branches 67 + Contact: Xen developer discussion <xen-devel@lists.xenproject.org> 68 + Description: Read and write number of grant entries to attempt to free per iteration. 69 + 70 + Note: Future versions of Xen and Linux may provide a better 71 + interface for controlling the rate of deferred grant reclaim 72 + or may not need it at all. 73 + Users: Qubes OS (https://www.qubes-os.org)
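A minimal sketch of driving the knob documented above from userspace. The parameter path is the one given in the diff; doubling the value is an arbitrary illustration, writing requires root, and the file only exists when the Xen grant_table code is active:

/* Read, then raise, the deferred grant-reclaim batch size. */
#include <stdio.h>

int main(void)
{
	const char *p = "/sys/module/grant_table/parameters/free_per_iteration";
	unsigned long val;
	FILE *f = fopen(p, "r");

	if (!f || fscanf(f, "%lu", &val) != 1)
		return 1;
	fclose(f);
	printf("current: %lu\n", val);

	f = fopen(p, "w");		/* needs root */
	if (!f)
		return 1;
	fprintf(f, "%lu\n", val * 2);	/* example only: double the batch */
	return fclose(f) ? 1 : 0;
}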
+1 -1
Documentation/admin-guide/devices.txt
··· 2691 2691 45 = /dev/ttyMM1 Marvell MPSC - port 1 (obsolete unused) 2692 2692 46 = /dev/ttyCPM0 PPC CPM (SCC or SMC) - port 0 2693 2693 ... 2694 - 47 = /dev/ttyCPM5 PPC CPM (SCC or SMC) - port 5 2694 + 49 = /dev/ttyCPM5 PPC CPM (SCC or SMC) - port 3 2695 2695 50 = /dev/ttyIOC0 Altix serial card 2696 2696 ... 2697 2697 81 = /dev/ttyIOC31 Altix serial card
+7 -4
Documentation/admin-guide/hw-vuln/spectre.rst
··· 484 484 485 485 Systems which support enhanced IBRS (eIBRS) enable IBRS protection once at 486 486 boot, by setting the IBRS bit, and they're automatically protected against 487 - Spectre v2 variant attacks, including cross-thread branch target injections 488 - on SMT systems (STIBP). In other words, eIBRS enables STIBP too. 487 + Spectre v2 variant attacks. 489 488 490 - Legacy IBRS systems clear the IBRS bit on exit to userspace and 491 - therefore explicitly enable STIBP for that 489 + On Intel's enhanced IBRS systems, this includes cross-thread branch target 490 + injections on SMT systems (STIBP). In other words, Intel eIBRS enables 491 + STIBP, too. 492 + 493 + AMD Automatic IBRS does not protect userspace, and Legacy IBRS systems clear 494 + the IBRS bit on exit to userspace, therefore both explicitly enable STIBP. 492 495 493 496 The retpoline mitigation is turned on by default on vulnerable 494 497 CPUs. It can be forced on or off by the administrator
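Which of these mitigations (eIBRS, Automatic IBRS, STIBP, retpoline) is actually in effect is reported through sysfs. A small sketch that prints it, assuming the standard vulnerabilities interface that spectre.rst describes elsewhere:

/* Print the kernel's reported Spectre v2 mitigation state. */
#include <stdio.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/sys/devices/system/cpu/vulnerabilities/spectre_v2", "r");

	if (!f)
		return 1;
	if (fgets(line, sizeof(line), f))
		fputs(line, stdout);	/* e.g. "Mitigation: Enhanced / Automatic IBRS; ..." */
	return fclose(f) ? 1 : 0;
}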
+3
Documentation/arch/arm64/silicon-errata.rst
··· 148 148 | ARM | MMU-700 | #2268618,2812531| N/A | 149 149 +----------------+-----------------+-----------------+-----------------------------+ 150 150 +----------------+-----------------+-----------------+-----------------------------+ 151 + | ARM | GIC-700 | #2941627 | ARM64_ERRATUM_2941627 | 152 + +----------------+-----------------+-----------------+-----------------------------+ 153 + +----------------+-----------------+-----------------+-----------------------------+ 151 154 | Broadcom | Brahma-B53 | N/A | ARM64_ERRATUM_845719 | 152 155 +----------------+-----------------+-----------------+-----------------------------+ 153 156 | Broadcom | Brahma-B53 | N/A | ARM64_ERRATUM_843419 |
+19 -26
Documentation/filesystems/tmpfs.rst
··· 84 84 is half of the number of your physical RAM pages, or (on a 85 85 machine with highmem) the number of lowmem RAM pages, 86 86 whichever is the lower. 87 - noswap Disables swap. Remounts must respect the original settings. 88 - By default swap is enabled. 89 87 ========= ============================================================ 90 88 91 89 These parameters accept a suffix k, m or g for kilo, mega and giga and ··· 97 99 use up all the memory on the machine; but enhances the scalability of 98 100 that instance in a system with many CPUs making intensive use of it. 99 101 102 + tmpfs blocks may be swapped out, when there is a shortage of memory. 103 + tmpfs has a mount option to disable its use of swap: 104 + 105 + ====== =========================================================== 106 + noswap Disables swap. Remounts must respect the original settings. 107 + By default swap is enabled. 108 + ====== =========================================================== 109 + 100 110 tmpfs also supports Transparent Huge Pages which requires a kernel 101 111 configured with CONFIG_TRANSPARENT_HUGEPAGE and with huge supported for 102 112 your system (has_transparent_hugepage(), which is architecture specific). 103 113 The mount options for this are: 104 114 105 - ====== ============================================================ 106 - huge=0 never: disables huge pages for the mount 107 - huge=1 always: enables huge pages for the mount 108 - huge=2 within_size: only allocate huge pages if the page will be 109 - fully within i_size, also respect fadvise()/madvise() hints. 110 - huge=3 advise: only allocate huge pages if requested with 111 - fadvise()/madvise() 112 - ====== ============================================================ 115 + ================ ============================================================== 116 + huge=never Do not allocate huge pages. This is the default. 117 + huge=always Attempt to allocate huge page every time a new page is needed. 118 + huge=within_size Only allocate huge page if it will be fully within i_size. 119 + Also respect madvise(2) hints. 120 + huge=advise Only allocate huge page if requested with madvise(2). 121 + ================ ============================================================== 113 122 114 - There is a sysfs file which you can also use to control system wide THP 115 - configuration for all tmpfs mounts, the file is: 116 - 117 - /sys/kernel/mm/transparent_hugepage/shmem_enabled 118 - 119 - This sysfs file is placed on top of THP sysfs directory and so is registered 120 - by THP code. It is however only used to control all tmpfs mounts with one 121 - single knob. Since it controls all tmpfs mounts it should only be used either 122 - for emergency or testing purposes. The values you can set for shmem_enabled are: 123 - 124 - == ============================================================ 125 - -1 deny: disables huge on shm_mnt and all mounts, for 126 - emergency use 127 - -2 force: enables huge on shm_mnt and all mounts, w/o needing 128 - option, for testing 129 - == ============================================================ 123 + See also Documentation/admin-guide/mm/transhuge.rst, which describes the 124 + sysfs file /sys/kernel/mm/transparent_hugepage/shmem_enabled: which can 125 + be used to deny huge pages on all tmpfs mounts in an emergency, or to 126 + force huge pages on all tmpfs mounts for testing. 
130 127 131 128 tmpfs has a mount option to set the NUMA memory allocation policy for 132 129 all files in that instance (if CONFIG_NUMA is enabled) - which can be
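A sketch of selecting one of the huge= options above at mount time via mount(2). The mount point and size are placeholder values, and the call needs CAP_SYS_ADMIN plus a kernel built with CONFIG_TRANSPARENT_HUGEPAGE:

/* Mount a tmpfs instance with huge page allocation within i_size. */
#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	if (mount("tmpfs", "/mnt/tmp", "tmpfs", 0,
		  "size=1g,huge=within_size") != 0) {
		perror("mount");
		return 1;
	}
	puts("tmpfs mounted with huge=within_size");
	return 0;
}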
+7 -6
Documentation/networking/napi.rst
··· 65 65 packets but should only process up to ``budget`` number of 66 66 Rx packets. Rx processing is usually much more expensive. 67 67 68 - In other words, it is recommended to ignore the budget argument when 69 - performing TX buffer reclamation to ensure that the reclamation is not 70 - arbitrarily bounded; however, it is required to honor the budget argument 71 - for RX processing. 68 + In other words for Rx processing the ``budget`` argument limits how many 69 + packets driver can process in a single poll. Rx specific APIs like page 70 + pool or XDP cannot be used at all when ``budget`` is 0. 71 + skb Tx processing should happen regardless of the ``budget``, but if 72 + the argument is 0 driver cannot call any XDP (or page pool) APIs. 72 73 73 74 .. warning:: 74 75 75 - The ``budget`` argument may be 0 if core tries to only process Tx completions 76 - and no Rx packets. 76 + The ``budget`` argument may be 0 if core tries to only process 77 + skb Tx completions and no Rx or XDP packets. 77 78 78 79 The poll method returns the amount of work done. If the driver still 79 80 has outstanding work to do (e.g. ``budget`` was exhausted)
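The budget rules above translate into a poll callback shaped roughly like the following kernel-side sketch. struct my_ring and the my_clean_tx()/my_clean_rx() helpers are hypothetical stand-ins for driver specifics; napi_complete_done() is the real completion API:

/* Tx completions are reclaimed unconditionally, Rx work is bounded by
 * budget, and budget == 0 means "skb Tx only": no Rx processing and no
 * XDP or page pool calls at all. */
static int my_napi_poll(struct napi_struct *napi, int budget)
{
	struct my_ring *ring = container_of(napi, struct my_ring, napi);
	int work_done = 0;

	my_clean_tx(ring);			/* always: skb Tx reclaim */

	if (budget)				/* budget == 0: skip Rx entirely */
		work_done = my_clean_rx(ring, budget);

	if (work_done < budget)
		napi_complete_done(napi, work_done);

	return work_done;			/* == budget keeps us scheduled */
}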
-3
Documentation/process/embargoed-hardware-issues.rst
··· 254 254 Samsung Javier González <javier.gonz@samsung.com> 255 255 256 256 Microsoft James Morris <jamorris@linux.microsoft.com> 257 - VMware 258 257 Xen Andrew Cooper <andrew.cooper3@citrix.com> 259 258 260 259 Canonical John Johansen <john.johansen@canonical.com> ··· 262 263 Red Hat Josh Poimboeuf <jpoimboe@redhat.com> 263 264 SUSE Jiri Kosina <jkosina@suse.cz> 264 265 265 - Amazon 266 266 Google Kees Cook <keescook@chromium.org> 267 267 268 - GCC 269 268 LLVM Nick Desaulniers <ndesaulniers@google.com> 270 269 ============= ======================================================== 271 270
+17 -20
Documentation/process/security-bugs.rst
··· 63 63 of the report are treated confidentially even after the embargo has been 64 64 lifted, in perpetuity. 65 65 66 - Coordination 67 - ------------ 66 + Coordination with other groups 67 + ------------------------------ 68 68 69 - Fixes for sensitive bugs, such as those that might lead to privilege 70 - escalations, may need to be coordinated with the private 71 - <linux-distros@vs.openwall.org> mailing list so that distribution vendors 72 - are well prepared to issue a fixed kernel upon public disclosure of the 73 - upstream fix. Distros will need some time to test the proposed patch and 74 - will generally request at least a few days of embargo, and vendor update 75 - publication prefers to happen Tuesday through Thursday. When appropriate, 76 - the security team can assist with this coordination, or the reporter can 77 - include linux-distros from the start. In this case, remember to prefix 78 - the email Subject line with "[vs]" as described in the linux-distros wiki: 79 - <http://oss-security.openwall.org/wiki/mailing-lists/distros#how-to-use-the-lists> 69 + The kernel security team strongly recommends that reporters of potential 70 + security issues NEVER contact the "linux-distros" mailing list until 71 + AFTER discussing it with the kernel security team. Do not Cc: both 72 + lists at once. You may contact the linux-distros mailing list after a 73 + fix has been agreed on and you fully understand the requirements that 74 + doing so will impose on you and the kernel community. 75 + 76 + The different lists have different goals and the linux-distros rules do 77 + not contribute to actually fixing any potential security problems. 80 78 81 79 CVE assignment 82 80 -------------- 83 81 84 - The security team does not normally assign CVEs, nor do we require them 85 - for reports or fixes, as this can needlessly complicate the process and 86 - may delay the bug handling. If a reporter wishes to have a CVE identifier 87 - assigned ahead of public disclosure, they will need to contact the private 88 - linux-distros list, described above. When such a CVE identifier is known 89 - before a patch is provided, it is desirable to mention it in the commit 90 - message if the reporter agrees. 82 + The security team does not assign CVEs, nor do we require them for 83 + reports or fixes, as this can needlessly complicate the process and may 84 + delay the bug handling. If a reporter wishes to have a CVE identifier 85 + assigned, they should find one by themselves, for example by contacting 86 + MITRE directly. However under no circumstances will a patch inclusion 87 + be delayed to wait for a CVE identifier to arrive. 91 88 92 89 Non-disclosure agreements 93 90 -------------------------
+9 -2
MAINTAINERS
··· 4463 4463 M: Peter Chen <peter.chen@kernel.org> 4464 4464 M: Pawel Laszczak <pawell@cadence.com> 4465 4465 R: Roger Quadros <rogerq@kernel.org> 4466 - R: Aswath Govindraju <a-govindraju@ti.com> 4467 4466 L: linux-usb@vger.kernel.org 4468 4467 S: Maintained 4469 4468 T: git git://git.kernel.org/pub/scm/linux/kernel/git/peter.chen/usb.git ··· 5148 5149 F: include/linux/compiler_attributes.h 5149 5150 5150 5151 COMPUTE EXPRESS LINK (CXL) 5152 + M: Davidlohr Bueso <dave@stgolabs.net> 5153 + M: Jonathan Cameron <jonathan.cameron@huawei.com> 5154 + M: Dave Jiang <dave.jiang@intel.com> 5151 5155 M: Alison Schofield <alison.schofield@intel.com> 5152 5156 M: Vishal Verma <vishal.l.verma@intel.com> 5153 5157 M: Ira Weiny <ira.weiny@intel.com> 5154 - M: Ben Widawsky <bwidawsk@kernel.org> 5155 5158 M: Dan Williams <dan.j.williams@intel.com> 5156 5159 L: linux-cxl@vger.kernel.org 5157 5160 S: Maintained ··· 21642 21641 TTY LAYER 21643 21642 M: Greg Kroah-Hartman <gregkh@linuxfoundation.org> 21644 21643 M: Jiri Slaby <jirislaby@kernel.org> 21644 + L: linux-kernel@vger.kernel.org 21645 + L: linux-serial@vger.kernel.org 21645 21646 S: Supported 21646 21647 T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/tty.git 21647 21648 F: Documentation/driver-api/serial/ 21648 21649 F: drivers/tty/ 21650 + F: drivers/tty/serial/serial_base.h 21651 + F: drivers/tty/serial/serial_base_bus.c 21649 21652 F: drivers/tty/serial/serial_core.c 21653 + F: drivers/tty/serial/serial_ctrl.c 21654 + F: drivers/tty/serial/serial_port.c 21650 21655 F: include/linux/selection.h 21651 21656 F: include/linux/serial.h 21652 21657 F: include/linux/serial_core.h
+1 -1
Makefile
··· 2 2 VERSION = 6 3 3 PATCHLEVEL = 5 4 4 SUBLEVEL = 0 5 - EXTRAVERSION = -rc3 5 + EXTRAVERSION = -rc4 6 6 NAME = Hurr durr I'ma ninja sloth 7 7 8 8 # *DOCUMENTATION*
+1 -1
arch/arm/configs/axm55xx_defconfig
··· 197 197 CONFIG_EXT3_FS=y 198 198 # CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set 199 199 CONFIG_EXT4_FS=y 200 - CONFIG_AUTOFS4_FS=y 200 + CONFIG_AUTOFS_FS=y 201 201 CONFIG_FUSE_FS=y 202 202 CONFIG_CUSE=y 203 203 CONFIG_FSCACHE=y
+1 -1
arch/arm/configs/davinci_all_defconfig
··· 232 232 CONFIG_EXT3_FS=y 233 233 CONFIG_EXT4_FS_POSIX_ACL=y 234 234 CONFIG_XFS_FS=m 235 - CONFIG_AUTOFS4_FS=m 235 + CONFIG_AUTOFS_FS=m 236 236 CONFIG_MSDOS_FS=y 237 237 CONFIG_VFAT_FS=y 238 238 CONFIG_TMPFS=y
+1 -1
arch/arm/configs/exynos_defconfig
··· 327 327 CONFIG_PHY_EXYNOS5250_SATA=y 328 328 CONFIG_EXT2_FS=y 329 329 CONFIG_EXT4_FS=y 330 - CONFIG_AUTOFS4_FS=y 330 + CONFIG_AUTOFS_FS=y 331 331 CONFIG_MSDOS_FS=y 332 332 CONFIG_VFAT_FS=y 333 333 CONFIG_TMPFS=y
+1 -1
arch/arm/configs/footbridge_defconfig
··· 94 94 CONFIG_LEDS_TRIGGERS=y 95 95 CONFIG_LEDS_TRIGGER_TIMER=y 96 96 CONFIG_EXT2_FS=y 97 - CONFIG_AUTOFS4_FS=y 97 + CONFIG_AUTOFS_FS=y 98 98 CONFIG_ISO9660_FS=m 99 99 CONFIG_JOLIET=y 100 100 CONFIG_MSDOS_FS=m
+1 -1
arch/arm/configs/imx_v6_v7_defconfig
··· 442 442 CONFIG_QUOTA=y 443 443 CONFIG_QUOTA_NETLINK_INTERFACE=y 444 444 # CONFIG_PRINT_QUOTA_WARNING is not set 445 - CONFIG_AUTOFS4_FS=y 445 + CONFIG_AUTOFS_FS=y 446 446 CONFIG_FUSE_FS=y 447 447 CONFIG_ISO9660_FS=m 448 448 CONFIG_JOLIET=y
+1 -1
arch/arm/configs/keystone_defconfig
··· 207 207 CONFIG_EXT4_FS=y 208 208 CONFIG_EXT4_FS_POSIX_ACL=y 209 209 CONFIG_FANOTIFY=y 210 - CONFIG_AUTOFS4_FS=y 210 + CONFIG_AUTOFS_FS=y 211 211 CONFIG_MSDOS_FS=y 212 212 CONFIG_VFAT_FS=y 213 213 CONFIG_NTFS_FS=y
+1 -1
arch/arm/configs/lpc32xx_defconfig
··· 162 162 CONFIG_PWM=y 163 163 CONFIG_PWM_LPC32XX=y 164 164 CONFIG_EXT2_FS=y 165 - CONFIG_AUTOFS4_FS=y 165 + CONFIG_AUTOFS_FS=y 166 166 CONFIG_MSDOS_FS=y 167 167 CONFIG_VFAT_FS=y 168 168 CONFIG_TMPFS=y
+1 -1
arch/arm/configs/milbeaut_m10v_defconfig
··· 81 81 CONFIG_MEMORY=y 82 82 # CONFIG_ARM_PMU is not set 83 83 CONFIG_EXT4_FS=y 84 - CONFIG_AUTOFS4_FS=y 84 + CONFIG_AUTOFS_FS=y 85 85 CONFIG_MSDOS_FS=y 86 86 CONFIG_VFAT_FS=y 87 87 CONFIG_NTFS_FS=y
+1 -1
arch/arm/configs/multi_v7_defconfig
··· 1226 1226 CONFIG_STM32_TIMER_CNT=m 1227 1227 CONFIG_STM32_LPTIMER_CNT=m 1228 1228 CONFIG_EXT4_FS=y 1229 - CONFIG_AUTOFS4_FS=y 1229 + CONFIG_AUTOFS_FS=y 1230 1230 CONFIG_MSDOS_FS=y 1231 1231 CONFIG_VFAT_FS=y 1232 1232 CONFIG_NTFS_FS=y
+1 -1
arch/arm/configs/omap1_defconfig
··· 188 188 CONFIG_EXT2_FS=y 189 189 CONFIG_EXT3_FS=y 190 190 # CONFIG_DNOTIFY is not set 191 - CONFIG_AUTOFS4_FS=y 191 + CONFIG_AUTOFS_FS=y 192 192 CONFIG_ISO9660_FS=y 193 193 CONFIG_JOLIET=y 194 194 CONFIG_MSDOS_FS=y
+1 -1
arch/arm/configs/omap2plus_defconfig
··· 678 678 CONFIG_FANOTIFY=y 679 679 CONFIG_QUOTA=y 680 680 CONFIG_QFMT_V2=y 681 - CONFIG_AUTOFS4_FS=m 681 + CONFIG_AUTOFS_FS=m 682 682 CONFIG_MSDOS_FS=y 683 683 CONFIG_VFAT_FS=y 684 684 CONFIG_TMPFS=y
+1 -1
arch/arm/configs/pxa_defconfig
··· 589 589 CONFIG_REISERFS_FS_POSIX_ACL=y 590 590 CONFIG_REISERFS_FS_SECURITY=y 591 591 CONFIG_XFS_FS=m 592 - CONFIG_AUTOFS4_FS=m 592 + CONFIG_AUTOFS_FS=m 593 593 CONFIG_FUSE_FS=m 594 594 CONFIG_CUSE=m 595 595 CONFIG_FSCACHE=y
+1 -1
arch/arm/configs/rpc_defconfig
··· 79 79 CONFIG_RTC_DRV_PCF8583=y 80 80 CONFIG_EXT2_FS=y 81 81 CONFIG_EXT3_FS=y 82 - CONFIG_AUTOFS4_FS=m 82 + CONFIG_AUTOFS_FS=m 83 83 CONFIG_ISO9660_FS=y 84 84 CONFIG_JOLIET=y 85 85 CONFIG_MSDOS_FS=m
+1 -1
arch/arm/configs/s5pv210_defconfig
··· 103 103 CONFIG_PHY_S5PV210_USB2=y 104 104 CONFIG_EXT2_FS=y 105 105 CONFIG_EXT4_FS=y 106 - CONFIG_AUTOFS4_FS=y 106 + CONFIG_AUTOFS_FS=y 107 107 CONFIG_MSDOS_FS=y 108 108 CONFIG_VFAT_FS=y 109 109 CONFIG_TMPFS=y
+1 -1
arch/arm/configs/socfpga_defconfig
··· 136 136 CONFIG_EXT2_FS_XATTR=y 137 137 CONFIG_EXT2_FS_POSIX_ACL=y 138 138 CONFIG_EXT3_FS=y 139 - CONFIG_AUTOFS4_FS=y 139 + CONFIG_AUTOFS_FS=y 140 140 CONFIG_VFAT_FS=y 141 141 CONFIG_NTFS_FS=y 142 142 CONFIG_NTFS_RW=y
+1 -1
arch/arm/configs/spear13xx_defconfig
··· 85 85 CONFIG_EXT2_FS_SECURITY=y 86 86 CONFIG_EXT3_FS=y 87 87 CONFIG_EXT3_FS_SECURITY=y 88 - CONFIG_AUTOFS4_FS=m 88 + CONFIG_AUTOFS_FS=m 89 89 CONFIG_FUSE_FS=y 90 90 CONFIG_MSDOS_FS=m 91 91 CONFIG_VFAT_FS=m
+1 -1
arch/arm/configs/spear3xx_defconfig
··· 68 68 CONFIG_EXT2_FS_SECURITY=y 69 69 CONFIG_EXT3_FS=y 70 70 CONFIG_EXT3_FS_SECURITY=y 71 - CONFIG_AUTOFS4_FS=m 71 + CONFIG_AUTOFS_FS=m 72 72 CONFIG_MSDOS_FS=m 73 73 CONFIG_VFAT_FS=m 74 74 CONFIG_FAT_DEFAULT_IOCHARSET="ascii"
+1 -1
arch/arm/configs/spear6xx_defconfig
··· 54 54 CONFIG_EXT2_FS_SECURITY=y 55 55 CONFIG_EXT3_FS=y 56 56 CONFIG_EXT3_FS_SECURITY=y 57 - CONFIG_AUTOFS4_FS=m 57 + CONFIG_AUTOFS_FS=m 58 58 CONFIG_MSDOS_FS=m 59 59 CONFIG_VFAT_FS=m 60 60 CONFIG_FAT_DEFAULT_IOCHARSET="ascii"
+1 -1
arch/arm64/configs/defconfig
··· 1469 1469 CONFIG_FANOTIFY=y 1470 1470 CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y 1471 1471 CONFIG_QUOTA=y 1472 - CONFIG_AUTOFS4_FS=y 1472 + CONFIG_AUTOFS_FS=y 1473 1473 CONFIG_FUSE_FS=m 1474 1474 CONFIG_CUSE=m 1475 1475 CONFIG_OVERLAY_FS=m
+2 -3
arch/arm64/kernel/fpsimd.c
··· 917 917 if (task == current) 918 918 put_cpu_fpsimd_context(); 919 919 920 + task_set_vl(task, type, vl); 921 + 920 922 /* 921 923 * Free the changed states if they are not in use, SME will be 922 924 * reallocated to the correct size on next use and we just ··· 932 930 933 931 if (free_sme) 934 932 sme_free(task); 935 - 936 - task_set_vl(task, type, vl); 937 933 938 934 out: 939 935 update_tsk_thread_flag(task, vec_vl_inherit_flag(type), ··· 1666 1666 1667 1667 fpsimd_flush_thread_vl(ARM64_VEC_SME); 1668 1668 current->thread.svcr = 0; 1669 - sme_smstop(); 1670 1669 } 1671 1670 1672 1671 current->thread.fp_type = FP_STATE_FPSIMD;
+1 -1
arch/ia64/configs/bigsur_defconfig
··· 77 77 CONFIG_XFS_FS=y 78 78 CONFIG_XFS_QUOTA=y 79 79 CONFIG_XFS_POSIX_ACL=y 80 - CONFIG_AUTOFS4_FS=m 80 + CONFIG_AUTOFS_FS=m 81 81 CONFIG_ISO9660_FS=m 82 82 CONFIG_JOLIET=y 83 83 CONFIG_UDF_FS=m
+1 -1
arch/ia64/configs/generic_defconfig
··· 146 146 CONFIG_REISERFS_FS_POSIX_ACL=y 147 147 CONFIG_REISERFS_FS_SECURITY=y 148 148 CONFIG_XFS_FS=y 149 - CONFIG_AUTOFS4_FS=m 149 + CONFIG_AUTOFS_FS=m 150 150 CONFIG_ISO9660_FS=m 151 151 CONFIG_JOLIET=y 152 152 CONFIG_UDF_FS=m
+1 -1
arch/ia64/configs/gensparse_defconfig
··· 127 127 CONFIG_REISERFS_FS_POSIX_ACL=y 128 128 CONFIG_REISERFS_FS_SECURITY=y 129 129 CONFIG_XFS_FS=y 130 - CONFIG_AUTOFS4_FS=y 130 + CONFIG_AUTOFS_FS=y 131 131 CONFIG_ISO9660_FS=m 132 132 CONFIG_JOLIET=y 133 133 CONFIG_UDF_FS=m
+1 -1
arch/ia64/configs/tiger_defconfig
··· 110 110 CONFIG_REISERFS_FS_POSIX_ACL=y 111 111 CONFIG_REISERFS_FS_SECURITY=y 112 112 CONFIG_XFS_FS=y 113 - CONFIG_AUTOFS4_FS=y 113 + CONFIG_AUTOFS_FS=y 114 114 CONFIG_ISO9660_FS=m 115 115 CONFIG_JOLIET=y 116 116 CONFIG_UDF_FS=m
+1
arch/loongarch/Kconfig
··· 14 14 select ARCH_HAS_CPU_FINALIZE_INIT 15 15 select ARCH_HAS_FORTIFY_SOURCE 16 16 select ARCH_HAS_NMI_SAFE_THIS_CPU_OPS 17 + select ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE 17 18 select ARCH_HAS_PTE_SPECIAL 18 19 select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST 19 20 select ARCH_INLINE_READ_LOCK if !PREEMPTION
+3 -1
arch/loongarch/Makefile
··· 68 68 ifdef CONFIG_AS_HAS_EXPLICIT_RELOCS 69 69 cflags-y += $(call cc-option,-mexplicit-relocs) 70 70 KBUILD_CFLAGS_KERNEL += $(call cc-option,-mdirect-extern-access) 71 + KBUILD_AFLAGS_MODULE += $(call cc-option,-mno-relax) $(call cc-option,-Wa$(comma)-mno-relax) 72 + KBUILD_CFLAGS_MODULE += $(call cc-option,-mno-relax) $(call cc-option,-Wa$(comma)-mno-relax) 71 73 else 72 74 cflags-y += $(call cc-option,-mno-explicit-relocs) 73 75 KBUILD_AFLAGS_KERNEL += -Wa,-mla-global-with-pcrel ··· 113 111 114 112 KBUILD_LDFLAGS += -m $(ld-emul) 115 113 116 - ifdef CONFIG_LOONGARCH 114 + ifdef need-compiler 117 115 CHECKFLAGS += $(shell $(CC) $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS) -dM -E -x c /dev/null | \ 118 116 grep -E -vw '__GNUC_(MINOR_|PATCHLEVEL_)?_' | \ 119 117 sed -e "s/^\#define /-D'/" -e "s/ /'='/" -e "s/$$/'/" -e 's/\$$/&&/g')
+1 -1
arch/loongarch/configs/loongson3_defconfig
··· 769 769 # CONFIG_PRINT_QUOTA_WARNING is not set 770 770 CONFIG_QFMT_V1=m 771 771 CONFIG_QFMT_V2=m 772 - CONFIG_AUTOFS4_FS=y 772 + CONFIG_AUTOFS_FS=y 773 773 CONFIG_FUSE_FS=m 774 774 CONFIG_OVERLAY_FS=y 775 775 CONFIG_OVERLAY_FS_INDEX=y
+4 -11
arch/loongarch/include/asm/fpu.h
··· 218 218 219 219 static inline void init_lsx_upper(void) 220 220 { 221 - /* 222 - * Check cpu_has_lsx only if it's a constant. This will allow the 223 - * compiler to optimise out code for CPUs without LSX without adding 224 - * an extra redundant check for CPUs with LSX. 225 - */ 226 - if (__builtin_constant_p(cpu_has_lsx) && !cpu_has_lsx) 227 - return; 228 - 229 - _init_lsx_upper(); 221 + if (cpu_has_lsx) 222 + _init_lsx_upper(); 230 223 } 231 224 232 225 static inline void restore_lsx_upper(struct task_struct *t) ··· 287 294 288 295 static inline int thread_lsx_context_live(void) 289 296 { 290 - if (__builtin_constant_p(cpu_has_lsx) && !cpu_has_lsx) 297 + if (!cpu_has_lsx) 291 298 return 0; 292 299 293 300 return test_thread_flag(TIF_LSX_CTX_LIVE); ··· 295 302 296 303 static inline int thread_lasx_context_live(void) 297 304 { 298 - if (__builtin_constant_p(cpu_has_lasx) && !cpu_has_lasx) 305 + if (!cpu_has_lasx) 299 306 return 0; 300 307 301 308 return test_thread_flag(TIF_LASX_CTX_LIVE);
+16
arch/loongarch/kernel/setup.c
··· 332 332 strlcat(boot_command_line, " ", COMMAND_LINE_SIZE); 333 333 334 334 strlcat(boot_command_line, init_command_line, COMMAND_LINE_SIZE); 335 + goto out; 335 336 } 336 337 #endif 338 + 339 + /* 340 + * Append built-in command line to the bootloader command line if 341 + * CONFIG_CMDLINE_EXTEND is enabled. 342 + */ 343 + if (IS_ENABLED(CONFIG_CMDLINE_EXTEND) && CONFIG_CMDLINE[0]) { 344 + strlcat(boot_command_line, " ", COMMAND_LINE_SIZE); 345 + strlcat(boot_command_line, CONFIG_CMDLINE, COMMAND_LINE_SIZE); 346 + } 347 + 348 + /* 349 + * Use built-in command line if the bootloader command line is empty. 350 + */ 351 + if (IS_ENABLED(CONFIG_CMDLINE_BOOTLOADER) && !boot_command_line[0]) 352 + strscpy(boot_command_line, CONFIG_CMDLINE, COMMAND_LINE_SIZE); 337 353 338 354 out: 339 355 *cmdline_p = boot_command_line;
+2 -1
arch/loongarch/lib/clear_user.S
··· 108 108 addi.d a3, a2, -8 109 109 bgeu a0, a3, .Llt8 110 110 15: st.d zero, a0, 0 111 + addi.d a0, a0, 8 111 112 112 113 .Llt8: 113 114 16: st.d zero, a2, -8 ··· 189 188 _asm_extable 13b, .L_fixup_handle_0 190 189 _asm_extable 14b, .L_fixup_handle_1 191 190 _asm_extable 15b, .L_fixup_handle_0 192 - _asm_extable 16b, .L_fixup_handle_1 191 + _asm_extable 16b, .L_fixup_handle_0 193 192 _asm_extable 17b, .L_fixup_handle_s0 194 193 _asm_extable 18b, .L_fixup_handle_s0 195 194 _asm_extable 19b, .L_fixup_handle_s0
+2 -1
arch/loongarch/lib/copy_user.S
··· 136 136 bgeu a1, a4, .Llt8 137 137 30: ld.d t0, a1, 0 138 138 31: st.d t0, a0, 0 139 + addi.d a0, a0, 8 139 140 140 141 .Llt8: 141 142 32: ld.d t0, a3, -8 ··· 247 246 _asm_extable 30b, .L_fixup_handle_0 248 247 _asm_extable 31b, .L_fixup_handle_0 249 248 _asm_extable 32b, .L_fixup_handle_0 250 - _asm_extable 33b, .L_fixup_handle_1 249 + _asm_extable 33b, .L_fixup_handle_0 251 250 _asm_extable 34b, .L_fixup_handle_s0 252 251 _asm_extable 35b, .L_fixup_handle_s0 253 252 _asm_extable 36b, .L_fixup_handle_s0
+1 -1
arch/loongarch/net/bpf_jit.h
··· 150 150 * no need to call lu32id to do a new filled operation. 151 151 */ 152 152 imm_51_31 = (imm >> 31) & 0x1fffff; 153 - if (imm_51_31 != 0 || imm_51_31 != 0x1fffff) { 153 + if (imm_51_31 != 0 && imm_51_31 != 0x1fffff) { 154 154 /* lu32id rd, imm_51_32 */ 155 155 imm_51_32 = (imm >> 32) & 0xfffff; 156 156 emit_insn(ctx, lu32id, rd, imm_51_32);
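The one-character fix above repairs a tautology: for any value x, (x != 0 || x != 0x1fffff) is always true, so the test could never skip the lu32id emission. A small demonstration under both operators:

#include <stdio.h>

int main(void)
{
	unsigned int samples[] = { 0x0, 0x1fffff, 0x12345 };

	for (unsigned int i = 0; i < 3; i++) {
		unsigned int x = samples[i];
		printf("x=0x%06x  ||:%d  &&:%d\n",
		       x, x != 0 || x != 0x1fffff, x != 0 && x != 0x1fffff);
	}
	return 0;
}

With &&, the skip fires exactly when bits 51..31 are all zeros or all ones, i.e. when the preceding sign extension already produced the correct upper bits.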
+2 -2
arch/m68k/fpsp040/skeleton.S
··· 499 499 dbf %d0,morein 500 500 rts 501 501 502 - .section .fixup,#alloc,#execinstr 502 + .section .fixup,"ax" 503 503 .even 504 504 1: 505 505 jbsr fpsp040_die 506 506 jbra .Lnotkern 507 507 508 - .section __ex_table,#alloc 508 + .section __ex_table,"a" 509 509 .align 4 510 510 511 511 .long in_ea,1b
+2 -2
arch/m68k/ifpsp060/os.S
··· 379 379 380 380 381 381 | Execption handling for movs access to illegal memory 382 - .section .fixup,#alloc,#execinstr 382 + .section .fixup,"ax" 383 383 .even 384 384 1: moveq #-1,%d1 385 385 rts 386 - .section __ex_table,#alloc 386 + .section __ex_table,"a" 387 387 .align 4 388 388 .long dmrbuae,1b 389 389 .long dmrwuae,1b
+2 -2
arch/m68k/kernel/relocate_kernel.S
··· 26 26 lea %pc@(.Lcopy),%a4 27 27 2: addl #0x00000000,%a4 /* virt_to_phys() */ 28 28 29 - .section ".m68k_fixup","aw" 29 + .section .m68k_fixup,"aw" 30 30 .long M68K_FIXUP_MEMOFFSET, 2b+2 31 31 .previous 32 32 ··· 49 49 lea %pc@(.Lcont040),%a4 50 50 5: addl #0x00000000,%a4 /* virt_to_phys() */ 51 51 52 - .section ".m68k_fixup","aw" 52 + .section .m68k_fixup,"aw" 53 53 .long M68K_FIXUP_MEMOFFSET, 5b+2 54 54 .previous 55 55
+1 -1
arch/mips/configs/bigsur_defconfig
··· 153 153 CONFIG_QUOTA_NETLINK_INTERFACE=y 154 154 # CONFIG_PRINT_QUOTA_WARNING is not set 155 155 CONFIG_QFMT_V2=m 156 - CONFIG_AUTOFS4_FS=m 156 + CONFIG_AUTOFS_FS=m 157 157 CONFIG_FUSE_FS=m 158 158 CONFIG_ISO9660_FS=m 159 159 CONFIG_JOLIET=y
+1 -1
arch/mips/configs/fuloong2e_defconfig
··· 178 178 CONFIG_EXT4_FS_POSIX_ACL=y 179 179 CONFIG_EXT4_FS_SECURITY=y 180 180 CONFIG_REISERFS_FS=m 181 - CONFIG_AUTOFS4_FS=y 181 + CONFIG_AUTOFS_FS=y 182 182 CONFIG_FUSE_FS=y 183 183 CONFIG_ISO9660_FS=m 184 184 CONFIG_JOLIET=y
+1 -1
arch/mips/configs/ip22_defconfig
··· 245 245 CONFIG_QUOTA_NETLINK_INTERFACE=y 246 246 # CONFIG_PRINT_QUOTA_WARNING is not set 247 247 CONFIG_QFMT_V2=m 248 - CONFIG_AUTOFS4_FS=m 248 + CONFIG_AUTOFS_FS=m 249 249 CONFIG_FUSE_FS=m 250 250 CONFIG_ISO9660_FS=m 251 251 CONFIG_JOLIET=y
+1 -1
arch/mips/configs/ip32_defconfig
··· 95 95 CONFIG_QUOTA=y 96 96 CONFIG_QFMT_V1=m 97 97 CONFIG_QFMT_V2=m 98 - CONFIG_AUTOFS4_FS=m 98 + CONFIG_AUTOFS_FS=m 99 99 CONFIG_FUSE_FS=m 100 100 CONFIG_ISO9660_FS=m 101 101 CONFIG_JOLIET=y
+1 -1
arch/mips/configs/jazz_defconfig
··· 76 76 CONFIG_REISERFS_FS_SECURITY=y 77 77 CONFIG_XFS_FS=m 78 78 CONFIG_XFS_QUOTA=y 79 - CONFIG_AUTOFS4_FS=m 79 + CONFIG_AUTOFS_FS=m 80 80 CONFIG_FUSE_FS=m 81 81 CONFIG_ISO9660_FS=m 82 82 CONFIG_JOLIET=y
+1 -1
arch/mips/configs/lemote2f_defconfig
··· 240 240 CONFIG_BTRFS_FS=m 241 241 CONFIG_QUOTA=y 242 242 CONFIG_QFMT_V2=m 243 - CONFIG_AUTOFS4_FS=m 243 + CONFIG_AUTOFS_FS=m 244 244 CONFIG_FSCACHE=m 245 245 CONFIG_CACHEFILES=m 246 246 CONFIG_ISO9660_FS=m
+1 -1
arch/mips/configs/loongson2k_defconfig
··· 296 296 CONFIG_XFS_POSIX_ACL=y 297 297 CONFIG_QUOTA=y 298 298 # CONFIG_PRINT_QUOTA_WARNING is not set 299 - CONFIG_AUTOFS4_FS=y 299 + CONFIG_AUTOFS_FS=y 300 300 CONFIG_FUSE_FS=m 301 301 CONFIG_ISO9660_FS=m 302 302 CONFIG_JOLIET=y
+1 -1
arch/mips/configs/loongson3_defconfig
··· 352 352 # CONFIG_PRINT_QUOTA_WARNING is not set 353 353 CONFIG_QFMT_V1=m 354 354 CONFIG_QFMT_V2=m 355 - CONFIG_AUTOFS4_FS=y 355 + CONFIG_AUTOFS_FS=y 356 356 CONFIG_FUSE_FS=m 357 357 CONFIG_VIRTIO_FS=m 358 358 CONFIG_FSCACHE=m
+1 -1
arch/mips/configs/mtx1_defconfig
··· 601 601 CONFIG_EXT3_FS_POSIX_ACL=y 602 602 CONFIG_EXT3_FS_SECURITY=y 603 603 CONFIG_QUOTA=y 604 - CONFIG_AUTOFS4_FS=y 604 + CONFIG_AUTOFS_FS=y 605 605 CONFIG_FUSE_FS=m 606 606 CONFIG_ISO9660_FS=m 607 607 CONFIG_JOLIET=y
+1 -1
arch/mips/configs/pic32mzda_defconfig
··· 66 66 CONFIG_EXT4_FS=y 67 67 CONFIG_EXT4_FS_POSIX_ACL=y 68 68 CONFIG_EXT4_FS_SECURITY=y 69 - CONFIG_AUTOFS4_FS=m 69 + CONFIG_AUTOFS_FS=m 70 70 CONFIG_FUSE_FS=m 71 71 CONFIG_FSCACHE=m 72 72 CONFIG_ISO9660_FS=m
+1 -1
arch/mips/configs/rm200_defconfig
··· 317 317 CONFIG_REISERFS_FS_SECURITY=y 318 318 CONFIG_XFS_FS=m 319 319 CONFIG_XFS_QUOTA=y 320 - CONFIG_AUTOFS4_FS=m 320 + CONFIG_AUTOFS_FS=m 321 321 CONFIG_FUSE_FS=m 322 322 CONFIG_ISO9660_FS=m 323 323 CONFIG_JOLIET=y
+1 -1
arch/parisc/configs/generic-32bit_defconfig
··· 237 237 CONFIG_QUOTA=y 238 238 CONFIG_QUOTA_NETLINK_INTERFACE=y 239 239 CONFIG_QFMT_V2=y 240 - CONFIG_AUTOFS4_FS=y 240 + CONFIG_AUTOFS_FS=y 241 241 CONFIG_ISO9660_FS=y 242 242 CONFIG_JOLIET=y 243 243 CONFIG_VFAT_FS=y
+1 -1
arch/parisc/configs/generic-64bit_defconfig
··· 259 259 CONFIG_QUOTA=y 260 260 CONFIG_QUOTA_NETLINK_INTERFACE=y 261 261 CONFIG_QFMT_V2=y 262 - CONFIG_AUTOFS4_FS=y 262 + CONFIG_AUTOFS_FS=y 263 263 CONFIG_FUSE_FS=y 264 264 CONFIG_CUSE=y 265 265 CONFIG_ISO9660_FS=y
+1 -1
arch/powerpc/configs/44x/sam440ep_defconfig
··· 79 79 CONFIG_EXT4_FS=y 80 80 CONFIG_EXT4_FS_POSIX_ACL=y 81 81 CONFIG_REISERFS_FS=y 82 - CONFIG_AUTOFS4_FS=y 82 + CONFIG_AUTOFS_FS=y 83 83 CONFIG_ISO9660_FS=y 84 84 CONFIG_JOLIET=y 85 85 CONFIG_ZISOFS=y
+1 -1
arch/powerpc/configs/85xx/stx_gp3_defconfig
··· 50 50 CONFIG_SOUND=m 51 51 CONFIG_EXT2_FS=y 52 52 CONFIG_EXT4_FS=y 53 - CONFIG_AUTOFS4_FS=y 53 + CONFIG_AUTOFS_FS=y 54 54 CONFIG_ISO9660_FS=m 55 55 CONFIG_UDF_FS=m 56 56 CONFIG_MSDOS_FS=m
+1 -1
arch/powerpc/configs/cell_defconfig
··· 172 172 CONFIG_UIO=m 173 173 CONFIG_EXT2_FS=y 174 174 CONFIG_EXT4_FS=y 175 - CONFIG_AUTOFS4_FS=m 175 + CONFIG_AUTOFS_FS=m 176 176 CONFIG_ISO9660_FS=m 177 177 CONFIG_JOLIET=y 178 178 CONFIG_UDF_FS=m
+1 -1
arch/powerpc/configs/ep8248e_defconfig
··· 47 47 # CONFIG_USB_SUPPORT is not set 48 48 CONFIG_EXT2_FS=y 49 49 CONFIG_EXT4_FS=y 50 - CONFIG_AUTOFS4_FS=y 50 + CONFIG_AUTOFS_FS=y 51 51 CONFIG_PROC_KCORE=y 52 52 CONFIG_TMPFS=y 53 53 CONFIG_CRAMFS=y
+1 -1
arch/powerpc/configs/mgcoge_defconfig
··· 60 60 CONFIG_USB_G_SERIAL=y 61 61 CONFIG_UIO=y 62 62 CONFIG_EXT2_FS=y 63 - CONFIG_AUTOFS4_FS=y 63 + CONFIG_AUTOFS_FS=y 64 64 CONFIG_PROC_KCORE=y 65 65 CONFIG_TMPFS=y 66 66 CONFIG_JFFS2_FS=y
+1 -1
arch/powerpc/configs/pasemi_defconfig
··· 143 143 CONFIG_EXT2_FS_XATTR=y 144 144 CONFIG_EXT2_FS_POSIX_ACL=y 145 145 CONFIG_EXT4_FS=y 146 - CONFIG_AUTOFS4_FS=y 146 + CONFIG_AUTOFS_FS=y 147 147 CONFIG_ISO9660_FS=y 148 148 CONFIG_UDF_FS=y 149 149 CONFIG_MSDOS_FS=y
+1 -1
arch/powerpc/configs/pmac32_defconfig
··· 254 254 CONFIG_EXT2_FS=y 255 255 CONFIG_EXT4_FS=y 256 256 CONFIG_EXT4_FS_POSIX_ACL=y 257 - CONFIG_AUTOFS4_FS=m 257 + CONFIG_AUTOFS_FS=m 258 258 CONFIG_FUSE_FS=m 259 259 CONFIG_ISO9660_FS=y 260 260 CONFIG_JOLIET=y
+1 -1
arch/powerpc/configs/powernv_defconfig
··· 270 270 CONFIG_BTRFS_FS_POSIX_ACL=y 271 271 CONFIG_NILFS2_FS=m 272 272 CONFIG_FANOTIFY=y 273 - CONFIG_AUTOFS4_FS=m 273 + CONFIG_AUTOFS_FS=m 274 274 CONFIG_FUSE_FS=m 275 275 CONFIG_OVERLAY_FS=m 276 276 CONFIG_ISO9660_FS=y
+1 -1
arch/powerpc/configs/ppc64_defconfig
··· 327 327 CONFIG_BTRFS_FS_POSIX_ACL=y 328 328 CONFIG_NILFS2_FS=m 329 329 CONFIG_FS_DAX=y 330 - CONFIG_AUTOFS4_FS=m 330 + CONFIG_AUTOFS_FS=m 331 331 CONFIG_FUSE_FS=m 332 332 CONFIG_OVERLAY_FS=m 333 333 CONFIG_ISO9660_FS=y
+1 -1
arch/powerpc/configs/ppc64e_defconfig
··· 185 185 CONFIG_XFS_FS=m 186 186 CONFIG_XFS_POSIX_ACL=y 187 187 CONFIG_FS_DAX=y 188 - CONFIG_AUTOFS4_FS=m 188 + CONFIG_AUTOFS_FS=m 189 189 CONFIG_ISO9660_FS=y 190 190 CONFIG_UDF_FS=m 191 191 CONFIG_MSDOS_FS=y
+1 -1
arch/powerpc/configs/ppc6xx_defconfig
··· 969 969 CONFIG_GFS2_FS=m 970 970 CONFIG_FS_DAX=y 971 971 CONFIG_QUOTA_NETLINK_INTERFACE=y 972 - CONFIG_AUTOFS4_FS=m 972 + CONFIG_AUTOFS_FS=m 973 973 CONFIG_FUSE_FS=m 974 974 CONFIG_ISO9660_FS=y 975 975 CONFIG_JOLIET=y
+1 -1
arch/powerpc/configs/ps3_defconfig
··· 129 129 CONFIG_EXT4_FS=y 130 130 CONFIG_QUOTA=y 131 131 CONFIG_QFMT_V2=y 132 - CONFIG_AUTOFS4_FS=m 132 + CONFIG_AUTOFS_FS=m 133 133 CONFIG_ISO9660_FS=m 134 134 CONFIG_JOLIET=y 135 135 CONFIG_UDF_FS=m
+1 -1
arch/riscv/configs/defconfig
··· 192 192 CONFIG_EXT4_FS_SECURITY=y 193 193 CONFIG_BTRFS_FS=m 194 194 CONFIG_BTRFS_FS_POSIX_ACL=y 195 - CONFIG_AUTOFS4_FS=y 195 + CONFIG_AUTOFS_FS=y 196 196 CONFIG_OVERLAY_FS=m 197 197 CONFIG_ISO9660_FS=y 198 198 CONFIG_JOLIET=y
+1 -1
arch/riscv/configs/rv32_defconfig
··· 98 98 CONFIG_RPMSG_VIRTIO=y 99 99 CONFIG_EXT4_FS=y 100 100 CONFIG_EXT4_FS_POSIX_ACL=y 101 - CONFIG_AUTOFS4_FS=y 101 + CONFIG_AUTOFS_FS=y 102 102 CONFIG_MSDOS_FS=y 103 103 CONFIG_VFAT_FS=y 104 104 CONFIG_TMPFS=y
+1 -1
arch/s390/configs/debug_defconfig
··· 624 624 CONFIG_QUOTA_DEBUG=y 625 625 CONFIG_QFMT_V1=m 626 626 CONFIG_QFMT_V2=m 627 - CONFIG_AUTOFS4_FS=m 627 + CONFIG_AUTOFS_FS=m 628 628 CONFIG_FUSE_FS=y 629 629 CONFIG_CUSE=m 630 630 CONFIG_VIRTIO_FS=m
+1 -1
arch/s390/configs/defconfig
··· 609 609 CONFIG_QUOTA_NETLINK_INTERFACE=y 610 610 CONFIG_QFMT_V1=m 611 611 CONFIG_QFMT_V2=m 612 - CONFIG_AUTOFS4_FS=m 612 + CONFIG_AUTOFS_FS=m 613 613 CONFIG_FUSE_FS=y 614 614 CONFIG_CUSE=m 615 615 CONFIG_VIRTIO_FS=m
+1 -1
arch/sh/configs/espt_defconfig
··· 61 61 CONFIG_EXT2_FS=y 62 62 CONFIG_EXT3_FS=y 63 63 # CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set 64 - CONFIG_AUTOFS4_FS=y 64 + CONFIG_AUTOFS_FS=y 65 65 CONFIG_PROC_KCORE=y 66 66 CONFIG_TMPFS=y 67 67 CONFIG_TMPFS_POSIX_ACL=y
+1 -1
arch/sh/configs/sdk7780_defconfig
··· 105 105 CONFIG_EXT3_FS=y 106 106 # CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set 107 107 CONFIG_EXT3_FS_POSIX_ACL=y 108 - CONFIG_AUTOFS4_FS=y 108 + CONFIG_AUTOFS_FS=y 109 109 CONFIG_ISO9660_FS=y 110 110 CONFIG_MSDOS_FS=y 111 111 CONFIG_VFAT_FS=y
+1 -1
arch/sh/configs/sdk7786_defconfig
··· 168 168 CONFIG_EXT4_FS=y 169 169 CONFIG_XFS_FS=y 170 170 CONFIG_BTRFS_FS=y 171 - CONFIG_AUTOFS4_FS=m 171 + CONFIG_AUTOFS_FS=m 172 172 CONFIG_FUSE_FS=y 173 173 CONFIG_CUSE=m 174 174 CONFIG_FSCACHE=m
+1 -1
arch/sh/configs/sh03_defconfig
··· 60 60 CONFIG_EXT3_FS=y 61 61 # CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set 62 62 CONFIG_EXT3_FS_POSIX_ACL=y 63 - CONFIG_AUTOFS4_FS=y 63 + CONFIG_AUTOFS_FS=y 64 64 CONFIG_ISO9660_FS=m 65 65 CONFIG_JOLIET=y 66 66 CONFIG_ZISOFS=y
+1 -1
arch/sh/configs/sh7763rdp_defconfig
··· 63 63 CONFIG_EXT2_FS=y 64 64 CONFIG_EXT3_FS=y 65 65 # CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set 66 - CONFIG_AUTOFS4_FS=y 66 + CONFIG_AUTOFS_FS=y 67 67 CONFIG_MSDOS_FS=y 68 68 CONFIG_VFAT_FS=y 69 69 CONFIG_PROC_KCORE=y
+1 -1
arch/sparc/configs/sparc32_defconfig
··· 65 65 CONFIG_EXT2_FS_XATTR=y 66 66 CONFIG_EXT2_FS_POSIX_ACL=y 67 67 CONFIG_EXT2_FS_SECURITY=y 68 - CONFIG_AUTOFS4_FS=m 68 + CONFIG_AUTOFS_FS=m 69 69 CONFIG_ISO9660_FS=m 70 70 CONFIG_PROC_KCORE=y 71 71 CONFIG_ROMFS_FS=m
+1 -1
arch/um/configs/i386_defconfig
··· 62 62 CONFIG_EXT4_FS=y 63 63 CONFIG_REISERFS_FS=y 64 64 CONFIG_QUOTA=y 65 - CONFIG_AUTOFS4_FS=m 65 + CONFIG_AUTOFS_FS=m 66 66 CONFIG_ISO9660_FS=m 67 67 CONFIG_JOLIET=y 68 68 CONFIG_PROC_KCORE=y
+1 -1
arch/um/configs/x86_64_defconfig
··· 60 60 CONFIG_EXT4_FS=y 61 61 CONFIG_REISERFS_FS=y 62 62 CONFIG_QUOTA=y 63 - CONFIG_AUTOFS4_FS=m 63 + CONFIG_AUTOFS_FS=m 64 64 CONFIG_ISO9660_FS=m 65 65 CONFIG_JOLIET=y 66 66 CONFIG_PROC_KCORE=y
+4 -3
arch/um/os-Linux/sigio.c
··· 3 3 * Copyright (C) 2002 - 2008 Jeff Dike (jdike@{addtoit,linux.intel}.com) 4 4 */ 5 5 6 - #include <linux/minmax.h> 7 6 #include <unistd.h> 8 7 #include <errno.h> 9 8 #include <fcntl.h> ··· 50 51 51 52 static int write_sigio_thread(void *unused) 52 53 { 53 - struct pollfds *fds; 54 + struct pollfds *fds, tmp; 54 55 struct pollfd *p; 55 56 int i, n, respond_fd; 56 57 char c; ··· 77 78 "write_sigio_thread : " 78 79 "read on socket failed, " 79 80 "err = %d\n", errno); 80 - swap(current_poll, next_poll); 81 + tmp = current_poll; 82 + current_poll = next_poll; 83 + next_poll = tmp; 81 84 respond_fd = sigio_private[1]; 82 85 } 83 86 else {
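Note: the os-Linux side of UML is host code built against libc headers, where a kernel-only header like linux/minmax.h (and its swap() helper) is off limits, hence the flip is open-coded through a temporary. A minimal standalone C sketch of the same double-buffer flip, with illustrative names:

    #include <stdio.h>

    struct pollfds { int used; };

    static struct pollfds current_poll, next_poll;

    int main(void)
    {
            struct pollfds tmp;

            current_poll.used = 1;
            next_poll.used = 2;

            /* Swap by value through a temporary -- no kernel swap() needed. */
            tmp = current_poll;
            current_poll = next_poll;
            next_poll = tmp;

            printf("current=%d next=%d\n", current_poll.used, next_poll.used);
            return 0;
    }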
+1 -1
arch/x86/configs/i386_defconfig
··· 245 245 CONFIG_QUOTA_NETLINK_INTERFACE=y 246 246 # CONFIG_PRINT_QUOTA_WARNING is not set 247 247 CONFIG_QFMT_V2=y 248 - CONFIG_AUTOFS4_FS=y 248 + CONFIG_AUTOFS_FS=y 249 249 CONFIG_ISO9660_FS=y 250 250 CONFIG_JOLIET=y 251 251 CONFIG_ZISOFS=y
+1 -1
arch/x86/configs/x86_64_defconfig
··· 242 242 CONFIG_QUOTA_NETLINK_INTERFACE=y 243 243 # CONFIG_PRINT_QUOTA_WARNING is not set 244 244 CONFIG_QFMT_V2=y 245 - CONFIG_AUTOFS4_FS=y 245 + CONFIG_AUTOFS_FS=y 246 246 CONFIG_ISO9660_FS=y 247 247 CONFIG_JOLIET=y 248 248 CONFIG_ZISOFS=y
+15 -1
arch/x86/entry/entry_64.S
··· 285 285 */ 286 286 .pushsection .text, "ax" 287 287 SYM_CODE_START(ret_from_fork_asm) 288 - UNWIND_HINT_REGS 288 + /* 289 + * This is the start of the kernel stack; even though there's a 290 + * register set at the top, the regset isn't necessarily coherent 291 + * (consider kthreads) and one cannot unwind further. 292 + * 293 + * This ensures stack unwinds of kernel threads terminate in a known 294 + * good state. 295 + */ 296 + UNWIND_HINT_END_OF_STACK 289 297 ANNOTATE_NOENDBR // copy_thread 290 298 CALL_DEPTH_ACCOUNT 291 299 ··· 303 295 movq %r12, %rcx /* fn_arg */ 304 296 call ret_from_fork 305 297 298 + /* 299 + * Set the stack state to what is expected for the target function 300 + * -- at this point the register set should be a valid user set 301 + * and unwind should work normally. 302 + */ 303 + UNWIND_HINT_REGS 306 304 jmp swapgs_restore_regs_and_return_to_usermode 307 305 SYM_CODE_END(ret_from_fork_asm) 308 306 .popsection
+1
arch/x86/include/asm/kvm-x86-ops.h
··· 37 37 KVM_X86_OP(get_cpl) 38 38 KVM_X86_OP(set_segment) 39 39 KVM_X86_OP(get_cs_db_l_bits) 40 + KVM_X86_OP(is_valid_cr0) 40 41 KVM_X86_OP(set_cr0) 41 42 KVM_X86_OP_OPTIONAL(post_set_cr3) 42 43 KVM_X86_OP(is_valid_cr4)
+2 -1
arch/x86/include/asm/kvm_host.h
··· 1566 1566 void (*set_segment)(struct kvm_vcpu *vcpu, 1567 1567 struct kvm_segment *var, int seg); 1568 1568 void (*get_cs_db_l_bits)(struct kvm_vcpu *vcpu, int *db, int *l); 1569 + bool (*is_valid_cr0)(struct kvm_vcpu *vcpu, unsigned long cr0); 1569 1570 void (*set_cr0)(struct kvm_vcpu *vcpu, unsigned long cr0); 1570 1571 void (*post_set_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3); 1571 - bool (*is_valid_cr4)(struct kvm_vcpu *vcpu, unsigned long cr0); 1572 + bool (*is_valid_cr4)(struct kvm_vcpu *vcpu, unsigned long cr4); 1572 1573 void (*set_cr4)(struct kvm_vcpu *vcpu, unsigned long cr4); 1573 1574 int (*set_efer)(struct kvm_vcpu *vcpu, u64 efer); 1574 1575 void (*get_idt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
+1
arch/x86/include/asm/microcode.h
··· 5 5 #include <asm/cpu.h> 6 6 #include <linux/earlycpio.h> 7 7 #include <linux/initrd.h> 8 + #include <asm/microcode_amd.h> 8 9 9 10 struct ucode_patch { 10 11 struct list_head plist;
+2
arch/x86/include/asm/microcode_amd.h
··· 48 48 extern void load_ucode_amd_ap(unsigned int family); 49 49 extern int __init save_microcode_in_initrd_amd(unsigned int family); 50 50 void reload_ucode_amd(unsigned int cpu); 51 + extern void amd_check_microcode(void); 51 52 #else 52 53 static inline void __init load_ucode_amd_bsp(unsigned int family) {} 53 54 static inline void load_ucode_amd_ap(unsigned int family) {} 54 55 static inline int __init 55 56 save_microcode_in_initrd_amd(unsigned int family) { return -EINVAL; } 56 57 static inline void reload_ucode_amd(unsigned int cpu) {} 58 + static inline void amd_check_microcode(void) {} 57 59 #endif 58 60 #endif /* _ASM_X86_MICROCODE_AMD_H */
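Note: amd_check_microcode() uses the usual CONFIG-gated stub convention, so the caller added in common.c below needs no #ifdef: when AMD microcode support is out of the build, the header supplies an empty static inline. A standalone sketch of the pattern (the CONFIG macro here is illustrative):

    #include <stdio.h>

    #define CONFIG_MICROCODE_AMD 0      /* flip to 1 to model the enabled build */

    #if CONFIG_MICROCODE_AMD
    extern void amd_check_microcode(void);
    #else
    static inline void amd_check_microcode(void) {}
    #endif

    int main(void)
    {
            amd_check_microcode();      /* compiles either way; no-op when disabled */
            printf("ok\n");
            return 0;
    }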
+1
arch/x86/include/asm/msr-index.h
··· 545 545 #define MSR_AMD64_DE_CFG 0xc0011029 546 546 #define MSR_AMD64_DE_CFG_LFENCE_SERIALIZE_BIT 1 547 547 #define MSR_AMD64_DE_CFG_LFENCE_SERIALIZE BIT_ULL(MSR_AMD64_DE_CFG_LFENCE_SERIALIZE_BIT) 548 + #define MSR_AMD64_DE_CFG_ZEN2_FP_BACKUP_FIX_BIT 9 548 549 549 550 #define MSR_AMD64_BU_CFG2 0xc001102a 550 551 #define MSR_AMD64_IBSFETCHCTL 0xc0011030
+127 -72
arch/x86/kernel/cpu/amd.c
··· 27 27 28 28 #include "cpu.h" 29 29 30 - static const int amd_erratum_383[]; 31 - static const int amd_erratum_400[]; 32 - static const int amd_erratum_1054[]; 33 - static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum); 34 - 35 30 /* 36 31 * nodes_per_socket: Stores the number of nodes per socket. 37 32 * Refer to Fam15h Models 00-0fh BKDG - CPUID Fn8000_001E_ECX 38 33 * Node Identifiers[10:8] 39 34 */ 40 35 static u32 nodes_per_socket = 1; 36 + 37 + /* 38 + * AMD errata checking 39 + * 40 + * Errata are defined as arrays of ints using the AMD_LEGACY_ERRATUM() or 41 + * AMD_OSVW_ERRATUM() macros. The latter is intended for newer errata that 42 + * have an OSVW id assigned, which it takes as first argument. Both take a 43 + * variable number of family-specific model-stepping ranges created by 44 + * AMD_MODEL_RANGE(). 45 + * 46 + * Example: 47 + * 48 + * const int amd_erratum_319[] = 49 + * AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0x4, 0x2), 50 + * AMD_MODEL_RANGE(0x10, 0x8, 0x0, 0x8, 0x0), 51 + * AMD_MODEL_RANGE(0x10, 0x9, 0x0, 0x9, 0x0)); 52 + */ 53 + 54 + #define AMD_LEGACY_ERRATUM(...) { -1, __VA_ARGS__, 0 } 55 + #define AMD_OSVW_ERRATUM(osvw_id, ...) { osvw_id, __VA_ARGS__, 0 } 56 + #define AMD_MODEL_RANGE(f, m_start, s_start, m_end, s_end) \ 57 + ((f << 24) | (m_start << 16) | (s_start << 12) | (m_end << 4) | (s_end)) 58 + #define AMD_MODEL_RANGE_FAMILY(range) (((range) >> 24) & 0xff) 59 + #define AMD_MODEL_RANGE_START(range) (((range) >> 12) & 0xfff) 60 + #define AMD_MODEL_RANGE_END(range) ((range) & 0xfff) 61 + 62 + static const int amd_erratum_400[] = 63 + AMD_OSVW_ERRATUM(1, AMD_MODEL_RANGE(0xf, 0x41, 0x2, 0xff, 0xf), 64 + AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0xff, 0xf)); 65 + 66 + static const int amd_erratum_383[] = 67 + AMD_OSVW_ERRATUM(3, AMD_MODEL_RANGE(0x10, 0, 0, 0xff, 0xf)); 68 + 69 + /* #1054: Instructions Retired Performance Counter May Be Inaccurate */ 70 + static const int amd_erratum_1054[] = 71 + AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x17, 0, 0, 0x2f, 0xf)); 72 + 73 + static const int amd_zenbleed[] = 74 + AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x17, 0x30, 0x0, 0x4f, 0xf), 75 + AMD_MODEL_RANGE(0x17, 0x60, 0x0, 0x7f, 0xf), 76 + AMD_MODEL_RANGE(0x17, 0xa0, 0x0, 0xaf, 0xf)); 77 + 78 + static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum) 79 + { 80 + int osvw_id = *erratum++; 81 + u32 range; 82 + u32 ms; 83 + 84 + if (osvw_id >= 0 && osvw_id < 65536 && 85 + cpu_has(cpu, X86_FEATURE_OSVW)) { 86 + u64 osvw_len; 87 + 88 + rdmsrl(MSR_AMD64_OSVW_ID_LENGTH, osvw_len); 89 + if (osvw_id < osvw_len) { 90 + u64 osvw_bits; 91 + 92 + rdmsrl(MSR_AMD64_OSVW_STATUS + (osvw_id >> 6), 93 + osvw_bits); 94 + return osvw_bits & (1ULL << (osvw_id & 0x3f)); 95 + } 96 + } 97 + 98 + /* OSVW unavailable or ID unknown, match family-model-stepping range */ 99 + ms = (cpu->x86_model << 4) | cpu->x86_stepping; 100 + while ((range = *erratum++)) 101 + if ((cpu->x86 == AMD_MODEL_RANGE_FAMILY(range)) && 102 + (ms >= AMD_MODEL_RANGE_START(range)) && 103 + (ms <= AMD_MODEL_RANGE_END(range))) 104 + return true; 105 + 106 + return false; 107 + } 41 108 42 109 static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p) 43 110 { ··· 983 916 } 984 917 } 985 918 919 + static bool cpu_has_zenbleed_microcode(void) 920 + { 921 + u32 good_rev = 0; 922 + 923 + switch (boot_cpu_data.x86_model) { 924 + case 0x30 ... 0x3f: good_rev = 0x0830107a; break; 925 + case 0x60 ... 0x67: good_rev = 0x0860010b; break; 926 + case 0x68 ... 
0x6f: good_rev = 0x08608105; break; 927 + case 0x70 ... 0x7f: good_rev = 0x08701032; break; 928 + case 0xa0 ... 0xaf: good_rev = 0x08a00008; break; 929 + 930 + default: 931 + return false; 932 + break; 933 + } 934 + 935 + if (boot_cpu_data.microcode < good_rev) 936 + return false; 937 + 938 + return true; 939 + } 940 + 941 + static void zenbleed_check(struct cpuinfo_x86 *c) 942 + { 943 + if (!cpu_has_amd_erratum(c, amd_zenbleed)) 944 + return; 945 + 946 + if (cpu_has(c, X86_FEATURE_HYPERVISOR)) 947 + return; 948 + 949 + if (!cpu_has(c, X86_FEATURE_AVX)) 950 + return; 951 + 952 + if (!cpu_has_zenbleed_microcode()) { 953 + pr_notice_once("Zenbleed: please update your microcode for the most optimal fix\n"); 954 + msr_set_bit(MSR_AMD64_DE_CFG, MSR_AMD64_DE_CFG_ZEN2_FP_BACKUP_FIX_BIT); 955 + } else { 956 + msr_clear_bit(MSR_AMD64_DE_CFG, MSR_AMD64_DE_CFG_ZEN2_FP_BACKUP_FIX_BIT); 957 + } 958 + } 959 + 986 960 static void init_amd(struct cpuinfo_x86 *c) 987 961 { 988 962 early_init_amd(c); ··· 1128 1020 if (spectre_v2_in_eibrs_mode(spectre_v2_enabled) && 1129 1021 cpu_has(c, X86_FEATURE_AUTOIBRS)) 1130 1022 WARN_ON_ONCE(msr_set_bit(MSR_EFER, _EFER_AUTOIBRS)); 1023 + 1024 + zenbleed_check(c); 1131 1025 } 1132 1026 1133 1027 #ifdef CONFIG_X86_32 ··· 1225 1115 1226 1116 cpu_dev_register(amd_cpu_dev); 1227 1117 1228 - /* 1229 - * AMD errata checking 1230 - * 1231 - * Errata are defined as arrays of ints using the AMD_LEGACY_ERRATUM() or 1232 - * AMD_OSVW_ERRATUM() macros. The latter is intended for newer errata that 1233 - * have an OSVW id assigned, which it takes as first argument. Both take a 1234 - * variable number of family-specific model-stepping ranges created by 1235 - * AMD_MODEL_RANGE(). 1236 - * 1237 - * Example: 1238 - * 1239 - * const int amd_erratum_319[] = 1240 - * AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0x4, 0x2), 1241 - * AMD_MODEL_RANGE(0x10, 0x8, 0x0, 0x8, 0x0), 1242 - * AMD_MODEL_RANGE(0x10, 0x9, 0x0, 0x9, 0x0)); 1243 - */ 1244 - 1245 - #define AMD_LEGACY_ERRATUM(...) { -1, __VA_ARGS__, 0 } 1246 - #define AMD_OSVW_ERRATUM(osvw_id, ...) 
{ osvw_id, __VA_ARGS__, 0 } 1247 - #define AMD_MODEL_RANGE(f, m_start, s_start, m_end, s_end) \ 1248 - ((f << 24) | (m_start << 16) | (s_start << 12) | (m_end << 4) | (s_end)) 1249 - #define AMD_MODEL_RANGE_FAMILY(range) (((range) >> 24) & 0xff) 1250 - #define AMD_MODEL_RANGE_START(range) (((range) >> 12) & 0xfff) 1251 - #define AMD_MODEL_RANGE_END(range) ((range) & 0xfff) 1252 - 1253 - static const int amd_erratum_400[] = 1254 - AMD_OSVW_ERRATUM(1, AMD_MODEL_RANGE(0xf, 0x41, 0x2, 0xff, 0xf), 1255 - AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0xff, 0xf)); 1256 - 1257 - static const int amd_erratum_383[] = 1258 - AMD_OSVW_ERRATUM(3, AMD_MODEL_RANGE(0x10, 0, 0, 0xff, 0xf)); 1259 - 1260 - /* #1054: Instructions Retired Performance Counter May Be Inaccurate */ 1261 - static const int amd_erratum_1054[] = 1262 - AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x17, 0, 0, 0x2f, 0xf)); 1263 - 1264 - static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum) 1265 - { 1266 - int osvw_id = *erratum++; 1267 - u32 range; 1268 - u32 ms; 1269 - 1270 - if (osvw_id >= 0 && osvw_id < 65536 && 1271 - cpu_has(cpu, X86_FEATURE_OSVW)) { 1272 - u64 osvw_len; 1273 - 1274 - rdmsrl(MSR_AMD64_OSVW_ID_LENGTH, osvw_len); 1275 - if (osvw_id < osvw_len) { 1276 - u64 osvw_bits; 1277 - 1278 - rdmsrl(MSR_AMD64_OSVW_STATUS + (osvw_id >> 6), 1279 - osvw_bits); 1280 - return osvw_bits & (1ULL << (osvw_id & 0x3f)); 1281 - } 1282 - } 1283 - 1284 - /* OSVW unavailable or ID unknown, match family-model-stepping range */ 1285 - ms = (cpu->x86_model << 4) | cpu->x86_stepping; 1286 - while ((range = *erratum++)) 1287 - if ((cpu->x86 == AMD_MODEL_RANGE_FAMILY(range)) && 1288 - (ms >= AMD_MODEL_RANGE_START(range)) && 1289 - (ms <= AMD_MODEL_RANGE_END(range))) 1290 - return true; 1291 - 1292 - return false; 1293 - } 1294 - 1295 1118 static DEFINE_PER_CPU_READ_MOSTLY(unsigned long[4], amd_dr_addr_mask); 1296 1119 1297 1120 static unsigned int amd_msr_dr_addr_masks[] = { ··· 1278 1235 return 255; 1279 1236 } 1280 1237 EXPORT_SYMBOL_GPL(amd_get_highest_perf); 1238 + 1239 + static void zenbleed_check_cpu(void *unused) 1240 + { 1241 + struct cpuinfo_x86 *c = &cpu_data(smp_processor_id()); 1242 + 1243 + zenbleed_check(c); 1244 + } 1245 + 1246 + void amd_check_microcode(void) 1247 + { 1248 + on_each_cpu(zenbleed_check_cpu, NULL, 1); 1249 + }
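Note: with the erratum tables moved above their first use, the forward declarations go away. AMD_MODEL_RANGE() packs family, model and stepping into one 32-bit value, and the model/stepping pair is compared as a single (model << 4) | stepping quantity. A standalone sketch decoding one of the Zenbleed ranges (macros copied from the hunk above; main() is illustrative only):

    #include <stdio.h>
    #include <stdint.h>

    #define AMD_MODEL_RANGE(f, m_start, s_start, m_end, s_end) \
            ((f << 24) | (m_start << 16) | (s_start << 12) | (m_end << 4) | (s_end))
    #define AMD_MODEL_RANGE_FAMILY(range)   (((range) >> 24) & 0xff)
    #define AMD_MODEL_RANGE_START(range)    (((range) >> 12) & 0xfff)
    #define AMD_MODEL_RANGE_END(range)      ((range) & 0xfff)

    int main(void)
    {
            /* Zen 2 client range from amd_zenbleed above: family 0x17,
             * models 0x30-0x4f, any stepping. */
            uint32_t range = AMD_MODEL_RANGE(0x17, 0x30, 0x0, 0x4f, 0xf);
            uint32_t ms = (0x31 << 4) | 0x0;        /* model 0x31, stepping 0 */

            printf("family=%#x start=%#x end=%#x hit=%d\n",
                   AMD_MODEL_RANGE_FAMILY(range),
                   AMD_MODEL_RANGE_START(range),
                   AMD_MODEL_RANGE_END(range),
                   ms >= AMD_MODEL_RANGE_START(range) &&
                   ms <= AMD_MODEL_RANGE_END(range));
            return 0;
    }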
+9 -6
arch/x86/kernel/cpu/bugs.c
··· 1150 1150 } 1151 1151 1152 1152 /* 1153 - * If no STIBP, enhanced IBRS is enabled, or SMT impossible, STIBP 1153 + * If no STIBP, Intel enhanced IBRS is enabled, or SMT impossible, STIBP 1154 1154 * is not required. 1155 1155 * 1156 - * Enhanced IBRS also protects against cross-thread branch target 1156 + * Intel's Enhanced IBRS also protects against cross-thread branch target 1157 1157 * injection in user-mode as the IBRS bit remains always set which 1158 1158 * implicitly enables cross-thread protections. However, in legacy IBRS 1159 1159 * mode, the IBRS bit is set only on kernel entry and cleared on return 1160 - * to userspace. This disables the implicit cross-thread protection, 1161 - * so allow for STIBP to be selected in that case. 1160 + * to userspace. AMD Automatic IBRS also does not protect userspace. 1161 + * These modes therefore disable the implicit cross-thread protection, 1162 + * so allow for STIBP to be selected in those cases. 1162 1163 */ 1163 1164 if (!boot_cpu_has(X86_FEATURE_STIBP) || 1164 1165 !smt_possible || 1165 - spectre_v2_in_eibrs_mode(spectre_v2_enabled)) 1166 + (spectre_v2_in_eibrs_mode(spectre_v2_enabled) && 1167 + !boot_cpu_has(X86_FEATURE_AUTOIBRS))) 1166 1168 return; 1167 1169 1168 1170 /* ··· 2296 2294 2297 2295 static char *stibp_state(void) 2298 2296 { 2299 - if (spectre_v2_in_eibrs_mode(spectre_v2_enabled)) 2297 + if (spectre_v2_in_eibrs_mode(spectre_v2_enabled) && 2298 + !boot_cpu_has(X86_FEATURE_AUTOIBRS)) 2300 2299 return ""; 2301 2300 2302 2301 switch (spectre_v2_user_stibp) {
+2
arch/x86/kernel/cpu/common.c
··· 2287 2287 2288 2288 perf_check_microcode(); 2289 2289 2290 + amd_check_microcode(); 2291 + 2290 2292 store_cpu_caps(&curr_info); 2291 2293 2292 2294 if (!memcmp(&prev_info->x86_capability, &curr_info.x86_capability,
+2 -2
arch/x86/kernel/cpu/mce/amd.c
··· 1261 1261 struct threshold_block *pos = NULL; 1262 1262 struct threshold_block *tmp = NULL; 1263 1263 1264 - kobject_del(b->kobj); 1264 + kobject_put(b->kobj); 1265 1265 1266 1266 list_for_each_entry_safe(pos, tmp, &b->blocks->miscj, miscj) 1267 - kobject_del(&pos->kobj); 1267 + kobject_put(&pos->kobj); 1268 1268 1269 1269 1270 1270 static void threshold_remove_bank(struct threshold_bank *bank)
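Note: kobject_del() only unlinks the object from sysfs; it does not drop the reference taken at creation, so the threshold blocks were leaked. kobject_put() drops the refcount and runs ->release() once it reaches zero. A standalone sketch of why the _put matters (illustrative, not the kobject implementation):

    #include <stdio.h>
    #include <stdlib.h>

    struct obj {
            int refcount;
    };

    static void obj_put(struct obj *o)
    {
            if (--o->refcount == 0) {
                    printf("released\n");   /* kobject would call ->release() here */
                    free(o);
            }
    }

    int main(void)
    {
            struct obj *o = malloc(sizeof(*o));

            o->refcount = 1;        /* reference taken at creation */
            obj_put(o);             /* unlinking alone would leak; put frees */
            return 0;
    }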
+11 -7
arch/x86/kernel/traps.c
··· 697 697 } 698 698 699 699 static bool gp_try_fixup_and_notify(struct pt_regs *regs, int trapnr, 700 - unsigned long error_code, const char *str) 700 + unsigned long error_code, const char *str, 701 + unsigned long address) 701 702 { 702 - if (fixup_exception(regs, trapnr, error_code, 0)) 703 + if (fixup_exception(regs, trapnr, error_code, address)) 703 704 return true; 704 705 705 706 current->thread.error_code = error_code; ··· 760 759 goto exit; 761 760 } 762 761 763 - if (gp_try_fixup_and_notify(regs, X86_TRAP_GP, error_code, desc)) 762 + if (gp_try_fixup_and_notify(regs, X86_TRAP_GP, error_code, desc, 0)) 764 763 goto exit; 765 764 766 765 if (error_code) ··· 1358 1357 1359 1358 #define VE_FAULT_STR "VE fault" 1360 1359 1361 - static void ve_raise_fault(struct pt_regs *regs, long error_code) 1360 + static void ve_raise_fault(struct pt_regs *regs, long error_code, 1361 + unsigned long address) 1362 1362 { 1363 1363 if (user_mode(regs)) { 1364 1364 gp_user_force_sig_segv(regs, X86_TRAP_VE, error_code, VE_FAULT_STR); 1365 1365 return; 1366 1366 } 1367 1367 1368 - if (gp_try_fixup_and_notify(regs, X86_TRAP_VE, error_code, VE_FAULT_STR)) 1368 + if (gp_try_fixup_and_notify(regs, X86_TRAP_VE, error_code, 1369 + VE_FAULT_STR, address)) { 1369 1370 return; 1371 + } 1370 1372 1371 - die_addr(VE_FAULT_STR, regs, error_code, 0); 1373 + die_addr(VE_FAULT_STR, regs, error_code, address); 1372 1374 } 1373 1375 1374 1376 /* ··· 1435 1431 * it successfully, treat it as #GP(0) and handle it. 1436 1432 */ 1437 1433 if (!tdx_handle_virt_exception(regs, &ve)) 1438 - ve_raise_fault(regs, 0); 1434 + ve_raise_fault(regs, 0, ve.gla); 1439 1435 1440 1436 cond_local_irq_disable(regs); 1441 1437 }
+17 -8
arch/x86/kvm/lapic.c
··· 637 637 *max_irr = -1; 638 638 639 639 for (i = vec = 0; i <= 7; i++, vec += 32) { 640 + u32 *p_irr = (u32 *)(regs + APIC_IRR + i * 0x10); 641 + 642 + irr_val = *p_irr; 640 643 pir_val = READ_ONCE(pir[i]); 641 - irr_val = *((u32 *)(regs + APIC_IRR + i * 0x10)); 644 + 642 645 if (pir_val) { 646 + pir_val = xchg(&pir[i], 0); 647 + 643 648 prev_irr_val = irr_val; 644 - irr_val |= xchg(&pir[i], 0); 645 - *((u32 *)(regs + APIC_IRR + i * 0x10)) = irr_val; 646 - if (prev_irr_val != irr_val) { 647 - max_updated_irr = 648 - __fls(irr_val ^ prev_irr_val) + vec; 649 - } 649 + do { 650 + irr_val = prev_irr_val | pir_val; 651 + } while (prev_irr_val != irr_val && 652 + !try_cmpxchg(p_irr, &prev_irr_val, irr_val)); 653 + 654 + if (prev_irr_val != irr_val) 655 + max_updated_irr = __fls(irr_val ^ prev_irr_val) + vec; 650 656 } 651 657 if (irr_val) 652 658 *max_irr = __fls(irr_val) + vec; ··· 666 660 bool kvm_apic_update_irr(struct kvm_vcpu *vcpu, u32 *pir, int *max_irr) 667 661 { 668 662 struct kvm_lapic *apic = vcpu->arch.apic; 663 + bool irr_updated = __kvm_apic_update_irr(pir, apic->regs, max_irr); 669 664 670 - return __kvm_apic_update_irr(pir, apic->regs, max_irr); 665 + if (unlikely(!apic->apicv_active && irr_updated)) 666 + apic->irr_pending = true; 667 + return irr_updated; 671 668 } 672 669 EXPORT_SYMBOL_GPL(kvm_apic_update_irr); 673 670
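Note: __kvm_apic_update_irr() now folds the PIR bits into the IRR with a try_cmpxchg() retry loop rather than a plain read-OR-write, so bits set concurrently in the IRR are not lost, and kvm_apic_update_irr() marks irr_pending when APICv is inactive. A standalone sketch of the same OR-merge loop, using C11 atomics in place of the kernel's try_cmpxchg():

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    static _Atomic uint32_t irr;

    static void merge_bits(uint32_t pir_val)
    {
            uint32_t prev = atomic_load(&irr);
            uint32_t next;

            do {
                    next = prev | pir_val;  /* fold in the pending bits */
                    /* on CAS failure, prev is reloaded and the loop retries */
            } while (prev != next &&
                     !atomic_compare_exchange_weak(&irr, &prev, next));
    }

    int main(void)
    {
            atomic_store(&irr, 0x1);
            merge_bits(0x6);
            printf("irr=%#x\n", atomic_load(&irr));     /* prints irr=0x7 */
            return 0;
    }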
+8 -8
arch/x86/kvm/svm/svm.c
··· 1786 1786 } 1787 1787 } 1788 1788 1789 + static bool svm_is_valid_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) 1790 + { 1791 + return true; 1792 + } 1793 + 1789 1794 void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) 1790 1795 { 1791 1796 struct vcpu_svm *svm = to_svm(vcpu); ··· 3991 3986 3992 3987 static fastpath_t svm_exit_handlers_fastpath(struct kvm_vcpu *vcpu) 3993 3988 { 3994 - struct vmcb_control_area *control = &to_svm(vcpu)->vmcb->control; 3995 - 3996 - /* 3997 - * Note, the next RIP must be provided as SRCU isn't held, i.e. KVM 3998 - * can't read guest memory (dereference memslots) to decode the WRMSR. 3999 - */ 4000 - if (control->exit_code == SVM_EXIT_MSR && control->exit_info_1 && 4001 - nrips && control->next_rip) 3989 + if (to_svm(vcpu)->vmcb->control.exit_code == SVM_EXIT_MSR && 3990 + to_svm(vcpu)->vmcb->control.exit_info_1) 4002 3991 return handle_fastpath_set_msr_irqoff(vcpu); 4003 3992 4004 3993 return EXIT_FASTPATH_NONE; ··· 4814 4815 .set_segment = svm_set_segment, 4815 4816 .get_cpl = svm_get_cpl, 4816 4817 .get_cs_db_l_bits = svm_get_cs_db_l_bits, 4818 + .is_valid_cr0 = svm_is_valid_cr0, 4817 4819 .set_cr0 = svm_set_cr0, 4818 4820 .post_set_cr3 = sev_post_set_cr3, 4819 4821 .is_valid_cr4 = svm_is_valid_cr4,
+4 -4
arch/x86/kvm/vmx/vmenter.S
··· 303 303 VMX_DO_EVENT_IRQOFF call asm_exc_nmi_kvm_vmx 304 304 SYM_FUNC_END(vmx_do_nmi_irqoff) 305 305 306 - 307 - .section .text, "ax" 308 - 309 306 #ifndef CONFIG_CC_HAS_ASM_GOTO_OUTPUT 307 + 310 308 /** 311 309 * vmread_error_trampoline - Trampoline from inline asm to vmread_error() 312 310 * @field: VMCS field encoding that failed ··· 333 335 mov 3*WORD_SIZE(%_ASM_BP), %_ASM_ARG2 334 336 mov 2*WORD_SIZE(%_ASM_BP), %_ASM_ARG1 335 337 336 - call vmread_error 338 + call vmread_error_trampoline2 337 339 338 340 /* Zero out @fault, which will be popped into the result register. */ 339 341 _ASM_MOV $0, 3*WORD_SIZE(%_ASM_BP) ··· 354 356 RET 355 357 SYM_FUNC_END(vmread_error_trampoline) 356 358 #endif 359 + 360 + .section .text, "ax" 357 361 358 362 SYM_FUNC_START(vmx_do_interrupt_irqoff) 359 363 VMX_DO_EVENT_IRQOFF CALL_NOSPEC _ASM_ARG1
+47 -17
arch/x86/kvm/vmx/vmx.c
··· 441 441 pr_warn_ratelimited(fmt); \ 442 442 } while (0) 443 443 444 - void vmread_error(unsigned long field, bool fault) 444 + noinline void vmread_error(unsigned long field) 445 445 { 446 - if (fault) 447 - kvm_spurious_fault(); 448 - else 449 - vmx_insn_failed("vmread failed: field=%lx\n", field); 446 + vmx_insn_failed("vmread failed: field=%lx\n", field); 450 447 } 448 + 449 + #ifndef CONFIG_CC_HAS_ASM_GOTO_OUTPUT 450 + noinstr void vmread_error_trampoline2(unsigned long field, bool fault) 451 + { 452 + if (fault) { 453 + kvm_spurious_fault(); 454 + } else { 455 + instrumentation_begin(); 456 + vmread_error(field); 457 + instrumentation_end(); 458 + } 459 + } 460 + #endif 451 461 452 462 noinline void vmwrite_error(unsigned long field, unsigned long value) 453 463 { ··· 1513 1503 struct vcpu_vmx *vmx = to_vmx(vcpu); 1514 1504 unsigned long old_rflags; 1515 1505 1506 + /* 1507 + * Unlike CR0 and CR4, RFLAGS handling requires checking if the vCPU 1508 + * is an unrestricted guest in order to mark L2 as needing emulation 1509 + * if L1 runs L2 as a restricted guest. 1510 + */ 1516 1511 if (is_unrestricted_guest(vcpu)) { 1517 1512 kvm_register_mark_available(vcpu, VCPU_EXREG_RFLAGS); 1518 1513 vmx->rflags = rflags; ··· 3052 3037 struct vcpu_vmx *vmx = to_vmx(vcpu); 3053 3038 struct kvm_vmx *kvm_vmx = to_kvm_vmx(vcpu->kvm); 3054 3039 3040 + /* 3041 + * KVM should never use VM86 to virtualize Real Mode when L2 is active, 3042 + * as using VM86 is unnecessary if unrestricted guest is enabled, and 3043 + * if unrestricted guest is disabled, VM-Enter (from L1) with CR0.PG=0 3044 + * should VM-Fail and KVM should reject userspace attempts to stuff 3045 + * CR0.PG=0 when L2 is active. 3046 + */ 3047 + WARN_ON_ONCE(is_guest_mode(vcpu)); 3048 + 3055 3049 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_TR], VCPU_SREG_TR); 3056 3050 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_ES], VCPU_SREG_ES); 3057 3051 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_DS], VCPU_SREG_DS); ··· 3250 3226 #define CR3_EXITING_BITS (CPU_BASED_CR3_LOAD_EXITING | \ 3251 3227 CPU_BASED_CR3_STORE_EXITING) 3252 3228 3229 + static bool vmx_is_valid_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) 3230 + { 3231 + if (is_guest_mode(vcpu)) 3232 + return nested_guest_cr0_valid(vcpu, cr0); 3233 + 3234 + if (to_vmx(vcpu)->nested.vmxon) 3235 + return nested_host_cr0_valid(vcpu, cr0); 3236 + 3237 + return true; 3238 + } 3239 + 3253 3240 void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) 3254 3241 { 3255 3242 struct vcpu_vmx *vmx = to_vmx(vcpu); ··· 3270 3235 old_cr0_pg = kvm_read_cr0_bits(vcpu, X86_CR0_PG); 3271 3236 3272 3237 hw_cr0 = (cr0 & ~KVM_VM_CR0_ALWAYS_OFF); 3273 - if (is_unrestricted_guest(vcpu)) 3238 + if (enable_unrestricted_guest) 3274 3239 hw_cr0 |= KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST; 3275 3240 else { 3276 3241 hw_cr0 |= KVM_VM_CR0_ALWAYS_ON; ··· 3298 3263 } 3299 3264 #endif 3300 3265 3301 - if (enable_ept && !is_unrestricted_guest(vcpu)) { 3266 + if (enable_ept && !enable_unrestricted_guest) { 3302 3267 /* 3303 3268 * Ensure KVM has an up-to-date snapshot of the guest's CR3. If 3304 3269 * the below code _enables_ CR3 exiting, vmx_cache_reg() will ··· 3429 3394 * this bit, even if host CR4.MCE == 0. 
3430 3395 */ 3431 3396 hw_cr4 = (cr4_read_shadow() & X86_CR4_MCE) | (cr4 & ~X86_CR4_MCE); 3432 - if (is_unrestricted_guest(vcpu)) 3397 + if (enable_unrestricted_guest) 3433 3398 hw_cr4 |= KVM_VM_CR4_ALWAYS_ON_UNRESTRICTED_GUEST; 3434 3399 else if (vmx->rmode.vm86_active) 3435 3400 hw_cr4 |= KVM_RMODE_VM_CR4_ALWAYS_ON; ··· 3449 3414 vcpu->arch.cr4 = cr4; 3450 3415 kvm_register_mark_available(vcpu, VCPU_EXREG_CR4); 3451 3416 3452 - if (!is_unrestricted_guest(vcpu)) { 3417 + if (!enable_unrestricted_guest) { 3453 3418 if (enable_ept) { 3454 3419 if (!is_paging(vcpu)) { 3455 3420 hw_cr4 &= ~X86_CR4_PAE; ··· 4686 4651 if (kvm_vmx->pid_table) 4687 4652 return 0; 4688 4653 4689 - pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, vmx_get_pid_table_order(kvm)); 4654 + pages = alloc_pages(GFP_KERNEL_ACCOUNT | __GFP_ZERO, 4655 + vmx_get_pid_table_order(kvm)); 4690 4656 if (!pages) 4691 4657 return -ENOMEM; 4692 4658 ··· 5400 5364 val = (val & ~vmcs12->cr0_guest_host_mask) | 5401 5365 (vmcs12->guest_cr0 & vmcs12->cr0_guest_host_mask); 5402 5366 5403 - if (!nested_guest_cr0_valid(vcpu, val)) 5404 - return 1; 5405 - 5406 5367 if (kvm_set_cr0(vcpu, val)) 5407 5368 return 1; 5408 5369 vmcs_writel(CR0_READ_SHADOW, orig_val); 5409 5370 return 0; 5410 5371 } else { 5411 - if (to_vmx(vcpu)->nested.vmxon && 5412 - !nested_host_cr0_valid(vcpu, val)) 5413 - return 1; 5414 - 5415 5372 return kvm_set_cr0(vcpu, val); 5416 5373 } 5417 5374 } ··· 8232 8203 .set_segment = vmx_set_segment, 8233 8204 .get_cpl = vmx_get_cpl, 8234 8205 .get_cs_db_l_bits = vmx_get_cs_db_l_bits, 8206 + .is_valid_cr0 = vmx_is_valid_cr0, 8235 8207 .set_cr0 = vmx_set_cr0, 8236 8208 .is_valid_cr4 = vmx_is_valid_cr4, 8237 8209 .set_cr4 = vmx_set_cr4,
+9 -3
arch/x86/kvm/vmx/vmx_ops.h
··· 10 10 #include "vmcs.h" 11 11 #include "../x86.h" 12 12 13 - void vmread_error(unsigned long field, bool fault); 13 + void vmread_error(unsigned long field); 14 14 void vmwrite_error(unsigned long field, unsigned long value); 15 15 void vmclear_error(struct vmcs *vmcs, u64 phys_addr); 16 16 void vmptrld_error(struct vmcs *vmcs, u64 phys_addr); ··· 31 31 * void vmread_error_trampoline(unsigned long field, bool fault); 32 32 */ 33 33 extern unsigned long vmread_error_trampoline; 34 + 35 + /* 36 + * The second VMREAD error trampoline, called from the assembly trampoline, 37 + * exists primarily to enable instrumentation for the VM-Fail path. 38 + */ 39 + void vmread_error_trampoline2(unsigned long field, bool fault); 40 + 34 41 #endif 35 42 36 43 static __always_inline void vmcs_check16(unsigned long field) ··· 108 101 109 102 do_fail: 110 103 instrumentation_begin(); 111 - WARN_ONCE(1, KBUILD_MODNAME ": vmread failed: field=%lx\n", field); 112 - pr_warn_ratelimited(KBUILD_MODNAME ": vmread failed: field=%lx\n", field); 104 + vmread_error(field); 113 105 instrumentation_end(); 114 106 return 0; 115 107
+34 -16
arch/x86/kvm/x86.c
··· 906 906 } 907 907 EXPORT_SYMBOL_GPL(load_pdptrs); 908 908 909 + static bool kvm_is_valid_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) 910 + { 911 + #ifdef CONFIG_X86_64 912 + if (cr0 & 0xffffffff00000000UL) 913 + return false; 914 + #endif 915 + 916 + if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD)) 917 + return false; 918 + 919 + if ((cr0 & X86_CR0_PG) && !(cr0 & X86_CR0_PE)) 920 + return false; 921 + 922 + return static_call(kvm_x86_is_valid_cr0)(vcpu, cr0); 923 + } 924 + 909 925 void kvm_post_set_cr0(struct kvm_vcpu *vcpu, unsigned long old_cr0, unsigned long cr0) 910 926 { 911 927 /* ··· 968 952 { 969 953 unsigned long old_cr0 = kvm_read_cr0(vcpu); 970 954 955 + if (!kvm_is_valid_cr0(vcpu, cr0)) 956 + return 1; 957 + 971 958 cr0 |= X86_CR0_ET; 972 959 973 - #ifdef CONFIG_X86_64 974 - if (cr0 & 0xffffffff00000000UL) 975 - return 1; 976 - #endif 977 - 960 + /* Write to CR0 reserved bits are ignored, even on Intel. */ 978 961 cr0 &= ~CR0_RESERVED_BITS; 979 - 980 - if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD)) 981 - return 1; 982 - 983 - if ((cr0 & X86_CR0_PG) && !(cr0 & X86_CR0_PE)) 984 - return 1; 985 962 986 963 #ifdef CONFIG_X86_64 987 964 if ((vcpu->arch.efer & EFER_LME) && !is_paging(vcpu) && ··· 2181 2172 u64 data; 2182 2173 fastpath_t ret = EXIT_FASTPATH_NONE; 2183 2174 2175 + kvm_vcpu_srcu_read_lock(vcpu); 2176 + 2184 2177 switch (msr) { 2185 2178 case APIC_BASE_MSR + (APIC_ICR >> 4): 2186 2179 data = kvm_read_edx_eax(vcpu); ··· 2204 2193 2205 2194 if (ret != EXIT_FASTPATH_NONE) 2206 2195 trace_kvm_msr_write(msr, data); 2196 + 2197 + kvm_vcpu_srcu_read_unlock(vcpu); 2207 2198 2208 2199 return ret; 2209 2200 } ··· 10216 10203 if (r < 0) 10217 10204 goto out; 10218 10205 if (r) { 10219 - kvm_queue_interrupt(vcpu, kvm_cpu_get_interrupt(vcpu), false); 10220 - static_call(kvm_x86_inject_irq)(vcpu, false); 10221 - WARN_ON(static_call(kvm_x86_interrupt_allowed)(vcpu, true) < 0); 10206 + int irq = kvm_cpu_get_interrupt(vcpu); 10207 + 10208 + if (!WARN_ON_ONCE(irq == -1)) { 10209 + kvm_queue_interrupt(vcpu, irq, false); 10210 + static_call(kvm_x86_inject_irq)(vcpu, false); 10211 + WARN_ON(static_call(kvm_x86_interrupt_allowed)(vcpu, true) < 0); 10212 + } 10222 10213 } 10223 10214 if (kvm_cpu_has_injectable_intr(vcpu)) 10224 10215 static_call(kvm_x86_enable_irq_window)(vcpu); ··· 11477 11460 return false; 11478 11461 } 11479 11462 11480 - return kvm_is_valid_cr4(vcpu, sregs->cr4); 11463 + return kvm_is_valid_cr4(vcpu, sregs->cr4) && 11464 + kvm_is_valid_cr0(vcpu, sregs->cr0); 11481 11465 } 11482 11466 11483 11467 static int __set_sregs_common(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs, ··· 13203 13185 13204 13186 bool kvm_arch_has_irq_bypass(void) 13205 13187 { 13206 - return true; 13188 + return enable_apicv && irq_remapping_cap(IRQ_POSTING_CAP); 13207 13189 } 13208 13190 13209 13191 int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *cons,
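Note: kvm_is_valid_cr0() centralizes the architectural CR0 invariants that kvm_set_cr0() used to open-code, so the KVM_SET_SREGS path now validates CR0 the same way: the upper 32 bits are reserved, CR0.NW requires CR0.CD, and CR0.PG requires CR0.PE. A standalone sketch of the same checks (bit positions per the x86 architecture; the helper itself is illustrative, not KVM's):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define X86_CR0_PE      (1UL << 0)
    #define X86_CR0_NW      (1UL << 29)
    #define X86_CR0_CD      (1UL << 30)
    #define X86_CR0_PG      (1UL << 31)

    static bool cr0_is_valid(uint64_t cr0)
    {
            if (cr0 >> 32)          /* upper 32 bits are reserved */
                    return false;
            if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD))
                    return false;   /* not-write-through requires cache disable */
            if ((cr0 & X86_CR0_PG) && !(cr0 & X86_CR0_PE))
                    return false;   /* paging requires protected mode */
            return true;
    }

    int main(void)
    {
            printf("%d %d\n", cr0_is_valid(X86_CR0_PG),             /* 0 */
                   cr0_is_valid(X86_CR0_PG | X86_CR0_PE));          /* 1 */
            return 0;
    }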
-3
drivers/acpi/arm64/iort.c
··· 1007 1007 for (i = 0; i < node->mapping_count; i++, map++) { 1008 1008 struct acpi_iort_node *parent; 1009 1009 1010 - if (!map->id_count) 1011 - continue; 1012 - 1013 1010 parent = ACPI_ADD_PTR(struct acpi_iort_node, iort_table, 1014 1011 map->output_reference); 1015 1012 if (parent != iommu)
+2 -2
drivers/ata/libata-core.c
··· 4938 4938 if (qc->result_tf.status & ATA_SENSE && 4939 4939 ((ata_is_ncq(qc->tf.protocol) && 4940 4940 dev->flags & ATA_DFLAG_CDL_ENABLED) || 4941 - (!(ata_is_ncq(qc->tf.protocol) && 4942 - ata_id_sense_reporting_enabled(dev->id))))) { 4941 + (!ata_is_ncq(qc->tf.protocol) && 4942 + ata_id_sense_reporting_enabled(dev->id)))) { 4943 4943 /* 4944 4944 * Tell SCSI EH to not overwrite scmd->result even if 4945 4945 * this command is finished with result SAM_STAT_GOOD.
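Note: the libata fix is pure parenthesization. In the old grouping the ! negated the whole (NCQ && sense-reporting) conjunction, so any non-NCQ command matched even with sense reporting disabled; the intent is "NCQ with CDL enabled, or non-NCQ with sense reporting enabled". A standalone sketch showing where the two groupings diverge (illustrative values):

    #include <stdbool.h>
    #include <stdio.h>

    int main(void)
    {
            bool ncq = false, cdl = false, sense_reporting = false;

            /* old: the ! applies to the whole conjunction */
            bool old_cond = (ncq && cdl) || !(ncq && sense_reporting);
            /* new: non-NCQ *and* sense reporting enabled */
            bool new_cond = (ncq && cdl) || (!ncq && sense_reporting);

            printf("old=%d new=%d\n", old_cond, new_cond);  /* old=1 new=0 */
            return 0;
    }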
+2 -1
drivers/ata/pata_arasan_cf.c
··· 529 529 /* dma_request_channel may sleep, so calling from process context */ 530 530 acdev->dma_chan = dma_request_chan(acdev->host->dev, "data"); 531 531 if (IS_ERR(acdev->dma_chan)) { 532 - dev_err(acdev->host->dev, "Unable to get dma_chan\n"); 532 + dev_err_probe(acdev->host->dev, PTR_ERR(acdev->dma_chan), 533 + "Unable to get dma_chan\n"); 533 534 acdev->dma_chan = NULL; 534 535 goto chan_request_fail; 535 536 }
+1 -1
drivers/ata/pata_ns87415.c
··· 260 260 * LOCKING: 261 261 * Inherited from caller. 262 262 */ 263 - void ns87560_tf_read(struct ata_port *ap, struct ata_taskfile *tf) 263 + static void ns87560_tf_read(struct ata_port *ap, struct ata_taskfile *tf) 264 264 { 265 265 struct ata_ioports *ioaddr = &ap->ioaddr; 266 266
+3 -3
drivers/ata/pata_octeon_cf.c
··· 815 815 irq_handler_t irq_handler = NULL; 816 816 void __iomem *base; 817 817 struct octeon_cf_port *cf_port; 818 - int rv = -ENOMEM; 819 818 u32 bus_width; 819 + int rv; 820 820 821 821 node = pdev->dev.of_node; 822 822 if (node == NULL) ··· 893 893 cs0 = devm_ioremap(&pdev->dev, res_cs0->start, 894 894 resource_size(res_cs0)); 895 895 if (!cs0) 896 - return rv; 896 + return -ENOMEM; 897 897 898 898 /* allocate host */ 899 899 host = ata_host_alloc(&pdev->dev, 1); 900 900 if (!host) 901 - return rv; 901 + return -ENOMEM; 902 902 903 903 ap = host->ports[0]; 904 904 ap->private_data = cf_port;
+1
drivers/base/power/power.h
··· 29 29 #define WAKE_IRQ_DEDICATED_MASK (WAKE_IRQ_DEDICATED_ALLOCATED | \ 30 30 WAKE_IRQ_DEDICATED_MANAGED | \ 31 31 WAKE_IRQ_DEDICATED_REVERSE) 32 + #define WAKE_IRQ_DEDICATED_ENABLED BIT(3) 32 33 33 34 struct wake_irq { 34 35 struct device *dev;
+8 -53
drivers/base/power/wakeirq.c
··· 194 194 return err; 195 195 } 196 196 197 - 198 197 /** 199 198 * dev_pm_set_dedicated_wake_irq - Request a dedicated wake-up interrupt 200 199 * @dev: Device entry ··· 205 206 * Sets up a threaded interrupt handler for a device that has 206 207 * a dedicated wake-up interrupt in addition to the device IO 207 208 * interrupt. 208 - * 209 - * The interrupt starts disabled, and needs to be managed for 210 - * the device by the bus code or the device driver using 211 - * dev_pm_enable_wake_irq*() and dev_pm_disable_wake_irq*() 212 - * functions. 213 209 */ 214 210 int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq) 215 211 { ··· 226 232 * the status of WAKE_IRQ_DEDICATED_REVERSE to tell rpm_suspend() 227 233 * to enable dedicated wake-up interrupt after running the runtime suspend 228 234 * callback for @dev. 229 - * 230 - * The interrupt starts disabled, and needs to be managed for 231 - * the device by the bus code or the device driver using 232 - * dev_pm_enable_wake_irq*() and dev_pm_disable_wake_irq*() 233 - * functions. 234 235 */ 235 236 int dev_pm_set_dedicated_wake_irq_reverse(struct device *dev, int irq) 236 237 { 237 238 return __dev_pm_set_dedicated_wake_irq(dev, irq, WAKE_IRQ_DEDICATED_REVERSE); 238 239 } 239 240 EXPORT_SYMBOL_GPL(dev_pm_set_dedicated_wake_irq_reverse); 240 - 241 - /** 242 - * dev_pm_enable_wake_irq - Enable device wake-up interrupt 243 - * @dev: Device 244 - * 245 - * Optionally called from the bus code or the device driver for 246 - * runtime_resume() to override the PM runtime core managed wake-up 247 - * interrupt handling to enable the wake-up interrupt. 248 - * 249 - * Note that for runtime_suspend()) the wake-up interrupts 250 - * should be unconditionally enabled unlike for suspend() 251 - * that is conditional. 252 - */ 253 - void dev_pm_enable_wake_irq(struct device *dev) 254 - { 255 - struct wake_irq *wirq = dev->power.wakeirq; 256 - 257 - if (wirq && (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED)) 258 - enable_irq(wirq->irq); 259 - } 260 - EXPORT_SYMBOL_GPL(dev_pm_enable_wake_irq); 261 - 262 - /** 263 - * dev_pm_disable_wake_irq - Disable device wake-up interrupt 264 - * @dev: Device 265 - * 266 - * Optionally called from the bus code or the device driver for 267 - * runtime_suspend() to override the PM runtime core managed wake-up 268 - * interrupt handling to disable the wake-up interrupt. 
269 - */ 270 - void dev_pm_disable_wake_irq(struct device *dev) 271 - { 272 - struct wake_irq *wirq = dev->power.wakeirq; 273 - 274 - if (wirq && (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED)) 275 - disable_irq_nosync(wirq->irq); 276 - } 277 - EXPORT_SYMBOL_GPL(dev_pm_disable_wake_irq); 278 241 279 242 /** 280 243 * dev_pm_enable_wake_irq_check - Checks and enables wake-up interrupt ··· 265 314 return; 266 315 267 316 enable: 268 - if (!can_change_status || !(wirq->status & WAKE_IRQ_DEDICATED_REVERSE)) 317 + if (!can_change_status || !(wirq->status & WAKE_IRQ_DEDICATED_REVERSE)) { 269 318 enable_irq(wirq->irq); 319 + wirq->status |= WAKE_IRQ_DEDICATED_ENABLED; 320 + } 270 321 } 271 322 272 323 /** ··· 289 336 if (cond_disable && (wirq->status & WAKE_IRQ_DEDICATED_REVERSE)) 290 337 return; 291 338 292 - if (wirq->status & WAKE_IRQ_DEDICATED_MANAGED) 339 + if (wirq->status & WAKE_IRQ_DEDICATED_MANAGED) { 340 + wirq->status &= ~WAKE_IRQ_DEDICATED_ENABLED; 293 341 disable_irq_nosync(wirq->irq); 342 + } 294 343 } 295 344 296 345 /** ··· 331 376 332 377 if (device_may_wakeup(wirq->dev)) { 333 378 if (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED && 334 - !pm_runtime_status_suspended(wirq->dev)) 379 + !(wirq->status & WAKE_IRQ_DEDICATED_ENABLED)) 335 380 enable_irq(wirq->irq); 336 381 337 382 enable_irq_wake(wirq->irq); ··· 354 399 disable_irq_wake(wirq->irq); 355 400 356 401 if (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED && 357 - !pm_runtime_status_suspended(wirq->dev)) 402 + !(wirq->status & WAKE_IRQ_DEDICATED_ENABLED)) 358 403 disable_irq_nosync(wirq->irq); 359 404 } 360 405 }
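Note: rather than inferring the IRQ state from pm_runtime_status_suspended(), the wake IRQ code now records whether it actually called enable_irq() in the new WAKE_IRQ_DEDICATED_ENABLED bit, keeping enable/disable calls balanced across system suspend/resume. A standalone sketch of the flag-guarded pattern (simplified model, not the driver core code):

    #include <stdio.h>

    #define WAKE_IRQ_DEDICATED_ENABLED      (1 << 3)

    static unsigned int status;

    static void wake_irq_enable(void)
    {
            if (!(status & WAKE_IRQ_DEDICATED_ENABLED)) {
                    printf("enable_irq()\n");       /* called at most once */
                    status |= WAKE_IRQ_DEDICATED_ENABLED;
            }
    }

    static void wake_irq_disable(void)
    {
            if (status & WAKE_IRQ_DEDICATED_ENABLED) {
                    printf("disable_irq_nosync()\n");
                    status &= ~WAKE_IRQ_DEDICATED_ENABLED;
            }
    }

    int main(void)
    {
            wake_irq_enable();
            wake_irq_enable();      /* no-op: already enabled, stays balanced */
            wake_irq_disable();
            return 0;
    }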
+86 -38
drivers/block/rbd.c
··· 3849 3849 list_splice_tail_init(&rbd_dev->acquiring_list, &rbd_dev->running_list); 3850 3850 } 3851 3851 3852 - static int get_lock_owner_info(struct rbd_device *rbd_dev, 3853 - struct ceph_locker **lockers, u32 *num_lockers) 3852 + static bool locker_equal(const struct ceph_locker *lhs, 3853 + const struct ceph_locker *rhs) 3854 + { 3855 + return lhs->id.name.type == rhs->id.name.type && 3856 + lhs->id.name.num == rhs->id.name.num && 3857 + !strcmp(lhs->id.cookie, rhs->id.cookie) && 3858 + ceph_addr_equal_no_type(&lhs->info.addr, &rhs->info.addr); 3859 + } 3860 + 3861 + static void free_locker(struct ceph_locker *locker) 3862 + { 3863 + if (locker) 3864 + ceph_free_lockers(locker, 1); 3865 + } 3866 + 3867 + static struct ceph_locker *get_lock_owner_info(struct rbd_device *rbd_dev) 3854 3868 { 3855 3869 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc; 3870 + struct ceph_locker *lockers; 3871 + u32 num_lockers; 3856 3872 u8 lock_type; 3857 3873 char *lock_tag; 3874 + u64 handle; 3858 3875 int ret; 3859 - 3860 - dout("%s rbd_dev %p\n", __func__, rbd_dev); 3861 3876 3862 3877 ret = ceph_cls_lock_info(osdc, &rbd_dev->header_oid, 3863 3878 &rbd_dev->header_oloc, RBD_LOCK_NAME, 3864 - &lock_type, &lock_tag, lockers, num_lockers); 3865 - if (ret) 3866 - return ret; 3879 + &lock_type, &lock_tag, &lockers, &num_lockers); 3880 + if (ret) { 3881 + rbd_warn(rbd_dev, "failed to retrieve lockers: %d", ret); 3882 + return ERR_PTR(ret); 3883 + } 3867 3884 3868 - if (*num_lockers == 0) { 3885 + if (num_lockers == 0) { 3869 3886 dout("%s rbd_dev %p no lockers detected\n", __func__, rbd_dev); 3887 + lockers = NULL; 3870 3888 goto out; 3871 3889 } 3872 3890 3873 3891 if (strcmp(lock_tag, RBD_LOCK_TAG)) { 3874 3892 rbd_warn(rbd_dev, "locked by external mechanism, tag %s", 3875 3893 lock_tag); 3876 - ret = -EBUSY; 3877 - goto out; 3894 + goto err_busy; 3878 3895 } 3879 3896 3880 - if (lock_type == CEPH_CLS_LOCK_SHARED) { 3881 - rbd_warn(rbd_dev, "shared lock type detected"); 3882 - ret = -EBUSY; 3883 - goto out; 3897 + if (lock_type != CEPH_CLS_LOCK_EXCLUSIVE) { 3898 + rbd_warn(rbd_dev, "incompatible lock type detected"); 3899 + goto err_busy; 3884 3900 } 3885 3901 3886 - if (strncmp((*lockers)[0].id.cookie, RBD_LOCK_COOKIE_PREFIX, 3887 - strlen(RBD_LOCK_COOKIE_PREFIX))) { 3902 + WARN_ON(num_lockers != 1); 3903 + ret = sscanf(lockers[0].id.cookie, RBD_LOCK_COOKIE_PREFIX " %llu", 3904 + &handle); 3905 + if (ret != 1) { 3888 3906 rbd_warn(rbd_dev, "locked by external mechanism, cookie %s", 3889 - (*lockers)[0].id.cookie); 3890 - ret = -EBUSY; 3891 - goto out; 3907 + lockers[0].id.cookie); 3908 + goto err_busy; 3892 3909 } 3910 + if (ceph_addr_is_blank(&lockers[0].info.addr)) { 3911 + rbd_warn(rbd_dev, "locker has a blank address"); 3912 + goto err_busy; 3913 + } 3914 + 3915 + dout("%s rbd_dev %p got locker %s%llu@%pISpc/%u handle %llu\n", 3916 + __func__, rbd_dev, ENTITY_NAME(lockers[0].id.name), 3917 + &lockers[0].info.addr.in_addr, 3918 + le32_to_cpu(lockers[0].info.addr.nonce), handle); 3893 3919 3894 3920 out: 3895 3921 kfree(lock_tag); 3896 - return ret; 3922 + return lockers; 3923 + 3924 + err_busy: 3925 + kfree(lock_tag); 3926 + ceph_free_lockers(lockers, num_lockers); 3927 + return ERR_PTR(-EBUSY); 3897 3928 } 3898 3929 3899 3930 static int find_watcher(struct rbd_device *rbd_dev, ··· 3978 3947 static int rbd_try_lock(struct rbd_device *rbd_dev) 3979 3948 { 3980 3949 struct ceph_client *client = rbd_dev->rbd_client->client; 3981 - struct ceph_locker *lockers; 3982 - u32 num_lockers; 
3950 + struct ceph_locker *locker, *refreshed_locker; 3983 3951 int ret; 3984 3952 3985 3953 for (;;) { 3954 + locker = refreshed_locker = NULL; 3955 + 3986 3956 ret = rbd_lock(rbd_dev); 3987 3957 if (ret != -EBUSY) 3988 - return ret; 3958 + goto out; 3989 3959 3990 3960 /* determine if the current lock holder is still alive */ 3991 - ret = get_lock_owner_info(rbd_dev, &lockers, &num_lockers); 3992 - if (ret) 3993 - return ret; 3994 - 3995 - if (num_lockers == 0) 3961 + locker = get_lock_owner_info(rbd_dev); 3962 + if (IS_ERR(locker)) { 3963 + ret = PTR_ERR(locker); 3964 + locker = NULL; 3965 + goto out; 3966 + } 3967 + if (!locker) 3996 3968 goto again; 3997 3969 3998 - ret = find_watcher(rbd_dev, lockers); 3970 + ret = find_watcher(rbd_dev, locker); 3999 3971 if (ret) 4000 3972 goto out; /* request lock or error */ 4001 3973 3974 + refreshed_locker = get_lock_owner_info(rbd_dev); 3975 + if (IS_ERR(refreshed_locker)) { 3976 + ret = PTR_ERR(refreshed_locker); 3977 + refreshed_locker = NULL; 3978 + goto out; 3979 + } 3980 + if (!refreshed_locker || 3981 + !locker_equal(locker, refreshed_locker)) 3982 + goto again; 3983 + 4002 3984 rbd_warn(rbd_dev, "breaking header lock owned by %s%llu", 4003 - ENTITY_NAME(lockers[0].id.name)); 3985 + ENTITY_NAME(locker->id.name)); 4004 3986 4005 3987 ret = ceph_monc_blocklist_add(&client->monc, 4006 - &lockers[0].info.addr); 3988 + &locker->info.addr); 4007 3989 if (ret) { 4008 - rbd_warn(rbd_dev, "blocklist of %s%llu failed: %d", 4009 - ENTITY_NAME(lockers[0].id.name), ret); 3990 + rbd_warn(rbd_dev, "failed to blocklist %s%llu: %d", 3991 + ENTITY_NAME(locker->id.name), ret); 4010 3992 goto out; 4011 3993 } 4012 3994 4013 3995 ret = ceph_cls_break_lock(&client->osdc, &rbd_dev->header_oid, 4014 3996 &rbd_dev->header_oloc, RBD_LOCK_NAME, 4015 - lockers[0].id.cookie, 4016 - &lockers[0].id.name); 4017 - if (ret && ret != -ENOENT) 3997 + locker->id.cookie, &locker->id.name); 3998 + if (ret && ret != -ENOENT) { 3999 + rbd_warn(rbd_dev, "failed to break header lock: %d", 4000 + ret); 4018 4001 goto out; 4002 + } 4019 4003 4020 4004 again: 4021 - ceph_free_lockers(lockers, num_lockers); 4005 + free_locker(refreshed_locker); 4006 + free_locker(locker); 4022 4007 } 4023 4008 4024 4009 out: 4025 - ceph_free_lockers(lockers, num_lockers); 4010 + free_locker(refreshed_locker); 4011 + free_locker(locker); 4026 4012 return ret; 4027 4013 } 4028 4014
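Note: rbd_try_lock() now re-reads the lock owner after the potentially slow find_watcher() step and acts only if the second read still matches the first, so a lock that changed hands in the meantime is retried instead of having its new owner blocklisted. A standalone sketch of the snapshot-recheck pattern (illustrative types):

    #include <stdio.h>
    #include <string.h>

    struct owner { char cookie[16]; };

    static struct owner shared = { "cookie-1" };

    static struct owner snapshot(void) { return shared; }

    int main(void)
    {
            struct owner before = snapshot();

            /* ... slow check runs here; the owner may change meanwhile ... */
            strcpy(shared.cookie, "cookie-2");

            struct owner after = snapshot();
            if (strcmp(before.cookie, after.cookie) != 0)
                    printf("owner changed, retry instead of evicting\n");
            return 0;
    }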
+7 -4
drivers/block/ublk_drv.c
··· 1847 1847 if (ublksrv_pid <= 0) 1848 1848 return -EINVAL; 1849 1849 1850 - wait_for_completion_interruptible(&ub->completion); 1850 + if (wait_for_completion_interruptible(&ub->completion) != 0) 1851 + return -EINTR; 1851 1852 1852 1853 schedule_delayed_work(&ub->monitor_work, UBLK_DAEMON_MONITOR_PERIOD); 1853 1854 ··· 2126 2125 * - the device number is freed already, we will not find this 2127 2126 * device via ublk_get_device_from_id() 2128 2127 */ 2129 - wait_event_interruptible(ublk_idr_wq, ublk_idr_freed(idx)); 2130 - 2128 + if (wait_event_interruptible(ublk_idr_wq, ublk_idr_freed(idx))) 2129 + return -EINTR; 2131 2130 return 0; 2132 2131 } 2133 2132 ··· 2324 2323 pr_devel("%s: Waiting for new ubq_daemons(nr: %d) are ready, dev id %d...\n", 2325 2324 __func__, ub->dev_info.nr_hw_queues, header->dev_id); 2326 2325 /* wait until new ubq_daemon sending all FETCH_REQ */ 2327 - wait_for_completion_interruptible(&ub->completion); 2326 + if (wait_for_completion_interruptible(&ub->completion)) 2327 + return -EINTR; 2328 + 2328 2329 pr_devel("%s: All new ubq_daemons(nr: %d) are ready, dev id %d\n", 2329 2330 __func__, ub->dev_info.nr_hw_queues, header->dev_id); 2330 2331
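Note: the interruptible waits return nonzero when a signal arrives; ignoring that and pressing on would operate on a device that never finished initializing. The fix propagates -EINTR to the caller instead. A standalone sketch of the check-and-propagate pattern (wait_ready() stands in for wait_for_completion_interruptible()):

    #include <errno.h>
    #include <stdio.h>

    static int interrupted = 1;     /* pretend a signal arrived */

    static int wait_ready(void)
    {
            return interrupted ? -EINTR : 0;
    }

    static int start_dev(void)
    {
            if (wait_ready() != 0)
                    return -EINTR;  /* abort instead of touching the device */
            return 0;               /* proceed only once the device is ready */
    }

    int main(void)
    {
            printf("start_dev() = %d\n", start_dev());
            return 0;
    }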
+1 -1
drivers/char/tpm/st33zp24/i2c.c
··· 160 160 .of_match_table = of_match_ptr(of_st33zp24_i2c_match), 161 161 .acpi_match_table = ACPI_PTR(st33zp24_i2c_acpi_match), 162 162 }, 163 - .probe_new = st33zp24_i2c_probe, 163 + .probe = st33zp24_i2c_probe, 164 164 .remove = st33zp24_i2c_remove, 165 165 .id_table = st33zp24_i2c_id 166 166 };
+1 -1
drivers/char/tpm/tpm_i2c_atmel.c
··· 203 203 204 204 static struct i2c_driver i2c_atmel_driver = { 205 205 .id_table = i2c_atmel_id, 206 - .probe_new = i2c_atmel_probe, 206 + .probe = i2c_atmel_probe, 207 207 .remove = i2c_atmel_remove, 208 208 .driver = { 209 209 .name = I2C_DRIVER_NAME,
+1 -1
drivers/char/tpm/tpm_i2c_infineon.c
··· 716 716 717 717 static struct i2c_driver tpm_tis_i2c_driver = { 718 718 .id_table = tpm_tis_i2c_table, 719 - .probe_new = tpm_tis_i2c_probe, 719 + .probe = tpm_tis_i2c_probe, 720 720 .remove = tpm_tis_i2c_remove, 721 721 .driver = { 722 722 .name = "tpm_i2c_infineon",
+1 -1
drivers/char/tpm/tpm_i2c_nuvoton.c
··· 650 650 651 651 static struct i2c_driver i2c_nuvoton_driver = { 652 652 .id_table = i2c_nuvoton_id, 653 - .probe_new = i2c_nuvoton_probe, 653 + .probe = i2c_nuvoton_probe, 654 654 .remove = i2c_nuvoton_remove, 655 655 .driver = { 656 656 .name = "tpm_i2c_nuvoton",
+7 -2
drivers/char/tpm/tpm_tis_core.c
··· 366 366 goto out; 367 367 } 368 368 369 - size += recv_data(chip, &buf[TPM_HEADER_SIZE], 370 - expected - TPM_HEADER_SIZE); 369 + rc = recv_data(chip, &buf[TPM_HEADER_SIZE], 370 + expected - TPM_HEADER_SIZE); 371 + if (rc < 0) { 372 + size = rc; 373 + goto out; 374 + } 375 + size += rc; 371 376 if (size < expected) { 372 377 dev_err(&chip->dev, "Unable to read remainder of result\n"); 373 378 size = -ETIME;
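Note: recv_data() follows the usual count-or-negative-errno convention, and the old code added its return value straight into size, folding an error code into the byte count. A standalone sketch of handling that convention (read_chunk() is a stand-in):

    #include <errno.h>
    #include <stdio.h>

    static int read_chunk(int fail)
    {
            return fail ? -ETIME : 8;       /* bytes read, or negative errno */
    }

    int main(void)
    {
            int size = 10;                  /* header bytes already read */
            int rc = read_chunk(1);

            if (rc < 0)
                    size = rc;              /* propagate the error... */
            else
                    size += rc;             /* ...instead of adding it */

            printf("size=%d\n", size);
            return 0;
    }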
+1 -1
drivers/char/tpm/tpm_tis_i2c.c
··· 394 394 .pm = &tpm_tis_pm, 395 395 .of_match_table = of_match_ptr(of_tis_i2c_match), 396 396 }, 397 - .probe_new = tpm_tis_i2c_probe, 397 + .probe = tpm_tis_i2c_probe, 398 398 .remove = tpm_tis_i2c_remove, 399 399 .id_table = tpm_tis_i2c_id, 400 400 };
+1 -1
drivers/char/tpm/tpm_tis_i2c_cr50.c
··· 779 779 static SIMPLE_DEV_PM_OPS(cr50_i2c_pm, tpm_pm_suspend, tpm_pm_resume); 780 780 781 781 static struct i2c_driver cr50_i2c_driver = { 782 - .probe_new = tpm_cr50_i2c_probe, 782 + .probe = tpm_cr50_i2c_probe, 783 783 .remove = tpm_cr50_i2c_remove, 784 784 .driver = { 785 785 .name = "cr50_i2c",
+2 -1
drivers/cxl/Kconfig
··· 2 2 menuconfig CXL_BUS 3 3 tristate "CXL (Compute Express Link) Devices Support" 4 4 depends on PCI 5 + select FW_LOADER 6 + select FW_UPLOAD 5 7 select PCI_DOE 6 8 help 7 9 CXL is a bus that is electrically compatible with PCI Express, but ··· 84 82 config CXL_MEM 85 83 tristate "CXL: Memory Expansion" 86 84 depends on CXL_PCI 87 - select FW_UPLOAD 88 85 default CXL_BUS 89 86 help 90 87 The CXL.mem protocol allows a device to act as a provider of "System
+2 -3
drivers/cxl/acpi.c
··· 296 296 else 297 297 rc = cxl_decoder_autoremove(dev, cxld); 298 298 if (rc) { 299 - dev_err(dev, "Failed to add decode range [%#llx - %#llx]\n", 300 - cxld->hpa_range.start, cxld->hpa_range.end); 301 - return 0; 299 + dev_err(dev, "Failed to add decode range: %pr", res); 300 + return rc; 302 301 } 303 302 dev_dbg(dev, "add: %s node: %d range [%#llx - %#llx]\n", 304 303 dev_name(&cxld->dev),
+1 -1
drivers/cxl/cxlmem.h
··· 323 323 324 324 /* FW state bits */ 325 325 #define CXL_FW_STATE_BITS 32 326 - #define CXL_FW_CANCEL BIT(0) 326 + #define CXL_FW_CANCEL 0 327 327 328 328 /** 329 329 * struct cxl_fw_state - Firmware upload / activation state
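Note: the CXL_FW_CANCEL change is the classic bit-flag mixup: set_bit()/test_bit()-style helpers take a bit *number*, while BIT() builds a *mask*, so BIT(0) passed as an index addresses bit 1. A standalone sketch of the failure mode (test_bit() here is a simplified stand-in for the kernel helper):

    #include <stdio.h>

    #define BIT(n)  (1UL << (n))

    static int test_bit(unsigned int nr, const unsigned long *addr)
    {
            return (*addr >> nr) & 1;
    }

    int main(void)
    {
            unsigned long state = BIT(0);   /* bit 0 is set */

            /* passing BIT(0) == 1 as the index tests bit 1 -- the wrong bit */
            printf("as mask:  %d\n", test_bit(BIT(0), &state));     /* 0 */
            printf("as index: %d\n", test_bit(0, &state));          /* 1 */
            return 0;
    }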
+3 -3
drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
··· 498 498 return 0; 499 499 500 500 failed2: 501 - amdgpu_bo_free_kernel(&psp->fw_pri_bo, 502 - &psp->fw_pri_mc_addr, &psp->fw_pri_buf); 503 - failed1: 504 501 amdgpu_bo_free_kernel(&psp->fence_buf_bo, 505 502 &psp->fence_buf_mc_addr, &psp->fence_buf); 503 + failed1: 504 + amdgpu_bo_free_kernel(&psp->fw_pri_bo, 505 + &psp->fw_pri_mc_addr, &psp->fw_pri_buf); 506 506 return ret; 507 507 } 508 508
+2 -1
drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
··· 46 46 #define RLCG_UCODE_LOADING_START_ADDRESS 0x00002000L 47 47 48 48 #define GOLDEN_GB_ADDR_CONFIG 0x2a114042 49 + #define CP_HQD_PERSISTENT_STATE_DEFAULT 0xbe05301 49 50 50 51 struct amdgpu_gfx_ras gfx_v9_4_3_ras; 51 52 ··· 1737 1736 1738 1737 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_IQ_TIMER, 0); 1739 1738 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_IB_CONTROL, 0); 1740 - WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PERSISTENT_STATE, 0); 1739 + WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PERSISTENT_STATE, CP_HQD_PERSISTENT_STATE_DEFAULT); 1741 1740 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_DOORBELL_CONTROL, 0x40000000); 1742 1741 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_DOORBELL_CONTROL, 0); 1743 1742 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_RPTR, 0);
+1 -4
drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c
··· 402 402 static int gfxhub_v1_2_xcc_gart_enable(struct amdgpu_device *adev, 403 403 uint32_t xcc_mask) 404 404 { 405 - uint32_t tmp_mask; 406 405 int i; 407 406 408 - tmp_mask = xcc_mask; 409 407 /* 410 408 * MC_VM_FB_LOCATION_BASE/TOP is NULL for VF, because they are 411 409 * VF copy registers so vbios post doesn't program them, for 412 410 * SRIOV driver need to program them 413 411 */ 414 412 if (amdgpu_sriov_vf(adev)) { 415 - for_each_inst(i, tmp_mask) { 416 - i = ffs(tmp_mask) - 1; 413 + for_each_inst(i, xcc_mask) { 417 414 WREG32_SOC15_RLC(GC, GET_INST(GC, i), regMC_VM_FB_LOCATION_BASE, 418 415 adev->gmc.vram_start >> 24); 419 416 WREG32_SOC15_RLC(GC, GET_INST(GC, i), regMC_VM_FB_LOCATION_TOP,
+2 -3
drivers/gpu/drm/amd/amdkfd/kfd_debug.c
··· 302 302 if (!q) 303 303 return 0; 304 304 305 - if (KFD_GC_VERSION(q->device) < IP_VERSION(11, 0, 0) || 306 - KFD_GC_VERSION(q->device) >= IP_VERSION(12, 0, 0)) 305 + if (!kfd_dbg_has_cwsr_workaround(q->device)) 307 306 return 0; 308 307 309 308 if (enable && q->properties.is_user_cu_masked) ··· 348 349 { 349 350 uint32_t spi_dbg_cntl = pdd->spi_dbg_override | pdd->spi_dbg_launch_mode; 350 351 uint32_t flags = pdd->process->dbg_flags; 351 - bool sq_trap_en = !!spi_dbg_cntl; 352 + bool sq_trap_en = !!spi_dbg_cntl || !kfd_dbg_has_cwsr_workaround(pdd->dev); 352 353 353 354 if (!kfd_dbg_is_per_vmid_supported(pdd->dev)) 354 355 return 0;
+6
drivers/gpu/drm/amd/amdkfd/kfd_debug.h
··· 100 100 KFD_GC_VERSION(dev) == IP_VERSION(10, 1, 1)); 101 101 } 102 102 103 + static inline bool kfd_dbg_has_cwsr_workaround(struct kfd_node *dev) 104 + { 105 + return KFD_GC_VERSION(dev) >= IP_VERSION(11, 0, 0) && 106 + KFD_GC_VERSION(dev) <= IP_VERSION(11, 0, 3); 107 + } 108 + 103 109 static inline bool kfd_dbg_has_gws_support(struct kfd_node *dev) 104 110 { 105 111 if ((KFD_GC_VERSION(dev) == IP_VERSION(9, 0, 1)
+2 -4
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
··· 226 226 queue_input.paging = false; 227 227 queue_input.tba_addr = qpd->tba_addr; 228 228 queue_input.tma_addr = qpd->tma_addr; 229 - queue_input.trap_en = KFD_GC_VERSION(q->device) < IP_VERSION(11, 0, 0) || 230 - KFD_GC_VERSION(q->device) > IP_VERSION(11, 0, 3); 229 + queue_input.trap_en = !kfd_dbg_has_cwsr_workaround(q->device); 231 230 queue_input.skip_process_ctx_clear = qpd->pqm->process->debug_trap_enabled; 232 231 233 232 queue_type = convert_to_mes_queue_type(q->properties.type); ··· 1805 1806 */ 1806 1807 q->properties.is_evicted = !!qpd->evicted; 1807 1808 q->properties.is_dbg_wa = qpd->pqm->process->debug_trap_enabled && 1808 - KFD_GC_VERSION(q->device) >= IP_VERSION(11, 0, 0) && 1809 - KFD_GC_VERSION(q->device) <= IP_VERSION(11, 0, 3); 1809 + kfd_dbg_has_cwsr_workaround(q->device); 1810 1810 1811 1811 if (qd) 1812 1812 mqd_mgr->restore_mqd(mqd_mgr, &q->mqd, q->mqd_mem_obj, &q->gart_mqd_addr,
+1 -1
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
··· 706 706 707 707 if (retry == 3) { 708 708 DRM_ERROR("Failed to ack MST event.\n"); 709 - return; 709 + break; 710 710 } 711 711 712 712 drm_dp_mst_hpd_irq_send_new_request(&aconnector->mst_mgr);
+3
drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
··· 1792 1792 hws->funcs.edp_backlight_control(edp_link_with_sink, false); 1793 1793 } 1794 1794 /*resume from S3, no vbios posting, no need to power down again*/ 1795 + clk_mgr_exit_optimized_pwr_state(dc, dc->clk_mgr); 1796 + 1795 1797 power_down_all_hw_blocks(dc); 1796 1798 disable_vga_and_power_gate_all_controllers(dc); 1797 1799 if (edp_link_with_sink && !keep_edp_vdd_on) 1798 1800 dc->hwss.edp_power_control(edp_link_with_sink, false); 1801 + clk_mgr_optimize_pwr_state(dc, dc->clk_mgr); 1799 1802 } 1800 1803 bios_set_scratch_acc_mode_change(dc->ctx->dc_bios, 1); 1801 1804 }
+2 -1
drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.c
··· 84 84 struct dcn_dccg *dccg_dcn, 85 85 enum phyd32clk_clock_source src) 86 86 { 87 - if (dccg_dcn->base.ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0) { 87 + if (dccg_dcn->base.ctx->asic_id.chip_family == FAMILY_YELLOW_CARP && 88 + dccg_dcn->base.ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0) { 88 89 if (src == PHYD32CLKC) 89 90 src = PHYD32CLKF; 90 91 if (src == PHYD32CLKD)
+4 -1
drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.c
··· 49 49 uint32_t dispclk_rdivider_value = 0; 50 50 51 51 REG_GET(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_RDIVIDER, &dispclk_rdivider_value); 52 - REG_UPDATE(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_WDIVIDER, dispclk_rdivider_value); 52 + 53 + /* Not valid for the WDIVIDER to be set to 0 */ 54 + if (dispclk_rdivider_value != 0) 55 + REG_UPDATE(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_WDIVIDER, dispclk_rdivider_value); 53 56 } 54 57 55 58 static void dccg32_get_pixel_rate_div(
+1 -1
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
··· 1734 1734 gpu_metrics->average_vclk1_frequency = metrics->AverageVclk1Frequency; 1735 1735 gpu_metrics->average_dclk1_frequency = metrics->AverageDclk1Frequency; 1736 1736 1737 - gpu_metrics->current_gfxclk = metrics->CurrClock[PPCLK_GFXCLK]; 1737 + gpu_metrics->current_gfxclk = gpu_metrics->average_gfxclk_frequency; 1738 1738 gpu_metrics->current_socclk = metrics->CurrClock[PPCLK_SOCCLK]; 1739 1739 gpu_metrics->current_uclk = metrics->CurrClock[PPCLK_UCLK]; 1740 1740 gpu_metrics->current_vclk0 = metrics->CurrClock[PPCLK_VCLK_0];
+3 -1
drivers/gpu/drm/i915/display/intel_dpt.c
··· 166 166 i915_vma_get(vma); 167 167 } 168 168 169 + dpt->obj->mm.dirty = true; 170 + 169 171 atomic_dec(&i915->gpu_error.pending_fb_pin); 170 172 intel_runtime_pm_put(&i915->runtime_pm, wakeref); 171 173 ··· 263 261 dpt_obj = i915_gem_object_create_stolen(i915, size); 264 262 if (IS_ERR(dpt_obj) && !HAS_LMEM(i915)) { 265 263 drm_dbg_kms(&i915->drm, "Allocating dpt from smem\n"); 266 - dpt_obj = i915_gem_object_create_internal(i915, size); 264 + dpt_obj = i915_gem_object_create_shmem(i915, size); 267 265 } 268 266 if (IS_ERR(dpt_obj)) 269 267 return ERR_CAST(dpt_obj);
+4 -2
drivers/gpu/drm/i915/gem/selftests/huge_pages.c
··· 1246 1246 * times in succession a possibility by enlarging the permutation array. 1247 1247 */ 1248 1248 order = i915_random_order(count * count, &prng); 1249 - if (!order) 1250 - return -ENOMEM; 1249 + if (!order) { 1250 + err = -ENOMEM; 1251 + goto out; 1252 + } 1251 1253 1252 1254 max_page_size = rounddown_pow_of_two(obj->mm.page_sizes.sg); 1253 1255 max = div_u64(max - size, max_page_size);
+1 -1
drivers/gpu/drm/msm/adreno/a5xx_gpu.c
··· 89 89 * since we've already mapped it once in 90 90 * submit_reloc() 91 91 */ 92 - if (WARN_ON(!ptr)) 92 + if (WARN_ON(IS_ERR_OR_NULL(ptr))) 93 93 return; 94 94 95 95 for (i = 0; i < dwords; i++) {
+1 -1
drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h
··· 206 206 SHADER(A6XX_SP_LB_3_DATA, 0x800), 207 207 SHADER(A6XX_SP_LB_4_DATA, 0x800), 208 208 SHADER(A6XX_SP_LB_5_DATA, 0x200), 209 - SHADER(A6XX_SP_CB_BINDLESS_DATA, 0x2000), 209 + SHADER(A6XX_SP_CB_BINDLESS_DATA, 0x800), 210 210 SHADER(A6XX_SP_CB_LEGACY_DATA, 0x280), 211 211 SHADER(A6XX_SP_UAV_DATA, 0x80), 212 212 SHADER(A6XX_SP_INST_TAG, 0x80),
-2
drivers/gpu/drm/msm/adreno/adreno_device.c
··· 369 369 .hwcg = a640_hwcg, 370 370 }, { 371 371 .rev = ADRENO_REV(6, 9, 0, ANY_ID), 372 - .revn = 690, 373 - .name = "A690", 374 372 .fw = { 375 373 [ADRENO_FW_SQE] = "a660_sqe.fw", 376 374 [ADRENO_FW_GMU] = "a690_gmu.bin",
+8 -4
drivers/gpu/drm/msm/adreno/adreno_gpu.h
··· 149 149 150 150 static inline bool adreno_is_revn(const struct adreno_gpu *gpu, uint32_t revn) 151 151 { 152 - WARN_ON_ONCE(!gpu->revn); 152 + /* revn can be zero, but if not, it is set at the same time as info */ 153 + WARN_ON_ONCE(!gpu->info); 153 154 154 155 return gpu->revn == revn; 155 156 } ··· 162 161 163 162 static inline bool adreno_is_a2xx(const struct adreno_gpu *gpu) 164 163 { 165 - WARN_ON_ONCE(!gpu->revn); 164 + /* revn can be zero, but if not, it is set at the same time as info */ 165 + WARN_ON_ONCE(!gpu->info); 166 166 167 167 return (gpu->revn < 300); 168 168 } 169 169 170 170 static inline bool adreno_is_a20x(const struct adreno_gpu *gpu) 171 171 { 172 - WARN_ON_ONCE(!gpu->revn); 172 + /* revn can be zero, but if not, it is set at the same time as info */ 173 + WARN_ON_ONCE(!gpu->info); 173 174 174 175 return (gpu->revn < 210); 175 176 } ··· 310 307 311 308 static inline int adreno_is_a690(const struct adreno_gpu *gpu) 312 309 { 313 - return adreno_is_revn(gpu, 690); 310 + /* The order of args is important here to handle ANY_ID correctly */ 311 + return adreno_cmp_rev(ADRENO_REV(6, 9, 0, ANY_ID), gpu->rev); 314 312 }; 315 313 316 314 /* check for a615, a616, a618, a619 or any derivatives */
-13
drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h
··· 15 15 #define DPU_PERF_DEFAULT_MAX_CORE_CLK_RATE 412500000 16 16 17 17 /** 18 - * enum dpu_core_perf_data_bus_id - data bus identifier 19 - * @DPU_CORE_PERF_DATA_BUS_ID_MNOC: DPU/MNOC data bus 20 - * @DPU_CORE_PERF_DATA_BUS_ID_LLCC: MNOC/LLCC data bus 21 - * @DPU_CORE_PERF_DATA_BUS_ID_EBI: LLCC/EBI data bus 22 - */ 23 - enum dpu_core_perf_data_bus_id { 24 - DPU_CORE_PERF_DATA_BUS_ID_MNOC, 25 - DPU_CORE_PERF_DATA_BUS_ID_LLCC, 26 - DPU_CORE_PERF_DATA_BUS_ID_EBI, 27 - DPU_CORE_PERF_DATA_BUS_ID_MAX, 28 - }; 29 - 30 - /** 31 18 * struct dpu_core_perf_params - definition of performance parameters 32 19 * @max_per_pipe_ib: maximum instantaneous bandwidth request 33 20 * @bw_ctl: arbitrated bandwidth request
+7 -1
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
··· 51 51 52 52 static const u32 fetch_tbl[SSPP_MAX] = {CTL_INVALID_BIT, 16, 17, 18, 19, 53 53 CTL_INVALID_BIT, CTL_INVALID_BIT, CTL_INVALID_BIT, CTL_INVALID_BIT, 0, 54 - 1, 2, 3, CTL_INVALID_BIT, CTL_INVALID_BIT}; 54 + 1, 2, 3, 4, 5}; 55 55 56 56 static int _mixer_stages(const struct dpu_lm_cfg *mixer, int count, 57 57 enum dpu_lm lm) ··· 197 197 break; 198 198 case SSPP_DMA3: 199 199 ctx->pending_flush_mask |= BIT(25); 200 + break; 201 + case SSPP_DMA4: 202 + ctx->pending_flush_mask |= BIT(13); 203 + break; 204 + case SSPP_DMA5: 205 + ctx->pending_flush_mask |= BIT(14); 200 206 break; 201 207 case SSPP_CURSOR0: 202 208 ctx->pending_flush_mask |= BIT(22);
-2
drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c
··· 1087 1087 1088 1088 const struct msm_dsi_phy_cfg dsi_phy_14nm_2290_cfgs = { 1089 1089 .has_phy_lane = true, 1090 - .regulator_data = dsi_phy_14nm_17mA_regulators, 1091 - .num_regulators = ARRAY_SIZE(dsi_phy_14nm_17mA_regulators), 1092 1090 .ops = { 1093 1091 .enable = dsi_14nm_phy_enable, 1094 1092 .disable = dsi_14nm_phy_disable,
+6
drivers/gpu/drm/msm/msm_fence.c
··· 191 191 192 192 f->fctx = fctx; 193 193 194 + /* 195 + * Until this point, the fence was just some pre-allocated memory, 196 + * no-one should have taken a reference to it yet. 197 + */ 198 + WARN_ON(kref_read(&fence->refcount)); 199 + 194 200 dma_fence_init(&f->base, &msm_fence_ops, &fctx->spinlock, 195 201 fctx->context, ++fctx->last_fence); 196 202 }
+14 -2
drivers/gpu/drm/msm/msm_gem_submit.c
··· 86 86 } 87 87 88 88 dma_fence_put(submit->user_fence); 89 - dma_fence_put(submit->hw_fence); 89 + 90 + /* 91 + * If the submit is freed before msm_job_run(), then hw_fence is 92 + * just some pre-allocated memory, not a reference counted fence. 93 + * Once the job runs and the hw_fence is initialized, it will 94 + * have a refcount of at least one, since the submit holds a ref 95 + * to the hw_fence. 96 + */ 97 + if (kref_read(&submit->hw_fence->refcount) == 0) { 98 + kfree(submit->hw_fence); 99 + } else { 100 + dma_fence_put(submit->hw_fence); 101 + } 90 102 91 103 put_pid(submit->pid); 92 104 msm_submitqueue_put(submit->queue); ··· 901 889 * after the job is armed 902 890 */ 903 891 if ((args->flags & MSM_SUBMIT_FENCE_SN_IN) && 904 - idr_find(&queue->fence_idr, args->fence)) { 892 + (!args->fence || idr_find(&queue->fence_idr, args->fence))) { 905 893 spin_unlock(&queue->idr_lock); 906 894 idr_preload_end(); 907 895 ret = -EINVAL;
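The freeing logic above hinges on a detail worth spelling out: until msm_job_run() initializes the fence, the pre-allocated hw_fence has a refcount of zero and must be freed with kfree() rather than dma_fence_put(). A minimal userspace sketch of that two-phase lifecycle, with a plain counter standing in for struct kref (all names here are illustrative, not the driver's):

#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-ins for struct kref / dma_fence. */
struct fence {
    unsigned int refcount;      /* 0 until the fence is initialised */
};

static struct fence *fence_prealloc(void)
{
    return calloc(1, sizeof(struct fence));     /* refcount == 0 */
}

static void fence_init(struct fence *f)
{
    f->refcount = 1;            /* first real reference */
}

static void fence_put(struct fence *f)
{
    if (--f->refcount == 0)
        free(f);
}

/* Mirrors the fixed teardown: free raw memory if never initialised. */
static void submit_cleanup(struct fence *f)
{
    if (f->refcount == 0)
        free(f);                /* was only pre-allocated memory */
    else
        fence_put(f);           /* drop our reference normally */
}

int main(void)
{
    struct fence *early = fence_prealloc();
    submit_cleanup(early);      /* freed before any job ran */

    struct fence *live = fence_prealloc();
    fence_init(live);
    submit_cleanup(live);       /* dropped via refcounting */
    return 0;
}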
+17 -2
drivers/gpu/drm/msm/msm_mdss.c
··· 189 189 #define UBWC_2_0 0x20000000 190 190 #define UBWC_3_0 0x30000000 191 191 #define UBWC_4_0 0x40000000 192 + #define UBWC_4_3 0x40030000 192 193 193 194 static void msm_mdss_setup_ubwc_dec_20(struct msm_mdss *msm_mdss) 194 195 { ··· 228 227 writel_relaxed(1, msm_mdss->mmio + UBWC_CTRL_2); 229 228 writel_relaxed(0, msm_mdss->mmio + UBWC_PREDICTION_MODE); 230 229 } else { 231 - writel_relaxed(2, msm_mdss->mmio + UBWC_CTRL_2); 230 + if (data->ubwc_dec_version == UBWC_4_3) 231 + writel_relaxed(3, msm_mdss->mmio + UBWC_CTRL_2); 232 + else 233 + writel_relaxed(2, msm_mdss->mmio + UBWC_CTRL_2); 232 234 writel_relaxed(1, msm_mdss->mmio + UBWC_PREDICTION_MODE); 233 235 } 234 236 } ··· 275 271 msm_mdss_setup_ubwc_dec_30(msm_mdss); 276 272 break; 277 273 case UBWC_4_0: 274 + case UBWC_4_3: 278 275 msm_mdss_setup_ubwc_dec_40(msm_mdss); 279 276 break; 280 277 default: ··· 574 569 .macrotile_mode = 1, 575 570 }; 576 571 572 + static const struct msm_mdss_data sm8550_data = { 573 + .ubwc_version = UBWC_4_0, 574 + .ubwc_dec_version = UBWC_4_3, 575 + .ubwc_swizzle = 6, 576 + .ubwc_static = 1, 577 + /* TODO: highest_bank_bit = 2 for LP_DDR4 */ 578 + .highest_bank_bit = 3, 579 + .macrotile_mode = 1, 580 + }; 581 + 577 582 static const struct of_device_id mdss_dt_match[] = { 578 583 { .compatible = "qcom,mdss" }, 579 584 { .compatible = "qcom,msm8998-mdss" }, ··· 600 585 { .compatible = "qcom,sm8250-mdss", .data = &sm8250_data }, 601 586 { .compatible = "qcom,sm8350-mdss", .data = &sm8250_data }, 602 587 { .compatible = "qcom,sm8450-mdss", .data = &sm8250_data }, 603 - { .compatible = "qcom,sm8550-mdss", .data = &sm8250_data }, 588 + { .compatible = "qcom,sm8550-mdss", .data = &sm8550_data }, 604 589 {} 605 590 }; 606 591 MODULE_DEVICE_TABLE(of, mdss_dt_match);
+1 -1
drivers/hwmon/aquacomputer_d5next.c
··· 1027 1027 if (ret < 0) 1028 1028 return ret; 1029 1029 1030 - *val = aqc_percent_to_pwm(ret); 1030 + *val = aqc_percent_to_pwm(*val); 1031 1031 break; 1032 1032 } 1033 1033 break;
+15 -2
drivers/hwmon/k10temp.c
··· 77 77 #define ZEN_CUR_TEMP_RANGE_SEL_MASK BIT(19) 78 78 #define ZEN_CUR_TEMP_TJ_SEL_MASK GENMASK(17, 16) 79 79 80 + /* 81 + * AMD's Industrial processor 3255 supports temperature from -40 deg to 105 deg Celsius. 82 + * Use the model name to identify 3255 CPUs and set a flag to display negative temperature. 83 + * Do not round off to zero for negative Tctl or Tdie values if the flag is set 84 + */ 85 + #define AMD_I3255_STR "3255" 86 + 80 87 struct k10temp_data { 81 88 struct pci_dev *pdev; 82 89 void (*read_htcreg)(struct pci_dev *pdev, u32 *regval); ··· 93 86 u32 show_temp; 94 87 bool is_zen; 95 88 u32 ccd_offset; 89 + bool disp_negative; 96 90 }; 97 91 98 92 #define TCTL_BIT 0 ··· 212 204 switch (channel) { 213 205 case 0: /* Tctl */ 214 206 *val = get_raw_temp(data); 215 - if (*val < 0) 207 + if (*val < 0 && !data->disp_negative) 216 208 *val = 0; 217 209 break; 218 210 case 1: /* Tdie */ 219 211 *val = get_raw_temp(data) - data->temp_offset; 220 - if (*val < 0) 212 + if (*val < 0 && !data->disp_negative) 221 213 *val = 0; 222 214 break; 223 215 case 2 ... 13: /* Tccd{1-12} */ ··· 412 404 413 405 data->pdev = pdev; 414 406 data->show_temp |= BIT(TCTL_BIT); /* Always show Tctl */ 407 + 408 + if (boot_cpu_data.x86 == 0x17 && 409 + strstr(boot_cpu_data.x86_model_id, AMD_I3255_STR)) { 410 + data->disp_negative = true; 411 + } 415 412 416 413 if (boot_cpu_data.x86 == 0x15 && 417 414 ((boot_cpu_data.x86_model & 0xf0) == 0x60 ||
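The rule encoded above is small but easy to misread: readings below zero are clamped to zero unless the SKU is known to report valid negative temperatures. A tiny standalone sketch of the same reporting rule (the helper name is made up for illustration):

#include <stdbool.h>
#include <stdio.h>

/* Clamp negative Tctl/Tdie readings to 0 unless the part (e.g. an
 * industrial SKU rated down to -40 degrees) legitimately goes below 0. */
static long report_temp(long raw_millidegrees, bool disp_negative)
{
    if (raw_millidegrees < 0 && !disp_negative)
        return 0;
    return raw_millidegrees;
}

int main(void)
{
    printf("%ld\n", report_temp(-5000, false));   /* 0 */
    printf("%ld\n", report_temp(-5000, true));    /* -5000 */
    return 0;
}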
+22 -6
drivers/hwmon/nct6775-core.c
··· 955 955 800, 800 956 956 }; 957 957 958 - static inline long in_from_reg(u8 reg, u8 nr) 958 + /* 959 + * NCT6798 scaling: 960 + * CPUVC, IN1, AVSB, 3VCC, IN0, IN8, IN4, 3VSB, VBAT, VTT, IN5, IN6, IN2, 961 + * IN3, IN7 962 + * Additional scales to be added later: IN9 (800), VHIF (1600) 963 + */ 964 + static const u16 scale_in_6798[15] = { 965 + 800, 800, 1600, 1600, 800, 800, 800, 1600, 1600, 1600, 1600, 1600, 800, 966 + 800, 800 967 + }; 968 + 969 + static inline long in_from_reg(u8 reg, u8 nr, const u16 *scales) 959 970 { 960 - return DIV_ROUND_CLOSEST(reg * scale_in[nr], 100); 971 + return DIV_ROUND_CLOSEST(reg * scales[nr], 100); 961 972 } 962 973 963 - static inline u8 in_to_reg(u32 val, u8 nr) 974 + static inline u8 in_to_reg(u32 val, u8 nr, const u16 *scales) 964 975 { 965 - return clamp_val(DIV_ROUND_CLOSEST(val * 100, scale_in[nr]), 0, 255); 976 + return clamp_val(DIV_ROUND_CLOSEST(val * 100, scales[nr]), 0, 255); 966 977 } 967 978 968 979 /* TSI temperatures are in 8.3 format */ ··· 1684 1673 if (IS_ERR(data)) 1685 1674 return PTR_ERR(data); 1686 1675 1687 - return sprintf(buf, "%ld\n", in_from_reg(data->in[nr][index], nr)); 1676 + return sprintf(buf, "%ld\n", 1677 + in_from_reg(data->in[nr][index], nr, data->scale_in)); 1688 1678 } 1689 1679 1690 1680 static ssize_t ··· 1703 1691 if (err < 0) 1704 1692 return err; 1705 1693 mutex_lock(&data->update_lock); 1706 - data->in[nr][index] = in_to_reg(val, nr); 1694 + data->in[nr][index] = in_to_reg(val, nr, data->scale_in); 1707 1695 err = nct6775_write_value(data, data->REG_IN_MINMAX[index - 1][nr], data->in[nr][index]); 1708 1696 mutex_unlock(&data->update_lock); 1709 1697 return err ? : count; ··· 3474 3462 mutex_init(&data->update_lock); 3475 3463 data->name = nct6775_device_names[data->kind]; 3476 3464 data->bank = 0xff; /* Force initial bank selection */ 3465 + data->scale_in = scale_in; 3477 3466 3478 3467 switch (data->kind) { 3479 3468 case nct6106: ··· 3989 3976 num_reg_tsi_temp = 0; 3990 3977 break; 3991 3978 } 3979 + 3980 + if (data->kind == nct6798 || data->kind == nct6799) 3981 + data->scale_in = scale_in_6798; 3992 3982 3993 3983 reg_temp = NCT6779_REG_TEMP; 3994 3984 num_reg_temp = ARRAY_SIZE(NCT6779_REG_TEMP);
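The refactor above parameterizes the register conversion helpers on a scale table, so the NCT6798/NCT6799 parts can carry their own per-input factors via data->scale_in. A compact userspace sketch of the same table-driven conversion, with DIV_ROUND_CLOSEST reimplemented and shortened two-entry tables standing in for the real fifteen-entry ones:

#include <stdio.h>

#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

static const unsigned short scale_default[2] = { 800, 800 };
static const unsigned short scale_6798[2]    = { 800, 1600 };

static long in_from_reg(unsigned char reg, unsigned char nr,
                        const unsigned short *scales)
{
    return DIV_ROUND_CLOSEST((long)reg * scales[nr], 100);
}

static unsigned char in_to_reg(unsigned long val, unsigned char nr,
                               const unsigned short *scales)
{
    unsigned long reg = DIV_ROUND_CLOSEST(val * 100, scales[nr]);

    return reg > 255 ? 255 : (unsigned char)reg;    /* clamp_val analog */
}

int main(void)
{
    /* Same register value, different per-chip scaling. */
    printf("%ld mV\n", in_from_reg(200, 1, scale_default)); /* 1600 */
    printf("%ld mV\n", in_from_reg(200, 1, scale_6798));    /* 3200 */
    printf("%u\n", in_to_reg(3200, 1, scale_6798));         /* 200 */
    return 0;
}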
+1 -1
drivers/hwmon/nct6775-platform.c
··· 586 586 int creb; 587 587 int cred; 588 588 589 - cre6 = sio_data->sio_inb(sio_data, 0xe0); 589 + cre6 = sio_data->sio_inb(sio_data, 0xe6); 590 590 591 591 sio_data->sio_select(sio_data, NCT6775_LD_12); 592 592 cre0 = sio_data->sio_inb(sio_data, 0xe0);
+1
drivers/hwmon/nct6775.h
··· 98 98 u8 bank; /* current register bank */ 99 99 u8 in_num; /* number of in inputs we have */ 100 100 u8 in[15][3]; /* [0]=in, [1]=in_max, [2]=in_min */ 101 + const u16 *scale_in; /* internal scaling factors */ 101 102 unsigned int rpm[NUM_FAN]; 102 103 u16 fan_min[NUM_FAN]; 103 104 u8 fan_pulses[NUM_FAN];
+1 -1
drivers/hwmon/nct7802.c
··· 725 725 if (index >= 38 && index < 46 && !(reg & 0x01)) /* PECI 0 */ 726 726 return 0; 727 727 728 - if (index >= 0x46 && (!(reg & 0x02))) /* PECI 1 */ 728 + if (index >= 46 && !(reg & 0x02)) /* PECI 1 */ 729 729 return 0; 730 730 731 731 return attr->mode;
+24 -14
drivers/hwmon/oxp-sensors.c
··· 220 220 } 221 221 222 222 /* Callbacks for turbo toggle attribute */ 223 + static umode_t tt_toggle_is_visible(struct kobject *kobj, 224 + struct attribute *attr, int n) 225 + { 226 + switch (board) { 227 + case aok_zoe_a1: 228 + case oxp_mini_amd_a07: 229 + case oxp_mini_amd_pro: 230 + return attr->mode; 231 + default: 232 + break; 233 + } 234 + return 0; 235 + } 236 + 223 237 static ssize_t tt_toggle_store(struct device *dev, 224 238 struct device_attribute *attr, const char *buf, 225 239 size_t count) ··· 410 396 NULL 411 397 }; 412 398 413 - ATTRIBUTE_GROUPS(oxp_ec); 399 + static struct attribute_group oxp_ec_attribute_group = { 400 + .is_visible = tt_toggle_is_visible, 401 + .attrs = oxp_ec_attrs, 402 + }; 403 + 404 + static const struct attribute_group *oxp_ec_groups[] = { 405 + &oxp_ec_attribute_group, 406 + NULL 407 + }; 414 408 415 409 static const struct hwmon_ops oxp_ec_hwmon_ops = { 416 410 .is_visible = oxp_ec_hwmon_is_visible, ··· 437 415 const struct dmi_system_id *dmi_entry; 438 416 struct device *dev = &pdev->dev; 439 417 struct device *hwdev; 440 - int ret; 441 418 442 419 /* 443 420 * Have to check for AMD processor here because DMI strings are the ··· 451 430 452 431 board = (enum oxp_board)(unsigned long)dmi_entry->driver_data; 453 432 454 - switch (board) { 455 - case aok_zoe_a1: 456 - case oxp_mini_amd_a07: 457 - case oxp_mini_amd_pro: 458 - ret = devm_device_add_groups(dev, oxp_ec_groups); 459 - if (ret) 460 - return ret; 461 - break; 462 - default: 463 - break; 464 - } 465 - 466 433 hwdev = devm_hwmon_device_register_with_info(dev, "oxpec", NULL, 467 434 &oxp_ec_chip_info, NULL); 468 435 ··· 460 451 static struct platform_driver oxp_platform_driver = { 461 452 .driver = { 462 453 .name = "oxp-platform", 454 + .dev_groups = oxp_ec_groups, 463 455 }, 464 456 .probe = oxp_platform_probe, 465 457 };
+11 -9
drivers/hwmon/pmbus/pmbus_core.c
··· 2745 2745 }, 2746 2746 }; 2747 2747 2748 - static int _pmbus_is_enabled(struct device *dev, u8 page) 2748 + static int _pmbus_is_enabled(struct i2c_client *client, u8 page) 2749 2749 { 2750 - struct i2c_client *client = to_i2c_client(dev->parent); 2751 2750 int ret; 2752 2751 2753 2752 ret = _pmbus_read_byte_data(client, page, PMBUS_OPERATION); ··· 2757 2758 return !!(ret & PB_OPERATION_CONTROL_ON); 2758 2759 } 2759 2760 2760 - static int __maybe_unused pmbus_is_enabled(struct device *dev, u8 page) 2761 + static int __maybe_unused pmbus_is_enabled(struct i2c_client *client, u8 page) 2761 2762 { 2762 - struct i2c_client *client = to_i2c_client(dev->parent); 2763 2763 struct pmbus_data *data = i2c_get_clientdata(client); 2764 2764 int ret; 2765 2765 2766 2766 mutex_lock(&data->update_lock); 2767 - ret = _pmbus_is_enabled(dev, page); 2767 + ret = _pmbus_is_enabled(client, page); 2768 2768 mutex_unlock(&data->update_lock); 2769 2769 2770 - return !!(ret & PB_OPERATION_CONTROL_ON); 2770 + return ret; 2771 2771 } 2772 2772 2773 2773 #define to_dev_attr(_dev_attr) \ ··· 2842 2844 if (status < 0) 2843 2845 return status; 2844 2846 2845 - if (_pmbus_is_enabled(dev, page)) { 2847 + if (_pmbus_is_enabled(client, page)) { 2846 2848 if (status & PB_STATUS_OFF) { 2847 2849 *flags |= REGULATOR_ERROR_FAIL; 2848 2850 *event |= REGULATOR_EVENT_FAIL; ··· 2896 2898 #if IS_ENABLED(CONFIG_REGULATOR) 2897 2899 static int pmbus_regulator_is_enabled(struct regulator_dev *rdev) 2898 2900 { 2899 - return pmbus_is_enabled(rdev_get_dev(rdev), rdev_get_id(rdev)); 2901 + struct device *dev = rdev_get_dev(rdev); 2902 + struct i2c_client *client = to_i2c_client(dev->parent); 2903 + 2904 + return pmbus_is_enabled(client, rdev_get_id(rdev)); 2900 2905 } 2901 2906 2902 2907 static int _pmbus_regulator_on_off(struct regulator_dev *rdev, bool enable) ··· 2946 2945 struct pmbus_data *data = i2c_get_clientdata(client); 2947 2946 u8 page = rdev_get_id(rdev); 2948 2947 int status, ret; 2948 + int event; 2949 2949 2950 2950 mutex_lock(&data->update_lock); 2951 2951 status = pmbus_get_status(client, page, PMBUS_STATUS_WORD); ··· 2966 2964 goto unlock; 2967 2965 } 2968 2966 2969 - ret = pmbus_regulator_get_error_flags(rdev, &status); 2967 + ret = _pmbus_get_flags(data, rdev_get_id(rdev), &status, &event, false); 2970 2968 if (ret) 2971 2969 goto unlock; 2972 2970
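Two things happen above: the helpers take the i2c_client directly instead of re-deriving it from a device pointer, and the locked wrapper stops re-masking a value that is already 0 or 1. The underscore-prefixed unlocked helper plus locked wrapper is a common kernel convention; here is a pthread-based sketch of it (the struct and names are illustrative, not the PMBus code):

#include <pthread.h>
#include <stdio.h>

struct device {
    pthread_mutex_t update_lock;
    int operation_reg;
};

#define PB_OPERATION_CONTROL_ON 0x80

/* Leading underscore: caller must already hold update_lock. */
static int _is_enabled(struct device *dev)
{
    return !!(dev->operation_reg & PB_OPERATION_CONTROL_ON);
}

/* Public wrapper: takes the lock, delegates, returns the result as-is. */
static int is_enabled(struct device *dev)
{
    int ret;

    pthread_mutex_lock(&dev->update_lock);
    ret = _is_enabled(dev);
    pthread_mutex_unlock(&dev->update_lock);

    return ret;     /* already 0/1; no second !!(... & flag) needed */
}

int main(void)
{
    struct device dev = {
        .update_lock = PTHREAD_MUTEX_INITIALIZER,
        .operation_reg = PB_OPERATION_CONTROL_ON,
    };

    printf("enabled: %d\n", is_enabled(&dev));
    return 0;
}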
+2
drivers/infiniband/core/cma.c
··· 4062 4062 RDMA_CM_ADDR_QUERY))) 4063 4063 return -EINVAL; 4064 4064 4065 + } else { 4066 + memcpy(cma_dst_addr(id_priv), dst_addr, rdma_addr_size(dst_addr)); 4065 4067 } 4066 4068 4067 4069 if (cma_family(id_priv) != dst_addr->sa_family) {
+12
drivers/infiniband/hw/bnxt_re/ib_verbs.c
··· 869 869 int bnxt_re_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata) 870 870 { 871 871 struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp); 872 + struct bnxt_qplib_qp *qplib_qp = &qp->qplib_qp; 872 873 struct bnxt_re_dev *rdev = qp->rdev; 874 + struct bnxt_qplib_nq *scq_nq = NULL; 875 + struct bnxt_qplib_nq *rcq_nq = NULL; 873 876 unsigned int flags; 874 877 int rc; 875 878 ··· 905 902 906 903 ib_umem_release(qp->rumem); 907 904 ib_umem_release(qp->sumem); 905 + 906 + /* Flush all the entries of notification queue associated with 907 + * given qp. 908 + */ 909 + scq_nq = qplib_qp->scq->nq; 910 + rcq_nq = qplib_qp->rcq->nq; 911 + bnxt_re_synchronize_nq(scq_nq); 912 + if (scq_nq != rcq_nq) 913 + bnxt_re_synchronize_nq(rcq_nq); 908 914 909 915 return 0; 910 916 }
+23 -5
drivers/infiniband/hw/bnxt_re/qplib_fp.c
··· 381 381 spin_unlock_bh(&hwq->lock); 382 382 } 383 383 384 + /* bnxt_re_synchronize_nq - self polling notification queue. 385 + * @nq - notification queue pointer 386 + * 387 + * This function will start polling entries of a given notification queue 388 + * for all pending entries. 389 + * This function is useful to synchronize notification entries while resources 390 + * are going away. 391 + */ 392 + 393 + void bnxt_re_synchronize_nq(struct bnxt_qplib_nq *nq) 394 + { 395 + int budget = nq->budget; 396 + 397 + nq->budget = nq->hwq.max_elements; 398 + bnxt_qplib_service_nq(&nq->nq_tasklet); 399 + nq->budget = budget; 400 + } 401 + 384 402 static irqreturn_t bnxt_qplib_nq_irq(int irq, void *dev_instance) 385 403 { 386 404 struct bnxt_qplib_nq *nq = dev_instance; ··· 420 402 if (!nq->requested) 421 403 return; 422 404 423 - tasklet_disable(&nq->nq_tasklet); 405 + nq->requested = false; 424 406 /* Mask h/w interrupt */ 425 407 bnxt_qplib_ring_nq_db(&nq->nq_db.dbinfo, nq->res->cctx, false); 426 408 /* Sync with last running IRQ handler */ 427 409 synchronize_irq(nq->msix_vec); 428 - if (kill) 429 - tasklet_kill(&nq->nq_tasklet); 430 - 431 410 irq_set_affinity_hint(nq->msix_vec, NULL); 432 411 free_irq(nq->msix_vec, nq); 433 412 kfree(nq->name); 434 413 nq->name = NULL; 435 - nq->requested = false; 414 + 415 + if (kill) 416 + tasklet_kill(&nq->nq_tasklet); 417 + tasklet_disable(&nq->nq_tasklet); 436 418 } 437 419 438 420 void bnxt_qplib_disable_nq(struct bnxt_qplib_nq *nq)
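bnxt_re_synchronize_nq() drains the queue on the spot by temporarily raising the polling budget to the full queue depth and restoring it afterwards. A self-contained sketch of that save-bump-drain-restore shape (all types and names are stand-ins for the driver's):

#include <stdio.h>

struct notif_queue {
    int budget;
    int max_elements;
    int pending;
};

static void service_queue(struct notif_queue *nq)
{
    int handled = 0;

    while (nq->pending > 0 && handled < nq->budget) {
        nq->pending--;          /* process one entry */
        handled++;
    }
}

static void synchronize_queue(struct notif_queue *nq)
{
    int budget = nq->budget;        /* save the IRQ-path budget */

    nq->budget = nq->max_elements;  /* allow a full drain */
    service_queue(nq);
    nq->budget = budget;            /* restore for normal IRQ work */
}

int main(void)
{
    struct notif_queue nq = { .budget = 8, .max_elements = 64, .pending = 40 };

    synchronize_queue(&nq);
    printf("pending after drain: %d\n", nq.pending);    /* 0 */
    return 0;
}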
+1
drivers/infiniband/hw/bnxt_re/qplib_fp.h
··· 553 553 struct bnxt_qplib_cqe *cqe, 554 554 int num_cqes); 555 555 void bnxt_qplib_flush_cqn_wq(struct bnxt_qplib_qp *qp); 556 + void bnxt_re_synchronize_nq(struct bnxt_qplib_nq *nq); 556 557 557 558 static inline void *bnxt_qplib_get_swqe(struct bnxt_qplib_q *que, u32 *swq_idx) 558 559 {
+4 -5
drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
··· 989 989 if (!creq->requested) 990 990 return; 991 991 992 - tasklet_disable(&creq->creq_tasklet); 992 + creq->requested = false; 993 993 /* Mask h/w interrupts */ 994 994 bnxt_qplib_ring_nq_db(&creq->creq_db.dbinfo, rcfw->res->cctx, false); 995 995 /* Sync with last running IRQ-handler */ 996 996 synchronize_irq(creq->msix_vec); 997 - if (kill) 998 - tasklet_kill(&creq->creq_tasklet); 999 - 1000 997 free_irq(creq->msix_vec, rcfw); 1001 998 kfree(creq->irq_name); 1002 999 creq->irq_name = NULL; 1003 - creq->requested = false; 1004 1000 atomic_set(&rcfw->rcfw_intr_enabled, 0); 1001 + if (kill) 1002 + tasklet_kill(&creq->creq_tasklet); 1003 + tasklet_disable(&creq->creq_tasklet); 1005 1004 } 1006 1005 1007 1006 void bnxt_qplib_disable_rcfw_channel(struct bnxt_qplib_rcfw *rcfw)
+19 -12
drivers/infiniband/hw/irdma/ctrl.c
··· 2712 2712 */ 2713 2713 void irdma_check_cqp_progress(struct irdma_cqp_timeout *timeout, struct irdma_sc_dev *dev) 2714 2714 { 2715 - if (timeout->compl_cqp_cmds != dev->cqp_cmd_stats[IRDMA_OP_CMPL_CMDS]) { 2716 - timeout->compl_cqp_cmds = dev->cqp_cmd_stats[IRDMA_OP_CMPL_CMDS]; 2715 + u64 completed_ops = atomic64_read(&dev->cqp->completed_ops); 2716 + 2717 + if (timeout->compl_cqp_cmds != completed_ops) { 2718 + timeout->compl_cqp_cmds = completed_ops; 2717 2719 timeout->count = 0; 2718 - } else { 2719 - if (dev->cqp_cmd_stats[IRDMA_OP_REQ_CMDS] != 2720 - timeout->compl_cqp_cmds) 2721 - timeout->count++; 2720 + } else if (timeout->compl_cqp_cmds != dev->cqp->requested_ops) { 2721 + timeout->count++; 2722 2722 } 2723 2723 } 2724 2724 ··· 2761 2761 if (newtail != tail) { 2762 2762 /* SUCCESS */ 2763 2763 IRDMA_RING_MOVE_TAIL(cqp->sq_ring); 2764 - cqp->dev->cqp_cmd_stats[IRDMA_OP_CMPL_CMDS]++; 2764 + atomic64_inc(&cqp->completed_ops); 2765 2765 return 0; 2766 2766 } 2767 2767 udelay(cqp->dev->hw_attrs.max_sleep_count); ··· 3121 3121 info->dev->cqp = cqp; 3122 3122 3123 3123 IRDMA_RING_INIT(cqp->sq_ring, cqp->sq_size); 3124 - cqp->dev->cqp_cmd_stats[IRDMA_OP_REQ_CMDS] = 0; 3125 - cqp->dev->cqp_cmd_stats[IRDMA_OP_CMPL_CMDS] = 0; 3124 + cqp->requested_ops = 0; 3125 + atomic64_set(&cqp->completed_ops, 0); 3126 3126 /* for the cqp commands backlog. */ 3127 3127 INIT_LIST_HEAD(&cqp->dev->cqp_cmd_head); 3128 3128 ··· 3274 3274 if (ret_code) 3275 3275 return NULL; 3276 3276 3277 - cqp->dev->cqp_cmd_stats[IRDMA_OP_REQ_CMDS]++; 3277 + cqp->requested_ops++; 3278 3278 if (!*wqe_idx) 3279 3279 cqp->polarity = !cqp->polarity; 3280 3280 wqe = cqp->sq_base[*wqe_idx].elem; ··· 3363 3363 if (polarity != ccq->cq_uk.polarity) 3364 3364 return -ENOENT; 3365 3365 3366 + /* Ensure CEQE contents are read after valid bit is checked */ 3367 + dma_rmb(); 3368 + 3366 3369 get_64bit_val(cqe, 8, &qp_ctx); 3367 3370 cqp = (struct irdma_sc_cqp *)(unsigned long)qp_ctx; 3368 3371 info->error = (bool)FIELD_GET(IRDMA_CQ_ERROR, temp); ··· 3400 3397 dma_wmb(); /* make sure shadow area is updated before moving tail */ 3401 3398 3402 3399 IRDMA_RING_MOVE_TAIL(cqp->sq_ring); 3403 - ccq->dev->cqp_cmd_stats[IRDMA_OP_CMPL_CMDS]++; 3400 + atomic64_inc(&cqp->completed_ops); 3404 3401 3405 3402 return ret_code; 3406 3403 } ··· 4012 4009 u8 polarity; 4013 4010 4014 4011 aeqe = IRDMA_GET_CURRENT_AEQ_ELEM(aeq); 4015 - get_64bit_val(aeqe, 0, &compl_ctx); 4016 4012 get_64bit_val(aeqe, 8, &temp); 4017 4013 polarity = (u8)FIELD_GET(IRDMA_AEQE_VALID, temp); 4018 4014 4019 4015 if (aeq->polarity != polarity) 4020 4016 return -ENOENT; 4017 + 4018 + /* Ensure AEQE contents are read after valid bit is checked */ 4019 + dma_rmb(); 4020 + 4021 + get_64bit_val(aeqe, 0, &compl_ctx); 4021 4022 4022 4023 print_hex_dump_debug("WQE: AEQ_ENTRY WQE", DUMP_PREFIX_OFFSET, 16, 8, 4023 4024 aeqe, 16, false);
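The timeout logic above replaces two slots in a stats array with a plain requested_ops counter and an atomic completed_ops counter: progress resets the stall count, outstanding-but-stuck work increments it. A runnable sketch of that detector using C11 atomics (the struct names mirror the driver loosely; the surrounding types are invented):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

struct cqp {
    uint64_t requested_ops;                 /* written by the submitter only */
    atomic_uint_least64_t completed_ops;    /* bumped from IRQ context */
};

struct cqp_timeout {
    uint64_t compl_snapshot;
    unsigned int count;
};

static void check_progress(struct cqp_timeout *t, struct cqp *cqp)
{
    uint64_t completed = atomic_load(&cqp->completed_ops);

    if (t->compl_snapshot != completed) {
        t->compl_snapshot = completed;      /* progress: reset counter */
        t->count = 0;
    } else if (t->compl_snapshot != cqp->requested_ops) {
        t->count++;                         /* outstanding work, no progress */
    }
}

int main(void)
{
    struct cqp cqp = { .requested_ops = 3 };
    struct cqp_timeout t = { 0 };

    atomic_store(&cqp.completed_ops, 2);
    check_progress(&t, &cqp);   /* progress seen, count = 0 */
    check_progress(&t, &cqp);   /* stalled at 2/3, count = 1 */
    printf("stall count: %u\n", t.count);
    return 0;
}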
+22 -24
drivers/infiniband/hw/irdma/defs.h
··· 191 191 IRDMA_OP_MANAGE_VF_PBLE_BP = 25, 192 192 IRDMA_OP_QUERY_FPM_VAL = 26, 193 193 IRDMA_OP_COMMIT_FPM_VAL = 27, 194 - IRDMA_OP_REQ_CMDS = 28, 195 - IRDMA_OP_CMPL_CMDS = 29, 196 - IRDMA_OP_AH_CREATE = 30, 197 - IRDMA_OP_AH_MODIFY = 31, 198 - IRDMA_OP_AH_DESTROY = 32, 199 - IRDMA_OP_MC_CREATE = 33, 200 - IRDMA_OP_MC_DESTROY = 34, 201 - IRDMA_OP_MC_MODIFY = 35, 202 - IRDMA_OP_STATS_ALLOCATE = 36, 203 - IRDMA_OP_STATS_FREE = 37, 204 - IRDMA_OP_STATS_GATHER = 38, 205 - IRDMA_OP_WS_ADD_NODE = 39, 206 - IRDMA_OP_WS_MODIFY_NODE = 40, 207 - IRDMA_OP_WS_DELETE_NODE = 41, 208 - IRDMA_OP_WS_FAILOVER_START = 42, 209 - IRDMA_OP_WS_FAILOVER_COMPLETE = 43, 210 - IRDMA_OP_SET_UP_MAP = 44, 211 - IRDMA_OP_GEN_AE = 45, 212 - IRDMA_OP_QUERY_RDMA_FEATURES = 46, 213 - IRDMA_OP_ALLOC_LOCAL_MAC_ENTRY = 47, 214 - IRDMA_OP_ADD_LOCAL_MAC_ENTRY = 48, 215 - IRDMA_OP_DELETE_LOCAL_MAC_ENTRY = 49, 216 - IRDMA_OP_CQ_MODIFY = 50, 194 + IRDMA_OP_AH_CREATE = 28, 195 + IRDMA_OP_AH_MODIFY = 29, 196 + IRDMA_OP_AH_DESTROY = 30, 197 + IRDMA_OP_MC_CREATE = 31, 198 + IRDMA_OP_MC_DESTROY = 32, 199 + IRDMA_OP_MC_MODIFY = 33, 200 + IRDMA_OP_STATS_ALLOCATE = 34, 201 + IRDMA_OP_STATS_FREE = 35, 202 + IRDMA_OP_STATS_GATHER = 36, 203 + IRDMA_OP_WS_ADD_NODE = 37, 204 + IRDMA_OP_WS_MODIFY_NODE = 38, 205 + IRDMA_OP_WS_DELETE_NODE = 39, 206 + IRDMA_OP_WS_FAILOVER_START = 40, 207 + IRDMA_OP_WS_FAILOVER_COMPLETE = 41, 208 + IRDMA_OP_SET_UP_MAP = 42, 209 + IRDMA_OP_GEN_AE = 43, 210 + IRDMA_OP_QUERY_RDMA_FEATURES = 44, 211 + IRDMA_OP_ALLOC_LOCAL_MAC_ENTRY = 45, 212 + IRDMA_OP_ADD_LOCAL_MAC_ENTRY = 46, 213 + IRDMA_OP_DELETE_LOCAL_MAC_ENTRY = 47, 214 + IRDMA_OP_CQ_MODIFY = 48, 217 215 218 216 /* Must be last entry*/ 219 - IRDMA_MAX_CQP_OPS = 51, 217 + IRDMA_MAX_CQP_OPS = 49, 220 218 }; 221 219 222 220 /* CQP SQ WQES */
+2 -1
drivers/infiniband/hw/irdma/hw.c
··· 191 191 case IRDMA_AE_AMP_MWBIND_INVALID_RIGHTS: 192 192 case IRDMA_AE_AMP_MWBIND_BIND_DISABLED: 193 193 case IRDMA_AE_AMP_MWBIND_INVALID_BOUNDS: 194 + case IRDMA_AE_AMP_MWBIND_VALID_STAG: 194 195 qp->flush_code = FLUSH_MW_BIND_ERR; 195 196 qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR; 196 197 break; ··· 2076 2075 cqp_request->compl_info.error = info.error; 2077 2076 2078 2077 if (cqp_request->waiting) { 2079 - cqp_request->request_done = true; 2078 + WRITE_ONCE(cqp_request->request_done, true); 2080 2079 wake_up(&cqp_request->waitq); 2081 2080 irdma_put_cqp_request(&rf->cqp, cqp_request); 2082 2081 } else {
+1 -1
drivers/infiniband/hw/irdma/main.h
··· 161 161 void (*callback_fcn)(struct irdma_cqp_request *cqp_request); 162 162 void *param; 163 163 struct irdma_cqp_compl_info compl_info; 164 + bool request_done; /* READ/WRITE_ONCE macros operate on it */ 164 165 bool waiting:1; 165 - bool request_done:1; 166 166 bool dynamic:1; 167 167 }; 168 168
+6
drivers/infiniband/hw/irdma/puda.c
··· 230 230 if (valid_bit != cq_uk->polarity) 231 231 return -ENOENT; 232 232 233 + /* Ensure CQE contents are read after valid bit is checked */ 234 + dma_rmb(); 235 + 233 236 if (cq->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) 234 237 ext_valid = (bool)FIELD_GET(IRDMA_CQ_EXTCQE, qword3); 235 238 ··· 245 242 polarity ^= 1; 246 243 if (polarity != cq_uk->polarity) 247 244 return -ENOENT; 245 + 246 + /* Ensure ext CQE contents are read after ext valid bit is checked */ 247 + dma_rmb(); 248 248 249 249 IRDMA_RING_MOVE_HEAD_NOCHECK(cq_uk->cq_ring); 250 250 if (!IRDMA_RING_CURRENT_HEAD(cq_uk->cq_ring))
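As in the irdma hunks above, a dma_rmb() is inserted between checking a descriptor's valid bit and consuming its payload, so the payload load cannot be speculated ahead of the ownership check. A C11 acquire/release analog of the same ordering (a host-memory stand-in only; real device descriptors need the kernel's dma_rmb()):

#include <stdatomic.h>
#include <stdio.h>

/* Descriptor shared with a producer (the device/IRQ in the real driver). */
struct cqe {
    atomic_int valid;   /* ownership/valid bit */
    int payload;        /* only meaningful once valid is set */
};

/* Producer: publish payload, then set the valid bit with release order. */
static void produce(struct cqe *e, int data)
{
    e->payload = data;
    atomic_store_explicit(&e->valid, 1, memory_order_release);
}

/* Consumer: the acquire load of the valid bit orders the payload read
 * after it, playing the role dma_rmb() plays in the fixed code. */
static int consume(struct cqe *e, int *data)
{
    if (!atomic_load_explicit(&e->valid, memory_order_acquire))
        return 0;       /* not ours yet: -ENOENT in the driver */
    *data = e->payload; /* safe: cannot be hoisted above the check */
    return 1;
}

int main(void)
{
    struct cqe e = { 0 };
    int data;

    produce(&e, 42);
    if (consume(&e, &data))
        printf("payload: %d\n", data);
    return 0;
}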
+2
drivers/infiniband/hw/irdma/type.h
··· 365 365 struct irdma_dcqcn_cc_params dcqcn_params; 366 366 __le64 *host_ctx; 367 367 u64 *scratch_array; 368 + u64 requested_ops; 369 + atomic64_t completed_ops; 368 370 u32 cqp_id; 369 371 u32 sq_size; 370 372 u32 hw_sq_size;
+4 -1
drivers/infiniband/hw/irdma/uk.c
··· 1161 1161 } 1162 1162 wqe_idx = (u32)FIELD_GET(IRDMA_CQ_WQEIDX, qword3); 1163 1163 info->qp_handle = (irdma_qp_handle)(unsigned long)qp; 1164 - info->op_type = (u8)FIELD_GET(IRDMA_CQ_SQ, qword3); 1164 + info->op_type = (u8)FIELD_GET(IRDMACQ_OP, qword3); 1165 1165 1166 1166 if (info->q_type == IRDMA_CQE_QTYPE_RQ) { 1167 1167 u32 array_idx; ··· 1526 1526 1527 1527 if (polarity != temp) 1528 1528 break; 1529 + 1530 + /* Ensure CQE contents are read after valid bit is checked */ 1531 + dma_rmb(); 1529 1532 1530 1533 get_64bit_val(cqe, 8, &comp_ctx); 1531 1534 if ((void *)(unsigned long)comp_ctx == q)
+4 -4
drivers/infiniband/hw/irdma/utils.c
··· 481 481 if (cqp_request->dynamic) { 482 482 kfree(cqp_request); 483 483 } else { 484 - cqp_request->request_done = false; 484 + WRITE_ONCE(cqp_request->request_done, false); 485 485 cqp_request->callback_fcn = NULL; 486 486 cqp_request->waiting = false; 487 487 ··· 515 515 { 516 516 if (cqp_request->waiting) { 517 517 cqp_request->compl_info.error = true; 518 - cqp_request->request_done = true; 518 + WRITE_ONCE(cqp_request->request_done, true); 519 519 wake_up(&cqp_request->waitq); 520 520 } 521 521 wait_event_timeout(cqp->remove_wq, ··· 567 567 bool cqp_error = false; 568 568 int err_code = 0; 569 569 570 - cqp_timeout.compl_cqp_cmds = rf->sc_dev.cqp_cmd_stats[IRDMA_OP_CMPL_CMDS]; 570 + cqp_timeout.compl_cqp_cmds = atomic64_read(&rf->sc_dev.cqp->completed_ops); 571 571 do { 572 572 irdma_cqp_ce_handler(rf, &rf->ccq.sc_cq); 573 573 if (wait_event_timeout(cqp_request->waitq, 574 - cqp_request->request_done, 574 + READ_ONCE(cqp_request->request_done), 575 575 msecs_to_jiffies(CQP_COMPL_WAIT_TIME_MS))) 576 576 break; 577 577
+9 -9
drivers/infiniband/hw/mlx4/qp.c
··· 565 565 return (-EOPNOTSUPP); 566 566 } 567 567 568 - if (ucmd->rx_hash_fields_mask & ~(MLX4_IB_RX_HASH_SRC_IPV4 | 569 - MLX4_IB_RX_HASH_DST_IPV4 | 570 - MLX4_IB_RX_HASH_SRC_IPV6 | 571 - MLX4_IB_RX_HASH_DST_IPV6 | 572 - MLX4_IB_RX_HASH_SRC_PORT_TCP | 573 - MLX4_IB_RX_HASH_DST_PORT_TCP | 574 - MLX4_IB_RX_HASH_SRC_PORT_UDP | 575 - MLX4_IB_RX_HASH_DST_PORT_UDP | 576 - MLX4_IB_RX_HASH_INNER)) { 568 + if (ucmd->rx_hash_fields_mask & ~(u64)(MLX4_IB_RX_HASH_SRC_IPV4 | 569 + MLX4_IB_RX_HASH_DST_IPV4 | 570 + MLX4_IB_RX_HASH_SRC_IPV6 | 571 + MLX4_IB_RX_HASH_DST_IPV6 | 572 + MLX4_IB_RX_HASH_SRC_PORT_TCP | 573 + MLX4_IB_RX_HASH_DST_PORT_TCP | 574 + MLX4_IB_RX_HASH_SRC_PORT_UDP | 575 + MLX4_IB_RX_HASH_DST_PORT_UDP | 576 + MLX4_IB_RX_HASH_INNER)) { 577 577 pr_debug("RX Hash fields_mask has unsupported mask (0x%llx)\n", 578 578 ucmd->rx_hash_fields_mask); 579 579 return (-EOPNOTSUPP);
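The added (u64) cast matters because complementing a 32-bit mask and then testing a 64-bit request zero-extends the complement, leaving bits 32-63 unchecked. A two-function demo of the difference (the mask value is chosen for illustration only):

#include <inttypes.h>
#include <stdio.h>

#define SUPPORTED 0x800000ffu   /* 32-bit mask, top bit in use */

static int has_unsupported_32(uint64_t req)
{
    return (req & ~SUPPORTED) != 0;             /* ~ done in 32 bits */
}

static int has_unsupported_64(uint64_t req)
{
    return (req & ~(uint64_t)SUPPORTED) != 0;   /* ~ done in 64 bits */
}

int main(void)
{
    uint64_t req = 1ull << 40;  /* clearly unsupported high bit */

    printf("32-bit complement catches it: %d\n", has_unsupported_32(req)); /* 0 */
    printf("64-bit complement catches it: %d\n", has_unsupported_64(req)); /* 1 */
    return 0;
}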
+1 -1
drivers/infiniband/hw/mthca/mthca_qp.c
··· 1393 1393 if (mthca_array_get(&dev->qp_table.qp, mqpn)) 1394 1394 err = -EBUSY; 1395 1395 else 1396 - mthca_array_set(&dev->qp_table.qp, mqpn, qp->sqp); 1396 + mthca_array_set(&dev->qp_table.qp, mqpn, qp); 1397 1397 spin_unlock_irq(&dev->qp_table.lock); 1398 1398 1399 1399 if (err)
+2 -1
drivers/infiniband/sw/rxe/rxe_mw.c
··· 199 199 200 200 if (access & ~RXE_ACCESS_SUPPORTED_MW) { 201 201 rxe_err_mw(mw, "access %#x not supported", access); 202 - return -EOPNOTSUPP; 202 + ret = -EOPNOTSUPP; 203 + goto err_drop_mr; 203 204 } 204 205 205 206 spin_lock_bh(&mw->lock);
+3 -9
drivers/iommu/iommufd/device.c
··· 109 109 */ 110 110 void iommufd_device_unbind(struct iommufd_device *idev) 111 111 { 112 - bool was_destroyed; 113 - 114 - was_destroyed = iommufd_object_destroy_user(idev->ictx, &idev->obj); 115 - WARN_ON(!was_destroyed); 112 + iommufd_object_destroy_user(idev->ictx, &idev->obj); 116 113 } 117 114 EXPORT_SYMBOL_NS_GPL(iommufd_device_unbind, IOMMUFD); 118 115 ··· 379 382 mutex_unlock(&hwpt->devices_lock); 380 383 381 384 if (hwpt->auto_domain) 382 - iommufd_object_destroy_user(idev->ictx, &hwpt->obj); 385 + iommufd_object_deref_user(idev->ictx, &hwpt->obj); 383 386 else 384 387 refcount_dec(&hwpt->obj.users); 385 388 ··· 453 456 */ 454 457 void iommufd_access_destroy(struct iommufd_access *access) 455 458 { 456 - bool was_destroyed; 457 - 458 - was_destroyed = iommufd_object_destroy_user(access->ictx, &access->obj); 459 - WARN_ON(!was_destroyed); 459 + iommufd_object_destroy_user(access->ictx, &access->obj); 460 460 } 461 461 EXPORT_SYMBOL_NS_GPL(iommufd_access_destroy, IOMMUFD); 462 462
+13 -2
drivers/iommu/iommufd/iommufd_private.h
··· 176 176 struct iommufd_object *obj); 177 177 void iommufd_object_finalize(struct iommufd_ctx *ictx, 178 178 struct iommufd_object *obj); 179 - bool iommufd_object_destroy_user(struct iommufd_ctx *ictx, 180 - struct iommufd_object *obj); 179 + void __iommufd_object_destroy_user(struct iommufd_ctx *ictx, 180 + struct iommufd_object *obj, bool allow_fail); 181 + static inline void iommufd_object_destroy_user(struct iommufd_ctx *ictx, 182 + struct iommufd_object *obj) 183 + { 184 + __iommufd_object_destroy_user(ictx, obj, false); 185 + } 186 + static inline void iommufd_object_deref_user(struct iommufd_ctx *ictx, 187 + struct iommufd_object *obj) 188 + { 189 + __iommufd_object_destroy_user(ictx, obj, true); 190 + } 191 + 181 192 struct iommufd_object *_iommufd_object_alloc(struct iommufd_ctx *ictx, 182 193 size_t size, 183 194 enum iommufd_object_type type);
+59 -19
drivers/iommu/iommufd/main.c
··· 117 117 } 118 118 119 119 /* 120 + * Remove the given object id from the xarray if the only reference to the 121 + * object is held by the xarray. The caller must call ops destroy(). 122 + */ 123 + static struct iommufd_object *iommufd_object_remove(struct iommufd_ctx *ictx, 124 + u32 id, bool extra_put) 125 + { 126 + struct iommufd_object *obj; 127 + XA_STATE(xas, &ictx->objects, id); 128 + 129 + xa_lock(&ictx->objects); 130 + obj = xas_load(&xas); 131 + if (xa_is_zero(obj) || !obj) { 132 + obj = ERR_PTR(-ENOENT); 133 + goto out_xa; 134 + } 135 + 136 + /* 137 + * If the caller is holding a ref on obj we put it here under the 138 + * spinlock. 139 + */ 140 + if (extra_put) 141 + refcount_dec(&obj->users); 142 + 143 + if (!refcount_dec_if_one(&obj->users)) { 144 + obj = ERR_PTR(-EBUSY); 145 + goto out_xa; 146 + } 147 + 148 + xas_store(&xas, NULL); 149 + if (ictx->vfio_ioas == container_of(obj, struct iommufd_ioas, obj)) 150 + ictx->vfio_ioas = NULL; 151 + 152 + out_xa: 153 + xa_unlock(&ictx->objects); 154 + 155 + /* The returned object reference count is zero */ 156 + return obj; 157 + } 158 + 159 + /* 120 160 * The caller holds a users refcount and wants to destroy the object. Returns 121 161 * true if the object was destroyed. In all cases the caller no longer has a 122 162 * reference on obj. 123 163 */ 124 - bool iommufd_object_destroy_user(struct iommufd_ctx *ictx, 125 - struct iommufd_object *obj) 164 + void __iommufd_object_destroy_user(struct iommufd_ctx *ictx, 165 + struct iommufd_object *obj, bool allow_fail) 126 166 { 167 + struct iommufd_object *ret; 168 + 127 169 /* 128 170 * The purpose of the destroy_rwsem is to ensure deterministic 129 171 * destruction of objects used by external drivers and destroyed by this ··· 173 131 * side of this, such as during ioctl execution. 174 132 */ 175 133 down_write(&obj->destroy_rwsem); 176 - xa_lock(&ictx->objects); 177 - refcount_dec(&obj->users); 178 - if (!refcount_dec_if_one(&obj->users)) { 179 - xa_unlock(&ictx->objects); 180 - up_write(&obj->destroy_rwsem); 181 - return false; 182 - } 183 - __xa_erase(&ictx->objects, obj->id); 184 - if (ictx->vfio_ioas && &ictx->vfio_ioas->obj == obj) 185 - ictx->vfio_ioas = NULL; 186 - xa_unlock(&ictx->objects); 134 + ret = iommufd_object_remove(ictx, obj->id, true); 187 135 up_write(&obj->destroy_rwsem); 136 + 137 + if (allow_fail && IS_ERR(ret)) 138 + return; 139 + 140 + /* 141 + * If there is a bug and we couldn't destroy the object then we did put 142 + * back the caller's refcount and will eventually try to free it again 143 + * during close. 144 + */ 145 + if (WARN_ON(IS_ERR(ret))) 146 + return; 188 147 189 148 iommufd_object_ops[obj->type].destroy(obj); 190 149 kfree(obj); 191 - return true; 192 150 } 193 151 194 152 static int iommufd_destroy(struct iommufd_ucmd *ucmd) ··· 196 154 struct iommu_destroy *cmd = ucmd->cmd; 197 155 struct iommufd_object *obj; 198 156 199 - obj = iommufd_get_object(ucmd->ictx, cmd->id, IOMMUFD_OBJ_ANY); 157 + obj = iommufd_object_remove(ucmd->ictx, cmd->id, false); 200 158 if (IS_ERR(obj)) 201 159 return PTR_ERR(obj); 202 - iommufd_ref_to_users(obj); 203 - /* See iommufd_ref_to_users() */ 204 - if (!iommufd_object_destroy_user(ucmd->ictx, obj)) 205 - return -EBUSY; 160 + iommufd_object_ops[obj->type].destroy(obj); 161 + kfree(obj); 206 162 return 0; 207 163 } 208 164
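The new iommufd_object_remove() implements destroy-only-if-last-reference: drop the caller's ref, then erase the object only when exactly the registry's reference remains, all under one lock. A simplified single-threaded sketch of that decision (the refcount helpers are reimplemented here for illustration):

#include <stdio.h>

struct object {
    unsigned int users;
    int id;
};

static int refcount_dec_if_one(unsigned int *users)
{
    if (*users != 1)
        return 0;
    *users = 0;
    return 1;
}

/* Returns the object with refcount 0 on success, NULL if still busy. */
static struct object *object_remove(struct object *obj, int extra_put)
{
    if (extra_put)
        obj->users--;           /* drop the caller's reference */

    if (!refcount_dec_if_one(&obj->users))
        return NULL;            /* someone else still holds it: -EBUSY */

    /* here the registry entry would be erased; refcount is now zero */
    return obj;
}

int main(void)
{
    struct object obj = { .users = 2, .id = 7 };    /* registry + caller */

    if (object_remove(&obj, 1))
        printf("object %d destroyed\n", obj.id);
    else
        printf("object %d busy\n", obj.id);
    return 0;
}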
+1 -1
drivers/iommu/iommufd/pages.c
··· 297 297 batch->pfns[0] = batch->pfns[batch->end - 1] + 298 298 (batch->npfns[batch->end - 1] - keep_pfns); 299 299 batch->npfns[0] = keep_pfns; 300 - batch->end = 0; 300 + batch->end = 1; 301 301 } 302 302 303 303 static void batch_skip_carry(struct pfn_batch *batch, unsigned int skip_pfns)
+5 -9
drivers/irqchip/irq-bcm6345-l1.c
··· 82 82 }; 83 83 84 84 struct bcm6345_l1_cpu { 85 + struct bcm6345_l1_chip *intc; 85 86 void __iomem *map_base; 86 87 unsigned int parent_irq; 87 88 u32 enable_cache[]; ··· 116 115 117 116 static void bcm6345_l1_irq_handle(struct irq_desc *desc) 118 117 { 119 - struct bcm6345_l1_chip *intc = irq_desc_get_handler_data(desc); 120 - struct bcm6345_l1_cpu *cpu; 118 + struct bcm6345_l1_cpu *cpu = irq_desc_get_handler_data(desc); 119 + struct bcm6345_l1_chip *intc = cpu->intc; 121 120 struct irq_chip *chip = irq_desc_get_chip(desc); 122 121 unsigned int idx; 123 - 124 - #ifdef CONFIG_SMP 125 - cpu = intc->cpus[cpu_logical_map(smp_processor_id())]; 126 - #else 127 - cpu = intc->cpus[0]; 128 - #endif 129 122 130 123 chained_irq_enter(chip, desc); 131 124 ··· 248 253 if (!cpu) 249 254 return -ENOMEM; 250 255 256 + cpu->intc = intc; 251 257 cpu->map_base = ioremap(res.start, sz); 252 258 if (!cpu->map_base) 253 259 return -ENOMEM; ··· 267 271 return -EINVAL; 268 272 } 269 273 irq_set_chained_handler_and_data(cpu->parent_irq, 270 - bcm6345_l1_irq_handle, intc); 274 + bcm6345_l1_irq_handle, cpu); 271 275 272 276 return 0; 273 277 }
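The fix registers the per-CPU structure, extended with a back-pointer to the controller, as the chained handler's data, so the handler no longer has to guess its slice from smp_processor_id(). A small sketch of the embed-a-back-pointer pattern (all types here are invented):

#include <stdio.h>

struct intc;

struct intc_cpu {
    struct intc *intc;  /* back-pointer added by the fix */
    int cpu_id;
};

struct intc {
    struct intc_cpu cpus[2];
};

/* The chained handler receives exactly the slice it should service,
 * regardless of which CPU happens to run it. */
static void irq_handle(void *handler_data)
{
    struct intc_cpu *cpu = handler_data;
    struct intc *intc = cpu->intc;

    (void)intc;     /* shared state is still reachable when needed */
    printf("servicing bank for cpu %d\n", cpu->cpu_id);
}

int main(void)
{
    struct intc intc;

    for (int i = 0; i < 2; i++) {
        intc.cpus[i].intc = &intc;
        intc.cpus[i].cpu_id = i;
    }

    /* Registration passes the per-CPU struct, not the controller. */
    irq_handle(&intc.cpus[1]);
    return 0;
}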
+48 -30
drivers/irqchip/irq-gic-v3-its.c
··· 273 273 raw_spin_unlock_irqrestore(&vpe->vpe_lock, flags); 274 274 } 275 275 276 + static struct irq_chip its_vpe_irq_chip; 277 + 276 278 static int irq_to_cpuid_lock(struct irq_data *d, unsigned long *flags) 277 279 { 278 - struct its_vlpi_map *map = get_vlpi_map(d); 280 + struct its_vpe *vpe = NULL; 279 281 int cpu; 280 282 281 - if (map) { 282 - cpu = vpe_to_cpuid_lock(map->vpe, flags); 283 + if (d->chip == &its_vpe_irq_chip) { 284 + vpe = irq_data_get_irq_chip_data(d); 285 + } else { 286 + struct its_vlpi_map *map = get_vlpi_map(d); 287 + if (map) 288 + vpe = map->vpe; 289 + } 290 + 291 + if (vpe) { 292 + cpu = vpe_to_cpuid_lock(vpe, flags); 283 293 } else { 284 294 /* Physical LPIs are already locked via the irq_desc lock */ 285 295 struct its_device *its_dev = irq_data_get_irq_chip_data(d); ··· 303 293 304 294 static void irq_to_cpuid_unlock(struct irq_data *d, unsigned long flags) 305 295 { 306 - struct its_vlpi_map *map = get_vlpi_map(d); 296 + struct its_vpe *vpe = NULL; 307 297 308 - if (map) 309 - vpe_to_cpuid_unlock(map->vpe, flags); 298 + if (d->chip == &its_vpe_irq_chip) { 299 + vpe = irq_data_get_irq_chip_data(d); 300 + } else { 301 + struct its_vlpi_map *map = get_vlpi_map(d); 302 + if (map) 303 + vpe = map->vpe; 304 + } 305 + 306 + if (vpe) 307 + vpe_to_cpuid_unlock(vpe, flags); 310 308 } 311 309 312 310 static struct its_collection *valid_col(struct its_collection *col) ··· 1451 1433 cpu_relax(); 1452 1434 } 1453 1435 1436 + static void __direct_lpi_inv(struct irq_data *d, u64 val) 1437 + { 1438 + void __iomem *rdbase; 1439 + unsigned long flags; 1440 + int cpu; 1441 + 1442 + /* Target the redistributor this LPI is currently routed to */ 1443 + cpu = irq_to_cpuid_lock(d, &flags); 1444 + raw_spin_lock(&gic_data_rdist_cpu(cpu)->rd_lock); 1445 + 1446 + rdbase = per_cpu_ptr(gic_rdists->rdist, cpu)->rd_base; 1447 + gic_write_lpir(val, rdbase + GICR_INVLPIR); 1448 + wait_for_syncr(rdbase); 1449 + 1450 + raw_spin_unlock(&gic_data_rdist_cpu(cpu)->rd_lock); 1451 + irq_to_cpuid_unlock(d, flags); 1452 + } 1453 + 1454 1454 static void direct_lpi_inv(struct irq_data *d) 1455 1455 { 1456 1456 struct its_vlpi_map *map = get_vlpi_map(d); 1457 - void __iomem *rdbase; 1458 - unsigned long flags; 1459 1457 u64 val; 1460 - int cpu; 1461 1458 1462 1459 if (map) { 1463 1460 struct its_device *its_dev = irq_data_get_irq_chip_data(d); ··· 1486 1453 val = d->hwirq; 1487 1454 } 1488 1455 1489 - /* Target the redistributor this LPI is currently routed to */ 1490 - cpu = irq_to_cpuid_lock(d, &flags); 1491 - raw_spin_lock(&gic_data_rdist_cpu(cpu)->rd_lock); 1492 - rdbase = per_cpu_ptr(gic_rdists->rdist, cpu)->rd_base; 1493 - gic_write_lpir(val, rdbase + GICR_INVLPIR); 1494 - 1495 - wait_for_syncr(rdbase); 1496 - raw_spin_unlock(&gic_data_rdist_cpu(cpu)->rd_lock); 1497 - irq_to_cpuid_unlock(d, flags); 1456 + __direct_lpi_inv(d, val); 1498 1457 } 1499 1458 1500 1459 static void lpi_update_config(struct irq_data *d, u8 clr, u8 set) ··· 3978 3953 { 3979 3954 struct its_vpe *vpe = irq_data_get_irq_chip_data(d); 3980 3955 3981 - if (gic_rdists->has_direct_lpi) 3957 + __direct_lpi_inv(d, d->parent_data->hwirq); 3958 + else 3991 3959 its_vpe_send_cmd(vpe, its_send_inv); 3992 - } 3993 3960 } ··· 4744 4727 { 4745 4728 struct its_node *its = data; 4746 4729 4747 - if (!of_machine_is_compatible("rockchip,rk3588")) 4730 + if (!of_machine_is_compatible("rockchip,rk3588") && 4731 + !of_machine_is_compatible("rockchip,rk3588s")) 4748 4732 return false; 4749 4733 4750 4734 its->flags |= ITS_FLAGS_FORCE_NON_SHAREABLE;
+61 -1
drivers/irqchip/irq-gic-v3.c
··· 69 69 static void __iomem *t241_dist_base_alias[T241_CHIPS_MAX] __read_mostly; 70 70 static DEFINE_STATIC_KEY_FALSE(gic_nvidia_t241_erratum); 71 71 72 + static DEFINE_STATIC_KEY_FALSE(gic_arm64_2941627_erratum); 73 + 72 74 static struct gic_chip_data gic_data __read_mostly; 73 75 static DEFINE_STATIC_KEY_TRUE(supports_deactivate_key); 74 76 ··· 594 592 gic_irq_set_prio(d, GICD_INT_DEF_PRI); 595 593 } 596 594 595 + static bool gic_arm64_erratum_2941627_needed(struct irq_data *d) 596 + { 597 + enum gic_intid_range range; 598 + 599 + if (!static_branch_unlikely(&gic_arm64_2941627_erratum)) 600 + return false; 601 + 602 + range = get_intid_range(d); 603 + 604 + /* 605 + * The workaround is needed if the IRQ is an SPI and 606 + * the target cpu is different from the one we are 607 + * executing on. 608 + */ 609 + return (range == SPI_RANGE || range == ESPI_RANGE) && 610 + !cpumask_test_cpu(raw_smp_processor_id(), 611 + irq_data_get_effective_affinity_mask(d)); 612 + } 613 + 597 614 static void gic_eoi_irq(struct irq_data *d) 598 615 { 599 616 write_gicreg(gic_irq(d), ICC_EOIR1_EL1); 600 617 isb(); 618 + 619 + if (gic_arm64_erratum_2941627_needed(d)) { 620 + /* 621 + * Make sure the GIC stream deactivate packet 622 + * issued by ICC_EOIR1_EL1 has completed before 623 + * deactivating through GICD_IACTIVER. 624 + */ 625 + dsb(sy); 626 + gic_poke_irq(d, GICD_ICACTIVER); 627 + } 601 628 } 602 629 603 630 static void gic_eoimode1_eoi_irq(struct irq_data *d) ··· 637 606 */ 638 607 if (gic_irq(d) >= 8192 || irqd_is_forwarded_to_vcpu(d)) 639 608 return; 640 - gic_write_dir(gic_irq(d)); 609 + 610 + if (!gic_arm64_erratum_2941627_needed(d)) 611 + gic_write_dir(gic_irq(d)); 612 + else 613 + gic_poke_irq(d, GICD_ICACTIVER); 641 614 } 642 615 643 616 static int gic_set_type(struct irq_data *d, unsigned int type) ··· 1851 1816 return true; 1852 1817 } 1853 1818 1819 + static bool gic_enable_quirk_arm64_2941627(void *data) 1820 + { 1821 + static_branch_enable(&gic_arm64_2941627_erratum); 1822 + return true; 1823 + } 1824 + 1854 1825 static const struct gic_quirk gic_quirks[] = { 1855 1826 { 1856 1827 .desc = "GICv3: Qualcomm MSM8996 broken firmware", ··· 1903 1862 .iidr = 0x0402043b, 1904 1863 .mask = 0xffffffff, 1905 1864 .init = gic_enable_quirk_nvidia_t241, 1865 + }, 1866 + { 1867 + /* 1868 + * GIC-700: 2941627 workaround - IP variant [0,1] 1869 + * 1870 + */ 1871 + .desc = "GICv3: ARM64 erratum 2941627", 1872 + .iidr = 0x0400043b, 1873 + .mask = 0xff0e0fff, 1874 + .init = gic_enable_quirk_arm64_2941627, 1875 + }, 1876 + { 1877 + /* 1878 + * GIC-700: 2941627 workaround - IP variant [2] 1879 + */ 1880 + .desc = "GICv3: ARM64 erratum 2941627", 1881 + .iidr = 0x0402043b, 1882 + .mask = 0xff0f0fff, 1883 + .init = gic_enable_quirk_arm64_2941627, 1906 1884 }, 1907 1885 { 1908 1886 }
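The two new erratum entries rely on masked IIDR matching: an entry fires when (iidr & mask) == entry->iidr, so one table row can cover a whole range of IP variants. A standalone sketch of that table walk (the iidr/mask values are copied from the entries above; everything else is illustrative):

#include <stdint.h>
#include <stdio.h>

struct quirk {
    const char *desc;
    uint32_t iidr;
    uint32_t mask;
    void (*init)(void);
};

static void enable_2941627(void)
{
    puts("erratum 2941627 workaround enabled");
}

static const struct quirk quirks[] = {
    { "GIC-700 variant [0,1]", 0x0400043b, 0xff0e0fff, enable_2941627 },
    { "GIC-700 variant [2]",   0x0402043b, 0xff0f0fff, enable_2941627 },
    { 0 },
};

static void apply_quirks(uint32_t iidr)
{
    for (const struct quirk *q = quirks; q->desc; q++) {
        if ((iidr & q->mask) == q->iidr) {
            printf("%s: ", q->desc);
            q->init();
        }
    }
}

int main(void)
{
    apply_quirks(0x0401043b);   /* variant 1: matches the first row only */
    return 0;
}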
+18 -10
drivers/md/dm-cache-policy-smq.c
··· 857 857 858 858 struct background_tracker *bg_work; 859 859 860 - bool migrations_allowed; 860 + bool migrations_allowed:1; 861 + 862 + /* 863 + * If this is set the policy will try and clean the whole cache 864 + * even if the device is not idle. 865 + */ 866 + bool cleaner:1; 861 867 }; 862 868 863 869 /*----------------------------------------------------------------*/ ··· 1144 1138 * Cache entries may not be populated. So we cannot rely on the 1145 1139 * size of the clean queue. 1146 1140 */ 1147 - if (idle) { 1141 + if (idle || mq->cleaner) { 1148 1142 /* 1149 1143 * We'd like to clean everything. 1150 1144 */ ··· 1728 1722 *hotspot_block_size /= 2u; 1729 1723 } 1730 1724 1731 - static struct dm_cache_policy *__smq_create(dm_cblock_t cache_size, 1732 - sector_t origin_size, 1733 - sector_t cache_block_size, 1734 - bool mimic_mq, 1735 - bool migrations_allowed) 1725 + static struct dm_cache_policy * 1726 + __smq_create(dm_cblock_t cache_size, sector_t origin_size, sector_t cache_block_size, 1727 + bool mimic_mq, bool migrations_allowed, bool cleaner) 1736 1728 { 1737 1729 unsigned int i; 1738 1730 unsigned int nr_sentinels_per_queue = 2u * NR_CACHE_LEVELS; ··· 1817 1813 goto bad_btracker; 1818 1814 1819 1815 mq->migrations_allowed = migrations_allowed; 1816 + mq->cleaner = cleaner; 1820 1817 1821 1818 return &mq->policy; 1822 1819 ··· 1841 1836 sector_t origin_size, 1842 1837 sector_t cache_block_size) 1843 1838 { 1844 - return __smq_create(cache_size, origin_size, cache_block_size, false, true); 1839 + return __smq_create(cache_size, origin_size, cache_block_size, 1840 + false, true, false); 1845 1841 } 1846 1842 1847 1843 static struct dm_cache_policy *mq_create(dm_cblock_t cache_size, 1848 1844 sector_t origin_size, 1849 1845 sector_t cache_block_size) 1850 1846 { 1851 - return __smq_create(cache_size, origin_size, cache_block_size, true, true); 1847 + return __smq_create(cache_size, origin_size, cache_block_size, 1848 + true, true, false); 1852 1849 } 1853 1850 1854 1851 static struct dm_cache_policy *cleaner_create(dm_cblock_t cache_size, 1855 1852 sector_t origin_size, 1856 1853 sector_t cache_block_size) 1857 1854 { 1858 - return __smq_create(cache_size, origin_size, cache_block_size, false, false); 1855 + return __smq_create(cache_size, origin_size, cache_block_size, 1856 + false, false, true); 1859 1857 } 1860 1858 1861 1859 /*----------------------------------------------------------------*/
+1
drivers/md/dm-integrity.c
··· 2676 2676 recalc_tags = kvmalloc(recalc_tags_size, GFP_NOIO); 2677 2677 if (!recalc_tags) { 2678 2678 vfree(recalc_buffer); 2679 + recalc_buffer = NULL; 2679 2680 goto oom; 2680 2681 } 2681 2682
+9 -11
drivers/md/dm-raid.c
··· 3251 3251 r = md_start(&rs->md); 3252 3252 if (r) { 3253 3253 ti->error = "Failed to start raid array"; 3254 - mddev_unlock(&rs->md); 3255 - goto bad_md_start; 3254 + goto bad_unlock; 3256 3255 } 3257 3256 3258 3257 /* If raid4/5/6 journal mode explicitly requested (only possible with journal dev) -> set it */ ··· 3259 3260 r = r5c_journal_mode_set(&rs->md, rs->journal_dev.mode); 3260 3261 if (r) { 3261 3262 ti->error = "Failed to set raid4/5/6 journal mode"; 3262 - mddev_unlock(&rs->md); 3263 - goto bad_journal_mode_set; 3263 + goto bad_unlock; 3264 3264 } 3265 3265 } 3266 3266 ··· 3270 3272 if (rs_is_raid456(rs)) { 3271 3273 r = rs_set_raid456_stripe_cache(rs); 3272 3274 if (r) 3273 - goto bad_stripe_cache; 3275 + goto bad_unlock; 3274 3276 } 3275 3277 3276 3278 /* Now do an early reshape check */ 3277 3279 if (test_bit(RT_FLAG_RESHAPE_RS, &rs->runtime_flags)) { 3278 3280 r = rs_check_reshape(rs); 3279 3281 if (r) 3280 - goto bad_check_reshape; 3282 + goto bad_unlock; 3281 3283 3282 3284 /* Restore new, ctr requested layout to perform check */ 3283 3285 rs_config_restore(rs, &rs_layout); ··· 3286 3288 r = rs->md.pers->check_reshape(&rs->md); 3287 3289 if (r) { 3288 3290 ti->error = "Reshape check failed"; 3289 - goto bad_check_reshape; 3291 + goto bad_unlock; 3290 3292 } 3291 3293 } 3292 3294 } ··· 3297 3299 mddev_unlock(&rs->md); 3298 3300 return 0; 3299 3301 3300 - bad_md_start: 3301 - bad_journal_mode_set: 3302 - bad_stripe_cache: 3303 - bad_check_reshape: 3302 + bad_unlock: 3304 3303 md_stop(&rs->md); 3304 + mddev_unlock(&rs->md); 3305 3305 bad: 3306 3306 raid_set_free(rs); 3307 3307 ··· 3310 3314 { 3311 3315 struct raid_set *rs = ti->private; 3312 3316 3317 + mddev_lock_nointr(&rs->md); 3313 3318 md_stop(&rs->md); 3319 + mddev_unlock(&rs->md); 3314 3320 raid_set_free(rs); 3315 3321 } 3316 3322
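The ctr path above collapses four error labels into a single bad_unlock label so every failure stops the array and releases the lock in one place, and raid_dtr() now takes the lock that md_stop() asserts. A pthread sketch of that single-exit shape (the steps and names are invented):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t reconfig_mutex = PTHREAD_MUTEX_INITIALIZER;

static void md_stop_locked(void)
{
    /* in the fixed driver, md_stop() asserts this lock is held */
    puts("stopping array (lock held)");
}

static int raid_ctr(int fail_step)
{
    pthread_mutex_lock(&reconfig_mutex);

    if (fail_step == 1)
        goto bad_unlock;    /* e.g. md_start() failed */
    if (fail_step == 2)
        goto bad_unlock;    /* e.g. reshape check failed */

    pthread_mutex_unlock(&reconfig_mutex);
    return 0;

bad_unlock:
    md_stop_locked();       /* stop while still holding the lock */
    pthread_mutex_unlock(&reconfig_mutex);
    return -1;
}

int main(void)
{
    printf("ctr: %d\n", raid_ctr(2));
    return 0;
}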
+2
drivers/md/md.c
··· 6247 6247 6248 6248 void md_stop(struct mddev *mddev) 6249 6249 { 6250 + lockdep_assert_held(&mddev->reconfig_mutex); 6251 + 6250 6252 /* stop the array and free any attached data structures. 6251 6253 * This is called from dm-raid 6252 6254 */
+5 -2
drivers/media/cec/usb/pulse8/pulse8-cec.c
··· 809 809 810 810 mutex_lock(&pulse8->lock); 811 811 cmd = MSGCODE_PING; 812 - pulse8_send_and_wait(pulse8, &cmd, 1, 813 - MSGCODE_COMMAND_ACCEPTED, 0); 812 + if (pulse8_send_and_wait(pulse8, &cmd, 1, 813 + MSGCODE_COMMAND_ACCEPTED, 0)) { 814 + dev_warn(pulse8->dev, "failed to ping EEPROM\n"); 815 + goto unlock; 816 + } 814 817 815 818 if (pulse8->vers < 2) 816 819 goto unlock;
+2 -2
drivers/media/i2c/tc358746.c
··· 813 813 u32 min_delta = 0xffffffff; 814 814 u16 prediv_max = 17; 815 815 u16 prediv_min = 1; 816 - u16 m_best, mul; 817 - u16 p_best, p; 816 + u16 m_best = 0, mul; 817 + u16 p_best = 1, p; 818 818 u8 postdiv; 819 819 820 820 if (fout > 1000 * HZ_PER_MHZ) {
-12
drivers/media/pci/cx23885/cx23885-dvb.c
··· 2459 2459 request_module("%s", info.type); 2460 2460 client_tuner = i2c_new_client_device(&dev->i2c_bus[1].i2c_adap, &info); 2461 2461 if (!i2c_client_has_driver(client_tuner)) { 2462 - module_put(client_demod->dev.driver->owner); 2463 - i2c_unregister_device(client_demod); 2464 - port->i2c_client_demod = NULL; 2465 2462 goto frontend_detach; 2466 2463 } 2467 2464 if (!try_module_get(client_tuner->dev.driver->owner)) { 2468 2465 i2c_unregister_device(client_tuner); 2469 - module_put(client_demod->dev.driver->owner); 2470 - i2c_unregister_device(client_demod); 2471 - port->i2c_client_demod = NULL; 2472 2466 goto frontend_detach; 2473 2467 } 2474 2468 port->i2c_client_tuner = client_tuner; ··· 2499 2505 request_module("%s", info.type); 2500 2506 client_tuner = i2c_new_client_device(&dev->i2c_bus[1].i2c_adap, &info); 2501 2507 if (!i2c_client_has_driver(client_tuner)) { 2502 - module_put(client_demod->dev.driver->owner); 2503 - i2c_unregister_device(client_demod); 2504 - port->i2c_client_demod = NULL; 2505 2508 goto frontend_detach; 2506 2509 } 2507 2510 if (!try_module_get(client_tuner->dev.driver->owner)) { 2508 2511 i2c_unregister_device(client_tuner); 2509 - module_put(client_demod->dev.driver->owner); 2510 - i2c_unregister_device(client_demod); 2511 - port->i2c_client_demod = NULL; 2512 2512 goto frontend_detach; 2513 2513 } 2514 2514 port->i2c_client_tuner = client_tuner;
+2 -2
drivers/media/platform/amphion/vpu_core.c
··· 826 826 827 827 static struct vpu_core_resources imx8q_enc = { 828 828 .type = VPU_CORE_TYPE_ENC, 829 - .fwname = "vpu/vpu_fw_imx8_enc.bin", 829 + .fwname = "amphion/vpu/vpu_fw_imx8_enc.bin", 830 830 .stride = 16, 831 831 .max_width = 1920, 832 832 .max_height = 1920, ··· 841 841 842 842 static struct vpu_core_resources imx8q_dec = { 843 843 .type = VPU_CORE_TYPE_DEC, 844 - .fwname = "vpu/vpu_fw_imx8_dec.bin", 844 + .fwname = "amphion/vpu/vpu_fw_imx8_dec.bin", 845 845 .stride = 256, 846 846 .max_width = 8188, 847 847 .max_height = 8188,
+4 -5
drivers/media/platform/amphion/vpu_mbox.c
··· 46 46 cl->rx_callback = vpu_mbox_rx_callback; 47 47 48 48 ch = mbox_request_channel_byname(cl, mbox->name); 49 - if (IS_ERR(ch)) { 50 - dev_err(dev, "Failed to request mbox chan %s, ret : %ld\n", 51 - mbox->name, PTR_ERR(ch)); 52 - return PTR_ERR(ch); 53 - } 49 + if (IS_ERR(ch)) 50 + return dev_err_probe(dev, PTR_ERR(ch), 51 + "Failed to request mbox chan %s\n", 52 + mbox->name); 54 53 55 54 mbox->ch = ch; 56 55 return 0;
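dev_err_probe() folds the error message and the return value into one call and, for -EPROBE_DEFER, stays quiet since the probe will simply be retried later. A userspace imitation of the idiom (err_probe() and the surrounding functions are made up; 517 is the kernel's EPROBE_DEFER value):

#include <stdarg.h>
#include <stdio.h>

#define EPROBE_DEFER 517    /* deferral: the probe will be retried */

/* Log and return the error in one expression, quiet on deferral. */
static int err_probe(const char *dev, int err, const char *fmt, ...)
{
    va_list ap;

    if (err != -EPROBE_DEFER) {
        va_start(ap, fmt);
        fprintf(stderr, "%s: error %d: ", dev, err);
        vfprintf(stderr, fmt, ap);
        va_end(ap);
    }
    return err;
}

static int request_channel(const char *name)
{
    (void)name;
    return -EPROBE_DEFER;   /* provider not ready yet */
}

static int probe(void)
{
    int ret = request_channel("tx");

    if (ret < 0)
        return err_probe("vpu", ret, "Failed to request mbox chan %s\n", "tx");
    return 0;
}

int main(void)
{
    printf("probe: %d\n", probe());     /* -517, logged nothing */
    return 0;
}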
+1 -5
drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c
··· 28 28 #include "mtk_jpeg_core.h" 29 29 #include "mtk_jpeg_dec_parse.h" 30 30 31 - #if defined(CONFIG_OF) 32 31 static struct mtk_jpeg_fmt mtk_jpeg_enc_formats[] = { 33 32 { 34 33 .fourcc = V4L2_PIX_FMT_JPEG, ··· 101 102 .flags = MTK_JPEG_FMT_FLAG_CAPTURE, 102 103 }, 103 104 }; 104 - #endif 105 105 106 106 #define MTK_JPEG_ENC_NUM_FORMATS ARRAY_SIZE(mtk_jpeg_enc_formats) 107 107 #define MTK_JPEG_DEC_NUM_FORMATS ARRAY_SIZE(mtk_jpeg_dec_formats) ··· 1453 1455 SET_RUNTIME_PM_OPS(mtk_jpeg_pm_suspend, mtk_jpeg_pm_resume, NULL) 1454 1456 }; 1455 1457 1456 - #if defined(CONFIG_OF) 1457 1458 static int mtk_jpegenc_get_hw(struct mtk_jpeg_ctx *ctx) 1458 1459 { 1459 1460 struct mtk_jpegenc_comp_dev *comp_jpeg; ··· 1948 1951 }; 1949 1952 1950 1953 MODULE_DEVICE_TABLE(of, mtk_jpeg_match); 1951 - #endif 1952 1954 1953 1955 static struct platform_driver mtk_jpeg_driver = { 1954 1956 .probe = mtk_jpeg_probe, 1955 1957 .remove_new = mtk_jpeg_remove, 1956 1958 .driver = { 1957 1959 .name = MTK_JPEG_NAME, 1958 - .of_match_table = of_match_ptr(mtk_jpeg_match), 1960 + .of_match_table = mtk_jpeg_match, 1959 1961 .pm = &mtk_jpeg_pm_ops, 1960 1962 }, 1961 1963 };
+1 -3
drivers/media/platform/mediatek/jpeg/mtk_jpeg_dec_hw.c
··· 39 39 MTK_JPEG_COLOR_400 = 0x00110000 40 40 }; 41 41 42 - #if defined(CONFIG_OF) 43 42 static const struct of_device_id mtk_jpegdec_hw_ids[] = { 44 43 { 45 44 .compatible = "mediatek,mt8195-jpgdec-hw", ··· 46 47 {}, 47 48 }; 48 49 MODULE_DEVICE_TABLE(of, mtk_jpegdec_hw_ids); 49 - #endif 50 50 51 51 static inline int mtk_jpeg_verify_align(u32 val, int align, u32 reg) 52 52 { ··· 651 653 .probe = mtk_jpegdec_hw_probe, 652 654 .driver = { 653 655 .name = "mtk-jpegdec-hw", 654 - .of_match_table = of_match_ptr(mtk_jpegdec_hw_ids), 656 + .of_match_table = mtk_jpegdec_hw_ids, 655 657 }, 656 658 }; 657 659
+1 -3
drivers/media/platform/mediatek/jpeg/mtk_jpeg_enc_hw.c
··· 46 46 {.quality_param = 97, .hardware_value = JPEG_ENC_QUALITY_Q97}, 47 47 }; 48 48 49 - #if defined(CONFIG_OF) 50 49 static const struct of_device_id mtk_jpegenc_drv_ids[] = { 51 50 { 52 51 .compatible = "mediatek,mt8195-jpgenc-hw", ··· 53 54 {}, 54 55 }; 55 56 MODULE_DEVICE_TABLE(of, mtk_jpegenc_drv_ids); 56 - #endif 57 57 58 58 void mtk_jpeg_enc_reset(void __iomem *base) 59 59 { ··· 375 377 .probe = mtk_jpegenc_hw_probe, 376 378 .driver = { 377 379 .name = "mtk-jpegenc-hw", 378 - .of_match_table = of_match_ptr(mtk_jpegenc_drv_ids), 380 + .of_match_table = mtk_jpegenc_drv_ids, 379 381 }, 380 382 }; 381 383
+2 -1
drivers/media/platform/mediatek/vcodec/vdec_msg_queue.c
··· 233 233 kfree(lat_buf->private_data); 234 234 } 235 235 236 - cancel_work_sync(&msg_queue->core_work); 236 + if (msg_queue->wdma_addr.size) 237 + cancel_work_sync(&msg_queue->core_work); 237 238 } 238 239 239 240 static void vdec_msg_queue_core_work(struct work_struct *work)
-1
drivers/media/platform/nxp/imx-jpeg/mxc-jpeg-hw.h
··· 58 58 #define CAST_OFBSIZE_LO CAST_STATUS18 59 59 #define CAST_OFBSIZE_HI CAST_STATUS19 60 60 61 - #define MXC_MAX_SLOTS 1 /* TODO use all 4 slots*/ 62 61 /* JPEG-Decoder Wrapper Slot Registers 0..3 */ 63 62 #define SLOT_BASE 0x10000 64 63 #define SLOT_STATUS 0x0
+65 -70
drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c
··· 745 745 v4l2_event_queue_fh(&ctx->fh, &ev); 746 746 } 747 747 748 - static int mxc_get_free_slot(struct mxc_jpeg_slot_data slot_data[], int n) 748 + static int mxc_get_free_slot(struct mxc_jpeg_slot_data *slot_data) 749 749 { 750 - int free_slot = 0; 751 - 752 - while (slot_data[free_slot].used && free_slot < n) 753 - free_slot++; 754 - 755 - return free_slot; /* >=n when there are no more free slots */ 750 + if (!slot_data->used) 751 + return slot_data->slot; 752 + return -1; 756 753 } 757 754 758 - static bool mxc_jpeg_alloc_slot_data(struct mxc_jpeg_dev *jpeg, 759 - unsigned int slot) 755 + static bool mxc_jpeg_alloc_slot_data(struct mxc_jpeg_dev *jpeg) 760 756 { 761 757 struct mxc_jpeg_desc *desc; 762 758 struct mxc_jpeg_desc *cfg_desc; 763 759 void *cfg_stm; 764 760 765 - if (jpeg->slot_data[slot].desc) 761 + if (jpeg->slot_data.desc) 766 762 goto skip_alloc; /* already allocated, reuse it */ 767 763 768 764 /* allocate descriptor for decoding/encoding phase */ 769 765 desc = dma_alloc_coherent(jpeg->dev, 770 766 sizeof(struct mxc_jpeg_desc), 771 - &jpeg->slot_data[slot].desc_handle, 767 + &jpeg->slot_data.desc_handle, 772 768 GFP_ATOMIC); 773 769 if (!desc) 774 770 goto err; 775 - jpeg->slot_data[slot].desc = desc; 771 + jpeg->slot_data.desc = desc; 776 772 777 773 /* allocate descriptor for configuration phase (encoder only) */ 778 774 cfg_desc = dma_alloc_coherent(jpeg->dev, 779 775 sizeof(struct mxc_jpeg_desc), 780 - &jpeg->slot_data[slot].cfg_desc_handle, 776 + &jpeg->slot_data.cfg_desc_handle, 781 777 GFP_ATOMIC); 782 778 if (!cfg_desc) 783 779 goto err; 784 - jpeg->slot_data[slot].cfg_desc = cfg_desc; 780 + jpeg->slot_data.cfg_desc = cfg_desc; 785 781 786 782 /* allocate configuration stream */ 787 783 cfg_stm = dma_alloc_coherent(jpeg->dev, 788 784 MXC_JPEG_MAX_CFG_STREAM, 789 - &jpeg->slot_data[slot].cfg_stream_handle, 785 + &jpeg->slot_data.cfg_stream_handle, 790 786 GFP_ATOMIC); 791 787 if (!cfg_stm) 792 788 goto err; 793 - jpeg->slot_data[slot].cfg_stream_vaddr = cfg_stm; 789 + jpeg->slot_data.cfg_stream_vaddr = cfg_stm; 794 790 795 791 skip_alloc: 796 - jpeg->slot_data[slot].used = true; 792 + jpeg->slot_data.used = true; 797 793 798 794 return true; 799 795 err: 800 - dev_err(jpeg->dev, "Could not allocate descriptors for slot %d", slot); 796 + dev_err(jpeg->dev, "Could not allocate descriptors for slot %d", jpeg->slot_data.slot); 801 797 802 798 return false; 803 799 } 804 800 805 - static void mxc_jpeg_free_slot_data(struct mxc_jpeg_dev *jpeg, 806 - unsigned int slot) 801 + static void mxc_jpeg_free_slot_data(struct mxc_jpeg_dev *jpeg) 807 802 { 808 - if (slot >= MXC_MAX_SLOTS) { 809 - dev_err(jpeg->dev, "Invalid slot %d, nothing to free.", slot); 810 - return; 811 - } 812 - 813 803 /* free descriptor for decoding/encoding phase */ 814 804 dma_free_coherent(jpeg->dev, sizeof(struct mxc_jpeg_desc), 815 - jpeg->slot_data[slot].desc, 805 + jpeg->slot_data.desc, 816 - jpeg->slot_data[slot].desc_handle); 806 + jpeg->slot_data.desc_handle); 817 807 818 808 /* free descriptor for encoder configuration phase / decoder DHT */ 819 809 dma_free_coherent(jpeg->dev, sizeof(struct mxc_jpeg_desc), 820 - jpeg->slot_data[slot].cfg_desc, 810 + jpeg->slot_data.cfg_desc, 821 - jpeg->slot_data[slot].cfg_desc_handle); 811 + jpeg->slot_data.cfg_desc_handle); 822 812 823 813 /* free configuration stream */ 824 814 dma_free_coherent(jpeg->dev, MXC_JPEG_MAX_CFG_STREAM, 825 815 + jpeg->slot_data.cfg_stream_vaddr, 816 + jpeg->slot_data.cfg_stream_handle); 827 817 828 818 jpeg->slot_data.used = false; 829 819 } 830 820 831 821 static void mxc_jpeg_check_and_set_last_buffer(struct mxc_jpeg_ctx *ctx, ··· 845 855 v4l2_m2m_buf_done(dst_buf, state); 846 856 847 857 mxc_jpeg_disable_irq(reg, ctx->slot); 848 - ctx->mxc_jpeg->slot_data[ctx->slot].used = false; 858 + jpeg->slot_data.used = false; 849 859 if (reset) 850 860 mxc_jpeg_sw_reset(reg); 851 861 } ··· 909 919 goto job_unlock; 910 920 } 911 921 912 - if (!jpeg->slot_data[slot].used) 922 + if (!jpeg->slot_data.used) 913 923 goto job_unlock; 914 924 915 925 dec_ret = readl(reg + MXC_SLOT_OFFSET(slot, SLOT_STATUS)); ··· 1169 1179 struct mxc_jpeg_dev *jpeg = ctx->mxc_jpeg; 1170 1180 void __iomem *reg = jpeg->base_reg; 1171 1181 unsigned int slot = ctx->slot; 1172 - struct mxc_jpeg_desc *desc = jpeg->slot_data[slot].desc; 1173 - struct mxc_jpeg_desc *cfg_desc = jpeg->slot_data[slot].cfg_desc; 1174 - dma_addr_t desc_handle = jpeg->slot_data[slot].desc_handle; 1175 - dma_addr_t cfg_desc_handle = jpeg->slot_data[slot].cfg_desc_handle; 1176 - dma_addr_t cfg_stream_handle = jpeg->slot_data[slot].cfg_stream_handle; 1177 - unsigned int *cfg_size = &jpeg->slot_data[slot].cfg_stream_size; 1178 - void *cfg_stream_vaddr = jpeg->slot_data[slot].cfg_stream_vaddr; 1182 + struct mxc_jpeg_desc *desc = jpeg->slot_data.desc; 1183 + struct mxc_jpeg_desc *cfg_desc = jpeg->slot_data.cfg_desc; 1184 + dma_addr_t desc_handle = jpeg->slot_data.desc_handle; 1185 + dma_addr_t cfg_desc_handle = jpeg->slot_data.cfg_desc_handle; 1186 + dma_addr_t cfg_stream_handle = jpeg->slot_data.cfg_stream_handle; 1187 + unsigned int *cfg_size = &jpeg->slot_data.cfg_stream_size; 1188 + void *cfg_stream_vaddr = jpeg->slot_data.cfg_stream_vaddr; 1179 1189 struct mxc_jpeg_src_buf *jpeg_src_buf; 1180 1190 1181 1191 jpeg_src_buf = vb2_to_mxc_buf(src_buf); ··· 1235 1245 struct mxc_jpeg_dev *jpeg = ctx->mxc_jpeg; 1236 1246 void __iomem *reg = jpeg->base_reg; 1237 1247 unsigned int slot = ctx->slot; 1238 - struct mxc_jpeg_desc *desc = jpeg->slot_data[slot].desc; 1239 - struct mxc_jpeg_desc *cfg_desc = jpeg->slot_data[slot].cfg_desc; 1240 - dma_addr_t desc_handle = jpeg->slot_data[slot].desc_handle; 1241 - dma_addr_t cfg_desc_handle = jpeg->slot_data[slot].cfg_desc_handle; 1242 - void *cfg_stream_vaddr = jpeg->slot_data[slot].cfg_stream_vaddr; 1248 + struct mxc_jpeg_desc *desc = jpeg->slot_data.desc; 1249 + struct mxc_jpeg_desc *cfg_desc = jpeg->slot_data.cfg_desc; 1250 + dma_addr_t desc_handle = jpeg->slot_data.desc_handle; 1251 + dma_addr_t cfg_desc_handle = jpeg->slot_data.cfg_desc_handle; 1252 + void *cfg_stream_vaddr = jpeg->slot_data.cfg_stream_vaddr; 1243 1253 struct mxc_jpeg_q_data *q_data; 1244 1254 enum mxc_jpeg_image_format img_fmt; 1245 1255 int w, h; 1246 1256 1247 1257 q_data = mxc_jpeg_get_q_data(ctx, src_buf->vb2_queue->type); 1248 1258 1249 - jpeg->slot_data[slot].cfg_stream_size = 1259 + jpeg->slot_data.cfg_stream_size = 1250 1260 mxc_jpeg_setup_cfg_stream(cfg_stream_vaddr, 1251 1261 q_data->fmt->fourcc, 1252 1262 q_data->crop.width, ··· 1255 1265 /* chain the config descriptor with the encoding descriptor */ 1256 1266 cfg_desc->next_descpt_ptr = desc_handle | MXC_NXT_DESCPT_EN; 1257 1267 1258 - cfg_desc->buf_base0 = jpeg->slot_data[slot].cfg_stream_handle; 1268 + cfg_desc->buf_base0 = jpeg->slot_data.cfg_stream_handle; 1259 1269 cfg_desc->buf_base1 = 0; 1260 1270 cfg_desc->line_pitch = 0; 1261 1271 cfg_desc->stm_bufbase = 0; /* no output expected */ ··· 1398 1408 unsigned long flags; 1399 1409 1400 1410 spin_lock_irqsave(&ctx->mxc_jpeg->hw_lock, flags); 1401 - if (ctx->slot < MXC_MAX_SLOTS && ctx->mxc_jpeg->slot_data[ctx->slot].used) { 1411 + if (ctx->mxc_jpeg->slot_data.used) { 1402 1412 dev_warn(jpeg->dev, "%s timeout, cancel it\n", 1403 1413 ctx->mxc_jpeg->mode == MXC_JPEG_DECODE ? "decode" : "encode"); 1404 1414 mxc_jpeg_job_finish(ctx, VB2_BUF_STATE_ERROR, true); ··· 1466 1476 mxc_jpeg_enable(reg); 1467 1477 mxc_jpeg_set_l_endian(reg, 1); 1468 1478 1469 - ctx->slot = mxc_get_free_slot(jpeg->slot_data, MXC_MAX_SLOTS); 1470 - if (ctx->slot >= MXC_MAX_SLOTS) { 1479 + ctx->slot = mxc_get_free_slot(&jpeg->slot_data); 1480 + if (ctx->slot < 0) { 1471 1481 dev_err(dev, "No more free slots\n"); 1472 1482 goto end; 1473 1483 } 1474 - if (!mxc_jpeg_alloc_slot_data(jpeg, ctx->slot)) { 1484 + if (!mxc_jpeg_alloc_slot_data(jpeg)) { 1475 1485 dev_err(dev, "Cannot allocate slot data\n"); 1476 1486 goto end; 1477 1487 } ··· 2091 2101 } 2092 2102 ctx->fh.ctrl_handler = &ctx->ctrl_handler; 2093 2103 mxc_jpeg_set_default_params(ctx); 2094 - ctx->slot = MXC_MAX_SLOTS; /* slot not allocated yet */ 2104 + ctx->slot = -1; /* slot not allocated yet */ 2095 2105 INIT_DELAYED_WORK(&ctx->task_timer, mxc_jpeg_device_run_timeout); 2096 2106 2097 2107 if (mxc_jpeg->mode == MXC_JPEG_DECODE) ··· 2667 2677 dev_err(dev, "No power domains defined for jpeg node\n"); 2668 2678 return jpeg->num_domains; 2669 2679 } 2680 + if (jpeg->num_domains == 1) { 2681 + /* genpd_dev_pm_attach() attaches automatically if the power-domain count is 1 */ 2682 + jpeg->num_domains = 0; 2683 + return 0; 2684 + } 2670 2685 2671 2686 jpeg->pd_dev = devm_kmalloc_array(dev, jpeg->num_domains, 2672 2687 sizeof(*jpeg->pd_dev), GFP_KERNEL); ··· 2713 2718 int ret; 2714 2719 int mode; 2715 2720 const struct of_device_id *of_id; 2716 - unsigned int slot; 2717 2721 2718 2722 of_id = of_match_node(mxc_jpeg_match, dev->of_node); 2719 2723 if (!of_id) ··· 2736 2742 if (IS_ERR(jpeg->base_reg)) 2737 2743 return PTR_ERR(jpeg->base_reg); 2738 2744 2739 - for (slot = 0; slot < MXC_MAX_SLOTS; slot++) { 2740 - dec_irq = platform_get_irq(pdev, slot); 2741 - if (dec_irq < 0) { 2742 - ret = dec_irq; 2743 - goto err_irq; 2744 - } 2745 - ret = devm_request_irq(&pdev->dev, dec_irq, mxc_jpeg_dec_irq, 2746 - 0, pdev->name, jpeg); 2747 - if (ret) { 2748 - dev_err(&pdev->dev, "Failed to request irq %d (%d)\n", 2749 - dec_irq, ret); 2750 - goto err_irq; 2751 - } 2745 + ret = of_property_read_u32_index(pdev->dev.of_node, "slot", 0, &jpeg->slot_data.slot); 2746 + if (ret) 2747 + jpeg->slot_data.slot = 0; 2748 + dev_info(&pdev->dev, "choose slot %d\n", jpeg->slot_data.slot); 2749 + dec_irq = platform_get_irq(pdev, 0); 2750 + if (dec_irq < 0) { 2751 + dev_err(&pdev->dev, "Failed to get irq %d\n", dec_irq); 2752 + ret = dec_irq; 2753 + goto err_irq; 2754 + } 2755 + ret = devm_request_irq(&pdev->dev, dec_irq, mxc_jpeg_dec_irq, 2756 + 0, pdev->name, jpeg); 2757 + if (ret) { 2758 + dev_err(&pdev->dev, "Failed to request irq %d (%d)\n", 2759 + dec_irq, ret); 2760 + goto err_irq; 2761 + } 2752 2762 } 2753 2763 2754 2764 jpeg->pdev = pdev; ··· 2911 2914 2912 2915 static void mxc_jpeg_remove(struct platform_device *pdev) 2913 2916 { 2914 - unsigned int slot; 2915 2917 struct mxc_jpeg_dev *jpeg = platform_get_drvdata(pdev); 2916 2918 2917 - for (slot = 0; slot < MXC_MAX_SLOTS; slot++) 2918 - mxc_jpeg_free_slot_data(jpeg, slot); 2919 + mxc_jpeg_free_slot_data(jpeg); 2919 2920
pm_runtime_disable(&pdev->dev); 2921 2922 video_unregister_device(jpeg->dec_vdev);
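Note: the probe hunk above reads an optional "slot" device-tree property and falls back to slot 0 when it is absent. A minimal sketch of that idiom, with the helper name example_read_slot invented for illustration:

#include <linux/of.h>

/* Sketch: optional DT property with a default. A failed read is not
 * treated as an error; the driver simply falls back to slot 0.
 */
static u32 example_read_slot(const struct device_node *np)
{
	u32 slot;

	if (of_property_read_u32_index(np, "slot", 0, &slot))
		slot = 0;	/* property absent or malformed: first slot */

	return slot;
}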
+3 -2
drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.h
··· 97 97 struct mxc_jpeg_q_data cap_q; 98 98 struct v4l2_fh fh; 99 99 enum mxc_jpeg_enc_state enc_state; 100 - unsigned int slot; 100 + int slot; 101 101 unsigned int source_change; 102 102 bool header_parsed; 103 103 struct v4l2_ctrl_handler ctrl_handler; ··· 106 106 }; 107 107 108 108 struct mxc_jpeg_slot_data { 109 + int slot; 109 110 bool used; 110 111 struct mxc_jpeg_desc *desc; // enc/dec descriptor 111 112 struct mxc_jpeg_desc *cfg_desc; // configuration descriptor ··· 129 128 struct v4l2_device v4l2_dev; 130 129 struct v4l2_m2m_dev *m2m_dev; 131 130 struct video_device *dec_vdev; 132 - struct mxc_jpeg_slot_data slot_data[MXC_MAX_SLOTS]; 131 + struct mxc_jpeg_slot_data slot_data; 133 132 int num_domains; 134 133 struct device **pd_dev; 135 134 struct device_link **pd_link;
+25 -25
drivers/media/platform/verisilicon/hantro.h
··· 370 370 pr_err("%s:%d: " fmt, __func__, __LINE__, ##args) 371 371 372 372 /* Structure access helpers. */ 373 - static inline struct hantro_ctx *fh_to_ctx(struct v4l2_fh *fh) 373 + static __always_inline struct hantro_ctx *fh_to_ctx(struct v4l2_fh *fh) 374 374 { 375 375 return container_of(fh, struct hantro_ctx, fh); 376 376 } 377 377 378 378 /* Register accessors. */ 379 - static inline void vepu_write_relaxed(struct hantro_dev *vpu, 380 - u32 val, u32 reg) 379 + static __always_inline void vepu_write_relaxed(struct hantro_dev *vpu, 380 + u32 val, u32 reg) 381 381 { 382 382 vpu_debug(6, "0x%04x = 0x%08x\n", reg / 4, val); 383 383 writel_relaxed(val, vpu->enc_base + reg); 384 384 } 385 385 386 - static inline void vepu_write(struct hantro_dev *vpu, u32 val, u32 reg) 386 + static __always_inline void vepu_write(struct hantro_dev *vpu, u32 val, u32 reg) 387 387 { 388 388 vpu_debug(6, "0x%04x = 0x%08x\n", reg / 4, val); 389 389 writel(val, vpu->enc_base + reg); 390 390 } 391 391 392 - static inline u32 vepu_read(struct hantro_dev *vpu, u32 reg) 392 + static __always_inline u32 vepu_read(struct hantro_dev *vpu, u32 reg) 393 393 { 394 394 u32 val = readl(vpu->enc_base + reg); 395 395 ··· 397 397 return val; 398 398 } 399 399 400 - static inline void vdpu_write_relaxed(struct hantro_dev *vpu, 401 - u32 val, u32 reg) 400 + static __always_inline void vdpu_write_relaxed(struct hantro_dev *vpu, 401 + u32 val, u32 reg) 402 402 { 403 403 vpu_debug(6, "0x%04x = 0x%08x\n", reg / 4, val); 404 404 writel_relaxed(val, vpu->dec_base + reg); 405 405 } 406 406 407 - static inline void vdpu_write(struct hantro_dev *vpu, u32 val, u32 reg) 407 + static __always_inline void vdpu_write(struct hantro_dev *vpu, u32 val, u32 reg) 408 408 { 409 409 vpu_debug(6, "0x%04x = 0x%08x\n", reg / 4, val); 410 410 writel(val, vpu->dec_base + reg); 411 411 } 412 412 413 - static inline void hantro_write_addr(struct hantro_dev *vpu, 414 - unsigned long offset, 415 - dma_addr_t addr) 413 + static __always_inline void hantro_write_addr(struct hantro_dev *vpu, 414 + unsigned long offset, 415 + dma_addr_t addr) 416 416 { 417 417 vdpu_write(vpu, addr & 0xffffffff, offset); 418 418 } 419 419 420 - static inline u32 vdpu_read(struct hantro_dev *vpu, u32 reg) 420 + static __always_inline u32 vdpu_read(struct hantro_dev *vpu, u32 reg) 421 421 { 422 422 u32 val = readl(vpu->dec_base + reg); 423 423 ··· 425 425 return val; 426 426 } 427 427 428 - static inline u32 vdpu_read_mask(struct hantro_dev *vpu, 429 - const struct hantro_reg *reg, 430 - u32 val) 428 + static __always_inline u32 vdpu_read_mask(struct hantro_dev *vpu, 429 + const struct hantro_reg *reg, 430 + u32 val) 431 431 { 432 432 u32 v; 433 433 ··· 437 437 return v; 438 438 } 439 439 440 - static inline void hantro_reg_write(struct hantro_dev *vpu, 441 - const struct hantro_reg *reg, 442 - u32 val) 443 - { 444 - vdpu_write_relaxed(vpu, vdpu_read_mask(vpu, reg, val), reg->base); 445 - } 446 - 447 - static inline void hantro_reg_write_s(struct hantro_dev *vpu, 448 - const struct hantro_reg *reg, 449 - u32 val) 440 + static __always_inline void hantro_reg_write(struct hantro_dev *vpu, 441 + const struct hantro_reg *reg, 442 + u32 val) 450 443 { 451 444 vdpu_write(vpu, vdpu_read_mask(vpu, reg, val), reg->base); 445 + } 446 + 447 + static __always_inline void hantro_reg_write_relaxed(struct hantro_dev *vpu, 448 + const struct hantro_reg *reg, 449 + u32 val) 450 + { 451 + vdpu_write_relaxed(vpu, vdpu_read_mask(vpu, reg, val), reg->base); 452 452 } 453 453 454 454 void 
*hantro_get_ctrl(struct hantro_ctx *ctx, u32 id);
+6 -6
drivers/media/platform/verisilicon/hantro_postproc.c
··· 21 21 val); \ 22 22 } 23 23 24 - #define HANTRO_PP_REG_WRITE_S(vpu, reg_name, val) \ 24 + #define HANTRO_PP_REG_WRITE_RELAXED(vpu, reg_name, val) \ 25 25 { \ 26 - hantro_reg_write_s(vpu, \ 27 - &hantro_g1_postproc_regs.reg_name, \ 28 - val); \ 26 + hantro_reg_write_relaxed(vpu, \ 27 + &hantro_g1_postproc_regs.reg_name, \ 28 + val); \ 29 29 } 30 30 31 31 #define VPU_PP_IN_YUYV 0x0 ··· 72 72 dma_addr_t dst_dma; 73 73 74 74 /* Turn on pipeline mode. Must be done first. */ 75 - HANTRO_PP_REG_WRITE_S(vpu, pipeline_en, 0x1); 75 + HANTRO_PP_REG_WRITE(vpu, pipeline_en, 0x1); 76 76 77 77 src_pp_fmt = VPU_PP_IN_NV12; 78 78 ··· 242 242 { 243 243 struct hantro_dev *vpu = ctx->dev; 244 244 245 - HANTRO_PP_REG_WRITE_S(vpu, pipeline_en, 0x0); 245 + HANTRO_PP_REG_WRITE(vpu, pipeline_en, 0x0); 246 246 } 247 247 248 248 static void hantro_postproc_g2_disable(struct hantro_ctx *ctx)
+1 -1
drivers/misc/sram.c
··· 236 236 } 237 237 if (!label) 238 238 block->label = devm_kasprintf(sram->dev, GFP_KERNEL, 239 - "%s", dev_name(sram->dev)); 239 + "%s", of_node_full_name(child)); 240 240 else 241 241 block->label = devm_kstrdup(sram->dev, 242 242 label, GFP_KERNEL);
+5
drivers/net/bonding/bond_main.c
··· 1508 1508 1509 1509 memcpy(bond_dev->broadcast, slave_dev->broadcast, 1510 1510 slave_dev->addr_len); 1511 + 1512 + if (slave_dev->flags & IFF_POINTOPOINT) { 1513 + bond_dev->flags &= ~(IFF_BROADCAST | IFF_MULTICAST); 1514 + bond_dev->flags |= (IFF_POINTOPOINT | IFF_NOARP); 1515 + } 1511 1516 } 1512 1517 1513 1518 /* On bonding slaves other than the currently active slave, suppress
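Note: the same flag-inheritance rule reappears in the team driver hunk further down: a master stacked on a point-to-point lower device cannot meaningfully broadcast, multicast, or ARP. A minimal sketch of the shared pattern (the helper name inherit_p2p_flags is invented):

#include <linux/netdevice.h>

/* Sketch: mirror a lower device's point-to-point nature onto an
 * aggregating master device.
 */
static void inherit_p2p_flags(struct net_device *master,
			      const struct net_device *lower)
{
	if (lower->flags & IFF_POINTOPOINT) {
		/* P2P lowers cannot broadcast/multicast; ARP is meaningless */
		master->flags &= ~(IFF_BROADCAST | IFF_MULTICAST);
		master->flags |= (IFF_POINTOPOINT | IFF_NOARP);
	}
}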
+2
drivers/net/can/usb/gs_usb.c
··· 1030 1030 usb_kill_anchored_urbs(&dev->tx_submitted); 1031 1031 atomic_set(&dev->active_tx_urbs, 0); 1032 1032 1033 + dev->can.state = CAN_STATE_STOPPED; 1034 + 1033 1035 /* reset the device */ 1034 1036 rc = gs_cmd_reset(dev); 1035 1037 if (rc < 0)
+5 -2
drivers/net/dsa/qca/qca8k-8xxx.c
··· 576 576 .rd_table = &qca8k_readable_table, 577 577 .disable_locking = true, /* Locking is handled by qca8k read/write */ 578 578 .cache_type = REGCACHE_NONE, /* Explicitly disable CACHE */ 579 - .max_raw_read = 32, /* mgmt eth can read/write up to 8 registers at time */ 580 - .max_raw_write = 32, 579 + .max_raw_read = 32, /* mgmt eth can read up to 8 registers at a time */ 580 + /* ATU regs suffer from a bug where some data are not correctly 581 + * written. Disable bulk write to correctly write ATU entry. 582 + */ 583 + .use_single_write = true, 581 584 }; 582 585 583 586 static int
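Note: with use_single_write set, the regmap core transparently splits any bulk write into one bus transaction per register, so no caller changes are needed. An illustrative config showing the relevant fields (register/value widths here are placeholders, not necessarily the qca8k values):

#include <linux/regmap.h>

/* Illustrative only: bulk reads stay enabled while every write goes
 * out as a single-register transaction, sidestepping the ATU bug
 * described in the hunk above.
 */
static const struct regmap_config example_regmap_cfg = {
	.reg_bits = 16,
	.val_bits = 32,
	.max_raw_read = 32,		/* bulk reads still allowed */
	.use_single_write = true,	/* no bulk writes */
};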
+16 -3
drivers/net/dsa/qca/qca8k-common.c
··· 244 244 } 245 245 246 246 static int qca8k_fdb_search_and_insert(struct qca8k_priv *priv, u8 port_mask, 247 - const u8 *mac, u16 vid) 247 + const u8 *mac, u16 vid, u8 aging) 248 248 { 249 249 struct qca8k_fdb fdb = { 0 }; 250 250 int ret; ··· 261 261 goto exit; 262 262 263 263 /* Rule exist. Delete first */ 264 - if (!fdb.aging) { 264 + if (fdb.aging) { 265 265 ret = qca8k_fdb_access(priv, QCA8K_FDB_PURGE, -1); 266 266 if (ret) 267 267 goto exit; 268 + } else { 269 + fdb.aging = aging; 268 270 } 269 271 270 272 /* Add port to fdb portmask */ ··· 290 288 291 289 qca8k_fdb_write(priv, vid, 0, mac, 0); 292 290 ret = qca8k_fdb_access(priv, QCA8K_FDB_SEARCH, -1); 291 + if (ret < 0) 292 + goto exit; 293 + 294 + ret = qca8k_fdb_read(priv, &fdb); 293 295 if (ret < 0) 294 296 goto exit; 295 297 ··· 816 810 const u8 *addr = mdb->addr; 817 811 u16 vid = mdb->vid; 818 812 819 - return qca8k_fdb_search_and_insert(priv, BIT(port), addr, vid); 813 + if (!vid) 814 + vid = QCA8K_PORT_VID_DEF; 815 + 816 + return qca8k_fdb_search_and_insert(priv, BIT(port), addr, vid, 817 + QCA8K_ATU_STATUS_STATIC); 820 818 } 821 819 822 820 int qca8k_port_mdb_del(struct dsa_switch *ds, int port, ··· 830 820 struct qca8k_priv *priv = ds->priv; 831 821 const u8 *addr = mdb->addr; 832 822 u16 vid = mdb->vid; 823 + 824 + if (!vid) 825 + vid = QCA8K_PORT_VID_DEF; 833 826 834 827 return qca8k_fdb_search_and_del(priv, BIT(port), addr, vid); 835 828 }
+5 -2
drivers/net/ethernet/atheros/atl1c/atl1c_main.c
··· 2094 2094 real_len = (((unsigned char *)ip_hdr(skb) - skb->data) 2095 2095 + ntohs(ip_hdr(skb)->tot_len)); 2096 2096 2097 - if (real_len < skb->len) 2098 - pskb_trim(skb, real_len); 2097 + if (real_len < skb->len) { 2098 + err = pskb_trim(skb, real_len); 2099 + if (err) 2100 + return err; 2101 + } 2099 2102 2100 2103 hdr_len = skb_tcp_all_headers(skb); 2101 2104 if (unlikely(skb->len == hdr_len)) {
+5 -2
drivers/net/ethernet/atheros/atl1e/atl1e_main.c
··· 1641 1641 real_len = (((unsigned char *)ip_hdr(skb) - skb->data) 1642 1642 + ntohs(ip_hdr(skb)->tot_len)); 1643 1643 1644 - if (real_len < skb->len) 1645 - pskb_trim(skb, real_len); 1644 + if (real_len < skb->len) { 1645 + err = pskb_trim(skb, real_len); 1646 + if (err) 1647 + return err; 1648 + } 1646 1649 1647 1650 hdr_len = skb_tcp_all_headers(skb); 1648 1651 if (unlikely(skb->len == hdr_len)) {
+5 -2
drivers/net/ethernet/atheros/atlx/atl1.c
··· 2113 2113 2114 2114 real_len = (((unsigned char *)iph - skb->data) + 2115 2115 ntohs(iph->tot_len)); 2116 - if (real_len < skb->len) 2117 - pskb_trim(skb, real_len); 2116 + if (real_len < skb->len) { 2117 + err = pskb_trim(skb, real_len); 2118 + if (err) 2119 + return err; 2120 + } 2118 2121 hdr_len = skb_tcp_all_headers(skb); 2119 2122 if (skb->len == hdr_len) { 2120 2123 iph->check = 0;
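Note: the same three-line fix lands in atl1c, atl1e and atl1: pskb_trim() may have to reallocate a cloned skb and can fail with -ENOMEM, so its return value must reach the caller. A sketch of the checked-trim step shared by all three drivers (the helper name trim_to_ip_tot_len is invented):

#include <linux/ip.h>
#include <linux/skbuff.h>

/* Sketch: drop padding beyond the IP datagram before TSO, but
 * propagate pskb_trim() failure instead of ignoring it.
 */
static int trim_to_ip_tot_len(struct sk_buff *skb)
{
	unsigned int real_len = (((unsigned char *)ip_hdr(skb) - skb->data) +
				 ntohs(ip_hdr(skb)->tot_len));

	if (real_len < skb->len)
		return pskb_trim(skb, real_len);

	return 0;
}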
+2 -1
drivers/net/ethernet/emulex/benet/be_main.c
··· 1138 1138 (lancer_chip(adapter) || BE3_chip(adapter) || 1139 1139 skb_vlan_tag_present(skb)) && is_ipv4_pkt(skb)) { 1140 1140 ip = (struct iphdr *)ip_hdr(skb); 1141 - pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len)); 1141 + if (unlikely(pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len)))) 1142 + goto tx_drop; 1142 1143 } 1143 1144 1144 1145 /* If vlan tag is already inlined in the packet, skip HW VLAN
+14 -4
drivers/net/ethernet/freescale/fec_main.c
··· 1372 1372 } 1373 1373 1374 1374 static void 1375 - fec_enet_tx_queue(struct net_device *ndev, u16 queue_id) 1375 + fec_enet_tx_queue(struct net_device *ndev, u16 queue_id, int budget) 1376 1376 { 1377 1377 struct fec_enet_private *fep; 1378 1378 struct xdp_frame *xdpf; ··· 1416 1416 if (!skb) 1417 1417 goto tx_buf_done; 1418 1418 } else { 1419 + /* Tx processing cannot call any XDP (or page pool) APIs if 1420 + * the "budget" is 0. NAPI being called with a budget of 1421 + * 0 (such as by netpoll) indicates we may be in an IRQ 1422 + * context, and we can't use the page pool from IRQ context. 1423 + */ 1424 + if (unlikely(!budget)) 1425 + break; 1426 + 1419 1427 xdpf = txq->tx_buf[index].xdp; 1420 1428 if (bdp->cbd_bufaddr) 1421 1429 dma_unmap_single(&fep->pdev->dev, ··· 1516 1508 writel(0, txq->bd.reg_desc_active); 1517 1509 } 1518 1510 1519 - static void fec_enet_tx(struct net_device *ndev) 1511 + static void fec_enet_tx(struct net_device *ndev, int budget) 1520 1512 { 1521 1513 struct fec_enet_private *fep = netdev_priv(ndev); 1522 1514 int i; 1523 1515 1524 1516 /* Make sure that AVB queues are processed first. */ 1525 1517 for (i = fep->num_tx_queues - 1; i >= 0; i--) 1526 - fec_enet_tx_queue(ndev, i); 1518 + fec_enet_tx_queue(ndev, i, budget); 1527 1519 } 1528 1520 1529 1521 static void fec_enet_update_cbd(struct fec_enet_priv_rx_q *rxq, ··· 1866 1858 1867 1859 do { 1868 1860 done += fec_enet_rx(ndev, budget - done); 1869 - fec_enet_tx(ndev); 1861 + fec_enet_tx(ndev, budget); 1870 1862 } while ((done < budget) && fec_enet_collect_events(fep)); 1871 1863 1872 1864 if (done < budget) { ··· 3924 3916 3925 3917 __netif_tx_lock(nq, cpu); 3926 3918 3919 + /* Avoid tx timeout as XDP shares the queue with kernel stack */ 3920 + txq_trans_cond_update(nq); 3927 3921 for (i = 0; i < num_frames; i++) { 3928 3922 if (fec_enet_txq_xmit_frame(fep, txq, frames[i]) < 0) 3929 3923 break;
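Note: the contract enforced here is worth spelling out: netpoll invokes NAPI with a budget of 0 from contexts where page-pool recycling is unsafe, so the Tx completion path must defer XDP frames until a real poll. A self-contained sketch of that contract (ex_priv, ex_clean_tx and ex_poll are invented names):

#include <linux/netdevice.h>

struct ex_priv {
	struct napi_struct napi;
	int pending_xdp;
};

/* Sketch: a Tx completion path that defers XDP/page-pool work when
 * polled with budget == 0, since page-pool recycling is not IRQ-safe.
 */
static void ex_clean_tx(struct ex_priv *p, int budget)
{
	while (p->pending_xdp) {
		if (unlikely(!budget))
			break;		/* leave XDP frames for a later poll */
		p->pending_xdp--;	/* stand-in for page-pool recycling */
	}
}

static int ex_poll(struct napi_struct *napi, int budget)
{
	struct ex_priv *p = container_of(napi, struct ex_priv, napi);

	ex_clean_tx(p, budget);	/* budget threads through, as in the hunk */
	return 0;
}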
+6 -1
drivers/net/ethernet/hisilicon/hns3/hnae3.h
··· 31 31 #include <linux/pci.h> 32 32 #include <linux/pkt_sched.h> 33 33 #include <linux/types.h> 34 + #include <linux/bitmap.h> 34 35 #include <net/pkt_cls.h> 35 36 #include <net/pkt_sched.h> 36 37 ··· 102 101 HNAE3_DEV_SUPPORT_FEC_STATS_B, 103 102 HNAE3_DEV_SUPPORT_LANE_NUM_B, 104 103 HNAE3_DEV_SUPPORT_WOL_B, 104 + HNAE3_DEV_SUPPORT_TM_FLUSH_B, 105 105 }; 106 106 107 107 #define hnae3_ae_dev_fd_supported(ae_dev) \ ··· 173 171 174 172 #define hnae3_ae_dev_wol_supported(ae_dev) \ 175 173 test_bit(HNAE3_DEV_SUPPORT_WOL_B, (ae_dev)->caps) 174 + 175 + #define hnae3_ae_dev_tm_flush_supported(hdev) \ 176 + test_bit(HNAE3_DEV_SUPPORT_TM_FLUSH_B, (hdev)->ae_dev->caps) 176 177 177 178 enum HNAE3_PF_CAP_BITS { 178 179 HNAE3_PF_SUPPORT_VLAN_FLTR_MDF_B = 0, ··· 412 407 unsigned long hw_err_reset_req; 413 408 struct hnae3_dev_specs dev_specs; 414 409 u32 dev_version; 415 - unsigned long caps[BITS_TO_LONGS(HNAE3_DEV_CAPS_MAX_NUM)]; 410 + DECLARE_BITMAP(caps, HNAE3_DEV_CAPS_MAX_NUM); 416 411 void *priv; 417 412 }; 418 413
+19 -3
drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c
··· 156 156 {HCLGE_COMM_CAP_FEC_STATS_B, HNAE3_DEV_SUPPORT_FEC_STATS_B}, 157 157 {HCLGE_COMM_CAP_LANE_NUM_B, HNAE3_DEV_SUPPORT_LANE_NUM_B}, 158 158 {HCLGE_COMM_CAP_WOL_B, HNAE3_DEV_SUPPORT_WOL_B}, 159 + {HCLGE_COMM_CAP_TM_FLUSH_B, HNAE3_DEV_SUPPORT_TM_FLUSH_B}, 159 160 }; 160 161 161 162 static const struct hclge_comm_caps_bit_map hclge_vf_cmd_caps[] = { ··· 173 172 }; 174 173 175 174 static void 175 + hclge_comm_capability_to_bitmap(unsigned long *bitmap, __le32 *caps) 176 + { 177 + const unsigned int words = HCLGE_COMM_QUERY_CAP_LENGTH; 178 + u32 val[HCLGE_COMM_QUERY_CAP_LENGTH]; 179 + unsigned int i; 180 + 181 + for (i = 0; i < words; i++) 182 + val[i] = __le32_to_cpu(caps[i]); 183 + 184 + bitmap_from_arr32(bitmap, val, 185 + HCLGE_COMM_QUERY_CAP_LENGTH * BITS_PER_TYPE(u32)); 186 + } 187 + 188 + static void 176 189 hclge_comm_parse_capability(struct hnae3_ae_dev *ae_dev, bool is_pf, 177 190 struct hclge_comm_query_version_cmd *cmd) 178 191 { ··· 194 179 is_pf ? hclge_pf_cmd_caps : hclge_vf_cmd_caps; 195 180 u32 size = is_pf ? ARRAY_SIZE(hclge_pf_cmd_caps) : 196 181 ARRAY_SIZE(hclge_vf_cmd_caps); 197 - u32 caps, i; 182 + DECLARE_BITMAP(caps, HCLGE_COMM_QUERY_CAP_LENGTH * BITS_PER_TYPE(u32)); 183 + u32 i; 198 184 199 - caps = __le32_to_cpu(cmd->caps[0]); 185 + hclge_comm_capability_to_bitmap(caps, cmd->caps); 200 186 for (i = 0; i < size; i++) 201 - if (hnae3_get_bit(caps, caps_map[i].imp_bit)) 187 + if (test_bit(caps_map[i].imp_bit, caps)) 202 188 set_bit(caps_map[i].local_bit, ae_dev->caps); 203 189 } 204 190
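Note: the removed line parsed only cmd->caps[0], so nothing past the first 32 capability bits was ever visible; converting the whole firmware array into a native bitmap makes every bit testable and endian-safe. A sketch of the conversion (CAP_WORDS and example_cap_supported are invented; the real code uses HCLGE_COMM_QUERY_CAP_LENGTH):

#include <linux/bitmap.h>
#include <linux/bits.h>
#include <linux/kernel.h>
#include <linux/types.h>

#define CAP_WORDS 3	/* stand-in for HCLGE_COMM_QUERY_CAP_LENGTH */

/* Sketch: convert a little-endian capability array from firmware into
 * a native bitmap so any bit position tests correctly, regardless of
 * word boundaries or host endianness.
 */
static bool example_cap_supported(const __le32 *fw_caps, unsigned int bit)
{
	DECLARE_BITMAP(caps, CAP_WORDS * BITS_PER_TYPE(u32));
	u32 tmp[CAP_WORDS];
	unsigned int i;

	for (i = 0; i < CAP_WORDS; i++)
		tmp[i] = le32_to_cpu(fw_caps[i]);

	bitmap_from_arr32(caps, tmp, CAP_WORDS * BITS_PER_TYPE(u32));

	return test_bit(bit, caps);
}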
+2
drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.h
··· 153 153 HCLGE_OPC_TM_INTERNAL_STS = 0x0850, 154 154 HCLGE_OPC_TM_INTERNAL_CNT = 0x0851, 155 155 HCLGE_OPC_TM_INTERNAL_STS_1 = 0x0852, 156 + HCLGE_OPC_TM_FLUSH = 0x0872, 156 157 157 158 /* Packet buffer allocate commands */ 158 159 HCLGE_OPC_TX_BUFF_ALLOC = 0x0901, ··· 350 349 HCLGE_COMM_CAP_FEC_STATS_B = 25, 351 350 HCLGE_COMM_CAP_LANE_NUM_B = 27, 352 351 HCLGE_COMM_CAP_WOL_B = 28, 352 + HCLGE_COMM_CAP_TM_FLUSH_B = 31, 353 353 }; 354 354 355 355 enum HCLGE_COMM_API_CAP_BITS {
+3
drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
··· 411 411 }, { 412 412 .name = "support wake on lan", 413 413 .cap_bit = HNAE3_DEV_SUPPORT_WOL_B, 414 + }, { 415 + .name = "support tm flush", 416 + .cap_bit = HNAE3_DEV_SUPPORT_TM_FLUSH_B, 414 417 } 415 418 }; 416 419
+43 -8
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
··· 52 52 53 53 for (i = 0; i < HNAE3_MAX_TC; i++) { 54 54 ets->prio_tc[i] = hdev->tm_info.prio_tc[i]; 55 - ets->tc_tx_bw[i] = hdev->tm_info.pg_info[0].tc_dwrr[i]; 55 + if (i < hdev->tm_info.num_tc) 56 + ets->tc_tx_bw[i] = hdev->tm_info.pg_info[0].tc_dwrr[i]; 57 + else 58 + ets->tc_tx_bw[i] = 0; 56 59 57 60 if (hdev->tm_info.tc_info[i].tc_sch_mode == 58 61 HCLGE_SCH_MODE_SP) ··· 126 123 } 127 124 128 125 static int hclge_ets_sch_mode_validate(struct hclge_dev *hdev, 129 - struct ieee_ets *ets, bool *changed) 126 + struct ieee_ets *ets, bool *changed, 127 + u8 tc_num) 130 128 { 131 129 bool has_ets_tc = false; 132 130 u32 total_ets_bw = 0; ··· 141 137 *changed = true; 142 138 break; 143 139 case IEEE_8021QAZ_TSA_ETS: 140 + if (i >= tc_num) { 141 + dev_err(&hdev->pdev->dev, 142 + "tc%u is disabled, cannot set ets bw\n", 143 + i); 144 + return -EINVAL; 145 + } 146 + 144 147 /* The hardware will switch to sp mode if bandwidth is 145 148 * 0, so the ets bandwidth limit must be greater than 0. 146 149 */ ··· 187 176 if (ret) 188 177 return ret; 189 178 190 - ret = hclge_ets_sch_mode_validate(hdev, ets, changed); 179 + ret = hclge_ets_sch_mode_validate(hdev, ets, changed, tc_num); 191 180 if (ret) 192 181 return ret; 193 182 ··· 227 216 if (ret) 228 217 return ret; 229 218 219 + ret = hclge_tm_flush_cfg(hdev, true); 220 + if (ret) 221 + return ret; 222 + 230 223 return hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT); 231 224 } 232 225 ··· 239 224 int ret; 240 225 241 226 ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT); 227 + if (ret) 228 + return ret; 229 + 230 + ret = hclge_tm_flush_cfg(hdev, false); 242 231 if (ret) 243 232 return ret; 244 233 ··· 332 313 struct net_device *netdev = h->kinfo.netdev; 333 314 struct hclge_dev *hdev = vport->back; 334 315 u8 i, j, pfc_map, *prio_tc; 316 + int last_bad_ret = 0; 335 317 int ret; 336 318 337 319 if (!(hdev->dcbx_cap & DCB_CAP_DCBX_VER_IEEE)) ··· 370 350 if (ret) 371 351 return ret; 372 352 373 - ret = hclge_buffer_alloc(hdev); 374 - if (ret) { 375 - hclge_notify_client(hdev, HNAE3_UP_CLIENT); 353 + ret = hclge_tm_flush_cfg(hdev, true); 354 + if (ret) 376 355 return ret; 377 - } 378 356 379 - return hclge_notify_client(hdev, HNAE3_UP_CLIENT); 357 + /* Whether or not the following operations succeed, 358 + * disabling the tm flush and notifying that the network 359 + * is back up are both necessary. 360 + * Do not return immediately. 361 + */ 362 + ret = hclge_buffer_alloc(hdev); 363 + if (ret) 364 + last_bad_ret = ret; 365 + 366 + ret = hclge_tm_flush_cfg(hdev, false); 367 + if (ret) 368 + last_bad_ret = ret; 369 + 370 + ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT); 371 + if (ret) 372 + last_bad_ret = ret; 373 + 374 + return last_bad_ret; 380 375 } 381 376 382 377 static int hclge_ieee_setapp(struct hnae3_handle *h, struct dcb_app *app)
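Note: the PFC setup path above now follows a "best effort, remember the last failure" shape: once the tm flush is armed, every unwind step must run even if an earlier one failed. A generic, self-contained sketch of the pattern (example_recover and the step table are invented):

/* Sketch: every recovery step runs even if an earlier one failed, and
 * the most recent error code is what the caller finally sees.
 */
static int example_recover(int (*steps[])(void *ctx), int n, void *ctx)
{
	int last_bad_ret = 0;
	int i, ret;

	for (i = 0; i < n; i++) {
		ret = steps[i](ctx);
		if (ret)
			last_bad_ret = ret;
	}

	return last_bad_ret;
}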
+1 -2
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
··· 693 693 for (i = 0; i < HNAE3_MAX_TC; i++) { 694 694 sch_mode_str = ets_weight->tc_weight[i] ? "dwrr" : "sp"; 695 695 pos += scnprintf(buf + pos, len - pos, "%u %4s %3u\n", 696 - i, sch_mode_str, 697 - hdev->tm_info.pg_info[0].tc_dwrr[i]); 696 + i, sch_mode_str, ets_weight->tc_weight[i]); 698 697 } 699 698 700 699 return 0;
+32 -2
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
··· 785 785 static void hclge_tm_pg_info_init(struct hclge_dev *hdev) 786 786 { 787 787 #define BW_PERCENT 100 788 + #define DEFAULT_BW_WEIGHT 1 788 789 789 790 u8 i; 790 791 ··· 807 806 for (k = 0; k < hdev->tm_info.num_tc; k++) 808 807 hdev->tm_info.pg_info[i].tc_dwrr[k] = BW_PERCENT; 809 808 for (; k < HNAE3_MAX_TC; k++) 810 - hdev->tm_info.pg_info[i].tc_dwrr[k] = 0; 809 + hdev->tm_info.pg_info[i].tc_dwrr[k] = DEFAULT_BW_WEIGHT; 811 810 } 812 811 } 813 812 ··· 1485 1484 return ret; 1486 1485 1487 1486 /* Cfg schd mode for each level schd */ 1488 - return hclge_tm_schd_mode_hw(hdev); 1487 + ret = hclge_tm_schd_mode_hw(hdev); 1488 + if (ret) 1489 + return ret; 1490 + 1491 + return hclge_tm_flush_cfg(hdev, false); 1489 1492 } 1490 1493 1491 1494 static int hclge_pause_param_setup_hw(struct hclge_dev *hdev) ··· 2117 2112 para->rate = le32_to_cpu(port_shap_cfg_cmd->port_rate); 2118 2113 2119 2114 return 0; 2115 + } 2116 + 2117 + int hclge_tm_flush_cfg(struct hclge_dev *hdev, bool enable) 2118 + { 2119 + struct hclge_desc desc; 2120 + int ret; 2121 + 2122 + if (!hnae3_ae_dev_tm_flush_supported(hdev)) 2123 + return 0; 2124 + 2125 + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_FLUSH, false); 2126 + 2127 + desc.data[0] = cpu_to_le32(enable ? HCLGE_TM_FLUSH_EN_MSK : 0); 2128 + 2129 + ret = hclge_cmd_send(&hdev->hw, &desc, 1); 2130 + if (ret) { 2131 + dev_err(&hdev->pdev->dev, 2132 + "failed to config tm flush, ret = %d\n", ret); 2133 + return ret; 2134 + } 2135 + 2136 + if (enable) 2137 + msleep(HCLGE_TM_FLUSH_TIME_MS); 2138 + 2139 + return ret; 2120 2140 }
+4
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
··· 33 33 #define HCLGE_DSCP_MAP_TC_BD_NUM 2 34 34 #define HCLGE_DSCP_TC_SHIFT(n) (((n) & 1) * 4) 35 35 36 + #define HCLGE_TM_FLUSH_TIME_MS 10 37 + #define HCLGE_TM_FLUSH_EN_MSK BIT(0) 38 + 36 39 struct hclge_pg_to_pri_link_cmd { 37 40 u8 pg_id; 38 41 u8 rsvd1[3]; ··· 275 272 struct hclge_tm_shaper_para *para); 276 273 int hclge_up_to_tc_map(struct hclge_dev *hdev); 277 274 int hclge_dscp_to_tc_map(struct hclge_dev *hdev); 275 + int hclge_tm_flush_cfg(struct hclge_dev *hdev, bool enable); 278 276 #endif
+1 -1
drivers/net/ethernet/intel/i40e/i40e_debugfs.c
··· 1839 1839 void i40e_dbg_init(void) 1840 1840 { 1841 1841 i40e_dbg_root = debugfs_create_dir(i40e_driver_name, NULL); 1842 - if (!i40e_dbg_root) 1842 + if (IS_ERR(i40e_dbg_root)) 1843 1843 pr_info("init of debugfs failed\n"); 1844 1844 } 1845 1845
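Note: the check changes because debugfs_create_dir() never returns NULL; on failure it returns an ERR_PTR, so the old NULL test could never fire. A sketch of the corrected idiom (example_dbg_init is an invented name):

#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/printk.h>

/* Sketch: only IS_ERR() can detect debugfs_create_dir() failure.
 * Callers may even skip the check entirely: files created under an
 * ERR_PTR parent are silently dropped by the debugfs core.
 */
static struct dentry *example_dbg_init(const char *name)
{
	struct dentry *root = debugfs_create_dir(name, NULL);

	if (IS_ERR(root))
		pr_info("debugfs dir creation failed: %ld\n", PTR_ERR(root));

	return root;
}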
+6 -5
drivers/net/ethernet/intel/iavf/iavf_main.c
··· 3250 3250 u32 val, oldval; 3251 3251 u16 pending; 3252 3252 3253 - if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) 3254 - goto out; 3255 - 3256 3253 if (!mutex_trylock(&adapter->crit_lock)) { 3257 3254 if (adapter->state == __IAVF_REMOVE) 3258 3255 return; ··· 3258 3261 goto out; 3259 3262 } 3260 3263 3264 + if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) 3265 + goto unlock; 3266 + 3261 3267 event.buf_len = IAVF_MAX_AQ_BUF_SIZE; 3262 3268 event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL); 3263 3269 if (!event.msg_buf) 3264 - goto out; 3270 + goto unlock; 3265 3271 3266 3272 do { 3267 3273 ret = iavf_clean_arq_element(hw, &event, &pending); ··· 3279 3279 if (pending != 0) 3280 3280 memset(event.msg_buf, 0, IAVF_MAX_AQ_BUF_SIZE); 3281 3281 } while (pending); 3282 - mutex_unlock(&adapter->crit_lock); 3283 3282 3284 3283 if (iavf_is_reset_in_progress(adapter)) 3285 3284 goto freedom; ··· 3322 3323 3323 3324 freedom: 3324 3325 kfree(event.msg_buf); 3326 + unlock: 3327 + mutex_unlock(&adapter->crit_lock); 3325 3328 out: 3326 3329 /* re-enable Admin queue interrupt cause */ 3327 3330 iavf_misc_irq_enable(adapter);
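Note: the hunk reorders the adminq task so the PF_COMMS_FAILED check happens only under the critical lock, and every later bail-out releases it through one label. A self-contained sketch of that locking shape (ex_adapter and ex_adminq_task are invented names):

#include <linux/mutex.h>

struct ex_adapter {
	struct mutex crit_lock;
	bool comms_failed;
};

/* Sketch: perform state checks only after the critical lock is held,
 * and funnel every subsequent exit through a single unlock label.
 */
static void ex_adminq_task(struct ex_adapter *ad)
{
	if (!mutex_trylock(&ad->crit_lock))
		return;			/* caller reschedules instead */

	if (ad->comms_failed)
		goto unlock;

	/* ... process admin queue events while holding the lock ... */

unlock:
	mutex_unlock(&ad->crit_lock);
}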
+14 -12
drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c
··· 1281 1281 ICE_FLOW_FLD_OFF_INVAL); 1282 1282 } 1283 1283 1284 - /* add filter for outer headers */ 1285 1284 fltr_idx = ice_ethtool_flow_to_fltr(fsp->flow_type & ~FLOW_EXT); 1285 + 1286 + assign_bit(fltr_idx, hw->fdir_perfect_fltr, perfect_filter); 1287 + 1288 + /* add filter for outer headers */ 1286 1289 ret = ice_fdir_set_hw_fltr_rule(pf, seg, fltr_idx, 1287 1290 ICE_FD_HW_SEG_NON_TUN); 1288 - if (ret == -EEXIST) 1289 - /* Rule already exists, free memory and continue */ 1290 - devm_kfree(dev, seg); 1291 - else if (ret) 1291 + if (ret == -EEXIST) { 1292 + /* Rule already exists, free memory and count as success */ 1293 + ret = 0; 1294 + goto err_exit; 1295 + } else if (ret) { 1292 1296 /* could not write filter, free memory */ 1293 1297 goto err_exit; 1298 + } 1294 1299 1295 1300 /* make tunneled filter HW entries if possible */ 1296 1301 memcpy(&tun_seg[1], seg, sizeof(*seg)); ··· 1310 1305 devm_kfree(dev, tun_seg); 1311 1306 } 1312 1307 1313 - if (perfect_filter) 1314 - set_bit(fltr_idx, hw->fdir_perfect_fltr); 1315 - else 1316 - clear_bit(fltr_idx, hw->fdir_perfect_fltr); 1317 - 1318 1308 return ret; 1319 1309 1320 1310 err_exit: 1321 1311 devm_kfree(dev, tun_seg); 1322 1312 devm_kfree(dev, seg); 1323 1313 1324 - return -EOPNOTSUPP; 1314 + return ret; 1325 1315 } 1326 1316 1327 1317 /** ··· 1914 1914 input->comp_report = ICE_FXD_FLTR_QW0_COMP_REPORT_SW_FAIL; 1915 1915 1916 1916 /* input struct is added to the HW filter list */ 1917 - ice_fdir_update_list_entry(pf, input, fsp->location); 1917 + ret = ice_fdir_update_list_entry(pf, input, fsp->location); 1918 + if (ret) 1919 + goto release_lock; 1918 1920 1919 1921 ret = ice_fdir_write_all_fltr(pf, input, true); 1920 1922 if (ret)
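Note: two things change here: the perfect-filter bit is recorded before the rule is programmed, and the open-coded set_bit()/clear_bit() pair collapses into assign_bit(). A minimal sketch of the latter (example_mark_perfect is an invented name):

#include <linux/bitops.h>

/* Sketch: assign_bit(nr, addr, value) sets the bit when value is true
 * and clears it otherwise, replacing an if/else pair.
 */
static void example_mark_perfect(unsigned long *fltr_map, unsigned int idx,
				 bool perfect)
{
	assign_bit(idx, fltr_map, perfect);
}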
+28 -12
drivers/net/ethernet/intel/igc/igc_main.c
··· 316 316 igc_clean_tx_ring(adapter->tx_ring[i]); 317 317 } 318 318 319 + static void igc_disable_tx_ring_hw(struct igc_ring *ring) 320 + { 321 + struct igc_hw *hw = &ring->q_vector->adapter->hw; 322 + u8 idx = ring->reg_idx; 323 + u32 txdctl; 324 + 325 + txdctl = rd32(IGC_TXDCTL(idx)); 326 + txdctl &= ~IGC_TXDCTL_QUEUE_ENABLE; 327 + txdctl |= IGC_TXDCTL_SWFLUSH; 328 + wr32(IGC_TXDCTL(idx), txdctl); 329 + } 330 + 331 + /** 332 + * igc_disable_all_tx_rings_hw - Disable all transmit queue operation 333 + * @adapter: board private structure 334 + */ 335 + static void igc_disable_all_tx_rings_hw(struct igc_adapter *adapter) 336 + { 337 + int i; 338 + 339 + for (i = 0; i < adapter->num_tx_queues; i++) { 340 + struct igc_ring *tx_ring = adapter->tx_ring[i]; 341 + 342 + igc_disable_tx_ring_hw(tx_ring); 343 + } 344 + } 345 + 319 346 /** 320 347 * igc_setup_tx_resources - allocate Tx resources (Descriptors) 321 348 * @tx_ring: tx descriptor ring (for a specific queue) to setup ··· 5085 5058 /* clear VLAN promisc flag so VFTA will be updated if necessary */ 5086 5059 adapter->flags &= ~IGC_FLAG_VLAN_PROMISC; 5087 5060 5061 + igc_disable_all_tx_rings_hw(adapter); 5088 5062 igc_clean_all_tx_rings(adapter); 5089 5063 igc_clean_all_rx_rings(adapter); 5090 5064 } ··· 7316 7288 igc_alloc_rx_buffers_zc(ring, igc_desc_unused(ring)); 7317 7289 else 7318 7290 igc_alloc_rx_buffers(ring, igc_desc_unused(ring)); 7319 - } 7320 - 7321 - static void igc_disable_tx_ring_hw(struct igc_ring *ring) 7322 - { 7323 - struct igc_hw *hw = &ring->q_vector->adapter->hw; 7324 - u8 idx = ring->reg_idx; 7325 - u32 txdctl; 7326 - 7327 - txdctl = rd32(IGC_TXDCTL(idx)); 7328 - txdctl &= ~IGC_TXDCTL_QUEUE_ENABLE; 7329 - txdctl |= IGC_TXDCTL_SWFLUSH; 7330 - wr32(IGC_TXDCTL(idx), txdctl); 7331 7291 } 7332 7292 7333 7293 void igc_disable_tx_ring(struct igc_ring *ring)
+1 -1
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
··· 8479 8479 struct ixgbe_adapter *adapter = q_vector->adapter; 8480 8480 8481 8481 if (unlikely(skb_tail_pointer(skb) < hdr.network + 8482 - VXLAN_HEADROOM)) 8482 + vxlan_headroom(0))) 8483 8483 return; 8484 8484 8485 8485 /* verify the port is recognized as VXLAN */
+42 -1
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
··· 218 218 219 219 void npc_program_mkex_hash(struct rvu *rvu, int blkaddr) 220 220 { 221 + struct npc_mcam_kex_hash *mh = rvu->kpu.mkex_hash; 221 222 struct hw_cap *hwcap = &rvu->hw->cap; 223 + u8 intf, ld, hdr_offset, byte_len; 222 224 struct rvu_hwinfo *hw = rvu->hw; 223 - u8 intf; 225 + u64 cfg; 224 226 227 + /* Check if hardware supports hash extraction */ 225 228 if (!hwcap->npc_hash_extract) 226 229 return; 227 230 231 + /* Check if IPv6 source/destination address 232 + * should be hash enabled. 233 + * Hashing reduces 128bit SIP/DIP fields to 32bit 234 + * so that a 224 bit X2 key can be used for IPv6 based filters as well, 235 + * which in turn makes a greater number of MCAM entries available for 236 + * use. 237 + * 238 + * Hashing of IPV6 SIP/DIP is enabled in the following scenarios 239 + * 1. If the silicon variant supports the hashing feature 240 + * 2. If the number of bytes of IP addr being extracted is 4 bytes, i.e. 241 + * 32bit. The assumption here is that if the user wants the 8 LSB bytes 242 + * of the IP addr or the full 16 bytes, the intention is not to use a 32bit 243 + * hash. 244 + */ 245 + for (intf = 0; intf < hw->npc_intfs; intf++) { 246 + for (ld = 0; ld < NPC_MAX_LD; ld++) { 247 + cfg = rvu_read64(rvu, blkaddr, 248 + NPC_AF_INTFX_LIDX_LTX_LDX_CFG(intf, 249 + NPC_LID_LC, 250 + NPC_LT_LC_IP6, 251 + ld)); 252 + hdr_offset = FIELD_GET(NPC_HDR_OFFSET, cfg); 253 + byte_len = FIELD_GET(NPC_BYTESM, cfg); 254 + /* Hashing of IPv6 source/destination address should be 255 + * enabled if: 256 + * hdr_offset == 8 (offset of source IPv6 address) or 257 + * hdr_offset == 24 (offset of destination IPv6 258 + * address) and the number of bytes to be 259 + * extracted is 4. As per hardware configuration 260 + * byte_len should be == actual byte_len - 1. 261 + * Hence byte_len is checked against 3 and not 4. 262 + */ 263 + if ((hdr_offset == 8 || hdr_offset == 24) && byte_len == 3) 264 + mh->lid_lt_ld_hash_en[intf][NPC_LID_LC][NPC_LT_LC_IP6][ld] = true; 265 + } 266 + } 267 + 268 + /* Update hash configuration if the field is hash enabled */ 228 269 for (intf = 0; intf < hw->npc_intfs; intf++) { 229 270 npc_program_mkex_hash_rx(rvu, blkaddr, intf); 230 271 npc_program_mkex_hash_tx(rvu, blkaddr, intf);
+4 -4
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.h
··· 70 70 [NIX_INTF_RX] = { 71 71 [NPC_LID_LC] = { 72 72 [NPC_LT_LC_IP6] = { 73 - true, 74 - true, 73 + false, 74 + false, 75 75 }, 76 76 }, 77 77 }, ··· 79 79 [NIX_INTF_TX] = { 80 80 [NPC_LID_LC] = { 81 81 [NPC_LT_LC_IP6] = { 82 - true, 83 - true, 82 + false, 83 + false, 84 84 }, 85 85 }, 86 86 },
+3 -1
drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c
··· 240 240 void stmmac_dwmac4_set_mac(void __iomem *ioaddr, bool enable) 241 241 { 242 242 u32 value = readl(ioaddr + GMAC_CONFIG); 243 + u32 old_val = value; 243 244 244 245 if (enable) 245 246 value |= GMAC_CONFIG_RE | GMAC_CONFIG_TE; 246 247 else 247 248 value &= ~(GMAC_CONFIG_TE | GMAC_CONFIG_RE); 248 249 249 - writel(value, ioaddr + GMAC_CONFIG); 250 + if (value != old_val) 251 + writel(value, ioaddr + GMAC_CONFIG); 250 252 } 251 253 252 254 void stmmac_dwmac4_get_mac_addr(void __iomem *ioaddr, unsigned char *addr,
+12 -10
drivers/net/ipa/ipa_table.c
··· 273 273 if (ret) 274 274 return ret; 275 275 276 + ret = ipa_filter_reset_table(ipa, false, true, modem); 277 + if (ret || !ipa_table_hash_support(ipa)) 278 + return ret; 279 + 276 280 ret = ipa_filter_reset_table(ipa, true, false, modem); 277 281 if (ret) 278 282 return ret; 279 283 280 - ret = ipa_filter_reset_table(ipa, false, true, modem); 281 - if (ret) 282 - return ret; 283 - ret = ipa_filter_reset_table(ipa, true, true, modem); 284 - 285 - return ret; 284 + return ipa_filter_reset_table(ipa, true, true, modem); 286 285 } 287 286 288 287 /* The AP routes and modem routes are each contiguous within the ··· 290 291 * */ 291 292 static int ipa_route_reset(struct ipa *ipa, bool modem) 292 293 { 294 + bool hash_support = ipa_table_hash_support(ipa); 293 295 u32 modem_route_count = ipa->modem_route_count; 294 296 struct gsi_trans *trans; 295 297 u16 first; 296 298 u16 count; 297 299 298 - trans = ipa_cmd_trans_alloc(ipa, 4); 300 + trans = ipa_cmd_trans_alloc(ipa, hash_support ? 4 : 2); 299 301 if (!trans) { 300 302 dev_err(&ipa->pdev->dev, 301 303 "no transaction for %s route reset\n", ··· 313 313 } 314 314 315 315 ipa_table_reset_add(trans, false, false, false, first, count); 316 - ipa_table_reset_add(trans, false, true, false, first, count); 317 - 318 316 ipa_table_reset_add(trans, false, false, true, first, count); 319 - ipa_table_reset_add(trans, false, true, true, first, count); 317 + 318 + if (hash_support) { 319 + ipa_table_reset_add(trans, false, true, false, first, count); 320 + ipa_table_reset_add(trans, false, true, true, first, count); 321 + } 320 322 321 323 gsi_trans_commit_wait(trans); 322 324
+1
drivers/net/macvlan.c
··· 1746 1746 [IFLA_MACVLAN_MACADDR_COUNT] = { .type = NLA_U32 }, 1747 1747 [IFLA_MACVLAN_BC_QUEUE_LEN] = { .type = NLA_U32 }, 1748 1748 [IFLA_MACVLAN_BC_QUEUE_LEN_USED] = { .type = NLA_REJECT }, 1749 + [IFLA_MACVLAN_BC_CUTOFF] = { .type = NLA_S32 }, 1749 1750 }; 1750 1751 1751 1752 int macvlan_link_register(struct rtnl_link_ops *ops)
+7
drivers/net/phy/marvell10g.c
··· 328 328 ret = phy_clear_bits_mmd(phydev, MDIO_MMD_VEND2, MV_V2_PORT_CTRL, 329 329 MV_V2_PORT_CTRL_PWRDOWN); 330 330 331 + /* Sometimes, the power down bit doesn't clear immediately, and 332 + * a read of this register causes the bit not to clear. Delay 333 + * 100us to allow the PHY to come out of power down mode before 334 + * the next access. 335 + */ 336 + udelay(100); 337 + 331 338 if (phydev->drv->phy_id != MARVELL_PHY_ID_88X3310 || 332 339 priv->firmware_ver < 0x00030000) 333 340 return ret;
+9
drivers/net/team/team.c
··· 2135 2135 dev->mtu = port_dev->mtu; 2136 2136 memcpy(dev->broadcast, port_dev->broadcast, port_dev->addr_len); 2137 2137 eth_hw_addr_inherit(dev, port_dev); 2138 + 2139 + if (port_dev->flags & IFF_POINTOPOINT) { 2140 + dev->flags &= ~(IFF_BROADCAST | IFF_MULTICAST); 2141 + dev->flags |= (IFF_POINTOPOINT | IFF_NOARP); 2142 + } else if ((port_dev->flags & (IFF_BROADCAST | IFF_MULTICAST)) == 2143 + (IFF_BROADCAST | IFF_MULTICAST)) { 2144 + dev->flags |= (IFF_BROADCAST | IFF_MULTICAST); 2145 + dev->flags &= ~(IFF_POINTOPOINT | IFF_NOARP); 2146 + } 2138 2147 } 2139 2148 2140 2149 static int team_dev_type_check_change(struct net_device *dev,
+2 -2
drivers/net/virtio_net.c
··· 4219 4219 if (vi->has_rss || vi->has_rss_hash_report) 4220 4220 virtnet_init_default_rss(vi); 4221 4221 4222 + _virtnet_set_queues(vi, vi->curr_queue_pairs); 4223 + 4222 4224 /* serialize netdev register + virtio_device_ready() with ndo_open() */ 4223 4225 rtnl_lock(); 4224 4226 ··· 4258 4256 pr_debug("virtio_net: registering cpu notifier failed\n"); 4259 4257 goto free_unregister_netdev; 4260 4258 } 4261 - 4262 - virtnet_set_queues(vi, vi->curr_queue_pairs); 4263 4259 4264 4260 /* Assume link up if device can't report link status, 4265 4261 otherwise get link status from config. */
+107 -58
drivers/net/vxlan/vxlan_core.c
··· 623 623 return 1; 624 624 } 625 625 626 + static bool vxlan_parse_gpe_proto(struct vxlanhdr *hdr, __be16 *protocol) 627 + { 628 + struct vxlanhdr_gpe *gpe = (struct vxlanhdr_gpe *)hdr; 629 + 630 + /* Need to have Next Protocol set for interfaces in GPE mode. */ 631 + if (!gpe->np_applied) 632 + return false; 633 + /* "The initial version is 0. If a receiver does not support the 634 + * version indicated it MUST drop the packet. 635 + */ 636 + if (gpe->version != 0) 637 + return false; 638 + /* "When the O bit is set to 1, the packet is an OAM packet and OAM 639 + * processing MUST occur." However, we don't implement OAM 640 + * processing, thus drop the packet. 641 + */ 642 + if (gpe->oam_flag) 643 + return false; 644 + 645 + *protocol = tun_p_to_eth_p(gpe->next_protocol); 646 + if (!*protocol) 647 + return false; 648 + 649 + return true; 650 + } 651 + 626 652 static struct vxlanhdr *vxlan_gro_remcsum(struct sk_buff *skb, 627 653 unsigned int off, 628 654 struct vxlanhdr *vh, size_t hdrlen, ··· 675 649 return vh; 676 650 } 677 651 678 - static struct sk_buff *vxlan_gro_receive(struct sock *sk, 679 - struct list_head *head, 680 - struct sk_buff *skb) 652 + static struct vxlanhdr *vxlan_gro_prepare_receive(struct sock *sk, 653 + struct list_head *head, 654 + struct sk_buff *skb, 655 + struct gro_remcsum *grc) 681 656 { 682 - struct sk_buff *pp = NULL; 683 657 struct sk_buff *p; 684 658 struct vxlanhdr *vh, *vh2; 685 659 unsigned int hlen, off_vx; 686 - int flush = 1; 687 660 struct vxlan_sock *vs = rcu_dereference_sk_user_data(sk); 688 661 __be32 flags; 689 - struct gro_remcsum grc; 690 662 691 - skb_gro_remcsum_init(&grc); 663 + skb_gro_remcsum_init(grc); 692 664 693 665 off_vx = skb_gro_offset(skb); 694 666 hlen = off_vx + sizeof(*vh); 695 667 vh = skb_gro_header(skb, hlen, off_vx); 696 668 if (unlikely(!vh)) 697 - goto out; 669 + return NULL; 698 670 699 671 skb_gro_postpull_rcsum(skb, vh, sizeof(struct vxlanhdr)); 700 672 ··· 700 676 701 677 if ((flags & VXLAN_HF_RCO) && (vs->flags & VXLAN_F_REMCSUM_RX)) { 702 678 vh = vxlan_gro_remcsum(skb, off_vx, vh, sizeof(struct vxlanhdr), 703 - vh->vx_vni, &grc, 679 + vh->vx_vni, grc, 704 680 !!(vs->flags & 705 681 VXLAN_F_REMCSUM_NOPARTIAL)); 706 682 707 683 if (!vh) 708 - goto out; 684 + return NULL; 709 685 } 710 686 711 687 skb_gro_pull(skb, sizeof(struct vxlanhdr)); /* pull vxlan header */ ··· 722 698 } 723 699 } 724 700 725 - pp = call_gro_receive(eth_gro_receive, head, skb); 726 - flush = 0; 701 + return vh; 702 + } 727 703 704 + static struct sk_buff *vxlan_gro_receive(struct sock *sk, 705 + struct list_head *head, 706 + struct sk_buff *skb) 707 + { 708 + struct sk_buff *pp = NULL; 709 + struct gro_remcsum grc; 710 + int flush = 1; 711 + 712 + if (vxlan_gro_prepare_receive(sk, head, skb, &grc)) { 713 + pp = call_gro_receive(eth_gro_receive, head, skb); 714 + flush = 0; 715 + } 716 + skb_gro_flush_final_remcsum(skb, pp, flush, &grc); 717 + return pp; 718 + } 719 + 720 + static struct sk_buff *vxlan_gpe_gro_receive(struct sock *sk, 721 + struct list_head *head, 722 + struct sk_buff *skb) 723 + { 724 + const struct packet_offload *ptype; 725 + struct sk_buff *pp = NULL; 726 + struct gro_remcsum grc; 727 + struct vxlanhdr *vh; 728 + __be16 protocol; 729 + int flush = 1; 730 + 731 + vh = vxlan_gro_prepare_receive(sk, head, skb, &grc); 732 + if (vh) { 733 + if (!vxlan_parse_gpe_proto(vh, &protocol)) 734 + goto out; 735 + ptype = gro_find_receive_by_type(protocol); 736 + if (!ptype) 737 + goto out; 738 + pp = 
call_gro_receive(ptype->callbacks.gro_receive, head, skb); 739 + flush = 0; 740 + } 728 741 out: 729 742 skb_gro_flush_final_remcsum(skb, pp, flush, &grc); 730 - 731 743 return pp; 732 744 } 733 745 ··· 773 713 * 'skb->encapsulation' set. 774 714 */ 775 715 return eth_gro_complete(skb, nhoff + sizeof(struct vxlanhdr)); 716 + } 717 + 718 + static int vxlan_gpe_gro_complete(struct sock *sk, struct sk_buff *skb, int nhoff) 719 + { 720 + struct vxlanhdr *vh = (struct vxlanhdr *)(skb->data + nhoff); 721 + const struct packet_offload *ptype; 722 + int err = -ENOSYS; 723 + __be16 protocol; 724 + 725 + if (!vxlan_parse_gpe_proto(vh, &protocol)) 726 + return err; 727 + ptype = gro_find_complete_by_type(protocol); 728 + if (ptype) 729 + err = ptype->callbacks.gro_complete(skb, nhoff + sizeof(struct vxlanhdr)); 730 + return err; 776 731 } 777 732 778 733 static struct vxlan_fdb *vxlan_fdb_alloc(struct vxlan_dev *vxlan, const u8 *mac, ··· 1600 1525 unparsed->vx_flags &= ~VXLAN_GBP_USED_BITS; 1601 1526 } 1602 1527 1603 - static bool vxlan_parse_gpe_hdr(struct vxlanhdr *unparsed, 1604 - __be16 *protocol, 1605 - struct sk_buff *skb, u32 vxflags) 1606 - { 1607 - struct vxlanhdr_gpe *gpe = (struct vxlanhdr_gpe *)unparsed; 1608 - 1609 - /* Need to have Next Protocol set for interfaces in GPE mode. */ 1610 - if (!gpe->np_applied) 1611 - return false; 1612 - /* "The initial version is 0. If a receiver does not support the 1613 - * version indicated it MUST drop the packet. 1614 - */ 1615 - if (gpe->version != 0) 1616 - return false; 1617 - /* "When the O bit is set to 1, the packet is an OAM packet and OAM 1618 - * processing MUST occur." However, we don't implement OAM 1619 - * processing, thus drop the packet. 1620 - */ 1621 - if (gpe->oam_flag) 1622 - return false; 1623 - 1624 - *protocol = tun_p_to_eth_p(gpe->next_protocol); 1625 - if (!*protocol) 1626 - return false; 1627 - 1628 - unparsed->vx_flags &= ~VXLAN_GPE_USED_BITS; 1629 - return true; 1630 - } 1631 - 1632 1528 static bool vxlan_set_mac(struct vxlan_dev *vxlan, 1633 1529 struct vxlan_sock *vs, 1634 1530 struct sk_buff *skb, __be32 vni) ··· 1701 1655 * used by VXLAN extensions if explicitly requested. 
1702 1656 */ 1703 1657 if (vs->flags & VXLAN_F_GPE) { 1704 - if (!vxlan_parse_gpe_hdr(&unparsed, &protocol, skb, vs->flags)) 1658 + if (!vxlan_parse_gpe_proto(&unparsed, &protocol)) 1705 1659 goto drop; 1660 + unparsed.vx_flags &= ~VXLAN_GPE_USED_BITS; 1706 1661 raw_proto = true; 1707 1662 } 1708 1663 ··· 2563 2516 } 2564 2517 2565 2518 ndst = &rt->dst; 2566 - err = skb_tunnel_check_pmtu(skb, ndst, VXLAN_HEADROOM, 2519 + err = skb_tunnel_check_pmtu(skb, ndst, vxlan_headroom(flags & VXLAN_F_GPE), 2567 2520 netif_is_any_bridge_port(dev)); 2568 2521 if (err < 0) { 2569 2522 goto tx_error; ··· 2624 2577 goto out_unlock; 2625 2578 } 2626 2579 2627 - err = skb_tunnel_check_pmtu(skb, ndst, VXLAN6_HEADROOM, 2580 + err = skb_tunnel_check_pmtu(skb, ndst, 2581 + vxlan_headroom((flags & VXLAN_F_GPE) | VXLAN_F_IPV6), 2628 2582 netif_is_any_bridge_port(dev)); 2629 2583 if (err < 0) { 2630 2584 goto tx_error; ··· 3037 2989 struct vxlan_rdst *dst = &vxlan->default_dst; 3038 2990 struct net_device *lowerdev = __dev_get_by_index(vxlan->net, 3039 2991 dst->remote_ifindex); 3040 - bool use_ipv6 = !!(vxlan->cfg.flags & VXLAN_F_IPV6); 3041 2992 3042 2993 /* This check is different than dev->max_mtu, because it looks at 3043 2994 * the lowerdev->mtu, rather than the static dev->max_mtu 3044 2995 */ 3045 2996 if (lowerdev) { 3046 - int max_mtu = lowerdev->mtu - 3047 - (use_ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM); 2997 + int max_mtu = lowerdev->mtu - vxlan_headroom(vxlan->cfg.flags); 3048 2998 if (new_mtu > max_mtu) 3049 2999 return -EINVAL; 3050 3000 } ··· 3425 3379 tunnel_cfg.encap_rcv = vxlan_rcv; 3426 3380 tunnel_cfg.encap_err_lookup = vxlan_err_lookup; 3427 3381 tunnel_cfg.encap_destroy = NULL; 3428 - tunnel_cfg.gro_receive = vxlan_gro_receive; 3429 - tunnel_cfg.gro_complete = vxlan_gro_complete; 3382 + if (vs->flags & VXLAN_F_GPE) { 3383 + tunnel_cfg.gro_receive = vxlan_gpe_gro_receive; 3384 + tunnel_cfg.gro_complete = vxlan_gpe_gro_complete; 3385 + } else { 3386 + tunnel_cfg.gro_receive = vxlan_gro_receive; 3387 + tunnel_cfg.gro_complete = vxlan_gro_complete; 3388 + } 3430 3389 3431 3390 setup_udp_tunnel_sock(net, sock, &tunnel_cfg); 3432 3391 ··· 3695 3644 struct vxlan_dev *vxlan = netdev_priv(dev); 3696 3645 struct vxlan_rdst *dst = &vxlan->default_dst; 3697 3646 unsigned short needed_headroom = ETH_HLEN; 3698 - bool use_ipv6 = !!(conf->flags & VXLAN_F_IPV6); 3699 3647 int max_mtu = ETH_MAX_MTU; 3648 + u32 flags = conf->flags; 3700 3649 3701 3650 if (!changelink) { 3702 - if (conf->flags & VXLAN_F_GPE) 3651 + if (flags & VXLAN_F_GPE) 3703 3652 vxlan_raw_setup(dev); 3704 3653 else 3705 3654 vxlan_ether_setup(dev); ··· 3724 3673 3725 3674 dev->needed_tailroom = lowerdev->needed_tailroom; 3726 3675 3727 - max_mtu = lowerdev->mtu - (use_ipv6 ? VXLAN6_HEADROOM : 3728 - VXLAN_HEADROOM); 3676 + max_mtu = lowerdev->mtu - vxlan_headroom(flags); 3729 3677 if (max_mtu < ETH_MIN_MTU) 3730 3678 max_mtu = ETH_MIN_MTU; 3731 3679 ··· 3735 3685 if (dev->mtu > max_mtu) 3736 3686 dev->mtu = max_mtu; 3737 3687 3738 - if (use_ipv6 || conf->flags & VXLAN_F_COLLECT_METADATA) 3739 - needed_headroom += VXLAN6_HEADROOM; 3740 - else 3741 - needed_headroom += VXLAN_HEADROOM; 3688 + if (flags & VXLAN_F_COLLECT_METADATA) 3689 + flags |= VXLAN_F_IPV6; 3690 + needed_headroom += vxlan_headroom(flags); 3742 3691 dev->needed_headroom = needed_headroom; 3743 3692 3744 3693 memcpy(&vxlan->cfg, conf, sizeof(*conf));
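Note: several hunks above (including the ixgbe one) switch from the fixed VXLAN_HEADROOM/VXLAN6_HEADROOM constants to a flag-aware vxlan_headroom() helper that this series adds to include/net/vxlan.h but which is not shown in the diff. A plausible shape for it, under the assumption that headroom is the outer IP header plus UDP plus VXLAN plus an inner Ethernet header that GPE omits (an illustration, not the exact upstream macro):

#include <linux/if_ether.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/udp.h>
#include <net/vxlan.h>

/* Illustrative macro only; the real definition lives in <net/vxlan.h>.
 * GPE carries no inner Ethernet header, and the outer IP header size
 * depends on VXLAN_F_IPV6.
 */
#define example_vxlan_headroom(flags)					\
	((((flags) & VXLAN_F_IPV6) ? sizeof(struct ipv6hdr)		\
				   : sizeof(struct iphdr)) +		\
	 sizeof(struct udphdr) + sizeof(struct vxlanhdr) +		\
	 (((flags) & VXLAN_F_GPE) ? 0 : ETH_HLEN))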
+1 -1
drivers/phy/hisilicon/phy-hisi-inno-usb2.c
··· 184 184 phy_set_drvdata(phy, &priv->ports[i]); 185 185 i++; 186 186 187 - if (i > INNO_PHY_PORT_NUM) { 187 + if (i >= INNO_PHY_PORT_NUM) { 188 188 dev_warn(dev, "Support %d ports in maximum\n", i); 189 189 of_node_put(child); 190 190 break;
+1 -1
drivers/phy/mediatek/phy-mtk-dp.c
··· 169 169 170 170 regs = *(struct regmap **)dev->platform_data; 171 171 if (!regs) 172 - return dev_err_probe(dev, EINVAL, 172 + return dev_err_probe(dev, -EINVAL, 173 173 "No data passed, requires struct regmap**\n"); 174 174 175 175 dp_phy = devm_kzalloc(dev, sizeof(*dp_phy), GFP_KERNEL);
+1 -1
drivers/phy/mediatek/phy-mtk-hdmi-mt8195.c
··· 253 253 for (i = 0; i < ARRAY_SIZE(txpredivs); i++) { 254 254 ns_hdmipll_ck = 5 * tmds_clk * txposdiv * txpredivs[i]; 255 255 if (ns_hdmipll_ck >= 5 * GIGA && 256 - ns_hdmipll_ck <= 1 * GIGA) 256 + ns_hdmipll_ck <= 12 * GIGA) 257 257 break; 258 258 } 259 259 if (i == (ARRAY_SIZE(txpredivs) - 1) &&
+50 -28
drivers/phy/qualcomm/phy-qcom-snps-femto-v2.c
··· 110 110 /** 111 111 * struct qcom_snps_hsphy - snps hs phy attributes 112 112 * 113 + * @dev: device structure 114 + * 113 115 * @phy: generic phy 114 116 * @base: iomapped memory space for snps hs phy 115 117 * 116 - * @cfg_ahb_clk: AHB2PHY interface clock 117 - * @ref_clk: phy reference clock 118 + * @num_clks: number of clocks 119 + * @clks: array of clocks 118 120 * @phy_reset: phy reset control 119 121 * @vregs: regulator supplies bulk data 120 122 * @phy_initialized: if PHY has been initialized correctly ··· 124 122 * @update_seq_cfg: tuning parameters for phy init 125 123 */ 126 124 struct qcom_snps_hsphy { 125 + struct device *dev; 126 + 127 127 struct phy *phy; 128 128 void __iomem *base; 129 129 130 - struct clk *cfg_ahb_clk; 131 - struct clk *ref_clk; 130 + int num_clks; 131 + struct clk_bulk_data *clks; 132 132 struct reset_control *phy_reset; 133 133 struct regulator_bulk_data vregs[SNPS_HS_NUM_VREGS]; 134 134 ··· 138 134 enum phy_mode mode; 139 135 struct phy_override_seq update_seq_cfg[NUM_HSPHY_TUNING_PARAMS]; 140 136 }; 137 + 138 + static int qcom_snps_hsphy_clk_init(struct qcom_snps_hsphy *hsphy) 139 + { 140 + struct device *dev = hsphy->dev; 141 + 142 + hsphy->num_clks = 2; 143 + hsphy->clks = devm_kcalloc(dev, hsphy->num_clks, sizeof(*hsphy->clks), GFP_KERNEL); 144 + if (!hsphy->clks) 145 + return -ENOMEM; 146 + 147 + /* 148 + * TODO: Currently no device tree instantiation of the PHY is using the clock. 149 + * This needs to be fixed in order for this code to be able to use devm_clk_bulk_get(). 150 + */ 151 + hsphy->clks[0].id = "cfg_ahb"; 152 + hsphy->clks[0].clk = devm_clk_get_optional(dev, "cfg_ahb"); 153 + if (IS_ERR(hsphy->clks[0].clk)) 154 + return dev_err_probe(dev, PTR_ERR(hsphy->clks[0].clk), 155 + "failed to get cfg_ahb clk\n"); 156 + 157 + hsphy->clks[1].id = "ref"; 158 + hsphy->clks[1].clk = devm_clk_get(dev, "ref"); 159 + if (IS_ERR(hsphy->clks[1].clk)) 160 + return dev_err_probe(dev, PTR_ERR(hsphy->clks[1].clk), 161 + "failed to get ref clk\n"); 162 + 163 + return 0; 164 + } 141 165 142 166 static inline void qcom_snps_hsphy_write_mask(void __iomem *base, u32 offset, 143 167 u32 mask, u32 val) ··· 197 165 0, USB2_AUTO_RESUME); 198 166 } 199 167 200 - clk_disable_unprepare(hsphy->cfg_ahb_clk); 201 168 return 0; 202 169 } 203 170 204 171 static int qcom_snps_hsphy_resume(struct qcom_snps_hsphy *hsphy) 205 172 { 206 - int ret; 207 - 208 173 dev_dbg(&hsphy->phy->dev, "Resume QCOM SNPS PHY, mode\n"); 209 - 210 - ret = clk_prepare_enable(hsphy->cfg_ahb_clk); 211 - if (ret) { 212 - dev_err(&hsphy->phy->dev, "failed to enable cfg ahb clock\n"); 213 - return ret; 214 - } 215 174 216 175 return 0; 217 176 } ··· 214 191 if (!hsphy->phy_initialized) 215 192 return 0; 216 193 217 - qcom_snps_hsphy_suspend(hsphy); 218 - return 0; 194 + return qcom_snps_hsphy_suspend(hsphy); 219 195 } 220 196 221 197 static int __maybe_unused qcom_snps_hsphy_runtime_resume(struct device *dev) ··· 224 202 if (!hsphy->phy_initialized) 225 203 return 0; 226 204 227 - qcom_snps_hsphy_resume(hsphy); 228 - return 0; 205 + return qcom_snps_hsphy_resume(hsphy); 229 206 } 230 207 231 208 static int qcom_snps_hsphy_set_mode(struct phy *phy, enum phy_mode mode, ··· 395 374 if (ret) 396 375 return ret; 397 376 398 - ret = clk_prepare_enable(hsphy->cfg_ahb_clk); 377 + ret = clk_bulk_prepare_enable(hsphy->num_clks, hsphy->clks); 399 378 if (ret) { 400 - dev_err(&phy->dev, "failed to enable cfg ahb clock, %d\n", ret); 379 + dev_err(&phy->dev, "failed to enable clocks, %d\n", ret); 401 380 goto 
poweroff_phy; 402 381 } 403 382 404 383 ret = reset_control_assert(hsphy->phy_reset); 405 384 if (ret) { 406 385 dev_err(&phy->dev, "failed to assert phy_reset, %d\n", ret); 407 - goto disable_ahb_clk; 386 + goto disable_clks; 408 387 } 409 388 410 389 usleep_range(100, 150); ··· 412 391 ret = reset_control_deassert(hsphy->phy_reset); 413 392 if (ret) { 414 393 dev_err(&phy->dev, "failed to de-assert phy_reset, %d\n", ret); 415 - goto disable_ahb_clk; 394 + goto disable_clks; 416 395 } 417 396 418 397 qcom_snps_hsphy_write_mask(hsphy->base, USB2_PHY_USB_PHY_CFG0, ··· 469 448 470 449 return 0; 471 450 472 - disable_ahb_clk: 473 - clk_disable_unprepare(hsphy->cfg_ahb_clk); 451 + disable_clks: 452 + clk_bulk_disable_unprepare(hsphy->num_clks, hsphy->clks); 474 453 poweroff_phy: 475 454 regulator_bulk_disable(ARRAY_SIZE(hsphy->vregs), hsphy->vregs); 476 455 ··· 482 461 struct qcom_snps_hsphy *hsphy = phy_get_drvdata(phy); 483 462 484 463 reset_control_assert(hsphy->phy_reset); 485 - clk_disable_unprepare(hsphy->cfg_ahb_clk); 464 + clk_bulk_disable_unprepare(hsphy->num_clks, hsphy->clks); 486 465 regulator_bulk_disable(ARRAY_SIZE(hsphy->vregs), hsphy->vregs); 487 466 hsphy->phy_initialized = false; 488 467 ··· 575 554 if (!hsphy) 576 555 return -ENOMEM; 577 556 557 + hsphy->dev = dev; 558 + 578 559 hsphy->base = devm_platform_ioremap_resource(pdev, 0); 579 560 if (IS_ERR(hsphy->base)) 580 561 return PTR_ERR(hsphy->base); 581 562 582 - hsphy->ref_clk = devm_clk_get(dev, "ref"); 583 - if (IS_ERR(hsphy->ref_clk)) 584 - return dev_err_probe(dev, PTR_ERR(hsphy->ref_clk), 585 - "failed to get ref clk\n"); 563 + ret = qcom_snps_hsphy_clk_init(hsphy); 564 + if (ret) 565 + return dev_err_probe(dev, ret, "failed to initialize clocks\n"); 586 566 587 567 hsphy->phy_reset = devm_reset_control_get_exclusive(&pdev->dev, NULL); 588 568 if (IS_ERR(hsphy->phy_reset)) {
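Note: the driver trades two named clk handles for a clk_bulk_data array, so enable and disable collapse to one call each and future clocks need no new error paths. A minimal sketch of the bulk pattern (example_power_on/example_power_off are invented names):

#include <linux/clk.h>
#include <linux/device.h>

/* Sketch: once clocks live in a clk_bulk_data array, error handling
 * collapses to one call in each direction.
 */
static int example_power_on(struct device *dev,
			    struct clk_bulk_data *clks, int num)
{
	int ret = clk_bulk_prepare_enable(num, clks);

	if (ret)
		dev_err(dev, "failed to enable clocks: %d\n", ret);
	return ret;
}

static void example_power_off(struct clk_bulk_data *clks, int num)
{
	clk_bulk_disable_unprepare(num, clks);
}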
+2 -6
drivers/platform/x86/amd/pmc-quirks.c
··· 11 11 #include <linux/dmi.h> 12 12 #include <linux/io.h> 13 13 #include <linux/ioport.h> 14 - #include <linux/slab.h> 15 14 16 15 #include "pmc.h" 17 16 ··· 134 135 */ 135 136 static void amd_pmc_skip_nvme_smi_handler(u32 s2idle_bug_mmio) 136 137 { 137 - struct resource *res; 138 138 void __iomem *addr; 139 139 u8 val; 140 140 141 - res = request_mem_region_muxed(s2idle_bug_mmio, 1, "amd_pmc_pm80"); 142 - if (!res) 141 + if (!request_mem_region_muxed(s2idle_bug_mmio, 1, "amd_pmc_pm80")) 143 142 return; 144 143 145 144 addr = ioremap(s2idle_bug_mmio, 1); ··· 149 152 150 153 iounmap(addr); 151 154 cleanup_resource: 152 - release_resource(res); 153 - kfree(res); 155 + release_mem_region(s2idle_bug_mmio, 1); 154 156 } 155 157 156 158 void amd_pmc_process_restore_quirks(struct amd_pmc_dev *dev)
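Note: the quirk no longer keeps the struct resource pointer at all: release_mem_region() looks the region up by base and size, which is why the manual release_resource()/kfree() pair (and the slab.h include) could be dropped. A self-contained sketch of the claim/poke/release sequence (example_poke_mmio is an invented name):

#include <linux/io.h>
#include <linux/ioport.h>

/* Sketch: claim a muxed MMIO byte, touch it, and release it by
 * base/size. No struct resource pointer needs to be retained.
 */
static void example_poke_mmio(resource_size_t base)
{
	void __iomem *addr;

	if (!request_mem_region_muxed(base, 1, "example"))
		return;

	addr = ioremap(base, 1);
	if (addr) {
		(void)readb(addr);	/* stand-in for the real fixup */
		iounmap(addr);
	}

	release_mem_region(base, 1);
}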
+22 -1
drivers/platform/x86/amd/pmf/acpi.c
··· 106 106 data, sizeof(*data)); 107 107 } 108 108 109 + int apmf_os_power_slider_update(struct amd_pmf_dev *pdev, u8 event) 110 + { 111 + struct os_power_slider args; 112 + struct acpi_buffer params; 113 + union acpi_object *info; 114 + int err = 0; 115 + 116 + args.size = sizeof(args); 117 + args.slider_event = event; 118 + 119 + params.length = sizeof(args); 120 + params.pointer = (void *)&args; 121 + 122 + info = apmf_if_call(pdev, APMF_FUNC_OS_POWER_SLIDER_UPDATE, &params); 123 + if (!info) 124 + err = -EIO; 125 + 126 + kfree(info); 127 + return err; 128 + } 129 + 109 130 static void apmf_sbios_heartbeat_notify(struct work_struct *work) 110 131 { 111 132 struct amd_pmf_dev *dev = container_of(work, struct amd_pmf_dev, heart_beat.work); ··· 310 289 311 290 ret = apmf_get_system_params(pmf_dev); 312 291 if (ret) { 313 - dev_err(pmf_dev->dev, "APMF apmf_get_system_params failed :%d\n", ret); 292 + dev_dbg(pmf_dev->dev, "APMF apmf_get_system_params failed :%d\n", ret); 314 293 goto out; 315 294 } 316 295
+7 -2
drivers/platform/x86/amd/pmf/core.c
··· 72 72 return NOTIFY_DONE; 73 73 } 74 74 75 - amd_pmf_set_sps_power_limits(pmf); 75 + if (is_apmf_func_supported(pmf, APMF_FUNC_STATIC_SLIDER_GRANULAR)) 76 + amd_pmf_set_sps_power_limits(pmf); 77 + 78 + if (is_apmf_func_supported(pmf, APMF_FUNC_OS_POWER_SLIDER_UPDATE)) 79 + amd_pmf_power_slider_update_event(pmf); 76 80 77 81 return NOTIFY_OK; 78 82 } ··· 301 297 int ret; 302 298 303 299 /* Enable Static Slider */ 304 - if (is_apmf_func_supported(dev, APMF_FUNC_STATIC_SLIDER_GRANULAR)) { 300 + if (is_apmf_func_supported(dev, APMF_FUNC_STATIC_SLIDER_GRANULAR) || 301 + is_apmf_func_supported(dev, APMF_FUNC_OS_POWER_SLIDER_UPDATE)) { 305 302 amd_pmf_init_sps(dev); 306 303 dev->pwr_src_notifier.notifier_call = amd_pmf_pwr_src_notify_call; 307 304 power_supply_reg_notifier(&dev->pwr_src_notifier);
+16
drivers/platform/x86/amd/pmf/pmf.h
··· 21 21 #define APMF_FUNC_SBIOS_HEARTBEAT 4 22 22 #define APMF_FUNC_AUTO_MODE 5 23 23 #define APMF_FUNC_SET_FAN_IDX 7 24 + #define APMF_FUNC_OS_POWER_SLIDER_UPDATE 8 24 25 #define APMF_FUNC_STATIC_SLIDER_GRANULAR 9 25 26 #define APMF_FUNC_DYN_SLIDER_AC 11 26 27 #define APMF_FUNC_DYN_SLIDER_DC 12 ··· 44 43 #define GET_STT_MIN_LIMIT 0x1F 45 44 #define GET_STT_LIMIT_APU 0x20 46 45 #define GET_STT_LIMIT_HS2 0x21 46 + 47 + /* OS slider update notification */ 48 + #define DC_BEST_PERF 0 49 + #define DC_BETTER_PERF 1 50 + #define DC_BATTERY_SAVER 3 51 + #define AC_BEST_PERF 4 52 + #define AC_BETTER_PERF 5 53 + #define AC_BETTER_BATTERY 6 47 54 48 55 /* Fan Index for Auto Mode */ 49 56 #define FAN_INDEX_AUTO 0xFFFFFFFF ··· 201 192 u16 size; 202 193 struct apmf_sps_prop_granular prop[POWER_SOURCE_MAX][POWER_MODE_MAX]; 203 194 }; 195 + 196 + struct os_power_slider { 197 + u16 size; 198 + u8 slider_event; 199 + } __packed; 204 200 205 201 struct fan_table_control { 206 202 bool manual; ··· 397 383 int amd_pmf_init_metrics_table(struct amd_pmf_dev *dev); 398 384 int amd_pmf_get_power_source(void); 399 385 int apmf_install_handler(struct amd_pmf_dev *pmf_dev); 386 + int apmf_os_power_slider_update(struct amd_pmf_dev *dev, u8 flag); 400 387 401 388 /* SPS Layer */ 402 389 int amd_pmf_get_pprof_modes(struct amd_pmf_dev *pmf); ··· 408 393 int apmf_get_static_slider_granular(struct amd_pmf_dev *pdev, 409 394 struct apmf_static_slider_granular_output *output); 410 395 bool is_pprof_balanced(struct amd_pmf_dev *pmf); 396 + int amd_pmf_power_slider_update_event(struct amd_pmf_dev *dev); 411 397 412 398 413 399 int apmf_update_fan_idx(struct amd_pmf_dev *pdev, bool manual, u32 idx);
+70 -4
drivers/platform/x86/amd/pmf/sps.c
··· 174 174 return mode; 175 175 } 176 176 177 + int amd_pmf_power_slider_update_event(struct amd_pmf_dev *dev) 178 + { 179 + u8 mode, flag = 0; 180 + int src; 181 + 182 + mode = amd_pmf_get_pprof_modes(dev); 183 + if (mode < 0) 184 + return mode; 185 + 186 + src = amd_pmf_get_power_source(); 187 + 188 + if (src == POWER_SOURCE_AC) { 189 + switch (mode) { 190 + case POWER_MODE_PERFORMANCE: 191 + flag |= BIT(AC_BEST_PERF); 192 + break; 193 + case POWER_MODE_BALANCED_POWER: 194 + flag |= BIT(AC_BETTER_PERF); 195 + break; 196 + case POWER_MODE_POWER_SAVER: 197 + flag |= BIT(AC_BETTER_BATTERY); 198 + break; 199 + default: 200 + dev_err(dev->dev, "unsupported platform profile\n"); 201 + return -EOPNOTSUPP; 202 + } 203 + 204 + } else if (src == POWER_SOURCE_DC) { 205 + switch (mode) { 206 + case POWER_MODE_PERFORMANCE: 207 + flag |= BIT(DC_BEST_PERF); 208 + break; 209 + case POWER_MODE_BALANCED_POWER: 210 + flag |= BIT(DC_BETTER_PERF); 211 + break; 212 + case POWER_MODE_POWER_SAVER: 213 + flag |= BIT(DC_BATTERY_SAVER); 214 + break; 215 + default: 216 + dev_err(dev->dev, "unsupported platform profile\n"); 217 + return -EOPNOTSUPP; 218 + } 219 + } 220 + 221 + apmf_os_power_slider_update(dev, flag); 222 + 223 + return 0; 224 + } 225 + 177 226 static int amd_pmf_profile_set(struct platform_profile_handler *pprof, 178 227 enum platform_profile_option profile) 179 228 { 180 229 struct amd_pmf_dev *pmf = container_of(pprof, struct amd_pmf_dev, pprof); 230 + int ret = 0; 181 231 182 232 pmf->current_profile = profile; 183 233 184 - return amd_pmf_set_sps_power_limits(pmf); 234 + /* Notify EC about the slider position change */ 235 + if (is_apmf_func_supported(pmf, APMF_FUNC_OS_POWER_SLIDER_UPDATE)) { 236 + ret = amd_pmf_power_slider_update_event(pmf); 237 + if (ret) 238 + return ret; 239 + } 240 + 241 + if (is_apmf_func_supported(pmf, APMF_FUNC_STATIC_SLIDER_GRANULAR)) { 242 + ret = amd_pmf_set_sps_power_limits(pmf); 243 + if (ret) 244 + return ret; 245 + } 246 + 247 + return 0; 185 248 } 186 249 187 250 int amd_pmf_init_sps(struct amd_pmf_dev *dev) ··· 252 189 int err; 253 190 254 191 dev->current_profile = PLATFORM_PROFILE_BALANCED; 255 - amd_pmf_load_defaults_sps(dev); 256 192 257 - /* update SPS balanced power mode thermals */ 258 - amd_pmf_set_sps_power_limits(dev); 193 + if (is_apmf_func_supported(dev, APMF_FUNC_STATIC_SLIDER_GRANULAR)) { 194 + amd_pmf_load_defaults_sps(dev); 195 + 196 + /* update SPS balanced power mode thermals */ 197 + amd_pmf_set_sps_power_limits(dev); 198 + } 259 199 260 200 dev->pprof.profile_get = amd_pmf_profile_get; 261 201 dev->pprof.profile_set = amd_pmf_profile_set;
+12 -2
drivers/platform/x86/asus-wmi.c
··· 738 738 struct device_attribute *attr, 739 739 const char *buf, size_t count) 740 740 { 741 - u32 cmd, mode, r, g, b, speed; 741 + u32 cmd, mode, r, g, b, speed; 742 742 int err; 743 743 744 744 if (sscanf(buf, "%d %d %d %d %d %d", &cmd, &mode, &r, &g, &b, &speed) != 6) 745 745 return -EINVAL; 746 746 747 - cmd = !!cmd; 747 + /* B3 is set and B4 is save to BIOS */ 748 + switch (cmd) { 749 + case 0: 750 + cmd = 0xb3; 751 + break; 752 + case 1: 753 + cmd = 0xb4; 754 + break; 755 + default: 756 + return -EINVAL; 757 + } 748 758 749 759 /* These are the known usable modes across all TUF/ROG */ 750 760 if (mode >= 12 || mode == 9)
+2
drivers/platform/x86/huawei-wmi.c
··· 85 85 { KE_IGNORE, 0x293, { KEY_KBDILLUMTOGGLE } }, 86 86 { KE_IGNORE, 0x294, { KEY_KBDILLUMUP } }, 87 87 { KE_IGNORE, 0x295, { KEY_KBDILLUMUP } }, 88 + // Ignore Ambient Light Sensing 89 + { KE_KEY, 0x2c1, { KEY_RESERVED } }, 88 90 { KE_END, 0 } 89 91 };
+15 -12
drivers/platform/x86/intel/hid.c
··· 150 150 DMI_MATCH(DMI_PRODUCT_NAME, "Surface Go"), 151 151 }, 152 152 }, 153 + { 154 + .matches = { 155 + DMI_MATCH(DMI_SYS_VENDOR, "HP"), 156 + DMI_MATCH(DMI_PRODUCT_NAME, "HP Elite Dragonfly G2 Notebook PC"), 157 + }, 158 + }, 153 159 { } 154 160 }; 155 161 ··· 626 620 static int intel_hid_probe(struct platform_device *device) 627 621 { 628 622 acpi_handle handle = ACPI_HANDLE(&device->dev); 629 - unsigned long long mode; 623 + unsigned long long mode, dummy; 630 624 struct intel_hid_priv *priv; 631 625 acpi_status status; 632 626 int err; ··· 698 692 if (err) 699 693 goto err_remove_notify; 700 694 701 - if (priv->array) { 702 - unsigned long long dummy; 695 + intel_button_array_enable(&device->dev, true); 703 696 704 - intel_button_array_enable(&device->dev, true); 705 - 706 - /* Call button load method to enable HID power button */ 707 - if (!intel_hid_evaluate_method(handle, INTEL_HID_DSM_BTNL_FN, 708 - &dummy)) { 709 - dev_warn(&device->dev, 710 - "failed to enable HID power button\n"); 711 - } 712 - } 697 + /* 698 + * Call button load method to enable HID power button 699 + * Always do this since it activates events on some devices without 700 + * a button array too. 701 + */ 702 + if (!intel_hid_evaluate_method(handle, INTEL_HID_DSM_BTNL_FN, &dummy)) 703 + dev_warn(&device->dev, "failed to enable HID power button\n"); 713 704 714 705 device_init_wakeup(&device->dev, true); 715 706 /*
+4 -4
drivers/platform/x86/msi-laptop.c
··· 208 208 return -EINVAL; 209 209 210 210 if (quirks->ec_read_only) 211 - return -EOPNOTSUPP; 211 + return 0; 212 212 213 213 /* read current device state */ 214 214 result = ec_read(MSI_STANDARD_EC_COMMAND_ADDRESS, &rdata); ··· 838 838 static void msi_init_rfkill(struct work_struct *ignored) 839 839 { 840 840 if (rfk_wlan) { 841 - rfkill_set_sw_state(rfk_wlan, !wlan_s); 841 + msi_rfkill_set_state(rfk_wlan, !wlan_s); 842 842 rfkill_wlan_set(NULL, !wlan_s); 843 843 } 844 844 if (rfk_bluetooth) { 845 - rfkill_set_sw_state(rfk_bluetooth, !bluetooth_s); 845 + msi_rfkill_set_state(rfk_bluetooth, !bluetooth_s); 846 846 rfkill_bluetooth_set(NULL, !bluetooth_s); 847 847 } 848 848 if (rfk_threeg) { 849 - rfkill_set_sw_state(rfk_threeg, !threeg_s); 849 + msi_rfkill_set_state(rfk_threeg, !threeg_s); 850 850 rfkill_threeg_set(NULL, !threeg_s); 851 851 } 852 852 }
+17 -4
drivers/platform/x86/serial-multi-instantiate.c
··· 21 21 #define IRQ_RESOURCE_NONE 0 22 22 #define IRQ_RESOURCE_GPIO 1 23 23 #define IRQ_RESOURCE_APIC 2 24 + #define IRQ_RESOURCE_AUTO 3 24 25 25 26 enum smi_bus_type { 26 27 SMI_I2C, ··· 53 52 int ret; 54 53 55 54 switch (inst->flags & IRQ_RESOURCE_TYPE) { 55 + case IRQ_RESOURCE_AUTO: 56 + ret = acpi_dev_gpio_irq_get(adev, inst->irq_idx); 57 + if (ret > 0) { 58 + dev_dbg(&pdev->dev, "Using gpio irq\n"); 59 + break; 60 + } 61 + ret = platform_get_irq(pdev, inst->irq_idx); 62 + if (ret > 0) { 63 + dev_dbg(&pdev->dev, "Using platform irq\n"); 64 + break; 65 + } 66 + break; 56 67 case IRQ_RESOURCE_GPIO: 57 68 ret = acpi_dev_gpio_irq_get(adev, inst->irq_idx); 58 69 break; ··· 320 307 321 308 static const struct smi_node cs35l41_hda = { 322 309 .instances = { 323 - { "cs35l41-hda", IRQ_RESOURCE_GPIO, 0 }, 324 - { "cs35l41-hda", IRQ_RESOURCE_GPIO, 0 }, 325 - { "cs35l41-hda", IRQ_RESOURCE_GPIO, 0 }, 326 - { "cs35l41-hda", IRQ_RESOURCE_GPIO, 0 }, 310 + { "cs35l41-hda", IRQ_RESOURCE_AUTO, 0 }, 311 + { "cs35l41-hda", IRQ_RESOURCE_AUTO, 0 }, 312 + { "cs35l41-hda", IRQ_RESOURCE_AUTO, 0 }, 313 + { "cs35l41-hda", IRQ_RESOURCE_AUTO, 0 }, 327 314 {} 328 315 }, 329 316 .bus_type = SMI_AUTO_DETECT,
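The new IRQ_RESOURCE_AUTO case tries a GpioInt resource first and falls back to an ordinary Interrupt resource. Detached from the driver, the lookup order is roughly this; both helpers return a positive Linux IRQ number on success and a negative errno otherwise:

#include <linux/acpi.h>
#include <linux/platform_device.h>

static int example_get_irq(struct platform_device *pdev,
                           struct acpi_device *adev, int idx)
{
        int irq;

        irq = acpi_dev_gpio_irq_get(adev, idx);  /* GpioInt() entry */
        if (irq > 0)
                return irq;

        return platform_get_irq(pdev, idx);      /* Interrupt() entry */
}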
+2 -2
drivers/platform/x86/think-lmi.c
··· 719 719 /* Format: 'Password,Signature' */ 720 720 auth_str = kasprintf(GFP_KERNEL, "%s,%s", passwd, setting->signature); 721 721 if (!auth_str) { 722 - kfree(passwd); 722 + kfree_sensitive(passwd); 723 723 return -ENOMEM; 724 724 } 725 725 ret = tlmi_simple_call(LENOVO_CERT_TO_PASSWORD_GUID, auth_str); 726 726 kfree(auth_str); 727 - kfree(passwd); 727 + kfree_sensitive(passwd); 728 728 729 729 return ret ?: count; 730 730 }
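Both frees of the password buffer now use kfree_sensitive(), which zeroes the allocation with memzero_explicit() before returning it to the allocator, so the secret cannot linger in freed memory. A minimal sketch with a placeholder secret:

#include <linux/slab.h>
#include <linux/string.h>

static void example_handle_secret(void)
{
        char *passwd = kstrdup("hunter2", GFP_KERNEL);  /* placeholder */

        if (!passwd)
                return;

        /* ... authenticate with passwd ... */

        kfree_sensitive(passwd);        /* zero first, then free */
}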
+4 -3
drivers/platform/x86/touchscreen_dmi.c
··· 27 27 /* NOTE: Please keep all entries sorted alphabetically */ 28 28 29 29 static const struct property_entry archos_101_cesium_educ_props[] = { 30 - PROPERTY_ENTRY_U32("touchscreen-size-x", 1280), 31 - PROPERTY_ENTRY_U32("touchscreen-size-y", 1850), 32 - PROPERTY_ENTRY_BOOL("touchscreen-inverted-x"), 30 + PROPERTY_ENTRY_U32("touchscreen-size-x", 1850), 31 + PROPERTY_ENTRY_U32("touchscreen-size-y", 1280), 32 + PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"), 33 33 PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"), 34 34 PROPERTY_ENTRY_U32("silead,max-fingers", 10), 35 + PROPERTY_ENTRY_BOOL("silead,home-button"), 35 36 PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-archos-101-cesium-educ.fw"), 36 37 { } 37 38 };
+5 -5
drivers/regulator/mt6358-regulator.c
··· 661 661 /* Disable VCN33_WIFI */ 662 662 ret = regmap_update_bits(mt6397->regmap, MT6358_LDO_VCN33_CON0_1, BIT(0), 0); 663 663 if (ret) { 664 - dev_err(dev, "Failed to disable VCN33_BT\n"); 664 + dev_err(dev, "Failed to disable VCN33_WIFI\n"); 665 665 return ret; 666 666 } 667 667 ··· 676 676 const struct mt6358_regulator_info *mt6358_info; 677 677 int i, max_regulator, ret; 678 678 679 - ret = mt6358_sync_vcn33_setting(&pdev->dev); 680 - if (ret) 681 - return ret; 682 - 683 679 if (mt6397->chip_id == MT6366_CHIP_ID) { 684 680 max_regulator = MT6366_MAX_REGULATOR; 685 681 mt6358_info = mt6366_regulators; ··· 683 687 max_regulator = MT6358_MAX_REGULATOR; 684 688 mt6358_info = mt6358_regulators; 685 689 } 690 + 691 + ret = mt6358_sync_vcn33_setting(&pdev->dev); 692 + if (ret) 693 + return ret; 686 694 687 695 for (i = 0; i < max_regulator; i++) { 688 696 config.dev = &pdev->dev;
+49 -78
drivers/s390/block/dasd.c
··· 2943 2943 * Requeue a request back to the block request queue 2944 2944 * only works for block requests 2945 2945 */ 2946 - static int _dasd_requeue_request(struct dasd_ccw_req *cqr) 2946 + static void _dasd_requeue_request(struct dasd_ccw_req *cqr) 2947 2947 { 2948 - struct dasd_block *block = cqr->block; 2949 2948 struct request *req; 2950 2949 2951 - if (!block) 2952 - return -EINVAL; 2953 2950 /* 2954 2951 * If the request is an ERP request there is nothing to requeue. 2955 2952 * This will be done with the remaining original request. 2956 2953 */ 2957 2954 if (cqr->refers) 2958 - return 0; 2955 + return; 2959 2956 spin_lock_irq(&cqr->dq->lock); 2960 2957 req = (struct request *) cqr->callback_data; 2961 2958 blk_mq_requeue_request(req, true); 2962 2959 spin_unlock_irq(&cqr->dq->lock); 2963 2960 2964 - return 0; 2961 + return; 2965 2962 } 2966 2963 2967 - /* 2968 - * Go through all request on the dasd_block request queue, cancel them 2969 - * on the respective dasd_device, and return them to the generic 2970 - * block layer. 2971 - */ 2972 - static int dasd_flush_block_queue(struct dasd_block *block) 2964 + static int _dasd_requests_to_flushqueue(struct dasd_block *block, 2965 + struct list_head *flush_queue) 2973 2966 { 2974 2967 struct dasd_ccw_req *cqr, *n; 2975 - int rc, i; 2976 - struct list_head flush_queue; 2977 2968 unsigned long flags; 2969 + int rc, i; 2978 2970 2979 - INIT_LIST_HEAD(&flush_queue); 2980 - spin_lock_bh(&block->queue_lock); 2971 + spin_lock_irqsave(&block->queue_lock, flags); 2981 2972 rc = 0; 2982 2973 restart: 2983 2974 list_for_each_entry_safe(cqr, n, &block->ccw_queue, blocklist) { ··· 2983 2992 * is returned from the dasd_device layer. 2984 2993 */ 2985 2994 cqr->callback = _dasd_wake_block_flush_cb; 2986 - for (i = 0; cqr != NULL; cqr = cqr->refers, i++) 2987 - list_move_tail(&cqr->blocklist, &flush_queue); 2995 + for (i = 0; cqr; cqr = cqr->refers, i++) 2996 + list_move_tail(&cqr->blocklist, flush_queue); 2988 2997 if (i > 1) 2989 2998 /* moved more than one request - need to restart */ 2990 2999 goto restart; 2991 3000 } 2992 - spin_unlock_bh(&block->queue_lock); 3001 + spin_unlock_irqrestore(&block->queue_lock, flags); 3002 + 3003 + return rc; 3004 + } 3005 + 3006 + /* 3007 + * Go through all request on the dasd_block request queue, cancel them 3008 + * on the respective dasd_device, and return them to the generic 3009 + * block layer. 
3010 + */ 3011 + static int dasd_flush_block_queue(struct dasd_block *block) 3012 + { 3013 + struct dasd_ccw_req *cqr, *n; 3014 + struct list_head flush_queue; 3015 + unsigned long flags; 3016 + int rc; 3017 + 3018 + INIT_LIST_HEAD(&flush_queue); 3019 + rc = _dasd_requests_to_flushqueue(block, &flush_queue); 3020 + 2993 3021 /* Now call the callback function of flushed requests */ 2994 3022 restart_cb: 2995 3023 list_for_each_entry_safe(cqr, n, &flush_queue, blocklist) { ··· 3891 3881 */ 3892 3882 int dasd_generic_requeue_all_requests(struct dasd_device *device) 3893 3883 { 3884 + struct dasd_block *block = device->block; 3894 3885 struct list_head requeue_queue; 3895 3886 struct dasd_ccw_req *cqr, *n; 3896 - struct dasd_ccw_req *refers; 3897 3887 int rc; 3898 3888 3889 + if (!block) 3890 + return 0; 3891 + 3899 3892 INIT_LIST_HEAD(&requeue_queue); 3900 - spin_lock_irq(get_ccwdev_lock(device->cdev)); 3901 - rc = 0; 3902 - list_for_each_entry_safe(cqr, n, &device->ccw_queue, devlist) { 3903 - /* Check status and move request to flush_queue */ 3904 - if (cqr->status == DASD_CQR_IN_IO) { 3905 - rc = device->discipline->term_IO(cqr); 3906 - if (rc) { 3907 - /* unable to terminate requeust */ 3908 - dev_err(&device->cdev->dev, 3909 - "Unable to terminate request %p " 3910 - "on suspend\n", cqr); 3911 - spin_unlock_irq(get_ccwdev_lock(device->cdev)); 3912 - dasd_put_device(device); 3913 - return rc; 3914 - } 3893 + rc = _dasd_requests_to_flushqueue(block, &requeue_queue); 3894 + 3895 + /* Now call the callback function of flushed requests */ 3896 + restart_cb: 3897 + list_for_each_entry_safe(cqr, n, &requeue_queue, blocklist) { 3898 + wait_event(dasd_flush_wq, (cqr->status < DASD_CQR_QUEUED)); 3899 + /* Process finished ERP request. */ 3900 + if (cqr->refers) { 3901 + spin_lock_bh(&block->queue_lock); 3902 + __dasd_process_erp(block->base, cqr); 3903 + spin_unlock_bh(&block->queue_lock); 3904 + /* restart list_for_xx loop since dasd_process_erp 3905 + * might remove multiple elements 3906 + */ 3907 + goto restart_cb; 3915 3908 } 3916 - list_move_tail(&cqr->devlist, &requeue_queue); 3917 - } 3918 - spin_unlock_irq(get_ccwdev_lock(device->cdev)); 3919 - 3920 - list_for_each_entry_safe(cqr, n, &requeue_queue, devlist) { 3921 - wait_event(dasd_flush_wq, 3922 - (cqr->status != DASD_CQR_CLEAR_PENDING)); 3923 - 3924 - /* 3925 - * requeue requests to blocklayer will only work 3926 - * for block device requests 3927 - */ 3928 - if (_dasd_requeue_request(cqr)) 3929 - continue; 3930 - 3931 - /* remove requests from device and block queue */ 3932 - list_del_init(&cqr->devlist); 3933 - while (cqr->refers != NULL) { 3934 - refers = cqr->refers; 3935 - /* remove the request from the block queue */ 3936 - list_del(&cqr->blocklist); 3937 - /* free the finished erp request */ 3938 - dasd_free_erp_request(cqr, cqr->memdev); 3939 - cqr = refers; 3940 - } 3941 - 3942 - /* 3943 - * _dasd_requeue_request already checked for a valid 3944 - * blockdevice, no need to check again 3945 - * all erp requests (cqr->refers) have a cqr->block 3946 - * pointer copy from the original cqr 3947 - */ 3909 + _dasd_requeue_request(cqr); 3948 3910 list_del_init(&cqr->blocklist); 3949 3911 cqr->block->base->discipline->free_cp( 3950 3912 cqr, (struct request *) cqr->callback_data); 3951 - } 3952 - 3953 - /* 3954 - * if requests remain then they are internal request 3955 - * and go back to the device queue 3956 - */ 3957 - if (!list_empty(&requeue_queue)) { 3958 - /* move freeze_queue to start of the ccw_queue */ 3959 - 
spin_lock_irq(get_ccwdev_lock(device->cdev)); 3960 - list_splice_tail(&requeue_queue, &device->ccw_queue); 3961 - spin_unlock_irq(get_ccwdev_lock(device->cdev)); 3962 3913 } 3963 3914 dasd_schedule_device_bh(device); 3964 3915 return rc;
+2 -2
drivers/s390/block/dasd_3990_erp.c
··· 1050 1050 dev_err(&device->cdev->dev, "An I/O request was rejected" 1051 1051 " because writing is inhibited\n"); 1052 1052 erp = dasd_3990_erp_cleanup(erp, DASD_CQR_FAILED); 1053 - } else if (sense[7] & SNS7_INVALID_ON_SEC) { 1053 + } else if (sense[7] == SNS7_INVALID_ON_SEC) { 1054 1054 dev_err(&device->cdev->dev, "An I/O request was rejected on a copy pair secondary device\n"); 1055 1055 /* suppress dump of sense data for this error */ 1056 1056 set_bit(DASD_CQR_SUPPRESS_CR, &erp->refers->flags); ··· 2441 2441 erp->block = cqr->block; 2442 2442 erp->magic = cqr->magic; 2443 2443 erp->expires = cqr->expires; 2444 - erp->retries = 256; 2444 + erp->retries = device->default_retries; 2445 2445 erp->buildclk = get_tod_clock(); 2446 2446 erp->status = DASD_CQR_FILLED; 2447 2447
+1
drivers/s390/block/dasd_ioctl.c
··· 131 131 spin_unlock_irqrestore(get_ccwdev_lock(base->cdev), flags); 132 132 133 133 dasd_schedule_block_bh(block); 134 + dasd_schedule_device_bh(base); 134 135 return 0; 135 136 } 136 137
+4 -3
drivers/scsi/sg.c
··· 1497 1497 int error; 1498 1498 unsigned long iflags; 1499 1499 1500 - error = blk_get_queue(scsidp->request_queue); 1501 - if (error) 1502 - return error; 1500 + if (!blk_get_queue(scsidp->request_queue)) { 1501 + pr_warn("%s: get scsi_device queue failed\n", __func__); 1502 + return -ENODEV; 1503 + } 1503 1504 1504 1505 error = -ENOMEM; 1505 1506 cdev = cdev_alloc();
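The sg fix reflects that blk_get_queue() returns a bool (true when it took a reference on a live queue), not a 0/-errno value, so its result must not be stored into an error variable. The corrected shape, as a sketch:

#include <linux/blkdev.h>

static int example_hold_queue(struct request_queue *q)
{
        /* true: reference taken; false: queue is being torn down */
        if (!blk_get_queue(q))
                return -ENODEV;

        /* ... use q ... */

        blk_put_queue(q);
        return 0;
}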
+2 -2
drivers/soundwire/amd_manager.c
··· 910 910 return -ENOMEM; 911 911 912 912 amd_manager->acp_mmio = devm_ioremap(dev, res->start, resource_size(res)); 913 - if (IS_ERR(amd_manager->mmio)) { 913 + if (!amd_manager->acp_mmio) { 914 914 dev_err(dev, "mmio not found\n"); 915 - return PTR_ERR(amd_manager->mmio); 915 + return -ENOMEM; 916 916 } 917 917 amd_manager->instance = pdata->instance; 918 918 amd_manager->mmio = amd_manager->acp_mmio +
+4 -4
drivers/soundwire/bus.c
··· 922 922 "initializing enumeration and init completion for Slave %d\n", 923 923 slave->dev_num); 924 924 925 - init_completion(&slave->enumeration_complete); 926 - init_completion(&slave->initialization_complete); 925 + reinit_completion(&slave->enumeration_complete); 926 + reinit_completion(&slave->initialization_complete); 927 927 928 928 } else if ((status == SDW_SLAVE_ATTACHED) && 929 929 (slave->status == SDW_SLAVE_UNATTACHED)) { ··· 931 931 "signaling enumeration completion for Slave %d\n", 932 932 slave->dev_num); 933 933 934 - complete(&slave->enumeration_complete); 934 + complete_all(&slave->enumeration_complete); 935 935 } 936 936 slave->status = status; 937 937 mutex_unlock(&bus->bus_lock); ··· 1951 1951 "signaling initialization completion for Slave %d\n", 1952 1952 slave->dev_num); 1953 1953 1954 - complete(&slave->initialization_complete); 1954 + complete_all(&slave->initialization_complete); 1955 1955 1956 1956 /* 1957 1957 * If the manager became pm_runtime active, the peripherals will be
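The soundwire change above is the standard completion lifecycle: init_completion() exactly once, reinit_completion() to re-arm before each new wait cycle (re-running init_completion() would also reinitialize the wait queue under any waiters), and complete_all() so every waiter is released, not just one. Skeleton with an invented structure:

#include <linux/completion.h>

struct example_peer {
        struct completion attach_done;  /* hypothetical field */
};

static void example_alloc_time_init(struct example_peer *p)
{
        init_completion(&p->attach_done);       /* exactly once */
}

static void example_on_detach(struct example_peer *p)
{
        reinit_completion(&p->attach_done);     /* re-arm, don't re-init */
}

static void example_on_attach(struct example_peer *p)
{
        complete_all(&p->attach_done);          /* release every waiter */
}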
+1 -1
drivers/soundwire/qcom.c
··· 540 540 status = (val >> (dev_num * SWRM_MCP_SLV_STATUS_SZ)); 541 541 542 542 if ((status & SWRM_MCP_SLV_STATUS_MASK) == SDW_SLAVE_ALERT) { 543 - ctrl->status[dev_num] = status; 543 + ctrl->status[dev_num] = status & SWRM_MCP_SLV_STATUS_MASK; 544 544 return dev_num; 545 545 } 546 546 }
+49 -5
drivers/spi/spi-qcom-qspi.c
··· 69 69 WR_FIFO_OVERRUN) 70 70 #define QSPI_ALL_IRQS (QSPI_ERR_IRQS | RESP_FIFO_RDY | \ 71 71 WR_FIFO_EMPTY | WR_FIFO_FULL | \ 72 - TRANSACTION_DONE) 72 + TRANSACTION_DONE | DMA_CHAIN_DONE) 73 73 74 74 #define PIO_XFER_CTRL 0x0014 75 75 #define REQUEST_COUNT_MSK 0xffff ··· 308 308 dma_addr_t dma_cmd_desc; 309 309 310 310 /* allocate for dma cmd descriptor */ 311 - virt_cmd_desc = dma_pool_alloc(ctrl->dma_cmd_pool, GFP_KERNEL | __GFP_ZERO, &dma_cmd_desc); 312 - if (!virt_cmd_desc) 313 - return -ENOMEM; 311 + virt_cmd_desc = dma_pool_alloc(ctrl->dma_cmd_pool, GFP_ATOMIC | __GFP_ZERO, &dma_cmd_desc); 312 + if (!virt_cmd_desc) { 313 + dev_warn_once(ctrl->dev, "Couldn't find memory for descriptor\n"); 314 + return -EAGAIN; 315 + } 314 316 315 317 ctrl->virt_cmd_desc[ctrl->n_cmd_desc] = virt_cmd_desc; 316 318 ctrl->dma_cmd_desc[ctrl->n_cmd_desc] = dma_cmd_desc; ··· 357 355 358 356 for (i = 0; i < sgt->nents; i++) { 359 357 dma_ptr_sg = sg_dma_address(sgt->sgl + i); 358 + dma_len_sg = sg_dma_len(sgt->sgl + i); 360 359 if (!IS_ALIGNED(dma_ptr_sg, QSPI_ALIGN_REQ)) { 361 360 dev_warn_once(ctrl->dev, "dma_address not aligned to %d\n", QSPI_ALIGN_REQ); 361 + return -EAGAIN; 362 + } 363 + /* 364 + * When reading with DMA the controller writes to memory 1 word 365 + * at a time. If the length isn't a multiple of 4 bytes then 366 + * the controller can clobber the things later in memory. 367 + * Fallback to PIO to be safe. 368 + */ 369 + if (ctrl->xfer.dir == QSPI_READ && (dma_len_sg & 0x03)) { 370 + dev_warn_once(ctrl->dev, "fallback to PIO for read of size %#010x\n", 371 + dma_len_sg); 362 372 return -EAGAIN; 363 373 } 364 374 } ··· 455 441 456 442 ret = qcom_qspi_setup_dma_desc(ctrl, xfer); 457 443 if (ret != -EAGAIN) { 458 - if (!ret) 444 + if (!ret) { 445 + dma_wmb(); 459 446 qcom_qspi_dma_xfer(ctrl); 447 + } 460 448 goto exit; 461 449 } 462 450 dev_warn_once(ctrl->dev, "DMA failure, falling back to PIO\n"); ··· 619 603 int_status = readl(ctrl->base + MSTR_INT_STATUS); 620 604 writel(int_status, ctrl->base + MSTR_INT_STATUS); 621 605 606 + /* Ignore disabled interrupts */ 607 + int_status &= readl(ctrl->base + MSTR_INT_EN); 608 + 622 609 /* PIO mode handling */ 623 610 if (ctrl->xfer.dir == QSPI_WRITE) { 624 611 if (int_status & WR_FIFO_EMPTY) ··· 665 646 spin_unlock(&ctrl->lock); 666 647 return ret; 667 648 } 649 + 650 + static int qcom_qspi_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op) 651 + { 652 + /* 653 + * If qcom_qspi_can_dma() is going to return false we don't need to 654 + * adjust anything. 655 + */ 656 + if (op->data.nbytes <= QSPI_MAX_BYTES_FIFO) 657 + return 0; 658 + 659 + /* 660 + * When reading, the transfer needs to be a multiple of 4 bytes so 661 + * shrink the transfer if that's not true. The caller will then do a 662 + * second transfer to finish things up. 663 + */ 664 + if (op->data.dir == SPI_MEM_DATA_IN && (op->data.nbytes & 0x3)) 665 + op->data.nbytes &= ~0x3; 666 + 667 + return 0; 668 + } 669 + 670 + static const struct spi_controller_mem_ops qcom_qspi_mem_ops = { 671 + .adjust_op_size = qcom_qspi_adjust_op_size, 672 + }; 668 673 669 674 static int qcom_qspi_probe(struct platform_device *pdev) 670 675 { ··· 774 731 if (of_property_read_bool(pdev->dev.of_node, "iommus")) 775 732 master->can_dma = qcom_qspi_can_dma; 776 733 master->auto_runtime_pm = true; 734 + master->mem_ops = &qcom_qspi_mem_ops; 777 735 778 736 ret = devm_pm_opp_set_clkname(&pdev->dev, "core"); 779 737 if (ret)
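Two of the QSPI changes follow a common DMA-kick pattern: the descriptor is allocated with GFP_ATOMIC because the transfer path runs under a spinlock, and dma_wmb() orders the descriptor writes against the register write that starts the hardware. A sketch with a hypothetical doorbell register:

#include <linux/dmapool.h>
#include <linux/io.h>
#include <linux/kernel.h>

static int example_start_dma(struct dma_pool *pool, void __iomem *doorbell)
{
        dma_addr_t phys;
        u32 *desc;

        /* Atomic context: sleeping allocations are not allowed here. */
        desc = dma_pool_alloc(pool, GFP_ATOMIC | __GFP_ZERO, &phys);
        if (!desc)
                return -EAGAIN; /* let the caller fall back to PIO */

        desc[0] = 0x1;          /* placeholder descriptor contents */

        dma_wmb();              /* descriptor before doorbell */
        writel(lower_32_bits(phys), doorbell);
        return 0;
}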
+1 -1
drivers/staging/fbtft/fb_ili9341.c
··· 145 145 }, 146 146 }; 147 147 148 - FBTFT_REGISTER_DRIVER(DRVNAME, "ilitek,ili9341", &display); 148 + FBTFT_REGISTER_SPI_DRIVER(DRVNAME, "ilitek", "ili9341", &display); 149 149 150 150 MODULE_ALIAS("spi:" DRVNAME); 151 151 MODULE_ALIAS("platform:" DRVNAME);
+4 -2
drivers/staging/ks7010/ks_wlan_net.c
··· 1583 1583 commit |= SME_WEP_FLAG; 1584 1584 } 1585 1585 if (enc->key_len) { 1586 - memcpy(&key->key_val[0], &enc->key[0], enc->key_len); 1587 - key->key_len = enc->key_len; 1586 + int key_len = clamp_val(enc->key_len, 0, IW_ENCODING_TOKEN_MAX); 1587 + 1588 + memcpy(&key->key_val[0], &enc->key[0], key_len); 1589 + key->key_len = key_len; 1588 1590 commit |= (SME_WEP_VAL1 << index); 1589 1591 } 1590 1592 break;
+1
drivers/staging/media/atomisp/Kconfig
··· 13 13 tristate "Intel Atom Image Signal Processor Driver" 14 14 depends on VIDEO_DEV && INTEL_ATOMISP 15 15 depends on PMIC_OPREGION 16 + select V4L2_FWNODE 16 17 select IOSF_MBI 17 18 select VIDEOBUF2_VMALLOC 18 19 select VIDEO_V4L2_SUBDEV_API
+34 -9
drivers/staging/rtl8712/rtl871x_xmit.c
··· 21 21 #include "osdep_intf.h" 22 22 #include "usb_ops.h" 23 23 24 + #include <linux/usb.h> 24 25 #include <linux/ieee80211.h> 25 26 26 27 static const u8 P802_1H_OUI[P80211_OUI_LEN] = {0x00, 0x00, 0xf8}; ··· 56 55 sint i; 57 56 struct xmit_buf *pxmitbuf; 58 57 struct xmit_frame *pxframe; 58 + int j; 59 59 60 60 memset((unsigned char *)pxmitpriv, 0, sizeof(struct xmit_priv)); 61 61 spin_lock_init(&pxmitpriv->lock); ··· 119 117 _init_queue(&pxmitpriv->pending_xmitbuf_queue); 120 118 pxmitpriv->pallocated_xmitbuf = 121 119 kmalloc(NR_XMITBUFF * sizeof(struct xmit_buf) + 4, GFP_ATOMIC); 122 - if (!pxmitpriv->pallocated_xmitbuf) { 123 - kfree(pxmitpriv->pallocated_frame_buf); 124 - pxmitpriv->pallocated_frame_buf = NULL; 125 - return -ENOMEM; 126 - } 120 + if (!pxmitpriv->pallocated_xmitbuf) 121 + goto clean_up_frame_buf; 127 122 pxmitpriv->pxmitbuf = pxmitpriv->pallocated_xmitbuf + 4 - 128 123 ((addr_t)(pxmitpriv->pallocated_xmitbuf) & 3); 129 124 pxmitbuf = (struct xmit_buf *)pxmitpriv->pxmitbuf; ··· 128 129 INIT_LIST_HEAD(&pxmitbuf->list); 129 130 pxmitbuf->pallocated_buf = 130 131 kmalloc(MAX_XMITBUF_SZ + XMITBUF_ALIGN_SZ, GFP_ATOMIC); 131 - if (!pxmitbuf->pallocated_buf) 132 - return -ENOMEM; 132 + if (!pxmitbuf->pallocated_buf) { 133 + j = 0; 134 + goto clean_up_alloc_buf; 135 + } 133 136 pxmitbuf->pbuf = pxmitbuf->pallocated_buf + XMITBUF_ALIGN_SZ - 134 137 ((addr_t) (pxmitbuf->pallocated_buf) & 135 138 (XMITBUF_ALIGN_SZ - 1)); 136 - if (r8712_xmit_resource_alloc(padapter, pxmitbuf)) 137 - return -ENOMEM; 139 + if (r8712_xmit_resource_alloc(padapter, pxmitbuf)) { 140 + j = 1; 141 + goto clean_up_alloc_buf; 142 + } 138 143 list_add_tail(&pxmitbuf->list, 139 144 &(pxmitpriv->free_xmitbuf_queue.queue)); 140 145 pxmitbuf++; ··· 149 146 init_hwxmits(pxmitpriv->hwxmits, pxmitpriv->hwxmit_entry); 150 147 tasklet_setup(&pxmitpriv->xmit_tasklet, r8712_xmit_bh); 151 148 return 0; 149 + 150 + clean_up_alloc_buf: 151 + if (j) { 152 + /* failure happened in r8712_xmit_resource_alloc() 153 + * delete extra pxmitbuf->pallocated_buf 154 + */ 155 + kfree(pxmitbuf->pallocated_buf); 156 + } 157 + for (j = 0; j < i; j++) { 158 + int k; 159 + 160 + pxmitbuf--; /* reset pointer */ 161 + kfree(pxmitbuf->pallocated_buf); 162 + for (k = 0; k < 8; k++) /* delete xmit urb's */ 163 + usb_free_urb(pxmitbuf->pxmit_urb[k]); 164 + } 165 + kfree(pxmitpriv->pallocated_xmitbuf); 166 + pxmitpriv->pallocated_xmitbuf = NULL; 167 + clean_up_frame_buf: 168 + kfree(pxmitpriv->pallocated_frame_buf); 169 + pxmitpriv->pallocated_frame_buf = NULL; 170 + return -ENOMEM; 152 171 } 153 172 154 173 void _free_xmit_priv(struct xmit_priv *pxmitpriv)
+6
drivers/staging/rtl8712/xmit_linux.c
··· 112 112 for (i = 0; i < 8; i++) { 113 113 pxmitbuf->pxmit_urb[i] = usb_alloc_urb(0, GFP_KERNEL); 114 114 if (!pxmitbuf->pxmit_urb[i]) { 115 + int k; 116 + 117 + for (k = i - 1; k >= 0; k--) { 118 + /* handle allocation errors part way through loop */ 119 + usb_free_urb(pxmitbuf->pxmit_urb[k]); 120 + } 115 121 netdev_err(padapter->pnetdev, "pxmitbuf->pxmit_urb[i] == NULL\n"); 116 122 return -ENOMEM; 117 123 }
+2 -2
drivers/thermal/thermal_core.c
··· 1203 1203 struct thermal_zone_device * 1204 1204 thermal_zone_device_register_with_trips(const char *type, struct thermal_trip *trips, int num_trips, int mask, 1205 1205 void *devdata, struct thermal_zone_device_ops *ops, 1206 - struct thermal_zone_params *tzp, int passive_delay, 1206 + const struct thermal_zone_params *tzp, int passive_delay, 1207 1207 int polling_delay) 1208 1208 { 1209 1209 struct thermal_zone_device *tz; ··· 1371 1371 1372 1372 struct thermal_zone_device *thermal_zone_device_register(const char *type, int ntrips, int mask, 1373 1373 void *devdata, struct thermal_zone_device_ops *ops, 1374 - struct thermal_zone_params *tzp, int passive_delay, 1374 + const struct thermal_zone_params *tzp, int passive_delay, 1375 1375 int polling_delay) 1376 1376 { 1377 1377 return thermal_zone_device_register_with_trips(type, NULL, ntrips, mask,
+6 -21
drivers/thermal/thermal_of.c
··· 238 238 return 0; 239 239 } 240 240 241 - static struct thermal_zone_params *thermal_of_parameters_init(struct device_node *np) 241 + static void thermal_of_parameters_init(struct device_node *np, 242 + struct thermal_zone_params *tzp) 242 243 { 243 - struct thermal_zone_params *tzp; 244 244 int coef[2]; 245 245 int ncoef = ARRAY_SIZE(coef); 246 246 int prop, ret; 247 - 248 - tzp = kzalloc(sizeof(*tzp), GFP_KERNEL); 249 - if (!tzp) 250 - return ERR_PTR(-ENOMEM); 251 247 252 248 tzp->no_hwmon = true; 253 249 ··· 263 267 264 268 tzp->slope = coef[0]; 265 269 tzp->offset = coef[1]; 266 - 267 - return tzp; 268 270 } 269 271 270 272 static struct device_node *thermal_of_zone_get_by_name(struct thermal_zone_device *tz) ··· 436 442 static void thermal_of_zone_unregister(struct thermal_zone_device *tz) 437 443 { 438 444 struct thermal_trip *trips = tz->trips; 439 - struct thermal_zone_params *tzp = tz->tzp; 440 445 struct thermal_zone_device_ops *ops = tz->ops; 441 446 442 447 thermal_zone_device_disable(tz); 443 448 thermal_zone_device_unregister(tz); 444 449 kfree(trips); 445 - kfree(tzp); 446 450 kfree(ops); 447 451 } 448 452 ··· 469 477 { 470 478 struct thermal_zone_device *tz; 471 479 struct thermal_trip *trips; 472 - struct thermal_zone_params *tzp; 480 + struct thermal_zone_params tzp = {}; 473 481 struct thermal_zone_device_ops *of_ops; 474 482 struct device_node *np; 475 483 int delay, pdelay; ··· 501 509 goto out_kfree_trips; 502 510 } 503 511 504 - tzp = thermal_of_parameters_init(np); 505 - if (IS_ERR(tzp)) { 506 - ret = PTR_ERR(tzp); 507 - pr_err("Failed to initialize parameter from %pOFn: %d\n", np, ret); 508 - goto out_kfree_trips; 509 - } 512 + thermal_of_parameters_init(np, &tzp); 510 513 511 514 of_ops->bind = thermal_of_bind; 512 515 of_ops->unbind = thermal_of_unbind; ··· 509 522 mask = GENMASK_ULL((ntrips) - 1, 0); 510 523 511 524 tz = thermal_zone_device_register_with_trips(np->name, trips, ntrips, 512 - mask, data, of_ops, tzp, 525 + mask, data, of_ops, &tzp, 513 526 pdelay, delay); 514 527 if (IS_ERR(tz)) { 515 528 ret = PTR_ERR(tz); 516 529 pr_err("Failed to register thermal zone %pOFn: %d\n", np, ret); 517 - goto out_kfree_tzp; 530 + goto out_kfree_trips; 518 531 } 519 532 520 533 ret = thermal_zone_device_enable(tz); ··· 527 540 528 541 return tz; 529 542 530 - out_kfree_tzp: 531 - kfree(tzp); 532 543 out_kfree_trips: 533 544 kfree(trips); 534 545 out_kfree_of_ops:
+3 -1
drivers/tty/n_gsm.c
··· 3070 3070 gsm->has_devices = false; 3071 3071 } 3072 3072 for (i = NUM_DLCI - 1; i >= 0; i--) 3073 - if (gsm->dlci[i]) 3073 + if (gsm->dlci[i]) { 3074 3074 gsm_dlci_release(gsm->dlci[i]); 3075 + gsm->dlci[i] = NULL; 3076 + } 3075 3077 mutex_unlock(&gsm->mutex); 3076 3078 /* Now wipe the queues */ 3077 3079 tty_ldisc_flush(gsm->tty);
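Besides adding the missing braces, the n_gsm fix clears each DLCI pointer after release, the usual guard when a teardown path may run more than once. Generic sketch (names invented):

#include <linux/slab.h>

#define EXAMPLE_SLOTS 8

struct example_ctx {
        void *slot[EXAMPLE_SLOTS];
};

static void example_teardown(struct example_ctx *ctx)
{
        int i;

        for (i = EXAMPLE_SLOTS - 1; i >= 0; i--) {
                if (!ctx->slot[i])
                        continue;
                kfree(ctx->slot[i]);
                ctx->slot[i] = NULL;    /* a second pass now sees NULL */
        }
}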
+4 -2
drivers/tty/serial/8250/8250_dwlib.c
··· 244 244 struct dw8250_port_data *pd = p->private_data; 245 245 struct dw8250_data *data = to_dw8250_data(pd); 246 246 struct uart_8250_port *up = up_to_u8250p(p); 247 - u32 reg; 247 + u32 reg, old_dlf; 248 248 249 249 pd->hw_rs485_support = dw8250_detect_rs485_hw(p); 250 250 if (pd->hw_rs485_support) { ··· 270 270 dev_dbg(p->dev, "Designware UART version %c.%c%c\n", 271 271 (reg >> 24) & 0xff, (reg >> 16) & 0xff, (reg >> 8) & 0xff); 272 272 273 + /* Preserve value written by firmware or bootloader */ 274 + old_dlf = dw8250_readl_ext(p, DW_UART_DLF); 273 275 dw8250_writel_ext(p, DW_UART_DLF, ~0U); 274 276 reg = dw8250_readl_ext(p, DW_UART_DLF); 275 - dw8250_writel_ext(p, DW_UART_DLF, 0); 277 + dw8250_writel_ext(p, DW_UART_DLF, old_dlf); 276 278 277 279 if (reg) { 278 280 pd->dlf_size = fls(reg);
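The DLF change is a save/probe/restore sequence: sizing a register by writing all-ones must not clobber what firmware programmed into it. As a standalone sketch:

#include <linux/bitops.h>
#include <linux/io.h>

static unsigned int example_probe_reg_width(void __iomem *reg)
{
        u32 old = readl(reg);   /* value left by firmware/bootloader */
        u32 mask;

        writel(~0U, reg);
        mask = readl(reg);      /* only implemented bits stick */
        writel(old, reg);       /* restore instead of writing zero */

        return fls(mask);       /* highest implemented bit */
}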
-7
drivers/tty/serial/qcom_geni_serial.c
··· 1681 1681 if (ret) 1682 1682 return ret; 1683 1683 1684 - /* 1685 - * Set pm_runtime status as ACTIVE so that wakeup_irq gets 1686 - * enabled/disabled from dev_pm_arm_wake_irq during system 1687 - * suspend/resume respectively. 1688 - */ 1689 - pm_runtime_set_active(&pdev->dev); 1690 - 1691 1684 if (port->wakeup_irq > 0) { 1692 1685 device_init_wakeup(&pdev->dev, true); 1693 1686 ret = dev_pm_set_dedicated_wake_irq(&pdev->dev,
+1 -1
drivers/tty/serial/sh-sci.c
··· 590 590 dma_submit_error(s->cookie_tx)) { 591 591 if (s->cfg->regtype == SCIx_RZ_SCIFA_REGTYPE) 592 592 /* Switch irq from SCIF to DMA */ 593 - disable_irq(s->irqs[SCIx_TXI_IRQ]); 593 + disable_irq_nosync(s->irqs[SCIx_TXI_IRQ]); 594 594 595 595 s->cookie_tx = 0; 596 596 schedule_work(&s->work_tx);
+1 -1
drivers/tty/serial/sifive.c
··· 811 811 local_irq_restore(flags); 812 812 } 813 813 814 - static int __init sifive_serial_console_setup(struct console *co, char *options) 814 + static int sifive_serial_console_setup(struct console *co, char *options) 815 815 { 816 816 struct sifive_serial_port *ssp; 817 817 int baud = SIFIVE_DEFAULT_BAUD_RATE;
+1 -1
drivers/tty/serial/ucc_uart.c
··· 59 59 /* #define LOOPBACK */ 60 60 61 61 /* The major and minor device numbers are defined in 62 - * http://www.lanana.org/docs/device-list/devices-2.6+.txt. For the QE 62 + * Documentation/admin-guide/devices.txt. For the QE 63 63 * UART, we have major number 204 and minor numbers 46 - 49, which are the 64 64 * same as for the CPM2. This decision was made because no Freescale part 65 65 * has both a CPM and a QE.
+1 -1
drivers/tty/tty_io.c
··· 2285 2285 char ch, mbz = 0; 2286 2286 struct tty_ldisc *ld; 2287 2287 2288 - if (!tty_legacy_tiocsti) 2288 + if (!tty_legacy_tiocsti && !capable(CAP_SYS_ADMIN)) 2289 2289 return -EIO; 2290 2290 2291 2291 if ((current->signal->tty != tty) && !capable(CAP_SYS_ADMIN))
+3 -1
drivers/usb/cdns3/cdns3-gadget.c
··· 3015 3015 static int cdns3_gadget_check_config(struct usb_gadget *gadget) 3016 3016 { 3017 3017 struct cdns3_device *priv_dev = gadget_to_cdns3_device(gadget); 3018 + struct cdns3_endpoint *priv_ep; 3018 3019 struct usb_ep *ep; 3019 3020 int n_in = 0; 3020 3021 int total; 3021 3022 3022 3023 list_for_each_entry(ep, &gadget->ep_list, ep_list) { 3023 - if (ep->claimed && (ep->address & USB_DIR_IN)) 3024 + priv_ep = ep_to_cdns3_ep(ep); 3025 + if ((priv_ep->flags & EP_CLAIMED) && (ep->address & USB_DIR_IN)) 3024 3026 n_in++; 3025 3027 } 3026 3028
+4
drivers/usb/core/quirks.c
··· 436 436 /* novation SoundControl XL */ 437 437 { USB_DEVICE(0x1235, 0x0061), .driver_info = USB_QUIRK_RESET_RESUME }, 438 438 439 + /* Focusrite Scarlett Solo USB */ 440 + { USB_DEVICE(0x1235, 0x8211), .driver_info = 441 + USB_QUIRK_DISCONNECT_SUSPEND }, 442 + 439 443 /* Huawei 4G LTE module */ 440 444 { USB_DEVICE(0x12d1, 0x15bb), .driver_info = 441 445 USB_QUIRK_DISCONNECT_SUSPEND },
+2 -18
drivers/usb/dwc3/core.c
··· 277 277 /* 278 278 * We're resetting only the device side because, if we're in host mode, 279 279 * XHCI driver will reset the host block. If dwc3 was configured for 280 - * host-only mode, then we can return early. 280 + * host-only mode or current role is host, then we can return early. 281 281 */ 282 - if (dwc->current_dr_role == DWC3_GCTL_PRTCAP_HOST) 282 + if (dwc->dr_mode == USB_DR_MODE_HOST || dwc->current_dr_role == DWC3_GCTL_PRTCAP_HOST) 283 283 return 0; 284 284 285 285 reg = dwc3_readl(dwc->regs, DWC3_DCTL); ··· 1207 1207 reg |= DWC3_GUCTL1_DEV_FORCE_20_CLK_FOR_30_CLK; 1208 1208 1209 1209 dwc3_writel(dwc->regs, DWC3_GUCTL1, reg); 1210 - } 1211 - 1212 - if (dwc->dr_mode == USB_DR_MODE_HOST || 1213 - dwc->dr_mode == USB_DR_MODE_OTG) { 1214 - reg = dwc3_readl(dwc->regs, DWC3_GUCTL); 1215 - 1216 - /* 1217 - * Enable Auto retry Feature to make the controller operating in 1218 - * Host mode on seeing transaction errors(CRC errors or internal 1219 - * overrun scenerios) on IN transfers to reply to the device 1220 - * with a non-terminating retry ACK (i.e, an ACK transcation 1221 - * packet with Retry=1 & Nump != 0) 1222 - */ 1223 - reg |= DWC3_GUCTL_HSTINAUTORETRY; 1224 - 1225 - dwc3_writel(dwc->regs, DWC3_GUCTL, reg); 1226 1210 } 1227 1211 1228 1212 /*
-3
drivers/usb/dwc3/core.h
··· 256 256 #define DWC3_GCTL_GBLHIBERNATIONEN BIT(1) 257 257 #define DWC3_GCTL_DSBLCLKGTNG BIT(0) 258 258 259 - /* Global User Control Register */ 260 - #define DWC3_GUCTL_HSTINAUTORETRY BIT(14) 261 - 262 259 /* Global User Control 1 Register */ 263 260 #define DWC3_GUCTL1_DEV_DECOUPLE_L1L2_EVT BIT(31) 264 261 #define DWC3_GUCTL1_TX_IPGAP_LINECHECK_DIS BIT(28)
+4 -2
drivers/usb/dwc3/dwc3-pci.c
··· 233 233 234 234 /* 235 235 * A lot of BYT devices lack ACPI resource entries for 236 - * the GPIOs, add a fallback mapping to the reference 236 + * the GPIOs. If the ACPI entry for the GPIO controller 237 + * is present add a fallback mapping to the reference 237 238 * design GPIOs which all boards seem to use. 238 239 */ 239 - gpiod_add_lookup_table(&platform_bytcr_gpios); 240 + if (acpi_dev_present("INT33FC", NULL, -1)) 241 + gpiod_add_lookup_table(&platform_bytcr_gpios); 240 242 241 243 /* 242 244 * These GPIOs will turn on the USB2 PHY. Note that we have to
+4
drivers/usb/gadget/composite.c
··· 1125 1125 goto done; 1126 1126 1127 1127 status = bind(config); 1128 + 1129 + if (status == 0) 1130 + status = usb_gadget_check_config(cdev->gadget); 1131 + 1128 1132 if (status < 0) { 1129 1133 while (!list_empty(&config->functions)) { 1130 1134 struct usb_function *f;
+7 -5
drivers/usb/gadget/legacy/raw_gadget.c
··· 310 310 dev->eps_num = i; 311 311 spin_unlock_irqrestore(&dev->lock, flags); 312 312 313 + ret = raw_queue_event(dev, USB_RAW_EVENT_CONNECT, 0, NULL); 314 + if (ret < 0) { 315 + dev_err(&gadget->dev, "failed to queue event\n"); 316 + set_gadget_data(gadget, NULL); 317 + return ret; 318 + } 319 + 313 320 /* Matches kref_put() in gadget_unbind(). */ 314 321 kref_get(&dev->count); 315 - 316 - ret = raw_queue_event(dev, USB_RAW_EVENT_CONNECT, 0, NULL); 317 - if (ret < 0) 318 - dev_err(&gadget->dev, "failed to queue event\n"); 319 - 320 322 return ret; 321 323 } 322 324
-1
drivers/usb/gadget/udc/core.c
··· 878 878 */ 879 879 if (gadget->connected) 880 880 ret = usb_gadget_connect_locked(gadget); 881 - mutex_unlock(&gadget->udc->connect_lock); 882 881 883 882 unlock: 884 883 mutex_unlock(&gadget->udc->connect_lock);
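The deleted line was a double unlock: the function already funnels every path through a single unlock label. The shape being restored looks roughly like this:

#include <linux/mutex.h>

struct example_dev {
        struct mutex lock;
        bool ready;
};

static int example_op(struct example_dev *d)
{
        int ret = 0;

        mutex_lock(&d->lock);
        if (!d->ready) {
                ret = -EAGAIN;
                goto unlock;    /* no mutex_unlock() on the way */
        }

        /* ... work under the lock ... */

unlock:
        mutex_unlock(&d->lock); /* the only unlock, on every path */
        return ret;
}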
+4 -4
drivers/usb/gadget/udc/tegra-xudc.c
··· 3718 3718 int err; 3719 3719 3720 3720 xudc->genpd_dev_device = dev_pm_domain_attach_by_name(dev, "dev"); 3721 - if (IS_ERR_OR_NULL(xudc->genpd_dev_device)) { 3722 - err = PTR_ERR(xudc->genpd_dev_device) ? : -ENODATA; 3721 + if (IS_ERR(xudc->genpd_dev_device)) { 3722 + err = PTR_ERR(xudc->genpd_dev_device); 3723 3723 dev_err(dev, "failed to get device power domain: %d\n", err); 3724 3724 return err; 3725 3725 } 3726 3726 3727 3727 xudc->genpd_dev_ss = dev_pm_domain_attach_by_name(dev, "ss"); 3728 - if (IS_ERR_OR_NULL(xudc->genpd_dev_ss)) { 3729 - err = PTR_ERR(xudc->genpd_dev_ss) ? : -ENODATA; 3728 + if (IS_ERR(xudc->genpd_dev_ss)) { 3729 + err = PTR_ERR(xudc->genpd_dev_ss); 3730 3730 dev_err(dev, "failed to get SuperSpeed power domain: %d\n", err); 3731 3731 return err; 3732 3732 }
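The tegra-xudc fix above, and the identical xhci-tegra change further down, rely on dev_pm_domain_attach_by_name() returning NULL when the device tree simply does not describe such a domain, which is a valid configuration rather than a failure; only an ERR_PTR should abort the probe. Sketch:

#include <linux/err.h>
#include <linux/pm_domain.h>

static int example_attach(struct device *dev, struct device **pd_dev)
{
        *pd_dev = dev_pm_domain_attach_by_name(dev, "host");
        if (IS_ERR(*pd_dev))
                return PTR_ERR(*pd_dev);

        /* *pd_dev may be NULL here: nothing to manage, not an error. */
        return 0;
}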
+7 -1
drivers/usb/host/ohci-at91.c
··· 672 672 else 673 673 at91_start_clock(ohci_at91); 674 674 675 - ohci_resume(hcd, false); 675 + /* 676 + * According to the comment in ohci_hcd_at91_drv_suspend() 677 + * we need to do a reset if the 48 MHz clock was stopped, 678 + * that is, if ohci_at91->wakeup is clear. Tell ohci_resume() 679 + * to reset in this case by setting its "hibernated" flag. 680 + */ 681 + ohci_resume(hcd, !ohci_at91->wakeup); 676 682 677 683 return 0; 678 684 }
+1
drivers/usb/host/xhci-mtk.c
··· 586 586 } 587 587 588 588 device_init_wakeup(dev, true); 589 + dma_set_max_seg_size(dev, UINT_MAX); 589 590 590 591 xhci = hcd_to_xhci(hcd); 591 592 xhci->main_hcd = hcd;
+1 -3
drivers/usb/host/xhci-pci.c
··· 479 479 pdev->device == 0x3432) 480 480 xhci->quirks |= XHCI_BROKEN_STREAMS; 481 481 482 - if (pdev->vendor == PCI_VENDOR_ID_VIA && pdev->device == 0x3483) { 482 + if (pdev->vendor == PCI_VENDOR_ID_VIA && pdev->device == 0x3483) 483 483 xhci->quirks |= XHCI_LPM_SUPPORT; 484 - xhci->quirks |= XHCI_EP_CTX_BROKEN_DCS; 485 - } 486 484 487 485 if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA && 488 486 pdev->device == PCI_DEVICE_ID_ASMEDIA_1042_XHCI) {
+1 -24
drivers/usb/host/xhci-ring.c
··· 626 626 struct xhci_ring *ep_ring; 627 627 struct xhci_command *cmd; 628 628 struct xhci_segment *new_seg; 629 - struct xhci_segment *halted_seg = NULL; 630 629 union xhci_trb *new_deq; 631 630 int new_cycle; 632 - union xhci_trb *halted_trb; 633 - int index = 0; 634 631 dma_addr_t addr; 635 632 u64 hw_dequeue; 636 633 bool cycle_found = false; ··· 665 668 hw_dequeue = xhci_get_hw_deq(xhci, dev, ep_index, stream_id); 666 669 new_seg = ep_ring->deq_seg; 667 670 new_deq = ep_ring->dequeue; 668 - 669 - /* 670 - * Quirk: xHC write-back of the DCS field in the hardware dequeue 671 - * pointer is wrong - use the cycle state of the TRB pointed to by 672 - * the dequeue pointer. 673 - */ 674 - if (xhci->quirks & XHCI_EP_CTX_BROKEN_DCS && 675 - !(ep->ep_state & EP_HAS_STREAMS)) 676 - halted_seg = trb_in_td(xhci, td->start_seg, 677 - td->first_trb, td->last_trb, 678 - hw_dequeue & ~0xf, false); 679 - if (halted_seg) { 680 - index = ((dma_addr_t)(hw_dequeue & ~0xf) - halted_seg->dma) / 681 - sizeof(*halted_trb); 682 - halted_trb = &halted_seg->trbs[index]; 683 - new_cycle = halted_trb->generic.field[3] & 0x1; 684 - xhci_dbg(xhci, "Endpoint DCS = %d TRB index = %d cycle = %d\n", 685 - (u8)(hw_dequeue & 0x1), index, new_cycle); 686 - } else { 687 - new_cycle = hw_dequeue & 0x1; 688 - } 671 + new_cycle = hw_dequeue & 0x1; 689 672 690 673 /* 691 674 * We want to find the pointer, segment and cycle state of the new trb
+4 -4
drivers/usb/host/xhci-tegra.c
··· 1145 1145 int err; 1146 1146 1147 1147 tegra->genpd_dev_host = dev_pm_domain_attach_by_name(dev, "xusb_host"); 1148 - if (IS_ERR_OR_NULL(tegra->genpd_dev_host)) { 1149 - err = PTR_ERR(tegra->genpd_dev_host) ? : -ENODATA; 1148 + if (IS_ERR(tegra->genpd_dev_host)) { 1149 + err = PTR_ERR(tegra->genpd_dev_host); 1150 1150 dev_err(dev, "failed to get host pm-domain: %d\n", err); 1151 1151 return err; 1152 1152 } 1153 1153 1154 1154 tegra->genpd_dev_ss = dev_pm_domain_attach_by_name(dev, "xusb_ss"); 1155 - if (IS_ERR_OR_NULL(tegra->genpd_dev_ss)) { 1156 - err = PTR_ERR(tegra->genpd_dev_ss) ? : -ENODATA; 1155 + if (IS_ERR(tegra->genpd_dev_ss)) { 1156 + err = PTR_ERR(tegra->genpd_dev_ss); 1157 1157 dev_err(dev, "failed to get superspeed pm-domain: %d\n", err); 1158 1158 return err; 1159 1159 }
+4 -4
drivers/usb/misc/ehset.c
··· 77 77 switch (test_pid) { 78 78 case TEST_SE0_NAK_PID: 79 79 ret = ehset_prepare_port_for_testing(hub_udev, portnum); 80 - if (!ret) 80 + if (ret < 0) 81 81 break; 82 82 ret = usb_control_msg_send(hub_udev, 0, USB_REQ_SET_FEATURE, 83 83 USB_RT_PORT, USB_PORT_FEAT_TEST, ··· 86 86 break; 87 87 case TEST_J_PID: 88 88 ret = ehset_prepare_port_for_testing(hub_udev, portnum); 89 - if (!ret) 89 + if (ret < 0) 90 90 break; 91 91 ret = usb_control_msg_send(hub_udev, 0, USB_REQ_SET_FEATURE, 92 92 USB_RT_PORT, USB_PORT_FEAT_TEST, ··· 95 95 break; 96 96 case TEST_K_PID: 97 97 ret = ehset_prepare_port_for_testing(hub_udev, portnum); 98 - if (!ret) 98 + if (ret < 0) 99 99 break; 100 100 ret = usb_control_msg_send(hub_udev, 0, USB_REQ_SET_FEATURE, 101 101 USB_RT_PORT, USB_PORT_FEAT_TEST, ··· 104 104 break; 105 105 case TEST_PACKET_PID: 106 106 ret = ehset_prepare_port_for_testing(hub_udev, portnum); 107 - if (!ret) 107 + if (ret < 0) 108 108 break; 109 109 ret = usb_control_msg_send(hub_udev, 0, USB_REQ_SET_FEATURE, 110 110 USB_RT_PORT, USB_PORT_FEAT_TEST,
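All four branches above had the check inverted: ehset_prepare_port_for_testing(), like usb_control_msg_send() underneath it, returns 0 on success and a negative errno on failure, so "if (!ret) break;" aborted the test on success. The general rule, reduced to placeholder steps:

static int example_step_one(void) { return 0; }  /* placeholder */
static int example_step_two(void) { return 0; }  /* placeholder */

static int example_sequence(void)
{
        int ret;

        ret = example_step_one();       /* 0 on success, -errno on failure */
        if (ret < 0)
                return ret;             /* "if (!ret)" would bail on success */

        return example_step_two();
}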
+6
drivers/usb/serial/option.c
··· 251 251 #define QUECTEL_PRODUCT_EM061K_LTA 0x0123 252 252 #define QUECTEL_PRODUCT_EM061K_LMS 0x0124 253 253 #define QUECTEL_PRODUCT_EC25 0x0125 254 + #define QUECTEL_PRODUCT_EM060K_128 0x0128 254 255 #define QUECTEL_PRODUCT_EG91 0x0191 255 256 #define QUECTEL_PRODUCT_EG95 0x0195 256 257 #define QUECTEL_PRODUCT_BG96 0x0296 ··· 269 268 #define QUECTEL_PRODUCT_RM520N 0x0801 270 269 #define QUECTEL_PRODUCT_EC200U 0x0901 271 270 #define QUECTEL_PRODUCT_EC200S_CN 0x6002 271 + #define QUECTEL_PRODUCT_EC200A 0x6005 272 272 #define QUECTEL_PRODUCT_EM061K_LWW 0x6008 273 273 #define QUECTEL_PRODUCT_EM061K_LCN 0x6009 274 274 #define QUECTEL_PRODUCT_EC200T 0x6026 ··· 1199 1197 { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K, 0xff, 0x00, 0x40) }, 1200 1198 { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K, 0xff, 0xff, 0x30) }, 1201 1199 { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K, 0xff, 0xff, 0x40) }, 1200 + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_128, 0xff, 0xff, 0x30) }, 1201 + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_128, 0xff, 0x00, 0x40) }, 1202 + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_128, 0xff, 0xff, 0x40) }, 1202 1203 { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM061K_LCN, 0xff, 0xff, 0x30) }, 1203 1204 { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM061K_LCN, 0xff, 0x00, 0x40) }, 1204 1205 { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM061K_LCN, 0xff, 0xff, 0x40) }, ··· 1230 1225 { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM520N, 0xff, 0, 0) }, 1231 1226 { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, 0x0900, 0xff, 0, 0), /* RM500U-CN */ 1232 1227 .driver_info = ZLP }, 1228 + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC200A, 0xff, 0, 0) }, 1233 1229 { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC200U, 0xff, 0, 0) }, 1234 1230 { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC200S_CN, 0xff, 0, 0) }, 1235 1231 { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC200T, 0xff, 0, 0) },
+40 -33
drivers/usb/serial/usb-serial-simple.c
··· 38 38 { USB_DEVICE(0x0a21, 0x8001) } /* MMT-7305WW */ 39 39 DEVICE(carelink, CARELINK_IDS); 40 40 41 - /* ZIO Motherboard USB driver */ 42 - #define ZIO_IDS() \ 43 - { USB_DEVICE(0x1CBE, 0x0103) } 44 - DEVICE(zio, ZIO_IDS); 45 - 46 - /* Funsoft Serial USB driver */ 47 - #define FUNSOFT_IDS() \ 48 - { USB_DEVICE(0x1404, 0xcddc) } 49 - DEVICE(funsoft, FUNSOFT_IDS); 50 - 51 41 /* Infineon Flashloader driver */ 52 42 #define FLASHLOADER_IDS() \ 53 43 { USB_DEVICE_INTERFACE_CLASS(0x058b, 0x0041, USB_CLASS_CDC_DATA) }, \ 54 44 { USB_DEVICE(0x8087, 0x0716) }, \ 55 45 { USB_DEVICE(0x8087, 0x0801) } 56 46 DEVICE(flashloader, FLASHLOADER_IDS); 47 + 48 + /* Funsoft Serial USB driver */ 49 + #define FUNSOFT_IDS() \ 50 + { USB_DEVICE(0x1404, 0xcddc) } 51 + DEVICE(funsoft, FUNSOFT_IDS); 57 52 58 53 /* Google Serial USB SubClass */ 59 54 #define GOOGLE_IDS() \ ··· 58 63 0x01) } 59 64 DEVICE(google, GOOGLE_IDS); 60 65 66 + /* HP4x (48/49) Generic Serial driver */ 67 + #define HP4X_IDS() \ 68 + { USB_DEVICE(0x03f0, 0x0121) } 69 + DEVICE(hp4x, HP4X_IDS); 70 + 71 + /* KAUFMANN RKS+CAN VCP */ 72 + #define KAUFMANN_IDS() \ 73 + { USB_DEVICE(0x16d0, 0x0870) } 74 + DEVICE(kaufmann, KAUFMANN_IDS); 75 + 61 76 /* Libtransistor USB console */ 62 77 #define LIBTRANSISTOR_IDS() \ 63 78 { USB_DEVICE(0x1209, 0x8b00) } 64 79 DEVICE(libtransistor, LIBTRANSISTOR_IDS); 65 - 66 - /* ViVOpay USB Serial Driver */ 67 - #define VIVOPAY_IDS() \ 68 - { USB_DEVICE(0x1d5f, 0x1004) } /* ViVOpay 8800 */ 69 - DEVICE(vivopay, VIVOPAY_IDS); 70 80 71 81 /* Motorola USB Phone driver */ 72 82 #define MOTO_IDS() \ ··· 101 101 { USB_DEVICE(0x09d7, 0x0100) } /* NovAtel FlexPack GPS */ 102 102 DEVICE_N(novatel_gps, NOVATEL_IDS, 3); 103 103 104 - /* HP4x (48/49) Generic Serial driver */ 105 - #define HP4X_IDS() \ 106 - { USB_DEVICE(0x03f0, 0x0121) } 107 - DEVICE(hp4x, HP4X_IDS); 104 + /* Siemens USB/MPI adapter */ 105 + #define SIEMENS_IDS() \ 106 + { USB_DEVICE(0x908, 0x0004) } 107 + DEVICE(siemens_mpi, SIEMENS_IDS); 108 108 109 109 /* Suunto ANT+ USB Driver */ 110 110 #define SUUNTO_IDS() \ ··· 112 112 { USB_DEVICE(0x0fcf, 0x1009) } /* Dynastream ANT USB-m Stick */ 113 113 DEVICE(suunto, SUUNTO_IDS); 114 114 115 - /* Siemens USB/MPI adapter */ 116 - #define SIEMENS_IDS() \ 117 - { USB_DEVICE(0x908, 0x0004) } 118 - DEVICE(siemens_mpi, SIEMENS_IDS); 115 + /* ViVOpay USB Serial Driver */ 116 + #define VIVOPAY_IDS() \ 117 + { USB_DEVICE(0x1d5f, 0x1004) } /* ViVOpay 8800 */ 118 + DEVICE(vivopay, VIVOPAY_IDS); 119 + 120 + /* ZIO Motherboard USB driver */ 121 + #define ZIO_IDS() \ 122 + { USB_DEVICE(0x1CBE, 0x0103) } 123 + DEVICE(zio, ZIO_IDS); 119 124 120 125 /* All of the above structures mushed into two lists */ 121 126 static struct usb_serial_driver * const serial_drivers[] = { 122 127 &carelink_device, 123 - &zio_device, 124 - &funsoft_device, 125 128 &flashloader_device, 129 + &funsoft_device, 126 130 &google_device, 131 + &hp4x_device, 132 + &kaufmann_device, 127 133 &libtransistor_device, 128 - &vivopay_device, 129 134 &moto_modem_device, 130 135 &motorola_tetra_device, 131 136 &nokia_device, 132 137 &novatel_gps_device, 133 - &hp4x_device, 134 - &suunto_device, 135 138 &siemens_mpi_device, 139 + &suunto_device, 140 + &vivopay_device, 141 + &zio_device, 136 142 NULL 137 143 }; 138 144 139 145 static const struct usb_device_id id_table[] = { 140 146 CARELINK_IDS(), 141 - ZIO_IDS(), 142 - FUNSOFT_IDS(), 143 147 FLASHLOADER_IDS(), 148 + FUNSOFT_IDS(), 144 149 GOOGLE_IDS(), 150 + HP4X_IDS(), 151 + KAUFMANN_IDS(), 145 152 LIBTRANSISTOR_IDS(), 
146 - VIVOPAY_IDS(), 147 153 MOTO_IDS(), 148 154 MOTOROLA_TETRA_IDS(), 149 155 NOKIA_IDS(), 150 156 NOVATEL_IDS(), 151 - HP4X_IDS(), 152 - SUUNTO_IDS(), 153 157 SIEMENS_IDS(), 158 + SUUNTO_IDS(), 159 + VIVOPAY_IDS(), 160 + ZIO_IDS(), 154 161 { }, 155 162 }; 156 163 MODULE_DEVICE_TABLE(usb, id_table);
+8 -7
drivers/usb/typec/class.c
··· 1277 1277 { 1278 1278 struct typec_port *port = to_typec_port(dev); 1279 1279 struct usb_power_delivery **pds; 1280 - struct usb_power_delivery *pd; 1281 - int ret = 0; 1280 + int i, ret = 0; 1282 1281 1283 1282 if (!port->ops || !port->ops->pd_get) 1284 1283 return -EOPNOTSUPP; ··· 1286 1287 if (!pds) 1287 1288 return 0; 1288 1289 1289 - for (pd = pds[0]; pd; pd++) { 1290 - if (pd == port->pd) 1291 - ret += sysfs_emit(buf + ret, "[%s] ", dev_name(&pd->dev)); 1290 + for (i = 0; pds[i]; i++) { 1291 + if (pds[i] == port->pd) 1292 + ret += sysfs_emit_at(buf, ret, "[%s] ", dev_name(&pds[i]->dev)); 1292 1293 else 1293 - ret += sysfs_emit(buf + ret, "%s ", dev_name(&pd->dev)); 1294 + ret += sysfs_emit_at(buf, ret, "%s ", dev_name(&pds[i]->dev)); 1294 1295 } 1295 1296 1296 1297 buf[ret - 1] = '\n'; ··· 2287 2288 return ERR_PTR(ret); 2288 2289 } 2289 2290 2291 + port->pd = cap->pd; 2292 + 2290 2293 ret = device_add(&port->dev); 2291 2294 if (ret) { 2292 2295 dev_err(parent, "failed to register port (%d)\n", ret); ··· 2296 2295 return ERR_PTR(ret); 2297 2296 } 2298 2297 2299 - ret = typec_port_set_usb_power_delivery(port, cap->pd); 2298 + ret = usb_power_delivery_link_device(port->pd, &port->dev); 2300 2299 if (ret) { 2301 2300 dev_err(&port->dev, "failed to link pd\n"); 2302 2301 device_unregister(&port->dev);
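The typec show() fix makes two corrections worth calling out: sysfs_emit() WARNs and writes nothing unless it is handed the page-aligned start of the buffer, so appending requires sysfs_emit_at() with a running offset; and the pd list is a NULL-terminated array of pointers, which must be walked by index rather than by incrementing a struct pointer. Sketch with invented names:

#include <linux/sysfs.h>

/* 'items' is a NULL-terminated array of pointers, like the pd list. */
static ssize_t example_show_list(char *buf, const char **items,
                                 const char *active)
{
        int i, len = 0;

        for (i = 0; items[i]; i++)
                len += sysfs_emit_at(buf, len,
                                     items[i] == active ? "[%s] " : "%s ",
                                     items[i]);

        if (len)
                buf[len - 1] = '\n';    /* last space becomes the newline */
        return len;
}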
+2 -2
drivers/usb/typec/tcpm/qcom/qcom_pmic_typec.c
··· 209 209 platform_set_drvdata(pdev, tcpm); 210 210 211 211 tcpm->tcpc.fwnode = device_get_named_child_node(tcpm->dev, "connector"); 212 - if (IS_ERR(tcpm->tcpc.fwnode)) 213 - return PTR_ERR(tcpm->tcpc.fwnode); 212 + if (!tcpm->tcpc.fwnode) 213 + return -EINVAL; 214 214 215 215 tcpm->tcpm_port = tcpm_register_port(tcpm->dev, &tcpm->tcpc); 216 216 if (IS_ERR(tcpm->tcpm_port)) {
+2 -2
drivers/usb/typec/ucsi/ucsi.c
··· 785 785 if (!con->partner) 786 786 return; 787 787 788 + typec_set_mode(con->port, TYPEC_STATE_SAFE); 789 + 788 790 ucsi_unregister_partner_pdos(con); 789 791 ucsi_unregister_altmodes(con, UCSI_RECIPIENT_SOP); 790 792 typec_unregister_partner(con->partner); ··· 827 825 UCSI_CONSTAT_PARTNER_FLAG_USB) 828 826 typec_set_mode(con->port, TYPEC_STATE_USB); 829 827 } 830 - } else { 831 - typec_set_mode(con->port, TYPEC_STATE_SAFE); 832 828 } 833 829 834 830 /* Only notify USB controller if partner supports USB data */
+5 -11
drivers/xen/events/events_base.c
··· 112 112 unsigned int irq_epoch; /* If eoi_cpu valid: irq_epoch of event */ 113 113 u64 eoi_time; /* Time in jiffies when to EOI. */ 114 114 raw_spinlock_t lock; 115 + bool is_static; /* Is event channel static */ 115 116 116 117 union { 117 118 unsigned short virq; ··· 816 815 irq_free_desc(irq); 817 816 } 818 817 819 - static void xen_evtchn_close(evtchn_port_t port) 820 - { 821 - struct evtchn_close close; 822 - 823 - close.port = port; 824 - if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0) 825 - BUG(); 826 - } 827 - 828 818 /* Not called for lateeoi events. */ 829 819 static void event_handler_exit(struct irq_info *info) 830 820 { ··· 974 982 unsigned int cpu = cpu_from_irq(irq); 975 983 struct xenbus_device *dev; 976 984 977 - xen_evtchn_close(evtchn); 985 + if (!info->is_static) 986 + xen_evtchn_close(evtchn); 978 987 979 988 switch (type_from_irq(irq)) { 980 989 case IRQT_VIRQ: ··· 1567 1574 } 1568 1575 EXPORT_SYMBOL_GPL(xen_set_irq_priority); 1569 1576 1570 - int evtchn_make_refcounted(evtchn_port_t evtchn) 1577 + int evtchn_make_refcounted(evtchn_port_t evtchn, bool is_static) 1571 1578 { 1572 1579 int irq = get_evtchn_to_irq(evtchn); 1573 1580 struct irq_info *info; ··· 1583 1590 WARN_ON(info->refcnt != -1); 1584 1591 1585 1592 info->refcnt = 1; 1593 + info->is_static = is_static; 1586 1594 1587 1595 return 0; 1588 1596 }
+26 -9
drivers/xen/evtchn.c
··· 366 366 return 0; 367 367 } 368 368 369 - static int evtchn_bind_to_user(struct per_user_data *u, evtchn_port_t port) 369 + static int evtchn_bind_to_user(struct per_user_data *u, evtchn_port_t port, 370 + bool is_static) 370 371 { 371 372 struct user_evtchn *evtchn; 372 - struct evtchn_close close; 373 373 int rc = 0; 374 374 375 375 /* ··· 402 402 if (rc < 0) 403 403 goto err; 404 404 405 - rc = evtchn_make_refcounted(port); 405 + rc = evtchn_make_refcounted(port, is_static); 406 406 return rc; 407 407 408 408 err: 409 409 /* bind failed, should close the port now */ 410 - close.port = port; 411 - if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0) 412 - BUG(); 410 + if (!is_static) 411 + xen_evtchn_close(port); 412 + 413 413 del_evtchn(u, evtchn); 414 414 return rc; 415 415 } ··· 456 456 if (rc != 0) 457 457 break; 458 458 459 - rc = evtchn_bind_to_user(u, bind_virq.port); 459 + rc = evtchn_bind_to_user(u, bind_virq.port, false); 460 460 if (rc == 0) 461 461 rc = bind_virq.port; 462 462 break; ··· 482 482 if (rc != 0) 483 483 break; 484 484 485 - rc = evtchn_bind_to_user(u, bind_interdomain.local_port); 485 + rc = evtchn_bind_to_user(u, bind_interdomain.local_port, false); 486 486 if (rc == 0) 487 487 rc = bind_interdomain.local_port; 488 488 break; ··· 507 507 if (rc != 0) 508 508 break; 509 509 510 - rc = evtchn_bind_to_user(u, alloc_unbound.port); 510 + rc = evtchn_bind_to_user(u, alloc_unbound.port, false); 511 511 if (rc == 0) 512 512 rc = alloc_unbound.port; 513 513 break; ··· 533 533 disable_irq(irq_from_evtchn(unbind.port)); 534 534 evtchn_unbind_from_user(u, evtchn); 535 535 rc = 0; 536 + break; 537 + } 538 + 539 + case IOCTL_EVTCHN_BIND_STATIC: { 540 + struct ioctl_evtchn_bind bind; 541 + struct user_evtchn *evtchn; 542 + 543 + rc = -EFAULT; 544 + if (copy_from_user(&bind, uarg, sizeof(bind))) 545 + break; 546 + 547 + rc = -EISCONN; 548 + evtchn = find_evtchn(u, bind.port); 549 + if (evtchn) 550 + break; 551 + 552 + rc = evtchn_bind_to_user(u, bind.port, true); 536 553 break; 537 554 } 538 555
+29 -11
drivers/xen/grant-table.c
··· 498 498 static void gnttab_handle_deferred(struct timer_list *); 499 499 static DEFINE_TIMER(deferred_timer, gnttab_handle_deferred); 500 500 501 + static atomic64_t deferred_count; 502 + static atomic64_t leaked_count; 503 + static unsigned int free_per_iteration = 10; 504 + module_param(free_per_iteration, uint, 0600); 505 + 501 506 static void gnttab_handle_deferred(struct timer_list *unused) 502 507 { 503 - unsigned int nr = 10; 508 + unsigned int nr = READ_ONCE(free_per_iteration); 509 + const bool ignore_limit = nr == 0; 504 510 struct deferred_entry *first = NULL; 505 511 unsigned long flags; 512 + size_t freed = 0; 506 513 507 514 spin_lock_irqsave(&gnttab_list_lock, flags); 508 - while (nr--) { 515 + while ((ignore_limit || nr--) && !list_empty(&deferred_list)) { 509 516 struct deferred_entry *entry 510 517 = list_first_entry(&deferred_list, 511 518 struct deferred_entry, list); ··· 522 515 list_del(&entry->list); 523 516 spin_unlock_irqrestore(&gnttab_list_lock, flags); 524 517 if (_gnttab_end_foreign_access_ref(entry->ref)) { 518 + uint64_t ret = atomic64_dec_return(&deferred_count); 519 + 525 520 put_free_entry(entry->ref); 526 - pr_debug("freeing g.e. %#x (pfn %#lx)\n", 527 - entry->ref, page_to_pfn(entry->page)); 521 + pr_debug("freeing g.e. %#x (pfn %#lx), %llu remaining\n", 522 + entry->ref, page_to_pfn(entry->page), 523 + (unsigned long long)ret); 528 524 put_page(entry->page); 525 + freed++; 529 526 kfree(entry); 530 527 entry = NULL; 531 528 } else { ··· 541 530 spin_lock_irqsave(&gnttab_list_lock, flags); 542 531 if (entry) 543 532 list_add_tail(&entry->list, &deferred_list); 544 - else if (list_empty(&deferred_list)) 545 - break; 546 533 } 547 - if (!list_empty(&deferred_list) && !timer_pending(&deferred_timer)) { 534 + if (list_empty(&deferred_list)) 535 + WARN_ON(atomic64_read(&deferred_count)); 536 + else if (!timer_pending(&deferred_timer)) { 548 537 deferred_timer.expires = jiffies + HZ; 549 538 add_timer(&deferred_timer); 550 539 } 551 540 spin_unlock_irqrestore(&gnttab_list_lock, flags); 541 + pr_debug("Freed %zu references", freed); 552 542 } 553 543 554 544 static void gnttab_add_deferred(grant_ref_t ref, struct page *page) 555 545 { 556 546 struct deferred_entry *entry; 557 547 gfp_t gfp = (in_atomic() || irqs_disabled()) ? GFP_ATOMIC : GFP_KERNEL; 558 - const char *what = KERN_WARNING "leaking"; 548 + uint64_t leaked, deferred; 559 549 560 550 entry = kmalloc(sizeof(*entry), gfp); 561 551 if (!page) { ··· 579 567 add_timer(&deferred_timer); 580 568 } 581 569 spin_unlock_irqrestore(&gnttab_list_lock, flags); 582 - what = KERN_DEBUG "deferring"; 570 + deferred = atomic64_inc_return(&deferred_count); 571 + leaked = atomic64_read(&leaked_count); 572 + pr_debug("deferring g.e. %#x (pfn %#lx) (total deferred %llu, total leaked %llu)\n", 573 + ref, page ? page_to_pfn(page) : -1, deferred, leaked); 574 + } else { 575 + deferred = atomic64_read(&deferred_count); 576 + leaked = atomic64_inc_return(&leaked_count); 577 + pr_warn("leaking g.e. %#x (pfn %#lx) (total deferred %llu, total leaked %llu)\n", 578 + ref, page ? page_to_pfn(page) : -1, deferred, leaked); 583 579 } 584 - printk("%s g.e. %#x (pfn %#lx)\n", 585 - what, ref, page ? page_to_pfn(page) : -1); 586 580 } 587 581 588 582 int gnttab_try_end_foreign_access(grant_ref_t ref)
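The reworked deferred-free loop treats a free_per_iteration of 0 as "no limit", draining the whole list in one timer pass while still re-arming the timer for entries that are not yet safe to free. A self-contained sketch of that idiom, with userspace stand-ins for READ_ONCE(), the lock and the kernel list:

    #include <stdbool.h>
    #include <stddef.h>

    static unsigned int free_per_iteration = 10;  /* the 0600 module param */

    static size_t drain_deferred(size_t pending)
    {
            unsigned int nr = free_per_iteration; /* READ_ONCE() in the kernel */
            const bool ignore_limit = nr == 0;    /* 0 means drain everything */
            size_t freed = 0;

            while ((ignore_limit || nr--) && pending > 0) {
                    pending--;                    /* free one grant reference */
                    freed++;
            }
            return freed;  /* whatever remains is retried from the timer */
    }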
+3
drivers/xen/xenbus/xenbus_probe.c
··· 811 811 812 812 static int __init xenbus_probe_initcall(void) 813 813 { 814 + if (!xen_domain()) 815 + return -ENODEV; 816 + 814 817 /* 815 818 * Probe XenBus here in the XS_PV case, and also XS_HVM unless we 816 819 * need to wait for the platform PCI device to come up or
+3 -3
fs/9p/fid.h
··· 46 46 * NOTE: these are set after open so only reflect 9p client not 47 47 * underlying file system on server. 48 48 */ 49 - static inline void v9fs_fid_add_modes(struct p9_fid *fid, int s_flags, 50 - int s_cache, unsigned int f_flags) 49 + static inline void v9fs_fid_add_modes(struct p9_fid *fid, unsigned int s_flags, 50 + unsigned int s_cache, unsigned int f_flags) 51 51 { 52 52 if (fid->qid.type != P9_QTFILE) 53 53 return; ··· 57 57 (s_flags & V9FS_DIRECT_IO) || (f_flags & O_DIRECT)) { 58 58 fid->mode |= P9L_DIRECT; /* no read or write cache */ 59 59 } else if ((!(s_cache & CACHE_WRITEBACK)) || 60 - (f_flags & O_DSYNC) | (s_flags & V9FS_SYNC)) { 60 + (f_flags & O_DSYNC) || (s_flags & V9FS_SYNC)) { 61 61 fid->mode |= P9L_NOWRITECACHE; 62 62 } 63 63 }
-2
fs/9p/v9fs.c
··· 545 545 p9_client_begin_disconnect(v9ses->clnt); 546 546 } 547 547 548 - extern int v9fs_error_init(void); 549 - 550 548 static struct kobject *v9fs_kobj; 551 549 552 550 #ifdef CONFIG_9P_FSCACHE
+1 -1
fs/9p/v9fs.h
··· 108 108 109 109 struct v9fs_session_info { 110 110 /* options */ 111 - unsigned char flags; 111 + unsigned int flags; 112 112 unsigned char nodev; 113 113 unsigned short debug; 114 114 unsigned int afid;
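Two related 9p fixes land here: the accidental bitwise `|` between the O_DSYNC and V9FS_SYNC tests in fid.h becomes a logical `||`, and the session flags field above grows from unsigned char to unsigned int. The width matters because flag bits above bit 7 are silently truncated in a one-byte field; a minimal demonstration, using a hypothetical high flag bit in place of the real V9FS flags:

    #include <stdio.h>

    #define EXAMPLE_FLAG (1u << 9)  /* hypothetical flag above bit 7 */

    int main(void)
    {
            unsigned char narrow = EXAMPLE_FLAG;  /* truncates to 0 */
            unsigned int  wide   = EXAMPLE_FLAG;

            /* Prints "narrow=0 wide=512": the narrow field drops the bit,
             * so every later (flags & EXAMPLE_FLAG) test quietly fails. */
            printf("narrow=%u wide=%u\n",
                   narrow & EXAMPLE_FLAG, wide & EXAMPLE_FLAG);
            return 0;
    }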
+3 -2
fs/9p/vfs_dir.c
··· 208 208 struct p9_fid *fid; 209 209 __le32 version; 210 210 loff_t i_size; 211 - int retval = 0; 211 + int retval = 0, put_err; 212 212 213 213 fid = filp->private_data; 214 214 p9_debug(P9_DEBUG_VFS, "inode: %p filp: %p fid: %d\n", ··· 221 221 spin_lock(&inode->i_lock); 222 222 hlist_del(&fid->ilist); 223 223 spin_unlock(&inode->i_lock); 224 - retval = p9_fid_put(fid); 224 + put_err = p9_fid_put(fid); 225 + retval = retval < 0 ? retval : put_err; 225 226 } 226 227 227 228 if ((filp->f_mode & FMODE_WRITE)) {
+1 -4
fs/9p/vfs_file.c
··· 505 505 p9_debug(P9_DEBUG_MMAP, "filp :%p\n", filp); 506 506 507 507 if (!(v9ses->cache & CACHE_WRITEBACK)) { 508 - p9_debug(P9_DEBUG_CACHE, "(no mmap mode)"); 509 - if (vma->vm_flags & VM_MAYSHARE) 510 - return -ENODEV; 511 - invalidate_inode_pages2(filp->f_mapping); 508 + p9_debug(P9_DEBUG_CACHE, "(read-only mmap mode)"); 512 509 return generic_file_readonly_mmap(filp, vma); 513 510 } 514 511
-6
fs/9p/vfs_inode.c
··· 163 163 { 164 164 int ret; 165 165 166 - ret = 0; 167 166 switch (uflags&3) { 168 167 default: 169 168 case O_RDONLY: ··· 602 603 603 604 p9_debug(P9_DEBUG_VFS, "name %pd\n", dentry); 604 605 605 - err = 0; 606 606 name = dentry->d_name.name; 607 607 dfid = v9fs_parent_fid(dentry); 608 608 if (IS_ERR(dfid)) { ··· 813 815 if (!(flags & O_CREAT) || d_really_is_positive(dentry)) 814 816 return finish_no_open(file, res); 815 817 816 - err = 0; 817 - 818 818 v9ses = v9fs_inode2v9ses(dir); 819 819 perm = unixmode2p9mode(v9ses, mode); 820 820 p9_omode = v9fs_uflags2omode(flags, v9fs_proto_dotu(v9ses)); ··· 908 912 return -EINVAL; 909 913 910 914 p9_debug(P9_DEBUG_VFS, "\n"); 911 - retval = 0; 912 915 old_inode = d_inode(old_dentry); 913 916 new_inode = d_inode(new_dentry); 914 917 v9ses = v9fs_inode2v9ses(old_inode); ··· 1061 1066 if (retval) 1062 1067 return retval; 1063 1068 1064 - retval = -EPERM; 1065 1069 v9ses = v9fs_dentry2v9ses(dentry); 1066 1070 if (iattr->ia_valid & ATTR_FILE) { 1067 1071 fid = iattr->ia_file->private_data;
-1
fs/9p/vfs_inode_dotl.c
··· 366 366 struct posix_acl *dacl = NULL, *pacl = NULL; 367 367 368 368 p9_debug(P9_DEBUG_VFS, "name %pd\n", dentry); 369 - err = 0; 370 369 v9ses = v9fs_inode2v9ses(dir); 371 370 372 371 omode |= S_IFDIR;
-12
fs/autofs/Kconfig
··· 1 1 # SPDX-License-Identifier: GPL-2.0-only 2 - config AUTOFS4_FS 3 - tristate "Old Kconfig name for Kernel automounter support" 4 - select AUTOFS_FS 5 - help 6 - This name exists for people to just automatically pick up the 7 - new name of the autofs Kconfig option. All it does is select 8 - the new option name. 9 - 10 - It will go away in a release or two as people have 11 - transitioned to just plain AUTOFS_FS. 12 - 13 2 config AUTOFS_FS 14 3 tristate "Kernel automounter support (supports v3, v4 and v5)" 15 - default n 16 4 help 17 5 The automounter is a tool to automatically mount remote file systems 18 6 on demand. This implementation is partially kernel-based to reduce
+34 -17
fs/btrfs/block-group.c
··· 499 499 * used yet since their free space will be released as soon as the transaction 500 500 * commits. 501 501 */ 502 - u64 add_new_free_space(struct btrfs_block_group *block_group, u64 start, u64 end) 502 + int add_new_free_space(struct btrfs_block_group *block_group, u64 start, u64 end, 503 + u64 *total_added_ret) 503 504 { 504 505 struct btrfs_fs_info *info = block_group->fs_info; 505 - u64 extent_start, extent_end, size, total_added = 0; 506 + u64 extent_start, extent_end, size; 506 507 int ret; 508 + 509 + if (total_added_ret) 510 + *total_added_ret = 0; 507 511 508 512 while (start < end) { 509 513 ret = find_first_extent_bit(&info->excluded_extents, start, ··· 521 517 start = extent_end + 1; 522 518 } else if (extent_start > start && extent_start < end) { 523 519 size = extent_start - start; 524 - total_added += size; 525 520 ret = btrfs_add_free_space_async_trimmed(block_group, 526 521 start, size); 527 - BUG_ON(ret); /* -ENOMEM or logic error */ 522 + if (ret) 523 + return ret; 524 + if (total_added_ret) 525 + *total_added_ret += size; 528 526 start = extent_end + 1; 529 527 } else { 530 528 break; ··· 535 529 536 530 if (start < end) { 537 531 size = end - start; 538 - total_added += size; 539 532 ret = btrfs_add_free_space_async_trimmed(block_group, start, 540 533 size); 541 - BUG_ON(ret); /* -ENOMEM or logic error */ 534 + if (ret) 535 + return ret; 536 + if (total_added_ret) 537 + *total_added_ret += size; 542 538 } 543 539 544 - return total_added; 540 + return 0; 545 541 } 546 542 547 543 /* ··· 787 779 788 780 if (key.type == BTRFS_EXTENT_ITEM_KEY || 789 781 key.type == BTRFS_METADATA_ITEM_KEY) { 790 - total_found += add_new_free_space(block_group, last, 791 - key.objectid); 782 + u64 space_added; 783 + 784 + ret = add_new_free_space(block_group, last, key.objectid, 785 + &space_added); 786 + if (ret) 787 + goto out; 788 + total_found += space_added; 792 789 if (key.type == BTRFS_METADATA_ITEM_KEY) 793 790 last = key.objectid + 794 791 fs_info->nodesize; ··· 808 795 } 809 796 path->slots[0]++; 810 797 } 811 - ret = 0; 812 798 813 - total_found += add_new_free_space(block_group, last, 814 - block_group->start + block_group->length); 815 - 799 + ret = add_new_free_space(block_group, last, 800 + block_group->start + block_group->length, 801 + NULL); 816 802 out: 817 803 btrfs_free_path(path); 818 804 return ret; ··· 2306 2294 btrfs_free_excluded_extents(cache); 2307 2295 } else if (cache->used == 0) { 2308 2296 cache->cached = BTRFS_CACHE_FINISHED; 2309 - add_new_free_space(cache, cache->start, 2310 - cache->start + cache->length); 2297 + ret = add_new_free_space(cache, cache->start, 2298 + cache->start + cache->length, NULL); 2311 2299 btrfs_free_excluded_extents(cache); 2300 + if (ret) 2301 + goto error; 2312 2302 } 2313 2303 2314 2304 ret = btrfs_add_block_group_cache(info, cache); ··· 2754 2740 return ERR_PTR(ret); 2755 2741 } 2756 2742 2757 - add_new_free_space(cache, chunk_offset, chunk_offset + size); 2758 - 2743 + ret = add_new_free_space(cache, chunk_offset, chunk_offset + size, NULL); 2759 2744 btrfs_free_excluded_extents(cache); 2745 + if (ret) { 2746 + btrfs_put_block_group(cache); 2747 + return ERR_PTR(ret); 2748 + } 2760 2749 2761 2750 /* 2762 2751 * Ensure the corresponding space_info object is created and
+2 -2
fs/btrfs/block-group.h
··· 289 289 void btrfs_put_caching_control(struct btrfs_caching_control *ctl); 290 290 struct btrfs_caching_control *btrfs_get_caching_control( 291 291 struct btrfs_block_group *cache); 292 - u64 add_new_free_space(struct btrfs_block_group *block_group, 293 - u64 start, u64 end); 292 + int add_new_free_space(struct btrfs_block_group *block_group, 293 + u64 start, u64 end, u64 *total_added_ret); 294 294 struct btrfs_trans_handle *btrfs_start_trans_remove_block_group( 295 295 struct btrfs_fs_info *fs_info, 296 296 const u64 chunk_offset);
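add_new_free_space() now propagates failure instead of BUG_ON() and hands the accumulated size back through an optional out-parameter, since several callers (the zero-used fast path, block-group creation) pass NULL for it. A compact sketch of the pattern under those assumptions; do_add() is a hypothetical stand-in for btrfs_add_free_space_async_trimmed():

    #include <errno.h>
    #include <stdint.h>

    typedef uint64_t u64;

    static int do_add(u64 start, u64 size)  /* may fail, e.g. -ENOMEM */
    {
            return size ? 0 : -EINVAL;
    }

    static int add_range(u64 start, u64 end, u64 *total_added_ret)
    {
            int ret;

            if (total_added_ret)
                    *total_added_ret = 0;  /* defined even on early error */

            ret = do_add(start, end - start);
            if (ret)
                    return ret;            /* propagate, don't BUG_ON() */

            if (total_added_ret)
                    *total_added_ret += end - start;
            return 0;
    }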
+5
fs/btrfs/block-rsv.c
··· 349 349 } 350 350 read_unlock(&fs_info->global_root_lock); 351 351 352 + if (btrfs_fs_compat_ro(fs_info, BLOCK_GROUP_TREE)) { 353 + num_bytes += btrfs_root_used(&fs_info->block_group_root->root_item); 354 + min_items++; 355 + } 356 + 352 357 /* 353 358 * But we also want to reserve enough space so we can do the fallback 354 359 * global reserve for an unlink, which is an additional
+6 -1
fs/btrfs/disk-io.c
··· 3438 3438 * For devices supporting discard turn on discard=async automatically,
3439 3439 * unless it's already set or disabled. This could be turned off by
3440 3440 * nodiscard for the same mount.
3441 + *
3442 + * The zoned mode piggybacks on the discard functionality for
3443 + * resetting a zone. There is no reason to delay the zone reset as it is
3444 + * fast enough. So, do not enable async discard for zoned mode.
3441 3445 */
3442 3446 if (!(btrfs_test_opt(fs_info, DISCARD_SYNC) ||
3443 3447 btrfs_test_opt(fs_info, DISCARD_ASYNC) ||
3444 3448 btrfs_test_opt(fs_info, NODISCARD)) &&
3445 - fs_info->fs_devices->discardable) {
3449 + fs_info->fs_devices->discardable &&
3450 + !btrfs_is_zoned(fs_info)) {
3446 3451 btrfs_set_and_info(fs_info, DISCARD_ASYNC,
3447 3452 "auto enabling async discard");
3448 3453 }
+17 -7
fs/btrfs/free-space-tree.c
··· 1515 1515 if (prev_bit == 0 && bit == 1) { 1516 1516 extent_start = offset; 1517 1517 } else if (prev_bit == 1 && bit == 0) { 1518 - total_found += add_new_free_space(block_group, 1519 - extent_start, 1520 - offset); 1518 + u64 space_added; 1519 + 1520 + ret = add_new_free_space(block_group, extent_start, 1521 + offset, &space_added); 1522 + if (ret) 1523 + goto out; 1524 + total_found += space_added; 1521 1525 if (total_found > CACHING_CTL_WAKE_UP) { 1522 1526 total_found = 0; 1523 1527 wake_up(&caching_ctl->wait); ··· 1533 1529 } 1534 1530 } 1535 1531 if (prev_bit == 1) { 1536 - total_found += add_new_free_space(block_group, extent_start, 1537 - end); 1532 + ret = add_new_free_space(block_group, extent_start, end, NULL); 1533 + if (ret) 1534 + goto out; 1538 1535 extent_count++; 1539 1536 } 1540 1537 ··· 1574 1569 end = block_group->start + block_group->length; 1575 1570 1576 1571 while (1) { 1572 + u64 space_added; 1573 + 1577 1574 ret = btrfs_next_item(root, path); 1578 1575 if (ret < 0) 1579 1576 goto out; ··· 1590 1583 ASSERT(key.type == BTRFS_FREE_SPACE_EXTENT_KEY); 1591 1584 ASSERT(key.objectid < end && key.objectid + key.offset <= end); 1592 1585 1593 - total_found += add_new_free_space(block_group, key.objectid, 1594 - key.objectid + key.offset); 1586 + ret = add_new_free_space(block_group, key.objectid, 1587 + key.objectid + key.offset, &space_added); 1588 + if (ret) 1589 + goto out; 1590 + total_found += space_added; 1595 1591 if (total_found > CACHING_CTL_WAKE_UP) { 1596 1592 total_found = 0; 1597 1593 wake_up(&caching_ctl->wait);
+8 -2
fs/btrfs/transaction.c
··· 826 826 827 827 trans = start_transaction(root, 0, TRANS_ATTACH, 828 828 BTRFS_RESERVE_NO_FLUSH, true); 829 - if (trans == ERR_PTR(-ENOENT)) 830 - btrfs_wait_for_commit(root->fs_info, 0); 829 + if (trans == ERR_PTR(-ENOENT)) { 830 + int ret; 831 + 832 + ret = btrfs_wait_for_commit(root->fs_info, 0); 833 + if (ret) 834 + return ERR_PTR(ret); 835 + } 831 836 832 837 return trans; 833 838 } ··· 936 931 } 937 932 938 933 wait_for_commit(cur_trans, TRANS_STATE_COMPLETED); 934 + ret = cur_trans->aborted; 939 935 btrfs_put_transaction(cur_trans); 940 936 out: 941 937 return ret;
+3
fs/btrfs/zoned.c
··· 805 805 return -EINVAL; 806 806 } 807 807 808 + btrfs_clear_and_info(info, DISCARD_ASYNC, 809 + "zoned: async discard ignored and disabled for zoned mode"); 810 + 808 811 return 0; 809 812 } 810 813
+1 -1
fs/ceph/metric.c
··· 216 216 struct ceph_mds_client *mdsc = 217 217 container_of(m, struct ceph_mds_client, metric); 218 218 219 - if (mdsc->stopping) 219 + if (mdsc->stopping || disable_send_metrics) 220 220 return; 221 221 222 222 if (!m->session || !check_session_state(m->session)) {
+2 -4
fs/file.c
··· 1042 1042 struct file *file = (struct file *)(v & ~3); 1043 1043 1044 1044 if (file && (file->f_mode & FMODE_ATOMIC_POS)) { 1045 - if (file_count(file) > 1) { 1046 - v |= FDPUT_POS_UNLOCK; 1047 - mutex_lock(&file->f_pos_lock); 1048 - } 1045 + v |= FDPUT_POS_UNLOCK; 1046 + mutex_lock(&file->f_pos_lock); 1049 1047 } 1050 1048 return v; 1051 1049 }
-2
fs/nfsd/nfs4state.c
··· 6341 6341 if (ZERO_STATEID(stateid) || ONE_STATEID(stateid) || 6342 6342 CLOSE_STATEID(stateid)) 6343 6343 return status; 6344 - if (!same_clid(&stateid->si_opaque.so_clid, &cl->cl_clientid)) 6345 - return status; 6346 6344 spin_lock(&cl->cl_lock); 6347 6345 s = find_stateid_locked(cl, stateid); 6348 6346 if (!s)
+2 -2
fs/nls/nls_base.c
··· 272 272 return -EINVAL; 273 273 } 274 274 275 - static struct nls_table *find_nls(char *charset) 275 + static struct nls_table *find_nls(const char *charset) 276 276 { 277 277 struct nls_table *nls; 278 278 spin_lock(&nls_lock); ··· 288 288 return nls; 289 289 } 290 290 291 - struct nls_table *load_nls(char *charset) 291 + struct nls_table *load_nls(const char *charset) 292 292 { 293 293 return try_then_request_module(find_nls(charset), "nls_%s", charset); 294 294 }
+1 -1
fs/overlayfs/super.c
··· 1460 1460 ovl_trusted_xattr_handlers; 1461 1461 sb->s_fs_info = ofs; 1462 1462 sb->s_flags |= SB_POSIXACL; 1463 - sb->s_iflags |= SB_I_SKIP_SYNC; 1463 + sb->s_iflags |= SB_I_SKIP_SYNC | SB_I_IMA_UNVERIFIABLE_SIGNATURE; 1464 1464 1465 1465 err = -ENOMEM; 1466 1466 root_dentry = ovl_get_root(sb, ctx->upper.dentry, oe);
+1 -1
fs/proc/vmcore.c
··· 132 132 u64 *ppos, bool encrypted) 133 133 { 134 134 unsigned long pfn, offset; 135 - size_t nr_bytes; 135 + ssize_t nr_bytes; 136 136 ssize_t read = 0, tmp; 137 137 int idx; 138 138
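Widening nr_bytes from size_t to ssize_t matters because a negative error stored in an unsigned local becomes a huge positive byte count, so any later `< 0` check can never fire. A two-line demonstration of the failure mode:

    #include <stdio.h>
    #include <sys/types.h>

    int main(void)
    {
            size_t  wrong = -14;  /* -EFAULT wraps to a huge positive count */
            ssize_t right = -14;  /* stays negative, so (right < 0) works */

            printf("wrong=%zu right=%zd\n", wrong, right);
            return 0;
    }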
+1
fs/smb/client/cifsglob.h
··· 1062 1062 unsigned long chans_need_reconnect; 1063 1063 /* ========= end: protected by chan_lock ======== */ 1064 1064 struct cifs_ses *dfs_root_ses; 1065 + struct nls_table *local_nls; 1065 1066 }; 1066 1067 1067 1068 static inline bool
+1 -2
fs/smb/client/cifssmb.c
··· 129 129 } 130 130 spin_unlock(&server->srv_lock); 131 131 132 - nls_codepage = load_nls_default(); 132 + nls_codepage = ses->local_nls; 133 133 134 134 /* 135 135 * need to prevent multiple threads trying to simultaneously ··· 200 200 rc = -EAGAIN; 201 201 } 202 202 203 - unload_nls(nls_codepage); 204 203 return rc; 205 204 } 206 205
+5
fs/smb/client/connect.c
··· 1842 1842 CIFS_MAX_PASSWORD_LEN)) 1843 1843 return 0; 1844 1844 } 1845 + 1846 + if (strcmp(ctx->local_nls->charset, ses->local_nls->charset)) 1847 + return 0; 1848 + 1845 1849 return 1; 1846 1850 } 1847 1851 ··· 2290 2286 2291 2287 ses->sectype = ctx->sectype; 2292 2288 ses->sign = ctx->sign; 2289 + ses->local_nls = load_nls(ctx->local_nls->charset); 2293 2290 2294 2291 /* add server as first channel */ 2295 2292 spin_lock(&ses->chan_lock);
+5
fs/smb/client/ioctl.c
··· 478 478 } 479 479 cifs_sb = CIFS_SB(inode->i_sb); 480 480 tlink = cifs_sb_tlink(cifs_sb); 481 + if (IS_ERR(tlink)) { 482 + rc = PTR_ERR(tlink); 483 + break; 484 + } 485 + 481 486 tcon = tlink_tcon(tlink); 482 487 rc = cifs_dump_full_key(tcon, (void __user *)arg); 483 488 cifs_put_tlink(tlink);
+1
fs/smb/client/misc.c
··· 95 95 return; 96 96 } 97 97 98 + unload_nls(buf_to_free->local_nls); 98 99 atomic_dec(&sesInfoAllocCount); 99 100 kfree(buf_to_free->serverOS); 100 101 kfree(buf_to_free->serverDomain);
+3 -1
fs/smb/client/sess.c
··· 1013 1013 } 1014 1014 1015 1015 1016 + /* See MS-NLMP 2.2.1.3 */ 1016 1017 int build_ntlmssp_auth_blob(unsigned char **pbuffer, 1017 1018 u16 *buflen, 1018 1019 struct cifs_ses *ses, ··· 1048 1047 1049 1048 flags = ses->ntlmssp->server_flags | NTLMSSP_REQUEST_TARGET | 1050 1049 NTLMSSP_NEGOTIATE_TARGET_INFO | NTLMSSP_NEGOTIATE_WORKSTATION_SUPPLIED; 1051 - 1050 + /* we only send version information in ntlmssp negotiate, so do not set this flag */ 1051 + flags = flags & ~NTLMSSP_NEGOTIATE_VERSION; 1052 1052 tmp = *pbuffer + sizeof(AUTHENTICATE_MESSAGE); 1053 1053 sec_blob->NegotiateFlags = cpu_to_le32(flags); 1054 1054
+1 -2
fs/smb/client/smb2pdu.c
··· 242 242 } 243 243 spin_unlock(&server->srv_lock); 244 244 245 - nls_codepage = load_nls_default(); 245 + nls_codepage = ses->local_nls; 246 246 247 247 /* 248 248 * need to prevent multiple threads trying to simultaneously ··· 324 324 rc = -EAGAIN; 325 325 } 326 326 failed: 327 - unload_nls(nls_codepage); 328 327 return rc; 329 328 } 330 329
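Taken together, the cifsglob.h, cifssmb.c, connect.c, misc.c and smb2pdu.c hunks cache the session's charset table instead of calling load_nls_default() on every reconnect. The lifecycle, abridged from the hunks above into one sketch:

    /* session setup (connect.c): take one reference on the charset table */
    ses->local_nls = load_nls(ctx->local_nls->charset);

    /* reconnect paths (cifssmb.c, smb2pdu.c): plain reuse, no load/unload */
    nls_codepage = ses->local_nls;

    /* teardown (misc.c, sesInfoFree()): drop the reference */
    unload_nls(ses->local_nls);

The connect.c check that two sessions only match when their charsets agree keeps a cached table from being shared across mounts with different iocharset= options.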
+2 -1
fs/smb/server/ksmbd_netlink.h
··· 352 352 #define KSMBD_SHARE_FLAG_STREAMS BIT(11) 353 353 #define KSMBD_SHARE_FLAG_FOLLOW_SYMLINKS BIT(12) 354 354 #define KSMBD_SHARE_FLAG_ACL_XATTR BIT(13) 355 - #define KSMBD_SHARE_FLAG_UPDATE BIT(14) 355 + #define KSMBD_SHARE_FLAG_UPDATE BIT(14) 356 + #define KSMBD_SHARE_FLAG_CROSSMNT BIT(15) 356 357 357 358 /* 358 359 * Tree connect request flags.
+6 -1
fs/smb/server/server.c
··· 286 286 static int queue_ksmbd_work(struct ksmbd_conn *conn) 287 287 { 288 288 struct ksmbd_work *work; 289 + int err; 289 290 290 291 work = ksmbd_alloc_work_struct(); 291 292 if (!work) { ··· 298 297 work->request_buf = conn->request_buf; 299 298 conn->request_buf = NULL; 300 299 301 - ksmbd_init_smb_server(work); 300 + err = ksmbd_init_smb_server(work); 301 + if (err) { 302 + ksmbd_free_work_struct(work); 303 + return 0; 304 + } 302 305 303 306 ksmbd_conn_enqueue_request(work); 304 307 atomic_inc(&conn->r_count);
+29 -18
fs/smb/server/smb2pdu.c
··· 87 87 */ 88 88 int smb2_get_ksmbd_tcon(struct ksmbd_work *work) 89 89 { 90 - struct smb2_hdr *req_hdr = smb2_get_msg(work->request_buf); 90 + struct smb2_hdr *req_hdr = ksmbd_req_buf_next(work); 91 91 unsigned int cmd = le16_to_cpu(req_hdr->Command); 92 - int tree_id; 92 + unsigned int tree_id; 93 93 94 94 if (cmd == SMB2_TREE_CONNECT_HE || 95 95 cmd == SMB2_CANCEL_HE || ··· 114 114 pr_err("The first operation in the compound does not have tcon\n"); 115 115 return -EINVAL; 116 116 } 117 - if (work->tcon->id != tree_id) { 117 + if (tree_id != UINT_MAX && work->tcon->id != tree_id) { 118 118 pr_err("tree id(%u) is different with id(%u) in first operation\n", 119 119 tree_id, work->tcon->id); 120 120 return -EINVAL; ··· 559 559 */ 560 560 int smb2_check_user_session(struct ksmbd_work *work) 561 561 { 562 - struct smb2_hdr *req_hdr = smb2_get_msg(work->request_buf); 562 + struct smb2_hdr *req_hdr = ksmbd_req_buf_next(work); 563 563 struct ksmbd_conn *conn = work->conn; 564 - unsigned int cmd = conn->ops->get_cmd_val(work); 564 + unsigned int cmd = le16_to_cpu(req_hdr->Command); 565 565 unsigned long long sess_id; 566 566 567 567 /* ··· 587 587 pr_err("The first operation in the compound does not have sess\n"); 588 588 return -EINVAL; 589 589 } 590 - if (work->sess->id != sess_id) { 590 + if (sess_id != ULLONG_MAX && work->sess->id != sess_id) { 591 591 pr_err("session id(%llu) is different with the first operation(%lld)\n", 592 592 sess_id, work->sess->id); 593 593 return -EINVAL; ··· 2467 2467 } 2468 2468 } 2469 2469 2470 - static int smb2_creat(struct ksmbd_work *work, struct path *path, char *name, 2471 - int open_flags, umode_t posix_mode, bool is_dir) 2470 + static int smb2_creat(struct ksmbd_work *work, struct path *parent_path, 2471 + struct path *path, char *name, int open_flags, 2472 + umode_t posix_mode, bool is_dir) 2472 2473 { 2473 2474 struct ksmbd_tree_connect *tcon = work->tcon; 2474 2475 struct ksmbd_share_config *share = tcon->share_conf; ··· 2496 2495 return rc; 2497 2496 } 2498 2497 2499 - rc = ksmbd_vfs_kern_path_locked(work, name, 0, path, 0); 2498 + rc = ksmbd_vfs_kern_path_locked(work, name, 0, parent_path, path, 0); 2500 2499 if (rc) { 2501 2500 pr_err("cannot get linux path (%s), err = %d\n", 2502 2501 name, rc); ··· 2566 2565 struct ksmbd_tree_connect *tcon = work->tcon; 2567 2566 struct smb2_create_req *req; 2568 2567 struct smb2_create_rsp *rsp; 2569 - struct path path; 2568 + struct path path, parent_path; 2570 2569 struct ksmbd_share_config *share = tcon->share_conf; 2571 2570 struct ksmbd_file *fp = NULL; 2572 2571 struct file *filp = NULL; ··· 2787 2786 goto err_out1; 2788 2787 } 2789 2788 2790 - rc = ksmbd_vfs_kern_path_locked(work, name, LOOKUP_NO_SYMLINKS, &path, 1); 2789 + rc = ksmbd_vfs_kern_path_locked(work, name, LOOKUP_NO_SYMLINKS, 2790 + &parent_path, &path, 1); 2791 2791 if (!rc) { 2792 2792 file_present = true; 2793 2793 ··· 2908 2906 2909 2907 /*create file if not present */ 2910 2908 if (!file_present) { 2911 - rc = smb2_creat(work, &path, name, open_flags, posix_mode, 2909 + rc = smb2_creat(work, &parent_path, &path, name, open_flags, 2910 + posix_mode, 2912 2911 req->CreateOptions & FILE_DIRECTORY_FILE_LE); 2913 2912 if (rc) { 2914 2913 if (rc == -ENOENT) { ··· 3324 3321 3325 3322 err_out: 3326 3323 if (file_present || created) { 3327 - inode_unlock(d_inode(path.dentry->d_parent)); 3328 - dput(path.dentry); 3324 + inode_unlock(d_inode(parent_path.dentry)); 3325 + path_put(&path); 3326 + path_put(&parent_path); 3329 3327 } 3330 3328 
ksmbd_revert_fsids(work); 3331 3329 err_out1: ··· 5549 5545 struct nls_table *local_nls) 5550 5546 { 5551 5547 char *link_name = NULL, *target_name = NULL, *pathname = NULL; 5552 - struct path path; 5548 + struct path path, parent_path; 5553 5549 bool file_present = false; 5554 5550 int rc; 5555 5551 ··· 5579 5575 5580 5576 ksmbd_debug(SMB, "target name is %s\n", target_name); 5581 5577 rc = ksmbd_vfs_kern_path_locked(work, link_name, LOOKUP_NO_SYMLINKS, 5582 - &path, 0); 5578 + &parent_path, &path, 0); 5583 5579 if (rc) { 5584 5580 if (rc != -ENOENT) 5585 5581 goto out; ··· 5609 5605 rc = -EINVAL; 5610 5606 out: 5611 5607 if (file_present) { 5612 - inode_unlock(d_inode(path.dentry->d_parent)); 5608 + inode_unlock(d_inode(parent_path.dentry)); 5613 5609 path_put(&path); 5610 + path_put(&parent_path); 5614 5611 } 5615 5612 if (!IS_ERR(link_name)) 5616 5613 kfree(link_name); ··· 6214 6209 unsigned int max_read_size = conn->vals->max_read_size; 6215 6210 6216 6211 WORK_BUFFERS(work, req, rsp); 6212 + if (work->next_smb2_rcv_hdr_off) { 6213 + work->send_no_response = 1; 6214 + err = -EOPNOTSUPP; 6215 + goto out; 6216 + } 6217 6217 6218 6218 if (test_share_config_flag(work->tcon->share_conf, 6219 6219 KSMBD_SHARE_FLAG_PIPE)) { ··· 8619 8609 struct smb2_transform_hdr *tr_hdr = smb2_get_msg(buf); 8620 8610 int rc = 0; 8621 8611 8622 - if (buf_data_size < sizeof(struct smb2_hdr)) { 8612 + if (pdu_length < sizeof(struct smb2_transform_hdr) || 8613 + buf_data_size < sizeof(struct smb2_hdr)) { 8623 8614 pr_err("Transform message is too small (%u)\n", 8624 8615 pdu_length); 8625 8616 return -ECONNABORTED;
+11 -8
fs/smb/server/smb_common.c
··· 388 388 [SMB_COM_NEGOTIATE_EX] = { .proc = smb1_negotiate, }, 389 389 }; 390 390 391 - static void init_smb1_server(struct ksmbd_conn *conn) 391 + static int init_smb1_server(struct ksmbd_conn *conn) 392 392 { 393 393 conn->ops = &smb1_server_ops; 394 394 conn->cmds = smb1_server_cmds; 395 395 conn->max_cmds = ARRAY_SIZE(smb1_server_cmds); 396 + return 0; 396 397 } 397 398 398 - void ksmbd_init_smb_server(struct ksmbd_work *work) 399 + int ksmbd_init_smb_server(struct ksmbd_work *work) 399 400 { 400 401 struct ksmbd_conn *conn = work->conn; 401 402 __le32 proto; 402 403 403 - if (conn->need_neg == false) 404 - return; 405 - 406 404 proto = *(__le32 *)((struct smb_hdr *)work->request_buf)->Protocol; 405 + if (conn->need_neg == false) { 406 + if (proto == SMB1_PROTO_NUMBER) 407 + return -EINVAL; 408 + return 0; 409 + } 410 + 407 411 if (proto == SMB1_PROTO_NUMBER) 408 - init_smb1_server(conn); 409 - else 410 - init_smb3_11_server(conn); 412 + return init_smb1_server(conn); 413 + return init_smb3_11_server(conn); 411 414 } 412 415 413 416 int ksmbd_populate_dot_dotdot_entries(struct ksmbd_work *work, int info_level,
+1 -1
fs/smb/server/smb_common.h
··· 427 427 428 428 int ksmbd_lookup_dialect_by_id(__le16 *cli_dialects, __le16 dialects_count); 429 429 430 - void ksmbd_init_smb_server(struct ksmbd_work *work); 430 + int ksmbd_init_smb_server(struct ksmbd_work *work); 431 431 432 432 struct ksmbd_kstat; 433 433 int ksmbd_populate_dot_dotdot_entries(struct ksmbd_work *work,
+37 -28
fs/smb/server/vfs.c
··· 63 63 64 64 static int ksmbd_vfs_path_lookup_locked(struct ksmbd_share_config *share_conf, 65 65 char *pathname, unsigned int flags, 66 + struct path *parent_path, 66 67 struct path *path) 67 68 { 68 69 struct qstr last; 69 70 struct filename *filename; 70 71 struct path *root_share_path = &share_conf->vfs_path; 71 72 int err, type; 72 - struct path parent_path; 73 73 struct dentry *d; 74 74 75 75 if (pathname[0] == '\0') { ··· 84 84 return PTR_ERR(filename); 85 85 86 86 err = vfs_path_parent_lookup(filename, flags, 87 - &parent_path, &last, &type, 87 + parent_path, &last, &type, 88 88 root_share_path); 89 89 if (err) { 90 90 putname(filename); ··· 92 92 } 93 93 94 94 if (unlikely(type != LAST_NORM)) { 95 - path_put(&parent_path); 95 + path_put(parent_path); 96 96 putname(filename); 97 97 return -ENOENT; 98 98 } 99 99 100 - inode_lock_nested(parent_path.dentry->d_inode, I_MUTEX_PARENT); 101 - d = lookup_one_qstr_excl(&last, parent_path.dentry, 0); 100 + inode_lock_nested(parent_path->dentry->d_inode, I_MUTEX_PARENT); 101 + d = lookup_one_qstr_excl(&last, parent_path->dentry, 0); 102 102 if (IS_ERR(d)) 103 103 goto err_out; 104 104 ··· 108 108 } 109 109 110 110 path->dentry = d; 111 - path->mnt = share_conf->vfs_path.mnt; 112 - path_put(&parent_path); 113 - putname(filename); 111 + path->mnt = mntget(parent_path->mnt); 114 112 113 + if (test_share_config_flag(share_conf, KSMBD_SHARE_FLAG_CROSSMNT)) { 114 + err = follow_down(path, 0); 115 + if (err < 0) { 116 + path_put(path); 117 + goto err_out; 118 + } 119 + } 120 + 121 + putname(filename); 115 122 return 0; 116 123 117 124 err_out: 118 - inode_unlock(parent_path.dentry->d_inode); 119 - path_put(&parent_path); 125 + inode_unlock(d_inode(parent_path->dentry)); 126 + path_put(parent_path); 120 127 putname(filename); 121 128 return -ENOENT; 122 129 } ··· 419 412 { 420 413 char *stream_buf = NULL, *wbuf; 421 414 struct mnt_idmap *idmap = file_mnt_idmap(fp->filp); 422 - size_t size, v_len; 415 + size_t size; 416 + ssize_t v_len; 423 417 int err = 0; 424 418 425 419 ksmbd_debug(VFS, "write stream data pos : %llu, count : %zd\n", ··· 437 429 fp->stream.name, 438 430 fp->stream.size, 439 431 &stream_buf); 440 - if ((int)v_len < 0) { 432 + if (v_len < 0) { 441 433 pr_err("not found stream in xattr : %zd\n", v_len); 442 - err = (int)v_len; 434 + err = v_len; 443 435 goto out; 444 436 } 445 437 ··· 1202 1194 * Return: 0 on success, otherwise error 1203 1195 */ 1204 1196 int ksmbd_vfs_kern_path_locked(struct ksmbd_work *work, char *name, 1205 - unsigned int flags, struct path *path, 1206 - bool caseless) 1197 + unsigned int flags, struct path *parent_path, 1198 + struct path *path, bool caseless) 1207 1199 { 1208 1200 struct ksmbd_share_config *share_conf = work->tcon->share_conf; 1209 1201 int err; 1210 - struct path parent_path; 1211 1202 1212 - err = ksmbd_vfs_path_lookup_locked(share_conf, name, flags, path); 1203 + err = ksmbd_vfs_path_lookup_locked(share_conf, name, flags, parent_path, 1204 + path); 1213 1205 if (!err) 1214 1206 return 0; 1215 1207 ··· 1224 1216 path_len = strlen(filepath); 1225 1217 remain_len = path_len; 1226 1218 1227 - parent_path = share_conf->vfs_path; 1228 - path_get(&parent_path); 1219 + *parent_path = share_conf->vfs_path; 1220 + path_get(parent_path); 1229 1221 1230 - while (d_can_lookup(parent_path.dentry)) { 1222 + while (d_can_lookup(parent_path->dentry)) { 1231 1223 char *filename = filepath + path_len - remain_len; 1232 1224 char *next = strchrnul(filename, '/'); 1233 1225 size_t filename_len = next - 
filename; ··· 1236 1228 if (filename_len == 0) 1237 1229 break; 1238 1230 1239 - err = ksmbd_vfs_lookup_in_dir(&parent_path, filename, 1231 + err = ksmbd_vfs_lookup_in_dir(parent_path, filename, 1240 1232 filename_len, 1241 1233 work->conn->um); 1242 1234 if (err) ··· 1253 1245 goto out2; 1254 1246 else if (is_last) 1255 1247 goto out1; 1256 - path_put(&parent_path); 1257 - parent_path = *path; 1248 + path_put(parent_path); 1249 + *parent_path = *path; 1258 1250 1259 1251 next[0] = '/'; 1260 1252 remain_len -= filename_len + 1; ··· 1262 1254 1263 1255 err = -EINVAL; 1264 1256 out2: 1265 - path_put(&parent_path); 1257 + path_put(parent_path); 1266 1258 out1: 1267 1259 kfree(filepath); 1268 1260 } 1269 1261 1270 1262 if (!err) { 1271 - err = ksmbd_vfs_lock_parent(parent_path.dentry, path->dentry); 1272 - if (err) 1273 - dput(path->dentry); 1274 - path_put(&parent_path); 1263 + err = ksmbd_vfs_lock_parent(parent_path->dentry, path->dentry); 1264 + if (err) { 1265 + path_put(path); 1266 + path_put(parent_path); 1267 + } 1275 1268 } 1276 1269 return err; 1277 1270 }
+2 -2
fs/smb/server/vfs.h
··· 115 115 int ksmbd_vfs_remove_xattr(struct mnt_idmap *idmap, 116 116 const struct path *path, char *attr_name); 117 117 int ksmbd_vfs_kern_path_locked(struct ksmbd_work *work, char *name, 118 - unsigned int flags, struct path *path, 119 - bool caseless); 118 + unsigned int flags, struct path *parent_path, 119 + struct path *path, bool caseless); 120 120 struct dentry *ksmbd_vfs_kern_path_create(struct ksmbd_work *work, 121 121 const char *name, 122 122 unsigned int flags,
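With the locked lookup now returning both the parent and the child path, a successful caller owns three things it must release: the parent inode lock and both path references. The open, symlink and error paths in the smb2pdu.c hunk all follow the same pairing, sketched here from those hunks:

    struct path path, parent_path;
    int rc;

    rc = ksmbd_vfs_kern_path_locked(work, name, LOOKUP_NO_SYMLINKS,
                                    &parent_path, &path, false);
    if (!rc) {
            /* ... operate on path while the parent stays locked ... */
            inode_unlock(d_inode(parent_path.dentry));
            path_put(&path);
            path_put(&parent_path);
    }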
+2
fs/splice.c
··· 876 876 msg.msg_flags |= MSG_MORE; 877 877 if (remain && pipe_occupancy(pipe->head, tail) > 0) 878 878 msg.msg_flags |= MSG_MORE; 879 + if (out->f_flags & O_NONBLOCK) 880 + msg.msg_flags |= MSG_DONTWAIT; 879 881 880 882 iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, bvec, bc, 881 883 len - remain);
-5
include/drm/drm_fb_helper.h
··· 368 368 { 369 369 } 370 370 371 - static inline int drm_fb_helper_defio_init(struct drm_fb_helper *fb_helper) 372 - { 373 - return -ENODEV; 374 - } 375 - 376 371 static inline void drm_fb_helper_set_suspend(struct drm_fb_helper *fb_helper, 377 372 bool suspend) 378 373 {
-4
include/linux/ftrace.h
··· 684 684 ftrace_set_early_filter(struct ftrace_ops *ops, char *buf, int enable); 685 685 686 686 /* defined in arch */ 687 - extern int ftrace_ip_converted(unsigned long ip); 688 687 extern int ftrace_dyn_arch_init(void); 689 688 extern void ftrace_replace_code(int enable); 690 689 extern int ftrace_update_ftrace_func(ftrace_func_t func); ··· 857 858 return -EINVAL; 858 859 } 859 860 #endif 860 - 861 - /* May be defined in arch */ 862 - extern int ftrace_arch_read_dyn_info(char *buf, int size); 863 861 864 862 extern int skip_trace(unsigned long ip); 865 863 extern void ftrace_module_init(struct module *mod);
+23 -6
include/linux/mm.h
··· 641 641 */ 642 642 static inline bool vma_start_read(struct vm_area_struct *vma) 643 643 { 644 - /* Check before locking. A race might cause false locked result. */ 645 - if (vma->vm_lock_seq == READ_ONCE(vma->vm_mm->mm_lock_seq)) 644 + /* 645 + * Check before locking. A race might cause false locked result. 646 + * We can use READ_ONCE() for the mm_lock_seq here, and don't need 647 + * ACQUIRE semantics, because this is just a lockless check whose result 648 + * we don't rely on for anything - the mm_lock_seq read against which we 649 + * need ordering is below. 650 + */ 651 + if (READ_ONCE(vma->vm_lock_seq) == READ_ONCE(vma->vm_mm->mm_lock_seq)) 646 652 return false; 647 653 648 654 if (unlikely(down_read_trylock(&vma->vm_lock->lock) == 0)) ··· 659 653 * False unlocked result is impossible because we modify and check 660 654 * vma->vm_lock_seq under vma->vm_lock protection and mm->mm_lock_seq 661 655 * modification invalidates all existing locks. 656 + * 657 + * We must use ACQUIRE semantics for the mm_lock_seq so that if we are 658 + * racing with vma_end_write_all(), we only start reading from the VMA 659 + * after it has been unlocked. 660 + * This pairs with RELEASE semantics in vma_end_write_all(). 662 661 */ 663 - if (unlikely(vma->vm_lock_seq == READ_ONCE(vma->vm_mm->mm_lock_seq))) { 662 + if (unlikely(vma->vm_lock_seq == smp_load_acquire(&vma->vm_mm->mm_lock_seq))) { 664 663 up_read(&vma->vm_lock->lock); 665 664 return false; 666 665 } ··· 687 676 * current task is holding mmap_write_lock, both vma->vm_lock_seq and 688 677 * mm->mm_lock_seq can't be concurrently modified. 689 678 */ 690 - *mm_lock_seq = READ_ONCE(vma->vm_mm->mm_lock_seq); 679 + *mm_lock_seq = vma->vm_mm->mm_lock_seq; 691 680 return (vma->vm_lock_seq == *mm_lock_seq); 692 681 } 693 682 ··· 699 688 return; 700 689 701 690 down_write(&vma->vm_lock->lock); 702 - vma->vm_lock_seq = mm_lock_seq; 691 + /* 692 + * We should use WRITE_ONCE() here because we can have concurrent reads 693 + * from the early lockless pessimistic check in vma_start_read(). 694 + * We don't really care about the correctness of that early check, but 695 + * we should use WRITE_ONCE() for cleanliness and to keep KCSAN happy. 696 + */ 697 + WRITE_ONCE(vma->vm_lock_seq, mm_lock_seq); 703 698 up_write(&vma->vm_lock->lock); 704 699 } 705 700 ··· 719 702 if (!down_write_trylock(&vma->vm_lock->lock)) 720 703 return false; 721 704 722 - vma->vm_lock_seq = mm_lock_seq; 705 + WRITE_ONCE(vma->vm_lock_seq, mm_lock_seq); 723 706 up_write(&vma->vm_lock->lock); 724 707 return true; 725 708 }
+28
include/linux/mm_types.h
··· 514 514 }; 515 515 516 516 #ifdef CONFIG_PER_VMA_LOCK 517 + /* 518 + * Can only be written (using WRITE_ONCE()) while holding both: 519 + * - mmap_lock (in write mode) 520 + * - vm_lock->lock (in write mode) 521 + * Can be read reliably while holding one of: 522 + * - mmap_lock (in read or write mode) 523 + * - vm_lock->lock (in read or write mode) 524 + * Can be read unreliably (using READ_ONCE()) for pessimistic bailout 525 + * while holding nothing (except RCU to keep the VMA struct allocated). 526 + * 527 + * This sequence counter is explicitly allowed to overflow; sequence 528 + * counter reuse can only lead to occasional unnecessary use of the 529 + * slowpath. 530 + */ 517 531 int vm_lock_seq; 518 532 struct vma_lock *vm_lock; 519 533 ··· 693 679 * by mmlist_lock 694 680 */ 695 681 #ifdef CONFIG_PER_VMA_LOCK 682 + /* 683 + * This field has lock-like semantics, meaning it is sometimes 684 + * accessed with ACQUIRE/RELEASE semantics. 685 + * Roughly speaking, incrementing the sequence number is 686 + * equivalent to releasing locks on VMAs; reading the sequence 687 + * number can be part of taking a read lock on a VMA. 688 + * 689 + * Can be modified under write mmap_lock using RELEASE 690 + * semantics. 691 + * Can be read with no other protection when holding write 692 + * mmap_lock. 693 + * Can be read with ACQUIRE semantics if not holding write 694 + * mmap_lock. 695 + */ 696 696 int mm_lock_seq; 697 697 #endif 698 698
+8 -2
include/linux/mmap_lock.h
··· 76 76 static inline void vma_end_write_all(struct mm_struct *mm) 77 77 { 78 78 mmap_assert_write_locked(mm); 79 - /* No races during update due to exclusive mmap_lock being held */ 80 - WRITE_ONCE(mm->mm_lock_seq, mm->mm_lock_seq + 1); 79 + /* 80 + * Nobody can concurrently modify mm->mm_lock_seq due to exclusive 81 + * mmap_lock being held. 82 + * We need RELEASE semantics here to ensure that preceding stores into 83 + * the VMA take effect before we unlock it with this store. 84 + * Pairs with ACQUIRE semantics in vma_start_read(). 85 + */ 86 + smp_store_release(&mm->mm_lock_seq, mm->mm_lock_seq + 1); 81 87 } 82 88 #else 83 89 static inline void vma_end_write_all(struct mm_struct *mm) {}
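The mm.h, mm_types.h and mmap_lock.h hunks form one acquire/release pair: vma_end_write_all() publishes the sequence bump with RELEASE semantics so all earlier VMA stores become visible first, and the second check in vma_start_read() re-reads the sequence with ACQUIRE semantics so the reader never sees VMA fields from before the unlock. A minimal userspace model of that pairing, using C11 atomics in place of smp_store_release()/smp_load_acquire(); the names mirror the kernel but the types are simplified:

    #include <stdatomic.h>
    #include <stdbool.h>

    struct mm  { _Atomic int mm_lock_seq; };
    struct vma { int vm_lock_seq; struct mm *mm; /* plus the real rwsem */ };

    /* vma_end_write_all(): only the exclusive mmap_lock holder bumps the
     * sequence, so a relaxed load suffices; the RELEASE store orders all
     * earlier VMA modifications before the bump. */
    static void end_write_all(struct mm *mm)
    {
            int seq = atomic_load_explicit(&mm->mm_lock_seq,
                                           memory_order_relaxed);

            atomic_store_explicit(&mm->mm_lock_seq, seq + 1,
                                  memory_order_release);
    }

    /* Second check in vma_start_read(): ACQUIRE pairs with the RELEASE
     * above. Equal sequences mean the VMA is still write-locked in this
     * mmap_lock cycle and the reader must bail out. */
    static bool vma_still_write_locked(struct vma *vma)
    {
            return vma->vm_lock_seq ==
                   atomic_load_explicit(&vma->mm->mm_lock_seq,
                                        memory_order_acquire);
    }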
+1 -1
include/linux/nls.h
··· 47 47 /* nls_base.c */ 48 48 extern int __register_nls(struct nls_table *, struct module *); 49 49 extern int unregister_nls(struct nls_table *); 50 - extern struct nls_table *load_nls(char *); 50 + extern struct nls_table *load_nls(const char *charset); 51 51 extern void unload_nls(struct nls_table *); 52 52 extern struct nls_table *load_nls_default(void); 53 53 #define register_nls(nls) __register_nls((nls), THIS_MODULE)
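Const-qualifying the charset parameter (matched by find_nls() in the fs/nls/nls_base.c hunk earlier) lets callers hand in string literals and other const buffers without a cast, which is what the new cifs code relies on when it does load_nls(ctx->local_nls->charset). A trivial kernel-context sketch:

    struct nls_table *nls = load_nls("utf8");  /* literal passes cleanly */

    if (nls)
            unload_nls(nls);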
-10
include/linux/pm_wakeirq.h
··· 10 10 extern int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq); 11 11 extern int dev_pm_set_dedicated_wake_irq_reverse(struct device *dev, int irq); 12 12 extern void dev_pm_clear_wake_irq(struct device *dev); 13 - extern void dev_pm_enable_wake_irq(struct device *dev); 14 - extern void dev_pm_disable_wake_irq(struct device *dev); 15 13 16 14 #else /* !CONFIG_PM */ 17 15 ··· 29 31 } 30 32 31 33 static inline void dev_pm_clear_wake_irq(struct device *dev) 32 - { 33 - } 34 - 35 - static inline void dev_pm_enable_wake_irq(struct device *dev) 36 - { 37 - } 38 - 39 - static inline void dev_pm_disable_wake_irq(struct device *dev) 40 34 { 41 35 } 42 36
+3 -3
include/linux/thermal.h
··· 301 301 #ifdef CONFIG_THERMAL 302 302 struct thermal_zone_device *thermal_zone_device_register(const char *, int, int, 303 303 void *, struct thermal_zone_device_ops *, 304 - struct thermal_zone_params *, int, int); 304 + const struct thermal_zone_params *, int, int); 305 305 306 306 void thermal_zone_device_unregister(struct thermal_zone_device *); 307 307 308 308 struct thermal_zone_device * 309 309 thermal_zone_device_register_with_trips(const char *, struct thermal_trip *, int, int, 310 310 void *, struct thermal_zone_device_ops *, 311 - struct thermal_zone_params *, int, int); 311 + const struct thermal_zone_params *, int, int); 312 312 313 313 void *thermal_zone_device_priv(struct thermal_zone_device *tzd); 314 314 const char *thermal_zone_device_type(struct thermal_zone_device *tzd); ··· 348 348 static inline struct thermal_zone_device *thermal_zone_device_register( 349 349 const char *type, int trips, int mask, void *devdata, 350 350 struct thermal_zone_device_ops *ops, 351 - struct thermal_zone_params *tzp, 351 + const struct thermal_zone_params *tzp, 352 352 int passive_delay, int polling_delay) 353 353 { return ERR_PTR(-ENODEV); } 354 354 static inline void thermal_zone_device_unregister(
+2 -6
include/net/ipv6.h
··· 752 752 /* more secured version of ipv6_addr_hash() */ 753 753 static inline u32 __ipv6_addr_jhash(const struct in6_addr *a, const u32 initval) 754 754 { 755 - u32 v = (__force u32)a->s6_addr32[0] ^ (__force u32)a->s6_addr32[1]; 756 - 757 - return jhash_3words(v, 758 - (__force u32)a->s6_addr32[2], 759 - (__force u32)a->s6_addr32[3], 760 - initval); 755 + return jhash2((__force const u32 *)a->s6_addr32, 756 + ARRAY_SIZE(a->s6_addr32), initval); 761 757 } 762 758 763 759 static inline bool ipv6_addr_loopback(const struct in6_addr *a)
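The old fold XORed s6_addr32[0] into s6_addr32[1] before hashing, so any two addresses whose first two words XOR to the same value (and share the last two words) collided before jhash even ran; jhash2() over all four words keeps them apart. A small demonstration of such a pre-hash collision, with illustrative addresses:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t old_fold(const uint32_t w[4]) { return w[0] ^ w[1]; }

    int main(void)
    {
            /* Different addresses, identical fold: 0x20010db8 ^ 0 both ways. */
            uint32_t a[4] = { 0x20010db8, 0x00000000, 0x1, 0x2 };
            uint32_t b[4] = { 0x00000000, 0x20010db8, 0x1, 0x2 };

            printf("fold(a)=%#x fold(b)=%#x -> equal inputs to the old hash\n",
                   old_fold(a), old_fold(b));
            return 0;
    }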
+9 -4
include/net/vxlan.h
··· 386 386 return features; 387 387 } 388 388 389 - /* IP header + UDP + VXLAN + Ethernet header */ 390 - #define VXLAN_HEADROOM (20 + 8 + 8 + 14) 391 - /* IPv6 header + UDP + VXLAN + Ethernet header */ 392 - #define VXLAN6_HEADROOM (40 + 8 + 8 + 14) 389 + static inline int vxlan_headroom(u32 flags) 390 + { 391 + /* VXLAN: IP4/6 header + UDP + VXLAN + Ethernet header */ 392 + /* VXLAN-GPE: IP4/6 header + UDP + VXLAN */ 393 + return (flags & VXLAN_F_IPV6 ? sizeof(struct ipv6hdr) : 394 + sizeof(struct iphdr)) + 395 + sizeof(struct udphdr) + sizeof(struct vxlanhdr) + 396 + (flags & VXLAN_F_GPE ? 0 : ETH_HLEN); 397 + } 393 398 394 399 static inline struct vxlanhdr *vxlan_hdr(struct sk_buff *skb) 395 400 {
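vxlan_headroom() replaces the two fixed macros and additionally handles VXLAN-GPE, which carries no inner Ethernet header. Spot-checking it against the old constants (IPv4 20, IPv6 40, UDP 8, VXLAN 8, Ethernet 14 bytes):

    vxlan_headroom(0)                          = 20 + 8 + 8 + 14 = 50  /* old VXLAN_HEADROOM  */
    vxlan_headroom(VXLAN_F_IPV6)               = 40 + 8 + 8 + 14 = 70  /* old VXLAN6_HEADROOM */
    vxlan_headroom(VXLAN_F_GPE)                = 20 + 8 + 8      = 36  /* no ETH_HLEN for GPE */
    vxlan_headroom(VXLAN_F_IPV6 | VXLAN_F_GPE) = 40 + 8 + 8      = 56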
+5 -5
include/uapi/linux/blkzoned.h
··· 51 51 *
52 52 * The Zone Condition state machine in the ZBC/ZAC standards maps the above
53 53 * definitions as:
54 - * - ZC1: Empty | BLK_ZONE_EMPTY
54 + * - ZC1: Empty | BLK_ZONE_COND_EMPTY
55 55 * - ZC2: Implicit Open | BLK_ZONE_COND_IMP_OPEN
56 56 * - ZC3: Explicit Open | BLK_ZONE_COND_EXP_OPEN
57 - * - ZC4: Closed | BLK_ZONE_CLOSED
58 - * - ZC5: Full | BLK_ZONE_FULL
59 - * - ZC6: Read Only | BLK_ZONE_READONLY
60 - * - ZC7: Offline | BLK_ZONE_OFFLINE
57 + * - ZC4: Closed | BLK_ZONE_COND_CLOSED
58 + * - ZC5: Full | BLK_ZONE_COND_FULL
59 + * - ZC6: Read Only | BLK_ZONE_COND_READONLY
60 + * - ZC7: Offline | BLK_ZONE_COND_OFFLINE
61 61 *
62 62 * Conditions 0x5 to 0xC are reserved by the current ZBC/ZAC spec and should
63 63 * be considered invalid.
+5 -1
include/uapi/linux/if_packet.h
··· 18 18 unsigned short sll_hatype; 19 19 unsigned char sll_pkttype; 20 20 unsigned char sll_halen; 21 - unsigned char sll_addr[8]; 21 + union { 22 + unsigned char sll_addr[8]; 23 + /* Actual length is in sll_halen. */ 24 + __DECLARE_FLEX_ARRAY(unsigned char, sll_addr_flex); 25 + }; 22 26 }; 23 27 24 28 /* Packet types */
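The union keeps sll_addr at its historic 8 bytes for ABI compatibility while __DECLARE_FLEX_ARRAY() adds an alias whose real length travels in sll_halen, so fortified memcpy() no longer flags hardware addresses longer than 8 bytes as overflows. A hedged kernel-side sketch; hwaddr and halen stand in for device-supplied values:

    #include <linux/if_packet.h>
    #include <string.h>

    /* sll is assumed to sit inside an allocation large enough for halen
     * address bytes; the flex-array alias defers the bounds check to
     * that enclosing allocation instead of the fixed sll_addr[8]. */
    static void fill_ll_addr(struct sockaddr_ll *sll,
                             const unsigned char *hwaddr, unsigned char halen)
    {
            sll->sll_halen = halen;
            memcpy(sll->sll_addr_flex, hwaddr, halen);
    }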
+9
include/uapi/xen/evtchn.h
··· 101 101 domid_t domid; 102 102 }; 103 103 104 + /* 105 + * Bind statically allocated @port. 106 + */ 107 + #define IOCTL_EVTCHN_BIND_STATIC \ 108 + _IOC(_IOC_NONE, 'E', 7, sizeof(struct ioctl_evtchn_bind)) 109 + struct ioctl_evtchn_bind { 110 + unsigned int port; 111 + }; 112 + 104 113 #endif /* __LINUX_PUBLIC_EVTCHN_H__ */
+10 -1
include/xen/events.h
··· 69 69 /* 70 70 * Allow extra references to event channels exposed to userspace by evtchn 71 71 */ 72 - int evtchn_make_refcounted(evtchn_port_t evtchn); 72 + int evtchn_make_refcounted(evtchn_port_t evtchn, bool is_static); 73 73 int evtchn_get(evtchn_port_t evtchn); 74 74 void evtchn_put(evtchn_port_t evtchn); 75 75 ··· 140 140 void xen_init_IRQ(void); 141 141 142 142 irqreturn_t xen_debug_interrupt(int irq, void *dev_id); 143 + 144 + static inline void xen_evtchn_close(evtchn_port_t port) 145 + { 146 + struct evtchn_close close; 147 + 148 + close.port = port; 149 + if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0) 150 + BUG(); 151 + } 143 152 144 153 #endif /* _XEN_EVENTS_H */
+17 -6
io_uring/io_uring.c
··· 2493 2493 return 0; 2494 2494 } 2495 2495 2496 + static bool current_pending_io(void) 2497 + { 2498 + struct io_uring_task *tctx = current->io_uring; 2499 + 2500 + if (!tctx) 2501 + return false; 2502 + return percpu_counter_read_positive(&tctx->inflight); 2503 + } 2504 + 2496 2505 /* when returns >0, the caller should retry */ 2497 2506 static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx, 2498 2507 struct io_wait_queue *iowq) 2499 2508 { 2500 - int token, ret; 2509 + int io_wait, ret; 2501 2510 2502 2511 if (unlikely(READ_ONCE(ctx->check_cq))) 2503 2512 return 1; ··· 2520 2511 return 0; 2521 2512 2522 2513 /* 2523 - * Use io_schedule_prepare/finish, so cpufreq can take into account 2524 - * that the task is waiting for IO - turns out to be important for low 2525 - * QD IO. 2514 + * Mark us as being in io_wait if we have pending requests, so cpufreq 2515 + * can take into account that the task is waiting for IO - turns out 2516 + * to be important for low QD IO. 2526 2517 */ 2527 - token = io_schedule_prepare(); 2518 + io_wait = current->in_iowait; 2519 + if (current_pending_io()) 2520 + current->in_iowait = 1; 2528 2521 ret = 0; 2529 2522 if (iowq->timeout == KTIME_MAX) 2530 2523 schedule(); 2531 2524 else if (!schedule_hrtimeout(&iowq->timeout, HRTIMER_MODE_ABS)) 2532 2525 ret = -ETIME; 2533 - io_schedule_finish(token); 2526 + current->in_iowait = io_wait; 2534 2527 return ret; 2535 2528 } 2536 2529
+13 -1
kernel/kprobes.c
··· 1545 1545 return 0;
1546 1546 }
1547 1547
1548 + static bool is_cfi_preamble_symbol(unsigned long addr)
1549 + {
1550 + char symbuf[KSYM_NAME_LEN];
1551 +
1552 + if (lookup_symbol_name(addr, symbuf))
1553 + return false;
1554 +
1555 + return str_has_prefix(symbuf, "__cfi_") ||
1556 + str_has_prefix(symbuf, "__pfx_");
1557 + }
1558 +
1548 1559 static int check_kprobe_address_safe(struct kprobe *p,
1549 1560 struct module **probed_mod)
1550 1561 {
··· 1574 1563 within_kprobe_blacklist((unsigned long) p->addr) ||
1575 1564 jump_label_text_reserved(p->addr, p->addr) ||
1576 1565 static_call_text_reserved(p->addr, p->addr) ||
1577 - find_bug((unsigned long)p->addr)) {
1566 + find_bug((unsigned long)p->addr) ||
1567 + is_cfi_preamble_symbol((unsigned long)p->addr)) {
1578 1568 ret = -EINVAL;
1579 1569 goto out;
1580 1570 }
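CFI preamble symbols (__cfi_*) and prefix padding symbols (__pfx_*) sit immediately before the function body and are consumed by the kCFI check rather than executed as normal entry text, so planting a kprobe on them breaks the control-flow check. For reference, str_has_prefix(str, prefix) takes the string under test first and returns strlen(prefix) on a match, zero otherwise; a minimal userspace model:

    #include <string.h>

    /* Userspace model of the kernel helper used above: candidate string
     * first, prefix second; non-zero return means "has the prefix". */
    static size_t str_has_prefix(const char *str, const char *prefix)
    {
            size_t len = strlen(prefix);

            return strncmp(str, prefix, len) == 0 ? len : 0;
    }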
+115 -57
kernel/locking/rtmutex.c
··· 333 333 return prio; 334 334 } 335 335 336 + /* 337 + * Update the waiter->tree copy of the sort keys. 338 + */ 336 339 static __always_inline void 337 340 waiter_update_prio(struct rt_mutex_waiter *waiter, struct task_struct *task) 338 341 { 339 - waiter->prio = __waiter_prio(task); 340 - waiter->deadline = task->dl.deadline; 342 + lockdep_assert_held(&waiter->lock->wait_lock); 343 + lockdep_assert(RB_EMPTY_NODE(&waiter->tree.entry)); 344 + 345 + waiter->tree.prio = __waiter_prio(task); 346 + waiter->tree.deadline = task->dl.deadline; 341 347 } 342 348 343 349 /* 344 - * Only use with rt_mutex_waiter_{less,equal}() 350 + * Update the waiter->pi_tree copy of the sort keys (from the tree copy). 345 351 */ 346 - #define task_to_waiter(p) \ 347 - &(struct rt_mutex_waiter){ .prio = __waiter_prio(p), .deadline = (p)->dl.deadline } 352 + static __always_inline void 353 + waiter_clone_prio(struct rt_mutex_waiter *waiter, struct task_struct *task) 354 + { 355 + lockdep_assert_held(&waiter->lock->wait_lock); 356 + lockdep_assert_held(&task->pi_lock); 357 + lockdep_assert(RB_EMPTY_NODE(&waiter->pi_tree.entry)); 348 358 349 - static __always_inline int rt_mutex_waiter_less(struct rt_mutex_waiter *left, 350 - struct rt_mutex_waiter *right) 359 + waiter->pi_tree.prio = waiter->tree.prio; 360 + waiter->pi_tree.deadline = waiter->tree.deadline; 361 + } 362 + 363 + /* 364 + * Only use with rt_waiter_node_{less,equal}() 365 + */ 366 + #define task_to_waiter_node(p) \ 367 + &(struct rt_waiter_node){ .prio = __waiter_prio(p), .deadline = (p)->dl.deadline } 368 + #define task_to_waiter(p) \ 369 + &(struct rt_mutex_waiter){ .tree = *task_to_waiter_node(p) } 370 + 371 + static __always_inline int rt_waiter_node_less(struct rt_waiter_node *left, 372 + struct rt_waiter_node *right) 351 373 { 352 374 if (left->prio < right->prio) 353 375 return 1; ··· 386 364 return 0; 387 365 } 388 366 389 - static __always_inline int rt_mutex_waiter_equal(struct rt_mutex_waiter *left, 390 - struct rt_mutex_waiter *right) 367 + static __always_inline int rt_waiter_node_equal(struct rt_waiter_node *left, 368 + struct rt_waiter_node *right) 391 369 { 392 370 if (left->prio != right->prio) 393 371 return 0; ··· 407 385 static inline bool rt_mutex_steal(struct rt_mutex_waiter *waiter, 408 386 struct rt_mutex_waiter *top_waiter) 409 387 { 410 - if (rt_mutex_waiter_less(waiter, top_waiter)) 388 + if (rt_waiter_node_less(&waiter->tree, &top_waiter->tree)) 411 389 return true; 412 390 413 391 #ifdef RT_MUTEX_BUILD_SPINLOCKS ··· 415 393 * Note that RT tasks are excluded from same priority (lateral) 416 394 * steals to prevent the introduction of an unbounded latency. 
417 395 */ 418 - if (rt_prio(waiter->prio) || dl_prio(waiter->prio)) 396 + if (rt_prio(waiter->tree.prio) || dl_prio(waiter->tree.prio)) 419 397 return false; 420 398 421 - return rt_mutex_waiter_equal(waiter, top_waiter); 399 + return rt_waiter_node_equal(&waiter->tree, &top_waiter->tree); 422 400 #else 423 401 return false; 424 402 #endif 425 403 } 426 404 427 405 #define __node_2_waiter(node) \ 428 - rb_entry((node), struct rt_mutex_waiter, tree_entry) 406 + rb_entry((node), struct rt_mutex_waiter, tree.entry) 429 407 430 408 static __always_inline bool __waiter_less(struct rb_node *a, const struct rb_node *b) 431 409 { 432 410 struct rt_mutex_waiter *aw = __node_2_waiter(a); 433 411 struct rt_mutex_waiter *bw = __node_2_waiter(b); 434 412 435 - if (rt_mutex_waiter_less(aw, bw)) 413 + if (rt_waiter_node_less(&aw->tree, &bw->tree)) 436 414 return 1; 437 415 438 416 if (!build_ww_mutex()) 439 417 return 0; 440 418 441 - if (rt_mutex_waiter_less(bw, aw)) 419 + if (rt_waiter_node_less(&bw->tree, &aw->tree)) 442 420 return 0; 443 421 444 422 /* NOTE: relies on waiter->ww_ctx being set before insertion */ ··· 456 434 static __always_inline void 457 435 rt_mutex_enqueue(struct rt_mutex_base *lock, struct rt_mutex_waiter *waiter) 458 436 { 459 - rb_add_cached(&waiter->tree_entry, &lock->waiters, __waiter_less); 437 + lockdep_assert_held(&lock->wait_lock); 438 + 439 + rb_add_cached(&waiter->tree.entry, &lock->waiters, __waiter_less); 460 440 } 461 441 462 442 static __always_inline void 463 443 rt_mutex_dequeue(struct rt_mutex_base *lock, struct rt_mutex_waiter *waiter) 464 444 { 465 - if (RB_EMPTY_NODE(&waiter->tree_entry)) 445 + lockdep_assert_held(&lock->wait_lock); 446 + 447 + if (RB_EMPTY_NODE(&waiter->tree.entry)) 466 448 return; 467 449 468 - rb_erase_cached(&waiter->tree_entry, &lock->waiters); 469 - RB_CLEAR_NODE(&waiter->tree_entry); 450 + rb_erase_cached(&waiter->tree.entry, &lock->waiters); 451 + RB_CLEAR_NODE(&waiter->tree.entry); 470 452 } 471 453 472 - #define __node_2_pi_waiter(node) \ 473 - rb_entry((node), struct rt_mutex_waiter, pi_tree_entry) 454 + #define __node_2_rt_node(node) \ 455 + rb_entry((node), struct rt_waiter_node, entry) 474 456 475 - static __always_inline bool 476 - __pi_waiter_less(struct rb_node *a, const struct rb_node *b) 457 + static __always_inline bool __pi_waiter_less(struct rb_node *a, const struct rb_node *b) 477 458 { 478 - return rt_mutex_waiter_less(__node_2_pi_waiter(a), __node_2_pi_waiter(b)); 459 + return rt_waiter_node_less(__node_2_rt_node(a), __node_2_rt_node(b)); 479 460 } 480 461 481 462 static __always_inline void 482 463 rt_mutex_enqueue_pi(struct task_struct *task, struct rt_mutex_waiter *waiter) 483 464 { 484 - rb_add_cached(&waiter->pi_tree_entry, &task->pi_waiters, __pi_waiter_less); 465 + lockdep_assert_held(&task->pi_lock); 466 + 467 + rb_add_cached(&waiter->pi_tree.entry, &task->pi_waiters, __pi_waiter_less); 485 468 } 486 469 487 470 static __always_inline void 488 471 rt_mutex_dequeue_pi(struct task_struct *task, struct rt_mutex_waiter *waiter) 489 472 { 490 - if (RB_EMPTY_NODE(&waiter->pi_tree_entry)) 473 + lockdep_assert_held(&task->pi_lock); 474 + 475 + if (RB_EMPTY_NODE(&waiter->pi_tree.entry)) 491 476 return; 492 477 493 - rb_erase_cached(&waiter->pi_tree_entry, &task->pi_waiters); 494 - RB_CLEAR_NODE(&waiter->pi_tree_entry); 478 + rb_erase_cached(&waiter->pi_tree.entry, &task->pi_waiters); 479 + RB_CLEAR_NODE(&waiter->pi_tree.entry); 495 480 } 496 481 497 - static __always_inline void rt_mutex_adjust_prio(struct task_struct 
*p) 482 + static __always_inline void rt_mutex_adjust_prio(struct rt_mutex_base *lock, 483 + struct task_struct *p) 498 484 { 499 485 struct task_struct *pi_task = NULL; 500 486 487 + lockdep_assert_held(&lock->wait_lock); 488 + lockdep_assert(rt_mutex_owner(lock) == p); 501 489 lockdep_assert_held(&p->pi_lock); 502 490 503 491 if (task_has_pi_waiters(p)) ··· 603 571 * Chain walk basics and protection scope 604 572 * 605 573 * [R] refcount on task 606 - * [P] task->pi_lock held 574 + * [Pn] task->pi_lock held 607 575 * [L] rtmutex->wait_lock held 576 + * 577 + * Normal locking order: 578 + * 579 + * rtmutex->wait_lock 580 + * task->pi_lock 608 581 * 609 582 * Step Description Protected by 610 583 * function arguments: ··· 625 588 * again: 626 589 * loop_sanity_check(); 627 590 * retry: 628 - * [1] lock(task->pi_lock); [R] acquire [P] 629 - * [2] waiter = task->pi_blocked_on; [P] 630 - * [3] check_exit_conditions_1(); [P] 631 - * [4] lock = waiter->lock; [P] 632 - * [5] if (!try_lock(lock->wait_lock)) { [P] try to acquire [L] 633 - * unlock(task->pi_lock); release [P] 591 + * [1] lock(task->pi_lock); [R] acquire [P1] 592 + * [2] waiter = task->pi_blocked_on; [P1] 593 + * [3] check_exit_conditions_1(); [P1] 594 + * [4] lock = waiter->lock; [P1] 595 + * [5] if (!try_lock(lock->wait_lock)) { [P1] try to acquire [L] 596 + * unlock(task->pi_lock); release [P1] 634 597 * goto retry; 635 598 * } 636 - * [6] check_exit_conditions_2(); [P] + [L] 637 - * [7] requeue_lock_waiter(lock, waiter); [P] + [L] 638 - * [8] unlock(task->pi_lock); release [P] 599 + * [6] check_exit_conditions_2(); [P1] + [L] 600 + * [7] requeue_lock_waiter(lock, waiter); [P1] + [L] 601 + * [8] unlock(task->pi_lock); release [P1] 639 602 * put_task_struct(task); release [R] 640 603 * [9] check_exit_conditions_3(); [L] 641 604 * [10] task = owner(lock); [L] 642 605 * get_task_struct(task); [L] acquire [R] 643 - * lock(task->pi_lock); [L] acquire [P] 644 - * [11] requeue_pi_waiter(tsk, waiters(lock));[P] + [L] 645 - * [12] check_exit_conditions_4(); [P] + [L] 646 - * [13] unlock(task->pi_lock); release [P] 606 + * lock(task->pi_lock); [L] acquire [P2] 607 + * [11] requeue_pi_waiter(tsk, waiters(lock));[P2] + [L] 608 + * [12] check_exit_conditions_4(); [P2] + [L] 609 + * [13] unlock(task->pi_lock); release [P2] 647 610 * unlock(lock->wait_lock); release [L] 648 611 * goto again; 612 + * 613 + * Where P1 is the blocking task and P2 is the lock owner; going up one step 614 + * the owner becomes the next blocked task etc.. 615 + * 616 + * 649 617 */ 650 618 static int __sched rt_mutex_adjust_prio_chain(struct task_struct *task, 651 619 enum rtmutex_chainwalk chwalk, ··· 798 756 * enabled we continue, but stop the requeueing in the chain 799 757 * walk. 800 758 */ 801 - if (rt_mutex_waiter_equal(waiter, task_to_waiter(task))) { 759 + if (rt_waiter_node_equal(&waiter->tree, task_to_waiter_node(task))) { 802 760 if (!detect_deadlock) 803 761 goto out_unlock_pi; 804 762 else ··· 806 764 } 807 765 808 766 /* 809 - * [4] Get the next lock 767 + * [4] Get the next lock; per holding task->pi_lock we can't unblock 768 + * and guarantee @lock's existence. 810 769 */ 811 770 lock = waiter->lock; 812 771 /* 813 772 * [5] We need to trylock here as we are holding task->pi_lock, 814 773 * which is the reverse lock order versus the other rtmutex 815 774 * operations. 775 + * 776 + * Per the above, holding task->pi_lock guarantees lock exists, so 777 + * inverting this lock order is infeasible from a life-time 778 + * perspective. 
816 779 */ 817 780 if (!raw_spin_trylock(&lock->wait_lock)) { 818 781 raw_spin_unlock_irq(&task->pi_lock); ··· 921 874 * or 922 875 * 923 876 * DL CBS enforcement advancing the effective deadline. 924 - * 925 - * Even though pi_waiters also uses these fields, and that tree is only 926 - * updated in [11], we can do this here, since we hold [L], which 927 - * serializes all pi_waiters access and rb_erase() does not care about 928 - * the values of the node being removed. 929 877 */ 930 878 waiter_update_prio(waiter, task); 931 879 932 880 rt_mutex_enqueue(lock, waiter); 933 881 934 - /* [8] Release the task */ 882 + /* 883 + * [8] Release the (blocking) task in preparation for 884 + * taking the owner task in [10]. 885 + * 886 + * Since we hold lock->waiter_lock, task cannot unblock, even if we 887 + * release task->pi_lock. 888 + */ 935 889 raw_spin_unlock(&task->pi_lock); 936 890 put_task_struct(task); 937 891 ··· 956 908 return 0; 957 909 } 958 910 959 - /* [10] Grab the next task, i.e. the owner of @lock */ 911 + /* 912 + * [10] Grab the next task, i.e. the owner of @lock 913 + * 914 + * Per holding lock->wait_lock and checking for !owner above, there 915 + * must be an owner and it cannot go away. 916 + */ 960 917 task = get_task_struct(rt_mutex_owner(lock)); 961 918 raw_spin_lock(&task->pi_lock); 962 919 ··· 974 921 * and adjust the priority of the owner. 975 922 */ 976 923 rt_mutex_dequeue_pi(task, prerequeue_top_waiter); 924 + waiter_clone_prio(waiter, task); 977 925 rt_mutex_enqueue_pi(task, waiter); 978 - rt_mutex_adjust_prio(task); 926 + rt_mutex_adjust_prio(lock, task); 979 927 980 928 } else if (prerequeue_top_waiter == waiter) { 981 929 /* ··· 991 937 */ 992 938 rt_mutex_dequeue_pi(task, waiter); 993 939 waiter = rt_mutex_top_waiter(lock); 940 + waiter_clone_prio(waiter, task); 994 941 rt_mutex_enqueue_pi(task, waiter); 995 - rt_mutex_adjust_prio(task); 942 + rt_mutex_adjust_prio(lock, task); 996 943 } else { 997 944 /* 998 945 * Nothing changed. No need to do any priority ··· 1209 1154 waiter->task = task; 1210 1155 waiter->lock = lock; 1211 1156 waiter_update_prio(waiter, task); 1157 + waiter_clone_prio(waiter, task); 1212 1158 1213 1159 /* Get the top priority waiter on the lock */ 1214 1160 if (rt_mutex_has_waiters(lock)) ··· 1243 1187 rt_mutex_dequeue_pi(owner, top_waiter); 1244 1188 rt_mutex_enqueue_pi(owner, waiter); 1245 1189 1246 - rt_mutex_adjust_prio(owner); 1190 + rt_mutex_adjust_prio(lock, owner); 1247 1191 if (owner->pi_blocked_on) 1248 1192 chain_walk = 1; 1249 1193 } else if (rt_mutex_cond_detect_deadlock(waiter, chwalk)) { ··· 1290 1234 { 1291 1235 struct rt_mutex_waiter *waiter; 1292 1236 1237 + lockdep_assert_held(&lock->wait_lock); 1238 + 1293 1239 raw_spin_lock(&current->pi_lock); 1294 1240 1295 1241 waiter = rt_mutex_top_waiter(lock); ··· 1304 1246 * task unblocks. 1305 1247 */ 1306 1248 rt_mutex_dequeue_pi(current, waiter); 1307 - rt_mutex_adjust_prio(current); 1249 + rt_mutex_adjust_prio(lock, current); 1308 1250 1309 1251 /* 1310 1252 * As we are waking up the top waiter, and the waiter stays ··· 1540 1482 if (rt_mutex_has_waiters(lock)) 1541 1483 rt_mutex_enqueue_pi(owner, rt_mutex_top_waiter(lock)); 1542 1484 1543 - rt_mutex_adjust_prio(owner); 1485 + rt_mutex_adjust_prio(lock, owner); 1544 1486 1545 1487 /* Store the lock on which owner is blocked or NULL */ 1546 1488 next_lock = task_blocked_on_lock(owner);
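
Editor's note: steps [1]-[5] of the chain-walk comment above describe taking task->pi_lock first and then only *trylocking* lock->wait_lock, because that is the reverse of the normal wait_lock -> pi_lock nesting. Below is a minimal userspace analogue of that trylock-and-retry pattern using pthreads; the names and the standalone program are illustrative only, not the kernel API.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t pi_lock = PTHREAD_MUTEX_INITIALIZER;   /* normally inner */
static pthread_mutex_t wait_lock = PTHREAD_MUTEX_INITIALIZER; /* normally outer */

/* Acquire both locks starting from the inner one. Since this inverts
 * the usual wait_lock -> pi_lock order, the outer lock may only be
 * trylocked; on failure we drop everything and start over, which is
 * the shape of step [5] in the chain walk above. */
static void lock_both_reversed(void)
{
	for (;;) {
		pthread_mutex_lock(&pi_lock);
		if (pthread_mutex_trylock(&wait_lock) == 0)
			return;	/* both held, normal invariants restored */
		/* Contended: release the inner lock so whoever holds
		 * wait_lock can take pi_lock, then retry. */
		pthread_mutex_unlock(&pi_lock);
	}
}

int main(void)
{
	lock_both_reversed();
	puts("acquired wait_lock via trylock while holding pi_lock");
	pthread_mutex_unlock(&wait_lock);
	pthread_mutex_unlock(&pi_lock);
	return 0;
}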
+1 -1
kernel/locking/rtmutex_api.c
··· 459 459 raw_spin_lock_irqsave(&task->pi_lock, flags); 460 460 461 461 waiter = task->pi_blocked_on; 462 - if (!waiter || rt_mutex_waiter_equal(waiter, task_to_waiter(task))) { 462 + if (!waiter || rt_waiter_node_equal(&waiter->tree, task_to_waiter_node(task))) { 463 463 raw_spin_unlock_irqrestore(&task->pi_lock, flags); 464 464 return; 465 465 }
+34 -13
kernel/locking/rtmutex_common.h
··· 17 17 #include <linux/rtmutex.h> 18 18 #include <linux/sched/wake_q.h> 19 19 20 + 21 + /* 22 + * This is a helper for the struct rt_mutex_waiter below. A waiter goes in two 23 + * separate trees and they need their own copy of the sort keys because of 24 + * different locking requirements. 25 + * 26 + * @entry: rbtree node to enqueue into the waiters tree 27 + * @prio: Priority of the waiter 28 + * @deadline: Deadline of the waiter if applicable 29 + * 30 + * See rt_waiter_node_less() and waiter_*_prio(). 31 + */ 32 + struct rt_waiter_node { 33 + struct rb_node entry; 34 + int prio; 35 + u64 deadline; 36 + }; 37 + 20 38 /* 21 39 * This is the control structure for tasks blocked on a rt_mutex, 22 40 * which is allocated on the kernel stack on of the blocked task. 23 41 * 24 - * @tree_entry: pi node to enqueue into the mutex waiters tree 25 - * @pi_tree_entry: pi node to enqueue into the mutex owner waiters tree 42 + * @tree: node to enqueue into the mutex waiters tree 43 + * @pi_tree: node to enqueue into the mutex owner waiters tree 26 44 * @task: task reference to the blocked task 27 45 * @lock: Pointer to the rt_mutex on which the waiter blocks 28 46 * @wake_state: Wakeup state to use (TASK_NORMAL or TASK_RTLOCK_WAIT) 29 - * @prio: Priority of the waiter 30 - * @deadline: Deadline of the waiter if applicable 31 47 * @ww_ctx: WW context pointer 48 + * 49 + * @tree is ordered by @lock->wait_lock 50 + * @pi_tree is ordered by rt_mutex_owner(@lock)->pi_lock 32 51 */ 33 52 struct rt_mutex_waiter { 34 - struct rb_node tree_entry; 35 - struct rb_node pi_tree_entry; 53 + struct rt_waiter_node tree; 54 + struct rt_waiter_node pi_tree; 36 55 struct task_struct *task; 37 56 struct rt_mutex_base *lock; 38 57 unsigned int wake_state; 39 - int prio; 40 - u64 deadline; 41 58 struct ww_acquire_ctx *ww_ctx; 42 59 }; 43 60 ··· 122 105 { 123 106 struct rb_node *leftmost = rb_first_cached(&lock->waiters); 124 107 125 - return rb_entry(leftmost, struct rt_mutex_waiter, tree_entry) == waiter; 108 + return rb_entry(leftmost, struct rt_mutex_waiter, tree.entry) == waiter; 126 109 } 127 110 128 111 static inline struct rt_mutex_waiter *rt_mutex_top_waiter(struct rt_mutex_base *lock) ··· 130 113 struct rb_node *leftmost = rb_first_cached(&lock->waiters); 131 114 struct rt_mutex_waiter *w = NULL; 132 115 116 + lockdep_assert_held(&lock->wait_lock); 117 + 133 118 if (leftmost) { 134 - w = rb_entry(leftmost, struct rt_mutex_waiter, tree_entry); 119 + w = rb_entry(leftmost, struct rt_mutex_waiter, tree.entry); 135 120 BUG_ON(w->lock != lock); 136 121 } 137 122 return w; ··· 146 127 147 128 static inline struct rt_mutex_waiter *task_top_pi_waiter(struct task_struct *p) 148 129 { 130 + lockdep_assert_held(&p->pi_lock); 131 + 149 132 return rb_entry(p->pi_waiters.rb_leftmost, struct rt_mutex_waiter, 150 - pi_tree_entry); 133 + pi_tree.entry); 151 134 } 152 135 153 136 #define RT_MUTEX_HAS_WAITERS 1UL ··· 211 190 static inline void rt_mutex_init_waiter(struct rt_mutex_waiter *waiter) 212 191 { 213 192 debug_rt_mutex_init_waiter(waiter); 214 - RB_CLEAR_NODE(&waiter->pi_tree_entry); 215 - RB_CLEAR_NODE(&waiter->tree_entry); 193 + RB_CLEAR_NODE(&waiter->pi_tree.entry); 194 + RB_CLEAR_NODE(&waiter->tree.entry); 216 195 waiter->wake_state = TASK_NORMAL; 217 196 waiter->task = NULL; 218 197 }
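
Editor's note: the struct rt_waiter_node split above lets one waiter sit on two rbtrees (lock->waiters and owner->pi_waiters), each node carrying its own copy of the sort keys because the two trees are serialized by different locks. A toy userspace sketch of the underlying pattern, two embedded nodes recovered via container_of; the types here are illustrative stand-ins, not the kernel structures.

#include <stddef.h>
#include <stdio.h>

struct node { int prio; };           /* plays the role of rt_waiter_node */

struct waiter {
	struct node tree;            /* keyed under lock->wait_lock */
	struct node pi_tree;         /* keyed under owner->pi_lock  */
};

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

int main(void)
{
	struct waiter w = { .tree = { 10 }, .pi_tree = { 10 } };
	struct node *n = &w.pi_tree;	/* e.g. found in the owner's tree */

	/* Either embedded node gets us back to the waiter, and each tree
	 * carries its own copy of the key, so the pi_tree key can be
	 * updated under pi_lock without touching the wait_lock tree. */
	struct waiter *back = container_of(n, struct waiter, pi_tree);
	printf("same waiter: %d, pi prio %d\n", back == &w, n->prio);
	return 0;
}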
+6 -6
kernel/locking/ww_mutex.h
··· 96 96 struct rb_node *n = rb_first(&lock->rtmutex.waiters.rb_root); 97 97 if (!n) 98 98 return NULL; 99 - return rb_entry(n, struct rt_mutex_waiter, tree_entry); 99 + return rb_entry(n, struct rt_mutex_waiter, tree.entry); 100 100 } 101 101 102 102 static inline struct rt_mutex_waiter * 103 103 __ww_waiter_next(struct rt_mutex *lock, struct rt_mutex_waiter *w) 104 104 { 105 - struct rb_node *n = rb_next(&w->tree_entry); 105 + struct rb_node *n = rb_next(&w->tree.entry); 106 106 if (!n) 107 107 return NULL; 108 - return rb_entry(n, struct rt_mutex_waiter, tree_entry); 108 + return rb_entry(n, struct rt_mutex_waiter, tree.entry); 109 109 } 110 110 111 111 static inline struct rt_mutex_waiter * 112 112 __ww_waiter_prev(struct rt_mutex *lock, struct rt_mutex_waiter *w) 113 113 { 114 - struct rb_node *n = rb_prev(&w->tree_entry); 114 + struct rb_node *n = rb_prev(&w->tree.entry); 115 115 if (!n) 116 116 return NULL; 117 - return rb_entry(n, struct rt_mutex_waiter, tree_entry); 117 + return rb_entry(n, struct rt_mutex_waiter, tree.entry); 118 118 } 119 119 120 120 static inline struct rt_mutex_waiter * ··· 123 123 struct rb_node *n = rb_last(&lock->rtmutex.waiters.rb_root); 124 124 if (!n) 125 125 return NULL; 126 - return rb_entry(n, struct rt_mutex_waiter, tree_entry); 126 + return rb_entry(n, struct rt_mutex_waiter, tree.entry); 127 127 } 128 128 129 129 static inline void
+4
kernel/signal.c
··· 562 562 if (handler != SIG_IGN && handler != SIG_DFL) 563 563 return false; 564 564 565 + /* If dying, we handle all new signals by ignoring them */ 566 + if (fatal_signal_pending(tsk)) 567 + return false; 568 + 565 569 /* if ptraced, let the tracer determine */ 566 570 return !tsk->ptrace; 567 571 }
+13 -12
kernel/trace/ring_buffer.c
··· 523 523 rb_time_t before_stamp; 524 524 u64 event_stamp[MAX_NEST]; 525 525 u64 read_stamp; 526 + /* pages removed since last reset */ 527 + unsigned long pages_removed; 526 528 /* ring buffer pages to update, > 0 to add, < 0 to remove */ 527 529 long nr_pages_to_update; 528 530 struct list_head new_pages; /* new pages to add */ ··· 561 559 struct buffer_page *head_page; 562 560 struct buffer_page *cache_reader_page; 563 561 unsigned long cache_read; 562 + unsigned long cache_pages_removed; 564 563 u64 read_stamp; 565 564 u64 page_stamp; 566 565 struct ring_buffer_event *event; ··· 950 947 /** 951 948 * ring_buffer_wake_waiters - wake up any waiters on this ring buffer 952 949 * @buffer: The ring buffer to wake waiters on 950 + * @cpu: The CPU buffer to wake waiters on 953 951 * 954 952 * In the case of a file that represents a ring buffer is closing, 955 953 * it is prudent to wake up any waiters that are on this. ··· 1961 1957 to_remove = rb_list_head(to_remove)->next; 1962 1958 head_bit |= (unsigned long)to_remove & RB_PAGE_HEAD; 1963 1959 } 1960 + /* Read iterators need to reset themselves when some pages removed */ 1961 + cpu_buffer->pages_removed += nr_removed; 1964 1962 1965 1963 next_page = rb_list_head(to_remove)->next; 1966 1964 ··· 1983 1977 if (head_bit) 1984 1978 cpu_buffer->head_page = list_entry(next_page, 1985 1979 struct buffer_page, list); 1986 - 1987 - /* 1988 - * change read pointer to make sure any read iterators reset 1989 - * themselves 1990 - */ 1991 - cpu_buffer->read = 0; 1992 1980 1993 1981 /* pages are removed, resume tracing and then free the pages */ 1994 1982 atomic_dec(&cpu_buffer->record_disabled); ··· 3376 3376 /** 3377 3377 * ring_buffer_unlock_commit - commit a reserved 3378 3378 * @buffer: The buffer to commit to 3379 - * @event: The event pointer to commit. 3380 3379 * 3381 3380 * This commits the data to the ring buffer, and releases any locks held. 3382 3381 * ··· 4394 4395 4395 4396 iter->cache_reader_page = iter->head_page; 4396 4397 iter->cache_read = cpu_buffer->read; 4398 + iter->cache_pages_removed = cpu_buffer->pages_removed; 4397 4399 4398 4400 if (iter->head) { 4399 4401 iter->read_stamp = cpu_buffer->read_stamp; ··· 4849 4849 buffer = cpu_buffer->buffer; 4850 4850 4851 4851 /* 4852 - * Check if someone performed a consuming read to 4853 - * the buffer. A consuming read invalidates the iterator 4854 - * and we need to reset the iterator in this case. 4852 + * Check if someone performed a consuming read to the buffer 4853 + * or removed some pages from the buffer. In these cases, 4854 + * iterator was invalidated and we need to reset it. 4855 4855 */ 4856 4856 if (unlikely(iter->cache_read != cpu_buffer->read || 4857 - iter->cache_reader_page != cpu_buffer->reader_page)) 4857 + iter->cache_reader_page != cpu_buffer->reader_page || 4858 + iter->cache_pages_removed != cpu_buffer->pages_removed)) 4858 4859 rb_iter_reset(iter); 4859 4860 4860 4861 again: ··· 5299 5298 cpu_buffer->last_overrun = 0; 5300 5299 5301 5300 rb_head_page_activate(cpu_buffer); 5301 + cpu_buffer->pages_removed = 0; 5302 5302 } 5303 5303 5304 5304 /* Must have disabled the cpu buffer then done a synchronize_rcu */ ··· 5358 5356 /** 5359 5357 * ring_buffer_reset_online_cpus - reset a ring buffer per CPU buffer 5360 5358 * @buffer: The ring buffer to reset a per cpu buffer of 5361 - * @cpu: The CPU buffer to be reset 5362 5359 */ 5363 5360 void ring_buffer_reset_online_cpus(struct trace_buffer *buffer) 5364 5361 {
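
Editor's note: the pages_removed change above replaces the old trick of zeroing cpu_buffer->read with an explicit generation counter that iterators cache and compare. A reduced, runnable sketch of that invalidation scheme; the struct names are illustrative, not the ring buffer's.

#include <stdbool.h>
#include <stdio.h>

struct buf  { unsigned long pages_removed; };
struct iter { struct buf *b; unsigned long cache_pages_removed; };

static void iter_reset(struct iter *it)
{
	it->cache_pages_removed = it->b->pages_removed;
	/* ...re-read head page, stamps, etc... */
}

static bool iter_valid(const struct iter *it)
{
	/* Any mismatch means pages the iterator may still reference
	 * have been freed, so it must resync rather than chase them. */
	return it->cache_pages_removed == it->b->pages_removed;
}

int main(void)
{
	struct buf b = { 0 };
	struct iter it = { .b = &b };

	iter_reset(&it);
	b.pages_removed += 3;		/* e.g. a resize removed pages */
	if (!iter_valid(&it))
		iter_reset(&it);	/* what rb_iter_reset() does */
	printf("iterator resynced at generation %lu\n", it.cache_pages_removed);
	return 0;
}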
+4 -10
kernel/trace/trace_events.c
··· 611 611 { 612 612 struct trace_event_call *call = file->event_call; 613 613 struct trace_array *tr = file->tr; 614 - unsigned long file_flags = file->flags; 615 614 int ret = 0; 616 615 int disable; 617 616 ··· 634 635 break; 635 636 disable = file->flags & EVENT_FILE_FL_SOFT_DISABLED; 636 637 clear_bit(EVENT_FILE_FL_SOFT_MODE_BIT, &file->flags); 638 + /* Disable use of trace_buffered_event */ 639 + trace_buffered_event_disable(); 637 640 } else 638 641 disable = !(file->flags & EVENT_FILE_FL_SOFT_MODE); 639 642 ··· 674 673 if (atomic_inc_return(&file->sm_ref) > 1) 675 674 break; 676 675 set_bit(EVENT_FILE_FL_SOFT_MODE_BIT, &file->flags); 676 + /* Enable use of trace_buffered_event */ 677 + trace_buffered_event_enable(); 677 678 } 678 679 679 680 if (!(file->flags & EVENT_FILE_FL_ENABLED)) { ··· 713 710 set_bit(EVENT_FILE_FL_WAS_ENABLED_BIT, &file->flags); 714 711 } 715 712 break; 716 - } 717 - 718 - /* Enable or disable use of trace_buffered_event */ 719 - if ((file_flags & EVENT_FILE_FL_SOFT_DISABLED) != 720 - (file->flags & EVENT_FILE_FL_SOFT_DISABLED)) { 721 - if (file->flags & EVENT_FILE_FL_SOFT_DISABLED) 722 - trace_buffered_event_enable(); 723 - else 724 - trace_buffered_event_disable(); 725 713 } 726 714 727 715 return ret;
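
Editor's note: the trace_events.c rework above pairs trace_buffered_event_enable()/disable() directly with the first and last soft-mode reference, instead of inferring the transition by diffing the flag word before and after. A compressed sketch of that counted-edge pattern, with plain ints standing in for the real atomics and hooks.

#include <stdio.h>

static int sm_ref;	/* stand-in for file->sm_ref */

static void buffered_enable(void)  { puts("enable buffered events");  }
static void buffered_disable(void) { puts("disable buffered events"); }

/* Do the expensive transition only on the 0->1 and 1->0 edges, at the
 * same place the mode itself flips, so enable/disable always pair. */
static void soft_get(void)
{
	if (++sm_ref == 1)
		buffered_enable();
}

static void soft_put(void)
{
	if (--sm_ref == 0)
		buffered_disable();
}

int main(void)
{
	soft_get();	/* enables */
	soft_get();	/* no-op */
	soft_put();	/* no-op */
	soft_put();	/* disables */
	return 0;
}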
+1
kernel/trace/trace_events_synth.c
··· 1230 1230 * synth_event_gen_cmd_array_start - Start synthetic event command from an array 1231 1231 * @cmd: A pointer to the dynevent_cmd struct representing the new event 1232 1232 * @name: The name of the synthetic event 1233 + * @mod: The module creating the event, NULL if not created from a module 1233 1234 * @fields: An array of type/name field descriptions 1234 1235 * @n_fields: The number of field descriptions contained in the fields array 1235 1236 *
+2
kernel/trace/trace_events_trigger.c
··· 31 31 /** 32 32 * event_triggers_call - Call triggers associated with a trace event 33 33 * @file: The trace_event_file associated with the event 34 + * @buffer: The ring buffer that the event is being written to 34 35 * @rec: The trace entry for the event, NULL for unconditional invocation 36 + * @event: The event meta data in the ring buffer 35 37 * 36 38 * For each trigger associated with an event, invoke the trigger 37 39 * function registered with the associated trigger command. If rec is
+4 -4
kernel/trace/trace_probe.c
··· 386 386 387 387 /* Get BTF_KIND_FUNC type */ 388 388 t = btf_type_by_id(btf, id); 389 - if (!btf_type_is_func(t)) 389 + if (!t || !btf_type_is_func(t)) 390 390 return ERR_PTR(-ENOENT); 391 391 392 392 /* The type of BTF_KIND_FUNC is BTF_KIND_FUNC_PROTO */ 393 393 t = btf_type_by_id(btf, t->type); 394 - if (!btf_type_is_func_proto(t)) 394 + if (!t || !btf_type_is_func_proto(t)) 395 395 return ERR_PTR(-ENOENT); 396 396 397 397 return t; ··· 443 443 if (!ctx->params) { 444 444 params = find_btf_func_param(ctx->funcname, &ctx->nr_params, 445 445 ctx->flags & TPARG_FL_TPOINT); 446 - if (IS_ERR(params)) { 446 + if (IS_ERR_OR_NULL(params)) { 447 447 trace_probe_log_err(ctx->offset, NO_BTF_ENTRY); 448 448 return PTR_ERR(params); 449 449 } ··· 1273 1273 1274 1274 params = find_btf_func_param(ctx->funcname, &nr_params, 1275 1275 ctx->flags & TPARG_FL_TPOINT); 1276 - if (IS_ERR(params)) { 1276 + if (IS_ERR_OR_NULL(params)) { 1277 1277 if (args_idx != -1) { 1278 1278 /* $arg* requires BTF info */ 1279 1279 trace_probe_log_err(0, NOSUP_BTFARG);
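
Editor's note: the trace_probe.c hunks harden lookups that can return either NULL or an ERR_PTR()-encoded errno; checking only IS_ERR() lets NULL through. ERR_PTR and IS_ERR_OR_NULL are real kernel macros; below is a userspace re-implementation of the convention to show why both cases must be covered, with a hypothetical find_param() as the lookup.

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* Small negative errnos are folded into the top of the address space. */
#define MAX_ERRNO 4095

static void *ERR_PTR(long err)             { return (void *)err; }
static bool  IS_ERR(const void *p)         { return (unsigned long)p >= (unsigned long)-MAX_ERRNO; }
static bool  IS_ERR_OR_NULL(const void *p) { return !p || IS_ERR(p); }

static void *find_param(bool found, bool fail)
{
	if (fail)
		return ERR_PTR(-ENOENT);
	return found ? (void *)0x1000 : NULL;	/* NULL: no params at all */
}

int main(void)
{
	void *p = find_param(false, false);

	/* Checking only IS_ERR(p) here would let NULL through and
	 * crash the caller later, which is what the fix prevents. */
	if (IS_ERR_OR_NULL(p))
		puts("no parameters (or error) - bail out cleanly");
	return 0;
}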
+1
kernel/trace/trace_seq.c
··· 131 131 * trace_seq_vprintf - sequence printing of trace information 132 132 * @s: trace sequence descriptor 133 133 * @fmt: printf format string 134 + * @args: Arguments for the format string 134 135 * 135 136 * The tracer may use either sequence operations or its own 136 137 * copy to user routines. To simplify formatting of a trace
+1 -1
lib/genalloc.c
··· 895 895 896 896 of_property_read_string(np_pool, "label", &name); 897 897 if (!name) 898 - name = np_pool->name; 898 + name = of_node_full_name(np_pool); 899 899 } 900 900 if (pdev) 901 901 pool = gen_pool_get(&pdev->dev, name);
+5 -5
mm/damon/core-test.h
··· 320 320 321 321 static void damon_test_set_attrs(struct kunit *test) 322 322 { 323 - struct damon_ctx ctx; 323 + struct damon_ctx *c = damon_new_ctx(); 324 324 struct damon_attrs valid_attrs = { 325 325 .min_nr_regions = 10, .max_nr_regions = 1000, 326 326 .sample_interval = 5000, .aggr_interval = 100000,}; 327 327 struct damon_attrs invalid_attrs; 328 328 329 - KUNIT_EXPECT_EQ(test, damon_set_attrs(&ctx, &valid_attrs), 0); 329 + KUNIT_EXPECT_EQ(test, damon_set_attrs(c, &valid_attrs), 0); 330 330 331 331 invalid_attrs = valid_attrs; 332 332 invalid_attrs.min_nr_regions = 1; 333 - KUNIT_EXPECT_EQ(test, damon_set_attrs(&ctx, &invalid_attrs), -EINVAL); 333 + KUNIT_EXPECT_EQ(test, damon_set_attrs(c, &invalid_attrs), -EINVAL); 334 334 335 335 invalid_attrs = valid_attrs; 336 336 invalid_attrs.max_nr_regions = 9; 337 - KUNIT_EXPECT_EQ(test, damon_set_attrs(&ctx, &invalid_attrs), -EINVAL); 337 + KUNIT_EXPECT_EQ(test, damon_set_attrs(c, &invalid_attrs), -EINVAL); 338 338 339 339 invalid_attrs = valid_attrs; 340 340 invalid_attrs.aggr_interval = 4999; 341 - KUNIT_EXPECT_EQ(test, damon_set_attrs(&ctx, &invalid_attrs), -EINVAL); 341 + KUNIT_EXPECT_EQ(test, damon_set_attrs(c, &invalid_attrs), -EINVAL); 342 342 } 343 343 344 344 static struct kunit_case damon_test_cases[] = {
+1 -1
mm/memory-failure.c
··· 2487 2487 goto unlock_mutex; 2488 2488 } 2489 2489 2490 - if (!folio_test_hwpoison(folio)) { 2490 + if (!PageHWPoison(p)) { 2491 2491 unpoison_pr_info("Unpoison: Page was already unpoisoned %#lx\n", 2492 2492 pfn, &unpoison_rs); 2493 2493 goto unlock_mutex;
+16 -12
mm/memory.c
··· 5393 5393 if (!vma_is_anonymous(vma) && !vma_is_tcp(vma)) 5394 5394 goto inval; 5395 5395 5396 - /* find_mergeable_anon_vma uses adjacent vmas which are not locked */ 5397 - if (!vma->anon_vma && !vma_is_tcp(vma)) 5398 - goto inval; 5399 - 5400 5396 if (!vma_start_read(vma)) 5401 5397 goto inval; 5398 + 5399 + /* 5400 + * find_mergeable_anon_vma uses adjacent vmas which are not locked. 5401 + * This check must happen after vma_start_read(); otherwise, a 5402 + * concurrent mremap() with MREMAP_DONTUNMAP could dissociate the VMA 5403 + * from its anon_vma. 5404 + */ 5405 + if (unlikely(!vma->anon_vma && !vma_is_tcp(vma))) 5406 + goto inval_end_read; 5402 5407 5403 5408 /* 5404 5409 * Due to the possibility of userfault handler dropping mmap_lock, avoid 5405 5410 * it for now and fall back to page fault handling under mmap_lock. 5406 5411 */ 5407 - if (userfaultfd_armed(vma)) { 5408 - vma_end_read(vma); 5409 - goto inval; 5410 - } 5412 + if (userfaultfd_armed(vma)) 5413 + goto inval_end_read; 5411 5414 5412 5415 /* Check since vm_start/vm_end might change before we lock the VMA */ 5413 - if (unlikely(address < vma->vm_start || address >= vma->vm_end)) { 5414 - vma_end_read(vma); 5415 - goto inval; 5416 - } 5416 + if (unlikely(address < vma->vm_start || address >= vma->vm_end)) 5417 + goto inval_end_read; 5417 5418 5418 5419 /* Check if the VMA got isolated after we found it */ 5419 5420 if (vma->detached) { ··· 5426 5425 5427 5426 rcu_read_unlock(); 5428 5427 return vma; 5428 + 5429 + inval_end_read: 5430 + vma_end_read(vma); 5429 5431 inval: 5430 5432 rcu_read_unlock(); 5431 5433 count_vm_vma_lock_event(VMA_LOCK_ABORT);
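
Editor's note: the lock_vma_under_rcu() fix above moves every check that a concurrent writer can invalidate to *after* vma_start_read(), and funnels all such failures through one unlock label. A reduced, runnable sketch of that check-after-lock shape with goto unwinding; the struct and helpers are illustrative stand-ins.

#include <stdbool.h>
#include <stdio.h>

struct vma { bool locked, has_anon, uffd_armed; };

static bool vma_start_read(struct vma *v) { v->locked = true; return true; }
static void vma_end_read(struct vma *v)   { v->locked = false; }

static struct vma *lookup(struct vma *v)
{
	if (!vma_start_read(v))
		goto inval;

	/* Checked only under the read lock; before the fix this ran
	 * earlier and could race with mremap() detaching the anon_vma. */
	if (!v->has_anon)
		goto inval_end_read;
	if (v->uffd_armed)
		goto inval_end_read;

	return v;

inval_end_read:
	vma_end_read(v);
inval:
	return NULL;
}

int main(void)
{
	struct vma v = { .has_anon = true };
	printf("%s\n", lookup(&v) ? "locked vma" : "fallback to mmap_lock");
	return 0;
}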
+14 -1
mm/mempolicy.c
··· 384 384 VMA_ITERATOR(vmi, mm, 0); 385 385 386 386 mmap_write_lock(mm); 387 - for_each_vma(vmi, vma) 387 + for_each_vma(vmi, vma) { 388 + vma_start_write(vma); 388 389 mpol_rebind_policy(vma->vm_policy, new); 390 + } 389 391 mmap_write_unlock(mm); 390 392 } 391 393 ··· 769 767 int err; 770 768 struct mempolicy *old; 771 769 struct mempolicy *new; 770 + 771 + vma_assert_write_locked(vma); 772 772 773 773 pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n", 774 774 vma->vm_start, vma->vm_end, vma->vm_pgoff, ··· 1317 1313 if (err) 1318 1314 goto mpol_out; 1319 1315 1316 + /* 1317 + * Lock the VMAs before scanning for pages to migrate, to ensure we don't 1318 + * miss a concurrently inserted page. 1319 + */ 1320 + vma_iter_init(&vmi, mm, start); 1321 + for_each_vma_range(vmi, vma, end) 1322 + vma_start_write(vma); 1323 + 1320 1324 ret = queue_pages_range(mm, start, end, nmask, 1321 1325 flags | MPOL_MF_INVERT, &pagelist); 1322 1326 ··· 1550 1538 break; 1551 1539 } 1552 1540 1541 + vma_start_write(vma); 1553 1542 new->home_node = home_node; 1554 1543 err = mbind_range(&vmi, vma, &prev, start, end, new); 1555 1544 mpol_put(new);
+1
mm/mmap.c
··· 615 615 * anon pages imported. 616 616 */ 617 617 if (src->anon_vma && !dst->anon_vma) { 618 + vma_start_write(dst); 618 619 dst->anon_vma = src->anon_vma; 619 620 return anon_vma_clone(dst, src); 620 621 }
+4 -1
mm/pagewalk.c
··· 48 48 if (walk->no_vma) { 49 49 /* 50 50 * pte_offset_map() might apply user-specific validation. 51 + * Indeed, on x86_64 the pmd entries set up by init_espfix_ap() 52 + * fit its pmd_bad() check (_PAGE_NX set and _PAGE_RW clear), 53 + * and CONFIG_EFI_PGT_DUMP efi_mm goes so far as to walk them. 51 54 */ 52 - if (walk->mm == &init_mm) 55 + if (walk->mm == &init_mm || addr >= TASK_SIZE) 53 56 pte = pte_offset_kernel(pmd, addr); 54 57 else 55 58 pte = pte_offset_map(pmd, addr);
+6 -3
mm/shmem.c
··· 2796 2796 if (*ppos >= i_size_read(inode)) 2797 2797 break; 2798 2798 2799 - error = shmem_get_folio(inode, *ppos / PAGE_SIZE, &folio, SGP_READ); 2799 + error = shmem_get_folio(inode, *ppos / PAGE_SIZE, &folio, 2800 + SGP_READ); 2800 2801 if (error) { 2801 2802 if (error == -EINVAL) 2802 2803 error = 0; ··· 2806 2805 if (folio) { 2807 2806 folio_unlock(folio); 2808 2807 2809 - if (folio_test_hwpoison(folio)) { 2808 + if (folio_test_hwpoison(folio) || 2809 + (folio_test_large(folio) && 2810 + folio_test_has_hwpoisoned(folio))) { 2810 2811 error = -EIO; 2811 2812 break; 2812 2813 } ··· 2844 2841 folio_put(folio); 2845 2842 folio = NULL; 2846 2843 } else { 2847 - n = splice_zeropage_into_pipe(pipe, *ppos, len); 2844 + n = splice_zeropage_into_pipe(pipe, *ppos, part); 2848 2845 } 2849 2846 2850 2847 if (!n)
+12 -34
net/9p/client.c
··· 904 904 905 905 static int p9_client_version(struct p9_client *c) 906 906 { 907 - int err = 0; 907 + int err; 908 908 struct p9_req_t *req; 909 909 char *version = NULL; 910 910 int msize; ··· 975 975 struct p9_client *clnt; 976 976 char *client_id; 977 977 978 - err = 0; 979 978 clnt = kmalloc(sizeof(*clnt), GFP_KERNEL); 980 979 if (!clnt) 981 980 return ERR_PTR(-ENOMEM); ··· 1093 1094 const char *uname, kuid_t n_uname, 1094 1095 const char *aname) 1095 1096 { 1096 - int err = 0; 1097 + int err; 1097 1098 struct p9_req_t *req; 1098 1099 struct p9_fid *fid; 1099 1100 struct p9_qid qid; ··· 1146 1147 struct p9_req_t *req; 1147 1148 u16 nwqids, count; 1148 1149 1149 - err = 0; 1150 1150 wqids = NULL; 1151 1151 clnt = oldfid->clnt; 1152 1152 if (clone) { ··· 1222 1224 clnt = fid->clnt; 1223 1225 p9_debug(P9_DEBUG_9P, ">>> %s fid %d mode %d\n", 1224 1226 p9_is_proto_dotl(clnt) ? "TLOPEN" : "TOPEN", fid->fid, mode); 1225 - err = 0; 1226 1227 1227 1228 if (fid->mode != -1) 1228 1229 return -EINVAL; ··· 1259 1262 int p9_client_create_dotl(struct p9_fid *ofid, const char *name, u32 flags, 1260 1263 u32 mode, kgid_t gid, struct p9_qid *qid) 1261 1264 { 1262 - int err = 0; 1265 + int err; 1263 1266 struct p9_client *clnt; 1264 1267 struct p9_req_t *req; 1265 1268 int iounit; ··· 1311 1314 1312 1315 p9_debug(P9_DEBUG_9P, ">>> TCREATE fid %d name %s perm %d mode %d\n", 1313 1316 fid->fid, name, perm, mode); 1314 - err = 0; 1315 1317 clnt = fid->clnt; 1316 1318 1317 1319 if (fid->mode != -1) ··· 1346 1350 int p9_client_symlink(struct p9_fid *dfid, const char *name, 1347 1351 const char *symtgt, kgid_t gid, struct p9_qid *qid) 1348 1352 { 1349 - int err = 0; 1353 + int err; 1350 1354 struct p9_client *clnt; 1351 1355 struct p9_req_t *req; 1352 1356 ··· 1398 1402 1399 1403 int p9_client_fsync(struct p9_fid *fid, int datasync) 1400 1404 { 1401 - int err; 1405 + int err = 0; 1402 1406 struct p9_client *clnt; 1403 1407 struct p9_req_t *req; 1404 1408 1405 1409 p9_debug(P9_DEBUG_9P, ">>> TFSYNC fid %d datasync:%d\n", 1406 1410 fid->fid, datasync); 1407 - err = 0; 1408 1411 clnt = fid->clnt; 1409 1412 1410 1413 req = p9_client_rpc(clnt, P9_TFSYNC, "dd", fid->fid, datasync); ··· 1423 1428 1424 1429 int p9_client_clunk(struct p9_fid *fid) 1425 1430 { 1426 - int err; 1431 + int err = 0; 1427 1432 struct p9_client *clnt; 1428 1433 struct p9_req_t *req; 1429 1434 int retries = 0; ··· 1431 1436 again: 1432 1437 p9_debug(P9_DEBUG_9P, ">>> TCLUNK fid %d (try %d)\n", 1433 1438 fid->fid, retries); 1434 - err = 0; 1435 1439 clnt = fid->clnt; 1436 1440 1437 1441 req = p9_client_rpc(clnt, P9_TCLUNK, "d", fid->fid); ··· 1459 1465 1460 1466 int p9_client_remove(struct p9_fid *fid) 1461 1467 { 1462 - int err; 1468 + int err = 0; 1463 1469 struct p9_client *clnt; 1464 1470 struct p9_req_t *req; 1465 1471 1466 1472 p9_debug(P9_DEBUG_9P, ">>> TREMOVE fid %d\n", fid->fid); 1467 - err = 0; 1468 1473 clnt = fid->clnt; 1469 1474 1470 1475 req = p9_client_rpc(clnt, P9_TREMOVE, "d", fid->fid); ··· 1673 1680 if (!ret) 1674 1681 return ERR_PTR(-ENOMEM); 1675 1682 1676 - err = 0; 1677 1683 clnt = fid->clnt; 1678 1684 1679 1685 req = p9_client_rpc(clnt, P9_TSTAT, "d", fid->fid); ··· 1725 1733 if (!ret) 1726 1734 return ERR_PTR(-ENOMEM); 1727 1735 1728 - err = 0; 1729 1736 clnt = fid->clnt; 1730 1737 1731 1738 req = p9_client_rpc(clnt, P9_TGETATTR, "dq", fid->fid, request_mask); ··· 1803 1812 1804 1813 int p9_client_wstat(struct p9_fid *fid, struct p9_wstat *wst) 1805 1814 { 1806 - int err; 1815 + int err = 0; 1807 1816 struct 
p9_req_t *req; 1808 1817 struct p9_client *clnt; 1809 1818 1810 - err = 0; 1811 1819 clnt = fid->clnt; 1812 1820 wst->size = p9_client_statsize(wst, clnt->proto_version); 1813 1821 p9_debug(P9_DEBUG_9P, ">>> TWSTAT fid %d\n", ··· 1841 1851 1842 1852 int p9_client_setattr(struct p9_fid *fid, struct p9_iattr_dotl *p9attr) 1843 1853 { 1844 - int err; 1854 + int err = 0; 1845 1855 struct p9_req_t *req; 1846 1856 struct p9_client *clnt; 1847 1857 1848 - err = 0; 1849 1858 clnt = fid->clnt; 1850 1859 p9_debug(P9_DEBUG_9P, ">>> TSETATTR fid %d\n", fid->fid); 1851 1860 p9_debug(P9_DEBUG_9P, " valid=%x mode=%x uid=%d gid=%d size=%lld\n", ··· 1876 1887 struct p9_req_t *req; 1877 1888 struct p9_client *clnt; 1878 1889 1879 - err = 0; 1880 1890 clnt = fid->clnt; 1881 1891 1882 1892 p9_debug(P9_DEBUG_9P, ">>> TSTATFS fid %d\n", fid->fid); ··· 1909 1921 int p9_client_rename(struct p9_fid *fid, 1910 1922 struct p9_fid *newdirfid, const char *name) 1911 1923 { 1912 - int err; 1924 + int err = 0; 1913 1925 struct p9_req_t *req; 1914 1926 struct p9_client *clnt; 1915 1927 1916 - err = 0; 1917 1928 clnt = fid->clnt; 1918 1929 1919 1930 p9_debug(P9_DEBUG_9P, ">>> TRENAME fid %d newdirfid %d name %s\n", ··· 1936 1949 int p9_client_renameat(struct p9_fid *olddirfid, const char *old_name, 1937 1950 struct p9_fid *newdirfid, const char *new_name) 1938 1951 { 1939 - int err; 1952 + int err = 0; 1940 1953 struct p9_req_t *req; 1941 1954 struct p9_client *clnt; 1942 1955 1943 - err = 0; 1944 1956 clnt = olddirfid->clnt; 1945 1957 1946 1958 p9_debug(P9_DEBUG_9P, ··· 1972 1986 struct p9_client *clnt; 1973 1987 struct p9_fid *attr_fid; 1974 1988 1975 - err = 0; 1976 1989 clnt = file_fid->clnt; 1977 1990 attr_fid = p9_fid_create(clnt); 1978 1991 if (!attr_fid) { ··· 2012 2027 int p9_client_xattrcreate(struct p9_fid *fid, const char *name, 2013 2028 u64 attr_size, int flags) 2014 2029 { 2015 - int err; 2030 + int err = 0; 2016 2031 struct p9_req_t *req; 2017 2032 struct p9_client *clnt; 2018 2033 2019 2034 p9_debug(P9_DEBUG_9P, 2020 2035 ">>> TXATTRCREATE fid %d name %s size %llu flag %d\n", 2021 2036 fid->fid, name, attr_size, flags); 2022 - err = 0; 2023 2037 clnt = fid->clnt; 2024 2038 req = p9_client_rpc(clnt, P9_TXATTRCREATE, "dsqd", 2025 2039 fid->fid, name, attr_size, flags); ··· 2047 2063 p9_debug(P9_DEBUG_9P, ">>> TREADDIR fid %d offset %llu count %d\n", 2048 2064 fid->fid, offset, count); 2049 2065 2050 - err = 0; 2051 2066 clnt = fid->clnt; 2052 2067 2053 2068 rsize = fid->iounit; ··· 2105 2122 struct p9_client *clnt; 2106 2123 struct p9_req_t *req; 2107 2124 2108 - err = 0; 2109 2125 clnt = fid->clnt; 2110 2126 p9_debug(P9_DEBUG_9P, 2111 2127 ">>> TMKNOD fid %d name %s mode %d major %d minor %d\n", ··· 2135 2153 struct p9_client *clnt; 2136 2154 struct p9_req_t *req; 2137 2155 2138 - err = 0; 2139 2156 clnt = fid->clnt; 2140 2157 p9_debug(P9_DEBUG_9P, ">>> TMKDIR fid %d name %s mode %d gid %d\n", 2141 2158 fid->fid, name, mode, from_kgid(&init_user_ns, gid)); ··· 2163 2182 struct p9_client *clnt; 2164 2183 struct p9_req_t *req; 2165 2184 2166 - err = 0; 2167 2185 clnt = fid->clnt; 2168 2186 p9_debug(P9_DEBUG_9P, 2169 2187 ">>> TLOCK fid %d type %i flags %d start %lld length %lld proc_id %d client_id %s\n", ··· 2194 2214 struct p9_client *clnt; 2195 2215 struct p9_req_t *req; 2196 2216 2197 - err = 0; 2198 2217 clnt = fid->clnt; 2199 2218 p9_debug(P9_DEBUG_9P, 2200 2219 ">>> TGETLOCK fid %d, type %i start %lld length %lld proc_id %d client_id %s\n", ··· 2230 2251 struct p9_client *clnt; 2231 2252 struct 
p9_req_t *req; 2232 2253 2233 - err = 0; 2234 2254 clnt = fid->clnt; 2235 2255 p9_debug(P9_DEBUG_9P, ">>> TREADLINK fid %d\n", fid->fid); 2236 2256
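
Editor's note: the 9p cleanup above is not mechanical. Functions whose paths always assign err before returning lose the dead err = 0 store, while functions that can reach the return through a goto before any assignment keep it as an initializer. A compressed illustration of the distinction, with hypothetical function names.

#include <stdio.h>

static int always_assigns(int fail)
{
	int err;			/* no initializer needed... */

	err = fail ? -1 : 0;		/* ...every path writes it */
	return err;
}

static int may_skip_assign(int already_done)
{
	int err = 0;			/* initializer required */

	if (already_done)
		goto out;		/* would return err unassigned */
	err = -1;
out:
	return err;
}

int main(void)
{
	printf("%d %d\n", always_assigns(0), may_skip_assign(1));
	return 0;
}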
+4 -4
net/9p/trans_virtio.c
··· 384 384 void *to = req->rc.sdata + in_hdr_len; 385 385 386 386 // Fits entirely into the static data? Nothing to do. 387 - if (req->rc.size < in_hdr_len) 387 + if (req->rc.size < in_hdr_len || !pages) 388 388 return; 389 389 390 390 // Really long error message? Tough, truncate the reply. Might get ··· 428 428 struct page **in_pages = NULL, **out_pages = NULL; 429 429 struct virtio_chan *chan = client->trans; 430 430 struct scatterlist *sgs[4]; 431 - size_t offs; 431 + size_t offs = 0; 432 432 int need_drop = 0; 433 433 int kicked = 0; 434 434 ··· 501 501 502 502 if (in_pages) { 503 503 sgs[out_sgs + in_sgs++] = chan->sg + out + in; 504 - in += pack_sg_list_p(chan->sg, out + in, VIRTQUEUE_NUM, 505 - in_pages, in_nr_pages, offs, inlen); 504 + pack_sg_list_p(chan->sg, out + in, VIRTQUEUE_NUM, 505 + in_pages, in_nr_pages, offs, inlen); 506 506 } 507 507 508 508 BUG_ON(out_sgs + in_sgs > ARRAY_SIZE(sgs));
+3 -2
net/can/raw.c
··· 386 386 list_del(&ro->notifier); 387 387 spin_unlock(&raw_notifier_lock); 388 388 389 + rtnl_lock(); 389 390 lock_sock(sk); 390 391 391 - rtnl_lock(); 392 392 /* remove current filters & unregister */ 393 393 if (ro->bound) { 394 394 if (ro->dev) ··· 405 405 ro->dev = NULL; 406 406 ro->count = 0; 407 407 free_percpu(ro->uniq); 408 - rtnl_unlock(); 409 408 410 409 sock_orphan(sk); 411 410 sock->sk = NULL; 412 411 413 412 release_sock(sk); 413 + rtnl_unlock(); 414 + 414 415 sock_put(sk); 415 416 416 417 return 0;
+1
net/ceph/messenger.c
··· 1123 1123 return true; 1124 1124 } 1125 1125 } 1126 + EXPORT_SYMBOL(ceph_addr_is_blank); 1126 1127 1127 1128 int ceph_addr_port(const struct ceph_entity_addr *addr) 1128 1129 {
+10 -4
net/ipv6/addrconf.c
··· 2561 2561 ipv6_ifa_notify(0, ift); 2562 2562 } 2563 2563 2564 - if ((create || list_empty(&idev->tempaddr_list)) && 2565 - idev->cnf.use_tempaddr > 0) { 2564 + /* Also create a temporary address if it's enabled but no temporary 2565 + * address currently exists. 2566 + * However, we get called with valid_lft == 0, prefered_lft == 0, create == false 2567 + * as part of cleanup (ie. deleting the mngtmpaddr). 2568 + * We don't want that to result in creating a new temporary ip address. 2569 + */ 2570 + if (list_empty(&idev->tempaddr_list) && (valid_lft || prefered_lft)) 2571 + create = true; 2572 + 2573 + if (create && idev->cnf.use_tempaddr > 0) { 2566 2574 /* When a new public address is created as described 2567 2575 * in [ADDRCONF], also create a new temporary address. 2568 - * Also create a temporary address if it's enabled but 2569 - * no temporary address currently exists. 2570 2576 */ 2571 2577 read_unlock_bh(&idev->lock); 2572 2578 ipv6_create_tempaddr(ifp, false);
+1 -2
net/mptcp/protocol.c
··· 3723 3723 if (!err) { 3724 3724 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1); 3725 3725 mptcp_copy_inaddrs(sk, ssock->sk); 3726 + mptcp_event_pm_listener(ssock->sk, MPTCP_EVENT_LISTENER_CREATED); 3726 3727 } 3727 - 3728 - mptcp_event_pm_listener(ssock->sk, MPTCP_EVENT_LISTENER_CREATED); 3729 3728 3730 3729 unlock: 3731 3730 release_sock(sk);
+3 -2
net/netfilter/nf_tables_api.c
··· 3811 3811 NL_SET_BAD_ATTR(extack, nla[NFTA_RULE_CHAIN]); 3812 3812 return PTR_ERR(chain); 3813 3813 } 3814 - if (nft_chain_is_bound(chain)) 3815 - return -EOPNOTSUPP; 3816 3814 3817 3815 } else if (nla[NFTA_RULE_CHAIN_ID]) { 3818 3816 chain = nft_chain_lookup_byid(net, table, nla[NFTA_RULE_CHAIN_ID], ··· 3822 3824 } else { 3823 3825 return -EINVAL; 3824 3826 } 3827 + 3828 + if (nft_chain_is_bound(chain)) 3829 + return -EOPNOTSUPP; 3825 3830 3826 3831 if (nla[NFTA_RULE_HANDLE]) { 3827 3832 handle = be64_to_cpu(nla_get_be64(nla[NFTA_RULE_HANDLE]));
+18 -9
net/netfilter/nft_immediate.c
··· 125 125 return nft_data_hold(&priv->data, nft_dreg_to_type(priv->dreg)); 126 126 } 127 127 128 + static void nft_immediate_chain_deactivate(const struct nft_ctx *ctx, 129 + struct nft_chain *chain, 130 + enum nft_trans_phase phase) 131 + { 132 + struct nft_ctx chain_ctx; 133 + struct nft_rule *rule; 134 + 135 + chain_ctx = *ctx; 136 + chain_ctx.chain = chain; 137 + 138 + list_for_each_entry(rule, &chain->rules, list) 139 + nft_rule_expr_deactivate(&chain_ctx, rule, phase); 140 + } 141 + 128 142 static void nft_immediate_deactivate(const struct nft_ctx *ctx, 129 143 const struct nft_expr *expr, 130 144 enum nft_trans_phase phase) 131 145 { 132 146 const struct nft_immediate_expr *priv = nft_expr_priv(expr); 133 147 const struct nft_data *data = &priv->data; 134 - struct nft_ctx chain_ctx; 135 148 struct nft_chain *chain; 136 - struct nft_rule *rule; 137 149 138 150 if (priv->dreg == NFT_REG_VERDICT) { 139 151 switch (data->verdict.code) { ··· 155 143 if (!nft_chain_binding(chain)) 156 144 break; 157 145 158 - chain_ctx = *ctx; 159 - chain_ctx.chain = chain; 160 - 161 - list_for_each_entry(rule, &chain->rules, list) 162 - nft_rule_expr_deactivate(&chain_ctx, rule, phase); 163 - 164 146 switch (phase) { 165 147 case NFT_TRANS_PREPARE_ERROR: 166 148 nf_tables_unbind_chain(ctx, chain); 167 - fallthrough; 149 + nft_deactivate_next(ctx->net, chain); 150 + break; 168 151 case NFT_TRANS_PREPARE: 152 + nft_immediate_chain_deactivate(ctx, chain, phase); 169 153 nft_deactivate_next(ctx->net, chain); 170 154 break; 171 155 default: 156 + nft_immediate_chain_deactivate(ctx, chain, phase); 172 157 nft_chain_del(chain); 173 158 chain->bound = false; 174 159 nft_use_dec(&chain->table->use);
+14 -6
net/netfilter/nft_set_rbtree.c
··· 217 217 218 218 static int nft_rbtree_gc_elem(const struct nft_set *__set, 219 219 struct nft_rbtree *priv, 220 - struct nft_rbtree_elem *rbe) 220 + struct nft_rbtree_elem *rbe, 221 + u8 genmask) 221 222 { 222 223 struct nft_set *set = (struct nft_set *)__set; 223 224 struct rb_node *prev = rb_prev(&rbe->node); 224 - struct nft_rbtree_elem *rbe_prev = NULL; 225 + struct nft_rbtree_elem *rbe_prev; 225 226 struct nft_set_gc_batch *gcb; 226 227 227 228 gcb = nft_set_gc_batch_check(set, NULL, GFP_ATOMIC); 228 229 if (!gcb) 229 230 return -ENOMEM; 230 231 231 - /* search for expired end interval coming before this element. */ 232 + /* search for end interval coming before this element. 233 + * end intervals don't carry a timeout extension, they 234 + * are coupled with the interval start element. 235 + */ 232 236 while (prev) { 233 237 rbe_prev = rb_entry(prev, struct nft_rbtree_elem, node); 234 - if (nft_rbtree_interval_end(rbe_prev)) 238 + if (nft_rbtree_interval_end(rbe_prev) && 239 + nft_set_elem_active(&rbe_prev->ext, genmask)) 235 240 break; 236 241 237 242 prev = rb_prev(prev); 238 243 } 239 244 240 - if (rbe_prev) { 245 + if (prev) { 246 + rbe_prev = rb_entry(prev, struct nft_rbtree_elem, node); 247 + 241 248 rb_erase(&rbe_prev->node, &priv->root); 242 249 atomic_dec(&set->nelems); 250 + nft_set_gc_batch_add(gcb, rbe_prev); 243 251 } 244 252 245 253 rb_erase(&rbe->node, &priv->root); ··· 329 321 330 322 /* perform garbage collection to avoid bogus overlap reports. */ 331 323 if (nft_set_elem_expired(&rbe->ext)) { 332 - err = nft_rbtree_gc_elem(set, priv, rbe); 324 + err = nft_rbtree_gc_elem(set, priv, rbe, genmask); 333 325 if (err < 0) 334 326 return err; 335 327
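
Editor's note: one generic shape visible in the rbtree GC fix above is that the post-loop erase now re-derives rbe_prev from the loop cursor (prev) instead of testing a variable assigned inside the loop body; a walk that runs off the tree without matching would otherwise leave a stale non-NULL pointer to the last node examined. A toy sketch of the safe form on a singly linked list (the genmask predicate and gc-batch accounting from the real fix are omitted).

#include <stdio.h>

struct node { int match; struct node *prev; };

static struct node *find_back(struct node *start)
{
	struct node *cur = start->prev;

	/* Walk backwards until the predicate holds or we fall off. */
	while (cur && !cur->match)
		cur = cur->prev;

	/* Test the cursor itself: non-NULL means it *is* a match.
	 * Testing a helper assigned inside the loop could hand back
	 * the last non-matching node examined. */
	return cur;
}

int main(void)
{
	struct node a = { 1, NULL }, b = { 0, &a }, c = { 0, &b };
	printf("found match: %d\n", find_back(&c) == &a);
	return 0;
}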
+1 -1
net/packet/af_packet.c
··· 3601 3601 if (dev) { 3602 3602 sll->sll_hatype = dev->type; 3603 3603 sll->sll_halen = dev->addr_len; 3604 - memcpy(sll->sll_addr, dev->dev_addr, dev->addr_len); 3604 + memcpy(sll->sll_addr_flex, dev->dev_addr, dev->addr_len); 3605 3605 } else { 3606 3606 sll->sll_hatype = 0; /* Bad: we have no ARPHRD_UNSPEC */ 3607 3607 sll->sll_halen = 0;
+14
net/sched/sch_mqprio.c
··· 290 290 "Attribute type expected to be TCA_MQPRIO_MIN_RATE64"); 291 291 return -EINVAL; 292 292 } 293 + 294 + if (nla_len(attr) != sizeof(u64)) { 295 + NL_SET_ERR_MSG_ATTR(extack, attr, 296 + "Attribute TCA_MQPRIO_MIN_RATE64 expected to have 8 bytes length"); 297 + return -EINVAL; 298 + } 299 + 293 300 if (i >= qopt->num_tc) 294 301 break; 295 302 priv->min_rate[i] = nla_get_u64(attr); ··· 319 312 "Attribute type expected to be TCA_MQPRIO_MAX_RATE64"); 320 313 return -EINVAL; 321 314 } 315 + 316 + if (nla_len(attr) != sizeof(u64)) { 317 + NL_SET_ERR_MSG_ATTR(extack, attr, 318 + "Attribute TCA_MQPRIO_MAX_RATE64 expected to have 8 bytes length"); 319 + return -EINVAL; 320 + } 321 + 322 322 if (i >= qopt->num_tc) 323 323 break; 324 324 priv->max_rate[i] = nla_get_u64(attr);
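
Editor's note: the mqprio fix above validates that a u64-typed netlink attribute actually carries 8 bytes before nla_get_u64() reads it. The same idea in a minimal standalone TLV reader; the struct attr layout is an illustrative simplification, not struct nlattr.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct attr { uint16_t type, len; const void *data; };

static int get_u64_checked(const struct attr *a, uint64_t *out)
{
	if (a->len != sizeof(*out))
		return -1;	/* malformed: wrong payload size */
	memcpy(out, a->data, sizeof(*out));
	return 0;
}

int main(void)
{
	uint64_t rate, payload = 1000000;
	struct attr ok  = { 1, sizeof(payload), &payload };
	struct attr bad = { 1, 4, &payload };	/* truncated by sender */

	printf("ok:  %d\n", get_u64_checked(&ok, &rate));
	printf("bad: %d\n", get_u64_checked(&bad, &rate));
	return 0;
}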
+2 -1
net/tipc/crypto.c
··· 1960 1960 1961 1961 skb_reset_network_header(*skb); 1962 1962 skb_pull(*skb, tipc_ehdr_size(ehdr)); 1963 - pskb_trim(*skb, (*skb)->len - aead->authsize); 1963 + if (pskb_trim(*skb, (*skb)->len - aead->authsize)) 1964 + goto free_skb; 1964 1965 1965 1966 /* Validate TIPCv2 message */ 1966 1967 if (unlikely(!tipc_msg_validate(skb))) {
+1 -1
net/tipc/node.c
··· 583 583 n->capabilities, &n->bc_entry.inputq1, 584 584 &n->bc_entry.namedq, snd_l, &n->bc_entry.link)) { 585 585 pr_warn("Broadcast rcv link creation failed, no memory\n"); 586 - kfree(n); 586 + tipc_node_put(n); 587 587 n = NULL; 588 588 goto exit; 589 589 }
+16 -7
net/unix/af_unix.c
··· 289 289 return 0; 290 290 } 291 291 292 - static void unix_mkname_bsd(struct sockaddr_un *sunaddr, int addr_len) 292 + static int unix_mkname_bsd(struct sockaddr_un *sunaddr, int addr_len) 293 293 { 294 + struct sockaddr_storage *addr = (struct sockaddr_storage *)sunaddr; 295 + short offset = offsetof(struct sockaddr_storage, __data); 296 + 297 + BUILD_BUG_ON(offset != offsetof(struct sockaddr_un, sun_path)); 298 + 294 299 /* This may look like an off by one error but it is a bit more 295 300 * subtle. 108 is the longest valid AF_UNIX path for a binding. 296 301 * sun_path[108] doesn't as such exist. However in kernel space 297 302 * we are guaranteed that it is a valid memory location in our 298 303 * kernel address buffer because syscall functions always pass 299 304 * a pointer of struct sockaddr_storage which has a bigger buffer 300 - * than 108. 305 + * than 108. Also, we must terminate sun_path for strlen() in 306 + * getname_kernel(). 301 307 */ 302 - ((char *)sunaddr)[addr_len] = 0; 308 + addr->__data[addr_len - offset] = 0; 309 + 310 + /* Don't pass sunaddr->sun_path to strlen(). Otherwise, 108 will 311 + * cause panic if CONFIG_FORTIFY_SOURCE=y. Let __fortify_strlen() 312 + * know the actual buffer. 313 + */ 314 + return strlen(addr->__data) + offset + 1; 303 315 } 304 316 305 317 static void __unix_remove_socket(struct sock *sk) ··· 1220 1208 struct path parent; 1221 1209 int err; 1222 1210 1223 - unix_mkname_bsd(sunaddr, addr_len); 1224 - addr_len = strlen(sunaddr->sun_path) + 1225 - offsetof(struct sockaddr_un, sun_path) + 1; 1226 - 1211 + addr_len = unix_mkname_bsd(sunaddr, addr_len); 1227 1212 addr = unix_create_addr(sunaddr, addr_len); 1228 1213 if (!addr) 1229 1214 return -ENOMEM;
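
Editor's note: the af_unix change above terminates the path inside the big sockaddr_storage buffer and measures it there, because a fortified strlen() on sun_path itself would trip at exactly 108 non-NUL bytes; it then returns the recomputed addr_len. A userspace sketch of that shape (glibc's sockaddr_storage has no __data member, so plain pointer arithmetic stands in for it here).

#include <stddef.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/un.h>

/* Terminate the path in the oversized storage buffer, then recompute
 * addr_len from the resulting string, as the kernel fix does. */
static int mkname_bsd(struct sockaddr_storage *addr, int addr_len)
{
	char *data = (char *)addr + offsetof(struct sockaddr_un, sun_path);

	((char *)addr)[addr_len] = 0;	/* safe: storage exceeds 108 bytes */
	return strlen(data) + offsetof(struct sockaddr_un, sun_path) + 1;
}

int main(void)
{
	struct sockaddr_storage ss = { 0 };
	struct sockaddr_un *sun = (struct sockaddr_un *)&ss;

	sun->sun_family = AF_UNIX;
	memcpy(sun->sun_path, "/tmp/sock", 9);
	printf("addr_len = %d\n",
	       mkname_bsd(&ss, offsetof(struct sockaddr_un, sun_path) + 9));
	return 0;
}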
-68
scripts/coccinelle/api/debugfs/debugfs_simple_attr.cocci
··· 1 - // SPDX-License-Identifier: GPL-2.0 2 - /// Use DEFINE_DEBUGFS_ATTRIBUTE rather than DEFINE_SIMPLE_ATTRIBUTE 3 - /// for debugfs files. 4 - /// 5 - //# Rationale: DEFINE_SIMPLE_ATTRIBUTE + debugfs_create_file() 6 - //# imposes some significant overhead as compared to 7 - //# DEFINE_DEBUGFS_ATTRIBUTE + debugfs_create_file_unsafe(). 8 - // 9 - // Copyright (C): 2016 Nicolai Stange 10 - // Options: --no-includes 11 - // 12 - 13 - virtual context 14 - virtual patch 15 - virtual org 16 - virtual report 17 - 18 - @dsa@ 19 - declarer name DEFINE_SIMPLE_ATTRIBUTE; 20 - identifier dsa_fops; 21 - expression dsa_get, dsa_set, dsa_fmt; 22 - position p; 23 - @@ 24 - DEFINE_SIMPLE_ATTRIBUTE@p(dsa_fops, dsa_get, dsa_set, dsa_fmt); 25 - 26 - @dcf@ 27 - expression name, mode, parent, data; 28 - identifier dsa.dsa_fops; 29 - @@ 30 - debugfs_create_file(name, mode, parent, data, &dsa_fops) 31 - 32 - 33 - @context_dsa depends on context && dcf@ 34 - declarer name DEFINE_DEBUGFS_ATTRIBUTE; 35 - identifier dsa.dsa_fops; 36 - expression dsa.dsa_get, dsa.dsa_set, dsa.dsa_fmt; 37 - @@ 38 - * DEFINE_SIMPLE_ATTRIBUTE(dsa_fops, dsa_get, dsa_set, dsa_fmt); 39 - 40 - 41 - @patch_dcf depends on patch expression@ 42 - expression name, mode, parent, data; 43 - identifier dsa.dsa_fops; 44 - @@ 45 - - debugfs_create_file(name, mode, parent, data, &dsa_fops) 46 - + debugfs_create_file_unsafe(name, mode, parent, data, &dsa_fops) 47 - 48 - @patch_dsa depends on patch_dcf && patch@ 49 - identifier dsa.dsa_fops; 50 - expression dsa.dsa_get, dsa.dsa_set, dsa.dsa_fmt; 51 - @@ 52 - - DEFINE_SIMPLE_ATTRIBUTE(dsa_fops, dsa_get, dsa_set, dsa_fmt); 53 - + DEFINE_DEBUGFS_ATTRIBUTE(dsa_fops, dsa_get, dsa_set, dsa_fmt); 54 - 55 - 56 - @script:python depends on org && dcf@ 57 - fops << dsa.dsa_fops; 58 - p << dsa.p; 59 - @@ 60 - msg="%s should be defined with DEFINE_DEBUGFS_ATTRIBUTE" % (fops) 61 - coccilib.org.print_todo(p[0], msg) 62 - 63 - @script:python depends on report && dcf@ 64 - fops << dsa.dsa_fops; 65 - p << dsa.p; 66 - @@ 67 - msg="WARNING: %s should be defined with DEFINE_DEBUGFS_ATTRIBUTE" % (fops) 68 - coccilib.report.print_report(p[0], msg)
-1
scripts/spelling.txt
··· 1541 1541 temorary||temporary 1542 1542 temproarily||temporarily 1543 1543 temperture||temperature 1544 - thead||thread 1545 1544 theads||threads 1546 1545 therfore||therefore 1547 1546 thier||their
+8 -3
security/keys/keyctl.c
··· 980 980 ret = -EACCES; 981 981 down_write(&key->sem); 982 982 983 - if (!capable(CAP_SYS_ADMIN)) { 983 + { 984 + bool is_privileged_op = false; 985 + 984 986 /* only the sysadmin can chown a key to some other UID */ 985 987 if (user != (uid_t) -1 && !uid_eq(key->uid, uid)) 986 - goto error_put; 988 + is_privileged_op = true; 987 989 988 990 /* only the sysadmin can set the key's GID to a group other 989 991 * than one of those that the current process subscribes to */ 990 992 if (group != (gid_t) -1 && !gid_eq(gid, key->gid) && !in_group_p(gid)) 993 + is_privileged_op = true; 994 + 995 + if (is_privileged_op && !capable(CAP_SYS_ADMIN)) 991 996 goto error_put; 992 997 } 993 998 ··· 1093 1088 down_write(&key->sem); 1094 1089 1095 1090 /* if we're not the sysadmin, we can only change a key that we own */ 1096 - if (capable(CAP_SYS_ADMIN) || uid_eq(key->uid, current_fsuid())) { 1091 + if (uid_eq(key->uid, current_fsuid()) || capable(CAP_SYS_ADMIN)) { 1097 1092 key->perm = perm; 1098 1093 notify_key(key, NOTIFY_KEY_SETATTR, 0); 1099 1094 ret = 0;
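
Editor's note: both keyctl hunks above reorder the logic so capable(CAP_SYS_ADMIN) is consulted only when the operation actually needs privilege; capable() is not a pure predicate (it can mark privilege use and feed audit), so querying it first penalizes unprivileged self-service calls. A reduced sketch of the pattern with stub predicates, not the real key APIs.

#include <stdbool.h>
#include <stdio.h>

static bool capable_calls;

/* Stub with a visible side effect, standing in for the real capable()
 * which can record that the task exercised privilege. */
static bool capable(void)
{
	capable_calls = true;
	return true;
}

static bool chown_key(bool changes_uid, bool owner_matches)
{
	bool is_privileged_op = changes_uid || !owner_matches;

	/* Ask for privilege only when the request actually needs it. */
	if (is_privileged_op && !capable())
		return false;
	return true;
}

int main(void)
{
	chown_key(false, true);		/* self-service request */
	printf("capable() consulted: %s\n", capable_calls ? "yes" : "no");
	return 0;
}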
+1 -2
sound/core/seq/seq_ump_client.c
··· 298 298 } 299 299 300 300 list_for_each_entry(fb, &client->ump->block_list, list) { 301 - if (fb->info.first_group < 0 || 302 - fb->info.first_group + fb->info.num_groups > SNDRV_UMP_MAX_GROUPS) 301 + if (fb->info.first_group + fb->info.num_groups > SNDRV_UMP_MAX_GROUPS) 303 302 break; 304 303 group = &client->groups[fb->info.first_group]; 305 304 for (i = 0; i < fb->info.num_groups; i++, group++) {
+2
sound/pci/hda/patch_realtek.c
··· 9541 9541 SND_PCI_QUIRK(0x103c, 0x880d, "HP EliteBook 830 G8 Notebook PC", ALC285_FIXUP_HP_GPIO_LED), 9542 9542 SND_PCI_QUIRK(0x103c, 0x8811, "HP Spectre x360 15-eb1xxx", ALC285_FIXUP_HP_SPECTRE_X360_EB1), 9543 9543 SND_PCI_QUIRK(0x103c, 0x8812, "HP Spectre x360 15-eb1xxx", ALC285_FIXUP_HP_SPECTRE_X360_EB1), 9544 + SND_PCI_QUIRK(0x103c, 0x881d, "HP 250 G8 Notebook PC", ALC236_FIXUP_HP_MUTE_LED_COEFBIT2), 9544 9545 SND_PCI_QUIRK(0x103c, 0x8846, "HP EliteBook 850 G8 Notebook PC", ALC285_FIXUP_HP_GPIO_LED), 9545 9546 SND_PCI_QUIRK(0x103c, 0x8847, "HP EliteBook x360 830 G8 Notebook PC", ALC285_FIXUP_HP_GPIO_LED), 9546 9547 SND_PCI_QUIRK(0x103c, 0x884b, "HP EliteBook 840 Aero G8 Notebook PC", ALC285_FIXUP_HP_GPIO_LED), ··· 9672 9671 SND_PCI_QUIRK(0x1043, 0x1c9f, "ASUS G614JI", ALC285_FIXUP_ASUS_HEADSET_MIC), 9673 9672 SND_PCI_QUIRK(0x1043, 0x1caf, "ASUS G634JYR/JZR", ALC285_FIXUP_ASUS_SPI_REAR_SPEAKERS), 9674 9673 SND_PCI_QUIRK(0x1043, 0x1ccd, "ASUS X555UB", ALC256_FIXUP_ASUS_MIC), 9674 + SND_PCI_QUIRK(0x1043, 0x1d1f, "ASUS ROG Strix G17 2023 (G713PV)", ALC287_FIXUP_CS35L41_I2C_2), 9675 9675 SND_PCI_QUIRK(0x1043, 0x1d42, "ASUS Zephyrus G14 2022", ALC289_FIXUP_ASUS_GA401), 9676 9676 SND_PCI_QUIRK(0x1043, 0x1d4e, "ASUS TM420", ALC256_FIXUP_ASUS_HPE), 9677 9677 SND_PCI_QUIRK(0x1043, 0x1e02, "ASUS UX3402", ALC245_FIXUP_CS35L41_SPI_2),
+4 -1
sound/soc/atmel/atmel-i2s.c
··· 163 163 164 164 #define I2S_MCK_12M288 12288000UL 165 165 #define I2S_MCK_11M2896 11289600UL 166 + #define I2S_MCK_6M144 6144000UL 166 167 167 168 /* mck = (32 * (imckfs+1) / (imckdiv+1)) * fs */ 168 169 static const struct atmel_i2s_gck_param gck_params[] = { 170 + /* mck = 6.144Mhz */ 171 + { 8000, I2S_MCK_6M144, 1, 47}, /* mck = 768 fs */ 172 + 169 173 /* mck = 12.288MHz */ 170 - { 8000, I2S_MCK_12M288, 0, 47}, /* mck = 1536 fs */ 171 174 { 16000, I2S_MCK_12M288, 1, 47}, /* mck = 768 fs */ 172 175 { 24000, I2S_MCK_12M288, 3, 63}, /* mck = 512 fs */ 173 176 { 32000, I2S_MCK_12M288, 3, 47}, /* mck = 384 fs */
+9 -3
sound/soc/codecs/da7219-aad.c
··· 361 361 struct da7219_priv *da7219 = snd_soc_component_get_drvdata(component); 362 362 u8 events[DA7219_AAD_IRQ_REG_MAX]; 363 363 u8 statusa; 364 - int i, report = 0, mask = 0; 364 + int i, ret, report = 0, mask = 0; 365 365 366 366 /* Read current IRQ events */ 367 - regmap_bulk_read(da7219->regmap, DA7219_ACCDET_IRQ_EVENT_A, 368 - events, DA7219_AAD_IRQ_REG_MAX); 367 + ret = regmap_bulk_read(da7219->regmap, DA7219_ACCDET_IRQ_EVENT_A, 368 + events, DA7219_AAD_IRQ_REG_MAX); 369 + if (ret) { 370 + dev_warn_ratelimited(component->dev, "Failed to read IRQ events: %d\n", ret); 371 + return IRQ_NONE; 372 + } 369 373 370 374 if (!events[DA7219_AAD_IRQ_REG_A] && !events[DA7219_AAD_IRQ_REG_B]) 371 375 return IRQ_NONE; ··· 948 944 } 949 945 } 950 946 } 947 + 948 + synchronize_irq(da7219_aad->irq); 951 949 } 952 950 953 951 void da7219_aad_resume(struct snd_soc_component *component)
+1 -1
sound/soc/codecs/es8316.c
··· 153 153 "dmic data at high level", 154 154 "dmic data at low level", 155 155 }; 156 - static const unsigned int es8316_dmic_values[] = { 0, 1, 2 }; 156 + static const unsigned int es8316_dmic_values[] = { 0, 2, 3 }; 157 157 static const struct soc_enum es8316_dmic_src_enum = 158 158 SOC_VALUE_ENUM_SINGLE(ES8316_ADC_DMIC, 0, 3, 159 159 ARRAY_SIZE(es8316_dmic_txt),
+41
sound/soc/codecs/nau8821.c
··· 10 10 #include <linux/acpi.h> 11 11 #include <linux/clk.h> 12 12 #include <linux/delay.h> 13 + #include <linux/dmi.h> 13 14 #include <linux/init.h> 14 15 #include <linux/i2c.h> 15 16 #include <linux/module.h> ··· 25 24 #include <sound/soc.h> 26 25 #include <sound/tlv.h> 27 26 #include "nau8821.h" 27 + 28 + #define NAU8821_JD_ACTIVE_HIGH BIT(0) 29 + 30 + static int nau8821_quirk; 31 + static int quirk_override = -1; 32 + module_param_named(quirk, quirk_override, uint, 0444); 33 + MODULE_PARM_DESC(quirk, "Board-specific quirk override"); 28 34 29 35 #define NAU_FREF_MAX 13500000 30 36 #define NAU_FVCO_MAX 100000000 ··· 1800 1792 return 0; 1801 1793 } 1802 1794 1795 + /* Please keep this list alphabetically sorted */ 1796 + static const struct dmi_system_id nau8821_quirk_table[] = { 1797 + { 1798 + /* Positivo CW14Q01P-V2 */ 1799 + .matches = { 1800 + DMI_MATCH(DMI_SYS_VENDOR, "Positivo Tecnologia SA"), 1801 + DMI_MATCH(DMI_BOARD_NAME, "CW14Q01P-V2"), 1802 + }, 1803 + .driver_data = (void *)(NAU8821_JD_ACTIVE_HIGH), 1804 + }, 1805 + {} 1806 + }; 1807 + 1808 + static void nau8821_check_quirks(void) 1809 + { 1810 + const struct dmi_system_id *dmi_id; 1811 + 1812 + if (quirk_override != -1) { 1813 + nau8821_quirk = quirk_override; 1814 + return; 1815 + } 1816 + 1817 + dmi_id = dmi_first_match(nau8821_quirk_table); 1818 + if (dmi_id) 1819 + nau8821_quirk = (unsigned long)dmi_id->driver_data; 1820 + } 1821 + 1803 1822 static int nau8821_i2c_probe(struct i2c_client *i2c) 1804 1823 { 1805 1824 struct device *dev = &i2c->dev; ··· 1847 1812 1848 1813 nau8821->dev = dev; 1849 1814 nau8821->irq = i2c->irq; 1815 + 1816 + nau8821_check_quirks(); 1817 + 1818 + if (nau8821_quirk & NAU8821_JD_ACTIVE_HIGH) 1819 + nau8821->jkdet_polarity = 0; 1820 + 1850 1821 nau8821_print_device_properties(nau8821); 1851 1822 1852 1823 nau8821_reset_chip(nau8821->regmap);
+8 -1
sound/soc/codecs/rt5682-sdw.c
··· 750 750 if (!rt5682->first_hw_init) 751 751 return 0; 752 752 753 - if (!slave->unattach_request) 753 + if (!slave->unattach_request) { 754 + if (rt5682->disable_irq == true) { 755 + mutex_lock(&rt5682->disable_irq_lock); 756 + sdw_write_no_pm(slave, SDW_SCP_INTMASK1, SDW_SCP_INT1_IMPL_DEF); 757 + rt5682->disable_irq = false; 758 + mutex_unlock(&rt5682->disable_irq_lock); 759 + } 754 760 goto regmap_sync; 761 + } 755 762 756 763 time = wait_for_completion_timeout(&slave->initialization_complete, 757 764 msecs_to_jiffies(RT5682_PROBE_TIMEOUT));
+9 -1
sound/soc/codecs/rt711-sdca-sdw.c
··· 438 438 if (!rt711->first_hw_init) 439 439 return 0; 440 440 441 - if (!slave->unattach_request) 441 + if (!slave->unattach_request) { 442 + if (rt711->disable_irq == true) { 443 + mutex_lock(&rt711->disable_irq_lock); 444 + sdw_write_no_pm(slave, SDW_SCP_SDCA_INTMASK1, SDW_SCP_SDCA_INTMASK_SDCA_0); 445 + sdw_write_no_pm(slave, SDW_SCP_SDCA_INTMASK2, SDW_SCP_SDCA_INTMASK_SDCA_8); 446 + rt711->disable_irq = false; 447 + mutex_unlock(&rt711->disable_irq_lock); 448 + } 442 449 goto regmap_sync; 450 + } 443 451 444 452 time = wait_for_completion_timeout(&slave->initialization_complete, 445 453 msecs_to_jiffies(RT711_PROBE_TIMEOUT));
+8 -1
sound/soc/codecs/rt711-sdw.c
··· 538 538 if (!rt711->first_hw_init) 539 539 return 0; 540 540 541 - if (!slave->unattach_request) 541 + if (!slave->unattach_request) { 542 + if (rt711->disable_irq == true) { 543 + mutex_lock(&rt711->disable_irq_lock); 544 + sdw_write_no_pm(slave, SDW_SCP_INTMASK1, SDW_SCP_INT1_IMPL_DEF); 545 + rt711->disable_irq = false; 546 + mutex_unlock(&rt711->disable_irq_lock); 547 + } 542 548 goto regmap_sync; 549 + } 543 550 544 551 time = wait_for_completion_timeout(&slave->initialization_complete, 545 552 msecs_to_jiffies(RT711_PROBE_TIMEOUT));
+9 -1
sound/soc/codecs/rt712-sdca-sdw.c
··· 438 438 if (!rt712->first_hw_init) 439 439 return 0; 440 440 441 - if (!slave->unattach_request) 441 + if (!slave->unattach_request) { 442 + if (rt712->disable_irq == true) { 443 + mutex_lock(&rt712->disable_irq_lock); 444 + sdw_write_no_pm(slave, SDW_SCP_SDCA_INTMASK1, SDW_SCP_SDCA_INTMASK_SDCA_0); 445 + sdw_write_no_pm(slave, SDW_SCP_SDCA_INTMASK2, SDW_SCP_SDCA_INTMASK_SDCA_8); 446 + rt712->disable_irq = false; 447 + mutex_unlock(&rt712->disable_irq_lock); 448 + } 442 449 goto regmap_sync; 450 + } 443 451 444 452 time = wait_for_completion_timeout(&slave->initialization_complete, 445 453 msecs_to_jiffies(RT712_PROBE_TIMEOUT));
+9 -1
sound/soc/codecs/rt722-sdca-sdw.c
··· 463 463 if (!rt722->first_hw_init) 464 464 return 0; 465 465 466 - if (!slave->unattach_request) 466 + if (!slave->unattach_request) { 467 + if (rt722->disable_irq == true) { 468 + mutex_lock(&rt722->disable_irq_lock); 469 + sdw_write_no_pm(slave, SDW_SCP_SDCA_INTMASK1, SDW_SCP_SDCA_INTMASK_SDCA_6); 470 + sdw_write_no_pm(slave, SDW_SCP_SDCA_INTMASK2, SDW_SCP_SDCA_INTMASK_SDCA_8); 471 + rt722->disable_irq = false; 472 + mutex_unlock(&rt722->disable_irq_lock); 473 + } 467 474 goto regmap_sync; 475 + } 468 476 469 477 time = wait_for_completion_timeout(&slave->initialization_complete, 470 478 msecs_to_jiffies(RT722_PROBE_TIMEOUT));
+3
sound/soc/codecs/wm8904.c
··· 2308 2308 regmap_update_bits(wm8904->regmap, WM8904_BIAS_CONTROL_0, 2309 2309 WM8904_POBCTRL, 0); 2310 2310 2311 + /* Fill the cache for the ADC test register */ 2312 + regmap_read(wm8904->regmap, WM8904_ADC_TEST_0, &val); 2313 + 2311 2314 /* Can leave the device powered off until we need it */ 2312 2315 regcache_cache_only(wm8904->regmap, true); 2313 2316 regulator_bulk_disable(ARRAY_SIZE(wm8904->supplies), wm8904->supplies);
+2
sound/soc/fsl/fsl_spdif.c
··· 751 751 case SNDRV_PCM_TRIGGER_PAUSE_PUSH: 752 752 regmap_update_bits(regmap, REG_SPDIF_SCR, dmaen, 0); 753 753 regmap_update_bits(regmap, REG_SPDIF_SIE, intr, 0); 754 + regmap_write(regmap, REG_SPDIF_STL, 0x0); 755 + regmap_write(regmap, REG_SPDIF_STR, 0x0); 754 756 break; 755 757 default: 756 758 return -EINVAL;
+14
sound/usb/mixer_maps.c
··· 374 374 { 0 } 375 375 }; 376 376 377 + /* Microsoft USB Link headset */ 378 + /* a guess work: raw playback volume values are from 2 to 129 */ 379 + static const struct usbmix_dB_map ms_usb_link_dB = { -3225, 0, true }; 380 + static const struct usbmix_name_map ms_usb_link_map[] = { 381 + { 9, NULL, .dB = &ms_usb_link_dB }, 382 + { 10, NULL }, /* Headset Capture volume; seems non-working, disabled */ 383 + { 0 } /* terminator */ 384 + }; 385 + 377 386 /* ASUS ROG Zenith II with Realtek ALC1220-VB */ 378 387 static const struct usbmix_name_map asus_zenith_ii_map[] = { 379 388 { 19, NULL, 12 }, /* FU, Input Gain Pad - broken response, disabled */ ··· 676 667 /* Sennheiser Communications Headset [PC 8] */ 677 668 .id = USB_ID(0x1395, 0x0025), 678 669 .map = sennheiser_pc8_map, 670 + }, 671 + { 672 + /* Microsoft USB Link headset */ 673 + .id = USB_ID(0x045e, 0x083c), 674 + .map = ms_usb_link_map, 679 675 }, 680 676 { 0 } /* terminator */ 681 677 };
+31 -6
sound/usb/quirks.c
···

	/* XMOS based USB DACs */
	switch (chip->usb_id) {
-	case USB_ID(0x1511, 0x0037): /* AURALiC VEGA */
-	case USB_ID(0x21ed, 0xd75a): /* Accuphase DAC-60 option card */
+	case USB_ID(0x139f, 0x5504): /* Nagra DAC */
+	case USB_ID(0x20b1, 0x3089): /* Mola-Mola DAC */
+	case USB_ID(0x2522, 0x0007): /* LH Labs Geek Out 1V5 */
+	case USB_ID(0x2522, 0x0009): /* LH Labs Geek Pulse X Inifinity 2V0 */
	case USB_ID(0x2522, 0x0012): /* LH Labs VI DAC Infinity */
	case USB_ID(0x2772, 0x0230): /* Pro-Ject Pre Box S2 Digital */
		if (fp->altsetting == 2)
···
	case USB_ID(0x0d8c, 0x0316): /* Hegel HD12 DSD */
	case USB_ID(0x10cb, 0x0103): /* The Bit Opus #3; with fp->dsd_raw */
	case USB_ID(0x16d0, 0x06b2): /* NuPrime DAC-10 */
-	case USB_ID(0x16d0, 0x09dd): /* Encore mDSD */
+	case USB_ID(0x16d0, 0x06b4): /* NuPrime Audio HD-AVP/AVA */
	case USB_ID(0x16d0, 0x0733): /* Furutech ADL Stratos */
+	case USB_ID(0x16d0, 0x09d8): /* NuPrime IDA-8 */
	case USB_ID(0x16d0, 0x09db): /* NuPrime Audio DAC-9 */
+	case USB_ID(0x16d0, 0x09dd): /* Encore mDSD */
	case USB_ID(0x1db5, 0x0003): /* Bryston BDA3 */
+	case USB_ID(0x20a0, 0x4143): /* WaveIO USB Audio 2.0 */
	case USB_ID(0x22e1, 0xca01): /* HDTA Serenade DSD */
	case USB_ID(0x249c, 0x9326): /* M2Tech Young MkIII */
	case USB_ID(0x2616, 0x0106): /* PS Audio NuWave DAC */
	case USB_ID(0x2622, 0x0041): /* Audiolab M-DAC+ */
+	case USB_ID(0x278b, 0x5100): /* Rotel RC-1590 */
	case USB_ID(0x27f7, 0x3002): /* W4S DAC-2v2SE */
	case USB_ID(0x29a2, 0x0086): /* Mutec MC3+ USB */
	case USB_ID(0x6b42, 0x0042): /* MSB Technology */
···

	/* Amanero Combo384 USB based DACs with native DSD support */
	case USB_ID(0x16d0, 0x071a): /* Amanero - Combo384 */
-	case USB_ID(0x2ab6, 0x0004): /* T+A DAC8DSD-V2.0, MP1000E-V2.0, MP2000R-V2.0, MP2500R-V2.0, MP3100HV-V2.0 */
-	case USB_ID(0x2ab6, 0x0005): /* T+A USB HD Audio 1 */
-	case USB_ID(0x2ab6, 0x0006): /* T+A USB HD Audio 2 */
		if (fp->altsetting == 2) {
			switch (le16_to_cpu(chip->dev->descriptor.bcdDevice)) {
			case 0x199:
···
		   QUIRK_FLAG_IGNORE_CTL_ERROR),
	DEVICE_FLG(0x041e, 0x4080, /* Creative Live Cam VF0610 */
		   QUIRK_FLAG_GET_SAMPLE_RATE),
+	DEVICE_FLG(0x045e, 0x083c, /* MS USB Link headset */
+		   QUIRK_FLAG_GET_SAMPLE_RATE | QUIRK_FLAG_CTL_MSG_DELAY |
+		   QUIRK_FLAG_DISABLE_AUTOSUSPEND),
	DEVICE_FLG(0x046d, 0x084c, /* Logitech ConferenceCam Connect */
		   QUIRK_FLAG_GET_SAMPLE_RATE | QUIRK_FLAG_CTL_MSG_DELAY_1M),
	DEVICE_FLG(0x046d, 0x0991, /* Logitech QuickCam Pro */
···
		   QUIRK_FLAG_IFACE_DELAY),
	DEVICE_FLG(0x0644, 0x805f, /* TEAC Model 12 */
		   QUIRK_FLAG_FORCE_IFACE_RESET),
+	DEVICE_FLG(0x0644, 0x806b, /* TEAC UD-701 */
+		   QUIRK_FLAG_ITF_USB_DSD_DAC | QUIRK_FLAG_CTL_MSG_DELAY |
+		   QUIRK_FLAG_IFACE_DELAY),
	DEVICE_FLG(0x06f8, 0xb000, /* Hercules DJ Console (Windows Edition) */
		   QUIRK_FLAG_IGNORE_CTL_ERROR),
	DEVICE_FLG(0x06f8, 0xd002, /* Hercules DJ Console (Macintosh Edition) */
···
		   QUIRK_FLAG_ITF_USB_DSD_DAC | QUIRK_FLAG_CTL_MSG_DELAY),
	DEVICE_FLG(0x154e, 0x3006, /* Marantz SA-14S1 */
		   QUIRK_FLAG_ITF_USB_DSD_DAC | QUIRK_FLAG_CTL_MSG_DELAY),
+	DEVICE_FLG(0x154e, 0x300b, /* Marantz SA-KI RUBY / SA-12 */
+		   QUIRK_FLAG_DSD_RAW),
	DEVICE_FLG(0x154e, 0x500e, /* Denon DN-X1600 */
		   QUIRK_FLAG_IGNORE_CLOCK_SOURCE),
	DEVICE_FLG(0x1686, 0x00dd, /* Zoom R16/24 */
···
		   QUIRK_FLAG_SHARE_MEDIA_DEVICE | QUIRK_FLAG_ALIGN_TRANSFER),
	DEVICE_FLG(0x21b4, 0x0081, /* AudioQuest DragonFly */
		   QUIRK_FLAG_GET_SAMPLE_RATE),
+	DEVICE_FLG(0x21b4, 0x0230, /* Ayre QB-9 Twenty */
+		   QUIRK_FLAG_DSD_RAW),
+	DEVICE_FLG(0x21b4, 0x0232, /* Ayre QX-5 Twenty */
+		   QUIRK_FLAG_DSD_RAW),
	DEVICE_FLG(0x2522, 0x0007, /* LH Labs Geek Out HD Audio 1V5 */
		   QUIRK_FLAG_SET_IFACE_FIRST),
	DEVICE_FLG(0x2708, 0x0002, /* Audient iD14 */
···
		   QUIRK_FLAG_VALIDATE_RATES),
	VENDOR_FLG(0x1235, /* Focusrite Novation */
		   QUIRK_FLAG_VALIDATE_RATES),
+	VENDOR_FLG(0x1511, /* AURALiC */
+		   QUIRK_FLAG_DSD_RAW),
	VENDOR_FLG(0x152a, /* Thesycon devices */
+		   QUIRK_FLAG_DSD_RAW),
+	VENDOR_FLG(0x18d1, /* iBasso devices */
		   QUIRK_FLAG_DSD_RAW),
	VENDOR_FLG(0x1de7, /* Phoenix Audio */
		   QUIRK_FLAG_GET_SAMPLE_RATE),
	VENDOR_FLG(0x20b1, /* XMOS based devices */
+		   QUIRK_FLAG_DSD_RAW),
+	VENDOR_FLG(0x21ed, /* Accuphase Laboratory */
		   QUIRK_FLAG_DSD_RAW),
	VENDOR_FLG(0x22d9, /* Oppo */
		   QUIRK_FLAG_DSD_RAW),
···
		   QUIRK_FLAG_DSD_RAW),
	VENDOR_FLG(0x2ab6, /* T+A devices */
		   QUIRK_FLAG_DSD_RAW),
+	VENDOR_FLG(0x2d87, /* Cayin device */
+		   QUIRK_FLAG_DSD_RAW),
	VENDOR_FLG(0x3336, /* HEM devices */
		   QUIRK_FLAG_DSD_RAW),
	VENDOR_FLG(0x3353, /* Khadas devices */
+		   QUIRK_FLAG_DSD_RAW),
+	VENDOR_FLG(0x35f4, /* MSB Technology */
		   QUIRK_FLAG_DSD_RAW),
	VENDOR_FLG(0x3842, /* EVGA */
		   QUIRK_FLAG_DSD_RAW),
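Several of the hunks above replace per-device entries (AURALiC VEGA, Accuphase DAC-60) with vendor-wide VENDOR_FLG() entries. A self-contained sketch of that table-matching idea, with hypothetical names rather than the ALSA implementation: a device entry matches one vendor:product pair, while a vendor entry matches every product of that vendor, so one line can cover a whole product family.

#include <stdio.h>

#define ANY_ID 0xffff	/* hypothetical wildcard, not an ALSA constant */

struct quirk { unsigned short vid, pid; const char *flag; };

static const struct quirk quirks[] = {
	{ 0x2522, 0x0012, "SET_IFACE_FIRST" },	/* one specific product */
	{ 0x1511, ANY_ID, "DSD_RAW" },		/* every product of the vendor */
};

static const char *lookup(unsigned short vid, unsigned short pid)
{
	for (unsigned int i = 0; i < sizeof(quirks) / sizeof(quirks[0]); i++)
		if (quirks[i].vid == vid &&
		    (quirks[i].pid == ANY_ID || quirks[i].pid == pid))
			return quirks[i].flag;
	return "none";
}

int main(void)
{
	printf("%s\n", lookup(0x1511, 0x0037));	/* vendor-wide match */
	printf("%s\n", lookup(0x2522, 0x0012));	/* device match */
	return 0;
}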
+8 -8
tools/net/ynl/lib/ynl.py
···
        pad = b'\x00' * ((4 - len(attr_payload) % 4) % 4)
        return struct.pack('HH', len(attr_payload) + 4, nl_type) + attr_payload + pad

-    def _decode_enum(self, rsp, attr_spec):
-        raw = rsp[attr_spec['name']]
+    def _decode_enum(self, raw, attr_spec):
        enum = self.consts[attr_spec['enum']]
-        i = attr_spec.get('value-start', 0)
        if 'enum-as-flags' in attr_spec and attr_spec['enum-as-flags']:
+            i = 0
            value = set()
            while raw:
                if raw & 1:
···
                raw >>= 1
                i += 1
        else:
-            value = enum.entries_by_val[raw - i].name
-        rsp[attr_spec['name']] = value
+            value = enum.entries_by_val[raw].name
+        return value

    def _decode_binary(self, attr, attr_spec):
        if attr_spec.struct_name:
···
            decoded = attr.as_struct(members)
            for m in members:
                if m.enum:
-                    self._decode_enum(decoded, m)
+                    decoded[m.name] = self._decode_enum(decoded[m.name], m)
        elif attr_spec.sub_type:
            decoded = attr.as_c_array(attr_spec.sub_type)
        else:
···
            else:
                raise Exception(f'Unknown {attr_spec["type"]} with name {attr_spec["name"]}')

+            if 'enum' in attr_spec:
+                decoded = self._decode_enum(decoded, attr_spec)
+
            if not attr_spec.is_multi:
                rsp[attr_spec['name']] = decoded
            elif attr_spec.name in rsp:
···
            else:
                rsp[attr_spec.name] = [decoded]

-        if 'enum' in attr_spec:
-            self._decode_enum(rsp, attr_spec)
        return rsp

    def _decode_extack_path(self, attrs, attr_set, offset, target):
-4
tools/testing/cxl/test/cxl.c
···
#define SZ_64G (SZ_32G * 2)
#endif

-#ifndef SZ_512G
-#define SZ_512G (SZ_64G * 8)
-#endif
-
static __init int cxl_rch_init(void)
{
	int rc, i;
+1 -1
tools/testing/selftests/ftrace/test.d/dynevent/add_remove_btfarg.tc
···
  FPROBES=yes
fi

-if [ -z "$KPROBES" -a "$FPROBES" ] ; then
+if [ -z "$KPROBES" -a -z "$FPROBES" ] ; then
  exit_unsupported
fi
+4 -2
tools/testing/selftests/kvm/include/kvm_util_base.h
···
{
	ssize_t ret;

-	ret = read(stats_fd, header, sizeof(*header));
-	TEST_ASSERT(ret == sizeof(*header), "Read stats header");
+	ret = pread(stats_fd, header, sizeof(*header), 0);
+	TEST_ASSERT(ret == sizeof(*header),
+		    "Failed to read '%lu' header bytes, ret = '%ld'",
+		    sizeof(*header), ret);
}

struct kvm_stats_desc *read_stats_descriptors(int stats_fd,
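The switch from read() to pread() above matters because pread() takes an explicit offset and never moves the file position, so the header stays readable no matter how many times, or through how many duplicated descriptors, it is fetched. A minimal userspace sketch of that property (the demo file name is made up; nothing KVM-specific):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char first[5] = "", second[5] = "";
	int fd = open("demo.bin", O_RDWR | O_CREAT | O_TRUNC, 0600);

	if (fd < 0)
		return 1;
	write(fd, "HEADdata", 8);

	/* Both calls target offset 0; neither disturbs the file position. */
	pread(fd, first, 4, 0);
	pread(fd, second, 4, 0);
	printf("%s == %s\n", first, second);	/* prints: HEAD == HEAD */

	close(fd);
	return 0;
}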
+45 -23
tools/testing/selftests/kvm/kvm_binary_stats_test.c
···
	id = malloc(header.name_size);
	TEST_ASSERT(id, "Allocate memory for id string");

-	ret = read(stats_fd, id, header.name_size);
-	TEST_ASSERT(ret == header.name_size, "Read id string");
+	ret = pread(stats_fd, id, header.name_size, sizeof(header));
+	TEST_ASSERT(ret == header.name_size,
+		    "Expected header size '%u', read '%lu' bytes",
+		    header.name_size, ret);

	/* Check id string, that should start with "kvm" */
	TEST_ASSERT(!strncmp(id, "kvm", 3) && strlen(id) < header.name_size,
···
	free(stats_data);
	free(stats_desc);
	free(id);
-}

-
-static void vm_stats_test(struct kvm_vm *vm)
-{
-	int stats_fd = vm_get_stats_fd(vm);
-
-	stats_test(stats_fd);
-	close(stats_fd);
-	TEST_ASSERT(fcntl(stats_fd, F_GETFD) == -1, "Stats fd not freed");
-}
-
-static void vcpu_stats_test(struct kvm_vcpu *vcpu)
-{
-	int stats_fd = vcpu_get_stats_fd(vcpu);
-
-	stats_test(stats_fd);
	close(stats_fd);
	TEST_ASSERT(fcntl(stats_fd, F_GETFD) == -1, "Stats fd not freed");
}
···

int main(int argc, char *argv[])
{
+	int vm_stats_fds, *vcpu_stats_fds;
	int i, j;
	struct kvm_vcpu **vcpus;
	struct kvm_vm **vms;
···
	vcpus = malloc(sizeof(struct kvm_vcpu *) * max_vm * max_vcpu);
	TEST_ASSERT(vcpus, "Allocate memory for storing vCPU pointers");

+	/*
+	 * Not per-VM as the array is populated, used, and invalidated within a
+	 * single for-loop iteration.
+	 */
+	vcpu_stats_fds = calloc(max_vm, sizeof(*vcpu_stats_fds));
+	TEST_ASSERT(vcpu_stats_fds, "Allocate memory for VM stats fds");
+
	for (i = 0; i < max_vm; ++i) {
		vms[i] = vm_create_barebones();
		for (j = 0; j < max_vcpu; ++j)
			vcpus[i * max_vcpu + j] = __vm_vcpu_add(vms[i], j);
	}

-	/* Check stats read for every VM and VCPU */
+	/*
+	 * Check stats read for every VM and vCPU, with a variety of flavors.
+	 * Note, stats_test() closes the passed in stats fd.
+	 */
	for (i = 0; i < max_vm; ++i) {
-		vm_stats_test(vms[i]);
+		/*
+		 * Verify that creating multiple userspace references to a
+		 * single stats file works and doesn't cause explosions.
+		 */
+		vm_stats_fds = vm_get_stats_fd(vms[i]);
+		stats_test(dup(vm_stats_fds));
+
+		/* Verify userspace can instantiate multiple stats files. */
+		stats_test(vm_get_stats_fd(vms[i]));
+
+		for (j = 0; j < max_vcpu; ++j) {
+			vcpu_stats_fds[j] = vcpu_get_stats_fd(vcpus[i * max_vcpu + j]);
+			stats_test(dup(vcpu_stats_fds[j]));
+			stats_test(vcpu_get_stats_fd(vcpus[i * max_vcpu + j]));
+		}
+
+		/*
+		 * Close the VM fd and redo the stats tests.  KVM should gift a
+		 * reference (to the VM) to each stats fd, i.e. stats should
+		 * still be accessible even after userspace has put its last
+		 * _direct_ reference to the VM.
+		 */
+		kvm_vm_free(vms[i]);
+
+		stats_test(vm_stats_fds);
		for (j = 0; j < max_vcpu; ++j)
-			vcpu_stats_test(vcpus[i * max_vcpu + j]);
+			stats_test(vcpu_stats_fds[j]);
+
		ksft_test_result_pass("vm%i\n", i);
	}

-	for (i = 0; i < max_vm; ++i)
-		kvm_vm_free(vms[i]);
	free(vms);
+	free(vcpus);
+	free(vcpu_stats_fds);

	ksft_finished();	/* Print results and exit() accordingly */
}
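The reworked loop above leans on dup(): the duplicate refers to the same open file description, and each descriptor can be handed to a consumer that closes it independently, which is how stats_test() ends up being driven more than once per VM. A standalone sketch of that ownership pattern (consume() is a made-up stand-in for stats_test()):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static void consume(int fd)	/* stand-in for stats_test() */
{
	char c;

	if (pread(fd, &c, 1, 0) == 1)
		printf("read one byte via fd %d\n", fd);
	close(fd);		/* the consumer owns and closes its fd */
}

int main(void)
{
	int fd = open("/proc/self/status", O_RDONLY);

	if (fd < 0)
		return 1;
	consume(dup(fd));	/* duplicate reference, closed by consumer */
	consume(fd);		/* original is still valid afterwards */
	return 0;
}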
+39 -31
tools/testing/selftests/kvm/x86_64/set_sregs_test.c
···
#include "kvm_util.h"
#include "processor.h"

-static void test_cr4_feature_bit(struct kvm_vcpu *vcpu, struct kvm_sregs *orig,
-				 uint64_t feature_bit)
-{
-	struct kvm_sregs sregs;
-	int rc;
-
-	/* Skip the sub-test, the feature is supported. */
-	if (orig->cr4 & feature_bit)
-		return;
-
-	memcpy(&sregs, orig, sizeof(sregs));
-	sregs.cr4 |= feature_bit;
-
-	rc = _vcpu_sregs_set(vcpu, &sregs);
-	TEST_ASSERT(rc, "KVM allowed unsupported CR4 bit (0x%lx)", feature_bit);
-
-	/* Sanity check that KVM didn't change anything. */
-	vcpu_sregs_get(vcpu, &sregs);
-	TEST_ASSERT(!memcmp(&sregs, orig, sizeof(sregs)), "KVM modified sregs");
-}
+#define TEST_INVALID_CR_BIT(vcpu, cr, orig, bit)				\
+do {										\
+	struct kvm_sregs new;							\
+	int rc;									\
+										\
+	/* Skip the sub-test, the feature/bit is supported. */			\
+	if (orig.cr & bit)							\
+		break;								\
+										\
+	memcpy(&new, &orig, sizeof(sregs));					\
+	new.cr |= bit;								\
+										\
+	rc = _vcpu_sregs_set(vcpu, &new);					\
+	TEST_ASSERT(rc, "KVM allowed invalid " #cr " bit (0x%lx)", bit);	\
+										\
+	/* Sanity check that KVM didn't change anything. */			\
+	vcpu_sregs_get(vcpu, &new);						\
+	TEST_ASSERT(!memcmp(&new, &orig, sizeof(new)), "KVM modified sregs");	\
+} while (0)

static uint64_t calc_supported_cr4_feature_bits(void)
{
···
	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm;
	uint64_t cr4;
-	int rc;
+	int rc, i;

	/*
	 * Create a dummy VM, specifically to avoid doing KVM_SET_CPUID2, and
···

	vcpu_sregs_get(vcpu, &sregs);

+	sregs.cr0 = 0;
	sregs.cr4 |= calc_supported_cr4_feature_bits();
	cr4 = sregs.cr4;
···
		    sregs.cr4, cr4);

	/* Verify all unsupported features are rejected by KVM. */
-	test_cr4_feature_bit(vcpu, &sregs, X86_CR4_UMIP);
-	test_cr4_feature_bit(vcpu, &sregs, X86_CR4_LA57);
-	test_cr4_feature_bit(vcpu, &sregs, X86_CR4_VMXE);
-	test_cr4_feature_bit(vcpu, &sregs, X86_CR4_SMXE);
-	test_cr4_feature_bit(vcpu, &sregs, X86_CR4_FSGSBASE);
-	test_cr4_feature_bit(vcpu, &sregs, X86_CR4_PCIDE);
-	test_cr4_feature_bit(vcpu, &sregs, X86_CR4_OSXSAVE);
-	test_cr4_feature_bit(vcpu, &sregs, X86_CR4_SMEP);
-	test_cr4_feature_bit(vcpu, &sregs, X86_CR4_SMAP);
-	test_cr4_feature_bit(vcpu, &sregs, X86_CR4_PKE);
+	TEST_INVALID_CR_BIT(vcpu, cr4, sregs, X86_CR4_UMIP);
+	TEST_INVALID_CR_BIT(vcpu, cr4, sregs, X86_CR4_LA57);
+	TEST_INVALID_CR_BIT(vcpu, cr4, sregs, X86_CR4_VMXE);
+	TEST_INVALID_CR_BIT(vcpu, cr4, sregs, X86_CR4_SMXE);
+	TEST_INVALID_CR_BIT(vcpu, cr4, sregs, X86_CR4_FSGSBASE);
+	TEST_INVALID_CR_BIT(vcpu, cr4, sregs, X86_CR4_PCIDE);
+	TEST_INVALID_CR_BIT(vcpu, cr4, sregs, X86_CR4_OSXSAVE);
+	TEST_INVALID_CR_BIT(vcpu, cr4, sregs, X86_CR4_SMEP);
+	TEST_INVALID_CR_BIT(vcpu, cr4, sregs, X86_CR4_SMAP);
+	TEST_INVALID_CR_BIT(vcpu, cr4, sregs, X86_CR4_PKE);
+
+	for (i = 32; i < 64; i++)
+		TEST_INVALID_CR_BIT(vcpu, cr0, sregs, BIT(i));
+
+	/* NW without CD is illegal, as is PG without PE. */
+	TEST_INVALID_CR_BIT(vcpu, cr0, sregs, X86_CR0_NW);
+	TEST_INVALID_CR_BIT(vcpu, cr0, sregs, X86_CR0_PG);
+
	kvm_vm_free(vm);

	/* Create a "real" VM and verify APIC_BASE can be set. */
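The TEST_INVALID_CR_BIT() conversion above works because the preprocessor substitutes the cr argument as a raw token: orig.cr becomes orig.cr0 or orig.cr4, and #cr stringifies the same token for the assertion message. A compilable sketch of just that trick (struct and names are invented, not the selftest code):

#include <stdio.h>

struct regs { unsigned long cr0, cr4; };

/* 'cr' is pasted in as a field name and stringified via #cr. */
#define CHECK_BIT(r, cr, bit)						\
do {									\
	if ((r).cr & (bit))						\
		printf(#cr " has bit 0x%lx set\n", (unsigned long)(bit)); \
} while (0)

int main(void)
{
	struct regs r = { .cr0 = 0x1, .cr4 = 0x20 };

	CHECK_BIT(r, cr0, 0x1);		/* expands to r.cr0 */
	CHECK_BIT(r, cr4, 0x20);	/* expands to r.cr4 */
	return 0;
}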
+1 -3
tools/testing/selftests/net/mptcp/mptcp_join.sh
···
elif ! iptables -V &> /dev/null; then
	echo "SKIP: Could not run all tests without iptables tool"
	exit $ksft_skip
-fi
-
-if ! ip6tables -V &> /dev/null; then
+elif ! ip6tables -V &> /dev/null; then
	echo "SKIP: Could not run all tests without ip6tables tool"
	exit $ksft_skip
fi
+22 -6
tools/testing/selftests/rseq/rseq.c
···
#include "../kselftest.h"
#include "rseq.h"

-static const ptrdiff_t *libc_rseq_offset_p;
-static const unsigned int *libc_rseq_size_p;
-static const unsigned int *libc_rseq_flags_p;
+/*
+ * Define weak versions to play nice with binaries that are statically linked
+ * against a libc that doesn't support registering its own rseq.
+ */
+__weak ptrdiff_t __rseq_offset;
+__weak unsigned int __rseq_size;
+__weak unsigned int __rseq_flags;
+
+static const ptrdiff_t *libc_rseq_offset_p = &__rseq_offset;
+static const unsigned int *libc_rseq_size_p = &__rseq_size;
+static const unsigned int *libc_rseq_flags_p = &__rseq_flags;

/* Offset from the thread pointer to the rseq area. */
ptrdiff_t rseq_offset;
···
static __attribute__((constructor))
void rseq_init(void)
{
-	libc_rseq_offset_p = dlsym(RTLD_NEXT, "__rseq_offset");
-	libc_rseq_size_p = dlsym(RTLD_NEXT, "__rseq_size");
-	libc_rseq_flags_p = dlsym(RTLD_NEXT, "__rseq_flags");
+	/*
+	 * If the libc's registered rseq size isn't already valid, it may be
+	 * because the binary is dynamically linked and not necessarily due to
+	 * libc not having registered a restartable sequence.  Try to find the
+	 * symbols if that's the case.
+	 */
+	if (!*libc_rseq_size_p) {
+		libc_rseq_offset_p = dlsym(RTLD_NEXT, "__rseq_offset");
+		libc_rseq_size_p = dlsym(RTLD_NEXT, "__rseq_size");
+		libc_rseq_flags_p = dlsym(RTLD_NEXT, "__rseq_flags");
+	}
	if (libc_rseq_size_p && libc_rseq_offset_p && libc_rseq_flags_p &&
	    *libc_rseq_size_p != 0) {
		/* rseq registration owned by glibc */
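The hunk above combines two lookup strategies: a __weak definition satisfies the linker for statically linked binaries (and reads as zero if libc never registered rseq), while dlsym(RTLD_NEXT, ...) still finds a dynamically linked libc's strong symbols. A small sketch of the same pattern with an invented symbol name (build with cc demo.c -ldl; since no real library defines the symbol, the dlsym() probe simply fails and the weak zero wins here):

#define _GNU_SOURCE	/* for RTLD_NEXT */
#include <dlfcn.h>
#include <stdio.h>

/* Weak definition: used unless a strong one is found elsewhere. */
__attribute__((weak)) unsigned int __example_size;	/* invented name */

int main(void)
{
	const unsigned int *p = &__example_size;

	/* Zero may just mean "statically resolved"; probe dynamically too. */
	if (!*p) {
		const unsigned int *q = dlsym(RTLD_NEXT, "__example_size");

		if (q)
			p = q;
	}
	printf("size = %u\n", *p);
	return 0;
}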
+24
virt/kvm/kvm_main.c
···
				sizeof(vcpu->stat), user_buffer, size, offset);
}

+static int kvm_vcpu_stats_release(struct inode *inode, struct file *file)
+{
+	struct kvm_vcpu *vcpu = file->private_data;
+
+	kvm_put_kvm(vcpu->kvm);
+	return 0;
+}
+
static const struct file_operations kvm_vcpu_stats_fops = {
	.read = kvm_vcpu_stats_read,
+	.release = kvm_vcpu_stats_release,
	.llseek = noop_llseek,
};
···
		put_unused_fd(fd);
		return PTR_ERR(file);
	}
+
+	kvm_get_kvm(vcpu->kvm);
+
	file->f_mode |= FMODE_PREAD;
	fd_install(fd, file);
···
				sizeof(kvm->stat), user_buffer, size, offset);
}

+static int kvm_vm_stats_release(struct inode *inode, struct file *file)
+{
+	struct kvm *kvm = file->private_data;
+
+	kvm_put_kvm(kvm);
+	return 0;
+}
+
static const struct file_operations kvm_vm_stats_fops = {
	.read = kvm_vm_stats_read,
+	.release = kvm_vm_stats_release,
	.llseek = noop_llseek,
};
···
		put_unused_fd(fd);
		return PTR_ERR(file);
	}
+
+	kvm_get_kvm(kvm);
+
	file->f_mode |= FMODE_PREAD;
	fd_install(fd, file);
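Taken together, the hunks above enforce one rule: every stats file created takes its own reference to the VM (kvm_get_kvm()) and drops it from its ->release hook, so the VM outlives userspace's last direct fd. A userspace sketch of that get-on-create / put-on-release discipline (all names invented; plain C11 atomics stand in for the kernel's refcount machinery):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct obj { atomic_int refcount; };

static struct obj *obj_get(struct obj *o)	/* cf. kvm_get_kvm() */
{
	atomic_fetch_add(&o->refcount, 1);
	return o;
}

static void obj_put(struct obj *o)		/* cf. kvm_put_kvm() */
{
	/* The last put frees; mirrors the stats fd's ->release hook. */
	if (atomic_fetch_sub(&o->refcount, 1) == 1) {
		printf("object freed\n");
		free(o);
	}
}

int main(void)
{
	struct obj *o = malloc(sizeof(*o));
	struct obj *stats_ref;

	atomic_init(&o->refcount, 1);	/* creator's reference */
	stats_ref = obj_get(o);		/* "stats fd" takes its own */

	obj_put(o);		/* creator drops its direct reference... */
	obj_put(stats_ref);	/* ...object survives until this put */
	return 0;
}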