···88Required properties:99- compatible : Should be of the form "ti,emif-<ip-rev>" where <ip-rev>1010 is the IP revision of the specific EMIF instance.1111+ For am437x should be ti,emif-am4372.11121213- phy-type : <u32> indicating the DDR phy type. Following are the1314 allowed values
···4242- regulator-system-load: Load in uA present on regulator that is not captured by4343 any consumer request.4444- regulator-pull-down: Enable pull down resistor when the regulator is disabled.4545+- regulator-over-current-protection: Enable over current protection.45464647Deprecated properties:4748- regulator-compatible: If a regulator chip contains multiple
+11-2
Documentation/power/swsusp.txt
···410410411411Q: Can I suspend-to-disk using a swap partition under LVM?412412413413-A: No. You can suspend successfully, but you'll not be able to414414-resume. uswsusp should be able to work with LVM. See suspend.sf.net.413413+A: Yes and No. You can suspend successfully, but the kernel will not be able414414+to resume on its own. You need an initramfs that can recognize the resume415415+situation, activate the logical volume containing the swap volume (but not416416+touch any filesystems!), and eventually call417417+418418+echo -n "$major:$minor" > /sys/power/resume419419+420420+where $major and $minor are the respective major and minor device numbers of421421+the swap volume.422422+423423+uswsusp works with LVM, too. See http://suspend.sourceforge.net/415424416425Q: I upgraded the kernel from 2.6.15 to 2.6.16. Both kernels were417426compiled with the similar configuration files. Anyway I found that
+21-5
MAINTAINERS
···16141614L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)16151615S: Maintained16161616F: arch/arm/boot/dts/vexpress*16171617+F: arch/arm64/boot/dts/arm/vexpress*16171618F: arch/arm/mach-vexpress/16181619F: */*/vexpress*16191620F: */*/*/vexpress*···25632562F: arch/powerpc/oprofile/*cell*25642563F: arch/powerpc/platforms/cell/2565256425662566-CEPH DISTRIBUTED FILE SYSTEM CLIENT25652565+CEPH COMMON CODE (LIBCEPH)25662566+M: Ilya Dryomov <idryomov@gmail.com>25672567M: "Yan, Zheng" <zyan@redhat.com>25682568M: Sage Weil <sage@redhat.com>25692569L: ceph-devel@vger.kernel.org25702570W: http://ceph.com/25712571T: git git://git.kernel.org/pub/scm/linux/kernel/git/sage/ceph-client.git25722572+T: git git://github.com/ceph/ceph-client.git25722573S: Supported25732573-F: Documentation/filesystems/ceph.txt25742574-F: fs/ceph/25752574F: net/ceph/25762575F: include/linux/ceph/25772576F: include/linux/crush/25772577+25782578+CEPH DISTRIBUTED FILE SYSTEM CLIENT (CEPH)25792579+M: "Yan, Zheng" <zyan@redhat.com>25802580+M: Sage Weil <sage@redhat.com>25812581+M: Ilya Dryomov <idryomov@gmail.com>25822582+L: ceph-devel@vger.kernel.org25832583+W: http://ceph.com/25842584+T: git git://git.kernel.org/pub/scm/linux/kernel/git/sage/ceph-client.git25852585+T: git git://github.com/ceph/ceph-client.git25862586+S: Supported25872587+F: Documentation/filesystems/ceph.txt25882588+F: fs/ceph/2578258925792590CERTIFIED WIRELESS USB (WUSB) SUBSYSTEM:25802591L: linux-usb@vger.kernel.org···61606147Q: https://patchwork.kernel.org/project/linux-nvdimm/list/61616148S: Supported61626149F: drivers/nvdimm/pmem.c61506150+F: include/linux/pmem.h6163615161646152LINUX FOR IBM pSERIES (RS/6000)61656153M: Paul Mackerras <paulus@au.ibm.com>···61756161W: http://www.penguinppc.org/61766162L: linuxppc-dev@lists.ozlabs.org61776163Q: http://patchwork.ozlabs.org/project/linuxppc-dev/list/61786178-T: git git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc.git61646164+T: git 
git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux.git61796165S: Supported61806166F: Documentation/powerpc/61816167F: arch/powerpc/···83808366M: Ilya Dryomov <idryomov@gmail.com>83818367M: Sage Weil <sage@redhat.com>83828368M: Alex Elder <elder@kernel.org>83838383-M: ceph-devel@vger.kernel.org83698369+L: ceph-devel@vger.kernel.org83848370W: http://ceph.com/83858371T: git git://git.kernel.org/pub/scm/linux/kernel/git/sage/ceph-client.git83728372+T: git git://github.com/ceph/ceph-client.git83868373S: Supported83748374+F: Documentation/ABI/testing/sysfs-bus-rbd83878375F: drivers/block/rbd.c83888376F: drivers/block/rbd_types.h83898377
···16931693config HIGHPTE16941694 bool "Allocate 2nd-level pagetables from highmem"16951695 depends on HIGHMEM16961696+ help16971697+ The VM uses one page of physical memory for each page table.16981698+ For systems with a lot of processes, this can use a lot of16991699+ precious low memory, eventually leading to low memory being17001700+ consumed by page tables. Setting this option will allow17011701+ user-space 2nd level page tables to reside in high memory.1696170216971703config HW_PERF_EVENTS16981704 bool "Enable hardware performance counter support for perf events"
+1-1
arch/arm/Kconfig.debug
···1635163516361636config DEBUG_SET_MODULE_RONX16371637 bool "Set loadable kernel module data as NX and text as RO"16381638- depends on MODULES16381638+ depends on MODULES && MMU16391639 ---help---16401640 This option helps catch unintended modifications to loadable16411641 kernel module's text and read-only data. It also prevents execution
+4
arch/arm/boot/dts/am335x-boneblack.dts
···8080 status = "okay";8181 };8282};8383+8484+&rtc {8585+ system-power-controller;8686+};
···140140 * The _caller variety takes a __builtin_return_address(0) value for141141 * /proc/vmalloc to use - and should only be used in non-inline functions.142142 */143143-extern void __iomem *__arm_ioremap_pfn_caller(unsigned long, unsigned long,144144- size_t, unsigned int, void *);145143extern void __iomem *__arm_ioremap_caller(phys_addr_t, size_t, unsigned int,146144 void *);147147-148145extern void __iomem *__arm_ioremap_pfn(unsigned long, unsigned long, size_t, unsigned int);149149-extern void __iomem *__arm_ioremap(phys_addr_t, size_t, unsigned int);150146extern void __iomem *__arm_ioremap_exec(phys_addr_t, size_t, bool cached);151147extern void __iounmap(volatile void __iomem *addr);152152-extern void __arm_iounmap(volatile void __iomem *addr);153148154149extern void __iomem * (*arch_ioremap_caller)(phys_addr_t, size_t,155150 unsigned int, void *);···316321static inline void memset_io(volatile void __iomem *dst, unsigned c,317322 size_t count)318323{319319- memset((void __force *)dst, c, count);324324+ extern void mmioset(void *, unsigned int, size_t);325325+ mmioset((void __force *)dst, c, count);320326}321327#define memset_io(dst,c,count) memset_io(dst,c,count)322328323329static inline void memcpy_fromio(void *to, const volatile void __iomem *from,324330 size_t count)325331{326326- memcpy(to, (const void __force *)from, count);332332+ extern void mmiocpy(void *, const void *, size_t);333333+ mmiocpy(to, (const void __force *)from, count);327334}328335#define memcpy_fromio(to,from,count) memcpy_fromio(to,from,count)329336330337static inline void memcpy_toio(volatile void __iomem *to, const void *from,331338 size_t count)332339{333333- memcpy((void __force *)to, from, count);340340+ extern void mmiocpy(void *, const void *, size_t);341341+ mmiocpy((void __force *)to, from, count);334342}335343#define memcpy_toio(to,from,count) memcpy_toio(to,from,count)336344···346348#endif /* readl */347349348350/*349349- * ioremap and friends.351351+ * ioremap() and 
friends.350352 *351351- * ioremap takes a PCI memory address, as specified in352352- * Documentation/io-mapping.txt.353353+ * ioremap() takes a resource address, and size. Due to the ARM memory354354+ * types, it is important to use the correct ioremap() function as each355355+ * mapping has specific properties.353356 *357357+ * Function Memory type Cacheability Cache hint358358+ * ioremap() Device n/a n/a359359+ * ioremap_nocache() Device n/a n/a360360+ * ioremap_cache() Normal Writeback Read allocate361361+ * ioremap_wc() Normal Non-cacheable n/a362362+ * ioremap_wt() Normal Non-cacheable n/a363363+ *364364+ * All device mappings have the following properties:365365+ * - no access speculation366366+ * - no repetition (eg, on return from an exception)367367+ * - number, order and size of accesses are maintained368368+ * - unaligned accesses are "unpredictable"369369+ * - writes may be delayed before they hit the endpoint device370370+ *371371+ * ioremap_nocache() is the same as ioremap() as there are too many device372372+ * drivers using this for device registers, and documentation which tells373373+ * people to use it for such for this to be any different. 
This is not a374374+ * safe fallback for memory-like mappings, or memory regions where the375375+ * compiler may generate unaligned accesses - eg, via inlining its own376376+ * memcpy.377377+ *378378+ * All normal memory mappings have the following properties:379379+ * - reads can be repeated with no side effects380380+ * - repeated reads return the last value written381381+ * - reads can fetch additional locations without side effects382382+ * - writes can be repeated (in certain cases) with no side effects383383+ * - writes can be merged before accessing the target384384+ * - unaligned accesses can be supported385385+ * - ordering is not guaranteed without explicit dependencies or barrier386386+ * instructions387387+ * - writes may be delayed before they hit the endpoint memory388388+ *389389+ * The cache hint is only a performance hint: CPUs may alias these hints.390390+ * Eg, a CPU not implementing read allocate but implementing write allocate391391+ * will provide a write allocate mapping instead.354392 */355355-#define ioremap(cookie,size) __arm_ioremap((cookie), (size), MT_DEVICE)356356-#define ioremap_nocache(cookie,size) __arm_ioremap((cookie), (size), MT_DEVICE)357357-#define ioremap_cache(cookie,size) __arm_ioremap((cookie), (size), MT_DEVICE_CACHED)358358-#define ioremap_wc(cookie,size) __arm_ioremap((cookie), (size), MT_DEVICE_WC)359359-#define ioremap_wt(cookie,size) __arm_ioremap((cookie), (size), MT_DEVICE)360360-#define iounmap __arm_iounmap393393+void __iomem *ioremap(resource_size_t res_cookie, size_t size);394394+#define ioremap ioremap395395+#define ioremap_nocache ioremap396396+397397+void __iomem *ioremap_cache(resource_size_t res_cookie, size_t size);398398+#define ioremap_cache ioremap_cache399399+400400+void __iomem *ioremap_wc(resource_size_t res_cookie, size_t size);401401+#define ioremap_wc ioremap_wc402402+#define ioremap_wt ioremap_wc403403+404404+void iounmap(volatile void __iomem *iomem_cookie);405405+#define iounmap 
iounmap361406362407/*363408 * io{read,write}{16,32}be() macros
···129129130130/*131131 * These are the memory types, defined to be compatible with132132- * pre-ARMv6 CPUs cacheable and bufferable bits: XXCB132132+ * pre-ARMv6 CPUs cacheable and bufferable bits: n/a,n/a,C,B133133+ * ARMv6+ without TEX remapping, they are a table index.134134+ * ARMv6+ with TEX remapping, they correspond to n/a,TEX(0),C,B135135+ *136136+ * MT type Pre-ARMv6 ARMv6+ type / cacheable status137137+ * UNCACHED Uncached Strongly ordered138138+ * BUFFERABLE Bufferable Normal memory / non-cacheable139139+ * WRITETHROUGH Writethrough Normal memory / write through140140+ * WRITEBACK Writeback Normal memory / write back, read alloc141141+ * MINICACHE Minicache N/A142142+ * WRITEALLOC Writeback Normal memory / write back, write alloc143143+ * DEV_SHARED Uncached Device memory (shared)144144+ * DEV_NONSHARED Uncached Device memory (non-shared)145145+ * DEV_WC Bufferable Normal memory / non-cacheable146146+ * DEV_CACHED Writeback Normal memory / write back, read alloc147147+ * VECTORS Variable Normal memory / variable148148+ *149149+ * All normal memory mappings have the following properties:150150+ * - reads can be repeated with no side effects151151+ * - repeated reads return the last value written152152+ * - reads can fetch additional locations without side effects153153+ * - writes can be repeated (in certain cases) with no side effects154154+ * - writes can be merged before accessing the target155155+ * - unaligned accesses can be supported156156+ *157157+ * All device mappings have the following properties:158158+ * - no access speculation159159+ * - no repetition (eg, on return from an exception)160160+ * - number, order and size of accesses are maintained161161+ * - unaligned accesses are "unpredictable"133162 */134163#define L_PTE_MT_UNCACHED (_AT(pteval_t, 0x00) << 2) /* 0000 */135164#define L_PTE_MT_BUFFERABLE (_AT(pteval_t, 0x01) << 2) /* 0001 */
···11/*22- * RTC I/O Bridge interfaces for CSR SiRFprimaII22+ * RTC I/O Bridge interfaces for CSR SiRFprimaII/atlas733 * ARM access the registers of SYSRTC, GPSRTC and PWRC through this module44 *55 * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company.···1010#include <linux/kernel.h>1111#include <linux/module.h>1212#include <linux/io.h>1313+#include <linux/regmap.h>1314#include <linux/of.h>1415#include <linux/of_address.h>1516#include <linux/of_device.h>···6766{6867 unsigned long flags, val;69686969+ /* TODO: add hwspinlock to sync with M3 */7070 spin_lock_irqsave(&rtciobrg_lock, flags);71717272 val = __sirfsoc_rtc_iobrg_readl(addr);···9290{9391 unsigned long flags;94929393+ /* TODO: add hwspinlock to sync with M3 */9594 spin_lock_irqsave(&rtciobrg_lock, flags);96959796 sirfsoc_rtc_iobrg_pre_writel(val, addr);···104101 spin_unlock_irqrestore(&rtciobrg_lock, flags);105102}106103EXPORT_SYMBOL_GPL(sirfsoc_rtc_iobrg_writel);104104+105105+106106+static int regmap_iobg_regwrite(void *context, unsigned int reg,107107+ unsigned int val)108108+{109109+ sirfsoc_rtc_iobrg_writel(val, reg);110110+ return 0;111111+}112112+113113+static int regmap_iobg_regread(void *context, unsigned int reg,114114+ unsigned int *val)115115+{116116+ *val = (u32)sirfsoc_rtc_iobrg_readl(reg);117117+ return 0;118118+}119119+120120+static struct regmap_bus regmap_iobg = {121121+ .reg_write = regmap_iobg_regwrite,122122+ .reg_read = regmap_iobg_regread,123123+};124124+125125+/**126126+ * devm_regmap_init_iobg(): Initialise managed register map127127+ *128128+ * @iobg: Device that will be interacted with129129+ * @config: Configuration for register map130130+ *131131+ * The return value will be an ERR_PTR() on error or a valid pointer132132+ * to a struct regmap. 
The regmap will be automatically freed by the133133+ * device management code.134134+ */135135+struct regmap *devm_regmap_init_iobg(struct device *dev,136136+ const struct regmap_config *config)137137+{138138+ const struct regmap_bus *bus = ®map_iobg;139139+140140+ return devm_regmap_init(dev, bus, dev, config);141141+}142142+EXPORT_SYMBOL_GPL(devm_regmap_init_iobg);107143108144static const struct of_device_id rtciobrg_ids[] = {109145 { .compatible = "sirf,prima2-rtciobg" },···174132}175133postcore_initcall(sirfsoc_rtciobrg_init);176134177177-MODULE_AUTHOR("Zhiwu Song <zhiwu.song@csr.com>, "178178- "Barry Song <baohua.song@csr.com>");135135+MODULE_AUTHOR("Zhiwu Song <zhiwu.song@csr.com>");136136+MODULE_AUTHOR("Barry Song <baohua.song@csr.com>");179137MODULE_DESCRIPTION("CSR SiRFprimaII rtc io bridge");180138MODULE_LICENSE("GPL v2");
···600600 break;601601602602 case blezl_op: /* not really i_format */603603- if (NO_R6EMU)603603+ if (!insn.i_format.rt && NO_R6EMU)604604 goto sigill_r6;605605 case blez_op:606606 /*···635635 break;636636637637 case bgtzl_op:638638- if (NO_R6EMU)638638+ if (!insn.i_format.rt && NO_R6EMU)639639 goto sigill_r6;640640 case bgtz_op:641641 /*
+48-48
arch/mips/kernel/cps-vec.S
···6060 nop61616262 /* This is an NMI */6363- la k0, nmi_handler6363+ PTR_LA k0, nmi_handler6464 jr k06565 nop6666···107107 mul t1, t1, t0108108 mul t1, t1, t2109109110110- li a0, KSEG0111111- add a1, a0, t1110110+ li a0, CKSEG0111111+ PTR_ADD a1, a0, t11121121: cache Index_Store_Tag_I, 0(a0)113113- add a0, a0, t0113113+ PTR_ADD a0, a0, t0114114 bne a0, a1, 1b115115 nop116116icache_done:···134134 mul t1, t1, t0135135 mul t1, t1, t2136136137137- li a0, KSEG0138138- addu a1, a0, t1139139- subu a1, a1, t0137137+ li a0, CKSEG0138138+ PTR_ADDU a1, a0, t1139139+ PTR_SUBU a1, a1, t01401401: cache Index_Store_Tag_D, 0(a0)141141 bne a0, a1, 1b142142- add a0, a0, t0142142+ PTR_ADD a0, a0, t0143143dcache_done:144144145145 /* Set Kseg0 CCA to that in s0 */···152152153153 /* Enter the coherent domain */154154 li t0, 0xff155155- sw t0, GCR_CL_COHERENCE_OFS(v1)155155+ PTR_S t0, GCR_CL_COHERENCE_OFS(v1)156156 ehb157157158158 /* Jump to kseg0 */159159- la t0, 1f159159+ PTR_LA t0, 1f160160 jr t0161161 nop162162···178178 nop179179180180 /* Off we go! */181181- lw t1, VPEBOOTCFG_PC(v0)182182- lw gp, VPEBOOTCFG_GP(v0)183183- lw sp, VPEBOOTCFG_SP(v0)181181+ PTR_L t1, VPEBOOTCFG_PC(v0)182182+ PTR_L gp, VPEBOOTCFG_GP(v0)183183+ PTR_L sp, VPEBOOTCFG_SP(v0)184184 jr t1185185 nop186186 END(mips_cps_core_entry)···217217218218.org 0x480219219LEAF(excep_ejtag)220220- la k0, ejtag_debug_handler220220+ PTR_LA k0, ejtag_debug_handler221221 jr k0222222 nop223223 END(excep_ejtag)···229229 nop230230231231 .set push232232- .set mips32r2232232+ .set mips64r2233233 .set mt234234235235 /* Only allow 1 TC per VPE to execute... 
*/···237237238238 /* ...and for the moment only 1 VPE */239239 dvpe240240- la t1, 1f240240+ PTR_LA t1, 1f241241 jr.hb t1242242 nop243243···250250 mfc0 t0, CP0_MVPCONF0251251 srl t0, t0, MVPCONF0_PVPE_SHIFT252252 andi t0, t0, (MVPCONF0_PVPE >> MVPCONF0_PVPE_SHIFT)253253- addiu t7, t0, 1253253+ addiu ta3, t0, 1254254255255 /* If there's only 1, we're done */256256 beqz t0, 2f257257 nop258258259259 /* Loop through each VPE within this core */260260- li t5, 1260260+ li ta1, 12612612622621: /* Operate on the appropriate TC */263263- mtc0 t5, CP0_VPECONTROL263263+ mtc0 ta1, CP0_VPECONTROL264264 ehb265265266266 /* Bind TC to VPE (1:1 TC:VPE mapping) */267267- mttc0 t5, CP0_TCBIND267267+ mttc0 ta1, CP0_TCBIND268268269269 /* Set exclusive TC, non-active, master */270270 li t0, VPECONF0_MVP271271- sll t1, t5, VPECONF0_XTC_SHIFT271271+ sll t1, ta1, VPECONF0_XTC_SHIFT272272 or t0, t0, t1273273 mttc0 t0, CP0_VPECONF0274274···280280 mttc0 t0, CP0_TCHALT281281282282 /* Next VPE */283283- addiu t5, t5, 1284284- slt t0, t5, t7283283+ addiu ta1, ta1, 1284284+ slt t0, ta1, ta3285285 bnez t0, 1b286286 nop287287···298298299299LEAF(mips_cps_boot_vpes)300300 /* Retrieve CM base address */301301- la t0, mips_cm_base302302- lw t0, 0(t0)301301+ PTR_LA t0, mips_cm_base302302+ PTR_L t0, 0(t0)303303304304 /* Calculate a pointer to this cores struct core_boot_config */305305- lw t0, GCR_CL_ID_OFS(t0)305305+ PTR_L t0, GCR_CL_ID_OFS(t0)306306 li t1, COREBOOTCFG_SIZE307307 mul t0, t0, t1308308- la t1, mips_cps_core_bootcfg309309- lw t1, 0(t1)310310- addu t0, t0, t1308308+ PTR_LA t1, mips_cps_core_bootcfg309309+ PTR_L t1, 0(t1)310310+ PTR_ADDU t0, t0, t1311311312312 /* Calculate this VPEs ID. 
If the core doesn't support MT use 0 */313313- has_mt t6, 1f313313+ has_mt ta2, 1f314314 li t9, 0315315316316 /* Find the number of VPEs present in the core */···3343341: /* Calculate a pointer to this VPEs struct vpe_boot_config */335335 li t1, VPEBOOTCFG_SIZE336336 mul v0, t9, t1337337- lw t7, COREBOOTCFG_VPECONFIG(t0)338338- addu v0, v0, t7337337+ PTR_L ta3, COREBOOTCFG_VPECONFIG(t0)338338+ PTR_ADDU v0, v0, ta3339339340340#ifdef CONFIG_MIPS_MT341341342342 /* If the core doesn't support MT then return */343343- bnez t6, 1f343343+ bnez ta2, 1f344344 nop345345 jr ra346346 nop347347348348 .set push349349- .set mips32r2349349+ .set mips64r2350350 .set mt3513513523521: /* Enter VPE configuration state */353353 dvpe354354- la t1, 1f354354+ PTR_LA t1, 1f355355 jr.hb t1356356 nop3573571: mfc0 t1, CP0_MVPCONTROL···360360 ehb361361362362 /* Loop through each VPE */363363- lw t6, COREBOOTCFG_VPEMASK(t0)364364- move t8, t6365365- li t5, 0363363+ PTR_L ta2, COREBOOTCFG_VPEMASK(t0)364364+ move t8, ta2365365+ li ta1, 0366366367367 /* Check whether the VPE should be running. 
If not, skip it */368368-1: andi t0, t6, 1368368+1: andi t0, ta2, 1369369 beqz t0, 2f370370 nop371371···373373 mfc0 t0, CP0_VPECONTROL374374 ori t0, t0, VPECONTROL_TARGTC375375 xori t0, t0, VPECONTROL_TARGTC376376- or t0, t0, t5376376+ or t0, t0, ta1377377 mtc0 t0, CP0_VPECONTROL378378 ehb379379···384384385385 /* Calculate a pointer to the VPEs struct vpe_boot_config */386386 li t0, VPEBOOTCFG_SIZE387387- mul t0, t0, t5388388- addu t0, t0, t7387387+ mul t0, t0, ta1388388+ addu t0, t0, ta3389389390390 /* Set the TC restart PC */391391 lw t1, VPEBOOTCFG_PC(t0)···423423 mttc0 t0, CP0_VPECONF0424424425425 /* Next VPE */426426-2: srl t6, t6, 1427427- addiu t5, t5, 1428428- bnez t6, 1b426426+2: srl ta2, ta2, 1427427+ addiu ta1, ta1, 1428428+ bnez ta2, 1b429429 nop430430431431 /* Leave VPE configuration state */···445445 /* This VPE should be offline, halt the TC */446446 li t0, TCHALT_H447447 mtc0 t0, CP0_TCHALT448448- la t0, 1f448448+ PTR_LA t0, 1f4494491: jr.hb t0450450 nop451451···466466 .set noat467467 lw $1, TI_CPU(gp)468468 sll $1, $1, LONGLOG469469- la \dest, __per_cpu_offset469469+ PTR_LA \dest, __per_cpu_offset470470 addu $1, $1, \dest471471 lw $1, 0($1)472472- la \dest, cps_cpu_state472472+ PTR_LA \dest, cps_cpu_state473473 addu \dest, \dest, $1474474 .set pop475475 .endm
+27-10
arch/mips/kernel/scall32-o32.S
···7373 .set noreorder7474 .set nomacro75757676-1: user_lw(t5, 16(t0)) # argument #5 from usp7777-4: user_lw(t6, 20(t0)) # argument #6 from usp7878-3: user_lw(t7, 24(t0)) # argument #7 from usp7979-2: user_lw(t8, 28(t0)) # argument #8 from usp7676+load_a4: user_lw(t5, 16(t0)) # argument #5 from usp7777+load_a5: user_lw(t6, 20(t0)) # argument #6 from usp7878+load_a6: user_lw(t7, 24(t0)) # argument #7 from usp7979+load_a7: user_lw(t8, 28(t0)) # argument #8 from usp8080+loads_done:80818182 sw t5, 16(sp) # argument #5 to ksp8283 sw t6, 20(sp) # argument #6 to ksp···8685 .set pop87868887 .section __ex_table,"a"8989- PTR 1b,bad_stack9090- PTR 2b,bad_stack9191- PTR 3b,bad_stack9292- PTR 4b,bad_stack8888+ PTR load_a4, bad_stack_a48989+ PTR load_a5, bad_stack_a59090+ PTR load_a6, bad_stack_a69191+ PTR load_a7, bad_stack_a79392 .previous94939594 lw t0, TI_FLAGS($28) # syscall tracing enabled?···154153/* ------------------------------------------------------------------------ */155154156155 /*157157- * The stackpointer for a call with more than 4 arguments is bad.158158- * We probably should handle this case a bit more drastic.156156+ * Our open-coded access area sanity test for the stack pointer157157+ * failed. We probably should handle this case a bit more drastic.159158 */160159bad_stack:161160 li v0, EFAULT···163162 li t0, 1 # set error flag164163 sw t0, PT_R7(sp)165164 j o32_syscall_exit165165+166166+bad_stack_a4:167167+ li t5, 0168168+ b load_a5169169+170170+bad_stack_a5:171171+ li t6, 0172172+ b load_a6173173+174174+bad_stack_a6:175175+ li t7, 0176176+ b load_a7177177+178178+bad_stack_a7:179179+ li t8, 0180180+ b loads_done166181167182 /*168183 * The system call does not exist in this kernel
+26-9
arch/mips/kernel/scall64-o32.S
···6969 daddu t1, t0, 327070 bltz t1, bad_stack71717272-1: lw a4, 16(t0) # argument #5 from usp7373-2: lw a5, 20(t0) # argument #6 from usp7474-3: lw a6, 24(t0) # argument #7 from usp7575-4: lw a7, 28(t0) # argument #8 from usp (for indirect syscalls)7272+load_a4: lw a4, 16(t0) # argument #5 from usp7373+load_a5: lw a5, 20(t0) # argument #6 from usp7474+load_a6: lw a6, 24(t0) # argument #7 from usp7575+load_a7: lw a7, 28(t0) # argument #8 from usp7676+loads_done:76777778 .section __ex_table,"a"7878- PTR 1b, bad_stack7979- PTR 2b, bad_stack8080- PTR 3b, bad_stack8181- PTR 4b, bad_stack7979+ PTR load_a4, bad_stack_a48080+ PTR load_a5, bad_stack_a58181+ PTR load_a6, bad_stack_a68282+ PTR load_a7, bad_stack_a78283 .previous83848485 li t1, _TIF_WORK_SYSCALL_ENTRY···167166 li t0, 1 # set error flag168167 sd t0, PT_R7(sp)169168 j o32_syscall_exit169169+170170+bad_stack_a4:171171+ li a4, 0172172+ b load_a5173173+174174+bad_stack_a5:175175+ li a5, 0176176+ b load_a6177177+178178+bad_stack_a6:179179+ li a6, 0180180+ b load_a7181181+182182+bad_stack_a7:183183+ li a7, 0184184+ b loads_done170185171186not_o32_scall:172187 /*···400383 PTR sys_connect /* 4170 */401384 PTR sys_getpeername402385 PTR sys_getsockname403403- PTR sys_getsockopt386386+ PTR compat_sys_getsockopt404387 PTR sys_listen405388 PTR compat_sys_recv /* 4175 */406389 PTR compat_sys_recvfrom
+5-8
arch/mips/kernel/setup.c
···337337 min_low_pfn = start;338338 if (end <= reserved_end)339339 continue;340340+#ifdef CONFIG_BLK_DEV_INITRD341341+ /* mapstart should be after initrd_end */342342+ if (initrd_end && end <= (unsigned long)PFN_UP(__pa(initrd_end)))343343+ continue;344344+#endif340345 if (start >= mapstart)341346 continue;342347 mapstart = max(reserved_end, start);···370365#endif371366 max_low_pfn = PFN_DOWN(HIGHMEM_START);372367 }373373-374374-#ifdef CONFIG_BLK_DEV_INITRD375375- /*376376- * mapstart should be after initrd_end377377- */378378- if (initrd_end)379379- mapstart = max(mapstart, (unsigned long)PFN_UP(__pa(initrd_end)));380380-#endif381368382369 /*383370 * Initialize the boot-time allocator with low memory only.
+3-3
arch/mips/kernel/smp-cps.c
···133133 /*134134 * Patch the start of mips_cps_core_entry to provide:135135 *136136- * v0 = CM base address136136+ * v1 = CM base address137137 * s0 = kseg0 CCA138138 */139139 entry_code = (u32 *)&mips_cps_core_entry;···369369370370static void wait_for_sibling_halt(void *ptr_cpu)371371{372372- unsigned cpu = (unsigned)ptr_cpu;372372+ unsigned cpu = (unsigned long)ptr_cpu;373373 unsigned vpe_id = cpu_vpe_id(&cpu_data[cpu]);374374 unsigned halted;375375 unsigned long flags;···430430 */431431 err = smp_call_function_single(cpu_death_sibling,432432 wait_for_sibling_halt,433433- (void *)cpu, 1);433433+ (void *)(unsigned long)cpu, 1);434434 if (err)435435 panic("Failed to call remote sibling CPU\n");436436 }
+43-1
arch/mips/kernel/smp.c
···6363cpumask_t cpu_core_map[NR_CPUS] __read_mostly;6464EXPORT_SYMBOL(cpu_core_map);65656666+/*6767+ * A logcal cpu mask containing only one VPE per core to6868+ * reduce the number of IPIs on large MT systems.6969+ */7070+cpumask_t cpu_foreign_map __read_mostly;7171+EXPORT_SYMBOL(cpu_foreign_map);7272+6673/* representing cpus for which sibling maps can be computed */6774static cpumask_t cpu_sibling_setup_map;6875···108101 cpumask_set_cpu(cpu, &cpu_core_map[i]);109102 }110103 }104104+}105105+106106+/*107107+ * Calculate a new cpu_foreign_map mask whenever a108108+ * new cpu appears or disappears.109109+ */110110+static inline void calculate_cpu_foreign_map(void)111111+{112112+ int i, k, core_present;113113+ cpumask_t temp_foreign_map;114114+115115+ /* Re-calculate the mask */116116+ for_each_online_cpu(i) {117117+ core_present = 0;118118+ for_each_cpu(k, &temp_foreign_map)119119+ if (cpu_data[i].package == cpu_data[k].package &&120120+ cpu_data[i].core == cpu_data[k].core)121121+ core_present = 1;122122+ if (!core_present)123123+ cpumask_set_cpu(i, &temp_foreign_map);124124+ }125125+126126+ cpumask_copy(&cpu_foreign_map, &temp_foreign_map);111127}112128113129struct plat_smp_ops *mp_ops;···176146 set_cpu_sibling_map(cpu);177147 set_cpu_core_map(cpu);178148149149+ calculate_cpu_foreign_map();150150+179151 cpumask_set_cpu(cpu, &cpu_callin_map);180152181153 synchronise_count_slave(cpu);···205173static void stop_this_cpu(void *dummy)206174{207175 /*208208- * Remove this CPU:176176+ * Remove this CPU. 
Be a bit slow here and177177+ * set the bits for every online CPU so we don't miss178178+ * any IPI whilst taking this VPE down.209179 */180180+181181+ cpumask_copy(&cpu_foreign_map, cpu_online_mask);182182+183183+ /* Make it visible to every other CPU */184184+ smp_mb();185185+210186 set_cpu_online(smp_processor_id(), false);187187+ calculate_cpu_foreign_map();211188 local_irq_disable();212189 while (1);213190}···238197 mp_ops->prepare_cpus(max_cpus);239198 set_cpu_sibling_map(0);240199 set_cpu_core_map(0);200200+ calculate_cpu_foreign_map();241201#ifndef CONFIG_HOTPLUG_CPU242202 init_cpu_present(cpu_possible_mask);243203#endif
+4-4
arch/mips/kernel/traps.c
···21302130 BUG_ON(current->mm);21312131 enter_lazy_tlb(&init_mm, current);2132213221332133- /* Boot CPU's cache setup in setup_arch(). */21342134- if (!is_boot_cpu)21352135- cpu_cache_init();21362136- tlb_init();21332133+ /* Boot CPU's cache setup in setup_arch(). */21342134+ if (!is_boot_cpu)21352135+ cpu_cache_init();21362136+ tlb_init();21372137 TLBMISS_HANDLER_SETUP();21382138}21392139
+1-1
arch/mips/loongson64/common/bonito-irq.c
···33 * Author: Jun Sun, jsun@mvista.com or jsun@junsun.net44 * Copyright (C) 2000, 2001 Ralf Baechle (ralf@gnu.org)55 *66- * Copyright (C) 2007 Lemote Inc. & Insititute of Computing Technology66+ * Copyright (C) 2007 Lemote Inc. & Institute of Computing Technology77 * Author: Fuxin Zhang, zhangfx@lemote.com88 *99 * This program is free software; you can redistribute it and/or modify it
+1-1
arch/mips/loongson64/common/cmdline.c
···66 * Copyright 2003 ICT CAS77 * Author: Michael Guo <guoyi@ict.ac.cn>88 *99- * Copyright (C) 2007 Lemote Inc. & Insititute of Computing Technology99+ * Copyright (C) 2007 Lemote Inc. & Institute of Computing Technology1010 * Author: Fuxin Zhang, zhangfx@lemote.com1111 *1212 * Copyright (C) 2009 Lemote Inc.
+1-1
arch/mips/loongson64/common/cs5536/cs5536_mfgpt.c
···11/*22 * CS5536 General timer functions33 *44- * Copyright (C) 2007 Lemote Inc. & Insititute of Computing Technology44+ * Copyright (C) 2007 Lemote Inc. & Institute of Computing Technology55 * Author: Yanhua, yanh@lemote.com66 *77 * Copyright (C) 2009 Lemote Inc.
+1-1
arch/mips/loongson64/common/env.c
···66 * Copyright 2003 ICT CAS77 * Author: Michael Guo <guoyi@ict.ac.cn>88 *99- * Copyright (C) 2007 Lemote Inc. & Insititute of Computing Technology99+ * Copyright (C) 2007 Lemote Inc. & Institute of Computing Technology1010 * Author: Fuxin Zhang, zhangfx@lemote.com1111 *1212 * Copyright (C) 2009 Lemote Inc.
+1-1
arch/mips/loongson64/common/irq.c
···11/*22- * Copyright (C) 2007 Lemote Inc. & Insititute of Computing Technology22+ * Copyright (C) 2007 Lemote Inc. & Institute of Computing Technology33 * Author: Fuxin Zhang, zhangfx@lemote.com44 *55 * This program is free software; you can redistribute it and/or modify it
+1-1
arch/mips/loongson64/common/setup.c
···11/*22- * Copyright (C) 2007 Lemote Inc. & Insititute of Computing Technology22+ * Copyright (C) 2007 Lemote Inc. & Institute of Computing Technology33 * Author: Fuxin Zhang, zhangfx@lemote.com44 *55 * This program is free software; you can redistribute it and/or modify it
+1-1
arch/mips/loongson64/fuloong-2e/irq.c
···11/*22- * Copyright (C) 2007 Lemote Inc. & Insititute of Computing Technology22+ * Copyright (C) 2007 Lemote Inc. & Institute of Computing Technology33 * Author: Fuxin Zhang, zhangfx@lemote.com44 *55 * This program is free software; you can redistribute it and/or modify it
+2-2
arch/mips/loongson64/lemote-2f/clock.c
···11/*22- * Copyright (C) 2006 - 2008 Lemote Inc. & Insititute of Computing Technology22+ * Copyright (C) 2006 - 2008 Lemote Inc. & Institute of Computing Technology33 * Author: Yanhua, yanh@lemote.com44 *55 * This file is subject to the terms and conditions of the GNU General Public···1515#include <linux/spinlock.h>16161717#include <asm/clock.h>1818-#include <asm/mach-loongson/loongson.h>1818+#include <asm/mach-loongson64/loongson.h>19192020static LIST_HEAD(clock_list);2121static DEFINE_SPINLOCK(clock_lock);
+1-1
arch/mips/loongson64/loongson-3/numa.c
···11/*22 * Copyright (C) 2010 Loongson Inc. & Lemote Inc. &33- * Insititute of Computing Technology33+ * Institute of Computing Technology44 * Author: Xiang Gao, gaoxiang@ict.ac.cn55 * Huacai Chen, chenhc@lemote.com66 * Xiaofu Meng, Shuangshuang Zhang
+3-3
arch/mips/math-emu/cp1emu.c
···451451 /* Fall through */452452 case jr_op:453453 /* For R6, JR already emulated in jalr_op */454454- if (NO_R6EMU && insn.r_format.opcode == jr_op)454454+ if (NO_R6EMU && insn.r_format.func == jr_op)455455 break;456456 *contpc = regs->regs[insn.r_format.rs];457457 return 1;···551551 dec_insn.next_pc_inc;552552 return 1;553553 case blezl_op:554554- if (NO_R6EMU)554554+ if (!insn.i_format.rt && NO_R6EMU)555555 break;556556 case blez_op:557557···588588 dec_insn.next_pc_inc;589589 return 1;590590 case bgtzl_op:591591- if (NO_R6EMU)591591+ if (!insn.i_format.rt && NO_R6EMU)592592 break;593593 case bgtz_op:594594 /*
+14-4
arch/mips/mm/c-r4k.c
···3737#include <asm/cacheflush.h> /* for run_uncached() */3838#include <asm/traps.h>3939#include <asm/dma-coherence.h>4040+#include <asm/mips-cm.h>40414142/*4243 * Special Variant of smp_call_function for use by cache functions:···5251{5352 preempt_disable();54535555-#ifndef CONFIG_MIPS_MT_SMP5656- smp_call_function(func, info, 1);5757-#endif5454+ /*5555+ * The Coherent Manager propagates address-based cache ops to other5656+ * cores but not index-based ops. However, r4k_on_each_cpu is used5757+ * in both cases so there is no easy way to tell what kind of op is5858+ * executed to the other cores. The best we can probably do is5959+ * to restrict that call when a CM is not present because both6060+ * CM-based SMP protocols (CMP & CPS) restrict index-based cache ops.6161+ */6262+ if (!mips_cm_present())6363+ smp_call_function_many(&cpu_foreign_map, func, info, 1);5864 func(info);5965 preempt_enable();6066}···945937}946938947939static char *way_string[] = { NULL, "direct mapped", "2-way",948948- "3-way", "4-way", "5-way", "6-way", "7-way", "8-way"940940+ "3-way", "4-way", "5-way", "6-way", "7-way", "8-way",941941+ "9-way", "10-way", "11-way", "12-way",942942+ "13-way", "14-way", "15-way", "16-way",949943};950944951945static void probe_pcache(void)
+13-7
arch/mips/mti-malta/malta-time.c
···119119120120int get_c0_fdc_int(void)121121{122122- int mips_cpu_fdc_irq;122122+ /*123123+ * Some cores claim the FDC is routable through the GIC, but it doesn't124124+ * actually seem to be connected for those Malta bitstreams.125125+ */126126+ switch (current_cpu_type()) {127127+ case CPU_INTERAPTIV:128128+ case CPU_PROAPTIV:129129+ return -1;130130+ };123131124132 if (cpu_has_veic)125125- mips_cpu_fdc_irq = -1;133133+ return -1;126134 else if (gic_present)127127- mips_cpu_fdc_irq = gic_get_c0_fdc_int();135135+ return gic_get_c0_fdc_int();128136 else if (cp0_fdc_irq >= 0)129129- mips_cpu_fdc_irq = MIPS_CPU_IRQ_BASE + cp0_fdc_irq;137137+ return MIPS_CPU_IRQ_BASE + cp0_fdc_irq;130138 else131131- mips_cpu_fdc_irq = -1;132132-133133- return mips_cpu_fdc_irq;139139+ return -1;134140}135141136142int get_c0_perfcount_int(void)
···1616#include <asm/processor.h>1717#include <asm/cache.h>18181919-extern spinlock_t pa_dbit_lock;1919+extern spinlock_t pa_tlb_lock;20202121/*2222 * kern_addr_valid(ADDR) tests if ADDR is pointing to valid kernel···3333 */3434#define kern_addr_valid(addr) (1)35353636+/* Purge data and instruction TLB entries. Must be called holding3737+ * the pa_tlb_lock. The TLB purge instructions are slow on SMP3838+ * machines since the purge must be broadcast to all CPUs.3939+ */4040+4141+static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)4242+{4343+ mtsp(mm->context, 1);4444+ pdtlb(addr);4545+ if (unlikely(split_tlb))4646+ pitlb(addr);4747+}4848+3649/* Certain architectures need to do special things when PTEs3750 * within a page table are directly modified. Thus, the following3851 * hook is made available.···5542 *(pteptr) = (pteval); \5643 } while(0)57445858-extern void purge_tlb_entries(struct mm_struct *, unsigned long);4545+#define pte_inserted(x) \4646+ ((pte_val(x) & (_PAGE_PRESENT|_PAGE_ACCESSED)) \4747+ == (_PAGE_PRESENT|_PAGE_ACCESSED))59486060-#define set_pte_at(mm, addr, ptep, pteval) \6161- do { \4949+#define set_pte_at(mm, addr, ptep, pteval) \5050+ do { \5151+ pte_t old_pte; \6252 unsigned long flags; \6363- spin_lock_irqsave(&pa_dbit_lock, flags); \6464- set_pte(ptep, pteval); \6565- purge_tlb_entries(mm, addr); \6666- spin_unlock_irqrestore(&pa_dbit_lock, flags); \5353+ spin_lock_irqsave(&pa_tlb_lock, flags); \5454+ old_pte = *ptep; \5555+ set_pte(ptep, pteval); \5656+ if (pte_inserted(old_pte)) \5757+ purge_tlb_entries(mm, addr); \5858+ spin_unlock_irqrestore(&pa_tlb_lock, flags); \6759 } while (0)68606961#endif /* !__ASSEMBLY__ */···286268287269#define pte_none(x) (pte_val(x) == 0)288270#define pte_present(x) (pte_val(x) & _PAGE_PRESENT)289289-#define pte_clear(mm,addr,xp) do { pte_val(*(xp)) = 0; } while (0)271271+#define pte_clear(mm, addr, xp) set_pte_at(mm, addr, xp, __pte(0))290272291273#define pmd_flag(x) (pmd_val(x) & 
PxD_FLAG_MASK)292274#define pmd_address(x) ((unsigned long)(pmd_val(x) &~ PxD_FLAG_MASK) << PxD_VALUE_SHIFT)···453435 if (!pte_young(*ptep))454436 return 0;455437456456- spin_lock_irqsave(&pa_dbit_lock, flags);438438+ spin_lock_irqsave(&pa_tlb_lock, flags);457439 pte = *ptep;458440 if (!pte_young(pte)) {459459- spin_unlock_irqrestore(&pa_dbit_lock, flags);441441+ spin_unlock_irqrestore(&pa_tlb_lock, flags);460442 return 0;461443 }462444 set_pte(ptep, pte_mkold(pte));463445 purge_tlb_entries(vma->vm_mm, addr);464464- spin_unlock_irqrestore(&pa_dbit_lock, flags);446446+ spin_unlock_irqrestore(&pa_tlb_lock, flags);465447 return 1;466448}467449···471453 pte_t old_pte;472454 unsigned long flags;473455474474- spin_lock_irqsave(&pa_dbit_lock, flags);456456+ spin_lock_irqsave(&pa_tlb_lock, flags);475457 old_pte = *ptep;476476- pte_clear(mm,addr,ptep);477477- purge_tlb_entries(mm, addr);478478- spin_unlock_irqrestore(&pa_dbit_lock, flags);458458+ set_pte(ptep, __pte(0));459459+ if (pte_inserted(old_pte))460460+ purge_tlb_entries(mm, addr);461461+ spin_unlock_irqrestore(&pa_tlb_lock, flags);479462480463 return old_pte;481464}···484465static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)485466{486467 unsigned long flags;487487- spin_lock_irqsave(&pa_dbit_lock, flags);468468+ spin_lock_irqsave(&pa_tlb_lock, flags);488469 set_pte(ptep, pte_wrprotect(*ptep));489470 purge_tlb_entries(mm, addr);490490- spin_unlock_irqrestore(&pa_dbit_lock, flags);471471+ spin_unlock_irqrestore(&pa_tlb_lock, flags);491472}492473493474#define pte_same(A,B) (pte_val(A) == pte_val(B))
+29-24
arch/parisc/include/asm/tlbflush.h
···1313 * active at any one time on the Merced bus. This tlb purge1414 * synchronisation is fairly lightweight and harmless so we activate1515 * it on all systems not just the N class.1616+1717+ * It is also used to ensure PTE updates are atomic and consistent1818+ * with the TLB.1619 */1720extern spinlock_t pa_tlb_lock;1821···27242825#define smp_flush_tlb_all() flush_tlb_all()29262727+int __flush_tlb_range(unsigned long sid,2828+ unsigned long start, unsigned long end);2929+3030+#define flush_tlb_range(vma, start, end) \3131+ __flush_tlb_range((vma)->vm_mm->context, start, end)3232+3333+#define flush_tlb_kernel_range(start, end) \3434+ __flush_tlb_range(0, start, end)3535+3036/*3137 * flush_tlb_mm()3238 *3333- * XXX This code is NOT valid for HP-UX compatibility processes,3434- * (although it will probably work 99% of the time). HP-UX3535- * processes are free to play with the space id's and save them3636- * over long periods of time, etc. so we have to preserve the3737- * space and just flush the entire tlb. We need to check the3838- * personality in order to do that, but the personality is not3939- * currently being set correctly.4040- *4141- * Of course, Linux processes could do the same thing, but4242- * we don't support that (and the compilers, dynamic linker,4343- * etc. do not do that).3939+ * The code to switch to a new context is NOT valid for processes4040+ * which play with the space id's. Thus, we have to preserve the4141+ * space and just flush the entire tlb. However, the compilers,4242+ * dynamic linker, etc, do not manipulate space id's, so there4343+ * could be a significant performance benefit in switching contexts4444+ * and not flushing the whole tlb.4445 */45464647static inline void flush_tlb_mm(struct mm_struct *mm)···5245 BUG_ON(mm == &init_mm); /* Should never happen */53465447#if 1 || defined(CONFIG_SMP)4848+ /* Except for very small threads, flushing the whole TLB is4949+ * faster than using __flush_tlb_range. 
The pdtlb and pitlb5050+ * instructions are very slow because of the TLB broadcast.5151+ * It might be faster to do local range flushes on all CPUs5252+ * on PA 2.0 systems.5353+ */5554 flush_tlb_all();5655#else5756 /* FIXME: currently broken, causing space id and protection ids5858- * to go out of sync, resulting in faults on userspace accesses.5757+ * to go out of sync, resulting in faults on userspace accesses.5858+ * This approach needs further investigation since running many5959+ * small applications (e.g., GCC testsuite) is faster on HP-UX.5960 */6061 if (mm) {6162 if (mm->context != 0)···8065{8166 unsigned long flags, sid;82678383- /* For one page, it's not worth testing the split_tlb variable */8484-8585- mb();8668 sid = vma->vm_mm->context;8769 purge_tlb_start(flags);8870 mtsp(sid, 1);8971 pdtlb(addr);9090- pitlb(addr);7272+ if (unlikely(split_tlb))7373+ pitlb(addr);9174 purge_tlb_end(flags);9275}9393-9494-void __flush_tlb_range(unsigned long sid,9595- unsigned long start, unsigned long end);9696-9797-#define flush_tlb_range(vma,start,end) __flush_tlb_range((vma)->vm_mm->context,start,end)9898-9999-#define flush_tlb_kernel_range(start, end) __flush_tlb_range(0,start,end)100100-10176#endif
+67-38
arch/parisc/kernel/cache.c
···342342EXPORT_SYMBOL(flush_kernel_icache_range_asm);343343344344#define FLUSH_THRESHOLD 0x80000 /* 0.5MB */345345-int parisc_cache_flush_threshold __read_mostly = FLUSH_THRESHOLD;345345+static unsigned long parisc_cache_flush_threshold __read_mostly = FLUSH_THRESHOLD;346346+347347+#define FLUSH_TLB_THRESHOLD (2*1024*1024) /* 2MB initial TLB threshold */348348+static unsigned long parisc_tlb_flush_threshold __read_mostly = FLUSH_TLB_THRESHOLD;346349347350void __init parisc_setup_cache_timing(void)348351{349352 unsigned long rangetime, alltime;350350- unsigned long size;353353+ unsigned long size, start;351354352355 alltime = mfctl(16);353356 flush_data_cache();···367364 /* Racy, but if we see an intermediate value, it's ok too... */368365 parisc_cache_flush_threshold = size * alltime / rangetime;369366370370- parisc_cache_flush_threshold = (parisc_cache_flush_threshold + L1_CACHE_BYTES - 1) &~ (L1_CACHE_BYTES - 1); 367367+ parisc_cache_flush_threshold = L1_CACHE_ALIGN(parisc_cache_flush_threshold);371368 if (!parisc_cache_flush_threshold)372369 parisc_cache_flush_threshold = FLUSH_THRESHOLD;373370374371 if (parisc_cache_flush_threshold > cache_info.dc_size)375372 parisc_cache_flush_threshold = cache_info.dc_size;376373377377- printk(KERN_INFO "Setting cache flush threshold to %x (%d CPUs online)\n", parisc_cache_flush_threshold, num_online_cpus());374374+ printk(KERN_INFO "Setting cache flush threshold to %lu kB\n",375375+ parisc_cache_flush_threshold/1024);376376+377377+ /* calculate TLB flush threshold */378378+379379+ alltime = mfctl(16);380380+ flush_tlb_all();381381+ alltime = mfctl(16) - alltime;382382+383383+ size = PAGE_SIZE;384384+ start = (unsigned long) _text;385385+ rangetime = mfctl(16);386386+ while (start < (unsigned long) _end) {387387+ flush_tlb_kernel_range(start, start + PAGE_SIZE);388388+ start += PAGE_SIZE;389389+ size += PAGE_SIZE;390390+ }391391+ rangetime = mfctl(16) - rangetime;392392+393393+ printk(KERN_DEBUG "Whole TLB flush %lu cycles, 
flushing %lu bytes %lu cycles\n",394394+ alltime, size, rangetime);395395+396396+ parisc_tlb_flush_threshold = size * alltime / rangetime;397397+ parisc_tlb_flush_threshold *= num_online_cpus();398398+ parisc_tlb_flush_threshold = PAGE_ALIGN(parisc_tlb_flush_threshold);399399+ if (!parisc_tlb_flush_threshold)400400+ parisc_tlb_flush_threshold = FLUSH_TLB_THRESHOLD;401401+402402+ printk(KERN_INFO "Setting TLB flush threshold to %lu kB\n",403403+ parisc_tlb_flush_threshold/1024);378404}379405380406extern void purge_kernel_dcache_page_asm(unsigned long);···435403}436404EXPORT_SYMBOL(copy_user_page);437405438438-void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)406406+/* __flush_tlb_range()407407+ *408408+ * returns 1 if all TLBs were flushed.409409+ */410410+int __flush_tlb_range(unsigned long sid, unsigned long start,411411+ unsigned long end)439412{440440- unsigned long flags;413413+ unsigned long flags, size;441414442442- /* Note: purge_tlb_entries can be called at startup with443443- no context. */444444-445445- purge_tlb_start(flags);446446- mtsp(mm->context, 1);447447- pdtlb(addr);448448- pitlb(addr);449449- purge_tlb_end(flags);450450-}451451-EXPORT_SYMBOL(purge_tlb_entries);452452-453453-void __flush_tlb_range(unsigned long sid, unsigned long start,454454- unsigned long end)455455-{456456- unsigned long npages;457457-458458- npages = ((end - (start & PAGE_MASK)) + (PAGE_SIZE - 1)) >> PAGE_SHIFT;459459- if (npages >= 512) /* 2MB of space: arbitrary, should be tuned */415415+ size = (end - start);416416+ if (size >= parisc_tlb_flush_threshold) {460417 flush_tlb_all();461461- else {462462- unsigned long flags;418418+ return 1;419419+ }463420421421+ /* Purge TLB entries for small ranges using the pdtlb and422422+ pitlb instructions. These instructions execute locally423423+ but cause a purge request to be broadcast to other TLBs. 
*/424424+ if (likely(!split_tlb)) {425425+ while (start < end) {426426+ purge_tlb_start(flags);427427+ mtsp(sid, 1);428428+ pdtlb(start);429429+ purge_tlb_end(flags);430430+ start += PAGE_SIZE;431431+ }432432+ return 0;433433+ }434434+435435+ /* split TLB case */436436+ while (start < end) {464437 purge_tlb_start(flags);465438 mtsp(sid, 1);466466- if (split_tlb) {467467- while (npages--) {468468- pdtlb(start);469469- pitlb(start);470470- start += PAGE_SIZE;471471- }472472- } else {473473- while (npages--) {474474- pdtlb(start);475475- start += PAGE_SIZE;476476- }477477- }439439+ pdtlb(start);440440+ pitlb(start);478441 purge_tlb_end(flags);442442+ start += PAGE_SIZE;479443 }444444+ return 0;480445}481446482447static void cacheflush_h_tmp_function(void *dummy)
+79-84
arch/parisc/kernel/entry.S
···4545 .level 2.04646#endif47474848- .import pa_dbit_lock,data4848+ .import pa_tlb_lock,data49495050 /* space_to_prot macro creates a prot id from a space id */5151···420420 SHLREG %r9,PxD_VALUE_SHIFT,\pmd421421 extru \va,31-PAGE_SHIFT,ASM_BITS_PER_PTE,\index422422 dep %r0,31,PAGE_SHIFT,\pmd /* clear offset */423423- shladd \index,BITS_PER_PTE_ENTRY,\pmd,\pmd424424- LDREG %r0(\pmd),\pte /* pmd is now pte */423423+ shladd \index,BITS_PER_PTE_ENTRY,\pmd,\pmd /* pmd is now pte */424424+ LDREG %r0(\pmd),\pte425425 bb,>=,n \pte,_PAGE_PRESENT_BIT,\fault426426 .endm427427···453453 L2_ptep \pgd,\pte,\index,\va,\fault454454 .endm455455456456- /* Acquire pa_dbit_lock lock. */457457- .macro dbit_lock spc,tmp,tmp1456456+ /* Acquire pa_tlb_lock lock and recheck page is still present. */457457+ .macro tlb_lock spc,ptp,pte,tmp,tmp1,fault458458#ifdef CONFIG_SMP459459 cmpib,COND(=),n 0,\spc,2f460460- load32 PA(pa_dbit_lock),\tmp460460+ load32 PA(pa_tlb_lock),\tmp4614611: LDCW 0(\tmp),\tmp1462462 cmpib,COND(=) 0,\tmp1,1b463463 nop464464+ LDREG 0(\ptp),\pte465465+ bb,<,n \pte,_PAGE_PRESENT_BIT,2f466466+ b \fault467467+ stw \spc,0(\tmp)4644682:465469#endif466470 .endm467471468468- /* Release pa_dbit_lock lock without reloading lock address. */469469- .macro dbit_unlock0 spc,tmp472472+ /* Release pa_tlb_lock lock without reloading lock address. */473473+ .macro tlb_unlock0 spc,tmp470474#ifdef CONFIG_SMP471475 or,COND(=) %r0,\spc,%r0472476 stw \spc,0(\tmp)473477#endif474478 .endm475479476476- /* Release pa_dbit_lock lock. */477477- .macro dbit_unlock1 spc,tmp480480+ /* Release pa_tlb_lock lock. */481481+ .macro tlb_unlock1 spc,tmp478482#ifdef CONFIG_SMP479479- load32 PA(pa_dbit_lock),\tmp480480- dbit_unlock0 \spc,\tmp483483+ load32 PA(pa_tlb_lock),\tmp484484+ tlb_unlock0 \spc,\tmp481485#endif482486 .endm483487484488 /* Set the _PAGE_ACCESSED bit of the PTE. 
Be clever and485489 * don't needlessly dirty the cache line if it was already set */486486- .macro update_ptep spc,ptep,pte,tmp,tmp1487487-#ifdef CONFIG_SMP488488- or,COND(=) %r0,\spc,%r0489489- LDREG 0(\ptep),\pte490490-#endif490490+ .macro update_accessed ptp,pte,tmp,tmp1491491 ldi _PAGE_ACCESSED,\tmp1492492 or \tmp1,\pte,\tmp493493 and,COND(<>) \tmp1,\pte,%r0494494- STREG \tmp,0(\ptep)494494+ STREG \tmp,0(\ptp)495495 .endm496496497497 /* Set the dirty bit (and accessed bit). No need to be498498 * clever, this is only used from the dirty fault */499499- .macro update_dirty spc,ptep,pte,tmp500500-#ifdef CONFIG_SMP501501- or,COND(=) %r0,\spc,%r0502502- LDREG 0(\ptep),\pte503503-#endif499499+ .macro update_dirty ptp,pte,tmp504500 ldi _PAGE_ACCESSED|_PAGE_DIRTY,\tmp505501 or \tmp,\pte,\pte506506- STREG \pte,0(\ptep)502502+ STREG \pte,0(\ptp)507503 .endm508504509505 /* bitshift difference between a PFN (based on kernel's PAGE_SIZE)···1144114811451149 L3_ptep ptp,pte,t0,va,dtlb_check_alias_20w1146115011471147- dbit_lock spc,t0,t111481148- update_ptep spc,ptp,pte,t0,t111511151+ tlb_lock spc,ptp,pte,t0,t1,dtlb_check_alias_20w11521152+ update_accessed ptp,pte,t0,t11149115311501154 make_insert_tlb spc,pte,prot1151115511521156 idtlbt pte,prot11531153- dbit_unlock1 spc,t01154115711581158+ tlb_unlock1 spc,t011551159 rfir11561160 nop11571161···1170117411711175 L3_ptep ptp,pte,t0,va,nadtlb_check_alias_20w1172117611731173- dbit_lock spc,t0,t111741174- update_ptep spc,ptp,pte,t0,t111771177+ tlb_lock spc,ptp,pte,t0,t1,nadtlb_check_alias_20w11781178+ update_accessed ptp,pte,t0,t11175117911761180 make_insert_tlb spc,pte,prot1177118111781182 idtlbt pte,prot11791179- dbit_unlock1 spc,t01180118311841184+ tlb_unlock1 spc,t011811185 rfir11821186 nop11831187···1198120211991203 L2_ptep ptp,pte,t0,va,dtlb_check_alias_111200120412011201- dbit_lock spc,t0,t112021202- update_ptep spc,ptp,pte,t0,t112051205+ tlb_lock spc,ptp,pte,t0,t1,dtlb_check_alias_1112061206+ update_accessed 
ptp,pte,t0,t11203120712041208 make_insert_tlb_11 spc,pte,prot1205120912061206- mfsp %sr1,t0 /* Save sr1 so we can use it in tlb inserts */12101210+ mfsp %sr1,t1 /* Save sr1 so we can use it in tlb inserts */12071211 mtsp spc,%sr11208121212091213 idtlba pte,(%sr1,va)12101214 idtlbp prot,(%sr1,va)1211121512121212- mtsp t0, %sr1 /* Restore sr1 */12131213- dbit_unlock1 spc,t012161216+ mtsp t1, %sr1 /* Restore sr1 */1214121712181218+ tlb_unlock1 spc,t012151219 rfir12161220 nop12171221···1231123512321236 L2_ptep ptp,pte,t0,va,nadtlb_check_alias_111233123712341234- dbit_lock spc,t0,t112351235- update_ptep spc,ptp,pte,t0,t112381238+ tlb_lock spc,ptp,pte,t0,t1,nadtlb_check_alias_1112391239+ update_accessed ptp,pte,t0,t11236124012371241 make_insert_tlb_11 spc,pte,prot1238124212391239-12401240- mfsp %sr1,t0 /* Save sr1 so we can use it in tlb inserts */12431243+ mfsp %sr1,t1 /* Save sr1 so we can use it in tlb inserts */12411244 mtsp spc,%sr11242124512431246 idtlba pte,(%sr1,va)12441247 idtlbp prot,(%sr1,va)1245124812461246- mtsp t0, %sr1 /* Restore sr1 */12471247- dbit_unlock1 spc,t012491249+ mtsp t1, %sr1 /* Restore sr1 */1248125012511251+ tlb_unlock1 spc,t012491252 rfir12501253 nop12511254···1264126912651270 L2_ptep ptp,pte,t0,va,dtlb_check_alias_201266127112671267- dbit_lock spc,t0,t112681268- update_ptep spc,ptp,pte,t0,t112721272+ tlb_lock spc,ptp,pte,t0,t1,dtlb_check_alias_2012731273+ update_accessed ptp,pte,t0,t11269127412701275 make_insert_tlb spc,pte,prot1271127612721272- f_extend pte,t012771277+ f_extend pte,t11273127812741279 idtlbt pte,prot12751275- dbit_unlock1 spc,t01276128012811281+ tlb_unlock1 spc,t012771282 rfir12781283 nop12791284···1292129712931298 L2_ptep ptp,pte,t0,va,nadtlb_check_alias_201294129912951295- dbit_lock spc,t0,t112961296- update_ptep spc,ptp,pte,t0,t113001300+ tlb_lock spc,ptp,pte,t0,t1,nadtlb_check_alias_2013011301+ update_accessed ptp,pte,t0,t11297130212981303 make_insert_tlb spc,pte,prot1299130413001300- f_extend pte,t013051305+ f_extend 
pte,t11301130613021302- idtlbt pte,prot13031303- dbit_unlock1 spc,t013071307+ idtlbt pte,prot1304130813091309+ tlb_unlock1 spc,t013051310 rfir13061311 nop13071312···1401140614021407 L3_ptep ptp,pte,t0,va,itlb_fault1403140814041404- dbit_lock spc,t0,t114051405- update_ptep spc,ptp,pte,t0,t114091409+ tlb_lock spc,ptp,pte,t0,t1,itlb_fault14101410+ update_accessed ptp,pte,t0,t11406141114071412 make_insert_tlb spc,pte,prot1408141314091414 iitlbt pte,prot14101410- dbit_unlock1 spc,t01411141514161416+ tlb_unlock1 spc,t014121417 rfir14131418 nop14141419···1425143014261431 L3_ptep ptp,pte,t0,va,naitlb_check_alias_20w1427143214281428- dbit_lock spc,t0,t114291429- update_ptep spc,ptp,pte,t0,t114331433+ tlb_lock spc,ptp,pte,t0,t1,naitlb_check_alias_20w14341434+ update_accessed ptp,pte,t0,t11430143514311436 make_insert_tlb spc,pte,prot1432143714331438 iitlbt pte,prot14341434- dbit_unlock1 spc,t01435143914401440+ tlb_unlock1 spc,t014361441 rfir14371442 nop14381443···1453145814541459 L2_ptep ptp,pte,t0,va,itlb_fault1455146014561456- dbit_lock spc,t0,t114571457- update_ptep spc,ptp,pte,t0,t114611461+ tlb_lock spc,ptp,pte,t0,t1,itlb_fault14621462+ update_accessed ptp,pte,t0,t11458146314591464 make_insert_tlb_11 spc,pte,prot1460146514611461- mfsp %sr1,t0 /* Save sr1 so we can use it in tlb inserts */14661466+ mfsp %sr1,t1 /* Save sr1 so we can use it in tlb inserts */14621467 mtsp spc,%sr11463146814641469 iitlba pte,(%sr1,va)14651470 iitlbp prot,(%sr1,va)1466147114671467- mtsp t0, %sr1 /* Restore sr1 */14681468- dbit_unlock1 spc,t014721472+ mtsp t1, %sr1 /* Restore sr1 */1469147314741474+ tlb_unlock1 spc,t014701475 rfir14711476 nop14721477···1477148214781483 L2_ptep ptp,pte,t0,va,naitlb_check_alias_111479148414801480- dbit_lock spc,t0,t114811481- update_ptep spc,ptp,pte,t0,t114851485+ tlb_lock spc,ptp,pte,t0,t1,naitlb_check_alias_1114861486+ update_accessed ptp,pte,t0,t11482148714831488 make_insert_tlb_11 spc,pte,prot1484148914851485- mfsp %sr1,t0 /* Save sr1 so we can use it in tlb 
inserts */14901490+ mfsp %sr1,t1 /* Save sr1 so we can use it in tlb inserts */14861491 mtsp spc,%sr11487149214881493 iitlba pte,(%sr1,va)14891494 iitlbp prot,(%sr1,va)1490149514911491- mtsp t0, %sr1 /* Restore sr1 */14921492- dbit_unlock1 spc,t014961496+ mtsp t1, %sr1 /* Restore sr1 */1493149714981498+ tlb_unlock1 spc,t014941499 rfir14951500 nop14961501···1511151615121517 L2_ptep ptp,pte,t0,va,itlb_fault1513151815141514- dbit_lock spc,t0,t115151515- update_ptep spc,ptp,pte,t0,t115191519+ tlb_lock spc,ptp,pte,t0,t1,itlb_fault15201520+ update_accessed ptp,pte,t0,t11516152115171522 make_insert_tlb spc,pte,prot1518152315191519- f_extend pte,t0 15241524+ f_extend pte,t11520152515211526 iitlbt pte,prot15221522- dbit_unlock1 spc,t01523152715281528+ tlb_unlock1 spc,t015241529 rfir15251530 nop15261531···1531153615321537 L2_ptep ptp,pte,t0,va,naitlb_check_alias_201533153815341534- dbit_lock spc,t0,t115351535- update_ptep spc,ptp,pte,t0,t115391539+ tlb_lock spc,ptp,pte,t0,t1,naitlb_check_alias_2015401540+ update_accessed ptp,pte,t0,t11536154115371542 make_insert_tlb spc,pte,prot1538154315391539- f_extend pte,t015441544+ f_extend pte,t11540154515411546 iitlbt pte,prot15421542- dbit_unlock1 spc,t01543154715481548+ tlb_unlock1 spc,t015441549 rfir15451550 nop15461551···1563156815641569 L3_ptep ptp,pte,t0,va,dbit_fault1565157015661566- dbit_lock spc,t0,t115671567- update_dirty spc,ptp,pte,t115711571+ tlb_lock spc,ptp,pte,t0,t1,dbit_fault15721572+ update_dirty ptp,pte,t11568157315691574 make_insert_tlb spc,pte,prot1570157515711576 idtlbt pte,prot15721572- dbit_unlock0 spc,t01573157715781578+ tlb_unlock0 spc,t015741579 rfir15751580 nop15761581#else···1583158815841589 L2_ptep ptp,pte,t0,va,dbit_fault1585159015861586- dbit_lock spc,t0,t115871587- update_dirty spc,ptp,pte,t115911591+ tlb_lock spc,ptp,pte,t0,t1,dbit_fault15921592+ update_dirty ptp,pte,t11588159315891594 make_insert_tlb_11 spc,pte,prot15901595···15951600 idtlbp prot,(%sr1,va)1596160115971602 mtsp t1, %sr1 /* Restore sr1 
*/15981598- dbit_unlock0 spc,t01599160316041604+ tlb_unlock0 spc,t016001605 rfir16011606 nop16021607···1607161216081613 L2_ptep ptp,pte,t0,va,dbit_fault1609161416101610- dbit_lock spc,t0,t116111611- update_dirty spc,ptp,pte,t116151615+ tlb_lock spc,ptp,pte,t0,t1,dbit_fault16161616+ update_dirty ptp,pte,t11612161716131618 make_insert_tlb spc,pte,prot1614161916151620 f_extend pte,t11616162116171617- idtlbt pte,prot16181618- dbit_unlock0 spc,t016221622+ idtlbt pte,prot1619162316241624+ tlb_unlock0 spc,t016201625 rfir16211626 nop16221627#endif
-4
arch/parisc/kernel/traps.c
···43434444#include "../math-emu/math-emu.h" /* for handle_fpe() */45454646-#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)4747-DEFINE_SPINLOCK(pa_dbit_lock);4848-#endif4949-5046static void parisc_show_stack(struct task_struct *task, unsigned long *sp,5147 struct pt_regs *regs);5248
+21-10
arch/powerpc/kernel/idle_power7.S
···5252 .text53535454/*5555+ * Used by threads when the lock bit of core_idle_state is set.5656+ * Threads will spin in HMT_LOW until the lock bit is cleared.5757+ * r14 - pointer to core_idle_state5858+ * r15 - used to load contents of core_idle_state5959+ */6060+6161+core_idle_lock_held:6262+ HMT_LOW6363+3: lwz r15,0(r14)6464+ andi. r15,r15,PNV_CORE_IDLE_LOCK_BIT6565+ bne 3b6666+ HMT_MEDIUM6767+ lwarx r15,0,r146868+ blr6969+7070+/*5571 * Pass requested state in r3:5672 * r3 - PNV_THREAD_NAP/SLEEP/WINKLE5773 *···166150 ld r14,PACA_CORE_IDLE_STATE_PTR(r13)167151lwarx_loop1:168152 lwarx r15,0,r14153153+154154+ andi. r9,r15,PNV_CORE_IDLE_LOCK_BIT155155+ bnel core_idle_lock_held156156+169157 andc r15,r15,r7 /* Clear thread bit */170158171159 andi. r15,r15,PNV_CORE_IDLE_THREAD_BITS···314294 * workaround undo code or resyncing timebase or restoring context315295 * In either case loop until the lock bit is cleared.316296 */317317- bne core_idle_lock_held297297+ bnel core_idle_lock_held318298319299 cmpwi cr2,r15,0320300 lbz r4,PACA_SUBCORE_SIBLING_MASK(r13)···338318 bne- lwarx_loop2339319 isync340320 b common_exit341341-342342-core_idle_lock_held:343343- HMT_LOW344344-core_idle_lock_loop:345345- lwz r15,0(14)346346- andi. r9,r15,PNV_CORE_IDLE_LOCK_BIT347347- bne core_idle_lock_loop348348- HMT_MEDIUM349349- b lwarx_loop2350321351322first_thread_in_subcore:352323 /* First thread in subcore to wakeup */
···254254config ARCH_SUPPORTS_DEBUG_PAGEALLOC255255 def_bool y256256257257+config KASAN_SHADOW_OFFSET258258+ hex259259+ depends on KASAN260260+ default 0xdffffc0000000000261261+257262config HAVE_INTEL_TXT258263 def_bool y259264 depends on INTEL_IOMMU && ACPI···2020201520212016 To compile command line arguments into the kernel,20222017 set this option to 'Y', then fill in the20232023- the boot arguments in CONFIG_CMDLINE.20182018+ boot arguments in CONFIG_CMDLINE.2024201920252020 Systems with fully functional boot loaders (i.e. non-embedded)20262021 should leave this option set to 'N'.
···409409 int irq, vector;410410 struct apic_chip_data *data;411411412412- /*413413- * vector_lock will make sure that we don't run into irq vector414414- * assignments that might be happening on another cpu in parallel,415415- * while we setup our initial vector to irq mappings.416416- */417417- raw_spin_lock(&vector_lock);418412 /* Mark the inuse vectors */419413 for_each_active_irq(irq) {420414 data = apic_chip_data(irq_get_irq_data(irq));···430436 if (!cpumask_test_cpu(cpu, data->domain))431437 per_cpu(vector_irq, cpu)[vector] = VECTOR_UNDEFINED;432438 }433433- raw_spin_unlock(&vector_lock);434439}435440436441/*437437- * Setup the vector to irq mappings.442442+ * Setup the vector to irq mappings. Must be called with vector_lock held.438443 */439444void setup_vector_irq(int cpu)440445{441446 int irq;442447448448+ lockdep_assert_held(&vector_lock);443449 /*444450 * On most of the platforms, legacy PIC delivers the interrupts on the445451 * boot cpu. But there are certain platforms where PIC interrupts are
+3-1
arch/x86/kernel/early_printk.c
···175175 }176176177177 if (*s) {178178- if (kstrtoul(s, 0, &baud) < 0 || baud == 0)178178+ baud = simple_strtoull(s, &e, 0);179179+180180+ if (baud == 0 || s == e)179181 baud = DEFAULT_BAUD;180182 }181183
+16-12
arch/x86/kernel/espfix_64.c
···131131 init_espfix_random();132132133133 /* The rest is the same as for any other processor */134134- init_espfix_ap();134134+ init_espfix_ap(0);135135}136136137137-void init_espfix_ap(void)137137+void init_espfix_ap(int cpu)138138{139139- unsigned int cpu, page;139139+ unsigned int page;140140 unsigned long addr;141141 pud_t pud, *pud_p;142142 pmd_t pmd, *pmd_p;143143 pte_t pte, *pte_p;144144- int n;144144+ int n, node;145145 void *stack_page;146146 pteval_t ptemask;147147148148 /* We only have to do this once... */149149- if (likely(this_cpu_read(espfix_stack)))149149+ if (likely(per_cpu(espfix_stack, cpu)))150150 return; /* Already initialized */151151152152- cpu = smp_processor_id();153152 addr = espfix_base_addr(cpu);154153 page = cpu/ESPFIX_STACKS_PER_PAGE;155154···164165 if (stack_page)165166 goto unlock_done;166167168168+ node = cpu_to_node(cpu);167169 ptemask = __supported_pte_mask;168170169171 pud_p = &espfix_pud_page[pud_index(addr)];170172 pud = *pud_p;171173 if (!pud_present(pud)) {172172- pmd_p = (pmd_t *)__get_free_page(PGALLOC_GFP);174174+ struct page *page = alloc_pages_node(node, PGALLOC_GFP, 0);175175+176176+ pmd_p = (pmd_t *)page_address(page);173177 pud = __pud(__pa(pmd_p) | (PGTABLE_PROT & ptemask));174178 paravirt_alloc_pmd(&init_mm, __pa(pmd_p) >> PAGE_SHIFT);175179 for (n = 0; n < ESPFIX_PUD_CLONES; n++)···182180 pmd_p = pmd_offset(&pud, addr);183181 pmd = *pmd_p;184182 if (!pmd_present(pmd)) {185185- pte_p = (pte_t *)__get_free_page(PGALLOC_GFP);183183+ struct page *page = alloc_pages_node(node, PGALLOC_GFP, 0);184184+185185+ pte_p = (pte_t *)page_address(page);186186 pmd = __pmd(__pa(pte_p) | (PGTABLE_PROT & ptemask));187187 paravirt_alloc_pte(&init_mm, __pa(pte_p) >> PAGE_SHIFT);188188 for (n = 0; n < ESPFIX_PMD_CLONES; n++)···192188 }193189194190 pte_p = pte_offset_kernel(&pmd, addr);195195- stack_page = (void *)__get_free_page(GFP_KERNEL);191191+ stack_page = page_address(alloc_pages_node(node, GFP_KERNEL, 0));196192 pte = 
__pte(__pa(stack_page) | (__PAGE_KERNEL_RO & ptemask));197193 for (n = 0; n < ESPFIX_PTE_CLONES; n++)198194 set_pte(&pte_p[n*PTE_STRIDE], pte);···203199unlock_done:204200 mutex_unlock(&espfix_init_mutex);205201done:206206- this_cpu_write(espfix_stack, addr);207207- this_cpu_write(espfix_waddr, (unsigned long)stack_page208208- + (addr & ~PAGE_MASK));202202+ per_cpu(espfix_stack, cpu) = addr;203203+ per_cpu(espfix_waddr, cpu) = (unsigned long)stack_page204204+ + (addr & ~PAGE_MASK);209205}
+4-6
arch/x86/kernel/head64.c
···161161 /* Kill off the identity-map trampoline */162162 reset_early_page_tables();163163164164- kasan_map_early_shadow(early_level4_pgt);165165-166166- /* clear bss before set_intr_gate with early_idt_handler */167164 clear_bss();165165+166166+ clear_page(init_level4_pgt);167167+168168+ kasan_early_init();168169169170 for (i = 0; i < NUM_EXCEPTION_VECTORS; i++)170171 set_intr_gate(i, early_idt_handler_array[i]);···178177 */179178 load_ucode_bsp();180179181181- clear_page(init_level4_pgt);182180 /* set init_level4_pgt kernel high mapping*/183181 init_level4_pgt[511] = early_level4_pgt[511];184184-185185- kasan_map_early_shadow(init_level4_pgt);186182187183 x86_64_start_reservations(real_mode_data);188184}
-29
arch/x86/kernel/head_64.S
···516516 /* This must match the first entry in level2_kernel_pgt */517517 .quad 0x0000000000000000518518519519-#ifdef CONFIG_KASAN520520-#define FILL(VAL, COUNT) \521521- .rept (COUNT) ; \522522- .quad (VAL) ; \523523- .endr524524-525525-NEXT_PAGE(kasan_zero_pte)526526- FILL(kasan_zero_page - __START_KERNEL_map + _KERNPG_TABLE, 512)527527-NEXT_PAGE(kasan_zero_pmd)528528- FILL(kasan_zero_pte - __START_KERNEL_map + _KERNPG_TABLE, 512)529529-NEXT_PAGE(kasan_zero_pud)530530- FILL(kasan_zero_pmd - __START_KERNEL_map + _KERNPG_TABLE, 512)531531-532532-#undef FILL533533-#endif534534-535535-536519#include "../../x86/xen/xen-head.S"537520538521 __PAGE_ALIGNED_BSS539522NEXT_PAGE(empty_zero_page)540523 .skip PAGE_SIZE541524542542-#ifdef CONFIG_KASAN543543-/*544544- * This page used as early shadow. We don't use empty_zero_page545545- * at early stages, stack instrumentation could write some garbage546546- * to this page.547547- * Latter we reuse it as zero shadow for large ranges of memory548548- * that allowed to access, but not instrumented by kasan549549- * (vmalloc/vmemmap ...).550550- */551551-NEXT_PAGE(kasan_zero_page)552552- .skip PAGE_SIZE553553-#endif
+18-2
arch/x86/kernel/irq.c
···347347 if (!desc)348348 continue;349349350350+ /*351351+ * Protect against concurrent action removal,352352+ * affinity changes etc.353353+ */354354+ raw_spin_lock(&desc->lock);350355 data = irq_desc_get_irq_data(desc);351356 cpumask_copy(&affinity_new, data->affinity);352357 cpumask_clear_cpu(this_cpu, &affinity_new);353358354359 /* Do not count inactive or per-cpu irqs. */355355- if (!irq_has_action(irq) || irqd_is_per_cpu(data))360360+ if (!irq_has_action(irq) || irqd_is_per_cpu(data)) {361361+ raw_spin_unlock(&desc->lock);356362 continue;363363+ }357364365365+ raw_spin_unlock(&desc->lock);358366 /*359367 * A single irq may be mapped to multiple360368 * cpu's vector_irq[] (for example IOAPIC cluster···393385 * vector. If the vector is marked in the used vectors394386 * bitmap or an irq is assigned to it, we don't count395387 * it as available.388388+ *389389+ * As this is an inaccurate snapshot anyway, we can do390390+ * this w/o holding vector_lock.396391 */397392 for (vector = FIRST_EXTERNAL_VECTOR;398393 vector < first_system_vector; vector++) {···497486 */498487 mdelay(1);499488489489+ /*490490+ * We can walk the vector array of this cpu without holding491491+ * vector_lock because the cpu is already marked !online, so492492+ * nothing else will touch it.493493+ */500494 for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {501495 unsigned int irr;502496···513497 irq = __this_cpu_read(vector_irq[vector]);514498515499 desc = irq_to_desc(irq);500500+ raw_spin_lock(&desc->lock);516501 data = irq_desc_get_irq_data(desc);517502 chip = irq_data_get_irq_chip(data);518518- raw_spin_lock(&desc->lock);519503 if (chip->irq_retrigger) {520504 chip->irq_retrigger(data);521505 __this_cpu_write(vector_irq[vector], VECTOR_RETRIGGERED);
+12-15
arch/x86/kernel/smpboot.c
···171171 apic_ap_setup();172172173173 /*174174- * Need to setup vector mappings before we enable interrupts.175175- */176176- setup_vector_irq(smp_processor_id());177177-178178- /*179174 * Save our processor parameters. Note: this information180175 * is needed for clock calibration.181176 */···234239 check_tsc_sync_target();235240236241 /*237237- * Enable the espfix hack for this CPU238238- */239239-#ifdef CONFIG_X86_ESPFIX64240240- init_espfix_ap();241241-#endif242242-243243- /*244244- * We need to hold vector_lock so there the set of online cpus245245- * does not change while we are assigning vectors to cpus. Holding246246- * this lock ensures we don't half assign or remove an irq from a cpu.242242+ * Lock vector_lock and initialize the vectors on this cpu243243+ * before setting the cpu online. We must set it online with244244+ * vector_lock held to prevent a concurrent setup/teardown245245+ * from seeing a half valid vector space.247246 */248247 lock_vector_lock();248248+ setup_vector_irq(smp_processor_id());249249 set_cpu_online(smp_processor_id(), true);250250 unlock_vector_lock();251251 cpu_set_state_online(smp_processor_id());···843853 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);844854 initial_code = (unsigned long)start_secondary;845855 stack_start = idle->thread.sp;856856+857857+ /*858858+ * Enable the espfix hack for this CPU859859+ */860860+#ifdef CONFIG_X86_ESPFIX64861861+ init_espfix_ap(cpu);862862+#endif846863847864 /* So we see what's up */848865 announce_cpu(cpu, apicid);
+10-1
arch/x86/kernel/tsc.c
···598598 if (!pit_expect_msb(0xff-i, &delta, &d2))599599 break;600600601601+ delta -= tsc;602602+603603+ /*604604+ * Extrapolate the error and fail fast if the error will605605+ * never be below 500 ppm.606606+ */607607+ if (i == 1 &&608608+ d1 + d2 >= (delta * MAX_QUICK_PIT_ITERATIONS) >> 11)609609+ return 0;610610+601611 /*602612 * Iterate until the error is less than 500 ppm603613 */604604- delta -= tsc;605614 if (d1+d2 >= delta >> 11)606615 continue;607616
+1-1
arch/x86/lib/usercopy.c
···2020 unsigned long ret;21212222 if (__range_not_ok(from, n, TASK_SIZE))2323- return 0;2323+ return n;24242525 /*2626 * Even though this function is typically called from NMI/IRQ context
+42-5
arch/x86/mm/kasan_init_64.c
···11+#define pr_fmt(fmt) "kasan: " fmt12#include <linux/bootmem.h>23#include <linux/kasan.h>34#include <linux/kdebug.h>···1211extern pgd_t early_level4_pgt[PTRS_PER_PGD];1312extern struct range pfn_mapped[E820_X_MAX];14131515-extern unsigned char kasan_zero_page[PAGE_SIZE];1414+static pud_t kasan_zero_pud[PTRS_PER_PUD] __page_aligned_bss;1515+static pmd_t kasan_zero_pmd[PTRS_PER_PMD] __page_aligned_bss;1616+static pte_t kasan_zero_pte[PTRS_PER_PTE] __page_aligned_bss;1717+1818+/*1919+ * This page used as early shadow. We don't use empty_zero_page2020+ * at early stages, stack instrumentation could write some garbage2121+ * to this page.2222+ * Latter we reuse it as zero shadow for large ranges of memory2323+ * that allowed to access, but not instrumented by kasan2424+ * (vmalloc/vmemmap ...).2525+ */2626+static unsigned char kasan_zero_page[PAGE_SIZE] __page_aligned_bss;16271728static int __init map_range(struct range *range)1829{···4936 pgd_clear(pgd_offset_k(start));5037}51385252-void __init kasan_map_early_shadow(pgd_t *pgd)3939+static void __init kasan_map_early_shadow(pgd_t *pgd)5340{5441 int i;5542 unsigned long start = KASAN_SHADOW_START;···8673 while (IS_ALIGNED(addr, PMD_SIZE) && addr + PMD_SIZE <= end) {8774 WARN_ON(!pmd_none(*pmd));8875 set_pmd(pmd, __pmd(__pa_nodebug(kasan_zero_pte)8989- | __PAGE_KERNEL_RO));7676+ | _KERNPG_TABLE));9077 addr += PMD_SIZE;9178 pmd = pmd_offset(pud, addr);9279 }···11299 while (IS_ALIGNED(addr, PUD_SIZE) && addr + PUD_SIZE <= end) {113100 WARN_ON(!pud_none(*pud));114101 set_pud(pud, __pud(__pa_nodebug(kasan_zero_pmd)115115- | __PAGE_KERNEL_RO));102102+ | _KERNPG_TABLE));116103 addr += PUD_SIZE;117104 pud = pud_offset(pgd, addr);118105 }···137124 while (IS_ALIGNED(addr, PGDIR_SIZE) && addr + PGDIR_SIZE <= end) {138125 WARN_ON(!pgd_none(*pgd));139126 set_pgd(pgd, __pgd(__pa_nodebug(kasan_zero_pud)140140- | __PAGE_KERNEL_RO));127127+ | _KERNPG_TABLE));141128 addr += PGDIR_SIZE;142129 pgd = pgd_offset_k(addr);143130 
}···179166};180167#endif181168169169+void __init kasan_early_init(void)170170+{171171+ int i;172172+ pteval_t pte_val = __pa_nodebug(kasan_zero_page) | __PAGE_KERNEL;173173+ pmdval_t pmd_val = __pa_nodebug(kasan_zero_pte) | _KERNPG_TABLE;174174+ pudval_t pud_val = __pa_nodebug(kasan_zero_pmd) | _KERNPG_TABLE;175175+176176+ for (i = 0; i < PTRS_PER_PTE; i++)177177+ kasan_zero_pte[i] = __pte(pte_val);178178+179179+ for (i = 0; i < PTRS_PER_PMD; i++)180180+ kasan_zero_pmd[i] = __pmd(pmd_val);181181+182182+ for (i = 0; i < PTRS_PER_PUD; i++)183183+ kasan_zero_pud[i] = __pud(pud_val);184184+185185+ kasan_map_early_shadow(early_level4_pgt);186186+ kasan_map_early_shadow(init_level4_pgt);187187+}188188+182189void __init kasan_init(void)183190{184191 int i;···209176210177 memcpy(early_level4_pgt, init_level4_pgt, sizeof(early_level4_pgt));211178 load_cr3(early_level4_pgt);179179+ __flush_tlb_all();212180213181 clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);214182···236202 memset(kasan_zero_page, 0, PAGE_SIZE);237203238204 load_cr3(init_level4_pgt);205205+ __flush_tlb_all();239206 init_task.kasan_depth = 0;207207+208208+ pr_info("Kernel address sanitizer initialized\n");240209}
+5-2
drivers/acpi/acpi_lpss.c
···352352 pdata->mmio_size = resource_size(rentry->res);353353 pdata->mmio_base = ioremap(rentry->res->start,354354 pdata->mmio_size);355355- if (!pdata->mmio_base)356356- goto err_out;357355 break;358356 }359357360358 acpi_dev_free_resource_list(&resource_list);359359+360360+ if (!pdata->mmio_base) {361361+ ret = -ENOMEM;362362+ goto err_out;363363+ }361364362365 pdata->dev_desc = dev_desc;363366
+120-14
drivers/acpi/nfit.c
···1818#include <linux/list.h>1919#include <linux/acpi.h>2020#include <linux/sort.h>2121+#include <linux/pmem.h>2122#include <linux/io.h>2223#include "nfit.h"2324···306305 return true;307306}308307308308+static bool add_flush(struct acpi_nfit_desc *acpi_desc,309309+ struct acpi_nfit_flush_address *flush)310310+{311311+ struct device *dev = acpi_desc->dev;312312+ struct nfit_flush *nfit_flush = devm_kzalloc(dev, sizeof(*nfit_flush),313313+ GFP_KERNEL);314314+315315+ if (!nfit_flush)316316+ return false;317317+ INIT_LIST_HEAD(&nfit_flush->list);318318+ nfit_flush->flush = flush;319319+ list_add_tail(&nfit_flush->list, &acpi_desc->flushes);320320+ dev_dbg(dev, "%s: nfit_flush handle: %d hint_count: %d\n", __func__,321321+ flush->device_handle, flush->hint_count);322322+ return true;323323+}324324+309325static void *add_table(struct acpi_nfit_desc *acpi_desc, void *table,310326 const void *end)311327{···356338 return err;357339 break;358340 case ACPI_NFIT_TYPE_FLUSH_ADDRESS:359359- dev_dbg(dev, "%s: flush\n", __func__);341341+ if (!add_flush(acpi_desc, table))342342+ return err;360343 break;361344 case ACPI_NFIT_TYPE_SMBIOS:362345 dev_dbg(dev, "%s: smbios\n", __func__);···408389{409390 u16 dcr = __to_nfit_memdev(nfit_mem)->region_index;410391 struct nfit_memdev *nfit_memdev;392392+ struct nfit_flush *nfit_flush;411393 struct nfit_dcr *nfit_dcr;412394 struct nfit_bdw *nfit_bdw;413395 struct nfit_idt *nfit_idt;···460440 if (nfit_idt->idt->interleave_index != idt_idx)461441 continue;462442 nfit_mem->idt_bdw = nfit_idt->idt;443443+ break;444444+ }445445+446446+ list_for_each_entry(nfit_flush, &acpi_desc->flushes, list) {447447+ if (nfit_flush->flush->device_handle !=448448+ nfit_memdev->memdev->device_handle)449449+ continue;450450+ nfit_mem->nfit_flush = nfit_flush;463451 break;464452 }465453 break;···1006978 return mmio->base_offset + line_offset + table_offset + sub_line_offset;1007979}1008980981981+static void wmb_blk(struct nfit_blk *nfit_blk)982982+{983983+984984+ if 
(nfit_blk->nvdimm_flush) {985985+ /*986986+ * The first wmb() is needed to 'sfence' all previous writes987987+ * such that they are architecturally visible for the platform988988+ * buffer flush. Note that we've already arranged for pmem989989+ * writes to avoid the cache via arch_memcpy_to_pmem(). The990990+ * final wmb() ensures ordering for the NVDIMM flush write.991991+ */992992+ wmb();993993+ writeq(1, nfit_blk->nvdimm_flush);994994+ wmb();995995+ } else996996+ wmb_pmem();997997+}998998+1009999static u64 read_blk_stat(struct nfit_blk *nfit_blk, unsigned int bw)10101000{10111001 struct nfit_blk_mmio *mmio = &nfit_blk->mmio[DCR];···10581012 offset = to_interleave_offset(offset, mmio);1059101310601014 writeq(cmd, mmio->base + offset);10611061- /* FIXME: conditionally perform read-back if mandated by firmware */10151015+ wmb_blk(nfit_blk);10161016+10171017+ if (nfit_blk->dimm_flags & ND_BLK_DCR_LATCH)10181018+ readq(mmio->base + offset);10621019}1063102010641021static int acpi_nfit_blk_single_io(struct nfit_blk *nfit_blk,···1075102610761027 base_offset = nfit_blk->bdw_offset + dpa % L1_CACHE_BYTES10771028 + lane * mmio->size;10781078- /* TODO: non-temporal access, flush hints, cache management etc... */10791029 write_blk_ctl(nfit_blk, lane, dpa, len, rw);10801030 while (len) {10811031 unsigned int c;···10931045 }1094104610951047 if (rw)10961096- memcpy(mmio->aperture + offset, iobuf + copied, c);10481048+ memcpy_to_pmem(mmio->aperture + offset,10491049+ iobuf + copied, c);10971050 else10981098- memcpy(iobuf + copied, mmio->aperture + offset, c);10511051+ memcpy_from_pmem(iobuf + copied,10521052+ mmio->aperture + offset, c);1099105311001054 copied += c;11011055 len -= c;11021056 }10571057+10581058+ if (rw)10591059+ wmb_blk(nfit_blk);10601060+11031061 rc = read_blk_stat(nfit_blk, lane) ? 
-EIO : 0;11041062 return rc;11051063}···11781124}1179112511801126static void __iomem *__nfit_spa_map(struct acpi_nfit_desc *acpi_desc,11811181- struct acpi_nfit_system_address *spa)11271127+ struct acpi_nfit_system_address *spa, enum spa_map_type type)11821128{11831129 resource_size_t start = spa->address;11841130 resource_size_t n = spa->length;···12061152 if (!res)12071153 goto err_mem;1208115412091209- /* TODO: cacheability based on the spa type */12101210- spa_map->iomem = ioremap_nocache(start, n);11551155+ if (type == SPA_MAP_APERTURE) {11561156+ /*11571157+ * TODO: memremap_pmem() support, but that requires cache11581158+ * flushing when the aperture is moved.11591159+ */11601160+ spa_map->iomem = ioremap_wc(start, n);11611161+ } else11621162+ spa_map->iomem = ioremap_nocache(start, n);11631163+12111164 if (!spa_map->iomem)12121165 goto err_map;12131166···12321171 * nfit_spa_map - interleave-aware managed-mappings of acpi_nfit_system_address ranges12331172 * @nvdimm_bus: NFIT-bus that provided the spa table entry12341173 * @nfit_spa: spa table to map11741174+ * @type: aperture or control region12351175 *12361176 * In the case where block-data-window apertures and12371177 * dimm-control-regions are interleaved they will end up sharing a···12421180 * unbound.12431181 */12441182static void __iomem *nfit_spa_map(struct acpi_nfit_desc *acpi_desc,12451245- struct acpi_nfit_system_address *spa)11831183+ struct acpi_nfit_system_address *spa, enum spa_map_type type)12461184{12471185 void __iomem *iomem;1248118612491187 mutex_lock(&acpi_desc->spa_map_mutex);12501250- iomem = __nfit_spa_map(acpi_desc, spa);11881188+ iomem = __nfit_spa_map(acpi_desc, spa, type);12511189 mutex_unlock(&acpi_desc->spa_map_mutex);1252119012531191 return iomem;···12681206 return 0;12691207}1270120812091209+static int acpi_nfit_blk_get_flags(struct nvdimm_bus_descriptor *nd_desc,12101210+ struct nvdimm *nvdimm, struct nfit_blk *nfit_blk)12111211+{12121212+ struct nd_cmd_dimm_flags 
flags;12131213+ int rc;12141214+12151215+ memset(&flags, 0, sizeof(flags));12161216+ rc = nd_desc->ndctl(nd_desc, nvdimm, ND_CMD_DIMM_FLAGS, &flags,12171217+ sizeof(flags));12181218+12191219+ if (rc >= 0 && flags.status == 0)12201220+ nfit_blk->dimm_flags = flags.flags;12211221+ else if (rc == -ENOTTY) {12221222+ /* fall back to a conservative default */12231223+ nfit_blk->dimm_flags = ND_BLK_DCR_LATCH;12241224+ rc = 0;12251225+ } else12261226+ rc = -ENXIO;12271227+12281228+ return rc;12291229+}12301230+12711231static int acpi_nfit_blk_region_enable(struct nvdimm_bus *nvdimm_bus,12721232 struct device *dev)12731233{12741234 struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);12751235 struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);12761236 struct nd_blk_region *ndbr = to_nd_blk_region(dev);12371237+ struct nfit_flush *nfit_flush;12771238 struct nfit_blk_mmio *mmio;12781239 struct nfit_blk *nfit_blk;12791240 struct nfit_mem *nfit_mem;···13081223 if (!nfit_mem || !nfit_mem->dcr || !nfit_mem->bdw) {13091224 dev_dbg(dev, "%s: missing%s%s%s\n", __func__,13101225 nfit_mem ? "" : " nfit_mem",13111311- nfit_mem->dcr ? "" : " dcr",13121312- nfit_mem->bdw ? "" : " bdw");12261226+ (nfit_mem && nfit_mem->dcr) ? "" : " dcr",12271227+ (nfit_mem && nfit_mem->bdw) ? 
"" : " bdw");13131228 return -ENXIO;13141229 }13151230···13221237 /* map block aperture memory */13231238 nfit_blk->bdw_offset = nfit_mem->bdw->offset;13241239 mmio = &nfit_blk->mmio[BDW];13251325- mmio->base = nfit_spa_map(acpi_desc, nfit_mem->spa_bdw);12401240+ mmio->base = nfit_spa_map(acpi_desc, nfit_mem->spa_bdw,12411241+ SPA_MAP_APERTURE);13261242 if (!mmio->base) {13271243 dev_dbg(dev, "%s: %s failed to map bdw\n", __func__,13281244 nvdimm_name(nvdimm));···13451259 nfit_blk->cmd_offset = nfit_mem->dcr->command_offset;13461260 nfit_blk->stat_offset = nfit_mem->dcr->status_offset;13471261 mmio = &nfit_blk->mmio[DCR];13481348- mmio->base = nfit_spa_map(acpi_desc, nfit_mem->spa_dcr);12621262+ mmio->base = nfit_spa_map(acpi_desc, nfit_mem->spa_dcr,12631263+ SPA_MAP_CONTROL);13491264 if (!mmio->base) {13501265 dev_dbg(dev, "%s: %s failed to map dcr\n", __func__,13511266 nvdimm_name(nvdimm));···13631276 __func__, nvdimm_name(nvdimm));13641277 return rc;13651278 }12791279+12801280+ rc = acpi_nfit_blk_get_flags(nd_desc, nvdimm, nfit_blk);12811281+ if (rc < 0) {12821282+ dev_dbg(dev, "%s: %s failed get DIMM flags\n",12831283+ __func__, nvdimm_name(nvdimm));12841284+ return rc;12851285+ }12861286+12871287+ nfit_flush = nfit_mem->nfit_flush;12881288+ if (nfit_flush && nfit_flush->flush->hint_count != 0) {12891289+ nfit_blk->nvdimm_flush = devm_ioremap_nocache(dev,12901290+ nfit_flush->flush->hint_address[0], 8);12911291+ if (!nfit_blk->nvdimm_flush)12921292+ return -ENOMEM;12931293+ }12941294+12951295+ if (!arch_has_pmem_api() && !nfit_blk->nvdimm_flush)12961296+ dev_warn(dev, "unable to guarantee persistence of writes\n");1366129713671298 if (mmio->line_size == 0)13681299 return 0;···15641459 INIT_LIST_HEAD(&acpi_desc->dcrs);15651460 INIT_LIST_HEAD(&acpi_desc->bdws);15661461 INIT_LIST_HEAD(&acpi_desc->idts);14621462+ INIT_LIST_HEAD(&acpi_desc->flushes);15671463 INIT_LIST_HEAD(&acpi_desc->memdevs);15681464 INIT_LIST_HEAD(&acpi_desc->dimms);15691465 
mutex_init(&acpi_desc->spa_map_mutex);
···2626#include <linux/device.h>2727#include <linux/export.h>2828#include <linux/ioport.h>2929-#include <linux/list.h>3029#include <linux/slab.h>31303231#ifdef CONFIG_X86···621622 return (type & types) ? 0 : 1;622623}623624EXPORT_SYMBOL_GPL(acpi_dev_filter_resource_type);624624-625625-struct reserved_region {626626- struct list_head node;627627- u64 start;628628- u64 end;629629-};630630-631631-static LIST_HEAD(reserved_io_regions);632632-static LIST_HEAD(reserved_mem_regions);633633-634634-static int request_range(u64 start, u64 end, u8 space_id, unsigned long flags,635635- char *desc)636636-{637637- unsigned int length = end - start + 1;638638- struct resource *res;639639-640640- res = space_id == ACPI_ADR_SPACE_SYSTEM_IO ?641641- request_region(start, length, desc) :642642- request_mem_region(start, length, desc);643643- if (!res)644644- return -EIO;645645-646646- res->flags &= ~flags;647647- return 0;648648-}649649-650650-static int add_region_before(u64 start, u64 end, u8 space_id,651651- unsigned long flags, char *desc,652652- struct list_head *head)653653-{654654- struct reserved_region *reg;655655- int error;656656-657657- reg = kmalloc(sizeof(*reg), GFP_KERNEL);658658- if (!reg)659659- return -ENOMEM;660660-661661- error = request_range(start, end, space_id, flags, desc);662662- if (error) {663663- kfree(reg);664664- return error;665665- }666666-667667- reg->start = start;668668- reg->end = end;669669- list_add_tail(®->node, head);670670- return 0;671671-}672672-673673-/**674674- * acpi_reserve_region - Reserve an I/O or memory region as a system resource.675675- * @start: Starting address of the region.676676- * @length: Length of the region.677677- * @space_id: Identifier of address space to reserve the region from.678678- * @flags: Resource flags to clear for the region after requesting it.679679- * @desc: Region description (for messages).680680- *681681- * Reserve an I/O or memory region as a system resource to prevent others from682682- * using it. 
If the new region overlaps with one of the regions (in the given683683- * address space) already reserved by this routine, only the non-overlapping684684- * parts of it will be reserved.685685- *686686- * Returned is either 0 (success) or a negative error code indicating a resource687687- * reservation problem. It is the code of the first encountered error, but the688688- * routine doesn't abort until it has attempted to request all of the parts of689689- * the new region that don't overlap with other regions reserved previously.690690- *691691- * The resources requested by this routine are never released.692692- */693693-int acpi_reserve_region(u64 start, unsigned int length, u8 space_id,694694- unsigned long flags, char *desc)695695-{696696- struct list_head *regions;697697- struct reserved_region *reg;698698- u64 end = start + length - 1;699699- int ret = 0, error = 0;700700-701701- if (space_id == ACPI_ADR_SPACE_SYSTEM_IO)702702- regions = &reserved_io_regions;703703- else if (space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)704704- regions = &reserved_mem_regions;705705- else706706- return -EINVAL;707707-708708- if (list_empty(regions))709709- return add_region_before(start, end, space_id, flags, desc, regions);710710-711711- list_for_each_entry(reg, regions, node)712712- if (reg->start == end + 1) {713713- /* The new region can be prepended to this one. */714714- ret = request_range(start, end, space_id, flags, desc);715715- if (!ret)716716- reg->start = start;717717-718718- return ret;719719- } else if (reg->start > end) {720720- /* No overlap. Add the new region here and get out. */721721- return add_region_before(start, end, space_id, flags,722722- desc, ®->node);723723- } else if (reg->end == start - 1) {724724- goto combine;725725- } else if (reg->end >= start) {726726- goto overlap;727727- }728728-729729- /* The new region goes after the last existing one. 
*/730730- return add_region_before(start, end, space_id, flags, desc, regions);731731-732732- overlap:733733- /*734734- * The new region overlaps an existing one.735735- *736736- * The head part of the new region immediately preceding the existing737737- * overlapping one can be combined with it right away.738738- */739739- if (reg->start > start) {740740- error = request_range(start, reg->start - 1, space_id, flags, desc);741741- if (error)742742- ret = error;743743- else744744- reg->start = start;745745- }746746-747747- combine:748748- /*749749- * The new region is adjacent to an existing one. If it extends beyond750750- * that region all the way to the next one, it is possible to combine751751- * all three of them.752752- */753753- while (reg->end < end) {754754- struct reserved_region *next = NULL;755755- u64 a = reg->end + 1, b = end;756756-757757- if (!list_is_last(®->node, regions)) {758758- next = list_next_entry(reg, node);759759- if (next->start <= end)760760- b = next->start - 1;761761- }762762- error = request_range(a, b, space_id, flags, desc);763763- if (!error) {764764- if (next && next->start == b + 1) {765765- reg->end = next->end;766766- list_del(&next->node);767767- kfree(next);768768- } else {769769- reg->end = end;770770- break;771771- }772772- } else if (next) {773773- if (!ret)774774- ret = error;775775-776776- reg = next;777777- } else {778778- break;779779- }780780- }781781-782782- return ret ? ret : error;783783-}784784-EXPORT_SYMBOL_GPL(acpi_reserve_region);
+30-2
drivers/acpi/scan.c
···10191019 return false;10201020}1021102110221022+static bool __acpi_match_device_cls(const struct acpi_device_id *id,10231023+ struct acpi_hardware_id *hwid)10241024+{10251025+ int i, msk, byte_shift;10261026+ char buf[3];10271027+10281028+ if (!id->cls)10291029+ return false;10301030+10311031+ /* Apply class-code bitmask, before checking each class-code byte */10321032+ for (i = 1; i <= 3; i++) {10331033+ byte_shift = 8 * (3 - i);10341034+ msk = (id->cls_msk >> byte_shift) & 0xFF;10351035+ if (!msk)10361036+ continue;10371037+10381038+ sprintf(buf, "%02x", (id->cls >> byte_shift) & msk);10391039+ if (strncmp(buf, &hwid->id[(i - 1) * 2], 2))10401040+ return false;10411041+ }10421042+ return true;10431043+}10441044+10221045static const struct acpi_device_id *__acpi_match_device(10231046 struct acpi_device *device,10241047 const struct acpi_device_id *ids,···1059103610601037 list_for_each_entry(hwid, &device->pnp.ids, list) {10611038 /* First, check the ACPI/PNP IDs provided by the caller. */10621062- for (id = ids; id->id[0]; id++)10631063- if (!strcmp((char *) id->id, hwid->id))10391039+ for (id = ids; id->id[0] || id->cls; id++) {10401040+ if (id->id[0] && !strcmp((char *) id->id, hwid->id))10641041 return id;10421042+ else if (id->cls && __acpi_match_device_cls(id, hwid))10431043+ return id;10441044+ }1065104510661046 /*10671047 * Next, check ACPI_DT_NAMESPACE_HID and try to match the···21272101 if (info->valid & ACPI_VALID_UID)21282102 pnp->unique_id = kstrdup(info->unique_id.string,21292103 GFP_KERNEL);21042104+ if (info->valid & ACPI_VALID_CLS)21052105+ acpi_add_id(pnp, info->class_code.string);2130210621312107 kfree(info);21322108
+1-1
drivers/ata/Kconfig
···48484949config ATA_ACPI5050 bool "ATA ACPI Support"5151- depends on ACPI && PCI5151+ depends on ACPI5252 default y5353 help5454 This option adds support for ATA-related ACPI objects.
···563563 kfree(fw_priv);564564}565565566566-static int firmware_uevent(struct device *dev, struct kobj_uevent_env *env)566566+static int do_firmware_uevent(struct firmware_priv *fw_priv, struct kobj_uevent_env *env)567567{568568- struct firmware_priv *fw_priv = to_firmware_priv(dev);569569-570568 if (add_uevent_var(env, "FIRMWARE=%s", fw_priv->buf->fw_id))571569 return -ENOMEM;572570 if (add_uevent_var(env, "TIMEOUT=%i", loading_timeout))···573575 return -ENOMEM;574576575577 return 0;578578+}579579+580580+static int firmware_uevent(struct device *dev, struct kobj_uevent_env *env)581581+{582582+ struct firmware_priv *fw_priv = to_firmware_priv(dev);583583+ int err = 0;584584+585585+ mutex_lock(&fw_lock);586586+ if (fw_priv->buf)587587+ err = do_firmware_uevent(fw_priv, env);588588+ mutex_unlock(&fw_lock);589589+ return err;576590}577591578592static struct class firmware_class = {
+11-2
drivers/base/power/domain.c
···66 * This file is released under the GPLv2.77 */8899+#include <linux/delay.h>910#include <linux/kernel.h>1011#include <linux/io.h>1112#include <linux/platform_device.h>···1918#include <linux/sched.h>2019#include <linux/suspend.h>2120#include <linux/export.h>2121+2222+#define GENPD_RETRY_MAX_MS 250 /* Approximate */22232324#define GENPD_DEV_CALLBACK(genpd, type, callback, dev) \2425({ \···21342131static void genpd_dev_pm_detach(struct device *dev, bool power_off)21352132{21362133 struct generic_pm_domain *pd;21342134+ unsigned int i;21372135 int ret = 0;2138213621392137 pd = pm_genpd_lookup_dev(dev);···2143213921442140 dev_dbg(dev, "removing from PM domain %s\n", pd->name);2145214121462146- while (1) {21422142+ for (i = 1; i < GENPD_RETRY_MAX_MS; i <<= 1) {21472143 ret = pm_genpd_remove_device(pd, dev);21482144 if (ret != -EAGAIN)21492145 break;21462146+21472147+ mdelay(i);21502148 cond_resched();21512149 }21522150···21892183{21902184 struct of_phandle_args pd_args;21912185 struct generic_pm_domain *pd;21862186+ unsigned int i;21922187 int ret;2193218821942189 if (!dev->of_node)···2225221822262219 dev_dbg(dev, "adding to PM domain %s\n", pd->name);2227222022282228- while (1) {22212221+ for (i = 1; i < GENPD_RETRY_MAX_MS; i <<= 1) {22292222 ret = pm_genpd_add_device(pd, dev);22302223 if (ret != -EAGAIN)22312224 break;22252225+22262226+ mdelay(i);22322227 cond_resched();22332228 }22342229
···281281 * Attach a device wakeirq to the wakeup source so the device282282 * wake IRQ can be configured automatically for suspend and283283 * resume.284284+ *285285+ * Call under the device's power.lock lock.284286 */285287int device_wakeup_attach_irq(struct device *dev,286288 struct wake_irq *wakeirq)287289{288290 struct wakeup_source *ws;289289- int ret = 0;290291291291- spin_lock_irq(&dev->power.lock);292292 ws = dev->power.wakeup;293293 if (!ws) {294294 dev_err(dev, "forgot to call call device_init_wakeup?\n");295295- ret = -EINVAL;296296- goto unlock;295295+ return -EINVAL;297296 }298297299299- if (ws->wakeirq) {300300- ret = -EEXIST;301301- goto unlock;302302- }298298+ if (ws->wakeirq)299299+ return -EEXIST;303300304301 ws->wakeirq = wakeirq;305305-306306-unlock:307307- spin_unlock_irq(&dev->power.lock);308308-309309- return ret;302302+ return 0;310303}311304312305/**···307314 * @dev: Device to handle308315 *309316 * Removes a device wakeirq from the wakeup source.317317+ *318318+ * Call under the device's power.lock lock.310319 */311320void device_wakeup_detach_irq(struct device *dev)312321{313322 struct wakeup_source *ws;314323315315- spin_lock_irq(&dev->power.lock);316324 ws = dev->power.wakeup;317317- if (!ws)318318- goto unlock;319319-320320- ws->wakeirq = NULL;321321-322322-unlock:323323- spin_unlock_irq(&dev->power.lock);325325+ if (ws)326326+ ws->wakeirq = NULL;324327}325328326329/**
+3-1
drivers/clk/at91/clk-h32mx.c
···116116 h32mxclk->pmc = pmc;117117118118 clk = clk_register(NULL, &h32mxclk->hw);119119- if (!clk)119119+ if (!clk) {120120+ kfree(h32mxclk);120121 return;122122+ }121123122124 of_clk_add_provider(np, of_clk_src_simple_get, clk);123125}
+3-1
drivers/clk/at91/clk-main.c
···171171 irq_set_status_flags(osc->irq, IRQ_NOAUTOEN);172172 ret = request_irq(osc->irq, clk_main_osc_irq_handler,173173 IRQF_TRIGGER_HIGH, name, osc);174174- if (ret)174174+ if (ret) {175175+ kfree(osc);175176 return ERR_PTR(ret);177177+ }176178177179 if (bypass)178180 pmc_write(pmc, AT91_CKGR_MOR,
+6-2
drivers/clk/at91/clk-master.c
···165165 irq_set_status_flags(master->irq, IRQ_NOAUTOEN);166166 ret = request_irq(master->irq, clk_master_irq_handler,167167 IRQF_TRIGGER_HIGH, "clk-master", master);168168- if (ret)168168+ if (ret) {169169+ kfree(master);169170 return ERR_PTR(ret);171171+ }170172171173 clk = clk_register(NULL, &master->hw);172172- if (IS_ERR(clk))174174+ if (IS_ERR(clk)) {175175+ free_irq(master->irq, master);173176 kfree(master);177177+ }174178175179 return clk;176180}
+6-2
drivers/clk/at91/clk-pll.c
···346346 irq_set_status_flags(pll->irq, IRQ_NOAUTOEN);347347 ret = request_irq(pll->irq, clk_pll_irq_handler, IRQF_TRIGGER_HIGH,348348 id ? "clk-pllb" : "clk-plla", pll);349349- if (ret)349349+ if (ret) {350350+ kfree(pll);350351 return ERR_PTR(ret);352352+ }351353352354 clk = clk_register(NULL, &pll->hw);353353- if (IS_ERR(clk))355355+ if (IS_ERR(clk)) {356356+ free_irq(pll->irq, pll);354357 kfree(pll);358358+ }355359356360 return clk;357361}
+6-2
drivers/clk/at91/clk-system.c
···130130 irq_set_status_flags(sys->irq, IRQ_NOAUTOEN);131131 ret = request_irq(sys->irq, clk_system_irq_handler,132132 IRQF_TRIGGER_HIGH, name, sys);133133- if (ret)133133+ if (ret) {134134+ kfree(sys);134135 return ERR_PTR(ret);136136+ }135137 }136138137139 clk = clk_register(NULL, &sys->hw);138138- if (IS_ERR(clk))140140+ if (IS_ERR(clk)) {141141+ free_irq(sys->irq, sys);139142 kfree(sys);143143+ }140144141145 return clk;142146}
+6-2
drivers/clk/at91/clk-utmi.c
···118118 irq_set_status_flags(utmi->irq, IRQ_NOAUTOEN);119119 ret = request_irq(utmi->irq, clk_utmi_irq_handler,120120 IRQF_TRIGGER_HIGH, "clk-utmi", utmi);121121- if (ret)121121+ if (ret) {122122+ kfree(utmi);122123 return ERR_PTR(ret);124124+ }123125124126 clk = clk_register(NULL, &utmi->hw);125125- if (IS_ERR(clk))127127+ if (IS_ERR(clk)) {128128+ free_irq(utmi->irq, utmi);126129 kfree(utmi);130130+ }127131128132 return clk;129133}
+1-5
drivers/clk/bcm/clk-iproc-asiu.c
···222222 struct iproc_asiu_clk *asiu_clk;223223 const char *clk_name;224224225225- clk_name = kzalloc(IPROC_CLK_NAME_LEN, GFP_KERNEL);226226- if (WARN_ON(!clk_name))227227- goto err_clk_register;228228-229225 ret = of_property_read_string_index(node, "clock-output-names",230226 i, &clk_name);231227 if (WARN_ON(ret))···255259256260err_clk_register:257261 for (i = 0; i < num_clks; i++)258258- kfree(asiu->clks[i].name);262262+ clk_unregister(asiu->clk_data.clks[i]);259263 iounmap(asiu->gate_base);260264261265err_iomap_gate:
+4-9
drivers/clk/bcm/clk-iproc-pll.c
···366366 val = readl(pll->pll_base + ctrl->ndiv_int.offset);367367 ndiv_int = (val >> ctrl->ndiv_int.shift) &368368 bit_mask(ctrl->ndiv_int.width);369369- ndiv = ndiv_int << ctrl->ndiv_int.shift;369369+ ndiv = (u64)ndiv_int << ctrl->ndiv_int.shift;370370371371 if (ctrl->flags & IPROC_CLK_PLL_HAS_NDIV_FRAC) {372372 val = readl(pll->pll_base + ctrl->ndiv_frac.offset);···374374 bit_mask(ctrl->ndiv_frac.width);375375376376 if (ndiv_frac != 0)377377- ndiv = (ndiv_int << ctrl->ndiv_int.shift) | ndiv_frac;377377+ ndiv = ((u64)ndiv_int << ctrl->ndiv_int.shift) |378378+ ndiv_frac;378379 }379380380381 val = readl(pll->pll_base + ctrl->pdiv.offset);···656655 memset(&init, 0, sizeof(init));657656 parent_name = node->name;658657659659- clk_name = kzalloc(IPROC_CLK_NAME_LEN, GFP_KERNEL);660660- if (WARN_ON(!clk_name))661661- goto err_clk_register;662662-663658 ret = of_property_read_string_index(node, "clock-output-names",664659 i, &clk_name);665660 if (WARN_ON(ret))···687690 return;688691689692err_clk_register:690690- for (i = 0; i < num_clks; i++) {691691- kfree(pll->clks[i].name);693693+ for (i = 0; i < num_clks; i++)692694 clk_unregister(pll->clk_data.clks[i]);693693- }694695695696err_pll_register:696697 if (pll->asiu_base)
+1-1
drivers/clk/clk-stm32f4.c
···268268 memcpy(table, stm32f42xx_gate_map, sizeof(table));269269270270 /* only bits set in table can be used as indices */271271- if (WARN_ON(secondary > 8 * sizeof(table) ||271271+ if (WARN_ON(secondary >= BITS_PER_BYTE * sizeof(table) ||272272 0 == (table[BIT_ULL_WORD(secondary)] &273273 BIT_ULL_MASK(secondary))))274274 return -EINVAL;
···33 *44 * The 2E revision of loongson processor not support this feature.55 *66- * Copyright (C) 2006 - 2008 Lemote Inc. & Insititute of Computing Technology66+ * Copyright (C) 2006 - 2008 Lemote Inc. & Institute of Computing Technology77 * Author: Yanhua, yanh@lemote.com88 *99 * This file is subject to the terms and conditions of the GNU General Public
···183183 if (IS_GEN4(dev)) {184184 uint32_t ddc2 = I915_READ(DCC2);185185186186- if (!(ddc2 & DCC2_MODIFIED_ENHANCED_DISABLE))186186+ if (!(ddc2 & DCC2_MODIFIED_ENHANCED_DISABLE)) {187187+ /* Since the swizzling may vary within an188188+ * object, we have no idea what the swizzling189189+ * is for any page in particular. Thus we190190+ * cannot migrate tiled pages using the GPU,191191+ * nor can we tell userspace what the exact192192+ * swizzling is for any object.193193+ */187194 dev_priv->quirks |= QUIRK_PIN_SWIZZLED_PAGES;195195+ swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;196196+ swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;197197+ }188198 }189199190200 if (dcc == 0xffffffff) {
···285285286286 if (wait) {287287 if (!wait_for_completion_timeout(&engine->compl,288288- msecs_to_jiffies(1))) {288288+ msecs_to_jiffies(100))) {289289 dev_err(dmm->dev, "timed out waiting for done\n");290290 ret = -ETIMEDOUT;291291 }
···287287}288288289289/* unpin, no longer being scanned out: */290290-int omap_framebuffer_unpin(struct drm_framebuffer *fb)290290+void omap_framebuffer_unpin(struct drm_framebuffer *fb)291291{292292 struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb);293293- int ret, i, n = drm_format_num_planes(fb->pixel_format);293293+ int i, n = drm_format_num_planes(fb->pixel_format);294294295295 mutex_lock(&omap_fb->lock);296296···298298299299 if (omap_fb->pin_count > 0) {300300 mutex_unlock(&omap_fb->lock);301301- return 0;301301+ return;302302 }303303304304 for (i = 0; i < n; i++) {305305 struct plane *plane = &omap_fb->planes[i];306306- ret = omap_gem_put_paddr(plane->bo);307307- if (ret)308308- goto fail;306306+ omap_gem_put_paddr(plane->bo);309307 plane->paddr = 0;310308 }311309312310 mutex_unlock(&omap_fb->lock);313313-314314- return 0;315315-316316-fail:317317- mutex_unlock(&omap_fb->lock);318318- return ret;319311}320312321313struct drm_gem_object *omap_framebuffer_bo(struct drm_framebuffer *fb, int p)
+1-1
drivers/gpu/drm/omapdrm/omap_fbdev.c
···135135 fbdev->ywrap_enabled = priv->has_dmm && ywrap_enabled;136136 if (fbdev->ywrap_enabled) {137137 /* need to align pitch to page size if using DMM scrolling */138138- mode_cmd.pitches[0] = ALIGN(mode_cmd.pitches[0], PAGE_SIZE);138138+ mode_cmd.pitches[0] = PAGE_ALIGN(mode_cmd.pitches[0]);139139 }140140141141 /* allocate backing bo */
+14-12
drivers/gpu/drm/omapdrm/omap_gem.c
···808808/* Release physical address, when DMA is no longer being performed.. this809809 * could potentially unpin and unmap buffers from TILER810810 */811811-int omap_gem_put_paddr(struct drm_gem_object *obj)811811+void omap_gem_put_paddr(struct drm_gem_object *obj)812812{813813 struct omap_gem_object *omap_obj = to_omap_bo(obj);814814- int ret = 0;814814+ int ret;815815816816 mutex_lock(&obj->dev->struct_mutex);817817 if (omap_obj->paddr_cnt > 0) {···821821 if (ret) {822822 dev_err(obj->dev->dev,823823 "could not unpin pages: %d\n", ret);824824- goto fail;825824 }826825 ret = tiler_release(omap_obj->block);827826 if (ret) {···831832 omap_obj->block = NULL;832833 }833834 }834834-fail:835835+835836 mutex_unlock(&obj->dev->struct_mutex);836836- return ret;837837}838838839839/* Get rotated scanout address (only valid if already pinned), at the···1376137813771379 omap_obj = kzalloc(sizeof(*omap_obj), GFP_KERNEL);13781380 if (!omap_obj)13791379- goto fail;13801380-13811381- spin_lock(&priv->list_lock);13821382- list_add(&omap_obj->mm_list, &priv->obj_list);13831383- spin_unlock(&priv->list_lock);13811381+ return NULL;1384138213851383 obj = &omap_obj->base;13861384···13861392 */13871393 omap_obj->vaddr = dma_alloc_writecombine(dev->dev, size,13881394 &omap_obj->paddr, GFP_KERNEL);13891389- if (omap_obj->vaddr)13901390- flags |= OMAP_BO_DMA;13951395+ if (!omap_obj->vaddr) {13961396+ kfree(omap_obj);1391139713981398+ return NULL;13991399+ }14001400+14011401+ flags |= OMAP_BO_DMA;13921402 }14031403+14041404+ spin_lock(&priv->list_lock);14051405+ list_add(&omap_obj->mm_list, &priv->obj_list);14061406+ spin_unlock(&priv->list_lock);1393140713941408 omap_obj->flags = flags;13951409
+26
drivers/gpu/drm/omapdrm/omap_plane.c
···1717 * this program. If not, see <http://www.gnu.org/licenses/>.1818 */19192020+#include <drm/drm_atomic.h>2021#include <drm/drm_atomic_helper.h>2122#include <drm/drm_plane_helper.h>2223···154153 dispc_ovl_enable(omap_plane->id, false);155154}156155156156+static int omap_plane_atomic_check(struct drm_plane *plane,157157+ struct drm_plane_state *state)158158+{159159+ struct drm_crtc_state *crtc_state;160160+161161+ if (!state->crtc)162162+ return 0;163163+164164+ crtc_state = drm_atomic_get_crtc_state(state->state, state->crtc);165165+ if (IS_ERR(crtc_state))166166+ return PTR_ERR(crtc_state);167167+168168+ if (state->crtc_x < 0 || state->crtc_y < 0)169169+ return -EINVAL;170170+171171+ if (state->crtc_x + state->crtc_w > crtc_state->adjusted_mode.hdisplay)172172+ return -EINVAL;173173+174174+ if (state->crtc_y + state->crtc_h > crtc_state->adjusted_mode.vdisplay)175175+ return -EINVAL;176176+177177+ return 0;178178+}179179+157180static const struct drm_plane_helper_funcs omap_plane_helper_funcs = {158181 .prepare_fb = omap_plane_prepare_fb,159182 .cleanup_fb = omap_plane_cleanup_fb,183183+ .atomic_check = omap_plane_atomic_check,160184 .atomic_update = omap_plane_atomic_update,161185 .atomic_disable = omap_plane_atomic_disable,162186};
+192-144
drivers/gpu/drm/radeon/cik.c
···79647964 case 1: /* D1 vblank/vline */79657965 switch (src_data) {79667966 case 0: /* D1 vblank */79677967- if (rdev->irq.stat_regs.cik.disp_int & LB_D1_VBLANK_INTERRUPT) {79687968- if (rdev->irq.crtc_vblank_int[0]) {79697969- drm_handle_vblank(rdev->ddev, 0);79707970- rdev->pm.vblank_sync = true;79717971- wake_up(&rdev->irq.vblank_queue);79727972- }79737973- if (atomic_read(&rdev->irq.pflip[0]))79747974- radeon_crtc_handle_vblank(rdev, 0);79757975- rdev->irq.stat_regs.cik.disp_int &= ~LB_D1_VBLANK_INTERRUPT;79767976- DRM_DEBUG("IH: D1 vblank\n");79677967+ if (!(rdev->irq.stat_regs.cik.disp_int & LB_D1_VBLANK_INTERRUPT))79687968+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");79697969+79707970+ if (rdev->irq.crtc_vblank_int[0]) {79717971+ drm_handle_vblank(rdev->ddev, 0);79727972+ rdev->pm.vblank_sync = true;79737973+ wake_up(&rdev->irq.vblank_queue);79777974 }79757975+ if (atomic_read(&rdev->irq.pflip[0]))79767976+ radeon_crtc_handle_vblank(rdev, 0);79777977+ rdev->irq.stat_regs.cik.disp_int &= ~LB_D1_VBLANK_INTERRUPT;79787978+ DRM_DEBUG("IH: D1 vblank\n");79797979+79787980 break;79797981 case 1: /* D1 vline */79807980- if (rdev->irq.stat_regs.cik.disp_int & LB_D1_VLINE_INTERRUPT) {79817981- rdev->irq.stat_regs.cik.disp_int &= ~LB_D1_VLINE_INTERRUPT;79827982- DRM_DEBUG("IH: D1 vline\n");79837983- }79827982+ if (!(rdev->irq.stat_regs.cik.disp_int & LB_D1_VLINE_INTERRUPT))79837983+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");79847984+79857985+ rdev->irq.stat_regs.cik.disp_int &= ~LB_D1_VLINE_INTERRUPT;79867986+ DRM_DEBUG("IH: D1 vline\n");79877987+79847988 break;79857989 default:79867990 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);···79947990 case 2: /* D2 vblank/vline */79957991 switch (src_data) {79967992 case 0: /* D2 vblank */79977997- if (rdev->irq.stat_regs.cik.disp_int_cont & LB_D2_VBLANK_INTERRUPT) {79987998- if (rdev->irq.crtc_vblank_int[1]) {79997999- drm_handle_vblank(rdev->ddev, 1);80008000- rdev->pm.vblank_sync = 
true;80018001- wake_up(&rdev->irq.vblank_queue);80028002- }80038003- if (atomic_read(&rdev->irq.pflip[1]))80048004- radeon_crtc_handle_vblank(rdev, 1);80058005- rdev->irq.stat_regs.cik.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;80068006- DRM_DEBUG("IH: D2 vblank\n");79937993+ if (!(rdev->irq.stat_regs.cik.disp_int_cont & LB_D2_VBLANK_INTERRUPT))79947994+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");79957995+79967996+ if (rdev->irq.crtc_vblank_int[1]) {79977997+ drm_handle_vblank(rdev->ddev, 1);79987998+ rdev->pm.vblank_sync = true;79997999+ wake_up(&rdev->irq.vblank_queue);80078000 }80018001+ if (atomic_read(&rdev->irq.pflip[1]))80028002+ radeon_crtc_handle_vblank(rdev, 1);80038003+ rdev->irq.stat_regs.cik.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;80048004+ DRM_DEBUG("IH: D2 vblank\n");80058005+80088006 break;80098007 case 1: /* D2 vline */80108010- if (rdev->irq.stat_regs.cik.disp_int_cont & LB_D2_VLINE_INTERRUPT) {80118011- rdev->irq.stat_regs.cik.disp_int_cont &= ~LB_D2_VLINE_INTERRUPT;80128012- DRM_DEBUG("IH: D2 vline\n");80138013- }80088008+ if (!(rdev->irq.stat_regs.cik.disp_int_cont & LB_D2_VLINE_INTERRUPT))80098009+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");80108010+80118011+ rdev->irq.stat_regs.cik.disp_int_cont &= ~LB_D2_VLINE_INTERRUPT;80128012+ DRM_DEBUG("IH: D2 vline\n");80138013+80148014 break;80158015 default:80168016 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);···80248016 case 3: /* D3 vblank/vline */80258017 switch (src_data) {80268018 case 0: /* D3 vblank */80278027- if (rdev->irq.stat_regs.cik.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) {80288028- if (rdev->irq.crtc_vblank_int[2]) {80298029- drm_handle_vblank(rdev->ddev, 2);80308030- rdev->pm.vblank_sync = true;80318031- wake_up(&rdev->irq.vblank_queue);80328032- }80338033- if (atomic_read(&rdev->irq.pflip[2]))80348034- radeon_crtc_handle_vblank(rdev, 2);80358035- rdev->irq.stat_regs.cik.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;80368036- DRM_DEBUG("IH: D3 
vblank\n");80198019+ if (!(rdev->irq.stat_regs.cik.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT))80208020+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");80218021+80228022+ if (rdev->irq.crtc_vblank_int[2]) {80238023+ drm_handle_vblank(rdev->ddev, 2);80248024+ rdev->pm.vblank_sync = true;80258025+ wake_up(&rdev->irq.vblank_queue);80378026 }80278027+ if (atomic_read(&rdev->irq.pflip[2]))80288028+ radeon_crtc_handle_vblank(rdev, 2);80298029+ rdev->irq.stat_regs.cik.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;80308030+ DRM_DEBUG("IH: D3 vblank\n");80318031+80388032 break;80398033 case 1: /* D3 vline */80408040- if (rdev->irq.stat_regs.cik.disp_int_cont2 & LB_D3_VLINE_INTERRUPT) {80418041- rdev->irq.stat_regs.cik.disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT;80428042- DRM_DEBUG("IH: D3 vline\n");80438043- }80348034+ if (!(rdev->irq.stat_regs.cik.disp_int_cont2 & LB_D3_VLINE_INTERRUPT))80358035+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");80368036+80378037+ rdev->irq.stat_regs.cik.disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT;80388038+ DRM_DEBUG("IH: D3 vline\n");80398039+80448040 break;80458041 default:80468042 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);···80548042 case 4: /* D4 vblank/vline */80558043 switch (src_data) {80568044 case 0: /* D4 vblank */80578057- if (rdev->irq.stat_regs.cik.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) {80588058- if (rdev->irq.crtc_vblank_int[3]) {80598059- drm_handle_vblank(rdev->ddev, 3);80608060- rdev->pm.vblank_sync = true;80618061- wake_up(&rdev->irq.vblank_queue);80628062- }80638063- if (atomic_read(&rdev->irq.pflip[3]))80648064- radeon_crtc_handle_vblank(rdev, 3);80658065- rdev->irq.stat_regs.cik.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;80668066- DRM_DEBUG("IH: D4 vblank\n");80458045+ if (!(rdev->irq.stat_regs.cik.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT))80468046+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");80478047+80488048+ if (rdev->irq.crtc_vblank_int[3]) {80498049+ drm_handle_vblank(rdev->ddev, 3);80508050+ 
rdev->pm.vblank_sync = true;80518051+ wake_up(&rdev->irq.vblank_queue);80678052 }80538053+ if (atomic_read(&rdev->irq.pflip[3]))80548054+ radeon_crtc_handle_vblank(rdev, 3);80558055+ rdev->irq.stat_regs.cik.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;80568056+ DRM_DEBUG("IH: D4 vblank\n");80578057+80688058 break;80698059 case 1: /* D4 vline */80708070- if (rdev->irq.stat_regs.cik.disp_int_cont3 & LB_D4_VLINE_INTERRUPT) {80718071- rdev->irq.stat_regs.cik.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT;80728072- DRM_DEBUG("IH: D4 vline\n");80738073- }80608060+ if (!(rdev->irq.stat_regs.cik.disp_int_cont3 & LB_D4_VLINE_INTERRUPT))80618061+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");80628062+80638063+ rdev->irq.stat_regs.cik.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT;80648064+ DRM_DEBUG("IH: D4 vline\n");80658065+80748066 break;80758067 default:80768068 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);···80848068 case 5: /* D5 vblank/vline */80858069 switch (src_data) {80868070 case 0: /* D5 vblank */80878087- if (rdev->irq.stat_regs.cik.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) {80888088- if (rdev->irq.crtc_vblank_int[4]) {80898089- drm_handle_vblank(rdev->ddev, 4);80908090- rdev->pm.vblank_sync = true;80918091- wake_up(&rdev->irq.vblank_queue);80928092- }80938093- if (atomic_read(&rdev->irq.pflip[4]))80948094- radeon_crtc_handle_vblank(rdev, 4);80958095- rdev->irq.stat_regs.cik.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;80968096- DRM_DEBUG("IH: D5 vblank\n");80718071+ if (!(rdev->irq.stat_regs.cik.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT))80728072+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");80738073+80748074+ if (rdev->irq.crtc_vblank_int[4]) {80758075+ drm_handle_vblank(rdev->ddev, 4);80768076+ rdev->pm.vblank_sync = true;80778077+ wake_up(&rdev->irq.vblank_queue);80978078 }80798079+ if (atomic_read(&rdev->irq.pflip[4]))80808080+ radeon_crtc_handle_vblank(rdev, 4);80818081+ rdev->irq.stat_regs.cik.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;80828082+ 
DRM_DEBUG("IH: D5 vblank\n");80838083+80988084 break;80998085 case 1: /* D5 vline */81008100- if (rdev->irq.stat_regs.cik.disp_int_cont4 & LB_D5_VLINE_INTERRUPT) {81018101- rdev->irq.stat_regs.cik.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT;81028102- DRM_DEBUG("IH: D5 vline\n");81038103- }80868086+ if (!(rdev->irq.stat_regs.cik.disp_int_cont4 & LB_D5_VLINE_INTERRUPT))80878087+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");80888088+80898089+ rdev->irq.stat_regs.cik.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT;80908090+ DRM_DEBUG("IH: D5 vline\n");80918091+81048092 break;81058093 default:81068094 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);···81148094 case 6: /* D6 vblank/vline */81158095 switch (src_data) {81168096 case 0: /* D6 vblank */81178117- if (rdev->irq.stat_regs.cik.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) {81188118- if (rdev->irq.crtc_vblank_int[5]) {81198119- drm_handle_vblank(rdev->ddev, 5);81208120- rdev->pm.vblank_sync = true;81218121- wake_up(&rdev->irq.vblank_queue);81228122- }81238123- if (atomic_read(&rdev->irq.pflip[5]))81248124- radeon_crtc_handle_vblank(rdev, 5);81258125- rdev->irq.stat_regs.cik.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;81268126- DRM_DEBUG("IH: D6 vblank\n");80978097+ if (!(rdev->irq.stat_regs.cik.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT))80988098+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");80998099+81008100+ if (rdev->irq.crtc_vblank_int[5]) {81018101+ drm_handle_vblank(rdev->ddev, 5);81028102+ rdev->pm.vblank_sync = true;81038103+ wake_up(&rdev->irq.vblank_queue);81278104 }81058105+ if (atomic_read(&rdev->irq.pflip[5]))81068106+ radeon_crtc_handle_vblank(rdev, 5);81078107+ rdev->irq.stat_regs.cik.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;81088108+ DRM_DEBUG("IH: D6 vblank\n");81098109+81288110 break;81298111 case 1: /* D6 vline */81308130- if (rdev->irq.stat_regs.cik.disp_int_cont5 & LB_D6_VLINE_INTERRUPT) {81318131- rdev->irq.stat_regs.cik.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT;81328132- DRM_DEBUG("IH: D6 
vline\n");81338133- }81128112+ if (!(rdev->irq.stat_regs.cik.disp_int_cont5 & LB_D6_VLINE_INTERRUPT))81138113+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");81148114+81158115+ rdev->irq.stat_regs.cik.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT;81168116+ DRM_DEBUG("IH: D6 vline\n");81178117+81348118 break;81358119 default:81368120 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);···81548130 case 42: /* HPD hotplug */81558131 switch (src_data) {81568132 case 0:81578157- if (rdev->irq.stat_regs.cik.disp_int & DC_HPD1_INTERRUPT) {81588158- rdev->irq.stat_regs.cik.disp_int &= ~DC_HPD1_INTERRUPT;81598159- queue_hotplug = true;81608160- DRM_DEBUG("IH: HPD1\n");81618161- }81338133+ if (!(rdev->irq.stat_regs.cik.disp_int & DC_HPD1_INTERRUPT))81348134+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");81358135+81368136+ rdev->irq.stat_regs.cik.disp_int &= ~DC_HPD1_INTERRUPT;81378137+ queue_hotplug = true;81388138+ DRM_DEBUG("IH: HPD1\n");81398139+81628140 break;81638141 case 1:81648164- if (rdev->irq.stat_regs.cik.disp_int_cont & DC_HPD2_INTERRUPT) {81658165- rdev->irq.stat_regs.cik.disp_int_cont &= ~DC_HPD2_INTERRUPT;81668166- queue_hotplug = true;81678167- DRM_DEBUG("IH: HPD2\n");81688168- }81428142+ if (!(rdev->irq.stat_regs.cik.disp_int_cont & DC_HPD2_INTERRUPT))81438143+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");81448144+81458145+ rdev->irq.stat_regs.cik.disp_int_cont &= ~DC_HPD2_INTERRUPT;81468146+ queue_hotplug = true;81478147+ DRM_DEBUG("IH: HPD2\n");81488148+81698149 break;81708150 case 2:81718171- if (rdev->irq.stat_regs.cik.disp_int_cont2 & DC_HPD3_INTERRUPT) {81728172- rdev->irq.stat_regs.cik.disp_int_cont2 &= ~DC_HPD3_INTERRUPT;81738173- queue_hotplug = true;81748174- DRM_DEBUG("IH: HPD3\n");81758175- }81518151+ if (!(rdev->irq.stat_regs.cik.disp_int_cont2 & DC_HPD3_INTERRUPT))81528152+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");81538153+81548154+ rdev->irq.stat_regs.cik.disp_int_cont2 &= ~DC_HPD3_INTERRUPT;81558155+ queue_hotplug = 
true;81568156+ DRM_DEBUG("IH: HPD3\n");81578157+81768158 break;81778159 case 3:81788178- if (rdev->irq.stat_regs.cik.disp_int_cont3 & DC_HPD4_INTERRUPT) {81798179- rdev->irq.stat_regs.cik.disp_int_cont3 &= ~DC_HPD4_INTERRUPT;81808180- queue_hotplug = true;81818181- DRM_DEBUG("IH: HPD4\n");81828182- }81608160+ if (!(rdev->irq.stat_regs.cik.disp_int_cont3 & DC_HPD4_INTERRUPT))81618161+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");81628162+81638163+ rdev->irq.stat_regs.cik.disp_int_cont3 &= ~DC_HPD4_INTERRUPT;81648164+ queue_hotplug = true;81658165+ DRM_DEBUG("IH: HPD4\n");81668166+81838167 break;81848168 case 4:81858185- if (rdev->irq.stat_regs.cik.disp_int_cont4 & DC_HPD5_INTERRUPT) {81868186- rdev->irq.stat_regs.cik.disp_int_cont4 &= ~DC_HPD5_INTERRUPT;81878187- queue_hotplug = true;81888188- DRM_DEBUG("IH: HPD5\n");81898189- }81698169+ if (!(rdev->irq.stat_regs.cik.disp_int_cont4 & DC_HPD5_INTERRUPT))81708170+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");81718171+81728172+ rdev->irq.stat_regs.cik.disp_int_cont4 &= ~DC_HPD5_INTERRUPT;81738173+ queue_hotplug = true;81748174+ DRM_DEBUG("IH: HPD5\n");81758175+81908176 break;81918177 case 5:81928192- if (rdev->irq.stat_regs.cik.disp_int_cont5 & DC_HPD6_INTERRUPT) {81938193- rdev->irq.stat_regs.cik.disp_int_cont5 &= ~DC_HPD6_INTERRUPT;81948194- queue_hotplug = true;81958195- DRM_DEBUG("IH: HPD6\n");81968196- }81788178+ if (!(rdev->irq.stat_regs.cik.disp_int_cont5 & DC_HPD6_INTERRUPT))81798179+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");81808180+81818181+ rdev->irq.stat_regs.cik.disp_int_cont5 &= ~DC_HPD6_INTERRUPT;81828182+ queue_hotplug = true;81838183+ DRM_DEBUG("IH: HPD6\n");81848184+81978185 break;81988186 case 6:81998199- if (rdev->irq.stat_regs.cik.disp_int & DC_HPD1_RX_INTERRUPT) {82008200- rdev->irq.stat_regs.cik.disp_int &= ~DC_HPD1_RX_INTERRUPT;82018201- queue_dp = true;82028202- DRM_DEBUG("IH: HPD_RX 1\n");82038203- }81878187+ if (!(rdev->irq.stat_regs.cik.disp_int & 
DC_HPD1_RX_INTERRUPT))81888188+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");81898189+81908190+ rdev->irq.stat_regs.cik.disp_int &= ~DC_HPD1_RX_INTERRUPT;81918191+ queue_dp = true;81928192+ DRM_DEBUG("IH: HPD_RX 1\n");81938193+82048194 break;82058195 case 7:82068206- if (rdev->irq.stat_regs.cik.disp_int_cont & DC_HPD2_RX_INTERRUPT) {82078207- rdev->irq.stat_regs.cik.disp_int_cont &= ~DC_HPD2_RX_INTERRUPT;82088208- queue_dp = true;82098209- DRM_DEBUG("IH: HPD_RX 2\n");82108210- }81968196+ if (!(rdev->irq.stat_regs.cik.disp_int_cont & DC_HPD2_RX_INTERRUPT))81978197+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");81988198+81998199+ rdev->irq.stat_regs.cik.disp_int_cont &= ~DC_HPD2_RX_INTERRUPT;82008200+ queue_dp = true;82018201+ DRM_DEBUG("IH: HPD_RX 2\n");82028202+82118203 break;82128204 case 8:82138213- if (rdev->irq.stat_regs.cik.disp_int_cont2 & DC_HPD3_RX_INTERRUPT) {82148214- rdev->irq.stat_regs.cik.disp_int_cont2 &= ~DC_HPD3_RX_INTERRUPT;82158215- queue_dp = true;82168216- DRM_DEBUG("IH: HPD_RX 3\n");82178217- }82058205+ if (!(rdev->irq.stat_regs.cik.disp_int_cont2 & DC_HPD3_RX_INTERRUPT))82068206+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");82078207+82088208+ rdev->irq.stat_regs.cik.disp_int_cont2 &= ~DC_HPD3_RX_INTERRUPT;82098209+ queue_dp = true;82108210+ DRM_DEBUG("IH: HPD_RX 3\n");82118211+82188212 break;82198213 case 9:82208220- if (rdev->irq.stat_regs.cik.disp_int_cont3 & DC_HPD4_RX_INTERRUPT) {82218221- rdev->irq.stat_regs.cik.disp_int_cont3 &= ~DC_HPD4_RX_INTERRUPT;82228222- queue_dp = true;82238223- DRM_DEBUG("IH: HPD_RX 4\n");82248224- }82148214+ if (!(rdev->irq.stat_regs.cik.disp_int_cont3 & DC_HPD4_RX_INTERRUPT))82158215+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");82168216+82178217+ rdev->irq.stat_regs.cik.disp_int_cont3 &= ~DC_HPD4_RX_INTERRUPT;82188218+ queue_dp = true;82198219+ DRM_DEBUG("IH: HPD_RX 4\n");82208220+82258221 break;82268222 case 10:82278227- if (rdev->irq.stat_regs.cik.disp_int_cont4 & DC_HPD5_RX_INTERRUPT) 
{82288228- rdev->irq.stat_regs.cik.disp_int_cont4 &= ~DC_HPD5_RX_INTERRUPT;82298229- queue_dp = true;82308230- DRM_DEBUG("IH: HPD_RX 5\n");82318231- }82238223+ if (!(rdev->irq.stat_regs.cik.disp_int_cont4 & DC_HPD5_RX_INTERRUPT))82248224+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");82258225+82268226+ rdev->irq.stat_regs.cik.disp_int_cont4 &= ~DC_HPD5_RX_INTERRUPT;82278227+ queue_dp = true;82288228+ DRM_DEBUG("IH: HPD_RX 5\n");82298229+82328230 break;82338231 case 11:82348234- if (rdev->irq.stat_regs.cik.disp_int_cont5 & DC_HPD6_RX_INTERRUPT) {82358235- rdev->irq.stat_regs.cik.disp_int_cont5 &= ~DC_HPD6_RX_INTERRUPT;82368236- queue_dp = true;82378237- DRM_DEBUG("IH: HPD_RX 6\n");82388238- }82328232+ if (!(rdev->irq.stat_regs.cik.disp_int_cont5 & DC_HPD6_RX_INTERRUPT))82338233+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");82348234+82358235+ rdev->irq.stat_regs.cik.disp_int_cont5 &= ~DC_HPD6_RX_INTERRUPT;82368236+ queue_dp = true;82378237+ DRM_DEBUG("IH: HPD_RX 6\n");82388238+82398239 break;82408240 default:82418241 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+217-175
drivers/gpu/drm/radeon/evergreen.c
···49244924 return IRQ_NONE;4925492549264926 rptr = rdev->ih.rptr;49274927- DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr);49274927+ DRM_DEBUG("evergreen_irq_process start: rptr %d, wptr %d\n", rptr, wptr);4928492849294929 /* Order reading of wptr vs. reading of IH ring data */49304930 rmb();···49424942 case 1: /* D1 vblank/vline */49434943 switch (src_data) {49444944 case 0: /* D1 vblank */49454945- if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT) {49464946- if (rdev->irq.crtc_vblank_int[0]) {49474947- drm_handle_vblank(rdev->ddev, 0);49484948- rdev->pm.vblank_sync = true;49494949- wake_up(&rdev->irq.vblank_queue);49504950- }49514951- if (atomic_read(&rdev->irq.pflip[0]))49524952- radeon_crtc_handle_vblank(rdev, 0);49534953- rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VBLANK_INTERRUPT;49544954- DRM_DEBUG("IH: D1 vblank\n");49454945+ if (!(rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT))49464946+ DRM_DEBUG("IH: D1 vblank - IH event w/o asserted irq bit?\n");49474947+49484948+ if (rdev->irq.crtc_vblank_int[0]) {49494949+ drm_handle_vblank(rdev->ddev, 0);49504950+ rdev->pm.vblank_sync = true;49514951+ wake_up(&rdev->irq.vblank_queue);49554952 }49534953+ if (atomic_read(&rdev->irq.pflip[0]))49544954+ radeon_crtc_handle_vblank(rdev, 0);49554955+ rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VBLANK_INTERRUPT;49564956+ DRM_DEBUG("IH: D1 vblank\n");49574957+49564958 break;49574959 case 1: /* D1 vline */49584958- if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT) {49594959- rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VLINE_INTERRUPT;49604960- DRM_DEBUG("IH: D1 vline\n");49614961- }49604960+ if (!(rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT))49614961+ DRM_DEBUG("IH: D1 vline - IH event w/o asserted irq bit?\n");49624962+49634963+ rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VLINE_INTERRUPT;49644964+ DRM_DEBUG("IH: D1 vline\n");49654965+49624966 break;49634967 
default:49644968 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);···49724968 case 2: /* D2 vblank/vline */49734969 switch (src_data) {49744970 case 0: /* D2 vblank */49754975- if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT) {49764976- if (rdev->irq.crtc_vblank_int[1]) {49774977- drm_handle_vblank(rdev->ddev, 1);49784978- rdev->pm.vblank_sync = true;49794979- wake_up(&rdev->irq.vblank_queue);49804980- }49814981- if (atomic_read(&rdev->irq.pflip[1]))49824982- radeon_crtc_handle_vblank(rdev, 1);49834983- rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;49844984- DRM_DEBUG("IH: D2 vblank\n");49714971+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT))49724972+ DRM_DEBUG("IH: D2 vblank - IH event w/o asserted irq bit?\n");49734973+49744974+ if (rdev->irq.crtc_vblank_int[1]) {49754975+ drm_handle_vblank(rdev->ddev, 1);49764976+ rdev->pm.vblank_sync = true;49774977+ wake_up(&rdev->irq.vblank_queue);49854978 }49794979+ if (atomic_read(&rdev->irq.pflip[1]))49804980+ radeon_crtc_handle_vblank(rdev, 1);49814981+ rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;49824982+ DRM_DEBUG("IH: D2 vblank\n");49834983+49864984 break;49874985 case 1: /* D2 vline */49884988- if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT) {49894989- rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VLINE_INTERRUPT;49904990- DRM_DEBUG("IH: D2 vline\n");49914991- }49864986+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT))49874987+ DRM_DEBUG("IH: D2 vline - IH event w/o asserted irq bit?\n");49884988+49894989+ rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VLINE_INTERRUPT;49904990+ DRM_DEBUG("IH: D2 vline\n");49914991+49924992 break;49934993 default:49944994 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);···50024994 case 3: /* D3 vblank/vline */50034995 switch (src_data) {50044996 case 0: /* D3 vblank */50055005- if 
(rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) {50065006- if (rdev->irq.crtc_vblank_int[2]) {50075007- drm_handle_vblank(rdev->ddev, 2);50085008- rdev->pm.vblank_sync = true;50095009- wake_up(&rdev->irq.vblank_queue);50105010- }50115011- if (atomic_read(&rdev->irq.pflip[2]))50125012- radeon_crtc_handle_vblank(rdev, 2);50135013- rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;50145014- DRM_DEBUG("IH: D3 vblank\n");49974997+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT))49984998+ DRM_DEBUG("IH: D3 vblank - IH event w/o asserted irq bit?\n");49994999+50005000+ if (rdev->irq.crtc_vblank_int[2]) {50015001+ drm_handle_vblank(rdev->ddev, 2);50025002+ rdev->pm.vblank_sync = true;50035003+ wake_up(&rdev->irq.vblank_queue);50155004 }50055005+ if (atomic_read(&rdev->irq.pflip[2]))50065006+ radeon_crtc_handle_vblank(rdev, 2);50075007+ rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;50085008+ DRM_DEBUG("IH: D3 vblank\n");50095009+50165010 break;50175011 case 1: /* D3 vline */50185018- if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT) {50195019- rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT;50205020- DRM_DEBUG("IH: D3 vline\n");50215021- }50125012+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT))50135013+ DRM_DEBUG("IH: D3 vline - IH event w/o asserted irq bit?\n");50145014+50155015+ rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT;50165016+ DRM_DEBUG("IH: D3 vline\n");50175017+50225018 break;50235019 default:50245020 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);···50325020 case 4: /* D4 vblank/vline */50335021 switch (src_data) {50345022 case 0: /* D4 vblank */50355035- if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) {50365036- if (rdev->irq.crtc_vblank_int[3]) {50375037- drm_handle_vblank(rdev->ddev, 3);50385038- rdev->pm.vblank_sync = 
true;50395039- wake_up(&rdev->irq.vblank_queue);50405040- }50415041- if (atomic_read(&rdev->irq.pflip[3]))50425042- radeon_crtc_handle_vblank(rdev, 3);50435043- rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;50445044- DRM_DEBUG("IH: D4 vblank\n");50235023+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT))50245024+ DRM_DEBUG("IH: D4 vblank - IH event w/o asserted irq bit?\n");50255025+50265026+ if (rdev->irq.crtc_vblank_int[3]) {50275027+ drm_handle_vblank(rdev->ddev, 3);50285028+ rdev->pm.vblank_sync = true;50295029+ wake_up(&rdev->irq.vblank_queue);50455030 }50315031+ if (atomic_read(&rdev->irq.pflip[3]))50325032+ radeon_crtc_handle_vblank(rdev, 3);50335033+ rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;50345034+ DRM_DEBUG("IH: D4 vblank\n");50355035+50465036 break;50475037 case 1: /* D4 vline */50485048- if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT) {50495049- rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT;50505050- DRM_DEBUG("IH: D4 vline\n");50515051- }50385038+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT))50395039+ DRM_DEBUG("IH: D4 vline - IH event w/o asserted irq bit?\n");50405040+50415041+ rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT;50425042+ DRM_DEBUG("IH: D4 vline\n");50435043+50525044 break;50535045 default:50545046 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);···50625046 case 5: /* D5 vblank/vline */50635047 switch (src_data) {50645048 case 0: /* D5 vblank */50655065- if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) {50665066- if (rdev->irq.crtc_vblank_int[4]) {50675067- drm_handle_vblank(rdev->ddev, 4);50685068- rdev->pm.vblank_sync = true;50695069- wake_up(&rdev->irq.vblank_queue);50705070- }50715071- if (atomic_read(&rdev->irq.pflip[4]))50725072- radeon_crtc_handle_vblank(rdev, 4);50735073- 
rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;50745074- DRM_DEBUG("IH: D5 vblank\n");50495049+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT))50505050+ DRM_DEBUG("IH: D5 vblank - IH event w/o asserted irq bit?\n");50515051+50525052+ if (rdev->irq.crtc_vblank_int[4]) {50535053+ drm_handle_vblank(rdev->ddev, 4);50545054+ rdev->pm.vblank_sync = true;50555055+ wake_up(&rdev->irq.vblank_queue);50755056 }50575057+ if (atomic_read(&rdev->irq.pflip[4]))50585058+ radeon_crtc_handle_vblank(rdev, 4);50595059+ rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;50605060+ DRM_DEBUG("IH: D5 vblank\n");50615061+50765062 break;50775063 case 1: /* D5 vline */50785078- if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT) {50795079- rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT;50805080- DRM_DEBUG("IH: D5 vline\n");50815081- }50645064+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT))50655065+ DRM_DEBUG("IH: D5 vline - IH event w/o asserted irq bit?\n");50665066+50675067+ rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT;50685068+ DRM_DEBUG("IH: D5 vline\n");50695069+50825070 break;50835071 default:50845072 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);···50925072 case 6: /* D6 vblank/vline */50935073 switch (src_data) {50945074 case 0: /* D6 vblank */50955095- if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) {50965096- if (rdev->irq.crtc_vblank_int[5]) {50975097- drm_handle_vblank(rdev->ddev, 5);50985098- rdev->pm.vblank_sync = true;50995099- wake_up(&rdev->irq.vblank_queue);51005100- }51015101- if (atomic_read(&rdev->irq.pflip[5]))51025102- radeon_crtc_handle_vblank(rdev, 5);51035103- rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;51045104- DRM_DEBUG("IH: D6 vblank\n");50755075+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont5 & 
LB_D6_VBLANK_INTERRUPT))50765076+ DRM_DEBUG("IH: D6 vblank - IH event w/o asserted irq bit?\n");50775077+50785078+ if (rdev->irq.crtc_vblank_int[5]) {50795079+ drm_handle_vblank(rdev->ddev, 5);50805080+ rdev->pm.vblank_sync = true;50815081+ wake_up(&rdev->irq.vblank_queue);51055082 }50835083+ if (atomic_read(&rdev->irq.pflip[5]))50845084+ radeon_crtc_handle_vblank(rdev, 5);50855085+ rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;50865086+ DRM_DEBUG("IH: D6 vblank\n");50875087+51065088 break;51075089 case 1: /* D6 vline */51085108- if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT) {51095109- rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT;51105110- DRM_DEBUG("IH: D6 vline\n");51115111- }50905090+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT))50915091+ DRM_DEBUG("IH: D6 vline - IH event w/o asserted irq bit?\n");50925092+50935093+ rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT;50945094+ DRM_DEBUG("IH: D6 vline\n");50955095+51125096 break;51135097 default:51145098 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);···51325108 case 42: /* HPD hotplug */51335109 switch (src_data) {51345110 case 0:51355135- if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT) {51365136- rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_INTERRUPT;51375137- queue_hotplug = true;51385138- DRM_DEBUG("IH: HPD1\n");51395139- }51115111+ if (!(rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT))51125112+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");51135113+51145114+ rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_INTERRUPT;51155115+ queue_hotplug = true;51165116+ DRM_DEBUG("IH: HPD1\n");51405117 break;51415118 case 1:51425142- if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT) {51435143- rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_INTERRUPT;51445144- queue_hotplug = true;51455145- DRM_DEBUG("IH: 
HPD2\n");51465146- }51195119+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT))51205120+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");51215121+51225122+ rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_INTERRUPT;51235123+ queue_hotplug = true;51245124+ DRM_DEBUG("IH: HPD2\n");51475125 break;51485126 case 2:51495149- if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT) {51505150- rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_INTERRUPT;51515151- queue_hotplug = true;51525152- DRM_DEBUG("IH: HPD3\n");51535153- }51275127+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT))51285128+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");51295129+51305130+ rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_INTERRUPT;51315131+ queue_hotplug = true;51325132+ DRM_DEBUG("IH: HPD3\n");51545133 break;51555134 case 3:51565156- if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT) {51575157- rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_INTERRUPT;51585158- queue_hotplug = true;51595159- DRM_DEBUG("IH: HPD4\n");51605160- }51355135+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT))51365136+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");51375137+51385138+ rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_INTERRUPT;51395139+ queue_hotplug = true;51405140+ DRM_DEBUG("IH: HPD4\n");51615141 break;51625142 case 4:51635163- if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT) {51645164- rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_INTERRUPT;51655165- queue_hotplug = true;51665166- DRM_DEBUG("IH: HPD5\n");51675167- }51435143+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT))51445144+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");51455145+51465146+ rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_INTERRUPT;51475147+ queue_hotplug = true;51485148+ DRM_DEBUG("IH: HPD5\n");51685149 
break;51695150 case 5:51705170- if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) {51715171- rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_INTERRUPT;51725172- queue_hotplug = true;51735173- DRM_DEBUG("IH: HPD6\n");51745174- }51515151+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT))51525152+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");51535153+51545154+ rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_INTERRUPT;51555155+ queue_hotplug = true;51565156+ DRM_DEBUG("IH: HPD6\n");51755157 break;51765158 case 6:51775177- if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_RX_INTERRUPT) {51785178- rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_RX_INTERRUPT;51795179- queue_dp = true;51805180- DRM_DEBUG("IH: HPD_RX 1\n");51815181- }51595159+ if (!(rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_RX_INTERRUPT))51605160+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");51615161+51625162+ rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_RX_INTERRUPT;51635163+ queue_dp = true;51645164+ DRM_DEBUG("IH: HPD_RX 1\n");51825165 break;51835166 case 7:51845184- if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_RX_INTERRUPT) {51855185- rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_RX_INTERRUPT;51865186- queue_dp = true;51875187- DRM_DEBUG("IH: HPD_RX 2\n");51885188- }51675167+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_RX_INTERRUPT))51685168+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");51695169+51705170+ rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_RX_INTERRUPT;51715171+ queue_dp = true;51725172+ DRM_DEBUG("IH: HPD_RX 2\n");51895173 break;51905174 case 8:51915191- if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_RX_INTERRUPT) {51925192- rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_RX_INTERRUPT;51935193- queue_dp = true;51945194- DRM_DEBUG("IH: HPD_RX 3\n");51955195- }51755175+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont2 & 
DC_HPD3_RX_INTERRUPT))51765176+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");51775177+51785178+ rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_RX_INTERRUPT;51795179+ queue_dp = true;51805180+ DRM_DEBUG("IH: HPD_RX 3\n");51965181 break;51975182 case 9:51985198- if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_RX_INTERRUPT) {51995199- rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_RX_INTERRUPT;52005200- queue_dp = true;52015201- DRM_DEBUG("IH: HPD_RX 4\n");52025202- }51835183+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_RX_INTERRUPT))51845184+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");51855185+51865186+ rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_RX_INTERRUPT;51875187+ queue_dp = true;51885188+ DRM_DEBUG("IH: HPD_RX 4\n");52035189 break;52045190 case 10:52055205- if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_RX_INTERRUPT) {52065206- rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_RX_INTERRUPT;52075207- queue_dp = true;52085208- DRM_DEBUG("IH: HPD_RX 5\n");52095209- }51915191+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_RX_INTERRUPT))51925192+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");51935193+51945194+ rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_RX_INTERRUPT;51955195+ queue_dp = true;51965196+ DRM_DEBUG("IH: HPD_RX 5\n");52105197 break;52115198 case 11:52125212- if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_RX_INTERRUPT) {52135213- rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_RX_INTERRUPT;52145214- queue_dp = true;52155215- DRM_DEBUG("IH: HPD_RX 6\n");52165216- }51995199+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_RX_INTERRUPT))52005200+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");52015201+52025202+ rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_RX_INTERRUPT;52035203+ queue_dp = true;52045204+ DRM_DEBUG("IH: HPD_RX 6\n");52175205 break;52185206 default:52195207 
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);···52355199 case 44: /* hdmi */52365200 switch (src_data) {52375201 case 0:52385238- if (rdev->irq.stat_regs.evergreen.afmt_status1 & AFMT_AZ_FORMAT_WTRIG) {52395239- rdev->irq.stat_regs.evergreen.afmt_status1 &= ~AFMT_AZ_FORMAT_WTRIG;52405240- queue_hdmi = true;52415241- DRM_DEBUG("IH: HDMI0\n");52425242- }52025202+ if (!(rdev->irq.stat_regs.evergreen.afmt_status1 & AFMT_AZ_FORMAT_WTRIG))52035203+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");52045204+52055205+ rdev->irq.stat_regs.evergreen.afmt_status1 &= ~AFMT_AZ_FORMAT_WTRIG;52065206+ queue_hdmi = true;52075207+ DRM_DEBUG("IH: HDMI0\n");52435208 break;52445209 case 1:52455245- if (rdev->irq.stat_regs.evergreen.afmt_status2 & AFMT_AZ_FORMAT_WTRIG) {52465246- rdev->irq.stat_regs.evergreen.afmt_status2 &= ~AFMT_AZ_FORMAT_WTRIG;52475247- queue_hdmi = true;52485248- DRM_DEBUG("IH: HDMI1\n");52495249- }52105210+ if (!(rdev->irq.stat_regs.evergreen.afmt_status2 & AFMT_AZ_FORMAT_WTRIG))52115211+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");52125212+52135213+ rdev->irq.stat_regs.evergreen.afmt_status2 &= ~AFMT_AZ_FORMAT_WTRIG;52145214+ queue_hdmi = true;52155215+ DRM_DEBUG("IH: HDMI1\n");52505216 break;52515217 case 2:52525252- if (rdev->irq.stat_regs.evergreen.afmt_status3 & AFMT_AZ_FORMAT_WTRIG) {52535253- rdev->irq.stat_regs.evergreen.afmt_status3 &= ~AFMT_AZ_FORMAT_WTRIG;52545254- queue_hdmi = true;52555255- DRM_DEBUG("IH: HDMI2\n");52565256- }52185218+ if (!(rdev->irq.stat_regs.evergreen.afmt_status3 & AFMT_AZ_FORMAT_WTRIG))52195219+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");52205220+52215221+ rdev->irq.stat_regs.evergreen.afmt_status3 &= ~AFMT_AZ_FORMAT_WTRIG;52225222+ queue_hdmi = true;52235223+ DRM_DEBUG("IH: HDMI2\n");52575224 break;52585225 case 3:52595259- if (rdev->irq.stat_regs.evergreen.afmt_status4 & AFMT_AZ_FORMAT_WTRIG) {52605260- rdev->irq.stat_regs.evergreen.afmt_status4 &= ~AFMT_AZ_FORMAT_WTRIG;52615261- queue_hdmi = 
true;52625262- DRM_DEBUG("IH: HDMI3\n");52635263- }52265226+ if (!(rdev->irq.stat_regs.evergreen.afmt_status4 & AFMT_AZ_FORMAT_WTRIG))52275227+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");52285228+52295229+ rdev->irq.stat_regs.evergreen.afmt_status4 &= ~AFMT_AZ_FORMAT_WTRIG;52305230+ queue_hdmi = true;52315231+ DRM_DEBUG("IH: HDMI3\n");52645232 break;52655233 case 4:52665266- if (rdev->irq.stat_regs.evergreen.afmt_status5 & AFMT_AZ_FORMAT_WTRIG) {52675267- rdev->irq.stat_regs.evergreen.afmt_status5 &= ~AFMT_AZ_FORMAT_WTRIG;52685268- queue_hdmi = true;52695269- DRM_DEBUG("IH: HDMI4\n");52705270- }52345234+ if (!(rdev->irq.stat_regs.evergreen.afmt_status5 & AFMT_AZ_FORMAT_WTRIG))52355235+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");52365236+52375237+ rdev->irq.stat_regs.evergreen.afmt_status5 &= ~AFMT_AZ_FORMAT_WTRIG;52385238+ queue_hdmi = true;52395239+ DRM_DEBUG("IH: HDMI4\n");52715240 break;52725241 case 5:52735273- if (rdev->irq.stat_regs.evergreen.afmt_status6 & AFMT_AZ_FORMAT_WTRIG) {52745274- rdev->irq.stat_regs.evergreen.afmt_status6 &= ~AFMT_AZ_FORMAT_WTRIG;52755275- queue_hdmi = true;52765276- DRM_DEBUG("IH: HDMI5\n");52775277- }52425242+ if (!(rdev->irq.stat_regs.evergreen.afmt_status6 & AFMT_AZ_FORMAT_WTRIG))52435243+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");52445244+52455245+ rdev->irq.stat_regs.evergreen.afmt_status6 &= ~AFMT_AZ_FORMAT_WTRIG;52465246+ queue_hdmi = true;52475247+ DRM_DEBUG("IH: HDMI5\n");52785248 break;52795249 default:52805250 DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data);
+14-11
drivers/gpu/drm/radeon/ni.c
···21622162 DRM_ERROR("radeon: failed initializing UVD (%d).\n", r);21632163 }2164216421652165- ring = &rdev->ring[TN_RING_TYPE_VCE1_INDEX];21662166- if (ring->ring_size)21672167- r = radeon_ring_init(rdev, ring, ring->ring_size, 0, 0x0);21652165+ if (rdev->family == CHIP_ARUBA) {21662166+ ring = &rdev->ring[TN_RING_TYPE_VCE1_INDEX];21672167+ if (ring->ring_size)21682168+ r = radeon_ring_init(rdev, ring, ring->ring_size, 0, 0x0);2168216921692169- ring = &rdev->ring[TN_RING_TYPE_VCE2_INDEX];21702170- if (ring->ring_size)21712171- r = radeon_ring_init(rdev, ring, ring->ring_size, 0, 0x0);21702170+ ring = &rdev->ring[TN_RING_TYPE_VCE2_INDEX];21712171+ if (ring->ring_size)21722172+ r = radeon_ring_init(rdev, ring, ring->ring_size, 0, 0x0);2172217321732173- if (!r)21742174- r = vce_v1_0_init(rdev);21752175- else if (r != -ENOENT)21762176- DRM_ERROR("radeon: failed initializing VCE (%d).\n", r);21742174+ if (!r)21752175+ r = vce_v1_0_init(rdev);21762176+ if (r)21772177+ DRM_ERROR("radeon: failed initializing VCE (%d).\n", r);21782178+ }2177217921782180 r = radeon_ib_pool_init(rdev);21792181 if (r) {···23982396 radeon_irq_kms_fini(rdev);23992397 uvd_v1_0_fini(rdev);24002398 radeon_uvd_fini(rdev);24012401- radeon_vce_fini(rdev);23992399+ if (rdev->family == CHIP_ARUBA)24002400+ radeon_vce_fini(rdev);24022401 cayman_pcie_gart_fini(rdev);24032402 r600_vram_scratch_fini(rdev);24042403 radeon_gem_fini(rdev);
+87-68
drivers/gpu/drm/radeon/r600.c
···40864086 case 1: /* D1 vblank/vline */40874087 switch (src_data) {40884088 case 0: /* D1 vblank */40894089- if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VBLANK_INTERRUPT) {40904090- if (rdev->irq.crtc_vblank_int[0]) {40914091- drm_handle_vblank(rdev->ddev, 0);40924092- rdev->pm.vblank_sync = true;40934093- wake_up(&rdev->irq.vblank_queue);40944094- }40954095- if (atomic_read(&rdev->irq.pflip[0]))40964096- radeon_crtc_handle_vblank(rdev, 0);40974097- rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VBLANK_INTERRUPT;40984098- DRM_DEBUG("IH: D1 vblank\n");40894089+ if (!(rdev->irq.stat_regs.r600.disp_int & LB_D1_VBLANK_INTERRUPT))40904090+ DRM_DEBUG("IH: D1 vblank - IH event w/o asserted irq bit?\n");40914091+40924092+ if (rdev->irq.crtc_vblank_int[0]) {40934093+ drm_handle_vblank(rdev->ddev, 0);40944094+ rdev->pm.vblank_sync = true;40954095+ wake_up(&rdev->irq.vblank_queue);40994096 }40974097+ if (atomic_read(&rdev->irq.pflip[0]))40984098+ radeon_crtc_handle_vblank(rdev, 0);40994099+ rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VBLANK_INTERRUPT;41004100+ DRM_DEBUG("IH: D1 vblank\n");41014101+41004102 break;41014103 case 1: /* D1 vline */41024102- if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VLINE_INTERRUPT) {41034103- rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VLINE_INTERRUPT;41044104- DRM_DEBUG("IH: D1 vline\n");41054105- }41044104+ if (!(rdev->irq.stat_regs.r600.disp_int & LB_D1_VLINE_INTERRUPT))41054105+ DRM_DEBUG("IH: D1 vline - IH event w/o asserted irq bit?\n");41064106+41074107+ rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VLINE_INTERRUPT;41084108+ DRM_DEBUG("IH: D1 vline\n");41094109+41064110 break;41074111 default:41084112 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);···41164112 case 5: /* D2 vblank/vline */41174113 switch (src_data) {41184114 case 0: /* D2 vblank */41194119- if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VBLANK_INTERRUPT) {41204120- if (rdev->irq.crtc_vblank_int[1]) {41214121- drm_handle_vblank(rdev->ddev, 1);41224122- 
rdev->pm.vblank_sync = true;41234123- wake_up(&rdev->irq.vblank_queue);41244124- }41254125- if (atomic_read(&rdev->irq.pflip[1]))41264126- radeon_crtc_handle_vblank(rdev, 1);41274127- rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VBLANK_INTERRUPT;41284128- DRM_DEBUG("IH: D2 vblank\n");41154115+ if (!(rdev->irq.stat_regs.r600.disp_int & LB_D2_VBLANK_INTERRUPT))41164116+ DRM_DEBUG("IH: D2 vblank - IH event w/o asserted irq bit?\n");41174117+41184118+ if (rdev->irq.crtc_vblank_int[1]) {41194119+ drm_handle_vblank(rdev->ddev, 1);41204120+ rdev->pm.vblank_sync = true;41214121+ wake_up(&rdev->irq.vblank_queue);41294122 }41234123+ if (atomic_read(&rdev->irq.pflip[1]))41244124+ radeon_crtc_handle_vblank(rdev, 1);41254125+ rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VBLANK_INTERRUPT;41264126+ DRM_DEBUG("IH: D2 vblank\n");41274127+41304128 break;41314129 case 1: /* D1 vline */41324132- if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VLINE_INTERRUPT) {41334133- rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VLINE_INTERRUPT;41344134- DRM_DEBUG("IH: D2 vline\n");41354135- }41304130+ if (!(rdev->irq.stat_regs.r600.disp_int & LB_D2_VLINE_INTERRUPT))41314131+ DRM_DEBUG("IH: D2 vline - IH event w/o asserted irq bit?\n");41324132+41334133+ rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VLINE_INTERRUPT;41344134+ DRM_DEBUG("IH: D2 vline\n");41354135+41364136 break;41374137 default:41384138 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);···41564148 case 19: /* HPD/DAC hotplug */41574149 switch (src_data) {41584150 case 0:41594159- if (rdev->irq.stat_regs.r600.disp_int & DC_HPD1_INTERRUPT) {41604160- rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD1_INTERRUPT;41614161- queue_hotplug = true;41624162- DRM_DEBUG("IH: HPD1\n");41634163- }41514151+ if (!(rdev->irq.stat_regs.r600.disp_int & DC_HPD1_INTERRUPT))41524152+ DRM_DEBUG("IH: HPD1 - IH event w/o asserted irq bit?\n");41534153+41544154+ rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD1_INTERRUPT;41554155+ queue_hotplug = true;41564156+ 
DRM_DEBUG("IH: HPD1\n");41644157 break;41654158 case 1:41664166- if (rdev->irq.stat_regs.r600.disp_int & DC_HPD2_INTERRUPT) {41674167- rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD2_INTERRUPT;41684168- queue_hotplug = true;41694169- DRM_DEBUG("IH: HPD2\n");41704170- }41594159+ if (!(rdev->irq.stat_regs.r600.disp_int & DC_HPD2_INTERRUPT))41604160+ DRM_DEBUG("IH: HPD2 - IH event w/o asserted irq bit?\n");41614161+41624162+ rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD2_INTERRUPT;41634163+ queue_hotplug = true;41644164+ DRM_DEBUG("IH: HPD2\n");41714165 break;41724166 case 4:41734173- if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD3_INTERRUPT) {41744174- rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD3_INTERRUPT;41754175- queue_hotplug = true;41764176- DRM_DEBUG("IH: HPD3\n");41774177- }41674167+ if (!(rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD3_INTERRUPT))41684168+ DRM_DEBUG("IH: HPD3 - IH event w/o asserted irq bit?\n");41694169+41704170+ rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD3_INTERRUPT;41714171+ queue_hotplug = true;41724172+ DRM_DEBUG("IH: HPD3\n");41784173 break;41794174 case 5:41804180- if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD4_INTERRUPT) {41814181- rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD4_INTERRUPT;41824182- queue_hotplug = true;41834183- DRM_DEBUG("IH: HPD4\n");41844184- }41754175+ if (!(rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD4_INTERRUPT))41764176+ DRM_DEBUG("IH: HPD4 - IH event w/o asserted irq bit?\n");41774177+41784178+ rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD4_INTERRUPT;41794179+ queue_hotplug = true;41804180+ DRM_DEBUG("IH: HPD4\n");41854181 break;41864182 case 10:41874187- if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD5_INTERRUPT) {41884188- rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD5_INTERRUPT;41894189- queue_hotplug = true;41904190- DRM_DEBUG("IH: HPD5\n");41914191- }41834183+ if (!(rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD5_INTERRUPT))41844184+ DRM_DEBUG("IH: HPD5 - IH 
event w/o asserted irq bit?\n");41854185+41864186+ rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD5_INTERRUPT;41874187+ queue_hotplug = true;41884188+ DRM_DEBUG("IH: HPD5\n");41924189 break;41934190 case 12:41944194- if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT) {41954195- rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD6_INTERRUPT;41964196- queue_hotplug = true;41974197- DRM_DEBUG("IH: HPD6\n");41984198- }41914191+ if (!(rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT))41924192+ DRM_DEBUG("IH: HPD6 - IH event w/o asserted irq bit?\n");41934193+41944194+ rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD6_INTERRUPT;41954195+ queue_hotplug = true;41964196+ DRM_DEBUG("IH: HPD6\n");41974197+41994198 break;42004199 default:42014200 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);···42124197 case 21: /* hdmi */42134198 switch (src_data) {42144199 case 4:42154215- if (rdev->irq.stat_regs.r600.hdmi0_status & HDMI0_AZ_FORMAT_WTRIG) {42164216- rdev->irq.stat_regs.r600.hdmi0_status &= ~HDMI0_AZ_FORMAT_WTRIG;42174217- queue_hdmi = true;42184218- DRM_DEBUG("IH: HDMI0\n");42194219- }42004200+ if (!(rdev->irq.stat_regs.r600.hdmi0_status & HDMI0_AZ_FORMAT_WTRIG))42014201+ DRM_DEBUG("IH: HDMI0 - IH event w/o asserted irq bit?\n");42024202+42034203+ rdev->irq.stat_regs.r600.hdmi0_status &= ~HDMI0_AZ_FORMAT_WTRIG;42044204+ queue_hdmi = true;42054205+ DRM_DEBUG("IH: HDMI0\n");42064206+42204207 break;42214208 case 5:42224222- if (rdev->irq.stat_regs.r600.hdmi1_status & HDMI0_AZ_FORMAT_WTRIG) {42234223- rdev->irq.stat_regs.r600.hdmi1_status &= ~HDMI0_AZ_FORMAT_WTRIG;42244224- queue_hdmi = true;42254225- DRM_DEBUG("IH: HDMI1\n");42264226- }42094209+ if (!(rdev->irq.stat_regs.r600.hdmi1_status & HDMI0_AZ_FORMAT_WTRIG))42104210+ DRM_DEBUG("IH: HDMI1 - IH event w/o asserted irq bit?\n");42114211+42124212+ rdev->irq.stat_regs.r600.hdmi1_status &= ~HDMI0_AZ_FORMAT_WTRIG;42134213+ queue_hdmi = true;42144214+ DRM_DEBUG("IH: 
HDMI1\n");42154215+42274216 break;42284217 default:42294218 DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data);
+1-1
drivers/gpu/drm/radeon/r600_cp.c
···24832483 struct drm_buf *buf;24842484 u32 *buffer;24852485 const u8 __user *data;24862486- int size, pass_size;24862486+ unsigned int size, pass_size;24872487 u64 src_offset, dst_offset;2488248824892489 if (!radeon_check_offset(dev_priv, tex->offset)) {
+44-65
drivers/gpu/drm/radeon/radeon_cursor.c
···9191 struct radeon_device *rdev = crtc->dev->dev_private;92929393 if (ASIC_IS_DCE4(rdev)) {9494+ WREG32(EVERGREEN_CUR_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset,9595+ upper_32_bits(radeon_crtc->cursor_addr));9696+ WREG32(EVERGREEN_CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset,9797+ lower_32_bits(radeon_crtc->cursor_addr));9498 WREG32(RADEON_MM_INDEX, EVERGREEN_CUR_CONTROL + radeon_crtc->crtc_offset);9599 WREG32(RADEON_MM_DATA, EVERGREEN_CURSOR_EN |96100 EVERGREEN_CURSOR_MODE(EVERGREEN_CURSOR_24_8_PRE_MULT) |97101 EVERGREEN_CURSOR_URGENT_CONTROL(EVERGREEN_CURSOR_URGENT_1_2));98102 } else if (ASIC_IS_AVIVO(rdev)) {103103+ if (rdev->family >= CHIP_RV770) {104104+ if (radeon_crtc->crtc_id)105105+ WREG32(R700_D2CUR_SURFACE_ADDRESS_HIGH,106106+ upper_32_bits(radeon_crtc->cursor_addr));107107+ else108108+ WREG32(R700_D1CUR_SURFACE_ADDRESS_HIGH,109109+ upper_32_bits(radeon_crtc->cursor_addr));110110+ }111111+112112+ WREG32(AVIVO_D1CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset,113113+ lower_32_bits(radeon_crtc->cursor_addr));99114 WREG32(RADEON_MM_INDEX, AVIVO_D1CUR_CONTROL + radeon_crtc->crtc_offset);100115 WREG32(RADEON_MM_DATA, AVIVO_D1CURSOR_EN |101116 (AVIVO_D1CURSOR_MODE_24BPP << AVIVO_D1CURSOR_MODE_SHIFT));102117 } else {118118+ /* offset is from DISP(2)_BASE_ADDRESS */119119+ WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset,120120+ radeon_crtc->cursor_addr - radeon_crtc->legacy_display_base_addr);121121+103122 switch (radeon_crtc->crtc_id) {104123 case 0:105124 WREG32(RADEON_MM_INDEX, RADEON_CRTC_GEN_CNTL);···224205 | (x << 16)225206 | y));226207 /* offset is from DISP(2)_BASE_ADDRESS */227227- WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset, (radeon_crtc->legacy_cursor_offset +228228- (yorigin * 256)));208208+ WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset,209209+ radeon_crtc->cursor_addr - radeon_crtc->legacy_display_base_addr +210210+ yorigin * 256);229211 }230212231213 radeon_crtc->cursor_x = x;···247227 return 
ret;248228}249229250250-static int radeon_set_cursor(struct drm_crtc *crtc, struct drm_gem_object *obj)251251-{252252- struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);253253- struct radeon_device *rdev = crtc->dev->dev_private;254254- struct radeon_bo *robj = gem_to_radeon_bo(obj);255255- uint64_t gpu_addr;256256- int ret;257257-258258- ret = radeon_bo_reserve(robj, false);259259- if (unlikely(ret != 0))260260- goto fail;261261- /* Only 27 bit offset for legacy cursor */262262- ret = radeon_bo_pin_restricted(robj, RADEON_GEM_DOMAIN_VRAM,263263- ASIC_IS_AVIVO(rdev) ? 0 : 1 << 27,264264- &gpu_addr);265265- radeon_bo_unreserve(robj);266266- if (ret)267267- goto fail;268268-269269- if (ASIC_IS_DCE4(rdev)) {270270- WREG32(EVERGREEN_CUR_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset,271271- upper_32_bits(gpu_addr));272272- WREG32(EVERGREEN_CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset,273273- gpu_addr & 0xffffffff);274274- } else if (ASIC_IS_AVIVO(rdev)) {275275- if (rdev->family >= CHIP_RV770) {276276- if (radeon_crtc->crtc_id)277277- WREG32(R700_D2CUR_SURFACE_ADDRESS_HIGH, upper_32_bits(gpu_addr));278278- else279279- WREG32(R700_D1CUR_SURFACE_ADDRESS_HIGH, upper_32_bits(gpu_addr));280280- }281281- WREG32(AVIVO_D1CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset,282282- gpu_addr & 0xffffffff);283283- } else {284284- radeon_crtc->legacy_cursor_offset = gpu_addr - radeon_crtc->legacy_display_base_addr;285285- /* offset is from DISP(2)_BASE_ADDRESS */286286- WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset, radeon_crtc->legacy_cursor_offset);287287- }288288-289289- return 0;290290-291291-fail:292292- drm_gem_object_unreference_unlocked(obj);293293-294294- return ret;295295-}296296-297230int radeon_crtc_cursor_set2(struct drm_crtc *crtc,298231 struct drm_file *file_priv,299232 uint32_t handle,···256283 int32_t hot_y)257284{258285 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);286286+ struct radeon_device *rdev = crtc->dev->dev_private;259287 struct 
drm_gem_object *obj;288288+ struct radeon_bo *robj;260289 int ret;261290262291 if (!handle) {···280305 return -ENOENT;281306 }282307308308+ robj = gem_to_radeon_bo(obj);309309+ ret = radeon_bo_reserve(robj, false);310310+ if (ret != 0) {311311+ drm_gem_object_unreference_unlocked(obj);312312+ return ret;313313+ }314314+ /* Only 27 bit offset for legacy cursor */315315+ ret = radeon_bo_pin_restricted(robj, RADEON_GEM_DOMAIN_VRAM,316316+ ASIC_IS_AVIVO(rdev) ? 0 : 1 << 27,317317+ &radeon_crtc->cursor_addr);318318+ radeon_bo_unreserve(robj);319319+ if (ret) {320320+ DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret);321321+ drm_gem_object_unreference_unlocked(obj);322322+ return ret;323323+ }324324+283325 radeon_crtc->cursor_width = width;284326 radeon_crtc->cursor_height = height;285327···315323 radeon_crtc->cursor_hot_y = hot_y;316324 }317325318318- ret = radeon_set_cursor(crtc, obj);319319-320320- if (ret)321321- DRM_ERROR("radeon_set_cursor returned %d, not changing cursor\n",322322- ret);323323- else324324- radeon_show_cursor(crtc);326326+ radeon_show_cursor(crtc);325327326328 radeon_lock_cursor(crtc, false);327329···327341 radeon_bo_unpin(robj);328342 radeon_bo_unreserve(robj);329343 }330330- if (radeon_crtc->cursor_bo != obj)331331- drm_gem_object_unreference_unlocked(radeon_crtc->cursor_bo);344344+ drm_gem_object_unreference_unlocked(radeon_crtc->cursor_bo);332345 }333346334347 radeon_crtc->cursor_bo = obj;···345360void radeon_cursor_reset(struct drm_crtc *crtc)346361{347362 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);348348- int ret;349363350364 if (radeon_crtc->cursor_bo) {351365 radeon_lock_cursor(crtc, true);···352368 radeon_cursor_move_locked(crtc, radeon_crtc->cursor_x,353369 radeon_crtc->cursor_y);354370355355- ret = radeon_set_cursor(crtc, radeon_crtc->cursor_bo);356356- if (ret)357357- DRM_ERROR("radeon_set_cursor returned %d, not showing "358358- "cursor\n", ret);359359- else360360- radeon_show_cursor(crtc);371371+ 
radeon_show_cursor(crtc);361372362373 radeon_lock_cursor(crtc, false);363374 }
+52-14
drivers/gpu/drm/radeon/radeon_device.c
···10801080}1081108110821082/**10831083+ * Determine a sensible default GART size according to ASIC family.10841084+ *10851085+ * @family ASIC family name10861086+ */10871087+static int radeon_gart_size_auto(enum radeon_family family)10881088+{10891089+ /* default to a larger gart size on newer asics */10901090+ if (family >= CHIP_TAHITI)10911091+ return 2048;10921092+ else if (family >= CHIP_RV770)10931093+ return 1024;10941094+ else10951095+ return 512;10961096+}10971097+10981098+/**10831099 * radeon_check_arguments - validate module params10841100 *10851101 * @rdev: radeon_device pointer···11131097 }1114109811151099 if (radeon_gart_size == -1) {11161116- /* default to a larger gart size on newer asics */11171117- if (rdev->family >= CHIP_RV770)11181118- radeon_gart_size = 1024;11191119- else11201120- radeon_gart_size = 512;11001100+ radeon_gart_size = radeon_gart_size_auto(rdev->family);11211101 }11221102 /* gtt size must be power of two and greater or equal to 32M */11231103 if (radeon_gart_size < 32) {11241104 dev_warn(rdev->dev, "gart size (%d) too small\n",11251105 radeon_gart_size);11261126- if (rdev->family >= CHIP_RV770)11271127- radeon_gart_size = 1024;11281128- else11291129- radeon_gart_size = 512;11061106+ radeon_gart_size = radeon_gart_size_auto(rdev->family);11301107 } else if (!radeon_check_pot_argument(radeon_gart_size)) {11311108 dev_warn(rdev->dev, "gart size (%d) must be a power of 2\n",11321109 radeon_gart_size);11331133- if (rdev->family >= CHIP_RV770)11341134- radeon_gart_size = 1024;11351135- else11361136- radeon_gart_size = 512;11101110+ radeon_gart_size = radeon_gart_size_auto(rdev->family);11371111 }11381112 rdev->mc.gtt_size = (uint64_t)radeon_gart_size << 20;11391113···15781572 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);15791573 }1580157415811581- /* unpin the front buffers */15751575+ /* unpin the front buffers and cursors */15821576 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {15771577+ struct radeon_crtc 
*radeon_crtc = to_radeon_crtc(crtc);15831578 struct radeon_framebuffer *rfb = to_radeon_framebuffer(crtc->primary->fb);15841579 struct radeon_bo *robj;15801580+15811581+ if (radeon_crtc->cursor_bo) {15821582+ struct radeon_bo *robj = gem_to_radeon_bo(radeon_crtc->cursor_bo);15831583+ r = radeon_bo_reserve(robj, false);15841584+ if (r == 0) {15851585+ radeon_bo_unpin(robj);15861586+ radeon_bo_unreserve(robj);15871587+ }15881588+ }1585158915861590 if (rfb == NULL || rfb->obj == NULL) {15871591 continue;···16551639{16561640 struct drm_connector *connector;16571641 struct radeon_device *rdev = dev->dev_private;16421642+ struct drm_crtc *crtc;16581643 int r;1659164416601645 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)···16941677 }1695167816961679 radeon_restore_bios_scratch_regs(rdev);16801680+16811681+ /* pin cursors */16821682+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {16831683+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);16841684+16851685+ if (radeon_crtc->cursor_bo) {16861686+ struct radeon_bo *robj = gem_to_radeon_bo(radeon_crtc->cursor_bo);16871687+ r = radeon_bo_reserve(robj, false);16881688+ if (r == 0) {16891689+ /* Only 27 bit offset for legacy cursor */16901690+ r = radeon_bo_pin_restricted(robj,16911691+ RADEON_GEM_DOMAIN_VRAM,16921692+ ASIC_IS_AVIVO(rdev) ?16931693+ 0 : 1 << 27,16941694+ &radeon_crtc->cursor_addr);16951695+ if (r != 0)16961696+ DRM_ERROR("Failed to pin cursor BO (%d)\n", r);16971697+ radeon_bo_unreserve(robj);16981698+ }16991699+ }17001700+ }1697170116981702 /* init dig PHYs, disp eng pll */16991703 if (rdev->is_atom_bios) {
+1
drivers/gpu/drm/radeon/radeon_fb.c
···257257 }258258259259 info->par = rfbdev;260260+ info->skip_vt_switch = true;260261261262 ret = radeon_framebuffer_init(rdev->ddev, &rfbdev->rfb, &mode_cmd, gobj);262263 if (ret) {
+9-3
drivers/gpu/drm/radeon/radeon_gem.c
···428428int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,429429 struct drm_file *filp)430430{431431- struct radeon_device *rdev = dev->dev_private;432431 struct drm_radeon_gem_busy *args = data;433432 struct drm_gem_object *gobj;434433 struct radeon_bo *robj;···439440 return -ENOENT;440441 }441442 robj = gem_to_radeon_bo(gobj);442442- r = radeon_bo_wait(robj, &cur_placement, true);443443+444444+ r = reservation_object_test_signaled_rcu(robj->tbo.resv, true);445445+ if (r == 0)446446+ r = -EBUSY;447447+ else448448+ r = 0;449449+450450+ cur_placement = ACCESS_ONCE(robj->tbo.mem.mem_type);443451 args->domain = radeon_mem_type_to_domain(cur_placement);444452 drm_gem_object_unreference_unlocked(gobj);445445- r = radeon_gem_handle_lockup(rdev, r);446453 return r;447454}448455···476471 r = ret;477472478473 /* Flush HDP cache via MMIO if necessary */474474+ cur_placement = ACCESS_ONCE(robj->tbo.mem.mem_type);479475 if (rdev->asic->mmio_hdp_flush &&480476 radeon_mem_type_to_domain(cur_placement) == RADEON_GEM_DOMAIN_VRAM)481477 robj->rdev->asic->mmio_hdp_flush(rdev);
-1
drivers/gpu/drm/radeon/radeon_mode.h
···343343 int max_cursor_width;344344 int max_cursor_height;345345 uint32_t legacy_display_base_addr;346346- uint32_t legacy_cursor_offset;347346 enum radeon_rmx_type rmx_type;348347 u8 h_border;349348 u8 v_border;
+19-21
drivers/gpu/drm/radeon/radeon_vm.c
···493493 }494494495495 if (bo_va->it.start || bo_va->it.last) {496496- spin_lock(&vm->status_lock);497497- if (list_empty(&bo_va->vm_status)) {498498- /* add a clone of the bo_va to clear the old address */499499- struct radeon_bo_va *tmp;500500- spin_unlock(&vm->status_lock);501501- tmp = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL);502502- if (!tmp) {503503- mutex_unlock(&vm->mutex);504504- r = -ENOMEM;505505- goto error_unreserve;506506- }507507- tmp->it.start = bo_va->it.start;508508- tmp->it.last = bo_va->it.last;509509- tmp->vm = vm;510510- tmp->bo = radeon_bo_ref(bo_va->bo);511511- spin_lock(&vm->status_lock);512512- list_add(&tmp->vm_status, &vm->freed);496496+ /* add a clone of the bo_va to clear the old address */497497+ struct radeon_bo_va *tmp;498498+ tmp = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL);499499+ if (!tmp) {500500+ mutex_unlock(&vm->mutex);501501+ r = -ENOMEM;502502+ goto error_unreserve;513503 }514514- spin_unlock(&vm->status_lock);504504+ tmp->it.start = bo_va->it.start;505505+ tmp->it.last = bo_va->it.last;506506+ tmp->vm = vm;507507+ tmp->bo = radeon_bo_ref(bo_va->bo);515508516509 interval_tree_remove(&bo_va->it, &vm->va);510510+ spin_lock(&vm->status_lock);517511 bo_va->it.start = 0;518512 bo_va->it.last = 0;513513+ list_del_init(&bo_va->vm_status);514514+ list_add(&tmp->vm_status, &vm->freed);515515+ spin_unlock(&vm->status_lock);519516 }520517521518 if (soffset || eoffset) {519519+ spin_lock(&vm->status_lock);522520 bo_va->it.start = soffset;523521 bo_va->it.last = eoffset - 1;524524- interval_tree_insert(&bo_va->it, &vm->va);525525- spin_lock(&vm->status_lock);526522 list_add(&bo_va->vm_status, &vm->cleared);527523 spin_unlock(&vm->status_lock);524524+ interval_tree_insert(&bo_va->it, &vm->va);528525 }529526530527 bo_va->flags = flags;···1155115811561159 list_for_each_entry(bo_va, &bo->va, bo_list) {11571160 spin_lock(&bo_va->vm->status_lock);11581158- if (list_empty(&bo_va->vm_status))11611161+ if 
(list_empty(&bo_va->vm_status) &&11621162+ (bo_va->it.start || bo_va->it.last))11591163 list_add(&bo_va->vm_status, &bo_va->vm->invalidated);11601164 spin_unlock(&bo_va->vm->status_lock);11611165 }
+192-144
drivers/gpu/drm/radeon/si.c
···64666466 case 1: /* D1 vblank/vline */64676467 switch (src_data) {64686468 case 0: /* D1 vblank */64696469- if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT) {64706470- if (rdev->irq.crtc_vblank_int[0]) {64716471- drm_handle_vblank(rdev->ddev, 0);64726472- rdev->pm.vblank_sync = true;64736473- wake_up(&rdev->irq.vblank_queue);64746474- }64756475- if (atomic_read(&rdev->irq.pflip[0]))64766476- radeon_crtc_handle_vblank(rdev, 0);64776477- rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VBLANK_INTERRUPT;64786478- DRM_DEBUG("IH: D1 vblank\n");64696469+ if (!(rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT))64706470+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");64716471+64726472+ if (rdev->irq.crtc_vblank_int[0]) {64736473+ drm_handle_vblank(rdev->ddev, 0);64746474+ rdev->pm.vblank_sync = true;64756475+ wake_up(&rdev->irq.vblank_queue);64796476 }64776477+ if (atomic_read(&rdev->irq.pflip[0]))64786478+ radeon_crtc_handle_vblank(rdev, 0);64796479+ rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VBLANK_INTERRUPT;64806480+ DRM_DEBUG("IH: D1 vblank\n");64816481+64806482 break;64816483 case 1: /* D1 vline */64826482- if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT) {64836483- rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VLINE_INTERRUPT;64846484- DRM_DEBUG("IH: D1 vline\n");64856485- }64846484+ if (!(rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT))64856485+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");64866486+64876487+ rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VLINE_INTERRUPT;64886488+ DRM_DEBUG("IH: D1 vline\n");64896489+64866490 break;64876491 default:64886492 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);···64966492 case 2: /* D2 vblank/vline */64976493 switch (src_data) {64986494 case 0: /* D2 vblank */64996499- if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT) {65006500- if (rdev->irq.crtc_vblank_int[1]) {65016501- 
drm_handle_vblank(rdev->ddev, 1);65026502- rdev->pm.vblank_sync = true;65036503- wake_up(&rdev->irq.vblank_queue);65046504- }65056505- if (atomic_read(&rdev->irq.pflip[1]))65066506- radeon_crtc_handle_vblank(rdev, 1);65076507- rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;65086508- DRM_DEBUG("IH: D2 vblank\n");64956495+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT))64966496+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");64976497+64986498+ if (rdev->irq.crtc_vblank_int[1]) {64996499+ drm_handle_vblank(rdev->ddev, 1);65006500+ rdev->pm.vblank_sync = true;65016501+ wake_up(&rdev->irq.vblank_queue);65096502 }65036503+ if (atomic_read(&rdev->irq.pflip[1]))65046504+ radeon_crtc_handle_vblank(rdev, 1);65056505+ rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;65066506+ DRM_DEBUG("IH: D2 vblank\n");65076507+65106508 break;65116509 case 1: /* D2 vline */65126512- if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT) {65136513- rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VLINE_INTERRUPT;65146514- DRM_DEBUG("IH: D2 vline\n");65156515- }65106510+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT))65116511+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");65126512+65136513+ rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VLINE_INTERRUPT;65146514+ DRM_DEBUG("IH: D2 vline\n");65156515+65166516 break;65176517 default:65186518 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);···65266518 case 3: /* D3 vblank/vline */65276519 switch (src_data) {65286520 case 0: /* D3 vblank */65296529- if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) {65306530- if (rdev->irq.crtc_vblank_int[2]) {65316531- drm_handle_vblank(rdev->ddev, 2);65326532- rdev->pm.vblank_sync = true;65336533- wake_up(&rdev->irq.vblank_queue);65346534- }65356535- if (atomic_read(&rdev->irq.pflip[2]))65366536- radeon_crtc_handle_vblank(rdev, 
2);65376537- rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;65386538- DRM_DEBUG("IH: D3 vblank\n");65216521+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT))65226522+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");65236523+65246524+ if (rdev->irq.crtc_vblank_int[2]) {65256525+ drm_handle_vblank(rdev->ddev, 2);65266526+ rdev->pm.vblank_sync = true;65276527+ wake_up(&rdev->irq.vblank_queue);65396528 }65296529+ if (atomic_read(&rdev->irq.pflip[2]))65306530+ radeon_crtc_handle_vblank(rdev, 2);65316531+ rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;65326532+ DRM_DEBUG("IH: D3 vblank\n");65336533+65406534 break;65416535 case 1: /* D3 vline */65426542- if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT) {65436543- rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT;65446544- DRM_DEBUG("IH: D3 vline\n");65456545- }65366536+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT))65376537+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");65386538+65396539+ rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT;65406540+ DRM_DEBUG("IH: D3 vline\n");65416541+65466542 break;65476543 default:65486544 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);···65566544 case 4: /* D4 vblank/vline */65576545 switch (src_data) {65586546 case 0: /* D4 vblank */65596559- if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) {65606560- if (rdev->irq.crtc_vblank_int[3]) {65616561- drm_handle_vblank(rdev->ddev, 3);65626562- rdev->pm.vblank_sync = true;65636563- wake_up(&rdev->irq.vblank_queue);65646564- }65656565- if (atomic_read(&rdev->irq.pflip[3]))65666566- radeon_crtc_handle_vblank(rdev, 3);65676567- rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;65686568- DRM_DEBUG("IH: D4 vblank\n");65476547+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont3 & 
LB_D4_VBLANK_INTERRUPT))65486548+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");65496549+65506550+ if (rdev->irq.crtc_vblank_int[3]) {65516551+ drm_handle_vblank(rdev->ddev, 3);65526552+ rdev->pm.vblank_sync = true;65536553+ wake_up(&rdev->irq.vblank_queue);65696554 }65556555+ if (atomic_read(&rdev->irq.pflip[3]))65566556+ radeon_crtc_handle_vblank(rdev, 3);65576557+ rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;65586558+ DRM_DEBUG("IH: D4 vblank\n");65596559+65706560 break;65716561 case 1: /* D4 vline */65726572- if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT) {65736573- rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT;65746574- DRM_DEBUG("IH: D4 vline\n");65756575- }65626562+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT))65636563+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");65646564+65656565+ rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT;65666566+ DRM_DEBUG("IH: D4 vline\n");65676567+65766568 break;65776569 default:65786570 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);···65866570 case 5: /* D5 vblank/vline */65876571 switch (src_data) {65886572 case 0: /* D5 vblank */65896589- if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) {65906590- if (rdev->irq.crtc_vblank_int[4]) {65916591- drm_handle_vblank(rdev->ddev, 4);65926592- rdev->pm.vblank_sync = true;65936593- wake_up(&rdev->irq.vblank_queue);65946594- }65956595- if (atomic_read(&rdev->irq.pflip[4]))65966596- radeon_crtc_handle_vblank(rdev, 4);65976597- rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;65986598- DRM_DEBUG("IH: D5 vblank\n");65736573+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT))65746574+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");65756575+65766576+ if (rdev->irq.crtc_vblank_int[4]) {65776577+ drm_handle_vblank(rdev->ddev, 4);65786578+ rdev->pm.vblank_sync = 
true;65796579+ wake_up(&rdev->irq.vblank_queue);65996580 }65816581+ if (atomic_read(&rdev->irq.pflip[4]))65826582+ radeon_crtc_handle_vblank(rdev, 4);65836583+ rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;65846584+ DRM_DEBUG("IH: D5 vblank\n");65856585+66006586 break;66016587 case 1: /* D5 vline */66026602- if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT) {66036603- rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT;66046604- DRM_DEBUG("IH: D5 vline\n");66056605- }65886588+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT))65896589+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");65906590+65916591+ rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT;65926592+ DRM_DEBUG("IH: D5 vline\n");65936593+66066594 break;66076595 default:66086596 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);···66166596 case 6: /* D6 vblank/vline */66176597 switch (src_data) {66186598 case 0: /* D6 vblank */66196619- if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) {66206620- if (rdev->irq.crtc_vblank_int[5]) {66216621- drm_handle_vblank(rdev->ddev, 5);66226622- rdev->pm.vblank_sync = true;66236623- wake_up(&rdev->irq.vblank_queue);66246624- }66256625- if (atomic_read(&rdev->irq.pflip[5]))66266626- radeon_crtc_handle_vblank(rdev, 5);66276627- rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;66286628- DRM_DEBUG("IH: D6 vblank\n");65996599+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT))66006600+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");66016601+66026602+ if (rdev->irq.crtc_vblank_int[5]) {66036603+ drm_handle_vblank(rdev->ddev, 5);66046604+ rdev->pm.vblank_sync = true;66056605+ wake_up(&rdev->irq.vblank_queue);66296606 }66076607+ if (atomic_read(&rdev->irq.pflip[5]))66086608+ radeon_crtc_handle_vblank(rdev, 5);66096609+ rdev->irq.stat_regs.evergreen.disp_int_cont5 &= 
~LB_D6_VBLANK_INTERRUPT;66106610+ DRM_DEBUG("IH: D6 vblank\n");66116611+66306612 break;66316613 case 1: /* D6 vline */66326632- if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT) {66336633- rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT;66346634- DRM_DEBUG("IH: D6 vline\n");66356635- }66146614+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT))66156615+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");66166616+66176617+ rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT;66186618+ DRM_DEBUG("IH: D6 vline\n");66196619+66366620 break;66376621 default:66386622 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);···66566632 case 42: /* HPD hotplug */66576633 switch (src_data) {66586634 case 0:66596659- if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT) {66606660- rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_INTERRUPT;66616661- queue_hotplug = true;66626662- DRM_DEBUG("IH: HPD1\n");66636663- }66356635+ if (!(rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT))66366636+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");66376637+66386638+ rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_INTERRUPT;66396639+ queue_hotplug = true;66406640+ DRM_DEBUG("IH: HPD1\n");66416641+66646642 break;66656643 case 1:66666666- if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT) {66676667- rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_INTERRUPT;66686668- queue_hotplug = true;66696669- DRM_DEBUG("IH: HPD2\n");66706670- }66446644+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT))66456645+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");66466646+66476647+ rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_INTERRUPT;66486648+ queue_hotplug = true;66496649+ DRM_DEBUG("IH: HPD2\n");66506650+66716651 break;66726652 case 2:66736673- if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT) {66746674- 
rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_INTERRUPT;66756675- queue_hotplug = true;66766676- DRM_DEBUG("IH: HPD3\n");66776677- }66536653+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT))66546654+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");66556655+66566656+ rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_INTERRUPT;66576657+ queue_hotplug = true;66586658+ DRM_DEBUG("IH: HPD3\n");66596659+66786660 break;66796661 case 3:66806680- if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT) {66816681- rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_INTERRUPT;66826682- queue_hotplug = true;66836683- DRM_DEBUG("IH: HPD4\n");66846684- }66626662+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT))66636663+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");66646664+66656665+ rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_INTERRUPT;66666666+ queue_hotplug = true;66676667+ DRM_DEBUG("IH: HPD4\n");66686668+66856669 break;66866670 case 4:66876687- if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT) {66886688- rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_INTERRUPT;66896689- queue_hotplug = true;66906690- DRM_DEBUG("IH: HPD5\n");66916691- }66716671+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT))66726672+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");66736673+66746674+ rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_INTERRUPT;66756675+ queue_hotplug = true;66766676+ DRM_DEBUG("IH: HPD5\n");66776677+66926678 break;66936679 case 5:66946694- if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) {66956695- rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_INTERRUPT;66966696- queue_hotplug = true;66976697- DRM_DEBUG("IH: HPD6\n");66986698- }66806680+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT))66816681+ DRM_DEBUG("IH: IH event w/o asserted irq 
bit?\n");66826682+66836683+ rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_INTERRUPT;66846684+ queue_hotplug = true;66856685+ DRM_DEBUG("IH: HPD6\n");66866686+66996687 break;67006688 case 6:67016701- if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_RX_INTERRUPT) {67026702- rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_RX_INTERRUPT;67036703- queue_dp = true;67046704- DRM_DEBUG("IH: HPD_RX 1\n");67056705- }66896689+ if (!(rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_RX_INTERRUPT))66906690+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");66916691+66926692+ rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_RX_INTERRUPT;66936693+ queue_dp = true;66946694+ DRM_DEBUG("IH: HPD_RX 1\n");66956695+67066696 break;67076697 case 7:67086708- if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_RX_INTERRUPT) {67096709- rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_RX_INTERRUPT;67106710- queue_dp = true;67116711- DRM_DEBUG("IH: HPD_RX 2\n");67126712- }66986698+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_RX_INTERRUPT))66996699+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");67006700+67016701+ rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_RX_INTERRUPT;67026702+ queue_dp = true;67036703+ DRM_DEBUG("IH: HPD_RX 2\n");67046704+67136705 break;67146706 case 8:67156715- if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_RX_INTERRUPT) {67166716- rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_RX_INTERRUPT;67176717- queue_dp = true;67186718- DRM_DEBUG("IH: HPD_RX 3\n");67196719- }67076707+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_RX_INTERRUPT))67086708+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");67096709+67106710+ rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_RX_INTERRUPT;67116711+ queue_dp = true;67126712+ DRM_DEBUG("IH: HPD_RX 3\n");67136713+67206714 break;67216715 case 9:67226722- if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_RX_INTERRUPT) {67236723- 
rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_RX_INTERRUPT;67246724- queue_dp = true;67256725- DRM_DEBUG("IH: HPD_RX 4\n");67266726- }67166716+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_RX_INTERRUPT))67176717+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");67186718+67196719+ rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_RX_INTERRUPT;67206720+ queue_dp = true;67216721+ DRM_DEBUG("IH: HPD_RX 4\n");67226722+67276723 break;67286724 case 10:67296729- if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_RX_INTERRUPT) {67306730- rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_RX_INTERRUPT;67316731- queue_dp = true;67326732- DRM_DEBUG("IH: HPD_RX 5\n");67336733- }67256725+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_RX_INTERRUPT))67266726+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");67276727+67286728+ rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_RX_INTERRUPT;67296729+ queue_dp = true;67306730+ DRM_DEBUG("IH: HPD_RX 5\n");67316731+67346732 break;67356733 case 11:67366736- if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_RX_INTERRUPT) {67376737- rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_RX_INTERRUPT;67386738- queue_dp = true;67396739- DRM_DEBUG("IH: HPD_RX 6\n");67406740- }67346734+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_RX_INTERRUPT))67356735+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");67366736+67376737+ rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_RX_INTERRUPT;67386738+ queue_dp = true;67396739+ DRM_DEBUG("IH: HPD_RX 6\n");67406740+67416741 break;67426742 default:67436743 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+1
drivers/i2c/busses/Kconfig
···
@@ -633,6 +633,7 @@
 config I2C_MT65XX
 	tristate "MediaTek I2C adapter"
 	depends on ARCH_MEDIATEK || COMPILE_TEST
+	depends on HAS_DMA
 	help
 	  This selects the MediaTek(R) Integrated Inter Circuit bus driver
 	  for MT65xx and MT81xx.
+8-7
drivers/i2c/busses/i2c-jz4780.c
···764764 if (IS_ERR(i2c->clk))765765 return PTR_ERR(i2c->clk);766766767767- clk_prepare_enable(i2c->clk);767767+ ret = clk_prepare_enable(i2c->clk);768768+ if (ret)769769+ return ret;768770769769- if (of_property_read_u32(pdev->dev.of_node, "clock-frequency",770770- &clk_freq)) {771771+ ret = of_property_read_u32(pdev->dev.of_node, "clock-frequency",772772+ &clk_freq);773773+ if (ret) {771774 dev_err(&pdev->dev, "clock-frequency not specified in DT");772772- return clk_freq;775775+ goto err;773776 }774777775778 i2c->speed = clk_freq / 1000;···793790 i2c->irq = platform_get_irq(pdev, 0);794791 ret = devm_request_irq(&pdev->dev, i2c->irq, jz4780_i2c_irq, 0,795792 dev_name(&pdev->dev), i2c);796796- if (ret) {797797- ret = -ENODEV;793793+ if (ret)798794 goto err;799799- }800795801796 ret = i2c_add_adapter(&i2c->adap);802797 if (ret < 0) {
···257257 return MIPS_CPU_IRQ_BASE + cp0_fdc_irq;258258 }259259260260- /*261261- * Some cores claim the FDC is routable but it doesn't actually seem to262262- * be connected.263263- */264264- switch (current_cpu_type()) {265265- case CPU_INTERAPTIV:266266- case CPU_PROAPTIV:267267- return -1;268268- }269269-270260 return irq_create_mapping(gic_irq_domain,271261 GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_FDC));272262}
+1-7
drivers/memory/omap-gpmc.c
···20742074 ret = gpmc_probe_nand_child(pdev, child);20752075 else if (of_node_cmp(child->name, "onenand") == 0)20762076 ret = gpmc_probe_onenand_child(pdev, child);20772077- else if (of_node_cmp(child->name, "ethernet") == 0 ||20782078- of_node_cmp(child->name, "nor") == 0 ||20792079- of_node_cmp(child->name, "uart") == 0)20772077+ else20802078 ret = gpmc_probe_generic_child(pdev, child);20812081-20822082- if (WARN(ret < 0, "%s: probing gpmc child %s failed\n",20832083- __func__, child->full_name))20842084- of_node_put(child);20852079 }2086208020872081 return 0;
+5-7
drivers/misc/cxl/api.c
···23232424 afu = cxl_pci_to_afu(dev);25252626+ get_device(&afu->dev);2627 ctx = cxl_context_alloc();2728 if (IS_ERR(ctx))2829 return ctx;···3231 rc = cxl_context_init(ctx, afu, false, NULL);3332 if (rc) {3433 kfree(ctx);3434+ put_device(&afu->dev);3535 return ERR_PTR(-ENOMEM);3636 }3737 cxl_assign_psn_space(ctx);···6159{6260 if (ctx->status != CLOSED)6361 return -EBUSY;6262+6363+ put_device(&ctx->afu->dev);64646565 cxl_context_free(ctx);6666···163159 }164160165161 ctx->status = STARTED;166166- get_device(&ctx->afu->dev);167162out:168163 mutex_unlock(&ctx->status_mutex);169164 return rc;···178175/* Stop a context. Returns 0 on success, otherwise -Errno */179176int cxl_stop_context(struct cxl_context *ctx)180177{181181- int rc;182182-183183- rc = __detach_context(ctx);184184- if (!rc)185185- put_device(&ctx->afu->dev);186186- return rc;178178+ return __detach_context(ctx);187179}188180EXPORT_SYMBOL_GPL(cxl_stop_context);189181
+11-3
drivers/misc/cxl/context.c
···113113114114 if (ctx->afu->current_mode == CXL_MODE_DEDICATED) {115115 area = ctx->afu->psn_phys;116116- if (offset > ctx->afu->adapter->ps_size)116116+ if (offset >= ctx->afu->adapter->ps_size)117117 return VM_FAULT_SIGBUS;118118 } else {119119 area = ctx->psn_phys;120120- if (offset > ctx->psn_size)120120+ if (offset >= ctx->psn_size)121121 return VM_FAULT_SIGBUS;122122 }123123···145145 */146146int cxl_context_iomap(struct cxl_context *ctx, struct vm_area_struct *vma)147147{148148+ u64 start = vma->vm_pgoff << PAGE_SHIFT;148149 u64 len = vma->vm_end - vma->vm_start;149149- len = min(len, ctx->psn_size);150150+151151+ if (ctx->afu->current_mode == CXL_MODE_DEDICATED) {152152+ if (start + len > ctx->afu->adapter->ps_size)153153+ return -EINVAL;154154+ } else {155155+ if (start + len > ctx->psn_size)156156+ return -EINVAL;157157+ }150158151159 if (ctx->afu->current_mode != CXL_MODE_DEDICATED) {152160 /* make sure there is a valid per process space for this AFU */
+1-1
drivers/misc/cxl/main.c
···
@@ -73,7 +73,7 @@
 	spin_lock(&adapter->afu_list_lock);
 	for (slice = 0; slice < adapter->slices; slice++) {
 		afu = adapter->afu[slice];
-		if (!afu->enabled)
+		if (!afu || !afu->enabled)
 			continue;
 		rcu_read_lock();
 		idr_for_each_entry(&afu->contexts_idr, ctx, id)
+1-1
drivers/misc/cxl/pci.c
···
@@ -539,7 +539,7 @@
 
 static void cxl_unmap_slice_regs(struct cxl_afu *afu)
 {
-	if (afu->p1n_mmio)
+	if (afu->p2n_mmio)
 		iounmap(afu->p2n_mmio);
 	if (afu->p1n_mmio)
 		iounmap(afu->p1n_mmio);
+2-1
drivers/misc/cxl/vphb.c
···
@@ -112,9 +112,10 @@
 	unsigned long addr;
 
 	phb = pci_bus_to_host(bus);
-	afu = (struct cxl_afu *)phb->private_data;
 	if (phb == NULL)
 		return PCIBIOS_DEVICE_NOT_FOUND;
+	afu = (struct cxl_afu *)phb->private_data;
+
 	if (cxl_pcie_cfg_record(bus->number, devfn) > afu->crs_num)
 		return PCIBIOS_DEVICE_NOT_FOUND;
 	if (offset >= (unsigned long)phb->cfg_data)
···402402403403 cldev->priv_data = NULL;404404405405- mutex_lock(&dev->device_lock);406405 /* Need to remove the device here407406 * since mei_nfc_free will unlink the clients408407 */409408 mei_cl_remove_device(cldev);409409+410410+ mutex_lock(&dev->device_lock);410411 mei_nfc_free(ndev);411412 mutex_unlock(&dev->device_lock);412413}
···77 * Bjorn Helgaas <bjorn.helgaas@hp.com>88 */991010-#include <linux/acpi.h>1110#include <linux/pnp.h>1211#include <linux/device.h>1312#include <linux/init.h>···2223 {"", 0}2324};24252525-#ifdef CONFIG_ACPI2626-static bool __reserve_range(u64 start, unsigned int length, bool io, char *desc)2727-{2828- u8 space_id = io ? ACPI_ADR_SPACE_SYSTEM_IO : ACPI_ADR_SPACE_SYSTEM_MEMORY;2929- return !acpi_reserve_region(start, length, space_id, IORESOURCE_BUSY, desc);3030-}3131-#else3232-static bool __reserve_range(u64 start, unsigned int length, bool io, char *desc)3333-{3434- struct resource *res;3535-3636- res = io ? request_region(start, length, desc) :3737- request_mem_region(start, length, desc);3838- if (res) {3939- res->flags &= ~IORESOURCE_BUSY;4040- return true;4141- }4242- return false;4343-}4444-#endif4545-4626static void reserve_range(struct pnp_dev *dev, struct resource *r, int port)4727{4828 char *regionid;4929 const char *pnpid = dev_name(&dev->dev);5030 resource_size_t start = r->start, end = r->end;5151- bool reserved;3131+ struct resource *res;52325333 regionid = kmalloc(16, GFP_KERNEL);5434 if (!regionid)5535 return;56365737 snprintf(regionid, 16, "pnp %s", pnpid);5858- reserved = __reserve_range(start, end - start + 1, !!port, regionid);5959- if (!reserved)3838+ if (port)3939+ res = request_region(start, end - start + 1, regionid);4040+ else4141+ res = request_mem_region(start, end - start + 1, regionid);4242+ if (res)4343+ res->flags &= ~IORESOURCE_BUSY;4444+ else6045 kfree(regionid);61466247 /*···4966 * have double reservations.5067 */5168 dev_info(&dev->dev, "%pR %s reserved\n", r,5252- reserved ? "has been" : "could not be");6969+ res ? "has been" : "could not be");5370}54715572static void reserve_resources_of_dev(struct pnp_dev *dev)
+9
drivers/regulator/core.c
···
@@ -1081,6 +1081,15 @@
 		}
 	}
 
+	if (rdev->constraints->over_current_protection
+		&& ops->set_over_current_protection) {
+		ret = ops->set_over_current_protection(rdev);
+		if (ret < 0) {
+			rdev_err(rdev, "failed to set over current protection\n");
+			goto out;
+		}
+	}
+
 	print_constraints(rdev);
 	return 0;
 out:
+3
drivers/regulator/of_regulator.c
···
@@ -107,6 +107,9 @@
 	if (!of_property_read_u32(np, "regulator-system-load", &pval))
 		constraints->system_load = pval;
 
+	constraints->over_current_protection = of_property_read_bool(np,
+				"regulator-over-current-protection");
+
 	for (i = 0; i < ARRAY_SIZE(regulator_states); i++) {
 		switch (i) {
 		case PM_SUSPEND_MEM:
···
@@ -44,6 +44,8 @@
 #define BTRFS_INODE_IN_DELALLOC_LIST		9
 #define BTRFS_INODE_READDIO_NEED_LOCK		10
 #define BTRFS_INODE_HAS_PROPS		        11
+/* DIO is ready to submit */
+#define BTRFS_INODE_DIO_READY		        12
 /*
  * The following 3 bits are meant only for the btree inode.
  * When any of them is set, it means an error happened while writing an
+1
fs/btrfs/ctree.h
···
@@ -1778,6 +1778,7 @@
 	spinlock_t unused_bgs_lock;
 	struct list_head unused_bgs;
 	struct mutex unused_bg_unpin_mutex;
+	struct mutex delete_unused_bgs_mutex;
 
 	/* For btrfs to record security options */
 	struct security_mnt_opts security_opts;
+40-1
fs/btrfs/disk-io.c
···17511751{17521752 struct btrfs_root *root = arg;17531753 int again;17541754+ struct btrfs_trans_handle *trans;1754175517551756 do {17561757 again = 0;···17731772 }1774177317751774 btrfs_run_delayed_iputs(root);17761776- btrfs_delete_unused_bgs(root->fs_info);17771775 again = btrfs_clean_one_deleted_snapshot(root);17781776 mutex_unlock(&root->fs_info->cleaner_mutex);17791777···17811781 * needn't do anything special here.17821782 */17831783 btrfs_run_defrag_inodes(root->fs_info);17841784+17851785+ /*17861786+ * Acquires fs_info->delete_unused_bgs_mutex to avoid racing17871787+ * with relocation (btrfs_relocate_chunk) and relocation17881788+ * acquires fs_info->cleaner_mutex (btrfs_relocate_block_group)17891789+ * after acquiring fs_info->delete_unused_bgs_mutex. So we17901790+ * can't hold, nor need to, fs_info->cleaner_mutex when deleting17911791+ * unused block groups.17921792+ */17931793+ btrfs_delete_unused_bgs(root->fs_info);17841794sleep:17851795 if (!try_to_freeze() && !again) {17861796 set_current_state(TASK_INTERRUPTIBLE);···17991789 __set_current_state(TASK_RUNNING);18001790 }18011791 } while (!kthread_should_stop());17921792+17931793+ /*17941794+ * Transaction kthread is stopped before us and wakes us up.17951795+ * However we might have started a new transaction and COWed some17961796+ * tree blocks when deleting unused block groups for example. 
So17971797+ * make sure we commit the transaction we started to have a clean17981798+ * shutdown when evicting the btree inode - if it has dirty pages17991799+ * when we do the final iput() on it, eviction will trigger a18001800+ * writeback for it which will fail with null pointer dereferences18011801+ * since work queues and other resources were already released and18021802+ * destroyed by the time the iput/eviction/writeback is made.18031803+ */18041804+ trans = btrfs_attach_transaction(root);18051805+ if (IS_ERR(trans)) {18061806+ if (PTR_ERR(trans) != -ENOENT)18071807+ btrfs_err(root->fs_info,18081808+ "cleaner transaction attach returned %ld",18091809+ PTR_ERR(trans));18101810+ } else {18111811+ int ret;18121812+18131813+ ret = btrfs_commit_transaction(trans, root);18141814+ if (ret)18151815+ btrfs_err(root->fs_info,18161816+ "cleaner open transaction commit returned %d",18171817+ ret);18181818+ }18191819+18021820 return 0;18031821}18041822···25302492 spin_lock_init(&fs_info->unused_bgs_lock);25312493 rwlock_init(&fs_info->tree_mod_log_lock);25322494 mutex_init(&fs_info->unused_bg_unpin_mutex);24952495+ mutex_init(&fs_info->delete_unused_bgs_mutex);25332496 mutex_init(&fs_info->reloc_mutex);25342497 mutex_init(&fs_info->delalloc_root_mutex);25352498 seqlock_init(&fs_info->profiles_lock);
+3
fs/btrfs/extent-tree.c
···
@@ -9889,6 +9889,8 @@
 		}
 		spin_unlock(&fs_info->unused_bgs_lock);
 
+		mutex_lock(&root->fs_info->delete_unused_bgs_mutex);
+
 		/* Don't want to race with allocators so take the groups_sem */
 		down_write(&space_info->groups_sem);
 		spin_lock(&block_group->lock);
···
@@ -9983,6 +9985,7 @@
 end_trans:
 		btrfs_end_transaction(trans, root);
 next:
+		mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
 		btrfs_put_block_group(block_group);
 		spin_lock(&fs_info->unused_bgs_lock);
 	}
···49894989 /*49904990 * Keep looping until we have no more ranges in the io tree.49914991 * We can have ongoing bios started by readpages (called from readahead)49924992- * that didn't get their end io callbacks called yet or they are still49934993- * in progress ((extent_io.c:end_bio_extent_readpage()). This means some49924992+ * that have their endio callback (extent_io.c:end_bio_extent_readpage)49934993+ * still in progress (unlocked the pages in the bio but did not yet49944994+ * unlocked the ranges in the io tree). Therefore this means some49944995 * ranges can still be locked and eviction started because before49954996 * submitting those bios, which are executed by a separate task (work49964997 * queue kthread), inode references (inode->i_count) were not taken···7547754675487547 current->journal_info = outstanding_extents;75497548 btrfs_free_reserved_data_space(inode, len);75497549+ set_bit(BTRFS_INODE_DIO_READY, &BTRFS_I(inode)->runtime_flags);75507550 }7551755175527552 /*···78737871 struct bio *dio_bio;78747872 int ret;7875787378767876- if (err)78777877- goto out_done;78787874again:78797875 ret = btrfs_dec_test_first_ordered_pending(inode, &ordered,78807876 &ordered_offset,···78957895 ordered = NULL;78967896 goto again;78977897 }78987898-out_done:78997898 dio_bio = dip->dio_bio;7900789979017900 kfree(dip);···81628163static void btrfs_submit_direct(int rw, struct bio *dio_bio,81638164 struct inode *inode, loff_t file_offset)81648165{81658165- struct btrfs_root *root = BTRFS_I(inode)->root;81668166- struct btrfs_dio_private *dip;81678167- struct bio *io_bio;81668166+ struct btrfs_dio_private *dip = NULL;81678167+ struct bio *io_bio = NULL;81688168 struct btrfs_io_bio *btrfs_bio;81698169 int skip_sum;81708170 int write = rw & REQ_WRITE;···81808182 dip = kzalloc(sizeof(*dip), GFP_NOFS);81818183 if (!dip) {81828184 ret = -ENOMEM;81838183- goto free_io_bio;81858185+ goto free_ordered;81848186 }8185818781868188 dip->private = 
dio_bio->bi_private;···8208821082098211 if (btrfs_bio->end_io)82108212 btrfs_bio->end_io(btrfs_bio, ret);82118211-free_io_bio:82128212- bio_put(io_bio);8213821382148214free_ordered:82158215 /*82168216- * If this is a write, we need to clean up the reserved space and kill82178217- * the ordered extent.82168216+ * If we arrived here it means either we failed to submit the dip82178217+ * or we either failed to clone the dio_bio or failed to allocate the82188218+ * dip. If we cloned the dio_bio and allocated the dip, we can just82198219+ * call bio_endio against our io_bio so that we get proper resource82208220+ * cleanup if we fail to submit the dip, otherwise, we must do the82218221+ * same as btrfs_endio_direct_[write|read] because we can't call these82228222+ * callbacks - they require an allocated dip and a clone of dio_bio.82188223 */82198219- if (write) {82208220- struct btrfs_ordered_extent *ordered;82218221- ordered = btrfs_lookup_ordered_extent(inode, file_offset);82228222- if (!test_bit(BTRFS_ORDERED_PREALLOC, &ordered->flags) &&82238223- !test_bit(BTRFS_ORDERED_NOCOW, &ordered->flags))82248224- btrfs_free_reserved_extent(root, ordered->start,82258225- ordered->disk_len, 1);82268226- btrfs_put_ordered_extent(ordered);82278227- btrfs_put_ordered_extent(ordered);82248224+ if (io_bio && dip) {82258225+ bio_endio(io_bio, ret);82268226+ /*82278227+ * The end io callbacks free our dip, do the final put on io_bio82288228+ * and all the cleanup and final put for dio_bio (through82298229+ * dio_end_io()).82308230+ */82318231+ dip = NULL;82328232+ io_bio = NULL;82338233+ } else {82348234+ if (write) {82358235+ struct btrfs_ordered_extent *ordered;82368236+82378237+ ordered = btrfs_lookup_ordered_extent(inode,82388238+ file_offset);82398239+ set_bit(BTRFS_ORDERED_IOERR, &ordered->flags);82408240+ /*82418241+ * Decrements our ref on the ordered extent and removes82428242+ * the ordered extent from the inode's ordered tree,82438243+ * doing all the proper resource 
cleanup such as for the82448244+ * reserved space and waking up any waiters for this82458245+ * ordered extent (through btrfs_remove_ordered_extent).82468246+ */82478247+ btrfs_finish_ordered_io(ordered);82488248+ } else {82498249+ unlock_extent(&BTRFS_I(inode)->io_tree, file_offset,82508250+ file_offset + dio_bio->bi_iter.bi_size - 1);82518251+ }82528252+ clear_bit(BIO_UPTODATE, &dio_bio->bi_flags);82538253+ /*82548254+ * Releases and cleans up our dio_bio, no need to bio_put()82558255+ * nor bio_endio()/bio_io_error() against dio_bio.82568256+ */82578257+ dio_end_io(dio_bio, ret);82288258 }82298229- bio_endio(dio_bio, ret);82598259+ if (io_bio)82608260+ bio_put(io_bio);82618261+ kfree(dip);82308262}8231826382328264static ssize_t check_direct_IO(struct btrfs_root *root, struct kiocb *iocb,···83588330 btrfs_submit_direct, flags);83598331 if (iov_iter_rw(iter) == WRITE) {83608332 current->journal_info = NULL;83618361- if (ret < 0 && ret != -EIOCBQUEUED)83628362- btrfs_delalloc_release_space(inode, count);83638363- else if (ret >= 0 && (size_t)ret < count)83338333+ if (ret < 0 && ret != -EIOCBQUEUED) {83348334+ /*83358335+ * If the error comes from submitting stage,83368336+ * btrfs_get_blocsk_direct() has free'd data space,83378337+ * and metadata space will be handled by83388338+ * finish_ordered_fn, don't do that again to make83398339+ * sure bytes_may_use is correct.83408340+ */83418341+ if (!test_and_clear_bit(BTRFS_INODE_DIO_READY,83428342+ &BTRFS_I(inode)->runtime_flags))83438343+ btrfs_delalloc_release_space(inode, count);83448344+ } else if (ret >= 0 && (size_t)ret < count)83648345 btrfs_delalloc_release_space(inode,83658346 count - (size_t)ret);83668347 }
+189-54
fs/btrfs/ioctl.c
···878788888989static int btrfs_clone(struct inode *src, struct inode *inode,9090- u64 off, u64 olen, u64 olen_aligned, u64 destoff);9090+ u64 off, u64 olen, u64 olen_aligned, u64 destoff,9191+ int no_time_update);91929293/* Mask out flags that are inappropriate for the given type of inode. */9394static inline __u32 btrfs_mask_flags(umode_t mode, __u32 flags)···27662765 return ret;27672766}2768276727692769-static struct page *extent_same_get_page(struct inode *inode, u64 off)27682768+static struct page *extent_same_get_page(struct inode *inode, pgoff_t index)27702769{27712770 struct page *page;27722772- pgoff_t index;27732771 struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;27742774-27752775- index = off >> PAGE_CACHE_SHIFT;2776277227772773 page = grab_cache_page(inode->i_mapping, index);27782774 if (!page)···27892791 unlock_page(page);2790279227912793 return page;27942794+}27952795+27962796+static int gather_extent_pages(struct inode *inode, struct page **pages,27972797+ int num_pages, u64 off)27982798+{27992799+ int i;28002800+ pgoff_t index = off >> PAGE_CACHE_SHIFT;28012801+28022802+ for (i = 0; i < num_pages; i++) {28032803+ pages[i] = extent_same_get_page(inode, index + i);28042804+ if (!pages[i])28052805+ return -ENOMEM;28062806+ }28072807+ return 0;27922808}2793280927942810static inline void lock_extent_range(struct inode *inode, u64 off, u64 len)···28302818 }28312819}2832282028332833-static void btrfs_double_unlock(struct inode *inode1, u64 loff1,28342834- struct inode *inode2, u64 loff2, u64 len)28212821+static void btrfs_double_inode_unlock(struct inode *inode1, struct inode *inode2)28352822{28362836- unlock_extent(&BTRFS_I(inode1)->io_tree, loff1, loff1 + len - 1);28372837- unlock_extent(&BTRFS_I(inode2)->io_tree, loff2, loff2 + len - 1);28382838-28392823 mutex_unlock(&inode1->i_mutex);28402824 mutex_unlock(&inode2->i_mutex);28412825}2842282628432843-static void btrfs_double_lock(struct inode *inode1, u64 loff1,28442844- struct inode *inode2, u64 
loff2, u64 len)28272827+static void btrfs_double_inode_lock(struct inode *inode1, struct inode *inode2)28282828+{28292829+ if (inode1 < inode2)28302830+ swap(inode1, inode2);28312831+28322832+ mutex_lock_nested(&inode1->i_mutex, I_MUTEX_PARENT);28332833+ if (inode1 != inode2)28342834+ mutex_lock_nested(&inode2->i_mutex, I_MUTEX_CHILD);28352835+}28362836+28372837+static void btrfs_double_extent_unlock(struct inode *inode1, u64 loff1,28382838+ struct inode *inode2, u64 loff2, u64 len)28392839+{28402840+ unlock_extent(&BTRFS_I(inode1)->io_tree, loff1, loff1 + len - 1);28412841+ unlock_extent(&BTRFS_I(inode2)->io_tree, loff2, loff2 + len - 1);28422842+}28432843+28442844+static void btrfs_double_extent_lock(struct inode *inode1, u64 loff1,28452845+ struct inode *inode2, u64 loff2, u64 len)28452846{28462847 if (inode1 < inode2) {28472848 swap(inode1, inode2);28482849 swap(loff1, loff2);28492850 }28502850-28512851- mutex_lock_nested(&inode1->i_mutex, I_MUTEX_PARENT);28522851 lock_extent_range(inode1, loff1, len);28532853- if (inode1 != inode2) {28542854- mutex_lock_nested(&inode2->i_mutex, I_MUTEX_CHILD);28522852+ if (inode1 != inode2)28552853 lock_extent_range(inode2, loff2, len);28542854+}28552855+28562856+struct cmp_pages {28572857+ int num_pages;28582858+ struct page **src_pages;28592859+ struct page **dst_pages;28602860+};28612861+28622862+static void btrfs_cmp_data_free(struct cmp_pages *cmp)28632863+{28642864+ int i;28652865+ struct page *pg;28662866+28672867+ for (i = 0; i < cmp->num_pages; i++) {28682868+ pg = cmp->src_pages[i];28692869+ if (pg)28702870+ page_cache_release(pg);28712871+ pg = cmp->dst_pages[i];28722872+ if (pg)28732873+ page_cache_release(pg);28562874 }28752875+ kfree(cmp->src_pages);28762876+ kfree(cmp->dst_pages);28772877+}28782878+28792879+static int btrfs_cmp_data_prepare(struct inode *src, u64 loff,28802880+ struct inode *dst, u64 dst_loff,28812881+ u64 len, struct cmp_pages *cmp)28822882+{28832883+ int ret;28842884+ int num_pages = 
PAGE_CACHE_ALIGN(len) >> PAGE_CACHE_SHIFT;28852885+ struct page **src_pgarr, **dst_pgarr;28862886+28872887+ /*28882888+ * We must gather up all the pages before we initiate our28892889+ * extent locking. We use an array for the page pointers. Size28902890+ * of the array is bounded by len, which is in turn bounded by28912891+ * BTRFS_MAX_DEDUPE_LEN.28922892+ */28932893+ src_pgarr = kzalloc(num_pages * sizeof(struct page *), GFP_NOFS);28942894+ dst_pgarr = kzalloc(num_pages * sizeof(struct page *), GFP_NOFS);28952895+ if (!src_pgarr || !dst_pgarr) {28962896+ kfree(src_pgarr);28972897+ kfree(dst_pgarr);28982898+ return -ENOMEM;28992899+ }29002900+ cmp->num_pages = num_pages;29012901+ cmp->src_pages = src_pgarr;29022902+ cmp->dst_pages = dst_pgarr;29032903+29042904+ ret = gather_extent_pages(src, cmp->src_pages, cmp->num_pages, loff);29052905+ if (ret)29062906+ goto out;29072907+29082908+ ret = gather_extent_pages(dst, cmp->dst_pages, cmp->num_pages, dst_loff);29092909+29102910+out:29112911+ if (ret)29122912+ btrfs_cmp_data_free(cmp);29132913+ return 0;28572914}2858291528592916static int btrfs_cmp_data(struct inode *src, u64 loff, struct inode *dst,28602860- u64 dst_loff, u64 len)29172917+ u64 dst_loff, u64 len, struct cmp_pages *cmp)28612918{28622919 int ret = 0;29202920+ int i;28632921 struct page *src_page, *dst_page;28642922 unsigned int cmp_len = PAGE_CACHE_SIZE;28652923 void *addr, *dst_addr;2866292429252925+ i = 0;28672926 while (len) {28682927 if (len < PAGE_CACHE_SIZE)28692928 cmp_len = len;2870292928712871- src_page = extent_same_get_page(src, loff);28722872- if (!src_page)28732873- return -EINVAL;28742874- dst_page = extent_same_get_page(dst, dst_loff);28752875- if (!dst_page) {28762876- page_cache_release(src_page);28772877- return -EINVAL;28782878- }29302930+ BUG_ON(i >= cmp->num_pages);29312931+29322932+ src_page = cmp->src_pages[i];29332933+ dst_page = cmp->dst_pages[i];29342934+28792935 addr = kmap_atomic(src_page);28802936 dst_addr = 
kmap_atomic(dst_page);28812937···2955287529562876 kunmap_atomic(addr);29572877 kunmap_atomic(dst_addr);29582958- page_cache_release(src_page);29592959- page_cache_release(dst_page);2960287829612879 if (ret)29622880 break;2963288129642964- loff += cmp_len;29652965- dst_loff += cmp_len;29662882 len -= cmp_len;28832883+ i++;29672884 }2968288529692886 return ret;···29912914{29922915 int ret;29932916 u64 len = olen;29172917+ struct cmp_pages cmp;29182918+ int same_inode = 0;29192919+ u64 same_lock_start = 0;29202920+ u64 same_lock_len = 0;2994292129952995- /*29962996- * btrfs_clone() can't handle extents in the same file29972997- * yet. Once that works, we can drop this check and replace it29982998- * with a check for the same inode, but overlapping extents.29992999- */30002922 if (src == dst)30013001- return -EINVAL;29232923+ same_inode = 1;3002292430032925 if (len == 0)30042926 return 0;3005292730063006- btrfs_double_lock(src, loff, dst, dst_loff, len);29282928+ if (same_inode) {29292929+ mutex_lock(&src->i_mutex);3007293030083008- ret = extent_same_check_offsets(src, loff, &len, olen);30093009- if (ret)30103010- goto out_unlock;29312931+ ret = extent_same_check_offsets(src, loff, &len, olen);29322932+ if (ret)29332933+ goto out_unlock;3011293430123012- ret = extent_same_check_offsets(dst, dst_loff, &len, olen);30133013- if (ret)30143014- goto out_unlock;29352935+ /*29362936+ * Single inode case wants the same checks, except we29372937+ * don't want our length pushed out past i_size as29382938+ * comparing that data range makes no sense.29392939+ *29402940+ * extent_same_check_offsets() will do this for an29412941+ * unaligned length at i_size, so catch it here and29422942+ * reject the request.29432943+ *29442944+ * This effectively means we require aligned extents29452945+ * for the single-inode case, whereas the other cases29462946+ * allow an unaligned length so long as it ends at29472947+ * i_size.29482948+ */29492949+ if (len != olen) {29502950+ ret = 
-EINVAL;29512951+ goto out_unlock;29522952+ }29532953+29542954+ /* Check for overlapping ranges */29552955+ if (dst_loff + len > loff && dst_loff < loff + len) {29562956+ ret = -EINVAL;29572957+ goto out_unlock;29582958+ }29592959+29602960+ same_lock_start = min_t(u64, loff, dst_loff);29612961+ same_lock_len = max_t(u64, loff, dst_loff) + len - same_lock_start;29622962+ } else {29632963+ btrfs_double_inode_lock(src, dst);29642964+29652965+ ret = extent_same_check_offsets(src, loff, &len, olen);29662966+ if (ret)29672967+ goto out_unlock;29682968+29692969+ ret = extent_same_check_offsets(dst, dst_loff, &len, olen);29702970+ if (ret)29712971+ goto out_unlock;29722972+ }3015297330162974 /* don't make the dst file partly checksummed */30172975 if ((BTRFS_I(src)->flags & BTRFS_INODE_NODATASUM) !=···30552943 goto out_unlock;30562944 }3057294530583058- ret = btrfs_cmp_data(src, loff, dst, dst_loff, len);30593059- if (ret == 0)30603060- ret = btrfs_clone(src, dst, loff, olen, len, dst_loff);29462946+ ret = btrfs_cmp_data_prepare(src, loff, dst, dst_loff, olen, &cmp);29472947+ if (ret)29482948+ goto out_unlock;3061294929502950+ if (same_inode)29512951+ lock_extent_range(src, same_lock_start, same_lock_len);29522952+ else29532953+ btrfs_double_extent_lock(src, loff, dst, dst_loff, len);29542954+29552955+ /* pass original length for comparison so we stay within i_size */29562956+ ret = btrfs_cmp_data(src, loff, dst, dst_loff, olen, &cmp);29572957+ if (ret == 0)29582958+ ret = btrfs_clone(src, dst, loff, olen, len, dst_loff, 1);29592959+29602960+ if (same_inode)29612961+ unlock_extent(&BTRFS_I(src)->io_tree, same_lock_start,29622962+ same_lock_start + same_lock_len - 1);29632963+ else29642964+ btrfs_double_extent_unlock(src, loff, dst, dst_loff, len);29652965+29662966+ btrfs_cmp_data_free(&cmp);30622967out_unlock:30633063- btrfs_double_unlock(src, loff, dst, dst_loff, len);29682968+ if (same_inode)29692969+ mutex_unlock(&src->i_mutex);29702970+ else29712971+ 
btrfs_double_inode_unlock(src, dst);3064297230652973 return ret;30662974}···32323100 struct inode *inode,32333101 u64 endoff,32343102 const u64 destoff,32353235- const u64 olen)31033103+ const u64 olen,31043104+ int no_time_update)32363105{32373106 struct btrfs_root *root = BTRFS_I(inode)->root;32383107 int ret;3239310832403109 inode_inc_iversion(inode);32413241- inode->i_mtime = inode->i_ctime = CURRENT_TIME;31103110+ if (!no_time_update)31113111+ inode->i_mtime = inode->i_ctime = CURRENT_TIME;32423112 /*32433113 * We round up to the block size at eof when determining which32443114 * extents to clone above, but shouldn't round up the file size.···33253191 * @inode: Inode to clone to33263192 * @off: Offset within source to start clone from33273193 * @olen: Original length, passed by user, of range to clone33283328- * @olen_aligned: Block-aligned value of olen, extent_same uses33293329- * identical values here31943194+ * @olen_aligned: Block-aligned value of olen33303195 * @destoff: Offset within @inode to start clone31963196+ * @no_time_update: Whether to update mtime/ctime on the target inode33313197 */33323198static int btrfs_clone(struct inode *src, struct inode *inode,33333199 const u64 off, const u64 olen, const u64 olen_aligned,33343334- const u64 destoff)32003200+ const u64 destoff, int no_time_update)33353201{33363202 struct btrfs_root *root = BTRFS_I(inode)->root;33373203 struct btrfs_path *path = NULL;···36553521 root->sectorsize);36563522 ret = clone_finish_inode_update(trans, inode,36573523 last_dest_end,36583658- destoff, olen);35243524+ destoff, olen,35253525+ no_time_update);36593526 if (ret)36603527 goto out;36613528 if (new_key.offset + datal >= destoff + len)···36943559 clone_update_extent_map(inode, trans, NULL, last_dest_end,36953560 destoff + len - last_dest_end);36963561 ret = clone_finish_inode_update(trans, inode, destoff + len,36973697- destoff, olen);35623562+ destoff, olen, no_time_update);36983563 }3699356437003565out:···38313696 
lock_extent_range(inode, destoff, len);38323697 }3833369838343834- ret = btrfs_clone(src, inode, off, olen, len, destoff);36993699+ ret = btrfs_clone(src, inode, off, olen, len, destoff, 0);3835370038363701 if (same_inode) {38373702 u64 lock_start = min_t(u64, off, destoff);
+5
fs/btrfs/ordered-data.c
···552552 trace_btrfs_ordered_extent_put(entry->inode, entry);553553554554 if (atomic_dec_and_test(&entry->refs)) {555555+ ASSERT(list_empty(&entry->log_list));556556+ ASSERT(list_empty(&entry->trans_list));557557+ ASSERT(list_empty(&entry->root_extent_list));558558+ ASSERT(RB_EMPTY_NODE(&entry->rb_node));555559 if (entry->inode)556560 btrfs_add_delayed_iput(entry->inode);557561 while (!list_empty(&entry->list)) {···583579 spin_lock_irq(&tree->lock);584580 node = &entry->rb_node;585581 rb_erase(node, &tree->tree);582582+ RB_CLEAR_NODE(node);586583 if (tree->last == node)587584 tree->last = NULL;588585 set_bit(BTRFS_ORDERED_COMPLETE, &entry->flags);
+41-8
fs/btrfs/qgroup.c
···13491349 struct btrfs_root *quota_root;13501350 struct btrfs_qgroup *qgroup;13511351 int ret = 0;13521352+ /* Sometimes we would want to clear the limit on this qgroup.13531353+ * To meet this requirement, we treat the -1 as a special value13541354+ * which tell kernel to clear the limit on this qgroup.13551355+ */13561356+ const u64 CLEAR_VALUE = -1;1352135713531358 mutex_lock(&fs_info->qgroup_ioctl_lock);13541359 quota_root = fs_info->quota_root;···13691364 }1370136513711366 spin_lock(&fs_info->qgroup_lock);13721372- if (limit->flags & BTRFS_QGROUP_LIMIT_MAX_RFER)13731373- qgroup->max_rfer = limit->max_rfer;13741374- if (limit->flags & BTRFS_QGROUP_LIMIT_MAX_EXCL)13751375- qgroup->max_excl = limit->max_excl;13761376- if (limit->flags & BTRFS_QGROUP_LIMIT_RSV_RFER)13771377- qgroup->rsv_rfer = limit->rsv_rfer;13781378- if (limit->flags & BTRFS_QGROUP_LIMIT_RSV_EXCL)13791379- qgroup->rsv_excl = limit->rsv_excl;13671367+ if (limit->flags & BTRFS_QGROUP_LIMIT_MAX_RFER) {13681368+ if (limit->max_rfer == CLEAR_VALUE) {13691369+ qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_MAX_RFER;13701370+ limit->flags &= ~BTRFS_QGROUP_LIMIT_MAX_RFER;13711371+ qgroup->max_rfer = 0;13721372+ } else {13731373+ qgroup->max_rfer = limit->max_rfer;13741374+ }13751375+ }13761376+ if (limit->flags & BTRFS_QGROUP_LIMIT_MAX_EXCL) {13771377+ if (limit->max_excl == CLEAR_VALUE) {13781378+ qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_MAX_EXCL;13791379+ limit->flags &= ~BTRFS_QGROUP_LIMIT_MAX_EXCL;13801380+ qgroup->max_excl = 0;13811381+ } else {13821382+ qgroup->max_excl = limit->max_excl;13831383+ }13841384+ }13851385+ if (limit->flags & BTRFS_QGROUP_LIMIT_RSV_RFER) {13861386+ if (limit->rsv_rfer == CLEAR_VALUE) {13871387+ qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_RSV_RFER;13881388+ limit->flags &= ~BTRFS_QGROUP_LIMIT_RSV_RFER;13891389+ qgroup->rsv_rfer = 0;13901390+ } else {13911391+ qgroup->rsv_rfer = limit->rsv_rfer;13921392+ }13931393+ }13941394+ if (limit->flags & BTRFS_QGROUP_LIMIT_RSV_EXCL) 
{13951395+ if (limit->rsv_excl == CLEAR_VALUE) {13961396+ qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_RSV_EXCL;13971397+ limit->flags &= ~BTRFS_QGROUP_LIMIT_RSV_EXCL;13981398+ qgroup->rsv_excl = 0;13991399+ } else {14001400+ qgroup->rsv_excl = limit->rsv_excl;14011401+ }14021402+ }13801403 qgroup->lim_flags |= limit->flags;1381140413821405 spin_unlock(&fs_info->qgroup_lock);
+1-1
fs/btrfs/relocation.c
···40494049 if (trans && progress && err == -ENOSPC) {40504050 ret = btrfs_force_chunk_alloc(trans, rc->extent_root,40514051 rc->block_group->flags);40524052- if (ret == 0) {40524052+ if (ret == 1) {40534053 err = 0;40544054 progress = 0;40554055 goto restart;
+20-19
fs/btrfs/scrub.c
···35713571static noinline_for_stack int scrub_workers_get(struct btrfs_fs_info *fs_info,35723572 int is_dev_replace)35733573{35743574- int ret = 0;35753574 unsigned int flags = WQ_FREEZABLE | WQ_UNBOUND;35763575 int max_active = fs_info->thread_pool_size;35773576···35833584 fs_info->scrub_workers =35843585 btrfs_alloc_workqueue("btrfs-scrub", flags,35853586 max_active, 4);35863586- if (!fs_info->scrub_workers) {35873587- ret = -ENOMEM;35883588- goto out;35893589- }35873587+ if (!fs_info->scrub_workers)35883588+ goto fail_scrub_workers;35893589+35903590 fs_info->scrub_wr_completion_workers =35913591 btrfs_alloc_workqueue("btrfs-scrubwrc", flags,35923592 max_active, 2);35933593- if (!fs_info->scrub_wr_completion_workers) {35943594- ret = -ENOMEM;35953595- goto out;35963596- }35933593+ if (!fs_info->scrub_wr_completion_workers)35943594+ goto fail_scrub_wr_completion_workers;35953595+35973596 fs_info->scrub_nocow_workers =35983597 btrfs_alloc_workqueue("btrfs-scrubnc", flags, 1, 0);35993599- if (!fs_info->scrub_nocow_workers) {36003600- ret = -ENOMEM;36013601- goto out;36023602- }35983598+ if (!fs_info->scrub_nocow_workers)35993599+ goto fail_scrub_nocow_workers;36033600 fs_info->scrub_parity_workers =36043601 btrfs_alloc_workqueue("btrfs-scrubparity", flags,36053602 max_active, 2);36063606- if (!fs_info->scrub_parity_workers) {36073607- ret = -ENOMEM;36083608- goto out;36093609- }36033603+ if (!fs_info->scrub_parity_workers)36043604+ goto fail_scrub_parity_workers;36103605 }36113606 ++fs_info->scrub_workers_refcnt;36123612-out:36133613- return ret;36073607+ return 0;36083608+36093609+fail_scrub_parity_workers:36103610+ btrfs_destroy_workqueue(fs_info->scrub_nocow_workers);36113611+fail_scrub_nocow_workers:36123612+ btrfs_destroy_workqueue(fs_info->scrub_wr_completion_workers);36133613+fail_scrub_wr_completion_workers:36143614+ btrfs_destroy_workqueue(fs_info->scrub_workers);36153615+fail_scrub_workers:36163616+ return -ENOMEM;36143617}3615361836163619static 
noinline_for_stack void scrub_workers_put(struct btrfs_fs_info *fs_info)
+221-5
fs/btrfs/tree-log.c
···41174117 return 0;41184118}4119411941204120+/*41214121+ * At the moment we always log all xattrs. This is to figure out at log replay41224122+ * time which xattrs must have their deletion replayed. If a xattr is missing41234123+ * in the log tree and exists in the fs/subvol tree, we delete it. This is41244124+ * because if a xattr is deleted, the inode is fsynced and a power failure41254125+ * happens, causing the log to be replayed the next time the fs is mounted,41264126+ * we want the xattr to not exist anymore (same behaviour as other filesystems41274127+ * with a journal, ext3/4, xfs, f2fs, etc).41284128+ */41294129+static int btrfs_log_all_xattrs(struct btrfs_trans_handle *trans,41304130+ struct btrfs_root *root,41314131+ struct inode *inode,41324132+ struct btrfs_path *path,41334133+ struct btrfs_path *dst_path)41344134+{41354135+ int ret;41364136+ struct btrfs_key key;41374137+ const u64 ino = btrfs_ino(inode);41384138+ int ins_nr = 0;41394139+ int start_slot = 0;41404140+41414141+ key.objectid = ino;41424142+ key.type = BTRFS_XATTR_ITEM_KEY;41434143+ key.offset = 0;41444144+41454145+ ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);41464146+ if (ret < 0)41474147+ return ret;41484148+41494149+ while (true) {41504150+ int slot = path->slots[0];41514151+ struct extent_buffer *leaf = path->nodes[0];41524152+ int nritems = btrfs_header_nritems(leaf);41534153+41544154+ if (slot >= nritems) {41554155+ if (ins_nr > 0) {41564156+ u64 last_extent = 0;41574157+41584158+ ret = copy_items(trans, inode, dst_path, path,41594159+ &last_extent, start_slot,41604160+ ins_nr, 1, 0);41614161+ /* can't be 1, extent items aren't processed */41624162+ ASSERT(ret <= 0);41634163+ if (ret < 0)41644164+ return ret;41654165+ ins_nr = 0;41664166+ }41674167+ ret = btrfs_next_leaf(root, path);41684168+ if (ret < 0)41694169+ return ret;41704170+ else if (ret > 0)41714171+ break;41724172+ continue;41734173+ }41744174+41754175+ btrfs_item_key_to_cpu(leaf, &key, slot);41764176+ if 
(key.objectid != ino || key.type != BTRFS_XATTR_ITEM_KEY)41774177+ break;41784178+41794179+ if (ins_nr == 0)41804180+ start_slot = slot;41814181+ ins_nr++;41824182+ path->slots[0]++;41834183+ cond_resched();41844184+ }41854185+ if (ins_nr > 0) {41864186+ u64 last_extent = 0;41874187+41884188+ ret = copy_items(trans, inode, dst_path, path,41894189+ &last_extent, start_slot,41904190+ ins_nr, 1, 0);41914191+ /* can't be 1, extent items aren't processed */41924192+ ASSERT(ret <= 0);41934193+ if (ret < 0)41944194+ return ret;41954195+ }41964196+41974197+ return 0;41984198+}41994199+42004200+/*42014201+ * If the no holes feature is enabled we need to make sure any hole between the42024202+ * last extent and the i_size of our inode is explicitly marked in the log. This42034203+ * is to make sure that doing something like:42044204+ *42054205+ * 1) create file with 128Kb of data42064206+ * 2) truncate file to 64Kb42074207+ * 3) truncate file to 256Kb42084208+ * 4) fsync file42094209+ * 5) <crash/power failure>42104210+ * 6) mount fs and trigger log replay42114211+ *42124212+ * Will give us a file with a size of 256Kb, the first 64Kb of data match what42134213+ * the file had in its first 64Kb of data at step 1 and the last 192Kb of the42144214+ * file correspond to a hole. The presence of explicit holes in a log tree is42154215+ * what guarantees that log replay will remove/adjust file extent items in the42164216+ * fs/subvol tree.42174217+ *42184218+ * Here we do not need to care about holes between extents, that is already done42194219+ * by copy_items(). We also only need to do this in the full sync path, where we42204220+ * lookup for extents from the fs/subvol tree only. 
In the fast path case, we42214221+ * lookup the list of modified extent maps and if any represents a hole, we42224222+ * insert a corresponding extent representing a hole in the log tree.42234223+ */42244224+static int btrfs_log_trailing_hole(struct btrfs_trans_handle *trans,42254225+ struct btrfs_root *root,42264226+ struct inode *inode,42274227+ struct btrfs_path *path)42284228+{42294229+ int ret;42304230+ struct btrfs_key key;42314231+ u64 hole_start;42324232+ u64 hole_size;42334233+ struct extent_buffer *leaf;42344234+ struct btrfs_root *log = root->log_root;42354235+ const u64 ino = btrfs_ino(inode);42364236+ const u64 i_size = i_size_read(inode);42374237+42384238+ if (!btrfs_fs_incompat(root->fs_info, NO_HOLES))42394239+ return 0;42404240+42414241+ key.objectid = ino;42424242+ key.type = BTRFS_EXTENT_DATA_KEY;42434243+ key.offset = (u64)-1;42444244+42454245+ ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);42464246+ ASSERT(ret != 0);42474247+ if (ret < 0)42484248+ return ret;42494249+42504250+ ASSERT(path->slots[0] > 0);42514251+ path->slots[0]--;42524252+ leaf = path->nodes[0];42534253+ btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);42544254+42554255+ if (key.objectid != ino || key.type != BTRFS_EXTENT_DATA_KEY) {42564256+ /* inode does not have any extents */42574257+ hole_start = 0;42584258+ hole_size = i_size;42594259+ } else {42604260+ struct btrfs_file_extent_item *extent;42614261+ u64 len;42624262+42634263+ /*42644264+ * If there's an extent beyond i_size, an explicit hole was42654265+ * already inserted by copy_items().42664266+ */42674267+ if (key.offset >= i_size)42684268+ return 0;42694269+42704270+ extent = btrfs_item_ptr(leaf, path->slots[0],42714271+ struct btrfs_file_extent_item);42724272+42734273+ if (btrfs_file_extent_type(leaf, extent) ==42744274+ BTRFS_FILE_EXTENT_INLINE) {42754275+ len = btrfs_file_extent_inline_len(leaf,42764276+ path->slots[0],42774277+ extent);42784278+ ASSERT(len == i_size);42794279+ return 0;42804280+ 
}42814281+42824282+ len = btrfs_file_extent_num_bytes(leaf, extent);42834283+ /* Last extent goes beyond i_size, no need to log a hole. */42844284+ if (key.offset + len > i_size)42854285+ return 0;42864286+ hole_start = key.offset + len;42874287+ hole_size = i_size - hole_start;42884288+ }42894289+ btrfs_release_path(path);42904290+42914291+ /* Last extent ends at i_size. */42924292+ if (hole_size == 0)42934293+ return 0;42944294+42954295+ hole_size = ALIGN(hole_size, root->sectorsize);42964296+ ret = btrfs_insert_file_extent(trans, log, ino, hole_start, 0, 0,42974297+ hole_size, 0, hole_size, 0, 0, 0);42984298+ return ret;42994299+}43004300+41204301/* log a single inode in the tree log.41214302 * At least one parent directory for this inode must exist in the tree41224303 * or be logged already.···43364155 u64 ino = btrfs_ino(inode);43374156 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;43384157 u64 logged_isize = 0;41584158+ bool need_log_inode_item = true;4339415943404160 path = btrfs_alloc_path();43414161 if (!path)···44454263 } else {44464264 if (inode_only == LOG_INODE_ALL)44474265 fast_search = true;44484448- ret = log_inode_item(trans, log, dst_path, inode);44494449- if (ret) {44504450- err = ret;44514451- goto out_unlock;44524452- }44534266 goto log_extents;44544267 }44554268···44664289 break;44674290 if (min_key.type > max_key.type)44684291 break;42924292+42934293+ if (min_key.type == BTRFS_INODE_ITEM_KEY)42944294+ need_log_inode_item = false;42954295+42964296+ /* Skip xattrs, we log them later with btrfs_log_all_xattrs() */42974297+ if (min_key.type == BTRFS_XATTR_ITEM_KEY) {42984298+ if (ins_nr == 0)42994299+ goto next_slot;43004300+ ret = copy_items(trans, inode, dst_path, path,43014301+ &last_extent, ins_start_slot,43024302+ ins_nr, inode_only, logged_isize);43034303+ if (ret < 0) {43044304+ err = ret;43054305+ goto out_unlock;43064306+ }43074307+ ins_nr = 0;43084308+ if (ret) {43094309+ btrfs_release_path(path);43104310+ 
continue;43114311+ }43124312+ goto next_slot;43134313+ }4469431444704315 src = path->nodes[0];44714316 if (ins_nr && ins_start_slot + ins_nr == path->slots[0]) {···45564357 ins_nr = 0;45574358 }4558435943604360+ btrfs_release_path(path);43614361+ btrfs_release_path(dst_path);43624362+ err = btrfs_log_all_xattrs(trans, root, inode, path, dst_path);43634363+ if (err)43644364+ goto out_unlock;43654365+ if (max_key.type >= BTRFS_EXTENT_DATA_KEY && !fast_search) {43664366+ btrfs_release_path(path);43674367+ btrfs_release_path(dst_path);43684368+ err = btrfs_log_trailing_hole(trans, root, inode, path);43694369+ if (err)43704370+ goto out_unlock;43714371+ }45594372log_extents:45604373 btrfs_release_path(path);45614374 btrfs_release_path(dst_path);43754375+ if (need_log_inode_item) {43764376+ err = log_inode_item(trans, log, dst_path, inode);43774377+ if (err)43784378+ goto out_unlock;43794379+ }45624380 if (fast_search) {45634381 /*45644382 * Some ordered extents started by fsync might have completed
+44-6
fs/btrfs/volumes.c
···27662766 root = root->fs_info->chunk_root;27672767 extent_root = root->fs_info->extent_root;2768276827692769+ /*27702770+ * Prevent races with automatic removal of unused block groups.27712771+ * After we relocate and before we remove the chunk with offset27722772+ * chunk_offset, automatic removal of the block group can kick in,27732773+ * resulting in a failure when calling btrfs_remove_chunk() below.27742774+ *27752775+ * Make sure to acquire this mutex before doing a tree search (dev27762776+ * or chunk trees) to find chunks. Otherwise the cleaner kthread might27772777+ * call btrfs_remove_chunk() (through btrfs_delete_unused_bgs()) after27782778+ * we release the path used to search the chunk/dev tree and before27792779+ * the current task acquires this mutex and calls us.27802780+ */27812781+ ASSERT(mutex_is_locked(&root->fs_info->delete_unused_bgs_mutex));27822782+27692783 ret = btrfs_can_relocate(extent_root, chunk_offset);27702784 if (ret)27712785 return -ENOSPC;···28282814 key.type = BTRFS_CHUNK_ITEM_KEY;2829281528302816 while (1) {28172817+ mutex_lock(&root->fs_info->delete_unused_bgs_mutex);28312818 ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);28322832- if (ret < 0)28192819+ if (ret < 0) {28202820+ mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);28332821 goto error;28222822+ }28342823 BUG_ON(ret == 0); /* Corruption */2835282428362825 ret = btrfs_previous_item(chunk_root, path, key.objectid,28372826 key.type);28272827+ if (ret)28282828+ mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);28382829 if (ret < 0)28392830 goto error;28402831 if (ret > 0)···28622843 else28632844 BUG_ON(ret);28642845 }28462846+ mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);2865284728662848 if (found_key.offset == 0)28672849 break;···33193299 goto error;33203300 }3321330133023302+ mutex_lock(&fs_info->delete_unused_bgs_mutex);33223303 ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);33233323- if (ret < 0)33043304+ if (ret < 0) 
{33053305+ mutex_unlock(&fs_info->delete_unused_bgs_mutex);33243306 goto error;33073307+ }3325330833263309 /*33273310 * this shouldn't happen, it means the last relocate···33363313 ret = btrfs_previous_item(chunk_root, path, 0,33373314 BTRFS_CHUNK_ITEM_KEY);33383315 if (ret) {33163316+ mutex_unlock(&fs_info->delete_unused_bgs_mutex);33393317 ret = 0;33403318 break;33413319 }···33453321 slot = path->slots[0];33463322 btrfs_item_key_to_cpu(leaf, &found_key, slot);3347332333483348- if (found_key.objectid != key.objectid)33243324+ if (found_key.objectid != key.objectid) {33253325+ mutex_unlock(&fs_info->delete_unused_bgs_mutex);33493326 break;33273327+ }3350332833513329 chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);33523330···33613335 ret = should_balance_chunk(chunk_root, leaf, chunk,33623336 found_key.offset);33633337 btrfs_release_path(path);33643364- if (!ret)33383338+ if (!ret) {33393339+ mutex_unlock(&fs_info->delete_unused_bgs_mutex);33653340 goto loop;33413341+ }3366334233673343 if (counting) {33443344+ mutex_unlock(&fs_info->delete_unused_bgs_mutex);33683345 spin_lock(&fs_info->balance_lock);33693346 bctl->stat.expected++;33703347 spin_unlock(&fs_info->balance_lock);···33773348 ret = btrfs_relocate_chunk(chunk_root,33783349 found_key.objectid,33793350 found_key.offset);33513351+ mutex_unlock(&fs_info->delete_unused_bgs_mutex);33803352 if (ret && ret != -ENOSPC)33813353 goto error;33823354 if (ret == -ENOSPC) {···41174087 key.type = BTRFS_DEV_EXTENT_KEY;4118408841194089 do {40904090+ mutex_lock(&root->fs_info->delete_unused_bgs_mutex);41204091 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);41214121- if (ret < 0)40924092+ if (ret < 0) {40934093+ mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);41224094 goto done;40954095+ }4123409641244097 ret = btrfs_previous_item(root, path, 0, key.type);40984098+ if (ret)40994099+ mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);41254100 if (ret < 0)41264101 goto done;41274102 if (ret) {···41404105 
btrfs_item_key_to_cpu(l, &key, path->slots[0]);4141410641424107 if (key.objectid != device->devid) {41084108+ mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);41434109 btrfs_release_path(path);41444110 break;41454111 }···41494113 length = btrfs_dev_extent_length(l, dev_extent);4150411441514115 if (key.offset + length <= new_size) {41164116+ mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);41524117 btrfs_release_path(path);41534118 break;41544119 }···41594122 btrfs_release_path(path);4160412341614124 ret = btrfs_relocate_chunk(root, chunk_objectid, chunk_offset);41254125+ mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);41624126 if (ret && ret != -ENOSPC)41634127 goto done;41644128 if (ret == -ENOSPC)···57535715static void btrfs_end_bio(struct bio *bio, int err)57545716{57555717 struct btrfs_bio *bbio = bio->bi_private;57565756- struct btrfs_device *dev = bbio->stripes[0].dev;57575718 int is_orig_bio = 0;5758571957595720 if (err) {···57605723 if (err == -EIO || err == -EREMOTEIO) {57615724 unsigned int stripe_index =57625725 btrfs_io_bio(bio)->stripe_index;57265726+ struct btrfs_device *dev;5763572757645728 BUG_ON(stripe_index >= bbio->num_stripes);57655729 dev = bbio->stripes[stripe_index].dev;
+1
fs/compat_ioctl.c
···896896/* 'X' - originally XFS but some now in the VFS */897897COMPATIBLE_IOCTL(FIFREEZE)898898COMPATIBLE_IOCTL(FITHAW)899899+COMPATIBLE_IOCTL(FITRIM)899900COMPATIBLE_IOCTL(KDGETKEYCODE)900901COMPATIBLE_IOCTL(KDSETKEYCODE)901902COMPATIBLE_IOCTL(KDGKBTYPE)
+5-2
fs/dcache.c
···642642643643 /*644644 * If we have a d_op->d_delete() operation, we sould not645645- * let the dentry count go to zero, so use "put__or_lock".645645+ * let the dentry count go to zero, so use "put_or_lock".646646 */647647 if (unlikely(dentry->d_flags & DCACHE_OP_DELETE))648648 return lockref_put_or_lock(&dentry->d_lockref);···697697 */698698 smp_rmb();699699 d_flags = ACCESS_ONCE(dentry->d_flags);700700- d_flags &= DCACHE_REFERENCED | DCACHE_LRU_LIST;700700+ d_flags &= DCACHE_REFERENCED | DCACHE_LRU_LIST | DCACHE_DISCONNECTED;701701702702 /* Nothing to do? Dropping the reference was all we needed? */703703 if (d_flags == (DCACHE_REFERENCED | DCACHE_LRU_LIST) && !d_unhashed(dentry))···774774775775 /* Unreachable? Get rid of it */776776 if (unlikely(d_unhashed(dentry)))777777+ goto kill_it;778778+779779+ if (unlikely(dentry->d_flags & DCACHE_DISCONNECTED))777780 goto kill_it;778781779782 if (unlikely(dentry->d_flags & DCACHE_OP_DELETE)) {
-1
fs/ecryptfs/file.c
···325325 return rc;326326327327 switch (cmd) {328328- case FITRIM:329328 case FS_IOC32_GETFLAGS:330329 case FS_IOC32_SETFLAGS:331330 case FS_IOC32_GETVERSION:
···13231323 unsigned int offset,13241324 unsigned int length)13251325{13261326- int to_release = 0;13261326+ int to_release = 0, contiguous_blks = 0;13271327 struct buffer_head *head, *bh;13281328 unsigned int curr_off = 0;13291329 struct inode *inode = page->mapping->host;···1344134413451345 if ((offset <= curr_off) && (buffer_delay(bh))) {13461346 to_release++;13471347+ contiguous_blks++;13471348 clear_buffer_delay(bh);13491349+ } else if (contiguous_blks) {13501350+ lblk = page->index <<13511351+ (PAGE_CACHE_SHIFT - inode->i_blkbits);13521352+ lblk += (curr_off >> inode->i_blkbits) -13531353+ contiguous_blks;13541354+ ext4_es_remove_extent(inode, lblk, contiguous_blks);13551355+ contiguous_blks = 0;13481356 }13491357 curr_off = next_off;13501358 } while ((bh = bh->b_this_page) != head);1351135913521352- if (to_release) {13601360+ if (contiguous_blks) {13531361 lblk = page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);13541354- ext4_es_remove_extent(inode, lblk, to_release);13621362+ lblk += (curr_off >> inode->i_blkbits) - contiguous_blks;13631363+ ext4_es_remove_extent(inode, lblk, contiguous_blks);13551364 }1356136513571366 /* If we have released all the blocks belonging to a cluster, then we···43534344 int inode_size = EXT4_INODE_SIZE(sb);4354434543554346 oi.orig_ino = orig_ino;43564356- ino = (orig_ino & ~(inodes_per_block - 1)) + 1;43474347+ /*43484348+ * Calculate the first inode in the inode table block. Inode43494349+ * numbers are one-based. That is, the first inode in a block43504350+ * (assuming 4k blocks and 256 byte inodes) is (n*16 + 1).43514351+ */43524352+ ino = ((orig_ino - 1) & ~(inodes_per_block - 1)) + 1;43574353 for (i = 0; i < inodes_per_block; i++, ino++, buf += inode_size) {43584354 if (ino == orig_ino)43594355 continue;
-1
fs/ext4/ioctl.c
···755755 return err;756756 }757757 case EXT4_IOC_MOVE_EXT:758758- case FITRIM:759758 case EXT4_IOC_RESIZE_FS:760759 case EXT4_IOC_PRECACHE_EXTENTS:761760 case EXT4_IOC_SET_ENCRYPTION_POLICY:
+5-11
fs/ext4/mballoc.c
···48164816 /*48174817 * blocks being freed are metadata. these blocks shouldn't48184818 * be used until this transaction is committed48194819+ *48204820+ * We use __GFP_NOFAIL because ext4_free_blocks() is not allowed48214821+ * to fail.48194822 */48204820- retry:48214821- new_entry = kmem_cache_alloc(ext4_free_data_cachep, GFP_NOFS);48224822- if (!new_entry) {48234823- /*48244824- * We use a retry loop because48254825- * ext4_free_blocks() is not allowed to fail.48264826- */48274827- cond_resched();48284828- congestion_wait(BLK_RW_ASYNC, HZ/50);48294829- goto retry;48304830- }48234823+ new_entry = kmem_cache_alloc(ext4_free_data_cachep,48244824+ GFP_NOFS|__GFP_NOFAIL);48314825 new_entry->efd_start_cluster = bit;48324826 new_entry->efd_group = block_group;48334827 new_entry->efd_count = count_clusters;
+14-3
fs/ext4/migrate.c
···620620 struct ext4_inode_info *ei = EXT4_I(inode);621621 struct ext4_extent *ex;622622 unsigned int i, len;623623+ ext4_lblk_t start, end;623624 ext4_fsblk_t blk;624625 handle_t *handle;625626 int ret;···633632 if (EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,634633 EXT4_FEATURE_RO_COMPAT_BIGALLOC))635634 return -EOPNOTSUPP;635635+636636+ /*637637+ * In order to get correct extent info, force all delayed allocation638638+ * blocks to be allocated, otherwise delayed allocation blocks may not639639+ * be reflected and bypass the checks on extent header.640640+ */641641+ if (test_opt(inode->i_sb, DELALLOC))642642+ ext4_alloc_da_blocks(inode);636643637644 handle = ext4_journal_start(inode, EXT4_HT_MIGRATE, 1);638645 if (IS_ERR(handle))···659650 goto errout;660651 }661652 if (eh->eh_entries == 0)662662- blk = len = 0;653653+ blk = len = start = end = 0;663654 else {664655 len = le16_to_cpu(ex->ee_len);665656 blk = ext4_ext_pblock(ex);666666- if (len > EXT4_NDIR_BLOCKS) {657657+ start = le32_to_cpu(ex->ee_block);658658+ end = start + len - 1;659659+ if (end >= EXT4_NDIR_BLOCKS) {667660 ret = -EOPNOTSUPP;668661 goto errout;669662 }···673662674663 ext4_clear_inode_flag(inode, EXT4_INODE_EXTENTS);675664 memset(ei->i_data, 0, sizeof(ei->i_data));676676- for (i=0; i < len; i++)665665+ for (i = start; i <= end; i++)677666 ei->i_data[i] = cpu_to_le32(blk++);678667 ext4_mark_inode_dirty(handle, inode);679668errout:
+95
fs/hpfs/alloc.c
···484484 a->btree.first_free = cpu_to_le16(8);485485 return a;486486}487487+488488+static unsigned find_run(__le32 *bmp, unsigned *idx)489489+{490490+ unsigned len;491491+ while (tstbits(bmp, *idx, 1)) {492492+ (*idx)++;493493+ if (unlikely(*idx >= 0x4000))494494+ return 0;495495+ }496496+ len = 1;497497+ while (!tstbits(bmp, *idx + len, 1))498498+ len++;499499+ return len;500500+}501501+502502+static int do_trim(struct super_block *s, secno start, unsigned len, secno limit_start, secno limit_end, unsigned minlen, unsigned *result)503503+{504504+ int err;505505+ secno end;506506+ if (fatal_signal_pending(current))507507+ return -EINTR;508508+ end = start + len;509509+ if (start < limit_start)510510+ start = limit_start;511511+ if (end > limit_end)512512+ end = limit_end;513513+ if (start >= end)514514+ return 0;515515+ if (end - start < minlen)516516+ return 0;517517+ err = sb_issue_discard(s, start, end - start, GFP_NOFS, 0);518518+ if (err)519519+ return err;520520+ *result += end - start;521521+ return 0;522522+}523523+524524+int hpfs_trim_fs(struct super_block *s, u64 start, u64 end, u64 minlen, unsigned *result)525525+{526526+ int err = 0;527527+ struct hpfs_sb_info *sbi = hpfs_sb(s);528528+ unsigned idx, len, start_bmp, end_bmp;529529+ __le32 *bmp;530530+ struct quad_buffer_head qbh;531531+532532+ *result = 0;533533+ if (!end || end > sbi->sb_fs_size)534534+ end = sbi->sb_fs_size;535535+ if (start >= sbi->sb_fs_size)536536+ return 0;537537+ if (minlen > 0x4000)538538+ return 0;539539+ if (start < sbi->sb_dirband_start + sbi->sb_dirband_size && end > sbi->sb_dirband_start) {540540+ hpfs_lock(s);541541+ if (s->s_flags & MS_RDONLY) {542542+ err = -EROFS;543543+ goto unlock_1;544544+ }545545+ if (!(bmp = hpfs_map_dnode_bitmap(s, &qbh))) {546546+ err = -EIO;547547+ goto unlock_1;548548+ }549549+ idx = 0;550550+ while ((len = find_run(bmp, &idx)) && !err) {551551+ err = do_trim(s, sbi->sb_dirband_start + idx * 4, len * 4, start, end, minlen, result);552552+ idx += 
len;553553+ }554554+ hpfs_brelse4(&qbh);555555+unlock_1:556556+ hpfs_unlock(s);557557+ }558558+ start_bmp = start >> 14;559559+ end_bmp = (end + 0x3fff) >> 14;560560+ while (start_bmp < end_bmp && !err) {561561+ hpfs_lock(s);562562+ if (s->s_flags & MS_RDONLY) {563563+ err = -EROFS;564564+ goto unlock_2;565565+ }566566+ if (!(bmp = hpfs_map_bitmap(s, start_bmp, &qbh, "trim"))) {567567+ err = -EIO;568568+ goto unlock_2;569569+ }570570+ idx = 0;571571+ while ((len = find_run(bmp, &idx)) && !err) {572572+ err = do_trim(s, (start_bmp << 14) + idx, len, start, end, minlen, result);573573+ idx += len;574574+ }575575+ hpfs_brelse4(&qbh);576576+unlock_2:577577+ hpfs_unlock(s);578578+ start_bmp++;579579+ }580580+ return err;581581+}
···980980 case OCFS2_IOC_GROUP_EXTEND:981981 case OCFS2_IOC_GROUP_ADD:982982 case OCFS2_IOC_GROUP_ADD64:983983- case FITRIM:984983 break;985984 case OCFS2_IOC_REFLINK:986985 if (copy_from_user(&args, argp, sizeof(args)))
+3
fs/overlayfs/inode.c
···343343 struct path realpath;344344 enum ovl_path_type type;345345346346+ if (d_is_dir(dentry))347347+ return d_backing_inode(dentry);348348+346349 type = ovl_path_real(dentry, &realpath);347350 if (ovl_open_need_copy_up(file_flags, type, realpath.dentry)) {348351 err = ovl_want_write(dentry);
+14-10
include/linux/acpi.h
···5858 acpi_fwnode_handle(adev) : NULL)5959#define ACPI_HANDLE(dev) acpi_device_handle(ACPI_COMPANION(dev))60606161+/**6262+ * ACPI_DEVICE_CLASS - macro used to describe an ACPI device with6363+ * the PCI-defined class-code information6464+ *6565+ * @_cls : the class, subclass, prog-if triple for this device6666+ * @_msk : the class mask for this device6767+ *6868+ * This macro is used to create a struct acpi_device_id that matches a6969+ * specific PCI class. The .id and .driver_data fields will be left7070+ * initialized with the default value.7171+ */7272+#define ACPI_DEVICE_CLASS(_cls, _msk) .cls = (_cls), .cls_msk = (_msk),7373+6174static inline bool has_acpi_companion(struct device *dev)6275{6376 return is_acpi_node(dev->fwnode);···322309323310int acpi_resources_are_enforced(void);324311325325-int acpi_reserve_region(u64 start, unsigned int length, u8 space_id,326326- unsigned long flags, char *desc);327327-328312#ifdef CONFIG_HIBERNATION329313void __init acpi_no_s4_hw_signature(void);330314#endif···456446#define ACPI_COMPANION(dev) (NULL)457447#define ACPI_COMPANION_SET(dev, adev) do { } while (0)458448#define ACPI_HANDLE(dev) (NULL)449449+#define ACPI_DEVICE_CLASS(_cls, _msk) .cls = (0), .cls_msk = (0),459450460451struct fwnode_handle;461452···516505 const char *name)517506{518507 return 0;519519-}520520-521521-static inline int acpi_reserve_region(u64 start, unsigned int length,522522- u8 space_id, unsigned long flags,523523- char *desc)524524-{525525- return -ENXIO;526508}527509528510struct acpi_table_header;
···148148 int (*get_current_limit) (struct regulator_dev *);149149150150 int (*set_input_current_limit) (struct regulator_dev *, int lim_uA);151151+ int (*set_over_current_protection) (struct regulator_dev *);151152152153 /* enable/disable regulator */153154 int (*enable) (struct regulator_dev *);
+1
include/linux/regulator/machine.h
···147147 unsigned ramp_disable:1; /* disable ramp delay */148148 unsigned soft_start:1; /* ramp voltage slowly */149149 unsigned pull_down:1; /* pull down resistor when regulator off */150150+ unsigned over_current_protection:1; /* auto disable on over current */150151};151152152153/**
···10211021 * for strings that are too long, we should not have created10221022 * any.10231023 */10241024- if (unlikely((len == 0) || len > MAX_ARG_STRLEN - 1)) {10251025- WARN_ON(1);10241024+ if (WARN_ON_ONCE(len < 0 || len > MAX_ARG_STRLEN - 1)) {10261025 send_sig(SIGKILL, current, 0);10271026 return -1;10281027 }
+21-1
kernel/cpu.c
···2121#include <linux/suspend.h>2222#include <linux/lockdep.h>2323#include <linux/tick.h>2424+#include <linux/irq.h>2425#include <trace/events/power.h>25262627#include "smpboot.h"···393392 smpboot_park_threads(cpu);394393395394 /*395395+ * Prevent irq alloc/free while the dying cpu reorganizes the396396+ * interrupt affinities.397397+ */398398+ irq_lock_sparse();399399+400400+ /*396401 * So now all preempt/rcu users must observe !cpu_active().397402 */398398-399403 err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));400404 if (err) {401405 /* CPU didn't die: tell everyone. Can't complain. */402406 cpu_notify_nofail(CPU_DOWN_FAILED | mod, hcpu);407407+ irq_unlock_sparse();403408 goto out_release;404409 }405410 BUG_ON(cpu_online(cpu));···421414 cpu_relax();422415 smp_mb(); /* Read from cpu_dead_idle before __cpu_die(). */423416 per_cpu(cpu_dead_idle, cpu) = false;417417+418418+ /* Interrupts are moved away from the dying cpu, reenable alloc/free */419419+ irq_unlock_sparse();424420425421 hotplug_cpu__broadcast_tick_pull(cpu);426422 /* This actually kills the CPU. */···527517 goto out_notify;528518 }529519520520+ /*521521+ * Some architectures have to walk the irq descriptors to522522+ * setup the vector space for the cpu which comes online.523523+ * Prevent irq alloc/free across the bringup.524524+ */525525+ irq_lock_sparse();526526+530527 /* Arch-specific enabling code. */531528 ret = __cpu_up(cpu, idle);529529+530530+ irq_unlock_sparse();531531+532532 if (ret != 0)533533 goto out_notify;534534 BUG_ON(!cpu_online(cpu));
···76767777#ifdef CONFIG_SPARSE_IRQ7878static inline void irq_mark_irq(unsigned int irq) { }7979-extern void irq_lock_sparse(void);8080-extern void irq_unlock_sparse(void);8179#else8280extern void irq_mark_irq(unsigned int irq);8383-static inline void irq_lock_sparse(void) { }8484-static inline void irq_unlock_sparse(void) { }8581#endif86828783extern void init_kstat_irqs(struct irq_desc *desc, int node, int nr);
+1
kernel/module.c
···35573557 mutex_lock(&module_mutex);35583558 /* Unlink carefully: kallsyms could be walking list. */35593559 list_del_rcu(&mod->list);35603560+ mod_tree_remove(mod);35603561 wake_up_all(&module_wq);35613562 /* Wait for RCU-sched synchronizing before releasing mod->list. */35623563 synchronize_sched();
+9-15
kernel/time/clockevents.c
···120120 /* The clockevent device is getting replaced. Shut it down. */121121122122 case CLOCK_EVT_STATE_SHUTDOWN:123123- return dev->set_state_shutdown(dev);123123+ if (dev->set_state_shutdown)124124+ return dev->set_state_shutdown(dev);125125+ return 0;124126125127 case CLOCK_EVT_STATE_PERIODIC:126128 /* Core internal bug */127129 if (!(dev->features & CLOCK_EVT_FEAT_PERIODIC))128130 return -ENOSYS;129129- return dev->set_state_periodic(dev);131131+ if (dev->set_state_periodic)132132+ return dev->set_state_periodic(dev);133133+ return 0;130134131135 case CLOCK_EVT_STATE_ONESHOT:132136 /* Core internal bug */133137 if (!(dev->features & CLOCK_EVT_FEAT_ONESHOT))134138 return -ENOSYS;135135- return dev->set_state_oneshot(dev);139139+ if (dev->set_state_oneshot)140140+ return dev->set_state_oneshot(dev);141141+ return 0;136142137143 case CLOCK_EVT_STATE_ONESHOT_STOPPED:138144 /* Core internal bug */···476470477471 if (dev->features & CLOCK_EVT_FEAT_DUMMY)478472 return 0;479479-480480- /* New state-specific callbacks */481481- if (!dev->set_state_shutdown)482482- return -EINVAL;483483-484484- if ((dev->features & CLOCK_EVT_FEAT_PERIODIC) &&485485- !dev->set_state_periodic)486486- return -EINVAL;487487-488488- if ((dev->features & CLOCK_EVT_FEAT_ONESHOT) &&489489- !dev->set_state_oneshot)490490- return -EINVAL;491473492474 return 0;493475}
+108-55
kernel/time/tick-broadcast.c
···159159{160160 struct clock_event_device *bc = tick_broadcast_device.evtdev;161161 unsigned long flags;162162- int ret;162162+ int ret = 0;163163164164 raw_spin_lock_irqsave(&tick_broadcast_lock, flags);165165···221221 * If we kept the cpu in the broadcast mask,222222 * tell the caller to leave the per cpu device223223 * in shutdown state. The periodic interrupt224224- * is delivered by the broadcast device.224224+ * is delivered by the broadcast device, if225225+ * the broadcast device exists and is not226226+ * hrtimer based.225227 */226226- ret = cpumask_test_cpu(cpu, tick_broadcast_mask);228228+ if (bc && !(bc->features & CLOCK_EVT_FEAT_HRTIMER))229229+ ret = cpumask_test_cpu(cpu, tick_broadcast_mask);227230 break;228231 default:229229- /* Nothing to do */230230- ret = 0;231232 break;232233 }233234 }···266265 * Check, if the current cpu is in the mask267266 */268267 if (cpumask_test_cpu(cpu, mask)) {268268+ struct clock_event_device *bc = tick_broadcast_device.evtdev;269269+269270 cpumask_clear_cpu(cpu, mask);270270- local = true;271271+ /*272272+ * We only run the local handler, if the broadcast273273+ * device is not hrtimer based. 
Otherwise we run into274274+ * a hrtimer recursion.275275+ *276276+ * local timer_interrupt()277277+ * local_handler()278278+ * expire_hrtimers()279279+ * bc_handler()280280+ * local_handler()281281+ * expire_hrtimers()282282+ */283283+ local = !(bc->features & CLOCK_EVT_FEAT_HRTIMER);271284 }272285273286 if (!cpumask_empty(mask)) {···316301 bool bc_local;317302318303 raw_spin_lock(&tick_broadcast_lock);304304+305305+ /* Handle spurious interrupts gracefully */306306+ if (clockevent_state_shutdown(tick_broadcast_device.evtdev)) {307307+ raw_spin_unlock(&tick_broadcast_lock);308308+ return;309309+ }310310+319311 bc_local = tick_do_periodic_broadcast();320312321313 if (clockevent_state_oneshot(dev)) {···381359 case TICK_BROADCAST_ON:382360 cpumask_set_cpu(cpu, tick_broadcast_on);383361 if (!cpumask_test_and_set_cpu(cpu, tick_broadcast_mask)) {384384- if (tick_broadcast_device.mode ==385385- TICKDEV_MODE_PERIODIC)362362+ /*363363+ * Only shutdown the cpu local device, if:364364+ *365365+ * - the broadcast device exists366366+ * - the broadcast device is not a hrtimer based one367367+ * - the broadcast device is in periodic mode to368368+ * avoid a hickup during switch to oneshot mode369369+ */370370+ if (bc && !(bc->features & CLOCK_EVT_FEAT_HRTIMER) &&371371+ tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)386372 clockevents_shutdown(dev);387373 }388374 break;···409379 break;410380 }411381412412- if (cpumask_empty(tick_broadcast_mask)) {413413- if (!bc_stopped)414414- clockevents_shutdown(bc);415415- } else if (bc_stopped) {416416- if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)417417- tick_broadcast_start_periodic(bc);418418- else419419- tick_broadcast_setup_oneshot(bc);382382+ if (bc) {383383+ if (cpumask_empty(tick_broadcast_mask)) {384384+ if (!bc_stopped)385385+ clockevents_shutdown(bc);386386+ } else if (bc_stopped) {387387+ if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)388388+ tick_broadcast_start_periodic(bc);389389+ else390390+ 
tick_broadcast_setup_oneshot(bc);391391+ }420392 }421393 raw_spin_unlock(&tick_broadcast_lock);422394}···694662 clockevents_switch_state(dev, CLOCK_EVT_STATE_SHUTDOWN);695663}696664697697-/**698698- * tick_broadcast_oneshot_control - Enter/exit broadcast oneshot mode699699- * @state: The target state (enter/exit)700700- *701701- * The system enters/leaves a state, where affected devices might stop702702- * Returns 0 on success, -EBUSY if the cpu is used to broadcast wakeups.703703- *704704- * Called with interrupts disabled, so clockevents_lock is not705705- * required here because the local clock event device cannot go away706706- * under us.707707- */708708-int tick_broadcast_oneshot_control(enum tick_broadcast_state state)665665+int __tick_broadcast_oneshot_control(enum tick_broadcast_state state)709666{710667 struct clock_event_device *bc, *dev;711711- struct tick_device *td;712668 int cpu, ret = 0;713669 ktime_t now;714670715671 /*716716- * Periodic mode does not care about the enter/exit of power717717- * states672672+ * If there is no broadcast device, tell the caller not to go673673+ * into deep idle.718674 */719719- if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)720720- return 0;675675+ if (!tick_broadcast_device.evtdev)676676+ return -EBUSY;721677722722- /*723723- * We are called with preemtion disabled from the depth of the724724- * idle code, so we can't be moved away.725725- */726726- td = this_cpu_ptr(&tick_cpu_device);727727- dev = td->evtdev;728728-729729- if (!(dev->features & CLOCK_EVT_FEAT_C3STOP))730730- return 0;678678+ dev = this_cpu_ptr(&tick_cpu_device)->evtdev;731679732680 raw_spin_lock(&tick_broadcast_lock);733681 bc = tick_broadcast_device.evtdev;734682 cpu = smp_processor_id();735683736684 if (state == TICK_BROADCAST_ENTER) {685685+ /*686686+ * If the current CPU owns the hrtimer broadcast687687+ * mechanism, it cannot go deep idle and we do not add688688+ * the CPU to the broadcast mask. 
We don't have to go689689+ * through the EXIT path as the local timer is not690690+ * shutdown.691691+ */692692+ ret = broadcast_needs_cpu(bc, cpu);693693+ if (ret)694694+ goto out;695695+696696+ /*697697+ * If the broadcast device is in periodic mode, we698698+ * return.699699+ */700700+ if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC) {701701+ /* If it is a hrtimer based broadcast, return busy */702702+ if (bc->features & CLOCK_EVT_FEAT_HRTIMER)703703+ ret = -EBUSY;704704+ goto out;705705+ }706706+737707 if (!cpumask_test_and_set_cpu(cpu, tick_broadcast_oneshot_mask)) {738708 WARN_ON_ONCE(cpumask_test_cpu(cpu, tick_broadcast_pending_mask));709709+710710+ /* Conditionally shut down the local timer. */739711 broadcast_shutdown_local(bc, dev);712712+740713 /*741714 * We only reprogram the broadcast timer if we742715 * did not mark ourself in the force mask and743716 * if the cpu local event is earlier than the744717 * broadcast event. If the current CPU is in745718 * the force mask, then we are going to be746746- * woken by the IPI right away.719719+ * woken by the IPI right away; we return720720+ * busy, so the CPU does not try to go deep721721+ * idle.747722 */748748- if (!cpumask_test_cpu(cpu, tick_broadcast_force_mask) &&749749- dev->next_event.tv64 < bc->next_event.tv64)723723+ if (cpumask_test_cpu(cpu, tick_broadcast_force_mask)) {724724+ ret = -EBUSY;725725+ } else if (dev->next_event.tv64 < bc->next_event.tv64) {750726 tick_broadcast_set_event(bc, cpu, dev->next_event);727727+ /*728728+ * In case of hrtimer broadcasts the729729+ * programming might have moved the730730+ * timer to this cpu. 
If yes, remove731731+ * us from the broadcast mask and732732+ * return busy.733733+ */734734+ ret = broadcast_needs_cpu(bc, cpu);735735+ if (ret) {736736+ cpumask_clear_cpu(cpu,737737+ tick_broadcast_oneshot_mask);738738+ }739739+ }751740 }752752- /*753753- * If the current CPU owns the hrtimer broadcast754754- * mechanism, it cannot go deep idle and we remove the755755- * CPU from the broadcast mask. We don't have to go756756- * through the EXIT path as the local timer is not757757- * shutdown.758758- */759759- ret = broadcast_needs_cpu(bc, cpu);760760- if (ret)761761- cpumask_clear_cpu(cpu, tick_broadcast_oneshot_mask);762741 } else {763742 if (cpumask_test_and_clear_cpu(cpu, tick_broadcast_oneshot_mask)) {764743 clockevents_switch_state(dev, CLOCK_EVT_STATE_ONESHOT);···981938 return bc ? bc->features & CLOCK_EVT_FEAT_ONESHOT : false;982939}983940941941+#else942942+int __tick_broadcast_oneshot_control(enum tick_broadcast_state state)943943+{944944+ struct clock_event_device *bc = tick_broadcast_device.evtdev;945945+946946+ if (!bc || (bc->features & CLOCK_EVT_FEAT_HRTIMER))947947+ return -EBUSY;948948+949949+ return 0;950950+}984951#endif985952986953void __init tick_broadcast_init(void)
+21
kernel/time/tick-common.c
···343343 tick_install_broadcast_device(newdev);344344}345345346346+/**347347+ * tick_broadcast_oneshot_control - Enter/exit broadcast oneshot mode348348+ * @state: The target state (enter/exit)349349+ *350350+ * The system enters/leaves a state, where affected devices might stop351351+ * Returns 0 on success, -EBUSY if the cpu is used to broadcast wakeups.352352+ *353353+ * Called with interrupts disabled, so clockevents_lock is not354354+ * required here because the local clock event device cannot go away355355+ * under us.356356+ */357357+int tick_broadcast_oneshot_control(enum tick_broadcast_state state)358358+{359359+ struct tick_device *td = this_cpu_ptr(&tick_cpu_device);360360+361361+ if (!(td->evtdev->features & CLOCK_EVT_FEAT_C3STOP))362362+ return 0;363363+364364+ return __tick_broadcast_oneshot_control(state);365365+}366366+346367#ifdef CONFIG_HOTPLUG_CPU347368/*348369 * Transfer the do_timer job away from a dying cpu.
···1818 For better error detection enable CONFIG_STACKTRACE,1919 and add slub_debug=U to boot cmdline.20202121-config KASAN_SHADOW_OFFSET2222- hex2323- default 0xdffffc0000000000 if X86_642424-2521choice2622 prompt "Instrumentation type"2723 depends on KASAN
+13-7
mm/memory.c
···2670267026712671 pte_unmap(page_table);2672267226732673+ /* File mapping without ->vm_ops ? */26742674+ if (vma->vm_flags & VM_SHARED)26752675+ return VM_FAULT_SIGBUS;26762676+26732677 /* Check if we need to add a guard page to the stack */26742678 if (check_stack_guard_page(vma, address) < 0)26752679 return VM_FAULT_SIGSEGV;···31033099 - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;3104310031053101 pte_unmap(page_table);31023102+ /* The VMA was not fully populated on mmap() or missing VM_DONTEXPAND */31033103+ if (!vma->vm_ops->fault)31043104+ return VM_FAULT_SIGBUS;31063105 if (!(flags & FAULT_FLAG_WRITE))31073106 return do_read_fault(mm, vma, address, pmd, pgoff, flags,31083107 orig_pte);···32513244 barrier();32523245 if (!pte_present(entry)) {32533246 if (pte_none(entry)) {32543254- if (vma->vm_ops) {32553255- if (likely(vma->vm_ops->fault))32563256- return do_fault(mm, vma, address, pte,32573257- pmd, flags, entry);32583258- }32593259- return do_anonymous_page(mm, vma, address,32603260- pte, pmd, flags);32473247+ if (vma->vm_ops)32483248+ return do_fault(mm, vma, address, pte, pmd,32493249+ flags, entry);32503250+32513251+ return do_anonymous_page(mm, vma, address, pte, pmd,32523252+ flags);32613253 }32623254 return do_swap_page(mm, vma, address,32633255 pte, pmd, flags, entry);
+10-6
net/ceph/ceph_common.c
···99#include <keys/ceph-type.h>1010#include <linux/module.h>1111#include <linux/mount.h>1212+#include <linux/nsproxy.h>1213#include <linux/parser.h>1314#include <linux/sched.h>1415#include <linux/seq_file.h>···1716#include <linux/statfs.h>1817#include <linux/string.h>1918#include <linux/vmalloc.h>2020-#include <linux/nsproxy.h>2121-#include <net/net_namespace.h>221923202421#include <linux/ceph/ceph_features.h>···129130 int ofs = offsetof(struct ceph_options, mon_addr);130131 int i;131132 int ret;133133+134134+ /*135135+ * Don't bother comparing options if network namespaces don't136136+ * match.137137+ */138138+ if (!net_eq(current->nsproxy->net_ns, read_pnet(&client->msgr.net)))139139+ return -1;132140133141 ret = memcmp(opt1, opt2, ofs);134142 if (ret)···340334 const char *c;341335 int err = -ENOMEM;342336 substring_t argstr[MAX_OPT_ARGS];343343-344344- if (current->nsproxy->net_ns != &init_net)345345- return ERR_PTR(-EINVAL);346337347338 opt = kzalloc(sizeof(*opt), GFP_KERNEL);348339 if (!opt)···611608fail_monc:612609 ceph_monc_stop(&client->monc);613610fail:611611+ ceph_messenger_fini(&client->msgr);614612 kfree(client);615613 return ERR_PTR(err);616614}···625621626622 /* unmount */627623 ceph_osdc_stop(&client->osdc);628628-629624 ceph_monc_stop(&client->monc);625625+ ceph_messenger_fini(&client->msgr);630626631627 ceph_debugfs_client_cleanup(client);632628
···32833283 int rc = 0;3284328432853285 if (default_noexec &&32863286- (prot & PROT_EXEC) && (!file || (!shared && (prot & PROT_WRITE)))) {32863286+ (prot & PROT_EXEC) && (!file || IS_PRIVATE(file_inode(file)) ||32873287+ (!shared && (prot & PROT_WRITE)))) {32873288 /*32883289 * We are making executable an anonymous mapping or a32893290 * private file mapping that will also be writable.
+6
security/selinux/ss/ebitmap.c
···153153 if (offset == (u32)-1)154154 return 0;155155156156+ /* don't waste ebitmap space if the netlabel bitmap is empty */157157+ if (bitmap == 0) {158158+ offset += EBITMAP_UNIT_SIZE;159159+ continue;160160+ }161161+156162 if (e_iter == NULL ||157163 offset >= e_iter->startbit + EBITMAP_SIZE) {158164 e_prev = e_iter;
+58
tools/include/linux/compiler.h
···41414242#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))43434444+#include <linux/types.h>4545+4646+static __always_inline void __read_once_size(const volatile void *p, void *res, int size)4747+{4848+ switch (size) {4949+ case 1: *(__u8 *)res = *(volatile __u8 *)p; break;5050+ case 2: *(__u16 *)res = *(volatile __u16 *)p; break;5151+ case 4: *(__u32 *)res = *(volatile __u32 *)p; break;5252+ case 8: *(__u64 *)res = *(volatile __u64 *)p; break;5353+ default:5454+ barrier();5555+ __builtin_memcpy((void *)res, (const void *)p, size);5656+ barrier();5757+ }5858+}5959+6060+static __always_inline void __write_once_size(volatile void *p, void *res, int size)6161+{6262+ switch (size) {6363+ case 1: *(volatile __u8 *)p = *(__u8 *)res; break;6464+ case 2: *(volatile __u16 *)p = *(__u16 *)res; break;6565+ case 4: *(volatile __u32 *)p = *(__u32 *)res; break;6666+ case 8: *(volatile __u64 *)p = *(__u64 *)res; break;6767+ default:6868+ barrier();6969+ __builtin_memcpy((void *)p, (const void *)res, size);7070+ barrier();7171+ }7272+}7373+7474+/*7575+ * Prevent the compiler from merging or refetching reads or writes. The7676+ * compiler is also forbidden from reordering successive instances of7777+ * READ_ONCE, WRITE_ONCE and ACCESS_ONCE (see below), but only when the7878+ * compiler is aware of some particular ordering. One way to make the7979+ * compiler aware of ordering is to put the two invocations of READ_ONCE,8080+ * WRITE_ONCE or ACCESS_ONCE() in different C statements.8181+ *8282+ * In contrast to ACCESS_ONCE these two macros will also work on aggregate8383+ * data types like structs or unions. 
If the size of the accessed data8484+ * type exceeds the word size of the machine (e.g., 32 bits or 64 bits)8585+ * READ_ONCE() and WRITE_ONCE() will fall back to memcpy and print a8686+ * compile-time warning.8787+ *8888+ * Their two major use cases are: (1) Mediating communication between8989+ * process-level code and irq/NMI handlers, all running on the same CPU,9090+ * and (2) Ensuring that the compiler does not fold, spindle, or otherwise9191+ * mutilate accesses that either do not require ordering or that interact9292+ * with an explicit memory barrier or atomic instruction that provides the9393+ * required ordering.9494+ */9595+9696+#define READ_ONCE(x) \9797+ ({ union { typeof(x) __val; char __c[1]; } __u; __read_once_size(&(x), __u.__c, sizeof(x)); __u.__val; })9898+9999+#define WRITE_ONCE(x, val) \100100+ ({ union { typeof(x) __val; char __c[1]; } __u = { .__val = (val) }; __write_once_size(&(x), __u.__c, sizeof(x)); __u.__val; })101101+44102#endif /* _TOOLS_LINUX_COMPILER_H */
···11+/*22+ Red Black Trees33+ (C) 1999 Andrea Arcangeli <andrea@suse.de>44+55+ This program is free software; you can redistribute it and/or modify66+ it under the terms of the GNU General Public License as published by77+ the Free Software Foundation; either version 2 of the License, or88+ (at your option) any later version.99+1010+ This program is distributed in the hope that it will be useful,1111+ but WITHOUT ANY WARRANTY; without even the implied warranty of1212+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the1313+ GNU General Public License for more details.1414+1515+ You should have received a copy of the GNU General Public License1616+ along with this program; if not, write to the Free Software1717+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA1818+1919+ linux/include/linux/rbtree.h2020+2121+ To use rbtrees you'll have to implement your own insert and search cores.2222+ This will avoid us to use callbacks and to drop drammatically performances.2323+ I know it's not the cleaner way, but in C (not in C++) to get2424+ performances and genericity...2525+2626+ See Documentation/rbtree.txt for documentation and samples.2727+*/2828+2929+#ifndef __TOOLS_LINUX_PERF_RBTREE_H3030+#define __TOOLS_LINUX_PERF_RBTREE_H3131+3232+#include <linux/kernel.h>3333+#include <linux/stddef.h>3434+3535+struct rb_node {3636+ unsigned long __rb_parent_color;3737+ struct rb_node *rb_right;3838+ struct rb_node *rb_left;3939+} __attribute__((aligned(sizeof(long))));4040+ /* The alignment might seem pointless, but allegedly CRIS needs it */4141+4242+struct rb_root {4343+ struct rb_node *rb_node;4444+};4545+4646+4747+#define rb_parent(r) ((struct rb_node *)((r)->__rb_parent_color & ~3))4848+4949+#define RB_ROOT (struct rb_root) { NULL, }5050+#define rb_entry(ptr, type, member) container_of(ptr, type, member)5151+5252+#define RB_EMPTY_ROOT(root) ((root)->rb_node == NULL)5353+5454+/* 'empty' nodes are nodes that are known not to be inserted in an 
rbtree */5555+#define RB_EMPTY_NODE(node) \5656+ ((node)->__rb_parent_color == (unsigned long)(node))5757+#define RB_CLEAR_NODE(node) \5858+ ((node)->__rb_parent_color = (unsigned long)(node))5959+6060+6161+extern void rb_insert_color(struct rb_node *, struct rb_root *);6262+extern void rb_erase(struct rb_node *, struct rb_root *);6363+6464+6565+/* Find logical next and previous nodes in a tree */6666+extern struct rb_node *rb_next(const struct rb_node *);6767+extern struct rb_node *rb_prev(const struct rb_node *);6868+extern struct rb_node *rb_first(const struct rb_root *);6969+extern struct rb_node *rb_last(const struct rb_root *);7070+7171+/* Postorder iteration - always visit the parent after its children */7272+extern struct rb_node *rb_first_postorder(const struct rb_root *);7373+extern struct rb_node *rb_next_postorder(const struct rb_node *);7474+7575+/* Fast replacement of a single node without remove/rebalance/add/rebalance */7676+extern void rb_replace_node(struct rb_node *victim, struct rb_node *new,7777+ struct rb_root *root);7878+7979+static inline void rb_link_node(struct rb_node *node, struct rb_node *parent,8080+ struct rb_node **rb_link)8181+{8282+ node->__rb_parent_color = (unsigned long)parent;8383+ node->rb_left = node->rb_right = NULL;8484+8585+ *rb_link = node;8686+}8787+8888+#define rb_entry_safe(ptr, type, member) \8989+ ({ typeof(ptr) ____ptr = (ptr); \9090+ ____ptr ? rb_entry(____ptr, type, member) : NULL; \9191+ })9292+9393+9494+/*9595+ * Handy for checking that we are not deleting an entry that is9696+ * already in a list, found in block/{blk-throttle,cfq-iosched}.c,9797+ * probably should be moved to lib/rbtree.c...9898+ */9999+static inline void rb_erase_init(struct rb_node *n, struct rb_root *root)100100+{101101+ rb_erase(n, root);102102+ RB_CLEAR_NODE(n);103103+}104104+#endif /* __TOOLS_LINUX_PERF_RBTREE_H */
+245
tools/include/linux/rbtree_augmented.h
···11+/*22+ Red Black Trees33+ (C) 1999 Andrea Arcangeli <andrea@suse.de>44+ (C) 2002 David Woodhouse <dwmw2@infradead.org>55+ (C) 2012 Michel Lespinasse <walken@google.com>66+77+ This program is free software; you can redistribute it and/or modify88+ it under the terms of the GNU General Public License as published by99+ the Free Software Foundation; either version 2 of the License, or1010+ (at your option) any later version.1111+1212+ This program is distributed in the hope that it will be useful,1313+ but WITHOUT ANY WARRANTY; without even the implied warranty of1414+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the1515+ GNU General Public License for more details.1616+1717+ You should have received a copy of the GNU General Public License1818+ along with this program; if not, write to the Free Software1919+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA2020+2121+ tools/linux/include/linux/rbtree_augmented.h2222+2323+ Copied from:2424+ linux/include/linux/rbtree_augmented.h2525+*/2626+2727+#ifndef _TOOLS_LINUX_RBTREE_AUGMENTED_H2828+#define _TOOLS_LINUX_RBTREE_AUGMENTED_H2929+3030+#include <linux/compiler.h>3131+#include <linux/rbtree.h>3232+3333+/*3434+ * Please note - only struct rb_augment_callbacks and the prototypes for3535+ * rb_insert_augmented() and rb_erase_augmented() are intended to be public.3636+ * The rest are implementation details you are not expected to depend on.3737+ *3838+ * See Documentation/rbtree.txt for documentation and samples.3939+ */4040+4141+struct rb_augment_callbacks {4242+ void (*propagate)(struct rb_node *node, struct rb_node *stop);4343+ void (*copy)(struct rb_node *old, struct rb_node *new);4444+ void (*rotate)(struct rb_node *old, struct rb_node *new);4545+};4646+4747+extern void __rb_insert_augmented(struct rb_node *node, struct rb_root *root,4848+ void (*augment_rotate)(struct rb_node *old, struct rb_node *new));4949+/*5050+ * Fixup the rbtree and update the augmented information when 
rebalancing.5151+ *5252+ * On insertion, the user must update the augmented information on the path5353+ * leading to the inserted node, then call rb_link_node() as usual and5454+ * rb_augment_inserted() instead of the usual rb_insert_color() call.5555+ * If rb_augment_inserted() rebalances the rbtree, it will callback into5656+ * a user provided function to update the augmented information on the5757+ * affected subtrees.5858+ */5959+static inline void6060+rb_insert_augmented(struct rb_node *node, struct rb_root *root,6161+ const struct rb_augment_callbacks *augment)6262+{6363+ __rb_insert_augmented(node, root, augment->rotate);6464+}6565+6666+#define RB_DECLARE_CALLBACKS(rbstatic, rbname, rbstruct, rbfield, \6767+ rbtype, rbaugmented, rbcompute) \6868+static inline void \6969+rbname ## _propagate(struct rb_node *rb, struct rb_node *stop) \7070+{ \7171+ while (rb != stop) { \7272+ rbstruct *node = rb_entry(rb, rbstruct, rbfield); \7373+ rbtype augmented = rbcompute(node); \7474+ if (node->rbaugmented == augmented) \7575+ break; \7676+ node->rbaugmented = augmented; \7777+ rb = rb_parent(&node->rbfield); \7878+ } \7979+} \8080+static inline void \8181+rbname ## _copy(struct rb_node *rb_old, struct rb_node *rb_new) \8282+{ \8383+ rbstruct *old = rb_entry(rb_old, rbstruct, rbfield); \8484+ rbstruct *new = rb_entry(rb_new, rbstruct, rbfield); \8585+ new->rbaugmented = old->rbaugmented; \8686+} \8787+static void \8888+rbname ## _rotate(struct rb_node *rb_old, struct rb_node *rb_new) \8989+{ \9090+ rbstruct *old = rb_entry(rb_old, rbstruct, rbfield); \9191+ rbstruct *new = rb_entry(rb_new, rbstruct, rbfield); \9292+ new->rbaugmented = old->rbaugmented; \9393+ old->rbaugmented = rbcompute(old); \9494+} \9595+rbstatic const struct rb_augment_callbacks rbname = { \9696+ rbname ## _propagate, rbname ## _copy, rbname ## _rotate \9797+};9898+9999+100100+#define RB_RED 0101101+#define RB_BLACK 1102102+103103+#define __rb_parent(pc) ((struct rb_node *)(pc & 
~3))104104+105105+#define __rb_color(pc) ((pc) & 1)106106+#define __rb_is_black(pc) __rb_color(pc)107107+#define __rb_is_red(pc) (!__rb_color(pc))108108+#define rb_color(rb) __rb_color((rb)->__rb_parent_color)109109+#define rb_is_red(rb) __rb_is_red((rb)->__rb_parent_color)110110+#define rb_is_black(rb) __rb_is_black((rb)->__rb_parent_color)111111+112112+static inline void rb_set_parent(struct rb_node *rb, struct rb_node *p)113113+{114114+ rb->__rb_parent_color = rb_color(rb) | (unsigned long)p;115115+}116116+117117+static inline void rb_set_parent_color(struct rb_node *rb,118118+ struct rb_node *p, int color)119119+{120120+ rb->__rb_parent_color = (unsigned long)p | color;121121+}122122+123123+static inline void124124+__rb_change_child(struct rb_node *old, struct rb_node *new,125125+ struct rb_node *parent, struct rb_root *root)126126+{127127+ if (parent) {128128+ if (parent->rb_left == old)129129+ parent->rb_left = new;130130+ else131131+ parent->rb_right = new;132132+ } else133133+ root->rb_node = new;134134+}135135+136136+extern void __rb_erase_color(struct rb_node *parent, struct rb_root *root,137137+ void (*augment_rotate)(struct rb_node *old, struct rb_node *new));138138+139139+static __always_inline struct rb_node *140140+__rb_erase_augmented(struct rb_node *node, struct rb_root *root,141141+ const struct rb_augment_callbacks *augment)142142+{143143+ struct rb_node *child = node->rb_right, *tmp = node->rb_left;144144+ struct rb_node *parent, *rebalance;145145+ unsigned long pc;146146+147147+ if (!tmp) {148148+ /*149149+ * Case 1: node to erase has no more than 1 child (easy!)150150+ *151151+ * Note that if there is one child it must be red due to 5)152152+ * and node must be black due to 4). 
We adjust colors locally153153+ * so as to bypass __rb_erase_color() later on.154154+ */155155+ pc = node->__rb_parent_color;156156+ parent = __rb_parent(pc);157157+ __rb_change_child(node, child, parent, root);158158+ if (child) {159159+ child->__rb_parent_color = pc;160160+ rebalance = NULL;161161+ } else162162+ rebalance = __rb_is_black(pc) ? parent : NULL;163163+ tmp = parent;164164+ } else if (!child) {165165+ /* Still case 1, but this time the child is node->rb_left */166166+ tmp->__rb_parent_color = pc = node->__rb_parent_color;167167+ parent = __rb_parent(pc);168168+ __rb_change_child(node, tmp, parent, root);169169+ rebalance = NULL;170170+ tmp = parent;171171+ } else {172172+ struct rb_node *successor = child, *child2;173173+ tmp = child->rb_left;174174+ if (!tmp) {175175+ /*176176+ * Case 2: node's successor is its right child177177+ *178178+ * (n) (s)179179+ * / \ / \180180+ * (x) (s) -> (x) (c)181181+ * \182182+ * (c)183183+ */184184+ parent = successor;185185+ child2 = successor->rb_right;186186+ augment->copy(node, successor);187187+ } else {188188+ /*189189+ * Case 3: node's successor is leftmost under190190+ * node's right child subtree191191+ *192192+ * (n) (s)193193+ * / \ / \194194+ * (x) (y) -> (x) (y)195195+ * / /196196+ * (p) (p)197197+ * / /198198+ * (s) (c)199199+ * \200200+ * (c)201201+ */202202+ do {203203+ parent = successor;204204+ successor = tmp;205205+ tmp = tmp->rb_left;206206+ } while (tmp);207207+ parent->rb_left = child2 = successor->rb_right;208208+ successor->rb_right = child;209209+ rb_set_parent(child, successor);210210+ augment->copy(node, successor);211211+ augment->propagate(parent, successor);212212+ }213213+214214+ successor->rb_left = tmp = node->rb_left;215215+ rb_set_parent(tmp, successor);216216+217217+ pc = node->__rb_parent_color;218218+ tmp = __rb_parent(pc);219219+ __rb_change_child(node, successor, tmp, root);220220+ if (child2) {221221+ successor->__rb_parent_color = pc;222222+ rb_set_parent_color(child2, 
parent, RB_BLACK);223223+ rebalance = NULL;224224+ } else {225225+ unsigned long pc2 = successor->__rb_parent_color;226226+ successor->__rb_parent_color = pc;227227+ rebalance = __rb_is_black(pc2) ? parent : NULL;228228+ }229229+ tmp = successor;230230+ }231231+232232+ augment->propagate(tmp, NULL);233233+ return rebalance;234234+}235235+236236+static __always_inline void237237+rb_erase_augmented(struct rb_node *node, struct rb_root *root,238238+ const struct rb_augment_callbacks *augment)239239+{240240+ struct rb_node *rebalance = __rb_erase_augmented(node, root, augment);241241+ if (rebalance)242242+ __rb_erase_color(rebalance, root, augment->rotate);243243+}244244+245245+#endif /* _TOOLS_LINUX_RBTREE_AUGMENTED_H */
+548
tools/lib/rbtree.c
···11+/*22+ Red Black Trees33+ (C) 1999 Andrea Arcangeli <andrea@suse.de>44+ (C) 2002 David Woodhouse <dwmw2@infradead.org>55+ (C) 2012 Michel Lespinasse <walken@google.com>66+77+ This program is free software; you can redistribute it and/or modify88+ it under the terms of the GNU General Public License as published by99+ the Free Software Foundation; either version 2 of the License, or1010+ (at your option) any later version.1111+1212+ This program is distributed in the hope that it will be useful,1313+ but WITHOUT ANY WARRANTY; without even the implied warranty of1414+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the1515+ GNU General Public License for more details.1616+1717+ You should have received a copy of the GNU General Public License1818+ along with this program; if not, write to the Free Software1919+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA2020+2121+ linux/lib/rbtree.c2222+*/2323+2424+#include <linux/rbtree_augmented.h>2525+2626+/*2727+ * red-black trees properties: http://en.wikipedia.org/wiki/Rbtree2828+ *2929+ * 1) A node is either red or black3030+ * 2) The root is black3131+ * 3) All leaves (NULL) are black3232+ * 4) Both children of every red node are black3333+ * 5) Every simple path from root to leaves contains the same number3434+ * of black nodes.3535+ *3636+ * 4 and 5 give the O(log n) guarantee, since 4 implies you cannot have two3737+ * consecutive red nodes in a path and every red node is therefore followed by3838+ * a black. So if B is the number of black nodes on every simple path (as per3939+ * 5), then the longest possible path due to 4 is 2B.4040+ *4141+ * We shall indicate color with case, where black nodes are uppercase and red4242+ * nodes will be lowercase. 
Unknown color nodes shall be drawn as red within4343+ * parentheses and have some accompanying text comment.4444+ */4545+4646+static inline void rb_set_black(struct rb_node *rb)4747+{4848+ rb->__rb_parent_color |= RB_BLACK;4949+}5050+5151+static inline struct rb_node *rb_red_parent(struct rb_node *red)5252+{5353+ return (struct rb_node *)red->__rb_parent_color;5454+}5555+5656+/*5757+ * Helper function for rotations:5858+ * - old's parent and color get assigned to new5959+ * - old gets assigned new as a parent and 'color' as a color.6060+ */6161+static inline void6262+__rb_rotate_set_parents(struct rb_node *old, struct rb_node *new,6363+ struct rb_root *root, int color)6464+{6565+ struct rb_node *parent = rb_parent(old);6666+ new->__rb_parent_color = old->__rb_parent_color;6767+ rb_set_parent_color(old, new, color);6868+ __rb_change_child(old, new, parent, root);6969+}7070+7171+static __always_inline void7272+__rb_insert(struct rb_node *node, struct rb_root *root,7373+ void (*augment_rotate)(struct rb_node *old, struct rb_node *new))7474+{7575+ struct rb_node *parent = rb_red_parent(node), *gparent, *tmp;7676+7777+ while (true) {7878+ /*7979+ * Loop invariant: node is red8080+ *8181+ * If there is a black parent, we are done.8282+ * Otherwise, take some corrective action as we don't8383+ * want a red root or two consecutive red nodes.8484+ */8585+ if (!parent) {8686+ rb_set_parent_color(node, NULL, RB_BLACK);8787+ break;8888+ } else if (rb_is_black(parent))8989+ break;9090+9191+ gparent = rb_red_parent(parent);9292+9393+ tmp = gparent->rb_right;9494+ if (parent != tmp) { /* parent == gparent->rb_left */9595+ if (tmp && rb_is_red(tmp)) {9696+ /*9797+ * Case 1 - color flips9898+ *9999+ * G g100100+ * / \ / \101101+ * p u --> P U102102+ * / /103103+ * n n104104+ *105105+ * However, since g's parent might be red, and106106+ * 4) does not allow this, we need to recurse107107+ * at g.108108+ */109109+ rb_set_parent_color(tmp, gparent, RB_BLACK);110110+ 
rb_set_parent_color(parent, gparent, RB_BLACK);111111+ node = gparent;112112+ parent = rb_parent(node);113113+ rb_set_parent_color(node, parent, RB_RED);114114+ continue;115115+ }116116+117117+ tmp = parent->rb_right;118118+ if (node == tmp) {119119+ /*120120+ * Case 2 - left rotate at parent121121+ *122122+ * G G123123+ * / \ / \124124+ * p U --> n U125125+ * \ /126126+ * n p127127+ *128128+ * This still leaves us in violation of 4), the129129+ * continuation into Case 3 will fix that.130130+ */131131+ parent->rb_right = tmp = node->rb_left;132132+ node->rb_left = parent;133133+ if (tmp)134134+ rb_set_parent_color(tmp, parent,135135+ RB_BLACK);136136+ rb_set_parent_color(parent, node, RB_RED);137137+ augment_rotate(parent, node);138138+ parent = node;139139+ tmp = node->rb_right;140140+ }141141+142142+ /*143143+ * Case 3 - right rotate at gparent144144+ *145145+ * G P146146+ * / \ / \147147+ * p U --> n g148148+ * / \149149+ * n U150150+ */151151+ gparent->rb_left = tmp; /* == parent->rb_right */152152+ parent->rb_right = gparent;153153+ if (tmp)154154+ rb_set_parent_color(tmp, gparent, RB_BLACK);155155+ __rb_rotate_set_parents(gparent, parent, root, RB_RED);156156+ augment_rotate(gparent, parent);157157+ break;158158+ } else {159159+ tmp = gparent->rb_left;160160+ if (tmp && rb_is_red(tmp)) {161161+ /* Case 1 - color flips */162162+ rb_set_parent_color(tmp, gparent, RB_BLACK);163163+ rb_set_parent_color(parent, gparent, RB_BLACK);164164+ node = gparent;165165+ parent = rb_parent(node);166166+ rb_set_parent_color(node, parent, RB_RED);167167+ continue;168168+ }169169+170170+ tmp = parent->rb_left;171171+ if (node == tmp) {172172+ /* Case 2 - right rotate at parent */173173+ parent->rb_left = tmp = node->rb_right;174174+ node->rb_right = parent;175175+ if (tmp)176176+ rb_set_parent_color(tmp, parent,177177+ RB_BLACK);178178+ rb_set_parent_color(parent, node, RB_RED);179179+ augment_rotate(parent, node);180180+ parent = node;181181+ tmp = node->rb_left;182182+ 
}183183+184184+ /* Case 3 - left rotate at gparent */185185+ gparent->rb_right = tmp; /* == parent->rb_left */186186+ parent->rb_left = gparent;187187+ if (tmp)188188+ rb_set_parent_color(tmp, gparent, RB_BLACK);189189+ __rb_rotate_set_parents(gparent, parent, root, RB_RED);190190+ augment_rotate(gparent, parent);191191+ break;192192+ }193193+ }194194+}195195+196196+/*197197+ * Inline version for rb_erase() use - we want to be able to inline198198+ * and eliminate the dummy_rotate callback there199199+ */200200+static __always_inline void201201+____rb_erase_color(struct rb_node *parent, struct rb_root *root,202202+ void (*augment_rotate)(struct rb_node *old, struct rb_node *new))203203+{204204+ struct rb_node *node = NULL, *sibling, *tmp1, *tmp2;205205+206206+ while (true) {207207+ /*208208+ * Loop invariants:209209+ * - node is black (or NULL on first iteration)210210+ * - node is not the root (parent is not NULL)211211+ * - All leaf paths going through parent and node have a212212+ * black node count that is 1 lower than other leaf paths.213213+ */214214+ sibling = parent->rb_right;215215+ if (node != sibling) { /* node == parent->rb_left */216216+ if (rb_is_red(sibling)) {217217+ /*218218+ * Case 1 - left rotate at parent219219+ *220220+ * P S221221+ * / \ / \222222+ * N s --> p Sr223223+ * / \ / \224224+ * Sl Sr N Sl225225+ */226226+ parent->rb_right = tmp1 = sibling->rb_left;227227+ sibling->rb_left = parent;228228+ rb_set_parent_color(tmp1, parent, RB_BLACK);229229+ __rb_rotate_set_parents(parent, sibling, root,230230+ RB_RED);231231+ augment_rotate(parent, sibling);232232+ sibling = tmp1;233233+ }234234+ tmp1 = sibling->rb_right;235235+ if (!tmp1 || rb_is_black(tmp1)) {236236+ tmp2 = sibling->rb_left;237237+ if (!tmp2 || rb_is_black(tmp2)) {238238+ /*239239+ * Case 2 - sibling color flip240240+ * (p could be either color here)241241+ *242242+ * (p) (p)243243+ * / \ / \244244+ * N S --> N s245245+ * / \ / \246246+ * Sl Sr Sl Sr247247+ *248248+ * This leaves 
us violating 5) which249249+ * can be fixed by flipping p to black250250+ * if it was red, or by recursing at p.251251+ * p is red when coming from Case 1.252252+ */253253+ rb_set_parent_color(sibling, parent,254254+ RB_RED);255255+ if (rb_is_red(parent))256256+ rb_set_black(parent);257257+ else {258258+ node = parent;259259+ parent = rb_parent(node);260260+ if (parent)261261+ continue;262262+ }263263+ break;264264+ }265265+ /*266266+ * Case 3 - right rotate at sibling267267+ * (p could be either color here)268268+ *269269+ * (p) (p)270270+ * / \ / \271271+ * N S --> N Sl272272+ * / \ \273273+ * sl Sr s274274+ * \275275+ * Sr276276+ */277277+ sibling->rb_left = tmp1 = tmp2->rb_right;278278+ tmp2->rb_right = sibling;279279+ parent->rb_right = tmp2;280280+ if (tmp1)281281+ rb_set_parent_color(tmp1, sibling,282282+ RB_BLACK);283283+ augment_rotate(sibling, tmp2);284284+ tmp1 = sibling;285285+ sibling = tmp2;286286+ }287287+ /*288288+ * Case 4 - left rotate at parent + color flips289289+ * (p and sl could be either color here.290290+ * After rotation, p becomes black, s acquires291291+ * p's color, and sl keeps its color)292292+ *293293+ * (p) (s)294294+ * / \ / \295295+ * N S --> P Sr296296+ * / \ / \297297+ * (sl) sr N (sl)298298+ */299299+ parent->rb_right = tmp2 = sibling->rb_left;300300+ sibling->rb_left = parent;301301+ rb_set_parent_color(tmp1, sibling, RB_BLACK);302302+ if (tmp2)303303+ rb_set_parent(tmp2, parent);304304+ __rb_rotate_set_parents(parent, sibling, root,305305+ RB_BLACK);306306+ augment_rotate(parent, sibling);307307+ break;308308+ } else {309309+ sibling = parent->rb_left;310310+ if (rb_is_red(sibling)) {311311+ /* Case 1 - right rotate at parent */312312+ parent->rb_left = tmp1 = sibling->rb_right;313313+ sibling->rb_right = parent;314314+ rb_set_parent_color(tmp1, parent, RB_BLACK);315315+ __rb_rotate_set_parents(parent, sibling, root,316316+ RB_RED);317317+ augment_rotate(parent, sibling);318318+ sibling = tmp1;319319+ }320320+ tmp1 = 
sibling->rb_left;321321+ if (!tmp1 || rb_is_black(tmp1)) {322322+ tmp2 = sibling->rb_right;323323+ if (!tmp2 || rb_is_black(tmp2)) {324324+ /* Case 2 - sibling color flip */325325+ rb_set_parent_color(sibling, parent,326326+ RB_RED);327327+ if (rb_is_red(parent))328328+ rb_set_black(parent);329329+ else {330330+ node = parent;331331+ parent = rb_parent(node);332332+ if (parent)333333+ continue;334334+ }335335+ break;336336+ }337337+ /* Case 3 - right rotate at sibling */338338+ sibling->rb_right = tmp1 = tmp2->rb_left;339339+ tmp2->rb_left = sibling;340340+ parent->rb_left = tmp2;341341+ if (tmp1)342342+ rb_set_parent_color(tmp1, sibling,343343+ RB_BLACK);344344+ augment_rotate(sibling, tmp2);345345+ tmp1 = sibling;346346+ sibling = tmp2;347347+ }348348+ /* Case 4 - left rotate at parent + color flips */349349+ parent->rb_left = tmp2 = sibling->rb_right;350350+ sibling->rb_right = parent;351351+ rb_set_parent_color(tmp1, sibling, RB_BLACK);352352+ if (tmp2)353353+ rb_set_parent(tmp2, parent);354354+ __rb_rotate_set_parents(parent, sibling, root,355355+ RB_BLACK);356356+ augment_rotate(parent, sibling);357357+ break;358358+ }359359+ }360360+}361361+362362+/* Non-inline version for rb_erase_augmented() use */363363+void __rb_erase_color(struct rb_node *parent, struct rb_root *root,364364+ void (*augment_rotate)(struct rb_node *old, struct rb_node *new))365365+{366366+ ____rb_erase_color(parent, root, augment_rotate);367367+}368368+369369+/*370370+ * Non-augmented rbtree manipulation functions.371371+ *372372+ * We use dummy augmented callbacks here, and have the compiler optimize them373373+ * out of the rb_insert_color() and rb_erase() function definitions.374374+ */375375+376376+static inline void dummy_propagate(struct rb_node *node, struct rb_node *stop) {}377377+static inline void dummy_copy(struct rb_node *old, struct rb_node *new) {}378378+static inline void dummy_rotate(struct rb_node *old, struct rb_node *new) {}379379+380380+static const struct 
rb_augment_callbacks dummy_callbacks = {381381+ dummy_propagate, dummy_copy, dummy_rotate382382+};383383+384384+void rb_insert_color(struct rb_node *node, struct rb_root *root)385385+{386386+ __rb_insert(node, root, dummy_rotate);387387+}388388+389389+void rb_erase(struct rb_node *node, struct rb_root *root)390390+{391391+ struct rb_node *rebalance;392392+ rebalance = __rb_erase_augmented(node, root, &dummy_callbacks);393393+ if (rebalance)394394+ ____rb_erase_color(rebalance, root, dummy_rotate);395395+}396396+397397+/*398398+ * Augmented rbtree manipulation functions.399399+ *400400+ * This instantiates the same __always_inline functions as in the non-augmented401401+ * case, but this time with user-defined callbacks.402402+ */403403+404404+void __rb_insert_augmented(struct rb_node *node, struct rb_root *root,405405+ void (*augment_rotate)(struct rb_node *old, struct rb_node *new))406406+{407407+ __rb_insert(node, root, augment_rotate);408408+}409409+410410+/*411411+ * This function returns the first node (in sort order) of the tree.412412+ */413413+struct rb_node *rb_first(const struct rb_root *root)414414+{415415+ struct rb_node *n;416416+417417+ n = root->rb_node;418418+ if (!n)419419+ return NULL;420420+ while (n->rb_left)421421+ n = n->rb_left;422422+ return n;423423+}424424+425425+struct rb_node *rb_last(const struct rb_root *root)426426+{427427+ struct rb_node *n;428428+429429+ n = root->rb_node;430430+ if (!n)431431+ return NULL;432432+ while (n->rb_right)433433+ n = n->rb_right;434434+ return n;435435+}436436+437437+struct rb_node *rb_next(const struct rb_node *node)438438+{439439+ struct rb_node *parent;440440+441441+ if (RB_EMPTY_NODE(node))442442+ return NULL;443443+444444+ /*445445+ * If we have a right-hand child, go down and then left as far446446+ * as we can.447447+ */448448+ if (node->rb_right) {449449+ node = node->rb_right;450450+ while (node->rb_left)451451+ node=node->rb_left;452452+ return (struct rb_node *)node;453453+ }454454+455455+ 
/*456456+ * No right-hand children. Everything down and left is smaller than us,457457+ * so any 'next' node must be in the general direction of our parent.458458+ * Go up the tree; any time the ancestor is a right-hand child of its459459+ * parent, keep going up. First time it's a left-hand child of its460460+ * parent, said parent is our 'next' node.461461+ */462462+ while ((parent = rb_parent(node)) && node == parent->rb_right)463463+ node = parent;464464+465465+ return parent;466466+}467467+468468+struct rb_node *rb_prev(const struct rb_node *node)469469+{470470+ struct rb_node *parent;471471+472472+ if (RB_EMPTY_NODE(node))473473+ return NULL;474474+475475+ /*476476+ * If we have a left-hand child, go down and then right as far477477+ * as we can.478478+ */479479+ if (node->rb_left) {480480+ node = node->rb_left;481481+ while (node->rb_right)482482+ node=node->rb_right;483483+ return (struct rb_node *)node;484484+ }485485+486486+ /*487487+ * No left-hand children. Go up till we find an ancestor which488488+ * is a right-hand child of its parent.489489+ */490490+ while ((parent = rb_parent(node)) && node == parent->rb_left)491491+ node = parent;492492+493493+ return parent;494494+}495495+496496+void rb_replace_node(struct rb_node *victim, struct rb_node *new,497497+ struct rb_root *root)498498+{499499+ struct rb_node *parent = rb_parent(victim);500500+501501+ /* Set the surrounding nodes to point to the replacement */502502+ __rb_change_child(victim, new, parent, root);503503+ if (victim->rb_left)504504+ rb_set_parent(victim->rb_left, new);505505+ if (victim->rb_right)506506+ rb_set_parent(victim->rb_right, new);507507+508508+ /* Copy the pointers/colour from the victim to the replacement */509509+ *new = *victim;510510+}511511+512512+static struct rb_node *rb_left_deepest_node(const struct rb_node *node)513513+{514514+ for (;;) {515515+ if (node->rb_left)516516+ node = node->rb_left;517517+ else if (node->rb_right)518518+ node = node->rb_right;519519+ 
else520520+ return (struct rb_node *)node;521521+ }522522+}523523+524524+struct rb_node *rb_next_postorder(const struct rb_node *node)525525+{526526+ const struct rb_node *parent;527527+ if (!node)528528+ return NULL;529529+ parent = rb_parent(node);530530+531531+ /* If we're sitting on node, we've already seen our children */532532+ if (parent && node == parent->rb_left && parent->rb_right) {533533+ /* If we are the parent's left node, go to the parent's right534534+ * node then all the way down to the left */535535+ return rb_left_deepest_node(parent->rb_right);536536+ } else537537+ /* Otherwise we are the parent's right node, and the parent538538+ * should be next */539539+ return (struct rb_node *)parent;540540+}541541+542542+struct rb_node *rb_first_postorder(const struct rb_root *root)543543+{544544+ if (!root->rb_node)545545+ return NULL;546546+547547+ return rb_left_deepest_node(root->rb_node);548548+}
···11-#ifndef __TOOLS_LINUX_PERF_RBTREE_H22-#define __TOOLS_LINUX_PERF_RBTREE_H33-#include <stdbool.h>44-#include "../../../../include/linux/rbtree.h"55-66-/*77- * Handy for checking that we are not deleting an entry that is88- * already in a list, found in block/{blk-throttle,cfq-iosched}.c,99- * probably should be moved to lib/rbtree.c...1010- */1111-static inline void rb_erase_init(struct rb_node *n, struct rb_root *root)1212-{1313- rb_erase(n, root);1414- RB_CLEAR_NODE(n);1515-}1616-#endif /* __TOOLS_LINUX_PERF_RBTREE_H */