CREDITS
···
 N: Martin Kepplinger
 E: martink@posteo.de
-E: martin.kepplinger@theobroma-systems.com
+E: martin.kepplinger@ginzinger.com
 W: http://www.martinkepplinger.com
 D: mma8452 accelerators iio driver
-D: Kernel cleanups
+D: pegasus_notetaker input driver
+D: Kernel fixes and cleanups
 S: Garnisonstraße 26
 S: 4020 Linz
 S: Austria
Documentation/device-mapper/dm-raid.txt (+1)
···
 	with a reshape in progress.
 1.9.0	Add support for RAID level takeover/reshape/region size
 	and set size reduction.
+1.9.1	Fix activation of existing RAID 4/10 mapped devices
Documentation/devicetree/bindings/serial/cdns,uart.txt
···
 Binding for Cadence UART Controller

 Required properties:
-- compatible : should be "cdns,uart-r1p8", or "xlnx,xuartps"
+- compatible :
+  Use "xlnx,xuartps","cdns,uart-r1p8" for Zynq-7xxx SoC.
+  Use "xlnx,zynqmp-uart","cdns,uart-r1p12" for Zynq Ultrascale+ MPSoC.
 - reg: Should contain UART controller registers location and length.
 - interrupts: Should contain UART controller interrupts.
 - clocks: Must contain phandles to the UART clocks
···99 - "renesas,scifb-r8a73a4" for R8A73A4 (R-Mobile APE6) SCIFB compatible UART.1010 - "renesas,scifa-r8a7740" for R8A7740 (R-Mobile A1) SCIFA compatible UART.1111 - "renesas,scifb-r8a7740" for R8A7740 (R-Mobile A1) SCIFB compatible UART.1212+ - "renesas,scif-r8a7743" for R8A7743 (RZ/G1M) SCIF compatible UART.1313+ - "renesas,scifa-r8a7743" for R8A7743 (RZ/G1M) SCIFA compatible UART.1414+ - "renesas,scifb-r8a7743" for R8A7743 (RZ/G1M) SCIFB compatible UART.1515+ - "renesas,hscif-r8a7743" for R8A7743 (RZ/G1M) HSCIF compatible UART.1616+ - "renesas,scif-r8a7745" for R8A7745 (RZ/G1E) SCIF compatible UART.1717+ - "renesas,scifa-r8a7745" for R8A7745 (RZ/G1E) SCIFA compatible UART.1818+ - "renesas,scifb-r8a7745" for R8A7745 (RZ/G1E) SCIFB compatible UART.1919+ - "renesas,hscif-r8a7745" for R8A7745 (RZ/G1E) HSCIF compatible UART.1220 - "renesas,scif-r8a7778" for R8A7778 (R-Car M1) SCIF compatible UART.1321 - "renesas,scif-r8a7779" for R8A7779 (R-Car H1) SCIF compatible UART.1422 - "renesas,scif-r8a7790" for R8A7790 (R-Car H2) SCIF compatible UART.
Documentation/devicetree/bindings/usb/dwc2.txt (+1, -4)
···
 - g-use-dma: enable dma usage in gadget driver.
 - g-rx-fifo-size: size of rx fifo size in gadget mode.
 - g-np-tx-fifo-size: size of non-periodic tx fifo size in gadget mode.
-
-Deprecated properties:
-- g-tx-fifo-size: size of periodic tx fifo per endpoint (except ep0)
-  in gadget mode.
+- g-tx-fifo-size: size of periodic tx fifo per endpoint (except ep0) in gadget mode.

 Example:
Documentation/gpio/board.txt (+7, -4)
···
 description of the deprecated integer-based GPIO interface please refer to
 gpio-legacy.txt (actually, there is no real mapping possible with the old
 interface; you just fetch an integer from somewhere and request the
-corresponding GPIO.
+corresponding GPIO).

 All platforms can enable the GPIO library, but if the platform strictly
 requires GPIO functionality to be present, it needs to select GPIOLIB from its
···
 Since the "led" GPIOs are mapped as active-high, this example will switch their
 signals to 1, i.e. enabling the LEDs. And for the "power" GPIO, which is mapped
-as active-low, its actual signal will be 0 after this code. Contrary to the legacy
-integer GPIO interface, the active-low property is handled during mapping and is
-thus transparent to GPIO consumers.
+as active-low, its actual signal will be 0 after this code. Contrary to the
+legacy integer GPIO interface, the active-low property is handled during
+mapping and is thus transparent to GPIO consumers.
+
+A set of functions such as gpiod_set_value() is available to work with
+the new descriptor-oriented interface.
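As a consumer-side illustration of the descriptor interface the hunk above documents: a minimal sketch, assuming a driver whose "led" and "power" GPIOs are mapped as in board.txt's examples (the probe function is hypothetical, error handling trimmed):

    #include <linux/err.h>
    #include <linux/gpio/consumer.h>

    static int demo_probe(struct device *dev)
    {
        struct gpio_desc *led, *power;

        /* Polarity from the mapping is applied at request time */
        led = gpiod_get(dev, "led", GPIOD_OUT_LOW);
        power = gpiod_get(dev, "power", GPIOD_OUT_LOW);
        if (IS_ERR(led) || IS_ERR(power))
            return -ENODEV;

        /*
         * Logical 1 means "asserted": the active-low "power" line is
         * physically driven to 0, transparently to this code.
         */
        gpiod_set_value(led, 1);
        gpiod_set_value(power, 1);
        return 0;
    }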
MAINTAINERS (+8)
···
 F:	arch/arm/configs/mvebu_*_defconfig

 ARM/Marvell Berlin SoC support
+M:	Jisheng Zhang <jszhang@marvell.com>
 M:	Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:	Maintained
···
 S:	Maintained
 F:	scripts/get_maintainer.pl

+GENWQE (IBM Generic Workqueue Card)
+M:	Frank Haverkamp <haver@linux.vnet.ibm.com>
+M:	Gabriel Krisman Bertazi <krisman@linux.vnet.ibm.com>
+S:	Supported
+F:	drivers/misc/genwqe/
+
 GFS2 FILE SYSTEM
 M:	Steven Whitehouse <swhiteho@redhat.com>
 M:	Bob Peterson <rpeterso@redhat.com>
···
 F:	drivers/media/dvb-frontends/mn88473*

 MODULE SUPPORT
+M:	Jessica Yu <jeyu@redhat.com>
 M:	Rusty Russell <rusty@rustcorp.com.au>
 S:	Maintained
 F:	include/linux/module.h
arch/arc/Kconfig
···
 	select PERF_USE_VMALLOC
 	select HAVE_DEBUG_STACKOVERFLOW
 	select HAVE_GENERIC_DMA_COHERENT
+	select HAVE_KERNEL_GZIP
+	select HAVE_KERNEL_LZMA

 config MIGHT_HAVE_PCI
 	bool
···
 config ARC_HAS_COH_CACHES
 	def_bool n

-config ARC_MCIP
-	bool "ARConnect Multicore IP (MCIP) Support "
-	depends on ISA_ARCV2
-	help
-	  This IP block enables SMP in ARC-HS38 cores.
-	  It provides for cross-core interrupts, multi-core debug
-	  hardware semaphores, shared memory,....
-
 config NR_CPUS
 	int "Maximum number of CPUs (2-4096)"
 	range 2 4096
···
 	  entry point and spin wait for Master's signal.

 endif	#SMP
+
+config ARC_MCIP
+	bool "ARConnect Multicore IP (MCIP) Support "
+	depends on ISA_ARCV2
+	default y if SMP
+	help
+	  This IP block enables SMP in ARC-HS38 cores.
+	  It provides for cross-core interrupts, multi-core debug
+	  hardware semaphores, shared memory,....

 menuconfig ARC_CACHE
 	bool "Enable Cache Support"
···
 config ARC_DBG_TLB_PARANOIA
 	bool "Paranoia Checks in Low Level TLB Handlers"
 	default n
-
-config ARC_DBG_TLB_MISS_COUNT
-	bool "Profile TLB Misses"
-	default n
-	select DEBUG_FS
-	help
-	  Counts number of I and D TLB Misses and exports them via Debugfs
-	  The counters can be cleared via Debugfs as well

 endif
arch/arc/include/asm/cache.h
···
 extern char *arc_cache_mumbojumbo(int cpu_id, char *buf, int len);
 extern void read_decode_cache_bcr(void);

-extern int ioc_exists;
+extern int ioc_enable;
 extern unsigned long perip_base, perip_end;

 #endif	/* !__ASSEMBLY__ */
arch/arc/include/asm/elf.h (+1, -1)
···
  * the loader.  We need to make sure that it is out of the way of the program
  * that it will "exec", and that there is sufficient room for the brk.
  */
-#define ELF_ET_DYN_BASE		(2 * TASK_SIZE / 3)
+#define ELF_ET_DYN_BASE		(2UL * TASK_SIZE / 3)

 /*
  * When the program starts, a1 contains a pointer to a function to be
arch/arc/kernel/mcip.c
···
 #include <asm/mcip.h>
 #include <asm/setup.h>

-static char smp_cpuinfo_buf[128];
-static int idu_detected;
-
 static DEFINE_RAW_SPINLOCK(mcip_lock);
+
+#ifdef CONFIG_SMP
+
+static char smp_cpuinfo_buf[128];

 static void mcip_setup_per_cpu(int cpu)
 {
···
 static void mcip_probe_n_setup(void)
 {
-	struct mcip_bcr {
-#ifdef CONFIG_CPU_BIG_ENDIAN
-		unsigned int pad3:8,
-			     idu:1, llm:1, num_cores:6,
-			     iocoh:1, gfrc:1, dbg:1, pad2:1,
-			     msg:1, sem:1, ipi:1, pad:1,
-			     ver:8;
-#else
-		unsigned int ver:8,
-			     pad:1, ipi:1, sem:1, msg:1,
-			     pad2:1, dbg:1, gfrc:1, iocoh:1,
-			     num_cores:6, llm:1, idu:1,
-			     pad3:8;
-#endif
-	} mp;
+	struct mcip_bcr mp;

 	READ_BCR(ARC_REG_MCIP_BCR, mp);
···
 		IS_AVAIL1(mp.gfrc, "GFRC"));

 	cpuinfo_arc700[0].extn.gfrc = mp.gfrc;
-	idu_detected = mp.idu;

 	if (mp.dbg) {
 		__mcip_cmd_data(CMD_DEBUG_SET_SELECT, 0, 0xf);
···
 	.ipi_send	= mcip_ipi_send,
 	.ipi_clear	= mcip_ipi_clear,
 };
+
+#endif

 /***************************************************************************
  * ARCv2 Interrupt Distribution Unit (IDU)
···
 	/* Read IDU BCR to confirm nr_irqs */
 	int nr_irqs = of_irq_count(intc);
 	int i, irq;
+	struct mcip_bcr mp;

-	if (!idu_detected)
+	READ_BCR(ARC_REG_MCIP_BCR, mp);
+
+	if (!mp.idu)
 		panic("IDU not detected, but DeviceTree using it");

 	pr_info("MCIP: IDU referenced from Devicetree %d irqs\n", nr_irqs);
arch/arc/kernel/module.c (+28, -23)
···
 			      char *secstr, struct module *mod)
 {
 #ifdef CONFIG_ARC_DW2_UNWIND
-	int i;
-
 	mod->arch.unw_sec_idx = 0;
 	mod->arch.unw_info = NULL;
-
-	for (i = 1; i < hdr->e_shnum; i++) {
-		if (strcmp(secstr+sechdrs[i].sh_name, ".eh_frame") == 0) {
-			mod->arch.unw_sec_idx = i;
-			break;
-		}
-	}
+	mod->arch.secstr = secstr;
 #endif
 	return 0;
 }
···
 		       unsigned int relsec,	/* sec index for relo sec */
 		       struct module *module)
 {
-	int i, n;
+	int i, n, relo_type;
 	Elf32_Rela *rel_entry = (void *)sechdrs[relsec].sh_addr;
 	Elf32_Sym *sym_entry, *sym_sec;
-	Elf32_Addr relocation;
-	Elf32_Addr location;
-	Elf32_Addr sec_to_patch;
-	int relo_type;
+	Elf32_Addr relocation, location, tgt_addr;
+	unsigned int tgtsec;

-	sec_to_patch = sechdrs[sechdrs[relsec].sh_info].sh_addr;
+	/*
+	 * @relsec has relocations e.g. .rela.init.text
+	 * @tgtsec is section to patch e.g. .init.text
+	 */
+	tgtsec = sechdrs[relsec].sh_info;
+	tgt_addr = sechdrs[tgtsec].sh_addr;
 	sym_sec = (Elf32_Sym *) sechdrs[symindex].sh_addr;
 	n = sechdrs[relsec].sh_size / sizeof(*rel_entry);

-	pr_debug("\n========== Module Sym reloc ===========================\n");
-	pr_debug("Section to fixup %x\n", sec_to_patch);
+	pr_debug("\nSection to fixup %s @%x\n",
+		 module->arch.secstr + sechdrs[tgtsec].sh_name, tgt_addr);
 	pr_debug("=========================================================\n");
-	pr_debug("rela->r_off | rela->addend | sym->st_value | ADDR | VALUE\n");
+	pr_debug("r_off\tr_add\tst_value ADDRESS VALUE\n");
 	pr_debug("=========================================================\n");

 	/* Loop thru entries in relocation section */
 	for (i = 0; i < n; i++) {
+		const char *s;

 		/* This is where to make the change */
-		location = sec_to_patch + rel_entry[i].r_offset;
+		location = tgt_addr + rel_entry[i].r_offset;

 		/* This is the symbol it is referring to.  Note that all
 		   undefined symbols have been resolved.  */
···
 		relocation = sym_entry->st_value + rel_entry[i].r_addend;

-		pr_debug("\t%x\t\t%x\t\t%x  %x %x [%s]\n",
-			rel_entry[i].r_offset, rel_entry[i].r_addend,
-			sym_entry->st_value, location, relocation,
-			strtab + sym_entry->st_name);
+		if (sym_entry->st_name == 0 && ELF_ST_TYPE (sym_entry->st_info) == STT_SECTION) {
+			s = module->arch.secstr + sechdrs[sym_entry->st_shndx].sh_name;
+		} else {
+			s = strtab + sym_entry->st_name;
+		}
+
+		pr_debug("   %x\t%x\t%x %x %x [%s]\n",
+			 rel_entry[i].r_offset, rel_entry[i].r_addend,
+			 sym_entry->st_value, location, relocation, s);

 		/* This assumes modules are built with -mlong-calls
 		 * so any branches/jumps are absolute 32 bit jmps
···
 			goto relo_err;

 	}
+
+	if (strcmp(module->arch.secstr+sechdrs[tgtsec].sh_name, ".eh_frame") == 0)
+		module->arch.unw_sec_idx = tgtsec;
+
 	return 0;

 relo_err:
arch/arc/kernel/process.c (+33)
···
 	return task_thread_info(current)->thr_ptr;
 }

+SYSCALL_DEFINE3(arc_usr_cmpxchg, int *, uaddr, int, expected, int, new)
+{
+	int uval;
+	int ret;
+
+	/*
+	 * This is only for old cores lacking LLOCK/SCOND, which by defintion
+	 * can't possibly be SMP. Thus doesn't need to be SMP safe.
+	 * And this also helps reduce the overhead for serializing in
+	 * the UP case
+	 */
+	WARN_ON_ONCE(IS_ENABLED(CONFIG_SMP));
+
+	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
+		return -EFAULT;
+
+	preempt_disable();
+
+	ret = __get_user(uval, uaddr);
+	if (ret)
+		goto done;
+
+	if (uval != expected)
+		ret = -EAGAIN;
+	else
+		ret = __put_user(new, uaddr);
+
+done:
+	preempt_enable();
+
+	return ret;
+}
+
 void arch_cpu_idle(void)
 {
 	/* sleep, but enable all interrupts before committing */
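From user space, the syscall added above is the cmpxchg primitive a C library or language runtime would wrap in a retry loop. A minimal sketch of that pattern, assuming __NR_arc_usr_cmpxchg is provided by the exported uapi headers (the helper name is hypothetical):

    #include <errno.h>
    #include <unistd.h>
    #include <sys/syscall.h>

    /* Atomically add delta to *p via the kernel assist; returns the old value. */
    static int atomic_add_user(int *p, int delta)
    {
        for (;;) {
            int old = *p;

            /* 0 on successful exchange; -1/EAGAIN when another task raced us */
            if (syscall(__NR_arc_usr_cmpxchg, p, old, old + delta) == 0)
                return old;
            if (errno != EAGAIN)
                return -1;      /* e.g. EFAULT for a bad pointer */
        }
    }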
arch/arc/kernel/setup.c (+64, -51)
···

 struct cpuinfo_arc cpuinfo_arc700[NR_CPUS];

+static const struct id_to_str arc_cpu_rel[] = {
+#ifdef CONFIG_ISA_ARCOMPACT
+	{ 0x34, "R4.10"},
+	{ 0x35, "R4.11"},
+#else
+	{ 0x51, "R2.0" },
+	{ 0x52, "R2.1" },
+	{ 0x53, "R3.0" },
+#endif
+	{ 0x00, NULL }
+};
+
+static const struct id_to_str arc_cpu_nm[] = {
+#ifdef CONFIG_ISA_ARCOMPACT
+	{ 0x20, "ARC 600" },
+	{ 0x30, "ARC 770" },	/* 750 identified seperately */
+#else
+	{ 0x40, "ARC EM" },
+	{ 0x50, "ARC HS38" },
+#endif
+	{ 0x00, "Unknown" }
+};
+
 static void read_decode_ccm_bcr(struct cpuinfo_arc *cpu)
 {
 	if (is_isa_arcompact()) {
···
 	struct bcr_timer timer;
 	struct bcr_generic bcr;
 	struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()];
+	const struct id_to_str *tbl;
+
 	FIX_PTR(cpu);

 	READ_BCR(AUX_IDENTITY, cpu->core);
 	READ_BCR(ARC_REG_ISA_CFG_BCR, cpu->isa);
+
+	for (tbl = &arc_cpu_rel[0]; tbl->id != 0; tbl++) {
+		if (cpu->core.family == tbl->id) {
+			cpu->details = tbl->str;
+			break;
+		}
+	}
+
+	for (tbl = &arc_cpu_nm[0]; tbl->id != 0; tbl++) {
+		if ((cpu->core.family & 0xF0) == tbl->id)
+			break;
+	}
+	cpu->name = tbl->str;

 	READ_BCR(ARC_REG_TIMERS_BCR, timer);
 	cpu->extn.timer0 = timer.t0;
···
 	cpu->extn.swap = read_aux_reg(ARC_REG_SWAP_BCR) ? 1 : 0;	/* 1,3 */
 	cpu->extn.crc = read_aux_reg(ARC_REG_CRC_BCR) ? 1 : 0;
 	cpu->extn.minmax = read_aux_reg(ARC_REG_MIXMAX_BCR) > 1 ? 1 : 0; /* 2 */
+	cpu->extn.swape = (cpu->core.family >= 0x34) ? 1 :
+				IS_ENABLED(CONFIG_ARC_HAS_SWAPE);
+
 	READ_BCR(ARC_REG_XY_MEM_BCR, cpu->extn_xymem);

 	/* Read CCM BCRs for boot reporting even if not enabled in Kconfig */
···
 	cpu->extn.rtt = bcr.ver ? 1 : 0;

 	cpu->extn.debug = cpu->extn.ap | cpu->extn.smart | cpu->extn.rtt;
+
+	/* some hacks for lack of feature BCR info in old ARC700 cores */
+	if (is_isa_arcompact()) {
+		if (!cpu->isa.ver)	/* ISA BCR absent, use Kconfig info */
+			cpu->isa.atomic = IS_ENABLED(CONFIG_ARC_HAS_LLSC);
+		else
+			cpu->isa.atomic = cpu->isa.atomic1;
+
+		cpu->isa.be = IS_ENABLED(CONFIG_CPU_BIG_ENDIAN);
+
+		/* there's no direct way to distinguish 750 vs. 770 */
+		if (unlikely(cpu->core.family < 0x34 || cpu->mmu.ver < 3))
+			cpu->name = "ARC750";
+	}
 }
-
-static const struct cpuinfo_data arc_cpu_tbl[] = {
-#ifdef CONFIG_ISA_ARCOMPACT
-	{ {0x20, "ARC 600" }, 0x2F},
-	{ {0x30, "ARC 700" }, 0x33},
-	{ {0x34, "ARC 700 R4.10"}, 0x34},
-	{ {0x35, "ARC 700 R4.11"}, 0x35},
-#else
-	{ {0x50, "ARC HS38 R2.0"}, 0x51},
-	{ {0x52, "ARC HS38 R2.1"}, 0x52},
-	{ {0x53, "ARC HS38 R3.0"}, 0x53},
-#endif
-	{ {0x00, NULL } }
-};
-

 static char *arc_cpu_mumbojumbo(int cpu_id, char *buf, int len)
 {
 	struct cpuinfo_arc *cpu = &cpuinfo_arc700[cpu_id];
 	struct bcr_identity *core = &cpu->core;
-	const struct cpuinfo_data *tbl;
-	char *isa_nm;
-	int i, be, atomic;
-	int n = 0;
+	int i, n = 0;

 	FIX_PTR(cpu);
-
-	if (is_isa_arcompact()) {
-		isa_nm = "ARCompact";
-		be = IS_ENABLED(CONFIG_CPU_BIG_ENDIAN);
-
-		atomic = cpu->isa.atomic1;
-		if (!cpu->isa.ver)	/* ISA BCR absent, use Kconfig info */
-			atomic = IS_ENABLED(CONFIG_ARC_HAS_LLSC);
-	} else {
-		isa_nm = "ARCv2";
-		be = cpu->isa.be;
-		atomic = cpu->isa.atomic;
-	}

 	n += scnprintf(buf + n, len - n,
 		       "\nIDENTITY\t: ARCVER [%#02x] ARCNUM [%#02x] CHIPID [%#4x]\n",
 		       core->family, core->cpu_id, core->chip_id);

-	for (tbl = &arc_cpu_tbl[0]; tbl->info.id != 0; tbl++) {
-		if ((core->family >= tbl->info.id) &&
-		    (core->family <= tbl->up_range)) {
-			n += scnprintf(buf + n, len - n,
-				       "processor [%d]\t: %s (%s ISA) %s\n",
-				       cpu_id, tbl->info.str, isa_nm,
-				       IS_AVAIL1(be, "[Big-Endian]"));
-			break;
-		}
-	}
-
-	if (tbl->info.id == 0)
-		n += scnprintf(buf + n, len - n, "UNKNOWN ARC Processor\n");
+	n += scnprintf(buf + n, len - n, "processor [%d]\t: %s %s (%s ISA) %s\n",
+		       cpu_id, cpu->name, cpu->details,
+		       is_isa_arcompact() ? "ARCompact" : "ARCv2",
+		       IS_AVAIL1(cpu->isa.be, "[Big-Endian]"));

 	n += scnprintf(buf + n, len - n, "Timers\t\t: %s%s%s%s\nISA Extn\t: ",
 		       IS_AVAIL1(cpu->extn.timer0, "Timer0 "),
···
 			   CONFIG_ARC_HAS_RTC));

 	n += i = scnprintf(buf + n, len - n, "%s%s%s%s%s",
-			   IS_AVAIL2(atomic, "atomic ", CONFIG_ARC_HAS_LLSC),
+			   IS_AVAIL2(cpu->isa.atomic, "atomic ", CONFIG_ARC_HAS_LLSC),
 			   IS_AVAIL2(cpu->isa.ldd, "ll64 ", CONFIG_ARC_HAS_LL64),
 			   IS_AVAIL1(cpu->isa.unalign, "unalign (not used)"));
···
 		       IS_AVAIL1(cpu->extn.swap, "swap "),
 		       IS_AVAIL1(cpu->extn.minmax, "minmax "),
 		       IS_AVAIL1(cpu->extn.crc, "crc "),
-		       IS_AVAIL2(1, "swape", CONFIG_ARC_HAS_SWAPE));
+		       IS_AVAIL2(cpu->extn.swape, "swape", CONFIG_ARC_HAS_SWAPE));

 	if (cpu->bpu.ver)
 		n += scnprintf(buf + n, len - n,
···

 	FIX_PTR(cpu);

-	n += scnprintf(buf + n, len - n,
-		       "Vector Table\t: %#x\nPeripherals\t: %#lx:%#lx\n",
-		       cpu->vec_base, perip_base, perip_end);
+	n += scnprintf(buf + n, len - n, "Vector Table\t: %#x\n", cpu->vec_base);

 	if (cpu->extn.fpu_sp || cpu->extn.fpu_dp)
 		n += scnprintf(buf + n, len - n, "FPU\t\t: %s%s\n",
···
 	 * way to pass it w/o having to kmalloc/free a 2 byte string.
 	 * Encode cpu-id as 0xFFcccc, which is decoded by show routine.
 	 */
-	return *pos < num_possible_cpus() ? cpu_to_ptr(*pos) : NULL;
+	return *pos < nr_cpu_ids ? cpu_to_ptr(*pos) : NULL;
 }

 static void *c_next(struct seq_file *m, void *v, loff_t *pos)
arch/arc/kernel/troubleshoot.c (-110)
···
 	if (!user_mode(regs))
 		show_stacktrace(current, regs);
 }
-
-#ifdef CONFIG_DEBUG_FS
-
-#include <linux/module.h>
-#include <linux/fs.h>
-#include <linux/mount.h>
-#include <linux/pagemap.h>
-#include <linux/init.h>
-#include <linux/namei.h>
-#include <linux/debugfs.h>
-
-static struct dentry *test_dentry;
-static struct dentry *test_dir;
-static struct dentry *test_u32_dentry;
-
-static u32 clr_on_read = 1;
-
-#ifdef CONFIG_ARC_DBG_TLB_MISS_COUNT
-u32 numitlb, numdtlb, num_pte_not_present;
-
-static int fill_display_data(char *kbuf)
-{
-	size_t num = 0;
-	num += sprintf(kbuf + num, "I-TLB Miss %x\n", numitlb);
-	num += sprintf(kbuf + num, "D-TLB Miss %x\n", numdtlb);
-	num += sprintf(kbuf + num, "PTE not present %x\n", num_pte_not_present);
-
-	if (clr_on_read)
-		numitlb = numdtlb = num_pte_not_present = 0;
-
-	return num;
-}
-
-static int tlb_stats_open(struct inode *inode, struct file *file)
-{
-	file->private_data = (void *)__get_free_page(GFP_KERNEL);
-	return 0;
-}
-
-/* called on user read(): display the counters */
-static ssize_t tlb_stats_output(struct file *file,	/* file descriptor */
-				char __user *user_buf,	/* user buffer */
-				size_t len,		/* length of buffer */
-				loff_t *offset)		/* offset in the file */
-{
-	size_t num;
-	char *kbuf = (char *)file->private_data;
-
-	/* All of the data can he shoved in one iteration */
-	if (*offset != 0)
-		return 0;
-
-	num = fill_display_data(kbuf);
-
-	/* simple_read_from_buffer() is helper for copy to user space
-	   It copies up to @2 (num) bytes from kernel buffer @4 (kbuf) at offset
-	   @3 (offset) into the user space address starting at @1 (user_buf).
-	   @5 (len) is max size of user buffer
-	 */
-	return simple_read_from_buffer(user_buf, num, offset, kbuf, len);
-}
-
-/* called on user write : clears the counters */
-static ssize_t tlb_stats_clear(struct file *file, const char __user *user_buf,
-			       size_t length, loff_t *offset)
-{
-	numitlb = numdtlb = num_pte_not_present = 0;
-	return length;
-}
-
-static int tlb_stats_close(struct inode *inode, struct file *file)
-{
-	free_page((unsigned long)(file->private_data));
-	return 0;
-}
-
-static const struct file_operations tlb_stats_file_ops = {
-	.read = tlb_stats_output,
-	.write = tlb_stats_clear,
-	.open = tlb_stats_open,
-	.release = tlb_stats_close
-};
-#endif
-
-static int __init arc_debugfs_init(void)
-{
-	test_dir = debugfs_create_dir("arc", NULL);
-
-#ifdef CONFIG_ARC_DBG_TLB_MISS_COUNT
-	test_dentry = debugfs_create_file("tlb_stats", 0444, test_dir, NULL,
-					  &tlb_stats_file_ops);
-#endif
-
-	test_u32_dentry =
-	    debugfs_create_u32("clr_on_read", 0444, test_dir, &clr_on_read);
-
-	return 0;
-}
-
-module_init(arc_debugfs_init);
-
-static void __exit arc_debugfs_exit(void)
-{
-	debugfs_remove(test_u32_dentry);
-	debugfs_remove(test_dentry);
-	debugfs_remove(test_dir);
-}
-module_exit(arc_debugfs_exit);
-
-#endif
arch/arc/mm/cache.c (+9, -10)
···
 #include <asm/setup.h>

 static int l2_line_sz;
-int ioc_exists;
-volatile int slc_enable = 1, ioc_enable = 1;
+static int ioc_exists;
+int slc_enable = 1, ioc_enable = 1;
 unsigned long perip_base = ARC_UNCACHED_ADDR_SPACE; /* legacy value for boot */
 unsigned long perip_end = 0xFFFFFFFF; /* legacy value */

···
 	PR_CACHE(&cpuinfo_arc700[c].icache, CONFIG_ARC_HAS_ICACHE, "I-Cache");
 	PR_CACHE(&cpuinfo_arc700[c].dcache, CONFIG_ARC_HAS_DCACHE, "D-Cache");

-	if (!is_isa_arcv2())
-		return buf;
-
 	p = &cpuinfo_arc700[c].slc;
 	if (p->ver)
 		n += scnprintf(buf + n, len - n,
 			       "SLC\t\t: %uK, %uB Line%s\n",
 			       p->sz_k, p->line_len, IS_USED_RUN(slc_enable));

-	if (ioc_exists)
-		n += scnprintf(buf + n, len - n, "IOC\t\t:%s\n",
-				IS_DISABLED_RUN(ioc_enable));
+	n += scnprintf(buf + n, len - n, "Peripherals\t: %#lx%s%s\n",
+		       perip_base,
+		       IS_AVAIL3(ioc_exists, ioc_enable, ", IO-Coherency "));

 	return buf;
 }
···
 	}

 	READ_BCR(ARC_REG_CLUSTER_BCR, cbcr);
-	if (cbcr.c && ioc_enable)
+	if (cbcr.c)
 		ioc_exists = 1;
+	else
+		ioc_enable = 0;

 	/* HS 2.0 didn't have AUX_VOL */
 	if (cpuinfo_arc700[cpu].core.family > 0x51) {
···
 			   read_aux_reg(ARC_REG_SLC_CTRL) | SLC_CTRL_DISABLE);
 	}

-	if (is_isa_arcv2() && ioc_exists) {
+	if (is_isa_arcv2() && ioc_enable) {
 		/* IO coherency base - 0x8z */
 		write_aux_reg(ARC_REG_IO_COH_AP0_BASE, 0x80000);
 		/* IO coherency aperture size - 512Mb: 0x8z-0xAz */
arch/arc/mm/dma.c (+2, -2)
···
 	 * -For coherent data, Read/Write to buffers terminate early in cache
 	 *   (vs. always going to memory - thus are faster)
 	 */
-	if ((is_isa_arcv2() && ioc_exists) ||
+	if ((is_isa_arcv2() && ioc_enable) ||
 	    (attrs & DMA_ATTR_NON_CONSISTENT))
 		need_coh = 0;

···
 	int is_non_coh = 1;

 	is_non_coh = (attrs & DMA_ATTR_NON_CONSISTENT) ||
-			(is_isa_arcv2() && ioc_exists);
+			(is_isa_arcv2() && ioc_enable);

 	if (PageHighMem(page) || !is_non_coh)
 		iounmap((void __force __iomem *)vaddr);
arch/arm/mach-mvebu/Kconfig
···
 	select CACHE_L2X0
 	select ARM_CPU_SUSPEND
 	select MACH_MVEBU_ANY
+	select MVEBU_CLK_COREDIV

 config MACH_ARMADA_370
 	bool "Marvell Armada 370 boards"
···
 	select CPU_PJ4B
 	select MACH_MVEBU_V7
 	select PINCTRL_ARMADA_370
-	select MVEBU_CLK_COREDIV
 	help
 	  Say 'Y' here if you want your kernel to support boards based
 	  on the Marvell Armada 370 SoC with device tree.
···
 	select HAVE_SMP
 	select MACH_MVEBU_V7
 	select PINCTRL_ARMADA_375
-	select MVEBU_CLK_COREDIV
 	help
 	  Say 'Y' here if you want your kernel to support boards based
 	  on the Marvell Armada 375 SoC with device tree.
···
 	select HAVE_SMP
 	select MACH_MVEBU_V7
 	select PINCTRL_ARMADA_38X
-	select MVEBU_CLK_COREDIV
 	help
 	  Say 'Y' here if you want your kernel to support boards based
 	  on the Marvell Armada 380/385 SoC with device tree.
arch/powerpc/include/asm/exception-64s.h
···
 	ld	reg,PACAKBASE(r13);	/* get high part of &label */	\
 	ori	reg,reg,(FIXED_SYMBOL_ABS_ADDR(label))@l;

+#define __LOAD_HANDLER(reg, label)					\
+	ld	reg,PACAKBASE(r13);					\
+	ori	reg,reg,(ABS_ADDR(label))@l;
+
 /* Exception register prefixes */
 #define EXC_HV	H
 #define EXC_STD
···
 #define kvmppc_interrupt kvmppc_interrupt_hv
 #else
 #define kvmppc_interrupt kvmppc_interrupt_pr
+#endif
+
+#ifdef CONFIG_RELOCATABLE
+#define BRANCH_TO_COMMON(reg, label)					\
+	__LOAD_HANDLER(reg, label);					\
+	mtctr	reg;							\
+	bctr
+
+#else
+#define BRANCH_TO_COMMON(reg, label)					\
+	b	label
+
 #endif

 #define __KVM_HANDLER_PROLOG(area, n)					\
arch/powerpc/include/asm/tlb.h (+12)
···
 	return cpumask_subset(mm_cpumask(mm),
 			      topology_sibling_cpumask(smp_processor_id()));
 }
+
+static inline int mm_is_thread_local(struct mm_struct *mm)
+{
+	return cpumask_equal(mm_cpumask(mm),
+			     cpumask_of(smp_processor_id()));
+}
+
 #else
 static inline int mm_is_core_local(struct mm_struct *mm)
+{
+	return 1;
+}
+
+static inline int mm_is_thread_local(struct mm_struct *mm)
 {
 	return 1;
 }
arch/powerpc/kernel/exceptions-64s.S (+29, -21)
···
 /* No virt vectors corresponding with 0x0..0x100 */
 EXC_VIRT_NONE(0x4000, 0x4100)

+
+#ifdef CONFIG_PPC_P7_NAP
+	/*
+	 * If running native on arch 2.06 or later, check if we are waking up
+	 * from nap/sleep/winkle, and branch to idle handler.
+	 */
+#define IDLETEST(n)							\
+	BEGIN_FTR_SECTION ;						\
+	mfspr	r10,SPRN_SRR1 ;						\
+	rlwinm.	r10,r10,47-31,30,31 ;					\
+	beq-	1f ;							\
+	cmpwi	cr3,r10,2 ;						\
+	BRANCH_TO_COMMON(r10, system_reset_idle_common) ;		\
+1:									\
+	END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
+#else
+#define IDLETEST NOTEST
+#endif
+
 EXC_REAL_BEGIN(system_reset, 0x100, 0x200)
 	SET_SCRATCH0(r13)
-#ifdef CONFIG_PPC_P7_NAP
-BEGIN_FTR_SECTION
-	/* Running native on arch 2.06 or later, check if we are
-	 * waking up from nap/sleep/winkle.
-	 */
-	mfspr	r13,SPRN_SRR1
-	rlwinm.	r13,r13,47-31,30,31
-	beq	9f
+	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common, EXC_STD,
+				 IDLETEST, 0x100)

-	cmpwi	cr3,r13,2
-	GET_PACA(r13)
+EXC_REAL_END(system_reset, 0x100, 0x200)
+EXC_VIRT_NONE(0x4100, 0x4200)
+
+#ifdef CONFIG_PPC_P7_NAP
+EXC_COMMON_BEGIN(system_reset_idle_common)
 	bl	pnv_restore_hyp_resource

 	li	r0,PNV_THREAD_RUNNING
···
 	blt	cr3,2f
 	b	pnv_wakeup_loss
 2:	b	pnv_wakeup_noloss
+#endif

-9:
-END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
-#endif /* CONFIG_PPC_P7_NAP */
-	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common, EXC_STD,
-				 NOTEST, 0x100)
-EXC_REAL_END(system_reset, 0x100, 0x200)
-EXC_VIRT_NONE(0x4100, 0x4200)
 EXC_COMMON(system_reset_common, 0x100, system_reset_exception)

 #ifdef CONFIG_PPC_PSERIES
···
 TRAMP_KVM(PACA_EXGEN, 0xb00)
 EXC_COMMON(trap_0b_common, 0xb00, unknown_exception)

-
-#define LOAD_SYSCALL_HANDLER(reg)					\
-	ld	reg,PACAKBASE(r13);					\
-	ori	reg,reg,(ABS_ADDR(system_call_common))@l;
+#define LOAD_SYSCALL_HANDLER(reg)					\
+	__LOAD_HANDLER(reg, system_call_common)

 /* Syscall routine is used twice, in reloc-off and reloc-on paths */
 #define SYSCALL_PSERIES_1					\
arch/powerpc/kernel/hw_breakpoint.c (+1, -1)
···
 	if (!stepped) {
 		WARN(1, "Unable to handle hardware breakpoint. Breakpoint at "
 			"0x%lx will be disabled.", info->address);
-		perf_event_disable(bp);
+		perf_event_disable_inatomic(bp);
 		goto out;
 	}
 	/*
arch/powerpc/kernel/idle_book3s.S (+29, -6)
···
 * Threads will spin in HMT_LOW until the lock bit is cleared.
 * r14 - pointer to core_idle_state
 * r15 - used to load contents of core_idle_state
+ * r9  - used as a temporary variable
 */

core_idle_lock_held:
···
 	bne	3b
 	HMT_MEDIUM
 	lwarx	r15,0,r14
+	andi.	r9,r15,PNV_CORE_IDLE_LOCK_BIT
+	bne	core_idle_lock_held
 	blr

 /*
···
 	std	r9,_MSR(r1)
 	std	r1,PACAR1(r13)

-#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
-	/* Tell KVM we're entering idle */
-	li	r4,KVM_HWTHREAD_IN_IDLE
-	stb	r4,HSTATE_HWTHREAD_STATE(r13)
-#endif
-
 	/*
 	 * Go to real mode to do the nap, as required by the architecture.
 	 * Also, we need to be in real mode before setting hwthread_state,
···

 	.globl pnv_enter_arch207_idle_mode
 pnv_enter_arch207_idle_mode:
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+	/* Tell KVM we're entering idle */
+	li	r4,KVM_HWTHREAD_IN_IDLE
+	/******************************************************/
+	/*  N O T E   W E L L    ! ! !    N O T E   W E L L   */
+	/* The following store to HSTATE_HWTHREAD_STATE(r13)  */
+	/* MUST occur in real mode, i.e. with the MMU off,    */
+	/* and the MMU must stay off until we clear this flag */
+	/* and test HSTATE_HWTHREAD_REQ(r13) in the system    */
+	/* reset interrupt vector in exceptions-64s.S.        */
+	/* The reason is that another thread can switch the   */
+	/* MMU to a guest context whenever this flag is set   */
+	/* to KVM_HWTHREAD_IN_IDLE, and if the MMU was on,    */
+	/* that would potentially cause this thread to start  */
+	/* executing instructions from guest memory in        */
+	/* hypervisor mode, leading to a host crash or data   */
+	/* corruption, or worse.                              */
+	/******************************************************/
+	stb	r4,HSTATE_HWTHREAD_STATE(r13)
+#endif
 	stb	r3,PACA_THREAD_IDLE_STATE(r13)
 	cmpwi	cr3,r3,PNV_THREAD_SLEEP
 	bge	cr3,2f
···
 * r3 - requested stop state
 */
power_enter_stop:
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+	/* Tell KVM we're entering idle */
+	li	r4,KVM_HWTHREAD_IN_IDLE
+	/* DO THIS IN REAL MODE!  See comment above. */
+	stb	r4,HSTATE_HWTHREAD_STATE(r13)
+#endif
 /*
 * Check if the requested state is a deep idle state.
 */
arch/powerpc/kernel/process.c (+1, -1)
···
 	/* Ensure that restore_math() will restore */
 	if (msr_diff & MSR_FP)
 		current->thread.load_fp = 1;
-#ifdef CONFIG_ALIVEC
+#ifdef CONFIG_ALTIVEC
 	if (cpu_has_feature(CPU_FTR_ALTIVEC) && msr_diff & MSR_VEC)
 		current->thread.load_vec = 1;
 #endif
arch/powerpc/mm/tlb-radix.c
···
 	if (unlikely(pid == MMU_NO_CONTEXT))
 		goto no_context;

-	if (!mm_is_core_local(mm)) {
+	if (!mm_is_thread_local(mm)) {
 		int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);

 		if (lock_tlbie)
···
 	if (unlikely(pid == MMU_NO_CONTEXT))
 		goto no_context;

-	if (!mm_is_core_local(mm)) {
+	if (!mm_is_thread_local(mm)) {
 		int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);

 		if (lock_tlbie)
···
 	pid = mm ? mm->context.id : 0;
 	if (unlikely(pid == MMU_NO_CONTEXT))
 		goto bail;
-	if (!mm_is_core_local(mm)) {
+	if (!mm_is_thread_local(mm)) {
 		int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);

 		if (lock_tlbie)
···
 {
 	unsigned long pid;
 	unsigned long addr;
-	int local = mm_is_core_local(mm);
+	int local = mm_is_thread_local(mm);
 	unsigned long ap = mmu_get_ap(psize);
 	int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
 	unsigned long page_size = 1UL << mmu_psize_defs[psize].shift;
arch/s390/include/asm/processor.h
···
 struct mm_struct;
 struct seq_file;

-typedef int (*dump_trace_func_t)(void *data, unsigned long address);
+typedef int (*dump_trace_func_t)(void *data, unsigned long address, int reliable);
 void dump_trace(dump_trace_func_t func, void *data,
 		struct task_struct *task, unsigned long sp);
arch/s390/kernel/dumpstack.c
···
 	if (sp < low || sp > high - sizeof(*sf))
 		return sp;
 	sf = (struct stack_frame *) sp;
+	if (func(data, sf->gprs[8], 0))
+		return sp;
 	/* Follow the backchain. */
 	while (1) {
-		if (func(data, sf->gprs[8]))
-			return sp;
 		low = sp;
 		sp = sf->back_chain;
 		if (!sp)
···
 		if (sp <= low || sp > high - sizeof(*sf))
 			return sp;
 		sf = (struct stack_frame *) sp;
+		if (func(data, sf->gprs[8], 1))
+			return sp;
 	}
 	/* Zero backchain detected, check for interrupt frame. */
 	sp = (unsigned long) (sf + 1);
···
 		return sp;
 	regs = (struct pt_regs *) sp;
 	if (!user_mode(regs)) {
-		if (func(data, regs->psw.addr))
+		if (func(data, regs->psw.addr, 1))
 			return sp;
 	}
 	low = sp;
···
 }
 EXPORT_SYMBOL_GPL(dump_trace);

-struct return_address_data {
-	unsigned long address;
-	int depth;
-};
-
-static int __return_address(void *data, unsigned long address)
+static int show_address(void *data, unsigned long address, int reliable)
 {
-	struct return_address_data *rd = data;
-
-	if (rd->depth--)
-		return 0;
-	rd->address = address;
-	return 1;
-}
-
-unsigned long return_address(int depth)
-{
-	struct return_address_data rd = { .depth = depth + 2 };
-
-	dump_trace(__return_address, &rd, NULL, current_stack_pointer());
-	return rd.address;
-}
-EXPORT_SYMBOL_GPL(return_address);
-
-static int show_address(void *data, unsigned long address)
-{
-	printk("([<%016lx>] %pSR)\n", address, (void *)address);
+	if (reliable)
+		printk(" [<%016lx>] %pSR \n", address, (void *)address);
+	else
+		printk("([<%016lx>] %pSR)\n", address, (void *)address);
 	return 0;
 }

···
 	else
 		stack = (unsigned long *)task->thread.ksp;
 	}
+	printk(KERN_DEFAULT "Stack:\n");
 	for (i = 0; i < 20; i++) {
 		if (((addr_t) stack & (THREAD_SIZE-1)) == 0)
 			break;
-		if ((i * sizeof(long) % 32) == 0)
-			printk("%s    ", i == 0 ? "" : "\n");
-		printk("%016lx ", *stack++);
+		if (i % 4 == 0)
+			printk(KERN_DEFAULT "       ");
+		pr_cont("%016lx%c", *stack++, i % 4 == 3 ? '\n' : ' ');
 	}
-	printk("\n");
 	show_trace(task, (unsigned long)sp);
 }

···
 	mode = user_mode(regs) ? "User" : "Krnl";
 	printk("%s PSW : %p %p", mode, (void *)regs->psw.mask, (void *)regs->psw.addr);
 	if (!user_mode(regs))
-		printk(" (%pSR)", (void *)regs->psw.addr);
-	printk("\n");
+		pr_cont(" (%pSR)", (void *)regs->psw.addr);
+	pr_cont("\n");
 	printk("           R:%x T:%x IO:%x EX:%x Key:%x M:%x W:%x "
 	       "P:%x AS:%x CC:%x PM:%x", psw->r, psw->t, psw->i, psw->e,
 	       psw->key, psw->m, psw->w, psw->p, psw->as, psw->cc, psw->pm);
-	printk(" RI:%x EA:%x", psw->ri, psw->eaba);
-	printk("\n%s GPRS: %016lx %016lx %016lx %016lx\n", mode,
+	pr_cont(" RI:%x EA:%x\n", psw->ri, psw->eaba);
+	printk("%s GPRS: %016lx %016lx %016lx %016lx\n", mode,
 	       regs->gprs[0], regs->gprs[1], regs->gprs[2], regs->gprs[3]);
 	printk("           %016lx %016lx %016lx %016lx\n",
 	       regs->gprs[4], regs->gprs[5], regs->gprs[6], regs->gprs[7]);
···
 	printk("%s: %04x ilc:%d [#%d] ", str, regs->int_code & 0xffff,
 	       regs->int_code >> 17, ++die_counter);
 #ifdef CONFIG_PREEMPT
-	printk("PREEMPT ");
+	pr_cont("PREEMPT ");
 #endif
 #ifdef CONFIG_SMP
-	printk("SMP ");
+	pr_cont("SMP ");
 #endif
 	if (debug_pagealloc_enabled())
-		printk("DEBUG_PAGEALLOC");
-	printk("\n");
+		pr_cont("DEBUG_PAGEALLOC");
+	pr_cont("\n");
 	notify_die(DIE_OOPS, str, regs, 0, regs->int_code & 0xffff, SIGSEGV);
 	print_modules();
 	show_regs(regs);
arch/s390/kernel/perf_event.c (+1, -1)
···
 }
 arch_initcall(service_level_perf_register);

-static int __perf_callchain_kernel(void *data, unsigned long address)
+static int __perf_callchain_kernel(void *data, unsigned long address, int reliable)
 {
 	struct perf_callchain_entry_ctx *entry = data;

arch/s390/kernel/stacktrace.c (+2, -2)
···
 	return 1;
 }

-static int save_address(void *data, unsigned long address)
+static int save_address(void *data, unsigned long address, int reliable)
 {
 	return __save_address(data, address, 0);
 }

-static int save_address_nosched(void *data, unsigned long address)
+static int save_address_nosched(void *data, unsigned long address, int reliable)
 {
 	return __save_address(data, address, 1);
 }
arch/s390/mm/init.c
···
 #ifdef CONFIG_MEMORY_HOTPLUG
 int arch_add_memory(int nid, u64 start, u64 size, bool for_device)
 {
-	unsigned long normal_end_pfn = PFN_DOWN(memblock_end_of_DRAM());
-	unsigned long dma_end_pfn = PFN_DOWN(MAX_DMA_ADDRESS);
+	unsigned long zone_start_pfn, zone_end_pfn, nr_pages;
 	unsigned long start_pfn = PFN_DOWN(start);
 	unsigned long size_pages = PFN_DOWN(size);
-	unsigned long nr_pages;
-	int rc, zone_enum;
+	pg_data_t *pgdat = NODE_DATA(nid);
+	struct zone *zone;
+	int rc, i;

 	rc = vmem_add_mapping(start, size);
 	if (rc)
 		return rc;

-	while (size_pages > 0) {
-		if (start_pfn < dma_end_pfn) {
-			nr_pages = (start_pfn + size_pages > dma_end_pfn) ?
-				   dma_end_pfn - start_pfn : size_pages;
-			zone_enum = ZONE_DMA;
-		} else if (start_pfn < normal_end_pfn) {
-			nr_pages = (start_pfn + size_pages > normal_end_pfn) ?
-				   normal_end_pfn - start_pfn : size_pages;
-			zone_enum = ZONE_NORMAL;
+	for (i = 0; i < MAX_NR_ZONES; i++) {
+		zone = pgdat->node_zones + i;
+		if (zone_idx(zone) != ZONE_MOVABLE) {
+			/* Add range within existing zone limits, if possible */
+			zone_start_pfn = zone->zone_start_pfn;
+			zone_end_pfn = zone->zone_start_pfn +
+				       zone->spanned_pages;
 		} else {
-			nr_pages = size_pages;
-			zone_enum = ZONE_MOVABLE;
+			/* Add remaining range to ZONE_MOVABLE */
+			zone_start_pfn = start_pfn;
+			zone_end_pfn = start_pfn + size_pages;
 		}
-		rc = __add_pages(nid, NODE_DATA(nid)->node_zones + zone_enum,
-				 start_pfn, size_pages);
+		if (start_pfn < zone_start_pfn || start_pfn >= zone_end_pfn)
+			continue;
+		nr_pages = (start_pfn + size_pages > zone_end_pfn) ?
+			   zone_end_pfn - start_pfn : size_pages;
+		rc = __add_pages(nid, zone, start_pfn, nr_pages);
 		if (rc)
 			break;
 		start_pfn += nr_pages;
 		size_pages -= nr_pages;
+		if (!size_pages)
+			break;
 	}
 	if (rc)
 		vmem_remove_mapping(start, size);
arch/s390/oprofile/init.c (+1, -1)
···
 #include <linux/init.h>
 #include <asm/processor.h>

-static int __s390_backtrace(void *data, unsigned long address)
+static int __s390_backtrace(void *data, unsigned long address, int reliable)
 {
 	unsigned int *depth = data;

arch/x86/events/intel/core.c
···

 	/*
 	 * Quirk: v2 perfmon does not report fixed-purpose events, so
-	 * assume at least 3 events:
+	 * assume at least 3 events, when not running in a hypervisor:
 	 */
-	if (version > 1)
-		x86_pmu.num_counters_fixed = max((int)edx.split.num_counters_fixed, 3);
+	if (version > 1) {
+		int assume = 3 * !boot_cpu_has(X86_FEATURE_HYPERVISOR);
+
+		x86_pmu.num_counters_fixed =
+			max((int)edx.split.num_counters_fixed, assume);
+	}

 	if (boot_cpu_has(X86_FEATURE_PDCM)) {
 		u64 capabilities;
arch/x86/kernel/acpi/boot.c
···
 	polarity = acpi_sci_flags & ACPI_MADT_POLARITY_MASK;

 	mp_override_legacy_irq(bus_irq, polarity, trigger, gsi);
+	acpi_penalize_sci_irq(bus_irq, trigger, polarity);

 	/*
 	 * stash over-ride to indicate we've been here
arch/x86/kernel/cpu/microcode/amd.c (+1, -1)
···
 	 * We need the physical address of the container for both bitness since
 	 * boot_params.hdr.ramdisk_image is a physical address.
 	 */
-	cont    = __pa(container);
+	cont    = __pa_nodebug(container);
 	cont_va = container;
 #endif

arch/x86/kernel/mcount_64.S (+2, -1)
···

 #ifdef CC_USING_FENTRY
 # define function_hook	__fentry__
+EXPORT_SYMBOL(__fentry__)
 #else
 # define function_hook	mcount
+EXPORT_SYMBOL(mcount)
 #endif

 /* All cases save the original rbp (8 bytes) */
···
 	jmp fgraph_trace
 END(function_hook)
 #endif /* CONFIG_DYNAMIC_FTRACE */
-EXPORT_SYMBOL(function_hook)
 #endif /* CONFIG_FUNCTION_TRACER */

 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
arch/x86/kernel/setup.c
···
 	 */
 	get_smp_config();

+	/*
+	 * Systems w/o ACPI and mptables might not have it mapped the local
+	 * APIC yet, but prefill_possible_map() might need to access it.
+	 */
+	init_apic_mappings();
+
 	prefill_possible_map();

 	init_cpu_to_node();

-	init_apic_mappings();
 	io_apic_init_mappings();

 	kvm_guest_init();
arch/x86/kernel/unwind_guess.c (+8, -1)
···
 	get_stack_info(first_frame, state->task, &state->stack_info,
 		       &state->stack_mask);

-	if (!__kernel_text_address(*first_frame))
+	/*
+	 * The caller can provide the address of the first frame directly
+	 * (first_frame) or indirectly (regs->sp) to indicate which stack frame
+	 * to start unwinding at.  Skip ahead until we reach it.
+	 */
+	if (!unwind_done(state) &&
+	    (!on_stack(&state->stack_info, first_frame, sizeof(long)) ||
+	    !__kernel_text_address(*first_frame)))
 		unwind_next_frame(state);
 }
 EXPORT_SYMBOL_GPL(__unwind_start);
block/badblocks.c
···
 }
 EXPORT_SYMBOL_GPL(badblocks_check);

+static void badblocks_update_acked(struct badblocks *bb)
+{
+	u64 *p = bb->page;
+	int i;
+	bool unacked = false;
+
+	if (!bb->unacked_exist)
+		return;
+
+	for (i = 0; i < bb->count ; i++) {
+		if (!BB_ACK(p[i])) {
+			unacked = true;
+			break;
+		}
+	}
+
+	if (!unacked)
+		bb->unacked_exist = 0;
+}
+
 /**
  * badblocks_set() - Add a range of bad blocks to the table.
  * @bb:		the badblocks structure that holds all badblock information
···
 	bb->changed = 1;
 	if (!acknowledged)
 		bb->unacked_exist = 1;
+	else
+		badblocks_update_acked(bb);
 	write_sequnlock_irqrestore(&bb->lock, flags);

 	return rv;
···
 		}
 	}

+	badblocks_update_acked(bb);
 	bb->changed = 1;
 out:
 	write_sequnlock_irq(&bb->lock);
block/blk-flush.c (+28)
···
 	struct blk_flush_queue *fq = blk_get_flush_queue(q, NULL);

 	/*
+	 * Updating q->in_flight[] here for making this tag usable
+	 * early. Because in blk_queue_start_tag(),
+	 * q->in_flight[BLK_RW_ASYNC] is used to limit async I/O and
+	 * reserve tags for sync I/O.
+	 *
+	 * More importantly this way can avoid the following I/O
+	 * deadlock:
+	 *
+	 * - suppose there are 40 fua requests comming to flush queue
+	 *   and queue depth is 31
+	 * - 30 rqs are scheduled then blk_queue_start_tag() can't alloc
+	 *   tag for async I/O any more
+	 * - all the 30 rqs are completed before FLUSH_PENDING_TIMEOUT
+	 *   and flush_data_end_io() is called
+	 * - the other rqs still can't go ahead if not updating
+	 *   q->in_flight[BLK_RW_ASYNC] here, meantime these rqs
+	 *   are held in flush data queue and make no progress of
+	 *   handling post flush rq
+	 * - only after the post flush rq is handled, all these rqs
+	 *   can be completed
+	 */
+
+	elv_completed_request(q, rq);
+
+	/* for avoiding double accounting */
+	rq->cmd_flags &= ~REQ_STARTED;
+
+	/*
 	 * After populating an empty queue, kick it to avoid stall.  Read
 	 * the comment in flush_end_io().
 	 */
drivers/acpi/acpica/dsinit.c
···
 #include "acdispat.h"
 #include "acnamesp.h"
 #include "actables.h"
+#include "acinterp.h"

 #define _COMPONENT          ACPI_DISPATCHER
 ACPI_MODULE_NAME("dsinit")
···

 	/* Walk entire namespace from the supplied root */

-	status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
-	if (ACPI_FAILURE(status)) {
-		return_ACPI_STATUS(status);
-	}
-
 	/*
 	 * We don't use acpi_walk_namespace since we do not want to acquire
 	 * the namespace reader lock.
 	 */
 	status =
 	    acpi_ns_walk_namespace(ACPI_TYPE_ANY, start_node, ACPI_UINT32_MAX,
-				   ACPI_NS_WALK_UNLOCK, acpi_ds_init_one_object,
-				   NULL, &info, NULL);
+				   0, acpi_ds_init_one_object, NULL, &info,
+				   NULL);
 	if (ACPI_FAILURE(status)) {
 		ACPI_EXCEPTION((AE_INFO, status, "During WalkNamespace"));
 	}
-	(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);

 	status = acpi_get_table_by_index(table_index, &table);
 	if (ACPI_FAILURE(status)) {
drivers/acpi/acpica/dsmethod.c (+22, -28)
···9999 "Method auto-serialization parse [%4.4s] %p\n",100100 acpi_ut_get_node_name(node), node));101101102102- acpi_ex_enter_interpreter();103103-104102 /* Create/Init a root op for the method parse tree */105103106104 op = acpi_ps_alloc_op(AML_METHOD_OP, obj_desc->method.aml_start);107105 if (!op) {108108- status = AE_NO_MEMORY;109109- goto unlock;106106+ return_ACPI_STATUS(AE_NO_MEMORY);110107 }111108112109 acpi_ps_set_name(op, node->name.integer);···115118 acpi_ds_create_walk_state(node->owner_id, NULL, NULL, NULL);116119 if (!walk_state) {117120 acpi_ps_free_op(op);118118- status = AE_NO_MEMORY;119119- goto unlock;121121+ return_ACPI_STATUS(AE_NO_MEMORY);120122 }121123122124 status = acpi_ds_init_aml_walk(walk_state, op, node,···134138 status = acpi_ps_parse_aml(walk_state);135139136140 acpi_ps_delete_parse_tree(op);137137-unlock:138138- acpi_ex_exit_interpreter();139141 return_ACPI_STATUS(status);140142}141143···725731 acpi_ds_method_data_delete_all(walk_state);726732727733 /*728728- * If method is serialized, release the mutex and restore the729729- * current sync level for this thread730730- */731731- if (method_desc->method.mutex) {732732-733733- /* Acquisition Depth handles recursive calls */734734-735735- method_desc->method.mutex->mutex.acquisition_depth--;736736- if (!method_desc->method.mutex->mutex.acquisition_depth) {737737- walk_state->thread->current_sync_level =738738- method_desc->method.mutex->mutex.739739- original_sync_level;740740-741741- acpi_os_release_mutex(method_desc->method.742742- mutex->mutex.os_mutex);743743- method_desc->method.mutex->mutex.thread_id = 0;744744- }745745- }746746-747747- /*748734 * Delete any namespace objects created anywhere within the749735 * namespace by the execution of this method. Unless:750736 * 1) This method is a module-level executable code method, in which···758784 (void)acpi_ex_enter_interpreter();759785 method_desc->method.info_flags &=760786 ~ACPI_METHOD_MODIFIED_NAMESPACE;787787+ }788788+ }789789+790790+ /*791791+ * If method is serialized, release the mutex and restore the792792+ * current sync level for this thread793793+ */794794+ if (method_desc->method.mutex) {795795+796796+ /* Acquisition Depth handles recursive calls */797797+798798+ method_desc->method.mutex->mutex.acquisition_depth--;799799+ if (!method_desc->method.mutex->mutex.acquisition_depth) {800800+ walk_state->thread->current_sync_level =801801+ method_desc->method.mutex->mutex.802802+ original_sync_level;803803+804804+ acpi_os_release_mutex(method_desc->method.805805+ mutex->mutex.os_mutex);806806+ method_desc->method.mutex->mutex.thread_id = 0;761807 }762808 }763809 }
drivers/acpi/acpica/dswload2.c (-2)
···
 		}
 	}

-	acpi_ex_exit_interpreter();
 	status =
 	    acpi_ev_initialize_region
 	    (acpi_ns_get_attached_object(node), FALSE);
-	acpi_ex_enter_interpreter();

 	if (ACPI_FAILURE(status)) {
 		/*
drivers/acpi/acpica/evrgnini.c (+3)
···
 #include "accommon.h"
 #include "acevents.h"
 #include "acnamesp.h"
+#include "acinterp.h"

 #define _COMPONENT          ACPI_EVENTS
 ACPI_MODULE_NAME("evrgnini")
···
 		}
 	}

+	acpi_ex_exit_interpreter();
 	status =
 	    acpi_ev_execute_reg_method(region_obj,
 				       ACPI_REG_CONNECT);
+	acpi_ex_enter_interpreter();

 	if (acpi_ns_locked) {
 		status =
drivers/acpi/acpica/nsload.c (+2)
···
 	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
 			  "**** Begin Table Object Initialization\n"));

+	acpi_ex_enter_interpreter();
 	status = acpi_ds_initialize_objects(table_index, node);
+	acpi_ex_exit_interpreter();

 	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
 			  "**** Completed Table Object Initialization\n"));
drivers/ata/ahci.c
···
 	 * Message mode could be enforced. In this case assume that advantage
 	 * of multipe MSIs is negated and use single MSI mode instead.
 	 */
-	nvec = pci_alloc_irq_vectors(pdev, n_ports, INT_MAX,
-			PCI_IRQ_MSIX | PCI_IRQ_MSI);
-	if (nvec > 0) {
-		if (!(readl(hpriv->mmio + HOST_CTL) & HOST_MRSM)) {
-			hpriv->get_irq_vector = ahci_get_irq_vector;
-			hpriv->flags |= AHCI_HFLAG_MULTI_MSI;
-			return nvec;
+	if (n_ports > 1) {
+		nvec = pci_alloc_irq_vectors(pdev, n_ports, INT_MAX,
+				PCI_IRQ_MSIX | PCI_IRQ_MSI);
+		if (nvec > 0) {
+			if (!(readl(hpriv->mmio + HOST_CTL) & HOST_MRSM)) {
+				hpriv->get_irq_vector = ahci_get_irq_vector;
+				hpriv->flags |= AHCI_HFLAG_MULTI_MSI;
+				return nvec;
+			}
+
+			/*
+			 * Fallback to single MSI mode if the controller
+			 * enforced MRSM mode.
+			 */
+			printk(KERN_INFO
+				"ahci: MRSM is on, fallback to single MSI\n");
+			pci_free_irq_vectors(pdev);
 		}

 		/*
-		 * Fallback to single MSI mode if the controller enforced MRSM
-		 * mode.
+		 * -ENOSPC indicated we don't have enough vectors.  Don't bother
+		 * trying a single vectors for any other error:
 		 */
-		printk(KERN_INFO "ahci: MRSM is on, fallback to single MSI\n");
-		pci_free_irq_vectors(pdev);
+		if (nvec < 0 && nvec != -ENOSPC)
+			return nvec;
 	}
-
-	/*
-	 * -ENOSPC indicated we don't have enough vectors.  Don't bother trying
-	 * a single vectors for any other error:
-	 */
-	if (nvec < 0 && nvec != -ENOSPC)
-		return nvec;

 	/*
 	 * If the host is not capable of supporting per-port vectors, fall
···
 		/* legacy intx interrupts */
 		pci_intx(pdev, 1);
 	}
-	hpriv->irq = pdev->irq;
+	hpriv->irq = pci_irq_vector(pdev, 0);

 	if (!(hpriv->cap & HOST_CAP_SSS) || ahci_ignore_sss)
 		host->flags |= ATA_HOST_PARALLEL_SCAN;
drivers/base/Kconfig (+4, -2)
···
 	  If you are unsure about this, Say N here.

 config DEBUG_TEST_DRIVER_REMOVE
-	bool "Test driver remove calls during probe"
+	bool "Test driver remove calls during probe (UNSTABLE)"
 	depends on DEBUG_KERNEL
 	help
 	  Say Y here if you want the Driver core to test driver remove functions
 	  by calling probe, remove, probe.  This tests the remove path without
 	  having to unbind the driver or unload the driver module.

-	  If you are unsure about this, say N here.
+	  This option is expected to find errors and may render your system
+	  unusable. You should say N here unless you are explicitly looking to
+	  test this functionality.

 config SYS_HYPERVISOR
 	bool
drivers/block/DAC960.c (+2, -2)
···
 	  case DAC960_PD_Controller:
 	    if (!request_region(Controller->IO_Address, 0x80,
 				Controller->FullModelName)) {
-		DAC960_Error("IO port 0x%d busy for Controller at\n",
+		DAC960_Error("IO port 0x%lx busy for Controller at\n",
 			     Controller, Controller->IO_Address);
 		goto Failure;
 	    }
···
 	  case DAC960_P_Controller:
 	    if (!request_region(Controller->IO_Address, 0x80,
 				Controller->FullModelName)){
-		DAC960_Error("IO port 0x%d busy for Controller at\n",
+		DAC960_Error("IO port 0x%lx busy for Controller at\n",
 			     Controller, Controller->IO_Address);
 		goto Failure;
 	    }
drivers/block/nbd.c (+1, -1)
···
 	spin_lock(&nbd->sock_lock);

 	if (!nbd->sock) {
-		spin_unlock_irq(&nbd->sock_lock);
+		spin_unlock(&nbd->sock_lock);
 		return;
 	}

drivers/bus/Kconfig (+1)
···
 config QCOM_EBI2
 	bool "Qualcomm External Bus Interface 2 (EBI2)"
 	depends on HAS_IOMEM
+	depends on ARCH_QCOM || COMPILE_TEST
 	help
 	  Say y here to enable support for the Qualcomm External Bus
 	  Interface 2, which can be used to connect things like NAND Flash,
drivers/cpufreq/intel_pstate.c
···
 /**
  * struct cpudata -	Per CPU instance data storage
  * @cpu:		CPU number for this instance data
+ * @policy:		CPUFreq policy value
  * @update_util:	CPUFreq utility callback information
  * @update_util_set:	CPUFreq utility callback is set
  * @iowait_boost:	iowait-related boost fraction
···
 struct cpudata {
 	int cpu;

+	unsigned int policy;
 	struct update_util_data update_util;
 	bool   update_util_set;

···
 	*min = clamp_t(int, min_perf, cpu->pstate.min_pstate, max_perf);
 }

-static void intel_pstate_set_min_pstate(struct cpudata *cpu)
+static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
 {
-	int pstate = cpu->pstate.min_pstate;
-
 	trace_cpu_frequency(pstate * cpu->pstate.scaling, cpu->cpu);
 	cpu->pstate.current_pstate = pstate;
 	/*
···
 	 */
 	wrmsrl_on_cpu(cpu->cpu, MSR_IA32_PERF_CTL,
 		      pstate_funcs.get_val(cpu, pstate));
+}
+
+static void intel_pstate_set_min_pstate(struct cpudata *cpu)
+{
+	intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate);
+}
+
+static void intel_pstate_max_within_limits(struct cpudata *cpu)
+{
+	int min_pstate, max_pstate;
+
+	update_turbo_state();
+	intel_pstate_get_min_max(cpu, &min_pstate, &max_pstate);
+	intel_pstate_set_pstate(cpu, max_pstate);
 }

 static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
···

 	from = cpu->pstate.current_pstate;

-	target_pstate = pstate_funcs.get_target_pstate(cpu);
+	target_pstate = cpu->policy == CPUFREQ_POLICY_PERFORMANCE ?
+		cpu->pstate.turbo_pstate : pstate_funcs.get_target_pstate(cpu);

 	intel_pstate_update_pstate(cpu, target_pstate);
···
 	pr_debug("set_policy cpuinfo.max %u policy->max %u\n",
 		 policy->cpuinfo.max_freq, policy->max);

-	cpu = all_cpu_data[0];
+	cpu = all_cpu_data[policy->cpu];
+	cpu->policy = policy->policy;
+
 	if (cpu->pstate.max_pstate_physical > cpu->pstate.max_pstate &&
 	    policy->max < policy->cpuinfo.max_freq &&
 	    policy->max > cpu->pstate.max_pstate * cpu->pstate.scaling) {
···
 		policy->max = policy->cpuinfo.max_freq;
 	}

-	if (policy->policy == CPUFREQ_POLICY_PERFORMANCE) {
+	if (cpu->policy == CPUFREQ_POLICY_PERFORMANCE) {
 		limits = &performance_limits;
 		if (policy->max >= policy->cpuinfo.max_freq) {
 			pr_debug("set performance\n");
···
 	limits->max_perf = round_up(limits->max_perf, FRAC_BITS);

  out:
+	if (cpu->policy == CPUFREQ_POLICY_PERFORMANCE) {
+		/*
+		 * NOHZ_FULL CPUs need this as the governor callback may not
+		 * be invoked on them.
+		 */
+		intel_pstate_clear_update_util_hook(policy->cpu);
+		intel_pstate_max_within_limits(cpu);
+	}
+
 	intel_pstate_set_update_util_hook(policy->cpu);

 	intel_pstate_hwp_set_policy(policy);
+1-1
drivers/dax/Kconfig
···14141515config DEV_DAX_PMEM1616 tristate "PMEM DAX: direct access to persistent memory"1717- depends on NVDIMM_DAX1717+ depends on LIBNVDIMM && NVDIMM_DAX1818 default DEV_DAX1919 help2020 Support raw access to persistent memory. Note that this
···284284285285config GPIO_MOCKUP286286 tristate "GPIO Testing Driver"287287- depends on GPIOLIB287287+ depends on GPIOLIB && SYSFS288288 select GPIO_SYSFS289289 help290290 This enables GPIO Testing driver, which provides a way to test GPIO
···653653{654654 int idx, i;655655 unsigned int irq_flags;656656+ int ret = -ENOENT;656657657658 for (i = 0, idx = 0; idx <= index; i++) {658659 struct acpi_gpio_info info;659660 struct gpio_desc *desc;660661661662 desc = acpi_get_gpiod_by_index(adev, NULL, i, &info);662662- if (IS_ERR(desc))663663+ if (IS_ERR(desc)) {664664+ ret = PTR_ERR(desc);663665 break;666666+ }664667 if (info.gpioint && idx++ == index) {665668 int irq = gpiod_to_irq(desc);666669···682679 }683680684681 }685685- return -ENOENT;682682+ return ret;686683}687684EXPORT_SYMBOL_GPL(acpi_dev_gpio_irq_get);688685
+41-1
drivers/gpio/gpiolib.c
···333333 u32 numdescs;334334};335335336336+#define GPIOHANDLE_REQUEST_VALID_FLAGS \337337+ (GPIOHANDLE_REQUEST_INPUT | \338338+ GPIOHANDLE_REQUEST_OUTPUT | \339339+ GPIOHANDLE_REQUEST_ACTIVE_LOW | \340340+ GPIOHANDLE_REQUEST_OPEN_DRAIN | \341341+ GPIOHANDLE_REQUEST_OPEN_SOURCE)342342+336343static long linehandle_ioctl(struct file *filep, unsigned int cmd,337344 unsigned long arg)338345{···350343351344 if (cmd == GPIOHANDLE_GET_LINE_VALUES_IOCTL) {352345 int val;346346+347347+ memset(&ghd, 0, sizeof(ghd));353348354349 /* TODO: check if descriptors are really input */355350 for (i = 0; i < lh->numdescs; i++) {···453444 u32 lflags = handlereq.flags;454445 struct gpio_desc *desc;455446447447+ if (offset >= gdev->ngpio) {448448+ ret = -EINVAL;449449+ goto out_free_descs;450450+ }451451+452452+ /* Return an error if an unknown flag is set */453453+ if (lflags & ~GPIOHANDLE_REQUEST_VALID_FLAGS) {454454+ ret = -EINVAL;455455+ goto out_free_descs;456456+ }457457+456458 desc = &gdev->descs[offset];457459 ret = gpiod_request(desc, lh->label);458460 if (ret)···556536 struct mutex read_lock;557537};558538539539+#define GPIOEVENT_REQUEST_VALID_FLAGS \540540+ (GPIOEVENT_REQUEST_RISING_EDGE | \541541+ GPIOEVENT_REQUEST_FALLING_EDGE)542542+559543static unsigned int lineevent_poll(struct file *filep,560544 struct poll_table_struct *wait)561545{···646622 */647623 if (cmd == GPIOHANDLE_GET_LINE_VALUES_IOCTL) {648624 int val;625625+626626+ memset(&ghd, 0, sizeof(ghd));649627650628 val = gpiod_get_value_cansleep(le->desc);651629 if (val < 0)···752726 lflags = eventreq.handleflags;753727 eflags = eventreq.eventflags;754728729729+ if (offset >= gdev->ngpio) {730730+ ret = -EINVAL;731731+ goto out_free_label;732732+ }733733+734734+ /* Return an error if an unknown flag is set */735735+ if ((lflags & ~GPIOHANDLE_REQUEST_VALID_FLAGS) ||736736+ (eflags & ~GPIOEVENT_REQUEST_VALID_FLAGS)) {737737+ ret = -EINVAL;738738+ goto out_free_label;739739+ }740740+755741 /* This is just wrong: we don't look for events on output lines */756742 if (lflags & GPIOHANDLE_REQUEST_OUTPUT) {757743 ret = -EINVAL;···861823 if (cmd == GPIO_GET_CHIPINFO_IOCTL) {862824 struct gpiochip_info chipinfo;863825826826+ memset(&chipinfo, 0, sizeof(chipinfo));827827+864828 strncpy(chipinfo.name, dev_name(&gdev->dev),865829 sizeof(chipinfo.name));866830 chipinfo.name[sizeof(chipinfo.name)-1] = '\0';···879839880840 if (copy_from_user(&lineinfo, ip, sizeof(lineinfo)))881841 return -EFAULT;882882- if (lineinfo.line_offset > gdev->ngpio)842842+ if (lineinfo.line_offset >= gdev->ngpio)883843 return -EINVAL;884844885845 desc = &gdev->descs[lineinfo.line_offset];
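The gpiolib chardev hardening above combines two recurring idioms: every user-supplied flags word is checked against an explicit whitelist mask, and every struct bound for copy_to_user() is zeroed first so stale stack bytes (for instance in compiler-inserted padding) never reach userspace. A minimal userspace sketch of the whitelist check follows; the REQ_* flag names are hypothetical stand-ins for the GPIOHANDLE_REQUEST_* bits, not the real ABI.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical request flags standing in for GPIOHANDLE_REQUEST_*. */
#define REQ_INPUT        (1u << 0)
#define REQ_OUTPUT       (1u << 1)
#define REQ_ACTIVE_LOW   (1u << 2)
#define REQ_VALID_FLAGS  (REQ_INPUT | REQ_OUTPUT | REQ_ACTIVE_LOW)

/* Reject a request carrying any bit outside the known-valid set. */
static int validate_flags(uint32_t lflags)
{
        if (lflags & ~REQ_VALID_FLAGS)
                return -1;      /* the ioctl returns -EINVAL here */
        return 0;
}

int main(void)
{
        printf("%d\n", validate_flags(REQ_INPUT | REQ_ACTIVE_LOW)); /* 0 */
        printf("%d\n", validate_flags(REQ_INPUT | (1u << 9)));      /* -1 */
        return 0;
}

Rejecting unknown bits up front also keeps the remaining flag space reserved, so future kernels can assign meaning to those bits without breaking old binaries that passed garbage.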
+5
drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
···754754755755int amdgpu_bo_init(struct amdgpu_device *adev)756756{757757+ /* reserve PAT memory space to WC for VRAM */758758+ arch_io_reserve_memtype_wc(adev->mc.aper_base,759759+ adev->mc.aper_size);760760+757761 /* Add an MTRR for the VRAM */758762 adev->mc.vram_mtrr = arch_phys_wc_add(adev->mc.aper_base,759763 adev->mc.aper_size);···773769{774770 amdgpu_ttm_fini(adev);775771 arch_phys_wc_del(adev->mc.vram_mtrr);772772+ arch_io_free_memtype_wc(adev->mc.aper_base, adev->mc.aper_size);776773}777774778775int amdgpu_bo_fbdev_mmap(struct amdgpu_bo *bo,
···446446447447int radeon_bo_init(struct radeon_device *rdev)448448{449449+ /* reserve PAT memory space to WC for VRAM */450450+ arch_io_reserve_memtype_wc(rdev->mc.aper_base,451451+ rdev->mc.aper_size);452452+449453 /* Add an MTRR for the VRAM */450454 if (!rdev->fastfb_working) {451455 rdev->mc.vram_mtrr = arch_phys_wc_add(rdev->mc.aper_base,···467463{468464 radeon_ttm_fini(rdev);469465 arch_phys_wc_del(rdev->mc.vram_mtrr);466466+ arch_io_free_memtype_wc(rdev->mc.aper_base, rdev->mc.aper_size);470467}471468472469/* Returns how many bytes TTM can move per IB.
···79798080config I2C_HIX5HD28181 tristate "Hix5hd2 high-speed I2C driver"8282- depends on ARCH_HIX5HD2 || COMPILE_TEST8282+ depends on ARCH_HISI || ARCH_HIX5HD2 || COMPILE_TEST8383 help8484- Say Y here to include support for high-speed I2C controller in the8585- Hisilicon based hix5hd2 SoCs.8484+ Say Y here to include support for the high-speed I2C controller8585+ used in HiSilicon hix5hd2 SoCs.86868787- This driver can also be built as a module. If so, the module8787+ This driver can also be built as a module. If so, the module8888 will be called i2c-hix5hd2.89899090config I2C_I801···589589590590config I2C_IMX591591 tristate "IMX I2C interface"592592- depends on ARCH_MXC || ARCH_LAYERSCAPE592592+ depends on ARCH_MXC || ARCH_LAYERSCAPE || COLDFIRE593593 help594594 Say Y here if you want to use the IIC bus controller on595595- the Freescale i.MX/MXC or Layerscape processors.595595+ the Freescale i.MX/MXC, Layerscape or ColdFire processors.596596597597 This driver can also be built as a module. If so, the module598598 will be called i2c-imx.
+14-3
drivers/i2c/busses/i2c-designware-core.c
···9595#define DW_IC_STATUS_TFE BIT(2)9696#define DW_IC_STATUS_MST_ACTIVITY BIT(5)97979898+#define DW_IC_SDA_HOLD_RX_SHIFT 169999+#define DW_IC_SDA_HOLD_RX_MASK GENMASK(23, DW_IC_SDA_HOLD_RX_SHIFT)100100+98101#define DW_IC_ERR_TX_ABRT 0x199102100103#define DW_IC_TAR_10BITADDR_MASTER BIT(12)···423420 /* Configure SDA Hold Time if required */424421 reg = dw_readl(dev, DW_IC_COMP_VERSION);425422 if (reg >= DW_IC_SDA_HOLD_MIN_VERS) {426426- if (dev->sda_hold_time) {427427- dw_writel(dev, dev->sda_hold_time, DW_IC_SDA_HOLD);428428- } else {423423+ if (!dev->sda_hold_time) {429424 /* Keep previous hold time setting if no one set it */430425 dev->sda_hold_time = dw_readl(dev, DW_IC_SDA_HOLD);431426 }427427+ /*428428+ * Workaround for avoiding TX arbitration loss in case the I2C429429+ * slave pulls SDA down "too quickly" after the falling edge of430430+ * SCL by enabling non-zero SDA RX hold. The specification says it431431+ * extends incoming SDA low to high transition while SCL is432432+ * high, but it appears to also help with the above issue.433433+ */434434+ if (!(dev->sda_hold_time & DW_IC_SDA_HOLD_RX_MASK))435435+ dev->sda_hold_time |= 1 << DW_IC_SDA_HOLD_RX_SHIFT;436436+ dw_writel(dev, dev->sda_hold_time, DW_IC_SDA_HOLD);432437 } else {433438 dev_warn(dev->dev,434439 "Hardware too old to adjust SDA hold time.\n");
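Per the new #defines, the 32-bit DW_IC_SDA_HOLD register carries the TX hold time in its low 16 bits and the RX hold time in bits 23:16; the workaround only programs a one-cycle RX hold when that field is still zero, so a value configured by firmware is left alone. A small sketch of that read-modify-write, assuming the same field layout:

#include <stdint.h>
#include <stdio.h>

#define SDA_HOLD_RX_SHIFT  16
#define SDA_HOLD_RX_MASK   (0xffu << SDA_HOLD_RX_SHIFT)  /* bits 23:16 */

/* Set a minimal one-cycle RX hold only if the field is still zero. */
static uint32_t apply_rx_hold(uint32_t sda_hold)
{
        if (!(sda_hold & SDA_HOLD_RX_MASK))
                sda_hold |= 1u << SDA_HOLD_RX_SHIFT;
        return sda_hold;
}

int main(void)
{
        /* A TX-only hold value of 0x1e gains the RX hold: 0x0001001e. */
        printf("0x%08x\n", apply_rx_hold(0x0000001e));
        return 0;
}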
···146146#define SMBHSTCFG_HST_EN 1147147#define SMBHSTCFG_SMB_SMI_EN 2148148#define SMBHSTCFG_I2C_EN 4149149+#define SMBHSTCFG_SPD_WD 0x10149150150151/* TCO configuration bits for TCOCTL */151152#define TCOCTL_EN 0x0100···866865 block = 1;867866 break;868867 case I2C_SMBUS_I2C_BLOCK_DATA:869869- /* NB: page 240 of ICH5 datasheet shows that the R/#W870870- * bit should be cleared here, even when reading */871871- outb_p((addr & 0x7f) << 1, SMBHSTADD(priv));868868+ /*869869+ * NB: page 240 of ICH5 datasheet shows that the R/#W870870+ * bit should be cleared here, even when reading.871871+ * However if SPD Write Disable is set (Lynx Point and later),872872+ * the read will fail if we don't set the R/#W bit.873873+ */874874+ outb_p(((addr & 0x7f) << 1) |875875+ ((priv->original_hstcfg & SMBHSTCFG_SPD_WD) ?876876+ (read_write & 0x01) : 0),877877+ SMBHSTADD(priv));872878 if (read_write == I2C_SMBUS_READ) {873879 /* NB: page 240 of ICH5 datasheet also shows874880 * that DATA1 is the cmd field when reading */···15811573 /* Disable SMBus interrupt feature if SMBus using SMI# */15821574 priv->features &= ~FEATURE_IRQ;15831575 }15761576+ if (temp & SMBHSTCFG_SPD_WD)15771577+ dev_info(&dev->dev, "SPD Write Disable is set\n");1584157815851579 /* Clear special mode bits */15861580 if (priv->features & (FEATURE_SMBUS_PEC | FEATURE_BLOCK_BUFFER))
···694694 t_calc->div_low--;695695 t_calc->div_high--;696696697697+ /* Set the tuning value to 0 so that it will not update the con register */698698+ t_calc->tuning = 0;697699 /* Maximum divider supported by hw is 0xffff */698700 if (t_calc->div_low > 0xffff) {699701 t_calc->div_low = 0xffff;
+1-1
drivers/i2c/busses/i2c-xgene-slimpro.c
···105105 struct mbox_chan *mbox_chan;106106 struct mbox_client mbox_client;107107 struct completion rd_complete;108108- u8 dma_buffer[I2C_SMBUS_BLOCK_MAX];108108+ u8 dma_buffer[I2C_SMBUS_BLOCK_MAX + 1]; /* dma_buffer[0] is used for length */109109 u32 *resp_msg;110110};111111
···16811681static void of_i2c_register_devices(struct i2c_adapter *adap)16821682{16831683 struct device_node *bus, *node;16841684+ struct i2c_client *client;1684168516851686 /* Only register child devices if the adapter has a node pointer set */16861687 if (!adap->dev.of_node)···16961695 for_each_available_child_of_node(bus, node) {16971696 if (of_node_test_and_set_flag(node, OF_POPULATED))16981697 continue;16991699- of_i2c_register_device(adap, node);16981698+16991699+ client = of_i2c_register_device(adap, node);17001700+ if (IS_ERR(client)) {17011701+ dev_warn(&adap->dev,17021702+ "Failed to create I2C device for %s\n",17031703+ node->full_name);17041704+ of_node_clear_flag(node, OF_POPULATED);17051705+ }17001706 }1701170717021708 of_node_put(bus);···23072299 if (IS_ERR(client)) {23082300 dev_err(&adap->dev, "failed to create client for '%s'\n",23092301 rd->dn->full_name);23022302+ of_node_clear_flag(rd->dn, OF_POPULATED);23102303 return notifier_from_errno(PTR_ERR(client));23112304 }23122305 break;
+2
drivers/iio/adc/Kconfig
···437437config TI_ADC081C438438 tristate "Texas Instruments ADC081C/ADC101C/ADC121C family"439439 depends on I2C440440+ select IIO_BUFFER441441+ select IIO_TRIGGERED_BUFFER440442 help441443 If you say yes here you get support for Texas Instruments ADC081C,442444 ADC101C and ADC121C ADC chips.
+4-3
drivers/iio/chemical/atlas-ph-sensor.c
···213213 struct device *dev = &data->client->dev;214214 int ret;215215 unsigned int val;216216+ __be16 rval;216217217217- ret = regmap_bulk_read(data->regmap, ATLAS_REG_EC_PROBE, &val, 2);218218+ ret = regmap_bulk_read(data->regmap, ATLAS_REG_EC_PROBE, &rval, 2);218219 if (ret)219220 return ret;220221221221- dev_info(dev, "probe set to K = %d.%.2d", be16_to_cpu(val) / 100,222222- be16_to_cpu(val) % 100);222222+ val = be16_to_cpu(rval);223223+ dev_info(dev, "probe set to K = %d.%.2d", val / 100, val % 100);223224224225 ret = regmap_read(data->regmap, ATLAS_REG_EC_CALIB_STATUS, &val);225226 if (ret)
+9-7
drivers/iio/temperature/maxim_thermocouple.c
···123123{124124 unsigned int storage_bytes = data->chip->read_size;125125 unsigned int shift = chan->scan_type.shift + (chan->address * 8);126126- unsigned int buf;126126+ __be16 buf16;127127+ __be32 buf32;127128 int ret;128128-129129- ret = spi_read(data->spi, (void *) &buf, storage_bytes);130130- if (ret)131131- return ret;132129133130 switch (storage_bytes) {134131 case 2:135135- *val = be16_to_cpu(buf);132132+ ret = spi_read(data->spi, (void *)&buf16, storage_bytes);133133+ *val = be16_to_cpu(buf16);136134 break;137135 case 4:138138- *val = be32_to_cpu(buf);136136+ ret = spi_read(data->spi, (void *)&buf32, storage_bytes);137137+ *val = be32_to_cpu(buf32);139138 break;140139 }140140+141141+ if (ret)142142+ return ret;141143142144 /* check to be sure this is a valid reading */143145 if (*val & data->chip->status_bit)
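This fix and the atlas-ph-sensor one above correct the same class of endianness bug: raw big-endian bytes were being read into a plain unsigned int, so the width of the buffer no longer matched the width of the data on the wire. The safe pattern is to receive into a variable of the exact wire size and convert explicitly. A portable userspace sketch, decoding by byte assembly instead of the kernel's __be16/be16_to_cpu types:

#include <stdint.h>
#include <stdio.h>

/* Decode a big-endian 16-bit word by byte assembly; host-order agnostic. */
static uint16_t be16_decode(const uint8_t b[2])
{
        return (uint16_t)((b[0] << 8) | b[1]);
}

int main(void)
{
        const uint8_t wire[2] = { 0x01, 0x2c };  /* 300 as sent on the bus */
        uint16_t val = be16_decode(wire);

        /* Mirrors the "probe set to K = %d.%.2d" print in the atlas hunk. */
        printf("K = %d.%.2d\n", val / 100, val % 100);
        return 0;
}

A 32-bit reading decodes the same way from four bytes; the point in both hunks is that the buffer handed to the transfer function is exactly as wide as the value being received.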
···266266 {"raid10_offset", "raid10 offset (striped mirrors)", 0, 2, 10, ALGORITHM_RAID10_OFFSET},267267 {"raid10_near", "raid10 near (striped mirrors)", 0, 2, 10, ALGORITHM_RAID10_NEAR},268268 {"raid10", "raid10 (striped mirrors)", 0, 2, 10, ALGORITHM_RAID10_DEFAULT},269269- {"raid4", "raid4 (dedicated last parity disk)", 1, 2, 4, ALGORITHM_PARITY_N}, /* raid4 layout = raid5_n */269269+ {"raid4", "raid4 (dedicated first parity disk)", 1, 2, 5, ALGORITHM_PARITY_0}, /* raid4 layout = raid5_0 */270270 {"raid5_n", "raid5 (dedicated last parity disk)", 1, 2, 5, ALGORITHM_PARITY_N},271271 {"raid5_ls", "raid5 (left symmetric)", 1, 2, 5, ALGORITHM_LEFT_SYMMETRIC},272272 {"raid5_rs", "raid5 (right symmetric)", 1, 2, 5, ALGORITHM_RIGHT_SYMMETRIC},···20872087 /*20882088 * No takeover/reshaping, because we don't have the extended v1.9.0 metadata20892089 */20902090- if (le32_to_cpu(sb->level) != mddev->level) {20902090+ if (le32_to_cpu(sb->level) != mddev->new_level) {20912091 DMERR("Reshaping/takeover raid sets not yet supported. (raid level/stripes/size change)");20922092 return -EINVAL;20932093 }20942094- if (le32_to_cpu(sb->layout) != mddev->layout) {20942094+ if (le32_to_cpu(sb->layout) != mddev->new_layout) {20952095 DMERR("Reshaping raid sets not yet supported. (raid layout change)");20962096 DMERR(" 0x%X vs 0x%X", le32_to_cpu(sb->layout), mddev->layout);20972097 DMERR(" Old layout: %s w/ %d copies",···21022102 raid10_md_layout_to_copies(mddev->layout));21032103 return -EINVAL;21042104 }21052105- if (le32_to_cpu(sb->stripe_sectors) != mddev->chunk_sectors) {21052105+ if (le32_to_cpu(sb->stripe_sectors) != mddev->new_chunk_sectors) {21062106 DMERR("Reshaping raid sets not yet supported. (stripe sectors change)");21072107 return -EINVAL;21082108 }···21142114 sb->num_devices, mddev->raid_disks);21152115 return -EINVAL;21162116 }21172117+21182118+ DMINFO("Discovered old metadata format; upgrading to extended metadata format");2117211921182120 /* Table line is checked vs. authoritative superblock */21192121 rs_set_new(rs);···22602258 if (!mddev->events && super_init_validation(rs, rdev))22612259 return -EINVAL;2262226022632263- if (le32_to_cpu(sb->compat_features) != FEATURE_FLAG_SUPPORTS_V190) {22612261+ if (le32_to_cpu(sb->compat_features) &&22622262+ le32_to_cpu(sb->compat_features) != FEATURE_FLAG_SUPPORTS_V190) {22642263 rs->ti->error = "Unable to assemble array: Unknown flag(s) in compatible feature flags";22652264 return -EINVAL;22662265 }···3649364636503647static struct target_type raid_target = {36513648 .name = "raid",36523652- .version = {1, 9, 0},36493649+ .version = {1, 9, 1},36533650 .module = THIS_MODULE,36543651 .ctr = raid_ctr,36553652 .dtr = raid_dtr,
+3-19
drivers/md/dm-raid1.c
···145145146146struct dm_raid1_bio_record {147147 struct mirror *m;148148- /* if details->bi_bdev == NULL, details were not saved */149148 struct dm_bio_details details;150149 region_t write_region;151150};···11991200 struct dm_raid1_bio_record *bio_record =12001201 dm_per_bio_data(bio, sizeof(struct dm_raid1_bio_record));1201120212021202- bio_record->details.bi_bdev = NULL;12031203-12041203 if (rw == WRITE) {12051204 /* Save region for mirror_end_io() handler */12061205 bio_record->write_region = dm_rh_bio_to_region(ms->rh, bio);···12571260 }1258126112591262 if (error == -EOPNOTSUPP)12601260- goto out;12631263+ return error;1261126412621265 if ((error == -EWOULDBLOCK) && (bio->bi_opf & REQ_RAHEAD))12631263- goto out;12661266+ return error;1264126712651268 if (unlikely(error)) {12661266- if (!bio_record->details.bi_bdev) {12671267- /*12681268- * There wasn't enough memory to record necessary12691269- * information for a retry or there was no other12701270- * mirror in-sync.12711271- */12721272- DMERR_LIMIT("Mirror read failed.");12731273- return -EIO;12741274- }12751275-12761269 m = bio_record->m;1277127012781271 DMERR("Mirror read failed from %s. Trying alternative device.",···12781291 bd = &bio_record->details;1279129212801293 dm_bio_restore(bd, bio);12811281- bio_record->details.bi_bdev = NULL;12941294+ bio->bi_error = 0;1282129512831296 queue_bio(ms, bio, rw);12841297 return DM_ENDIO_INCOMPLETE;12851298 }12861299 DMERR("All replicated volumes dead, failing I/O");12871300 }12881288-12891289-out:12901290- bio_record->details.bi_bdev = NULL;1291130112921302 return error;12931303}
+5-2
drivers/md/dm-rq.c
···856856 kthread_init_worker(&md->kworker);857857 md->kworker_task = kthread_run(kthread_worker_fn, &md->kworker,858858 "kdmwork-%s", dm_device_name(md));859859- if (IS_ERR(md->kworker_task))860860- return PTR_ERR(md->kworker_task);859859+ if (IS_ERR(md->kworker_task)) {860860+ int error = PTR_ERR(md->kworker_task);861861+ md->kworker_task = NULL;862862+ return error;863863+ }861864862865 elv_register_queue(md->queue);863866
+9-15
drivers/md/dm-table.c
···695695696696 tgt->type = dm_get_target_type(type);697697 if (!tgt->type) {698698- DMERR("%s: %s: unknown target type", dm_device_name(t->md),699699- type);698698+ DMERR("%s: %s: unknown target type", dm_device_name(t->md), type);700699 return -EINVAL;701700 }702701703702 if (dm_target_needs_singleton(tgt->type)) {704703 if (t->num_targets) {705705- DMERR("%s: target type %s must appear alone in table",706706- dm_device_name(t->md), type);707707- return -EINVAL;704704+ tgt->error = "singleton target type must appear alone in table";705705+ goto bad;708706 }709707 t->singleton = true;710708 }711709712710 if (dm_target_always_writeable(tgt->type) && !(t->mode & FMODE_WRITE)) {713713- DMERR("%s: target type %s may not be included in read-only tables",714714- dm_device_name(t->md), type);715715- return -EINVAL;711711+ tgt->error = "target type may not be included in a read-only table";712712+ goto bad;716713 }717714718715 if (t->immutable_target_type) {719716 if (t->immutable_target_type != tgt->type) {720720- DMERR("%s: immutable target type %s cannot be mixed with other target types",721721- dm_device_name(t->md), t->immutable_target_type->name);722722- return -EINVAL;717717+ tgt->error = "immutable target type cannot be mixed with other target types";718718+ goto bad;723719 }724720 } else if (dm_target_is_immutable(tgt->type)) {725721 if (t->num_targets) {726726- DMERR("%s: immutable target type %s cannot be mixed with other target types",727727- dm_device_name(t->md), tgt->type->name);728728- return -EINVAL;722722+ tgt->error = "immutable target type cannot be mixed with other target types";723723+ goto bad;729724 }730725 t->immutable_target_type = tgt->type;731726 }···735740 */736741 if (!adjoin(t, tgt)) {737742 tgt->error = "Gap in table";738738- r = -EINVAL;739743 goto bad;740744 }741745
···194194 ctx->mmio_err_ff = !!(work.flags & CXL_START_WORK_ERR_FF);195195196196 /*197197+ * Increment the mapped context count for adapter. This also checks198198+ * if adapter_context_lock is taken.199199+ */200200+ rc = cxl_adapter_context_get(ctx->afu->adapter);201201+ if (rc) {202202+ afu_release_irqs(ctx, ctx);203203+ goto out;204204+ }205205+206206+ /*197207 * We grab the PID here and not in the file open to allow for the case198208 * where a process (master, some daemon, etc) has opened the chardev on199209 * behalf of another process, so the AFU's mm gets bound to the process···215205 ctx->pid = get_task_pid(current, PIDTYPE_PID);216206 ctx->glpid = get_task_pid(current->group_leader, PIDTYPE_PID);217207218218- /*219219- * Increment the mapped context count for adapter. This also checks220220- * if adapter_context_lock is taken.221221- */222222- rc = cxl_adapter_context_get(ctx->afu->adapter);223223- if (rc) {224224- afu_release_irqs(ctx, ctx);225225- goto out;226226- }227208228209 trace_cxl_attach(ctx, work.work_element_descriptor, work.num_interrupts, amr);229210···222221 amr))) {223222 afu_release_irqs(ctx, ctx);224223 cxl_adapter_context_put(ctx->afu->adapter);224224+ put_pid(ctx->glpid);225225+ put_pid(ctx->pid);226226+ ctx->glpid = ctx->pid = NULL;225227 goto out;226228 }227229
···431431 if (vmci_handle_is_invalid(*handle)) {432432 u32 context_id = vmci_get_context_id();433433434434+ if (context_id == VMCI_INVALID_ID) {435435+ pr_warn("Failed to get context ID\n");436436+ result = VMCI_ERROR_NO_RESOURCES;437437+ goto free_mem;438438+ }439439+434440 /* Let resource code allocate a free ID for us */435441 new_handle = vmci_make_handle(context_id, VMCI_INVALID_ID);436442 } else {···531525532526 entry = container_of(resource, struct dbell_entry, resource);533527534534- if (vmci_guest_code_active()) {528528+ if (!hlist_unhashed(&entry->node)) {535529 int result;536530537531 dbell_index_table_remove(entry);
+1-1
drivers/misc/vmw_vmci/vmci_driver.c
···113113114114MODULE_AUTHOR("VMware, Inc.");115115MODULE_DESCRIPTION("VMware Virtual Machine Communication Interface.");116116-MODULE_VERSION("1.1.4.0-k");116116+MODULE_VERSION("1.1.5.0-k");117117MODULE_LICENSE("GPL v2");
+5-5
drivers/mtd/ubi/fastmap.c
···707707 fmvhdr->vol_type,708708 be32_to_cpu(fmvhdr->last_eb_bytes));709709710710- if (!av)711711- goto fail_bad;712712- if (PTR_ERR(av) == -EINVAL) {713713- ubi_err(ubi, "volume (ID %i) already exists",714714- fmvhdr->vol_id);710710+ if (IS_ERR(av)) {711711+ if (PTR_ERR(av) == -EEXIST)712712+ ubi_err(ubi, "volume (ID %i) already exists",713713+ fmvhdr->vol_id);714714+715715 goto fail_bad;716716 }717717
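Like the dm-rq fix earlier, this one comes down to the ERR_PTR convention: a pointer-returning function encodes a negative errno inside the pointer itself, so callers must test IS_ERR() rather than NULL and decode the code with PTR_ERR(). A self-contained sketch of the encoding, simplified from the kernel's err.h:

#include <errno.h>
#include <stdio.h>

/* Userspace re-creation of the kernel's ERR_PTR/IS_ERR/PTR_ERR helpers. */
#define MAX_ERRNO 4095

static void *ERR_PTR(long error) { return (void *)error; }
static long PTR_ERR(const void *ptr) { return (long)ptr; }
static int IS_ERR(const void *ptr)
{
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

/* Hypothetical lookup that reports a duplicate via the pointer itself. */
static void *add_volume(int vol_id)
{
        return ERR_PTR(-EEXIST);
}

int main(void)
{
        void *av = add_volume(7);

        if (!av)  /* never true: ERR_PTR values are non-NULL */
                return 1;
        if (IS_ERR(av) && PTR_ERR(av) == -EEXIST)
                fprintf(stderr, "volume already exists\n");
        return 0;
}

That is the trap the old fastmap code fell into: !av is never true for an error pointer, so errors sailed straight past the check.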
+1-1
drivers/nvdimm/Kconfig
···8989 Select Y if unsure90909191config NVDIMM_DAX9292- tristate "NVDIMM DAX: Raw access to persistent memory"9292+ bool "NVDIMM DAX: Raw access to persistent memory"9393 default LIBNVDIMM9494 depends on NVDIMM_PFN9595 help
+8-6
drivers/nvdimm/namespace_devs.c
···21762176 return devs;2177217721782178 err:21792179- for (i = 0; devs[i]; i++)21802180- if (is_nd_blk(&nd_region->dev))21812181- namespace_blk_release(devs[i]);21822182- else21832183- namespace_pmem_release(devs[i]);21842184- kfree(devs);21792179+ if (devs) {21802180+ for (i = 0; devs[i]; i++)21812181+ if (is_nd_blk(&nd_region->dev))21822182+ namespace_blk_release(devs[i]);21832183+ else21842184+ namespace_pmem_release(devs[i]);21852185+ kfree(devs);21862186+ }21852187 return NULL;21862188}21872189
···610610 * msi_capability_init - configure device's MSI capability structure611611 * @dev: pointer to the pci_dev data structure of MSI device function612612 * @nvec: number of interrupts to allocate613613+ * @affinity: flag to indicate whether the CPU IRQ affinity mask should be set613614 *614615 * Setup the MSI capability structure of the device with the requested615616 * number of interrupts. A return value of zero indicates the successful···753752 * @dev: pointer to the pci_dev data structure of MSI-X device function754753 * @entries: pointer to an array of struct msix_entry entries755754 * @nvec: number of @entries755755+ * @affinity: flag to indicate whether the CPU IRQ affinity mask should be set756756 *757757 * Setup the MSI-X capability structure of device function with a758758 * single MSI-X irq. A return of zero indicates the successful setup of
···12051205 mdc, lpm);12061206 return mdc;12071207 }12081208- fcx_max_data = mdc * FCX_MAX_DATA_FACTOR;12081208+ fcx_max_data = (u32)mdc * FCX_MAX_DATA_FACTOR;12091209 if (fcx_max_data < private->fcx_max_data) {12101210 dev_warn(&device->cdev->dev,12111211 "The maximum data size for zHPF requests %u "···16751675 " data size for zHPF requests failed\n");16761676 return 0;16771677 } else16781678- return mdc * FCX_MAX_DATA_FACTOR;16781678+ return (u32)mdc * FCX_MAX_DATA_FACTOR;16791679}1680168016811681/*
+4-2
drivers/s390/cio/chp.c
···780780static int __init chp_init(void)781781{782782 struct chp_id chpid;783783- int ret;783783+ int state, ret;784784785785 ret = crw_register_handler(CRW_RSC_CPATH, chp_process_crw);786786 if (ret)···791791 return 0;792792 /* Register available channel-paths. */793793 chp_id_for_each(&chpid) {794794- if (chp_info_get_status(chpid) != CHP_STATUS_NOT_RECOGNIZED)794794+ state = chp_info_get_status(chpid);795795+ if (state == CHP_STATUS_CONFIGURED ||796796+ state == CHP_STATUS_STANDBY)795797 chp_new(chpid);796798 }797799
+3-3
drivers/scsi/NCR5380.c
···353353#endif354354355355356356-static int probe_irq __initdata;356356+static int probe_irq;357357358358/**359359 * probe_intr - helper for IRQ autoprobe···365365 * used by the IRQ probe code.366366 */367367368368-static irqreturn_t __init probe_intr(int irq, void *dev_id)368368+static irqreturn_t probe_intr(int irq, void *dev_id)369369{370370 probe_irq = irq;371371 return IRQ_HANDLED;···380380 * and then looking to see what interrupt actually turned up.381381 */382382383383-static int __init __maybe_unused NCR5380_probe_irq(struct Scsi_Host *instance,383383+static int __maybe_unused NCR5380_probe_irq(struct Scsi_Host *instance,384384 int possible)385385{386386 struct NCR5380_hostdata *hostdata = shost_priv(instance);
···11871187 hdata.type = heap->type;11881188 hdata.heap_id = heap->id;1189118911901190- ret = copy_to_user(&buffer[cnt],11911191- &hdata, sizeof(hdata));11901190+ if (copy_to_user(&buffer[cnt], &hdata, sizeof(hdata))) {11911191+ ret = -EFAULT;11921192+ goto out;11931193+ }1192119411931195 cnt++;11941196 if (cnt >= max_cnt)
+1-1
drivers/staging/android/ion/ion_of.c
···107107108108 heap_pdev = of_platform_device_create(node, heaps[i].name,109109 &pdev->dev);110110- if (!pdev)110110+ if (!heap_pdev)111111 return ERR_PTR(-ENOMEM);112112 heap_pdev->dev.platform_data = &heaps[i];113113
+1
drivers/staging/greybus/arche-platform.c
···128128 pdev = of_find_device_by_node(np);129129 if (!pdev) {130130 pr_err("arche-platform device not found\n");131131+ of_node_put(np);131132 return -ENODEV;132133 }133134
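The one-line fix restores get/put balance on the device-tree node: the reference taken when np was looked up must be dropped on every exit path, the error branch included. A toy sketch of the pairing (names hypothetical, the refcount reduced to a bare integer):

#include <stdio.h>

struct node { int refs; };

static void node_get(struct node *n) { n->refs++; }
static void node_put(struct node *n) { n->refs--; }

/* Hypothetical probe: the lookup took a reference every path must drop. */
static int probe(struct node *np, int device_found)
{
        node_get(np);            /* e.g. the earlier phandle lookup */

        if (!device_found) {
                node_put(np);    /* the fix: release on the error path too */
                return -1;       /* -ENODEV in the hunk above */
        }

        node_put(np);
        return 0;
}

int main(void)
{
        struct node np = { .refs = 1 };
        probe(&np, 0);
        printf("refs after failed probe: %d\n", np.refs);  /* 1: balanced */
        return 0;
}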
+2-1
drivers/staging/greybus/es2.c
···15481548 INIT_LIST_HEAD(&es2->arpcs);15491549 spin_lock_init(&es2->arpc_lock);1550155015511551- if (es2_arpc_in_enable(es2))15511551+ retval = es2_arpc_in_enable(es2);15521552+ if (retval)15521553 goto error;1553155415541555 retval = gb_hd_add(hd);
+2-4
drivers/staging/greybus/gpio.c
···702702 ret = gb_gpio_irqchip_add(gpio, irqc, 0,703703 handle_level_irq, IRQ_TYPE_NONE);704704 if (ret) {705705- dev_err(&connection->bundle->dev,706706- "failed to add irq chip: %d\n", ret);705705+ dev_err(&gbphy_dev->dev, "failed to add irq chip: %d\n", ret);707706 goto exit_line_free;708707 }709708710709 ret = gpiochip_add(gpio);711710 if (ret) {712712- dev_err(&connection->bundle->dev,713713- "failed to add gpio chip: %d\n", ret);711711+ dev_err(&gbphy_dev->dev, "failed to add gpio chip: %d\n", ret);714712 goto exit_gpio_irqchip_remove;715713 }716714
+1-1
drivers/staging/greybus/module.c
···127127 return module;128128129129err_put_interfaces:130130- for (--i; i > 0; --i)130130+ for (--i; i >= 0; --i)131131 gb_interface_put(module->interfaces[i]);132132133133 put_device(&module->dev);
+1-1
drivers/staging/greybus/uart.c
···888888 minor = alloc_minor(gb_tty);889889 if (minor < 0) {890890 if (minor == -ENOSPC) {891891- dev_err(&connection->bundle->dev,891891+ dev_err(&gbphy_dev->dev,892892 "no more free minor numbers\n");893893 retval = -ENODEV;894894 } else {
+2
drivers/staging/iio/accel/sca3000_core.c
···468468 case SCA3000_MEAS_MODE_OP_2:469469 *base_freq = info->option_mode_2_freq;470470 break;471471+ default:472472+ ret = -EINVAL;471473 }472474error_ret:473475 return ret;
···9999 case UART_LCR:100100 valshift = UNIPHIER_UART_LCR_SHIFT;101101 /* Divisor latch access bit does not exist. */102102- value &= ~(UART_LCR_DLAB << valshift);102102+ value &= ~UART_LCR_DLAB;103103 /* fall through */104104 case UART_MCR:105105 offset = UNIPHIER_UART_LCR_MCR;···199199200200 regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);201201 if (!regs) {202202- dev_err(dev, "failed to get memory resource");202202+ dev_err(dev, "failed to get memory resource\n");203203 return -EINVAL;204204 }205205
+1
drivers/tty/serial/Kconfig
···16381638config SERIAL_STM3216391639 tristate "STMicroelectronics STM32 serial port support"16401640 select SERIAL_CORE16411641+ depends on HAS_DMA16411642 depends on ARM || COMPILE_TEST16421643 help16431644 This driver is for the on-chip Serial Controller on
+22-4
drivers/tty/serial/atmel_serial.c
···21322132 mode |= ATMEL_US_USMODE_RS485;21332133 } else if (termios->c_cflag & CRTSCTS) {21342134 /* RS232 with hardware handshake (RTS/CTS) */21352135- if (atmel_use_dma_rx(port) && !atmel_use_fifo(port)) {21362136- dev_info(port->dev, "not enabling hardware flow control because DMA is used");21372137- termios->c_cflag &= ~CRTSCTS;21382138- } else {21352135+ if (atmel_use_fifo(port) &&21362136+ !mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_CTS)) {21372137+ /*21382138+ * With ATMEL_US_USMODE_HWHS set, the controller will21392139+ * be able to drive the RTS pin high/low when the RX21402140+ * FIFO is above RXFTHRES/below RXFTHRES2.21412141+ * It will also disable the transmitter when the CTS21422142+ * pin is high.21432143+ * This mode is not activated if the CTS pin is a GPIO21442144+ * because in this case, the transmitter is always21452145+ * disabled (there must be an internal pull-up21462146+ * responsible for this behaviour).21472147+ * If the RTS pin is a GPIO, the controller won't be21482148+ * able to drive it according to the FIFO thresholds,21492149+ * but it will be handled by the driver.21502150+ */21392151 mode |= ATMEL_US_USMODE_HWHS;21522152+ } else {21532153+ /*21542154+ * For platforms without FIFO, the flow control is21552155+ * handled by the driver.21562156+ */21572157+ mode |= ATMEL_US_USMODE_NORMAL;21402158 }21412159 } else {21422160 /* RS232 without hardware handshake */
···859859 if (new_cols == vc->vc_cols && new_rows == vc->vc_rows)860860 return 0;861861862862+ if (new_screen_size > (4 << 20))863863+ return -EINVAL;862864 newscreen = kmalloc(new_screen_size, GFP_USER);863865 if (!newscreen)864866 return -ENOMEM;867867+868868+ if (vc == sel_cons)869869+ clear_selection();865870866871 old_rows = vc->vc_rows;867872 old_row_size = vc->vc_size_row;···11701165 break;11711166 case 3: /* erase scroll-back buffer (and whole display) */11721167 scr_memsetw(vc->vc_screenbuf, vc->vc_video_erase_char,11731173- vc->vc_screenbuf_size >> 1);11681168+ vc->vc_screenbuf_size);11741169 set_origin(vc);11751170 if (con_is_visible(vc))11761171 update_screen(vc);
+2
drivers/usb/chipidea/host.c
···188188189189 if (hcd) {190190 usb_remove_hcd(hcd);191191+ ci->role = CI_ROLE_END;192192+ synchronize_irq(ci->irq);191193 usb_put_hcd(hcd);192194 if (ci->platdata->reg_vbus && !ci_otg_is_fsm_mode(ci) &&193195 (ci->platdata->flags & CI_HDRC_TURN_VBUS_EARLY_ON))
+10-1
drivers/usb/dwc2/core.c
···463463 */464464void dwc2_force_dr_mode(struct dwc2_hsotg *hsotg)465465{466466+ bool ret;467467+466468 switch (hsotg->dr_mode) {467469 case USB_DR_MODE_HOST:468468- dwc2_force_mode(hsotg, true);470470+ ret = dwc2_force_mode(hsotg, true);471471+ /*472472+ * NOTE: This is required for some Rockchip SoC-based473473+ * platforms on their host-only dwc2.474474+ */475475+ if (!ret)476476+ msleep(50);477477+469478 break;470479 case USB_DR_MODE_PERIPHERAL:471480 dwc2_force_mode(hsotg, false);
+7
drivers/usb/dwc2/core.h
···259259 DWC2_L3, /* Off state */260260};261261262262+/*263263+ * Gadget periodic tx fifo sizes as used by the legacy driver;264264+ * EP0 is not included.265265+ */266266+#define DWC2_G_P_LEGACY_TX_FIFO_SIZE {256, 256, 256, 256, 768, 768, 768, \267267+ 768, 0, 0, 0, 0, 0, 0, 0}268268+262269/* Gadget ep0 states */263270enum dwc2_ep0_state {264271 DWC2_EP0_SETUP,
+42-11
drivers/usb/dwc2/gadget.c
···186186 */187187static void dwc2_hsotg_init_fifo(struct dwc2_hsotg *hsotg)188188{189189- unsigned int fifo;189189+ unsigned int ep;190190 unsigned int addr;191191 int timeout;192192- u32 dptxfsizn;193192 u32 val;194193195194 /* Reset fifo map if not correctly cleared during previous session */···216217 * them to endpoints dynamically according to maxpacket size value of217218 * given endpoint.218219 */219219- for (fifo = 1; fifo < MAX_EPS_CHANNELS; fifo++) {220220- dptxfsizn = dwc2_readl(hsotg->regs + DPTXFSIZN(fifo));220220+ for (ep = 1; ep < MAX_EPS_CHANNELS; ep++) {221221+ if (!hsotg->g_tx_fifo_sz[ep])222222+ continue;223223+ val = addr;224224+ val |= hsotg->g_tx_fifo_sz[ep] << FIFOSIZE_DEPTH_SHIFT;225225+ WARN_ONCE(addr + hsotg->g_tx_fifo_sz[ep] > hsotg->fifo_mem,226226+ "insufficient fifo memory");227227+ addr += hsotg->g_tx_fifo_sz[ep];221228222222- val = (dptxfsizn & FIFOSIZE_DEPTH_MASK) | addr;223223- addr += dptxfsizn >> FIFOSIZE_DEPTH_SHIFT;224224-225225- if (addr > hsotg->fifo_mem)226226- break;227227-228228- dwc2_writel(val, hsotg->regs + DPTXFSIZN(fifo));229229+ dwc2_writel(val, hsotg->regs + DPTXFSIZN(ep));229230 }230231231232 /*···38063807static void dwc2_hsotg_of_probe(struct dwc2_hsotg *hsotg)38073808{38083809 struct device_node *np = hsotg->dev->of_node;38103810+ u32 len = 0;38113811+ u32 i = 0;3809381238103813 /* Enable dma if requested in device tree */38113814 hsotg->g_using_dma = of_property_read_bool(np, "g-use-dma");3812381538163816+ /*38173817+ * Register TX periodic fifo size per endpoint.38183818+ * EP0 is excluded since it has no fifo configuration.38193819+ */38203820+ if (!of_find_property(np, "g-tx-fifo-size", &len))38213821+ goto rx_fifo;38223822+38233823+ len /= sizeof(u32);38243824+38253825+ /* Read tx fifo sizes other than ep0 */38263826+ if (of_property_read_u32_array(np, "g-tx-fifo-size",38273827+ &hsotg->g_tx_fifo_sz[1], len))38283828+ goto rx_fifo;38293829+38303830+ /* Add ep0 */38313831+ len++;38323832+38333833+ /* Make remaining TX fifos unavailable */38343834+ if (len < MAX_EPS_CHANNELS) {38353835+ for (i = len; i < MAX_EPS_CHANNELS; i++)38363836+ hsotg->g_tx_fifo_sz[i] = 0;38373837+ }38383838+38393839+rx_fifo:38133840 /* Register RX fifo size */38143841 of_property_read_u32(np, "g-rx-fifo-size", &hsotg->g_rx_fifo_sz);38153842···38573832 struct device *dev = hsotg->dev;38583833 int epnum;38593834 int ret;38353835+ int i;38363836+ u32 p_tx_fifo[] = DWC2_G_P_LEGACY_TX_FIFO_SIZE;3860383738613838 /* Initialize to legacy fifo configuration values */38623839 hsotg->g_rx_fifo_sz = 2048;38633840 hsotg->g_np_g_tx_fifo_sz = 1024;38413841+ memcpy(&hsotg->g_tx_fifo_sz[1], p_tx_fifo, sizeof(p_tx_fifo));38643842 /* Device tree specific probe */38653843 dwc2_hsotg_of_probe(hsotg);38663844···38813853 dev_dbg(dev, "NonPeriodic TXFIFO size: %d\n",38823854 hsotg->g_np_g_tx_fifo_sz);38833855 dev_dbg(dev, "RXFIFO size: %d\n", hsotg->g_rx_fifo_sz);38563856+ for (i = 0; i < MAX_EPS_CHANNELS; i++)38573857+ dev_dbg(dev, "Periodic TXFIFO%2d size: %d\n", i,38583858+ hsotg->g_tx_fifo_sz[i]);3884385938853860 hsotg->gadget.max_speed = USB_SPEED_HIGH;38863861 hsotg->gadget.ops = &dwc2_hsotg_gadget_ops;
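The rewritten init loop lays the periodic TX FIFOs out back to back in the controller's FIFO RAM: each DPTXFSIZN register receives the FIFO's start address in its low half and the depth above FIFOSIZE_DEPTH_SHIFT, and a size of zero marks the FIFO unavailable. A sketch of that packing under the legacy sizes; the register layout follows the hunk, while the start address (after the RX and non-periodic TX FIFOs) is an assumption for the example:

#include <stdint.h>
#include <stdio.h>

#define MAX_EPS      16
#define DEPTH_SHIFT  16  /* stands in for FIFOSIZE_DEPTH_SHIFT */

/* Pack "start address | depth << DEPTH_SHIFT" per FIFO, skipping size 0. */
static void layout_fifos(const uint32_t sz[MAX_EPS], uint32_t addr,
                         uint32_t regs[MAX_EPS])
{
        for (int ep = 1; ep < MAX_EPS; ep++) {
                if (!sz[ep])
                        continue;  /* size 0: FIFO not available */
                regs[ep] = addr | (sz[ep] << DEPTH_SHIFT);
                addr += sz[ep];
        }
}

int main(void)
{
        /* Legacy periodic sizes from DWC2_G_P_LEGACY_TX_FIFO_SIZE, sans ep0. */
        uint32_t sz[MAX_EPS] = { 0, 256, 256, 256, 256, 768, 768, 768, 768 };
        uint32_t regs[MAX_EPS] = { 0 };

        layout_fifos(sz, 2048 + 1024, regs);
        printf("DPTXFSIZ1 = 0x%08x\n", regs[1]);  /* 0x01000c00 */
        return 0;
}

The WARN_ONCE in the driver covers the failure mode this scheme introduces: a device tree can now request more FIFO memory than the core actually has, which the old read-back-the-defaults loop could not.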
+18-8
drivers/usb/dwc3/gadget.c
···783783 req->trb = trb;784784 req->trb_dma = dwc3_trb_dma_offset(dep, trb);785785 req->first_trb_index = dep->trb_enqueue;786786+ dep->queued_requests++;786787 }787788788789 dwc3_ep_inc_enq(dep);···833832 trb->ctrl |= DWC3_TRB_CTRL_SID_SOFN(req->request.stream_id);834833835834 trb->ctrl |= DWC3_TRB_CTRL_HWO;836836-837837- dep->queued_requests++;838835839836 trace_dwc3_prepare_trb(dep, trb);840837}···1073107410741075 list_add_tail(&req->list, &dep->pending_list);1075107610761076- if (usb_endpoint_xfer_isoc(dep->endpoint.desc) &&10771077- dep->flags & DWC3_EP_PENDING_REQUEST) {10781078- if (list_empty(&dep->started_list)) {10771077+ /*10781078+ * NOTICE: Isochronous endpoints should NEVER be prestarted. We must10791079+ * wait for an XferNotReady event so we know the current10801080+ * (micro-)frame number.10811081+ *10821082+ * Without this trick, we are very, very likely to get Bus Expiry10831083+ * errors, which will force us to issue an EndTransfer command.10841084+ */10851085+ if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {10861086+ if ((dep->flags & DWC3_EP_PENDING_REQUEST) &&10871087+ list_empty(&dep->started_list)) {10791088 dwc3_stop_active_transfer(dwc, dep->number, true);10801089 dep->flags = DWC3_EP_ENABLED;10811090 }···18681861 unsigned int s_pkt = 0;18691862 unsigned int trb_status;1870186318711871- dep->queued_requests--;18721864 dwc3_ep_inc_deq(dep);18651865+18661866+ if (req->trb == trb)18671867+ dep->queued_requests--;18681868+18731869 trace_dwc3_complete_trb(dep, trb);1874187018751871 /*···29902980 kfree(dwc->setup_buf);2991298129922982err2:29932993- dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb),29832983+ dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb) * 2,29942984 dwc->ep0_trb, dwc->ep0_trb_addr);2995298529962986err1:···30153005 kfree(dwc->setup_buf);30163006 kfree(dwc->zlp_buf);3017300730183018- dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb),30083008+ dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb) * 2,30193009 dwc->ep0_trb, dwc->ep0_trb_addr);3020301030213011 dma_free_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
+92-15
drivers/usb/gadget/function/f_fs.c
···136136 /*137137 * Buffer for holding data from partial reads which may happen since138138 * we’re rounding user read requests to a multiple of a max packet size.139139+ *140140+ * The pointer is initialised with a NULL value and may be set by141141+ * __ffs_epfile_read_data function to point to a temporary buffer.142142+ *143143+ * In normal operation, calls to __ffs_epfile_read_buffered will consume144144+ * data from said buffer and eventually free it. Importantly, while the145145+ * function is using the buffer, it sets the pointer to NULL. This is146146+ * all right since __ffs_epfile_read_data and __ffs_epfile_read_buffered147147+ * can never run concurrently (they are synchronised by epfile->mutex)148148+ * so the latter will not assign a new value to the pointer.149149+ *150150+ * Meanwhile ffs_func_eps_disable frees the buffer (if the pointer is151151+ * valid) and sets the pointer to READ_BUFFER_DROP value. This special152152+ * value is the crux of the synchronisation between ffs_func_eps_disable and153153+ * __ffs_epfile_read_data.154154+ *155155+ * Once __ffs_epfile_read_data is about to finish, it will try to set the156156+ * pointer back to its old value (as described above), but seeing as the157157+ * pointer is not-NULL (namely READ_BUFFER_DROP) it will instead free158158+ * the buffer.159159+ *160160+ * == State transitions ==161161+ *162162+ * • ptr == NULL: (initial state)163163+ * ◦ __ffs_epfile_read_buffer_free: go to ptr == DROP164164+ * ◦ __ffs_epfile_read_buffered: nop165165+ * ◦ __ffs_epfile_read_data allocates temp buffer: go to ptr == buf166166+ * ◦ reading finishes: n/a, not in ‘and reading’ state167167+ * • ptr == DROP:168168+ * ◦ __ffs_epfile_read_buffer_free: nop169169+ * ◦ __ffs_epfile_read_buffered: go to ptr == NULL170170+ * ◦ __ffs_epfile_read_data allocates temp buffer: free buf, nop171171+ * ◦ reading finishes: n/a, not in ‘and reading’ state172172+ * • ptr == buf:173173+ * ◦ __ffs_epfile_read_buffer_free: free buf, go to ptr == DROP174174+ * ◦ __ffs_epfile_read_buffered: go to ptr == NULL and reading175175+ * ◦ __ffs_epfile_read_data: n/a, __ffs_epfile_read_buffered176176+ * is always called first177177+ * ◦ reading finishes: n/a, not in ‘and reading’ state178178+ * • ptr == NULL and reading:179179+ * ◦ __ffs_epfile_read_buffer_free: go to ptr == DROP and reading180180+ * ◦ __ffs_epfile_read_buffered: n/a, mutex is held181181+ * ◦ __ffs_epfile_read_data: n/a, mutex is held182182+ * ◦ reading finishes and …183183+ * … all data read: free buf, go to ptr == NULL184184+ * … otherwise: go to ptr == buf and reading185185+ * • ptr == DROP and reading:186186+ * ◦ __ffs_epfile_read_buffer_free: nop187187+ * ◦ __ffs_epfile_read_buffered: n/a, mutex is held188188+ * ◦ __ffs_epfile_read_data: n/a, mutex is held189189+ * ◦ reading finishes: free buf, go to ptr == DROP139190 */140140- struct ffs_buffer *read_buffer; /* P: epfile->mutex */191191+ struct ffs_buffer *read_buffer;192192+#define READ_BUFFER_DROP ((struct ffs_buffer *)ERR_PTR(-ESHUTDOWN))141193142194 char name[5];143195···788736 schedule_work(&io_data->work);789737}790738739739+static void __ffs_epfile_read_buffer_free(struct ffs_epfile *epfile)740740+{741741+ /*742742+ * See comment in struct ffs_epfile for full read_buffer pointer743743+ * synchronisation story.744744+ */745745+ struct ffs_buffer *buf = xchg(&epfile->read_buffer, READ_BUFFER_DROP);746746+ if (buf && buf != READ_BUFFER_DROP)747747+ kfree(buf);748748+}749749+791750/* Assumes epfile->mutex is held. 
*/792751static ssize_t __ffs_epfile_read_buffered(struct ffs_epfile *epfile,793752 struct iov_iter *iter)794753{795795- struct ffs_buffer *buf = epfile->read_buffer;754754+ /*755755+ * Null out epfile->read_buffer so ffs_func_eps_disable does not free756756+ * the buffer while we are using it. See comment in struct ffs_epfile757757+ * for full read_buffer pointer synchronisation story.758758+ */759759+ struct ffs_buffer *buf = xchg(&epfile->read_buffer, NULL);796760 ssize_t ret;797797- if (!buf)761761+ if (!buf || buf == READ_BUFFER_DROP)798762 return 0;799763800764 ret = copy_to_iter(buf->data, buf->length, iter);801765 if (buf->length == ret) {802766 kfree(buf);803803- epfile->read_buffer = NULL;804804- } else if (unlikely(iov_iter_count(iter))) {767767+ return ret;768768+ }769769+770770+ if (unlikely(iov_iter_count(iter))) {805771 ret = -EFAULT;806772 } else {807773 buf->length -= ret;808774 buf->data += ret;809775 }776776+777777+ if (cmpxchg(&epfile->read_buffer, NULL, buf))778778+ kfree(buf);779779+810780 return ret;811781}812782···857783 buf->length = data_len;858784 buf->data = buf->storage;859785 memcpy(buf->storage, data + ret, data_len);860860- epfile->read_buffer = buf;786786+787787+ /*788788+ * At this point read_buffer is NULL or READ_BUFFER_DROP (if789789+ * ffs_func_eps_disable has been called in the meanwhile). See comment790790+ * in struct ffs_epfile for full read_buffer pointer synchronisation791791+ * story.792792+ */793793+ if (unlikely(cmpxchg(&epfile->read_buffer, NULL, buf)))794794+ kfree(buf);861795862796 return ret;863797}···1179109711801098 ENTER();1181109911821182- kfree(epfile->read_buffer);11831183- epfile->read_buffer = NULL;11001100+ __ffs_epfile_read_buffer_free(epfile);11841101 ffs_data_closed(epfile->ffs);1185110211861103 return 0;···18051724 unsigned count = func->ffs->eps_count;18061725 unsigned long flags;1807172617271727+ spin_lock_irqsave(&func->ffs->eps_lock, flags);18081728 do {18091809- if (epfile)18101810- mutex_lock(&epfile->mutex);18111811- spin_lock_irqsave(&func->ffs->eps_lock, flags);18121729 /* pending requests get nuked */18131730 if (likely(ep->ep))18141731 usb_ep_disable(ep->ep);18151732 ++ep;18161816- spin_unlock_irqrestore(&func->ffs->eps_lock, flags);1817173318181734 if (epfile) {18191735 epfile->ep = NULL;18201820- kfree(epfile->read_buffer);18211821- epfile->read_buffer = NULL;18221822- mutex_unlock(&epfile->mutex);17361736+ __ffs_epfile_read_buffer_free(epfile);18231737 ++epfile;18241738 }18251739 } while (--count);17401740+ spin_unlock_irqrestore(&func->ffs->eps_lock, flags);18261741}1827174218281743static int ffs_func_eps_enable(struct ffs_function *func)
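The state machine documented in the struct comment boils down to three atomic moves on a single pointer: a reader takes ownership with xchg(ptr, NULL), disable swaps in the DROP marker with xchg(ptr, DROP), and handing a buffer back uses cmpxchg(ptr, NULL, buf), which fails, and therefore frees, whenever a disable raced in between. A userspace sketch of the same handoff with C11 atomics; the sentinel address stands in for READ_BUFFER_DROP:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

static char drop_sentinel;
#define BUF_DROP ((void *)&drop_sentinel)  /* plays READ_BUFFER_DROP */

static _Atomic(void *) read_buffer;  /* NULL, BUF_DROP, or a live buffer */

/* Reader: take exclusive ownership of whatever is currently stored. */
static void *reader_take(void)
{
        void *buf = atomic_exchange(&read_buffer, NULL);
        return buf == BUF_DROP ? NULL : buf;
}

/* Reader: hand a partially consumed buffer back, unless a concurrent
 * disable replaced our NULL with BUF_DROP in the meantime. */
static void reader_put_back(void *buf)
{
        void *expected = NULL;
        if (!atomic_compare_exchange_strong(&read_buffer, &expected, buf))
                free(buf);  /* disable won the race: drop the data */
}

/* Disable: free any stored buffer and leave the drop marker behind. */
static void disable_drop(void)
{
        void *buf = atomic_exchange(&read_buffer, BUF_DROP);
        if (buf && buf != BUF_DROP)
                free(buf);
}

int main(void)
{
        atomic_store(&read_buffer, malloc(16));
        void *buf = reader_take();  /* pointer is now NULL */
        disable_drop();             /* pointer is now BUF_DROP */
        reader_put_back(buf);       /* cmpxchg fails, buffer is freed */
        printf("dropped: %d\n", atomic_load(&read_buffer) == BUF_DROP);
        return 0;
}

The payoff in the driver is visible in the ffs_func_eps_disable hunk above: it no longer needs epfile->mutex at all, and its locking shrinks to a single eps_lock critical section.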
···221221 ohci->num_ports = board->ports;222222 at91_start_hc(pdev);223223224224+ /*225225+ * The RemoteWakeupConnected bit has to be set explicitly226226+ * before calling ohci_run. The reset value of this bit is 0.227227+ */228228+ ohci->hc_control = OHCI_CTRL_RWC;229229+224230 retval = usb_add_hcd(hcd, irq, IRQF_SHARED);225231 if (retval == 0) {226232 device_wakeup_enable(hcd->self.controller);···683677 * REVISIT: some boards will be able to turn VBUS off...684678 */685679 if (!ohci_at91->wakeup) {686686- ohci->hc_control = ohci_readl(ohci, &ohci->regs->control);687687- ohci->hc_control &= OHCI_CTRL_RWC;688688- ohci_writel(ohci, ohci->hc_control, &ohci->regs->control);689680 ohci->rh_state = OHCI_RH_HALTED;690681691682 /* flush the writes */
···314314#define XDEV_U2 (0x2 << 5)315315#define XDEV_U3 (0x3 << 5)316316#define XDEV_INACTIVE (0x6 << 5)317317+#define XDEV_POLLING (0x7 << 5)318318+#define XDEV_COMP_MODE (0xa << 5)317319#define XDEV_RESUME (0xf << 5)318320/* true: port has power (see HCC_PPC) */319321#define PORT_POWER (1 << 9)···16551653#define XHCI_MTK_HOST (1 << 21)16561654#define XHCI_SSIC_PORT_UNUSED (1 << 22)16571655#define XHCI_NO_64BIT_SUPPORT (1 << 23)16561656+#define XHCI_MISSING_CAS (1 << 24)16581657 unsigned int num_active_eps;16591658 unsigned int limit_active_eps;16601659 /* There are two roothubs to keep track of bus suspend info for */
+4
drivers/usb/musb/musb_gadget.c
···1255125512561256 map_dma_buffer(request, musb, musb_ep);1257125712581258+ pm_runtime_get_sync(musb->controller);12581259 spin_lock_irqsave(&musb->lock, lockflags);1259126012601261 /* don't queue if the ep is down */···1276127512771276unlock:12781277 spin_unlock_irqrestore(&musb->lock, lockflags);12781278+ pm_runtime_mark_last_busy(musb->controller);12791279+ pm_runtime_put_autosuspend(musb->controller);12801280+12791281 return status;12801282}12811283
···58055805 int ret = 0;5806580658075807 if (sctx->cur_ino != sctx->cmp_key->objectid) {58085808+58095809+ if (result == BTRFS_COMPARE_TREE_CHANGED) {58105810+ struct extent_buffer *leaf_l;58115811+ struct extent_buffer *leaf_r;58125812+ struct btrfs_file_extent_item *ei_l;58135813+ struct btrfs_file_extent_item *ei_r;58145814+58155815+ leaf_l = sctx->left_path->nodes[0];58165816+ leaf_r = sctx->right_path->nodes[0];58175817+ ei_l = btrfs_item_ptr(leaf_l,58185818+ sctx->left_path->slots[0],58195819+ struct btrfs_file_extent_item);58205820+ ei_r = btrfs_item_ptr(leaf_r,58215821+ sctx->right_path->slots[0],58225822+ struct btrfs_file_extent_item);58235823+58245824+ /*58255825+ * We may have found an extent item that has changed58265826+ * only its disk_bytenr field and the corresponding58275827+ * inode item was not updated. This case happens due to58285828+ * very specific timings during relocation when a leaf58295829+ * that contains file extent items is COWed while58305830+ * relocation is ongoing and it's in the stage where it58315831+ * updates data pointers. So when this happens we can58325832+ * safely ignore it since we know it's the same extent,58335833+ * but just at different logical and physical locations58345834+ * (when an extent is fully replaced with a new one, we58355835+ * know the generation number must have changed too,58365836+ * since snapshot creation implies committing the current58375837+ * transaction, and the inode item must have been updated58385838+ * as well).58395839+ * This replacement of the disk_bytenr happens at58405840+ * relocation.c:replace_file_extents() through58415841+ * relocation.c:btrfs_reloc_cow_block().58425842+ */58435843+ if (btrfs_file_extent_generation(leaf_l, ei_l) ==58445844+ btrfs_file_extent_generation(leaf_r, ei_r) &&58455845+ btrfs_file_extent_ram_bytes(leaf_l, ei_l) ==58465846+ btrfs_file_extent_ram_bytes(leaf_r, ei_r) &&58475847+ btrfs_file_extent_compression(leaf_l, ei_l) ==58485848+ btrfs_file_extent_compression(leaf_r, ei_r) &&58495849+ btrfs_file_extent_encryption(leaf_l, ei_l) ==58505850+ btrfs_file_extent_encryption(leaf_r, ei_r) &&58515851+ btrfs_file_extent_other_encoding(leaf_l, ei_l) ==58525852+ btrfs_file_extent_other_encoding(leaf_r, ei_r) &&58535853+ btrfs_file_extent_type(leaf_l, ei_l) ==58545854+ btrfs_file_extent_type(leaf_r, ei_r) &&58555855+ btrfs_file_extent_disk_bytenr(leaf_l, ei_l) !=58565856+ btrfs_file_extent_disk_bytenr(leaf_r, ei_r) &&58575857+ btrfs_file_extent_disk_num_bytes(leaf_l, ei_l) ==58585858+ btrfs_file_extent_disk_num_bytes(leaf_r, ei_r) &&58595859+ btrfs_file_extent_offset(leaf_l, ei_l) ==58605860+ btrfs_file_extent_offset(leaf_r, ei_r) &&58615861+ btrfs_file_extent_num_bytes(leaf_l, ei_l) ==58625862+ btrfs_file_extent_num_bytes(leaf_r, ei_r))58635863+ return 0;58645864+ }58655865+58085866 inconsistent_snapshot_error(sctx, result, "extent");58095867 return -EIO;58105868 }
+6-14
fs/btrfs/tree-log.c
···27132713 int index, int error)27142714{27152715 struct btrfs_log_ctx *ctx;27162716+ struct btrfs_log_ctx *safe;2716271727172717- if (!error) {27182718- INIT_LIST_HEAD(&root->log_ctxs[index]);27192719- return;27202720- }27212721-27222722- list_for_each_entry(ctx, &root->log_ctxs[index], list)27182718+ list_for_each_entry_safe(ctx, safe, &root->log_ctxs[index], list) {27192719+ list_del_init(&ctx->list);27232720 ctx->log_ret = error;27212721+ }2724272227252723 INIT_LIST_HEAD(&root->log_ctxs[index]);27262724}···29592961 mutex_unlock(&root->log_mutex);2960296229612963out_wake_log_root:29622962- /*29632963- * We needn't get log_mutex here because we are sure all29642964- * the other tasks are blocked.29652965- */29642964+ mutex_lock(&log_root_tree->log_mutex);29662965 btrfs_remove_all_log_ctxs(log_root_tree, index2, ret);2967296629682968- mutex_lock(&log_root_tree->log_mutex);29692967 log_root_tree->log_transid_committed++;29702968 atomic_set(&log_root_tree->log_commit[index2], 0);29712969 mutex_unlock(&log_root_tree->log_mutex);···29722978 if (waitqueue_active(&log_root_tree->log_commit_wait[index2]))29732979 wake_up(&log_root_tree->log_commit_wait[index2]);29742980out:29752975- /* See above. */29762976- btrfs_remove_all_log_ctxs(root, index1, ret);29772977-29782981 mutex_lock(&root->log_mutex);29822982+ btrfs_remove_all_log_ctxs(root, index1, ret);29792983 root->log_transid_committed++;29802984 atomic_set(&root->log_commit[index1], 0);29812985 mutex_unlock(&root->log_mutex);
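The switch to list_for_each_entry_safe() is what makes the unlink-while-walking legal: each ctx is removed from the list inside the loop body, so the iterator has to cache the successor before the entry is touched. A minimal sketch of the same discipline on a singly linked list; note that, as in the kernel, the entries belong to their waiters and are only unlinked, never freed:

#include <stdio.h>

struct ctx {
        int log_ret;
        struct ctx *next;
};

/* Unlink every waiter while walking, caching ->next first -- the same
 * discipline list_for_each_entry_safe() provides for kernel list_heads. */
static void remove_all_ctxs(struct ctx **head, int error)
{
        struct ctx *c = *head, *safe;

        while (c) {
                safe = c->next;     /* grab the successor before unlinking */
                c->next = NULL;     /* list_del_init() equivalent */
                c->log_ret = error; /* report the commit result */
                c = safe;
        }
        *head = NULL;               /* the list ends up reinitialised */
}

int main(void)
{
        struct ctx a = { 0, NULL }, b = { 0, &a }, c = { 0, &b };
        struct ctx *head = &c;

        remove_all_ctxs(&head, -5);  /* e.g. -EIO */
        printf("empty: %d, a.log_ret: %d\n", head == NULL, a.log_ret);
        return 0;
}

The relocated mutex_lock calls matter for the same reason: per the hunk, the walk now always runs under log_mutex, so a waiter that times out and deletes its own ctx cannot race the teardown.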
···7373 }7474 }75757676- dentry->d_time = jiffies + orangefs_dcache_timeout_msecs*HZ/1000;7676+ orangefs_set_timeout(dentry);7777 ret = 1;7878out_release_op:7979 op_release(new_op);···9494static int orangefs_d_revalidate(struct dentry *dentry, unsigned int flags)9595{9696 int ret;9797+ unsigned long time = (unsigned long) dentry->d_fsdata;97989898- if (time_before(jiffies, dentry->d_time))9999+ if (time_before(jiffies, time))99100 return 1;100101101102 if (flags & LOOKUP_RCU)
+7-7
fs/orangefs/file.c
···621621 * readahead cache (if any); this forces an expensive refresh of622622 * data for the next caller of mmap (or 'get_block' accesses)623623 */624624- if (file->f_path.dentry->d_inode &&625625- file->f_path.dentry->d_inode->i_mapping &&626626- mapping_nrpages(&file->f_path.dentry->d_inode->i_data)) {624624+ if (file_inode(file) &&625625+ file_inode(file)->i_mapping &&626626+ mapping_nrpages(&file_inode(file)->i_data)) {627627 if (orangefs_features & ORANGEFS_FEATURE_READAHEAD) {628628 gossip_debug(GOSSIP_INODE_DEBUG,629629 "calling flush_racache on %pU\n",···632632 gossip_debug(GOSSIP_INODE_DEBUG,633633 "flush_racache finished\n");634634 }635635- truncate_inode_pages(file->f_path.dentry->d_inode->i_mapping,635635+ truncate_inode_pages(file_inode(file)->i_mapping,636636 0);637637 }638638 return 0;···648648{649649 int ret = -EINVAL;650650 struct orangefs_inode_s *orangefs_inode =651651- ORANGEFS_I(file->f_path.dentry->d_inode);651651+ ORANGEFS_I(file_inode(file));652652 struct orangefs_kernel_op_s *new_op = NULL;653653654654 /* required call */···661661662662 ret = service_operation(new_op,663663 "orangefs_fsync",664664- get_interruptible_flag(file->f_path.dentry->d_inode));664664+ get_interruptible_flag(file_inode(file)));665665666666 gossip_debug(GOSSIP_FILE_DEBUG,667667 "orangefs_fsync got return value of %d\n",···669669670670 op_release(new_op);671671672672- orangefs_flush_inode(file->f_path.dentry->d_inode);672672+ orangefs_flush_inode(file_inode(file));673673 return ret;674674}675675
···252252 * Inherently racy -- command line shares address space253253 * with code and data.254254 */255255- rv = access_remote_vm(mm, arg_end - 1, &c, 1, FOLL_FORCE);255255+ rv = access_remote_vm(mm, arg_end - 1, &c, 1, 0);256256 if (rv <= 0)257257 goto out_free_page;258258···270270 int nr_read;271271272272 _count = min3(count, len, PAGE_SIZE);273273- nr_read = access_remote_vm(mm, p, page, _count,274274- FOLL_FORCE);273273+ nr_read = access_remote_vm(mm, p, page, _count, 0);275274 if (nr_read < 0)276275 rv = nr_read;277276 if (nr_read <= 0)···305306 bool final;306307307308 _count = min3(count, len, PAGE_SIZE);308308- nr_read = access_remote_vm(mm, p, page, _count,309309- FOLL_FORCE);309309+ nr_read = access_remote_vm(mm, p, page, _count, 0);310310 if (nr_read < 0)311311 rv = nr_read;312312 if (nr_read <= 0)···354356 bool final;355357356358 _count = min3(count, len, PAGE_SIZE);357357- nr_read = access_remote_vm(mm, p, page, _count,358358- FOLL_FORCE);359359+ nr_read = access_remote_vm(mm, p, page, _count, 0);359360 if (nr_read < 0)360361 rv = nr_read;361362 if (nr_read <= 0)···832835 unsigned long addr = *ppos;833836 ssize_t copied;834837 char *page;835835- unsigned int flags = FOLL_FORCE;838838+ unsigned int flags;836839837840 if (!mm)838841 return 0;···845848 if (!atomic_inc_not_zero(&mm->mm_users))846849 goto free;847850851851+ /* Maybe we should limit FOLL_FORCE to actual ptrace users? */852852+ flags = FOLL_FORCE;848853 if (write)849854 flags |= FOLL_WRITE;850855···970971 max_len = min_t(size_t, PAGE_SIZE, count);971972 this_len = min(max_len, this_len);972973973973- retval = access_remote_vm(mm, (env_start + src),974974- page, this_len, FOLL_FORCE);974974+ retval = access_remote_vm(mm, (env_start + src), page, this_len, 0);975975976976 if (retval <= 0) {977977 ret = retval;···10121014{10131015 struct mm_struct *mm = file->private_data;10141016 unsigned int nwords = 0;10171017+10181018+ if (!mm)10191019+ return 0;10151020 do {10161021 nwords += 2;10171022 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
+8
fs/ubifs/dir.c
···543543544544 if (err != -ENOENT)545545 ubifs_err(c, "cannot find next direntry, error %d", err);546546+ else547547+ /*548548+ * -ENOENT is a non-fatal error in this context; the TNC uses549549+ * it to indicate that the cursor moved past the current directory550550+ * and readdir() has to stop.551551+ */552552+ err = 0;553553+546554547555 /* 2 is a special value indicating that there are no more direntries */548556 ctx->pos = 2;
+246-172
fs/xfs/libxfs/xfs_bmap.c
···39743974 * allocating, so skip that check by pretending to be freeing.39753975 */39763976 error = xfs_alloc_fix_freelist(&args, XFS_ALLOC_FLAG_FREEING);39773977- if (error)39783978- goto error0;39793979-error0:39803977 xfs_perag_put(args.pag);39813978 if (error)39823979 trace_xfs_bmap_remap_alloc_error(ap->ip, error, _RET_IP_);···39943997 xfs_alloc_is_userdata(ap->datatype))39953998 return xfs_bmap_rtalloc(ap);39963999 return xfs_bmap_btalloc(ap);40004000+}40014001+40024002+/* Trim extent to fit a logical block range. */40034003+void40044004+xfs_trim_extent(40054005+ struct xfs_bmbt_irec *irec,40064006+ xfs_fileoff_t bno,40074007+ xfs_filblks_t len)40084008+{40094009+ xfs_fileoff_t distance;40104010+ xfs_fileoff_t end = bno + len;40114011+40124012+ if (irec->br_startoff + irec->br_blockcount <= bno ||40134013+ irec->br_startoff >= end) {40144014+ irec->br_blockcount = 0;40154015+ return;40164016+ }40174017+40184018+ if (irec->br_startoff < bno) {40194019+ distance = bno - irec->br_startoff;40204020+ if (isnullstartblock(irec->br_startblock))40214021+ irec->br_startblock = DELAYSTARTBLOCK;40224022+ if (irec->br_startblock != DELAYSTARTBLOCK &&40234023+ irec->br_startblock != HOLESTARTBLOCK)40244024+ irec->br_startblock += distance;40254025+ irec->br_startoff += distance;40264026+ irec->br_blockcount -= distance;40274027+ }40284028+40294029+ if (end < irec->br_startoff + irec->br_blockcount) {40304030+ distance = irec->br_startoff + irec->br_blockcount - end;40314031+ irec->br_blockcount -= distance;40324032+ }39974033}3998403439994035/*···48594829 return stolen;48604830}4861483148324832+int48334833+xfs_bmap_del_extent_delay(48344834+ struct xfs_inode *ip,48354835+ int whichfork,48364836+ xfs_extnum_t *idx,48374837+ struct xfs_bmbt_irec *got,48384838+ struct xfs_bmbt_irec *del)48394839+{48404840+ struct xfs_mount *mp = ip->i_mount;48414841+ struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);48424842+ struct xfs_bmbt_irec new;48434843+ int64_t da_old, da_new, da_diff = 0;48444844+ xfs_fileoff_t del_endoff, got_endoff;48454845+ xfs_filblks_t got_indlen, new_indlen, stolen;48464846+ int error = 0, state = 0;48474847+ bool isrt;48484848+48494849+ XFS_STATS_INC(mp, xs_del_exlist);48504850+48514851+ isrt = (whichfork == XFS_DATA_FORK) && XFS_IS_REALTIME_INODE(ip);48524852+ del_endoff = del->br_startoff + del->br_blockcount;48534853+ got_endoff = got->br_startoff + got->br_blockcount;48544854+ da_old = startblockval(got->br_startblock);48554855+ da_new = 0;48564856+48574857+ ASSERT(*idx >= 0);48584858+ ASSERT(*idx < ifp->if_bytes / sizeof(struct xfs_bmbt_rec));48594859+ ASSERT(del->br_blockcount > 0);48604860+ ASSERT(got->br_startoff <= del->br_startoff);48614861+ ASSERT(got_endoff >= del_endoff);48624862+48634863+ if (isrt) {48644864+ int64_t rtexts = XFS_FSB_TO_B(mp, del->br_blockcount);48654865+48664866+ do_div(rtexts, mp->m_sb.sb_rextsize);48674867+ xfs_mod_frextents(mp, rtexts);48684868+ }48694869+48704870+ /*48714871+ * Update the inode delalloc counter now and wait to update the48724872+ * sb counters as we might have to borrow some blocks for the48734873+ * indirect block accounting.48744874+ */48754875+ xfs_trans_reserve_quota_nblks(NULL, ip, -((long)del->br_blockcount), 0,48764876+ isrt ? 
XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS);48774877+ ip->i_delayed_blks -= del->br_blockcount;48784878+48794879+ if (whichfork == XFS_COW_FORK)48804880+ state |= BMAP_COWFORK;48814881+48824882+ if (got->br_startoff == del->br_startoff)48834883+ state |= BMAP_LEFT_CONTIG;48844884+ if (got_endoff == del_endoff)48854885+ state |= BMAP_RIGHT_CONTIG;48864886+48874887+ switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) {48884888+ case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:48894889+ /*48904890+ * Matches the whole extent. Delete the entry.48914891+ */48924892+ xfs_iext_remove(ip, *idx, 1, state);48934893+ --*idx;48944894+ break;48954895+ case BMAP_LEFT_CONTIG:48964896+ /*48974897+ * Deleting the first part of the extent.48984898+ */48994899+ trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);49004900+ got->br_startoff = del_endoff;49014901+ got->br_blockcount -= del->br_blockcount;49024902+ da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip,49034903+ got->br_blockcount), da_old);49044904+ got->br_startblock = nullstartblock((int)da_new);49054905+ xfs_bmbt_set_all(xfs_iext_get_ext(ifp, *idx), got);49064906+ trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);49074907+ break;49084908+ case BMAP_RIGHT_CONTIG:49094909+ /*49104910+ * Deleting the last part of the extent.49114911+ */49124912+ trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);49134913+ got->br_blockcount = got->br_blockcount - del->br_blockcount;49144914+ da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip,49154915+ got->br_blockcount), da_old);49164916+ got->br_startblock = nullstartblock((int)da_new);49174917+ xfs_bmbt_set_all(xfs_iext_get_ext(ifp, *idx), got);49184918+ trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);49194919+ break;49204920+ case 0:49214921+ /*49224922+ * Deleting the middle of the extent.49234923+ *49244924+ * Distribute the original indlen reservation across the two new49254925+ * extents. 
Steal blocks from the deleted extent if necessary.49264926+ * Stealing blocks simply fudges the fdblocks accounting below.49274927+ * Warn if either of the new indlen reservations is zero as this49284928+ * can lead to delalloc problems.49294929+ */49304930+ trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);49314931+49324932+ got->br_blockcount = del->br_startoff - got->br_startoff;49334933+ got_indlen = xfs_bmap_worst_indlen(ip, got->br_blockcount);49344934+49354935+ new.br_blockcount = got_endoff - del_endoff;49364936+ new_indlen = xfs_bmap_worst_indlen(ip, new.br_blockcount);49374937+49384938+ WARN_ON_ONCE(!got_indlen || !new_indlen);49394939+ stolen = xfs_bmap_split_indlen(da_old, &got_indlen, &new_indlen,49404940+ del->br_blockcount);49414941+49424942+ got->br_startblock = nullstartblock((int)got_indlen);49434943+ xfs_bmbt_set_all(xfs_iext_get_ext(ifp, *idx), got);49444944+ trace_xfs_bmap_post_update(ip, *idx, 0, _THIS_IP_);49454945+49464946+ new.br_startoff = del_endoff;49474947+ new.br_state = got->br_state;49484948+ new.br_startblock = nullstartblock((int)new_indlen);49494949+49504950+ ++*idx;49514951+ xfs_iext_insert(ip, *idx, 1, &new, state);49524952+49534953+ da_new = got_indlen + new_indlen - stolen;49544954+ del->br_blockcount -= stolen;49554955+ break;49564956+ }49574957+49584958+ ASSERT(da_old >= da_new);49594959+ da_diff = da_old - da_new;49604960+ if (!isrt)49614961+ da_diff += del->br_blockcount;49624962+ if (da_diff)49634963+ xfs_mod_fdblocks(mp, da_diff, false);49644964+ return error;49654965+}49664966+49674967+void49684968+xfs_bmap_del_extent_cow(49694969+ struct xfs_inode *ip,49704970+ xfs_extnum_t *idx,49714971+ struct xfs_bmbt_irec *got,49724972+ struct xfs_bmbt_irec *del)49734973+{49744974+ struct xfs_mount *mp = ip->i_mount;49754975+ struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, XFS_COW_FORK);49764976+ struct xfs_bmbt_irec new;49774977+ xfs_fileoff_t del_endoff, got_endoff;49784978+ int state = BMAP_COWFORK;49794979+49804980+ XFS_STATS_INC(mp, xs_del_exlist);49814981+49824982+ del_endoff = del->br_startoff + del->br_blockcount;49834983+ got_endoff = got->br_startoff + got->br_blockcount;49844984+49854985+ ASSERT(*idx >= 0);49864986+ ASSERT(*idx < ifp->if_bytes / sizeof(struct xfs_bmbt_rec));49874987+ ASSERT(del->br_blockcount > 0);49884988+ ASSERT(got->br_startoff <= del->br_startoff);49894989+ ASSERT(got_endoff >= del_endoff);49904990+ ASSERT(!isnullstartblock(got->br_startblock));49914991+49924992+ if (got->br_startoff == del->br_startoff)49934993+ state |= BMAP_LEFT_CONTIG;49944994+ if (got_endoff == del_endoff)49954995+ state |= BMAP_RIGHT_CONTIG;49964996+49974997+ switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) {49984998+ case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:49994999+ /*50005000+ * Matches the whole extent. 
Delete the entry.50015001+ */50025002+ xfs_iext_remove(ip, *idx, 1, state);50035003+ --*idx;50045004+ break;50055005+ case BMAP_LEFT_CONTIG:50065006+ /*50075007+ * Deleting the first part of the extent.50085008+ */50095009+ trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);50105010+ got->br_startoff = del_endoff;50115011+ got->br_blockcount -= del->br_blockcount;50125012+ got->br_startblock = del->br_startblock + del->br_blockcount;50135013+ xfs_bmbt_set_all(xfs_iext_get_ext(ifp, *idx), got);50145014+ trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);50155015+ break;50165016+ case BMAP_RIGHT_CONTIG:50175017+ /*50185018+ * Deleting the last part of the extent.50195019+ */50205020+ trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);50215021+ got->br_blockcount -= del->br_blockcount;50225022+ xfs_bmbt_set_all(xfs_iext_get_ext(ifp, *idx), got);50235023+ trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);50245024+ break;50255025+ case 0:50265026+ /*50275027+ * Deleting the middle of the extent.50285028+ */50295029+ trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);50305030+ got->br_blockcount = del->br_startoff - got->br_startoff;50315031+ xfs_bmbt_set_all(xfs_iext_get_ext(ifp, *idx), got);50325032+ trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);50335033+50345034+ new.br_startoff = del_endoff;50355035+ new.br_blockcount = got_endoff - del_endoff;50365036+ new.br_state = got->br_state;50375037+ new.br_startblock = del->br_startblock + del->br_blockcount;50385038+50395039+ ++*idx;50405040+ xfs_iext_insert(ip, *idx, 1, &new, state);50415041+ break;50425042+ }50435043+}50445044+48625045/*48635046 * Called by xfs_bmapi to update file extent records and the btree48645047 * after removing space (or undoing a delayed allocation).···54115168 xfs_mod_fdblocks(mp, (int64_t)(da_old - da_new), false);54125169done:54135170 *logflagsp = flags;54145414- return error;54155415-}54165416-54175417-/* Remove an extent from the CoW fork. Similar to xfs_bmap_del_extent. 
*/54185418-int54195419-xfs_bunmapi_cow(54205420- struct xfs_inode *ip,54215421- struct xfs_bmbt_irec *del)54225422-{54235423- xfs_filblks_t da_new;54245424- xfs_filblks_t da_old;54255425- xfs_fsblock_t del_endblock = 0;54265426- xfs_fileoff_t del_endoff;54275427- int delay;54285428- struct xfs_bmbt_rec_host *ep;54295429- int error;54305430- struct xfs_bmbt_irec got;54315431- xfs_fileoff_t got_endoff;54325432- struct xfs_ifork *ifp;54335433- struct xfs_mount *mp;54345434- xfs_filblks_t nblks;54355435- struct xfs_bmbt_irec new;54365436- /* REFERENCED */54375437- uint qfield;54385438- xfs_filblks_t temp;54395439- xfs_filblks_t temp2;54405440- int state = BMAP_COWFORK;54415441- int eof;54425442- xfs_extnum_t eidx;54435443-54445444- mp = ip->i_mount;54455445- XFS_STATS_INC(mp, xs_del_exlist);54465446-54475447- ep = xfs_bmap_search_extents(ip, del->br_startoff, XFS_COW_FORK, &eof,54485448- &eidx, &got, &new);54495449-54505450- ifp = XFS_IFORK_PTR(ip, XFS_COW_FORK); ifp = ifp;54515451- ASSERT((eidx >= 0) && (eidx < ifp->if_bytes /54525452- (uint)sizeof(xfs_bmbt_rec_t)));54535453- ASSERT(del->br_blockcount > 0);54545454- ASSERT(got.br_startoff <= del->br_startoff);54555455- del_endoff = del->br_startoff + del->br_blockcount;54565456- got_endoff = got.br_startoff + got.br_blockcount;54575457- ASSERT(got_endoff >= del_endoff);54585458- delay = isnullstartblock(got.br_startblock);54595459- ASSERT(isnullstartblock(del->br_startblock) == delay);54605460- qfield = 0;54615461- error = 0;54625462- /*54635463- * If deleting a real allocation, must free up the disk space.54645464- */54655465- if (!delay) {54665466- nblks = del->br_blockcount;54675467- qfield = XFS_TRANS_DQ_BCOUNT;54685468- /*54695469- * Set up del_endblock and cur for later.54705470- */54715471- del_endblock = del->br_startblock + del->br_blockcount;54725472- da_old = da_new = 0;54735473- } else {54745474- da_old = startblockval(got.br_startblock);54755475- da_new = 0;54765476- nblks = 0;54775477- }54785478- qfield = qfield;54795479- nblks = nblks;54805480-54815481- /*54825482- * Set flag value to use in switch statement.54835483- * Left-contig is 2, right-contig is 1.54845484- */54855485- switch (((got.br_startoff == del->br_startoff) << 1) |54865486- (got_endoff == del_endoff)) {54875487- case 3:54885488- /*54895489- * Matches the whole extent. 
Delete the entry.54905490- */54915491- xfs_iext_remove(ip, eidx, 1, BMAP_COWFORK);54925492- --eidx;54935493- break;54945494-54955495- case 2:54965496- /*54975497- * Deleting the first part of the extent.54985498- */54995499- trace_xfs_bmap_pre_update(ip, eidx, state, _THIS_IP_);55005500- xfs_bmbt_set_startoff(ep, del_endoff);55015501- temp = got.br_blockcount - del->br_blockcount;55025502- xfs_bmbt_set_blockcount(ep, temp);55035503- if (delay) {55045504- temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),55055505- da_old);55065506- xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));55075507- trace_xfs_bmap_post_update(ip, eidx, state, _THIS_IP_);55085508- da_new = temp;55095509- break;55105510- }55115511- xfs_bmbt_set_startblock(ep, del_endblock);55125512- trace_xfs_bmap_post_update(ip, eidx, state, _THIS_IP_);55135513- break;55145514-55155515- case 1:55165516- /*55175517- * Deleting the last part of the extent.55185518- */55195519- temp = got.br_blockcount - del->br_blockcount;55205520- trace_xfs_bmap_pre_update(ip, eidx, state, _THIS_IP_);55215521- xfs_bmbt_set_blockcount(ep, temp);55225522- if (delay) {55235523- temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),55245524- da_old);55255525- xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));55265526- trace_xfs_bmap_post_update(ip, eidx, state, _THIS_IP_);55275527- da_new = temp;55285528- break;55295529- }55305530- trace_xfs_bmap_post_update(ip, eidx, state, _THIS_IP_);55315531- break;55325532-55335533- case 0:55345534- /*55355535- * Deleting the middle of the extent.55365536- */55375537- temp = del->br_startoff - got.br_startoff;55385538- trace_xfs_bmap_pre_update(ip, eidx, state, _THIS_IP_);55395539- xfs_bmbt_set_blockcount(ep, temp);55405540- new.br_startoff = del_endoff;55415541- temp2 = got_endoff - del_endoff;55425542- new.br_blockcount = temp2;55435543- new.br_state = got.br_state;55445544- if (!delay) {55455545- new.br_startblock = del_endblock;55465546- } else {55475547- temp = xfs_bmap_worst_indlen(ip, temp);55485548- xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));55495549- temp2 = xfs_bmap_worst_indlen(ip, temp2);55505550- new.br_startblock = nullstartblock((int)temp2);55515551- da_new = temp + temp2;55525552- while (da_new > da_old) {55535553- if (temp) {55545554- temp--;55555555- da_new--;55565556- xfs_bmbt_set_startblock(ep,55575557- nullstartblock((int)temp));55585558- }55595559- if (da_new == da_old)55605560- break;55615561- if (temp2) {55625562- temp2--;55635563- da_new--;55645564- new.br_startblock =55655565- nullstartblock((int)temp2);55665566- }55675567- }55685568- }55695569- trace_xfs_bmap_post_update(ip, eidx, state, _THIS_IP_);55705570- xfs_iext_insert(ip, eidx + 1, 1, &new, state);55715571- ++eidx;55725572- break;55735573- }55745574-55755575- /*55765576- * Account for change in delayed indirect blocks.55775577- * Nothing to do for disk quota accounting here.55785578- */55795579- ASSERT(da_old >= da_new);55805580- if (da_old > da_new)55815581- xfs_mod_fdblocks(mp, (int64_t)(da_old - da_new), false);55825582-55835171 return error;55845172}55855173
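xfs_trim_extent() above clips an extent record to the window [bno, bno+len): a record with no overlap collapses to blockcount 0, a left overhang advances br_startoff/br_startblock by the clipped distance, and a right overhang only shrinks br_blockcount. A self-contained sketch of the same arithmetic, deliberately leaving out the delayed/hole start-block special cases:

#include <assert.h>
#include <stdint.h>

struct irec {
	uint64_t startoff;	/* file offset, in blocks */
	uint64_t startblock;	/* disk block */
	uint64_t blockcount;
};

static void trim_extent(struct irec *r, uint64_t bno, uint64_t len)
{
	uint64_t end = bno + len, d;

	if (r->startoff + r->blockcount <= bno || r->startoff >= end) {
		r->blockcount = 0;		/* no overlap at all */
		return;
	}
	if (r->startoff < bno) {		/* clip the left edge */
		d = bno - r->startoff;
		r->startoff += d;
		r->startblock += d;
		r->blockcount -= d;
	}
	if (end < r->startoff + r->blockcount)	/* clip the right edge */
		r->blockcount -= r->startoff + r->blockcount - end;
}

int main(void)
{
	struct irec r = { .startoff = 10, .startblock = 100, .blockcount = 20 };

	trim_extent(&r, 15, 10);	/* keep file blocks [15, 25) */
	assert(r.startoff == 15 && r.startblock == 105 && r.blockcount == 10);
	return 0;
}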
fs/xfs/libxfs/xfs_dquot_buf.c
···191191 if (mp->m_quotainfo)192192 ndquots = mp->m_quotainfo->qi_dqperchunk;193193 else194194- ndquots = xfs_calc_dquots_per_chunk(195195- XFS_BB_TO_FSB(mp, bp->b_length));194194+ ndquots = xfs_calc_dquots_per_chunk(bp->b_length);196195197196 for (i = 0; i < ndquots; i++, d++) {198197 if (!xfs_verify_cksum((char *)d, sizeof(struct xfs_dqblk),
-1
fs/xfs/libxfs/xfs_format.h
···865865 * padding field for v3 inodes.866866 */867867#define XFS_DINODE_MAGIC 0x494e /* 'IN' */868868-#define XFS_DINODE_GOOD_VERSION(v) ((v) >= 1 && (v) <= 3)869868typedef struct xfs_dinode {870869 __be16 di_magic; /* inode magic # = XFS_DINODE_MAGIC */871870 __be16 di_mode; /* mode and type of file */
+12-1
fs/xfs/libxfs/xfs_inode_buf.c
···5757}5858#endif59596060+bool6161+xfs_dinode_good_version(6262+ struct xfs_mount *mp,6363+ __u8 version)6464+{6565+ if (xfs_sb_version_hascrc(&mp->m_sb))6666+ return version == 3;6767+6868+ return version == 1 || version == 2;6969+}7070+6071/*6172 * If we are doing readahead on an inode buffer, we might be in log recovery6273 * reading an inode allocation buffer that hasn't yet been replayed, and hence···1029110392 dip = xfs_buf_offset(bp, (i << mp->m_sb.sb_inodelog));10493 di_ok = dip->di_magic == cpu_to_be16(XFS_DINODE_MAGIC) &&105105- XFS_DINODE_GOOD_VERSION(dip->di_version);9494+ xfs_dinode_good_version(mp, dip->di_version);10695 if (unlikely(XFS_TEST_ERROR(!di_ok, mp,10796 XFS_ERRTAG_ITOBP_INOTOBP,10897 XFS_RANDOM_ITOBP_INOTOBP))) {
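xfs_dinode_good_version() ties the acceptable on-disk inode version to the superblock CRC feature bit instead of the old flat 1..3 range: v3 requires CRCs, v1/v2 are only valid without them. A tiny simplified sketch of the predicate (hascrc passed as a plain flag rather than read from the superblock):

#include <assert.h>
#include <stdbool.h>

static bool dinode_good_version(bool hascrc, unsigned version)
{
	if (hascrc)
		return version == 3;
	return version == 1 || version == 2;
}

int main(void)
{
	assert(dinode_good_version(true, 3));
	assert(!dinode_good_version(true, 2));	/* CRC fs: v2 is corruption */
	assert(dinode_good_version(false, 2));
	assert(!dinode_good_version(false, 3));	/* v3 needs the CRC feature */
	return 0;
}

fs/xfs/xfs_file.c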
···249249 struct xfs_inode *ip = XFS_I(inode);250250 loff_t isize = i_size_read(inode);251251 size_t count = iov_iter_count(to);252252+ loff_t end = iocb->ki_pos + count - 1;252253 struct iov_iter data;253254 struct xfs_buftarg *target;254255 ssize_t ret = 0;···273272274273 file_accessed(iocb->ki_filp);275274276276- /*277277- * Locking is a bit tricky here. If we take an exclusive lock for direct278278- * IO, we effectively serialise all new concurrent read IO to this file279279- * and block it behind IO that is currently in progress because IO in280280- * progress holds the IO lock shared. We only need to hold the lock281281- * exclusive to blow away the page cache, so only take lock exclusively282282- * if the page cache needs invalidation. This allows the normal direct283283- * IO case of no page cache pages to proceeed concurrently without284284- * serialisation.285285- */286275 xfs_rw_ilock(ip, XFS_IOLOCK_SHARED);287276 if (mapping->nrpages) {288288- xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);289289- xfs_rw_ilock(ip, XFS_IOLOCK_EXCL);277277+ ret = filemap_write_and_wait_range(mapping, iocb->ki_pos, end);278278+ if (ret)279279+ goto out_unlock;290280291281 /*292292- * The generic dio code only flushes the range of the particular293293- * I/O. Because we take an exclusive lock here, this whole294294- * sequence is considerably more expensive for us. This has a295295- * noticeable performance impact for any file with cached pages,296296- * even when outside of the range of the particular I/O.297297- *298298- * Hence, amortize the cost of the lock against a full file299299- * flush and reduce the chances of repeated iolock cycles going300300- * forward.282282+ * Invalidate whole pages. This can return an error if we fail283283+ * to invalidate a page, but this should never happen on XFS.284284+ * Warn if it does fail.301285 */302302- if (mapping->nrpages) {303303- ret = filemap_write_and_wait(mapping);304304- if (ret) {305305- xfs_rw_iunlock(ip, XFS_IOLOCK_EXCL);306306- return ret;307307- }308308-309309- /*310310- * Invalidate whole pages. This can return an error if311311- * we fail to invalidate a page, but this should never312312- * happen on XFS. Warn if it does fail.313313- */314314- ret = invalidate_inode_pages2(mapping);315315- WARN_ON_ONCE(ret);316316- ret = 0;317317- }318318- xfs_rw_ilock_demote(ip, XFS_IOLOCK_EXCL);286286+ ret = invalidate_inode_pages2_range(mapping,287287+ iocb->ki_pos >> PAGE_SHIFT, end >> PAGE_SHIFT);288288+ WARN_ON_ONCE(ret);289289+ ret = 0;319290 }320291321292 data = *to;···297324 iocb->ki_pos += ret;298325 iov_iter_advance(to, ret);299326 }300300- xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);301327328328+out_unlock:329329+ xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);302330 return ret;303331}304332···544570 if ((iocb->ki_pos | count) & target->bt_logical_sectormask)545571 return -EINVAL;546572547547- /* "unaligned" here means not aligned to a filesystem block */573573+ /*574574+ * Don't take the exclusive iolock here unless the I/O is unaligned to575575+ * the file system block size. 
We don't need to consider the EOF576576+ * extension case here because xfs_file_aio_write_checks() will relock577577+ * the inode as necessary for EOF zeroing cases and fill out the new578578+ * inode size as appropriate.579579+ */548580 if ((iocb->ki_pos & mp->m_blockmask) ||549549- ((iocb->ki_pos + count) & mp->m_blockmask))581581+ ((iocb->ki_pos + count) & mp->m_blockmask)) {550582 unaligned_io = 1;551551-552552- /*553553- * We don't need to take an exclusive lock unless there page cache needs554554- * to be invalidated or unaligned IO is being executed. We don't need to555555- * consider the EOF extension case here because556556- * xfs_file_aio_write_checks() will relock the inode as necessary for557557- * EOF zeroing cases and fill out the new inode size as appropriate.558558- */559559- if (unaligned_io || mapping->nrpages)560583 iolock = XFS_IOLOCK_EXCL;561561- else584584+ } else {562585 iolock = XFS_IOLOCK_SHARED;563563- xfs_rw_ilock(ip, iolock);564564-565565- /*566566- * Recheck if there are cached pages that need invalidate after we got567567- * the iolock to protect against other threads adding new pages while568568- * we were waiting for the iolock.569569- */570570- if (mapping->nrpages && iolock == XFS_IOLOCK_SHARED) {571571- xfs_rw_iunlock(ip, iolock);572572- iolock = XFS_IOLOCK_EXCL;573573- xfs_rw_ilock(ip, iolock);574586 }587587+588588+ xfs_rw_ilock(ip, iolock);575589576590 ret = xfs_file_aio_write_checks(iocb, from, &iolock);577591 if (ret)···567605 count = iov_iter_count(from);568606 end = iocb->ki_pos + count - 1;569607570570- /*571571- * See xfs_file_dio_aio_read() for why we do a full-file flush here.572572- */573608 if (mapping->nrpages) {574574- ret = filemap_write_and_wait(VFS_I(ip)->i_mapping);609609+ ret = filemap_write_and_wait_range(mapping, iocb->ki_pos, end);575610 if (ret)576611 goto out;612612+577613 /*578614 * Invalidate whole pages. 
This can return an error if we fail579615 * to invalidate a page, but this should never happen on XFS.580616 * Warn if it does fail.581617 */582582- ret = invalidate_inode_pages2(VFS_I(ip)->i_mapping);618618+ ret = invalidate_inode_pages2_range(mapping,619619+ iocb->ki_pos >> PAGE_SHIFT, end >> PAGE_SHIFT);583620 WARN_ON_ONCE(ret);584621 ret = 0;585622 }586623587624 /*588625 * If we are doing unaligned IO, wait for all other IO to drain,589589- * otherwise demote the lock if we had to flush cached pages626626+ * otherwise demote the lock if we had to take the exclusive lock627627+ * for other reasons in xfs_file_aio_write_checks.590628 */591629 if (unaligned_io)592630 inode_dio_wait(inode);···909947 return error;910948}911949912912-/*913913- * Flush all file writes out to disk.914914- */915915-static int916916-xfs_file_wait_for_io(917917- struct inode *inode,918918- loff_t offset,919919- size_t len)920920-{921921- loff_t rounding;922922- loff_t ioffset;923923- loff_t iendoffset;924924- loff_t bs;925925- int ret;926926-927927- bs = inode->i_sb->s_blocksize;928928- inode_dio_wait(inode);929929-930930- rounding = max_t(xfs_off_t, bs, PAGE_SIZE);931931- ioffset = round_down(offset, rounding);932932- iendoffset = round_up(offset + len, rounding) - 1;933933- ret = filemap_write_and_wait_range(inode->i_mapping, ioffset,934934- iendoffset);935935- return ret;936936-}937937-938938-/* Hook up to the VFS reflink function */939939-STATIC int940940-xfs_file_share_range(941941- struct file *file_in,942942- loff_t pos_in,943943- struct file *file_out,944944- loff_t pos_out,945945- u64 len,946946- bool is_dedupe)947947-{948948- struct inode *inode_in;949949- struct inode *inode_out;950950- ssize_t ret;951951- loff_t bs;952952- loff_t isize;953953- int same_inode;954954- loff_t blen;955955- unsigned int flags = 0;956956-957957- inode_in = file_inode(file_in);958958- inode_out = file_inode(file_out);959959- bs = inode_out->i_sb->s_blocksize;960960-961961- /* Don't touch certain kinds of inodes */962962- if (IS_IMMUTABLE(inode_out))963963- return -EPERM;964964- if (IS_SWAPFILE(inode_in) ||965965- IS_SWAPFILE(inode_out))966966- return -ETXTBSY;967967-968968- /* Reflink only works within this filesystem. */969969- if (inode_in->i_sb != inode_out->i_sb)970970- return -EXDEV;971971- same_inode = (inode_in->i_ino == inode_out->i_ino);972972-973973- /* Don't reflink dirs, pipes, sockets... */974974- if (S_ISDIR(inode_in->i_mode) || S_ISDIR(inode_out->i_mode))975975- return -EISDIR;976976- if (S_ISFIFO(inode_in->i_mode) || S_ISFIFO(inode_out->i_mode))977977- return -EINVAL;978978- if (!S_ISREG(inode_in->i_mode) || !S_ISREG(inode_out->i_mode))979979- return -EINVAL;980980-981981- /* Don't share DAX file data for now. */982982- if (IS_DAX(inode_in) || IS_DAX(inode_out))983983- return -EINVAL;984984-985985- /* Are we going all the way to the end? */986986- isize = i_size_read(inode_in);987987- if (isize == 0)988988- return 0;989989- if (len == 0)990990- len = isize - pos_in;991991-992992- /* Ensure offsets don't wrap and the input is inside i_size */993993- if (pos_in + len < pos_in || pos_out + len < pos_out ||994994- pos_in + len > isize)995995- return -EINVAL;996996-997997- /* Don't allow dedupe past EOF in the dest file */998998- if (is_dedupe) {999999- loff_t disize;10001000-10011001- disize = i_size_read(inode_out);10021002- if (pos_out >= disize || pos_out + len > disize)10031003- return -EINVAL;10041004- }10051005-10061006- /* If we're linking to EOF, continue to the block boundary. 
*/10071007- if (pos_in + len == isize)10081008- blen = ALIGN(isize, bs) - pos_in;10091009- else10101010- blen = len;10111011-10121012- /* Only reflink if we're aligned to block boundaries */10131013- if (!IS_ALIGNED(pos_in, bs) || !IS_ALIGNED(pos_in + blen, bs) ||10141014- !IS_ALIGNED(pos_out, bs) || !IS_ALIGNED(pos_out + blen, bs))10151015- return -EINVAL;10161016-10171017- /* Don't allow overlapped reflink within the same file */10181018- if (same_inode && pos_out + blen > pos_in && pos_out < pos_in + blen)10191019- return -EINVAL;10201020-10211021- /* Wait for the completion of any pending IOs on srcfile */10221022- ret = xfs_file_wait_for_io(inode_in, pos_in, len);10231023- if (ret)10241024- goto out;10251025- ret = xfs_file_wait_for_io(inode_out, pos_out, len);10261026- if (ret)10271027- goto out;10281028-10291029- if (is_dedupe)10301030- flags |= XFS_REFLINK_DEDUPE;10311031- ret = xfs_reflink_remap_range(XFS_I(inode_in), pos_in, XFS_I(inode_out),10321032- pos_out, len, flags);10331033- if (ret < 0)10341034- goto out;10351035-10361036-out:10371037- return ret;10381038-}10391039-1040950STATIC ssize_t1041951xfs_file_copy_range(1042952 struct file *file_in,···9201086{9211087 int error;9221088923923- error = xfs_file_share_range(file_in, pos_in, file_out, pos_out,10891089+ error = xfs_reflink_remap_range(file_in, pos_in, file_out, pos_out,9241090 len, false);9251091 if (error)9261092 return error;···9351101 loff_t pos_out,9361102 u64 len)9371103{938938- return xfs_file_share_range(file_in, pos_in, file_out, pos_out,11041104+ return xfs_reflink_remap_range(file_in, pos_in, file_out, pos_out,9391105 len, false);9401106}9411107···9581124 if (len > XFS_MAX_DEDUPE_LEN)9591125 len = XFS_MAX_DEDUPE_LEN;9601126961961- error = xfs_file_share_range(src_file, loff, dst_file, dst_loff,11271127+ error = xfs_reflink_remap_range(src_file, loff, dst_file, dst_loff,9621128 len, true);9631129 if (error)9641130 return error;
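The xfs_file.c hunks above stop flushing and invalidating the whole file around direct I/O and instead operate on just the byte range of the request, computing end = iocb->ki_pos + count - 1 and converting the inclusive byte range to inclusive page indices with >> PAGE_SHIFT. A small standalone check of that conversion (PAGE_SHIFT fixed at 12 here as an assumption):

#include <assert.h>
#include <stdint.h>

#define PAGE_SHIFT 12	/* assume 4 KiB pages for the example */

/* An inclusive byte range [pos, end] touches the inclusive page
 * index range [pos >> PAGE_SHIFT, end >> PAGE_SHIFT]. */
int main(void)
{
	uint64_t pos = 5000, count = 9000;
	uint64_t end = pos + count - 1;		/* last byte: 13999 */

	assert((pos >> PAGE_SHIFT) == 1);	/* first page: bytes 4096.. */
	assert((end >> PAGE_SHIFT) == 3);	/* last page: bytes 12288.. */
	return 0;
}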
include/linux/acpi.h
···326326int acpi_pci_irq_enable (struct pci_dev *dev);327327void acpi_penalize_isa_irq(int irq, int active);328328bool acpi_isa_irq_available(int irq);329329+void acpi_penalize_sci_irq(int irq, int trigger, int polarity);329330void acpi_pci_irq_disable (struct pci_dev *dev);330331331332extern int ec_read(u8 addr, u8 *val);
+1-1
include/linux/clk-provider.h
···785785 * routines, one at of_clk_init(), and one at platform device probe786786 */787787#define CLK_OF_DECLARE_DRIVER(name, compat, fn) \788788- static void name##_of_clk_init_driver(struct device_node *np) \788788+ static void __init name##_of_clk_init_driver(struct device_node *np) \789789 { \790790 of_node_clear_flag(np, OF_POPULATED); \791791 fn(np); \
+22
include/linux/io.h
···141141void *memremap(resource_size_t offset, size_t size, unsigned long flags);142142void memunmap(void *addr);143143144144+/*145145+ * On x86 PAT systems we have memory tracking that keeps track of146146+ * the allowed mappings on memory ranges. This tracking works for147147+ * all the in-kernel mapping APIs (ioremap*), but where the user148148+ * wishes to map a range from a physical device into user memory149149+ * the tracking won't be updated. This API is to be used by150150+ * drivers which remap physical device pages into userspace,151151+ * and wants to make sure they are mapped WC and not UC.152152+ */153153+#ifndef arch_io_reserve_memtype_wc154154+static inline int arch_io_reserve_memtype_wc(resource_size_t base,155155+ resource_size_t size)156156+{157157+ return 0;158158+}159159+160160+static inline void arch_io_free_memtype_wc(resource_size_t base,161161+ resource_size_t size)162162+{163163+}164164+#endif165165+144166#endif /* _LINUX_IO_H */
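The new arch_io_reserve_memtype_wc()/arch_io_free_memtype_wc() hooks let a driver register a physical aperture as write-combining with the x86 PAT tracking before exposing it to userspace mappings. A hedged sketch of the intended call pattern in a driver's init/teardown paths; struct mydrv_device and its fields are hypothetical, while ioremap_wc()/iounmap() and the two arch_io_* calls are the real kernel APIs:

/* Hypothetical driver init/fini, sketching the call pattern only. */
static int mydrv_vram_init(struct mydrv_device *mdev)
{
	int ret;

	/* Register the aperture as WC with the arch memtype tracking
	 * before handing pages of it out to userspace mappings. */
	ret = arch_io_reserve_memtype_wc(mdev->aper_base, mdev->aper_size);
	if (ret)
		return ret;

	mdev->vram = ioremap_wc(mdev->aper_base, mdev->aper_size);
	if (!mdev->vram) {
		arch_io_free_memtype_wc(mdev->aper_base, mdev->aper_size);
		return -ENOMEM;
	}
	return 0;
}

static void mydrv_vram_fini(struct mydrv_device *mdev)
{
	iounmap(mdev->vram);
	arch_io_free_memtype_wc(mdev->aper_base, mdev->aper_size);
}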
+11-6
include/linux/iomap.h
···1919#define IOMAP_UNWRITTEN 0x04 /* blocks allocated @blkno in unwritten state */20202121/*2222- * Flags for iomap mappings:2222+ * Flags for all iomap mappings:2323 */2424-#define IOMAP_F_MERGED 0x01 /* contains multiple blocks/extents */2525-#define IOMAP_F_SHARED 0x02 /* block shared with another file */2626-#define IOMAP_F_NEW 0x04 /* blocks have been newly allocated */2424+#define IOMAP_F_NEW 0x01 /* blocks have been newly allocated */2525+2626+/*2727+ * Flags that only need to be reported for IOMAP_REPORT requests:2828+ */2929+#define IOMAP_F_MERGED 0x10 /* contains multiple blocks/extents */3030+#define IOMAP_F_SHARED 0x20 /* block shared with another file */27312832/*2933 * Magic value for blkno:···4642/*4743 * Flags for iomap_begin / iomap_end. No flag implies a read.4844 */4949-#define IOMAP_WRITE (1 << 0)5050-#define IOMAP_ZERO (1 << 1)4545+#define IOMAP_WRITE (1 << 0) /* writing, must allocate blocks */4646+#define IOMAP_ZERO (1 << 1) /* zeroing operation, may skip holes */4747+#define IOMAP_REPORT (1 << 2) /* report extent status, e.g. FIEMAP */51485249struct iomap_ops {5350 /*
+2-3
include/linux/kconfig.h
···3131 * When CONFIG_BOOGER is not defined, we generate a (... 1, 0) pair, and when3232 * the last step cherry picks the 2nd arg, we get a zero.3333 */3434-#define config_enabled(cfg) ___is_defined(cfg)3534#define __is_defined(x) ___is_defined(x)3635#define ___is_defined(val) ____is_defined(__ARG_PLACEHOLDER_##val)3736#define ____is_defined(arg1_or_junk) __take_second_arg(arg1_or_junk 1, 0)···4041 * otherwise. For boolean options, this is equivalent to4142 * IS_ENABLED(CONFIG_FOO).4243 */4343-#define IS_BUILTIN(option) config_enabled(option)4444+#define IS_BUILTIN(option) __is_defined(option)44454546/*4647 * IS_MODULE(CONFIG_FOO) evaluates to 1 if CONFIG_FOO is set to 'm', 04748 * otherwise.4849 */4949-#define IS_MODULE(option) config_enabled(option##_MODULE)5050+#define IS_MODULE(option) __is_defined(option##_MODULE)50515152/*5253 * IS_REACHABLE(CONFIG_FOO) evaluates to 1 if the currently compiled
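With config_enabled() gone, IS_BUILTIN() and IS_MODULE() go straight through __is_defined(), which resolves "macro defined to 1" versus "undefined" entirely in the preprocessor via the __ARG_PLACEHOLDER_ paste trick. The standalone demo below replays the macros from the hunk in userspace (CONFIG_FOO/CONFIG_BAR are made-up options for the demonstration):

#include <stdio.h>

#define __ARG_PLACEHOLDER_1 0,
#define __take_second_arg(__ignored, val, ...) val

#define __is_defined(x)			___is_defined(x)
#define ___is_defined(val)		____is_defined(__ARG_PLACEHOLDER_##val)
#define ____is_defined(arg1_or_junk)	__take_second_arg(arg1_or_junk 1, 0)

#define CONFIG_FOO 1	/* pretend FOO is built in; CONFIG_BAR stays undefined */

int main(void)
{
	/* CONFIG_FOO expands to 1 -> the placeholder pastes to "0," ->
	 * the second argument is 1.  CONFIG_BAR stays one junk token, so
	 * "junk 1" is a single argument and the second argument is 0. */
	printf("FOO=%d BAR=%d\n", __is_defined(CONFIG_FOO),
	       __is_defined(CONFIG_BAR));
	return 0;
}

Compiled with gcc, this prints FOO=1 BAR=0, matching IS_BUILTIN()'s behavior for set and unset boolean options.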
-4
include/linux/mm.h
···12711271extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,12721272 void *buf, int len, unsigned int gup_flags);1273127312741274-long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,12751275- unsigned long start, unsigned long nr_pages,12761276- unsigned int foll_flags, struct page **pages,12771277- struct vm_area_struct **vmas, int *nonblocking);12781274long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm,12791275 unsigned long start, unsigned long nr_pages,12801276 unsigned int gup_flags, struct page **pages,
+2-28
include/linux/mmzone.h
···440440 seqlock_t span_seqlock;441441#endif442442443443- /*444444- * wait_table -- the array holding the hash table445445- * wait_table_hash_nr_entries -- the size of the hash table array446446- * wait_table_bits -- wait_table_size == (1 << wait_table_bits)447447- *448448- * The purpose of all these is to keep track of the people449449- * waiting for a page to become available and make them450450- * runnable again when possible. The trouble is that this451451- * consumes a lot of space, especially when so few things452452- * wait on pages at a given time. So instead of using453453- * per-page waitqueues, we use a waitqueue hash table.454454- *455455- * The bucket discipline is to sleep on the same queue when456456- * colliding and wake all in that wait queue when removing.457457- * When something wakes, it must check to be sure its page is458458- * truly available, a la thundering herd. The cost of a459459- * collision is great, but given the expected load of the460460- * table, they should be so rare as to be outweighed by the461461- * benefits from the saved space.462462- *463463- * __wait_on_page_locked() and unlock_page() in mm/filemap.c, are the464464- * primary users of these fields, and in mm/page_alloc.c465465- * free_area_init_core() performs the initialization of them.466466- */467467- wait_queue_head_t *wait_table;468468- unsigned long wait_table_hash_nr_entries;469469- unsigned long wait_table_bits;443443+ int initialized;470444471445 /* Write-intensive fields used from the page allocator */472446 ZONE_PADDING(_pad1_)···520546521547static inline bool zone_is_initialized(struct zone *zone)522548{523523- return !!zone->wait_table;549549+ return zone->initialized;524550}525551526552static inline bool zone_is_empty(struct zone *zone)
kernel/kcov.c
···5353 /*5454 * We are interested in code coverage as a function of a syscall inputs,5555 * so we ignore code executed in interrupts.5656+ * The checks for whether we are in an interrupt are open-coded, because5757+ * 1. We can't use in_interrupt() here, since it also returns true5858+ * when we are inside local_bh_disable() section.5959+ * 2. We don't want to use (in_irq() | in_serving_softirq() | in_nmi()),6060+ * since that leads to slower generated code (three separate tests,6161+ * one for each of the flags).5662 */5757- if (!t || in_interrupt())6363+ if (!t || (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_OFFSET6464+ | NMI_MASK)))5865 return;5966 mode = READ_ONCE(t->kcov_mode);6067 if (mode == KCOV_MODE_TRACE) {
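The comment added above is the whole story: in_interrupt() also reports true inside a local_bh_disable() section, because bh-disable nesting is counted in the upper softirq bits of preempt_count, while actually serving a softirq sets only SOFTIRQ_OFFSET. A userspace sketch of the distinction; the bit-layout constants restate the conventional kernel values as assumptions of this demo:

#include <assert.h>

/* Conventional preempt_count layout (simplified):
 *   bits 0-7  preemption disable count
 *   bits 8-15 softirq count (bit 8 = actually serving a softirq,
 *             higher bits = local_bh_disable() nesting)
 *   bits 16-19 hardirq count, bit 20 NMI */
#define SOFTIRQ_OFFSET		(1UL << 8)
#define SOFTIRQ_MASK		(0xffUL << 8)
#define HARDIRQ_MASK		(0xfUL << 16)
#define NMI_MASK		(1UL << 20)
#define SOFTIRQ_DISABLE_OFFSET	(2 * SOFTIRQ_OFFSET)

#define in_interrupt(pc)  ((pc) & (HARDIRQ_MASK | SOFTIRQ_MASK | NMI_MASK))
#define kcov_in_irq(pc)   ((pc) & (HARDIRQ_MASK | SOFTIRQ_OFFSET | NMI_MASK))

int main(void)
{
	unsigned long pc = SOFTIRQ_DISABLE_OFFSET; /* in local_bh_disable() */

	assert(in_interrupt(pc));	/* old check: wrongly skips coverage */
	assert(!kcov_in_irq(pc));	/* new check: still collects coverage */

	pc = SOFTIRQ_OFFSET;		/* actually serving a softirq */
	assert(kcov_in_irq(pc));
	return 0;
}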
kernel/sched/wait.c
···480480}481481EXPORT_SYMBOL(wake_up_bit);482482483483-wait_queue_head_t *bit_waitqueue(void *word, int bit)484484-{485485- const int shift = BITS_PER_LONG == 32 ? 5 : 6;486486- const struct zone *zone = page_zone(virt_to_page(word));487487- unsigned long val = (unsigned long)word << shift | bit;488488-489489- return &zone->wait_table[hash_long(val, zone->wait_table_bits)];490490-}491491-EXPORT_SYMBOL(bit_waitqueue);492492-493483/*494484 * Manipulate the atomic_t address to produce a better bit waitqueue table hash495485 * index (we're keying off bit -1, but that would produce a horrible hash
kernel/time/timer.c
···878878879879#ifdef CONFIG_NO_HZ_COMMON880880static inline struct timer_base *881881-__get_target_base(struct timer_base *base, unsigned tflags)881881+get_target_base(struct timer_base *base, unsigned tflags)882882{883883#ifdef CONFIG_SMP884884 if ((tflags & TIMER_PINNED) || !base->migration_enabled)···891891892892static inline void forward_timer_base(struct timer_base *base)893893{894894+ unsigned long jnow = READ_ONCE(jiffies);895895+894896 /*895897 * We only forward the base when it's idle and we have a delta between896898 * base clock and jiffies.897899 */898898- if (!base->is_idle || (long) (jiffies - base->clk) < 2)900900+ if (!base->is_idle || (long) (jnow - base->clk) < 2)899901 return;900902901903 /*902904 * If the next expiry value is > jiffies, then we fast forward to903905 * jiffies otherwise we forward to the next expiry value.904906 */905905- if (time_after(base->next_expiry, jiffies))906906- base->clk = jiffies;907907+ if (time_after(base->next_expiry, jnow))908908+ base->clk = jnow;907909 else908910 base->clk = base->next_expiry;909911}910912#else911913static inline struct timer_base *912912-__get_target_base(struct timer_base *base, unsigned tflags)914914+get_target_base(struct timer_base *base, unsigned tflags)913915{914916 return get_timer_this_cpu_base(tflags);915917}···919917static inline void forward_timer_base(struct timer_base *base) { }920918#endif921919922922-static inline struct timer_base *923923-get_target_base(struct timer_base *base, unsigned tflags)924924-{925925- struct timer_base *target = __get_target_base(base, tflags);926926-927927- forward_timer_base(target);928928- return target;929929-}930920931921/*932922 * We are using hashed locking: Holding per_cpu(timer_bases[x]).lock means···937943{938944 for (;;) {939945 struct timer_base *base;940940- u32 tf = timer->flags;946946+ u32 tf;947947+948948+ /*949949+ * We need to use READ_ONCE() here, otherwise the compiler950950+ * might re-read @tf between the check for TIMER_MIGRATING951951+ * and spin_lock().952952+ */953953+ tf = READ_ONCE(timer->flags);941954942955 if (!(tf & TIMER_MIGRATING)) {943956 base = get_timer_base(tf);···965964 unsigned long clk = 0, flags;966965 int ret = 0;967966967967+ BUG_ON(!timer->function);968968+968969 /*969970 * This is a common optimization triggered by the networking code - if970971 * the timer is re-modified to have the same timeout or ends up in the···975972 if (timer_pending(timer)) {976973 if (timer->expires == expires)977974 return 1;978978- /*979979- * Take the current timer_jiffies of base, but without holding980980- * the lock!981981- */982982- base = get_timer_base(timer->flags);983983- clk = base->clk;984975976976+ /*977977+ * We lock timer base and calculate the bucket index right978978+ * here. 
If the timer ends up in the same bucket, then we979979+ * just update the expiry time and avoid the whole980980+ * dequeue/enqueue dance.981981+ */982982+ base = lock_timer_base(timer, &flags);983983+984984+ clk = base->clk;985985 idx = calc_wheel_index(expires, clk);986986987987 /*···994988 */995989 if (idx == timer_get_idx(timer)) {996990 timer->expires = expires;997997- return 1;991991+ ret = 1;992992+ goto out_unlock;998993 }994994+ } else {995995+ base = lock_timer_base(timer, &flags);999996 }10009971001998 timer_stats_timer_set_start_info(timer);10021002- BUG_ON(!timer->function);10031003-10041004- base = lock_timer_base(timer, &flags);100599910061000 ret = detach_if_pending(timer, base, false);10071001 if (!ret && pending_only)···10311025 }10321026 }1033102710281028+ /* Try to forward a stale timer base clock */10291029+ forward_timer_base(base);10301030+10341031 timer->expires = expires;10351032 /*10361033 * If 'idx' was calculated above and the base time did not advance10371037- * between calculating 'idx' and taking the lock, only enqueue_timer()10381038- * and trigger_dyntick_cpu() is required. Otherwise we need to10391039- * (re)calculate the wheel index via internal_add_timer().10341034+ * between calculating 'idx' and possibly switching the base, only10351035+ * enqueue_timer() and trigger_dyntick_cpu() is required. Otherwise10361036+ * we need to (re)calculate the wheel index via10371037+ * internal_add_timer().10401038 */10411039 if (idx != UINT_MAX && clk == base->clk) {10421040 enqueue_timer(base, timer, idx);···15201510 is_max_delta = (nextevt == base->clk + NEXT_TIMER_MAX_DELTA);15211511 base->next_expiry = nextevt;15221512 /*15231523- * We have a fresh next event. Check whether we can forward the base:15131513+ * We have a fresh next event. Check whether we can forward the15141514+ * base. We can only do that when @basej is past base->clk15151515+ * otherwise we might rewind base->clk.15241516 */15251525- if (time_after(nextevt, jiffies))15261526- base->clk = jiffies;15271527- else if (time_after(nextevt, base->clk))15281528- base->clk = nextevt;15171517+ if (time_after(basej, base->clk)) {15181518+ if (time_after(nextevt, basej))15191519+ base->clk = basej;15201520+ else if (time_after(nextevt, base->clk))15211521+ base->clk = nextevt;15221522+ }1529152315301524 if (time_before_eq(nextevt, basej)) {15311525 expires = basem;
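The lock_timer_base() change above hinges on READ_ONCE(): without it the compiler may legally re-read timer->flags between the TIMER_MIGRATING test and taking the lock, so the base chosen and the base rechecked could come from different snapshots. A condensed sketch of the check-lock-recheck loop (kernel-style fragment, not a drop-in copy of the function):

/* Read the flags once, pick the base from that snapshot, then
 * re-check under the lock that the timer still belongs to it. */
for (;;) {
	u32 tf = READ_ONCE(timer->flags);	/* single snapshot */

	if (!(tf & TIMER_MIGRATING)) {
		base = get_timer_base(tf);
		spin_lock_irqsave(&base->lock, *flags);
		if (timer->flags == tf)		/* still the same base? */
			return base;
		spin_unlock_irqrestore(&base->lock, *flags);
	}
	cpu_relax();				/* migration in flight, retry */
}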
+1
lib/Kconfig.debug
···198198 int "Warn for stack frames larger than (needs gcc 4.4)"199199 range 0 8192200200 default 0 if KASAN201201+ default 2048 if GCC_PLUGIN_LATENT_ENTROPY201202 default 1024 if !64BIT202203 default 2048 if 64BIT203204 help
+2-1
lib/genalloc.c
···292292 struct gen_pool_chunk *chunk;293293 unsigned long addr = 0;294294 int order = pool->min_alloc_order;295295- int nbits, start_bit = 0, end_bit, remain;295295+ int nbits, start_bit, end_bit, remain;296296297297#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG298298 BUG_ON(in_nmi());···307307 if (size > atomic_read(&chunk->avail))308308 continue;309309310310+ start_bit = 0;310311 end_bit = chunk_size(chunk) >> order;311312retry:312313 start_bit = algo(chunk->bits, end_bit, start_bit,
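The genalloc fix moves the start_bit reset inside the chunk loop: a failed search leaves the offset at the end of one chunk, and without the reset the next chunk is scanned from that stale offset, so its free space is skipped. A standalone illustration of the stale-offset failure and the per-chunk reset (find_zero() is a made-up helper, not the gen_pool bitmap search):

#include <assert.h>

static int find_zero(const unsigned char *map, int nbits, int start)
{
	for (int i = start; i < nbits; i++)
		if (!map[i])
			return i;
	return -1;	/* no free bit from 'start' onwards */
}

int main(void)
{
	const unsigned char chunk_a[4] = { 1, 1, 1, 1 };	/* full */
	const unsigned char chunk_b[4] = { 0, 1, 1, 1 };	/* bit 0 free */
	int start_bit = 0;

	/* Chunk A: the failed search leaves the offset at the end. */
	while (start_bit < 4 && chunk_a[start_bit])
		start_bit++;

	/* Stale offset: chunk B's free bit at 0 is missed... */
	assert(find_zero(chunk_b, 4, start_bit) == -1);

	start_bit = 0;	/* ...resetting per chunk (the fix) finds it. */
	assert(find_zero(chunk_b, 4, start_bit) == 0);
	return 0;
}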
mm/Kconfig
···187187 bool "Allow for memory hot-add"188188 depends on SPARSEMEM || X86_64_ACPI_NUMA189189 depends on ARCH_ENABLE_MEMORY_HOTPLUG190190- depends on !KASAN190190+ depends on COMPILE_TEST || !KASAN191191192192config MEMORY_HOTPLUG_SPARSE193193 def_bool y
mm/gup.c
···526526 * instead of __get_user_pages. __get_user_pages should be used only if527527 * you need some special @gup_flags.528528 */529529-long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,529529+static long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,530530 unsigned long start, unsigned long nr_pages,531531 unsigned int gup_flags, struct page **pages,532532 struct vm_area_struct **vmas, int *nonblocking)···631631 } while (nr_pages);632632 return i;633633}634634-EXPORT_SYMBOL(__get_user_pages);635634636635bool vma_permits_fault(struct vm_area_struct *vma, unsigned int fault_flags)637636{
mm/list_lru.c
···554554 err = memcg_init_list_lru(lru, memcg_aware);555555 if (err) {556556 kfree(lru->node);557557+ /* Do this so a list_lru_destroy() doesn't crash: */558558+ lru->node = NULL;557559 goto out;558560 }559561
+9
mm/memcontrol.c
···19171917 current->flags & PF_EXITING))19181918 goto force;1919191919201920+ /*19211921+ * Prevent unbounded recursion when reclaim operations need to19221922+ * allocate memory. This might exceed the limits temporarily,19231923+ * but we prefer facilitating memory reclaim and getting back19241924+ * under the limit over triggering OOM kills in these cases.19251925+ */19261926+ if (unlikely(current->flags & PF_MEMALLOC))19271927+ goto force;19281928+19201929 if (unlikely(task_in_memcg_oom(current)))19211930 goto nomem;19221931
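The memcontrol hunk breaks unbounded recursion: a task that is already reclaiming (PF_MEMALLOC set) and hits the limit again is allowed to charge over the limit rather than re-enter reclaim or trigger an OOM kill. A toy model of the guard, with a module-level flag standing in for current->flags:

#include <assert.h>

#define PF_MEMALLOC 0x1	/* "already inside reclaim" flag (simplified) */

static unsigned current_flags;
static int charges, forced;

static void try_charge(void)
{
	/* Re-entering reclaim from reclaim could recurse without bound;
	 * charge over the limit instead (mirrors the hunk above). */
	if (current_flags & PF_MEMALLOC) {
		forced++;
		return;
	}
	current_flags |= PF_MEMALLOC;	/* enter reclaim */
	try_charge();			/* reclaim needs memory itself */
	current_flags &= ~PF_MEMALLOC;
	charges++;
}

int main(void)
{
	try_charge();
	assert(forced == 1 && charges == 1);	/* recursion stays bounded */
	return 0;
}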
-29
mm/memory_hotplug.c
···268268 unsigned long i, pfn, end_pfn, nr_pages;269269 int node = pgdat->node_id;270270 struct page *page;271271- struct zone *zone;272271273272 nr_pages = PAGE_ALIGN(sizeof(struct pglist_data)) >> PAGE_SHIFT;274273 page = virt_to_page(pgdat);275274276275 for (i = 0; i < nr_pages; i++, page++)277276 get_page_bootmem(node, page, NODE_INFO);278278-279279- zone = &pgdat->node_zones[0];280280- for (; zone < pgdat->node_zones + MAX_NR_ZONES - 1; zone++) {281281- if (zone_is_initialized(zone)) {282282- nr_pages = zone->wait_table_hash_nr_entries283283- * sizeof(wait_queue_head_t);284284- nr_pages = PAGE_ALIGN(nr_pages) >> PAGE_SHIFT;285285- page = virt_to_page(zone->wait_table);286286-287287- for (i = 0; i < nr_pages; i++, page++)288288- get_page_bootmem(node, page, NODE_INFO);289289- }290290- }291277292278 pfn = pgdat->node_start_pfn;293279 end_pfn = pgdat_end_pfn(pgdat);···21172131 unsigned long start_pfn = pgdat->node_start_pfn;21182132 unsigned long end_pfn = start_pfn + pgdat->node_spanned_pages;21192133 unsigned long pfn;21202120- int i;2121213421222135 for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {21232136 unsigned long section_nr = pfn_to_section_nr(pfn);···21432158 */21442159 node_set_offline(nid);21452160 unregister_one_node(nid);21462146-21472147- /* free waittable in each zone */21482148- for (i = 0; i < MAX_NR_ZONES; i++) {21492149- struct zone *zone = pgdat->node_zones + i;21502150-21512151- /*21522152- * wait_table may be allocated from boot memory,21532153- * here only free if it's allocated by vmalloc.21542154- */21552155- if (is_vmalloc_addr(zone->wait_table)) {21562156- vfree(zone->wait_table);21572157- zone->wait_table = NULL;21582158- }21592159- }21602161}21612162EXPORT_SYMBOL(try_offline_node);21622163
+1-1
mm/nommu.c
···109109 return PAGE_SIZE << compound_order(page);110110}111111112112-long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,112112+static long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,113113 unsigned long start, unsigned long nr_pages,114114 unsigned int foll_flags, struct page **pages,115115 struct vm_area_struct **vmas, int *nonblocking)
+11-120
mm/page_alloc.c
···42244224 }4225422542264226 *p = '\0';42274227- printk("(%s) ", tmp);42274227+ printk(KERN_CONT "(%s) ", tmp);42284228}4229422942304230/*···43354335 free_pcp += per_cpu_ptr(zone->pageset, cpu)->pcp.count;4336433643374337 show_node(zone);43384338- printk("%s"43384338+ printk(KERN_CONT43394339+ "%s"43394340 " free:%lukB"43404341 " min:%lukB"43414342 " low:%lukB"···43834382 K(zone_page_state(zone, NR_FREE_CMA_PAGES)));43844383 printk("lowmem_reserve[]:");43854384 for (i = 0; i < MAX_NR_ZONES; i++)43864386- printk(" %ld", zone->lowmem_reserve[i]);43874387- printk("\n");43854385+ printk(KERN_CONT " %ld", zone->lowmem_reserve[i]);43864386+ printk(KERN_CONT "\n");43884387 }4389438843904389 for_each_populated_zone(zone) {···43954394 if (skip_free_areas_node(filter, zone_to_nid(zone)))43964395 continue;43974396 show_node(zone);43984398- printk("%s: ", zone->name);43974397+ printk(KERN_CONT "%s: ", zone->name);4399439844004399 spin_lock_irqsave(&zone->lock, flags);44014400 for (order = 0; order < MAX_ORDER; order++) {···44134412 }44144413 spin_unlock_irqrestore(&zone->lock, flags);44154414 for (order = 0; order < MAX_ORDER; order++) {44164416- printk("%lu*%lukB ", nr[order], K(1UL) << order);44154415+ printk(KERN_CONT "%lu*%lukB ",44164416+ nr[order], K(1UL) << order);44174417 if (nr[order])44184418 show_migration_types(types[order]);44194419 }44204420- printk("= %lukB\n", K(total));44204420+ printk(KERN_CONT "= %lukB\n", K(total));44214421 }4422442244234423 hugetlb_show_meminfo();···49794977}4980497849814979/*49824982- * Helper functions to size the waitqueue hash table.49834983- * Essentially these want to choose hash table sizes sufficiently49844984- * large so that collisions trying to wait on pages are rare.49854985- * But in fact, the number of active page waitqueues on typical49864986- * systems is ridiculously low, less than 200. So this is even49874987- * conservative, even though it seems large.49884988- *49894989- * The constant PAGES_PER_WAITQUEUE specifies the ratio of pages to49904990- * waitqueues, i.e. the size of the waitq table given the number of pages.49914991- */49924992-#define PAGES_PER_WAITQUEUE 25649934993-49944994-#ifndef CONFIG_MEMORY_HOTPLUG49954995-static inline unsigned long wait_table_hash_nr_entries(unsigned long pages)49964996-{49974997- unsigned long size = 1;49984998-49994999- pages /= PAGES_PER_WAITQUEUE;50005000-50015001- while (size < pages)50025002- size <<= 1;50035003-50045004- /*50055005- * Once we have dozens or even hundreds of threads sleeping50065006- * on IO we've got bigger problems than wait queue collision.50075007- * Limit the size of the wait table to a reasonable size.50085008- */50095009- size = min(size, 4096UL);50105010-50115011- return max(size, 4UL);50125012-}50135013-#else50145014-/*50155015- * A zone's size might be changed by hot-add, so it is not possible to determine50165016- * a suitable size for its wait_table. So we use the maximum size now.50175017- *50185018- * The max wait table size = 4096 x sizeof(wait_queue_head_t). ie:50195019- *50205020- * i386 (preemption config) : 4096 x 16 = 64Kbyte.50215021- * ia64, x86-64 (no preemption): 4096 x 20 = 80Kbyte.50225022- * ia64, x86-64 (preemption) : 4096 x 24 = 96Kbyte.50235023- *50245024- * The maximum entries are prepared when a zone's memory is (512K + 256) pages50255025- * or more by the traditional way. (See above). 
It equals:50265026- *50275027- * i386, x86-64, powerpc(4K page size) : = ( 2G + 1M)byte.50285028- * ia64(16K page size) : = ( 8G + 4M)byte.50295029- * powerpc (64K page size) : = (32G +16M)byte.50305030- */50315031-static inline unsigned long wait_table_hash_nr_entries(unsigned long pages)50325032-{50335033- return 4096UL;50345034-}50355035-#endif50365036-50375037-/*50385038- * This is an integer logarithm so that shifts can be used later50395039- * to extract the more random high bits from the multiplicative50405040- * hash function before the remainder is taken.50415041- */50425042-static inline unsigned long wait_table_bits(unsigned long size)50435043-{50445044- return ffz(~size);50455045-}50465046-50475047-/*50484980 * Initially all pages are reserved - free ones are freed50494981 * up by free_all_bootmem() once the early boot process is50504982 * done. Non-atomic initialization, single-pass.···52405304 alloc_percpu(struct per_cpu_nodestat);52415305}5242530652435243-static noinline __ref52445244-int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages)52455245-{52465246- int i;52475247- size_t alloc_size;52485248-52495249- /*52505250- * The per-page waitqueue mechanism uses hashed waitqueues52515251- * per zone.52525252- */52535253- zone->wait_table_hash_nr_entries =52545254- wait_table_hash_nr_entries(zone_size_pages);52555255- zone->wait_table_bits =52565256- wait_table_bits(zone->wait_table_hash_nr_entries);52575257- alloc_size = zone->wait_table_hash_nr_entries52585258- * sizeof(wait_queue_head_t);52595259-52605260- if (!slab_is_available()) {52615261- zone->wait_table = (wait_queue_head_t *)52625262- memblock_virt_alloc_node_nopanic(52635263- alloc_size, zone->zone_pgdat->node_id);52645264- } else {52655265- /*52665266- * This case means that a zone whose size was 0 gets new memory52675267- * via memory hot-add.52685268- * But it may be the case that a new node was hot-added. In52695269- * this case vmalloc() will not be able to use this new node's52705270- * memory - this wait_table must be initialized to use this new52715271- * node itself as well.52725272- * To use this new node's memory, further consideration will be52735273- * necessary.52745274- */52755275- zone->wait_table = vmalloc(alloc_size);52765276- }52775277- if (!zone->wait_table)52785278- return -ENOMEM;52795279-52805280- for (i = 0; i < zone->wait_table_hash_nr_entries; ++i)52815281- init_waitqueue_head(zone->wait_table + i);52825282-52835283- return 0;52845284-}52855285-52865307static __meminit void zone_pcp_init(struct zone *zone)52875308{52885309 /*···52605367 unsigned long size)52615368{52625369 struct pglist_data *pgdat = zone->zone_pgdat;52635263- int ret;52645264- ret = zone_wait_table_init(zone, size);52655265- if (ret)52665266- return ret;53705370+52675371 pgdat->nr_zones = zone_idx(zone) + 1;5268537252695373 zone->zone_start_pfn = zone_start_pfn;···52725382 zone_start_pfn, (zone_start_pfn + size));5273538352745384 zone_init_free_lists(zone);53855385+ zone->initialized = 1;5275538652765387 return 0;52775388}
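The printk() annotations above exist because, as of the 4.9-era printk rework, each call without an explicit level starts a new line; a logical line assembled from several calls must mark every continuation with KERN_CONT or it is flushed piecemeal. A hedged kernel-style fragment of the pattern (order/MAX_ORDER as in the hunk above):

/* First call carries the level; every continuation must say
 * KERN_CONT or it will be emitted as a separate line. */
printk(KERN_INFO "orders:");
for (order = 0; order < MAX_ORDER; order++)
	printk(KERN_CONT " %d", order);
printk(KERN_CONT "\n");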