···132132Core Complex Die (CCD) temperatures. Up to 8 such temperatures are reported133133as temp{3..10}_input, labeled Tccd{1..8}. Actual support depends on the CPU134134variant.135135-136136-Various Family 17h and 18h CPUs report voltage and current telemetry137137-information. The following attributes may be reported.138138-139139-Attribute Label Description140140-=============== ======= ================141141-in0_input Vcore Core voltage142142-in1_input Vsoc SoC voltage143143-curr1_input Icore Core current144144-curr2_input Isoc SoC current145145-=============== ======= ================146146-147147-Current values are raw (unscaled) as reported by the CPU. Core current is148148-reported as multiples of 1A / LSB. SoC is reported as multiples of 0.25A149149-/ LSB. The real current is board specific. Reported currents should be seen150150-as rough guidance, and should be scaled using sensors3.conf as appropriate151151-for a given board.
···5454# runtime. Because the hypervisor is part of the kernel binary, relocations5555# produce a kernel VA. We enumerate relocations targeting hyp at build time5656# and convert the kernel VAs at those positions to hyp VAs.5757-$(obj)/hyp-reloc.S: $(obj)/kvm_nvhe.tmp.o $(obj)/gen-hyprel5757+$(obj)/hyp-reloc.S: $(obj)/kvm_nvhe.tmp.o $(obj)/gen-hyprel FORCE5858 $(call if_changed,hyprel)59596060# 5) Compile hyp-reloc.S and link it into the existing partially linked object.
···3636#include <linux/linkage.h>3737#include <asm/errno.h>3838#include <asm/setup.h>3939-#include <asm/segment.h>4039#include <asm/traps.h>4140#include <asm/unistd.h>4241#include <asm/asm-offsets.h>···77787879ENTRY(sys_sigreturn)7980 SAVE_SWITCH_STACK8080- movel %sp,%sp@- | switch_stack pointer8181- pea %sp@(SWITCH_STACK_SIZE+4) | pt_regs pointer8181+ movel %sp,%a1 | switch_stack pointer8282+ lea %sp@(SWITCH_STACK_SIZE),%a0 | pt_regs pointer8383+ lea %sp@(-84),%sp | leave a gap8484+ movel %a1,%sp@-8585+ movel %a0,%sp@-8286 jbsr do_sigreturn8383- addql #8,%sp8484- RESTORE_SWITCH_STACK8585- rts8787+ jra 1f | shared with rt_sigreturn()86888789ENTRY(sys_rt_sigreturn)8890 SAVE_SWITCH_STACK8989- movel %sp,%sp@- | switch_stack pointer9090- pea %sp@(SWITCH_STACK_SIZE+4) | pt_regs pointer9191+ movel %sp,%a1 | switch_stack pointer9292+ lea %sp@(SWITCH_STACK_SIZE),%a0 | pt_regs pointer9393+ lea %sp@(-84),%sp | leave a gap9494+ movel %a1,%sp@-9595+ movel %a0,%sp@-9696+ | stack contents:9797+ | [original pt_regs address] [original switch_stack address]9898+ | [gap] [switch_stack] [pt_regs] [exception frame]9199 jbsr do_rt_sigreturn9292- addql #8,%sp100100+101101+1:102102+ | stack contents now:103103+ | [original pt_regs address] [original switch_stack address]104104+ | [unused part of the gap] [moved switch_stack] [moved pt_regs]105105+ | [replacement exception frame]106106+ | return value of do_{rt_,}sigreturn() points to moved switch_stack.107107+108108+ movel %d0,%sp | discard the leftover junk93109 RESTORE_SWITCH_STACK110110+ | stack contents now is just [syscall return address] [pt_regs] [frame]111111+ | return pt_regs.d0112112+ movel %sp@(PT_OFF_D0+4),%d094113 rts9511496115ENTRY(buserr)···197180 jbsr syscall_trace198181 RESTORE_SWITCH_STACK199182 addql #4,%sp200200- jra .Lret_from_exception201201-202202-ENTRY(ret_from_signal)203203- movel %curptr@(TASK_STACK),%a1204204- tstb %a1@(TINFO_FLAGS+2)205205- jge 1f206206- jbsr syscall_trace207207-1: RESTORE_SWITCH_STACK208208- 
addql #4,%sp209209-/* on 68040 complete pending writebacks if any */210210-#ifdef CONFIG_M68040211211- bfextu %sp@(PT_OFF_FORMATVEC){#0,#4},%d0212212- subql #7,%d0 | bus error frame ?213213- jbne 1f214214- movel %sp,%sp@-215215- jbsr berr_040cleanup216216- addql #4,%sp217217-1:218218-#endif219183 jra .Lret_from_exception220184221185ENTRY(system_call)···336338337339 /* save fs (sfc,%dfc) (may be pointing to kernel memory) */338340 movec %sfc,%d0339339- movew %d0,%a0@(TASK_THREAD+THREAD_FS)341341+ movew %d0,%a0@(TASK_THREAD+THREAD_FC)340342341343 /* save usp */342344 /* it is better to use a movel here instead of a movew 8*) */···422424 movel %a0,%usp423425424426 /* restore fs (sfc,%dfc) */425425- movew %a1@(TASK_THREAD+THREAD_FS),%a0427427+ movew %a1@(TASK_THREAD+THREAD_FC),%a0426428 movec %a0,%sfc427429 movec %a0,%dfc428430
+2-2
arch/m68k/kernel/process.c
···92929393void flush_thread(void)9494{9595- current->thread.fs = __USER_DS;9595+ current->thread.fc = USER_DATA;9696#ifdef CONFIG_FPU9797 if (!FPU_IS_EMU) {9898 unsigned long zero = 0;···155155 * Must save the current SFC/DFC value, NOT the value when156156 * the parent was last descheduled - RGH 10-08-96157157 */158158- p->thread.fs = get_fs().seg;158158+ p->thread.fc = USER_DATA;159159160160 if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) {161161 /* kernel thread */
+82-115
arch/m68k/kernel/signal.c
···447447448448 if (CPU_IS_060 ? sc->sc_fpstate[2] : sc->sc_fpstate[0]) {449449 fpu_version = sc->sc_fpstate[0];450450- if (CPU_IS_020_OR_030 &&450450+ if (CPU_IS_020_OR_030 && !regs->stkadj &&451451 regs->vector >= (VEC_FPBRUC * 4) &&452452 regs->vector <= (VEC_FPNAN * 4)) {453453 /* Clear pending exception in 68882 idle frame */···510510 if (!(CPU_IS_060 || CPU_IS_COLDFIRE))511511 context_size = fpstate[1];512512 fpu_version = fpstate[0];513513- if (CPU_IS_020_OR_030 &&513513+ if (CPU_IS_020_OR_030 && !regs->stkadj &&514514 regs->vector >= (VEC_FPBRUC * 4) &&515515 regs->vector <= (VEC_FPNAN * 4)) {516516 /* Clear pending exception in 68882 idle frame */···641641static int mangle_kernel_stack(struct pt_regs *regs, int formatvec,642642 void __user *fp)643643{644644- int fsize = frame_extra_sizes(formatvec >> 12);645645- if (fsize < 0) {644644+ int extra = frame_extra_sizes(formatvec >> 12);645645+ char buf[sizeof_field(struct frame, un)];646646+647647+ if (extra < 0) {646648 /*647649 * user process trying to return with weird frame format648650 */649651 pr_debug("user process returning with weird frame format\n");650650- return 1;652652+ return -1;651653 }652652- if (!fsize) {653653- regs->format = formatvec >> 12;654654- regs->vector = formatvec & 0xfff;655655- } else {656656- struct switch_stack *sw = (struct switch_stack *)regs - 1;657657- /* yes, twice as much as max(sizeof(frame.un.fmt<x>)) */658658- unsigned long buf[sizeof_field(struct frame, un) / 2];654654+ if (extra && copy_from_user(buf, fp, extra))655655+ return -1;656656+ regs->format = formatvec >> 12;657657+ regs->vector = formatvec & 0xfff;658658+ if (extra) {659659+ void *p = (struct switch_stack *)regs - 1;660660+ struct frame *new = (void *)regs - extra;661661+ int size = sizeof(struct pt_regs)+sizeof(struct switch_stack);659662660660- /* that'll make sure that expansion won't crap over data */661661- if (copy_from_user(buf + fsize / 4, fp, fsize))662662- return 1;663663-664664- /* point of no 
return */665665- regs->format = formatvec >> 12;666666- regs->vector = formatvec & 0xfff;667667-#define frame_offset (sizeof(struct pt_regs)+sizeof(struct switch_stack))668668- __asm__ __volatile__ (669669-#ifdef CONFIG_COLDFIRE670670- " movel %0,%/sp\n\t"671671- " bra ret_from_signal\n"672672-#else673673- " movel %0,%/a0\n\t"674674- " subl %1,%/a0\n\t" /* make room on stack */675675- " movel %/a0,%/sp\n\t" /* set stack pointer */676676- /* move switch_stack and pt_regs */677677- "1: movel %0@+,%/a0@+\n\t"678678- " dbra %2,1b\n\t"679679- " lea %/sp@(%c3),%/a0\n\t" /* add offset of fmt */680680- " lsrl #2,%1\n\t"681681- " subql #1,%1\n\t"682682- /* copy to the gap we'd made */683683- "2: movel %4@+,%/a0@+\n\t"684684- " dbra %1,2b\n\t"685685- " bral ret_from_signal\n"663663+ memmove(p - extra, p, size);664664+ memcpy(p - extra + size, buf, extra);665665+ current->thread.esp0 = (unsigned long)&new->ptregs;666666+#ifdef CONFIG_M68040667667+ /* on 68040 complete pending writebacks if any */668668+ if (new->ptregs.format == 7) // bus error frame669669+ berr_040cleanup(new);686670#endif687687- : /* no outputs, it doesn't ever return */688688- : "a" (sw), "d" (fsize), "d" (frame_offset/4-1),689689- "n" (frame_offset), "a" (buf + fsize/4)690690- : "a0");691691-#undef frame_offset692671 }693693- return 0;672672+ return extra;694673}695674696675static inline int···677698{678699 int formatvec;679700 struct sigcontext context;680680- int err = 0;681701682702 siginfo_build_tests();683703···685707686708 /* get previous context */687709 if (copy_from_user(&context, usc, sizeof(context)))688688- goto badframe;710710+ return -1;689711690712 /* restore passed registers */691713 regs->d0 = context.sc_d0;···698720 wrusp(context.sc_usp);699721 formatvec = context.sc_formatvec;700722701701- err = restore_fpu_state(&context);723723+ if (restore_fpu_state(&context))724724+ return -1;702725703703- if (err || mangle_kernel_stack(regs, formatvec, fp))704704- goto badframe;705705-706706- 
return 0;707707-708708-badframe:709709- return 1;726726+ return mangle_kernel_stack(regs, formatvec, fp);710727}711728712729static inline int···718745719746 err = __get_user(temp, &uc->uc_mcontext.version);720747 if (temp != MCONTEXT_VERSION)721721- goto badframe;748748+ return -1;722749 /* restore passed registers */723750 err |= __get_user(regs->d0, &gregs[0]);724751 err |= __get_user(regs->d1, &gregs[1]);···747774 err |= restore_altstack(&uc->uc_stack);748775749776 if (err)750750- goto badframe;777777+ return -1;751778752752- if (mangle_kernel_stack(regs, temp, &uc->uc_extra))753753- goto badframe;754754-755755- return 0;756756-757757-badframe:758758- return 1;779779+ return mangle_kernel_stack(regs, temp, &uc->uc_extra);759780}760781761761-asmlinkage int do_sigreturn(struct pt_regs *regs, struct switch_stack *sw)782782+asmlinkage void *do_sigreturn(struct pt_regs *regs, struct switch_stack *sw)762783{763784 unsigned long usp = rdusp();764785 struct sigframe __user *frame = (struct sigframe __user *)(usp - 4);765786 sigset_t set;787787+ int size;766788767789 if (!access_ok(frame, sizeof(*frame)))768790 goto badframe;···769801770802 set_current_blocked(&set);771803772772- if (restore_sigcontext(regs, &frame->sc, frame + 1))804804+ size = restore_sigcontext(regs, &frame->sc, frame + 1);805805+ if (size < 0)773806 goto badframe;774774- return regs->d0;807807+ return (void *)sw - size;775808776809badframe:777810 force_sig(SIGSEGV);778778- return 0;811811+ return sw;779812}780813781781-asmlinkage int do_rt_sigreturn(struct pt_regs *regs, struct switch_stack *sw)814814+asmlinkage void *do_rt_sigreturn(struct pt_regs *regs, struct switch_stack *sw)782815{783816 unsigned long usp = rdusp();784817 struct rt_sigframe __user *frame = (struct rt_sigframe __user *)(usp - 4);785818 sigset_t set;819819+ int size;786820787821 if (!access_ok(frame, sizeof(*frame)))788822 goto badframe;···793823794824 set_current_blocked(&set);795825796796- if (rt_restore_ucontext(regs, sw, 
&frame->uc))826826+ size = rt_restore_ucontext(regs, sw, &frame->uc);827827+ if (size < 0)797828 goto badframe;798798- return regs->d0;829829+ return (void *)sw - size;799830800831badframe:801832 force_sig(SIGSEGV);802802- return 0;833833+ return sw;834834+}835835+836836+static inline struct pt_regs *rte_regs(struct pt_regs *regs)837837+{838838+ return (void *)regs + regs->stkadj;803839}804840805841static void setup_sigcontext(struct sigcontext *sc, struct pt_regs *regs,806842 unsigned long mask)807843{844844+ struct pt_regs *tregs = rte_regs(regs);808845 sc->sc_mask = mask;809846 sc->sc_usp = rdusp();810847 sc->sc_d0 = regs->d0;811848 sc->sc_d1 = regs->d1;812849 sc->sc_a0 = regs->a0;813850 sc->sc_a1 = regs->a1;814814- sc->sc_sr = regs->sr;815815- sc->sc_pc = regs->pc;816816- sc->sc_formatvec = regs->format << 12 | regs->vector;851851+ sc->sc_sr = tregs->sr;852852+ sc->sc_pc = tregs->pc;853853+ sc->sc_formatvec = tregs->format << 12 | tregs->vector;817854 save_a5_state(sc, regs);818855 save_fpu_state(sc, regs);819856}···828851static inline int rt_setup_ucontext(struct ucontext __user *uc, struct pt_regs *regs)829852{830853 struct switch_stack *sw = (struct switch_stack *)regs - 1;854854+ struct pt_regs *tregs = rte_regs(regs);831855 greg_t __user *gregs = uc->uc_mcontext.gregs;832856 int err = 0;833857···849871 err |= __put_user(sw->a5, &gregs[13]);850872 err |= __put_user(sw->a6, &gregs[14]);851873 err |= __put_user(rdusp(), &gregs[15]);852852- err |= __put_user(regs->pc, &gregs[16]);853853- err |= __put_user(regs->sr, &gregs[17]);854854- err |= __put_user((regs->format << 12) | regs->vector, &uc->uc_formatvec);874874+ err |= __put_user(tregs->pc, &gregs[16]);875875+ err |= __put_user(tregs->sr, &gregs[17]);876876+ err |= __put_user((tregs->format << 12) | tregs->vector, &uc->uc_formatvec);855877 err |= rt_save_fpu_state(uc, regs);856878 return err;857879}···868890 struct pt_regs *regs)869891{870892 struct sigframe __user *frame;871871- int fsize = 
frame_extra_sizes(regs->format);893893+ struct pt_regs *tregs = rte_regs(regs);894894+ int fsize = frame_extra_sizes(tregs->format);872895 struct sigcontext context;873896 int err = 0, sig = ksig->sig;874897875898 if (fsize < 0) {876899 pr_debug("setup_frame: Unknown frame format %#x\n",877877- regs->format);900900+ tregs->format);878901 return -EFAULT;879902 }880903···886907887908 err |= __put_user(sig, &frame->sig);888909889889- err |= __put_user(regs->vector, &frame->code);910910+ err |= __put_user(tregs->vector, &frame->code);890911 err |= __put_user(&frame->sc, &frame->psc);891912892913 if (_NSIG_WORDS > 1)···913934 push_cache ((unsigned long) &frame->retcode);914935915936 /*916916- * Set up registers for signal handler. All the state we are about917917- * to destroy is successfully copied to sigframe.918918- */919919- wrusp ((unsigned long) frame);920920- regs->pc = (unsigned long) ksig->ka.sa.sa_handler;921921- adjustformat(regs);922922-923923- /*924937 * This is subtle; if we build more than one sigframe, all but the925938 * first one will see frame format 0 and have fsize == 0, so we won't926939 * screw stkadj.927940 */928928- if (fsize)941941+ if (fsize) {929942 regs->stkadj = fsize;930930-931931- /* Prepare to skip over the extra stuff in the exception frame. */932932- if (regs->stkadj) {933933- struct pt_regs *tregs =934934- (struct pt_regs *)((ulong)regs + regs->stkadj);943943+ tregs = rte_regs(regs);935944 pr_debug("Performing stackadjust=%04lx\n", regs->stkadj);936936- /* This must be copied with decreasing addresses to937937- handle overlaps. */938945 tregs->vector = 0;939946 tregs->format = 0;940940- tregs->pc = regs->pc;941947 tregs->sr = regs->sr;942948 }949949+950950+ /*951951+ * Set up registers for signal handler. 
All the state we are about952952+ * to destroy is successfully copied to sigframe.953953+ */954954+ wrusp ((unsigned long) frame);955955+ tregs->pc = (unsigned long) ksig->ka.sa.sa_handler;956956+ adjustformat(regs);957957+943958 return 0;944959}945960···941968 struct pt_regs *regs)942969{943970 struct rt_sigframe __user *frame;944944- int fsize = frame_extra_sizes(regs->format);971971+ struct pt_regs *tregs = rte_regs(regs);972972+ int fsize = frame_extra_sizes(tregs->format);945973 int err = 0, sig = ksig->sig;946974947975 if (fsize < 0) {···9931019 push_cache ((unsigned long) &frame->retcode);99410209951021 /*996996- * Set up registers for signal handler. All the state we are about997997- * to destroy is successfully copied to sigframe.998998- */999999- wrusp ((unsigned long) frame);10001000- regs->pc = (unsigned long) ksig->ka.sa.sa_handler;10011001- adjustformat(regs);10021002-10031003- /*10041022 * This is subtle; if we build more than one sigframe, all but the10051023 * first one will see frame format 0 and have fsize == 0, so we won't10061024 * screw stkadj.10071025 */10081008- if (fsize)10261026+ if (fsize) {10091027 regs->stkadj = fsize;10101010-10111011- /* Prepare to skip over the extra stuff in the exception frame. */10121012- if (regs->stkadj) {10131013- struct pt_regs *tregs =10141014- (struct pt_regs *)((ulong)regs + regs->stkadj);10281028+ tregs = rte_regs(regs);10151029 pr_debug("Performing stackadjust=%04lx\n", regs->stkadj);10161016- /* This must be copied with decreasing addresses to10171017- handle overlaps. */10181030 tregs->vector = 0;10191031 tregs->format = 0;10201020- tregs->pc = regs->pc;10211032 tregs->sr = regs->sr;10221033 }10341034+10351035+ /*10361036+ * Set up registers for signal handler. 
All the state we are about10371037+ * to destroy is successfully copied to sigframe.10381038+ */10391039+ wrusp ((unsigned long) frame);10401040+ tregs->pc = (unsigned long) ksig->ka.sa.sa_handler;10411041+ adjustformat(regs);10231042 return 0;10241043}10251044
+4-9
arch/m68k/kernel/traps.c
···181181static inline unsigned long probe040(int iswrite, unsigned long addr, int wbs)182182{183183 unsigned long mmusr;184184- mm_segment_t old_fs = get_fs();185184186186- set_fs(MAKE_MM_SEG(wbs));185185+ set_fc(wbs);187186188187 if (iswrite)189188 asm volatile (".chip 68040; ptestw (%0); .chip 68k" : : "a" (addr));···191192192193 asm volatile (".chip 68040; movec %%mmusr,%0; .chip 68k" : "=r" (mmusr));193194194194- set_fs(old_fs);195195+ set_fc(USER_DATA);195196196197 return mmusr;197198}···200201 unsigned long wbd)201202{202203 int res = 0;203203- mm_segment_t old_fs = get_fs();204204205205- /* set_fs can not be moved, otherwise put_user() may oops */206206- set_fs(MAKE_MM_SEG(wbs));205205+ set_fc(wbs);207206208207 switch (wbs & WBSIZ_040) {209208 case BA_SIZE_BYTE:···215218 break;216219 }217220218218- /* set_fs can not be moved, otherwise put_user() may oops */219219- set_fs(old_fs);220220-221221+ set_fc(USER_DATA);221222222223 pr_debug("do_040writeback1, res=%d\n", res);223224
···662662 ((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative : func) : \663663 func##_positive)664664665665+static bool is_bad_offset(int b_off)666666+{667667+ return b_off > 0x1ffff || b_off < -0x20000;668668+}669669+665670static int build_body(struct jit_ctx *ctx)666671{667672 const struct bpf_prog *prog = ctx->skf;···733728 /* Load return register on DS for failures */734729 emit_reg_move(r_ret, r_zero, ctx);735730 /* Return with error */736736- emit_b(b_imm(prog->len, ctx), ctx);731731+ b_off = b_imm(prog->len, ctx);732732+ if (is_bad_offset(b_off))733733+ return -E2BIG;734734+ emit_b(b_off, ctx);737735 emit_nop(ctx);738736 break;739737 case BPF_LD | BPF_W | BPF_IND:···783775 emit_jalr(MIPS_R_RA, r_s0, ctx);784776 emit_reg_move(MIPS_R_A0, r_skb, ctx); /* delay slot */785777 /* Check the error value */786786- emit_bcond(MIPS_COND_NE, r_ret, 0,787787- b_imm(prog->len, ctx), ctx);778778+ b_off = b_imm(prog->len, ctx);779779+ if (is_bad_offset(b_off))780780+ return -E2BIG;781781+ emit_bcond(MIPS_COND_NE, r_ret, 0, b_off, ctx);788782 emit_reg_move(r_ret, r_zero, ctx);789783 /* We are good */790784 /* X <- P[1:K] & 0xf */···865855 /* A /= X */866856 ctx->flags |= SEEN_X | SEEN_A;867857 /* Check if r_X is zero */868868- emit_bcond(MIPS_COND_EQ, r_X, r_zero,869869- b_imm(prog->len, ctx), ctx);858858+ b_off = b_imm(prog->len, ctx);859859+ if (is_bad_offset(b_off))860860+ return -E2BIG;861861+ emit_bcond(MIPS_COND_EQ, r_X, r_zero, b_off, ctx);870862 emit_load_imm(r_ret, 0, ctx); /* delay slot */871863 emit_div(r_A, r_X, ctx);872864 break;···876864 /* A %= X */877865 ctx->flags |= SEEN_X | SEEN_A;878866 /* Check if r_X is zero */879879- emit_bcond(MIPS_COND_EQ, r_X, r_zero,880880- b_imm(prog->len, ctx), ctx);867867+ b_off = b_imm(prog->len, ctx);868868+ if (is_bad_offset(b_off))869869+ return -E2BIG;870870+ emit_bcond(MIPS_COND_EQ, r_X, r_zero, b_off, ctx);881871 emit_load_imm(r_ret, 0, ctx); /* delay slot */882872 emit_mod(r_A, r_X, ctx);883873 break;···940926 
break;941927 case BPF_JMP | BPF_JA:942928 /* pc += K */943943- emit_b(b_imm(i + k + 1, ctx), ctx);929929+ b_off = b_imm(i + k + 1, ctx);930930+ if (is_bad_offset(b_off))931931+ return -E2BIG;932932+ emit_b(b_off, ctx);944933 emit_nop(ctx);945934 break;946935 case BPF_JMP | BPF_JEQ | BPF_K:···10731056 break;10741057 case BPF_RET | BPF_A:10751058 ctx->flags |= SEEN_A;10761076- if (i != prog->len - 1)10591059+ if (i != prog->len - 1) {10771060 /*10781061 * If this is not the last instruction10791062 * then jump to the epilogue10801063 */10811081- emit_b(b_imm(prog->len, ctx), ctx);10641064+ b_off = b_imm(prog->len, ctx);10651065+ if (is_bad_offset(b_off))10661066+ return -E2BIG;10671067+ emit_b(b_off, ctx);10681068+ }10821069 emit_reg_move(r_ret, r_A, ctx); /* delay slot */10831070 break;10841071 case BPF_RET | BPF_K:···10961075 * If this is not the last instruction10971076 * then jump to the epilogue10981077 */10991099- emit_b(b_imm(prog->len, ctx), ctx);10781078+ b_off = b_imm(prog->len, ctx);10791079+ if (is_bad_offset(b_off))10801080+ return -E2BIG;10811081+ emit_b(b_off, ctx);11001082 emit_nop(ctx);11011083 }11021084 break;···11571133 /* Load *dev pointer */11581134 emit_load_ptr(r_s0, r_skb, off, ctx);11591135 /* error (0) in the delay slot */11601160- emit_bcond(MIPS_COND_EQ, r_s0, r_zero,11611161- b_imm(prog->len, ctx), ctx);11361136+ b_off = b_imm(prog->len, ctx);11371137+ if (is_bad_offset(b_off))11381138+ return -E2BIG;11391139+ emit_bcond(MIPS_COND_EQ, r_s0, r_zero, b_off, ctx);11621140 emit_reg_move(r_ret, r_zero, ctx);11631141 if (code == (BPF_ANC | SKF_AD_IFINDEX)) {11641142 BUILD_BUG_ON(sizeof_field(struct net_device, ifindex) != 4);···1270124412711245 /* Generate the actual JIT code */12721246 build_prologue(&ctx);12731273- build_body(&ctx);12471247+ if (build_body(&ctx)) {12481248+ module_memfree(ctx.target);12491249+ goto out;12501250+ }12741251 build_epilogue(&ctx);1275125212761253 /* Update the icache */
+2-1
arch/nios2/Kconfig.debug
···33config EARLY_PRINTK44 bool "Activate early kernel debugging"55 default y66+ depends on TTY67 select SERIAL_CORE_CONSOLE78 help88- Enable early printk on console99+ Enable early printk on console.910 This is useful for kernel debugging when your machine crashes very1011 early before the console code is initialized.1112 You should normally say N here, unless you want to debug such a crash.
-2
arch/nios2/kernel/setup.c
···149149150150void __init setup_arch(char **cmdline_p)151151{152152- int dram_start;153153-154152 console_verbose();155153156154 memory_start = memblock_start_of_DRAM();
···4949static struct pvclock_vsyscall_time_info5050 hv_clock_boot[HVC_BOOT_ARRAY_SIZE] __bss_decrypted __aligned(PAGE_SIZE);5151static struct pvclock_wall_clock wall_clock __bss_decrypted;5252-static DEFINE_PER_CPU(struct pvclock_vsyscall_time_info *, hv_clock_per_cpu);5352static struct pvclock_vsyscall_time_info *hvclock_mem;5454-5555-static inline struct pvclock_vcpu_time_info *this_cpu_pvti(void)5656-{5757- return &this_cpu_read(hv_clock_per_cpu)->pvti;5858-}5959-6060-static inline struct pvclock_vsyscall_time_info *this_cpu_hvclock(void)6161-{6262- return this_cpu_read(hv_clock_per_cpu);6363-}5353+DEFINE_PER_CPU(struct pvclock_vsyscall_time_info *, hv_clock_per_cpu);5454+EXPORT_PER_CPU_SYMBOL_GPL(hv_clock_per_cpu);64556556/*6657 * The wallclock is the time of day when we booted. Since then, some time may
+2-2
arch/x86/kvm/cpuid.c
···6565 for (i = 0; i < nent; i++) {6666 e = &entries[i];67676868- if (e->function == function && (e->index == index ||6969- !(e->flags & KVM_CPUID_FLAG_SIGNIFCANT_INDEX)))6868+ if (e->function == function &&6969+ (!(e->flags & KVM_CPUID_FLAG_SIGNIFCANT_INDEX) || e->index == index))7070 return e;7171 }7272
···707707 if (!is_shadow_present_pte(*it.sptep)) {708708 table_gfn = gw->table_gfn[it.level - 2];709709 access = gw->pt_access[it.level - 2];710710- sp = kvm_mmu_get_page(vcpu, table_gfn, addr, it.level-1,711711- false, access);710710+ sp = kvm_mmu_get_page(vcpu, table_gfn, addr,711711+ it.level-1, false, access);712712+ /*713713+ * We must synchronize the pagetable before linking it714714+ * because the guest doesn't need to flush tlb when715715+ * the gpte is changed from non-present to present.716716+ * Otherwise, the guest may use the wrong mapping.717717+ *718718+ * For PG_LEVEL_4K, kvm_mmu_get_page() has already719719+ * synchronized it transiently via kvm_sync_page().720720+ *721721+ * For higher level pagetable, we synchronize it via722722+ * the slower mmu_sync_children(). If it needs to723723+ * break, some progress has been made; return724724+ * RET_PF_RETRY and retry on the next #PF.725725+ * KVM_REQ_MMU_SYNC is not necessary but it726726+ * expedites the process.727727+ */728728+ if (sp->unsync_children &&729729+ mmu_sync_children(vcpu, sp, false))730730+ return RET_PF_RETRY;712731 }713732714733 /*···10661047 * Using the cached information from sp->gfns is safe because:10671048 * - The spte has a reference to the struct page, so the pfn for a given gfn10681049 * can't change unless all sptes pointing to it are nuked first.10691069- *10701070- * Note:10711071- * We should flush all tlbs if spte is dropped even though guest is10721072- * responsible for it. 
Since if we don't, kvm_mmu_notifier_invalidate_page10731073- * and kvm_mmu_notifier_invalidate_range_start detect the mapping page isn't10741074- * used by guest then tlbs are not flushed, so guest is allowed to access the10751075- * freed pages.10761076- * And we increase kvm->tlbs_dirty to delay tlbs flush in this case.10771050 */10781051static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)10791052{···11181107 return 0;1119110811201109 if (FNAME(prefetch_invalid_gpte)(vcpu, sp, &sp->spt[i], gpte)) {11211121- /*11221122- * Update spte before increasing tlbs_dirty to make11231123- * sure no tlb flush is lost after spte is zapped; see11241124- * the comments in kvm_flush_remote_tlbs().11251125- */11261126- smp_wmb();11271127- vcpu->kvm->tlbs_dirty++;11101110+ set_spte_ret |= SET_SPTE_NEED_REMOTE_TLB_FLUSH;11281111 continue;11291112 }11301113···1133112811341129 if (gfn != sp->gfns[i]) {11351130 drop_spte(vcpu->kvm, &sp->spt[i]);11361136- /*11371137- * The same as above where we are doing11381138- * prefetch_invalid_gpte().11391139- */11401140- smp_wmb();11411141- vcpu->kvm->tlbs_dirty++;11311131+ set_spte_ret |= SET_SPTE_NEED_REMOTE_TLB_FLUSH;11421132 continue;11431133 }11441134
···595595 return 0;596596}597597598598+static int __sev_launch_update_vmsa(struct kvm *kvm, struct kvm_vcpu *vcpu,599599+ int *error)600600+{601601+ struct sev_data_launch_update_vmsa vmsa;602602+ struct vcpu_svm *svm = to_svm(vcpu);603603+ int ret;604604+605605+ /* Perform some pre-encryption checks against the VMSA */606606+ ret = sev_es_sync_vmsa(svm);607607+ if (ret)608608+ return ret;609609+610610+ /*611611+ * The LAUNCH_UPDATE_VMSA command will perform in-place encryption of612612+ * the VMSA memory content (i.e it will write the same memory region613613+ * with the guest's key), so invalidate it first.614614+ */615615+ clflush_cache_range(svm->vmsa, PAGE_SIZE);616616+617617+ vmsa.reserved = 0;618618+ vmsa.handle = to_kvm_svm(kvm)->sev_info.handle;619619+ vmsa.address = __sme_pa(svm->vmsa);620620+ vmsa.len = PAGE_SIZE;621621+ return sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_VMSA, &vmsa, error);622622+}623623+598624static int sev_launch_update_vmsa(struct kvm *kvm, struct kvm_sev_cmd *argp)599625{600600- struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;601601- struct sev_data_launch_update_vmsa vmsa;602626 struct kvm_vcpu *vcpu;603627 int i, ret;604628605629 if (!sev_es_guest(kvm))606630 return -ENOTTY;607631608608- vmsa.reserved = 0;609609-610632 kvm_for_each_vcpu(i, vcpu, kvm) {611611- struct vcpu_svm *svm = to_svm(vcpu);612612-613613- /* Perform some pre-encryption checks against the VMSA */614614- ret = sev_es_sync_vmsa(svm);633633+ ret = mutex_lock_killable(&vcpu->mutex);615634 if (ret)616635 return ret;617636618618- /*619619- * The LAUNCH_UPDATE_VMSA command will perform in-place620620- * encryption of the VMSA memory content (i.e it will write621621- * the same memory region with the guest's key), so invalidate622622- * it first.623623- */624624- clflush_cache_range(svm->vmsa, PAGE_SIZE);637637+ ret = __sev_launch_update_vmsa(kvm, vcpu, &argp->error);625638626626- vmsa.handle = sev->handle;627627- vmsa.address = __sme_pa(svm->vmsa);628628- vmsa.len 
= PAGE_SIZE;629629- ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_VMSA, &vmsa,630630- &argp->error);639639+ mutex_unlock(&vcpu->mutex);631640 if (ret)632641 return ret;633633-634634- svm->vcpu.arch.guest_state_protected = true;635642 }636643637644 return 0;···1404139714051398 /* Bind ASID to this guest */14061399 ret = sev_bind_asid(kvm, start.handle, error);14071407- if (ret)14001400+ if (ret) {14011401+ sev_decommission(start.handle);14081402 goto e_free_session;14031403+ }1409140414101405 params.handle = start.handle;14111406 if (copy_to_user((void __user *)(uintptr_t)argp->data,···1473146414741465 /* Pin guest memory */14751466 guest_page = sev_pin_memory(kvm, params.guest_uaddr & PAGE_MASK,14761476- PAGE_SIZE, &n, 0);14671467+ PAGE_SIZE, &n, 1);14771468 if (IS_ERR(guest_page)) {14781469 ret = PTR_ERR(guest_page);14791470 goto e_free_trans;···15101501 return sev_issue_cmd(kvm, SEV_CMD_RECEIVE_FINISH, &data, &argp->error);15111502}1512150315041504+static bool cmd_allowed_from_miror(u32 cmd_id)15051505+{15061506+ /*15071507+ * Allow mirrors VM to call KVM_SEV_LAUNCH_UPDATE_VMSA to enable SEV-ES15081508+ * active mirror VMs. Also allow the debugging and status commands.15091509+ */15101510+ if (cmd_id == KVM_SEV_LAUNCH_UPDATE_VMSA ||15111511+ cmd_id == KVM_SEV_GUEST_STATUS || cmd_id == KVM_SEV_DBG_DECRYPT ||15121512+ cmd_id == KVM_SEV_DBG_ENCRYPT)15131513+ return true;15141514+15151515+ return false;15161516+}15171517+15131518int svm_mem_enc_op(struct kvm *kvm, void __user *argp)15141519{15151520 struct kvm_sev_cmd sev_cmd;···1540151715411518 mutex_lock(&kvm->lock);1542151915431543- /* enc_context_owner handles all memory enc operations */15441544- if (is_mirroring_enc_context(kvm)) {15201520+ /* Only the enc_context_owner handles some memory enc operations. 
*/15211521+ if (is_mirroring_enc_context(kvm) &&15221522+ !cmd_allowed_from_miror(sev_cmd.id)) {15451523 r = -EINVAL;15461524 goto out;15471525 }···17391715{17401716 struct file *source_kvm_file;17411717 struct kvm *source_kvm;17421742- struct kvm_sev_info *mirror_sev;17431743- unsigned int asid;17181718+ struct kvm_sev_info source_sev, *mirror_sev;17441719 int ret;1745172017461721 source_kvm_file = fget(source_fd);···17621739 goto e_source_unlock;17631740 }1764174117651765- asid = to_kvm_svm(source_kvm)->sev_info.asid;17421742+ memcpy(&source_sev, &to_kvm_svm(source_kvm)->sev_info,17431743+ sizeof(source_sev));1766174417671745 /*17681746 * The mirror kvm holds an enc_context_owner ref so its asid can't···17831759 /* Set enc_context_owner and copy its encryption context over */17841760 mirror_sev = &to_kvm_svm(kvm)->sev_info;17851761 mirror_sev->enc_context_owner = source_kvm;17861786- mirror_sev->asid = asid;17871762 mirror_sev->active = true;17631763+ mirror_sev->asid = source_sev.asid;17641764+ mirror_sev->fd = source_sev.fd;17651765+ mirror_sev->es_active = source_sev.es_active;17661766+ mirror_sev->handle = source_sev.handle;17671767+ /*17681768+ * Do not copy ap_jump_table. Since the mirror does not share the same17691769+ * KVM contexts as the original, and they may have different17701770+ * memory-views.17711771+ */1788177217891773 mutex_unlock(&kvm->lock);17901774 return 0;
+74-63
arch/x86/kvm/svm/svm.c
···1566156615671567 svm->vmcb->control.int_ctl |= svm->nested.ctl.int_ctl &15681568 V_IRQ_INJECTION_BITS_MASK;15691569+15701570+ svm->vmcb->control.int_vector = svm->nested.ctl.int_vector;15691571 }1570157215711573 vmcb_mark_dirty(svm->vmcb, VMCB_INTR);···2222222022232221 /* Both #GP cases have zero error_code */22242222 if (error_code)22232223+ goto reinject;22242224+22252225+ /* All SVM instructions expect page aligned RAX */22262226+ if (svm->vmcb->save.rax & ~PAGE_MASK)22252227 goto reinject;2226222822272229 /* Decode the instruction for usage later */···42914285 struct kvm_host_map map_save;42924286 int ret;4293428742944294- if (is_guest_mode(vcpu)) {42954295- /* FED8h - SVM Guest */42964296- put_smstate(u64, smstate, 0x7ed8, 1);42974297- /* FEE0h - SVM Guest VMCB Physical Address */42984298- put_smstate(u64, smstate, 0x7ee0, svm->nested.vmcb12_gpa);42884288+ if (!is_guest_mode(vcpu))42894289+ return 0;4299429043004300- svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];43014301- svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];43024302- svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP];42914291+ /* FED8h - SVM Guest */42924292+ put_smstate(u64, smstate, 0x7ed8, 1);42934293+ /* FEE0h - SVM Guest VMCB Physical Address */42944294+ put_smstate(u64, smstate, 0x7ee0, svm->nested.vmcb12_gpa);4303429543044304- ret = nested_svm_vmexit(svm);43054305- if (ret)43064306- return ret;42964296+ svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];42974297+ svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];42984298+ svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP];4307429943084308- /*43094309- * KVM uses VMCB01 to store L1 host state while L2 runs but43104310- * VMCB01 is going to be used during SMM and thus the state will43114311- * be lost. Temporary save non-VMLOAD/VMSAVE state to the host save43124312- * area pointed to by MSR_VM_HSAVE_PA. 
APM guarantees that the43134313- * format of the area is identical to guest save area offsetted43144314- * by 0x400 (matches the offset of 'struct vmcb_save_area'43154315- * within 'struct vmcb'). Note: HSAVE area may also be used by43164316- * L1 hypervisor to save additional host context (e.g. KVM does43174317- * that, see svm_prepare_guest_switch()) which must be43184318- * preserved.43194319- */43204320- if (kvm_vcpu_map(vcpu, gpa_to_gfn(svm->nested.hsave_msr),43214321- &map_save) == -EINVAL)43224322- return 1;43004300+ ret = nested_svm_vmexit(svm);43014301+ if (ret)43024302+ return ret;4323430343244324- BUILD_BUG_ON(offsetof(struct vmcb, save) != 0x400);43044304+ /*43054305+ * KVM uses VMCB01 to store L1 host state while L2 runs but43064306+ * VMCB01 is going to be used during SMM and thus the state will43074307+ * be lost. Temporary save non-VMLOAD/VMSAVE state to the host save43084308+ * area pointed to by MSR_VM_HSAVE_PA. APM guarantees that the43094309+ * format of the area is identical to guest save area offsetted43104310+ * by 0x400 (matches the offset of 'struct vmcb_save_area'43114311+ * within 'struct vmcb'). Note: HSAVE area may also be used by43124312+ * L1 hypervisor to save additional host context (e.g. 
KVM does43134313+ * that, see svm_prepare_guest_switch()) which must be43144314+ * preserved.43154315+ */43164316+ if (kvm_vcpu_map(vcpu, gpa_to_gfn(svm->nested.hsave_msr),43174317+ &map_save) == -EINVAL)43184318+ return 1;4325431943264326- svm_copy_vmrun_state(map_save.hva + 0x400,43274327- &svm->vmcb01.ptr->save);43204320+ BUILD_BUG_ON(offsetof(struct vmcb, save) != 0x400);4328432143294329- kvm_vcpu_unmap(vcpu, &map_save, true);43304330- }43224322+ svm_copy_vmrun_state(map_save.hva + 0x400,43234323+ &svm->vmcb01.ptr->save);43244324+43254325+ kvm_vcpu_unmap(vcpu, &map_save, true);43314326 return 0;43324327}43334328···43364329{43374330 struct vcpu_svm *svm = to_svm(vcpu);43384331 struct kvm_host_map map, map_save;43394339- int ret = 0;43324332+ u64 saved_efer, vmcb12_gpa;43334333+ struct vmcb *vmcb12;43344334+ int ret;4340433543414341- if (guest_cpuid_has(vcpu, X86_FEATURE_LM)) {43424342- u64 saved_efer = GET_SMSTATE(u64, smstate, 0x7ed0);43434343- u64 guest = GET_SMSTATE(u64, smstate, 0x7ed8);43444344- u64 vmcb12_gpa = GET_SMSTATE(u64, smstate, 0x7ee0);43454345- struct vmcb *vmcb12;43364336+ if (!guest_cpuid_has(vcpu, X86_FEATURE_LM))43374337+ return 0;4346433843474347- if (guest) {43484348- if (!guest_cpuid_has(vcpu, X86_FEATURE_SVM))43494349- return 1;43394339+ /* Non-zero if SMI arrived while vCPU was in guest mode. 
*/43404340+ if (!GET_SMSTATE(u64, smstate, 0x7ed8))43414341+ return 0;4350434243514351- if (!(saved_efer & EFER_SVME))43524352- return 1;43434343+ if (!guest_cpuid_has(vcpu, X86_FEATURE_SVM))43444344+ return 1;4353434543544354- if (kvm_vcpu_map(vcpu,43554355- gpa_to_gfn(vmcb12_gpa), &map) == -EINVAL)43564356- return 1;43464346+ saved_efer = GET_SMSTATE(u64, smstate, 0x7ed0);43474347+ if (!(saved_efer & EFER_SVME))43484348+ return 1;4357434943584358- if (svm_allocate_nested(svm))43594359- return 1;43504350+ vmcb12_gpa = GET_SMSTATE(u64, smstate, 0x7ee0);43514351+ if (kvm_vcpu_map(vcpu, gpa_to_gfn(vmcb12_gpa), &map) == -EINVAL)43524352+ return 1;4360435343614361- vmcb12 = map.hva;43544354+ ret = 1;43554355+ if (kvm_vcpu_map(vcpu, gpa_to_gfn(svm->nested.hsave_msr), &map_save) == -EINVAL)43564356+ goto unmap_map;4362435743634363- nested_load_control_from_vmcb12(svm, &vmcb12->control);43584358+ if (svm_allocate_nested(svm))43594359+ goto unmap_save;4364436043654365- ret = enter_svm_guest_mode(vcpu, vmcb12_gpa, vmcb12);43664366- kvm_vcpu_unmap(vcpu, &map, true);43614361+ /*43624362+ * Restore L1 host state from L1 HSAVE area as VMCB01 was43634363+ * used during SMM (see svm_enter_smm())43644364+ */4367436543684368- /*43694369- * Restore L1 host state from L1 HSAVE area as VMCB01 was43704370- * used during SMM (see svm_enter_smm())43714371- */43724372- if (kvm_vcpu_map(vcpu, gpa_to_gfn(svm->nested.hsave_msr),43734373- &map_save) == -EINVAL)43744374- return 1;43664366+ svm_copy_vmrun_state(&svm->vmcb01.ptr->save, map_save.hva + 0x400);4375436743764376- svm_copy_vmrun_state(&svm->vmcb01.ptr->save,43774377- map_save.hva + 0x400);43684368+ /*43694369+ * Enter the nested guest now43704370+ */4378437143794379- kvm_vcpu_unmap(vcpu, &map_save, true);43804380- }43814381- }43724372+ vmcb12 = map.hva;43734373+ nested_load_control_from_vmcb12(svm, &vmcb12->control);43744374+ ret = enter_svm_guest_mode(vcpu, vmcb12_gpa, vmcb12, false);4382437543764376+unmap_save:43774377+ 
kvm_vcpu_unmap(vcpu, &map_save, true);43784378+unmap_map:43794379+ kvm_vcpu_unmap(vcpu, &map, true);43834380 return ret;43844381}43854382
···353353 switch (msr_index) {354354 case MSR_IA32_VMX_EXIT_CTLS:355355 case MSR_IA32_VMX_TRUE_EXIT_CTLS:356356- ctl_high &= ~VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL;356356+ ctl_high &= ~EVMCS1_UNSUPPORTED_VMEXIT_CTRL;357357 break;358358 case MSR_IA32_VMX_ENTRY_CTLS:359359 case MSR_IA32_VMX_TRUE_ENTRY_CTLS:360360- ctl_high &= ~VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL;360360+ ctl_high &= ~EVMCS1_UNSUPPORTED_VMENTRY_CTRL;361361 break;362362 case MSR_IA32_VMX_PROCBASED_CTLS2:363363- ctl_high &= ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;363363+ ctl_high &= ~EVMCS1_UNSUPPORTED_2NDEXEC;364364+ break;365365+ case MSR_IA32_VMX_PINBASED_CTLS:366366+ ctl_high &= ~EVMCS1_UNSUPPORTED_PINCTRL;367367+ break;368368+ case MSR_IA32_VMX_VMFUNC:369369+ ctl_low &= ~EVMCS1_UNSUPPORTED_VMFUNC;364370 break;365371 }366372
+15-9
arch/x86/kvm/vmx/nested.c
···25832583 * Guest state is invalid and unrestricted guest is disabled,25842584 * which means L1 attempted VMEntry to L2 with invalid state.25852585 * Fail the VMEntry.25862586+ *25872587+ * However when force loading the guest state (SMM exit or25882588+ * loading nested state after migration, it is possible to25892589+ * have invalid guest state now, which will be later fixed by25902590+ * restoring L2 register state25862591 */25872587- if (CC(!vmx_guest_state_valid(vcpu))) {25922592+ if (CC(from_vmentry && !vmx_guest_state_valid(vcpu))) {25882593 *entry_failure_code = ENTRY_FAIL_DEFAULT;25892594 return -EINVAL;25902595 }···43564351 if (nested_vmx_load_msr(vcpu, vmcs12->vm_exit_msr_load_addr,43574352 vmcs12->vm_exit_msr_load_count))43584353 nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_MSR_FAIL);43544354+43554355+ to_vmx(vcpu)->emulation_required = vmx_emulation_required(vcpu);43594356}4360435743614358static inline u64 nested_vmx_get_vmcs01_guest_efer(struct vcpu_vmx *vmx)···49064899 return -ENOMEM;49074900}4908490149094909-/*49104910- * Emulate the VMXON instruction.49114911- * Currently, we just remember that VMX is active, and do not save or even49124912- * inspect the argument to VMXON (the so-called "VMXON pointer") because we49134913- * do not currently need to store anything in that guest-allocated memory49144914- * region. Consequently, VMCLEAR and VMPTRLD also do not verify that the their49154915- * argument is different from the VMXON pointer (which the spec says they do).49164916- */49024902+/* Emulate the VMXON instruction. */49174903static int handle_vmon(struct kvm_vcpu *vcpu)49184904{49194905 int ret;···59025902 return true;59035903 case EXIT_REASON_VMFUNC:59045904 /* VM functions are emulated through L2->L0 vmexits. 
*/59055905+ return true;59065906+ case EXIT_REASON_BUS_LOCK:59075907+ /*59085908+ * At present, bus lock VM exit is never exposed to L1.59095909+ * Handle L2's bus locks in L0 directly.59105910+ */59055911 return true;59065912 default:59075913 break;
+27-12
arch/x86/kvm/vmx/vmx.c
···13231323 vmx_prepare_switch_to_host(to_vmx(vcpu));13241324}1325132513261326-static bool emulation_required(struct kvm_vcpu *vcpu)13261326+bool vmx_emulation_required(struct kvm_vcpu *vcpu)13271327{13281328 return emulate_invalid_guest_state && !vmx_guest_state_valid(vcpu);13291329}···13671367 vmcs_writel(GUEST_RFLAGS, rflags);1368136813691369 if ((old_rflags ^ vmx->rflags) & X86_EFLAGS_VM)13701370- vmx->emulation_required = emulation_required(vcpu);13701370+ vmx->emulation_required = vmx_emulation_required(vcpu);13711371}1372137213731373u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu)···18371837 &msr_info->data))18381838 return 1;18391839 /*18401840- * Enlightened VMCS v1 doesn't have certain fields, but buggy18411841- * Hyper-V versions are still trying to use corresponding18421842- * features when they are exposed. Filter out the essential18431843- * minimum.18401840+ * Enlightened VMCS v1 doesn't have certain VMCS fields but18411841+ * instead of just ignoring the features, different Hyper-V18421842+ * versions are either trying to use them and fail or do some18431843+ * sanity checking and refuse to boot. 
Filter all unsupported18441844+ * features out.18441845 */18451846 if (!msr_info->host_initiated &&18461847 vmx->nested.enlightened_vmcs_enabled)···30783077 }3079307830803079 /* depends on vcpu->arch.cr0 to be set to a new value */30813081- vmx->emulation_required = emulation_required(vcpu);30803080+ vmx->emulation_required = vmx_emulation_required(vcpu);30823081}3083308230843083static int vmx_get_max_tdp_level(void)···33313330{33323331 __vmx_set_segment(vcpu, var, seg);3333333233343334- to_vmx(vcpu)->emulation_required = emulation_required(vcpu);33333333+ to_vmx(vcpu)->emulation_required = vmx_emulation_required(vcpu);33353334}3336333533373336static void vmx_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)···66226621 vmx->loaded_vmcs->soft_vnmi_blocked))66236622 vmx->loaded_vmcs->entry_time = ktime_get();6624662366256625- /* Don't enter VMX if guest state is invalid, let the exit handler66266626- start emulation until we arrive back to a valid state */66276627- if (vmx->emulation_required)66246624+ /*66256625+ * Don't enter VMX if guest state is invalid, let the exit handler66266626+ * start emulation until we arrive back to a valid state. 
Synthesize a66276627+ * consistency check VM-Exit due to invalid guest state and bail.66286628+ */66296629+ if (unlikely(vmx->emulation_required)) {66306630+66316631+ /* We don't emulate invalid state of a nested guest */66326632+ vmx->fail = is_guest_mode(vcpu);66336633+66346634+ vmx->exit_reason.full = EXIT_REASON_INVALID_STATE;66356635+ vmx->exit_reason.failed_vmentry = 1;66366636+ kvm_register_mark_available(vcpu, VCPU_EXREG_EXIT_INFO_1);66376637+ vmx->exit_qualification = ENTRY_FAIL_DEFAULT;66386638+ kvm_register_mark_available(vcpu, VCPU_EXREG_EXIT_INFO_2);66396639+ vmx->exit_intr_info = 0;66286640 return EXIT_FASTPATH_NONE;66416641+ }6629664266306643 trace_kvm_entry(vcpu);66316644···68486833 */68496834 tsx_ctrl = vmx_find_uret_msr(vmx, MSR_IA32_TSX_CTRL);68506835 if (tsx_ctrl)68516851- vmx->guest_uret_msrs[i].mask = ~(u64)TSX_CTRL_CPUID_CLEAR;68366836+ tsx_ctrl->mask = ~(u64)TSX_CTRL_CPUID_CLEAR;68526837 }6853683868546839 err = alloc_loaded_vmcs(&vmx->vmcs01);
+1-4
arch/x86/kvm/vmx/vmx.h
···248248 * only loaded into hardware when necessary, e.g. SYSCALL #UDs outside249249 * of 64-bit mode or if EFER.SCE=1, thus the SYSCALL MSRs don't need to250250 * be loaded into hardware if those conditions aren't met.251251- * nr_active_uret_msrs tracks the number of MSRs that need to be loaded252252- * into hardware when running the guest. guest_uret_msrs[] is resorted253253- * whenever the number of "active" uret MSRs is modified.254251 */255252 struct vmx_uret_msr guest_uret_msrs[MAX_NR_USER_RETURN_MSRS];256256- int nr_active_uret_msrs;257253 bool guest_uret_msrs_loaded;258254#ifdef CONFIG_X86_64259255 u64 msr_host_kernel_gs_base;···355359void vmx_set_host_fs_gs(struct vmcs_host_state *host, u16 fs_sel, u16 gs_sel,356360 unsigned long fs_base, unsigned long gs_base);357361int vmx_get_cpl(struct kvm_vcpu *vcpu);362362+bool vmx_emulation_required(struct kvm_vcpu *vcpu);358363unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu);359364void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);360365u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu);
+26-2
arch/x86/kvm/x86.c
···13321332 MSR_ARCH_PERFMON_EVENTSEL0 + 12, MSR_ARCH_PERFMON_EVENTSEL0 + 13,13331333 MSR_ARCH_PERFMON_EVENTSEL0 + 14, MSR_ARCH_PERFMON_EVENTSEL0 + 15,13341334 MSR_ARCH_PERFMON_EVENTSEL0 + 16, MSR_ARCH_PERFMON_EVENTSEL0 + 17,13351335+13361336+ MSR_K7_EVNTSEL0, MSR_K7_EVNTSEL1, MSR_K7_EVNTSEL2, MSR_K7_EVNTSEL3,13371337+ MSR_K7_PERFCTR0, MSR_K7_PERFCTR1, MSR_K7_PERFCTR2, MSR_K7_PERFCTR3,13381338+ MSR_F15H_PERF_CTL0, MSR_F15H_PERF_CTL1, MSR_F15H_PERF_CTL2,13391339+ MSR_F15H_PERF_CTL3, MSR_F15H_PERF_CTL4, MSR_F15H_PERF_CTL5,13401340+ MSR_F15H_PERF_CTR0, MSR_F15H_PERF_CTR1, MSR_F15H_PERF_CTR2,13411341+ MSR_F15H_PERF_CTR3, MSR_F15H_PERF_CTR4, MSR_F15H_PERF_CTR5,13351342};1336134313371344static u32 msrs_to_save[ARRAY_SIZE(msrs_to_save_all)];···29762969 offsetof(struct compat_vcpu_info, time));29772970 if (vcpu->xen.vcpu_time_info_set)29782971 kvm_setup_pvclock_page(v, &vcpu->xen.vcpu_time_info_cache, 0);29792979- if (v == kvm_get_vcpu(v->kvm, 0))29722972+ if (!v->vcpu_idx)29802973 kvm_hv_setup_tsc_page(v->kvm, &vcpu->hv_clock);29812974 return 0;29822975}···7665765876667659 /* Process a latched INIT or SMI, if any. 
*/76677660 kvm_make_request(KVM_REQ_EVENT, vcpu);76617661+76627662+ /*76637663+ * Even if KVM_SET_SREGS2 loaded PDPTRs out of band,76647664+ * on SMM exit we still need to reload them from76657665+ * guest memory76667666+ */76677667+ vcpu->arch.pdptrs_from_userspace = false;76687668 }7669766976707670 kvm_mmu_reset_context(vcpu);···1066610652 int r;10667106531066810654 vcpu->arch.last_vmentry_cpu = -1;1065510655+ vcpu->arch.regs_avail = ~0;1065610656+ vcpu->arch.regs_dirty = ~0;10669106571067010658 if (!irqchip_in_kernel(vcpu->kvm) || kvm_vcpu_is_reset_bsp(vcpu))1067110659 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;···10908108921090910893 kvm_set_rflags(vcpu, X86_EFLAGS_FIXED);1091010894 kvm_rip_write(vcpu, 0xfff0);1089510895+1089610896+ vcpu->arch.cr3 = 0;1089710897+ kvm_register_mark_dirty(vcpu, VCPU_EXREG_CR3);10911108981091210899 /*1091310900 * CR0.CD/NW are set on RESET, preserved on INIT. Note, some versions···11158111391115911140int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)1116011141{1114211142+ int ret;1114311143+1116111144 if (type)1116211145 return -EINVAL;1114611146+1114711147+ ret = kvm_page_track_init(kvm);1114811148+ if (ret)1114911149+ return ret;11163111501116411151 INIT_HLIST_HEAD(&kvm->arch.mask_notifier_list);1116511152 INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);···11199111741120011175 kvm_apicv_init(kvm);1120111176 kvm_hv_init_vm(kvm);1120211202- kvm_page_track_init(kvm);1120311177 kvm_mmu_init_vm(kvm);1120411178 kvm_xen_init_vm(kvm);1120511179
+48-18
arch/x86/net/bpf_jit_comp.c
···13411341 if (insn->imm == (BPF_AND | BPF_FETCH) ||13421342 insn->imm == (BPF_OR | BPF_FETCH) ||13431343 insn->imm == (BPF_XOR | BPF_FETCH)) {13441344- u8 *branch_target;13451344 bool is64 = BPF_SIZE(insn->code) == BPF_DW;13461345 u32 real_src_reg = src_reg;13461346+ u32 real_dst_reg = dst_reg;13471347+ u8 *branch_target;1347134813481349 /*13491350 * Can't be implemented with a single x86 insn.···13551354 emit_mov_reg(&prog, true, BPF_REG_AX, BPF_REG_0);13561355 if (src_reg == BPF_REG_0)13571356 real_src_reg = BPF_REG_AX;13571357+ if (dst_reg == BPF_REG_0)13581358+ real_dst_reg = BPF_REG_AX;1358135913591360 branch_target = prog;13601361 /* Load old value */13611362 emit_ldx(&prog, BPF_SIZE(insn->code),13621362- BPF_REG_0, dst_reg, insn->off);13631363+ BPF_REG_0, real_dst_reg, insn->off);13631364 /*13641365 * Perform the (commutative) operation locally,13651366 * put the result in the AUX_REG.···13721369 add_2reg(0xC0, AUX_REG, real_src_reg));13731370 /* Attempt to swap in new value */13741371 err = emit_atomic(&prog, BPF_CMPXCHG,13751375- dst_reg, AUX_REG, insn->off,13721372+ real_dst_reg, AUX_REG,13731373+ insn->off,13761374 BPF_SIZE(insn->code));13771375 if (WARN_ON(err))13781376 return err;···13871383 /* Restore R0 after clobbering RAX */13881384 emit_mov_reg(&prog, true, BPF_REG_0, BPF_REG_AX);13891385 break;13901390-13911386 }1392138713931388 err = emit_atomic(&prog, insn->imm, dst_reg, src_reg,13941394- insn->off, BPF_SIZE(insn->code));13891389+ insn->off, BPF_SIZE(insn->code));13951390 if (err)13961391 return err;13971392 break;···17471744}1748174517491746static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog,17501750- struct bpf_prog *p, int stack_size, bool mod_ret)17471747+ struct bpf_prog *p, int stack_size, bool save_ret)17511748{17521749 u8 *prog = *pprog;17531750 u8 *jmp_insn;···17801777 if (emit_call(&prog, p->bpf_func, prog))17811778 return -EINVAL;1782177917831783- /* BPF_TRAMP_MODIFY_RETURN trampolines can modify the 
return17801780+ /*17811781+ * BPF_TRAMP_MODIFY_RETURN trampolines can modify the return17841782 * of the previous call which is then passed on the stack to17851783 * the next BPF program.17841784+ *17851785+ * BPF_TRAMP_FENTRY trampoline may need to return the return17861786+ * value of BPF_PROG_TYPE_STRUCT_OPS prog.17861787 */17871787- if (mod_ret)17881788+ if (save_ret)17881789 emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);1789179017901791 /* replace 2 nops with JE insn, since jmp target is known */···18351828}1836182918371830static int invoke_bpf(const struct btf_func_model *m, u8 **pprog,18381838- struct bpf_tramp_progs *tp, int stack_size)18311831+ struct bpf_tramp_progs *tp, int stack_size,18321832+ bool save_ret)18391833{18401834 int i;18411835 u8 *prog = *pprog;1842183618431837 for (i = 0; i < tp->nr_progs; i++) {18441844- if (invoke_bpf_prog(m, &prog, tp->progs[i], stack_size, false))18381838+ if (invoke_bpf_prog(m, &prog, tp->progs[i], stack_size,18391839+ save_ret))18451840 return -EINVAL;18461841 }18471842 *pprog = prog;···1884187518851876 *pprog = prog;18861877 return 0;18781878+}18791879+18801880+static bool is_valid_bpf_tramp_flags(unsigned int flags)18811881+{18821882+ if ((flags & BPF_TRAMP_F_RESTORE_REGS) &&18831883+ (flags & BPF_TRAMP_F_SKIP_FRAME))18841884+ return false;18851885+18861886+ /*18871887+ * BPF_TRAMP_F_RET_FENTRY_RET is only used by bpf_struct_ops,18881888+ * and it must be used alone.18891889+ */18901890+ if ((flags & BPF_TRAMP_F_RET_FENTRY_RET) &&18911891+ (flags & ~BPF_TRAMP_F_RET_FENTRY_RET))18921892+ return false;18931893+18941894+ return true;18871895}1888189618891897/* Example:···19751949 struct bpf_tramp_progs *fmod_ret = &tprogs[BPF_TRAMP_MODIFY_RETURN];19761950 u8 **branches = NULL;19771951 u8 *prog;19521952+ bool save_ret;1978195319791954 /* x86-64 supports up to 6 arguments. 
7+ can be added in the future */19801955 if (nr_args > 6)19811956 return -ENOTSUPP;1982195719831983- if ((flags & BPF_TRAMP_F_RESTORE_REGS) &&19841984- (flags & BPF_TRAMP_F_SKIP_FRAME))19581958+ if (!is_valid_bpf_tramp_flags(flags))19851959 return -EINVAL;1986196019871987- if (flags & BPF_TRAMP_F_CALL_ORIG)19881988- stack_size += 8; /* room for return value of orig_call */19611961+ /* room for return value of orig_call or fentry prog */19621962+ save_ret = flags & (BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_RET_FENTRY_RET);19631963+ if (save_ret)19641964+ stack_size += 8;1989196519901966 if (flags & BPF_TRAMP_F_IP_ARG)19911967 stack_size += 8; /* room for IP address argument */···20332005 }2034200620352007 if (fentry->nr_progs)20362036- if (invoke_bpf(m, &prog, fentry, stack_size))20082008+ if (invoke_bpf(m, &prog, fentry, stack_size,20092009+ flags & BPF_TRAMP_F_RET_FENTRY_RET))20372010 return -EINVAL;2038201120392012 if (fmod_ret->nr_progs) {···20812052 }2082205320832054 if (fexit->nr_progs)20842084- if (invoke_bpf(m, &prog, fexit, stack_size)) {20552055+ if (invoke_bpf(m, &prog, fexit, stack_size, false)) {20852056 ret = -EINVAL;20862057 goto cleanup;20872058 }···21012072 ret = -EINVAL;21022073 goto cleanup;21032074 }21042104- /* restore original return value back into RAX */21052105- emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, -8);21062075 }20762076+ /* restore return value of orig_call or fentry prog back into RAX */20772077+ if (save_ret)20782078+ emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, -8);2107207921082080 EMIT1(0x5B); /* pop rbx */21092081 EMIT1(0xC9); /* leave */
+3-13
block/bfq-iosched.c
···26622662 * are likely to increase the throughput.26632663 */26642664 bfqq->new_bfqq = new_bfqq;26652665- /*26662666- * The above assignment schedules the following redirections:26672667- * each time some I/O for bfqq arrives, the process that26682668- * generated that I/O is disassociated from bfqq and26692669- * associated with new_bfqq. Here we increases new_bfqq->ref26702670- * in advance, adding the number of processes that are26712671- * expected to be associated with new_bfqq as they happen to26722672- * issue I/O.26732673- */26742665 new_bfqq->ref += process_refs;26752666 return new_bfqq;26762667}···27232732 void *io_struct, bool request, struct bfq_io_cq *bic)27242733{27252734 struct bfq_queue *in_service_bfqq, *new_bfqq;27262726-27272727- /* if a merge has already been setup, then proceed with that first */27282728- if (bfqq->new_bfqq)27292729- return bfqq->new_bfqq;2730273527312736 /*27322737 * Check delayed stable merge for rotational or non-queueing···28242837 */28252838 if (bfq_too_late_for_merging(bfqq))28262839 return NULL;28402840+28412841+ if (bfqq->new_bfqq)28422842+ return bfqq->new_bfqq;2827284328282844 if (!io_struct || unlikely(bfqq == &bfqd->oom_bfqq))28292845 return NULL;
+12
drivers/acpi/nfit/core.c
···30073007 ndr_desc->target_node = NUMA_NO_NODE;30083008 }3009300930103010+ /* Fallback to address based numa information if node lookup failed */30113011+ if (ndr_desc->numa_node == NUMA_NO_NODE) {30123012+ ndr_desc->numa_node = memory_add_physaddr_to_nid(spa->address);30133013+ dev_info(acpi_desc->dev, "changing numa node from %d to %d for nfit region [%pa-%pa]",30143014+ NUMA_NO_NODE, ndr_desc->numa_node, &res.start, &res.end);30153015+ }30163016+ if (ndr_desc->target_node == NUMA_NO_NODE) {30173017+ ndr_desc->target_node = phys_to_target_node(spa->address);30183018+ dev_info(acpi_desc->dev, "changing target node from %d to %d for nfit region [%pa-%pa]",30193019+ NUMA_NO_NODE, ndr_desc->numa_node, &res.start, &res.end);30203020+ }30213021+30103022 /*30113023 * Persistence domain bits are hierarchical, if30123024 * ACPI_NFIT_CAPABILITY_CACHE_FLUSH is set then
+63-27
drivers/base/core.c
···95959696 list_add(&link->s_hook, &sup->consumers);9797 list_add(&link->c_hook, &con->suppliers);9898+ pr_debug("%pfwP Linked as a fwnode consumer to %pfwP\n",9999+ con, sup);98100out:99101 mutex_unlock(&fwnode_link_lock);100102101103 return ret;104104+}105105+106106+/**107107+ * __fwnode_link_del - Delete a link between two fwnode_handles.108108+ * @link: the fwnode_link to be deleted109109+ *110110+ * The fwnode_link_lock needs to be held when this function is called.111111+ */112112+static void __fwnode_link_del(struct fwnode_link *link)113113+{114114+ pr_debug("%pfwP Dropping the fwnode link to %pfwP\n",115115+ link->consumer, link->supplier);116116+ list_del(&link->s_hook);117117+ list_del(&link->c_hook);118118+ kfree(link);102119}103120104121/**···129112 struct fwnode_link *link, *tmp;130113131114 mutex_lock(&fwnode_link_lock);132132- list_for_each_entry_safe(link, tmp, &fwnode->suppliers, c_hook) {133133- list_del(&link->s_hook);134134- list_del(&link->c_hook);135135- kfree(link);136136- }115115+ list_for_each_entry_safe(link, tmp, &fwnode->suppliers, c_hook)116116+ __fwnode_link_del(link);137117 mutex_unlock(&fwnode_link_lock);138118}139119···145131 struct fwnode_link *link, *tmp;146132147133 mutex_lock(&fwnode_link_lock);148148- list_for_each_entry_safe(link, tmp, &fwnode->consumers, s_hook) {149149- list_del(&link->s_hook);150150- list_del(&link->c_hook);151151- kfree(link);152152- }134134+ list_for_each_entry_safe(link, tmp, &fwnode->consumers, s_hook)135135+ __fwnode_link_del(link);153136 mutex_unlock(&fwnode_link_lock);154137}155138···986975{987976 struct device_link *link;988977 int ret = 0;978978+ struct fwnode_handle *sup_fw;989979990980 /*991981 * Device waiting for supplier to become available is not allowed to···995983 mutex_lock(&fwnode_link_lock);996984 if (dev->fwnode && !list_empty(&dev->fwnode->suppliers) &&997985 !fw_devlink_is_permissive()) {998998- dev_dbg(dev, "probe deferral - wait for supplier %pfwP\n",999999- 
list_first_entry(&dev->fwnode->suppliers,10001000- struct fwnode_link,10011001- c_hook)->supplier);986986+ sup_fw = list_first_entry(&dev->fwnode->suppliers,987987+ struct fwnode_link,988988+ c_hook)->supplier;989989+ dev_err_probe(dev, -EPROBE_DEFER, "wait for supplier %pfwP\n",990990+ sup_fw);1002991 mutex_unlock(&fwnode_link_lock);1003992 return -EPROBE_DEFER;1004993 }···10141001 if (link->status != DL_STATE_AVAILABLE &&10151002 !(link->flags & DL_FLAG_SYNC_STATE_ONLY)) {10161003 device_links_missing_supplier(dev);10171017- dev_dbg(dev, "probe deferral - supplier %s not ready\n",10181018- dev_name(link->supplier));10041004+ dev_err_probe(dev, -EPROBE_DEFER,10051005+ "supplier %s not ready\n",10061006+ dev_name(link->supplier));10191007 ret = -EPROBE_DEFER;10201008 break;10211009 }···17361722 struct device *sup_dev;17371723 int ret = 0;1738172417251725+ /*17261726+ * In some cases, a device P might also be a supplier to its child node17271727+ * C. However, this would defer the probe of C until the probe of P17281728+ * completes successfully. This is perfectly fine in the device driver17291729+ * model. device_add() doesn't guarantee probe completion of the device17301730+ * by the time it returns.17311731+ *17321732+ * However, there are a few drivers that assume C will finish probing17331733+ * as soon as it's added and before P finishes probing. So, we provide17341734+ * a flag to let fw_devlink know not to delay the probe of C until the17351735+ * probe of P completes successfully.17361736+ *17371737+ * When such a flag is set, we can't create device links where P is the17381738+ * supplier of C as that would delay the probe of C.17391739+ */17401740+ if (sup_handle->flags & FWNODE_FLAG_NEEDS_CHILD_BOUND_ON_ADD &&17411741+ fwnode_is_ancestor_of(sup_handle, con->fwnode))17421742+ return -EINVAL;17431743+17391744 sup_dev = get_dev_from_fwnode(sup_handle);17401745 if (sup_dev) {17411746 /*···18051772 * be broken by applying logic. 
Check for these types of cycles and18061773 * break them so that devices in the cycle probe properly.18071774 *18081808- * If the supplier's parent is dependent on the consumer, then18091809- * the consumer-supplier dependency is a false dependency. So,18101810- * treat it as an invalid link.17751775+ * If the supplier's parent is dependent on the consumer, then the17761776+ * consumer and supplier have a cyclic dependency. Since fw_devlink17771777+ * can't tell which of the inferred dependencies are incorrect, don't17781778+ * enforce probe ordering between any of the devices in this cyclic17791779+ * dependency. Do this by relaxing all the fw_devlink device links in17801780+ * this cycle and by treating the fwnode link between the consumer and17811781+ * the supplier as an invalid dependency.18111782 */18121783 sup_dev = fwnode_get_next_parent_dev(sup_handle);18131784 if (sup_dev && device_is_dependent(con, sup_dev)) {18141814- dev_dbg(con, "Not linking to %pfwP - False link\n",18151815- sup_handle);17851785+ dev_info(con, "Fixing up cyclic dependency with %pfwP (%s)\n",17861786+ sup_handle, dev_name(sup_dev));17871787+ device_links_write_lock();17881788+ fw_devlink_relax_cycle(con, sup_dev);17891789+ device_links_write_unlock();18161790 ret = -EINVAL;18171791 } else {18181792 /*···18981858 if (!own_link || ret == -EAGAIN)18991859 continue;1900186019011901- list_del(&link->s_hook);19021902- list_del(&link->c_hook);19031903- kfree(link);18611861+ __fwnode_link_del(link);19041862 }19051863}19061864···19501912 if (!own_link || ret == -EAGAIN)19511913 continue;1952191419531953- list_del(&link->s_hook);19541954- list_del(&link->c_hook);19551955- kfree(link);19151915+ __fwnode_link_del(link);1956191619571917 /* If no device link was created, nothing more to do. */19581918 if (ret)
···778778 in_place ? DMA_BIDIRECTIONAL779779 : DMA_TO_DEVICE);780780 if (ret)781781- goto e_ctx;781781+ goto e_aad;782782783783 if (in_place) {784784 dst = src;···863863 op.u.aes.size = 0;864864 ret = cmd_q->ccp->vdata->perform->aes(&op);865865 if (ret)866866- goto e_dst;866866+ goto e_final_wa;867867868868 if (aes->action == CCP_AES_ACTION_ENCRYPT) {869869 /* Put the ciphered tag after the ciphertext. */···873873 ret = ccp_init_dm_workarea(&tag, cmd_q, authsize,874874 DMA_BIDIRECTIONAL);875875 if (ret)876876- goto e_tag;876876+ goto e_final_wa;877877 ret = ccp_set_dm_area(&tag, 0, p_tag, 0, authsize);878878- if (ret)879879- goto e_tag;878878+ if (ret) {879879+ ccp_dm_free(&tag);880880+ goto e_final_wa;881881+ }880882881883 ret = crypto_memneq(tag.address, final_wa.address,882884 authsize) ? -EBADMSG : 0;883885 ccp_dm_free(&tag);884886 }885887886886-e_tag:888888+e_final_wa:887889 ccp_dm_free(&final_wa);888890889891e_dst:
+2-9
drivers/gpio/gpio-pca953x.c
···468468 mutex_lock(&chip->i2c_lock);469469 ret = regmap_read(chip->regmap, inreg, ®_val);470470 mutex_unlock(&chip->i2c_lock);471471- if (ret < 0) {472472- /*473473- * NOTE:474474- * diagnostic already emitted; that's all we should475475- * do unless gpio_*_value_cansleep() calls become different476476- * from their nonsleeping siblings (and report faults).477477- */478478- return 0;479479- }471471+ if (ret < 0)472472+ return ret;480473481474 return !!(reg_val & bit);482475}
+22
drivers/gpio/gpio-rockchip.c
···689689 struct device_node *pctlnp = of_get_parent(np);690690 struct pinctrl_dev *pctldev = NULL;691691 struct rockchip_pin_bank *bank = NULL;692692+ struct rockchip_pin_output_deferred *cfg;692693 static int gpio;693694 int id, ret;694695···717716 if (ret)718717 return ret;719718719719+ /*720720+ * Prevent clashes with a deferred output setting721721+ * being added right at this moment.722722+ */723723+ mutex_lock(&bank->deferred_lock);724724+720725 ret = rockchip_gpiolib_register(bank);721726 if (ret) {722727 clk_disable_unprepare(bank->clk);728728+ mutex_unlock(&bank->deferred_lock);723729 return ret;724730 }731731+732732+ while (!list_empty(&bank->deferred_output)) {733733+ cfg = list_first_entry(&bank->deferred_output,734734+ struct rockchip_pin_output_deferred, head);735735+ list_del(&cfg->head);736736+737737+ ret = rockchip_gpio_direction_output(&bank->gpio_chip, cfg->pin, cfg->arg);738738+ if (ret)739739+ dev_warn(dev, "setting output pin %u to %u failed\n", cfg->pin, cfg->arg);740740+741741+ kfree(cfg);742742+ }743743+744744+ mutex_unlock(&bank->deferred_lock);725745726746 platform_set_drvdata(pdev, bank);727747 dev_info(dev, "probed %pOF\n", np);
+31
drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
···837837 return 0;838838}839839840840+/* Mirrors the is_displayable check in radeonsi's gfx6_compute_surface */841841+static int check_tiling_flags_gfx6(struct amdgpu_framebuffer *afb)842842+{843843+ u64 micro_tile_mode;844844+845845+ /* Zero swizzle mode means linear */846846+ if (AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0)847847+ return 0;848848+849849+ micro_tile_mode = AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE);850850+ switch (micro_tile_mode) {851851+ case 0: /* DISPLAY */852852+ case 3: /* RENDER */853853+ return 0;854854+ default:855855+ drm_dbg_kms(afb->base.dev,856856+ "Micro tile mode %llu not supported for scanout\n",857857+ micro_tile_mode);858858+ return -EINVAL;859859+ }860860+}861861+840862static void get_block_dimensions(unsigned int block_log2, unsigned int cpp,841863 unsigned int *width, unsigned int *height)842864{···11251103 const struct drm_mode_fb_cmd2 *mode_cmd,11261104 struct drm_gem_object *obj)11271105{11061106+ struct amdgpu_device *adev = drm_to_adev(dev);11281107 int ret, i;1129110811301109 /*···11441121 ret = amdgpu_display_get_fb_info(rfb, &rfb->tiling_flags, &rfb->tmz_surface);11451122 if (ret)11461123 return ret;11241124+11251125+ if (!dev->mode_config.allow_fb_modifiers) {11261126+ drm_WARN_ONCE(dev, adev->family >= AMDGPU_FAMILY_AI,11271127+ "GFX9+ requires FB check based on format modifier\n");11281128+ ret = check_tiling_flags_gfx6(rfb);11291129+ if (ret)11301130+ return ret;11311131+ }1147113211481133 if (dev->mode_config.allow_fb_modifiers &&11491134 !(rfb->base.flags & DRM_MODE_FB_MODIFIERS)) {
+1-1
drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
···3599359936003600 /* set static priority for a queue/ring */36013601 gfx_v9_0_mqd_set_priority(ring, mqd);36023602- mqd->cp_hqd_quantum = RREG32(mmCP_HQD_QUANTUM);36023602+ mqd->cp_hqd_quantum = RREG32_SOC15(GC, 0, mmCP_HQD_QUANTUM);3603360336043604 /* map_queues packet doesn't need activate the queue,36053605 * so only kiq need set this field.
+2-1
drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
···10981098{10991099 struct amdgpu_device *adev = (struct amdgpu_device *)handle;1100110011011101+ gmc_v10_0_gart_disable(adev);11021102+11011103 if (amdgpu_sriov_vf(adev)) {11021104 /* full access mode, so don't touch any GMC register */11031105 DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");···1108110611091107 amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0);11101108 amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);11111111- gmc_v10_0_gart_disable(adev);1112110911131110 return 0;11141111}
+2-1
drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
···17941794{17951795 struct amdgpu_device *adev = (struct amdgpu_device *)handle;1796179617971797+ gmc_v9_0_gart_disable(adev);17981798+17971799 if (amdgpu_sriov_vf(adev)) {17981800 /* full access mode, so don't touch any GMC register */17991801 DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");···1804180218051803 amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0);18061804 amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);18071807- gmc_v9_0_gart_disable(adev);1808180518091806 return 0;18101807}
+8
drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
···868868 msleep(1000);869869 }870870871871+ /* TODO: check whether can submit a doorbell request to raise872872+ * a doorbell fence to exit gfxoff.873873+ */874874+ if (adev->in_s0ix)875875+ amdgpu_gfx_off_ctrl(adev, false);876876+871877 sdma_v5_2_soft_reset(adev);872878 /* unhalt the MEs */873879 sdma_v5_2_enable(adev, true);···882876883877 /* start the gfx rings and rlc compute queues */884878 r = sdma_v5_2_gfx_resume(adev);879879+ if (adev->in_s0ix)880880+ amdgpu_gfx_off_ctrl(adev, true);885881 if (r)886882 return r;887883 r = sdma_v5_2_rlc_resume(adev);
···18261826 if (panel_mode == DP_PANEL_MODE_EDP) {18271827 struct cp_psp *cp_psp = &stream->ctx->cp_psp;1828182818291829- if (cp_psp && cp_psp->funcs.enable_assr) {18301830- if (!cp_psp->funcs.enable_assr(cp_psp->handle, link)) {18311831- /* since eDP implies ASSR on, change panel18321832- * mode to disable ASSR18331833- */18341834- panel_mode = DP_PANEL_MODE_DEFAULT;18351835- }18361836- }18291829+ if (cp_psp && cp_psp->funcs.enable_assr)18301830+ /* ASSR is bound to fail with unsigned PSP18311831+ * verstage used during devlopment phase.18321832+ * Report and continue with eDP panel mode to18331833+ * perform eDP link training with right settings18341834+ */18351835+ cp_psp->funcs.enable_assr(cp_psp->handle, link);18371836 }18381837#endif18391838
+1-3
drivers/gpu/drm/exynos/exynos5433_drm_decon.c
···793793{794794 struct device *dev = &pdev->dev;795795 struct decon_context *ctx;796796- struct resource *res;797796 int ret;798797 int i;799798···817818 ctx->clks[i] = clk;818819 }819820820820- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);821821- ctx->addr = devm_ioremap_resource(dev, res);821821+ ctx->addr = devm_platform_ioremap_resource(pdev, 0);822822 if (IS_ERR(ctx->addr))823823 return PTR_ERR(ctx->addr);824824
+1-3
drivers/gpu/drm/exynos/exynos_drm_dsi.c
···17381738static int exynos_dsi_probe(struct platform_device *pdev)17391739{17401740 struct device *dev = &pdev->dev;17411741- struct resource *res;17421741 struct exynos_dsi *dsi;17431742 int ret, i;17441743···17881789 }17891790 }1790179117911791- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);17921792- dsi->reg_base = devm_ioremap_resource(dev, res);17921792+ dsi->reg_base = devm_platform_ioremap_resource(pdev, 0);17931793 if (IS_ERR(dsi->reg_base))17941794 return PTR_ERR(dsi->reg_base);17951795
···362362 return 0;363363}364364365365-static int sw_fence_dummy_notify(struct i915_sw_fence *sf,366366- enum i915_sw_fence_notify state)365365+static int __i915_sw_fence_call366366+sw_fence_dummy_notify(struct i915_sw_fence *sf,367367+ enum i915_sw_fence_notify state)367368{368369 return NOTIFY_DONE;369370}
-2
drivers/gpu/drm/i915/gt/intel_rps.c
···882882 if (!intel_rps_is_enabled(rps))883883 return;884884885885- GEM_BUG_ON(atomic_read(&rps->num_waiters));886886-887885 if (!intel_rps_clear_active(rps))888886 return;889887
···576576577577 /* No one is going to touch shadow bb from now on. */578578 i915_gem_object_flush_map(bb->obj);579579- i915_gem_object_unlock(bb->obj);579579+ i915_gem_ww_ctx_fini(&ww);580580 }581581 }582582 return 0;···630630 return ret;631631 }632632633633- i915_gem_object_unlock(wa_ctx->indirect_ctx.obj);633633+ i915_gem_ww_ctx_fini(&ww);634634635635 /* FIXME: we are not tracking our pinned VMA leaving it636636 * up to the core to fix up the stray pin_count upon
···255255 if (!privdata->cl_data)256256 return -ENOMEM;257257258258- rc = devm_add_action_or_reset(&pdev->dev, amd_mp2_pci_remove, privdata);258258+ mp2_select_ops(privdata);259259+260260+ rc = amd_sfh_hid_client_init(privdata);259261 if (rc)260262 return rc;261263262262- mp2_select_ops(privdata);263263-264264- return amd_sfh_hid_client_init(privdata);264264+ return devm_add_action_or_reset(&pdev->dev, amd_mp2_pci_remove, privdata);265265}266266267267static int __maybe_unused amd_mp2_pci_resume(struct device *dev)
+7
drivers/hid/hid-apple.c
···336336337337/*338338 * MacBook JIS keyboard has wrong logical maximum339339+ * Magic Keyboard JIS has wrong logical maximum339340 */340341static __u8 *apple_report_fixup(struct hid_device *hdev, __u8 *rdesc,341342 unsigned int *rsize)342343{343344 struct apple_sc *asc = hid_get_drvdata(hdev);345345+346346+ if(*rsize >=71 && rdesc[70] == 0x65 && rdesc[64] == 0x65) {347347+ hid_info(hdev,348348+ "fixing up Magic Keyboard JIS report descriptor\n");349349+ rdesc[64] = rdesc[70] = 0xe7;350350+ }344351345352 if ((asc->quirks & APPLE_RDESC_JIS) && *rsize >= 60 &&346353 rdesc[53] == 0x65 && rdesc[59] == 0x65) {
+10-3
drivers/hid/hid-betopff.c
···5656{5757 struct betopff_device *betopff;5858 struct hid_report *report;5959- struct hid_input *hidinput =6060- list_first_entry(&hid->inputs, struct hid_input, list);5959+ struct hid_input *hidinput;6160 struct list_head *report_list =6261 &hid->report_enum[HID_OUTPUT_REPORT].report_list;6363- struct input_dev *dev = hidinput->input;6262+ struct input_dev *dev;6463 int field_count = 0;6564 int error;6665 int i, j;6666+6767+ if (list_empty(&hid->inputs)) {6868+ hid_err(hid, "no inputs found\n");6969+ return -ENODEV;7070+ }7171+7272+ hidinput = list_first_entry(&hid->inputs, struct hid_input, list);7373+ dev = hidinput->input;67746875 if (list_empty(report_list)) {6976 hid_err(hid, "no output reports found\n");
+3-1
drivers/hid/hid-u2fzero.c
···198198 }199199200200 ret = u2fzero_recv(dev, &req, &resp);201201- if (ret < 0)201201+202202+ /* ignore errors or packets without data */203203+ if (ret < offsetof(struct u2f_hid_msg, init.data))202204 return 0;203205204206 /* only take the minimum amount of data it is safe to take */
···989989 return ret;990990991991 /* check external clock presence */992992- extclk = devm_clk_get(st->dev, NULL);993993- if (!IS_ERR(extclk)) {992992+ extclk = devm_clk_get_optional(st->dev, NULL);993993+ if (IS_ERR(extclk))994994+ return dev_err_probe(st->dev, PTR_ERR(extclk),995995+ "Failed to get external clock\n");996996+997997+ if (extclk) {994998 unsigned long rate_hz;995999 u8 pre = 0, div, tbctl;9961000 u64 aux;
+9-3
drivers/hwmon/mlxreg-fan.c
···315315{316316 struct mlxreg_fan *fan = cdev->devdata;317317 unsigned long cur_state;318318+ int i, config = 0;318319 u32 regval;319319- int i;320320 int err;321321322322 /*···329329 * overwritten.330330 */331331 if (state >= MLXREG_FAN_SPEED_MIN && state <= MLXREG_FAN_SPEED_MAX) {332332+ /*333333+ * This is configuration change, which is only supported through sysfs.334334+ * For configuration non-zero value is to be returned to avoid thermal335335+ * statistics update.336336+ */337337+ config = 1;332338 state -= MLXREG_FAN_MAX_STATE;333339 for (i = 0; i < state; i++)334340 fan->cooling_levels[i] = state;···349343350344 cur_state = MLXREG_FAN_PWM_DUTY2STATE(regval);351345 if (state < cur_state)352352- return 0;346346+ return config;353347354348 state = cur_state;355349 }···365359 dev_err(fan->dev, "Failed to write PWM duty\n");366360 return err;367361 }368368- return 0;362362+ return config;369363}370364371365static const struct thermal_cooling_device_ops mlxreg_fan_cooling_ops = {
+5-12
drivers/hwmon/occ/common.c
···340340 if (val == OCC_TEMP_SENSOR_FAULT)341341 return -EREMOTEIO;342342343343- /*344344- * VRM doesn't return temperature, only alarm bit. This345345- * attribute maps to tempX_alarm instead of tempX_input for346346- * VRM347347- */348348- if (temp->fru_type != OCC_FRU_TYPE_VRM) {349349- /* sensor not ready */350350- if (val == 0)351351- return -EAGAIN;343343+ /* sensor not ready */344344+ if (val == 0)345345+ return -EAGAIN;352346353353- val *= 1000;354354- }347347+ val *= 1000;355348 break;356349 case 2:357350 val = temp->fru_type;···879886 0, i);880887 attr++;881888882882- if (sensors->temp.version > 1 &&889889+ if (sensors->temp.version == 2 &&883890 temp->fru_type == OCC_FRU_TYPE_VRM) {884891 snprintf(attr->name, sizeof(attr->name),885892 "temp%d_alarm", s);
···17461746 }17471747}1748174817491749-static void cma_cancel_listens(struct rdma_id_private *id_priv)17491749+static void _cma_cancel_listens(struct rdma_id_private *id_priv)17501750{17511751 struct rdma_id_private *dev_id_priv;17521752+17531753+ lockdep_assert_held(&lock);1752175417531755 /*17541756 * Remove from listen_any_list to prevent added devices from spawning17551757 * additional listen requests.17561758 */17571757- mutex_lock(&lock);17581759 list_del(&id_priv->list);1759176017601761 while (!list_empty(&id_priv->listen_list)) {···17691768 rdma_destroy_id(&dev_id_priv->id);17701769 mutex_lock(&lock);17711770 }17711771+}17721772+17731773+static void cma_cancel_listens(struct rdma_id_private *id_priv)17741774+{17751775+ mutex_lock(&lock);17761776+ _cma_cancel_listens(id_priv);17721777 mutex_unlock(&lock);17731778}17741779···17831776{17841777 switch (state) {17851778 case RDMA_CM_ADDR_QUERY:17791779+ /*17801780+ * We can avoid doing the rdma_addr_cancel() based on state,17811781+ * only RDMA_CM_ADDR_QUERY has a work that could still execute.17821782+ * Notice that the addr_handler work could still be exiting17831783+ * outside this state, however due to the interaction with the17841784+ * handler_mutex the work is guaranteed not to touch id_priv17851785+ * during exit.17861786+ */17861787 rdma_addr_cancel(&id_priv->id.route.addr.dev_addr);17871788 break;17881789 case RDMA_CM_ROUTE_QUERY:···18251810static void destroy_mc(struct rdma_id_private *id_priv,18261811 struct cma_multicast *mc)18271812{18131813+ bool send_only = mc->join_state == BIT(SENDONLY_FULLMEMBER_JOIN);18141814+18281815 if (rdma_cap_ib_mcast(id_priv->id.device, id_priv->id.port_num))18291816 ib_sa_free_multicast(mc->sa_mc);18301817···1843182618441827 cma_set_mgid(id_priv, (struct sockaddr *)&mc->addr,18451828 &mgid);18461846- cma_igmp_send(ndev, &mgid, false);18291829+18301830+ if (!send_only)18311831+ cma_igmp_send(ndev, &mgid, false);18321832+18471833 dev_put(ndev);18481834 
}18491835···25942574 return 0;2595257525962576err_listen:25972597- list_del(&id_priv->list);25772577+ _cma_cancel_listens(id_priv);25982578 mutex_unlock(&lock);25992579 if (to_destroy)26002580 rdma_destroy_id(&to_destroy->id);···34333413 if (dst_addr->sa_family == AF_IB) {34343414 ret = cma_resolve_ib_addr(id_priv);34353415 } else {34163416+ /*34173417+ * The FSM can return back to RDMA_CM_ADDR_BOUND after34183418+ * rdma_resolve_ip() is called, eg through the error34193419+ * path in addr_handler(). If this happens the existing34203420+ * request must be canceled before issuing a new one.34213421+ * Since canceling a request is a bit slow and this34223422+ * oddball path is rare, keep track once a request has34233423+ * been issued. The track turns out to be a permanent34243424+ * state since this is the only cancel as it is34253425+ * immediately before rdma_resolve_ip().34263426+ */34273427+ if (id_priv->used_resolve_ip)34283428+ rdma_addr_cancel(&id->route.addr.dev_addr);34293429+ else34303430+ id_priv->used_resolve_ip = 1;34363431 ret = rdma_resolve_ip(cma_src_addr(id_priv), dst_addr,34373432 &id->route.addr.dev_addr,34383433 timeout_ms, addr_handler,···38063771 int ret;3807377238083773 if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_LISTEN)) {37743774+ struct sockaddr_in any_in = {37753775+ .sin_family = AF_INET,37763776+ .sin_addr.s_addr = htonl(INADDR_ANY),37773777+ };37783778+38093779 /* For a well behaved ULP state will be RDMA_CM_IDLE */38103810- id->route.addr.src_addr.ss_family = AF_INET;38113811- ret = rdma_bind_addr(id, cma_src_addr(id_priv));37803780+ ret = rdma_bind_addr(id, (struct sockaddr *)&any_in);38123781 if (ret)38133782 return ret;38143783 if (WARN_ON(!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND,
···3333 unsigned int pointer_read;3434 unsigned int pointer_write;3535 struct tty_port tty_port;3636+ bool tty_registered;3637 union scc2698_channel __iomem *regs;3738 union scc2698_block __iomem *block_regs;3839 unsigned int board_id;···8281 return 0;8382}84838585-static int ipoctal_open(struct tty_struct *tty, struct file *file)8484+static int ipoctal_install(struct tty_driver *driver, struct tty_struct *tty)8685{8786 struct ipoctal_channel *channel = dev_get_drvdata(tty->dev);8887 struct ipoctal *ipoctal = chan_to_ipoctal(channel, tty->index);8989- int err;9090-9191- tty->driver_data = channel;8888+ int res;92899390 if (!ipack_get_carrier(ipoctal->dev))9491 return -EBUSY;95929696- err = tty_port_open(&channel->tty_port, tty, file);9797- if (err)9898- ipack_put_carrier(ipoctal->dev);9393+ res = tty_standard_install(driver, tty);9494+ if (res)9595+ goto err_put_carrier;9996100100- return err;9797+ tty->driver_data = channel;9898+9999+ return 0;100100+101101+err_put_carrier:102102+ ipack_put_carrier(ipoctal->dev);103103+104104+ return res;105105+}106106+107107+static int ipoctal_open(struct tty_struct *tty, struct file *file)108108+{109109+ struct ipoctal_channel *channel = tty->driver_data;110110+111111+ return tty_port_open(&channel->tty_port, tty, file);101112}102113103114static void ipoctal_reset_stats(struct ipoctal_stats *stats)···277264 int res;278265 int i;279266 struct tty_driver *tty;280280- char name[20];281267 struct ipoctal_channel *channel;282268 struct ipack_region *region;283269 void __iomem *addr;···367355 /* Fill struct tty_driver with ipoctal data */368356 tty->owner = THIS_MODULE;369357 tty->driver_name = KBUILD_MODNAME;370370- sprintf(name, KBUILD_MODNAME ".%d.%d.", bus_nr, slot);371371- tty->name = name;358358+ tty->name = kasprintf(GFP_KERNEL, KBUILD_MODNAME ".%d.%d.", bus_nr, slot);359359+ if (!tty->name) {360360+ res = -ENOMEM;361361+ goto err_put_driver;362362+ }372363 tty->major = 0;373364374365 tty->minor_start = 0;···386371 res = 
tty_register_driver(tty);387372 if (res) {388373 dev_err(&ipoctal->dev->dev, "Can't register tty driver.\n");389389- tty_driver_kref_put(tty);390390- return res;374374+ goto err_free_name;391375 }392376393377 /* Save struct tty_driver for use it when uninstalling the device */···397383398384 channel = &ipoctal->channel[i];399385 tty_port_init(&channel->tty_port);400400- tty_port_alloc_xmit_buf(&channel->tty_port);386386+ res = tty_port_alloc_xmit_buf(&channel->tty_port);387387+ if (res)388388+ continue;401389 channel->tty_port.ops = &ipoctal_tty_port_ops;402390403391 ipoctal_reset_stats(&channel->stats);···407391 spin_lock_init(&channel->lock);408392 channel->pointer_read = 0;409393 channel->pointer_write = 0;410410- tty_dev = tty_port_register_device(&channel->tty_port, tty, i, NULL);394394+ tty_dev = tty_port_register_device_attr(&channel->tty_port, tty,395395+ i, NULL, channel, NULL);411396 if (IS_ERR(tty_dev)) {412397 dev_err(&ipoctal->dev->dev, "Failed to register tty device.\n");398398+ tty_port_free_xmit_buf(&channel->tty_port);413399 tty_port_destroy(&channel->tty_port);414400 continue;415401 }416416- dev_set_drvdata(tty_dev, channel);402402+ channel->tty_registered = true;417403 }418404419405 /*···427409 ipoctal_irq_handler, ipoctal);428410429411 return 0;412412+413413+err_free_name:414414+ kfree(tty->name);415415+err_put_driver:416416+ tty_driver_kref_put(tty);417417+418418+ return res;430419}431420432421static inline int ipoctal_copy_write_buffer(struct ipoctal_channel *channel,···673648674649static const struct tty_operations ipoctal_fops = {675650 .ioctl = NULL,651651+ .install = ipoctal_install,676652 .open = ipoctal_open,677653 .close = ipoctal_close,678654 .write = ipoctal_write_tty,···716690717691 for (i = 0; i < NR_CHANNELS; i++) {718692 struct ipoctal_channel *channel = &ipoctal->channel[i];693693+694694+ if (!channel->tty_registered)695695+ continue;696696+719697 tty_unregister_device(ipoctal->tty_drv, i);720698 
tty_port_free_xmit_buf(&channel->tty_port);721699 tty_port_destroy(&channel->tty_port);722700 }723701724702 tty_unregister_driver(ipoctal->tty_drv);703703+ kfree(ipoctal->tty_drv->name);725704 tty_driver_kref_put(ipoctal->tty_drv);726705 kfree(ipoctal);727706}
+9-9
drivers/media/platform/s5p-jpeg/jpeg-core.c
···11401140 continue;11411141 length = 0;11421142 switch (c) {11431143- /* SOF0: baseline JPEG */11441144- case SOF0:11431143+ /* JPEG_MARKER_SOF0: baseline JPEG */11441144+ case JPEG_MARKER_SOF0:11451145 if (get_word_be(&jpeg_buffer, &word))11461146 break;11471147 length = (long)word - 2;···11721172 notfound = 0;11731173 break;1174117411751175- case DQT:11751175+ case JPEG_MARKER_DQT:11761176 if (get_word_be(&jpeg_buffer, &word))11771177 break;11781178 length = (long)word - 2;···11851185 skip(&jpeg_buffer, length);11861186 break;1187118711881188- case DHT:11881188+ case JPEG_MARKER_DHT:11891189 if (get_word_be(&jpeg_buffer, &word))11901190 break;11911191 length = (long)word - 2;···11981198 skip(&jpeg_buffer, length);11991199 break;1200120012011201- case SOS:12011201+ case JPEG_MARKER_SOS:12021202 sos = jpeg_buffer.curr - 2; /* 0xffda */12031203 break;1204120412051205 /* skip payload-less markers */12061206- case RST ... RST + 7:12071207- case SOI:12081208- case EOI:12091209- case TEM:12061206+ case JPEG_MARKER_RST ... JPEG_MARKER_RST + 7:12071207+ case JPEG_MARKER_SOI:12081208+ case JPEG_MARKER_EOI:12091209+ case JPEG_MARKER_TEM:12101210 break;1211121112121212 /* skip uninteresting payload markers */
+14-14
drivers/media/platform/s5p-jpeg/jpeg-core.h
···3737#define EXYNOS3250_IRQ_TIMEOUT 0x1000000038383939/* a selection of JPEG markers */4040-#define TEM 0x014141-#define SOF0 0xc04242-#define DHT 0xc44343-#define RST 0xd04444-#define SOI 0xd84545-#define EOI 0xd94646-#define SOS 0xda4747-#define DQT 0xdb4848-#define DHP 0xde4040+#define JPEG_MARKER_TEM 0x014141+#define JPEG_MARKER_SOF0 0xc04242+#define JPEG_MARKER_DHT 0xc44343+#define JPEG_MARKER_RST 0xd04444+#define JPEG_MARKER_SOI 0xd84545+#define JPEG_MARKER_EOI 0xd94646+#define JPEG_MARKER_SOS 0xda4747+#define JPEG_MARKER_DQT 0xdb4848+#define JPEG_MARKER_DHP 0xde49495050/* Flags that indicate a format can be used for capture/output */5151#define SJPEG_FMT_FLAG_ENC_CAPTURE (1 << 0)···187187 * @fmt: driver-specific format of this queue188188 * @w: image width189189 * @h: image height190190- * @sos: SOS marker's position relative to the buffer beginning191191- * @dht: DHT markers' positions relative to the buffer beginning192192- * @dqt: DQT markers' positions relative to the buffer beginning193193- * @sof: SOF0 marker's position relative to the buffer beginning194194- * @sof_len: SOF0 marker's payload length (without length field itself)190190+ * @sos: JPEG_MARKER_SOS's position relative to the buffer beginning191191+ * @dht: JPEG_MARKER_DHT' positions relative to the buffer beginning192192+ * @dqt: JPEG_MARKER_DQT' positions relative to the buffer beginning193193+ * @sof: JPEG_MARKER_SOF0's position relative to the buffer beginning194194+ * @sof_len: JPEG_MARKER_SOF0's payload length (without length field itself)195195 * @size: image buffer size in bytes196196 */197197struct s5p_jpeg_q_data {
+20-1
drivers/media/rc/ir_toy.c
···2424// End transmit and repeat reset command so we exit sump mode2525static const u8 COMMAND_RESET[] = { 0xff, 0xff, 0, 0, 0, 0, 0 };2626static const u8 COMMAND_SMODE_ENTER[] = { 's' };2727+static const u8 COMMAND_SMODE_EXIT[] = { 0 };2728static const u8 COMMAND_TXSTART[] = { 0x26, 0x24, 0x25, 0x03 };28292930#define REPLY_XMITCOUNT 't'···310309 buf[i] = cpu_to_be16(v);311310 }312311313313- buf[count] = cpu_to_be16(0xffff);312312+ buf[count] = 0xffff;314313315314 irtoy->tx_buf = buf;316315 irtoy->tx_len = size;317316 irtoy->emitted = 0;317317+318318+ // There is an issue where if the unit is receiving IR while the319319+ // first TXSTART command is sent, the device might end up hanging320320+ // with its led on. It does not respond to any command when this321321+ // happens. To work around this, re-enter sample mode.322322+ err = irtoy_command(irtoy, COMMAND_SMODE_EXIT,323323+ sizeof(COMMAND_SMODE_EXIT), STATE_RESET);324324+ if (err) {325325+ dev_err(irtoy->dev, "exit sample mode: %d\n", err);326326+ return err;327327+ }328328+329329+ err = irtoy_command(irtoy, COMMAND_SMODE_ENTER,330330+ sizeof(COMMAND_SMODE_ENTER), STATE_COMMAND);331331+ if (err) {332332+ dev_err(irtoy->dev, "enter sample mode: %d\n", err);333333+ return err;334334+ }318335319336 err = irtoy_command(irtoy, COMMAND_TXSTART, sizeof(COMMAND_TXSTART),320337 STATE_TX);
+12-3
drivers/mmc/host/dw_mmc.c
···1802180218031803 spin_lock_irqsave(&host->irq_lock, flags);1804180418051805- if (!host->data_status)18051805+ /*18061806+ * Only inject an error if we haven't already got an error or data over18071807+ * interrupt.18081808+ */18091809+ if (!host->data_status) {18061810 host->data_status = SDMMC_INT_DCRC;18071807- set_bit(EVENT_DATA_ERROR, &host->pending_events);18081808- tasklet_schedule(&host->tasklet);18111811+ set_bit(EVENT_DATA_ERROR, &host->pending_events);18121812+ tasklet_schedule(&host->tasklet);18131813+ }1809181418101815 spin_unlock_irqrestore(&host->irq_lock, flags);18111816···27262721 }2727272227282723 if (pending & DW_MCI_DATA_ERROR_FLAGS) {27242724+ spin_lock(&host->irq_lock);27252725+27292726 /* if there is an error report DATA_ERROR */27302727 mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS);27312728 host->data_status = pending;27322729 smp_wmb(); /* drain writebuffer */27332730 set_bit(EVENT_DATA_ERROR, &host->pending_events);27342731 tasklet_schedule(&host->tasklet);27322732+27332733+ spin_unlock(&host->irq_lock);27352734 }2736273527372736 if (pending & SDMMC_INT_DATA_OVER) {
+2
drivers/mmc/host/renesas_sdhi_core.c
···561561 /* Unknown why but without polling reset status, it will hang */562562 read_poll_timeout(reset_control_status, ret, ret == 0, 1, 100,563563 false, priv->rstc);564564+ /* At least SDHI_VER_GEN2_SDR50 needs manual release of reset */565565+ sd_ctrl_write16(host, CTL_RESET_SD, 0x0001);564566 priv->needs_adjust_hs400 = false;565567 renesas_sdhi_set_clock(host, host->clk_cache);566568 } else if (priv->scc_ctl) {
+10-7
drivers/net/dsa/mv88e6xxx/chip.c
···28342834 if (err)28352835 return err;2836283628372837- /* Port Control 2: don't force a good FCS, set the maximum frame size to28382838- * 10240 bytes, disable 802.1q tags checking, don't discard tagged or28372837+ /* Port Control 2: don't force a good FCS, set the MTU size to28382838+ * 10222 bytes, disable 802.1q tags checking, don't discard tagged or28392839 * untagged frames on this port, do a destination address lookup on all28402840 * received packets as usual, disable ARP mirroring and don't send a28412841 * copy of all transmitted/received frames on this port to the CPU.···28542854 return err;2855285528562856 if (chip->info->ops->port_set_jumbo_size) {28572857- err = chip->info->ops->port_set_jumbo_size(chip, port, 10240);28572857+ err = chip->info->ops->port_set_jumbo_size(chip, port, 10218);28582858 if (err)28592859 return err;28602860 }···29442944 struct mv88e6xxx_chip *chip = ds->priv;2945294529462946 if (chip->info->ops->port_set_jumbo_size)29472947- return 10240;29472947+ return 10240 - VLAN_ETH_HLEN - EDSA_HLEN - ETH_FCS_LEN;29482948 else if (chip->info->ops->set_max_frame_size)29492949- return 1632;29502950- return 1522;29492949+ return 1632 - VLAN_ETH_HLEN - EDSA_HLEN - ETH_FCS_LEN;29502950+ return 1522 - VLAN_ETH_HLEN - EDSA_HLEN - ETH_FCS_LEN;29512951}2952295229532953static int mv88e6xxx_change_mtu(struct dsa_switch *ds, int port, int new_mtu)29542954{29552955 struct mv88e6xxx_chip *chip = ds->priv;29562956 int ret = 0;29572957+29582958+ if (dsa_is_dsa_port(ds, port) || dsa_is_cpu_port(ds, port))29592959+ new_mtu += EDSA_HLEN;2957296029582961 mv88e6xxx_reg_lock(chip);29592962 if (chip->info->ops->port_set_jumbo_size)···37283725 .port_set_ucast_flood = mv88e6352_port_set_ucast_flood,37293726 .port_set_mcast_flood = mv88e6352_port_set_mcast_flood,37303727 .port_set_ether_type = mv88e6351_port_set_ether_type,37313731- .port_set_jumbo_size = mv88e6165_port_set_jumbo_size,37323728 .port_egress_rate_limiting = 
mv88e6097_port_egress_rate_limiting,37333729 .port_pause_limit = mv88e6097_port_pause_limit,37343730 .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,···37523750 .avb_ops = &mv88e6165_avb_ops,37533751 .ptp_ops = &mv88e6165_ptp_ops,37543752 .phylink_validate = mv88e6185_phylink_validate,37533753+ .set_max_frame_size = mv88e6185_g1_set_max_frame_size,37553754};3756375537573756static const struct mv88e6xxx_ops mv88e6165_ops = {
+1
drivers/net/dsa/mv88e6xxx/chip.h
···1818#include <linux/timecounter.h>1919#include <net/dsa.h>20202121+#define EDSA_HLEN 82122#define MV88E6XXX_N_FID 409622232324/* PVT limits for 4-bit port and 5-bit switch */
+2
drivers/net/dsa/mv88e6xxx/global1.c
···232232 u16 val;233233 int err;234234235235+ mtu += ETH_HLEN + ETH_FCS_LEN;236236+235237 err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_CTL1, &val);236238 if (err)237239 return err;
···192192 bgmac->dma_dev = &pdev->dev;193193194194 ret = of_get_mac_address(np, bgmac->net_dev->dev_addr);195195+ if (ret == -EPROBE_DEFER)196196+ return ret;197197+195198 if (ret)196199 dev_warn(&pdev->dev,197200 "MAC address not present in device tree\n");
+1-2
drivers/net/ethernet/freescale/enetc/enetc_pf.c
···541541542542 if (phy_interface_mode_is_rgmii(phy_mode)) {543543 val = enetc_port_rd(hw, ENETC_PM0_IF_MODE);544544- val &= ~ENETC_PM0_IFM_EN_AUTO;545545- val &= ENETC_PM0_IFM_IFMODE_MASK;544544+ val &= ~(ENETC_PM0_IFM_EN_AUTO | ENETC_PM0_IFM_IFMODE_MASK);546545 val |= ENETC_PM0_IFM_IFMODE_GMII | ENETC_PM0_IFM_RG;547546 enetc_port_wr(hw, ENETC_PM0_IF_MODE, val);548547 }
-1
drivers/net/ethernet/hisilicon/hns3/hnae3.h
···752752 u8 prio_tc[HNAE3_MAX_USER_PRIO]; /* TC indexed by prio */753753 u16 tqp_count[HNAE3_MAX_TC];754754 u16 tqp_offset[HNAE3_MAX_TC];755755- unsigned long tc_en; /* bitmap of TC enabled */756755 u8 num_tc; /* Total number of enabled TCs */757756 bool mqprio_active;758757};
+7-9
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
···623623 return ret;624624 }625625626626- for (i = 0; i < HNAE3_MAX_TC; i++) {627627- if (!test_bit(i, &tc_info->tc_en))628628- continue;629629-626626+ for (i = 0; i < tc_info->num_tc; i++)630627 netdev_set_tc_queue(netdev, i, tc_info->tqp_count[i],631628 tc_info->tqp_offset[i]);632632- }633629 }634630635631 ret = netif_set_real_num_tx_queues(netdev, queue_size);···774778775779 if (hns3_nic_resetting(netdev))776780 return -EBUSY;781781+782782+ if (!test_bit(HNS3_NIC_STATE_DOWN, &priv->state)) {783783+ netdev_warn(netdev, "net open repeatedly!\n");784784+ return 0;785785+ }777786778787 netif_carrier_off(netdev);779788···48664865 struct hnae3_tc_info *tc_info = &kinfo->tc_info;48674866 int i;4868486748694869- for (i = 0; i < HNAE3_MAX_TC; i++) {48684868+ for (i = 0; i < tc_info->num_tc; i++) {48704869 int j;48714871-48724872- if (!test_bit(i, &tc_info->tc_en))48734873- continue;4874487048754871 for (j = 0; j < tc_info->tqp_count[i]; j++) {48764872 struct hnae3_queue *q;
···334334335335#if IS_ENABLED(CONFIG_VLAN_8021Q)336336 /* Disable the vlan filter for selftest does not support it */337337- if (h->ae_algo->ops->enable_vlan_filter)337337+ if (h->ae_algo->ops->enable_vlan_filter &&338338+ ndev->features & NETIF_F_HW_VLAN_CTAG_FILTER)338339 h->ae_algo->ops->enable_vlan_filter(h, false);339340#endif340341···360359 h->ae_algo->ops->halt_autoneg(h, false);361360362361#if IS_ENABLED(CONFIG_VLAN_8021Q)363363- if (h->ae_algo->ops->enable_vlan_filter)362362+ if (h->ae_algo->ops->enable_vlan_filter &&363363+ ndev->features & NETIF_F_HW_VLAN_CTAG_FILTER)364364 h->ae_algo->ops->enable_vlan_filter(h, true);365365#endif366366
···87088708 }8709870987108710 /* check if we just hit the duplicate */87118711- if (!ret) {87128712- dev_warn(&hdev->pdev->dev, "VF %u mac(%pM) exists\n",87138713- vport->vport_id, addr);87148714- return 0;87158715- }87168716-87178717- dev_err(&hdev->pdev->dev,87188718- "PF failed to add unicast entry(%pM) in the MAC table\n",87198719- addr);87118711+ if (!ret)87128712+ return -EEXIST;8720871387218714 return ret;87228715}···88618868 } else {88628869 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,88638870 &vport->state);88648864- break;88718871+88728872+ /* If one unicast mac address is existing in hardware,88738873+ * we need to try whether other unicast mac addresses88748874+ * are new addresses that can be added.88758875+ */88768876+ if (ret != -EEXIST)88778877+ break;88658878 }88668879 }88678880}···1279612797 continue;12797127981279812799 if (vport->vf_info.trusted) {1279912799- uc_en = vport->vf_info.request_uc_en > 0;1280012800- mc_en = vport->vf_info.request_mc_en > 0;1280012800+ uc_en = vport->vf_info.request_uc_en > 0 ||1280112801+ vport->overflow_promisc_flags &1280212802+ HNAE3_OVERFLOW_UPE;1280312803+ mc_en = vport->vf_info.request_mc_en > 0 ||1280412804+ vport->overflow_promisc_flags &1280512805+ HNAE3_OVERFLOW_MPE;1280112806 }1280212807 bc_en = vport->vf_info.request_bc_en > 0;1280312808
···687687688688 for (i = 0; i < HNAE3_MAX_TC; i++) {689689 if (hdev->hw_tc_map & BIT(i) && i < kinfo->tc_info.num_tc) {690690- set_bit(i, &kinfo->tc_info.tc_en);691690 kinfo->tc_info.tqp_offset[i] = i * kinfo->rss_size;692691 kinfo->tc_info.tqp_count[i] = kinfo->rss_size;693692 } else {694693 /* Set to default queue if TC is disable */695695- clear_bit(i, &kinfo->tc_info.tc_en);696694 kinfo->tc_info.tqp_offset[i] = 0;697695 kinfo->tc_info.tqp_count[i] = 1;698696 }···727729 for (i = 0; i < HNAE3_MAX_USER_PRIO; i++)728730 hdev->tm_info.prio_tc[i] =729731 (i >= hdev->tm_info.num_tc) ? 0 : i;730730-731731- /* DCB is enabled if we have more than 1 TC or pfc_en is732732- * non-zero.733733- */734734- if (hdev->tm_info.num_tc > 1 || hdev->tm_info.pfc_en)735735- hdev->flag |= HCLGE_FLAG_DCB_ENABLE;736736- else737737- hdev->flag &= ~HCLGE_FLAG_DCB_ENABLE;738732}739733740734static void hclge_tm_pg_info_init(struct hclge_dev *hdev)···757767758768static void hclge_update_fc_mode_by_dcb_flag(struct hclge_dev *hdev)759769{760760- if (!(hdev->flag & HCLGE_FLAG_DCB_ENABLE)) {770770+ if (hdev->tm_info.num_tc == 1 && !hdev->tm_info.pfc_en) {761771 if (hdev->fc_mode_last_time == HCLGE_FC_PFC)762772 dev_warn(&hdev->pdev->dev,763763- "DCB is disable, but last mode is FC_PFC\n");773773+ "Only 1 tc used, but last mode is FC_PFC\n");764774765775 hdev->tm_info.fc_mode = hdev->fc_mode_last_time;766776 } else if (hdev->tm_info.fc_mode != HCLGE_FC_PFC) {···786796 }787797}788798789789-static void hclge_pfc_info_init(struct hclge_dev *hdev)799799+void hclge_tm_pfc_info_update(struct hclge_dev *hdev)790800{791801 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3)792802 hclge_update_fc_mode(hdev);···802812803813 hclge_tm_vport_info_update(hdev);804814805805- hclge_pfc_info_init(hdev);815815+ hclge_tm_pfc_info_update(hdev);806816}807817808818static int hclge_tm_pg_to_pri_map(struct hclge_dev *hdev)···15481558 hclge_tm_schd_info_init(hdev);15491559}1550156015511551-void 
hclge_tm_pfc_info_update(struct hclge_dev *hdev)15521552-{15531553- /* DCB is enabled if we have more than 1 TC or pfc_en is15541554- * non-zero.15551555- */15561556- if (hdev->tm_info.num_tc > 1 || hdev->tm_info.pfc_en)15571557- hdev->flag |= HCLGE_FLAG_DCB_ENABLE;15581558- else15591559- hdev->flag &= ~HCLGE_FLAG_DCB_ENABLE;15601560-15611561- hclge_pfc_info_init(hdev);15621562-}15631563-15641561int hclge_tm_init_hw(struct hclge_dev *hdev, bool init)15651562{15661563 int ret;···15931616 if (ret)15941617 return ret;1595161815961596- if (!(hdev->flag & HCLGE_FLAG_DCB_ENABLE))16191619+ if (hdev->tm_info.num_tc == 1 && !hdev->tm_info.pfc_en)15971620 return 0;1598162115991622 return hclge_tm_bp_setup(hdev);
+1-1
drivers/net/ethernet/hisilicon/hns_mdio.c
···354354355355 if (dev_of_node(bus->parent)) {356356 if (!mdio_dev->subctrl_vbase) {357357- dev_err(&bus->dev, "mdio sys ctl reg has not maped\n");357357+ dev_err(&bus->dev, "mdio sys ctl reg has not mapped\n");358358 return -ENODEV;359359 }360360
-8
drivers/net/ethernet/ibm/ibmvnic.c
···47084708 return 0;47094709 }4710471047114711- if (adapter->failover_pending) {47124712- adapter->init_done_rc = -EAGAIN;47134713- netdev_dbg(netdev, "Failover pending, ignoring login response\n");47144714- complete(&adapter->init_done);47154715- /* login response buffer will be released on reset */47164716- return 0;47174717- }47184718-47194711 netdev->mtu = adapter->req_mtu - ETH_HLEN;4720471247214713 netdev_dbg(adapter->netdev, "Login Response Buffer:\n");
+15-7
drivers/net/ethernet/intel/e100.c
···24372437 sizeof(info->bus_info));24382438}2439243924402440-#define E100_PHY_REGS 0x1C24402440+#define E100_PHY_REGS 0x1D24412441static int e100_get_regs_len(struct net_device *netdev)24422442{24432443 struct nic *nic = netdev_priv(netdev);24442444- return 1 + E100_PHY_REGS + sizeof(nic->mem->dump_buf);24442444+24452445+ /* We know the number of registers, and the size of the dump buffer.24462446+ * Calculate the total size in bytes.24472447+ */24482448+ return (1 + E100_PHY_REGS) * sizeof(u32) + sizeof(nic->mem->dump_buf);24452449}2446245024472451static void e100_get_regs(struct net_device *netdev,···24592455 buff[0] = ioread8(&nic->csr->scb.cmd_hi) << 24 |24602456 ioread8(&nic->csr->scb.cmd_lo) << 16 |24612457 ioread16(&nic->csr->scb.status);24622462- for (i = E100_PHY_REGS; i >= 0; i--)24632463- buff[1 + E100_PHY_REGS - i] =24642464- mdio_read(netdev, nic->mii.phy_id, i);24582458+ for (i = 0; i < E100_PHY_REGS; i++)24592459+ /* Note that we read the registers in reverse order. This24602460+ * ordering is the ABI apparently used by ethtool and other24612461+ * applications.24622462+ */24632463+ buff[1 + i] = mdio_read(netdev, nic->mii.phy_id,24642464+ E100_PHY_REGS - 1 - i);24652465 memset(nic->mem->dump_buf, 0, sizeof(nic->mem->dump_buf));24662466 e100_exec_cb(nic, NULL, e100_dump);24672467 msleep(10);24682468- memcpy(&buff[2 + E100_PHY_REGS], nic->mem->dump_buf,24692469- sizeof(nic->mem->dump_buf));24682468+ memcpy(&buff[1 + E100_PHY_REGS], nic->mem->dump_buf,24692469+ sizeof(nic->mem->dump_buf));24702470}2471247124722472static void e100_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
···1011210112 struct ixgbe_adapter *adapter = netdev_priv(dev);1011310113 struct bpf_prog *old_prog;1011410114 bool need_reset;1011510115+ int num_queues;10115101161011610117 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)1011710118 return -EINVAL;···1016210161 /* Kick start the NAPI context if there is an AF_XDP socket open1016310162 * on that queue id. This so that receiving will start.1016410163 */1016510165- if (need_reset && prog)1016610166- for (i = 0; i < adapter->num_rx_queues; i++)1016410164+ if (need_reset && prog) {1016510165+ num_queues = min_t(int, adapter->num_rx_queues,1016610166+ adapter->num_xdp_queues);1016710167+ for (i = 0; i < num_queues; i++)1016710168 if (adapter->xdp_ring[i]->xsk_pool)1016810169 (void)ixgbe_xsk_wakeup(adapter->netdev, i,1016910170 XDP_WAKEUP_RX);1017110171+ }10170101721017110173 return 0;1017210174}
···7373config SUNVNET_COMMON7474 tristate "Common routines to support Sun Virtual Networking"7575 depends on SUN_LDOMS7676+ depends on INET7677 default m77787879config SUNVNET
+1
drivers/net/hamradio/Kconfig
···4848config DMASCC4949 tristate "High-speed (DMA) SCC driver for AX.25"5050 depends on ISA && AX25 && BROKEN_ON_SMP && ISA_DMA_API5151+ depends on VIRT_TO_BUS5152 help5253 This is a driver for high-speed SCC boards, i.e. those supporting5354 DMA on one port. You usually use those boards to connect your
+5-1
drivers/net/mdio/mdio-ipq4019.c
···207207{208208 struct ipq4019_mdio_data *priv;209209 struct mii_bus *bus;210210+ struct resource *res;210211 int ret;211212212213 bus = devm_mdiobus_alloc_size(&pdev->dev, sizeof(*priv));···225224 return PTR_ERR(priv->mdio_clk);226225227226 /* The platform resource is provided on the chipset IPQ5018 */228228- priv->eth_ldo_rdy = devm_platform_ioremap_resource(pdev, 1);227227+ /* This resource is optional */228228+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);229229+ if (res)230230+ priv->eth_ldo_rdy = devm_ioremap_resource(&pdev->dev, res);229231230232 bus->name = "ipq4019_mdio";231233 bus->read = ipq4019_mdio_read;
+10-5
drivers/net/mdio/mdio-mscc-miim.c
···134134135135static int mscc_miim_probe(struct platform_device *pdev)136136{137137- struct mii_bus *bus;138137 struct mscc_miim_dev *dev;138138+ struct resource *res;139139+ struct mii_bus *bus;139140 int ret;140141141142 bus = devm_mdiobus_alloc_size(&pdev->dev, sizeof(*dev));···157156 return PTR_ERR(dev->regs);158157 }159158160160- dev->phy_regs = devm_platform_ioremap_resource(pdev, 1);161161- if (IS_ERR(dev->phy_regs)) {162162- dev_err(&pdev->dev, "Unable to map internal phy registers\n");163163- return PTR_ERR(dev->phy_regs);159159+ /* This resource is optional */160160+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);161161+ if (res) {162162+ dev->phy_regs = devm_ioremap_resource(&pdev->dev, res);163163+ if (IS_ERR(dev->phy_regs)) {164164+ dev_err(&pdev->dev, "Unable to map internal phy registers\n");165165+ return PTR_ERR(dev->phy_regs);166166+ }164167 }165168166169 ret = of_mdiobus_register(bus, pdev->dev.of_node);
+1-5
drivers/net/mhi_net.c
···321321 /* Start MHI channels */322322 err = mhi_prepare_for_transfer(mhi_dev);323323 if (err)324324- goto out_err;324324+ return err;325325326326 /* Number of transfer descriptors determines size of the queue */327327 mhi_netdev->rx_queue_sz = mhi_get_free_desc_count(mhi_dev, DMA_FROM_DEVICE);···331331 return err;332332333333 return 0;334334-335335-out_err:336336- free_netdev(ndev);337337- return err;338334}339335340336static void mhi_net_dellink(struct mhi_device *mhi_dev, struct net_device *ndev)
+110-4
drivers/net/phy/bcm7xxx.c
···2727#define MII_BCM7XXX_SHD_2_ADDR_CTRL 0xe2828#define MII_BCM7XXX_SHD_2_CTRL_STAT 0xf2929#define MII_BCM7XXX_SHD_2_BIAS_TRIM 0x1a3030+#define MII_BCM7XXX_SHD_3_PCS_CTRL 0x03131+#define MII_BCM7XXX_SHD_3_PCS_STATUS 0x13232+#define MII_BCM7XXX_SHD_3_EEE_CAP 0x23033#define MII_BCM7XXX_SHD_3_AN_EEE_ADV 0x33434+#define MII_BCM7XXX_SHD_3_EEE_LP 0x43535+#define MII_BCM7XXX_SHD_3_EEE_WK_ERR 0x53136#define MII_BCM7XXX_SHD_3_PCS_CTRL_2 0x63237#define MII_BCM7XXX_PCS_CTRL_2_DEF 0x44003338#define MII_BCM7XXX_SHD_3_AN_STAT 0xb···221216 return genphy_config_aneg(phydev);222217}223218224224-static int phy_set_clr_bits(struct phy_device *dev, int location,225225- int set_mask, int clr_mask)219219+static int __phy_set_clr_bits(struct phy_device *dev, int location,220220+ int set_mask, int clr_mask)226221{227222 int v, ret;228223229229- v = phy_read(dev, location);224224+ v = __phy_read(dev, location);230225 if (v < 0)231226 return v;232227233228 v &= ~clr_mask;234229 v |= set_mask;235230236236- ret = phy_write(dev, location, v);231231+ ret = __phy_write(dev, location, v);237232 if (ret < 0)238233 return ret;239234240235 return v;236236+}237237+238238+static int phy_set_clr_bits(struct phy_device *dev, int location,239239+ int set_mask, int clr_mask)240240+{241241+ int ret;242242+243243+ mutex_lock(&dev->mdio.bus->mdio_lock);244244+ ret = __phy_set_clr_bits(dev, location, set_mask, clr_mask);245245+ mutex_unlock(&dev->mdio.bus->mdio_lock);246246+247247+ return ret;241248}242249243250static int bcm7xxx_28nm_ephy_01_afe_config_init(struct phy_device *phydev)···413396 return ret;414397415398 return bcm7xxx_28nm_ephy_apd_enable(phydev);399399+}400400+401401+#define MII_BCM7XXX_REG_INVALID 0xff402402+403403+static u8 bcm7xxx_28nm_ephy_regnum_to_shd(u16 regnum)404404+{405405+ switch (regnum) {406406+ case MDIO_CTRL1:407407+ return MII_BCM7XXX_SHD_3_PCS_CTRL;408408+ case MDIO_STAT1:409409+ return MII_BCM7XXX_SHD_3_PCS_STATUS;410410+ case MDIO_PCS_EEE_ABLE:411411+ return 
MII_BCM7XXX_SHD_3_EEE_CAP;412412+ case MDIO_AN_EEE_ADV:413413+ return MII_BCM7XXX_SHD_3_AN_EEE_ADV;414414+ case MDIO_AN_EEE_LPABLE:415415+ return MII_BCM7XXX_SHD_3_EEE_LP;416416+ case MDIO_PCS_EEE_WK_ERR:417417+ return MII_BCM7XXX_SHD_3_EEE_WK_ERR;418418+ default:419419+ return MII_BCM7XXX_REG_INVALID;420420+ }421421+}422422+423423+static bool bcm7xxx_28nm_ephy_dev_valid(int devnum)424424+{425425+ return devnum == MDIO_MMD_AN || devnum == MDIO_MMD_PCS;426426+}427427+428428+static int bcm7xxx_28nm_ephy_read_mmd(struct phy_device *phydev,429429+ int devnum, u16 regnum)430430+{431431+ u8 shd = bcm7xxx_28nm_ephy_regnum_to_shd(regnum);432432+ int ret;433433+434434+ if (!bcm7xxx_28nm_ephy_dev_valid(devnum) ||435435+ shd == MII_BCM7XXX_REG_INVALID)436436+ return -EOPNOTSUPP;437437+438438+ /* set shadow mode 2 */439439+ ret = __phy_set_clr_bits(phydev, MII_BCM7XXX_TEST,440440+ MII_BCM7XXX_SHD_MODE_2, 0);441441+ if (ret < 0)442442+ return ret;443443+444444+ /* Access the desired shadow register address */445445+ ret = __phy_write(phydev, MII_BCM7XXX_SHD_2_ADDR_CTRL, shd);446446+ if (ret < 0)447447+ goto reset_shadow_mode;448448+449449+ ret = __phy_read(phydev, MII_BCM7XXX_SHD_2_CTRL_STAT);450450+451451+reset_shadow_mode:452452+ /* reset shadow mode 2 */453453+ __phy_set_clr_bits(phydev, MII_BCM7XXX_TEST, 0,454454+ MII_BCM7XXX_SHD_MODE_2);455455+ return ret;456456+}457457+458458+static int bcm7xxx_28nm_ephy_write_mmd(struct phy_device *phydev,459459+ int devnum, u16 regnum, u16 val)460460+{461461+ u8 shd = bcm7xxx_28nm_ephy_regnum_to_shd(regnum);462462+ int ret;463463+464464+ if (!bcm7xxx_28nm_ephy_dev_valid(devnum) ||465465+ shd == MII_BCM7XXX_REG_INVALID)466466+ return -EOPNOTSUPP;467467+468468+ /* set shadow mode 2 */469469+ ret = __phy_set_clr_bits(phydev, MII_BCM7XXX_TEST,470470+ MII_BCM7XXX_SHD_MODE_2, 0);471471+ if (ret < 0)472472+ return ret;473473+474474+ /* Access the desired shadow register address */475475+ ret = __phy_write(phydev, MII_BCM7XXX_SHD_2_ADDR_CTRL, 
shd);476476+ if (ret < 0)477477+ goto reset_shadow_mode;478478+479479+ /* Write the desired value in the shadow register */480480+ __phy_write(phydev, MII_BCM7XXX_SHD_2_CTRL_STAT, val);481481+482482+reset_shadow_mode:483483+ /* reset shadow mode 2 */484484+ return __phy_set_clr_bits(phydev, MII_BCM7XXX_TEST, 0,485485+ MII_BCM7XXX_SHD_MODE_2);416486}417487418488static int bcm7xxx_28nm_ephy_resume(struct phy_device *phydev)···699595 .get_stats = bcm7xxx_28nm_get_phy_stats, \700596 .probe = bcm7xxx_28nm_probe, \701597 .remove = bcm7xxx_28nm_remove, \598598+ .read_mmd = bcm7xxx_28nm_ephy_read_mmd, \599599+ .write_mmd = bcm7xxx_28nm_ephy_write_mmd, \702600}703601704602#define BCM7XXX_40NM_EPHY(_oui, _name) \
···138138 * 48 bits.139139 */140140 NVME_QUIRK_DMA_ADDRESS_BITS_48 = (1 << 16),141141+142142+ /*143143+ * The controller requires the command_id value be be limited, so skip144144+ * encoding the generation sequence number.145145+ */146146+ NVME_QUIRK_SKIP_CID_GEN = (1 << 17),141147};142148143149/*
···2306230623072307/**23082308 * devm_pinctrl_unregister() - Resource managed version of pinctrl_unregister().23092309- * @dev: device for which which resource was allocated23092309+ * @dev: device for which resource was allocated23102310 * @pctldev: the pinctrl device to unregister.23112311 */23122312void devm_pinctrl_unregister(struct device *dev, struct pinctrl_dev *pctldev)
+14-5
drivers/pinctrl/pinctrl-amd.c
···445445 struct gpio_chip *gc = irq_data_get_irq_chip_data(d);446446 struct amd_gpio *gpio_dev = gpiochip_get_data(gc);447447 u32 wake_mask = BIT(WAKE_CNTRL_OFF_S0I3) | BIT(WAKE_CNTRL_OFF_S3);448448+ int err;448449449450 raw_spin_lock_irqsave(&gpio_dev->lock, flags);450451 pin_reg = readl(gpio_dev->base + (d->hwirq)*4);···457456458457 writel(pin_reg, gpio_dev->base + (d->hwirq)*4);459458 raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);459459+460460+ if (on)461461+ err = enable_irq_wake(gpio_dev->irq);462462+ else463463+ err = disable_irq_wake(gpio_dev->irq);464464+465465+ if (err)466466+ dev_err(&gpio_dev->pdev->dev, "failed to %s wake-up interrupt\n",467467+ on ? "enable" : "disable");460468461469 return 0;462470}···912902static int amd_gpio_probe(struct platform_device *pdev)913903{914904 int ret = 0;915915- int irq_base;916905 struct resource *res;917906 struct amd_gpio *gpio_dev;918907 struct gpio_irq_chip *girq;···934925 if (!gpio_dev->base)935926 return -ENOMEM;936927937937- irq_base = platform_get_irq(pdev, 0);938938- if (irq_base < 0)939939- return irq_base;928928+ gpio_dev->irq = platform_get_irq(pdev, 0);929929+ if (gpio_dev->irq < 0)930930+ return gpio_dev->irq;940931941932#ifdef CONFIG_PM_SLEEP942933 gpio_dev->saved_regs = devm_kcalloc(&pdev->dev, amd_pinctrl_desc.npins,···996987 goto out2;997988 }998989999999- ret = devm_request_irq(&pdev->dev, irq_base, amd_gpio_irq_handler,990990+ ret = devm_request_irq(&pdev->dev, gpio_dev->irq, amd_gpio_irq_handler,1000991 IRQF_SHARED, KBUILD_MODNAME, gpio_dev);1001992 if (ret)1002993 goto out2;
···20922092 return false;20932093}2094209420952095+static int rockchip_pinconf_defer_output(struct rockchip_pin_bank *bank,20962096+ unsigned int pin, u32 arg)20972097+{20982098+ struct rockchip_pin_output_deferred *cfg;20992099+21002100+ cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);21012101+ if (!cfg)21022102+ return -ENOMEM;21032103+21042104+ cfg->pin = pin;21052105+ cfg->arg = arg;21062106+21072107+ list_add_tail(&cfg->head, &bank->deferred_output);21082108+21092109+ return 0;21102110+}21112111+20952112/* set the pin config settings for a specified pin */20962113static int rockchip_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,20972114 unsigned long *configs, unsigned num_configs)···21522135 RK_FUNC_GPIO);21532136 if (rc != RK_FUNC_GPIO)21542137 return -EINVAL;21382138+21392139+ /*21402140+ * Check for gpio driver not being probed yet.21412141+ * The lock makes sure that either gpio-probe has completed21422142+ * or the gpio driver hasn't probed yet.21432143+ */21442144+ mutex_lock(&bank->deferred_lock);21452145+ if (!gpio || !gpio->direction_output) {21462146+ rc = rockchip_pinconf_defer_output(bank, pin - bank->pin_base, arg);21472147+ mutex_unlock(&bank->deferred_lock);21482148+ if (rc)21492149+ return rc;21502150+21512151+ break;21522152+ }21532153+ mutex_unlock(&bank->deferred_lock);2155215421562155 rc = gpio->direction_output(gpio, pin - bank->pin_base,21572156 arg);···22362203 rc = rockchip_get_mux(bank, pin - bank->pin_base);22372204 if (rc != RK_FUNC_GPIO)22382205 return -EINVAL;22062206+22072207+ if (!gpio || !gpio->get) {22082208+ arg = 0;22092209+ break;22102210+ }2239221122402212 rc = gpio->get(gpio, pin - bank->pin_base);22412213 if (rc < 0)···24882450 pin_bank->name, pin);24892451 pdesc++;24902452 }24532453+24542454+ INIT_LIST_HEAD(&pin_bank->deferred_output);24552455+ mutex_init(&pin_bank->deferred_lock);24912456 }2492245724932458 ret = rockchip_pinctrl_parse_dt(pdev, info);···27522711 if (ret) {27532712 dev_err(&pdev->dev, "failed to 
register gpio device\n");27542713 return ret;27142714+ }27152715+27162716+ return 0;27172717+}27182718+27192719+static int rockchip_pinctrl_remove(struct platform_device *pdev)27202720+{27212721+ struct rockchip_pinctrl *info = platform_get_drvdata(pdev);27222722+ struct rockchip_pin_bank *bank;27232723+ struct rockchip_pin_output_deferred *cfg;27242724+ int i;27252725+27262726+ of_platform_depopulate(&pdev->dev);27272727+27282728+ for (i = 0; i < info->ctrl->nr_banks; i++) {27292729+ bank = &info->ctrl->pin_banks[i];27302730+27312731+ mutex_lock(&bank->deferred_lock);27322732+ while (!list_empty(&bank->deferred_output)) {27332733+ cfg = list_first_entry(&bank->deferred_output,27342734+ struct rockchip_pin_output_deferred, head);27352735+ list_del(&cfg->head);27362736+ kfree(cfg);27372737+ }27382738+ mutex_unlock(&bank->deferred_lock);27552739 }2756274027572741 return 0;···3241317532423176static struct platform_driver rockchip_pinctrl_driver = {32433177 .probe = rockchip_pinctrl_probe,31783178+ .remove = rockchip_pinctrl_remove,32443179 .driver = {32453180 .name = "rockchip-pinctrl",32463181 .pm = &rockchip_pinctrl_dev_pm_ops,
+10
drivers/pinctrl/pinctrl-rockchip.h
···141141 * @toggle_edge_mode: bit mask to toggle (falling/rising) edge mode142142 * @recalced_mask: bit mask to indicate a need to recalulate the mask143143 * @route_mask: bits describing the routing pins of per bank144144+ * @deferred_output: gpio output settings to be done after gpio bank probed145145+ * @deferred_lock: mutex for the deferred_output shared btw gpio and pinctrl144146 */145147struct rockchip_pin_bank {146148 struct device *dev;···171169 u32 toggle_edge_mode;172170 u32 recalced_mask;173171 u32 route_mask;172172+ struct list_head deferred_output;173173+ struct mutex deferred_lock;174174};175175176176/**···245241 unsigned int func;246242 unsigned long *configs;247243 unsigned int nconfigs;244244+};245245+246246+struct rockchip_pin_output_deferred {247247+ struct list_head head;248248+ unsigned int pin;249249+ u32 arg;248250};249251250252/**
···11// SPDX-License-Identifier: GPL-2.0-only22/*33- * Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.33+ * Copyright (c) 2012-2014, 2016-2021 The Linux Foundation. All rights reserved.44 */5566#include <linux/gpio/driver.h>···1414#include <linux/platform_device.h>1515#include <linux/regmap.h>1616#include <linux/slab.h>1717+#include <linux/spmi.h>1718#include <linux/types.h>18191920#include <dt-bindings/pinctrl/qcom,pmic-gpio.h>···172171 struct pinctrl_dev *ctrl;173172 struct gpio_chip chip;174173 struct irq_chip irq;174174+ u8 usid;175175+ u8 pid_base;175176};176177177178static const struct pinconf_generic_params pmic_gpio_bindings[] = {···952949 unsigned int *parent_hwirq,953950 unsigned int *parent_type)954951{955955- *parent_hwirq = child_hwirq + 0xc0;952952+ struct pmic_gpio_state *state = gpiochip_get_data(chip);953953+954954+ *parent_hwirq = child_hwirq + state->pid_base;956955 *parent_type = child_type;957956958957 return 0;958958+}959959+960960+static void *pmic_gpio_populate_parent_fwspec(struct gpio_chip *chip,961961+ unsigned int parent_hwirq,962962+ unsigned int parent_type)963963+{964964+ struct pmic_gpio_state *state = gpiochip_get_data(chip);965965+ struct irq_fwspec *fwspec;966966+967967+ fwspec = kzalloc(sizeof(*fwspec), GFP_KERNEL);968968+ if (!fwspec)969969+ return NULL;970970+971971+ fwspec->fwnode = chip->irq.parent_domain->fwnode;972972+973973+ fwspec->param_count = 4;974974+ fwspec->param[0] = state->usid;975975+ fwspec->param[1] = parent_hwirq;976976+ /* param[2] must be left as 0 */977977+ fwspec->param[3] = parent_type;978978+979979+ return fwspec;959980}960981961982static int pmic_gpio_probe(struct platform_device *pdev)···992965 struct pmic_gpio_pad *pad, *pads;993966 struct pmic_gpio_state *state;994967 struct gpio_irq_chip *girq;968968+ const struct spmi_device *parent_spmi_dev;995969 int ret, npins, i;996970 u32 reg;997971···10129841013985 state->dev = &pdev->dev;1014986 state->map = dev_get_regmap(dev->parent, 
NULL);987987+ parent_spmi_dev = to_spmi_device(dev->parent);988988+ state->usid = parent_spmi_dev->usid;989989+ state->pid_base = reg >> 8;10159901016991 pindesc = devm_kcalloc(dev, npins, sizeof(*pindesc), GFP_KERNEL);1017992 if (!pindesc)···10901059 girq->fwnode = of_node_to_fwnode(state->dev->of_node);10911060 girq->parent_domain = parent_domain;10921061 girq->child_to_parent_hwirq = pmic_gpio_child_to_parent_hwirq;10931093- girq->populate_parent_alloc_arg = gpiochip_populate_parent_fwspec_fourcell;10621062+ girq->populate_parent_alloc_arg = pmic_gpio_populate_parent_fwspec;10941063 girq->child_offset_to_irq = pmic_gpio_child_offset_to_irq;10951064 girq->child_irq_domain_ops.translate = pmic_gpio_domain_translate;10961065
+2-7
drivers/ptp/ptp_kvm_x86.c
···1515#include <linux/ptp_clock_kernel.h>1616#include <linux/ptp_kvm.h>17171818-struct pvclock_vsyscall_time_info *hv_clock;1919-2018static phys_addr_t clock_pair_gpa;2119static struct kvm_clock_pairing clock_pair;2220···2628 return -ENODEV;27292830 clock_pair_gpa = slow_virt_to_phys(&clock_pair);2929- hv_clock = pvclock_get_pvti_cpu0_va();3030- if (!hv_clock)3131+ if (!pvclock_get_pvti_cpu0_va())3132 return -ENODEV;32333334 ret = kvm_hypercall2(KVM_HC_CLOCK_PAIRING, clock_pair_gpa,···6164 struct pvclock_vcpu_time_info *src;6265 unsigned int version;6366 long ret;6464- int cpu;65676666- cpu = smp_processor_id();6767- src = &hv_clock[cpu].pvti;6868+ src = this_cpu_pvti();68696970 do {7071 /*
+5-3
drivers/s390/cio/blacklist.c
···262262263263 if (strcmp("free", parm) == 0) {264264 rc = blacklist_parse_parameters(buf, free, 0);265265- /* There could be subchannels without proper devices connected.266266- * evaluate all the entries265265+ /*266266+ * Evaluate the subchannels without an online device. This way,267267+ * no path-verification will be triggered on those subchannels268268+ * and it avoids unnecessary delays.267269 */268268- css_schedule_eval_all();270270+ css_schedule_eval_cond(CSS_EVAL_NOT_ONLINE, 0);269271 } else if (strcmp("add", parm) == 0)270272 rc = blacklist_parse_parameters(buf, add, 0);271273 else if (strcmp("purge", parm) == 0)
+31-9
drivers/s390/cio/css.c
···788788 return 0;789789}790790791791-void css_schedule_eval_all_unreg(unsigned long delay)791791+static int __unset_online(struct device *dev, void *data)792792+{793793+ struct idset *set = data;794794+ struct subchannel *sch = to_subchannel(dev);795795+ struct ccw_device *cdev = sch_get_cdev(sch);796796+797797+ if (cdev && cdev->online)798798+ idset_sch_del(set, sch->schid);799799+800800+ return 0;801801+}802802+803803+void css_schedule_eval_cond(enum css_eval_cond cond, unsigned long delay)792804{793805 unsigned long flags;794794- struct idset *unreg_set;806806+ struct idset *set;795807796808 /* Find unregistered subchannels. */797797- unreg_set = idset_sch_new();798798- if (!unreg_set) {809809+ set = idset_sch_new();810810+ if (!set) {799811 /* Fallback. */800812 css_schedule_eval_all();801813 return;802814 }803803- idset_fill(unreg_set);804804- bus_for_each_dev(&css_bus_type, NULL, unreg_set, __unset_registered);815815+ idset_fill(set);816816+ switch (cond) {817817+ case CSS_EVAL_UNREG:818818+ bus_for_each_dev(&css_bus_type, NULL, set, __unset_registered);819819+ break;820820+ case CSS_EVAL_NOT_ONLINE:821821+ bus_for_each_dev(&css_bus_type, NULL, set, __unset_online);822822+ break;823823+ default:824824+ break;825825+ }826826+805827 /* Apply to slow_subchannel_set. */806828 spin_lock_irqsave(&slow_subchannel_lock, flags);807807- idset_add_set(slow_subchannel_set, unreg_set);829829+ idset_add_set(slow_subchannel_set, set);808830 atomic_set(&css_eval_scheduled, 1);809831 queue_delayed_work(cio_work_q, &slow_path_work, delay);810832 spin_unlock_irqrestore(&slow_subchannel_lock, flags);811811- idset_free(unreg_set);833833+ idset_free(set);812834}813835814836void css_wait_for_slow_path(void)···842820void css_schedule_reprobe(void)843821{844822 /* Schedule with a delay to allow merging of subsequent calls. */845845- css_schedule_eval_all_unreg(1 * HZ);823823+ css_schedule_eval_cond(CSS_EVAL_UNREG, 1 * HZ);846824}847825EXPORT_SYMBOL_GPL(css_schedule_reprobe);848826
+9-1
drivers/s390/cio/css.h
···3434#define SNID_STATE3_MULTI_PATH 13535#define SNID_STATE3_SINGLE_PATH 036363737+/*3838+ * Conditions used to specify which subchannels need evaluation3939+ */4040+enum css_eval_cond {4141+ CSS_EVAL_UNREG, /* unregistered subchannels */4242+ CSS_EVAL_NOT_ONLINE /* sch without an online-device */4343+};4444+3745struct path_state {3846 __u8 state1 : 2; /* path state value 1 */3947 __u8 state2 : 2; /* path state value 2 */···144136/* Helper functions to build lists for the slow path. */145137void css_schedule_eval(struct subchannel_id schid);146138void css_schedule_eval_all(void);147147-void css_schedule_eval_all_unreg(unsigned long delay);139139+void css_schedule_eval_cond(enum css_eval_cond, unsigned long delay);148140int css_complete_work(void);149141150142int sch_is_pseudo_sch(struct subchannel *);
···640640 u64 offset, map_size, map_iova = iova;641641 struct vdpa_map_file *map_file;642642 struct vm_area_struct *vma;643643- int ret;643643+ int ret = 0;644644645645 mmap_read_lock(dev->mm);646646
+6-1
drivers/virtio/virtio.c
···345345 ret = snprintf(compat, sizeof(compat), "virtio,device%x", dev->id.device);346346 BUG_ON(ret >= sizeof(compat));347347348348+ /*349349+ * On powerpc/pseries virtio devices are PCI devices so PCI350350+ * vendor/device ids play the role of the "compatible" property.351351+ * Simply don't init of_node in this case.352352+ */348353 if (!of_device_is_compatible(np, compat)) {349349- ret = -EINVAL;354354+ ret = 0;350355 goto out;351356 }352357
+1-1
drivers/watchdog/Kconfig
···1666166616671667config SIBYTE_WDOG16681668 tristate "Sibyte SoC hardware watchdog"16691669- depends on CPU_SB1 || (MIPS && COMPILE_TEST)16691669+ depends on CPU_SB116701670 help16711671 Watchdog driver for the built in watchdog hardware in Sibyte16721672 SoC processors. There are apparently two watchdog timers
···59165916}5917591759185918/* Check if *cur is a hole and if it is, skip it */59195919-static void skip_hole(struct inode *inode, ext4_lblk_t *cur)59195919+static int skip_hole(struct inode *inode, ext4_lblk_t *cur)59205920{59215921 int ret;59225922 struct ext4_map_blocks map;···59255925 map.m_len = ((inode->i_size) >> inode->i_sb->s_blocksize_bits) - *cur;5926592659275927 ret = ext4_map_blocks(NULL, inode, &map, 0);59285928+ if (ret < 0)59295929+ return ret;59285930 if (ret != 0)59295929- return;59315931+ return 0;59305932 *cur = *cur + map.m_len;59335933+ return 0;59315934}5932593559335936/* Count number of blocks used by this inode and update i_blocks */···59795976 * iblocks by total number of differences found.59805977 */59815978 cur = 0;59825982- skip_hole(inode, &cur);59795979+ ret = skip_hole(inode, &cur);59805980+ if (ret < 0)59815981+ goto out;59835982 path = ext4_find_extent(inode, cur, NULL, 0);59845983 if (IS_ERR(path))59855984 goto out;···60005995 }60015996 cur = max(cur + 1, le32_to_cpu(ex->ee_block) +60025997 ext4_ext_get_actual_len(ex));60036003- skip_hole(inode, &cur);60046004-59985998+ ret = skip_hole(inode, &cur);59995999+ if (ret < 0) {60006000+ ext4_ext_drop_refs(path);60016001+ kfree(path);60026002+ break;60036003+ }60056004 path2 = ext4_find_extent(inode, cur, NULL, 0);60066005 if (IS_ERR(path2)) {60076006 ext4_ext_drop_refs(path);
+6
fs/ext4/fast_commit.c
···892892 sizeof(lrange), (u8 *)&lrange, crc))893893 return -ENOSPC;894894 } else {895895+ unsigned int max = (map.m_flags & EXT4_MAP_UNWRITTEN) ?896896+ EXT_UNWRITTEN_MAX_LEN : EXT_INIT_MAX_LEN;897897+898898+ /* Limit the number of blocks in one extent */899899+ map.m_len = min(max, map.m_len);900900+895901 fc_ext.fc_ino = cpu_to_le32(inode->i_ino);896902 ex = (struct ext4_extent *)&fc_ext.fc_ex;897903 ex->ee_block = cpu_to_le32(map.m_lblk);
+85-65
fs/ext4/inline.c
···77#include <linux/iomap.h>88#include <linux/fiemap.h>99#include <linux/iversion.h>1010+#include <linux/backing-dev.h>10111112#include "ext4_jbd2.h"1213#include "ext4.h"···734733int ext4_write_inline_data_end(struct inode *inode, loff_t pos, unsigned len,735734 unsigned copied, struct page *page)736735{737737- int ret, no_expand;736736+ handle_t *handle = ext4_journal_current_handle();737737+ int no_expand;738738 void *kaddr;739739 struct ext4_iloc iloc;740740+ int ret = 0, ret2;740741741741- if (unlikely(copied < len)) {742742- if (!PageUptodate(page)) {743743- copied = 0;742742+ if (unlikely(copied < len) && !PageUptodate(page))743743+ copied = 0;744744+745745+ if (likely(copied)) {746746+ ret = ext4_get_inode_loc(inode, &iloc);747747+ if (ret) {748748+ unlock_page(page);749749+ put_page(page);750750+ ext4_std_error(inode->i_sb, ret);744751 goto out;745752 }746746- }753753+ ext4_write_lock_xattr(inode, &no_expand);754754+ BUG_ON(!ext4_has_inline_data(inode));747755748748- ret = ext4_get_inode_loc(inode, &iloc);749749- if (ret) {750750- ext4_std_error(inode->i_sb, ret);751751- copied = 0;752752- goto out;753753- }756756+ /*757757+ * ei->i_inline_off may have changed since758758+ * ext4_write_begin() called759759+ * ext4_try_to_write_inline_data()760760+ */761761+ (void) ext4_find_inline_data_nolock(inode);754762755755- ext4_write_lock_xattr(inode, &no_expand);756756- BUG_ON(!ext4_has_inline_data(inode));763763+ kaddr = kmap_atomic(page);764764+ ext4_write_inline_data(inode, &iloc, kaddr, pos, copied);765765+ kunmap_atomic(kaddr);766766+ SetPageUptodate(page);767767+ /* clear page dirty so that writepages wouldn't work for us. 
*/768768+ ClearPageDirty(page);769769+770770+ ext4_write_unlock_xattr(inode, &no_expand);771771+ brelse(iloc.bh);772772+773773+ /*774774+ * It's important to update i_size while still holding page775775+ * lock: page writeout could otherwise come in and zero776776+ * beyond i_size.777777+ */778778+ ext4_update_inode_size(inode, pos + copied);779779+ }780780+ unlock_page(page);781781+ put_page(page);757782758783 /*759759- * ei->i_inline_off may have changed since ext4_write_begin()760760- * called ext4_try_to_write_inline_data()784784+ * Don't mark the inode dirty under page lock. First, it unnecessarily785785+ * makes the holding time of page lock longer. Second, it forces lock786786+ * ordering of page lock and transaction start for journaling787787+ * filesystems.761788 */762762- (void) ext4_find_inline_data_nolock(inode);763763-764764- kaddr = kmap_atomic(page);765765- ext4_write_inline_data(inode, &iloc, kaddr, pos, len);766766- kunmap_atomic(kaddr);767767- SetPageUptodate(page);768768- /* clear page dirty so that writepages wouldn't work for us. 
*/769769- ClearPageDirty(page);770770-771771- ext4_write_unlock_xattr(inode, &no_expand);772772- brelse(iloc.bh);773773- mark_inode_dirty(inode);789789+ if (likely(copied))790790+ mark_inode_dirty(inode);774791out:775775- return copied;792792+ /*793793+ * If we didn't copy as much data as expected, we need to trim back794794+ * size of xattr containing inline data.795795+ */796796+ if (pos + len > inode->i_size && ext4_can_truncate(inode))797797+ ext4_orphan_add(handle, inode);798798+799799+ ret2 = ext4_journal_stop(handle);800800+ if (!ret)801801+ ret = ret2;802802+ if (pos + len > inode->i_size) {803803+ ext4_truncate_failed_write(inode);804804+ /*805805+ * If truncate failed early the inode might still be806806+ * on the orphan list; we need to make sure the inode807807+ * is removed from the orphan list in that case.808808+ */809809+ if (inode->i_nlink)810810+ ext4_orphan_del(NULL, inode);811811+ }812812+ return ret ? ret : copied;776813}777814778815struct buffer_head *···990951out:991952 brelse(iloc.bh);992953 return ret;993993-}994994-995995-int ext4_da_write_inline_data_end(struct inode *inode, loff_t pos,996996- unsigned len, unsigned copied,997997- struct page *page)998998-{999999- int ret;10001000-10011001- ret = ext4_write_inline_data_end(inode, pos, len, copied, page);10021002- if (ret < 0) {10031003- unlock_page(page);10041004- put_page(page);10051005- return ret;10061006- }10071007- copied = ret;10081008-10091009- /*10101010- * No need to use i_size_read() here, the i_size10111011- * cannot change under us because we hold i_mutex.10121012- *10131013- * But it's important to update i_size while still holding page lock:10141014- * page writeout could otherwise come in and zero beyond i_size.10151015- */10161016- if (pos+copied > inode->i_size)10171017- i_size_write(inode, pos+copied);10181018- unlock_page(page);10191019- put_page(page);10201020-10211021- /*10221022- * Don't mark the inode dirty under page lock. 
First, it unnecessarily10231023- * makes the holding time of page lock longer. Second, it forces lock10241024- * ordering of page lock and transaction start for journaling10251025- * filesystems.10261026- */10271027- mark_inode_dirty(inode);10281028-10291029- return copied;1030954}10319551032956#ifdef INLINE_DIR_DEBUG···19191917 EXT4_I(inode)->i_disksize = i_size;1920191819211919 if (i_size < inline_size) {19201920+ /*19211921+ * if there's inline data to truncate and this file was19221922+ * converted to extents after that inline data was written,19231923+ * the extent status cache must be cleared to avoid leaving19241924+ * behind stale delayed allocated extent entries19251925+ */19261926+ if (!ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) {19271927+retry:19281928+ err = ext4_es_remove_extent(inode, 0, EXT_MAX_BLOCKS);19291929+ if (err == -ENOMEM) {19301930+ cond_resched();19311931+ congestion_wait(BLK_RW_ASYNC, HZ/50);19321932+ goto retry;19331933+ }19341934+ if (err)19351935+ goto out_error;19361936+ }19371937+19221938 /* Clear the content in the xattr space. */19231939 if (inline_size > EXT4_MIN_INLINE_DATA_SIZE) {19241940 if ((err = ext4_xattr_ibody_find(inode, &i, &is)) != 0)
+60-118
fs/ext4/inode.c
···12841284 loff_t old_size = inode->i_size;12851285 int ret = 0, ret2;12861286 int i_size_changed = 0;12871287- int inline_data = ext4_has_inline_data(inode);12881287 bool verity = ext4_verity_in_progress(inode);1289128812901289 trace_ext4_write_end(inode, pos, len, copied);12911291- if (inline_data) {12921292- ret = ext4_write_inline_data_end(inode, pos, len,12931293- copied, page);12941294- if (ret < 0) {12951295- unlock_page(page);12961296- put_page(page);12971297- goto errout;12981298- }12991299- copied = ret;13001300- } else13011301- copied = block_write_end(file, mapping, pos,13021302- len, copied, page, fsdata);12901290+12911291+ if (ext4_has_inline_data(inode))12921292+ return ext4_write_inline_data_end(inode, pos, len, copied, page);12931293+12941294+ copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);13031295 /*13041296 * it's important to update i_size while still holding page lock:13051297 * page writeout could otherwise come in and zero beyond i_size.···13121320 * ordering of page lock and transaction start for journaling13131321 * filesystems.13141322 */13151315- if (i_size_changed || inline_data)13231323+ if (i_size_changed)13161324 ret = ext4_mark_inode_dirty(handle, inode);1317132513181326 if (pos + len > inode->i_size && !verity && ext4_can_truncate(inode))···13211329 * inode->i_size. 
So truncate them13221330 */13231331 ext4_orphan_add(handle, inode);13241324-errout:13321332+13251333 ret2 = ext4_journal_stop(handle);13261334 if (!ret)13271335 ret = ret2;···13871395 int partial = 0;13881396 unsigned from, to;13891397 int size_changed = 0;13901390- int inline_data = ext4_has_inline_data(inode);13911398 bool verity = ext4_verity_in_progress(inode);1392139913931400 trace_ext4_journalled_write_end(inode, pos, len, copied);···1395140413961405 BUG_ON(!ext4_handle_valid(handle));1397140613981398- if (inline_data) {13991399- ret = ext4_write_inline_data_end(inode, pos, len,14001400- copied, page);14011401- if (ret < 0) {14021402- unlock_page(page);14031403- put_page(page);14041404- goto errout;14051405- }14061406- copied = ret;14071407- } else if (unlikely(copied < len) && !PageUptodate(page)) {14071407+ if (ext4_has_inline_data(inode))14081408+ return ext4_write_inline_data_end(inode, pos, len, copied, page);14091409+14101410+ if (unlikely(copied < len) && !PageUptodate(page)) {14081411 copied = 0;14091412 ext4_journalled_zero_new_buffers(handle, inode, page, from, to);14101413 } else {···14211436 if (old_size < pos && !verity)14221437 pagecache_isize_extended(inode, old_size, pos);1423143814241424- if (size_changed || inline_data) {14391439+ if (size_changed) {14251440 ret2 = ext4_mark_inode_dirty(handle, inode);14261441 if (!ret)14271442 ret = ret2;···14341449 */14351450 ext4_orphan_add(handle, inode);1436145114371437-errout:14381452 ret2 = ext4_journal_stop(handle);14391453 if (!ret)14401454 ret = ret2;···16281644 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);16291645 int ret;16301646 bool allocated = false;16471647+ bool reserved = false;1631164816321649 /*16331650 * If the cluster containing lblk is shared with a delayed,···16451660 ret = ext4_da_reserve_space(inode);16461661 if (ret != 0) /* ENOSPC */16471662 goto errout;16631663+ reserved = true;16481664 } else { /* bigalloc */16491665 if (!ext4_es_scan_clu(inode, &ext4_es_is_delonly, lblk)) 
{16501666 if (!ext4_es_scan_clu(inode,···16581672 ret = ext4_da_reserve_space(inode);16591673 if (ret != 0) /* ENOSPC */16601674 goto errout;16751675+ reserved = true;16611676 } else {16621677 allocated = true;16631678 }···16691682 }1670168316711684 ret = ext4_es_insert_delayed_block(inode, lblk, allocated);16851685+ if (ret && reserved)16861686+ ext4_da_release_space(inode, 1);1672168716731688errout:16741689 return ret;···17111722 }1712172317131724 /*17141714- * Delayed extent could be allocated by fallocate.17151715- * So we need to check it.17251725+ * the buffer head associated with a delayed and not unwritten17261726+ * block found in the extent status cache must contain an17271727+ * invalid block number and have its BH_New and BH_Delay bits17281728+ * set, reflecting the state assigned when the block was17291729+ * initially delayed allocated17161730 */17171717- if (ext4_es_is_delayed(&es) && !ext4_es_is_unwritten(&es)) {17181718- map_bh(bh, inode->i_sb, invalid_block);17191719- set_buffer_new(bh);17201720- set_buffer_delay(bh);17311731+ if (ext4_es_is_delonly(&es)) {17321732+ BUG_ON(bh->b_blocknr != invalid_block);17331733+ BUG_ON(!buffer_new(bh));17341734+ BUG_ON(!buffer_delay(bh));17211735 return 0;17221736 }17231737···29242932 return 0;29252933}2926293429272927-/* We always reserve for an inode update; the superblock could be there too */29282928-static int ext4_da_write_credits(struct inode *inode, loff_t pos, unsigned len)29292929-{29302930- if (likely(ext4_has_feature_large_file(inode->i_sb)))29312931- return 1;29322932-29332933- if (pos + len <= 0x7fffffffULL)29342934- return 1;29352935-29362936- /* We might need to update the superblock to set LARGE_FILE */29372937- return 2;29382938-}29392939-29402935static int ext4_da_write_begin(struct file *file, struct address_space *mapping,29412936 loff_t pos, unsigned len, unsigned flags,29422937 struct page **pagep, void **fsdata)···29322953 struct page *page;29332954 pgoff_t index;29342955 struct inode 
*inode = mapping->host;29352935- handle_t *handle;2936295629372957 if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))29382958 return -EIO;···29572979 return 0;29582980 }2959298129602960- /*29612961- * grab_cache_page_write_begin() can take a long time if the29622962- * system is thrashing due to memory pressure, or if the page29632963- * is being written back. So grab it first before we start29642964- * the transaction handle. This also allows us to allocate29652965- * the page (if needed) without using GFP_NOFS.29662966- */29672967-retry_grab:29822982+retry:29682983 page = grab_cache_page_write_begin(mapping, index, flags);29692984 if (!page)29702985 return -ENOMEM;29712971- unlock_page(page);2972298629732973- /*29742974- * With delayed allocation, we don't log the i_disksize update29752975- * if there is delayed block allocation. But we still need29762976- * to journalling the i_disksize update if writes to the end29772977- * of file which has an already mapped buffer.29782978- */29792979-retry_journal:29802980- handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE,29812981- ext4_da_write_credits(inode, pos, len));29822982- if (IS_ERR(handle)) {29832983- put_page(page);29842984- return PTR_ERR(handle);29852985- }29862986-29872987- lock_page(page);29882988- if (page->mapping != mapping) {29892989- /* The page got truncated from under us */29902990- unlock_page(page);29912991- put_page(page);29922992- ext4_journal_stop(handle);29932993- goto retry_grab;29942994- }29952987 /* In case writeback began while the page was unlocked */29962988 wait_for_stable_page(page);29972989···29733025#endif29743026 if (ret < 0) {29753027 unlock_page(page);29762976- ext4_journal_stop(handle);30283028+ put_page(page);29773029 /*29783030 * block_write_begin may have instantiated a few blocks29793031 * outside i_size. Trim these off again. 
Don't need29802980- * i_size_read because we hold i_mutex.30323032+ * i_size_read because we hold inode lock.29813033 */29823034 if (pos + len > inode->i_size)29833035 ext4_truncate_failed_write(inode);2984303629853037 if (ret == -ENOSPC &&29863038 ext4_should_retry_alloc(inode->i_sb, &retries))29872987- goto retry_journal;29882988-29892989- put_page(page);30393039+ goto retry;29903040 return ret;29913041 }29923042···30213075 struct page *page, void *fsdata)30223076{30233077 struct inode *inode = mapping->host;30243024- int ret = 0, ret2;30253025- handle_t *handle = ext4_journal_current_handle();30263078 loff_t new_i_size;30273079 unsigned long start, end;30283080 int write_mode = (int)(unsigned long)fsdata;···30303086 len, copied, page, fsdata);3031308730323088 trace_ext4_da_write_end(inode, pos, len, copied);30333033- start = pos & (PAGE_SIZE - 1);30343034- end = start + copied - 1;30353035-30363036- /*30373037- * generic_write_end() will run mark_inode_dirty() if i_size30383038- * changes. 
So let's piggyback the i_disksize mark_inode_dirty30393039- * into that.30403040- */30413041- new_i_size = pos + copied;30423042- if (copied && new_i_size > EXT4_I(inode)->i_disksize) {30433043- if (ext4_has_inline_data(inode) ||30443044- ext4_da_should_update_i_disksize(page, end)) {30453045- ext4_update_i_disksize(inode, new_i_size);30463046- /* We need to mark inode dirty even if30473047- * new_i_size is less that inode->i_size30483048- * bu greater than i_disksize.(hint delalloc)30493049- */30503050- ret = ext4_mark_inode_dirty(handle, inode);30513051- }30523052- }3053308930543090 if (write_mode != CONVERT_INLINE_DATA &&30553091 ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA) &&30563092 ext4_has_inline_data(inode))30573057- ret2 = ext4_da_write_inline_data_end(inode, pos, len, copied,30583058- page);30593059- else30603060- ret2 = generic_write_end(file, mapping, pos, len, copied,30613061- page, fsdata);30933093+ return ext4_write_inline_data_end(inode, pos, len, copied, page);3062309430633063- copied = ret2;30643064- if (ret2 < 0)30653065- ret = ret2;30663066- ret2 = ext4_journal_stop(handle);30673067- if (unlikely(ret2 && !ret))30683068- ret = ret2;30953095+ start = pos & (PAGE_SIZE - 1);30963096+ end = start + copied - 1;3069309730703070- return ret ? ret : copied;30983098+ /*30993099+ * Since we are holding inode lock, we are sure i_disksize <=31003100+ * i_size. We also know that if i_disksize < i_size, there are31013101+ * delalloc writes pending in the range upto i_size. If the end of31023102+ * the current write is <= i_size, there's no need to touch31033103+ * i_disksize since writeback will push i_disksize upto i_size31043104+ * eventually. 
If the end of the current write is > i_size and31053105+ * inside an allocated block (ext4_da_should_update_i_disksize()31063106+ * check), we need to update i_disksize here as neither31073107+ * ext4_writepage() nor certain ext4_writepages() paths not31083108+ * allocating blocks update i_disksize.31093109+ *31103110+ * Note that we defer inode dirtying to generic_write_end() /31113111+ * ext4_da_write_inline_data_end().31123112+ */31133113+ new_i_size = pos + copied;31143114+ if (copied && new_i_size > inode->i_size &&31153115+ ext4_da_should_update_i_disksize(page, end))31163116+ ext4_update_i_disksize(inode, new_i_size);31173117+31183118+ return generic_write_end(file, mapping, pos, len, copied, page, fsdata);30713119}3072312030733121/*···42764340 goto has_buffer;4277434142784342 lock_buffer(bh);43434343+ if (ext4_buffer_uptodate(bh)) {43444344+ /* Someone brought it uptodate while we waited */43454345+ unlock_buffer(bh);43464346+ goto has_buffer;43474347+ }43484348+42794349 /*42804350 * If we have all information of the inode in memory and this42814351 * is the only valid inode in the block, we need not read the
+15-6
fs/ext4/super.c
···658658 * constraints, it may not be safe to do it right here so we659659 * defer superblock flushing to a workqueue.660660 */661661- if (continue_fs)661661+ if (continue_fs && journal)662662 schedule_work(&EXT4_SB(sb)->s_error_work);663663 else664664 ext4_commit_super(sb);···13501350 true);13511351 dump_stack();13521352 }13531353+13541354+ if (EXT4_I(inode)->i_reserved_data_blocks)13551355+ ext4_msg(inode->i_sb, KERN_ERR,13561356+ "Inode %lu (%p): i_reserved_data_blocks (%u) not cleared!",13571357+ inode->i_ino, EXT4_I(inode),13581358+ EXT4_I(inode)->i_reserved_data_blocks);13531359}1354136013551361static void init_once(void *foo)···30273021 */30283022static loff_t ext4_max_bitmap_size(int bits, int has_huge_files)30293023{30303030- loff_t res = EXT4_NDIR_BLOCKS;30243024+ unsigned long long upper_limit, res = EXT4_NDIR_BLOCKS;30313025 int meta_blocks;30323032- loff_t upper_limit;30333033- /* This is calculated to be the largest file size for a dense, block30263026+30273027+ /*30283028+ * This is calculated to be the largest file size for a dense, block30343029 * mapped file such that the file's total number of 512-byte sectors,30353030 * including data and all indirect blocks, does not exceed (2^48 - 1).30363031 *30373032 * __u32 i_blocks_lo and _u16 i_blocks_high represent the total30383033 * number of 512-byte sectors of the file.30393034 */30403040-30413035 if (!has_huge_files) {30423036 /*30433037 * !has_huge_files or implies that the inode i_block field···30803074 if (res > MAX_LFS_FILESIZE)30813075 res = MAX_LFS_FILESIZE;3082307630833083- return res;30773077+ return (loff_t)res;30843078}3085307930863080static ext4_fsblk_t descriptor_loc(struct super_block *sb,···50485042 sbi->s_ea_block_cache = NULL;5049504350505044 if (sbi->s_journal) {50455045+ /* flush s_error_work before journal destroy. 
*/50465046+ flush_work(&sbi->s_error_work);50515047 jbd2_journal_destroy(sbi->s_journal);50525048 sbi->s_journal = NULL;50535049 }50545050failed_mount3a:50555051 ext4_es_unregister_shrinker(sbi);50565052failed_mount3:50535053+ /* flush s_error_work before sbi destroy */50575054 flush_work(&sbi->s_error_work);50585055 del_timer_sync(&sbi->s_err_report);50595056 ext4_stop_mmpd(sbi);
+1-4
fs/io-wq.c
···584584585585 if (!get_signal(&ksig))586586 continue;587587- if (fatal_signal_pending(current) ||588588- signal_group_exit(current->signal))589589- break;590590- continue;587587+ break;591588 }592589 last_timeout = !ret;593590 }
+2-15
fs/io_uring.c
···403403 struct wait_queue_head cq_wait;404404 unsigned cq_extra;405405 atomic_t cq_timeouts;406406- struct fasync_struct *cq_fasync;407406 unsigned cq_last_tm_flush;408407 } ____cacheline_aligned_in_smp;409408···16131614 wake_up(&ctx->sq_data->wait);16141615 if (io_should_trigger_evfd(ctx))16151616 eventfd_signal(ctx->cq_ev_fd, 1);16161616- if (waitqueue_active(&ctx->poll_wait)) {16171617+ if (waitqueue_active(&ctx->poll_wait))16171618 wake_up_interruptible(&ctx->poll_wait);16181618- kill_fasync(&ctx->cq_fasync, SIGIO, POLL_IN);16191619- }16201619}1621162016221621static void io_cqring_ev_posted_iopoll(struct io_ring_ctx *ctx)···16281631 }16291632 if (io_should_trigger_evfd(ctx))16301633 eventfd_signal(ctx->cq_ev_fd, 1);16311631- if (waitqueue_active(&ctx->poll_wait)) {16341634+ if (waitqueue_active(&ctx->poll_wait))16321635 wake_up_interruptible(&ctx->poll_wait);16331633- kill_fasync(&ctx->cq_fasync, SIGIO, POLL_IN);16341634- }16351636}1636163716371638/* Returns true if there are no backlogged entries after the flush */···93409345 return mask;93419346}9342934793439343-static int io_uring_fasync(int fd, struct file *file, int on)93449344-{93459345- struct io_ring_ctx *ctx = file->private_data;93469346-93479347- return fasync_helper(fd, file, on, &ctx->cq_fasync);93489348-}93499349-93509348static int io_unregister_personality(struct io_ring_ctx *ctx, unsigned id)93519349{93529350 const struct cred *creds;···1013310145 .mmap_capabilities = io_uring_nommu_mmap_capabilities,1013410146#endif1013510147 .poll = io_uring_poll,1013610136- .fasync = io_uring_fasync,1013710148#ifdef CONFIG_PROC_FS1013810149 .show_fdinfo = io_uring_show_fdinfo,1013910150#endif
+7-2
fs/kernfs/dir.c
···11161116 if (!inode)11171117 inode = ERR_PTR(-ENOMEM);11181118 }11191119- /* Needed only for negative dentry validation */11201120- if (!inode)11191119+ /*11201120+ * Needed for negative dentry validation.11211121+ * The negative dentry can be created in kernfs_iop_lookup()11221122+ * or transforms from positive dentry in dentry_unlink_inode()11231123+ * called from vfs_rmdir().11241124+ */11251125+ if (!IS_ERR(inode))11211126 kernfs_set_rev(parent, dentry);11221127 up_read(&kernfs_rwsem);11231128
-205
fs/ksmbd/auth.c
···6868 memcpy(buf, NEGOTIATE_GSS_HEADER, AUTH_GSS_LENGTH);6969}70707171-static void7272-str_to_key(unsigned char *str, unsigned char *key)7373-{7474- int i;7575-7676- key[0] = str[0] >> 1;7777- key[1] = ((str[0] & 0x01) << 6) | (str[1] >> 2);7878- key[2] = ((str[1] & 0x03) << 5) | (str[2] >> 3);7979- key[3] = ((str[2] & 0x07) << 4) | (str[3] >> 4);8080- key[4] = ((str[3] & 0x0F) << 3) | (str[4] >> 5);8181- key[5] = ((str[4] & 0x1F) << 2) | (str[5] >> 6);8282- key[6] = ((str[5] & 0x3F) << 1) | (str[6] >> 7);8383- key[7] = str[6] & 0x7F;8484- for (i = 0; i < 8; i++)8585- key[i] = (key[i] << 1);8686-}8787-8888-static int8989-smbhash(unsigned char *out, const unsigned char *in, unsigned char *key)9090-{9191- unsigned char key2[8];9292- struct des_ctx ctx;9393-9494- if (fips_enabled) {9595- ksmbd_debug(AUTH, "FIPS compliance enabled: DES not permitted\n");9696- return -ENOENT;9797- }9898-9999- str_to_key(key, key2);100100- des_expand_key(&ctx, key2, DES_KEY_SIZE);101101- des_encrypt(&ctx, out, in);102102- memzero_explicit(&ctx, sizeof(ctx));103103- return 0;104104-}105105-106106-static int ksmbd_enc_p24(unsigned char *p21, const unsigned char *c8, unsigned char *p24)107107-{108108- int rc;109109-110110- rc = smbhash(p24, c8, p21);111111- if (rc)112112- return rc;113113- rc = smbhash(p24 + 8, c8, p21 + 7);114114- if (rc)115115- return rc;116116- return smbhash(p24 + 16, c8, p21 + 14);117117-}118118-119119-/* produce a md4 message digest from data of length n bytes */120120-static int ksmbd_enc_md4(unsigned char *md4_hash, unsigned char *link_str,121121- int link_len)122122-{123123- int rc;124124- struct ksmbd_crypto_ctx *ctx;125125-126126- ctx = ksmbd_crypto_ctx_find_md4();127127- if (!ctx) {128128- ksmbd_debug(AUTH, "Crypto md4 allocation error\n");129129- return -ENOMEM;130130- }131131-132132- rc = crypto_shash_init(CRYPTO_MD4(ctx));133133- if (rc) {134134- ksmbd_debug(AUTH, "Could not init md4 shash\n");135135- goto out;136136- }137137-138138- rc = 
crypto_shash_update(CRYPTO_MD4(ctx), link_str, link_len);139139- if (rc) {140140- ksmbd_debug(AUTH, "Could not update with link_str\n");141141- goto out;142142- }143143-144144- rc = crypto_shash_final(CRYPTO_MD4(ctx), md4_hash);145145- if (rc)146146- ksmbd_debug(AUTH, "Could not generate md4 hash\n");147147-out:148148- ksmbd_release_crypto_ctx(ctx);149149- return rc;150150-}151151-152152-static int ksmbd_enc_update_sess_key(unsigned char *md5_hash, char *nonce,153153- char *server_challenge, int len)154154-{155155- int rc;156156- struct ksmbd_crypto_ctx *ctx;157157-158158- ctx = ksmbd_crypto_ctx_find_md5();159159- if (!ctx) {160160- ksmbd_debug(AUTH, "Crypto md5 allocation error\n");161161- return -ENOMEM;162162- }163163-164164- rc = crypto_shash_init(CRYPTO_MD5(ctx));165165- if (rc) {166166- ksmbd_debug(AUTH, "Could not init md5 shash\n");167167- goto out;168168- }169169-170170- rc = crypto_shash_update(CRYPTO_MD5(ctx), server_challenge, len);171171- if (rc) {172172- ksmbd_debug(AUTH, "Could not update with challenge\n");173173- goto out;174174- }175175-176176- rc = crypto_shash_update(CRYPTO_MD5(ctx), nonce, len);177177- if (rc) {178178- ksmbd_debug(AUTH, "Could not update with nonce\n");179179- goto out;180180- }181181-182182- rc = crypto_shash_final(CRYPTO_MD5(ctx), md5_hash);183183- if (rc)184184- ksmbd_debug(AUTH, "Could not generate md5 hash\n");185185-out:186186- ksmbd_release_crypto_ctx(ctx);187187- return rc;188188-}189189-19071/**19172 * ksmbd_gen_sess_key() - function to generate session key19273 * @sess: session of connection···206325}207326208327/**209209- * ksmbd_auth_ntlm() - NTLM authentication handler210210- * @sess: session of connection211211- * @pw_buf: NTLM challenge response212212- * @passkey: user password213213- *214214- * Return: 0 on success, error number on error215215- */216216-int ksmbd_auth_ntlm(struct ksmbd_session *sess, char *pw_buf)217217-{218218- int rc;219219- unsigned char p21[21];220220- char 
key[CIFS_AUTH_RESP_SIZE];221221-222222- memset(p21, '\0', 21);223223- memcpy(p21, user_passkey(sess->user), CIFS_NTHASH_SIZE);224224- rc = ksmbd_enc_p24(p21, sess->ntlmssp.cryptkey, key);225225- if (rc) {226226- pr_err("password processing failed\n");227227- return rc;228228- }229229-230230- ksmbd_enc_md4(sess->sess_key, user_passkey(sess->user),231231- CIFS_SMB1_SESSKEY_SIZE);232232- memcpy(sess->sess_key + CIFS_SMB1_SESSKEY_SIZE, key,233233- CIFS_AUTH_RESP_SIZE);234234- sess->sequence_number = 1;235235-236236- if (strncmp(pw_buf, key, CIFS_AUTH_RESP_SIZE) != 0) {237237- ksmbd_debug(AUTH, "ntlmv1 authentication failed\n");238238- return -EINVAL;239239- }240240-241241- ksmbd_debug(AUTH, "ntlmv1 authentication pass\n");242242- return 0;243243-}244244-245245-/**246328 * ksmbd_auth_ntlmv2() - NTLMv2 authentication handler247329 * @sess: session of connection248330 * @ntlmv2: NTLMv2 challenge response···286442}287443288444/**289289- * __ksmbd_auth_ntlmv2() - NTLM2(extended security) authentication handler290290- * @sess: session of connection291291- * @client_nonce: client nonce from LM response.292292- * @ntlm_resp: ntlm response data from client.293293- *294294- * Return: 0 on success, error number on error295295- */296296-static int __ksmbd_auth_ntlmv2(struct ksmbd_session *sess, char *client_nonce,297297- char *ntlm_resp)298298-{299299- char sess_key[CIFS_SMB1_SESSKEY_SIZE] = {0};300300- int rc;301301- unsigned char p21[21];302302- char key[CIFS_AUTH_RESP_SIZE];303303-304304- rc = ksmbd_enc_update_sess_key(sess_key,305305- client_nonce,306306- (char *)sess->ntlmssp.cryptkey, 8);307307- if (rc) {308308- pr_err("password processing failed\n");309309- goto out;310310- }311311-312312- memset(p21, '\0', 21);313313- memcpy(p21, user_passkey(sess->user), CIFS_NTHASH_SIZE);314314- rc = ksmbd_enc_p24(p21, sess_key, key);315315- if (rc) {316316- pr_err("password processing failed\n");317317- goto out;318318- }319319-320320- if (memcmp(ntlm_resp, key, CIFS_AUTH_RESP_SIZE) != 
0)321321- rc = -EINVAL;322322-out:323323- return rc;324324-}325325-326326-/**327445 * ksmbd_decode_ntlmssp_auth_blob() - helper function to construct328446 * authenticate blob329447 * @authblob: authenticate blob source pointer···317511 lm_off = le32_to_cpu(authblob->LmChallengeResponse.BufferOffset);318512 nt_off = le32_to_cpu(authblob->NtChallengeResponse.BufferOffset);319513 nt_len = le16_to_cpu(authblob->NtChallengeResponse.Length);320320-321321- /* process NTLM authentication */322322- if (nt_len == CIFS_AUTH_RESP_SIZE) {323323- if (le32_to_cpu(authblob->NegotiateFlags) &324324- NTLMSSP_NEGOTIATE_EXTENDED_SEC)325325- return __ksmbd_auth_ntlmv2(sess, (char *)authblob +326326- lm_off, (char *)authblob + nt_off);327327- else328328- return ksmbd_auth_ntlm(sess, (char *)authblob +329329- nt_off);330330- }331514332515 /* TODO : use domain name that imported from configuration file */333516 domain_name = smb_strndup_from_utf16((const char *)authblob +
···14511451 */14521452struct create_context *smb2_find_context_vals(void *open_req, const char *tag)14531453{14541454- char *data_offset;14551454 struct create_context *cc;14561455 unsigned int next = 0;14571456 char *name;14581457 struct smb2_create_req *req = (struct smb2_create_req *)open_req;14581458+ unsigned int remain_len, name_off, name_len, value_off, value_len,14591459+ cc_len;1459146014601460- data_offset = (char *)req + 4 + le32_to_cpu(req->CreateContextsOffset);14611461- cc = (struct create_context *)data_offset;14611461+ /*14621462+ * CreateContextsOffset and CreateContextsLength are guaranteed to14631463+ * be valid because of ksmbd_smb2_check_message().14641464+ */14651465+ cc = (struct create_context *)((char *)req + 4 +14661466+ le32_to_cpu(req->CreateContextsOffset));14671467+ remain_len = le32_to_cpu(req->CreateContextsLength);14621468 do {14631463- int val;14641464-14651469 cc = (struct create_context *)((char *)cc + next);14661466- name = le16_to_cpu(cc->NameOffset) + (char *)cc;14671467- val = le16_to_cpu(cc->NameLength);14681468- if (val < 4)14701470+ if (remain_len < offsetof(struct create_context, Buffer))14691471 return ERR_PTR(-EINVAL);1470147214711471- if (memcmp(name, tag, val) == 0)14721472- return cc;14731473 next = le32_to_cpu(cc->Next);14741474+ name_off = le16_to_cpu(cc->NameOffset);14751475+ name_len = le16_to_cpu(cc->NameLength);14761476+ value_off = le16_to_cpu(cc->DataOffset);14771477+ value_len = le32_to_cpu(cc->DataLength);14781478+ cc_len = next ? 
next : remain_len;14791479+14801480+ if ((next & 0x7) != 0 ||14811481+ next > remain_len ||14821482+ name_off != offsetof(struct create_context, Buffer) ||14831483+ name_len < 4 ||14841484+ name_off + name_len > cc_len ||14851485+ (value_off & 0x7) != 0 ||14861486+ (value_off && (value_off < name_off + name_len)) ||14871487+ ((u64)value_off + value_len > cc_len))14881488+ return ERR_PTR(-EINVAL);14891489+14901490+ name = (char *)cc + name_off;14911491+ if (memcmp(name, tag, name_len) == 0)14921492+ return cc;14931493+14941494+ remain_len -= next;14741495 } while (next != 0);1475149614761497 return NULL;
+197-61
fs/ksmbd/smb2pdu.c
···459459bool is_chained_smb2_message(struct ksmbd_work *work)460460{461461 struct smb2_hdr *hdr = work->request_buf;462462- unsigned int len;462462+ unsigned int len, next_cmd;463463464464 if (hdr->ProtocolId != SMB2_PROTO_NUMBER)465465 return false;466466467467 hdr = ksmbd_req_buf_next(work);468468- if (le32_to_cpu(hdr->NextCommand) > 0) {468468+ next_cmd = le32_to_cpu(hdr->NextCommand);469469+ if (next_cmd > 0) {470470+ if ((u64)work->next_smb2_rcv_hdr_off + next_cmd +471471+ __SMB2_HEADER_STRUCTURE_SIZE >472472+ get_rfc1002_len(work->request_buf)) {473473+ pr_err("next command(%u) offset exceeds smb msg size\n",474474+ next_cmd);475475+ return false;476476+ }477477+469478 ksmbd_debug(SMB, "got SMB2 chained command\n");470479 init_chained_smb2_rsp(work);471480 return true;···10671058 struct smb2_negotiate_req *req = work->request_buf;10681059 struct smb2_negotiate_rsp *rsp = work->response_buf;10691060 int rc = 0;10611061+ unsigned int smb2_buf_len, smb2_neg_size;10701062 __le32 status;1071106310721064 ksmbd_debug(SMB, "Received negotiate request\n");···10831073 rsp->hdr.Status = STATUS_INVALID_PARAMETER;10841074 rc = -EINVAL;10851075 goto err_out;10761076+ }10771077+10781078+ smb2_buf_len = get_rfc1002_len(work->request_buf);10791079+ smb2_neg_size = offsetof(struct smb2_negotiate_req, Dialects) - 4;10801080+ if (smb2_neg_size > smb2_buf_len) {10811081+ rsp->hdr.Status = STATUS_INVALID_PARAMETER;10821082+ rc = -EINVAL;10831083+ goto err_out;10841084+ }10851085+10861086+ if (conn->dialect == SMB311_PROT_ID) {10871087+ unsigned int nego_ctxt_off = le32_to_cpu(req->NegotiateContextOffset);10881088+10891089+ if (smb2_buf_len < nego_ctxt_off) {10901090+ rsp->hdr.Status = STATUS_INVALID_PARAMETER;10911091+ rc = -EINVAL;10921092+ goto err_out;10931093+ }10941094+10951095+ if (smb2_neg_size > nego_ctxt_off) {10961096+ rsp->hdr.Status = STATUS_INVALID_PARAMETER;10971097+ rc = -EINVAL;10981098+ goto err_out;10991099+ }11001100+11011101+ if (smb2_neg_size + 
le16_to_cpu(req->DialectCount) * sizeof(__le16) >11021102+ nego_ctxt_off) {11031103+ rsp->hdr.Status = STATUS_INVALID_PARAMETER;11041104+ rc = -EINVAL;11051105+ goto err_out;11061106+ }11071107+ } else {11081108+ if (smb2_neg_size + le16_to_cpu(req->DialectCount) * sizeof(__le16) >11091109+ smb2_buf_len) {11101110+ rsp->hdr.Status = STATUS_INVALID_PARAMETER;11111111+ rc = -EINVAL;11121112+ goto err_out;11131113+ }10861114 }1087111510881116 conn->cli_cap = le32_to_cpu(req->Capabilities);···21412093 * smb2_set_ea() - handler for setting extended attributes using set21422094 * info command21432095 * @eabuf: set info command buffer20962096+ * @buf_len: set info command buffer length21442097 * @path: dentry path for get ea21452098 *21462099 * Return: 0 on success, otherwise error21472100 */21482148-static int smb2_set_ea(struct smb2_ea_info *eabuf, struct path *path)21012101+static int smb2_set_ea(struct smb2_ea_info *eabuf, unsigned int buf_len,21022102+ struct path *path)21492103{21502104 struct user_namespace *user_ns = mnt_user_ns(path->mnt);21512105 char *attr_name = NULL, *value;21522106 int rc = 0;21532153- int next = 0;21072107+ unsigned int next = 0;21082108+21092109+ if (buf_len < sizeof(struct smb2_ea_info) + eabuf->EaNameLength +21102110+ le16_to_cpu(eabuf->EaValueLength))21112111+ return -EINVAL;2154211221552113 attr_name = kmalloc(XATTR_NAME_MAX + 1, GFP_KERNEL);21562114 if (!attr_name)···2221216722222168next:22232169 next = le32_to_cpu(eabuf->NextEntryOffset);21702170+ if (next == 0 || buf_len < next)21712171+ break;21722172+ buf_len -= next;22242173 eabuf = (struct smb2_ea_info *)((char *)eabuf + next);21742174+ if (next < (u32)eabuf->EaNameLength + le16_to_cpu(eabuf->EaValueLength))21752175+ break;21762176+22252177 } while (next != 0);2226217822272179 kfree(attr_name);···24272367 ksmbd_debug(SMB,24282368 "Set ACLs using SMB2_CREATE_SD_BUFFER context\n");24292369 sd_buf = (struct create_sd_buf_req *)context;23702370+ if (le16_to_cpu(context->DataOffset) 
+23712371+ le32_to_cpu(context->DataLength) <23722372+ sizeof(struct create_sd_buf_req))23732373+ return -EINVAL;24302374 return set_info_sec(work->conn, work->tcon, path, &sd_buf->ntsd,24312375 le32_to_cpu(sd_buf->ccontext.DataLength), true);24322376}···26252561 goto err_out1;26262562 } else if (context) {26272563 ea_buf = (struct create_ea_buf_req *)context;25642564+ if (le16_to_cpu(context->DataOffset) +25652565+ le32_to_cpu(context->DataLength) <25662566+ sizeof(struct create_ea_buf_req)) {25672567+ rc = -EINVAL;25682568+ goto err_out1;25692569+ }26282570 if (req->CreateOptions & FILE_NO_EA_KNOWLEDGE_LE) {26292571 rsp->hdr.Status = STATUS_ACCESS_DENIED;26302572 rc = -EACCES;···26692599 } else if (context) {26702600 struct create_posix *posix =26712601 (struct create_posix *)context;26022602+ if (le16_to_cpu(context->DataOffset) +26032603+ le32_to_cpu(context->DataLength) <26042604+ sizeof(struct create_posix)) {26052605+ rc = -EINVAL;26062606+ goto err_out1;26072607+ }26722608 ksmbd_debug(SMB, "get posix context\n");2673260926742610 posix_mode = le32_to_cpu(posix->Mode);···28242748 created = true;28252749 user_ns = mnt_user_ns(path.mnt);28262750 if (ea_buf) {28272827- rc = smb2_set_ea(&ea_buf->ea, &path);27512751+ if (le32_to_cpu(ea_buf->ccontext.DataLength) <27522752+ sizeof(struct smb2_ea_info)) {27532753+ rc = -EINVAL;27542754+ goto err_out;27552755+ }27562756+27572757+ rc = smb2_set_ea(&ea_buf->ea,27582758+ le32_to_cpu(ea_buf->ccontext.DataLength),27592759+ &path);28282760 if (rc == -EOPNOTSUPP)28292761 rc = 0;28302762 else if (rc)···30652981 rc = PTR_ERR(az_req);30662982 goto err_out;30672983 } else if (az_req) {30683068- loff_t alloc_size = le64_to_cpu(az_req->AllocationSize);29842984+ loff_t alloc_size;30692985 int err;3070298629872987+ if (le16_to_cpu(az_req->ccontext.DataOffset) +29882988+ le32_to_cpu(az_req->ccontext.DataLength) <29892989+ sizeof(struct create_alloc_size_req)) {29902990+ rc = -EINVAL;29912991+ goto err_out;29922992+ }29932993+ 
alloc_size = le64_to_cpu(az_req->AllocationSize);30712994 ksmbd_debug(SMB,30722995 "request smb2 create allocate size : %llu\n",30732996 alloc_size);···42434152static int get_file_basic_info(struct smb2_query_info_rsp *rsp,42444153 struct ksmbd_file *fp, void *rsp_org)42454154{42464246- struct smb2_file_all_info *basic_info;41554155+ struct smb2_file_basic_info *basic_info;42474156 struct kstat stat;42484157 u64 time;42494158···42534162 return -EACCES;42544163 }4255416442564256- basic_info = (struct smb2_file_all_info *)rsp->Buffer;41654165+ basic_info = (struct smb2_file_basic_info *)rsp->Buffer;42574166 generic_fillattr(file_mnt_user_ns(fp->filp), file_inode(fp->filp),42584167 &stat);42594168 basic_info->CreationTime = cpu_to_le64(fp->create_time);···42664175 basic_info->Attributes = fp->f_ci->m_fattr;42674176 basic_info->Pad1 = 0;42684177 rsp->OutputBufferLength =42694269- cpu_to_le32(offsetof(struct smb2_file_all_info, AllocationSize));42704270- inc_rfc1001_len(rsp_org, offsetof(struct smb2_file_all_info,42714271- AllocationSize));41784178+ cpu_to_le32(sizeof(struct smb2_file_basic_info));41794179+ inc_rfc1001_len(rsp_org, sizeof(struct smb2_file_basic_info));42724180 return 0;42734181}42744182···54235333static int smb2_create_link(struct ksmbd_work *work,54245334 struct ksmbd_share_config *share,54255335 struct smb2_file_link_info *file_info,54265426- struct file *filp,53365336+ unsigned int buf_len, struct file *filp,54275337 struct nls_table *local_nls)54285338{54295339 char *link_name = NULL, *target_name = NULL, *pathname = NULL;54305340 struct path path;54315341 bool file_present = true;54325342 int rc;53435343+53445344+ if (buf_len < (u64)sizeof(struct smb2_file_link_info) +53455345+ le32_to_cpu(file_info->FileNameLength))53465346+ return -EINVAL;5433534754345348 ksmbd_debug(SMB, "setting FILE_LINK_INFORMATION\n");54355349 pathname = kmalloc(PATH_MAX, GFP_KERNEL);···54945400 return rc;54955401}5496540254975497-static int set_file_basic_info(struct 
ksmbd_file *fp, char *buf,54035403+static int set_file_basic_info(struct ksmbd_file *fp,54045404+ struct smb2_file_basic_info *file_info,54985405 struct ksmbd_share_config *share)54995406{55005500- struct smb2_file_all_info *file_info;55015407 struct iattr attrs;55025408 struct timespec64 ctime;55035409 struct file *filp;···55085414 if (!(fp->daccess & FILE_WRITE_ATTRIBUTES_LE))55095415 return -EACCES;5510541655115511- file_info = (struct smb2_file_all_info *)buf;55125417 attrs.ia_valid = 0;55135418 filp = fp->filp;55145419 inode = file_inode(filp);···55845491}5585549255865493static int set_file_allocation_info(struct ksmbd_work *work,55875587- struct ksmbd_file *fp, char *buf)54945494+ struct ksmbd_file *fp,54955495+ struct smb2_file_alloc_info *file_alloc_info)55885496{55895497 /*55905498 * TODO : It's working fine only when store dos attributes···55935499 * properly with any smb.conf option55945500 */5595550155965596- struct smb2_file_alloc_info *file_alloc_info;55975502 loff_t alloc_blks;55985503 struct inode *inode;55995504 int rc;···56005507 if (!(fp->daccess & FILE_WRITE_DATA_LE))56015508 return -EACCES;5602550956035603- file_alloc_info = (struct smb2_file_alloc_info *)buf;56045510 alloc_blks = (le64_to_cpu(file_alloc_info->AllocationSize) + 511) >> 9;56055511 inode = file_inode(fp->filp);56065512···56355543}5636554456375545static int set_end_of_file_info(struct ksmbd_work *work, struct ksmbd_file *fp,56385638- char *buf)55465546+ struct smb2_file_eof_info *file_eof_info)56395547{56405640- struct smb2_file_eof_info *file_eof_info;56415548 loff_t newsize;56425549 struct inode *inode;56435550 int rc;···56445553 if (!(fp->daccess & FILE_WRITE_DATA_LE))56455554 return -EACCES;5646555556475647- file_eof_info = (struct smb2_file_eof_info *)buf;56485556 newsize = le64_to_cpu(file_eof_info->EndOfFile);56495557 inode = file_inode(fp->filp);56505558···56705580}5671558156725582static int set_rename_info(struct ksmbd_work *work, struct ksmbd_file *fp,56735673- char 
*buf)55835583+ struct smb2_file_rename_info *rename_info,55845584+ unsigned int buf_len)56745585{56755586 struct user_namespace *user_ns;56765587 struct ksmbd_file *parent_fp;···56835592 pr_err("no right to delete : 0x%x\n", fp->daccess);56845593 return -EACCES;56855594 }55955595+55965596+ if (buf_len < (u64)sizeof(struct smb2_file_rename_info) +55975597+ le32_to_cpu(rename_info->FileNameLength))55985598+ return -EINVAL;5686559956875600 user_ns = file_mnt_user_ns(fp->filp);56885601 if (ksmbd_stream_fd(fp))···57105615 }57115616 }57125617next:57135713- return smb2_rename(work, fp, user_ns,57145714- (struct smb2_file_rename_info *)buf,56185618+ return smb2_rename(work, fp, user_ns, rename_info,57155619 work->sess->conn->local_nls);57165620}5717562157185718-static int set_file_disposition_info(struct ksmbd_file *fp, char *buf)56225622+static int set_file_disposition_info(struct ksmbd_file *fp,56235623+ struct smb2_file_disposition_info *file_info)57195624{57205720- struct smb2_file_disposition_info *file_info;57215625 struct inode *inode;5722562657235627 if (!(fp->daccess & FILE_DELETE_LE)) {···57255631 }5726563257275633 inode = file_inode(fp->filp);57285728- file_info = (struct smb2_file_disposition_info *)buf;57295634 if (file_info->DeletePending) {57305635 if (S_ISDIR(inode->i_mode) &&57315636 ksmbd_vfs_empty_dir(fp) == -ENOTEMPTY)···57365643 return 0;57375644}5738564557395739-static int set_file_position_info(struct ksmbd_file *fp, char *buf)56465646+static int set_file_position_info(struct ksmbd_file *fp,56475647+ struct smb2_file_pos_info *file_info)57405648{57415741- struct smb2_file_pos_info *file_info;57425649 loff_t current_byte_offset;57435650 unsigned long sector_size;57445651 struct inode *inode;5745565257465653 inode = file_inode(fp->filp);57475747- file_info = (struct smb2_file_pos_info *)buf;57485654 current_byte_offset = le64_to_cpu(file_info->CurrentByteOffset);57495655 sector_size = inode->i_sb->s_blocksize;57505656···57595667 return 
0;57605668}5761566957625762-static int set_file_mode_info(struct ksmbd_file *fp, char *buf)56705670+static int set_file_mode_info(struct ksmbd_file *fp,56715671+ struct smb2_file_mode_info *file_info)57635672{57645764- struct smb2_file_mode_info *file_info;57655673 __le32 mode;5766567457675767- file_info = (struct smb2_file_mode_info *)buf;57685675 mode = file_info->Mode;5769567657705677 if ((mode & ~FILE_MODE_INFO_MASK) ||···57935702 * TODO: need to implement an error handling for STATUS_INFO_LENGTH_MISMATCH57945703 */57955704static int smb2_set_info_file(struct ksmbd_work *work, struct ksmbd_file *fp,57965796- int info_class, char *buf,57055705+ struct smb2_set_info_req *req,57975706 struct ksmbd_share_config *share)57985707{57995799- switch (info_class) {57085708+ unsigned int buf_len = le32_to_cpu(req->BufferLength);57095709+57105710+ switch (req->FileInfoClass) {58005711 case FILE_BASIC_INFORMATION:58015801- return set_file_basic_info(fp, buf, share);57125712+ {57135713+ if (buf_len < sizeof(struct smb2_file_basic_info))57145714+ return -EINVAL;5802571557165716+ return set_file_basic_info(fp, (struct smb2_file_basic_info *)req->Buffer, share);57175717+ }58035718 case FILE_ALLOCATION_INFORMATION:58045804- return set_file_allocation_info(work, fp, buf);57195719+ {57205720+ if (buf_len < sizeof(struct smb2_file_alloc_info))57215721+ return -EINVAL;5805572257235723+ return set_file_allocation_info(work, fp,57245724+ (struct smb2_file_alloc_info *)req->Buffer);57255725+ }58065726 case FILE_END_OF_FILE_INFORMATION:58075807- return set_end_of_file_info(work, fp, buf);57275727+ {57285728+ if (buf_len < sizeof(struct smb2_file_eof_info))57295729+ return -EINVAL;5808573057315731+ return set_end_of_file_info(work, fp,57325732+ (struct smb2_file_eof_info *)req->Buffer);57335733+ }58095734 case FILE_RENAME_INFORMATION:57355735+ {58105736 if (!test_tree_conn_flag(work->tcon, KSMBD_TREE_CONN_FLAG_WRITABLE)) {58115737 ksmbd_debug(SMB,58125738 "User does not have write 
permission\n");58135739 return -EACCES;58145740 }58155815- return set_rename_info(work, fp, buf);5816574157425742+ if (buf_len < sizeof(struct smb2_file_rename_info))57435743+ return -EINVAL;57445744+57455745+ return set_rename_info(work, fp,57465746+ (struct smb2_file_rename_info *)req->Buffer,57475747+ buf_len);57485748+ }58175749 case FILE_LINK_INFORMATION:58185818- return smb2_create_link(work, work->tcon->share_conf,58195819- (struct smb2_file_link_info *)buf, fp->filp,58205820- work->sess->conn->local_nls);57505750+ {57515751+ if (buf_len < sizeof(struct smb2_file_link_info))57525752+ return -EINVAL;5821575357545754+ return smb2_create_link(work, work->tcon->share_conf,57555755+ (struct smb2_file_link_info *)req->Buffer,57565756+ buf_len, fp->filp,57575757+ work->sess->conn->local_nls);57585758+ }58225759 case FILE_DISPOSITION_INFORMATION:57605760+ {58235761 if (!test_tree_conn_flag(work->tcon, KSMBD_TREE_CONN_FLAG_WRITABLE)) {58245762 ksmbd_debug(SMB,58255763 "User does not have write permission\n");58265764 return -EACCES;58275765 }58285828- return set_file_disposition_info(fp, buf);5829576657675767+ if (buf_len < sizeof(struct smb2_file_disposition_info))57685768+ return -EINVAL;57695769+57705770+ return set_file_disposition_info(fp,57715771+ (struct smb2_file_disposition_info *)req->Buffer);57725772+ }58305773 case FILE_FULL_EA_INFORMATION:58315774 {58325775 if (!(fp->daccess & FILE_WRITE_EA_LE)) {···58695744 return -EACCES;58705745 }5871574658725872- return smb2_set_ea((struct smb2_ea_info *)buf,58735873- &fp->filp->f_path);58745874- }57475747+ if (buf_len < sizeof(struct smb2_ea_info))57485748+ return -EINVAL;5875574957505750+ return smb2_set_ea((struct smb2_ea_info *)req->Buffer,57515751+ buf_len, &fp->filp->f_path);57525752+ }58765753 case FILE_POSITION_INFORMATION:58775877- return set_file_position_info(fp, buf);57545754+ {57555755+ if (buf_len < sizeof(struct smb2_file_pos_info))57565756+ return -EINVAL;5878575757585758+ return 
set_file_position_info(fp, (struct smb2_file_pos_info *)req->Buffer);57595759+ }58795760 case FILE_MODE_INFORMATION:58805880- return set_file_mode_info(fp, buf);57615761+ {57625762+ if (buf_len < sizeof(struct smb2_file_mode_info))57635763+ return -EINVAL;57645764+57655765+ return set_file_mode_info(fp, (struct smb2_file_mode_info *)req->Buffer);57665766+ }58815767 }5882576858835883- pr_err("Unimplemented Fileinfoclass :%d\n", info_class);57695769+ pr_err("Unimplemented Fileinfoclass :%d\n", req->FileInfoClass);58845770 return -EOPNOTSUPP;58855771}58865772···59525816 switch (req->InfoType) {59535817 case SMB2_O_INFO_FILE:59545818 ksmbd_debug(SMB, "GOT SMB2_O_INFO_FILE\n");59555955- rc = smb2_set_info_file(work, fp, req->FileInfoClass,59565956- req->Buffer, work->tcon->share_conf);58195819+ rc = smb2_set_info_file(work, fp, req, work->tcon->share_conf);59575820 break;59585821 case SMB2_O_INFO_SECURITY:59595822 ksmbd_debug(SMB, "GOT SMB2_O_INFO_SECURITY\n");···8306817183078172 WORK_BUFFERS(work, req, rsp);8308817383098309- if (le16_to_cpu(req->Command) == SMB2_NEGOTIATE_HE)81748174+ if (le16_to_cpu(req->Command) == SMB2_NEGOTIATE_HE &&81758175+ conn->preauth_info)83108176 ksmbd_gen_preauth_integrity_hash(conn, (char *)rsp,83118177 conn->preauth_info->Preauth_HashValue);83128178···84148278 unsigned int buf_data_size = pdu_length + 4 -84158279 sizeof(struct smb2_transform_hdr);84168280 struct smb2_transform_hdr *tr_hdr = (struct smb2_transform_hdr *)buf;84178417- unsigned int orig_len = le32_to_cpu(tr_hdr->OriginalMessageSize);84188281 int rc = 0;84198419-84208420- sess = ksmbd_session_lookup_all(conn, le64_to_cpu(tr_hdr->SessionId));84218421- if (!sess) {84228422- pr_err("invalid session id(%llx) in transform header\n",84238423- le64_to_cpu(tr_hdr->SessionId));84248424- return -ECONNABORTED;84258425- }8426828284278283 if (pdu_length + 4 <84288284 sizeof(struct smb2_transform_hdr) + sizeof(struct smb2_hdr)) {···84238295 return -ECONNABORTED;84248296 }8425829784268426- 
if (pdu_length + 4 < orig_len + sizeof(struct smb2_transform_hdr)) {82988298+ if (pdu_length + 4 <82998299+ le32_to_cpu(tr_hdr->OriginalMessageSize) + sizeof(struct smb2_transform_hdr)) {84278300 pr_err("Transform message is broken\n");83018301+ return -ECONNABORTED;83028302+ }83038303+83048304+ sess = ksmbd_session_lookup_all(conn, le64_to_cpu(tr_hdr->SessionId));83058305+ if (!sess) {83068306+ pr_err("invalid session id(%llx) in transform header\n",83078307+ le64_to_cpu(tr_hdr->SessionId));84288308 return -ECONNABORTED;84298309 }84308310
+9
fs/ksmbd/smb2pdu.h
···14641464 char FileName[1];14651465} __packed; /* level 18 Query */1466146614671467+struct smb2_file_basic_info { /* data block encoding of response to level 18 */14681468+ __le64 CreationTime; /* Beginning of FILE_BASIC_INFO equivalent */14691469+ __le64 LastAccessTime;14701470+ __le64 LastWriteTime;14711471+ __le64 ChangeTime;14721472+ __le32 Attributes;14731473+ __u32 Pad1; /* End of FILE_BASIC_INFO_INFO equivalent */14741474+} __packed;14751475+14671476struct smb2_file_alt_name_info {14681477 __le32 FileNameLength;14691478 char FileName[0];
+28-19
fs/ksmbd/smb_common.c
···155155 */156156bool ksmbd_smb_request(struct ksmbd_conn *conn)157157{158158- int type = *(char *)conn->request_buf;159159-160160- switch (type) {161161- case RFC1002_SESSION_MESSAGE:162162- /* Regular SMB request */163163- return true;164164- case RFC1002_SESSION_KEEP_ALIVE:165165- ksmbd_debug(SMB, "RFC 1002 session keep alive\n");166166- break;167167- default:168168- ksmbd_debug(SMB, "RFC 1002 unknown request type 0x%x\n", type);169169- }170170-171171- return false;158158+ return conn->request_buf[0] == 0;172159}173160174161static bool supported_protocol(int idx)···169182 idx <= server_conf.max_protocol);170183}171184172172-static char *next_dialect(char *dialect, int *next_off)185185+static char *next_dialect(char *dialect, int *next_off, int bcount)173186{174187 dialect = dialect + *next_off;175175- *next_off = strlen(dialect);188188+ *next_off = strnlen(dialect, bcount);189189+ if (dialect[*next_off] != '\0')190190+ return NULL;176191 return dialect;177192}178193···189200 dialect = cli_dialects;190201 bcount = le16_to_cpu(byte_count);191202 do {192192- dialect = next_dialect(dialect, &next);203203+ dialect = next_dialect(dialect, &next, bcount);204204+ if (!dialect)205205+ break;193206 ksmbd_debug(SMB, "client requested dialect %s\n",194207 dialect);195208 if (!strcmp(dialect, smb1_protos[i].name)) {···239248240249static int ksmbd_negotiate_smb_dialect(void *buf)241250{242242- __le32 proto;251251+ int smb_buf_length = get_rfc1002_len(buf);252252+ __le32 proto = ((struct smb2_hdr *)buf)->ProtocolId;243253244244- proto = ((struct smb2_hdr *)buf)->ProtocolId;245254 if (proto == SMB2_PROTO_NUMBER) {246255 struct smb2_negotiate_req *req;256256+ int smb2_neg_size =257257+ offsetof(struct smb2_negotiate_req, Dialects) - 4;247258248259 req = (struct smb2_negotiate_req *)buf;260260+ if (smb2_neg_size > smb_buf_length)261261+ goto err_out;262262+263263+ if (smb2_neg_size + le16_to_cpu(req->DialectCount) * sizeof(__le16) >264264+ smb_buf_length)265265+ goto 
err_out;266266+249267 return ksmbd_lookup_dialect_by_id(req->Dialects,250268 req->DialectCount);251269 }···264264 struct smb_negotiate_req *req;265265266266 req = (struct smb_negotiate_req *)buf;267267+ if (le16_to_cpu(req->ByteCount) < 2)268268+ goto err_out;269269+270270+ if (offsetof(struct smb_negotiate_req, DialectsArray) - 4 +271271+ le16_to_cpu(req->ByteCount) > smb_buf_length) {272272+ goto err_out;273273+ }274274+267275 return ksmbd_lookup_dialect_by_name(req->DialectsArray,268276 req->ByteCount);269277 }270278279279+err_out:271280 return BAD_PROT_ID;272281}273282
-8
fs/ksmbd/smb_common.h
···4848#define CIFS_DEFAULT_IOSIZE (64 * 1024)4949#define MAX_CIFS_SMALL_BUFFER_SIZE 448 /* big enough for most */50505151-/* RFC 1002 session packet types */5252-#define RFC1002_SESSION_MESSAGE 0x005353-#define RFC1002_SESSION_REQUEST 0x815454-#define RFC1002_POSITIVE_SESSION_RESPONSE 0x825555-#define RFC1002_NEGATIVE_SESSION_RESPONSE 0x835656-#define RFC1002_RETARGET_SESSION_RESPONSE 0x845757-#define RFC1002_SESSION_KEEP_ALIVE 0x855858-5951/* Responses when opening a file. */6052#define F_SUPERSEDED 06153#define F_OPENED 1
+19-2
fs/ksmbd/smbacl.c
···380380{381381 int i, ret;382382 int num_aces = 0;383383- int acl_size;383383+ unsigned int acl_size;384384 char *acl_base;385385 struct smb_ace **ppace;386386 struct posix_acl_entry *cf_pace, *cf_pdace;···392392 return;393393394394 /* validate that we do not go past end of acl */395395- if (end_of_acl <= (char *)pdacl ||395395+ if (end_of_acl < (char *)pdacl + sizeof(struct smb_acl) ||396396 end_of_acl < (char *)pdacl + le16_to_cpu(pdacl->size)) {397397 pr_err("ACL too small to parse DACL\n");398398 return;···431431 * user/group/other have no permissions432432 */433433 for (i = 0; i < num_aces; ++i) {434434+ if (end_of_acl - acl_base < acl_size)435435+ break;436436+434437 ppace[i] = (struct smb_ace *)(acl_base + acl_size);435438 acl_base = (char *)ppace[i];439439+ acl_size = offsetof(struct smb_ace, sid) +440440+ offsetof(struct smb_sid, sub_auth);441441+442442+ if (end_of_acl - acl_base < acl_size ||443443+ ppace[i]->sid.num_subauth > SID_MAX_SUB_AUTHORITIES ||444444+ (end_of_acl - acl_base <445445+ acl_size + sizeof(__le32) * ppace[i]->sid.num_subauth) ||446446+ (le16_to_cpu(ppace[i]->size) <447447+ acl_size + sizeof(__le32) * ppace[i]->sid.num_subauth))448448+ break;449449+436450 acl_size = le16_to_cpu(ppace[i]->size);437451 ppace[i]->access_req =438452 smb_map_generic_desired_access(ppace[i]->access_req);···820806821807 if (!pntsd)822808 return -EIO;809809+810810+ if (acl_len < sizeof(struct smb_ntsd))811811+ return -EINVAL;823812824813 owner_sid_ptr = (struct smb_sid *)((char *)pntsd +825814 le32_to_cpu(pntsd->osidoffset));
+2-2
fs/ksmbd/transport_tcp.c
···215215 * ksmbd_kthread_fn() - listen to new SMB connections and callback server216216 * @p: arguments to forker thread217217 *218218- * Return: Returns a task_struct or ERR_PTR218218+ * Return: 0 on success, error number otherwise219219 */220220static int ksmbd_kthread_fn(void *p)221221{···387387/**388388 * create_socket - create socket for ksmbd/0389389 *390390- * Return: Returns a task_struct or ERR_PTR390390+ * Return: 0 on success, error number otherwise391391 */392392static int create_socket(struct interface *iface)393393{
+2-10
fs/vboxsf/super.c
···21212222#define VBOXSF_SUPER_MAGIC 0x786f4256 /* 'VBox' little endian */23232424-#define VBSF_MOUNT_SIGNATURE_BYTE_0 ('\000')2525-#define VBSF_MOUNT_SIGNATURE_BYTE_1 ('\377')2626-#define VBSF_MOUNT_SIGNATURE_BYTE_2 ('\376')2727-#define VBSF_MOUNT_SIGNATURE_BYTE_3 ('\375')2424+static const unsigned char VBSF_MOUNT_SIGNATURE[4] = "\000\377\376\375";28252926static int follow_symlinks;3027module_param(follow_symlinks, int, 0444);···383386384387static int vboxsf_parse_monolithic(struct fs_context *fc, void *data)385388{386386- unsigned char *options = data;387387-388388- if (options && options[0] == VBSF_MOUNT_SIGNATURE_BYTE_0 &&389389- options[1] == VBSF_MOUNT_SIGNATURE_BYTE_1 &&390390- options[2] == VBSF_MOUNT_SIGNATURE_BYTE_2 &&391391- options[3] == VBSF_MOUNT_SIGNATURE_BYTE_3) {389389+ if (data && !memcmp(data, VBSF_MOUNT_SIGNATURE, 4)) {392390 vbg_err("vboxsf: Old binary mount data not supported, remove obsolete mount.vboxsf and/or update your VBoxService.\n");393391 return -EINVAL;394392 }
+1-1
fs/verity/enable.c
···177177 * (level 0) and ascending to the root node (level 'num_levels - 1').178178 * Then at the end (level 'num_levels'), calculate the root hash.179179 */180180- blocks = (inode->i_size + params->block_size - 1) >>180180+ blocks = ((u64)inode->i_size + params->block_size - 1) >>181181 params->log_blocksize;182182 for (level = 0; level <= params->num_levels; level++) {183183 err = build_merkle_tree_level(filp, level, blocks, params,
+1-1
fs/verity/open.c
···8989 */90909191 /* Compute number of levels and the number of blocks in each level */9292- blocks = (inode->i_size + params->block_size - 1) >> log_blocksize;9292+ blocks = ((u64)inode->i_size + params->block_size - 1) >> log_blocksize;9393 pr_debug("Data is %lld bytes (%llu blocks)\n", inode->i_size, blocks);9494 while (blocks > 1) {9595 if (params->num_levels >= FS_VERITY_MAX_LEVELS) {
···578578 * programs only. Should not be used with normal calls and indirect calls.579579 */580580#define BPF_TRAMP_F_SKIP_FRAME BIT(2)581581-582581/* Store IP address of the caller on the trampoline stack,583582 * so it's available for trampoline's programs.584583 */585584#define BPF_TRAMP_F_IP_ARG BIT(3)585585+/* Return the return value of fentry prog. Only used by bpf_struct_ops. */586586+#define BPF_TRAMP_F_RET_FENTRY_RET BIT(4)586587587588/* Each call __bpf_prog_enter + call bpf_func + call __bpf_prog_exit is ~50588589 * bytes on x86. Pick a number to fit into BPF_IMAGE_SIZE / 2
+4-3
include/linux/cpumask.h
···996996 * cpumask; Typically used by bin_attribute to export cpumask bitmask997997 * ABI.998998 *999999- * Returns the length of how many bytes have been copied.999999+ * Returns the length of how many bytes have been copied, excluding10001000+ * terminating '\0'.10001001 */10011002static inline ssize_t10021003cpumap_print_bitmask_to_buf(char *buf, const struct cpumask *mask,10031004 loff_t off, size_t count)10041005{10051006 return bitmap_print_bitmask_to_buf(buf, cpumask_bits(mask),10061006- nr_cpu_ids, off, count);10071007+ nr_cpu_ids, off, count) - 1;10071008}1008100910091010/**···10191018 loff_t off, size_t count)10201019{10211020 return bitmap_print_list_to_buf(buf, cpumask_bits(mask),10221022- nr_cpu_ids, off, count);10211021+ nr_cpu_ids, off, count) - 1;10231022}1024102310251024#if NR_CPUS <= BITS_PER_LONG
+8-3
include/linux/fwnode.h
···2222 * LINKS_ADDED: The fwnode has already be parsed to add fwnode links.2323 * NOT_DEVICE: The fwnode will never be populated as a struct device.2424 * INITIALIZED: The hardware corresponding to fwnode has been initialized.2525+ * NEEDS_CHILD_BOUND_ON_ADD: For this fwnode/device to probe successfully, its2626+ * driver needs its child devices to be bound with2727+ * their respective drivers as soon as they are2828+ * added.2529 */2626-#define FWNODE_FLAG_LINKS_ADDED BIT(0)2727-#define FWNODE_FLAG_NOT_DEVICE BIT(1)2828-#define FWNODE_FLAG_INITIALIZED BIT(2)3030+#define FWNODE_FLAG_LINKS_ADDED BIT(0)3131+#define FWNODE_FLAG_NOT_DEVICE BIT(1)3232+#define FWNODE_FLAG_INITIALIZED BIT(2)3333+#define FWNODE_FLAG_NEEDS_CHILD_BOUND_ON_ADD BIT(3)29343035struct fwnode_handle {3136 struct fwnode_handle *secondary;
-6
include/linux/kvm_host.h
···608608 unsigned long mmu_notifier_range_start;609609 unsigned long mmu_notifier_range_end;610610#endif611611- long tlbs_dirty;612611 struct list_head devices;613612 u64 manual_dirty_log_protect;614613 struct dentry *debugfs_dentry;···718719 if (vcpu->vcpu_id == id)719720 return vcpu;720721 return NULL;721721-}722722-723723-static inline int kvm_vcpu_get_idx(struct kvm_vcpu *vcpu)724724-{725725- return vcpu->vcpu_idx;726722}727723728724#define kvm_for_each_memslot(memslot, slots) \
+6
include/linux/perf/arm_pmu.h
···163163static inline int arm_pmu_acpi_probe(armpmu_init_fn init_fn) { return 0; }164164#endif165165166166+#ifdef CONFIG_KVM167167+void kvm_host_pmu_init(struct arm_pmu *pmu);168168+#else169169+#define kvm_host_pmu_init(x) do { } while(0)170170+#endif171171+166172/* Internal functions only for core arm_pmu code */167173struct arm_pmu *armpmu_alloc(void);168174struct arm_pmu *armpmu_alloc_atomic(void);
+3-1
include/linux/perf_event.h
···683683 /*684684 * timestamp shadows the actual context timing but it can685685 * be safely used in NMI interrupt context. It reflects the686686- * context time as it was when the event was last scheduled in.686686+ * context time as it was when the event was last scheduled in,687687+ * or when ctx_sched_in failed to schedule the event because we688688+ * run out of PMC.687689 *688690 * ctx_time already accounts for ctx->timestamp. Therefore to689691 * compute ctx_time for a sample, simply add perf_clock().
···28182818 * Mac80211 drivers should set the @NL80211_EXT_FEATURE_CAN_REPLACE_PTK0 flag28192819 * when they are able to replace in-use PTK keys according to the following28202820 * requirements:28212821- * 1) They do not hand over frames decrypted with the old key to28222822- mac80211 once the call to set_key() with command %DISABLE_KEY has been28232823- completed when also setting @IEEE80211_KEY_FLAG_GENERATE_IV for any key,28212821+ * 1) They do not hand over frames decrypted with the old key to mac8021128222822+ once the call to set_key() with command %DISABLE_KEY has been completed,28242823 2) either drop or continue to use the old key for any outgoing frames queued28252824 at the time of the key deletion (including re-transmits),28262825 3) never send out a frame queued prior to the set_key() %SET_KEY command28272827- encrypted with the new key and28262826+ encrypted with the new key when also needing28272827+ @IEEE80211_KEY_FLAG_GENERATE_IV and28282828 4) never send out a frame unencrypted when it should be encrypted.28292829 Mac80211 will not queue any new frames for a deleted key to the driver.28302830 */
+1-1
include/net/nexthop.h
···325325 struct fib_nh_common *nhc = &nhi->fib_nhc;326326 int weight = nhg->nh_entries[i].weight;327327328328- if (fib_add_nexthop(skb, nhc, weight, rt_family) < 0)328328+ if (fib_add_nexthop(skb, nhc, weight, rt_family, 0) < 0)329329 return -EMSGSIZE;330330 }331331
+1
include/net/pkt_sched.h
···1111#include <uapi/linux/pkt_sched.h>12121313#define DEFAULT_TX_QUEUE_LEN 10001414+#define STAB_SIZE_LOG_MAX 3014151516struct qdisc_walker {1617 int stop;
+32-1
include/net/sock.h
···488488 u8 sk_prefer_busy_poll;489489 u16 sk_busy_poll_budget;490490#endif491491+ spinlock_t sk_peer_lock;491492 struct pid *sk_peer_pid;492493 const struct cred *sk_peer_cred;494494+493495 long sk_rcvtimeo;494496 ktime_t sk_stamp;495497#if BITS_PER_LONG==32···16251623 SINGLE_DEPTH_NESTING)16261624#define bh_unlock_sock(__sk) spin_unlock(&((__sk)->sk_lock.slock))1627162516281628-bool lock_sock_fast(struct sock *sk) __acquires(&sk->sk_lock.slock);16261626+bool __lock_sock_fast(struct sock *sk) __acquires(&sk->sk_lock.slock);16271627+16281628+/**16291629+ * lock_sock_fast - fast version of lock_sock16301630+ * @sk: socket16311631+ *16321632+ * This version should be used for very small section, where process wont block16331633+ * return false if fast path is taken:16341634+ *16351635+ * sk_lock.slock locked, owned = 0, BH disabled16361636+ *16371637+ * return true if slow path is taken:16381638+ *16391639+ * sk_lock.slock unlocked, owned = 1, BH enabled16401640+ */16411641+static inline bool lock_sock_fast(struct sock *sk)16421642+{16431643+ /* The sk_lock has mutex_lock() semantics here. */16441644+ mutex_acquire(&sk->sk_lock.dep_map, 0, 0, _RET_IP_);16451645+16461646+ return __lock_sock_fast(sk);16471647+}16481648+16491649+/* fast socket lock variant for caller already holding a [different] socket lock */16501650+static inline bool lock_sock_fast_nested(struct sock *sk)16511651+{16521652+ mutex_acquire(&sk->sk_lock.dep_map, SINGLE_DEPTH_NESTING, 0, _RET_IP_);16531653+16541654+ return __lock_sock_fast(sk);16551655+}1629165616301657/**16311658 * unlock_sock_fast - complement of lock_sock_fast
+1
include/sound/rawmidi.h
···9898 struct snd_rawmidi *rmidi;9999 struct snd_rawmidi_substream *input;100100 struct snd_rawmidi_substream *output;101101+ unsigned int user_pversion; /* supported protocol version */101102};102103103104struct snd_rawmidi_str {
···173173 size_t cnt, loff_t *ppos)174174{175175 char buf[16];176176+ unsigned int scaling;176177177178 if (cnt > 15)178179 cnt = 15;179180180181 if (copy_from_user(&buf, ubuf, cnt))181182 return -EFAULT;183183+ buf[cnt] = '\0';182184183183- if (kstrtouint(buf, 10, &sysctl_sched_tunable_scaling))185185+ if (kstrtouint(buf, 10, &scaling))184186 return -EINVAL;185187188188+ if (scaling >= SCHED_TUNABLESCALING_END)189189+ return -EINVAL;190190+191191+ sysctl_sched_tunable_scaling = scaling;186192 if (sched_update_scaling())187193 return -EINVAL;188194
+5-1
kernel/sched/fair.c
···49364936 /* update hierarchical throttle state */49374937 walk_tg_tree_from(cfs_rq->tg, tg_nop, tg_unthrottle_up, (void *)rq);4938493849394939- if (!cfs_rq->load.weight)49394939+ /* Nothing to run but something to decay (on_list)? Complete the branch */49404940+ if (!cfs_rq->load.weight) {49414941+ if (cfs_rq->on_list)49424942+ goto unthrottle_throttle;49404943 return;49444944+ }4941494549424946 task_delta = cfs_rq->h_nr_running;49434947 idle_task_delta = cfs_rq->idle_h_nr_running;
+9-5
net/bpf/test_run.c
···552552 __skb->gso_segs = skb_shinfo(skb)->gso_segs;553553}554554555555+static struct proto bpf_dummy_proto = {556556+ .name = "bpf_dummy",557557+ .owner = THIS_MODULE,558558+ .obj_size = sizeof(struct sock),559559+};560560+555561int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,556562 union bpf_attr __user *uattr)557563{···602596 break;603597 }604598605605- sk = kzalloc(sizeof(struct sock), GFP_USER);599599+ sk = sk_alloc(net, AF_UNSPEC, GFP_USER, &bpf_dummy_proto, 1);606600 if (!sk) {607601 kfree(data);608602 kfree(ctx);609603 return -ENOMEM;610604 }611611- sock_net_set(sk, net);612605 sock_init_data(NULL, sk);613606614607 skb = build_skb(data, 0);615608 if (!skb) {616609 kfree(data);617610 kfree(ctx);618618- kfree(sk);611611+ sk_free(sk);619612 return -ENOMEM;620613 }621614 skb->sk = sk;···687682 if (dev && dev != net->loopback_dev)688683 dev_put(dev);689684 kfree_skb(skb);690690- bpf_sk_storage_free(sk);691691- kfree(sk);685685+ sk_free(sk);692686 kfree(ctx);693687 return ret;694688}
···42424343static struct nf_hook_ops *rawtable_ops __read_mostly;44444545-static int __net_init iptable_raw_table_init(struct net *net)4545+static int iptable_raw_table_init(struct net *net)4646{4747 struct ipt_replace *repl;4848 const struct xt_table *table = &packet_raw;
+5-5
net/ipv4/udp.c
···10531053 __be16 dport;10541054 u8 tos;10551055 int err, is_udplite = IS_UDPLITE(sk);10561056- int corkreq = up->corkflag || msg->msg_flags&MSG_MORE;10561056+ int corkreq = READ_ONCE(up->corkflag) || msg->msg_flags&MSG_MORE;10571057 int (*getfrag)(void *, char *, int, int, int, struct sk_buff *);10581058 struct sk_buff *skb;10591059 struct ip_options_data opt_copy;···13611361 }1362136213631363 up->len += size;13641364- if (!(up->corkflag || (flags&MSG_MORE)))13641364+ if (!(READ_ONCE(up->corkflag) || (flags&MSG_MORE)))13651365 ret = udp_push_pending_frames(sk);13661366 if (!ret)13671367 ret = size;···26622662 switch (optname) {26632663 case UDP_CORK:26642664 if (val != 0) {26652665- up->corkflag = 1;26652665+ WRITE_ONCE(up->corkflag, 1);26662666 } else {26672667- up->corkflag = 0;26672667+ WRITE_ONCE(up->corkflag, 0);26682668 lock_sock(sk);26692669 push_pending_frames(sk);26702670 release_sock(sk);···2787278727882788 switch (optname) {27892789 case UDP_CORK:27902790- val = up->corkflag;27902790+ val = READ_ONCE(up->corkflag);27912791 break;2792279227932793 case UDP_ENCAP:
+1
net/ipv6/netfilter/ip6_tables.c
···273273 * things we don't know, ie. tcp syn flag or ports). If the274274 * rule is also a fragment-specific rule, non-fragments won't275275 * match it. */276276+ acpar.fragoff = 0;276277 acpar.hotdrop = false;277278 acpar.state = state;278279
···22/*33 * Copyright 2012-2013, Marco Porsch <marco.porsch@s2005.tu-chemnitz.de>44 * Copyright 2012-2013, cozybit Inc.55+ * Copyright (C) 2021 Intel Corporation56 */6778#include "mesh.h"···589588590589 /* only transmit to PS STA with announced, non-zero awake window */591590 if (test_sta_flag(sta, WLAN_STA_PS_STA) &&592592- (!elems->awake_window || !le16_to_cpu(*elems->awake_window)))591591+ (!elems->awake_window || !get_unaligned_le16(elems->awake_window)))593592 return;594593595594 if (!test_sta_flag(sta, WLAN_STA_MPSP_OWNER))
-4
net/mac80211/rate.c
···392392 int mcast_rate;393393 bool use_basicrate = false;394394395395- if (ieee80211_is_tx_data(txrc->skb) &&396396- info->flags & IEEE80211_TX_CTL_NO_ACK)397397- return false;398398-399395 if (!pubsta || rc_no_data_or_no_ack_use_min(txrc)) {400396 __rate_control_send_low(txrc->hw, sband, pubsta, info,401397 txrc->rate_idx_mask);
+2-1
net/mac80211/rx.c
···41314131 if (!bssid)41324132 return false;41334133 if (ether_addr_equal(sdata->vif.addr, hdr->addr2) ||41344134- ether_addr_equal(sdata->u.ibss.bssid, hdr->addr2))41344134+ ether_addr_equal(sdata->u.ibss.bssid, hdr->addr2) ||41354135+ !is_valid_ether_addr(hdr->addr2))41354136 return false;41364137 if (ieee80211_is_beacon(hdr->frame_control))41374138 return true;
+12
net/mac80211/tx.c
···22092209 }2210221022112211 vht_mcs = iterator.this_arg[4] >> 4;22122212+ if (vht_mcs > 11)22132213+ vht_mcs = 0;22122214 vht_nss = iterator.this_arg[4] & 0xF;22152215+ if (!vht_nss || vht_nss > 8)22162216+ vht_nss = 1;22132217 break;2214221822152219 /*···3383337933843380 if (!ieee80211_amsdu_prepare_head(sdata, fast_tx, head))33853381 goto out;33823382+33833383+ /* If n == 2, the "while (*frag_tail)" loop above didn't execute33843384+ * and frag_tail should be &skb_shinfo(head)->frag_list.33853385+ * However, ieee80211_amsdu_prepare_head() can reallocate it.33863386+ * Reload frag_tail to have it pointing to the correct place.33873387+ */33883388+ if (n == 2)33893389+ frag_tail = &skb_shinfo(head)->frag_list;3386339033873391 /*33883392 * Pad out the previous subframe to a multiple of 4 by adding the
+6
net/mac80211/wpa.c
···520520 return RX_DROP_UNUSABLE;521521 }522522523523+ /* reload hdr - skb might have been reallocated */524524+ hdr = (void *)rx->skb->data;525525+523526 data_len = skb->len - hdrlen - IEEE80211_CCMP_HDR_LEN - mic_len;524527 if (!rx->sta || data_len < 0)525528 return RX_DROP_UNUSABLE;···751748 if (skb_linearize(rx->skb))752749 return RX_DROP_UNUSABLE;753750 }751751+752752+ /* reload hdr - skb might have been reallocated */753753+ hdr = (void *)rx->skb->data;754754755755 data_len = skb->len - hdrlen - IEEE80211_GCMP_HDR_LEN - mic_len;756756 if (!rx->sta || data_len < 0)
+1-1
net/mptcp/mptcp_diag.c
···3636 struct sock *sk;37373838 net = sock_net(in_skb->sk);3939- msk = mptcp_token_get_sock(req->id.idiag_cookie[0]);3939+ msk = mptcp_token_get_sock(net, req->id.idiag_cookie[0]);4040 if (!msk)4141 goto out_nosk;4242
+1-3
net/mptcp/pm_netlink.c
···1718171817191719 list_for_each_entry(entry, &pernet->local_addr_list, list) {17201720 if (addresses_equal(&entry->addr, &addr.addr, true)) {17211721- ret = mptcp_nl_addr_backup(net, &entry->addr, bkup);17221722- if (ret)17231723- return ret;17211721+ mptcp_nl_addr_backup(net, &entry->addr, bkup);1724172217251723 if (bkup)17261724 entry->flags |= MPTCP_PM_ADDR_FLAG_BACKUP;
···709709void mptcp_token_accept(struct mptcp_subflow_request_sock *r,710710 struct mptcp_sock *msk);711711bool mptcp_token_exists(u32 token);712712-struct mptcp_sock *mptcp_token_get_sock(u32 token);712712+struct mptcp_sock *mptcp_token_get_sock(struct net *net, u32 token);713713struct mptcp_sock *mptcp_token_iter_next(const struct net *net, long *s_slot,714714 long *s_num);715715void mptcp_token_destroy(struct mptcp_sock *msk);
+1-1
net/mptcp/subflow.c
···8686 struct mptcp_sock *msk;8787 int local_id;88888989- msk = mptcp_token_get_sock(subflow_req->token);8989+ msk = mptcp_token_get_sock(sock_net(req_to_sk(req)), subflow_req->token);9090 if (!msk) {9191 SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINNOTOKEN);9292 return NULL;
+1-12
net/mptcp/syncookies.c
···108108109109 e->valid = 0;110110111111- msk = mptcp_token_get_sock(e->token);111111+ msk = mptcp_token_get_sock(net, e->token);112112 if (!msk) {113113 spin_unlock_bh(&join_entry_locks[i]);114114 return false;115115 }116116-117117- /* If this fails, the token got re-used in the mean time by another118118- * mptcp socket in a different netns, i.e. entry is outdated.119119- */120120- if (!net_eq(sock_net((struct sock *)msk), net))121121- goto err_put;122116123117 subflow_req->remote_nonce = e->remote_nonce;124118 subflow_req->local_nonce = e->local_nonce;···122128 subflow_req->msk = msk;123129 spin_unlock_bh(&join_entry_locks[i]);124130 return true;125125-126126-err_put:127127- spin_unlock_bh(&join_entry_locks[i]);128128- sock_put((struct sock *)msk);129129- return false;130131}131132132133void __init mptcp_join_cookie_init(void)
+8-3
net/mptcp/token.c
···231231232232/**233233 * mptcp_token_get_sock - retrieve mptcp connection sock using its token234234+ * @net: restrict to this namespace234235 * @token: token of the mptcp connection to retrieve235236 *236237 * This function returns the mptcp connection structure with the given token.···239238 *240239 * returns NULL if no connection with the given token value exists.241240 */242242-struct mptcp_sock *mptcp_token_get_sock(u32 token)241241+struct mptcp_sock *mptcp_token_get_sock(struct net *net, u32 token)243242{244243 struct hlist_nulls_node *pos;245244 struct token_bucket *bucket;···252251again:253252 sk_nulls_for_each_rcu(sk, pos, &bucket->msk_chain) {254253 msk = mptcp_sk(sk);255255- if (READ_ONCE(msk->token) != token)254254+ if (READ_ONCE(msk->token) != token ||255255+ !net_eq(sock_net(sk), net))256256 continue;257257+257258 if (!refcount_inc_not_zero(&sk->sk_refcnt))258259 goto not_found;259259- if (READ_ONCE(msk->token) != token) {260260+261261+ if (READ_ONCE(msk->token) != token ||262262+ !net_eq(sock_net(sk), net)) {260263 sock_put(sk);261264 goto again;262265 }
···130130{131131 size_t hsize;132132133133- /* We must fit both into u32 in jhash and size_t */133133+ /* We must fit both into u32 in jhash and INT_MAX in kvmalloc_node() */134134 if (hbits > 31)135135 return 0;136136 hsize = jhash_size(hbits);137137- if ((((size_t)-1) - sizeof(struct htable)) / sizeof(struct hbucket *)137137+ if ((INT_MAX - sizeof(struct htable)) / sizeof(struct hbucket *)138138 < hsize)139139 return 0;140140
+4
net/netfilter/ipvs/ip_vs_conn.c
···14681468 int idx;1469146914701470 /* Compute size and mask */14711471+ if (ip_vs_conn_tab_bits < 8 || ip_vs_conn_tab_bits > 20) {14721472+ pr_info("conn_tab_bits not in [8, 20]. Using default value\n");14731473+ ip_vs_conn_tab_bits = CONFIG_IP_VS_TAB_BITS;14741474+ }14711475 ip_vs_conn_tab_size = 1 << ip_vs_conn_tab_bits;14721476 ip_vs_conn_tab_mask = ip_vs_conn_tab_size - 1;14731477
+100-54
net/netfilter/nf_conntrack_core.c
···7474static DEFINE_SPINLOCK(nf_conntrack_locks_all_lock);7575static __read_mostly bool nf_conntrack_locks_all;76767777+/* serialize hash resizes and nf_ct_iterate_cleanup */7878+static DEFINE_MUTEX(nf_conntrack_mutex);7979+7780#define GC_SCAN_INTERVAL (120u * HZ)7881#define GC_SCAN_MAX_DURATION msecs_to_jiffies(10)79828080-#define MAX_CHAINLEN 64u8383+#define MIN_CHAINLEN 8u8484+#define MAX_CHAINLEN (32u - MIN_CHAINLEN)81858286static struct conntrack_gc_work conntrack_gc_work;8387···192188static siphash_key_t nf_conntrack_hash_rnd __read_mostly;193189194190static u32 hash_conntrack_raw(const struct nf_conntrack_tuple *tuple,191191+ unsigned int zoneid,195192 const struct net *net)196193{197194 struct {198195 struct nf_conntrack_man src;199196 union nf_inet_addr dst_addr;197197+ unsigned int zone;200198 u32 net_mix;201199 u16 dport;202200 u16 proto;···211205 /* The direction must be ignored, so handle usable members manually. */212206 combined.src = tuple->src;213207 combined.dst_addr = tuple->dst.u3;208208+ combined.zone = zoneid;214209 combined.net_mix = net_hash_mix(net);215210 combined.dport = (__force __u16)tuple->dst.u.all;216211 combined.proto = tuple->dst.protonum;···226219227220static u32 __hash_conntrack(const struct net *net,228221 const struct nf_conntrack_tuple *tuple,222222+ unsigned int zoneid,229223 unsigned int size)230224{231231- return reciprocal_scale(hash_conntrack_raw(tuple, net), size);225225+ return reciprocal_scale(hash_conntrack_raw(tuple, zoneid, net), size);232226}233227234228static u32 hash_conntrack(const struct net *net,235235- const struct nf_conntrack_tuple *tuple)229229+ const struct nf_conntrack_tuple *tuple,230230+ unsigned int zoneid)236231{237237- return scale_hash(hash_conntrack_raw(tuple, net));232232+ return scale_hash(hash_conntrack_raw(tuple, zoneid, net));238233}239234240235static bool nf_ct_get_tuple_ports(const struct sk_buff *skb,···659650 do {660651 sequence = read_seqcount_begin(&nf_conntrack_generation);661652 hash 
= hash_conntrack(net,662662- &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);653653+ &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,654654+ nf_ct_zone_id(nf_ct_zone(ct), IP_CT_DIR_ORIGINAL));663655 reply_hash = hash_conntrack(net,664664- &ct->tuplehash[IP_CT_DIR_REPLY].tuple);656656+ &ct->tuplehash[IP_CT_DIR_REPLY].tuple,657657+ nf_ct_zone_id(nf_ct_zone(ct), IP_CT_DIR_REPLY));665658 } while (nf_conntrack_double_lock(net, hash, reply_hash, sequence));666659667660 clean_from_lists(ct);···830819nf_conntrack_find_get(struct net *net, const struct nf_conntrack_zone *zone,831820 const struct nf_conntrack_tuple *tuple)832821{833833- return __nf_conntrack_find_get(net, zone, tuple,834834- hash_conntrack_raw(tuple, net));822822+ unsigned int rid, zone_id = nf_ct_zone_id(zone, IP_CT_DIR_ORIGINAL);823823+ struct nf_conntrack_tuple_hash *thash;824824+825825+ thash = __nf_conntrack_find_get(net, zone, tuple,826826+ hash_conntrack_raw(tuple, zone_id, net));827827+828828+ if (thash)829829+ return thash;830830+831831+ rid = nf_ct_zone_id(zone, IP_CT_DIR_REPLY);832832+ if (rid != zone_id)833833+ return __nf_conntrack_find_get(net, zone, tuple,834834+ hash_conntrack_raw(tuple, rid, net));835835+ return thash;835836}836837EXPORT_SYMBOL_GPL(nf_conntrack_find_get);837838···865842 unsigned int hash, reply_hash;866843 struct nf_conntrack_tuple_hash *h;867844 struct hlist_nulls_node *n;845845+ unsigned int max_chainlen;868846 unsigned int chainlen = 0;869847 unsigned int sequence;870848 int err = -EEXIST;···876852 do {877853 sequence = read_seqcount_begin(&nf_conntrack_generation);878854 hash = hash_conntrack(net,879879- &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);855855+ &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,856856+ nf_ct_zone_id(nf_ct_zone(ct), IP_CT_DIR_ORIGINAL));880857 reply_hash = hash_conntrack(net,881881- &ct->tuplehash[IP_CT_DIR_REPLY].tuple);858858+ &ct->tuplehash[IP_CT_DIR_REPLY].tuple,859859+ nf_ct_zone_id(nf_ct_zone(ct), IP_CT_DIR_REPLY));882860 } while (nf_conntrack_double_lock(net, 
hash, reply_hash, sequence));861861+862862+ max_chainlen = MIN_CHAINLEN + prandom_u32_max(MAX_CHAINLEN);883863884864 /* See if there's one in the list already, including reverse */885865 hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[hash], hnnode) {···891863 zone, net))892864 goto out;893865894894- if (chainlen++ > MAX_CHAINLEN)866866+ if (chainlen++ > max_chainlen)895867 goto chaintoolong;896868 }897869···901873 if (nf_ct_key_equal(h, &ct->tuplehash[IP_CT_DIR_REPLY].tuple,902874 zone, net))903875 goto out;904904- if (chainlen++ > MAX_CHAINLEN)876876+ if (chainlen++ > max_chainlen)905877 goto chaintoolong;906878 }907879···11311103int11321104__nf_conntrack_confirm(struct sk_buff *skb)11331105{11061106+ unsigned int chainlen = 0, sequence, max_chainlen;11341107 const struct nf_conntrack_zone *zone;11351135- unsigned int chainlen = 0, sequence;11361108 unsigned int hash, reply_hash;11371109 struct nf_conntrack_tuple_hash *h;11381110 struct nf_conn *ct;···11611133 hash = *(unsigned long *)&ct->tuplehash[IP_CT_DIR_REPLY].hnnode.pprev;11621134 hash = scale_hash(hash);11631135 reply_hash = hash_conntrack(net,11641164- &ct->tuplehash[IP_CT_DIR_REPLY].tuple);11651165-11361136+ &ct->tuplehash[IP_CT_DIR_REPLY].tuple,11371137+ nf_ct_zone_id(nf_ct_zone(ct), IP_CT_DIR_REPLY));11661138 } while (nf_conntrack_double_lock(net, hash, reply_hash, sequence));1167113911681140 /* We're not in hash table, and we refuse to set up related···11961168 goto dying;11971169 }1198117011711171+ max_chainlen = MIN_CHAINLEN + prandom_u32_max(MAX_CHAINLEN);11991172 /* See if there's one in the list already, including reverse:12001173 NAT could have grabbed it without realizing, since we're12011174 not in the hash. If there is, we lost race. 
*/···12041175 if (nf_ct_key_equal(h, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,12051176 zone, net))12061177 goto out;12071207- if (chainlen++ > MAX_CHAINLEN)11781178+ if (chainlen++ > max_chainlen)12081179 goto chaintoolong;12091180 }12101181···12131184 if (nf_ct_key_equal(h, &ct->tuplehash[IP_CT_DIR_REPLY].tuple,12141185 zone, net))12151186 goto out;12161216- if (chainlen++ > MAX_CHAINLEN) {11871187+ if (chainlen++ > max_chainlen) {12171188chaintoolong:12181189 nf_ct_add_to_dying_list(ct);12191190 NF_CT_STAT_INC(net, chaintoolong);···12751246 rcu_read_lock();12761247 begin:12771248 nf_conntrack_get_ht(&ct_hash, &hsize);12781278- hash = __hash_conntrack(net, tuple, hsize);12491249+ hash = __hash_conntrack(net, tuple, nf_ct_zone_id(zone, IP_CT_DIR_REPLY), hsize);1279125012801251 hlist_nulls_for_each_entry_rcu(h, n, &ct_hash[hash], hnnode) {12811252 ct = nf_ct_tuplehash_to_ctrack(h);···17161687 struct nf_conntrack_tuple_hash *h;17171688 enum ip_conntrack_info ctinfo;17181689 struct nf_conntrack_zone tmp;16901690+ u32 hash, zone_id, rid;17191691 struct nf_conn *ct;17201720- u32 hash;1721169217221693 if (!nf_ct_get_tuple(skb, skb_network_offset(skb),17231694 dataoff, state->pf, protonum, state->net,···1728169917291700 /* look for tuple match */17301701 zone = nf_ct_zone_tmpl(tmpl, skb, &tmp);17311731- hash = hash_conntrack_raw(&tuple, state->net);17021702+17031703+ zone_id = nf_ct_zone_id(zone, IP_CT_DIR_ORIGINAL);17041704+ hash = hash_conntrack_raw(&tuple, zone_id, state->net);17321705 h = __nf_conntrack_find_get(state->net, zone, &tuple, hash);17061706+17071707+ if (!h) {17081708+ rid = nf_ct_zone_id(zone, IP_CT_DIR_REPLY);17091709+ if (zone_id != rid) {17101710+ u32 tmp = hash_conntrack_raw(&tuple, rid, state->net);17111711+17121712+ h = __nf_conntrack_find_get(state->net, zone, &tuple, tmp);17131713+ }17141714+ }17151715+17331716 if (!h) {17341717 h = init_conntrack(state->net, tmpl, &tuple,17351718 skb, dataoff, hash);···22662225 spinlock_t *lockp;2267222622682227 
for (; *bucket < nf_conntrack_htable_size; (*bucket)++) {22282228+ struct hlist_nulls_head *hslot = &nf_conntrack_hash[*bucket];22292229+22302230+ if (hlist_nulls_empty(hslot))22312231+ continue;22322232+22692233 lockp = &nf_conntrack_locks[*bucket % CONNTRACK_LOCKS];22702234 local_bh_disable();22712235 nf_conntrack_lock(lockp);22722272- if (*bucket < nf_conntrack_htable_size) {22732273- hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[*bucket], hnnode) {22742274- if (NF_CT_DIRECTION(h) != IP_CT_DIR_REPLY)22752275- continue;22762276- /* All nf_conn objects are added to hash table twice, one22772277- * for original direction tuple, once for the reply tuple.22782278- *22792279- * Exception: In the IPS_NAT_CLASH case, only the reply22802280- * tuple is added (the original tuple already existed for22812281- * a different object).22822282- *22832283- * We only need to call the iterator once for each22842284- * conntrack, so we just use the 'reply' direction22852285- * tuple while iterating.22862286- */22872287- ct = nf_ct_tuplehash_to_ctrack(h);22882288- if (iter(ct, data))22892289- goto found;22902290- }22362236+ hlist_nulls_for_each_entry(h, n, hslot, hnnode) {22372237+ if (NF_CT_DIRECTION(h) != IP_CT_DIR_REPLY)22382238+ continue;22392239+ /* All nf_conn objects are added to hash table twice, one22402240+ * for original direction tuple, once for the reply tuple.22412241+ *22422242+ * Exception: In the IPS_NAT_CLASH case, only the reply22432243+ * tuple is added (the original tuple already existed for22442244+ * a different object).22452245+ *22462246+ * We only need to call the iterator once for each22472247+ * conntrack, so we just use the 'reply' direction22482248+ * tuple while iterating.22492249+ */22502250+ ct = nf_ct_tuplehash_to_ctrack(h);22512251+ if (iter(ct, data))22522252+ goto found;22912253 }22922254 spin_unlock(lockp);22932255 local_bh_enable();···23082264static void nf_ct_iterate_cleanup(int (*iter)(struct nf_conn *i, void *data),23092265 void *data, 
u32 portid, int report)23102266{23112311- unsigned int bucket = 0, sequence;22672267+ unsigned int bucket = 0;23122268 struct nf_conn *ct;2313226923142270 might_sleep();2315227123162316- for (;;) {23172317- sequence = read_seqcount_begin(&nf_conntrack_generation);22722272+ mutex_lock(&nf_conntrack_mutex);22732273+ while ((ct = get_next_corpse(iter, data, &bucket)) != NULL) {22742274+ /* Time to push up daises... */2318227523192319- while ((ct = get_next_corpse(iter, data, &bucket)) != NULL) {23202320- /* Time to push up daises... */23212321-23222322- nf_ct_delete(ct, portid, report);23232323- nf_ct_put(ct);23242324- cond_resched();23252325- }23262326-23272327- if (!read_seqcount_retry(&nf_conntrack_generation, sequence))23282328- break;23292329- bucket = 0;22762276+ nf_ct_delete(ct, portid, report);22772277+ nf_ct_put(ct);22782278+ cond_resched();23302279 }22802280+ mutex_unlock(&nf_conntrack_mutex);23312281}2332228223332283struct iter_data {···25572519 if (!hash)25582520 return -ENOMEM;2559252125222522+ mutex_lock(&nf_conntrack_mutex);25602523 old_size = nf_conntrack_htable_size;25612524 if (old_size == hashsize) {25252525+ mutex_unlock(&nf_conntrack_mutex);25622526 kvfree(hash);25632527 return 0;25642528 }···2577253725782538 for (i = 0; i < nf_conntrack_htable_size; i++) {25792539 while (!hlist_nulls_empty(&nf_conntrack_hash[i])) {25402540+ unsigned int zone_id;25412541+25802542 h = hlist_nulls_entry(nf_conntrack_hash[i].first,25812543 struct nf_conntrack_tuple_hash, hnnode);25822544 ct = nf_ct_tuplehash_to_ctrack(h);25832545 hlist_nulls_del_rcu(&h->hnnode);25462546+25472547+ zone_id = nf_ct_zone_id(nf_ct_zone(ct), NF_CT_DIRECTION(h));25842548 bucket = __hash_conntrack(nf_ct_net(ct),25852585- &h->tuple, hashsize);25492549+ &h->tuple, zone_id, hashsize);25862550 hlist_nulls_add_head_rcu(&h->hnnode, &hash[bucket]);25872551 }25882552 }···25992555 write_seqcount_end(&nf_conntrack_generation);26002556 nf_conntrack_all_unlock();26012557 
local_bh_enable();25582558+25592559+ mutex_unlock(&nf_conntrack_mutex);2602256026032561 synchronize_net();26042562 kvfree(old_hash);
+12-5
net/netfilter/nf_nat_core.c
···150150151151/* We keep an extra hash for each conntrack, for fast searching. */152152static unsigned int153153-hash_by_src(const struct net *n, const struct nf_conntrack_tuple *tuple)153153+hash_by_src(const struct net *net,154154+ const struct nf_conntrack_zone *zone,155155+ const struct nf_conntrack_tuple *tuple)154156{155157 unsigned int hash;156158 struct {157159 struct nf_conntrack_man src;158160 u32 net_mix;159161 u32 protonum;162162+ u32 zone;160163 } __aligned(SIPHASH_ALIGNMENT) combined;161164162165 get_random_once(&nf_nat_hash_rnd, sizeof(nf_nat_hash_rnd));···168165169166 /* Original src, to ensure we map it consistently if poss. */170167 combined.src = tuple->src;171171- combined.net_mix = net_hash_mix(n);168168+ combined.net_mix = net_hash_mix(net);172169 combined.protonum = tuple->dst.protonum;170170+171171+ /* Zone ID can be used provided its valid for both directions */172172+ if (zone->dir == NF_CT_DEFAULT_ZONE_DIR)173173+ combined.zone = zone->id;173174174175 hash = siphash(&combined, sizeof(combined), &nf_nat_hash_rnd);175176···279272 struct nf_conntrack_tuple *result,280273 const struct nf_nat_range2 *range)281274{282282- unsigned int h = hash_by_src(net, tuple);275275+ unsigned int h = hash_by_src(net, zone, tuple);283276 const struct nf_conn *ct;284277285278 hlist_for_each_entry_rcu(ct, &nf_nat_bysource[h], nat_bysource) {···626619 unsigned int srchash;627620 spinlock_t *lock;628621629629- srchash = hash_by_src(net,622622+ srchash = hash_by_src(net, nf_ct_zone(ct),630623 &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);631624 lock = &nf_nat_locks[srchash % CONNTRACK_LOCKS];632625 spin_lock_bh(lock);···795788{796789 unsigned int h;797790798798- h = hash_by_src(nf_ct_net(ct), &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);791791+ h = hash_by_src(nf_ct_net(ct), nf_ct_zone(ct), &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);799792 spin_lock_bh(&nf_nat_locks[h % CONNTRACK_LOCKS]);800793 hlist_del_rcu(&ct->nat_bysource);801794 spin_unlock_bh(&nf_nat_locks[h % 
CONNTRACK_LOCKS]);
+97-71
net/netfilter/nf_nat_masquerade.c
···991010#include <net/netfilter/nf_nat_masquerade.h>11111212+struct masq_dev_work {1313+ struct work_struct work;1414+ struct net *net;1515+ union nf_inet_addr addr;1616+ int ifindex;1717+ int (*iter)(struct nf_conn *i, void *data);1818+};1919+2020+#define MAX_MASQ_WORKER_COUNT 162121+1222static DEFINE_MUTEX(masq_mutex);1323static unsigned int masq_refcnt __read_mostly;2424+static atomic_t masq_worker_count __read_mostly;14251526unsigned int1627nf_nat_masquerade_ipv4(struct sk_buff *skb, unsigned int hooknum,···7463}7564EXPORT_SYMBOL_GPL(nf_nat_masquerade_ipv4);76657777-static int device_cmp(struct nf_conn *i, void *ifindex)6666+static void iterate_cleanup_work(struct work_struct *work)6767+{6868+ struct masq_dev_work *w;6969+7070+ w = container_of(work, struct masq_dev_work, work);7171+7272+ nf_ct_iterate_cleanup_net(w->net, w->iter, (void *)w, 0, 0);7373+7474+ put_net(w->net);7575+ kfree(w);7676+ atomic_dec(&masq_worker_count);7777+ module_put(THIS_MODULE);7878+}7979+8080+/* Iterate conntrack table in the background and remove conntrack entries8181+ * that use the device/address being removed.8282+ *8383+ * In case too many work items have been queued already or memory allocation8484+ * fails iteration is skipped, conntrack entries will time out eventually.8585+ */8686+static void nf_nat_masq_schedule(struct net *net, union nf_inet_addr *addr,8787+ int ifindex,8888+ int (*iter)(struct nf_conn *i, void *data),8989+ gfp_t gfp_flags)9090+{9191+ struct masq_dev_work *w;9292+9393+ if (atomic_read(&masq_worker_count) > MAX_MASQ_WORKER_COUNT)9494+ return;9595+9696+ net = maybe_get_net(net);9797+ if (!net)9898+ return;9999+100100+ if (!try_module_get(THIS_MODULE))101101+ goto err_module;102102+103103+ w = kzalloc(sizeof(*w), gfp_flags);104104+ if (w) {105105+ /* We can overshoot MAX_MASQ_WORKER_COUNT, no big deal */106106+ atomic_inc(&masq_worker_count);107107+108108+ INIT_WORK(&w->work, iterate_cleanup_work);109109+ w->ifindex = ifindex;110110+ w->net = net;111111+ 
w->iter = iter;112112+ if (addr)113113+ w->addr = *addr;114114+ schedule_work(&w->work);115115+ return;116116+ }117117+118118+ module_put(THIS_MODULE);119119+ err_module:120120+ put_net(net);121121+}122122+123123+static int device_cmp(struct nf_conn *i, void *arg)78124{79125 const struct nf_conn_nat *nat = nfct_nat(i);126126+ const struct masq_dev_work *w = arg;8012781128 if (!nat)82129 return 0;8383- return nat->masq_index == (int)(long)ifindex;130130+ return nat->masq_index == w->ifindex;84131}8513286133static int masq_device_event(struct notifier_block *this,···15485 * and forget them.15586 */15687157157- nf_ct_iterate_cleanup_net(net, device_cmp,158158- (void *)(long)dev->ifindex, 0, 0);8888+ nf_nat_masq_schedule(net, NULL, dev->ifindex,8989+ device_cmp, GFP_KERNEL);15990 }1609116192 return NOTIFY_DONE;···1639416495static int inet_cmp(struct nf_conn *ct, void *ptr)16596{166166- struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;167167- struct net_device *dev = ifa->ifa_dev->dev;16897 struct nf_conntrack_tuple *tuple;9898+ struct masq_dev_work *w = ptr;16999170170- if (!device_cmp(ct, (void *)(long)dev->ifindex))100100+ if (!device_cmp(ct, ptr))171101 return 0;172102173103 tuple = &ct->tuplehash[IP_CT_DIR_REPLY].tuple;174104175175- return ifa->ifa_address == tuple->dst.u3.ip;105105+ return nf_inet_addr_cmp(&w->addr, &tuple->dst.u3);176106}177107178108static int masq_inet_event(struct notifier_block *this,179109 unsigned long event,180110 void *ptr)181111{182182- struct in_device *idev = ((struct in_ifaddr *)ptr)->ifa_dev;183183- struct net *net = dev_net(idev->dev);112112+ const struct in_ifaddr *ifa = ptr;113113+ const struct in_device *idev;114114+ const struct net_device *dev;115115+ union nf_inet_addr addr;116116+117117+ if (event != NETDEV_DOWN)118118+ return NOTIFY_DONE;184119185120 /* The masq_dev_notifier will catch the case of the device going186121 * down. So if the inetdev is dead and being destroyed we have187122 * no work to do. 
Otherwise this is an individual address removal188123 * and we have to perform the flush.189124 */125125+ idev = ifa->ifa_dev;190126 if (idev->dead)191127 return NOTIFY_DONE;192128193193- if (event == NETDEV_DOWN)194194- nf_ct_iterate_cleanup_net(net, inet_cmp, ptr, 0, 0);129129+ memset(&addr, 0, sizeof(addr));130130+131131+ addr.ip = ifa->ifa_address;132132+133133+ dev = idev->dev;134134+ nf_nat_masq_schedule(dev_net(idev->dev), &addr, dev->ifindex,135135+ inet_cmp, GFP_KERNEL);195136196137 return NOTIFY_DONE;197138}···215136};216137217138#if IS_ENABLED(CONFIG_IPV6)218218-static atomic_t v6_worker_count __read_mostly;219219-220139static int221140nat_ipv6_dev_get_saddr(struct net *net, const struct net_device *dev,222141 const struct in6_addr *daddr, unsigned int srcprefs,···264187}265188EXPORT_SYMBOL_GPL(nf_nat_masquerade_ipv6);266189267267-struct masq_dev_work {268268- struct work_struct work;269269- struct net *net;270270- struct in6_addr addr;271271- int ifindex;272272-};273273-274274-static int inet6_cmp(struct nf_conn *ct, void *work)275275-{276276- struct masq_dev_work *w = (struct masq_dev_work *)work;277277- struct nf_conntrack_tuple *tuple;278278-279279- if (!device_cmp(ct, (void *)(long)w->ifindex))280280- return 0;281281-282282- tuple = &ct->tuplehash[IP_CT_DIR_REPLY].tuple;283283-284284- return ipv6_addr_equal(&w->addr, &tuple->dst.u3.in6);285285-}286286-287287-static void iterate_cleanup_work(struct work_struct *work)288288-{289289- struct masq_dev_work *w;290290-291291- w = container_of(work, struct masq_dev_work, work);292292-293293- nf_ct_iterate_cleanup_net(w->net, inet6_cmp, (void *)w, 0, 0);294294-295295- put_net(w->net);296296- kfree(w);297297- atomic_dec(&v6_worker_count);298298- module_put(THIS_MODULE);299299-}300300-301190/* atomic notifier; can't call nf_ct_iterate_cleanup_net (it can sleep).302191 *303192 * Defer it to the system workqueue.···276233{277234 struct inet6_ifaddr *ifa = ptr;278235 const struct net_device *dev;279279- struct 
masq_dev_work *w;280280- struct net *net;236236+ union nf_inet_addr addr;281237282282- if (event != NETDEV_DOWN || atomic_read(&v6_worker_count) >= 16)238238+ if (event != NETDEV_DOWN)283239 return NOTIFY_DONE;284240285241 dev = ifa->idev->dev;286286- net = maybe_get_net(dev_net(dev));287287- if (!net)288288- return NOTIFY_DONE;289242290290- if (!try_module_get(THIS_MODULE))291291- goto err_module;243243+ memset(&addr, 0, sizeof(addr));292244293293- w = kmalloc(sizeof(*w), GFP_ATOMIC);294294- if (w) {295295- atomic_inc(&v6_worker_count);245245+ addr.in6 = ifa->addr;296246297297- INIT_WORK(&w->work, iterate_cleanup_work);298298- w->ifindex = dev->ifindex;299299- w->net = net;300300- w->addr = ifa->addr;301301- schedule_work(&w->work);302302-303303- return NOTIFY_DONE;304304- }305305-306306- module_put(THIS_MODULE);307307- err_module:308308- put_net(net);247247+ nf_nat_masq_schedule(dev_net(dev), &addr, dev->ifindex, inet_cmp,248248+ GFP_ATOMIC);309249 return NOTIFY_DONE;310250}311251
+19-11
net/netfilter/nf_tables_api.c
···43364336 if (ops->privsize != NULL)43374337 size = ops->privsize(nla, &desc);43384338 alloc_size = sizeof(*set) + size + udlen;43394339- if (alloc_size < size)43394339+ if (alloc_size < size || alloc_size > INT_MAX)43404340 return -ENOMEM;43414341 set = kvzalloc(alloc_size, GFP_KERNEL);43424342 if (!set)···95999599 table->use--;96009600 nf_tables_chain_destroy(&ctx);96019601 }96029602- list_del(&table->list);96039602 nf_tables_table_destroy(&ctx);96049603}96059604···96119612 if (nft_table_has_owner(table))96129613 continue;9613961496159615+ list_del(&table->list);96169616+96149617 __nft_release_table(net, table);96159618 }96169619}···96209619static int nft_rcv_nl_event(struct notifier_block *this, unsigned long event,96219620 void *ptr)96229621{96229622+ struct nft_table *table, *to_delete[8];96239623 struct nftables_pernet *nft_net;96249624 struct netlink_notify *n = ptr;96259625- struct nft_table *table, *nt;96269625 struct net *net = n->net;96279627- bool release = false;96269626+ unsigned int deleted;96279627+ bool restart = false;9628962896299629 if (event != NETLINK_URELEASE || n->protocol != NETLINK_NETFILTER)96309630 return NOTIFY_DONE;9631963196329632 nft_net = nft_pernet(net);96339633+ deleted = 0;96339634 mutex_lock(&nft_net->commit_mutex);96359635+again:96349636 list_for_each_entry(table, &nft_net->tables, list) {96359637 if (nft_table_has_owner(table) &&96369638 n->portid == table->nlpid) {96379639 __nft_release_hook(net, table);96389638- release = true;96409640+ list_del_rcu(&table->list);96419641+ to_delete[deleted++] = table;96429642+ if (deleted >= ARRAY_SIZE(to_delete))96439643+ break;96399644 }96409645 }96419641- if (release) {96469646+ if (deleted) {96479647+ restart = deleted >= ARRAY_SIZE(to_delete);96429648 synchronize_rcu();96439643- list_for_each_entry_safe(table, nt, &nft_net->tables, list) {96449644- if (nft_table_has_owner(table) &&96459645- n->portid == table->nlpid)96469646- __nft_release_table(net, table);96479647- }96499649+ while 
(deleted)96509650+ __nft_release_table(net, to_delete[--deleted]);96519651+96529652+ if (restart)96539653+ goto again;96489654 }96499655 mutex_unlock(&nft_net->commit_mutex);96509656
+16-1
net/netfilter/nft_compat.c
···1919#include <linux/netfilter_bridge/ebtables.h>2020#include <linux/netfilter_arp/arp_tables.h>2121#include <net/netfilter/nf_tables.h>2222+#include <net/netfilter/nf_log.h>22232324/* Used for matches where *info is larger than X byte */2425#define NFT_MATCH_LARGE_THRESH 192···258257 nft_compat_wait_for_destructors();259258260259 ret = xt_check_target(&par, size, proto, inv);261261- if (ret < 0)260260+ if (ret < 0) {261261+ if (ret == -ENOENT) {262262+ const char *modname = NULL;263263+264264+ if (strcmp(target->name, "LOG") == 0)265265+ modname = "nf_log_syslog";266266+ else if (strcmp(target->name, "NFLOG") == 0)267267+ modname = "nfnetlink_log";268268+269269+ if (modname &&270270+ nft_request_module(ctx->net, "%s", modname) == -EAGAIN)271271+ return -EAGAIN;272272+ }273273+262274 return ret;275275+ }263276264277 /* The standard target cannot be used */265278 if (!target->target)
+9-1
net/netfilter/xt_LOG.c
···4444static int log_tg_check(const struct xt_tgchk_param *par)4545{4646 const struct xt_log_info *loginfo = par->targinfo;4747+ int ret;47484849 if (par->family != NFPROTO_IPV4 && par->family != NFPROTO_IPV6)4950 return -EINVAL;···5958 return -EINVAL;6059 }61606262- return nf_logger_find_get(par->family, NF_LOG_TYPE_LOG);6161+ ret = nf_logger_find_get(par->family, NF_LOG_TYPE_LOG);6262+ if (ret != 0 && !par->nft_compat) {6363+ request_module("%s", "nf_log_syslog");6464+6565+ ret = nf_logger_find_get(par->family, NF_LOG_TYPE_LOG);6666+ }6767+6868+ return ret;6369}64706571static void log_tg_destroy(const struct xt_tgdtor_param *par)
+9-1
net/netfilter/xt_NFLOG.c
···4242static int nflog_tg_check(const struct xt_tgchk_param *par)4343{4444 const struct xt_nflog_info *info = par->targinfo;4545+ int ret;45464647 if (info->flags & ~XT_NFLOG_MASK)4748 return -EINVAL;4849 if (info->prefix[sizeof(info->prefix) - 1] != '\0')4950 return -EINVAL;50515151- return nf_logger_find_get(par->family, NF_LOG_TYPE_ULOG);5252+ ret = nf_logger_find_get(par->family, NF_LOG_TYPE_ULOG);5353+ if (ret != 0 && !par->nft_compat) {5454+ request_module("%s", "nfnetlink_log");5555+5656+ ret = nf_logger_find_get(par->family, NF_LOG_TYPE_ULOG);5757+ }5858+5959+ return ret;5260}53615462static void nflog_tg_destroy(const struct xt_tgdtor_param *par)
···702702 ch = skb_header_pointer(skb, offset, sizeof(*ch), &_ch);703703704704 /* Break out if chunk length is less then minimal. */705705- if (ntohs(ch->length) < sizeof(_ch))705705+ if (!ch || ntohs(ch->length) < sizeof(_ch))706706 break;707707708708 ch_end = offset + SCTP_PAD4(ntohs(ch->length));
+60-23
net/unix/af_unix.c
···608608609609static void init_peercred(struct sock *sk)610610{611611- put_pid(sk->sk_peer_pid);612612- if (sk->sk_peer_cred)613613- put_cred(sk->sk_peer_cred);611611+ const struct cred *old_cred;612612+ struct pid *old_pid;613613+614614+ spin_lock(&sk->sk_peer_lock);615615+ old_pid = sk->sk_peer_pid;616616+ old_cred = sk->sk_peer_cred;614617 sk->sk_peer_pid = get_pid(task_tgid(current));615618 sk->sk_peer_cred = get_current_cred();619619+ spin_unlock(&sk->sk_peer_lock);620620+621621+ put_pid(old_pid);622622+ put_cred(old_cred);616623}617624618625static void copy_peercred(struct sock *sk, struct sock *peersk)619626{620620- put_pid(sk->sk_peer_pid);621621- if (sk->sk_peer_cred)622622- put_cred(sk->sk_peer_cred);627627+ const struct cred *old_cred;628628+ struct pid *old_pid;629629+630630+ if (sk < peersk) {631631+ spin_lock(&sk->sk_peer_lock);632632+ spin_lock_nested(&peersk->sk_peer_lock, SINGLE_DEPTH_NESTING);633633+ } else {634634+ spin_lock(&peersk->sk_peer_lock);635635+ spin_lock_nested(&sk->sk_peer_lock, SINGLE_DEPTH_NESTING);636636+ }637637+ old_pid = sk->sk_peer_pid;638638+ old_cred = sk->sk_peer_cred;623639 sk->sk_peer_pid = get_pid(peersk->sk_peer_pid);624640 sk->sk_peer_cred = get_cred(peersk->sk_peer_cred);641641+642642+ spin_unlock(&sk->sk_peer_lock);643643+ spin_unlock(&peersk->sk_peer_lock);644644+645645+ put_pid(old_pid);646646+ put_cred(old_cred);625647}626648627649static int unix_listen(struct socket *sock, int backlog)···850828851829static struct sock *unix_create1(struct net *net, struct socket *sock, int kern, int type)852830{853853- struct sock *sk = NULL;854831 struct unix_sock *u;832832+ struct sock *sk;833833+ int err;855834856835 atomic_long_inc(&unix_nr_socks);857857- if (atomic_long_read(&unix_nr_socks) > 2 * get_max_files())858858- goto out;836836+ if (atomic_long_read(&unix_nr_socks) > 2 * get_max_files()) {837837+ err = -ENFILE;838838+ goto err;839839+ }859840860841 if (type == SOCK_STREAM)861842 sk = sk_alloc(net, PF_UNIX, 
GFP_KERNEL, &unix_stream_proto, kern);862843 else /*dgram and seqpacket */863844 sk = sk_alloc(net, PF_UNIX, GFP_KERNEL, &unix_dgram_proto, kern);864845865865- if (!sk)866866- goto out;846846+ if (!sk) {847847+ err = -ENOMEM;848848+ goto err;849849+ }867850868851 sock_init_data(sock, sk);869852···888861 init_waitqueue_func_entry(&u->peer_wake, unix_dgram_peer_wake_relay);889862 memset(&u->scm_stat, 0, sizeof(struct scm_stat));890863 unix_insert_socket(unix_sockets_unbound(sk), sk);891891-out:892892- if (sk == NULL)893893- atomic_long_dec(&unix_nr_socks);894894- else {895895- local_bh_disable();896896- sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);897897- local_bh_enable();898898- }864864+865865+ local_bh_disable();866866+ sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);867867+ local_bh_enable();868868+899869 return sk;870870+871871+err:872872+ atomic_long_dec(&unix_nr_socks);873873+ return ERR_PTR(err);900874}901875902876static int unix_create(struct net *net, struct socket *sock, int protocol,903877 int kern)904878{879879+ struct sock *sk;880880+905881 if (protocol && protocol != PF_UNIX)906882 return -EPROTONOSUPPORT;907883···931901 return -ESOCKTNOSUPPORT;932902 }933903934934- return unix_create1(net, sock, kern, sock->type) ? 0 : -ENOMEM;904904+ sk = unix_create1(net, sock, kern, sock->type);905905+ if (IS_ERR(sk))906906+ return PTR_ERR(sk);907907+908908+ return 0;935909}936910937911static int unix_release(struct socket *sock)···13481314 we will have to recheck all again in any case.13491315 */1350131613511351- err = -ENOMEM;13521352-13531317 /* create new sock for complete connection */13541318 newsk = unix_create1(sock_net(sk), NULL, 0, sock->type);13551355- if (newsk == NULL)13191319+ if (IS_ERR(newsk)) {13201320+ err = PTR_ERR(newsk);13211321+ newsk = NULL;13561322 goto out;13231323+ }13241324+13251325+ err = -ENOMEM;1357132613581327 /* Allocate skb for sending to listening sock */13591328 skb = sock_wmalloc(newsk, 1, 0, GFP_KERNEL);
+9
sound/core/rawmidi.c
···873873 return -EINVAL;874874 }875875 }876876+ case SNDRV_RAWMIDI_IOCTL_USER_PVERSION:877877+ if (get_user(rfile->user_pversion, (unsigned int __user *)arg))878878+ return -EFAULT;879879+ return 0;880880+876881 case SNDRV_RAWMIDI_IOCTL_PARAMS:877882 {878883 struct snd_rawmidi_params params;879884880885 if (copy_from_user(¶ms, argp, sizeof(struct snd_rawmidi_params)))881886 return -EFAULT;887887+ if (rfile->user_pversion < SNDRV_PROTOCOL_VERSION(2, 0, 2)) {888888+ params.mode = 0;889889+ memset(params.reserved, 0, sizeof(params.reserved));890890+ }882891 switch (params.stream) {883892 case SNDRV_RAWMIDI_STREAM_OUTPUT:884893 if (rfile->output == NULL)
···276276277277 /* This is just for v2/v3 protocol. */278278 for (i = 0; i < data_blocks; ++i) {279279- *frames = (be32_to_cpu(buffer[1]) << 16) |280280- (be32_to_cpu(buffer[2]) >> 16);279279+ *frames = be32_to_cpu(buffer[1]);280280+ *frames <<= 16;281281+ *frames |= be32_to_cpu(buffer[2]) >> 16;282282+ ++frames;281283 buffer += data_block_quadlets;282282- frames++;283284 }284285}285286
+8-5
sound/firewire/oxfw/oxfw.c
···184184 model = val;185185 }186186187187- /*188188- * Mackie Onyx Satellite with base station has a quirk to report a wrong189189- * value in 'dbs' field of CIP header against its format information.190190- */191191- if (vendor == VENDOR_LOUD && model == MODEL_SATELLITE)187187+ if (vendor == VENDOR_LOUD) {188188+ // Mackie Onyx Satellite with base station has a quirk to report a wrong189189+ // value in 'dbs' field of CIP header against its format information.192190 oxfw->quirks |= SND_OXFW_QUIRK_WRONG_DBS;191191+192192+ // OXFW971-based models may transfer events by blocking method.193193+ if (!(oxfw->quirks & SND_OXFW_QUIRK_JUMBO_PAYLOAD))194194+ oxfw->quirks |= SND_OXFW_QUIRK_BLOCKING_TRANSMISSION;195195+ }193196194197 return 0;195198}
···10731073 if (ret < 0)10741074 goto err_pm_get_sync;1075107510761076+ /*10771077+ * Register platform component before registering cpu dai for there10781078+ * is not defer probe for platform component in snd_soc_add_pcm_runtime().10791079+ */10801080+ ret = imx_pcm_dma_init(pdev, IMX_ESAI_DMABUF_SIZE);10811081+ if (ret) {10821082+ dev_err(&pdev->dev, "failed to init imx pcm dma: %d\n", ret);10831083+ goto err_pm_get_sync;10841084+ }10851085+10761086 ret = devm_snd_soc_register_component(&pdev->dev, &fsl_esai_component,10771087 &fsl_esai_dai, 1);10781088 if (ret) {···10911081 }1092108210931083 INIT_WORK(&esai_priv->work, fsl_esai_hw_reset);10941094-10951095- ret = imx_pcm_dma_init(pdev, IMX_ESAI_DMABUF_SIZE);10961096- if (ret) {10971097- dev_err(&pdev->dev, "failed to init imx pcm dma: %d\n", ret);10981098- goto err_pm_get_sync;10991099- }1100108411011085 return ret;11021086
+10-5
sound/soc/fsl/fsl_micfil.c
···737737 pm_runtime_enable(&pdev->dev);738738 regcache_cache_only(micfil->regmap, true);739739740740+ /*741741+ * Register platform component before registering cpu dai for there742742+ * is not defer probe for platform component in snd_soc_add_pcm_runtime().743743+ */744744+ ret = devm_snd_dmaengine_pcm_register(&pdev->dev, NULL, 0);745745+ if (ret) {746746+ dev_err(&pdev->dev, "failed to pcm register\n");747747+ return ret;748748+ }749749+740750 ret = devm_snd_soc_register_component(&pdev->dev, &fsl_micfil_component,741751 &fsl_micfil_dai, 1);742752 if (ret) {743753 dev_err(&pdev->dev, "failed to register component %s\n",744754 fsl_micfil_component.name);745745- return ret;746755 }747747-748748- ret = devm_snd_dmaengine_pcm_register(&pdev->dev, NULL, 0);749749- if (ret)750750- dev_err(&pdev->dev, "failed to pcm register\n");751756752757 return ret;753758}
+9-5
sound/soc/fsl/fsl_sai.c
···11521152 if (ret < 0)11531153 goto err_pm_get_sync;1154115411551155- ret = devm_snd_soc_register_component(&pdev->dev, &fsl_component,11561156- &sai->cpu_dai_drv, 1);11571157- if (ret)11581158- goto err_pm_get_sync;11591159-11551155+ /*11561156+ * Register platform component before registering cpu dai for there11571157+ * is not defer probe for platform component in snd_soc_add_pcm_runtime().11581158+ */11601159 if (sai->soc_data->use_imx_pcm) {11611160 ret = imx_pcm_dma_init(pdev, IMX_SAI_DMABUF_SIZE);11621161 if (ret)···11651166 if (ret)11661167 goto err_pm_get_sync;11671168 }11691169+11701170+ ret = devm_snd_soc_register_component(&pdev->dev, &fsl_component,11711171+ &sai->cpu_dai_drv, 1);11721172+ if (ret)11731173+ goto err_pm_get_sync;1168117411691175 return ret;11701176
+10-6
sound/soc/fsl/fsl_spdif.c
···14341434 pm_runtime_enable(&pdev->dev);14351435 regcache_cache_only(spdif_priv->regmap, true);1436143614371437+ /*14381438+ * Register platform component before registering cpu dai for there14391439+ * is not defer probe for platform component in snd_soc_add_pcm_runtime().14401440+ */14411441+ ret = imx_pcm_dma_init(pdev, IMX_SPDIF_DMABUF_SIZE);14421442+ if (ret) {14431443+ dev_err_probe(&pdev->dev, ret, "imx_pcm_dma_init failed\n");14441444+ goto err_pm_disable;14451445+ }14461446+14371447 ret = devm_snd_soc_register_component(&pdev->dev, &fsl_spdif_component,14381448 &spdif_priv->cpu_dai_drv, 1);14391449 if (ret) {14401450 dev_err(&pdev->dev, "failed to register DAI: %d\n", ret);14411441- goto err_pm_disable;14421442- }14431443-14441444- ret = imx_pcm_dma_init(pdev, IMX_SPDIF_DMABUF_SIZE);14451445- if (ret) {14461446- dev_err_probe(&pdev->dev, ret, "imx_pcm_dma_init failed\n");14471451 goto err_pm_disable;14481452 }14491453
+10-5
sound/soc/fsl/fsl_xcvr.c
···12151215 pm_runtime_enable(dev);12161216 regcache_cache_only(xcvr->regmap, true);1217121712181218+ /*12191219+ * Register platform component before registering cpu dai for there12201220+ * is not defer probe for platform component in snd_soc_add_pcm_runtime().12211221+ */12221222+ ret = devm_snd_dmaengine_pcm_register(dev, NULL, 0);12231223+ if (ret) {12241224+ dev_err(dev, "failed to pcm register\n");12251225+ return ret;12261226+ }12271227+12181228 ret = devm_snd_soc_register_component(dev, &fsl_xcvr_comp,12191229 &fsl_xcvr_dai, 1);12201230 if (ret) {12211231 dev_err(dev, "failed to register component %s\n",12221232 fsl_xcvr_comp.name);12231223- return ret;12241233 }12251225-12261226- ret = devm_snd_dmaengine_pcm_register(dev, NULL, 0);12271227- if (ret)12281228- dev_err(dev, "failed to pcm register\n");1229123412301235 return ret;12311236}
+5
sound/soc/intel/boards/sof_sdw.c
···929929 cpus + *cpu_id, cpu_dai_num,930930 codecs, codec_num,931931 NULL, &sdw_ops);932932+ /*933933+ * SoundWire DAILINKs use 'stream' functions and Bank Switch operations934934+ * based on wait_for_completion(), tag them as 'nonatomic'.935935+ */936936+ dai_links[*be_index].nonatomic = true;932937933938 ret = set_codec_init_func(card, link, dai_links + (*be_index)++,934939 playback, group_id);
+3
sound/soc/mediatek/Kconfig
···11# SPDX-License-Identifier: GPL-2.0-only22config SND_SOC_MEDIATEK33 tristate44+ select REGMAP_MMIO4556config SND_SOC_MT270167 tristate "ASoC support for Mediatek MT2701 chip"···189188config SND_SOC_MT8195190189 tristate "ASoC support for Mediatek MT8195 chip"191190 depends on ARCH_MEDIATEK || COMPILE_TEST191191+ depends on COMMON_CLK192192 select SND_SOC_MEDIATEK193193+ select MFD_SYSCON if SND_SOC_MT6359193194 help194195 This adds ASoC platform driver support for Mediatek MT8195 chip195196 that can be used with other codecs.
+11-8
sound/soc/mediatek/common/mtk-afe-fe-dai.c
···334334 devm_kcalloc(dev, afe->reg_back_up_list_num,335335 sizeof(unsigned int), GFP_KERNEL);336336337337- for (i = 0; i < afe->reg_back_up_list_num; i++)338338- regmap_read(regmap, afe->reg_back_up_list[i],339339- &afe->reg_back_up[i]);337337+ if (afe->reg_back_up) {338338+ for (i = 0; i < afe->reg_back_up_list_num; i++)339339+ regmap_read(regmap, afe->reg_back_up_list[i],340340+ &afe->reg_back_up[i]);341341+ }340342341343 afe->suspended = true;342344 afe->runtime_suspend(dev);···358356359357 afe->runtime_resume(dev);360358361361- if (!afe->reg_back_up)359359+ if (!afe->reg_back_up) {362360 dev_dbg(dev, "%s no reg_backup\n", __func__);363363-364364- for (i = 0; i < afe->reg_back_up_list_num; i++)365365- mtk_regmap_write(regmap, afe->reg_back_up_list[i],366366- afe->reg_back_up[i]);361361+ } else {362362+ for (i = 0; i < afe->reg_back_up_list_num; i++)363363+ mtk_regmap_write(regmap, afe->reg_back_up_list[i],364364+ afe->reg_back_up[i]);365365+ }367366368367 afe->suspended = false;369368 return 0;
···365365/* on i.MX8 there is 1 to 1 match between type and BAR idx */366366static int imx8_get_bar_index(struct snd_sof_dev *sdev, u32 type)367367{368368- return type;368368+ /* Only IRAM and SRAM bars are valid */369369+ switch (type) {370370+ case SOF_FW_BLK_TYPE_IRAM:371371+ case SOF_FW_BLK_TYPE_SRAM:372372+ return type;373373+ default:374374+ return -EINVAL;375375+ }369376}370377371378static void imx8_ipc_msg_data(struct snd_sof_dev *sdev,
+8-1
sound/soc/sof/imx/imx8m.c
···228228/* on i.MX8 there is 1 to 1 match between type and BAR idx */229229static int imx8m_get_bar_index(struct snd_sof_dev *sdev, u32 type)230230{231231- return type;231231+ /* Only IRAM and SRAM bars are valid */232232+ switch (type) {233233+ case SOF_FW_BLK_TYPE_IRAM:234234+ case SOF_FW_BLK_TYPE_SRAM:235235+ return type;236236+ default:237237+ return -EINVAL;238238+ }232239}233240234241static void imx8m_ipc_msg_data(struct snd_sof_dev *sdev,
+5-3
sound/soc/sof/loader.c
···729729 ret = request_firmware(&plat_data->fw, fw_filename, sdev->dev);730730731731 if (ret < 0) {732732- dev_err(sdev->dev, "error: request firmware %s failed err: %d\n",733733- fw_filename, ret);734732 dev_err(sdev->dev,735735- "you may need to download the firmware from https://github.com/thesofproject/sof-bin/\n");733733+ "error: sof firmware file is missing, you might need to\n");734734+ dev_err(sdev->dev,735735+ " download it from https://github.com/thesofproject/sof-bin/\n");736736 goto err;737737 } else {738738 dev_dbg(sdev->dev, "request_firmware %s successful\n",···880880void snd_sof_fw_unload(struct snd_sof_dev *sdev)881881{882882 /* TODO: support module unloading at runtime */883883+ release_firmware(sdev->pdata->fw);884884+ sdev->pdata->fw = NULL;883885}884886EXPORT_SYMBOL(snd_sof_fw_unload);
-1
sound/soc/sof/trace.c
···530530 return;531531532532 if (sdev->dtrace_is_enabled) {533533- dev_err(sdev->dev, "error: waking up any trace sleepers\n");534533 sdev->dtrace_error = true;535534 wake_up(&sdev->trace_sleep);536535 }
+2-2
sound/soc/sof/xtensa/core.c
···122122 * 0x0049fbb0: 8000f2d0 0049fc00 6f6c6c61 00632e63123123 */124124 for (i = 0; i < stack_words; i += 4) {125125- hex_dump_to_buffer(stack + i * 4, 16, 16, 4,125125+ hex_dump_to_buffer(stack + i, 16, 16, 4,126126 buf, sizeof(buf), false);127127- dev_err(sdev->dev, "0x%08x: %s\n", stack_ptr + i, buf);127127+ dev_err(sdev->dev, "0x%08x: %s\n", stack_ptr + i * 4, buf);128128 }129129}130130
+4-14
sound/usb/card.c
···10541054 return 0;10551055}1056105610571057-static int __usb_audio_resume(struct usb_interface *intf, bool reset_resume)10571057+static int usb_audio_resume(struct usb_interface *intf)10581058{10591059 struct snd_usb_audio *chip = usb_get_intfdata(intf);10601060 struct snd_usb_stream *as;···10801080 * we just notify and restart the mixers10811081 */10821082 list_for_each_entry(mixer, &chip->mixer_list, list) {10831083- err = snd_usb_mixer_resume(mixer, reset_resume);10831083+ err = snd_usb_mixer_resume(mixer);10841084 if (err < 0)10851085 goto err_out;10861086 }···11001100 atomic_dec(&chip->active); /* allow autopm after this point */11011101 return err;11021102}11031103-11041104-static int usb_audio_resume(struct usb_interface *intf)11051105-{11061106- return __usb_audio_resume(intf, false);11071107-}11081108-11091109-static int usb_audio_reset_resume(struct usb_interface *intf)11101110-{11111111- return __usb_audio_resume(intf, true);11121112-}11131103#else11141104#define usb_audio_suspend NULL11151105#define usb_audio_resume NULL11161116-#define usb_audio_reset_resume NULL11061106+#define usb_audio_resume NULL11171107#endif /* CONFIG_PM */1118110811191109static const struct usb_device_id usb_audio_ids [] = {···11251135 .disconnect = usb_audio_disconnect,11261136 .suspend = usb_audio_suspend,11271137 .resume = usb_audio_resume,11281128- .reset_resume = usb_audio_reset_resume,11381138+ .reset_resume = usb_audio_resume,11291139 .id_table = usb_audio_ids,11301140 .supports_autosuspend = 1,11311141};
+4-22
sound/usb/mixer.c
···36533653 return 0;36543654}3655365536563656-static int default_mixer_reset_resume(struct usb_mixer_elem_list *list)36573657-{36583658- int err;36593659-36603660- if (list->resume) {36613661- err = list->resume(list);36623662- if (err < 0)36633663- return err;36643664- }36653665- return restore_mixer_value(list);36663666-}36673667-36683668-int snd_usb_mixer_resume(struct usb_mixer_interface *mixer, bool reset_resume)36563656+int snd_usb_mixer_resume(struct usb_mixer_interface *mixer)36693657{36703658 struct usb_mixer_elem_list *list;36713671- usb_mixer_elem_resume_func_t f;36723659 int id, err;3673366036743661 /* restore cached mixer values */36753662 for (id = 0; id < MAX_ID_ELEMS; id++) {36763663 for_each_mixer_elem(list, mixer, id) {36773677- if (reset_resume)36783678- f = list->reset_resume;36793679- else36803680- f = list->resume;36813681- if (f) {36823682- err = f(list);36643664+ if (list->resume) {36653665+ err = list->resume(list);36833666 if (err < 0)36843667 return err;36853668 }···36833700 list->id = unitid;36843701 list->dump = snd_usb_mixer_dump_cval;36853702#ifdef CONFIG_PM36863686- list->resume = NULL;36873687- list->reset_resume = default_mixer_reset_resume;37033703+ list->resume = restore_mixer_value;36883704#endif36893705}
+1-2
sound/usb/mixer.h
···7070 bool is_std_info;7171 usb_mixer_elem_dump_func_t dump;7272 usb_mixer_elem_resume_func_t resume;7373- usb_mixer_elem_resume_func_t reset_resume;7473};75747675/* iterate over mixer element list of the given unit id */···120121121122#ifdef CONFIG_PM122123int snd_usb_mixer_suspend(struct usb_mixer_interface *mixer);123123-int snd_usb_mixer_resume(struct usb_mixer_interface *mixer, bool reset_resume);124124+int snd_usb_mixer_resume(struct usb_mixer_interface *mixer);124125#endif125126126127int snd_usb_set_cur_mix_value(struct usb_mixer_elem_info *cval, int channel,
···16491649static int find_glob_sym_btf(struct src_obj *obj, Elf64_Sym *sym, const char *sym_name,16501650 int *out_btf_sec_id, int *out_btf_id)16511651{16521652- int i, j, n = btf__get_nr_types(obj->btf), m, btf_id = 0;16521652+ int i, j, n, m, btf_id = 0;16531653 const struct btf_type *t;16541654 const struct btf_var_secinfo *vi;16551655 const char *name;1656165616571657+ if (!obj->btf) {16581658+ pr_warn("failed to find BTF info for object '%s'\n", obj->filename);16591659+ return -EINVAL;16601660+ }16611661+16621662+ n = btf__get_nr_types(obj->btf);16571663 for (i = 1; i <= n; i++) {16581664 t = btf__type_by_id(obj->btf, i);16591665
+30-8
tools/objtool/special.c
···5858{5959}60606161+static bool reloc2sec_off(struct reloc *reloc, struct section **sec, unsigned long *off)6262+{6363+ switch (reloc->sym->type) {6464+ case STT_FUNC:6565+ *sec = reloc->sym->sec;6666+ *off = reloc->sym->offset + reloc->addend;6767+ return true;6868+6969+ case STT_SECTION:7070+ *sec = reloc->sym->sec;7171+ *off = reloc->addend;7272+ return true;7373+7474+ default:7575+ return false;7676+ }7777+}7878+6179static int get_alt_entry(struct elf *elf, struct special_entry *entry,6280 struct section *sec, int idx,6381 struct special_alt *alt)···10991 WARN_FUNC("can't find orig reloc", sec, offset + entry->orig);11092 return -1;11193 }112112- if (orig_reloc->sym->type != STT_SECTION) {113113- WARN_FUNC("don't know how to handle non-section reloc symbol %s",114114- sec, offset + entry->orig, orig_reloc->sym->name);9494+ if (!reloc2sec_off(orig_reloc, &alt->orig_sec, &alt->orig_off)) {9595+ WARN_FUNC("don't know how to handle reloc symbol type %d: %s",9696+ sec, offset + entry->orig,9797+ orig_reloc->sym->type,9898+ orig_reloc->sym->name);11599 return -1;116100 }117117-118118- alt->orig_sec = orig_reloc->sym->sec;119119- alt->orig_off = orig_reloc->addend;120101121102 if (!entry->group || alt->new_len) {122103 new_reloc = find_reloc_by_dest(elf, sec, offset + entry->new);···133116 if (arch_is_retpoline(new_reloc->sym))134117 return 1;135118136136- alt->new_sec = new_reloc->sym->sec;137137- alt->new_off = (unsigned int)new_reloc->addend;119119+ if (!reloc2sec_off(new_reloc, &alt->new_sec, &alt->new_off)) {120120+ WARN_FUNC("don't know how to handle reloc symbol type %d: %s",121121+ sec, offset + entry->new,122122+ new_reloc->sym->type,123123+ new_reloc->sym->name);124124+ return -1;125125+ }138126139127 /* _ASM_EXTABLE_EX hack */140128 if (alt->new_off >= 0x7ffffff0)
···164164The EH Frame header follows the Linux Standard Base (LSB) specification as described in the document at https://refspecs.linuxfoundation.org/LSB_1.3.0/gLSB/gLSB/ehframehdr.html165165166166167167-The EH Frame follows the LSB specicfication as described in the document at https://refspecs.linuxbase.org/LSB_3.0.0/LSB-PDA/LSB-PDA/ehframechpt.html167167+The EH Frame follows the LSB specification as described in the document at https://refspecs.linuxbase.org/LSB_3.0.0/LSB-PDA/LSB-PDA/ehframechpt.html168168169169170170NOTE: The mapped_size is generally either the same as unwind_data_size (if the unwinding data was mapped in memory by the running process) or zero (if the unwinding data is not mapped by the process). If the unwinding data was not mapped, then only the EH Frame Header will be read, which can be used to specify FP based unwinding for a function which does not have unwinding information.
+1-1
tools/perf/Documentation/perf-c2c.txt
···261261User can specify how to sort offsets for cacheline.262262263263Following fields are available and governs the final264264-output fields set for caheline offsets output:264264+output fields set for cacheline offsets output:265265266266 tid - coalesced by process TIDs267267 pid - coalesced by process PIDs
+1-1
tools/perf/Documentation/perf-intel-pt.txt
···883883884884"Transactions" events correspond to the start or end of transactions. The885885'flags' field can be used in perf script to determine whether the event is a886886-tranasaction start, commit or abort.886886+transaction start, commit or abort.887887888888Note that "instructions", "branches" and "transactions" events depend on code889889flow packets which can be disabled by using the config term "branch=0". Refer
+1-1
tools/perf/Documentation/perf-lock.txt
···44444545-f::4646--force::4747- Don't complan, do it.4747+ Don't complain, do it.48484949REPORT OPTIONS5050--------------
+1-1
tools/perf/Documentation/perf-script-perl.txt
···5454Traces meant to be processed using a script should be recorded with5555the above option: -a to enable system-wide collection.56565757-The format file for the sched_wakep event defines the following fields5757+The format file for the sched_wakeup event defines the following fields5858(see /sys/kernel/debug/tracing/events/sched/sched_wakeup/format):59596060----
+1-1
tools/perf/Documentation/perf-script-python.txt
···448448Traces meant to be processed using a script should be recorded with449449the above option: -a to enable system-wide collection.450450451451-The format file for the sched_wakep event defines the following fields451451+The format file for the sched_wakeup event defines the following fields452452(see /sys/kernel/debug/tracing/events/sched/sched_wakeup/format):453453454454----
+1-1
tools/perf/Documentation/perf-stat.txt
···385385Print metrics or metricgroups specified in a comma separated list.386386For a group all metrics from the group are added.387387The events from the metrics are automatically measured.388388-See perf list output for the possble metrics and metricgroups.388388+See perf list output for the possible metrics and metricgroups.389389390390-A::391391--no-aggr::
+1-1
tools/perf/Documentation/topdown.txt
···22-----------------------------------3344Intel CPUs (since Sandy Bridge and Silvermont) support a TopDown55-methology to break down CPU pipeline execution into 4 bottlenecks:55+methodology to break down CPU pipeline execution into 4 bottlenecks:66frontend bound, backend bound, bad speculation, retiring.7788For more details on Topdown see [1][5]
···10461046 {10471047 "EventCode": "0x4e010",10481048 "EventName": "PM_GCT_NOSLOT_IC_L3MISS",10491049- "BriefDescription": "Gct empty for this thread due to icach l3 miss",10491049+ "BriefDescription": "Gct empty for this thread due to icache l3 miss",10501050 "PublicDescription": ""10511051 },10521052 {
···2020/* For bsearch. We try to unwind functions in shared object. */2121#include <stdlib.h>22222323+/*2424+ * The test will assert frames are on the stack but tail call optimizations lose2525+ * the frame of the caller. Clang can disable this optimization on a called2626+ * function but GCC currently (11/2020) lacks this attribute. The barrier is2727+ * used to inhibit tail calls in these cases.2828+ */2929+#ifdef __has_attribute3030+#if __has_attribute(disable_tail_calls)3131+#define NO_TAIL_CALL_ATTRIBUTE __attribute__((disable_tail_calls))3232+#define NO_TAIL_CALL_BARRIER3333+#endif3434+#endif3535+#ifndef NO_TAIL_CALL_ATTRIBUTE3636+#define NO_TAIL_CALL_ATTRIBUTE3737+#define NO_TAIL_CALL_BARRIER __asm__ __volatile__("" : : : "memory");3838+#endif3939+2340static int mmap_handler(struct perf_tool *tool __maybe_unused,2441 union perf_event *event,2542 struct perf_sample *sample,···10891 return strcmp((const char *) symbol, funcs[idx]);10992}11093111111-noinline int test_dwarf_unwind__thread(struct thread *thread)9494+NO_TAIL_CALL_ATTRIBUTE noinline int test_dwarf_unwind__thread(struct thread *thread)11295{11396 struct perf_sample sample;11497 unsigned long cnt = 0;···139122140123static int global_unwind_retval = -INT_MAX;141124142142-noinline int test_dwarf_unwind__compare(void *p1, void *p2)125125+NO_TAIL_CALL_ATTRIBUTE noinline int test_dwarf_unwind__compare(void *p1, void *p2)143126{144127 /* Any possible value should be 'thread' */145128 struct thread *thread = *(struct thread **)p1;···158141 return p1 - p2;159142}160143161161-noinline int test_dwarf_unwind__krava_3(struct thread *thread)144144+NO_TAIL_CALL_ATTRIBUTE noinline int test_dwarf_unwind__krava_3(struct thread *thread)162145{163146 struct thread *array[2] = {thread, thread};164147 void *fp = &bsearch;···177160 return global_unwind_retval;178161}179162180180-noinline int test_dwarf_unwind__krava_2(struct thread *thread)163163+NO_TAIL_CALL_ATTRIBUTE noinline int test_dwarf_unwind__krava_2(struct thread 
*thread)181164{182182- return test_dwarf_unwind__krava_3(thread);165165+ int ret;166166+167167+ ret = test_dwarf_unwind__krava_3(thread);168168+ NO_TAIL_CALL_BARRIER;169169+ return ret;183170}184171185185-noinline int test_dwarf_unwind__krava_1(struct thread *thread)172172+NO_TAIL_CALL_ATTRIBUTE noinline int test_dwarf_unwind__krava_1(struct thread *thread)186173{187187- return test_dwarf_unwind__krava_2(thread);174174+ int ret;175175+176176+ ret = test_dwarf_unwind__krava_2(thread);177177+ NO_TAIL_CALL_BARRIER;178178+ return ret;188179}189180190181int test__dwarf_unwind(struct test *test __maybe_unused, int subtest __maybe_unused)
+1-1
tools/perf/util/config.c
···801801 section->name, item->name);802802 ret = fn(key, value, data);803803 if (ret < 0) {804804- pr_err("Error: wrong config key-value pair %s=%s\n",804804+ pr_err("Error in the given config file: wrong config key-value pair %s=%s\n",805805 key, value);806806 /*807807 * Can't be just a 'break', as perf_config_set__for_each_entry()
···112112 ip netns add "${NS2}"113113 ip netns add "${NS3}"114114115115+ # rp_filter gets confused by what these tests are doing, so disable it116116+ ip netns exec ${NS1} sysctl -wq net.ipv4.conf.all.rp_filter=0117117+ ip netns exec ${NS2} sysctl -wq net.ipv4.conf.all.rp_filter=0118118+ ip netns exec ${NS3} sysctl -wq net.ipv4.conf.all.rp_filter=0119119+ ip netns exec ${NS1} sysctl -wq net.ipv4.conf.default.rp_filter=0120120+ ip netns exec ${NS2} sysctl -wq net.ipv4.conf.default.rp_filter=0121121+ ip netns exec ${NS3} sysctl -wq net.ipv4.conf.default.rp_filter=0122122+115123 ip link add veth1 type veth peer name veth2116124 ip link add veth3 type veth peer name veth4117125 ip link add veth5 type veth peer name veth6···243235 ip -netns ${NS3} -6 addr add ${IPv6_GRE} nodad dev gre6_dev244236 ip -netns ${NS1} -6 route add ${IPv6_GRE}/128 dev veth5 via ${IPv6_6} ${VRF}245237 ip -netns ${NS2} -6 route add ${IPv6_GRE}/128 dev veth7 via ${IPv6_8} ${VRF}246246-247247- # rp_filter gets confused by what these tests are doing, so disable it248248- ip netns exec ${NS1} sysctl -wq net.ipv4.conf.all.rp_filter=0249249- ip netns exec ${NS2} sysctl -wq net.ipv4.conf.all.rp_filter=0250250- ip netns exec ${NS3} sysctl -wq net.ipv4.conf.all.rp_filter=0251238252239 TMPFILE=$(mktemp /tmp/test_lwt_ip_encap.XXXXXX)253240
···371371 printf(" -v: specify the number of vCPUs to run.\n");372372 printf(" -o: Overlap guest memory accesses instead of partitioning\n"373373 " them into a separate region of memory for each vCPU.\n");374374- printf(" -s: specify the type of memory that should be used to\n"375375- " back the guest data region.\n\n");376376- backing_src_help();374374+ backing_src_help("-s");377375 puts("");378376 exit(0);379377}···379381int main(int argc, char *argv[])380382{381383 struct test_params params = {382382- .backing_src = VM_MEM_SRC_ANONYMOUS,384384+ .backing_src = DEFAULT_VM_MEM_SRC,383385 .vcpu_memory_bytes = DEFAULT_PER_VCPU_MEM_SIZE,384386 .vcpus = 1,385387 };
+7-8
tools/testing/selftests/kvm/demand_paging_test.c
···179179 return NULL;180180 }181181182182- if (!pollfd[0].revents & POLLIN)182182+ if (!(pollfd[0].revents & POLLIN))183183 continue;184184185185 r = read(uffd, &msg, sizeof(msg));···416416{417417 puts("");418418 printf("usage: %s [-h] [-m vm_mode] [-u uffd_mode] [-d uffd_delay_usec]\n"419419- " [-b memory] [-t type] [-v vcpus] [-o]\n", name);419419+ " [-b memory] [-s type] [-v vcpus] [-o]\n", name);420420 guest_modes_help();421421 printf(" -u: use userfaultfd to handle vCPU page faults. Mode is a\n"422422 " UFFD registration mode: 'MISSING' or 'MINOR'.\n");···426426 printf(" -b: specify the size of the memory region which should be\n"427427 " demand paged by each vCPU. e.g. 10M or 3G.\n"428428 " Default: 1G\n");429429- printf(" -t: The type of backing memory to use. Default: anonymous\n");430430- backing_src_help();429429+ backing_src_help("-s");431430 printf(" -v: specify the number of vCPUs to run.\n");432431 printf(" -o: Overlap guest memory accesses instead of partitioning\n"433432 " them into a separate region of memory for each vCPU.\n");···438439{439440 int max_vcpus = kvm_check_cap(KVM_CAP_MAX_VCPUS);440441 struct test_params p = {441441- .src_type = VM_MEM_SRC_ANONYMOUS,442442+ .src_type = DEFAULT_VM_MEM_SRC,442443 .partition_vcpu_memory_access = true,443444 };444445 int opt;445446446447 guest_modes_append_default();447448448448- while ((opt = getopt(argc, argv, "hm:u:d:b:t:v:o")) != -1) {449449+ while ((opt = getopt(argc, argv, "hm:u:d:b:s:v:o")) != -1) {449450 switch (opt) {450451 case 'm':451452 guest_modes_cmdline(optarg);···464465 case 'b':465466 guest_percpu_mem_size = parse_size(optarg);466467 break;467467- case 't':468468+ case 's':468469 p.src_type = parse_backing_src_type(optarg);469470 break;470471 case 'v':···484485485486 if (p.uffd_mode == UFFDIO_REGISTER_MODE_MINOR &&486487 !backing_src_is_shared(p.src_type)) {487487- TEST_FAIL("userfaultfd MINOR mode requires shared memory; pick a different -t");488488+ TEST_FAIL("userfaultfd MINOR mode 
requires shared memory; pick a different -s");488489 }489490490491 for_each_guest_mode(run_test, &p);
+42-20
tools/testing/selftests/kvm/dirty_log_perf_test.c
···118118 toggle_dirty_logging(vm, slots, false);119119}120120121121-static void get_dirty_log(struct kvm_vm *vm, int slots, unsigned long *bitmap,122122- uint64_t nr_pages)121121+static void get_dirty_log(struct kvm_vm *vm, unsigned long *bitmaps[], int slots)123122{124124- uint64_t slot_pages = nr_pages / slots;125123 int i;126124127125 for (i = 0; i < slots; i++) {128126 int slot = PERF_TEST_MEM_SLOT_INDEX + i;129129- unsigned long *slot_bitmap = bitmap + i * slot_pages;130127131131- kvm_vm_get_dirty_log(vm, slot, slot_bitmap);128128+ kvm_vm_get_dirty_log(vm, slot, bitmaps[i]);132129 }133130}134131135135-static void clear_dirty_log(struct kvm_vm *vm, int slots, unsigned long *bitmap,136136- uint64_t nr_pages)132132+static void clear_dirty_log(struct kvm_vm *vm, unsigned long *bitmaps[],133133+ int slots, uint64_t pages_per_slot)137134{138138- uint64_t slot_pages = nr_pages / slots;139135 int i;140136141137 for (i = 0; i < slots; i++) {142138 int slot = PERF_TEST_MEM_SLOT_INDEX + i;143143- unsigned long *slot_bitmap = bitmap + i * slot_pages;144139145145- kvm_vm_clear_dirty_log(vm, slot, slot_bitmap, 0, slot_pages);140140+ kvm_vm_clear_dirty_log(vm, slot, bitmaps[i], 0, pages_per_slot);146141 }142142+}143143+144144+static unsigned long **alloc_bitmaps(int slots, uint64_t pages_per_slot)145145+{146146+ unsigned long **bitmaps;147147+ int i;148148+149149+ bitmaps = malloc(slots * sizeof(bitmaps[0]));150150+ TEST_ASSERT(bitmaps, "Failed to allocate bitmaps array.");151151+152152+ for (i = 0; i < slots; i++) {153153+ bitmaps[i] = bitmap_zalloc(pages_per_slot);154154+ TEST_ASSERT(bitmaps[i], "Failed to allocate slot bitmap.");155155+ }156156+157157+ return bitmaps;158158+}159159+160160+static void free_bitmaps(unsigned long *bitmaps[], int slots)161161+{162162+ int i;163163+164164+ for (i = 0; i < slots; i++)165165+ free(bitmaps[i]);166166+167167+ free(bitmaps);147168}148169149170static void run_test(enum vm_guest_mode mode, void *arg)···172151 struct test_params *p = 
arg;173152 pthread_t *vcpu_threads;174153 struct kvm_vm *vm;175175- unsigned long *bmap;154154+ unsigned long **bitmaps;176155 uint64_t guest_num_pages;177156 uint64_t host_num_pages;157157+ uint64_t pages_per_slot;178158 int vcpu_id;179159 struct timespec start;180160 struct timespec ts_diff;···193171 guest_num_pages = (nr_vcpus * guest_percpu_mem_size) >> vm_get_page_shift(vm);194172 guest_num_pages = vm_adjust_num_guest_pages(mode, guest_num_pages);195173 host_num_pages = vm_num_host_pages(mode, guest_num_pages);196196- bmap = bitmap_zalloc(host_num_pages);174174+ pages_per_slot = host_num_pages / p->slots;175175+176176+ bitmaps = alloc_bitmaps(p->slots, pages_per_slot);197177198178 if (dirty_log_manual_caps) {199179 cap.cap = KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2;···263239 iteration, ts_diff.tv_sec, ts_diff.tv_nsec);264240265241 clock_gettime(CLOCK_MONOTONIC, &start);266266- get_dirty_log(vm, p->slots, bmap, host_num_pages);242242+ get_dirty_log(vm, bitmaps, p->slots);267243 ts_diff = timespec_elapsed(start);268244 get_dirty_log_total = timespec_add(get_dirty_log_total,269245 ts_diff);···272248273249 if (dirty_log_manual_caps) {274250 clock_gettime(CLOCK_MONOTONIC, &start);275275- clear_dirty_log(vm, p->slots, bmap, host_num_pages);251251+ clear_dirty_log(vm, bitmaps, p->slots, pages_per_slot);276252 ts_diff = timespec_elapsed(start);277253 clear_dirty_log_total = timespec_add(clear_dirty_log_total,278254 ts_diff);···305281 clear_dirty_log_total.tv_nsec, avg.tv_sec, avg.tv_nsec);306282 }307283308308- free(bmap);284284+ free_bitmaps(bitmaps, p->slots);309285 free(vcpu_threads);310286 perf_test_destroy_vm(vm);311287}···332308 printf(" -v: specify the number of vCPUs to run.\n");333309 printf(" -o: Overlap guest memory accesses instead of partitioning\n"334310 " them into a separate region of memory for each vCPU.\n");335335- printf(" -s: specify the type of memory that should be used to\n"336336- " back the guest data region.\n\n");311311+ 
backing_src_help("-s");337312 printf(" -x: Split the memory region into this number of memslots.\n"338338- " (default: 1)");339339- backing_src_help();313313+ " (default: 1)\n");340314 puts("");341315 exit(0);342316}···346324 .iterations = TEST_HOST_LOOP_N,347325 .wr_fract = 1,348326 .partition_vcpu_memory_access = true,349349- .backing_src = VM_MEM_SRC_ANONYMOUS,327327+ .backing_src = DEFAULT_VM_MEM_SRC,350328 .slots = 1,351329 };352330 int opt;
···456456 " (default: 1G)\n");457457 printf(" -v: specify the number of vCPUs to run\n"458458 " (default: 1)\n");459459- printf(" -s: specify the type of memory that should be used to\n"460460- " back the guest data region.\n"461461- " (default: anonymous)\n\n");462462- backing_src_help();459459+ backing_src_help("-s");463460 puts("");464461}465462···465468 int max_vcpus = kvm_check_cap(KVM_CAP_MAX_VCPUS);466469 struct test_params p = {467470 .test_mem_size = DEFAULT_TEST_MEM_SIZE,468468- .src_type = VM_MEM_SRC_ANONYMOUS,471471+ .src_type = DEFAULT_VM_MEM_SRC,469472 };470473 int opt;471474
+13-4
tools/testing/selftests/kvm/lib/test_util.c
···283283 }284284}285285286286-void backing_src_help(void)286286+static void print_available_backing_src_types(const char *prefix)287287{288288 int i;289289290290- printf("Available backing src types:\n");290290+ printf("%sAvailable backing src types:\n", prefix);291291+291292 for (i = 0; i < NUM_SRC_TYPES; i++)292292- printf("\t%s\n", vm_mem_backing_src_alias(i)->name);293293+ printf("%s %s\n", prefix, vm_mem_backing_src_alias(i)->name);294294+}295295+296296+void backing_src_help(const char *flag)297297+{298298+ printf(" %s: specify the type of memory that should be used to\n"299299+ " back the guest data region. (default: %s)\n",300300+ flag, vm_mem_backing_src_alias(DEFAULT_VM_MEM_SRC)->name);301301+ print_available_backing_src_types(" ");293302}294303295304enum vm_mem_backing_src_type parse_backing_src_type(const char *type_name)···309300 if (!strcmp(type_name, vm_mem_backing_src_alias(i)->name))310301 return i;311302312312- backing_src_help();303303+ print_available_backing_src_types("");313304 TEST_FAIL("Unknown backing src type: %s", type_name);314305 return -1;315306}
+60-10
tools/testing/selftests/kvm/rseq_test.c
···1010#include <signal.h>1111#include <syscall.h>1212#include <sys/ioctl.h>1313+#include <sys/sysinfo.h>1314#include <asm/barrier.h>1415#include <linux/atomic.h>1516#include <linux/rseq.h>···40394140static pthread_t migration_thread;4241static cpu_set_t possible_mask;4242+static int min_cpu, max_cpu;4343static bool done;44444545static atomic_t seq_cnt;···5957 TEST_ASSERT(!r, "rseq failed, errno = %d (%s)", errno, strerror(errno));6058}61596060+static int next_cpu(int cpu)6161+{6262+ /*6363+ * Advance to the next CPU, skipping those that weren't in the original6464+ * affinity set. Sadly, there is no CPU_SET_FOR_EACH, and cpu_set_t's6565+ * data storage is considered as opaque. Note, if this task is pinned6666+ * to a small set of discontigous CPUs, e.g. 2 and 1023, this loop will6767+ * burn a lot cycles and the test will take longer than normal to6868+ * complete.6969+ */7070+ do {7171+ cpu++;7272+ if (cpu > max_cpu) {7373+ cpu = min_cpu;7474+ TEST_ASSERT(CPU_ISSET(cpu, &possible_mask),7575+ "Min CPU = %d must always be usable", cpu);7676+ break;7777+ }7878+ } while (!CPU_ISSET(cpu, &possible_mask));7979+8080+ return cpu;8181+}8282+6283static void *migration_worker(void *ign)6384{6485 cpu_set_t allowed_mask;6565- int r, i, nr_cpus, cpu;8686+ int r, i, cpu;66876788 CPU_ZERO(&allowed_mask);68896969- nr_cpus = CPU_COUNT(&possible_mask);7070-7171- for (i = 0; i < NR_TASK_MIGRATIONS; i++) {7272- cpu = i % nr_cpus;7373- if (!CPU_ISSET(cpu, &possible_mask))7474- continue;7575-9090+ for (i = 0, cpu = min_cpu; i < NR_TASK_MIGRATIONS; i++, cpu = next_cpu(cpu)) {7691 CPU_SET(cpu, &allowed_mask);77927893 /*···173154 return NULL;174155}175156157157+static int calc_min_max_cpu(void)158158+{159159+ int i, cnt, nproc;160160+161161+ if (CPU_COUNT(&possible_mask) < 2)162162+ return -EINVAL;163163+164164+ /*165165+ * CPU_SET doesn't provide a FOR_EACH helper, get the min/max CPU that166166+ * this task is affined to in order to reduce the time spent querying167167+ * unusable CPUs, 
e.g. if this task is pinned to a small percentage of168168+ * total CPUs.169169+ */170170+ nproc = get_nprocs_conf();171171+ min_cpu = -1;172172+ max_cpu = -1;173173+ cnt = 0;174174+175175+ for (i = 0; i < nproc; i++) {176176+ if (!CPU_ISSET(i, &possible_mask))177177+ continue;178178+ if (min_cpu == -1)179179+ min_cpu = i;180180+ max_cpu = i;181181+ cnt++;182182+ }183183+184184+ return (cnt < 2) ? -EINVAL : 0;185185+}186186+176187int main(int argc, char *argv[])177188{178189 int r, i, snapshot;···216167 TEST_ASSERT(!r, "sched_getaffinity failed, errno = %d (%s)", errno,217168 strerror(errno));218169219219- if (CPU_COUNT(&possible_mask) < 2) {220220- print_skip("Only one CPU, task migration not possible\n");170170+ if (calc_min_max_cpu()) {171171+ print_skip("Only one usable CPU, task migration not possible");221172 exit(KSFT_SKIP);222173 }223174···229180 * CPU affinity.230181 */231182 vm = vm_create_default(VCPU_ID, 0, guest_code);183183+ ucall_init(vm, NULL);232184233185 pthread_create(&migration_thread, NULL, migration_worker, 0);234186
#!/bin/bash

# Test insertion speed for packets with identical addresses/ports
# that are all placed in distinct conntrack zones.

sfx=$(mktemp -u "XXXXXXXX")
ns="ns-$sfx"

# Kselftest framework requirement - SKIP code is 4.
ksft_skip=4

zones=20000
have_ct_tool=0
ret=0

cleanup()
{
	ip netns del $ns
}

ip netns add $ns
if [ $? -ne 0 ];then
	# Bug fix: message previously interpolated the undefined $gw
	# (copy-paste from another selftest); report the namespace we
	# actually failed to create.
	echo "SKIP: Could not create net namespace $ns"
	exit $ksft_skip
fi

trap cleanup EXIT

conntrack -V > /dev/null 2>&1
if [ $? -eq 0 ];then
	have_ct_tool=1
fi

ip -net "$ns" link set lo up

# Insert $1 conntrack entries via the packet path: an nft map assigns each
# numgen value its own ct zone, then UDP traffic to lo creates the entries.
test_zones() {
	local max_zones=$1

ip netns exec $ns sysctl -q net.netfilter.nf_conntrack_udp_timeout=3600
ip netns exec $ns nft -f /dev/stdin<<EOF
flush ruleset
table inet raw {
	map rndzone {
		typeof numgen inc mod $max_zones : ct zone
	}

	chain output {
		type filter hook output priority -64000; policy accept;
		udp dport 12345 ct zone set numgen inc mod 65536 map @rndzone
	}
}
EOF
	# Populate the map with the identity mapping 1:1 .. max:max.
	(
	echo "add element inet raw rndzone {"
	for i in $(seq 1 $max_zones);do
		echo -n "$i : $i"
		if [ $i -lt $max_zones ]; then
			echo ","
		else
			echo "}"
		fi
	done
	) | ip netns exec $ns nft -f /dev/stdin

	local i=0
	local j=0
	local outerstart=$(date +%s%3N)
	local stop=$outerstart

	while [ $i -lt $max_zones ]; do
		local start=$(date +%s%3N)
		i=$((i + 10000))
		j=$((j + 1))
		dd if=/dev/zero of=/dev/stdout bs=8k count=10000 2>/dev/null | ip netns exec "$ns" nc -w 1 -q 1 -u -p 12345 127.0.0.1 12345 > /dev/null
		if [ $? -ne 0 ] ;then
			ret=1
			break
		fi

		stop=$(date +%s%3N)
		local duration=$((stop-start))
		echo "PASS: added 10000 entries in $duration ms (now $i total, loop $j)"
	done

	if [ $have_ct_tool -eq 1 ]; then
		local count=$(ip netns exec "$ns" conntrack -C)
		local duration=$((stop-outerstart))

		if [ $count -eq $max_zones ]; then
			echo "PASS: inserted $count entries from packet path in $duration ms total"
		else
			ip netns exec $ns conntrack -S 1>&2
			echo "FAIL: inserted $count entries from packet path in $duration ms total, expected $max_zones entries"
			ret=1
		fi
	fi

	if [ $ret -ne 0 ];then
		echo "FAIL: insert $max_zones entries from packet path" 1>&2
	fi
}

# Insert $1 conntrack entries directly via ctnetlink (conntrack -I),
# one zone per entry, and verify the final count.
test_conntrack_tool() {
	local max_zones=$1

	ip netns exec $ns conntrack -F >/dev/null 2>/dev/null

	local outerstart=$(date +%s%3N)
	local start=$(date +%s%3N)
	local stop=$start
	local i=0
	while [ $i -lt $max_zones ]; do
		i=$((i + 1))
		ip netns exec "$ns" conntrack -I -s 1.1.1.1 -d 2.2.2.2 --protonum 6 \
			 --timeout 3600 --state ESTABLISHED --sport 12345 --dport 1000 --zone $i >/dev/null 2>&1
		if [ $? -ne 0 ];then
			# Re-run without silencing stderr so the failure reason
			# is visible in the log.
			ip netns exec "$ns" conntrack -I -s 1.1.1.1 -d 2.2.2.2 --protonum 6 \
				 --timeout 3600 --state ESTABLISHED --sport 12345 --dport 1000 --zone $i > /dev/null
			echo "FAIL: conntrack -I returned an error"
			ret=1
			break
		fi

		if [ $((i%10000)) -eq 0 ];then
			stop=$(date +%s%3N)

			local duration=$((stop-start))
			echo "PASS: added 10000 entries in $duration ms (now $i total)"
			start=$stop
		fi
	done

	local count=$(ip netns exec "$ns" conntrack -C)
	local duration=$((stop-outerstart))

	if [ $count -eq $max_zones ]; then
		echo "PASS: inserted $count entries via ctnetlink in $duration ms"
	else
		ip netns exec $ns conntrack -S 1>&2
		echo "FAIL: inserted $count entries via ctnetlink in $duration ms, expected $max_zones entries ($duration ms)"
		ret=1
	fi
}

test_zones $zones

if [ $have_ct_tool -eq 1 ];then
	test_conntrack_tool $zones
else
	echo "SKIP: Could not run ctnetlink insertion test without conntrack tool"
	if [ $ret -eq 0 ];then
		exit $ksft_skip
	fi
fi

exit $ret
+49-19
virt/kvm/kvm_main.c
···235235{236236}237237238238-static inline bool kvm_kick_many_cpus(const struct cpumask *cpus, bool wait)238238+static inline bool kvm_kick_many_cpus(cpumask_var_t tmp, bool wait)239239{240240- if (unlikely(!cpus))240240+ const struct cpumask *cpus;241241+242242+ if (likely(cpumask_available(tmp)))243243+ cpus = tmp;244244+ else241245 cpus = cpu_online_mask;242246243247 if (cpumask_empty(cpus))···267263 continue;268264269265 kvm_make_request(req, vcpu);270270- cpu = vcpu->cpu;271266272267 if (!(req & KVM_REQUEST_NO_WAKEUP) && kvm_vcpu_wake_up(vcpu))273268 continue;274269275275- if (tmp != NULL && cpu != -1 && cpu != me &&276276- kvm_request_needs_ipi(vcpu, req))277277- __cpumask_set_cpu(cpu, tmp);270270+ /*271271+ * tmp can be "unavailable" if cpumasks are allocated off stack272272+ * as allocation of the mask is deliberately not fatal and is273273+ * handled by falling back to kicking all online CPUs.274274+ */275275+ if (!cpumask_available(tmp))276276+ continue;277277+278278+ /*279279+ * Note, the vCPU could get migrated to a different pCPU at any280280+ * point after kvm_request_needs_ipi(), which could result in281281+ * sending an IPI to the previous pCPU. But, that's ok because282282+ * the purpose of the IPI is to ensure the vCPU returns to283283+ * OUTSIDE_GUEST_MODE, which is satisfied if the vCPU migrates.284284+ * Entering READING_SHADOW_PAGE_TABLES after this point is also285285+ * ok, as the requirement is only that KVM wait for vCPUs that286286+ * were reading SPTEs _before_ any changes were finalized. 
See287287+ * kvm_vcpu_kick() for more details on handling requests.288288+ */289289+ if (kvm_request_needs_ipi(vcpu, req)) {290290+ cpu = READ_ONCE(vcpu->cpu);291291+ if (cpu != -1 && cpu != me)292292+ __cpumask_set_cpu(cpu, tmp);293293+ }278294 }279295280296 called = kvm_kick_many_cpus(tmp, !!(req & KVM_REQUEST_WAIT));···326302#ifndef CONFIG_HAVE_KVM_ARCH_TLB_FLUSH_ALL327303void kvm_flush_remote_tlbs(struct kvm *kvm)328304{329329- /*330330- * Read tlbs_dirty before setting KVM_REQ_TLB_FLUSH in331331- * kvm_make_all_cpus_request.332332- */333333- long dirty_count = smp_load_acquire(&kvm->tlbs_dirty);334334-335305 ++kvm->stat.generic.remote_tlb_flush_requests;306306+336307 /*337308 * We want to publish modifications to the page tables before reading338309 * mode. Pairs with a memory barrier in arch-specific code.···342323 if (!kvm_arch_flush_remote_tlb(kvm)343324 || kvm_make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH))344325 ++kvm->stat.generic.remote_tlb_flush;345345- cmpxchg(&kvm->tlbs_dirty, dirty_count, 0);346326}347327EXPORT_SYMBOL_GPL(kvm_flush_remote_tlbs);348328#endif···546528 }547529 }548530549549- if (range->flush_on_ret && (ret || kvm->tlbs_dirty))531531+ if (range->flush_on_ret && ret)550532 kvm_flush_remote_tlbs(kvm);551533552534 if (locked)···3152313431533135static void shrink_halt_poll_ns(struct kvm_vcpu *vcpu)31543136{31553155- unsigned int old, val, shrink;31373137+ unsigned int old, val, shrink, grow_start;3156313831573139 old = val = vcpu->halt_poll_ns;31583140 shrink = READ_ONCE(halt_poll_ns_shrink);31413141+ grow_start = READ_ONCE(halt_poll_ns_grow_start);31593142 if (shrink == 0)31603143 val = 0;31613144 else31623145 val /= shrink;31463146+31473147+ if (val < grow_start)31483148+ val = 0;3163314931643150 vcpu->halt_poll_ns = val;31653151 trace_kvm_halt_poll_ns_shrink(vcpu->vcpu_id, val, old);···33123290 */33133291void kvm_vcpu_kick(struct kvm_vcpu *vcpu)33143292{33153315- int me;33163316- int cpu = vcpu->cpu;32933293+ int me, cpu;3317329433183295 
if (kvm_vcpu_wake_up(vcpu))33193296 return;3320329732983298+ /*32993299+ * Note, the vCPU could get migrated to a different pCPU at any point33003300+ * after kvm_arch_vcpu_should_kick(), which could result in sending an33013301+ * IPI to the previous pCPU. But, that's ok because the purpose of the33023302+ * IPI is to force the vCPU to leave IN_GUEST_MODE, and migrating the33033303+ * vCPU also requires it to leave IN_GUEST_MODE.33043304+ */33213305 me = get_cpu();33223322- if (cpu != me && (unsigned)cpu < nr_cpu_ids && cpu_online(cpu))33233323- if (kvm_arch_vcpu_should_kick(vcpu))33063306+ if (kvm_arch_vcpu_should_kick(vcpu)) {33073307+ cpu = READ_ONCE(vcpu->cpu);33083308+ if (cpu != me && (unsigned)cpu < nr_cpu_ids && cpu_online(cpu))33243309 smp_send_reschedule(cpu);33103310+ }33253311 put_cpu();33263312}33273313EXPORT_SYMBOL_GPL(kvm_vcpu_kick);