@@ -60,6 +60,10 @@
 Atish Patra <atishp@atishpatra.org> <atish.patra@wdc.com>
 Axel Dyks <xl@xlsigned.net>
 Axel Lin <axel.lin@gmail.com>
+Baolin Wang <baolin.wang@linux.alibaba.com> <baolin.wang@linaro.org>
+Baolin Wang <baolin.wang@linux.alibaba.com> <baolin.wang@spreadtrum.com>
+Baolin Wang <baolin.wang@linux.alibaba.com> <baolin.wang@unisoc.com>
+Baolin Wang <baolin.wang@linux.alibaba.com> <baolin.wang7@gmail.com>
 Bart Van Assche <bvanassche@acm.org> <bart.vanassche@sandisk.com>
 Bart Van Assche <bvanassche@acm.org> <bart.vanassche@wdc.com>
 Ben Gardner <bgardner@wabtec.com>
+2
Documentation/admin-guide/kernel-parameters.txt
@@ -3176,6 +3176,7 @@
                        no_entry_flush [PPC]
                        no_uaccess_flush [PPC]
                        mmio_stale_data=off [X86]
+                       retbleed=off [X86]

                        Exceptions:
                               This does not have any effect on
@@ -3198,6 +3199,7 @@
                        mds=full,nosmt [X86]
                        tsx_async_abort=full,nosmt [X86]
                        mmio_stale_data=full,nosmt [X86]
+                       retbleed=auto,nosmt [X86]

        mminit_loglevel=
                        [KNL] When CONFIG_DEBUG_MEMORY_INIT is set, this
@@ -183,6 +183,7 @@
       Should specify the gpio for phy reset.

   phy-reset-duration:
+    $ref: /schemas/types.yaml#/definitions/uint32
     deprecated: true
     description:
       Reset duration in milliseconds. Should present only if property
@@ -191,12 +192,14 @@
       and 1 millisecond will be used instead.

   phy-reset-active-high:
+    type: boolean
     deprecated: true
     description:
       If present then the reset sequence using the GPIO specified in the
       "phy-reset-gpios" property is reversed (H=reset state, L=operation state).

   phy-reset-post-delay:
+    $ref: /schemas/types.yaml#/definitions/uint32
     deprecated: true
     description:
       Post reset delay in milliseconds. If present then a delay of phy-reset-post-delay
+8-1
Documentation/networking/ip-sysctl.rst
@@ -2866,7 +2866,14 @@
        Default: 4K

 sctp_wmem  - vector of 3 INTEGERs: min, default, max
-       Currently this tunable has no effect.
+       Only the first value ("min") is used, "default" and "max" are
+       ignored.
+
+       min: Minimum size of send buffer that can be used by SCTP sockets.
+       It is guaranteed to each SCTP socket (but not association) even
+       under moderate memory pressure.
+
+       Default: 4K

 addr_scope_policy - INTEGER
        Control IPv4 address scoping - draft-stewart-tsvwg-sctp-ipv4-00
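
The three fields live in one procfs entry. A minimal userspace sketch (assumes an SCTP-enabled kernel; error handling kept short) that reads them and reflects the semantics described above:

    #include <stdio.h>

    /* Read the three sctp_wmem values; only "min" is honored per the text above. */
    int main(void)
    {
            int min, def, max;
            FILE *f = fopen("/proc/sys/net/sctp/sctp_wmem", "r");

            if (!f || fscanf(f, "%d %d %d", &min, &def, &max) != 3) {
                    perror("sctp_wmem");
                    return 1;
            }
            printf("min=%d (used), default=%d, max=%d (ignored)\n", min, def, max);
            fclose(f);
            return 0;
    }
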
@@ -23,13 +23,13 @@
 static __always_inline void prepare_frametrace(struct pt_regs *regs)
 {
        __asm__ __volatile__(
-               /* Save $r1 */
+               /* Save $ra */
                STORE_ONE_REG(1)
-               /* Use $r1 to save PC */
-               "pcaddi $r1, 0\n\t"
-               STR_LONG_S " $r1, %0\n\t"
-               /* Restore $r1 */
-               STR_LONG_L " $r1, %1, "STR_LONGSIZE"\n\t"
+               /* Use $ra to save PC */
+               "pcaddi $ra, 0\n\t"
+               STR_LONG_S " $ra, %0\n\t"
+               /* Restore $ra */
+               STR_LONG_L " $ra, %1, "STR_LONGSIZE"\n\t"
                STORE_ONE_REG(2)
                STORE_ONE_REG(3)
                STORE_ONE_REG(4)
+2-2
arch/loongarch/include/asm/thread_info.h
@@ -44,14 +44,14 @@
 }

 /* How to get the thread information struct from C. */
-register struct thread_info *__current_thread_info __asm__("$r2");
+register struct thread_info *__current_thread_info __asm__("$tp");

 static inline struct thread_info *current_thread_info(void)
 {
        return __current_thread_info;
 }

-register unsigned long current_stack_pointer __asm__("$r3");
+register unsigned long current_stack_pointer __asm__("$sp");

 #endif /* !__ASSEMBLY__ */

@@ -32,7 +32,7 @@
        /* We might not get launched at the address the kernel is linked to,
           so we jump there. */
        la.abs  t0, 0f
-       jirl    zero, t0, 0
+       jr      t0
 0:
        la      t0, __bss_start         # clear .bss
        st.d    zero, t0, 0
@@ -50,7 +50,7 @@
        /* KSave3 used for percpu base, initialized as 0 */
        csrwr   zero, PERCPU_BASE_KS
        /* GPR21 used for percpu base (runtime), initialized as 0 */
-       or      u0, zero, zero
+       move    u0, zero

        la      tp, init_thread_union
        /* Set the SP after an empty pt_regs. */
@@ -85,8 +85,8 @@
        ld.d    sp, t0, CPU_BOOT_STACK
        ld.d    tp, t0, CPU_BOOT_TINFO

-       la.abs  t0, 0f
-       jirl    zero, t0, 0
+       la.abs  t0, 0f
+       jr      t0
 0:
        bl      start_secondary
 SYM_CODE_END(smpboot_entry)
@@ -103,9 +103,14 @@

        dmi_memdev_name(handle, &bank, &device);

-       /* both strings must be non-zero */
-       if (bank && *bank && device && *device)
-               snprintf(dimm->label, sizeof(dimm->label), "%s %s", bank, device);
+       /*
+        * Set to a NULL string when both bank and device are zero. In this case,
+        * the label assigned by default will be preserved.
+        */
+       snprintf(dimm->label, sizeof(dimm->label), "%s%s%s",
+                (bank && *bank) ? bank : "",
+                (bank && *bank && device && *device) ? " " : "",
+                (device && *device) ? device : "");
 }

 static void assign_dmi_dimm_info(struct dimm_info *dimm, struct memdev_dmi_entry *entry)
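
The new snprintf() builds the label from whichever DMI strings are present, inserting the separating space only when both exist. A standalone sketch of the same join pattern (make_label and the sample strings are hypothetical):

    #include <stdio.h>

    /* Join two optional strings with a space, yielding "" when both are absent. */
    static void make_label(char *buf, size_t len, const char *bank, const char *device)
    {
            snprintf(buf, len, "%s%s%s",
                     (bank && *bank) ? bank : "",
                     (bank && *bank && device && *device) ? " " : "",
                     (device && *device) ? device : "");
    }

    int main(void)
    {
            char label[64];

            make_label(label, sizeof(label), "BANK 0", "DIMM A");   /* "BANK 0 DIMM A" */
            printf("'%s'\n", label);
            make_label(label, sizeof(label), NULL, "DIMM A");       /* "DIMM A" */
            printf("'%s'\n", label);
            make_label(label, sizeof(label), NULL, NULL);           /* "" */
            printf("'%s'\n", label);
            return 0;
    }
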
@@ -6,7 +6,7 @@
        bool "AMD DC - Enable new display engine"
        default y
        select SND_HDA_COMPONENT if SND_HDA_CORE
-       select DRM_AMD_DC_DCN if X86 && !(KCOV_INSTRUMENT_ALL && KCOV_ENABLE_COMPARISONS)
+       select DRM_AMD_DC_DCN if (X86 || PPC_LONG_DOUBLE_128) && !(KCOV_INSTRUMENT_ALL && KCOV_ENABLE_COMPARISONS)
        help
          Choose this option if you want to use the new display engine
          support for AMDGPU. This adds required support for Vega and
@@ -1282,10 +1282,10 @@
        intel_uncore_write_fw(uncore, mode, _MASKED_BIT_ENABLE(STOP_RING));

        /*
-        * Wa_22011802037 : gen12, Prior to doing a reset, ensure CS is
+        * Wa_22011802037 : gen11, gen12, Prior to doing a reset, ensure CS is
         * stopped, set ring stop bit and prefetch disable bit to halt CS
         */
-       if (GRAPHICS_VER(engine->i915) == 12)
+       if (IS_GRAPHICS_VER(engine->i915, 11, 12))
                intel_uncore_write_fw(uncore, RING_MODE_GEN7(engine->mmio_base),
                                      _MASKED_BIT_ENABLE(GEN12_GFX_PREFETCH_DISABLE));

@@ -1308,6 +1308,18 @@
                return -ENODEV;

        ENGINE_TRACE(engine, "\n");
+       /*
+        * TODO: Find out why occasionally stopping the CS times out. Seen
+        * especially with gem_eio tests.
+        *
+        * Occasionally trying to stop the cs times out, but does not adversely
+        * affect functionality. The timeout is set as a config parameter that
+        * defaults to 100ms. In most cases the follow up operation is to wait
+        * for pending MI_FORCE_WAKES. The assumption is that this timeout is
+        * sufficient for any pending MI_FORCEWAKEs to complete. Once root
+        * caused, the caller must check and handle the return from this
+        * function.
+        */
        if (__intel_engine_stop_cs(engine, 1000, stop_timeout(engine))) {
                ENGINE_TRACE(engine,
                             "timed out on STOP_RING -> IDLE; HEAD:%04x, TAIL:%04x\n",
@@ -1332,6 +1344,78 @@
        ENGINE_TRACE(engine, "\n");

        ENGINE_WRITE_FW(engine, RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING));
+}
+
+static u32 __cs_pending_mi_force_wakes(struct intel_engine_cs *engine)
+{
+       static const i915_reg_t _reg[I915_NUM_ENGINES] = {
+               [RCS0] = MSG_IDLE_CS,
+               [BCS0] = MSG_IDLE_BCS,
+               [VCS0] = MSG_IDLE_VCS0,
+               [VCS1] = MSG_IDLE_VCS1,
+               [VCS2] = MSG_IDLE_VCS2,
+               [VCS3] = MSG_IDLE_VCS3,
+               [VCS4] = MSG_IDLE_VCS4,
+               [VCS5] = MSG_IDLE_VCS5,
+               [VCS6] = MSG_IDLE_VCS6,
+               [VCS7] = MSG_IDLE_VCS7,
+               [VECS0] = MSG_IDLE_VECS0,
+               [VECS1] = MSG_IDLE_VECS1,
+               [VECS2] = MSG_IDLE_VECS2,
+               [VECS3] = MSG_IDLE_VECS3,
+               [CCS0] = MSG_IDLE_CS,
+               [CCS1] = MSG_IDLE_CS,
+               [CCS2] = MSG_IDLE_CS,
+               [CCS3] = MSG_IDLE_CS,
+       };
+       u32 val;
+
+       if (!_reg[engine->id].reg) {
+               drm_err(&engine->i915->drm,
+                       "MSG IDLE undefined for engine id %u\n", engine->id);
+               return 0;
+       }
+
+       val = intel_uncore_read(engine->uncore, _reg[engine->id]);
+
+       /* bits[29:25] & bits[13:9] >> shift */
+       return (val & (val >> 16) & MSG_IDLE_FW_MASK) >> MSG_IDLE_FW_SHIFT;
+}
+
+static void __gpm_wait_for_fw_complete(struct intel_gt *gt, u32 fw_mask)
+{
+       int ret;
+
+       /* Ensure GPM receives fw up/down after CS is stopped */
+       udelay(1);
+
+       /* Wait for forcewake request to complete in GPM */
+       ret = __intel_wait_for_register_fw(gt->uncore,
+                                          GEN9_PWRGT_DOMAIN_STATUS,
+                                          fw_mask, fw_mask, 5000, 0, NULL);
+
+       /* Ensure CS receives fw ack from GPM */
+       udelay(1);
+
+       if (ret)
+               GT_TRACE(gt, "Failed to complete pending forcewake %d\n", ret);
+}
+
+/*
+ * Wa_22011802037:gen12: In addition to stopping the cs, we need to wait for any
+ * pending MI_FORCE_WAKEUP requests that the CS has initiated to complete. The
+ * pending status is indicated by bits[13:9] (masked by bits[29:25]) in the
+ * MSG_IDLE register. There's one MSG_IDLE register per reset domain. Since we
+ * are concerned only with the gt reset here, we use a logical OR of pending
+ * forcewakeups from all reset domains and then wait for them to complete by
+ * querying PWRGT_DOMAIN_STATUS.
+ */
+void intel_engine_wait_for_pending_mi_fw(struct intel_engine_cs *engine)
+{
+       u32 fw_pending = __cs_pending_mi_force_wakes(engine);
+
+       if (fw_pending)
+               __gpm_wait_for_fw_complete(engine->gt, fw_pending);
 }

 static u32
@@ -2968,6 +2968,13 @@
        ring_set_paused(engine, 1);
        intel_engine_stop_cs(engine);

+       /*
+        * Wa_22011802037:gen11/gen12: In addition to stopping the cs, we need
+        * to wait for any pending mi force wakeups
+        */
+       if (IS_GRAPHICS_VER(engine->i915, 11, 12))
+               intel_engine_wait_for_pending_mi_fw(engine);
+
        engine->execlists.reset_ccid = active_ccid(engine);
 }
+2-2
drivers/gpu/drm/i915/gt/uc/intel_guc.c
@@ -310,8 +310,8 @@
        if (IS_DG2(gt->i915))
                flags |= GUC_WA_DUAL_QUEUE;

-       /* Wa_22011802037: graphics version 12 */
-       if (GRAPHICS_VER(gt->i915) == 12)
+       /* Wa_22011802037: graphics version 11/12 */
+       if (IS_GRAPHICS_VER(gt->i915, 11, 12))
                flags |= GUC_WA_PRE_PARSER;

        /* Wa_16011777198:dg2 */
+6-75
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -1578,87 +1578,18 @@
        lrc_update_regs(ce, engine, head);
 }

-static u32 __cs_pending_mi_force_wakes(struct intel_engine_cs *engine)
-{
-       static const i915_reg_t _reg[I915_NUM_ENGINES] = {
-               [RCS0] = MSG_IDLE_CS,
-               [BCS0] = MSG_IDLE_BCS,
-               [VCS0] = MSG_IDLE_VCS0,
-               [VCS1] = MSG_IDLE_VCS1,
-               [VCS2] = MSG_IDLE_VCS2,
-               [VCS3] = MSG_IDLE_VCS3,
-               [VCS4] = MSG_IDLE_VCS4,
-               [VCS5] = MSG_IDLE_VCS5,
-               [VCS6] = MSG_IDLE_VCS6,
-               [VCS7] = MSG_IDLE_VCS7,
-               [VECS0] = MSG_IDLE_VECS0,
-               [VECS1] = MSG_IDLE_VECS1,
-               [VECS2] = MSG_IDLE_VECS2,
-               [VECS3] = MSG_IDLE_VECS3,
-               [CCS0] = MSG_IDLE_CS,
-               [CCS1] = MSG_IDLE_CS,
-               [CCS2] = MSG_IDLE_CS,
-               [CCS3] = MSG_IDLE_CS,
-       };
-       u32 val;
-
-       if (!_reg[engine->id].reg)
-               return 0;
-
-       val = intel_uncore_read(engine->uncore, _reg[engine->id]);
-
-       /* bits[29:25] & bits[13:9] >> shift */
-       return (val & (val >> 16) & MSG_IDLE_FW_MASK) >> MSG_IDLE_FW_SHIFT;
-}
-
-static void __gpm_wait_for_fw_complete(struct intel_gt *gt, u32 fw_mask)
-{
-       int ret;
-
-       /* Ensure GPM receives fw up/down after CS is stopped */
-       udelay(1);
-
-       /* Wait for forcewake request to complete in GPM */
-       ret = __intel_wait_for_register_fw(gt->uncore,
-                                          GEN9_PWRGT_DOMAIN_STATUS,
-                                          fw_mask, fw_mask, 5000, 0, NULL);
-
-       /* Ensure CS receives fw ack from GPM */
-       udelay(1);
-
-       if (ret)
-               GT_TRACE(gt, "Failed to complete pending forcewake %d\n", ret);
-}
-
-/*
- * Wa_22011802037:gen12: In addition to stopping the cs, we need to wait for any
- * pending MI_FORCE_WAKEUP requests that the CS has initiated to complete. The
- * pending status is indicated by bits[13:9] (masked by bits[29:25]) in the
- * MSG_IDLE register. There's one MSG_IDLE register per reset domain. Since we
- * are concerned only with the gt reset here, we use a logical OR of pending
- * forcewakeups from all reset domains and then wait for them to complete by
- * querying PWRGT_DOMAIN_STATUS.
- */
 static void guc_engine_reset_prepare(struct intel_engine_cs *engine)
 {
-       u32 fw_pending;
-
-       if (GRAPHICS_VER(engine->i915) != 12)
+       if (!IS_GRAPHICS_VER(engine->i915, 11, 12))
                return;

-       /*
-        * Wa_22011802037
-        * TODO: Occasionally trying to stop the cs times out, but does not
-        * adversely affect functionality. The timeout is set as a config
-        * parameter that defaults to 100ms. Assuming that this timeout is
-        * sufficient for any pending MI_FORCEWAKEs to complete, ignore the
-        * timeout returned here until it is root caused.
-        */
        intel_engine_stop_cs(engine);

-       fw_pending = __cs_pending_mi_force_wakes(engine);
-       if (fw_pending)
-               __gpm_wait_for_fw_complete(engine->gt, fw_pending);
+       /*
+        * Wa_22011802037:gen11/gen12: In addition to stopping the cs, we need
+        * to wait for any pending mi force wakeups
+        */
+       intel_engine_wait_for_pending_mi_fw(engine);
 }

 static void guc_reset_nop(struct intel_engine_cs *engine)
+5-1
drivers/gpu/drm/nouveau/nouveau_dmem.c
@@ -680,7 +680,11 @@
                goto out_free_dma;

        for (i = 0; i < npages; i += max) {
-               args.end = start + (max << PAGE_SHIFT);
+               if (args.start + (max << PAGE_SHIFT) > end)
+                       args.end = end;
+               else
+                       args.end = args.start + (max << PAGE_SHIFT);
+
                ret = migrate_vma_setup(&args);
                if (ret)
                        goto out_free_pfns;
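
The fix clamps each chunk's end to the overall range so the final iteration cannot run past `end`. A minimal sketch of the same chunking arithmetic (standalone, with made-up sizes):

    #include <stdio.h>

    #define PAGE_SHIFT 12UL

    /* Walk [start, end) in chunks of at most `max` pages, clamping the last one. */
    int main(void)
    {
            unsigned long start = 0, end = 10UL << PAGE_SHIFT; /* 10 pages */
            unsigned long max = 4;                             /* 4 pages per chunk */
            unsigned long cur_start, cur_end;

            for (cur_start = start; cur_start < end; cur_start = cur_end) {
                    if (cur_start + (max << PAGE_SHIFT) > end)
                            cur_end = end;                     /* clamp final chunk */
                    else
                            cur_end = cur_start + (max << PAGE_SHIFT);
                    printf("chunk: [%lu, %lu) pages\n",
                           cur_start >> PAGE_SHIFT, cur_end >> PAGE_SHIFT);
            }
            return 0;
    }
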
@@ -162,7 +162,13 @@

        raw_local_irq_enable();
        ret = __intel_idle(dev, drv, index);
-       raw_local_irq_disable();
+
+       /*
+        * The lockdep hardirqs state may be changed to 'on' with timer
+        * tick interrupt followed by __do_softirq(). Use local_irq_disable()
+        * to keep the hardirqs state correct.
+        */
+       local_irq_disable();

        return ret;
 }
+4-1
drivers/net/ethernet/fungible/funeth/funeth_rx.c
@@ -142,6 +142,7 @@
                            int ref_ok, struct funeth_txq *xdp_q)
 {
        struct bpf_prog *xdp_prog;
+       struct xdp_frame *xdpf;
        struct xdp_buff xdp;
        u32 act;

@@ -163,7 +164,9 @@
        case XDP_TX:
                if (unlikely(!ref_ok))
                        goto pass;
-               if (!fun_xdp_tx(xdp_q, xdp.data, xdp.data_end - xdp.data))
+
+               xdpf = xdp_convert_buff_to_frame(&xdp);
+               if (!xdpf || !fun_xdp_tx(xdp_q, xdpf))
                        goto xdp_error;
                FUN_QSTAT_INC(q, xdp_tx);
                q->xdp_flush |= FUN_XDP_FLUSH_TX;
+9-11
drivers/net/ethernet/fungible/funeth/funeth_tx.c
@@ -466,7 +466,7 @@

        do {
                fun_xdp_unmap(q, reclaim_idx);
-               page_frag_free(q->info[reclaim_idx].vaddr);
+               xdp_return_frame(q->info[reclaim_idx].xdpf);

                trace_funeth_tx_free(q, reclaim_idx, 1, head);

@@ -479,11 +479,11 @@
        return npkts;
 }

-bool fun_xdp_tx(struct funeth_txq *q, void *data, unsigned int len)
+bool fun_xdp_tx(struct funeth_txq *q, struct xdp_frame *xdpf)
 {
        struct fun_eth_tx_req *req;
        struct fun_dataop_gl *gle;
-       unsigned int idx;
+       unsigned int idx, len;
        dma_addr_t dma;

        if (fun_txq_avail(q) < FUN_XDP_CLEAN_THRES)
@@ -494,7 +494,8 @@
                return false;
        }

-       dma = dma_map_single(q->dma_dev, data, len, DMA_TO_DEVICE);
+       len = xdpf->len;
+       dma = dma_map_single(q->dma_dev, xdpf->data, len, DMA_TO_DEVICE);
        if (unlikely(dma_mapping_error(q->dma_dev, dma))) {
                FUN_QSTAT_INC(q, tx_map_err);
                return false;
@@ -514,7 +515,7 @@
        gle = (struct fun_dataop_gl *)req->dataop.imm;
        fun_dataop_gl_init(gle, 0, 0, len, dma);

-       q->info[idx].vaddr = data;
+       q->info[idx].xdpf = xdpf;

        u64_stats_update_begin(&q->syncp);
        q->stats.tx_bytes += len;
@@ -545,12 +546,9 @@
        if (unlikely(q_idx >= fp->num_xdpqs))
                return -ENXIO;

-       for (q = xdpqs[q_idx], i = 0; i < n; i++) {
-               const struct xdp_frame *xdpf = frames[i];
-
-               if (!fun_xdp_tx(q, xdpf->data, xdpf->len))
+       for (q = xdpqs[q_idx], i = 0; i < n; i++)
+               if (!fun_xdp_tx(q, frames[i]))
                        break;
-       }

        if (unlikely(flags & XDP_XMIT_FLUSH))
                fun_txq_wr_db(q);
@@ -577,7 +575,7 @@
                unsigned int idx = q->cons_cnt & q->mask;

                fun_xdp_unmap(q, idx);
-               page_frag_free(q->info[idx].vaddr);
+               xdp_return_frame(q->info[idx].xdpf);
                q->cons_cnt++;
        }
 }
@@ -1925,11 +1925,15 @@
         * non-zero req_queue_pairs says that user requested a new
         * queue count via ethtool's set_channels, so use this
         * value for queues distribution across traffic classes
+        * We need at least one queue pair for the interface
+        * to be usable as we see in else statement.
         */
        if (vsi->req_queue_pairs > 0)
                vsi->num_queue_pairs = vsi->req_queue_pairs;
        else if (pf->flags & I40E_FLAG_MSIX_ENABLED)
                vsi->num_queue_pairs = pf->num_lan_msix;
+       else
+               vsi->num_queue_pairs = 1;
 }

 /* Number of queues per enabled TC */
@@ -4233,7 +4233,7 @@
        }

        /* If the chain is ended by an load/store pair then this
-        * could serve as the new head of the the next chain.
+        * could serve as the new head of the next chain.
         */
        if (curr_pair_is_memcpy(meta1, meta2)) {
                head_ld_meta = meta1;
+22
drivers/net/ethernet/sfc/ptp.c
@@ -1100,7 +1100,29 @@

        tx_queue = efx_channel_get_tx_queue(ptp_data->channel, type);
        if (tx_queue && tx_queue->timestamping) {
+               /* This code invokes normal driver TX code which is always
+                * protected from softirqs when called from generic TX code,
+                * which in turn disables preemption. Look at __dev_queue_xmit
+                * which uses rcu_read_lock_bh disabling preemption for RCU
+                * plus disabling softirqs. We do not need RCU reader
+                * protection here.
+                *
+                * Although it is theoretically safe for current PTP TX/RX code
+                * running without disabling softirqs, there are three good
+                * reasons for doing so:
+                *
+                * 1) The code invoked is mainly implemented for non-PTP
+                *    packets and it is always executed with softirqs
+                *    disabled.
+                * 2) This being a single PTP packet, better to not
+                *    interrupt its processing by softirqs which can lead
+                *    to high latencies.
+                * 3) netdev_xmit_more checks preemption is disabled and
+                *    triggers a BUG_ON if not.
+                */
+               local_bh_disable();
                efx_enqueue_skb(tx_queue, skb);
+               local_bh_enable();
        } else {
                WARN_ONCE(1, "PTP channel has no timestamped tx queue\n");
                dev_kfree_skb_any(skb);
@@ -688,18 +688,19 @@

        ret = mediatek_dwmac_clks_config(priv_plat, true);
        if (ret)
-               return ret;
+               goto err_remove_config_dt;

        ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
-       if (ret) {
-               stmmac_remove_config_dt(pdev, plat_dat);
+       if (ret)
                goto err_drv_probe;
-       }

        return 0;

 err_drv_probe:
        mediatek_dwmac_clks_config(priv_plat, false);
+err_remove_config_dt:
+       stmmac_remove_config_dt(pdev, plat_dat);
+
        return ret;
 }

+1-1
drivers/net/ipa/ipa_qmi_msg.h
@@ -214,7 +214,7 @@

 /* The response to a IPA_QMI_INIT_DRIVER request begins with a standard
  * QMI response, but contains other information as well. Currently we
- * simply wait for the the INIT_DRIVER transaction to complete and
+ * simply wait for the INIT_DRIVER transaction to complete and
  * ignore any other data that might be returned.
  */
 struct ipa_init_modem_driver_rsp {
+21-12
drivers/net/macsec.c
@@ -243,6 +243,7 @@
 #define DEFAULT_SEND_SCI true
 #define DEFAULT_ENCRYPT false
 #define DEFAULT_ENCODING_SA 0
+#define MACSEC_XPN_MAX_REPLAY_WINDOW (((1 << 30) - 1))

 static bool send_sci(const struct macsec_secy *secy)
 {
@@ -1697,7 +1698,7 @@
                return false;

        if (attrs[MACSEC_SA_ATTR_PN] &&
-           *(u64 *)nla_data(attrs[MACSEC_SA_ATTR_PN]) == 0)
+           nla_get_u64(attrs[MACSEC_SA_ATTR_PN]) == 0)
                return false;

        if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
@@ -1753,7 +1754,8 @@
        }

        pn_len = secy->xpn ? MACSEC_XPN_PN_LEN : MACSEC_DEFAULT_PN_LEN;
-       if (nla_len(tb_sa[MACSEC_SA_ATTR_PN]) != pn_len) {
+       if (tb_sa[MACSEC_SA_ATTR_PN] &&
+           nla_len(tb_sa[MACSEC_SA_ATTR_PN]) != pn_len) {
                pr_notice("macsec: nl: add_rxsa: bad pn length: %d != %d\n",
                          nla_len(tb_sa[MACSEC_SA_ATTR_PN]), pn_len);
                rtnl_unlock();
@@ -1769,7 +1771,7 @@
                if (nla_len(tb_sa[MACSEC_SA_ATTR_SALT]) != MACSEC_SALT_LEN) {
                        pr_notice("macsec: nl: add_rxsa: bad salt length: %d != %d\n",
                                  nla_len(tb_sa[MACSEC_SA_ATTR_SALT]),
-                                 MACSEC_SA_ATTR_SALT);
+                                 MACSEC_SALT_LEN);
                        rtnl_unlock();
                        return -EINVAL;
                }
@@ -1842,7 +1844,7 @@
        return 0;

 cleanup:
-       kfree(rx_sa);
+       macsec_rxsa_put(rx_sa);
        rtnl_unlock();
        return err;
 }
@@ -1939,7 +1941,7 @@
        if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN)
                return false;

-       if (nla_get_u32(attrs[MACSEC_SA_ATTR_PN]) == 0)
+       if (nla_get_u64(attrs[MACSEC_SA_ATTR_PN]) == 0)
                return false;

        if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
@@ -2011,7 +2013,7 @@
                if (nla_len(tb_sa[MACSEC_SA_ATTR_SALT]) != MACSEC_SALT_LEN) {
                        pr_notice("macsec: nl: add_txsa: bad salt length: %d != %d\n",
                                  nla_len(tb_sa[MACSEC_SA_ATTR_SALT]),
-                                 MACSEC_SA_ATTR_SALT);
+                                 MACSEC_SALT_LEN);
                        rtnl_unlock();
                        return -EINVAL;
                }
@@ -2085,7 +2087,7 @@

 cleanup:
        secy->operational = was_operational;
-       kfree(tx_sa);
+       macsec_txsa_put(tx_sa);
        rtnl_unlock();
        return err;
 }
@@ -2293,7 +2295,7 @@
        if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN)
                return false;

-       if (attrs[MACSEC_SA_ATTR_PN] && nla_get_u32(attrs[MACSEC_SA_ATTR_PN]) == 0)
+       if (attrs[MACSEC_SA_ATTR_PN] && nla_get_u64(attrs[MACSEC_SA_ATTR_PN]) == 0)
                return false;

        if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
@@ -3745,9 +3747,6 @@
                secy->operational = tx_sa && tx_sa->active;
        }

-       if (data[IFLA_MACSEC_WINDOW])
-               secy->replay_window = nla_get_u32(data[IFLA_MACSEC_WINDOW]);
-
        if (data[IFLA_MACSEC_ENCRYPT])
                tx_sc->encrypt = !!nla_get_u8(data[IFLA_MACSEC_ENCRYPT]);

@@ -3792,6 +3793,16 @@
                }
        }

+       if (data[IFLA_MACSEC_WINDOW]) {
+               secy->replay_window = nla_get_u32(data[IFLA_MACSEC_WINDOW]);
+
+               /* IEEE 802.1AEbw-2013 10.7.8 - maximum replay window
+                * for XPN cipher suites */
+               if (secy->xpn &&
+                   secy->replay_window > MACSEC_XPN_MAX_REPLAY_WINDOW)
+                       return -EINVAL;
+       }
+
        return 0;
 }
@@ -3822,7 +3831,7 @@

        ret = macsec_changelink_common(dev, data);
        if (ret)
-               return ret;
+               goto cleanup;

        /* If h/w offloading is available, propagate to the device */
        if (macsec_is_offloaded(macsec)) {
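
Replacing the open-coded `*(u64 *)nla_data(...)` dereference with nla_get_u64() matters because netlink attribute payloads are only guaranteed 4-byte alignment; nla_get_u64() copies the value out instead of dereferencing it. A userspace sketch of the same idea (get_u64_unaligned and the buffer layout are hypothetical):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* A direct *(uint64_t *) dereference of a 4-byte-aligned payload is
     * undefined on strict-alignment CPUs; copying the bytes out is safe.
     */
    static uint64_t get_u64_unaligned(const void *p)
    {
            uint64_t v;

            memcpy(&v, p, sizeof(v));       /* safe for any alignment */
            return v;
    }

    int main(void)
    {
            unsigned char buf[12] = { 0 };
            uint64_t pn = 0x1122334455667788ULL;

            memcpy(buf + 4, &pn, sizeof(pn));       /* payload at a 4-byte offset */
            printf("pn = 0x%llx\n", (unsigned long long)get_u64_unaligned(buf + 4));
            return 0;
    }
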
+1-1
drivers/net/pcs/pcs-xpcs.c
@@ -896,7 +896,7 @@
         */
        ret = xpcs_read(xpcs, MDIO_MMD_VEND2, DW_VR_MII_AN_INTR_STS);
        if (ret < 0)
-               return false;
+               return ret;

        if (ret & DW_VR_MII_C37_ANSGM_SP_LNKSTS) {
                int speed_value;
+1
drivers/net/sungem_phy.c
@@ -450,6 +450,7 @@
        int can_low_power = 1;
        if (np == NULL || of_get_property(np, "no-autolowpower", NULL))
                can_low_power = 0;
+       of_node_put(np);
        if (can_low_power) {
                /* Enable automatic low-power */
                sungem_phy_write(phy, 0x1c, 0x9002);
+34-3
drivers/net/virtio_net.c
@@ -242,8 +242,14 @@
        /* Packet virtio header size */
        u8 hdr_len;

-       /* Work struct for refilling if we run low on memory. */
+       /* Work struct for delayed refilling if we run low on memory. */
        struct delayed_work refill;
+
+       /* Is delayed refill enabled? */
+       bool refill_enabled;
+
+       /* The lock to synchronize the access to refill_enabled */
+       spinlock_t refill_lock;

        /* Work struct for config space updates */
        struct work_struct config_work;
@@ -346,6 +352,20 @@
        } else
                p = alloc_page(gfp_mask);
        return p;
+}
+
+static void enable_delayed_refill(struct virtnet_info *vi)
+{
+       spin_lock_bh(&vi->refill_lock);
+       vi->refill_enabled = true;
+       spin_unlock_bh(&vi->refill_lock);
+}
+
+static void disable_delayed_refill(struct virtnet_info *vi)
+{
+       spin_lock_bh(&vi->refill_lock);
+       vi->refill_enabled = false;
+       spin_unlock_bh(&vi->refill_lock);
 }

 static void virtqueue_napi_schedule(struct napi_struct *napi,
@@ -1527,8 +1547,12 @@
        }

        if (rq->vq->num_free > min((unsigned int)budget, virtqueue_get_vring_size(rq->vq)) / 2) {
-               if (!try_fill_recv(vi, rq, GFP_ATOMIC))
-                       schedule_delayed_work(&vi->refill, 0);
+               if (!try_fill_recv(vi, rq, GFP_ATOMIC)) {
+                       spin_lock(&vi->refill_lock);
+                       if (vi->refill_enabled)
+                               schedule_delayed_work(&vi->refill, 0);
+                       spin_unlock(&vi->refill_lock);
+               }
        }

        u64_stats_update_begin(&rq->stats.syncp);
@@ -1650,6 +1674,8 @@
 {
        struct virtnet_info *vi = netdev_priv(dev);
        int i, err;
+
+       enable_delayed_refill(vi);

        for (i = 0; i < vi->max_queue_pairs; i++) {
                if (i < vi->curr_queue_pairs)
@@ -2033,6 +2059,8 @@
        struct virtnet_info *vi = netdev_priv(dev);
        int i;

+       /* Make sure NAPI doesn't schedule refill work */
+       disable_delayed_refill(vi);
        /* Make sure refill_work doesn't re-enable napi! */
        cancel_delayed_work_sync(&vi->refill);

@@ -2792,6 +2820,8 @@

        virtio_device_ready(vdev);

+       enable_delayed_refill(vi);
+
        if (netif_running(vi->dev)) {
                err = virtnet_open(vi->dev);
                if (err)
@@ -3535,6 +3565,7 @@
        vdev->priv = vi;

        INIT_WORK(&vi->config_work, virtnet_config_changed_work);
+       spin_lock_init(&vi->refill_lock);

        /* If we can receive ANY GSO packets, we must allocate large ones. */
        if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) ||
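
The refill_enabled flag is only ever tested or flipped under refill_lock, so once virtnet_close() clears it, NAPI can no longer requeue the refill work that close() is about to cancel. A generic userspace sketch of this enable/disable-under-a-lock pattern (pthread-based, all names hypothetical):

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Producers may only schedule work while the flag is true; teardown flips
     * the flag before cancelling, so no new work can sneak in afterwards.
     */
    static pthread_mutex_t refill_lock = PTHREAD_MUTEX_INITIALIZER;
    static bool refill_enabled;

    static void schedule_refill(void)
    {
            printf("refill work scheduled\n");
    }

    static void try_schedule_refill(void)
    {
            pthread_mutex_lock(&refill_lock);
            if (refill_enabled)
                    schedule_refill();      /* only while enabled */
            pthread_mutex_unlock(&refill_lock);
    }

    static void set_refill_enabled(bool on)
    {
            pthread_mutex_lock(&refill_lock);
            refill_enabled = on;
            pthread_mutex_unlock(&refill_lock);
    }

    int main(void)
    {
            set_refill_enabled(true);
            try_schedule_refill();          /* runs */
            set_refill_enabled(false);      /* teardown: block new work */
            try_schedule_refill();          /* silently skipped */
            return 0;
    }
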
@@ -176,6 +176,7 @@
        depends on !S390
        depends on COMMON_CLK
        select NET_DEVLINK
+       select CRC16
        help
          This driver adds support for an OpenCompute time card.

+1-1
drivers/s390/net/qeth_core_main.c
@@ -3565,7 +3565,7 @@
        if (!atomic_read(&queue->set_pci_flags_count)) {
                /*
                 * there's no outstanding PCI any more, so we
-                * have to request a PCI to be sure the the PCI
+                * have to request a PCI to be sure the PCI
                 * will wake at some time in the future then we
                 * can flush packed buffers that might still be
                 * hanging around, which can happen if no
@@ -450,7 +450,7 @@
                goto out_put_request;

        ret = 0;
-       if (hdr->iovec_count) {
+       if (hdr->iovec_count && hdr->dxfer_len) {
                struct iov_iter i;
                struct iovec *iov = NULL;

+40-18
drivers/ufs/core/ufshcd.c
@@ -2953,37 +2953,59 @@
 static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
                                   struct ufshcd_lrb *lrbp, int max_timeout)
 {
-       int err = 0;
-       unsigned long time_left;
+       unsigned long time_left = msecs_to_jiffies(max_timeout);
        unsigned long flags;
+       bool pending;
+       int err;

+retry:
        time_left = wait_for_completion_timeout(hba->dev_cmd.complete,
-                                               msecs_to_jiffies(max_timeout));
+                                               time_left);

-       spin_lock_irqsave(hba->host->host_lock, flags);
-       hba->dev_cmd.complete = NULL;
        if (likely(time_left)) {
+               /*
+                * The completion handler called complete() and the caller of
+                * this function still owns the @lrbp tag so the code below does
+                * not trigger any race conditions.
+                */
+               hba->dev_cmd.complete = NULL;
                err = ufshcd_get_tr_ocs(lrbp);
                if (!err)
                        err = ufshcd_dev_cmd_completion(hba, lrbp);
-       }
-       spin_unlock_irqrestore(hba->host->host_lock, flags);
-
-       if (!time_left) {
+       } else {
                err = -ETIMEDOUT;
                dev_dbg(hba->dev, "%s: dev_cmd request timedout, tag %d\n",
                        __func__, lrbp->task_tag);
-               if (!ufshcd_clear_cmds(hba, 1U << lrbp->task_tag))
+               if (ufshcd_clear_cmds(hba, 1U << lrbp->task_tag) == 0) {
                        /* successfully cleared the command, retry if needed */
                        err = -EAGAIN;
-               /*
-                * in case of an error, after clearing the doorbell,
-                * we also need to clear the outstanding_request
-                * field in hba
-                */
-               spin_lock_irqsave(&hba->outstanding_lock, flags);
-               __clear_bit(lrbp->task_tag, &hba->outstanding_reqs);
-               spin_unlock_irqrestore(&hba->outstanding_lock, flags);
+                       /*
+                        * Since clearing the command succeeded we also need to
+                        * clear the task tag bit from the outstanding_reqs
+                        * variable.
+                        */
+                       spin_lock_irqsave(&hba->outstanding_lock, flags);
+                       pending = test_bit(lrbp->task_tag,
+                                          &hba->outstanding_reqs);
+                       if (pending) {
+                               hba->dev_cmd.complete = NULL;
+                               __clear_bit(lrbp->task_tag,
+                                           &hba->outstanding_reqs);
+                       }
+                       spin_unlock_irqrestore(&hba->outstanding_lock, flags);
+
+                       if (!pending) {
+                               /*
+                                * The completion handler ran while we tried to
+                                * clear the command.
+                                */
+                               time_left = 1;
+                               goto retry;
+                       }
+               } else {
+                       dev_err(hba->dev, "%s: failed to clear tag %d\n",
+                               __func__, lrbp->task_tag);
+               }
        }

        return err;
@@ -2843,18 +2843,18 @@
 {
        /* Does this proto have per netns sysctl_wmem ? */
        if (proto->sysctl_wmem_offset)
-               return *(int *)((void *)sock_net(sk) + proto->sysctl_wmem_offset);
+               return READ_ONCE(*(int *)((void *)sock_net(sk) + proto->sysctl_wmem_offset));

-       return *proto->sysctl_wmem;
+       return READ_ONCE(*proto->sysctl_wmem);
 }

 static inline int sk_get_rmem0(const struct sock *sk, const struct proto *proto)
 {
        /* Does this proto have per netns sysctl_rmem ? */
        if (proto->sysctl_rmem_offset)
-               return *(int *)((void *)sock_net(sk) + proto->sysctl_rmem_offset);
+               return READ_ONCE(*(int *)((void *)sock_net(sk) + proto->sysctl_rmem_offset));

-       return *proto->sysctl_rmem;
+       return READ_ONCE(*proto->sysctl_rmem);
 }

 /* Default TCP Small queue budget is ~1 ms of data (1sec >> 10)
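
READ_ONCE()/WRITE_ONCE() annotate these lockless sysctl reads so the compiler emits exactly one untorn load instead of refetching or splitting it. A userspace model of the macro pair (GCC/Clang __typeof__; the sysctl variable here is hypothetical):

    #include <stdio.h>

    /* Userspace model of the kernel's READ_ONCE/WRITE_ONCE: a volatile
     * access forces a single, untorn load or store of the variable.
     */
    #define READ_ONCE(x)        (*(const volatile __typeof__(x) *)&(x))
    #define WRITE_ONCE(x, val)  (*(volatile __typeof__(x) *)&(x) = (val))

    static int sysctl_tcp_wmem_default = 16 * 1024;     /* hypothetical tunable */

    static int get_wmem(void)
    {
            /* One load; the compiler may not refetch or split it. */
            return READ_ONCE(sysctl_tcp_wmem_default);
    }

    int main(void)
    {
            WRITE_ONCE(sysctl_tcp_wmem_default, 32 * 1024);     /* writer side */
            printf("wmem = %d\n", get_wmem());                  /* reader side */
            return 0;
    }
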
+1-1
include/net/tcp.h
@@ -1419,7 +1419,7 @@

 static inline int tcp_win_from_space(const struct sock *sk, int space)
 {
-       int tcp_adv_win_scale = sock_net(sk)->ipv4.sysctl_tcp_adv_win_scale;
+       int tcp_adv_win_scale = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_adv_win_scale);

        return tcp_adv_win_scale <= 0 ?
                (space>>(-tcp_adv_win_scale)) :
+2
include/uapi/asm-generic/fcntl.h
@@ -192,6 +192,7 @@

 #define F_LINUX_SPECIFIC_BASE  1024

+#ifndef HAVE_ARCH_STRUCT_FLOCK
 struct flock {
        short   l_type;
        short   l_whence;
@@ -216,5 +217,6 @@
        __ARCH_FLOCK64_PAD
 #endif
 };
+#endif /* HAVE_ARCH_STRUCT_FLOCK */

 #endif /* _ASM_GENERIC_FCNTL_H */
@@ -335,8 +335,6 @@
        struct task_struct *task;
        enum rwsem_waiter_type type;
        unsigned long timeout;
-
-       /* Writer only, not initialized in reader */
        bool handoff_set;
 };
 #define rwsem_first_waiter(sem) \
@@ -459,10 +457,12 @@
                 * to give up the lock), request a HANDOFF to
                 * force the issue.
                 */
-               if (!(oldcount & RWSEM_FLAG_HANDOFF) &&
-                   time_after(jiffies, waiter->timeout)) {
-                       adjustment -= RWSEM_FLAG_HANDOFF;
-                       lockevent_inc(rwsem_rlock_handoff);
+               if (time_after(jiffies, waiter->timeout)) {
+                       if (!(oldcount & RWSEM_FLAG_HANDOFF)) {
+                               adjustment -= RWSEM_FLAG_HANDOFF;
+                               lockevent_inc(rwsem_rlock_handoff);
+                       }
+                       waiter->handoff_set = true;
                }

                atomic_long_add(-adjustment, &sem->count);
@@ -599,7 +599,7 @@
 static inline bool rwsem_try_write_lock(struct rw_semaphore *sem,
                                        struct rwsem_waiter *waiter)
 {
-       bool first = rwsem_first_waiter(sem) == waiter;
+       struct rwsem_waiter *first = rwsem_first_waiter(sem);
        long count, new;

        lockdep_assert_held(&sem->wait_lock);
@@ -609,11 +609,20 @@
        bool has_handoff = !!(count & RWSEM_FLAG_HANDOFF);

        if (has_handoff) {
-               if (!first)
+               /*
+                * Honor handoff bit and yield only when the first
+                * waiter is the one that set it. Otherwise, we
+                * still try to acquire the rwsem.
+                */
+               if (first->handoff_set && (waiter != first))
                        return false;

-               /* First waiter inherits a previously set handoff bit */
-               waiter->handoff_set = true;
+               /*
+                * First waiter can inherit a previously set handoff
+                * bit and spin on rwsem if lock acquisition fails.
+                */
+               if (waiter == first)
+                       waiter->handoff_set = true;
        }

        new = count;
@@ -1027,6 +1036,7 @@
        waiter.task = current;
        waiter.type = RWSEM_WAITING_FOR_READ;
        waiter.timeout = jiffies + RWSEM_WAIT_TIMEOUT;
+       waiter.handoff_set = false;

        raw_spin_lock_irq(&sem->wait_lock);
        if (list_empty(&sem->wait_list)) {
@@ -212,14 +212,6 @@
                unsigned long end, unsigned long hmm_pfns[], pmd_t pmd);
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */

-static inline bool hmm_is_device_private_entry(struct hmm_range *range,
-                                              swp_entry_t entry)
-{
-       return is_device_private_entry(entry) &&
-               pfn_swap_entry_to_page(entry)->pgmap->owner ==
-               range->dev_private_owner;
-}
-
 static inline unsigned long pte_to_hmm_pfn_flags(struct hmm_range *range,
                                                 pte_t pte)
 {
@@ -252,10 +244,12 @@
                swp_entry_t entry = pte_to_swp_entry(pte);

                /*
-                * Never fault in device private pages, but just report
-                * the PFN even if not present.
+                * Don't fault in device private pages owned by the caller,
+                * just report the PFN.
                 */
-               if (hmm_is_device_private_entry(range, entry)) {
+               if (is_device_private_entry(entry) &&
+                   pfn_swap_entry_to_page(entry)->pgmap->owner ==
+                   range->dev_private_owner) {
                        cpu_flags = HMM_PFN_VALID;
                        if (is_writable_device_private_entry(entry))
                                cpu_flags |= HMM_PFN_WRITE;
@@ -271,6 +265,9 @@
                }

                if (!non_swap_entry(entry))
+                       goto fault;
+
+               if (is_device_private_entry(entry))
                        goto fault;

                if (is_device_exclusive_entry(entry))
+8-4
mm/page_alloc.c
@@ -3968,11 +3968,15 @@
         * need to be calculated.
         */
        if (!order) {
-               long fast_free;
+               long usable_free;
+               long reserved;

-               fast_free = free_pages;
-               fast_free -= __zone_watermark_unusable_free(z, 0, alloc_flags);
-               if (fast_free > mark + z->lowmem_reserve[highest_zoneidx])
+               usable_free = free_pages;
+               reserved = __zone_watermark_unusable_free(z, 0, alloc_flags);
+
+               /* reserved may over estimate high-atomic reserves. */
+               usable_free -= min(usable_free, reserved);
+               if (usable_free > mark + z->lowmem_reserve[highest_zoneidx])
                        return true;
        }

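
`usable_free -= min(usable_free, reserved)` clamps the subtraction at zero, so an over-estimated high-atomic reserve can no longer push the fast-path value negative. A tiny standalone illustration with made-up numbers:

    #include <stdio.h>

    #define min(a, b) ((a) < (b) ? (a) : (b))

    int main(void)
    {
            long free_pages = 100;
            long reserved = 130;    /* overestimates what is truly unusable */
            long usable = free_pages;

            usable -= min(usable, reserved);        /* clamps at 0, never negative */
            printf("usable = %ld\n", usable);       /* prints 0, not -30 */
            return 0;
    }
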
+3-3
net/bluetooth/hci_sync.c
@@ -4973,6 +4973,9 @@
                return err;
        }

+       /* Update event mask so only the allowed event can wakeup the host */
+       hci_set_event_mask_sync(hdev);
+
        /* Only configure accept list if disconnect succeeded and wake
         * isn't being prevented.
         */
@@ -4983,9 +4986,6 @@

        /* Unpause to take care of updating scanning params */
        hdev->scanning_paused = false;
-
-       /* Update event mask so only the allowed event can wakeup the host */
-       hci_set_event_mask_sync(hdev);

        /* Enable event filter for paired devices */
        hci_update_event_filter_sync(hdev);
+48-13
net/bluetooth/l2cap_core.c
@@ -111,7 +111,8 @@
 }

 /* Find channel with given SCID.
- * Returns locked channel. */
+ * Returns a reference locked channel.
+ */
 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
                                                 u16 cid)
 {
@@ -119,15 +120,19 @@

        mutex_lock(&conn->chan_lock);
        c = __l2cap_get_chan_by_scid(conn, cid);
-       if (c)
-               l2cap_chan_lock(c);
+       if (c) {
+               /* Only lock if chan reference is not 0 */
+               c = l2cap_chan_hold_unless_zero(c);
+               if (c)
+                       l2cap_chan_lock(c);
+       }
        mutex_unlock(&conn->chan_lock);

        return c;
 }

 /* Find channel with given DCID.
- * Returns locked channel.
+ * Returns a reference locked channel.
  */
 static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
                                                 u16 cid)
@@ -136,8 +141,12 @@

        mutex_lock(&conn->chan_lock);
        c = __l2cap_get_chan_by_dcid(conn, cid);
-       if (c)
-               l2cap_chan_lock(c);
+       if (c) {
+               /* Only lock if chan reference is not 0 */
+               c = l2cap_chan_hold_unless_zero(c);
+               if (c)
+                       l2cap_chan_lock(c);
+       }
        mutex_unlock(&conn->chan_lock);

        return c;
@@ -162,8 +171,12 @@

        mutex_lock(&conn->chan_lock);
        c = __l2cap_get_chan_by_ident(conn, ident);
-       if (c)
-               l2cap_chan_lock(c);
+       if (c) {
+               /* Only lock if chan reference is not 0 */
+               c = l2cap_chan_hold_unless_zero(c);
+               if (c)
+                       l2cap_chan_lock(c);
+       }
        mutex_unlock(&conn->chan_lock);

        return c;
@@ -495,6 +508,16 @@
        BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref));

        kref_get(&c->kref);
+}
+
+struct l2cap_chan *l2cap_chan_hold_unless_zero(struct l2cap_chan *c)
+{
+       BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref));
+
+       if (!kref_get_unless_zero(&c->kref))
+               return NULL;
+
+       return c;
 }

 void l2cap_chan_put(struct l2cap_chan *c)
@@ -1968,7 +1991,10 @@
                        src_match = !bacmp(&c->src, src);
                        dst_match = !bacmp(&c->dst, dst);
                        if (src_match && dst_match) {
-                               l2cap_chan_hold(c);
+                               c = l2cap_chan_hold_unless_zero(c);
+                               if (!c)
+                                       continue;
+
                                read_unlock(&chan_list_lock);
                                return c;
                        }
@@ -1983,7 +2009,7 @@
        }

        if (c1)
-               l2cap_chan_hold(c1);
+               c1 = l2cap_chan_hold_unless_zero(c1);

        read_unlock(&chan_list_lock);

@@ -4463,5 +4489,6 @@

 unlock:
        l2cap_chan_unlock(chan);
+       l2cap_chan_put(chan);
        return err;
 }
@@ -4577,5 +4604,6 @@

 done:
        l2cap_chan_unlock(chan);
+       l2cap_chan_put(chan);
        return err;
 }
@@ -5304,6 +5332,7 @@
        l2cap_send_move_chan_rsp(chan, result);

        l2cap_chan_unlock(chan);
+       l2cap_chan_put(chan);

        return 0;
 }
@@ -5396,4 +5425,5 @@
        }

        l2cap_chan_unlock(chan);
+       l2cap_chan_put(chan);
 }
@@ -5425,6 +5455,7 @@
        l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);

        l2cap_chan_unlock(chan);
+       l2cap_chan_put(chan);
 }

 static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
@@ -5488,6 +5519,7 @@
        l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);

        l2cap_chan_unlock(chan);
+       l2cap_chan_put(chan);

        return 0;
 }
@@ -5523,6 +5555,7 @@
        }

        l2cap_chan_unlock(chan);
+       l2cap_chan_put(chan);

        return 0;
 }
@@ -5895,12 +5928,11 @@
        if (credits > max_credits) {
                BT_ERR("LE credits overflow");
                l2cap_send_disconn_req(chan, ECONNRESET);
-               l2cap_chan_unlock(chan);

                /* Return 0 so that we don't trigger an unnecessary
                 * command reject packet.
                 */
-               return 0;
+               goto unlock;
        }

        chan->tx_credits += credits;
@@ -5911,7 +5943,9 @@
        if (chan->tx_credits)
                chan->ops->resume(chan);

+unlock:
        l2cap_chan_unlock(chan);
+       l2cap_chan_put(chan);

        return 0;
 }
@@ -7597,7 +7631,8 @@

 done:
        l2cap_chan_unlock(chan);
+       l2cap_chan_put(chan);
 }

 static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
@@ -8085,7 +8120,7 @@
                if (src_type != c->src_type)
                        continue;

-               l2cap_chan_hold(c);
+               c = l2cap_chan_hold_unless_zero(c);
                read_unlock(&chan_list_lock);
                return c;
        }
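
l2cap_chan_hold_unless_zero() is the usual kref_get_unless_zero() idiom: a reference is taken only if the object is not already being torn down. A userspace model using C11 atomics (obj_hold_unless_zero is hypothetical):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Take a reference only if the object is not already on its way to
     * being freed (refcount == 0), mirroring kref_get_unless_zero().
     */
    struct obj {
            atomic_int refs;
    };

    static bool obj_hold_unless_zero(struct obj *o)
    {
            int old = atomic_load(&o->refs);

            while (old != 0) {
                    if (atomic_compare_exchange_weak(&o->refs, &old, old + 1))
                            return true;    /* got a reference */
            }
            return false;   /* object is dying; caller must not touch it */
    }

    int main(void)
    {
            struct obj live = { .refs = 1 }, dying = { .refs = 0 };

            printf("live:  %s\n", obj_hold_unless_zero(&live) ? "held" : "skipped");
            printf("dying: %s\n", obj_hold_unless_zero(&dying) ? "held" : "skipped");
            return 0;
    }
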
@@ -1042,6 +1042,7 @@

 void fib_alias_hw_flags_set(struct net *net, const struct fib_rt_info *fri)
 {
+       u8 fib_notify_on_flag_change;
        struct fib_alias *fa_match;
        struct sk_buff *skb;
        int err;
@@ -1063,14 +1064,16 @@
        WRITE_ONCE(fa_match->offload, fri->offload);
        WRITE_ONCE(fa_match->trap, fri->trap);

+       fib_notify_on_flag_change = READ_ONCE(net->ipv4.sysctl_fib_notify_on_flag_change);
+
        /* 2 means send notifications only if offload_failed was changed. */
-       if (net->ipv4.sysctl_fib_notify_on_flag_change == 2 &&
+       if (fib_notify_on_flag_change == 2 &&
            READ_ONCE(fa_match->offload_failed) == fri->offload_failed)
                goto out;

        WRITE_ONCE(fa_match->offload_failed, fri->offload_failed);

-       if (!net->ipv4.sysctl_fib_notify_on_flag_change)
+       if (!fib_notify_on_flag_change)
                goto out;

        skb = nlmsg_new(fib_nlmsg_size(fa_match->fa_info), GFP_ATOMIC);
+16-7
net/ipv4/tcp.c
@@ -452,8 +452,8 @@

        icsk->icsk_sync_mss = tcp_sync_mss;

-       WRITE_ONCE(sk->sk_sndbuf, sock_net(sk)->ipv4.sysctl_tcp_wmem[1]);
-       WRITE_ONCE(sk->sk_rcvbuf, sock_net(sk)->ipv4.sysctl_tcp_rmem[1]);
+       WRITE_ONCE(sk->sk_sndbuf, READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_wmem[1]));
+       WRITE_ONCE(sk->sk_rcvbuf, READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_rmem[1]));

        sk_sockets_allocated_inc(sk);
 }
@@ -686,7 +686,7 @@
                          int size_goal)
 {
        return skb->len < size_goal &&
-              sock_net(sk)->ipv4.sysctl_tcp_autocorking &&
+              READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_autocorking) &&
               !tcp_rtx_queue_empty(sk) &&
               refcount_read(&sk->sk_wmem_alloc) > skb->truesize &&
               tcp_skb_can_collapse_to(skb);
@@ -1724,7 +1724,7 @@
        if (sk->sk_userlocks & SOCK_RCVBUF_LOCK)
                cap = sk->sk_rcvbuf >> 1;
        else
-               cap = sock_net(sk)->ipv4.sysctl_tcp_rmem[2] >> 1;
+               cap = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_rmem[2]) >> 1;
        val = min(val, cap);
        WRITE_ONCE(sk->sk_rcvlowat, val ? : 1);

@@ -4459,9 +4459,18 @@
                return SKB_DROP_REASON_TCP_MD5UNEXPECTED;
        }

-       /* check the signature */
-       genhash = tp->af_specific->calc_md5_hash(newhash, hash_expected,
-                                                NULL, skb);
+       /* Check the signature.
+        * To support dual stack listeners, we need to handle
+        * IPv4-mapped case.
+        */
+       if (family == AF_INET)
+               genhash = tcp_v4_md5_hash_skb(newhash,
+                                             hash_expected,
+                                             NULL, skb);
+       else
+               genhash = tp->af_specific->calc_md5_hash(newhash,
+                                                        hash_expected,
+                                                        NULL, skb);

        if (genhash || memcmp(hash_location, newhash, 16) != 0) {
                NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5FAILURE);
+22-19
net/ipv4/tcp_input.c
@@ -426,7 +426,7 @@

        if (sk->sk_sndbuf < sndmem)
                WRITE_ONCE(sk->sk_sndbuf,
-                          min(sndmem, sock_net(sk)->ipv4.sysctl_tcp_wmem[2]));
+                          min(sndmem, READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_wmem[2])));
 }

 /* 2. Tuning advertised window (window_clamp, rcv_ssthresh)
@@ -461,7 +461,7 @@
        struct tcp_sock *tp = tcp_sk(sk);
        /* Optimize this! */
        int truesize = tcp_win_from_space(sk, skbtruesize) >> 1;
-       int window = tcp_win_from_space(sk, sock_net(sk)->ipv4.sysctl_tcp_rmem[2]) >> 1;
+       int window = tcp_win_from_space(sk, READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_rmem[2])) >> 1;

        while (tp->rcv_ssthresh <= window) {
                if (truesize <= skb->len)
@@ -534,7 +534,7 @@
  */
 static void tcp_init_buffer_space(struct sock *sk)
 {
-       int tcp_app_win = sock_net(sk)->ipv4.sysctl_tcp_app_win;
+       int tcp_app_win = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_app_win);
        struct tcp_sock *tp = tcp_sk(sk);
        int maxwin;

@@ -574,16 +574,17 @@
        struct tcp_sock *tp = tcp_sk(sk);
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct net *net = sock_net(sk);
+       int rmem2;

        icsk->icsk_ack.quick = 0;
+       rmem2 = READ_ONCE(net->ipv4.sysctl_tcp_rmem[2]);

-       if (sk->sk_rcvbuf < net->ipv4.sysctl_tcp_rmem[2] &&
+       if (sk->sk_rcvbuf < rmem2 &&
            !(sk->sk_userlocks & SOCK_RCVBUF_LOCK) &&
            !tcp_under_memory_pressure(sk) &&
            sk_memory_allocated(sk) < sk_prot_mem_limits(sk, 0)) {
                WRITE_ONCE(sk->sk_rcvbuf,
-                          min(atomic_read(&sk->sk_rmem_alloc),
-                              net->ipv4.sysctl_tcp_rmem[2]));
+                          min(atomic_read(&sk->sk_rmem_alloc), rmem2));
        }
        if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf)
                tp->rcv_ssthresh = min(tp->window_clamp, 2U * tp->advmss);
@@ -724,7 +725,7 @@
         * <prev RTT . ><current RTT .. ><next RTT .... >
         */

-       if (sock_net(sk)->ipv4.sysctl_tcp_moderate_rcvbuf &&
+       if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_moderate_rcvbuf) &&
            !(sk->sk_userlocks & SOCK_RCVBUF_LOCK)) {
                int rcvmem, rcvbuf;
                u64 rcvwin, grow;
@@ -745,7 +746,7 @@

                do_div(rcvwin, tp->advmss);
                rcvbuf = min_t(u64, rcvwin * rcvmem,
-                              sock_net(sk)->ipv4.sysctl_tcp_rmem[2]);
+                              READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_rmem[2]));
                if (rcvbuf > sk->sk_rcvbuf) {
                        WRITE_ONCE(sk->sk_rcvbuf, rcvbuf);
@@ -910,9 +911,9 @@
         * end of slow start and should slow down.
         */
        if (tcp_snd_cwnd(tp) < tp->snd_ssthresh / 2)
-               rate *= sock_net(sk)->ipv4.sysctl_tcp_pacing_ss_ratio;
+               rate *= READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_pacing_ss_ratio);
        else
-               rate *= sock_net(sk)->ipv4.sysctl_tcp_pacing_ca_ratio;
+               rate *= READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_pacing_ca_ratio);

        rate *= max(tcp_snd_cwnd(tp), tp->packets_out);

@@ -2175,7 +2176,7 @@
         * loss recovery is underway except recurring timeout(s) on
         * the same SND.UNA (sec 3.2). Disable F-RTO on path MTU probing
         */
-       tp->frto = net->ipv4.sysctl_tcp_frto &&
+       tp->frto = READ_ONCE(net->ipv4.sysctl_tcp_frto) &&
                   (new_recovery || icsk->icsk_retransmits) &&
                   !inet_csk(sk)->icsk_mtup.probe_size;
 }
@@ -3058,7 +3059,7 @@

 static void tcp_update_rtt_min(struct sock *sk, u32 rtt_us, const int flag)
 {
-       u32 wlen = sock_net(sk)->ipv4.sysctl_tcp_min_rtt_wlen * HZ;
+       u32 wlen = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_min_rtt_wlen) * HZ;
        struct tcp_sock *tp = tcp_sk(sk);

        if ((flag & FLAG_ACK_MAYBE_DELAYED) && rtt_us > tcp_min_rtt(tp)) {
@@ -3581,7 +3582,8 @@
        if (*last_oow_ack_time) {
                s32 elapsed = (s32)(tcp_jiffies32 - *last_oow_ack_time);

-               if (0 <= elapsed && elapsed < net->ipv4.sysctl_tcp_invalid_ratelimit) {
+               if (0 <= elapsed &&
+                   elapsed < READ_ONCE(net->ipv4.sysctl_tcp_invalid_ratelimit)) {
                        NET_INC_STATS(net, mib_idx);
                        return true;    /* rate-limited: don't send yet! */
                }
@@ -3629,7 +3631,7 @@
        /* Then check host-wide RFC 5961 rate limit. */
        now = jiffies / HZ;
        if (now != challenge_timestamp) {
-               u32 ack_limit = net->ipv4.sysctl_tcp_challenge_ack_limit;
+               u32 ack_limit = READ_ONCE(net->ipv4.sysctl_tcp_challenge_ack_limit);
                u32 half = (ack_limit + 1) >> 1;

                challenge_timestamp = now;
@@ -4426,7 +4428,7 @@
 {
        struct tcp_sock *tp = tcp_sk(sk);

-       if (tcp_is_sack(tp) && sock_net(sk)->ipv4.sysctl_tcp_dsack) {
+       if (tcp_is_sack(tp) && READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_dsack)) {
                int mib_idx;

                if (before(seq, tp->rcv_nxt))
@@ -4473,7 +4475,7 @@
                NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKLOST);
                tcp_enter_quickack_mode(sk, TCP_MAX_QUICKACKS);

-               if (tcp_is_sack(tp) && sock_net(sk)->ipv4.sysctl_tcp_dsack) {
+               if (tcp_is_sack(tp) && READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_dsack)) {
                        u32 end_seq = TCP_SKB_CB(skb)->end_seq;

                        tcp_rcv_spurious_retrans(sk, skb);
@@ -5519,7 +5521,7 @@
        }

        if (!tcp_is_sack(tp) ||
-           tp->compressed_ack >= sock_net(sk)->ipv4.sysctl_tcp_comp_sack_nr)
+           tp->compressed_ack >= READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_comp_sack_nr))
                goto send_now;

        if (tp->compressed_ack_rcv_nxt != tp->rcv_nxt) {
@@ -5540,11 +5542,12 @@
        if (tp->srtt_us && tp->srtt_us < rtt)
                rtt = tp->srtt_us;

-       delay = min_t(unsigned long, sock_net(sk)->ipv4.sysctl_tcp_comp_sack_delay_ns,
+       delay = min_t(unsigned long,
+                     READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_comp_sack_delay_ns),
                      rtt * (NSEC_PER_USEC >> 3)/20);
        sock_hold(sk);
        hrtimer_start_range_ns(&tp->compressed_ack_timer, ns_to_ktime(delay),
-                              sock_net(sk)->ipv4.sysctl_tcp_comp_sack_slack_ns,
+                              READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_comp_sack_slack_ns),
                               HRTIMER_MODE_REL_PINNED_SOFT);
 }

+2-2
net/ipv4/tcp_ipv4.c
@@ -1006,7 +1006,7 @@
        if (skb) {
                __tcp_v4_send_check(skb, ireq->ir_loc_addr, ireq->ir_rmt_addr);

-               tos = sock_net(sk)->ipv4.sysctl_tcp_reflect_tos ?
+               tos = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_reflect_tos) ?
                                (tcp_rsk(req)->syn_tos & ~INET_ECN_MASK) |
                                (inet_sk(sk)->tos & INET_ECN_MASK) :
                                inet_sk(sk)->tos;
@@ -1526,7 +1526,7 @@
        /* Set ToS of the new socket based upon the value of incoming SYN.
         * ECT bits are set later in tcp_init_transfer().
         */
-       if (sock_net(sk)->ipv4.sysctl_tcp_reflect_tos)
+       if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_reflect_tos))
                newinet->tos = tcp_rsk(req)->syn_tos & ~INET_ECN_MASK;

        if (!dst) {
+5-5
net/ipv4/tcp_metrics.c
@@ -329,7 +329,7 @@
        int m;

        sk_dst_confirm(sk);
-       if (net->ipv4.sysctl_tcp_nometrics_save || !dst)
+       if (READ_ONCE(net->ipv4.sysctl_tcp_nometrics_save) || !dst)
                return;

        rcu_read_lock();
@@ -385,7 +385,7 @@

        if (tcp_in_initial_slowstart(tp)) {
                /* Slow start still did not finish. */
-               if (!net->ipv4.sysctl_tcp_no_ssthresh_metrics_save &&
+               if (!READ_ONCE(net->ipv4.sysctl_tcp_no_ssthresh_metrics_save) &&
                    !tcp_metric_locked(tm, TCP_METRIC_SSTHRESH)) {
                        val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
                        if (val && (tcp_snd_cwnd(tp) >> 1) > val)
@@ -401,7 +401,7 @@
        } else if (!tcp_in_slow_start(tp) &&
                   icsk->icsk_ca_state == TCP_CA_Open) {
                /* Cong. avoidance phase, cwnd is reliable. */
-               if (!net->ipv4.sysctl_tcp_no_ssthresh_metrics_save &&
+               if (!READ_ONCE(net->ipv4.sysctl_tcp_no_ssthresh_metrics_save) &&
                    !tcp_metric_locked(tm, TCP_METRIC_SSTHRESH))
                        tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
                                       max(tcp_snd_cwnd(tp) >> 1, tp->snd_ssthresh));
@@ -418,7 +418,7 @@
                        tcp_metric_set(tm, TCP_METRIC_CWND,
                                       (val + tp->snd_ssthresh) >> 1);
                }
-               if (!net->ipv4.sysctl_tcp_no_ssthresh_metrics_save &&
+               if (!READ_ONCE(net->ipv4.sysctl_tcp_no_ssthresh_metrics_save) &&
                    !tcp_metric_locked(tm, TCP_METRIC_SSTHRESH)) {
                        val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
                        if (val && tp->snd_ssthresh > val)
@@ -463,7 +463,7 @@
        if (tcp_metric_locked(tm, TCP_METRIC_CWND))
                tp->snd_cwnd_clamp = tcp_metric_get(tm, TCP_METRIC_CWND);

-       val = net->ipv4.sysctl_tcp_no_ssthresh_metrics_save ?
+       val = READ_ONCE(net->ipv4.sysctl_tcp_no_ssthresh_metrics_save) ?
              0 : tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
        if (val) {
                tp->snd_ssthresh = val;
+12-15
net/ipv4/tcp_output.c
@@ -167,16 +167,13 @@
        if (tcp_packets_in_flight(tp) == 0)
                tcp_ca_event(sk, CA_EVENT_TX_START);

-       /* If this is the first data packet sent in response to the
-        * previous received data,
-        * and it is a reply for ato after last received packet,
-        * increase pingpong count.
-        */
-       if (before(tp->lsndtime, icsk->icsk_ack.lrcvtime) &&
-           (u32)(now - icsk->icsk_ack.lrcvtime) < icsk->icsk_ack.ato)
-               inet_csk_inc_pingpong_cnt(sk);
-
        tp->lsndtime = now;
+
+       /* If it is a reply for ato after last received
+        * packet, enter pingpong mode.
+        */
+       if ((u32)(now - icsk->icsk_ack.lrcvtime) < icsk->icsk_ack.ato)
+               inet_csk_enter_pingpong_mode(sk);
 }

 /* Account for an ACK we sent. */
@@ -227,7 +230,7 @@
         * which we interpret as a sign the remote TCP is not
         * misinterpreting the window field as a signed quantity.
         */
-       if (sock_net(sk)->ipv4.sysctl_tcp_workaround_signed_windows)
+       if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_workaround_signed_windows))
                (*rcv_wnd) = min(space, MAX_TCP_WINDOW);
        else
                (*rcv_wnd) = min_t(u32, space, U16_MAX);
@@ -238,7 +241,7 @@
        *rcv_wscale = 0;
        if (wscale_ok) {
                /* Set window scaling on max possible window */
-               space = max_t(u32, space, sock_net(sk)->ipv4.sysctl_tcp_rmem[2]);
+               space = max_t(u32, space, READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_rmem[2]));
                space = max_t(u32, space, sysctl_rmem_max);
                space = min_t(u32, space, *window_clamp);
                *rcv_wscale = clamp_t(int, ilog2(space) - 15,
@@ -282,7 +285,7 @@
         * scaled window.
         */
        if (!tp->rx_opt.rcv_wscale &&
-           sock_net(sk)->ipv4.sysctl_tcp_workaround_signed_windows)
+           READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_workaround_signed_windows))
                new_win = min(new_win, MAX_TCP_WINDOW);
        else
                new_win = min(new_win, (65535U << tp->rx_opt.rcv_wscale));
@@ -1973,7 +1976,7 @@

        bytes = sk->sk_pacing_rate >> READ_ONCE(sk->sk_pacing_shift);

-       r = tcp_min_rtt(tcp_sk(sk)) >> sock_net(sk)->ipv4.sysctl_tcp_tso_rtt_log;
+       r = tcp_min_rtt(tcp_sk(sk)) >> READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_tso_rtt_log);
        if (r < BITS_PER_TYPE(sk->sk_gso_max_size))
                bytes += sk->sk_gso_max_size >> r;

@@ -1992,7 +1995,7 @@

        min_tso = ca_ops->min_tso_segs ?
                        ca_ops->min_tso_segs(sk) :
-                       sock_net(sk)->ipv4.sysctl_tcp_min_tso_segs;
+                       READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_min_tso_segs);

        tso_segs = tcp_tso_autosize(sk, mss_now, min_tso);
        return min_t(u32, tso_segs, sk->sk_gso_max_segs);
@@ -2504,7 +2507,7 @@
                      sk->sk_pacing_rate >> READ_ONCE(sk->sk_pacing_shift));
        if (sk->sk_pacing_status == SK_PACING_NONE)
                limit = min_t(unsigned long, limit,
-                             sock_net(sk)->ipv4.sysctl_tcp_limit_output_bytes);
+                             READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_limit_output_bytes));
        limit <<= factor;

        if (static_branch_unlikely(&tcp_tx_delay_enabled) &&
+8-6
net/ipv6/mcast.c
@@ -1522,7 +1522,6 @@

                if (++cnt >= MLD_MAX_QUEUE) {
                        rework = true;
-                       schedule_delayed_work(&idev->mc_query_work, 0);
                        break;
                }
        }
@@ -1533,8 +1532,10 @@
                __mld_query_work(skb);
        mutex_unlock(&idev->mc_lock);

-       if (!rework)
-               in6_dev_put(idev);
+       if (rework && queue_delayed_work(mld_wq, &idev->mc_query_work, 0))
+               return;
+
+       in6_dev_put(idev);
 }

 /* called with rcu_read_lock() */
@@ -1624,7 +1625,6 @@

                if (++cnt >= MLD_MAX_QUEUE) {
                        rework = true;
-                       schedule_delayed_work(&idev->mc_report_work, 0);
                        break;
                }
        }
@@ -1635,8 +1635,10 @@
                __mld_report_work(skb);
        mutex_unlock(&idev->mc_lock);

-       if (!rework)
-               in6_dev_put(idev);
+       if (rework && queue_delayed_work(mld_wq, &idev->mc_report_work, 0))
+               return;
+
+       in6_dev_put(idev);
 }

 static bool is_in(struct ifmcaddr6 *pmc, struct ip6_sf_list *psf, int type,
+6
net/ipv6/ping.c
@@ -22,6 +22,11 @@
 #include <linux/proc_fs.h>
 #include <net/ping.h>

+static void ping_v6_destroy(struct sock *sk)
+{
+       inet6_destroy_sock(sk);
+}
+
 /* Compatibility glue so we can support IPv6 when it's compiled as a module */
 static int dummy_ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len,
                                 int *addr_len)
@@ -181,6 +186,7 @@
        .owner =        THIS_MODULE,
        .init =         ping_init_sock,
        .close =        ping_close,
+       .destroy =      ping_v6_destroy,
        .connect =      ip6_datagram_connect_v6_only,
        .disconnect =   __udp_disconnect,
        .setsockopt =   ipv6_setsockopt,
+2-2
net/ipv6/tcp_ipv6.c
···
 	if (np->repflow && ireq->pktopts)
 		fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts));

-	tclass = sock_net(sk)->ipv4.sysctl_tcp_reflect_tos ?
+	tclass = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_reflect_tos) ?
 			(tcp_rsk(req)->syn_tos & ~INET_ECN_MASK) |
 			(np->tclass & INET_ECN_MASK) :
 			np->tclass;
···
 	/* Set ToS of the new socket based upon the value of incoming SYN.
 	 * ECT bits are set later in tcp_init_transfer().
 	 */
-	if (sock_net(sk)->ipv4.sysctl_tcp_reflect_tos)
+	if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_reflect_tos))
 		newnp->tclass = tcp_rsk(req)->syn_tos & ~INET_ECN_MASK;

 	/* Clone native IPv6 options from listening socket (if any)
+2-3
net/sctp/associola.c
···
 	if (!sctp_ulpq_init(&asoc->ulpq, asoc))
 		goto fail_init;

-	if (sctp_stream_init(&asoc->stream, asoc->c.sinit_num_ostreams,
-			     0, gfp))
-		goto fail_init;
+	if (sctp_stream_init(&asoc->stream, asoc->c.sinit_num_ostreams, 0, gfp))
+		goto stream_free;

 	/* Initialize default path MTU. */
 	asoc->pathmtu = sp->pathmtu;
+3-16
net/sctp/stream.c
···
 	ret = sctp_stream_alloc_out(stream, outcnt, gfp);
 	if (ret)
-		goto out_err;
+		return ret;

 	for (i = 0; i < stream->outcnt; i++)
 		SCTP_SO(stream, i)->state = SCTP_STREAM_OPEN;
···
 handle_in:
 	sctp_stream_interleave_init(stream);
 	if (!incnt)
-		goto out;
+		return 0;

-	ret = sctp_stream_alloc_in(stream, incnt, gfp);
-	if (ret)
-		goto in_err;
-
-	goto out;
-
-in_err:
-	sched->free(stream);
-	genradix_free(&stream->in);
-out_err:
-	genradix_free(&stream->out);
-	stream->outcnt = 0;
-out:
-	return ret;
+	return sctp_stream_alloc_in(stream, incnt, gfp);
 }

 int sctp_stream_init_ext(struct sctp_stream *stream, __u16 sid)
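Together with the associola.c hunk above, this moves all stream-init error handling to one place: sctp_stream_init() just reports failure, and the caller funnels every failure to a single stream_free teardown that copes with a partially initialized stream. A minimal sketch of that shape, using calloc/free as stand-ins for the genradix allocations (names are illustrative, not the kernel API):

#include <stdio.h>
#include <stdlib.h>

struct stream {
    int *out;
    int *in;
};

static void stream_free(struct stream *s)
{
    free(s->in);  /* free(NULL) is a no-op, so partial init is safe */
    free(s->out);
}

static int stream_init(struct stream *s, size_t outcnt, size_t incnt)
{
    s->out = calloc(outcnt, sizeof(*s->out));
    if (!s->out)
        return -1;
    if (!incnt)
        return 0;
    s->in = calloc(incnt, sizeof(*s->in));
    return s->in ? 0 : -1; /* no local unwind; the caller frees */
}

int main(void)
{
    struct stream s = { NULL, NULL };

    if (stream_init(&s, 4, 2))
        puts("init failed");
    stream_free(&s); /* single cleanup path either way */
    return 0;
}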
+1-1
net/sctp/stream_sched.c
···
 		if (!SCTP_SO(&asoc->stream, i)->ext)
 			continue;

-		ret = n->init_sid(&asoc->stream, i, GFP_KERNEL);
+		ret = n->init_sid(&asoc->stream, i, GFP_ATOMIC);
 		if (ret)
 			goto err;
 	}
+6-1
net/tls/tls_device.c
···
 		 * by tls_device_free_ctx. rx_conf and tx_conf stay in TLS_HW.
 		 * Now release the ref taken above.
 		 */
-		if (refcount_dec_and_test(&ctx->refcount))
+		if (refcount_dec_and_test(&ctx->refcount)) {
+			/* sk_destruct ran after tls_device_down took a ref, and
+			 * it returned early. Complete the destruction here.
+			 */
+			list_del(&ctx->list);
 			tls_device_free_ctx(ctx);
+		}
 	}

 	up_write(&device_offload_lock);
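This tls_device hunk makes the last reference dropper finish the whole teardown, including the list_del() that the early-returning sk_destruct skipped. A userspace sketch of that "last put cleans up" rule, with a plain integer standing in for refcount_t:

#include <stdbool.h>
#include <stdio.h>

struct ctx {
    int refcount;
    bool on_list;
};

static void tls_ctx_free(struct ctx *ctx)
{
    (void)ctx;
    printf("ctx destroyed\n");
}

static void ctx_put(struct ctx *ctx)
{
    if (--ctx->refcount == 0) {
        if (ctx->on_list)
            ctx->on_list = false; /* stand-in for list_del(&ctx->list) */
        tls_ctx_free(ctx);
    }
}

int main(void)
{
    struct ctx c = { .refcount = 2, .on_list = true };

    ctx_put(&c); /* destructor path returned early, left it listed */
    ctx_put(&c); /* last put unlinks and frees */
    return 0;
}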
+1
tools/arch/x86/include/asm/cpufeatures.h
···
 #define X86_FEATURE_RETPOLINE_LFENCE	(11*32+13) /* "" Use LFENCE for Spectre variant 2 */
 #define X86_FEATURE_RETHUNK		(11*32+14) /* "" Use REturn THUNK */
 #define X86_FEATURE_UNRET		(11*32+15) /* "" AMD BTB untrain return */
+#define X86_FEATURE_USE_IBPB_FW		(11*32+16) /* "" Use IBPB during runtime firmware calls */

 /* Intel-defined CPU features, CPUID level 0x00000007:1 (EAX), word 12 */
 #define X86_FEATURE_AVX_VNNI		(12*32+ 4) /* AVX VNNI instructions */
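The cpufeatures.h encoding packs a (word, bit) pair into one number: word 11, bit 16 here. A tiny sketch of how such a packed feature bit is tested against per-word capability masks (a simplified model of the kernel's scheme, not its exact macros):

#include <stdio.h>

#define NCAPINTS_BITS 32
#define FEATURE(word, bit) ((word) * NCAPINTS_BITS + (bit))
#define X86_FEATURE_USE_IBPB_FW FEATURE(11, 16)

static unsigned int caps[20]; /* one 32-bit mask per feature word */

static int cpu_has(unsigned int feature)
{
    return (caps[feature / NCAPINTS_BITS] >> (feature % NCAPINTS_BITS)) & 1;
}

int main(void)
{
    caps[11] |= 1u << 16; /* pretend the kernel set the synthetic bit */
    printf("USE_IBPB_FW: %d\n", cpu_has(X86_FEATURE_USE_IBPB_FW));
    return 0;
}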
+10-1
tools/include/uapi/asm-generic/fcntl.h
···
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
 #ifndef _ASM_GENERIC_FCNTL_H
 #define _ASM_GENERIC_FCNTL_H
···
 /* a horrid kludge trying to make sure that this will fail on old kernels */
 #define O_TMPFILE (__O_TMPFILE | O_DIRECTORY)
-#define O_TMPFILE_MASK (__O_TMPFILE | O_DIRECTORY | O_CREAT)
+#define O_TMPFILE_MASK (__O_TMPFILE | O_DIRECTORY | O_CREAT)      

 #ifndef O_NDELAY
 #define O_NDELAY	O_NONBLOCK
···
 #define F_GETSIG	11	/* for sockets. */
 #endif

+#if __BITS_PER_LONG == 32 || defined(__KERNEL__)
 #ifndef F_GETLK64
 #define F_GETLK64	12	/* using 'struct flock64' */
 #define F_SETLK64	13
 #define F_SETLKW64	14
 #endif
+#endif /* __BITS_PER_LONG == 32 || defined(__KERNEL__) */

 #ifndef F_SETOWN_EX
 #define F_SETOWN_EX	15
···
 			   blocking */
 #define LOCK_UN		8	/* remove lock */

+/*
+ * LOCK_MAND support has been removed from the kernel. We leave the symbols
+ * here to not break legacy builds, but these should not be used in new code.
+ */
 #define LOCK_MAND	32	/* This is a mandatory flock ... */
 #define LOCK_READ	64	/* which allows concurrent read operations */
 #define LOCK_WRITE	128	/* which allows concurrent write operations */
···
 #define F_LINUX_SPECIFIC_BASE	1024

+#ifndef HAVE_ARCH_STRUCT_FLOCK
 struct flock {
 	short	l_type;
 	short	l_whence;
···
 	__ARCH_FLOCK64_PAD
 #endif
 };
+#endif /* HAVE_ARCH_STRUCT_FLOCK */

 #endif /* _ASM_GENERIC_FCNTL_H */
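The HAVE_ARCH_STRUCT_FLOCK guard added above lets an architecture header supply its own struct flock layout and have the generic fallback compile out. A single-file sketch of that guard pattern (both definitions shown together only for illustration; in the kernel they live in separate headers):

#include <stdio.h>

/* An arch header is included first and provides its own layout: */
#define HAVE_ARCH_STRUCT_FLOCK
struct flock { long l_type; long l_start; long l_len; long l_arch_pad; };

/* The generic header's fallback then compiles out: */
#ifndef HAVE_ARCH_STRUCT_FLOCK
struct flock { long l_type; long l_start; long l_len; };
#endif

int main(void)
{
    printf("sizeof(struct flock) = %zu\n", sizeof(struct flock));
    return 0;
}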
+18-16
tools/perf/scripts/python/arm-cs-trace-disasm.py
···
 def get_offset(perf_dict, field):
 	if field in perf_dict:
-		return f"+0x{perf_dict[field]:x}"
+		return "+%#x" % perf_dict[field]
 	return ""

 def get_dso_file_path(dso_name, dso_build_id):
···
 	else:
 		append = "/elf"

-	dso_path = f"{os.environ['PERF_BUILDID_DIR']}/{dso_name}/{dso_build_id}{append}"
+	dso_path = os.environ['PERF_BUILDID_DIR'] + "/" + dso_name + "/" + dso_build_id + append;
 	# Replace duplicate slash chars to single slash char
 	dso_path = dso_path.replace('//', '/', 1)
 	return dso_path
···
 	start_addr = start_addr - dso_start;
 	stop_addr = stop_addr - dso_start;
 	disasm = [ options.objdump_name, "-d", "-z",
-		   f"--start-address=0x{start_addr:x}",
-		   f"--stop-address=0x{stop_addr:x}" ]
+		   "--start-address="+format(start_addr,"#x"),
+		   "--stop-address="+format(stop_addr,"#x") ]
 	disasm += [ dso_fname ]
 	disasm_output = check_output(disasm).decode('utf-8').split('\n')
 	disasm_cache[addr_range] = disasm_output
···
 		m = disasm_re.search(line)
 		if m is None:
 			continue
-		print(f"\t{line}")
+		print("\t" + line)

 def print_sample(sample):
-	print(f"Sample = {{ cpu: {sample['cpu']:04} addr: 0x{sample['addr']:016x} " \
-		f"phys_addr: 0x{sample['phys_addr']:016x} ip: 0x{sample['ip']:016x} " \
-		f"pid: {sample['pid']} tid: {sample['tid']} period: {sample['period']} time: {sample['time']} }}")
+	print("Sample = { cpu: %04d addr: 0x%016x phys_addr: 0x%016x ip: 0x%016x " \
+		"pid: %d tid: %d period: %d time: %d }" % \
+		(sample['cpu'], sample['addr'], sample['phys_addr'], \
+		sample['ip'], sample['pid'], sample['tid'], \
+		sample['period'], sample['time']))

 def trace_begin():
 	print('ARM CoreSight Trace Data Assembler Dump')
···
 	cpu = sample["cpu"]
 	pid = sample["pid"]
 	tid = sample["tid"]
-	return f"{comm:>16} {pid:>5}/{tid:<5} [{cpu:04}] {sec:9}.{ns:09} "
+	return "%16s %5u/%-5u [%04u] %9u.%09u " % (comm, pid, tid, cpu, sec, ns)

 # This code is copied from intel-pt-events.py for printing source code
 # line and symbols.
···
 	glb_line_number = line_number
 	glb_source_file_name = source_file_name

-	print(f"{start_str}{src_str}")
+	print(start_str, src_str)

 def process_event(param_dict):
 	global cache_size
···
 	symbol = get_optional(param_dict, "symbol")

 	if (options.verbose == True):
-		print(f"Event type: {name}")
+		print("Event type: %s" % name)
 		print_sample(sample)

 	# If cannot find dso so cannot dump assembler, bail out
···
 	# Validate dso start and end addresses
 	if ((dso_start == '[unknown]') or (dso_end == '[unknown]')):
-		print(f"Failed to find valid dso map for dso {dso}")
+		print("Failed to find valid dso map for dso %s" % dso)
 		return

 	if (name[0:12] == "instructions"):
···
 	# Handle CS_ETM_TRACE_ON packet if start_addr=0 and stop_addr=4
 	if (start_addr == 0 and stop_addr == 4):
-		print(f"CPU{cpu}: CS_ETM_TRACE_ON packet is inserted")
+		print("CPU%d: CS_ETM_TRACE_ON packet is inserted" % cpu)
 		return

 	if (start_addr < int(dso_start) or start_addr > int(dso_end)):
-		print(f"Start address 0x{start_addr:x} is out of range [ 0x{dso_start:x} .. 0x{dso_end:x} ] for dso {dso}")
+		print("Start address 0x%x is out of range [ 0x%x .. 0x%x ] for dso %s" % (start_addr, int(dso_start), int(dso_end), dso))
 		return

 	if (stop_addr < int(dso_start) or stop_addr > int(dso_end)):
-		print(f"Stop address 0x{stop_addr:x} is out of range [ 0x{dso_start:x} .. 0x{dso_end:x} ] for dso {dso}")
+		print("Stop address 0x%x is out of range [ 0x%x .. 0x%x ] for dso %s" % (stop_addr, int(dso_start), int(dso_end), dso))
 		return

 	if (options.objdump_name != None):
···
 	if path.exists(dso_fname):
 		print_disam(dso_fname, dso_vm_start, start_addr, stop_addr)
 	else:
-		print(f"Failed to find dso {dso} for address range [ 0x{start_addr:x} .. 0x{stop_addr:x} ]")
+		print("Failed to find dso %s for address range [ 0x%x .. 0x%x ]" % (dso, start_addr, stop_addr))

 	print_srccode(comm, param_dict, sample, symbol, dso)
+7-11
tools/perf/util/bpf-loader.c
···
 static struct bpf_perf_object *
 bpf_perf_object__next(struct bpf_perf_object *prev)
 {
-	struct bpf_perf_object *next;
+	if (!prev) {
+		if (list_empty(&bpf_objects_list))
+			return NULL;

-	if (!prev)
-		next = list_first_entry(&bpf_objects_list,
-					struct bpf_perf_object,
-					list);
-	else
-		next = list_next_entry(prev, list);
-
-	/* Empty list is noticed here so don't need checking on entry. */
-	if (&next->list == &bpf_objects_list)
+		return list_first_entry(&bpf_objects_list, struct bpf_perf_object, list);
+	}
+	if (list_is_last(&prev->list, &bpf_objects_list))
 		return NULL;

-	return next;
+	return list_next_entry(prev, list);
 }

 #define bpf_perf_object__for_each(perf_obj, tmp) \
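The rewritten iterator treats prev == NULL as "start of list" and uses explicit list_empty()/list_is_last() checks up front, since a kernel list_head is circular and the old code only detected emptiness after computing a bogus entry pointer. A sketch of the same iterator shape over a NULL-terminated list, where both checks collapse into NULL tests:

#include <stdio.h>

struct node {
    int val;
    struct node *next; /* NULL-terminated in this model */
};

/* NULL prev means "start"; NULL return means "done", the same shape as
 * the rewritten bpf_perf_object__next(), minus the circular-list checks. */
static struct node *obj_next(struct node *head, struct node *prev)
{
    if (!prev)
        return head;       /* an empty list yields NULL immediately */
    return prev->next;     /* the last node's next is NULL */
}

int main(void)
{
    struct node c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };

    for (struct node *n = obj_next(&a, NULL); n; n = obj_next(&a, n))
        printf("%d\n", n->val);
    return 0;
}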
+52-4
tools/perf/util/symbol-elf.c
···
 	return NULL;
 }

+static int elf_read_program_header(Elf *elf, u64 vaddr, GElf_Phdr *phdr)
+{
+	size_t i, phdrnum;
+	u64 sz;
+
+	if (elf_getphdrnum(elf, &phdrnum))
+		return -1;
+
+	for (i = 0; i < phdrnum; i++) {
+		if (gelf_getphdr(elf, i, phdr) == NULL)
+			return -1;
+
+		if (phdr->p_type != PT_LOAD)
+			continue;
+
+		sz = max(phdr->p_memsz, phdr->p_filesz);
+		if (!sz)
+			continue;
+
+		if (vaddr >= phdr->p_vaddr && (vaddr < phdr->p_vaddr + sz))
+			return 0;
+	}
+
+	/* Not found any valid program header */
+	return -1;
+}
+
 static bool want_demangle(bool is_kernel_sym)
 {
 	return is_kernel_sym ? symbol_conf.demangle_kernel : symbol_conf.demangle;
···
 				sym.st_value);
 			used_opd = true;
 		}
+
 		/*
 		 * When loading symbols in a data mapping, ABS symbols (which
 		 * has a value of SHN_ABS in its st_shndx) failed at
···
 			goto out_elf_end;

 		gelf_getshdr(sec, &shdr);
+
+		/*
+		 * If the attribute bit SHF_ALLOC is not set, the section
+		 * doesn't occupy memory during process execution.
+		 * E.g. ".gnu.warning.*" section is used by linker to generate
+		 * warnings when calling deprecated functions, the symbols in
+		 * the section aren't loaded to memory during process execution,
+		 * so skip them.
+		 */
+		if (!(shdr.sh_flags & SHF_ALLOC))
+			continue;

 		secstrs = secstrs_sym;
···
 			goto out_elf_end;
 		} else if ((used_opd && runtime_ss->adjust_symbols) ||
 			   (!used_opd && syms_ss->adjust_symbols)) {
+			GElf_Phdr phdr;
+
+			if (elf_read_program_header(syms_ss->elf,
+						    (u64)sym.st_value, &phdr)) {
+				pr_warning("%s: failed to find program header for "
+					   "symbol: %s st_value: %#" PRIx64 "\n",
+					   __func__, elf_name, (u64)sym.st_value);
+				continue;
+			}
 			pr_debug4("%s: adjusting symbol: st_value: %#" PRIx64 " "
-				  "sh_addr: %#" PRIx64 " sh_offset: %#" PRIx64 "\n", __func__,
-				  (u64)sym.st_value, (u64)shdr.sh_addr,
-				  (u64)shdr.sh_offset);
-			sym.st_value -= shdr.sh_addr - shdr.sh_offset;
+				  "p_vaddr: %#" PRIx64 " p_offset: %#" PRIx64 "\n",
+				  __func__, (u64)sym.st_value, (u64)phdr.p_vaddr,
+				  (u64)phdr.p_offset);
+			sym.st_value -= phdr.p_vaddr - phdr.p_offset;
 		}

 		demangled = demangle_sym(dso, kmodule, elf_name);
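The symbol-elf.c change adjusts symbol addresses with the covering PT_LOAD program header rather than the section header: a symbol's st_value is a virtual address, and its on-disk location follows by subtracting the segment's p_vaddr - p_offset bias. A small sketch of that arithmetic with made-up segment values:

#include <inttypes.h>
#include <stdio.h>

/* the two PT_LOAD fields the adjustment needs */
struct load_seg {
    uint64_t p_vaddr;
    uint64_t p_offset;
};

static uint64_t vaddr_to_file_off(const struct load_seg *seg, uint64_t vaddr)
{
    /* same arithmetic as "sym.st_value -= phdr.p_vaddr - phdr.p_offset" */
    return vaddr - (seg->p_vaddr - seg->p_offset);
}

int main(void)
{
    struct load_seg text = { .p_vaddr = 0x401000, .p_offset = 0x1000 };

    /* a symbol at vaddr 0x401234 lives at file offset 0x1234 */
    printf("%#" PRIx64 "\n", vaddr_to_file_off(&text, 0x401234));
    return 0;
}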