···731731Description:732732 [RW] If the device is registered for writeback throttling, then733733 this file shows the target minimum read latency. If this latency734734- is exceeded in a given window of time (see wb_window_usec), then734734+ is exceeded in a given window of time (see curr_win_nsec), then735735 the writeback throttling will start scaling back writes. Writing736736 a value of '0' to this file disables the feature. Writing a737737 value of '-1' to this file resets the value to the default
+1-1
Documentation/admin-guide/blockdev/zoned_loop.rst
···7979 the zone size. Default: zone size.8080conv_zones Total number of conventioanl zones starting from sector 0.8181 Default: 8.8282-base_dir Path to the base directoy where to create the directory8282+base_dir Path to the base directory where to create the directory8383 containing the zone files of the device.8484 Default=/var/local/zloop.8585 The device directory containing the zone files is always
···214214Spectre_v2 X X215215Spectre_v2_user X X * (Note 1)216216SRBDS X X X X217217-SRSO X X217217+SRSO X X X X218218SSB (Note 4)219219TAA X X X X * (Note 2)220220TSA X X X X
+6-5
Documentation/core-api/symbol-namespaces.rst
···7676within the corresponding compilation unit before the #include for7777<linux/export.h>. Typically it's placed before the first #include statement.78787979-Using the EXPORT_SYMBOL_GPL_FOR_MODULES() macro8080------------------------------------------------7979+Using the EXPORT_SYMBOL_FOR_MODULES() macro8080+-------------------------------------------81818282Symbols exported using this macro are put into a module namespace. This8383-namespace cannot be imported.8383+namespace cannot be imported. These exports are GPL-only as they are only8484+intended for in-tree modules.84858586The macro takes a comma separated list of module names, allowing only those8687modules to access this symbol. Simple tail-globs are supported.87888889For example::89909090- EXPORT_SYMBOL_GPL_FOR_MODULES(preempt_notifier_inc, "kvm,kvm-*")9191+ EXPORT_SYMBOL_FOR_MODULES(preempt_notifier_inc, "kvm,kvm-*")91929292-will limit usage of this symbol to modules whoes name matches the given9393+will limit usage of this symbol to modules whose name matches the given9394patterns.94959596How to use Symbols exported in Namespaces
+2
Documentation/networking/mptcp-sysctl.rst
···1212 resent to an MPTCP peer that has not acknowledged a previous1313 ADD_ADDR message.14141515+ Do not retransmit if set to 0.1616+1517 The default value matches TCP_RTO_MAX. This is a per-namespace1618 sysctl.1719
···102102103103ifdef CONFIG_OBJTOOL104104ifdef CONFIG_CC_HAS_ANNOTATE_TABLEJUMP105105+# The annotate-tablejump option can not be passed to LLVM backend when LTO is enabled.106106+# Ensure it is aware of linker with LTO, '--loongarch-annotate-tablejump' also needs to107107+# be passed via '-mllvm' to ld.lld.105108KBUILD_CFLAGS += -mannotate-tablejump109109+ifdef CONFIG_LTO_CLANG110110+KBUILD_LDFLAGS += -mllvm --loongarch-annotate-tablejump111111+endif106112else107113KBUILD_CFLAGS += -fno-jump-tables # keep compatibility with older compilers108114endif
···88#include <linux/module.h>99#include <linux/moduleloader.h>1010#include <linux/ftrace.h>1111+#include <linux/sort.h>11121213Elf_Addr module_emit_got_entry(struct module *mod, Elf_Shdr *sechdrs, Elf_Addr val)1314{···6261 return (Elf_Addr)&plt[nr];6362}64636565-static int is_rela_equal(const Elf_Rela *x, const Elf_Rela *y)6464+#define cmp_3way(a, b) ((a) < (b) ? -1 : (a) > (b))6565+6666+static int compare_rela(const void *x, const void *y)6667{6767- return x->r_info == y->r_info && x->r_addend == y->r_addend;6868-}6868+ int ret;6969+ const Elf_Rela *rela_x = x, *rela_y = y;69707070-static bool duplicate_rela(const Elf_Rela *rela, int idx)7171-{7272- int i;7171+ ret = cmp_3way(rela_x->r_info, rela_y->r_info);7272+ if (ret == 0)7373+ ret = cmp_3way(rela_x->r_addend, rela_y->r_addend);73747474- for (i = 0; i < idx; i++) {7575- if (is_rela_equal(&rela[i], &rela[idx]))7676- return true;7777- }7878-7979- return false;7575+ return ret;8076}81778278static void count_max_entries(Elf_Rela *relas, int num,8379 unsigned int *plts, unsigned int *gots)8480{8585- unsigned int i, type;8181+ unsigned int i;8282+8383+ sort(relas, num, sizeof(Elf_Rela), compare_rela, NULL);86848785 for (i = 0; i < num; i++) {8888- type = ELF_R_TYPE(relas[i].r_info);8989- switch (type) {8686+ if (i && !compare_rela(&relas[i-1], &relas[i]))8787+ continue;8888+8989+ switch (ELF_R_TYPE(relas[i].r_info)) {9090 case R_LARCH_SOP_PUSH_PLT_PCREL:9191 case R_LARCH_B26:9292- if (!duplicate_rela(relas, i))9393- (*plts)++;9292+ (*plts)++;9493 break;9594 case R_LARCH_GOT_PC_HI20:9696- if (!duplicate_rela(relas, i))9797- (*gots)++;9595+ (*gots)++;9896 break;9997 default:10098 break; /* Do nothing. */
+5-5
arch/loongarch/kernel/signal.c
···677677 for (i = 1; i < 32; i++)678678 err |= __put_user(regs->regs[i], &sc->sc_regs[i]);679679680680+#ifdef CONFIG_CPU_HAS_LBT681681+ if (extctx->lbt.addr)682682+ err |= protected_save_lbt_context(extctx);683683+#endif684684+680685 if (extctx->lasx.addr)681686 err |= protected_save_lasx_context(extctx);682687 else if (extctx->lsx.addr)683688 err |= protected_save_lsx_context(extctx);684689 else if (extctx->fpu.addr)685690 err |= protected_save_fpu_context(extctx);686686-687687-#ifdef CONFIG_CPU_HAS_LBT688688- if (extctx->lbt.addr)689689- err |= protected_save_lbt_context(extctx);690690-#endif691691692692 /* Set the "end" magic */693693 info = (struct sctx_info *)extctx->end.addr;
+22
arch/loongarch/kernel/time.c
···55 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited66 */77#include <linux/clockchips.h>88+#include <linux/cpuhotplug.h>89#include <linux/delay.h>910#include <linux/export.h>1011#include <linux/init.h>···103102 return 0;104103}105104105105+static int arch_timer_starting(unsigned int cpu)106106+{107107+ set_csr_ecfg(ECFGF_TIMER);108108+109109+ return 0;110110+}111111+112112+static int arch_timer_dying(unsigned int cpu)113113+{114114+ constant_set_state_shutdown(this_cpu_ptr(&constant_clockevent_device));115115+116116+ /* Clear Timer Interrupt */117117+ write_csr_tintclear(CSR_TINTCLR_TI);118118+119119+ return 0;120120+}121121+106122static unsigned long get_loops_per_jiffy(void)107123{108124 unsigned long lpj = (unsigned long)const_clock_freq;···189171190172 lpj_fine = get_loops_per_jiffy();191173 pr_info("Constant clock event device register\n");174174+175175+ cpuhp_setup_state(CPUHP_AP_LOONGARCH_ARCH_TIMER_STARTING,176176+ "clockevents/loongarch/timer:starting",177177+ arch_timer_starting, arch_timer_dying);192178193179 return 0;194180}
+6-1
arch/loongarch/kvm/intc/eiointc.c
···4545 }46464747 cpu = s->sw_coremap[irq];4848- vcpu = kvm_get_vcpu(s->kvm, cpu);4848+ vcpu = kvm_get_vcpu_by_id(s->kvm, cpu);4949+ if (unlikely(vcpu == NULL)) {5050+ kvm_err("%s: invalid target cpu: %d\n", __func__, cpu);5151+ return;5252+ }5353+4954 if (level) {5055 /* if not enable return false */5156 if (!test_bit(irq, (unsigned long *)s->enable.reg_u32))
+4-4
arch/loongarch/kvm/intc/ipi.c
···9999static int send_ipi_data(struct kvm_vcpu *vcpu, gpa_t addr, uint64_t data)100100{101101 int i, idx, ret;102102- uint32_t val = 0, mask = 0;102102+ uint64_t val = 0, mask = 0;103103104104 /*105105 * Bit 27-30 is mask for byte writing.···108108 if ((data >> 27) & 0xf) {109109 /* Read the old val */110110 idx = srcu_read_lock(&vcpu->kvm->srcu);111111- ret = kvm_io_bus_read(vcpu, KVM_IOCSR_BUS, addr, sizeof(val), &val);111111+ ret = kvm_io_bus_read(vcpu, KVM_IOCSR_BUS, addr, 4, &val);112112 srcu_read_unlock(&vcpu->kvm->srcu, idx);113113 if (unlikely(ret)) {114114 kvm_err("%s: : read data from addr %llx failed\n", __func__, addr);···124124 }125125 val |= ((uint32_t)(data >> 32) & ~mask);126126 idx = srcu_read_lock(&vcpu->kvm->srcu);127127- ret = kvm_io_bus_write(vcpu, KVM_IOCSR_BUS, addr, sizeof(val), &val);127127+ ret = kvm_io_bus_write(vcpu, KVM_IOCSR_BUS, addr, 4, &val);128128 srcu_read_unlock(&vcpu->kvm->srcu, idx);129129 if (unlikely(ret))130130 kvm_err("%s: : write data to addr %llx failed\n", __func__, addr);···298298 cpu = (attr->attr >> 16) & 0x3ff;299299 addr = attr->attr & 0xff;300300301301- vcpu = kvm_get_vcpu(dev->kvm, cpu);301301+ vcpu = kvm_get_vcpu_by_id(dev->kvm, cpu);302302 if (unlikely(vcpu == NULL)) {303303 kvm_err("%s: invalid target cpu: %d\n", __func__, cpu);304304 return -EINVAL;
+10
arch/loongarch/kvm/intc/pch_pic.c
···195195 return -EINVAL;196196 }197197198198+ if (addr & (len - 1)) {199199+ kvm_err("%s: pch pic not aligned addr %llx len %d\n", __func__, addr, len);200200+ return -EINVAL;201201+ }202202+198203 /* statistics of pch pic reading */199204 vcpu->stat.pch_pic_read_exits++;200205 ret = loongarch_pch_pic_read(s, addr, len, val);···304299305300 if (!s) {306301 kvm_err("%s: pch pic irqchip not valid!\n", __func__);302302+ return -EINVAL;303303+ }304304+305305+ if (addr & (len - 1)) {306306+ kvm_err("%s: pch pic not aligned addr %llx len %d\n", __func__, addr, len);307307 return -EINVAL;308308 }309309
···371371 * executing with Secure TSC enabled, so special handling is required for372372 * accesses of MSR_IA32_TSC and MSR_AMD64_GUEST_TSC_FREQ.373373 */374374-static enum es_result __vc_handle_secure_tsc_msrs(struct pt_regs *regs, bool write)374374+static enum es_result __vc_handle_secure_tsc_msrs(struct es_em_ctxt *ctxt, bool write)375375{376376+ struct pt_regs *regs = ctxt->regs;376377 u64 tsc;377378378379 /*379379- * GUEST_TSC_FREQ should not be intercepted when Secure TSC is enabled.380380- * Terminate the SNP guest when the interception is enabled.380380+ * Writing to MSR_IA32_TSC can cause subsequent reads of the TSC to381381+ * return undefined values, and GUEST_TSC_FREQ is read-only. Generate382382+ * a #GP on all writes.383383+ */384384+ if (write) {385385+ ctxt->fi.vector = X86_TRAP_GP;386386+ ctxt->fi.error_code = 0;387387+ return ES_EXCEPTION;388388+ }389389+390390+ /*391391+ * GUEST_TSC_FREQ read should not be intercepted when Secure TSC is392392+ * enabled. Terminate the guest if a read is attempted.381393 */382394 if (regs->cx == MSR_AMD64_GUEST_TSC_FREQ)383395 return ES_VMM_ERROR;384396385385- /*386386- * Writes: Writing to MSR_IA32_TSC can cause subsequent reads of the TSC387387- * to return undefined values, so ignore all writes.388388- *389389- * Reads: Reads of MSR_IA32_TSC should return the current TSC value, use390390- * the value returned by rdtsc_ordered().391391- */392392- if (write) {393393- WARN_ONCE(1, "TSC MSR writes are verboten!\n");394394- return ES_OK;395395- }396396-397397+ /* Reads of MSR_IA32_TSC should return the current TSC value. */397398 tsc = rdtsc_ordered();398399 regs->ax = lower_32_bits(tsc);399400 regs->dx = upper_32_bits(tsc);···417416 case MSR_IA32_TSC:418417 case MSR_AMD64_GUEST_TSC_FREQ:419418 if (sev_status & MSR_AMD64_SNP_SECURE_TSC)420420- return __vc_handle_secure_tsc_msrs(regs, write);419419+ return __vc_handle_secure_tsc_msrs(ctxt, write);421420 break;422421 default:423422 break;
···386386387387 case X86_BUG_SPECTRE_V2:388388 case X86_BUG_RETBLEED:389389- case X86_BUG_SRSO:390389 case X86_BUG_L1TF:391390 case X86_BUG_ITS:392391 return cpu_attack_vector_mitigated(CPU_MITIGATE_USER_KERNEL) ||···31833184 }3184318531853186 if (srso_mitigation == SRSO_MITIGATION_AUTO) {31863186- if (should_mitigate_vuln(X86_BUG_SRSO)) {31873187+ /*31883188+ * Use safe-RET if user->kernel or guest->host protection is31893189+ * required. Otherwise the 'microcode' mitigation is sufficient31903190+ * to protect the user->user and guest->guest vectors.31913191+ */31923192+ if (cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_HOST) ||31933193+ (cpu_attack_vector_mitigated(CPU_MITIGATE_USER_KERNEL) &&31943194+ !boot_cpu_has(X86_FEATURE_SRSO_USER_KERNEL_NO))) {31873195 srso_mitigation = SRSO_MITIGATION_SAFE_RET;31963196+ } else if (cpu_attack_vector_mitigated(CPU_MITIGATE_USER_USER) ||31973197+ cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_GUEST)) {31983198+ srso_mitigation = SRSO_MITIGATION_MICROCODE;31883199 } else {31893200 srso_mitigation = SRSO_MITIGATION_NONE;31903201 return;
+10-9
arch/x86/kernel/fpu/xstate.c
···18811881#ifdef CONFIG_PROC_PID_ARCH_STATUS18821882/*18831883 * Report the amount of time elapsed in millisecond since last AVX51218841884- * use in the task.18841884+ * use in the task. Report -1 if no AVX-512 usage.18851885 */18861886static void avx512_status(struct seq_file *m, struct task_struct *task)18871887{18881888- unsigned long timestamp = READ_ONCE(x86_task_fpu(task)->avx512_timestamp);18891889- long delta;18881888+ unsigned long timestamp;18891889+ long delta = -1;1890189018911891- if (!timestamp) {18921892- /*18931893- * Report -1 if no AVX512 usage18941894- */18951895- delta = -1;18961896- } else {18911891+ /* AVX-512 usage is not tracked for kernel threads. Don't report anything. */18921892+ if (task->flags & (PF_KTHREAD | PF_USER_WORKER))18931893+ return;18941894+18951895+ timestamp = READ_ONCE(x86_task_fpu(task)->avx512_timestamp);18961896+18971897+ if (timestamp) {18971898 delta = (long)(jiffies - timestamp);18981899 /*18991900 * Cap to LONG_MAX if time difference > LONG_MAX
···20332033 goto out;20342034 }2035203520362036- if (!strstarts(ecdt_ptr->id, "\\")) {20362036+ if (!strlen(ecdt_ptr->id)) {20372037 /*20382038 * The ECDT table on some MSI notebooks contains invalid data, together20392039 * with an empty ID string ("").···20422042 * a "fully qualified reference to the (...) embedded controller device",20432043 * so this string always has to start with a backslash.20442044 *20452045- * By verifying this we can avoid such faulty ECDT tables in a safe way.20452045+ * However some ThinkBook machines have a ECDT table with a valid EC20462046+ * description but an invalid ID string ("_SB.PC00.LPCB.EC0").20472047+ *20482048+ * Because of this we only check if the ID string is empty in order to20492049+ * avoid the obvious cases.20462050 */20472047- pr_err(FW_BUG "Ignoring ECDT due to invalid ID string \"%s\"\n", ecdt_ptr->id);20512051+ pr_err(FW_BUG "Ignoring ECDT due to empty ID string\n");20482052 goto out;20492053 }20502054
+4-1
drivers/acpi/processor_perflib.c
···180180 struct acpi_processor *pr = per_cpu(processors, cpu);181181 int ret;182182183183- if (!pr || !pr->performance)183183+ if (!pr)184184 continue;185185186186 /*···196196 if (ret < 0)197197 pr_err("Failed to add freq constraint for CPU%d (%d)\n",198198 cpu, ret);199199+200200+ if (!pr->performance)201201+ continue;199202200203 ret = acpi_processor_get_platform_limit(pr);201204 if (ret)
+7-2
drivers/ata/libata-eh.c
···20752075 * Check if a link is established. This is a relaxed version of20762076 * ata_phys_link_online() which accounts for the fact that this is potentially20772077 * called after changing the link power management policy, which may not be20782078- * reflected immediately in the SSTAUS register (e.g., we may still be seeing20782078+ * reflected immediately in the SStatus register (e.g., we may still be seeing20792079 * the PHY in partial, slumber or devsleep Partial power management state.20802080 * So check that:20812081 * - A device is still present, that is, DET is 1h (Device presence detected···20892089 u32 sstatus;20902090 u8 det, ipm;2091209120922092+ /*20932093+ * For old IDE/PATA adapters that do not have a valid scr_read method,20942094+ * or if reading the SStatus register fails, assume that the device is20952095+ * present. Device probe will determine if that is really the case.20962096+ */20922097 if (sata_scr_read(link, SCR_STATUS, &sstatus))20932093- return false;20982098+ return true;2094209920952100 det = sstatus & 0x0f;20962101 ipm = (sstatus >> 8) & 0x0f;
+3-8
drivers/ata/libata-scsi.c
···39043904 /* Check cdl_ctrl */39053905 switch (buf[0] & 0x03) {39063906 case 0:39073907- /* Disable CDL if it is enabled */39083908- if (!(dev->flags & ATA_DFLAG_CDL_ENABLED))39093909- return 0;39073907+ /* Disable CDL */39103908 ata_dev_dbg(dev, "Disabling CDL\n");39113909 cdl_action = 0;39123910 dev->flags &= ~ATA_DFLAG_CDL_ENABLED;39133911 break;39143912 case 0x02:39153913 /*39163916- * Enable CDL if not already enabled. Since this is mutually39173917- * exclusive with NCQ priority, allow this only if NCQ priority39183918- * is disabled.39143914+ * Enable CDL. Since CDL is mutually exclusive with NCQ39153915+ * priority, allow this only if NCQ priority is disabled.39193916 */39203920- if (dev->flags & ATA_DFLAG_CDL_ENABLED)39213921- return 0;39223917 if (dev->flags & ATA_DFLAG_NCQ_PRIO_ENABLED) {39233918 ata_dev_err(dev,39243919 "NCQ priority must be disabled to enable CDL\n");
+6-33
drivers/block/drbd/drbd_int.h
···380380 /* this is/was a write request */381381 __EE_WRITE,382382383383+ /* hand back using mempool_free(e, drbd_buffer_page_pool) */384384+ __EE_RELEASE_TO_MEMPOOL,385385+383386 /* this is/was a write same request */384387 __EE_WRITE_SAME,385388···405402#define EE_IN_INTERVAL_TREE (1<<__EE_IN_INTERVAL_TREE)406403#define EE_SUBMITTED (1<<__EE_SUBMITTED)407404#define EE_WRITE (1<<__EE_WRITE)405405+#define EE_RELEASE_TO_MEMPOOL (1<<__EE_RELEASE_TO_MEMPOOL)408406#define EE_WRITE_SAME (1<<__EE_WRITE_SAME)409407#define EE_APPLICATION (1<<__EE_APPLICATION)410408#define EE_RS_THIN_REQ (1<<__EE_RS_THIN_REQ)···862858 struct list_head sync_ee; /* IO in progress (P_RS_DATA_REPLY gets written to disk) */863859 struct list_head done_ee; /* need to send P_WRITE_ACK */864860 struct list_head read_ee; /* [RS]P_DATA_REQUEST being read */865865- struct list_head net_ee; /* zero-copy network send in progress */866861867862 struct list_head resync_reads;868863 atomic_t pp_in_use; /* allocated from page pool */···13321329extern mempool_t drbd_request_mempool;13331330extern mempool_t drbd_ee_mempool;1334133113351335-/* drbd's page pool, used to buffer data received from the peer,13361336- * or data requested by the peer.13371337- *13381338- * This does not have an emergency reserve.13391339- *13401340- * When allocating from this pool, it first takes pages from the pool.13411341- * Only if the pool is depleted will try to allocate from the system.13421342- *13431343- * The assumption is that pages taken from this pool will be processed,13441344- * and given back, "quickly", and then can be recycled, so we can avoid13451345- * frequent calls to alloc_page(), and still will be able to make progress even13461346- * under memory pressure.13471347- */13481348-extern struct page *drbd_pp_pool;13491349-extern spinlock_t drbd_pp_lock;13501350-extern int drbd_pp_vacant;13511351-extern wait_queue_head_t drbd_pp_wait;13521352-13531332/* We also need a standard (emergency-reserve backed) page 
pool13541333 * for meta data IO (activity log, bitmap).13551334 * We can keep it global, as long as it is used as "N pages at a time".···13391354 */13401355#define DRBD_MIN_POOL_PAGES 12813411356extern mempool_t drbd_md_io_page_pool;13571357+extern mempool_t drbd_buffer_page_pool;1342135813431359/* We also need to make sure we get a bio13441360 * when we need it for housekeeping purposes */···14741488 sector_t, unsigned int,14751489 unsigned int,14761490 gfp_t) __must_hold(local);14771477-extern void __drbd_free_peer_req(struct drbd_device *, struct drbd_peer_request *,14781478- int);14791479-#define drbd_free_peer_req(m,e) __drbd_free_peer_req(m, e, 0)14801480-#define drbd_free_net_peer_req(m,e) __drbd_free_peer_req(m, e, 1)14911491+extern void drbd_free_peer_req(struct drbd_device *device, struct drbd_peer_request *req);14811492extern struct page *drbd_alloc_pages(struct drbd_peer_device *, unsigned int, bool);14821493extern void _drbd_clear_done_ee(struct drbd_device *device, struct list_head *to_be_freed);14831494extern int drbd_connected(struct drbd_peer_device *);···15921609#define page_chain_for_each_safe(page, n) \15931610 for (; page && ({ n = page_chain_next(page); 1; }); page = n)1594161115951595-15961596-static inline int drbd_peer_req_has_active_page(struct drbd_peer_request *peer_req)15971597-{15981598- struct page *page = peer_req->pages;15991599- page_chain_for_each(page) {16001600- if (page_count(page) > 1)16011601- return 1;16021602- }16031603- return 0;16041604-}1605161216061613static inline union drbd_state drbd_read_state(struct drbd_device *device)16071614{
+15-44
drivers/block/drbd/drbd_main.c
···114114mempool_t drbd_request_mempool;115115mempool_t drbd_ee_mempool;116116mempool_t drbd_md_io_page_pool;117117+mempool_t drbd_buffer_page_pool;117118struct bio_set drbd_md_io_bio_set;118119struct bio_set drbd_io_bio_set;119119-120120-/* I do not use a standard mempool, because:121121- 1) I want to hand out the pre-allocated objects first.122122- 2) I want to be able to interrupt sleeping allocation with a signal.123123- Note: This is a single linked list, the next pointer is the private124124- member of struct page.125125- */126126-struct page *drbd_pp_pool;127127-DEFINE_SPINLOCK(drbd_pp_lock);128128-int drbd_pp_vacant;129129-wait_queue_head_t drbd_pp_wait;130120131121DEFINE_RATELIMIT_STATE(drbd_ratelimit_state, 5 * HZ, 5);132122···16011611static int _drbd_send_zc_ee(struct drbd_peer_device *peer_device,16021612 struct drbd_peer_request *peer_req)16031613{16141614+ bool use_sendpage = !(peer_req->flags & EE_RELEASE_TO_MEMPOOL);16041615 struct page *page = peer_req->pages;16051616 unsigned len = peer_req->i.size;16061617 int err;···16101619 page_chain_for_each(page) {16111620 unsigned l = min_t(unsigned, len, PAGE_SIZE);1612162116131613- err = _drbd_send_page(peer_device, page, 0, l,16141614- page_chain_next(page) ? MSG_MORE : 0);16221622+ if (likely(use_sendpage))16231623+ err = _drbd_send_page(peer_device, page, 0, l,16241624+ page_chain_next(page) ? MSG_MORE : 0);16251625+ else16261626+ err = _drbd_no_send_page(peer_device, page, 0, l,16271627+ page_chain_next(page) ? 
MSG_MORE : 0);16281628+16151629 if (err)16161630 return err;16171631 len -= l;···19581962 INIT_LIST_HEAD(&device->sync_ee);19591963 INIT_LIST_HEAD(&device->done_ee);19601964 INIT_LIST_HEAD(&device->read_ee);19611961- INIT_LIST_HEAD(&device->net_ee);19621965 INIT_LIST_HEAD(&device->resync_reads);19631966 INIT_LIST_HEAD(&device->resync_work.list);19641967 INIT_LIST_HEAD(&device->unplug_work.list);···20382043 D_ASSERT(device, list_empty(&device->sync_ee));20392044 D_ASSERT(device, list_empty(&device->done_ee));20402045 D_ASSERT(device, list_empty(&device->read_ee));20412041- D_ASSERT(device, list_empty(&device->net_ee));20422046 D_ASSERT(device, list_empty(&device->resync_reads));20432047 D_ASSERT(device, list_empty(&first_peer_device(device)->connection->sender_work.q));20442048 D_ASSERT(device, list_empty(&device->resync_work.list));···2049205520502056static void drbd_destroy_mempools(void)20512057{20522052- struct page *page;20532053-20542054- while (drbd_pp_pool) {20552055- page = drbd_pp_pool;20562056- drbd_pp_pool = (struct page *)page_private(page);20572057- __free_page(page);20582058- drbd_pp_vacant--;20592059- }20602060-20612058 /* D_ASSERT(device, atomic_read(&drbd_pp_vacant)==0); */2062205920632060 bioset_exit(&drbd_io_bio_set);20642061 bioset_exit(&drbd_md_io_bio_set);20622062+ mempool_exit(&drbd_buffer_page_pool);20652063 mempool_exit(&drbd_md_io_page_pool);20662064 mempool_exit(&drbd_ee_mempool);20672065 mempool_exit(&drbd_request_mempool);···2072208620732087static int drbd_create_mempools(void)20742088{20752075- struct page *page;20762089 const int number = (DRBD_MAX_BIO_SIZE/PAGE_SIZE) * drbd_minor_count;20772077- int i, ret;20902090+ int ret;2078209120792092 /* caches */20802093 drbd_request_cache = kmem_cache_create(···21102125 if (ret)21112126 goto Enomem;2112212721282128+ ret = mempool_init_page_pool(&drbd_buffer_page_pool, number, 0);21292129+ if (ret)21302130+ goto Enomem;21312131+21132132 ret = mempool_init_slab_pool(&drbd_request_mempool, 
number,21142133 drbd_request_cache);21152134 if (ret)···21222133 ret = mempool_init_slab_pool(&drbd_ee_mempool, number, drbd_ee_cache);21232134 if (ret)21242135 goto Enomem;21252125-21262126- for (i = 0; i < number; i++) {21272127- page = alloc_page(GFP_HIGHUSER);21282128- if (!page)21292129- goto Enomem;21302130- set_page_private(page, (unsigned long)drbd_pp_pool);21312131- drbd_pp_pool = page;21322132- }21332133- drbd_pp_vacant = number;2134213621352137 return 0;21362138···21492169 rr = drbd_free_peer_reqs(device, &device->done_ee);21502170 if (rr)21512171 drbd_err(device, "%d EEs in done list found!\n", rr);21522152-21532153- rr = drbd_free_peer_reqs(device, &device->net_ee);21542154- if (rr)21552155- drbd_err(device, "%d EEs in net list found!\n", rr);21562172}2157217321582174/* caution. no locking. */···28382862 DRBD_MAJOR);28392863 return err;28402864 }28412841-28422842- /*28432843- * allocate all necessary structs28442844- */28452845- init_waitqueue_head(&drbd_pp_wait);2846286528472866 drbd_proc = NULL; /* play safe for drbd_cleanup */28482867 idr_init(&drbd_devices);
+31-231
drivers/block/drbd/drbd_receiver.c
···3333#include <linux/string.h>3434#include <linux/scatterlist.h>3535#include <linux/part_stat.h>3636+#include <linux/mempool.h>3637#include "drbd_int.h"3738#include "drbd_protocol.h"3839#include "drbd_req.h"···64636564#define GFP_TRY (__GFP_HIGHMEM | __GFP_NOWARN)66656767-/*6868- * some helper functions to deal with single linked page lists,6969- * page->private being our "next" pointer.7070- */7171-7272-/* If at least n pages are linked at head, get n pages off.7373- * Otherwise, don't modify head, and return NULL.7474- * Locking is the responsibility of the caller.7575- */7676-static struct page *page_chain_del(struct page **head, int n)7777-{7878- struct page *page;7979- struct page *tmp;8080-8181- BUG_ON(!n);8282- BUG_ON(!head);8383-8484- page = *head;8585-8686- if (!page)8787- return NULL;8888-8989- while (page) {9090- tmp = page_chain_next(page);9191- if (--n == 0)9292- break; /* found sufficient pages */9393- if (tmp == NULL)9494- /* insufficient pages, don't use any of them. */9595- return NULL;9696- page = tmp;9797- }9898-9999- /* add end of list marker for the returned list */100100- set_page_private(page, 0);101101- /* actual return value, and adjustment of head */102102- page = *head;103103- *head = tmp;104104- return page;105105-}106106-107107-/* may be used outside of locks to find the tail of a (usually short)108108- * "private" page chain, before adding it back to a global chain head109109- * with page_chain_add() under a spinlock. 
*/110110-static struct page *page_chain_tail(struct page *page, int *len)111111-{112112- struct page *tmp;113113- int i = 1;114114- while ((tmp = page_chain_next(page))) {115115- ++i;116116- page = tmp;117117- }118118- if (len)119119- *len = i;120120- return page;121121-}122122-123123-static int page_chain_free(struct page *page)124124-{125125- struct page *tmp;126126- int i = 0;127127- page_chain_for_each_safe(page, tmp) {128128- put_page(page);129129- ++i;130130- }131131- return i;132132-}133133-134134-static void page_chain_add(struct page **head,135135- struct page *chain_first, struct page *chain_last)136136-{137137-#if 1138138- struct page *tmp;139139- tmp = page_chain_tail(chain_first, NULL);140140- BUG_ON(tmp != chain_last);141141-#endif142142-143143- /* add chain to head */144144- set_page_private(chain_last, (unsigned long)*head);145145- *head = chain_first;146146-}147147-148148-static struct page *__drbd_alloc_pages(struct drbd_device *device,149149- unsigned int number)6666+static struct page *__drbd_alloc_pages(unsigned int number)15067{15168 struct page *page = NULL;15269 struct page *tmp = NULL;15370 unsigned int i = 0;15471155155- /* Yes, testing drbd_pp_vacant outside the lock is racy.156156- * So what. It saves a spin_lock. */157157- if (drbd_pp_vacant >= number) {158158- spin_lock(&drbd_pp_lock);159159- page = page_chain_del(&drbd_pp_pool, number);160160- if (page)161161- drbd_pp_vacant -= number;162162- spin_unlock(&drbd_pp_lock);163163- if (page)164164- return page;165165- }166166-16772 /* GFP_TRY, because we must not cause arbitrary write-out: in a DRBD16873 * "criss-cross" setup, that might cause write-out on some other DRBD,16974 * which in turn might block on the other node at this very place. 
*/17075 for (i = 0; i < number; i++) {171171- tmp = alloc_page(GFP_TRY);7676+ tmp = mempool_alloc(&drbd_buffer_page_pool, GFP_TRY);17277 if (!tmp)173173- break;7878+ goto fail;17479 set_page_private(tmp, (unsigned long)page);17580 page = tmp;17681 }177177-178178- if (i == number)179179- return page;180180-181181- /* Not enough pages immediately available this time.182182- * No need to jump around here, drbd_alloc_pages will retry this183183- * function "soon". */184184- if (page) {185185- tmp = page_chain_tail(page, NULL);186186- spin_lock(&drbd_pp_lock);187187- page_chain_add(&drbd_pp_pool, page, tmp);188188- drbd_pp_vacant += i;189189- spin_unlock(&drbd_pp_lock);8282+ return page;8383+fail:8484+ page_chain_for_each_safe(page, tmp) {8585+ set_page_private(page, 0);8686+ mempool_free(page, &drbd_buffer_page_pool);19087 }19188 return NULL;192192-}193193-194194-static void reclaim_finished_net_peer_reqs(struct drbd_device *device,195195- struct list_head *to_be_freed)196196-{197197- struct drbd_peer_request *peer_req, *tmp;198198-199199- /* The EEs are always appended to the end of the list. Since200200- they are sent in order over the wire, they have to finish201201- in order. As soon as we see the first not finished we can202202- stop to examine the list... 
*/203203-204204- list_for_each_entry_safe(peer_req, tmp, &device->net_ee, w.list) {205205- if (drbd_peer_req_has_active_page(peer_req))206206- break;207207- list_move(&peer_req->w.list, to_be_freed);208208- }209209-}210210-211211-static void drbd_reclaim_net_peer_reqs(struct drbd_device *device)212212-{213213- LIST_HEAD(reclaimed);214214- struct drbd_peer_request *peer_req, *t;215215-216216- spin_lock_irq(&device->resource->req_lock);217217- reclaim_finished_net_peer_reqs(device, &reclaimed);218218- spin_unlock_irq(&device->resource->req_lock);219219- list_for_each_entry_safe(peer_req, t, &reclaimed, w.list)220220- drbd_free_net_peer_req(device, peer_req);221221-}222222-223223-static void conn_reclaim_net_peer_reqs(struct drbd_connection *connection)224224-{225225- struct drbd_peer_device *peer_device;226226- int vnr;227227-228228- rcu_read_lock();229229- idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {230230- struct drbd_device *device = peer_device->device;231231- if (!atomic_read(&device->pp_in_use_by_net))232232- continue;233233-234234- kref_get(&device->kref);235235- rcu_read_unlock();236236- drbd_reclaim_net_peer_reqs(device);237237- kref_put(&device->kref, drbd_destroy_device);238238- rcu_read_lock();239239- }240240- rcu_read_unlock();24189}2429024391/**···113263 bool retry)114264{115265 struct drbd_device *device = peer_device->device;116116- struct page *page = NULL;266266+ struct page *page;117267 struct net_conf *nc;118118- DEFINE_WAIT(wait);119268 unsigned int mxb;120269121270 rcu_read_lock();···122273 mxb = nc ? nc->max_buffers : 1000000;123274 rcu_read_unlock();124275125125- if (atomic_read(&device->pp_in_use) < mxb)126126- page = __drbd_alloc_pages(device, number);127127-128128- /* Try to keep the fast path fast, but occasionally we need129129- * to reclaim the pages we lended to the network stack. 
*/130130- if (page && atomic_read(&device->pp_in_use_by_net) > 512)131131- drbd_reclaim_net_peer_reqs(device);132132-133133- while (page == NULL) {134134- prepare_to_wait(&drbd_pp_wait, &wait, TASK_INTERRUPTIBLE);135135-136136- drbd_reclaim_net_peer_reqs(device);137137-138138- if (atomic_read(&device->pp_in_use) < mxb) {139139- page = __drbd_alloc_pages(device, number);140140- if (page)141141- break;142142- }143143-144144- if (!retry)145145- break;146146-147147- if (signal_pending(current)) {148148- drbd_warn(device, "drbd_alloc_pages interrupted!\n");149149- break;150150- }151151-152152- if (schedule_timeout(HZ/10) == 0)153153- mxb = UINT_MAX;154154- }155155- finish_wait(&drbd_pp_wait, &wait);276276+ if (atomic_read(&device->pp_in_use) >= mxb)277277+ schedule_timeout_interruptible(HZ / 10);278278+ page = __drbd_alloc_pages(number);156279157280 if (page)158281 atomic_add(number, &device->pp_in_use);···135314 * Is also used from inside an other spin_lock_irq(&resource->req_lock);136315 * Either links the page chain back to the global pool,137316 * or returns all pages to the system. */138138-static void drbd_free_pages(struct drbd_device *device, struct page *page, int is_net)317317+static void drbd_free_pages(struct drbd_device *device, struct page *page)139318{140140- atomic_t *a = is_net ? 
&device->pp_in_use_by_net : &device->pp_in_use;141141- int i;319319+ struct page *tmp;320320+ int i = 0;142321143322 if (page == NULL)144323 return;145324146146- if (drbd_pp_vacant > (DRBD_MAX_BIO_SIZE/PAGE_SIZE) * drbd_minor_count)147147- i = page_chain_free(page);148148- else {149149- struct page *tmp;150150- tmp = page_chain_tail(page, &i);151151- spin_lock(&drbd_pp_lock);152152- page_chain_add(&drbd_pp_pool, page, tmp);153153- drbd_pp_vacant += i;154154- spin_unlock(&drbd_pp_lock);325325+ page_chain_for_each_safe(page, tmp) {326326+ set_page_private(page, 0);327327+ if (page_count(page) == 1)328328+ mempool_free(page, &drbd_buffer_page_pool);329329+ else330330+ put_page(page);331331+ i++;155332 }156156- i = atomic_sub_return(i, a);333333+ i = atomic_sub_return(i, &device->pp_in_use);157334 if (i < 0)158158- drbd_warn(device, "ASSERTION FAILED: %s: %d < 0\n",159159- is_net ? "pp_in_use_by_net" : "pp_in_use", i);160160- wake_up(&drbd_pp_wait);335335+ drbd_warn(device, "ASSERTION FAILED: pp_in_use: %d < 0\n", i);161336}162337163338/*···197380 gfpflags_allow_blocking(gfp_mask));198381 if (!page)199382 goto fail;383383+ if (!mempool_is_saturated(&drbd_buffer_page_pool))384384+ peer_req->flags |= EE_RELEASE_TO_MEMPOOL;200385 }201386202387 memset(peer_req, 0, sizeof(*peer_req));···222403 return NULL;223404}224405225225-void __drbd_free_peer_req(struct drbd_device *device, struct drbd_peer_request *peer_req,226226- int is_net)406406+void drbd_free_peer_req(struct drbd_device *device, struct drbd_peer_request *peer_req)227407{228408 might_sleep();229409 if (peer_req->flags & EE_HAS_DIGEST)230410 kfree(peer_req->digest);231231- drbd_free_pages(device, peer_req->pages, is_net);411411+ drbd_free_pages(device, peer_req->pages);232412 D_ASSERT(device, atomic_read(&peer_req->pending_bios) == 0);233413 D_ASSERT(device, drbd_interval_empty(&peer_req->i));234414 if (!expect(device, !(peer_req->flags & EE_CALL_AL_COMPLETE_IO))) {···242424 LIST_HEAD(work_list);243425 struct 
drbd_peer_request *peer_req, *t;244426 int count = 0;245245- int is_net = list == &device->net_ee;246427247428 spin_lock_irq(&device->resource->req_lock);248429 list_splice_init(list, &work_list);249430 spin_unlock_irq(&device->resource->req_lock);250431251432 list_for_each_entry_safe(peer_req, t, &work_list, w.list) {252252- __drbd_free_peer_req(device, peer_req, is_net);433433+ drbd_free_peer_req(device, peer_req);253434 count++;254435 }255436 return count;···260443static int drbd_finish_peer_reqs(struct drbd_device *device)261444{262445 LIST_HEAD(work_list);263263- LIST_HEAD(reclaimed);264446 struct drbd_peer_request *peer_req, *t;265447 int err = 0;266448267449 spin_lock_irq(&device->resource->req_lock);268268- reclaim_finished_net_peer_reqs(device, &reclaimed);269450 list_splice_init(&device->done_ee, &work_list);270451 spin_unlock_irq(&device->resource->req_lock);271271-272272- list_for_each_entry_safe(peer_req, t, &reclaimed, w.list)273273- drbd_free_net_peer_req(device, peer_req);274452275453 /* possible callbacks here:276454 * e_end_block, and e_end_resync_block, e_send_superseded.···17871975 data_size -= len;17881976 }17891977 kunmap(page);17901790- drbd_free_pages(peer_device->device, page, 0);19781978+ drbd_free_pages(peer_device->device, page);17911979 return err;17921980}17931981···50365224 put_ldev(device);50375225 }5038522650395039- /* tcp_close and release of sendpage pages can be deferred. 
I don't50405040- * want to use SO_LINGER, because apparently it can be deferred for50415041- * more than 20 seconds (longest time I checked).50425042- *50435043- * Actually we don't care for exactly when the network stack does its50445044- * put_page(), but release our reference on these pages right here.50455045- */50465046- i = drbd_free_peer_reqs(device, &device->net_ee);50475047- if (i)50485048- drbd_info(device, "net_ee not empty, killed %u entries\n", i);50495227 i = atomic_read(&device->pp_in_use_by_net);50505228 if (i)50515229 drbd_info(device, "pp_in_use_by_net = %d, expected 0\n", i);···5781597957825980 while (get_t_state(thi) == RUNNING) {57835981 drbd_thread_current_set_cpu(thi);57845784-57855785- conn_reclaim_net_peer_reqs(connection);5786598257875983 if (test_and_clear_bit(SEND_PING, &connection->flags)) {57885984 if (drbd_send_ping(connection)) {
+18-38
drivers/block/drbd/drbd_worker.c
···10301030 return 1;10311031}1032103210331033-/* helper */10341034-static void move_to_net_ee_or_free(struct drbd_device *device, struct drbd_peer_request *peer_req)10351035-{10361036- if (drbd_peer_req_has_active_page(peer_req)) {10371037- /* This might happen if sendpage() has not finished */10381038- int i = PFN_UP(peer_req->i.size);10391039- atomic_add(i, &device->pp_in_use_by_net);10401040- atomic_sub(i, &device->pp_in_use);10411041- spin_lock_irq(&device->resource->req_lock);10421042- list_add_tail(&peer_req->w.list, &device->net_ee);10431043- spin_unlock_irq(&device->resource->req_lock);10441044- wake_up(&drbd_pp_wait);10451045- } else10461046- drbd_free_peer_req(device, peer_req);10471047-}10481048-10491033/**10501034 * w_e_end_data_req() - Worker callback, to send a P_DATA_REPLY packet in response to a P_DATA_REQUEST10511035 * @w: work object.···10431059 int err;1044106010451061 if (unlikely(cancel)) {10461046- drbd_free_peer_req(device, peer_req);10471047- dec_unacked(device);10481048- return 0;10621062+ err = 0;10631063+ goto out;10491064 }1050106510511066 if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {···10571074 err = drbd_send_ack(peer_device, P_NEG_DREPLY, peer_req);10581075 }1059107610601060- dec_unacked(device);10611061-10621062- move_to_net_ee_or_free(device, peer_req);10631063-10641077 if (unlikely(err))10651078 drbd_err(device, "drbd_send_block() failed\n");10791079+out:10801080+ dec_unacked(device);10811081+ drbd_free_peer_req(device, peer_req);10821082+10661083 return err;10671084}10681085···11031120 int err;1104112111051122 if (unlikely(cancel)) {11061106- drbd_free_peer_req(device, peer_req);11071107- dec_unacked(device);11081108- return 0;11231123+ err = 0;11241124+ goto out;11091125 }1110112611111127 if (get_ldev_if_state(device, D_FAILED)) {···11371155 /* update resync data with failure */11381156 drbd_rs_failed_io(peer_device, peer_req->i.sector, peer_req->i.size);11391157 }11401140-11411141- dec_unacked(device);11421142-11431143- 
move_to_net_ee_or_free(device, peer_req);11441144-11451158 if (unlikely(err))11461159 drbd_err(device, "drbd_send_block() failed\n");11601160+out:11611161+ dec_unacked(device);11621162+ drbd_free_peer_req(device, peer_req);11631163+11471164 return err;11481165}11491166···11571176 int err, eq = 0;1158117711591178 if (unlikely(cancel)) {11601160- drbd_free_peer_req(device, peer_req);11611161- dec_unacked(device);11621162- return 0;11791179+ err = 0;11801180+ goto out;11631181 }1164118211651183 if (get_ldev(device)) {···12001220 if (drbd_ratelimit())12011221 drbd_err(device, "Sending NegDReply. I guess it gets messy.\n");12021222 }12031203-12041204- dec_unacked(device);12051205- move_to_net_ee_or_free(device, peer_req);12061206-12071223 if (unlikely(err))12081224 drbd_err(device, "drbd_send_block/ack() failed\n");12251225+out:12261226+ dec_unacked(device);12271227+ drbd_free_peer_req(device, peer_req);12281228+12091229 return err;12101230}12111231
+12-16
drivers/block/ublk_drv.c
···235235236236 struct completion completion;237237 unsigned int nr_queues_ready;238238- unsigned int nr_privileged_daemon;238238+ bool unprivileged_daemons;239239 struct mutex cancel_mutex;240240 bool canceling;241241 pid_t ublksrv_tgid;···13891389{13901390 blk_status_t res;1391139113921392- if (unlikely(ubq->fail_io))13921392+ if (unlikely(READ_ONCE(ubq->fail_io)))13931393 return BLK_STS_TARGET;1394139413951395 /* With recovery feature enabled, force_abort is set in···14011401 * Note: force_abort is guaranteed to be seen because it is set14021402 * before request queue is unqiuesced.14031403 */14041404- if (ublk_nosrv_should_queue_io(ubq) && unlikely(ubq->force_abort))14041404+ if (ublk_nosrv_should_queue_io(ubq) &&14051405+ unlikely(READ_ONCE(ubq->force_abort)))14051406 return BLK_STS_IOERR;1406140714071408 if (check_cancel && unlikely(ubq->canceling))···15511550 /* set to NULL, otherwise new tasks cannot mmap io_cmd_buf */15521551 ub->mm = NULL;15531552 ub->nr_queues_ready = 0;15541554- ub->nr_privileged_daemon = 0;15531553+ ub->unprivileged_daemons = false;15551554 ub->ublksrv_tgid = -1;15561555}15571556···16451644 * Transition the device to the nosrv state. 
What exactly this16461645 * means depends on the recovery flags16471646 */16481648- blk_mq_quiesce_queue(disk->queue);16491647 if (ublk_nosrv_should_stop_dev(ub)) {16501648 /*16511649 * Allow any pending/future I/O to pass through quickly···16521652 * waits for all pending I/O to complete16531653 */16541654 for (i = 0; i < ub->dev_info.nr_hw_queues; i++)16551655- ublk_get_queue(ub, i)->force_abort = true;16561656- blk_mq_unquiesce_queue(disk->queue);16551655+ WRITE_ONCE(ublk_get_queue(ub, i)->force_abort, true);1657165616581657 ublk_stop_dev_unlocked(ub);16591658 } else {···16621663 } else {16631664 ub->dev_info.state = UBLK_S_DEV_FAIL_IO;16641665 for (i = 0; i < ub->dev_info.nr_hw_queues; i++)16651665- ublk_get_queue(ub, i)->fail_io = true;16661666+ WRITE_ONCE(ublk_get_queue(ub, i)->fail_io, true);16661667 }16671667- blk_mq_unquiesce_queue(disk->queue);16681668 }16691669unlock:16701670 mutex_unlock(&ub->mutex);···19781980 __must_hold(&ub->mutex)19791981{19801982 ubq->nr_io_ready++;19811981- if (ublk_queue_ready(ubq)) {19831983+ if (ublk_queue_ready(ubq))19821984 ub->nr_queues_ready++;19831983-19841984- if (capable(CAP_SYS_ADMIN))19851985- ub->nr_privileged_daemon++;19861986- }19851985+ if (!ub->unprivileged_daemons && !capable(CAP_SYS_ADMIN))19861986+ ub->unprivileged_daemons = true;1987198719881988 if (ub->nr_queues_ready == ub->dev_info.nr_hw_queues) {19891989 /* now we are ready for handling ublk io request */···2876288028772881 ublk_apply_params(ub);2878288228792879- /* don't probe partitions if any one ubq daemon is un-trusted */28802880- if (ub->nr_privileged_daemon != ub->nr_queues_ready)28832883+ /* don't probe partitions if any daemon task is un-trusted */28842884+ if (ub->unprivileged_daemons)28812885 set_bit(GD_SUPPRESS_PART_SCAN, &disk->state);2882288628832887 ublk_get_device(ub);
+1-6
drivers/bluetooth/btmtk.c
···642642 * WMT command.643643 */644644 err = wait_on_bit_timeout(&data->flags, BTMTK_TX_WAIT_VND_EVT,645645- TASK_INTERRUPTIBLE, HCI_INIT_TIMEOUT);646646- if (err == -EINTR) {647647- bt_dev_err(hdev, "Execution of wmt command interrupted");648648- clear_bit(BTMTK_TX_WAIT_VND_EVT, &data->flags);649649- goto err_free_wc;650650- }645645+ TASK_UNINTERRUPTIBLE, HCI_INIT_TIMEOUT);651646652647 if (err) {653648 bt_dev_err(hdev, "Execution of wmt command timed out");
+4-4
drivers/bluetooth/btnxpuart.c
···543543 }544544545545 if (psdata->wakeup_source) {546546- ret = devm_request_irq(&serdev->dev, psdata->irq_handler,547547- ps_host_wakeup_irq_handler,548548- IRQF_ONESHOT | IRQF_TRIGGER_FALLING,549549- dev_name(&serdev->dev), nxpdev);546546+ ret = devm_request_threaded_irq(&serdev->dev, psdata->irq_handler,547547+ NULL, ps_host_wakeup_irq_handler,548548+ IRQF_ONESHOT,549549+ dev_name(&serdev->dev), nxpdev);550550 if (ret)551551 bt_dev_info(hdev, "error setting wakeup IRQ handler, ignoring\n");552552 disable_irq(psdata->irq_handler);
···8888 }89899090 r = amdgpu_vm_bo_map(adev, *bo_va, csa_addr, 0, size,9191- AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |9292- AMDGPU_PTE_EXECUTABLE);9191+ AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE |9292+ AMDGPU_VM_PAGE_EXECUTABLE);93939494 if (r) {9595 DRM_ERROR("failed to do bo_map on static CSA, err=%d\n", r);
+16-3
drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
···10391039{10401040 int ret;10411041 uint64_t reserv_addr, reserv_addr_ext;10421042- uint32_t reserv_size, reserv_size_ext;10421042+ uint32_t reserv_size, reserv_size_ext, mp0_ip_ver;10431043 struct amdgpu_device *adev = psp->adev;10441044+10451045+ mp0_ip_ver = amdgpu_ip_version(adev, MP0_HWIP, 0);1044104610451047 if (amdgpu_sriov_vf(psp->adev))10461048 return 0;1047104910481048- if ((amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(14, 0, 2)) &&10491049- (amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(14, 0, 3)))10501050+ switch (mp0_ip_ver) {10511051+ case IP_VERSION(14, 0, 2):10521052+ if (adev->psp.sos.fw_version < 0x3b0e0d)10531053+ return 0;10541054+ break;10551055+10561056+ case IP_VERSION(14, 0, 3):10571057+ if (adev->psp.sos.fw_version < 0x3a0e14)10581058+ return 0;10591059+ break;10601060+10611061+ default:10501062 return 0;10631063+ }1051106410521065 ret = psp_get_fw_reservation_info(psp, GFX_CMD_ID_FB_FW_RESERV_ADDR, &reserv_addr, &reserv_size);10531066 if (ret)
+11-4
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
···654654 * Check if all VM PDs/PTs are ready for updates655655 *656656 * Returns:657657- * True if VM is not evicting.657657+ * True if VM is not evicting and all VM entities are not stopped658658 */659659bool amdgpu_vm_ready(struct amdgpu_vm *vm)660660{661661- bool empty;662661 bool ret;663662664663 amdgpu_vm_eviction_lock(vm);···665666 amdgpu_vm_eviction_unlock(vm);666667667668 spin_lock(&vm->status_lock);668668- empty = list_empty(&vm->evicted);669669+ ret &= list_empty(&vm->evicted);669670 spin_unlock(&vm->status_lock);670671671671- return ret && empty;672672+ spin_lock(&vm->immediate.lock);673673+ ret &= !vm->immediate.stopped;674674+ spin_unlock(&vm->immediate.lock);675675+676676+ spin_lock(&vm->delayed.lock);677677+ ret &= !vm->delayed.stopped;678678+ spin_unlock(&vm->delayed.lock);679679+680680+ return ret;672681}673682674683/**
···12271227/**12281228 * drm_bridge_detect - check if anything is attached to the bridge output12291229 * @bridge: bridge control structure12301230+ * @connector: attached connector12301231 *12311232 * If the bridge supports output detection, as reported by the12321233 * DRM_BRIDGE_OP_DETECT bridge ops flag, call &drm_bridge_funcs.detect for the
···332332 int ret = 0;333333 u32 reg_val, max;334334 struct xe_reg rapl_limit;335335+ u64 max_supp_power_limit = 0;335336336337 mutex_lock(&hwmon->hwmon_lock);337338···355354 ret = -EOPNOTSUPP;356355 }357356 goto unlock;357357+ }358358+359359+ /*360360+ * If the sysfs value exceeds the maximum pcode supported power limit value, clamp it to361361+ * the supported maximum (U12.3 format).362362+ * This is to avoid truncation during reg_val calculation below and ensure the valid363363+ * power limit is sent for pcode which would clamp it to card-supported value.364364+ */365365+ max_supp_power_limit = ((PWR_LIM_VAL) >> hwmon->scl_shift_power) * SF_POWER;366366+ if (value > max_supp_power_limit) {367367+ value = max_supp_power_limit;368368+ drm_info(&hwmon->xe->drm,369369+ "Power limit clamped as selected %s exceeds channel %d limit\n",370370+ PWR_ATTR_TO_STR(attr), channel);358371 }359372360373 /* Computation in 64-bits to avoid overflow. Round to nearest. */···754739{755740 int ret;756741 u32 uval;742742+ u64 max_crit_power_curr = 0;757743758744 mutex_lock(&hwmon->hwmon_lock);759745746746+ /*747747+ * If the sysfs value exceeds the pcode mailbox cmd POWER_SETUP_SUBCOMMAND_WRITE_I1748748+ * max supported value, clamp it to the command's max (U10.6 format).749749+ * This is to avoid truncation during uval calculation below and ensure the valid power750750+ * limit is sent for pcode which would clamp it to card-supported value.751751+ */752752+ max_crit_power_curr = (POWER_SETUP_I1_DATA_MASK >> POWER_SETUP_I1_SHIFT) * scale_factor;753753+ if (value > max_crit_power_curr) {754754+ value = max_crit_power_curr;755755+ drm_info(&hwmon->xe->drm,756756+ "Power limit clamped as selected exceeds channel %d limit\n",757757+ channel);758758+ }760759 uval = DIV_ROUND_CLOSEST_ULL(value << POWER_SETUP_I1_SHIFT, scale_factor);761760 ret = xe_hwmon_pcode_write_i1(hwmon, uval);762761
+27-15
drivers/gpu/drm/xe/xe_migrate.c
···18201820 if (!IS_ALIGNED(len, XE_CACHELINE_BYTES) ||18211821 !IS_ALIGNED((unsigned long)buf + offset, XE_CACHELINE_BYTES)) {18221822 int buf_offset = 0;18231823+ void *bounce;18241824+ int err;18251825+18261826+ BUILD_BUG_ON(!is_power_of_2(XE_CACHELINE_BYTES));18271827+ bounce = kmalloc(XE_CACHELINE_BYTES, GFP_KERNEL);18281828+ if (!bounce)18291829+ return -ENOMEM;1823183018241831 /*18251832 * Less than ideal for large unaligned access but this should be18261833 * fairly rare, can fixup if this becomes common.18271834 */18281835 do {18291829- u8 bounce[XE_CACHELINE_BYTES];18301830- void *ptr = (void *)bounce;18311831- int err;18321836 int copy_bytes = min_t(int, bytes_left,18331837 XE_CACHELINE_BYTES -18341838 (offset & XE_CACHELINE_MASK));···18411837 err = xe_migrate_access_memory(m, bo,18421838 offset &18431839 ~XE_CACHELINE_MASK,18441844- (void *)ptr,18451845- sizeof(bounce), 0);18401840+ bounce,18411841+ XE_CACHELINE_BYTES, 0);18461842 if (err)18471847- return err;18431843+ break;1848184418491845 if (write) {18501850- memcpy(ptr + ptr_offset, buf + buf_offset, copy_bytes);18461846+ memcpy(bounce + ptr_offset, buf + buf_offset, copy_bytes);1851184718521848 err = xe_migrate_access_memory(m, bo,18531849 offset & ~XE_CACHELINE_MASK,18541854- (void *)ptr,18551855- sizeof(bounce), write);18501850+ bounce,18511851+ XE_CACHELINE_BYTES, write);18561852 if (err)18571857- return err;18531853+ break;18581854 } else {18591859- memcpy(buf + buf_offset, ptr + ptr_offset,18551855+ memcpy(buf + buf_offset, bounce + ptr_offset,18601856 copy_bytes);18611857 }18621858···18651861 offset += copy_bytes;18661862 } while (bytes_left);1867186318681868- return 0;18641864+ kfree(bounce);18651865+ return err;18691866 }1870186718711868 dma_addr = xe_migrate_dma_map(xe, buf, len + page_offset, write);···18871882 else18881883 current_bytes = min_t(int, bytes_left, cursor.size);1889188418901890- if (fence)18911891- dma_fence_put(fence);18851885+ if (current_bytes & ~PAGE_MASK) {18861886+ 
int pitch = 4;18871887+18881888+ current_bytes = min_t(int, current_bytes, S16_MAX * pitch);18891889+ }1892189018931891 __fence = xe_migrate_vram(m, current_bytes,18941892 (unsigned long)buf & ~PAGE_MASK,···19001892 XE_MIGRATE_COPY_TO_VRAM :19011893 XE_MIGRATE_COPY_TO_SRAM);19021894 if (IS_ERR(__fence)) {19031903- if (fence)18951895+ if (fence) {19041896 dma_fence_wait(fence, false);18971897+ dma_fence_put(fence);18981898+ }19051899 fence = __fence;19061900 goto out_err;19071901 }19021902+19031903+ dma_fence_put(fence);19081904 fence = __fence;1909190519101906 buf += current_bytes;
···24572457 dev->dev_ops->cfg_port_member(dev, i, val | cpu_port);24582458 }2459245924602460+ /* HSR ports are setup once so need to use the assigned membership24612461+ * when the port is enabled.24622462+ */24632463+ if (!port_member && p->stp_state == BR_STATE_FORWARDING &&24642464+ (dev->hsr_ports & BIT(port)))24652465+ port_member = dev->hsr_ports;24602466 dev->dev_ops->cfg_port_member(dev, port, port_member | cpu_port);24612467}24622468
···53365336{53375337 int i;5338533853395339- netdev_assert_locked(bp->dev);53395339+ netdev_assert_locked_or_invisible(bp->dev);5340534053415341 /* Under netdev instance lock and all our NAPIs have been disabled.53425342 * It's safe to delete the hash table.
···606606 if (!npc_check_field(rvu, blkaddr, NPC_LB, intf))607607 *features &= ~BIT_ULL(NPC_OUTER_VID);608608609609- /* Set SPI flag only if AH/ESP and IPSEC_SPI are in the key */610610- if (npc_check_field(rvu, blkaddr, NPC_IPSEC_SPI, intf) &&609609+ /* Allow extracting SPI field from AH and ESP headers at same offset */610610+ if (npc_is_field_present(rvu, NPC_IPSEC_SPI, intf) &&611611 (*features & (BIT_ULL(NPC_IPPROTO_ESP) | BIT_ULL(NPC_IPPROTO_AH))))612612 *features |= BIT_ULL(NPC_IPSEC_SPI);613613
+2
drivers/net/ethernet/mediatek/mtk_ppe_offload.c
···101101 if (!IS_ENABLED(CONFIG_NET_MEDIATEK_SOC_WED))102102 return -1;103103104104+ rcu_read_lock();104105 err = dev_fill_forward_path(dev, addr, &stack);106106+ rcu_read_unlock();105107 if (err)106108 return err;107109
···380380 refcount_t refcnt;381381 u32 root_tsar_ix;382382 struct mlx5_qos_domain *domain;383383- /* Contains all vports with QoS enabled but no explicit node.384384- * Cannot be NULL if QoS is enabled, but may be a fake node385385- * referencing the root TSAR if the esw doesn't support nodes.386386- */387387- struct mlx5_esw_sched_node *node0;388383 } qos;389384390385 struct mlx5_esw_bridge_offloads *br_offloads;
···85858686 ret = mlx5hws_table_create_default_ft(tbl->ctx->mdev,8787 tbl,8888+ 0,8889 &matcher->end_ft_id);8990 if (ret) {9091 mlx5hws_err(tbl->ctx, "Isolated matcher: failed to create end flow table\n");···113112 if (mlx5hws_matcher_is_isolated(matcher))114113 ret = hws_matcher_create_end_ft_isolated(matcher);115114 else116116- ret = mlx5hws_table_create_default_ft(tbl->ctx->mdev, tbl,115115+ ret = mlx5hws_table_create_default_ft(tbl->ctx->mdev,116116+ tbl,117117+ 0,117118 &matcher->end_ft_id);118119119120 if (ret) {
···3232/* MAC Specific Addr 1 Top Reg */3333#define LAN865X_REG_MAC_H_SADDR1 0x0001002334343535+/* MAC TSU Timer Increment Register */3636+#define LAN865X_REG_MAC_TSU_TIMER_INCR 0x000100773737+#define MAC_TSU_TIMER_INCR_COUNT_NANOSECONDS 0x00283838+3539struct lan865x_priv {3640 struct work_struct multicast_work;3741 struct net_device *netdev;···315311316312 phy_start(netdev->phydev);317313314314+ netif_start_queue(netdev);315315+318316 return 0;319317}320318···348342 if (!priv->tc6) {349343 ret = -ENODEV;350344 goto free_netdev;345345+ }346346+347347+ /* LAN865x Rev.B0/B1 configuration parameters from AN1760348348+ * As per the Configuration Application Note AN1760 published in the349349+ * link, https://www.microchip.com/en-us/application-notes/an1760350350+ * Revision F (DS60001760G - June 2024), configure the MAC to set time351351+ * stamping at the end of the Start of Frame Delimiter (SFD) and set the352352+ * Timer Increment reg to 40 ns to be used as a 25 MHz internal clock.353353+ */354354+ ret = oa_tc6_write_register(priv->tc6, LAN865X_REG_MAC_TSU_TIMER_INCR,355355+ MAC_TSU_TIMER_INCR_COUNT_NANOSECONDS);356356+ if (ret) {357357+ dev_err(&spi->dev, "Failed to config TSU Timer Incr reg: %d\n",358358+ ret);359359+ goto oa_tc6_exit;351360 }352361353362 /* As per the point s3 in the below errata, SPI receive Ethernet frame
···203203 }204204}205205206206+static void icssg_enable_fw_offload(struct prueth *prueth)207207+{208208+ struct prueth_emac *emac;209209+ int mac;210210+211211+ for (mac = PRUETH_MAC0; mac < PRUETH_NUM_MACS; mac++) {212212+ emac = prueth->emac[mac];213213+ if (prueth->is_hsr_offload_mode) {214214+ if (emac->ndev->features & NETIF_F_HW_HSR_TAG_RM)215215+ icssg_set_port_state(emac, ICSSG_EMAC_HSR_RX_OFFLOAD_ENABLE);216216+ else217217+ icssg_set_port_state(emac, ICSSG_EMAC_HSR_RX_OFFLOAD_DISABLE);218218+ }219219+220220+ if (prueth->is_switch_mode || prueth->is_hsr_offload_mode) {221221+ if (netif_running(emac->ndev)) {222222+ icssg_fdb_add_del(emac, eth_stp_addr, prueth->default_vlan,223223+ ICSSG_FDB_ENTRY_P0_MEMBERSHIP |224224+ ICSSG_FDB_ENTRY_P1_MEMBERSHIP |225225+ ICSSG_FDB_ENTRY_P2_MEMBERSHIP |226226+ ICSSG_FDB_ENTRY_BLOCK,227227+ true);228228+ icssg_vtbl_modify(emac, emac->port_vlan | DEFAULT_VID,229229+ BIT(emac->port_id) | DEFAULT_PORT_MASK,230230+ BIT(emac->port_id) | DEFAULT_UNTAG_MASK,231231+ true);232232+ if (prueth->is_hsr_offload_mode)233233+ icssg_vtbl_modify(emac, DEFAULT_VID,234234+ DEFAULT_PORT_MASK,235235+ DEFAULT_UNTAG_MASK, true);236236+ icssg_set_pvid(prueth, emac->port_vlan, emac->port_id);237237+ if (prueth->is_switch_mode)238238+ icssg_set_port_state(emac, ICSSG_EMAC_PORT_VLAN_AWARE_ENABLE);239239+ }240240+ }241241+ }242242+}243243+206244static int prueth_emac_common_start(struct prueth *prueth)207245{208246 struct prueth_emac *emac;···791753 ret = prueth_emac_common_start(prueth);792754 if (ret)793755 goto free_rx_irq;756756+ icssg_enable_fw_offload(prueth);794757 }795758796759 flow_cfg = emac->dram.va + ICSSG_CONFIG_OFFSET + PSI_L_REGULAR_FLOW_ID_BASE_OFFSET;···1399136014001361static void icssg_change_mode(struct prueth *prueth)14011362{14021402- struct prueth_emac *emac;14031403- int mac, ret;13631363+ int ret;1404136414051365 ret = prueth_emac_restart(prueth);14061366 if (ret) {···14071369 return;14081370 }1409137114101410- for (mac = 
PRUETH_MAC0; mac < PRUETH_NUM_MACS; mac++) {14111411- emac = prueth->emac[mac];14121412- if (prueth->is_hsr_offload_mode) {14131413- if (emac->ndev->features & NETIF_F_HW_HSR_TAG_RM)14141414- icssg_set_port_state(emac, ICSSG_EMAC_HSR_RX_OFFLOAD_ENABLE);14151415- else14161416- icssg_set_port_state(emac, ICSSG_EMAC_HSR_RX_OFFLOAD_DISABLE);14171417- }14181418-14191419- if (netif_running(emac->ndev)) {14201420- icssg_fdb_add_del(emac, eth_stp_addr, prueth->default_vlan,14211421- ICSSG_FDB_ENTRY_P0_MEMBERSHIP |14221422- ICSSG_FDB_ENTRY_P1_MEMBERSHIP |14231423- ICSSG_FDB_ENTRY_P2_MEMBERSHIP |14241424- ICSSG_FDB_ENTRY_BLOCK,14251425- true);14261426- icssg_vtbl_modify(emac, emac->port_vlan | DEFAULT_VID,14271427- BIT(emac->port_id) | DEFAULT_PORT_MASK,14281428- BIT(emac->port_id) | DEFAULT_UNTAG_MASK,14291429- true);14301430- if (prueth->is_hsr_offload_mode)14311431- icssg_vtbl_modify(emac, DEFAULT_VID,14321432- DEFAULT_PORT_MASK,14331433- DEFAULT_UNTAG_MASK, true);14341434- icssg_set_pvid(prueth, emac->port_vlan, emac->port_id);14351435- if (prueth->is_switch_mode)14361436- icssg_set_port_state(emac, ICSSG_EMAC_PORT_VLAN_AWARE_ENABLE);14371437- }14381438- }13721372+ icssg_enable_fw_offload(prueth);14391373}1440137414411375static int prueth_netdevice_port_link(struct net_device *ndev,
+1-1
drivers/net/ethernet/wangxun/libwx/wx_vf_lib.c
···192192 u8 i, j;193193194194 /* Fill out hash function seeds */195195- netdev_rss_key_fill(wx->rss_key, sizeof(wx->rss_key));195195+ netdev_rss_key_fill(wx->rss_key, WX_RSS_KEY_SIZE);196196 for (i = 0; i < WX_RSS_KEY_SIZE / 4; i++)197197 wr32(wx, WX_VXRSSRK(i), wx->rss_key[i]);198198
···365365 u16 mask;366366};367367368368+struct vsc8531_skb_cb {369369+ u32 ns;370370+};371371+372372+#define VSC8531_SKB_CB(skb) \373373+ ((struct vsc8531_skb_cb *)((skb)->cb))374374+368375struct vsc8531_private {369376 int rate_magic;370377 u16 supp_led_modes;···420413 */421414 struct mutex ts_lock;422415 struct mutex phc_lock;416416+417417+ /* list of skbs that were received and need timestamp information but it418418+ * didn't received it yet419419+ */420420+ struct sk_buff_head rx_skbs_list;423421};424422425423/* Shared structure between the PHYs of the same package.
···666666 * Take early refcount for outstanding I/O requests we schedule during667667 * delete processing for unreg_vpi. Always keep this before668668 * scsi_remove_host() as we can no longer obtain a reference through669669- * scsi_host_get() after scsi_host_remove as shost is set to SHOST_DEL.669669+ * scsi_host_get() after scsi_remove_host as shost is set to SHOST_DEL.670670 */671671 if (!scsi_host_get(shost))672672 return VPORT_INVAL;
···12321232}1233123312341234static int tegra_powergate_of_get_resets(struct tegra_powergate *pg,12351235- struct device_node *np, bool off)12351235+ struct device_node *np)12361236{12371237 struct device *dev = pg->pmc->dev;12381238 int err;···12471247 err = reset_control_acquire(pg->reset);12481248 if (err < 0) {12491249 pr_err("failed to acquire resets: %d\n", err);12501250- goto out;12511251- }12521252-12531253- if (off) {12541254- err = reset_control_assert(pg->reset);12551255- } else {12561256- err = reset_control_deassert(pg->reset);12571257- if (err < 0)12581258- goto out;12591259-12601260- reset_control_release(pg->reset);12611261- }12621262-12631263-out:12641264- if (err) {12651265- reset_control_release(pg->reset);12661250 reset_control_put(pg->reset);12671251 }12681252···12921308 goto set_available;12931309 }1294131012951295- err = tegra_powergate_of_get_resets(pg, np, off);13111311+ err = tegra_powergate_of_get_resets(pg, np);12961312 if (err < 0) {12971313 dev_err(dev, "failed to get resets for %pOFn: %d\n", np, err);12981314 goto remove_clks;12991315 }1300131613011301- if (!IS_ENABLED(CONFIG_PM_GENERIC_DOMAINS)) {13021302- if (off)13031303- WARN_ON(tegra_powergate_power_up(pg, true));13171317+ /*13181318+ * If the power-domain is off, then ensure the resets are asserted.13191319+ * If the power-domain is on, then power down to ensure that when is13201320+ * it turned on the power-domain, clocks and resets are all in the13211321+ * expected state.13221322+ */13231323+ if (off) {13241324+ err = reset_control_assert(pg->reset);13251325+ if (err) {13261326+ pr_err("failed to assert resets: %d\n", err);13271327+ goto remove_resets;13281328+ }13291329+ } else {13301330+ err = tegra_powergate_power_down(pg);13311331+ if (err) {13321332+ dev_err(dev, "failed to turn off PM domain %s: %d\n",13331333+ pg->genpd.name, err);13341334+ goto remove_resets;13351335+ }13361336+ }1304133713381338+ /*13391339+ * If PM_GENERIC_DOMAINS is not enabled, power-on13401340+ * 
the domain and skip the genpd registration.13411341+ */13421342+ if (!IS_ENABLED(CONFIG_PM_GENERIC_DOMAINS)) {13431343+ WARN_ON(tegra_powergate_power_up(pg, true));13051344 goto remove_resets;13061345 }1307134613081308- err = pm_genpd_init(&pg->genpd, NULL, off);13471347+ err = pm_genpd_init(&pg->genpd, NULL, true);13091348 if (err < 0) {13101349 dev_err(dev, "failed to initialise PM domain %pOFn: %d\n", np,13111350 err);
+4-4
drivers/tty/serial/8250/8250_rsa.c
···147147 if (up->port.uartclk == SERIAL_RSA_BAUD_BASE * 16)148148 serial_out(up, UART_RSA_FRR, 0);149149}150150-EXPORT_SYMBOL_GPL_FOR_MODULES(rsa_enable, "8250_base");150150+EXPORT_SYMBOL_FOR_MODULES(rsa_enable, "8250_base");151151152152/*153153 * Attempts to turn off the RSA FIFO and resets the RSA board back to 115kbps compat mode. It is···179179 up->port.uartclk = SERIAL_RSA_BAUD_BASE_LO * 16;180180 uart_port_unlock_irq(&up->port);181181}182182-EXPORT_SYMBOL_GPL_FOR_MODULES(rsa_disable, "8250_base");182182+EXPORT_SYMBOL_FOR_MODULES(rsa_disable, "8250_base");183183184184void rsa_autoconfig(struct uart_8250_port *up)185185{···192192 if (__rsa_enable(up))193193 up->port.type = PORT_RSA;194194}195195-EXPORT_SYMBOL_GPL_FOR_MODULES(rsa_autoconfig, "8250_base");195195+EXPORT_SYMBOL_FOR_MODULES(rsa_autoconfig, "8250_base");196196197197void rsa_reset(struct uart_8250_port *up)198198{···201201202202 serial_out(up, UART_RSA_FRR, 0);203203}204204-EXPORT_SYMBOL_GPL_FOR_MODULES(rsa_reset, "8250_base");204204+EXPORT_SYMBOL_FOR_MODULES(rsa_reset, "8250_base");205205206206#ifdef CONFIG_SERIAL_8250_DEPRECATED_OPTIONS207207#ifndef MODULE
+8-4
drivers/ufs/core/ufshcd.c
···71387138static irqreturn_t ufshcd_intr(int irq, void *__hba)71397139{71407140 struct ufs_hba *hba = __hba;71417141+ u32 intr_status, enabled_intr_status;7141714271427143 /* Move interrupt handling to thread when MCQ & ESI are not enabled */71437144 if (!hba->mcq_enabled || !hba->mcq_esi_enabled)71447145 return IRQ_WAKE_THREAD;7145714671477147+ intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);71487148+ enabled_intr_status = intr_status & ufshcd_readl(hba, REG_INTERRUPT_ENABLE);71497149+71507150+ ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);71517151+71467152 /* Directly handle interrupts since MCQ ESI handlers does the hard job */71477147- return ufshcd_sl_intr(hba, ufshcd_readl(hba, REG_INTERRUPT_STATUS) &71487148- ufshcd_readl(hba, REG_INTERRUPT_ENABLE));71537153+ return ufshcd_sl_intr(hba, enabled_intr_status);71497154}7150715571517156static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag)···1052110516 err = devm_add_action_or_reset(dev, ufshcd_devres_release,1052210517 host);1052310518 if (err)1052410524- return dev_err_probe(dev, err,1052510525- "failed to add ufshcd dealloc action\n");1051910519+ return err;10526105201052710521 host->nr_maps = HCTX_TYPE_POLL + 1;1052810522 hba = shost_priv(host);
+1-1
drivers/ufs/host/ufs-mediatek.c
···818818 unsigned int q_index;819819820820 q_index = map->mq_map[cpu];821821- if (q_index > nr) {821821+ if (q_index >= nr) {822822 dev_err(hba->dev, "hwq index %d exceed %d\n",823823 q_index, nr);824824 return MTK_MCQ_INVALID_IRQ;
+12-15
drivers/virt/coco/sev-guest/sev-guest.c
···116116117117static int get_derived_key(struct snp_guest_dev *snp_dev, struct snp_guest_request_ioctl *arg)118118{119119+ struct snp_derived_key_resp *derived_key_resp __free(kfree) = NULL;119120 struct snp_derived_key_req *derived_key_req __free(kfree) = NULL;120120- struct snp_derived_key_resp derived_key_resp = {0};121121 struct snp_msg_desc *mdesc = snp_dev->msg_desc;122122 struct snp_guest_req req = {};123123 int rc, resp_len;124124- /* Response data is 64 bytes and max authsize for GCM is 16 bytes. */125125- u8 buf[64 + 16];126124127125 if (!arg->req_data || !arg->resp_data)128126 return -EINVAL;···130132 * response payload. Make sure that it has enough space to cover the131133 * authtag.132134 */133133- resp_len = sizeof(derived_key_resp.data) + mdesc->ctx->authsize;134134- if (sizeof(buf) < resp_len)135135+ resp_len = sizeof(derived_key_resp->data) + mdesc->ctx->authsize;136136+ derived_key_resp = kzalloc(resp_len, GFP_KERNEL_ACCOUNT);137137+ if (!derived_key_resp)135138 return -ENOMEM;136139137140 derived_key_req = kzalloc(sizeof(*derived_key_req), GFP_KERNEL_ACCOUNT);···148149 req.vmpck_id = mdesc->vmpck_id;149150 req.req_buf = derived_key_req;150151 req.req_sz = sizeof(*derived_key_req);151151- req.resp_buf = buf;152152+ req.resp_buf = derived_key_resp;152153 req.resp_sz = resp_len;153154 req.exit_code = SVM_VMGEXIT_GUEST_REQUEST;154155155156 rc = snp_send_guest_request(mdesc, &req);156157 arg->exitinfo2 = req.exitinfo2;157157- if (rc)158158- return rc;159159-160160- memcpy(derived_key_resp.data, buf, sizeof(derived_key_resp.data));161161- if (copy_to_user((void __user *)arg->resp_data, &derived_key_resp,162162- sizeof(derived_key_resp)))163163- rc = -EFAULT;158158+ if (!rc) {159159+ if (copy_to_user((void __user *)arg->resp_data, derived_key_resp,160160+ sizeof(derived_key_resp->data)))161161+ rc = -EFAULT;162162+ }164163165164 /* The response buffer contains the sensitive data, explicitly clear it. 
*/166166- memzero_explicit(buf, sizeof(buf));167167- memzero_explicit(&derived_key_resp, sizeof(derived_key_resp));165165+ memzero_explicit(derived_key_resp, sizeof(*derived_key_resp));166166+168167 return rc;169168}170169
···1512151215131513/*15141514 * Return 0 if we have submitted or queued the sector for submission.15151515- * Return <0 for critical errors.15151515+ * Return <0 for critical errors, and the sector will have its dirty flag cleared.15161516 *15171517 * Caller should make sure filepos < i_size and handle filepos >= i_size case.15181518 */···15351535 ASSERT(filepos < i_size);1536153615371537 em = btrfs_get_extent(inode, NULL, filepos, sectorsize);15381538- if (IS_ERR(em))15381538+ if (IS_ERR(em)) {15391539+ /*15401540+ * When submission failed, we should still clear the folio dirty.15411541+ * Or the folio will be written back again but without any15421542+ * ordered extent.15431543+ */15441544+ btrfs_folio_clear_dirty(fs_info, folio, filepos, sectorsize);15451545+ btrfs_folio_set_writeback(fs_info, folio, filepos, sectorsize);15461546+ btrfs_folio_clear_writeback(fs_info, folio, filepos, sectorsize);15391547 return PTR_ERR(em);15481548+ }1540154915411550 extent_offset = filepos - em->start;15421551 em_end = btrfs_extent_map_end(em);···16181609 folio_unlock(folio);16191610 return 1;16201611 }16211621- if (ret < 0)16121612+ if (ret < 0) {16131613+ btrfs_folio_clear_dirty(fs_info, folio, start, len);16141614+ btrfs_folio_set_writeback(fs_info, folio, start, len);16151615+ btrfs_folio_clear_writeback(fs_info, folio, start, len);16221616 return ret;16171617+ }1623161816241619 for (cur = start; cur < start + len; cur += fs_info->sectorsize)16251620 set_bit((cur - folio_start) >> fs_info->sectorsize_bits, &range_bitmap);···16791666 * Here we set writeback and clear for the range. 
If the full folio16801667 * is no longer dirty then we clear the PAGECACHE_TAG_DIRTY tag.16811668 *16821682- * If we hit any error, the corresponding sector will still be dirty16831683- * thus no need to clear PAGECACHE_TAG_DIRTY.16691669+ * If we hit any error, the corresponding sector will have its dirty16701670+ * flag cleared and writeback finished, thus no need to handle the error case.16841671 */16851672 if (!submitted_io && !error) {16861673 btrfs_folio_set_writeback(fs_info, folio, start, len);···18261813 xas_load(&xas);18271814 xas_set_mark(&xas, PAGECACHE_TAG_WRITEBACK);18281815 xas_clear_mark(&xas, PAGECACHE_TAG_DIRTY);18161816+ xas_clear_mark(&xas, PAGECACHE_TAG_TOWRITE);18291817 xas_unlock_irqrestore(&xas, flags);1830181818311819 btrfs_set_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN);
+19-10
fs/btrfs/inode.c
···41894189 return ret;41904190}4191419141924192+static void update_time_after_link_or_unlink(struct btrfs_inode *dir)41934193+{41944194+ struct timespec64 now;41954195+41964196+ /*41974197+ * If we are replaying a log tree, we do not want to update the mtime41984198+ * and ctime of the parent directory with the current time, since the41994199+ * log replay procedure is responsible for setting them to their correct42004200+ * values (the ones it had when the fsync was done).42014201+ */42024202+ if (test_bit(BTRFS_FS_LOG_RECOVERING, &dir->root->fs_info->flags))42034203+ return;42044204+42054205+ now = inode_set_ctime_current(&dir->vfs_inode);42064206+ inode_set_mtime_to_ts(&dir->vfs_inode, now);42074207+}42084208+41924209/*41934210 * unlink helper that gets used here in inode.c and in the tree logging41944211 * recovery code. It remove a link in a directory with a given name, and···43064289 inode_inc_iversion(&inode->vfs_inode);43074290 inode_set_ctime_current(&inode->vfs_inode);43084291 inode_inc_iversion(&dir->vfs_inode);43094309- inode_set_mtime_to_ts(&dir->vfs_inode, inode_set_ctime_current(&dir->vfs_inode));42924292+ update_time_after_link_or_unlink(dir);4310429343114294 return btrfs_update_inode(trans, dir);43124295}···67006683 btrfs_i_size_write(parent_inode, parent_inode->vfs_inode.i_size +67016684 name->len * 2);67026685 inode_inc_iversion(&parent_inode->vfs_inode);67036703- /*67046704- * If we are replaying a log tree, we do not want to update the mtime67056705- * and ctime of the parent directory with the current time, since the67066706- * log replay procedure is responsible for setting them to their correct67076707- * values (the ones it had when the fsync was done).67086708- */67096709- if (!test_bit(BTRFS_FS_LOG_RECOVERING, &root->fs_info->flags))67106710- inode_set_mtime_to_ts(&parent_inode->vfs_inode,67116711- inode_set_ctime_current(&parent_inode->vfs_inode));66866686+ update_time_after_link_or_unlink(parent_inode);6712668767136688 ret = 
btrfs_update_inode(trans, parent_inode);67146689 if (ret)
+18-1
fs/btrfs/subpage.c
···448448449449 spin_lock_irqsave(&bfs->lock, flags);450450 bitmap_set(bfs->bitmaps, start_bit, len >> fs_info->sectorsize_bits);451451+452452+ /*453453+ * Don't clear the TOWRITE tag when starting writeback on a still-dirty454454+ * folio. Doing so can cause WB_SYNC_ALL writepages() to overlook it,455455+ * assume writeback is complete, and exit too early — violating sync456456+ * ordering guarantees.457457+ */451458 if (!folio_test_writeback(folio))452452- folio_start_writeback(folio);459459+ __folio_start_writeback(folio, true);460460+ if (!folio_test_dirty(folio)) {461461+ struct address_space *mapping = folio_mapping(folio);462462+ XA_STATE(xas, &mapping->i_pages, folio->index);463463+ unsigned long flags;464464+465465+ xas_lock_irqsave(&xas, flags);466466+ xas_load(&xas);467467+ xas_clear_mark(&xas, PAGECACHE_TAG_TOWRITE);468468+ xas_unlock_irqrestore(&xas, flags);469469+ }453470 spin_unlock_irqrestore(&bfs->lock, flags);454471}455472
+8-5
fs/btrfs/super.c
···8888 refcount_t refs;8989};90909191+static void btrfs_emit_options(struct btrfs_fs_info *info,9292+ struct btrfs_fs_context *old);9393+9194enum {9295 Opt_acl,9396 Opt_clear_cache,···701698702699 if (!test_bit(BTRFS_FS_STATE_REMOUNTING, &info->fs_state)) {703700 if (btrfs_raw_test_opt(*mount_opt, SPACE_CACHE)) {704704- btrfs_info(info, "disk space caching is enabled");705701 btrfs_warn(info,706702"space cache v1 is being deprecated and will be removed in a future release, please use -o space_cache=v2");707703 }708708- if (btrfs_raw_test_opt(*mount_opt, FREE_SPACE_TREE))709709- btrfs_info(info, "using free-space-tree");710704 }711705712706 return ret;···979979 btrfs_err(fs_info, "open_ctree failed: %d", ret);980980 return ret;981981 }982982+983983+ btrfs_emit_options(fs_info, NULL);982984983985 inode = btrfs_iget(BTRFS_FIRST_FREE_OBJECTID, fs_info->fs_root);984986 if (IS_ERR(inode)) {···14391437{14401438 btrfs_info_if_set(info, old, NODATASUM, "setting nodatasum");14411439 btrfs_info_if_set(info, old, DEGRADED, "allowing degraded mounts");14421442- btrfs_info_if_set(info, old, NODATASUM, "setting nodatasum");14401440+ btrfs_info_if_set(info, old, NODATACOW, "setting nodatacow");14431441 btrfs_info_if_set(info, old, SSD, "enabling ssd optimizations");14441442 btrfs_info_if_set(info, old, SSD_SPREAD, "using spread ssd allocation scheme");14451443 btrfs_info_if_set(info, old, NOBARRIER, "turning off barriers");···14611459 btrfs_info_if_set(info, old, IGNOREMETACSUMS, "ignoring meta csums");14621460 btrfs_info_if_set(info, old, IGNORESUPERFLAGS, "ignoring unknown super block flags");1463146114621462+ btrfs_info_if_unset(info, old, NODATASUM, "setting datasum");14641463 btrfs_info_if_unset(info, old, NODATACOW, "setting datacow");14651464 btrfs_info_if_unset(info, old, SSD, "not using ssd optimizations");14661465 btrfs_info_if_unset(info, old, SSD_SPREAD, "not using spread ssd allocation scheme");14671467- btrfs_info_if_unset(info, old, NOBARRIER, "turning off 
barriers");14661466+ btrfs_info_if_unset(info, old, NOBARRIER, "turning on barriers");14681467 btrfs_info_if_unset(info, old, NOTREELOG, "enabling tree log");14691468 btrfs_info_if_unset(info, old, SPACE_CACHE, "disabling disk space caching");14701469 btrfs_info_if_unset(info, old, FREE_SPACE_TREE, "disabling free space tree");
+99-34
fs/btrfs/zoned.c
···1717#include "accessors.h"1818#include "bio.h"1919#include "transaction.h"2020+#include "sysfs.h"20212122/* Maximum number of zones to report per blkdev_report_zones() call */2223#define BTRFS_REPORT_NR_ZONES 4096···42414342/* Number of superblock log zones */4443#define BTRFS_NR_SB_LOG_ZONES 24444+4545+/* Default number of max active zones when the device has no limits. */4646+#define BTRFS_DEFAULT_MAX_ACTIVE_ZONES 12845474648/*4749 * Minimum of active zones we need:···420416 if (!IS_ALIGNED(nr_sectors, zone_sectors))421417 zone_info->nr_zones++;422418423423- max_active_zones = bdev_max_active_zones(bdev);419419+ max_active_zones = min_not_zero(bdev_max_active_zones(bdev),420420+ bdev_max_open_zones(bdev));421421+ if (!max_active_zones && zone_info->nr_zones > BTRFS_DEFAULT_MAX_ACTIVE_ZONES)422422+ max_active_zones = BTRFS_DEFAULT_MAX_ACTIVE_ZONES;424423 if (max_active_zones && max_active_zones < BTRFS_MIN_ACTIVE_ZONES) {425424 btrfs_err(fs_info,426425"zoned: %s: max active zones %u is too small, need at least %u active zones",···21752168 goto out_unlock;21762169 }2177217021782178- /* No space left */21792179- if (btrfs_zoned_bg_is_full(block_group)) {21802180- ret = false;21812181- goto out_unlock;21712171+ if (block_group->flags & BTRFS_BLOCK_GROUP_DATA) {21722172+ /* The caller should check if the block group is full. */21732173+ if (WARN_ON_ONCE(btrfs_zoned_bg_is_full(block_group))) {21742174+ ret = false;21752175+ goto out_unlock;21762176+ }21772177+ } else {21782178+ /* Since it is already written, it should have been active. 
*/21792179+ WARN_ON_ONCE(block_group->meta_write_pointer != block_group->start);21822180 }2183218121842182 for (i = 0; i < map->num_stripes; i++) {···22422230 struct btrfs_fs_info *fs_info = block_group->fs_info;22432231 const u64 end = block_group->start + block_group->length;22442232 struct extent_buffer *eb;22452245- unsigned long index, start = (block_group->start >> fs_info->sectorsize_bits);22332233+ unsigned long index, start = (block_group->start >> fs_info->nodesize_bits);2246223422472235 rcu_read_lock();22482236 xa_for_each_start(&fs_info->buffer_tree, index, eb, start) {···22552243 rcu_read_lock();22562244 }22572245 rcu_read_unlock();22462246+}22472247+22482248+static int call_zone_finish(struct btrfs_block_group *block_group,22492249+ struct btrfs_io_stripe *stripe)22502250+{22512251+ struct btrfs_device *device = stripe->dev;22522252+ const u64 physical = stripe->physical;22532253+ struct btrfs_zoned_device_info *zinfo = device->zone_info;22542254+ int ret;22552255+22562256+ if (!device->bdev)22572257+ return 0;22582258+22592259+ if (zinfo->max_active_zones == 0)22602260+ return 0;22612261+22622262+ if (btrfs_dev_is_sequential(device, physical)) {22632263+ unsigned int nofs_flags;22642264+22652265+ nofs_flags = memalloc_nofs_save();22662266+ ret = blkdev_zone_mgmt(device->bdev, REQ_OP_ZONE_FINISH,22672267+ physical >> SECTOR_SHIFT,22682268+ zinfo->zone_size >> SECTOR_SHIFT);22692269+ memalloc_nofs_restore(nofs_flags);22702270+22712271+ if (ret)22722272+ return ret;22732273+ }22742274+22752275+ if (!(block_group->flags & BTRFS_BLOCK_GROUP_DATA))22762276+ zinfo->reserved_active_zones++;22772277+ btrfs_dev_clear_active_zone(device, physical);22782278+22792279+ return 0;22582280}2259228122602282static int do_zone_finish(struct btrfs_block_group *block_group, bool fully_written)···23752329 down_read(&dev_replace->rwsem);23762330 map = block_group->physical_map;23772331 for (i = 0; i < map->num_stripes; i++) {23782378- struct btrfs_device *device = 
map->stripes[i].dev;23792379- const u64 physical = map->stripes[i].physical;23802380- struct btrfs_zoned_device_info *zinfo = device->zone_info;23812381- unsigned int nofs_flags;2382233223832383- if (!device->bdev)23842384- continue;23852385-23862386- if (zinfo->max_active_zones == 0)23872387- continue;23882388-23892389- nofs_flags = memalloc_nofs_save();23902390- ret = blkdev_zone_mgmt(device->bdev, REQ_OP_ZONE_FINISH,23912391- physical >> SECTOR_SHIFT,23922392- zinfo->zone_size >> SECTOR_SHIFT);23932393- memalloc_nofs_restore(nofs_flags);23942394-23332333+ ret = call_zone_finish(block_group, &map->stripes[i]);23952334 if (ret) {23962335 up_read(&dev_replace->rwsem);23972336 return ret;23982337 }23992399-24002400- if (!(block_group->flags & BTRFS_BLOCK_GROUP_DATA))24012401- zinfo->reserved_active_zones++;24022402- btrfs_dev_clear_active_zone(device, physical);24032338 }24042339 up_read(&dev_replace->rwsem);24052340···25312504void btrfs_zoned_reserve_data_reloc_bg(struct btrfs_fs_info *fs_info)25322505{25332506 struct btrfs_space_info *data_sinfo = fs_info->data_sinfo;25342534- struct btrfs_space_info *space_info = data_sinfo->sub_group[0];25072507+ struct btrfs_space_info *space_info = data_sinfo;25352508 struct btrfs_trans_handle *trans;25362509 struct btrfs_block_group *bg;25372510 struct list_head *bg_list;25382511 u64 alloc_flags;25392539- bool initial = false;25122512+ bool first = true;25402513 bool did_chunk_alloc = false;25412514 int index;25422515 int ret;···25502523 if (sb_rdonly(fs_info->sb))25512524 return;2552252525532553- ASSERT(space_info->subgroup_id == BTRFS_SUB_GROUP_DATA_RELOC);25542526 alloc_flags = btrfs_get_alloc_profile(fs_info, space_info->flags);25552527 index = btrfs_bg_flags_to_raid_index(alloc_flags);2556252825572557- bg_list = &data_sinfo->block_groups[index];25292529+ /* Scan the data space_info to find empty block groups. Take the second one. 
*/25582530again:25312531+ bg_list = &space_info->block_groups[index];25592532 list_for_each_entry(bg, bg_list, list) {25602560- if (bg->used > 0)25332533+ if (bg->alloc_offset != 0)25612534 continue;2562253525632563- if (!initial) {25642564- initial = true;25362536+ if (first) {25372537+ first = false;25652538 continue;25392539+ }25402540+25412541+ if (space_info == data_sinfo) {25422542+ /* Migrate the block group to the data relocation space_info. */25432543+ struct btrfs_space_info *reloc_sinfo = data_sinfo->sub_group[0];25442544+ int factor;25452545+25462546+ ASSERT(reloc_sinfo->subgroup_id == BTRFS_SUB_GROUP_DATA_RELOC);25472547+ factor = btrfs_bg_type_to_factor(bg->flags);25482548+25492549+ down_write(&space_info->groups_sem);25502550+ list_del_init(&bg->list);25512551+ /* We can assume this as we choose the second empty one. */25522552+ ASSERT(!list_empty(&space_info->block_groups[index]));25532553+ up_write(&space_info->groups_sem);25542554+25552555+ spin_lock(&space_info->lock);25562556+ space_info->total_bytes -= bg->length;25572557+ space_info->disk_total -= bg->length * factor;25582558+ /* There is no allocation ever happened. */25592559+ ASSERT(bg->used == 0);25602560+ ASSERT(bg->zone_unusable == 0);25612561+ /* No super block in a block group on the zoned setup. */25622562+ ASSERT(bg->bytes_super == 0);25632563+ spin_unlock(&space_info->lock);25642564+25652565+ bg->space_info = reloc_sinfo;25662566+ if (reloc_sinfo->block_group_kobjs[index] == NULL)25672567+ btrfs_sysfs_add_block_group_type(bg);25682568+25692569+ btrfs_add_bg_to_space_info(fs_info, bg);25662570 }2567257125682572 fs_info->data_reloc_bg = bg->start;···26102552 if (IS_ERR(trans))26112553 return;2612255425552555+ /* Allocate new BG in the data relocation space_info. 
*/25562556+ space_info = data_sinfo->sub_group[0];25572557+ ASSERT(space_info->subgroup_id == BTRFS_SUB_GROUP_DATA_RELOC);26132558 ret = btrfs_chunk_alloc(trans, space_info, alloc_flags, CHUNK_ALLOC_FORCE);26142559 btrfs_end_transaction(trans);26152560 if (ret == 1) {25612561+ /*25622562+ * We allocated a new block group in the data relocation space_info. We25632563+ * can take that one.25642564+ */25652565+ first = false;26162566 did_chunk_alloc = true;26172617- bg_list = &space_info->block_groups[index];26182567 goto again;26192568 }26202569}
···393393 /* Reserved GDT blocks */394394 if (!ext4_has_feature_meta_bg(sb) || metagroup < first_meta_bg) {395395 len = le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks);396396+397397+ /*398398+ * mkfs.ext4 can set s_reserved_gdt_blocks as 0 in some cases,399399+ * check for that.400400+ */401401+ if (!len)402402+ return 0;403403+396404 error = ext4_getfsmap_fill(meta_list, fsb, len,397405 EXT4_FMR_OWN_RESV_GDT);398406 if (error)···534526 ext4_group_t end_ag;535527 ext4_grpblk_t first_cluster;536528 ext4_grpblk_t last_cluster;529529+ struct ext4_fsmap irec;537530 int error = 0;538531539532 bofs = le32_to_cpu(sbi->s_es->s_first_data_block);···618609 goto err;619610 }620611621621- /* Report any gaps at the end of the bg */612612+ /*613613+ * The dummy record below will cause ext4_getfsmap_helper() to report614614+ * any allocated blocks at the end of the range.615615+ */616616+ irec.fmr_device = 0;617617+ irec.fmr_physical = end_fsb + 1;618618+ irec.fmr_length = 0;619619+ irec.fmr_owner = EXT4_FMR_OWN_FREE;620620+ irec.fmr_flags = 0;621621+622622 info->gfi_last = true;623623- error = ext4_getfsmap_datadev_helper(sb, end_ag, last_cluster + 1,624624- 0, info);623623+ error = ext4_getfsmap_helper(sb, info, &irec);625624 if (error)626625 goto err;627626
+2-2
fs/ext4/indirect.c
···539539 int indirect_blks;540540 int blocks_to_boundary = 0;541541 int depth;542542- int count = 0;542542+ u64 count = 0;543543 ext4_fsblk_t first_block = 0;544544545545 trace_ext4_ind_map_blocks_enter(inode, map->m_lblk, map->m_len, flags);···588588 count++;589589 /* Fill in size of a hole we found */590590 map->m_pblk = 0;591591- map->m_len = min_t(unsigned int, map->m_len, count);591591+ map->m_len = umin(map->m_len, count);592592 goto cleanup;593593 }594594
+2-2
fs/ext4/inode.c
···146146 */147147int ext4_inode_is_fast_symlink(struct inode *inode)148148{149149- if (!(EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL)) {149149+ if (!ext4_has_feature_ea_inode(inode->i_sb)) {150150 int ea_blocks = EXT4_I(inode)->i_file_acl ?151151 EXT4_CLUSTER_SIZE(inode->i_sb) >> 9 : 0;152152···31553155 folio_unlock(folio);31563156 folio_put(folio);31573157 /*31583158- * block_write_begin may have instantiated a few blocks31583158+ * ext4_block_write_begin may have instantiated a few blocks31593159 * outside i_size. Trim these off again. Don't need31603160 * i_size_read because we hold inode lock.31613161 */
···547547 * first page of the bio. Otherwise it can deadlock.548548 */549549 if (io->io_bio)550550- gfp_flags = GFP_NOWAIT | __GFP_NOWARN;550550+ gfp_flags = GFP_NOWAIT;551551 retry_encrypt:552552 bounce_page = fscrypt_encrypt_pagecache_blocks(folio,553553 enc_bytes, 0, gfp_flags);
+8-4
fs/ext4/super.c
···268268void ext4_sb_breadahead_unmovable(struct super_block *sb, sector_t block)269269{270270 struct buffer_head *bh = bdev_getblk(sb->s_bdev, block,271271- sb->s_blocksize, GFP_NOWAIT | __GFP_NOWARN);271271+ sb->s_blocksize, GFP_NOWAIT);272272273273 if (likely(bh)) {274274 if (trylock_buffer(bh))···19981998 fc->fs_private = ctx;19991999 fc->ops = &ext4_context_ops;2000200020012001+ /* i_version is always enabled now */20022002+ fc->sb_flags |= SB_I_VERSION;20032003+20012004 return 0;20022005}20032006···29782975 SEQ_OPTS_PRINT("min_batch_time=%u", sbi->s_min_batch_time);29792976 if (nodefs || sbi->s_max_batch_time != EXT4_DEF_MAX_BATCH_TIME)29802977 SEQ_OPTS_PRINT("max_batch_time=%u", sbi->s_max_batch_time);29782978+ if (nodefs && sb->s_flags & SB_I_VERSION)29792979+ SEQ_OPTS_PUTS("i_version");29812980 if (nodefs || sbi->s_stripe)29822981 SEQ_OPTS_PRINT("stripe=%lu", sbi->s_stripe);29832982 if (nodefs || EXT4_MOUNT_DATA_FLAGS &···53195314 sb->s_flags = (sb->s_flags & ~SB_POSIXACL) |53205315 (test_opt(sb, POSIX_ACL) ? SB_POSIXACL : 0);5321531653225322- /* i_version is always enabled now */53235323- sb->s_flags |= SB_I_VERSION;53245324-53255317 /* HSM events are allowed by default. */53265318 sb->s_iflags |= SB_I_ALLOW_HSM;53275319···54165414 err = ext4_load_and_init_journal(sb, es, ctx);54175415 if (err)54185416 goto failed_mount3a;54175417+ if (bdev_read_only(sb->s_bdev))54185418+ needs_recovery = 0;54195419 } else if (test_opt(sb, NOLOAD) && !sb_rdonly(sb) &&54205420 ext4_has_feature_journal_needs_recovery(sb)) {54215421 ext4_msg(sb, KERN_ERR, "required journal recovery "
+1-1
fs/fhandle.c
···402402 if (retval)403403 return retval;404404405405- CLASS(get_unused_fd, fd)(O_CLOEXEC);405405+ CLASS(get_unused_fd, fd)(open_flag);406406 if (fd < 0)407407 return fd;408408
+5-4
fs/fs-writeback.c
···26082608 wakeup_bdi = inode_io_list_move_locked(inode, wb,26092609 dirty_list);2610261026112611- spin_unlock(&wb->list_lock);26122612- spin_unlock(&inode->i_lock);26132613- trace_writeback_dirty_inode_enqueue(inode);26142614-26152611 /*26162612 * If this is the first dirty inode for this bdi,26172613 * we have to wake-up the corresponding bdi thread···26172621 if (wakeup_bdi &&26182622 (wb->bdi->capabilities & BDI_CAP_WRITEBACK))26192623 wb_wakeup_delayed(wb);26242624+26252625+ spin_unlock(&wb->list_lock);26262626+ spin_unlock(&inode->i_lock);26272627+ trace_writeback_dirty_inode_enqueue(inode);26282628+26202629 return;26212630 }26222631 }
-5
fs/fuse/inode.c
···289289 }290290 }291291292292- if (attr->blksize != 0)293293- inode->i_blkbits = ilog2(attr->blksize);294294- else295295- inode->i_blkbits = inode->i_sb->s_blocksize_bits;296296-297292 /*298293 * Don't set the sticky bit in i_mode, unless we want the VFS299294 * to check permissions. This prevents failures due to the
+7-7
fs/iomap/direct-io.c
···363363 if (iomap->flags & IOMAP_F_SHARED)364364 dio->flags |= IOMAP_DIO_COW;365365366366- if (iomap->flags & IOMAP_F_NEW) {366366+ if (iomap->flags & IOMAP_F_NEW)367367 need_zeroout = true;368368- } else if (iomap->type == IOMAP_MAPPED) {369369- if (iomap_dio_can_use_fua(iomap, dio))370370- bio_opf |= REQ_FUA;371371- else372372- dio->flags &= ~IOMAP_DIO_WRITE_THROUGH;373373- }368368+ else if (iomap->type == IOMAP_MAPPED &&369369+ iomap_dio_can_use_fua(iomap, dio))370370+ bio_opf |= REQ_FUA;371371+372372+ if (!(bio_opf & REQ_FUA))373373+ dio->flags &= ~IOMAP_DIO_WRITE_THROUGH;374374375375 /*376376 * We can only do deferred completion for pure overwrites that
···111111 return;112112 }113113 if (IS_MNT_SHARED(mnt)) {114114- m = propagation_source(mnt);114114+ if (type == MS_SLAVE || !hlist_empty(&mnt->mnt_slave_list))115115+ m = propagation_source(mnt);115116 if (list_empty(&mnt->mnt_share)) {116117 mnt_release_group_id(mnt);117118 } else {···638637 }639638640639 // now to_umount consists of all acceptable candidates641641- // deal with reparenting of remaining overmounts on those640640+ // deal with reparenting of surviving overmounts on those642641 list_for_each_entry(m, &to_umount, mnt_list) {643643- if (m->overmount)644644- reparent(m->overmount);642642+ struct mount *over = m->overmount;643643+ if (over && !will_be_unmounted(over))644644+ reparent(over);645645 }646646647647 // and fold them into the set
···145145#endif /* CONFIG_CIFS_NFSD_EXPORT */146146147147/* when changing internal version - update following two lines at same time */148148-#define SMB3_PRODUCT_BUILD 55149149-#define CIFS_VERSION "2.55"148148+#define SMB3_PRODUCT_BUILD 56149149+#define CIFS_VERSION "2.56"150150#endif /* _CIFSFS_H */
+21
fs/smb/client/cifsglob.h
···17321732 int mid_rc; /* rc for MID_RC */17331733 __le16 command; /* smb command code */17341734 unsigned int optype; /* operation type */17351735+ spinlock_t mid_lock;17351736 bool wait_cancelled:1; /* Cancelled while waiting for response */17361737 bool deleted_from_q:1; /* Whether Mid has been dequeued from pending_mid_q */17371738 bool large_buf:1; /* if valid response, is pointer to large buf */···20372036 * cifsFileInfo->file_info_lock cifsFileInfo->count cifs_new_fileinfo20382037 * ->invalidHandle initiate_cifs_search20392038 * ->oplock_break_cancelled20392039+ * mid_q_entry->mid_lock mid_q_entry->callback alloc_mid20402040+ * smb2_mid_entry_alloc20412041+ * (Any fields of mid_q_entry that will need protection)20402042 ****************************************************************************/2041204320422044#ifdef DECLARE_GLOBALS_HERE···23772373 }23782374 }23792375 return ret;23762376+}23772377+23782378+/*23792379+ * Execute mid callback atomically - ensures callback runs exactly once23802380+ * and prevents sleeping in atomic context.23812381+ */23822382+static inline void mid_execute_callback(struct mid_q_entry *mid)23832383+{23842384+ void (*callback)(struct mid_q_entry *mid);23852385+23862386+ spin_lock(&mid->mid_lock);23872387+ callback = mid->callback;23882388+ mid->callback = NULL; /* Mark as executed. */23892389+ spin_unlock(&mid->mid_lock);23902390+23912391+ if (callback)23922392+ callback(mid);23802393}2381239423822395#define CIFS_REPARSE_SUPPORT(tcon) \
+9-10
fs/smb/client/cifstransport.c
···4646 temp = mempool_alloc(cifs_mid_poolp, GFP_NOFS);4747 memset(temp, 0, sizeof(struct mid_q_entry));4848 kref_init(&temp->refcount);4949+ spin_lock_init(&temp->mid_lock);4950 temp->mid = get_mid(smb_buffer);5051 temp->pid = current->pid;5152 temp->command = cpu_to_le16(smb_buffer->Command);···346345 rc = wait_for_response(server, midQ);347346 if (rc != 0) {348347 send_cancel(server, &rqst, midQ);349349- spin_lock(&server->mid_queue_lock);350350- if (midQ->mid_state == MID_REQUEST_SUBMITTED ||351351- midQ->mid_state == MID_RESPONSE_RECEIVED) {348348+ spin_lock(&midQ->mid_lock);349349+ if (midQ->callback) {352350 /* no longer considered to be "in-flight" */353351 midQ->callback = release_mid;354354- spin_unlock(&server->mid_queue_lock);352352+ spin_unlock(&midQ->mid_lock);355353 add_credits(server, &credits, 0);356354 return rc;357355 }358358- spin_unlock(&server->mid_queue_lock);356356+ spin_unlock(&midQ->mid_lock);359357 }360358361359 rc = cifs_sync_mid_result(midQ, server);···527527 rc = wait_for_response(server, midQ);528528 if (rc) {529529 send_cancel(server, &rqst, midQ);530530- spin_lock(&server->mid_queue_lock);531531- if (midQ->mid_state == MID_REQUEST_SUBMITTED ||532532- midQ->mid_state == MID_RESPONSE_RECEIVED) {530530+ spin_lock(&midQ->mid_lock);531531+ if (midQ->callback) {533532 /* no longer considered to be "in-flight" */534533 midQ->callback = release_mid;535535- spin_unlock(&server->mid_queue_lock);534534+ spin_unlock(&midQ->mid_lock);536535 return rc;537536 }538538- spin_unlock(&server->mid_queue_lock);537537+ spin_unlock(&midQ->mid_lock);539538 }540539541540 /* We got the response - restart system call. */
+16-45
fs/smb/client/compress.c
···155155}156156157157/*158158- * TODO:159159- * Support other iter types, if required.160160- * Only ITER_XARRAY is supported for now.158158+ * Collect some 2K samples with 2K gaps between.161159 */162162-static int collect_sample(const struct iov_iter *iter, ssize_t max, u8 *sample)160160+static int collect_sample(const struct iov_iter *source, ssize_t max, u8 *sample)163161{164164- struct folio *folios[16], *folio;165165- unsigned int nr, i, j, npages;166166- loff_t start = iter->xarray_start + iter->iov_offset;167167- pgoff_t last, index = start / PAGE_SIZE;168168- size_t len, off, foff;169169- void *p;170170- int s = 0;162162+ struct iov_iter iter = *source;163163+ size_t s = 0;171164172172- last = (start + max - 1) / PAGE_SIZE;173173- do {174174- nr = xa_extract(iter->xarray, (void **)folios, index, last, ARRAY_SIZE(folios),175175- XA_PRESENT);176176- if (nr == 0)177177- return -EIO;165165+ while (iov_iter_count(&iter) >= SZ_2K) {166166+ size_t part = umin(umin(iov_iter_count(&iter), SZ_2K), max);167167+ size_t n;178168179179- for (i = 0; i < nr; i++) {180180- folio = folios[i];181181- npages = folio_nr_pages(folio);182182- foff = start - folio_pos(folio);183183- off = foff % PAGE_SIZE;169169+ n = copy_from_iter(sample + s, part, &iter);170170+ if (n != part)171171+ return -EFAULT;184172185185- for (j = foff / PAGE_SIZE; j < npages; j++) {186186- size_t len2;173173+ s += n;174174+ max -= n;187175188188- len = min_t(size_t, max, PAGE_SIZE - off);189189- len2 = min_t(size_t, len, SZ_2K);176176+ if (iov_iter_count(&iter) < PAGE_SIZE - SZ_2K)177177+ break;190178191191- p = kmap_local_page(folio_page(folio, j));192192- memcpy(&sample[s], p, len2);193193- kunmap_local(p);194194-195195- s += len2;196196-197197- if (len2 < SZ_2K || s >= max - SZ_2K)198198- return s;199199-200200- max -= len;201201- if (max <= 0)202202- return s;203203-204204- start += len;205205- off = 0;206206- index++;207207- }208208- }209209- } while (nr == ARRAY_SIZE(folios));179179+ 
iov_iter_advance(&iter, SZ_2K);180180+ }210181211182 return s;212183}
+4-5
fs/smb/client/connect.c
···335335 cifs_dbg(FYI, "%s: issuing mid callbacks\n", __func__);336336 list_for_each_entry_safe(mid, nmid, &retry_list, qhead) {337337 list_del_init(&mid->qhead);338338- mid->callback(mid);338338+ mid_execute_callback(mid);339339 release_mid(mid);340340 }341341···919919 list_del_init(&mid->qhead);920920 mid->mid_rc = mid_rc;921921 mid->mid_state = MID_RC;922922- mid->callback(mid);922922+ mid_execute_callback(mid);923923 release_mid(mid);924924 }925925···11171117 mid_entry = list_entry(tmp, struct mid_q_entry, qhead);11181118 cifs_dbg(FYI, "Callback mid %llu\n", mid_entry->mid);11191119 list_del_init(&mid_entry->qhead);11201120- mid_entry->callback(mid_entry);11201120+ mid_execute_callback(mid_entry);11211121 release_mid(mid_entry);11221122 }11231123 /* 1/8th of sec is more than enough time for them to exit */···13941394 }1395139513961396 if (!mids[i]->multiRsp || mids[i]->multiEnd)13971397- mids[i]->callback(mids[i]);13971397+ mid_execute_callback(mids[i]);1398139813991399 release_mid(mids[i]);14001400 } else if (server->ops->is_oplock_break &&···42054205 return 0;42064206 }4207420742084208- server->lstrp = jiffies;42094208 server->tcpStatus = CifsInNegotiate;42104209 server->neg_start = jiffies;42114210 spin_unlock(&server->srv_lock);
+32-2
fs/smb/client/inode.c
···19431943 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);19441944 struct tcon_link *tlink;19451945 struct cifs_tcon *tcon;19461946+ __u32 dosattr = 0, origattr = 0;19461947 struct TCP_Server_Info *server;19471948 struct iattr *attrs = NULL;19481948- __u32 dosattr = 0, origattr = 0;19491949+ bool rehash = false;1949195019501951 cifs_dbg(FYI, "cifs_unlink, dir=0x%p, dentry=0x%p\n", dir, dentry);1951195219521953 if (unlikely(cifs_forced_shutdown(cifs_sb)))19531954 return -EIO;19551955+19561956+ /* Unhash dentry in advance to prevent any concurrent opens */19571957+ spin_lock(&dentry->d_lock);19581958+ if (!d_unhashed(dentry)) {19591959+ __d_drop(dentry);19601960+ rehash = true;19611961+ }19621962+ spin_unlock(&dentry->d_lock);1954196319551964 tlink = cifs_sb_tlink(cifs_sb);19561965 if (IS_ERR(tlink))···20122003 cifs_drop_nlink(inode);20132004 }20142005 } else if (rc == -ENOENT) {20152015- d_drop(dentry);20062006+ if (simple_positive(dentry))20072007+ d_delete(dentry);20162008 } else if (rc == -EBUSY) {20172009 if (server->ops->rename_pending_delete) {20182010 rc = server->ops->rename_pending_delete(full_path,···20662056 kfree(attrs);20672057 free_xid(xid);20682058 cifs_put_tlink(tlink);20592059+ if (rehash)20602060+ d_rehash(dentry);20692061 return rc;20702062}20712063···24742462 struct cifs_sb_info *cifs_sb;24752463 struct tcon_link *tlink;24762464 struct cifs_tcon *tcon;24652465+ bool rehash = false;24772466 unsigned int xid;24782467 int rc, tmprc;24792468 int retry_count = 0;···24892476 cifs_sb = CIFS_SB(source_dir->i_sb);24902477 if (unlikely(cifs_forced_shutdown(cifs_sb)))24912478 return -EIO;24792479+24802480+ /*24812481+ * Prevent any concurrent opens on the target by unhashing the dentry.24822482+ * VFS already unhashes the target when renaming directories.24832483+ */24842484+ if (d_is_positive(target_dentry) && !d_is_dir(target_dentry)) {24852485+ if (!d_unhashed(target_dentry)) {24862486+ d_drop(target_dentry);24872487+ rehash = true;24882488+ }24892489+ 
}2492249024932491 tlink = cifs_sb_tlink(cifs_sb);24942492 if (IS_ERR(tlink))···25422518 }25432519 }2544252025212521+ if (!rc)25222522+ rehash = false;25452523 /*25462524 * No-replace is the natural behavior for CIFS, so skip unlink hacks.25472525 */···26022576 goto cifs_rename_exit;26032577 rc = cifs_do_rename(xid, source_dentry, from_name,26042578 target_dentry, to_name);25792579+ if (!rc)25802580+ rehash = false;26052581 }2606258226072583 /* force revalidate to go get info when needed */26082584 CIFS_I(source_dir)->time = CIFS_I(target_dir)->time = 0;2609258526102586cifs_rename_exit:25872587+ if (rehash)25882588+ d_rehash(target_dentry);26112589 kfree(info_buf_source);26122590 free_dentry_path(page2);26132591 free_dentry_path(page1);
+12-3
fs/smb/client/smb2ops.c
···772772 bytes_left -= sizeof(*p);773773 break;774774 }775775+ /* Validate that Next doesn't point beyond the buffer */776776+ if (next > bytes_left) {777777+ cifs_dbg(VFS, "%s: invalid Next pointer %zu > %zd\n",778778+ __func__, next, bytes_left);779779+ rc = -EINVAL;780780+ goto out;781781+ }775782 p = (struct network_interface_info_ioctl_rsp *)((u8 *)p+next);776783 bytes_left -= next;777784 }···790783 }791784792785 /* Azure rounds the buffer size up 8, to a 16 byte boundary */793793- if ((bytes_left > 8) || p->Next)786786+ if ((bytes_left > 8) ||787787+ (bytes_left >= offsetof(struct network_interface_info_ioctl_rsp, Next)788788+ + sizeof(p->Next) && p->Next))794789 cifs_dbg(VFS, "%s: incomplete interface info\n", __func__);795790796791 ses->iface_last_update = jiffies;···48144805 dw->server->ops->is_network_name_deleted(dw->buf,48154806 dw->server);4816480748174817- mid->callback(mid);48084808+ mid_execute_callback(mid);48184809 } else {48194810 spin_lock(&dw->server->srv_lock);48204811 if (dw->server->tcpStatus == CifsNeedReconnect) {···48224813 mid->mid_state = MID_RETRY_NEEDED;48234814 spin_unlock(&dw->server->mid_queue_lock);48244815 spin_unlock(&dw->server->srv_lock);48254825- mid->callback(mid);48164816+ mid_execute_callback(mid);48264817 } else {48274818 spin_lock(&dw->server->mid_queue_lock);48284819 mid->mid_state = MID_REQUEST_SUBMITTED;
···13371337 log_rdma_event(INFO, "cancelling idle timer\n");13381338 cancel_delayed_work_sync(&info->idle_timer_work);1339133913401340- log_rdma_event(INFO, "wait for all send posted to IB to finish\n");13411341- wait_event(info->wait_send_pending,13421342- atomic_read(&info->send_pending) == 0);13431343-13441340 /* It's not possible for upper layer to get to reassembly */13451341 log_rdma_event(INFO, "drain the reassembly queue\n");13461342 do {···19821986 */1983198719841988 wait_event(info->wait_send_pending,19851985- atomic_read(&info->send_pending) == 0);19891989+ atomic_read(&info->send_pending) == 0 ||19901990+ sc->status != SMBDIRECT_SOCKET_CONNECTED);19911991+19921992+ if (sc->status != SMBDIRECT_SOCKET_CONNECTED && rc == 0)19931993+ rc = -EAGAIN;1986199419871995 return rc;19881996}
···4646 struct mutex srv_mutex;4747 int status;4848 unsigned int cli_cap;4949- __be32 inet_addr;4949+ union {5050+ __be32 inet_addr;5151+#if IS_ENABLED(CONFIG_IPV6)5252+ u8 inet6_addr[16];5353+#endif5454+ };5055 char *request_buf;5156 struct ksmbd_transport *transport;5257 struct nls_table *local_nls;
+10-3
fs/smb/server/oplock.c
···11021102 if (!atomic_inc_not_zero(&opinfo->refcount))11031103 continue;1104110411051105- if (ksmbd_conn_releasing(opinfo->conn))11051105+ if (ksmbd_conn_releasing(opinfo->conn)) {11061106+ opinfo_put(opinfo);11061107 continue;11081108+ }1107110911081110 oplock_break(opinfo, SMB2_OPLOCK_LEVEL_NONE, NULL);11091111 opinfo_put(opinfo);···11411139 if (!atomic_inc_not_zero(&opinfo->refcount))11421140 continue;1143114111441144- if (ksmbd_conn_releasing(opinfo->conn))11421142+ if (ksmbd_conn_releasing(opinfo->conn)) {11431143+ opinfo_put(opinfo);11451144 continue;11451145+ }11461146+11461147 oplock_break(opinfo, SMB2_OPLOCK_LEVEL_NONE, NULL);11471148 opinfo_put(opinfo);11481149 }···13481343 if (!atomic_inc_not_zero(&brk_op->refcount))13491344 continue;1350134513511351- if (ksmbd_conn_releasing(brk_op->conn))13461346+ if (ksmbd_conn_releasing(brk_op->conn)) {13471347+ opinfo_put(brk_op);13521348 continue;13491349+ }1353135013541351 if (brk_op->is_lease && (brk_op->o_lease->state &13551352 (~(SMB2_LEASE_READ_CACHING_LE |
···219219 else if (XFS_INO_TO_AGNO(mp, breq->startino) < hdr->agno)220220 return -EINVAL;221221222222- breq->flags |= XFS_IBULK_SAME_AG;222222+ breq->iwalk_flags |= XFS_IWALK_SAME_AG;223223224224 /* Asking for an inode past the end of the AG? We're done! */225225 if (XFS_INO_TO_AGNO(mp, breq->startino) > hdr->agno)
+3-2
fs/xfs/xfs_iops.c
···616616 * write of exactly one single fsblock if the bdev will make that617617 * guarantee for us.618618 */619619- if (xfs_inode_can_hw_atomic_write(ip) || xfs_can_sw_atomic_write(mp))619619+ if (xfs_inode_can_hw_atomic_write(ip) ||620620+ xfs_inode_can_sw_atomic_write(ip))620621 return mp->m_sb.sb_blocksize;621622622623 return 0;···634633 * write of exactly one single fsblock if the bdev will make that635634 * guarantee for us.636635 */637637- if (!xfs_can_sw_atomic_write(mp)) {636636+ if (!xfs_inode_can_sw_atomic_write(ip)) {638637 if (xfs_inode_can_hw_atomic_write(ip))639638 return mp->m_sb.sb_blocksize;640639 return 0;
+2-6
fs/xfs/xfs_itable.c
···307307 .breq = breq,308308 };309309 struct xfs_trans *tp;310310- unsigned int iwalk_flags = 0;311310 int error;312311313312 if (breq->idmap != &nop_mnt_idmap) {···327328 * locking abilities to detect cycles in the inobt without deadlocking.328329 */329330 tp = xfs_trans_alloc_empty(breq->mp);330330- if (breq->flags & XFS_IBULK_SAME_AG)331331- iwalk_flags |= XFS_IWALK_SAME_AG;332332-333333- error = xfs_iwalk(breq->mp, tp, breq->startino, iwalk_flags,331331+ error = xfs_iwalk(breq->mp, tp, breq->startino, breq->iwalk_flags,334332 xfs_bulkstat_iwalk, breq->icount, &bc);335333 xfs_trans_cancel(tp);336334 kfree(bc.buf);···453457 * locking abilities to detect cycles in the inobt without deadlocking.454458 */455459 tp = xfs_trans_alloc_empty(breq->mp);456456- error = xfs_inobt_walk(breq->mp, tp, breq->startino, breq->flags,460460+ error = xfs_inobt_walk(breq->mp, tp, breq->startino, breq->iwalk_flags,457461 xfs_inumbers_walk, breq->icount, &ic);458462 xfs_trans_cancel(tp);459463
+4-6
fs/xfs/xfs_itable.h
···1313 xfs_ino_t startino; /* start with this inode */1414 unsigned int icount; /* number of elements in ubuffer */1515 unsigned int ocount; /* number of records returned */1616- unsigned int flags; /* see XFS_IBULK_FLAG_* */1616+ unsigned int flags; /* XFS_IBULK_FLAG_* */1717+ unsigned int iwalk_flags; /* XFS_IWALK_FLAG_* */1718};18191919-/* Only iterate within the same AG as startino */2020-#define XFS_IBULK_SAME_AG (1U << 0)2121-2220/* Fill out the bs_extents64 field if set. */2323-#define XFS_IBULK_NREXT64 (1U << 1)2121+#define XFS_IBULK_NREXT64 (1U << 0)24222523/* Signal that we can return metadata directories. */2626-#define XFS_IBULK_METADIR (1U << 2)2424+#define XFS_IBULK_METADIR (1U << 1)27252826/*2927 * Advance the user buffer pointer by one record of the given size. If the
+19
fs/xfs/xfs_mount.c
···779779 return -EINVAL;780780 }781781782782+ if (xfs_has_reflink(mp))783783+ goto set_limit;784784+785785+ if (new_max_fsbs == 1) {786786+ if (mp->m_ddev_targp->bt_awu_max ||787787+ (mp->m_rtdev_targp && mp->m_rtdev_targp->bt_awu_max)) {788788+ } else {789789+ xfs_warn(mp,790790+ "cannot support atomic writes of size %lluk with no reflink or HW support",791791+ new_max_bytes >> 10);792792+ return -EINVAL;793793+ }794794+ } else {795795+ xfs_warn(mp,796796+ "cannot support atomic writes of size %lluk with no reflink support",797797+ new_max_bytes >> 10);798798+ return -EINVAL;799799+ }800800+782801set_limit:783802 error = xfs_calc_atomic_write_reservation(mp, new_max_fsbs);784803 if (error) {
···166166static void167167xfs_zone_record_blocks(168168 struct xfs_trans *tp,169169- xfs_fsblock_t fsbno,170170- xfs_filblks_t len,171169 struct xfs_open_zone *oz,172172- bool used)170170+ xfs_fsblock_t fsbno,171171+ xfs_filblks_t len)173172{174173 struct xfs_mount *mp = tp->t_mountp;175174 struct xfs_rtgroup *rtg = oz->oz_rtg;···178179179180 xfs_rtgroup_lock(rtg, XFS_RTGLOCK_RMAP);180181 xfs_rtgroup_trans_join(tp, rtg, XFS_RTGLOCK_RMAP);181181- if (used) {182182- rmapip->i_used_blocks += len;183183- ASSERT(rmapip->i_used_blocks <= rtg_blocks(rtg));184184- } else {185185- xfs_add_frextents(mp, len);186186- }182182+ rmapip->i_used_blocks += len;183183+ ASSERT(rmapip->i_used_blocks <= rtg_blocks(rtg));187184 oz->oz_written += len;188185 if (oz->oz_written == rtg_blocks(rtg))189186 xfs_open_zone_mark_full(oz);190187 xfs_trans_log_inode(tp, rmapip, XFS_ILOG_CORE);188188+}189189+190190+/*191191+ * Called for blocks that have been written to disk, but not actually linked to192192+ * an inode, which can happen when garbage collection races with user data193193+ * writes to a file.194194+ */195195+static void196196+xfs_zone_skip_blocks(197197+ struct xfs_open_zone *oz,198198+ xfs_filblks_t len)199199+{200200+ struct xfs_rtgroup *rtg = oz->oz_rtg;201201+202202+ trace_xfs_zone_skip_blocks(oz, 0, len);203203+204204+ xfs_rtgroup_lock(rtg, XFS_RTGLOCK_RMAP);205205+ oz->oz_written += len;206206+ if (oz->oz_written == rtg_blocks(rtg))207207+ xfs_open_zone_mark_full(oz);208208+ xfs_rtgroup_unlock(rtg, XFS_RTGLOCK_RMAP);209209+210210+ xfs_add_frextents(rtg_mount(rtg), len);191211}192212193213static int···268250 }269251 }270252271271- xfs_zone_record_blocks(tp, new->br_startblock, new->br_blockcount, oz,272272- true);253253+ xfs_zone_record_blocks(tp, oz, new->br_startblock, new->br_blockcount);273254274255 /* Map the new blocks into the data fork. 
*/275256 xfs_bmap_map_extent(tp, ip, XFS_DATA_FORK, new);···276259277260skip:278261 trace_xfs_reflink_cow_remap_skip(ip, new);279279- xfs_zone_record_blocks(tp, new->br_startblock, new->br_blockcount, oz,280280- false);262262+ xfs_zone_skip_blocks(oz, new->br_blockcount);281263 return 0;282264}283265
+48
include/drm/drm_bridge.h
···866866 struct drm_connector *connector,867867 bool enable, int direction);868868869869+ /**870870+ * @hdmi_cec_init:871871+ *872872+ * Initialize CEC part of the bridge.873873+ *874874+ * This callback is optional, it can be implemented by bridges that875875+ * set the @DRM_BRIDGE_OP_HDMI_CEC_ADAPTER flag in their876876+ * &drm_bridge->ops.877877+ *878878+ * Returns:879879+ * 0 on success, a negative error code otherwise880880+ */869881 int (*hdmi_cec_init)(struct drm_bridge *bridge,870882 struct drm_connector *connector);871883884884+ /**885885+ * @hdmi_cec_enable:886886+ *887887+ * Enable or disable the CEC adapter inside the bridge.888888+ *889889+ * This callback is optional, it can be implemented by bridges that890890+ * set the @DRM_BRIDGE_OP_HDMI_CEC_ADAPTER flag in their891891+ * &drm_bridge->ops.892892+ *893893+ * Returns:894894+ * 0 on success, a negative error code otherwise895895+ */872896 int (*hdmi_cec_enable)(struct drm_bridge *bridge, bool enable);873897898898+ /**899899+ * @hdmi_cec_log_addr:900900+ *901901+ * Set the logical address of the CEC adapter inside the bridge.902902+ *903903+ * This callback is optional, it can be implemented by bridges that904904+ * set the @DRM_BRIDGE_OP_HDMI_CEC_ADAPTER flag in their905905+ * &drm_bridge->ops.906906+ *907907+ * Returns:908908+ * 0 on success, a negative error code otherwise909909+ */874910 int (*hdmi_cec_log_addr)(struct drm_bridge *bridge, u8 logical_addr);875911912912+ /**913913+ * @hdmi_cec_transmit:914914+ *915915+ * Transmit the message using the CEC adapter inside the bridge.916916+ *917917+ * This callback is optional, it can be implemented by bridges that918918+ * set the @DRM_BRIDGE_OP_HDMI_CEC_ADAPTER flag in their919919+ * &drm_bridge->ops.920920+ *921921+ * Returns:922922+ * 0 on success, a negative error code otherwise923923+ */876924 int (*hdmi_cec_transmit)(struct drm_bridge *bridge, u8 attempts,877925 u32 signal_free_time, struct cec_msg *msg);878926
···150150 bool active; /* T if stream is active */151151 bool need_retry; /* T if this stream needs retrying */152152 bool failed; /* T if this stream failed */153153+ bool transferred_valid; /* T is ->transferred is valid */153154};154155155156/*
+17-12
include/linux/sched.h
···2152215221532153static inline void __set_task_blocked_on(struct task_struct *p, struct mutex *m)21542154{21552155+ struct mutex *blocked_on = READ_ONCE(p->blocked_on);21562156+21552157 WARN_ON_ONCE(!m);21562158 /* The task should only be setting itself as blocked */21572159 WARN_ON_ONCE(p != current);···21642162 * with a different mutex. Note, setting it to the same21652163 * lock repeatedly is ok.21662164 */21672167- WARN_ON_ONCE(p->blocked_on && p->blocked_on != m);21682168- p->blocked_on = m;21652165+ WARN_ON_ONCE(blocked_on && blocked_on != m);21662166+ WRITE_ONCE(p->blocked_on, m);21692167}2170216821712169static inline void set_task_blocked_on(struct task_struct *p, struct mutex *m)···2176217421772175static inline void __clear_task_blocked_on(struct task_struct *p, struct mutex *m)21782176{21792179- WARN_ON_ONCE(!m);21802180- /* Currently we serialize blocked_on under the mutex::wait_lock */21812181- lockdep_assert_held_once(&m->wait_lock);21822182- /*21832183- * There may be cases where we re-clear already cleared21842184- * blocked_on relationships, but make sure we are not21852185- * clearing the relationship with a different lock.21862186- */21872187- WARN_ON_ONCE(m && p->blocked_on && p->blocked_on != m);21882188- p->blocked_on = NULL;21772177+ if (m) {21782178+ struct mutex *blocked_on = READ_ONCE(p->blocked_on);21792179+21802180+ /* Currently we serialize blocked_on under the mutex::wait_lock */21812181+ lockdep_assert_held_once(&m->wait_lock);21822182+ /*21832183+ * There may be cases where we re-clear already cleared21842184+ * blocked_on relationships, but make sure we are not21852185+ * clearing the relationship with a different lock.21862186+ */21872187+ WARN_ON_ONCE(blocked_on && blocked_on != m);21882188+ }21892189+ WRITE_ONCE(p->blocked_on, NULL);21892190}2190219121912192static inline void clear_task_blocked_on(struct task_struct *p, struct mutex *m)
···319319{320320 if (can_do_masked_user_access())321321 to = masked_user_access_begin(to);322322- else if (!user_read_access_begin(to, sizeof(*to)))322322+ else if (!user_write_access_begin(to, sizeof(*to)))323323 return -EFAULT;324324 unsafe_put_user(val, to, Efault);325325- user_read_access_end();325325+ user_write_access_end();326326 return 0;327327Efault:328328- user_read_access_end();328328+ user_write_access_end();329329 return -EFAULT;330330}331331
+5-1
kernel/locking/ww_mutex.h
···342342 * When waking up the task to wound, be sure to clear the343343 * blocked_on pointer. Otherwise we can see circular344344 * blocked_on relationships that can't resolve.345345+ *346346+ * NOTE: We pass NULL here instead of lock, because we347347+ * are waking the mutex owner, who may be currently348348+ * blocked on a different mutex.345349 */346346- __clear_task_blocked_on(owner, lock);350350+ __clear_task_blocked_on(owner, NULL);347351 wake_q_add(wake_q, owner);348352 }349353 return true;
···140140config CRYPTO_LIB_SHA1141141 tristate142142 help143143- The SHA-1 library functions. Select this if your module uses any of144144- the functions from <crypto/sha1.h>.143143+ The SHA-1 and HMAC-SHA1 library functions. Select this if your module144144+ uses any of the functions from <crypto/sha1.h>.145145146146config CRYPTO_LIB_SHA1_ARCH147147 bool···157157config CRYPTO_LIB_SHA256158158 tristate159159 help160160- Enable the SHA-256 library interface. This interface may be fulfilled161161- by either the generic implementation or an arch-specific one, if one162162- is available and enabled.160160+ The SHA-224, SHA-256, HMAC-SHA224, and HMAC-SHA256 library functions.161161+ Select this if your module uses any of these functions from162162+ <crypto/sha2.h>.163163164164config CRYPTO_LIB_SHA256_ARCH165165 bool
···339339 case BT_CODEC_TRANSPARENT:340340 if (!find_next_esco_param(conn, esco_param_msbc,341341 ARRAY_SIZE(esco_param_msbc)))342342- return false;342342+ return -EINVAL;343343+343344 param = &esco_param_msbc[conn->attempt - 1];344345 cp.tx_coding_format.id = 0x03;345346 cp.rx_coding_format.id = 0x03;···831830 /* Check if ISO connection is a BIS and terminate advertising832831 * set and BIG if there are no other connections using it.833832 */834834- bis = hci_conn_hash_lookup_big(hdev, conn->iso_qos.bcast.big);833833+ bis = hci_conn_hash_lookup_big_state(hdev,834834+ conn->iso_qos.bcast.big,835835+ BT_CONNECTED,836836+ HCI_ROLE_MASTER);837837+ if (bis)838838+ return;839839+840840+ bis = hci_conn_hash_lookup_big_state(hdev,841841+ conn->iso_qos.bcast.big,842842+ BT_CONNECT,843843+ HCI_ROLE_MASTER);835844 if (bis)836845 return;837846···22602249 * the start periodic advertising and create BIG commands have22612250 * been queued22622251 */22632263- hci_conn_hash_list_state(hdev, bis_mark_per_adv, PA_LINK,22522252+ hci_conn_hash_list_state(hdev, bis_mark_per_adv, BIS_LINK,22642253 BT_BOUND, &data);2265225422662255 /* Queue start periodic advertising and create BIG */
+10-5
net/bluetooth/hci_event.c
···67456745 qos->ucast.out.latency =67466746 DIV_ROUND_CLOSEST(get_unaligned_le24(ev->p_latency),67476747 1000);67486748- qos->ucast.in.sdu = le16_to_cpu(ev->c_mtu);67496749- qos->ucast.out.sdu = le16_to_cpu(ev->p_mtu);67486748+ qos->ucast.in.sdu = ev->c_bn ? le16_to_cpu(ev->c_mtu) : 0;67496749+ qos->ucast.out.sdu = ev->p_bn ? le16_to_cpu(ev->p_mtu) : 0;67506750 qos->ucast.in.phy = ev->c_phy;67516751 qos->ucast.out.phy = ev->p_phy;67526752 break;···67606760 qos->ucast.in.latency =67616761 DIV_ROUND_CLOSEST(get_unaligned_le24(ev->p_latency),67626762 1000);67636763- qos->ucast.out.sdu = le16_to_cpu(ev->c_mtu);67646764- qos->ucast.in.sdu = le16_to_cpu(ev->p_mtu);67636763+ qos->ucast.out.sdu = ev->c_bn ? le16_to_cpu(ev->c_mtu) : 0;67646764+ qos->ucast.in.sdu = ev->p_bn ? le16_to_cpu(ev->p_mtu) : 0;67656765 qos->ucast.out.phy = ev->c_phy;67666766 qos->ucast.in.phy = ev->p_phy;67676767 break;···69576957 continue;69586958 }6959695969606960- if (ev->status != 0x42)69606960+ if (ev->status != 0x42) {69616961 /* Mark PA sync as established */69626962 set_bit(HCI_CONN_PA_SYNC, &bis->flags);69636963+ /* Reset cleanup callback of PA Sync so it doesn't69646964+ * terminate the sync when deleting the connection.69656965+ */69666966+ conn->cleanup = NULL;69676967+ }6963696869646969 bis->sync_handle = conn->sync_handle;69656970 bis->iso_qos.bcast.big = ev->handle;
+16-9
net/bluetooth/hci_sync.c
···33443344 * advertising data. This also applies to the case33453345 * where BR/EDR was toggled during the AUTO_OFF phase.33463346 */33473347- if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||33473347+ if (hci_dev_test_flag(hdev, HCI_ADVERTISING) &&33483348 list_empty(&hdev->adv_instances)) {33493349 if (ext_adv_capable(hdev)) {33503350 err = hci_setup_ext_adv_instance_sync(hdev, 0x00);···45314531{45324532 struct hci_cp_le_set_host_feature cp;4533453345344534- if (!cis_capable(hdev))45344534+ if (!iso_capable(hdev))45354535 return 0;4536453645374537 memset(&cp, 0, sizeof(cp));4538453845394539 /* Connected Isochronous Channels (Host Support) */45404540 cp.bit_number = 32;45414541- cp.bit_value = 1;45414541+ cp.bit_value = iso_enabled(hdev) ? 0x01 : 0x00;4542454245434543 return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_HOST_FEATURE,45444544 sizeof(cp), &cp, HCI_CMD_TIMEOUT);···6985698569866986 hci_dev_lock(hdev);6987698769886988- hci_dev_clear_flag(hdev, HCI_PA_SYNC);69896989-69906988 if (!hci_conn_valid(hdev, conn))69916989 clear_bit(HCI_CONN_CREATE_PA_SYNC, &conn->flags);69926990···70457047 /* SID has not been set listen for HCI_EV_LE_EXT_ADV_REPORT to update70467048 * it.70477049 */70487048- if (conn->sid == HCI_SID_INVALID)70497049- __hci_cmd_sync_status_sk(hdev, HCI_OP_NOP, 0, NULL,70507050- HCI_EV_LE_EXT_ADV_REPORT,70517051- conn->conn_timeout, NULL);70507050+ if (conn->sid == HCI_SID_INVALID) {70517051+ err = __hci_cmd_sync_status_sk(hdev, HCI_OP_NOP, 0, NULL,70527052+ HCI_EV_LE_EXT_ADV_REPORT,70537053+ conn->conn_timeout, NULL);70547054+ if (err == -ETIMEDOUT)70557055+ goto done;70567056+ }7052705770537058 memset(&cp, 0, sizeof(cp));70547059 cp.options = qos->bcast.options;···70807079 if (err == -ETIMEDOUT)70817080 __hci_cmd_sync_status(hdev, HCI_OP_LE_PA_CREATE_SYNC_CANCEL,70827081 0, NULL, HCI_CMD_TIMEOUT);70827082+70837083+done:70847084+ hci_dev_clear_flag(hdev, HCI_PA_SYNC);70857085+70867086+ /* Update passive scan since HCI_PA_SYNC flag has been cleared 
*/70877087+ hci_update_passive_scan_sync(hdev);7083708870847089 return err;70857090}
···37793779 features &= ~NETIF_F_TSO_MANGLEID;37803780 }3781378137823782+ /* NETIF_F_IPV6_CSUM does not support IPv6 extension headers,37833783+ * so neither does TSO that depends on it.37843784+ */37853785+ if (features & NETIF_F_IPV6_CSUM &&37863786+ (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6 ||37873787+ (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4 &&37883788+ vlan_get_protocol(skb) == htons(ETH_P_IPV6))) &&37893789+ skb_transport_header_was_set(skb) &&37903790+ skb_network_header_len(skb) != sizeof(struct ipv6hdr) &&37913791+ !ipv6_has_hopopt_jumbo(skb))37923792+ features &= ~(NETIF_F_IPV6_CSUM | NETIF_F_TSO6 | NETIF_F_GSO_UDP_L4);37933793+37823794 return features;37833795}37843796
+7-1
net/hsr/hsr_slave.c
···6363 skb_push(skb, ETH_HLEN);6464 skb_reset_mac_header(skb);6565 if ((!hsr->prot_version && protocol == htons(ETH_P_PRP)) ||6666- protocol == htons(ETH_P_HSR))6666+ protocol == htons(ETH_P_HSR)) {6767+ if (!pskb_may_pull(skb, ETH_HLEN + HSR_HLEN)) {6868+ kfree_skb(skb);6969+ goto finish_consume;7070+ }7171+6772 skb_set_network_header(skb, ETH_HLEN + HSR_HLEN);7373+ }6874 skb_reset_mac_len(skb);69757076 /* Only the frames received over the interlink port will assign a
+2-4
net/ipv4/netfilter/nf_reject_ipv4.c
···247247 if (!oth)248248 return;249249250250- if ((hook == NF_INET_PRE_ROUTING || hook == NF_INET_INGRESS) &&251251- nf_reject_fill_skb_dst(oldskb) < 0)250250+ if (!skb_dst(oldskb) && nf_reject_fill_skb_dst(oldskb) < 0)252251 return;253252254253 if (skb_rtable(oldskb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))···320321 if (iph->frag_off & htons(IP_OFFSET))321322 return;322323323323- if ((hook == NF_INET_PRE_ROUTING || hook == NF_INET_INGRESS) &&324324- nf_reject_fill_skb_dst(skb_in) < 0)324324+ if (!skb_dst(skb_in) && nf_reject_fill_skb_dst(skb_in) < 0)325325 return;326326327327 if (skb_csum_unnecessary(skb_in) ||
+2-3
net/ipv6/netfilter/nf_reject_ipv6.c
···293293 fl6.fl6_sport = otcph->dest;294294 fl6.fl6_dport = otcph->source;295295296296- if (hook == NF_INET_PRE_ROUTING || hook == NF_INET_INGRESS) {296296+ if (!skb_dst(oldskb)) {297297 nf_ip6_route(net, &dst, flowi6_to_flowi(&fl6), false);298298 if (!dst)299299 return;···397397 if (hooknum == NF_INET_LOCAL_OUT && skb_in->dev == NULL)398398 skb_in->dev = net->loopback_dev;399399400400- if ((hooknum == NF_INET_PRE_ROUTING || hooknum == NF_INET_INGRESS) &&401401- nf_reject6_fill_skb_dst(skb_in) < 0)400400+ if (!skb_dst(skb_in) && nf_reject6_fill_skb_dst(skb_in) < 0)402401 return;403402404403 icmpv6_send(skb_in, ICMPV6_DEST_UNREACH, code, 0);
+5-1
net/ipv6/seg6_hmac.c
···3535#include <net/xfrm.h>36363737#include <crypto/hash.h>3838+#include <crypto/utils.h>3839#include <net/seg6.h>3940#include <net/genetlink.h>4041#include <net/seg6_hmac.h>···281280 if (seg6_hmac_compute(hinfo, srh, &ipv6_hdr(skb)->saddr, hmac_output))282281 return false;283282284284- if (memcmp(hmac_output, tlv->hmac, SEG6_HMAC_FIELD_LEN) != 0)283283+ if (crypto_memneq(hmac_output, tlv->hmac, SEG6_HMAC_FIELD_LEN))285284 return false;286285287286 return true;···304303{305304 struct seg6_pernet_data *sdata = seg6_pernet(net);306305 int err;306306+307307+ if (!__hmac_get_algo(hinfo->alg_id))308308+ return -EINVAL;307309308310 err = rhashtable_lookup_insert_fast(&sdata->hmac_infos, &hinfo->node,309311 rht_params);
+4-2
net/mptcp/options.c
···11181118 return hmac == mp_opt->ahmac;11191119}1120112011211121-/* Return false if a subflow has been reset, else return true */11211121+/* Return false in case of error (or subflow has been reset),11221122+ * else return true.11231123+ */11221124bool mptcp_incoming_options(struct sock *sk, struct sk_buff *skb)11231125{11241126 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);···1224122212251223 mpext = skb_ext_add(skb, SKB_EXT_MPTCP);12261224 if (!mpext)12271227- return true;12251225+ return false;1228122612291227 memset(mpext, 0, sizeof(*mpext));12301228
···18081808 return tls_decrypt_sg(sk, NULL, sgout, &darg);18091809}1810181018111811+/* All records returned from a recvmsg() call must have the same type.18121812+ * 0 is not a valid content type. Use it as "no type reported, yet".18131813+ */18111814static int tls_record_content_type(struct msghdr *msg, struct tls_msg *tlm,18121815 u8 *control)18131816{···20542051 if (err < 0)20552052 goto end;2056205320542054+ /* process_rx_list() will set @control if it processed any records */20572055 copied = err;20582058- if (len <= copied || (copied && control != TLS_RECORD_TYPE_DATA) || rx_more)20562056+ if (len <= copied || rx_more ||20572057+ (control && control != TLS_RECORD_TYPE_DATA))20592058 goto end;2060205920612060 target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
···11+#!/bin/bash22+# SPDX-License-Identifier: GPL-2.033+#44+# Test if a bond interface works with lacp_active=off.55+66+# shellcheck disable=SC203477+REQUIRE_MZ=no88+NUM_NETIFS=099+lib_dir=$(dirname "$0")1010+# shellcheck disable=SC10911111+source "$lib_dir"/../../../net/forwarding/lib.sh1212+1313+# shellcheck disable=SC23171414+check_port_state()1515+{1616+ local netns=$11717+ local port=$21818+ local state=$31919+2020+ ip -n "${netns}" -d -j link show "$port" | \2121+ jq -e ".[].linkinfo.info_slave_data.ad_actor_oper_port_state_str | index(\"${state}\") != null" > /dev/null2222+}2323+2424+check_pkt_count()2525+{2626+ RET=02727+ local ns="$1"2828+ local iface="$2"2929+3030+ # wait 65s, one per 30s3131+ slowwait_for_counter 65 2 tc_rule_handle_stats_get \3232+ "dev ${iface} egress" 101 ".packets" "-n ${ns}" &> /dev/null3333+}3434+3535+setup() {3636+ setup_ns c_ns s_ns3737+3838+ # shellcheck disable=SC21543939+ ip -n "${c_ns}" link add eth0 type veth peer name eth0 netns "${s_ns}"4040+ ip -n "${c_ns}" link add eth1 type veth peer name eth1 netns "${s_ns}"4141+4242+ # Add tc filter to count the pkts4343+ tc -n "${c_ns}" qdisc add dev eth0 clsact4444+ tc -n "${c_ns}" filter add dev eth0 egress handle 101 protocol 0x8809 matchall action pass4545+ tc -n "${s_ns}" qdisc add dev eth1 clsact4646+ tc -n "${s_ns}" filter add dev eth1 egress handle 101 protocol 0x8809 matchall action pass4747+4848+ ip -n "${s_ns}" link add bond0 type bond mode 802.3ad lacp_active on lacp_rate fast4949+ ip -n "${s_ns}" link set eth0 master bond05050+ ip -n "${s_ns}" link set eth1 master bond05151+5252+ ip -n "${c_ns}" link add bond0 type bond mode 802.3ad lacp_active off lacp_rate fast5353+ ip -n "${c_ns}" link set eth0 master bond05454+ ip -n "${c_ns}" link set eth1 master bond05555+5656+}5757+5858+trap cleanup_all_ns EXIT5959+setup6060+6161+# The bond will send 2 lacpdu pkts during init time, let's wait at least 2s6262+# after interface up6363+ip -n "${c_ns}" link set bond0 up6464+sleep 
26565+6666+# 1. The passive side shouldn't send LACPDU.6767+check_pkt_count "${c_ns}" "eth0" && RET=16868+log_test "802.3ad lacp_active off" "init port"6969+7070+ip -n "${s_ns}" link set bond0 up7171+# 2. The passive side should not have the 'active' flag.7272+RET=07373+slowwait 2 check_port_state "${c_ns}" "eth0" "active" && RET=17474+log_test "802.3ad lacp_active off" "port state active"7575+7676+# 3. The active side should have the 'active' flag.7777+RET=07878+slowwait 2 check_port_state "${s_ns}" "eth0" "active" || RET=17979+log_test "802.3ad lacp_active on" "port state active"8080+8181+# 4. Make sure the connection is not expired.8282+RET=08383+slowwait 5 check_port_state "${s_ns}" "eth0" "distributing"8484+slowwait 10 check_port_state "${s_ns}" "eth0" "expired" && RET=18585+log_test "bond 802.3ad lacp_active off" "port connection"8686+8787+# After testing, disconnect one port on each side to check the state.8888+ip -n "${s_ns}" link set eth0 nomaster8989+ip -n "${s_ns}" link set eth0 up9090+ip -n "${c_ns}" link set eth1 nomaster9191+ip -n "${c_ns}" link set eth1 up9292+# Due to Periodic Machine and Rx Machine state change, the bond will still9393+# send lacpdu pkts in a few seconds. sleep at least 5s to make sure9494+# negotiation finished9595+sleep 59696+9797+# 5. The active side should keep sending LACPDU.9898+check_pkt_count "${s_ns}" "eth1" || RET=19999+log_test "bond 802.3ad lacp_active on" "port pkt after disconnect"100100+101101+# 6. The passive side shouldn't send LACPDU anymore.102102+check_pkt_count "${c_ns}" "eth0" && RET=1103103+log_test "bond 802.3ad lacp_active off" "port pkt after disconnect"104104+105105+exit "$EXIT_STATUS"
···107107 #endif108108#endif109109110110+#ifndef __NR_open_tree_attr111111+ #if defined __alpha__112112+ #define __NR_open_tree_attr 577113113+ #elif defined _MIPS_SIM114114+ #if _MIPS_SIM == _MIPS_SIM_ABI32 /* o32 */115115+ #define __NR_open_tree_attr (467 + 4000)116116+ #endif117117+ #if _MIPS_SIM == _MIPS_SIM_NABI32 /* n32 */118118+ #define __NR_open_tree_attr (467 + 6000)119119+ #endif120120+ #if _MIPS_SIM == _MIPS_SIM_ABI64 /* n64 */121121+ #define __NR_open_tree_attr (467 + 5000)122122+ #endif123123+ #elif defined __ia64__124124+ #define __NR_open_tree_attr (467 + 1024)125125+ #else126126+ #define __NR_open_tree_attr 467127127+ #endif128128+#endif129129+110130#ifndef MOUNT_ATTR_IDMAP111131#define MOUNT_ATTR_IDMAP 0x00100000112132#endif···139119 struct mount_attr *attr, size_t size)140120{141121 return syscall(__NR_mount_setattr, dfd, path, flags, attr, size);122122+}123123+124124+static inline int sys_open_tree_attr(int dfd, const char *path, unsigned int flags,125125+ struct mount_attr *attr, size_t size)126126+{127127+ return syscall(__NR_open_tree_attr, dfd, path, flags, attr, size);142128}143129144130static ssize_t write_nointr(int fd, const void *buf, size_t count)···12481222 attr.userns_fd = get_userns_fd(0, 10000, 10000);12491223 ASSERT_GE(attr.userns_fd, 0);12501224 ASSERT_NE(sys_mount_setattr(open_tree_fd, "", AT_EMPTY_PATH, &attr, sizeof(attr)), 0);12251225+ /*12261226+ * Make sure that open_tree_attr() without OPEN_TREE_CLONE is not a way12271227+ * to bypass this mount_setattr() restriction.12281228+ */12291229+ ASSERT_LT(sys_open_tree_attr(open_tree_fd, "", AT_EMPTY_PATH, &attr, sizeof(attr)), 0);12301230+12511231 ASSERT_EQ(close(attr.userns_fd), 0);12521232 ASSERT_EQ(close(open_tree_fd), 0);12531233}···12871255 ASSERT_GE(attr.userns_fd, 0);12881256 ASSERT_NE(sys_mount_setattr(open_tree_fd, "", AT_EMPTY_PATH, &attr,12891257 sizeof(attr)), 0);12581258+ /*12591259+ * Make sure that open_tree_attr() without OPEN_TREE_CLONE is not a way12601260+ * to 
bypass this mount_setattr() restriction.12611261+ */12621262+ ASSERT_LT(sys_open_tree_attr(open_tree_fd, "", AT_EMPTY_PATH, &attr, sizeof(attr)), 0);12631263+12901264 ASSERT_EQ(close(attr.userns_fd), 0);12911265 ASSERT_EQ(close(open_tree_fd), 0);12921266}···13591321 ASSERT_EQ(close(open_tree_fd), 0);13601322}1361132313241324+static bool expected_uid_gid(int dfd, const char *path, int flags,13251325+ uid_t expected_uid, gid_t expected_gid)13261326+{13271327+ int ret;13281328+ struct stat st;13291329+13301330+ ret = fstatat(dfd, path, &st, flags);13311331+ if (ret < 0)13321332+ return false;13331333+13341334+ return st.st_uid == expected_uid && st.st_gid == expected_gid;13351335+}13361336+13621337/**13631338 * Validate that currently changing the idmapping of an idmapped mount fails.13641339 */···13811330 struct mount_attr attr = {13821331 .attr_set = MOUNT_ATTR_IDMAP,13831332 };13331333+13341334+ ASSERT_TRUE(expected_uid_gid(-EBADF, "/mnt/D", 0, 0, 0));1384133513851336 if (!mount_setattr_supported())13861337 SKIP(return, "mount_setattr syscall not supported");···14011348 AT_EMPTY_PATH, &attr, sizeof(attr)), 0);14021349 ASSERT_EQ(close(attr.userns_fd), 0);1403135013511351+ EXPECT_FALSE(expected_uid_gid(open_tree_fd, ".", 0, 0, 0));13521352+ EXPECT_TRUE(expected_uid_gid(open_tree_fd, ".", 0, 10000, 10000));13531353+14041354 /* Change idmapping on a detached mount that is already idmapped. 
*/14051355 attr.userns_fd = get_userns_fd(0, 20000, 10000);14061356 ASSERT_GE(attr.userns_fd, 0);14071357 ASSERT_NE(sys_mount_setattr(open_tree_fd, "", AT_EMPTY_PATH, &attr, sizeof(attr)), 0);13581358+ /*13591359+ * Make sure that open_tree_attr() without OPEN_TREE_CLONE is not a way13601360+ * to bypass this mount_setattr() restriction.13611361+ */13621362+ EXPECT_LT(sys_open_tree_attr(open_tree_fd, "", AT_EMPTY_PATH, &attr, sizeof(attr)), 0);13631363+ EXPECT_FALSE(expected_uid_gid(open_tree_fd, ".", 0, 20000, 20000));13641364+ EXPECT_TRUE(expected_uid_gid(open_tree_fd, ".", 0, 10000, 10000));13651365+14081366 ASSERT_EQ(close(attr.userns_fd), 0);14091367 ASSERT_EQ(close(open_tree_fd), 0);14101410-}14111411-14121412-static bool expected_uid_gid(int dfd, const char *path, int flags,14131413- uid_t expected_uid, gid_t expected_gid)14141414-{14151415- int ret;14161416- struct stat st;14171417-14181418- ret = fstatat(dfd, path, &st, flags);14191419- if (ret < 0)14201420- return false;14211421-14221422- return st.st_uid == expected_uid && st.st_gid == expected_gid;14231368}1424136914251370TEST_F(mount_setattr_idmapped, idmap_mount_tree_invalid)
+29
tools/testing/selftests/net/forwarding/router.sh
···1818# | 2001:db8:1::1/64 2001:db8:2::1/64 |1919# | |2020# +-----------------------------------------------------------------+2121+#2222+#shellcheck disable=SC2034 # SC doesn't see our uses of global variables21232224ALL_TESTS="2325 ping_ipv4···2927 ipv4_sip_equal_dip3028 ipv6_sip_equal_dip3129 ipv4_dip_link_local3030+ ipv4_sip_link_local3231"33323433NUM_NETIFS=4···331328 ip route del 169.254.1.0/24 dev $rp2332329 ip neigh del 169.254.1.1 lladdr 00:11:22:33:44:55 dev $rp2333330 tc filter del dev $rp2 egress protocol ip pref 1 handle 101 flower331331+}332332+333333+ipv4_sip_link_local()334334+{335335+ local sip=169.254.1.1336336+337337+ RET=0338338+339339+ # Disable rpfilter to prevent packets to be dropped because of it.340340+ sysctl_set net.ipv4.conf.all.rp_filter 0341341+ sysctl_set net.ipv4.conf."$rp1".rp_filter 0342342+343343+ tc filter add dev "$rp2" egress protocol ip pref 1 handle 101 \344344+ flower src_ip "$sip" action pass345345+346346+ $MZ "$h1" -t udp "sp=54321,dp=12345" -c 5 -d 1msec -b "$rp1mac" \347347+ -A "$sip" -B 198.51.100.2 -q348348+349349+ tc_check_packets "dev $rp2 egress" 101 5350350+ check_err $? "Packets were dropped"351351+352352+ log_test "IPv4 source IP is link-local"353353+354354+ tc filter del dev "$rp2" egress protocol ip pref 1 handle 101 flower355355+ sysctl_restore net.ipv4.conf."$rp1".rp_filter356356+ sysctl_restore net.ipv4.conf.all.rp_filter334357}335358336359trap cleanup EXIT
+3-2
tools/testing/selftests/net/mptcp/mptcp_connect.c
···183183 struct addrinfo *hints,184184 struct addrinfo **res)185185{186186-again:187187- int err = getaddrinfo(node, service, hints, res);186186+ int err;188187188188+again:189189+ err = getaddrinfo(node, service, hints, res);189190 if (err) {190191 const char *errstr;191192
+3-2
tools/testing/selftests/net/mptcp/mptcp_inq.c
···7575 struct addrinfo *hints,7676 struct addrinfo **res)7777{7878-again:7979- int err = getaddrinfo(node, service, hints, res);7878+ int err;80798080+again:8181+ err = getaddrinfo(node, service, hints, res);8182 if (err) {8283 const char *errstr;8384
+1
tools/testing/selftests/net/mptcp/mptcp_join.sh
···38423842 # remove and re-add38433843 if reset_with_events "delete re-add signal" &&38443844 mptcp_lib_kallsyms_has "subflow_rebuild_header$"; then38453845+ ip netns exec $ns1 sysctl -q net.mptcp.add_addr_timeout=038453846 pm_nl_set_limits $ns1 0 338463847 pm_nl_set_limits $ns2 3 338473848 pm_nl_add_endpoint $ns1 10.0.2.1 id 1 flags signal
+3-2
tools/testing/selftests/net/mptcp/mptcp_sockopt.c
···162162 struct addrinfo *hints,163163 struct addrinfo **res)164164{165165-again:166166- int err = getaddrinfo(node, service, hints, res);165165+ int err;167166167167+again:168168+ err = getaddrinfo(node, service, hints, res);168169 if (err) {169170 const char *errstr;170171
+1
tools/testing/selftests/net/mptcp/pm_netlink.sh
···198198check "get_limits" "${default_limits}" "subflows above hard limit"199199200200set_limits 8 8201201+flush_endpoint ## to make sure it doesn't affect the limits201202check "get_limits" "$(format_limits 8 8)" "set limits"202203203204flush_endpoint