···
 	pmu_enabled = cpuc->enabled;
 	cpuc->enabled = 0;
 
-	/* stop everything (includes BRS) */
-	amd_pmu_disable_all();
+	amd_brs_disable_all();
 
 	/* Drain BRS is in use (could be inactive) */
 	if (cpuc->lbr_users)
···
 
 	cpuc->enabled = pmu_enabled;
 	if (pmu_enabled)
-		amd_pmu_enable_all(0);
+		amd_brs_enable_all();
 
 	return amd_pmu_adjust_nmi_window(handled);
 }
+1
arch/x86/events/amd/uncore.c
···
 
 	hlist_for_each_entry_safe(uncore, n, &uncore_unused_list, node) {
 		hlist_del(&uncore->node);
+		kfree(uncore->events);
 		kfree(uncore);
 	}
 }
+9
arch/x86/events/intel/pt.c
···
 	if (1 << order != nr_pages)
 		goto out;
 
+	/*
+	 * Some processors cannot always support single range for more than
+	 * 4KB - refer errata TGL052, ADL037 and RPL017. Future processors might
+	 * also be affected, so for now rather than trying to keep track of
+	 * which ones, just disable it for all.
+	 */
+	if (nr_pages > 1)
+		goto out;
+
 	buf->single = true;
 	buf->nr_pages = nr_pages;
 	ret = 0;
arch/x86/kernel/cpu/sgx/ioctl.c
···
 	if (!length || !IS_ALIGNED(length, PAGE_SIZE))
 		return -EINVAL;
 
+	if (offset + length < offset)
+		return -EINVAL;
+
 	if (offset + length - PAGE_SIZE >= encl->size)
 		return -EINVAL;
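Note: the added `offset + length < offset` test is the standard unsigned wrap-around guard: if the sum overflows, it wraps to a value smaller than either operand, which would let the following size check pass a bogus range. A minimal userspace sketch of the pattern, assuming a hypothetical validate_range() helper and a simplified upper-bound check:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical helper mirroring the guard above: reject any (offset, length)
 * pair whose unsigned sum wraps, then apply the ordinary bound check. */
static int validate_range(uint64_t offset, uint64_t length, uint64_t size)
{
	if (offset + length < offset)	/* sum wrapped past UINT64_MAX */
		return -1;
	if (offset + length > size)	/* simplified vs. the SGX check */
		return -1;
	return 0;
}

int main(void)
{
	/* Wraps around to 0xfff, so only the overflow guard catches it. */
	printf("%d\n", validate_range(UINT64_MAX - 0xfff, 0x2000, 1 << 20));
	printf("%d\n", validate_range(0x1000, 0x1000, 1 << 20));	/* fine */
	return 0;
}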
+1-1
arch/x86/kernel/fpu/core.c
···
 	if (test_thread_flag(TIF_NEED_FPU_LOAD))
 		fpregs_restore_userregs();
 	save_fpregs_to_fpstate(dst_fpu);
+	fpregs_unlock();
 	if (!(clone_flags & CLONE_THREAD))
 		fpu_inherit_perms(dst_fpu);
-	fpregs_unlock();
 
 	/*
 	 * Children never inherit PASID state.
drivers/spi/spi-imx.c
···
 	unsigned int pre, post;
 	unsigned int fin = spi_imx->spi_clk;
 
-	if (unlikely(fspi > fin))
-		return 0;
+	fspi = min(fspi, fin);
 
 	post = fls(fin) - fls(fspi);
 	if (fin > fspi << post)
···
 		return spi_imx_pio_transfer_slave(spi, transfer);
 
 	/*
+	 * If we decided in spi_imx_can_dma() that we want to do a DMA
+	 * transfer, the SPI transfer has already been mapped, so we
+	 * have to do the DMA transfer here.
+	 */
+	if (spi_imx->usedma)
+		return spi_imx_dma_transfer(spi_imx, transfer);
+	/*
 	 * Calculate the estimated time in us the transfer runs. Find
 	 * the number of Hz per byte per polling limit.
 	 */
···
 	/* run in polling mode for short transfers */
 	if (transfer->len < byte_limit)
 		return spi_imx_poll_transfer(spi, transfer);
-
-	if (spi_imx->usedma)
-		return spi_imx_dma_transfer(spi_imx, transfer);
 
 	return spi_imx_pio_transfer(spi, transfer);
 }
fs/fscache/volume.c
···
 	struct fscache_volume *volume;
 	struct fscache_cache *cache;
 	size_t klen, hlen;
-	char *key;
+	u8 *key;
+
+	klen = strlen(volume_key);
+	if (klen > NAME_MAX)
+		return NULL;
 
 	if (!coherency_data)
 		coherency_len = 0;
···
 	/* Stick the length on the front of the key and pad it out to make
 	 * hashing easier.
 	 */
-	klen = strlen(volume_key);
 	hlen = round_up(1 + klen + 1, sizeof(__le32));
 	key = kzalloc(hlen, GFP_KERNEL);
 	if (!key)
+1-1
include/linux/fscache.h
···
 	atomic_t		n_accesses;	/* Number of cache accesses in progress */
 	unsigned int		debug_id;
 	unsigned int		key_hash;	/* Hash of key string */
-	char			*key;		/* Volume ID, eg. "afs@example.com@1234" */
+	u8			*key;		/* Volume ID, eg. "afs@example.com@1234" */
 	struct list_head	proc_link;	/* Link in /proc/fs/fscache/volumes */
 	struct hlist_bl_node	hash_link;	/* Link in hash table */
 	struct work_struct	work;
+1-1
include/linux/ring_buffer.h
···
 
 int ring_buffer_wait(struct trace_buffer *buffer, int cpu, int full);
 __poll_t ring_buffer_poll_wait(struct trace_buffer *buffer, int cpu,
-			       struct file *filp, poll_table *poll_table);
+			       struct file *filp, poll_table *poll_table, int full);
 void ring_buffer_wake_waiters(struct trace_buffer *buffer, int cpu);
 
 #define RING_BUFFER_ALL_CPUS -1
kernel/events/core.c
···
 	}
 
 	if (event->attr.sigtrap) {
-		/*
-		 * Should not be able to return to user space without processing
-		 * pending_sigtrap (kernel events can overflow multiple times).
-		 */
-		WARN_ON_ONCE(event->pending_sigtrap && event->attr.exclude_kernel);
+		unsigned int pending_id = 1;
+
+		if (regs)
+			pending_id = hash32_ptr((void *)instruction_pointer(regs)) ?: 1;
 		if (!event->pending_sigtrap) {
-			event->pending_sigtrap = 1;
+			event->pending_sigtrap = pending_id;
 			local_inc(&event->ctx->nr_pending);
+		} else if (event->attr.exclude_kernel) {
+			/*
+			 * Should not be able to return to user space without
+			 * consuming pending_sigtrap; with exceptions:
+			 *
+			 *  1. Where !exclude_kernel, events can overflow again
+			 *     in the kernel without returning to user space.
+			 *
+			 *  2. Events that can overflow again before the IRQ-
+			 *     work without user space progress (e.g. hrtimer).
+			 *     To approximate progress (with false negatives),
+			 *     check 32-bit hash of the current IP.
+			 */
+			WARN_ON_ONCE(event->pending_sigtrap != pending_id);
 		}
 		event->pending_addr = data->addr;
 		irq_work_queue(&event->pending_irq);
+7-1
kernel/kprobes.c
···
 			if ((list_p != p) && (list_p->post_handler))
 				goto noclean;
 		}
-		ap->post_handler = NULL;
+		/*
+		 * For the kprobe-on-ftrace case, we keep the
+		 * post_handler setting to identify this aggrprobe
+		 * armed with kprobe_ipmodify_ops.
+		 */
+		if (!kprobe_ftrace(ap))
+			ap->post_handler = NULL;
 	}
 noclean:
 	/*
+17-2
kernel/rseq.c
···
 	return 0;
 }
 
+static bool rseq_warn_flags(const char *str, u32 flags)
+{
+	u32 test_flags;
+
+	if (!flags)
+		return false;
+	test_flags = flags & RSEQ_CS_NO_RESTART_FLAGS;
+	if (test_flags)
+		pr_warn_once("Deprecated flags (%u) in %s ABI structure", test_flags, str);
+	test_flags = flags & ~RSEQ_CS_NO_RESTART_FLAGS;
+	if (test_flags)
+		pr_warn_once("Unknown flags (%u) in %s ABI structure", test_flags, str);
+	return true;
+}
+
 static int rseq_need_restart(struct task_struct *t, u32 cs_flags)
 {
 	u32 flags, event_mask;
 	int ret;
 
-	if (WARN_ON_ONCE(cs_flags & RSEQ_CS_NO_RESTART_FLAGS) || cs_flags)
+	if (rseq_warn_flags("rseq_cs", cs_flags))
 		return -EINVAL;
 
 	/* Get thread flags. */
···
 	if (ret)
 		return ret;
 
-	if (WARN_ON_ONCE(flags & RSEQ_CS_NO_RESTART_FLAGS) || flags)
+	if (rseq_warn_flags("rseq", flags))
 		return -EINVAL;
 
 	/*
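Note: rseq_warn_flags() partitions the user-supplied flags word with a single mask: bits inside RSEQ_CS_NO_RESTART_FLAGS are known-but-deprecated, bits outside it are unknown, and either kind still makes rseq_need_restart() return -EINVAL; the change only replaces the WARN_ON_ONCE with a one-time diagnostic naming the offending ABI structure. A toy illustration of the mask partition (the 0x7 mask value is a stand-in, not the kernel constant):

#include <stdint.h>
#include <stdio.h>

#define NO_RESTART_FLAGS 0x7u	/* stand-in for RSEQ_CS_NO_RESTART_FLAGS */

int main(void)
{
	uint32_t flags = 0x15;	/* bits 0 and 2 deprecated, bit 4 unknown */
	uint32_t deprecated = flags & NO_RESTART_FLAGS;
	uint32_t unknown = flags & ~NO_RESTART_FLAGS;

	printf("deprecated=%#x unknown=%#x\n", deprecated, unknown);	/* 0x5, 0x10 */
	return 0;
}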
+35-17
kernel/sched/core.c
···
 	return success;
 }
 
+static bool __task_needs_rq_lock(struct task_struct *p)
+{
+	unsigned int state = READ_ONCE(p->__state);
+
+	/*
+	 * Since pi->lock blocks try_to_wake_up(), we don't need rq->lock when
+	 * the task is blocked. Make sure to check @state since ttwu() can drop
+	 * locks at the end, see ttwu_queue_wakelist().
+	 */
+	if (state == TASK_RUNNING || state == TASK_WAKING)
+		return true;
+
+	/*
+	 * Ensure we load p->on_rq after p->__state, otherwise it would be
+	 * possible to, falsely, observe p->on_rq == 0.
+	 *
+	 * See try_to_wake_up() for a longer comment.
+	 */
+	smp_rmb();
+	if (p->on_rq)
+		return true;
+
+#ifdef CONFIG_SMP
+	/*
+	 * Ensure the task has finished __schedule() and will not be referenced
+	 * anymore. Again, see try_to_wake_up() for a longer comment.
+	 */
+	smp_rmb();
+	smp_cond_load_acquire(&p->on_cpu, !VAL);
+#endif
+
+	return false;
+}
+
 /**
  * task_call_func - Invoke a function on task in fixed state
  * @p: Process for which the function is to be invoked, can be @current.
···
 int task_call_func(struct task_struct *p, task_call_f func, void *arg)
 {
 	struct rq *rq = NULL;
-	unsigned int state;
 	struct rq_flags rf;
 	int ret;
 
 	raw_spin_lock_irqsave(&p->pi_lock, rf.flags);
 
-	state = READ_ONCE(p->__state);
-
-	/*
-	 * Ensure we load p->on_rq after p->__state, otherwise it would be
-	 * possible to, falsely, observe p->on_rq == 0.
-	 *
-	 * See try_to_wake_up() for a longer comment.
-	 */
-	smp_rmb();
-
-	/*
-	 * Since pi->lock blocks try_to_wake_up(), we don't need rq->lock when
-	 * the task is blocked. Make sure to check @state since ttwu() can drop
-	 * locks at the end, see ttwu_queue_wakelist().
-	 */
-	if (state == TASK_RUNNING || state == TASK_WAKING || p->on_rq)
+	if (__task_needs_rq_lock(p))
 		rq = __task_rq_lock(p, &rf);
 
 	/*
+3-2
kernel/trace/ftrace.c
···
 	if (!ftrace_mod)
 		return -ENOMEM;
 
+	INIT_LIST_HEAD(&ftrace_mod->list);
 	ftrace_mod->func = kstrdup(func, GFP_KERNEL);
 	ftrace_mod->module = kstrdup(module, GFP_KERNEL);
 	ftrace_mod->enable = enable;
···
 		/* if we can't allocate this size, try something smaller */
 		if (!order)
 			return -ENOMEM;
-		order >>= 1;
+		order--;
 		goto again;
 	}
···
 	}
 
 	pr_info("ftrace: allocating %ld entries in %ld pages\n",
-		count, count / ENTRIES_PER_PAGE + 1);
+		count, DIV_ROUND_UP(count, ENTRIES_PER_PAGE));
 
 	ret = ftrace_process_locs(NULL,
 				  __start_mcount_loc,
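Note: `order` is a page-allocation order, i.e. a request for 2^order pages, so the retry path must step down one power of two at a time with `order--`; the old `order >>= 1` halved the exponent itself (10 -> 5 -> 2), skipping most of the sizes that might still have succeeded. A toy fallback loop showing the fixed stepping (try_alloc() is an illustrative stand-in for the real page allocation):

#include <stdio.h>

/* Pretend the allocator can only satisfy requests of 2^3 pages or less. */
static int try_alloc(int order)
{
	return order <= 3;
}

int main(void)
{
	int order = 10;			/* initial request: 1024 pages */

	while (!try_alloc(order)) {
		if (!order)
			return 1;	/* nothing left to try */
		order--;		/* next smaller size: 512, 256, ... */
	}
	printf("allocated 2^%d pages\n", order);	/* 2^3 */
	return 0;
}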
+32-16
kernel/trace/kprobe_event_gen_test.c
···
 #define KPROBE_GEN_TEST_ARG3 NULL
 #endif
 
+static bool trace_event_file_is_valid(struct trace_event_file *input)
+{
+	return input && !IS_ERR(input);
+}
 
 /*
  * Test to make sure we can create a kprobe event, then add more
···
 	kfree(buf);
 	return ret;
 delete:
+	if (trace_event_file_is_valid(gen_kprobe_test))
+		gen_kprobe_test = NULL;
 	/* We got an error after creating the event, delete it */
 	ret = kprobe_event_delete("gen_kprobe_test");
 	goto out;
···
 	kfree(buf);
 	return ret;
 delete:
+	if (trace_event_file_is_valid(gen_kretprobe_test))
+		gen_kretprobe_test = NULL;
 	/* We got an error after creating the event, delete it */
 	ret = kprobe_event_delete("gen_kretprobe_test");
 	goto out;
···
 
 	ret = test_gen_kretprobe_cmd();
 	if (ret) {
-		WARN_ON(trace_array_set_clr_event(gen_kretprobe_test->tr,
-						  "kprobes",
-						  "gen_kretprobe_test", false));
-		trace_put_event_file(gen_kretprobe_test);
+		if (trace_event_file_is_valid(gen_kretprobe_test)) {
+			WARN_ON(trace_array_set_clr_event(gen_kretprobe_test->tr,
+							  "kprobes",
+							  "gen_kretprobe_test", false));
+			trace_put_event_file(gen_kretprobe_test);
+		}
 		WARN_ON(kprobe_event_delete("gen_kretprobe_test"));
 	}
···
 
 static void __exit kprobe_event_gen_test_exit(void)
 {
-	/* Disable the event or you can't remove it */
-	WARN_ON(trace_array_set_clr_event(gen_kprobe_test->tr,
-					  "kprobes",
-					  "gen_kprobe_test", false));
+	if (trace_event_file_is_valid(gen_kprobe_test)) {
+		/* Disable the event or you can't remove it */
+		WARN_ON(trace_array_set_clr_event(gen_kprobe_test->tr,
+						  "kprobes",
+						  "gen_kprobe_test", false));
 
-	/* Now give the file and instance back */
-	trace_put_event_file(gen_kprobe_test);
+		/* Now give the file and instance back */
+		trace_put_event_file(gen_kprobe_test);
+	}
+
 
 	/* Now unregister and free the event */
 	WARN_ON(kprobe_event_delete("gen_kprobe_test"));
 
-	/* Disable the event or you can't remove it */
-	WARN_ON(trace_array_set_clr_event(gen_kretprobe_test->tr,
-					  "kprobes",
-					  "gen_kretprobe_test", false));
+	if (trace_event_file_is_valid(gen_kretprobe_test)) {
+		/* Disable the event or you can't remove it */
+		WARN_ON(trace_array_set_clr_event(gen_kretprobe_test->tr,
+						  "kprobes",
+						  "gen_kretprobe_test", false));
 
-	/* Now give the file and instance back */
-	trace_put_event_file(gen_kretprobe_test);
+		/* Now give the file and instance back */
+		trace_put_event_file(gen_kretprobe_test);
+	}
+
 
 	/* Now unregister and free the event */
 	WARN_ON(kprobe_event_delete("gen_kretprobe_test"));
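Note: the trace_event_file_is_valid() helper has to test two distinct failure encodings, because lookups following the kernel's ERR_PTR convention can return NULL or an errno packed into the pointer value itself. A self-contained userspace sketch of that convention and the combined check (all names here are illustrative):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_ERRNO 4095

/* The last page of the address space encodes negative errnos. */
static void *ERR_PTR(intptr_t error)
{
	return (void *)error;
}

static int IS_ERR(const void *ptr)
{
	return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO;
}

/* Mirrors the helper added above: only non-NULL, non-error pointers pass. */
static int file_is_valid(const void *file)
{
	return file && !IS_ERR(file);
}

int main(void)
{
	int obj = 0;

	printf("%d %d %d\n",
	       file_is_valid(NULL),			/* 0 */
	       file_is_valid(ERR_PTR(-ENODEV)),		/* 0 */
	       file_is_valid(&obj));			/* 1 */
	return 0;
}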
kernel/trace/ring_buffer.c
···
 	local_t				committing;
 	local_t				commits;
 	local_t				pages_touched;
+	local_t				pages_lost;
 	local_t				pages_read;
 	long				last_pages_touch;
 	size_t				shortest_full;
···
 size_t ring_buffer_nr_dirty_pages(struct trace_buffer *buffer, int cpu)
 {
 	size_t read;
+	size_t lost;
 	size_t cnt;
 
 	read = local_read(&buffer->buffers[cpu]->pages_read);
+	lost = local_read(&buffer->buffers[cpu]->pages_lost);
 	cnt = local_read(&buffer->buffers[cpu]->pages_touched);
+
+	if (WARN_ON_ONCE(cnt < lost))
+		return 0;
+
+	cnt -= lost;
+
 	/* The reader can read an empty page, but not more than that */
 	if (cnt < read) {
 		WARN_ON_ONCE(read > cnt + 1);
···
 	}
 
 	return cnt - read;
+}
+
+static __always_inline bool full_hit(struct trace_buffer *buffer, int cpu, int full)
+{
+	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
+	size_t nr_pages;
+	size_t dirty;
+
+	nr_pages = cpu_buffer->nr_pages;
+	if (!nr_pages || !full)
+		return true;
+
+	dirty = ring_buffer_nr_dirty_pages(buffer, cpu);
+
+	return (dirty * 100) > (full * nr_pages);
 }
 
 /*
···
 		    !ring_buffer_empty_cpu(buffer, cpu)) {
 			unsigned long flags;
 			bool pagebusy;
-			size_t nr_pages;
-			size_t dirty;
+			bool done;
 
 			if (!full)
 				break;
 
 			raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
 			pagebusy = cpu_buffer->reader_page == cpu_buffer->commit_page;
-			nr_pages = cpu_buffer->nr_pages;
-			dirty = ring_buffer_nr_dirty_pages(buffer, cpu);
+			done = !pagebusy && full_hit(buffer, cpu, full);
+
 			if (!cpu_buffer->shortest_full ||
 			    cpu_buffer->shortest_full > full)
 				cpu_buffer->shortest_full = full;
 			raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
-			if (!pagebusy &&
-			    (!nr_pages || (dirty * 100) > full * nr_pages))
+			if (done)
 				break;
 		}
···
  * @cpu: the cpu buffer to wait on
  * @filp: the file descriptor
  * @poll_table: The poll descriptor
+ * @full: wait until the percentage of pages are available, if @cpu != RING_BUFFER_ALL_CPUS
  *
  * If @cpu == RING_BUFFER_ALL_CPUS then the task will wake up as soon
  * as data is added to any of the @buffer's cpu buffers. Otherwise
···
  * zero otherwise.
  */
 __poll_t ring_buffer_poll_wait(struct trace_buffer *buffer, int cpu,
-			       struct file *filp, poll_table *poll_table)
+			       struct file *filp, poll_table *poll_table, int full)
 {
 	struct ring_buffer_per_cpu *cpu_buffer;
 	struct rb_irq_work *work;
 
-	if (cpu == RING_BUFFER_ALL_CPUS)
+	if (cpu == RING_BUFFER_ALL_CPUS) {
 		work = &buffer->irq_work;
-	else {
+		full = 0;
+	} else {
 		if (!cpumask_test_cpu(cpu, buffer->cpumask))
 			return -EINVAL;
···
 		work = &cpu_buffer->irq_work;
 	}
 
-	poll_wait(filp, &work->waiters, poll_table);
-	work->waiters_pending = true;
+	if (full) {
+		poll_wait(filp, &work->full_waiters, poll_table);
+		work->full_waiters_pending = true;
+	} else {
+		poll_wait(filp, &work->waiters, poll_table);
+		work->waiters_pending = true;
+	}
+
 	/*
 	 * There's a tight race between setting the waiters_pending and
 	 * checking if the ring buffer is empty. Once the waiters_pending bit
···
 	 * will fix it later.
 	 */
 	smp_mb();
+
+	if (full)
+		return full_hit(buffer, cpu, full) ? EPOLLIN | EPOLLRDNORM : 0;
 
 	if ((cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer)) ||
 	    (cpu != RING_BUFFER_ALL_CPUS && !ring_buffer_empty_cpu(buffer, cpu)))
···
 
 	free_buffer_page(cpu_buffer->reader_page);
 
-	rb_head_page_deactivate(cpu_buffer);
-
 	if (head) {
+		rb_head_page_deactivate(cpu_buffer);
+
 		list_for_each_entry_safe(bpage, tmp, head, list) {
 			list_del_init(&bpage->list);
 			free_buffer_page(bpage);
···
 			 */
 			local_add(page_entries, &cpu_buffer->overrun);
 			local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
+			local_inc(&cpu_buffer->pages_lost);
 		}
 
 		/*
···
 		 */
 		local_add(entries, &cpu_buffer->overrun);
 		local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
+		local_inc(&cpu_buffer->pages_lost);
 
 		/*
 		 * The entries will be zeroed out when we move the
···
 static __always_inline void
 rb_wakeups(struct trace_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer)
 {
-	size_t nr_pages;
-	size_t dirty;
-	size_t full;
-
 	if (buffer->irq_work.waiters_pending) {
 		buffer->irq_work.waiters_pending = false;
 		/* irq_work_queue() supplies it's own memory barriers */
···
 	cpu_buffer->last_pages_touch = local_read(&cpu_buffer->pages_touched);
 
-	full = cpu_buffer->shortest_full;
-	nr_pages = cpu_buffer->nr_pages;
-	dirty = ring_buffer_nr_dirty_pages(buffer, cpu_buffer->cpu);
-	if (full && nr_pages && (dirty * 100) <= full * nr_pages)
+	if (!full_hit(buffer, cpu_buffer->cpu, cpu_buffer->shortest_full))
 		return;
 
 	cpu_buffer->irq_work.wakeup_full = true;
···
 	local_set(&cpu_buffer->committing, 0);
 	local_set(&cpu_buffer->commits, 0);
 	local_set(&cpu_buffer->pages_touched, 0);
+	local_set(&cpu_buffer->pages_lost, 0);
 	local_set(&cpu_buffer->pages_read, 0);
 	cpu_buffer->last_pages_touch = 0;
 	cpu_buffer->shortest_full = 0;
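Note: full_hit() decides "is more than `full` percent of the buffer dirty?" using only integer multiplication, comparing dirty * 100 against full * nr_pages instead of dividing. A standalone sketch with worked numbers (the predicate body mirrors the diff; the harness around it is illustrative):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Wake a waiter once the dirty-page percentage strictly exceeds "full". */
static bool full_hit(size_t nr_pages, size_t dirty, int full)
{
	if (!nr_pages || !full)
		return true;
	return (dirty * 100) > ((size_t)full * nr_pages);
}

int main(void)
{
	/* 128-page buffer, waiter asked for 50%: threshold is 64 pages. */
	printf("%d\n", full_hit(128, 64, 50));	/* 0: 6400 > 6400 is false */
	printf("%d\n", full_hit(128, 65, 50));	/* 1: 6500 > 6400 */
	return 0;
}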
+6-10
kernel/trace/synth_event_gen_test.c
···
 
 	/* Now generate a gen_synth_test event */
 	ret = synth_event_trace_array(gen_synth_test, vals, ARRAY_SIZE(vals));
- out:
+ free:
+	kfree(buf);
 	return ret;
  delete:
 	/* We got an error after creating the event, delete it */
 	synth_event_delete("gen_synth_test");
-free:
-	kfree(buf);
-
-	goto out;
+	goto free;
 }
 
 /*
···
 
 	/* Now trace an empty_synth_test event */
 	ret = synth_event_trace_array(empty_synth_test, vals, ARRAY_SIZE(vals));
- out:
+ free:
+	kfree(buf);
 	return ret;
  delete:
 	/* We got an error after creating the event, delete it */
 	synth_event_delete("empty_synth_test");
-free:
-	kfree(buf);
-
-	goto out;
+	goto free;
 }
 
 static struct synth_field_desc create_synth_test_fields[] = {
kernel/trace/trace_eprobe.c
···
 	kfree(ep->event_system);
 	if (ep->event)
 		trace_event_put_ref(ep->event);
+	kfree(ep->filter_str);
 	kfree(ep);
 }
···
 {
 	struct eprobe_data *edata = data->private_data;
 
+	if (unlikely(!rec))
+		return;
+
 	__eprobe_trace_func(edata, rec);
 }
···
 	INIT_LIST_HEAD(&trigger->list);
 
 	if (ep->filter_str) {
-		ret = create_event_filter(file->tr, file->event_call,
+		ret = create_event_filter(file->tr, ep->event,
					  ep->filter_str, false, &filter);
 		if (ret)
 			goto error;
···
 
 static int trace_eprobe_parse_filter(struct trace_eprobe *ep, int argc, const char *argv[])
 {
-	struct event_filter *dummy;
+	struct event_filter *dummy = NULL;
 	int i, ret, len = 0;
 	char *p;
+2-3
kernel/trace/trace_events_synth.c
···
 	}
 
 	ret = set_synth_event_print_fmt(call);
-	if (ret < 0) {
+	/* unregister_trace_event() will be called inside */
+	if (ret < 0)
 		trace_remove_event_call(call);
-		goto err;
-	}
 out:
 	return ret;
 err: