Merge tag 'trace-v4.13-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace

Pull tracing fixes from Steven Rostedt:
"Various bug fixes:

- Two small memory leaks in error paths.

- A missing error return code on an error path.

- A fix to check that the requested tracing ring buffer CPU actually
  exists (triggered by setting maxcpus= on the command line to fewer
  than the actual number of CPUs, and then onlining the rest manually).

- A fix to have the reset of boot tracers called at late_initcall_sync()
  instead of just late_initcall(). Some of the tracers register via
  late_initcall(), and if the clear happens before a tracer is
  registered, that tracer will never start even though it was requested
  on the kernel command line"

* tag 'trace-v4.13-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace:
tracing: Fix freeing of filter in create_filter() when set_str is false
tracing: Fix kmemleak in tracing_map_array_free()
ftrace: Check for null ret_stack on profile function graph entry function
ring-buffer: Have ring_buffer_alloc_read_page() return error on offline CPU
tracing: Missing error code in tracer_alloc_buffers()
tracing: Call clear_boot_tracer() at lateinit_sync

 6 files changed, 38 insertions(+), 16 deletions(-)

kernel/trace/ftrace.c: +4
···
 
 	function_profile_call(trace->func, 0, NULL, NULL);
 
+	/* If function graph is shutting down, ret_stack can be NULL */
+	if (!current->ret_stack)
+		return 0;
+
 	if (index >= 0 && index < FTRACE_RETFUNC_DEPTH)
 		current->ret_stack[index].subtime = 0;
 

kernel/trace/ring_buffer.c: +9 -5
···
  * the page that was allocated, with the read page of the buffer.
  *
  * Returns:
- * The page allocated, or NULL on error.
+ * The page allocated, or ERR_PTR
  */
 void *ring_buffer_alloc_read_page(struct ring_buffer *buffer, int cpu)
 {
-	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
+	struct ring_buffer_per_cpu *cpu_buffer;
 	struct buffer_data_page *bpage = NULL;
 	unsigned long flags;
 	struct page *page;
 
+	if (!cpumask_test_cpu(cpu, buffer->cpumask))
+		return ERR_PTR(-ENODEV);
+
+	cpu_buffer = buffer->buffers[cpu];
 	local_irq_save(flags);
 	arch_spin_lock(&cpu_buffer->lock);
···
 	page = alloc_pages_node(cpu_to_node(cpu),
 				GFP_KERNEL | __GFP_NORETRY, 0);
 	if (!page)
-		return NULL;
+		return ERR_PTR(-ENOMEM);
 
 	bpage = page_address(page);
···
  *
  * for example:
  *	rpage = ring_buffer_alloc_read_page(buffer, cpu);
- *	if (!rpage)
- *		return error;
+ *	if (IS_ERR(rpage))
+ *		return PTR_ERR(rpage);
  *	ret = ring_buffer_read_page(buffer, &rpage, len, cpu, 0);
  *	if (ret >= 0)
  *		process_page(rpage, ret);
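
With this change, ring_buffer_alloc_read_page() reports failure through ERR_PTR() instead of returning NULL, so callers can tell an offline CPU (-ENODEV) apart from an allocation failure (-ENOMEM). A minimal caller-side sketch of the new contract, expanding on the kernel-doc example in the hunk above; read_one_page() and process_page() are placeholder names used for illustration, not symbols from this series:

	/*
	 * Hypothetical caller: the error is now carried in the pointer
	 * itself, so test with IS_ERR()/PTR_ERR() rather than for NULL.
	 */
	#include <linux/err.h>
	#include <linux/ring_buffer.h>

	static void process_page(void *page, int len);	/* placeholder consumer */

	static int read_one_page(struct ring_buffer *buffer, int cpu, size_t len)
	{
		void *rpage;
		int ret;

		rpage = ring_buffer_alloc_read_page(buffer, cpu);
		if (IS_ERR(rpage))
			return PTR_ERR(rpage);	/* -ENODEV: offline CPU, -ENOMEM: no page */

		ret = ring_buffer_read_page(buffer, &rpage, len, cpu, 0);
		if (ret >= 0)
			process_page(rpage, ret);

		/* hand the page back to the ring buffer when done with it */
		ring_buffer_free_read_page(buffer, cpu, rpage);
		return ret;
	}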

kernel/trace/ring_buffer_benchmark.c: +1 -1
···
 	int i;
 
 	bpage = ring_buffer_alloc_read_page(buffer, cpu);
-	if (!bpage)
+	if (IS_ERR(bpage))
 		return EVENT_DROPPED;
 
 	ret = ring_buffer_read_page(buffer, &bpage, PAGE_SIZE, cpu, 1);

kernel/trace/trace.c: +13 -6
···
 {
 	struct ftrace_buffer_info *info = filp->private_data;
 	struct trace_iterator *iter = &info->iter;
-	ssize_t ret;
+	ssize_t ret = 0;
 	ssize_t size;
 
 	if (!count)
···
 	if (!info->spare) {
 		info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer,
 							  iter->cpu_file);
-		info->spare_cpu = iter->cpu_file;
+		if (IS_ERR(info->spare)) {
+			ret = PTR_ERR(info->spare);
+			info->spare = NULL;
+		} else {
+			info->spare_cpu = iter->cpu_file;
+		}
 	}
 	if (!info->spare)
-		return -ENOMEM;
+		return ret;
 
 	/* Do we have previous read data to read? */
 	if (info->read < PAGE_SIZE)
···
 		ref->ref = 1;
 		ref->buffer = iter->trace_buffer->buffer;
 		ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
-		if (!ref->page) {
-			ret = -ENOMEM;
+		if (IS_ERR(ref->page)) {
+			ret = PTR_ERR(ref->page);
+			ref->page = NULL;
 			kfree(ref);
 			break;
 		}
···
 	if (ret < 0)
 		goto out_free_cpumask;
 	/* Used for event triggers */
+	ret = -ENOMEM;
 	temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
 	if (!temp_buffer)
 		goto out_rm_hp_state;
···
 }
 
 fs_initcall(tracer_init_tracefs);
-late_initcall(clear_boot_tracer);
+late_initcall_sync(clear_boot_tracer);
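
The switch to late_initcall_sync() relies on initcall ordering: every plain late_initcall() runs before any late_initcall_sync(), so clear_boot_tracer() is now guaranteed to run after any tracer that registers itself at late_initcall() time. A minimal sketch of that ordering, using hypothetical functions that are not part of this series:

	#include <linux/init.h>
	#include <linux/printk.h>

	/* Hypothetical tracer registration at the plain late_initcall() level. */
	static int __init example_register_tracer(void)
	{
		pr_info("tracer registered at late_initcall() time\n");
		return 0;
	}
	late_initcall(example_register_tracer);

	/* Runs only after all late_initcall() functions have completed. */
	static int __init example_clear_boot_tracer(void)
	{
		pr_info("safe to clear boot tracers now\n");
		return 0;
	}
	late_initcall_sync(example_clear_boot_tracer);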

kernel/trace/trace_events_filter.c: +4
···
 		if (err && set_str)
 			append_filter_err(ps, filter);
 	}
+	if (err && !set_str) {
+		free_event_filter(filter);
+		filter = NULL;
+	}
 	create_filter_finish(ps);
 
 	*filterp = filter;

kernel/trace/tracing_map.c: +7 -4
···
 	if (!a)
 		return;
 
-	if (!a->pages) {
-		kfree(a);
-		return;
-	}
+	if (!a->pages)
+		goto free;
 
 	for (i = 0; i < a->n_pages; i++) {
 		if (!a->pages[i])
 			break;
 		free_page((unsigned long)a->pages[i]);
 	}
+
+	kfree(a->pages);
+
+ free:
+	kfree(a);
 }
 
 struct tracing_map_array *tracing_map_array_alloc(unsigned int n_elts,