Merge tag 'trace-v5.4-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace

Pull tracing fixes from Steven Rostedt:
"A few tracing fixes:

- Remove lockdown from tracefs itself and move it to the trace
directory. Have the open functions there do the lockdown checks.

- Fix a few races with opening an instance file and the instance
being deleted (discovered during the lockdown updates). Kept
separate from the clean-up code so that they can be backported to
stable more easily.

- Clean up and consolidate the checks done when opening a trace
file, as there were multiple checks that needed to be done, and it
did not make sense having them done in each open instance.

- Fix a regression in the record mcount code.

- Small hw_lat detector tracer fixes.

- A trace_pipe read fix due to not initializing trace_seq"

* tag 'trace-v5.4-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace:
tracing: Initialize iter->seq after zeroing in tracing_read_pipe()
tracing/hwlat: Don't ignore outer-loop duration when calculating max_latency
tracing/hwlat: Report total time spent in all NMIs during the sample
recordmcount: Fix nop_mcount() function
tracing: Do not create tracefs files if tracefs lockdown is in effect
tracing: Add locked_down checks to the open calls of files created for tracefs
tracing: Add tracing_check_open_get_tr()
tracing: Have trace events system open call tracing_open_generic_tr()
tracing: Get trace_array reference for available_tracers files
ftrace: Get a reference counter for the trace_array on filter files
tracefs: Revert ccbd54ff54e8 ("tracefs: Restrict tracefs when the kernel is locked down")

+5 -41
fs/tracefs/inode.c
··· 16 16 #include <linux/namei.h> 17 17 #include <linux/tracefs.h> 18 18 #include <linux/fsnotify.h> 19 + #include <linux/security.h> 19 20 #include <linux/seq_file.h> 20 21 #include <linux/parser.h> 21 22 #include <linux/magic.h> 22 23 #include <linux/slab.h> 23 - #include <linux/security.h> 24 24 25 25 #define TRACEFS_DEFAULT_MODE 0700 26 26 27 27 static struct vfsmount *tracefs_mount; 28 28 static int tracefs_mount_count; 29 29 static bool tracefs_registered; 30 - 31 - static int default_open_file(struct inode *inode, struct file *filp) 32 - { 33 - struct dentry *dentry = filp->f_path.dentry; 34 - struct file_operations *real_fops; 35 - int ret; 36 - 37 - if (!dentry) 38 - return -EINVAL; 39 - 40 - ret = security_locked_down(LOCKDOWN_TRACEFS); 41 - if (ret) 42 - return ret; 43 - 44 - real_fops = dentry->d_fsdata; 45 - if (!real_fops->open) 46 - return 0; 47 - return real_fops->open(inode, filp); 48 - } 49 30 50 31 static ssize_t default_read_file(struct file *file, char __user *buf, 51 32 size_t count, loff_t *ppos) ··· 222 241 return 0; 223 242 } 224 243 225 - static void tracefs_destroy_inode(struct inode *inode) 226 - { 227 - if (S_ISREG(inode->i_mode)) 228 - kfree(inode->i_fop); 229 - } 230 - 231 244 static int tracefs_remount(struct super_block *sb, int *flags, char *data) 232 245 { 233 246 int err; ··· 258 283 static const struct super_operations tracefs_super_operations = { 259 284 .statfs = simple_statfs, 260 285 .remount_fs = tracefs_remount, 261 - .destroy_inode = tracefs_destroy_inode, 262 286 .show_options = tracefs_show_options, 263 287 }; 264 288 ··· 388 414 struct dentry *parent, void *data, 389 415 const struct file_operations *fops) 390 416 { 391 - struct file_operations *proxy_fops; 392 417 struct dentry *dentry; 393 418 struct inode *inode; 419 + 420 + if (security_locked_down(LOCKDOWN_TRACEFS)) 421 + return NULL; 394 422 395 423 if (!(mode & S_IFMT)) 396 424 mode |= S_IFREG; ··· 406 430 if (unlikely(!inode)) 407 431 return 
failed_creating(dentry); 408 432 409 - proxy_fops = kzalloc(sizeof(struct file_operations), GFP_KERNEL); 410 - if (unlikely(!proxy_fops)) { 411 - iput(inode); 412 - return failed_creating(dentry); 413 - } 414 - 415 - if (!fops) 416 - fops = &tracefs_file_operations; 417 - 418 - dentry->d_fsdata = (void *)fops; 419 - memcpy(proxy_fops, fops, sizeof(*proxy_fops)); 420 - proxy_fops->open = default_open_file; 421 433 inode->i_mode = mode; 422 - inode->i_fop = proxy_fops; 434 + inode->i_fop = fops ? fops : &tracefs_file_operations; 423 435 inode->i_private = data; 424 436 d_instantiate(dentry, inode); 425 437 fsnotify_create(dentry->d_parent->d_inode, dentry);
+43 -12
kernel/trace/ftrace.c
··· 18 18 #include <linux/clocksource.h> 19 19 #include <linux/sched/task.h> 20 20 #include <linux/kallsyms.h> 21 + #include <linux/security.h> 21 22 #include <linux/seq_file.h> 22 23 #include <linux/tracefs.h> 23 24 #include <linux/hardirq.h> ··· 3487 3486 ftrace_avail_open(struct inode *inode, struct file *file) 3488 3487 { 3489 3488 struct ftrace_iterator *iter; 3489 + int ret; 3490 + 3491 + ret = security_locked_down(LOCKDOWN_TRACEFS); 3492 + if (ret) 3493 + return ret; 3490 3494 3491 3495 if (unlikely(ftrace_disabled)) 3492 3496 return -ENODEV; ··· 3510 3504 ftrace_enabled_open(struct inode *inode, struct file *file) 3511 3505 { 3512 3506 struct ftrace_iterator *iter; 3507 + 3508 + /* 3509 + * This shows us what functions are currently being 3510 + * traced and by what. Not sure if we want lockdown 3511 + * to hide such critical information for an admin. 3512 + * Although, perhaps it can show information we don't 3513 + * want people to see, but if something is tracing 3514 + * something, we probably want to know about it. 
3515 + */ 3513 3516 3514 3517 iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter)); 3515 3518 if (!iter) ··· 3555 3540 struct ftrace_hash *hash; 3556 3541 struct list_head *mod_head; 3557 3542 struct trace_array *tr = ops->private; 3558 - int ret = 0; 3543 + int ret = -ENOMEM; 3559 3544 3560 3545 ftrace_ops_init(ops); 3561 3546 3562 3547 if (unlikely(ftrace_disabled)) 3563 3548 return -ENODEV; 3564 3549 3550 + if (tracing_check_open_get_tr(tr)) 3551 + return -ENODEV; 3552 + 3565 3553 iter = kzalloc(sizeof(*iter), GFP_KERNEL); 3566 3554 if (!iter) 3567 - return -ENOMEM; 3555 + goto out; 3568 3556 3569 - if (trace_parser_get_init(&iter->parser, FTRACE_BUFF_MAX)) { 3570 - kfree(iter); 3571 - return -ENOMEM; 3572 - } 3557 + if (trace_parser_get_init(&iter->parser, FTRACE_BUFF_MAX)) 3558 + goto out; 3573 3559 3574 3560 iter->ops = ops; 3575 3561 iter->flags = flag; ··· 3600 3584 3601 3585 if (!iter->hash) { 3602 3586 trace_parser_put(&iter->parser); 3603 - kfree(iter); 3604 - ret = -ENOMEM; 3605 3587 goto out_unlock; 3606 3588 } 3607 3589 } else 3608 3590 iter->hash = hash; 3591 + 3592 + ret = 0; 3609 3593 3610 3594 if (file->f_mode & FMODE_READ) { 3611 3595 iter->pg = ftrace_pages_start; ··· 3618 3602 /* Failed */ 3619 3603 free_ftrace_hash(iter->hash); 3620 3604 trace_parser_put(&iter->parser); 3621 - kfree(iter); 3622 3605 } 3623 3606 } else 3624 3607 file->private_data = iter; 3625 3608 3626 3609 out_unlock: 3627 3610 mutex_unlock(&ops->func_hash->regex_lock); 3611 + 3612 + out: 3613 + if (ret) { 3614 + kfree(iter); 3615 + if (tr) 3616 + trace_array_put(tr); 3617 + } 3628 3618 3629 3619 return ret; 3630 3620 } ··· 3640 3618 { 3641 3619 struct ftrace_ops *ops = inode->i_private; 3642 3620 3621 + /* Checks for tracefs lockdown */ 3643 3622 return ftrace_regex_open(ops, 3644 3623 FTRACE_ITER_FILTER | FTRACE_ITER_DO_PROBES, 3645 3624 inode, file); ··· 3651 3628 { 3652 3629 struct ftrace_ops *ops = inode->i_private; 3653 3630 3631 + /* Checks for tracefs 
lockdown */ 3654 3632 return ftrace_regex_open(ops, FTRACE_ITER_NOTRACE, 3655 3633 inode, file); 3656 3634 } ··· 5061 5037 5062 5038 mutex_unlock(&iter->ops->func_hash->regex_lock); 5063 5039 free_ftrace_hash(iter->hash); 5040 + if (iter->tr) 5041 + trace_array_put(iter->tr); 5064 5042 kfree(iter); 5065 5043 5066 5044 return 0; ··· 5220 5194 __ftrace_graph_open(struct inode *inode, struct file *file, 5221 5195 struct ftrace_graph_data *fgd) 5222 5196 { 5223 - int ret = 0; 5197 + int ret; 5224 5198 struct ftrace_hash *new_hash = NULL; 5199 + 5200 + ret = security_locked_down(LOCKDOWN_TRACEFS); 5201 + if (ret) 5202 + return ret; 5225 5203 5226 5204 if (file->f_mode & FMODE_WRITE) { 5227 5205 const int size_bits = FTRACE_HASH_DEFAULT_BITS; ··· 6567 6537 struct seq_file *m; 6568 6538 int ret = 0; 6569 6539 6570 - if (trace_array_get(tr) < 0) 6571 - return -ENODEV; 6540 + ret = tracing_check_open_get_tr(tr); 6541 + if (ret) 6542 + return ret; 6572 6543 6573 6544 if ((file->f_mode & FMODE_WRITE) && 6574 6545 (file->f_flags & O_TRUNC))
+88 -53
kernel/trace/trace.c
··· 17 17 #include <linux/stacktrace.h> 18 18 #include <linux/writeback.h> 19 19 #include <linux/kallsyms.h> 20 + #include <linux/security.h> 20 21 #include <linux/seq_file.h> 21 22 #include <linux/notifier.h> 22 23 #include <linux/irqflags.h> ··· 303 302 mutex_lock(&trace_types_lock); 304 303 __trace_array_put(this_tr); 305 304 mutex_unlock(&trace_types_lock); 305 + } 306 + 307 + int tracing_check_open_get_tr(struct trace_array *tr) 308 + { 309 + int ret; 310 + 311 + ret = security_locked_down(LOCKDOWN_TRACEFS); 312 + if (ret) 313 + return ret; 314 + 315 + if (tracing_disabled) 316 + return -ENODEV; 317 + 318 + if (tr && trace_array_get(tr) < 0) 319 + return -ENODEV; 320 + 321 + return 0; 306 322 } 307 323 308 324 int call_filter_check_discard(struct trace_event_call *call, void *rec, ··· 4158 4140 4159 4141 int tracing_open_generic(struct inode *inode, struct file *filp) 4160 4142 { 4161 - if (tracing_disabled) 4162 - return -ENODEV; 4143 + int ret; 4144 + 4145 + ret = tracing_check_open_get_tr(NULL); 4146 + if (ret) 4147 + return ret; 4163 4148 4164 4149 filp->private_data = inode->i_private; 4165 4150 return 0; ··· 4177 4156 * Open and update trace_array ref count. 4178 4157 * Must have the current trace_array passed to it. 
4179 4158 */ 4180 - static int tracing_open_generic_tr(struct inode *inode, struct file *filp) 4159 + int tracing_open_generic_tr(struct inode *inode, struct file *filp) 4181 4160 { 4182 4161 struct trace_array *tr = inode->i_private; 4162 + int ret; 4183 4163 4184 - if (tracing_disabled) 4185 - return -ENODEV; 4186 - 4187 - if (trace_array_get(tr) < 0) 4188 - return -ENODEV; 4164 + ret = tracing_check_open_get_tr(tr); 4165 + if (ret) 4166 + return ret; 4189 4167 4190 4168 filp->private_data = inode->i_private; 4191 4169 ··· 4253 4233 { 4254 4234 struct trace_array *tr = inode->i_private; 4255 4235 struct trace_iterator *iter; 4256 - int ret = 0; 4236 + int ret; 4257 4237 4258 - if (trace_array_get(tr) < 0) 4259 - return -ENODEV; 4238 + ret = tracing_check_open_get_tr(tr); 4239 + if (ret) 4240 + return ret; 4260 4241 4261 4242 /* If this file was open for write, then erase contents */ 4262 4243 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) { ··· 4373 4352 struct seq_file *m; 4374 4353 int ret; 4375 4354 4376 - if (tracing_disabled) 4377 - return -ENODEV; 4378 - 4379 - ret = seq_open(file, &show_traces_seq_ops); 4355 + ret = tracing_check_open_get_tr(tr); 4380 4356 if (ret) 4381 4357 return ret; 4358 + 4359 + ret = seq_open(file, &show_traces_seq_ops); 4360 + if (ret) { 4361 + trace_array_put(tr); 4362 + return ret; 4363 + } 4382 4364 4383 4365 m = file->private_data; 4384 4366 m->private = tr; 4385 4367 4386 4368 return 0; 4369 + } 4370 + 4371 + static int show_traces_release(struct inode *inode, struct file *file) 4372 + { 4373 + struct trace_array *tr = inode->i_private; 4374 + 4375 + trace_array_put(tr); 4376 + return seq_release(inode, file); 4387 4377 } 4388 4378 4389 4379 static ssize_t ··· 4427 4395 static const struct file_operations show_traces_fops = { 4428 4396 .open = show_traces_open, 4429 4397 .read = seq_read, 4430 - .release = seq_release, 4431 4398 .llseek = seq_lseek, 4399 + .release = show_traces_release, 4432 4400 }; 4433 4401 
4434 4402 static ssize_t ··· 4729 4697 struct trace_array *tr = inode->i_private; 4730 4698 int ret; 4731 4699 4732 - if (tracing_disabled) 4733 - return -ENODEV; 4734 - 4735 - if (trace_array_get(tr) < 0) 4736 - return -ENODEV; 4700 + ret = tracing_check_open_get_tr(tr); 4701 + if (ret) 4702 + return ret; 4737 4703 4738 4704 ret = single_open(file, tracing_trace_options_show, inode->i_private); 4739 4705 if (ret < 0) ··· 5068 5038 5069 5039 static int tracing_saved_tgids_open(struct inode *inode, struct file *filp) 5070 5040 { 5071 - if (tracing_disabled) 5072 - return -ENODEV; 5041 + int ret; 5042 + 5043 + ret = tracing_check_open_get_tr(NULL); 5044 + if (ret) 5045 + return ret; 5073 5046 5074 5047 return seq_open(filp, &tracing_saved_tgids_seq_ops); 5075 5048 } ··· 5148 5115 5149 5116 static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp) 5150 5117 { 5151 - if (tracing_disabled) 5152 - return -ENODEV; 5118 + int ret; 5119 + 5120 + ret = tracing_check_open_get_tr(NULL); 5121 + if (ret) 5122 + return ret; 5153 5123 5154 5124 return seq_open(filp, &tracing_saved_cmdlines_seq_ops); 5155 5125 } ··· 5316 5280 5317 5281 static int tracing_eval_map_open(struct inode *inode, struct file *filp) 5318 5282 { 5319 - if (tracing_disabled) 5320 - return -ENODEV; 5283 + int ret; 5284 + 5285 + ret = tracing_check_open_get_tr(NULL); 5286 + if (ret) 5287 + return ret; 5321 5288 5322 5289 return seq_open(filp, &tracing_eval_map_seq_ops); 5323 5290 } ··· 5843 5804 { 5844 5805 struct trace_array *tr = inode->i_private; 5845 5806 struct trace_iterator *iter; 5846 - int ret = 0; 5807 + int ret; 5847 5808 5848 - if (tracing_disabled) 5849 - return -ENODEV; 5850 - 5851 - if (trace_array_get(tr) < 0) 5852 - return -ENODEV; 5809 + ret = tracing_check_open_get_tr(tr); 5810 + if (ret) 5811 + return ret; 5853 5812 5854 5813 mutex_lock(&trace_types_lock); 5855 5814 ··· 6036 5999 sizeof(struct trace_iterator) - 6037 6000 offsetof(struct trace_iterator, seq)); 6038 6001 
cpumask_clear(iter->started); 6002 + trace_seq_init(&iter->seq); 6039 6003 iter->pos = -1; 6040 6004 6041 6005 trace_event_read_lock(); ··· 6585 6547 struct trace_array *tr = inode->i_private; 6586 6548 int ret; 6587 6549 6588 - if (tracing_disabled) 6589 - return -ENODEV; 6590 - 6591 - if (trace_array_get(tr)) 6592 - return -ENODEV; 6550 + ret = tracing_check_open_get_tr(tr); 6551 + if (ret) 6552 + return ret; 6593 6553 6594 6554 ret = single_open(file, tracing_clock_show, inode->i_private); 6595 6555 if (ret < 0) ··· 6617 6581 struct trace_array *tr = inode->i_private; 6618 6582 int ret; 6619 6583 6620 - if (tracing_disabled) 6621 - return -ENODEV; 6622 - 6623 - if (trace_array_get(tr)) 6624 - return -ENODEV; 6584 + ret = tracing_check_open_get_tr(tr); 6585 + if (ret) 6586 + return ret; 6625 6587 6626 6588 ret = single_open(file, tracing_time_stamp_mode_show, inode->i_private); 6627 6589 if (ret < 0) ··· 6672 6638 struct trace_array *tr = inode->i_private; 6673 6639 struct trace_iterator *iter; 6674 6640 struct seq_file *m; 6675 - int ret = 0; 6641 + int ret; 6676 6642 6677 - if (trace_array_get(tr) < 0) 6678 - return -ENODEV; 6643 + ret = tracing_check_open_get_tr(tr); 6644 + if (ret) 6645 + return ret; 6679 6646 6680 6647 if (file->f_mode & FMODE_READ) { 6681 6648 iter = __tracing_open(inode, file, true); ··· 6821 6786 struct ftrace_buffer_info *info; 6822 6787 int ret; 6823 6788 6789 + /* The following checks for tracefs lockdown */ 6824 6790 ret = tracing_buffers_open(inode, filp); 6825 6791 if (ret < 0) 6826 6792 return ret; ··· 7141 7105 struct trace_array *tr = inode->i_private; 7142 7106 int ret = 0; 7143 7107 7144 - if (trace_array_get(tr) < 0) 7145 - return -ENODEV; 7108 + ret = tracing_check_open_get_tr(tr); 7109 + if (ret) 7110 + return ret; 7146 7111 7147 7112 /* If this file was opened for write, then erase contents */ 7148 7113 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) ··· 7194 7157 struct ftrace_buffer_info *info; 7195 7158 
int ret; 7196 7159 7197 - if (tracing_disabled) 7198 - return -ENODEV; 7199 - 7200 - if (trace_array_get(tr) < 0) 7201 - return -ENODEV; 7160 + ret = tracing_check_open_get_tr(tr); 7161 + if (ret) 7162 + return ret; 7202 7163 7203 7164 info = kzalloc(sizeof(*info), GFP_KERNEL); 7204 7165 if (!info) {
+2
kernel/trace/trace.h
··· 338 338 339 339 extern int trace_array_get(struct trace_array *tr); 340 340 extern void trace_array_put(struct trace_array *tr); 341 + extern int tracing_check_open_get_tr(struct trace_array *tr); 341 342 342 343 extern int tracing_set_time_stamp_abs(struct trace_array *tr, bool abs); 343 344 extern int tracing_set_clock(struct trace_array *tr, const char *clockstr); ··· 682 681 void tracing_reset_current(int cpu); 683 682 void tracing_reset_all_online_cpus(void); 684 683 int tracing_open_generic(struct inode *inode, struct file *filp); 684 + int tracing_open_generic_tr(struct inode *inode, struct file *filp); 685 685 bool tracing_is_disabled(void); 686 686 bool tracer_tracing_is_on(struct trace_array *tr); 687 687 void tracer_tracing_on(struct trace_array *tr);
+4
kernel/trace/trace_dynevent.c
··· 174 174 { 175 175 int ret; 176 176 177 + ret = tracing_check_open_get_tr(NULL); 178 + if (ret) 179 + return ret; 180 + 177 181 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) { 178 182 ret = dyn_events_release_all(NULL); 179 183 if (ret < 0)
+17 -18
kernel/trace/trace_events.c
··· 12 12 #define pr_fmt(fmt) fmt 13 13 14 14 #include <linux/workqueue.h> 15 + #include <linux/security.h> 15 16 #include <linux/spinlock.h> 16 17 #include <linux/kthread.h> 17 18 #include <linux/tracefs.h> ··· 1295 1294 struct seq_file *m; 1296 1295 int ret; 1297 1296 1297 + /* Do we want to hide event format files on tracefs lockdown? */ 1298 + 1298 1299 ret = seq_open(file, &trace_format_seq_ops); 1299 1300 if (ret < 0) 1300 1301 return ret; ··· 1443 1440 struct trace_array *tr = inode->i_private; 1444 1441 int ret; 1445 1442 1446 - if (tracing_is_disabled()) 1447 - return -ENODEV; 1448 - 1449 - if (trace_array_get(tr) < 0) 1450 - return -ENODEV; 1451 - 1452 1443 /* Make a temporary dir that has no system but points to tr */ 1453 1444 dir = kzalloc(sizeof(*dir), GFP_KERNEL); 1454 - if (!dir) { 1455 - trace_array_put(tr); 1445 + if (!dir) 1456 1446 return -ENOMEM; 1457 - } 1458 1447 1459 - dir->tr = tr; 1460 - 1461 - ret = tracing_open_generic(inode, filp); 1448 + ret = tracing_open_generic_tr(inode, filp); 1462 1449 if (ret < 0) { 1463 - trace_array_put(tr); 1464 1450 kfree(dir); 1465 1451 return ret; 1466 1452 } 1467 - 1453 + dir->tr = tr; 1468 1454 filp->private_data = dir; 1469 1455 1470 1456 return 0; ··· 1763 1771 struct seq_file *m; 1764 1772 int ret; 1765 1773 1774 + ret = security_locked_down(LOCKDOWN_TRACEFS); 1775 + if (ret) 1776 + return ret; 1777 + 1766 1778 ret = seq_open(file, seq_ops); 1767 1779 if (ret < 0) 1768 1780 return ret; ··· 1791 1795 { 1792 1796 const struct seq_operations *seq_ops = &show_event_seq_ops; 1793 1797 1798 + /* Checks for tracefs lockdown */ 1794 1799 return ftrace_event_open(inode, file, seq_ops); 1795 1800 } 1796 1801 ··· 1802 1805 struct trace_array *tr = inode->i_private; 1803 1806 int ret; 1804 1807 1805 - if (trace_array_get(tr) < 0) 1806 - return -ENODEV; 1808 + ret = tracing_check_open_get_tr(tr); 1809 + if (ret) 1810 + return ret; 1807 1811 1808 1812 if ((file->f_mode & FMODE_WRITE) && 1809 1813 (file->f_flags & 
O_TRUNC)) ··· 1823 1825 struct trace_array *tr = inode->i_private; 1824 1826 int ret; 1825 1827 1826 - if (trace_array_get(tr) < 0) 1827 - return -ENODEV; 1828 + ret = tracing_check_open_get_tr(tr); 1829 + if (ret) 1830 + return ret; 1828 1831 1829 1832 if ((file->f_mode & FMODE_WRITE) && 1830 1833 (file->f_flags & O_TRUNC))
+12 -1
kernel/trace/trace_events_hist.c
··· 7 7 8 8 #include <linux/module.h> 9 9 #include <linux/kallsyms.h> 10 + #include <linux/security.h> 10 11 #include <linux/mutex.h> 11 12 #include <linux/slab.h> 12 13 #include <linux/stacktrace.h> ··· 1449 1448 { 1450 1449 int ret; 1451 1450 1451 + ret = security_locked_down(LOCKDOWN_TRACEFS); 1452 + if (ret) 1453 + return ret; 1454 + 1452 1455 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) { 1453 1456 ret = dyn_events_release_all(&synth_event_ops); 1454 1457 if (ret < 0) ··· 1685 1680 if (var_data) 1686 1681 return 0; 1687 1682 1688 - if (trace_array_get(tr) < 0) 1683 + if (tracing_check_open_get_tr(tr)) 1689 1684 return -ENODEV; 1690 1685 1691 1686 var_data = kzalloc(sizeof(*var_data), GFP_KERNEL); ··· 5520 5515 5521 5516 static int event_hist_open(struct inode *inode, struct file *file) 5522 5517 { 5518 + int ret; 5519 + 5520 + ret = security_locked_down(LOCKDOWN_TRACEFS); 5521 + if (ret) 5522 + return ret; 5523 + 5523 5524 return single_open(file, hist_show, file); 5524 5525 } 5525 5526
+7 -1
kernel/trace/trace_events_trigger.c
··· 5 5 * Copyright (C) 2013 Tom Zanussi <tom.zanussi@linux.intel.com> 6 6 */ 7 7 8 + #include <linux/security.h> 8 9 #include <linux/module.h> 9 10 #include <linux/ctype.h> 10 11 #include <linux/mutex.h> ··· 174 173 175 174 static int event_trigger_regex_open(struct inode *inode, struct file *file) 176 175 { 177 - int ret = 0; 176 + int ret; 177 + 178 + ret = security_locked_down(LOCKDOWN_TRACEFS); 179 + if (ret) 180 + return ret; 178 181 179 182 mutex_lock(&event_mutex); 180 183 ··· 297 292 static int 298 293 event_trigger_open(struct inode *inode, struct file *filp) 299 294 { 295 + /* Checks for tracefs lockdown */ 300 296 return event_trigger_regex_open(inode, filp); 301 297 } 302 298
+3 -1
kernel/trace/trace_hwlat.c
··· 150 150 if (enter) 151 151 nmi_ts_start = time_get(); 152 152 else 153 - nmi_total_ts = time_get() - nmi_ts_start; 153 + nmi_total_ts += time_get() - nmi_ts_start; 154 154 } 155 155 156 156 if (enter) ··· 256 256 /* Keep a running maximum ever recorded hardware latency */ 257 257 if (sample > tr->max_latency) 258 258 tr->max_latency = sample; 259 + if (outer_sample > tr->max_latency) 260 + tr->max_latency = outer_sample; 259 261 } 260 262 261 263 out:
+11 -1
kernel/trace/trace_kprobe.c
··· 7 7 */ 8 8 #define pr_fmt(fmt) "trace_kprobe: " fmt 9 9 10 + #include <linux/security.h> 10 11 #include <linux/module.h> 11 12 #include <linux/uaccess.h> 12 13 #include <linux/rculist.h> 13 14 #include <linux/error-injection.h> 14 - #include <linux/security.h> 15 15 16 16 #include <asm/setup.h> /* for COMMAND_LINE_SIZE */ 17 17 ··· 936 936 { 937 937 int ret; 938 938 939 + ret = security_locked_down(LOCKDOWN_TRACEFS); 940 + if (ret) 941 + return ret; 942 + 939 943 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) { 940 944 ret = dyn_events_release_all(&trace_kprobe_ops); 941 945 if (ret < 0) ··· 992 988 993 989 static int profile_open(struct inode *inode, struct file *file) 994 990 { 991 + int ret; 992 + 993 + ret = security_locked_down(LOCKDOWN_TRACEFS); 994 + if (ret) 995 + return ret; 996 + 995 997 return seq_open(file, &profile_seq_op); 996 998 } 997 999
+7
kernel/trace/trace_printk.c
··· 6 6 * 7 7 */ 8 8 #include <linux/seq_file.h> 9 + #include <linux/security.h> 9 10 #include <linux/uaccess.h> 10 11 #include <linux/kernel.h> 11 12 #include <linux/ftrace.h> ··· 349 348 static int 350 349 ftrace_formats_open(struct inode *inode, struct file *file) 351 350 { 351 + int ret; 352 + 353 + ret = security_locked_down(LOCKDOWN_TRACEFS); 354 + if (ret) 355 + return ret; 356 + 352 357 return seq_open(file, &show_format_seq_ops); 353 358 } 354 359
+8
kernel/trace/trace_stack.c
··· 5 5 */ 6 6 #include <linux/sched/task_stack.h> 7 7 #include <linux/stacktrace.h> 8 + #include <linux/security.h> 8 9 #include <linux/kallsyms.h> 9 10 #include <linux/seq_file.h> 10 11 #include <linux/spinlock.h> ··· 471 470 472 471 static int stack_trace_open(struct inode *inode, struct file *file) 473 472 { 473 + int ret; 474 + 475 + ret = security_locked_down(LOCKDOWN_TRACEFS); 476 + if (ret) 477 + return ret; 478 + 474 479 return seq_open(file, &stack_trace_seq_ops); 475 480 } 476 481 ··· 494 487 { 495 488 struct ftrace_ops *ops = inode->i_private; 496 489 490 + /* Checks for tracefs lockdown */ 497 491 return ftrace_regex_open(ops, FTRACE_ITER_FILTER, 498 492 inode, file); 499 493 }
+5 -1
kernel/trace/trace_stat.c
··· 9 9 * 10 10 */ 11 11 12 - 12 + #include <linux/security.h> 13 13 #include <linux/list.h> 14 14 #include <linux/slab.h> 15 15 #include <linux/rbtree.h> ··· 237 237 int ret; 238 238 struct seq_file *m; 239 239 struct stat_session *session = inode->i_private; 240 + 241 + ret = security_locked_down(LOCKDOWN_TRACEFS); 242 + if (ret) 243 + return ret; 240 244 241 245 ret = stat_seq_init(session); 242 246 if (ret)
+11
kernel/trace/trace_uprobe.c
··· 7 7 */ 8 8 #define pr_fmt(fmt) "trace_uprobe: " fmt 9 9 10 + #include <linux/security.h> 10 11 #include <linux/ctype.h> 11 12 #include <linux/module.h> 12 13 #include <linux/uaccess.h> ··· 770 769 { 771 770 int ret; 772 771 772 + ret = security_locked_down(LOCKDOWN_TRACEFS); 773 + if (ret) 774 + return ret; 775 + 773 776 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) { 774 777 ret = dyn_events_release_all(&trace_uprobe_ops); 775 778 if (ret) ··· 823 818 824 819 static int profile_open(struct inode *inode, struct file *file) 825 820 { 821 + int ret; 822 + 823 + ret = security_locked_down(LOCKDOWN_TRACEFS); 824 + if (ret) 825 + return ret; 826 + 826 827 return seq_open(file, &profile_seq_op); 827 828 } 828 829
+1 -4
scripts/recordmcount.h
··· 389 389 mcountsym = get_mcountsym(sym0, relp, str0); 390 390 391 391 if (mcountsym == Elf_r_sym(relp) && !is_fake_mcount(relp)) { 392 - if (make_nop) { 392 + if (make_nop) 393 393 ret = make_nop((void *)ehdr, _w(shdr->sh_offset) + _w(relp->r_offset)); 394 - if (ret < 0) 395 - return -1; 396 - } 397 394 if (warn_on_notrace_sect && !once) { 398 395 printf("Section %s has mcount callers being ignored\n", 399 396 txtname);