at v2.6.26-rc7 3056 lines 72 kB view raw
1/* 2 * linux/fs/proc/base.c 3 * 4 * Copyright (C) 1991, 1992 Linus Torvalds 5 * 6 * proc base directory handling functions 7 * 8 * 1999, Al Viro. Rewritten. Now it covers the whole per-process part. 9 * Instead of using magical inumbers to determine the kind of object 10 * we allocate and fill in-core inodes upon lookup. They don't even 11 * go into icache. We cache the reference to task_struct upon lookup too. 12 * Eventually it should become a filesystem in its own. We don't use the 13 * rest of procfs anymore. 14 * 15 * 16 * Changelog: 17 * 17-Jan-2005 18 * Allan Bezerra 19 * Bruna Moreira <bruna.moreira@indt.org.br> 20 * Edjard Mota <edjard.mota@indt.org.br> 21 * Ilias Biris <ilias.biris@indt.org.br> 22 * Mauricio Lin <mauricio.lin@indt.org.br> 23 * 24 * Embedded Linux Lab - 10LE Instituto Nokia de Tecnologia - INdT 25 * 26 * A new process specific entry (smaps) included in /proc. It shows the 27 * size of rss for each memory area. The maps entry lacks information 28 * about physical memory size (rss) for each mapped file, i.e., 29 * rss information for executables and library files. 30 * This additional information is useful for any tools that need to know 31 * about physical memory consumption for a process specific library. 32 * 33 * Changelog: 34 * 21-Feb-2005 35 * Embedded Linux Lab - 10LE Instituto Nokia de Tecnologia - INdT 36 * Pud inclusion in the page table walking. 37 * 38 * ChangeLog: 39 * 10-Mar-2005 40 * 10LE Instituto Nokia de Tecnologia - INdT: 41 * A better way to walks through the page table as suggested by Hugh Dickins. 42 * 43 * Simo Piiroinen <simo.piiroinen@nokia.com>: 44 * Smaps information related to shared, private, clean and dirty pages. 45 * 46 * Paul Mundt <paul.mundt@nokia.com>: 47 * Overall revision about smaps. 48 */ 49 50#include <asm/uaccess.h> 51 52#include <linux/errno.h> 53#include <linux/time.h> 54#include <linux/proc_fs.h> 55#include <linux/stat.h> 56#include <linux/init.h> 57#include <linux/capability.h> 58#include <linux/file.h> 59#include <linux/fdtable.h> 60#include <linux/string.h> 61#include <linux/seq_file.h> 62#include <linux/namei.h> 63#include <linux/mnt_namespace.h> 64#include <linux/mm.h> 65#include <linux/rcupdate.h> 66#include <linux/kallsyms.h> 67#include <linux/resource.h> 68#include <linux/module.h> 69#include <linux/mount.h> 70#include <linux/security.h> 71#include <linux/ptrace.h> 72#include <linux/cgroup.h> 73#include <linux/cpuset.h> 74#include <linux/audit.h> 75#include <linux/poll.h> 76#include <linux/nsproxy.h> 77#include <linux/oom.h> 78#include <linux/elf.h> 79#include <linux/pid_namespace.h> 80#include "internal.h" 81 82/* NOTE: 83 * Implementing inode permission operations in /proc is almost 84 * certainly an error. Permission checks need to happen during 85 * each system call not at open time. The reason is that most of 86 * what we wish to check for permissions in /proc varies at runtime. 87 * 88 * The classic example of a problem is opening file descriptors 89 * in /proc for a task before it execs a suid executable. 
90 */ 91 92struct pid_entry { 93 char *name; 94 int len; 95 mode_t mode; 96 const struct inode_operations *iop; 97 const struct file_operations *fop; 98 union proc_op op; 99}; 100 101#define NOD(NAME, MODE, IOP, FOP, OP) { \ 102 .name = (NAME), \ 103 .len = sizeof(NAME) - 1, \ 104 .mode = MODE, \ 105 .iop = IOP, \ 106 .fop = FOP, \ 107 .op = OP, \ 108} 109 110#define DIR(NAME, MODE, OTYPE) \ 111 NOD(NAME, (S_IFDIR|(MODE)), \ 112 &proc_##OTYPE##_inode_operations, &proc_##OTYPE##_operations, \ 113 {} ) 114#define LNK(NAME, OTYPE) \ 115 NOD(NAME, (S_IFLNK|S_IRWXUGO), \ 116 &proc_pid_link_inode_operations, NULL, \ 117 { .proc_get_link = &proc_##OTYPE##_link } ) 118#define REG(NAME, MODE, OTYPE) \ 119 NOD(NAME, (S_IFREG|(MODE)), NULL, \ 120 &proc_##OTYPE##_operations, {}) 121#define INF(NAME, MODE, OTYPE) \ 122 NOD(NAME, (S_IFREG|(MODE)), \ 123 NULL, &proc_info_file_operations, \ 124 { .proc_read = &proc_##OTYPE } ) 125#define ONE(NAME, MODE, OTYPE) \ 126 NOD(NAME, (S_IFREG|(MODE)), \ 127 NULL, &proc_single_file_operations, \ 128 { .proc_show = &proc_##OTYPE } ) 129 130/* 131 * Count the number of hardlinks for the pid_entry table, excluding the . 132 * and .. links. 133 */ 134static unsigned int pid_entry_count_dirs(const struct pid_entry *entries, 135 unsigned int n) 136{ 137 unsigned int i; 138 unsigned int count; 139 140 count = 0; 141 for (i = 0; i < n; ++i) { 142 if (S_ISDIR(entries[i].mode)) 143 ++count; 144 } 145 146 return count; 147} 148 149int maps_protect; 150EXPORT_SYMBOL(maps_protect); 151 152static struct fs_struct *get_fs_struct(struct task_struct *task) 153{ 154 struct fs_struct *fs; 155 task_lock(task); 156 fs = task->fs; 157 if(fs) 158 atomic_inc(&fs->count); 159 task_unlock(task); 160 return fs; 161} 162 163static int get_nr_threads(struct task_struct *tsk) 164{ 165 /* Must be called with the rcu_read_lock held */ 166 unsigned long flags; 167 int count = 0; 168 169 if (lock_task_sighand(tsk, &flags)) { 170 count = atomic_read(&tsk->signal->count); 171 unlock_task_sighand(tsk, &flags); 172 } 173 return count; 174} 175 176static int proc_cwd_link(struct inode *inode, struct path *path) 177{ 178 struct task_struct *task = get_proc_task(inode); 179 struct fs_struct *fs = NULL; 180 int result = -ENOENT; 181 182 if (task) { 183 fs = get_fs_struct(task); 184 put_task_struct(task); 185 } 186 if (fs) { 187 read_lock(&fs->lock); 188 *path = fs->pwd; 189 path_get(&fs->pwd); 190 read_unlock(&fs->lock); 191 result = 0; 192 put_fs_struct(fs); 193 } 194 return result; 195} 196 197static int proc_root_link(struct inode *inode, struct path *path) 198{ 199 struct task_struct *task = get_proc_task(inode); 200 struct fs_struct *fs = NULL; 201 int result = -ENOENT; 202 203 if (task) { 204 fs = get_fs_struct(task); 205 put_task_struct(task); 206 } 207 if (fs) { 208 read_lock(&fs->lock); 209 *path = fs->root; 210 path_get(&fs->root); 211 read_unlock(&fs->lock); 212 result = 0; 213 put_fs_struct(fs); 214 } 215 return result; 216} 217 218/* 219 * Return zero if current may access user memory in @task, -error if not. 220 */ 221static int check_mem_permission(struct task_struct *task) 222{ 223 /* 224 * A task can always look at itself, in case it chooses 225 * to use system calls instead of load instructions. 226 */ 227 if (task == current) 228 return 0; 229 230 /* 231 * If current is actively ptrace'ing, and would also be 232 * permitted to freshly attach with ptrace now, permit it. 
233 */ 234 if (task->parent == current && (task->ptrace & PT_PTRACED) && 235 task_is_stopped_or_traced(task) && 236 ptrace_may_attach(task)) 237 return 0; 238 239 /* 240 * Noone else is allowed. 241 */ 242 return -EPERM; 243} 244 245struct mm_struct *mm_for_maps(struct task_struct *task) 246{ 247 struct mm_struct *mm = get_task_mm(task); 248 if (!mm) 249 return NULL; 250 down_read(&mm->mmap_sem); 251 task_lock(task); 252 if (task->mm != mm) 253 goto out; 254 if (task->mm != current->mm && __ptrace_may_attach(task) < 0) 255 goto out; 256 task_unlock(task); 257 return mm; 258out: 259 task_unlock(task); 260 up_read(&mm->mmap_sem); 261 mmput(mm); 262 return NULL; 263} 264 265static int proc_pid_cmdline(struct task_struct *task, char * buffer) 266{ 267 int res = 0; 268 unsigned int len; 269 struct mm_struct *mm = get_task_mm(task); 270 if (!mm) 271 goto out; 272 if (!mm->arg_end) 273 goto out_mm; /* Shh! No looking before we're done */ 274 275 len = mm->arg_end - mm->arg_start; 276 277 if (len > PAGE_SIZE) 278 len = PAGE_SIZE; 279 280 res = access_process_vm(task, mm->arg_start, buffer, len, 0); 281 282 // If the nul at the end of args has been overwritten, then 283 // assume application is using setproctitle(3). 284 if (res > 0 && buffer[res-1] != '\0' && len < PAGE_SIZE) { 285 len = strnlen(buffer, res); 286 if (len < res) { 287 res = len; 288 } else { 289 len = mm->env_end - mm->env_start; 290 if (len > PAGE_SIZE - res) 291 len = PAGE_SIZE - res; 292 res += access_process_vm(task, mm->env_start, buffer+res, len, 0); 293 res = strnlen(buffer, res); 294 } 295 } 296out_mm: 297 mmput(mm); 298out: 299 return res; 300} 301 302static int proc_pid_auxv(struct task_struct *task, char *buffer) 303{ 304 int res = 0; 305 struct mm_struct *mm = get_task_mm(task); 306 if (mm) { 307 unsigned int nwords = 0; 308 do 309 nwords += 2; 310 while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */ 311 res = nwords * sizeof(mm->saved_auxv[0]); 312 if (res > PAGE_SIZE) 313 res = PAGE_SIZE; 314 memcpy(buffer, mm->saved_auxv, res); 315 mmput(mm); 316 } 317 return res; 318} 319 320 321#ifdef CONFIG_KALLSYMS 322/* 323 * Provides a wchan file via kallsyms in a proper one-value-per-file format. 324 * Returns the resolved symbol. If that fails, simply return the address. 
325 */ 326static int proc_pid_wchan(struct task_struct *task, char *buffer) 327{ 328 unsigned long wchan; 329 char symname[KSYM_NAME_LEN]; 330 331 wchan = get_wchan(task); 332 333 if (lookup_symbol_name(wchan, symname) < 0) 334 return sprintf(buffer, "%lu", wchan); 335 else 336 return sprintf(buffer, "%s", symname); 337} 338#endif /* CONFIG_KALLSYMS */ 339 340#ifdef CONFIG_SCHEDSTATS 341/* 342 * Provides /proc/PID/schedstat 343 */ 344static int proc_pid_schedstat(struct task_struct *task, char *buffer) 345{ 346 return sprintf(buffer, "%llu %llu %lu\n", 347 task->sched_info.cpu_time, 348 task->sched_info.run_delay, 349 task->sched_info.pcount); 350} 351#endif 352 353#ifdef CONFIG_LATENCYTOP 354static int lstats_show_proc(struct seq_file *m, void *v) 355{ 356 int i; 357 struct inode *inode = m->private; 358 struct task_struct *task = get_proc_task(inode); 359 360 if (!task) 361 return -ESRCH; 362 seq_puts(m, "Latency Top version : v0.1\n"); 363 for (i = 0; i < 32; i++) { 364 if (task->latency_record[i].backtrace[0]) { 365 int q; 366 seq_printf(m, "%i %li %li ", 367 task->latency_record[i].count, 368 task->latency_record[i].time, 369 task->latency_record[i].max); 370 for (q = 0; q < LT_BACKTRACEDEPTH; q++) { 371 char sym[KSYM_NAME_LEN]; 372 char *c; 373 if (!task->latency_record[i].backtrace[q]) 374 break; 375 if (task->latency_record[i].backtrace[q] == ULONG_MAX) 376 break; 377 sprint_symbol(sym, task->latency_record[i].backtrace[q]); 378 c = strchr(sym, '+'); 379 if (c) 380 *c = 0; 381 seq_printf(m, "%s ", sym); 382 } 383 seq_printf(m, "\n"); 384 } 385 386 } 387 put_task_struct(task); 388 return 0; 389} 390 391static int lstats_open(struct inode *inode, struct file *file) 392{ 393 return single_open(file, lstats_show_proc, inode); 394} 395 396static ssize_t lstats_write(struct file *file, const char __user *buf, 397 size_t count, loff_t *offs) 398{ 399 struct task_struct *task = get_proc_task(file->f_dentry->d_inode); 400 401 if (!task) 402 return -ESRCH; 403 clear_all_latency_tracing(task); 404 put_task_struct(task); 405 406 return count; 407} 408 409static const struct file_operations proc_lstats_operations = { 410 .open = lstats_open, 411 .read = seq_read, 412 .write = lstats_write, 413 .llseek = seq_lseek, 414 .release = single_release, 415}; 416 417#endif 418 419/* The badness from the OOM killer */ 420unsigned long badness(struct task_struct *p, unsigned long uptime); 421static int proc_oom_score(struct task_struct *task, char *buffer) 422{ 423 unsigned long points; 424 struct timespec uptime; 425 426 do_posix_clock_monotonic_gettime(&uptime); 427 read_lock(&tasklist_lock); 428 points = badness(task, uptime.tv_sec); 429 read_unlock(&tasklist_lock); 430 return sprintf(buffer, "%lu\n", points); 431} 432 433struct limit_names { 434 char *name; 435 char *unit; 436}; 437 438static const struct limit_names lnames[RLIM_NLIMITS] = { 439 [RLIMIT_CPU] = {"Max cpu time", "ms"}, 440 [RLIMIT_FSIZE] = {"Max file size", "bytes"}, 441 [RLIMIT_DATA] = {"Max data size", "bytes"}, 442 [RLIMIT_STACK] = {"Max stack size", "bytes"}, 443 [RLIMIT_CORE] = {"Max core file size", "bytes"}, 444 [RLIMIT_RSS] = {"Max resident set", "bytes"}, 445 [RLIMIT_NPROC] = {"Max processes", "processes"}, 446 [RLIMIT_NOFILE] = {"Max open files", "files"}, 447 [RLIMIT_MEMLOCK] = {"Max locked memory", "bytes"}, 448 [RLIMIT_AS] = {"Max address space", "bytes"}, 449 [RLIMIT_LOCKS] = {"Max file locks", "locks"}, 450 [RLIMIT_SIGPENDING] = {"Max pending signals", "signals"}, 451 [RLIMIT_MSGQUEUE] = {"Max msgqueue size", "bytes"}, 
452 [RLIMIT_NICE] = {"Max nice priority", NULL}, 453 [RLIMIT_RTPRIO] = {"Max realtime priority", NULL}, 454 [RLIMIT_RTTIME] = {"Max realtime timeout", "us"}, 455}; 456 457/* Display limits for a process */ 458static int proc_pid_limits(struct task_struct *task, char *buffer) 459{ 460 unsigned int i; 461 int count = 0; 462 unsigned long flags; 463 char *bufptr = buffer; 464 465 struct rlimit rlim[RLIM_NLIMITS]; 466 467 rcu_read_lock(); 468 if (!lock_task_sighand(task,&flags)) { 469 rcu_read_unlock(); 470 return 0; 471 } 472 memcpy(rlim, task->signal->rlim, sizeof(struct rlimit) * RLIM_NLIMITS); 473 unlock_task_sighand(task, &flags); 474 rcu_read_unlock(); 475 476 /* 477 * print the file header 478 */ 479 count += sprintf(&bufptr[count], "%-25s %-20s %-20s %-10s\n", 480 "Limit", "Soft Limit", "Hard Limit", "Units"); 481 482 for (i = 0; i < RLIM_NLIMITS; i++) { 483 if (rlim[i].rlim_cur == RLIM_INFINITY) 484 count += sprintf(&bufptr[count], "%-25s %-20s ", 485 lnames[i].name, "unlimited"); 486 else 487 count += sprintf(&bufptr[count], "%-25s %-20lu ", 488 lnames[i].name, rlim[i].rlim_cur); 489 490 if (rlim[i].rlim_max == RLIM_INFINITY) 491 count += sprintf(&bufptr[count], "%-20s ", "unlimited"); 492 else 493 count += sprintf(&bufptr[count], "%-20lu ", 494 rlim[i].rlim_max); 495 496 if (lnames[i].unit) 497 count += sprintf(&bufptr[count], "%-10s\n", 498 lnames[i].unit); 499 else 500 count += sprintf(&bufptr[count], "\n"); 501 } 502 503 return count; 504} 505 506/************************************************************************/ 507/* Here the fs part begins */ 508/************************************************************************/ 509 510/* permission checks */ 511static int proc_fd_access_allowed(struct inode *inode) 512{ 513 struct task_struct *task; 514 int allowed = 0; 515 /* Allow access to a task's file descriptors if it is us or we 516 * may use ptrace attach to the process and find out that 517 * information. 
518 */ 519 task = get_proc_task(inode); 520 if (task) { 521 allowed = ptrace_may_attach(task); 522 put_task_struct(task); 523 } 524 return allowed; 525} 526 527static int proc_setattr(struct dentry *dentry, struct iattr *attr) 528{ 529 int error; 530 struct inode *inode = dentry->d_inode; 531 532 if (attr->ia_valid & ATTR_MODE) 533 return -EPERM; 534 535 error = inode_change_ok(inode, attr); 536 if (!error) 537 error = inode_setattr(inode, attr); 538 return error; 539} 540 541static const struct inode_operations proc_def_inode_operations = { 542 .setattr = proc_setattr, 543}; 544 545static int mounts_open_common(struct inode *inode, struct file *file, 546 const struct seq_operations *op) 547{ 548 struct task_struct *task = get_proc_task(inode); 549 struct nsproxy *nsp; 550 struct mnt_namespace *ns = NULL; 551 struct fs_struct *fs = NULL; 552 struct path root; 553 struct proc_mounts *p; 554 int ret = -EINVAL; 555 556 if (task) { 557 rcu_read_lock(); 558 nsp = task_nsproxy(task); 559 if (nsp) { 560 ns = nsp->mnt_ns; 561 if (ns) 562 get_mnt_ns(ns); 563 } 564 rcu_read_unlock(); 565 if (ns) 566 fs = get_fs_struct(task); 567 put_task_struct(task); 568 } 569 570 if (!ns) 571 goto err; 572 if (!fs) 573 goto err_put_ns; 574 575 read_lock(&fs->lock); 576 root = fs->root; 577 path_get(&root); 578 read_unlock(&fs->lock); 579 put_fs_struct(fs); 580 581 ret = -ENOMEM; 582 p = kmalloc(sizeof(struct proc_mounts), GFP_KERNEL); 583 if (!p) 584 goto err_put_path; 585 586 file->private_data = &p->m; 587 ret = seq_open(file, op); 588 if (ret) 589 goto err_free; 590 591 p->m.private = p; 592 p->ns = ns; 593 p->root = root; 594 p->event = ns->event; 595 596 return 0; 597 598 err_free: 599 kfree(p); 600 err_put_path: 601 path_put(&root); 602 err_put_ns: 603 put_mnt_ns(ns); 604 err: 605 return ret; 606} 607 608static int mounts_release(struct inode *inode, struct file *file) 609{ 610 struct proc_mounts *p = file->private_data; 611 path_put(&p->root); 612 put_mnt_ns(p->ns); 613 return seq_release(inode, file); 614} 615 616static unsigned mounts_poll(struct file *file, poll_table *wait) 617{ 618 struct proc_mounts *p = file->private_data; 619 struct mnt_namespace *ns = p->ns; 620 unsigned res = 0; 621 622 poll_wait(file, &ns->poll, wait); 623 624 spin_lock(&vfsmount_lock); 625 if (p->event != ns->event) { 626 p->event = ns->event; 627 res = POLLERR; 628 } 629 spin_unlock(&vfsmount_lock); 630 631 return res; 632} 633 634static int mounts_open(struct inode *inode, struct file *file) 635{ 636 return mounts_open_common(inode, file, &mounts_op); 637} 638 639static const struct file_operations proc_mounts_operations = { 640 .open = mounts_open, 641 .read = seq_read, 642 .llseek = seq_lseek, 643 .release = mounts_release, 644 .poll = mounts_poll, 645}; 646 647static int mountinfo_open(struct inode *inode, struct file *file) 648{ 649 return mounts_open_common(inode, file, &mountinfo_op); 650} 651 652static const struct file_operations proc_mountinfo_operations = { 653 .open = mountinfo_open, 654 .read = seq_read, 655 .llseek = seq_lseek, 656 .release = mounts_release, 657 .poll = mounts_poll, 658}; 659 660static int mountstats_open(struct inode *inode, struct file *file) 661{ 662 return mounts_open_common(inode, file, &mountstats_op); 663} 664 665static const struct file_operations proc_mountstats_operations = { 666 .open = mountstats_open, 667 .read = seq_read, 668 .llseek = seq_lseek, 669 .release = mounts_release, 670}; 671 672#define PROC_BLOCK_SIZE (3*1024) /* 4K page size but our output routines use some slack for 
overruns */ 673 674static ssize_t proc_info_read(struct file * file, char __user * buf, 675 size_t count, loff_t *ppos) 676{ 677 struct inode * inode = file->f_path.dentry->d_inode; 678 unsigned long page; 679 ssize_t length; 680 struct task_struct *task = get_proc_task(inode); 681 682 length = -ESRCH; 683 if (!task) 684 goto out_no_task; 685 686 if (count > PROC_BLOCK_SIZE) 687 count = PROC_BLOCK_SIZE; 688 689 length = -ENOMEM; 690 if (!(page = __get_free_page(GFP_TEMPORARY))) 691 goto out; 692 693 length = PROC_I(inode)->op.proc_read(task, (char*)page); 694 695 if (length >= 0) 696 length = simple_read_from_buffer(buf, count, ppos, (char *)page, length); 697 free_page(page); 698out: 699 put_task_struct(task); 700out_no_task: 701 return length; 702} 703 704static const struct file_operations proc_info_file_operations = { 705 .read = proc_info_read, 706}; 707 708static int proc_single_show(struct seq_file *m, void *v) 709{ 710 struct inode *inode = m->private; 711 struct pid_namespace *ns; 712 struct pid *pid; 713 struct task_struct *task; 714 int ret; 715 716 ns = inode->i_sb->s_fs_info; 717 pid = proc_pid(inode); 718 task = get_pid_task(pid, PIDTYPE_PID); 719 if (!task) 720 return -ESRCH; 721 722 ret = PROC_I(inode)->op.proc_show(m, ns, pid, task); 723 724 put_task_struct(task); 725 return ret; 726} 727 728static int proc_single_open(struct inode *inode, struct file *filp) 729{ 730 int ret; 731 ret = single_open(filp, proc_single_show, NULL); 732 if (!ret) { 733 struct seq_file *m = filp->private_data; 734 735 m->private = inode; 736 } 737 return ret; 738} 739 740static const struct file_operations proc_single_file_operations = { 741 .open = proc_single_open, 742 .read = seq_read, 743 .llseek = seq_lseek, 744 .release = single_release, 745}; 746 747static int mem_open(struct inode* inode, struct file* file) 748{ 749 file->private_data = (void*)((long)current->self_exec_id); 750 return 0; 751} 752 753static ssize_t mem_read(struct file * file, char __user * buf, 754 size_t count, loff_t *ppos) 755{ 756 struct task_struct *task = get_proc_task(file->f_path.dentry->d_inode); 757 char *page; 758 unsigned long src = *ppos; 759 int ret = -ESRCH; 760 struct mm_struct *mm; 761 762 if (!task) 763 goto out_no_task; 764 765 if (check_mem_permission(task)) 766 goto out; 767 768 ret = -ENOMEM; 769 page = (char *)__get_free_page(GFP_TEMPORARY); 770 if (!page) 771 goto out; 772 773 ret = 0; 774 775 mm = get_task_mm(task); 776 if (!mm) 777 goto out_free; 778 779 ret = -EIO; 780 781 if (file->private_data != (void*)((long)current->self_exec_id)) 782 goto out_put; 783 784 ret = 0; 785 786 while (count > 0) { 787 int this_len, retval; 788 789 this_len = (count > PAGE_SIZE) ? 
PAGE_SIZE : count; 790 retval = access_process_vm(task, src, page, this_len, 0); 791 if (!retval || check_mem_permission(task)) { 792 if (!ret) 793 ret = -EIO; 794 break; 795 } 796 797 if (copy_to_user(buf, page, retval)) { 798 ret = -EFAULT; 799 break; 800 } 801 802 ret += retval; 803 src += retval; 804 buf += retval; 805 count -= retval; 806 } 807 *ppos = src; 808 809out_put: 810 mmput(mm); 811out_free: 812 free_page((unsigned long) page); 813out: 814 put_task_struct(task); 815out_no_task: 816 return ret; 817} 818 819#define mem_write NULL 820 821#ifndef mem_write 822/* This is a security hazard */ 823static ssize_t mem_write(struct file * file, const char __user *buf, 824 size_t count, loff_t *ppos) 825{ 826 int copied; 827 char *page; 828 struct task_struct *task = get_proc_task(file->f_path.dentry->d_inode); 829 unsigned long dst = *ppos; 830 831 copied = -ESRCH; 832 if (!task) 833 goto out_no_task; 834 835 if (check_mem_permission(task)) 836 goto out; 837 838 copied = -ENOMEM; 839 page = (char *)__get_free_page(GFP_TEMPORARY); 840 if (!page) 841 goto out; 842 843 copied = 0; 844 while (count > 0) { 845 int this_len, retval; 846 847 this_len = (count > PAGE_SIZE) ? PAGE_SIZE : count; 848 if (copy_from_user(page, buf, this_len)) { 849 copied = -EFAULT; 850 break; 851 } 852 retval = access_process_vm(task, dst, page, this_len, 1); 853 if (!retval) { 854 if (!copied) 855 copied = -EIO; 856 break; 857 } 858 copied += retval; 859 buf += retval; 860 dst += retval; 861 count -= retval; 862 } 863 *ppos = dst; 864 free_page((unsigned long) page); 865out: 866 put_task_struct(task); 867out_no_task: 868 return copied; 869} 870#endif 871 872loff_t mem_lseek(struct file *file, loff_t offset, int orig) 873{ 874 switch (orig) { 875 case 0: 876 file->f_pos = offset; 877 break; 878 case 1: 879 file->f_pos += offset; 880 break; 881 default: 882 return -EINVAL; 883 } 884 force_successful_syscall_return(); 885 return file->f_pos; 886} 887 888static const struct file_operations proc_mem_operations = { 889 .llseek = mem_lseek, 890 .read = mem_read, 891 .write = mem_write, 892 .open = mem_open, 893}; 894 895static ssize_t environ_read(struct file *file, char __user *buf, 896 size_t count, loff_t *ppos) 897{ 898 struct task_struct *task = get_proc_task(file->f_dentry->d_inode); 899 char *page; 900 unsigned long src = *ppos; 901 int ret = -ESRCH; 902 struct mm_struct *mm; 903 904 if (!task) 905 goto out_no_task; 906 907 if (!ptrace_may_attach(task)) 908 goto out; 909 910 ret = -ENOMEM; 911 page = (char *)__get_free_page(GFP_TEMPORARY); 912 if (!page) 913 goto out; 914 915 ret = 0; 916 917 mm = get_task_mm(task); 918 if (!mm) 919 goto out_free; 920 921 while (count > 0) { 922 int this_len, retval, max_len; 923 924 this_len = mm->env_end - (mm->env_start + src); 925 926 if (this_len <= 0) 927 break; 928 929 max_len = (count > PAGE_SIZE) ? PAGE_SIZE : count; 930 this_len = (this_len > max_len) ? 
max_len : this_len; 931 932 retval = access_process_vm(task, (mm->env_start + src), 933 page, this_len, 0); 934 935 if (retval <= 0) { 936 ret = retval; 937 break; 938 } 939 940 if (copy_to_user(buf, page, retval)) { 941 ret = -EFAULT; 942 break; 943 } 944 945 ret += retval; 946 src += retval; 947 buf += retval; 948 count -= retval; 949 } 950 *ppos = src; 951 952 mmput(mm); 953out_free: 954 free_page((unsigned long) page); 955out: 956 put_task_struct(task); 957out_no_task: 958 return ret; 959} 960 961static const struct file_operations proc_environ_operations = { 962 .read = environ_read, 963}; 964 965static ssize_t oom_adjust_read(struct file *file, char __user *buf, 966 size_t count, loff_t *ppos) 967{ 968 struct task_struct *task = get_proc_task(file->f_path.dentry->d_inode); 969 char buffer[PROC_NUMBUF]; 970 size_t len; 971 int oom_adjust; 972 973 if (!task) 974 return -ESRCH; 975 oom_adjust = task->oomkilladj; 976 put_task_struct(task); 977 978 len = snprintf(buffer, sizeof(buffer), "%i\n", oom_adjust); 979 980 return simple_read_from_buffer(buf, count, ppos, buffer, len); 981} 982 983static ssize_t oom_adjust_write(struct file *file, const char __user *buf, 984 size_t count, loff_t *ppos) 985{ 986 struct task_struct *task; 987 char buffer[PROC_NUMBUF], *end; 988 int oom_adjust; 989 990 memset(buffer, 0, sizeof(buffer)); 991 if (count > sizeof(buffer) - 1) 992 count = sizeof(buffer) - 1; 993 if (copy_from_user(buffer, buf, count)) 994 return -EFAULT; 995 oom_adjust = simple_strtol(buffer, &end, 0); 996 if ((oom_adjust < OOM_ADJUST_MIN || oom_adjust > OOM_ADJUST_MAX) && 997 oom_adjust != OOM_DISABLE) 998 return -EINVAL; 999 if (*end == '\n') 1000 end++; 1001 task = get_proc_task(file->f_path.dentry->d_inode); 1002 if (!task) 1003 return -ESRCH; 1004 if (oom_adjust < task->oomkilladj && !capable(CAP_SYS_RESOURCE)) { 1005 put_task_struct(task); 1006 return -EACCES; 1007 } 1008 task->oomkilladj = oom_adjust; 1009 put_task_struct(task); 1010 if (end - buffer == 0) 1011 return -EIO; 1012 return end - buffer; 1013} 1014 1015static const struct file_operations proc_oom_adjust_operations = { 1016 .read = oom_adjust_read, 1017 .write = oom_adjust_write, 1018}; 1019 1020#ifdef CONFIG_AUDITSYSCALL 1021#define TMPBUFLEN 21 1022static ssize_t proc_loginuid_read(struct file * file, char __user * buf, 1023 size_t count, loff_t *ppos) 1024{ 1025 struct inode * inode = file->f_path.dentry->d_inode; 1026 struct task_struct *task = get_proc_task(inode); 1027 ssize_t length; 1028 char tmpbuf[TMPBUFLEN]; 1029 1030 if (!task) 1031 return -ESRCH; 1032 length = scnprintf(tmpbuf, TMPBUFLEN, "%u", 1033 audit_get_loginuid(task)); 1034 put_task_struct(task); 1035 return simple_read_from_buffer(buf, count, ppos, tmpbuf, length); 1036} 1037 1038static ssize_t proc_loginuid_write(struct file * file, const char __user * buf, 1039 size_t count, loff_t *ppos) 1040{ 1041 struct inode * inode = file->f_path.dentry->d_inode; 1042 char *page, *tmp; 1043 ssize_t length; 1044 uid_t loginuid; 1045 1046 if (!capable(CAP_AUDIT_CONTROL)) 1047 return -EPERM; 1048 1049 if (current != pid_task(proc_pid(inode), PIDTYPE_PID)) 1050 return -EPERM; 1051 1052 if (count >= PAGE_SIZE) 1053 count = PAGE_SIZE - 1; 1054 1055 if (*ppos != 0) { 1056 /* No partial writes. 
*/ 1057 return -EINVAL; 1058 } 1059 page = (char*)__get_free_page(GFP_TEMPORARY); 1060 if (!page) 1061 return -ENOMEM; 1062 length = -EFAULT; 1063 if (copy_from_user(page, buf, count)) 1064 goto out_free_page; 1065 1066 page[count] = '\0'; 1067 loginuid = simple_strtoul(page, &tmp, 10); 1068 if (tmp == page) { 1069 length = -EINVAL; 1070 goto out_free_page; 1071 1072 } 1073 length = audit_set_loginuid(current, loginuid); 1074 if (likely(length == 0)) 1075 length = count; 1076 1077out_free_page: 1078 free_page((unsigned long) page); 1079 return length; 1080} 1081 1082static const struct file_operations proc_loginuid_operations = { 1083 .read = proc_loginuid_read, 1084 .write = proc_loginuid_write, 1085}; 1086 1087static ssize_t proc_sessionid_read(struct file * file, char __user * buf, 1088 size_t count, loff_t *ppos) 1089{ 1090 struct inode * inode = file->f_path.dentry->d_inode; 1091 struct task_struct *task = get_proc_task(inode); 1092 ssize_t length; 1093 char tmpbuf[TMPBUFLEN]; 1094 1095 if (!task) 1096 return -ESRCH; 1097 length = scnprintf(tmpbuf, TMPBUFLEN, "%u", 1098 audit_get_sessionid(task)); 1099 put_task_struct(task); 1100 return simple_read_from_buffer(buf, count, ppos, tmpbuf, length); 1101} 1102 1103static const struct file_operations proc_sessionid_operations = { 1104 .read = proc_sessionid_read, 1105}; 1106#endif 1107 1108#ifdef CONFIG_FAULT_INJECTION 1109static ssize_t proc_fault_inject_read(struct file * file, char __user * buf, 1110 size_t count, loff_t *ppos) 1111{ 1112 struct task_struct *task = get_proc_task(file->f_dentry->d_inode); 1113 char buffer[PROC_NUMBUF]; 1114 size_t len; 1115 int make_it_fail; 1116 1117 if (!task) 1118 return -ESRCH; 1119 make_it_fail = task->make_it_fail; 1120 put_task_struct(task); 1121 1122 len = snprintf(buffer, sizeof(buffer), "%i\n", make_it_fail); 1123 1124 return simple_read_from_buffer(buf, count, ppos, buffer, len); 1125} 1126 1127static ssize_t proc_fault_inject_write(struct file * file, 1128 const char __user * buf, size_t count, loff_t *ppos) 1129{ 1130 struct task_struct *task; 1131 char buffer[PROC_NUMBUF], *end; 1132 int make_it_fail; 1133 1134 if (!capable(CAP_SYS_RESOURCE)) 1135 return -EPERM; 1136 memset(buffer, 0, sizeof(buffer)); 1137 if (count > sizeof(buffer) - 1) 1138 count = sizeof(buffer) - 1; 1139 if (copy_from_user(buffer, buf, count)) 1140 return -EFAULT; 1141 make_it_fail = simple_strtol(buffer, &end, 0); 1142 if (*end == '\n') 1143 end++; 1144 task = get_proc_task(file->f_dentry->d_inode); 1145 if (!task) 1146 return -ESRCH; 1147 task->make_it_fail = make_it_fail; 1148 put_task_struct(task); 1149 if (end - buffer == 0) 1150 return -EIO; 1151 return end - buffer; 1152} 1153 1154static const struct file_operations proc_fault_inject_operations = { 1155 .read = proc_fault_inject_read, 1156 .write = proc_fault_inject_write, 1157}; 1158#endif 1159 1160 1161#ifdef CONFIG_SCHED_DEBUG 1162/* 1163 * Print out various scheduling related per-task fields: 1164 */ 1165static int sched_show(struct seq_file *m, void *v) 1166{ 1167 struct inode *inode = m->private; 1168 struct task_struct *p; 1169 1170 WARN_ON(!inode); 1171 1172 p = get_proc_task(inode); 1173 if (!p) 1174 return -ESRCH; 1175 proc_sched_show_task(p, m); 1176 1177 put_task_struct(p); 1178 1179 return 0; 1180} 1181 1182static ssize_t 1183sched_write(struct file *file, const char __user *buf, 1184 size_t count, loff_t *offset) 1185{ 1186 struct inode *inode = file->f_path.dentry->d_inode; 1187 struct task_struct *p; 1188 1189 WARN_ON(!inode); 1190 1191 p = 
get_proc_task(inode); 1192 if (!p) 1193 return -ESRCH; 1194 proc_sched_set_task(p); 1195 1196 put_task_struct(p); 1197 1198 return count; 1199} 1200 1201static int sched_open(struct inode *inode, struct file *filp) 1202{ 1203 int ret; 1204 1205 ret = single_open(filp, sched_show, NULL); 1206 if (!ret) { 1207 struct seq_file *m = filp->private_data; 1208 1209 m->private = inode; 1210 } 1211 return ret; 1212} 1213 1214static const struct file_operations proc_pid_sched_operations = { 1215 .open = sched_open, 1216 .read = seq_read, 1217 .write = sched_write, 1218 .llseek = seq_lseek, 1219 .release = single_release, 1220}; 1221 1222#endif 1223 1224/* 1225 * We added or removed a vma mapping the executable. The vmas are only mapped 1226 * during exec and are not mapped with the mmap system call. 1227 * Callers must hold down_write() on the mm's mmap_sem for these 1228 */ 1229void added_exe_file_vma(struct mm_struct *mm) 1230{ 1231 mm->num_exe_file_vmas++; 1232} 1233 1234void removed_exe_file_vma(struct mm_struct *mm) 1235{ 1236 mm->num_exe_file_vmas--; 1237 if ((mm->num_exe_file_vmas == 0) && mm->exe_file){ 1238 fput(mm->exe_file); 1239 mm->exe_file = NULL; 1240 } 1241 1242} 1243 1244void set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file) 1245{ 1246 if (new_exe_file) 1247 get_file(new_exe_file); 1248 if (mm->exe_file) 1249 fput(mm->exe_file); 1250 mm->exe_file = new_exe_file; 1251 mm->num_exe_file_vmas = 0; 1252} 1253 1254struct file *get_mm_exe_file(struct mm_struct *mm) 1255{ 1256 struct file *exe_file; 1257 1258 /* We need mmap_sem to protect against races with removal of 1259 * VM_EXECUTABLE vmas */ 1260 down_read(&mm->mmap_sem); 1261 exe_file = mm->exe_file; 1262 if (exe_file) 1263 get_file(exe_file); 1264 up_read(&mm->mmap_sem); 1265 return exe_file; 1266} 1267 1268void dup_mm_exe_file(struct mm_struct *oldmm, struct mm_struct *newmm) 1269{ 1270 /* It's safe to write the exe_file pointer without exe_file_lock because 1271 * this is called during fork when the task is not yet in /proc */ 1272 newmm->exe_file = get_mm_exe_file(oldmm); 1273} 1274 1275static int proc_exe_link(struct inode *inode, struct path *exe_path) 1276{ 1277 struct task_struct *task; 1278 struct mm_struct *mm; 1279 struct file *exe_file; 1280 1281 task = get_proc_task(inode); 1282 if (!task) 1283 return -ENOENT; 1284 mm = get_task_mm(task); 1285 put_task_struct(task); 1286 if (!mm) 1287 return -ENOENT; 1288 exe_file = get_mm_exe_file(mm); 1289 mmput(mm); 1290 if (exe_file) { 1291 *exe_path = exe_file->f_path; 1292 path_get(&exe_file->f_path); 1293 fput(exe_file); 1294 return 0; 1295 } else 1296 return -ENOENT; 1297} 1298 1299static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd) 1300{ 1301 struct inode *inode = dentry->d_inode; 1302 int error = -EACCES; 1303 1304 /* We don't need a base pointer in the /proc filesystem */ 1305 path_put(&nd->path); 1306 1307 /* Are we allowed to snoop on the tasks file descriptors? 
*/ 1308 if (!proc_fd_access_allowed(inode)) 1309 goto out; 1310 1311 error = PROC_I(inode)->op.proc_get_link(inode, &nd->path); 1312 nd->last_type = LAST_BIND; 1313out: 1314 return ERR_PTR(error); 1315} 1316 1317static int do_proc_readlink(struct path *path, char __user *buffer, int buflen) 1318{ 1319 char *tmp = (char*)__get_free_page(GFP_TEMPORARY); 1320 char *pathname; 1321 int len; 1322 1323 if (!tmp) 1324 return -ENOMEM; 1325 1326 pathname = d_path(path, tmp, PAGE_SIZE); 1327 len = PTR_ERR(pathname); 1328 if (IS_ERR(pathname)) 1329 goto out; 1330 len = tmp + PAGE_SIZE - 1 - pathname; 1331 1332 if (len > buflen) 1333 len = buflen; 1334 if (copy_to_user(buffer, pathname, len)) 1335 len = -EFAULT; 1336 out: 1337 free_page((unsigned long)tmp); 1338 return len; 1339} 1340 1341static int proc_pid_readlink(struct dentry * dentry, char __user * buffer, int buflen) 1342{ 1343 int error = -EACCES; 1344 struct inode *inode = dentry->d_inode; 1345 struct path path; 1346 1347 /* Are we allowed to snoop on the tasks file descriptors? */ 1348 if (!proc_fd_access_allowed(inode)) 1349 goto out; 1350 1351 error = PROC_I(inode)->op.proc_get_link(inode, &path); 1352 if (error) 1353 goto out; 1354 1355 error = do_proc_readlink(&path, buffer, buflen); 1356 path_put(&path); 1357out: 1358 return error; 1359} 1360 1361static const struct inode_operations proc_pid_link_inode_operations = { 1362 .readlink = proc_pid_readlink, 1363 .follow_link = proc_pid_follow_link, 1364 .setattr = proc_setattr, 1365}; 1366 1367 1368/* building an inode */ 1369 1370static int task_dumpable(struct task_struct *task) 1371{ 1372 int dumpable = 0; 1373 struct mm_struct *mm; 1374 1375 task_lock(task); 1376 mm = task->mm; 1377 if (mm) 1378 dumpable = get_dumpable(mm); 1379 task_unlock(task); 1380 if(dumpable == 1) 1381 return 1; 1382 return 0; 1383} 1384 1385 1386static struct inode *proc_pid_make_inode(struct super_block * sb, struct task_struct *task) 1387{ 1388 struct inode * inode; 1389 struct proc_inode *ei; 1390 1391 /* We need a new inode */ 1392 1393 inode = new_inode(sb); 1394 if (!inode) 1395 goto out; 1396 1397 /* Common stuff */ 1398 ei = PROC_I(inode); 1399 inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME; 1400 inode->i_op = &proc_def_inode_operations; 1401 1402 /* 1403 * grab the reference to task. 1404 */ 1405 ei->pid = get_task_pid(task, PIDTYPE_PID); 1406 if (!ei->pid) 1407 goto out_unlock; 1408 1409 inode->i_uid = 0; 1410 inode->i_gid = 0; 1411 if (task_dumpable(task)) { 1412 inode->i_uid = task->euid; 1413 inode->i_gid = task->egid; 1414 } 1415 security_task_to_inode(task, inode); 1416 1417out: 1418 return inode; 1419 1420out_unlock: 1421 iput(inode); 1422 return NULL; 1423} 1424 1425static int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat) 1426{ 1427 struct inode *inode = dentry->d_inode; 1428 struct task_struct *task; 1429 generic_fillattr(inode, stat); 1430 1431 rcu_read_lock(); 1432 stat->uid = 0; 1433 stat->gid = 0; 1434 task = pid_task(proc_pid(inode), PIDTYPE_PID); 1435 if (task) { 1436 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) || 1437 task_dumpable(task)) { 1438 stat->uid = task->euid; 1439 stat->gid = task->egid; 1440 } 1441 } 1442 rcu_read_unlock(); 1443 return 0; 1444} 1445 1446/* dentry stuff */ 1447 1448/* 1449 * Exceptional case: normally we are not allowed to unhash a busy 1450 * directory. In this case, however, we can do it - no aliasing problems 1451 * due to the way we treat inodes. 
1452 * 1453 * Rewrite the inode's ownerships here because the owning task may have 1454 * performed a setuid(), etc. 1455 * 1456 * Before the /proc/pid/status file was created the only way to read 1457 * the effective uid of a /process was to stat /proc/pid. Reading 1458 * /proc/pid/status is slow enough that procps and other packages 1459 * kept stating /proc/pid. To keep the rules in /proc simple I have 1460 * made this apply to all per process world readable and executable 1461 * directories. 1462 */ 1463static int pid_revalidate(struct dentry *dentry, struct nameidata *nd) 1464{ 1465 struct inode *inode = dentry->d_inode; 1466 struct task_struct *task = get_proc_task(inode); 1467 if (task) { 1468 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) || 1469 task_dumpable(task)) { 1470 inode->i_uid = task->euid; 1471 inode->i_gid = task->egid; 1472 } else { 1473 inode->i_uid = 0; 1474 inode->i_gid = 0; 1475 } 1476 inode->i_mode &= ~(S_ISUID | S_ISGID); 1477 security_task_to_inode(task, inode); 1478 put_task_struct(task); 1479 return 1; 1480 } 1481 d_drop(dentry); 1482 return 0; 1483} 1484 1485static int pid_delete_dentry(struct dentry * dentry) 1486{ 1487 /* Is the task we represent dead? 1488 * If so, then don't put the dentry on the lru list, 1489 * kill it immediately. 1490 */ 1491 return !proc_pid(dentry->d_inode)->tasks[PIDTYPE_PID].first; 1492} 1493 1494static struct dentry_operations pid_dentry_operations = 1495{ 1496 .d_revalidate = pid_revalidate, 1497 .d_delete = pid_delete_dentry, 1498}; 1499 1500/* Lookups */ 1501 1502typedef struct dentry *instantiate_t(struct inode *, struct dentry *, 1503 struct task_struct *, const void *); 1504 1505/* 1506 * Fill a directory entry. 1507 * 1508 * If possible create the dcache entry and derive our inode number and 1509 * file type from dcache entry. 1510 * 1511 * Since all of the proc inode numbers are dynamically generated, the inode 1512 * numbers do not exist until the inode is cache. This means creating the 1513 * the dcache entry in readdir is necessary to keep the inode numbers 1514 * reported by readdir in sync with the inode numbers reported 1515 * by stat. 
1516 */ 1517static int proc_fill_cache(struct file *filp, void *dirent, filldir_t filldir, 1518 char *name, int len, 1519 instantiate_t instantiate, struct task_struct *task, const void *ptr) 1520{ 1521 struct dentry *child, *dir = filp->f_path.dentry; 1522 struct inode *inode; 1523 struct qstr qname; 1524 ino_t ino = 0; 1525 unsigned type = DT_UNKNOWN; 1526 1527 qname.name = name; 1528 qname.len = len; 1529 qname.hash = full_name_hash(name, len); 1530 1531 child = d_lookup(dir, &qname); 1532 if (!child) { 1533 struct dentry *new; 1534 new = d_alloc(dir, &qname); 1535 if (new) { 1536 child = instantiate(dir->d_inode, new, task, ptr); 1537 if (child) 1538 dput(new); 1539 else 1540 child = new; 1541 } 1542 } 1543 if (!child || IS_ERR(child) || !child->d_inode) 1544 goto end_instantiate; 1545 inode = child->d_inode; 1546 if (inode) { 1547 ino = inode->i_ino; 1548 type = inode->i_mode >> 12; 1549 } 1550 dput(child); 1551end_instantiate: 1552 if (!ino) 1553 ino = find_inode_number(dir, &qname); 1554 if (!ino) 1555 ino = 1; 1556 return filldir(dirent, name, len, filp->f_pos, ino, type); 1557} 1558 1559static unsigned name_to_int(struct dentry *dentry) 1560{ 1561 const char *name = dentry->d_name.name; 1562 int len = dentry->d_name.len; 1563 unsigned n = 0; 1564 1565 if (len > 1 && *name == '0') 1566 goto out; 1567 while (len-- > 0) { 1568 unsigned c = *name++ - '0'; 1569 if (c > 9) 1570 goto out; 1571 if (n >= (~0U-9)/10) 1572 goto out; 1573 n *= 10; 1574 n += c; 1575 } 1576 return n; 1577out: 1578 return ~0U; 1579} 1580 1581#define PROC_FDINFO_MAX 64 1582 1583static int proc_fd_info(struct inode *inode, struct path *path, char *info) 1584{ 1585 struct task_struct *task = get_proc_task(inode); 1586 struct files_struct *files = NULL; 1587 struct file *file; 1588 int fd = proc_fd(inode); 1589 1590 if (task) { 1591 files = get_files_struct(task); 1592 put_task_struct(task); 1593 } 1594 if (files) { 1595 /* 1596 * We are not taking a ref to the file structure, so we must 1597 * hold ->file_lock. 
1598 */ 1599 spin_lock(&files->file_lock); 1600 file = fcheck_files(files, fd); 1601 if (file) { 1602 if (path) { 1603 *path = file->f_path; 1604 path_get(&file->f_path); 1605 } 1606 if (info) 1607 snprintf(info, PROC_FDINFO_MAX, 1608 "pos:\t%lli\n" 1609 "flags:\t0%o\n", 1610 (long long) file->f_pos, 1611 file->f_flags); 1612 spin_unlock(&files->file_lock); 1613 put_files_struct(files); 1614 return 0; 1615 } 1616 spin_unlock(&files->file_lock); 1617 put_files_struct(files); 1618 } 1619 return -ENOENT; 1620} 1621 1622static int proc_fd_link(struct inode *inode, struct path *path) 1623{ 1624 return proc_fd_info(inode, path, NULL); 1625} 1626 1627static int tid_fd_revalidate(struct dentry *dentry, struct nameidata *nd) 1628{ 1629 struct inode *inode = dentry->d_inode; 1630 struct task_struct *task = get_proc_task(inode); 1631 int fd = proc_fd(inode); 1632 struct files_struct *files; 1633 1634 if (task) { 1635 files = get_files_struct(task); 1636 if (files) { 1637 rcu_read_lock(); 1638 if (fcheck_files(files, fd)) { 1639 rcu_read_unlock(); 1640 put_files_struct(files); 1641 if (task_dumpable(task)) { 1642 inode->i_uid = task->euid; 1643 inode->i_gid = task->egid; 1644 } else { 1645 inode->i_uid = 0; 1646 inode->i_gid = 0; 1647 } 1648 inode->i_mode &= ~(S_ISUID | S_ISGID); 1649 security_task_to_inode(task, inode); 1650 put_task_struct(task); 1651 return 1; 1652 } 1653 rcu_read_unlock(); 1654 put_files_struct(files); 1655 } 1656 put_task_struct(task); 1657 } 1658 d_drop(dentry); 1659 return 0; 1660} 1661 1662static struct dentry_operations tid_fd_dentry_operations = 1663{ 1664 .d_revalidate = tid_fd_revalidate, 1665 .d_delete = pid_delete_dentry, 1666}; 1667 1668static struct dentry *proc_fd_instantiate(struct inode *dir, 1669 struct dentry *dentry, struct task_struct *task, const void *ptr) 1670{ 1671 unsigned fd = *(const unsigned *)ptr; 1672 struct file *file; 1673 struct files_struct *files; 1674 struct inode *inode; 1675 struct proc_inode *ei; 1676 struct dentry *error = ERR_PTR(-ENOENT); 1677 1678 inode = proc_pid_make_inode(dir->i_sb, task); 1679 if (!inode) 1680 goto out; 1681 ei = PROC_I(inode); 1682 ei->fd = fd; 1683 files = get_files_struct(task); 1684 if (!files) 1685 goto out_iput; 1686 inode->i_mode = S_IFLNK; 1687 1688 /* 1689 * We are not taking a ref to the file structure, so we must 1690 * hold ->file_lock. 
1691 */ 1692 spin_lock(&files->file_lock); 1693 file = fcheck_files(files, fd); 1694 if (!file) 1695 goto out_unlock; 1696 if (file->f_mode & 1) 1697 inode->i_mode |= S_IRUSR | S_IXUSR; 1698 if (file->f_mode & 2) 1699 inode->i_mode |= S_IWUSR | S_IXUSR; 1700 spin_unlock(&files->file_lock); 1701 put_files_struct(files); 1702 1703 inode->i_op = &proc_pid_link_inode_operations; 1704 inode->i_size = 64; 1705 ei->op.proc_get_link = proc_fd_link; 1706 dentry->d_op = &tid_fd_dentry_operations; 1707 d_add(dentry, inode); 1708 /* Close the race of the process dying before we return the dentry */ 1709 if (tid_fd_revalidate(dentry, NULL)) 1710 error = NULL; 1711 1712 out: 1713 return error; 1714out_unlock: 1715 spin_unlock(&files->file_lock); 1716 put_files_struct(files); 1717out_iput: 1718 iput(inode); 1719 goto out; 1720} 1721 1722static struct dentry *proc_lookupfd_common(struct inode *dir, 1723 struct dentry *dentry, 1724 instantiate_t instantiate) 1725{ 1726 struct task_struct *task = get_proc_task(dir); 1727 unsigned fd = name_to_int(dentry); 1728 struct dentry *result = ERR_PTR(-ENOENT); 1729 1730 if (!task) 1731 goto out_no_task; 1732 if (fd == ~0U) 1733 goto out; 1734 1735 result = instantiate(dir, dentry, task, &fd); 1736out: 1737 put_task_struct(task); 1738out_no_task: 1739 return result; 1740} 1741 1742static int proc_readfd_common(struct file * filp, void * dirent, 1743 filldir_t filldir, instantiate_t instantiate) 1744{ 1745 struct dentry *dentry = filp->f_path.dentry; 1746 struct inode *inode = dentry->d_inode; 1747 struct task_struct *p = get_proc_task(inode); 1748 unsigned int fd, ino; 1749 int retval; 1750 struct files_struct * files; 1751 1752 retval = -ENOENT; 1753 if (!p) 1754 goto out_no_task; 1755 retval = 0; 1756 1757 fd = filp->f_pos; 1758 switch (fd) { 1759 case 0: 1760 if (filldir(dirent, ".", 1, 0, inode->i_ino, DT_DIR) < 0) 1761 goto out; 1762 filp->f_pos++; 1763 case 1: 1764 ino = parent_ino(dentry); 1765 if (filldir(dirent, "..", 2, 1, ino, DT_DIR) < 0) 1766 goto out; 1767 filp->f_pos++; 1768 default: 1769 files = get_files_struct(p); 1770 if (!files) 1771 goto out; 1772 rcu_read_lock(); 1773 for (fd = filp->f_pos-2; 1774 fd < files_fdtable(files)->max_fds; 1775 fd++, filp->f_pos++) { 1776 char name[PROC_NUMBUF]; 1777 int len; 1778 1779 if (!fcheck_files(files, fd)) 1780 continue; 1781 rcu_read_unlock(); 1782 1783 len = snprintf(name, sizeof(name), "%d", fd); 1784 if (proc_fill_cache(filp, dirent, filldir, 1785 name, len, instantiate, 1786 p, &fd) < 0) { 1787 rcu_read_lock(); 1788 break; 1789 } 1790 rcu_read_lock(); 1791 } 1792 rcu_read_unlock(); 1793 put_files_struct(files); 1794 } 1795out: 1796 put_task_struct(p); 1797out_no_task: 1798 return retval; 1799} 1800 1801static struct dentry *proc_lookupfd(struct inode *dir, struct dentry *dentry, 1802 struct nameidata *nd) 1803{ 1804 return proc_lookupfd_common(dir, dentry, proc_fd_instantiate); 1805} 1806 1807static int proc_readfd(struct file *filp, void *dirent, filldir_t filldir) 1808{ 1809 return proc_readfd_common(filp, dirent, filldir, proc_fd_instantiate); 1810} 1811 1812static ssize_t proc_fdinfo_read(struct file *file, char __user *buf, 1813 size_t len, loff_t *ppos) 1814{ 1815 char tmp[PROC_FDINFO_MAX]; 1816 int err = proc_fd_info(file->f_path.dentry->d_inode, NULL, tmp); 1817 if (!err) 1818 err = simple_read_from_buffer(buf, len, ppos, tmp, strlen(tmp)); 1819 return err; 1820} 1821 1822static const struct file_operations proc_fdinfo_file_operations = { 1823 .open = nonseekable_open, 1824 .read = 
proc_fdinfo_read, 1825}; 1826 1827static const struct file_operations proc_fd_operations = { 1828 .read = generic_read_dir, 1829 .readdir = proc_readfd, 1830}; 1831 1832/* 1833 * /proc/pid/fd needs a special permission handler so that a process can still 1834 * access /proc/self/fd after it has executed a setuid(). 1835 */ 1836static int proc_fd_permission(struct inode *inode, int mask, 1837 struct nameidata *nd) 1838{ 1839 int rv; 1840 1841 rv = generic_permission(inode, mask, NULL); 1842 if (rv == 0) 1843 return 0; 1844 if (task_pid(current) == proc_pid(inode)) 1845 rv = 0; 1846 return rv; 1847} 1848 1849/* 1850 * proc directories can do almost nothing.. 1851 */ 1852static const struct inode_operations proc_fd_inode_operations = { 1853 .lookup = proc_lookupfd, 1854 .permission = proc_fd_permission, 1855 .setattr = proc_setattr, 1856}; 1857 1858static struct dentry *proc_fdinfo_instantiate(struct inode *dir, 1859 struct dentry *dentry, struct task_struct *task, const void *ptr) 1860{ 1861 unsigned fd = *(unsigned *)ptr; 1862 struct inode *inode; 1863 struct proc_inode *ei; 1864 struct dentry *error = ERR_PTR(-ENOENT); 1865 1866 inode = proc_pid_make_inode(dir->i_sb, task); 1867 if (!inode) 1868 goto out; 1869 ei = PROC_I(inode); 1870 ei->fd = fd; 1871 inode->i_mode = S_IFREG | S_IRUSR; 1872 inode->i_fop = &proc_fdinfo_file_operations; 1873 dentry->d_op = &tid_fd_dentry_operations; 1874 d_add(dentry, inode); 1875 /* Close the race of the process dying before we return the dentry */ 1876 if (tid_fd_revalidate(dentry, NULL)) 1877 error = NULL; 1878 1879 out: 1880 return error; 1881} 1882 1883static struct dentry *proc_lookupfdinfo(struct inode *dir, 1884 struct dentry *dentry, 1885 struct nameidata *nd) 1886{ 1887 return proc_lookupfd_common(dir, dentry, proc_fdinfo_instantiate); 1888} 1889 1890static int proc_readfdinfo(struct file *filp, void *dirent, filldir_t filldir) 1891{ 1892 return proc_readfd_common(filp, dirent, filldir, 1893 proc_fdinfo_instantiate); 1894} 1895 1896static const struct file_operations proc_fdinfo_operations = { 1897 .read = generic_read_dir, 1898 .readdir = proc_readfdinfo, 1899}; 1900 1901/* 1902 * proc directories can do almost nothing.. 
1903 */ 1904static const struct inode_operations proc_fdinfo_inode_operations = { 1905 .lookup = proc_lookupfdinfo, 1906 .setattr = proc_setattr, 1907}; 1908 1909 1910static struct dentry *proc_pident_instantiate(struct inode *dir, 1911 struct dentry *dentry, struct task_struct *task, const void *ptr) 1912{ 1913 const struct pid_entry *p = ptr; 1914 struct inode *inode; 1915 struct proc_inode *ei; 1916 struct dentry *error = ERR_PTR(-EINVAL); 1917 1918 inode = proc_pid_make_inode(dir->i_sb, task); 1919 if (!inode) 1920 goto out; 1921 1922 ei = PROC_I(inode); 1923 inode->i_mode = p->mode; 1924 if (S_ISDIR(inode->i_mode)) 1925 inode->i_nlink = 2; /* Use getattr to fix if necessary */ 1926 if (p->iop) 1927 inode->i_op = p->iop; 1928 if (p->fop) 1929 inode->i_fop = p->fop; 1930 ei->op = p->op; 1931 dentry->d_op = &pid_dentry_operations; 1932 d_add(dentry, inode); 1933 /* Close the race of the process dying before we return the dentry */ 1934 if (pid_revalidate(dentry, NULL)) 1935 error = NULL; 1936out: 1937 return error; 1938} 1939 1940static struct dentry *proc_pident_lookup(struct inode *dir, 1941 struct dentry *dentry, 1942 const struct pid_entry *ents, 1943 unsigned int nents) 1944{ 1945 struct inode *inode; 1946 struct dentry *error; 1947 struct task_struct *task = get_proc_task(dir); 1948 const struct pid_entry *p, *last; 1949 1950 error = ERR_PTR(-ENOENT); 1951 inode = NULL; 1952 1953 if (!task) 1954 goto out_no_task; 1955 1956 /* 1957 * Yes, it does not scale. And it should not. Don't add 1958 * new entries into /proc/<tgid>/ without very good reasons. 1959 */ 1960 last = &ents[nents - 1]; 1961 for (p = ents; p <= last; p++) { 1962 if (p->len != dentry->d_name.len) 1963 continue; 1964 if (!memcmp(dentry->d_name.name, p->name, p->len)) 1965 break; 1966 } 1967 if (p > last) 1968 goto out; 1969 1970 error = proc_pident_instantiate(dir, dentry, task, p); 1971out: 1972 put_task_struct(task); 1973out_no_task: 1974 return error; 1975} 1976 1977static int proc_pident_fill_cache(struct file *filp, void *dirent, 1978 filldir_t filldir, struct task_struct *task, const struct pid_entry *p) 1979{ 1980 return proc_fill_cache(filp, dirent, filldir, p->name, p->len, 1981 proc_pident_instantiate, task, p); 1982} 1983 1984static int proc_pident_readdir(struct file *filp, 1985 void *dirent, filldir_t filldir, 1986 const struct pid_entry *ents, unsigned int nents) 1987{ 1988 int i; 1989 struct dentry *dentry = filp->f_path.dentry; 1990 struct inode *inode = dentry->d_inode; 1991 struct task_struct *task = get_proc_task(inode); 1992 const struct pid_entry *p, *last; 1993 ino_t ino; 1994 int ret; 1995 1996 ret = -ENOENT; 1997 if (!task) 1998 goto out_no_task; 1999 2000 ret = 0; 2001 i = filp->f_pos; 2002 switch (i) { 2003 case 0: 2004 ino = inode->i_ino; 2005 if (filldir(dirent, ".", 1, i, ino, DT_DIR) < 0) 2006 goto out; 2007 i++; 2008 filp->f_pos++; 2009 /* fall through */ 2010 case 1: 2011 ino = parent_ino(dentry); 2012 if (filldir(dirent, "..", 2, i, ino, DT_DIR) < 0) 2013 goto out; 2014 i++; 2015 filp->f_pos++; 2016 /* fall through */ 2017 default: 2018 i -= 2; 2019 if (i >= nents) { 2020 ret = 1; 2021 goto out; 2022 } 2023 p = ents + i; 2024 last = &ents[nents - 1]; 2025 while (p <= last) { 2026 if (proc_pident_fill_cache(filp, dirent, filldir, task, p) < 0) 2027 goto out; 2028 filp->f_pos++; 2029 p++; 2030 } 2031 } 2032 2033 ret = 1; 2034out: 2035 put_task_struct(task); 2036out_no_task: 2037 return ret; 2038} 2039 2040#ifdef CONFIG_SECURITY 2041static ssize_t proc_pid_attr_read(struct file * file, 
char __user * buf, 2042 size_t count, loff_t *ppos) 2043{ 2044 struct inode * inode = file->f_path.dentry->d_inode; 2045 char *p = NULL; 2046 ssize_t length; 2047 struct task_struct *task = get_proc_task(inode); 2048 2049 if (!task) 2050 return -ESRCH; 2051 2052 length = security_getprocattr(task, 2053 (char*)file->f_path.dentry->d_name.name, 2054 &p); 2055 put_task_struct(task); 2056 if (length > 0) 2057 length = simple_read_from_buffer(buf, count, ppos, p, length); 2058 kfree(p); 2059 return length; 2060} 2061 2062static ssize_t proc_pid_attr_write(struct file * file, const char __user * buf, 2063 size_t count, loff_t *ppos) 2064{ 2065 struct inode * inode = file->f_path.dentry->d_inode; 2066 char *page; 2067 ssize_t length; 2068 struct task_struct *task = get_proc_task(inode); 2069 2070 length = -ESRCH; 2071 if (!task) 2072 goto out_no_task; 2073 if (count > PAGE_SIZE) 2074 count = PAGE_SIZE; 2075 2076 /* No partial writes. */ 2077 length = -EINVAL; 2078 if (*ppos != 0) 2079 goto out; 2080 2081 length = -ENOMEM; 2082 page = (char*)__get_free_page(GFP_TEMPORARY); 2083 if (!page) 2084 goto out; 2085 2086 length = -EFAULT; 2087 if (copy_from_user(page, buf, count)) 2088 goto out_free; 2089 2090 length = security_setprocattr(task, 2091 (char*)file->f_path.dentry->d_name.name, 2092 (void*)page, count); 2093out_free: 2094 free_page((unsigned long) page); 2095out: 2096 put_task_struct(task); 2097out_no_task: 2098 return length; 2099} 2100 2101static const struct file_operations proc_pid_attr_operations = { 2102 .read = proc_pid_attr_read, 2103 .write = proc_pid_attr_write, 2104}; 2105 2106static const struct pid_entry attr_dir_stuff[] = { 2107 REG("current", S_IRUGO|S_IWUGO, pid_attr), 2108 REG("prev", S_IRUGO, pid_attr), 2109 REG("exec", S_IRUGO|S_IWUGO, pid_attr), 2110 REG("fscreate", S_IRUGO|S_IWUGO, pid_attr), 2111 REG("keycreate", S_IRUGO|S_IWUGO, pid_attr), 2112 REG("sockcreate", S_IRUGO|S_IWUGO, pid_attr), 2113}; 2114 2115static int proc_attr_dir_readdir(struct file * filp, 2116 void * dirent, filldir_t filldir) 2117{ 2118 return proc_pident_readdir(filp,dirent,filldir, 2119 attr_dir_stuff,ARRAY_SIZE(attr_dir_stuff)); 2120} 2121 2122static const struct file_operations proc_attr_dir_operations = { 2123 .read = generic_read_dir, 2124 .readdir = proc_attr_dir_readdir, 2125}; 2126 2127static struct dentry *proc_attr_dir_lookup(struct inode *dir, 2128 struct dentry *dentry, struct nameidata *nd) 2129{ 2130 return proc_pident_lookup(dir, dentry, 2131 attr_dir_stuff, ARRAY_SIZE(attr_dir_stuff)); 2132} 2133 2134static const struct inode_operations proc_attr_dir_inode_operations = { 2135 .lookup = proc_attr_dir_lookup, 2136 .getattr = pid_getattr, 2137 .setattr = proc_setattr, 2138}; 2139 2140#endif 2141 2142#if defined(USE_ELF_CORE_DUMP) && defined(CONFIG_ELF_CORE) 2143static ssize_t proc_coredump_filter_read(struct file *file, char __user *buf, 2144 size_t count, loff_t *ppos) 2145{ 2146 struct task_struct *task = get_proc_task(file->f_dentry->d_inode); 2147 struct mm_struct *mm; 2148 char buffer[PROC_NUMBUF]; 2149 size_t len; 2150 int ret; 2151 2152 if (!task) 2153 return -ESRCH; 2154 2155 ret = 0; 2156 mm = get_task_mm(task); 2157 if (mm) { 2158 len = snprintf(buffer, sizeof(buffer), "%08lx\n", 2159 ((mm->flags & MMF_DUMP_FILTER_MASK) >> 2160 MMF_DUMP_FILTER_SHIFT)); 2161 mmput(mm); 2162 ret = simple_read_from_buffer(buf, count, ppos, buffer, len); 2163 } 2164 2165 put_task_struct(task); 2166 2167 return ret; 2168} 2169 2170static ssize_t proc_coredump_filter_write(struct file *file, 2171 
const char __user *buf, 2172 size_t count, 2173 loff_t *ppos) 2174{ 2175 struct task_struct *task; 2176 struct mm_struct *mm; 2177 char buffer[PROC_NUMBUF], *end; 2178 unsigned int val; 2179 int ret; 2180 int i; 2181 unsigned long mask; 2182 2183 ret = -EFAULT; 2184 memset(buffer, 0, sizeof(buffer)); 2185 if (count > sizeof(buffer) - 1) 2186 count = sizeof(buffer) - 1; 2187 if (copy_from_user(buffer, buf, count)) 2188 goto out_no_task; 2189 2190 ret = -EINVAL; 2191 val = (unsigned int)simple_strtoul(buffer, &end, 0); 2192 if (*end == '\n') 2193 end++; 2194 if (end - buffer == 0) 2195 goto out_no_task; 2196 2197 ret = -ESRCH; 2198 task = get_proc_task(file->f_dentry->d_inode); 2199 if (!task) 2200 goto out_no_task; 2201 2202 ret = end - buffer; 2203 mm = get_task_mm(task); 2204 if (!mm) 2205 goto out_no_mm; 2206 2207 for (i = 0, mask = 1; i < MMF_DUMP_FILTER_BITS; i++, mask <<= 1) { 2208 if (val & mask) 2209 set_bit(i + MMF_DUMP_FILTER_SHIFT, &mm->flags); 2210 else 2211 clear_bit(i + MMF_DUMP_FILTER_SHIFT, &mm->flags); 2212 } 2213 2214 mmput(mm); 2215 out_no_mm: 2216 put_task_struct(task); 2217 out_no_task: 2218 return ret; 2219} 2220 2221static const struct file_operations proc_coredump_filter_operations = { 2222 .read = proc_coredump_filter_read, 2223 .write = proc_coredump_filter_write, 2224}; 2225#endif 2226 2227/* 2228 * /proc/self: 2229 */ 2230static int proc_self_readlink(struct dentry *dentry, char __user *buffer, 2231 int buflen) 2232{ 2233 struct pid_namespace *ns = dentry->d_sb->s_fs_info; 2234 pid_t tgid = task_tgid_nr_ns(current, ns); 2235 char tmp[PROC_NUMBUF]; 2236 if (!tgid) 2237 return -ENOENT; 2238 sprintf(tmp, "%d", tgid); 2239 return vfs_readlink(dentry,buffer,buflen,tmp); 2240} 2241 2242static void *proc_self_follow_link(struct dentry *dentry, struct nameidata *nd) 2243{ 2244 struct pid_namespace *ns = dentry->d_sb->s_fs_info; 2245 pid_t tgid = task_tgid_nr_ns(current, ns); 2246 char tmp[PROC_NUMBUF]; 2247 if (!tgid) 2248 return ERR_PTR(-ENOENT); 2249 sprintf(tmp, "%d", task_tgid_nr_ns(current, ns)); 2250 return ERR_PTR(vfs_follow_link(nd,tmp)); 2251} 2252 2253static const struct inode_operations proc_self_inode_operations = { 2254 .readlink = proc_self_readlink, 2255 .follow_link = proc_self_follow_link, 2256}; 2257 2258/* 2259 * proc base 2260 * 2261 * These are the directory entries in the root directory of /proc 2262 * that properly belong to the /proc filesystem, as they describe 2263 * describe something that is process related. 2264 */ 2265static const struct pid_entry proc_base_stuff[] = { 2266 NOD("self", S_IFLNK|S_IRWXUGO, 2267 &proc_self_inode_operations, NULL, {}), 2268}; 2269 2270/* 2271 * Exceptional case: normally we are not allowed to unhash a busy 2272 * directory. In this case, however, we can do it - no aliasing problems 2273 * due to the way we treat inodes. 
/*
 * Exceptional case: normally we are not allowed to unhash a busy
 * directory. In this case, however, we can do it - no aliasing problems
 * due to the way we treat inodes.
 */
static int proc_base_revalidate(struct dentry *dentry, struct nameidata *nd)
{
	struct inode *inode = dentry->d_inode;
	struct task_struct *task = get_proc_task(inode);
	if (task) {
		put_task_struct(task);
		return 1;
	}
	d_drop(dentry);
	return 0;
}

static struct dentry_operations proc_base_dentry_operations =
{
	.d_revalidate	= proc_base_revalidate,
	.d_delete	= pid_delete_dentry,
};

static struct dentry *proc_base_instantiate(struct inode *dir,
	struct dentry *dentry, struct task_struct *task, const void *ptr)
{
	const struct pid_entry *p = ptr;
	struct inode *inode;
	struct proc_inode *ei;
	struct dentry *error = ERR_PTR(-EINVAL);

	/* Allocate the inode */
	error = ERR_PTR(-ENOMEM);
	inode = new_inode(dir->i_sb);
	if (!inode)
		goto out;

	/* Initialize the inode */
	ei = PROC_I(inode);
	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;

	/*
	 * grab the reference to the task.
	 */
	ei->pid = get_task_pid(task, PIDTYPE_PID);
	if (!ei->pid)
		goto out_iput;

	inode->i_uid = 0;
	inode->i_gid = 0;
	inode->i_mode = p->mode;
	if (S_ISDIR(inode->i_mode))
		inode->i_nlink = 2;
	if (S_ISLNK(inode->i_mode))
		inode->i_size = 64;
	if (p->iop)
		inode->i_op = p->iop;
	if (p->fop)
		inode->i_fop = p->fop;
	ei->op = p->op;
	dentry->d_op = &proc_base_dentry_operations;
	d_add(dentry, inode);
	error = NULL;
out:
	return error;
out_iput:
	iput(inode);
	goto out;
}

static struct dentry *proc_base_lookup(struct inode *dir, struct dentry *dentry)
{
	struct dentry *error;
	struct task_struct *task = get_proc_task(dir);
	const struct pid_entry *p, *last;

	error = ERR_PTR(-ENOENT);

	if (!task)
		goto out_no_task;

	/* Lookup the directory entry */
	last = &proc_base_stuff[ARRAY_SIZE(proc_base_stuff) - 1];
	for (p = proc_base_stuff; p <= last; p++) {
		if (p->len != dentry->d_name.len)
			continue;
		if (!memcmp(dentry->d_name.name, p->name, p->len))
			break;
	}
	if (p > last)
		goto out;

	error = proc_base_instantiate(dir, dentry, task, p);

out:
	put_task_struct(task);
out_no_task:
	return error;
}

static int proc_base_fill_cache(struct file *filp, void *dirent,
	filldir_t filldir, struct task_struct *task, const struct pid_entry *p)
{
	return proc_fill_cache(filp, dirent, filldir, p->name, p->len,
				proc_base_instantiate, task, p);
}

#ifdef CONFIG_TASK_IO_ACCOUNTING
static int proc_pid_io_accounting(struct task_struct *task, char *buffer)
{
	return sprintf(buffer,
#ifdef CONFIG_TASK_XACCT
			"rchar: %llu\n"
			"wchar: %llu\n"
			"syscr: %llu\n"
			"syscw: %llu\n"
#endif
			"read_bytes: %llu\n"
			"write_bytes: %llu\n"
			"cancelled_write_bytes: %llu\n",
#ifdef CONFIG_TASK_XACCT
			(unsigned long long)task->rchar,
			(unsigned long long)task->wchar,
			(unsigned long long)task->syscr,
			(unsigned long long)task->syscw,
#endif
			(unsigned long long)task->ioac.read_bytes,
			(unsigned long long)task->ioac.write_bytes,
			(unsigned long long)task->ioac.cancelled_write_bytes);
}
#endif

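/*
 * Illustrative note (added by the editor, not part of the original source):
 * with CONFIG_TASK_IO_ACCOUNTING (and CONFIG_TASK_XACCT) enabled, reading
 * /proc/<pid>/io produces one "key: value" pair per line, exactly as
 * formatted by proc_pid_io_accounting() above; the numbers below are
 * made-up example values:
 *
 *	rchar: 323934931
 *	wchar: 323929600
 *	syscr: 632687
 *	syscw: 632675
 *	read_bytes: 0
 *	write_bytes: 323932160
 *	cancelled_write_bytes: 0
 */
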
/*
 * Thread groups
 */
static const struct file_operations proc_task_operations;
static const struct inode_operations proc_task_inode_operations;

static const struct pid_entry tgid_base_stuff[] = {
	DIR("task",       S_IRUGO|S_IXUGO, task),
	DIR("fd",         S_IRUSR|S_IXUSR, fd),
	DIR("fdinfo",     S_IRUSR|S_IXUSR, fdinfo),
#ifdef CONFIG_NET
	DIR("net",        S_IRUGO|S_IXUGO, net),
#endif
	REG("environ",    S_IRUSR, environ),
	INF("auxv",       S_IRUSR, pid_auxv),
	ONE("status",     S_IRUGO, pid_status),
	INF("limits",	  S_IRUSR, pid_limits),
#ifdef CONFIG_SCHED_DEBUG
	REG("sched",      S_IRUGO|S_IWUSR, pid_sched),
#endif
	INF("cmdline",    S_IRUGO, pid_cmdline),
	ONE("stat",       S_IRUGO, tgid_stat),
	ONE("statm",      S_IRUGO, pid_statm),
	REG("maps",       S_IRUGO, maps),
#ifdef CONFIG_NUMA
	REG("numa_maps",  S_IRUGO, numa_maps),
#endif
	REG("mem",        S_IRUSR|S_IWUSR, mem),
	LNK("cwd",        cwd),
	LNK("root",       root),
	LNK("exe",        exe),
	REG("mounts",     S_IRUGO, mounts),
	REG("mountinfo",  S_IRUGO, mountinfo),
	REG("mountstats", S_IRUSR, mountstats),
#ifdef CONFIG_PROC_PAGE_MONITOR
	REG("clear_refs", S_IWUSR, clear_refs),
	REG("smaps",      S_IRUGO, smaps),
	REG("pagemap",    S_IRUSR, pagemap),
#endif
#ifdef CONFIG_SECURITY
	DIR("attr",       S_IRUGO|S_IXUGO, attr_dir),
#endif
#ifdef CONFIG_KALLSYMS
	INF("wchan",      S_IRUGO, pid_wchan),
#endif
#ifdef CONFIG_SCHEDSTATS
	INF("schedstat",  S_IRUGO, pid_schedstat),
#endif
#ifdef CONFIG_LATENCYTOP
	REG("latency",  S_IRUGO, lstats),
#endif
#ifdef CONFIG_PROC_PID_CPUSET
	REG("cpuset",     S_IRUGO, cpuset),
#endif
#ifdef CONFIG_CGROUPS
	REG("cgroup",  S_IRUGO, cgroup),
#endif
	INF("oom_score",  S_IRUGO, oom_score),
	REG("oom_adj",    S_IRUGO|S_IWUSR, oom_adjust),
#ifdef CONFIG_AUDITSYSCALL
	REG("loginuid",   S_IWUSR|S_IRUGO, loginuid),
	REG("sessionid",  S_IRUGO, sessionid),
#endif
#ifdef CONFIG_FAULT_INJECTION
	REG("make-it-fail", S_IRUGO|S_IWUSR, fault_inject),
#endif
#if defined(USE_ELF_CORE_DUMP) && defined(CONFIG_ELF_CORE)
	REG("coredump_filter", S_IRUGO|S_IWUSR, coredump_filter),
#endif
#ifdef CONFIG_TASK_IO_ACCOUNTING
	INF("io",	S_IRUGO, pid_io_accounting),
#endif
};

static int proc_tgid_base_readdir(struct file * filp,
				  void * dirent, filldir_t filldir)
{
	return proc_pident_readdir(filp, dirent, filldir,
				   tgid_base_stuff, ARRAY_SIZE(tgid_base_stuff));
}

static const struct file_operations proc_tgid_base_operations = {
	.read		= generic_read_dir,
	.readdir	= proc_tgid_base_readdir,
};

static struct dentry *proc_tgid_base_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
{
	return proc_pident_lookup(dir, dentry,
				  tgid_base_stuff, ARRAY_SIZE(tgid_base_stuff));
}

static const struct inode_operations proc_tgid_base_inode_operations = {
	.lookup		= proc_tgid_base_lookup,
	.getattr	= pid_getattr,
	.setattr	= proc_setattr,
};

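/*
 * Illustrative note (added by the editor, not part of the original source):
 * the lookup and readdir operations above are entirely table driven; every
 * visible entry of a /proc/<tgid>/ directory corresponds to one line of
 * tgid_base_stuff[], and entries guarded by an #ifdef (numa_maps, cgroup,
 * coredump_filter, io, ...) only exist when the matching CONFIG_* option
 * is enabled in the running kernel.
 */
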
static void proc_flush_task_mnt(struct vfsmount *mnt, pid_t pid, pid_t tgid)
{
	struct dentry *dentry, *leader, *dir;
	char buf[PROC_NUMBUF];
	struct qstr name;

	name.name = buf;
	name.len = snprintf(buf, sizeof(buf), "%d", pid);
	dentry = d_hash_and_lookup(mnt->mnt_root, &name);
	if (dentry) {
		if (!(current->flags & PF_EXITING))
			shrink_dcache_parent(dentry);
		d_drop(dentry);
		dput(dentry);
	}

	if (tgid == 0)
		goto out;

	name.name = buf;
	name.len = snprintf(buf, sizeof(buf), "%d", tgid);
	leader = d_hash_and_lookup(mnt->mnt_root, &name);
	if (!leader)
		goto out;

	name.name = "task";
	name.len = strlen(name.name);
	dir = d_hash_and_lookup(leader, &name);
	if (!dir)
		goto out_put_leader;

	name.name = buf;
	name.len = snprintf(buf, sizeof(buf), "%d", pid);
	dentry = d_hash_and_lookup(dir, &name);
	if (dentry) {
		shrink_dcache_parent(dentry);
		d_drop(dentry);
		dput(dentry);
	}

	dput(dir);
out_put_leader:
	dput(leader);
out:
	return;
}

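/*
 * Illustrative note (added by the editor, not part of the original source):
 * for a given proc vfsmount and, say, (pid, tgid) = (1234, 1200), the
 * function above drops the dentries for "/proc/1234" and
 * "/proc/1200/task/1234" if they are present in the dcache; when tgid is 0
 * only the first lookup is attempted.  The pids are example values.
 */
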
/**
 * proc_flush_task -  Remove dcache entries for @task from the /proc dcache.
 * @task: task that should be flushed.
 *
 * When flushing dentries from proc, one needs to flush them from global
 * proc (proc_mnt) and from all the namespaces' procs this task was seen
 * in. This call is supposed to do all of this job.
 *
 * Looks in the dcache for
 * /proc/@pid
 * /proc/@tgid/task/@pid
 * if either directory is present, flushes it and all of its children
 * from the dcache.
 *
 * It is safe and reasonable to cache /proc entries for a task until
 * that task exits.  After that they just clog up the dcache with
 * useless entries, possibly causing useful dcache entries to be
 * flushed instead.  This routine is provided to flush those useless
 * dcache entries at process exit time.
 *
 * NOTE: This routine is just an optimization so it does not guarantee
 *       that no dcache entries will exist at process exit time; it
 *       just makes it very unlikely that any will persist.
 */

void proc_flush_task(struct task_struct *task)
{
	int i;
	struct pid *pid, *tgid = NULL;
	struct upid *upid;

	pid = task_pid(task);
	if (thread_group_leader(task))
		tgid = task_tgid(task);

	for (i = 0; i <= pid->level; i++) {
		upid = &pid->numbers[i];
		proc_flush_task_mnt(upid->ns->proc_mnt, upid->nr,
				    tgid ? tgid->numbers[i].nr : 0);
	}

	upid = &pid->numbers[pid->level];
	if (upid->nr == 1)
		pid_ns_release_proc(upid->ns);
}

static struct dentry *proc_pid_instantiate(struct inode *dir,
					   struct dentry * dentry,
					   struct task_struct *task, const void *ptr)
{
	struct dentry *error = ERR_PTR(-ENOENT);
	struct inode *inode;

	inode = proc_pid_make_inode(dir->i_sb, task);
	if (!inode)
		goto out;

	inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
	inode->i_op = &proc_tgid_base_inode_operations;
	inode->i_fop = &proc_tgid_base_operations;
	inode->i_flags |= S_IMMUTABLE;

	inode->i_nlink = 2 + pid_entry_count_dirs(tgid_base_stuff,
		ARRAY_SIZE(tgid_base_stuff));

	dentry->d_op = &pid_dentry_operations;

	d_add(dentry, inode);
	/* Close the race of the process dying before we return the dentry */
	if (pid_revalidate(dentry, NULL))
		error = NULL;
out:
	return error;
}

struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, struct nameidata *nd)
{
	struct dentry *result = ERR_PTR(-ENOENT);
	struct task_struct *task;
	unsigned tgid;
	struct pid_namespace *ns;

	result = proc_base_lookup(dir, dentry);
	if (!IS_ERR(result) || PTR_ERR(result) != -ENOENT)
		goto out;

	tgid = name_to_int(dentry);
	if (tgid == ~0U)
		goto out;

	ns = dentry->d_sb->s_fs_info;
	rcu_read_lock();
	task = find_task_by_pid_ns(tgid, ns);
	if (task)
		get_task_struct(task);
	rcu_read_unlock();
	if (!task)
		goto out;

	result = proc_pid_instantiate(dir, dentry, task, NULL);
	put_task_struct(task);
out:
	return result;
}

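/*
 * Illustrative note (added by the editor, not part of the original source):
 * a path lookup of, say, "/proc/1234" first tries proc_base_lookup() (which
 * only knows "self") and then, via name_to_int(), resolves "1234" as a tgid
 * in the pid namespace of the proc mount; non-numeric names that are not in
 * proc_base_stuff[] therefore fail with -ENOENT.  The pid 1234 is an
 * example value.
 */
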
/*
 * Find the first task with tgid >= tgid
 *
 */
struct tgid_iter {
	unsigned int tgid;
	struct task_struct *task;
};
static struct tgid_iter next_tgid(struct pid_namespace *ns, struct tgid_iter iter)
{
	struct pid *pid;

	if (iter.task)
		put_task_struct(iter.task);
	rcu_read_lock();
retry:
	iter.task = NULL;
	pid = find_ge_pid(iter.tgid, ns);
	if (pid) {
		iter.tgid = pid_nr_ns(pid, ns);
		iter.task = pid_task(pid, PIDTYPE_PID);
		/* What we want to know is whether the pid we have found
		 * is the pid of a thread_group_leader.  Testing for the
		 * task being a thread_group_leader is the obvious thing
		 * to do, but there is a window when it fails, due to
		 * the pid transfer logic in de_thread.
		 *
		 * So we perform the straightforward test of seeing
		 * if the pid we have found is the pid of a thread
		 * group leader, and don't worry if the task we have
		 * found doesn't happen to be a thread group leader.
		 * We don't care about that in the case of readdir.
		 */
		if (!iter.task || !has_group_leader_pid(iter.task)) {
			iter.tgid += 1;
			goto retry;
		}
		get_task_struct(iter.task);
	}
	rcu_read_unlock();
	return iter;
}

#define TGID_OFFSET (FIRST_PROCESS_ENTRY + ARRAY_SIZE(proc_base_stuff))

static int proc_pid_fill_cache(struct file *filp, void *dirent, filldir_t filldir,
	struct tgid_iter iter)
{
	char name[PROC_NUMBUF];
	int len = snprintf(name, sizeof(name), "%d", iter.tgid);
	return proc_fill_cache(filp, dirent, filldir, name, len,
				proc_pid_instantiate, iter.task, NULL);
}

/* for the /proc/ directory itself, after non-process stuff has been done */
int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir)
{
	unsigned int nr = filp->f_pos - FIRST_PROCESS_ENTRY;
	struct task_struct *reaper = get_proc_task(filp->f_path.dentry->d_inode);
	struct tgid_iter iter;
	struct pid_namespace *ns;

	if (!reaper)
		goto out_no_task;

	for (; nr < ARRAY_SIZE(proc_base_stuff); filp->f_pos++, nr++) {
		const struct pid_entry *p = &proc_base_stuff[nr];
		if (proc_base_fill_cache(filp, dirent, filldir, reaper, p) < 0)
			goto out;
	}

	ns = filp->f_dentry->d_sb->s_fs_info;
	iter.task = NULL;
	iter.tgid = filp->f_pos - TGID_OFFSET;
	for (iter = next_tgid(ns, iter);
	     iter.task;
	     iter.tgid += 1, iter = next_tgid(ns, iter)) {
		filp->f_pos = iter.tgid + TGID_OFFSET;
		if (proc_pid_fill_cache(filp, dirent, filldir, iter) < 0) {
			put_task_struct(iter.task);
			goto out;
		}
	}
	filp->f_pos = PID_MAX_LIMIT + TGID_OFFSET;
out:
	put_task_struct(reaper);
out_no_task:
	return 0;
}

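/*
 * Illustrative note (added by the editor, not part of the original source):
 * proc_pid_readdir() encodes its directory position as
 * f_pos = tgid + TGID_OFFSET.  Positions between FIRST_PROCESS_ENTRY and
 * TGID_OFFSET cover proc_base_stuff[] (currently just "self"); anything at
 * or above TGID_OFFSET resumes the tgid scan with find_ge_pid(), starting
 * at tgid = f_pos - TGID_OFFSET, so an entry that did not fit in the
 * previous readdir buffer is retried first.
 */
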
/*
 * Tasks
 */
static const struct pid_entry tid_base_stuff[] = {
	DIR("fd",        S_IRUSR|S_IXUSR, fd),
	DIR("fdinfo",    S_IRUSR|S_IXUSR, fdinfo),
	REG("environ",   S_IRUSR, environ),
	INF("auxv",      S_IRUSR, pid_auxv),
	ONE("status",    S_IRUGO, pid_status),
	INF("limits",	 S_IRUSR, pid_limits),
#ifdef CONFIG_SCHED_DEBUG
	REG("sched",     S_IRUGO|S_IWUSR, pid_sched),
#endif
	INF("cmdline",   S_IRUGO, pid_cmdline),
	ONE("stat",      S_IRUGO, tid_stat),
	ONE("statm",     S_IRUGO, pid_statm),
	REG("maps",      S_IRUGO, maps),
#ifdef CONFIG_NUMA
	REG("numa_maps", S_IRUGO, numa_maps),
#endif
	REG("mem",       S_IRUSR|S_IWUSR, mem),
	LNK("cwd",       cwd),
	LNK("root",      root),
	LNK("exe",       exe),
	REG("mounts",    S_IRUGO, mounts),
	REG("mountinfo",  S_IRUGO, mountinfo),
#ifdef CONFIG_PROC_PAGE_MONITOR
	REG("clear_refs", S_IWUSR, clear_refs),
	REG("smaps",     S_IRUGO, smaps),
	REG("pagemap",    S_IRUSR, pagemap),
#endif
#ifdef CONFIG_SECURITY
	DIR("attr",      S_IRUGO|S_IXUGO, attr_dir),
#endif
#ifdef CONFIG_KALLSYMS
	INF("wchan",     S_IRUGO, pid_wchan),
#endif
#ifdef CONFIG_SCHEDSTATS
	INF("schedstat", S_IRUGO, pid_schedstat),
#endif
#ifdef CONFIG_LATENCYTOP
	REG("latency",  S_IRUGO, lstats),
#endif
#ifdef CONFIG_PROC_PID_CPUSET
	REG("cpuset",    S_IRUGO, cpuset),
#endif
#ifdef CONFIG_CGROUPS
	REG("cgroup",  S_IRUGO, cgroup),
#endif
	INF("oom_score", S_IRUGO, oom_score),
	REG("oom_adj",   S_IRUGO|S_IWUSR, oom_adjust),
#ifdef CONFIG_AUDITSYSCALL
	REG("loginuid",  S_IWUSR|S_IRUGO, loginuid),
	REG("sessionid",  S_IRUSR, sessionid),
#endif
#ifdef CONFIG_FAULT_INJECTION
	REG("make-it-fail", S_IRUGO|S_IWUSR, fault_inject),
#endif
};

static int proc_tid_base_readdir(struct file * filp,
			     void * dirent, filldir_t filldir)
{
	return proc_pident_readdir(filp, dirent, filldir,
				   tid_base_stuff, ARRAY_SIZE(tid_base_stuff));
}

static struct dentry *proc_tid_base_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
{
	return proc_pident_lookup(dir, dentry,
				  tid_base_stuff, ARRAY_SIZE(tid_base_stuff));
}

static const struct file_operations proc_tid_base_operations = {
	.read		= generic_read_dir,
	.readdir	= proc_tid_base_readdir,
};

static const struct inode_operations proc_tid_base_inode_operations = {
	.lookup		= proc_tid_base_lookup,
	.getattr	= pid_getattr,
	.setattr	= proc_setattr,
};

static struct dentry *proc_task_instantiate(struct inode *dir,
	struct dentry *dentry, struct task_struct *task, const void *ptr)
{
	struct dentry *error = ERR_PTR(-ENOENT);
	struct inode *inode;
	inode = proc_pid_make_inode(dir->i_sb, task);

	if (!inode)
		goto out;
	inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
	inode->i_op = &proc_tid_base_inode_operations;
	inode->i_fop = &proc_tid_base_operations;
	inode->i_flags |= S_IMMUTABLE;

	inode->i_nlink = 2 + pid_entry_count_dirs(tid_base_stuff,
		ARRAY_SIZE(tid_base_stuff));

	dentry->d_op = &pid_dentry_operations;

	d_add(dentry, inode);
	/* Close the race of the process dying before we return the dentry */
	if (pid_revalidate(dentry, NULL))
		error = NULL;
out:
	return error;
}

static struct dentry *proc_task_lookup(struct inode *dir, struct dentry * dentry, struct nameidata *nd)
{
	struct dentry *result = ERR_PTR(-ENOENT);
	struct task_struct *task;
	struct task_struct *leader = get_proc_task(dir);
	unsigned tid;
	struct pid_namespace *ns;

	if (!leader)
		goto out_no_task;

	tid = name_to_int(dentry);
	if (tid == ~0U)
		goto out;

	ns = dentry->d_sb->s_fs_info;
	rcu_read_lock();
	task = find_task_by_pid_ns(tid, ns);
	if (task)
		get_task_struct(task);
	rcu_read_unlock();
	if (!task)
		goto out;
	if (!same_thread_group(leader, task))
		goto out_drop_task;

	result = proc_task_instantiate(dir, dentry, task, NULL);
out_drop_task:
	put_task_struct(task);
out:
	put_task_struct(leader);
out_no_task:
	return result;
}

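/*
 * Illustrative note (added by the editor, not part of the original source):
 * proc_task_lookup() above only admits tids that belong to the same thread
 * group as the /proc/<tgid>/task directory being searched, so e.g.
 * "/proc/1200/task/1234" resolves only if 1234 is a thread of the group
 * led by 1200 (example pids).
 */
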
/*
 * Find the first tid of a thread group to return to user space.
 *
 * Usually this is just the thread group leader, but if the user's
 * buffer was too small or there was a seek into the middle of the
 * directory we have more work to do.
 *
 * In the case of a short read we start with find_task_by_pid.
 *
 * In the case of a seek we start with the leader and walk nr
 * threads past it.
 */
static struct task_struct *first_tid(struct task_struct *leader,
		int tid, int nr, struct pid_namespace *ns)
{
	struct task_struct *pos;

	rcu_read_lock();
	/* Attempt to start with the pid of a thread */
	if (tid && (nr > 0)) {
		pos = find_task_by_pid_ns(tid, ns);
		if (pos && (pos->group_leader == leader))
			goto found;
	}

	/* If nr exceeds the number of threads there is nothing to do */
	pos = NULL;
	if (nr && nr >= get_nr_threads(leader))
		goto out;

	/* If we haven't found our starting place yet start
	 * with the leader and walk nr threads forward.
	 */
	for (pos = leader; nr > 0; --nr) {
		pos = next_thread(pos);
		if (pos == leader) {
			pos = NULL;
			goto out;
		}
	}
found:
	get_task_struct(pos);
out:
	rcu_read_unlock();
	return pos;
}

/*
 * Find the next thread in the thread list.
 * Return NULL if there is an error or no next thread.
 *
 * The reference to the input task_struct is released.
 */
static struct task_struct *next_tid(struct task_struct *start)
{
	struct task_struct *pos = NULL;
	rcu_read_lock();
	if (pid_alive(start)) {
		pos = next_thread(start);
		if (thread_group_leader(pos))
			pos = NULL;
		else
			get_task_struct(pos);
	}
	rcu_read_unlock();
	put_task_struct(start);
	return pos;
}

static int proc_task_fill_cache(struct file *filp, void *dirent, filldir_t filldir,
	struct task_struct *task, int tid)
{
	char name[PROC_NUMBUF];
	int len = snprintf(name, sizeof(name), "%d", tid);
	return proc_fill_cache(filp, dirent, filldir, name, len,
				proc_task_instantiate, task, NULL);
}

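/*
 * Illustrative note (added by the editor, not part of the original source):
 * proc_task_readdir() below walks the thread group with first_tid() and
 * next_tid().  Positions 0 and 1 are "." and "..", so a seek to position
 * nr + 2 starts nr threads past the group leader, and a short read is
 * resumed from the tid cached in f_version.
 */
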
/* for the /proc/TGID/task/ directories */
static int proc_task_readdir(struct file * filp, void * dirent, filldir_t filldir)
{
	struct dentry *dentry = filp->f_path.dentry;
	struct inode *inode = dentry->d_inode;
	struct task_struct *leader = NULL;
	struct task_struct *task;
	int retval = -ENOENT;
	ino_t ino;
	int tid;
	unsigned long pos = filp->f_pos;  /* avoiding "long long" filp->f_pos */
	struct pid_namespace *ns;

	task = get_proc_task(inode);
	if (!task)
		goto out_no_task;
	rcu_read_lock();
	if (pid_alive(task)) {
		leader = task->group_leader;
		get_task_struct(leader);
	}
	rcu_read_unlock();
	put_task_struct(task);
	if (!leader)
		goto out_no_task;
	retval = 0;

	switch (pos) {
	case 0:
		ino = inode->i_ino;
		if (filldir(dirent, ".", 1, pos, ino, DT_DIR) < 0)
			goto out;
		pos++;
		/* fall through */
	case 1:
		ino = parent_ino(dentry);
		if (filldir(dirent, "..", 2, pos, ino, DT_DIR) < 0)
			goto out;
		pos++;
		/* fall through */
	}

	/* f_version caches the tgid value that the last readdir call couldn't
	 * return. lseek aka telldir automagically resets f_version to 0.
	 */
	ns = filp->f_dentry->d_sb->s_fs_info;
	tid = (int)filp->f_version;
	filp->f_version = 0;
	for (task = first_tid(leader, tid, pos - 2, ns);
	     task;
	     task = next_tid(task), pos++) {
		tid = task_pid_nr_ns(task, ns);
		if (proc_task_fill_cache(filp, dirent, filldir, task, tid) < 0) {
			/* returning this tid failed, save it as the first
			 * pid for the next readdir call */
			filp->f_version = (u64)tid;
			put_task_struct(task);
			break;
		}
	}
out:
	filp->f_pos = pos;
	put_task_struct(leader);
out_no_task:
	return retval;
}

static int proc_task_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
{
	struct inode *inode = dentry->d_inode;
	struct task_struct *p = get_proc_task(inode);
	generic_fillattr(inode, stat);

	if (p) {
		rcu_read_lock();
		stat->nlink += get_nr_threads(p);
		rcu_read_unlock();
		put_task_struct(p);
	}

	return 0;
}

static const struct inode_operations proc_task_inode_operations = {
	.lookup		= proc_task_lookup,
	.getattr	= proc_task_getattr,
	.setattr	= proc_setattr,
};

static const struct file_operations proc_task_operations = {
	.read		= generic_read_dir,
	.readdir	= proc_task_readdir,
};