linux/fs/proc/inode.c at v2.6.27-rc4
/*
 *  linux/fs/proc/inode.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

#include <linux/time.h>
#include <linux/proc_fs.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/completion.h>
#include <linux/poll.h>
#include <linux/file.h>
#include <linux/limits.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/smp_lock.h>
#include <linux/sysctl.h>

#include <asm/system.h>
#include <asm/uaccess.h>

#include "internal.h"

struct proc_dir_entry *de_get(struct proc_dir_entry *de)
{
	atomic_inc(&de->count);
	return de;
}

/*
 * Decrements the use count and checks for deferred deletion.
 */
void de_put(struct proc_dir_entry *de)
{
	lock_kernel();
	if (!atomic_read(&de->count)) {
		printk("de_put: entry %s already free!\n", de->name);
		unlock_kernel();
		return;
	}

	if (atomic_dec_and_test(&de->count))
		free_proc_entry(de);
	unlock_kernel();
}

/*
 * Decrement the use count of the proc_dir_entry.
 */
static void proc_delete_inode(struct inode *inode)
{
	struct proc_dir_entry *de;

	truncate_inode_pages(&inode->i_data, 0);

	/* Stop tracking associated processes */
	put_pid(PROC_I(inode)->pid);

	/* Let go of any associated proc directory entry */
	de = PROC_I(inode)->pde;
	if (de) {
		if (de->owner)
			module_put(de->owner);
		de_put(de);
	}
	if (PROC_I(inode)->sysctl)
		sysctl_head_put(PROC_I(inode)->sysctl);
	clear_inode(inode);
}

struct vfsmount *proc_mnt;

static struct kmem_cache * proc_inode_cachep;

static struct inode *proc_alloc_inode(struct super_block *sb)
{
	struct proc_inode *ei;
	struct inode *inode;

	ei = (struct proc_inode *)kmem_cache_alloc(proc_inode_cachep, GFP_KERNEL);
	if (!ei)
		return NULL;
	ei->pid = NULL;
	ei->fd = 0;
	ei->op.proc_get_link = NULL;
	ei->pde = NULL;
	ei->sysctl = NULL;
	ei->sysctl_entry = NULL;
	inode = &ei->vfs_inode;
	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
	return inode;
}

static void proc_destroy_inode(struct inode *inode)
{
	kmem_cache_free(proc_inode_cachep, PROC_I(inode));
}

static void init_once(void *foo)
{
	struct proc_inode *ei = (struct proc_inode *) foo;

	inode_init_once(&ei->vfs_inode);
}

int __init proc_init_inodecache(void)
{
	proc_inode_cachep = kmem_cache_create("proc_inode_cache",
					      sizeof(struct proc_inode),
					      0, (SLAB_RECLAIM_ACCOUNT|
						  SLAB_MEM_SPREAD|SLAB_PANIC),
					      init_once);
	return 0;
}

static const struct super_operations proc_sops = {
	.alloc_inode	= proc_alloc_inode,
	.destroy_inode	= proc_destroy_inode,
	.drop_inode	= generic_delete_inode,
	.delete_inode	= proc_delete_inode,
	.statfs		= simple_statfs,
};

static void __pde_users_dec(struct proc_dir_entry *pde)
{
	pde->pde_users--;
	if (pde->pde_unload_completion && pde->pde_users == 0)
		complete(pde->pde_unload_completion);
}

static void pde_users_dec(struct proc_dir_entry *pde)
{
	spin_lock(&pde->pde_unload_lock);
	__pde_users_dec(pde);
	spin_unlock(&pde->pde_unload_lock);
}

static loff_t proc_reg_llseek(struct file *file, loff_t offset, int whence)
{
	struct proc_dir_entry *pde = PDE(file->f_path.dentry->d_inode);
	loff_t rv = -EINVAL;
	loff_t (*llseek)(struct file *, loff_t, int);

	spin_lock(&pde->pde_unload_lock);
	/*
	 * remove_proc_entry() is going to delete PDE (as part of module
	 * cleanup sequence). No new callers into module allowed.
	 */
	if (!pde->proc_fops) {
		spin_unlock(&pde->pde_unload_lock);
		return rv;
	}
	/*
	 * Bump refcount so that remove_proc_entry will wait for ->llseek to
	 * complete.
	 */
	pde->pde_users++;
	/*
	 * Save function pointer under lock, to protect against ->proc_fops
	 * NULL'ifying right after ->pde_unload_lock is dropped.
	 */
	llseek = pde->proc_fops->llseek;
	spin_unlock(&pde->pde_unload_lock);

	if (!llseek)
		llseek = default_llseek;
	rv = llseek(file, offset, whence);

	pde_users_dec(pde);
	return rv;
}

static ssize_t proc_reg_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
	struct proc_dir_entry *pde = PDE(file->f_path.dentry->d_inode);
	ssize_t rv = -EIO;
	ssize_t (*read)(struct file *, char __user *, size_t, loff_t *);

	spin_lock(&pde->pde_unload_lock);
	if (!pde->proc_fops) {
		spin_unlock(&pde->pde_unload_lock);
		return rv;
	}
	pde->pde_users++;
	read = pde->proc_fops->read;
	spin_unlock(&pde->pde_unload_lock);

	if (read)
		rv = read(file, buf, count, ppos);

	pde_users_dec(pde);
	return rv;
}

static ssize_t proc_reg_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
{
	struct proc_dir_entry *pde = PDE(file->f_path.dentry->d_inode);
	ssize_t rv = -EIO;
	ssize_t (*write)(struct file *, const char __user *, size_t, loff_t *);

	spin_lock(&pde->pde_unload_lock);
	if (!pde->proc_fops) {
		spin_unlock(&pde->pde_unload_lock);
		return rv;
	}
	pde->pde_users++;
	write = pde->proc_fops->write;
	spin_unlock(&pde->pde_unload_lock);

	if (write)
		rv = write(file, buf, count, ppos);

	pde_users_dec(pde);
	return rv;
}

static unsigned int proc_reg_poll(struct file *file, struct poll_table_struct *pts)
{
	struct proc_dir_entry *pde = PDE(file->f_path.dentry->d_inode);
	unsigned int rv = DEFAULT_POLLMASK;
	unsigned int (*poll)(struct file *, struct poll_table_struct *);

	spin_lock(&pde->pde_unload_lock);
	if (!pde->proc_fops) {
		spin_unlock(&pde->pde_unload_lock);
		return rv;
	}
	pde->pde_users++;
	poll = pde->proc_fops->poll;
	spin_unlock(&pde->pde_unload_lock);

	if (poll)
		rv = poll(file, pts);

	pde_users_dec(pde);
	return rv;
}

static long proc_reg_unlocked_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct proc_dir_entry *pde = PDE(file->f_path.dentry->d_inode);
	long rv = -ENOTTY;
	long (*unlocked_ioctl)(struct file *, unsigned int, unsigned long);
	int (*ioctl)(struct inode *, struct file *, unsigned int, unsigned long);

	spin_lock(&pde->pde_unload_lock);
	if (!pde->proc_fops) {
		spin_unlock(&pde->pde_unload_lock);
		return rv;
	}
	pde->pde_users++;
	unlocked_ioctl = pde->proc_fops->unlocked_ioctl;
	ioctl = pde->proc_fops->ioctl;
	spin_unlock(&pde->pde_unload_lock);

	if (unlocked_ioctl) {
		rv = unlocked_ioctl(file, cmd, arg);
		if (rv == -ENOIOCTLCMD)
			rv = -EINVAL;
	} else if (ioctl) {
		lock_kernel();
		rv = ioctl(file->f_path.dentry->d_inode, file, cmd, arg);
		unlock_kernel();
	}

	pde_users_dec(pde);
	return rv;
}

#ifdef CONFIG_COMPAT
static long proc_reg_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct proc_dir_entry *pde = PDE(file->f_path.dentry->d_inode);
	long rv = -ENOTTY;
	long (*compat_ioctl)(struct file *, unsigned int, unsigned long);

	spin_lock(&pde->pde_unload_lock);
	if (!pde->proc_fops) {
		spin_unlock(&pde->pde_unload_lock);
		return rv;
	}
	pde->pde_users++;
	compat_ioctl = pde->proc_fops->compat_ioctl;
	spin_unlock(&pde->pde_unload_lock);

	if (compat_ioctl)
		rv = compat_ioctl(file, cmd, arg);

	pde_users_dec(pde);
	return rv;
}
#endif

static int proc_reg_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct proc_dir_entry *pde = PDE(file->f_path.dentry->d_inode);
	int rv = -EIO;
	int (*mmap)(struct file *, struct vm_area_struct *);

	spin_lock(&pde->pde_unload_lock);
	if (!pde->proc_fops) {
		spin_unlock(&pde->pde_unload_lock);
		return rv;
	}
	pde->pde_users++;
	mmap = pde->proc_fops->mmap;
	spin_unlock(&pde->pde_unload_lock);

	if (mmap)
		rv = mmap(file, vma);

	pde_users_dec(pde);
	return rv;
}

static int proc_reg_open(struct inode *inode, struct file *file)
{
	struct proc_dir_entry *pde = PDE(inode);
	int rv = 0;
	int (*open)(struct inode *, struct file *);
	int (*release)(struct inode *, struct file *);
	struct pde_opener *pdeo;

	/*
	 * What for, you ask? Well, we can have open, rmmod, remove_proc_entry
	 * sequence. ->release won't be called because ->proc_fops will be
	 * cleared. Depending on complexity of ->release, consequences vary.
	 *
	 * We can't wait for mercy when close will be done for real, it's
	 * deadlockable: rmmod foo </proc/foo . So, we're going to do ->release
	 * by hand in remove_proc_entry(). For this, save opener's credentials
	 * for later.
	 */
	pdeo = kmalloc(sizeof(struct pde_opener), GFP_KERNEL);
	if (!pdeo)
		return -ENOMEM;

	spin_lock(&pde->pde_unload_lock);
	if (!pde->proc_fops) {
		spin_unlock(&pde->pde_unload_lock);
		kfree(pdeo);
		return rv;
	}
	pde->pde_users++;
	open = pde->proc_fops->open;
	release = pde->proc_fops->release;
	spin_unlock(&pde->pde_unload_lock);

	if (open)
		rv = open(inode, file);

	spin_lock(&pde->pde_unload_lock);
	if (rv == 0 && release) {
		/* To know what to release. */
		pdeo->inode = inode;
		pdeo->file = file;
		/* Strictly for "too late" ->release in proc_reg_release(). */
		pdeo->release = release;
		list_add(&pdeo->lh, &pde->pde_openers);
	} else
		kfree(pdeo);
	__pde_users_dec(pde);
	spin_unlock(&pde->pde_unload_lock);
	return rv;
}

static struct pde_opener *find_pde_opener(struct proc_dir_entry *pde,
					struct inode *inode, struct file *file)
{
	struct pde_opener *pdeo;

	list_for_each_entry(pdeo, &pde->pde_openers, lh) {
		if (pdeo->inode == inode && pdeo->file == file)
			return pdeo;
	}
	return NULL;
}

static int proc_reg_release(struct inode *inode, struct file *file)
{
	struct proc_dir_entry *pde = PDE(inode);
	int rv = 0;
	int (*release)(struct inode *, struct file *);
	struct pde_opener *pdeo;

	spin_lock(&pde->pde_unload_lock);
	pdeo = find_pde_opener(pde, inode, file);
	if (!pde->proc_fops) {
		/*
		 * Can't simply exit, __fput() will think that everything is OK,
		 * and move on to freeing struct file. remove_proc_entry() will
		 * find slacker in opener's list and will try to do non-trivial
		 * things with struct file. Therefore, remove opener from list.
		 *
		 * But if opener is removed from list, who will ->release it?
		 */
		if (pdeo) {
			list_del(&pdeo->lh);
			spin_unlock(&pde->pde_unload_lock);
			rv = pdeo->release(inode, file);
			kfree(pdeo);
		} else
			spin_unlock(&pde->pde_unload_lock);
		return rv;
	}
	pde->pde_users++;
	release = pde->proc_fops->release;
	if (pdeo) {
		list_del(&pdeo->lh);
		kfree(pdeo);
	}
	spin_unlock(&pde->pde_unload_lock);

	if (release)
		rv = release(inode, file);

	pde_users_dec(pde);
	return rv;
}

static const struct file_operations proc_reg_file_ops = {
	.llseek		= proc_reg_llseek,
	.read		= proc_reg_read,
	.write		= proc_reg_write,
	.poll		= proc_reg_poll,
	.unlocked_ioctl	= proc_reg_unlocked_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= proc_reg_compat_ioctl,
#endif
	.mmap		= proc_reg_mmap,
	.open		= proc_reg_open,
	.release	= proc_reg_release,
};

#ifdef CONFIG_COMPAT
static const struct file_operations proc_reg_file_ops_no_compat = {
	.llseek		= proc_reg_llseek,
	.read		= proc_reg_read,
	.write		= proc_reg_write,
	.poll		= proc_reg_poll,
	.unlocked_ioctl	= proc_reg_unlocked_ioctl,
	.mmap		= proc_reg_mmap,
	.open		= proc_reg_open,
	.release	= proc_reg_release,
};
#endif

struct inode *proc_get_inode(struct super_block *sb, unsigned int ino,
				struct proc_dir_entry *de)
{
	struct inode * inode;

	if (!try_module_get(de->owner))
		goto out_mod;

	inode = iget_locked(sb, ino);
	if (!inode)
		goto out_ino;
	if (inode->i_state & I_NEW) {
		inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
		PROC_I(inode)->fd = 0;
		PROC_I(inode)->pde = de;

		if (de->mode) {
			inode->i_mode = de->mode;
			inode->i_uid = de->uid;
			inode->i_gid = de->gid;
		}
		if (de->size)
			inode->i_size = de->size;
		if (de->nlink)
			inode->i_nlink = de->nlink;
		if (de->proc_iops)
			inode->i_op = de->proc_iops;
		if (de->proc_fops) {
			if (S_ISREG(inode->i_mode)) {
#ifdef CONFIG_COMPAT
				if (!de->proc_fops->compat_ioctl)
					inode->i_fop =
						&proc_reg_file_ops_no_compat;
				else
#endif
					inode->i_fop = &proc_reg_file_ops;
			} else {
				inode->i_fop = de->proc_fops;
			}
		}
		unlock_new_inode(inode);
	} else
		module_put(de->owner);
	return inode;

out_ino:
	module_put(de->owner);
out_mod:
	return NULL;
}

int proc_fill_super(struct super_block *s)
{
	struct inode * root_inode;

	s->s_flags |= MS_NODIRATIME | MS_NOSUID | MS_NOEXEC;
	s->s_blocksize = 1024;
	s->s_blocksize_bits = 10;
	s->s_magic = PROC_SUPER_MAGIC;
	s->s_op = &proc_sops;
	s->s_time_gran = 1;

	de_get(&proc_root);
	root_inode = proc_get_inode(s, PROC_ROOT_INO, &proc_root);
	if (!root_inode)
		goto out_no_root;
	root_inode->i_uid = 0;
	root_inode->i_gid = 0;
	s->s_root = d_alloc_root(root_inode);
	if (!s->s_root)
		goto out_no_root;
	return 0;

out_no_root:
	printk("proc_read_super: get root inode failed\n");
	iput(root_inode);
	de_put(&proc_root);
	return -ENOMEM;
}
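
For context, a minimal sketch of the caller side that the proc_reg_* wrappers above protect: a module publishing a regular /proc file with its own file_operations. This is not part of inode.c; the "foo" entry and foo_* names are hypothetical, and the sketch assumes the 2.6.27-era proc_create()/remove_proc_entry() API. Because the entry is a regular file with proc_fops set, proc_get_inode() installs proc_reg_file_ops, so each foo_read() runs with pde_users bumped and remove_proc_entry() can wait out (or hand-release) users during "rmmod foo".

/* Hypothetical example module, not part of fs/proc/inode.c. */
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>

static ssize_t foo_read(struct file *file, char __user *buf,
			size_t count, loff_t *ppos)
{
	static const char msg[] = "hello from foo\n";

	/* Copy the (short) message out, honouring *ppos and count. */
	return simple_read_from_buffer(buf, count, ppos, msg, sizeof(msg) - 1);
}

static const struct file_operations foo_fops = {
	.owner	= THIS_MODULE,
	.read	= foo_read,
};

static int __init foo_init(void)
{
	/*
	 * proc_create() stores foo_fops in the PDE; since /proc/foo is a
	 * regular file, reads are routed through proc_reg_read() above.
	 */
	if (!proc_create("foo", 0444, NULL, &foo_fops))
		return -ENOMEM;
	return 0;
}

static void __exit foo_exit(void)
{
	/* Waits for in-flight proc_reg_* callers to drop pde_users. */
	remove_proc_entry("foo", NULL);
}

module_init(foo_init);
module_exit(foo_exit);
MODULE_LICENSE("GPL");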