Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

at v2.6.17-rc2 2474 lines 73 kB view raw
1/* 2 * kernel/cpuset.c 3 * 4 * Processor and Memory placement constraints for sets of tasks. 5 * 6 * Copyright (C) 2003 BULL SA. 7 * Copyright (C) 2004-2006 Silicon Graphics, Inc. 8 * 9 * Portions derived from Patrick Mochel's sysfs code. 10 * sysfs is Copyright (c) 2001-3 Patrick Mochel 11 * 12 * 2003-10-10 Written by Simon Derr. 13 * 2003-10-22 Updates by Stephen Hemminger. 14 * 2004 May-July Rework by Paul Jackson. 15 * 16 * This file is subject to the terms and conditions of the GNU General Public 17 * License. See the file COPYING in the main directory of the Linux 18 * distribution for more details. 19 */ 20 21#include <linux/config.h> 22#include <linux/cpu.h> 23#include <linux/cpumask.h> 24#include <linux/cpuset.h> 25#include <linux/err.h> 26#include <linux/errno.h> 27#include <linux/file.h> 28#include <linux/fs.h> 29#include <linux/init.h> 30#include <linux/interrupt.h> 31#include <linux/kernel.h> 32#include <linux/kmod.h> 33#include <linux/list.h> 34#include <linux/mempolicy.h> 35#include <linux/mm.h> 36#include <linux/module.h> 37#include <linux/mount.h> 38#include <linux/namei.h> 39#include <linux/pagemap.h> 40#include <linux/proc_fs.h> 41#include <linux/rcupdate.h> 42#include <linux/sched.h> 43#include <linux/seq_file.h> 44#include <linux/slab.h> 45#include <linux/smp_lock.h> 46#include <linux/spinlock.h> 47#include <linux/stat.h> 48#include <linux/string.h> 49#include <linux/time.h> 50#include <linux/backing-dev.h> 51#include <linux/sort.h> 52 53#include <asm/uaccess.h> 54#include <asm/atomic.h> 55#include <linux/mutex.h> 56 57#define CPUSET_SUPER_MAGIC 0x27e0eb 58 59/* 60 * Tracks how many cpusets are currently defined in system. 61 * When there is only one cpuset (the root cpuset) we can 62 * short circuit some hooks. 63 */ 64int number_of_cpusets __read_mostly; 65 66/* See "Frequency meter" comments, below. */ 67 68struct fmeter { 69 int cnt; /* unprocessed events count */ 70 int val; /* most recent output value */ 71 time_t time; /* clock (secs) when val computed */ 72 spinlock_t lock; /* guards read or write of above */ 73}; 74 75struct cpuset { 76 unsigned long flags; /* "unsigned long" so bitops work */ 77 cpumask_t cpus_allowed; /* CPUs allowed to tasks in cpuset */ 78 nodemask_t mems_allowed; /* Memory Nodes allowed to tasks */ 79 80 /* 81 * Count is atomic so can incr (fork) or decr (exit) without a lock. 82 */ 83 atomic_t count; /* count tasks using this cpuset */ 84 85 /* 86 * We link our 'sibling' struct into our parents 'children'. 87 * Our children link their 'sibling' into our 'children'. 88 */ 89 struct list_head sibling; /* my parents children */ 90 struct list_head children; /* my children */ 91 92 struct cpuset *parent; /* my parent */ 93 struct dentry *dentry; /* cpuset fs entry */ 94 95 /* 96 * Copy of global cpuset_mems_generation as of the most 97 * recent time this cpuset changed its mems_allowed. 
98 */ 99 int mems_generation; 100 101 struct fmeter fmeter; /* memory_pressure filter */ 102}; 103 104/* bits in struct cpuset flags field */ 105typedef enum { 106 CS_CPU_EXCLUSIVE, 107 CS_MEM_EXCLUSIVE, 108 CS_MEMORY_MIGRATE, 109 CS_REMOVED, 110 CS_NOTIFY_ON_RELEASE, 111 CS_SPREAD_PAGE, 112 CS_SPREAD_SLAB, 113} cpuset_flagbits_t; 114 115/* convenient tests for these bits */ 116static inline int is_cpu_exclusive(const struct cpuset *cs) 117{ 118 return test_bit(CS_CPU_EXCLUSIVE, &cs->flags); 119} 120 121static inline int is_mem_exclusive(const struct cpuset *cs) 122{ 123 return test_bit(CS_MEM_EXCLUSIVE, &cs->flags); 124} 125 126static inline int is_removed(const struct cpuset *cs) 127{ 128 return test_bit(CS_REMOVED, &cs->flags); 129} 130 131static inline int notify_on_release(const struct cpuset *cs) 132{ 133 return test_bit(CS_NOTIFY_ON_RELEASE, &cs->flags); 134} 135 136static inline int is_memory_migrate(const struct cpuset *cs) 137{ 138 return test_bit(CS_MEMORY_MIGRATE, &cs->flags); 139} 140 141static inline int is_spread_page(const struct cpuset *cs) 142{ 143 return test_bit(CS_SPREAD_PAGE, &cs->flags); 144} 145 146static inline int is_spread_slab(const struct cpuset *cs) 147{ 148 return test_bit(CS_SPREAD_SLAB, &cs->flags); 149} 150 151/* 152 * Increment this integer everytime any cpuset changes its 153 * mems_allowed value. Users of cpusets can track this generation 154 * number, and avoid having to lock and reload mems_allowed unless 155 * the cpuset they're using changes generation. 156 * 157 * A single, global generation is needed because attach_task() could 158 * reattach a task to a different cpuset, which must not have its 159 * generation numbers aliased with those of that tasks previous cpuset. 160 * 161 * Generations are needed for mems_allowed because one task cannot 162 * modify anothers memory placement. So we must enable every task, 163 * on every visit to __alloc_pages(), to efficiently check whether 164 * its current->cpuset->mems_allowed has changed, requiring an update 165 * of its current->mems_allowed. 166 * 167 * Since cpuset_mems_generation is guarded by manage_mutex, 168 * there is no need to mark it atomic. 169 */ 170static int cpuset_mems_generation; 171 172static struct cpuset top_cpuset = { 173 .flags = ((1 << CS_CPU_EXCLUSIVE) | (1 << CS_MEM_EXCLUSIVE)), 174 .cpus_allowed = CPU_MASK_ALL, 175 .mems_allowed = NODE_MASK_ALL, 176 .count = ATOMIC_INIT(0), 177 .sibling = LIST_HEAD_INIT(top_cpuset.sibling), 178 .children = LIST_HEAD_INIT(top_cpuset.children), 179}; 180 181static struct vfsmount *cpuset_mount; 182static struct super_block *cpuset_sb; 183 184/* 185 * We have two global cpuset mutexes below. They can nest. 186 * It is ok to first take manage_mutex, then nest callback_mutex. We also 187 * require taking task_lock() when dereferencing a tasks cpuset pointer. 188 * See "The task_lock() exception", at the end of this comment. 189 * 190 * A task must hold both mutexes to modify cpusets. If a task 191 * holds manage_mutex, then it blocks others wanting that mutex, 192 * ensuring that it is the only task able to also acquire callback_mutex 193 * and be able to modify cpusets. It can perform various checks on 194 * the cpuset structure first, knowing nothing will change. It can 195 * also allocate memory while just holding manage_mutex. While it is 196 * performing these checks, various callback routines can briefly 197 * acquire callback_mutex to query cpusets. Once it is ready to make 198 * the changes, it takes callback_mutex, blocking everyone else. 
199 * 200 * Calls to the kernel memory allocator can not be made while holding 201 * callback_mutex, as that would risk double tripping on callback_mutex 202 * from one of the callbacks into the cpuset code from within 203 * __alloc_pages(). 204 * 205 * If a task is only holding callback_mutex, then it has read-only 206 * access to cpusets. 207 * 208 * The task_struct fields mems_allowed and mems_generation may only 209 * be accessed in the context of that task, so require no locks. 210 * 211 * Any task can increment and decrement the count field without lock. 212 * So in general, code holding manage_mutex or callback_mutex can't rely 213 * on the count field not changing. However, if the count goes to 214 * zero, then only attach_task(), which holds both mutexes, can 215 * increment it again. Because a count of zero means that no tasks 216 * are currently attached, therefore there is no way a task attached 217 * to that cpuset can fork (the other way to increment the count). 218 * So code holding manage_mutex or callback_mutex can safely assume that 219 * if the count is zero, it will stay zero. Similarly, if a task 220 * holds manage_mutex or callback_mutex on a cpuset with zero count, it 221 * knows that the cpuset won't be removed, as cpuset_rmdir() needs 222 * both of those mutexes. 223 * 224 * The cpuset_common_file_write handler for operations that modify 225 * the cpuset hierarchy holds manage_mutex across the entire operation, 226 * single threading all such cpuset modifications across the system. 227 * 228 * The cpuset_common_file_read() handlers only hold callback_mutex across 229 * small pieces of code, such as when reading out possibly multi-word 230 * cpumasks and nodemasks. 231 * 232 * The fork and exit callbacks cpuset_fork() and cpuset_exit(), don't 233 * (usually) take either mutex. These are the two most performance 234 * critical pieces of code here. The exception occurs on cpuset_exit(), 235 * when a task in a notify_on_release cpuset exits. Then manage_mutex 236 * is taken, and if the cpuset count is zero, a usermode call made 237 * to /sbin/cpuset_release_agent with the name of the cpuset (path 238 * relative to the root of cpuset file system) as the argument. 239 * 240 * A cpuset can only be deleted if both its 'count' of using tasks 241 * is zero, and its list of 'children' cpusets is empty. Since all 242 * tasks in the system use _some_ cpuset, and since there is always at 243 * least one task in the system (init, pid == 1), therefore, top_cpuset 244 * always has either children cpusets and/or using tasks. So we don't 245 * need a special hack to ensure that top_cpuset cannot be deleted. 246 * 247 * The above "Tale of Two Semaphores" would be complete, but for: 248 * 249 * The task_lock() exception 250 * 251 * The need for this exception arises from the action of attach_task(), 252 * which overwrites one tasks cpuset pointer with another. It does 253 * so using both mutexes, however there are several performance 254 * critical places that need to reference task->cpuset without the 255 * expense of grabbing a system global mutex. Therefore except as 256 * noted below, when dereferencing or, as in attach_task(), modifying 257 * a tasks cpuset pointer we use task_lock(), which acts on a spinlock 258 * (task->alloc_lock) already in the task_struct routinely used for 259 * such matters. 260 * 261 * P.S. One more locking exception. 
RCU is used to guard the 262 * update of a tasks cpuset pointer by attach_task() and the 263 * access of task->cpuset->mems_generation via that pointer in 264 * the routine cpuset_update_task_memory_state(). 265 */ 266 267static DEFINE_MUTEX(manage_mutex); 268static DEFINE_MUTEX(callback_mutex); 269 270/* 271 * A couple of forward declarations required, due to cyclic reference loop: 272 * cpuset_mkdir -> cpuset_create -> cpuset_populate_dir -> cpuset_add_file 273 * -> cpuset_create_file -> cpuset_dir_inode_operations -> cpuset_mkdir. 274 */ 275 276static int cpuset_mkdir(struct inode *dir, struct dentry *dentry, int mode); 277static int cpuset_rmdir(struct inode *unused_dir, struct dentry *dentry); 278 279static struct backing_dev_info cpuset_backing_dev_info = { 280 .ra_pages = 0, /* No readahead */ 281 .capabilities = BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_WRITEBACK, 282}; 283 284static struct inode *cpuset_new_inode(mode_t mode) 285{ 286 struct inode *inode = new_inode(cpuset_sb); 287 288 if (inode) { 289 inode->i_mode = mode; 290 inode->i_uid = current->fsuid; 291 inode->i_gid = current->fsgid; 292 inode->i_blksize = PAGE_CACHE_SIZE; 293 inode->i_blocks = 0; 294 inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; 295 inode->i_mapping->backing_dev_info = &cpuset_backing_dev_info; 296 } 297 return inode; 298} 299 300static void cpuset_diput(struct dentry *dentry, struct inode *inode) 301{ 302 /* is dentry a directory ? if so, kfree() associated cpuset */ 303 if (S_ISDIR(inode->i_mode)) { 304 struct cpuset *cs = dentry->d_fsdata; 305 BUG_ON(!(is_removed(cs))); 306 kfree(cs); 307 } 308 iput(inode); 309} 310 311static struct dentry_operations cpuset_dops = { 312 .d_iput = cpuset_diput, 313}; 314 315static struct dentry *cpuset_get_dentry(struct dentry *parent, const char *name) 316{ 317 struct dentry *d = lookup_one_len(name, parent, strlen(name)); 318 if (!IS_ERR(d)) 319 d->d_op = &cpuset_dops; 320 return d; 321} 322 323static void remove_dir(struct dentry *d) 324{ 325 struct dentry *parent = dget(d->d_parent); 326 327 d_delete(d); 328 simple_rmdir(parent->d_inode, d); 329 dput(parent); 330} 331 332/* 333 * NOTE : the dentry must have been dget()'ed 334 */ 335static void cpuset_d_remove_dir(struct dentry *dentry) 336{ 337 struct list_head *node; 338 339 spin_lock(&dcache_lock); 340 node = dentry->d_subdirs.next; 341 while (node != &dentry->d_subdirs) { 342 struct dentry *d = list_entry(node, struct dentry, d_u.d_child); 343 list_del_init(node); 344 if (d->d_inode) { 345 d = dget_locked(d); 346 spin_unlock(&dcache_lock); 347 d_delete(d); 348 simple_unlink(dentry->d_inode, d); 349 dput(d); 350 spin_lock(&dcache_lock); 351 } 352 node = dentry->d_subdirs.next; 353 } 354 list_del_init(&dentry->d_u.d_child); 355 spin_unlock(&dcache_lock); 356 remove_dir(dentry); 357} 358 359static struct super_operations cpuset_ops = { 360 .statfs = simple_statfs, 361 .drop_inode = generic_delete_inode, 362}; 363 364static int cpuset_fill_super(struct super_block *sb, void *unused_data, 365 int unused_silent) 366{ 367 struct inode *inode; 368 struct dentry *root; 369 370 sb->s_blocksize = PAGE_CACHE_SIZE; 371 sb->s_blocksize_bits = PAGE_CACHE_SHIFT; 372 sb->s_magic = CPUSET_SUPER_MAGIC; 373 sb->s_op = &cpuset_ops; 374 cpuset_sb = sb; 375 376 inode = cpuset_new_inode(S_IFDIR | S_IRUGO | S_IXUGO | S_IWUSR); 377 if (inode) { 378 inode->i_op = &simple_dir_inode_operations; 379 inode->i_fop = &simple_dir_operations; 380 /* directories start off with i_nlink == 2 (for "." 
entry) */ 381 inode->i_nlink++; 382 } else { 383 return -ENOMEM; 384 } 385 386 root = d_alloc_root(inode); 387 if (!root) { 388 iput(inode); 389 return -ENOMEM; 390 } 391 sb->s_root = root; 392 return 0; 393} 394 395static struct super_block *cpuset_get_sb(struct file_system_type *fs_type, 396 int flags, const char *unused_dev_name, 397 void *data) 398{ 399 return get_sb_single(fs_type, flags, data, cpuset_fill_super); 400} 401 402static struct file_system_type cpuset_fs_type = { 403 .name = "cpuset", 404 .get_sb = cpuset_get_sb, 405 .kill_sb = kill_litter_super, 406}; 407 408/* struct cftype: 409 * 410 * The files in the cpuset filesystem mostly have a very simple read/write 411 * handling, some common function will take care of it. Nevertheless some cases 412 * (read tasks) are special and therefore I define this structure for every 413 * kind of file. 414 * 415 * 416 * When reading/writing to a file: 417 * - the cpuset to use in file->f_dentry->d_parent->d_fsdata 418 * - the 'cftype' of the file is file->f_dentry->d_fsdata 419 */ 420 421struct cftype { 422 char *name; 423 int private; 424 int (*open) (struct inode *inode, struct file *file); 425 ssize_t (*read) (struct file *file, char __user *buf, size_t nbytes, 426 loff_t *ppos); 427 int (*write) (struct file *file, const char __user *buf, size_t nbytes, 428 loff_t *ppos); 429 int (*release) (struct inode *inode, struct file *file); 430}; 431 432static inline struct cpuset *__d_cs(struct dentry *dentry) 433{ 434 return dentry->d_fsdata; 435} 436 437static inline struct cftype *__d_cft(struct dentry *dentry) 438{ 439 return dentry->d_fsdata; 440} 441 442/* 443 * Call with manage_mutex held. Writes path of cpuset into buf. 444 * Returns 0 on success, -errno on error. 445 */ 446 447static int cpuset_path(const struct cpuset *cs, char *buf, int buflen) 448{ 449 char *start; 450 451 start = buf + buflen; 452 453 *--start = '\0'; 454 for (;;) { 455 int len = cs->dentry->d_name.len; 456 if ((start -= len) < buf) 457 return -ENAMETOOLONG; 458 memcpy(start, cs->dentry->d_name.name, len); 459 cs = cs->parent; 460 if (!cs) 461 break; 462 if (!cs->parent) 463 continue; 464 if (--start < buf) 465 return -ENAMETOOLONG; 466 *start = '/'; 467 } 468 memmove(buf, start, buf + buflen - start); 469 return 0; 470} 471 472/* 473 * Notify userspace when a cpuset is released, by running 474 * /sbin/cpuset_release_agent with the name of the cpuset (path 475 * relative to the root of cpuset file system) as the argument. 476 * 477 * Most likely, this user command will try to rmdir this cpuset. 478 * 479 * This races with the possibility that some other task will be 480 * attached to this cpuset before it is removed, or that some other 481 * user task will 'mkdir' a child cpuset of this cpuset. That's ok. 482 * The presumed 'rmdir' will fail quietly if this cpuset is no longer 483 * unused, and this cpuset will be reprieved from its death sentence, 484 * to continue to serve a useful existence. Next time it's released, 485 * we will get notified again, if it still has 'notify_on_release' set. 486 * 487 * The final arg to call_usermodehelper() is 0, which means don't 488 * wait. The separate /sbin/cpuset_release_agent task is forked by 489 * call_usermodehelper(), then control in this thread returns here, 490 * without waiting for the release agent task. We don't bother to 491 * wait because the caller of this routine has no use for the exit 492 * status of the /sbin/cpuset_release_agent task, so no sense holding 493 * our caller up for that. 
494 * 495 * When we had only one cpuset mutex, we had to call this 496 * without holding it, to avoid deadlock when call_usermodehelper() 497 * allocated memory. With two locks, we could now call this while 498 * holding manage_mutex, but we still don't, so as to minimize 499 * the time manage_mutex is held. 500 */ 501 502static void cpuset_release_agent(const char *pathbuf) 503{ 504 char *argv[3], *envp[3]; 505 int i; 506 507 if (!pathbuf) 508 return; 509 510 i = 0; 511 argv[i++] = "/sbin/cpuset_release_agent"; 512 argv[i++] = (char *)pathbuf; 513 argv[i] = NULL; 514 515 i = 0; 516 /* minimal command environment */ 517 envp[i++] = "HOME=/"; 518 envp[i++] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin"; 519 envp[i] = NULL; 520 521 call_usermodehelper(argv[0], argv, envp, 0); 522 kfree(pathbuf); 523} 524 525/* 526 * Either cs->count of using tasks transitioned to zero, or the 527 * cs->children list of child cpusets just became empty. If this 528 * cs is notify_on_release() and now both the user count is zero and 529 * the list of children is empty, prepare cpuset path in a kmalloc'd 530 * buffer, to be returned via ppathbuf, so that the caller can invoke 531 * cpuset_release_agent() with it later on, once manage_mutex is dropped. 532 * Call here with manage_mutex held. 533 * 534 * This check_for_release() routine is responsible for kmalloc'ing 535 * pathbuf. The above cpuset_release_agent() is responsible for 536 * kfree'ing pathbuf. The caller of these routines is responsible 537 * for providing a pathbuf pointer, initialized to NULL, then 538 * calling check_for_release() with manage_mutex held and the address 539 * of the pathbuf pointer, then dropping manage_mutex, then calling 540 * cpuset_release_agent() with pathbuf, as set by check_for_release(). 541 */ 542 543static void check_for_release(struct cpuset *cs, char **ppathbuf) 544{ 545 if (notify_on_release(cs) && atomic_read(&cs->count) == 0 && 546 list_empty(&cs->children)) { 547 char *buf; 548 549 buf = kmalloc(PAGE_SIZE, GFP_KERNEL); 550 if (!buf) 551 return; 552 if (cpuset_path(cs, buf, PAGE_SIZE) < 0) 553 kfree(buf); 554 else 555 *ppathbuf = buf; 556 } 557} 558 559/* 560 * Return in *pmask the portion of a cpusets's cpus_allowed that 561 * are online. If none are online, walk up the cpuset hierarchy 562 * until we find one that does have some online cpus. If we get 563 * all the way to the top and still haven't found any online cpus, 564 * return cpu_online_map. Or if passed a NULL cs from an exit'ing 565 * task, return cpu_online_map. 566 * 567 * One way or another, we guarantee to return some non-empty subset 568 * of cpu_online_map. 569 * 570 * Call with callback_mutex held. 571 */ 572 573static void guarantee_online_cpus(const struct cpuset *cs, cpumask_t *pmask) 574{ 575 while (cs && !cpus_intersects(cs->cpus_allowed, cpu_online_map)) 576 cs = cs->parent; 577 if (cs) 578 cpus_and(*pmask, cs->cpus_allowed, cpu_online_map); 579 else 580 *pmask = cpu_online_map; 581 BUG_ON(!cpus_intersects(*pmask, cpu_online_map)); 582} 583 584/* 585 * Return in *pmask the portion of a cpusets's mems_allowed that 586 * are online. If none are online, walk up the cpuset hierarchy 587 * until we find one that does have some online mems. If we get 588 * all the way to the top and still haven't found any online mems, 589 * return node_online_map. 590 * 591 * One way or another, we guarantee to return some non-empty subset 592 * of node_online_map. 593 * 594 * Call with callback_mutex held. 
595 */ 596 597static void guarantee_online_mems(const struct cpuset *cs, nodemask_t *pmask) 598{ 599 while (cs && !nodes_intersects(cs->mems_allowed, node_online_map)) 600 cs = cs->parent; 601 if (cs) 602 nodes_and(*pmask, cs->mems_allowed, node_online_map); 603 else 604 *pmask = node_online_map; 605 BUG_ON(!nodes_intersects(*pmask, node_online_map)); 606} 607 608/** 609 * cpuset_update_task_memory_state - update task memory placement 610 * 611 * If the current tasks cpusets mems_allowed changed behind our 612 * backs, update current->mems_allowed, mems_generation and task NUMA 613 * mempolicy to the new value. 614 * 615 * Task mempolicy is updated by rebinding it relative to the 616 * current->cpuset if a task has its memory placement changed. 617 * Do not call this routine if in_interrupt(). 618 * 619 * Call without callback_mutex or task_lock() held. May be 620 * called with or without manage_mutex held. Thanks in part to 621 * 'the_top_cpuset_hack', the tasks cpuset pointer will never 622 * be NULL. This routine also might acquire callback_mutex and 623 * current->mm->mmap_sem during call. 624 * 625 * Reading current->cpuset->mems_generation doesn't need task_lock 626 * to guard the current->cpuset derefence, because it is guarded 627 * from concurrent freeing of current->cpuset by attach_task(), 628 * using RCU. 629 * 630 * The rcu_dereference() is technically probably not needed, 631 * as I don't actually mind if I see a new cpuset pointer but 632 * an old value of mems_generation. However this really only 633 * matters on alpha systems using cpusets heavily. If I dropped 634 * that rcu_dereference(), it would save them a memory barrier. 635 * For all other arch's, rcu_dereference is a no-op anyway, and for 636 * alpha systems not using cpusets, another planned optimization, 637 * avoiding the rcu critical section for tasks in the root cpuset 638 * which is statically allocated, so can't vanish, will make this 639 * irrelevant. Better to use RCU as intended, than to engage in 640 * some cute trick to save a memory barrier that is impossible to 641 * test, for alpha systems using cpusets heavily, which might not 642 * even exist. 643 * 644 * This routine is needed to update the per-task mems_allowed data, 645 * within the tasks context, when it is trying to allocate memory 646 * (in various mm/mempolicy.c routines) and notices that some other 647 * task has been modifying its cpuset. 648 */ 649 650void cpuset_update_task_memory_state(void) 651{ 652 int my_cpusets_mem_gen; 653 struct task_struct *tsk = current; 654 struct cpuset *cs; 655 656 if (tsk->cpuset == &top_cpuset) { 657 /* Don't need rcu for top_cpuset. It's never freed. 
*/ 658 my_cpusets_mem_gen = top_cpuset.mems_generation; 659 } else { 660 rcu_read_lock(); 661 cs = rcu_dereference(tsk->cpuset); 662 my_cpusets_mem_gen = cs->mems_generation; 663 rcu_read_unlock(); 664 } 665 666 if (my_cpusets_mem_gen != tsk->cpuset_mems_generation) { 667 mutex_lock(&callback_mutex); 668 task_lock(tsk); 669 cs = tsk->cpuset; /* Maybe changed when task not locked */ 670 guarantee_online_mems(cs, &tsk->mems_allowed); 671 tsk->cpuset_mems_generation = cs->mems_generation; 672 if (is_spread_page(cs)) 673 tsk->flags |= PF_SPREAD_PAGE; 674 else 675 tsk->flags &= ~PF_SPREAD_PAGE; 676 if (is_spread_slab(cs)) 677 tsk->flags |= PF_SPREAD_SLAB; 678 else 679 tsk->flags &= ~PF_SPREAD_SLAB; 680 task_unlock(tsk); 681 mutex_unlock(&callback_mutex); 682 mpol_rebind_task(tsk, &tsk->mems_allowed); 683 } 684} 685 686/* 687 * is_cpuset_subset(p, q) - Is cpuset p a subset of cpuset q? 688 * 689 * One cpuset is a subset of another if all its allowed CPUs and 690 * Memory Nodes are a subset of the other, and its exclusive flags 691 * are only set if the other's are set. Call holding manage_mutex. 692 */ 693 694static int is_cpuset_subset(const struct cpuset *p, const struct cpuset *q) 695{ 696 return cpus_subset(p->cpus_allowed, q->cpus_allowed) && 697 nodes_subset(p->mems_allowed, q->mems_allowed) && 698 is_cpu_exclusive(p) <= is_cpu_exclusive(q) && 699 is_mem_exclusive(p) <= is_mem_exclusive(q); 700} 701 702/* 703 * validate_change() - Used to validate that any proposed cpuset change 704 * follows the structural rules for cpusets. 705 * 706 * If we replaced the flag and mask values of the current cpuset 707 * (cur) with those values in the trial cpuset (trial), would 708 * our various subset and exclusive rules still be valid? Presumes 709 * manage_mutex held. 710 * 711 * 'cur' is the address of an actual, in-use cpuset. Operations 712 * such as list traversal that depend on the actual address of the 713 * cpuset in the list must use cur below, not trial. 714 * 715 * 'trial' is the address of bulk structure copy of cur, with 716 * perhaps one or more of the fields cpus_allowed, mems_allowed, 717 * or flags changed to new, trial values. 718 * 719 * Return 0 if valid, -errno if not. 720 */ 721 722static int validate_change(const struct cpuset *cur, const struct cpuset *trial) 723{ 724 struct cpuset *c, *par; 725 726 /* Each of our child cpusets must be a subset of us */ 727 list_for_each_entry(c, &cur->children, sibling) { 728 if (!is_cpuset_subset(c, trial)) 729 return -EBUSY; 730 } 731 732 /* Remaining checks don't apply to root cpuset */ 733 if ((par = cur->parent) == NULL) 734 return 0; 735 736 /* We must be a subset of our parent cpuset */ 737 if (!is_cpuset_subset(trial, par)) 738 return -EACCES; 739 740 /* If either I or some sibling (!= me) is exclusive, we can't overlap */ 741 list_for_each_entry(c, &par->children, sibling) { 742 if ((is_cpu_exclusive(trial) || is_cpu_exclusive(c)) && 743 c != cur && 744 cpus_intersects(trial->cpus_allowed, c->cpus_allowed)) 745 return -EINVAL; 746 if ((is_mem_exclusive(trial) || is_mem_exclusive(c)) && 747 c != cur && 748 nodes_intersects(trial->mems_allowed, c->mems_allowed)) 749 return -EINVAL; 750 } 751 752 return 0; 753} 754 755/* 756 * For a given cpuset cur, partition the system as follows 757 * a. All cpus in the parent cpuset's cpus_allowed that are not part of any 758 * exclusive child cpusets 759 * b. 
All cpus in the current cpuset's cpus_allowed that are not part of any 760 * exclusive child cpusets 761 * Build these two partitions by calling partition_sched_domains 762 * 763 * Call with manage_mutex held. May nest a call to the 764 * lock_cpu_hotplug()/unlock_cpu_hotplug() pair. 765 */ 766 767static void update_cpu_domains(struct cpuset *cur) 768{ 769 struct cpuset *c, *par = cur->parent; 770 cpumask_t pspan, cspan; 771 772 if (par == NULL || cpus_empty(cur->cpus_allowed)) 773 return; 774 775 /* 776 * Get all cpus from parent's cpus_allowed not part of exclusive 777 * children 778 */ 779 pspan = par->cpus_allowed; 780 list_for_each_entry(c, &par->children, sibling) { 781 if (is_cpu_exclusive(c)) 782 cpus_andnot(pspan, pspan, c->cpus_allowed); 783 } 784 if (is_removed(cur) || !is_cpu_exclusive(cur)) { 785 cpus_or(pspan, pspan, cur->cpus_allowed); 786 if (cpus_equal(pspan, cur->cpus_allowed)) 787 return; 788 cspan = CPU_MASK_NONE; 789 } else { 790 if (cpus_empty(pspan)) 791 return; 792 cspan = cur->cpus_allowed; 793 /* 794 * Get all cpus from current cpuset's cpus_allowed not part 795 * of exclusive children 796 */ 797 list_for_each_entry(c, &cur->children, sibling) { 798 if (is_cpu_exclusive(c)) 799 cpus_andnot(cspan, cspan, c->cpus_allowed); 800 } 801 } 802 803 lock_cpu_hotplug(); 804 partition_sched_domains(&pspan, &cspan); 805 unlock_cpu_hotplug(); 806} 807 808/* 809 * Call with manage_mutex held. May take callback_mutex during call. 810 */ 811 812static int update_cpumask(struct cpuset *cs, char *buf) 813{ 814 struct cpuset trialcs; 815 int retval, cpus_unchanged; 816 817 trialcs = *cs; 818 retval = cpulist_parse(buf, trialcs.cpus_allowed); 819 if (retval < 0) 820 return retval; 821 cpus_and(trialcs.cpus_allowed, trialcs.cpus_allowed, cpu_online_map); 822 if (cpus_empty(trialcs.cpus_allowed)) 823 return -ENOSPC; 824 retval = validate_change(cs, &trialcs); 825 if (retval < 0) 826 return retval; 827 cpus_unchanged = cpus_equal(cs->cpus_allowed, trialcs.cpus_allowed); 828 mutex_lock(&callback_mutex); 829 cs->cpus_allowed = trialcs.cpus_allowed; 830 mutex_unlock(&callback_mutex); 831 if (is_cpu_exclusive(cs) && !cpus_unchanged) 832 update_cpu_domains(cs); 833 return 0; 834} 835 836/* 837 * cpuset_migrate_mm 838 * 839 * Migrate memory region from one set of nodes to another. 840 * 841 * Temporarilly set tasks mems_allowed to target nodes of migration, 842 * so that the migration code can allocate pages on these nodes. 843 * 844 * Call holding manage_mutex, so our current->cpuset won't change 845 * during this call, as manage_mutex holds off any attach_task() 846 * calls. Therefore we don't need to take task_lock around the 847 * call to guarantee_online_mems(), as we know no one is changing 848 * our tasks cpuset. 849 * 850 * Hold callback_mutex around the two modifications of our tasks 851 * mems_allowed to synchronize with cpuset_mems_allowed(). 852 * 853 * While the mm_struct we are migrating is typically from some 854 * other task, the task_struct mems_allowed that we are hacking 855 * is for our current task, which must allocate new pages for that 856 * migrating memory region. 
857 * 858 * We call cpuset_update_task_memory_state() before hacking 859 * our tasks mems_allowed, so that we are assured of being in 860 * sync with our tasks cpuset, and in particular, callbacks to 861 * cpuset_update_task_memory_state() from nested page allocations 862 * won't see any mismatch of our cpuset and task mems_generation 863 * values, so won't overwrite our hacked tasks mems_allowed 864 * nodemask. 865 */ 866 867static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from, 868 const nodemask_t *to) 869{ 870 struct task_struct *tsk = current; 871 872 cpuset_update_task_memory_state(); 873 874 mutex_lock(&callback_mutex); 875 tsk->mems_allowed = *to; 876 mutex_unlock(&callback_mutex); 877 878 do_migrate_pages(mm, from, to, MPOL_MF_MOVE_ALL); 879 880 mutex_lock(&callback_mutex); 881 guarantee_online_mems(tsk->cpuset, &tsk->mems_allowed); 882 mutex_unlock(&callback_mutex); 883} 884 885/* 886 * Handle user request to change the 'mems' memory placement 887 * of a cpuset. Needs to validate the request, update the 888 * cpusets mems_allowed and mems_generation, and for each 889 * task in the cpuset, rebind any vma mempolicies and if 890 * the cpuset is marked 'memory_migrate', migrate the tasks 891 * pages to the new memory. 892 * 893 * Call with manage_mutex held. May take callback_mutex during call. 894 * Will take tasklist_lock, scan tasklist for tasks in cpuset cs, 895 * lock each such tasks mm->mmap_sem, scan its vma's and rebind 896 * their mempolicies to the cpusets new mems_allowed. 897 */ 898 899static int update_nodemask(struct cpuset *cs, char *buf) 900{ 901 struct cpuset trialcs; 902 nodemask_t oldmem; 903 struct task_struct *g, *p; 904 struct mm_struct **mmarray; 905 int i, n, ntasks; 906 int migrate; 907 int fudge; 908 int retval; 909 910 trialcs = *cs; 911 retval = nodelist_parse(buf, trialcs.mems_allowed); 912 if (retval < 0) 913 goto done; 914 nodes_and(trialcs.mems_allowed, trialcs.mems_allowed, node_online_map); 915 oldmem = cs->mems_allowed; 916 if (nodes_equal(oldmem, trialcs.mems_allowed)) { 917 retval = 0; /* Too easy - nothing to do */ 918 goto done; 919 } 920 if (nodes_empty(trialcs.mems_allowed)) { 921 retval = -ENOSPC; 922 goto done; 923 } 924 retval = validate_change(cs, &trialcs); 925 if (retval < 0) 926 goto done; 927 928 mutex_lock(&callback_mutex); 929 cs->mems_allowed = trialcs.mems_allowed; 930 cs->mems_generation = cpuset_mems_generation++; 931 mutex_unlock(&callback_mutex); 932 933 set_cpuset_being_rebound(cs); /* causes mpol_copy() rebind */ 934 935 fudge = 10; /* spare mmarray[] slots */ 936 fudge += cpus_weight(cs->cpus_allowed); /* imagine one fork-bomb/cpu */ 937 retval = -ENOMEM; 938 939 /* 940 * Allocate mmarray[] to hold mm reference for each task 941 * in cpuset cs. Can't kmalloc GFP_KERNEL while holding 942 * tasklist_lock. We could use GFP_ATOMIC, but with a 943 * few more lines of code, we can retry until we get a big 944 * enough mmarray[] w/o using GFP_ATOMIC. 945 */ 946 while (1) { 947 ntasks = atomic_read(&cs->count); /* guess */ 948 ntasks += fudge; 949 mmarray = kmalloc(ntasks * sizeof(*mmarray), GFP_KERNEL); 950 if (!mmarray) 951 goto done; 952 write_lock_irq(&tasklist_lock); /* block fork */ 953 if (atomic_read(&cs->count) <= ntasks) 954 break; /* got enough */ 955 write_unlock_irq(&tasklist_lock); /* try again */ 956 kfree(mmarray); 957 } 958 959 n = 0; 960 961 /* Load up mmarray[] with mm reference for each task in cpuset. 
*/ 962 do_each_thread(g, p) { 963 struct mm_struct *mm; 964 965 if (n >= ntasks) { 966 printk(KERN_WARNING 967 "Cpuset mempolicy rebind incomplete.\n"); 968 continue; 969 } 970 if (p->cpuset != cs) 971 continue; 972 mm = get_task_mm(p); 973 if (!mm) 974 continue; 975 mmarray[n++] = mm; 976 } while_each_thread(g, p); 977 write_unlock_irq(&tasklist_lock); 978 979 /* 980 * Now that we've dropped the tasklist spinlock, we can 981 * rebind the vma mempolicies of each mm in mmarray[] to their 982 * new cpuset, and release that mm. The mpol_rebind_mm() 983 * call takes mmap_sem, which we couldn't take while holding 984 * tasklist_lock. Forks can happen again now - the mpol_copy() 985 * cpuset_being_rebound check will catch such forks, and rebind 986 * their vma mempolicies too. Because we still hold the global 987 * cpuset manage_mutex, we know that no other rebind effort will 988 * be contending for the global variable cpuset_being_rebound. 989 * It's ok if we rebind the same mm twice; mpol_rebind_mm() 990 * is idempotent. Also migrate pages in each mm to new nodes. 991 */ 992 migrate = is_memory_migrate(cs); 993 for (i = 0; i < n; i++) { 994 struct mm_struct *mm = mmarray[i]; 995 996 mpol_rebind_mm(mm, &cs->mems_allowed); 997 if (migrate) 998 cpuset_migrate_mm(mm, &oldmem, &cs->mems_allowed); 999 mmput(mm); 1000 } 1001 1002 /* We're done rebinding vma's to this cpusets new mems_allowed. */ 1003 kfree(mmarray); 1004 set_cpuset_being_rebound(NULL); 1005 retval = 0; 1006done: 1007 return retval; 1008} 1009 1010/* 1011 * Call with manage_mutex held. 1012 */ 1013 1014static int update_memory_pressure_enabled(struct cpuset *cs, char *buf) 1015{ 1016 if (simple_strtoul(buf, NULL, 10) != 0) 1017 cpuset_memory_pressure_enabled = 1; 1018 else 1019 cpuset_memory_pressure_enabled = 0; 1020 return 0; 1021} 1022 1023/* 1024 * update_flag - read a 0 or a 1 in a file and update associated flag 1025 * bit: the bit to update (CS_CPU_EXCLUSIVE, CS_MEM_EXCLUSIVE, 1026 * CS_NOTIFY_ON_RELEASE, CS_MEMORY_MIGRATE, 1027 * CS_SPREAD_PAGE, CS_SPREAD_SLAB) 1028 * cs: the cpuset to update 1029 * buf: the buffer where we read the 0 or 1 1030 * 1031 * Call with manage_mutex held. 1032 */ 1033 1034static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs, char *buf) 1035{ 1036 int turning_on; 1037 struct cpuset trialcs; 1038 int err, cpu_exclusive_changed; 1039 1040 turning_on = (simple_strtoul(buf, NULL, 10) != 0); 1041 1042 trialcs = *cs; 1043 if (turning_on) 1044 set_bit(bit, &trialcs.flags); 1045 else 1046 clear_bit(bit, &trialcs.flags); 1047 1048 err = validate_change(cs, &trialcs); 1049 if (err < 0) 1050 return err; 1051 cpu_exclusive_changed = 1052 (is_cpu_exclusive(cs) != is_cpu_exclusive(&trialcs)); 1053 mutex_lock(&callback_mutex); 1054 if (turning_on) 1055 set_bit(bit, &cs->flags); 1056 else 1057 clear_bit(bit, &cs->flags); 1058 mutex_unlock(&callback_mutex); 1059 1060 if (cpu_exclusive_changed) 1061 update_cpu_domains(cs); 1062 return 0; 1063} 1064 1065/* 1066 * Frequency meter - How fast is some event occuring? 1067 * 1068 * These routines manage a digitally filtered, constant time based, 1069 * event frequency meter. There are four routines: 1070 * fmeter_init() - initialize a frequency meter. 1071 * fmeter_markevent() - called each time the event happens. 1072 * fmeter_getrate() - returns the recent rate of such events. 1073 * fmeter_update() - internal routine used to update fmeter. 
1074 * 1075 * A common data structure is passed to each of these routines, 1076 * which is used to keep track of the state required to manage the 1077 * frequency meter and its digital filter. 1078 * 1079 * The filter works on the number of events marked per unit time. 1080 * The filter is single-pole low-pass recursive (IIR). The time unit 1081 * is 1 second. Arithmetic is done using 32-bit integers scaled to 1082 * simulate 3 decimal digits of precision (multiplied by 1000). 1083 * 1084 * With an FM_COEF of 933, and a time base of 1 second, the filter 1085 * has a half-life of 10 seconds, meaning that if the events quit 1086 * happening, then the rate returned from the fmeter_getrate() 1087 * will be cut in half each 10 seconds, until it converges to zero. 1088 * 1089 * It is not worth doing a real infinitely recursive filter. If more 1090 * than FM_MAXTICKS ticks have elapsed since the last filter event, 1091 * just compute FM_MAXTICKS ticks worth, by which point the level 1092 * will be stable. 1093 * 1094 * Limit the count of unprocessed events to FM_MAXCNT, so as to avoid 1095 * arithmetic overflow in the fmeter_update() routine. 1096 * 1097 * Given the simple 32 bit integer arithmetic used, this meter works 1098 * best for reporting rates between one per millisecond (msec) and 1099 * one per 32 (approx) seconds. At constant rates faster than one 1100 * per msec it maxes out at values just under 1,000,000. At constant 1101 * rates between one per msec, and one per second it will stabilize 1102 * to a value N*1000, where N is the rate of events per second. 1103 * At constant rates between one per second and one per 32 seconds, 1104 * it will be choppy, moving up on the seconds that have an event, 1105 * and then decaying until the next event. At rates slower than 1106 * about one in 32 seconds, it decays all the way back to zero between 1107 * each event. 1108 */ 1109 1110#define FM_COEF 933 /* coefficient for half-life of 10 secs */ 1111#define FM_MAXTICKS ((time_t)99) /* useless computing more ticks than this */ 1112#define FM_MAXCNT 1000000 /* limit cnt to avoid overflow */ 1113#define FM_SCALE 1000 /* faux fixed point scale */ 1114 1115/* Initialize a frequency meter */ 1116static void fmeter_init(struct fmeter *fmp) 1117{ 1118 fmp->cnt = 0; 1119 fmp->val = 0; 1120 fmp->time = 0; 1121 spin_lock_init(&fmp->lock); 1122} 1123 1124/* Internal meter update - process cnt events and update value */ 1125static void fmeter_update(struct fmeter *fmp) 1126{ 1127 time_t now = get_seconds(); 1128 time_t ticks = now - fmp->time; 1129 1130 if (ticks == 0) 1131 return; 1132 1133 ticks = min(FM_MAXTICKS, ticks); 1134 while (ticks-- > 0) 1135 fmp->val = (FM_COEF * fmp->val) / FM_SCALE; 1136 fmp->time = now; 1137 1138 fmp->val += ((FM_SCALE - FM_COEF) * fmp->cnt) / FM_SCALE; 1139 fmp->cnt = 0; 1140} 1141 1142/* Process any previous ticks, then bump cnt by one (times scale). */ 1143static void fmeter_markevent(struct fmeter *fmp) 1144{ 1145 spin_lock(&fmp->lock); 1146 fmeter_update(fmp); 1147 fmp->cnt = min(FM_MAXCNT, fmp->cnt + FM_SCALE); 1148 spin_unlock(&fmp->lock); 1149} 1150 1151/* Process any previous ticks, then return current value. 
*/ 1152static int fmeter_getrate(struct fmeter *fmp) 1153{ 1154 int val; 1155 1156 spin_lock(&fmp->lock); 1157 fmeter_update(fmp); 1158 val = fmp->val; 1159 spin_unlock(&fmp->lock); 1160 return val; 1161} 1162 1163/* 1164 * Attack task specified by pid in 'pidbuf' to cpuset 'cs', possibly 1165 * writing the path of the old cpuset in 'ppathbuf' if it needs to be 1166 * notified on release. 1167 * 1168 * Call holding manage_mutex. May take callback_mutex and task_lock of 1169 * the task 'pid' during call. 1170 */ 1171 1172static int attach_task(struct cpuset *cs, char *pidbuf, char **ppathbuf) 1173{ 1174 pid_t pid; 1175 struct task_struct *tsk; 1176 struct cpuset *oldcs; 1177 cpumask_t cpus; 1178 nodemask_t from, to; 1179 struct mm_struct *mm; 1180 1181 if (sscanf(pidbuf, "%d", &pid) != 1) 1182 return -EIO; 1183 if (cpus_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed)) 1184 return -ENOSPC; 1185 1186 if (pid) { 1187 read_lock(&tasklist_lock); 1188 1189 tsk = find_task_by_pid(pid); 1190 if (!tsk || tsk->flags & PF_EXITING) { 1191 read_unlock(&tasklist_lock); 1192 return -ESRCH; 1193 } 1194 1195 get_task_struct(tsk); 1196 read_unlock(&tasklist_lock); 1197 1198 if ((current->euid) && (current->euid != tsk->uid) 1199 && (current->euid != tsk->suid)) { 1200 put_task_struct(tsk); 1201 return -EACCES; 1202 } 1203 } else { 1204 tsk = current; 1205 get_task_struct(tsk); 1206 } 1207 1208 mutex_lock(&callback_mutex); 1209 1210 task_lock(tsk); 1211 oldcs = tsk->cpuset; 1212 if (!oldcs) { 1213 task_unlock(tsk); 1214 mutex_unlock(&callback_mutex); 1215 put_task_struct(tsk); 1216 return -ESRCH; 1217 } 1218 atomic_inc(&cs->count); 1219 rcu_assign_pointer(tsk->cpuset, cs); 1220 task_unlock(tsk); 1221 1222 guarantee_online_cpus(cs, &cpus); 1223 set_cpus_allowed(tsk, cpus); 1224 1225 from = oldcs->mems_allowed; 1226 to = cs->mems_allowed; 1227 1228 mutex_unlock(&callback_mutex); 1229 1230 mm = get_task_mm(tsk); 1231 if (mm) { 1232 mpol_rebind_mm(mm, &to); 1233 if (is_memory_migrate(cs)) 1234 cpuset_migrate_mm(mm, &from, &to); 1235 mmput(mm); 1236 } 1237 1238 put_task_struct(tsk); 1239 synchronize_rcu(); 1240 if (atomic_dec_and_test(&oldcs->count)) 1241 check_for_release(oldcs, ppathbuf); 1242 return 0; 1243} 1244 1245/* The various types of files and directories in a cpuset file system */ 1246 1247typedef enum { 1248 FILE_ROOT, 1249 FILE_DIR, 1250 FILE_MEMORY_MIGRATE, 1251 FILE_CPULIST, 1252 FILE_MEMLIST, 1253 FILE_CPU_EXCLUSIVE, 1254 FILE_MEM_EXCLUSIVE, 1255 FILE_NOTIFY_ON_RELEASE, 1256 FILE_MEMORY_PRESSURE_ENABLED, 1257 FILE_MEMORY_PRESSURE, 1258 FILE_SPREAD_PAGE, 1259 FILE_SPREAD_SLAB, 1260 FILE_TASKLIST, 1261} cpuset_filetype_t; 1262 1263static ssize_t cpuset_common_file_write(struct file *file, const char __user *userbuf, 1264 size_t nbytes, loff_t *unused_ppos) 1265{ 1266 struct cpuset *cs = __d_cs(file->f_dentry->d_parent); 1267 struct cftype *cft = __d_cft(file->f_dentry); 1268 cpuset_filetype_t type = cft->private; 1269 char *buffer; 1270 char *pathbuf = NULL; 1271 int retval = 0; 1272 1273 /* Crude upper limit on largest legitimate cpulist user might write. 
*/ 1274 if (nbytes > 100 + 6 * NR_CPUS) 1275 return -E2BIG; 1276 1277 /* +1 for nul-terminator */ 1278 if ((buffer = kmalloc(nbytes + 1, GFP_KERNEL)) == 0) 1279 return -ENOMEM; 1280 1281 if (copy_from_user(buffer, userbuf, nbytes)) { 1282 retval = -EFAULT; 1283 goto out1; 1284 } 1285 buffer[nbytes] = 0; /* nul-terminate */ 1286 1287 mutex_lock(&manage_mutex); 1288 1289 if (is_removed(cs)) { 1290 retval = -ENODEV; 1291 goto out2; 1292 } 1293 1294 switch (type) { 1295 case FILE_CPULIST: 1296 retval = update_cpumask(cs, buffer); 1297 break; 1298 case FILE_MEMLIST: 1299 retval = update_nodemask(cs, buffer); 1300 break; 1301 case FILE_CPU_EXCLUSIVE: 1302 retval = update_flag(CS_CPU_EXCLUSIVE, cs, buffer); 1303 break; 1304 case FILE_MEM_EXCLUSIVE: 1305 retval = update_flag(CS_MEM_EXCLUSIVE, cs, buffer); 1306 break; 1307 case FILE_NOTIFY_ON_RELEASE: 1308 retval = update_flag(CS_NOTIFY_ON_RELEASE, cs, buffer); 1309 break; 1310 case FILE_MEMORY_MIGRATE: 1311 retval = update_flag(CS_MEMORY_MIGRATE, cs, buffer); 1312 break; 1313 case FILE_MEMORY_PRESSURE_ENABLED: 1314 retval = update_memory_pressure_enabled(cs, buffer); 1315 break; 1316 case FILE_MEMORY_PRESSURE: 1317 retval = -EACCES; 1318 break; 1319 case FILE_SPREAD_PAGE: 1320 retval = update_flag(CS_SPREAD_PAGE, cs, buffer); 1321 cs->mems_generation = cpuset_mems_generation++; 1322 break; 1323 case FILE_SPREAD_SLAB: 1324 retval = update_flag(CS_SPREAD_SLAB, cs, buffer); 1325 cs->mems_generation = cpuset_mems_generation++; 1326 break; 1327 case FILE_TASKLIST: 1328 retval = attach_task(cs, buffer, &pathbuf); 1329 break; 1330 default: 1331 retval = -EINVAL; 1332 goto out2; 1333 } 1334 1335 if (retval == 0) 1336 retval = nbytes; 1337out2: 1338 mutex_unlock(&manage_mutex); 1339 cpuset_release_agent(pathbuf); 1340out1: 1341 kfree(buffer); 1342 return retval; 1343} 1344 1345static ssize_t cpuset_file_write(struct file *file, const char __user *buf, 1346 size_t nbytes, loff_t *ppos) 1347{ 1348 ssize_t retval = 0; 1349 struct cftype *cft = __d_cft(file->f_dentry); 1350 if (!cft) 1351 return -ENODEV; 1352 1353 /* special function ? */ 1354 if (cft->write) 1355 retval = cft->write(file, buf, nbytes, ppos); 1356 else 1357 retval = cpuset_common_file_write(file, buf, nbytes, ppos); 1358 1359 return retval; 1360} 1361 1362/* 1363 * These ascii lists should be read in a single call, by using a user 1364 * buffer large enough to hold the entire map. If read in smaller 1365 * chunks, there is no guarantee of atomicity. Since the display format 1366 * used, list of ranges of sequential numbers, is variable length, 1367 * and since these maps can change value dynamically, one could read 1368 * gibberish by doing partial reads while a list was changing. 1369 * A single large read to a buffer that crosses a page boundary is 1370 * ok, because the result being copied to user land is not recomputed 1371 * across a page fault. 
1372 */ 1373 1374static int cpuset_sprintf_cpulist(char *page, struct cpuset *cs) 1375{ 1376 cpumask_t mask; 1377 1378 mutex_lock(&callback_mutex); 1379 mask = cs->cpus_allowed; 1380 mutex_unlock(&callback_mutex); 1381 1382 return cpulist_scnprintf(page, PAGE_SIZE, mask); 1383} 1384 1385static int cpuset_sprintf_memlist(char *page, struct cpuset *cs) 1386{ 1387 nodemask_t mask; 1388 1389 mutex_lock(&callback_mutex); 1390 mask = cs->mems_allowed; 1391 mutex_unlock(&callback_mutex); 1392 1393 return nodelist_scnprintf(page, PAGE_SIZE, mask); 1394} 1395 1396static ssize_t cpuset_common_file_read(struct file *file, char __user *buf, 1397 size_t nbytes, loff_t *ppos) 1398{ 1399 struct cftype *cft = __d_cft(file->f_dentry); 1400 struct cpuset *cs = __d_cs(file->f_dentry->d_parent); 1401 cpuset_filetype_t type = cft->private; 1402 char *page; 1403 ssize_t retval = 0; 1404 char *s; 1405 1406 if (!(page = (char *)__get_free_page(GFP_KERNEL))) 1407 return -ENOMEM; 1408 1409 s = page; 1410 1411 switch (type) { 1412 case FILE_CPULIST: 1413 s += cpuset_sprintf_cpulist(s, cs); 1414 break; 1415 case FILE_MEMLIST: 1416 s += cpuset_sprintf_memlist(s, cs); 1417 break; 1418 case FILE_CPU_EXCLUSIVE: 1419 *s++ = is_cpu_exclusive(cs) ? '1' : '0'; 1420 break; 1421 case FILE_MEM_EXCLUSIVE: 1422 *s++ = is_mem_exclusive(cs) ? '1' : '0'; 1423 break; 1424 case FILE_NOTIFY_ON_RELEASE: 1425 *s++ = notify_on_release(cs) ? '1' : '0'; 1426 break; 1427 case FILE_MEMORY_MIGRATE: 1428 *s++ = is_memory_migrate(cs) ? '1' : '0'; 1429 break; 1430 case FILE_MEMORY_PRESSURE_ENABLED: 1431 *s++ = cpuset_memory_pressure_enabled ? '1' : '0'; 1432 break; 1433 case FILE_MEMORY_PRESSURE: 1434 s += sprintf(s, "%d", fmeter_getrate(&cs->fmeter)); 1435 break; 1436 case FILE_SPREAD_PAGE: 1437 *s++ = is_spread_page(cs) ? '1' : '0'; 1438 break; 1439 case FILE_SPREAD_SLAB: 1440 *s++ = is_spread_slab(cs) ? '1' : '0'; 1441 break; 1442 default: 1443 retval = -EINVAL; 1444 goto out; 1445 } 1446 *s++ = '\n'; 1447 1448 retval = simple_read_from_buffer(buf, nbytes, ppos, page, s - page); 1449out: 1450 free_page((unsigned long)page); 1451 return retval; 1452} 1453 1454static ssize_t cpuset_file_read(struct file *file, char __user *buf, size_t nbytes, 1455 loff_t *ppos) 1456{ 1457 ssize_t retval = 0; 1458 struct cftype *cft = __d_cft(file->f_dentry); 1459 if (!cft) 1460 return -ENODEV; 1461 1462 /* special function ? */ 1463 if (cft->read) 1464 retval = cft->read(file, buf, nbytes, ppos); 1465 else 1466 retval = cpuset_common_file_read(file, buf, nbytes, ppos); 1467 1468 return retval; 1469} 1470 1471static int cpuset_file_open(struct inode *inode, struct file *file) 1472{ 1473 int err; 1474 struct cftype *cft; 1475 1476 err = generic_file_open(inode, file); 1477 if (err) 1478 return err; 1479 1480 cft = __d_cft(file->f_dentry); 1481 if (!cft) 1482 return -ENODEV; 1483 if (cft->open) 1484 err = cft->open(inode, file); 1485 else 1486 err = 0; 1487 1488 return err; 1489} 1490 1491static int cpuset_file_release(struct inode *inode, struct file *file) 1492{ 1493 struct cftype *cft = __d_cft(file->f_dentry); 1494 if (cft->release) 1495 return cft->release(inode, file); 1496 return 0; 1497} 1498 1499/* 1500 * cpuset_rename - Only allow simple rename of directories in place. 
1501 */ 1502static int cpuset_rename(struct inode *old_dir, struct dentry *old_dentry, 1503 struct inode *new_dir, struct dentry *new_dentry) 1504{ 1505 if (!S_ISDIR(old_dentry->d_inode->i_mode)) 1506 return -ENOTDIR; 1507 if (new_dentry->d_inode) 1508 return -EEXIST; 1509 if (old_dir != new_dir) 1510 return -EIO; 1511 return simple_rename(old_dir, old_dentry, new_dir, new_dentry); 1512} 1513 1514static struct file_operations cpuset_file_operations = { 1515 .read = cpuset_file_read, 1516 .write = cpuset_file_write, 1517 .llseek = generic_file_llseek, 1518 .open = cpuset_file_open, 1519 .release = cpuset_file_release, 1520}; 1521 1522static struct inode_operations cpuset_dir_inode_operations = { 1523 .lookup = simple_lookup, 1524 .mkdir = cpuset_mkdir, 1525 .rmdir = cpuset_rmdir, 1526 .rename = cpuset_rename, 1527}; 1528 1529static int cpuset_create_file(struct dentry *dentry, int mode) 1530{ 1531 struct inode *inode; 1532 1533 if (!dentry) 1534 return -ENOENT; 1535 if (dentry->d_inode) 1536 return -EEXIST; 1537 1538 inode = cpuset_new_inode(mode); 1539 if (!inode) 1540 return -ENOMEM; 1541 1542 if (S_ISDIR(mode)) { 1543 inode->i_op = &cpuset_dir_inode_operations; 1544 inode->i_fop = &simple_dir_operations; 1545 1546 /* start off with i_nlink == 2 (for "." entry) */ 1547 inode->i_nlink++; 1548 } else if (S_ISREG(mode)) { 1549 inode->i_size = 0; 1550 inode->i_fop = &cpuset_file_operations; 1551 } 1552 1553 d_instantiate(dentry, inode); 1554 dget(dentry); /* Extra count - pin the dentry in core */ 1555 return 0; 1556} 1557 1558/* 1559 * cpuset_create_dir - create a directory for an object. 1560 * cs: the cpuset we create the directory for. 1561 * It must have a valid ->parent field 1562 * And we are going to fill its ->dentry field. 1563 * name: The name to give to the cpuset directory. Will be copied. 1564 * mode: mode to set on new directory. 1565 */ 1566 1567static int cpuset_create_dir(struct cpuset *cs, const char *name, int mode) 1568{ 1569 struct dentry *dentry = NULL; 1570 struct dentry *parent; 1571 int error = 0; 1572 1573 parent = cs->parent->dentry; 1574 dentry = cpuset_get_dentry(parent, name); 1575 if (IS_ERR(dentry)) 1576 return PTR_ERR(dentry); 1577 error = cpuset_create_file(dentry, S_IFDIR | mode); 1578 if (!error) { 1579 dentry->d_fsdata = cs; 1580 parent->d_inode->i_nlink++; 1581 cs->dentry = dentry; 1582 } 1583 dput(dentry); 1584 1585 return error; 1586} 1587 1588static int cpuset_add_file(struct dentry *dir, const struct cftype *cft) 1589{ 1590 struct dentry *dentry; 1591 int error; 1592 1593 mutex_lock(&dir->d_inode->i_mutex); 1594 dentry = cpuset_get_dentry(dir, cft->name); 1595 if (!IS_ERR(dentry)) { 1596 error = cpuset_create_file(dentry, 0644 | S_IFREG); 1597 if (!error) 1598 dentry->d_fsdata = (void *)cft; 1599 dput(dentry); 1600 } else 1601 error = PTR_ERR(dentry); 1602 mutex_unlock(&dir->d_inode->i_mutex); 1603 return error; 1604} 1605 1606/* 1607 * Stuff for reading the 'tasks' file. 1608 * 1609 * Reading this file can return large amounts of data if a cpuset has 1610 * *lots* of attached tasks. So it may need several calls to read(), 1611 * but we cannot guarantee that the information we produce is correct 1612 * unless we produce it entirely atomically. 1613 * 1614 * Upon tasks file open(), a struct ctr_struct is allocated, that 1615 * will have a pointer to an array (also allocated here). The struct 1616 * ctr_struct * is stored in file->private_data. Its resources will 1617 * be freed by release() when the file is closed. 
The array is used 1618 * to sprintf the PIDs and then used by read(). 1619 */ 1620 1621/* cpusets_tasks_read array */ 1622 1623struct ctr_struct { 1624 char *buf; 1625 int bufsz; 1626}; 1627 1628/* 1629 * Load into 'pidarray' up to 'npids' of the tasks using cpuset 'cs'. 1630 * Return actual number of pids loaded. No need to task_lock(p) 1631 * when reading out p->cpuset, as we don't really care if it changes 1632 * on the next cycle, and we are not going to try to dereference it. 1633 */ 1634static int pid_array_load(pid_t *pidarray, int npids, struct cpuset *cs) 1635{ 1636 int n = 0; 1637 struct task_struct *g, *p; 1638 1639 read_lock(&tasklist_lock); 1640 1641 do_each_thread(g, p) { 1642 if (p->cpuset == cs) { 1643 pidarray[n++] = p->pid; 1644 if (unlikely(n == npids)) 1645 goto array_full; 1646 } 1647 } while_each_thread(g, p); 1648 1649array_full: 1650 read_unlock(&tasklist_lock); 1651 return n; 1652} 1653 1654static int cmppid(const void *a, const void *b) 1655{ 1656 return *(pid_t *)a - *(pid_t *)b; 1657} 1658 1659/* 1660 * Convert array 'a' of 'npids' pid_t's to a string of newline separated 1661 * decimal pids in 'buf'. Don't write more than 'sz' chars, but return 1662 * count 'cnt' of how many chars would be written if buf were large enough. 1663 */ 1664static int pid_array_to_buf(char *buf, int sz, pid_t *a, int npids) 1665{ 1666 int cnt = 0; 1667 int i; 1668 1669 for (i = 0; i < npids; i++) 1670 cnt += snprintf(buf + cnt, max(sz - cnt, 0), "%d\n", a[i]); 1671 return cnt; 1672} 1673 1674/* 1675 * Handle an open on 'tasks' file. Prepare a buffer listing the 1676 * process id's of tasks currently attached to the cpuset being opened. 1677 * 1678 * Does not require any specific cpuset mutexes, and does not take any. 1679 */ 1680static int cpuset_tasks_open(struct inode *unused, struct file *file) 1681{ 1682 struct cpuset *cs = __d_cs(file->f_dentry->d_parent); 1683 struct ctr_struct *ctr; 1684 pid_t *pidarray; 1685 int npids; 1686 char c; 1687 1688 if (!(file->f_mode & FMODE_READ)) 1689 return 0; 1690 1691 ctr = kmalloc(sizeof(*ctr), GFP_KERNEL); 1692 if (!ctr) 1693 goto err0; 1694 1695 /* 1696 * If cpuset gets more users after we read count, we won't have 1697 * enough space - tough. This race is indistinguishable to the 1698 * caller from the case that the additional cpuset users didn't 1699 * show up until sometime later on. 
1700 */ 1701 npids = atomic_read(&cs->count); 1702 pidarray = kmalloc(npids * sizeof(pid_t), GFP_KERNEL); 1703 if (!pidarray) 1704 goto err1; 1705 1706 npids = pid_array_load(pidarray, npids, cs); 1707 sort(pidarray, npids, sizeof(pid_t), cmppid, NULL); 1708 1709 /* Call pid_array_to_buf() twice, first just to get bufsz */ 1710 ctr->bufsz = pid_array_to_buf(&c, sizeof(c), pidarray, npids) + 1; 1711 ctr->buf = kmalloc(ctr->bufsz, GFP_KERNEL); 1712 if (!ctr->buf) 1713 goto err2; 1714 ctr->bufsz = pid_array_to_buf(ctr->buf, ctr->bufsz, pidarray, npids); 1715 1716 kfree(pidarray); 1717 file->private_data = ctr; 1718 return 0; 1719 1720err2: 1721 kfree(pidarray); 1722err1: 1723 kfree(ctr); 1724err0: 1725 return -ENOMEM; 1726} 1727 1728static ssize_t cpuset_tasks_read(struct file *file, char __user *buf, 1729 size_t nbytes, loff_t *ppos) 1730{ 1731 struct ctr_struct *ctr = file->private_data; 1732 1733 if (*ppos + nbytes > ctr->bufsz) 1734 nbytes = ctr->bufsz - *ppos; 1735 if (copy_to_user(buf, ctr->buf + *ppos, nbytes)) 1736 return -EFAULT; 1737 *ppos += nbytes; 1738 return nbytes; 1739} 1740 1741static int cpuset_tasks_release(struct inode *unused_inode, struct file *file) 1742{ 1743 struct ctr_struct *ctr; 1744 1745 if (file->f_mode & FMODE_READ) { 1746 ctr = file->private_data; 1747 kfree(ctr->buf); 1748 kfree(ctr); 1749 } 1750 return 0; 1751} 1752 1753/* 1754 * for the common functions, 'private' gives the type of file 1755 */ 1756 1757static struct cftype cft_tasks = { 1758 .name = "tasks", 1759 .open = cpuset_tasks_open, 1760 .read = cpuset_tasks_read, 1761 .release = cpuset_tasks_release, 1762 .private = FILE_TASKLIST, 1763}; 1764 1765static struct cftype cft_cpus = { 1766 .name = "cpus", 1767 .private = FILE_CPULIST, 1768}; 1769 1770static struct cftype cft_mems = { 1771 .name = "mems", 1772 .private = FILE_MEMLIST, 1773}; 1774 1775static struct cftype cft_cpu_exclusive = { 1776 .name = "cpu_exclusive", 1777 .private = FILE_CPU_EXCLUSIVE, 1778}; 1779 1780static struct cftype cft_mem_exclusive = { 1781 .name = "mem_exclusive", 1782 .private = FILE_MEM_EXCLUSIVE, 1783}; 1784 1785static struct cftype cft_notify_on_release = { 1786 .name = "notify_on_release", 1787 .private = FILE_NOTIFY_ON_RELEASE, 1788}; 1789 1790static struct cftype cft_memory_migrate = { 1791 .name = "memory_migrate", 1792 .private = FILE_MEMORY_MIGRATE, 1793}; 1794 1795static struct cftype cft_memory_pressure_enabled = { 1796 .name = "memory_pressure_enabled", 1797 .private = FILE_MEMORY_PRESSURE_ENABLED, 1798}; 1799 1800static struct cftype cft_memory_pressure = { 1801 .name = "memory_pressure", 1802 .private = FILE_MEMORY_PRESSURE, 1803}; 1804 1805static struct cftype cft_spread_page = { 1806 .name = "memory_spread_page", 1807 .private = FILE_SPREAD_PAGE, 1808}; 1809 1810static struct cftype cft_spread_slab = { 1811 .name = "memory_spread_slab", 1812 .private = FILE_SPREAD_SLAB, 1813}; 1814 1815static int cpuset_populate_dir(struct dentry *cs_dentry) 1816{ 1817 int err; 1818 1819 if ((err = cpuset_add_file(cs_dentry, &cft_cpus)) < 0) 1820 return err; 1821 if ((err = cpuset_add_file(cs_dentry, &cft_mems)) < 0) 1822 return err; 1823 if ((err = cpuset_add_file(cs_dentry, &cft_cpu_exclusive)) < 0) 1824 return err; 1825 if ((err = cpuset_add_file(cs_dentry, &cft_mem_exclusive)) < 0) 1826 return err; 1827 if ((err = cpuset_add_file(cs_dentry, &cft_notify_on_release)) < 0) 1828 return err; 1829 if ((err = cpuset_add_file(cs_dentry, &cft_memory_migrate)) < 0) 1830 return err; 1831 if ((err = cpuset_add_file(cs_dentry, 
&cft_memory_pressure)) < 0) 1832 return err; 1833 if ((err = cpuset_add_file(cs_dentry, &cft_spread_page)) < 0) 1834 return err; 1835 if ((err = cpuset_add_file(cs_dentry, &cft_spread_slab)) < 0) 1836 return err; 1837 if ((err = cpuset_add_file(cs_dentry, &cft_tasks)) < 0) 1838 return err; 1839 return 0; 1840} 1841 1842/* 1843 * cpuset_create - create a cpuset 1844 * parent: cpuset that will be parent of the new cpuset. 1845 * name: name of the new cpuset. Will be strcpy'ed. 1846 * mode: mode to set on new inode 1847 * 1848 * Must be called with the mutex on the parent inode held 1849 */ 1850 1851static long cpuset_create(struct cpuset *parent, const char *name, int mode) 1852{ 1853 struct cpuset *cs; 1854 int err; 1855 1856 cs = kmalloc(sizeof(*cs), GFP_KERNEL); 1857 if (!cs) 1858 return -ENOMEM; 1859 1860 mutex_lock(&manage_mutex); 1861 cpuset_update_task_memory_state(); 1862 cs->flags = 0; 1863 if (notify_on_release(parent)) 1864 set_bit(CS_NOTIFY_ON_RELEASE, &cs->flags); 1865 if (is_spread_page(parent)) 1866 set_bit(CS_SPREAD_PAGE, &cs->flags); 1867 if (is_spread_slab(parent)) 1868 set_bit(CS_SPREAD_SLAB, &cs->flags); 1869 cs->cpus_allowed = CPU_MASK_NONE; 1870 cs->mems_allowed = NODE_MASK_NONE; 1871 atomic_set(&cs->count, 0); 1872 INIT_LIST_HEAD(&cs->sibling); 1873 INIT_LIST_HEAD(&cs->children); 1874 cs->mems_generation = cpuset_mems_generation++; 1875 fmeter_init(&cs->fmeter); 1876 1877 cs->parent = parent; 1878 1879 mutex_lock(&callback_mutex); 1880 list_add(&cs->sibling, &cs->parent->children); 1881 number_of_cpusets++; 1882 mutex_unlock(&callback_mutex); 1883 1884 err = cpuset_create_dir(cs, name, mode); 1885 if (err < 0) 1886 goto err; 1887 1888 /* 1889 * Release manage_mutex before cpuset_populate_dir() because it 1890 * will down() this new directory's i_mutex and if we race with 1891 * another mkdir, we might deadlock. 
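 *
 * (One way such a deadlock could form: we would hold manage_mutex while
 * waiting for the new directory's i_mutex, while a second mkdir issued
 * inside that new directory would already hold its i_mutex - taken by the
 * vfs - and be waiting in cpuset_create() for manage_mutex: an ABBA
 * inversion.)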
1892 */ 1893 mutex_unlock(&manage_mutex); 1894 1895 err = cpuset_populate_dir(cs->dentry); 1896 /* If err < 0, we have a half-filled directory - oh well ;) */ 1897 return 0; 1898err: 1899 list_del(&cs->sibling); 1900 mutex_unlock(&manage_mutex); 1901 kfree(cs); 1902 return err; 1903} 1904 1905static int cpuset_mkdir(struct inode *dir, struct dentry *dentry, int mode) 1906{ 1907 struct cpuset *c_parent = dentry->d_parent->d_fsdata; 1908 1909 /* the vfs holds inode->i_mutex already */ 1910 return cpuset_create(c_parent, dentry->d_name.name, mode | S_IFDIR); 1911} 1912 1913static int cpuset_rmdir(struct inode *unused_dir, struct dentry *dentry) 1914{ 1915 struct cpuset *cs = dentry->d_fsdata; 1916 struct dentry *d; 1917 struct cpuset *parent; 1918 char *pathbuf = NULL; 1919 1920 /* the vfs holds both inode->i_mutex already */ 1921 1922 mutex_lock(&manage_mutex); 1923 cpuset_update_task_memory_state(); 1924 if (atomic_read(&cs->count) > 0) { 1925 mutex_unlock(&manage_mutex); 1926 return -EBUSY; 1927 } 1928 if (!list_empty(&cs->children)) { 1929 mutex_unlock(&manage_mutex); 1930 return -EBUSY; 1931 } 1932 parent = cs->parent; 1933 mutex_lock(&callback_mutex); 1934 set_bit(CS_REMOVED, &cs->flags); 1935 if (is_cpu_exclusive(cs)) 1936 update_cpu_domains(cs); 1937 list_del(&cs->sibling); /* delete my sibling from parent->children */ 1938 spin_lock(&cs->dentry->d_lock); 1939 d = dget(cs->dentry); 1940 cs->dentry = NULL; 1941 spin_unlock(&d->d_lock); 1942 cpuset_d_remove_dir(d); 1943 dput(d); 1944 number_of_cpusets--; 1945 mutex_unlock(&callback_mutex); 1946 if (list_empty(&parent->children)) 1947 check_for_release(parent, &pathbuf); 1948 mutex_unlock(&manage_mutex); 1949 cpuset_release_agent(pathbuf); 1950 return 0; 1951} 1952 1953/* 1954 * cpuset_init_early - just enough so that the calls to 1955 * cpuset_update_task_memory_state() in early init code 1956 * are harmless. 
1957 */ 1958 1959int __init cpuset_init_early(void) 1960{ 1961 struct task_struct *tsk = current; 1962 1963 tsk->cpuset = &top_cpuset; 1964 tsk->cpuset->mems_generation = cpuset_mems_generation++; 1965 return 0; 1966} 1967 1968/** 1969 * cpuset_init - initialize cpusets at system boot 1970 * 1971 * Description: Initialize top_cpuset and the cpuset internal file system, 1972 **/ 1973 1974int __init cpuset_init(void) 1975{ 1976 struct dentry *root; 1977 int err; 1978 1979 top_cpuset.cpus_allowed = CPU_MASK_ALL; 1980 top_cpuset.mems_allowed = NODE_MASK_ALL; 1981 1982 fmeter_init(&top_cpuset.fmeter); 1983 top_cpuset.mems_generation = cpuset_mems_generation++; 1984 1985 init_task.cpuset = &top_cpuset; 1986 1987 err = register_filesystem(&cpuset_fs_type); 1988 if (err < 0) 1989 goto out; 1990 cpuset_mount = kern_mount(&cpuset_fs_type); 1991 if (IS_ERR(cpuset_mount)) { 1992 printk(KERN_ERR "cpuset: could not mount!\n"); 1993 err = PTR_ERR(cpuset_mount); 1994 cpuset_mount = NULL; 1995 goto out; 1996 } 1997 root = cpuset_mount->mnt_sb->s_root; 1998 root->d_fsdata = &top_cpuset; 1999 root->d_inode->i_nlink++; 2000 top_cpuset.dentry = root; 2001 root->d_inode->i_op = &cpuset_dir_inode_operations; 2002 number_of_cpusets = 1; 2003 err = cpuset_populate_dir(root); 2004 /* memory_pressure_enabled is in root cpuset only */ 2005 if (err == 0) 2006 err = cpuset_add_file(root, &cft_memory_pressure_enabled); 2007out: 2008 return err; 2009} 2010 2011/** 2012 * cpuset_init_smp - initialize cpus_allowed 2013 * 2014 * Description: Finish top cpuset after cpu, node maps are initialized 2015 **/ 2016 2017void __init cpuset_init_smp(void) 2018{ 2019 top_cpuset.cpus_allowed = cpu_online_map; 2020 top_cpuset.mems_allowed = node_online_map; 2021} 2022 2023/** 2024 * cpuset_fork - attach newly forked task to its parents cpuset. 2025 * @tsk: pointer to task_struct of forking parent process. 2026 * 2027 * Description: A task inherits its parent's cpuset at fork(). 2028 * 2029 * A pointer to the shared cpuset was automatically copied in fork.c 2030 * by dup_task_struct(). However, we ignore that copy, since it was 2031 * not made under the protection of task_lock(), so might no longer be 2032 * a valid cpuset pointer. attach_task() might have already changed 2033 * current->cpuset, allowing the previously referenced cpuset to 2034 * be removed and freed. Instead, we task_lock(current) and copy 2035 * its present value of current->cpuset for our freshly forked child. 2036 * 2037 * At the point that cpuset_fork() is called, 'current' is the parent 2038 * task, and the passed argument 'child' points to the child task. 2039 **/ 2040 2041void cpuset_fork(struct task_struct *child) 2042{ 2043 task_lock(current); 2044 child->cpuset = current->cpuset; 2045 atomic_inc(&child->cpuset->count); 2046 task_unlock(current); 2047} 2048 2049/** 2050 * cpuset_exit - detach cpuset from exiting task 2051 * @tsk: pointer to task_struct of exiting process 2052 * 2053 * Description: Detach cpuset from @tsk and release it. 2054 * 2055 * Note that cpusets marked notify_on_release force every task in 2056 * them to take the global manage_mutex mutex when exiting. 2057 * This could impact scaling on very large systems. Be reluctant to 2058 * use notify_on_release cpusets where very high task exit scaling 2059 * is required on large systems. 2060 * 2061 * Don't even think about dereferencing 'cs' after the cpuset use count 2062 * goes to zero, except inside a critical section guarded by manage_mutex 2063 * or callback_mutex.
Otherwise a zero cpuset use count is a license to 2064 * any other task to nuke the cpuset immediately, via cpuset_rmdir(). 2065 * 2066 * This routine has to take manage_mutex, not callback_mutex, because 2067 * it is holding that mutex while calling check_for_release(), 2068 * which calls kmalloc(), so can't be called holding callback_mutex(). 2069 * 2070 * We don't need to task_lock() this reference to tsk->cpuset, 2071 * because tsk is already marked PF_EXITING, so attach_task() won't 2072 * mess with it, or task is a failed fork, never visible to attach_task. 2073 * 2074 * the_top_cpuset_hack: 2075 * 2076 * Set the exiting tasks cpuset to the root cpuset (top_cpuset). 2077 * 2078 * Don't leave a task unable to allocate memory, as that is an 2079 * accident waiting to happen should someone add a callout in 2080 * do_exit() after the cpuset_exit() call that might allocate. 2081 * If a task tries to allocate memory with an invalid cpuset, 2082 * it will oops in cpuset_update_task_memory_state(). 2083 * 2084 * We call cpuset_exit() while the task is still competent to 2085 * handle notify_on_release(), then leave the task attached to 2086 * the root cpuset (top_cpuset) for the remainder of its exit. 2087 * 2088 * To do this properly, we would increment the reference count on 2089 * top_cpuset, and near the very end of the kernel/exit.c do_exit() 2090 * code we would add a second cpuset function call, to drop that 2091 * reference. This would just create an unnecessary hot spot on 2092 * the top_cpuset reference count, to no avail. 2093 * 2094 * Normally, holding a reference to a cpuset without bumping its 2095 * count is unsafe. The cpuset could go away, or someone could 2096 * attach us to a different cpuset, decrementing the count on 2097 * the first cpuset that we never incremented. But in this case, 2098 * top_cpuset isn't going away, and either task has PF_EXITING set, 2099 * which wards off any attach_task() attempts, or task is a failed 2100 * fork, never visible to attach_task. 2101 * 2102 * Another way to do this would be to set the cpuset pointer 2103 * to NULL here, and check in cpuset_update_task_memory_state() 2104 * for a NULL pointer. This hack avoids that NULL check, for no 2105 * cost (other than this way too long comment ;). 2106 **/ 2107 2108void cpuset_exit(struct task_struct *tsk) 2109{ 2110 struct cpuset *cs; 2111 2112 cs = tsk->cpuset; 2113 tsk->cpuset = &top_cpuset; /* the_top_cpuset_hack - see above */ 2114 2115 if (notify_on_release(cs)) { 2116 char *pathbuf = NULL; 2117 2118 mutex_lock(&manage_mutex); 2119 if (atomic_dec_and_test(&cs->count)) 2120 check_for_release(cs, &pathbuf); 2121 mutex_unlock(&manage_mutex); 2122 cpuset_release_agent(pathbuf); 2123 } else { 2124 atomic_dec(&cs->count); 2125 } 2126} 2127 2128/** 2129 * cpuset_cpus_allowed - return cpus_allowed mask from a tasks cpuset. 2130 * @tsk: pointer to task_struct from which to obtain cpuset->cpus_allowed. 2131 * 2132 * Description: Returns the cpumask_t cpus_allowed of the cpuset 2133 * attached to the specified @tsk. Guaranteed to return some non-empty 2134 * subset of cpu_online_map, even if this means going outside the 2135 * tasks cpuset. 
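 *
 * A caller such as sched_setaffinity() typically clips a requested
 * affinity mask against this value before applying it, along these lines:
 *
 *	cpumask_t allowed = cpuset_cpus_allowed(p);
 *	cpus_and(new_mask, new_mask, allowed);
 *	set_cpus_allowed(p, new_mask);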
2136 **/ 2137 2138cpumask_t cpuset_cpus_allowed(struct task_struct *tsk) 2139{ 2140 cpumask_t mask; 2141 2142 mutex_lock(&callback_mutex); 2143 task_lock(tsk); 2144 guarantee_online_cpus(tsk->cpuset, &mask); 2145 task_unlock(tsk); 2146 mutex_unlock(&callback_mutex); 2147 2148 return mask; 2149} 2150 2151void cpuset_init_current_mems_allowed(void) 2152{ 2153 current->mems_allowed = NODE_MASK_ALL; 2154} 2155 2156/** 2157 * cpuset_mems_allowed - return mems_allowed mask from a tasks cpuset. 2158 * @tsk: pointer to task_struct from which to obtain cpuset->mems_allowed. 2159 * 2160 * Description: Returns the nodemask_t mems_allowed of the cpuset 2161 * attached to the specified @tsk. Guaranteed to return some non-empty 2162 * subset of node_online_map, even if this means going outside the 2163 * tasks cpuset. 2164 **/ 2165 2166nodemask_t cpuset_mems_allowed(struct task_struct *tsk) 2167{ 2168 nodemask_t mask; 2169 2170 mutex_lock(&callback_mutex); 2171 task_lock(tsk); 2172 guarantee_online_mems(tsk->cpuset, &mask); 2173 task_unlock(tsk); 2174 mutex_unlock(&callback_mutex); 2175 2176 return mask; 2177} 2178 2179/** 2180 * cpuset_zonelist_valid_mems_allowed - check zonelist vs. current mems_allowed 2181 * @zl: the zonelist to be checked 2182 * 2183 * Are any of the nodes on zonelist zl allowed in current->mems_allowed? 2184 */ 2185int cpuset_zonelist_valid_mems_allowed(struct zonelist *zl) 2186{ 2187 int i; 2188 2189 for (i = 0; zl->zones[i]; i++) { 2190 int nid = zl->zones[i]->zone_pgdat->node_id; 2191 2192 if (node_isset(nid, current->mems_allowed)) 2193 return 1; 2194 } 2195 return 0; 2196} 2197 2198/* 2199 * nearest_exclusive_ancestor() - Returns the nearest mem_exclusive 2200 * ancestor to the specified cpuset. Call holding callback_mutex. 2201 * If no ancestor is mem_exclusive (an unusual configuration), then 2202 * returns the root cpuset. 2203 */ 2204static const struct cpuset *nearest_exclusive_ancestor(const struct cpuset *cs) 2205{ 2206 while (!is_mem_exclusive(cs) && cs->parent) 2207 cs = cs->parent; 2208 return cs; 2209} 2210 2211/** 2212 * cpuset_zone_allowed - Can we allocate memory on zone z's memory node? 2213 * @z: is this zone on an allowed node? 2214 * @gfp_mask: memory allocation flags (we use __GFP_HARDWALL) 2215 * 2216 * If we're in interrupt, yes, we can always allocate. If zone 2217 * z's node is in our tasks mems_allowed, yes. If it's not a 2218 * __GFP_HARDWALL request and this zone's node is in the nearest 2219 * mem_exclusive cpuset ancestor to this tasks cpuset, yes. 2220 * Otherwise, no. 2221 * 2222 * GFP_USER allocations are marked with the __GFP_HARDWALL bit, 2223 * and do not allow allocations outside the current tasks cpuset. 2224 * GFP_KERNEL allocations are not so marked, so can escape to the 2225 * nearest mem_exclusive ancestor cpuset. 2226 * 2227 * Scanning up parent cpusets requires callback_mutex. The __alloc_pages() 2228 * routine only calls here with __GFP_HARDWALL bit _not_ set if 2229 * it's a GFP_KERNEL allocation, and all nodes in the current tasks 2230 * mems_allowed came up empty on the first pass over the zonelist. 2231 * So only GFP_KERNEL allocations, if all nodes in the cpuset are 2232 * short of memory, might require taking the callback_mutex mutex. 2233 * 2234 * The first loop over the zonelist in mm/page_alloc.c:__alloc_pages() 2235 * calls here with __GFP_HARDWALL always set in gfp_mask, enforcing 2236 * hardwall cpusets - no allocation on a node outside the cpuset is 2237 * allowed (unless in interrupt, of course).
2238 * 2239 * The second loop doesn't even call here for GFP_ATOMIC requests 2240 * (if the __alloc_pages() local variable 'wait' is set). That check 2241 * and the checks below have the combined effect in the second loop of 2242 * the __alloc_pages() routine that: 2243 * in_interrupt - any node ok (current task context irrelevant) 2244 * GFP_ATOMIC - any node ok 2245 * GFP_KERNEL - any node in enclosing mem_exclusive cpuset ok 2246 * GFP_USER - only nodes in current tasks mems allowed ok. 2247 **/ 2248 2249int __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask) 2250{ 2251 int node; /* node that zone z is on */ 2252 const struct cpuset *cs; /* current cpuset ancestors */ 2253 int allowed; /* is allocation in zone z allowed? */ 2254 2255 if (in_interrupt()) 2256 return 1; 2257 node = z->zone_pgdat->node_id; 2258 if (node_isset(node, current->mems_allowed)) 2259 return 1; 2260 if (gfp_mask & __GFP_HARDWALL) /* If hardwall request, stop here */ 2261 return 0; 2262 2263 if (current->flags & PF_EXITING) /* Let dying task have memory */ 2264 return 1; 2265 2266 /* Not hardwall and node outside mems_allowed: scan up cpusets */ 2267 mutex_lock(&callback_mutex); 2268 2269 task_lock(current); 2270 cs = nearest_exclusive_ancestor(current->cpuset); 2271 task_unlock(current); 2272 2273 allowed = node_isset(node, cs->mems_allowed); 2274 mutex_unlock(&callback_mutex); 2275 return allowed; 2276} 2277 2278/** 2279 * cpuset_lock - lock out any changes to cpuset structures 2280 * 2281 * The out of memory (oom) code needs to mutex_lock cpusets 2282 * from being changed while it scans the tasklist looking for a 2283 * task in an overlapping cpuset. Expose callback_mutex via this 2284 * cpuset_lock() routine, so the oom code can lock it, before 2285 * locking the task list. The tasklist_lock is a spinlock, so 2286 * must be taken inside callback_mutex. 2287 */ 2288 2289void cpuset_lock(void) 2290{ 2291 mutex_lock(&callback_mutex); 2292} 2293 2294/** 2295 * cpuset_unlock - release lock on cpuset changes 2296 * 2297 * Undo the lock taken in a previous cpuset_lock() call. 2298 */ 2299 2300void cpuset_unlock(void) 2301{ 2302 mutex_unlock(&callback_mutex); 2303} 2304 2305/** 2306 * cpuset_mem_spread_node() - On which node to begin search for a page 2307 * 2308 * If a task is marked PF_SPREAD_PAGE or PF_SPREAD_SLAB (as for 2309 * tasks in a cpuset with is_spread_page or is_spread_slab set), 2310 * and if the memory allocation used cpuset_mem_spread_node() 2311 * to determine on which node to start looking, as it will for 2312 * certain page cache or slab cache pages such as used for file 2313 * system buffers and inode caches, then instead of starting on the 2314 * local node to look for a free page, rather spread the starting 2315 * node around the tasks mems_allowed nodes. 2316 * 2317 * We don't have to worry about the returned node being offline 2318 * because "it can't happen", and even if it did, it would be ok. 2319 * 2320 * The routines calling guarantee_online_mems() are careful to 2321 * only set nodes in task->mems_allowed that are online. So it 2322 * should not be possible for the following code to return an 2323 * offline node. But if it did, that would be ok, as this routine 2324 * is not returning the node where the allocation must be, only 2325 * the node where the search should start. The zonelist passed to 2326 * __alloc_pages() will include all nodes. If the slab allocator 2327 * is passed an offline node, it will fall back to the local node. 2328 * See kmem_cache_alloc_node().
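 *
 * Example: with mems_allowed = { 1, 4, 6 } and cpuset_mem_spread_rotor
 * currently at 4, next_node() below returns 6; on the following call the
 * rotor is 6, next_node() runs past the last allowed node (returning
 * MAX_NUMNODES) and we wrap around to first_node(), i.e. node 1. So
 * successive calls rotate the starting node through 1, 4 and 6 in turn.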
2329 */ 2330 2331int cpuset_mem_spread_node(void) 2332{ 2333 int node; 2334 2335 node = next_node(current->cpuset_mem_spread_rotor, current->mems_allowed); 2336 if (node == MAX_NUMNODES) 2337 node = first_node(current->mems_allowed); 2338 current->cpuset_mem_spread_rotor = node; 2339 return node; 2340} 2341EXPORT_SYMBOL_GPL(cpuset_mem_spread_node); 2342 2343/** 2344 * cpuset_excl_nodes_overlap - Do we overlap @p's mem_exclusive ancestors? 2345 * @p: pointer to task_struct of some other task. 2346 * 2347 * Description: Return true if the nearest mem_exclusive ancestor 2348 * cpusets of tasks @p and current overlap. Used by oom killer to 2349 * determine if task @p's memory usage might impact the memory 2350 * available to the current task. 2351 * 2352 * Call while holding callback_mutex. 2353 **/ 2354 2355int cpuset_excl_nodes_overlap(const struct task_struct *p) 2356{ 2357 const struct cpuset *cs1, *cs2; /* my and p's cpuset ancestors */ 2358 int overlap = 0; /* do cpusets overlap? */ 2359 2360 task_lock(current); 2361 if (current->flags & PF_EXITING) { 2362 task_unlock(current); 2363 goto done; 2364 } 2365 cs1 = nearest_exclusive_ancestor(current->cpuset); 2366 task_unlock(current); 2367 2368 task_lock((struct task_struct *)p); 2369 if (p->flags & PF_EXITING) { 2370 task_unlock((struct task_struct *)p); 2371 goto done; 2372 } 2373 cs2 = nearest_exclusive_ancestor(p->cpuset); 2374 task_unlock((struct task_struct *)p); 2375 2376 overlap = nodes_intersects(cs1->mems_allowed, cs2->mems_allowed); 2377done: 2378 return overlap; 2379} 2380 2381/* 2382 * Collection of memory_pressure is suppressed unless 2383 * this flag is enabled by writing "1" to the special 2384 * cpuset file 'memory_pressure_enabled' in the root cpuset. 2385 */ 2386 2387int cpuset_memory_pressure_enabled __read_mostly; 2388 2389/** 2390 * cpuset_memory_pressure_bump - keep stats of per-cpuset reclaims. 2391 * 2392 * Keep a running average of the rate of synchronous (direct) 2393 * page reclaim efforts initiated by tasks in each cpuset. 2394 * 2395 * This represents the rate at which some task in the cpuset 2396 * ran low on memory on all nodes it was allowed to use, and 2397 * had to enter the kernels page reclaim code in an effort to 2398 * create more free memory by tossing clean pages or swapping 2399 * or writing dirty pages. 2400 * 2401 * Display to user space in the per-cpuset read-only file 2402 * "memory_pressure". Value displayed is an integer 2403 * representing the recent rate of entry into the synchronous 2404 * (direct) page reclaim by any task attached to the cpuset. 2405 **/ 2406 2407void __cpuset_memory_pressure_bump(void) 2408{ 2409 struct cpuset *cs; 2410 2411 task_lock(current); 2412 cs = current->cpuset; 2413 fmeter_markevent(&cs->fmeter); 2414 task_unlock(current); 2415} 2416 2417/* 2418 * proc_cpuset_show() 2419 * - Print tasks cpuset path into seq_file. 2420 * - Used for /proc/<pid>/cpuset. 2421 * - No need to task_lock(tsk) on this tsk->cpuset reference, as it 2422 * doesn't really matter if tsk->cpuset changes after we read it, 2423 * and we take manage_mutex, keeping attach_task() from changing it 2424 * anyway. No need to check that tsk->cpuset != NULL, thanks to 2425 * the_top_cpuset_hack in cpuset_exit(), which sets an exiting tasks 2426 * cpuset to top_cpuset. 
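 *
 * The value shown is the cpuset's path relative to the root of the
 * cpuset file system, followed by a newline: "/" for a task in the
 * top cpuset, or e.g. "/set1/set2" for a task attached to a grandchild
 * cpuset named set2.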
2427 */ 2428static int proc_cpuset_show(struct seq_file *m, void *v) 2429{ 2430 struct task_struct *tsk; 2431 char *buf; 2432 int retval = 0; 2433 2434 buf = kmalloc(PAGE_SIZE, GFP_KERNEL); 2435 if (!buf) 2436 return -ENOMEM; 2437 2438 tsk = m->private; 2439 mutex_lock(&manage_mutex); 2440 retval = cpuset_path(tsk->cpuset, buf, PAGE_SIZE); 2441 if (retval < 0) 2442 goto out; 2443 seq_puts(m, buf); 2444 seq_putc(m, '\n'); 2445out: 2446 mutex_unlock(&manage_mutex); 2447 kfree(buf); 2448 return retval; 2449} 2450 2451static int cpuset_open(struct inode *inode, struct file *file) 2452{ 2453 struct task_struct *tsk = PROC_I(inode)->task; 2454 return single_open(file, proc_cpuset_show, tsk); 2455} 2456 2457struct file_operations proc_cpuset_operations = { 2458 .open = cpuset_open, 2459 .read = seq_read, 2460 .llseek = seq_lseek, 2461 .release = single_release, 2462}; 2463 2464/* Display task cpus_allowed, mems_allowed in /proc/<pid>/status file. */ 2465char *cpuset_task_status_allowed(struct task_struct *task, char *buffer) 2466{ 2467 buffer += sprintf(buffer, "Cpus_allowed:\t"); 2468 buffer += cpumask_scnprintf(buffer, PAGE_SIZE, task->cpus_allowed); 2469 buffer += sprintf(buffer, "\n"); 2470 buffer += sprintf(buffer, "Mems_allowed:\t"); 2471 buffer += nodemask_scnprintf(buffer, PAGE_SIZE, task->mems_allowed); 2472 buffer += sprintf(buffer, "\n"); 2473 return buffer; 2474}
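/*
 * Putting the pieces above together, a minimal usage sketch of the cpuset
 * pseudo file system this file implements (the /dev/cpuset mount point is
 * only a convention - any empty directory will do):
 *
 *	# mount -t cpuset cpuset /dev/cpuset
 *	# mkdir /dev/cpuset/set1                  (cpuset_mkdir() above)
 *	# /bin/echo 2-3 > /dev/cpuset/set1/cpus   (cft_cpus)
 *	# /bin/echo 1   > /dev/cpuset/set1/mems   (cft_mems)
 *	# /bin/echo $$  > /dev/cpuset/set1/tasks  (attach the current shell)
 *	# cat /proc/self/cpuset                   (proc_cpuset_show() above)
 *	/set1
 *
 * The cpus and mems files must be populated before tasks can be attached,
 * since a new cpuset starts out with empty cpus_allowed and mems_allowed
 * (see cpuset_create() above).
 */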