···
 <function>spin_lock_irqsave()</function>, which is a superset
 of all other spinlock primitives.
 </para>
+
 <table>
 <title>Table of Locking Requirements</title>
 <tgroup cols="11">
 <tbody>
+
 <row>
 <entry></entry>
 <entry>IRQ Handler A</entry>
···
 <row>
 <entry>IRQ Handler B</entry>
-<entry>spin_lock_irqsave</entry>
+<entry>SLIS</entry>
 <entry>None</entry>
 </row>

 <row>
 <entry>Softirq A</entry>
-<entry>spin_lock_irq</entry>
-<entry>spin_lock_irq</entry>
-<entry>spin_lock</entry>
+<entry>SLI</entry>
+<entry>SLI</entry>
+<entry>SL</entry>
 </row>

 <row>
 <entry>Softirq B</entry>
-<entry>spin_lock_irq</entry>
-<entry>spin_lock_irq</entry>
-<entry>spin_lock</entry>
-<entry>spin_lock</entry>
+<entry>SLI</entry>
+<entry>SLI</entry>
+<entry>SL</entry>
+<entry>SL</entry>
 </row>

 <row>
 <entry>Tasklet A</entry>
-<entry>spin_lock_irq</entry>
-<entry>spin_lock_irq</entry>
-<entry>spin_lock</entry>
-<entry>spin_lock</entry>
+<entry>SLI</entry>
+<entry>SLI</entry>
+<entry>SL</entry>
+<entry>SL</entry>
 <entry>None</entry>
 </row>

 <row>
 <entry>Tasklet B</entry>
-<entry>spin_lock_irq</entry>
-<entry>spin_lock_irq</entry>
-<entry>spin_lock</entry>
-<entry>spin_lock</entry>
-<entry>spin_lock</entry>
+<entry>SLI</entry>
+<entry>SLI</entry>
+<entry>SL</entry>
+<entry>SL</entry>
+<entry>SL</entry>
 <entry>None</entry>
 </row>

 <row>
 <entry>Timer A</entry>
-<entry>spin_lock_irq</entry>
-<entry>spin_lock_irq</entry>
-<entry>spin_lock</entry>
-<entry>spin_lock</entry>
-<entry>spin_lock</entry>
-<entry>spin_lock</entry>
+<entry>SLI</entry>
+<entry>SLI</entry>
+<entry>SL</entry>
+<entry>SL</entry>
+<entry>SL</entry>
+<entry>SL</entry>
 <entry>None</entry>
 </row>

 <row>
 <entry>Timer B</entry>
-<entry>spin_lock_irq</entry>
-<entry>spin_lock_irq</entry>
-<entry>spin_lock</entry>
-<entry>spin_lock</entry>
-<entry>spin_lock</entry>
-<entry>spin_lock</entry>
-<entry>spin_lock</entry>
+<entry>SLI</entry>
+<entry>SLI</entry>
+<entry>SL</entry>
+<entry>SL</entry>
+<entry>SL</entry>
+<entry>SL</entry>
+<entry>SL</entry>
 <entry>None</entry>
 </row>

 <row>
 <entry>User Context A</entry>
-<entry>spin_lock_irq</entry>
-<entry>spin_lock_irq</entry>
-<entry>spin_lock_bh</entry>
-<entry>spin_lock_bh</entry>
-<entry>spin_lock_bh</entry>
-<entry>spin_lock_bh</entry>
-<entry>spin_lock_bh</entry>
-<entry>spin_lock_bh</entry>
+<entry>SLI</entry>
+<entry>SLI</entry>
+<entry>SLBH</entry>
+<entry>SLBH</entry>
+<entry>SLBH</entry>
+<entry>SLBH</entry>
+<entry>SLBH</entry>
+<entry>SLBH</entry>
 <entry>None</entry>
 </row>

 <row>
 <entry>User Context B</entry>
-<entry>spin_lock_irq</entry>
-<entry>spin_lock_irq</entry>
-<entry>spin_lock_bh</entry>
-<entry>spin_lock_bh</entry>
-<entry>spin_lock_bh</entry>
-<entry>spin_lock_bh</entry>
-<entry>spin_lock_bh</entry>
-<entry>spin_lock_bh</entry>
-<entry>down_interruptible</entry>
+<entry>SLI</entry>
+<entry>SLI</entry>
+<entry>SLBH</entry>
+<entry>SLBH</entry>
+<entry>SLBH</entry>
+<entry>SLBH</entry>
+<entry>SLBH</entry>
+<entry>SLBH</entry>
+<entry>DI</entry>
 <entry>None</entry>
 </row>

 </tbody>
 </tgroup>
 </table>
+
+ <table>
+<title>Legend for Locking Requirements Table</title>
+<tgroup cols="2">
+<tbody>
+
+<row>
+<entry>SLIS</entry>
+<entry>spin_lock_irqsave</entry>
+</row>
+<row>
+<entry>SLI</entry>
+<entry>spin_lock_irq</entry>
+</row>
+<row>
+<entry>SL</entry>
+<entry>spin_lock</entry>
+</row>
+<row>
+<entry>SLBH</entry>
+<entry>spin_lock_bh</entry>
+</row>
+<row>
+<entry>DI</entry>
+<entry>down_interruptible</entry>
+</row>
+
+</tbody>
+</tgroup>
+</table>
+
 </sect1>
 </chapter>
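How to read the abbreviated table is unchanged: pick the row for the context you are running in, the column for the context you share the data with, and take the primitive at the intersection. A minimal sketch of the most common pairing, user context sharing data with an IRQ handler; every name here (shared_lock, shared_count, demo_irq_handler, demo_read_count) is hypothetical, for illustration only:

#include <linux/spinlock.h>
#include <linux/interrupt.h>

static DEFINE_SPINLOCK(shared_lock);	/* hypothetical lock */
static int shared_count;		/* hypothetical shared data */

/* IRQ handler vs. another IRQ handler: the "SLIS" entry */
static irqreturn_t demo_irq_handler(int irq, void *dev_id)
{
	unsigned long flags;

	spin_lock_irqsave(&shared_lock, flags);
	shared_count++;
	spin_unlock_irqrestore(&shared_lock, flags);
	return IRQ_HANDLED;
}

/* user context vs. IRQ handler: the "SLI" entry */
static int demo_read_count(void)
{
	int val;

	spin_lock_irq(&shared_lock);
	val = shared_count;
	spin_unlock_irq(&shared_lock);
	return val;
}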
+6-2
Documentation/gpio.txt
···
 
 The return value is zero for success, else a negative errno.  It should
 be checked, since the get/set calls don't have error returns and since
-misconfiguration is possible.  (These calls could sleep.)
+misconfiguration is possible.  You should normally issue these calls from
+a task context.  However, for spinlock-safe GPIOs it's OK to use them
+before tasking is enabled, as part of early board setup.
 
 For output GPIOs, the value provided becomes the initial output value.
 This helps avoid signal glitching during system startup.
···
 Passing invalid GPIO numbers to gpio_request() will fail, as will requesting
 GPIOs that have already been claimed with that call.  The return value of
-gpio_request() must be checked.  (These calls could sleep.)
+gpio_request() must be checked.  You should normally issue these calls from
+a task context.  However, for spinlock-safe GPIOs it's OK to request GPIOs
+before tasking is enabled, as part of early board setup.
 
 These calls serve two basic purposes.  One is marking the signals which
 are actually in use as GPIOs, for better diagnostics; systems may have
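A short sketch of the convention the new wording describes, using the calls this file documents; the GPIO number and function name are hypothetical:

#include <linux/init.h>
#include <asm/gpio.h>

#define DEMO_GPIO_LED	42	/* hypothetical board-specific GPIO number */

static int __init demo_board_setup(void)
{
	int status;

	/* issued from task context; both return values are checked */
	status = gpio_request(DEMO_GPIO_LED, "led");
	if (status < 0)
		return status;

	status = gpio_direction_output(DEMO_GPIO_LED, 0);
	if (status < 0)
		gpio_free(DEMO_GPIO_LED);
	return status;
}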
···
 S:	Maintained
 
 PARALLEL PORT SUPPORT
-L:	linux-parport@lists.infradead.org
+L:	linux-parport@lists.infradead.org (subscribers-only)
 S:	Orphan
 
 PARIDE DRIVERS FOR PARALLEL PORT IDE DEVICES
 P:	Tim Waugh
 M:	tim@cyberelk.net
-L:	linux-parport@lists.infradead.org
+L:	linux-parport@lists.infradead.org (subscribers-only)
 W:	http://www.torque.net/linux-pp.html
 S:	Maintained
-8
arch/blackfin/Kconfig
···
 
 source "mm/Kconfig"
 
-config LARGE_ALLOCS
-	bool "Allow allocating large blocks (> 1MB) of memory"
-	help
-	  Allow the slab memory allocator to keep chains for very large
-	  memory sizes - upto 32MB. You may need this if your system has
-	  a lot of RAM, and you need to able to allocate very large
-	  contiguous chunks. If unsure, say N.
-
 config BFIN_DMA_5XX
 	bool "Enable DMA Support"
 	depends on (BF533 || BF532 || BF531 || BF537 || BF536 || BF534 || BF561)
-8
arch/frv/Kconfig
···
 	  with a lot of RAM, this can be wasteful of precious low memory.
 	  Setting this option will put user-space page tables in high memory.
 
-config LARGE_ALLOCS
-	bool "Allow allocating large blocks (> 1MB) of memory"
-	help
-	  Allow the slab memory allocator to keep chains for very large memory
-	  sizes - up to 32MB. You may need this if your system has a lot of
-	  RAM, and you need to able to allocate very large contiguous chunks.
-	  If unsure, say N.
-
 source "mm/Kconfig"
 
 choice
+1-1
arch/i386/kernel/cpu/mtrr/generic.c
···
 }
 
 /*  Grab all of the MTRR state for this CPU into *state  */
-void __init get_mtrr_state(void)
+void get_mtrr_state(void)
 {
 	unsigned int i;
 	struct mtrr_var_range *vrs;
···
 	default y
 	depends on (AVNET5282)
 
-config LARGE_ALLOCS
-	bool "Allow allocating large blocks (> 1MB) of memory"
-	help
-	  Allow the slab memory allocator to keep chains for very large
-	  memory sizes - upto 32MB. You may need this if your system has
-	  a lot of RAM, and you need to able to allocate very large
-	  contiguous chunks. If unsure, say N.
-
 config 4KSTACKS
 	bool "Use 4Kb for kernel stacks instead of 8Kb"
 	default y
···
    config RESET_GUARD
    	  bool "Reset Guard"
 
-   config LARGE_ALLOCS
-   	  bool "Allow allocating large blocks (> 1MB) of memory"
-   	  help
-   	    Allow the slab memory allocator to keep chains for very large
-   	    memory sizes - upto 32MB. You may need this if your system has
-   	    a lot of RAM, and you need to able to allocate very large
-   	    contiguous chunks. If unsure, say N.
-
 source "mm/Kconfig"
 
 endmenu
+4-4
drivers/acpi/numa.c
···
 #define NID_INVAL	-1
 
 /* maps to convert between proximity domain and logical node ID */
-int __cpuinitdata pxm_to_node_map[MAX_PXM_DOMAINS]
+static int pxm_to_node_map[MAX_PXM_DOMAINS]
 			= { [0 ... MAX_PXM_DOMAINS - 1] = NID_INVAL };
-int __cpuinitdata node_to_pxm_map[MAX_NUMNODES]
+static int node_to_pxm_map[MAX_NUMNODES]
 			= { [0 ... MAX_NUMNODES - 1] = PXM_INVAL };
 
-int __cpuinit pxm_to_node(int pxm)
+int pxm_to_node(int pxm)
 {
 	if (pxm < 0)
 		return NID_INVAL;
 	return pxm_to_node_map[pxm];
 }
 
-int __cpuinit node_to_pxm(int node)
+int node_to_pxm(int node)
 {
 	if (node < 0)
 		return PXM_INVAL;
···
 	depends on RTC_CLASS
 
 config RTC_INTF_SYSFS
-	boolean "sysfs"
+	boolean "/sys/class/rtc/rtcN (sysfs)"
 	depends on RTC_CLASS && SYSFS
 	default RTC_CLASS
 	help
···
 	  will be called rtc-sysfs.
 
 config RTC_INTF_PROC
-	boolean "proc"
+	boolean "/proc/driver/rtc (procfs for rtc0)"
 	depends on RTC_CLASS && PROC_FS
 	default RTC_CLASS
 	help
···
 	  will be called rtc-proc.
 
 config RTC_INTF_DEV
-	boolean "dev"
+	boolean "/dev/rtcN (character devices)"
 	depends on RTC_CLASS
 	default RTC_CLASS
 	help
+2-2
drivers/rtc/rtc-omap.c
···
 		goto fail;
 	}
 	platform_set_drvdata(pdev, rtc);
-	dev_set_devdata(&rtc->dev, mem);
+	dev_set_drvdata(&rtc->dev, mem);
 
 	/* clear pending irqs, and set 1/second periodic,
 	 * which we'll use instead of update irqs
···
 		free_irq(omap_rtc_timer, rtc);
 	free_irq(omap_rtc_alarm, rtc);
 
-	release_resource(dev_get_devdata(&rtc->dev));
+	release_resource(dev_get_drvdata(&rtc->dev));
 	rtc_device_unregister(rtc);
 	return 0;
 }
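For context: the driver core stores per-device private data with dev_set_drvdata() and returns it with dev_get_drvdata(); dev_set_devdata()/dev_get_devdata() are not part of that API, which appears to be what the two hunks above correct. A minimal sketch of the pairing (demo_* names are hypothetical):

#include <linux/device.h>

static void demo_stash(struct device *dev, void *priv)
{
	dev_set_drvdata(dev, priv);	/* store driver-private pointer */
}

static void *demo_fetch(struct device *dev)
{
	return dev_get_drvdata(dev);	/* read it back */
}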
···
 asmlinkage long compat_sys_timerfd(int ufd, int clockid, int flags,
 				   const struct compat_itimerspec __user *utmr)
 {
-	long res;
 	struct itimerspec t;
 	struct itimerspec __user *ut;
 
-	res = -EFAULT;
 	if (get_compat_itimerspec(&t, utmr))
-		goto err_exit;
+		return -EFAULT;
 	ut = compat_alloc_user_space(sizeof(*ut));
-	if (copy_to_user(ut, &t, sizeof(t)) )
-		goto err_exit;
+	if (copy_to_user(ut, &t, sizeof(t)))
+		return -EFAULT;
 
-	res = sys_timerfd(ufd, clockid, flags, ut);
-err_exit:
-	return res;
+	return sys_timerfd(ufd, clockid, flags, ut);
 }
 
 #endif /* CONFIG_TIMERFD */
-
+1-1
fs/dquot.c
···
 		/* If quota was reenabled in the meantime, we have
 		 * nothing to do */
 		if (!sb_has_quota_enabled(sb, cnt)) {
-			mutex_lock(&toputinode[cnt]->i_mutex);
+			mutex_lock_nested(&toputinode[cnt]->i_mutex, I_MUTEX_QUOTA);
 			toputinode[cnt]->i_flags &= ~(S_IMMUTABLE |
 				S_NOATIME | S_NOQUOTA);
 			truncate_inode_pages(&toputinode[cnt]->i_data, 0);
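The switch to mutex_lock_nested() here (and in the fs/quota.c hunks below) is a lockdep annotation rather than a behavioural change: the quota file's i_mutex is taken in a context where lockdep would otherwise see two locks of the same class and report a possible self-deadlock, so the acquisition is tagged with the I_MUTEX_QUOTA subclass. A generic sketch of the idiom, under assumed names (demo_node, demo_lock_pair):

#include <linux/mutex.h>
#include <linux/lockdep.h>

struct demo_node {		/* hypothetical structure */
	struct mutex lock;
};

static void demo_lock_pair(struct demo_node *parent, struct demo_node *child)
{
	mutex_lock(&parent->lock);	/* subclass 0 */
	/* same lock class, legitimately nested: annotate the inner one */
	mutex_lock_nested(&child->lock, SINGLE_DEPTH_NESTING);

	/* ... operate on both nodes ... */

	mutex_unlock(&child->lock);
	mutex_unlock(&parent->lock);
}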
···
 {
 	struct efs_inode_info *ei = (struct efs_inode_info *) foo;
 
-	if (flags & SLAB_CTOR_CONSTRUCTOR)
-		inode_init_once(&ei->vfs_inode);
+	inode_init_once(&ei->vfs_inode);
 }
 
 static int init_inodecache(void)
+1-3
fs/exec.c
···
 #endif
 
 int core_uses_pid;
-char core_pattern[128] = "core";
+char core_pattern[CORENAME_MAX_SIZE] = "core";
 int suid_dumpable = 0;
 
 EXPORT_SYMBOL(suid_dumpable);
···
 }
 
 EXPORT_SYMBOL(set_binfmt);
-
-#define CORENAME_MAX_SIZE 64
 
 /* format_corename will inspect the pattern parameter, and output a
  * name into corename, which must have space for at least
···
 {
 	struct qnx4_inode_info *ei = (struct qnx4_inode_info *) foo;
 
-	if (flags & SLAB_CTOR_CONSTRUCTOR)
-		inode_init_once(&ei->vfs_inode);
+	inode_init_once(&ei->vfs_inode);
 }
 
 static int init_inodecache(void)
+7-16
fs/quota.c
···
 static void quota_sync_sb(struct super_block *sb, int type)
 {
 	int cnt;
-	struct inode *discard[MAXQUOTAS];
 
 	sb->s_qcop->quota_sync(sb, type);
 	/* This is not very clever (and fast) but currently I don't know about
···
 		sb->s_op->sync_fs(sb, 1);
 	sync_blockdev(sb->s_bdev);
 
-	/* Now when everything is written we can discard the pagecache so
-	 * that userspace sees the changes. We need i_mutex and so we could
-	 * not do it inside dqonoff_mutex. Moreover we need to be carefull
-	 * about races with quotaoff() (that is the reason why we have own
-	 * reference to inode). */
+	/*
+	 * Now when everything is written we can discard the pagecache so
+	 * that userspace sees the changes.
+	 */
 	mutex_lock(&sb_dqopt(sb)->dqonoff_mutex);
 	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
-		discard[cnt] = NULL;
 		if (type != -1 && cnt != type)
 			continue;
 		if (!sb_has_quota_enabled(sb, cnt))
 			continue;
-		discard[cnt] = igrab(sb_dqopt(sb)->files[cnt]);
+		mutex_lock_nested(&sb_dqopt(sb)->files[cnt]->i_mutex, I_MUTEX_QUOTA);
+		truncate_inode_pages(&sb_dqopt(sb)->files[cnt]->i_data, 0);
+		mutex_unlock(&sb_dqopt(sb)->files[cnt]->i_mutex);
 	}
 	mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
-	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
-		if (discard[cnt]) {
-			mutex_lock(&discard[cnt]->i_mutex);
-			truncate_inode_pages(&discard[cnt]->i_data, 0);
-			mutex_unlock(&discard[cnt]->i_mutex);
-			iput(discard[cnt]);
-		}
-	}
 }
 
 void sync_dquots(struct super_block *sb, int type)
···
 {
 	struct ufs_inode_info *ei = (struct ufs_inode_info *) foo;
 
-	if (flags & SLAB_CTOR_CONSTRUCTOR)
-		inode_init_once(&ei->vfs_inode);
+	inode_init_once(&ei->vfs_inode);
 }
 
 static int init_inodecache(void)
+1-2
fs/xfs/linux-2.6/xfs_super.c
···
 	kmem_zone_t		*zonep,
 	unsigned long		flags)
 {
-	if (flags & SLAB_CTOR_CONSTRUCTOR)
-		inode_init_once(vn_to_inode((bhv_vnode_t *)vnode));
+	inode_init_once(vn_to_inode((bhv_vnode_t *)vnode));
 }
 
 STATIC int
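The four constructor hunks above (efs, qnx4, ufs, xfs) are the same mechanical change: with SLAB_CTOR_CONSTRUCTOR removed from include/linux/slab.h later in this series, constructors are only ever invoked when a slab page is populated, so the flag test is dead code. A post-change constructor reduces to unconditional initialization; a sketch with hypothetical names (demo_object, demo_ctor):

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>

struct demo_object {		/* hypothetical cached object */
	spinlock_t lock;
	int value;
};

/* called once per object, when its slab page is first populated */
static void demo_ctor(void *obj, struct kmem_cache *cachep, unsigned long flags)
{
	struct demo_object *d = obj;

	memset(d, 0, sizeof(*d));
	spin_lock_init(&d->lock);
}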
+2-5
include/acpi/acpi_numa.h
···
 #define MAX_PXM_DOMAINS (256) /* Old pxm spec is defined 8 bit */
 #endif
 
-extern int __cpuinitdata pxm_to_node_map[MAX_PXM_DOMAINS];
-extern int __cpuinitdata node_to_pxm_map[MAX_NUMNODES];
-
-extern int __cpuinit pxm_to_node(int);
-extern int __cpuinit node_to_pxm(int);
+extern int pxm_to_node(int);
+extern int node_to_pxm(int);
 extern int __cpuinit acpi_map_pxm_to_node(int);
 extern void __cpuinit acpi_unmap_pxm_to_node(int);
+2
include/linux/binfmts.h
···
 
 #ifdef __KERNEL__
 
+#define CORENAME_MAX_SIZE 128
+
 /*
  * This structure is used to hold the arguments that are used when loading binaries.
  */
···
 void page_add_file_rmap(struct page *);
 void page_remove_rmap(struct page *, struct vm_area_struct *);
 
-/**
- * page_dup_rmap - duplicate pte mapping to a page
- * @page:	the page to add the mapping to
- *
- * For copy_page_range only: minimal extract from page_add_rmap,
- * avoiding unnecessary tests (already checked) so it's quicker.
- */
-static inline void page_dup_rmap(struct page *page)
+#ifdef CONFIG_DEBUG_VM
+void page_dup_rmap(struct page *page, struct vm_area_struct *vma, unsigned long address);
+#else
+static inline void page_dup_rmap(struct page *page, struct vm_area_struct *vma, unsigned long address)
 {
 	atomic_inc(&page->_mapcount);
 }
+#endif
 
 /*
  * Called from mm/vmscan.c to handle paging out
+15-6
include/linux/slab.h
···
 #define SLAB_MEM_SPREAD		0x00100000UL	/* Spread some memory over cpuset */
 #define SLAB_TRACE		0x00200000UL	/* Trace allocations and frees */
 
-/* Flags passed to a constructor functions */
-#define SLAB_CTOR_CONSTRUCTOR	0x001UL		/* If not set, then deconstructor */
-
 /*
  * struct kmem_cache related prototypes
  */
···
 	return kmem_cache_alloc(cachep, flags);
 }
 #endif
+
+/*
+ * The largest kmalloc size supported by the slab allocators is
+ * 32 megabyte (2^25) or the maximum allocatable page order if that is
+ * less than 32 MB.
+ *
+ * WARNING: Its not easy to increase this value since the allocators have
+ * to do various tricks to work around compiler limitations in order to
+ * ensure proper constant folding.
+ */
+#define KMALLOC_SHIFT_HIGH	((MAX_ORDER + PAGE_SHIFT) <= 25 ? \
+				(MAX_ORDER + PAGE_SHIFT) : 25)
+
+#define KMALLOC_MAX_SIZE	(1UL << KMALLOC_SHIFT_HIGH)
+#define KMALLOC_MAX_ORDER	(KMALLOC_SHIFT_HIGH - PAGE_SHIFT)
 
 /*
  * Common kmalloc functions provided by all allocators
···
 	kmalloc_track_caller(size, flags)
 
 #endif /* DEBUG_SLAB */
-
-extern const struct seq_operations slabinfo_op;
-ssize_t slabinfo_write(struct file *, const char __user *, size_t, loff_t *);
 
 #endif	/* __KERNEL__ */
 #endif	/* _LINUX_SLAB_H */
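A worked instance of the new constants, assuming the common 4 KiB page configuration (PAGE_SHIFT = 12, MAX_ORDER = 11; neither value comes from this patch): MAX_ORDER + PAGE_SHIFT = 23 <= 25, so KMALLOC_SHIFT_HIGH is 23, KMALLOC_MAX_SIZE is 1UL << 23 = 8 MiB, and KMALLOC_MAX_ORDER is 11. A caller can then bound a request at build time; demo_buffer and demo_alloc are hypothetical:

#include <linux/kernel.h>
#include <linux/slab.h>

struct demo_buffer {		/* hypothetical */
	char data[1 << 20];	/* 1 MiB, well under KMALLOC_MAX_SIZE */
};

static struct demo_buffer *demo_alloc(void)
{
	/* breaks the build if the struct ever exceeds the allocator limit */
	BUILD_BUG_ON(sizeof(struct demo_buffer) > KMALLOC_MAX_SIZE);
	return kmalloc(sizeof(struct demo_buffer), GFP_KERNEL);
}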
···
 	int objects;		/* Number of objects in slab */
 	int refcount;		/* Refcount for slab cache destroy */
 	void (*ctor)(void *, struct kmem_cache *, unsigned long);
-	void (*dtor)(void *, struct kmem_cache *, unsigned long);
 	int inuse;		/* Offset to metadata */
 	int align;		/* Alignment */
 	const char *name;	/* Name (only for display!) */
···
  */
 #define KMALLOC_SHIFT_LOW 3
 
-#ifdef CONFIG_LARGE_ALLOCS
-#define KMALLOC_SHIFT_HIGH ((MAX_ORDER + PAGE_SHIFT) =< 25 ? \
-				(MAX_ORDER + PAGE_SHIFT - 1) : 25)
-#else
-#if !defined(CONFIG_MMU) || NR_CPUS > 512 || MAX_NUMNODES > 256
-#define KMALLOC_SHIFT_HIGH 20
-#else
-#define KMALLOC_SHIFT_HIGH 18
-#endif
-#endif
-
 /*
  * We keep the general caches in an array of slab caches that are used for
  * 2^x bytes of allocations.
···
  * Sorry that the following has to be that ugly but some versions of GCC
  * have trouble with constant propagation and loops.
  */
-static inline int kmalloc_index(int size)
+static inline int kmalloc_index(size_t size)
 {
 	/*
 	 * We should return 0 if size == 0 but we use the smallest object
···
 	 */
 	WARN_ON_ONCE(size == 0);
 
-	if (size > (1 << KMALLOC_SHIFT_HIGH))
+	if (size > KMALLOC_MAX_SIZE)
 		return -1;
 
 	if (size > 64 && size <= 96)
···
 	if (size <=  64 * 1024) return 16;
 	if (size <= 128 * 1024) return 17;
 	if (size <= 256 * 1024) return 18;
-#if KMALLOC_SHIFT_HIGH > 18
 	if (size <= 512 * 1024) return 19;
 	if (size <= 1024 * 1024) return 20;
-#endif
-#if KMALLOC_SHIFT_HIGH > 20
 	if (size <=  2 * 1024 * 1024) return 21;
 	if (size <=  4 * 1024 * 1024) return 22;
 	if (size <=  8 * 1024 * 1024) return 23;
 	if (size <= 16 * 1024 * 1024) return 24;
 	if (size <= 32 * 1024 * 1024) return 25;
-#endif
 	return -1;
 
 /*
···
 	if (index == 0)
 		return NULL;
 
-	if (index < 0) {
+	/*
+	 * This function only gets expanded if __builtin_constant_p(size), so
+	 * testing it here shouldn't be needed. But some versions of gcc need
+	 * help.
+	 */
+	if (__builtin_constant_p(size) && index < 0) {
 		/*
 		 * Generate a link failure. Would be great if we could
 		 * do something to stop the compile here.
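Worked examples of how kmalloc_index() folds for constant sizes, assuming the standard slub_def.h mapping for the small-size cases elided above (illustrative only):

/*
 *	kmalloc_index(8)   == 3		the 2^3 = 8 byte cache
 *	kmalloc_index(70)  == 1		the special 96 byte cache
 *	kmalloc_index(100) == 7		the 2^7 = 128 byte cache
 *
 * A non-constant size never reaches this function: per the comment added
 * in the last hunk, it is only expanded when __builtin_constant_p(size)
 * holds, which is why that guard alone suffices.
 */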
+3-4
include/linux/smp.h
···
  *	Alan Cox. <alan@redhat.com>
  */
 
+#include <linux/errno.h>
 
 extern void cpu_idle(void);
 
···
 #define num_booting_cpus()			1
 #define smp_prepare_boot_cpu()			do {} while (0)
 static inline int smp_call_function_single(int cpuid, void (*func) (void *info),
-				void *info, int retry, int wait)
+					   void *info, int retry, int wait)
 {
-	/* Disable interrupts here? */
-	func(info);
-	return 0;
+	return -EBUSY;
 }
 
 #endif /* !SMP */
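On SMP, smp_call_function_single() fails with -EBUSY when asked to run on the calling CPU; since a UP system has only the calling CPU, the stub presumably now mirrors that instead of silently running func() with interrupts in an unspecified state. Callers therefore must check the result. A hypothetical caller sketch:

#include <linux/kernel.h>
#include <linux/smp.h>

static void demo_remote_tick(void *info)	/* hypothetical */
{
	/* would run on the target CPU on an SMP build */
}

static int demo_kick(int cpu)
{
	int err;

	/* retry = 0, wait = 1; the return value must now be checked */
	err = smp_call_function_single(cpu, demo_remote_tick, NULL, 0, 1);
	if (err)
		printk(KERN_DEBUG "demo: cpu %d unavailable (%d)\n", cpu, err);
	return err;
}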
···
 	   a slab allocator.
 
 config SLUB
-	depends on EXPERIMENTAL && !ARCH_USES_SLAB_PAGE_STRUCT
 	bool "SLUB (Unqueued Allocator)"
 	help
 	   SLUB is a slab allocator that minimizes cache line usage
···
 	   and has enhanced diagnostics.
 
 config SLOB
-#
-#	SLOB does not support SMP because SLAB_DESTROY_BY_RCU is unsupported
-#
-	depends on EMBEDDED && !SMP && !SPARSEMEM
+	depends on EMBEDDED && !SPARSEMEM
 	bool "SLOB (Simple Allocator)"
 	help
 	   SLOB replaces the SLAB allocator with a drastically simpler
-	   allocator.  SLOB is more space efficient that SLAB but does not
+	   allocator.  SLOB is more space efficient than SLAB but does not
 	   scale well (single lock for all operations) and is also highly
 	   susceptible to fragmentation. SLUB can accomplish a higher object
 	   density. It is usually better to use SLUB instead of SLOB.
···
 
 	mutex_lock(&pm_mutex);
 	for (i = HIBERNATION_FIRST; i <= HIBERNATION_MAX; i++) {
-		if (!strncmp(buf, hibernation_modes[i], len)) {
+		if (len == strlen(hibernation_modes[i])
+		    && !strncmp(buf, hibernation_modes[i], len)) {
 			mode = i;
 			break;
 		}
+2-2
kernel/power/main.c
···
 	len = p ? p - buf : n;
 
 	/* First, check if we are requested to hibernate */
-	if (!strncmp(buf, "disk", len)) {
+	if (len == 4 && !strncmp(buf, "disk", len)) {
 		error = hibernate();
 		return error ? error : n;
 	}
 
 	for (s = &pm_states[state]; state < PM_SUSPEND_MAX; s++, state++) {
-		if (*s && !strncmp(buf, *s, len))
+		if (*s && len == strlen(*s) && !strncmp(buf, *s, len))
 			break;
 	}
 	if (state < PM_SUSPEND_MAX && *s)
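Both hunks close the same hole as the kernel/power/disk.c change above: with a bare strncmp(buf, cand, len) where len comes from the user's write, echoing just "d" gives len == 1 and matches "disk", so a one-letter write could hibernate the machine, and the same prefix problem applied to the pm_states[] loop. Requiring len == strlen(cand) turns the prefix test into an exact match. The idiom as a hypothetical helper, not part of the patch:

#include <linux/string.h>

static int demo_match_exact(const char *buf, size_t len, const char *cand)
{
	return len == strlen(cand) && !strncmp(buf, cand, len);
}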
···
 static void anon_vma_ctor(void *data, struct kmem_cache *cachep,
 			  unsigned long flags)
 {
-	if (flags & SLAB_CTOR_CONSTRUCTOR) {
-		struct anon_vma *anon_vma = data;
+	struct anon_vma *anon_vma = data;
 
-		spin_lock_init(&anon_vma->lock);
-		INIT_LIST_HEAD(&anon_vma->head);
-	}
+	spin_lock_init(&anon_vma->lock);
+	INIT_LIST_HEAD(&anon_vma->head);
 }
 
 void __init anon_vma_init(void)
···
 }
 
 /**
+ * __page_check_anon_rmap - sanity check anonymous rmap addition
+ * @page:	the page to add the mapping to
+ * @vma:	the vm area in which the mapping is added
+ * @address:	the user virtual address mapped
+ */
+static void __page_check_anon_rmap(struct page *page,
+	struct vm_area_struct *vma, unsigned long address)
+{
+#ifdef CONFIG_DEBUG_VM
+	/*
+	 * The page's anon-rmap details (mapping and index) are guaranteed to
+	 * be set up correctly at this point.
+	 *
+	 * We have exclusion against page_add_anon_rmap because the caller
+	 * always holds the page locked, except if called from page_dup_rmap,
+	 * in which case the page is already known to be setup.
+	 *
+	 * We have exclusion against page_add_new_anon_rmap because those pages
+	 * are initially only visible via the pagetables, and the pte is locked
+	 * over the call to page_add_new_anon_rmap.
+	 */
+	struct anon_vma *anon_vma = vma->anon_vma;
+	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
+	BUG_ON(page->mapping != (struct address_space *)anon_vma);
+	BUG_ON(page->index != linear_page_index(vma, address));
+#endif
+}
+
+/**
  * page_add_anon_rmap - add pte mapping to an anonymous page
  * @page:	the page to add the mapping to
  * @vma:	the vm area in which the mapping is added
  * @address:	the user virtual address mapped
  *
- * The caller needs to hold the pte lock.
+ * The caller needs to hold the pte lock and the page must be locked.
  */
 void page_add_anon_rmap(struct page *page,
 	struct vm_area_struct *vma, unsigned long address)
 {
+	VM_BUG_ON(!PageLocked(page));
+	VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end);
 	if (atomic_inc_and_test(&page->_mapcount))
 		__page_set_anon_rmap(page, vma, address);
-	/* else checking page index and mapping is racy */
+	else
+		__page_check_anon_rmap(page, vma, address);
 }
 
 /*
···
  *
  * Same as page_add_anon_rmap but must only be called on *new* pages.
  * This means the inc-and-test can be bypassed.
+ * Page does not have to be locked.
  */
 void page_add_new_anon_rmap(struct page *page,
 	struct vm_area_struct *vma, unsigned long address)
 {
+	BUG_ON(address < vma->vm_start || address >= vma->vm_end);
 	atomic_set(&page->_mapcount, 0); /* elevate count by 1 (starts at -1) */
 	__page_set_anon_rmap(page, vma, address);
 }
···
 	if (atomic_inc_and_test(&page->_mapcount))
 		__inc_zone_page_state(page, NR_FILE_MAPPED);
 }
+
+#ifdef CONFIG_DEBUG_VM
+/**
+ * page_dup_rmap - duplicate pte mapping to a page
+ * @page:	the page to add the mapping to
+ *
+ * For copy_page_range only: minimal extract from page_add_file_rmap /
+ * page_add_anon_rmap, avoiding unnecessary tests (already checked) so it's
+ * quicker.
+ *
+ * The caller needs to hold the pte lock.
+ */
+void page_dup_rmap(struct page *page, struct vm_area_struct *vma, unsigned long address)
+{
+	BUG_ON(page_mapcount(page) == 0);
+	if (PageAnon(page))
+		__page_check_anon_rmap(page, vma, address);
+	atomic_inc(&page->_mapcount);
+}
+#endif
 
 /**
  * page_remove_rmap - take down pte mapping from a page