···
  <function>spin_lock_irqsave()</function>, which is a superset
  of all other spinlock primitives.
  </para>
+
 <table>
 <title>Table of Locking Requirements</title>
 <tgroup cols="11">
 <tbody>
+
 <row>
 <entry></entry>
 <entry>IRQ Handler A</entry>
···
 <row>
 <entry>IRQ Handler B</entry>
-<entry>spin_lock_irqsave</entry>
+<entry>SLIS</entry>
 <entry>None</entry>
 </row>

 <row>
 <entry>Softirq A</entry>
-<entry>spin_lock_irq</entry>
-<entry>spin_lock_irq</entry>
-<entry>spin_lock</entry>
+<entry>SLI</entry>
+<entry>SLI</entry>
+<entry>SL</entry>
 </row>

 <row>
 <entry>Softirq B</entry>
-<entry>spin_lock_irq</entry>
-<entry>spin_lock_irq</entry>
-<entry>spin_lock</entry>
-<entry>spin_lock</entry>
+<entry>SLI</entry>
+<entry>SLI</entry>
+<entry>SL</entry>
+<entry>SL</entry>
 </row>

 <row>
 <entry>Tasklet A</entry>
-<entry>spin_lock_irq</entry>
-<entry>spin_lock_irq</entry>
-<entry>spin_lock</entry>
-<entry>spin_lock</entry>
+<entry>SLI</entry>
+<entry>SLI</entry>
+<entry>SL</entry>
+<entry>SL</entry>
 <entry>None</entry>
 </row>

 <row>
 <entry>Tasklet B</entry>
-<entry>spin_lock_irq</entry>
-<entry>spin_lock_irq</entry>
-<entry>spin_lock</entry>
-<entry>spin_lock</entry>
-<entry>spin_lock</entry>
+<entry>SLI</entry>
+<entry>SLI</entry>
+<entry>SL</entry>
+<entry>SL</entry>
+<entry>SL</entry>
 <entry>None</entry>
 </row>

 <row>
 <entry>Timer A</entry>
-<entry>spin_lock_irq</entry>
-<entry>spin_lock_irq</entry>
-<entry>spin_lock</entry>
-<entry>spin_lock</entry>
-<entry>spin_lock</entry>
-<entry>spin_lock</entry>
+<entry>SLI</entry>
+<entry>SLI</entry>
+<entry>SL</entry>
+<entry>SL</entry>
+<entry>SL</entry>
+<entry>SL</entry>
 <entry>None</entry>
 </row>

 <row>
 <entry>Timer B</entry>
-<entry>spin_lock_irq</entry>
-<entry>spin_lock_irq</entry>
-<entry>spin_lock</entry>
-<entry>spin_lock</entry>
-<entry>spin_lock</entry>
-<entry>spin_lock</entry>
-<entry>spin_lock</entry>
+<entry>SLI</entry>
+<entry>SLI</entry>
+<entry>SL</entry>
+<entry>SL</entry>
+<entry>SL</entry>
+<entry>SL</entry>
+<entry>SL</entry>
 <entry>None</entry>
 </row>

 <row>
 <entry>User Context A</entry>
-<entry>spin_lock_irq</entry>
-<entry>spin_lock_irq</entry>
-<entry>spin_lock_bh</entry>
-<entry>spin_lock_bh</entry>
-<entry>spin_lock_bh</entry>
-<entry>spin_lock_bh</entry>
-<entry>spin_lock_bh</entry>
-<entry>spin_lock_bh</entry>
+<entry>SLI</entry>
+<entry>SLI</entry>
+<entry>SLBH</entry>
+<entry>SLBH</entry>
+<entry>SLBH</entry>
+<entry>SLBH</entry>
+<entry>SLBH</entry>
+<entry>SLBH</entry>
 <entry>None</entry>
 </row>

 <row>
 <entry>User Context B</entry>
-<entry>spin_lock_irq</entry>
-<entry>spin_lock_irq</entry>
-<entry>spin_lock_bh</entry>
-<entry>spin_lock_bh</entry>
-<entry>spin_lock_bh</entry>
-<entry>spin_lock_bh</entry>
-<entry>spin_lock_bh</entry>
-<entry>spin_lock_bh</entry>
-<entry>down_interruptible</entry>
+<entry>SLI</entry>
+<entry>SLI</entry>
+<entry>SLBH</entry>
+<entry>SLBH</entry>
+<entry>SLBH</entry>
+<entry>SLBH</entry>
+<entry>SLBH</entry>
+<entry>SLBH</entry>
+<entry>DI</entry>
 <entry>None</entry>
 </row>

 </tbody>
 </tgroup>
 </table>
+
+<table>
+<title>Legend for Locking Requirements Table</title>
+<tgroup cols="2">
+<tbody>
+
+<row>
+<entry>SLIS</entry>
+<entry>spin_lock_irqsave</entry>
+</row>
+<row>
+<entry>SLI</entry>
+<entry>spin_lock_irq</entry>
+</row>
+<row>
+<entry>SL</entry>
+<entry>spin_lock</entry>
+</row>
+<row>
+<entry>SLBH</entry>
+<entry>spin_lock_bh</entry>
+</row>
+<row>
+<entry>DI</entry>
+<entry>down_interruptible</entry>
+</row>
+
+</tbody>
+</tgroup>
+</table>
+
 </sect1>
 </chapter>
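As a quick key to the abbreviations above, a minimal sketch of the two most common rows; the lock, counter and calling contexts are hypothetical (see the legend table for the full mapping).

	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(my_lock);	/* hypothetical */
	static int my_count;

	/* User context sharing data with an IRQ handler: SLI, spin_lock_irq(). */
	static void bump_from_user_context(void)
	{
		spin_lock_irq(&my_lock);
		my_count++;
		spin_unlock_irq(&my_lock);
	}

	/* Code that cannot know whether IRQs are already disabled uses SLIS,
	 * spin_lock_irqsave(), the superset mentioned in the paragraph above. */
	static void bump_from_anywhere(void)
	{
		unsigned long flags;

		spin_lock_irqsave(&my_lock, flags);
		my_count++;
		spin_unlock_irqrestore(&my_lock, flags);
	}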

Documentation/gpio.txt | +6 -2
···
 The return value is zero for success, else a negative errno. It should
 be checked, since the get/set calls don't have error returns and since
-misconfiguration is possible. (These calls could sleep.)
+misconfiguration is possible. You should normally issue these calls from
+a task context. However, for spinlock-safe GPIOs it's OK to use them
+before tasking is enabled, as part of early board setup.

 For output GPIOs, the value provided becomes the initial output value.
 This helps avoid signal glitching during system startup.
···
 Passing invalid GPIO numbers to gpio_request() will fail, as will requesting
 GPIOs that have already been claimed with that call. The return value of
-gpio_request() must be checked. (These calls could sleep.)
+gpio_request() must be checked. You should normally issue these calls from
+a task context. However, for spinlock-safe GPIOs it's OK to request GPIOs
+before tasking is enabled, as part of early board setup.

 These calls serve two basic purposes. One is marking the signals which
 are actually in use as GPIOs, for better diagnostics; systems may have
···
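A minimal sketch of the checked, task-context usage described above; the GPIO number, label and helper name are hypothetical.

	#include <asm/gpio.h>

	static int setup_status_led(void)	/* e.g. from a driver's probe() */
	{
		int err;

		err = gpio_request(42, "status-led");	/* hypothetical GPIO 42 */
		if (err)
			return err;

		err = gpio_direction_output(42, 0);	/* initial value: LED off */
		if (err)
			gpio_free(42);
		return err;
	}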
···
 S:	Maintained

 PARALLEL PORT SUPPORT
-L:	linux-parport@lists.infradead.org
+L:	linux-parport@lists.infradead.org (subscribers-only)
 S:	Orphan

 PARIDE DRIVERS FOR PARALLEL PORT IDE DEVICES
 P:	Tim Waugh
 M:	tim@cyberelk.net
-L:	linux-parport@lists.infradead.org
+L:	linux-parport@lists.infradead.org (subscribers-only)
 W:	http://www.torque.net/linux-pp.html
 S:	Maintained
···

arch/blackfin/Kconfig | -8
···
 source "mm/Kconfig"

-config LARGE_ALLOCS
-	bool "Allow allocating large blocks (> 1MB) of memory"
-	help
-	  Allow the slab memory allocator to keep chains for very large
-	  memory sizes - upto 32MB. You may need this if your system has
-	  a lot of RAM, and you need to able to allocate very large
-	  contiguous chunks. If unsure, say N.
-
 config BFIN_DMA_5XX
 	bool "Enable DMA Support"
 	depends on (BF533 || BF532 || BF531 || BF537 || BF536 || BF534 || BF561)
···
···
 	  with a lot of RAM, this can be wasteful of precious low memory.
 	  Setting this option will put user-space page tables in high memory.

-config LARGE_ALLOCS
-	bool "Allow allocating large blocks (> 1MB) of memory"
-	help
-	  Allow the slab memory allocator to keep chains for very large memory
-	  sizes - up to 32MB. You may need this if your system has a lot of
-	  RAM, and you need to able to allocate very large contiguous chunks.
-	  If unsure, say N.
-
 source "mm/Kconfig"

 choice
···

arch/i386/kernel/cpu/mtrr/generic.c | +1 -1
···
 }

 /* Grab all of the MTRR state for this CPU into *state */
-void __init get_mtrr_state(void)
+void get_mtrr_state(void)
 {
 	unsigned int i;
 	struct mtrr_var_range *vrs;
···
···
 	}
 	if (!cpus_empty(cpu_mask))
 		flush_tlb_others(cpu_mask, mm, TLB_FLUSH_ALL);
-	check_pgt_cache();
+
 	preempt_enable();
 }
···

arch/m68knommu/Kconfig | -8
···
 	default y
 	depends on (AVNET5282)

-config LARGE_ALLOCS
-	bool "Allow allocating large blocks (> 1MB) of memory"
-	help
-	  Allow the slab memory allocator to keep chains for very large
-	  memory sizes - upto 32MB. You may need this if your system has
-	  a lot of RAM, and you need to able to allocate very large
-	  contiguous chunks. If unsure, say N.
-
 config 4KSTACKS
 	bool "Use 4Kb for kernel stacks instead of 8Kb"
 	default y
···
···
 config RESET_GUARD
 	bool "Reset Guard"

-config LARGE_ALLOCS
-	bool "Allow allocating large blocks (> 1MB) of memory"
-	help
-	  Allow the slab memory allocator to keep chains for very large
-	  memory sizes - upto 32MB. You may need this if your system has
-	  a lot of RAM, and you need to able to allocate very large
-	  contiguous chunks. If unsure, say N.
-
 source "mm/Kconfig"

 endmenu
···
···
 	depends on RTC_CLASS

 config RTC_INTF_SYSFS
-	boolean "sysfs"
+	boolean "/sys/class/rtc/rtcN (sysfs)"
 	depends on RTC_CLASS && SYSFS
 	default RTC_CLASS
 	help
···
 	  will be called rtc-sysfs.

 config RTC_INTF_PROC
-	boolean "proc"
+	boolean "/proc/driver/rtc (procfs for rtc0)"
 	depends on RTC_CLASS && PROC_FS
 	default RTC_CLASS
 	help
···
 	  will be called rtc-proc.

 config RTC_INTF_DEV
-	boolean "dev"
+	boolean "/dev/rtcN (character devices)"
 	depends on RTC_CLASS
 	default RTC_CLASS
 	help
···

drivers/rtc/rtc-omap.c | +2 -2
···
 		goto fail;
 	}
 	platform_set_drvdata(pdev, rtc);
-	dev_set_devdata(&rtc->dev, mem);
+	dev_set_drvdata(&rtc->dev, mem);

 	/* clear pending irqs, and set 1/second periodic,
 	 * which we'll use instead of update irqs
···
 	free_irq(omap_rtc_timer, rtc);
 	free_irq(omap_rtc_alarm, rtc);

-	release_resource(dev_get_devdata(&rtc->dev));
+	release_resource(dev_get_drvdata(&rtc->dev));
 	rtc_device_unregister(rtc);
 	return 0;
 }
···
···
 asmlinkage long compat_sys_timerfd(int ufd, int clockid, int flags,
 				   const struct compat_itimerspec __user *utmr)
 {
-	long res;
 	struct itimerspec t;
 	struct itimerspec __user *ut;

-	res = -EFAULT;
 	if (get_compat_itimerspec(&t, utmr))
-		goto err_exit;
+		return -EFAULT;
 	ut = compat_alloc_user_space(sizeof(*ut));
-	if (copy_to_user(ut, &t, sizeof(t)) )
-		goto err_exit;
+	if (copy_to_user(ut, &t, sizeof(t)))
+		return -EFAULT;

-	res = sys_timerfd(ufd, clockid, flags, ut);
-err_exit:
-	return res;
+	return sys_timerfd(ufd, clockid, flags, ut);
 }

 #endif /* CONFIG_TIMERFD */
-

fs/dquot.c | +1 -1
···
 		/* If quota was reenabled in the meantime, we have
 		 * nothing to do */
 		if (!sb_has_quota_enabled(sb, cnt)) {
-			mutex_lock(&toputinode[cnt]->i_mutex);
+			mutex_lock_nested(&toputinode[cnt]->i_mutex, I_MUTEX_QUOTA);
 			toputinode[cnt]->i_flags &= ~(S_IMMUTABLE |
 				S_NOATIME | S_NOQUOTA);
 			truncate_inode_pages(&toputinode[cnt]->i_data, 0);
···
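For reference, mutex_lock_nested() takes the same lock as mutex_lock(); the extra argument only tells lockdep which nesting level the acquisition belongs to, so a legitimate inode-inside-inode ordering is not reported as a deadlock. A minimal sketch of the annotation pattern, with a hypothetical helper:

	#include <linux/fs.h>
	#include <linux/mutex.h>

	/* Hypothetical: touch a quota file while another inode's i_mutex may
	 * already be held by the caller. */
	static void touch_quota_inode(struct inode *inode)
	{
		mutex_lock_nested(&inode->i_mutex, I_MUTEX_QUOTA);
		/* ... operate on the quota file ... */
		mutex_unlock(&inode->i_mutex);
	}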
···
 #endif

 int core_uses_pid;
-char core_pattern[128] = "core";
+char core_pattern[CORENAME_MAX_SIZE] = "core";
 int suid_dumpable = 0;

 EXPORT_SYMBOL(suid_dumpable);
···
 }

 EXPORT_SYMBOL(set_binfmt);
-
-#define CORENAME_MAX_SIZE 64

 /* format_corename will inspect the pattern parameter, and output a
  * name into corename, which must have space for at least
···
···
 static void quota_sync_sb(struct super_block *sb, int type)
 {
 	int cnt;
-	struct inode *discard[MAXQUOTAS];

 	sb->s_qcop->quota_sync(sb, type);
 	/* This is not very clever (and fast) but currently I don't know about
···
 		sb->s_op->sync_fs(sb, 1);
 	sync_blockdev(sb->s_bdev);

-	/* Now when everything is written we can discard the pagecache so
-	 * that userspace sees the changes. We need i_mutex and so we could
-	 * not do it inside dqonoff_mutex. Moreover we need to be carefull
-	 * about races with quotaoff() (that is the reason why we have own
-	 * reference to inode). */
+	/*
+	 * Now when everything is written we can discard the pagecache so
+	 * that userspace sees the changes.
+	 */
 	mutex_lock(&sb_dqopt(sb)->dqonoff_mutex);
 	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
-		discard[cnt] = NULL;
 		if (type != -1 && cnt != type)
 			continue;
 		if (!sb_has_quota_enabled(sb, cnt))
 			continue;
-		discard[cnt] = igrab(sb_dqopt(sb)->files[cnt]);
+		mutex_lock_nested(&sb_dqopt(sb)->files[cnt]->i_mutex, I_MUTEX_QUOTA);
+		truncate_inode_pages(&sb_dqopt(sb)->files[cnt]->i_data, 0);
+		mutex_unlock(&sb_dqopt(sb)->files[cnt]->i_mutex);
 	}
 	mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
-	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
-		if (discard[cnt]) {
-			mutex_lock(&discard[cnt]->i_mutex);
-			truncate_inode_pages(&discard[cnt]->i_data, 0);
-			mutex_unlock(&discard[cnt]->i_mutex);
-			iput(discard[cnt]);
-		}
-	}
 }

 void sync_dquots(struct super_block *sb, int type)
···
···
 	kmem_zone_t		*zonep,
 	unsigned long		flags)
 {
-	if (flags & SLAB_CTOR_CONSTRUCTOR)
-		inode_init_once(vn_to_inode((bhv_vnode_t *)vnode));
+	inode_init_once(vn_to_inode((bhv_vnode_t *)vnode));
 }

 STATIC int
···

include/acpi/acpi_numa.h | +2 -5
···
 #define MAX_PXM_DOMAINS (256) /* Old pxm spec is defined 8 bit */
 #endif

-extern int __cpuinitdata pxm_to_node_map[MAX_PXM_DOMAINS];
-extern int __cpuinitdata node_to_pxm_map[MAX_NUMNODES];
-
-extern int __cpuinit pxm_to_node(int);
-extern int __cpuinit node_to_pxm(int);
+extern int pxm_to_node(int);
+extern int node_to_pxm(int);
 extern int __cpuinit acpi_map_pxm_to_node(int);
 extern void __cpuinit acpi_unmap_pxm_to_node(int);
···

include/linux/binfmts.h | +2
···
 #ifdef __KERNEL__

+#define CORENAME_MAX_SIZE 128
+
 /*
  * This structure is used to hold the arguments that are used when loading binaries.
  */
···
···
 void page_add_file_rmap(struct page *);
 void page_remove_rmap(struct page *, struct vm_area_struct *);

-/**
- * page_dup_rmap - duplicate pte mapping to a page
- * @page:	the page to add the mapping to
- *
- * For copy_page_range only: minimal extract from page_add_rmap,
- * avoiding unnecessary tests (already checked) so it's quicker.
- */
-static inline void page_dup_rmap(struct page *page)
+#ifdef CONFIG_DEBUG_VM
+void page_dup_rmap(struct page *page, struct vm_area_struct *vma, unsigned long address);
+#else
+static inline void page_dup_rmap(struct page *page, struct vm_area_struct *vma, unsigned long address)
 {
 	atomic_inc(&page->_mapcount);
 }
+#endif

 /*
  * Called from mm/vmscan.c to handle paging out
···

include/linux/slab.h | +15 -6
···
 #define SLAB_MEM_SPREAD		0x00100000UL	/* Spread some memory over cpuset */
 #define SLAB_TRACE		0x00200000UL	/* Trace allocations and frees */

-/* Flags passed to a constructor functions */
-#define SLAB_CTOR_CONSTRUCTOR	0x001UL		/* If not set, then deconstructor */
-
 /*
  * struct kmem_cache related prototypes
  */
···
 	return kmem_cache_alloc(cachep, flags);
 }
 #endif
+
+/*
+ * The largest kmalloc size supported by the slab allocators is
+ * 32 megabyte (2^25) or the maximum allocatable page order if that is
+ * less than 32 MB.
+ *
+ * WARNING: Its not easy to increase this value since the allocators have
+ * to do various tricks to work around compiler limitations in order to
+ * ensure proper constant folding.
+ */
+#define KMALLOC_SHIFT_HIGH	((MAX_ORDER + PAGE_SHIFT) <= 25 ? \
+				(MAX_ORDER + PAGE_SHIFT) : 25)
+
+#define KMALLOC_MAX_SIZE	(1UL << KMALLOC_SHIFT_HIGH)
+#define KMALLOC_MAX_ORDER	(KMALLOC_SHIFT_HIGH - PAGE_SHIFT)

 /*
  * Common kmalloc functions provided by all allocators
···
 	kmalloc_track_caller(size, flags)

 #endif /* DEBUG_SLAB */
-
-extern const struct seq_operations slabinfo_op;
-ssize_t slabinfo_write(struct file *, const char __user *, size_t, loff_t *);

 #endif /* __KERNEL__ */
 #endif /* _LINUX_SLAB_H */
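One way a caller might use the new limit: a hedged sketch with a hypothetical helper name, assuming vmalloc() is an acceptable fallback for the larger sizes (the caller then has to remember whether to free with kfree() or vfree()).

	#include <linux/slab.h>
	#include <linux/vmalloc.h>

	static void *big_buffer_alloc(size_t size)
	{
		/* Sizes the slab allocators can satisfy go through kmalloc(). */
		if (size <= KMALLOC_MAX_SIZE)
			return kmalloc(size, GFP_KERNEL);
		/* Anything larger has to come from vmalloc(). */
		return vmalloc(size);
	}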
···
 	int objects;		/* Number of objects in slab */
 	int refcount;		/* Refcount for slab cache destroy */
 	void (*ctor)(void *, struct kmem_cache *, unsigned long);
-	void (*dtor)(void *, struct kmem_cache *, unsigned long);
 	int inuse;		/* Offset to metadata */
 	int align;		/* Alignment */
 	const char *name;	/* Name (only for display!) */
···
  */
 #define KMALLOC_SHIFT_LOW 3

-#ifdef CONFIG_LARGE_ALLOCS
-#define KMALLOC_SHIFT_HIGH ((MAX_ORDER + PAGE_SHIFT) =< 25 ? \
-				(MAX_ORDER + PAGE_SHIFT - 1) : 25)
-#else
-#if !defined(CONFIG_MMU) || NR_CPUS > 512 || MAX_NUMNODES > 256
-#define KMALLOC_SHIFT_HIGH 20
-#else
-#define KMALLOC_SHIFT_HIGH 18
-#endif
-#endif
-
 /*
  * We keep the general caches in an array of slab caches that are used for
  * 2^x bytes of allocations.
···
  * Sorry that the following has to be that ugly but some versions of GCC
  * have trouble with constant propagation and loops.
  */
-static inline int kmalloc_index(int size)
+static inline int kmalloc_index(size_t size)
 {
 	/*
 	 * We should return 0 if size == 0 but we use the smallest object
···
 	 */
 	WARN_ON_ONCE(size == 0);

-	if (size > (1 << KMALLOC_SHIFT_HIGH))
+	if (size > KMALLOC_MAX_SIZE)
 		return -1;

 	if (size > 64 && size <= 96)
···
 	if (size <= 64 * 1024) return 16;
 	if (size <= 128 * 1024) return 17;
 	if (size <= 256 * 1024) return 18;
-#if KMALLOC_SHIFT_HIGH > 18
 	if (size <= 512 * 1024) return 19;
 	if (size <= 1024 * 1024) return 20;
-#endif
-#if KMALLOC_SHIFT_HIGH > 20
 	if (size <= 2 * 1024 * 1024) return 21;
 	if (size <= 4 * 1024 * 1024) return 22;
 	if (size <= 8 * 1024 * 1024) return 23;
 	if (size <= 16 * 1024 * 1024) return 24;
 	if (size <= 32 * 1024 * 1024) return 25;
-#endif
 	return -1;

 /*
···
 	if (index == 0)
 		return NULL;

-	if (index < 0) {
+	/*
+	 * This function only gets expanded if __builtin_constant_p(size), so
+	 * testing it here shouldn't be needed. But some versions of gcc need
+	 * help.
+	 */
+	if (__builtin_constant_p(size) && index < 0) {
 		/*
 		 * Generate a link failure. Would be great if we could
 		 * do something to stop the compile here.
···

include/linux/smp.h | +3 -4
···
 * Alan Cox. <alan@redhat.com>
 */

+#include <linux/errno.h>

 extern void cpu_idle(void);
···
 #define num_booting_cpus()			1
 #define smp_prepare_boot_cpu()			do {} while (0)
 static inline int smp_call_function_single(int cpuid, void (*func) (void *info),
-				void *info, int retry, int wait)
+					   void *info, int retry, int wait)
 {
-	/* Disable interrupts here? */
-	func(info);
-	return 0;
+	return -EBUSY;
 }

 #endif /* !SMP */
···
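With the UP stub now failing instead of silently running the function, a caller that really does want "run it here if it cannot be run elsewhere" has to do the local call itself. A hedged sketch; the sampling function and its argument are hypothetical, and the five-argument signature is the one declared in this tree.

	#include <linux/smp.h>

	static void collect_stats(void *info);	/* hypothetical */

	static void sample_cpu(int cpu, void *info)
	{
		/* On UP the stub now returns -EBUSY... */
		if (smp_call_function_single(cpu, collect_stats, info, 0, 1))
			collect_stats(info);	/* ...so fall back to a local call. */
	}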
···
 	   a slab allocator.

 config SLUB
-	depends on EXPERIMENTAL && !ARCH_USES_SLAB_PAGE_STRUCT
 	bool "SLUB (Unqueued Allocator)"
 	help
 	   SLUB is a slab allocator that minimizes cache line usage
···
 	   and has enhanced diagnostics.

 config SLOB
-#
-#	SLOB does not support SMP because SLAB_DESTROY_BY_RCU is unsupported
-#
-	depends on EMBEDDED && !SMP && !SPARSEMEM
+	depends on EMBEDDED && !SPARSEMEM
 	bool "SLOB (Simple Allocator)"
 	help
 	   SLOB replaces the SLAB allocator with a drastically simpler
-	   allocator. SLOB is more space efficient that SLAB but does not
+	   allocator. SLOB is more space efficient than SLAB but does not
 	   scale well (single lock for all operations) and is also highly
 	   susceptible to fragmentation. SLUB can accomplish a higher object
 	   density. It is usually better to use SLUB instead of SLOB.
···
···
 	mutex_lock(&pm_mutex);
 	for (i = HIBERNATION_FIRST; i <= HIBERNATION_MAX; i++) {
-		if (!strncmp(buf, hibernation_modes[i], len)) {
+		if (len == strlen(hibernation_modes[i])
+		    && !strncmp(buf, hibernation_modes[i], len)) {
 			mode = i;
 			break;
 		}
···

kernel/power/main.c | +2 -2
···
 	len = p ? p - buf : n;

 	/* First, check if we are requested to hibernate */
-	if (!strncmp(buf, "disk", len)) {
+	if (len == 4 && !strncmp(buf, "disk", len)) {
 		error = hibernate();
 		return error ? error : n;
 	}

 	for (s = &pm_states[state]; state < PM_SUSPEND_MAX; s++, state++) {
-		if (*s && !strncmp(buf, *s, len))
+		if (*s && len == strlen(*s) && !strncmp(buf, *s, len))
 			break;
 	}
 	if (state < PM_SUSPEND_MAX && *s)
···
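Both hunks guard against prefix matches: with only strncmp(buf, target, len), a short write such as "d" already compares equal to "disk" because the comparison stops after len bytes. A small userspace illustration of the difference:

	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		const char *buf = "d";		/* what userspace wrote */
		size_t len = strlen(buf);

		/* old check: prefix match, accepts "d" -> prints 1 */
		printf("%d\n", !strncmp(buf, "disk", len));
		/* new check: length must match too, rejects "d" -> prints 0 */
		printf("%d\n", len == strlen("disk") && !strncmp(buf, "disk", len));
		return 0;
	}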
···
 static void anon_vma_ctor(void *data, struct kmem_cache *cachep,
 			  unsigned long flags)
 {
-	if (flags & SLAB_CTOR_CONSTRUCTOR) {
-		struct anon_vma *anon_vma = data;
+	struct anon_vma *anon_vma = data;

-		spin_lock_init(&anon_vma->lock);
-		INIT_LIST_HEAD(&anon_vma->head);
-	}
+	spin_lock_init(&anon_vma->lock);
+	INIT_LIST_HEAD(&anon_vma->head);
 }

 void __init anon_vma_init(void)
···
 }

 /**
+ * page_set_anon_rmap - sanity check anonymous rmap addition
+ * @page:	the page to add the mapping to
+ * @vma:	the vm area in which the mapping is added
+ * @address:	the user virtual address mapped
+ */
+static void __page_check_anon_rmap(struct page *page,
+	struct vm_area_struct *vma, unsigned long address)
+{
+#ifdef CONFIG_DEBUG_VM
+	/*
+	 * The page's anon-rmap details (mapping and index) are guaranteed to
+	 * be set up correctly at this point.
+	 *
+	 * We have exclusion against page_add_anon_rmap because the caller
+	 * always holds the page locked, except if called from page_dup_rmap,
+	 * in which case the page is already known to be setup.
+	 *
+	 * We have exclusion against page_add_new_anon_rmap because those pages
+	 * are initially only visible via the pagetables, and the pte is locked
+	 * over the call to page_add_new_anon_rmap.
+	 */
+	struct anon_vma *anon_vma = vma->anon_vma;
+	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
+	BUG_ON(page->mapping != (struct address_space *)anon_vma);
+	BUG_ON(page->index != linear_page_index(vma, address));
+#endif
+}
+
+/**
  * page_add_anon_rmap - add pte mapping to an anonymous page
  * @page:	the page to add the mapping to
  * @vma:	the vm area in which the mapping is added
  * @address:	the user virtual address mapped
  *
- * The caller needs to hold the pte lock.
+ * The caller needs to hold the pte lock and the page must be locked.
  */
 void page_add_anon_rmap(struct page *page,
 	struct vm_area_struct *vma, unsigned long address)
 {
+	VM_BUG_ON(!PageLocked(page));
+	VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end);
 	if (atomic_inc_and_test(&page->_mapcount))
 		__page_set_anon_rmap(page, vma, address);
-	/* else checking page index and mapping is racy */
+	else
+		__page_check_anon_rmap(page, vma, address);
 }

 /*
···
  *
  * Same as page_add_anon_rmap but must only be called on *new* pages.
  * This means the inc-and-test can be bypassed.
+ * Page does not have to be locked.
  */
 void page_add_new_anon_rmap(struct page *page,
 	struct vm_area_struct *vma, unsigned long address)
 {
+	BUG_ON(address < vma->vm_start || address >= vma->vm_end);
 	atomic_set(&page->_mapcount, 0); /* elevate count by 1 (starts at -1) */
 	__page_set_anon_rmap(page, vma, address);
 }
···
 	if (atomic_inc_and_test(&page->_mapcount))
 		__inc_zone_page_state(page, NR_FILE_MAPPED);
 }
+
+#ifdef CONFIG_DEBUG_VM
+/**
+ * page_dup_rmap - duplicate pte mapping to a page
+ * @page:	the page to add the mapping to
+ *
+ * For copy_page_range only: minimal extract from page_add_file_rmap /
+ * page_add_anon_rmap, avoiding unnecessary tests (already checked) so it's
+ * quicker.
+ *
+ * The caller needs to hold the pte lock.
+ */
+void page_dup_rmap(struct page *page, struct vm_area_struct *vma, unsigned long address)
+{
+	BUG_ON(page_mapcount(page) == 0);
+	if (PageAnon(page))
+		__page_check_anon_rmap(page, vma, address);
+	atomic_inc(&page->_mapcount);
+}
+#endif

 /**
  * page_remove_rmap - take down pte mapping from a page
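The new checks mix BUG_ON() with VM_BUG_ON(); the latter is the cheap-by-default flavour. Roughly, paraphrasing the mm headers of this era, it expands to:

	#ifdef CONFIG_DEBUG_VM
	#define VM_BUG_ON(cond)		BUG_ON(cond)
	#else
	#define VM_BUG_ON(cond)		do { } while (0)
	#endif

so the assertions in the hot add/remove paths cost nothing unless CONFIG_DEBUG_VM is enabled, while the body of __page_check_anon_rmap() is guarded by the same config option.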