···1017101710181018 /* If we are below the current region then a new region is required.10191019 * Subtle, allocate a new region at the position but make it zero10201020- * size such that we can guarentee to record the reservation. */10201020+ * size such that we can guarantee to record the reservation. */10211021 if (&rg->link == head || t < rg->from) {10221022 nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);10231023 if (!nrg)
+1-1
mm/memory.c
···27132713 return 0;2714271427152715 down_read(&mm->mmap_sem);27162716- /* ignore errors, just check how much was sucessfully transfered */27162716+ /* ignore errors, just check how much was successfully transferred */27172717 while (len) {27182718 int bytes, ret, offset;27192719 void *maddr;
+1-1
mm/memory_hotplug.c
···121121 err = __add_section(zone, i << PFN_SECTION_SHIFT);122122123123 /*124124- * EEXIST is finally dealed with by ioresource collision124124+ * EEXIST is finally dealt with by ioresource collision125125 * check. see add_memory() => register_memory_resource()126126 * Warning will be printed if there is collision.127127 */
+1-1
mm/mempool.c
···299299300300/*301301 * A commonly used alloc and free fn that kmalloc/kfrees the amount of memory302302- * specfied by pool_data302302+ * specified by pool_data303303 */304304void *mempool_kmalloc(gfp_t gfp_mask, void *pool_data)305305{
+1-1
mm/page-writeback.c
···989989 * mapping is pinned by the vma's ->vm_file reference.990990 *991991 * We take care to handle the case where the page was truncated from the992992- * mapping by re-checking page_mapping() insode tree_lock.992992+ * mapping by re-checking page_mapping() inside tree_lock.993993 */994994int __set_page_dirty_nobuffers(struct page *page)995995{
+4-4
mm/page_alloc.c
···123123124124#ifdef CONFIG_ARCH_POPULATES_NODE_MAP125125 /*126126- * MAX_ACTIVE_REGIONS determines the maxmimum number of distinct126126+ * MAX_ACTIVE_REGIONS determines the maximum number of distinct127127 * ranges of memory (RAM) that may be registered with add_active_range().128128 * Ranges passed to add_active_range() will be merged if possible129129 * so the number of times add_active_range() can be called is···12601260 * skip over zones that are not allowed by the cpuset, or that have12611261 * been recently (in last second) found to be nearly full. See further12621262 * comments in mmzone.h. Reduces cache footprint of zonelist scans12631263- * that have to skip over alot of full or unallowed zones.12631263+ * that have to skip over a lot of full or unallowed zones.12641264 *12651265 * If the zonelist cache is present in the passed in zonelist, then12661266 * returns a pointer to the allowed node mask (either the current···23582358 __build_all_zonelists(NULL);23592359 cpuset_init_current_mems_allowed();23602360 } else {23612361- /* we have to stop all cpus to guaranntee there is no user23612361+ /* we have to stop all cpus to guarantee there is no user23622362 of zonelist */23632363 stop_machine_run(__build_all_zonelists, NULL, NR_CPUS);23642364 /* cpuset refresh routine should be here */···2864286428652865/*28662866 * Basic iterator support. Return the next active range of PFNs for a node28672867- * Note: nid == MAX_NUMNODES returns next region regardles of node28672867+ * Note: nid == MAX_NUMNODES returns next region regardless of node28682868 */28692869static int __meminit next_active_region_index_in_nid(int index, int nid)28702870{
+1-1
mm/prio_tree.c
···3434 * Radix priority search tree for address_space->i_mmap3535 *3636 * For each vma that map a unique set of file pages i.e., unique [radix_index,3737- * heap_index] value, we have a corresponing priority search tree node. If3737+ * heap_index] value, we have a corresponding priority search tree node. If3838 * multiple vmas have identical [radix_index, heap_index] value, then one of3939 * them is used as a tree node and others are stored in a vm_set list. The tree4040 * node points to the first vma (head) of the list using vm_set.head.
+3-3
mm/slab.c
···2626 * initialized objects.2727 *2828 * This means, that your constructor is used only for newly allocated2929- * slabs and you must pass objects with the same intializations to2929+ * slabs and you must pass objects with the same initializations to3030 * kmem_cache_free.3131 *3232 * Each cache can only support one memory type (GFP_DMA, GFP_HIGHMEM,···13691369 * structure is usually allocated from kmem_cache_create() and13701370 * gets destroyed at kmem_cache_destroy().13711371 */13721372- /* fall thru */13721372+ /* fall through */13731373#endif13741374 case CPU_UP_CANCELED:13751375 case CPU_UP_CANCELED_FROZEN:···38063806EXPORT_SYMBOL_GPL(kmem_cache_name);3807380738083808/*38093809- * This initializes kmem_list3 or resizes varioius caches for all nodes.38093809+ * This initializes kmem_list3 or resizes various caches for all nodes.38103810 */38113811static int alloc_kmemlist(struct kmem_cache *cachep)38123812{
+1-1
mm/swap.c
···55 */6677/*88- * This file contains the default values for the opereation of the88+ * This file contains the default values for the operation of the99 * Linux VM subsystem. Fine-tuning documentation can be found in1010 * Documentation/sysctl/vm.txt.1111 * Started 18.12.91
+3-3
mm/vmalloc.c
···247247EXPORT_SYMBOL_GPL(__get_vm_area);248248249249/**250250- * get_vm_area - reserve a contingous kernel virtual area250250+ * get_vm_area - reserve a contiguous kernel virtual area251251 * @size: size of the area252252 * @flags: %VM_IOREMAP for I/O mappings or VM_ALLOC253253 *···303303}304304305305/**306306- * remove_vm_area - find and remove a contingous kernel virtual area306306+ * remove_vm_area - find and remove a contiguous kernel virtual area307307 * @addr: base address308308 *309309 * Search for the kernel VM area starting at @addr, and remove it.···364364 * vfree - release memory allocated by vmalloc()365365 * @addr: memory base address366366 *367367- * Free the virtually contiguous memory area starting at @addr, as367367+ * Free the virtually contiguous memory area starting at @addr, as368368 * obtained from vmalloc(), vmalloc_32() or __vmalloc(). If @addr is369369 * NULL, no operation is performed.370370 *
+1-1
mm/vmscan.c
···141141 * percentages of the lru and ageable caches. This should balance the seeks142142 * generated by these structures.143143 *144144- * If the vm encounted mapped pages on the LRU it increase the pressure on144144+ * If the vm encountered mapped pages on the LRU it increases the pressure on145145 * slab to avoid swapping.146146 *147147 * We do weird things to avoid (scanned*seeks*entries) overflowing 32 bits.