Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

mm/mempool: minor coding style tweaks

Various coding style tweaks to various files under mm/

[daizhiyuan@phytium.com.cn: mm/swapfile: minor coding style tweaks]
Link: https://lkml.kernel.org/r/1614223624-16055-1-git-send-email-daizhiyuan@phytium.com.cn
[daizhiyuan@phytium.com.cn: mm/sparse: minor coding style tweaks]
Link: https://lkml.kernel.org/r/1614227288-19363-1-git-send-email-daizhiyuan@phytium.com.cn
[daizhiyuan@phytium.com.cn: mm/vmscan: minor coding style tweaks]
Link: https://lkml.kernel.org/r/1614227649-19853-1-git-send-email-daizhiyuan@phytium.com.cn
[daizhiyuan@phytium.com.cn: mm/compaction: minor coding style tweaks]
Link: https://lkml.kernel.org/r/1614228218-20770-1-git-send-email-daizhiyuan@phytium.com.cn
[daizhiyuan@phytium.com.cn: mm/oom_kill: minor coding style tweaks]
Link: https://lkml.kernel.org/r/1614228360-21168-1-git-send-email-daizhiyuan@phytium.com.cn
[daizhiyuan@phytium.com.cn: mm/shmem: minor coding style tweaks]
Link: https://lkml.kernel.org/r/1614228504-21491-1-git-send-email-daizhiyuan@phytium.com.cn
[daizhiyuan@phytium.com.cn: mm/page_alloc: minor coding style tweaks]
Link: https://lkml.kernel.org/r/1614228613-21754-1-git-send-email-daizhiyuan@phytium.com.cn
[daizhiyuan@phytium.com.cn: mm/filemap: minor coding style tweaks]
Link: https://lkml.kernel.org/r/1614228936-22337-1-git-send-email-daizhiyuan@phytium.com.cn
[daizhiyuan@phytium.com.cn: mm/mlock: minor coding style tweaks]
Link: https://lkml.kernel.org/r/1613956588-2453-1-git-send-email-daizhiyuan@phytium.com.cn
[daizhiyuan@phytium.com.cn: mm/frontswap: minor coding style tweaks]
Link: https://lkml.kernel.org/r/1613962668-15045-1-git-send-email-daizhiyuan@phytium.com.cn
[daizhiyuan@phytium.com.cn: mm/vmalloc: minor coding style tweaks]
Link: https://lkml.kernel.org/r/1613963379-15988-1-git-send-email-daizhiyuan@phytium.com.cn
[daizhiyuan@phytium.com.cn: mm/memory_hotplug: minor coding style tweaks]
Link: https://lkml.kernel.org/r/1613971784-24878-1-git-send-email-daizhiyuan@phytium.com.cn
[daizhiyuan@phytium.com.cn: mm/mempolicy: minor coding style tweaks]
Link: https://lkml.kernel.org/r/1613972228-25501-1-git-send-email-daizhiyuan@phytium.com.cn

Link: https://lkml.kernel.org/r/1614222374-13805-1-git-send-email-daizhiyuan@phytium.com.cn
Signed-off-by: Zhiyuan Dai <daizhiyuan@phytium.com.cn>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

authored by Zhiyuan Dai and committed by Linus Torvalds
68d68ff6 9727688d

+27 -23
+1 -1
mm/compaction.c
··· 2885 2885 */ 2886 2886 static int kcompactd(void *p) 2887 2887 { 2888 - pg_data_t *pgdat = (pg_data_t*)p; 2888 + pg_data_t *pgdat = (pg_data_t *)p; 2889 2889 struct task_struct *tsk = current; 2890 2890 unsigned int proactive_defer = 0; 2891 2891
+4 -4
mm/filemap.c
··· 3267 3267 3268 3268 /* This is used for a general mmap of a disk file */ 3269 3269 3270 - int generic_file_mmap(struct file * file, struct vm_area_struct * vma) 3270 + int generic_file_mmap(struct file *file, struct vm_area_struct *vma) 3271 3271 { 3272 3272 struct address_space *mapping = file->f_mapping; 3273 3273 ··· 3292 3292 { 3293 3293 return VM_FAULT_SIGBUS; 3294 3294 } 3295 - int generic_file_mmap(struct file * file, struct vm_area_struct * vma) 3295 + int generic_file_mmap(struct file *file, struct vm_area_struct *vma) 3296 3296 { 3297 3297 return -ENOSYS; 3298 3298 } 3299 - int generic_file_readonly_mmap(struct file * file, struct vm_area_struct * vma) 3299 + int generic_file_readonly_mmap(struct file *file, struct vm_area_struct *vma) 3300 3300 { 3301 3301 return -ENOSYS; 3302 3302 } ··· 3724 3724 ssize_t __generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from) 3725 3725 { 3726 3726 struct file *file = iocb->ki_filp; 3727 - struct address_space * mapping = file->f_mapping; 3727 + struct address_space *mapping = file->f_mapping; 3728 3728 struct inode *inode = mapping->host; 3729 3729 ssize_t written = 0; 3730 3730 ssize_t err;
+8 -4
mm/frontswap.c
··· 60 60 static u64 frontswap_failed_stores; 61 61 static u64 frontswap_invalidates; 62 62 63 - static inline void inc_frontswap_loads(void) { 63 + static inline void inc_frontswap_loads(void) 64 + { 64 65 data_race(frontswap_loads++); 65 66 } 66 - static inline void inc_frontswap_succ_stores(void) { 67 + static inline void inc_frontswap_succ_stores(void) 68 + { 67 69 data_race(frontswap_succ_stores++); 68 70 } 69 - static inline void inc_frontswap_failed_stores(void) { 71 + static inline void inc_frontswap_failed_stores(void) 72 + { 70 73 data_race(frontswap_failed_stores++); 71 74 } 72 - static inline void inc_frontswap_invalidates(void) { 75 + static inline void inc_frontswap_invalidates(void) 76 + { 73 77 data_race(frontswap_invalidates++); 74 78 } 75 79 #else
+1 -1
mm/memory_hotplug.c
··· 834 834 return movable_node_enabled ? movable_zone : kernel_zone; 835 835 } 836 836 837 - struct zone * zone_for_pfn_range(int online_type, int nid, unsigned start_pfn, 837 + struct zone *zone_for_pfn_range(int online_type, int nid, unsigned start_pfn, 838 838 unsigned long nr_pages) 839 839 { 840 840 if (online_type == MMOP_ONLINE_KERNEL)
+2 -2
mm/mempolicy.c
··· 330 330 else if (pol->flags & MPOL_F_RELATIVE_NODES) 331 331 mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes); 332 332 else { 333 - nodes_remap(tmp, pol->v.nodes,pol->w.cpuset_mems_allowed, 333 + nodes_remap(tmp, pol->v.nodes, pol->w.cpuset_mems_allowed, 334 334 *nodes); 335 335 pol->w.cpuset_mems_allowed = *nodes; 336 336 } ··· 1161 1161 1162 1162 tmp = *from; 1163 1163 while (!nodes_empty(tmp)) { 1164 - int s,d; 1164 + int s, d; 1165 1165 int source = NUMA_NO_NODE; 1166 1166 int dest = 0; 1167 1167
+1 -1
mm/mempool.c
··· 251 251 mempool_t *mempool_create(int min_nr, mempool_alloc_t *alloc_fn, 252 252 mempool_free_t *free_fn, void *pool_data) 253 253 { 254 - return mempool_create_node(min_nr,alloc_fn,free_fn, pool_data, 254 + return mempool_create_node(min_nr, alloc_fn, free_fn, pool_data, 255 255 GFP_KERNEL, NUMA_NO_NODE); 256 256 } 257 257 EXPORT_SYMBOL(mempool_create);
+2 -2
mm/mlock.c
··· 559 559 vm_flags_t flags) 560 560 { 561 561 unsigned long nstart, end, tmp; 562 - struct vm_area_struct * vma, * prev; 562 + struct vm_area_struct *vma, *prev; 563 563 int error; 564 564 565 565 VM_BUG_ON(offset_in_page(start)); ··· 737 737 */ 738 738 static int apply_mlockall_flags(int flags) 739 739 { 740 - struct vm_area_struct * vma, * prev = NULL; 740 + struct vm_area_struct *vma, *prev = NULL; 741 741 vm_flags_t to_add = 0; 742 742 743 743 current->mm->def_flags &= VM_LOCKED_CLEAR_MASK;
+1 -1
mm/oom_kill.c
··· 993 993 if (oom_group) { 994 994 mem_cgroup_print_oom_group(oom_group); 995 995 mem_cgroup_scan_tasks(oom_group, oom_kill_memcg_member, 996 - (void*)message); 996 + (void *)message); 997 997 mem_cgroup_put(oom_group); 998 998 } 999 999 }
+1 -1
mm/page_alloc.c
··· 8808 8808 ret = __alloc_contig_migrate_range(&cc, start, end); 8809 8809 if (ret && ret != -EBUSY) 8810 8810 goto done; 8811 - ret =0; 8811 + ret = 0; 8812 8812 8813 8813 /* 8814 8814 * Pages from [start, end) are within a MAX_ORDER_NR_PAGES
+1 -1
mm/shmem.c
··· 3508 3508 } 3509 3509 } 3510 3510 if (*this_char) { 3511 - char *value = strchr(this_char,'='); 3511 + char *value = strchr(this_char, '='); 3512 3512 size_t len = 0; 3513 3513 int err; 3514 3514
+1 -1
mm/sparse.c
··· 257 257 if (unlikely(!mem_section)) { 258 258 unsigned long size, align; 259 259 260 - size = sizeof(struct mem_section*) * NR_SECTION_ROOTS; 260 + size = sizeof(struct mem_section *) * NR_SECTION_ROOTS; 261 261 align = 1 << (INTERNODE_CACHE_SHIFT); 262 262 mem_section = memblock_alloc(size, align); 263 263 if (!mem_section)
+2 -2
mm/swapfile.c
··· 2780 2780 unsigned int bytes, inuse; 2781 2781 2782 2782 if (si == SEQ_START_TOKEN) { 2783 - seq_puts(swap,"Filename\t\t\t\tType\t\tSize\t\tUsed\t\tPriority\n"); 2783 + seq_puts(swap, "Filename\t\t\t\tType\t\tSize\t\tUsed\t\tPriority\n"); 2784 2784 return 0; 2785 2785 } 2786 2786 ··· 3284 3284 sizeof(long), 3285 3285 GFP_KERNEL); 3286 3286 3287 - if (p->bdev &&(swap_flags & SWAP_FLAG_DISCARD) && swap_discardable(p)) { 3287 + if (p->bdev && (swap_flags & SWAP_FLAG_DISCARD) && swap_discardable(p)) { 3288 3288 /* 3289 3289 * When discard is enabled for swap with no particular 3290 3290 * policy flagged, we set all swap discard flags here in
+1 -1
mm/vmalloc.c
··· 3083 3083 * 64b systems should always have either DMA or DMA32 zones. For others 3084 3084 * GFP_DMA32 should do the right thing and use the normal zone. 3085 3085 */ 3086 - #define GFP_VMALLOC32 GFP_DMA32 | GFP_KERNEL 3086 + #define GFP_VMALLOC32 (GFP_DMA32 | GFP_KERNEL) 3087 3087 #endif 3088 3088 3089 3089 /**
+1 -1
mm/vmscan.c
··· 4059 4059 { 4060 4060 unsigned int alloc_order, reclaim_order; 4061 4061 unsigned int highest_zoneidx = MAX_NR_ZONES - 1; 4062 - pg_data_t *pgdat = (pg_data_t*)p; 4062 + pg_data_t *pgdat = (pg_data_t *)p; 4063 4063 struct task_struct *tsk = current; 4064 4064 const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id); 4065 4065