mm/page_alloc.c: don't cache `current' in a local

Caching `current' in a local variable is old-fashioned and unneeded.
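
For context (not part of this patch): `current' is just a macro around a
cheap per-CPU read on modern architectures, so caching it in a local
task_struct pointer buys nothing.  A rough sketch, approximating the x86
definition in arch/x86/include/asm/current.h of this era:

	/* sketch only -- approximate x86 `current', not code from this patch */
	struct task_struct;

	DECLARE_PER_CPU(struct task_struct *, current_task);

	static __always_inline struct task_struct *get_current(void)
	{
		/* one per-CPU load; the compiler can keep it in a register */
		return percpu_read_stable(current_task);
	}

	#define current get_current()

With that, `current->flags |= PF_MEMALLOC;' generates much the same code as
the tsk/p-cached version, minus the extra local.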

akpm:/usr/src/25> size mm/page_alloc.o
   text    data     bss     dec     hex filename
  39884 1241317   18808 1300009  13d629 mm/page_alloc.o (before)
  39838 1241317   18808 1299963  13d5fb mm/page_alloc.o (after)

Acked-by: David Rientjes <rientjes@google.com>
Acked-by: Mel Gorman <mel@csn.ul.ie>
Cc: Hugh Dickins <hughd@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

 mm/page_alloc.c | 24 ++++++++++--------------
 1 file changed, 10 insertions(+), 14 deletions(-)
mm/page_alloc.c
···
 	bool sync_migration)
 {
 	struct page *page;
-	struct task_struct *tsk = current;

 	if (!order || compaction_deferred(preferred_zone))
 		return NULL;

-	tsk->flags |= PF_MEMALLOC;
+	current->flags |= PF_MEMALLOC;
 	*did_some_progress = try_to_compact_pages(zonelist, order, gfp_mask,
 						nodemask, sync_migration);
-	tsk->flags &= ~PF_MEMALLOC;
+	current->flags &= ~PF_MEMALLOC;
 	if (*did_some_progress != COMPACT_SKIPPED) {

 		/* Page migration frees to the PCP lists but we want merging */
···
 {
 	struct page *page = NULL;
 	struct reclaim_state reclaim_state;
-	struct task_struct *p = current;
 	bool drained = false;

 	cond_resched();

 	/* We now go into synchronous reclaim */
 	cpuset_memory_pressure_bump();
-	p->flags |= PF_MEMALLOC;
+	current->flags |= PF_MEMALLOC;
 	lockdep_set_current_reclaim_state(gfp_mask);
 	reclaim_state.reclaimed_slab = 0;
-	p->reclaim_state = &reclaim_state;
+	current->reclaim_state = &reclaim_state;

 	*did_some_progress = try_to_free_pages(zonelist, order, gfp_mask, nodemask);

-	p->reclaim_state = NULL;
+	current->reclaim_state = NULL;
 	lockdep_clear_current_reclaim_state();
-	p->flags &= ~PF_MEMALLOC;
+	current->flags &= ~PF_MEMALLOC;

 	cond_resched();
···
 static inline int
 gfp_to_alloc_flags(gfp_t gfp_mask)
 {
-	struct task_struct *p = current;
 	int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET;
 	const gfp_t wait = gfp_mask & __GFP_WAIT;
···
 		 * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
 		 */
 		alloc_flags &= ~ALLOC_CPUSET;
-	} else if (unlikely(rt_task(p)) && !in_interrupt())
+	} else if (unlikely(rt_task(current)) && !in_interrupt())
 		alloc_flags |= ALLOC_HARDER;

 	if (likely(!(gfp_mask & __GFP_NOMEMALLOC))) {
 		if (!in_interrupt() &&
-		    ((p->flags & PF_MEMALLOC) ||
+		    ((current->flags & PF_MEMALLOC) ||
 		     unlikely(test_thread_flag(TIF_MEMDIE))))
 			alloc_flags |= ALLOC_NO_WATERMARKS;
 	}
···
 	int alloc_flags;
 	unsigned long pages_reclaimed = 0;
 	unsigned long did_some_progress;
-	struct task_struct *p = current;
 	bool sync_migration = false;

 	/*
···
 		goto nopage;

 	/* Avoid recursion of direct reclaim */
-	if (p->flags & PF_MEMALLOC)
+	if (current->flags & PF_MEMALLOC)
 		goto nopage;

 	/* Avoid allocations with no watermarks from looping endlessly */
···
 	if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit()) {
 		printk(KERN_WARNING "%s: page allocation failure."
 			" order:%d, mode:0x%x\n",
-			p->comm, order, gfp_mask);
+			current->comm, order, gfp_mask);
 		dump_stack();
 		show_mem();
 	}