Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

mm: check the return value of lookup_page_ext for all call sites

Per the discussion with Joonsoo Kim [1], we need to check the return value
of lookup_page_ext() for all call sites since it might return NULL in
some cases, although it is unlikely, i.e. memory hotplug.

Tested with ltp with "page_owner=0".

[1] http://lkml.kernel.org/r/20160519002809.GA10245@js1304-P5Q-DELUXE

[akpm@linux-foundation.org: fix build-breaking typos]
[arnd@arndb.de: fix build problems from lookup_page_ext]
Link: http://lkml.kernel.org/r/6285269.2CksypHdYp@wuerfel
[akpm@linux-foundation.org: coding-style fixes]
Link: http://lkml.kernel.org/r/1464023768-31025-1-git-send-email-yang.shi@linaro.org
Signed-off-by: Yang Shi <yang.shi@linaro.org>
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

authored by

Yang Shi and committed by
Linus Torvalds
f86e4271 d8bae33d

+77 -8
+36 -7
include/linux/page_idle.h
··· 46 46 47 47 static inline bool page_is_young(struct page *page) 48 48 { 49 - return test_bit(PAGE_EXT_YOUNG, &lookup_page_ext(page)->flags); 49 + struct page_ext *page_ext = lookup_page_ext(page); 50 + 51 + if (unlikely(!page_ext)) 52 + return false; 53 + 54 + return test_bit(PAGE_EXT_YOUNG, &page_ext->flags); 50 55 } 51 56 52 57 static inline void set_page_young(struct page *page) 53 58 { 54 - set_bit(PAGE_EXT_YOUNG, &lookup_page_ext(page)->flags); 59 + struct page_ext *page_ext = lookup_page_ext(page); 60 + 61 + if (unlikely(!page_ext)) 62 + return; 63 + 64 + set_bit(PAGE_EXT_YOUNG, &page_ext->flags); 55 65 } 56 66 57 67 static inline bool test_and_clear_page_young(struct page *page) 58 68 { 59 - return test_and_clear_bit(PAGE_EXT_YOUNG, 60 - &lookup_page_ext(page)->flags); 69 + struct page_ext *page_ext = lookup_page_ext(page); 70 + 71 + if (unlikely(!page_ext)) 72 + return false; 73 + 74 + return test_and_clear_bit(PAGE_EXT_YOUNG, &page_ext->flags); 61 75 } 62 76 63 77 static inline bool page_is_idle(struct page *page) 64 78 { 65 - return test_bit(PAGE_EXT_IDLE, &lookup_page_ext(page)->flags); 79 + struct page_ext *page_ext = lookup_page_ext(page); 80 + 81 + if (unlikely(!page_ext)) 82 + return false; 83 + 84 + return test_bit(PAGE_EXT_IDLE, &page_ext->flags); 66 85 } 67 86 68 87 static inline void set_page_idle(struct page *page) 69 88 { 70 - set_bit(PAGE_EXT_IDLE, &lookup_page_ext(page)->flags); 89 + struct page_ext *page_ext = lookup_page_ext(page); 90 + 91 + if (unlikely(!page_ext)) 92 + return; 93 + 94 + set_bit(PAGE_EXT_IDLE, &page_ext->flags); 71 95 } 72 96 73 97 static inline void clear_page_idle(struct page *page) 74 98 { 75 - clear_bit(PAGE_EXT_IDLE, &lookup_page_ext(page)->flags); 99 + struct page_ext *page_ext = lookup_page_ext(page); 100 + 101 + if (unlikely(!page_ext)) 102 + return; 103 + 104 + clear_bit(PAGE_EXT_IDLE, &page_ext->flags); 76 105 } 77 106 #endif /* CONFIG_64BIT */ 78 107
+6
mm/page_alloc.c
··· 656 656 return; 657 657 658 658 page_ext = lookup_page_ext(page); 659 + if (unlikely(!page_ext)) 660 + return; 661 + 659 662 __set_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags); 660 663 661 664 INIT_LIST_HEAD(&page->lru); ··· 676 673 return; 677 674 678 675 page_ext = lookup_page_ext(page); 676 + if (unlikely(!page_ext)) 677 + return; 678 + 679 679 __clear_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags); 680 680 681 681 set_page_private(page, 0);
+26
mm/page_owner.c
··· 55 55 56 56 for (i = 0; i < (1 << order); i++) { 57 57 page_ext = lookup_page_ext(page + i); 58 + if (unlikely(!page_ext)) 59 + continue; 58 60 __clear_bit(PAGE_EXT_OWNER, &page_ext->flags); 59 61 } 60 62 } ··· 64 62 void __set_page_owner(struct page *page, unsigned int order, gfp_t gfp_mask) 65 63 { 66 64 struct page_ext *page_ext = lookup_page_ext(page); 65 + 67 66 struct stack_trace trace = { 68 67 .nr_entries = 0, 69 68 .max_entries = ARRAY_SIZE(page_ext->trace_entries), 70 69 .entries = &page_ext->trace_entries[0], 71 70 .skip = 3, 72 71 }; 72 + 73 + if (unlikely(!page_ext)) 74 + return; 73 75 74 76 save_stack_trace(&trace); 75 77 ··· 88 82 void __set_page_owner_migrate_reason(struct page *page, int reason) 89 83 { 90 84 struct page_ext *page_ext = lookup_page_ext(page); 85 + if (unlikely(!page_ext)) 86 + return; 91 87 92 88 page_ext->last_migrate_reason = reason; 93 89 } ··· 97 89 gfp_t __get_page_owner_gfp(struct page *page) 98 90 { 99 91 struct page_ext *page_ext = lookup_page_ext(page); 92 + if (unlikely(!page_ext)) 93 + /* 94 + * The caller just returns 0 if no valid gfp 95 + * So return 0 here too. 
96 + */ 97 + return 0; 100 98 101 99 return page_ext->gfp_mask; 102 100 } ··· 112 98 struct page_ext *old_ext = lookup_page_ext(oldpage); 113 99 struct page_ext *new_ext = lookup_page_ext(newpage); 114 100 int i; 101 + 102 + if (unlikely(!old_ext || !new_ext)) 103 + return; 115 104 116 105 new_ext->order = old_ext->order; 117 106 new_ext->gfp_mask = old_ext->gfp_mask; ··· 210 193 gfp_t gfp_mask = page_ext->gfp_mask; 211 194 int mt = gfpflags_to_migratetype(gfp_mask); 212 195 196 + if (unlikely(!page_ext)) { 197 + pr_alert("There is not page extension available.\n"); 198 + return; 199 + } 200 + 213 201 if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags)) { 214 202 pr_alert("page_owner info is not active (free page?)\n"); 215 203 return; ··· 273 251 } 274 252 275 253 page_ext = lookup_page_ext(page); 254 + if (unlikely(!page_ext)) 255 + continue; 276 256 277 257 /* 278 258 * Some pages could be missed by concurrent allocation or free, ··· 341 317 continue; 342 318 343 319 page_ext = lookup_page_ext(page); 320 + if (unlikely(!page_ext)) 321 + continue; 344 322 345 323 /* Maybe overraping zone */ 346 324 if (test_bit(PAGE_EXT_OWNER, &page_ext->flags))
+7 -1
mm/page_poison.c
··· 54 54 struct page_ext *page_ext; 55 55 56 56 page_ext = lookup_page_ext(page); 57 + if (unlikely(!page_ext)) 58 + return; 59 + 57 60 __set_bit(PAGE_EXT_DEBUG_POISON, &page_ext->flags); 58 61 } 59 62 ··· 65 62 struct page_ext *page_ext; 66 63 67 64 page_ext = lookup_page_ext(page); 65 + if (unlikely(!page_ext)) 66 + return; 67 + 68 68 __clear_bit(PAGE_EXT_DEBUG_POISON, &page_ext->flags); 69 69 } 70 70 ··· 76 70 struct page_ext *page_ext; 77 71 78 72 page_ext = lookup_page_ext(page); 79 - if (!page_ext) 73 + if (unlikely(!page_ext)) 80 74 return false; 81 75 82 76 return test_bit(PAGE_EXT_DEBUG_POISON, &page_ext->flags);
+2
mm/vmstat.c
··· 1061 1061 continue; 1062 1062 1063 1063 page_ext = lookup_page_ext(page); 1064 + if (unlikely(!page_ext)) 1065 + continue; 1064 1066 1065 1067 if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags)) 1066 1068 continue;