Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

mm: support THPs in zero_user_segments

We can only kmap() one subpage of a THP at a time, so loop over all
relevant subpages, skipping ones which don't need to be zeroed. This is
too large to inline when THPs are enabled and we actually need highmem, so
put it in highmem.c.

[willy@infradead.org: start1 was allowed to be less than start2]

Link: https://lkml.kernel.org/r/20201124041507.28996-1-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Yang Shi <shy828301@gmail.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Zi Yan <ziy@nvidia.com>
Cc: Song Liu <songliubraving@fb.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Naresh Kamboju <naresh.kamboju@linaro.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Authored by Matthew Wilcox (Oracle); committed by Linus Torvalds.
0060ef3b 5e5dda81

+67 -4
+15 -4
include/linux/highmem.h
··· 284 284 kunmap_atomic(kaddr); 285 285 } 286 286 287 + /* 288 + * If we pass in a base or tail page, we can zero up to PAGE_SIZE. 289 + * If we pass in a head page, we can zero up to the size of the compound page. 290 + */ 291 + #if defined(CONFIG_HIGHMEM) && defined(CONFIG_TRANSPARENT_HUGEPAGE) 292 + void zero_user_segments(struct page *page, unsigned start1, unsigned end1, 293 + unsigned start2, unsigned end2); 294 + #else /* !HIGHMEM || !TRANSPARENT_HUGEPAGE */ 287 295 static inline void zero_user_segments(struct page *page, 288 - unsigned start1, unsigned end1, 289 - unsigned start2, unsigned end2) 296 + unsigned start1, unsigned end1, 297 + unsigned start2, unsigned end2) 290 298 { 291 299 void *kaddr = kmap_atomic(page); 300 + unsigned int i; 292 301 293 - BUG_ON(end1 > PAGE_SIZE || end2 > PAGE_SIZE); 302 + BUG_ON(end1 > page_size(page) || end2 > page_size(page)); 294 303 295 304 if (end1 > start1) 296 305 memset(kaddr + start1, 0, end1 - start1); ··· 308 299 memset(kaddr + start2, 0, end2 - start2); 309 300 310 301 kunmap_atomic(kaddr); 311 - flush_dcache_page(page); 302 + for (i = 0; i < compound_nr(page); i++) 303 + flush_dcache_page(page + i); 312 304 } 305 + #endif /* !HIGHMEM || !TRANSPARENT_HUGEPAGE */ 313 306 314 307 static inline void zero_user_segment(struct page *page, 315 308 unsigned start, unsigned end)
+52
mm/highmem.c
··· 369 369 } 370 370 371 371 EXPORT_SYMBOL(kunmap_high); 372 + 373 + #ifdef CONFIG_TRANSPARENT_HUGEPAGE 374 + void zero_user_segments(struct page *page, unsigned start1, unsigned end1, 375 + unsigned start2, unsigned end2) 376 + { 377 + unsigned int i; 378 + 379 + BUG_ON(end1 > page_size(page) || end2 > page_size(page)); 380 + 381 + for (i = 0; i < compound_nr(page); i++) { 382 + void *kaddr = NULL; 383 + 384 + if (start1 < PAGE_SIZE || start2 < PAGE_SIZE) 385 + kaddr = kmap_atomic(page + i); 386 + 387 + if (start1 >= PAGE_SIZE) { 388 + start1 -= PAGE_SIZE; 389 + end1 -= PAGE_SIZE; 390 + } else { 391 + unsigned this_end = min_t(unsigned, end1, PAGE_SIZE); 392 + 393 + if (end1 > start1) 394 + memset(kaddr + start1, 0, this_end - start1); 395 + end1 -= this_end; 396 + start1 = 0; 397 + } 398 + 399 + if (start2 >= PAGE_SIZE) { 400 + start2 -= PAGE_SIZE; 401 + end2 -= PAGE_SIZE; 402 + } else { 403 + unsigned this_end = min_t(unsigned, end2, PAGE_SIZE); 404 + 405 + if (end2 > start2) 406 + memset(kaddr + start2, 0, this_end - start2); 407 + end2 -= this_end; 408 + start2 = 0; 409 + } 410 + 411 + if (kaddr) { 412 + kunmap_atomic(kaddr); 413 + flush_dcache_page(page + i); 414 + } 415 + 416 + if (!end1 && !end2) 417 + break; 418 + } 419 + 420 + BUG_ON((start1 | start2 | end1 | end2) != 0); 421 + } 422 + EXPORT_SYMBOL(zero_user_segments); 423 + #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ 372 424 #endif /* CONFIG_HIGHMEM */ 373 425 374 426 #if defined(HASHED_PAGE_VIRTUAL)