Merge tag 'libnvdimm-fixes-4.19-rc8' of git://git.kernel.org/pub/scm/linux/kernel/git/nvdimm/nvdimm

Dan writes:
"libnvdimm/dax 4.19-rc8

* Fix a livelock in dax_layout_busy_page() present since v4.18. The
lockup triggers when truncating an actively mapped huge page out of
a mapping pinned for direct-I/O.

* Fix mprotect() clobbers of _PAGE_DEVMAP. Broken since v4.5:
mprotect() clears this flag that is needed to communicate the
liveness of device pages to the get_user_pages() path."

* tag 'libnvdimm-fixes-4.19-rc8' of git://git.kernel.org/pub/scm/linux/kernel/git/nvdimm/nvdimm:
mm: Preserve _PAGE_DEVMAP across mprotect() calls
filesystem-dax: Fix dax_layout_busy_page() livelock

Changed files
+14 -5
arch
powerpc
include
asm
book3s
x86
include
fs
+2 -2
arch/powerpc/include/asm/book3s/64/pgtable.h
··· 114 114 */ 115 115 #define _HPAGE_CHG_MASK (PTE_RPN_MASK | _PAGE_HPTEFLAGS | _PAGE_DIRTY | \ 116 116 _PAGE_ACCESSED | H_PAGE_THP_HUGE | _PAGE_PTE | \ 117 - _PAGE_SOFT_DIRTY) 117 + _PAGE_SOFT_DIRTY | _PAGE_DEVMAP) 118 118 /* 119 119 * user access blocked by key 120 120 */ ··· 132 132 */ 133 133 #define _PAGE_CHG_MASK (PTE_RPN_MASK | _PAGE_HPTEFLAGS | _PAGE_DIRTY | \ 134 134 _PAGE_ACCESSED | _PAGE_SPECIAL | _PAGE_PTE | \ 135 - _PAGE_SOFT_DIRTY) 135 + _PAGE_SOFT_DIRTY | _PAGE_DEVMAP) 136 136 137 137 #define H_PTE_PKEY (H_PTE_PKEY_BIT0 | H_PTE_PKEY_BIT1 | H_PTE_PKEY_BIT2 | \ 138 138 H_PTE_PKEY_BIT3 | H_PTE_PKEY_BIT4)
+1 -1
arch/x86/include/asm/pgtable_types.h
··· 124 124 */ 125 125 #define _PAGE_CHG_MASK (PTE_PFN_MASK | _PAGE_PCD | _PAGE_PWT | \ 126 126 _PAGE_SPECIAL | _PAGE_ACCESSED | _PAGE_DIRTY | \ 127 - _PAGE_SOFT_DIRTY) 127 + _PAGE_SOFT_DIRTY | _PAGE_DEVMAP) 128 128 #define _HPAGE_CHG_MASK (_PAGE_CHG_MASK | _PAGE_PSE) 129 129 130 130 /*
+11 -2
fs/dax.c
··· 666 666 while (index < end && pagevec_lookup_entries(&pvec, mapping, index, 667 667 min(end - index, (pgoff_t)PAGEVEC_SIZE), 668 668 indices)) { 669 + pgoff_t nr_pages = 1; 670 + 669 671 for (i = 0; i < pagevec_count(&pvec); i++) { 670 672 struct page *pvec_ent = pvec.pages[i]; 671 673 void *entry; ··· 682 680 683 681 xa_lock_irq(&mapping->i_pages); 684 682 entry = get_unlocked_mapping_entry(mapping, index, NULL); 685 - if (entry) 683 + if (entry) { 686 684 page = dax_busy_page(entry); 685 + /* 686 + * Account for multi-order entries at 687 + * the end of the pagevec. 688 + */ 689 + if (i + 1 >= pagevec_count(&pvec)) 690 + nr_pages = 1UL << dax_radix_order(entry); 691 + } 687 692 put_unlocked_mapping_entry(mapping, index, entry); 688 693 xa_unlock_irq(&mapping->i_pages); 689 694 if (page) ··· 705 696 */ 706 697 pagevec_remove_exceptionals(&pvec); 707 698 pagevec_release(&pvec); 708 - index++; 699 + index += nr_pages; 709 700 710 701 if (page) 711 702 break;