Merge tag 'libnvdimm-fixes-4.19-rc8' of git://git.kernel.org/pub/scm/linux/kernel/git/nvdimm/nvdimm

Dan writes:
"libnvdimm/dax 4.19-rc8

* Fix a livelock in dax_layout_busy_page() present since v4.18. The
lockup triggers when truncating an actively mapped huge page out of
a mapping pinned for direct-I/O.

* Fix mprotect() clobbering _PAGE_DEVMAP. Broken since v4.5,
mprotect() clears this flag, which is needed to communicate the
liveness of device pages to the get_user_pages() path."

* tag 'libnvdimm-fixes-4.19-rc8' of git://git.kernel.org/pub/scm/linux/kernel/git/nvdimm/nvdimm:
mm: Preserve _PAGE_DEVMAP across mprotect() calls
filesystem-dax: Fix dax_layout_busy_page() livelock

 arch/powerpc/include/asm/book3s/64/pgtable.h |  4 ++--
 arch/x86/include/asm/pgtable_types.h         |  2 +-
 fs/dax.c                                     | 13 +++++++++++--
 3 files changed, 14 insertions(+), 5 deletions(-)

diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -114,7 +114,7 @@
  */
 #define _HPAGE_CHG_MASK (PTE_RPN_MASK | _PAGE_HPTEFLAGS | _PAGE_DIRTY | \
 			 _PAGE_ACCESSED | H_PAGE_THP_HUGE | _PAGE_PTE | \
-			 _PAGE_SOFT_DIRTY)
+			 _PAGE_SOFT_DIRTY | _PAGE_DEVMAP)
 /*
  * user access blocked by key
  */
@@ -132,7 +132,7 @@
  */
 #define _PAGE_CHG_MASK (PTE_RPN_MASK | _PAGE_HPTEFLAGS | _PAGE_DIRTY | \
 			 _PAGE_ACCESSED | _PAGE_SPECIAL | _PAGE_PTE | \
-			 _PAGE_SOFT_DIRTY)
+			 _PAGE_SOFT_DIRTY | _PAGE_DEVMAP)
 
 #define H_PTE_PKEY  (H_PTE_PKEY_BIT0 | H_PTE_PKEY_BIT1 | H_PTE_PKEY_BIT2 | \
 		     H_PTE_PKEY_BIT3 | H_PTE_PKEY_BIT4)
diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
--- a/arch/x86/include/asm/pgtable_types.h
+++ b/arch/x86/include/asm/pgtable_types.h
@@ -124,7 +124,7 @@
  */
 #define _PAGE_CHG_MASK	(PTE_PFN_MASK | _PAGE_PCD | _PAGE_PWT |		\
 			 _PAGE_SPECIAL | _PAGE_ACCESSED | _PAGE_DIRTY |	\
-			 _PAGE_SOFT_DIRTY)
+			 _PAGE_SOFT_DIRTY | _PAGE_DEVMAP)
 #define _HPAGE_CHG_MASK	(_PAGE_CHG_MASK | _PAGE_PSE)
 
 /*
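
For context on why these masks matter: mprotect() rebuilds page protections
through pte_modify(), which preserves only the PTE bits listed in
_PAGE_CHG_MASK (or _HPAGE_CHG_MASK for huge pages) and takes every other bit
from the new protection, so a flag missing from the mask is silently dropped
on the next mprotect(). A minimal userspace sketch of that masking, using
made-up flag values rather than the real kernel constants:

#include <stdio.h>
#include <stdint.h>

/* Hypothetical flag values, for illustration only. */
#define PAGE_DIRTY	(1u << 0)
#define PAGE_DEVMAP	(1u << 1)
#define PAGE_RW		(1u << 2)

/* Bits preserved across a protection change, as in pte_modify(). */
#define CHG_MASK_OLD	(PAGE_DIRTY)			/* before the fix */
#define CHG_MASK_NEW	(PAGE_DIRTY | PAGE_DEVMAP)	/* after the fix */

static uint32_t pte_modify(uint32_t pte, uint32_t newprot, uint32_t chg_mask)
{
	/* Keep the masked bits, take everything else from newprot. */
	return (pte & chg_mask) | (newprot & ~chg_mask);
}

int main(void)
{
	uint32_t pte = PAGE_DIRTY | PAGE_DEVMAP | PAGE_RW;

	/* mprotect(PROT_NONE)-style change: newprot carries no flags. */
	printf("old mask: devmap %s\n",
	       pte_modify(pte, 0, CHG_MASK_OLD) & PAGE_DEVMAP ? "kept" : "lost");
	printf("new mask: devmap %s\n",
	       pte_modify(pte, 0, CHG_MASK_NEW) & PAGE_DEVMAP ? "kept" : "lost");
	return 0;
}

With the old mask the sketch prints "lost": once _PAGE_DEVMAP disappears,
get_user_pages() can no longer tell that the page belongs to a live device
mapping.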
diff --git a/fs/dax.c b/fs/dax.c
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -666,6 +666,8 @@ struct page *dax_layout_busy_page(struct address_space *mapping)
 	while (index < end && pagevec_lookup_entries(&pvec, mapping, index,
 				min(end - index, (pgoff_t)PAGEVEC_SIZE),
 				indices)) {
+		pgoff_t nr_pages = 1;
+
 		for (i = 0; i < pagevec_count(&pvec); i++) {
 			struct page *pvec_ent = pvec.pages[i];
 			void *entry;
@@ -680,8 +682,15 @@ struct page *dax_layout_busy_page(struct address_space *mapping)
 
 			xa_lock_irq(&mapping->i_pages);
 			entry = get_unlocked_mapping_entry(mapping, index, NULL);
-			if (entry)
+			if (entry) {
 				page = dax_busy_page(entry);
+				/*
+				 * Account for multi-order entries at
+				 * the end of the pagevec.
+				 */
+				if (i + 1 >= pagevec_count(&pvec))
+					nr_pages = 1UL << dax_radix_order(entry);
+			}
 			put_unlocked_mapping_entry(mapping, index, entry);
 			xa_unlock_irq(&mapping->i_pages);
 			if (page)
@@ -696,7 +705,7 @@ struct page *dax_layout_busy_page(struct address_space *mapping)
 		 */
 		pagevec_remove_exceptionals(&pvec);
 		pagevec_release(&pvec);
-		index++;
+		index += nr_pages;
 
 		if (page)
 			break;
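
To see how the old stepping livelocks, note that a PMD-sized DAX entry is a
single multi-order radix-tree entry covering 512 page indices, and a lookup
from any index inside it returns the entry's canonical starting index. With
"index++" the scan therefore snaps back into the same entry forever. A
standalone toy model of the stepping (the lookup() helper below is
hypothetical, not the kernel API):

#include <stdio.h>

#define ENTRY_START	512L	/* canonical index of a PMD-sized entry */
#define ENTRY_ORDER	9	/* 2MiB / 4KiB = 512 pages */

/* Toy lookup: any index inside the entry reports its canonical start. */
static long lookup(long index)
{
	if (index >= ENTRY_START && index < ENTRY_START + (1L << ENTRY_ORDER))
		return ENTRY_START;
	return -1;	/* no entry here */
}

int main(void)
{
	long index = ENTRY_START;
	int i;

	/* Old stepping: index is pulled back to 512, then bumped to 513. */
	for (i = 0; i < 3; i++) {
		index = lookup(index);
		index++;
		printf("index++ stepping:           index = %ld\n", index);
	}

	/* Fixed stepping: skip the entry's whole span and make progress. */
	index = lookup(ENTRY_START) + (1L << ENTRY_ORDER);
	printf("index += nr_pages stepping: index = %ld\n", index);
	return 0;
}

The first loop prints 513 three times and would spin indefinitely in the
kernel while the truncate path waits for progress; advancing by
1UL << dax_radix_order(entry) steps past the whole entry to index 1024.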