Merge branch 'akpm' (patches from Andrew)

Merge misc fixes from Andrew Morton:
"10 fixes"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  hfsplus: stop workqueue when fill_super() failed
  mm: don't allow deferred pages with NEED_PER_CPU_KM
  MAINTAINERS: add Q: entry to kselftest for patchwork project
  radix tree: fix multi-order iteration race
  radix tree test suite: multi-order iteration race
  radix tree test suite: add item_delete_rcu()
  radix tree test suite: fix compilation issue
  radix tree test suite: fix mapshift build target
  include/linux/mm.h: add new inline function vmf_error()
  lib/test_bitmap.c: fix bitmap optimisation tests to report errors correctly

+116 -15
+1
MAINTAINERS
@@ -7698,6 +7698,7 @@
 M:	Shuah Khan <shuah@kernel.org>
 L:	linux-kselftest@vger.kernel.org
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/shuah/linux-kselftest.git
+Q:	https://patchwork.kernel.org/project/linux-kselftest/list/
 S:	Maintained
 F:	tools/testing/selftests/
 F:	Documentation/dev-tools/kselftest*
+1
fs/hfsplus/super.c
@@ -588,6 +588,7 @@
 	return 0;
 
 out_put_hidden_dir:
+	cancel_delayed_work_sync(&sbi->sync_work);
 	iput(sbi->hidden_dir);
 out_put_root:
 	dput(sb->s_root);
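Note on the hfsplus fix: the sync work is set up earlier in hfsplus_fill_super(), roughly as

	INIT_DELAYED_WORK(&sbi->sync_work, delayed_sync_fs);

(a paraphrase of the existing hfsplus code, not part of this diff). Once the work can have been queued, every error path must cancel it synchronously before tearing the superblock down, otherwise the delayed sync can run against freed data.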
+7
include/linux/mm.h
@@ -2466,6 +2466,13 @@
 	return VM_FAULT_NOPAGE;
 }
 
+static inline vm_fault_t vmf_error(int err)
+{
+	if (err == -ENOMEM)
+		return VM_FAULT_OOM;
+	return VM_FAULT_SIGBUS;
+}
+
 struct page *follow_page_mask(struct vm_area_struct *vma,
 			      unsigned long address, unsigned int foll_flags,
 			      unsigned int *page_mask);
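A hypothetical caller, to show the intent of the helper; my_fault() and my_get_page() are illustrative names only, not part of this series:

	static vm_fault_t my_fault(struct vm_fault *vmf)
	{
		int err = my_get_page(vmf);	/* hypothetical: 0 on success, -errno on failure */

		if (err)
			return vmf_error(err);	/* -ENOMEM becomes VM_FAULT_OOM, anything else VM_FAULT_SIGBUS */
		return VM_FAULT_NOPAGE;
	}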
+2 -4
lib/radix-tree.c
@@ -1612,11 +1612,9 @@
 static void __rcu **skip_siblings(struct radix_tree_node **nodep,
 			void __rcu **slot, struct radix_tree_iter *iter)
 {
-	void *sib = node_to_entry(slot - 1);
-
 	while (iter->index < iter->next_index) {
 		*nodep = rcu_dereference_raw(*slot);
-		if (*nodep && *nodep != sib)
+		if (*nodep && !is_sibling_entry(iter->node, *nodep))
 			return slot;
 		slot++;
 		iter->index = __radix_tree_iter_add(iter, 1);
@@ -1631,7 +1629,7 @@
 		struct radix_tree_iter *iter, unsigned flags)
 {
 	unsigned tag = flags & RADIX_TREE_ITER_TAG_MASK;
-	struct radix_tree_node *node = rcu_dereference_raw(*slot);
+	struct radix_tree_node *node;
 
 	slot = skip_siblings(&node, slot, iter);
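Both halves of the fix are visible in the hunks above: skip_siblings() now recognises sibling slots with is_sibling_entry() against the iterator's own node rather than a value precomputed from slot - 1, which can be stale while a multi-order entry is concurrently deleted, and __radix_tree_next_slot() no longer dereferences *slot before the siblings have been skipped.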
+15 -6
lib/test_bitmap.c
@@ -331,23 +331,32 @@
 	unsigned int start, nbits;
 
 	for (start = 0; start < 1024; start += 8) {
-		memset(bmap1, 0x5a, sizeof(bmap1));
-		memset(bmap2, 0x5a, sizeof(bmap2));
 		for (nbits = 0; nbits < 1024 - start; nbits += 8) {
+			memset(bmap1, 0x5a, sizeof(bmap1));
+			memset(bmap2, 0x5a, sizeof(bmap2));
+
 			bitmap_set(bmap1, start, nbits);
 			__bitmap_set(bmap2, start, nbits);
-			if (!bitmap_equal(bmap1, bmap2, 1024))
+			if (!bitmap_equal(bmap1, bmap2, 1024)) {
 				printk("set not equal %d %d\n", start, nbits);
-			if (!__bitmap_equal(bmap1, bmap2, 1024))
+				failed_tests++;
+			}
+			if (!__bitmap_equal(bmap1, bmap2, 1024)) {
 				printk("set not __equal %d %d\n", start, nbits);
+				failed_tests++;
+			}
 
 			bitmap_clear(bmap1, start, nbits);
 			__bitmap_clear(bmap2, start, nbits);
-			if (!bitmap_equal(bmap1, bmap2, 1024))
+			if (!bitmap_equal(bmap1, bmap2, 1024)) {
 				printk("clear not equal %d %d\n", start, nbits);
-			if (!__bitmap_equal(bmap1, bmap2, 1024))
+				failed_tests++;
+			}
+			if (!__bitmap_equal(bmap1, bmap2, 1024)) {
 				printk("clear not __equal %d %d\n", start,
 									nbits);
+				failed_tests++;
+			}
 		}
 	}
 }
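Two behavioural changes here: the memsets move inside the inner loop so each (start, nbits) case starts from the same 0x5a pattern instead of inheriting state from the previous iteration, and each mismatch now bumps failed_tests so the suite's final pass/fail summary reflects these checks rather than only printk()ing them.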
+1
mm/Kconfig
@@ -636,6 +636,7 @@
 	default n
 	depends on NO_BOOTMEM
 	depends on !FLATMEM
+	depends on !NEED_PER_CPU_KM
 	help
 	  Ordinarily all struct pages are initialised during early boot in a
 	  single thread. On very large machines this can take a considerable
+2 -1
tools/include/linux/spinlock.h
@@ -6,8 +6,9 @@
 #include <stdbool.h>
 
 #define spinlock_t		pthread_mutex_t
-#define DEFINE_SPINLOCK(x)	pthread_mutex_t x = PTHREAD_MUTEX_INITIALIZER;
+#define DEFINE_SPINLOCK(x)	pthread_mutex_t x = PTHREAD_MUTEX_INITIALIZER
 #define __SPIN_LOCK_UNLOCKED(x)	(pthread_mutex_t)PTHREAD_MUTEX_INITIALIZER
+#define spin_lock_init(x)	pthread_mutex_init(x, NULL)
 
 #define spin_lock_irqsave(x, f)		(void)f, pthread_mutex_lock(x)
 #define spin_unlock_irqrestore(x, f)	(void)f, pthread_mutex_unlock(x)
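Usage sketch for the two shim changes (test-suite code only; the lock names are illustrative):

	static DEFINE_SPINLOCK(static_lock);	/* the trailing ';' now comes from the use site, as in the kernel */

	spinlock_t dynamic_lock;
	spin_lock_init(&dynamic_lock);		/* expands to pthread_mutex_init(&dynamic_lock, NULL) */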
+2 -4
tools/testing/radix-tree/Makefile
@@ -17,6 +17,6 @@
 LDFLAGS += -m32
 endif
 
-targets: mapshift $(TARGETS)
+targets: generated/map-shift.h $(TARGETS)
 
 main:	$(OFILES)
@@ -42,9 +42,7 @@
 idr.c: ../../../lib/idr.c
 	sed -e 's/^static //' -e 's/__always_inline //' -e 's/inline //' < $< > $@
 
-.PHONY: mapshift
-
-mapshift:
+generated/map-shift.h:
 	@if ! grep -qws $(SHIFT) generated/map-shift.h; then \
 		echo "#define RADIX_TREE_MAP_SHIFT $(SHIFT)" > \
 			generated/map-shift.h; \
+63
tools/testing/radix-tree/multiorder.c
@@ -16,6 +16,7 @@
 #include <linux/radix-tree.h>
 #include <linux/slab.h>
 #include <linux/errno.h>
+#include <pthread.h>
 
 #include "test.h"
@@ -624,6 +625,67 @@
 	item_kill_tree(&tree);
 }
 
+bool stop_iteration = false;
+
+static void *creator_func(void *ptr)
+{
+	/* 'order' is set up to ensure we have sibling entries */
+	unsigned int order = RADIX_TREE_MAP_SHIFT - 1;
+	struct radix_tree_root *tree = ptr;
+	int i;
+
+	for (i = 0; i < 10000; i++) {
+		item_insert_order(tree, 0, order);
+		item_delete_rcu(tree, 0);
+	}
+
+	stop_iteration = true;
+	return NULL;
+}
+
+static void *iterator_func(void *ptr)
+{
+	struct radix_tree_root *tree = ptr;
+	struct radix_tree_iter iter;
+	struct item *item;
+	void **slot;
+
+	while (!stop_iteration) {
+		rcu_read_lock();
+		radix_tree_for_each_slot(slot, tree, &iter, 0) {
+			item = radix_tree_deref_slot(slot);
+
+			if (!item)
+				continue;
+			if (radix_tree_deref_retry(item)) {
+				slot = radix_tree_iter_retry(&iter);
+				continue;
+			}
+
+			item_sanity(item, iter.index);
+		}
+		rcu_read_unlock();
+	}
+	return NULL;
+}
+
+static void multiorder_iteration_race(void)
+{
+	const int num_threads = sysconf(_SC_NPROCESSORS_ONLN);
+	pthread_t worker_thread[num_threads];
+	RADIX_TREE(tree, GFP_KERNEL);
+	int i;
+
+	pthread_create(&worker_thread[0], NULL, &creator_func, &tree);
+	for (i = 1; i < num_threads; i++)
+		pthread_create(&worker_thread[i], NULL, &iterator_func, &tree);
+
+	for (i = 0; i < num_threads; i++)
+		pthread_join(worker_thread[i], NULL);
+
+	item_kill_tree(&tree);
+}
+
 void multiorder_checks(void)
 {
 	int i;
@@ -644,6 +706,7 @@
 	multiorder_join();
 	multiorder_split();
 	multiorder_account();
+	multiorder_iteration_race();
 
 	radix_tree_cpu_dead(0);
 }
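Design note: the creator thread repeatedly inserts and RCU-deletes a single entry of order RADIX_TREE_MAP_SHIFT - 1, which guarantees the entry owns sibling slots, while every other CPU iterates the same tree under rcu_read_lock(). This is the race the lib/radix-tree.c change above closes.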
+19
tools/testing/radix-tree/test.c
@@ -75,6 +75,25 @@
 	return 0;
 }
 
+static void item_free_rcu(struct rcu_head *head)
+{
+	struct item *item = container_of(head, struct item, rcu_head);
+
+	free(item);
+}
+
+int item_delete_rcu(struct radix_tree_root *root, unsigned long index)
+{
+	struct item *item = radix_tree_delete(root, index);
+
+	if (item) {
+		item_sanity(item, index);
+		call_rcu(&item->rcu_head, item_free_rcu);
+		return 1;
+	}
+	return 0;
+}
+
 void item_check_present(struct radix_tree_root *root, unsigned long index)
 {
 	struct item *item;
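radix_tree_delete() unlinks the item immediately, but the free is deferred through call_rcu(), so an iterator that picked the pointer up under rcu_read_lock() (as iterator_func() in multiorder.c does) can still run item_sanity() on it safely until the grace period ends.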
+3
tools/testing/radix-tree/test.h
@@ -5,6 +5,7 @@
 #include <linux/rcupdate.h>
 
 struct item {
+	struct rcu_head rcu_head;
 	unsigned long index;
 	unsigned int order;
 };
@@ -13,9 +14,11 @@
 struct item *item_create(unsigned long index, unsigned int order);
 int __item_insert(struct radix_tree_root *root, struct item *item);
 int item_insert(struct radix_tree_root *root, unsigned long index);
+void item_sanity(struct item *item, unsigned long index);
 int item_insert_order(struct radix_tree_root *root, unsigned long index,
 			unsigned order);
 int item_delete(struct radix_tree_root *root, unsigned long index);
+int item_delete_rcu(struct radix_tree_root *root, unsigned long index);
 struct item *item_lookup(struct radix_tree_root *root, unsigned long index);
 
 void item_check_present(struct radix_tree_root *root, unsigned long index);