Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'core-locking-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull locking changes from Ingo Molnar:
"Four miscellaneous standalone fixes for futexes, rtmutexes and
Kconfig.locks."

* 'core-locking-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
futex: Use freezable blocking call
futex: Take hugepages into account when generating futex_key
rtmutex: Document rt_mutex_adjust_prio_chain()
locking: Fix copy/paste errors of "ARCH_INLINE_*_UNLOCK_BH"

+53 -5
+16
include/linux/hugetlb.h
··· 358 358 return h - hstates; 359 359 } 360 360 361 + pgoff_t __basepage_index(struct page *page); 362 + 363 + /* Return page->index in PAGE_SIZE units */ 364 + static inline pgoff_t basepage_index(struct page *page) 365 + { 366 + if (!PageCompound(page)) 367 + return page->index; 368 + 369 + return __basepage_index(page); 370 + } 371 + 361 372 #else /* CONFIG_HUGETLB_PAGE */ 362 373 struct hstate {}; 363 374 #define alloc_huge_page_node(h, nid) NULL ··· 389 378 } 390 379 #define hstate_index_to_shift(index) 0 391 380 #define hstate_index(h) 0 381 + 382 + static inline pgoff_t basepage_index(struct page *page) 383 + { 384 + return page->index; 385 + } 392 386 #endif /* CONFIG_HUGETLB_PAGE */ 393 387 394 388 #endif /* _LINUX_HUGETLB_H */
+3 -3
kernel/Kconfig.locks
··· 138 138 139 139 config INLINE_SPIN_UNLOCK_IRQ 140 140 def_bool y 141 - depends on !PREEMPT || ARCH_INLINE_SPIN_UNLOCK_BH 141 + depends on !PREEMPT || ARCH_INLINE_SPIN_UNLOCK_IRQ 142 142 143 143 config INLINE_SPIN_UNLOCK_IRQRESTORE 144 144 def_bool y ··· 175 175 176 176 config INLINE_READ_UNLOCK_IRQ 177 177 def_bool y 178 - depends on !PREEMPT || ARCH_INLINE_READ_UNLOCK_BH 178 + depends on !PREEMPT || ARCH_INLINE_READ_UNLOCK_IRQ 179 179 180 180 config INLINE_READ_UNLOCK_IRQRESTORE 181 181 def_bool y ··· 212 212 213 213 config INLINE_WRITE_UNLOCK_IRQ 214 214 def_bool y 215 - depends on !PREEMPT || ARCH_INLINE_WRITE_UNLOCK_BH 215 + depends on !PREEMPT || ARCH_INLINE_WRITE_UNLOCK_IRQ 216 216 217 217 config INLINE_WRITE_UNLOCK_IRQRESTORE 218 218 def_bool y
+4 -2
kernel/futex.c
··· 61 61 #include <linux/nsproxy.h> 62 62 #include <linux/ptrace.h> 63 63 #include <linux/sched/rt.h> 64 + #include <linux/hugetlb.h> 65 + #include <linux/freezer.h> 64 66 65 67 #include <asm/futex.h> 66 68 ··· 367 365 } else { 368 366 key->both.offset |= FUT_OFF_INODE; /* inode-based key */ 369 367 key->shared.inode = page_head->mapping->host; 370 - key->shared.pgoff = page_head->index; 368 + key->shared.pgoff = basepage_index(page); 371 369 } 372 370 373 371 get_futex_key_refs(key); ··· 1809 1807 * is no timeout, or if it has yet to expire. 1810 1808 */ 1811 1809 if (!timeout || timeout->task) 1812 - schedule(); 1810 + freezable_schedule(); 1813 1811 } 1814 1812 __set_current_state(TASK_RUNNING); 1815 1813 }
+13
kernel/rtmutex.c
··· 145 145 /* 146 146 * Adjust the priority chain. Also used for deadlock detection. 147 147 * Decreases task's usage by one - may thus free the task. 148 + * 149 + * @task: the task owning the mutex (owner) for which a chain walk is probably 150 + * needed 151 + * @deadlock_detect: do we have to carry out deadlock detection? 152 + * @orig_lock: the mutex (can be NULL if we are walking the chain to recheck 153 + * things for a task that has just got its priority adjusted, and 154 + * is waiting on a mutex) 155 + * @orig_waiter: rt_mutex_waiter struct for the task that has just donated 156 + * its priority to the mutex owner (can be NULL in the case 157 + * depicted above or if the top waiter is gone away and we are 158 + * actually deboosting the owner) 159 + * @top_task: the current top waiter 160 + * 148 161 * Returns 0 or -EDEADLK. 149 162 */ 150 163 static int rt_mutex_adjust_prio_chain(struct task_struct *task,
+17
mm/hugetlb.c
··· 690 690 } 691 691 EXPORT_SYMBOL_GPL(PageHuge); 692 692 693 + pgoff_t __basepage_index(struct page *page) 694 + { 695 + struct page *page_head = compound_head(page); 696 + pgoff_t index = page_index(page_head); 697 + unsigned long compound_idx; 698 + 699 + if (!PageHuge(page_head)) 700 + return page_index(page); 701 + 702 + if (compound_order(page_head) >= MAX_ORDER) 703 + compound_idx = page_to_pfn(page) - page_to_pfn(page_head); 704 + else 705 + compound_idx = page - page_head; 706 + 707 + return (index << compound_order(page_head)) + compound_idx; 708 + } 709 + 693 710 static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid) 694 711 { 695 712 struct page *page;