Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'trace-v6.2-rc7-3' of git://git.kernel.org/pub/scm/linux/kernel/git/trace/linux-trace

Pull tracing fix from Steven Rostedt:
"Fix race that causes a warning of corrupt ring buffer

With the change that allows reading the "trace" file without disabling
writing to the ring buffer, there was an integrity check of the ring
buffer in the iterator read code, that expected the ring buffer to be
write disabled. This caused the integrity check to trigger when stress
reading the "trace" file while writing was happening.

The integrity check is a bit aggressive (and has never triggered in
practice). Change it so that it checks just the integrity of the
linked pages without clearing the flags inside the pointers. This
removes the warning that was being triggered"

[ Heh. This was supposed to have gone in last week before the 6.2
release, but Steven forgot to actually add me to the participants of
the pull request, so here it is, a week later - Linus ]

* tag 'trace-v6.2-rc7-3' of git://git.kernel.org/pub/scm/linux/kernel/git/trace/linux-trace:
ring-buffer: Handle race between rb_move_tail and rb_check_pages

+10 -32
kernel/trace/ring_buffer.c
··· 1581 1581 } 1582 1582 1583 1583 /** 1584 - * rb_check_list - make sure a pointer to a list has the last bits zero 1585 - */ 1586 - static int rb_check_list(struct ring_buffer_per_cpu *cpu_buffer, 1587 - struct list_head *list) 1588 - { 1589 - if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev) != list->prev)) 1590 - return 1; 1591 - if (RB_WARN_ON(cpu_buffer, rb_list_head(list->next) != list->next)) 1592 - return 1; 1593 - return 0; 1594 - } 1595 - 1596 - /** 1597 1584 * rb_check_pages - integrity check of buffer pages 1598 1585 * @cpu_buffer: CPU buffer with pages to test 1599 1586 * ··· 1589 1602 */ 1590 1603 static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer) 1591 1604 { 1592 - struct list_head *head = cpu_buffer->pages; 1593 - struct buffer_page *bpage, *tmp; 1605 + struct list_head *head = rb_list_head(cpu_buffer->pages); 1606 + struct list_head *tmp; 1594 1607 1595 - /* Reset the head page if it exists */ 1596 - if (cpu_buffer->head_page) 1597 - rb_set_head_page(cpu_buffer); 1598 - 1599 - rb_head_page_deactivate(cpu_buffer); 1600 - 1601 - if (RB_WARN_ON(cpu_buffer, head->next->prev != head)) 1602 - return -1; 1603 - if (RB_WARN_ON(cpu_buffer, head->prev->next != head)) 1608 + if (RB_WARN_ON(cpu_buffer, 1609 + rb_list_head(rb_list_head(head->next)->prev) != head)) 1604 1610 return -1; 1605 1611 1606 - if (rb_check_list(cpu_buffer, head)) 1612 + if (RB_WARN_ON(cpu_buffer, 1613 + rb_list_head(rb_list_head(head->prev)->next) != head)) 1607 1614 return -1; 1608 1615 1609 - list_for_each_entry_safe(bpage, tmp, head, list) { 1616 + for (tmp = rb_list_head(head->next); tmp != head; tmp = rb_list_head(tmp->next)) { 1610 1617 if (RB_WARN_ON(cpu_buffer, 1611 - bpage->list.next->prev != &bpage->list)) 1618 + rb_list_head(rb_list_head(tmp->next)->prev) != tmp)) 1612 1619 return -1; 1620 + 1613 1621 if (RB_WARN_ON(cpu_buffer, 1614 - bpage->list.prev->next != &bpage->list)) 1615 - return -1; 1616 - if (rb_check_list(cpu_buffer, &bpage->list)) 1622 + rb_list_head(rb_list_head(tmp->prev)->next) != tmp)) 1617 1623 return -1; 1618 1624 } 1619 - 1620 - rb_head_page_activate(cpu_buffer); 1621 1625 1622 1626 return 0; 1623 1627 }