Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

rbtree, perf: Use new rbtree helpers

Reduce rbtree boilerplate by using the new helpers.

One noteworthy change is unification of the various (partial) compare
functions. We construct a subtree match by forcing the sub-order to
always match, see __group_cmp().

Due to 'const' we had to touch cgroup_id().

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Tejun Heo <tj@kernel.org>
Acked-by: Davidlohr Bueso <dbueso@suse.de>

authored by

Peter Zijlstra and committed by
Ingo Molnar
a3b89864 8ecca394

+92 -107
+2 -2
include/linux/cgroup.h
··· 307 307 * Inline functions. 308 308 */ 309 309 310 - static inline u64 cgroup_id(struct cgroup *cgrp) 310 + static inline u64 cgroup_id(const struct cgroup *cgrp) 311 311 { 312 312 return cgrp->kn->id; 313 313 } ··· 701 701 struct cgroup_subsys_state; 702 702 struct cgroup; 703 703 704 - static inline u64 cgroup_id(struct cgroup *cgrp) { return 1; } 704 + static inline u64 cgroup_id(const struct cgroup *cgrp) { return 1; } 705 705 static inline void css_get(struct cgroup_subsys_state *css) {} 706 706 static inline void css_put(struct cgroup_subsys_state *css) {} 707 707 static inline int cgroup_attach_task_all(struct task_struct *from,
+90 -105
kernel/events/core.c
··· 1595 1595 groups->index = 0; 1596 1596 } 1597 1597 1598 + static inline struct cgroup *event_cgroup(const struct perf_event *event) 1599 + { 1600 + struct cgroup *cgroup = NULL; 1601 + 1602 + #ifdef CONFIG_CGROUP_PERF 1603 + if (event->cgrp) 1604 + cgroup = event->cgrp->css.cgroup; 1605 + #endif 1606 + 1607 + return cgroup; 1608 + } 1609 + 1598 1610 /* 1599 1611 * Compare function for event groups; 1600 1612 * 1601 1613 * Implements complex key that first sorts by CPU and then by virtual index 1602 1614 * which provides ordering when rotating groups for the same CPU. 1603 1615 */ 1604 - static bool 1605 - perf_event_groups_less(struct perf_event *left, struct perf_event *right) 1616 + static __always_inline int 1617 + perf_event_groups_cmp(const int left_cpu, const struct cgroup *left_cgroup, 1618 + const u64 left_group_index, const struct perf_event *right) 1606 1619 { 1607 - if (left->cpu < right->cpu) 1608 - return true; 1609 - if (left->cpu > right->cpu) 1610 - return false; 1620 + if (left_cpu < right->cpu) 1621 + return -1; 1622 + if (left_cpu > right->cpu) 1623 + return 1; 1611 1624 1612 1625 #ifdef CONFIG_CGROUP_PERF 1613 - if (left->cgrp != right->cgrp) { 1614 - if (!left->cgrp || !left->cgrp->css.cgroup) { 1615 - /* 1616 - * Left has no cgroup but right does, no cgroups come 1617 - * first. 1618 - */ 1619 - return true; 1620 - } 1621 - if (!right->cgrp || !right->cgrp->css.cgroup) { 1622 - /* 1623 - * Right has no cgroup but left does, no cgroups come 1624 - * first. 1625 - */ 1626 - return false; 1627 - } 1628 - /* Two dissimilar cgroups, order by id. */ 1629 - if (left->cgrp->css.cgroup->kn->id < right->cgrp->css.cgroup->kn->id) 1630 - return true; 1626 + { 1627 + const struct cgroup *right_cgroup = event_cgroup(right); 1631 1628 1632 - return false; 1629 + if (left_cgroup != right_cgroup) { 1630 + if (!left_cgroup) { 1631 + /* 1632 + * Left has no cgroup but right does, no 1633 + * cgroups come first. 
1634 + */ 1635 + return -1; 1636 + } 1637 + if (!right_cgroup) { 1638 + /* 1639 + * Right has no cgroup but left does, no 1640 + * cgroups come first. 1641 + */ 1642 + return 1; 1643 + } 1644 + /* Two dissimilar cgroups, order by id. */ 1645 + if (cgroup_id(left_cgroup) < cgroup_id(right_cgroup)) 1646 + return -1; 1647 + 1648 + return 1; 1649 + } 1633 1650 } 1634 1651 #endif 1635 1652 1636 - if (left->group_index < right->group_index) 1637 - return true; 1638 - if (left->group_index > right->group_index) 1639 - return false; 1653 + if (left_group_index < right->group_index) 1654 + return -1; 1655 + if (left_group_index > right->group_index) 1656 + return 1; 1640 1657 1641 - return false; 1658 + return 0; 1659 + } 1660 + 1661 + #define __node_2_pe(node) \ 1662 + rb_entry((node), struct perf_event, group_node) 1663 + 1664 + static inline bool __group_less(struct rb_node *a, const struct rb_node *b) 1665 + { 1666 + struct perf_event *e = __node_2_pe(a); 1667 + return perf_event_groups_cmp(e->cpu, event_cgroup(e), e->group_index, 1668 + __node_2_pe(b)) < 0; 1669 + } 1670 + 1671 + struct __group_key { 1672 + int cpu; 1673 + struct cgroup *cgroup; 1674 + }; 1675 + 1676 + static inline int __group_cmp(const void *key, const struct rb_node *node) 1677 + { 1678 + const struct __group_key *a = key; 1679 + const struct perf_event *b = __node_2_pe(node); 1680 + 1681 + /* partial/subtree match: @cpu, @cgroup; ignore: @group_index */ 1682 + return perf_event_groups_cmp(a->cpu, a->cgroup, b->group_index, b); 1642 1683 } 1643 1684 1644 1685 /* ··· 1691 1650 perf_event_groups_insert(struct perf_event_groups *groups, 1692 1651 struct perf_event *event) 1693 1652 { 1694 - struct perf_event *node_event; 1695 - struct rb_node *parent; 1696 - struct rb_node **node; 1697 - 1698 1653 event->group_index = ++groups->index; 1699 1654 1700 - node = &groups->tree.rb_node; 1701 - parent = *node; 1702 - 1703 - while (*node) { 1704 - parent = *node; 1705 - node_event = container_of(*node, struct 
perf_event, group_node); 1706 - 1707 - if (perf_event_groups_less(event, node_event)) 1708 - node = &parent->rb_left; 1709 - else 1710 - node = &parent->rb_right; 1711 - } 1712 - 1713 - rb_link_node(&event->group_node, parent, node); 1714 - rb_insert_color(&event->group_node, &groups->tree); 1655 + rb_add(&event->group_node, &groups->tree, __group_less); 1715 1656 } 1716 1657 1717 1658 /* ··· 1741 1718 perf_event_groups_first(struct perf_event_groups *groups, int cpu, 1742 1719 struct cgroup *cgrp) 1743 1720 { 1744 - struct perf_event *node_event = NULL, *match = NULL; 1745 - struct rb_node *node = groups->tree.rb_node; 1746 - #ifdef CONFIG_CGROUP_PERF 1747 - u64 node_cgrp_id, cgrp_id = 0; 1721 + struct __group_key key = { 1722 + .cpu = cpu, 1723 + .cgroup = cgrp, 1724 + }; 1725 + struct rb_node *node; 1748 1726 1749 - if (cgrp) 1750 - cgrp_id = cgrp->kn->id; 1751 - #endif 1727 + node = rb_find_first(&key, &groups->tree, __group_cmp); 1728 + if (node) 1729 + return __node_2_pe(node); 1752 1730 1753 - while (node) { 1754 - node_event = container_of(node, struct perf_event, group_node); 1755 - 1756 - if (cpu < node_event->cpu) { 1757 - node = node->rb_left; 1758 - continue; 1759 - } 1760 - if (cpu > node_event->cpu) { 1761 - node = node->rb_right; 1762 - continue; 1763 - } 1764 - #ifdef CONFIG_CGROUP_PERF 1765 - node_cgrp_id = 0; 1766 - if (node_event->cgrp && node_event->cgrp->css.cgroup) 1767 - node_cgrp_id = node_event->cgrp->css.cgroup->kn->id; 1768 - 1769 - if (cgrp_id < node_cgrp_id) { 1770 - node = node->rb_left; 1771 - continue; 1772 - } 1773 - if (cgrp_id > node_cgrp_id) { 1774 - node = node->rb_right; 1775 - continue; 1776 - } 1777 - #endif 1778 - match = node_event; 1779 - node = node->rb_left; 1780 - } 1781 - 1782 - return match; 1731 + return NULL; 1783 1732 } 1784 1733 1785 1734 /* ··· 1760 1765 static struct perf_event * 1761 1766 perf_event_groups_next(struct perf_event *event) 1762 1767 { 1763 - struct perf_event *next; 1764 - #ifdef 
CONFIG_CGROUP_PERF 1765 - u64 curr_cgrp_id = 0; 1766 - u64 next_cgrp_id = 0; 1767 - #endif 1768 + struct __group_key key = { 1769 + .cpu = event->cpu, 1770 + .cgroup = event_cgroup(event), 1771 + }; 1772 + struct rb_node *next; 1768 1773 1769 - next = rb_entry_safe(rb_next(&event->group_node), typeof(*event), group_node); 1770 - if (next == NULL || next->cpu != event->cpu) 1771 - return NULL; 1774 + next = rb_next_match(&key, &event->group_node, __group_cmp); 1775 + if (next) 1776 + return __node_2_pe(next); 1772 1777 1773 - #ifdef CONFIG_CGROUP_PERF 1774 - if (event->cgrp && event->cgrp->css.cgroup) 1775 - curr_cgrp_id = event->cgrp->css.cgroup->kn->id; 1776 - 1777 - if (next->cgrp && next->cgrp->css.cgroup) 1778 - next_cgrp_id = next->cgrp->css.cgroup->kn->id; 1779 - 1780 - if (curr_cgrp_id != next_cgrp_id) 1781 - return NULL; 1782 - #endif 1783 - return next; 1778 + return NULL; 1784 1779 } 1785 1780 1786 1781 /*