Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

selftests/bpf: tests with a loop state missing read/precision mark

The test case absent_mark_in_the_middle_state is equivalent of the
following C program:

1: r8 = bpf_get_prandom_u32();
2: r6 = -32;
3: bpf_iter_num_new(&fp[-8], 0, 10);
4: if (unlikely(bpf_get_prandom_u32()))
5: r6 = -31;
6: for (;;) {
7: if (!bpf_iter_num_next(&fp[-8]))
8: break;
9: if (unlikely(bpf_get_prandom_u32()))
10: *(u64 *)(fp + r6) = 7;
11: }
12: bpf_iter_num_destroy(&fp[-8]);
13: return 0;

Without a fix that instructs the verifier to ignore the branch count for
loop entries, verification proceeds as follows:
- 1-4, state is {r6=-32,fp-8=active};
- 6, checkpoint A is created with {r6=-32,fp-8=active};
- 7, checkpoint B is created with {r6=-32,fp-8=active},
push state {r6=-32,fp-8=active} from 7 to 9;
- 8,12,13, {r6=-32,fp-8=drained}, exit;
- pop state with {r6=-32,fp-8=active} from 7 to 9;
- 9, push state {r6=-32,fp-8=active} from 9 to 10;
- 6, checkpoint C is created with {r6=-32,fp-8=active};
- 7, checkpoint A is hit, no precision propagated for r6 to C;
- pop state {r6=-32,fp-8=active} from 9 to 10;
- 10, state is {r6=-31,fp-8=active}, r6 is marked as read and precise,
  these marks are propagated to checkpoints A and B (but not C, as
  it is not the parent of the current state);
- 6, {r6=-31,fp-8=active} is (incorrectly) deemed equivalent to
  checkpoint C, because r6 carries no precision mark in that checkpoint
  and so the differing r6 value is ignored;
- the program is accepted, despite a possibility of unaligned u64
stack access at offset -31.

The test case absent_mark_in_the_middle_state2 is similar except the
following change:

r8 = bpf_get_prandom_u32();
r6 = -32;
bpf_iter_num_new(&fp[-8], 0, 10);
if (unlikely(bpf_get_prandom_u32())) {
r6 = -31;
+ jump_into_loop:
+ goto +0;
+ goto loop;
+ }
+ if (unlikely(bpf_get_prandom_u32()))
+ goto jump_into_loop;
+ loop:
for (;;) {
if (!bpf_iter_num_next(&fp[-8]))
break;
if (unlikely(bpf_get_prandom_u32()))
*(u64 *)(fp + r6) = 7;
}
bpf_iter_num_destroy(&fp[-8])
return 0

The goal is to check that read/precision marks are propagated to
checkpoint created at 'goto +0' that resides outside of the loop.

The test case absent_mark_in_the_middle_state3 is a bit different and
is equivalent to the C program below:

int absent_mark_in_the_middle_state3(void)
{
bpf_iter_num_new(&fp[-8], 0, 10)
loop1(-32, &fp[-8])
loop1_wrapper(&fp[-8])
bpf_iter_num_destroy(&fp[-8])
}

int loop1(num, iter)
{
while (bpf_iter_num_next(iter)) {
if (unlikely(bpf_get_prandom_u32()))
*(fp + num) = 7;
}
return 0
}

int loop1_wrapper(iter)
{
r6 = -32;
if (unlikely(bpf_get_prandom_u32()))
r6 = -31;
loop1(r6, iter);
return 0;
}

The unsafe state is reached in a similar manner, but the loop is
located inside a subprogram that is called from two locations in the
main subprogram. This detail is important for exercising
bpf_scc_visit->backedges memory management.

Signed-off-by: Eduard Zingerman <eddyz87@gmail.com>
Link: https://lore.kernel.org/r/20250611200836.4135542-11-eddyz87@gmail.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>

authored by

Eduard Zingerman and committed by
Alexei Starovoitov
5159482f 0f54ff54

+277
+277
tools/testing/selftests/bpf/progs/iters.c
··· 1649 1649 return 0; 1650 1650 } 1651 1651 1652 + SEC("?raw_tp") 1653 + __flag(BPF_F_TEST_STATE_FREQ) 1654 + __failure __msg("misaligned stack access off 0+-31+0 size 8") 1655 + __naked int absent_mark_in_the_middle_state(void) 1656 + { 1657 + /* This is equivalent to C program below. 1658 + * 1659 + * r8 = bpf_get_prandom_u32(); 1660 + * r6 = -32; 1661 + * bpf_iter_num_new(&fp[-8], 0, 10); 1662 + * if (unlikely(bpf_get_prandom_u32())) 1663 + * r6 = -31; 1664 + * while (bpf_iter_num_next(&fp[-8])) { 1665 + * if (unlikely(bpf_get_prandom_u32())) 1666 + * *(fp + r6) = 7; 1667 + * } 1668 + * bpf_iter_num_destroy(&fp[-8]) 1669 + * return 0 1670 + */ 1671 + asm volatile ( 1672 + "call %[bpf_get_prandom_u32];" 1673 + "r8 = r0;" 1674 + "r7 = 0;" 1675 + "r6 = -32;" 1676 + "r0 = 0;" 1677 + "*(u64 *)(r10 - 16) = r0;" 1678 + "r1 = r10;" 1679 + "r1 += -8;" 1680 + "r2 = 0;" 1681 + "r3 = 10;" 1682 + "call %[bpf_iter_num_new];" 1683 + "call %[bpf_get_prandom_u32];" 1684 + "if r0 == r8 goto change_r6_%=;" 1685 + "loop_%=:" 1686 + "call noop;" 1687 + "r1 = r10;" 1688 + "r1 += -8;" 1689 + "call %[bpf_iter_num_next];" 1690 + "if r0 == 0 goto loop_end_%=;" 1691 + "call %[bpf_get_prandom_u32];" 1692 + "if r0 == r8 goto use_r6_%=;" 1693 + "goto loop_%=;" 1694 + "loop_end_%=:" 1695 + "r1 = r10;" 1696 + "r1 += -8;" 1697 + "call %[bpf_iter_num_destroy];" 1698 + "r0 = 0;" 1699 + "exit;" 1700 + "use_r6_%=:" 1701 + "r0 = r10;" 1702 + "r0 += r6;" 1703 + "r1 = 7;" 1704 + "*(u64 *)(r0 + 0) = r1;" 1705 + "goto loop_%=;" 1706 + "change_r6_%=:" 1707 + "r6 = -31;" 1708 + "goto loop_%=;" 1709 + : 1710 + : __imm(bpf_iter_num_new), 1711 + __imm(bpf_iter_num_next), 1712 + __imm(bpf_iter_num_destroy), 1713 + __imm(bpf_get_prandom_u32) 1714 + : __clobber_all 1715 + ); 1716 + } 1717 + 1718 + __used __naked 1719 + static int noop(void) 1720 + { 1721 + asm volatile ( 1722 + "r0 = 0;" 1723 + "exit;" 1724 + ); 1725 + } 1726 + 1727 + SEC("?raw_tp") 1728 + __flag(BPF_F_TEST_STATE_FREQ) 1729 + __failure 
__msg("misaligned stack access off 0+-31+0 size 8") 1730 + __naked int absent_mark_in_the_middle_state2(void) 1731 + { 1732 + /* This is equivalent to C program below. 1733 + * 1734 + * r8 = bpf_get_prandom_u32(); 1735 + * r6 = -32; 1736 + * bpf_iter_num_new(&fp[-8], 0, 10); 1737 + * if (unlikely(bpf_get_prandom_u32())) { 1738 + * r6 = -31; 1739 + * jump_into_loop: 1740 + * goto +0; 1741 + * goto loop; 1742 + * } 1743 + * if (unlikely(bpf_get_prandom_u32())) 1744 + * goto jump_into_loop; 1745 + * loop: 1746 + * while (bpf_iter_num_next(&fp[-8])) { 1747 + * if (unlikely(bpf_get_prandom_u32())) 1748 + * *(fp + r6) = 7; 1749 + * } 1750 + * bpf_iter_num_destroy(&fp[-8]) 1751 + * return 0 1752 + */ 1753 + asm volatile ( 1754 + "call %[bpf_get_prandom_u32];" 1755 + "r8 = r0;" 1756 + "r7 = 0;" 1757 + "r6 = -32;" 1758 + "r0 = 0;" 1759 + "*(u64 *)(r10 - 16) = r0;" 1760 + "r1 = r10;" 1761 + "r1 += -8;" 1762 + "r2 = 0;" 1763 + "r3 = 10;" 1764 + "call %[bpf_iter_num_new];" 1765 + "call %[bpf_get_prandom_u32];" 1766 + "if r0 == r8 goto change_r6_%=;" 1767 + "call %[bpf_get_prandom_u32];" 1768 + "if r0 == r8 goto jump_into_loop_%=;" 1769 + "loop_%=:" 1770 + "r1 = r10;" 1771 + "r1 += -8;" 1772 + "call %[bpf_iter_num_next];" 1773 + "if r0 == 0 goto loop_end_%=;" 1774 + "call %[bpf_get_prandom_u32];" 1775 + "if r0 == r8 goto use_r6_%=;" 1776 + "goto loop_%=;" 1777 + "loop_end_%=:" 1778 + "r1 = r10;" 1779 + "r1 += -8;" 1780 + "call %[bpf_iter_num_destroy];" 1781 + "r0 = 0;" 1782 + "exit;" 1783 + "use_r6_%=:" 1784 + "r0 = r10;" 1785 + "r0 += r6;" 1786 + "r1 = 7;" 1787 + "*(u64 *)(r0 + 0) = r1;" 1788 + "goto loop_%=;" 1789 + "change_r6_%=:" 1790 + "r6 = -31;" 1791 + "jump_into_loop_%=: " 1792 + "goto +0;" 1793 + "goto loop_%=;" 1794 + : 1795 + : __imm(bpf_iter_num_new), 1796 + __imm(bpf_iter_num_next), 1797 + __imm(bpf_iter_num_destroy), 1798 + __imm(bpf_get_prandom_u32) 1799 + : __clobber_all 1800 + ); 1801 + } 1802 + 1803 + SEC("?raw_tp") 1804 + __flag(BPF_F_TEST_STATE_FREQ) 1805 + 
__failure __msg("misaligned stack access off 0+-31+0 size 8") 1806 + __naked int absent_mark_in_the_middle_state3(void) 1807 + { 1808 + /* 1809 + * bpf_iter_num_new(&fp[-8], 0, 10) 1810 + * loop1(-32, &fp[-8]) 1811 + * loop1_wrapper(&fp[-8]) 1812 + * bpf_iter_num_destroy(&fp[-8]) 1813 + */ 1814 + asm volatile ( 1815 + "r1 = r10;" 1816 + "r1 += -8;" 1817 + "r2 = 0;" 1818 + "r3 = 10;" 1819 + "call %[bpf_iter_num_new];" 1820 + /* call #1 */ 1821 + "r1 = -32;" 1822 + "r2 = r10;" 1823 + "r2 += -8;" 1824 + "call loop1;" 1825 + "r1 = r10;" 1826 + "r1 += -8;" 1827 + "call %[bpf_iter_num_destroy];" 1828 + /* call #2 */ 1829 + "r1 = r10;" 1830 + "r1 += -8;" 1831 + "r2 = 0;" 1832 + "r3 = 10;" 1833 + "call %[bpf_iter_num_new];" 1834 + "r1 = r10;" 1835 + "r1 += -8;" 1836 + "call loop1_wrapper;" 1837 + /* return */ 1838 + "r1 = r10;" 1839 + "r1 += -8;" 1840 + "call %[bpf_iter_num_destroy];" 1841 + "r0 = 0;" 1842 + "exit;" 1843 + : 1844 + : __imm(bpf_iter_num_new), 1845 + __imm(bpf_iter_num_destroy), 1846 + __imm(bpf_get_prandom_u32) 1847 + : __clobber_all 1848 + ); 1849 + } 1850 + 1851 + __used __naked 1852 + static int loop1(void) 1853 + { 1854 + /* 1855 + * int loop1(num, iter) { 1856 + * r6 = num; 1857 + * r7 = iter; 1858 + * while (bpf_iter_num_next(r7)) { 1859 + * if (unlikely(bpf_get_prandom_u32())) 1860 + * *(fp + r6) = 7; 1861 + * } 1862 + * return 0 1863 + * } 1864 + */ 1865 + asm volatile ( 1866 + "r6 = r1;" 1867 + "r7 = r2;" 1868 + "call %[bpf_get_prandom_u32];" 1869 + "r8 = r0;" 1870 + "loop_%=:" 1871 + "r1 = r7;" 1872 + "call %[bpf_iter_num_next];" 1873 + "if r0 == 0 goto loop_end_%=;" 1874 + "call %[bpf_get_prandom_u32];" 1875 + "if r0 == r8 goto use_r6_%=;" 1876 + "goto loop_%=;" 1877 + "loop_end_%=:" 1878 + "r0 = 0;" 1879 + "exit;" 1880 + "use_r6_%=:" 1881 + "r0 = r10;" 1882 + "r0 += r6;" 1883 + "r1 = 7;" 1884 + "*(u64 *)(r0 + 0) = r1;" 1885 + "goto loop_%=;" 1886 + : 1887 + : __imm(bpf_iter_num_next), 1888 + __imm(bpf_get_prandom_u32) 1889 + : __clobber_all 1890 
+ ); 1891 + } 1892 + 1893 + __used __naked 1894 + static int loop1_wrapper(void) 1895 + { 1896 + /* 1897 + * int loop1_wrapper(iter) { 1898 + * r6 = -32; 1899 + * r7 = iter; 1900 + * if (unlikely(bpf_get_prandom_u32())) 1901 + * r6 = -31; 1902 + * loop1(r6, r7); 1903 + * return 0; 1904 + * } 1905 + */ 1906 + asm volatile ( 1907 + "r6 = -32;" 1908 + "r7 = r1;" 1909 + "call %[bpf_get_prandom_u32];" 1910 + "r8 = r0;" 1911 + "call %[bpf_get_prandom_u32];" 1912 + "if r0 == r8 goto change_r6_%=;" 1913 + "loop_%=:" 1914 + "r1 = r6;" 1915 + "r2 = r7;" 1916 + "call loop1;" 1917 + "r0 = 0;" 1918 + "exit;" 1919 + "change_r6_%=:" 1920 + "r6 = -31;" 1921 + "goto loop_%=;" 1922 + : 1923 + : __imm(bpf_iter_num_next), 1924 + __imm(bpf_get_prandom_u32) 1925 + : __clobber_all 1926 + ); 1927 + } 1928 + 1652 1929 char _license[] SEC("license") = "GPL";