Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

bpf: fix attach type BPF_LIRC_MODE2 dependency wrt CONFIG_CGROUP_BPF

If the kernel is compiled with CONFIG_CGROUP_BPF not enabled, it is not
possible to attach, detach or query IR BPF programs to /dev/lircN devices,
making them impossible to use. For embedded devices, it should be possible
to use IR decoding without cgroups or CONFIG_CGROUP_BPF enabled.

This change requires some refactoring, since bpf_prog_{attach,detach,query}
functions are now always compiled, but their code paths for cgroups need
moving out. Rather than a #ifdef CONFIG_CGROUP_BPF in kernel/bpf/syscall.c,
moving them to kernel/bpf/cgroup.c and kernel/bpf/sockmap.c does not
require #ifdefs since those files are already conditionally compiled.

Fixes: f4364dcfc86d ("media: rc: introduce BPF_PROG_LIRC_MODE2")
Signed-off-by: Sean Young <sean@mess.org>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>

authored by

Sean Young and committed by
Daniel Borkmann
fdb5c453 68d676a0

+132 -92
+2 -12
drivers/media/rc/bpf-lirc.c
··· 207 207 bpf_prog_array_free(rcdev->raw->progs); 208 208 } 209 209 210 - int lirc_prog_attach(const union bpf_attr *attr) 210 + int lirc_prog_attach(const union bpf_attr *attr, struct bpf_prog *prog) 211 211 { 212 - struct bpf_prog *prog; 213 212 struct rc_dev *rcdev; 214 213 int ret; 215 214 216 215 if (attr->attach_flags) 217 216 return -EINVAL; 218 217 219 - prog = bpf_prog_get_type(attr->attach_bpf_fd, 220 - BPF_PROG_TYPE_LIRC_MODE2); 221 - if (IS_ERR(prog)) 222 - return PTR_ERR(prog); 223 - 224 218 rcdev = rc_dev_get_from_fd(attr->target_fd); 225 - if (IS_ERR(rcdev)) { 226 - bpf_prog_put(prog); 219 + if (IS_ERR(rcdev)) 227 220 return PTR_ERR(rcdev); 228 - } 229 221 230 222 ret = lirc_bpf_attach(rcdev, prog); 231 - if (ret) 232 - bpf_prog_put(prog); 233 223 234 224 put_device(&rcdev->dev); 235 225
+26
include/linux/bpf-cgroup.h
··· 188 188 \ 189 189 __ret; \ 190 190 }) 191 + int cgroup_bpf_prog_attach(const union bpf_attr *attr, 192 + enum bpf_prog_type ptype, struct bpf_prog *prog); 193 + int cgroup_bpf_prog_detach(const union bpf_attr *attr, 194 + enum bpf_prog_type ptype); 195 + int cgroup_bpf_prog_query(const union bpf_attr *attr, 196 + union bpf_attr __user *uattr); 191 197 #else 192 198 199 + struct bpf_prog; 193 200 struct cgroup_bpf {}; 194 201 static inline void cgroup_bpf_put(struct cgroup *cgrp) {} 195 202 static inline int cgroup_bpf_inherit(struct cgroup *cgrp) { return 0; } 203 + 204 + static inline int cgroup_bpf_prog_attach(const union bpf_attr *attr, 205 + enum bpf_prog_type ptype, 206 + struct bpf_prog *prog) 207 + { 208 + return -EINVAL; 209 + } 210 + 211 + static inline int cgroup_bpf_prog_detach(const union bpf_attr *attr, 212 + enum bpf_prog_type ptype) 213 + { 214 + return -EINVAL; 215 + } 216 + 217 + static inline int cgroup_bpf_prog_query(const union bpf_attr *attr, 218 + union bpf_attr __user *uattr) 219 + { 220 + return -EINVAL; 221 + } 196 222 197 223 #define cgroup_bpf_enabled (0) 198 224 #define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (0)
+8
include/linux/bpf.h
··· 696 696 struct sock *__sock_map_lookup_elem(struct bpf_map *map, u32 key); 697 697 struct sock *__sock_hash_lookup_elem(struct bpf_map *map, void *key); 698 698 int sock_map_prog(struct bpf_map *map, struct bpf_prog *prog, u32 type); 699 + int sockmap_get_from_fd(const union bpf_attr *attr, int type, 700 + struct bpf_prog *prog); 699 701 #else 700 702 static inline struct sock *__sock_map_lookup_elem(struct bpf_map *map, u32 key) 701 703 { ··· 715 713 u32 type) 716 714 { 717 715 return -EOPNOTSUPP; 716 + } 717 + 718 + static inline int sockmap_get_from_fd(const union bpf_attr *attr, int type, 719 + struct bpf_prog *prog) 720 + { 721 + return -EINVAL; 718 722 } 719 723 #endif 720 724
+3 -2
include/linux/bpf_lirc.h
··· 5 5 #include <uapi/linux/bpf.h> 6 6 7 7 #ifdef CONFIG_BPF_LIRC_MODE2 8 - int lirc_prog_attach(const union bpf_attr *attr); 8 + int lirc_prog_attach(const union bpf_attr *attr, struct bpf_prog *prog); 9 9 int lirc_prog_detach(const union bpf_attr *attr); 10 10 int lirc_prog_query(const union bpf_attr *attr, union bpf_attr __user *uattr); 11 11 #else 12 - static inline int lirc_prog_attach(const union bpf_attr *attr) 12 + static inline int lirc_prog_attach(const union bpf_attr *attr, 13 + struct bpf_prog *prog) 13 14 { 14 15 return -EINVAL; 15 16 }
+54
kernel/bpf/cgroup.c
··· 428 428 return ret; 429 429 } 430 430 431 + int cgroup_bpf_prog_attach(const union bpf_attr *attr, 432 + enum bpf_prog_type ptype, struct bpf_prog *prog) 433 + { 434 + struct cgroup *cgrp; 435 + int ret; 436 + 437 + cgrp = cgroup_get_from_fd(attr->target_fd); 438 + if (IS_ERR(cgrp)) 439 + return PTR_ERR(cgrp); 440 + 441 + ret = cgroup_bpf_attach(cgrp, prog, attr->attach_type, 442 + attr->attach_flags); 443 + cgroup_put(cgrp); 444 + return ret; 445 + } 446 + 447 + int cgroup_bpf_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype) 448 + { 449 + struct bpf_prog *prog; 450 + struct cgroup *cgrp; 451 + int ret; 452 + 453 + cgrp = cgroup_get_from_fd(attr->target_fd); 454 + if (IS_ERR(cgrp)) 455 + return PTR_ERR(cgrp); 456 + 457 + prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype); 458 + if (IS_ERR(prog)) 459 + prog = NULL; 460 + 461 + ret = cgroup_bpf_detach(cgrp, prog, attr->attach_type, 0); 462 + if (prog) 463 + bpf_prog_put(prog); 464 + 465 + cgroup_put(cgrp); 466 + return ret; 467 + } 468 + 469 + int cgroup_bpf_prog_query(const union bpf_attr *attr, 470 + union bpf_attr __user *uattr) 471 + { 472 + struct cgroup *cgrp; 473 + int ret; 474 + 475 + cgrp = cgroup_get_from_fd(attr->query.target_fd); 476 + if (IS_ERR(cgrp)) 477 + return PTR_ERR(cgrp); 478 + 479 + ret = cgroup_bpf_query(cgrp, attr, uattr); 480 + 481 + cgroup_put(cgrp); 482 + return ret; 483 + } 484 + 431 485 /** 432 486 * __cgroup_bpf_run_filter_skb() - Run a program for packet filtering 433 487 * @sk: The socket sending or receiving traffic
+18
kernel/bpf/sockmap.c
··· 1915 1915 return 0; 1916 1916 } 1917 1917 1918 + int sockmap_get_from_fd(const union bpf_attr *attr, int type, 1919 + struct bpf_prog *prog) 1920 + { 1921 + int ufd = attr->target_fd; 1922 + struct bpf_map *map; 1923 + struct fd f; 1924 + int err; 1925 + 1926 + f = fdget(ufd); 1927 + map = __bpf_map_get(f); 1928 + if (IS_ERR(map)) 1929 + return PTR_ERR(map); 1930 + 1931 + err = sock_map_prog(map, prog, attr->attach_type); 1932 + fdput(f); 1933 + return err; 1934 + } 1935 + 1918 1936 static void *sock_map_lookup(struct bpf_map *map, void *key) 1919 1937 { 1920 1938 return NULL;
+21 -78
kernel/bpf/syscall.c
··· 1483 1483 return err; 1484 1484 } 1485 1485 1486 - #ifdef CONFIG_CGROUP_BPF 1487 - 1488 1486 static int bpf_prog_attach_check_attach_type(const struct bpf_prog *prog, 1489 1487 enum bpf_attach_type attach_type) 1490 1488 { ··· 1497 1499 1498 1500 #define BPF_PROG_ATTACH_LAST_FIELD attach_flags 1499 1501 1500 - static int sockmap_get_from_fd(const union bpf_attr *attr, 1501 - int type, bool attach) 1502 - { 1503 - struct bpf_prog *prog = NULL; 1504 - int ufd = attr->target_fd; 1505 - struct bpf_map *map; 1506 - struct fd f; 1507 - int err; 1508 - 1509 - f = fdget(ufd); 1510 - map = __bpf_map_get(f); 1511 - if (IS_ERR(map)) 1512 - return PTR_ERR(map); 1513 - 1514 - if (attach) { 1515 - prog = bpf_prog_get_type(attr->attach_bpf_fd, type); 1516 - if (IS_ERR(prog)) { 1517 - fdput(f); 1518 - return PTR_ERR(prog); 1519 - } 1520 - } 1521 - 1522 - err = sock_map_prog(map, prog, attr->attach_type); 1523 - if (err) { 1524 - fdput(f); 1525 - if (prog) 1526 - bpf_prog_put(prog); 1527 - return err; 1528 - } 1529 - 1530 - fdput(f); 1531 - return 0; 1532 - } 1533 - 1534 1502 #define BPF_F_ATTACH_MASK \ 1535 1503 (BPF_F_ALLOW_OVERRIDE | BPF_F_ALLOW_MULTI) 1536 1504 ··· 1504 1540 { 1505 1541 enum bpf_prog_type ptype; 1506 1542 struct bpf_prog *prog; 1507 - struct cgroup *cgrp; 1508 1543 int ret; 1509 1544 1510 1545 if (!capable(CAP_NET_ADMIN)) ··· 1540 1577 ptype = BPF_PROG_TYPE_CGROUP_DEVICE; 1541 1578 break; 1542 1579 case BPF_SK_MSG_VERDICT: 1543 - return sockmap_get_from_fd(attr, BPF_PROG_TYPE_SK_MSG, true); 1580 + ptype = BPF_PROG_TYPE_SK_MSG; 1581 + break; 1544 1582 case BPF_SK_SKB_STREAM_PARSER: 1545 1583 case BPF_SK_SKB_STREAM_VERDICT: 1546 - return sockmap_get_from_fd(attr, BPF_PROG_TYPE_SK_SKB, true); 1584 + ptype = BPF_PROG_TYPE_SK_SKB; 1585 + break; 1547 1586 case BPF_LIRC_MODE2: 1548 - return lirc_prog_attach(attr); 1587 + ptype = BPF_PROG_TYPE_LIRC_MODE2; 1588 + break; 1549 1589 default: 1550 1590 return -EINVAL; 1551 1591 } ··· 1562 1596 return -EINVAL; 1563 1597 
} 1564 1598 1565 - cgrp = cgroup_get_from_fd(attr->target_fd); 1566 - if (IS_ERR(cgrp)) { 1567 - bpf_prog_put(prog); 1568 - return PTR_ERR(cgrp); 1599 + switch (ptype) { 1600 + case BPF_PROG_TYPE_SK_SKB: 1601 + case BPF_PROG_TYPE_SK_MSG: 1602 + ret = sockmap_get_from_fd(attr, ptype, prog); 1603 + break; 1604 + case BPF_PROG_TYPE_LIRC_MODE2: 1605 + ret = lirc_prog_attach(attr, prog); 1606 + break; 1607 + default: 1608 + ret = cgroup_bpf_prog_attach(attr, ptype, prog); 1569 1609 } 1570 1610 1571 - ret = cgroup_bpf_attach(cgrp, prog, attr->attach_type, 1572 - attr->attach_flags); 1573 1611 if (ret) 1574 1612 bpf_prog_put(prog); 1575 - cgroup_put(cgrp); 1576 - 1577 1613 return ret; 1578 1614 } 1579 1615 ··· 1584 1616 static int bpf_prog_detach(const union bpf_attr *attr) 1585 1617 { 1586 1618 enum bpf_prog_type ptype; 1587 - struct bpf_prog *prog; 1588 - struct cgroup *cgrp; 1589 - int ret; 1590 1619 1591 1620 if (!capable(CAP_NET_ADMIN)) 1592 1621 return -EPERM; ··· 1616 1651 ptype = BPF_PROG_TYPE_CGROUP_DEVICE; 1617 1652 break; 1618 1653 case BPF_SK_MSG_VERDICT: 1619 - return sockmap_get_from_fd(attr, BPF_PROG_TYPE_SK_MSG, false); 1654 + return sockmap_get_from_fd(attr, BPF_PROG_TYPE_SK_MSG, NULL); 1620 1655 case BPF_SK_SKB_STREAM_PARSER: 1621 1656 case BPF_SK_SKB_STREAM_VERDICT: 1622 - return sockmap_get_from_fd(attr, BPF_PROG_TYPE_SK_SKB, false); 1657 + return sockmap_get_from_fd(attr, BPF_PROG_TYPE_SK_SKB, NULL); 1623 1658 case BPF_LIRC_MODE2: 1624 1659 return lirc_prog_detach(attr); 1625 1660 default: 1626 1661 return -EINVAL; 1627 1662 } 1628 1663 1629 - cgrp = cgroup_get_from_fd(attr->target_fd); 1630 - if (IS_ERR(cgrp)) 1631 - return PTR_ERR(cgrp); 1632 - 1633 - prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype); 1634 - if (IS_ERR(prog)) 1635 - prog = NULL; 1636 - 1637 - ret = cgroup_bpf_detach(cgrp, prog, attr->attach_type, 0); 1638 - if (prog) 1639 - bpf_prog_put(prog); 1640 - cgroup_put(cgrp); 1641 - return ret; 1664 + return cgroup_bpf_prog_detach(attr, 
ptype); 1642 1665 } 1643 1666 1644 1667 #define BPF_PROG_QUERY_LAST_FIELD query.prog_cnt ··· 1634 1681 static int bpf_prog_query(const union bpf_attr *attr, 1635 1682 union bpf_attr __user *uattr) 1636 1683 { 1637 - struct cgroup *cgrp; 1638 - int ret; 1639 - 1640 1684 if (!capable(CAP_NET_ADMIN)) 1641 1685 return -EPERM; 1642 1686 if (CHECK_ATTR(BPF_PROG_QUERY)) ··· 1661 1711 default: 1662 1712 return -EINVAL; 1663 1713 } 1664 - cgrp = cgroup_get_from_fd(attr->query.target_fd); 1665 - if (IS_ERR(cgrp)) 1666 - return PTR_ERR(cgrp); 1667 - ret = cgroup_bpf_query(cgrp, attr, uattr); 1668 - cgroup_put(cgrp); 1669 - return ret; 1714 + 1715 + return cgroup_bpf_prog_query(attr, uattr); 1670 1716 } 1671 - #endif /* CONFIG_CGROUP_BPF */ 1672 1717 1673 1718 #define BPF_PROG_TEST_RUN_LAST_FIELD test.duration 1674 1719 ··· 2310 2365 case BPF_OBJ_GET: 2311 2366 err = bpf_obj_get(&attr); 2312 2367 break; 2313 - #ifdef CONFIG_CGROUP_BPF 2314 2368 case BPF_PROG_ATTACH: 2315 2369 err = bpf_prog_attach(&attr); 2316 2370 break; ··· 2319 2375 case BPF_PROG_QUERY: 2320 2376 err = bpf_prog_query(&attr, uattr); 2321 2377 break; 2322 - #endif 2323 2378 case BPF_PROG_TEST_RUN: 2324 2379 err = bpf_prog_test_run(&attr, uattr); 2325 2380 break;