net/core/net-sysfs.c @ v5.16-rc8
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net-sysfs.c - network device class and attributes
 *
 * Copyright (c) 2003 Stephen Hemminger <shemminger@osdl.org>
 */

#include <linux/capability.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <linux/sched/isolation.h>
#include <linux/nsproxy.h>
#include <net/sock.h>
#include <net/net_namespace.h>
#include <linux/rtnetlink.h>
#include <linux/vmalloc.h>
#include <linux/export.h>
#include <linux/jiffies.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/cpu.h>

#include "net-sysfs.h"

#ifdef CONFIG_SYSFS
static const char fmt_hex[] = "%#x\n";
static const char fmt_dec[] = "%d\n";
static const char fmt_ulong[] = "%lu\n";
static const char fmt_u64[] = "%llu\n";

static inline int dev_isalive(const struct net_device *dev)
{
	return dev->reg_state <= NETREG_REGISTERED;
}

/* use same locking rules as GIF* ioctl's */
static ssize_t netdev_show(const struct device *dev,
			   struct device_attribute *attr, char *buf,
			   ssize_t (*format)(const struct net_device *, char *))
{
	struct net_device *ndev = to_net_dev(dev);
	ssize_t ret = -EINVAL;

	read_lock(&dev_base_lock);
	if (dev_isalive(ndev))
		ret = (*format)(ndev, buf);
	read_unlock(&dev_base_lock);

	return ret;
}

/* generate a show function for simple field */
#define NETDEVICE_SHOW(field, format_string)				\
static ssize_t format_##field(const struct net_device *dev, char *buf)	\
{									\
	return sprintf(buf, format_string, dev->field);			\
}									\
static ssize_t field##_show(struct device *dev,				\
			    struct device_attribute *attr, char *buf)	\
{									\
	return netdev_show(dev, attr, buf, format_##field);		\
}									\

#define NETDEVICE_SHOW_RO(field, format_string)				\
NETDEVICE_SHOW(field, format_string);					\
static DEVICE_ATTR_RO(field)

#define NETDEVICE_SHOW_RW(field, format_string)				\
NETDEVICE_SHOW(field, format_string);					\
static DEVICE_ATTR_RW(field)

/* use same locking and permission rules as SIF* ioctl's */
static ssize_t netdev_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t len,
			    int (*set)(struct net_device *, unsigned long))
{
	struct net_device *netdev = to_net_dev(dev);
	struct net *net = dev_net(netdev);
	unsigned long new;
	int ret;

	if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	ret = kstrtoul(buf, 0, &new);
	if (ret)
		goto err;

	if (!rtnl_trylock())
		return restart_syscall();

	if (dev_isalive(netdev)) {
		ret = (*set)(netdev, new);
		if (ret == 0)
			ret = len;
	}
	rtnl_unlock();
 err:
	return ret;
}

NETDEVICE_SHOW_RO(dev_id, fmt_hex);
NETDEVICE_SHOW_RO(dev_port, fmt_dec);
NETDEVICE_SHOW_RO(addr_assign_type, fmt_dec);
NETDEVICE_SHOW_RO(addr_len, fmt_dec);
NETDEVICE_SHOW_RO(ifindex, fmt_dec);
NETDEVICE_SHOW_RO(type, fmt_dec);
NETDEVICE_SHOW_RO(link_mode, fmt_dec);
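/* For illustration only (not part of the original file): the invocation
 * NETDEVICE_SHOW_RO(ifindex, fmt_dec) above expands to roughly:
 *
 *	static ssize_t format_ifindex(const struct net_device *dev, char *buf)
 *	{
 *		return sprintf(buf, "%d\n", dev->ifindex);
 *	}
 *	static ssize_t ifindex_show(struct device *dev,
 *				    struct device_attribute *attr, char *buf)
 *	{
 *		return netdev_show(dev, attr, buf, format_ifindex);
 *	}
 *	static DEVICE_ATTR_RO(ifindex);
 *
 * so every read of /sys/class/net/<iface>/ifindex funnels through
 * netdev_show() and its dev_base_lock/dev_isalive() checks.
 */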
static ssize_t iflink_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct net_device *ndev = to_net_dev(dev);

	return sprintf(buf, fmt_dec, dev_get_iflink(ndev));
}
static DEVICE_ATTR_RO(iflink);

static ssize_t format_name_assign_type(const struct net_device *dev, char *buf)
{
	return sprintf(buf, fmt_dec, dev->name_assign_type);
}

static ssize_t name_assign_type_show(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct net_device *ndev = to_net_dev(dev);
	ssize_t ret = -EINVAL;

	if (ndev->name_assign_type != NET_NAME_UNKNOWN)
		ret = netdev_show(dev, attr, buf, format_name_assign_type);

	return ret;
}
static DEVICE_ATTR_RO(name_assign_type);

/* use same locking rules as GIFHWADDR ioctl's */
static ssize_t address_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct net_device *ndev = to_net_dev(dev);
	ssize_t ret = -EINVAL;

	read_lock(&dev_base_lock);
	if (dev_isalive(ndev))
		ret = sysfs_format_mac(buf, ndev->dev_addr, ndev->addr_len);
	read_unlock(&dev_base_lock);
	return ret;
}
static DEVICE_ATTR_RO(address);

static ssize_t broadcast_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct net_device *ndev = to_net_dev(dev);

	if (dev_isalive(ndev))
		return sysfs_format_mac(buf, ndev->broadcast, ndev->addr_len);
	return -EINVAL;
}
static DEVICE_ATTR_RO(broadcast);

static int change_carrier(struct net_device *dev, unsigned long new_carrier)
{
	if (!netif_running(dev))
		return -EINVAL;
	return dev_change_carrier(dev, (bool)new_carrier);
}

static ssize_t carrier_store(struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t len)
{
	struct net_device *netdev = to_net_dev(dev);

	/* The check is also done in change_carrier; this helps returning early
	 * without hitting the trylock/restart in netdev_store.
	 */
	if (!netdev->netdev_ops->ndo_change_carrier)
		return -EOPNOTSUPP;

	return netdev_store(dev, attr, buf, len, change_carrier);
}

static ssize_t carrier_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);

	if (netif_running(netdev))
		return sprintf(buf, fmt_dec, !!netif_carrier_ok(netdev));

	return -EINVAL;
}
static DEVICE_ATTR_RW(carrier);

static ssize_t speed_show(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	int ret = -EINVAL;

	/* The check is also done in __ethtool_get_link_ksettings; this helps
	 * returning early without hitting the trylock/restart below.
	 */
	if (!netdev->ethtool_ops->get_link_ksettings)
		return ret;

	if (!rtnl_trylock())
		return restart_syscall();

	if (netif_running(netdev)) {
		struct ethtool_link_ksettings cmd;

		if (!__ethtool_get_link_ksettings(netdev, &cmd))
			ret = sprintf(buf, fmt_dec, cmd.base.speed);
	}
	rtnl_unlock();
	return ret;
}
static DEVICE_ATTR_RO(speed);
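/* Illustration (hypothetical interface name "eth0"; not in the original):
 *
 *	$ cat /sys/class/net/eth0/speed
 *	1000
 *
 * speed_show() (and duplex_show() below) use rtnl_trylock() rather than
 * rtnl_lock(): if the RTNL mutex is contended, restart_syscall() returns
 * -ERESTARTNOINTR so the read() is transparently restarted instead of
 * sleeping while holding a sysfs active reference.
 */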
static ssize_t duplex_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	int ret = -EINVAL;

	/* The check is also done in __ethtool_get_link_ksettings; this helps
	 * returning early without hitting the trylock/restart below.
	 */
	if (!netdev->ethtool_ops->get_link_ksettings)
		return ret;

	if (!rtnl_trylock())
		return restart_syscall();

	if (netif_running(netdev)) {
		struct ethtool_link_ksettings cmd;

		if (!__ethtool_get_link_ksettings(netdev, &cmd)) {
			const char *duplex;

			switch (cmd.base.duplex) {
			case DUPLEX_HALF:
				duplex = "half";
				break;
			case DUPLEX_FULL:
				duplex = "full";
				break;
			default:
				duplex = "unknown";
				break;
			}
			ret = sprintf(buf, "%s\n", duplex);
		}
	}
	rtnl_unlock();
	return ret;
}
static DEVICE_ATTR_RO(duplex);

static ssize_t testing_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);

	if (netif_running(netdev))
		return sprintf(buf, fmt_dec, !!netif_testing(netdev));

	return -EINVAL;
}
static DEVICE_ATTR_RO(testing);

static ssize_t dormant_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);

	if (netif_running(netdev))
		return sprintf(buf, fmt_dec, !!netif_dormant(netdev));

	return -EINVAL;
}
static DEVICE_ATTR_RO(dormant);

static const char *const operstates[] = {
	"unknown",
	"notpresent", /* currently unused */
	"down",
	"lowerlayerdown",
	"testing",
	"dormant",
	"up"
};

static ssize_t operstate_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	const struct net_device *netdev = to_net_dev(dev);
	unsigned char operstate;

	read_lock(&dev_base_lock);
	operstate = netdev->operstate;
	if (!netif_running(netdev))
		operstate = IF_OPER_DOWN;
	read_unlock(&dev_base_lock);

	if (operstate >= ARRAY_SIZE(operstates))
		return -EINVAL; /* should not happen */

	return sprintf(buf, "%s\n", operstates[operstate]);
}
static DEVICE_ATTR_RO(operstate);

static ssize_t carrier_changes_show(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct net_device *netdev = to_net_dev(dev);

	return sprintf(buf, fmt_dec,
		       atomic_read(&netdev->carrier_up_count) +
		       atomic_read(&netdev->carrier_down_count));
}
static DEVICE_ATTR_RO(carrier_changes);

static ssize_t carrier_up_count_show(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct net_device *netdev = to_net_dev(dev);

	return sprintf(buf, fmt_dec, atomic_read(&netdev->carrier_up_count));
}
static DEVICE_ATTR_RO(carrier_up_count);

static ssize_t carrier_down_count_show(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	struct net_device *netdev = to_net_dev(dev);

	return sprintf(buf, fmt_dec, atomic_read(&netdev->carrier_down_count));
}
static DEVICE_ATTR_RO(carrier_down_count);

/* read-write attributes */

static int change_mtu(struct net_device *dev, unsigned long new_mtu)
{
	return dev_set_mtu(dev, (int)new_mtu);
}

static ssize_t mtu_store(struct device *dev, struct device_attribute *attr,
			 const char *buf, size_t len)
{
	return netdev_store(dev, attr, buf, len, change_mtu);
}
NETDEVICE_SHOW_RW(mtu, fmt_dec);
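/* Illustration (hypothetical "eth0"; not in the original file):
 *
 *	# echo 9000 > /sys/class/net/eth0/mtu
 *
 * walks mtu_store() -> netdev_store() -> change_mtu() -> dev_set_mtu(),
 * with netdev_store() enforcing ns_capable(net->user_ns, CAP_NET_ADMIN)
 * and parsing the string via kstrtoul() before taking the RTNL lock.
 */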
static int change_flags(struct net_device *dev, unsigned long new_flags)
{
	return dev_change_flags(dev, (unsigned int)new_flags, NULL);
}

static ssize_t flags_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t len)
{
	return netdev_store(dev, attr, buf, len, change_flags);
}
NETDEVICE_SHOW_RW(flags, fmt_hex);

static ssize_t tx_queue_len_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t len)
{
	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	return netdev_store(dev, attr, buf, len, dev_change_tx_queue_len);
}
NETDEVICE_SHOW_RW(tx_queue_len, fmt_dec);

static int change_gro_flush_timeout(struct net_device *dev, unsigned long val)
{
	WRITE_ONCE(dev->gro_flush_timeout, val);
	return 0;
}

static ssize_t gro_flush_timeout_store(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t len)
{
	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	return netdev_store(dev, attr, buf, len, change_gro_flush_timeout);
}
NETDEVICE_SHOW_RW(gro_flush_timeout, fmt_ulong);

static int change_napi_defer_hard_irqs(struct net_device *dev, unsigned long val)
{
	WRITE_ONCE(dev->napi_defer_hard_irqs, val);
	return 0;
}

static ssize_t napi_defer_hard_irqs_store(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf, size_t len)
{
	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	return netdev_store(dev, attr, buf, len, change_napi_defer_hard_irqs);
}
NETDEVICE_SHOW_RW(napi_defer_hard_irqs, fmt_dec);
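/* Usage sketch (values are only an example, not a recommendation):
 *
 *	# echo 2      > /sys/class/net/eth0/napi_defer_hard_irqs
 *	# echo 200000 > /sys/class/net/eth0/gro_flush_timeout
 *
 * Together these defer re-enabling NAPI hard interrupts for up to two
 * empty polls, relying on the gro_flush_timeout (in nanoseconds) hrtimer
 * to rearm polling instead; both writers require CAP_NET_ADMIN.
 */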
static ssize_t ifalias_store(struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t len)
{
	struct net_device *netdev = to_net_dev(dev);
	struct net *net = dev_net(netdev);
	size_t count = len;
	ssize_t ret = 0;

	if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	/* ignore trailing newline */
	if (len > 0 && buf[len - 1] == '\n')
		--count;

	if (!rtnl_trylock())
		return restart_syscall();

	if (dev_isalive(netdev)) {
		ret = dev_set_alias(netdev, buf, count);
		if (ret < 0)
			goto err;
		ret = len;
		netdev_state_change(netdev);
	}
err:
	rtnl_unlock();

	return ret;
}

static ssize_t ifalias_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	const struct net_device *netdev = to_net_dev(dev);
	char tmp[IFALIASZ];
	ssize_t ret = 0;

	ret = dev_get_alias(netdev, tmp, sizeof(tmp));
	if (ret > 0)
		ret = sprintf(buf, "%s\n", tmp);
	return ret;
}
static DEVICE_ATTR_RW(ifalias);

static int change_group(struct net_device *dev, unsigned long new_group)
{
	dev_set_group(dev, (int)new_group);
	return 0;
}

static ssize_t group_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t len)
{
	return netdev_store(dev, attr, buf, len, change_group);
}
NETDEVICE_SHOW(group, fmt_dec);
static DEVICE_ATTR(netdev_group, 0644, group_show, group_store);

static int change_proto_down(struct net_device *dev, unsigned long proto_down)
{
	return dev_change_proto_down(dev, (bool)proto_down);
}

static ssize_t proto_down_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t len)
{
	struct net_device *netdev = to_net_dev(dev);

	/* The check is also done in change_proto_down; this helps returning
	 * early without hitting the trylock/restart in netdev_store.
	 */
	if (!netdev->netdev_ops->ndo_change_proto_down)
		return -EOPNOTSUPP;

	return netdev_store(dev, attr, buf, len, change_proto_down);
}
NETDEVICE_SHOW_RW(proto_down, fmt_dec);

static ssize_t phys_port_id_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	ssize_t ret = -EINVAL;

	/* The check is also done in dev_get_phys_port_id; this helps returning
	 * early without hitting the trylock/restart below.
	 */
	if (!netdev->netdev_ops->ndo_get_phys_port_id)
		return -EOPNOTSUPP;

	if (!rtnl_trylock())
		return restart_syscall();

	if (dev_isalive(netdev)) {
		struct netdev_phys_item_id ppid;

		ret = dev_get_phys_port_id(netdev, &ppid);
		if (!ret)
			ret = sprintf(buf, "%*phN\n", ppid.id_len, ppid.id);
	}
	rtnl_unlock();

	return ret;
}
static DEVICE_ATTR_RO(phys_port_id);

static ssize_t phys_port_name_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	ssize_t ret = -EINVAL;

	/* The checks are also done in dev_get_phys_port_name; this helps
	 * returning early without hitting the trylock/restart below.
	 */
	if (!netdev->netdev_ops->ndo_get_phys_port_name &&
	    !netdev->netdev_ops->ndo_get_devlink_port)
		return -EOPNOTSUPP;

	if (!rtnl_trylock())
		return restart_syscall();

	if (dev_isalive(netdev)) {
		char name[IFNAMSIZ];

		ret = dev_get_phys_port_name(netdev, name, sizeof(name));
		if (!ret)
			ret = sprintf(buf, "%s\n", name);
	}
	rtnl_unlock();

	return ret;
}
static DEVICE_ATTR_RO(phys_port_name);
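/* Note (illustrative, not in the original): "%*phN" prints ppid.id as
 * bare hex bytes, so a 6-byte port ID {0x02, 0x42, 0xac, 0x11, 0x00, 0x02}
 * reads back from phys_port_id as "0242ac110002\n".
 */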
static ssize_t phys_switch_id_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	ssize_t ret = -EINVAL;

	/* The checks are also done in dev_get_port_parent_id; this helps
	 * returning early without hitting the trylock/restart below. This works
	 * because recurse is false when calling dev_get_port_parent_id.
	 */
	if (!netdev->netdev_ops->ndo_get_port_parent_id &&
	    !netdev->netdev_ops->ndo_get_devlink_port)
		return -EOPNOTSUPP;

	if (!rtnl_trylock())
		return restart_syscall();

	if (dev_isalive(netdev)) {
		struct netdev_phys_item_id ppid = { };

		ret = dev_get_port_parent_id(netdev, &ppid, false);
		if (!ret)
			ret = sprintf(buf, "%*phN\n", ppid.id_len, ppid.id);
	}
	rtnl_unlock();

	return ret;
}
static DEVICE_ATTR_RO(phys_switch_id);

static ssize_t threaded_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	ssize_t ret = -EINVAL;

	if (!rtnl_trylock())
		return restart_syscall();

	if (dev_isalive(netdev))
		ret = sprintf(buf, fmt_dec, netdev->threaded);

	rtnl_unlock();
	return ret;
}

static int modify_napi_threaded(struct net_device *dev, unsigned long val)
{
	int ret;

	if (list_empty(&dev->napi_list))
		return -EOPNOTSUPP;

	if (val != 0 && val != 1)
		return -EOPNOTSUPP;

	ret = dev_set_threaded(dev, val);

	return ret;
}

static ssize_t threaded_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t len)
{
	return netdev_store(dev, attr, buf, len, modify_napi_threaded);
}
static DEVICE_ATTR_RW(threaded);

static struct attribute *net_class_attrs[] __ro_after_init = {
	&dev_attr_netdev_group.attr,
	&dev_attr_type.attr,
	&dev_attr_dev_id.attr,
	&dev_attr_dev_port.attr,
	&dev_attr_iflink.attr,
	&dev_attr_ifindex.attr,
	&dev_attr_name_assign_type.attr,
	&dev_attr_addr_assign_type.attr,
	&dev_attr_addr_len.attr,
	&dev_attr_link_mode.attr,
	&dev_attr_address.attr,
	&dev_attr_broadcast.attr,
	&dev_attr_speed.attr,
	&dev_attr_duplex.attr,
	&dev_attr_dormant.attr,
	&dev_attr_testing.attr,
	&dev_attr_operstate.attr,
	&dev_attr_carrier_changes.attr,
	&dev_attr_ifalias.attr,
	&dev_attr_carrier.attr,
	&dev_attr_mtu.attr,
	&dev_attr_flags.attr,
	&dev_attr_tx_queue_len.attr,
	&dev_attr_gro_flush_timeout.attr,
	&dev_attr_napi_defer_hard_irqs.attr,
	&dev_attr_phys_port_id.attr,
	&dev_attr_phys_port_name.attr,
	&dev_attr_phys_switch_id.attr,
	&dev_attr_proto_down.attr,
	&dev_attr_carrier_up_count.attr,
	&dev_attr_carrier_down_count.attr,
	&dev_attr_threaded.attr,
	NULL,
};
ATTRIBUTE_GROUPS(net_class);

/* Show a given attribute in the statistics group */
static ssize_t netstat_show(const struct device *d,
			    struct device_attribute *attr, char *buf,
			    unsigned long offset)
{
	struct net_device *dev = to_net_dev(d);
	ssize_t ret = -EINVAL;

	WARN_ON(offset > sizeof(struct rtnl_link_stats64) ||
		offset % sizeof(u64) != 0);

	read_lock(&dev_base_lock);
	if (dev_isalive(dev)) {
		struct rtnl_link_stats64 temp;
		const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp);

		ret = sprintf(buf, fmt_u64, *(u64 *)(((u8 *)stats) + offset));
	}
	read_unlock(&dev_base_lock);
	return ret;
}

/* generate a read-only statistics attribute */
#define NETSTAT_ENTRY(name)						\
static ssize_t name##_show(struct device *d,				\
			   struct device_attribute *attr, char *buf)	\
{									\
	return netstat_show(d, attr, buf,				\
			    offsetof(struct rtnl_link_stats64, name));	\
}									\
static DEVICE_ATTR_RO(name)
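/* For illustration (not in the original): NETSTAT_ENTRY(rx_packets)
 * below becomes an rx_packets_show() wrapper that calls netstat_show()
 * with offsetof(struct rtnl_link_stats64, rx_packets), i.e. every file
 * in the "statistics" group is the same u64 load at a different offset
 * into the snapshot filled by dev_get_stats().
 */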
NETSTAT_ENTRY(rx_packets);
NETSTAT_ENTRY(tx_packets);
NETSTAT_ENTRY(rx_bytes);
NETSTAT_ENTRY(tx_bytes);
NETSTAT_ENTRY(rx_errors);
NETSTAT_ENTRY(tx_errors);
NETSTAT_ENTRY(rx_dropped);
NETSTAT_ENTRY(tx_dropped);
NETSTAT_ENTRY(multicast);
NETSTAT_ENTRY(collisions);
NETSTAT_ENTRY(rx_length_errors);
NETSTAT_ENTRY(rx_over_errors);
NETSTAT_ENTRY(rx_crc_errors);
NETSTAT_ENTRY(rx_frame_errors);
NETSTAT_ENTRY(rx_fifo_errors);
NETSTAT_ENTRY(rx_missed_errors);
NETSTAT_ENTRY(tx_aborted_errors);
NETSTAT_ENTRY(tx_carrier_errors);
NETSTAT_ENTRY(tx_fifo_errors);
NETSTAT_ENTRY(tx_heartbeat_errors);
NETSTAT_ENTRY(tx_window_errors);
NETSTAT_ENTRY(rx_compressed);
NETSTAT_ENTRY(tx_compressed);
NETSTAT_ENTRY(rx_nohandler);

static struct attribute *netstat_attrs[] __ro_after_init = {
	&dev_attr_rx_packets.attr,
	&dev_attr_tx_packets.attr,
	&dev_attr_rx_bytes.attr,
	&dev_attr_tx_bytes.attr,
	&dev_attr_rx_errors.attr,
	&dev_attr_tx_errors.attr,
	&dev_attr_rx_dropped.attr,
	&dev_attr_tx_dropped.attr,
	&dev_attr_multicast.attr,
	&dev_attr_collisions.attr,
	&dev_attr_rx_length_errors.attr,
	&dev_attr_rx_over_errors.attr,
	&dev_attr_rx_crc_errors.attr,
	&dev_attr_rx_frame_errors.attr,
	&dev_attr_rx_fifo_errors.attr,
	&dev_attr_rx_missed_errors.attr,
	&dev_attr_tx_aborted_errors.attr,
	&dev_attr_tx_carrier_errors.attr,
	&dev_attr_tx_fifo_errors.attr,
	&dev_attr_tx_heartbeat_errors.attr,
	&dev_attr_tx_window_errors.attr,
	&dev_attr_rx_compressed.attr,
	&dev_attr_tx_compressed.attr,
	&dev_attr_rx_nohandler.attr,
	NULL
};

static const struct attribute_group netstat_group = {
	.name = "statistics",
	.attrs = netstat_attrs,
};

#if IS_ENABLED(CONFIG_WIRELESS_EXT) || IS_ENABLED(CONFIG_CFG80211)
static struct attribute *wireless_attrs[] = {
	NULL
};

static const struct attribute_group wireless_group = {
	.name = "wireless",
	.attrs = wireless_attrs,
};
#endif

#else /* CONFIG_SYSFS */
#define net_class_groups	NULL
#endif /* CONFIG_SYSFS */

#ifdef CONFIG_SYSFS
#define to_rx_queue_attr(_attr) \
	container_of(_attr, struct rx_queue_attribute, attr)

#define to_rx_queue(obj) container_of(obj, struct netdev_rx_queue, kobj)

static ssize_t rx_queue_attr_show(struct kobject *kobj, struct attribute *attr,
				  char *buf)
{
	const struct rx_queue_attribute *attribute = to_rx_queue_attr(attr);
	struct netdev_rx_queue *queue = to_rx_queue(kobj);

	if (!attribute->show)
		return -EIO;

	return attribute->show(queue, buf);
}

static ssize_t rx_queue_attr_store(struct kobject *kobj, struct attribute *attr,
				   const char *buf, size_t count)
{
	const struct rx_queue_attribute *attribute = to_rx_queue_attr(attr);
	struct netdev_rx_queue *queue = to_rx_queue(kobj);

	if (!attribute->store)
		return -EIO;

	return attribute->store(queue, buf, count);
}

static const struct sysfs_ops rx_queue_sysfs_ops = {
	.show = rx_queue_attr_show,
	.store = rx_queue_attr_store,
};
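/* Dispatch sketch (illustrative): a read of
 * /sys/class/net/<iface>/queues/rx-0/rps_cpus enters sysfs, which calls
 * rx_queue_sysfs_ops.show == rx_queue_attr_show(); that resolves the
 * struct rx_queue_attribute via container_of() and invokes its ->show
 * (show_rps_map() below) with the struct netdev_rx_queue.
 */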
#ifdef CONFIG_RPS
static ssize_t show_rps_map(struct netdev_rx_queue *queue, char *buf)
{
	struct rps_map *map;
	cpumask_var_t mask;
	int i, len;

	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	rcu_read_lock();
	map = rcu_dereference(queue->rps_map);
	if (map)
		for (i = 0; i < map->len; i++)
			cpumask_set_cpu(map->cpus[i], mask);

	len = snprintf(buf, PAGE_SIZE, "%*pb\n", cpumask_pr_args(mask));
	rcu_read_unlock();
	free_cpumask_var(mask);

	return len < PAGE_SIZE ? len : -EINVAL;
}

static ssize_t store_rps_map(struct netdev_rx_queue *queue,
			     const char *buf, size_t len)
{
	struct rps_map *old_map, *map;
	cpumask_var_t mask;
	int err, cpu, i, hk_flags;
	static DEFINE_MUTEX(rps_map_mutex);

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	err = bitmap_parse(buf, len, cpumask_bits(mask), nr_cpumask_bits);
	if (err) {
		free_cpumask_var(mask);
		return err;
	}

	if (!cpumask_empty(mask)) {
		hk_flags = HK_FLAG_DOMAIN | HK_FLAG_WQ;
		cpumask_and(mask, mask, housekeeping_cpumask(hk_flags));
		if (cpumask_empty(mask)) {
			free_cpumask_var(mask);
			return -EINVAL;
		}
	}

	map = kzalloc(max_t(unsigned int,
			    RPS_MAP_SIZE(cpumask_weight(mask)), L1_CACHE_BYTES),
		      GFP_KERNEL);
	if (!map) {
		free_cpumask_var(mask);
		return -ENOMEM;
	}

	i = 0;
	for_each_cpu_and(cpu, mask, cpu_online_mask)
		map->cpus[i++] = cpu;

	if (i) {
		map->len = i;
	} else {
		kfree(map);
		map = NULL;
	}

	mutex_lock(&rps_map_mutex);
	old_map = rcu_dereference_protected(queue->rps_map,
					    mutex_is_locked(&rps_map_mutex));
	rcu_assign_pointer(queue->rps_map, map);

	if (map)
		static_branch_inc(&rps_needed);
	if (old_map)
		static_branch_dec(&rps_needed);

	mutex_unlock(&rps_map_mutex);

	if (old_map)
		kfree_rcu(old_map, rcu);

	free_cpumask_var(mask);
	return len;
}
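/* Illustration (hypothetical "eth0"): steering rx-0 flows to CPUs 0-3:
 *
 *	# echo f > /sys/class/net/eth0/queues/rx-0/rps_cpus
 *
 * bitmap_parse() turns "f" into the cpumask {0,1,2,3}, the mask is
 * intersected with the housekeeping CPUs and cpu_online_mask, and the
 * resulting rps_map is published with rcu_assign_pointer() while the
 * old one is freed after a grace period via kfree_rcu().
 */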
static ssize_t show_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue,
					   char *buf)
{
	struct rps_dev_flow_table *flow_table;
	unsigned long val = 0;

	rcu_read_lock();
	flow_table = rcu_dereference(queue->rps_flow_table);
	if (flow_table)
		val = (unsigned long)flow_table->mask + 1;
	rcu_read_unlock();

	return sprintf(buf, "%lu\n", val);
}

static void rps_dev_flow_table_release(struct rcu_head *rcu)
{
	struct rps_dev_flow_table *table = container_of(rcu,
	    struct rps_dev_flow_table, rcu);
	vfree(table);
}

static ssize_t store_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue,
					    const char *buf, size_t len)
{
	unsigned long mask, count;
	struct rps_dev_flow_table *table, *old_table;
	static DEFINE_SPINLOCK(rps_dev_flow_lock);
	int rc;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	rc = kstrtoul(buf, 0, &count);
	if (rc < 0)
		return rc;

	if (count) {
		mask = count - 1;
		/* mask = roundup_pow_of_two(count) - 1;
		 * without overflows...
		 */
		while ((mask | (mask >> 1)) != mask)
			mask |= (mask >> 1);
		/* On 64 bit arches, must check mask fits in table->mask (u32),
		 * and on 32bit arches, must check
		 * RPS_DEV_FLOW_TABLE_SIZE(mask + 1) doesn't overflow.
		 */
#if BITS_PER_LONG > 32
		if (mask > (unsigned long)(u32)mask)
			return -EINVAL;
#else
		if (mask > (ULONG_MAX - RPS_DEV_FLOW_TABLE_SIZE(1))
				/ sizeof(struct rps_dev_flow)) {
			/* Enforce a limit to prevent overflow */
			return -EINVAL;
		}
#endif
		table = vmalloc(RPS_DEV_FLOW_TABLE_SIZE(mask + 1));
		if (!table)
			return -ENOMEM;

		table->mask = mask;
		for (count = 0; count <= mask; count++)
			table->flows[count].cpu = RPS_NO_CPU;
	} else {
		table = NULL;
	}

	spin_lock(&rps_dev_flow_lock);
	old_table = rcu_dereference_protected(queue->rps_flow_table,
					      lockdep_is_held(&rps_dev_flow_lock));
	rcu_assign_pointer(queue->rps_flow_table, table);
	spin_unlock(&rps_dev_flow_lock);

	if (old_table)
		call_rcu(&old_table->rcu, rps_dev_flow_table_release);

	return len;
}
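/* Worked example (illustrative): writing 100 to rps_flow_cnt gives
 * mask = 99 (0b1100011); the OR-shift loop smears the top bit down to
 * 127 (0b1111111), so table->mask = 127 and 128 flow slots are
 * allocated, i.e. the count is rounded up to a power of two.
 * Writing 0 installs a NULL table and releases the old one via RCU.
 */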
static struct rx_queue_attribute rps_cpus_attribute __ro_after_init
	= __ATTR(rps_cpus, 0644, show_rps_map, store_rps_map);

static struct rx_queue_attribute rps_dev_flow_table_cnt_attribute __ro_after_init
	= __ATTR(rps_flow_cnt, 0644,
		 show_rps_dev_flow_table_cnt, store_rps_dev_flow_table_cnt);
#endif /* CONFIG_RPS */

static struct attribute *rx_queue_default_attrs[] __ro_after_init = {
#ifdef CONFIG_RPS
	&rps_cpus_attribute.attr,
	&rps_dev_flow_table_cnt_attribute.attr,
#endif
	NULL
};
ATTRIBUTE_GROUPS(rx_queue_default);

static void rx_queue_release(struct kobject *kobj)
{
	struct netdev_rx_queue *queue = to_rx_queue(kobj);
#ifdef CONFIG_RPS
	struct rps_map *map;
	struct rps_dev_flow_table *flow_table;

	map = rcu_dereference_protected(queue->rps_map, 1);
	if (map) {
		RCU_INIT_POINTER(queue->rps_map, NULL);
		kfree_rcu(map, rcu);
	}

	flow_table = rcu_dereference_protected(queue->rps_flow_table, 1);
	if (flow_table) {
		RCU_INIT_POINTER(queue->rps_flow_table, NULL);
		call_rcu(&flow_table->rcu, rps_dev_flow_table_release);
	}
#endif

	memset(kobj, 0, sizeof(*kobj));
	dev_put(queue->dev);
}

static const void *rx_queue_namespace(struct kobject *kobj)
{
	struct netdev_rx_queue *queue = to_rx_queue(kobj);
	struct device *dev = &queue->dev->dev;
	const void *ns = NULL;

	if (dev->class && dev->class->ns_type)
		ns = dev->class->namespace(dev);

	return ns;
}

static void rx_queue_get_ownership(struct kobject *kobj,
				   kuid_t *uid, kgid_t *gid)
{
	const struct net *net = rx_queue_namespace(kobj);

	net_ns_get_ownership(net, uid, gid);
}

static struct kobj_type rx_queue_ktype __ro_after_init = {
	.sysfs_ops = &rx_queue_sysfs_ops,
	.release = rx_queue_release,
	.default_groups = rx_queue_default_groups,
	.namespace = rx_queue_namespace,
	.get_ownership = rx_queue_get_ownership,
};

static int rx_queue_add_kobject(struct net_device *dev, int index)
{
	struct netdev_rx_queue *queue = dev->_rx + index;
	struct kobject *kobj = &queue->kobj;
	int error = 0;

	/* Kobject_put later will trigger rx_queue_release call which
	 * decreases dev refcount: Take that reference here
	 */
	dev_hold(queue->dev);

	kobj->kset = dev->queues_kset;
	error = kobject_init_and_add(kobj, &rx_queue_ktype, NULL,
				     "rx-%u", index);
	if (error)
		goto err;

	if (dev->sysfs_rx_queue_group) {
		error = sysfs_create_group(kobj, dev->sysfs_rx_queue_group);
		if (error)
			goto err;
	}

	kobject_uevent(kobj, KOBJ_ADD);

	return error;

err:
	kobject_put(kobj);
	return error;
}

static int rx_queue_change_owner(struct net_device *dev, int index, kuid_t kuid,
				 kgid_t kgid)
{
	struct netdev_rx_queue *queue = dev->_rx + index;
	struct kobject *kobj = &queue->kobj;
	int error;

	error = sysfs_change_owner(kobj, kuid, kgid);
	if (error)
		return error;

	if (dev->sysfs_rx_queue_group)
		error = sysfs_group_change_owner(
			kobj, dev->sysfs_rx_queue_group, kuid, kgid);

	return error;
}
#endif /* CONFIG_SYSFS */

int
net_rx_queue_update_kobjects(struct net_device *dev, int old_num, int new_num)
{
#ifdef CONFIG_SYSFS
	int i;
	int error = 0;

#ifndef CONFIG_RPS
	if (!dev->sysfs_rx_queue_group)
		return 0;
#endif
	for (i = old_num; i < new_num; i++) {
		error = rx_queue_add_kobject(dev, i);
		if (error) {
			new_num = old_num;
			break;
		}
	}

	while (--i >= new_num) {
		struct kobject *kobj = &dev->_rx[i].kobj;

		if (!refcount_read(&dev_net(dev)->ns.count))
			kobj->uevent_suppress = 1;
		if (dev->sysfs_rx_queue_group)
			sysfs_remove_group(kobj, dev->sysfs_rx_queue_group);
		kobject_put(kobj);
	}

	return error;
#else
	return 0;
#endif
}

static int net_rx_queue_change_owner(struct net_device *dev, int num,
				     kuid_t kuid, kgid_t kgid)
{
#ifdef CONFIG_SYSFS
	int error = 0;
	int i;

#ifndef CONFIG_RPS
	if (!dev->sysfs_rx_queue_group)
		return 0;
#endif
	for (i = 0; i < num; i++) {
		error = rx_queue_change_owner(dev, i, kuid, kgid);
		if (error)
			break;
	}

	return error;
#else
	return 0;
#endif
}
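/* Lifecycle sketch (illustrative): when a driver shrinks
 * real_num_rx_queues from 4 to 2, net_rx_queue_update_kobjects(dev, 4, 2)
 * removes rx-3 and rx-2; growing from 2 back to 4 re-adds them. Each
 * rx-N kobject holds a reference on the net_device (dev_hold() in
 * rx_queue_add_kobject()) that is dropped in rx_queue_release().
 */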
#ifdef CONFIG_SYSFS
/*
 * netdev_queue sysfs structures and functions.
 */
struct netdev_queue_attribute {
	struct attribute attr;
	ssize_t (*show)(struct netdev_queue *queue, char *buf);
	ssize_t (*store)(struct netdev_queue *queue,
			 const char *buf, size_t len);
};
#define to_netdev_queue_attr(_attr) \
	container_of(_attr, struct netdev_queue_attribute, attr)

#define to_netdev_queue(obj) container_of(obj, struct netdev_queue, kobj)

static ssize_t netdev_queue_attr_show(struct kobject *kobj,
				      struct attribute *attr, char *buf)
{
	const struct netdev_queue_attribute *attribute
		= to_netdev_queue_attr(attr);
	struct netdev_queue *queue = to_netdev_queue(kobj);

	if (!attribute->show)
		return -EIO;

	return attribute->show(queue, buf);
}

static ssize_t netdev_queue_attr_store(struct kobject *kobj,
				       struct attribute *attr,
				       const char *buf, size_t count)
{
	const struct netdev_queue_attribute *attribute
		= to_netdev_queue_attr(attr);
	struct netdev_queue *queue = to_netdev_queue(kobj);

	if (!attribute->store)
		return -EIO;

	return attribute->store(queue, buf, count);
}

static const struct sysfs_ops netdev_queue_sysfs_ops = {
	.show = netdev_queue_attr_show,
	.store = netdev_queue_attr_store,
};

static ssize_t tx_timeout_show(struct netdev_queue *queue, char *buf)
{
	unsigned long trans_timeout;

	spin_lock_irq(&queue->_xmit_lock);
	trans_timeout = queue->trans_timeout;
	spin_unlock_irq(&queue->_xmit_lock);

	return sprintf(buf, fmt_ulong, trans_timeout);
}

static unsigned int get_netdev_queue_index(struct netdev_queue *queue)
{
	struct net_device *dev = queue->dev;
	unsigned int i;

	i = queue - dev->_tx;
	BUG_ON(i >= dev->num_tx_queues);

	return i;
}

static ssize_t traffic_class_show(struct netdev_queue *queue,
				  char *buf)
{
	struct net_device *dev = queue->dev;
	int num_tc, tc;
	int index;

	if (!netif_is_multiqueue(dev))
		return -ENOENT;

	if (!rtnl_trylock())
		return restart_syscall();

	index = get_netdev_queue_index(queue);

	/* If queue belongs to subordinate dev use its TC mapping */
	dev = netdev_get_tx_queue(dev, index)->sb_dev ? : dev;

	num_tc = dev->num_tc;
	tc = netdev_txq_to_tc(dev, index);

	rtnl_unlock();

	if (tc < 0)
		return -EINVAL;

	/* We can report the traffic class one of two ways:
	 * Subordinate device traffic classes are reported with the traffic
	 * class first, and then the subordinate class so for example TC0 on
	 * subordinate device 2 will be reported as "0-2". If the queue
	 * belongs to the root device it will be reported with just the
	 * traffic class, so just "0" for TC 0 for example.
	 */
	return num_tc < 0 ? sprintf(buf, "%d%d\n", tc, num_tc) :
			    sprintf(buf, "%d\n", tc);
}
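/* Worked example (illustrative): num_tc is negative for queues owned by
 * a subordinate device, so tc = 0 on subordinate device 2 (num_tc = -2)
 * is printed by sprintf(buf, "%d%d\n", 0, -2) as "0-2", while a root
 * device queue in TC 0 prints plain "0".
 */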
#ifdef CONFIG_XPS
static ssize_t tx_maxrate_show(struct netdev_queue *queue,
			       char *buf)
{
	return sprintf(buf, "%lu\n", queue->tx_maxrate);
}

static ssize_t tx_maxrate_store(struct netdev_queue *queue,
				const char *buf, size_t len)
{
	struct net_device *dev = queue->dev;
	int err, index = get_netdev_queue_index(queue);
	u32 rate = 0;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	/* The check is also done later; this helps returning early without
	 * hitting the trylock/restart below.
	 */
	if (!dev->netdev_ops->ndo_set_tx_maxrate)
		return -EOPNOTSUPP;

	err = kstrtou32(buf, 10, &rate);
	if (err < 0)
		return err;

	if (!rtnl_trylock())
		return restart_syscall();

	err = -EOPNOTSUPP;
	if (dev->netdev_ops->ndo_set_tx_maxrate)
		err = dev->netdev_ops->ndo_set_tx_maxrate(dev, index, rate);

	rtnl_unlock();
	if (!err) {
		queue->tx_maxrate = rate;
		return len;
	}
	return err;
}

static struct netdev_queue_attribute queue_tx_maxrate __ro_after_init
	= __ATTR_RW(tx_maxrate);
#endif

static struct netdev_queue_attribute queue_trans_timeout __ro_after_init
	= __ATTR_RO(tx_timeout);

static struct netdev_queue_attribute queue_traffic_class __ro_after_init
	= __ATTR_RO(traffic_class);

#ifdef CONFIG_BQL
/*
 * Byte queue limits sysfs structures and functions.
 */
static ssize_t bql_show(char *buf, unsigned int value)
{
	return sprintf(buf, "%u\n", value);
}

static ssize_t bql_set(const char *buf, const size_t count,
		       unsigned int *pvalue)
{
	unsigned int value;
	int err;

	if (!strcmp(buf, "max") || !strcmp(buf, "max\n")) {
		value = DQL_MAX_LIMIT;
	} else {
		err = kstrtouint(buf, 10, &value);
		if (err < 0)
			return err;
		if (value > DQL_MAX_LIMIT)
			return -EINVAL;
	}

	*pvalue = value;

	return count;
}

static ssize_t bql_show_hold_time(struct netdev_queue *queue,
				  char *buf)
{
	struct dql *dql = &queue->dql;

	return sprintf(buf, "%u\n", jiffies_to_msecs(dql->slack_hold_time));
}

static ssize_t bql_set_hold_time(struct netdev_queue *queue,
				 const char *buf, size_t len)
{
	struct dql *dql = &queue->dql;
	unsigned int value;
	int err;

	err = kstrtouint(buf, 10, &value);
	if (err < 0)
		return err;

	dql->slack_hold_time = msecs_to_jiffies(value);

	return len;
}

static struct netdev_queue_attribute bql_hold_time_attribute __ro_after_init
	= __ATTR(hold_time, 0644,
		 bql_show_hold_time, bql_set_hold_time);

static ssize_t bql_show_inflight(struct netdev_queue *queue,
				 char *buf)
{
	struct dql *dql = &queue->dql;

	return sprintf(buf, "%u\n", dql->num_queued - dql->num_completed);
}

static struct netdev_queue_attribute bql_inflight_attribute __ro_after_init =
	__ATTR(inflight, 0444, bql_show_inflight, NULL);
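/* Usage sketch (illustrative, hypothetical "eth0"): capping BQL on tx-0:
 *
 *	# echo 100000 > /sys/class/net/eth0/queues/tx-0/byte_queue_limits/limit_max
 *	# cat /sys/class/net/eth0/queues/tx-0/byte_queue_limits/inflight
 *
 * bql_set() also accepts the literal string "max" for DQL_MAX_LIMIT;
 * inflight is simply num_queued - num_completed, the bytes currently
 * posted to the ring but not yet reported completed.
 */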
#define BQL_ATTR(NAME, FIELD)						\
static ssize_t bql_show_ ## NAME(struct netdev_queue *queue,		\
				 char *buf)				\
{									\
	return bql_show(buf, queue->dql.FIELD);				\
}									\
									\
static ssize_t bql_set_ ## NAME(struct netdev_queue *queue,		\
				const char *buf, size_t len)		\
{									\
	return bql_set(buf, len, &queue->dql.FIELD);			\
}									\
									\
static struct netdev_queue_attribute bql_ ## NAME ## _attribute __ro_after_init \
	= __ATTR(NAME, 0644,						\
		 bql_show_ ## NAME, bql_set_ ## NAME)

BQL_ATTR(limit, limit);
BQL_ATTR(limit_max, max_limit);
BQL_ATTR(limit_min, min_limit);

static struct attribute *dql_attrs[] __ro_after_init = {
	&bql_limit_attribute.attr,
	&bql_limit_max_attribute.attr,
	&bql_limit_min_attribute.attr,
	&bql_hold_time_attribute.attr,
	&bql_inflight_attribute.attr,
	NULL
};

static const struct attribute_group dql_group = {
	.name = "byte_queue_limits",
	.attrs = dql_attrs,
};
#endif /* CONFIG_BQL */

#ifdef CONFIG_XPS
static ssize_t xps_queue_show(struct net_device *dev, unsigned int index,
			      int tc, char *buf, enum xps_map_type type)
{
	struct xps_dev_maps *dev_maps;
	unsigned long *mask;
	unsigned int nr_ids;
	int j, len;

	rcu_read_lock();
	dev_maps = rcu_dereference(dev->xps_maps[type]);

	/* Default to nr_cpu_ids/dev->num_rx_queues and do not just return 0
	 * when dev_maps hasn't been allocated yet, to be backward compatible.
	 */
	nr_ids = dev_maps ? dev_maps->nr_ids :
		 (type == XPS_CPUS ? nr_cpu_ids : dev->num_rx_queues);

	mask = bitmap_zalloc(nr_ids, GFP_NOWAIT);
	if (!mask) {
		rcu_read_unlock();
		return -ENOMEM;
	}

	if (!dev_maps || tc >= dev_maps->num_tc)
		goto out_no_maps;

	for (j = 0; j < nr_ids; j++) {
		int i, tci = j * dev_maps->num_tc + tc;
		struct xps_map *map;

		map = rcu_dereference(dev_maps->attr_map[tci]);
		if (!map)
			continue;

		for (i = map->len; i--;) {
			if (map->queues[i] == index) {
				set_bit(j, mask);
				break;
			}
		}
	}
out_no_maps:
	rcu_read_unlock();

	len = bitmap_print_to_pagebuf(false, buf, mask, nr_ids);
	bitmap_free(mask);

	return len < PAGE_SIZE ? len : -EINVAL;
}
static ssize_t xps_cpus_show(struct netdev_queue *queue, char *buf)
{
	struct net_device *dev = queue->dev;
	unsigned int index;
	int len, tc;

	if (!netif_is_multiqueue(dev))
		return -ENOENT;

	index = get_netdev_queue_index(queue);

	if (!rtnl_trylock())
		return restart_syscall();

	/* If queue belongs to subordinate dev use its map */
	dev = netdev_get_tx_queue(dev, index)->sb_dev ? : dev;

	tc = netdev_txq_to_tc(dev, index);
	if (tc < 0) {
		rtnl_unlock();
		return -EINVAL;
	}

	/* Make sure the subordinate device can't be freed */
	get_device(&dev->dev);
	rtnl_unlock();

	len = xps_queue_show(dev, index, tc, buf, XPS_CPUS);

	put_device(&dev->dev);
	return len;
}

static ssize_t xps_cpus_store(struct netdev_queue *queue,
			      const char *buf, size_t len)
{
	struct net_device *dev = queue->dev;
	unsigned int index;
	cpumask_var_t mask;
	int err;

	if (!netif_is_multiqueue(dev))
		return -ENOENT;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	index = get_netdev_queue_index(queue);

	err = bitmap_parse(buf, len, cpumask_bits(mask), nr_cpumask_bits);
	if (err) {
		free_cpumask_var(mask);
		return err;
	}

	if (!rtnl_trylock()) {
		free_cpumask_var(mask);
		return restart_syscall();
	}

	err = netif_set_xps_queue(dev, mask, index);
	rtnl_unlock();

	free_cpumask_var(mask);

	return err ? : len;
}

static struct netdev_queue_attribute xps_cpus_attribute __ro_after_init
	= __ATTR_RW(xps_cpus);
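/* Illustration (hypothetical "eth0"): binding tx-1 to CPU 1 only:
 *
 *	# echo 2 > /sys/class/net/eth0/queues/tx-1/xps_cpus
 *
 * The hex mask "2" selects CPU 1; netif_set_xps_queue() then records
 * that packets produced on CPU 1 should prefer tx queue 1.
 */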
static ssize_t xps_rxqs_show(struct netdev_queue *queue, char *buf)
{
	struct net_device *dev = queue->dev;
	unsigned int index;
	int tc;

	index = get_netdev_queue_index(queue);

	if (!rtnl_trylock())
		return restart_syscall();

	tc = netdev_txq_to_tc(dev, index);
	rtnl_unlock();
	if (tc < 0)
		return -EINVAL;

	return xps_queue_show(dev, index, tc, buf, XPS_RXQS);
}

static ssize_t xps_rxqs_store(struct netdev_queue *queue, const char *buf,
			      size_t len)
{
	struct net_device *dev = queue->dev;
	struct net *net = dev_net(dev);
	unsigned long *mask;
	unsigned int index;
	int err;

	if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	mask = bitmap_zalloc(dev->num_rx_queues, GFP_KERNEL);
	if (!mask)
		return -ENOMEM;

	index = get_netdev_queue_index(queue);

	err = bitmap_parse(buf, len, mask, dev->num_rx_queues);
	if (err) {
		bitmap_free(mask);
		return err;
	}

	if (!rtnl_trylock()) {
		bitmap_free(mask);
		return restart_syscall();
	}

	cpus_read_lock();
	err = __netif_set_xps_queue(dev, mask, index, XPS_RXQS);
	cpus_read_unlock();

	rtnl_unlock();

	bitmap_free(mask);
	return err ? : len;
}

static struct netdev_queue_attribute xps_rxqs_attribute __ro_after_init
	= __ATTR_RW(xps_rxqs);
#endif /* CONFIG_XPS */

static struct attribute *netdev_queue_default_attrs[] __ro_after_init = {
	&queue_trans_timeout.attr,
	&queue_traffic_class.attr,
#ifdef CONFIG_XPS
	&xps_cpus_attribute.attr,
	&xps_rxqs_attribute.attr,
	&queue_tx_maxrate.attr,
#endif
	NULL
};
ATTRIBUTE_GROUPS(netdev_queue_default);

static void netdev_queue_release(struct kobject *kobj)
{
	struct netdev_queue *queue = to_netdev_queue(kobj);

	memset(kobj, 0, sizeof(*kobj));
	dev_put(queue->dev);
}

static const void *netdev_queue_namespace(struct kobject *kobj)
{
	struct netdev_queue *queue = to_netdev_queue(kobj);
	struct device *dev = &queue->dev->dev;
	const void *ns = NULL;

	if (dev->class && dev->class->ns_type)
		ns = dev->class->namespace(dev);

	return ns;
}

static void netdev_queue_get_ownership(struct kobject *kobj,
				       kuid_t *uid, kgid_t *gid)
{
	const struct net *net = netdev_queue_namespace(kobj);

	net_ns_get_ownership(net, uid, gid);
}

static struct kobj_type netdev_queue_ktype __ro_after_init = {
	.sysfs_ops = &netdev_queue_sysfs_ops,
	.release = netdev_queue_release,
	.default_groups = netdev_queue_default_groups,
	.namespace = netdev_queue_namespace,
	.get_ownership = netdev_queue_get_ownership,
};

static int netdev_queue_add_kobject(struct net_device *dev, int index)
{
	struct netdev_queue *queue = dev->_tx + index;
	struct kobject *kobj = &queue->kobj;
	int error = 0;

	/* Kobject_put later will trigger netdev_queue_release call
	 * which decreases dev refcount: Take that reference here
	 */
	dev_hold(queue->dev);

	kobj->kset = dev->queues_kset;
	error = kobject_init_and_add(kobj, &netdev_queue_ktype, NULL,
				     "tx-%u", index);
	if (error)
		goto err;

#ifdef CONFIG_BQL
	error = sysfs_create_group(kobj, &dql_group);
	if (error)
		goto err;
#endif

	kobject_uevent(kobj, KOBJ_ADD);
	return 0;

err:
	kobject_put(kobj);
	return error;
}

static int tx_queue_change_owner(struct net_device *ndev, int index,
				 kuid_t kuid, kgid_t kgid)
{
	struct netdev_queue *queue = ndev->_tx + index;
	struct kobject *kobj = &queue->kobj;
	int error;

	error = sysfs_change_owner(kobj, kuid, kgid);
	if (error)
		return error;

#ifdef CONFIG_BQL
	error = sysfs_group_change_owner(kobj, &dql_group, kuid, kgid);
#endif
	return error;
}
#endif /* CONFIG_SYSFS */

int
netdev_queue_update_kobjects(struct net_device *dev, int old_num, int new_num)
{
#ifdef CONFIG_SYSFS
	int i;
	int error = 0;

	for (i = old_num; i < new_num; i++) {
		error = netdev_queue_add_kobject(dev, i);
		if (error) {
			new_num = old_num;
			break;
		}
	}

	while (--i >= new_num) {
		struct netdev_queue *queue = dev->_tx + i;

		if (!refcount_read(&dev_net(dev)->ns.count))
			queue->kobj.uevent_suppress = 1;
#ifdef CONFIG_BQL
		sysfs_remove_group(&queue->kobj, &dql_group);
#endif
		kobject_put(&queue->kobj);
	}

	return error;
#else
	return 0;
#endif /* CONFIG_SYSFS */
}
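/* Resulting layout (illustrative, per tx queue, hypothetical "eth0"):
 *
 *	/sys/class/net/eth0/queues/tx-0/
 *		tx_timeout  traffic_class  tx_maxrate  xps_cpus  xps_rxqs
 *		byte_queue_limits/{limit,limit_max,limit_min,hold_time,inflight}
 *
 * with the xps_* and tx_maxrate entries present only under CONFIG_XPS
 * and the byte_queue_limits group only under CONFIG_BQL.
 */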
static int net_tx_queue_change_owner(struct net_device *dev, int num,
				     kuid_t kuid, kgid_t kgid)
{
#ifdef CONFIG_SYSFS
	int error = 0;
	int i;

	for (i = 0; i < num; i++) {
		error = tx_queue_change_owner(dev, i, kuid, kgid);
		if (error)
			break;
	}

	return error;
#else
	return 0;
#endif /* CONFIG_SYSFS */
}

static int register_queue_kobjects(struct net_device *dev)
{
	int error = 0, txq = 0, rxq = 0, real_rx = 0, real_tx = 0;

#ifdef CONFIG_SYSFS
	dev->queues_kset = kset_create_and_add("queues",
					       NULL, &dev->dev.kobj);
	if (!dev->queues_kset)
		return -ENOMEM;
	real_rx = dev->real_num_rx_queues;
#endif
	real_tx = dev->real_num_tx_queues;

	error = net_rx_queue_update_kobjects(dev, 0, real_rx);
	if (error)
		goto error;
	rxq = real_rx;

	error = netdev_queue_update_kobjects(dev, 0, real_tx);
	if (error)
		goto error;
	txq = real_tx;

	return 0;

error:
	netdev_queue_update_kobjects(dev, txq, 0);
	net_rx_queue_update_kobjects(dev, rxq, 0);
#ifdef CONFIG_SYSFS
	kset_unregister(dev->queues_kset);
#endif
	return error;
}

static int queue_change_owner(struct net_device *ndev, kuid_t kuid, kgid_t kgid)
{
	int error = 0, real_rx = 0, real_tx = 0;

#ifdef CONFIG_SYSFS
	if (ndev->queues_kset) {
		error = sysfs_change_owner(&ndev->queues_kset->kobj, kuid, kgid);
		if (error)
			return error;
	}
	real_rx = ndev->real_num_rx_queues;
#endif
	real_tx = ndev->real_num_tx_queues;

	error = net_rx_queue_change_owner(ndev, real_rx, kuid, kgid);
	if (error)
		return error;

	error = net_tx_queue_change_owner(ndev, real_tx, kuid, kgid);
	if (error)
		return error;

	return 0;
}

static void remove_queue_kobjects(struct net_device *dev)
{
	int real_rx = 0, real_tx = 0;

#ifdef CONFIG_SYSFS
	real_rx = dev->real_num_rx_queues;
#endif
	real_tx = dev->real_num_tx_queues;

	net_rx_queue_update_kobjects(dev, real_rx, 0);
	netdev_queue_update_kobjects(dev, real_tx, 0);
#ifdef CONFIG_SYSFS
	kset_unregister(dev->queues_kset);
#endif
}

static bool net_current_may_mount(void)
{
	struct net *net = current->nsproxy->net_ns;

	return ns_capable(net->user_ns, CAP_SYS_ADMIN);
}

static void *net_grab_current_ns(void)
{
	struct net *ns = current->nsproxy->net_ns;
#ifdef CONFIG_NET_NS
	if (ns)
		refcount_inc(&ns->passive);
#endif
	return ns;
}

static const void *net_initial_ns(void)
{
	return &init_net;
}

static const void *net_netlink_ns(struct sock *sk)
{
	return sock_net(sk);
}

const struct kobj_ns_type_operations net_ns_type_operations = {
	.type = KOBJ_NS_TYPE_NET,
	.current_may_mount = net_current_may_mount,
	.grab_current_ns = net_grab_current_ns,
	.netlink_ns = net_netlink_ns,
	.initial_ns = net_initial_ns,
	.drop_ns = net_drop_ns,
};
EXPORT_SYMBOL_GPL(net_ns_type_operations);
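/* Example uevent payload (illustrative): when a device named "eth0" with
 * ifindex 2 is registered, netdev_uevent() below contributes
 *
 *	INTERFACE=eth0
 *	IFINDEX=2
 *
 * to the KOBJ_ADD event that udev and friends consume.
 */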
static int netdev_uevent(struct device *d, struct kobj_uevent_env *env)
{
	struct net_device *dev = to_net_dev(d);
	int retval;

	/* pass interface to uevent. */
	retval = add_uevent_var(env, "INTERFACE=%s", dev->name);
	if (retval)
		goto exit;

	/* pass ifindex to uevent.
	 * ifindex is useful as it won't change (interface name may change)
	 * and is what RtNetlink uses natively.
	 */
	retval = add_uevent_var(env, "IFINDEX=%d", dev->ifindex);

exit:
	return retval;
}

/*
 * netdev_release -- destroy and free a dead device.
 * Called when last reference to device kobject is gone.
 */
static void netdev_release(struct device *d)
{
	struct net_device *dev = to_net_dev(d);

	BUG_ON(dev->reg_state != NETREG_RELEASED);

	/* no need to wait for rcu grace period:
	 * device is dead and about to be freed.
	 */
	kfree(rcu_access_pointer(dev->ifalias));
	netdev_freemem(dev);
}

static const void *net_namespace(struct device *d)
{
	struct net_device *dev = to_net_dev(d);

	return dev_net(dev);
}

static void net_get_ownership(struct device *d, kuid_t *uid, kgid_t *gid)
{
	struct net_device *dev = to_net_dev(d);
	const struct net *net = dev_net(dev);

	net_ns_get_ownership(net, uid, gid);
}

static struct class net_class __ro_after_init = {
	.name = "net",
	.dev_release = netdev_release,
	.dev_groups = net_class_groups,
	.dev_uevent = netdev_uevent,
	.ns_type = &net_ns_type_operations,
	.namespace = net_namespace,
	.get_ownership = net_get_ownership,
};

#ifdef CONFIG_OF
static int of_dev_node_match(struct device *dev, const void *data)
{
	for (; dev; dev = dev->parent) {
		if (dev->of_node == data)
			return 1;
	}

	return 0;
}

/*
 * of_find_net_device_by_node - lookup the net device for the device node
 * @np: OF device node
 *
 * Looks up the net_device structure corresponding with the device node.
 * If successful, returns a pointer to the net_device with the embedded
 * struct device refcount incremented by one, or NULL on failure. The
 * refcount must be dropped when done with the net_device.
 */
struct net_device *of_find_net_device_by_node(struct device_node *np)
{
	struct device *dev;

	dev = class_find_device(&net_class, NULL, np, of_dev_node_match);
	if (!dev)
		return NULL;

	return to_net_dev(dev);
}
EXPORT_SYMBOL(of_find_net_device_by_node);
#endif

/* Delete sysfs entries but hold kobject reference until after all
 * netdev references are gone.
 */
void netdev_unregister_kobject(struct net_device *ndev)
{
	struct device *dev = &ndev->dev;

	if (!refcount_read(&dev_net(ndev)->ns.count))
		dev_set_uevent_suppress(dev, 1);

	kobject_get(&dev->kobj);

	remove_queue_kobjects(ndev);

	pm_runtime_set_memalloc_noio(dev, false);

	device_del(dev);
}
/* Create sysfs entries for network device. */
int netdev_register_kobject(struct net_device *ndev)
{
	struct device *dev = &ndev->dev;
	const struct attribute_group **groups = ndev->sysfs_groups;
	int error = 0;

	device_initialize(dev);
	dev->class = &net_class;
	dev->platform_data = ndev;
	dev->groups = groups;

	dev_set_name(dev, "%s", ndev->name);

#ifdef CONFIG_SYSFS
	/* Allow for a device specific group */
	if (*groups)
		groups++;

	*groups++ = &netstat_group;

#if IS_ENABLED(CONFIG_WIRELESS_EXT) || IS_ENABLED(CONFIG_CFG80211)
	if (ndev->ieee80211_ptr)
		*groups++ = &wireless_group;
#if IS_ENABLED(CONFIG_WIRELESS_EXT)
	else if (ndev->wireless_handlers)
		*groups++ = &wireless_group;
#endif
#endif
#endif /* CONFIG_SYSFS */

	error = device_add(dev);
	if (error)
		return error;

	error = register_queue_kobjects(ndev);
	if (error) {
		device_del(dev);
		return error;
	}

	pm_runtime_set_memalloc_noio(dev, true);

	return error;
}

/* Change owner for sysfs entries when moving network devices across network
 * namespaces owned by different user namespaces.
 */
int netdev_change_owner(struct net_device *ndev, const struct net *net_old,
			const struct net *net_new)
{
	kuid_t old_uid = GLOBAL_ROOT_UID, new_uid = GLOBAL_ROOT_UID;
	kgid_t old_gid = GLOBAL_ROOT_GID, new_gid = GLOBAL_ROOT_GID;
	struct device *dev = &ndev->dev;
	int error;

	net_ns_get_ownership(net_old, &old_uid, &old_gid);
	net_ns_get_ownership(net_new, &new_uid, &new_gid);

	/* The network namespace was changed but the owning user namespace is
	 * identical so there's no need to change the owner of sysfs entries.
	 */
	if (uid_eq(old_uid, new_uid) && gid_eq(old_gid, new_gid))
		return 0;

	error = device_change_owner(dev, new_uid, new_gid);
	if (error)
		return error;

	error = queue_change_owner(ndev, new_uid, new_gid);
	if (error)
		return error;

	return 0;
}

int netdev_class_create_file_ns(const struct class_attribute *class_attr,
				const void *ns)
{
	return class_create_file_ns(&net_class, class_attr, ns);
}
EXPORT_SYMBOL(netdev_class_create_file_ns);

void netdev_class_remove_file_ns(const struct class_attribute *class_attr,
				 const void *ns)
{
	class_remove_file_ns(&net_class, class_attr, ns);
}
EXPORT_SYMBOL(netdev_class_remove_file_ns);

int __init netdev_kobject_init(void)
{
	kobj_ns_type_register(&net_ns_type_operations);
	return class_register(&net_class);
}