Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'perf-tools' into perf-tools-next

To get some fixes in the perf test and JSON metrics into the development
branch.

Signed-off-by: Namhyung Kim <namhyung@kernel.org>

+2055 -1016
+13 -3
Documentation/filesystems/overlayfs.rst
··· 145 145 that files have been removed. This is done using whiteouts and opaque 146 146 directories (non-directories are always opaque). 147 147 148 - A whiteout is created as a character device with 0/0 device number. 148 + A whiteout is created as a character device with 0/0 device number or 149 + as a zero-size regular file with the xattr "trusted.overlay.whiteout". 150 + 149 151 When a whiteout is found in the upper level of a merged directory, any 150 152 matching name in the lower level is ignored, and the whiteout itself 151 153 is also hidden. ··· 155 153 A directory is made opaque by setting the xattr "trusted.overlay.opaque" 156 154 to "y". Where the upper filesystem contains an opaque directory, any 157 155 directory in the lower filesystem with the same name is ignored. 156 + 157 + An opaque directory should not conntain any whiteouts, because they do not 158 + serve any purpose. A merge directory containing regular files with the xattr 159 + "trusted.overlay.whiteout", should be additionally marked by setting the xattr 160 + "trusted.overlay.opaque" to "x" on the merge directory itself. 161 + This is needed to avoid the overhead of checking the "trusted.overlay.whiteout" 162 + on all entries during readdir in the common case. 158 163 159 164 readdir 160 165 ------- ··· 543 534 mount, so to support storing an effective whiteout file in an overlayfs mount an 544 535 alternative form of whiteout is supported. This form is a regular, zero-size 545 536 file with the "overlay.whiteout" xattr set, inside a directory with the 546 - "overlay.whiteouts" xattr set. Such whiteouts are never created by overlayfs, 547 - but can be used by userspace tools (like containers) that generate lower layers. 537 + "overlay.opaque" xattr set to "x" (see `whiteouts and opaque directories`_). 538 + These alternative whiteouts are never created by overlayfs, but can be used by 539 + userspace tools (like containers) that generate lower layers. 
548 540 These alternative whiteouts can be escaped using the standard xattr escape 549 541 mechanism in order to properly nest to any depth. 550 542
+5 -3
MAINTAINERS
··· 4547 4547 4548 4548 CACHEFILES: FS-CACHE BACKEND FOR CACHING ON MOUNTED FILESYSTEMS 4549 4549 M: David Howells <dhowells@redhat.com> 4550 - L: linux-cachefs@redhat.com (moderated for non-subscribers) 4550 + L: netfs@lists.linux.dev 4551 4551 S: Supported 4552 4552 F: Documentation/filesystems/caching/cachefiles.rst 4553 4553 F: fs/cachefiles/ ··· 7955 7955 S: Maintained 7956 7956 F: rust/kernel/net/phy.rs 7957 7957 7958 - EXEC & BINFMT API 7958 + EXEC & BINFMT API, ELF 7959 7959 R: Eric Biederman <ebiederm@xmission.com> 7960 7960 R: Kees Cook <keescook@chromium.org> 7961 7961 L: linux-mm@kvack.org 7962 7962 S: Supported 7963 7963 T: git git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux.git for-next/execve 7964 + F: Documentation/userspace-api/ELF.rst 7964 7965 F: fs/*binfmt_*.c 7965 7966 F: fs/exec.c 7966 7967 F: include/linux/binfmts.h ··· 8224 8223 8225 8224 FILESYSTEMS [NETFS LIBRARY] 8226 8225 M: David Howells <dhowells@redhat.com> 8227 - L: linux-cachefs@redhat.com (moderated for non-subscribers) 8226 + R: Jeff Layton <jlayton@kernel.org> 8227 + L: netfs@lists.linux.dev 8228 8228 L: linux-fsdevel@vger.kernel.org 8229 8229 S: Supported 8230 8230 F: Documentation/filesystems/caching/
+4
Makefile
··· 986 986 # perform bounds checking. 987 987 KBUILD_CFLAGS += $(call cc-option, -fstrict-flex-arrays=3) 988 988 989 + #Currently, disable -Wstringop-overflow for GCC 11, globally. 990 + KBUILD_CFLAGS-$(CONFIG_CC_NO_STRINGOP_OVERFLOW) += $(call cc-option, -Wno-stringop-overflow) 991 + KBUILD_CFLAGS-$(CONFIG_CC_STRINGOP_OVERFLOW) += $(call cc-option, -Wstringop-overflow) 992 + 989 993 # disable invalid "can't wrap" optimizations for signed / pointers 990 994 KBUILD_CFLAGS += -fno-strict-overflow 991 995
+3 -2
arch/riscv/net/bpf_jit_comp64.c
··· 795 795 struct bpf_tramp_links *fentry = &tlinks[BPF_TRAMP_FENTRY]; 796 796 struct bpf_tramp_links *fexit = &tlinks[BPF_TRAMP_FEXIT]; 797 797 struct bpf_tramp_links *fmod_ret = &tlinks[BPF_TRAMP_MODIFY_RETURN]; 798 + bool is_struct_ops = flags & BPF_TRAMP_F_INDIRECT; 798 799 void *orig_call = func_addr; 799 800 bool save_ret; 800 801 u32 insn; ··· 879 878 880 879 stack_size = round_up(stack_size, 16); 881 880 882 - if (func_addr) { 881 + if (!is_struct_ops) { 883 882 /* For the trampoline called from function entry, 884 883 * the frame of traced function and the frame of 885 884 * trampoline need to be considered. ··· 999 998 1000 999 emit_ld(RV_REG_S1, -sreg_off, RV_REG_FP, ctx); 1001 1000 1002 - if (func_addr) { 1001 + if (!is_struct_ops) { 1003 1002 /* trampoline called from function entry */ 1004 1003 emit_ld(RV_REG_T0, stack_size - 8, RV_REG_SP, ctx); 1005 1004 emit_ld(RV_REG_FP, stack_size - 16, RV_REG_SP, ctx);
+19 -13
drivers/block/rbd.c
··· 3452 3452 static void rbd_lock_del_request(struct rbd_img_request *img_req) 3453 3453 { 3454 3454 struct rbd_device *rbd_dev = img_req->rbd_dev; 3455 - bool need_wakeup; 3455 + bool need_wakeup = false; 3456 3456 3457 3457 lockdep_assert_held(&rbd_dev->lock_rwsem); 3458 3458 spin_lock(&rbd_dev->lock_lists_lock); 3459 - rbd_assert(!list_empty(&img_req->lock_item)); 3460 - list_del_init(&img_req->lock_item); 3461 - need_wakeup = (rbd_dev->lock_state == RBD_LOCK_STATE_RELEASING && 3462 - list_empty(&rbd_dev->running_list)); 3459 + if (!list_empty(&img_req->lock_item)) { 3460 + list_del_init(&img_req->lock_item); 3461 + need_wakeup = (rbd_dev->lock_state == RBD_LOCK_STATE_RELEASING && 3462 + list_empty(&rbd_dev->running_list)); 3463 + } 3463 3464 spin_unlock(&rbd_dev->lock_lists_lock); 3464 3465 if (need_wakeup) 3465 3466 complete(&rbd_dev->releasing_wait); ··· 3843 3842 return; 3844 3843 } 3845 3844 3846 - list_for_each_entry(img_req, &rbd_dev->acquiring_list, lock_item) { 3845 + while (!list_empty(&rbd_dev->acquiring_list)) { 3846 + img_req = list_first_entry(&rbd_dev->acquiring_list, 3847 + struct rbd_img_request, lock_item); 3847 3848 mutex_lock(&img_req->state_mutex); 3848 3849 rbd_assert(img_req->state == RBD_IMG_EXCLUSIVE_LOCK); 3850 + if (!result) 3851 + list_move_tail(&img_req->lock_item, 3852 + &rbd_dev->running_list); 3853 + else 3854 + list_del_init(&img_req->lock_item); 3849 3855 rbd_img_schedule(img_req, result); 3850 3856 mutex_unlock(&img_req->state_mutex); 3851 3857 } 3852 - 3853 - list_splice_tail_init(&rbd_dev->acquiring_list, &rbd_dev->running_list); 3854 3858 } 3855 3859 3856 3860 static bool locker_equal(const struct ceph_locker *lhs, ··· 5332 5326 5333 5327 if (need_put) { 5334 5328 destroy_workqueue(rbd_dev->task_wq); 5335 - ida_simple_remove(&rbd_dev_id_ida, rbd_dev->dev_id); 5329 + ida_free(&rbd_dev_id_ida, rbd_dev->dev_id); 5336 5330 } 5337 5331 5338 5332 rbd_dev_free(rbd_dev); ··· 5408 5402 return NULL; 5409 5403 5410 5404 /* get an id 
and fill in device name */ 5411 - rbd_dev->dev_id = ida_simple_get(&rbd_dev_id_ida, 0, 5412 - minor_to_rbd_dev_id(1 << MINORBITS), 5413 - GFP_KERNEL); 5405 + rbd_dev->dev_id = ida_alloc_max(&rbd_dev_id_ida, 5406 + minor_to_rbd_dev_id(1 << MINORBITS) - 1, 5407 + GFP_KERNEL); 5414 5408 if (rbd_dev->dev_id < 0) 5415 5409 goto fail_rbd_dev; 5416 5410 ··· 5431 5425 return rbd_dev; 5432 5426 5433 5427 fail_dev_id: 5434 - ida_simple_remove(&rbd_dev_id_ida, rbd_dev->dev_id); 5428 + ida_free(&rbd_dev_id_ida, rbd_dev->dev_id); 5435 5429 fail_rbd_dev: 5436 5430 rbd_dev_free(rbd_dev); 5437 5431 return NULL;
+57 -11
drivers/dpll/dpll_core.c
··· 29 29 WARN_ON_ONCE(!xa_get_mark(&dpll_device_xa, (d)->id, DPLL_REGISTERED)) 30 30 #define ASSERT_DPLL_NOT_REGISTERED(d) \ 31 31 WARN_ON_ONCE(xa_get_mark(&dpll_device_xa, (d)->id, DPLL_REGISTERED)) 32 - #define ASSERT_PIN_REGISTERED(p) \ 33 - WARN_ON_ONCE(!xa_get_mark(&dpll_pin_xa, (p)->id, DPLL_REGISTERED)) 34 32 35 33 struct dpll_device_registration { 36 34 struct list_head list; ··· 423 425 } 424 426 EXPORT_SYMBOL_GPL(dpll_device_unregister); 425 427 428 + static void dpll_pin_prop_free(struct dpll_pin_properties *prop) 429 + { 430 + kfree(prop->package_label); 431 + kfree(prop->panel_label); 432 + kfree(prop->board_label); 433 + kfree(prop->freq_supported); 434 + } 435 + 436 + static int dpll_pin_prop_dup(const struct dpll_pin_properties *src, 437 + struct dpll_pin_properties *dst) 438 + { 439 + memcpy(dst, src, sizeof(*dst)); 440 + if (src->freq_supported && src->freq_supported_num) { 441 + size_t freq_size = src->freq_supported_num * 442 + sizeof(*src->freq_supported); 443 + dst->freq_supported = kmemdup(src->freq_supported, 444 + freq_size, GFP_KERNEL); 445 + if (!src->freq_supported) 446 + return -ENOMEM; 447 + } 448 + if (src->board_label) { 449 + dst->board_label = kstrdup(src->board_label, GFP_KERNEL); 450 + if (!dst->board_label) 451 + goto err_board_label; 452 + } 453 + if (src->panel_label) { 454 + dst->panel_label = kstrdup(src->panel_label, GFP_KERNEL); 455 + if (!dst->panel_label) 456 + goto err_panel_label; 457 + } 458 + if (src->package_label) { 459 + dst->package_label = kstrdup(src->package_label, GFP_KERNEL); 460 + if (!dst->package_label) 461 + goto err_package_label; 462 + } 463 + 464 + return 0; 465 + 466 + err_package_label: 467 + kfree(dst->panel_label); 468 + err_panel_label: 469 + kfree(dst->board_label); 470 + err_board_label: 471 + kfree(dst->freq_supported); 472 + return -ENOMEM; 473 + } 474 + 426 475 static struct dpll_pin * 427 476 dpll_pin_alloc(u64 clock_id, u32 pin_idx, struct module *module, 428 477 const struct 
dpll_pin_properties *prop) ··· 486 441 if (WARN_ON(prop->type < DPLL_PIN_TYPE_MUX || 487 442 prop->type > DPLL_PIN_TYPE_MAX)) { 488 443 ret = -EINVAL; 489 - goto err; 444 + goto err_pin_prop; 490 445 } 491 - pin->prop = prop; 446 + ret = dpll_pin_prop_dup(prop, &pin->prop); 447 + if (ret) 448 + goto err_pin_prop; 492 449 refcount_set(&pin->refcount, 1); 493 450 xa_init_flags(&pin->dpll_refs, XA_FLAGS_ALLOC); 494 451 xa_init_flags(&pin->parent_refs, XA_FLAGS_ALLOC); 495 452 ret = xa_alloc_cyclic(&dpll_pin_xa, &pin->id, pin, xa_limit_32b, 496 453 &dpll_pin_xa_id, GFP_KERNEL); 497 454 if (ret) 498 - goto err; 455 + goto err_xa_alloc; 499 456 return pin; 500 - err: 457 + err_xa_alloc: 501 458 xa_destroy(&pin->dpll_refs); 502 459 xa_destroy(&pin->parent_refs); 460 + dpll_pin_prop_free(&pin->prop); 461 + err_pin_prop: 503 462 kfree(pin); 504 463 return ERR_PTR(ret); 505 464 } ··· 563 514 xa_destroy(&pin->dpll_refs); 564 515 xa_destroy(&pin->parent_refs); 565 516 xa_erase(&dpll_pin_xa, pin->id); 517 + dpll_pin_prop_free(&pin->prop); 566 518 kfree(pin); 567 519 } 568 520 mutex_unlock(&dpll_lock); ··· 613 563 if (WARN_ON(!ops) || 614 564 WARN_ON(!ops->state_on_dpll_get) || 615 565 WARN_ON(!ops->direction_get)) 616 - return -EINVAL; 617 - if (ASSERT_DPLL_REGISTERED(dpll)) 618 566 return -EINVAL; 619 567 620 568 mutex_lock(&dpll_lock); ··· 684 636 unsigned long i, stop; 685 637 int ret; 686 638 687 - if (WARN_ON(parent->prop->type != DPLL_PIN_TYPE_MUX)) 639 + if (WARN_ON(parent->prop.type != DPLL_PIN_TYPE_MUX)) 688 640 return -EINVAL; 689 641 690 642 if (WARN_ON(!ops) || 691 643 WARN_ON(!ops->state_on_pin_get) || 692 644 WARN_ON(!ops->direction_get)) 693 - return -EINVAL; 694 - if (ASSERT_PIN_REGISTERED(parent)) 695 645 return -EINVAL; 696 646 697 647 mutex_lock(&dpll_lock);
+2 -2
drivers/dpll/dpll_core.h
··· 44 44 * @module: module of creator 45 45 * @dpll_refs: hold referencees to dplls pin was registered with 46 46 * @parent_refs: hold references to parent pins pin was registered with 47 - * @prop: pointer to pin properties given by registerer 47 + * @prop: pin properties copied from the registerer 48 48 * @rclk_dev_name: holds name of device when pin can recover clock from it 49 49 * @refcount: refcount 50 50 **/ ··· 55 55 struct module *module; 56 56 struct xarray dpll_refs; 57 57 struct xarray parent_refs; 58 - const struct dpll_pin_properties *prop; 58 + struct dpll_pin_properties prop; 59 59 refcount_t refcount; 60 60 }; 61 61
+41 -16
drivers/dpll/dpll_netlink.c
··· 303 303 if (nla_put_64bit(msg, DPLL_A_PIN_FREQUENCY, sizeof(freq), &freq, 304 304 DPLL_A_PIN_PAD)) 305 305 return -EMSGSIZE; 306 - for (fs = 0; fs < pin->prop->freq_supported_num; fs++) { 306 + for (fs = 0; fs < pin->prop.freq_supported_num; fs++) { 307 307 nest = nla_nest_start(msg, DPLL_A_PIN_FREQUENCY_SUPPORTED); 308 308 if (!nest) 309 309 return -EMSGSIZE; 310 - freq = pin->prop->freq_supported[fs].min; 310 + freq = pin->prop.freq_supported[fs].min; 311 311 if (nla_put_64bit(msg, DPLL_A_PIN_FREQUENCY_MIN, sizeof(freq), 312 312 &freq, DPLL_A_PIN_PAD)) { 313 313 nla_nest_cancel(msg, nest); 314 314 return -EMSGSIZE; 315 315 } 316 - freq = pin->prop->freq_supported[fs].max; 316 + freq = pin->prop.freq_supported[fs].max; 317 317 if (nla_put_64bit(msg, DPLL_A_PIN_FREQUENCY_MAX, sizeof(freq), 318 318 &freq, DPLL_A_PIN_PAD)) { 319 319 nla_nest_cancel(msg, nest); ··· 329 329 { 330 330 int fs; 331 331 332 - for (fs = 0; fs < pin->prop->freq_supported_num; fs++) 333 - if (freq >= pin->prop->freq_supported[fs].min && 334 - freq <= pin->prop->freq_supported[fs].max) 332 + for (fs = 0; fs < pin->prop.freq_supported_num; fs++) 333 + if (freq >= pin->prop.freq_supported[fs].min && 334 + freq <= pin->prop.freq_supported[fs].max) 335 335 return true; 336 336 return false; 337 337 } ··· 421 421 dpll_cmd_pin_get_one(struct sk_buff *msg, struct dpll_pin *pin, 422 422 struct netlink_ext_ack *extack) 423 423 { 424 - const struct dpll_pin_properties *prop = pin->prop; 424 + const struct dpll_pin_properties *prop = &pin->prop; 425 425 struct dpll_pin_ref *ref; 426 426 int ret; 427 427 ··· 553 553 return dpll_device_event_send(DPLL_CMD_DEVICE_CHANGE_NTF, dpll); 554 554 } 555 555 556 + static bool dpll_pin_available(struct dpll_pin *pin) 557 + { 558 + struct dpll_pin_ref *par_ref; 559 + unsigned long i; 560 + 561 + if (!xa_get_mark(&dpll_pin_xa, pin->id, DPLL_REGISTERED)) 562 + return false; 563 + xa_for_each(&pin->parent_refs, i, par_ref) 564 + if (xa_get_mark(&dpll_pin_xa, 
par_ref->pin->id, 565 + DPLL_REGISTERED)) 566 + return true; 567 + xa_for_each(&pin->dpll_refs, i, par_ref) 568 + if (xa_get_mark(&dpll_device_xa, par_ref->dpll->id, 569 + DPLL_REGISTERED)) 570 + return true; 571 + return false; 572 + } 573 + 556 574 /** 557 575 * dpll_device_change_ntf - notify that the dpll device has been changed 558 576 * @dpll: registered dpll pointer ··· 597 579 int ret = -ENOMEM; 598 580 void *hdr; 599 581 600 - if (WARN_ON(!xa_get_mark(&dpll_pin_xa, pin->id, DPLL_REGISTERED))) 582 + if (!dpll_pin_available(pin)) 601 583 return -ENODEV; 602 584 603 585 msg = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); ··· 735 717 int ret; 736 718 737 719 if (!(DPLL_PIN_CAPABILITIES_STATE_CAN_CHANGE & 738 - pin->prop->capabilities)) { 720 + pin->prop.capabilities)) { 739 721 NL_SET_ERR_MSG(extack, "state changing is not allowed"); 740 722 return -EOPNOTSUPP; 741 723 } ··· 771 753 int ret; 772 754 773 755 if (!(DPLL_PIN_CAPABILITIES_STATE_CAN_CHANGE & 774 - pin->prop->capabilities)) { 756 + pin->prop.capabilities)) { 775 757 NL_SET_ERR_MSG(extack, "state changing is not allowed"); 776 758 return -EOPNOTSUPP; 777 759 } ··· 798 780 int ret; 799 781 800 782 if (!(DPLL_PIN_CAPABILITIES_PRIORITY_CAN_CHANGE & 801 - pin->prop->capabilities)) { 783 + pin->prop.capabilities)) { 802 784 NL_SET_ERR_MSG(extack, "prio changing is not allowed"); 803 785 return -EOPNOTSUPP; 804 786 } ··· 826 808 int ret; 827 809 828 810 if (!(DPLL_PIN_CAPABILITIES_DIRECTION_CAN_CHANGE & 829 - pin->prop->capabilities)) { 811 + pin->prop.capabilities)) { 830 812 NL_SET_ERR_MSG(extack, "direction changing is not allowed"); 831 813 return -EOPNOTSUPP; 832 814 } ··· 856 838 int ret; 857 839 858 840 phase_adj = nla_get_s32(phase_adj_attr); 859 - if (phase_adj > pin->prop->phase_range.max || 860 - phase_adj < pin->prop->phase_range.min) { 841 + if (phase_adj > pin->prop.phase_range.max || 842 + phase_adj < pin->prop.phase_range.min) { 861 843 NL_SET_ERR_MSG_ATTR(extack, phase_adj_attr, 862 844 "phase 
adjust value not supported"); 863 845 return -EINVAL; ··· 1041 1023 unsigned long i; 1042 1024 1043 1025 xa_for_each_marked(&dpll_pin_xa, i, pin, DPLL_REGISTERED) { 1044 - prop = pin->prop; 1026 + prop = &pin->prop; 1045 1027 cid_match = clock_id ? pin->clock_id == clock_id : true; 1046 1028 mod_match = mod_name_attr && module_name(pin->module) ? 1047 1029 !nla_strcmp(mod_name_attr, ··· 1148 1130 } 1149 1131 pin = dpll_pin_find_from_nlattr(info); 1150 1132 if (!IS_ERR(pin)) { 1133 + if (!dpll_pin_available(pin)) { 1134 + nlmsg_free(msg); 1135 + return -ENODEV; 1136 + } 1151 1137 ret = dpll_msg_add_pin_handle(msg, pin); 1152 1138 if (ret) { 1153 1139 nlmsg_free(msg); ··· 1201 1179 1202 1180 xa_for_each_marked_start(&dpll_pin_xa, i, pin, DPLL_REGISTERED, 1203 1181 ctx->idx) { 1182 + if (!dpll_pin_available(pin)) 1183 + continue; 1204 1184 hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, 1205 1185 cb->nlh->nlmsg_seq, 1206 1186 &dpll_nl_family, NLM_F_MULTI, ··· 1465 1441 } 1466 1442 info->user_ptr[0] = xa_load(&dpll_pin_xa, 1467 1443 nla_get_u32(info->attrs[DPLL_A_PIN_ID])); 1468 - if (!info->user_ptr[0]) { 1444 + if (!info->user_ptr[0] || 1445 + !dpll_pin_available(info->user_ptr[0])) { 1469 1446 NL_SET_ERR_MSG(info->extack, "pin not found"); 1470 1447 ret = -ENODEV; 1471 1448 goto unlock_dev;
+7 -2
drivers/gpu/drm/ttm/ttm_device.c
··· 195 195 bool use_dma_alloc, bool use_dma32) 196 196 { 197 197 struct ttm_global *glob = &ttm_glob; 198 - int ret; 198 + int ret, nid; 199 199 200 200 if (WARN_ON(vma_manager == NULL)) 201 201 return -EINVAL; ··· 215 215 216 216 ttm_sys_man_init(bdev); 217 217 218 - ttm_pool_init(&bdev->pool, dev, dev_to_node(dev), use_dma_alloc, use_dma32); 218 + if (dev) 219 + nid = dev_to_node(dev); 220 + else 221 + nid = NUMA_NO_NODE; 222 + 223 + ttm_pool_init(&bdev->pool, dev, nid, use_dma_alloc, use_dma32); 219 224 220 225 bdev->vma_manager = vma_manager; 221 226 spin_lock_init(&bdev->lru_lock);
+1
drivers/net/ethernet/8390/8390.c
··· 100 100 module_init(ns8390_module_init); 101 101 module_exit(ns8390_module_exit); 102 102 #endif /* MODULE */ 103 + MODULE_DESCRIPTION("National Semiconductor 8390 core driver"); 103 104 MODULE_LICENSE("GPL");
+1
drivers/net/ethernet/8390/8390p.c
··· 102 102 103 103 module_init(NS8390p_init_module); 104 104 module_exit(NS8390p_cleanup_module); 105 + MODULE_DESCRIPTION("National Semiconductor 8390 core for ISA driver"); 105 106 MODULE_LICENSE("GPL");
+1
drivers/net/ethernet/8390/apne.c
··· 610 610 return 1; 611 611 } 612 612 613 + MODULE_DESCRIPTION("National Semiconductor 8390 Amiga PCMCIA ethernet driver"); 613 614 MODULE_LICENSE("GPL");
+1
drivers/net/ethernet/8390/hydra.c
··· 270 270 module_init(hydra_init_module); 271 271 module_exit(hydra_cleanup_module); 272 272 273 + MODULE_DESCRIPTION("Zorro-II Hydra 8390 ethernet driver"); 273 274 MODULE_LICENSE("GPL");
+1
drivers/net/ethernet/8390/stnic.c
··· 296 296 297 297 module_init(stnic_probe); 298 298 module_exit(stnic_cleanup); 299 + MODULE_DESCRIPTION("National Semiconductor DP83902AV ethernet driver"); 299 300 MODULE_LICENSE("GPL");
+1
drivers/net/ethernet/8390/zorro8390.c
··· 443 443 module_init(zorro8390_init_module); 444 444 module_exit(zorro8390_cleanup_module); 445 445 446 + MODULE_DESCRIPTION("Zorro NS8390-based ethernet driver"); 446 447 MODULE_LICENSE("GPL");
+1
drivers/net/ethernet/broadcom/bcm4908_enet.c
··· 793 793 }; 794 794 module_platform_driver(bcm4908_enet_driver); 795 795 796 + MODULE_DESCRIPTION("Broadcom BCM4908 Gigabit Ethernet driver"); 796 797 MODULE_LICENSE("GPL v2"); 797 798 MODULE_DEVICE_TABLE(of, bcm4908_enet_of_match);
+1
drivers/net/ethernet/broadcom/bgmac-bcma-mdio.c
··· 260 260 EXPORT_SYMBOL_GPL(bcma_mdio_mii_unregister); 261 261 262 262 MODULE_AUTHOR("Rafał Miłecki"); 263 + MODULE_DESCRIPTION("Broadcom iProc GBit BCMA MDIO helpers"); 263 264 MODULE_LICENSE("GPL");
+1
drivers/net/ethernet/broadcom/bgmac-bcma.c
··· 362 362 module_exit(bgmac_exit) 363 363 364 364 MODULE_AUTHOR("Rafał Miłecki"); 365 + MODULE_DESCRIPTION("Broadcom iProc GBit BCMA interface driver"); 365 366 MODULE_LICENSE("GPL");
+1
drivers/net/ethernet/broadcom/bgmac-platform.c
··· 298 298 }; 299 299 300 300 module_platform_driver(bgmac_enet_driver); 301 + MODULE_DESCRIPTION("Broadcom iProc GBit platform interface driver"); 301 302 MODULE_LICENSE("GPL");
+1
drivers/net/ethernet/broadcom/bgmac.c
··· 1626 1626 EXPORT_SYMBOL_GPL(bgmac_enet_resume); 1627 1627 1628 1628 MODULE_AUTHOR("Rafał Miłecki"); 1629 + MODULE_DESCRIPTION("Broadcom iProc GBit driver"); 1629 1630 MODULE_LICENSE("GPL");
+35 -14
drivers/net/ethernet/broadcom/bnxt/bnxt.c
··· 3817 3817 { 3818 3818 bool sh = !!(bp->flags & BNXT_FLAG_SHARED_RINGS); 3819 3819 int i, j, rc, ulp_base_vec, ulp_msix; 3820 - int tcs = netdev_get_num_tc(bp->dev); 3820 + int tcs = bp->num_tc; 3821 3821 3822 3822 if (!tcs) 3823 3823 tcs = 1; ··· 5935 5935 5936 5936 int bnxt_get_nr_rss_ctxs(struct bnxt *bp, int rx_rings) 5937 5937 { 5938 - if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 5939 - return DIV_ROUND_UP(rx_rings, BNXT_RSS_TABLE_ENTRIES_P5); 5938 + if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 5939 + if (!rx_rings) 5940 + return 0; 5941 + return bnxt_calc_nr_ring_pages(rx_rings - 1, 5942 + BNXT_RSS_TABLE_ENTRIES_P5); 5943 + } 5940 5944 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) 5941 5945 return 2; 5942 5946 return 1; ··· 6930 6926 if (cp < (rx + tx)) { 6931 6927 rc = __bnxt_trim_rings(bp, &rx, &tx, cp, false); 6932 6928 if (rc) 6933 - return rc; 6929 + goto get_rings_exit; 6934 6930 if (bp->flags & BNXT_FLAG_AGG_RINGS) 6935 6931 rx <<= 1; 6936 6932 hw_resc->resv_rx_rings = rx; ··· 6942 6938 hw_resc->resv_cp_rings = cp; 6943 6939 hw_resc->resv_stat_ctxs = stats; 6944 6940 } 6941 + get_rings_exit: 6945 6942 hwrm_req_drop(bp, req); 6946 - return 0; 6943 + return rc; 6947 6944 } 6948 6945 6949 6946 int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings) ··· 7005 7000 7006 7001 req->num_rx_rings = cpu_to_le16(rx_rings); 7007 7002 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 7003 + u16 rss_ctx = bnxt_get_nr_rss_ctxs(bp, ring_grps); 7004 + 7008 7005 req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps); 7009 7006 req->num_msix = cpu_to_le16(cp_rings); 7010 - req->num_rsscos_ctxs = 7011 - cpu_to_le16(DIV_ROUND_UP(ring_grps, 64)); 7007 + req->num_rsscos_ctxs = cpu_to_le16(rss_ctx); 7012 7008 } else { 7013 7009 req->num_cmpl_rings = cpu_to_le16(cp_rings); 7014 7010 req->num_hw_ring_grps = cpu_to_le16(ring_grps); ··· 7056 7050 req->num_tx_rings = cpu_to_le16(tx_rings); 7057 7051 req->num_rx_rings = cpu_to_le16(rx_rings); 7058 7052 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 
7053 + u16 rss_ctx = bnxt_get_nr_rss_ctxs(bp, ring_grps); 7054 + 7059 7055 req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps); 7060 - req->num_rsscos_ctxs = cpu_to_le16(DIV_ROUND_UP(ring_grps, 64)); 7056 + req->num_rsscos_ctxs = cpu_to_le16(rss_ctx); 7061 7057 } else { 7062 7058 req->num_cmpl_rings = cpu_to_le16(cp_rings); 7063 7059 req->num_hw_ring_grps = cpu_to_le16(ring_grps); ··· 9946 9938 9947 9939 int bnxt_num_tx_to_cp(struct bnxt *bp, int tx) 9948 9940 { 9949 - int tcs = netdev_get_num_tc(bp->dev); 9941 + int tcs = bp->num_tc; 9950 9942 9951 9943 if (!tcs) 9952 9944 tcs = 1; ··· 9955 9947 9956 9948 static int bnxt_num_cp_to_tx(struct bnxt *bp, int tx_cp) 9957 9949 { 9958 - int tcs = netdev_get_num_tc(bp->dev); 9950 + int tcs = bp->num_tc; 9959 9951 9960 9952 return (tx_cp - bp->tx_nr_rings_xdp) * tcs + 9961 9953 bp->tx_nr_rings_xdp; ··· 9985 9977 struct net_device *dev = bp->dev; 9986 9978 int tcs, i; 9987 9979 9988 - tcs = netdev_get_num_tc(dev); 9980 + tcs = bp->num_tc; 9989 9981 if (tcs) { 9990 9982 int i, off, count; 9991 9983 ··· 10017 10009 { 10018 10010 const int len = sizeof(bp->irq_tbl[0].name); 10019 10011 10020 - if (netdev_get_num_tc(bp->dev)) 10012 + if (bp->num_tc) { 10021 10013 netdev_reset_tc(bp->dev); 10014 + bp->num_tc = 0; 10015 + } 10022 10016 10023 10017 snprintf(bp->irq_tbl[0].name, len, "%s-%s-%d", bp->dev->name, "TxRx", 10024 10018 0); ··· 10246 10236 10247 10237 int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init) 10248 10238 { 10249 - int tcs = netdev_get_num_tc(bp->dev); 10250 10239 bool irq_cleared = false; 10240 + int tcs = bp->num_tc; 10251 10241 int rc; 10252 10242 10253 10243 if (!bnxt_need_reserve_rings(bp)) ··· 10273 10263 bp->tx_nr_rings - bp->tx_nr_rings_xdp)) { 10274 10264 netdev_err(bp->dev, "tx ring reservation failure\n"); 10275 10265 netdev_reset_tc(bp->dev); 10266 + bp->num_tc = 0; 10276 10267 if (bp->tx_nr_rings_xdp) 10277 10268 bp->tx_nr_rings_per_tc = bp->tx_nr_rings_xdp; 10278 10269 else ··· 11575 11564 
netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc); 11576 11565 goto half_open_err; 11577 11566 } 11567 + bnxt_init_napi(bp); 11578 11568 set_bit(BNXT_STATE_HALF_OPEN, &bp->state); 11579 11569 rc = bnxt_init_nic(bp, true); 11580 11570 if (rc) { 11581 11571 clear_bit(BNXT_STATE_HALF_OPEN, &bp->state); 11572 + bnxt_del_napi(bp); 11582 11573 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc); 11583 11574 goto half_open_err; 11584 11575 } ··· 11599 11586 void bnxt_half_close_nic(struct bnxt *bp) 11600 11587 { 11601 11588 bnxt_hwrm_resource_free(bp, false, true); 11589 + bnxt_del_napi(bp); 11602 11590 bnxt_free_skbs(bp); 11603 11591 bnxt_free_mem(bp, true); 11604 11592 clear_bit(BNXT_STATE_HALF_OPEN, &bp->state); ··· 13246 13232 13247 13233 bp->fw_cap = 0; 13248 13234 rc = bnxt_hwrm_ver_get(bp); 13235 + /* FW may be unresponsive after FLR. FLR must complete within 100 msec 13236 + * so wait before continuing with recovery. 13237 + */ 13238 + if (rc) 13239 + msleep(100); 13249 13240 bnxt_try_map_fw_health_reg(bp); 13250 13241 if (rc) { 13251 13242 rc = bnxt_try_recover_fw(bp); ··· 13803 13784 return -EINVAL; 13804 13785 } 13805 13786 13806 - if (netdev_get_num_tc(dev) == tc) 13787 + if (bp->num_tc == tc) 13807 13788 return 0; 13808 13789 13809 13790 if (bp->flags & BNXT_FLAG_SHARED_RINGS) ··· 13821 13802 if (tc) { 13822 13803 bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc; 13823 13804 netdev_set_num_tc(dev, tc); 13805 + bp->num_tc = tc; 13824 13806 } else { 13825 13807 bp->tx_nr_rings = bp->tx_nr_rings_per_tc; 13826 13808 netdev_reset_tc(dev); 13809 + bp->num_tc = 0; 13827 13810 } 13828 13811 bp->tx_nr_rings += bp->tx_nr_rings_xdp; 13829 13812 tx_cp = bnxt_num_tx_to_cp(bp, bp->tx_nr_rings);
+1
drivers/net/ethernet/broadcom/bnxt/bnxt.h
··· 2225 2225 u8 tc_to_qidx[BNXT_MAX_QUEUE]; 2226 2226 u8 q_ids[BNXT_MAX_QUEUE]; 2227 2227 u8 max_q; 2228 + u8 num_tc; 2228 2229 2229 2230 unsigned int current_interval; 2230 2231 #define BNXT_TIMER_INTERVAL HZ
+1 -1
drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
··· 228 228 } 229 229 } 230 230 if (bp->ieee_ets) { 231 - int tc = netdev_get_num_tc(bp->dev); 231 + int tc = bp->num_tc; 232 232 233 233 if (!tc) 234 234 tc = 1;
+4 -3
drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
··· 884 884 if (max_tx_sch_inputs) 885 885 max_tx_rings = min_t(int, max_tx_rings, max_tx_sch_inputs); 886 886 887 - tcs = netdev_get_num_tc(dev); 887 + tcs = bp->num_tc; 888 888 tx_grps = max(tcs, 1); 889 889 if (bp->tx_nr_rings_xdp) 890 890 tx_grps++; ··· 944 944 if (channel->combined_count) 945 945 sh = true; 946 946 947 - tcs = netdev_get_num_tc(dev); 947 + tcs = bp->num_tc; 948 948 949 949 req_tx_rings = sh ? channel->combined_count : channel->tx_count; 950 950 req_rx_rings = sh ? channel->combined_count : channel->rx_count; ··· 1574 1574 struct bnxt *bp = netdev_priv(dev); 1575 1575 1576 1576 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 1577 - return ALIGN(bp->rx_nr_rings, BNXT_RSS_TABLE_ENTRIES_P5); 1577 + return bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) * 1578 + BNXT_RSS_TABLE_ENTRIES_P5; 1578 1579 return HW_HASH_INDEX_SIZE; 1579 1580 } 1580 1581
+1 -1
drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
··· 407 407 if (prog) 408 408 tx_xdp = bp->rx_nr_rings; 409 409 410 - tc = netdev_get_num_tc(dev); 410 + tc = bp->num_tc; 411 411 if (!tc) 412 412 tc = 1; 413 413 rc = bnxt_check_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings,
+1
drivers/net/ethernet/cavium/liquidio/lio_core.c
··· 27 27 #include "octeon_network.h" 28 28 29 29 MODULE_AUTHOR("Cavium Networks, <support@cavium.com>"); 30 + MODULE_DESCRIPTION("Cavium LiquidIO Intelligent Server Adapter Core"); 30 31 MODULE_LICENSE("GPL"); 31 32 32 33 /* OOM task polling interval */
+1
drivers/net/ethernet/cirrus/ep93xx_eth.c
··· 868 868 869 869 module_platform_driver(ep93xx_eth_driver); 870 870 871 + MODULE_DESCRIPTION("Cirrus EP93xx Ethernet driver"); 871 872 MODULE_LICENSE("GPL"); 872 873 MODULE_ALIAS("platform:ep93xx-eth");
+15 -2
drivers/net/ethernet/engleder/tsnep_main.c
··· 1485 1485 1486 1486 xdp_prepare_buff(&xdp, page_address(entry->page), 1487 1487 XDP_PACKET_HEADROOM + TSNEP_RX_INLINE_METADATA_SIZE, 1488 - length, false); 1488 + length - ETH_FCS_LEN, false); 1489 1489 1490 1490 consume = tsnep_xdp_run_prog(rx, prog, &xdp, 1491 1491 &xdp_status, tx_nq, tx); ··· 1568 1568 prefetch(entry->xdp->data); 1569 1569 length = __le32_to_cpu(entry->desc_wb->properties) & 1570 1570 TSNEP_DESC_LENGTH_MASK; 1571 - xsk_buff_set_size(entry->xdp, length); 1571 + xsk_buff_set_size(entry->xdp, length - ETH_FCS_LEN); 1572 1572 xsk_buff_dma_sync_for_cpu(entry->xdp, rx->xsk_pool); 1573 1573 1574 1574 /* RX metadata with timestamps is in front of actual data, ··· 1761 1761 1762 1762 allocated--; 1763 1763 } 1764 + } 1765 + 1766 + /* set need wakeup flag immediately if ring is not filled completely, 1767 + * first polling would be too late as need wakeup signalisation would 1768 + * be delayed for an indefinite time 1769 + */ 1770 + if (xsk_uses_need_wakeup(rx->xsk_pool)) { 1771 + int desc_available = tsnep_rx_desc_available(rx); 1772 + 1773 + if (desc_available) 1774 + xsk_set_rx_need_wakeup(rx->xsk_pool); 1775 + else 1776 + xsk_clear_rx_need_wakeup(rx->xsk_pool); 1764 1777 } 1765 1778 } 1766 1779
+1
drivers/net/ethernet/ezchip/nps_enet.c
··· 661 661 module_platform_driver(nps_enet_driver); 662 662 663 663 MODULE_AUTHOR("EZchip Semiconductor"); 664 + MODULE_DESCRIPTION("EZchip NPS Ethernet driver"); 664 665 MODULE_LICENSE("GPL v2");
+1
drivers/net/ethernet/freescale/enetc/enetc.c
··· 3216 3216 } 3217 3217 EXPORT_SYMBOL_GPL(enetc_pci_remove); 3218 3218 3219 + MODULE_DESCRIPTION("NXP ENETC Ethernet driver"); 3219 3220 MODULE_LICENSE("Dual BSD/GPL");
+3
drivers/net/ethernet/freescale/fec_main.c
··· 2036 2036 2037 2037 /* if any of the above changed restart the FEC */ 2038 2038 if (status_change) { 2039 + netif_stop_queue(ndev); 2039 2040 napi_disable(&fep->napi); 2040 2041 netif_tx_lock_bh(ndev); 2041 2042 fec_restart(ndev); ··· 2046 2045 } 2047 2046 } else { 2048 2047 if (fep->link) { 2048 + netif_stop_queue(ndev); 2049 2049 napi_disable(&fep->napi); 2050 2050 netif_tx_lock_bh(ndev); 2051 2051 fec_stop(ndev); ··· 4771 4769 4772 4770 module_platform_driver(fec_driver); 4773 4771 4772 + MODULE_DESCRIPTION("NXP Fast Ethernet Controller (FEC) driver"); 4774 4773 MODULE_LICENSE("GPL");
+1
drivers/net/ethernet/freescale/fsl_pq_mdio.c
··· 531 531 532 532 module_platform_driver(fsl_pq_mdio_driver); 533 533 534 + MODULE_DESCRIPTION("Freescale PQ MDIO helpers"); 534 535 MODULE_LICENSE("GPL");
+31 -16
drivers/net/ethernet/intel/i40e/i40e_main.c
··· 3588 3588 struct i40e_hmc_obj_rxq rx_ctx; 3589 3589 int err = 0; 3590 3590 bool ok; 3591 - int ret; 3592 3591 3593 3592 bitmap_zero(ring->state, __I40E_RING_STATE_NBITS); 3594 3593 3595 3594 /* clear the context structure first */ 3596 3595 memset(&rx_ctx, 0, sizeof(rx_ctx)); 3597 3596 3598 - if (ring->vsi->type == I40E_VSI_MAIN) 3599 - xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq); 3597 + ring->rx_buf_len = vsi->rx_buf_len; 3598 + 3599 + /* XDP RX-queue info only needed for RX rings exposed to XDP */ 3600 + if (ring->vsi->type != I40E_VSI_MAIN) 3601 + goto skip; 3602 + 3603 + if (!xdp_rxq_info_is_reg(&ring->xdp_rxq)) { 3604 + err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev, 3605 + ring->queue_index, 3606 + ring->q_vector->napi.napi_id, 3607 + ring->rx_buf_len); 3608 + if (err) 3609 + return err; 3610 + } 3600 3611 3601 3612 ring->xsk_pool = i40e_xsk_pool(ring); 3602 3613 if (ring->xsk_pool) { 3603 - ring->rx_buf_len = 3604 - xsk_pool_get_rx_frame_size(ring->xsk_pool); 3605 - ret = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, 3614 + xdp_rxq_info_unreg(&ring->xdp_rxq); 3615 + ring->rx_buf_len = xsk_pool_get_rx_frame_size(ring->xsk_pool); 3616 + err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev, 3617 + ring->queue_index, 3618 + ring->q_vector->napi.napi_id, 3619 + ring->rx_buf_len); 3620 + if (err) 3621 + return err; 3622 + err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, 3606 3623 MEM_TYPE_XSK_BUFF_POOL, 3607 3624 NULL); 3608 - if (ret) 3609 - return ret; 3625 + if (err) 3626 + return err; 3610 3627 dev_info(&vsi->back->pdev->dev, 3611 3628 "Registered XDP mem model MEM_TYPE_XSK_BUFF_POOL on Rx ring %d\n", 3612 3629 ring->queue_index); 3613 3630 3614 3631 } else { 3615 - ring->rx_buf_len = vsi->rx_buf_len; 3616 - if (ring->vsi->type == I40E_VSI_MAIN) { 3617 - ret = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, 3618 - MEM_TYPE_PAGE_SHARED, 3619 - NULL); 3620 - if (ret) 3621 - return ret; 3622 - } 3632 + err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, 3633 + 
MEM_TYPE_PAGE_SHARED, 3634 + NULL); 3635 + if (err) 3636 + return err; 3623 3637 } 3624 3638 3639 + skip: 3625 3640 xdp_init_buff(&ring->xdp, i40e_rx_pg_size(ring) / 2, &ring->xdp_rxq); 3626 3641 3627 3642 rx_ctx.dbuff = DIV_ROUND_UP(ring->rx_buf_len,
+23 -26
drivers/net/ethernet/intel/i40e/i40e_txrx.c
··· 1548 1548 int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring) 1549 1549 { 1550 1550 struct device *dev = rx_ring->dev; 1551 - int err; 1552 1551 1553 1552 u64_stats_init(&rx_ring->syncp); 1554 1553 ··· 1567 1568 rx_ring->next_to_clean = 0; 1568 1569 rx_ring->next_to_process = 0; 1569 1570 rx_ring->next_to_use = 0; 1570 - 1571 - /* XDP RX-queue info only needed for RX rings exposed to XDP */ 1572 - if (rx_ring->vsi->type == I40E_VSI_MAIN) { 1573 - err = xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev, 1574 - rx_ring->queue_index, rx_ring->q_vector->napi.napi_id); 1575 - if (err < 0) 1576 - return err; 1577 - } 1578 1571 1579 1572 rx_ring->xdp_prog = rx_ring->vsi->xdp_prog; 1580 1573 ··· 2078 2087 static void i40e_process_rx_buffs(struct i40e_ring *rx_ring, int xdp_res, 2079 2088 struct xdp_buff *xdp) 2080 2089 { 2081 - u32 next = rx_ring->next_to_clean; 2090 + u32 nr_frags = xdp_get_shared_info_from_buff(xdp)->nr_frags; 2091 + u32 next = rx_ring->next_to_clean, i = 0; 2082 2092 struct i40e_rx_buffer *rx_buffer; 2083 2093 2084 2094 xdp->flags = 0; ··· 2092 2100 if (!rx_buffer->page) 2093 2101 continue; 2094 2102 2095 - if (xdp_res == I40E_XDP_CONSUMED) 2096 - rx_buffer->pagecnt_bias++; 2097 - else 2103 + if (xdp_res != I40E_XDP_CONSUMED) 2098 2104 i40e_rx_buffer_flip(rx_buffer, xdp->frame_sz); 2105 + else if (i++ <= nr_frags) 2106 + rx_buffer->pagecnt_bias++; 2099 2107 2100 2108 /* EOP buffer will be put in i40e_clean_rx_irq() */ 2101 2109 if (next == rx_ring->next_to_process) ··· 2109 2117 * i40e_construct_skb - Allocate skb and populate it 2110 2118 * @rx_ring: rx descriptor ring to transact packets on 2111 2119 * @xdp: xdp_buff pointing to the data 2112 - * @nr_frags: number of buffers for the packet 2113 2120 * 2114 2121 * This function allocates an skb. It then populates it with the page 2115 2122 * data from the current receive descriptor, taking care to set up the 2116 2123 * skb correctly. 
2117 2124 */ 2118 2125 static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring, 2119 - struct xdp_buff *xdp, 2120 - u32 nr_frags) 2126 + struct xdp_buff *xdp) 2121 2127 { 2122 2128 unsigned int size = xdp->data_end - xdp->data; 2123 2129 struct i40e_rx_buffer *rx_buffer; 2130 + struct skb_shared_info *sinfo; 2124 2131 unsigned int headlen; 2125 2132 struct sk_buff *skb; 2133 + u32 nr_frags = 0; 2126 2134 2127 2135 /* prefetch first cache line of first page */ 2128 2136 net_prefetch(xdp->data); ··· 2160 2168 memcpy(__skb_put(skb, headlen), xdp->data, 2161 2169 ALIGN(headlen, sizeof(long))); 2162 2170 2171 + if (unlikely(xdp_buff_has_frags(xdp))) { 2172 + sinfo = xdp_get_shared_info_from_buff(xdp); 2173 + nr_frags = sinfo->nr_frags; 2174 + } 2163 2175 rx_buffer = i40e_rx_bi(rx_ring, rx_ring->next_to_clean); 2164 2176 /* update all of the pointers */ 2165 2177 size -= headlen; ··· 2183 2187 } 2184 2188 2185 2189 if (unlikely(xdp_buff_has_frags(xdp))) { 2186 - struct skb_shared_info *sinfo, *skinfo = skb_shinfo(skb); 2190 + struct skb_shared_info *skinfo = skb_shinfo(skb); 2187 2191 2188 - sinfo = xdp_get_shared_info_from_buff(xdp); 2189 2192 memcpy(&skinfo->frags[skinfo->nr_frags], &sinfo->frags[0], 2190 2193 sizeof(skb_frag_t) * nr_frags); 2191 2194 ··· 2207 2212 * i40e_build_skb - Build skb around an existing buffer 2208 2213 * @rx_ring: Rx descriptor ring to transact packets on 2209 2214 * @xdp: xdp_buff pointing to the data 2210 - * @nr_frags: number of buffers for the packet 2211 2215 * 2212 2216 * This function builds an skb around an existing Rx buffer, taking care 2213 2217 * to set up the skb correctly and avoid any memcpy overhead. 
2214 2218 */ 2215 2219 static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring, 2216 - struct xdp_buff *xdp, 2217 - u32 nr_frags) 2220 + struct xdp_buff *xdp) 2218 2221 { 2219 2222 unsigned int metasize = xdp->data - xdp->data_meta; 2223 + struct skb_shared_info *sinfo; 2220 2224 struct sk_buff *skb; 2225 + u32 nr_frags; 2221 2226 2222 2227 /* Prefetch first cache line of first page. If xdp->data_meta 2223 2228 * is unused, this points exactly as xdp->data, otherwise we ··· 2225 2230 * data, and then actual data. 2226 2231 */ 2227 2232 net_prefetch(xdp->data_meta); 2233 + 2234 + if (unlikely(xdp_buff_has_frags(xdp))) { 2235 + sinfo = xdp_get_shared_info_from_buff(xdp); 2236 + nr_frags = sinfo->nr_frags; 2237 + } 2228 2238 2229 2239 /* build an skb around the page buffer */ 2230 2240 skb = napi_build_skb(xdp->data_hard_start, xdp->frame_sz); ··· 2243 2243 skb_metadata_set(skb, metasize); 2244 2244 2245 2245 if (unlikely(xdp_buff_has_frags(xdp))) { 2246 - struct skb_shared_info *sinfo; 2247 - 2248 - sinfo = xdp_get_shared_info_from_buff(xdp); 2249 2246 xdp_update_skb_shared_info(skb, nr_frags, 2250 2247 sinfo->xdp_frags_size, 2251 2248 nr_frags * xdp->frame_sz, ··· 2586 2589 total_rx_bytes += size; 2587 2590 } else { 2588 2591 if (ring_uses_build_skb(rx_ring)) 2589 - skb = i40e_build_skb(rx_ring, xdp, nfrags); 2592 + skb = i40e_build_skb(rx_ring, xdp); 2590 2593 else 2591 - skb = i40e_construct_skb(rx_ring, xdp, nfrags); 2594 + skb = i40e_construct_skb(rx_ring, xdp); 2592 2595 2593 2596 /* drop if we failed to retrieve a buffer */ 2594 2597 if (!skb) {
+2 -2
drivers/net/ethernet/intel/i40e/i40e_xsk.c
··· 414 414 } 415 415 416 416 __skb_fill_page_desc_noacc(sinfo, sinfo->nr_frags++, 417 - virt_to_page(xdp->data_hard_start), 0, size); 417 + virt_to_page(xdp->data_hard_start), 418 + XDP_PACKET_HEADROOM, size); 418 419 sinfo->xdp_frags_size += size; 419 420 xsk_buff_add_frag(xdp); 420 421 ··· 499 498 xdp_res = i40e_run_xdp_zc(rx_ring, first, xdp_prog); 500 499 i40e_handle_xdp_result_zc(rx_ring, first, rx_desc, &rx_packets, 501 500 &rx_bytes, xdp_res, &failure); 502 - first->flags = 0; 503 501 next_to_clean = next_to_process; 504 502 if (failure) 505 503 break;
+23 -14
drivers/net/ethernet/intel/ice/ice_base.c
··· 547 547 ring->rx_buf_len = ring->vsi->rx_buf_len; 548 548 549 549 if (ring->vsi->type == ICE_VSI_PF) { 550 - if (!xdp_rxq_info_is_reg(&ring->xdp_rxq)) 551 - /* coverity[check_return] */ 552 - __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev, 553 - ring->q_index, 554 - ring->q_vector->napi.napi_id, 555 - ring->vsi->rx_buf_len); 550 + if (!xdp_rxq_info_is_reg(&ring->xdp_rxq)) { 551 + err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev, 552 + ring->q_index, 553 + ring->q_vector->napi.napi_id, 554 + ring->rx_buf_len); 555 + if (err) 556 + return err; 557 + } 556 558 557 559 ring->xsk_pool = ice_xsk_pool(ring); 558 560 if (ring->xsk_pool) { 559 - xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq); 561 + xdp_rxq_info_unreg(&ring->xdp_rxq); 560 562 561 563 ring->rx_buf_len = 562 564 xsk_pool_get_rx_frame_size(ring->xsk_pool); 565 + err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev, 566 + ring->q_index, 567 + ring->q_vector->napi.napi_id, 568 + ring->rx_buf_len); 569 + if (err) 570 + return err; 563 571 err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, 564 572 MEM_TYPE_XSK_BUFF_POOL, 565 573 NULL); ··· 579 571 dev_info(dev, "Registered XDP mem model MEM_TYPE_XSK_BUFF_POOL on Rx ring %d\n", 580 572 ring->q_index); 581 573 } else { 582 - if (!xdp_rxq_info_is_reg(&ring->xdp_rxq)) 583 - /* coverity[check_return] */ 584 - __xdp_rxq_info_reg(&ring->xdp_rxq, 585 - ring->netdev, 586 - ring->q_index, 587 - ring->q_vector->napi.napi_id, 588 - ring->vsi->rx_buf_len); 574 + if (!xdp_rxq_info_is_reg(&ring->xdp_rxq)) { 575 + err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev, 576 + ring->q_index, 577 + ring->q_vector->napi.napi_id, 578 + ring->rx_buf_len); 579 + if (err) 580 + return err; 581 + } 589 582 590 583 err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, 591 584 MEM_TYPE_PAGE_SHARED,
+9 -10
drivers/net/ethernet/intel/ice/ice_txrx.c
··· 513 513 if (ice_is_xdp_ena_vsi(rx_ring->vsi)) 514 514 WRITE_ONCE(rx_ring->xdp_prog, rx_ring->vsi->xdp_prog); 515 515 516 - if (rx_ring->vsi->type == ICE_VSI_PF && 517 - !xdp_rxq_info_is_reg(&rx_ring->xdp_rxq)) 518 - if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev, 519 - rx_ring->q_index, rx_ring->q_vector->napi.napi_id)) 520 - goto err; 521 516 return 0; 522 517 523 518 err: ··· 598 603 ret = ICE_XDP_CONSUMED; 599 604 } 600 605 exit: 601 - rx_buf->act = ret; 602 - if (unlikely(xdp_buff_has_frags(xdp))) 603 - ice_set_rx_bufs_act(xdp, rx_ring, ret); 606 + ice_set_rx_bufs_act(xdp, rx_ring, ret); 604 607 } 605 608 606 609 /** ··· 886 893 } 887 894 888 895 if (unlikely(sinfo->nr_frags == MAX_SKB_FRAGS)) { 889 - if (unlikely(xdp_buff_has_frags(xdp))) 890 - ice_set_rx_bufs_act(xdp, rx_ring, ICE_XDP_CONSUMED); 896 + ice_set_rx_bufs_act(xdp, rx_ring, ICE_XDP_CONSUMED); 891 897 return -ENOMEM; 892 898 } 893 899 894 900 __skb_fill_page_desc_noacc(sinfo, sinfo->nr_frags++, rx_buf->page, 895 901 rx_buf->page_offset, size); 896 902 sinfo->xdp_frags_size += size; 903 + /* remember frag count before XDP prog execution; bpf_xdp_adjust_tail() 904 + * can pop off frags but driver has to handle it on its own 905 + */ 906 + rx_ring->nr_frags = sinfo->nr_frags; 897 907 898 908 if (page_is_pfmemalloc(rx_buf->page)) 899 909 xdp_buff_set_frag_pfmemalloc(xdp); ··· 1247 1251 1248 1252 xdp->data = NULL; 1249 1253 rx_ring->first_desc = ntc; 1254 + rx_ring->nr_frags = 0; 1250 1255 continue; 1251 1256 construct_skb: 1252 1257 if (likely(ice_ring_uses_build_skb(rx_ring))) ··· 1263 1266 ICE_XDP_CONSUMED); 1264 1267 xdp->data = NULL; 1265 1268 rx_ring->first_desc = ntc; 1269 + rx_ring->nr_frags = 0; 1266 1270 break; 1267 1271 } 1268 1272 xdp->data = NULL; 1269 1273 rx_ring->first_desc = ntc; 1274 + rx_ring->nr_frags = 0; 1270 1275 1271 1276 stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_RXE_S); 1272 1277 if (unlikely(ice_test_staterr(rx_desc->wb.status_error0,
+1
drivers/net/ethernet/intel/ice/ice_txrx.h
··· 358 358 struct ice_tx_ring *xdp_ring; 359 359 struct ice_rx_ring *next; /* pointer to next ring in q_vector */ 360 360 struct xsk_buff_pool *xsk_pool; 361 + u32 nr_frags; 361 362 dma_addr_t dma; /* physical address of ring */ 362 363 u16 rx_buf_len; 363 364 u8 dcb_tc; /* Traffic class of ring */
+22 -9
drivers/net/ethernet/intel/ice/ice_txrx_lib.h
··· 12 12 * act: action to store onto Rx buffers related to XDP buffer parts 13 13 * 14 14 * Set action that should be taken before putting Rx buffer from first frag 15 - * to one before last. Last one is handled by caller of this function as it 16 - * is the EOP frag that is currently being processed. This function is 17 - * supposed to be called only when XDP buffer contains frags. 15 + * to the last. 18 16 */ 19 17 static inline void 20 18 ice_set_rx_bufs_act(struct xdp_buff *xdp, const struct ice_rx_ring *rx_ring, 21 19 const unsigned int act) 22 20 { 23 - const struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp); 24 - u32 first = rx_ring->first_desc; 25 - u32 nr_frags = sinfo->nr_frags; 21 + u32 sinfo_frags = xdp_get_shared_info_from_buff(xdp)->nr_frags; 22 + u32 nr_frags = rx_ring->nr_frags + 1; 23 + u32 idx = rx_ring->first_desc; 26 24 u32 cnt = rx_ring->count; 27 25 struct ice_rx_buf *buf; 28 26 29 27 for (int i = 0; i < nr_frags; i++) { 30 - buf = &rx_ring->rx_buf[first]; 28 + buf = &rx_ring->rx_buf[idx]; 31 29 buf->act = act; 32 30 33 - if (++first == cnt) 34 - first = 0; 31 + if (++idx == cnt) 32 + idx = 0; 33 + } 34 + 35 + /* adjust pagecnt_bias on frags freed by XDP prog */ 36 + if (sinfo_frags < rx_ring->nr_frags && act == ICE_XDP_CONSUMED) { 37 + u32 delta = rx_ring->nr_frags - sinfo_frags; 38 + 39 + while (delta) { 40 + if (idx == 0) 41 + idx = cnt - 1; 42 + else 43 + idx--; 44 + buf = &rx_ring->rx_buf[idx]; 45 + buf->pagecnt_bias--; 46 + delta--; 47 + } 35 48 } 36 49 } 37 50
+2 -2
drivers/net/ethernet/intel/ice/ice_xsk.c
··· 825 825 } 826 826 827 827 __skb_fill_page_desc_noacc(sinfo, sinfo->nr_frags++, 828 - virt_to_page(xdp->data_hard_start), 0, size); 828 + virt_to_page(xdp->data_hard_start), 829 + XDP_PACKET_HEADROOM, size); 829 830 sinfo->xdp_frags_size += size; 830 831 xsk_buff_add_frag(xdp); 831 832 ··· 896 895 897 896 if (!first) { 898 897 first = xdp; 899 - xdp_buff_clear_frags_flag(first); 900 898 } else if (ice_add_xsk_frag(rx_ring, first, xdp, size)) { 901 899 break; 902 900 }
+2
drivers/net/ethernet/intel/idpf/idpf_lib.c
··· 783 783 /* setup watchdog timeout value to be 5 second */ 784 784 netdev->watchdog_timeo = 5 * HZ; 785 785 786 + netdev->dev_port = idx; 787 + 786 788 /* configure default MTU size */ 787 789 netdev->min_mtu = ETH_MIN_MTU; 788 790 netdev->max_mtu = vport->max_mtu;
+1
drivers/net/ethernet/litex/litex_liteeth.c
··· 318 318 module_platform_driver(liteeth_driver); 319 319 320 320 MODULE_AUTHOR("Joel Stanley <joel@jms.id.au>"); 321 + MODULE_DESCRIPTION("LiteX Liteeth Ethernet driver"); 321 322 MODULE_LICENSE("GPL");
+26 -1
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
··· 614 614 mvpp2_write(priv, MVPP22_BM_POOL_BASE_ADDR_HIGH_REG, val); 615 615 } 616 616 617 + /* Cleanup pool before actual initialization in the OS */ 618 + static void mvpp2_bm_pool_cleanup(struct mvpp2 *priv, int pool_id) 619 + { 620 + unsigned int thread = mvpp2_cpu_to_thread(priv, get_cpu()); 621 + u32 val; 622 + int i; 623 + 624 + /* Drain the BM from all possible residues left by firmware */ 625 + for (i = 0; i < MVPP2_BM_POOL_SIZE_MAX; i++) 626 + mvpp2_thread_read(priv, thread, MVPP2_BM_PHY_ALLOC_REG(pool_id)); 627 + 628 + put_cpu(); 629 + 630 + /* Stop the BM pool */ 631 + val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(pool_id)); 632 + val |= MVPP2_BM_STOP_MASK; 633 + mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(pool_id), val); 634 + } 635 + 617 636 static int mvpp2_bm_init(struct device *dev, struct mvpp2 *priv) 618 637 { 619 638 enum dma_data_direction dma_dir = DMA_FROM_DEVICE; 620 639 int i, err, poolnum = MVPP2_BM_POOLS_NUM; 621 640 struct mvpp2_port *port; 641 + 642 + if (priv->percpu_pools) 643 + poolnum = mvpp2_get_nrxqs(priv) * 2; 644 + 645 + /* Clean up the pool state in case it contains stale state */ 646 + for (i = 0; i < poolnum; i++) 647 + mvpp2_bm_pool_cleanup(priv, i); 622 648 623 649 if (priv->percpu_pools) { 624 650 for (i = 0; i < priv->port_count; i++) { ··· 655 629 } 656 630 } 657 631 658 - poolnum = mvpp2_get_nrxqs(priv) * 2; 659 632 for (i = 0; i < poolnum; i++) { 660 633 /* the pool in use */ 661 634 int pn = i / (poolnum / 2);
+1
drivers/net/ethernet/marvell/octeontx2/af/mbox.c
··· 413 413 EXPORT_SYMBOL(otx2_mbox_id2name); 414 414 415 415 MODULE_AUTHOR("Marvell."); 416 + MODULE_DESCRIPTION("Marvell RVU NIC Mbox helpers"); 416 417 MODULE_LICENSE("GPL v2");
+3 -2
drivers/net/ethernet/mellanox/mlx5/core/cmd.c
··· 1923 1923 { 1924 1924 const char *namep = mlx5_command_str(opcode); 1925 1925 struct mlx5_cmd_stats *stats; 1926 + unsigned long flags; 1926 1927 1927 1928 if (!err || !(strcmp(namep, "unknown command opcode"))) 1928 1929 return; ··· 1931 1930 stats = xa_load(&dev->cmd.stats, opcode); 1932 1931 if (!stats) 1933 1932 return; 1934 - spin_lock_irq(&stats->lock); 1933 + spin_lock_irqsave(&stats->lock, flags); 1935 1934 stats->failed++; 1936 1935 if (err < 0) 1937 1936 stats->last_failed_errno = -err; ··· 1940 1939 stats->last_failed_mbox_status = status; 1941 1940 stats->last_failed_syndrome = syndrome; 1942 1941 } 1943 - spin_unlock_irq(&stats->lock); 1942 + spin_unlock_irqrestore(&stats->lock, flags); 1944 1943 } 1945 1944 1946 1945 /* preserve -EREMOTEIO for outbox.status != OK, otherwise return err as is */
+1 -1
drivers/net/ethernet/mellanox/mlx5/core/en.h
··· 1124 1124 extern const struct ethtool_ops mlx5e_ethtool_ops; 1125 1125 1126 1126 int mlx5e_create_mkey(struct mlx5_core_dev *mdev, u32 pdn, u32 *mkey); 1127 - int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev); 1127 + int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev, bool create_tises); 1128 1128 void mlx5e_destroy_mdev_resources(struct mlx5_core_dev *mdev); 1129 1129 int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb, 1130 1130 bool enable_mc_lb);
+1
drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.c
··· 436 436 in = kvzalloc(inlen, GFP_KERNEL); 437 437 if (!in || !ft->g) { 438 438 kfree(ft->g); 439 + ft->g = NULL; 439 440 kvfree(in); 440 441 return -ENOMEM; 441 442 }
+2 -2
drivers/net/ethernet/mellanox/mlx5/core/en/params.c
··· 1064 1064 void *wq = MLX5_ADDR_OF(sqc, sqc, wq); 1065 1065 bool allow_swp; 1066 1066 1067 - allow_swp = 1068 - mlx5_geneve_tx_allowed(mdev) || !!mlx5_ipsec_device_caps(mdev); 1067 + allow_swp = mlx5_geneve_tx_allowed(mdev) || 1068 + (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_CRYPTO); 1069 1069 mlx5e_build_sq_param_common(mdev, param); 1070 1070 MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size); 1071 1071 MLX5_SET(sqc, sqc, allow_swp, allow_swp);
+1 -1
drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
··· 213 213 mlx5e_ptpsq_mark_ts_cqes_undelivered(ptpsq, hwtstamp); 214 214 out: 215 215 napi_consume_skb(skb, budget); 216 - md_buff[*md_buff_sz++] = metadata_id; 216 + md_buff[(*md_buff_sz)++] = metadata_id; 217 217 if (unlikely(mlx5e_ptp_metadata_map_unhealthy(&ptpsq->metadata_map)) && 218 218 !test_and_set_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state)) 219 219 queue_work(ptpsq->txqsq.priv->wq, &ptpsq->report_unhealthy_work);
+8 -2
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
··· 336 336 /* iv len */ 337 337 aes_gcm->icv_len = x->aead->alg_icv_len; 338 338 339 + attrs->dir = x->xso.dir; 340 + 339 341 /* esn */ 340 342 if (x->props.flags & XFRM_STATE_ESN) { 341 343 attrs->replay_esn.trigger = true; 342 344 attrs->replay_esn.esn = sa_entry->esn_state.esn; 343 345 attrs->replay_esn.esn_msb = sa_entry->esn_state.esn_msb; 344 346 attrs->replay_esn.overlap = sa_entry->esn_state.overlap; 347 + if (attrs->dir == XFRM_DEV_OFFLOAD_OUT) 348 + goto skip_replay_window; 349 + 345 350 switch (x->replay_esn->replay_window) { 346 351 case 32: 347 352 attrs->replay_esn.replay_window = ··· 370 365 } 371 366 } 372 367 373 - attrs->dir = x->xso.dir; 368 + skip_replay_window: 374 369 /* spi */ 375 370 attrs->spi = be32_to_cpu(x->id.spi); 376 371 ··· 506 501 return -EINVAL; 507 502 } 508 503 509 - if (x->replay_esn && x->replay_esn->replay_window != 32 && 504 + if (x->replay_esn && x->xso.dir == XFRM_DEV_OFFLOAD_IN && 505 + x->replay_esn->replay_window != 32 && 510 506 x->replay_esn->replay_window != 64 && 511 507 x->replay_esn->replay_window != 128 && 512 508 x->replay_esn->replay_window != 256) {
+15 -11
drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
··· 254 254 255 255 ft->g = kcalloc(MLX5E_ARFS_NUM_GROUPS, 256 256 sizeof(*ft->g), GFP_KERNEL); 257 - in = kvzalloc(inlen, GFP_KERNEL); 258 - if (!in || !ft->g) { 259 - kfree(ft->g); 260 - kvfree(in); 257 + if (!ft->g) 261 258 return -ENOMEM; 259 + 260 + in = kvzalloc(inlen, GFP_KERNEL); 261 + if (!in) { 262 + err = -ENOMEM; 263 + goto err_free_g; 262 264 } 263 265 264 266 mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria); ··· 280 278 break; 281 279 default: 282 280 err = -EINVAL; 283 - goto out; 281 + goto err_free_in; 284 282 } 285 283 286 284 switch (type) { ··· 302 300 break; 303 301 default: 304 302 err = -EINVAL; 305 - goto out; 303 + goto err_free_in; 306 304 } 307 305 308 306 MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); ··· 311 309 MLX5_SET_CFG(in, end_flow_index, ix - 1); 312 310 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); 313 311 if (IS_ERR(ft->g[ft->num_groups])) 314 - goto err; 312 + goto err_clean_group; 315 313 ft->num_groups++; 316 314 317 315 memset(in, 0, inlen); ··· 320 318 MLX5_SET_CFG(in, end_flow_index, ix - 1); 321 319 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); 322 320 if (IS_ERR(ft->g[ft->num_groups])) 323 - goto err; 321 + goto err_clean_group; 324 322 ft->num_groups++; 325 323 326 324 kvfree(in); 327 325 return 0; 328 326 329 - err: 327 + err_clean_group: 330 328 err = PTR_ERR(ft->g[ft->num_groups]); 331 329 ft->g[ft->num_groups] = NULL; 332 - out: 330 + err_free_in: 333 331 kvfree(in); 334 - 332 + err_free_g: 333 + kfree(ft->g); 334 + ft->g = NULL; 335 335 return err; 336 336 } 337 337
+13 -8
drivers/net/ethernet/mellanox/mlx5/core/en_common.c
··· 95 95 { 96 96 int tc, i; 97 97 98 - for (i = 0; i < MLX5_MAX_PORTS; i++) 98 + for (i = 0; i < mlx5e_get_num_lag_ports(mdev); i++) 99 99 for (tc = 0; tc < MLX5_MAX_NUM_TC; tc++) 100 100 mlx5e_destroy_tis(mdev, tisn[i][tc]); 101 101 } ··· 110 110 int tc, i; 111 111 int err; 112 112 113 - for (i = 0; i < MLX5_MAX_PORTS; i++) { 113 + for (i = 0; i < mlx5e_get_num_lag_ports(mdev); i++) { 114 114 for (tc = 0; tc < MLX5_MAX_NUM_TC; tc++) { 115 115 u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {}; 116 116 void *tisc; ··· 140 140 return err; 141 141 } 142 142 143 - int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev) 143 + int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev, bool create_tises) 144 144 { 145 145 struct mlx5e_hw_objs *res = &mdev->mlx5e_res.hw_objs; 146 146 int err; ··· 169 169 goto err_destroy_mkey; 170 170 } 171 171 172 - err = mlx5e_create_tises(mdev, res->tisn); 173 - if (err) { 174 - mlx5_core_err(mdev, "alloc tises failed, %d\n", err); 175 - goto err_destroy_bfreg; 172 + if (create_tises) { 173 + err = mlx5e_create_tises(mdev, res->tisn); 174 + if (err) { 175 + mlx5_core_err(mdev, "alloc tises failed, %d\n", err); 176 + goto err_destroy_bfreg; 177 + } 178 + res->tisn_valid = true; 176 179 } 180 + 177 181 INIT_LIST_HEAD(&res->td.tirs_list); 178 182 mutex_init(&res->td.list_lock); 179 183 ··· 207 203 208 204 mlx5_crypto_dek_cleanup(mdev->mlx5e_res.dek_priv); 209 205 mdev->mlx5e_res.dek_priv = NULL; 210 - mlx5e_destroy_tises(mdev, res->tisn); 206 + if (res->tisn_valid) 207 + mlx5e_destroy_tises(mdev, res->tisn); 211 208 mlx5_free_bfreg(mdev, &res->bfreg); 212 209 mlx5_core_destroy_mkey(mdev, res->mkey); 213 210 mlx5_core_dealloc_transport_domain(mdev, res->td.tdn);
+1 -1
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
··· 5992 5992 if (netif_device_present(netdev)) 5993 5993 return 0; 5994 5994 5995 - err = mlx5e_create_mdev_resources(mdev); 5995 + err = mlx5e_create_mdev_resources(mdev, true); 5996 5996 if (err) 5997 5997 return err; 5998 5998
+3 -2
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
··· 761 761 762 762 err = mlx5e_rss_params_indir_init(&indir, mdev, 763 763 mlx5e_rqt_size(mdev, hp->num_channels), 764 - mlx5e_rqt_size(mdev, priv->max_nch)); 764 + mlx5e_rqt_size(mdev, hp->num_channels)); 765 765 if (err) 766 766 return err; 767 767 ··· 2014 2014 list_for_each_entry_safe(peer_flow, tmp, &flow->peer_flows, peer_flows) { 2015 2015 if (peer_index != mlx5_get_dev_index(peer_flow->priv->mdev)) 2016 2016 continue; 2017 + 2018 + list_del(&peer_flow->peer_flows); 2017 2019 if (refcount_dec_and_test(&peer_flow->refcnt)) { 2018 2020 mlx5e_tc_del_fdb_flow(peer_flow->priv, peer_flow); 2019 - list_del(&peer_flow->peer_flows); 2020 2021 kfree(peer_flow); 2021 2022 } 2022 2023 }
+3
drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_mcast.c
··· 83 83 i++; 84 84 } 85 85 86 + rule_spec->flow_context.flags |= FLOW_CONTEXT_UPLINK_HAIRPIN_EN; 86 87 rule_spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; 87 88 dmac_v = MLX5_ADDR_OF(fte_match_param, rule_spec->match_value, outer_headers.dmac_47_16); 88 89 ether_addr_copy(dmac_v, entry->key.addr); ··· 588 587 if (!rule_spec) 589 588 return ERR_PTR(-ENOMEM); 590 589 590 + rule_spec->flow_context.flags |= FLOW_CONTEXT_UPLINK_HAIRPIN_EN; 591 591 rule_spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; 592 592 593 593 flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT; ··· 664 662 dest.vport.flags = MLX5_FLOW_DEST_VPORT_VHCA_ID; 665 663 dest.vport.vhca_id = port->esw_owner_vhca_id; 666 664 } 665 + rule_spec->flow_context.flags |= FLOW_CONTEXT_UPLINK_HAIRPIN_EN; 667 666 handle = mlx5_add_flow_rules(port->mcast.ft, rule_spec, &flow_act, &dest, 1); 668 667 669 668 kvfree(rule_spec);
+2
drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
··· 566 566 fte->flow_context.flow_tag); 567 567 MLX5_SET(flow_context, in_flow_context, flow_source, 568 568 fte->flow_context.flow_source); 569 + MLX5_SET(flow_context, in_flow_context, uplink_hairpin_en, 570 + !!(fte->flow_context.flags & FLOW_CONTEXT_UPLINK_HAIRPIN_EN)); 569 571 570 572 MLX5_SET(flow_context, in_flow_context, extended_destination, 571 573 extended_dest);
+1 -1
drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
··· 783 783 } 784 784 785 785 /* This should only be called once per mdev */ 786 - err = mlx5e_create_mdev_resources(mdev); 786 + err = mlx5e_create_mdev_resources(mdev, false); 787 787 if (err) 788 788 goto destroy_ht; 789 789 }
+1 -1
drivers/net/ethernet/mellanox/mlx5/core/lib/aso.c
··· 98 98 mlx5_fill_page_frag_array(&cq->wq_ctrl.buf, 99 99 (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas)); 100 100 101 - MLX5_SET(cqc, cqc, cq_period_mode, DIM_CQ_PERIOD_MODE_START_FROM_EQE); 101 + MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_EQE); 102 102 MLX5_SET(cqc, cqc, c_eqn_or_apu_element, eqn); 103 103 MLX5_SET(cqc, cqc, uar_page, mdev->priv.uar->index); 104 104 MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.buf.page_shift -
+12 -5
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
··· 788 788 switch (action_type) { 789 789 case DR_ACTION_TYP_DROP: 790 790 attr.final_icm_addr = nic_dmn->drop_icm_addr; 791 + attr.hit_gvmi = nic_dmn->drop_icm_addr >> 48; 791 792 break; 792 793 case DR_ACTION_TYP_FT: 793 794 dest_action = action; ··· 874 873 action->sampler->tx_icm_addr; 875 874 break; 876 875 case DR_ACTION_TYP_VPORT: 877 - attr.hit_gvmi = action->vport->caps->vhca_gvmi; 878 - dest_action = action; 879 - attr.final_icm_addr = rx_rule ? 880 - action->vport->caps->icm_address_rx : 881 - action->vport->caps->icm_address_tx; 876 + if (unlikely(rx_rule && action->vport->caps->num == MLX5_VPORT_UPLINK)) { 877 + /* can't go to uplink on RX rule - dropping instead */ 878 + attr.final_icm_addr = nic_dmn->drop_icm_addr; 879 + attr.hit_gvmi = nic_dmn->drop_icm_addr >> 48; 880 + } else { 881 + attr.hit_gvmi = action->vport->caps->vhca_gvmi; 882 + dest_action = action; 883 + attr.final_icm_addr = rx_rule ? 884 + action->vport->caps->icm_address_rx : 885 + action->vport->caps->icm_address_tx; 886 + } 882 887 break; 883 888 case DR_ACTION_TYP_POP_VLAN: 884 889 if (!rx_rule && !(dmn->ste_ctx->actions_caps &
+21
drivers/net/ethernet/mellanox/mlx5/core/vport.c
··· 440 440 } 441 441 EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_system_image_guid); 442 442 443 + int mlx5_query_nic_vport_sd_group(struct mlx5_core_dev *mdev, u8 *sd_group) 444 + { 445 + int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out); 446 + u32 *out; 447 + int err; 448 + 449 + out = kvzalloc(outlen, GFP_KERNEL); 450 + if (!out) 451 + return -ENOMEM; 452 + 453 + err = mlx5_query_nic_vport_context(mdev, 0, out); 454 + if (err) 455 + goto out; 456 + 457 + *sd_group = MLX5_GET(query_nic_vport_context_out, out, 458 + nic_vport_context.sd_group); 459 + out: 460 + kvfree(out); 461 + return err; 462 + } 463 + 443 464 int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid) 444 465 { 445 466 u32 *out;
+3
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
··· 7542 7542 dev_err(priv->device, "unable to bring out of ahb reset: %pe\n", 7543 7543 ERR_PTR(ret)); 7544 7544 7545 + /* Wait a bit for the reset to take effect */ 7546 + udelay(10); 7547 + 7545 7548 /* Init MAC and get the capabilities */ 7546 7549 ret = stmmac_hw_init(priv); 7547 7550 if (ret)
+30 -7
drivers/net/fjes/fjes_hw.c
··· 221 221 222 222 mem_size = FJES_DEV_REQ_BUF_SIZE(hw->max_epid); 223 223 hw->hw_info.req_buf = kzalloc(mem_size, GFP_KERNEL); 224 - if (!(hw->hw_info.req_buf)) 225 - return -ENOMEM; 224 + if (!(hw->hw_info.req_buf)) { 225 + result = -ENOMEM; 226 + goto free_ep_info; 227 + } 226 228 227 229 hw->hw_info.req_buf_size = mem_size; 228 230 229 231 mem_size = FJES_DEV_RES_BUF_SIZE(hw->max_epid); 230 232 hw->hw_info.res_buf = kzalloc(mem_size, GFP_KERNEL); 231 - if (!(hw->hw_info.res_buf)) 232 - return -ENOMEM; 233 + if (!(hw->hw_info.res_buf)) { 234 + result = -ENOMEM; 235 + goto free_req_buf; 236 + } 233 237 234 238 hw->hw_info.res_buf_size = mem_size; 235 239 236 240 result = fjes_hw_alloc_shared_status_region(hw); 237 241 if (result) 238 - return result; 242 + goto free_res_buf; 239 243 240 244 hw->hw_info.buffer_share_bit = 0; 241 245 hw->hw_info.buffer_unshare_reserve_bit = 0; ··· 250 246 251 247 result = fjes_hw_alloc_epbuf(&buf_pair->tx); 252 248 if (result) 253 - return result; 249 + goto free_epbuf; 254 250 255 251 result = fjes_hw_alloc_epbuf(&buf_pair->rx); 256 252 if (result) 257 - return result; 253 + goto free_epbuf; 258 254 259 255 spin_lock_irqsave(&hw->rx_status_lock, flags); 260 256 fjes_hw_setup_epbuf(&buf_pair->tx, mac, ··· 277 273 fjes_hw_init_command_registers(hw, &param); 278 274 279 275 return 0; 276 + 277 + free_epbuf: 278 + for (epidx = 0; epidx < hw->max_epid ; epidx++) { 279 + if (epidx == hw->my_epid) 280 + continue; 281 + fjes_hw_free_epbuf(&hw->ep_shm_info[epidx].tx); 282 + fjes_hw_free_epbuf(&hw->ep_shm_info[epidx].rx); 283 + } 284 + fjes_hw_free_shared_status_region(hw); 285 + free_res_buf: 286 + kfree(hw->hw_info.res_buf); 287 + hw->hw_info.res_buf = NULL; 288 + free_req_buf: 289 + kfree(hw->hw_info.req_buf); 290 + hw->hw_info.req_buf = NULL; 291 + free_ep_info: 292 + kfree(hw->ep_shm_info); 293 + hw->ep_shm_info = NULL; 294 + return result; 280 295 } 281 296 282 297 static void fjes_hw_cleanup(struct fjes_hw *hw)
+2 -2
drivers/net/hyperv/netvsc_drv.c
··· 44 44 45 45 static unsigned int ring_size __ro_after_init = 128; 46 46 module_param(ring_size, uint, 0444); 47 - MODULE_PARM_DESC(ring_size, "Ring buffer size (# of pages)"); 47 + MODULE_PARM_DESC(ring_size, "Ring buffer size (# of 4K pages)"); 48 48 unsigned int netvsc_ring_bytes __ro_after_init; 49 49 50 50 static const u32 default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE | ··· 2807 2807 pr_info("Increased ring_size to %u (min allowed)\n", 2808 2808 ring_size); 2809 2809 } 2810 - netvsc_ring_bytes = ring_size * PAGE_SIZE; 2810 + netvsc_ring_bytes = VMBUS_RING_SIZE(ring_size * 4096); 2811 2811 2812 2812 register_netdevice_notifier(&netvsc_netdev_notifier); 2813 2813
+20 -5
drivers/net/macsec.c
··· 607 607 return ERR_PTR(-EINVAL); 608 608 } 609 609 610 - ret = skb_ensure_writable_head_tail(skb, dev); 611 - if (unlikely(ret < 0)) { 612 - macsec_txsa_put(tx_sa); 613 - kfree_skb(skb); 614 - return ERR_PTR(ret); 610 + if (unlikely(skb_headroom(skb) < MACSEC_NEEDED_HEADROOM || 611 + skb_tailroom(skb) < MACSEC_NEEDED_TAILROOM)) { 612 + struct sk_buff *nskb = skb_copy_expand(skb, 613 + MACSEC_NEEDED_HEADROOM, 614 + MACSEC_NEEDED_TAILROOM, 615 + GFP_ATOMIC); 616 + if (likely(nskb)) { 617 + consume_skb(skb); 618 + skb = nskb; 619 + } else { 620 + macsec_txsa_put(tx_sa); 621 + kfree_skb(skb); 622 + return ERR_PTR(-ENOMEM); 623 + } 624 + } else { 625 + skb = skb_unshare(skb, GFP_ATOMIC); 626 + if (!skb) { 627 + macsec_txsa_put(tx_sa); 628 + return ERR_PTR(-ENOMEM); 629 + } 615 630 } 616 631 617 632 unprotected_len = skb->len;
+11
drivers/net/phy/micrel.c
··· 120 120 */ 121 121 #define LAN8814_1PPM_FORMAT 17179 122 122 123 + #define PTP_RX_VERSION 0x0248 124 + #define PTP_TX_VERSION 0x0288 125 + #define PTP_MAX_VERSION(x) (((x) & GENMASK(7, 0)) << 8) 126 + #define PTP_MIN_VERSION(x) ((x) & GENMASK(7, 0)) 127 + 123 128 #define PTP_RX_MOD 0x024F 124 129 #define PTP_RX_MOD_BAD_UDPV4_CHKSUM_FORCE_FCS_DIS_ BIT(3) 125 130 #define PTP_RX_TIMESTAMP_EN 0x024D ··· 3154 3149 lanphy_write_page_reg(phydev, 5, PTP_RX_PARSE_L2_ADDR_EN, 0); 3155 3150 lanphy_write_page_reg(phydev, 5, PTP_TX_PARSE_IP_ADDR_EN, 0); 3156 3151 lanphy_write_page_reg(phydev, 5, PTP_RX_PARSE_IP_ADDR_EN, 0); 3152 + 3153 + /* Disable checking for minorVersionPTP field */ 3154 + lanphy_write_page_reg(phydev, 5, PTP_RX_VERSION, 3155 + PTP_MAX_VERSION(0xff) | PTP_MIN_VERSION(0x0)); 3156 + lanphy_write_page_reg(phydev, 5, PTP_TX_VERSION, 3157 + PTP_MAX_VERSION(0xff) | PTP_MIN_VERSION(0x0)); 3157 3158 3158 3159 skb_queue_head_init(&ptp_priv->tx_queue); 3159 3160 skb_queue_head_init(&ptp_priv->rx_queue);
+8 -2
drivers/net/tun.c
··· 1630 1630 switch (act) { 1631 1631 case XDP_REDIRECT: 1632 1632 err = xdp_do_redirect(tun->dev, xdp, xdp_prog); 1633 - if (err) 1633 + if (err) { 1634 + dev_core_stats_rx_dropped_inc(tun->dev); 1634 1635 return err; 1636 + } 1637 + dev_sw_netstats_rx_add(tun->dev, xdp->data_end - xdp->data); 1635 1638 break; 1636 1639 case XDP_TX: 1637 1640 err = tun_xdp_tx(tun->dev, xdp); 1638 - if (err < 0) 1641 + if (err < 0) { 1642 + dev_core_stats_rx_dropped_inc(tun->dev); 1639 1643 return err; 1644 + } 1645 + dev_sw_netstats_rx_add(tun->dev, xdp->data_end - xdp->data); 1640 1646 break; 1641 1647 case XDP_PASS: 1642 1648 break;
-4
drivers/net/wireless/ath/ath11k/core.h
··· 368 368 struct ieee80211_chanctx_conf chanctx; 369 369 struct ath11k_arp_ns_offload arp_ns_offload; 370 370 struct ath11k_rekey_data rekey_data; 371 - 372 - #ifdef CONFIG_ATH11K_DEBUGFS 373 - struct dentry *debugfs_twt; 374 - #endif /* CONFIG_ATH11K_DEBUGFS */ 375 371 }; 376 372 377 373 struct ath11k_vif_iter {
+10 -15
drivers/net/wireless/ath/ath11k/debugfs.c
··· 1894 1894 .open = simple_open 1895 1895 }; 1896 1896 1897 - void ath11k_debugfs_add_interface(struct ath11k_vif *arvif) 1897 + void ath11k_debugfs_op_vif_add(struct ieee80211_hw *hw, 1898 + struct ieee80211_vif *vif) 1898 1899 { 1900 + struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); 1899 1901 struct ath11k_base *ab = arvif->ar->ab; 1902 + struct dentry *debugfs_twt; 1900 1903 1901 1904 if (arvif->vif->type != NL80211_IFTYPE_AP && 1902 1905 !(arvif->vif->type == NL80211_IFTYPE_STATION && 1903 1906 test_bit(WMI_TLV_SERVICE_STA_TWT, ab->wmi_ab.svc_map))) 1904 1907 return; 1905 1908 1906 - arvif->debugfs_twt = debugfs_create_dir("twt", 1907 - arvif->vif->debugfs_dir); 1908 - debugfs_create_file("add_dialog", 0200, arvif->debugfs_twt, 1909 + debugfs_twt = debugfs_create_dir("twt", 1910 + arvif->vif->debugfs_dir); 1911 + debugfs_create_file("add_dialog", 0200, debugfs_twt, 1909 1912 arvif, &ath11k_fops_twt_add_dialog); 1910 1913 1911 - debugfs_create_file("del_dialog", 0200, arvif->debugfs_twt, 1914 + debugfs_create_file("del_dialog", 0200, debugfs_twt, 1912 1915 arvif, &ath11k_fops_twt_del_dialog); 1913 1916 1914 - debugfs_create_file("pause_dialog", 0200, arvif->debugfs_twt, 1917 + debugfs_create_file("pause_dialog", 0200, debugfs_twt, 1915 1918 arvif, &ath11k_fops_twt_pause_dialog); 1916 1919 1917 - debugfs_create_file("resume_dialog", 0200, arvif->debugfs_twt, 1920 + debugfs_create_file("resume_dialog", 0200, debugfs_twt, 1918 1921 arvif, &ath11k_fops_twt_resume_dialog); 1919 1922 } 1920 1923 1921 - void ath11k_debugfs_remove_interface(struct ath11k_vif *arvif) 1922 - { 1923 - if (!arvif->debugfs_twt) 1924 - return; 1925 - 1926 - debugfs_remove_recursive(arvif->debugfs_twt); 1927 - arvif->debugfs_twt = NULL; 1928 - }
+2 -10
drivers/net/wireless/ath/ath11k/debugfs.h
··· 307 307 return ar->debug.rx_filter; 308 308 } 309 309 310 - void ath11k_debugfs_add_interface(struct ath11k_vif *arvif); 311 - void ath11k_debugfs_remove_interface(struct ath11k_vif *arvif); 310 + void ath11k_debugfs_op_vif_add(struct ieee80211_hw *hw, 311 + struct ieee80211_vif *vif); 312 312 void ath11k_debugfs_add_dbring_entry(struct ath11k *ar, 313 313 enum wmi_direct_buffer_module id, 314 314 enum ath11k_dbg_dbr_event event, ··· 385 385 u32 pdev_id, u32 vdev_id, u32 stats_id) 386 386 { 387 387 return 0; 388 - } 389 - 390 - static inline void ath11k_debugfs_add_interface(struct ath11k_vif *arvif) 391 - { 392 - } 393 - 394 - static inline void ath11k_debugfs_remove_interface(struct ath11k_vif *arvif) 395 - { 396 388 } 397 389 398 390 static inline void
+1 -11
drivers/net/wireless/ath/ath11k/mac.c
··· 6756 6756 goto err; 6757 6757 } 6758 6758 6759 - /* In the case of hardware recovery, debugfs files are 6760 - * not deleted since ieee80211_ops.remove_interface() is 6761 - * not invoked. In such cases, try to delete the files. 6762 - * These will be re-created later. 6763 - */ 6764 - ath11k_debugfs_remove_interface(arvif); 6765 - 6766 6759 memset(arvif, 0, sizeof(*arvif)); 6767 6760 6768 6761 arvif->ar = ar; ··· 6932 6939 6933 6940 ath11k_dp_vdev_tx_attach(ar, arvif); 6934 6941 6935 - ath11k_debugfs_add_interface(arvif); 6936 - 6937 6942 if (vif->type != NL80211_IFTYPE_MONITOR && 6938 6943 test_bit(ATH11K_FLAG_MONITOR_CONF_ENABLED, &ar->monitor_flags)) { 6939 6944 ret = ath11k_mac_monitor_vdev_create(ar); ··· 7046 7055 7047 7056 /* Recalc txpower for remaining vdev */ 7048 7057 ath11k_mac_txpower_recalc(ar); 7049 - 7050 - ath11k_debugfs_remove_interface(arvif); 7051 7058 7052 7059 /* TODO: recal traffic pause state based on the available vdevs */ 7053 7060 ··· 9142 9153 #endif 9143 9154 9144 9155 #ifdef CONFIG_ATH11K_DEBUGFS 9156 + .vif_add_debugfs = ath11k_debugfs_op_vif_add, 9145 9157 .sta_add_debugfs = ath11k_debugfs_sta_op_add, 9146 9158 #endif 9147 9159
+2 -2
drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause 2 2 /* 3 - * Copyright (C) 2018-2023 Intel Corporation 3 + * Copyright (C) 2018-2024 Intel Corporation 4 4 */ 5 5 #include <linux/firmware.h> 6 6 #include "iwl-drv.h" ··· 1096 1096 node_trig = (void *)node_tlv->data; 1097 1097 } 1098 1098 1099 - memcpy(node_trig->data + offset, trig->data, trig_data_len); 1099 + memcpy((u8 *)node_trig->data + offset, trig->data, trig_data_len); 1100 1100 node_tlv->length = cpu_to_le32(size); 1101 1101 1102 1102 if (policy & IWL_FW_INI_APPLY_POLICY_OVERRIDE_CFG) {
+1 -1
drivers/net/wireless/intersil/p54/fwio.c
··· 125 125 "FW rev %s - Softmac protocol %x.%x\n", 126 126 fw_version, priv->fw_var >> 8, priv->fw_var & 0xff); 127 127 snprintf(dev->wiphy->fw_version, sizeof(dev->wiphy->fw_version), 128 - "%s - %x.%x", fw_version, 128 + "%.19s - %x.%x", fw_version, 129 129 priv->fw_var >> 8, priv->fw_var & 0xff); 130 130 } 131 131
+38 -6
drivers/net/xen-netback/netback.c
··· 463 463 } 464 464 465 465 for (shinfo->nr_frags = 0; nr_slots > 0 && shinfo->nr_frags < MAX_SKB_FRAGS; 466 - shinfo->nr_frags++, gop++, nr_slots--) { 466 + nr_slots--) { 467 + if (unlikely(!txp->size)) { 468 + unsigned long flags; 469 + 470 + spin_lock_irqsave(&queue->response_lock, flags); 471 + make_tx_response(queue, txp, 0, XEN_NETIF_RSP_OKAY); 472 + push_tx_responses(queue); 473 + spin_unlock_irqrestore(&queue->response_lock, flags); 474 + ++txp; 475 + continue; 476 + } 477 + 467 478 index = pending_index(queue->pending_cons++); 468 479 pending_idx = queue->pending_ring[index]; 469 480 xenvif_tx_create_map_op(queue, pending_idx, txp, 470 481 txp == first ? extra_count : 0, gop); 471 482 frag_set_pending_idx(&frags[shinfo->nr_frags], pending_idx); 483 + ++shinfo->nr_frags; 484 + ++gop; 472 485 473 486 if (txp == first) 474 487 txp = txfrags; ··· 494 481 shinfo = skb_shinfo(nskb); 495 482 frags = shinfo->frags; 496 483 497 - for (shinfo->nr_frags = 0; shinfo->nr_frags < nr_slots; 498 - shinfo->nr_frags++, txp++, gop++) { 484 + for (shinfo->nr_frags = 0; shinfo->nr_frags < nr_slots; ++txp) { 485 + if (unlikely(!txp->size)) { 486 + unsigned long flags; 487 + 488 + spin_lock_irqsave(&queue->response_lock, flags); 489 + make_tx_response(queue, txp, 0, 490 + XEN_NETIF_RSP_OKAY); 491 + push_tx_responses(queue); 492 + spin_unlock_irqrestore(&queue->response_lock, 493 + flags); 494 + continue; 495 + } 496 + 499 497 index = pending_index(queue->pending_cons++); 500 498 pending_idx = queue->pending_ring[index]; 501 499 xenvif_tx_create_map_op(queue, pending_idx, txp, 0, 502 500 gop); 503 501 frag_set_pending_idx(&frags[shinfo->nr_frags], 504 502 pending_idx); 503 + ++shinfo->nr_frags; 504 + ++gop; 505 505 } 506 506 507 - skb_shinfo(skb)->frag_list = nskb; 508 - } else if (nskb) { 507 + if (shinfo->nr_frags) { 508 + skb_shinfo(skb)->frag_list = nskb; 509 + nskb = NULL; 510 + } 511 + } 512 + 513 + if (nskb) { 509 514 /* A frag_list skb was allocated but it is no longer 
needed 510 - * because enough slots were converted to copy ops above. 515 + * because enough slots were converted to copy ops above or some 516 + * were empty. 511 517 */ 512 518 kfree_skb(nskb); 513 519 }
+1 -2
drivers/video/fbdev/core/fbcon.c
··· 631 631 632 632 if (logo_lines > vc->vc_bottom) { 633 633 logo_shown = FBCON_LOGO_CANSHOW; 634 - printk(KERN_INFO 635 - "fbcon_init: disable boot-logo (boot-logo bigger than screen).\n"); 634 + pr_info("fbcon: disable boot-logo (boot-logo bigger than screen).\n"); 636 635 } else { 637 636 logo_shown = FBCON_LOGO_DRAW; 638 637 vc->vc_top = logo_lines;
+3
drivers/video/fbdev/savage/savagefb_driver.c
··· 869 869 870 870 DBG("savagefb_check_var"); 871 871 872 + if (!var->pixclock) 873 + return -EINVAL; 874 + 872 875 var->transp.offset = 0; 873 876 var->transp.length = 0; 874 877 switch (var->bits_per_pixel) {
+2
drivers/video/fbdev/sis/sis_main.c
··· 1444 1444 1445 1445 vtotal = var->upper_margin + var->lower_margin + var->vsync_len; 1446 1446 1447 + if (!var->pixclock) 1448 + return -EINVAL; 1447 1449 pixclock = var->pixclock; 1448 1450 1449 1451 if((var->vmode & FB_VMODE_MASK) == FB_VMODE_NONINTERLACED) {
+1 -1
drivers/video/fbdev/stifb.c
··· 1158 1158 } 1159 1159 break; 1160 1160 } 1161 - stifb_blank(0, (struct fb_info *)fb); /* 0=enable screen */ 1161 + stifb_blank(0, fb->info); /* 0=enable screen */ 1162 1162 1163 1163 SETUP_FB(fb); 1164 1164 }
-1
drivers/video/fbdev/vt8500lcdfb.c
··· 374 374 375 375 irq = platform_get_irq(pdev, 0); 376 376 if (irq < 0) { 377 - dev_err(&pdev->dev, "no IRQ defined\n"); 378 377 ret = -ENODEV; 379 378 goto failed_free_palette; 380 379 }
+22 -8
fs/afs/dir.c
··· 124 124 if (xas_retry(&xas, folio)) 125 125 continue; 126 126 BUG_ON(xa_is_value(folio)); 127 - ASSERTCMP(folio_file_mapping(folio), ==, mapping); 127 + ASSERTCMP(folio->mapping, ==, mapping); 128 128 129 129 folio_put(folio); 130 130 } ··· 202 202 if (xas_retry(&xas, folio)) 203 203 continue; 204 204 205 - BUG_ON(folio_file_mapping(folio) != mapping); 205 + BUG_ON(folio->mapping != mapping); 206 206 207 207 size = min_t(loff_t, folio_size(folio), req->actual_len - folio_pos(folio)); 208 208 for (offset = 0; offset < size; offset += sizeof(*block)) { 209 209 block = kmap_local_folio(folio, offset); 210 - pr_warn("[%02lx] %32phN\n", folio_index(folio) + offset, block); 210 + pr_warn("[%02lx] %32phN\n", folio->index + offset, block); 211 211 kunmap_local(block); 212 212 } 213 213 } ··· 233 233 if (xas_retry(&xas, folio)) 234 234 continue; 235 235 236 - BUG_ON(folio_file_mapping(folio) != mapping); 236 + BUG_ON(folio->mapping != mapping); 237 237 238 238 if (!afs_dir_check_folio(dvnode, folio, req->actual_len)) { 239 239 afs_dir_dump(dvnode, req); ··· 474 474 continue; 475 475 } 476 476 477 + /* Don't expose silly rename entries to userspace. */ 478 + if (nlen > 6 && 479 + dire->u.name[0] == '.' 
&& 480 + ctx->actor != afs_lookup_filldir && 481 + ctx->actor != afs_lookup_one_filldir && 482 + memcmp(dire->u.name, ".__afs", 6) == 0) 483 + continue; 484 + 477 485 /* found the next entry */ 478 486 if (!dir_emit(ctx, dire->u.name, nlen, 479 487 ntohl(dire->u.vnode), ··· 716 708 break; 717 709 } 718 710 711 + if (vp->scb.status.abort_code) 712 + trace_afs_bulkstat_error(op, &vp->fid, i, vp->scb.status.abort_code); 719 713 if (!vp->scb.have_status && !vp->scb.have_error) 720 714 continue; 721 715 ··· 907 897 afs_begin_vnode_operation(op); 908 898 afs_wait_for_operation(op); 909 899 } 910 - inode = ERR_PTR(afs_op_error(op)); 911 900 912 901 out_op: 913 902 if (!afs_op_error(op)) { 914 - inode = &op->file[1].vnode->netfs.inode; 915 - op->file[1].vnode = NULL; 903 + if (op->file[1].scb.status.abort_code) { 904 + afs_op_accumulate_error(op, -ECONNABORTED, 905 + op->file[1].scb.status.abort_code); 906 + } else { 907 + inode = &op->file[1].vnode->netfs.inode; 908 + op->file[1].vnode = NULL; 909 + } 916 910 } 917 911 918 912 if (op->file[0].scb.have_status) ··· 2036 2022 { 2037 2023 struct afs_vnode *dvnode = AFS_FS_I(folio_inode(folio)); 2038 2024 2039 - _enter("{{%llx:%llu}[%lu]}", dvnode->fid.vid, dvnode->fid.vnode, folio_index(folio)); 2025 + _enter("{{%llx:%llu}[%lu]}", dvnode->fid.vid, dvnode->fid.vnode, folio->index); 2040 2026 2041 2027 folio_detach_private(folio); 2042 2028
-9
fs/afs/dynroot.c
··· 258 258 .lookup = afs_dynroot_lookup, 259 259 }; 260 260 261 - /* 262 - * Dirs in the dynamic root don't need revalidation. 263 - */ 264 - static int afs_dynroot_d_revalidate(struct dentry *dentry, unsigned int flags) 265 - { 266 - return 1; 267 - } 268 - 269 261 const struct dentry_operations afs_dynroot_dentry_operations = { 270 - .d_revalidate = afs_dynroot_d_revalidate, 271 262 .d_delete = always_delete_dentry, 272 263 .d_release = afs_d_release, 273 264 .d_automount = afs_d_automount,
+3 -2
fs/afs/proc.c
··· 166 166 167 167 if (!preflist) { 168 168 seq_puts(m, "NO PREFS\n"); 169 - return 0; 169 + goto out; 170 170 } 171 171 172 172 seq_printf(m, "PROT SUBNET PRIOR (v=%u n=%u/%u/%u)\n", ··· 191 191 } 192 192 } 193 193 194 - rcu_read_lock(); 194 + out: 195 + rcu_read_unlock(); 195 196 return 0; 196 197 } 197 198
+16 -7
fs/btrfs/compression.c
··· 141 141 } 142 142 143 143 static int compression_decompress(int type, struct list_head *ws, 144 - const u8 *data_in, struct page *dest_page, 145 - unsigned long start_byte, size_t srclen, size_t destlen) 144 + const u8 *data_in, struct page *dest_page, 145 + unsigned long dest_pgoff, size_t srclen, size_t destlen) 146 146 { 147 147 switch (type) { 148 148 case BTRFS_COMPRESS_ZLIB: return zlib_decompress(ws, data_in, dest_page, 149 - start_byte, srclen, destlen); 149 + dest_pgoff, srclen, destlen); 150 150 case BTRFS_COMPRESS_LZO: return lzo_decompress(ws, data_in, dest_page, 151 - start_byte, srclen, destlen); 151 + dest_pgoff, srclen, destlen); 152 152 case BTRFS_COMPRESS_ZSTD: return zstd_decompress(ws, data_in, dest_page, 153 - start_byte, srclen, destlen); 153 + dest_pgoff, srclen, destlen); 154 154 case BTRFS_COMPRESS_NONE: 155 155 default: 156 156 /* ··· 1037 1037 * start_byte tells us the offset into the compressed data we're interested in 1038 1038 */ 1039 1039 int btrfs_decompress(int type, const u8 *data_in, struct page *dest_page, 1040 - unsigned long start_byte, size_t srclen, size_t destlen) 1040 + unsigned long dest_pgoff, size_t srclen, size_t destlen) 1041 1041 { 1042 + struct btrfs_fs_info *fs_info = btrfs_sb(dest_page->mapping->host->i_sb); 1042 1043 struct list_head *workspace; 1044 + const u32 sectorsize = fs_info->sectorsize; 1043 1045 int ret; 1046 + 1047 + /* 1048 + * The full destination page range should not exceed the page size. 1049 + * And the @destlen should not exceed sectorsize, as this is only called for 1050 + * inline file extents, which should not exceed sectorsize. 1051 + */ 1052 + ASSERT(dest_pgoff + destlen <= PAGE_SIZE && destlen <= sectorsize); 1044 1053 1045 1054 workspace = get_workspace(type, 0); 1046 1055 ret = compression_decompress(type, workspace, data_in, dest_page, 1047 - start_byte, srclen, destlen); 1056 + dest_pgoff, srclen, destlen); 1048 1057 put_workspace(type, workspace); 1049 1058 1050 1059 return ret;
+2 -2
fs/btrfs/compression.h
··· 148 148 unsigned long *total_in, unsigned long *total_out); 149 149 int zlib_decompress_bio(struct list_head *ws, struct compressed_bio *cb); 150 150 int zlib_decompress(struct list_head *ws, const u8 *data_in, 151 - struct page *dest_page, unsigned long start_byte, size_t srclen, 151 + struct page *dest_page, unsigned long dest_pgoff, size_t srclen, 152 152 size_t destlen); 153 153 struct list_head *zlib_alloc_workspace(unsigned int level); 154 154 void zlib_free_workspace(struct list_head *ws); ··· 159 159 unsigned long *total_in, unsigned long *total_out); 160 160 int lzo_decompress_bio(struct list_head *ws, struct compressed_bio *cb); 161 161 int lzo_decompress(struct list_head *ws, const u8 *data_in, 162 - struct page *dest_page, unsigned long start_byte, size_t srclen, 162 + struct page *dest_page, unsigned long dest_pgoff, size_t srclen, 163 163 size_t destlen); 164 164 struct list_head *lzo_alloc_workspace(unsigned int level); 165 165 void lzo_free_workspace(struct list_head *ws);
+39 -14
fs/btrfs/extent-tree.c
··· 1260 1260 u64 bytes_left, end; 1261 1261 u64 aligned_start = ALIGN(start, 1 << SECTOR_SHIFT); 1262 1262 1263 - if (WARN_ON(start != aligned_start)) { 1263 + /* Adjust the range to be aligned to 512B sectors if necessary. */ 1264 + if (start != aligned_start) { 1264 1265 len -= aligned_start - start; 1265 1266 len = round_down(len, 1 << SECTOR_SHIFT); 1266 1267 start = aligned_start; ··· 4299 4298 return 0; 4300 4299 } 4301 4300 4301 + static int prepare_allocation_zoned(struct btrfs_fs_info *fs_info, 4302 + struct find_free_extent_ctl *ffe_ctl) 4303 + { 4304 + if (ffe_ctl->for_treelog) { 4305 + spin_lock(&fs_info->treelog_bg_lock); 4306 + if (fs_info->treelog_bg) 4307 + ffe_ctl->hint_byte = fs_info->treelog_bg; 4308 + spin_unlock(&fs_info->treelog_bg_lock); 4309 + } else if (ffe_ctl->for_data_reloc) { 4310 + spin_lock(&fs_info->relocation_bg_lock); 4311 + if (fs_info->data_reloc_bg) 4312 + ffe_ctl->hint_byte = fs_info->data_reloc_bg; 4313 + spin_unlock(&fs_info->relocation_bg_lock); 4314 + } else if (ffe_ctl->flags & BTRFS_BLOCK_GROUP_DATA) { 4315 + struct btrfs_block_group *block_group; 4316 + 4317 + spin_lock(&fs_info->zone_active_bgs_lock); 4318 + list_for_each_entry(block_group, &fs_info->zone_active_bgs, active_bg_list) { 4319 + /* 4320 + * No lock is OK here because avail is monotinically 4321 + * decreasing, and this is just a hint. 
4322 + */ 4323 + u64 avail = block_group->zone_capacity - block_group->alloc_offset; 4324 + 4325 + if (block_group_bits(block_group, ffe_ctl->flags) && 4326 + avail >= ffe_ctl->num_bytes) { 4327 + ffe_ctl->hint_byte = block_group->start; 4328 + break; 4329 + } 4330 + } 4331 + spin_unlock(&fs_info->zone_active_bgs_lock); 4332 + } 4333 + 4334 + return 0; 4335 + } 4336 + 4302 4337 static int prepare_allocation(struct btrfs_fs_info *fs_info, 4303 4338 struct find_free_extent_ctl *ffe_ctl, 4304 4339 struct btrfs_space_info *space_info, ··· 4345 4308 return prepare_allocation_clustered(fs_info, ffe_ctl, 4346 4309 space_info, ins); 4347 4310 case BTRFS_EXTENT_ALLOC_ZONED: 4348 - if (ffe_ctl->for_treelog) { 4349 - spin_lock(&fs_info->treelog_bg_lock); 4350 - if (fs_info->treelog_bg) 4351 - ffe_ctl->hint_byte = fs_info->treelog_bg; 4352 - spin_unlock(&fs_info->treelog_bg_lock); 4353 - } 4354 - if (ffe_ctl->for_data_reloc) { 4355 - spin_lock(&fs_info->relocation_bg_lock); 4356 - if (fs_info->data_reloc_bg) 4357 - ffe_ctl->hint_byte = fs_info->data_reloc_bg; 4358 - spin_unlock(&fs_info->relocation_bg_lock); 4359 - } 4360 - return 0; 4311 + return prepare_allocation_zoned(fs_info, ffe_ctl); 4361 4312 default: 4362 4313 BUG(); 4363 4314 }
+13 -9
fs/btrfs/inode.c
··· 4458 4458 u64 root_flags; 4459 4459 int ret; 4460 4460 4461 + down_write(&fs_info->subvol_sem); 4462 + 4461 4463 /* 4462 4464 * Don't allow to delete a subvolume with send in progress. This is 4463 4465 * inside the inode lock so the error handling that has to drop the bit ··· 4471 4469 btrfs_warn(fs_info, 4472 4470 "attempt to delete subvolume %llu during send", 4473 4471 dest->root_key.objectid); 4474 - return -EPERM; 4472 + ret = -EPERM; 4473 + goto out_up_write; 4475 4474 } 4476 4475 if (atomic_read(&dest->nr_swapfiles)) { 4477 4476 spin_unlock(&dest->root_item_lock); 4478 4477 btrfs_warn(fs_info, 4479 4478 "attempt to delete subvolume %llu with active swapfile", 4480 4479 root->root_key.objectid); 4481 - return -EPERM; 4480 + ret = -EPERM; 4481 + goto out_up_write; 4482 4482 } 4483 4483 root_flags = btrfs_root_flags(&dest->root_item); 4484 4484 btrfs_set_root_flags(&dest->root_item, 4485 4485 root_flags | BTRFS_ROOT_SUBVOL_DEAD); 4486 4486 spin_unlock(&dest->root_item_lock); 4487 4487 4488 - down_write(&fs_info->subvol_sem); 4489 - 4490 4488 ret = may_destroy_subvol(dest); 4491 4489 if (ret) 4492 - goto out_up_write; 4490 + goto out_undead; 4493 4491 4494 4492 btrfs_init_block_rsv(&block_rsv, BTRFS_BLOCK_RSV_TEMP); 4495 4493 /* ··· 4499 4497 */ 4500 4498 ret = btrfs_subvolume_reserve_metadata(root, &block_rsv, 5, true); 4501 4499 if (ret) 4502 - goto out_up_write; 4500 + goto out_undead; 4503 4501 4504 4502 trans = btrfs_start_transaction(root, 0); 4505 4503 if (IS_ERR(trans)) { ··· 4565 4563 inode->i_flags |= S_DEAD; 4566 4564 out_release: 4567 4565 btrfs_subvolume_release_metadata(root, &block_rsv); 4568 - out_up_write: 4569 - up_write(&fs_info->subvol_sem); 4566 + out_undead: 4570 4567 if (ret) { 4571 4568 spin_lock(&dest->root_item_lock); 4572 4569 root_flags = btrfs_root_flags(&dest->root_item); 4573 4570 btrfs_set_root_flags(&dest->root_item, 4574 4571 root_flags & ~BTRFS_ROOT_SUBVOL_DEAD); 4575 4572 spin_unlock(&dest->root_item_lock); 4576 - } else 
{ 4573 + } 4574 + out_up_write: 4575 + up_write(&fs_info->subvol_sem); 4576 + if (!ret) { 4577 4577 d_invalidate(dentry); 4578 4578 btrfs_prune_dentries(dest); 4579 4579 ASSERT(dest->send_in_progress == 0);
+7
fs/btrfs/ioctl.c
··· 790 790 return -EOPNOTSUPP; 791 791 } 792 792 793 + if (btrfs_root_refs(&root->root_item) == 0) 794 + return -ENOENT; 795 + 793 796 if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state)) 794 797 return -EINVAL; 795 798 ··· 2609 2606 if (argp) { 2610 2607 if (copy_from_user(&range, argp, sizeof(range))) { 2611 2608 ret = -EFAULT; 2609 + goto out; 2610 + } 2611 + if (range.flags & ~BTRFS_DEFRAG_RANGE_FLAGS_SUPP) { 2612 + ret = -EOPNOTSUPP; 2612 2613 goto out; 2613 2614 } 2614 2615 /* compression requires us to start the IO */
+9 -25
fs/btrfs/lzo.c
··· 425 425 } 426 426 427 427 int lzo_decompress(struct list_head *ws, const u8 *data_in, 428 - struct page *dest_page, unsigned long start_byte, size_t srclen, 428 + struct page *dest_page, unsigned long dest_pgoff, size_t srclen, 429 429 size_t destlen) 430 430 { 431 431 struct workspace *workspace = list_entry(ws, struct workspace, list); 432 + struct btrfs_fs_info *fs_info = btrfs_sb(dest_page->mapping->host->i_sb); 433 + const u32 sectorsize = fs_info->sectorsize; 432 434 size_t in_len; 433 435 size_t out_len; 434 436 size_t max_segment_len = WORKSPACE_BUF_LENGTH; 435 437 int ret = 0; 436 - char *kaddr; 437 - unsigned long bytes; 438 438 439 439 if (srclen < LZO_LEN || srclen > max_segment_len + LZO_LEN * 2) 440 440 return -EUCLEAN; ··· 451 451 } 452 452 data_in += LZO_LEN; 453 453 454 - out_len = PAGE_SIZE; 454 + out_len = sectorsize; 455 455 ret = lzo1x_decompress_safe(data_in, in_len, workspace->buf, &out_len); 456 456 if (ret != LZO_E_OK) { 457 457 pr_warn("BTRFS: decompress failed!\n"); ··· 459 459 goto out; 460 460 } 461 461 462 - if (out_len < start_byte) { 462 + ASSERT(out_len <= sectorsize); 463 + memcpy_to_page(dest_page, dest_pgoff, workspace->buf, out_len); 464 + /* Early end, considered as an error. */ 465 + if (unlikely(out_len < destlen)) { 463 466 ret = -EIO; 464 - goto out; 467 + memzero_page(dest_page, dest_pgoff + out_len, destlen - out_len); 465 468 } 466 - 467 - /* 468 - * the caller is already checking against PAGE_SIZE, but lets 469 - * move this check closer to the memcpy/memset 470 - */ 471 - destlen = min_t(unsigned long, destlen, PAGE_SIZE); 472 - bytes = min_t(unsigned long, destlen, out_len - start_byte); 473 - 474 - kaddr = kmap_local_page(dest_page); 475 - memcpy(kaddr, workspace->buf + start_byte, bytes); 476 - 477 - /* 478 - * btrfs_getblock is doing a zero on the tail of the page too, 479 - * but this will cover anything missing from the decompressed 480 - * data. 
481 - */ 482 - if (bytes < destlen) 483 - memset(kaddr+bytes, 0, destlen-bytes); 484 - kunmap_local(kaddr); 485 469 out: 486 470 return ret; 487 471 }
+4 -2
fs/btrfs/ref-verify.c
··· 889 889 out_unlock: 890 890 spin_unlock(&fs_info->ref_verify_lock); 891 891 out: 892 - if (ret) 892 + if (ret) { 893 + btrfs_free_ref_cache(fs_info); 893 894 btrfs_clear_opt(fs_info->mount_opt, REF_VERIFY); 895 + } 894 896 return ret; 895 897 } 896 898 ··· 1023 1021 } 1024 1022 } 1025 1023 if (ret) { 1026 - btrfs_clear_opt(fs_info->mount_opt, REF_VERIFY); 1027 1024 btrfs_free_ref_cache(fs_info); 1025 + btrfs_clear_opt(fs_info->mount_opt, REF_VERIFY); 1028 1026 } 1029 1027 btrfs_free_path(path); 1030 1028 return ret;
+29 -7
fs/btrfs/scrub.c
··· 1098 1098 static void scrub_read_endio(struct btrfs_bio *bbio) 1099 1099 { 1100 1100 struct scrub_stripe *stripe = bbio->private; 1101 + struct bio_vec *bvec; 1102 + int sector_nr = calc_sector_number(stripe, bio_first_bvec_all(&bbio->bio)); 1103 + int num_sectors; 1104 + u32 bio_size = 0; 1105 + int i; 1106 + 1107 + ASSERT(sector_nr < stripe->nr_sectors); 1108 + bio_for_each_bvec_all(bvec, &bbio->bio, i) 1109 + bio_size += bvec->bv_len; 1110 + num_sectors = bio_size >> stripe->bg->fs_info->sectorsize_bits; 1101 1111 1102 1112 if (bbio->bio.bi_status) { 1103 - bitmap_set(&stripe->io_error_bitmap, 0, stripe->nr_sectors); 1104 - bitmap_set(&stripe->error_bitmap, 0, stripe->nr_sectors); 1113 + bitmap_set(&stripe->io_error_bitmap, sector_nr, num_sectors); 1114 + bitmap_set(&stripe->error_bitmap, sector_nr, num_sectors); 1105 1115 } else { 1106 - bitmap_clear(&stripe->io_error_bitmap, 0, stripe->nr_sectors); 1116 + bitmap_clear(&stripe->io_error_bitmap, sector_nr, num_sectors); 1107 1117 } 1108 1118 bio_put(&bbio->bio); 1109 1119 if (atomic_dec_and_test(&stripe->pending_io)) { ··· 1646 1636 { 1647 1637 struct btrfs_fs_info *fs_info = stripe->bg->fs_info; 1648 1638 struct btrfs_bio *bbio = NULL; 1639 + unsigned int nr_sectors = min(BTRFS_STRIPE_LEN, stripe->bg->start + 1640 + stripe->bg->length - stripe->logical) >> 1641 + fs_info->sectorsize_bits; 1649 1642 u64 stripe_len = BTRFS_STRIPE_LEN; 1650 1643 int mirror = stripe->mirror_num; 1651 1644 int i; ··· 1658 1645 for_each_set_bit(i, &stripe->extent_sector_bitmap, stripe->nr_sectors) { 1659 1646 struct page *page = scrub_stripe_get_page(stripe, i); 1660 1647 unsigned int pgoff = scrub_stripe_get_page_offset(stripe, i); 1648 + 1649 + /* We're beyond the chunk boundary, no need to read anymore. */ 1650 + if (i >= nr_sectors) 1651 + break; 1661 1652 1662 1653 /* The current sector cannot be merged, submit the bio. 
*/ 1663 1654 if (bbio && ··· 1718 1701 { 1719 1702 struct btrfs_fs_info *fs_info = sctx->fs_info; 1720 1703 struct btrfs_bio *bbio; 1704 + unsigned int nr_sectors = min(BTRFS_STRIPE_LEN, stripe->bg->start + 1705 + stripe->bg->length - stripe->logical) >> 1706 + fs_info->sectorsize_bits; 1721 1707 int mirror = stripe->mirror_num; 1722 1708 1723 1709 ASSERT(stripe->bg); ··· 1735 1715 bbio = btrfs_bio_alloc(SCRUB_STRIPE_PAGES, REQ_OP_READ, fs_info, 1736 1716 scrub_read_endio, stripe); 1737 1717 1738 - /* Read the whole stripe. */ 1739 1718 bbio->bio.bi_iter.bi_sector = stripe->logical >> SECTOR_SHIFT; 1740 - for (int i = 0; i < BTRFS_STRIPE_LEN >> PAGE_SHIFT; i++) { 1719 + /* Read the whole range inside the chunk boundary. */ 1720 + for (unsigned int cur = 0; cur < nr_sectors; cur++) { 1721 + struct page *page = scrub_stripe_get_page(stripe, cur); 1722 + unsigned int pgoff = scrub_stripe_get_page_offset(stripe, cur); 1741 1723 int ret; 1742 1724 1743 - ret = bio_add_page(&bbio->bio, stripe->pages[i], PAGE_SIZE, 0); 1725 + ret = bio_add_page(&bbio->bio, page, fs_info->sectorsize, pgoff); 1744 1726 /* We should have allocated enough bio vectors. */ 1745 - ASSERT(ret == PAGE_SIZE); 1727 + ASSERT(ret == fs_info->sectorsize); 1746 1728 } 1747 1729 atomic_inc(&stripe->pending_io); 1748 1730
+2 -2
fs/btrfs/send.c
··· 8205 8205 goto out; 8206 8206 } 8207 8207 8208 - sctx->clone_roots = kvcalloc(sizeof(*sctx->clone_roots), 8209 - arg->clone_sources_count + 1, 8208 + sctx->clone_roots = kvcalloc(arg->clone_sources_count + 1, 8209 + sizeof(*sctx->clone_roots), 8210 8210 GFP_KERNEL); 8211 8211 if (!sctx->clone_roots) { 8212 8212 ret = -ENOMEM;
+2 -1
fs/btrfs/subpage.c
··· 475 475 476 476 spin_lock_irqsave(&subpage->lock, flags); 477 477 bitmap_set(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits); 478 - folio_start_writeback(folio); 478 + if (!folio_test_writeback(folio)) 479 + folio_start_writeback(folio); 479 480 spin_unlock_irqrestore(&subpage->lock, flags); 480 481 } 481 482
+8
fs/btrfs/super.c
··· 1457 1457 1458 1458 btrfs_info_to_ctx(fs_info, &old_ctx); 1459 1459 1460 + /* 1461 + * This is our "bind mount" trick, we don't want to allow the user to do 1462 + * anything other than mount a different ro/rw and a different subvol, 1463 + * all of the mount options should be maintained. 1464 + */ 1465 + if (mount_reconfigure) 1466 + ctx->mount_opt = old_ctx.mount_opt; 1467 + 1460 1468 sync_filesystem(sb); 1461 1469 set_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state); 1462 1470
+1 -1
fs/btrfs/tree-checker.c
··· 1436 1436 if (unlikely(ptr + btrfs_extent_inline_ref_size(inline_type) > end)) { 1437 1437 extent_err(leaf, slot, 1438 1438 "inline ref item overflows extent item, ptr %lu iref size %u end %lu", 1439 - ptr, inline_type, end); 1439 + ptr, btrfs_extent_inline_ref_size(inline_type), end); 1440 1440 return -EUCLEAN; 1441 1441 } 1442 1442
-2
fs/btrfs/volumes.c
··· 3087 3087 map = btrfs_find_chunk_map(fs_info, logical, length); 3088 3088 3089 3089 if (unlikely(!map)) { 3090 - read_unlock(&fs_info->mapping_tree_lock); 3091 3090 btrfs_crit(fs_info, 3092 3091 "unable to find chunk map for logical %llu length %llu", 3093 3092 logical, length); ··· 3094 3095 } 3095 3096 3096 3097 if (unlikely(map->start > logical || map->start + map->chunk_len <= logical)) { 3097 - read_unlock(&fs_info->mapping_tree_lock); 3098 3098 btrfs_crit(fs_info, 3099 3099 "found a bad chunk map, wanted %llu-%llu, found %llu-%llu", 3100 3100 logical, logical + length, map->start,
+19 -54
fs/btrfs/zlib.c
··· 354 354 } 355 355 356 356 int zlib_decompress(struct list_head *ws, const u8 *data_in, 357 - struct page *dest_page, unsigned long start_byte, size_t srclen, 357 + struct page *dest_page, unsigned long dest_pgoff, size_t srclen, 358 358 size_t destlen) 359 359 { 360 360 struct workspace *workspace = list_entry(ws, struct workspace, list); 361 361 int ret = 0; 362 362 int wbits = MAX_WBITS; 363 - unsigned long bytes_left; 364 - unsigned long total_out = 0; 365 - unsigned long pg_offset = 0; 366 - 367 - destlen = min_t(unsigned long, destlen, PAGE_SIZE); 368 - bytes_left = destlen; 363 + unsigned long to_copy; 369 364 370 365 workspace->strm.next_in = data_in; 371 366 workspace->strm.avail_in = srclen; ··· 385 390 return -EIO; 386 391 } 387 392 388 - while (bytes_left > 0) { 389 - unsigned long buf_start; 390 - unsigned long buf_offset; 391 - unsigned long bytes; 393 + /* 394 + * Everything (in/out buf) should be at most one sector, there should 395 + * be no need to switch any input/output buffer. 
396 + */ 397 + ret = zlib_inflate(&workspace->strm, Z_FINISH); 398 + to_copy = min(workspace->strm.total_out, destlen); 399 + if (ret != Z_STREAM_END) 400 + goto out; 392 401 393 - ret = zlib_inflate(&workspace->strm, Z_NO_FLUSH); 394 - if (ret != Z_OK && ret != Z_STREAM_END) 395 - break; 402 + memcpy_to_page(dest_page, dest_pgoff, workspace->buf, to_copy); 396 403 397 - buf_start = total_out; 398 - total_out = workspace->strm.total_out; 399 - 400 - if (total_out == buf_start) { 401 - ret = -EIO; 402 - break; 403 - } 404 - 405 - if (total_out <= start_byte) 406 - goto next; 407 - 408 - if (total_out > start_byte && buf_start < start_byte) 409 - buf_offset = start_byte - buf_start; 410 - else 411 - buf_offset = 0; 412 - 413 - bytes = min(PAGE_SIZE - pg_offset, 414 - PAGE_SIZE - (buf_offset % PAGE_SIZE)); 415 - bytes = min(bytes, bytes_left); 416 - 417 - memcpy_to_page(dest_page, pg_offset, 418 - workspace->buf + buf_offset, bytes); 419 - 420 - pg_offset += bytes; 421 - bytes_left -= bytes; 422 - next: 423 - workspace->strm.next_out = workspace->buf; 424 - workspace->strm.avail_out = workspace->buf_size; 425 - } 426 - 427 - if (ret != Z_STREAM_END && bytes_left != 0) 404 + out: 405 + if (unlikely(to_copy != destlen)) { 406 + pr_warn_ratelimited("BTRFS: infalte failed, decompressed=%lu expected=%zu\n", 407 + to_copy, destlen); 428 408 ret = -EIO; 429 - else 409 + } else { 430 410 ret = 0; 411 + } 431 412 432 413 zlib_inflateEnd(&workspace->strm); 433 414 434 - /* 435 - * this should only happen if zlib returned fewer bytes than we 436 - * expected. btrfs_get_block is responsible for zeroing from the 437 - * end of the inline extent (destlen) to the end of the page 438 - */ 439 - if (pg_offset < destlen) { 440 - memzero_page(dest_page, pg_offset, destlen - pg_offset); 441 - } 415 + if (unlikely(to_copy < destlen)) 416 + memzero_page(dest_page, dest_pgoff + to_copy, destlen - to_copy); 442 417 return ret; 443 418 } 444 419
+2 -6
fs/btrfs/zoned.c
··· 2055 2055 2056 2056 map = block_group->physical_map; 2057 2057 2058 + spin_lock(&fs_info->zone_active_bgs_lock); 2058 2059 spin_lock(&block_group->lock); 2059 2060 if (test_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &block_group->runtime_flags)) { 2060 2061 ret = true; ··· 2068 2067 goto out_unlock; 2069 2068 } 2070 2069 2071 - spin_lock(&fs_info->zone_active_bgs_lock); 2072 2070 for (i = 0; i < map->num_stripes; i++) { 2073 2071 struct btrfs_zoned_device_info *zinfo; 2074 2072 int reserved = 0; ··· 2087 2087 */ 2088 2088 if (atomic_read(&zinfo->active_zones_left) <= reserved) { 2089 2089 ret = false; 2090 - spin_unlock(&fs_info->zone_active_bgs_lock); 2091 2090 goto out_unlock; 2092 2091 } 2093 2092 2094 2093 if (!btrfs_dev_set_active_zone(device, physical)) { 2095 2094 /* Cannot activate the zone */ 2096 2095 ret = false; 2097 - spin_unlock(&fs_info->zone_active_bgs_lock); 2098 2096 goto out_unlock; 2099 2097 } 2100 2098 if (!is_data) 2101 2099 zinfo->reserved_active_zones--; 2102 2100 } 2103 - spin_unlock(&fs_info->zone_active_bgs_lock); 2104 2101 2105 2102 /* Successfully activated all the zones */ 2106 2103 set_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &block_group->runtime_flags); ··· 2105 2108 2106 2109 /* For the active block group list */ 2107 2110 btrfs_get_block_group(block_group); 2108 - 2109 - spin_lock(&fs_info->zone_active_bgs_lock); 2110 2111 list_add_tail(&block_group->active_bg_list, &fs_info->zone_active_bgs); 2111 2112 spin_unlock(&fs_info->zone_active_bgs_lock); 2112 2113 ··· 2112 2117 2113 2118 out_unlock: 2114 2119 spin_unlock(&block_group->lock); 2120 + spin_unlock(&fs_info->zone_active_bgs_lock); 2115 2121 return ret; 2116 2122 } 2117 2123
+3
fs/cachefiles/ondemand.c
··· 539 539 struct fscache_volume *volume = object->volume->vcookie; 540 540 size_t volume_key_size, cookie_key_size, data_len; 541 541 542 + if (!object->ondemand) 543 + return 0; 544 + 542 545 /* 543 546 * CacheFiles will firstly check the cache file under the root cache 544 547 * directory. If the coherency check failed, it will fallback to
+31 -10
fs/exec.c
··· 128 128 struct filename *tmp = getname(library); 129 129 int error = PTR_ERR(tmp); 130 130 static const struct open_flags uselib_flags = { 131 - .open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC, 131 + .open_flag = O_LARGEFILE | O_RDONLY, 132 132 .acc_mode = MAY_READ | MAY_EXEC, 133 133 .intent = LOOKUP_OPEN, 134 134 .lookup_flags = LOOKUP_FOLLOW, ··· 904 904 905 905 #endif /* CONFIG_MMU */ 906 906 907 + /* 908 + * On success, caller must call do_close_execat() on the returned 909 + * struct file to close it. 910 + */ 907 911 static struct file *do_open_execat(int fd, struct filename *name, int flags) 908 912 { 909 913 struct file *file; ··· 952 948 return ERR_PTR(err); 953 949 } 954 950 951 + /** 952 + * open_exec - Open a path name for execution 953 + * 954 + * @name: path name to open with the intent of executing it. 955 + * 956 + * Returns ERR_PTR on failure or allocated struct file on success. 957 + * 958 + * As this is a wrapper for the internal do_open_execat(), callers 959 + * must call allow_write_access() before fput() on release. Also see 960 + * do_close_execat(). 
961 + */ 955 962 struct file *open_exec(const char *name) 956 963 { 957 964 struct filename *filename = getname_kernel(name); ··· 1424 1409 1425 1410 out_unlock: 1426 1411 up_write(&me->signal->exec_update_lock); 1412 + if (!bprm->cred) 1413 + mutex_unlock(&me->signal->cred_guard_mutex); 1414 + 1427 1415 out: 1428 1416 return retval; 1429 1417 } ··· 1502 1484 return -ENOMEM; 1503 1485 } 1504 1486 1487 + /* Matches do_open_execat() */ 1488 + static void do_close_execat(struct file *file) 1489 + { 1490 + if (!file) 1491 + return; 1492 + allow_write_access(file); 1493 + fput(file); 1494 + } 1495 + 1505 1496 static void free_bprm(struct linux_binprm *bprm) 1506 1497 { 1507 1498 if (bprm->mm) { ··· 1522 1495 mutex_unlock(&current->signal->cred_guard_mutex); 1523 1496 abort_creds(bprm->cred); 1524 1497 } 1525 - if (bprm->file) { 1526 - allow_write_access(bprm->file); 1527 - fput(bprm->file); 1528 - } 1498 + do_close_execat(bprm->file); 1529 1499 if (bprm->executable) 1530 1500 fput(bprm->executable); 1531 1501 /* If a binfmt changed the interp, free it. */ ··· 1544 1520 1545 1521 bprm = kzalloc(sizeof(*bprm), GFP_KERNEL); 1546 1522 if (!bprm) { 1547 - allow_write_access(file); 1548 - fput(file); 1523 + do_close_execat(file); 1549 1524 return ERR_PTR(-ENOMEM); 1550 1525 } 1551 1526 ··· 1633 1610 } 1634 1611 rcu_read_unlock(); 1635 1612 1613 + /* "users" and "in_exec" locked for copy_fs() */ 1636 1614 if (p->fs->users > n_fs) 1637 1615 bprm->unsafe |= LSM_UNSAFE_SHARE; 1638 1616 else ··· 1850 1826 return 0; 1851 1827 } 1852 1828 1853 - /* 1854 - * sys_execve() executes a new program. 1855 - */ 1856 1829 static int bprm_execve(struct linux_binprm *bprm) 1857 1830 { 1858 1831 int retval;
+6 -6
fs/netfs/buffered_read.c
··· 101 101 } 102 102 103 103 if (!test_bit(NETFS_RREQ_DONT_UNLOCK_FOLIOS, &rreq->flags)) { 104 - if (folio_index(folio) == rreq->no_unlock_folio && 104 + if (folio->index == rreq->no_unlock_folio && 105 105 test_bit(NETFS_RREQ_NO_UNLOCK_FOLIO, &rreq->flags)) 106 106 _debug("no unlock"); 107 107 else ··· 246 246 */ 247 247 int netfs_read_folio(struct file *file, struct folio *folio) 248 248 { 249 - struct address_space *mapping = folio_file_mapping(folio); 249 + struct address_space *mapping = folio->mapping; 250 250 struct netfs_io_request *rreq; 251 251 struct netfs_inode *ctx = netfs_inode(mapping->host); 252 252 struct folio *sink = NULL; 253 253 int ret; 254 254 255 - _enter("%lx", folio_index(folio)); 255 + _enter("%lx", folio->index); 256 256 257 257 rreq = netfs_alloc_request(mapping, file, 258 258 folio_file_pos(folio), folio_size(folio), ··· 460 460 ret = PTR_ERR(rreq); 461 461 goto error; 462 462 } 463 - rreq->no_unlock_folio = folio_index(folio); 463 + rreq->no_unlock_folio = folio->index; 464 464 __set_bit(NETFS_RREQ_NO_UNLOCK_FOLIO, &rreq->flags); 465 465 466 466 ret = netfs_begin_cache_read(rreq, ctx); ··· 518 518 size_t offset, size_t len) 519 519 { 520 520 struct netfs_io_request *rreq; 521 - struct address_space *mapping = folio_file_mapping(folio); 521 + struct address_space *mapping = folio->mapping; 522 522 struct netfs_inode *ctx = netfs_inode(mapping->host); 523 523 unsigned long long start = folio_pos(folio); 524 524 size_t flen = folio_size(folio); ··· 535 535 goto error; 536 536 } 537 537 538 - rreq->no_unlock_folio = folio_index(folio); 538 + rreq->no_unlock_folio = folio->index; 539 539 __set_bit(NETFS_RREQ_NO_UNLOCK_FOLIO, &rreq->flags); 540 540 ret = netfs_begin_cache_read(rreq, ctx); 541 541 if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS)
+8 -7
fs/netfs/buffered_write.c
··· 221 221 if (unlikely(fault_in_iov_iter_readable(iter, part) == part)) 222 222 break; 223 223 224 - ret = -ENOMEM; 225 224 folio = netfs_grab_folio_for_write(mapping, pos, part); 226 - if (!folio) 225 + if (IS_ERR(folio)) { 226 + ret = PTR_ERR(folio); 227 227 break; 228 + } 228 229 229 230 flen = folio_size(folio); 230 231 offset = pos & (flen - 1); ··· 344 343 break; 345 344 default: 346 345 WARN(true, "Unexpected modify type %u ix=%lx\n", 347 - howto, folio_index(folio)); 346 + howto, folio->index); 348 347 ret = -EIO; 349 348 goto error_folio_unlock; 350 349 } ··· 649 648 xas_for_each(&xas, folio, last) { 650 649 WARN(!folio_test_writeback(folio), 651 650 "bad %zx @%llx page %lx %lx\n", 652 - wreq->len, wreq->start, folio_index(folio), last); 651 + wreq->len, wreq->start, folio->index, last); 653 652 654 653 if ((finfo = netfs_folio_info(folio))) { 655 654 /* Streaming writes cannot be redirtied whilst under ··· 796 795 continue; 797 796 if (xa_is_value(folio)) 798 797 break; 799 - if (folio_index(folio) != index) { 798 + if (folio->index != index) { 800 799 xas_reset(xas); 801 800 break; 802 801 } ··· 902 901 long count = wbc->nr_to_write; 903 902 int ret; 904 903 905 - _enter(",%lx,%llx-%llx,%u", folio_index(folio), start, end, caching); 904 + _enter(",%lx,%llx-%llx,%u", folio->index, start, end, caching); 906 905 907 906 wreq = netfs_alloc_request(mapping, NULL, start, folio_size(folio), 908 907 NETFS_WRITEBACK); ··· 1048 1047 1049 1048 start = folio_pos(folio); /* May regress with THPs */ 1050 1049 1051 - _debug("wback %lx", folio_index(folio)); 1050 + _debug("wback %lx", folio->index); 1052 1051 1053 1052 /* At this point we hold neither the i_pages lock nor the page lock: 1054 1053 * the page may be truncated or invalidated (changing page->mapping to
+2 -1
fs/netfs/fscache_cache.c
··· 179 179 void fscache_put_cache(struct fscache_cache *cache, 180 180 enum fscache_cache_trace where) 181 181 { 182 - unsigned int debug_id = cache->debug_id; 182 + unsigned int debug_id; 183 183 bool zero; 184 184 int ref; 185 185 186 186 if (IS_ERR_OR_NULL(cache)) 187 187 return; 188 188 189 + debug_id = cache->debug_id; 189 190 zero = __refcount_dec_and_test(&cache->ref, &ref); 190 191 trace_fscache_cache(debug_id, ref - 1, where); 191 192
+1 -1
fs/netfs/io.c
··· 124 124 /* We might have multiple writes from the same huge 125 125 * folio, but we mustn't unlock a folio more than once. 126 126 */ 127 - if (have_unlocked && folio_index(folio) <= unlocked) 127 + if (have_unlocked && folio->index <= unlocked) 128 128 continue; 129 129 unlocked = folio_next_index(folio) - 1; 130 130 trace_netfs_folio(folio, netfs_folio_trace_end_copy);
+1 -1
fs/netfs/misc.c
··· 180 180 struct netfs_folio *finfo = NULL; 181 181 size_t flen = folio_size(folio); 182 182 183 - _enter("{%lx},%zx,%zx", folio_index(folio), offset, length); 183 + _enter("{%lx},%zx,%zx", folio->index, offset, length); 184 184 185 185 folio_wait_fscache(folio); 186 186
+15 -11
fs/nfsd/nfs4state.c
··· 7911 7911 { 7912 7912 struct file_lock *fl; 7913 7913 int status = false; 7914 - struct nfsd_file *nf = find_any_file(fp); 7914 + struct nfsd_file *nf; 7915 7915 struct inode *inode; 7916 7916 struct file_lock_context *flctx; 7917 7917 7918 + spin_lock(&fp->fi_lock); 7919 + nf = find_any_file_locked(fp); 7918 7920 if (!nf) { 7919 7921 /* Any valid lock stateid should have some sort of access */ 7920 7922 WARN_ON_ONCE(1); 7921 - return status; 7923 + goto out; 7922 7924 } 7923 7925 7924 7926 inode = file_inode(nf->nf_file); ··· 7936 7934 } 7937 7935 spin_unlock(&flctx->flc_lock); 7938 7936 } 7939 - nfsd_file_put(nf); 7937 + out: 7938 + spin_unlock(&fp->fi_lock); 7940 7939 return status; 7941 7940 } 7942 7941 ··· 7947 7944 * @cstate: NFSv4 COMPOUND state 7948 7945 * @u: RELEASE_LOCKOWNER arguments 7949 7946 * 7950 - * The lockowner's so_count is bumped when a lock record is added 7951 - * or when copying a conflicting lock. The latter case is brief, 7952 - * but can lead to fleeting false positives when looking for 7953 - * locks-in-use. 7947 + * Check if there are any locks still held and if not - free the lockowner 7948 + * and any lock state that is owned. 7954 7949 * 7955 7950 * Return values: 7956 7951 * %nfs_ok: lockowner released or not found ··· 7984 7983 spin_unlock(&clp->cl_lock); 7985 7984 return nfs_ok; 7986 7985 } 7987 - if (atomic_read(&lo->lo_owner.so_count) != 2) { 7988 - spin_unlock(&clp->cl_lock); 7989 - nfs4_put_stateowner(&lo->lo_owner); 7990 - return nfserr_locks_held; 7986 + 7987 + list_for_each_entry(stp, &lo->lo_owner.so_stateids, st_perstateowner) { 7988 + if (check_for_locks(stp->st_stid.sc_file, lo)) { 7989 + spin_unlock(&clp->cl_lock); 7990 + nfs4_put_stateowner(&lo->lo_owner); 7991 + return nfserr_locks_held; 7992 + } 7991 7993 } 7992 7994 unhash_lockowner_locked(lo); 7993 7995 while (!list_empty(&lo->lo_owner.so_stateids)) {
+27 -16
fs/overlayfs/namei.c
··· 18 18 19 19 struct ovl_lookup_data { 20 20 struct super_block *sb; 21 - struct vfsmount *mnt; 21 + const struct ovl_layer *layer; 22 22 struct qstr name; 23 23 bool is_dir; 24 24 bool opaque; 25 + bool xwhiteouts; 25 26 bool stop; 26 27 bool last; 27 28 char *redirect; ··· 202 201 return real; 203 202 } 204 203 205 - static bool ovl_is_opaquedir(struct ovl_fs *ofs, const struct path *path) 206 - { 207 - return ovl_path_check_dir_xattr(ofs, path, OVL_XATTR_OPAQUE); 208 - } 209 - 210 204 static struct dentry *ovl_lookup_positive_unlocked(struct ovl_lookup_data *d, 211 205 const char *name, 212 206 struct dentry *base, int len, 213 207 bool drop_negative) 214 208 { 215 - struct dentry *ret = lookup_one_unlocked(mnt_idmap(d->mnt), name, base, len); 209 + struct dentry *ret = lookup_one_unlocked(mnt_idmap(d->layer->mnt), name, 210 + base, len); 216 211 217 212 if (!IS_ERR(ret) && d_flags_negative(smp_load_acquire(&ret->d_flags))) { 218 213 if (drop_negative && ret->d_lockref.count == 1) { ··· 229 232 size_t prelen, const char *post, 230 233 struct dentry **ret, bool drop_negative) 231 234 { 235 + struct ovl_fs *ofs = OVL_FS(d->sb); 232 236 struct dentry *this; 233 237 struct path path; 234 238 int err; 235 239 bool last_element = !post[0]; 240 + bool is_upper = d->layer->idx == 0; 241 + char val; 236 242 237 243 this = ovl_lookup_positive_unlocked(d, name, base, namelen, drop_negative); 238 244 if (IS_ERR(this)) { ··· 253 253 } 254 254 255 255 path.dentry = this; 256 - path.mnt = d->mnt; 257 - if (ovl_path_is_whiteout(OVL_FS(d->sb), &path)) { 256 + path.mnt = d->layer->mnt; 257 + if (ovl_path_is_whiteout(ofs, &path)) { 258 258 d->stop = d->opaque = true; 259 259 goto put_and_out; 260 260 } ··· 272 272 d->stop = true; 273 273 goto put_and_out; 274 274 } 275 - err = ovl_check_metacopy_xattr(OVL_FS(d->sb), &path, NULL); 275 + err = ovl_check_metacopy_xattr(ofs, &path, NULL); 276 276 if (err < 0) 277 277 goto out_err; 278 278 ··· 292 292 if (d->last) 293 293 goto out; 
294 294 295 - if (ovl_is_opaquedir(OVL_FS(d->sb), &path)) { 295 + /* overlay.opaque=x means xwhiteouts directory */ 296 + val = ovl_get_opaquedir_val(ofs, &path); 297 + if (last_element && !is_upper && val == 'x') { 298 + d->xwhiteouts = true; 299 + ovl_layer_set_xwhiteouts(ofs, d->layer); 300 + } else if (val == 'y') { 296 301 d->stop = true; 297 302 if (last_element) 298 303 d->opaque = true; ··· 868 863 * Returns next layer in stack starting from top. 869 864 * Returns -1 if this is the last layer. 870 865 */ 871 - int ovl_path_next(int idx, struct dentry *dentry, struct path *path) 866 + int ovl_path_next(int idx, struct dentry *dentry, struct path *path, 867 + const struct ovl_layer **layer) 872 868 { 873 869 struct ovl_entry *oe = OVL_E(dentry); 874 870 struct ovl_path *lowerstack = ovl_lowerstack(oe); ··· 877 871 BUG_ON(idx < 0); 878 872 if (idx == 0) { 879 873 ovl_path_upper(dentry, path); 880 - if (path->dentry) 874 + if (path->dentry) { 875 + *layer = &OVL_FS(dentry->d_sb)->layers[0]; 881 876 return ovl_numlower(oe) ? 1 : -1; 877 + } 882 878 idx++; 883 879 } 884 880 BUG_ON(idx > ovl_numlower(oe)); 885 881 path->dentry = lowerstack[idx - 1].dentry; 886 - path->mnt = lowerstack[idx - 1].layer->mnt; 882 + *layer = lowerstack[idx - 1].layer; 883 + path->mnt = (*layer)->mnt; 887 884 888 885 return (idx < ovl_numlower(oe)) ? 
idx + 1 : -1; 889 886 } ··· 1064 1055 old_cred = ovl_override_creds(dentry->d_sb); 1065 1056 upperdir = ovl_dentry_upper(dentry->d_parent); 1066 1057 if (upperdir) { 1067 - d.mnt = ovl_upper_mnt(ofs); 1058 + d.layer = &ofs->layers[0]; 1068 1059 err = ovl_lookup_layer(upperdir, &d, &upperdentry, true); 1069 1060 if (err) 1070 1061 goto out; ··· 1120 1111 else if (d.is_dir || !ofs->numdatalayer) 1121 1112 d.last = lower.layer->idx == ovl_numlower(roe); 1122 1113 1123 - d.mnt = lower.layer->mnt; 1114 + d.layer = lower.layer; 1124 1115 err = ovl_lookup_layer(lower.dentry, &d, &this, false); 1125 1116 if (err) 1126 1117 goto out_put; ··· 1287 1278 1288 1279 if (upperopaque) 1289 1280 ovl_dentry_set_opaque(dentry); 1281 + if (d.xwhiteouts) 1282 + ovl_dentry_set_xwhiteouts(dentry); 1290 1283 1291 1284 if (upperdentry) 1292 1285 ovl_dentry_set_upper_alias(dentry);
+17 -6
fs/overlayfs/overlayfs.h
··· 50 50 OVL_XATTR_METACOPY, 51 51 OVL_XATTR_PROTATTR, 52 52 OVL_XATTR_XWHITEOUT, 53 - OVL_XATTR_XWHITEOUTS, 54 53 }; 55 54 56 55 enum ovl_inode_flag { ··· 69 70 OVL_E_UPPER_ALIAS, 70 71 OVL_E_OPAQUE, 71 72 OVL_E_CONNECTED, 73 + /* Lower stack may contain xwhiteout entries */ 74 + OVL_E_XWHITEOUTS, 72 75 }; 73 76 74 77 enum { ··· 478 477 bool ovl_dentry_is_opaque(struct dentry *dentry); 479 478 bool ovl_dentry_is_whiteout(struct dentry *dentry); 480 479 void ovl_dentry_set_opaque(struct dentry *dentry); 480 + bool ovl_dentry_has_xwhiteouts(struct dentry *dentry); 481 + void ovl_dentry_set_xwhiteouts(struct dentry *dentry); 482 + void ovl_layer_set_xwhiteouts(struct ovl_fs *ofs, 483 + const struct ovl_layer *layer); 481 484 bool ovl_dentry_has_upper_alias(struct dentry *dentry); 482 485 void ovl_dentry_set_upper_alias(struct dentry *dentry); 483 486 bool ovl_dentry_needs_data_copy_up(struct dentry *dentry, int flags); ··· 499 494 int ovl_copy_up_start(struct dentry *dentry, int flags); 500 495 void ovl_copy_up_end(struct dentry *dentry); 501 496 bool ovl_already_copied_up(struct dentry *dentry, int flags); 502 - bool ovl_path_check_dir_xattr(struct ovl_fs *ofs, const struct path *path, 503 - enum ovl_xattr ox); 497 + char ovl_get_dir_xattr_val(struct ovl_fs *ofs, const struct path *path, 498 + enum ovl_xattr ox); 504 499 bool ovl_path_check_origin_xattr(struct ovl_fs *ofs, const struct path *path); 505 500 bool ovl_path_check_xwhiteout_xattr(struct ovl_fs *ofs, const struct path *path); 506 - bool ovl_path_check_xwhiteouts_xattr(struct ovl_fs *ofs, const struct path *path); 507 501 bool ovl_init_uuid_xattr(struct super_block *sb, struct ovl_fs *ofs, 508 502 const struct path *upperpath); 509 503 ··· 577 573 .mnt = ovl_upper_mnt(ofs), 578 574 }; 579 575 580 - return ovl_path_check_dir_xattr(ofs, &upperpath, OVL_XATTR_IMPURE); 576 + return ovl_get_dir_xattr_val(ofs, &upperpath, OVL_XATTR_IMPURE) == 'y'; 577 + } 578 + 579 + static inline char 
ovl_get_opaquedir_val(struct ovl_fs *ofs, 580 + const struct path *path) 581 + { 582 + return ovl_get_dir_xattr_val(ofs, path, OVL_XATTR_OPAQUE); 581 583 } 582 584 583 585 static inline bool ovl_redirect_follow(struct ovl_fs *ofs) ··· 690 680 struct dentry *ovl_get_index_fh(struct ovl_fs *ofs, struct ovl_fh *fh); 691 681 struct dentry *ovl_lookup_index(struct ovl_fs *ofs, struct dentry *upper, 692 682 struct dentry *origin, bool verify); 693 - int ovl_path_next(int idx, struct dentry *dentry, struct path *path); 683 + int ovl_path_next(int idx, struct dentry *dentry, struct path *path, 684 + const struct ovl_layer **layer); 694 685 int ovl_verify_lowerdata(struct dentry *dentry); 695 686 struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry, 696 687 unsigned int flags);
+3 -1
fs/overlayfs/ovl_entry.h
··· 40 40 int idx; 41 41 /* One fsid per unique underlying sb (upper fsid == 0) */ 42 42 int fsid; 43 + /* xwhiteouts were found on this layer */ 44 + bool has_xwhiteouts; 43 45 }; 44 46 45 47 struct ovl_path { ··· 61 59 unsigned int numfs; 62 60 /* Number of data-only lower layers */ 63 61 unsigned int numdatalayer; 64 - const struct ovl_layer *layers; 62 + struct ovl_layer *layers; 65 63 struct ovl_sb *fs; 66 64 /* workbasedir is the path at workdir= mount option */ 67 65 struct dentry *workbasedir;
+4 -3
fs/overlayfs/readdir.c
··· 305 305 if (IS_ERR(realfile)) 306 306 return PTR_ERR(realfile); 307 307 308 - rdd->in_xwhiteouts_dir = rdd->dentry && 309 - ovl_path_check_xwhiteouts_xattr(OVL_FS(rdd->dentry->d_sb), realpath); 310 308 rdd->first_maybe_whiteout = NULL; 311 309 rdd->ctx.pos = 0; 312 310 do { ··· 357 359 .is_lowest = false, 358 360 }; 359 361 int idx, next; 362 + const struct ovl_layer *layer; 360 363 361 364 for (idx = 0; idx != -1; idx = next) { 362 - next = ovl_path_next(idx, dentry, &realpath); 365 + next = ovl_path_next(idx, dentry, &realpath, &layer); 363 366 rdd.is_upper = ovl_dentry_upper(dentry) == realpath.dentry; 367 + rdd.in_xwhiteouts_dir = layer->has_xwhiteouts && 368 + ovl_dentry_has_xwhiteouts(dentry); 364 369 365 370 if (next != -1) { 366 371 err = ovl_dir_read(&realpath, &rdd);
+15
fs/overlayfs/super.c
··· 1249 1249 struct ovl_entry *oe) 1250 1250 { 1251 1251 struct dentry *root; 1252 + struct ovl_fs *ofs = OVL_FS(sb); 1252 1253 struct ovl_path *lowerpath = ovl_lowerstack(oe); 1253 1254 unsigned long ino = d_inode(lowerpath->dentry)->i_ino; 1254 1255 int fsid = lowerpath->layer->fsid; ··· 1269 1268 ovl_dentry_set_upper_alias(root); 1270 1269 if (ovl_is_impuredir(sb, upperdentry)) 1271 1270 ovl_set_flag(OVL_IMPURE, d_inode(root)); 1271 + } 1272 + 1273 + /* Look for xwhiteouts marker except in the lowermost layer */ 1274 + for (int i = 0; i < ovl_numlower(oe) - 1; i++, lowerpath++) { 1275 + struct path path = { 1276 + .mnt = lowerpath->layer->mnt, 1277 + .dentry = lowerpath->dentry, 1278 + }; 1279 + 1280 + /* overlay.opaque=x means xwhiteouts directory */ 1281 + if (ovl_get_opaquedir_val(ofs, &path) == 'x') { 1282 + ovl_layer_set_xwhiteouts(ofs, lowerpath->layer); 1283 + ovl_dentry_set_xwhiteouts(root); 1284 + } 1272 1285 } 1273 1286 1274 1287 /* Root is always merge -> can have whiteouts */
+31 -22
fs/overlayfs/util.c
··· 461 461 ovl_dentry_set_flag(OVL_E_OPAQUE, dentry); 462 462 } 463 463 464 + bool ovl_dentry_has_xwhiteouts(struct dentry *dentry) 465 + { 466 + return ovl_dentry_test_flag(OVL_E_XWHITEOUTS, dentry); 467 + } 468 + 469 + void ovl_dentry_set_xwhiteouts(struct dentry *dentry) 470 + { 471 + ovl_dentry_set_flag(OVL_E_XWHITEOUTS, dentry); 472 + } 473 + 474 + /* 475 + * ovl_layer_set_xwhiteouts() is called before adding the overlay dir 476 + * dentry to dcache, while readdir of that same directory happens after 477 + * the overlay dir dentry is in dcache, so if some cpu observes that 478 + * ovl_dentry_is_xwhiteouts(), it will also observe layer->has_xwhiteouts 479 + * for the layers where xwhiteouts marker was found in that merge dir. 480 + */ 481 + void ovl_layer_set_xwhiteouts(struct ovl_fs *ofs, 482 + const struct ovl_layer *layer) 483 + { 484 + if (layer->has_xwhiteouts) 485 + return; 486 + 487 + /* Write once to read-mostly layer properties */ 488 + ofs->layers[layer->idx].has_xwhiteouts = true; 489 + } 490 + 464 491 /* 465 492 * For hard links and decoded file handles, it's possible for ovl_dentry_upper() 466 493 * to return positive, while there's no actual upper alias for the inode. ··· 766 739 return res >= 0; 767 740 } 768 741 769 - bool ovl_path_check_xwhiteouts_xattr(struct ovl_fs *ofs, const struct path *path) 770 - { 771 - struct dentry *dentry = path->dentry; 772 - int res; 773 - 774 - /* xattr.whiteouts must be a directory */ 775 - if (!d_is_dir(dentry)) 776 - return false; 777 - 778 - res = ovl_path_getxattr(ofs, path, OVL_XATTR_XWHITEOUTS, NULL, 0); 779 - return res >= 0; 780 - } 781 - 782 742 /* 783 743 * Load persistent uuid from xattr into s_uuid if found, or store a new 784 744 * random generated value in s_uuid and in xattr. 
··· 825 811 return false; 826 812 } 827 813 828 - bool ovl_path_check_dir_xattr(struct ovl_fs *ofs, const struct path *path, 829 - enum ovl_xattr ox) 814 + char ovl_get_dir_xattr_val(struct ovl_fs *ofs, const struct path *path, 815 + enum ovl_xattr ox) 830 816 { 831 817 int res; 832 818 char val; 833 819 834 820 if (!d_is_dir(path->dentry)) 835 - return false; 821 + return 0; 836 822 837 823 res = ovl_path_getxattr(ofs, path, ox, &val, 1); 838 - if (res == 1 && val == 'y') 839 - return true; 840 - 841 - return false; 824 + return res == 1 ? val : 0; 842 825 } 843 826 844 827 #define OVL_XATTR_OPAQUE_POSTFIX "opaque" ··· 848 837 #define OVL_XATTR_METACOPY_POSTFIX "metacopy" 849 838 #define OVL_XATTR_PROTATTR_POSTFIX "protattr" 850 839 #define OVL_XATTR_XWHITEOUT_POSTFIX "whiteout" 851 - #define OVL_XATTR_XWHITEOUTS_POSTFIX "whiteouts" 852 840 853 841 #define OVL_XATTR_TAB_ENTRY(x) \ 854 842 [x] = { [false] = OVL_XATTR_TRUSTED_PREFIX x ## _POSTFIX, \ ··· 864 854 OVL_XATTR_TAB_ENTRY(OVL_XATTR_METACOPY), 865 855 OVL_XATTR_TAB_ENTRY(OVL_XATTR_PROTATTR), 866 856 OVL_XATTR_TAB_ENTRY(OVL_XATTR_XWHITEOUT), 867 - OVL_XATTR_TAB_ENTRY(OVL_XATTR_XWHITEOUTS), 868 857 }; 869 858 870 859 int ovl_check_setxattr(struct ovl_fs *ofs, struct dentry *upperdentry,
+5 -5
fs/smb/client/file.c
··· 87 87 continue; 88 88 if (!folio_test_writeback(folio)) { 89 89 WARN_ONCE(1, "bad %x @%llx page %lx %lx\n", 90 - len, start, folio_index(folio), end); 90 + len, start, folio->index, end); 91 91 continue; 92 92 } 93 93 ··· 120 120 continue; 121 121 if (!folio_test_writeback(folio)) { 122 122 WARN_ONCE(1, "bad %x @%llx page %lx %lx\n", 123 - len, start, folio_index(folio), end); 123 + len, start, folio->index, end); 124 124 continue; 125 125 } 126 126 ··· 151 151 xas_for_each(&xas, folio, end) { 152 152 if (!folio_test_writeback(folio)) { 153 153 WARN_ONCE(1, "bad %x @%llx page %lx %lx\n", 154 - len, start, folio_index(folio), end); 154 + len, start, folio->index, end); 155 155 continue; 156 156 } 157 157 ··· 2651 2651 continue; 2652 2652 if (xa_is_value(folio)) 2653 2653 break; 2654 - if (folio_index(folio) != index) 2654 + if (folio->index != index) 2655 2655 break; 2656 2656 if (!folio_try_get_rcu(folio)) { 2657 2657 xas_reset(&xas); ··· 2899 2899 goto skip_write; 2900 2900 } 2901 2901 2902 - if (folio_mapping(folio) != mapping || 2902 + if (folio->mapping != mapping || 2903 2903 !folio_test_dirty(folio)) { 2904 2904 start += folio_size(folio); 2905 2905 folio_unlock(folio);
+11 -3
fs/tracefs/event_inode.c
··· 34 34 35 35 /* Choose something "unique" ;-) */ 36 36 #define EVENTFS_FILE_INODE_INO 0x12c4e37 37 - #define EVENTFS_DIR_INODE_INO 0x134b2f5 37 + 38 + /* Just try to make something consistent and unique */ 39 + static int eventfs_dir_ino(struct eventfs_inode *ei) 40 + { 41 + if (!ei->ino) 42 + ei->ino = get_next_ino(); 43 + 44 + return ei->ino; 45 + } 38 46 39 47 /* 40 48 * The eventfs_inode (ei) itself is protected by SRCU. It is released from ··· 404 396 inode->i_fop = &eventfs_file_operations; 405 397 406 398 /* All directories will have the same inode number */ 407 - inode->i_ino = EVENTFS_DIR_INODE_INO; 399 + inode->i_ino = eventfs_dir_ino(ei); 408 400 409 401 ti = get_tracefs(inode); 410 402 ti->flags |= TRACEFS_EVENT_INODE; ··· 810 802 811 803 name = ei_child->name; 812 804 813 - ino = EVENTFS_DIR_INODE_INO; 805 + ino = eventfs_dir_ino(ei_child); 814 806 815 807 if (!dir_emit(ctx, name, strlen(name), ino, DT_DIR)) 816 808 goto out_dec;
+4 -3
fs/tracefs/internal.h
··· 55 55 struct eventfs_attr *entry_attrs; 56 56 struct eventfs_attr attr; 57 57 void *data; 58 + unsigned int is_freed:1; 59 + unsigned int is_events:1; 60 + unsigned int nr_entries:30; 61 + unsigned int ino; 58 62 /* 59 63 * Union - used for deletion 60 64 * @llist: for calling dput() if needed after RCU ··· 68 64 struct llist_node llist; 69 65 struct rcu_head rcu; 70 66 }; 71 - unsigned int is_freed:1; 72 - unsigned int is_events:1; 73 - unsigned int nr_entries:30; 74 67 }; 75 68 76 69 static inline struct tracefs_inode *get_tracefs(const struct inode *inode)
+1
include/linux/mlx5/driver.h
··· 681 681 struct mlx5_sq_bfreg bfreg; 682 682 #define MLX5_MAX_NUM_TC 8 683 683 u32 tisn[MLX5_MAX_PORTS][MLX5_MAX_NUM_TC]; 684 + bool tisn_valid; 684 685 } hw_objs; 685 686 struct net_device *uplink_netdev; 686 687 struct mutex uplink_netdev_lock;
+1
include/linux/mlx5/fs.h
··· 132 132 133 133 enum { 134 134 FLOW_CONTEXT_HAS_TAG = BIT(0), 135 + FLOW_CONTEXT_UPLINK_HAIRPIN_EN = BIT(1), 135 136 }; 136 137 137 138 struct mlx5_flow_context {
+8 -4
include/linux/mlx5/mlx5_ifc.h
··· 3576 3576 u8 action[0x10]; 3577 3577 3578 3578 u8 extended_destination[0x1]; 3579 - u8 reserved_at_81[0x1]; 3579 + u8 uplink_hairpin_en[0x1]; 3580 3580 u8 flow_source[0x2]; 3581 3581 u8 encrypt_decrypt_type[0x4]; 3582 3582 u8 destination_list_size[0x18]; ··· 4036 4036 u8 affiliation_criteria[0x4]; 4037 4037 u8 affiliated_vhca_id[0x10]; 4038 4038 4039 - u8 reserved_at_60[0xd0]; 4039 + u8 reserved_at_60[0xa0]; 4040 4040 4041 + u8 reserved_at_100[0x1]; 4042 + u8 sd_group[0x3]; 4043 + u8 reserved_at_104[0x1c]; 4044 + 4045 + u8 reserved_at_120[0x10]; 4041 4046 u8 mtu[0x10]; 4042 4047 4043 4048 u8 system_image_guid[0x40]; ··· 10127 10122 u8 reserved_at_20[0x20]; 10128 10123 10129 10124 u8 local_port[0x8]; 10130 - u8 reserved_at_28[0x15]; 10131 - u8 sd_group[0x3]; 10125 + u8 reserved_at_28[0x18]; 10132 10126 10133 10127 u8 reserved_at_60[0x20]; 10134 10128 };
+1
include/linux/mlx5/vport.h
··· 72 72 int mlx5_modify_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 mtu); 73 73 int mlx5_query_nic_vport_system_image_guid(struct mlx5_core_dev *mdev, 74 74 u64 *system_image_guid); 75 + int mlx5_query_nic_vport_sd_group(struct mlx5_core_dev *mdev, u8 *sd_group); 75 76 int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid); 76 77 int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev, 77 78 u16 vport, u64 node_guid);
+1 -1
include/linux/sched.h
··· 920 920 unsigned sched_rt_mutex:1; 921 921 #endif 922 922 923 - /* Bit to tell LSMs we're in execve(): */ 923 + /* Bit to tell TOMOYO we're in execve(): */ 924 924 unsigned in_execve:1; 925 925 unsigned in_iowait:1; 926 926 #ifndef TIF_RESTORE_SIGMASK
-6
include/linux/skmsg.h
··· 505 505 return !!psock->saved_data_ready; 506 506 } 507 507 508 - static inline bool sk_is_udp(const struct sock *sk) 509 - { 510 - return sk->sk_type == SOCK_DGRAM && 511 - sk->sk_protocol == IPPROTO_UDP; 512 - } 513 - 514 508 #if IS_ENABLED(CONFIG_NET_SOCK_MSG) 515 509 516 510 #define BPF_F_STRPARSER (1UL << 1)
+8
include/net/inet_connection_sock.h
··· 357 357 return inet_test_bit(IS_ICSK, sk) && !!inet_csk(sk)->icsk_ulp_ops; 358 358 } 359 359 360 + static inline void inet_init_csk_locks(struct sock *sk) 361 + { 362 + struct inet_connection_sock *icsk = inet_csk(sk); 363 + 364 + spin_lock_init(&icsk->icsk_accept_queue.rskq_lock); 365 + spin_lock_init(&icsk->icsk_accept_queue.fastopenq.lock); 366 + } 367 + 360 368 #endif /* _INET_CONNECTION_SOCK_H */
-5
include/net/inet_sock.h
··· 307 307 #define inet_assign_bit(nr, sk, val) \ 308 308 assign_bit(INET_FLAGS_##nr, &inet_sk(sk)->inet_flags, val) 309 309 310 - static inline bool sk_is_inet(struct sock *sk) 311 - { 312 - return sk->sk_family == AF_INET || sk->sk_family == AF_INET6; 313 - } 314 - 315 310 /** 316 311 * sk_to_full_sk - Access to a full socket 317 312 * @sk: pointer to a socket
+2 -4
include/net/llc_pdu.h
··· 262 262 */ 263 263 static inline void llc_pdu_decode_sa(struct sk_buff *skb, u8 *sa) 264 264 { 265 - if (skb->protocol == htons(ETH_P_802_2)) 266 - memcpy(sa, eth_hdr(skb)->h_source, ETH_ALEN); 265 + memcpy(sa, eth_hdr(skb)->h_source, ETH_ALEN); 267 266 } 268 267 269 268 /** ··· 274 275 */ 275 276 static inline void llc_pdu_decode_da(struct sk_buff *skb, u8 *da) 276 277 { 277 - if (skb->protocol == htons(ETH_P_802_2)) 278 - memcpy(da, eth_hdr(skb)->h_dest, ETH_ALEN); 278 + memcpy(da, eth_hdr(skb)->h_dest, ETH_ALEN); 279 279 } 280 280 281 281 /**
+39 -10
include/net/netfilter/nf_tables.h
··· 205 205 * @nla: netlink attributes 206 206 * @portid: netlink portID of the original message 207 207 * @seq: netlink sequence number 208 + * @flags: modifiers to new request 208 209 * @family: protocol family 209 210 * @level: depth of the chains 210 211 * @report: notify via unicast netlink message ··· 283 282 * 284 283 * @key: element key 285 284 * @key_end: closing element key 285 + * @data: element data 286 286 * @priv: element private data and extensions 287 287 */ 288 288 struct nft_set_elem { ··· 327 325 * @dtype: data type 328 326 * @dlen: data length 329 327 * @objtype: object type 330 - * @flags: flags 331 328 * @size: number of set elements 332 329 * @policy: set policy 333 330 * @gc_int: garbage collector interval 331 + * @timeout: element timeout 334 332 * @field_len: length of each field in concatenation, bytes 335 333 * @field_count: number of concatenated fields in element 336 334 * @expr: set must support for expressions ··· 353 351 /** 354 352 * enum nft_set_class - performance class 355 353 * 356 - * @NFT_LOOKUP_O_1: constant, O(1) 357 - * @NFT_LOOKUP_O_LOG_N: logarithmic, O(log N) 358 - * @NFT_LOOKUP_O_N: linear, O(N) 354 + * @NFT_SET_CLASS_O_1: constant, O(1) 355 + * @NFT_SET_CLASS_O_LOG_N: logarithmic, O(log N) 356 + * @NFT_SET_CLASS_O_N: linear, O(N) 359 357 */ 360 358 enum nft_set_class { 361 359 NFT_SET_CLASS_O_1, ··· 424 422 * @remove: remove element from set 425 423 * @walk: iterate over all set elements 426 424 * @get: get set elements 425 + * @commit: commit set elements 426 + * @abort: abort set elements 427 427 * @privsize: function to return size of set private data 428 + * @estimate: estimate the required memory size and the lookup complexity class 428 429 * @init: initialize private data of new set instance 429 430 * @destroy: destroy private data of set instance 431 + * @gc_init: initialize garbage collection 430 432 * @elemsize: element private size 431 433 * 432 434 * Operations lookup, update and delete have simpler 
interfaces, are faster ··· 546 540 * @policy: set parameterization (see enum nft_set_policies) 547 541 * @udlen: user data length 548 542 * @udata: user data 549 - * @expr: stateful expression 543 + * @pending_update: list of pending update set element 550 544 * @ops: set ops 551 545 * @flags: set flags 552 546 * @dead: set will be freed, never cleared 553 547 * @genmask: generation mask 554 548 * @klen: key length 555 549 * @dlen: data length 550 + * @num_exprs: numbers of exprs 551 + * @exprs: stateful expression 552 + * @catchall_list: list of catch-all set element 556 553 * @data: private set data 557 554 */ 558 555 struct nft_set { ··· 701 692 * 702 693 * @len: length of extension area 703 694 * @offset: offsets of individual extension types 695 + * @ext_len: length of the expected extension(used to sanity check) 704 696 */ 705 697 struct nft_set_ext_tmpl { 706 698 u16 len; ··· 850 840 * @select_ops: function to select nft_expr_ops 851 841 * @release_ops: release nft_expr_ops 852 842 * @ops: default ops, used when no select_ops functions is present 843 + * @inner_ops: inner ops, used for inner packet operation 853 844 * @list: used internally 854 845 * @name: Identifier 855 846 * @owner: module reference ··· 892 881 * struct nft_expr_ops - nf_tables expression operations 893 882 * 894 883 * @eval: Expression evaluation function 884 + * @clone: Expression clone function 895 885 * @size: full expression size, including private data size 896 886 * @init: initialization function 897 887 * @activate: activate expression in the next generation 898 888 * @deactivate: deactivate expression in next generation 899 889 * @destroy: destruction function, called after synchronize_rcu 890 + * @destroy_clone: destruction clone function 900 891 * @dump: function to dump parameters 901 - * @type: expression type 902 892 * @validate: validate expression, called during loop detection 893 + * @reduce: reduce expression 894 + * @gc: garbage collection expression 895 + * @offload: 
hardware offload expression 896 + * @offload_action: function to report true/false to allocate one slot or not in the flow 897 + * offload array 898 + * @offload_stats: function to synchronize hardware stats via updating the counter expression 899 + * @type: expression type 903 900 * @data: extra data to attach to this expression operation 904 901 */ 905 902 struct nft_expr_ops { ··· 1060 1041 /** 1061 1042 * struct nft_chain - nf_tables chain 1062 1043 * 1044 + * @blob_gen_0: rule blob pointer to the current generation 1045 + * @blob_gen_1: rule blob pointer to the future generation 1063 1046 * @rules: list of rules in the chain 1064 1047 * @list: used internally 1065 1048 * @rhlhead: used internally 1066 1049 * @table: table that this chain belongs to 1067 1050 * @handle: chain handle 1068 1051 * @use: number of jump references to this chain 1069 - * @flags: bitmask of enum nft_chain_flags 1052 + * @flags: bitmask of enum NFTA_CHAIN_FLAGS 1053 + * @bound: bind or not 1054 + * @genmask: generation mask 1070 1055 * @name: name of the chain 1056 + * @udlen: user data length 1057 + * @udata: user data in the chain 1058 + * @blob_next: rule blob pointer to the next in the chain 1071 1059 */ 1072 1060 struct nft_chain { 1073 1061 struct nft_rule_blob __rcu *blob_gen_0; ··· 1172 1146 * @hook_list: list of netfilter hooks (for NFPROTO_NETDEV family) 1173 1147 * @type: chain type 1174 1148 * @policy: default policy 1149 + * @flags: indicate the base chain disabled or not 1175 1150 * @stats: per-cpu chain stats 1176 1151 * @chain: the chain 1177 1152 * @flow_block: flow block (for hardware offload) ··· 1301 1274 * struct nft_object - nf_tables stateful object 1302 1275 * 1303 1276 * @list: table stateful object list node 1304 - * @key: keys that identify this object 1305 1277 * @rhlhead: nft_objname_ht node 1278 + * @key: keys that identify this object 1306 1279 * @genmask: generation mask 1307 1280 * @use: number of references to this stateful object 1308 1281 * @handle: 
unique object handle 1282 + * @udlen: length of user data 1283 + * @udata: user data 1309 1284 * @ops: object operations 1310 1285 * @data: object data, layout depends on type 1311 1286 */ ··· 1373 1344 * @destroy: release existing stateful object 1374 1345 * @dump: netlink dump stateful object 1375 1346 * @update: update stateful object 1347 + * @type: pointer to object type 1376 1348 */ 1377 1349 struct nft_object_ops { 1378 1350 void (*eval)(struct nft_object *obj, ··· 1409 1379 * @genmask: generation mask 1410 1380 * @use: number of references to this flow table 1411 1381 * @handle: unique object handle 1412 - * @dev_name: array of device names 1382 + * @hook_list: hook list for hooks per net_device in flowtables 1413 1383 * @data: rhashtable and garbage collector 1414 - * @ops: array of hooks 1415 1384 */ 1416 1385 struct nft_flowtable { 1417 1386 struct list_head list;
+4
include/net/sch_generic.h
··· 375 375 struct nlattr **tca, 376 376 struct netlink_ext_ack *extack); 377 377 void (*tmplt_destroy)(void *tmplt_priv); 378 + void (*tmplt_reoffload)(struct tcf_chain *chain, 379 + bool add, 380 + flow_setup_cb_t *cb, 381 + void *cb_priv); 378 382 struct tcf_exts * (*get_exts)(const struct tcf_proto *tp, 379 383 u32 handle); 380 384
+17 -1
include/net/sock.h
··· 2765 2765 &skb_shinfo(skb)->tskey); 2766 2766 } 2767 2767 2768 + static inline bool sk_is_inet(const struct sock *sk) 2769 + { 2770 + int family = READ_ONCE(sk->sk_family); 2771 + 2772 + return family == AF_INET || family == AF_INET6; 2773 + } 2774 + 2768 2775 static inline bool sk_is_tcp(const struct sock *sk) 2769 2776 { 2770 - return sk->sk_type == SOCK_STREAM && sk->sk_protocol == IPPROTO_TCP; 2777 + return sk_is_inet(sk) && 2778 + sk->sk_type == SOCK_STREAM && 2779 + sk->sk_protocol == IPPROTO_TCP; 2780 + } 2781 + 2782 + static inline bool sk_is_udp(const struct sock *sk) 2783 + { 2784 + return sk_is_inet(sk) && 2785 + sk->sk_type == SOCK_DGRAM && 2786 + sk->sk_protocol == IPPROTO_UDP; 2771 2787 } 2772 2788 2773 2789 static inline bool sk_is_stream_unix(const struct sock *sk)
+27
include/net/xdp_sock_drv.h
··· 159 159 return ret; 160 160 } 161 161 162 + static inline void xsk_buff_del_tail(struct xdp_buff *tail) 163 + { 164 + struct xdp_buff_xsk *xskb = container_of(tail, struct xdp_buff_xsk, xdp); 165 + 166 + list_del(&xskb->xskb_list_node); 167 + } 168 + 169 + static inline struct xdp_buff *xsk_buff_get_tail(struct xdp_buff *first) 170 + { 171 + struct xdp_buff_xsk *xskb = container_of(first, struct xdp_buff_xsk, xdp); 172 + struct xdp_buff_xsk *frag; 173 + 174 + frag = list_last_entry(&xskb->pool->xskb_list, struct xdp_buff_xsk, 175 + xskb_list_node); 176 + return &frag->xdp; 177 + } 178 + 162 179 static inline void xsk_buff_set_size(struct xdp_buff *xdp, u32 size) 163 180 { 164 181 xdp->data = xdp->data_hard_start + XDP_PACKET_HEADROOM; 165 182 xdp->data_meta = xdp->data; 166 183 xdp->data_end = xdp->data + size; 184 + xdp->flags = 0; 167 185 } 168 186 169 187 static inline dma_addr_t xsk_buff_raw_get_dma(struct xsk_buff_pool *pool, ··· 364 346 } 365 347 366 348 static inline struct xdp_buff *xsk_buff_get_frag(struct xdp_buff *first) 349 + { 350 + return NULL; 351 + } 352 + 353 + static inline void xsk_buff_del_tail(struct xdp_buff *tail) 354 + { 355 + } 356 + 357 + static inline struct xdp_buff *xsk_buff_get_tail(struct xdp_buff *first) 367 358 { 368 359 return NULL; 369 360 }
+25
include/trace/events/afs.h
··· 1071 1071 __print_symbolic(__entry->where, afs_file_errors)) 1072 1072 ); 1073 1073 1074 + TRACE_EVENT(afs_bulkstat_error, 1075 + TP_PROTO(struct afs_operation *op, struct afs_fid *fid, unsigned int index, s32 abort), 1076 + 1077 + TP_ARGS(op, fid, index, abort), 1078 + 1079 + TP_STRUCT__entry( 1080 + __field_struct(struct afs_fid, fid) 1081 + __field(unsigned int, op) 1082 + __field(unsigned int, index) 1083 + __field(s32, abort) 1084 + ), 1085 + 1086 + TP_fast_assign( 1087 + __entry->op = op->debug_id; 1088 + __entry->fid = *fid; 1089 + __entry->index = index; 1090 + __entry->abort = abort; 1091 + ), 1092 + 1093 + TP_printk("OP=%08x[%02x] %llx:%llx:%x a=%d", 1094 + __entry->op, __entry->index, 1095 + __entry->fid.vid, __entry->fid.vnode, __entry->fid.unique, 1096 + __entry->abort) 1097 + ); 1098 + 1074 1099 TRACE_EVENT(afs_cm_no_server, 1075 1100 TP_PROTO(struct afs_call *call, struct sockaddr_rxrpc *srx), 1076 1101
+3
include/uapi/linux/btrfs.h
··· 614 614 */ 615 615 #define BTRFS_DEFRAG_RANGE_COMPRESS 1 616 616 #define BTRFS_DEFRAG_RANGE_START_IO 2 617 + #define BTRFS_DEFRAG_RANGE_FLAGS_SUPP (BTRFS_DEFRAG_RANGE_COMPRESS | \ 618 + BTRFS_DEFRAG_RANGE_START_IO) 619 + 617 620 struct btrfs_ioctl_defrag_range_args { 618 621 /* start of the defrag operation */ 619 622 __u64 start;
+12
init/Kconfig
··· 876 876 bool 877 877 default y if CC_IS_GCC && GCC_VERSION >= 110000 && GCC11_NO_ARRAY_BOUNDS 878 878 879 + # Currently, disable -Wstringop-overflow for GCC 11, globally. 880 + config GCC11_NO_STRINGOP_OVERFLOW 881 + def_bool y 882 + 883 + config CC_NO_STRINGOP_OVERFLOW 884 + bool 885 + default y if CC_IS_GCC && GCC_VERSION >= 110000 && GCC_VERSION < 120000 && GCC11_NO_STRINGOP_OVERFLOW 886 + 887 + config CC_STRINGOP_OVERFLOW 888 + bool 889 + default y if CC_IS_GCC && !CC_NO_STRINGOP_OVERFLOW 890 + 879 891 # 880 892 # For architectures that know their GCC __int128 support is sound 881 893 #
+1
kernel/fork.c
··· 1748 1748 if (clone_flags & CLONE_FS) { 1749 1749 /* tsk->fs is already what we want */ 1750 1750 spin_lock(&fs->lock); 1751 + /* "users" and "in_exec" locked for check_unsafe_exec() */ 1751 1752 if (fs->in_exec) { 1752 1753 spin_unlock(&fs->lock); 1753 1754 return -EAGAIN;
+33 -1
kernel/rcu/tree.c
··· 1013 1013 return needmore; 1014 1014 } 1015 1015 1016 + static void swake_up_one_online_ipi(void *arg) 1017 + { 1018 + struct swait_queue_head *wqh = arg; 1019 + 1020 + swake_up_one(wqh); 1021 + } 1022 + 1023 + static void swake_up_one_online(struct swait_queue_head *wqh) 1024 + { 1025 + int cpu = get_cpu(); 1026 + 1027 + /* 1028 + * If called from rcutree_report_cpu_starting(), wake up 1029 + * is dangerous that late in the CPU-down hotplug process. The 1030 + * scheduler might queue an ignored hrtimer. Defer the wake up 1031 + * to an online CPU instead. 1032 + */ 1033 + if (unlikely(cpu_is_offline(cpu))) { 1034 + int target; 1035 + 1036 + target = cpumask_any_and(housekeeping_cpumask(HK_TYPE_RCU), 1037 + cpu_online_mask); 1038 + 1039 + smp_call_function_single(target, swake_up_one_online_ipi, 1040 + wqh, 0); 1041 + put_cpu(); 1042 + } else { 1043 + put_cpu(); 1044 + swake_up_one(wqh); 1045 + } 1046 + } 1047 + 1016 1048 /* 1017 1049 * Awaken the grace-period kthread. Don't do a self-awaken (unless in an 1018 1050 * interrupt or softirq handler, in which case we just might immediately ··· 1069 1037 return; 1070 1038 WRITE_ONCE(rcu_state.gp_wake_time, jiffies); 1071 1039 WRITE_ONCE(rcu_state.gp_wake_seq, READ_ONCE(rcu_state.gp_seq)); 1072 - swake_up_one(&rcu_state.gp_wq); 1040 + swake_up_one_online(&rcu_state.gp_wq); 1073 1041 } 1074 1042 1075 1043 /*
+1 -2
kernel/rcu/tree_exp.h
··· 173 173 return ret; 174 174 } 175 175 176 - 177 176 /* 178 177 * Report the exit from RCU read-side critical section for the last task 179 178 * that queued itself during or before the current expedited preemptible-RCU ··· 200 201 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 201 202 if (wake) { 202 203 smp_mb(); /* EGP done before wake_up(). */ 203 - swake_up_one(&rcu_state.expedited_wq); 204 + swake_up_one_online(&rcu_state.expedited_wq); 204 205 } 205 206 break; 206 207 }
+6 -1
kernel/trace/tracing_map.c
··· 574 574 } 575 575 576 576 memcpy(elt->key, key, map->key_size); 577 - entry->val = elt; 577 + /* 578 + * Ensure the initialization is visible and 579 + * publish the elt. 580 + */ 581 + smp_wmb(); 582 + WRITE_ONCE(entry->val, elt); 578 583 atomic64_inc(&map->hits); 579 584 580 585 return entry->val;
+4
net/8021q/vlan_netlink.c
··· 118 118 } 119 119 if (data[IFLA_VLAN_INGRESS_QOS]) { 120 120 nla_for_each_nested(attr, data[IFLA_VLAN_INGRESS_QOS], rem) { 121 + if (nla_type(attr) != IFLA_VLAN_QOS_MAPPING) 122 + continue; 121 123 m = nla_data(attr); 122 124 vlan_dev_set_ingress_priority(dev, m->to, m->from); 123 125 } 124 126 } 125 127 if (data[IFLA_VLAN_EGRESS_QOS]) { 126 128 nla_for_each_nested(attr, data[IFLA_VLAN_EGRESS_QOS], rem) { 129 + if (nla_type(attr) != IFLA_VLAN_QOS_MAPPING) 130 + continue; 127 131 m = nla_data(attr); 128 132 err = vlan_dev_set_egress_priority(dev, m->from, m->to); 129 133 if (err)
+9
net/core/dev.c
··· 11551 11551 11552 11552 static void __net_exit default_device_exit_net(struct net *net) 11553 11553 { 11554 + struct netdev_name_node *name_node, *tmp; 11554 11555 struct net_device *dev, *aux; 11555 11556 /* 11556 11557 * Push all migratable network devices back to the ··· 11574 11573 snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex); 11575 11574 if (netdev_name_in_use(&init_net, fb_name)) 11576 11575 snprintf(fb_name, IFNAMSIZ, "dev%%d"); 11576 + 11577 + netdev_for_each_altname_safe(dev, name_node, tmp) 11578 + if (netdev_name_in_use(&init_net, name_node->name)) { 11579 + netdev_name_node_del(name_node); 11580 + synchronize_rcu(); 11581 + __netdev_name_node_alt_destroy(name_node); 11582 + } 11583 + 11577 11584 err = dev_change_net_namespace(dev, &init_net, fb_name); 11578 11585 if (err) { 11579 11586 pr_emerg("%s: failed to move %s to init_net: %d\n",
+3
net/core/dev.h
··· 63 63 64 64 #define netdev_for_each_altname(dev, namenode) \ 65 65 list_for_each_entry((namenode), &(dev)->name_node->list, list) 66 + #define netdev_for_each_altname_safe(dev, namenode, next) \ 67 + list_for_each_entry_safe((namenode), (next), &(dev)->name_node->list, \ 68 + list) 66 69 67 70 int netdev_name_node_alt_create(struct net_device *dev, const char *name); 68 71 int netdev_name_node_alt_destroy(struct net_device *dev, const char *name);
+38 -6
net/core/filter.c
··· 83 83 #include <net/netfilter/nf_conntrack_bpf.h> 84 84 #include <net/netkit.h> 85 85 #include <linux/un.h> 86 + #include <net/xdp_sock_drv.h> 86 87 87 88 #include "dev.h" 88 89 ··· 4093 4092 memset(skb_frag_address(frag) + skb_frag_size(frag), 0, offset); 4094 4093 skb_frag_size_add(frag, offset); 4095 4094 sinfo->xdp_frags_size += offset; 4095 + if (rxq->mem.type == MEM_TYPE_XSK_BUFF_POOL) 4096 + xsk_buff_get_tail(xdp)->data_end += offset; 4096 4097 4097 4098 return 0; 4099 + } 4100 + 4101 + static void bpf_xdp_shrink_data_zc(struct xdp_buff *xdp, int shrink, 4102 + struct xdp_mem_info *mem_info, bool release) 4103 + { 4104 + struct xdp_buff *zc_frag = xsk_buff_get_tail(xdp); 4105 + 4106 + if (release) { 4107 + xsk_buff_del_tail(zc_frag); 4108 + __xdp_return(NULL, mem_info, false, zc_frag); 4109 + } else { 4110 + zc_frag->data_end -= shrink; 4111 + } 4112 + } 4113 + 4114 + static bool bpf_xdp_shrink_data(struct xdp_buff *xdp, skb_frag_t *frag, 4115 + int shrink) 4116 + { 4117 + struct xdp_mem_info *mem_info = &xdp->rxq->mem; 4118 + bool release = skb_frag_size(frag) == shrink; 4119 + 4120 + if (mem_info->type == MEM_TYPE_XSK_BUFF_POOL) { 4121 + bpf_xdp_shrink_data_zc(xdp, shrink, mem_info, release); 4122 + goto out; 4123 + } 4124 + 4125 + if (release) { 4126 + struct page *page = skb_frag_page(frag); 4127 + 4128 + __xdp_return(page_address(page), mem_info, false, NULL); 4129 + } 4130 + 4131 + out: 4132 + return release; 4098 4133 } 4099 4134 4100 4135 static int bpf_xdp_frags_shrink_tail(struct xdp_buff *xdp, int offset) ··· 4147 4110 4148 4111 len_free += shrink; 4149 4112 offset -= shrink; 4150 - 4151 - if (skb_frag_size(frag) == shrink) { 4152 - struct page *page = skb_frag_page(frag); 4153 - 4154 - __xdp_return(page_address(page), &xdp->rxq->mem, 4155 - false, NULL); 4113 + if (bpf_xdp_shrink_data(xdp, frag, shrink)) { 4156 4114 n_frags_free++; 4157 4115 } else { 4158 4116 skb_frag_size_sub(frag, shrink);
-3
net/core/request_sock.c
··· 33 33 34 34 void reqsk_queue_alloc(struct request_sock_queue *queue) 35 35 { 36 - spin_lock_init(&queue->rskq_lock); 37 - 38 - spin_lock_init(&queue->fastopenq.lock); 39 36 queue->fastopenq.rskq_rst_head = NULL; 40 37 queue->fastopenq.rskq_rst_tail = NULL; 41 38 queue->fastopenq.qlen = 0;
+9 -2
net/core/sock.c
··· 107 107 #include <linux/interrupt.h> 108 108 #include <linux/poll.h> 109 109 #include <linux/tcp.h> 110 + #include <linux/udp.h> 110 111 #include <linux/init.h> 111 112 #include <linux/highmem.h> 112 113 #include <linux/user_namespace.h> ··· 4145 4144 { 4146 4145 struct sock *sk = p; 4147 4146 4148 - return !skb_queue_empty_lockless(&sk->sk_receive_queue) || 4149 - sk_busy_loop_timeout(sk, start_time); 4147 + if (!skb_queue_empty_lockless(&sk->sk_receive_queue)) 4148 + return true; 4149 + 4150 + if (sk_is_udp(sk) && 4151 + !skb_queue_empty_lockless(&udp_sk(sk)->reader_queue)) 4152 + return true; 4153 + 4154 + return sk_busy_loop_timeout(sk, start_time); 4150 4155 } 4151 4156 EXPORT_SYMBOL(sk_busy_loop_end); 4152 4157 #endif /* CONFIG_NET_RX_BUSY_POLL */
+3
net/ipv4/af_inet.c
··· 330 330 if (INET_PROTOSW_REUSE & answer_flags) 331 331 sk->sk_reuse = SK_CAN_REUSE; 332 332 333 + if (INET_PROTOSW_ICSK & answer_flags) 334 + inet_init_csk_locks(sk); 335 + 333 336 inet = inet_sk(sk); 334 337 inet_assign_bit(IS_ICSK, sk, INET_PROTOSW_ICSK & answer_flags); 335 338
+4
net/ipv4/inet_connection_sock.c
··· 727 727 } 728 728 if (req) 729 729 reqsk_put(req); 730 + 731 + if (newsk) 732 + inet_init_csk_locks(newsk); 733 + 730 734 return newsk; 731 735 out_err: 732 736 newsk = NULL;
+1
net/ipv4/tcp.c
··· 722 722 if (!test_bit(TSQ_THROTTLED, &sk->sk_tsq_flags)) { 723 723 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPAUTOCORKING); 724 724 set_bit(TSQ_THROTTLED, &sk->sk_tsq_flags); 725 + smp_mb__after_atomic(); 725 726 } 726 727 /* It is possible TX completion already happened 727 728 * before we set TSQ_THROTTLED.
+3
net/ipv6/af_inet6.c
··· 199 199 if (INET_PROTOSW_REUSE & answer_flags) 200 200 sk->sk_reuse = SK_CAN_REUSE; 201 201 202 + if (INET_PROTOSW_ICSK & answer_flags) 203 + inet_init_csk_locks(sk); 204 + 202 205 inet = inet_sk(sk); 203 206 inet_assign_bit(IS_ICSK, sk, INET_PROTOSW_ICSK & answer_flags); 204 207
+16 -8
net/llc/af_llc.c
··· 928 928 */ 929 929 static int llc_ui_sendmsg(struct socket *sock, struct msghdr *msg, size_t len) 930 930 { 931 + DECLARE_SOCKADDR(struct sockaddr_llc *, addr, msg->msg_name); 931 932 struct sock *sk = sock->sk; 932 933 struct llc_sock *llc = llc_sk(sk); 933 - DECLARE_SOCKADDR(struct sockaddr_llc *, addr, msg->msg_name); 934 934 int flags = msg->msg_flags; 935 935 int noblock = flags & MSG_DONTWAIT; 936 + int rc = -EINVAL, copied = 0, hdrlen, hh_len; 936 937 struct sk_buff *skb = NULL; 938 + struct net_device *dev; 937 939 size_t size = 0; 938 - int rc = -EINVAL, copied = 0, hdrlen; 939 940 940 941 dprintk("%s: sending from %02X to %02X\n", __func__, 941 942 llc->laddr.lsap, llc->daddr.lsap); ··· 956 955 if (rc) 957 956 goto out; 958 957 } 959 - hdrlen = llc->dev->hard_header_len + llc_ui_header_len(sk, addr); 958 + dev = llc->dev; 959 + hh_len = LL_RESERVED_SPACE(dev); 960 + hdrlen = llc_ui_header_len(sk, addr); 960 961 size = hdrlen + len; 961 - if (size > llc->dev->mtu) 962 - size = llc->dev->mtu; 962 + size = min_t(size_t, size, READ_ONCE(dev->mtu)); 963 963 copied = size - hdrlen; 964 964 rc = -EINVAL; 965 965 if (copied < 0) 966 966 goto out; 967 967 release_sock(sk); 968 - skb = sock_alloc_send_skb(sk, size, noblock, &rc); 968 + skb = sock_alloc_send_skb(sk, hh_len + size, noblock, &rc); 969 969 lock_sock(sk); 970 970 if (!skb) 971 971 goto out; 972 - skb->dev = llc->dev; 972 + if (sock_flag(sk, SOCK_ZAPPED) || 973 + llc->dev != dev || 974 + hdrlen != llc_ui_header_len(sk, addr) || 975 + hh_len != LL_RESERVED_SPACE(dev) || 976 + size > READ_ONCE(dev->mtu)) 977 + goto out; 978 + skb->dev = dev; 973 979 skb->protocol = llc_proto_type(addr->sllc_arphrd); 974 - skb_reserve(skb, hdrlen); 980 + skb_reserve(skb, hh_len + hdrlen); 975 981 rc = memcpy_from_msg(skb_put(skb, copied), msg, copied); 976 982 if (rc) 977 983 goto out;
-7
net/llc/llc_core.c
··· 135 135 .func = llc_rcv, 136 136 }; 137 137 138 - static struct packet_type llc_tr_packet_type __read_mostly = { 139 - .type = cpu_to_be16(ETH_P_TR_802_2), 140 - .func = llc_rcv, 141 - }; 142 - 143 138 static int __init llc_init(void) 144 139 { 145 140 dev_add_pack(&llc_packet_type); 146 - dev_add_pack(&llc_tr_packet_type); 147 141 return 0; 148 142 } 149 143 150 144 static void __exit llc_exit(void) 151 145 { 152 146 dev_remove_pack(&llc_packet_type); 153 - dev_remove_pack(&llc_tr_packet_type); 154 147 } 155 148 156 149 module_init(llc_init);
-1
net/mac80211/Kconfig
··· 62 62 depends on KUNIT 63 63 depends on MAC80211 64 64 default KUNIT_ALL_TESTS 65 - depends on !KERNEL_6_2 66 65 help 67 66 Enable this option to test mac80211 internals with kunit. 68 67
+6 -1
net/mac80211/sta_info.c
··· 404 404 int i; 405 405 406 406 for (i = 0; i < ARRAY_SIZE(sta->link); i++) { 407 - if (!(sta->sta.valid_links & BIT(i))) 407 + struct link_sta_info *link_sta; 408 + 409 + link_sta = rcu_access_pointer(sta->link[i]); 410 + if (!link_sta) 408 411 continue; 409 412 410 413 sta_remove_link(sta, i, false); ··· 912 909 913 910 if (ieee80211_vif_is_mesh(&sdata->vif)) 914 911 mesh_accept_plinks_update(sdata); 912 + 913 + ieee80211_check_fast_xmit(sta); 915 914 916 915 return 0; 917 916 out_remove:
+1 -1
net/mac80211/tx.c
··· 3048 3048 sdata->vif.type == NL80211_IFTYPE_STATION) 3049 3049 goto out; 3050 3050 3051 - if (!test_sta_flag(sta, WLAN_STA_AUTHORIZED)) 3051 + if (!test_sta_flag(sta, WLAN_STA_AUTHORIZED) || !sta->uploaded) 3052 3052 goto out; 3053 3053 3054 3054 if (test_sta_flag(sta, WLAN_STA_PS_STA) ||
+10 -10
net/netfilter/nf_tables_api.c
··· 24 24 #include <net/sock.h> 25 25 26 26 #define NFT_MODULE_AUTOLOAD_LIMIT (MODULE_NAME_LEN - sizeof("nft-expr-255-")) 27 + #define NFT_SET_MAX_ANONLEN 16 27 28 28 29 unsigned int nf_tables_net_id __read_mostly; 29 30 ··· 4412 4411 p = strchr(name, '%'); 4413 4412 if (p != NULL) { 4414 4413 if (p[1] != 'd' || strchr(p + 2, '%')) 4414 + return -EINVAL; 4415 + 4416 + if (strnlen(name, NFT_SET_MAX_ANONLEN) >= NFT_SET_MAX_ANONLEN) 4415 4417 return -EINVAL; 4416 4418 4417 4419 inuse = (unsigned long *)get_zeroed_page(GFP_KERNEL); ··· 10992 10988 data->verdict.code = ntohl(nla_get_be32(tb[NFTA_VERDICT_CODE])); 10993 10989 10994 10990 switch (data->verdict.code) { 10995 - default: 10996 - switch (data->verdict.code & NF_VERDICT_MASK) { 10997 - case NF_ACCEPT: 10998 - case NF_DROP: 10999 - case NF_QUEUE: 11000 - break; 11001 - default: 11002 - return -EINVAL; 11003 - } 11004 - fallthrough; 10991 + case NF_ACCEPT: 10992 + case NF_DROP: 10993 + case NF_QUEUE: 10994 + break; 11005 10995 case NFT_CONTINUE: 11006 10996 case NFT_BREAK: 11007 10997 case NFT_RETURN: ··· 11030 11032 11031 11033 data->verdict.chain = chain; 11032 11034 break; 11035 + default: 11036 + return -EINVAL; 11033 11037 } 11034 11038 11035 11039 desc->len = sizeof(data->verdict);
+9 -2
net/netfilter/nft_chain_filter.c
··· 357 357 unsigned long event, void *ptr) 358 358 { 359 359 struct net_device *dev = netdev_notifier_info_to_dev(ptr); 360 + struct nft_base_chain *basechain; 360 361 struct nftables_pernet *nft_net; 361 - struct nft_table *table; 362 362 struct nft_chain *chain, *nr; 363 + struct nft_table *table; 363 364 struct nft_ctx ctx = { 364 365 .net = dev_net(dev), 365 366 }; ··· 372 371 nft_net = nft_pernet(ctx.net); 373 372 mutex_lock(&nft_net->commit_mutex); 374 373 list_for_each_entry(table, &nft_net->tables, list) { 375 - if (table->family != NFPROTO_NETDEV) 374 + if (table->family != NFPROTO_NETDEV && 375 + table->family != NFPROTO_INET) 376 376 continue; 377 377 378 378 ctx.family = table->family; 379 379 ctx.table = table; 380 380 list_for_each_entry_safe(chain, nr, &table->chains, list) { 381 381 if (!nft_is_base_chain(chain)) 382 + continue; 383 + 384 + basechain = nft_base_chain(chain); 385 + if (table->family == NFPROTO_INET && 386 + basechain->ops.hooknum != NF_INET_INGRESS) 382 387 continue; 383 388 384 389 ctx.chain = chain;
+12
net/netfilter/nft_compat.c
··· 350 350 unsigned int hook_mask = 0; 351 351 int ret; 352 352 353 + if (ctx->family != NFPROTO_IPV4 && 354 + ctx->family != NFPROTO_IPV6 && 355 + ctx->family != NFPROTO_BRIDGE && 356 + ctx->family != NFPROTO_ARP) 357 + return -EOPNOTSUPP; 358 + 353 359 if (nft_is_base_chain(ctx->chain)) { 354 360 const struct nft_base_chain *basechain = 355 361 nft_base_chain(ctx->chain); ··· 600 594 struct xt_match *match = expr->ops->data; 601 595 unsigned int hook_mask = 0; 602 596 int ret; 597 + 598 + if (ctx->family != NFPROTO_IPV4 && 599 + ctx->family != NFPROTO_IPV6 && 600 + ctx->family != NFPROTO_BRIDGE && 601 + ctx->family != NFPROTO_ARP) 602 + return -EOPNOTSUPP; 603 603 604 604 if (nft_is_base_chain(ctx->chain)) { 605 605 const struct nft_base_chain *basechain =
+5
net/netfilter/nft_flow_offload.c
··· 384 384 { 385 385 unsigned int hook_mask = (1 << NF_INET_FORWARD); 386 386 387 + if (ctx->family != NFPROTO_IPV4 && 388 + ctx->family != NFPROTO_IPV6 && 389 + ctx->family != NFPROTO_INET) 390 + return -EOPNOTSUPP; 391 + 387 392 return nft_chain_validate_hooks(ctx->chain, hook_mask); 388 393 } 389 394
+16 -7
net/netfilter/nft_limit.c
··· 58 58 static int nft_limit_init(struct nft_limit_priv *priv, 59 59 const struct nlattr * const tb[], bool pkts) 60 60 { 61 + u64 unit, tokens, rate_with_burst; 61 62 bool invert = false; 62 - u64 unit, tokens; 63 63 64 64 if (tb[NFTA_LIMIT_RATE] == NULL || 65 65 tb[NFTA_LIMIT_UNIT] == NULL) 66 66 return -EINVAL; 67 67 68 68 priv->rate = be64_to_cpu(nla_get_be64(tb[NFTA_LIMIT_RATE])); 69 + if (priv->rate == 0) 70 + return -EINVAL; 71 + 69 72 unit = be64_to_cpu(nla_get_be64(tb[NFTA_LIMIT_UNIT])); 70 - priv->nsecs = unit * NSEC_PER_SEC; 71 - if (priv->rate == 0 || priv->nsecs < unit) 73 + if (check_mul_overflow(unit, NSEC_PER_SEC, &priv->nsecs)) 72 74 return -EOVERFLOW; 73 75 74 76 if (tb[NFTA_LIMIT_BURST]) ··· 79 77 if (pkts && priv->burst == 0) 80 78 priv->burst = NFT_LIMIT_PKT_BURST_DEFAULT; 81 79 82 - if (priv->rate + priv->burst < priv->rate) 80 + if (check_add_overflow(priv->rate, priv->burst, &rate_with_burst)) 83 81 return -EOVERFLOW; 84 82 85 83 if (pkts) { 86 - tokens = div64_u64(priv->nsecs, priv->rate) * priv->burst; 84 + u64 tmp = div64_u64(priv->nsecs, priv->rate); 85 + 86 + if (check_mul_overflow(tmp, priv->burst, &tokens)) 87 + return -EOVERFLOW; 87 88 } else { 89 + u64 tmp; 90 + 88 91 /* The token bucket size limits the number of tokens can be 89 92 * accumulated. tokens_max specifies the bucket size. 90 93 * tokens_max = unit * (rate + burst) / rate. 91 94 */ 92 - tokens = div64_u64(priv->nsecs * (priv->rate + priv->burst), 93 - priv->rate); 95 + if (check_mul_overflow(priv->nsecs, rate_with_burst, &tmp)) 96 + return -EOVERFLOW; 97 + 98 + tokens = div64_u64(tmp, priv->rate); 94 99 } 95 100 96 101 if (tb[NFTA_LIMIT_FLAGS]) {
+5
net/netfilter/nft_nat.c
··· 143 143 struct nft_nat *priv = nft_expr_priv(expr); 144 144 int err; 145 145 146 + if (ctx->family != NFPROTO_IPV4 && 147 + ctx->family != NFPROTO_IPV6 && 148 + ctx->family != NFPROTO_INET) 149 + return -EOPNOTSUPP; 150 + 146 151 err = nft_chain_validate_dependency(ctx->chain, NFT_CHAIN_T_NAT); 147 152 if (err < 0) 148 153 return err;
+5
net/netfilter/nft_rt.c
··· 166 166 const struct nft_rt *priv = nft_expr_priv(expr); 167 167 unsigned int hooks; 168 168 169 + if (ctx->family != NFPROTO_IPV4 && 170 + ctx->family != NFPROTO_IPV6 && 171 + ctx->family != NFPROTO_INET) 172 + return -EOPNOTSUPP; 173 + 169 174 switch (priv->key) { 170 175 case NFT_RT_NEXTHOP4: 171 176 case NFT_RT_NEXTHOP6:
+5
net/netfilter/nft_socket.c
··· 242 242 const struct nft_expr *expr, 243 243 const struct nft_data **data) 244 244 { 245 + if (ctx->family != NFPROTO_IPV4 && 246 + ctx->family != NFPROTO_IPV6 && 247 + ctx->family != NFPROTO_INET) 248 + return -EOPNOTSUPP; 249 + 245 250 return nft_chain_validate_hooks(ctx->chain, 246 251 (1 << NF_INET_PRE_ROUTING) | 247 252 (1 << NF_INET_LOCAL_IN) |
+5 -2
net/netfilter/nft_synproxy.c
··· 186 186 break; 187 187 #endif 188 188 case NFPROTO_INET: 189 - case NFPROTO_BRIDGE: 190 189 err = nf_synproxy_ipv4_init(snet, ctx->net); 191 190 if (err) 192 191 goto nf_ct_failure; ··· 218 219 break; 219 220 #endif 220 221 case NFPROTO_INET: 221 - case NFPROTO_BRIDGE: 222 222 nf_synproxy_ipv4_fini(snet, ctx->net); 223 223 nf_synproxy_ipv6_fini(snet, ctx->net); 224 224 break; ··· 251 253 const struct nft_expr *expr, 252 254 const struct nft_data **data) 253 255 { 256 + if (ctx->family != NFPROTO_IPV4 && 257 + ctx->family != NFPROTO_IPV6 && 258 + ctx->family != NFPROTO_INET) 259 + return -EOPNOTSUPP; 260 + 254 261 return nft_chain_validate_hooks(ctx->chain, (1 << NF_INET_LOCAL_IN) | 255 262 (1 << NF_INET_FORWARD)); 256 263 }
+5
net/netfilter/nft_tproxy.c
··· 316 316 const struct nft_expr *expr, 317 317 const struct nft_data **data) 318 318 { 319 + if (ctx->family != NFPROTO_IPV4 && 320 + ctx->family != NFPROTO_IPV6 && 321 + ctx->family != NFPROTO_INET) 322 + return -EOPNOTSUPP; 323 + 319 324 return nft_chain_validate_hooks(ctx->chain, 1 << NF_INET_PRE_ROUTING); 320 325 } 321 326
+5
net/netfilter/nft_xfrm.c
··· 235 235 const struct nft_xfrm *priv = nft_expr_priv(expr); 236 236 unsigned int hooks; 237 237 238 + if (ctx->family != NFPROTO_IPV4 && 239 + ctx->family != NFPROTO_IPV6 && 240 + ctx->family != NFPROTO_INET) 241 + return -EOPNOTSUPP; 242 + 238 243 switch (priv->dir) { 239 244 case XFRM_POLICY_IN: 240 245 hooks = (1 << NF_INET_FORWARD) |
+1 -1
net/netlink/af_netlink.c
··· 374 374 if (is_vmalloc_addr(skb->head)) { 375 375 if (!skb->cloned || 376 376 !atomic_dec_return(&(skb_shinfo(skb)->dataref))) 377 - vfree(skb->head); 377 + vfree_atomic(skb->head); 378 378 379 379 skb->head = NULL; 380 380 }
+1 -1
net/rds/af_rds.c
··· 419 419 420 420 rs->rs_rx_traces = trace.rx_traces; 421 421 for (i = 0; i < rs->rs_rx_traces; i++) { 422 - if (trace.rx_trace_pos[i] > RDS_MSG_RX_DGRAM_TRACE_MAX) { 422 + if (trace.rx_trace_pos[i] >= RDS_MSG_RX_DGRAM_TRACE_MAX) { 423 423 rs->rs_rx_traces = 0; 424 424 return -EFAULT; 425 425 }
+8 -1
net/sched/cls_api.c
··· 1560 1560 chain_prev = chain, 1561 1561 chain = __tcf_get_next_chain(block, chain), 1562 1562 tcf_chain_put(chain_prev)) { 1563 + if (chain->tmplt_ops && add) 1564 + chain->tmplt_ops->tmplt_reoffload(chain, true, cb, 1565 + cb_priv); 1563 1566 for (tp = __tcf_get_next_proto(chain, NULL); tp; 1564 1567 tp_prev = tp, 1565 1568 tp = __tcf_get_next_proto(chain, tp), ··· 1578 1575 goto err_playback_remove; 1579 1576 } 1580 1577 } 1578 + if (chain->tmplt_ops && !add) 1579 + chain->tmplt_ops->tmplt_reoffload(chain, false, cb, 1580 + cb_priv); 1581 1581 } 1582 1582 1583 1583 return 0; ··· 3006 3000 ops = tcf_proto_lookup_ops(name, true, extack); 3007 3001 if (IS_ERR(ops)) 3008 3002 return PTR_ERR(ops); 3009 - if (!ops->tmplt_create || !ops->tmplt_destroy || !ops->tmplt_dump) { 3003 + if (!ops->tmplt_create || !ops->tmplt_destroy || !ops->tmplt_dump || 3004 + !ops->tmplt_reoffload) { 3010 3005 NL_SET_ERR_MSG(extack, "Chain templates are not supported with specified classifier"); 3011 3006 module_put(ops->owner); 3012 3007 return -EOPNOTSUPP;
+23
net/sched/cls_flower.c
··· 2721 2721 kfree(tmplt); 2722 2722 } 2723 2723 2724 + static void fl_tmplt_reoffload(struct tcf_chain *chain, bool add, 2725 + flow_setup_cb_t *cb, void *cb_priv) 2726 + { 2727 + struct fl_flow_tmplt *tmplt = chain->tmplt_priv; 2728 + struct flow_cls_offload cls_flower = {}; 2729 + 2730 + cls_flower.rule = flow_rule_alloc(0); 2731 + if (!cls_flower.rule) 2732 + return; 2733 + 2734 + cls_flower.common.chain_index = chain->index; 2735 + cls_flower.command = add ? FLOW_CLS_TMPLT_CREATE : 2736 + FLOW_CLS_TMPLT_DESTROY; 2737 + cls_flower.cookie = (unsigned long) tmplt; 2738 + cls_flower.rule->match.dissector = &tmplt->dissector; 2739 + cls_flower.rule->match.mask = &tmplt->mask; 2740 + cls_flower.rule->match.key = &tmplt->dummy_key; 2741 + 2742 + cb(TC_SETUP_CLSFLOWER, &cls_flower, cb_priv); 2743 + kfree(cls_flower.rule); 2744 + } 2745 + 2724 2746 static int fl_dump_key_val(struct sk_buff *skb, 2725 2747 void *val, int val_type, 2726 2748 void *mask, int mask_type, int len) ··· 3650 3628 .bind_class = fl_bind_class, 3651 3629 .tmplt_create = fl_tmplt_create, 3652 3630 .tmplt_destroy = fl_tmplt_destroy, 3631 + .tmplt_reoffload = fl_tmplt_reoffload, 3653 3632 .tmplt_dump = fl_tmplt_dump, 3654 3633 .get_exts = fl_get_exts, 3655 3634 .owner = THIS_MODULE,
+1 -1
net/smc/smc_diag.c
··· 164 164 } 165 165 if (smc_conn_lgr_valid(&smc->conn) && smc->conn.lgr->is_smcd && 166 166 (req->diag_ext & (1 << (SMC_DIAG_DMBINFO - 1))) && 167 - !list_empty(&smc->conn.lgr->list)) { 167 + !list_empty(&smc->conn.lgr->list) && smc->conn.rmb_desc) { 168 168 struct smc_connection *conn = &smc->conn; 169 169 struct smcd_diag_dmbinfo dinfo; 170 170 struct smcd_dev *smcd = conn->lgr->smcd;
+2 -2
net/sunrpc/svcsock.c
··· 717 717 ARRAY_SIZE(rqstp->rq_bvec), xdr); 718 718 719 719 iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, rqstp->rq_bvec, 720 - count, 0); 720 + count, rqstp->rq_res.len); 721 721 err = sock_sendmsg(svsk->sk_sock, &msg); 722 722 if (err == -ECONNREFUSED) { 723 723 /* ICMP error on earlier request. */ 724 724 iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, rqstp->rq_bvec, 725 - count, 0); 725 + count, rqstp->rq_res.len); 726 726 err = sock_sendmsg(svsk->sk_sock, &msg); 727 727 } 728 728
-1
net/wireless/Kconfig
··· 206 206 depends on KUNIT 207 207 depends on CFG80211 208 208 default KUNIT_ALL_TESTS 209 - depends on !KERNEL_6_2 210 209 help 211 210 Enable this option to test cfg80211 functions with kunit. 212 211
+1
net/wireless/nl80211.c
··· 4020 4020 } 4021 4021 wiphy_unlock(&rdev->wiphy); 4022 4022 4023 + if_start = 0; 4023 4024 wp_idx++; 4024 4025 } 4025 4026 out:
+8 -4
net/xdp/xsk.c
··· 167 167 contd = XDP_PKT_CONTD; 168 168 169 169 err = __xsk_rcv_zc(xs, xskb, len, contd); 170 - if (err || likely(!frags)) 171 - goto out; 170 + if (err) 171 + goto err; 172 + if (likely(!frags)) 173 + return 0; 172 174 173 175 xskb_list = &xskb->pool->xskb_list; 174 176 list_for_each_entry_safe(pos, tmp, xskb_list, xskb_list_node) { ··· 179 177 len = pos->xdp.data_end - pos->xdp.data; 180 178 err = __xsk_rcv_zc(xs, pos, len, contd); 181 179 if (err) 182 - return err; 180 + goto err; 183 181 list_del(&pos->xskb_list_node); 184 182 } 185 183 186 - out: 184 + return 0; 185 + err: 186 + xsk_buff_free(xdp); 187 187 return err; 188 188 } 189 189
+1
net/xdp/xsk_buff_pool.c
··· 555 555 556 556 xskb->xdp.data = xskb->xdp.data_hard_start + XDP_PACKET_HEADROOM; 557 557 xskb->xdp.data_meta = xskb->xdp.data; 558 + xskb->xdp.flags = 0; 558 559 559 560 if (pool->dma_need_sync) { 560 561 dma_sync_single_range_for_device(pool->dev, xskb->dma, 0,
+3
samples/cgroup/.gitignore
··· 1 + /cgroup_event_listener 2 + /memcg_event_listener 3 +
-2
scripts/Makefile.extrawarn
··· 97 97 KBUILD_CFLAGS += $(call cc-option, -Wpacked-not-aligned) 98 98 KBUILD_CFLAGS += $(call cc-option, -Wformat-overflow) 99 99 KBUILD_CFLAGS += $(call cc-option, -Wformat-truncation) 100 - KBUILD_CFLAGS += $(call cc-option, -Wstringop-overflow) 101 100 KBUILD_CFLAGS += $(call cc-option, -Wstringop-truncation) 102 101 103 102 KBUILD_CPPFLAGS += -Wundef ··· 112 113 KBUILD_CFLAGS += $(call cc-disable-warning, packed-not-aligned) 113 114 KBUILD_CFLAGS += $(call cc-disable-warning, format-overflow) 114 115 KBUILD_CFLAGS += $(call cc-disable-warning, format-truncation) 115 - KBUILD_CFLAGS += $(call cc-disable-warning, stringop-overflow) 116 116 KBUILD_CFLAGS += $(call cc-disable-warning, stringop-truncation) 117 117 118 118 ifdef CONFIG_CC_IS_CLANG
+3 -1
security/apparmor/lsm.c
··· 469 469 * Cache permissions granted by the previous exec check, with 470 470 * implicit read and executable mmap which are required to 471 471 * actually execute the image. 472 + * 473 + * Illogically, FMODE_EXEC is in f_flags, not f_mode. 472 474 */ 473 - if (current->in_execve) { 475 + if (file->f_flags & __FMODE_EXEC) { 474 476 fctx->allow = MAY_EXEC | MAY_READ | AA_EXEC_MMAP; 475 477 return 0; 476 478 }
-4
security/keys/encrypted-keys/encrypted.c
··· 237 237 break; 238 238 } 239 239 *decrypted_data = strsep(&datablob, " \t"); 240 - if (!*decrypted_data) { 241 - pr_info("encrypted_key: decrypted_data is missing\n"); 242 - break; 243 - } 244 240 ret = 0; 245 241 break; 246 242 case Opt_load:
+2 -1
security/tomoyo/tomoyo.c
··· 328 328 static int tomoyo_file_open(struct file *f) 329 329 { 330 330 /* Don't check read permission here if called from execve(). */ 331 - if (current->in_execve) 331 + /* Illogically, FMODE_EXEC is in f_flags, not f_mode. */ 332 + if (f->f_flags & __FMODE_EXEC) 332 333 return 0; 333 334 return tomoyo_check_open_permission(tomoyo_domain(), &f->f_path, 334 335 f->f_flags);
+7 -1
tools/arch/x86/include/asm/cpufeatures.h
··· 198 198 #define X86_FEATURE_CAT_L3 ( 7*32+ 4) /* Cache Allocation Technology L3 */ 199 199 #define X86_FEATURE_CAT_L2 ( 7*32+ 5) /* Cache Allocation Technology L2 */ 200 200 #define X86_FEATURE_CDP_L3 ( 7*32+ 6) /* Code and Data Prioritization L3 */ 201 + #define X86_FEATURE_TDX_HOST_PLATFORM ( 7*32+ 7) /* Platform supports being a TDX host */ 201 202 #define X86_FEATURE_HW_PSTATE ( 7*32+ 8) /* AMD HW-PState */ 202 203 #define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */ 203 204 #define X86_FEATURE_XCOMPACTED ( 7*32+10) /* "" Use compacted XSTATE (XSAVES or XSAVEC) */ ··· 309 308 #define X86_FEATURE_SMBA (11*32+21) /* "" Slow Memory Bandwidth Allocation */ 310 309 #define X86_FEATURE_BMEC (11*32+22) /* "" Bandwidth Monitoring Event Configuration */ 311 310 #define X86_FEATURE_USER_SHSTK (11*32+23) /* Shadow stack support for user mode applications */ 312 - 313 311 #define X86_FEATURE_SRSO (11*32+24) /* "" AMD BTB untrain RETs */ 314 312 #define X86_FEATURE_SRSO_ALIAS (11*32+25) /* "" AMD BTB untrain RETs through aliasing */ 315 313 #define X86_FEATURE_IBPB_ON_VMEXIT (11*32+26) /* "" Issue an IBPB only on VMEXIT */ 314 + #define X86_FEATURE_APIC_MSRS_FENCE (11*32+27) /* "" IA32_TSC_DEADLINE and X2APIC MSRs need fencing */ 315 + #define X86_FEATURE_ZEN2 (11*32+28) /* "" CPU based on Zen2 microarchitecture */ 316 + #define X86_FEATURE_ZEN3 (11*32+29) /* "" CPU based on Zen3 microarchitecture */ 317 + #define X86_FEATURE_ZEN4 (11*32+30) /* "" CPU based on Zen4 microarchitecture */ 318 + #define X86_FEATURE_ZEN1 (11*32+31) /* "" CPU based on Zen1 microarchitecture */ 316 319 317 320 /* Intel-defined CPU features, CPUID level 0x00000007:1 (EAX), word 12 */ 318 321 #define X86_FEATURE_AVX_VNNI (12*32+ 4) /* AVX VNNI instructions */ ··· 500 495 #define X86_BUG_EIBRS_PBRSB X86_BUG(28) /* EIBRS is vulnerable to Post Barrier RSB Predictions */ 501 496 #define X86_BUG_SMT_RSB X86_BUG(29) /* CPU is vulnerable to Cross-Thread Return Address 
Predictions */ 502 497 #define X86_BUG_GDS X86_BUG(30) /* CPU is affected by Gather Data Sampling */ 498 + #define X86_BUG_TDX_PW_MCE X86_BUG(31) /* CPU may incur #MC if non-TD software does partial write to TDX private memory */ 503 499 504 500 /* BUG word 2 */ 505 501 #define X86_BUG_SRSO X86_BUG(1*32 + 0) /* AMD SRSO bug */
+8
tools/arch/x86/include/asm/msr-index.h
··· 237 237 #define LBR_INFO_CYCLES 0xffff 238 238 #define LBR_INFO_BR_TYPE_OFFSET 56 239 239 #define LBR_INFO_BR_TYPE (0xfull << LBR_INFO_BR_TYPE_OFFSET) 240 + #define LBR_INFO_BR_CNTR_OFFSET 32 241 + #define LBR_INFO_BR_CNTR_NUM 4 242 + #define LBR_INFO_BR_CNTR_BITS 2 243 + #define LBR_INFO_BR_CNTR_MASK GENMASK_ULL(LBR_INFO_BR_CNTR_BITS - 1, 0) 244 + #define LBR_INFO_BR_CNTR_FULL_MASK GENMASK_ULL(LBR_INFO_BR_CNTR_NUM * LBR_INFO_BR_CNTR_BITS - 1, 0) 240 245 241 246 #define MSR_ARCH_LBR_CTL 0x000014ce 242 247 #define ARCH_LBR_CTL_LBREN BIT(0) ··· 540 535 /* Auto-reload via MSR instead of DS area */ 541 536 #define MSR_RELOAD_PMC0 0x000014c1 542 537 #define MSR_RELOAD_FIXED_CTR0 0x00001309 538 + 539 + /* KeyID partitioning between MKTME and TDX */ 540 + #define MSR_IA32_MKTME_KEYID_PARTITIONING 0x00000087 543 541 544 542 /* 545 543 * AMD64 MSRs. Not complete. See the architecture manual for a more
+3
tools/arch/x86/include/uapi/asm/kvm.h
··· 562 562 /* x86-specific KVM_EXIT_HYPERCALL flags. */ 563 563 #define KVM_EXIT_HYPERCALL_LONG_MODE BIT(0) 564 564 565 + #define KVM_X86_DEFAULT_VM 0 566 + #define KVM_X86_SW_PROTECTED_VM 1 567 + 565 568 #endif /* _ASM_X86_KVM_H */
+2 -2
tools/arch/x86/lib/memcpy_64.S
··· 1 1 /* SPDX-License-Identifier: GPL-2.0-only */ 2 2 /* Copyright 2002 Andi Kleen */ 3 3 4 + #include <linux/export.h> 4 5 #include <linux/linkage.h> 5 6 #include <asm/errno.h> 6 7 #include <asm/cpufeatures.h> 7 8 #include <asm/alternative.h> 8 - #include <asm/export.h> 9 9 10 10 .section .noinstr.text, "ax" 11 11 ··· 39 39 SYM_FUNC_END(__memcpy) 40 40 EXPORT_SYMBOL(__memcpy) 41 41 42 - SYM_FUNC_ALIAS(memcpy, __memcpy) 42 + SYM_FUNC_ALIAS_MEMFUNC(memcpy, __memcpy) 43 43 EXPORT_SYMBOL(memcpy) 44 44 45 45 SYM_FUNC_START_LOCAL(memcpy_orig)
+2 -2
tools/arch/x86/lib/memset_64.S
··· 1 1 /* SPDX-License-Identifier: GPL-2.0 */ 2 2 /* Copyright 2002 Andi Kleen, SuSE Labs */ 3 3 4 + #include <linux/export.h> 4 5 #include <linux/linkage.h> 5 6 #include <asm/cpufeatures.h> 6 7 #include <asm/alternative.h> 7 - #include <asm/export.h> 8 8 9 9 .section .noinstr.text, "ax" 10 10 ··· 40 40 SYM_FUNC_END(__memset) 41 41 EXPORT_SYMBOL(__memset) 42 42 43 - SYM_FUNC_ALIAS(memset, __memset) 43 + SYM_FUNC_ALIAS_MEMFUNC(memset, __memset) 44 44 EXPORT_SYMBOL(memset) 45 45 46 46 SYM_FUNC_START_LOCAL(memset_orig)
+12 -12
tools/include/asm-generic/unaligned.h
··· 105 105 106 106 static inline void __put_unaligned_be24(const u32 val, u8 *p) 107 107 { 108 - *p++ = val >> 16; 109 - *p++ = val >> 8; 110 - *p++ = val; 108 + *p++ = (val >> 16) & 0xff; 109 + *p++ = (val >> 8) & 0xff; 110 + *p++ = val & 0xff; 111 111 } 112 112 113 113 static inline void put_unaligned_be24(const u32 val, void *p) ··· 117 117 118 118 static inline void __put_unaligned_le24(const u32 val, u8 *p) 119 119 { 120 - *p++ = val; 121 - *p++ = val >> 8; 122 - *p++ = val >> 16; 120 + *p++ = val & 0xff; 121 + *p++ = (val >> 8) & 0xff; 122 + *p++ = (val >> 16) & 0xff; 123 123 } 124 124 125 125 static inline void put_unaligned_le24(const u32 val, void *p) ··· 129 129 130 130 static inline void __put_unaligned_be48(const u64 val, u8 *p) 131 131 { 132 - *p++ = val >> 40; 133 - *p++ = val >> 32; 134 - *p++ = val >> 24; 135 - *p++ = val >> 16; 136 - *p++ = val >> 8; 137 - *p++ = val; 132 + *p++ = (val >> 40) & 0xff; 133 + *p++ = (val >> 32) & 0xff; 134 + *p++ = (val >> 24) & 0xff; 135 + *p++ = (val >> 16) & 0xff; 136 + *p++ = (val >> 8) & 0xff; 137 + *p++ = val & 0xff; 138 138 } 139 139 140 140 static inline void put_unaligned_be48(const u64 val, void *p)
+14 -1
tools/include/uapi/asm-generic/unistd.h
··· 829 829 #define __NR_futex_requeue 456 830 830 __SYSCALL(__NR_futex_requeue, sys_futex_requeue) 831 831 832 + #define __NR_statmount 457 833 + __SYSCALL(__NR_statmount, sys_statmount) 834 + 835 + #define __NR_listmount 458 836 + __SYSCALL(__NR_listmount, sys_listmount) 837 + 838 + #define __NR_lsm_get_self_attr 459 839 + __SYSCALL(__NR_lsm_get_self_attr, sys_lsm_get_self_attr) 840 + #define __NR_lsm_set_self_attr 460 841 + __SYSCALL(__NR_lsm_set_self_attr, sys_lsm_set_self_attr) 842 + #define __NR_lsm_list_modules 461 843 + __SYSCALL(__NR_lsm_list_modules, sys_lsm_list_modules) 844 + 832 845 #undef __NR_syscalls 833 - #define __NR_syscalls 457 846 + #define __NR_syscalls 462 834 847 835 848 /* 836 849 * 32 bit systems traditionally used different
+71 -1
tools/include/uapi/drm/drm.h
··· 713 713 /** 714 714 * DRM_CAP_ASYNC_PAGE_FLIP 715 715 * 716 - * If set to 1, the driver supports &DRM_MODE_PAGE_FLIP_ASYNC. 716 + * If set to 1, the driver supports &DRM_MODE_PAGE_FLIP_ASYNC for legacy 717 + * page-flips. 717 718 */ 718 719 #define DRM_CAP_ASYNC_PAGE_FLIP 0x7 719 720 /** ··· 774 773 * :ref:`drm_sync_objects`. 775 774 */ 776 775 #define DRM_CAP_SYNCOBJ_TIMELINE 0x14 776 + /** 777 + * DRM_CAP_ATOMIC_ASYNC_PAGE_FLIP 778 + * 779 + * If set to 1, the driver supports &DRM_MODE_PAGE_FLIP_ASYNC for atomic 780 + * commits. 781 + */ 782 + #define DRM_CAP_ATOMIC_ASYNC_PAGE_FLIP 0x15 777 783 778 784 /* DRM_IOCTL_GET_CAP ioctl argument type */ 779 785 struct drm_get_cap { ··· 850 842 */ 851 843 #define DRM_CLIENT_CAP_WRITEBACK_CONNECTORS 5 852 844 845 + /** 846 + * DRM_CLIENT_CAP_CURSOR_PLANE_HOTSPOT 847 + * 848 + * Drivers for para-virtualized hardware (e.g. vmwgfx, qxl, virtio and 849 + * virtualbox) have additional restrictions for cursor planes (thus 850 + * making cursor planes on those drivers not truly universal,) e.g. 851 + * they need cursor planes to act like one would expect from a mouse 852 + * cursor and have correctly set hotspot properties. 853 + * If this client cap is not set the DRM core will hide cursor plane on 854 + * those virtualized drivers because not setting it implies that the 855 + * client is not capable of dealing with those extra restictions. 856 + * Clients which do set cursor hotspot and treat the cursor plane 857 + * like a mouse cursor should set this property. 858 + * The client must enable &DRM_CLIENT_CAP_ATOMIC first. 859 + * 860 + * Setting this property on drivers which do not special case 861 + * cursor planes (i.e. non-virtualized drivers) will return 862 + * EOPNOTSUPP, which can be used by userspace to gauge 863 + * requirements of the hardware/drivers they're running on. 864 + * 865 + * This capability is always supported for atomic-capable virtualized 866 + * drivers starting from kernel version 6.6. 
867 + */ 868 + #define DRM_CLIENT_CAP_CURSOR_PLANE_HOTSPOT 6 869 + 853 870 /* DRM_IOCTL_SET_CLIENT_CAP ioctl argument type */ 854 871 struct drm_set_client_cap { 855 872 __u64 capability; ··· 926 893 #define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL (1 << 0) 927 894 #define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT (1 << 1) 928 895 #define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE (1 << 2) /* wait for time point to become available */ 896 + #define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_DEADLINE (1 << 3) /* set fence deadline to deadline_nsec */ 929 897 struct drm_syncobj_wait { 930 898 __u64 handles; 931 899 /* absolute timeout */ ··· 935 901 __u32 flags; 936 902 __u32 first_signaled; /* only valid when not waiting all */ 937 903 __u32 pad; 904 + /** 905 + * @deadline_nsec - fence deadline hint 906 + * 907 + * Deadline hint, in absolute CLOCK_MONOTONIC, to set on backing 908 + * fence(s) if the DRM_SYNCOBJ_WAIT_FLAGS_WAIT_DEADLINE flag is 909 + * set. 910 + */ 911 + __u64 deadline_nsec; 938 912 }; 939 913 940 914 struct drm_syncobj_timeline_wait { ··· 955 913 __u32 flags; 956 914 __u32 first_signaled; /* only valid when not waiting all */ 957 915 __u32 pad; 916 + /** 917 + * @deadline_nsec - fence deadline hint 918 + * 919 + * Deadline hint, in absolute CLOCK_MONOTONIC, to set on backing 920 + * fence(s) if the DRM_SYNCOBJ_WAIT_FLAGS_WAIT_DEADLINE flag is 921 + * set. 922 + */ 923 + __u64 deadline_nsec; 958 924 }; 959 925 960 926 /** ··· 1267 1217 #define DRM_IOCTL_MODE_GETFB2 DRM_IOWR(0xCE, struct drm_mode_fb_cmd2) 1268 1218 1269 1219 #define DRM_IOCTL_SYNCOBJ_EVENTFD DRM_IOWR(0xCF, struct drm_syncobj_eventfd) 1220 + 1221 + /** 1222 + * DRM_IOCTL_MODE_CLOSEFB - Close a framebuffer. 1223 + * 1224 + * This closes a framebuffer previously added via ADDFB/ADDFB2. The IOCTL 1225 + * argument is a framebuffer object ID. 1226 + * 1227 + * This IOCTL is similar to &DRM_IOCTL_MODE_RMFB, except it doesn't disable 1228 + * planes and CRTCs. 
As long as the framebuffer is used by a plane, it's kept 1229 + * alive. When the plane no longer uses the framebuffer (because the 1230 + * framebuffer is replaced with another one, or the plane is disabled), the 1231 + * framebuffer is cleaned up. 1232 + * 1233 + * This is useful to implement flicker-free transitions between two processes. 1234 + * 1235 + * Depending on the threat model, user-space may want to ensure that the 1236 + * framebuffer doesn't expose any sensitive user information: closed 1237 + * framebuffers attached to a plane can be read back by the next DRM master. 1238 + */ 1239 + #define DRM_IOCTL_MODE_CLOSEFB DRM_IOWR(0xD0, struct drm_mode_closefb) 1270 1240 1271 1241 /* 1272 1242 * Device specific ioctls should only be in their respective headers
+6 -6
tools/include/uapi/drm/i915_drm.h
··· 693 693 #define I915_PARAM_HAS_EXEC_FENCE 44 694 694 695 695 /* Query whether DRM_I915_GEM_EXECBUFFER2 supports the ability to capture 696 - * user specified bufffers for post-mortem debugging of GPU hangs. See 696 + * user-specified buffers for post-mortem debugging of GPU hangs. See 697 697 * EXEC_OBJECT_CAPTURE. 698 698 */ 699 699 #define I915_PARAM_HAS_EXEC_CAPTURE 45 ··· 1606 1606 * is accurate. 1607 1607 * 1608 1608 * The returned dword is split into two fields to indicate both 1609 - * the engine classess on which the object is being read, and the 1609 + * the engine classes on which the object is being read, and the 1610 1610 * engine class on which it is currently being written (if any). 1611 1611 * 1612 1612 * The low word (bits 0:15) indicate if the object is being written ··· 1815 1815 __u32 handle; 1816 1816 1817 1817 /* Advice: either the buffer will be needed again in the near future, 1818 - * or wont be and could be discarded under memory pressure. 1818 + * or won't be and could be discarded under memory pressure. 1819 1819 */ 1820 1820 __u32 madv; 1821 1821 ··· 3246 3246 * // enough to hold our array of engines. The kernel will fill out the 3247 3247 * // item.length for us, which is the number of bytes we need. 3248 3248 * // 3249 - * // Alternatively a large buffer can be allocated straight away enabling 3249 + * // Alternatively a large buffer can be allocated straightaway enabling 3250 3250 * // querying in one pass, in which case item.length should contain the 3251 3251 * // length of the provided buffer. 3252 3252 * err = ioctl(fd, DRM_IOCTL_I915_QUERY, &query); ··· 3256 3256 * // Now that we allocated the required number of bytes, we call the ioctl 3257 3257 * // again, this time with the data_ptr pointing to our newly allocated 3258 3258 * // blob, which the kernel can then populate with info on all engines. 
3259 - * item.data_ptr = (uintptr_t)&info, 3259 + * item.data_ptr = (uintptr_t)&info; 3260 3260 * 3261 3261 * err = ioctl(fd, DRM_IOCTL_I915_QUERY, &query); 3262 3262 * if (err) ... ··· 3286 3286 /** 3287 3287 * struct drm_i915_engine_info 3288 3288 * 3289 - * Describes one engine and it's capabilities as known to the driver. 3289 + * Describes one engine and its capabilities as known to the driver. 3290 3290 */ 3291 3291 struct drm_i915_engine_info { 3292 3292 /** @engine: Engine class and instance. */
+3
tools/include/uapi/linux/fcntl.h
··· 116 116 #define AT_HANDLE_FID AT_REMOVEDIR /* file handle is needed to 117 117 compare object identity and may not 118 118 be usable to open_by_handle_at(2) */ 119 + #if defined(__KERNEL__) 120 + #define AT_GETATTR_NOSEC 0x80000000 121 + #endif 119 122 120 123 #endif /* _UAPI_LINUX_FCNTL_H */
+50 -90
tools/include/uapi/linux/kvm.h
··· 16 16 17 17 #define KVM_API_VERSION 12 18 18 19 - /* *** Deprecated interfaces *** */ 20 - 21 - #define KVM_TRC_SHIFT 16 22 - 23 - #define KVM_TRC_ENTRYEXIT (1 << KVM_TRC_SHIFT) 24 - #define KVM_TRC_HANDLER (1 << (KVM_TRC_SHIFT + 1)) 25 - 26 - #define KVM_TRC_VMENTRY (KVM_TRC_ENTRYEXIT + 0x01) 27 - #define KVM_TRC_VMEXIT (KVM_TRC_ENTRYEXIT + 0x02) 28 - #define KVM_TRC_PAGE_FAULT (KVM_TRC_HANDLER + 0x01) 29 - 30 - #define KVM_TRC_HEAD_SIZE 12 31 - #define KVM_TRC_CYCLE_SIZE 8 32 - #define KVM_TRC_EXTRA_MAX 7 33 - 34 - #define KVM_TRC_INJ_VIRQ (KVM_TRC_HANDLER + 0x02) 35 - #define KVM_TRC_REDELIVER_EVT (KVM_TRC_HANDLER + 0x03) 36 - #define KVM_TRC_PEND_INTR (KVM_TRC_HANDLER + 0x04) 37 - #define KVM_TRC_IO_READ (KVM_TRC_HANDLER + 0x05) 38 - #define KVM_TRC_IO_WRITE (KVM_TRC_HANDLER + 0x06) 39 - #define KVM_TRC_CR_READ (KVM_TRC_HANDLER + 0x07) 40 - #define KVM_TRC_CR_WRITE (KVM_TRC_HANDLER + 0x08) 41 - #define KVM_TRC_DR_READ (KVM_TRC_HANDLER + 0x09) 42 - #define KVM_TRC_DR_WRITE (KVM_TRC_HANDLER + 0x0A) 43 - #define KVM_TRC_MSR_READ (KVM_TRC_HANDLER + 0x0B) 44 - #define KVM_TRC_MSR_WRITE (KVM_TRC_HANDLER + 0x0C) 45 - #define KVM_TRC_CPUID (KVM_TRC_HANDLER + 0x0D) 46 - #define KVM_TRC_INTR (KVM_TRC_HANDLER + 0x0E) 47 - #define KVM_TRC_NMI (KVM_TRC_HANDLER + 0x0F) 48 - #define KVM_TRC_VMMCALL (KVM_TRC_HANDLER + 0x10) 49 - #define KVM_TRC_HLT (KVM_TRC_HANDLER + 0x11) 50 - #define KVM_TRC_CLTS (KVM_TRC_HANDLER + 0x12) 51 - #define KVM_TRC_LMSW (KVM_TRC_HANDLER + 0x13) 52 - #define KVM_TRC_APIC_ACCESS (KVM_TRC_HANDLER + 0x14) 53 - #define KVM_TRC_TDP_FAULT (KVM_TRC_HANDLER + 0x15) 54 - #define KVM_TRC_GTLB_WRITE (KVM_TRC_HANDLER + 0x16) 55 - #define KVM_TRC_STLB_WRITE (KVM_TRC_HANDLER + 0x17) 56 - #define KVM_TRC_STLB_INVAL (KVM_TRC_HANDLER + 0x18) 57 - #define KVM_TRC_PPC_INSTR (KVM_TRC_HANDLER + 0x19) 58 - 59 - struct kvm_user_trace_setup { 60 - __u32 buf_size; 61 - __u32 buf_nr; 62 - }; 63 - 64 - #define __KVM_DEPRECATED_MAIN_W_0x06 \ 65 - _IOW(KVMIO, 0x06, struct 
kvm_user_trace_setup) 66 - #define __KVM_DEPRECATED_MAIN_0x07 _IO(KVMIO, 0x07) 67 - #define __KVM_DEPRECATED_MAIN_0x08 _IO(KVMIO, 0x08) 68 - 69 - #define __KVM_DEPRECATED_VM_R_0x70 _IOR(KVMIO, 0x70, struct kvm_assigned_irq) 70 - 71 - struct kvm_breakpoint { 72 - __u32 enabled; 73 - __u32 padding; 74 - __u64 address; 75 - }; 76 - 77 - struct kvm_debug_guest { 78 - __u32 enabled; 79 - __u32 pad; 80 - struct kvm_breakpoint breakpoints[4]; 81 - __u32 singlestep; 82 - }; 83 - 84 - #define __KVM_DEPRECATED_VCPU_W_0x87 _IOW(KVMIO, 0x87, struct kvm_debug_guest) 85 - 86 - /* *** End of deprecated interfaces *** */ 87 - 88 - 89 19 /* for KVM_SET_USER_MEMORY_REGION */ 90 20 struct kvm_userspace_memory_region { 91 21 __u32 slot; ··· 25 95 __u64 userspace_addr; /* start of the userspace allocated memory */ 26 96 }; 27 97 98 + /* for KVM_SET_USER_MEMORY_REGION2 */ 99 + struct kvm_userspace_memory_region2 { 100 + __u32 slot; 101 + __u32 flags; 102 + __u64 guest_phys_addr; 103 + __u64 memory_size; 104 + __u64 userspace_addr; 105 + __u64 guest_memfd_offset; 106 + __u32 guest_memfd; 107 + __u32 pad1; 108 + __u64 pad2[14]; 109 + }; 110 + 28 111 /* 29 112 * The bit 0 ~ bit 15 of kvm_userspace_memory_region::flags are visible for 30 113 * userspace, other bits are reserved for kvm internal use which are defined ··· 45 102 */ 46 103 #define KVM_MEM_LOG_DIRTY_PAGES (1UL << 0) 47 104 #define KVM_MEM_READONLY (1UL << 1) 105 + #define KVM_MEM_GUEST_MEMFD (1UL << 2) 48 106 49 107 /* for KVM_IRQ_LINE */ 50 108 struct kvm_irq_level { ··· 209 265 #define KVM_EXIT_RISCV_CSR 36 210 266 #define KVM_EXIT_NOTIFY 37 211 267 #define KVM_EXIT_LOONGARCH_IOCSR 38 268 + #define KVM_EXIT_MEMORY_FAULT 39 212 269 213 270 /* For KVM_EXIT_INTERNAL_ERROR */ 214 271 /* Emulate instruction failed. 
*/ ··· 463 518 #define KVM_NOTIFY_CONTEXT_INVALID (1 << 0) 464 519 __u32 flags; 465 520 } notify; 521 + /* KVM_EXIT_MEMORY_FAULT */ 522 + struct { 523 + #define KVM_MEMORY_EXIT_FLAG_PRIVATE (1ULL << 3) 524 + __u64 flags; 525 + __u64 gpa; 526 + __u64 size; 527 + } memory_fault; 466 528 /* Fix the size of the union. */ 467 529 char padding[256]; 468 530 }; ··· 897 945 */ 898 946 #define KVM_GET_VCPU_MMAP_SIZE _IO(KVMIO, 0x04) /* in bytes */ 899 947 #define KVM_GET_SUPPORTED_CPUID _IOWR(KVMIO, 0x05, struct kvm_cpuid2) 900 - #define KVM_TRACE_ENABLE __KVM_DEPRECATED_MAIN_W_0x06 901 - #define KVM_TRACE_PAUSE __KVM_DEPRECATED_MAIN_0x07 902 - #define KVM_TRACE_DISABLE __KVM_DEPRECATED_MAIN_0x08 903 948 #define KVM_GET_EMULATED_CPUID _IOWR(KVMIO, 0x09, struct kvm_cpuid2) 904 949 #define KVM_GET_MSR_FEATURE_INDEX_LIST _IOWR(KVMIO, 0x0a, struct kvm_msr_list) 905 950 ··· 1150 1201 #define KVM_CAP_ARM_EAGER_SPLIT_CHUNK_SIZE 228 1151 1202 #define KVM_CAP_ARM_SUPPORTED_BLOCK_SIZES 229 1152 1203 #define KVM_CAP_ARM_SUPPORTED_REG_MASK_RANGES 230 1204 + #define KVM_CAP_USER_MEMORY2 231 1205 + #define KVM_CAP_MEMORY_FAULT_INFO 232 1206 + #define KVM_CAP_MEMORY_ATTRIBUTES 233 1207 + #define KVM_CAP_GUEST_MEMFD 234 1208 + #define KVM_CAP_VM_TYPES 235 1153 1209 1154 1210 #ifdef KVM_CAP_IRQ_ROUTING 1155 1211 ··· 1245 1291 #define KVM_XEN_HVM_CONFIG_EVTCHN_2LEVEL (1 << 4) 1246 1292 #define KVM_XEN_HVM_CONFIG_EVTCHN_SEND (1 << 5) 1247 1293 #define KVM_XEN_HVM_CONFIG_RUNSTATE_UPDATE_FLAG (1 << 6) 1294 + #define KVM_XEN_HVM_CONFIG_PVCLOCK_TSC_UNSTABLE (1 << 7) 1248 1295 1249 1296 struct kvm_xen_hvm_config { 1250 1297 __u32 flags; ··· 1438 1483 struct kvm_userspace_memory_region) 1439 1484 #define KVM_SET_TSS_ADDR _IO(KVMIO, 0x47) 1440 1485 #define KVM_SET_IDENTITY_MAP_ADDR _IOW(KVMIO, 0x48, __u64) 1486 + #define KVM_SET_USER_MEMORY_REGION2 _IOW(KVMIO, 0x49, \ 1487 + struct kvm_userspace_memory_region2) 1441 1488 1442 1489 /* enable ucontrol for s390 */ 1443 1490 struct kvm_s390_ucas_mapping 
{ ··· 1464 1507 _IOW(KVMIO, 0x67, struct kvm_coalesced_mmio_zone) 1465 1508 #define KVM_UNREGISTER_COALESCED_MMIO \ 1466 1509 _IOW(KVMIO, 0x68, struct kvm_coalesced_mmio_zone) 1467 - #define KVM_ASSIGN_PCI_DEVICE _IOR(KVMIO, 0x69, \ 1468 - struct kvm_assigned_pci_dev) 1469 1510 #define KVM_SET_GSI_ROUTING _IOW(KVMIO, 0x6a, struct kvm_irq_routing) 1470 - /* deprecated, replaced by KVM_ASSIGN_DEV_IRQ */ 1471 - #define KVM_ASSIGN_IRQ __KVM_DEPRECATED_VM_R_0x70 1472 - #define KVM_ASSIGN_DEV_IRQ _IOW(KVMIO, 0x70, struct kvm_assigned_irq) 1473 1511 #define KVM_REINJECT_CONTROL _IO(KVMIO, 0x71) 1474 - #define KVM_DEASSIGN_PCI_DEVICE _IOW(KVMIO, 0x72, \ 1475 - struct kvm_assigned_pci_dev) 1476 - #define KVM_ASSIGN_SET_MSIX_NR _IOW(KVMIO, 0x73, \ 1477 - struct kvm_assigned_msix_nr) 1478 - #define KVM_ASSIGN_SET_MSIX_ENTRY _IOW(KVMIO, 0x74, \ 1479 - struct kvm_assigned_msix_entry) 1480 - #define KVM_DEASSIGN_DEV_IRQ _IOW(KVMIO, 0x75, struct kvm_assigned_irq) 1481 1512 #define KVM_IRQFD _IOW(KVMIO, 0x76, struct kvm_irqfd) 1482 1513 #define KVM_CREATE_PIT2 _IOW(KVMIO, 0x77, struct kvm_pit_config) 1483 1514 #define KVM_SET_BOOT_CPU_ID _IO(KVMIO, 0x78) ··· 1482 1537 * KVM_CAP_VM_TSC_CONTROL to set defaults for a VM */ 1483 1538 #define KVM_SET_TSC_KHZ _IO(KVMIO, 0xa2) 1484 1539 #define KVM_GET_TSC_KHZ _IO(KVMIO, 0xa3) 1485 - /* Available with KVM_CAP_PCI_2_3 */ 1486 - #define KVM_ASSIGN_SET_INTX_MASK _IOW(KVMIO, 0xa4, \ 1487 - struct kvm_assigned_pci_dev) 1488 1540 /* Available with KVM_CAP_SIGNAL_MSI */ 1489 1541 #define KVM_SIGNAL_MSI _IOW(KVMIO, 0xa5, struct kvm_msi) 1490 1542 /* Available with KVM_CAP_PPC_GET_SMMU_INFO */ ··· 1534 1592 #define KVM_SET_SREGS _IOW(KVMIO, 0x84, struct kvm_sregs) 1535 1593 #define KVM_TRANSLATE _IOWR(KVMIO, 0x85, struct kvm_translation) 1536 1594 #define KVM_INTERRUPT _IOW(KVMIO, 0x86, struct kvm_interrupt) 1537 - /* KVM_DEBUG_GUEST is no longer supported, use KVM_SET_GUEST_DEBUG instead */ 1538 - #define KVM_DEBUG_GUEST 
__KVM_DEPRECATED_VCPU_W_0x87 1539 1595 #define KVM_GET_MSRS _IOWR(KVMIO, 0x88, struct kvm_msrs) 1540 1596 #define KVM_SET_MSRS _IOW(KVMIO, 0x89, struct kvm_msrs) 1541 1597 #define KVM_SET_CPUID _IOW(KVMIO, 0x8a, struct kvm_cpuid) ··· 2206 2266 2207 2267 /* flags for kvm_s390_zpci_op->u.reg_aen.flags */ 2208 2268 #define KVM_S390_ZPCIOP_REGAEN_HOST (1 << 0) 2269 + 2270 + /* Available with KVM_CAP_MEMORY_ATTRIBUTES */ 2271 + #define KVM_SET_MEMORY_ATTRIBUTES _IOW(KVMIO, 0xd2, struct kvm_memory_attributes) 2272 + 2273 + struct kvm_memory_attributes { 2274 + __u64 address; 2275 + __u64 size; 2276 + __u64 attributes; 2277 + __u64 flags; 2278 + }; 2279 + 2280 + #define KVM_MEMORY_ATTRIBUTE_PRIVATE (1ULL << 3) 2281 + 2282 + #define KVM_CREATE_GUEST_MEMFD _IOWR(KVMIO, 0xd4, struct kvm_create_guest_memfd) 2283 + 2284 + struct kvm_create_guest_memfd { 2285 + __u64 size; 2286 + __u64 flags; 2287 + __u64 reserved[6]; 2288 + }; 2209 2289 2210 2290 #endif /* __LINUX_KVM_H */
+70
tools/include/uapi/linux/mount.h
··· 138 138 /* List of all mount_attr versions. */ 139 139 #define MOUNT_ATTR_SIZE_VER0 32 /* sizeof first published struct */ 140 140 141 + 142 + /* 143 + * Structure for getting mount/superblock/filesystem info with statmount(2). 144 + * 145 + * The interface is similar to statx(2): individual fields or groups can be 146 + * selected with the @mask argument of statmount(). Kernel will set the @mask 147 + * field according to the supported fields. 148 + * 149 + * If string fields are selected, then the caller needs to pass a buffer that 150 + * has space after the fixed part of the structure. Nul terminated strings are 151 + * copied there and offsets relative to @str are stored in the relevant fields. 152 + * If the buffer is too small, then EOVERFLOW is returned. The actually used 153 + * size is returned in @size. 154 + */ 155 + struct statmount { 156 + __u32 size; /* Total size, including strings */ 157 + __u32 __spare1; 158 + __u64 mask; /* What results were written */ 159 + __u32 sb_dev_major; /* Device ID */ 160 + __u32 sb_dev_minor; 161 + __u64 sb_magic; /* ..._SUPER_MAGIC */ 162 + __u32 sb_flags; /* SB_{RDONLY,SYNCHRONOUS,DIRSYNC,LAZYTIME} */ 163 + __u32 fs_type; /* [str] Filesystem type */ 164 + __u64 mnt_id; /* Unique ID of mount */ 165 + __u64 mnt_parent_id; /* Unique ID of parent (for root == mnt_id) */ 166 + __u32 mnt_id_old; /* Reused IDs used in proc/.../mountinfo */ 167 + __u32 mnt_parent_id_old; 168 + __u64 mnt_attr; /* MOUNT_ATTR_... 
*/ 169 + __u64 mnt_propagation; /* MS_{SHARED,SLAVE,PRIVATE,UNBINDABLE} */ 170 + __u64 mnt_peer_group; /* ID of shared peer group */ 171 + __u64 mnt_master; /* Mount receives propagation from this ID */ 172 + __u64 propagate_from; /* Propagation from in current namespace */ 173 + __u32 mnt_root; /* [str] Root of mount relative to root of fs */ 174 + __u32 mnt_point; /* [str] Mountpoint relative to current root */ 175 + __u64 __spare2[50]; 176 + char str[]; /* Variable size part containing strings */ 177 + }; 178 + 179 + /* 180 + * Structure for passing mount ID and miscellaneous parameters to statmount(2) 181 + * and listmount(2). 182 + * 183 + * For statmount(2) @param represents the request mask. 184 + * For listmount(2) @param represents the last listed mount id (or zero). 185 + */ 186 + struct mnt_id_req { 187 + __u32 size; 188 + __u32 spare; 189 + __u64 mnt_id; 190 + __u64 param; 191 + }; 192 + 193 + /* List of all mnt_id_req versions. */ 194 + #define MNT_ID_REQ_SIZE_VER0 24 /* sizeof first published struct */ 195 + 196 + /* 197 + * @mask bits for statmount(2) 198 + */ 199 + #define STATMOUNT_SB_BASIC 0x00000001U /* Want/got sb_... */ 200 + #define STATMOUNT_MNT_BASIC 0x00000002U /* Want/got mnt_... */ 201 + #define STATMOUNT_PROPAGATE_FROM 0x00000004U /* Want/got propagate_from */ 202 + #define STATMOUNT_MNT_ROOT 0x00000008U /* Want/got mnt_root */ 203 + #define STATMOUNT_MNT_POINT 0x00000010U /* Want/got mnt_point */ 204 + #define STATMOUNT_FS_TYPE 0x00000020U /* Want/got fs_type */ 205 + 206 + /* 207 + * Special @mnt_id values that can be passed to listmount 208 + */ 209 + #define LSMT_ROOT 0xffffffffffffffff /* root mount */ 210 + 141 211 #endif /* _UAPI_LINUX_MOUNT_H */
+1
tools/include/uapi/linux/stat.h
··· 154 154 #define STATX_BTIME 0x00000800U /* Want/got stx_btime */ 155 155 #define STATX_MNT_ID 0x00001000U /* Got stx_mnt_id */ 156 156 #define STATX_DIOALIGN 0x00002000U /* Want/got direct I/O alignment info */ 157 + #define STATX_MNT_ID_UNIQUE 0x00004000U /* Want/got extended stx_mount_id */ 157 158 158 159 #define STATX__RESERVED 0x80000000U /* Reserved for future struct statx expansion */ 159 160
+4
tools/perf/Documentation/perf-list.txt
··· 47 47 --json:: 48 48 Output in JSON format. 49 49 50 + -o:: 51 + --output=:: 52 + Output file name. By default output is written to stdout. 53 + 50 54 [[EVENT_MODIFIERS]] 51 55 EVENT MODIFIERS 52 56 ---------------
+10
tools/perf/Makefile.perf
··· 236 236 SHELLCHECK := $(shell which shellcheck 2> /dev/null) 237 237 endif 238 238 239 + # shellcheck is using in tools/perf/tests/Build with option -a/--check-sourced ( 240 + # introduced in v0.4.7) and -S/--severity (introduced in v0.6.0). So make the 241 + # minimal shellcheck version as v0.6.0. 242 + ifneq ($(SHELLCHECK),) 243 + ifeq ($(shell expr $(shell $(SHELLCHECK) --version | grep version: | \ 244 + sed -e 's/.\+ \([0-9]\+\).\([0-9]\+\).\([0-9]\+\)/\1\2\3/g') \< 060), 1) 245 + SHELLCHECK := 246 + endif 247 + endif 248 + 239 249 export srctree OUTPUT RM CC CXX LD AR CFLAGS CXXFLAGS V BISON FLEX AWK 240 250 export HOSTCC HOSTLD HOSTAR HOSTCFLAGS SHELLCHECK 241 251
+129 -82
tools/perf/builtin-list.c
··· 30 30 * functions. 31 31 */ 32 32 struct print_state { 33 + /** @fp: File to write output to. */ 34 + FILE *fp; 33 35 /** 34 36 * @pmu_glob: Optionally restrict PMU and metric matching to PMU or 35 37 * debugfs subsystem name. ··· 68 66 { 69 67 struct print_state *print_state = ps; 70 68 71 - if (!print_state->name_only && pager_in_use()) 72 - printf("\nList of pre-defined events (to be used in -e or -M):\n\n"); 69 + if (!print_state->name_only && pager_in_use()) { 70 + fprintf(print_state->fp, 71 + "\nList of pre-defined events (to be used in -e or -M):\n\n"); 72 + } 73 73 } 74 74 75 75 static void default_print_end(void *print_state __maybe_unused) {} 76 76 77 - static void wordwrap(const char *s, int start, int max, int corr) 77 + static void wordwrap(FILE *fp, const char *s, int start, int max, int corr) 78 78 { 79 79 int column = start; 80 80 int n; ··· 86 82 int wlen = strcspn(s, " \t\n"); 87 83 88 84 if ((column + wlen >= max && column > start) || saw_newline) { 89 - printf("\n%*s", start, ""); 85 + fprintf(fp, "\n%*s", start, ""); 90 86 column = start + corr; 91 87 } 92 - n = printf("%s%.*s", column > start ? " " : "", wlen, s); 88 + n = fprintf(fp, "%s%.*s", column > start ? 
" " : "", wlen, s); 93 89 if (n <= 0) 94 90 break; 95 91 saw_newline = s[wlen] == '\n'; ··· 108 104 { 109 105 struct print_state *print_state = ps; 110 106 int pos; 107 + FILE *fp = print_state->fp; 111 108 112 109 if (deprecated && !print_state->deprecated) 113 110 return; ··· 124 119 125 120 if (print_state->name_only) { 126 121 if (event_alias && strlen(event_alias)) 127 - printf("%s ", event_alias); 122 + fprintf(fp, "%s ", event_alias); 128 123 else 129 - printf("%s ", event_name); 124 + fprintf(fp, "%s ", event_name); 130 125 return; 131 126 } 132 127 133 128 if (strcmp(print_state->last_topic, topic ?: "")) { 134 129 if (topic) 135 - printf("\n%s:\n", topic); 130 + fprintf(fp, "\n%s:\n", topic); 136 131 zfree(&print_state->last_topic); 137 132 print_state->last_topic = strdup(topic ?: ""); 138 133 } 139 134 140 135 if (event_alias && strlen(event_alias)) 141 - pos = printf(" %s OR %s", event_name, event_alias); 136 + pos = fprintf(fp, " %s OR %s", event_name, event_alias); 142 137 else 143 - pos = printf(" %s", event_name); 138 + pos = fprintf(fp, " %s", event_name); 144 139 145 140 if (!topic && event_type_desc) { 146 141 for (; pos < 53; pos++) 147 - putchar(' '); 148 - printf("[%s]\n", event_type_desc); 142 + fputc(' ', fp); 143 + fprintf(fp, "[%s]\n", event_type_desc); 149 144 } else 150 - putchar('\n'); 145 + fputc('\n', fp); 151 146 152 147 if (desc && print_state->desc) { 153 148 char *desc_with_unit = NULL; ··· 160 155 ? "%s. Unit: %s" : "%s Unit: %s", 161 156 desc, pmu_name); 162 157 } 163 - printf("%*s", 8, "["); 164 - wordwrap(desc_len > 0 ? desc_with_unit : desc, 8, pager_get_columns(), 0); 165 - printf("]\n"); 158 + fprintf(fp, "%*s", 8, "["); 159 + wordwrap(fp, desc_len > 0 ? 
desc_with_unit : desc, 8, pager_get_columns(), 0); 160 + fprintf(fp, "]\n"); 166 161 free(desc_with_unit); 167 162 } 168 163 long_desc = long_desc ?: desc; 169 164 if (long_desc && print_state->long_desc) { 170 - printf("%*s", 8, "["); 171 - wordwrap(long_desc, 8, pager_get_columns(), 0); 172 - printf("]\n"); 165 + fprintf(fp, "%*s", 8, "["); 166 + wordwrap(fp, long_desc, 8, pager_get_columns(), 0); 167 + fprintf(fp, "]\n"); 173 168 } 174 169 175 170 if (print_state->detailed && encoding_desc) { 176 - printf("%*s", 8, ""); 177 - wordwrap(encoding_desc, 8, pager_get_columns(), 0); 178 - putchar('\n'); 171 + fprintf(fp, "%*s", 8, ""); 172 + wordwrap(fp, encoding_desc, 8, pager_get_columns(), 0); 173 + fputc('\n', fp); 179 174 } 180 175 } 181 176 ··· 189 184 const char *unit __maybe_unused) 190 185 { 191 186 struct print_state *print_state = ps; 187 + FILE *fp = print_state->fp; 192 188 193 189 if (print_state->event_glob && 194 190 (!print_state->metrics || !name || !strglobmatch(name, print_state->event_glob)) && ··· 198 192 199 193 if (!print_state->name_only && !print_state->last_metricgroups) { 200 194 if (print_state->metricgroups) { 201 - printf("\nMetric Groups:\n"); 195 + fprintf(fp, "\nMetric Groups:\n"); 202 196 if (!print_state->metrics) 203 - putchar('\n'); 197 + fputc('\n', fp); 204 198 } else { 205 - printf("\nMetrics:\n\n"); 199 + fprintf(fp, "\nMetrics:\n\n"); 206 200 } 207 201 } 208 202 if (!print_state->last_metricgroups || 209 203 strcmp(print_state->last_metricgroups, group ?: "")) { 210 204 if (group && print_state->metricgroups) { 211 205 if (print_state->name_only) 212 - printf("%s ", group); 206 + fprintf(fp, "%s ", group); 213 207 else if (print_state->metrics) { 214 208 const char *gdesc = describe_metricgroup(group); 215 209 216 210 if (gdesc) 217 - printf("\n%s: [%s]\n", group, gdesc); 211 + fprintf(fp, "\n%s: [%s]\n", group, gdesc); 218 212 else 219 - printf("\n%s:\n", group); 213 + fprintf(fp, "\n%s:\n", group); 220 214 } else 221 - 
printf("%s\n", group); 215 + fprintf(fp, "%s\n", group); 222 216 } 223 217 zfree(&print_state->last_metricgroups); 224 218 print_state->last_metricgroups = strdup(group ?: ""); ··· 229 223 if (print_state->name_only) { 230 224 if (print_state->metrics && 231 225 !strlist__has_entry(print_state->visited_metrics, name)) { 232 - printf("%s ", name); 226 + fprintf(fp, "%s ", name); 233 227 strlist__add(print_state->visited_metrics, name); 234 228 } 235 229 return; 236 230 } 237 - printf(" %s\n", name); 231 + fprintf(fp, " %s\n", name); 238 232 239 233 if (desc && print_state->desc) { 240 - printf("%*s", 8, "["); 241 - wordwrap(desc, 8, pager_get_columns(), 0); 242 - printf("]\n"); 234 + fprintf(fp, "%*s", 8, "["); 235 + wordwrap(fp, desc, 8, pager_get_columns(), 0); 236 + fprintf(fp, "]\n"); 243 237 } 244 238 if (long_desc && print_state->long_desc) { 245 - printf("%*s", 8, "["); 246 - wordwrap(long_desc, 8, pager_get_columns(), 0); 247 - printf("]\n"); 239 + fprintf(fp, "%*s", 8, "["); 240 + wordwrap(fp, long_desc, 8, pager_get_columns(), 0); 241 + fprintf(fp, "]\n"); 248 242 } 249 243 if (expr && print_state->detailed) { 250 - printf("%*s", 8, "["); 251 - wordwrap(expr, 8, pager_get_columns(), 0); 252 - printf("]\n"); 244 + fprintf(fp, "%*s", 8, "["); 245 + wordwrap(fp, expr, 8, pager_get_columns(), 0); 246 + fprintf(fp, "]\n"); 253 247 } 254 248 if (threshold && print_state->detailed) { 255 - printf("%*s", 8, "["); 256 - wordwrap(threshold, 8, pager_get_columns(), 0); 257 - printf("]\n"); 249 + fprintf(fp, "%*s", 8, "["); 250 + wordwrap(fp, threshold, 8, pager_get_columns(), 0); 251 + fprintf(fp, "]\n"); 258 252 } 259 253 } 260 254 261 255 struct json_print_state { 256 + /** @fp: File to write output to. */ 257 + FILE *fp; 262 258 /** Should a separator be printed prior to the next item? 
*/ 263 259 bool need_sep; 264 260 }; 265 261 266 - static void json_print_start(void *print_state __maybe_unused) 262 + static void json_print_start(void *ps) 267 263 { 268 - printf("[\n"); 264 + struct json_print_state *print_state = ps; 265 + FILE *fp = print_state->fp; 266 + 267 + fprintf(fp, "[\n"); 269 268 } 270 269 271 270 static void json_print_end(void *ps) 272 271 { 273 272 struct json_print_state *print_state = ps; 273 + FILE *fp = print_state->fp; 274 274 275 - printf("%s]\n", print_state->need_sep ? "\n" : ""); 275 + fprintf(fp, "%s]\n", print_state->need_sep ? "\n" : ""); 276 276 } 277 277 278 - static void fix_escape_printf(struct strbuf *buf, const char *fmt, ...) 278 + static void fix_escape_fprintf(FILE *fp, struct strbuf *buf, const char *fmt, ...) 279 279 { 280 280 va_list args; 281 281 ··· 330 318 } 331 319 } 332 320 va_end(args); 333 - fputs(buf->buf, stdout); 321 + fputs(buf->buf, fp); 334 322 } 335 323 336 324 static void json_print_event(void *ps, const char *pmu_name, const char *topic, ··· 342 330 { 343 331 struct json_print_state *print_state = ps; 344 332 bool need_sep = false; 333 + FILE *fp = print_state->fp; 345 334 struct strbuf buf; 346 335 347 336 strbuf_init(&buf, 0); 348 - printf("%s{\n", print_state->need_sep ? ",\n" : ""); 337 + fprintf(fp, "%s{\n", print_state->need_sep ? ",\n" : ""); 349 338 print_state->need_sep = true; 350 339 if (pmu_name) { 351 - fix_escape_printf(&buf, "\t\"Unit\": \"%S\"", pmu_name); 340 + fix_escape_fprintf(fp, &buf, "\t\"Unit\": \"%S\"", pmu_name); 352 341 need_sep = true; 353 342 } 354 343 if (topic) { 355 - fix_escape_printf(&buf, "%s\t\"Topic\": \"%S\"", need_sep ? ",\n" : "", topic); 344 + fix_escape_fprintf(fp, &buf, "%s\t\"Topic\": \"%S\"", 345 + need_sep ? ",\n" : "", 346 + topic); 356 347 need_sep = true; 357 348 } 358 349 if (event_name) { 359 - fix_escape_printf(&buf, "%s\t\"EventName\": \"%S\"", need_sep ? 
",\n" : "", 360 - event_name); 350 + fix_escape_fprintf(fp, &buf, "%s\t\"EventName\": \"%S\"", 351 + need_sep ? ",\n" : "", 352 + event_name); 361 353 need_sep = true; 362 354 } 363 355 if (event_alias && strlen(event_alias)) { 364 - fix_escape_printf(&buf, "%s\t\"EventAlias\": \"%S\"", need_sep ? ",\n" : "", 365 - event_alias); 356 + fix_escape_fprintf(fp, &buf, "%s\t\"EventAlias\": \"%S\"", 357 + need_sep ? ",\n" : "", 358 + event_alias); 366 359 need_sep = true; 367 360 } 368 361 if (scale_unit && strlen(scale_unit)) { 369 - fix_escape_printf(&buf, "%s\t\"ScaleUnit\": \"%S\"", need_sep ? ",\n" : "", 370 - scale_unit); 362 + fix_escape_fprintf(fp, &buf, "%s\t\"ScaleUnit\": \"%S\"", 363 + need_sep ? ",\n" : "", 364 + scale_unit); 371 365 need_sep = true; 372 366 } 373 367 if (event_type_desc) { 374 - fix_escape_printf(&buf, "%s\t\"EventType\": \"%S\"", need_sep ? ",\n" : "", 375 - event_type_desc); 368 + fix_escape_fprintf(fp, &buf, "%s\t\"EventType\": \"%S\"", 369 + need_sep ? ",\n" : "", 370 + event_type_desc); 376 371 need_sep = true; 377 372 } 378 373 if (deprecated) { 379 - fix_escape_printf(&buf, "%s\t\"Deprecated\": \"%S\"", need_sep ? ",\n" : "", 380 - deprecated ? "1" : "0"); 374 + fix_escape_fprintf(fp, &buf, "%s\t\"Deprecated\": \"%S\"", 375 + need_sep ? ",\n" : "", 376 + deprecated ? "1" : "0"); 381 377 need_sep = true; 382 378 } 383 379 if (desc) { 384 - fix_escape_printf(&buf, "%s\t\"BriefDescription\": \"%S\"", need_sep ? ",\n" : "", 385 - desc); 380 + fix_escape_fprintf(fp, &buf, "%s\t\"BriefDescription\": \"%S\"", 381 + need_sep ? ",\n" : "", 382 + desc); 386 383 need_sep = true; 387 384 } 388 385 if (long_desc) { 389 - fix_escape_printf(&buf, "%s\t\"PublicDescription\": \"%S\"", need_sep ? ",\n" : "", 390 - long_desc); 386 + fix_escape_fprintf(fp, &buf, "%s\t\"PublicDescription\": \"%S\"", 387 + need_sep ? 
",\n" : "", 388 + long_desc); 391 389 need_sep = true; 392 390 } 393 391 if (encoding_desc) { 394 - fix_escape_printf(&buf, "%s\t\"Encoding\": \"%S\"", need_sep ? ",\n" : "", 395 - encoding_desc); 392 + fix_escape_fprintf(fp, &buf, "%s\t\"Encoding\": \"%S\"", 393 + need_sep ? ",\n" : "", 394 + encoding_desc); 396 395 need_sep = true; 397 396 } 398 - printf("%s}", need_sep ? "\n" : ""); 397 + fprintf(fp, "%s}", need_sep ? "\n" : ""); 399 398 strbuf_release(&buf); 400 399 } 401 400 ··· 417 394 { 418 395 struct json_print_state *print_state = ps; 419 396 bool need_sep = false; 397 + FILE *fp = print_state->fp; 420 398 struct strbuf buf; 421 399 422 400 strbuf_init(&buf, 0); 423 - printf("%s{\n", print_state->need_sep ? ",\n" : ""); 401 + fprintf(fp, "%s{\n", print_state->need_sep ? ",\n" : ""); 424 402 print_state->need_sep = true; 425 403 if (group) { 426 - fix_escape_printf(&buf, "\t\"MetricGroup\": \"%S\"", group); 404 + fix_escape_fprintf(fp, &buf, "\t\"MetricGroup\": \"%S\"", group); 427 405 need_sep = true; 428 406 } 429 407 if (name) { 430 - fix_escape_printf(&buf, "%s\t\"MetricName\": \"%S\"", need_sep ? ",\n" : "", name); 408 + fix_escape_fprintf(fp, &buf, "%s\t\"MetricName\": \"%S\"", 409 + need_sep ? ",\n" : "", 410 + name); 431 411 need_sep = true; 432 412 } 433 413 if (expr) { 434 - fix_escape_printf(&buf, "%s\t\"MetricExpr\": \"%S\"", need_sep ? ",\n" : "", expr); 414 + fix_escape_fprintf(fp, &buf, "%s\t\"MetricExpr\": \"%S\"", 415 + need_sep ? ",\n" : "", 416 + expr); 435 417 need_sep = true; 436 418 } 437 419 if (threshold) { 438 - fix_escape_printf(&buf, "%s\t\"MetricThreshold\": \"%S\"", need_sep ? ",\n" : "", 439 - threshold); 420 + fix_escape_fprintf(fp, &buf, "%s\t\"MetricThreshold\": \"%S\"", 421 + need_sep ? ",\n" : "", 422 + threshold); 440 423 need_sep = true; 441 424 } 442 425 if (unit) { 443 - fix_escape_printf(&buf, "%s\t\"ScaleUnit\": \"%S\"", need_sep ? 
",\n" : "", unit); 426 + fix_escape_fprintf(fp, &buf, "%s\t\"ScaleUnit\": \"%S\"", 427 + need_sep ? ",\n" : "", 428 + unit); 444 429 need_sep = true; 445 430 } 446 431 if (desc) { 447 - fix_escape_printf(&buf, "%s\t\"BriefDescription\": \"%S\"", need_sep ? ",\n" : "", 448 - desc); 432 + fix_escape_fprintf(fp, &buf, "%s\t\"BriefDescription\": \"%S\"", 433 + need_sep ? ",\n" : "", 434 + desc); 449 435 need_sep = true; 450 436 } 451 437 if (long_desc) { 452 - fix_escape_printf(&buf, "%s\t\"PublicDescription\": \"%S\"", need_sep ? ",\n" : "", 453 - long_desc); 438 + fix_escape_fprintf(fp, &buf, "%s\t\"PublicDescription\": \"%S\"", 439 + need_sep ? ",\n" : "", 440 + long_desc); 454 441 need_sep = true; 455 442 } 456 - printf("%s}", need_sep ? "\n" : ""); 443 + fprintf(fp, "%s}", need_sep ? "\n" : ""); 457 444 strbuf_release(&buf); 458 445 } 459 446 ··· 482 449 int cmd_list(int argc, const char **argv) 483 450 { 484 451 int i, ret = 0; 485 - struct print_state default_ps = {}; 486 - struct print_state json_ps = {}; 452 + struct print_state default_ps = { 453 + .fp = stdout, 454 + }; 455 + struct print_state json_ps = { 456 + .fp = stdout, 457 + }; 487 458 void *ps = &default_ps; 488 459 struct print_callbacks print_cb = { 489 460 .print_start = default_print_start, ··· 498 461 }; 499 462 const char *cputype = NULL; 500 463 const char *unit_name = NULL; 464 + const char *output_path = NULL; 501 465 bool json = false; 502 466 struct option list_options[] = { 503 467 OPT_BOOLEAN(0, "raw-dump", &default_ps.name_only, "Dump raw events"), ··· 509 471 "Print longer event descriptions."), 510 472 OPT_BOOLEAN(0, "details", &default_ps.detailed, 511 473 "Print information on the perf event names and expressions used internally by events."), 474 + OPT_STRING('o', "output", &output_path, "file", "output file name"), 512 475 OPT_BOOLEAN(0, "deprecated", &default_ps.deprecated, 513 476 "Print deprecated events."), 514 477 OPT_STRING(0, "cputype", &cputype, "cpu type", ··· 535 496 536 
497 argc = parse_options(argc, argv, list_options, list_usage, 537 498 PARSE_OPT_STOP_AT_NON_OPTION); 499 + 500 + if (output_path) { 501 + default_ps.fp = fopen(output_path, "w"); 502 + json_ps.fp = default_ps.fp; 503 + } 538 504 539 505 setup_pager(); 540 506 ··· 662 618 free(default_ps.last_topic); 663 619 free(default_ps.last_metricgroups); 664 620 strlist__delete(default_ps.visited_metrics); 621 + if (output_path) 622 + fclose(default_ps.fp); 623 + 665 624 return ret; 666 625 }
+2 -2
tools/perf/builtin-record.c
··· 4089 4089 } 4090 4090 4091 4091 if (rec->switch_output.num_files) { 4092 - rec->switch_output.filenames = calloc(sizeof(char *), 4093 - rec->switch_output.num_files); 4092 + rec->switch_output.filenames = calloc(rec->switch_output.num_files, 4093 + sizeof(char *)); 4094 4094 if (!rec->switch_output.filenames) { 4095 4095 err = -EINVAL; 4096 4096 goto out_opts;
+1 -1
tools/perf/builtin-top.c
··· 357 357 358 358 static void prompt_integer(int *target, const char *msg) 359 359 { 360 - char *buf = malloc(0), *p; 360 + char *buf = NULL, *p; 361 361 size_t dummy = 0; 362 362 int tmp; 363 363
+118 -136
tools/perf/pmu-events/arch/x86/alderlake/adl-metrics.json
··· 114 114 }, 115 115 { 116 116 "BriefDescription": "Counts the number of issue slots that were not consumed by the backend due to certain allocation restrictions.", 117 - "MetricExpr": "TOPDOWN_BE_BOUND.ALLOC_RESTRICTIONS / tma_info_core_slots", 117 + "MetricExpr": "cpu_atom@TOPDOWN_BE_BOUND.ALLOC_RESTRICTIONS@ / tma_info_core_slots", 118 118 "MetricGroup": "TopdownL3;tma_L3_group;tma_resource_bound_group", 119 119 "MetricName": "tma_alloc_restriction", 120 120 "MetricThreshold": "tma_alloc_restriction > 0.1", ··· 124 124 { 125 125 "BriefDescription": "Counts the total number of issue slots that were not consumed by the backend due to backend stalls", 126 126 "DefaultMetricgroupName": "TopdownL1", 127 - "MetricExpr": "TOPDOWN_BE_BOUND.ALL / tma_info_core_slots", 127 + "MetricExpr": "cpu_atom@TOPDOWN_BE_BOUND.ALL@ / tma_info_core_slots", 128 128 "MetricGroup": "Default;TopdownL1;tma_L1_group", 129 129 "MetricName": "tma_backend_bound", 130 130 "MetricThreshold": "tma_backend_bound > 0.1", ··· 169 169 }, 170 170 { 171 171 "BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to BACLEARS, which occurs when the Branch Target Buffer (BTB) prediction or lack thereof, was corrected by a later branch predictor in the frontend", 172 - "MetricExpr": "TOPDOWN_FE_BOUND.BRANCH_DETECT / tma_info_core_slots", 172 + "MetricExpr": "cpu_atom@TOPDOWN_FE_BOUND.BRANCH_DETECT@ / tma_info_core_slots", 173 173 "MetricGroup": "TopdownL3;tma_L3_group;tma_fetch_latency_group", 174 174 "MetricName": "tma_branch_detect", 175 175 "MetricThreshold": "tma_branch_detect > 0.05", ··· 179 179 }, 180 180 { 181 181 "BriefDescription": "Counts the number of issue slots that were not consumed by the backend due to branch mispredicts.", 182 - "MetricExpr": "TOPDOWN_BAD_SPECULATION.MISPREDICT / tma_info_core_slots", 182 + "MetricExpr": "cpu_atom@TOPDOWN_BAD_SPECULATION.MISPREDICT@ / tma_info_core_slots", 183 183 "MetricGroup": 
"TopdownL2;tma_L2_group;tma_bad_speculation_group", 184 184 "MetricName": "tma_branch_mispredicts", 185 185 "MetricThreshold": "tma_branch_mispredicts > 0.05", ··· 189 189 }, 190 190 { 191 191 "BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to BTCLEARS, which occurs when the Branch Target Buffer (BTB) predicts a taken branch.", 192 - "MetricExpr": "TOPDOWN_FE_BOUND.BRANCH_RESTEER / tma_info_core_slots", 192 + "MetricExpr": "cpu_atom@TOPDOWN_FE_BOUND.BRANCH_RESTEER@ / tma_info_core_slots", 193 193 "MetricGroup": "TopdownL3;tma_L3_group;tma_fetch_latency_group", 194 194 "MetricName": "tma_branch_resteer", 195 195 "MetricThreshold": "tma_branch_resteer > 0.05", ··· 198 198 }, 199 199 { 200 200 "BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to the microcode sequencer (MS).", 201 - "MetricExpr": "TOPDOWN_FE_BOUND.CISC / tma_info_core_slots", 201 + "MetricExpr": "cpu_atom@TOPDOWN_FE_BOUND.CISC@ / tma_info_core_slots", 202 202 "MetricGroup": "TopdownL3;tma_L3_group;tma_fetch_bandwidth_group", 203 203 "MetricName": "tma_cisc", 204 204 "MetricThreshold": "tma_cisc > 0.05", ··· 217 217 }, 218 218 { 219 219 "BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to decode stalls.", 220 - "MetricExpr": "TOPDOWN_FE_BOUND.DECODE / tma_info_core_slots", 220 + "MetricExpr": "cpu_atom@TOPDOWN_FE_BOUND.DECODE@ / tma_info_core_slots", 221 221 "MetricGroup": "TopdownL3;tma_L3_group;tma_fetch_bandwidth_group", 222 222 "MetricName": "tma_decode", 223 223 "MetricThreshold": "tma_decode > 0.05", ··· 235 235 }, 236 236 { 237 237 "BriefDescription": "Counts the number of cycles the core is stalled due to a demand load miss which hit in DRAM or MMIO (Non-DRAM).", 238 - "MetricConstraint": "NO_GROUP_EVENTS", 239 238 "MetricExpr": "cpu_atom@MEM_BOUND_STALLS.LOAD_DRAM_HIT@ / tma_info_core_clks - max((cpu_atom@MEM_BOUND_STALLS.LOAD@ - 
cpu_atom@LD_HEAD.L1_MISS_AT_RET@) / tma_info_core_clks, 0) * cpu_atom@MEM_BOUND_STALLS.LOAD_DRAM_HIT@ / cpu_atom@MEM_BOUND_STALLS.LOAD@", 240 239 "MetricGroup": "TopdownL3;tma_L3_group;tma_memory_bound_group", 241 240 "MetricName": "tma_dram_bound", ··· 244 245 }, 245 246 { 246 247 "BriefDescription": "Counts the number of issue slots that were not consumed by the backend due to a machine clear classified as a fast nuke due to memory ordering, memory disambiguation and memory renaming.", 247 - "MetricExpr": "TOPDOWN_BAD_SPECULATION.FASTNUKE / tma_info_core_slots", 248 + "MetricExpr": "cpu_atom@TOPDOWN_BAD_SPECULATION.FASTNUKE@ / tma_info_core_slots", 248 249 "MetricGroup": "TopdownL3;tma_L3_group;tma_machine_clears_group", 249 250 "MetricName": "tma_fast_nuke", 250 251 "MetricThreshold": "tma_fast_nuke > 0.05", ··· 253 254 }, 254 255 { 255 256 "BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to frontend bandwidth restrictions due to decode, predecode, cisc, and other limitations.", 256 - "MetricExpr": "TOPDOWN_FE_BOUND.FRONTEND_BANDWIDTH / tma_info_core_slots", 257 + "MetricExpr": "cpu_atom@TOPDOWN_FE_BOUND.FRONTEND_BANDWIDTH@ / tma_info_core_slots", 257 258 "MetricGroup": "TopdownL2;tma_L2_group;tma_frontend_bound_group", 258 259 "MetricName": "tma_fetch_bandwidth", 259 260 "MetricThreshold": "tma_fetch_bandwidth > 0.1", ··· 263 264 }, 264 265 { 265 266 "BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to frontend bandwidth restrictions due to decode, predecode, cisc, and other limitations.", 266 - "MetricExpr": "TOPDOWN_FE_BOUND.FRONTEND_LATENCY / tma_info_core_slots", 267 + "MetricExpr": "cpu_atom@TOPDOWN_FE_BOUND.FRONTEND_LATENCY@ / tma_info_core_slots", 267 268 "MetricGroup": "TopdownL2;tma_L2_group;tma_frontend_bound_group", 268 269 "MetricName": "tma_fetch_latency", 269 270 "MetricThreshold": "tma_fetch_latency > 0.15", ··· 282 283 }, 283 284 { 284 285 
"BriefDescription": "Counts the number of floating point divide operations per uop.", 285 - "MetricExpr": "UOPS_RETIRED.FPDIV / tma_info_core_slots", 286 + "MetricExpr": "cpu_atom@UOPS_RETIRED.FPDIV@ / tma_info_core_slots", 286 287 "MetricGroup": "TopdownL3;tma_L3_group;tma_base_group", 287 288 "MetricName": "tma_fpdiv_uops", 288 289 "MetricThreshold": "tma_fpdiv_uops > 0.2", ··· 292 293 { 293 294 "BriefDescription": "Counts the number of issue slots that were not consumed by the backend due to frontend stalls.", 294 295 "DefaultMetricgroupName": "TopdownL1", 295 - "MetricExpr": "TOPDOWN_FE_BOUND.ALL / tma_info_core_slots", 296 + "MetricExpr": "cpu_atom@TOPDOWN_FE_BOUND.ALL@ / tma_info_core_slots", 296 297 "MetricGroup": "Default;TopdownL1;tma_L1_group", 297 298 "MetricName": "tma_frontend_bound", 298 299 "MetricThreshold": "tma_frontend_bound > 0.2", ··· 302 303 }, 303 304 { 304 305 "BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to instruction cache misses.", 305 - "MetricExpr": "TOPDOWN_FE_BOUND.ICACHE / tma_info_core_slots", 306 + "MetricExpr": "cpu_atom@TOPDOWN_FE_BOUND.ICACHE@ / tma_info_core_slots", 306 307 "MetricGroup": "TopdownL3;tma_L3_group;tma_fetch_latency_group", 307 308 "MetricName": "tma_icache_misses", 308 309 "MetricThreshold": "tma_icache_misses > 0.05", ··· 329 330 }, 330 331 { 331 332 "BriefDescription": "Instructions Per Cycle", 332 - "MetricExpr": "INST_RETIRED.ANY / tma_info_core_clks", 333 + "MetricExpr": "cpu_atom@INST_RETIRED.ANY@ / tma_info_core_clks", 333 334 "MetricName": "tma_info_core_ipc", 334 335 "Unit": "cpu_atom" 335 336 }, ··· 341 342 }, 342 343 { 343 344 "BriefDescription": "Uops Per Instruction", 344 - "MetricExpr": "UOPS_RETIRED.ALL / INST_RETIRED.ANY", 345 + "MetricExpr": "cpu_atom@UOPS_RETIRED.ALL@ / INST_RETIRED.ANY", 345 346 "MetricName": "tma_info_core_upi", 346 347 "Unit": "cpu_atom" 347 348 }, ··· 365 366 }, 366 367 { 367 368 "BriefDescription": "Ratio of all branches 
which mispredict", 368 - "MetricExpr": "BR_MISP_RETIRED.ALL_BRANCHES / BR_INST_RETIRED.ALL_BRANCHES", 369 + "MetricExpr": "cpu_atom@BR_MISP_RETIRED.ALL_BRANCHES@ / BR_INST_RETIRED.ALL_BRANCHES", 369 370 "MetricName": "tma_info_inst_mix_branch_mispredict_ratio", 370 371 "Unit": "cpu_atom" 371 372 }, 372 373 { 373 374 "BriefDescription": "Ratio between Mispredicted branches and unknown branches", 374 - "MetricExpr": "BR_MISP_RETIRED.ALL_BRANCHES / BACLEARS.ANY", 375 + "MetricExpr": "cpu_atom@BR_MISP_RETIRED.ALL_BRANCHES@ / BACLEARS.ANY", 375 376 "MetricName": "tma_info_inst_mix_branch_mispredict_to_unknown_branch_ratio", 376 377 "Unit": "cpu_atom" 377 378 }, ··· 389 390 }, 390 391 { 391 392 "BriefDescription": "Instructions per Branch (lower number means higher occurrence rate)", 392 - "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.ALL_BRANCHES", 393 + "MetricExpr": "cpu_atom@INST_RETIRED.ANY@ / BR_INST_RETIRED.ALL_BRANCHES", 393 394 "MetricName": "tma_info_inst_mix_ipbranch", 394 395 "Unit": "cpu_atom" 395 396 }, 396 397 { 397 398 "BriefDescription": "Instruction per (near) call (lower number means higher occurrence rate)", 398 - "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.CALL", 399 + "MetricExpr": "cpu_atom@INST_RETIRED.ANY@ / BR_INST_RETIRED.CALL", 399 400 "MetricName": "tma_info_inst_mix_ipcall", 400 401 "Unit": "cpu_atom" 401 402 }, 402 403 { 403 404 "BriefDescription": "Instructions per Far Branch", 404 - "MetricExpr": "INST_RETIRED.ANY / (cpu_atom@BR_INST_RETIRED.FAR_BRANCH@ / 2)", 405 + "MetricExpr": "cpu_atom@INST_RETIRED.ANY@ / (cpu_atom@BR_INST_RETIRED.FAR_BRANCH@ / 2)", 405 406 "MetricName": "tma_info_inst_mix_ipfarbranch", 406 407 "Unit": "cpu_atom" 407 408 }, 408 409 { 409 410 "BriefDescription": "Instructions per Load", 410 - "MetricExpr": "INST_RETIRED.ANY / MEM_UOPS_RETIRED.ALL_LOADS", 411 + "MetricExpr": "cpu_atom@INST_RETIRED.ANY@ / MEM_UOPS_RETIRED.ALL_LOADS", 411 412 "MetricName": "tma_info_inst_mix_ipload", 412 413 "Unit": "cpu_atom" 
413 414 }, 414 415 { 415 416 "BriefDescription": "Instructions per retired conditional Branch Misprediction where the branch was not taken", 416 - "MetricExpr": "INST_RETIRED.ANY / (cpu_atom@BR_MISP_RETIRED.COND@ - cpu_atom@BR_MISP_RETIRED.COND_TAKEN@)", 417 + "MetricExpr": "cpu_atom@INST_RETIRED.ANY@ / (cpu_atom@BR_MISP_RETIRED.COND@ - cpu_atom@BR_MISP_RETIRED.COND_TAKEN@)", 417 418 "MetricName": "tma_info_inst_mix_ipmisp_cond_ntaken", 418 419 "Unit": "cpu_atom" 419 420 }, 420 421 { 421 422 "BriefDescription": "Instructions per retired conditional Branch Misprediction where the branch was taken", 422 - "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.COND_TAKEN", 423 + "MetricExpr": "cpu_atom@INST_RETIRED.ANY@ / BR_MISP_RETIRED.COND_TAKEN", 423 424 "MetricName": "tma_info_inst_mix_ipmisp_cond_taken", 424 425 "Unit": "cpu_atom" 425 426 }, 426 427 { 427 428 "BriefDescription": "Instructions per retired indirect call or jump Branch Misprediction", 428 - "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.INDIRECT", 429 + "MetricExpr": "cpu_atom@INST_RETIRED.ANY@ / BR_MISP_RETIRED.INDIRECT", 429 430 "MetricName": "tma_info_inst_mix_ipmisp_indirect", 430 431 "Unit": "cpu_atom" 431 432 }, 432 433 { 433 434 "BriefDescription": "Instructions per retired return Branch Misprediction", 434 - "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.RETURN", 435 + "MetricExpr": "cpu_atom@INST_RETIRED.ANY@ / BR_MISP_RETIRED.RETURN", 435 436 "MetricName": "tma_info_inst_mix_ipmisp_ret", 436 437 "Unit": "cpu_atom" 437 438 }, 438 439 { 439 440 "BriefDescription": "Instructions per retired Branch Misprediction", 440 - "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.ALL_BRANCHES", 441 + "MetricExpr": "cpu_atom@INST_RETIRED.ANY@ / BR_MISP_RETIRED.ALL_BRANCHES", 441 442 "MetricName": "tma_info_inst_mix_ipmispredict", 442 443 "Unit": "cpu_atom" 443 444 }, 444 445 { 445 446 "BriefDescription": "Instructions per Store", 446 - "MetricExpr": "INST_RETIRED.ANY / MEM_UOPS_RETIRED.ALL_STORES", 447 
+ "MetricExpr": "cpu_atom@INST_RETIRED.ANY@ / MEM_UOPS_RETIRED.ALL_STORES", 447 448 "MetricName": "tma_info_inst_mix_ipstore", 448 449 "Unit": "cpu_atom" 449 450 }, ··· 479 480 }, 480 481 { 481 482 "BriefDescription": "Cycle cost per DRAM hit", 482 - "MetricExpr": "MEM_BOUND_STALLS.LOAD_DRAM_HIT / MEM_LOAD_UOPS_RETIRED.DRAM_HIT", 483 + "MetricExpr": "cpu_atom@MEM_BOUND_STALLS.LOAD_DRAM_HIT@ / MEM_LOAD_UOPS_RETIRED.DRAM_HIT", 483 484 "MetricName": "tma_info_memory_cycles_per_demand_load_dram_hit", 484 485 "Unit": "cpu_atom" 485 486 }, 486 487 { 487 488 "BriefDescription": "Cycle cost per L2 hit", 488 - "MetricExpr": "MEM_BOUND_STALLS.LOAD_L2_HIT / MEM_LOAD_UOPS_RETIRED.L2_HIT", 489 + "MetricExpr": "cpu_atom@MEM_BOUND_STALLS.LOAD_L2_HIT@ / MEM_LOAD_UOPS_RETIRED.L2_HIT", 489 490 "MetricName": "tma_info_memory_cycles_per_demand_load_l2_hit", 490 491 "Unit": "cpu_atom" 491 492 }, 492 493 { 493 494 "BriefDescription": "Cycle cost per LLC hit", 494 - "MetricExpr": "MEM_BOUND_STALLS.LOAD_LLC_HIT / MEM_LOAD_UOPS_RETIRED.L3_HIT", 495 + "MetricExpr": "cpu_atom@MEM_BOUND_STALLS.LOAD_LLC_HIT@ / MEM_LOAD_UOPS_RETIRED.L3_HIT", 495 496 "MetricName": "tma_info_memory_cycles_per_demand_load_l3_hit", 496 497 "Unit": "cpu_atom" 497 498 }, ··· 503 504 }, 504 505 { 505 506 "BriefDescription": "Average CPU Utilization", 506 - "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / TSC", 507 + "MetricExpr": "cpu_atom@CPU_CLK_UNHALTED.REF_TSC@ / TSC", 507 508 "MetricName": "tma_info_system_cpu_utilization", 508 509 "Unit": "cpu_atom" 509 510 }, ··· 523 524 }, 524 525 { 525 526 "BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to Instruction Table Lookaside Buffer (ITLB) misses.", 526 - "MetricExpr": "TOPDOWN_FE_BOUND.ITLB / tma_info_core_slots", 527 + "MetricExpr": "cpu_atom@TOPDOWN_FE_BOUND.ITLB@ / tma_info_core_slots", 527 528 "MetricGroup": "TopdownL3;tma_L3_group;tma_fetch_latency_group", 528 529 "MetricName": "tma_itlb_misses", 529 530 
"MetricThreshold": "tma_itlb_misses > 0.05", ··· 532 533 }, 533 534 { 534 535 "BriefDescription": "Counts the number of cycles that the oldest load of the load buffer is stalled at retirement due to a load block.", 535 - "MetricExpr": "LD_HEAD.L1_BOUND_AT_RET / tma_info_core_clks", 536 + "MetricExpr": "cpu_atom@LD_HEAD.L1_BOUND_AT_RET@ / tma_info_core_clks", 536 537 "MetricGroup": "TopdownL3;tma_L3_group;tma_memory_bound_group", 537 538 "MetricName": "tma_l1_bound", 538 539 "MetricThreshold": "tma_l1_bound > 0.1", ··· 541 542 }, 542 543 { 543 544 "BriefDescription": "Counts the number of cycles a core is stalled due to a demand load which hit in the L2 Cache.", 544 - "MetricConstraint": "NO_GROUP_EVENTS", 545 545 "MetricExpr": "cpu_atom@MEM_BOUND_STALLS.LOAD_L2_HIT@ / tma_info_core_clks - max((cpu_atom@MEM_BOUND_STALLS.LOAD@ - cpu_atom@LD_HEAD.L1_MISS_AT_RET@) / tma_info_core_clks, 0) * cpu_atom@MEM_BOUND_STALLS.LOAD_L2_HIT@ / cpu_atom@MEM_BOUND_STALLS.LOAD@", 546 546 "MetricGroup": "TopdownL3;tma_L3_group;tma_memory_bound_group", 547 547 "MetricName": "tma_l2_bound", ··· 550 552 }, 551 553 { 552 554 "BriefDescription": "Counts the number of cycles a core is stalled due to a demand load which hit in the Last Level Cache (LLC) or other core with HITE/F/M.", 553 - "MetricConstraint": "NO_GROUP_EVENTS_NMI", 554 555 "MetricExpr": "cpu_atom@MEM_BOUND_STALLS.LOAD_LLC_HIT@ / tma_info_core_clks - max((cpu_atom@MEM_BOUND_STALLS.LOAD@ - cpu_atom@LD_HEAD.L1_MISS_AT_RET@) / tma_info_core_clks, 0) * cpu_atom@MEM_BOUND_STALLS.LOAD_LLC_HIT@ / cpu_atom@MEM_BOUND_STALLS.LOAD@", 555 556 "MetricGroup": "TopdownL3;tma_L3_group;tma_memory_bound_group", 556 557 "MetricName": "tma_l3_bound", ··· 568 571 }, 569 572 { 570 573 "BriefDescription": "Counts the total number of issue slots that were not consumed by the backend because allocation is stalled due to a machine clear (nuke) of any kind including memory ordering and memory disambiguation.", 571 - "MetricExpr": 
"TOPDOWN_BAD_SPECULATION.MACHINE_CLEARS / tma_info_core_slots", 574 + "MetricExpr": "cpu_atom@TOPDOWN_BAD_SPECULATION.MACHINE_CLEARS@ / tma_info_core_slots", 572 575 "MetricGroup": "TopdownL2;tma_L2_group;tma_bad_speculation_group", 573 576 "MetricName": "tma_machine_clears", 574 577 "MetricThreshold": "tma_machine_clears > 0.05", ··· 578 581 }, 579 582 { 580 583 "BriefDescription": "Counts the number of issue slots that were not consumed by the backend due to memory reservation stalls in which a scheduler is not able to accept uops.", 581 - "MetricExpr": "TOPDOWN_BE_BOUND.MEM_SCHEDULER / tma_info_core_slots", 584 + "MetricExpr": "cpu_atom@TOPDOWN_BE_BOUND.MEM_SCHEDULER@ / tma_info_core_slots", 582 585 "MetricGroup": "TopdownL3;tma_L3_group;tma_resource_bound_group", 583 586 "MetricName": "tma_mem_scheduler", 584 587 "MetricThreshold": "tma_mem_scheduler > 0.1", ··· 587 590 }, 588 591 { 589 592 "BriefDescription": "Counts the number of cycles the core is stalled due to stores or loads.", 590 - "MetricExpr": "min(cpu_atom@TOPDOWN_BE_BOUND.ALL@ / tma_info_core_slots, cpu_atom@LD_HEAD.ANY_AT_RET@ / tma_info_core_clks + tma_store_bound)", 593 + "MetricExpr": "min(tma_backend_bound, cpu_atom@LD_HEAD.ANY_AT_RET@ / tma_info_core_clks + tma_store_bound)", 591 594 "MetricGroup": "TopdownL2;tma_L2_group;tma_backend_bound_group", 592 595 "MetricName": "tma_memory_bound", 593 596 "MetricThreshold": "tma_memory_bound > 0.2", ··· 606 609 }, 607 610 { 608 611 "BriefDescription": "Counts the number of uops that are from the complex flows issued by the micro-sequencer (MS)", 609 - "MetricExpr": "UOPS_RETIRED.MS / tma_info_core_slots", 612 + "MetricExpr": "cpu_atom@UOPS_RETIRED.MS@ / tma_info_core_slots", 610 613 "MetricGroup": "TopdownL2;tma_L2_group;tma_retiring_group", 611 614 "MetricName": "tma_ms_uops", 612 615 "MetricThreshold": "tma_ms_uops > 0.05", ··· 617 620 }, 618 621 { 619 622 "BriefDescription": "Counts the number of issue slots that were not consumed by the backend due 
to IEC or FPC RAT stalls, which can be due to FIQ or IEC reservation stalls in which the integer, floating point or SIMD scheduler is not able to accept uops.", 620 - "MetricExpr": "TOPDOWN_BE_BOUND.NON_MEM_SCHEDULER / tma_info_core_slots", 623 + "MetricExpr": "cpu_atom@TOPDOWN_BE_BOUND.NON_MEM_SCHEDULER@ / tma_info_core_slots", 621 624 "MetricGroup": "TopdownL3;tma_L3_group;tma_resource_bound_group", 622 625 "MetricName": "tma_non_mem_scheduler", 623 626 "MetricThreshold": "tma_non_mem_scheduler > 0.1", ··· 626 629 }, 627 630 { 628 631 "BriefDescription": "Counts the number of issue slots that were not consumed by the backend due to a machine clear (slow nuke).", 629 - "MetricExpr": "TOPDOWN_BAD_SPECULATION.NUKE / tma_info_core_slots", 632 + "MetricExpr": "cpu_atom@TOPDOWN_BAD_SPECULATION.NUKE@ / tma_info_core_slots", 630 633 "MetricGroup": "TopdownL3;tma_L3_group;tma_machine_clears_group", 631 634 "MetricName": "tma_nuke", 632 635 "MetricThreshold": "tma_nuke > 0.05", ··· 635 638 }, 636 639 { 637 640 "BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to other common frontend stalls not categorized.", 638 - "MetricExpr": "TOPDOWN_FE_BOUND.OTHER / tma_info_core_slots", 641 + "MetricExpr": "cpu_atom@TOPDOWN_FE_BOUND.OTHER@ / tma_info_core_slots", 639 642 "MetricGroup": "TopdownL3;tma_L3_group;tma_fetch_bandwidth_group", 640 643 "MetricName": "tma_other_fb", 641 644 "MetricThreshold": "tma_other_fb > 0.05", ··· 644 647 }, 645 648 { 646 649 "BriefDescription": "Counts the number of cycles that the oldest load of the load buffer is stalled at retirement due to a number of other load blocks.", 647 - "MetricExpr": "LD_HEAD.OTHER_AT_RET / tma_info_core_clks", 650 + "MetricExpr": "cpu_atom@LD_HEAD.OTHER_AT_RET@ / tma_info_core_clks", 648 651 "MetricGroup": "TopdownL4;tma_L4_group;tma_l1_bound_group", 649 652 "MetricName": "tma_other_l1", 650 653 "MetricThreshold": "tma_other_l1 > 0.05", ··· 680 683 }, 681 684 { 682 685 
"BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to wrong predecodes.", 683 - "MetricExpr": "TOPDOWN_FE_BOUND.PREDECODE / tma_info_core_slots", 686 + "MetricExpr": "cpu_atom@TOPDOWN_FE_BOUND.PREDECODE@ / tma_info_core_slots", 684 687 "MetricGroup": "TopdownL3;tma_L3_group;tma_fetch_bandwidth_group", 685 688 "MetricName": "tma_predecode", 686 689 "MetricThreshold": "tma_predecode > 0.05", ··· 689 692 }, 690 693 { 691 694 "BriefDescription": "Counts the number of issue slots that were not consumed by the backend due to the physical register file unable to accept an entry (marble stalls).", 692 - "MetricExpr": "TOPDOWN_BE_BOUND.REGISTER / tma_info_core_slots", 695 + "MetricExpr": "cpu_atom@TOPDOWN_BE_BOUND.REGISTER@ / tma_info_core_slots", 693 696 "MetricGroup": "TopdownL3;tma_L3_group;tma_resource_bound_group", 694 697 "MetricName": "tma_register", 695 698 "MetricThreshold": "tma_register > 0.1", ··· 698 701 }, 699 702 { 700 703 "BriefDescription": "Counts the number of issue slots that were not consumed by the backend due to the reorder buffer being full (ROB stalls).", 701 - "MetricExpr": "TOPDOWN_BE_BOUND.REORDER_BUFFER / tma_info_core_slots", 704 + "MetricExpr": "cpu_atom@TOPDOWN_BE_BOUND.REORDER_BUFFER@ / tma_info_core_slots", 702 705 "MetricGroup": "TopdownL3;tma_L3_group;tma_resource_bound_group", 703 706 "MetricName": "tma_reorder_buffer", 704 707 "MetricThreshold": "tma_reorder_buffer > 0.1", ··· 719 722 { 720 723 "BriefDescription": "Counts the number of issue slots that result in retirement slots.", 721 724 "DefaultMetricgroupName": "TopdownL1", 722 - "MetricExpr": "TOPDOWN_RETIRING.ALL / tma_info_core_slots", 725 + "MetricExpr": "cpu_atom@TOPDOWN_RETIRING.ALL@ / tma_info_core_slots", 723 726 "MetricGroup": "Default;TopdownL1;tma_L1_group", 724 727 "MetricName": "tma_retiring", 725 728 "MetricThreshold": "tma_retiring > 0.75", ··· 738 741 }, 739 742 { 740 743 "BriefDescription": "Counts the number of issue 
slots that were not consumed by the backend due to scoreboards from the instruction queue (IQ), jump execution unit (JEU), or microcode sequencer (MS).", 741 - "MetricExpr": "TOPDOWN_BE_BOUND.SERIALIZATION / tma_info_core_slots", 744 + "MetricExpr": "cpu_atom@TOPDOWN_BE_BOUND.SERIALIZATION@ / tma_info_core_slots", 742 745 "MetricGroup": "TopdownL3;tma_L3_group;tma_resource_bound_group", 743 746 "MetricName": "tma_serialization", 744 747 "MetricThreshold": "tma_serialization > 0.1", ··· 765 768 }, 766 769 { 767 770 "BriefDescription": "Counts the number of cycles that the oldest load of the load buffer is stalled at retirement due to a first level TLB miss.", 768 - "MetricExpr": "LD_HEAD.DTLB_MISS_AT_RET / tma_info_core_clks", 771 + "MetricExpr": "cpu_atom@LD_HEAD.DTLB_MISS_AT_RET@ / tma_info_core_clks", 769 772 "MetricGroup": "TopdownL4;tma_L4_group;tma_l1_bound_group", 770 773 "MetricName": "tma_stlb_hit", 771 774 "MetricThreshold": "tma_stlb_hit > 0.05", ··· 774 777 }, 775 778 { 776 779 "BriefDescription": "Counts the number of cycles that the oldest load of the load buffer is stalled at retirement due to a second level TLB miss requiring a page walk.", 777 - "MetricExpr": "LD_HEAD.PGWALK_AT_RET / tma_info_core_clks", 780 + "MetricExpr": "cpu_atom@LD_HEAD.PGWALK_AT_RET@ / tma_info_core_clks", 778 781 "MetricGroup": "TopdownL4;tma_L4_group;tma_l1_bound_group", 779 782 "MetricName": "tma_stlb_miss", 780 783 "MetricThreshold": "tma_stlb_miss > 0.05", ··· 792 795 }, 793 796 { 794 797 "BriefDescription": "Counts the number of cycles that the oldest load of the load buffer is stalled at retirement due to a store forward block.", 795 - "MetricConstraint": "NO_GROUP_EVENTS_NMI", 796 - "MetricExpr": "LD_HEAD.ST_ADDR_AT_RET / tma_info_core_clks", 798 + "MetricExpr": "cpu_atom@LD_HEAD.ST_ADDR_AT_RET@ / tma_info_core_clks", 797 799 "MetricGroup": "TopdownL4;tma_L4_group;tma_l1_bound_group", 798 800 "MetricName": "tma_store_fwd_blk", 799 801 "MetricThreshold": 
"tma_store_fwd_blk > 0.05", ··· 871 875 }, 872 876 { 873 877 "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers", 874 - "MetricExpr": "INT_MISC.CLEAR_RESTEER_CYCLES / tma_info_thread_clks + tma_unknown_branches", 878 + "MetricExpr": "cpu_core@INT_MISC.CLEAR_RESTEER_CYCLES@ / tma_info_thread_clks + tma_unknown_branches", 875 879 "MetricGroup": "FetchLat;TopdownL3;tma_L3_group;tma_fetch_latency_group", 876 880 "MetricName": "tma_branch_resteers", 877 881 "MetricThreshold": "tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)", ··· 901 905 }, 902 906 { 903 907 "BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to contested accesses", 904 - "MetricConstraint": "NO_GROUP_EVENTS", 905 908 "MetricExpr": "(25 * tma_info_system_average_frequency * (cpu_core@MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD@ * (cpu_core@OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM@ / (cpu_core@OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM@ + cpu_core@OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD@))) + 24 * tma_info_system_average_frequency * cpu_core@MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS@) * (1 + cpu_core@MEM_LOAD_RETIRED.FB_HIT@ / cpu_core@MEM_LOAD_RETIRED.L1_MISS@ / 2) / tma_info_thread_clks", 906 909 "MetricGroup": "DataSharing;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group", 907 910 "MetricName": "tma_contested_accesses", ··· 922 927 }, 923 928 { 924 929 "BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to data-sharing accesses", 925 - "MetricConstraint": "NO_GROUP_EVENTS", 926 930 "MetricExpr": "24 * tma_info_system_average_frequency * (cpu_core@MEM_LOAD_L3_HIT_RETIRED.XSNP_NO_FWD@ + cpu_core@MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD@ * (1 - cpu_core@OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM@ / (cpu_core@OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM@ + 
cpu_core@OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD@))) * (1 + cpu_core@MEM_LOAD_RETIRED.FB_HIT@ / cpu_core@MEM_LOAD_RETIRED.L1_MISS@ / 2) / tma_info_thread_clks", 927 931 "MetricGroup": "Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group", 928 932 "MetricName": "tma_data_sharing", ··· 942 948 }, 943 949 { 944 950 "BriefDescription": "This metric represents fraction of cycles where the Divider unit was active", 945 - "MetricExpr": "ARITH.DIV_ACTIVE / tma_info_thread_clks", 951 + "MetricExpr": "cpu_core@ARITH.DIV_ACTIVE@ / tma_info_thread_clks", 946 952 "MetricGroup": "TopdownL3;tma_L3_group;tma_core_bound_group", 947 953 "MetricName": "tma_divider", 948 954 "MetricThreshold": "tma_divider > 0.2 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)", ··· 952 958 }, 953 959 { 954 960 "BriefDescription": "This metric estimates how often the CPU was stalled on accesses to external memory (DRAM) by loads", 955 - "MetricConstraint": "NO_GROUP_EVENTS", 956 961 "MetricExpr": "cpu_core@MEMORY_ACTIVITY.STALLS_L3_MISS@ / tma_info_thread_clks", 957 962 "MetricGroup": "MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group", 958 963 "MetricName": "tma_dram_bound", ··· 972 979 }, 973 980 { 974 981 "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to switches from DSB to MITE pipelines", 975 - "MetricExpr": "DSB2MITE_SWITCHES.PENALTY_CYCLES / tma_info_thread_clks", 982 + "MetricExpr": "cpu_core@DSB2MITE_SWITCHES.PENALTY_CYCLES@ / tma_info_thread_clks", 976 983 "MetricGroup": "DSBmiss;FetchLat;TopdownL3;tma_L3_group;tma_fetch_latency_group;tma_issueFB", 977 984 "MetricName": "tma_dsb_switches", 978 985 "MetricThreshold": "tma_dsb_switches > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)", ··· 1012 1019 }, 1013 1020 { 1014 1021 "BriefDescription": "This metric does a *rough estimation* of how often L1D Fill Buffer unavailability limited additional L1D miss memory access requests to proceed", 1015 - 
"MetricExpr": "L1D_PEND_MISS.FB_FULL / tma_info_thread_clks", 1022 + "MetricExpr": "cpu_core@L1D_PEND_MISS.FB_FULL@ / tma_info_thread_clks", 1016 1023 "MetricGroup": "MemoryBW;TopdownL4;tma_L4_group;tma_issueBW;tma_issueSL;tma_issueSmSt;tma_l1_bound_group", 1017 1024 "MetricName": "tma_fb_full", 1018 1025 "MetricThreshold": "tma_fb_full > 0.3", ··· 1147 1154 }, 1148 1155 { 1149 1156 "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to instruction cache misses", 1150 - "MetricExpr": "ICACHE_DATA.STALLS / tma_info_thread_clks", 1157 + "MetricExpr": "cpu_core@ICACHE_DATA.STALLS@ / tma_info_thread_clks", 1151 1158 "MetricGroup": "BigFoot;FetchLat;IcMiss;TopdownL3;tma_L3_group;tma_fetch_latency_group", 1152 1159 "MetricName": "tma_icache_misses", 1153 1160 "MetricThreshold": "tma_icache_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)", ··· 1157 1164 }, 1158 1165 { 1159 1166 "BriefDescription": "Branch Misprediction Cost: Fraction of TMA slots wasted per non-speculative branch misprediction (retired JEClear)", 1160 - "MetricConstraint": "NO_GROUP_EVENTS", 1161 1167 "MetricExpr": "(tma_branch_mispredicts + tma_fetch_latency * tma_mispredicts_resteers / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches)) * tma_info_thread_slots / BR_MISP_RETIRED.ALL_BRANCHES", 1162 1168 "MetricGroup": "Bad;BrMispredicts;tma_issueBM", 1163 1169 "MetricName": "tma_info_bad_spec_branch_misprediction_cost", ··· 1165 1173 }, 1166 1174 { 1167 1175 "BriefDescription": "Instructions per retired mispredicts for conditional non-taken branches (lower number means higher occurrence rate).", 1168 - "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.COND_NTAKEN", 1176 + "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / BR_MISP_RETIRED.COND_NTAKEN", 1169 1177 "MetricGroup": "Bad;BrMispredicts", 1170 1178 "MetricName": "tma_info_bad_spec_ipmisp_cond_ntaken", 1171 1179 "MetricThreshold": 
"tma_info_bad_spec_ipmisp_cond_ntaken < 200", ··· 1173 1181 }, 1174 1182 { 1175 1183 "BriefDescription": "Instructions per retired mispredicts for conditional taken branches (lower number means higher occurrence rate).", 1176 - "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.COND_TAKEN", 1184 + "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / BR_MISP_RETIRED.COND_TAKEN", 1177 1185 "MetricGroup": "Bad;BrMispredicts", 1178 1186 "MetricName": "tma_info_bad_spec_ipmisp_cond_taken", 1179 1187 "MetricThreshold": "tma_info_bad_spec_ipmisp_cond_taken < 200", ··· 1189 1197 }, 1190 1198 { 1191 1199 "BriefDescription": "Instructions per retired mispredicts for return branches (lower number means higher occurrence rate).", 1192 - "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.RET", 1200 + "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / BR_MISP_RETIRED.RET", 1193 1201 "MetricGroup": "Bad;BrMispredicts", 1194 1202 "MetricName": "tma_info_bad_spec_ipmisp_ret", 1195 1203 "MetricThreshold": "tma_info_bad_spec_ipmisp_ret < 500", ··· 1197 1205 }, 1198 1206 { 1199 1207 "BriefDescription": "Number of Instructions per non-speculative Branch Misprediction (JEClear) (lower number means higher occurrence rate)", 1200 - "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.ALL_BRANCHES", 1208 + "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / BR_MISP_RETIRED.ALL_BRANCHES", 1201 1209 "MetricGroup": "Bad;BadSpec;BrMispredicts", 1202 1210 "MetricName": "tma_info_bad_spec_ipmispredict", 1203 1211 "MetricThreshold": "tma_info_bad_spec_ipmispredict < 200", ··· 1205 1213 }, 1206 1214 { 1207 1215 "BriefDescription": "Probability of Core Bound bottleneck hidden by SMT-profiling artifacts", 1208 - "MetricConstraint": "NO_GROUP_EVENTS", 1209 1216 "MetricExpr": "(100 * (1 - tma_core_bound / tma_ports_utilization if tma_core_bound < tma_ports_utilization else 1) if tma_info_system_smt_2t_utilization > 0.5 else 0)", 1210 1217 "MetricGroup": "Cor;SMT", 1211 1218 "MetricName": "tma_info_botlnk_l0_core_bound_likely", 
··· 1213 1222 }, 1214 1223 { 1215 1224 "BriefDescription": "Total pipeline cost of DSB (uop cache) misses - subset of the Instruction_Fetch_BW Bottleneck", 1216 - "MetricConstraint": "NO_GROUP_EVENTS", 1217 1225 "MetricExpr": "100 * (tma_fetch_latency * tma_dsb_switches / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches) + tma_fetch_bandwidth * tma_mite / (tma_dsb + tma_lsd + tma_mite))", 1218 1226 "MetricGroup": "DSBmiss;Fed;tma_issueFB", 1219 1227 "MetricName": "tma_info_botlnk_l2_dsb_misses", ··· 1222 1232 }, 1223 1233 { 1224 1234 "BriefDescription": "Total pipeline cost of Instruction Cache misses - subset of the Big_Code Bottleneck", 1225 - "MetricConstraint": "NO_GROUP_EVENTS", 1226 1235 "MetricExpr": "100 * (tma_fetch_latency * tma_icache_misses / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches))", 1227 1236 "MetricGroup": "Fed;FetchLat;IcMiss;tma_issueFL", 1228 1237 "MetricName": "tma_info_botlnk_l2_ic_misses", ··· 1231 1242 }, 1232 1243 { 1233 1244 "BriefDescription": "Total pipeline cost of instruction fetch related bottlenecks by large code footprint programs (i-side cache; TLB and BTB misses)", 1234 - "MetricConstraint": "NO_GROUP_EVENTS", 1235 1245 "MetricExpr": "100 * tma_fetch_latency * (tma_itlb_misses + tma_icache_misses + tma_unknown_branches) / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches)", 1236 1246 "MetricGroup": "BigFoot;Fed;Frontend;IcMiss;MemoryTLB;tma_issueBC", 1237 1247 "MetricName": "tma_info_bottleneck_big_code", ··· 1249 1261 }, 1250 1262 { 1251 1263 "BriefDescription": "Total pipeline cost of instruction fetch bandwidth related bottlenecks", 1252 - "MetricConstraint": "NO_GROUP_EVENTS", 1253 1264 "MetricExpr": "100 * (tma_frontend_bound - tma_fetch_latency * tma_mispredicts_resteers / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses 
+ tma_lcp + tma_ms_switches)) - tma_info_bottleneck_big_code", 1254 1265 "MetricGroup": "Fed;FetchBW;Frontend", 1255 1266 "MetricName": "tma_info_bottleneck_instruction_fetch_bw", ··· 1257 1270 }, 1258 1271 { 1259 1272 "BriefDescription": "Total pipeline cost of (external) Memory Bandwidth related bottlenecks", 1260 - "MetricConstraint": "NO_GROUP_EVENTS", 1261 1273 "MetricExpr": "100 * tma_memory_bound * (tma_dram_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * (tma_mem_bandwidth / (tma_mem_bandwidth + tma_mem_latency)) + tma_l3_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * (tma_sq_full / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full))) + tma_l1_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * (tma_fb_full / (tma_dtlb_load + tma_fb_full + tma_lock_latency + tma_split_loads + tma_store_fwd_blk))", 1262 1274 "MetricGroup": "Mem;MemoryBW;Offcore;tma_issueBW", 1263 1275 "MetricName": "tma_info_bottleneck_memory_bandwidth", ··· 1266 1280 }, 1267 1281 { 1268 1282 "BriefDescription": "Total pipeline cost of Memory Address Translation related bottlenecks (data-side TLBs)", 1269 - "MetricConstraint": "NO_GROUP_EVENTS", 1270 1283 "MetricExpr": "100 * tma_memory_bound * (tma_l1_bound / max(tma_memory_bound, tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * (tma_dtlb_load / max(tma_l1_bound, tma_dtlb_load + tma_fb_full + tma_lock_latency + tma_split_loads + tma_store_fwd_blk)) + tma_store_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * (tma_dtlb_store / (tma_dtlb_store + tma_false_sharing + tma_split_stores + tma_store_latency + tma_streaming_stores)))", 1271 1284 "MetricGroup": "Mem;MemoryTLB;Offcore;tma_issueTLB", 1272 1285 "MetricName": "tma_info_bottleneck_memory_data_tlbs", ··· 1275 1290 }, 1276 1291 { 1277 1292 "BriefDescription": "Total 
pipeline cost of Memory Latency related bottlenecks (external memory and off-core caches)", 1278 - "MetricConstraint": "NO_GROUP_EVENTS", 1279 1293 "MetricExpr": "100 * tma_memory_bound * (tma_dram_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * (tma_mem_latency / (tma_mem_bandwidth + tma_mem_latency)) + tma_l3_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * (tma_l3_hit_latency / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full)) + tma_l2_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound))", 1280 1294 "MetricGroup": "Mem;MemoryLat;Offcore;tma_issueLat", 1281 1295 "MetricName": "tma_info_bottleneck_memory_latency", ··· 1284 1300 }, 1285 1301 { 1286 1302 "BriefDescription": "Total pipeline cost of Branch Misprediction related bottlenecks", 1287 - "MetricConstraint": "NO_GROUP_EVENTS", 1288 1303 "MetricExpr": "100 * (tma_branch_mispredicts + tma_fetch_latency * tma_mispredicts_resteers / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches))", 1289 1304 "MetricGroup": "Bad;BadSpec;BrMispredicts;tma_issueBM", 1290 1305 "MetricName": "tma_info_bottleneck_mispredictions", ··· 1300 1317 }, 1301 1318 { 1302 1319 "BriefDescription": "Fraction of branches that are non-taken conditionals", 1303 - "MetricExpr": "BR_INST_RETIRED.COND_NTAKEN / BR_INST_RETIRED.ALL_BRANCHES", 1320 + "MetricExpr": "cpu_core@BR_INST_RETIRED.COND_NTAKEN@ / BR_INST_RETIRED.ALL_BRANCHES", 1304 1321 "MetricGroup": "Bad;Branches;CodeGen;PGO", 1305 1322 "MetricName": "tma_info_branches_cond_nt", 1306 1323 "Unit": "cpu_core" 1307 1324 }, 1308 1325 { 1309 1326 "BriefDescription": "Fraction of branches that are taken conditionals", 1310 - "MetricExpr": "BR_INST_RETIRED.COND_TAKEN / BR_INST_RETIRED.ALL_BRANCHES", 1327 + "MetricExpr": "cpu_core@BR_INST_RETIRED.COND_TAKEN@ / BR_INST_RETIRED.ALL_BRANCHES", 1311 
1328 "MetricGroup": "Bad;Branches;CodeGen;PGO", 1312 1329 "MetricName": "tma_info_branches_cond_tk", 1313 1330 "Unit": "cpu_core" ··· 1335 1352 }, 1336 1353 { 1337 1354 "BriefDescription": "Instructions Per Cycle across hyper-threads (per physical core)", 1338 - "MetricExpr": "INST_RETIRED.ANY / tma_info_core_core_clks", 1355 + "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / tma_info_core_core_clks", 1339 1356 "MetricGroup": "Ret;SMT;TmaL1;tma_L1_group", 1340 1357 "MetricName": "tma_info_core_coreipc", 1341 1358 "Unit": "cpu_core" ··· 1357 1374 }, 1358 1375 { 1359 1376 "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is execution) per-core", 1360 - "MetricExpr": "UOPS_EXECUTED.THREAD / (cpu_core@UOPS_EXECUTED.CORE_CYCLES_GE_1@ / 2 if #SMT_on else cpu_core@UOPS_EXECUTED.CORE_CYCLES_GE_1@)", 1377 + "MetricExpr": "cpu_core@UOPS_EXECUTED.THREAD@ / (cpu_core@UOPS_EXECUTED.CORE_CYCLES_GE_1@ / 2 if #SMT_on else cpu_core@UOPS_EXECUTED.CORE_CYCLES_GE_1@)", 1361 1378 "MetricGroup": "Backend;Cor;Pipeline;PortsUtil", 1362 1379 "MetricName": "tma_info_core_ilp", 1363 1380 "Unit": "cpu_core" 1364 1381 }, 1365 1382 { 1366 1383 "BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache)", 1367 - "MetricExpr": "IDQ.DSB_UOPS / cpu_core@UOPS_ISSUED.ANY@", 1384 + "MetricExpr": "cpu_core@IDQ.DSB_UOPS@ / cpu_core@UOPS_ISSUED.ANY@", 1368 1385 "MetricGroup": "DSB;Fed;FetchBW;tma_issueFB", 1369 1386 "MetricName": "tma_info_frontend_dsb_coverage", 1370 1387 "MetricThreshold": "tma_info_frontend_dsb_coverage < 0.7 & tma_info_thread_ipc / 6 > 0.35", ··· 1373 1390 }, 1374 1391 { 1375 1392 "BriefDescription": "Average number of cycles of a switch from the DSB fetch-unit to MITE fetch unit - see DSB_Switches tree node for details.", 1376 - "MetricExpr": "DSB2MITE_SWITCHES.PENALTY_CYCLES / cpu_core@DSB2MITE_SWITCHES.PENALTY_CYCLES\\,cmask\\=1\\,edge@", 1393 + "MetricExpr": "cpu_core@DSB2MITE_SWITCHES.PENALTY_CYCLES@ / 
cpu_core@DSB2MITE_SWITCHES.PENALTY_CYCLES\\,cmask\\=1\\,edge@", 1377 1394 "MetricGroup": "DSBmiss", 1378 1395 "MetricName": "tma_info_frontend_dsb_switch_cost", 1379 1396 "Unit": "cpu_core" 1380 1397 }, 1381 1398 { 1382 1399 "BriefDescription": "Average number of Uops issued by front-end when it issued something", 1383 - "MetricExpr": "UOPS_ISSUED.ANY / cpu_core@UOPS_ISSUED.ANY\\,cmask\\=1@", 1400 + "MetricExpr": "cpu_core@UOPS_ISSUED.ANY@ / cpu_core@UOPS_ISSUED.ANY\\,cmask\\=1@", 1384 1401 "MetricGroup": "Fed;FetchBW", 1385 1402 "MetricName": "tma_info_frontend_fetch_upc", 1386 1403 "Unit": "cpu_core" 1387 1404 }, 1388 1405 { 1389 1406 "BriefDescription": "Average Latency for L1 instruction cache misses", 1390 - "MetricExpr": "ICACHE_DATA.STALLS / cpu_core@ICACHE_DATA.STALLS\\,cmask\\=1\\,edge@", 1407 + "MetricExpr": "cpu_core@ICACHE_DATA.STALLS@ / cpu_core@ICACHE_DATA.STALLS\\,cmask\\=1\\,edge@", 1391 1408 "MetricGroup": "Fed;FetchLat;IcMiss", 1392 1409 "MetricName": "tma_info_frontend_icache_miss_latency", 1393 1410 "Unit": "cpu_core" 1394 1411 }, 1395 1412 { 1396 1413 "BriefDescription": "Instructions per non-speculative DSB miss (lower number means higher occurrence rate)", 1397 - "MetricExpr": "INST_RETIRED.ANY / FRONTEND_RETIRED.ANY_DSB_MISS", 1414 + "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / FRONTEND_RETIRED.ANY_DSB_MISS", 1398 1415 "MetricGroup": "DSBmiss;Fed", 1399 1416 "MetricName": "tma_info_frontend_ipdsb_miss_ret", 1400 1417 "MetricThreshold": "tma_info_frontend_ipdsb_miss_ret < 50", ··· 1423 1440 }, 1424 1441 { 1425 1442 "BriefDescription": "Fraction of Uops delivered by the LSD (Loop Stream Detector; aka Loop Cache)", 1426 - "MetricExpr": "LSD.UOPS / cpu_core@UOPS_ISSUED.ANY@", 1443 + "MetricExpr": "cpu_core@LSD.UOPS@ / cpu_core@UOPS_ISSUED.ANY@", 1427 1444 "MetricGroup": "Fed;LSD", 1428 1445 "MetricName": "tma_info_frontend_lsd_coverage", 1429 1446 "Unit": "cpu_core" 1430 1447 }, 1431 1448 { 1432 1449 "BriefDescription": "Branch instructions per 
taken branch.", 1433 - "MetricExpr": "BR_INST_RETIRED.ALL_BRANCHES / BR_INST_RETIRED.NEAR_TAKEN", 1450 + "MetricExpr": "cpu_core@BR_INST_RETIRED.ALL_BRANCHES@ / BR_INST_RETIRED.NEAR_TAKEN", 1434 1451 "MetricGroup": "Branches;Fed;PGO", 1435 1452 "MetricName": "tma_info_inst_mix_bptkbranch", 1436 1453 "Unit": "cpu_core" ··· 1445 1462 }, 1446 1463 { 1447 1464 "BriefDescription": "Instructions per FP Arithmetic instruction (lower number means higher occurrence rate)", 1448 - "MetricExpr": "INST_RETIRED.ANY / (cpu_core@FP_ARITH_INST_RETIRED.SCALAR_SINGLE\\,umask\\=0x03@ + cpu_core@FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE\\,umask\\=0x3c@)", 1465 + "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / (cpu_core@FP_ARITH_INST_RETIRED.SCALAR_SINGLE\\,umask\\=0x03@ + cpu_core@FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE\\,umask\\=0x3c@)", 1449 1466 "MetricGroup": "Flops;InsType", 1450 1467 "MetricName": "tma_info_inst_mix_iparith", 1451 1468 "MetricThreshold": "tma_info_inst_mix_iparith < 10", ··· 1454 1471 }, 1455 1472 { 1456 1473 "BriefDescription": "Instructions per FP Arithmetic AVX/SSE 128-bit instruction (lower number means higher occurrence rate)", 1457 - "MetricExpr": "INST_RETIRED.ANY / (cpu_core@FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE@ + cpu_core@FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE@)", 1474 + "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / (cpu_core@FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE@ + cpu_core@FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE@)", 1458 1475 "MetricGroup": "Flops;FpVector;InsType", 1459 1476 "MetricName": "tma_info_inst_mix_iparith_avx128", 1460 1477 "MetricThreshold": "tma_info_inst_mix_iparith_avx128 < 10", ··· 1463 1480 }, 1464 1481 { 1465 1482 "BriefDescription": "Instructions per FP Arithmetic AVX* 256-bit instruction (lower number means higher occurrence rate)", 1466 - "MetricExpr": "INST_RETIRED.ANY / (cpu_core@FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE@ + cpu_core@FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE@)", 1483 + "MetricExpr": "cpu_core@INST_RETIRED.ANY@ 
/ (cpu_core@FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE@ + cpu_core@FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE@)", 1467 1484 "MetricGroup": "Flops;FpVector;InsType", 1468 1485 "MetricName": "tma_info_inst_mix_iparith_avx256", 1469 1486 "MetricThreshold": "tma_info_inst_mix_iparith_avx256 < 10", ··· 1472 1489 }, 1473 1490 { 1474 1491 "BriefDescription": "Instructions per FP Arithmetic Scalar Double-Precision instruction (lower number means higher occurrence rate)", 1475 - "MetricExpr": "INST_RETIRED.ANY / FP_ARITH_INST_RETIRED.SCALAR_DOUBLE", 1492 + "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / FP_ARITH_INST_RETIRED.SCALAR_DOUBLE", 1476 1493 "MetricGroup": "Flops;FpScalar;InsType", 1477 1494 "MetricName": "tma_info_inst_mix_iparith_scalar_dp", 1478 1495 "MetricThreshold": "tma_info_inst_mix_iparith_scalar_dp < 10", ··· 1481 1498 }, 1482 1499 { 1483 1500 "BriefDescription": "Instructions per FP Arithmetic Scalar Single-Precision instruction (lower number means higher occurrence rate)", 1484 - "MetricExpr": "INST_RETIRED.ANY / FP_ARITH_INST_RETIRED.SCALAR_SINGLE", 1501 + "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / FP_ARITH_INST_RETIRED.SCALAR_SINGLE", 1485 1502 "MetricGroup": "Flops;FpScalar;InsType", 1486 1503 "MetricName": "tma_info_inst_mix_iparith_scalar_sp", 1487 1504 "MetricThreshold": "tma_info_inst_mix_iparith_scalar_sp < 10", ··· 1490 1507 }, 1491 1508 { 1492 1509 "BriefDescription": "Instructions per Branch (lower number means higher occurrence rate)", 1493 - "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.ALL_BRANCHES", 1510 + "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / BR_INST_RETIRED.ALL_BRANCHES", 1494 1511 "MetricGroup": "Branches;Fed;InsType", 1495 1512 "MetricName": "tma_info_inst_mix_ipbranch", 1496 1513 "MetricThreshold": "tma_info_inst_mix_ipbranch < 8", ··· 1498 1515 }, 1499 1516 { 1500 1517 "BriefDescription": "Instructions per (near) call (lower number means higher occurrence rate)", 1501 - "MetricExpr": "INST_RETIRED.ANY / 
BR_INST_RETIRED.NEAR_CALL", 1518 + "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / BR_INST_RETIRED.NEAR_CALL", 1502 1519 "MetricGroup": "Branches;Fed;PGO", 1503 1520 "MetricName": "tma_info_inst_mix_ipcall", 1504 1521 "MetricThreshold": "tma_info_inst_mix_ipcall < 200", ··· 1506 1523 }, 1507 1524 { 1508 1525 "BriefDescription": "Instructions per Floating Point (FP) Operation (lower number means higher occurrence rate)", 1509 - "MetricExpr": "INST_RETIRED.ANY / (cpu_core@FP_ARITH_INST_RETIRED.SCALAR_SINGLE@ + cpu_core@FP_ARITH_INST_RETIRED.SCALAR_DOUBLE@ + 2 * cpu_core@FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE@ + 4 * (cpu_core@FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE@ + cpu_core@FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE@) + 8 * cpu_core@FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE@)", 1526 + "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / (cpu_core@FP_ARITH_INST_RETIRED.SCALAR_SINGLE@ + cpu_core@FP_ARITH_INST_RETIRED.SCALAR_DOUBLE@ + 2 * cpu_core@FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE@ + 4 * (cpu_core@FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE@ + cpu_core@FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE@) + 8 * cpu_core@FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE@)", 1510 1527 "MetricGroup": "Flops;InsType", 1511 1528 "MetricName": "tma_info_inst_mix_ipflop", 1512 1529 "MetricThreshold": "tma_info_inst_mix_ipflop < 10", ··· 1514 1531 }, 1515 1532 { 1516 1533 "BriefDescription": "Instructions per Load (lower number means higher occurrence rate)", 1517 - "MetricExpr": "INST_RETIRED.ANY / MEM_INST_RETIRED.ALL_LOADS", 1534 + "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / MEM_INST_RETIRED.ALL_LOADS", 1518 1535 "MetricGroup": "InsType", 1519 1536 "MetricName": "tma_info_inst_mix_ipload", 1520 1537 "MetricThreshold": "tma_info_inst_mix_ipload < 3", ··· 1522 1539 }, 1523 1540 { 1524 1541 "BriefDescription": "Instructions per Store (lower number means higher occurrence rate)", 1525 - "MetricExpr": "INST_RETIRED.ANY / MEM_INST_RETIRED.ALL_STORES", 1542 + "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / 
MEM_INST_RETIRED.ALL_STORES", 1526 1543 "MetricGroup": "InsType", 1527 1544 "MetricName": "tma_info_inst_mix_ipstore", 1528 1545 "MetricThreshold": "tma_info_inst_mix_ipstore < 8", ··· 1530 1547 }, 1531 1548 { 1532 1549 "BriefDescription": "Instructions per Software prefetch instruction (of any type: NTA/T0/T1/T2/Prefetch) (lower number means higher occurrence rate)", 1533 - "MetricExpr": "INST_RETIRED.ANY / cpu_core@SW_PREFETCH_ACCESS.T0\\,umask\\=0xF@", 1550 + "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / cpu_core@SW_PREFETCH_ACCESS.T0\\,umask\\=0xF@", 1534 1551 "MetricGroup": "Prefetches", 1535 1552 "MetricName": "tma_info_inst_mix_ipswpf", 1536 1553 "MetricThreshold": "tma_info_inst_mix_ipswpf < 100", ··· 1538 1555 }, 1539 1556 { 1540 1557 "BriefDescription": "Instruction per taken branch", 1541 - "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_TAKEN", 1558 + "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / BR_INST_RETIRED.NEAR_TAKEN", 1542 1559 "MetricGroup": "Branches;Fed;FetchBW;Frontend;PGO;tma_issueFB", 1543 1560 "MetricName": "tma_info_inst_mix_iptb", 1544 1561 "MetricThreshold": "tma_info_inst_mix_iptb < 13", ··· 1638 1655 }, 1639 1656 { 1640 1657 "BriefDescription": "Actual Average Latency for L1 data-cache miss demand load operations (in core cycles)", 1641 - "MetricExpr": "L1D_PEND_MISS.PENDING / MEM_LOAD_COMPLETED.L1_MISS_ANY", 1658 + "MetricExpr": "cpu_core@L1D_PEND_MISS.PENDING@ / MEM_LOAD_COMPLETED.L1_MISS_ANY", 1642 1659 "MetricGroup": "Mem;MemoryBound;MemoryLat", 1643 1660 "MetricName": "tma_info_memory_load_miss_real_latency", 1644 1661 "Unit": "cpu_core" 1645 1662 }, 1646 1663 { 1647 1664 "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss", 1648 - "MetricExpr": "L1D_PEND_MISS.PENDING / L1D_PEND_MISS.PENDING_CYCLES", 1665 + "MetricExpr": "cpu_core@L1D_PEND_MISS.PENDING@ / L1D_PEND_MISS.PENDING_CYCLES", 1649 1666 "MetricGroup": "Mem;MemoryBW;MemoryBound", 1650 1667 
"MetricName": "tma_info_memory_mlp", 1651 1668 "PublicDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. Per-Logical Processor)", ··· 1653 1670 }, 1654 1671 { 1655 1672 "BriefDescription": "Average Parallel L2 cache miss data reads", 1656 - "MetricExpr": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD / OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD", 1673 + "MetricExpr": "cpu_core@OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD@ / OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD", 1657 1674 "MetricGroup": "Memory_BW;Offcore", 1658 1675 "MetricName": "tma_info_memory_oro_data_l2_mlp", 1659 1676 "Unit": "cpu_core" 1660 1677 }, 1661 1678 { 1662 1679 "BriefDescription": "Average Latency for L2 cache miss demand Loads", 1663 - "MetricExpr": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD / OFFCORE_REQUESTS.DEMAND_DATA_RD", 1680 + "MetricExpr": "cpu_core@OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD@ / OFFCORE_REQUESTS.DEMAND_DATA_RD", 1664 1681 "MetricGroup": "Memory_Lat;Offcore", 1665 1682 "MetricName": "tma_info_memory_oro_load_l2_miss_latency", 1666 1683 "Unit": "cpu_core" 1667 1684 }, 1668 1685 { 1669 1686 "BriefDescription": "Average Parallel L2 cache miss demand Loads", 1670 - "MetricExpr": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD / cpu_core@OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD\\,cmask\\=1@", 1687 + "MetricExpr": "cpu_core@OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD@ / cpu_core@OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD\\,cmask\\=1@", 1671 1688 "MetricGroup": "Memory_BW;Offcore", 1672 1689 "MetricName": "tma_info_memory_oro_load_l2_mlp", 1673 1690 "Unit": "cpu_core" 1674 1691 }, 1675 1692 { 1676 1693 "BriefDescription": "Average Latency for L3 cache miss demand Loads", 1677 - "MetricExpr": "OFFCORE_REQUESTS_OUTSTANDING.L3_MISS_DEMAND_DATA_RD / OFFCORE_REQUESTS.L3_MISS_DEMAND_DATA_RD", 1694 + "MetricExpr": "cpu_core@OFFCORE_REQUESTS_OUTSTANDING.L3_MISS_DEMAND_DATA_RD@ / 
OFFCORE_REQUESTS.L3_MISS_DEMAND_DATA_RD", 1678 1695 "MetricGroup": "Memory_Lat;Offcore", 1679 1696 "MetricName": "tma_info_memory_oro_load_l3_miss_latency", 1680 1697 "Unit": "cpu_core" ··· 1738 1755 }, 1739 1756 { 1740 1757 "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is execution) per-thread", 1741 - "MetricExpr": "UOPS_EXECUTED.THREAD / cpu_core@UOPS_EXECUTED.THREAD\\,cmask\\=1@", 1758 + "MetricExpr": "cpu_core@UOPS_EXECUTED.THREAD@ / cpu_core@UOPS_EXECUTED.THREAD\\,cmask\\=1@", 1742 1759 "MetricGroup": "Cor;Pipeline;PortsUtil;SMT", 1743 1760 "MetricName": "tma_info_pipeline_execute", 1744 1761 "Unit": "cpu_core" 1745 1762 }, 1746 1763 { 1747 1764 "BriefDescription": "Instructions per a microcode Assist invocation", 1748 - "MetricExpr": "INST_RETIRED.ANY / cpu_core@ASSISTS.ANY\\,umask\\=0x1B@", 1765 + "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / cpu_core@ASSISTS.ANY\\,umask\\=0x1B@", 1749 1766 "MetricGroup": "Pipeline;Ret;Retire", 1750 1767 "MetricName": "tma_info_pipeline_ipassist", 1751 1768 "MetricThreshold": "tma_info_pipeline_ipassist < 100e3", ··· 1761 1778 }, 1762 1779 { 1763 1780 "BriefDescription": "Estimated fraction of retirement-cycles dealing with repeat instructions", 1764 - "MetricExpr": "INST_RETIRED.REP_ITERATION / cpu_core@UOPS_RETIRED.SLOTS\\,cmask\\=1@", 1781 + "MetricExpr": "cpu_core@INST_RETIRED.REP_ITERATION@ / cpu_core@UOPS_RETIRED.SLOTS\\,cmask\\=1@", 1765 1782 "MetricGroup": "Pipeline;Ret", 1766 1783 "MetricName": "tma_info_pipeline_strings_cycles", 1767 1784 "MetricThreshold": "tma_info_pipeline_strings_cycles > 0.1", ··· 1776 1793 }, 1777 1794 { 1778 1795 "BriefDescription": "Average CPU Utilization", 1779 - "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / TSC", 1796 + "MetricExpr": "cpu_core@CPU_CLK_UNHALTED.REF_TSC@ / TSC", 1780 1797 "MetricGroup": "HPC;Summary", 1781 1798 "MetricName": "tma_info_system_cpu_utilization", 1782 1799 "Unit": "cpu_core" ··· 1799 1816 }, 1800 1817 { 1801 1818 
"BriefDescription": "Instructions per Far Branch ( Far Branches apply upon transition from application to operating system, handling interrupts, exceptions) [lower number means higher occurrence rate]", 1802 - "MetricExpr": "INST_RETIRED.ANY / cpu_core@BR_INST_RETIRED.FAR_BRANCH@u", 1819 + "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / cpu_core@BR_INST_RETIRED.FAR_BRANCH@u", 1803 1820 "MetricGroup": "Branches;OS", 1804 1821 "MetricName": "tma_info_system_ipfarbranch", 1805 1822 "MetricThreshold": "tma_info_system_ipfarbranch < 1e6", ··· 1830 1847 }, 1831 1848 { 1832 1849 "BriefDescription": "Average latency of data read request to external memory (in nanoseconds)", 1850 + "MetricConstraint": "NO_GROUP_EVENTS", 1833 1851 "MetricExpr": "(UNC_ARB_TRK_OCCUPANCY.RD + UNC_ARB_DAT_OCCUPANCY.RD) / UNC_ARB_TRK_REQUESTS.RD", 1834 1852 "MetricGroup": "Mem;MemoryLat;SoC", 1835 1853 "MetricName": "tma_info_system_mem_read_latency", ··· 1839 1855 }, 1840 1856 { 1841 1857 "BriefDescription": "Average latency of all requests to external memory (in Uncore cycles)", 1858 + "MetricConstraint": "NO_GROUP_EVENTS", 1842 1859 "MetricExpr": "(UNC_ARB_TRK_OCCUPANCY.ALL + UNC_ARB_DAT_OCCUPANCY.RD) / UNC_ARB_TRK_REQUESTS.ALL", 1843 1860 "MetricGroup": "Mem;SoC", 1844 1861 "MetricName": "tma_info_system_mem_request_latency", ··· 1882 1897 }, 1883 1898 { 1884 1899 "BriefDescription": "The ratio of Executed- by Issued-Uops", 1885 - "MetricExpr": "UOPS_EXECUTED.THREAD / UOPS_ISSUED.ANY", 1900 + "MetricExpr": "cpu_core@UOPS_EXECUTED.THREAD@ / UOPS_ISSUED.ANY", 1886 1901 "MetricGroup": "Cor;Pipeline", 1887 1902 "MetricName": "tma_info_thread_execute_per_issue", 1888 1903 "PublicDescription": "The ratio of Executed- by Issued-Uops. Ratio > 1 suggests high rate of uop micro-fusions. 
Ratio < 1 suggest high rate of \"execute\" at rename stage.", ··· 1890 1905 }, 1891 1906 { 1892 1907 "BriefDescription": "Instructions Per Cycle (per Logical Processor)", 1893 - "MetricExpr": "INST_RETIRED.ANY / tma_info_thread_clks", 1908 + "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / tma_info_thread_clks", 1894 1909 "MetricGroup": "Ret;Summary", 1895 1910 "MetricName": "tma_info_thread_ipc", 1896 1911 "Unit": "cpu_core" ··· 1957 1972 }, 1958 1973 { 1959 1974 "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses", 1960 - "MetricExpr": "ICACHE_TAG.STALLS / tma_info_thread_clks", 1975 + "MetricExpr": "cpu_core@ICACHE_TAG.STALLS@ / tma_info_thread_clks", 1961 1976 "MetricGroup": "BigFoot;FetchLat;MemoryTLB;TopdownL3;tma_L3_group;tma_fetch_latency_group", 1962 1977 "MetricName": "tma_itlb_misses", 1963 1978 "MetricThreshold": "tma_itlb_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)", ··· 1977 1992 }, 1978 1993 { 1979 1994 "BriefDescription": "This metric estimates how often the CPU was stalled due to L2 cache accesses by loads", 1980 - "MetricConstraint": "NO_GROUP_EVENTS", 1981 1995 "MetricExpr": "(cpu_core@MEMORY_ACTIVITY.STALLS_L1D_MISS@ - cpu_core@MEMORY_ACTIVITY.STALLS_L2_MISS@) / tma_info_thread_clks", 1982 1996 "MetricGroup": "CacheMisses;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group", 1983 1997 "MetricName": "tma_l2_bound", ··· 1987 2003 }, 1988 2004 { 1989 2005 "BriefDescription": "This metric estimates how often the CPU was stalled due to loads accesses to L3 cache or contended with a sibling Core", 1990 - "MetricConstraint": "NO_GROUP_EVENTS_NMI", 1991 2006 "MetricExpr": "(cpu_core@MEMORY_ACTIVITY.STALLS_L2_MISS@ - cpu_core@MEMORY_ACTIVITY.STALLS_L3_MISS@) / tma_info_thread_clks", 1992 2007 "MetricGroup": "CacheMisses;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group", 1993 2008 "MetricName": "tma_l3_bound", ··· 2007 2024 }, 2008 
2025 { 2009 2026 "BriefDescription": "This metric represents fraction of cycles CPU was stalled due to Length Changing Prefixes (LCPs)", 2010 - "MetricExpr": "DECODE.LCP / tma_info_thread_clks", 2027 + "MetricExpr": "cpu_core@DECODE.LCP@ / tma_info_thread_clks", 2011 2028 "MetricGroup": "FetchLat;TopdownL3;tma_L3_group;tma_fetch_latency_group;tma_issueFB", 2012 2029 "MetricName": "tma_lcp", 2013 2030 "MetricThreshold": "tma_lcp > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)", ··· 2028 2045 }, 2029 2046 { 2030 2047 "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port for Load operations", 2031 - "MetricExpr": "UOPS_DISPATCHED.PORT_2_3_10 / (3 * tma_info_core_core_clks)", 2048 + "MetricExpr": "cpu_core@UOPS_DISPATCHED.PORT_2_3_10@ / (3 * tma_info_core_core_clks)", 2032 2049 "MetricGroup": "TopdownL5;tma_L5_group;tma_ports_utilized_3m_group", 2033 2050 "MetricName": "tma_load_op_utilization", 2034 2051 "MetricThreshold": "tma_load_op_utilization > 0.6", ··· 2047 2064 }, 2048 2065 { 2049 2066 "BriefDescription": "This metric estimates the fraction of cycles where the Second-level TLB (STLB) was missed by load accesses, performing a hardware page walk", 2050 - "MetricExpr": "DTLB_LOAD_MISSES.WALK_ACTIVE / tma_info_thread_clks", 2067 + "MetricExpr": "cpu_core@DTLB_LOAD_MISSES.WALK_ACTIVE@ / tma_info_thread_clks", 2051 2068 "MetricGroup": "MemoryTLB;TopdownL5;tma_L5_group;tma_dtlb_load_group", 2052 2069 "MetricName": "tma_load_stlb_miss", 2053 2070 "MetricThreshold": "tma_load_stlb_miss > 0.05 & (tma_dtlb_load > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))", ··· 2056 2073 }, 2057 2074 { 2058 2075 "BriefDescription": "This metric represents fraction of cycles the CPU spent handling cache misses due to lock operations", 2059 - "MetricConstraint": "NO_GROUP_EVENTS", 2060 2076 "MetricExpr": "(16 * max(0, cpu_core@MEM_INST_RETIRED.LOCK_LOADS@ - cpu_core@L2_RQSTS.ALL_RFO@) + 
cpu_core@MEM_INST_RETIRED.LOCK_LOADS@ / cpu_core@MEM_INST_RETIRED.ALL_STORES@ * (10 * cpu_core@L2_RQSTS.RFO_HIT@ + min(cpu_core@CPU_CLK_UNHALTED.THREAD@, cpu_core@OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO@))) / tma_info_thread_clks", 2061 2077 "MetricGroup": "Offcore;TopdownL4;tma_L4_group;tma_issueRFO;tma_l1_bound_group", 2062 2078 "MetricName": "tma_lock_latency", ··· 2118 2136 }, 2119 2137 { 2120 2138 "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to LFENCE Instructions.", 2139 + "MetricConstraint": "NO_GROUP_EVENTS_NMI", 2121 2140 "MetricExpr": "13 * cpu_core@MISC2_RETIRED.LFENCE@ / tma_info_thread_clks", 2122 2141 "MetricGroup": "TopdownL6;tma_L6_group;tma_serializing_operation_group", 2123 2142 "MetricName": "tma_memory_fence", ··· 2128 2145 }, 2129 2146 { 2130 2147 "BriefDescription": "This metric represents fraction of slots where the CPU was retiring memory operations -- uops for memory load or store accesses.", 2131 - "MetricConstraint": "NO_GROUP_EVENTS", 2132 2148 "MetricExpr": "tma_light_operations * cpu_core@MEM_UOP_RETIRED.ANY@ / (tma_retiring * tma_info_thread_slots)", 2133 2149 "MetricGroup": "Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group", 2134 2150 "MetricName": "tma_memory_operations", ··· 2137 2155 }, 2138 2156 { 2139 2157 "BriefDescription": "This metric represents fraction of slots the CPU was retiring uops fetched by the Microcode Sequencer (MS) unit", 2140 - "MetricExpr": "UOPS_RETIRED.MS / tma_info_thread_slots", 2158 + "MetricExpr": "cpu_core@UOPS_RETIRED.MS@ / tma_info_thread_slots", 2141 2159 "MetricGroup": "MicroSeq;TopdownL3;tma_L3_group;tma_heavy_operations_group;tma_issueMC;tma_issueMS", 2142 2160 "MetricName": "tma_microcode_sequencer", 2143 2161 "MetricThreshold": "tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1", ··· 2207 2225 }, 2208 2226 { 2209 2227 "BriefDescription": "This metric represents the remaining light uops fraction the CPU has executed - 
remaining means not covered by other sibling nodes", 2210 - "MetricConstraint": "NO_GROUP_EVENTS", 2211 2228 "MetricExpr": "max(0, tma_light_operations - (tma_fp_arith + tma_int_operations + tma_memory_operations + tma_fused_instructions + tma_non_fused_branches + tma_nop_instructions))", 2212 2229 "MetricGroup": "Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group", 2213 2230 "MetricName": "tma_other_light_ops", ··· 2227 2246 }, 2228 2247 { 2229 2248 "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 0 ([SNB+] ALU; [HSW+] ALU and 2nd branch)", 2230 - "MetricExpr": "UOPS_DISPATCHED.PORT_0 / tma_info_core_core_clks", 2249 + "MetricExpr": "cpu_core@UOPS_DISPATCHED.PORT_0@ / tma_info_core_core_clks", 2231 2250 "MetricGroup": "Compute;TopdownL6;tma_L6_group;tma_alu_op_utilization_group;tma_issue2P", 2232 2251 "MetricName": "tma_port_0", 2233 2252 "MetricThreshold": "tma_port_0 > 0.6", ··· 2237 2256 }, 2238 2257 { 2239 2258 "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 1 (ALU)", 2240 - "MetricExpr": "UOPS_DISPATCHED.PORT_1 / tma_info_core_core_clks", 2259 + "MetricExpr": "cpu_core@UOPS_DISPATCHED.PORT_1@ / tma_info_core_core_clks", 2241 2260 "MetricGroup": "TopdownL6;tma_L6_group;tma_alu_op_utilization_group;tma_issue2P", 2242 2261 "MetricName": "tma_port_1", 2243 2262 "MetricThreshold": "tma_port_1 > 0.6", ··· 2247 2266 }, 2248 2267 { 2249 2268 "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 6 ([HSW+]Primary Branch and simple ALU)", 2250 - "MetricExpr": "UOPS_DISPATCHED.PORT_6 / tma_info_core_core_clks", 2269 + "MetricExpr": "cpu_core@UOPS_DISPATCHED.PORT_6@ / tma_info_core_core_clks", 2251 2270 "MetricGroup": "TopdownL6;tma_L6_group;tma_alu_op_utilization_group;tma_issue2P", 2252 2271 "MetricName": "tma_port_6", 2253 2272 "MetricThreshold": "tma_port_6 > 0.6", ··· 2277 2296 }, 2278 2297 { 
2279 2298 "BriefDescription": "This metric represents fraction of cycles where the CPU executed total of 1 uop per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise)", 2280 - "MetricExpr": "EXE_ACTIVITY.1_PORTS_UTIL / tma_info_thread_clks", 2299 + "MetricExpr": "cpu_core@EXE_ACTIVITY.1_PORTS_UTIL@ / tma_info_thread_clks", 2281 2300 "MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_issueL1;tma_ports_utilization_group", 2282 2301 "MetricName": "tma_ports_utilized_1", 2283 2302 "MetricThreshold": "tma_ports_utilized_1 > 0.2 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))", ··· 2287 2306 }, 2288 2307 { 2289 2308 "BriefDescription": "This metric represents fraction of cycles CPU executed total of 2 uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise)", 2290 - "MetricExpr": "EXE_ACTIVITY.2_PORTS_UTIL / tma_info_thread_clks", 2309 + "MetricConstraint": "NO_GROUP_EVENTS_NMI", 2310 + "MetricExpr": "cpu_core@EXE_ACTIVITY.2_PORTS_UTIL@ / tma_info_thread_clks", 2291 2311 "MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_issue2P;tma_ports_utilization_group", 2292 2312 "MetricName": "tma_ports_utilized_2", 2293 2313 "MetricThreshold": "tma_ports_utilized_2 > 0.15 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))", ··· 2298 2316 }, 2299 2317 { 2300 2318 "BriefDescription": "This metric represents fraction of cycles CPU executed total of 3 or more uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise)", 2301 - "MetricExpr": "UOPS_EXECUTED.CYCLES_GE_3 / tma_info_thread_clks", 2319 + "MetricConstraint": "NO_GROUP_EVENTS_NMI", 2320 + "MetricExpr": "cpu_core@UOPS_EXECUTED.CYCLES_GE_3@ / tma_info_thread_clks", 2302 2321 "MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_ports_utilization_group", 2303 2322 "MetricName": "tma_ports_utilized_3m", 2304 2323 
"MetricThreshold": "tma_ports_utilized_3m > 0.7 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))", ··· 2321 2338 }, 2322 2339 { 2323 2340 "BriefDescription": "This metric represents fraction of cycles the CPU issue-pipeline was stalled due to serializing operations", 2324 - "MetricExpr": "RESOURCE_STALLS.SCOREBOARD / tma_info_thread_clks", 2341 + "MetricExpr": "cpu_core@RESOURCE_STALLS.SCOREBOARD@ / tma_info_thread_clks", 2325 2342 "MetricGroup": "PortsUtil;TopdownL5;tma_L5_group;tma_issueSO;tma_ports_utilized_0_group", 2326 2343 "MetricName": "tma_serializing_operation", 2327 2344 "MetricThreshold": "tma_serializing_operation > 0.1 & (tma_ports_utilized_0 > 0.2 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)))", ··· 2331 2348 }, 2332 2349 { 2333 2350 "BriefDescription": "This metric represents Shuffle (cross \"vector lane\" data transfers) uops fraction the CPU has retired.", 2334 - "MetricExpr": "INT_VEC_RETIRED.SHUFFLES / (tma_retiring * tma_info_thread_slots)", 2351 + "MetricExpr": "cpu_core@INT_VEC_RETIRED.SHUFFLES@ / (tma_retiring * tma_info_thread_slots)", 2335 2352 "MetricGroup": "HPC;Pipeline;TopdownL4;tma_L4_group;tma_int_operations_group", 2336 2353 "MetricName": "tma_shuffles", 2337 2354 "MetricThreshold": "tma_shuffles > 0.1 & (tma_int_operations > 0.1 & tma_light_operations > 0.6)", ··· 2340 2357 }, 2341 2358 { 2342 2359 "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to PAUSE Instructions", 2343 - "MetricExpr": "CPU_CLK_UNHALTED.PAUSE / tma_info_thread_clks", 2360 + "MetricConstraint": "NO_GROUP_EVENTS_NMI", 2361 + "MetricExpr": "cpu_core@CPU_CLK_UNHALTED.PAUSE@ / tma_info_thread_clks", 2344 2362 "MetricGroup": "TopdownL6;tma_L6_group;tma_serializing_operation_group", 2345 2363 "MetricName": "tma_slow_pause", 2346 2364 "MetricThreshold": "tma_slow_pause > 0.05 & (tma_serializing_operation > 0.1 & (tma_ports_utilized_0 > 0.2 & 
(tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))))", ··· 2361 2377 }, 2362 2378 { 2363 2379 "BriefDescription": "This metric represents rate of split store accesses", 2364 - "MetricConstraint": "NO_GROUP_EVENTS_NMI", 2365 - "MetricExpr": "MEM_INST_RETIRED.SPLIT_STORES / tma_info_core_core_clks", 2380 + "MetricExpr": "cpu_core@MEM_INST_RETIRED.SPLIT_STORES@ / tma_info_core_core_clks", 2366 2381 "MetricGroup": "TopdownL4;tma_L4_group;tma_issueSpSt;tma_store_bound_group", 2367 2382 "MetricName": "tma_split_stores", 2368 2383 "MetricThreshold": "tma_split_stores > 0.2 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))", ··· 2381 2398 }, 2382 2399 { 2383 2400 "BriefDescription": "This metric estimates how often CPU was stalled due to RFO store memory accesses; RFO store issue a read-for-ownership request before the write", 2384 - "MetricExpr": "EXE_ACTIVITY.BOUND_ON_STORES / tma_info_thread_clks", 2401 + "MetricExpr": "cpu_core@EXE_ACTIVITY.BOUND_ON_STORES@ / tma_info_thread_clks", 2385 2402 "MetricGroup": "MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group", 2386 2403 "MetricName": "tma_store_bound", 2387 2404 "MetricThreshold": "tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)", ··· 2391 2408 }, 2392 2409 { 2393 2410 "BriefDescription": "This metric roughly estimates fraction of cycles when the memory subsystem had loads blocked since they could not forward data from earlier (in program order) overlapping stores", 2394 - "MetricConstraint": "NO_GROUP_EVENTS_NMI", 2395 2411 "MetricExpr": "13 * cpu_core@LD_BLOCKS.STORE_FORWARD@ / tma_info_thread_clks", 2396 2412 "MetricGroup": "TopdownL4;tma_L4_group;tma_l1_bound_group", 2397 2413 "MetricName": "tma_store_fwd_blk", ··· 2430 2448 }, 2431 2449 { 2432 2450 "BriefDescription": "This metric estimates the fraction of cycles where the STLB was missed by store accesses, performing a hardware page walk", 2433 - "MetricExpr": 
"DTLB_STORE_MISSES.WALK_ACTIVE / tma_info_core_core_clks", 2451 + "MetricExpr": "cpu_core@DTLB_STORE_MISSES.WALK_ACTIVE@ / tma_info_core_core_clks", 2434 2452 "MetricGroup": "MemoryTLB;TopdownL5;tma_L5_group;tma_dtlb_store_group", 2435 2453 "MetricName": "tma_store_stlb_miss", 2436 2454 "MetricThreshold": "tma_store_stlb_miss > 0.05 & (tma_dtlb_store > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))", ··· 2449 2467 }, 2450 2468 { 2451 2469 "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to new branch address clears", 2452 - "MetricExpr": "INT_MISC.UNKNOWN_BRANCH_CYCLES / tma_info_thread_clks", 2470 + "MetricExpr": "cpu_core@INT_MISC.UNKNOWN_BRANCH_CYCLES@ / tma_info_thread_clks", 2453 2471 "MetricGroup": "BigFoot;FetchLat;TopdownL4;tma_L4_group;tma_branch_resteers_group", 2454 2472 "MetricName": "tma_unknown_branches", 2455 2473 "MetricThreshold": "tma_unknown_branches > 0.05 & (tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
-4
tools/perf/pmu-events/arch/x86/alderlaken/adln-metrics.json
··· 195 195 }, 196 196 { 197 197 "BriefDescription": "Counts the number of cycles the core is stalled due to a demand load miss which hit in DRAM or MMIO (Non-DRAM).", 198 - "MetricConstraint": "NO_GROUP_EVENTS", 199 198 "MetricExpr": "MEM_BOUND_STALLS.LOAD_DRAM_HIT / tma_info_core_clks - max((MEM_BOUND_STALLS.LOAD - LD_HEAD.L1_MISS_AT_RET) / tma_info_core_clks, 0) * MEM_BOUND_STALLS.LOAD_DRAM_HIT / MEM_BOUND_STALLS.LOAD", 200 199 "MetricGroup": "TopdownL3;tma_L3_group;tma_memory_bound_group", 201 200 "MetricName": "tma_dram_bound", ··· 456 457 }, 457 458 { 458 459 "BriefDescription": "Counts the number of cycles a core is stalled due to a demand load which hit in the L2 Cache.", 459 - "MetricConstraint": "NO_GROUP_EVENTS", 460 460 "MetricExpr": "MEM_BOUND_STALLS.LOAD_L2_HIT / tma_info_core_clks - max((MEM_BOUND_STALLS.LOAD - LD_HEAD.L1_MISS_AT_RET) / tma_info_core_clks, 0) * MEM_BOUND_STALLS.LOAD_L2_HIT / MEM_BOUND_STALLS.LOAD", 461 461 "MetricGroup": "TopdownL3;tma_L3_group;tma_memory_bound_group", 462 462 "MetricName": "tma_l2_bound", ··· 464 466 }, 465 467 { 466 468 "BriefDescription": "Counts the number of cycles a core is stalled due to a demand load which hit in the Last Level Cache (LLC) or other core with HITE/F/M.", 467 - "MetricConstraint": "NO_GROUP_EVENTS_NMI", 468 469 "MetricExpr": "MEM_BOUND_STALLS.LOAD_LLC_HIT / tma_info_core_clks - max((MEM_BOUND_STALLS.LOAD - LD_HEAD.L1_MISS_AT_RET) / tma_info_core_clks, 0) * MEM_BOUND_STALLS.LOAD_LLC_HIT / MEM_BOUND_STALLS.LOAD", 469 470 "MetricGroup": "TopdownL3;tma_L3_group;tma_memory_bound_group", 470 471 "MetricName": "tma_l3_bound", ··· 680 683 }, 681 684 { 682 685 "BriefDescription": "Counts the number of cycles that the oldest load of the load buffer is stalled at retirement due to a store forward block.", 683 - "MetricConstraint": "NO_GROUP_EVENTS_NMI", 684 686 "MetricExpr": "LD_HEAD.ST_ADDR_AT_RET / tma_info_core_clks", 685 687 "MetricGroup": "TopdownL4;tma_L4_group;tma_l1_bound_group", 686 688 
"MetricName": "tma_store_fwd_blk",
+5 -20
tools/perf/pmu-events/arch/x86/sapphirerapids/spr-metrics.json
··· 400 400 }, 401 401 { 402 402 "BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to contested accesses", 403 - "MetricConstraint": "NO_GROUP_EVENTS", 404 403 "MetricExpr": "(76 * tma_info_system_average_frequency * (MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD * (OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM / (OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM + OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD))) + 75.5 * tma_info_system_average_frequency * MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS) * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks", 405 404 "MetricGroup": "DataSharing;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group", 406 405 "MetricName": "tma_contested_accesses", ··· 420 421 }, 421 422 { 422 423 "BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to data-sharing accesses", 423 - "MetricConstraint": "NO_GROUP_EVENTS", 424 424 "MetricExpr": "75.5 * tma_info_system_average_frequency * (MEM_LOAD_L3_HIT_RETIRED.XSNP_NO_FWD + MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD * (1 - OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM / (OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM + OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD))) * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks", 425 425 "MetricGroup": "Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group", 426 426 "MetricName": "tma_data_sharing", ··· 447 449 }, 448 450 { 449 451 "BriefDescription": "This metric estimates how often the CPU was stalled on accesses to external memory (DRAM) by loads", 450 - "MetricConstraint": "NO_GROUP_EVENTS", 451 452 "MetricExpr": "(MEMORY_ACTIVITY.STALLS_L3_MISS / tma_info_thread_clks - tma_pmm_bound if #has_pmem > 0 else MEMORY_ACTIVITY.STALLS_L3_MISS / tma_info_thread_clks)", 452 453 "MetricGroup": "MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group", 453 454 "MetricName": 
"tma_dram_bound", ··· 653 656 }, 654 657 { 655 658 "BriefDescription": "Branch Misprediction Cost: Fraction of TMA slots wasted per non-speculative branch misprediction (retired JEClear)", 656 - "MetricConstraint": "NO_GROUP_EVENTS", 657 659 "MetricExpr": "(tma_branch_mispredicts + tma_fetch_latency * tma_mispredicts_resteers / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches)) * tma_info_thread_slots / BR_MISP_RETIRED.ALL_BRANCHES", 658 660 "MetricGroup": "Bad;BrMispredicts;tma_issueBM", 659 661 "MetricName": "tma_info_bad_spec_branch_misprediction_cost", ··· 695 699 }, 696 700 { 697 701 "BriefDescription": "Probability of Core Bound bottleneck hidden by SMT-profiling artifacts", 698 - "MetricConstraint": "NO_GROUP_EVENTS", 699 702 "MetricExpr": "(100 * (1 - tma_core_bound / tma_ports_utilization if tma_core_bound < tma_ports_utilization else 1) if tma_info_system_smt_2t_utilization > 0.5 else 0)", 700 703 "MetricGroup": "Cor;SMT", 701 704 "MetricName": "tma_info_botlnk_l0_core_bound_likely", ··· 702 707 }, 703 708 { 704 709 "BriefDescription": "Total pipeline cost of DSB (uop cache) misses - subset of the Instruction_Fetch_BW Bottleneck", 705 - "MetricConstraint": "NO_GROUP_EVENTS", 706 710 "MetricExpr": "100 * (tma_fetch_latency * tma_dsb_switches / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches) + tma_fetch_bandwidth * tma_mite / (tma_dsb + tma_mite))", 707 711 "MetricGroup": "DSBmiss;Fed;tma_issueFB", 708 712 "MetricName": "tma_info_botlnk_l2_dsb_misses", ··· 710 716 }, 711 717 { 712 718 "BriefDescription": "Total pipeline cost of Instruction Cache misses - subset of the Big_Code Bottleneck", 713 - "MetricConstraint": "NO_GROUP_EVENTS", 714 719 "MetricExpr": "100 * (tma_fetch_latency * tma_icache_misses / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches))", 715 720 "MetricGroup": 
"Fed;FetchLat;IcMiss;tma_issueFL", 716 721 "MetricName": "tma_info_botlnk_l2_ic_misses", ··· 718 725 }, 719 726 { 720 727 "BriefDescription": "Total pipeline cost of instruction fetch related bottlenecks by large code footprint programs (i-side cache; TLB and BTB misses)", 721 - "MetricConstraint": "NO_GROUP_EVENTS", 722 728 "MetricExpr": "100 * tma_fetch_latency * (tma_itlb_misses + tma_icache_misses + tma_unknown_branches) / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches)", 723 729 "MetricGroup": "BigFoot;Fed;Frontend;IcMiss;MemoryTLB;tma_issueBC", 724 730 "MetricName": "tma_info_bottleneck_big_code", ··· 734 742 }, 735 743 { 736 744 "BriefDescription": "Total pipeline cost of instruction fetch bandwidth related bottlenecks", 737 - "MetricConstraint": "NO_GROUP_EVENTS", 738 745 "MetricExpr": "100 * (tma_frontend_bound - tma_fetch_latency * tma_mispredicts_resteers / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches)) - tma_info_bottleneck_big_code", 739 746 "MetricGroup": "Fed;FetchBW;Frontend", 740 747 "MetricName": "tma_info_bottleneck_instruction_fetch_bw", ··· 741 750 }, 742 751 { 743 752 "BriefDescription": "Total pipeline cost of (external) Memory Bandwidth related bottlenecks", 744 - "MetricConstraint": "NO_GROUP_EVENTS", 745 753 "MetricExpr": "100 * tma_memory_bound * (tma_dram_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound) * (tma_mem_bandwidth / (tma_mem_bandwidth + tma_mem_latency)) + tma_l3_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound) * (tma_sq_full / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full))) + tma_l1_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound) * (tma_fb_full / (tma_dtlb_load + tma_fb_full + tma_lock_latency + tma_split_loads + 
tma_store_fwd_blk))", 746 754 "MetricGroup": "Mem;MemoryBW;Offcore;tma_issueBW", 747 755 "MetricName": "tma_info_bottleneck_memory_bandwidth", ··· 749 759 }, 750 760 { 751 761 "BriefDescription": "Total pipeline cost of Memory Address Translation related bottlenecks (data-side TLBs)", 752 - "MetricConstraint": "NO_GROUP_EVENTS", 753 762 "MetricExpr": "100 * tma_memory_bound * (tma_l1_bound / max(tma_memory_bound, tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound) * (tma_dtlb_load / max(tma_l1_bound, tma_dtlb_load + tma_fb_full + tma_lock_latency + tma_split_loads + tma_store_fwd_blk)) + tma_store_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound) * (tma_dtlb_store / (tma_dtlb_store + tma_false_sharing + tma_split_stores + tma_store_latency + tma_streaming_stores)))", 754 763 "MetricGroup": "Mem;MemoryTLB;Offcore;tma_issueTLB", 755 764 "MetricName": "tma_info_bottleneck_memory_data_tlbs", ··· 757 768 }, 758 769 { 759 770 "BriefDescription": "Total pipeline cost of Memory Latency related bottlenecks (external memory and off-core caches)", 760 - "MetricConstraint": "NO_GROUP_EVENTS", 761 771 "MetricExpr": "100 * tma_memory_bound * (tma_dram_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound) * (tma_mem_latency / (tma_mem_bandwidth + tma_mem_latency)) + tma_l3_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound) * (tma_l3_hit_latency / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full)) + tma_l2_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound))", 762 772 "MetricGroup": "Mem;MemoryLat;Offcore;tma_issueLat", 763 773 "MetricName": "tma_info_bottleneck_memory_latency", ··· 765 777 }, 766 778 { 767 779 "BriefDescription": "Total pipeline cost of Branch Misprediction related bottlenecks", 768 - 
"MetricConstraint": "NO_GROUP_EVENTS", 769 780 "MetricExpr": "100 * (tma_branch_mispredicts + tma_fetch_latency * tma_mispredicts_resteers / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches))", 770 781 "MetricGroup": "Bad;BadSpec;BrMispredicts;tma_issueBM", 771 782 "MetricName": "tma_info_bottleneck_mispredictions", ··· 1288 1301 }, 1289 1302 { 1290 1303 "BriefDescription": "Average latency of data read request to external memory (in nanoseconds)", 1304 + "MetricConstraint": "NO_GROUP_EVENTS", 1291 1305 "MetricExpr": "1e9 * (UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD / UNC_CHA_TOR_INSERTS.IA_MISS_DRD) / (tma_info_system_socket_clks / duration_time)", 1292 1306 "MetricGroup": "Mem;MemoryLat;SoC", 1293 1307 "MetricName": "tma_info_system_mem_read_latency", ··· 1443 1455 }, 1444 1456 { 1445 1457 "BriefDescription": "This metric estimates how often the CPU was stalled due to L2 cache accesses by loads", 1446 - "MetricConstraint": "NO_GROUP_EVENTS", 1447 1458 "MetricExpr": "(MEMORY_ACTIVITY.STALLS_L1D_MISS - MEMORY_ACTIVITY.STALLS_L2_MISS) / tma_info_thread_clks", 1448 1459 "MetricGroup": "CacheMisses;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group", 1449 1460 "MetricName": "tma_l2_bound", ··· 1452 1465 }, 1453 1466 { 1454 1467 "BriefDescription": "This metric estimates how often the CPU was stalled due to loads accesses to L3 cache or contended with a sibling Core", 1455 - "MetricConstraint": "NO_GROUP_EVENTS_NMI", 1456 1468 "MetricExpr": "(MEMORY_ACTIVITY.STALLS_L2_MISS - MEMORY_ACTIVITY.STALLS_L3_MISS) / tma_info_thread_clks", 1457 1469 "MetricGroup": "CacheMisses;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group", 1458 1470 "MetricName": "tma_l3_bound", ··· 1524 1538 }, 1525 1539 { 1526 1540 "BriefDescription": "This metric represents fraction of cycles the CPU spent handling cache misses due to lock operations", 1527 - "MetricConstraint": "NO_GROUP_EVENTS", 1528 1541 "MetricExpr": 
"(16 * max(0, MEM_INST_RETIRED.LOCK_LOADS - L2_RQSTS.ALL_RFO) + MEM_INST_RETIRED.LOCK_LOADS / MEM_INST_RETIRED.ALL_STORES * (10 * L2_RQSTS.RFO_HIT + min(CPU_CLK_UNHALTED.THREAD, OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO))) / tma_info_thread_clks", 1529 1542 "MetricGroup": "Offcore;TopdownL4;tma_L4_group;tma_issueRFO;tma_l1_bound_group", 1530 1543 "MetricName": "tma_lock_latency", ··· 1581 1596 }, 1582 1597 { 1583 1598 "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to LFENCE Instructions.", 1599 + "MetricConstraint": "NO_GROUP_EVENTS_NMI", 1584 1600 "MetricExpr": "13 * MISC2_RETIRED.LFENCE / tma_info_thread_clks", 1585 1601 "MetricGroup": "TopdownL6;tma_L6_group;tma_serializing_operation_group", 1586 1602 "MetricName": "tma_memory_fence", ··· 1590 1604 }, 1591 1605 { 1592 1606 "BriefDescription": "This metric represents fraction of slots where the CPU was retiring memory operations -- uops for memory load or store accesses.", 1593 - "MetricConstraint": "NO_GROUP_EVENTS", 1594 1607 "MetricExpr": "tma_light_operations * MEM_UOP_RETIRED.ANY / (tma_retiring * tma_info_thread_slots)", 1595 1608 "MetricGroup": "Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group", 1596 1609 "MetricName": "tma_memory_operations", ··· 1661 1676 }, 1662 1677 { 1663 1678 "BriefDescription": "This metric represents the remaining light uops fraction the CPU has executed - remaining means not covered by other sibling nodes", 1664 - "MetricConstraint": "NO_GROUP_EVENTS", 1665 1679 "MetricExpr": "max(0, tma_light_operations - (tma_fp_arith + tma_int_operations + tma_memory_operations + tma_fused_instructions + tma_non_fused_branches + tma_nop_instructions))", 1666 1680 "MetricGroup": "Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group", 1667 1681 "MetricName": "tma_other_light_ops", ··· 1742 1758 }, 1743 1759 { 1744 1760 "BriefDescription": "This metric represents fraction of cycles CPU executed total of 2 uops per cycle on all 
execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise)", 1761 + "MetricConstraint": "NO_GROUP_EVENTS_NMI", 1745 1762 "MetricExpr": "EXE_ACTIVITY.2_PORTS_UTIL / tma_info_thread_clks", 1746 1763 "MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_issue2P;tma_ports_utilization_group", 1747 1764 "MetricName": "tma_ports_utilized_2", ··· 1752 1767 }, 1753 1768 { 1754 1769 "BriefDescription": "This metric represents fraction of cycles CPU executed total of 3 or more uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise)", 1770 + "MetricConstraint": "NO_GROUP_EVENTS_NMI", 1755 1771 "MetricExpr": "UOPS_EXECUTED.CYCLES_GE_3 / tma_info_thread_clks", 1756 1772 "MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_ports_utilization_group", 1757 1773 "MetricName": "tma_ports_utilized_3m", ··· 1808 1822 }, 1809 1823 { 1810 1824 "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to PAUSE Instructions", 1825 + "MetricConstraint": "NO_GROUP_EVENTS_NMI", 1811 1826 "MetricExpr": "CPU_CLK_UNHALTED.PAUSE / tma_info_thread_clks", 1812 1827 "MetricGroup": "TopdownL6;tma_L6_group;tma_serializing_operation_group", 1813 1828 "MetricName": "tma_slow_pause", ··· 1827 1840 }, 1828 1841 { 1829 1842 "BriefDescription": "This metric represents rate of split store accesses", 1830 - "MetricConstraint": "NO_GROUP_EVENTS_NMI", 1831 1843 "MetricExpr": "MEM_INST_RETIRED.SPLIT_STORES / tma_info_core_core_clks", 1832 1844 "MetricGroup": "TopdownL4;tma_L4_group;tma_issueSpSt;tma_store_bound_group", 1833 1845 "MetricName": "tma_split_stores", ··· 1854 1868 }, 1855 1869 { 1856 1870 "BriefDescription": "This metric roughly estimates fraction of cycles when the memory subsystem had loads blocked since they could not forward data from earlier (in program order) overlapping stores", 1857 - "MetricConstraint": "NO_GROUP_EVENTS_NMI", 1858 1871 "MetricExpr": "13 * LD_BLOCKS.STORE_FORWARD / 
tma_info_thread_clks", 1859 1872 "MetricGroup": "TopdownL4;tma_L4_group;tma_l1_bound_group", 1860 1873 "MetricName": "tma_store_fwd_blk",
+24 -10
tools/perf/tests/shell/daemon.sh
··· 414 414 # start daemon 415 415 daemon_start ${config} test 416 416 417 - # send 2 signals 418 - perf daemon signal --config ${config} --session test 419 - perf daemon signal --config ${config} 420 - 421 - # stop daemon 422 - daemon_exit ${config} 423 - 424 - # count is 2 perf.data for signals and 1 for perf record finished 425 - count=`ls ${base}/session-test/*perf.data* | wc -l` 426 - if [ ${count} -ne 3 ]; then 417 + # send 2 signals then exit. Do this in a loop watching the number of 418 + # files to avoid races. If the loop retries more than 600 times then 419 + # give up. 420 + local retries=0 421 + local signals=0 422 + local success=0 423 + while [ ${retries} -lt 600 ] && [ ${success} -eq 0 ]; do 424 + local files 425 + files=`ls ${base}/session-test/*perf.data* 2> /dev/null | wc -l` 426 + if [ ${signals} -eq 0 ]; then 427 + perf daemon signal --config ${config} --session test 428 + signals=1 429 + elif [ ${signals} -eq 1 ] && [ $files -ge 1 ]; then 430 + perf daemon signal --config ${config} 431 + signals=2 432 + elif [ ${signals} -eq 2 ] && [ $files -ge 2 ]; then 433 + daemon_exit ${config} 434 + signals=3 435 + elif [ ${signals} -eq 3 ] && [ $files -ge 3 ]; then 436 + success=1 437 + fi 438 + retries=$((${retries} +1)) 439 + done 440 + if [ ${success} -eq 0 ]; then 427 441 error=1 428 442 echo "FAILED: perf data no generated" 429 443 fi
+18 -3
tools/perf/tests/shell/list.sh
··· 3 3 # SPDX-License-Identifier: GPL-2.0 4 4 5 5 set -e 6 - err=0 7 6 8 7 shelldir=$(dirname "$0") 9 8 # shellcheck source=lib/setup_python.sh 10 9 . "${shelldir}"/lib/setup_python.sh 11 10 11 + list_output=$(mktemp /tmp/__perf_test.list_output.json.XXXXX) 12 + 13 + cleanup() { 14 + rm -f "${list_output}" 15 + 16 + trap - EXIT TERM INT 17 + } 18 + 19 + trap_cleanup() { 20 + cleanup 21 + exit 1 22 + } 23 + trap trap_cleanup EXIT TERM INT 24 + 12 25 test_list_json() { 13 26 echo "Json output test" 14 - perf list -j | $PYTHON -m json.tool 27 + perf list -j -o "${list_output}" 28 + $PYTHON -m json.tool "${list_output}" 15 29 echo "Json output test [Success]" 16 30 } 17 31 18 32 test_list_json 19 - exit $err 33 + cleanup 34 + exit 0
+9 -3
tools/perf/tests/shell/script.sh
··· 36 36 echo "DB test" 37 37 38 38 # Check if python script is supported 39 - libpython=$(perf version --build-options | grep python | grep -cv OFF) 40 - if [ "${libpython}" != "1" ] ; then 39 + if perf version --build-options | grep python | grep -q OFF ; then 41 40 echo "SKIP: python scripting is not supported" 42 41 err=2 43 42 return ··· 53 54 def call_path_table(*args): 54 55 print(f'call_path_table({args}') 55 56 _end_of_file_ 56 - perf record -g -o "${perfdatafile}" true 57 + case $(uname -m) 58 + in s390x) 59 + cmd_flags="--call-graph dwarf -e cpu-clock";; 60 + *) 61 + cmd_flags="-g";; 62 + esac 63 + 64 + perf record $cmd_flags -o "${perfdatafile}" true 57 65 perf script -i "${perfdatafile}" -s "${db_test}" 58 66 echo "DB test [Success]" 59 67 }
+1
tools/perf/trace/beauty/statx.c
··· 67 67 P_FLAG(BTIME); 68 68 P_FLAG(MNT_ID); 69 69 P_FLAG(DIOALIGN); 70 + P_FLAG(MNT_ID_UNIQUE); 70 71 71 72 #undef P_FLAG 72 73
+2 -2
tools/perf/util/hist.c
··· 491 491 } 492 492 493 493 if (symbol_conf.res_sample) { 494 - he->res_samples = calloc(sizeof(struct res_sample), 495 - symbol_conf.res_sample); 494 + he->res_samples = calloc(symbol_conf.res_sample, 495 + sizeof(struct res_sample)); 496 496 if (!he->res_samples) 497 497 goto err_srcline; 498 498 }
+4
tools/perf/util/include/linux/linkage.h
··· 115 115 SYM_ALIAS(alias, name, SYM_T_FUNC, SYM_L_WEAK) 116 116 #endif 117 117 118 + #ifndef SYM_FUNC_ALIAS_MEMFUNC 119 + #define SYM_FUNC_ALIAS_MEMFUNC SYM_FUNC_ALIAS 120 + #endif 121 + 118 122 // In the kernel sources (include/linux/cfi_types.h), this has a different 119 123 // definition when CONFIG_CFI_CLANG is used, for tools/ just use the !clang 120 124 // definition:
+1 -1
tools/perf/util/metricgroup.c
··· 286 286 *out_metric_events = NULL; 287 287 ids_size = hashmap__size(ids); 288 288 289 - metric_events = calloc(sizeof(void *), ids_size + 1); 289 + metric_events = calloc(ids_size + 1, sizeof(void *)); 290 290 if (!metric_events) 291 291 return -ENOMEM; 292 292
+1 -1
tools/perf/util/print-events.c
··· 66 66 67 67 put_tracing_file(events_path); 68 68 if (events_fd < 0) { 69 - printf("Error: failed to open tracing events directory\n"); 69 + pr_err("Error: failed to open tracing events directory\n"); 70 70 return; 71 71 } 72 72
+2 -2
tools/perf/util/synthetic-events.c
··· 1055 1055 if (thread_nr > n) 1056 1056 thread_nr = n; 1057 1057 1058 - synthesize_threads = calloc(sizeof(pthread_t), thread_nr); 1058 + synthesize_threads = calloc(thread_nr, sizeof(pthread_t)); 1059 1059 if (synthesize_threads == NULL) 1060 1060 goto free_dirent; 1061 1061 1062 - args = calloc(sizeof(*args), thread_nr); 1062 + args = calloc(thread_nr, sizeof(*args)); 1063 1063 if (args == NULL) 1064 1064 goto free_threads; 1065 1065
+4 -4
tools/testing/selftests/drivers/net/bonding/bond_options.sh
··· 162 162 local mode=$1 163 163 164 164 for primary_reselect in 0 1 2; do 165 - prio_test "mode active-backup arp_interval 100 arp_ip_target ${g_ip4} primary eth1 primary_reselect $primary_reselect" 165 + prio_test "mode $mode arp_interval 100 arp_ip_target ${g_ip4} primary eth1 primary_reselect $primary_reselect" 166 166 log_test "prio" "$mode arp_ip_target primary_reselect $primary_reselect" 167 167 done 168 168 } ··· 178 178 fi 179 179 180 180 for primary_reselect in 0 1 2; do 181 - prio_test "mode active-backup arp_interval 100 ns_ip6_target ${g_ip6} primary eth1 primary_reselect $primary_reselect" 181 + prio_test "mode $mode arp_interval 100 ns_ip6_target ${g_ip6} primary eth1 primary_reselect $primary_reselect" 182 182 log_test "prio" "$mode ns_ip6_target primary_reselect $primary_reselect" 183 183 done 184 184 } ··· 194 194 195 195 for mode in $modes; do 196 196 prio_miimon $mode 197 - prio_arp $mode 198 - prio_ns $mode 199 197 done 198 + prio_arp "active-backup" 199 + prio_ns "active-backup" 200 200 } 201 201 202 202 arp_validate_test()
+1 -1
tools/testing/selftests/drivers/net/bonding/settings
··· 1 - timeout=120 1 + timeout=1200
+9
tools/testing/selftests/drivers/net/netdevsim/udp_tunnel_nic.sh
··· 270 270 echo 1 > $NSIM_DEV_SYS/new_port 271 271 fi 272 272 NSIM_NETDEV=`get_netdev_name old_netdevs` 273 + ifconfig $NSIM_NETDEV up 273 274 274 275 msg="new NIC device created" 275 276 exp0=( 0 0 0 0 ) ··· 432 431 fi 433 432 434 433 echo $port > $NSIM_DEV_SYS/new_port 434 + NSIM_NETDEV=`get_netdev_name old_netdevs` 435 435 ifconfig $NSIM_NETDEV up 436 436 437 437 overflow_table0 "overflow NIC table" ··· 490 488 fi 491 489 492 490 echo $port > $NSIM_DEV_SYS/new_port 491 + NSIM_NETDEV=`get_netdev_name old_netdevs` 493 492 ifconfig $NSIM_NETDEV up 494 493 495 494 overflow_table0 "overflow NIC table" ··· 547 544 fi 548 545 549 546 echo $port > $NSIM_DEV_SYS/new_port 547 + NSIM_NETDEV=`get_netdev_name old_netdevs` 550 548 ifconfig $NSIM_NETDEV up 551 549 552 550 overflow_table0 "destroy NIC" ··· 577 573 fi 578 574 579 575 echo $port > $NSIM_DEV_SYS/new_port 576 + NSIM_NETDEV=`get_netdev_name old_netdevs` 580 577 ifconfig $NSIM_NETDEV up 581 578 582 579 msg="create VxLANs v6" ··· 638 633 fi 639 634 640 635 echo $port > $NSIM_DEV_SYS/new_port 636 + NSIM_NETDEV=`get_netdev_name old_netdevs` 641 637 ifconfig $NSIM_NETDEV up 642 638 643 639 echo 110 > $NSIM_DEV_DFS/ports/$port/udp_ports_inject_error ··· 694 688 fi 695 689 696 690 echo $port > $NSIM_DEV_SYS/new_port 691 + NSIM_NETDEV=`get_netdev_name old_netdevs` 697 692 ifconfig $NSIM_NETDEV up 698 693 699 694 msg="create VxLANs v6" ··· 754 747 fi 755 748 756 749 echo $port > $NSIM_DEV_SYS/new_port 750 + NSIM_NETDEV=`get_netdev_name old_netdevs` 757 751 ifconfig $NSIM_NETDEV up 758 752 759 753 msg="create VxLANs v6" ··· 885 877 886 878 echo 2 > $NSIM_DEV_SYS/del_port 887 879 echo 2 > $NSIM_DEV_SYS/new_port 880 + NSIM_NETDEV=`get_netdev_name old_netdevs` 888 881 check_tables 889 882 890 883 msg="replace VxLAN in overflow table"
+28
tools/testing/selftests/net/config
··· 1 1 CONFIG_USER_NS=y 2 2 CONFIG_NET_NS=y 3 + CONFIG_BONDING=m 3 4 CONFIG_BPF_SYSCALL=y 4 5 CONFIG_TEST_BPF=m 5 6 CONFIG_NUMA=y ··· 15 14 CONFIG_NET_IPVTI=y 16 15 CONFIG_IPV6_VTI=y 17 16 CONFIG_DUMMY=y 17 + CONFIG_BRIDGE_VLAN_FILTERING=y 18 18 CONFIG_BRIDGE=y 19 + CONFIG_CRYPTO_CHACHA20POLY1305=m 19 20 CONFIG_VLAN_8021Q=y 20 21 CONFIG_IFB=y 22 + CONFIG_INET_DIAG=y 23 + CONFIG_IP_GRE=m 21 24 CONFIG_NETFILTER=y 22 25 CONFIG_NETFILTER_ADVANCED=y 23 26 CONFIG_NF_CONNTRACK=m ··· 30 25 CONFIG_IP_NF_IPTABLES=m 31 26 CONFIG_IP6_NF_NAT=m 32 27 CONFIG_IP_NF_NAT=m 28 + CONFIG_IPV6_GRE=m 29 + CONFIG_IPV6_SEG6_LWTUNNEL=y 30 + CONFIG_L2TP_ETH=m 31 + CONFIG_L2TP_IP=m 32 + CONFIG_L2TP=m 33 + CONFIG_L2TP_V3=y 34 + CONFIG_MACSEC=m 35 + CONFIG_MACVLAN=y 36 + CONFIG_MACVTAP=y 37 + CONFIG_MPLS=y 38 + CONFIG_MPTCP=y 33 39 CONFIG_NF_TABLES=m 34 40 CONFIG_NF_TABLES_IPV6=y 35 41 CONFIG_NF_TABLES_IPV4=y 36 42 CONFIG_NFT_NAT=m 43 + CONFIG_NET_ACT_GACT=m 44 + CONFIG_NET_CLS_BASIC=m 45 + CONFIG_NET_CLS_U32=m 46 + CONFIG_NET_IPGRE_DEMUX=m 47 + CONFIG_NET_IPGRE=m 48 + CONFIG_NET_SCH_FQ_CODEL=m 49 + CONFIG_NET_SCH_HTB=m 37 50 CONFIG_NET_SCH_FQ=m 38 51 CONFIG_NET_SCH_ETF=m 39 52 CONFIG_NET_SCH_NETEM=y 53 + CONFIG_PSAMPLE=m 54 + CONFIG_TCP_MD5SIG=y 40 55 CONFIG_TEST_BLACKHOLE_DEV=m 41 56 CONFIG_KALLSYMS=y 57 + CONFIG_TLS=m 42 58 CONFIG_TRACEPOINTS=y 43 59 CONFIG_NET_DROP_MONITOR=m 44 60 CONFIG_NETDEVSIM=m ··· 74 48 CONFIG_IPV6_IOAM6_LWTUNNEL=y 75 49 CONFIG_CRYPTO_SM4_GENERIC=y 76 50 CONFIG_AMT=m 51 + CONFIG_TUN=y 77 52 CONFIG_VXLAN=m 78 53 CONFIG_IP_SCTP=m 79 54 CONFIG_NETFILTER_XT_MATCH_POLICY=m 80 55 CONFIG_CRYPTO_ARIA=y 56 + CONFIG_XFRM_INTERFACE=m
+5 -1
tools/testing/selftests/net/rps_default_mask.sh
··· 1 - #!/bin/sh 1 + #!/bin/bash 2 2 # SPDX-License-Identifier: GPL-2.0 3 3 4 4 readonly ksft_skip=4 ··· 33 33 34 34 rps_mask=$($cmd /sys/class/net/$dev_name/queues/rx-0/rps_cpus) 35 35 printf "%-60s" "$msg" 36 + 37 + # In case there is more than 32 CPUs we need to remove commas from masks 38 + rps_mask=${rps_mask//,} 39 + expected_rps_mask=${expected_rps_mask//,} 36 40 if [ $rps_mask -eq $expected_rps_mask ]; then 37 41 echo "[ ok ]" 38 42 else
+50 -18
tools/testing/selftests/net/so_incoming_cpu.c
··· 3 3 #define _GNU_SOURCE 4 4 #include <sched.h> 5 5 6 + #include <fcntl.h> 7 + 6 8 #include <netinet/in.h> 7 9 #include <sys/socket.h> 8 10 #include <sys/sysinfo.h> 9 11 10 12 #include "../kselftest_harness.h" 11 13 12 - #define CLIENT_PER_SERVER 32 /* More sockets, more reliable */ 13 - #define NR_SERVER self->nproc 14 - #define NR_CLIENT (CLIENT_PER_SERVER * NR_SERVER) 15 - 16 14 FIXTURE(so_incoming_cpu) 17 15 { 18 - int nproc; 19 16 int *servers; 20 17 union { 21 18 struct sockaddr addr; ··· 53 56 .when_to_set = AFTER_ALL_LISTEN, 54 57 }; 55 58 59 + static void write_sysctl(struct __test_metadata *_metadata, 60 + char *filename, char *string) 61 + { 62 + int fd, len, ret; 63 + 64 + fd = open(filename, O_WRONLY); 65 + ASSERT_NE(fd, -1); 66 + 67 + len = strlen(string); 68 + ret = write(fd, string, len); 69 + ASSERT_EQ(ret, len); 70 + } 71 + 72 + static void setup_netns(struct __test_metadata *_metadata) 73 + { 74 + ASSERT_EQ(unshare(CLONE_NEWNET), 0); 75 + ASSERT_EQ(system("ip link set lo up"), 0); 76 + 77 + write_sysctl(_metadata, "/proc/sys/net/ipv4/ip_local_port_range", "10000 60001"); 78 + write_sysctl(_metadata, "/proc/sys/net/ipv4/tcp_tw_reuse", "0"); 79 + } 80 + 81 + #define NR_PORT (60001 - 10000 - 1) 82 + #define NR_CLIENT_PER_SERVER_DEFAULT 32 83 + static int nr_client_per_server, nr_server, nr_client; 84 + 56 85 FIXTURE_SETUP(so_incoming_cpu) 57 86 { 58 - self->nproc = get_nprocs(); 59 - ASSERT_LE(2, self->nproc); 87 + setup_netns(_metadata); 60 88 61 - self->servers = malloc(sizeof(int) * NR_SERVER); 89 + nr_server = get_nprocs(); 90 + ASSERT_LE(2, nr_server); 91 + 92 + if (NR_CLIENT_PER_SERVER_DEFAULT * nr_server < NR_PORT) 93 + nr_client_per_server = NR_CLIENT_PER_SERVER_DEFAULT; 94 + else 95 + nr_client_per_server = NR_PORT / nr_server; 96 + 97 + nr_client = nr_client_per_server * nr_server; 98 + 99 + self->servers = malloc(sizeof(int) * nr_server); 62 100 ASSERT_NE(self->servers, NULL); 63 101 64 102 self->in_addr.sin_family = AF_INET; ··· 106 
74 { 107 75 int i; 108 76 109 - for (i = 0; i < NR_SERVER; i++) 77 + for (i = 0; i < nr_server; i++) 110 78 close(self->servers[i]); 111 79 112 80 free(self->servers); ··· 142 110 if (variant->when_to_set == BEFORE_LISTEN) 143 111 set_so_incoming_cpu(_metadata, fd, cpu); 144 112 145 - /* We don't use CLIENT_PER_SERVER here not to block 113 + /* We don't use nr_client_per_server here not to block 146 114 * this test at connect() if SO_INCOMING_CPU is broken. 147 115 */ 148 - ret = listen(fd, NR_CLIENT); 116 + ret = listen(fd, nr_client); 149 117 ASSERT_EQ(ret, 0); 150 118 151 119 if (variant->when_to_set == AFTER_LISTEN) ··· 160 128 { 161 129 int i, ret; 162 130 163 - for (i = 0; i < NR_SERVER; i++) { 131 + for (i = 0; i < nr_server; i++) { 164 132 self->servers[i] = create_server(_metadata, self, variant, i); 165 133 166 134 if (i == 0) { ··· 170 138 } 171 139 172 140 if (variant->when_to_set == AFTER_ALL_LISTEN) { 173 - for (i = 0; i < NR_SERVER; i++) 141 + for (i = 0; i < nr_server; i++) 174 142 set_so_incoming_cpu(_metadata, self->servers[i], i); 175 143 } 176 144 } ··· 181 149 cpu_set_t cpu_set; 182 150 int i, j, fd, ret; 183 151 184 - for (i = 0; i < NR_SERVER; i++) { 152 + for (i = 0; i < nr_server; i++) { 185 153 CPU_ZERO(&cpu_set); 186 154 187 155 CPU_SET(i, &cpu_set); ··· 194 162 ret = sched_setaffinity(0, sizeof(cpu_set), &cpu_set); 195 163 ASSERT_EQ(ret, 0); 196 164 197 - for (j = 0; j < CLIENT_PER_SERVER; j++) { 165 + for (j = 0; j < nr_client_per_server; j++) { 198 166 fd = socket(AF_INET, SOCK_STREAM, 0); 199 167 ASSERT_NE(fd, -1); 200 168 ··· 212 180 int i, j, fd, cpu, ret, total = 0; 213 181 socklen_t len = sizeof(int); 214 182 215 - for (i = 0; i < NR_SERVER; i++) { 216 - for (j = 0; j < CLIENT_PER_SERVER; j++) { 183 + for (i = 0; i < nr_server; i++) { 184 + for (j = 0; j < nr_client_per_server; j++) { 217 185 /* If we see -EAGAIN here, SO_INCOMING_CPU is broken */ 218 186 fd = accept(self->servers[i], &self->addr, &self->addrlen); 219 187 
ASSERT_NE(fd, -1); ··· 227 195 } 228 196 } 229 197 230 - ASSERT_EQ(total, NR_CLIENT); 198 + ASSERT_EQ(total, nr_client); 231 199 TH_LOG("SO_INCOMING_CPU is very likely to be " 232 200 "working correctly with %d sockets.", total); 233 201 }