Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

+595 -291
+2 -3
CREDITS
···
 S: Germany
 
 N: Arnaldo Carvalho de Melo
-E: acme@ghostprotocols.net
+E: acme@kernel.org
 E: arnaldo.melo@gmail.com
 E: acme@redhat.com
-W: http://oops.ghostprotocols.net:81/blog/
 P: 1024D/9224DF01 D5DF E3BB E3C8 BCBB F8AD 841A B6AB 4681 9224 DF01
-D: IPX, LLC, DCCP, cyc2x, wl3501_cs, net/ hacks
+D: tools/, IPX, LLC, DCCP, cyc2x, wl3501_cs, net/ hacks
 S: Brazil
 
 N: Karsten Merker
+17 -6
Documentation/media/uapi/v4l/pixfmt-007.rst
···
 The :ref:`srgb` standard defines the colorspace used by most webcams
 and computer graphics. The default transfer function is
 ``V4L2_XFER_FUNC_SRGB``. The default Y'CbCr encoding is
-``V4L2_YCBCR_ENC_601``. The default Y'CbCr quantization is full range.
+``V4L2_YCBCR_ENC_601``. The default Y'CbCr quantization is limited range.
+
+Note that the :ref:`sycc` standard specifies full range quantization,
+however all current capture hardware supported by the kernel convert
+R'G'B' to limited range Y'CbCr. So choosing full range as the default
+would break how applications interpret the quantization range.
+
 The chromaticities of the primary colors and the white reference are:
 
 
···
 
 Y' is clamped to the range [0…1] and Cb and Cr are clamped to the range
 [-0.5…0.5]. This transform is identical to one defined in SMPTE
-170M/BT.601. The Y'CbCr quantization is full range.
+170M/BT.601. The Y'CbCr quantization is limited range.
 
 
 .. _col-adobergb:
···
 graphics that use the AdobeRGB colorspace. This is also known as the
 :ref:`oprgb` standard. The default transfer function is
 ``V4L2_XFER_FUNC_ADOBERGB``. The default Y'CbCr encoding is
-``V4L2_YCBCR_ENC_601``. The default Y'CbCr quantization is full
-range. The chromaticities of the primary colors and the white reference
-are:
+``V4L2_YCBCR_ENC_601``. The default Y'CbCr quantization is limited
+range.
 
+Note that the :ref:`oprgb` standard specifies full range quantization,
+however all current capture hardware supported by the kernel convert
+R'G'B' to limited range Y'CbCr. So choosing full range as the default
+would break how applications interpret the quantization range.
+
+The chromaticities of the primary colors and the white reference are:
 
 
 .. tabularcolumns:: |p{4.4cm}|p{4.4cm}|p{8.7cm}|
···
 
 Y' is clamped to the range [0…1] and Cb and Cr are clamped to the range
 [-0.5…0.5]. This transform is identical to one defined in SMPTE
-170M/BT.601. The Y'CbCr quantization is full range.
+170M/BT.601. The Y'CbCr quantization is limited range.
 
 
 .. _col-bt2020:
+6 -9
MAINTAINERS
···
 F: drivers/hwmon/applesmc.c
 
 APPLETALK NETWORK LAYER
-M: Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
-S: Maintained
+L: netdev@vger.kernel.org
+S: Odd fixes
 F: drivers/net/appletalk/
 F: net/appletalk/
 
···
 F: drivers/tty/ipwireless/
 
 IPX NETWORK LAYER
-M: Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
 L: netdev@vger.kernel.org
-S: Maintained
+S: Odd fixes
 F: include/net/ipx.h
 F: include/uapi/linux/ipx.h
 F: net/ipx/
···
 F: drivers/misc/lkdtm*
 
 LLC (802.2)
-M: Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
-S: Maintained
+L: netdev@vger.kernel.org
+S: Odd fixes
 F: include/linux/llc.h
 F: include/uapi/linux/llc.h
 F: include/net/llc*
···
 F: drivers/input/misc/wistron_btns.c
 
 WL3501 WIRELESS PCMCIA CARD DRIVER
-M: Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
 L: linux-wireless@vger.kernel.org
-W: http://oops.ghostprotocols.net:81/blog
-S: Maintained
+S: Odd fixes
 F: drivers/net/wireless/wl3501*
 
 WOLFSON MICROELECTRONICS DRIVERS
+1 -1
Makefile
···
 VERSION = 4
 PATCHLEVEL = 10
 SUBLEVEL = 0
-EXTRAVERSION = -rc7
+EXTRAVERSION = -rc8
 NAME = Fearless Coyote
 
 # *DOCUMENTATION*
+1
arch/x86/include/asm/processor.h
···
 	__u8			x86_phys_bits;
 	/* CPUID returned core id bits: */
 	__u8			x86_coreid_bits;
+	__u8			cu_id;
 	/* Max extended CPUID function supported: */
 	__u32			extended_cpuid_level;
 	/* Maximum supported CPUID level, -1=no CPUID: */
+15 -1
arch/x86/kernel/cpu/amd.c
···
 
 	/* get information required for multi-node processors */
 	if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
+		u32 eax, ebx, ecx, edx;
 
-		node_id = cpuid_ecx(0x8000001e) & 7;
+		cpuid(0x8000001e, &eax, &ebx, &ecx, &edx);
+
+		node_id = ecx & 0xff;
+		smp_num_siblings = ((ebx >> 8) & 0xff) + 1;
+
+		if (c->x86 == 0x15)
+			c->cu_id = ebx & 0xff;
+
+		if (c->x86 >= 0x17) {
+			c->cpu_core_id = ebx & 0xff;
+
+			if (smp_num_siblings > 1)
+				c->x86_max_cores /= smp_num_siblings;
+		}
 
 		/*
 		 * We may have multiple LLCs if L3 caches exist, so check if we
+1
arch/x86/kernel/cpu/common.c
···
 	c->x86_model_id[0] = '\0';  /* Unset */
 	c->x86_max_cores = 1;
 	c->x86_coreid_bits = 0;
+	c->cu_id = 0xff;
 #ifdef CONFIG_X86_64
 	c->x86_clflush_size = 64;
 	c->x86_phys_bits = 36;
+9 -3
arch/x86/kernel/smpboot.c
···
 		int cpu1 = c->cpu_index, cpu2 = o->cpu_index;
 
 		if (c->phys_proc_id == o->phys_proc_id &&
-		    per_cpu(cpu_llc_id, cpu1) == per_cpu(cpu_llc_id, cpu2) &&
-		    c->cpu_core_id == o->cpu_core_id)
-			return topology_sane(c, o, "smt");
+		    per_cpu(cpu_llc_id, cpu1) == per_cpu(cpu_llc_id, cpu2)) {
+			if (c->cpu_core_id == o->cpu_core_id)
+				return topology_sane(c, o, "smt");
+
+			if ((c->cu_id != 0xff) &&
+			    (o->cu_id != 0xff) &&
+			    (c->cu_id == o->cu_id))
+				return topology_sane(c, o, "smt");
+		}
 
 	} else if (c->phys_proc_id == o->phys_proc_id &&
 		   c->cpu_core_id == o->cpu_core_id) {
+3 -2
arch/x86/kernel/tsc.c
···
 		(unsigned long)cpu_khz / 1000,
 		(unsigned long)cpu_khz % 1000);
 
+	/* Sanitize TSC ADJUST before cyc2ns gets initialized */
+	tsc_store_and_check_tsc_adjust(true);
+
 	/*
 	 * Secondary CPUs do not run through tsc_init(), so set up
 	 * all the scale factors for all CPUs, assuming the same
···
 
 	if (unsynchronized_tsc())
 		mark_tsc_unstable("TSCs unsynchronized");
-	else
-		tsc_store_and_check_tsc_adjust(true);
 
 	check_system_tsc_reliable();
 
+7 -9
arch/x86/kernel/tsc_sync.c
···
 	if (unsynchronized_tsc())
 		return;
 
-	if (tsc_clocksource_reliable) {
-		if (cpu == (nr_cpu_ids-1) || system_state != SYSTEM_BOOTING)
-			pr_info(
-			"Skipped synchronization checks as TSC is reliable.\n");
-		return;
-	}
-
 	/*
 	 * Set the maximum number of test runs to
 	 *  1 if the CPU does not provide the TSC_ADJUST MSR
···
 	int cpus = 2;
 
 	/* Also aborts if there is no TSC. */
-	if (unsynchronized_tsc() || tsc_clocksource_reliable)
+	if (unsynchronized_tsc())
 		return;
 
 	/*
 	 * Store, verify and sanitize the TSC adjust register. If
 	 * successful skip the test.
+	 *
+	 * The test is also skipped when the TSC is marked reliable. This
+	 * is true for SoCs which have no fallback clocksource. On these
+	 * SoCs the TSC is frequency synchronized, but still the TSC ADJUST
+	 * register might have been wreckaged by the BIOS..
 	 */
-	if (tsc_store_and_check_tsc_adjust(false)) {
+	if (tsc_store_and_check_tsc_adjust(false) || tsc_clocksource_reliable) {
 		atomic_inc(&skip_test);
 		return;
 	}
+2
arch/x86/mm/dump_pagetables.c
···
 #include <linux/debugfs.h>
 #include <linux/mm.h>
 #include <linux/init.h>
+#include <linux/sched.h>
 #include <linux/seq_file.h>
 
 #include <asm/pgtable.h>
···
 		} else
 			note_page(m, &st, __pgprot(0), 1);
 
+		cond_resched();
 		start++;
 	}
 
+19 -9
drivers/irqchip/irq-keystone.c
···
 #include <linux/bitops.h>
 #include <linux/module.h>
 #include <linux/moduleparam.h>
+#include <linux/interrupt.h>
 #include <linux/irqdomain.h>
 #include <linux/irqchip.h>
-#include <linux/irqchip/chained_irq.h>
 #include <linux/of.h>
 #include <linux/of_platform.h>
 #include <linux/mfd/syscon.h>
···
 	struct irq_domain	*irqd;
 	struct regmap		*devctrl_regs;
 	u32			devctrl_offset;
+	raw_spinlock_t		wa_lock;
 };
 
 static inline u32 keystone_irq_readl(struct keystone_irq_device *kirq)
···
 	/* nothing to do here */
 }
 
-static void keystone_irq_handler(struct irq_desc *desc)
+static irqreturn_t keystone_irq_handler(int irq, void *keystone_irq)
 {
-	unsigned int irq = irq_desc_get_irq(desc);
-	struct keystone_irq_device *kirq = irq_desc_get_handler_data(desc);
+	struct keystone_irq_device *kirq = keystone_irq;
+	unsigned long wa_lock_flags;
 	unsigned long pending;
 	int src, virq;
 
 	dev_dbg(kirq->dev, "start irq %d\n", irq);
-
-	chained_irq_enter(irq_desc_get_chip(desc), desc);
 
 	pending = keystone_irq_readl(kirq);
 	keystone_irq_writel(kirq, pending);
···
 			if (!virq)
 				dev_warn(kirq->dev, "spurious irq detected hwirq %d, virq %d\n",
 					 src, virq);
+			raw_spin_lock_irqsave(&kirq->wa_lock, wa_lock_flags);
 			generic_handle_irq(virq);
+			raw_spin_unlock_irqrestore(&kirq->wa_lock,
+						   wa_lock_flags);
 		}
 	}
 
-	chained_irq_exit(irq_desc_get_chip(desc), desc);
-
 	dev_dbg(kirq->dev, "end irq %d\n", irq);
+	return IRQ_HANDLED;
 }
 
 static int keystone_irq_map(struct irq_domain *h, unsigned int virq,
···
 		return -ENODEV;
 	}
 
+	raw_spin_lock_init(&kirq->wa_lock);
+
 	platform_set_drvdata(pdev, kirq);
 
-	irq_set_chained_handler_and_data(kirq->irq, keystone_irq_handler, kirq);
+	ret = request_irq(kirq->irq, keystone_irq_handler,
+			  0, dev_name(dev), kirq);
+	if (ret) {
+		irq_domain_remove(kirq->irqd);
+		return ret;
+	}
 
 	/* clear all source bits */
 	keystone_irq_writel(kirq, ~0x0);
···
 {
 	struct keystone_irq_device *kirq = platform_get_drvdata(pdev);
 	int hwirq;
+
+	free_irq(kirq->irq, kirq);
 
 	for (hwirq = 0; hwirq < KEYSTONE_N_IRQ; hwirq++)
 		irq_dispose_mapping(irq_find_mapping(kirq->irqd, hwirq));
+4
drivers/irqchip/irq-mxs.c
···
 	.irq_ack = icoll_ack_irq,
 	.irq_mask = icoll_mask_irq,
 	.irq_unmask = icoll_unmask_irq,
+	.flags = IRQCHIP_MASK_ON_SUSPEND |
+		 IRQCHIP_SKIP_SET_WAKE,
 };
 
 static struct irq_chip asm9260_icoll_chip = {
 	.irq_ack = icoll_ack_irq,
 	.irq_mask = asm9260_mask_irq,
 	.irq_unmask = asm9260_unmask_irq,
+	.flags = IRQCHIP_MASK_ON_SUSPEND |
+		 IRQCHIP_SKIP_SET_WAKE,
 };
 
 asmlinkage void __exception_irq_entry icoll_handle_irq(struct pt_regs *regs)
+3 -4
drivers/media/cec/cec-adap.c
···
 	}
 	memset(msg->msg + msg->len, 0, sizeof(msg->msg) - msg->len);
 	if (msg->len == 1) {
-		if (cec_msg_initiator(msg) != 0xf ||
-		    cec_msg_destination(msg) == 0xf) {
+		if (cec_msg_destination(msg) == 0xf) {
 			dprintk(1, "cec_transmit_msg: invalid poll message\n");
 			return -EINVAL;
 		}
···
 		dprintk(1, "cec_transmit_msg: destination is the adapter itself\n");
 		return -EINVAL;
 	}
-	if (cec_msg_initiator(msg) != 0xf &&
+	if (msg->len > 1 && adap->is_configured &&
 	    !cec_has_log_addr(adap, cec_msg_initiator(msg))) {
 		dprintk(1, "cec_transmit_msg: initiator has unknown logical address %d\n",
 			cec_msg_initiator(msg));
···
 
 	/* Send poll message */
 	msg.len = 1;
-	msg.msg[0] = 0xf0 | log_addr;
+	msg.msg[0] = (log_addr << 4) | log_addr;
 	err = cec_transmit_msg_fh(adap, &msg, NULL, true);
 
 	/*
+13 -5
drivers/media/usb/siano/smsusb.c
···
 static int smsusb_sendrequest(void *context, void *buffer, size_t size)
 {
 	struct smsusb_device_t *dev = (struct smsusb_device_t *) context;
-	struct sms_msg_hdr *phdr = (struct sms_msg_hdr *) buffer;
-	int dummy;
+	struct sms_msg_hdr *phdr;
+	int dummy, ret;
 
 	if (dev->state != SMSUSB_ACTIVE) {
 		pr_debug("Device not active yet\n");
 		return -ENOENT;
 	}
 
+	phdr = kmalloc(size, GFP_KERNEL);
+	if (!phdr)
+		return -ENOMEM;
+	memcpy(phdr, buffer, size);
+
 	pr_debug("sending %s(%d) size: %d\n",
 		 smscore_translate_msg(phdr->msg_type), phdr->msg_type,
 		 phdr->msg_length);
 
 	smsendian_handle_tx_message((struct sms_msg_data *) phdr);
-	smsendian_handle_message_header((struct sms_msg_hdr *)buffer);
-	return usb_bulk_msg(dev->udev, usb_sndbulkpipe(dev->udev, 2),
-			    buffer, size, &dummy, 1000);
+	smsendian_handle_message_header((struct sms_msg_hdr *)phdr);
+	ret = usb_bulk_msg(dev->udev, usb_sndbulkpipe(dev->udev, 2),
+			   phdr, size, &dummy, 1000);
+
+	kfree(phdr);
+	return ret;
 }
 
 static char *smsusb1_fw_lkup[] = {
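This is the usual USB bounce-buffer pattern: usb_bulk_msg() needs a DMA-able, kmalloc()'d buffer, so the message is copied out of the caller's buffer before submission. A minimal sketch of the same pattern; kmemdup() is assumed here only as a shorthand for the kmalloc()+memcpy() pair the patch uses:

	/* Sketch: send 'buffer' over a bulk pipe via a heap bounce copy,
	 * since the caller's buffer may not be DMA-safe. */
	static int send_bounced(struct usb_device *udev, void *buffer,
				size_t size)
	{
		void *bounce = kmemdup(buffer, size, GFP_KERNEL);
		int actual, ret;

		if (!bounce)
			return -ENOMEM;
		ret = usb_bulk_msg(udev, usb_sndbulkpipe(udev, 2),
				   bounce, size, &actual, 1000);
		kfree(bounce);
		return ret;
	}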
+9 -14
drivers/net/ethernet/freescale/fec_main.c
···
 	struct netdev_hw_addr *ha;
 	unsigned int i, bit, data, crc, tmp;
 	unsigned char hash;
+	unsigned int hash_high = 0, hash_low = 0;
 
 	if (ndev->flags & IFF_PROMISC) {
 		tmp = readl(fep->hwp + FEC_R_CNTRL);
···
 		return;
 	}
 
-	/* Clear filter and add the addresses in hash register
-	 */
-	writel(0, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
-	writel(0, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
-
+	/* Add the addresses in hash register */
 	netdev_for_each_mc_addr(ha, ndev) {
 		/* calculate crc32 value of mac address */
 		crc = 0xffffffff;
···
 		 */
 		hash = (crc >> (32 - FEC_HASH_BITS)) & 0x3f;
 
-		if (hash > 31) {
-			tmp = readl(fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
-			tmp |= 1 << (hash - 32);
-			writel(tmp, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
-		} else {
-			tmp = readl(fep->hwp + FEC_GRP_HASH_TABLE_LOW);
-			tmp |= 1 << hash;
-			writel(tmp, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
-		}
+		if (hash > 31)
+			hash_high |= 1 << (hash - 32);
+		else
+			hash_low |= 1 << hash;
 	}
+
+	writel(hash_high, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
+	writel(hash_low, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
 }
 
 /* Set a MAC change in hardware. */
+21 -22
drivers/net/ethernet/ibm/ibmvnic.c
···
 	}
 	ltb->map_id = adapter->map_id;
 	adapter->map_id++;
+
+	init_completion(&adapter->fw_done);
 	send_request_map(adapter, ltb->addr,
 			 ltb->size, ltb->map_id);
-	init_completion(&adapter->fw_done);
 	wait_for_completion(&adapter->fw_done);
 	return 0;
 }
···
 	adapter->rx_pool = NULL;
 rx_pool_arr_alloc_failed:
 	for (i = 0; i < adapter->req_rx_queues; i++)
-		napi_enable(&adapter->napi[i]);
+		napi_disable(&adapter->napi[i]);
 alloc_napi_failed:
 	return -ENOMEM;
 }
···
 	crq.request_statistics.ioba = cpu_to_be32(adapter->stats_token);
 	crq.request_statistics.len =
 		cpu_to_be32(sizeof(struct ibmvnic_statistics));
-	ibmvnic_send_crq(adapter, &crq);
 
 	/* Wait for data to be written */
 	init_completion(&adapter->stats_done);
+	ibmvnic_send_crq(adapter, &crq);
 	wait_for_completion(&adapter->stats_done);
 
 	for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++)
···
 	adapter->req_rx_queues = adapter->opt_rx_comp_queues;
 	adapter->req_rx_add_queues = adapter->max_rx_add_queues;
 
-	adapter->req_mtu = adapter->max_mtu;
+	adapter->req_mtu = adapter->netdev->mtu + ETH_HLEN;
 }
 
 total_queues = adapter->req_tx_queues + adapter->req_rx_queues;
···
 
 	if (!found) {
 		dev_err(dev, "Couldn't find error id %x\n",
-			crq->request_error_rsp.error_id);
+			be32_to_cpu(crq->request_error_rsp.error_id));
 		return;
 	}
 
 	dev_err(dev, "Detailed info for error id %x:",
-		crq->request_error_rsp.error_id);
+		be32_to_cpu(crq->request_error_rsp.error_id));
 
 	for (i = 0; i < error_buff->len; i++) {
 		pr_cont("%02x", (int)error_buff->buff[i]);
···
 	dev_err(dev, "Firmware reports %serror id %x, cause %d\n",
 		crq->error_indication.
 		    flags & IBMVNIC_FATAL_ERROR ? "FATAL " : "",
-		crq->error_indication.error_id,
-		crq->error_indication.error_cause);
+		be32_to_cpu(crq->error_indication.error_id),
+		be16_to_cpu(crq->error_indication.error_cause));
 
 	error_buff = kmalloc(sizeof(*error_buff), GFP_ATOMIC);
 	if (!error_buff)
···
 	case PARTIALSUCCESS:
 		dev_info(dev, "req=%lld, rsp=%ld in %s queue, retrying.\n",
 			 *req_value,
-			 (long int)be32_to_cpu(crq->request_capability_rsp.
+			 (long int)be64_to_cpu(crq->request_capability_rsp.
 					       number), name);
 		release_sub_crqs_no_irqs(adapter);
-		*req_value = be32_to_cpu(crq->request_capability_rsp.number);
+		*req_value = be64_to_cpu(crq->request_capability_rsp.number);
 		init_sub_crqs(adapter, 1);
 		return;
 	default:
···
 		break;
 	case MIN_MTU:
 		adapter->min_mtu = be64_to_cpu(crq->query_capability.number);
-		netdev->min_mtu = adapter->min_mtu;
+		netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
 		netdev_dbg(netdev, "min_mtu = %lld\n", adapter->min_mtu);
 		break;
 	case MAX_MTU:
 		adapter->max_mtu = be64_to_cpu(crq->query_capability.number);
-		netdev->max_mtu = adapter->max_mtu;
+		netdev->max_mtu = adapter->max_mtu - ETH_HLEN;
 		netdev_dbg(netdev, "max_mtu = %lld\n", adapter->max_mtu);
 		break;
 	case MAX_MULTICAST_FILTERS:
···
 	crq.collect_fw_trace.correlator = adapter->ras_comps[num].correlator;
 	crq.collect_fw_trace.ioba = cpu_to_be32(trace_tok);
 	crq.collect_fw_trace.len = adapter->ras_comps[num].trace_buff_size;
-	ibmvnic_send_crq(adapter, &crq);
 
 	init_completion(&adapter->fw_done);
+	ibmvnic_send_crq(adapter, &crq);
 	wait_for_completion(&adapter->fw_done);
 
 	if (*ppos + len > be32_to_cpu(adapter->ras_comps[num].trace_buff_size))
···
 	memset(&crq, 0, sizeof(crq));
 	crq.request_dump_size.first = IBMVNIC_CRQ_CMD;
 	crq.request_dump_size.cmd = REQUEST_DUMP_SIZE;
-	ibmvnic_send_crq(adapter, &crq);
 
 	init_completion(&adapter->fw_done);
+	ibmvnic_send_crq(adapter, &crq);
 	wait_for_completion(&adapter->fw_done);
 
 	seq_write(seq, adapter->dump_data, adapter->dump_data_size);
···
 		}
 	}
 
-	send_version_xchg(adapter);
 	reinit_completion(&adapter->init_done);
+	send_version_xchg(adapter);
 	if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
 		dev_err(dev, "Passive init timeout\n");
 		goto task_failed;
···
 	if (adapter->renegotiate) {
 		adapter->renegotiate = false;
 		release_sub_crqs_no_irqs(adapter);
-		send_cap_queries(adapter);
 
 		reinit_completion(&adapter->init_done);
+		send_cap_queries(adapter);
 		if (!wait_for_completion_timeout(&adapter->init_done,
 						 timeout)) {
 			dev_err(dev, "Passive init timeout\n");
···
 		goto task_failed;
 
 	netdev->real_num_tx_queues = adapter->req_tx_queues;
-	netdev->mtu = adapter->req_mtu;
-	netdev->min_mtu = adapter->min_mtu;
-	netdev->max_mtu = adapter->max_mtu;
+	netdev->mtu = adapter->req_mtu - ETH_HLEN;
 
 	if (adapter->failover) {
 		adapter->failover = false;
···
 			adapter->debugfs_dump = ent;
 		}
 	}
-	ibmvnic_send_crq_init(adapter);
 
 	init_completion(&adapter->init_done);
+	ibmvnic_send_crq_init(adapter);
 	if (!wait_for_completion_timeout(&adapter->init_done, timeout))
 		return 0;
 
···
 	if (adapter->renegotiate) {
 		adapter->renegotiate = false;
 		release_sub_crqs_no_irqs(adapter);
-		send_cap_queries(adapter);
 
 		reinit_completion(&adapter->init_done);
+		send_cap_queries(adapter);
 		if (!wait_for_completion_timeout(&adapter->init_done,
 						 timeout))
 			return 0;
···
 	}
 
 	netdev->real_num_tx_queues = adapter->req_tx_queues;
-	netdev->mtu = adapter->req_mtu;
+	netdev->mtu = adapter->req_mtu - ETH_HLEN;
 
 	rc = register_netdev(netdev);
 	if (rc) {
+4
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
···
 
 	mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);
 
+	preempt_disable();
+
 	tcf_exts_to_list(f->exts, &actions);
 	list_for_each_entry(a, &actions, list)
 		tcf_action_stats_update(a, bytes, packets, lastuse);
+
+	preempt_enable();
 
 	return 0;
 }
+1 -1
drivers/net/ethernet/ti/cpsw.c
···
 {
 	struct platform_device	*pdev = to_platform_device(dev);
 	struct net_device	*ndev = platform_get_drvdata(pdev);
-	struct cpsw_common	*cpsw = netdev_priv(ndev);
+	struct cpsw_common	*cpsw = ndev_to_cpsw(ndev);
 
 	/* Select default pin state */
 	pinctrl_pm_select_default_state(dev);
+69 -57
drivers/net/ethernet/xilinx/xilinx_emaclite.c
···
 /* BUFFER_ALIGN(adr) calculates the number of bytes to the next alignment. */
 #define BUFFER_ALIGN(adr) ((ALIGNMENT - ((u32) adr)) % ALIGNMENT)
 
+#ifdef __BIG_ENDIAN
+#define xemaclite_readl		ioread32be
+#define xemaclite_writel	iowrite32be
+#else
+#define xemaclite_readl		ioread32
+#define xemaclite_writel	iowrite32
+#endif
+
 /**
  * struct net_local - Our private per device data
  * @ndev:		instance of the network device
···
 	u32 reg_data;
 
 	/* Enable the Tx interrupts for the first Buffer */
-	reg_data = __raw_readl(drvdata->base_addr + XEL_TSR_OFFSET);
-	__raw_writel(reg_data | XEL_TSR_XMIT_IE_MASK,
-		     drvdata->base_addr + XEL_TSR_OFFSET);
+	reg_data = xemaclite_readl(drvdata->base_addr + XEL_TSR_OFFSET);
+	xemaclite_writel(reg_data | XEL_TSR_XMIT_IE_MASK,
+			 drvdata->base_addr + XEL_TSR_OFFSET);
 
 	/* Enable the Rx interrupts for the first buffer */
-	__raw_writel(XEL_RSR_RECV_IE_MASK, drvdata->base_addr + XEL_RSR_OFFSET);
+	xemaclite_writel(XEL_RSR_RECV_IE_MASK, drvdata->base_addr + XEL_RSR_OFFSET);
 
 	/* Enable the Global Interrupt Enable */
-	__raw_writel(XEL_GIER_GIE_MASK, drvdata->base_addr + XEL_GIER_OFFSET);
+	xemaclite_writel(XEL_GIER_GIE_MASK, drvdata->base_addr + XEL_GIER_OFFSET);
 }
 
 /**
···
 	u32 reg_data;
 
 	/* Disable the Global Interrupt Enable */
-	__raw_writel(XEL_GIER_GIE_MASK, drvdata->base_addr + XEL_GIER_OFFSET);
+	xemaclite_writel(XEL_GIER_GIE_MASK, drvdata->base_addr + XEL_GIER_OFFSET);
 
 	/* Disable the Tx interrupts for the first buffer */
-	reg_data = __raw_readl(drvdata->base_addr + XEL_TSR_OFFSET);
-	__raw_writel(reg_data & (~XEL_TSR_XMIT_IE_MASK),
-		     drvdata->base_addr + XEL_TSR_OFFSET);
+	reg_data = xemaclite_readl(drvdata->base_addr + XEL_TSR_OFFSET);
+	xemaclite_writel(reg_data & (~XEL_TSR_XMIT_IE_MASK),
+			 drvdata->base_addr + XEL_TSR_OFFSET);
 
 	/* Disable the Rx interrupts for the first buffer */
-	reg_data = __raw_readl(drvdata->base_addr + XEL_RSR_OFFSET);
-	__raw_writel(reg_data & (~XEL_RSR_RECV_IE_MASK),
-		     drvdata->base_addr + XEL_RSR_OFFSET);
+	reg_data = xemaclite_readl(drvdata->base_addr + XEL_RSR_OFFSET);
+	xemaclite_writel(reg_data & (~XEL_RSR_RECV_IE_MASK),
+			 drvdata->base_addr + XEL_RSR_OFFSET);
 }
 
 /**
···
 		byte_count = ETH_FRAME_LEN;
 
 	/* Check if the expected buffer is available */
-	reg_data = __raw_readl(addr + XEL_TSR_OFFSET);
+	reg_data = xemaclite_readl(addr + XEL_TSR_OFFSET);
 	if ((reg_data & (XEL_TSR_XMIT_BUSY_MASK |
 	     XEL_TSR_XMIT_ACTIVE_MASK)) == 0) {
 
···
 
 		addr = (void __iomem __force *)((u32 __force)addr ^
 						 XEL_BUFFER_OFFSET);
-		reg_data = __raw_readl(addr + XEL_TSR_OFFSET);
+		reg_data = xemaclite_readl(addr + XEL_TSR_OFFSET);
 
 		if ((reg_data & (XEL_TSR_XMIT_BUSY_MASK |
 		     XEL_TSR_XMIT_ACTIVE_MASK)) != 0)
···
 	/* Write the frame to the buffer */
 	xemaclite_aligned_write(data, (u32 __force *) addr, byte_count);
 
-	__raw_writel((byte_count & XEL_TPLR_LENGTH_MASK),
-		     addr + XEL_TPLR_OFFSET);
+	xemaclite_writel((byte_count & XEL_TPLR_LENGTH_MASK),
+			 addr + XEL_TPLR_OFFSET);
 
 	/* Update the Tx Status Register to indicate that there is a
 	 * frame to send. Set the XEL_TSR_XMIT_ACTIVE_MASK flag which
 	 * is used by the interrupt handler to check whether a frame
 	 * has been transmitted */
-	reg_data = __raw_readl(addr + XEL_TSR_OFFSET);
+	reg_data = xemaclite_readl(addr + XEL_TSR_OFFSET);
 	reg_data |= (XEL_TSR_XMIT_BUSY_MASK | XEL_TSR_XMIT_ACTIVE_MASK);
-	__raw_writel(reg_data, addr + XEL_TSR_OFFSET);
+	xemaclite_writel(reg_data, addr + XEL_TSR_OFFSET);
 
 	return 0;
 }
···
  *
  * Return:	Total number of bytes received
  */
-static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data)
+static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data, int maxlen)
 {
 	void __iomem *addr;
 	u16 length, proto_type;
···
 	addr = (drvdata->base_addr + drvdata->next_rx_buf_to_use);
 
 	/* Verify which buffer has valid data */
-	reg_data = __raw_readl(addr + XEL_RSR_OFFSET);
+	reg_data = xemaclite_readl(addr + XEL_RSR_OFFSET);
 
 	if ((reg_data & XEL_RSR_RECV_DONE_MASK) == XEL_RSR_RECV_DONE_MASK) {
 		if (drvdata->rx_ping_pong != 0)
···
 			return 0;	/* No data was available */
 
 		/* Verify that buffer has valid data */
-		reg_data = __raw_readl(addr + XEL_RSR_OFFSET);
+		reg_data = xemaclite_readl(addr + XEL_RSR_OFFSET);
 		if ((reg_data & XEL_RSR_RECV_DONE_MASK) !=
 		     XEL_RSR_RECV_DONE_MASK)
 			return 0;	/* No data was available */
 	}
 
 	/* Get the protocol type of the ethernet frame that arrived */
-	proto_type = ((ntohl(__raw_readl(addr + XEL_HEADER_OFFSET +
+	proto_type = ((ntohl(xemaclite_readl(addr + XEL_HEADER_OFFSET +
 			XEL_RXBUFF_OFFSET)) >> XEL_HEADER_SHIFT) &
 			XEL_RPLR_LENGTH_MASK);
 
 	/* Check if received ethernet frame is a raw ethernet frame
 	 * or an IP packet or an ARP packet */
-	if (proto_type > (ETH_FRAME_LEN + ETH_FCS_LEN)) {
+	if (proto_type > ETH_DATA_LEN) {
 
 		if (proto_type == ETH_P_IP) {
-			length = ((ntohl(__raw_readl(addr +
+			length = ((ntohl(xemaclite_readl(addr +
 					XEL_HEADER_IP_LENGTH_OFFSET +
 					XEL_RXBUFF_OFFSET)) >>
 					XEL_HEADER_SHIFT) &
 					XEL_RPLR_LENGTH_MASK);
+			length = min_t(u16, length, ETH_DATA_LEN);
 			length += ETH_HLEN + ETH_FCS_LEN;
 
 		} else if (proto_type == ETH_P_ARP)
···
 	/* Use the length in the frame, plus the header and trailer */
 	length = proto_type + ETH_HLEN + ETH_FCS_LEN;
 
+	if (WARN_ON(length > maxlen))
+		length = maxlen;
+
 	/* Read from the EmacLite device */
 	xemaclite_aligned_read((u32 __force *) (addr + XEL_RXBUFF_OFFSET),
 				data, length);
 
 	/* Acknowledge the frame */
-	reg_data = __raw_readl(addr + XEL_RSR_OFFSET);
+	reg_data = xemaclite_readl(addr + XEL_RSR_OFFSET);
 	reg_data &= ~XEL_RSR_RECV_DONE_MASK;
-	__raw_writel(reg_data, addr + XEL_RSR_OFFSET);
+	xemaclite_writel(reg_data, addr + XEL_RSR_OFFSET);
 
 	return length;
 }
···
 
 	xemaclite_aligned_write(address_ptr, (u32 __force *) addr, ETH_ALEN);
 
-	__raw_writel(ETH_ALEN, addr + XEL_TPLR_OFFSET);
+	xemaclite_writel(ETH_ALEN, addr + XEL_TPLR_OFFSET);
 
 	/* Update the MAC address in the EmacLite */
-	reg_data = __raw_readl(addr + XEL_TSR_OFFSET);
-	__raw_writel(reg_data | XEL_TSR_PROG_MAC_ADDR, addr + XEL_TSR_OFFSET);
+	reg_data = xemaclite_readl(addr + XEL_TSR_OFFSET);
+	xemaclite_writel(reg_data | XEL_TSR_PROG_MAC_ADDR, addr + XEL_TSR_OFFSET);
 
 	/* Wait for EmacLite to finish with the MAC address update */
-	while ((__raw_readl(addr + XEL_TSR_OFFSET) &
+	while ((xemaclite_readl(addr + XEL_TSR_OFFSET) &
 		XEL_TSR_PROG_MAC_ADDR) != 0)
 		;
 }
···
 
 	skb_reserve(skb, 2);
 
-	len = xemaclite_recv_data(lp, (u8 *) skb->data);
+	len = xemaclite_recv_data(lp, (u8 *) skb->data, len);
 
 	if (!len) {
 		dev->stats.rx_errors++;
···
 	u32 tx_status;
 
 	/* Check if there is Rx Data available */
-	if ((__raw_readl(base_addr + XEL_RSR_OFFSET) &
+	if ((xemaclite_readl(base_addr + XEL_RSR_OFFSET) &
 			 XEL_RSR_RECV_DONE_MASK) ||
-	    (__raw_readl(base_addr + XEL_BUFFER_OFFSET + XEL_RSR_OFFSET)
+	    (xemaclite_readl(base_addr + XEL_BUFFER_OFFSET + XEL_RSR_OFFSET)
 			 & XEL_RSR_RECV_DONE_MASK))
 
 		xemaclite_rx_handler(dev);
 
 	/* Check if the Transmission for the first buffer is completed */
-	tx_status = __raw_readl(base_addr + XEL_TSR_OFFSET);
+	tx_status = xemaclite_readl(base_addr + XEL_TSR_OFFSET);
 	if (((tx_status & XEL_TSR_XMIT_BUSY_MASK) == 0) &&
 		(tx_status & XEL_TSR_XMIT_ACTIVE_MASK) != 0) {
 
 		tx_status &= ~XEL_TSR_XMIT_ACTIVE_MASK;
-		__raw_writel(tx_status, base_addr + XEL_TSR_OFFSET);
+		xemaclite_writel(tx_status, base_addr + XEL_TSR_OFFSET);
 
 		tx_complete = true;
 	}
 
 	/* Check if the Transmission for the second buffer is completed */
-	tx_status = __raw_readl(base_addr + XEL_BUFFER_OFFSET + XEL_TSR_OFFSET);
+	tx_status = xemaclite_readl(base_addr + XEL_BUFFER_OFFSET + XEL_TSR_OFFSET);
 	if (((tx_status & XEL_TSR_XMIT_BUSY_MASK) == 0) &&
 		(tx_status & XEL_TSR_XMIT_ACTIVE_MASK) != 0) {
 
 		tx_status &= ~XEL_TSR_XMIT_ACTIVE_MASK;
-		__raw_writel(tx_status, base_addr + XEL_BUFFER_OFFSET +
-			XEL_TSR_OFFSET);
+		xemaclite_writel(tx_status, base_addr + XEL_BUFFER_OFFSET +
+				 XEL_TSR_OFFSET);
 
 		tx_complete = true;
 	}
···
 	/* wait for the MDIO interface to not be busy or timeout
 	   after some time.
 	*/
-	while (__raw_readl(lp->base_addr + XEL_MDIOCTRL_OFFSET) &
+	while (xemaclite_readl(lp->base_addr + XEL_MDIOCTRL_OFFSET) &
 			XEL_MDIOCTRL_MDIOSTS_MASK) {
 		if (time_before_eq(end, jiffies)) {
 			WARN_ON(1);
···
 	 * MDIO Address register. Set the Status bit in the MDIO Control
 	 * register to start a MDIO read transaction.
 	 */
-	ctrl_reg = __raw_readl(lp->base_addr + XEL_MDIOCTRL_OFFSET);
-	__raw_writel(XEL_MDIOADDR_OP_MASK |
-		     ((phy_id << XEL_MDIOADDR_PHYADR_SHIFT) | reg),
-		     lp->base_addr + XEL_MDIOADDR_OFFSET);
-	__raw_writel(ctrl_reg | XEL_MDIOCTRL_MDIOSTS_MASK,
-		     lp->base_addr + XEL_MDIOCTRL_OFFSET);
+	ctrl_reg = xemaclite_readl(lp->base_addr + XEL_MDIOCTRL_OFFSET);
+	xemaclite_writel(XEL_MDIOADDR_OP_MASK |
+			 ((phy_id << XEL_MDIOADDR_PHYADR_SHIFT) | reg),
+			 lp->base_addr + XEL_MDIOADDR_OFFSET);
+	xemaclite_writel(ctrl_reg | XEL_MDIOCTRL_MDIOSTS_MASK,
+			 lp->base_addr + XEL_MDIOCTRL_OFFSET);
 
 	if (xemaclite_mdio_wait(lp))
 		return -ETIMEDOUT;
 
-	rc = __raw_readl(lp->base_addr + XEL_MDIORD_OFFSET);
+	rc = xemaclite_readl(lp->base_addr + XEL_MDIORD_OFFSET);
 
 	dev_dbg(&lp->ndev->dev,
 		"xemaclite_mdio_read(phy_id=%i, reg=%x) == %x\n",
···
 	 * Data register. Finally, set the Status bit in the MDIO Control
 	 * register to start a MDIO write transaction.
 	 */
-	ctrl_reg = __raw_readl(lp->base_addr + XEL_MDIOCTRL_OFFSET);
-	__raw_writel(~XEL_MDIOADDR_OP_MASK &
-		     ((phy_id << XEL_MDIOADDR_PHYADR_SHIFT) | reg),
-		     lp->base_addr + XEL_MDIOADDR_OFFSET);
-	__raw_writel(val, lp->base_addr + XEL_MDIOWR_OFFSET);
-	__raw_writel(ctrl_reg | XEL_MDIOCTRL_MDIOSTS_MASK,
-		     lp->base_addr + XEL_MDIOCTRL_OFFSET);
+	ctrl_reg = xemaclite_readl(lp->base_addr + XEL_MDIOCTRL_OFFSET);
+	xemaclite_writel(~XEL_MDIOADDR_OP_MASK &
+			 ((phy_id << XEL_MDIOADDR_PHYADR_SHIFT) | reg),
+			 lp->base_addr + XEL_MDIOADDR_OFFSET);
+	xemaclite_writel(val, lp->base_addr + XEL_MDIOWR_OFFSET);
+	xemaclite_writel(ctrl_reg | XEL_MDIOCTRL_MDIOSTS_MASK,
+			 lp->base_addr + XEL_MDIOCTRL_OFFSET);
 
 	return 0;
 }
···
 	/* Enable the MDIO bus by asserting the enable bit in MDIO Control
 	 * register.
 	 */
-	__raw_writel(XEL_MDIOCTRL_MDIOEN_MASK,
-		     lp->base_addr + XEL_MDIOCTRL_OFFSET);
+	xemaclite_writel(XEL_MDIOCTRL_MDIOEN_MASK,
+			 lp->base_addr + XEL_MDIOCTRL_OFFSET);
 
 	bus = mdiobus_alloc();
 	if (!bus) {
···
 	}
 
 	/* Clear the Tx CSR's in case this is a restart */
-	__raw_writel(0, lp->base_addr + XEL_TSR_OFFSET);
-	__raw_writel(0, lp->base_addr + XEL_BUFFER_OFFSET + XEL_TSR_OFFSET);
+	xemaclite_writel(0, lp->base_addr + XEL_TSR_OFFSET);
+	xemaclite_writel(0, lp->base_addr + XEL_BUFFER_OFFSET + XEL_TSR_OFFSET);
 
 	/* Set the MAC address in the EmacLite device */
 	xemaclite_update_address(lp, ndev->dev_addr);
+4 -4
drivers/net/xen-netback/common.h
···
 	/* A subset of struct net_device_stats that contains only the
 	 * fields that are updated in netback.c for each queue.
 	 */
-	unsigned int rx_bytes;
-	unsigned int rx_packets;
-	unsigned int tx_bytes;
-	unsigned int tx_packets;
+	u64 rx_bytes;
+	u64 rx_packets;
+	u64 tx_bytes;
+	u64 tx_packets;
 
 	/* Additional stats used by xenvif */
 	unsigned long rx_gso_checksum_fixup;
+4 -4
drivers/net/xen-netback/interface.c
···
 {
 	struct xenvif *vif = netdev_priv(dev);
 	struct xenvif_queue *queue = NULL;
-	unsigned long rx_bytes = 0;
-	unsigned long rx_packets = 0;
-	unsigned long tx_bytes = 0;
-	unsigned long tx_packets = 0;
+	u64 rx_bytes = 0;
+	u64 rx_packets = 0;
+	u64 tx_bytes = 0;
+	u64 tx_packets = 0;
 	unsigned int index;
 
 	spin_lock(&vif->lock);
+12
drivers/pci/pcie/pme.c
···
 	return 0;
 }
 
+/**
+ * pcie_pme_remove - Prepare PCIe PME service device for removal.
+ * @srv - PCIe service device to remove.
+ */
+static void pcie_pme_remove(struct pcie_device *srv)
+{
+	pcie_pme_suspend(srv);
+	free_irq(srv->irq, srv);
+	kfree(get_service_data(srv));
+}
+
 static struct pcie_port_service_driver pcie_pme_driver = {
 	.name		= "pcie_pme",
 	.port_type	= PCI_EXP_TYPE_ROOT_PORT,
···
 	.probe		= pcie_pme_probe,
 	.suspend	= pcie_pme_suspend,
 	.resume		= pcie_pme_resume,
+	.remove		= pcie_pme_remove,
 };
 
 /**
+4 -4
drivers/s390/scsi/zfcp_fsf.c
···
 int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port)
 {
 	struct zfcp_qdio *qdio = wka_port->adapter->qdio;
-	struct zfcp_fsf_req *req = NULL;
+	struct zfcp_fsf_req *req;
 	int retval = -EIO;
 
 	spin_lock_irq(&qdio->req_q_lock);
···
 		zfcp_fsf_req_free(req);
 out:
 	spin_unlock_irq(&qdio->req_q_lock);
-	if (req && !IS_ERR(req))
+	if (!retval)
 		zfcp_dbf_rec_run_wka("fsowp_1", wka_port, req->req_id);
 	return retval;
 }
···
 int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port)
 {
 	struct zfcp_qdio *qdio = wka_port->adapter->qdio;
-	struct zfcp_fsf_req *req = NULL;
+	struct zfcp_fsf_req *req;
 	int retval = -EIO;
 
 	spin_lock_irq(&qdio->req_q_lock);
···
 		zfcp_fsf_req_free(req);
 out:
 	spin_unlock_irq(&qdio->req_q_lock);
-	if (req && !IS_ERR(req))
+	if (!retval)
 		zfcp_dbf_rec_run_wka("fscwp_1", wka_port, req->req_id);
 	return retval;
 }
+6 -2
drivers/scsi/aacraid/comminit.c
···
 
 static inline int aac_is_msix_mode(struct aac_dev *dev)
 {
-	u32 status;
+	u32 status = 0;
 
-	status = src_readl(dev, MUnit.OMR);
+	if (dev->pdev->device == PMC_DEVICE_S6 ||
+	    dev->pdev->device == PMC_DEVICE_S7 ||
+	    dev->pdev->device == PMC_DEVICE_S8) {
+		status = src_readl(dev, MUnit.OMR);
+	}
 	return (status & AAC_INT_MODE_MSIX);
 }
 
+18
drivers/scsi/mpt3sas/mpt3sas_scsih.c
···
 #include <linux/workqueue.h>
 #include <linux/delay.h>
 #include <linux/pci.h>
+#include <linux/pci-aspm.h>
 #include <linux/interrupt.h>
 #include <linux/aer.h>
 #include <linux/raid_class.h>
···
 	struct MPT3SAS_DEVICE *sas_device_priv_data;
 	u32 response_code = 0;
 	unsigned long flags;
+	unsigned int sector_sz;
 
 	mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
 	scmd = _scsih_scsi_lookup_get_clear(ioc, smid);
···
 	}
 
 	xfer_cnt = le32_to_cpu(mpi_reply->TransferCount);
+
+	/* In case of bogus fw or device, we could end up having
+	 * unaligned partial completion. We can force alignment here,
+	 * then scsi-ml does not need to handle this misbehavior.
+	 */
+	sector_sz = scmd->device->sector_size;
+	if (unlikely(scmd->request->cmd_type == REQ_TYPE_FS && sector_sz &&
+		     xfer_cnt % sector_sz)) {
+		sdev_printk(KERN_INFO, scmd->device,
+			    "unaligned partial completion avoided (xfer_cnt=%u, sector_sz=%u)\n",
+			    xfer_cnt, sector_sz);
+		xfer_cnt = round_down(xfer_cnt, sector_sz);
+	}
+
 	scsi_set_resid(scmd, scsi_bufflen(scmd) - xfer_cnt);
 	if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE)
 		log_info = le32_to_cpu(mpi_reply->IOCLogInfo);
···
 
 	switch (hba_mpi_version) {
 	case MPI2_VERSION:
+		pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S |
+			PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
 		/* Use mpt2sas driver host template for SAS 2.0 HBA's */
 		shost = scsi_host_alloc(&mpt2sas_driver_template,
 			sizeof(struct MPT3SAS_ADAPTER));
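The alignment fix above leans on round_down(), which truncates to a multiple of a power-of-two boundary; a small worked sketch with made-up values for illustration:

	/* Hypothetical values: the device reports a completion that ends
	 * mid-sector, and round_down() forces it back onto a sector
	 * boundary. round_down(x, y) requires y to be a power of two,
	 * which holds for the usual 512/4096-byte sector sizes. */
	unsigned int sector_sz = 512;
	unsigned int xfer_cnt = 1300;	/* bogus, mid-sector completion */

	xfer_cnt = round_down(xfer_cnt, sector_sz);	/* now 1024 */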
+2 -1
drivers/scsi/qla2xxx/qla_isr.c
···
 	 * from a probe failure context.
 	 */
 	if (!ha->rsp_q_map || !ha->rsp_q_map[0])
-		return;
+		goto free_irqs;
 	rsp = ha->rsp_q_map[0];
 
 	if (ha->flags.msix_enabled) {
···
 		free_irq(pci_irq_vector(ha->pdev, 0), rsp);
 	}
 
+free_irqs:
 	pci_free_irq_vectors(ha->pdev);
 }
 
+1 -1
drivers/scsi/qla2xxx/qla_os.c
···
 			/* Don't abort commands in adapter during EEH
 			 * recovery as it's not accessible/responding.
 			 */
-			if (!ha->flags.eeh_busy) {
+			if (GET_CMD_SP(sp) && !ha->flags.eeh_busy) {
 				/* Get a reference to the sp and drop the lock.
 				 * The reference ensures this sp->done() call
 				 * - and not the call in qla2xxx_eh_abort() -
+24 -15
fs/btrfs/compression.c
···
 	unsigned long buf_offset;
 	unsigned long current_buf_start;
 	unsigned long start_byte;
+	unsigned long prev_start_byte;
 	unsigned long working_bytes = total_out - buf_start;
 	unsigned long bytes;
 	char *kaddr;
···
 		if (!bio->bi_iter.bi_size)
 			return 0;
 		bvec = bio_iter_iovec(bio, bio->bi_iter);
-
+		prev_start_byte = start_byte;
 		start_byte = page_offset(bvec.bv_page) - disk_start;
 
 		/*
-		 * make sure our new page is covered by this
-		 * working buffer
+		 * We need to make sure we're only adjusting
+		 * our offset into compression working buffer when
+		 * we're switching pages.  Otherwise we can incorrectly
+		 * keep copying when we were actually done.
 		 */
-		if (total_out <= start_byte)
-			return 1;
+		if (start_byte != prev_start_byte) {
+			/*
+			 * make sure our new page is covered by this
+			 * working buffer
+			 */
+			if (total_out <= start_byte)
+				return 1;
 
-		/*
-		 * the next page in the biovec might not be adjacent
-		 * to the last page, but it might still be found
-		 * inside this working buffer. bump our offset pointer
-		 */
-		if (total_out > start_byte &&
-		    current_buf_start < start_byte) {
-			buf_offset = start_byte - buf_start;
-			working_bytes = total_out - start_byte;
-			current_buf_start = buf_start + buf_offset;
+			/*
+			 * the next page in the biovec might not be adjacent
+			 * to the last page, but it might still be found
+			 * inside this working buffer. bump our offset pointer
+			 */
+			if (total_out > start_byte &&
+			    current_buf_start < start_byte) {
+				buf_offset = start_byte - buf_start;
+				working_bytes = total_out - start_byte;
+				current_buf_start = buf_start + buf_offset;
+			}
 		}
 	}
 
+4 -2
fs/btrfs/ioctl.c
···
 #ifdef CONFIG_COMPAT
 long btrfs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 {
+	/*
+	 * These all access 32-bit values anyway so no further
+	 * handling is necessary.
+	 */
 	switch (cmd) {
 	case FS_IOC32_GETFLAGS:
 		cmd = FS_IOC_GETFLAGS;
···
 	case FS_IOC32_GETVERSION:
 		cmd = FS_IOC_GETVERSION;
 		break;
-	default:
-		return -ENOIOCTLCMD;
 	}
 
 	return btrfs_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
+5
fs/fuse/dev.c
···
 static void queue_interrupt(struct fuse_iqueue *fiq, struct fuse_req *req)
 {
 	spin_lock(&fiq->waitq.lock);
+	if (test_bit(FR_FINISHED, &req->flags)) {
+		spin_unlock(&fiq->waitq.lock);
+		return;
+	}
 	if (list_empty(&req->intr_entry)) {
 		list_add_tail(&req->intr_entry, &fiq->interrupts);
 		wake_up_locked(&fiq->waitq);
···
 		 * code can Oops if the buffer persists after module unload.
 		 */
 		bufs[page_nr].ops = &nosteal_pipe_buf_ops;
+		bufs[page_nr].flags = 0;
 		ret = add_to_pipe(pipe, &bufs[page_nr++]);
 		if (unlikely(ret < 0))
 			break;
+1
fs/splice.c
···
 		buf->len = spd->partial[page_nr].len;
 		buf->private = spd->partial[page_nr].private;
 		buf->ops = spd->ops;
+		buf->flags = 0;
 
 		pipe->nrbufs++;
 		page_nr++;
+6 -7
include/linux/bpf-cgroup.h
···
 	 */
 	struct bpf_prog *prog[MAX_BPF_ATTACH_TYPE];
 	struct bpf_prog __rcu *effective[MAX_BPF_ATTACH_TYPE];
+	bool disallow_override[MAX_BPF_ATTACH_TYPE];
 };
 
 void cgroup_bpf_put(struct cgroup *cgrp);
 void cgroup_bpf_inherit(struct cgroup *cgrp, struct cgroup *parent);
 
-void __cgroup_bpf_update(struct cgroup *cgrp,
-			 struct cgroup *parent,
-			 struct bpf_prog *prog,
-			 enum bpf_attach_type type);
+int __cgroup_bpf_update(struct cgroup *cgrp, struct cgroup *parent,
+			struct bpf_prog *prog, enum bpf_attach_type type,
+			bool overridable);
 
 /* Wrapper for __cgroup_bpf_update() protected by cgroup_mutex */
-void cgroup_bpf_update(struct cgroup *cgrp,
-		       struct bpf_prog *prog,
-		       enum bpf_attach_type type);
+int cgroup_bpf_update(struct cgroup *cgrp, struct bpf_prog *prog,
+		      enum bpf_attach_type type, bool overridable);
 
 int __cgroup_bpf_run_filter_skb(struct sock *sk,
 				struct sk_buff *skb,
+7
include/uapi/linux/bpf.h
···
 
 #define MAX_BPF_ATTACH_TYPE __MAX_BPF_ATTACH_TYPE
 
+/* If BPF_F_ALLOW_OVERRIDE flag is used in BPF_PROG_ATTACH command
+ * to the given target_fd cgroup the descendent cgroup will be able to
+ * override effective bpf program that was inherited from this cgroup
+ */
+#define BPF_F_ALLOW_OVERRIDE	(1U << 0)
+
 #define BPF_PSEUDO_MAP_FD	1
 
 /* flags for BPF_MAP_UPDATE_ELEM command */
···
 		__u32		target_fd;	/* container object to attach to */
 		__u32		attach_bpf_fd;	/* eBPF program to attach */
 		__u32		attach_type;
+		__u32		attach_flags;
 	};
 } __attribute__((aligned(8)));
 
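For context, a minimal userspace sketch of the new attach flow; the cgroup and program file descriptors and the ingress attach type are placeholders, not part of this patch:

	/* Hypothetical sketch: attach prog_fd to a cgroup while allowing
	 * descendant cgroups to override the program. */
	#include <linux/bpf.h>
	#include <string.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	static int attach_overridable(int cgroup_fd, int prog_fd)
	{
		union bpf_attr attr;

		memset(&attr, 0, sizeof(attr));
		attr.target_fd = cgroup_fd;
		attr.attach_bpf_fd = prog_fd;
		attr.attach_type = BPF_CGROUP_INET_INGRESS;
		attr.attach_flags = BPF_F_ALLOW_OVERRIDE;

		/* fails with -EPERM if an ancestor attached without the flag */
		return syscall(__NR_bpf, BPF_PROG_ATTACH, &attr, sizeof(attr));
	}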
+3 -4
include/uapi/linux/l2tp.h
···
 
 #include <linux/types.h>
 #include <linux/socket.h>
-#ifndef __KERNEL__
-#include <netinet/in.h>
-#endif
+#include <linux/in.h>
+#include <linux/in6.h>
 
 #define IPPROTO_L2TP	115
 
···
 	__u32		l2tp_conn_id;	/* Connection ID of tunnel */
 
 	/* Pad to size of `struct sockaddr'. */
-	unsigned char	__pad[sizeof(struct sockaddr) -
+	unsigned char	__pad[__SOCK_SIZE__ -
 			      sizeof(__kernel_sa_family_t) -
 			      sizeof(__be16) - sizeof(struct in_addr) -
 			      sizeof(__u32)];
+3 -4
include/uapi/linux/videodev2.h
···
 	/*
 	 * The default for R'G'B' quantization is always full range, except
 	 * for the BT2020 colorspace. For Y'CbCr the quantization is always
-	 * limited range, except for COLORSPACE_JPEG, SRGB, ADOBERGB,
-	 * XV601 or XV709: those are full range.
+	 * limited range, except for COLORSPACE_JPEG, XV601 or XV709: those
+	 * are full range.
 	 */
 	V4L2_QUANTIZATION_DEFAULT     = 0,
 	V4L2_QUANTIZATION_FULL_RANGE  = 1,
···
 	(((is_rgb_or_hsv) && (colsp) == V4L2_COLORSPACE_BT2020) ? \
 	 V4L2_QUANTIZATION_LIM_RANGE : \
 	 (((is_rgb_or_hsv) || (ycbcr_enc) == V4L2_YCBCR_ENC_XV601 || \
-	  (ycbcr_enc) == V4L2_YCBCR_ENC_XV709 || (colsp) == V4L2_COLORSPACE_JPEG) || \
-	  (colsp) == V4L2_COLORSPACE_ADOBERGB || (colsp) == V4L2_COLORSPACE_SRGB ? \
+	  (ycbcr_enc) == V4L2_YCBCR_ENC_XV709 || (colsp) == V4L2_COLORSPACE_JPEG) ? \
 	  V4L2_QUANTIZATION_FULL_RANGE : V4L2_QUANTIZATION_LIM_RANGE))
 
 enum v4l2_priority {
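As a sanity check on the macro above, a hedged usage sketch: a Y'CbCr buffer in the SRGB colorspace with the default encoding now resolves to limited range:

	/* Sketch: is_rgb_or_hsv = 0 (Y'CbCr), SRGB colorspace, BT.601
	 * encoding -> limited range after this change. */
	enum v4l2_quantization q =
		V4L2_MAP_QUANTIZATION_DEFAULT(0, V4L2_COLORSPACE_SRGB,
					      V4L2_YCBCR_ENC_601);
	/* q == V4L2_QUANTIZATION_LIM_RANGE; for Y'CbCr only JPEG, XV601
	 * and XV709 still map to V4L2_QUANTIZATION_FULL_RANGE. */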
+47 -12
kernel/bpf/cgroup.c
···
 		e = rcu_dereference_protected(parent->bpf.effective[type],
 					      lockdep_is_held(&cgroup_mutex));
 		rcu_assign_pointer(cgrp->bpf.effective[type], e);
+		cgrp->bpf.disallow_override[type] = parent->bpf.disallow_override[type];
 	}
 }
 
···
  *
  * Must be called with cgroup_mutex held.
  */
-void __cgroup_bpf_update(struct cgroup *cgrp,
-			 struct cgroup *parent,
-			 struct bpf_prog *prog,
-			 enum bpf_attach_type type)
+int __cgroup_bpf_update(struct cgroup *cgrp, struct cgroup *parent,
+			struct bpf_prog *prog, enum bpf_attach_type type,
+			bool new_overridable)
 {
-	struct bpf_prog *old_prog, *effective;
+	struct bpf_prog *old_prog, *effective = NULL;
 	struct cgroup_subsys_state *pos;
+	bool overridable = true;
 
-	old_prog = xchg(cgrp->bpf.prog + type, prog);
+	if (parent) {
+		overridable = !parent->bpf.disallow_override[type];
+		effective = rcu_dereference_protected(parent->bpf.effective[type],
+						      lockdep_is_held(&cgroup_mutex));
+	}
 
-	effective = (!prog && parent) ?
-		rcu_dereference_protected(parent->bpf.effective[type],
-					  lockdep_is_held(&cgroup_mutex)) :
-		prog;
+	if (prog && effective && !overridable)
+		/* if parent has non-overridable prog attached, disallow
+		 * attaching new programs to descendent cgroup
+		 */
+		return -EPERM;
+
+	if (prog && effective && overridable != new_overridable)
+		/* if parent has overridable prog attached, only
+		 * allow overridable programs in descendent cgroup
+		 */
+		return -EPERM;
+
+	old_prog = cgrp->bpf.prog[type];
+
+	if (prog) {
+		overridable = new_overridable;
+		effective = prog;
+		if (old_prog &&
+		    cgrp->bpf.disallow_override[type] == new_overridable)
+			/* disallow attaching non-overridable on top
+			 * of existing overridable in this cgroup
+			 * and vice versa
+			 */
+			return -EPERM;
+	}
+
+	if (!prog && !old_prog)
+		/* report error when trying to detach and nothing is attached */
+		return -ENOENT;
+
+	cgrp->bpf.prog[type] = prog;
 
 	css_for_each_descendant_pre(pos, &cgrp->self) {
 		struct cgroup *desc = container_of(pos, struct cgroup, self);
 
 		/* skip the subtree if the descendant has its own program */
-		if (desc->bpf.prog[type] && desc != cgrp)
+		if (desc->bpf.prog[type] && desc != cgrp) {
 			pos = css_rightmost_descendant(pos);
-		else
+		} else {
 			rcu_assign_pointer(desc->bpf.effective[type],
 					   effective);
+			desc->bpf.disallow_override[type] = !overridable;
+		}
 	}
 
 	if (prog)
···
 		bpf_prog_put(old_prog);
 		static_branch_dec(&cgroup_bpf_enabled_key);
 	}
+	return 0;
 }
 
 /**
+14 -6
kernel/bpf/syscall.c
···
 
 #ifdef CONFIG_CGROUP_BPF
 
-#define BPF_PROG_ATTACH_LAST_FIELD attach_type
+#define BPF_PROG_ATTACH_LAST_FIELD attach_flags
 
 static int bpf_prog_attach(const union bpf_attr *attr)
 {
+	enum bpf_prog_type ptype;
 	struct bpf_prog *prog;
 	struct cgroup *cgrp;
-	enum bpf_prog_type ptype;
+	int ret;
 
 	if (!capable(CAP_NET_ADMIN))
 		return -EPERM;
 
 	if (CHECK_ATTR(BPF_PROG_ATTACH))
+		return -EINVAL;
+
+	if (attr->attach_flags & ~BPF_F_ALLOW_OVERRIDE)
 		return -EINVAL;
 
 	switch (attr->attach_type) {
···
 		return PTR_ERR(cgrp);
 	}
 
-	cgroup_bpf_update(cgrp, prog, attr->attach_type);
+	ret = cgroup_bpf_update(cgrp, prog, attr->attach_type,
+				attr->attach_flags & BPF_F_ALLOW_OVERRIDE);
+	if (ret)
+		bpf_prog_put(prog);
 	cgroup_put(cgrp);
 
-	return 0;
+	return ret;
 }
 
 #define BPF_PROG_DETACH_LAST_FIELD attach_type
···
 static int bpf_prog_detach(const union bpf_attr *attr)
 {
 	struct cgroup *cgrp;
+	int ret;
 
 	if (!capable(CAP_NET_ADMIN))
 		return -EPERM;
···
 		if (IS_ERR(cgrp))
 			return PTR_ERR(cgrp);
 
-		cgroup_bpf_update(cgrp, NULL, attr->attach_type);
+		ret = cgroup_bpf_update(cgrp, NULL, attr->attach_type, false);
 		cgroup_put(cgrp);
 		break;
 
···
 		return -EINVAL;
 	}
 
-	return 0;
+	return ret;
 }
 #endif /* CONFIG_CGROUP_BPF */
 
+5 -4
kernel/cgroup.c
···
 subsys_initcall(cgroup_namespaces_init);
 
 #ifdef CONFIG_CGROUP_BPF
-void cgroup_bpf_update(struct cgroup *cgrp,
-		       struct bpf_prog *prog,
-		       enum bpf_attach_type type)
+int cgroup_bpf_update(struct cgroup *cgrp, struct bpf_prog *prog,
+		      enum bpf_attach_type type, bool overridable)
 {
 	struct cgroup *parent = cgroup_parent(cgrp);
+	int ret;
 
 	mutex_lock(&cgroup_mutex);
-	__cgroup_bpf_update(cgrp, parent, prog, type);
+	ret = __cgroup_bpf_update(cgrp, parent, prog, type, overridable);
 	mutex_unlock(&cgroup_mutex);
+	return ret;
 }
 #endif /* CONFIG_CGROUP_BPF */
 
+15 -10
kernel/events/core.c
···
 	int ret;
 };
 
-static int find_cpu_to_read(struct perf_event *event, int local_cpu)
+static int __perf_event_read_cpu(struct perf_event *event, int event_cpu)
 {
-	int event_cpu = event->oncpu;
 	u16 local_pkg, event_pkg;
 
 	if (event->group_caps & PERF_EV_CAP_READ_ACTIVE_PKG) {
-		event_pkg = topology_physical_package_id(event_cpu);
-		local_pkg = topology_physical_package_id(local_cpu);
+		int local_cpu = smp_processor_id();
+
+		event_pkg = topology_physical_package_id(event_cpu);
+		local_pkg = topology_physical_package_id(local_cpu);
 
 		if (event_pkg == local_pkg)
 			return local_cpu;
···
 
 static int perf_event_read(struct perf_event *event, bool group)
 {
-	int ret = 0, cpu_to_read, local_cpu;
+	int event_cpu, ret = 0;
 
 	/*
 	 * If event is enabled and currently active on a CPU, update the
···
 			.ret = 0,
 		};
 
-		local_cpu = get_cpu();
-		cpu_to_read = find_cpu_to_read(event, local_cpu);
-		put_cpu();
+		event_cpu = READ_ONCE(event->oncpu);
+		if ((unsigned)event_cpu >= nr_cpu_ids)
+			return 0;
+
+		preempt_disable();
+		event_cpu = __perf_event_read_cpu(event, event_cpu);
 
 		/*
 		 * Purposely ignore the smp_call_function_single() return
 		 * value.
 		 *
-		 * If event->oncpu isn't a valid CPU it means the event got
+		 * If event_cpu isn't a valid CPU it means the event got
 		 * scheduled out and that will have updated the event count.
 		 *
 		 * Therefore, either way, we'll have an up-to-date event count
 		 * after this.
 		 */
-		(void)smp_call_function_single(cpu_to_read, __perf_event_read, &data, 1);
+		(void)smp_call_function_single(event_cpu, __perf_event_read, &data, 1);
+		preempt_enable();
 		ret = data.ret;
 	} else if (event->state == PERF_EVENT_STATE_INACTIVE) {
 		struct perf_event_context *ctx = event->ctx;
+4 -8
kernel/stacktrace.c
···
 	if (WARN_ON(!trace->entries))
 		return;
 
-	for (i = 0; i < trace->nr_entries; i++) {
-		printk("%*c", 1 + spaces, ' ');
-		print_ip_sym(trace->entries[i]);
-	}
+	for (i = 0; i < trace->nr_entries; i++)
+		printk("%*c%pS\n", 1 + spaces, ' ', (void *)trace->entries[i]);
 }
 EXPORT_SYMBOL_GPL(print_stack_trace);
 
···
 			struct stack_trace *trace, int spaces)
 {
 	int i;
-	unsigned long ip;
 	int generated;
 	int total = 0;
 
···
 		return 0;
 
 	for (i = 0; i < trace->nr_entries; i++) {
-		ip = trace->entries[i];
-		generated = snprintf(buf, size, "%*c[<%p>] %pS\n",
-				     1 + spaces, ' ', (void *) ip, (void *) ip);
+		generated = snprintf(buf, size, "%*c%pS\n", 1 + spaces, ' ',
+				     (void *)trace->entries[i]);
 
 		total += generated;
 
+5
kernel/time/tick-sched.c
···
 		 */
 		if (delta == 0) {
 			tick_nohz_restart(ts, now);
+			/*
+			 * Make sure next tick stop doesn't get fooled by past
+			 * clock deadline
+			 */
+			ts->next_tick = 0;
 			goto out;
 		}
 	}
+2 -1
net/core/neighbour.c
···
 		return;
 
 	set_bit(index, p->data_state);
-	call_netevent_notifiers(NETEVENT_DELAY_PROBE_TIME_UPDATE, p);
+	if (index == NEIGH_VAR_DELAY_PROBE_TIME)
+		call_netevent_notifiers(NETEVENT_DELAY_PROBE_TIME_UPDATE, p);
 	if (!dev) /* NULL dev means this is default value */
 		neigh_copy_dflt_parms(net, p, index);
 }
+6 -6
net/ipv4/arp.c
··· 1263 1263 /* 1264 1264 * ax25 -> ASCII conversion 1265 1265 */ 1266 - static char *ax2asc2(ax25_address *a, char *buf) 1266 + static void ax2asc2(ax25_address *a, char *buf) 1267 1267 { 1268 1268 char c, *s; 1269 1269 int n; ··· 1285 1285 *s++ = n + '0'; 1286 1286 *s++ = '\0'; 1287 1287 1288 - if (*buf == '\0' || *buf == '-') 1289 - return "*"; 1290 - 1291 - return buf; 1288 + if (*buf == '\0' || *buf == '-') { 1289 + buf[0] = '*'; 1290 + buf[1] = '\0'; 1291 + } 1292 1292 } 1293 1293 #endif /* CONFIG_AX25 */ 1294 1294 ··· 1322 1322 } 1323 1323 #endif 1324 1324 sprintf(tbuf, "%pI4", n->primary_key); 1325 - seq_printf(seq, "%-16s 0x%-10x0x%-10x%s * %s\n", 1325 + seq_printf(seq, "%-16s 0x%-10x0x%-10x%-17s * %s\n", 1326 1326 tbuf, hatype, arp_state_to_flags(n), hbuffer, dev->name); 1327 1327 read_unlock(&n->lock); 1328 1328 }
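Note: ax2asc2() decodes an AX.25 callsign, which is stored shifted left one bit with the SSID in the seventh byte; the fix makes it always write its result, including the "*" placeholder for an empty callsign, into the caller's buffer, so the seq_printf() above can rely on the "%-17s" field width. A self-contained userspace sketch of the fixed decoder (the ax25_address layout is copied here for illustration; the kernel's lives in <net/ax25.h>):

    #include <stdio.h>

    typedef struct { char ax25_call[7]; } ax25_address;  /* kernel layout */

    static void ax2asc2(const ax25_address *a, char *buf)
    {
            char c, *s;
            int n;

            for (n = 0, s = buf; n < 6; n++) {
                    c = (a->ax25_call[n] >> 1) & 0x7F; /* undo 1-bit shift */
                    if (c != ' ')
                            *s++ = c;
            }

            *s++ = '-';
            n = (a->ax25_call[6] >> 1) & 0x0F;         /* SSID, 0..15 */
            if (n > 9) {
                    *s++ = '1';
                    n -= 10;
            }
            *s++ = n + '0';
            *s++ = '\0';

            /* Empty callsign: write the placeholder into the buffer
             * instead of returning a string literal, as the fix does. */
            if (buf[0] == '\0' || buf[0] == '-') {
                    buf[0] = '*';
                    buf[1] = '\0';
            }
    }

    int main(void)
    {
            ax25_address a;
            const char *call = "LINUX ";            /* padded to 6 chars */
            char buf[16];
            int i;

            for (i = 0; i < 6; i++)
                    a.ax25_call[i] = call[i] << 1;  /* on-air encoding */
            a.ax25_call[6] = 1 << 1;                /* SSID 1 */

            ax2asc2(&a, buf);
            printf("%s\n", buf);                    /* prints LINUX-1 */
            return 0;
    }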
+2 -2
net/ipv4/tcp_probe.c
··· 117 117 (fwmark > 0 && skb->mark == fwmark)) && 118 118 (full || tp->snd_cwnd != tcp_probe.lastcwnd)) { 119 119 120 - spin_lock(&tcp_probe.lock); 120 + spin_lock_bh(&tcp_probe.lock); 121 121 /* If log fills, just silently drop */ 122 122 if (tcp_probe_avail() > 1) { 123 123 struct tcp_log *p = tcp_probe.log + tcp_probe.head; ··· 157 157 tcp_probe.head = (tcp_probe.head + 1) & (bufsize - 1); 158 158 } 159 159 tcp_probe.lastcwnd = tp->snd_cwnd; 160 - spin_unlock(&tcp_probe.lock); 160 + spin_unlock_bh(&tcp_probe.lock); 161 161 162 162 wake_up(&tcp_probe.wait); 163 163 }
+9 -5
net/ipv6/datagram.c
··· 167 167 if (np->sndflow) 168 168 fl6_flowlabel = usin->sin6_flowinfo & IPV6_FLOWINFO_MASK; 169 169 170 - addr_type = ipv6_addr_type(&usin->sin6_addr); 171 - 172 - if (addr_type == IPV6_ADDR_ANY) { 170 + if (ipv6_addr_any(&usin->sin6_addr)) { 173 171 /* 174 172 * connect to self 175 173 */ 176 - usin->sin6_addr.s6_addr[15] = 0x01; 174 + if (ipv6_addr_v4mapped(&sk->sk_v6_rcv_saddr)) 175 + ipv6_addr_set_v4mapped(htonl(INADDR_LOOPBACK), 176 + &usin->sin6_addr); 177 + else 178 + usin->sin6_addr = in6addr_loopback; 177 179 } 180 + 181 + addr_type = ipv6_addr_type(&usin->sin6_addr); 178 182 179 183 daddr = &usin->sin6_addr; 180 184 181 - if (addr_type == IPV6_ADDR_MAPPED) { 185 + if (addr_type & IPV6_ADDR_MAPPED) { 182 186 struct sockaddr_in sin; 183 187 184 188 if (__ipv6_only_sock(sk)) {
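Note: with this change, connecting to :: on a socket whose bound address is v4-mapped resolves to ::ffff:127.0.0.1 rather than ::1, so the address family stays consistent and the IPV6_ADDR_MAPPED path below is taken. A userspace sketch of the selection logic, using hand-rolled stand-ins for the kernel's ipv6_addr_v4mapped()/ipv6_addr_set_v4mapped() helpers:

    #include <arpa/inet.h>
    #include <netinet/in.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Build ::ffff:<v4>, like the kernel's ipv6_addr_set_v4mapped(). */
    static void set_v4mapped(uint32_t v4_be, struct in6_addr *a)
    {
            memset(a, 0, sizeof(*a));
            a->s6_addr[10] = 0xff;
            a->s6_addr[11] = 0xff;
            memcpy(&a->s6_addr[12], &v4_be, 4);
    }

    /* "connect to ::" -> loopback matching the bound address family. */
    static void pick_loopback(const struct in6_addr *bound,
                              struct in6_addr *dst)
    {
            if (IN6_IS_ADDR_V4MAPPED(bound))
                    set_v4mapped(htonl(INADDR_LOOPBACK), dst);
            else
                    *dst = in6addr_loopback;
    }

    int main(void)
    {
            struct in6_addr bound, dst;
            char s[INET6_ADDRSTRLEN];

            set_v4mapped(htonl(0xc0a80101), &bound); /* ::ffff:192.168.1.1 */
            pick_loopback(&bound, &dst);
            /* prints ::ffff:127.0.0.1 */
            printf("%s\n", inet_ntop(AF_INET6, &dst, s, sizeof(s)));
            return 0;
    }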
+3
net/ipv6/ip6_output.c
··· 1022 1022 } 1023 1023 } 1024 1024 #endif 1025 + if (ipv6_addr_v4mapped(&fl6->saddr) && 1026 + !(ipv6_addr_v4mapped(&fl6->daddr) || ipv6_addr_any(&fl6->daddr))) 1027 + return -EAFNOSUPPORT; 1025 1028 1026 1029 return 0; 1027 1030
+8 -3
net/ipv6/tcp_ipv6.c
··· 149 149 * connect() to INADDR_ANY means loopback (BSD'ism). 150 150 */ 151 151 152 - if (ipv6_addr_any(&usin->sin6_addr)) 153 - usin->sin6_addr.s6_addr[15] = 0x1; 152 + if (ipv6_addr_any(&usin->sin6_addr)) { 153 + if (ipv6_addr_v4mapped(&sk->sk_v6_rcv_saddr)) 154 + ipv6_addr_set_v4mapped(htonl(INADDR_LOOPBACK), 155 + &usin->sin6_addr); 156 + else 157 + usin->sin6_addr = in6addr_loopback; 158 + } 154 159 155 160 addr_type = ipv6_addr_type(&usin->sin6_addr); 156 161 ··· 194 189 * TCP over IPv4 195 190 */ 196 191 197 - if (addr_type == IPV6_ADDR_MAPPED) { 192 + if (addr_type & IPV6_ADDR_MAPPED) { 198 193 u32 exthdrlen = icsk->icsk_ext_hdr_len; 199 194 struct sockaddr_in sin; 200 195
+4
net/ipv6/udp.c
··· 1046 1046 if (addr_len < SIN6_LEN_RFC2133) 1047 1047 return -EINVAL; 1048 1048 daddr = &sin6->sin6_addr; 1049 + if (ipv6_addr_any(daddr) && 1050 + ipv6_addr_v4mapped(&np->saddr)) 1051 + ipv6_addr_set_v4mapped(htonl(INADDR_LOOPBACK), 1052 + daddr); 1049 1053 break; 1050 1054 case AF_INET: 1051 1055 goto do_udp_sendmsg;
+4 -2
net/kcm/kcmsock.c
··· 1044 1044 } else { 1045 1045 /* Message not complete, save state */ 1046 1046 partial_message: 1047 - kcm->seq_skb = head; 1048 - kcm_tx_msg(head)->last_skb = skb; 1047 + if (head) { 1048 + kcm->seq_skb = head; 1049 + kcm_tx_msg(head)->last_skb = skb; 1050 + } 1049 1051 } 1050 1052 1051 1053 KCM_STATS_ADD(kcm->stats.tx_bytes, copied);
+3
net/llc/llc_conn.c
··· 821 821 * another trick required to cope with how the PROCOM state 822 822 * machine works. -acme 823 823 */ 824 + skb_orphan(skb); 825 + sock_hold(sk); 824 826 skb->sk = sk; 827 + skb->destructor = sock_efree; 825 828 } 826 829 if (!sock_owned_by_user(sk)) 827 830 llc_conn_rcv(sk, skb);
+3
net/llc/llc_sap.c
··· 290 290 291 291 ev->type = LLC_SAP_EV_TYPE_PDU; 292 292 ev->reason = 0; 293 + skb_orphan(skb); 294 + sock_hold(sk); 293 295 skb->sk = sk; 296 + skb->destructor = sock_efree; 294 297 llc_sap_state_process(sap, skb); 295 298 } 296 299
+31 -26
net/packet/af_packet.c
··· 1627 1627 1628 1628 static int fanout_add(struct sock *sk, u16 id, u16 type_flags) 1629 1629 { 1630 + struct packet_rollover *rollover = NULL; 1630 1631 struct packet_sock *po = pkt_sk(sk); 1631 1632 struct packet_fanout *f, *match; 1632 1633 u8 type = type_flags & 0xff; ··· 1650 1649 return -EINVAL; 1651 1650 } 1652 1651 1653 - if (!po->running) 1654 - return -EINVAL; 1652 + mutex_lock(&fanout_mutex); 1655 1653 1654 + err = -EINVAL; 1655 + if (!po->running) 1656 + goto out; 1657 + 1658 + err = -EALREADY; 1656 1659 if (po->fanout) 1657 - return -EALREADY; 1660 + goto out; 1658 1661 1659 1662 if (type == PACKET_FANOUT_ROLLOVER || 1660 1663 (type_flags & PACKET_FANOUT_FLAG_ROLLOVER)) { 1661 - po->rollover = kzalloc(sizeof(*po->rollover), GFP_KERNEL); 1662 - if (!po->rollover) 1663 - return -ENOMEM; 1664 - atomic_long_set(&po->rollover->num, 0); 1665 - atomic_long_set(&po->rollover->num_huge, 0); 1666 - atomic_long_set(&po->rollover->num_failed, 0); 1664 + err = -ENOMEM; 1665 + rollover = kzalloc(sizeof(*rollover), GFP_KERNEL); 1666 + if (!rollover) 1667 + goto out; 1668 + atomic_long_set(&rollover->num, 0); 1669 + atomic_long_set(&rollover->num_huge, 0); 1670 + atomic_long_set(&rollover->num_failed, 0); 1671 + po->rollover = rollover; 1667 1672 } 1668 1673 1669 - mutex_lock(&fanout_mutex); 1670 1674 match = NULL; 1671 1675 list_for_each_entry(f, &fanout_list, list) { 1672 1676 if (f->id == id && ··· 1718 1712 } 1719 1713 } 1720 1714 out: 1721 - mutex_unlock(&fanout_mutex); 1722 - if (err) { 1723 - kfree(po->rollover); 1715 + if (err && rollover) { 1716 + kfree(rollover); 1724 1717 po->rollover = NULL; 1725 1718 } 1719 + mutex_unlock(&fanout_mutex); 1726 1720 return err; 1727 1721 } 1728 1722 ··· 1731 1725 struct packet_sock *po = pkt_sk(sk); 1732 1726 struct packet_fanout *f; 1733 1727 1734 - f = po->fanout; 1735 - if (!f) 1736 - return; 1737 - 1738 1728 mutex_lock(&fanout_mutex); 1739 - po->fanout = NULL; 1729 + f = po->fanout; 1730 + if (f) { 1731 + po->fanout = NULL; 1740 1732 1741 - if (atomic_dec_and_test(&f->sk_ref)) { 1742 - list_del(&f->list); 1743 - dev_remove_pack(&f->prot_hook); 1744 - fanout_release_data(f); 1745 - kfree(f); 1733 + if (atomic_dec_and_test(&f->sk_ref)) { 1734 + list_del(&f->list); 1735 + dev_remove_pack(&f->prot_hook); 1736 + fanout_release_data(f); 1737 + kfree(f); 1738 + } 1739 + 1740 + if (po->rollover) 1741 + kfree_rcu(po->rollover, rcu); 1746 1742 } 1747 1743 mutex_unlock(&fanout_mutex); 1748 - 1749 - if (po->rollover) 1750 - kfree_rcu(po->rollover, rcu); 1751 1744 } 1752 1745 1753 1746 static bool packet_extra_vlan_len_allowed(const struct net_device *dev,
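Note: the point of the rework is that the po->running and po->fanout checks, the rollover allocation, and the publication of the result all happen inside one fanout_mutex critical section, so two racing callers can no longer both pass the checks. A minimal pthread sketch of that validate-and-publish-under-one-lock shape (types and sizes invented for illustration):

    #include <pthread.h>
    #include <stdlib.h>

    struct sock_state {
            int running;
            void *fanout;           /* NULL until joined */
    };

    static pthread_mutex_t fanout_mutex = PTHREAD_MUTEX_INITIALIZER;

    static int fanout_join(struct sock_state *po)
    {
            void *rollover = NULL;
            int err = -1;

            pthread_mutex_lock(&fanout_mutex);

            /* Both checks happen under the lock, so a concurrent
             * caller cannot slip in between them and double-join. */
            if (!po->running || po->fanout)
                    goto out;

            rollover = malloc(64);  /* placeholder for the real state */
            if (!rollover)
                    goto out;

            po->fanout = rollover;  /* publish while still locked */
            err = 0;
    out:
            if (err)
                    free(rollover); /* free(NULL) is a no-op */
            pthread_mutex_unlock(&fanout_mutex);
            return err;
    }

    int main(void)
    {
            struct sock_state po = { .running = 1, .fanout = NULL };
            int rc = fanout_join(&po);      /* 0 on success */

            free(po.fanout);
            return rc;
    }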
+1 -1
samples/bpf/test_cgrp2_attach.c
··· 104 104 return EXIT_FAILURE; 105 105 } 106 106 107 - ret = bpf_prog_attach(prog_fd, cg_fd, type); 107 + ret = bpf_prog_attach(prog_fd, cg_fd, type, 0); 108 108 if (ret < 0) { 109 109 printf("Failed to attach prog to cgroup: '%s'\n", 110 110 strerror(errno));
+64 -4
samples/bpf/test_cgrp2_attach2.c
··· 79 79 if (join_cgroup(FOO)) 80 80 goto err; 81 81 82 - if (bpf_prog_attach(drop_prog, foo, BPF_CGROUP_INET_EGRESS)) { 82 + if (bpf_prog_attach(drop_prog, foo, BPF_CGROUP_INET_EGRESS, 1)) { 83 83 log_err("Attaching prog to /foo"); 84 84 goto err; 85 85 } 86 86 87 + printf("Attached DROP prog. This ping in cgroup /foo should fail...\n"); 87 88 assert(system(PING_CMD) != 0); 88 89 89 90 /* Create cgroup /foo/bar, get fd, and join it */ ··· 95 94 if (join_cgroup(BAR)) 96 95 goto err; 97 96 97 + printf("Attached DROP prog. This ping in cgroup /foo/bar should fail...\n"); 98 98 assert(system(PING_CMD) != 0); 99 99 100 - if (bpf_prog_attach(allow_prog, bar, BPF_CGROUP_INET_EGRESS)) { 100 + if (bpf_prog_attach(allow_prog, bar, BPF_CGROUP_INET_EGRESS, 1)) { 101 101 log_err("Attaching prog to /foo/bar"); 102 102 goto err; 103 103 } 104 104 105 + printf("Attached PASS prog. This ping in cgroup /foo/bar should pass...\n"); 105 106 assert(system(PING_CMD) == 0); 106 - 107 107 108 108 if (bpf_prog_detach(bar, BPF_CGROUP_INET_EGRESS)) { 109 109 log_err("Detaching program from /foo/bar"); 110 110 goto err; 111 111 } 112 112 113 + printf("Detached PASS from /foo/bar while DROP is attached to /foo.\n" 114 + "This ping in cgroup /foo/bar should fail...\n"); 113 115 assert(system(PING_CMD) != 0); 114 116 115 - if (bpf_prog_attach(allow_prog, bar, BPF_CGROUP_INET_EGRESS)) { 117 + if (bpf_prog_attach(allow_prog, bar, BPF_CGROUP_INET_EGRESS, 1)) { 116 118 log_err("Attaching prog to /foo/bar"); 117 119 goto err; 118 120 } ··· 125 121 goto err; 126 122 } 127 123 124 + printf("Attached PASS from /foo/bar and detached DROP from /foo.\n" 125 + "This ping in cgroup /foo/bar should pass...\n"); 128 126 assert(system(PING_CMD) == 0); 127 + 128 + if (bpf_prog_attach(allow_prog, bar, BPF_CGROUP_INET_EGRESS, 1)) { 129 + log_err("Attaching prog to /foo/bar"); 130 + goto err; 131 + } 132 + 133 + if (!bpf_prog_attach(allow_prog, bar, BPF_CGROUP_INET_EGRESS, 0)) { 134 + errno = 0; 135 + log_err("Unexpected success attaching prog to /foo/bar"); 136 + goto err; 137 + } 138 + 139 + if (bpf_prog_detach(bar, BPF_CGROUP_INET_EGRESS)) { 140 + log_err("Detaching program from /foo/bar"); 141 + goto err; 142 + } 143 + 144 + if (!bpf_prog_detach(foo, BPF_CGROUP_INET_EGRESS)) { 145 + errno = 0; 146 + log_err("Unexpected success in double detach from /foo"); 147 + goto err; 148 + } 149 + 150 + if (bpf_prog_attach(allow_prog, foo, BPF_CGROUP_INET_EGRESS, 0)) { 151 + log_err("Attaching non-overridable prog to /foo"); 152 + goto err; 153 + } 154 + 155 + if (!bpf_prog_attach(allow_prog, bar, BPF_CGROUP_INET_EGRESS, 0)) { 156 + errno = 0; 157 + log_err("Unexpected success attaching non-overridable prog to /foo/bar"); 158 + goto err; 159 + } 160 + 161 + if (!bpf_prog_attach(allow_prog, bar, BPF_CGROUP_INET_EGRESS, 1)) { 162 + errno = 0; 163 + log_err("Unexpected success attaching overridable prog to /foo/bar"); 164 + goto err; 165 + } 166 + 167 + if (!bpf_prog_attach(allow_prog, foo, BPF_CGROUP_INET_EGRESS, 1)) { 168 + errno = 0; 169 + log_err("Unexpected success attaching overridable prog to /foo"); 170 + goto err; 171 + } 172 + 173 + if (bpf_prog_attach(drop_prog, foo, BPF_CGROUP_INET_EGRESS, 0)) { 174 + log_err("Attaching different non-overridable prog to /foo"); 175 + goto err; 176 + } 129 177 130 178 goto out; 131 179 ··· 188 132 close(foo); 189 133 close(bar); 190 134 cleanup_cgroup_environment(); 135 + if (!rc) 136 + printf("PASS\n"); 137 + else 138 + printf("FAIL\n"); 191 139 return rc; 192 140 }
+1 -1
samples/bpf/test_cgrp2_sock.c
··· 75 75 return EXIT_FAILURE; 76 76 } 77 77 78 - ret = bpf_prog_attach(prog_fd, cg_fd, BPF_CGROUP_INET_SOCK_CREATE); 78 + ret = bpf_prog_attach(prog_fd, cg_fd, BPF_CGROUP_INET_SOCK_CREATE, 0); 79 79 if (ret < 0) { 80 80 printf("Failed to attach prog to cgroup: '%s'\n", 81 81 strerror(errno));
+1 -1
samples/bpf/test_cgrp2_sock2.c
··· 55 55 } 56 56 57 57 ret = bpf_prog_attach(prog_fd[filter_id], cg_fd, 58 - BPF_CGROUP_INET_SOCK_CREATE); 58 + BPF_CGROUP_INET_SOCK_CREATE, 0); 59 59 if (ret < 0) { 60 60 printf("Failed to attach prog to cgroup: '%s'\n", 61 61 strerror(errno));
+7
tools/include/uapi/linux/bpf.h
··· 123 123 
 124 124 #define MAX_BPF_ATTACH_TYPE __MAX_BPF_ATTACH_TYPE
 125 125 
 126 + /* If the BPF_F_ALLOW_OVERRIDE flag is used in the BPF_PROG_ATTACH command
 127 + * for the given target_fd cgroup, descendant cgroups will be able to
 128 + * override the effective bpf program that was inherited from this cgroup
 129 + */
 130 + #define BPF_F_ALLOW_OVERRIDE (1U << 0)
 131 + 
 126 132 #define BPF_PSEUDO_MAP_FD 1
 127 133 
 128 134 /* flags for BPF_MAP_UPDATE_ELEM command */
 ··· 184 178 __u32 target_fd; /* container object to attach to */
 185 179 __u32 attach_bpf_fd; /* eBPF program to attach */
 186 180 __u32 attach_type;
 181 + __u32 attach_flags;
 187 182 };
 188 183 } __attribute__((aligned(8)));
+3 -1
tools/lib/bpf/bpf.c
··· 168 168 return sys_bpf(BPF_OBJ_GET, &attr, sizeof(attr)); 169 169 } 170 170 171 - int bpf_prog_attach(int prog_fd, int target_fd, enum bpf_attach_type type) 171 + int bpf_prog_attach(int prog_fd, int target_fd, enum bpf_attach_type type, 172 + unsigned int flags) 172 173 { 173 174 union bpf_attr attr; 174 175 ··· 177 176 attr.target_fd = target_fd; 178 177 attr.attach_bpf_fd = prog_fd; 179 178 attr.attach_type = type; 179 + attr.attach_flags = flags; 180 180 181 181 return sys_bpf(BPF_PROG_ATTACH, &attr, sizeof(attr)); 182 182 }
+2 -1
tools/lib/bpf/bpf.h
··· 41 41 int bpf_map_get_next_key(int fd, const void *key, void *next_key); 42 42 int bpf_obj_pin(int fd, const char *pathname); 43 43 int bpf_obj_get(const char *pathname); 44 - int bpf_prog_attach(int prog_fd, int attachable_fd, enum bpf_attach_type type); 44 + int bpf_prog_attach(int prog_fd, int attachable_fd, enum bpf_attach_type type, 45 + unsigned int flags); 45 46 int bpf_prog_detach(int attachable_fd, enum bpf_attach_type type); 46 47 47 48
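Note: taken together, the new attach_flags field, the BPF_F_ALLOW_OVERRIDE flag, and the extra bpf_prog_attach() parameter let a caller choose between an overridable attachment and the default exclusive one, which is exactly what the samples above exercise. A hedged usage sketch against the updated headers; the descriptors are assumed to come from elsewhere (e.g. a loaded program and an open cgroup directory), so this is a fragment meant to slot into code like the samples, not a standalone program:

    #include <linux/bpf.h>
    #include <stdio.h>
    #include "bpf.h"        /* the tools/lib/bpf wrapper shown above */

    /* Attach prog_fd to the cgroup behind cg_fd so that descendant
     * cgroups may override it; pass 0 instead of BPF_F_ALLOW_OVERRIDE
     * for the default exclusive attachment. */
    static int attach_overridable(int prog_fd, int cg_fd)
    {
            int ret = bpf_prog_attach(prog_fd, cg_fd,
                                      BPF_CGROUP_INET_EGRESS,
                                      BPF_F_ALLOW_OVERRIDE);
            if (ret < 0)
                    perror("bpf_prog_attach");
            return ret;
    }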
+1 -1
tools/perf/builtin-diff.c
··· 1199 1199 BUG_ON(1); 1200 1200 } 1201 1201 1202 - perf_hpp__register_sort_field(fmt); 1202 + perf_hpp__prepend_sort_field(fmt); 1203 1203 return 0; 1204 1204 } 1205 1205
+10
tools/perf/ui/hist.c
··· 521 521 list_add_tail(&format->sort_list, &list->sorts); 522 522 } 523 523 524 + void perf_hpp_list__prepend_sort_field(struct perf_hpp_list *list, 525 + struct perf_hpp_fmt *format) 526 + { 527 + list_add(&format->sort_list, &list->sorts); 528 + } 529 + 524 530 void perf_hpp__column_unregister(struct perf_hpp_fmt *format) 525 531 { 526 532 list_del(&format->list); ··· 565 559 /* append sort keys to output field */ 566 560 perf_hpp_list__for_each_sort_list(list, fmt) { 567 561 struct perf_hpp_fmt *pos; 562 + 563 + /* skip sort-only fields ("sort_compute" in perf diff) */ 564 + if (!fmt->entry && !fmt->color) 565 + continue; 568 566 569 567 perf_hpp_list__for_each_format(list, pos) { 570 568 if (fmt_equal(fmt, pos))
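Note: perf_hpp_list__prepend_sort_field() differs from the register variant only in calling list_add() instead of list_add_tail(); on the kernel-style circular doubly-linked lists used here, that is the difference between inserting at the front and at the back, which is how perf diff gets its sort key evaluated first. A standalone sketch of the two primitives, trimmed from the linux/list.h idiom:

    #include <stdio.h>

    struct list_head { struct list_head *next, *prev; };

    #define LIST_HEAD_INIT(name) { &(name), &(name) }

    static void __list_add(struct list_head *new,
                           struct list_head *prev, struct list_head *next)
    {
            next->prev = new;
            new->next = next;
            new->prev = prev;
            prev->next = new;
    }

    /* insert right after the head: the new node becomes first */
    static void list_add(struct list_head *new, struct list_head *head)
    {
            __list_add(new, head, head->next);
    }

    /* insert right before the head: the new node becomes last */
    static void list_add_tail(struct list_head *new, struct list_head *head)
    {
            __list_add(new, head->prev, head);
    }

    struct item { struct list_head node; int val; };

    int main(void)
    {
            struct list_head sorts = LIST_HEAD_INIT(sorts);
            struct item a = { .val = 1 }, b = { .val = 2 };
            struct list_head *p;

            list_add_tail(&a.node, &sorts); /* register: append */
            list_add(&b.node, &sorts);      /* prepend: b is walked first */

            for (p = sorts.next; p != &sorts; p = p->next)
                    printf("%d\n", ((struct item *)p)->val); /* 2, then 1 */
            return 0;
    }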
+9 -2
tools/perf/util/callchain.c
··· 437 437 } 438 438 call->ip = cursor_node->ip; 439 439 call->ms.sym = cursor_node->sym; 440 - call->ms.map = cursor_node->map; 440 + call->ms.map = map__get(cursor_node->map); 441 441 442 442 if (cursor_node->branch) { 443 443 call->branch_count = 1; ··· 477 477 478 478 list_for_each_entry_safe(call, tmp, &new->val, list) { 479 479 list_del(&call->list); 480 + map__zput(call->ms.map); 480 481 free(call); 481 482 } 482 483 free(new); ··· 762 761 list->ms.map, list->ms.sym, 763 762 false, NULL, 0, 0); 764 763 list_del(&list->list); 764 + map__zput(list->ms.map); 765 765 free(list); 766 766 } 767 767 ··· 813 811 } 814 812 815 813 node->ip = ip; 816 - node->map = map; 814 + map__zput(node->map); 815 + node->map = map__get(map); 817 816 node->sym = sym; 818 817 node->branch = branch; 819 818 node->nr_loop_iter = nr_loop_iter; ··· 1145 1142 1146 1143 list_for_each_entry_safe(list, tmp, &node->parent_val, list) { 1147 1144 list_del(&list->list); 1145 + map__zput(list->ms.map); 1148 1146 free(list); 1149 1147 } 1150 1148 1151 1149 list_for_each_entry_safe(list, tmp, &node->val, list) { 1152 1150 list_del(&list->list); 1151 + map__zput(list->ms.map); 1153 1152 free(list); 1154 1153 } 1155 1154 ··· 1215 1210 goto out; 1216 1211 *new = *chain; 1217 1212 new->has_children = false; 1213 + map__get(new->ms.map); 1218 1214 list_add_tail(&new->list, &head); 1219 1215 } 1220 1216 parent = parent->parent; ··· 1236 1230 out: 1237 1231 list_for_each_entry_safe(chain, new, &head, list) { 1238 1232 list_del(&chain->list); 1233 + map__zput(chain->ms.map); 1239 1234 free(chain); 1240 1235 } 1241 1236 return -ENOMEM;
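Note: each callchain node now owns a reference on its map: map__get() is taken when the pointer is stored, and map__zput() both drops the reference and clears the pointer wherever nodes are freed or reused, so a map can no longer be freed while a cursor still points at it. A simplified sketch of perf's get/put/zput convention (the real map__get() uses atomic refcounts; plain ints here for brevity):

    #include <stdlib.h>

    struct map {
            int refcnt;             /* perf uses an atomic refcount */
    };

    static struct map *map__get(struct map *m)
    {
            if (m)
                    m->refcnt++;
            return m;       /* handy for call->ms.map = map__get(src) */
    }

    static void map__put(struct map *m)
    {
            if (m && --m->refcnt == 0)
                    free(m);
    }

    /* Drop the reference AND clear the stale pointer in one step, so a
     * reused slot (e.g. a recycled cursor node) starts clean. */
    #define map__zput(m) do { map__put(m); (m) = NULL; } while (0)

    int main(void)
    {
            struct map *stored;
            struct map *m = calloc(1, sizeof(*m));

            if (!m)
                    return 1;
            m->refcnt = 1;          /* creator's reference */
            stored = map__get(m);   /* node takes its own reference */
            map__put(m);            /* creator drops; node keeps it alive */
            map__zput(stored);      /* last ref: freed, pointer NULLed */
            return 0;
    }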
+6
tools/perf/util/callchain.h
··· 5 5 #include <linux/list.h> 6 6 #include <linux/rbtree.h> 7 7 #include "event.h" 8 + #include "map.h" 8 9 #include "symbol.h" 9 10 10 11 #define HELP_PAD "\t\t\t\t" ··· 185 184 */ 186 185 static inline void callchain_cursor_reset(struct callchain_cursor *cursor) 187 186 { 187 + struct callchain_cursor_node *node; 188 + 188 189 cursor->nr = 0; 189 190 cursor->last = &cursor->first; 191 + 192 + for (node = cursor->first; node != NULL; node = node->next) 193 + map__zput(node->map); 190 194 } 191 195 192 196 int callchain_cursor_append(struct callchain_cursor *cursor, u64 ip,
+7
tools/perf/util/hist.c
··· 1 1 #include "util.h" 2 2 #include "build-id.h" 3 3 #include "hist.h" 4 + #include "map.h" 4 5 #include "session.h" 5 6 #include "sort.h" 6 7 #include "evlist.h" ··· 1020 1019 int max_stack_depth, void *arg) 1021 1020 { 1022 1021 int err, err2; 1022 + struct map *alm = NULL; 1023 + 1024 + if (al && al->map) 1025 + alm = map__get(al->map); 1023 1026 1024 1027 err = sample__resolve_callchain(iter->sample, &callchain_cursor, &iter->parent, 1025 1028 iter->evsel, al, max_stack_depth); ··· 1062 1057 err2 = iter->ops->finish_entry(iter, al); 1063 1058 if (!err) 1064 1059 err = err2; 1060 + 1061 + map__put(alm); 1065 1062 1066 1063 return err; 1067 1064 }
+7
tools/perf/util/hist.h
··· 283 283 struct perf_hpp_fmt *format); 284 284 void perf_hpp_list__register_sort_field(struct perf_hpp_list *list, 285 285 struct perf_hpp_fmt *format); 286 + void perf_hpp_list__prepend_sort_field(struct perf_hpp_list *list, 287 + struct perf_hpp_fmt *format); 286 288 287 289 static inline void perf_hpp__column_register(struct perf_hpp_fmt *format) 288 290 { ··· 294 292 static inline void perf_hpp__register_sort_field(struct perf_hpp_fmt *format) 295 293 { 296 294 perf_hpp_list__register_sort_field(&perf_hpp_list, format); 295 + } 296 + 297 + static inline void perf_hpp__prepend_sort_field(struct perf_hpp_fmt *format) 298 + { 299 + perf_hpp_list__prepend_sort_field(&perf_hpp_list, format); 297 300 } 298 301 299 302 #define perf_hpp_list__for_each_format(_list, format) \