Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

+264 -181
+9 -3
MAINTAINERS
···
1667 1667  S: Supported
1668 1668  F: drivers/tty/serial/atmel_serial.c
1669 1669
1670 +  ATMEL Audio ALSA driver
1671 +  M: Bo Shen <voice.shen@atmel.com>
1672 +  L: alsa-devel@alsa-project.org (moderated for non-subscribers)
1673 +  S: Supported
1674 +  F: sound/soc/atmel
1675 +
1670 1676  ATMEL DMA DRIVER
1671 1677  M: Nicolas Ferre <nicolas.ferre@atmel.com>
1672 1678  L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
···
4797 4791  S: Maintained
4798 4792  F: drivers/char/hw_random/ixp4xx-rng.c
4799 4793
4800 -  INTEL ETHERNET DRIVERS (e100/e1000/e1000e/igb/igbvf/ixgb/ixgbe/ixgbevf/i40e/i40evf)
4794 +  INTEL ETHERNET DRIVERS (e100/e1000/e1000e/fm10k/igb/igbvf/ixgb/ixgbe/ixgbevf/i40e/i40evf)
4801 4795  M: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
4802 4796  M: Jesse Brandeburg <jesse.brandeburg@intel.com>
4803 4797  M: Bruce Allan <bruce.w.allan@intel.com>
4804 4798  M: Carolyn Wyborny <carolyn.wyborny@intel.com>
4805 4799  M: Don Skidmore <donald.c.skidmore@intel.com>
4806 4800  M: Greg Rose <gregory.v.rose@intel.com>
4807 -  M: Alex Duyck <alexander.h.duyck@intel.com>
4801 +  M: Matthew Vick <matthew.vick@intel.com>
4808 4802  M: John Ronciak <john.ronciak@intel.com>
4809 4803  M: Mitch Williams <mitch.a.williams@intel.com>
4810 4804  M: Linux NICS <linux.nics@intel.com>
···
5492 5486  LINUX FOR POWERPC EMBEDDED MPC5XXX
5493 5487  M: Anatolij Gustschin <agust@denx.de>
5494 5488  L: linuxppc-dev@lists.ozlabs.org
5495 -  T: git git://git.denx.de/linux-2.6-agust.git
5489 +  T: git git://git.denx.de/linux-denx-agust.git
5496 5490  S: Maintained
5497 5491  F: arch/powerpc/platforms/512x/
5498 5492  F: arch/powerpc/platforms/52xx/
+4 -3
drivers/cpufreq/cpufreq.c
···
1658 1658      if (!cpufreq_driver)
1659 1659          return;
1660 1660
1661 -      cpufreq_suspended = true;
1662 -
1663 1661      if (!has_target())
1664 -          return;
1662 +          goto suspend;
1665 1663
1666 1664      pr_debug("%s: Suspending Governors\n", __func__);
1667 1665
···
1672 1674              pr_err("%s: Failed to suspend driver: %p\n", __func__,
1673 1675                  policy);
1674 1676      }
1677 +
1678 + suspend:
1679 +      cpufreq_suspended = true;
1675 1680  }
1676 1681
1677 1682  /**
+2 -2
drivers/cpufreq/integrator-cpufreq.c
···
213 213      return cpufreq_register_driver(&integrator_driver);
214 214  }
215 215
216 -  static void __exit integrator_cpufreq_remove(struct platform_device *pdev)
216 +  static int __exit integrator_cpufreq_remove(struct platform_device *pdev)
217 217  {
218 -      cpufreq_unregister_driver(&integrator_driver);
218 +      return cpufreq_unregister_driver(&integrator_driver);
219 219  }
220 220
221 221  static const struct of_device_id integrator_cpufreq_match[] = {
+1 -1
drivers/cpufreq/pcc-cpufreq.c
···
204 204      u32 input_buffer;
205 205      int cpu;
206 206
207 -      spin_lock(&pcc_lock);
208 207      cpu = policy->cpu;
209 208      pcc_cpu_data = per_cpu_ptr(pcc_cpu_info, cpu);
210 209
···
215 216      freqs.old = policy->cur;
216 217      freqs.new = target_freq;
217 218      cpufreq_freq_transition_begin(policy, &freqs);
219 +      spin_lock(&pcc_lock);
218 220
219 221      input_buffer = 0x1 | (((target_freq * 100)
220 222              / (ioread32(&pcch_hdr->nominal) * 1000)) << 8);
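The pcc-cpufreq hunk is a lock-ordering fix: pcc_lock used to be taken before cpufreq_freq_transition_begin(), which can wait for a concurrent transition to finish, so the driver could end up sleeping while holding the lock the other transition needed. A minimal userspace sketch of that hazard and the fixed ordering (pthread-based; the names are mine, not the driver's):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t hw_lock = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in for cpufreq_freq_transition_begin(): may block until a
 * concurrent transition -- which itself needs hw_lock -- completes. */
static void wait_for_other_transition(void) { /* may sleep */ }

static void program_hardware(unsigned int khz)
{
    printf("programming %u kHz\n", khz);
}

static void set_frequency(unsigned int khz)
{
    /* Broken ordering (the old code's shape): take hw_lock first, then
     * wait_for_other_transition().  If the transition we wait for needs
     * hw_lock to finish, neither thread makes progress. */

    wait_for_other_transition();    /* fixed: block while lock-free */

    pthread_mutex_lock(&hw_lock);   /* lock only around the hw access */
    program_hardware(khz);
    pthread_mutex_unlock(&hw_lock);
}

int main(void)
{
    set_frequency(1200000);
    return 0;
}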
+13 -1
drivers/gpu/drm/i915/i915_gem_gtt.c
···
1310 1310      POSTING_READ(RING_FAULT_REG(&dev_priv->ring[RCS]));
1311 1311  }
1312 1312
1313 +  static void i915_ggtt_flush(struct drm_i915_private *dev_priv)
1314 +  {
1315 +      if (INTEL_INFO(dev_priv->dev)->gen < 6) {
1316 +          intel_gtt_chipset_flush();
1317 +      } else {
1318 +          I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
1319 +          POSTING_READ(GFX_FLSH_CNTL_GEN6);
1320 +      }
1321 +  }
1322 +
1313 1323  void i915_gem_suspend_gtt_mappings(struct drm_device *dev)
1314 1324  {
1315 1325      struct drm_i915_private *dev_priv = dev->dev_private;
···
1336 1326                  dev_priv->gtt.base.start,
1337 1327                  dev_priv->gtt.base.total,
1338 1328                  true);
1329 +
1330 +      i915_ggtt_flush(dev_priv);
1339 1331  }
1340 1332
1341 1333  void i915_gem_restore_gtt_mappings(struct drm_device *dev)
···
1390 1378          gen6_write_pdes(container_of(vm, struct i915_hw_ppgtt, base));
1391 1379      }
1392 1380
1393 -      i915_gem_chipset_flush(dev);
1381 +      i915_ggtt_flush(dev_priv);
1394 1382  }
1395 1383
1396 1384  int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
+11 -5
drivers/gpu/drm/i915/intel_opregion.c
···
396 396          return -EINVAL;
397 397      }
398 398
399 +  /*
400 +   * If the vendor backlight interface is not in use and ACPI backlight interface
401 +   * is broken, do not bother processing backlight change requests from firmware.
402 +   */
403 +  static bool should_ignore_backlight_request(void)
404 +  {
405 +      return acpi_video_backlight_support() &&
406 +             !acpi_video_verify_backlight_support();
407 +  }
408 +
399 409  static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
400 410  {
401 411      struct drm_i915_private *dev_priv = dev->dev_private;
···
414 404
415 405      DRM_DEBUG_DRIVER("bclp = 0x%08x\n", bclp);
416 406
417 -      /*
418 -       * If the acpi_video interface is not supposed to be used, don't
419 -       * bother processing backlight level change requests from firmware.
420 -       */
421 -      if (!acpi_video_verify_backlight_support()) {
407 +      if (should_ignore_backlight_request()) {
422 408          DRM_DEBUG_KMS("opregion backlight request ignored\n");
423 409          return 0;
424 410      }
+2 -1
drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
···
1763 1763      const int or = ffs(outp->or) - 1;
1764 1764      const u32 loff = (or * 0x800) + (link * 0x80);
1765 1765      const u16 mask = (outp->sorconf.link << 6) | outp->or;
1766 +      struct dcb_output match;
1766 1767      u8 ver, hdr;
1767 1768
1768 -      if (dcb_outp_match(bios, DCB_OUTPUT_DP, mask, &ver, &hdr, outp))
1769 +      if (dcb_outp_match(bios, DCB_OUTPUT_DP, mask, &ver, &hdr, &match))
1769 1770          nv_mask(priv, 0x61c10c + loff, 0x00000001, 0x00000000);
1770 1771  }
1771 1772
+6 -1
drivers/gpu/drm/nouveau/nouveau_chan.c
···
285 285      struct nouveau_software_chan *swch;
286 286      struct nv_dma_v0 args = {};
287 287      int ret, i;
288 +      bool save;
288 289
289 290      nvif_object_map(chan->object);
290 291
···
387 386      }
388 387
389 388      /* initialise synchronisation */
390 -      return nouveau_fence(chan->drm)->context_new(chan);
389 +      save = cli->base.super;
390 +      cli->base.super = true; /* hack until fencenv50 fixed */
391 +      ret = nouveau_fence(chan->drm)->context_new(chan);
392 +      cli->base.super = save;
393 +      return ret;
391 394  }
392 395
393 396  int
+11 -12
drivers/gpu/drm/nouveau/nouveau_display.c
···
550 550  }
551 551
552 552  int
553 -  nouveau_display_suspend(struct drm_device *dev)
553 +  nouveau_display_suspend(struct drm_device *dev, bool runtime)
554 554  {
555 -      struct nouveau_drm *drm = nouveau_drm(dev);
556 555      struct drm_crtc *crtc;
557 556
558 557      nouveau_display_fini(dev);
559 558
560 -      NV_INFO(drm, "unpinning framebuffer(s)...\n");
561 559      list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
562 560          struct nouveau_framebuffer *nouveau_fb;
563 561
···
577 579  }
578 580
579 581  void
580 -  nouveau_display_repin(struct drm_device *dev)
582 +  nouveau_display_resume(struct drm_device *dev, bool runtime)
581 583  {
582 584      struct nouveau_drm *drm = nouveau_drm(dev);
583 585      struct drm_crtc *crtc;
584 -      int ret;
586 +      int ret, head;
585 587
588 +      /* re-pin fb/cursors */
586 589      list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
587 590          struct nouveau_framebuffer *nouveau_fb;
588 591
···
605 606          if (ret)
606 607              NV_ERROR(drm, "Could not pin/map cursor.\n");
607 608      }
608 -  }
609 -
610 -  void
611 -  nouveau_display_resume(struct drm_device *dev)
612 -  {
613 -      struct drm_crtc *crtc;
614 -      int head;
615 609
616 610      nouveau_display_init(dev);
617 611
···
618 626      /* Make sure that drm and hw vblank irqs get resumed if needed. */
619 627      for (head = 0; head < dev->mode_config.num_crtc; head++)
620 628          drm_vblank_on(dev, head);
629 +
630 +      /* This should ensure we don't hit a locking problem when someone
631 +       * wakes us up via a connector.  We should never go into suspend
632 +       * while the display is on anyways.
633 +       */
634 +      if (runtime)
635 +          return;
621 636
622 637      drm_helper_resume_force_mode(dev);
623 638
+2 -3
drivers/gpu/drm/nouveau/nouveau_display.h
···
63 63  void nouveau_display_destroy(struct drm_device *dev);
64 64  int nouveau_display_init(struct drm_device *dev);
65 65  void nouveau_display_fini(struct drm_device *dev);
66 -  int nouveau_display_suspend(struct drm_device *dev);
67 -  void nouveau_display_repin(struct drm_device *dev);
68 -  void nouveau_display_resume(struct drm_device *dev);
66 +  int nouveau_display_suspend(struct drm_device *dev, bool runtime);
67 +  void nouveau_display_resume(struct drm_device *dev, bool runtime);
69 68  int nouveau_display_vblank_enable(struct drm_device *, int);
70 69  void nouveau_display_vblank_disable(struct drm_device *, int);
71 70  int nouveau_display_scanoutpos(struct drm_device *, int, unsigned int,
+13 -38
drivers/gpu/drm/nouveau/nouveau_drm.c
···
547 547      struct nouveau_cli *cli;
548 548      int ret;
549 549
550 -      if (dev->mode_config.num_crtc && !runtime) {
550 +      if (dev->mode_config.num_crtc) {
551 +          NV_INFO(drm, "suspending console...\n");
552 +          nouveau_fbcon_set_suspend(dev, 1);
551 553          NV_INFO(drm, "suspending display...\n");
552 -          ret = nouveau_display_suspend(dev);
554 +          ret = nouveau_display_suspend(dev, runtime);
553 555          if (ret)
554 556              return ret;
555 557      }
···
605 603  fail_display:
606 604      if (dev->mode_config.num_crtc) {
607 605          NV_INFO(drm, "resuming display...\n");
608 -          nouveau_display_resume(dev);
606 +          nouveau_display_resume(dev, runtime);
609 607      }
610 608      return ret;
611 609  }
···
620 618          drm_dev->switch_power_state == DRM_SWITCH_POWER_DYNAMIC_OFF)
621 619          return 0;
622 620
623 -      if (drm_dev->mode_config.num_crtc)
624 -          nouveau_fbcon_set_suspend(drm_dev, 1);
625 -
626 621      ret = nouveau_do_suspend(drm_dev, false);
627 622      if (ret)
628 623          return ret;
···
632 633  }
633 634
634 635  static int
635 -  nouveau_do_resume(struct drm_device *dev)
636 +  nouveau_do_resume(struct drm_device *dev, bool runtime)
636 637  {
637 638      struct nouveau_drm *drm = nouveau_drm(dev);
638 639      struct nouveau_cli *cli;
···
657 658
658 659      if (dev->mode_config.num_crtc) {
659 660          NV_INFO(drm, "resuming display...\n");
660 -          nouveau_display_repin(dev);
661 +          nouveau_display_resume(dev, runtime);
662 +          NV_INFO(drm, "resuming console...\n");
663 +          nouveau_fbcon_set_suspend(dev, 0);
661 664      }
662 665
663 666      return 0;
···
682 681          return ret;
683 682      pci_set_master(pdev);
684 683
685 -      ret = nouveau_do_resume(drm_dev);
686 -      if (ret)
687 -          return ret;
688 -
689 -      if (drm_dev->mode_config.num_crtc) {
690 -          nouveau_display_resume(drm_dev);
691 -          nouveau_fbcon_set_suspend(drm_dev, 0);
692 -      }
693 -
694 -      return 0;
684 +      return nouveau_do_resume(drm_dev, false);
695 685  }
696 686
697 687  static int nouveau_pmops_freeze(struct device *dev)
698 688  {
699 689      struct pci_dev *pdev = to_pci_dev(dev);
700 690      struct drm_device *drm_dev = pci_get_drvdata(pdev);
701 -      int ret;
702 -
703 -      if (drm_dev->mode_config.num_crtc)
704 -          nouveau_fbcon_set_suspend(drm_dev, 1);
705 -
706 -      ret = nouveau_do_suspend(drm_dev, false);
707 -      return ret;
691 +      return nouveau_do_suspend(drm_dev, false);
708 692  }
709 693
710 694  static int nouveau_pmops_thaw(struct device *dev)
711 695  {
712 696      struct pci_dev *pdev = to_pci_dev(dev);
713 697      struct drm_device *drm_dev = pci_get_drvdata(pdev);
714 -      int ret;
715 -
716 -      ret = nouveau_do_resume(drm_dev);
717 -      if (ret)
718 -          return ret;
719 -
720 -      if (drm_dev->mode_config.num_crtc) {
721 -          nouveau_display_resume(drm_dev);
722 -          nouveau_fbcon_set_suspend(drm_dev, 0);
723 -      }
724 -
725 -      return 0;
698 +      return nouveau_do_resume(drm_dev, false);
726 699  }
727 700
728 701
···
952 977          return ret;
953 978      pci_set_master(pdev);
954 979
955 -      ret = nouveau_do_resume(drm_dev);
980 +      ret = nouveau_do_resume(drm_dev, true);
956 981      drm_kms_helper_poll_enable(drm_dev);
957 982      /* do magic */
958 983      nvif_mask(device, 0x88488, (1 << 25), (1 << 25));
+17 -6
drivers/gpu/drm/nouveau/nouveau_fbcon.c
···
486 486      .fb_probe = nouveau_fbcon_create,
487 487  };
488 488
489 +  static void
490 +  nouveau_fbcon_set_suspend_work(struct work_struct *work)
491 +  {
492 +      struct nouveau_fbdev *fbcon = container_of(work, typeof(*fbcon), work);
493 +      console_lock();
494 +      nouveau_fbcon_accel_restore(fbcon->dev);
495 +      nouveau_fbcon_zfill(fbcon->dev, fbcon);
496 +      fb_set_suspend(fbcon->helper.fbdev, FBINFO_STATE_RUNNING);
497 +      console_unlock();
498 +  }
489 499
490 500  int
491 501  nouveau_fbcon_init(struct drm_device *dev)
···
513 503      if (!fbcon)
514 504          return -ENOMEM;
515 505
506 +      INIT_WORK(&fbcon->work, nouveau_fbcon_set_suspend_work);
516 507      fbcon->dev = dev;
517 508      drm->fbcon = fbcon;
518 509
···
562 551  {
563 552      struct nouveau_drm *drm = nouveau_drm(dev);
564 553      if (drm->fbcon) {
565 -          console_lock();
566 -          if (state == 0) {
567 -              nouveau_fbcon_accel_restore(dev);
568 -              nouveau_fbcon_zfill(dev, drm->fbcon);
554 +          if (state == FBINFO_STATE_RUNNING) {
555 +              schedule_work(&drm->fbcon->work);
556 +              return;
569 557          }
558 +          flush_work(&drm->fbcon->work);
559 +          console_lock();
570 560          fb_set_suspend(drm->fbcon->helper.fbdev, state);
571 -          if (state == 1)
572 -              nouveau_fbcon_accel_save_disable(dev);
561 +          nouveau_fbcon_accel_save_disable(dev);
573 562          console_unlock();
574 563      }
575 564  }
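The fbcon rework moves the resume half of set_suspend into a work item, so the caller never blocks on console_lock at resume, while the suspend path flushes the item first to keep the two directions ordered. A stripped-down module sketch of just that INIT_WORK/schedule_work/flush_work shape (demo names are mine; this is not the nouveau code):

#include <linux/module.h>
#include <linux/workqueue.h>

static struct work_struct resume_work;

static void resume_work_fn(struct work_struct *work)
{
    /* the slow, sleepable restore work (console_lock etc.) runs here */
    pr_info("deferred resume work ran\n");
}

static int __init demo_init(void)
{
    INIT_WORK(&resume_work, resume_work_fn);
    schedule_work(&resume_work);    /* resume path: queue and return */
    return 0;
}

static void __exit demo_exit(void)
{
    flush_work(&resume_work);       /* suspend path: wait for it first */
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");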
+1
drivers/gpu/drm/nouveau/nouveau_fbcon.h
···
36 36      struct nouveau_framebuffer nouveau_fb;
37 37      struct list_head fbdev_list;
38 38      struct drm_device *dev;
39 +      struct work_struct work;
39 40      unsigned int saved_flags;
40 41      struct nvif_object surf2d;
41 42      struct nvif_object clip;
+17 -1
drivers/md/raid5.c
···
64 64  #define cpu_to_group(cpu) cpu_to_node(cpu)
65 65  #define ANY_GROUP NUMA_NO_NODE
66 66
67 +  static bool devices_handle_discard_safely = false;
68 +  module_param(devices_handle_discard_safely, bool, 0644);
69 +  MODULE_PARM_DESC(devices_handle_discard_safely,
70 +       "Set to Y if all devices in each array reliably return zeroes on reads from discarded regions");
67 71  static struct workqueue_struct *raid5_wq;
68 72  /*
69 73   * Stripe cache
···
6212 6208          mddev->queue->limits.discard_granularity = stripe;
6213 6209          /*
6214 6210           * unaligned part of discard request will be ignored, so can't
6215 -           * guarantee discard_zerors_data
6211 +           * guarantee discard_zeroes_data
6216 6212           */
6217 6213          mddev->queue->limits.discard_zeroes_data = 0;
6218 6214
···
6237 6233              !bdev_get_queue(rdev->bdev)->
6238 6234                  limits.discard_zeroes_data)
6239 6235                  discard_supported = false;
6236 +              /* Unfortunately, discard_zeroes_data is not currently
6237 +               * a guarantee - just a hint.  So we only allow DISCARD
6238 +               * if the sysadmin has confirmed that only safe devices
6239 +               * are in use by setting a module parameter.
6240 +               */
6241 +              if (!devices_handle_discard_safely) {
6242 +                  if (discard_supported) {
6243 +                      pr_info("md/raid456: discard support disabled due to uncertainty.\n");
6244 +                      pr_info("Set raid456.devices_handle_discard_safely=Y to override.\n");
6245 +                  }
6246 +                  discard_supported = false;
6247 +              }
6240 6248          }
6241 6249
6242 6250          if (discard_supported &&
+1
drivers/media/usb/em28xx/em28xx-cards.c
···
3524 3524      .disconnect = em28xx_usb_disconnect,
3525 3525      .suspend = em28xx_usb_suspend,
3526 3526      .resume = em28xx_usb_resume,
3527 +      .reset_resume = em28xx_usb_resume,
3527 3528      .id_table = em28xx_id_table,
3528 3529  };
3529 3530
+1 -1
drivers/net/ethernet/3com/3c59x.c
···
2213 2213          }
2214 2214      }
2215 2215  #else
2216 -      dma_addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, skb->len, PCI_DMA_TODEVICE));
2216 +      dma_addr = pci_map_single(VORTEX_PCI(vp), skb->data, skb->len, PCI_DMA_TODEVICE);
2217 2217      if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma_addr))
2218 2218          goto out_dma_err;
2219 2219      vp->tx_ring[entry].addr = cpu_to_le32(dma_addr);
+7 -6
drivers/net/ethernet/broadcom/bcmsysport.c
···
857 857      return IRQ_HANDLED;
858 858  }
859 859
860 -  static int bcm_sysport_insert_tsb(struct sk_buff *skb, struct net_device *dev)
860 +  static struct sk_buff *bcm_sysport_insert_tsb(struct sk_buff *skb,
861 +                            struct net_device *dev)
861 862  {
862 863      struct sk_buff *nskb;
863 864      struct bcm_tsb *tsb;
···
874 873          if (!nskb) {
875 874              dev->stats.tx_errors++;
876 875              dev->stats.tx_dropped++;
877 -              return -ENOMEM;
876 +              return NULL;
878 877          }
879 878          skb = nskb;
880 879      }
···
893 892          ip_proto = ipv6_hdr(skb)->nexthdr;
894 893          break;
895 894      default:
896 -          return 0;
895 +          return skb;
897 896      }
898 897
899 898      /* Get the checksum offset and the L4 (transport) offset */
···
912 911          tsb->l4_ptr_dest_map = csum_info;
913 912      }
914 913
915 -      return 0;
914 +      return skb;
916 915  }
917 916
918 917  static netdev_tx_t bcm_sysport_xmit(struct sk_buff *skb,
···
946 945
947 946      /* Insert TSB and checksum infos */
948 947      if (priv->tsb_en) {
949 -          ret = bcm_sysport_insert_tsb(skb, dev);
950 -          if (ret) {
948 +          skb = bcm_sysport_insert_tsb(skb, dev);
949 +          if (!skb) {
951 950              ret = NETDEV_TX_OK;
952 951              goto out;
953 952          }
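The interesting part of the bcmsysport fix is the signature change: bcm_sysport_insert_tsb() may have to reallocate the skb (freeing the original), so reporting success as an int left the caller holding a stale pointer. Returning the possibly-new skb forces the caller to rebind. A userspace sketch of the same ownership rule (hypothetical names, not the driver's API):

#include <stdlib.h>
#include <string.h>

/* Make room for a header in front of the payload.  May replace the
 * buffer entirely; the old pointer must not be used afterwards.
 * Returns the buffer to keep using, or NULL on failure (old one freed). */
static char *insert_header(char *buf, size_t len, size_t hdr_len)
{
    char *nbuf = malloc(hdr_len + len);

    if (!nbuf) {
        free(buf);
        return NULL;
    }
    memset(nbuf, 0, hdr_len);           /* blank header */
    memcpy(nbuf + hdr_len, buf, len);   /* payload after it */
    free(buf);                          /* old pointer is now dead */
    return nbuf;
}

int main(void)
{
    char *buf = malloc(4);

    if (!buf)
        return 1;
    memcpy(buf, "abcd", 4);

    buf = insert_header(buf, 4, 8);     /* rebind, exactly like the fix */
    if (!buf)
        return 1;
    free(buf);
    return 0;
}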
+1 -1
drivers/net/ethernet/brocade/bna/bna_tx_rx.c
···
3410 3410
3411 3411      cfg_req->tx_cfg.vlan_mode = BFI_ENET_TX_VLAN_WI;
3412 3412      cfg_req->tx_cfg.vlan_id = htons((u16)tx->txf_vlan_id);
3413 -      cfg_req->tx_cfg.admit_tagged_frame = BNA_STATUS_T_DISABLED;
3413 +      cfg_req->tx_cfg.admit_tagged_frame = BNA_STATUS_T_ENABLED;
3414 3414      cfg_req->tx_cfg.apply_vlan_filter = BNA_STATUS_T_DISABLED;
3415 3415
3416 3416      bfa_msgq_cmd_set(&tx->msgq_cmd, NULL, NULL,
+1 -1
drivers/net/ethernet/brocade/bna/bnad.c
···
2864 2864          txqent->hdr.wi.opcode = htons(BNA_TXQ_WI_SEND);
2865 2865          txqent->hdr.wi.lso_mss = 0;
2866 2866
2867 -          if (unlikely(skb->len > (bnad->netdev->mtu + ETH_HLEN))) {
2867 +          if (unlikely(skb->len > (bnad->netdev->mtu + VLAN_ETH_HLEN))) {
2868 2868              BNAD_UPDATE_CTR(bnad, tx_skb_non_tso_too_long);
2869 2869              return -EINVAL;
2870 2870          }
+5 -37
drivers/net/ethernet/toshiba/spider_net.c
···
267 267  }
268 268
269 269  /**
270 -   * spider_net_get_mac_address - read mac address from spider card
271 -   * @card: device structure
272 -   *
273 -   * reads MAC address from GMACUNIMACU and GMACUNIMACL registers
274 -   */
275 -  static int
276 -  spider_net_get_mac_address(struct net_device *netdev)
277 -  {
278 -      struct spider_net_card *card = netdev_priv(netdev);
279 -      u32 macl, macu;
280 -
281 -      macl = spider_net_read_reg(card, SPIDER_NET_GMACUNIMACL);
282 -      macu = spider_net_read_reg(card, SPIDER_NET_GMACUNIMACU);
283 -
284 -      netdev->dev_addr[0] = (macu >> 24) & 0xff;
285 -      netdev->dev_addr[1] = (macu >> 16) & 0xff;
286 -      netdev->dev_addr[2] = (macu >> 8) & 0xff;
287 -      netdev->dev_addr[3] = macu & 0xff;
288 -      netdev->dev_addr[4] = (macl >> 8) & 0xff;
289 -      netdev->dev_addr[5] = macl & 0xff;
290 -
291 -      if (!is_valid_ether_addr(&netdev->dev_addr[0]))
292 -          return -EINVAL;
293 -
294 -      return 0;
295 -  }
296 -
297 -  /**
298 270   * spider_net_get_descr_status -- returns the status of a descriptor
299 271   * @descr: descriptor to look at
300 272   *
···
1317 1345      if (!is_valid_ether_addr(addr->sa_data))
1318 1346          return -EADDRNOTAVAIL;
1319 1347
1348 +      memcpy(netdev->dev_addr, addr->sa_data, ETH_ALEN);
1349 +
1320 1350      /* switch off GMACTPE and GMACRPE */
1321 1351      regvalue = spider_net_read_reg(card, SPIDER_NET_GMACOPEMD);
1322 1352      regvalue &= ~((1 << 5) | (1 << 6));
1323 1353      spider_net_write_reg(card, SPIDER_NET_GMACOPEMD, regvalue);
1324 1354
1325 1355      /* write mac */
1326 -      macu = (addr->sa_data[0]<<24) + (addr->sa_data[1]<<16) +
1327 -          (addr->sa_data[2]<<8) + (addr->sa_data[3]);
1328 -      macl = (addr->sa_data[4]<<8) + (addr->sa_data[5]);
1356 +      macu = (netdev->dev_addr[0]<<24) + (netdev->dev_addr[1]<<16) +
1357 +          (netdev->dev_addr[2]<<8) + (netdev->dev_addr[3]);
1358 +      macl = (netdev->dev_addr[4]<<8) + (netdev->dev_addr[5]);
1329 1359      spider_net_write_reg(card, SPIDER_NET_GMACUNIMACU, macu);
1330 1360      spider_net_write_reg(card, SPIDER_NET_GMACUNIMACL, macl);
1331 1361
···
1337 1363      spider_net_write_reg(card, SPIDER_NET_GMACOPEMD, regvalue);
1338 1364
1339 1365      spider_net_set_promisc(card);
1340 -
1341 -      /* look up, whether we have been successful */
1342 -      if (spider_net_get_mac_address(netdev))
1343 -          return -EADDRNOTAVAIL;
1344 -      if (memcmp(netdev->dev_addr,addr->sa_data,netdev->addr_len))
1345 -          return -EADDRNOTAVAIL;
1346 1366
1347 1367      return 0;
1348 1368  }
+8 -7
drivers/net/hyperv/netvsc.c
···
717 717      unsigned int section_index = NETVSC_INVALID_INDEX;
718 718      u32 msg_size = 0;
719 719      struct sk_buff *skb;
720 +      u16 q_idx = packet->q_idx;
720 721
721 722
722 723      net_device = get_outbound_net_device(device);
···
782 781
783 782      if (ret == 0) {
784 783          atomic_inc(&net_device->num_outstanding_sends);
785 -          atomic_inc(&net_device->queue_sends[packet->q_idx]);
784 +          atomic_inc(&net_device->queue_sends[q_idx]);
786 785
787 786          if (hv_ringbuf_avail_percent(&out_channel->outbound) <
788 787              RING_AVAIL_PERCENT_LOWATER) {
789 788              netif_tx_stop_queue(netdev_get_tx_queue(
790 -                          ndev, packet->q_idx));
789 +                          ndev, q_idx));
791 790
792 791              if (atomic_read(&net_device->
793 -                  queue_sends[packet->q_idx]) < 1)
792 +                  queue_sends[q_idx]) < 1)
794 793                  netif_tx_wake_queue(netdev_get_tx_queue(
795 -                              ndev, packet->q_idx));
794 +                              ndev, q_idx));
796 795          }
797 796      } else if (ret == -EAGAIN) {
798 797          netif_tx_stop_queue(netdev_get_tx_queue(
799 -                      ndev, packet->q_idx));
800 -          if (atomic_read(&net_device->queue_sends[packet->q_idx]) < 1) {
798 +                      ndev, q_idx));
799 +          if (atomic_read(&net_device->queue_sends[q_idx]) < 1) {
801 800              netif_tx_wake_queue(netdev_get_tx_queue(
802 -                          ndev, packet->q_idx));
801 +                          ndev, q_idx));
803 802              ret = -ENOSPC;
804 803          }
805 804      } else {
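All of the netvsc hunks follow from the first one: once the send succeeds, the completion path may already have freed packet on another CPU, so packet->q_idx is copied into a local up front and only the copy is used afterwards. A userspace sketch of that rule, with made-up names:

#include <stdio.h>
#include <stdlib.h>

struct packet {
    unsigned short q_idx;
};

/* Stand-in for the send: consumes and frees pkt, the way a completion
 * handler racing on another CPU could. */
static int send_packet(struct packet *pkt)
{
    free(pkt);
    return 0;
}

static int xmit(struct packet *pkt)
{
    unsigned short q_idx = pkt->q_idx;  /* copy before handoff */
    int ret = send_packet(pkt);         /* pkt may be gone now */

    if (ret == 0)
        printf("sent on queue %u\n", q_idx);    /* uses the copy */
    return ret;
}

int main(void)
{
    struct packet *pkt = malloc(sizeof(*pkt));

    if (!pkt)
        return 1;
    pkt->q_idx = 3;
    return xmit(pkt);
}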
+2 -2
drivers/net/team/team.c
···
647 647  {
648 648      if (!team->notify_peers.count || !netif_running(team->dev))
649 649          return;
650 -      atomic_set(&team->notify_peers.count_pending, team->notify_peers.count);
650 +      atomic_add(team->notify_peers.count, &team->notify_peers.count_pending);
651 651      schedule_delayed_work(&team->notify_peers.dw, 0);
652 652  }
653 653
···
687 687  {
688 688      if (!team->mcast_rejoin.count || !netif_running(team->dev))
689 689          return;
690 -      atomic_set(&team->mcast_rejoin.count_pending, team->mcast_rejoin.count);
690 +      atomic_add(team->mcast_rejoin.count, &team->mcast_rejoin.count_pending);
691 691      schedule_delayed_work(&team->mcast_rejoin.dw, 0);
692 692  }
693 693
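The team driver change swaps atomic_set() for atomic_add() when rearming the pending counters: if a second burst is requested while the first is still draining, a plain store discards whatever is left undone, while an add folds the two bursts together. A small C11 sketch of the difference (names are mine):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int count_pending;

static void request_burst(int count)
{
    /* old: atomic_store(&count_pending, count);  -- loses leftovers */
    atomic_fetch_add(&count_pending, count);    /* new behaviour */
}

static int worker_drain_one(void)
{
    /* each work invocation handles one pending notification */
    return atomic_fetch_sub(&count_pending, 1) - 1;     /* remaining */
}

int main(void)
{
    request_burst(3);
    worker_drain_one();     /* 2 still pending ... */
    request_burst(3);       /* ... rearm mid-drain */
    printf("pending: %d\n", atomic_load(&count_pending)); /* 5, not 3 */
    return 0;
}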
+1 -1
drivers/net/usb/asix_devices.c
···
890 890      .unbind = ax88772_unbind,
891 891      .status = asix_status,
892 892      .link_reset = ax88772_link_reset,
893 -      .reset = ax88772_reset,
893 +      .reset = ax88772_link_reset,
894 894      .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_LINK_INTR | FLAG_MULTI_PACKET,
895 895      .rx_fixup = asix_rx_fixup_common,
896 896      .tx_fixup = asix_tx_fixup,
+9 -2
drivers/net/usb/r8152.c
···
980 980  {
981 981      struct r8152 *tp = netdev_priv(netdev);
982 982      struct sockaddr *addr = p;
983 +      int ret = -EADDRNOTAVAIL;
983 984
984 985      if (!is_valid_ether_addr(addr->sa_data))
985 -          return -EADDRNOTAVAIL;
986 +          goto out1;
987 +
988 +      ret = usb_autopm_get_interface(tp->intf);
989 +      if (ret < 0)
990 +          goto out1;
986 991
987 992      memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
988 993
···
995 990      pla_ocp_write(tp, PLA_IDR, BYTE_EN_SIX_BYTES, 8, addr->sa_data);
996 991      ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_NORAML);
997 992
998 -      return 0;
993 +      usb_autopm_put_interface(tp->intf);
994 +  out1:
995 +      return ret;
999 996  }
1000 997
1001 998  static int set_ethernet_addr(struct r8152 *tp)
+2 -1
drivers/parisc/superio.c
···
395 395      serial_port.iotype = UPIO_PORT;
396 396      serial_port.type = PORT_16550A;
397 397      serial_port.uartclk = 115200*16;
398 -      serial_port.fifosize = 16;
398 +      serial_port.flags = UPF_FIXED_PORT | UPF_FIXED_TYPE |
399 +              UPF_BOOT_AUTOCONF;
399 400
400 401      /* serial port #1 */
401 402      serial_port.iobase = sio_dev.sp1_base;
+4
fs/ocfs2/dlm/dlmmaster.c
··· 2039 2039 "and killing the other node now! This node is OK and can continue.\n"); 2040 2040 __dlm_print_one_lock_resource(res); 2041 2041 spin_unlock(&res->spinlock); 2042 + spin_lock(&dlm->master_lock); 2043 + if (mle) 2044 + __dlm_put_mle(mle); 2045 + spin_unlock(&dlm->master_lock); 2042 2046 spin_unlock(&dlm->spinlock); 2043 2047 *ret_data = (void *)res; 2044 2048 dlm_put(dlm);
+1 -1
include/net/sctp/command.h
···
115 115   * analysis of the state functions, but in reality just taken from
116 116   * thin air in the hopes othat we don't trigger a kernel panic.
117 117   */
118 -  #define SCTP_MAX_NUM_COMMANDS 14
118 +  #define SCTP_MAX_NUM_COMMANDS 20
119 119
120 120  typedef union {
121 121      void *zero_all;   /* Set to NULL to clear the entire union */
+3 -1
kernel/events/core.c
···
7948 7948
7949 7949      for_each_task_context_nr(ctxn) {
7950 7950          ret = perf_event_init_context(child, ctxn);
7951 -          if (ret)
7951 +          if (ret) {
7952 +              perf_event_free_task(child);
7952 7953              return ret;
7954 +          }
7953 7955      }
7954 7956
7955 7957      return 0;
+3 -2
kernel/fork.c
···
1360 1360          goto bad_fork_cleanup_policy;
1361 1361      retval = audit_alloc(p);
1362 1362      if (retval)
1363 -          goto bad_fork_cleanup_policy;
1363 +          goto bad_fork_cleanup_perf;
1364 1364      /* copy all the process information */
1365 1365      shm_init_task(p);
1366 1366      retval = copy_semundo(clone_flags, p);
···
1566 1566      exit_sem(p);
1567 1567  bad_fork_cleanup_audit:
1568 1568      audit_free(p);
1569 -  bad_fork_cleanup_policy:
1569 +  bad_fork_cleanup_perf:
1570 1570      perf_event_free_task(p);
1571 +  bad_fork_cleanup_policy:
1571 1572  #ifdef CONFIG_NUMA
1572 1573      mpol_put(p->mempolicy);
1573 1574  bad_fork_cleanup_threadgroup_lock:
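Together with the perf change above, this restores copy_process()'s unwind convention: cleanup labels run in reverse order of setup and each undoes exactly one successful step, so a step that now cleans up its own partial state on failure (perf_event_init_task()) must branch past its own cleanup label or that state is released twice. A userspace sketch of the convention, with made-up resources:

#include <stdlib.h>

struct task { void *policy, *perf, *audit; };

static int setup(struct task *t)
{
    if (!(t->policy = malloc(1)))
        goto fail_policy;
    if (!(t->perf = malloc(1)))
        goto fail_perf;
    if (!(t->audit = malloc(1)))
        goto fail_audit;    /* not fail_perf: perf is live here */
    return 0;

fail_audit:                 /* labels undo in reverse order ... */
    free(t->perf);
fail_perf:                  /* ... each exactly one step */
    free(t->policy);
fail_policy:
    return -1;
}

int main(void)
{
    struct task t = { 0 };

    if (setup(&t))
        return 1;
    free(t.audit);
    free(t.perf);
    free(t.policy);
    return 0;
}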
+5 -2
mm/huge_memory.c
···
1795 1795      for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
1796 1796          pte_t *pte, entry;
1797 1797          BUG_ON(PageCompound(page+i));
1798 +          /*
1799 +           * Note that pmd_numa is not transferred deliberately
1800 +           * to avoid any possibility that pte_numa leaks to
1801 +           * a PROT_NONE VMA by accident.
1802 +           */
1798 1803          entry = mk_pte(page + i, vma->vm_page_prot);
1799 1804          entry = maybe_mkwrite(pte_mkdirty(entry), vma);
1800 1805          if (!pmd_write(*pmd))
1801 1806              entry = pte_wrprotect(entry);
1802 1807          if (!pmd_young(*pmd))
1803 1808              entry = pte_mkold(entry);
1804 -          if (pmd_numa(*pmd))
1805 -              entry = pte_mknuma(entry);
1806 1809          pte = pte_offset_map(&_pmd, haddr);
1807 1810          BUG_ON(!pte_none(*pte));
1808 1811          set_pte_at(mm, haddr, pte, entry);
+31 -5
mm/memcontrol.c
···
292 292      /* vmpressure notifications */
293 293      struct vmpressure vmpressure;
294 294
295 +      /* css_online() has been completed */
296 +      int initialized;
297 +
295 298      /*
296 299       * the counter to account for mem+swap usage.
297 300       */
···
1102 1099       * skipping css reference should be safe.
1103 1100       */
1104 1101      if (next_css) {
1105 -          if ((next_css == &root->css) ||
1106 -              ((next_css->flags & CSS_ONLINE) &&
1107 -               css_tryget_online(next_css)))
1108 -              return mem_cgroup_from_css(next_css);
1102 +          struct mem_cgroup *memcg = mem_cgroup_from_css(next_css);
1103 +
1104 +          if (next_css == &root->css)
1105 +              return memcg;
1106 +
1107 +          if (css_tryget_online(next_css)) {
1108 +              /*
1109 +               * Make sure the memcg is initialized:
1110 +               * mem_cgroup_css_online() orders the the
1111 +               * initialization against setting the flag.
1112 +               */
1113 +              if (smp_load_acquire(&memcg->initialized))
1114 +                  return memcg;
1115 +              css_put(next_css);
1116 +          }
1109 1117
1110 1118          prev_css = next_css;
1111 1119          goto skip_node;
···
5563 5549  {
5564 5550      struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5565 5551      struct mem_cgroup *parent = mem_cgroup_from_css(css->parent);
5552 +      int ret;
5566 5553
5567 5554      if (css->id > MEM_CGROUP_ID_MAX)
5568 5555          return -ENOSPC;
···
5600 5585      }
5601 5586      mutex_unlock(&memcg_create_mutex);
5602 5587
5603 -      return memcg_init_kmem(memcg, &memory_cgrp_subsys);
5588 +      ret = memcg_init_kmem(memcg, &memory_cgrp_subsys);
5589 +      if (ret)
5590 +          return ret;
5591 +
5592 +      /*
5593 +       * Make sure the memcg is initialized: mem_cgroup_iter()
5594 +       * orders reading memcg->initialized against its callers
5595 +       * reading the memcg members.
5596 +       */
5597 +      smp_store_release(&memcg->initialized, 1);
5598 +
5599 +      return 0;
5604 5600  }
5605 5601
5606 5602  /*
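The memcg hunks form a publish/subscribe pairing: css_online() publishes the fully initialized memcg with smp_store_release(), and mem_cgroup_iter() checks the flag with smp_load_acquire() before touching the members. The C11 equivalents give the same guarantee; this sketch is mine, not kernel code:

#include <stdatomic.h>
#include <pthread.h>
#include <stdio.h>

struct obj {
    int data;
    atomic_int initialized;
};

static struct obj g;

static void *publisher(void *arg)
{
    g.data = 42;                        /* plain init writes ... */
    atomic_store_explicit(&g.initialized, 1,
                  memory_order_release);    /* ... published last */
    return NULL;
}

static void *observer(void *arg)
{
    /* if the flag reads 1, every earlier init write is visible */
    if (atomic_load_explicit(&g.initialized, memory_order_acquire))
        printf("data = %d\n", g.data);  /* guaranteed to see 42 */
    return NULL;
}

int main(void)
{
    pthread_t p, o;

    pthread_create(&p, NULL, publisher, NULL);
    pthread_create(&o, NULL, observer, NULL);
    pthread_join(p, NULL);
    pthread_join(o, NULL);
    return 0;
}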
+4 -1
mm/migrate.c
···
146 146      pte = pte_mkold(mk_pte(new, vma->vm_page_prot));
147 147      if (pte_swp_soft_dirty(*ptep))
148 148          pte = pte_mksoft_dirty(pte);
149 +
150 +      /* Recheck VMA as permissions can change since migration started */
149 151      if (is_write_migration_entry(entry))
150 -          pte = pte_mkwrite(pte);
152 +          pte = maybe_mkwrite(pte, vma);
153 +
151 154  #ifdef CONFIG_HUGETLB_PAGE
152 155      if (PageHuge(new)) {
153 156          pte = pte_mkhuge(pte);
+3 -4
mm/page_alloc.c
···
1612 1612      }
1613 1613
1614 1614      __mod_zone_page_state(zone, NR_ALLOC_BATCH, -(1 << order));
1615 -      if (zone_page_state(zone, NR_ALLOC_BATCH) == 0 &&
1615 +      if (atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]) <= 0 &&
1616 1616          !zone_is_fair_depleted(zone))
1617 1617          zone_set_flag(zone, ZONE_FAIR_DEPLETED);
1618 1618
···
5701 5701          zone->watermark[WMARK_HIGH] = min_wmark_pages(zone) + (tmp >> 1);
5702 5702
5703 5703          __mod_zone_page_state(zone, NR_ALLOC_BATCH,
5704 -                  high_wmark_pages(zone) -
5705 -                  low_wmark_pages(zone) -
5706 -                  zone_page_state(zone, NR_ALLOC_BATCH));
5704 +                  high_wmark_pages(zone) - low_wmark_pages(zone) -
5705 +                  atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]));
5707 5706
5708 5707          setup_zone_migrate_reserve(zone);
5709 5708          spin_unlock_irqrestore(&zone->lock, flags);
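The page_alloc change swaps an exact-zero test for a <= 0 test on the raw counter: NR_ALLOC_BATCH is decremented in multi-page batches (and per-CPU deltas are folded in later), so the value can cross zero without ever reading exactly zero. A toy illustration:

#include <stdio.h>

int main(void)
{
    long batch = 8, counter = 5;

    counter -= batch;       /* one batched update: 5 -> -3 */
    if (counter == 0)
        puts("equality check: depleted");   /* never reached */
    if (counter <= 0)
        puts("threshold check: depleted");  /* fires correctly */
    return 0;
}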
+11
net/bridge/br_netfilter.c
···
316 316                    ETH_HLEN-ETH_ALEN);
317 317          /* tell br_dev_xmit to continue with forwarding */
318 318          nf_bridge->mask |= BRNF_BRIDGED_DNAT;
319 +          /* FIXME Need to refragment */
319 320          ret = neigh->output(neigh, skb);
320 321      }
321 322      neigh_release(neigh);
···
372 371      struct nf_bridge_info *nf_bridge = skb->nf_bridge;
373 372      struct rtable *rt;
374 373      int err;
375 +      int frag_max_size;
376 +
377 +      frag_max_size = IPCB(skb)->frag_max_size;
378 +      BR_INPUT_SKB_CB(skb)->frag_max_size = frag_max_size;
375 379
376 380      if (nf_bridge->mask & BRNF_PKT_TYPE) {
377 381          skb->pkt_type = PACKET_OTHERHOST;
···
780 775  static int br_nf_dev_queue_xmit(struct sk_buff *skb)
781 776  {
782 777      int ret;
778 +      int frag_max_size;
783 779
780 +      /* This is wrong! We should preserve the original fragment
781 +       * boundaries by preserving frag_list rather than refragmenting.
782 +       */
784 783      if (skb->protocol == htons(ETH_P_IP) &&
785 784          skb->len + nf_bridge_mtu_reduction(skb) > skb->dev->mtu &&
786 785          !skb_is_gso(skb)) {
786 +          frag_max_size = BR_INPUT_SKB_CB(skb)->frag_max_size;
787 787          if (br_parse_ip_options(skb))
788 788              /* Drop invalid packet */
789 789              return NF_DROP;
790 +          IPCB(skb)->frag_max_size = frag_max_size;
790 791          ret = ip_fragment(skb, br_dev_queue_push_xmit);
791 792      } else
792 793          ret = br_dev_queue_push_xmit(skb);
+4
net/bridge/br_private.h
···
306 306
307 307  struct br_input_skb_cb {
308 308      struct net_device *brdev;
309 +
309 310  #ifdef CONFIG_BRIDGE_IGMP_SNOOPING
310 311      int igmp;
311 312      int mrouters_only;
312 313  #endif
314 +
315 +      u16 frag_max_size;
316 +
313 317  #ifdef CONFIG_BRIDGE_VLAN_FILTERING
314 318      bool vlan_filtered;
315 319  #endif
+2 -2
net/ipv6/ip6_gre.c
···
786 786          encap_limit = t->parms.encap_limit;
787 787
788 788      memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
789 -      fl6.flowi6_proto = IPPROTO_IPIP;
789 +      fl6.flowi6_proto = IPPROTO_GRE;
790 790
791 791      dsfield = ipv4_get_dsfield(iph);
792 792
···
836 836          encap_limit = t->parms.encap_limit;
837 837
838 838      memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
839 -      fl6.flowi6_proto = IPPROTO_IPV6;
839 +      fl6.flowi6_proto = IPPROTO_GRE;
840 840
841 841      dsfield = ipv6_get_dsfield(ipv6h);
842 842      if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
+7 -4
net/rds/send.c
···
593 593              sock_put(rds_rs_to_sk(rs));
594 594          }
595 595          rs = rm->m_rs;
596 -          sock_hold(rds_rs_to_sk(rs));
596 +          if (rs)
597 +              sock_hold(rds_rs_to_sk(rs));
597 598      }
599 +      if (!rs)
600 +          goto unlock_and_drop;
598 601      spin_lock(&rs->rs_lock);
599 602
600 603      if (test_and_clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags)) {
···
641 638   * queue. This means that in the TCP case, the message may not have been
642 639   * assigned the m_ack_seq yet - but that's fine as long as tcp_is_acked
643 640   * checks the RDS_MSG_HAS_ACK_SEQ bit.
644 -   *
645 -   * XXX It's not clear to me how this is safely serialized with socket
646 -   * destruction.  Maybe it should bail if it sees SOCK_DEAD.
647 641   */
648 642  void rds_send_drop_acked(struct rds_connection *conn, u64 ack,
649 643               is_acked_func is_acked)
···
711 711       */
712 712      if (!test_and_clear_bit(RDS_MSG_ON_CONN, &rm->m_flags)) {
713 713          spin_unlock_irqrestore(&conn->c_lock, flags);
714 +          spin_lock_irqsave(&rm->m_rs_lock, flags);
715 +          rm->m_rs = NULL;
716 +          spin_unlock_irqrestore(&rm->m_rs_lock, flags);
714 717          continue;
715 718      }
716 719      list_del_init(&rm->m_conn_item);
+4 -1
net/rds/tcp_connect.c
···
106 106      rds_tcp_set_callbacks(sock, conn);
107 107      ret = sock->ops->connect(sock, (struct sockaddr *)&dest, sizeof(dest),
108 108                   O_NONBLOCK);
109 -      sock = NULL;
110 109
111 110      rdsdebug("connect to address %pI4 returned %d\n", &conn->c_faddr, ret);
112 111      if (ret == -EINPROGRESS)
113 112          ret = 0;
113 +      if (ret == 0)
114 +          sock = NULL;
115 +      else
116 +          rds_tcp_restore_callbacks(sock, conn->c_transport_data);
114 117
115 118  out:
116 119      if (sock)
+1 -2
net/rds/threads.c
··· 78 78 "current state is %d\n", 79 79 __func__, 80 80 atomic_read(&conn->c_state)); 81 - atomic_set(&conn->c_state, RDS_CONN_ERROR); 82 - queue_work(rds_wq, &conn->c_down_w); 81 + rds_conn_drop(conn); 83 82 return; 84 83 } 85 84
+1
net/sched/cls_api.c
···
549 549      tcf_tree_lock(tp);
550 550      list_splice_init(&dst->actions, &tmp);
551 551      list_splice(&src->actions, &dst->actions);
552 +      dst->type = src->type;
552 553      tcf_tree_unlock(tp);
553 554      tcf_action_destroy(&tmp, TCA_ACT_UNBIND);
554 555  #endif
+3 -2
net/sched/ematch.c
···
528 528          match_idx = stack[--stackp];
529 529          cur_match = tcf_em_get_match(tree, match_idx);
530 530
531 +          if (tcf_em_is_inverted(cur_match))
532 +              res = !res;
533 +
531 534          if (tcf_em_early_end(cur_match, res)) {
532 -              if (tcf_em_is_inverted(cur_match))
533 -                  res = !res;
534 535              goto pop_stack;
535 536          } else {
536 537              match_idx++;
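The ematch fix moves the inversion ahead of the early-end test, so short-circuit decisions are made on the value the sub-match actually contributes. A toy sketch of why the order matters (the AND early-end rule here is my simplification):

#include <stdbool.h>
#include <stdio.h>

/* an AND chain can stop early as soon as one term is false */
static bool early_end_and(bool res) { return !res; }

int main(void)
{
    bool res = true;        /* raw sub-match result */
    bool inverted = true;   /* this sub-match is negated */

    /* wrong: early-end judged on the raw value */
    printf("buggy stop:   %d\n", early_end_and(res));

    /* right: invert first, then judge */
    if (inverted)
        res = !res;
    printf("correct stop: %d\n", early_end_and(res));
    return 0;
}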
+16 -3
net/sctp/sm_statefuns.c
···
1775 1775      /* Update the content of current association. */
1776 1776      sctp_add_cmd_sf(commands, SCTP_CMD_UPDATE_ASSOC, SCTP_ASOC(new_asoc));
1777 1777      sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, SCTP_ULPEVENT(ev));
1778 -      sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
1779 -              SCTP_STATE(SCTP_STATE_ESTABLISHED));
1780 -      sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl));
1778 +      if (sctp_state(asoc, SHUTDOWN_PENDING) &&
1779 +          (sctp_sstate(asoc->base.sk, CLOSING) ||
1780 +           sock_flag(asoc->base.sk, SOCK_DEAD))) {
1781 +          /* if were currently in SHUTDOWN_PENDING, but the socket
1782 +           * has been closed by user, don't transition to ESTABLISHED.
1783 +           * Instead trigger SHUTDOWN bundled with COOKIE_ACK.
1784 +           */
1785 +          sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl));
1786 +          return sctp_sf_do_9_2_start_shutdown(net, ep, asoc,
1787 +                               SCTP_ST_CHUNK(0), NULL,
1788 +                               commands);
1789 +      } else {
1790 +          sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
1791 +                  SCTP_STATE(SCTP_STATE_ESTABLISHED));
1792 +          sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl));
1793 +      }
1781 1794      return SCTP_DISPOSITION_CONSUME;
1782 1795
1783 1796  nomem_ev:
+3 -4
sound/soc/codecs/rt286.c
···
51 51      { 0x04, 0xaf01 },
52 52      { 0x08, 0x000d },
53 53      { 0x09, 0xd810 },
54 -      { 0x0a, 0x0060 },
54 +      { 0x0a, 0x0120 },
55 55      { 0x0b, 0x0000 },
56 56      { 0x0d, 0x2800 },
57 57      { 0x0f, 0x0000 },
···
60 60      { 0x33, 0x0208 },
61 61      { 0x49, 0x0004 },
62 62      { 0x4f, 0x50e9 },
63 -      { 0x50, 0x2c00 },
63 +      { 0x50, 0x2000 },
64 64      { 0x63, 0x2902 },
65 65      { 0x67, 0x1111 },
66 66      { 0x68, 0x1016 },
···
104 104      { 0x02170700, 0x00000000 },
105 105      { 0x02270100, 0x00000000 },
106 106      { 0x02370100, 0x00000000 },
107 -      { 0x02040000, 0x00004002 },
108 107      { 0x01870700, 0x00000020 },
109 108      { 0x00830000, 0x000000c3 },
110 109      { 0x00930000, 0x000000c3 },
···
191 192      /*handle index registers*/
192 193      if (reg <= 0xff) {
193 194          rt286_hw_write(client, RT286_COEF_INDEX, reg);
194 -          reg = RT286_PROC_COEF;
195 195          for (i = 0; i < INDEX_CACHE_SIZE; i++) {
196 196              if (reg == rt286->index_cache[i].reg) {
197 197                  rt286->index_cache[i].def = value;
···
198 200              }
199 201
200 202          }
203 +          reg = RT286_PROC_COEF;
201 204      }
202 205
203 206      data[0] = (reg >> 24) & 0xff;
+1 -1
sound/soc/codecs/ssm2602.c
···
647 647          return -ENOMEM;
648 648
649 649      dev_set_drvdata(dev, ssm2602);
650 -      ssm2602->type = SSM2602;
650 +      ssm2602->type = type;
651 651      ssm2602->regmap = regmap;
652 652
653 653      return snd_soc_register_codec(dev, &soc_codec_dev_ssm2602,
+7 -5
sound/soc/fsl/fsl_ssi.c
···
748 748      return 0;
749 749  }
750 750
751 -  static int _fsl_ssi_set_dai_fmt(struct fsl_ssi_private *ssi_private,
752 -                  unsigned int fmt)
751 +  static int _fsl_ssi_set_dai_fmt(struct device *dev,
752 +                  struct fsl_ssi_private *ssi_private,
753 +                  unsigned int fmt)
753 754  {
754 755      struct regmap *regs = ssi_private->regs;
755 756      u32 strcr = 0, stcr, srcr, scr, mask;
···
759 758      ssi_private->dai_fmt = fmt;
760 759
761 760      if (fsl_ssi_is_i2s_master(ssi_private) && IS_ERR(ssi_private->baudclk)) {
762 -          dev_err(&ssi_private->pdev->dev, "baudclk is missing which is necessary for master mode\n");
761 +          dev_err(dev, "baudclk is missing which is necessary for master mode\n");
763 762          return -EINVAL;
764 763      }
765 764
···
914 913  {
915 914      struct fsl_ssi_private *ssi_private = snd_soc_dai_get_drvdata(cpu_dai);
916 915
917 -      return _fsl_ssi_set_dai_fmt(ssi_private, fmt);
916 +      return _fsl_ssi_set_dai_fmt(cpu_dai->dev, ssi_private, fmt);
918 917  }
919 918
920 919  /**
···
1388 1387
1389 1388  done:
1390 1389      if (ssi_private->dai_fmt)
1391 -          _fsl_ssi_set_dai_fmt(ssi_private, ssi_private->dai_fmt);
1390 +          _fsl_ssi_set_dai_fmt(&pdev->dev, ssi_private,
1391 +                       ssi_private->dai_fmt);
1392 1392
1393 1393      return 0;
1394 1394
+2 -4
sound/soc/soc-compress.c
···
102 102      fe->dpcm[stream].runtime = fe_substream->runtime;
103 103
104 104      ret = dpcm_path_get(fe, stream, &list);
105 -      if (ret < 0) {
106 -          mutex_unlock(&fe->card->mutex);
105 +      if (ret < 0)
107 106          goto fe_err;
108 -      } else if (ret == 0) {
107 +      else if (ret == 0)
109 108          dev_dbg(fe->dev, "ASoC: %s no valid %s route\n",
110 109              fe->dai_link->name, stream ? "capture" : "playback");
111 -      }
112 110
113 111      /* calculate valid and active FE <-> BE dpcms */
114 112      dpcm_process_paths(fe, stream, &list, 1);
+1 -1
sound/soc/soc-core.c
···
3203 3203      unsigned int val, mask;
3204 3204      void *data;
3205 3205
3206 -      if (!component->regmap)
3206 +      if (!component->regmap || !params->num_regs)
3207 3207          return -EINVAL;
3208 3208
3209 3209      len = params->num_regs * component->val_bytes;