Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'topic/firewire' into for-next

This is a merge of big firewire audio stack updates by Takashi Sakamoto.

+10915 -1901
+1 -1
Documentation/ABI/testing/sysfs-bus-pci
··· 117 117 118 118 What: /sys/bus/pci/devices/.../vpd 119 119 Date: February 2008 120 - Contact: Ben Hutchings <bhutchings@solarflare.com> 120 + Contact: Ben Hutchings <bwh@kernel.org> 121 121 Description: 122 122 A file named vpd in a device directory will be a 123 123 binary file containing the Vital Product Data for the
+1 -1
Documentation/devicetree/bindings/clock/at91-clock.txt
··· 62 62 - interrupt-controller : tell that the PMC is an interrupt controller. 63 63 - #interrupt-cells : must be set to 1. The first cell encodes the interrupt id, 64 64 and reflect the bit position in the PMC_ER/DR/SR registers. 65 - You can use the dt macros defined in dt-bindings/clk/at91.h. 65 + You can use the dt macros defined in dt-bindings/clock/at91.h. 66 66 0 (AT91_PMC_MOSCS) -> main oscillator ready 67 67 1 (AT91_PMC_LOCKA) -> PLL A ready 68 68 2 (AT91_PMC_LOCKB) -> PLL B ready
+1 -1
Documentation/devicetree/bindings/clock/renesas,cpg-mstp-clocks.txt
··· 43 43 clock-output-names = 44 44 "tpu0", "mmcif1", "sdhi3", "sdhi2", 45 45 "sdhi1", "sdhi0", "mmcif0"; 46 - renesas,clock-indices = < 46 + clock-indices = < 47 47 R8A7790_CLK_TPU0 R8A7790_CLK_MMCIF1 R8A7790_CLK_SDHI3 48 48 R8A7790_CLK_SDHI2 R8A7790_CLK_SDHI1 R8A7790_CLK_SDHI0 49 49 R8A7790_CLK_MMCIF0
+2 -2
Documentation/devicetree/bindings/dma/ti-edma.txt
··· 29 29 dma-channels = <64>; 30 30 ti,edma-regions = <4>; 31 31 ti,edma-slots = <256>; 32 - ti,edma-xbar-event-map = <1 12 33 - 2 13>; 32 + ti,edma-xbar-event-map = /bits/ 16 <1 12 33 + 2 13>; 34 34 };
+4 -4
Documentation/kernel-parameters.txt
··· 2218 2218 noreplace-smp [X86-32,SMP] Don't replace SMP instructions 2219 2219 with UP alternatives 2220 2220 2221 - nordrand [X86] Disable the direct use of the RDRAND 2222 - instruction even if it is supported by the 2223 - processor. RDRAND is still available to user 2224 - space applications. 2221 + nordrand [X86] Disable kernel use of the RDRAND and 2222 + RDSEED instructions even if they are supported 2223 + by the processor. RDRAND and RDSEED are still 2224 + available to user space applications. 2225 2225 2226 2226 noresume [SWSUSP] Disables resume and restores original swap 2227 2227 space.
+18 -6
MAINTAINERS
··· 1893 1893 S: Supported 1894 1894 F: drivers/net/ethernet/broadcom/bnx2x/ 1895 1895 1896 - BROADCOM BCM281XX/BCM11XXX ARM ARCHITECTURE 1896 + BROADCOM BCM281XX/BCM11XXX/BCM216XX ARM ARCHITECTURE 1897 1897 M: Christian Daudt <bcm@fixthebug.org> 1898 1898 M: Matt Porter <mporter@linaro.org> 1899 1899 L: bcm-kernel-feedback-list@broadcom.com 1900 - T: git git://git.github.com/broadcom/bcm11351 1900 + T: git git://github.com/broadcom/mach-bcm 1901 1901 S: Maintained 1902 1902 F: arch/arm/mach-bcm/ 1903 1903 F: arch/arm/boot/dts/bcm113* 1904 + F: arch/arm/boot/dts/bcm216* 1904 1905 F: arch/arm/boot/dts/bcm281* 1905 1906 F: arch/arm/configs/bcm_defconfig 1906 1907 F: drivers/mmc/host/sdhci_bcm_kona.c ··· 4813 4812 S: Maintained 4814 4813 T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git irq/core 4815 4814 F: kernel/irq/ 4815 + 4816 + IRQCHIP DRIVERS 4817 + M: Thomas Gleixner <tglx@linutronix.de> 4818 + M: Jason Cooper <jason@lakedaemon.net> 4819 + L: linux-kernel@vger.kernel.org 4820 + S: Maintained 4821 + T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git irq/core 4822 + T: git git://git.infradead.org/users/jcooper/linux.git irqchip/core 4816 4823 F: drivers/irqchip/ 4817 4824 4818 4825 IRQ DOMAINS (IRQ NUMBER MAPPING LIBRARY) ··· 5493 5484 F: drivers/hwmon/ltc4261.c 5494 5485 5495 5486 LTP (Linux Test Project) 5496 - M: Shubham Goyal <shubham@linux.vnet.ibm.com> 5497 5487 M: Mike Frysinger <vapier@gentoo.org> 5498 5488 M: Cyril Hrubis <chrubis@suse.cz> 5499 - M: Caspar Zhang <caspar@casparzhang.com> 5500 5489 M: Wanlong Gao <gaowanlong@cn.fujitsu.com> 5490 + M: Jan Stancek <jstancek@redhat.com> 5491 + M: Stanislav Kholmanskikh <stanislav.kholmanskikh@oracle.com> 5492 + M: Alexey Kodanev <alexey.kodanev@oracle.com> 5501 5493 L: ltp-list@lists.sourceforge.net (subscribers-only) 5502 - W: http://ltp.sourceforge.net/ 5494 + W: http://linux-test-project.github.io/ 5503 5495 T: git git://github.com/linux-test-project/ltp.git 5504 - T: git 
git://ltp.git.sourceforge.net/gitroot/ltp/ltp-dev 5505 5496 S: Maintained 5506 5497 5507 5498 M32R ARCHITECTURE ··· 9111 9102 9112 9103 TURBOCHANNEL SUBSYSTEM 9113 9104 M: "Maciej W. Rozycki" <macro@linux-mips.org> 9105 + M: Ralf Baechle <ralf@linux-mips.org> 9106 + L: linux-mips@linux-mips.org 9107 + Q: http://patchwork.linux-mips.org/project/linux-mips/list/ 9114 9108 S: Maintained 9115 9109 F: drivers/tc/ 9116 9110 F: include/linux/tc.h
+1 -1
Makefile
··· 1 1 VERSION = 3 2 2 PATCHLEVEL = 15 3 3 SUBLEVEL = 0 4 - EXTRAVERSION = -rc5 4 + EXTRAVERSION = -rc6 5 5 NAME = Shuffling Zombie Juror 6 6 7 7 # *DOCUMENTATION*
+1 -1
arch/arm/boot/dts/am33xx.dtsi
··· 144 144 compatible = "ti,edma3"; 145 145 ti,hwmods = "tpcc", "tptc0", "tptc1", "tptc2"; 146 146 reg = <0x49000000 0x10000>, 147 - <0x44e10f90 0x10>; 147 + <0x44e10f90 0x40>; 148 148 interrupts = <12 13 14>; 149 149 #dma-cells = <1>; 150 150 dma-channels = <64>;
+16
arch/arm/boot/dts/am3517.dtsi
··· 62 62 }; 63 63 }; 64 64 65 + &iva { 66 + status = "disabled"; 67 + }; 68 + 69 + &mailbox { 70 + status = "disabled"; 71 + }; 72 + 73 + &mmu_isp { 74 + status = "disabled"; 75 + }; 76 + 77 + &smartreflex_mpu_iva { 78 + status = "disabled"; 79 + }; 80 + 65 81 /include/ "am35xx-clocks.dtsi" 66 82 /include/ "omap36xx-am35xx-omap3430es2plus-clocks.dtsi"
+5
arch/arm/boot/dts/am437x-gp-evm.dts
··· 117 117 status = "okay"; 118 118 }; 119 119 120 + &gpio5 { 121 + status = "okay"; 122 + ti,no-reset-on-init; 123 + }; 124 + 120 125 &mmc1 { 121 126 status = "okay"; 122 127 vmmc-supply = <&vmmcsd_fixed>;
+1
arch/arm/boot/dts/armada-370-db.dts
··· 67 67 i2c@11000 { 68 68 pinctrl-0 = <&i2c0_pins>; 69 69 pinctrl-names = "default"; 70 + clock-frequency = <100000>; 70 71 status = "okay"; 71 72 audio_codec: audio-codec@4a { 72 73 compatible = "cirrus,cs42l51";
+5
arch/arm/boot/dts/armada-375-db.dts
··· 79 79 }; 80 80 }; 81 81 82 + sata@a0000 { 83 + status = "okay"; 84 + nr-ports = <2>; 85 + }; 86 + 82 87 nand: nand@d0000 { 83 88 pinctrl-0 = <&nand_pins>; 84 89 pinctrl-names = "default";
+1 -1
arch/arm/boot/dts/armada-xp-db.dts
··· 49 49 /* Device Bus parameters are required */ 50 50 51 51 /* Read parameters */ 52 - devbus,bus-width = <8>; 52 + devbus,bus-width = <16>; 53 53 devbus,turn-off-ps = <60000>; 54 54 devbus,badr-skew-ps = <0>; 55 55 devbus,acc-first-ps = <124000>;
+5 -5
arch/arm/boot/dts/armada-xp-gp.dts
··· 59 59 /* Device Bus parameters are required */ 60 60 61 61 /* Read parameters */ 62 - devbus,bus-width = <8>; 62 + devbus,bus-width = <16>; 63 63 devbus,turn-off-ps = <60000>; 64 64 devbus,badr-skew-ps = <0>; 65 65 devbus,acc-first-ps = <124000>; ··· 146 146 ethernet@70000 { 147 147 status = "okay"; 148 148 phy = <&phy0>; 149 - phy-mode = "rgmii-id"; 149 + phy-mode = "qsgmii"; 150 150 }; 151 151 ethernet@74000 { 152 152 status = "okay"; 153 153 phy = <&phy1>; 154 - phy-mode = "rgmii-id"; 154 + phy-mode = "qsgmii"; 155 155 }; 156 156 ethernet@30000 { 157 157 status = "okay"; 158 158 phy = <&phy2>; 159 - phy-mode = "rgmii-id"; 159 + phy-mode = "qsgmii"; 160 160 }; 161 161 ethernet@34000 { 162 162 status = "okay"; 163 163 phy = <&phy3>; 164 - phy-mode = "rgmii-id"; 164 + phy-mode = "qsgmii"; 165 165 }; 166 166 167 167 /* Front-side USB slot */
+1 -1
arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts
··· 39 39 /* Device Bus parameters are required */ 40 40 41 41 /* Read parameters */ 42 - devbus,bus-width = <8>; 42 + devbus,bus-width = <16>; 43 43 devbus,turn-off-ps = <60000>; 44 44 devbus,badr-skew-ps = <0>; 45 45 devbus,acc-first-ps = <124000>;
+2 -2
arch/arm/boot/dts/at91-sama5d3_xplained.dts
··· 34 34 }; 35 35 36 36 spi0: spi@f0004000 { 37 - cs-gpios = <&pioD 13 0>; 37 + cs-gpios = <&pioD 13 0>, <0>, <0>, <&pioD 16 0>; 38 38 status = "okay"; 39 39 }; 40 40 ··· 79 79 }; 80 80 81 81 spi1: spi@f8008000 { 82 - cs-gpios = <&pioC 25 0>, <0>, <0>, <&pioD 16 0>; 82 + cs-gpios = <&pioC 25 0>; 83 83 status = "okay"; 84 84 }; 85 85
+1 -1
arch/arm/boot/dts/at91sam9261.dtsi
··· 10 10 #include <dt-bindings/pinctrl/at91.h> 11 11 #include <dt-bindings/interrupt-controller/irq.h> 12 12 #include <dt-bindings/gpio/gpio.h> 13 - #include <dt-bindings/clk/at91.h> 13 + #include <dt-bindings/clock/at91.h> 14 14 15 15 / { 16 16 model = "Atmel AT91SAM9261 family SoC";
+1 -1
arch/arm/boot/dts/at91sam9rl.dtsi
··· 8 8 9 9 #include "skeleton.dtsi" 10 10 #include <dt-bindings/pinctrl/at91.h> 11 - #include <dt-bindings/clk/at91.h> 11 + #include <dt-bindings/clock/at91.h> 12 12 #include <dt-bindings/interrupt-controller/irq.h> 13 13 #include <dt-bindings/gpio/gpio.h> 14 14
+1 -1
arch/arm/boot/dts/imx53-mba53.dts
··· 244 244 &tve { 245 245 pinctrl-names = "default"; 246 246 pinctrl-0 = <&pinctrl_vga_sync_1>; 247 - i2c-ddc-bus = <&i2c3>; 247 + ddc-i2c-bus = <&i2c3>; 248 248 fsl,tve-mode = "vga"; 249 249 fsl,hsync-pin = <4>; 250 250 fsl,vsync-pin = <6>;
+1 -1
arch/arm/boot/dts/imx53.dtsi
··· 115 115 #address-cells = <1>; 116 116 #size-cells = <0>; 117 117 compatible = "fsl,imx53-ipu"; 118 - reg = <0x18000000 0x080000000>; 118 + reg = <0x18000000 0x08000000>; 119 119 interrupts = <11 10>; 120 120 clocks = <&clks IMX5_CLK_IPU_GATE>, 121 121 <&clks IMX5_CLK_IPU_DI0_GATE>,
+10 -8
arch/arm/boot/dts/kirkwood-mv88f6281gtw-ge.dts
··· 30 30 bootargs = "console=ttyS0,115200n8 earlyprintk"; 31 31 }; 32 32 33 + mbus { 34 + pcie-controller { 35 + status = "okay"; 36 + 37 + pcie@1,0 { 38 + status = "okay"; 39 + }; 40 + }; 41 + }; 42 + 33 43 ocp@f1000000 { 34 44 pinctrl@10000 { 35 45 pmx_usb_led: pmx-usb-led { ··· 82 72 83 73 ehci@50000 { 84 74 status = "okay"; 85 - }; 86 - 87 - pcie-controller { 88 - status = "okay"; 89 - 90 - pcie@1,0 { 91 - status = "okay"; 92 - }; 93 75 }; 94 76 }; 95 77
+10 -8
arch/arm/boot/dts/kirkwood-nsa310-common.dtsi
··· 4 4 / { 5 5 model = "ZyXEL NSA310"; 6 6 7 + mbus { 8 + pcie-controller { 9 + status = "okay"; 10 + 11 + pcie@1,0 { 12 + status = "okay"; 13 + }; 14 + }; 15 + }; 16 + 7 17 ocp@f1000000 { 8 18 pinctrl: pinctrl@10000 { 9 19 ··· 35 25 sata@80000 { 36 26 status = "okay"; 37 27 nr-ports = <2>; 38 - }; 39 - 40 - pcie-controller { 41 - status = "okay"; 42 - 43 - pcie@1,0 { 44 - status = "okay"; 45 - }; 46 28 }; 47 29 }; 48 30
-5
arch/arm/boot/dts/kirkwood-t5325.dts
··· 127 127 128 128 i2c@11000 { 129 129 status = "okay"; 130 - 131 - alc5621: alc5621@1a { 132 - compatible = "realtek,alc5621"; 133 - reg = <0x1a>; 134 - }; 135 130 }; 136 131 137 132 serial@12000 {
+8 -11
arch/arm/boot/dts/omap-gpmc-smsc911x.dtsi
··· 24 24 compatible = "smsc,lan9221", "smsc,lan9115"; 25 25 bank-width = <2>; 26 26 gpmc,mux-add-data; 27 - gpmc,cs-on-ns = <0>; 28 - gpmc,cs-rd-off-ns = <186>; 29 - gpmc,cs-wr-off-ns = <186>; 30 - gpmc,adv-on-ns = <12>; 31 - gpmc,adv-rd-off-ns = <48>; 27 + gpmc,cs-on-ns = <1>; 28 + gpmc,cs-rd-off-ns = <180>; 29 + gpmc,cs-wr-off-ns = <180>; 30 + gpmc,adv-rd-off-ns = <18>; 32 31 gpmc,adv-wr-off-ns = <48>; 33 32 gpmc,oe-on-ns = <54>; 34 33 gpmc,oe-off-ns = <168>; ··· 35 36 gpmc,we-off-ns = <168>; 36 37 gpmc,rd-cycle-ns = <186>; 37 38 gpmc,wr-cycle-ns = <186>; 38 - gpmc,access-ns = <114>; 39 - gpmc,page-burst-access-ns = <6>; 40 - gpmc,bus-turnaround-ns = <12>; 41 - gpmc,cycle2cycle-delay-ns = <18>; 42 - gpmc,wr-data-mux-bus-ns = <90>; 43 - gpmc,wr-access-ns = <186>; 39 + gpmc,access-ns = <144>; 40 + gpmc,page-burst-access-ns = <24>; 41 + gpmc,bus-turnaround-ns = <90>; 42 + gpmc,cycle2cycle-delay-ns = <90>; 44 43 gpmc,cycle2cycle-samecsen; 45 44 gpmc,cycle2cycle-diffcsen; 46 45 vddvario-supply = <&vddvario>;
-7
arch/arm/boot/dts/omap2.dtsi
··· 71 71 interrupts = <58>; 72 72 }; 73 73 74 - mailbox: mailbox@48094000 { 75 - compatible = "ti,omap2-mailbox"; 76 - ti,hwmods = "mailbox"; 77 - reg = <0x48094000 0x200>; 78 - interrupts = <26>; 79 - }; 80 - 81 74 intc: interrupt-controller@1 { 82 75 compatible = "ti,omap2-intc"; 83 76 interrupt-controller;
+8
arch/arm/boot/dts/omap2420.dtsi
··· 125 125 dma-names = "tx", "rx"; 126 126 }; 127 127 128 + mailbox: mailbox@48094000 { 129 + compatible = "ti,omap2-mailbox"; 130 + reg = <0x48094000 0x200>; 131 + interrupts = <26>, <34>; 132 + interrupt-names = "dsp", "iva"; 133 + ti,hwmods = "mailbox"; 134 + }; 135 + 128 136 timer1: timer@48028000 { 129 137 compatible = "ti,omap2420-timer"; 130 138 reg = <0x48028000 0x400>;
+7
arch/arm/boot/dts/omap2430.dtsi
··· 216 216 dma-names = "tx", "rx"; 217 217 }; 218 218 219 + mailbox: mailbox@48094000 { 220 + compatible = "ti,omap2-mailbox"; 221 + reg = <0x48094000 0x200>; 222 + interrupts = <26>; 223 + ti,hwmods = "mailbox"; 224 + }; 225 + 219 226 timer1: timer@49018000 { 220 227 compatible = "ti,omap2420-timer"; 221 228 reg = <0x49018000 0x400>;
+15 -51
arch/arm/boot/dts/omap3-cm-t3x30.dtsi
··· 10 10 cpu0-supply = <&vcc>; 11 11 }; 12 12 }; 13 - 14 - vddvario: regulator-vddvario { 15 - compatible = "regulator-fixed"; 16 - regulator-name = "vddvario"; 17 - regulator-always-on; 18 - }; 19 - 20 - vdd33a: regulator-vdd33a { 21 - compatible = "regulator-fixed"; 22 - regulator-name = "vdd33a"; 23 - regulator-always-on; 24 - }; 25 13 }; 26 14 27 15 &omap3_pmx_core { ··· 23 35 24 36 hsusb0_pins: pinmux_hsusb0_pins { 25 37 pinctrl-single,pins = < 26 - OMAP3_CORE1_IOPAD(0x21a0, PIN_OUTPUT | MUX_MODE0) /* hsusb0_clk.hsusb0_clk */ 27 - OMAP3_CORE1_IOPAD(0x21a2, PIN_OUTPUT | MUX_MODE0) /* hsusb0_stp.hsusb0_stp */ 28 - OMAP3_CORE1_IOPAD(0x21a4, PIN_INPUT_PULLDOWN | MUX_MODE0) /* hsusb0_dir.hsusb0_dir */ 29 - OMAP3_CORE1_IOPAD(0x21a6, PIN_INPUT_PULLDOWN | MUX_MODE0) /* hsusb0_nxt.hsusb0_nxt */ 30 - OMAP3_CORE1_IOPAD(0x21a8, PIN_INPUT_PULLDOWN | MUX_MODE0) /* hsusb0_data0.hsusb2_data0 */ 31 - OMAP3_CORE1_IOPAD(0x21aa, PIN_INPUT_PULLDOWN | MUX_MODE0) /* hsusb0_data1.hsusb0_data1 */ 32 - OMAP3_CORE1_IOPAD(0x21ac, PIN_INPUT_PULLDOWN | MUX_MODE0) /* hsusb0_data2.hsusb0_data2 */ 33 - OMAP3_CORE1_IOPAD(0x21ae, PIN_INPUT_PULLDOWN | MUX_MODE0) /* hsusb0_data7.hsusb0_data3 */ 34 - OMAP3_CORE1_IOPAD(0x21b0, PIN_INPUT_PULLDOWN | MUX_MODE0) /* hsusb0_data7.hsusb0_data4 */ 35 - OMAP3_CORE1_IOPAD(0x21b2, PIN_INPUT_PULLDOWN | MUX_MODE0) /* hsusb0_data7.hsusb0_data5 */ 36 - OMAP3_CORE1_IOPAD(0x21b4, PIN_INPUT_PULLDOWN | MUX_MODE0) /* hsusb0_data7.hsusb0_data6 */ 37 - OMAP3_CORE1_IOPAD(0x21b6, PIN_INPUT_PULLDOWN | MUX_MODE0) /* hsusb0_data7.hsusb0_data7 */ 38 + OMAP3_CORE1_IOPAD(0x21a2, PIN_OUTPUT | MUX_MODE0) /* hsusb0_clk.hsusb0_clk */ 39 + OMAP3_CORE1_IOPAD(0x21a4, PIN_OUTPUT | MUX_MODE0) /* hsusb0_stp.hsusb0_stp */ 40 + OMAP3_CORE1_IOPAD(0x21a6, PIN_INPUT_PULLDOWN | MUX_MODE0) /* hsusb0_dir.hsusb0_dir */ 41 + OMAP3_CORE1_IOPAD(0x21a8, PIN_INPUT_PULLDOWN | MUX_MODE0) /* hsusb0_nxt.hsusb0_nxt */ 42 + OMAP3_CORE1_IOPAD(0x21aa, PIN_INPUT_PULLDOWN | MUX_MODE0) /* 
hsusb0_data0.hsusb2_data0 */ 43 + OMAP3_CORE1_IOPAD(0x21ac, PIN_INPUT_PULLDOWN | MUX_MODE0) /* hsusb0_data1.hsusb0_data1 */ 44 + OMAP3_CORE1_IOPAD(0x21ae, PIN_INPUT_PULLDOWN | MUX_MODE0) /* hsusb0_data2.hsusb0_data2 */ 45 + OMAP3_CORE1_IOPAD(0x21b0, PIN_INPUT_PULLDOWN | MUX_MODE0) /* hsusb0_data7.hsusb0_data3 */ 46 + OMAP3_CORE1_IOPAD(0x21b2, PIN_INPUT_PULLDOWN | MUX_MODE0) /* hsusb0_data7.hsusb0_data4 */ 47 + OMAP3_CORE1_IOPAD(0x21b4, PIN_INPUT_PULLDOWN | MUX_MODE0) /* hsusb0_data7.hsusb0_data5 */ 48 + OMAP3_CORE1_IOPAD(0x21b6, PIN_INPUT_PULLDOWN | MUX_MODE0) /* hsusb0_data7.hsusb0_data6 */ 49 + OMAP3_CORE1_IOPAD(0x21b8, PIN_INPUT_PULLDOWN | MUX_MODE0) /* hsusb0_data7.hsusb0_data7 */ 38 50 >; 39 51 }; 40 52 }; 41 53 54 + #include "omap-gpmc-smsc911x.dtsi" 55 + 42 56 &gpmc { 43 57 ranges = <5 0 0x2c000000 0x01000000>; 44 58 45 - smsc1: ethernet@5,0 { 59 + smsc1: ethernet@gpmc { 46 60 compatible = "smsc,lan9221", "smsc,lan9115"; 47 61 pinctrl-names = "default"; 48 62 pinctrl-0 = <&smsc1_pins>; 49 63 interrupt-parent = <&gpio6>; 50 64 interrupts = <3 IRQ_TYPE_LEVEL_LOW>; 51 65 reg = <5 0 0xff>; 52 - bank-width = <2>; 53 - gpmc,mux-add-data; 54 - gpmc,cs-on-ns = <0>; 55 - gpmc,cs-rd-off-ns = <186>; 56 - gpmc,cs-wr-off-ns = <186>; 57 - gpmc,adv-on-ns = <12>; 58 - gpmc,adv-rd-off-ns = <48>; 59 - gpmc,adv-wr-off-ns = <48>; 60 - gpmc,oe-on-ns = <54>; 61 - gpmc,oe-off-ns = <168>; 62 - gpmc,we-on-ns = <54>; 63 - gpmc,we-off-ns = <168>; 64 - gpmc,rd-cycle-ns = <186>; 65 - gpmc,wr-cycle-ns = <186>; 66 - gpmc,access-ns = <114>; 67 - gpmc,page-burst-access-ns = <6>; 68 - gpmc,bus-turnaround-ns = <12>; 69 - gpmc,cycle2cycle-delay-ns = <18>; 70 - gpmc,wr-data-mux-bus-ns = <90>; 71 - gpmc,wr-access-ns = <186>; 72 - gpmc,cycle2cycle-samecsen; 73 - gpmc,cycle2cycle-diffcsen; 74 - vddvario-supply = <&vddvario>; 75 - vdd33a-supply = <&vdd33a>; 76 - reg-io-width = <4>; 77 - smsc,save-mac-address; 78 66 }; 79 67 }; 80 68
+1 -1
arch/arm/boot/dts/omap3-igep.dtsi
··· 107 107 >; 108 108 }; 109 109 110 - smsc911x_pins: pinmux_smsc911x_pins { 110 + smsc9221_pins: pinmux_smsc9221_pins { 111 111 pinctrl-single,pins = < 112 112 0x1a2 (PIN_INPUT | MUX_MODE4) /* mcspi1_cs2.gpio_176 */ 113 113 >;
+2 -2
arch/arm/boot/dts/omap3-igep0020.dts
··· 10 10 */ 11 11 12 12 #include "omap3-igep.dtsi" 13 - #include "omap-gpmc-smsc911x.dtsi" 13 + #include "omap-gpmc-smsc9221.dtsi" 14 14 15 15 / { 16 16 model = "IGEPv2 (TI OMAP AM/DM37x)"; ··· 248 248 249 249 ethernet@gpmc { 250 250 pinctrl-names = "default"; 251 - pinctrl-0 = <&smsc911x_pins>; 251 + pinctrl-0 = <&smsc9221_pins>; 252 252 reg = <5 0 0xff>; 253 253 interrupt-parent = <&gpio6>; 254 254 interrupts = <16 IRQ_TYPE_LEVEL_LOW>;
+10 -27
arch/arm/boot/dts/omap3-sb-t35.dtsi
··· 2 2 * Common support for CompuLab SB-T35 used on SBC-T3530, SBC-T3517 and SBC-T3730 3 3 */ 4 4 5 - / { 6 - vddvario_sb_t35: regulator-vddvario-sb-t35 { 7 - compatible = "regulator-fixed"; 8 - regulator-name = "vddvario"; 9 - regulator-always-on; 10 - }; 11 - 12 - vdd33a_sb_t35: regulator-vdd33a-sb-t35 { 13 - compatible = "regulator-fixed"; 14 - regulator-name = "vdd33a"; 15 - regulator-always-on; 16 - }; 17 - }; 18 - 19 5 &omap3_pmx_core { 20 6 smsc2_pins: pinmux_smsc2_pins { 21 7 pinctrl-single,pins = < ··· 23 37 reg = <4 0 0xff>; 24 38 bank-width = <2>; 25 39 gpmc,mux-add-data; 26 - gpmc,cs-on-ns = <0>; 27 - gpmc,cs-rd-off-ns = <186>; 28 - gpmc,cs-wr-off-ns = <186>; 29 - gpmc,adv-on-ns = <12>; 30 - gpmc,adv-rd-off-ns = <48>; 40 + gpmc,cs-on-ns = <1>; 41 + gpmc,cs-rd-off-ns = <180>; 42 + gpmc,cs-wr-off-ns = <180>; 43 + gpmc,adv-rd-off-ns = <18>; 31 44 gpmc,adv-wr-off-ns = <48>; 32 45 gpmc,oe-on-ns = <54>; 33 46 gpmc,oe-off-ns = <168>; ··· 34 49 gpmc,we-off-ns = <168>; 35 50 gpmc,rd-cycle-ns = <186>; 36 51 gpmc,wr-cycle-ns = <186>; 37 - gpmc,access-ns = <114>; 38 - gpmc,page-burst-access-ns = <6>; 39 - gpmc,bus-turnaround-ns = <12>; 40 - gpmc,cycle2cycle-delay-ns = <18>; 41 - gpmc,wr-data-mux-bus-ns = <90>; 42 - gpmc,wr-access-ns = <186>; 52 + gpmc,access-ns = <144>; 53 + gpmc,page-burst-access-ns = <24>; 54 + gpmc,bus-turnaround-ns = <90>; 55 + gpmc,cycle2cycle-delay-ns = <90>; 43 56 gpmc,cycle2cycle-samecsen; 44 57 gpmc,cycle2cycle-diffcsen; 45 - vddvario-supply = <&vddvario_sb_t35>; 46 - vdd33a-supply = <&vdd33a_sb_t35>; 58 + vddvario-supply = <&vddvario>; 59 + vdd33a-supply = <&vdd33a>; 47 60 reg-io-width = <4>; 48 61 smsc,save-mac-address; 49 62 };
+13
arch/arm/boot/dts/omap3-sbc-t3517.dts
··· 8 8 / { 9 9 model = "CompuLab SBC-T3517 with CM-T3517"; 10 10 compatible = "compulab,omap3-sbc-t3517", "compulab,omap3-cm-t3517", "ti,am3517", "ti,omap3"; 11 + 12 + /* Only one GPMC smsc9220 on SBC-T3517, CM-T3517 uses am35x Ethernet */ 13 + vddvario: regulator-vddvario-sb-t35 { 14 + compatible = "regulator-fixed"; 15 + regulator-name = "vddvario"; 16 + regulator-always-on; 17 + }; 18 + 19 + vdd33a: regulator-vdd33a-sb-t35 { 20 + compatible = "regulator-fixed"; 21 + regulator-name = "vdd33a"; 22 + regulator-always-on; 23 + }; 11 24 }; 12 25 13 26 &omap3_pmx_core {
+1 -1
arch/arm/boot/dts/omap3.dtsi
··· 61 61 ti,hwmods = "mpu"; 62 62 }; 63 63 64 - iva { 64 + iva: iva { 65 65 compatible = "ti,iva2.2"; 66 66 ti,hwmods = "iva"; 67 67
+7
arch/arm/boot/dts/omap5.dtsi
··· 630 630 status = "disabled"; 631 631 }; 632 632 633 + mailbox: mailbox@4a0f4000 { 634 + compatible = "ti,omap4-mailbox"; 635 + reg = <0x4a0f4000 0x200>; 636 + interrupts = <GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>; 637 + ti,hwmods = "mailbox"; 638 + }; 639 + 633 640 timer1: timer@4ae18000 { 634 641 compatible = "ti,omap5430-timer"; 635 642 reg = <0x4ae18000 0x80>;
+1 -1
arch/arm/boot/dts/sama5d3.dtsi
··· 13 13 #include <dt-bindings/pinctrl/at91.h> 14 14 #include <dt-bindings/interrupt-controller/irq.h> 15 15 #include <dt-bindings/gpio/gpio.h> 16 - #include <dt-bindings/clk/at91.h> 16 + #include <dt-bindings/clock/at91.h> 17 17 18 18 / { 19 19 model = "Atmel SAMA5D3 family SoC";
+1 -1
arch/arm/boot/dts/sama5d3_mci2.dtsi
··· 9 9 10 10 #include <dt-bindings/pinctrl/at91.h> 11 11 #include <dt-bindings/interrupt-controller/irq.h> 12 - #include <dt-bindings/clk/at91.h> 12 + #include <dt-bindings/clock/at91.h> 13 13 14 14 / { 15 15 ahb {
+1 -1
arch/arm/boot/dts/sama5d3_tcb1.dtsi
··· 9 9 10 10 #include <dt-bindings/pinctrl/at91.h> 11 11 #include <dt-bindings/interrupt-controller/irq.h> 12 - #include <dt-bindings/clk/at91.h> 12 + #include <dt-bindings/clock/at91.h> 13 13 14 14 / { 15 15 aliases {
+1 -1
arch/arm/boot/dts/sama5d3_uart.dtsi
··· 9 9 10 10 #include <dt-bindings/pinctrl/at91.h> 11 11 #include <dt-bindings/interrupt-controller/irq.h> 12 - #include <dt-bindings/clk/at91.h> 12 + #include <dt-bindings/clock/at91.h> 13 13 14 14 / { 15 15 aliases {
+1
arch/arm/boot/dts/ste-ccu8540.dts
··· 18 18 compatible = "st-ericsson,ccu8540", "st-ericsson,u8540"; 19 19 20 20 memory@0 { 21 + device_type = "memory"; 21 22 reg = <0x20000000 0x1f000000>, <0xc0000000 0x3f000000>; 22 23 }; 23 24
+11 -3
arch/arm/boot/dts/sun7i-a20.dtsi
··· 87 87 88 88 pll4: clk@01c20018 { 89 89 #clock-cells = <0>; 90 - compatible = "allwinner,sun4i-a10-pll1-clk"; 90 + compatible = "allwinner,sun7i-a20-pll4-clk"; 91 91 reg = <0x01c20018 0x4>; 92 92 clocks = <&osc24M>; 93 93 clock-output-names = "pll4"; ··· 107 107 reg = <0x01c20028 0x4>; 108 108 clocks = <&osc24M>; 109 109 clock-output-names = "pll6_sata", "pll6_other", "pll6"; 110 + }; 111 + 112 + pll8: clk@01c20040 { 113 + #clock-cells = <0>; 114 + compatible = "allwinner,sun7i-a20-pll4-clk"; 115 + reg = <0x01c20040 0x4>; 116 + clocks = <&osc24M>; 117 + clock-output-names = "pll8"; 110 118 }; 111 119 112 120 cpu: cpu@01c20054 { ··· 813 805 status = "disabled"; 814 806 }; 815 807 816 - i2c4: i2c@01c2bc00 { 808 + i2c4: i2c@01c2c000 { 817 809 compatible = "allwinner,sun4i-i2c"; 818 - reg = <0x01c2bc00 0x400>; 810 + reg = <0x01c2c000 0x400>; 819 811 interrupts = <0 89 4>; 820 812 clocks = <&apb1_gates 15>; 821 813 clock-frequency = <100000>;
+15 -33
arch/arm/common/edma.c
··· 1423 1423 1424 1424 #if IS_ENABLED(CONFIG_OF) && IS_ENABLED(CONFIG_DMADEVICES) 1425 1425 1426 - static int edma_of_read_u32_to_s16_array(const struct device_node *np, 1427 - const char *propname, s16 *out_values, 1428 - size_t sz) 1426 + static int edma_xbar_event_map(struct device *dev, struct device_node *node, 1427 + struct edma_soc_info *pdata, size_t sz) 1429 1428 { 1430 - int ret; 1431 - 1432 - ret = of_property_read_u16_array(np, propname, out_values, sz); 1433 - if (ret) 1434 - return ret; 1435 - 1436 - /* Terminate it */ 1437 - *out_values++ = -1; 1438 - *out_values++ = -1; 1439 - 1440 - return 0; 1441 - } 1442 - 1443 - static int edma_xbar_event_map(struct device *dev, 1444 - struct device_node *node, 1445 - struct edma_soc_info *pdata, int len) 1446 - { 1447 - int ret, i; 1429 + const char pname[] = "ti,edma-xbar-event-map"; 1448 1430 struct resource res; 1449 1431 void __iomem *xbar; 1450 - const s16 (*xbar_chans)[2]; 1432 + s16 (*xbar_chans)[2]; 1433 + size_t nelm = sz / sizeof(s16); 1451 1434 u32 shift, offset, mux; 1435 + int ret, i; 1452 1436 1453 - xbar_chans = devm_kzalloc(dev, 1454 - len/sizeof(s16) + 2*sizeof(s16), 1455 - GFP_KERNEL); 1437 + xbar_chans = devm_kzalloc(dev, (nelm + 2) * sizeof(s16), GFP_KERNEL); 1456 1438 if (!xbar_chans) 1457 1439 return -ENOMEM; 1458 1440 1459 1441 ret = of_address_to_resource(node, 1, &res); 1460 1442 if (ret) 1461 - return -EIO; 1443 + return -ENOMEM; 1462 1444 1463 1445 xbar = devm_ioremap(dev, res.start, resource_size(&res)); 1464 1446 if (!xbar) 1465 1447 return -ENOMEM; 1466 1448 1467 - ret = edma_of_read_u32_to_s16_array(node, 1468 - "ti,edma-xbar-event-map", 1469 - (s16 *)xbar_chans, 1470 - len/sizeof(u32)); 1449 + ret = of_property_read_u16_array(node, pname, (u16 *)xbar_chans, nelm); 1471 1450 if (ret) 1472 1451 return -EIO; 1473 1452 1474 - for (i = 0; xbar_chans[i][0] != -1; i++) { 1453 + /* Invalidate last entry for the other user of this mess */ 1454 + nelm >>= 1; 1455 + xbar_chans[nelm][0] = 
xbar_chans[nelm][1] = -1; 1456 + 1457 + for (i = 0; i < nelm; i++) { 1475 1458 shift = (xbar_chans[i][1] & 0x03) << 3; 1476 1459 offset = xbar_chans[i][1] & 0xfffffffc; 1477 1460 mux = readl(xbar + offset); ··· 1463 1480 writel(mux, (xbar + offset)); 1464 1481 } 1465 1482 1466 - pdata->xbar_chans = xbar_chans; 1467 - 1483 + pdata->xbar_chans = (const s16 (*)[2]) xbar_chans; 1468 1484 return 0; 1469 1485 } 1470 1486
+1 -1
arch/arm/configs/sunxi_defconfig
··· 37 37 # CONFIG_NET_VENDOR_NATSEMI is not set 38 38 # CONFIG_NET_VENDOR_SEEQ is not set 39 39 # CONFIG_NET_VENDOR_SMSC is not set 40 - # CONFIG_NET_VENDOR_STMICRO is not set 40 + CONFIG_STMMAC_ETH=y 41 41 # CONFIG_NET_VENDOR_WIZNET is not set 42 42 # CONFIG_WLAN is not set 43 43 CONFIG_SERIAL_8250=y
-1
arch/arm/include/asm/xen/page.h
··· 77 77 } 78 78 /* VIRT <-> MACHINE conversion */ 79 79 #define virt_to_machine(v) (phys_to_machine(XPADDR(__pa(v)))) 80 - #define virt_to_pfn(v) (PFN_DOWN(__pa(v))) 81 80 #define virt_to_mfn(v) (pfn_to_mfn(virt_to_pfn(v))) 82 81 #define mfn_to_virt(m) (__va(mfn_to_pfn(m) << PAGE_SHIFT)) 83 82
+6 -2
arch/arm/mach-omap2/omap-headsmp.S
··· 1 1 /* 2 2 * Secondary CPU startup routine source file. 3 3 * 4 - * Copyright (C) 2009 Texas Instruments, Inc. 4 + * Copyright (C) 2009-2014 Texas Instruments, Inc. 5 5 * 6 6 * Author: 7 7 * Santosh Shilimkar <santosh.shilimkar@ti.com> ··· 28 28 * code. This routine also provides a holding flag into which 29 29 * secondary core is held until we're ready for it to initialise. 30 30 * The primary core will update this flag using a hardware 31 - + * register AuxCoreBoot0. 31 + * register AuxCoreBoot0. 32 32 */ 33 33 ENTRY(omap5_secondary_startup) 34 + .arm 35 + THUMB( adr r9, BSYM(wait) ) @ CPU may be entered in ARM mode. 36 + THUMB( bx r9 ) @ If this is a Thumb-2 kernel, 37 + THUMB( .thumb ) @ switch to Thumb now. 34 38 wait: ldr r2, =AUX_CORE_BOOT0_PA @ read from AuxCoreBoot0 35 39 ldr r0, [r2] 36 40 mov r0, r0, lsr #5
+1 -1
arch/arm/mach-orion5x/common.h
··· 21 21 #define ORION_MBUS_DEVBUS_BOOT_ATTR 0x0f 22 22 #define ORION_MBUS_DEVBUS_TARGET(cs) 0x01 23 23 #define ORION_MBUS_DEVBUS_ATTR(cs) (~(1 << cs)) 24 - #define ORION_MBUS_SRAM_TARGET 0x00 24 + #define ORION_MBUS_SRAM_TARGET 0x09 25 25 #define ORION_MBUS_SRAM_ATTR 0x00 26 26 27 27 /*
+1
arch/arm64/include/asm/memory.h
··· 138 138 #define __pa(x) __virt_to_phys((unsigned long)(x)) 139 139 #define __va(x) ((void *)__phys_to_virt((phys_addr_t)(x))) 140 140 #define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT) 141 + #define virt_to_pfn(x) __phys_to_pfn(__virt_to_phys(x)) 141 142 142 143 /* 143 144 * virt_to_page(k) convert a _valid_ virtual address to struct page *
+7 -3
arch/arm64/kernel/irq.c
··· 97 97 if (irqd_is_per_cpu(d) || !cpumask_test_cpu(smp_processor_id(), affinity)) 98 98 return false; 99 99 100 - if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) { 101 - affinity = cpu_online_mask; 100 + if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) 102 101 ret = true; 103 - } 104 102 103 + /* 104 + * when using forced irq_set_affinity we must ensure that the cpu 105 + * being offlined is not present in the affinity mask, it may be 106 + * selected as the target CPU otherwise 107 + */ 108 + affinity = cpu_online_mask; 105 109 c = irq_data_get_irq_chip(d); 106 110 if (!c->irq_set_affinity) 107 111 pr_debug("IRQ%u: unable to set affinity\n", d->irq);
+4
arch/arm64/mm/hugetlbpage.c
··· 51 51 52 52 int pud_huge(pud_t pud) 53 53 { 54 + #ifndef __PAGETABLE_PMD_FOLDED 54 55 return !(pud_val(pud) & PUD_TABLE_BIT); 56 + #else 57 + return 0; 58 + #endif 55 59 } 56 60 57 61 int pmd_huge_support(void)
+1 -1
arch/ia64/include/asm/unistd.h
··· 11 11 12 12 13 13 14 - #define NR_syscalls 314 /* length of syscall table */ 14 + #define NR_syscalls 315 /* length of syscall table */ 15 15 16 16 /* 17 17 * The following defines stop scripts/checksyscalls.sh from complaining about
+1
arch/ia64/include/uapi/asm/unistd.h
··· 327 327 #define __NR_finit_module 1335 328 328 #define __NR_sched_setattr 1336 329 329 #define __NR_sched_getattr 1337 330 + #define __NR_renameat2 1338 330 331 331 332 #endif /* _UAPI_ASM_IA64_UNISTD_H */
+1
arch/ia64/kernel/entry.S
··· 1775 1775 data8 sys_finit_module // 1335 1776 1776 data8 sys_sched_setattr 1777 1777 data8 sys_sched_getattr 1778 + data8 sys_renameat2 1778 1779 1779 1780 .org sys_call_table + 8*NR_syscalls // guard against failures to increase NR_syscalls 1780 1781 #endif /* __IA64_ASM_PARAVIRTUALIZED_NATIVE */
+1 -1
arch/m68k/include/asm/unistd.h
··· 4 4 #include <uapi/asm/unistd.h> 5 5 6 6 7 - #define NR_syscalls 351 7 + #define NR_syscalls 352 8 8 9 9 #define __ARCH_WANT_OLD_READDIR 10 10 #define __ARCH_WANT_OLD_STAT
+1
arch/m68k/include/uapi/asm/unistd.h
··· 356 356 #define __NR_finit_module 348 357 357 #define __NR_sched_setattr 349 358 358 #define __NR_sched_getattr 350 359 + #define __NR_renameat2 351 359 360 360 361 #endif /* _UAPI_ASM_M68K_UNISTD_H_ */
+1
arch/m68k/kernel/syscalltable.S
··· 371 371 .long sys_finit_module 372 372 .long sys_sched_setattr 373 373 .long sys_sched_getattr /* 350 */ 374 + .long sys_renameat2 374 375
+3
arch/metag/include/asm/barrier.h
··· 15 15 volatile int *flushptr = (volatile int *) LINSYSEVENT_WR_FENCE; 16 16 barrier(); 17 17 *flushptr = 0; 18 + barrier(); 18 19 } 19 20 20 21 #else /* CONFIG_METAG_META21 */ ··· 36 35 *flushptr = 0; 37 36 *flushptr = 0; 38 37 *flushptr = 0; 38 + barrier(); 39 39 } 40 40 41 41 #endif /* !CONFIG_METAG_META21 */ ··· 70 68 volatile int *flushptr = (volatile int *) LINSYSEVENT_WR_ATOMIC_UNLOCK; 71 69 barrier(); 72 70 *flushptr = 0; 71 + barrier(); 73 72 } 74 73 #define smp_mb() fence() 75 74 #define smp_rmb() fence()
+2
arch/metag/include/asm/processor.h
··· 22 22 /* Add an extra page of padding at the top of the stack for the guard page. */ 23 23 #define STACK_TOP (TASK_SIZE - PAGE_SIZE) 24 24 #define STACK_TOP_MAX STACK_TOP 25 + /* Maximum virtual space for stack */ 26 + #define STACK_SIZE_MAX (CONFIG_MAX_STACK_SIZE_MB*1024*1024) 25 27 26 28 /* This decides where the kernel will search for a free chunk of vm 27 29 * space during mmap's.
+1 -1
arch/metag/include/uapi/asm/Kbuild
··· 4 4 header-y += byteorder.h 5 5 header-y += ech.h 6 6 header-y += ptrace.h 7 - header-y += resource.h 8 7 header-y += sigcontext.h 9 8 header-y += siginfo.h 10 9 header-y += swab.h 11 10 header-y += unistd.h 12 11 13 12 generic-y += mman.h 13 + generic-y += resource.h 14 14 generic-y += setup.h
-7
arch/metag/include/uapi/asm/resource.h
··· 1 - #ifndef _UAPI_METAG_RESOURCE_H 2 - #define _UAPI_METAG_RESOURCE_H 3 - 4 - #define _STK_LIM_MAX (1 << 28) 5 - #include <asm-generic/resource.h> 6 - 7 - #endif /* _UAPI_METAG_RESOURCE_H */
+1
arch/mips/dec/ecc-berr.c
··· 21 21 #include <asm/addrspace.h> 22 22 #include <asm/bootinfo.h> 23 23 #include <asm/cpu.h> 24 + #include <asm/cpu-type.h> 24 25 #include <asm/irq_regs.h> 25 26 #include <asm/processor.h> 26 27 #include <asm/ptrace.h>
+1
arch/mips/dec/kn02xa-berr.c
··· 19 19 #include <linux/types.h> 20 20 21 21 #include <asm/addrspace.h> 22 + #include <asm/cpu-type.h> 22 23 #include <asm/irq_regs.h> 23 24 #include <asm/ptrace.h> 24 25 #include <asm/traps.h>
-1
arch/mips/dec/prom/Makefile
··· 6 6 lib-y += init.o memory.o cmdline.o identify.o console.o 7 7 8 8 lib-$(CONFIG_32BIT) += locore.o 9 - lib-$(CONFIG_64BIT) += call_o32.o
-89
arch/mips/dec/prom/call_o32.S
··· 1 - /* 2 - * O32 interface for the 64 (or N32) ABI. 3 - * 4 - * Copyright (C) 2002 Maciej W. Rozycki 5 - * 6 - * This program is free software; you can redistribute it and/or 7 - * modify it under the terms of the GNU General Public License 8 - * as published by the Free Software Foundation; either version 9 - * 2 of the License, or (at your option) any later version. 10 - */ 11 - 12 - #include <asm/asm.h> 13 - #include <asm/regdef.h> 14 - 15 - /* Maximum number of arguments supported. Must be even! */ 16 - #define O32_ARGC 32 17 - /* Number of static registers we save. */ 18 - #define O32_STATC 11 19 - /* Frame size for both of the above. */ 20 - #define O32_FRAMESZ (4 * O32_ARGC + SZREG * O32_STATC) 21 - 22 - .text 23 - 24 - /* 25 - * O32 function call dispatcher, for interfacing 32-bit ROM routines. 26 - * 27 - * The standard 64 (N32) calling sequence is supported, with a0 28 - * holding a function pointer, a1-a7 -- its first seven arguments 29 - * and the stack -- remaining ones (up to O32_ARGC, including a1-a7). 30 - * Static registers, gp and fp are preserved, v0 holds a result. 31 - * This code relies on the called o32 function for sp and ra 32 - * restoration and thus both this dispatcher and the current stack 33 - * have to be placed in a KSEGx (or KUSEG) address space. Any 34 - * pointers passed have to point to addresses within one of these 35 - * spaces as well. 
36 - */ 37 - NESTED(call_o32, O32_FRAMESZ, ra) 38 - REG_SUBU sp,O32_FRAMESZ 39 - 40 - REG_S ra,O32_FRAMESZ-1*SZREG(sp) 41 - REG_S fp,O32_FRAMESZ-2*SZREG(sp) 42 - REG_S gp,O32_FRAMESZ-3*SZREG(sp) 43 - REG_S s7,O32_FRAMESZ-4*SZREG(sp) 44 - REG_S s6,O32_FRAMESZ-5*SZREG(sp) 45 - REG_S s5,O32_FRAMESZ-6*SZREG(sp) 46 - REG_S s4,O32_FRAMESZ-7*SZREG(sp) 47 - REG_S s3,O32_FRAMESZ-8*SZREG(sp) 48 - REG_S s2,O32_FRAMESZ-9*SZREG(sp) 49 - REG_S s1,O32_FRAMESZ-10*SZREG(sp) 50 - REG_S s0,O32_FRAMESZ-11*SZREG(sp) 51 - 52 - move jp,a0 53 - 54 - sll a0,a1,zero 55 - sll a1,a2,zero 56 - sll a2,a3,zero 57 - sll a3,a4,zero 58 - sw a5,0x10(sp) 59 - sw a6,0x14(sp) 60 - sw a7,0x18(sp) 61 - 62 - PTR_LA t0,O32_FRAMESZ(sp) 63 - PTR_LA t1,0x1c(sp) 64 - li t2,O32_ARGC-7 65 - 1: 66 - lw t3,(t0) 67 - REG_ADDU t0,SZREG 68 - sw t3,(t1) 69 - REG_SUBU t2,1 70 - REG_ADDU t1,4 71 - bnez t2,1b 72 - 73 - jalr jp 74 - 75 - REG_L s0,O32_FRAMESZ-11*SZREG(sp) 76 - REG_L s1,O32_FRAMESZ-10*SZREG(sp) 77 - REG_L s2,O32_FRAMESZ-9*SZREG(sp) 78 - REG_L s3,O32_FRAMESZ-8*SZREG(sp) 79 - REG_L s4,O32_FRAMESZ-7*SZREG(sp) 80 - REG_L s5,O32_FRAMESZ-6*SZREG(sp) 81 - REG_L s6,O32_FRAMESZ-5*SZREG(sp) 82 - REG_L s7,O32_FRAMESZ-4*SZREG(sp) 83 - REG_L gp,O32_FRAMESZ-3*SZREG(sp) 84 - REG_L fp,O32_FRAMESZ-2*SZREG(sp) 85 - REG_L ra,O32_FRAMESZ-1*SZREG(sp) 86 - 87 - REG_ADDU sp,O32_FRAMESZ 88 - jr ra 89 - END(call_o32)
+35 -22
arch/mips/fw/lib/call_o32.S
··· 1 1 /* 2 2 * O32 interface for the 64 (or N32) ABI. 3 3 * 4 - * Copyright (C) 2002 Maciej W. Rozycki 4 + * Copyright (C) 2002, 2014 Maciej W. Rozycki 5 5 * 6 6 * This program is free software; you can redistribute it and/or 7 7 * modify it under the terms of the GNU General Public License ··· 12 12 #include <asm/asm.h> 13 13 #include <asm/regdef.h> 14 14 15 + /* O32 register size. */ 16 + #define O32_SZREG 4 15 17 /* Maximum number of arguments supported. Must be even! */ 16 18 #define O32_ARGC 32 17 - /* Number of static registers we save. */ 19 + /* Number of static registers we save. */ 18 20 #define O32_STATC 11 19 - /* Frame size for static register */ 20 - #define O32_FRAMESZ (SZREG * O32_STATC) 21 - /* Frame size on new stack */ 22 - #define O32_FRAMESZ_NEW (SZREG + 4 * O32_ARGC) 21 + /* Argument area frame size. */ 22 + #define O32_ARGSZ (O32_SZREG * O32_ARGC) 23 + /* Static register save area frame size. */ 24 + #define O32_STATSZ (SZREG * O32_STATC) 25 + /* Stack pointer register save area frame size. */ 26 + #define O32_SPSZ SZREG 27 + /* Combined area frame size. */ 28 + #define O32_FRAMESZ (O32_ARGSZ + O32_SPSZ + O32_STATSZ) 29 + /* Switched stack frame size. */ 30 + #define O32_NFRAMESZ (O32_ARGSZ + O32_SPSZ) 23 31 24 32 .text 25 33 26 34 /* 27 35 * O32 function call dispatcher, for interfacing 32-bit ROM routines. 28 36 * 29 - * The standard 64 (N32) calling sequence is supported, with a0 30 - * holding a function pointer, a1 a new stack pointer, a2-a7 -- its 31 - * first six arguments and the stack -- remaining ones (up to O32_ARGC, 32 - * including a2-a7). Static registers, gp and fp are preserved, v0 holds 33 - * a result. This code relies on the called o32 function for sp and ra 34 - * restoration and this dispatcher has to be placed in a KSEGx (or KUSEG) 35 - * address space. Any pointers passed have to point to addresses within 36 - * one of these spaces as well. 
37 + * The standard 64 (N32) calling sequence is supported, with a0 holding 38 + * a function pointer, a1 a pointer to the new stack to call the 39 + * function with or 0 if no stack switching is requested, a2-a7 -- the 40 + * function call's first six arguments, and the stack -- the remaining 41 + * arguments (up to O32_ARGC, including a2-a7). Static registers, gp 42 + * and fp are preserved, v0 holds the result. This code relies on the 43 + * called o32 function for sp and ra restoration and this dispatcher has 44 + * to be placed in a KSEGx (or KUSEG) address space. Any pointers 45 + * passed have to point to addresses within one of these spaces as well. 37 46 */ 38 47 NESTED(call_o32, O32_FRAMESZ, ra) 39 48 REG_SUBU sp,O32_FRAMESZ ··· 60 51 REG_S s0,O32_FRAMESZ-11*SZREG(sp) 61 52 62 53 move jp,a0 63 - REG_SUBU s0,a1,O32_FRAMESZ_NEW 64 - REG_S sp,O32_FRAMESZ_NEW-1*SZREG(s0) 54 + 55 + move fp,sp 56 + beqz a1,0f 57 + REG_SUBU fp,a1,O32_NFRAMESZ 58 + 0: 59 + REG_S sp,O32_NFRAMESZ-1*SZREG(fp) 65 60 66 61 sll a0,a2,zero 67 62 sll a1,a3,zero 68 63 sll a2,a4,zero 69 64 sll a3,a5,zero 70 - sw a6,0x10(s0) 71 - sw a7,0x14(s0) 65 + sw a6,4*O32_SZREG(fp) 66 + sw a7,5*O32_SZREG(fp) 72 67 73 68 PTR_LA t0,O32_FRAMESZ(sp) 74 - PTR_LA t1,0x18(s0) 69 + PTR_LA t1,6*O32_SZREG(fp) 75 70 li t2,O32_ARGC-6 76 71 1: 77 72 lw t3,(t0) 78 73 REG_ADDU t0,SZREG 79 74 sw t3,(t1) 80 75 REG_SUBU t2,1 81 - REG_ADDU t1,4 76 + REG_ADDU t1,O32_SZREG 82 77 bnez t2,1b 83 78 84 - move sp,s0 79 + move sp,fp 85 80 86 81 jalr jp 87 82 88 - REG_L sp,O32_FRAMESZ_NEW-1*SZREG(sp) 83 + REG_L sp,O32_NFRAMESZ-1*SZREG(sp) 89 84 90 85 REG_L s0,O32_FRAMESZ-11*SZREG(sp) 91 86 REG_L s1,O32_FRAMESZ-10*SZREG(sp)
+2 -1
arch/mips/fw/sni/sniprom.c
··· 40 40 41 41 #ifdef CONFIG_64BIT 42 42 43 - static u8 o32_stk[16384]; 43 + /* O32 stack has to be 8-byte aligned. */ 44 + static u64 o32_stk[4096]; 44 45 #define O32_STK &o32_stk[sizeof(o32_stk)] 45 46 46 47 #define __PROM_O32(fun, arg) fun arg __asm__(#fun); \
+20 -20
arch/mips/include/asm/dec/prom.h
··· 113 113 #define __DEC_PROM_O32(fun, arg) fun arg __asm__(#fun); \ 114 114 __asm__(#fun " = call_o32") 115 115 116 - int __DEC_PROM_O32(_rex_bootinit, (int (*)(void))); 117 - int __DEC_PROM_O32(_rex_bootread, (int (*)(void))); 118 - int __DEC_PROM_O32(_rex_getbitmap, (int (*)(memmap *), memmap *)); 116 + int __DEC_PROM_O32(_rex_bootinit, (int (*)(void), void *)); 117 + int __DEC_PROM_O32(_rex_bootread, (int (*)(void), void *)); 118 + int __DEC_PROM_O32(_rex_getbitmap, (int (*)(memmap *), void *, memmap *)); 119 119 unsigned long *__DEC_PROM_O32(_rex_slot_address, 120 - (unsigned long *(*)(int), int)); 121 - void *__DEC_PROM_O32(_rex_gettcinfo, (void *(*)(void))); 122 - int __DEC_PROM_O32(_rex_getsysid, (int (*)(void))); 123 - void __DEC_PROM_O32(_rex_clear_cache, (void (*)(void))); 120 + (unsigned long *(*)(int), void *, int)); 121 + void *__DEC_PROM_O32(_rex_gettcinfo, (void *(*)(void), void *)); 122 + int __DEC_PROM_O32(_rex_getsysid, (int (*)(void), void *)); 123 + void __DEC_PROM_O32(_rex_clear_cache, (void (*)(void), void *)); 124 124 125 - int __DEC_PROM_O32(_prom_getchar, (int (*)(void))); 126 - char *__DEC_PROM_O32(_prom_getenv, (char *(*)(char *), char *)); 127 - int __DEC_PROM_O32(_prom_printf, (int (*)(char *, ...), char *, ...)); 125 + int __DEC_PROM_O32(_prom_getchar, (int (*)(void), void *)); 126 + char *__DEC_PROM_O32(_prom_getenv, (char *(*)(char *), void *, char *)); 127 + int __DEC_PROM_O32(_prom_printf, (int (*)(char *, ...), void *, char *, ...)); 128 128 129 129 130 - #define rex_bootinit() _rex_bootinit(__rex_bootinit) 131 - #define rex_bootread() _rex_bootread(__rex_bootread) 132 - #define rex_getbitmap(x) _rex_getbitmap(__rex_getbitmap, x) 133 - #define rex_slot_address(x) _rex_slot_address(__rex_slot_address, x) 134 - #define rex_gettcinfo() _rex_gettcinfo(__rex_gettcinfo) 135 - #define rex_getsysid() _rex_getsysid(__rex_getsysid) 136 - #define rex_clear_cache() _rex_clear_cache(__rex_clear_cache) 130 + #define rex_bootinit() 
_rex_bootinit(__rex_bootinit, NULL) 131 + #define rex_bootread() _rex_bootread(__rex_bootread, NULL) 132 + #define rex_getbitmap(x) _rex_getbitmap(__rex_getbitmap, NULL, x) 133 + #define rex_slot_address(x) _rex_slot_address(__rex_slot_address, NULL, x) 134 + #define rex_gettcinfo() _rex_gettcinfo(__rex_gettcinfo, NULL) 135 + #define rex_getsysid() _rex_getsysid(__rex_getsysid, NULL) 136 + #define rex_clear_cache() _rex_clear_cache(__rex_clear_cache, NULL) 137 137 138 - #define prom_getchar() _prom_getchar(__prom_getchar) 139 - #define prom_getenv(x) _prom_getenv(__prom_getenv, x) 140 - #define prom_printf(x...) _prom_printf(__prom_printf, x) 138 + #define prom_getchar() _prom_getchar(__prom_getchar, NULL) 139 + #define prom_getenv(x) _prom_getenv(__prom_getenv, NULL, x) 140 + #define prom_printf(x...) _prom_printf(__prom_printf, NULL, x) 141 141 142 142 #else /* !CONFIG_64BIT */ 143 143
-56
arch/mips/include/asm/rm9k-ocd.h
··· 1 - /* 2 - * Copyright (C) 2004 by Basler Vision Technologies AG 3 - * Author: Thomas Koeller <thomas.koeller@baslerweb.com> 4 - * 5 - * This program is free software; you can redistribute it and/or modify 6 - * it under the terms of the GNU General Public License as published by 7 - * the Free Software Foundation; either version 2 of the License, or 8 - * (at your option) any later version. 9 - * 10 - * This program is distributed in the hope that it will be useful, 11 - * but WITHOUT ANY WARRANTY; without even the implied warranty of 12 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 - * GNU General Public License for more details. 14 - * 15 - * You should have received a copy of the GNU General Public License 16 - * along with this program; if not, write to the Free Software 17 - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 18 - */ 19 - 20 - #if !defined(_ASM_RM9K_OCD_H) 21 - #define _ASM_RM9K_OCD_H 22 - 23 - #include <linux/types.h> 24 - #include <linux/spinlock.h> 25 - #include <asm/io.h> 26 - 27 - extern volatile void __iomem * const ocd_base; 28 - extern volatile void __iomem * const titan_base; 29 - 30 - #define ocd_addr(__x__) (ocd_base + (__x__)) 31 - #define titan_addr(__x__) (titan_base + (__x__)) 32 - #define scram_addr(__x__) (scram_base + (__x__)) 33 - 34 - /* OCD register access */ 35 - #define ocd_readl(__offs__) __raw_readl(ocd_addr(__offs__)) 36 - #define ocd_readw(__offs__) __raw_readw(ocd_addr(__offs__)) 37 - #define ocd_readb(__offs__) __raw_readb(ocd_addr(__offs__)) 38 - #define ocd_writel(__val__, __offs__) \ 39 - __raw_writel((__val__), ocd_addr(__offs__)) 40 - #define ocd_writew(__val__, __offs__) \ 41 - __raw_writew((__val__), ocd_addr(__offs__)) 42 - #define ocd_writeb(__val__, __offs__) \ 43 - __raw_writeb((__val__), ocd_addr(__offs__)) 44 - 45 - /* TITAN register access - 32 bit-wide only */ 46 - #define titan_readl(__offs__) __raw_readl(titan_addr(__offs__)) 47 - #define 
titan_writel(__val__, __offs__) \ 48 - __raw_writel((__val__), titan_addr(__offs__)) 49 - 50 - /* Protect access to shared TITAN registers */ 51 - extern spinlock_t titan_lock; 52 - extern int titan_irqflags; 53 - #define lock_titan_regs() spin_lock_irqsave(&titan_lock, titan_irqflags) 54 - #define unlock_titan_regs() spin_unlock_irqrestore(&titan_lock, titan_irqflags) 55 - 56 - #endif /* !defined(_ASM_RM9K_OCD_H) */
+2
arch/mips/include/asm/syscall.h
··· 133 133 #ifdef CONFIG_64BIT 134 134 if (!test_thread_flag(TIF_32BIT_REGS)) 135 135 arch |= __AUDIT_ARCH_64BIT; 136 + if (test_thread_flag(TIF_32BIT_ADDR)) 137 + arch |= __AUDIT_ARCH_CONVENTION_MIPS64_N32; 136 138 #endif 137 139 #if defined(__LITTLE_ENDIAN) 138 140 arch |= __AUDIT_ARCH_LE;
+199 -199
arch/mips/include/uapi/asm/inst.h
··· 484 484 * Damn ... bitfields depend from byteorder :-( 485 485 */ 486 486 #ifdef __MIPSEB__ 487 - #define BITFIELD_FIELD(field, more) \ 487 + #define __BITFIELD_FIELD(field, more) \ 488 488 field; \ 489 489 more 490 490 491 491 #elif defined(__MIPSEL__) 492 492 493 - #define BITFIELD_FIELD(field, more) \ 493 + #define __BITFIELD_FIELD(field, more) \ 494 494 more \ 495 495 field; 496 496 ··· 499 499 #endif 500 500 501 501 struct j_format { 502 - BITFIELD_FIELD(unsigned int opcode : 6, /* Jump format */ 503 - BITFIELD_FIELD(unsigned int target : 26, 502 + __BITFIELD_FIELD(unsigned int opcode : 6, /* Jump format */ 503 + __BITFIELD_FIELD(unsigned int target : 26, 504 504 ;)) 505 505 }; 506 506 507 507 struct i_format { /* signed immediate format */ 508 - BITFIELD_FIELD(unsigned int opcode : 6, 509 - BITFIELD_FIELD(unsigned int rs : 5, 510 - BITFIELD_FIELD(unsigned int rt : 5, 511 - BITFIELD_FIELD(signed int simmediate : 16, 508 + __BITFIELD_FIELD(unsigned int opcode : 6, 509 + __BITFIELD_FIELD(unsigned int rs : 5, 510 + __BITFIELD_FIELD(unsigned int rt : 5, 511 + __BITFIELD_FIELD(signed int simmediate : 16, 512 512 ;)))) 513 513 }; 514 514 515 515 struct u_format { /* unsigned immediate format */ 516 - BITFIELD_FIELD(unsigned int opcode : 6, 517 - BITFIELD_FIELD(unsigned int rs : 5, 518 - BITFIELD_FIELD(unsigned int rt : 5, 519 - BITFIELD_FIELD(unsigned int uimmediate : 16, 516 + __BITFIELD_FIELD(unsigned int opcode : 6, 517 + __BITFIELD_FIELD(unsigned int rs : 5, 518 + __BITFIELD_FIELD(unsigned int rt : 5, 519 + __BITFIELD_FIELD(unsigned int uimmediate : 16, 520 520 ;)))) 521 521 }; 522 522 523 523 struct c_format { /* Cache (>= R6000) format */ 524 - BITFIELD_FIELD(unsigned int opcode : 6, 525 - BITFIELD_FIELD(unsigned int rs : 5, 526 - BITFIELD_FIELD(unsigned int c_op : 3, 527 - BITFIELD_FIELD(unsigned int cache : 2, 528 - BITFIELD_FIELD(unsigned int simmediate : 16, 524 + __BITFIELD_FIELD(unsigned int opcode : 6, 525 + __BITFIELD_FIELD(unsigned int rs : 5, 526 
+ __BITFIELD_FIELD(unsigned int c_op : 3, 527 + __BITFIELD_FIELD(unsigned int cache : 2, 528 + __BITFIELD_FIELD(unsigned int simmediate : 16, 529 529 ;))))) 530 530 }; 531 531 532 532 struct r_format { /* Register format */ 533 - BITFIELD_FIELD(unsigned int opcode : 6, 534 - BITFIELD_FIELD(unsigned int rs : 5, 535 - BITFIELD_FIELD(unsigned int rt : 5, 536 - BITFIELD_FIELD(unsigned int rd : 5, 537 - BITFIELD_FIELD(unsigned int re : 5, 538 - BITFIELD_FIELD(unsigned int func : 6, 533 + __BITFIELD_FIELD(unsigned int opcode : 6, 534 + __BITFIELD_FIELD(unsigned int rs : 5, 535 + __BITFIELD_FIELD(unsigned int rt : 5, 536 + __BITFIELD_FIELD(unsigned int rd : 5, 537 + __BITFIELD_FIELD(unsigned int re : 5, 538 + __BITFIELD_FIELD(unsigned int func : 6, 539 539 ;)))))) 540 540 }; 541 541 542 542 struct p_format { /* Performance counter format (R10000) */ 543 - BITFIELD_FIELD(unsigned int opcode : 6, 544 - BITFIELD_FIELD(unsigned int rs : 5, 545 - BITFIELD_FIELD(unsigned int rt : 5, 546 - BITFIELD_FIELD(unsigned int rd : 5, 547 - BITFIELD_FIELD(unsigned int re : 5, 548 - BITFIELD_FIELD(unsigned int func : 6, 543 + __BITFIELD_FIELD(unsigned int opcode : 6, 544 + __BITFIELD_FIELD(unsigned int rs : 5, 545 + __BITFIELD_FIELD(unsigned int rt : 5, 546 + __BITFIELD_FIELD(unsigned int rd : 5, 547 + __BITFIELD_FIELD(unsigned int re : 5, 548 + __BITFIELD_FIELD(unsigned int func : 6, 549 549 ;)))))) 550 550 }; 551 551 552 552 struct f_format { /* FPU register format */ 553 - BITFIELD_FIELD(unsigned int opcode : 6, 554 - BITFIELD_FIELD(unsigned int : 1, 555 - BITFIELD_FIELD(unsigned int fmt : 4, 556 - BITFIELD_FIELD(unsigned int rt : 5, 557 - BITFIELD_FIELD(unsigned int rd : 5, 558 - BITFIELD_FIELD(unsigned int re : 5, 559 - BITFIELD_FIELD(unsigned int func : 6, 553 + __BITFIELD_FIELD(unsigned int opcode : 6, 554 + __BITFIELD_FIELD(unsigned int : 1, 555 + __BITFIELD_FIELD(unsigned int fmt : 4, 556 + __BITFIELD_FIELD(unsigned int rt : 5, 557 + __BITFIELD_FIELD(unsigned int rd : 5, 558 + 
__BITFIELD_FIELD(unsigned int re : 5, 559 + __BITFIELD_FIELD(unsigned int func : 6, 560 560 ;))))))) 561 561 }; 562 562 563 563 struct ma_format { /* FPU multiply and add format (MIPS IV) */ 564 - BITFIELD_FIELD(unsigned int opcode : 6, 565 - BITFIELD_FIELD(unsigned int fr : 5, 566 - BITFIELD_FIELD(unsigned int ft : 5, 567 - BITFIELD_FIELD(unsigned int fs : 5, 568 - BITFIELD_FIELD(unsigned int fd : 5, 569 - BITFIELD_FIELD(unsigned int func : 4, 570 - BITFIELD_FIELD(unsigned int fmt : 2, 564 + __BITFIELD_FIELD(unsigned int opcode : 6, 565 + __BITFIELD_FIELD(unsigned int fr : 5, 566 + __BITFIELD_FIELD(unsigned int ft : 5, 567 + __BITFIELD_FIELD(unsigned int fs : 5, 568 + __BITFIELD_FIELD(unsigned int fd : 5, 569 + __BITFIELD_FIELD(unsigned int func : 4, 570 + __BITFIELD_FIELD(unsigned int fmt : 2, 571 571 ;))))))) 572 572 }; 573 573 574 574 struct b_format { /* BREAK and SYSCALL */ 575 - BITFIELD_FIELD(unsigned int opcode : 6, 576 - BITFIELD_FIELD(unsigned int code : 20, 577 - BITFIELD_FIELD(unsigned int func : 6, 575 + __BITFIELD_FIELD(unsigned int opcode : 6, 576 + __BITFIELD_FIELD(unsigned int code : 20, 577 + __BITFIELD_FIELD(unsigned int func : 6, 578 578 ;))) 579 579 }; 580 580 581 581 struct ps_format { /* MIPS-3D / paired single format */ 582 - BITFIELD_FIELD(unsigned int opcode : 6, 583 - BITFIELD_FIELD(unsigned int rs : 5, 584 - BITFIELD_FIELD(unsigned int ft : 5, 585 - BITFIELD_FIELD(unsigned int fs : 5, 586 - BITFIELD_FIELD(unsigned int fd : 5, 587 - BITFIELD_FIELD(unsigned int func : 6, 582 + __BITFIELD_FIELD(unsigned int opcode : 6, 583 + __BITFIELD_FIELD(unsigned int rs : 5, 584 + __BITFIELD_FIELD(unsigned int ft : 5, 585 + __BITFIELD_FIELD(unsigned int fs : 5, 586 + __BITFIELD_FIELD(unsigned int fd : 5, 587 + __BITFIELD_FIELD(unsigned int func : 6, 588 588 ;)))))) 589 589 }; 590 590 591 591 struct v_format { /* MDMX vector format */ 592 - BITFIELD_FIELD(unsigned int opcode : 6, 593 - BITFIELD_FIELD(unsigned int sel : 4, 594 - BITFIELD_FIELD(unsigned 
int fmt : 1, 595 - BITFIELD_FIELD(unsigned int vt : 5, 596 - BITFIELD_FIELD(unsigned int vs : 5, 597 - BITFIELD_FIELD(unsigned int vd : 5, 598 - BITFIELD_FIELD(unsigned int func : 6, 592 + __BITFIELD_FIELD(unsigned int opcode : 6, 593 + __BITFIELD_FIELD(unsigned int sel : 4, 594 + __BITFIELD_FIELD(unsigned int fmt : 1, 595 + __BITFIELD_FIELD(unsigned int vt : 5, 596 + __BITFIELD_FIELD(unsigned int vs : 5, 597 + __BITFIELD_FIELD(unsigned int vd : 5, 598 + __BITFIELD_FIELD(unsigned int func : 6, 599 599 ;))))))) 600 600 }; 601 601 602 602 struct spec3_format { /* SPEC3 */ 603 - BITFIELD_FIELD(unsigned int opcode:6, 604 - BITFIELD_FIELD(unsigned int rs:5, 605 - BITFIELD_FIELD(unsigned int rt:5, 606 - BITFIELD_FIELD(signed int simmediate:9, 607 - BITFIELD_FIELD(unsigned int func:7, 603 + __BITFIELD_FIELD(unsigned int opcode:6, 604 + __BITFIELD_FIELD(unsigned int rs:5, 605 + __BITFIELD_FIELD(unsigned int rt:5, 606 + __BITFIELD_FIELD(signed int simmediate:9, 607 + __BITFIELD_FIELD(unsigned int func:7, 608 608 ;))))) 609 609 }; 610 610 ··· 616 616 * if it is MIPS32 instruction re-encoded for use in the microMIPS ASE. 
617 617 */ 618 618 struct fb_format { /* FPU branch format (MIPS32) */ 619 - BITFIELD_FIELD(unsigned int opcode : 6, 620 - BITFIELD_FIELD(unsigned int bc : 5, 621 - BITFIELD_FIELD(unsigned int cc : 3, 622 - BITFIELD_FIELD(unsigned int flag : 2, 623 - BITFIELD_FIELD(signed int simmediate : 16, 619 + __BITFIELD_FIELD(unsigned int opcode : 6, 620 + __BITFIELD_FIELD(unsigned int bc : 5, 621 + __BITFIELD_FIELD(unsigned int cc : 3, 622 + __BITFIELD_FIELD(unsigned int flag : 2, 623 + __BITFIELD_FIELD(signed int simmediate : 16, 624 624 ;))))) 625 625 }; 626 626 627 627 struct fp0_format { /* FPU multiply and add format (MIPS32) */ 628 - BITFIELD_FIELD(unsigned int opcode : 6, 629 - BITFIELD_FIELD(unsigned int fmt : 5, 630 - BITFIELD_FIELD(unsigned int ft : 5, 631 - BITFIELD_FIELD(unsigned int fs : 5, 632 - BITFIELD_FIELD(unsigned int fd : 5, 633 - BITFIELD_FIELD(unsigned int func : 6, 628 + __BITFIELD_FIELD(unsigned int opcode : 6, 629 + __BITFIELD_FIELD(unsigned int fmt : 5, 630 + __BITFIELD_FIELD(unsigned int ft : 5, 631 + __BITFIELD_FIELD(unsigned int fs : 5, 632 + __BITFIELD_FIELD(unsigned int fd : 5, 633 + __BITFIELD_FIELD(unsigned int func : 6, 634 634 ;)))))) 635 635 }; 636 636 637 637 struct mm_fp0_format { /* FPU multipy and add format (microMIPS) */ 638 - BITFIELD_FIELD(unsigned int opcode : 6, 639 - BITFIELD_FIELD(unsigned int ft : 5, 640 - BITFIELD_FIELD(unsigned int fs : 5, 641 - BITFIELD_FIELD(unsigned int fd : 5, 642 - BITFIELD_FIELD(unsigned int fmt : 3, 643 - BITFIELD_FIELD(unsigned int op : 2, 644 - BITFIELD_FIELD(unsigned int func : 6, 638 + __BITFIELD_FIELD(unsigned int opcode : 6, 639 + __BITFIELD_FIELD(unsigned int ft : 5, 640 + __BITFIELD_FIELD(unsigned int fs : 5, 641 + __BITFIELD_FIELD(unsigned int fd : 5, 642 + __BITFIELD_FIELD(unsigned int fmt : 3, 643 + __BITFIELD_FIELD(unsigned int op : 2, 644 + __BITFIELD_FIELD(unsigned int func : 6, 645 645 ;))))))) 646 646 }; 647 647 648 648 struct fp1_format { /* FPU mfc1 and cfc1 format (MIPS32) */ 649 - 
BITFIELD_FIELD(unsigned int opcode : 6, 650 - BITFIELD_FIELD(unsigned int op : 5, 651 - BITFIELD_FIELD(unsigned int rt : 5, 652 - BITFIELD_FIELD(unsigned int fs : 5, 653 - BITFIELD_FIELD(unsigned int fd : 5, 654 - BITFIELD_FIELD(unsigned int func : 6, 649 + __BITFIELD_FIELD(unsigned int opcode : 6, 650 + __BITFIELD_FIELD(unsigned int op : 5, 651 + __BITFIELD_FIELD(unsigned int rt : 5, 652 + __BITFIELD_FIELD(unsigned int fs : 5, 653 + __BITFIELD_FIELD(unsigned int fd : 5, 654 + __BITFIELD_FIELD(unsigned int func : 6, 655 655 ;)))))) 656 656 }; 657 657 658 658 struct mm_fp1_format { /* FPU mfc1 and cfc1 format (microMIPS) */ 659 - BITFIELD_FIELD(unsigned int opcode : 6, 660 - BITFIELD_FIELD(unsigned int rt : 5, 661 - BITFIELD_FIELD(unsigned int fs : 5, 662 - BITFIELD_FIELD(unsigned int fmt : 2, 663 - BITFIELD_FIELD(unsigned int op : 8, 664 - BITFIELD_FIELD(unsigned int func : 6, 659 + __BITFIELD_FIELD(unsigned int opcode : 6, 660 + __BITFIELD_FIELD(unsigned int rt : 5, 661 + __BITFIELD_FIELD(unsigned int fs : 5, 662 + __BITFIELD_FIELD(unsigned int fmt : 2, 663 + __BITFIELD_FIELD(unsigned int op : 8, 664 + __BITFIELD_FIELD(unsigned int func : 6, 665 665 ;)))))) 666 666 }; 667 667 668 668 struct mm_fp2_format { /* FPU movt and movf format (microMIPS) */ 669 - BITFIELD_FIELD(unsigned int opcode : 6, 670 - BITFIELD_FIELD(unsigned int fd : 5, 671 - BITFIELD_FIELD(unsigned int fs : 5, 672 - BITFIELD_FIELD(unsigned int cc : 3, 673 - BITFIELD_FIELD(unsigned int zero : 2, 674 - BITFIELD_FIELD(unsigned int fmt : 2, 675 - BITFIELD_FIELD(unsigned int op : 3, 676 - BITFIELD_FIELD(unsigned int func : 6, 669 + __BITFIELD_FIELD(unsigned int opcode : 6, 670 + __BITFIELD_FIELD(unsigned int fd : 5, 671 + __BITFIELD_FIELD(unsigned int fs : 5, 672 + __BITFIELD_FIELD(unsigned int cc : 3, 673 + __BITFIELD_FIELD(unsigned int zero : 2, 674 + __BITFIELD_FIELD(unsigned int fmt : 2, 675 + __BITFIELD_FIELD(unsigned int op : 3, 676 + __BITFIELD_FIELD(unsigned int func : 6, 677 677 ;)))))))) 678 
678 }; 679 679 680 680 struct mm_fp3_format { /* FPU abs and neg format (microMIPS) */ 681 - BITFIELD_FIELD(unsigned int opcode : 6, 682 - BITFIELD_FIELD(unsigned int rt : 5, 683 - BITFIELD_FIELD(unsigned int fs : 5, 684 - BITFIELD_FIELD(unsigned int fmt : 3, 685 - BITFIELD_FIELD(unsigned int op : 7, 686 - BITFIELD_FIELD(unsigned int func : 6, 681 + __BITFIELD_FIELD(unsigned int opcode : 6, 682 + __BITFIELD_FIELD(unsigned int rt : 5, 683 + __BITFIELD_FIELD(unsigned int fs : 5, 684 + __BITFIELD_FIELD(unsigned int fmt : 3, 685 + __BITFIELD_FIELD(unsigned int op : 7, 686 + __BITFIELD_FIELD(unsigned int func : 6, 687 687 ;)))))) 688 688 }; 689 689 690 690 struct mm_fp4_format { /* FPU c.cond format (microMIPS) */ 691 - BITFIELD_FIELD(unsigned int opcode : 6, 692 - BITFIELD_FIELD(unsigned int rt : 5, 693 - BITFIELD_FIELD(unsigned int fs : 5, 694 - BITFIELD_FIELD(unsigned int cc : 3, 695 - BITFIELD_FIELD(unsigned int fmt : 3, 696 - BITFIELD_FIELD(unsigned int cond : 4, 697 - BITFIELD_FIELD(unsigned int func : 6, 691 + __BITFIELD_FIELD(unsigned int opcode : 6, 692 + __BITFIELD_FIELD(unsigned int rt : 5, 693 + __BITFIELD_FIELD(unsigned int fs : 5, 694 + __BITFIELD_FIELD(unsigned int cc : 3, 695 + __BITFIELD_FIELD(unsigned int fmt : 3, 696 + __BITFIELD_FIELD(unsigned int cond : 4, 697 + __BITFIELD_FIELD(unsigned int func : 6, 698 698 ;))))))) 699 699 }; 700 700 701 701 struct mm_fp5_format { /* FPU lwxc1 and swxc1 format (microMIPS) */ 702 - BITFIELD_FIELD(unsigned int opcode : 6, 703 - BITFIELD_FIELD(unsigned int index : 5, 704 - BITFIELD_FIELD(unsigned int base : 5, 705 - BITFIELD_FIELD(unsigned int fd : 5, 706 - BITFIELD_FIELD(unsigned int op : 5, 707 - BITFIELD_FIELD(unsigned int func : 6, 702 + __BITFIELD_FIELD(unsigned int opcode : 6, 703 + __BITFIELD_FIELD(unsigned int index : 5, 704 + __BITFIELD_FIELD(unsigned int base : 5, 705 + __BITFIELD_FIELD(unsigned int fd : 5, 706 + __BITFIELD_FIELD(unsigned int op : 5, 707 + __BITFIELD_FIELD(unsigned int func : 6, 708 708 
;)))))) 709 709 }; 710 710 711 711 struct fp6_format { /* FPU madd and msub format (MIPS IV) */ 712 - BITFIELD_FIELD(unsigned int opcode : 6, 713 - BITFIELD_FIELD(unsigned int fr : 5, 714 - BITFIELD_FIELD(unsigned int ft : 5, 715 - BITFIELD_FIELD(unsigned int fs : 5, 716 - BITFIELD_FIELD(unsigned int fd : 5, 717 - BITFIELD_FIELD(unsigned int func : 6, 712 + __BITFIELD_FIELD(unsigned int opcode : 6, 713 + __BITFIELD_FIELD(unsigned int fr : 5, 714 + __BITFIELD_FIELD(unsigned int ft : 5, 715 + __BITFIELD_FIELD(unsigned int fs : 5, 716 + __BITFIELD_FIELD(unsigned int fd : 5, 717 + __BITFIELD_FIELD(unsigned int func : 6, 718 718 ;)))))) 719 719 }; 720 720 721 721 struct mm_fp6_format { /* FPU madd and msub format (microMIPS) */ 722 - BITFIELD_FIELD(unsigned int opcode : 6, 723 - BITFIELD_FIELD(unsigned int ft : 5, 724 - BITFIELD_FIELD(unsigned int fs : 5, 725 - BITFIELD_FIELD(unsigned int fd : 5, 726 - BITFIELD_FIELD(unsigned int fr : 5, 727 - BITFIELD_FIELD(unsigned int func : 6, 722 + __BITFIELD_FIELD(unsigned int opcode : 6, 723 + __BITFIELD_FIELD(unsigned int ft : 5, 724 + __BITFIELD_FIELD(unsigned int fs : 5, 725 + __BITFIELD_FIELD(unsigned int fd : 5, 726 + __BITFIELD_FIELD(unsigned int fr : 5, 727 + __BITFIELD_FIELD(unsigned int func : 6, 728 728 ;)))))) 729 729 }; 730 730 731 731 struct mm_i_format { /* Immediate format (microMIPS) */ 732 - BITFIELD_FIELD(unsigned int opcode : 6, 733 - BITFIELD_FIELD(unsigned int rt : 5, 734 - BITFIELD_FIELD(unsigned int rs : 5, 735 - BITFIELD_FIELD(signed int simmediate : 16, 732 + __BITFIELD_FIELD(unsigned int opcode : 6, 733 + __BITFIELD_FIELD(unsigned int rt : 5, 734 + __BITFIELD_FIELD(unsigned int rs : 5, 735 + __BITFIELD_FIELD(signed int simmediate : 16, 736 736 ;)))) 737 737 }; 738 738 739 739 struct mm_m_format { /* Multi-word load/store format (microMIPS) */ 740 - BITFIELD_FIELD(unsigned int opcode : 6, 741 - BITFIELD_FIELD(unsigned int rd : 5, 742 - BITFIELD_FIELD(unsigned int base : 5, 743 - BITFIELD_FIELD(unsigned 
int func : 4, 744 - BITFIELD_FIELD(signed int simmediate : 12, 740 + __BITFIELD_FIELD(unsigned int opcode : 6, 741 + __BITFIELD_FIELD(unsigned int rd : 5, 742 + __BITFIELD_FIELD(unsigned int base : 5, 743 + __BITFIELD_FIELD(unsigned int func : 4, 744 + __BITFIELD_FIELD(signed int simmediate : 12, 745 745 ;))))) 746 746 }; 747 747 748 748 struct mm_x_format { /* Scaled indexed load format (microMIPS) */ 749 - BITFIELD_FIELD(unsigned int opcode : 6, 750 - BITFIELD_FIELD(unsigned int index : 5, 751 - BITFIELD_FIELD(unsigned int base : 5, 752 - BITFIELD_FIELD(unsigned int rd : 5, 753 - BITFIELD_FIELD(unsigned int func : 11, 749 + __BITFIELD_FIELD(unsigned int opcode : 6, 750 + __BITFIELD_FIELD(unsigned int index : 5, 751 + __BITFIELD_FIELD(unsigned int base : 5, 752 + __BITFIELD_FIELD(unsigned int rd : 5, 753 + __BITFIELD_FIELD(unsigned int func : 11, 754 754 ;))))) 755 755 }; 756 756 ··· 758 758 * microMIPS instruction formats (16-bit length) 759 759 */ 760 760 struct mm_b0_format { /* Unconditional branch format (microMIPS) */ 761 - BITFIELD_FIELD(unsigned int opcode : 6, 762 - BITFIELD_FIELD(signed int simmediate : 10, 763 - BITFIELD_FIELD(unsigned int : 16, /* Ignored */ 761 + __BITFIELD_FIELD(unsigned int opcode : 6, 762 + __BITFIELD_FIELD(signed int simmediate : 10, 763 + __BITFIELD_FIELD(unsigned int : 16, /* Ignored */ 764 764 ;))) 765 765 }; 766 766 767 767 struct mm_b1_format { /* Conditional branch format (microMIPS) */ 768 - BITFIELD_FIELD(unsigned int opcode : 6, 769 - BITFIELD_FIELD(unsigned int rs : 3, 770 - BITFIELD_FIELD(signed int simmediate : 7, 771 - BITFIELD_FIELD(unsigned int : 16, /* Ignored */ 768 + __BITFIELD_FIELD(unsigned int opcode : 6, 769 + __BITFIELD_FIELD(unsigned int rs : 3, 770 + __BITFIELD_FIELD(signed int simmediate : 7, 771 + __BITFIELD_FIELD(unsigned int : 16, /* Ignored */ 772 772 ;)))) 773 773 }; 774 774 775 775 struct mm16_m_format { /* Multi-word load/store format */ 776 - BITFIELD_FIELD(unsigned int opcode : 6, 777 - 
BITFIELD_FIELD(unsigned int func : 4, 778 - BITFIELD_FIELD(unsigned int rlist : 2, 779 - BITFIELD_FIELD(unsigned int imm : 4, 780 - BITFIELD_FIELD(unsigned int : 16, /* Ignored */ 776 + __BITFIELD_FIELD(unsigned int opcode : 6, 777 + __BITFIELD_FIELD(unsigned int func : 4, 778 + __BITFIELD_FIELD(unsigned int rlist : 2, 779 + __BITFIELD_FIELD(unsigned int imm : 4, 780 + __BITFIELD_FIELD(unsigned int : 16, /* Ignored */ 781 781 ;))))) 782 782 }; 783 783 784 784 struct mm16_rb_format { /* Signed immediate format */ 785 - BITFIELD_FIELD(unsigned int opcode : 6, 786 - BITFIELD_FIELD(unsigned int rt : 3, 787 - BITFIELD_FIELD(unsigned int base : 3, 788 - BITFIELD_FIELD(signed int simmediate : 4, 789 - BITFIELD_FIELD(unsigned int : 16, /* Ignored */ 785 + __BITFIELD_FIELD(unsigned int opcode : 6, 786 + __BITFIELD_FIELD(unsigned int rt : 3, 787 + __BITFIELD_FIELD(unsigned int base : 3, 788 + __BITFIELD_FIELD(signed int simmediate : 4, 789 + __BITFIELD_FIELD(unsigned int : 16, /* Ignored */ 790 790 ;))))) 791 791 }; 792 792 793 793 struct mm16_r3_format { /* Load from global pointer format */ 794 - BITFIELD_FIELD(unsigned int opcode : 6, 795 - BITFIELD_FIELD(unsigned int rt : 3, 796 - BITFIELD_FIELD(signed int simmediate : 7, 797 - BITFIELD_FIELD(unsigned int : 16, /* Ignored */ 794 + __BITFIELD_FIELD(unsigned int opcode : 6, 795 + __BITFIELD_FIELD(unsigned int rt : 3, 796 + __BITFIELD_FIELD(signed int simmediate : 7, 797 + __BITFIELD_FIELD(unsigned int : 16, /* Ignored */ 798 798 ;)))) 799 799 }; 800 800 801 801 struct mm16_r5_format { /* Load/store from stack pointer format */ 802 - BITFIELD_FIELD(unsigned int opcode : 6, 803 - BITFIELD_FIELD(unsigned int rt : 5, 804 - BITFIELD_FIELD(signed int simmediate : 5, 805 - BITFIELD_FIELD(unsigned int : 16, /* Ignored */ 802 + __BITFIELD_FIELD(unsigned int opcode : 6, 803 + __BITFIELD_FIELD(unsigned int rt : 5, 804 + __BITFIELD_FIELD(signed int simmediate : 5, 805 + __BITFIELD_FIELD(unsigned int : 16, /* Ignored */ 806 806 ;)))) 
807 807 }; 808 808 ··· 810 810 * MIPS16e instruction formats (16-bit length) 811 811 */ 812 812 struct m16e_rr { 813 - BITFIELD_FIELD(unsigned int opcode : 5, 814 - BITFIELD_FIELD(unsigned int rx : 3, 815 - BITFIELD_FIELD(unsigned int nd : 1, 816 - BITFIELD_FIELD(unsigned int l : 1, 817 - BITFIELD_FIELD(unsigned int ra : 1, 818 - BITFIELD_FIELD(unsigned int func : 5, 813 + __BITFIELD_FIELD(unsigned int opcode : 5, 814 + __BITFIELD_FIELD(unsigned int rx : 3, 815 + __BITFIELD_FIELD(unsigned int nd : 1, 816 + __BITFIELD_FIELD(unsigned int l : 1, 817 + __BITFIELD_FIELD(unsigned int ra : 1, 818 + __BITFIELD_FIELD(unsigned int func : 5, 819 819 ;)))))) 820 820 }; 821 821 822 822 struct m16e_jal { 823 - BITFIELD_FIELD(unsigned int opcode : 5, 824 - BITFIELD_FIELD(unsigned int x : 1, 825 - BITFIELD_FIELD(unsigned int imm20_16 : 5, 826 - BITFIELD_FIELD(signed int imm25_21 : 5, 823 + __BITFIELD_FIELD(unsigned int opcode : 5, 824 + __BITFIELD_FIELD(unsigned int x : 1, 825 + __BITFIELD_FIELD(unsigned int imm20_16 : 5, 826 + __BITFIELD_FIELD(signed int imm25_21 : 5, 827 827 ;)))) 828 828 }; 829 829 830 830 struct m16e_i64 { 831 - BITFIELD_FIELD(unsigned int opcode : 5, 832 - BITFIELD_FIELD(unsigned int func : 3, 833 - BITFIELD_FIELD(unsigned int imm : 8, 831 + __BITFIELD_FIELD(unsigned int opcode : 5, 832 + __BITFIELD_FIELD(unsigned int func : 3, 833 + __BITFIELD_FIELD(unsigned int imm : 8, 834 834 ;))) 835 835 }; 836 836 837 837 struct m16e_ri64 { 838 - BITFIELD_FIELD(unsigned int opcode : 5, 839 - BITFIELD_FIELD(unsigned int func : 3, 840 - BITFIELD_FIELD(unsigned int ry : 3, 841 - BITFIELD_FIELD(unsigned int imm : 5, 838 + __BITFIELD_FIELD(unsigned int opcode : 5, 839 + __BITFIELD_FIELD(unsigned int func : 3, 840 + __BITFIELD_FIELD(unsigned int ry : 3, 841 + __BITFIELD_FIELD(unsigned int imm : 5, 842 842 ;)))) 843 843 }; 844 844 845 845 struct m16e_ri { 846 - BITFIELD_FIELD(unsigned int opcode : 5, 847 - BITFIELD_FIELD(unsigned int rx : 3, 848 - BITFIELD_FIELD(unsigned int 
imm : 8, 846 + __BITFIELD_FIELD(unsigned int opcode : 5, 847 + __BITFIELD_FIELD(unsigned int rx : 3, 848 + __BITFIELD_FIELD(unsigned int imm : 8, 849 849 ;))) 850 850 }; 851 851 852 852 struct m16e_rri { 853 - BITFIELD_FIELD(unsigned int opcode : 5, 854 - BITFIELD_FIELD(unsigned int rx : 3, 855 - BITFIELD_FIELD(unsigned int ry : 3, 856 - BITFIELD_FIELD(unsigned int imm : 5, 853 + __BITFIELD_FIELD(unsigned int opcode : 5, 854 + __BITFIELD_FIELD(unsigned int rx : 3, 855 + __BITFIELD_FIELD(unsigned int ry : 3, 856 + __BITFIELD_FIELD(unsigned int imm : 5, 857 857 ;)))) 858 858 }; 859 859 860 860 struct m16e_i8 { 861 - BITFIELD_FIELD(unsigned int opcode : 5, 862 - BITFIELD_FIELD(unsigned int func : 3, 863 - BITFIELD_FIELD(unsigned int imm : 8, 861 + __BITFIELD_FIELD(unsigned int opcode : 5, 862 + __BITFIELD_FIELD(unsigned int func : 3, 863 + __BITFIELD_FIELD(unsigned int imm : 8, 864 864 ;))) 865 865 }; 866 866
+6 -3
arch/mips/include/uapi/asm/unistd.h
··· 371 371 #define __NR_finit_module (__NR_Linux + 348) 372 372 #define __NR_sched_setattr (__NR_Linux + 349) 373 373 #define __NR_sched_getattr (__NR_Linux + 350) 374 + #define __NR_renameat2 (__NR_Linux + 351) 374 375 375 376 /* 376 377 * Offset of the last Linux o32 flavoured syscall 377 378 */ 378 - #define __NR_Linux_syscalls 350 379 + #define __NR_Linux_syscalls 351 379 380 380 381 #endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */ 381 382 ··· 700 699 #define __NR_getdents64 (__NR_Linux + 308) 701 700 #define __NR_sched_setattr (__NR_Linux + 309) 702 701 #define __NR_sched_getattr (__NR_Linux + 310) 702 + #define __NR_renameat2 (__NR_Linux + 311) 703 703 704 704 /* 705 705 * Offset of the last Linux 64-bit flavoured syscall 706 706 */ 707 - #define __NR_Linux_syscalls 310 707 + #define __NR_Linux_syscalls 311 708 708 709 709 #endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */ 710 710 ··· 1033 1031 #define __NR_finit_module (__NR_Linux + 312) 1034 1032 #define __NR_sched_setattr (__NR_Linux + 313) 1035 1033 #define __NR_sched_getattr (__NR_Linux + 314) 1034 + #define __NR_renameat2 (__NR_Linux + 315) 1036 1035 1037 1036 /* 1038 1037 * Offset of the last N32 flavoured syscall 1039 1038 */ 1040 - #define __NR_Linux_syscalls 314 1039 + #define __NR_Linux_syscalls 315 1041 1040 1042 1041 #endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */ 1043 1042
+1 -8
arch/mips/kernel/proc.c
··· 124 124 seq_printf(m, "kscratch registers\t: %d\n", 125 125 hweight8(cpu_data[n].kscratch_mask)); 126 126 seq_printf(m, "core\t\t\t: %d\n", cpu_data[n].core); 127 - #if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_MIPS_MT_SMTC) 128 - if (cpu_has_mipsmt) { 129 - seq_printf(m, "VPE\t\t\t: %d\n", cpu_data[n].vpe_id); 130 - #if defined(CONFIG_MIPS_MT_SMTC) 131 - seq_printf(m, "TC\t\t\t: %d\n", cpu_data[n].tc_id); 132 - #endif 133 - } 134 - #endif 127 + 135 128 sprintf(fmt, "VCE%%c exceptions\t\t: %s\n", 136 129 cpu_has_vce ? "%u" : "not available"); 137 130 seq_printf(m, fmt, 'D', vced_count);
+1
arch/mips/kernel/scall32-o32.S
··· 577 577 PTR sys_finit_module 578 578 PTR sys_sched_setattr 579 579 PTR sys_sched_getattr /* 4350 */ 580 + PTR sys_renameat2
+1
arch/mips/kernel/scall64-64.S
··· 430 430 PTR sys_getdents64 431 431 PTR sys_sched_setattr 432 432 PTR sys_sched_getattr /* 5310 */ 433 + PTR sys_renameat2 433 434 .size sys_call_table,.-sys_call_table
+1
arch/mips/kernel/scall64-n32.S
··· 423 423 PTR sys_finit_module 424 424 PTR sys_sched_setattr 425 425 PTR sys_sched_getattr 426 + PTR sys_renameat2 /* 6315 */ 426 427 .size sysn32_call_table,.-sysn32_call_table
+1
arch/mips/kernel/scall64-o32.S
··· 556 556 PTR sys_finit_module 557 557 PTR sys_sched_setattr 558 558 PTR sys_sched_getattr /* 4350 */ 559 + PTR sys_renameat2 559 560 .size sys32_call_table,.-sys32_call_table
+1
arch/mips/lantiq/dts/easy50712.dts
··· 8 8 }; 9 9 10 10 memory@0 { 11 + device_type = "memory"; 11 12 reg = <0x0 0x2000000>; 12 13 }; 13 14
+9
arch/mips/lib/csum_partial.S
··· 56 56 #define UNIT(unit) ((unit)*NBYTES) 57 57 58 58 #define ADDC(sum,reg) \ 59 + .set push; \ 60 + .set noat; \ 59 61 ADD sum, reg; \ 60 62 sltu v1, sum, reg; \ 61 63 ADD sum, v1; \ 64 + .set pop 62 65 63 66 #define ADDC32(sum,reg) \ 67 + .set push; \ 68 + .set noat; \ 64 69 addu sum, reg; \ 65 70 sltu v1, sum, reg; \ 66 71 addu sum, v1; \ 72 + .set pop 67 73 68 74 #define CSUM_BIGCHUNK1(src, offset, sum, _t0, _t1, _t2, _t3) \ 69 75 LOAD _t0, (offset + UNIT(0))(src); \ ··· 716 710 ADDC(sum, t2) 717 711 .Ldone\@: 718 712 /* fold checksum */ 713 + .set push 714 + .set noat 719 715 #ifdef USE_DOUBLE 720 716 dsll32 v1, sum, 0 721 717 daddu sum, v1 ··· 740 732 or sum, sum, t0 741 733 1: 742 734 #endif 735 + .set pop 743 736 .set reorder 744 737 ADDC32(sum, psum) 745 738 jr ra
+10 -4
arch/mips/lib/delay.c
··· 6 6 * Copyright (C) 1994 by Waldorf Electronics 7 7 * Copyright (C) 1995 - 2000, 01, 03 by Ralf Baechle 8 8 * Copyright (C) 1999, 2000 Silicon Graphics, Inc. 9 - * Copyright (C) 2007 Maciej W. Rozycki 9 + * Copyright (C) 2007, 2014 Maciej W. Rozycki 10 10 */ 11 11 #include <linux/module.h> 12 12 #include <linux/param.h> ··· 15 15 #include <asm/compiler.h> 16 16 #include <asm/war.h> 17 17 18 + #ifndef CONFIG_CPU_DADDI_WORKAROUNDS 19 + #define GCC_DADDI_IMM_ASM() "I" 20 + #else 21 + #define GCC_DADDI_IMM_ASM() "r" 22 + #endif 23 + 18 24 void __delay(unsigned long loops) 19 25 { 20 26 __asm__ __volatile__ ( ··· 28 22 " .align 3 \n" 29 23 "1: bnez %0, 1b \n" 30 24 #if BITS_PER_LONG == 32 31 - " subu %0, 1 \n" 25 + " subu %0, %1 \n" 32 26 #else 33 - " dsubu %0, 1 \n" 27 + " dsubu %0, %1 \n" 34 28 #endif 35 29 " .set reorder \n" 36 30 : "=r" (loops) 37 - : "0" (loops)); 31 + : GCC_DADDI_IMM_ASM() (1), "0" (loops)); 38 32 } 39 33 EXPORT_SYMBOL(__delay); 40 34
+6 -7
arch/mips/lib/strncpy_user.S
··· 35 35 bnez v0, .Lfault\@ 36 36 37 37 FEXPORT(__strncpy_from_\func\()_nocheck_asm) 38 - .set noreorder 39 38 move t0, zero 40 39 move v1, a1 41 40 .ifeqs "\func","kernel" ··· 44 45 .endif 45 46 PTR_ADDIU v1, 1 46 47 R10KCBARRIER(0(ra)) 48 + sb v0, (a0) 47 49 beqz v0, 2f 48 - sb v0, (a0) 49 50 PTR_ADDIU t0, 1 51 + PTR_ADDIU a0, 1 50 52 bne t0, a2, 1b 51 - PTR_ADDIU a0, 1 52 53 2: PTR_ADDU v0, a1, t0 53 54 xor v0, a1 54 55 bltz v0, .Lfault\@ 55 - nop 56 + move v0, t0 56 57 jr ra # return n 57 - move v0, t0 58 58 END(__strncpy_from_\func\()_asm) 59 59 60 - .Lfault\@: jr ra 61 - li v0, -EFAULT 60 + .Lfault\@: 61 + li v0, -EFAULT 62 + jr ra 62 63 63 64 .section __ex_table,"a" 64 65 PTR 1b, .Lfault\@
-1
arch/mips/loongson/Kconfig
··· 64 64 bool "Lemote Loongson 3A family machines" 65 65 select ARCH_SPARSEMEM_ENABLE 66 66 select GENERIC_ISA_DMA_SUPPORT_BROKEN 67 - select GENERIC_HARDIRQS_NO__DO_IRQ 68 67 select BOOT_ELF32 69 68 select BOARD_SCACHE 70 69 select CSRC_R4K
+3 -2
arch/mips/loongson/lemote-2f/clock.c
··· 91 91 92 92 int clk_set_rate(struct clk *clk, unsigned long rate) 93 93 { 94 + unsigned int rate_khz = rate / 1000; 94 95 int ret = 0; 95 96 int regval; 96 97 int i; ··· 112 111 if (loongson2_clockmod_table[i].frequency == 113 112 CPUFREQ_ENTRY_INVALID) 114 113 continue; 115 - if (rate == loongson2_clockmod_table[i].frequency) 114 + if (rate_khz == loongson2_clockmod_table[i].frequency) 116 115 break; 117 116 } 118 - if (rate != loongson2_clockmod_table[i].frequency) 117 + if (rate_khz != loongson2_clockmod_table[i].frequency) 119 118 return -ENOTSUPP; 120 119 121 120 clk->rate = rate;
+3 -1
arch/mips/mm/tlb-funcs.S
··· 16 16 17 17 #define FASTPATH_SIZE 128 18 18 19 + EXPORT(tlbmiss_handler_setup_pgd_start) 19 20 LEAF(tlbmiss_handler_setup_pgd) 20 - .space 16 * 4 21 + 1: j 1b /* Dummy, will be replaced. */ 22 + .space 64 21 23 END(tlbmiss_handler_setup_pgd) 22 24 EXPORT(tlbmiss_handler_setup_pgd_end) 23 25
+4 -3
arch/mips/mm/tlbex.c
··· 1422 1422 extern u32 handle_tlbl[], handle_tlbl_end[]; 1423 1423 extern u32 handle_tlbs[], handle_tlbs_end[]; 1424 1424 extern u32 handle_tlbm[], handle_tlbm_end[]; 1425 - extern u32 tlbmiss_handler_setup_pgd[], tlbmiss_handler_setup_pgd_end[]; 1425 + extern u32 tlbmiss_handler_setup_pgd_start[], tlbmiss_handler_setup_pgd[]; 1426 + extern u32 tlbmiss_handler_setup_pgd_end[]; 1426 1427 1427 1428 static void build_setup_pgd(void) 1428 1429 { 1429 1430 const int a0 = 4; 1430 1431 const int __maybe_unused a1 = 5; 1431 1432 const int __maybe_unused a2 = 6; 1432 - u32 *p = tlbmiss_handler_setup_pgd; 1433 + u32 *p = tlbmiss_handler_setup_pgd_start; 1433 1434 const int tlbmiss_handler_setup_pgd_size = 1434 - tlbmiss_handler_setup_pgd_end - tlbmiss_handler_setup_pgd; 1435 + tlbmiss_handler_setup_pgd_end - tlbmiss_handler_setup_pgd_start; 1435 1436 #ifndef CONFIG_MIPS_PGD_C0_CONTEXT 1436 1437 long pgdc = (long)pgd_current; 1437 1438 #endif
+1
arch/mips/ralink/dts/mt7620a_eval.dts
··· 7 7 model = "Ralink MT7620A evaluation board"; 8 8 9 9 memory@0 { 10 + device_type = "memory"; 10 11 reg = <0x0 0x2000000>; 11 12 }; 12 13
+1
arch/mips/ralink/dts/rt2880_eval.dts
··· 7 7 model = "Ralink RT2880 evaluation board"; 8 8 9 9 memory@0 { 10 + device_type = "memory"; 10 11 reg = <0x8000000 0x2000000>; 11 12 }; 12 13
+1
arch/mips/ralink/dts/rt3052_eval.dts
··· 7 7 model = "Ralink RT3052 evaluation board"; 8 8 9 9 memory@0 { 10 + device_type = "memory"; 10 11 reg = <0x0 0x2000000>; 11 12 }; 12 13
+1
arch/mips/ralink/dts/rt3883_eval.dts
··· 7 7 model = "Ralink RT3883 evaluation board"; 8 8 9 9 memory@0 { 10 + device_type = "memory"; 10 11 reg = <0x0 0x2000000>; 11 12 }; 12 13
+1
arch/parisc/Kconfig
··· 22 22 select GENERIC_SMP_IDLE_THREAD 23 23 select GENERIC_STRNCPY_FROM_USER 24 24 select SYSCTL_ARCH_UNALIGN_ALLOW 25 + select SYSCTL_EXCEPTION_TRACE 25 26 select HAVE_MOD_ARCH_SPECIFIC 26 27 select VIRT_TO_BUS 27 28 select MODULES_USE_ELF_RELA
+5
arch/parisc/include/asm/processor.h
··· 55 55 #define STACK_TOP TASK_SIZE 56 56 #define STACK_TOP_MAX DEFAULT_TASK_SIZE 57 57 58 + /* Allow bigger stacks for 64-bit processes */ 59 + #define STACK_SIZE_MAX (USER_WIDE_MODE \ 60 + ? (1 << 30) /* 1 GB */ \ 61 + : (CONFIG_MAX_STACK_SIZE_MB*1024*1024)) 62 + 58 63 #endif 59 64 60 65 #ifndef __ASSEMBLY__
+2 -1
arch/parisc/include/uapi/asm/unistd.h
··· 829 829 #define __NR_sched_setattr (__NR_Linux + 334) 830 830 #define __NR_sched_getattr (__NR_Linux + 335) 831 831 #define __NR_utimes (__NR_Linux + 336) 832 + #define __NR_renameat2 (__NR_Linux + 337) 832 833 833 - #define __NR_Linux_syscalls (__NR_utimes + 1) 834 + #define __NR_Linux_syscalls (__NR_renameat2 + 1) 834 835 835 836 836 837 #define __IGNORE_select /* newselect */
+3 -3
arch/parisc/kernel/sys_parisc.c
··· 72 72 { 73 73 unsigned long stack_base; 74 74 75 - /* Limit stack size to 1GB - see setup_arg_pages() in fs/exec.c */ 75 + /* Limit stack size - see setup_arg_pages() in fs/exec.c */ 76 76 stack_base = rlimit_max(RLIMIT_STACK); 77 - if (stack_base > (1 << 30)) 78 - stack_base = 1 << 30; 77 + if (stack_base > STACK_SIZE_MAX) 78 + stack_base = STACK_SIZE_MAX; 79 79 80 80 return PAGE_ALIGN(STACK_TOP - stack_base); 81 81 }
+9 -3
arch/parisc/kernel/syscall.S
··· 589 589 # endif 590 590 /* ENABLE_LWS_DEBUG */ 591 591 592 + rsm PSW_SM_I, %r0 /* Disable interrupts */ 593 + /* COW breaks can cause contention on UP systems */ 592 594 LDCW 0(%sr2,%r20), %r28 /* Try to acquire the lock */ 593 595 cmpb,<>,n %r0, %r28, cas_action /* Did we get it? */ 594 596 cas_wouldblock: 595 597 ldo 2(%r0), %r28 /* 2nd case */ 598 + ssm PSW_SM_I, %r0 596 599 b lws_exit /* Contended... */ 597 600 ldo -EAGAIN(%r0), %r21 /* Spin in userspace */ 598 601 ··· 622 619 stw %r1, 4(%sr2,%r20) 623 620 #endif 624 621 /* The load and store could fail */ 625 - 1: ldw 0(%sr3,%r26), %r28 622 + 1: ldw,ma 0(%sr3,%r26), %r28 626 623 sub,<> %r28, %r25, %r0 627 - 2: stw %r24, 0(%sr3,%r26) 624 + 2: stw,ma %r24, 0(%sr3,%r26) 628 625 /* Free lock */ 629 - stw %r20, 0(%sr2,%r20) 626 + stw,ma %r20, 0(%sr2,%r20) 630 627 #if ENABLE_LWS_DEBUG 631 628 /* Clear thread register indicator */ 632 629 stw %r0, 4(%sr2,%r20) 633 630 #endif 631 + /* Enable interrupts */ 632 + ssm PSW_SM_I, %r0 634 633 /* Return to userspace, set no error */ 635 634 b lws_exit 636 635 copy %r0, %r21 ··· 644 639 #if ENABLE_LWS_DEBUG 645 640 stw %r0, 4(%sr2,%r20) 646 641 #endif 642 + ssm PSW_SM_I, %r0 647 643 b lws_exit 648 644 ldo -EFAULT(%r0),%r21 /* set errno */ 649 645 nop
+1
arch/parisc/kernel/syscall_table.S
··· 432 432 ENTRY_SAME(sched_setattr) 433 433 ENTRY_SAME(sched_getattr) /* 335 */ 434 434 ENTRY_COMP(utimes) 435 + ENTRY_COMP(renameat2) 435 436 436 437 /* Nothing yet */ 437 438
+25 -29
arch/parisc/kernel/traps.c
··· 25 25 #include <linux/interrupt.h> 26 26 #include <linux/console.h> 27 27 #include <linux/bug.h> 28 + #include <linux/ratelimit.h> 28 29 29 30 #include <asm/assembly.h> 30 31 #include <asm/uaccess.h> ··· 42 41 #include <asm/cacheflush.h> 43 42 44 43 #include "../math-emu/math-emu.h" /* for handle_fpe() */ 45 - 46 - #define PRINT_USER_FAULTS /* (turn this on if you want user faults to be */ 47 - /* dumped to the console via printk) */ 48 44 49 45 #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) 50 46 DEFINE_SPINLOCK(pa_dbit_lock); ··· 158 160 } 159 161 } 160 162 163 + static DEFINE_RATELIMIT_STATE(_hppa_rs, 164 + DEFAULT_RATELIMIT_INTERVAL, DEFAULT_RATELIMIT_BURST); 165 + 166 + #define parisc_printk_ratelimited(critical, regs, fmt, ...) { \ 167 + if ((critical || show_unhandled_signals) && __ratelimit(&_hppa_rs)) { \ 168 + printk(fmt, ##__VA_ARGS__); \ 169 + show_regs(regs); \ 170 + } \ 171 + } 172 + 173 + 161 174 static void do_show_stack(struct unwind_frame_info *info) 162 175 { 163 176 int i = 1; ··· 238 229 if (err == 0) 239 230 return; /* STFU */ 240 231 241 - printk(KERN_CRIT "%s (pid %d): %s (code %ld) at " RFMT "\n", 232 + parisc_printk_ratelimited(1, regs, 233 + KERN_CRIT "%s (pid %d): %s (code %ld) at " RFMT "\n", 242 234 current->comm, task_pid_nr(current), str, err, regs->iaoq[0]); 243 - #ifdef PRINT_USER_FAULTS 244 - /* XXX for debugging only */ 245 - show_regs(regs); 246 - #endif 235 + 247 236 return; 248 237 } 249 238 ··· 328 321 (tt == BUG_TRAP_TYPE_NONE) ? 
9 : 0); 329 322 } 330 323 331 - #ifdef PRINT_USER_FAULTS 332 - if (unlikely(iir != GDB_BREAK_INSN)) { 333 - printk(KERN_DEBUG "break %d,%d: pid=%d command='%s'\n", 324 + if (unlikely(iir != GDB_BREAK_INSN)) 325 + parisc_printk_ratelimited(0, regs, 326 + KERN_DEBUG "break %d,%d: pid=%d command='%s'\n", 334 327 iir & 31, (iir>>13) & ((1<<13)-1), 335 328 task_pid_nr(current), current->comm); 336 - show_regs(regs); 337 - } 338 - #endif 339 329 340 330 /* send standard GDB signal */ 341 331 handle_gdb_break(regs, TRAP_BRKPT); ··· 762 758 763 759 default: 764 760 if (user_mode(regs)) { 765 - #ifdef PRINT_USER_FAULTS 766 - printk(KERN_DEBUG "\nhandle_interruption() pid=%d command='%s'\n", 767 - task_pid_nr(current), current->comm); 768 - show_regs(regs); 769 - #endif 761 + parisc_printk_ratelimited(0, regs, KERN_DEBUG 762 + "handle_interruption() pid=%d command='%s'\n", 763 + task_pid_nr(current), current->comm); 770 764 /* SIGBUS, for lack of a better one. */ 771 765 si.si_signo = SIGBUS; 772 766 si.si_code = BUS_OBJERR; ··· 781 779 782 780 if (user_mode(regs)) { 783 781 if ((fault_space >> SPACEID_SHIFT) != (regs->sr[7] >> SPACEID_SHIFT)) { 784 - #ifdef PRINT_USER_FAULTS 785 - if (fault_space == 0) 786 - printk(KERN_DEBUG "User Fault on Kernel Space "); 787 - else 788 - printk(KERN_DEBUG "User Fault (long pointer) (fault %d) ", 789 - code); 790 - printk(KERN_CONT "pid=%d command='%s'\n", 791 - task_pid_nr(current), current->comm); 792 - show_regs(regs); 793 - #endif 782 + parisc_printk_ratelimited(0, regs, KERN_DEBUG 783 + "User fault %d on space 0x%08lx, pid=%d command='%s'\n", 784 + code, fault_space, 785 + task_pid_nr(current), current->comm); 794 786 si.si_signo = SIGSEGV; 795 787 si.si_errno = 0; 796 788 si.si_code = SEGV_MAPERR;
+30 -14
arch/parisc/mm/fault.c
··· 19 19 #include <asm/uaccess.h> 20 20 #include <asm/traps.h> 21 21 22 - #define PRINT_USER_FAULTS /* (turn this on if you want user faults to be */ 23 - /* dumped to the console via printk) */ 24 - 25 - 26 22 /* Various important other fields */ 27 23 #define bit22set(x) (x & 0x00000200) 28 24 #define bits23_25set(x) (x & 0x000001c0) ··· 29 33 30 34 31 35 DEFINE_PER_CPU(struct exception_data, exception_data); 36 + 37 + int show_unhandled_signals = 1; 32 38 33 39 /* 34 40 * parisc_acctyp(unsigned int inst) -- ··· 171 173 return 0; 172 174 } 173 175 176 + /* 177 + * Print out info about fatal segfaults, if the show_unhandled_signals 178 + * sysctl is set: 179 + */ 180 + static inline void 181 + show_signal_msg(struct pt_regs *regs, unsigned long code, 182 + unsigned long address, struct task_struct *tsk, 183 + struct vm_area_struct *vma) 184 + { 185 + if (!unhandled_signal(tsk, SIGSEGV)) 186 + return; 187 + 188 + if (!printk_ratelimit()) 189 + return; 190 + 191 + pr_warn("\n"); 192 + pr_warn("do_page_fault() command='%s' type=%lu address=0x%08lx", 193 + tsk->comm, code, address); 194 + print_vma_addr(KERN_CONT " in ", regs->iaoq[0]); 195 + if (vma) 196 + pr_warn(" vm_start = 0x%08lx, vm_end = 0x%08lx\n", 197 + vma->vm_start, vma->vm_end); 198 + 199 + show_regs(regs); 200 + } 201 + 174 202 void do_page_fault(struct pt_regs *regs, unsigned long code, 175 203 unsigned long address) 176 204 { ··· 294 270 if (user_mode(regs)) { 295 271 struct siginfo si; 296 272 297 - #ifdef PRINT_USER_FAULTS 298 - printk(KERN_DEBUG "\n"); 299 - printk(KERN_DEBUG "do_page_fault() pid=%d command='%s' type=%lu address=0x%08lx\n", 300 - task_pid_nr(tsk), tsk->comm, code, address); 301 - if (vma) { 302 - printk(KERN_DEBUG "vm_start = 0x%08lx, vm_end = 0x%08lx\n", 303 - vma->vm_start, vma->vm_end); 304 - } 305 - show_regs(regs); 306 - #endif 273 + show_signal_msg(regs, code, address, tsk, vma); 274 + 307 275 switch (code) { 308 276 case 15: /* Data TLB miss fault/Data page fault */ 309 277 
/* send SIGSEGV when outside of vma */
-3
arch/powerpc/kernel/time.c
··· 813 813 static int decrementer_set_next_event(unsigned long evt, 814 814 struct clock_event_device *dev) 815 815 { 816 - /* Don't adjust the decrementer if some irq work is pending */ 817 - if (test_irq_work_pending()) 818 - return 0; 819 816 __get_cpu_var(decrementers_next_tb) = get_tb_or_rtc() + evt; 820 817 set_dec(evt); 821 818
+2 -1
arch/powerpc/platforms/powernv/eeh-ioda.c
··· 549 549 ret = ioda_eeh_phb_reset(hose, option); 550 550 } else { 551 551 bus = eeh_pe_bus_get(pe); 552 - if (pci_is_root_bus(bus)) 552 + if (pci_is_root_bus(bus) || 553 + pci_is_root_bus(bus->parent)) 553 554 ret = ioda_eeh_root_reset(hose, option); 554 555 else 555 556 ret = ioda_eeh_bridge_reset(hose, bus->self, option);
+3
arch/s390/crypto/aes_s390.c
··· 820 820 else 821 821 memcpy(walk->iv, ctrptr, AES_BLOCK_SIZE); 822 822 spin_unlock(&ctrblk_lock); 823 + } else { 824 + if (!nbytes) 825 + memcpy(walk->iv, ctrptr, AES_BLOCK_SIZE); 823 826 } 824 827 /* 825 828 * final block may be < AES_BLOCK_SIZE, copy only nbytes
+3
arch/s390/crypto/des_s390.c
··· 429 429 else 430 430 memcpy(walk->iv, ctrptr, DES_BLOCK_SIZE); 431 431 spin_unlock(&ctrblk_lock); 432 + } else { 433 + if (!nbytes) 434 + memcpy(walk->iv, ctrptr, DES_BLOCK_SIZE); 432 435 } 433 436 /* final block may be < DES_BLOCK_SIZE, copy only nbytes */ 434 437 if (nbytes) {
+1
arch/x86/include/asm/hugetlb.h
··· 52 52 static inline void huge_ptep_clear_flush(struct vm_area_struct *vma, 53 53 unsigned long addr, pte_t *ptep) 54 54 { 55 + ptep_clear_flush(vma, addr, ptep); 55 56 } 56 57 57 58 static inline int huge_pte_none(pte_t pte)
+1
arch/x86/kernel/cpu/rdrand.c
··· 27 27 static int __init x86_rdrand_setup(char *s) 28 28 { 29 29 setup_clear_cpu_cap(X86_FEATURE_RDRAND); 30 + setup_clear_cpu_cap(X86_FEATURE_RDSEED); 30 31 return 1; 31 32 } 32 33 __setup("nordrand", x86_rdrand_setup);
+3 -1
arch/x86/kernel/ldt.c
··· 20 20 #include <asm/mmu_context.h> 21 21 #include <asm/syscalls.h> 22 22 23 + int sysctl_ldt16 = 0; 24 + 23 25 #ifdef CONFIG_SMP 24 26 static void flush_ldt(void *current_mm) 25 27 { ··· 236 234 * IRET leaking the high bits of the kernel stack address. 237 235 */ 238 236 #ifdef CONFIG_X86_64 239 - if (!ldt_info.seg_32bit) { 237 + if (!ldt_info.seg_32bit && !sysctl_ldt16) { 240 238 error = -EINVAL; 241 239 goto out_unlock; 242 240 }
+8
arch/x86/vdso/vdso32-setup.c
··· 39 39 #ifdef CONFIG_X86_64 40 40 #define vdso_enabled sysctl_vsyscall32 41 41 #define arch_setup_additional_pages syscall32_setup_pages 42 + extern int sysctl_ldt16; 42 43 #endif 43 44 44 45 /* ··· 246 245 { 247 246 .procname = "vsyscall32", 248 247 .data = &sysctl_vsyscall32, 248 + .maxlen = sizeof(int), 249 + .mode = 0644, 250 + .proc_handler = proc_dointvec 251 + }, 252 + { 253 + .procname = "ldt16", 254 + .data = &sysctl_ldt16, 249 255 .maxlen = sizeof(int), 250 256 .mode = 0644, 251 257 .proc_handler = proc_dointvec
+14 -1
block/blk-cgroup.c
··· 451 451 struct blkcg_gq *blkg; 452 452 int i; 453 453 454 - mutex_lock(&blkcg_pol_mutex); 454 + /* 455 + * XXX: We invoke cgroup_add/rm_cftypes() under blkcg_pol_mutex 456 + * which ends up putting cgroup's internal cgroup_tree_mutex under 457 + * it; however, cgroup_tree_mutex is nested above cgroup file 458 + * active protection and grabbing blkcg_pol_mutex from a cgroup 459 + * file operation creates a possible circular dependency. cgroup 460 + * internal locking is planned to go through further simplification 461 + * and this issue should go away soon. For now, let's trylock 462 + * blkcg_pol_mutex and restart the write on failure. 463 + * 464 + * http://lkml.kernel.org/g/5363C04B.4010400@oracle.com 465 + */ 466 + if (!mutex_trylock(&blkcg_pol_mutex)) 467 + return restart_syscall(); 455 468 spin_lock_irq(&blkcg->lock); 456 469 457 470 /*
+1 -1
drivers/Makefile
··· 119 119 obj-y += firmware/ 120 120 obj-$(CONFIG_CRYPTO) += crypto/ 121 121 obj-$(CONFIG_SUPERH) += sh/ 122 - obj-$(CONFIG_ARCH_SHMOBILE_LEGACY) += sh/ 122 + obj-$(CONFIG_ARCH_SHMOBILE) += sh/ 123 123 ifndef CONFIG_ARCH_USES_GETTIMEOFFSET 124 124 obj-y += clocksource/ 125 125 endif
+17
drivers/acpi/Kconfig
··· 47 47 depends on SUSPEND || HIBERNATION 48 48 default y 49 49 50 + config ACPI_PROCFS_POWER 51 + bool "Deprecated power /proc/acpi directories" 52 + depends on PROC_FS 53 + help 54 + For backwards compatibility, this option allows 55 + deprecated power /proc/acpi/ directories to exist, even when 56 + they have been replaced by functions in /sys. 57 + The deprecated directories (and their replacements) include: 58 + /proc/acpi/battery/* (/sys/class/power_supply/*) 59 + /proc/acpi/ac_adapter/* (sys/class/power_supply/*) 60 + This option has no effect on /proc/acpi/ directories 61 + and functions, which do not yet exist in /sys 62 + This option, together with the proc directories, will be 63 + deleted in the future. 64 + 65 + Say N to delete power /proc/acpi/ directories that have moved to /sys/ 66 + 50 67 config ACPI_EC_DEBUGFS 51 68 tristate "EC read/write access through /sys/kernel/debug/ec" 52 69 default n
+1
drivers/acpi/Makefile
··· 47 47 acpi-$(CONFIG_X86) += acpi_cmos_rtc.o 48 48 acpi-$(CONFIG_DEBUG_FS) += debugfs.o 49 49 acpi-$(CONFIG_ACPI_NUMA) += numa.o 50 + acpi-$(CONFIG_ACPI_PROCFS_POWER) += cm_sbs.o 50 51 ifdef CONFIG_ACPI_VIDEO 51 52 acpi-y += video_detect.o 52 53 endif
+58 -59
drivers/acpi/ac.c
··· 52 52 MODULE_DESCRIPTION("ACPI AC Adapter Driver"); 53 53 MODULE_LICENSE("GPL"); 54 54 55 + static int acpi_ac_add(struct acpi_device *device); 56 + static int acpi_ac_remove(struct acpi_device *device); 57 + static void acpi_ac_notify(struct acpi_device *device, u32 event); 58 + 59 + static const struct acpi_device_id ac_device_ids[] = { 60 + {"ACPI0003", 0}, 61 + {"", 0}, 62 + }; 63 + MODULE_DEVICE_TABLE(acpi, ac_device_ids); 64 + 65 + #ifdef CONFIG_PM_SLEEP 66 + static int acpi_ac_resume(struct device *dev); 67 + #endif 68 + static SIMPLE_DEV_PM_OPS(acpi_ac_pm, NULL, acpi_ac_resume); 69 + 55 70 static int ac_sleep_before_get_state_ms; 71 + 72 + static struct acpi_driver acpi_ac_driver = { 73 + .name = "ac", 74 + .class = ACPI_AC_CLASS, 75 + .ids = ac_device_ids, 76 + .flags = ACPI_DRIVER_ALL_NOTIFY_EVENTS, 77 + .ops = { 78 + .add = acpi_ac_add, 79 + .remove = acpi_ac_remove, 80 + .notify = acpi_ac_notify, 81 + }, 82 + .drv.pm = &acpi_ac_pm, 83 + }; 56 84 57 85 struct acpi_ac { 58 86 struct power_supply charger; 59 - struct platform_device *pdev; 87 + struct acpi_device * device; 60 88 unsigned long long state; 61 89 struct notifier_block battery_nb; 62 90 }; ··· 97 69 98 70 static int acpi_ac_get_state(struct acpi_ac *ac) 99 71 { 100 - acpi_status status; 101 - acpi_handle handle = ACPI_HANDLE(&ac->pdev->dev); 72 + acpi_status status = AE_OK; 102 73 103 - status = acpi_evaluate_integer(handle, "_PSR", NULL, 74 + if (!ac) 75 + return -EINVAL; 76 + 77 + status = acpi_evaluate_integer(ac->device->handle, "_PSR", NULL, 104 78 &ac->state); 105 79 if (ACPI_FAILURE(status)) { 106 80 ACPI_EXCEPTION((AE_INFO, status, ··· 147 117 Driver Model 148 118 -------------------------------------------------------------------------- */ 149 119 150 - static void acpi_ac_notify_handler(acpi_handle handle, u32 event, void *data) 120 + static void acpi_ac_notify(struct acpi_device *device, u32 event) 151 121 { 152 - struct acpi_ac *ac = data; 153 - struct acpi_device *adev; 122 + 
struct acpi_ac *ac = acpi_driver_data(device); 154 123 155 124 if (!ac) 156 125 return; ··· 172 143 msleep(ac_sleep_before_get_state_ms); 173 144 174 145 acpi_ac_get_state(ac); 175 - adev = ACPI_COMPANION(&ac->pdev->dev); 176 - acpi_bus_generate_netlink_event(adev->pnp.device_class, 177 - dev_name(&ac->pdev->dev), 178 - event, (u32) ac->state); 179 - acpi_notifier_call_chain(adev, event, (u32) ac->state); 146 + acpi_bus_generate_netlink_event(device->pnp.device_class, 147 + dev_name(&device->dev), event, 148 + (u32) ac->state); 149 + acpi_notifier_call_chain(device, event, (u32) ac->state); 180 150 kobject_uevent(&ac->charger.dev->kobj, KOBJ_CHANGE); 181 151 } 182 152 ··· 220 192 {}, 221 193 }; 222 194 223 - static int acpi_ac_probe(struct platform_device *pdev) 195 + static int acpi_ac_add(struct acpi_device *device) 224 196 { 225 197 int result = 0; 226 198 struct acpi_ac *ac = NULL; 227 - struct acpi_device *adev; 228 199 229 - if (!pdev) 200 + 201 + if (!device) 230 202 return -EINVAL; 231 - 232 - adev = ACPI_COMPANION(&pdev->dev); 233 - if (!adev) 234 - return -ENODEV; 235 203 236 204 ac = kzalloc(sizeof(struct acpi_ac), GFP_KERNEL); 237 205 if (!ac) 238 206 return -ENOMEM; 239 207 240 - strcpy(acpi_device_name(adev), ACPI_AC_DEVICE_NAME); 241 - strcpy(acpi_device_class(adev), ACPI_AC_CLASS); 242 - ac->pdev = pdev; 243 - platform_set_drvdata(pdev, ac); 208 + ac->device = device; 209 + strcpy(acpi_device_name(device), ACPI_AC_DEVICE_NAME); 210 + strcpy(acpi_device_class(device), ACPI_AC_CLASS); 211 + device->driver_data = ac; 244 212 245 213 result = acpi_ac_get_state(ac); 246 214 if (result) 247 215 goto end; 248 216 249 - ac->charger.name = acpi_device_bid(adev); 217 + ac->charger.name = acpi_device_bid(device); 250 218 ac->charger.type = POWER_SUPPLY_TYPE_MAINS; 251 219 ac->charger.properties = ac_props; 252 220 ac->charger.num_properties = ARRAY_SIZE(ac_props); 253 221 ac->charger.get_property = get_ac_property; 254 - result = 
power_supply_register(&pdev->dev, &ac->charger); 222 + result = power_supply_register(&ac->device->dev, &ac->charger); 255 223 if (result) 256 224 goto end; 257 225 258 - result = acpi_install_notify_handler(ACPI_HANDLE(&pdev->dev), 259 - ACPI_ALL_NOTIFY, acpi_ac_notify_handler, ac); 260 - if (result) { 261 - power_supply_unregister(&ac->charger); 262 - goto end; 263 - } 264 226 printk(KERN_INFO PREFIX "%s [%s] (%s)\n", 265 - acpi_device_name(adev), acpi_device_bid(adev), 227 + acpi_device_name(device), acpi_device_bid(device), 266 228 ac->state ? "on-line" : "off-line"); 267 229 268 230 ac->battery_nb.notifier_call = acpi_ac_battery_notify; ··· 274 256 if (!dev) 275 257 return -EINVAL; 276 258 277 - ac = platform_get_drvdata(to_platform_device(dev)); 259 + ac = acpi_driver_data(to_acpi_device(dev)); 278 260 if (!ac) 279 261 return -EINVAL; 280 262 ··· 288 270 #else 289 271 #define acpi_ac_resume NULL 290 272 #endif 291 - static SIMPLE_DEV_PM_OPS(acpi_ac_pm_ops, NULL, acpi_ac_resume); 292 273 293 - static int acpi_ac_remove(struct platform_device *pdev) 274 + static int acpi_ac_remove(struct acpi_device *device) 294 275 { 295 - struct acpi_ac *ac; 276 + struct acpi_ac *ac = NULL; 296 277 297 - if (!pdev) 278 + 279 + if (!device || !acpi_driver_data(device)) 298 280 return -EINVAL; 299 281 300 - acpi_remove_notify_handler(ACPI_HANDLE(&pdev->dev), 301 - ACPI_ALL_NOTIFY, acpi_ac_notify_handler); 282 + ac = acpi_driver_data(device); 302 283 303 - ac = platform_get_drvdata(pdev); 304 284 if (ac->charger.dev) 305 285 power_supply_unregister(&ac->charger); 306 286 unregister_acpi_notifier(&ac->battery_nb); ··· 308 292 return 0; 309 293 } 310 294 311 - static const struct acpi_device_id acpi_ac_match[] = { 312 - { "ACPI0003", 0 }, 313 - { } 314 - }; 315 - MODULE_DEVICE_TABLE(acpi, acpi_ac_match); 316 - 317 - static struct platform_driver acpi_ac_driver = { 318 - .probe = acpi_ac_probe, 319 - .remove = acpi_ac_remove, 320 - .driver = { 321 - .name = "acpi-ac", 322 - .owner 
= THIS_MODULE, 323 - .pm = &acpi_ac_pm_ops, 324 - .acpi_match_table = ACPI_PTR(acpi_ac_match), 325 - }, 326 - }; 327 - 328 295 static int __init acpi_ac_init(void) 329 296 { 330 297 int result; ··· 315 316 if (acpi_disabled) 316 317 return -ENODEV; 317 318 318 - result = platform_driver_register(&acpi_ac_driver); 319 + result = acpi_bus_register_driver(&acpi_ac_driver); 319 320 if (result < 0) 320 321 return -ENODEV; 321 322 ··· 324 325 325 326 static void __exit acpi_ac_exit(void) 326 327 { 327 - platform_driver_unregister(&acpi_ac_driver); 328 + acpi_bus_unregister_driver(&acpi_ac_driver); 328 329 } 329 330 module_init(acpi_ac_init); 330 331 module_exit(acpi_ac_exit);
-1
drivers/acpi/acpi_platform.c
··· 29 29 static const struct acpi_device_id acpi_platform_device_ids[] = { 30 30 31 31 { "PNP0D40" }, 32 - { "ACPI0003" }, 33 32 { "VPC2004" }, 34 33 { "BCM4752" }, 35 34
-1
drivers/acpi/acpi_processor.c
··· 405 405 goto err; 406 406 407 407 pr->dev = dev; 408 - dev->offline = pr->flags.need_hotplug_init; 409 408 410 409 /* Trigger the processor driver's .probe() if present. */ 411 410 if (device_attach(dev) >= 0)
+2 -2
drivers/acpi/acpica/acglobal.h
··· 141 141 * address. Although ACPICA adheres to the ACPI specification which 142 142 * requires the use of the corresponding 64-bit address if it is non-zero, 143 143 * some machines have been found to have a corrupted non-zero 64-bit 144 - * address. Default is FALSE, do not favor the 32-bit addresses. 144 + * address. Default is TRUE, favor the 32-bit addresses. 145 145 */ 146 - ACPI_INIT_GLOBAL(u8, acpi_gbl_use32_bit_fadt_addresses, FALSE); 146 + ACPI_INIT_GLOBAL(u8, acpi_gbl_use32_bit_fadt_addresses, TRUE); 147 147 148 148 /* 149 149 * Optionally truncate I/O addresses to 16 bits. Provides compatibility
+5 -2
drivers/acpi/acpica/tbutils.c
··· 461 461 u32 table_count; 462 462 struct acpi_table_header *table; 463 463 acpi_physical_address address; 464 + acpi_physical_address rsdt_address; 464 465 u32 length; 465 466 u8 *table_entry; 466 467 acpi_status status; ··· 489 488 * as per the ACPI specification. 490 489 */ 491 490 address = (acpi_physical_address) rsdp->xsdt_physical_address; 491 + rsdt_address = 492 + (acpi_physical_address) rsdp->rsdt_physical_address; 492 493 table_entry_size = ACPI_XSDT_ENTRY_SIZE; 493 494 } else { 494 495 /* Root table is an RSDT (32-bit physical addresses) */ 495 496 496 497 address = (acpi_physical_address) rsdp->rsdt_physical_address; 498 + rsdt_address = address; 497 499 table_entry_size = ACPI_RSDT_ENTRY_SIZE; 498 500 } 499 501 ··· 519 515 520 516 /* Fall back to the RSDT */ 521 517 522 - address = 523 - (acpi_physical_address) rsdp->rsdt_physical_address; 518 + address = rsdt_address; 524 519 table_entry_size = ACPI_RSDT_ENTRY_SIZE; 525 520 } 526 521 }
+328 -1
drivers/acpi/battery.c
··· 36 36 #include <linux/suspend.h> 37 37 #include <asm/unaligned.h> 38 38 39 + #ifdef CONFIG_ACPI_PROCFS_POWER 40 + #include <linux/proc_fs.h> 41 + #include <linux/seq_file.h> 42 + #include <asm/uaccess.h> 43 + #endif 44 + 39 45 #include <linux/acpi.h> 40 46 #include <linux/power_supply.h> 41 47 ··· 69 63 static unsigned int cache_time = 1000; 70 64 module_param(cache_time, uint, 0644); 71 65 MODULE_PARM_DESC(cache_time, "cache time in milliseconds"); 66 + 67 + #ifdef CONFIG_ACPI_PROCFS_POWER 68 + extern struct proc_dir_entry *acpi_lock_battery_dir(void); 69 + extern void *acpi_unlock_battery_dir(struct proc_dir_entry *acpi_battery_dir); 70 + 71 + enum acpi_battery_files { 72 + info_tag = 0, 73 + state_tag, 74 + alarm_tag, 75 + ACPI_BATTERY_NUMFILES, 76 + }; 77 + 78 + #endif 72 79 73 80 static const struct acpi_device_id battery_device_ids[] = { 74 81 {"PNP0C0A", 0}, ··· 317 298 POWER_SUPPLY_PROP_MANUFACTURER, 318 299 POWER_SUPPLY_PROP_SERIAL_NUMBER, 319 300 }; 301 + 302 + #ifdef CONFIG_ACPI_PROCFS_POWER 303 + inline char *acpi_battery_units(struct acpi_battery *battery) 304 + { 305 + return (battery->power_unit == ACPI_BATTERY_POWER_UNIT_MA) ? 306 + "mA" : "mW"; 307 + } 308 + #endif 320 309 321 310 /* -------------------------------------------------------------------------- 322 311 Battery Management ··· 744 717 } 745 718 746 719 /* -------------------------------------------------------------------------- 720 + FS Interface (/proc) 721 + -------------------------------------------------------------------------- */ 722 + 723 + #ifdef CONFIG_ACPI_PROCFS_POWER 724 + static struct proc_dir_entry *acpi_battery_dir; 725 + 726 + static int acpi_battery_print_info(struct seq_file *seq, int result) 727 + { 728 + struct acpi_battery *battery = seq->private; 729 + 730 + if (result) 731 + goto end; 732 + 733 + seq_printf(seq, "present: %s\n", 734 + acpi_battery_present(battery) ? 
"yes" : "no"); 735 + if (!acpi_battery_present(battery)) 736 + goto end; 737 + if (battery->design_capacity == ACPI_BATTERY_VALUE_UNKNOWN) 738 + seq_printf(seq, "design capacity: unknown\n"); 739 + else 740 + seq_printf(seq, "design capacity: %d %sh\n", 741 + battery->design_capacity, 742 + acpi_battery_units(battery)); 743 + 744 + if (battery->full_charge_capacity == ACPI_BATTERY_VALUE_UNKNOWN) 745 + seq_printf(seq, "last full capacity: unknown\n"); 746 + else 747 + seq_printf(seq, "last full capacity: %d %sh\n", 748 + battery->full_charge_capacity, 749 + acpi_battery_units(battery)); 750 + 751 + seq_printf(seq, "battery technology: %srechargeable\n", 752 + (!battery->technology)?"non-":""); 753 + 754 + if (battery->design_voltage == ACPI_BATTERY_VALUE_UNKNOWN) 755 + seq_printf(seq, "design voltage: unknown\n"); 756 + else 757 + seq_printf(seq, "design voltage: %d mV\n", 758 + battery->design_voltage); 759 + seq_printf(seq, "design capacity warning: %d %sh\n", 760 + battery->design_capacity_warning, 761 + acpi_battery_units(battery)); 762 + seq_printf(seq, "design capacity low: %d %sh\n", 763 + battery->design_capacity_low, 764 + acpi_battery_units(battery)); 765 + seq_printf(seq, "cycle count: %i\n", battery->cycle_count); 766 + seq_printf(seq, "capacity granularity 1: %d %sh\n", 767 + battery->capacity_granularity_1, 768 + acpi_battery_units(battery)); 769 + seq_printf(seq, "capacity granularity 2: %d %sh\n", 770 + battery->capacity_granularity_2, 771 + acpi_battery_units(battery)); 772 + seq_printf(seq, "model number: %s\n", battery->model_number); 773 + seq_printf(seq, "serial number: %s\n", battery->serial_number); 774 + seq_printf(seq, "battery type: %s\n", battery->type); 775 + seq_printf(seq, "OEM info: %s\n", battery->oem_info); 776 + end: 777 + if (result) 778 + seq_printf(seq, "ERROR: Unable to read battery info\n"); 779 + return result; 780 + } 781 + 782 + static int acpi_battery_print_state(struct seq_file *seq, int result) 783 + { 784 + struct 
acpi_battery *battery = seq->private; 785 + 786 + if (result) 787 + goto end; 788 + 789 + seq_printf(seq, "present: %s\n", 790 + acpi_battery_present(battery) ? "yes" : "no"); 791 + if (!acpi_battery_present(battery)) 792 + goto end; 793 + 794 + seq_printf(seq, "capacity state: %s\n", 795 + (battery->state & 0x04) ? "critical" : "ok"); 796 + if ((battery->state & 0x01) && (battery->state & 0x02)) 797 + seq_printf(seq, 798 + "charging state: charging/discharging\n"); 799 + else if (battery->state & 0x01) 800 + seq_printf(seq, "charging state: discharging\n"); 801 + else if (battery->state & 0x02) 802 + seq_printf(seq, "charging state: charging\n"); 803 + else 804 + seq_printf(seq, "charging state: charged\n"); 805 + 806 + if (battery->rate_now == ACPI_BATTERY_VALUE_UNKNOWN) 807 + seq_printf(seq, "present rate: unknown\n"); 808 + else 809 + seq_printf(seq, "present rate: %d %s\n", 810 + battery->rate_now, acpi_battery_units(battery)); 811 + 812 + if (battery->capacity_now == ACPI_BATTERY_VALUE_UNKNOWN) 813 + seq_printf(seq, "remaining capacity: unknown\n"); 814 + else 815 + seq_printf(seq, "remaining capacity: %d %sh\n", 816 + battery->capacity_now, acpi_battery_units(battery)); 817 + if (battery->voltage_now == ACPI_BATTERY_VALUE_UNKNOWN) 818 + seq_printf(seq, "present voltage: unknown\n"); 819 + else 820 + seq_printf(seq, "present voltage: %d mV\n", 821 + battery->voltage_now); 822 + end: 823 + if (result) 824 + seq_printf(seq, "ERROR: Unable to read battery state\n"); 825 + 826 + return result; 827 + } 828 + 829 + static int acpi_battery_print_alarm(struct seq_file *seq, int result) 830 + { 831 + struct acpi_battery *battery = seq->private; 832 + 833 + if (result) 834 + goto end; 835 + 836 + if (!acpi_battery_present(battery)) { 837 + seq_printf(seq, "present: no\n"); 838 + goto end; 839 + } 840 + seq_printf(seq, "alarm: "); 841 + if (!battery->alarm) 842 + seq_printf(seq, "unsupported\n"); 843 + else 844 + seq_printf(seq, "%u %sh\n", battery->alarm, 845 + 
acpi_battery_units(battery)); 846 + end: 847 + if (result) 848 + seq_printf(seq, "ERROR: Unable to read battery alarm\n"); 849 + return result; 850 + } 851 + 852 + static ssize_t acpi_battery_write_alarm(struct file *file, 853 + const char __user * buffer, 854 + size_t count, loff_t * ppos) 855 + { 856 + int result = 0; 857 + char alarm_string[12] = { '\0' }; 858 + struct seq_file *m = file->private_data; 859 + struct acpi_battery *battery = m->private; 860 + 861 + if (!battery || (count > sizeof(alarm_string) - 1)) 862 + return -EINVAL; 863 + if (!acpi_battery_present(battery)) { 864 + result = -ENODEV; 865 + goto end; 866 + } 867 + if (copy_from_user(alarm_string, buffer, count)) { 868 + result = -EFAULT; 869 + goto end; 870 + } 871 + alarm_string[count] = '\0'; 872 + battery->alarm = simple_strtol(alarm_string, NULL, 0); 873 + result = acpi_battery_set_alarm(battery); 874 + end: 875 + if (!result) 876 + return count; 877 + return result; 878 + } 879 + 880 + typedef int(*print_func)(struct seq_file *seq, int result); 881 + 882 + static print_func acpi_print_funcs[ACPI_BATTERY_NUMFILES] = { 883 + acpi_battery_print_info, 884 + acpi_battery_print_state, 885 + acpi_battery_print_alarm, 886 + }; 887 + 888 + static int acpi_battery_read(int fid, struct seq_file *seq) 889 + { 890 + struct acpi_battery *battery = seq->private; 891 + int result = acpi_battery_update(battery); 892 + return acpi_print_funcs[fid](seq, result); 893 + } 894 + 895 + #define DECLARE_FILE_FUNCTIONS(_name) \ 896 + static int acpi_battery_read_##_name(struct seq_file *seq, void *offset) \ 897 + { \ 898 + return acpi_battery_read(_name##_tag, seq); \ 899 + } \ 900 + static int acpi_battery_##_name##_open_fs(struct inode *inode, struct file *file) \ 901 + { \ 902 + return single_open(file, acpi_battery_read_##_name, PDE_DATA(inode)); \ 903 + } 904 + 905 + DECLARE_FILE_FUNCTIONS(info); 906 + DECLARE_FILE_FUNCTIONS(state); 907 + DECLARE_FILE_FUNCTIONS(alarm); 908 + 909 + #undef DECLARE_FILE_FUNCTIONS 
910 + 911 + #define FILE_DESCRIPTION_RO(_name) \ 912 + { \ 913 + .name = __stringify(_name), \ 914 + .mode = S_IRUGO, \ 915 + .ops = { \ 916 + .open = acpi_battery_##_name##_open_fs, \ 917 + .read = seq_read, \ 918 + .llseek = seq_lseek, \ 919 + .release = single_release, \ 920 + .owner = THIS_MODULE, \ 921 + }, \ 922 + } 923 + 924 + #define FILE_DESCRIPTION_RW(_name) \ 925 + { \ 926 + .name = __stringify(_name), \ 927 + .mode = S_IFREG | S_IRUGO | S_IWUSR, \ 928 + .ops = { \ 929 + .open = acpi_battery_##_name##_open_fs, \ 930 + .read = seq_read, \ 931 + .llseek = seq_lseek, \ 932 + .write = acpi_battery_write_##_name, \ 933 + .release = single_release, \ 934 + .owner = THIS_MODULE, \ 935 + }, \ 936 + } 937 + 938 + static const struct battery_file { 939 + struct file_operations ops; 940 + umode_t mode; 941 + const char *name; 942 + } acpi_battery_file[] = { 943 + FILE_DESCRIPTION_RO(info), 944 + FILE_DESCRIPTION_RO(state), 945 + FILE_DESCRIPTION_RW(alarm), 946 + }; 947 + 948 + #undef FILE_DESCRIPTION_RO 949 + #undef FILE_DESCRIPTION_RW 950 + 951 + static int acpi_battery_add_fs(struct acpi_device *device) 952 + { 953 + struct proc_dir_entry *entry = NULL; 954 + int i; 955 + 956 + printk(KERN_WARNING PREFIX "Deprecated procfs I/F for battery is loaded," 957 + " please retry with CONFIG_ACPI_PROCFS_POWER cleared\n"); 958 + if (!acpi_device_dir(device)) { 959 + acpi_device_dir(device) = proc_mkdir(acpi_device_bid(device), 960 + acpi_battery_dir); 961 + if (!acpi_device_dir(device)) 962 + return -ENODEV; 963 + } 964 + 965 + for (i = 0; i < ACPI_BATTERY_NUMFILES; ++i) { 966 + entry = proc_create_data(acpi_battery_file[i].name, 967 + acpi_battery_file[i].mode, 968 + acpi_device_dir(device), 969 + &acpi_battery_file[i].ops, 970 + acpi_driver_data(device)); 971 + if (!entry) 972 + return -ENODEV; 973 + } 974 + return 0; 975 + } 976 + 977 + static void acpi_battery_remove_fs(struct acpi_device *device) 978 + { 979 + int i; 980 + if (!acpi_device_dir(device)) 981 + return; 
982 + for (i = 0; i < ACPI_BATTERY_NUMFILES; ++i) 983 + remove_proc_entry(acpi_battery_file[i].name, 984 + acpi_device_dir(device)); 985 + 986 + remove_proc_entry(acpi_device_bid(device), acpi_battery_dir); 987 + acpi_device_dir(device) = NULL; 988 + } 989 + 990 + #endif 991 + 992 + /* -------------------------------------------------------------------------- 747 993 Driver Interface 748 994 -------------------------------------------------------------------------- */ 749 995 ··· 1090 790 result = acpi_battery_update(battery); 1091 791 if (result) 1092 792 goto fail; 793 + #ifdef CONFIG_ACPI_PROCFS_POWER 794 + result = acpi_battery_add_fs(device); 795 + #endif 796 + if (result) { 797 + #ifdef CONFIG_ACPI_PROCFS_POWER 798 + acpi_battery_remove_fs(device); 799 + #endif 800 + goto fail; 801 + } 1093 802 1094 803 printk(KERN_INFO PREFIX "%s Slot [%s] (battery %s)\n", 1095 804 ACPI_BATTERY_DEVICE_NAME, acpi_device_bid(device), ··· 1125 816 return -EINVAL; 1126 817 battery = acpi_driver_data(device); 1127 818 unregister_pm_notifier(&battery->pm_nb); 819 + #ifdef CONFIG_ACPI_PROCFS_POWER 820 + acpi_battery_remove_fs(device); 821 + #endif 1128 822 sysfs_remove_battery(battery); 1129 823 mutex_destroy(&battery->lock); 1130 824 mutex_destroy(&battery->sysfs_lock); ··· 1178 866 1179 867 if (dmi_check_system(bat_dmi_table)) 1180 868 battery_bix_broken_package = 1; 1181 - acpi_bus_register_driver(&acpi_battery_driver); 869 + 870 + #ifdef CONFIG_ACPI_PROCFS_POWER 871 + acpi_battery_dir = acpi_lock_battery_dir(); 872 + if (!acpi_battery_dir) 873 + return; 874 + #endif 875 + if (acpi_bus_register_driver(&acpi_battery_driver) < 0) { 876 + #ifdef CONFIG_ACPI_PROCFS_POWER 877 + acpi_unlock_battery_dir(acpi_battery_dir); 878 + #endif 879 + return; 880 + } 881 + return; 1182 882 } 1183 883 1184 884 static int __init acpi_battery_init(void) ··· 1202 878 static void __exit acpi_battery_exit(void) 1203 879 { 1204 880 acpi_bus_unregister_driver(&acpi_battery_driver); 881 + #ifdef 
CONFIG_ACPI_PROCFS_POWER 882 + acpi_unlock_battery_dir(acpi_battery_dir); 883 + #endif 1205 884 } 1206 885 1207 886 module_init(acpi_battery_init);
+21
drivers/acpi/blacklist.c
··· 314 314 DMI_MATCH(DMI_PRODUCT_VERSION, "2349D15"), 315 315 }, 316 316 }, 317 + { 318 + .callback = dmi_disable_osi_win8, 319 + .ident = "Dell Inspiron 7737", 320 + .matches = { 321 + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), 322 + DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 7737"), 323 + }, 324 + }, 317 325 318 326 /* 319 327 * BIOS invocation of _OSI(Linux) is almost always a BIOS bug. ··· 380 372 .matches = { 381 373 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), 382 374 DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T500"), 375 + }, 376 + }, 377 + /* 378 + * Without this this EEEpc exports a non working WMI interface, with 379 + * this it exports a working "good old" eeepc_laptop interface, fixing 380 + * both brightness control, and rfkill not working. 381 + */ 382 + { 383 + .callback = dmi_enable_osi_linux, 384 + .ident = "Asus EEE PC 1015PX", 385 + .matches = { 386 + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer INC."), 387 + DMI_MATCH(DMI_PRODUCT_NAME, "1015PX"), 383 388 }, 384 389 }, 385 390 {}
+105
drivers/acpi/cm_sbs.c
··· 1 + /* 2 + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 3 + * 4 + * This program is free software; you can redistribute it and/or modify 5 + * it under the terms of the GNU General Public License as published by 6 + * the Free Software Foundation; either version 2 of the License, or (at 7 + * your option) any later version. 8 + * 9 + * This program is distributed in the hope that it will be useful, but 10 + * WITHOUT ANY WARRANTY; without even the implied warranty of 11 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 12 + * General Public License for more details. 13 + * 14 + * You should have received a copy of the GNU General Public License along 15 + * with this program; if not, write to the Free Software Foundation, Inc., 16 + * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. 17 + * 18 + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 19 + */ 20 + 21 + #include <linux/kernel.h> 22 + #include <linux/module.h> 23 + #include <linux/init.h> 24 + #include <linux/acpi.h> 25 + #include <linux/types.h> 26 + #include <linux/proc_fs.h> 27 + #include <linux/seq_file.h> 28 + #include <acpi/acpi_bus.h> 29 + #include <acpi/acpi_drivers.h> 30 + 31 + #define PREFIX "ACPI: " 32 + 33 + ACPI_MODULE_NAME("cm_sbs"); 34 + #define ACPI_AC_CLASS "ac_adapter" 35 + #define ACPI_BATTERY_CLASS "battery" 36 + #define _COMPONENT ACPI_SBS_COMPONENT 37 + static struct proc_dir_entry *acpi_ac_dir; 38 + static struct proc_dir_entry *acpi_battery_dir; 39 + 40 + static DEFINE_MUTEX(cm_sbs_mutex); 41 + 42 + static int lock_ac_dir_cnt; 43 + static int lock_battery_dir_cnt; 44 + 45 + struct proc_dir_entry *acpi_lock_ac_dir(void) 46 + { 47 + mutex_lock(&cm_sbs_mutex); 48 + if (!acpi_ac_dir) 49 + acpi_ac_dir = proc_mkdir(ACPI_AC_CLASS, acpi_root_dir); 50 + if (acpi_ac_dir) { 51 + lock_ac_dir_cnt++; 52 + } else { 53 + printk(KERN_ERR PREFIX 54 + "Cannot create %s\n", ACPI_AC_CLASS); 55 + } 56 + 
mutex_unlock(&cm_sbs_mutex); 57 + return acpi_ac_dir; 58 + } 59 + EXPORT_SYMBOL(acpi_lock_ac_dir); 60 + 61 + void acpi_unlock_ac_dir(struct proc_dir_entry *acpi_ac_dir_param) 62 + { 63 + mutex_lock(&cm_sbs_mutex); 64 + if (acpi_ac_dir_param) 65 + lock_ac_dir_cnt--; 66 + if (lock_ac_dir_cnt == 0 && acpi_ac_dir_param && acpi_ac_dir) { 67 + remove_proc_entry(ACPI_AC_CLASS, acpi_root_dir); 68 + acpi_ac_dir = NULL; 69 + } 70 + mutex_unlock(&cm_sbs_mutex); 71 + } 72 + EXPORT_SYMBOL(acpi_unlock_ac_dir); 73 + 74 + struct proc_dir_entry *acpi_lock_battery_dir(void) 75 + { 76 + mutex_lock(&cm_sbs_mutex); 77 + if (!acpi_battery_dir) { 78 + acpi_battery_dir = 79 + proc_mkdir(ACPI_BATTERY_CLASS, acpi_root_dir); 80 + } 81 + if (acpi_battery_dir) { 82 + lock_battery_dir_cnt++; 83 + } else { 84 + printk(KERN_ERR PREFIX 85 + "Cannot create %s\n", ACPI_BATTERY_CLASS); 86 + } 87 + mutex_unlock(&cm_sbs_mutex); 88 + return acpi_battery_dir; 89 + } 90 + EXPORT_SYMBOL(acpi_lock_battery_dir); 91 + 92 + void acpi_unlock_battery_dir(struct proc_dir_entry *acpi_battery_dir_param) 93 + { 94 + mutex_lock(&cm_sbs_mutex); 95 + if (acpi_battery_dir_param) 96 + lock_battery_dir_cnt--; 97 + if (lock_battery_dir_cnt == 0 && acpi_battery_dir_param 98 + && acpi_battery_dir) { 99 + remove_proc_entry(ACPI_BATTERY_CLASS, acpi_root_dir); 100 + acpi_battery_dir = NULL; 101 + } 102 + mutex_unlock(&cm_sbs_mutex); 103 + return; 104 + } 105 + EXPORT_SYMBOL(acpi_unlock_battery_dir);
+12 -4
drivers/acpi/video.c
··· 457 457 }, 458 458 { 459 459 .callback = video_set_use_native_backlight, 460 - .ident = "ThinkPad T430s", 460 + .ident = "ThinkPad T430 and T430s", 461 461 .matches = { 462 462 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), 463 - DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T430s"), 463 + DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T430"), 464 464 }, 465 465 }, 466 466 { ··· 472 472 }, 473 473 }, 474 474 { 475 - .callback = video_set_use_native_backlight, 475 + .callback = video_set_use_native_backlight, 476 476 .ident = "ThinkPad X1 Carbon", 477 477 .matches = { 478 478 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), ··· 500 500 .ident = "Dell Inspiron 7520", 501 501 .matches = { 502 502 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), 503 - DMI_MATCH(DMI_PRODUCT_VERSION, "Inspiron 7520"), 503 + DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 7520"), 504 504 }, 505 505 }, 506 506 { ··· 509 509 .matches = { 510 510 DMI_MATCH(DMI_SYS_VENDOR, "Acer"), 511 511 DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5733Z"), 512 + }, 513 + }, 514 + { 515 + .callback = video_set_use_native_backlight, 516 + .ident = "Acer Aspire 5742G", 517 + .matches = { 518 + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), 519 + DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5742G"), 512 520 }, 513 521 }, 514 522 {
+1 -1
drivers/ata/Kconfig
··· 815 815 816 816 config PATA_AT91 817 817 tristate "PATA support for AT91SAM9260" 818 - depends on ARM && ARCH_AT91 818 + depends on ARM && SOC_AT91SAM9 819 819 help 820 820 This option enables support for IDE devices on the Atmel AT91SAM9260 SoC. 821 821
+15
drivers/ata/ahci.c
··· 1115 1115 return pdev->bus->number == (val >> 8) && pdev->devfn == (val & 0xff); 1116 1116 } 1117 1117 1118 + static bool ahci_broken_devslp(struct pci_dev *pdev) 1119 + { 1120 + /* device with broken DEVSLP but still showing SDS capability */ 1121 + static const struct pci_device_id ids[] = { 1122 + { PCI_VDEVICE(INTEL, 0x0f23)}, /* Valleyview SoC */ 1123 + {} 1124 + }; 1125 + 1126 + return pci_match_id(ids, pdev); 1127 + } 1128 + 1118 1129 #ifdef CONFIG_ATA_ACPI 1119 1130 static void ahci_gtf_filter_workaround(struct ata_host *host) 1120 1131 { ··· 1374 1363 hpriv->flags &= ~AHCI_HFLAG_32BIT_ONLY; 1375 1364 1376 1365 hpriv->mmio = pcim_iomap_table(pdev)[ahci_pci_bar]; 1366 + 1367 + /* must set flag prior to save config in order to take effect */ 1368 + if (ahci_broken_devslp(pdev)) 1369 + hpriv->flags |= AHCI_HFLAG_NO_DEVSLP; 1377 1370 1378 1371 /* save initial config */ 1379 1372 ahci_pci_save_initial_config(pdev, hpriv);
+1
drivers/ata/ahci.h
··· 236 236 port start (wait until 237 237 error-handling stage) */ 238 238 AHCI_HFLAG_MULTI_MSI = (1 << 16), /* multiple PCI MSIs */ 239 + AHCI_HFLAG_NO_DEVSLP = (1 << 17), /* no device sleep */ 239 240 240 241 /* ap->flags bits */ 241 242
+172 -7
drivers/ata/ahci_imx.c
··· 29 29 #include "ahci.h" 30 30 31 31 enum { 32 - PORT_PHY_CTL = 0x178, /* Port0 PHY Control */ 33 - PORT_PHY_CTL_PDDQ_LOC = 0x100000, /* PORT_PHY_CTL bits */ 34 - HOST_TIMER1MS = 0xe0, /* Timer 1-ms */ 32 + /* Timer 1-ms Register */ 33 + IMX_TIMER1MS = 0x00e0, 34 + /* Port0 PHY Control Register */ 35 + IMX_P0PHYCR = 0x0178, 36 + IMX_P0PHYCR_TEST_PDDQ = 1 << 20, 37 + IMX_P0PHYCR_CR_READ = 1 << 19, 38 + IMX_P0PHYCR_CR_WRITE = 1 << 18, 39 + IMX_P0PHYCR_CR_CAP_DATA = 1 << 17, 40 + IMX_P0PHYCR_CR_CAP_ADDR = 1 << 16, 41 + /* Port0 PHY Status Register */ 42 + IMX_P0PHYSR = 0x017c, 43 + IMX_P0PHYSR_CR_ACK = 1 << 18, 44 + IMX_P0PHYSR_CR_DATA_OUT = 0xffff << 0, 45 + /* Lane0 Output Status Register */ 46 + IMX_LANE0_OUT_STAT = 0x2003, 47 + IMX_LANE0_OUT_STAT_RX_PLL_STATE = 1 << 1, 48 + /* Clock Reset Register */ 49 + IMX_CLOCK_RESET = 0x7f3f, 50 + IMX_CLOCK_RESET_RESET = 1 << 0, 35 51 }; 36 52 37 53 enum ahci_imx_type { ··· 70 54 71 55 static void ahci_imx_host_stop(struct ata_host *host); 72 56 57 + static int imx_phy_crbit_assert(void __iomem *mmio, u32 bit, bool assert) 58 + { 59 + int timeout = 10; 60 + u32 crval; 61 + u32 srval; 62 + 63 + /* Assert or deassert the bit */ 64 + crval = readl(mmio + IMX_P0PHYCR); 65 + if (assert) 66 + crval |= bit; 67 + else 68 + crval &= ~bit; 69 + writel(crval, mmio + IMX_P0PHYCR); 70 + 71 + /* Wait for the cr_ack signal */ 72 + do { 73 + srval = readl(mmio + IMX_P0PHYSR); 74 + if ((assert ? srval : ~srval) & IMX_P0PHYSR_CR_ACK) 75 + break; 76 + usleep_range(100, 200); 77 + } while (--timeout); 78 + 79 + return timeout ? 
0 : -ETIMEDOUT; 80 + } 81 + 82 + static int imx_phy_reg_addressing(u16 addr, void __iomem *mmio) 83 + { 84 + u32 crval = addr; 85 + int ret; 86 + 87 + /* Supply the address on cr_data_in */ 88 + writel(crval, mmio + IMX_P0PHYCR); 89 + 90 + /* Assert the cr_cap_addr signal */ 91 + ret = imx_phy_crbit_assert(mmio, IMX_P0PHYCR_CR_CAP_ADDR, true); 92 + if (ret) 93 + return ret; 94 + 95 + /* Deassert cr_cap_addr */ 96 + ret = imx_phy_crbit_assert(mmio, IMX_P0PHYCR_CR_CAP_ADDR, false); 97 + if (ret) 98 + return ret; 99 + 100 + return 0; 101 + } 102 + 103 + static int imx_phy_reg_write(u16 val, void __iomem *mmio) 104 + { 105 + u32 crval = val; 106 + int ret; 107 + 108 + /* Supply the data on cr_data_in */ 109 + writel(crval, mmio + IMX_P0PHYCR); 110 + 111 + /* Assert the cr_cap_data signal */ 112 + ret = imx_phy_crbit_assert(mmio, IMX_P0PHYCR_CR_CAP_DATA, true); 113 + if (ret) 114 + return ret; 115 + 116 + /* Deassert cr_cap_data */ 117 + ret = imx_phy_crbit_assert(mmio, IMX_P0PHYCR_CR_CAP_DATA, false); 118 + if (ret) 119 + return ret; 120 + 121 + if (val & IMX_CLOCK_RESET_RESET) { 122 + /* 123 + * In case we're resetting the phy, it's unable to acknowledge, 124 + * so we return immediately here. 
125 + */ 126 + crval |= IMX_P0PHYCR_CR_WRITE; 127 + writel(crval, mmio + IMX_P0PHYCR); 128 + goto out; 129 + } 130 + 131 + /* Assert the cr_write signal */ 132 + ret = imx_phy_crbit_assert(mmio, IMX_P0PHYCR_CR_WRITE, true); 133 + if (ret) 134 + return ret; 135 + 136 + /* Deassert cr_write */ 137 + ret = imx_phy_crbit_assert(mmio, IMX_P0PHYCR_CR_WRITE, false); 138 + if (ret) 139 + return ret; 140 + 141 + out: 142 + return 0; 143 + } 144 + 145 + static int imx_phy_reg_read(u16 *val, void __iomem *mmio) 146 + { 147 + int ret; 148 + 149 + /* Assert the cr_read signal */ 150 + ret = imx_phy_crbit_assert(mmio, IMX_P0PHYCR_CR_READ, true); 151 + if (ret) 152 + return ret; 153 + 154 + /* Capture the data from cr_data_out[] */ 155 + *val = readl(mmio + IMX_P0PHYSR) & IMX_P0PHYSR_CR_DATA_OUT; 156 + 157 + /* Deassert cr_read */ 158 + ret = imx_phy_crbit_assert(mmio, IMX_P0PHYCR_CR_READ, false); 159 + if (ret) 160 + return ret; 161 + 162 + return 0; 163 + } 164 + 165 + static int imx_sata_phy_reset(struct ahci_host_priv *hpriv) 166 + { 167 + void __iomem *mmio = hpriv->mmio; 168 + int timeout = 10; 169 + u16 val; 170 + int ret; 171 + 172 + /* Reset SATA PHY by setting RESET bit of PHY register CLOCK_RESET */ 173 + ret = imx_phy_reg_addressing(IMX_CLOCK_RESET, mmio); 174 + if (ret) 175 + return ret; 176 + ret = imx_phy_reg_write(IMX_CLOCK_RESET_RESET, mmio); 177 + if (ret) 178 + return ret; 179 + 180 + /* Wait for PHY RX_PLL to be stable */ 181 + do { 182 + usleep_range(100, 200); 183 + ret = imx_phy_reg_addressing(IMX_LANE0_OUT_STAT, mmio); 184 + if (ret) 185 + return ret; 186 + ret = imx_phy_reg_read(&val, mmio); 187 + if (ret) 188 + return ret; 189 + if (val & IMX_LANE0_OUT_STAT_RX_PLL_STATE) 190 + break; 191 + } while (--timeout); 192 + 193 + return timeout ? 
0 : -ETIMEDOUT; 194 + } 195 + 73 196 static int imx_sata_enable(struct ahci_host_priv *hpriv) 74 197 { 75 198 struct imx_ahci_priv *imxpriv = hpriv->plat_data; 199 + struct device *dev = &imxpriv->ahci_pdev->dev; 76 200 int ret; 77 201 78 202 if (imxpriv->no_device) ··· 257 101 regmap_update_bits(imxpriv->gpr, IOMUXC_GPR13, 258 102 IMX6Q_GPR13_SATA_MPLL_CLK_EN, 259 103 IMX6Q_GPR13_SATA_MPLL_CLK_EN); 104 + 105 + usleep_range(100, 200); 106 + 107 + ret = imx_sata_phy_reset(hpriv); 108 + if (ret) { 109 + dev_err(dev, "failed to reset phy: %d\n", ret); 110 + goto disable_regulator; 111 + } 260 112 } 261 113 262 114 usleep_range(1000, 2000); ··· 320 156 * without full reset once the pddq mode is enabled making it 321 157 * impossible to use as part of libata LPM. 322 158 */ 323 - reg_val = readl(mmio + PORT_PHY_CTL); 324 - writel(reg_val | PORT_PHY_CTL_PDDQ_LOC, mmio + PORT_PHY_CTL); 159 + reg_val = readl(mmio + IMX_P0PHYCR); 160 + writel(reg_val | IMX_P0PHYCR_TEST_PDDQ, mmio + IMX_P0PHYCR); 325 161 imx_sata_disable(hpriv); 326 162 imxpriv->no_device = true; 327 163 } ··· 381 217 if (!imxpriv) 382 218 return -ENOMEM; 383 219 220 + imxpriv->ahci_pdev = pdev; 384 221 imxpriv->no_device = false; 385 222 imxpriv->first_time = true; 386 223 imxpriv->type = (enum ahci_imx_type)of_id->data; ··· 413 248 414 249 /* 415 250 * Configure the HWINIT bits of the HOST_CAP and HOST_PORTS_IMPL, 416 - * and IP vendor specific register HOST_TIMER1MS. 251 + * and IP vendor specific register IMX_TIMER1MS. 417 252 * Configure CAP_SSS (support stagered spin up). 418 253 * Implement the port0. 419 254 * Get the ahb clock rate, and configure the TIMER1MS register. ··· 430 265 } 431 266 432 267 reg_val = clk_get_rate(imxpriv->ahb_clk) / 1000; 433 - writel(reg_val, hpriv->mmio + HOST_TIMER1MS); 268 + writel(reg_val, hpriv->mmio + IMX_TIMER1MS); 434 269 435 270 ret = ahci_platform_init_host(pdev, hpriv, &ahci_imx_port_info, 0, 0); 436 271 if (ret)
+7
drivers/ata/libahci.c
··· 452 452 cap &= ~HOST_CAP_SNTF; 453 453 } 454 454 455 + if ((cap2 & HOST_CAP2_SDS) && (hpriv->flags & AHCI_HFLAG_NO_DEVSLP)) { 456 + dev_info(dev, 457 + "controller can't do DEVSLP, turning off\n"); 458 + cap2 &= ~HOST_CAP2_SDS; 459 + cap2 &= ~HOST_CAP2_SADM; 460 + } 461 + 455 462 if (!(cap & HOST_CAP_FBS) && (hpriv->flags & AHCI_HFLAG_YES_FBS)) { 456 463 dev_info(dev, "controller can do FBS, turning on CAP_FBS\n"); 457 464 cap |= HOST_CAP_FBS;
+9
drivers/ata/libata-core.c
··· 6314 6314 static void ata_port_detach(struct ata_port *ap) 6315 6315 { 6316 6316 unsigned long flags; 6317 + struct ata_link *link; 6318 + struct ata_device *dev; 6317 6319 6318 6320 if (!ap->ops->error_handler) 6319 6321 goto skip_eh; ··· 6335 6333 cancel_delayed_work_sync(&ap->hotplug_task); 6336 6334 6337 6335 skip_eh: 6336 + /* clean up zpodd on port removal */ 6337 + ata_for_each_link(link, ap, HOST_FIRST) { 6338 + ata_for_each_dev(dev, link, ALL) { 6339 + if (zpodd_dev_enabled(dev)) 6340 + zpodd_exit(dev); 6341 + } 6342 + } 6338 6343 if (ap->pmp_link) { 6339 6344 int i; 6340 6345 for (i = 0; i < SATA_PMP_MAX_PORTS; i++)
+16 -6
drivers/bus/mvebu-mbus.c
··· 56 56 #include <linux/of.h> 57 57 #include <linux/of_address.h> 58 58 #include <linux/debugfs.h> 59 + #include <linux/log2.h> 59 60 60 61 /* 61 62 * DDR target is the same on all platforms. ··· 223 222 */ 224 223 if ((u64)base < wend && end > wbase) 225 224 return 0; 226 - 227 - /* 228 - * Check if target/attribute conflicts 229 - */ 230 - if (target == wtarget && attr == wattr) 231 - return 0; 232 225 } 233 226 234 227 return 1; ··· 260 265 void __iomem *addr = mbus->mbuswins_base + 261 266 mbus->soc->win_cfg_offset(win); 262 267 u32 ctrl, remap_addr; 268 + 269 + if (!is_power_of_2(size)) { 270 + WARN(true, "Invalid MBus window size: 0x%zx\n", size); 271 + return -EINVAL; 272 + } 273 + 274 + if ((base & (phys_addr_t)(size - 1)) != 0) { 275 + WARN(true, "Invalid MBus base/size: %pa len 0x%zx\n", &base, 276 + size); 277 + return -EINVAL; 278 + } 263 279 264 280 ctrl = ((size - 1) & WIN_CTRL_SIZE_MASK) | 265 281 (attr << WIN_CTRL_ATTR_SHIFT) | ··· 418 412 seq_printf(seq, "[%02d] %016llx - %016llx : %04x:%04x", 419 413 win, (unsigned long long)wbase, 420 414 (unsigned long long)(wbase + wsize), wtarget, wattr); 415 + 416 + if (!is_power_of_2(wsize) || 417 + ((wbase & (u64)(wsize - 1)) != 0)) 418 + seq_puts(seq, " (Invalid base/size!!)"); 421 419 422 420 if (win < mbus->soc->num_remappable_wins) { 423 421 seq_printf(seq, " (remap %016llx)\n",
+5 -2
drivers/char/random.c
··· 995 995 ibytes = min_t(size_t, ibytes, have_bytes - reserved); 996 996 if (ibytes < min) 997 997 ibytes = 0; 998 - entropy_count = max_t(int, 0, 999 - entropy_count - (ibytes << (ENTROPY_SHIFT + 3))); 998 + if (have_bytes >= ibytes + reserved) 999 + entropy_count -= ibytes << (ENTROPY_SHIFT + 3); 1000 + else 1001 + entropy_count = reserved << (ENTROPY_SHIFT + 3); 1002 + 1000 1003 if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig) 1001 1004 goto retry; 1002 1005
+3 -5
drivers/char/tpm/tpm_ppi.c
··· 328 328 /* Cache TPM ACPI handle and version string */ 329 329 acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT, ACPI_UINT32_MAX, 330 330 ppi_callback, NULL, NULL, &tpm_ppi_handle); 331 - if (tpm_ppi_handle == NULL) 332 - return -ENODEV; 333 - 334 - return sysfs_create_group(parent, &ppi_attr_grp); 331 + return tpm_ppi_handle ? sysfs_create_group(parent, &ppi_attr_grp) : 0; 335 332 } 336 333 337 334 void tpm_remove_ppi(struct kobject *parent) 338 335 { 339 - sysfs_remove_group(parent, &ppi_attr_grp); 336 + if (tpm_ppi_handle) 337 + sysfs_remove_group(parent, &ppi_attr_grp); 340 338 }
+17 -16
drivers/clk/bcm/clk-kona-setup.c
··· 27 27 28 28 static bool clk_requires_trigger(struct kona_clk *bcm_clk) 29 29 { 30 - struct peri_clk_data *peri = bcm_clk->peri; 30 + struct peri_clk_data *peri = bcm_clk->u.peri; 31 31 struct bcm_clk_sel *sel; 32 32 struct bcm_clk_div *div; 33 33 ··· 63 63 u32 limit; 64 64 65 65 BUG_ON(bcm_clk->type != bcm_clk_peri); 66 - peri = bcm_clk->peri; 66 + peri = bcm_clk->u.peri; 67 67 name = bcm_clk->name; 68 68 range = bcm_clk->ccu->range; 69 69 ··· 81 81 82 82 div = &peri->div; 83 83 if (divider_exists(div)) { 84 - if (div->offset > limit) { 84 + if (div->u.s.offset > limit) { 85 85 pr_err("%s: bad divider offset for %s (%u > %u)\n", 86 - __func__, name, div->offset, limit); 86 + __func__, name, div->u.s.offset, limit); 87 87 return false; 88 88 } 89 89 } 90 90 91 91 div = &peri->pre_div; 92 92 if (divider_exists(div)) { 93 - if (div->offset > limit) { 93 + if (div->u.s.offset > limit) { 94 94 pr_err("%s: bad pre-divider offset for %s " 95 95 "(%u > %u)\n", 96 - __func__, name, div->offset, limit); 96 + __func__, name, div->u.s.offset, limit); 97 97 return false; 98 98 } 99 99 } ··· 249 249 { 250 250 if (divider_is_fixed(div)) { 251 251 /* Any fixed divider value but 0 is OK */ 252 - if (div->fixed == 0) { 252 + if (div->u.fixed == 0) { 253 253 pr_err("%s: bad %s fixed value 0 for %s\n", __func__, 254 254 field_name, clock_name); 255 255 return false; 256 256 } 257 257 return true; 258 258 } 259 - if (!bitfield_valid(div->shift, div->width, field_name, clock_name)) 259 + if (!bitfield_valid(div->u.s.shift, div->u.s.width, 260 + field_name, clock_name)) 260 261 return false; 261 262 262 263 if (divider_has_fraction(div)) 263 - if (div->frac_width > div->width) { 264 + if (div->u.s.frac_width > div->u.s.width) { 264 265 pr_warn("%s: bad %s fraction width for %s (%u > %u)\n", 265 266 __func__, field_name, clock_name, 266 - div->frac_width, div->width); 267 + div->u.s.frac_width, div->u.s.width); 267 268 return false; 268 269 } 269 270 ··· 279 278 */ 280 279 static bool 
kona_dividers_valid(struct kona_clk *bcm_clk) 281 280 { 282 - struct peri_clk_data *peri = bcm_clk->peri; 281 + struct peri_clk_data *peri = bcm_clk->u.peri; 283 282 struct bcm_clk_div *div; 284 283 struct bcm_clk_div *pre_div; 285 284 u32 limit; ··· 296 295 297 296 limit = BITS_PER_BYTE * sizeof(u32); 298 297 299 - return div->frac_width + pre_div->frac_width <= limit; 298 + return div->u.s.frac_width + pre_div->u.s.frac_width <= limit; 300 299 } 301 300 302 301 ··· 329 328 if (!peri_clk_data_offsets_valid(bcm_clk)) 330 329 return false; 331 330 332 - peri = bcm_clk->peri; 331 + peri = bcm_clk->u.peri; 333 332 name = bcm_clk->name; 334 333 gate = &peri->gate; 335 334 if (gate_exists(gate) && !gate_valid(gate, "gate", name)) ··· 589 588 { 590 589 switch (bcm_clk->type) { 591 590 case bcm_clk_peri: 592 - peri_clk_teardown(bcm_clk->data, &bcm_clk->init_data); 591 + peri_clk_teardown(bcm_clk->u.data, &bcm_clk->init_data); 593 592 break; 594 593 default: 595 594 break; 596 595 } 597 - bcm_clk->data = NULL; 596 + bcm_clk->u.data = NULL; 598 597 bcm_clk->type = bcm_clk_none; 599 598 } 600 599 ··· 645 644 break; 646 645 } 647 646 bcm_clk->type = type; 648 - bcm_clk->data = data; 647 + bcm_clk->u.data = data; 649 648 650 649 /* Make sure everything makes sense before we set it up */ 651 650 if (!kona_clk_valid(bcm_clk)) {
+33 -31
drivers/clk/bcm/clk-kona.c
··· 61 61 /* Convert a divider into the scaled divisor value it represents. */ 62 62 static inline u64 scaled_div_value(struct bcm_clk_div *div, u32 reg_div) 63 63 { 64 - return (u64)reg_div + ((u64)1 << div->frac_width); 64 + return (u64)reg_div + ((u64)1 << div->u.s.frac_width); 65 65 } 66 66 67 67 /* ··· 77 77 BUG_ON(billionths >= BILLION); 78 78 79 79 combined = (u64)div_value * BILLION + billionths; 80 - combined <<= div->frac_width; 80 + combined <<= div->u.s.frac_width; 81 81 82 82 return do_div_round_closest(combined, BILLION); 83 83 } ··· 87 87 scaled_div_min(struct bcm_clk_div *div) 88 88 { 89 89 if (divider_is_fixed(div)) 90 - return (u64)div->fixed; 90 + return (u64)div->u.fixed; 91 91 92 92 return scaled_div_value(div, 0); 93 93 } ··· 98 98 u32 reg_div; 99 99 100 100 if (divider_is_fixed(div)) 101 - return (u64)div->fixed; 101 + return (u64)div->u.fixed; 102 102 103 - reg_div = ((u32)1 << div->width) - 1; 103 + reg_div = ((u32)1 << div->u.s.width) - 1; 104 104 105 105 return scaled_div_value(div, reg_div); 106 106 } ··· 115 115 BUG_ON(scaled_div < scaled_div_min(div)); 116 116 BUG_ON(scaled_div > scaled_div_max(div)); 117 117 118 - return (u32)(scaled_div - ((u64)1 << div->frac_width)); 118 + return (u32)(scaled_div - ((u64)1 << div->u.s.frac_width)); 119 119 } 120 120 121 121 /* Return a rate scaled for use when dividing by a scaled divisor. 
*/ ··· 125 125 if (divider_is_fixed(div)) 126 126 return (u64)rate; 127 127 128 - return (u64)rate << div->frac_width; 128 + return (u64)rate << div->u.s.frac_width; 129 129 } 130 130 131 131 /* CCU access */ ··· 398 398 u32 reg_div; 399 399 400 400 if (divider_is_fixed(div)) 401 - return (u64)div->fixed; 401 + return (u64)div->u.fixed; 402 402 403 403 flags = ccu_lock(ccu); 404 - reg_val = __ccu_read(ccu, div->offset); 404 + reg_val = __ccu_read(ccu, div->u.s.offset); 405 405 ccu_unlock(ccu, flags); 406 406 407 407 /* Extract the full divider field from the register value */ 408 - reg_div = bitfield_extract(reg_val, div->shift, div->width); 408 + reg_div = bitfield_extract(reg_val, div->u.s.shift, div->u.s.width); 409 409 410 410 /* Return the scaled divisor value it represents */ 411 411 return scaled_div_value(div, reg_div); ··· 433 433 * state was defined in the device tree, we just find out 434 434 * what its current value is rather than updating it. 435 435 */ 436 - if (div->scaled_div == BAD_SCALED_DIV_VALUE) { 437 - reg_val = __ccu_read(ccu, div->offset); 438 - reg_div = bitfield_extract(reg_val, div->shift, div->width); 439 - div->scaled_div = scaled_div_value(div, reg_div); 436 + if (div->u.s.scaled_div == BAD_SCALED_DIV_VALUE) { 437 + reg_val = __ccu_read(ccu, div->u.s.offset); 438 + reg_div = bitfield_extract(reg_val, div->u.s.shift, 439 + div->u.s.width); 440 + div->u.s.scaled_div = scaled_div_value(div, reg_div); 440 441 441 442 return 0; 442 443 } 443 444 444 445 /* Convert the scaled divisor to the value we need to record */ 445 - reg_div = divider(div, div->scaled_div); 446 + reg_div = divider(div, div->u.s.scaled_div); 446 447 447 448 /* Clock needs to be enabled before changing the rate */ 448 449 enabled = __is_clk_gate_enabled(ccu, gate); ··· 453 452 } 454 453 455 454 /* Replace the divider value and record the result */ 456 - reg_val = __ccu_read(ccu, div->offset); 457 - reg_val = bitfield_replace(reg_val, div->shift, div->width, reg_div); 458 
- __ccu_write(ccu, div->offset, reg_val); 455 + reg_val = __ccu_read(ccu, div->u.s.offset); 456 + reg_val = bitfield_replace(reg_val, div->u.s.shift, div->u.s.width, 457 + reg_div); 458 + __ccu_write(ccu, div->u.s.offset, reg_val); 459 459 460 460 /* If the trigger fails we still want to disable the gate */ 461 461 if (!__clk_trigger(ccu, trig)) ··· 492 490 493 491 BUG_ON(divider_is_fixed(div)); 494 492 495 - previous = div->scaled_div; 493 + previous = div->u.s.scaled_div; 496 494 if (previous == scaled_div) 497 495 return 0; /* No change */ 498 496 499 - div->scaled_div = scaled_div; 497 + div->u.s.scaled_div = scaled_div; 500 498 501 499 flags = ccu_lock(ccu); 502 500 __ccu_write_enable(ccu); ··· 507 505 ccu_unlock(ccu, flags); 508 506 509 507 if (ret) 510 - div->scaled_div = previous; /* Revert the change */ 508 + div->u.s.scaled_div = previous; /* Revert the change */ 511 509 512 510 return ret; 513 511 ··· 804 802 static int kona_peri_clk_enable(struct clk_hw *hw) 805 803 { 806 804 struct kona_clk *bcm_clk = to_kona_clk(hw); 807 - struct bcm_clk_gate *gate = &bcm_clk->peri->gate; 805 + struct bcm_clk_gate *gate = &bcm_clk->u.peri->gate; 808 806 809 807 return clk_gate(bcm_clk->ccu, bcm_clk->name, gate, true); 810 808 } ··· 812 810 static void kona_peri_clk_disable(struct clk_hw *hw) 813 811 { 814 812 struct kona_clk *bcm_clk = to_kona_clk(hw); 815 - struct bcm_clk_gate *gate = &bcm_clk->peri->gate; 813 + struct bcm_clk_gate *gate = &bcm_clk->u.peri->gate; 816 814 817 815 (void)clk_gate(bcm_clk->ccu, bcm_clk->name, gate, false); 818 816 } ··· 820 818 static int kona_peri_clk_is_enabled(struct clk_hw *hw) 821 819 { 822 820 struct kona_clk *bcm_clk = to_kona_clk(hw); 823 - struct bcm_clk_gate *gate = &bcm_clk->peri->gate; 821 + struct bcm_clk_gate *gate = &bcm_clk->u.peri->gate; 824 822 825 823 return is_clk_gate_enabled(bcm_clk->ccu, gate) ? 
1 : 0; 826 824 } ··· 829 827 unsigned long parent_rate) 830 828 { 831 829 struct kona_clk *bcm_clk = to_kona_clk(hw); 832 - struct peri_clk_data *data = bcm_clk->peri; 830 + struct peri_clk_data *data = bcm_clk->u.peri; 833 831 834 832 return clk_recalc_rate(bcm_clk->ccu, &data->div, &data->pre_div, 835 833 parent_rate); ··· 839 837 unsigned long *parent_rate) 840 838 { 841 839 struct kona_clk *bcm_clk = to_kona_clk(hw); 842 - struct bcm_clk_div *div = &bcm_clk->peri->div; 840 + struct bcm_clk_div *div = &bcm_clk->u.peri->div; 843 841 844 842 if (!divider_exists(div)) 845 843 return __clk_get_rate(hw->clk); 846 844 847 845 /* Quietly avoid a zero rate */ 848 - return round_rate(bcm_clk->ccu, div, &bcm_clk->peri->pre_div, 846 + return round_rate(bcm_clk->ccu, div, &bcm_clk->u.peri->pre_div, 849 847 rate ? rate : 1, *parent_rate, NULL); 850 848 } 851 849 852 850 static int kona_peri_clk_set_parent(struct clk_hw *hw, u8 index) 853 851 { 854 852 struct kona_clk *bcm_clk = to_kona_clk(hw); 855 - struct peri_clk_data *data = bcm_clk->peri; 853 + struct peri_clk_data *data = bcm_clk->u.peri; 856 854 struct bcm_clk_sel *sel = &data->sel; 857 855 struct bcm_clk_trig *trig; 858 856 int ret; ··· 886 884 static u8 kona_peri_clk_get_parent(struct clk_hw *hw) 887 885 { 888 886 struct kona_clk *bcm_clk = to_kona_clk(hw); 889 - struct peri_clk_data *data = bcm_clk->peri; 887 + struct peri_clk_data *data = bcm_clk->u.peri; 890 888 u8 index; 891 889 892 890 index = selector_read_index(bcm_clk->ccu, &data->sel); ··· 899 897 unsigned long parent_rate) 900 898 { 901 899 struct kona_clk *bcm_clk = to_kona_clk(hw); 902 - struct peri_clk_data *data = bcm_clk->peri; 900 + struct peri_clk_data *data = bcm_clk->u.peri; 903 901 struct bcm_clk_div *div = &data->div; 904 902 u64 scaled_div = 0; 905 903 int ret; ··· 960 958 static bool __peri_clk_init(struct kona_clk *bcm_clk) 961 959 { 962 960 struct ccu_data *ccu = bcm_clk->ccu; 963 - struct peri_clk_data *peri = bcm_clk->peri; 961 + struct 
peri_clk_data *peri = bcm_clk->u.peri; 964 962 const char *name = bcm_clk->name; 965 963 struct bcm_clk_trig *trig; 966 964
+14 -14
drivers/clk/bcm/clk-kona.h
··· 57 57 #define divider_exists(div) FLAG_TEST(div, DIV, EXISTS) 58 58 #define divider_is_fixed(div) FLAG_TEST(div, DIV, FIXED) 59 59 #define divider_has_fraction(div) (!divider_is_fixed(div) && \ 60 - (div)->frac_width > 0) 60 + (div)->u.s.frac_width > 0) 61 61 62 62 #define selector_exists(sel) ((sel)->width != 0) 63 63 #define trigger_exists(trig) FLAG_TEST(trig, TRIG, EXISTS) ··· 244 244 u32 frac_width; /* field fraction width */ 245 245 246 246 u64 scaled_div; /* scaled divider value */ 247 - }; 247 + } s; 248 248 u32 fixed; /* non-zero fixed divider value */ 249 - }; 249 + } u; 250 250 u32 flags; /* BCM_CLK_DIV_FLAGS_* below */ 251 251 }; 252 252 ··· 263 263 /* A fixed (non-zero) divider */ 264 264 #define FIXED_DIVIDER(_value) \ 265 265 { \ 266 - .fixed = (_value), \ 266 + .u.fixed = (_value), \ 267 267 .flags = FLAG(DIV, EXISTS)|FLAG(DIV, FIXED), \ 268 268 } 269 269 270 270 /* A divider with an integral divisor */ 271 271 #define DIVIDER(_offset, _shift, _width) \ 272 272 { \ 273 - .offset = (_offset), \ 274 - .shift = (_shift), \ 275 - .width = (_width), \ 276 - .scaled_div = BAD_SCALED_DIV_VALUE, \ 273 + .u.s.offset = (_offset), \ 274 + .u.s.shift = (_shift), \ 275 + .u.s.width = (_width), \ 276 + .u.s.scaled_div = BAD_SCALED_DIV_VALUE, \ 277 277 .flags = FLAG(DIV, EXISTS), \ 278 278 } 279 279 280 280 /* A divider whose divisor has an integer and fractional part */ 281 281 #define FRAC_DIVIDER(_offset, _shift, _width, _frac_width) \ 282 282 { \ 283 - .offset = (_offset), \ 284 - .shift = (_shift), \ 285 - .width = (_width), \ 286 - .frac_width = (_frac_width), \ 287 - .scaled_div = BAD_SCALED_DIV_VALUE, \ 283 + .u.s.offset = (_offset), \ 284 + .u.s.shift = (_shift), \ 285 + .u.s.width = (_width), \ 286 + .u.s.frac_width = (_frac_width), \ 287 + .u.s.scaled_div = BAD_SCALED_DIV_VALUE, \ 288 288 .flags = FLAG(DIV, EXISTS), \ 289 289 } 290 290 ··· 380 380 union { 381 381 void *data; 382 382 struct peri_clk_data *peri; 383 - }; 383 + } u; 384 384 }; 385 385 
#define to_kona_clk(_hw) \ 386 386 container_of(_hw, struct kona_clk, hw)
+36 -1
drivers/clk/clk-divider.c
··· 144 144 return true; 145 145 } 146 146 147 + static int _round_up_table(const struct clk_div_table *table, int div) 148 + { 149 + const struct clk_div_table *clkt; 150 + int up = _get_table_maxdiv(table); 151 + 152 + for (clkt = table; clkt->div; clkt++) { 153 + if (clkt->div == div) 154 + return clkt->div; 155 + else if (clkt->div < div) 156 + continue; 157 + 158 + if ((clkt->div - div) < (up - div)) 159 + up = clkt->div; 160 + } 161 + 162 + return up; 163 + } 164 + 165 + static int _div_round_up(struct clk_divider *divider, 166 + unsigned long parent_rate, unsigned long rate) 167 + { 168 + int div = DIV_ROUND_UP(parent_rate, rate); 169 + 170 + if (divider->flags & CLK_DIVIDER_POWER_OF_TWO) 171 + div = __roundup_pow_of_two(div); 172 + if (divider->table) 173 + div = _round_up_table(divider->table, div); 174 + 175 + return div; 176 + } 177 + 147 178 static int clk_divider_bestdiv(struct clk_hw *hw, unsigned long rate, 148 179 unsigned long *best_parent_rate) 149 180 { ··· 190 159 191 160 if (!(__clk_get_flags(hw->clk) & CLK_SET_RATE_PARENT)) { 192 161 parent_rate = *best_parent_rate; 193 - bestdiv = DIV_ROUND_UP(parent_rate, rate); 162 + bestdiv = _div_round_up(divider, parent_rate, rate); 194 163 bestdiv = bestdiv == 0 ? 1 : bestdiv; 195 164 bestdiv = bestdiv > maxdiv ? maxdiv : bestdiv; 196 165 return bestdiv; ··· 250 219 u32 val; 251 220 252 221 div = DIV_ROUND_UP(parent_rate, rate); 222 + 223 + if (!_is_valid_div(divider, div)) 224 + return -EINVAL; 225 + 253 226 value = _get_val(divider, div); 254 227 255 228 if (value > div_mask(divider))
+32 -42
drivers/clk/clk.c
··· 1984 1984 } 1985 1985 EXPORT_SYMBOL_GPL(__clk_register); 1986 1986 1987 - static int _clk_register(struct device *dev, struct clk_hw *hw, struct clk *clk) 1987 + /** 1988 + * clk_register - allocate a new clock, register it and return an opaque cookie 1989 + * @dev: device that is registering this clock 1990 + * @hw: link to hardware-specific clock data 1991 + * 1992 + * clk_register is the primary interface for populating the clock tree with new 1993 + * clock nodes. It returns a pointer to the newly allocated struct clk which 1994 + * cannot be dereferenced by driver code but may be used in conjuction with the 1995 + * rest of the clock API. In the event of an error clk_register will return an 1996 + * error code; drivers must test for an error code after calling clk_register. 1997 + */ 1998 + struct clk *clk_register(struct device *dev, struct clk_hw *hw) 1988 1999 { 1989 2000 int i, ret; 2001 + struct clk *clk; 2002 + 2003 + clk = kzalloc(sizeof(*clk), GFP_KERNEL); 2004 + if (!clk) { 2005 + pr_err("%s: could not allocate clk\n", __func__); 2006 + ret = -ENOMEM; 2007 + goto fail_out; 2008 + } 1990 2009 1991 2010 clk->name = kstrdup(hw->init->name, GFP_KERNEL); 1992 2011 if (!clk->name) { ··· 2045 2026 2046 2027 ret = __clk_init(dev, clk); 2047 2028 if (!ret) 2048 - return 0; 2029 + return clk; 2049 2030 2050 2031 fail_parent_names_copy: 2051 2032 while (--i >= 0) ··· 2054 2035 fail_parent_names: 2055 2036 kfree(clk->name); 2056 2037 fail_name: 2057 - return ret; 2058 - } 2059 - 2060 - /** 2061 - * clk_register - allocate a new clock, register it and return an opaque cookie 2062 - * @dev: device that is registering this clock 2063 - * @hw: link to hardware-specific clock data 2064 - * 2065 - * clk_register is the primary interface for populating the clock tree with new 2066 - * clock nodes. 
It returns a pointer to the newly allocated struct clk which 2067 - * cannot be dereferenced by driver code but may be used in conjuction with the 2068 - * rest of the clock API. In the event of an error clk_register will return an 2069 - * error code; drivers must test for an error code after calling clk_register. 2070 - */ 2071 - struct clk *clk_register(struct device *dev, struct clk_hw *hw) 2072 - { 2073 - int ret; 2074 - struct clk *clk; 2075 - 2076 - clk = kzalloc(sizeof(*clk), GFP_KERNEL); 2077 - if (!clk) { 2078 - pr_err("%s: could not allocate clk\n", __func__); 2079 - ret = -ENOMEM; 2080 - goto fail_out; 2081 - } 2082 - 2083 - ret = _clk_register(dev, hw, clk); 2084 - if (!ret) 2085 - return clk; 2086 - 2087 2038 kfree(clk); 2088 2039 fail_out: 2089 2040 return ERR_PTR(ret); ··· 2140 2151 2141 2152 if (!hlist_empty(&clk->children)) { 2142 2153 struct clk *child; 2154 + struct hlist_node *t; 2143 2155 2144 2156 /* Reparent all children to the orphan list. */ 2145 - hlist_for_each_entry(child, &clk->children, child_node) 2157 + hlist_for_each_entry_safe(child, t, &clk->children, child_node) 2146 2158 clk_set_parent(child, NULL); 2147 2159 } 2148 2160 ··· 2163 2173 2164 2174 static void devm_clk_release(struct device *dev, void *res) 2165 2175 { 2166 - clk_unregister(res); 2176 + clk_unregister(*(struct clk **)res); 2167 2177 } 2168 2178 2169 2179 /** ··· 2178 2188 struct clk *devm_clk_register(struct device *dev, struct clk_hw *hw) 2179 2189 { 2180 2190 struct clk *clk; 2181 - int ret; 2191 + struct clk **clkp; 2182 2192 2183 - clk = devres_alloc(devm_clk_release, sizeof(*clk), GFP_KERNEL); 2184 - if (!clk) 2193 + clkp = devres_alloc(devm_clk_release, sizeof(*clkp), GFP_KERNEL); 2194 + if (!clkp) 2185 2195 return ERR_PTR(-ENOMEM); 2186 2196 2187 - ret = _clk_register(dev, hw, clk); 2188 - if (!ret) { 2189 - devres_add(dev, clk); 2197 + clk = clk_register(dev, hw); 2198 + if (!IS_ERR(clk)) { 2199 + *clkp = clk; 2200 + devres_add(dev, clkp); 2190 2201 } else 
{ 2191 - devres_free(clk); 2192 - clk = ERR_PTR(ret); 2202 + devres_free(clkp); 2193 2203 } 2194 2204 2195 2205 return clk;
+7 -2
drivers/clk/shmobile/clk-mstp.c
··· 156 156 static void __init cpg_mstp_clocks_init(struct device_node *np) 157 157 { 158 158 struct mstp_clock_group *group; 159 + const char *idxname; 159 160 struct clk **clks; 160 161 unsigned int i; 161 162 ··· 185 184 for (i = 0; i < MSTP_MAX_CLOCKS; ++i) 186 185 clks[i] = ERR_PTR(-ENOENT); 187 186 187 + if (of_find_property(np, "clock-indices", &i)) 188 + idxname = "clock-indices"; 189 + else 190 + idxname = "renesas,clock-indices"; 191 + 188 192 for (i = 0; i < MSTP_MAX_CLOCKS; ++i) { 189 193 const char *parent_name; 190 194 const char *name; ··· 203 197 continue; 204 198 205 199 parent_name = of_clk_get_parent_name(np, i); 206 - ret = of_property_read_u32_index(np, "renesas,clock-indices", i, 207 - &clkidx); 200 + ret = of_property_read_u32_index(np, idxname, i, &clkidx); 208 201 if (parent_name == NULL || ret < 0) 209 202 break; 210 203
+7
drivers/clk/socfpga/clk-pll.c
··· 20 20 #include <linux/clk-provider.h> 21 21 #include <linux/io.h> 22 22 #include <linux/of.h> 23 + #include <linux/of_address.h> 23 24 24 25 #include "clk.h" 25 26 ··· 43 42 #define CLK_MGR_PLL_CLK_SRC_MASK 0x3 44 43 45 44 #define to_socfpga_clk(p) container_of(p, struct socfpga_pll, hw.hw) 45 + 46 + void __iomem *clk_mgr_base_addr; 46 47 47 48 static unsigned long clk_pll_recalc_rate(struct clk_hw *hwclk, 48 49 unsigned long parent_rate) ··· 90 87 const char *clk_name = node->name; 91 88 const char *parent_name[SOCFPGA_MAX_PARENTS]; 92 89 struct clk_init_data init; 90 + struct device_node *clkmgr_np; 93 91 int rc; 94 92 int i = 0; 95 93 ··· 100 96 if (WARN_ON(!pll_clk)) 101 97 return NULL; 102 98 99 + clkmgr_np = of_find_compatible_node(NULL, NULL, "altr,clk-mgr"); 100 + clk_mgr_base_addr = of_iomap(clkmgr_np, 0); 101 + BUG_ON(!clk_mgr_base_addr); 103 102 pll_clk->hw.reg = clk_mgr_base_addr + reg; 104 103 105 104 of_property_read_string(node, "clock-output-names", &clk_name);
+3 -20
drivers/clk/socfpga/clk.c
··· 17 17 * You should have received a copy of the GNU General Public License 18 18 * along with this program. If not, see <http://www.gnu.org/licenses/>. 19 19 */ 20 - #include <linux/clk.h> 21 - #include <linux/clkdev.h> 22 - #include <linux/clk-provider.h> 23 - #include <linux/io.h> 24 20 #include <linux/of.h> 25 - #include <linux/of_address.h> 26 21 27 22 #include "clk.h" 28 23 29 - void __iomem *clk_mgr_base_addr; 30 - 31 - static const struct of_device_id socfpga_child_clocks[] __initconst = { 32 - { .compatible = "altr,socfpga-pll-clock", socfpga_pll_init, }, 33 - { .compatible = "altr,socfpga-perip-clk", socfpga_periph_init, }, 34 - { .compatible = "altr,socfpga-gate-clk", socfpga_gate_init, }, 35 - {}, 36 - }; 37 - 38 - static void __init socfpga_clkmgr_init(struct device_node *node) 39 - { 40 - clk_mgr_base_addr = of_iomap(node, 0); 41 - of_clk_init(socfpga_child_clocks); 42 - } 43 - CLK_OF_DECLARE(socfpga_mgr, "altr,clk-mgr", socfpga_clkmgr_init); 24 + CLK_OF_DECLARE(socfpga_pll_clk, "altr,socfpga-pll-clock", socfpga_pll_init); 25 + CLK_OF_DECLARE(socfpga_perip_clk, "altr,socfpga-perip-clk", socfpga_periph_init); 26 + CLK_OF_DECLARE(socfpga_gate_clk, "altr,socfpga-gate-clk", socfpga_gate_init); 44 27
+1 -1
drivers/clk/tegra/clk-pll.c
··· 1718 1718 "pll_re_vco"); 1719 1719 } else { 1720 1720 val_aux &= ~(PLLE_AUX_PLLRE_SEL | PLLE_AUX_PLLP_SEL); 1721 - pll_writel(val, pll_params->aux_reg, pll); 1721 + pll_writel(val_aux, pll_params->aux_reg, pll); 1722 1722 } 1723 1723 1724 1724 clk = _tegra_clk_register_pll(pll, name, parent_name, flags,
+16 -18
drivers/cpufreq/intel_pstate.c
··· 37 37 #define BYT_RATIOS 0x66a 38 38 #define BYT_VIDS 0x66b 39 39 #define BYT_TURBO_RATIOS 0x66c 40 + #define BYT_TURBO_VIDS 0x66d 40 41 41 42 42 43 #define FRAC_BITS 6 ··· 71 70 }; 72 71 73 72 struct vid_data { 74 - int32_t min; 75 - int32_t max; 73 + int min; 74 + int max; 75 + int turbo; 76 76 int32_t ratio; 77 77 }; 78 78 ··· 361 359 { 362 360 u64 value; 363 361 rdmsrl(BYT_RATIOS, value); 364 - return (value >> 8) & 0xFF; 362 + return (value >> 8) & 0x3F; 365 363 } 366 364 367 365 static int byt_get_max_pstate(void) 368 366 { 369 367 u64 value; 370 368 rdmsrl(BYT_RATIOS, value); 371 - return (value >> 16) & 0xFF; 369 + return (value >> 16) & 0x3F; 372 370 } 373 371 374 372 static int byt_get_turbo_pstate(void) ··· 395 393 vid_fp = clamp_t(int32_t, vid_fp, cpudata->vid.min, cpudata->vid.max); 396 394 vid = fp_toint(vid_fp); 397 395 396 + if (pstate > cpudata->pstate.max_pstate) 397 + vid = cpudata->vid.turbo; 398 + 398 399 val |= vid; 399 400 400 401 wrmsrl(MSR_IA32_PERF_CTL, val); ··· 407 402 { 408 403 u64 value; 409 404 405 + 410 406 rdmsrl(BYT_VIDS, value); 411 - cpudata->vid.min = int_tofp((value >> 8) & 0x7f); 412 - cpudata->vid.max = int_tofp((value >> 16) & 0x7f); 407 + cpudata->vid.min = int_tofp((value >> 8) & 0x3f); 408 + cpudata->vid.max = int_tofp((value >> 16) & 0x3f); 413 409 cpudata->vid.ratio = div_fp( 414 410 cpudata->vid.max - cpudata->vid.min, 415 411 int_tofp(cpudata->pstate.max_pstate - 416 412 cpudata->pstate.min_pstate)); 413 + 414 + rdmsrl(BYT_TURBO_VIDS, value); 415 + cpudata->vid.turbo = value & 0x7f; 417 416 } 418 417 419 418 ··· 554 545 555 546 if (pstate_funcs.get_vid) 556 547 pstate_funcs.get_vid(cpu); 557 - 558 - /* 559 - * goto max pstate so we don't slow up boot if we are built-in if we are 560 - * a module we will take care of it during normal operation 561 - */ 562 - intel_pstate_set_pstate(cpu, cpu->pstate.max_pstate); 548 + intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate); 563 549 } 564 550 565 551 static inline void 
intel_pstate_calc_busy(struct cpudata *cpu, ··· 699 695 cpu = all_cpu_data[cpunum]; 700 696 701 697 intel_pstate_get_cpu_pstates(cpu); 702 - if (!cpu->pstate.current_pstate) { 703 - all_cpu_data[cpunum] = NULL; 704 - kfree(cpu); 705 - return -ENODATA; 706 - } 707 698 708 699 cpu->cpu = cpunum; 709 700 ··· 709 710 cpu->timer.expires = jiffies + HZ/100; 710 711 intel_pstate_busy_pid_reset(cpu); 711 712 intel_pstate_sample(cpu); 712 - intel_pstate_set_pstate(cpu, cpu->pstate.max_pstate); 713 713 714 714 add_timer_on(&cpu->timer, cpunum); 715 715
+2 -2
drivers/cpufreq/loongson2_cpufreq.c
··· 62 62 set_cpus_allowed_ptr(current, &cpus_allowed); 63 63 64 64 /* setting the cpu frequency */ 65 - clk_set_rate(policy->clk, freq); 65 + clk_set_rate(policy->clk, freq * 1000); 66 66 67 67 return 0; 68 68 } ··· 92 92 i++) 93 93 loongson2_clockmod_table[i].frequency = (rate * i) / 8; 94 94 95 - ret = clk_set_rate(cpuclk, rate); 95 + ret = clk_set_rate(cpuclk, rate * 1000); 96 96 if (ret) { 97 97 clk_put(cpuclk); 98 98 return ret;
+7 -3
drivers/crypto/caam/error.c
··· 16 16 char *tmp; \ 17 17 \ 18 18 tmp = kmalloc(sizeof(format) + max_alloc, GFP_ATOMIC); \ 19 - sprintf(tmp, format, param); \ 20 - strcat(str, tmp); \ 21 - kfree(tmp); \ 19 + if (likely(tmp)) { \ 20 + sprintf(tmp, format, param); \ 21 + strcat(str, tmp); \ 22 + kfree(tmp); \ 23 + } else { \ 24 + strcat(str, "kmalloc failure in SPRINTFCAT"); \ 25 + } \ 22 26 } 23 27 24 28 static void report_jump_idx(u32 status, char *outstr)
-1
drivers/firewire/core.h
··· 118 118 u32 max_receive, u32 link_speed, u64 guid); 119 119 void fw_core_remove_card(struct fw_card *card); 120 120 int fw_compute_block_crc(__be32 *block); 121 - void fw_schedule_bus_reset(struct fw_card *card, bool delayed, bool short_reset); 122 121 void fw_schedule_bm_work(struct fw_card *card, unsigned long delay); 123 122 124 123 /* -cdev */
+1
drivers/firmware/iscsi_ibft.c
··· 756 756 */ 757 757 { ACPI_SIG_IBFT }, 758 758 { "iBFT" }, 759 + { "BIFT" }, /* Broadcom iSCSI Offload */ 759 760 }; 760 761 761 762 static void __init acpi_find_ibft_region(void)
+4
drivers/gpio/gpio-ich.c
··· 305 305 306 306 .ngpio = 50, 307 307 .have_blink = true, 308 + .regs = ichx_regs, 309 + .reglen = ichx_reglen, 308 310 }; 309 311 310 312 /* Intel 3100 */ ··· 326 324 .uses_gpe0 = true, 327 325 328 326 .ngpio = 50, 327 + .regs = ichx_regs, 328 + .reglen = ichx_reglen, 329 329 }; 330 330 331 331 /* ICH7 and ICH8-based */
+7 -5
drivers/gpio/gpio-mcp23s08.c
··· 894 894 dev_err(&spi->dev, "invalid spi-present-mask\n"); 895 895 return -ENODEV; 896 896 } 897 - 898 - for (addr = 0; addr < ARRAY_SIZE(pdata->chip); addr++) 897 + for (addr = 0; addr < ARRAY_SIZE(pdata->chip); addr++) { 898 + if ((spi_present_mask & (1 << addr))) 899 + chips++; 899 900 pullups[addr] = 0; 901 + } 900 902 } else { 901 903 type = spi_get_device_id(spi)->driver_data; 902 904 pdata = dev_get_platdata(&spi->dev); ··· 921 919 pullups[addr] = pdata->chip[addr].pullups; 922 920 } 923 921 924 - if (!chips) 925 - return -ENODEV; 926 - 927 922 base = pdata->base; 928 923 } 924 + 925 + if (!chips) 926 + return -ENODEV; 929 927 930 928 data = kzalloc(sizeof(*data) + chips * sizeof(struct mcp23s08), 931 929 GFP_KERNEL);
+38 -14
drivers/gpu/drm/i915/intel_bios.c
··· 560 560 561 561 dev_priv->vbt.edp_pps = *edp_pps; 562 562 563 - dev_priv->vbt.edp_rate = edp_link_params->rate ? DP_LINK_BW_2_7 : 564 - DP_LINK_BW_1_62; 563 + switch (edp_link_params->rate) { 564 + case EDP_RATE_1_62: 565 + dev_priv->vbt.edp_rate = DP_LINK_BW_1_62; 566 + break; 567 + case EDP_RATE_2_7: 568 + dev_priv->vbt.edp_rate = DP_LINK_BW_2_7; 569 + break; 570 + default: 571 + DRM_DEBUG_KMS("VBT has unknown eDP link rate value %u\n", 572 + edp_link_params->rate); 573 + break; 574 + } 575 + 565 576 switch (edp_link_params->lanes) { 566 - case 0: 577 + case EDP_LANE_1: 567 578 dev_priv->vbt.edp_lanes = 1; 568 579 break; 569 - case 1: 580 + case EDP_LANE_2: 570 581 dev_priv->vbt.edp_lanes = 2; 571 582 break; 572 - case 3: 573 - default: 583 + case EDP_LANE_4: 574 584 dev_priv->vbt.edp_lanes = 4; 575 585 break; 586 + default: 587 + DRM_DEBUG_KMS("VBT has unknown eDP lane count value %u\n", 588 + edp_link_params->lanes); 589 + break; 576 590 } 591 + 577 592 switch (edp_link_params->preemphasis) { 578 - case 0: 593 + case EDP_PREEMPHASIS_NONE: 579 594 dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPHASIS_0; 580 595 break; 581 - case 1: 596 + case EDP_PREEMPHASIS_3_5dB: 582 597 dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPHASIS_3_5; 583 598 break; 584 - case 2: 599 + case EDP_PREEMPHASIS_6dB: 585 600 dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPHASIS_6; 586 601 break; 587 - case 3: 602 + case EDP_PREEMPHASIS_9_5dB: 588 603 dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPHASIS_9_5; 589 604 break; 605 + default: 606 + DRM_DEBUG_KMS("VBT has unknown eDP pre-emphasis value %u\n", 607 + edp_link_params->preemphasis); 608 + break; 590 609 } 610 + 591 611 switch (edp_link_params->vswing) { 592 - case 0: 612 + case EDP_VSWING_0_4V: 593 613 dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_400; 594 614 break; 595 - case 1: 615 + case EDP_VSWING_0_6V: 596 616 dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_600; 597 617 break; 598 - case 2: 618 + case 
EDP_VSWING_0_8V: 599 619 dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_800; 600 620 break; 601 - case 3: 621 + case EDP_VSWING_1_2V: 602 622 dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_1200; 623 + break; 624 + default: 625 + DRM_DEBUG_KMS("VBT has unknown eDP voltage swing value %u\n", 626 + edp_link_params->vswing); 603 627 break; 604 628 } 605 629 }
+46 -9
drivers/gpu/drm/i915/intel_dp.c
··· 121 121 return max_link_bw; 122 122 } 123 123 124 + static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp) 125 + { 126 + struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 127 + struct drm_device *dev = intel_dig_port->base.base.dev; 128 + u8 source_max, sink_max; 129 + 130 + source_max = 4; 131 + if (HAS_DDI(dev) && intel_dig_port->port == PORT_A && 132 + (intel_dig_port->saved_port_bits & DDI_A_4_LANES) == 0) 133 + source_max = 2; 134 + 135 + sink_max = drm_dp_max_lane_count(intel_dp->dpcd); 136 + 137 + return min(source_max, sink_max); 138 + } 139 + 124 140 /* 125 141 * The units on the numbers in the next two are... bizarre. Examples will 126 142 * make it clearer; this one parallels an example in the eDP spec. ··· 187 171 } 188 172 189 173 max_link_clock = drm_dp_bw_code_to_link_rate(intel_dp_max_link_bw(intel_dp)); 190 - max_lanes = drm_dp_max_lane_count(intel_dp->dpcd); 174 + max_lanes = intel_dp_max_lane_count(intel_dp); 191 175 192 176 max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes); 193 177 mode_rate = intel_dp_link_required(target_clock, 18); ··· 767 751 struct intel_crtc *intel_crtc = encoder->new_crtc; 768 752 struct intel_connector *intel_connector = intel_dp->attached_connector; 769 753 int lane_count, clock; 770 - int max_lane_count = drm_dp_max_lane_count(intel_dp->dpcd); 754 + int min_lane_count = 1; 755 + int max_lane_count = intel_dp_max_lane_count(intel_dp); 771 756 /* Conveniently, the link BW constants become indices with a shift...*/ 757 + int min_clock = 0; 772 758 int max_clock = intel_dp_max_link_bw(intel_dp) >> 3; 773 759 int bpp, mode_rate; 774 760 static int bws[] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7, DP_LINK_BW_5_4 }; ··· 803 785 /* Walk through all bpp values. Luckily they're all nicely spaced with 2 804 786 * bpc in between. 
*/ 805 787 bpp = pipe_config->pipe_bpp; 806 - if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp && 807 - dev_priv->vbt.edp_bpp < bpp) { 808 - DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n", 809 - dev_priv->vbt.edp_bpp); 810 - bpp = dev_priv->vbt.edp_bpp; 788 + if (is_edp(intel_dp)) { 789 + if (dev_priv->vbt.edp_bpp && dev_priv->vbt.edp_bpp < bpp) { 790 + DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n", 791 + dev_priv->vbt.edp_bpp); 792 + bpp = dev_priv->vbt.edp_bpp; 793 + } 794 + 795 + if (IS_BROADWELL(dev)) { 796 + /* Yes, it's an ugly hack. */ 797 + min_lane_count = max_lane_count; 798 + DRM_DEBUG_KMS("forcing lane count to max (%u) on BDW\n", 799 + min_lane_count); 800 + } else if (dev_priv->vbt.edp_lanes) { 801 + min_lane_count = min(dev_priv->vbt.edp_lanes, 802 + max_lane_count); 803 + DRM_DEBUG_KMS("using min %u lanes per VBT\n", 804 + min_lane_count); 805 + } 806 + 807 + if (dev_priv->vbt.edp_rate) { 808 + min_clock = min(dev_priv->vbt.edp_rate >> 3, max_clock); 809 + DRM_DEBUG_KMS("using min %02x link bw per VBT\n", 810 + bws[min_clock]); 811 + } 811 812 } 812 813 813 814 for (; bpp >= 6*3; bpp -= 2*3) { 814 815 mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock, 815 816 bpp); 816 817 817 - for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) { 818 - for (clock = 0; clock <= max_clock; clock++) { 818 + for (lane_count = min_lane_count; lane_count <= max_lane_count; lane_count <<= 1) { 819 + for (clock = min_clock; clock <= max_clock; clock++) { 819 820 link_clock = drm_dp_bw_code_to_link_rate(bws[clock]); 820 821 link_avail = intel_dp_max_data_rate(link_clock, 821 822 lane_count);
+9
drivers/gpu/drm/i915/intel_fbdev.c
··· 387 387 height); 388 388 } 389 389 390 + /* No preferred mode marked by the EDID? Are there any modes? */ 391 + if (!modes[i] && !list_empty(&connector->modes)) { 392 + DRM_DEBUG_KMS("using first mode listed on connector %s\n", 393 + drm_get_connector_name(connector)); 394 + modes[i] = list_first_entry(&connector->modes, 395 + struct drm_display_mode, 396 + head); 397 + } 398 + 390 399 /* last resort: use current mode */ 391 400 if (!modes[i]) { 392 401 /*
+4 -4
drivers/gpu/drm/i915/intel_panel.c
··· 492 492 enum pipe pipe = intel_get_pipe_from_connector(connector); 493 493 u32 freq; 494 494 unsigned long flags; 495 + u64 n; 495 496 496 497 if (!panel->backlight.present || pipe == INVALID_PIPE) 497 498 return; ··· 503 502 504 503 /* scale to hardware max, but be careful to not overflow */ 505 504 freq = panel->backlight.max; 506 - if (freq < max) 507 - level = level * freq / max; 508 - else 509 - level = freq / max * level; 505 + n = (u64)level * freq; 506 + do_div(n, max); 507 + level = n; 510 508 511 509 panel->backlight.level = level; 512 510 if (panel->backlight.device)
+40
drivers/gpu/drm/i915/intel_pm.c
··· 2095 2095 } 2096 2096 } 2097 2097 2098 + static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv, 2099 + uint16_t wm[5], uint16_t min) 2100 + { 2101 + int level, max_level = ilk_wm_max_level(dev_priv->dev); 2102 + 2103 + if (wm[0] >= min) 2104 + return false; 2105 + 2106 + wm[0] = max(wm[0], min); 2107 + for (level = 1; level <= max_level; level++) 2108 + wm[level] = max_t(uint16_t, wm[level], DIV_ROUND_UP(min, 5)); 2109 + 2110 + return true; 2111 + } 2112 + 2113 + static void snb_wm_latency_quirk(struct drm_device *dev) 2114 + { 2115 + struct drm_i915_private *dev_priv = dev->dev_private; 2116 + bool changed; 2117 + 2118 + /* 2119 + * The BIOS provided WM memory latency values are often 2120 + * inadequate for high resolution displays. Adjust them. 2121 + */ 2122 + changed = ilk_increase_wm_latency(dev_priv, dev_priv->wm.pri_latency, 12) | 2123 + ilk_increase_wm_latency(dev_priv, dev_priv->wm.spr_latency, 12) | 2124 + ilk_increase_wm_latency(dev_priv, dev_priv->wm.cur_latency, 12); 2125 + 2126 + if (!changed) 2127 + return; 2128 + 2129 + DRM_DEBUG_KMS("WM latency values increased to avoid potential underruns\n"); 2130 + intel_print_wm_latency(dev, "Primary", dev_priv->wm.pri_latency); 2131 + intel_print_wm_latency(dev, "Sprite", dev_priv->wm.spr_latency); 2132 + intel_print_wm_latency(dev, "Cursor", dev_priv->wm.cur_latency); 2133 + } 2134 + 2098 2135 static void ilk_setup_wm_latency(struct drm_device *dev) 2099 2136 { 2100 2137 struct drm_i915_private *dev_priv = dev->dev_private; ··· 2149 2112 intel_print_wm_latency(dev, "Primary", dev_priv->wm.pri_latency); 2150 2113 intel_print_wm_latency(dev, "Sprite", dev_priv->wm.spr_latency); 2151 2114 intel_print_wm_latency(dev, "Cursor", dev_priv->wm.cur_latency); 2115 + 2116 + if (IS_GEN6(dev)) 2117 + snb_wm_latency_quirk(dev); 2152 2118 } 2153 2119 2154 2120 static void ilk_compute_wm_parameters(struct drm_crtc *crtc,
+2 -2
drivers/gpu/drm/i915/intel_sdvo.c
··· 2424 2424 if (ret < 0) 2425 2425 goto err1; 2426 2426 2427 - ret = sysfs_create_link(&encoder->ddc.dev.kobj, 2428 - &drm_connector->kdev->kobj, 2427 + ret = sysfs_create_link(&drm_connector->kdev->kobj, 2428 + &encoder->ddc.dev.kobj, 2429 2429 encoder->ddc.dev.kobj.name); 2430 2430 if (ret < 0) 2431 2431 goto err2;
+2
drivers/gpu/drm/i915/intel_uncore.c
··· 185 185 { 186 186 __raw_i915_write32(dev_priv, FORCEWAKE_VLV, 187 187 _MASKED_BIT_DISABLE(0xffff)); 188 + __raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV, 189 + _MASKED_BIT_DISABLE(0xffff)); 188 190 /* something from same cacheline, but !FORCEWAKE_VLV */ 189 191 __raw_posting_read(dev_priv, FORCEWAKE_ACK_VLV); 190 192 }
+5 -5
drivers/hwmon/emc1403.c
··· 163 163 if (retval < 0) 164 164 goto fail; 165 165 166 - hyst = val - retval * 1000; 166 + hyst = retval * 1000 - val; 167 167 hyst = DIV_ROUND_CLOSEST(hyst, 1000); 168 168 if (hyst < 0 || hyst > 255) { 169 169 retval = -ERANGE; ··· 330 330 } 331 331 332 332 id = i2c_smbus_read_byte_data(client, THERMAL_REVISION_REG); 333 - if (id != 0x01) 333 + if (id < 0x01 || id > 0x04) 334 334 return -ENODEV; 335 335 336 336 return 0; ··· 355 355 if (id->driver_data) 356 356 data->groups[1] = &emc1404_group; 357 357 358 - hwmon_dev = hwmon_device_register_with_groups(&client->dev, 359 - client->name, data, 360 - data->groups); 358 + hwmon_dev = devm_hwmon_device_register_with_groups(&client->dev, 359 + client->name, data, 360 + data->groups); 361 361 if (IS_ERR(hwmon_dev)) 362 362 return PTR_ERR(hwmon_dev); 363 363
+3
drivers/i2c/busses/i2c-designware-core.c
··· 422 422 */ 423 423 dw_writel(dev, msgs[dev->msg_write_idx].addr | ic_tar, DW_IC_TAR); 424 424 425 + /* enforce disabled interrupts (due to HW issues) */ 426 + i2c_dw_disable_int(dev); 427 + 425 428 /* Enable the adapter */ 426 429 __i2c_dw_enable(dev, true); 427 430
+1 -1
drivers/i2c/busses/i2c-nomadik.c
··· 999 999 1000 1000 dev->virtbase = devm_ioremap(&adev->dev, adev->res.start, 1001 1001 resource_size(&adev->res)); 1002 - if (IS_ERR(dev->virtbase)) { 1002 + if (!dev->virtbase) { 1003 1003 ret = -ENOMEM; 1004 1004 goto err_no_mem; 1005 1005 }
+1 -1
drivers/i2c/busses/i2c-qup.c
··· 479 479 int ret, idx; 480 480 481 481 ret = pm_runtime_get_sync(qup->dev); 482 - if (ret) 482 + if (ret < 0) 483 483 goto out; 484 484 485 485 writel(1, qup->base + QUP_SW_RESET);
+8 -1
drivers/i2c/busses/i2c-rcar.c
··· 561 561 562 562 ret = -EINVAL; 563 563 for (i = 0; i < num; i++) { 564 + /* This HW can't send STOP after address phase */ 565 + if (msgs[i].len == 0) { 566 + ret = -EOPNOTSUPP; 567 + break; 568 + } 569 + 564 570 /*-------------- spin lock -----------------*/ 565 571 spin_lock_irqsave(&priv->lock, flags); 566 572 ··· 631 625 632 626 static u32 rcar_i2c_func(struct i2c_adapter *adap) 633 627 { 634 - return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; 628 + /* This HW can't do SMBUS_QUICK and NOSTART */ 629 + return I2C_FUNC_I2C | (I2C_FUNC_SMBUS_EMUL & ~I2C_FUNC_SMBUS_QUICK); 635 630 } 636 631 637 632 static const struct i2c_algorithm rcar_i2c_algo = {
+1 -1
drivers/i2c/busses/i2c-s3c2410.c
··· 1276 1276 struct platform_device *pdev = to_platform_device(dev); 1277 1277 struct s3c24xx_i2c *i2c = platform_get_drvdata(pdev); 1278 1278 1279 - i2c->suspended = 0; 1280 1279 clk_prepare_enable(i2c->clk); 1281 1280 s3c24xx_i2c_init(i2c); 1282 1281 clk_disable_unprepare(i2c->clk); 1282 + i2c->suspended = 0; 1283 1283 1284 1284 return 0; 1285 1285 }
+17 -21
drivers/infiniband/ulp/isert/ib_isert.c
··· 28 28 #include <target/target_core_base.h> 29 29 #include <target/target_core_fabric.h> 30 30 #include <target/iscsi/iscsi_transport.h> 31 + #include <linux/semaphore.h> 31 32 32 33 #include "isert_proto.h" 33 34 #include "ib_isert.h" ··· 562 561 struct isert_device *device; 563 562 struct ib_device *ib_dev = cma_id->device; 564 563 int ret = 0; 565 - u8 pi_support = np->tpg_np->tpg->tpg_attrib.t10_pi; 564 + u8 pi_support; 565 + 566 + spin_lock_bh(&np->np_thread_lock); 567 + if (!np->enabled) { 568 + spin_unlock_bh(&np->np_thread_lock); 569 + pr_debug("iscsi_np is not enabled, reject connect request\n"); 570 + return rdma_reject(cma_id, NULL, 0); 571 + } 572 + spin_unlock_bh(&np->np_thread_lock); 566 573 567 574 pr_debug("Entering isert_connect_request cma_id: %p, context: %p\n", 568 575 cma_id, cma_id->context); ··· 661 652 goto out_mr; 662 653 } 663 654 655 + pi_support = np->tpg_np->tpg->tpg_attrib.t10_pi; 664 656 if (pi_support && !device->pi_capable) { 665 657 pr_err("Protection information requested but not supported\n"); 666 658 ret = -EINVAL; ··· 673 663 goto out_conn_dev; 674 664 675 665 mutex_lock(&isert_np->np_accept_mutex); 676 - list_add_tail(&isert_np->np_accept_list, &isert_conn->conn_accept_node); 666 + list_add_tail(&isert_conn->conn_accept_node, &isert_np->np_accept_list); 677 667 mutex_unlock(&isert_np->np_accept_mutex); 678 668 679 - pr_debug("isert_connect_request() waking up np_accept_wq: %p\n", np); 680 - wake_up(&isert_np->np_accept_wq); 669 + pr_debug("isert_connect_request() up np_sem np: %p\n", np); 670 + up(&isert_np->np_sem); 681 671 return 0; 682 672 683 673 out_conn_dev: ··· 3009 2999 pr_err("Unable to allocate struct isert_np\n"); 3010 3000 return -ENOMEM; 3011 3001 } 3012 - init_waitqueue_head(&isert_np->np_accept_wq); 3002 + sema_init(&isert_np->np_sem, 0); 3013 3003 mutex_init(&isert_np->np_accept_mutex); 3014 3004 INIT_LIST_HEAD(&isert_np->np_accept_list); 3015 3005 init_completion(&isert_np->np_login_comp); ··· 3055 3045 
out: 3056 3046 kfree(isert_np); 3057 3047 return ret; 3058 - } 3059 - 3060 - static int 3061 - isert_check_accept_queue(struct isert_np *isert_np) 3062 - { 3063 - int empty; 3064 - 3065 - mutex_lock(&isert_np->np_accept_mutex); 3066 - empty = list_empty(&isert_np->np_accept_list); 3067 - mutex_unlock(&isert_np->np_accept_mutex); 3068 - 3069 - return empty; 3070 3048 } 3071 3049 3072 3050 static int ··· 3149 3151 int max_accept = 0, ret; 3150 3152 3151 3153 accept_wait: 3152 - ret = wait_event_interruptible(isert_np->np_accept_wq, 3153 - !isert_check_accept_queue(isert_np) || 3154 - np->np_thread_state == ISCSI_NP_THREAD_RESET); 3154 + ret = down_interruptible(&isert_np->np_sem); 3155 3155 if (max_accept > 5) 3156 3156 return -ENODEV; 3157 3157 3158 3158 spin_lock_bh(&np->np_thread_lock); 3159 3159 if (np->np_thread_state == ISCSI_NP_THREAD_RESET) { 3160 3160 spin_unlock_bh(&np->np_thread_lock); 3161 - pr_err("ISCSI_NP_THREAD_RESET for isert_accept_np\n"); 3161 + pr_debug("ISCSI_NP_THREAD_RESET for isert_accept_np\n"); 3162 3162 return -ENODEV; 3163 3163 } 3164 3164 spin_unlock_bh(&np->np_thread_lock);
+1 -1
drivers/infiniband/ulp/isert/ib_isert.h
··· 182 182 }; 183 183 184 184 struct isert_np { 185 - wait_queue_head_t np_accept_wq; 185 + struct semaphore np_sem; 186 186 struct rdma_cm_id *np_cm_id; 187 187 struct mutex np_accept_mutex; 188 188 struct list_head np_accept_list;
+1 -1
drivers/iommu/amd_iommu.c
··· 3999 3999 iommu_flush_dte(iommu, devid); 4000 4000 if (devid != alias) { 4001 4001 irq_lookup_table[alias] = table; 4002 - set_dte_irq_entry(devid, table); 4002 + set_dte_irq_entry(alias, table); 4003 4003 iommu_flush_dte(iommu, alias); 4004 4004 } 4005 4005
+1 -1
drivers/iommu/amd_iommu_init.c
··· 788 788 * per device. But we can enable the exclusion range per 789 789 * device. This is done here 790 790 */ 791 - set_dev_entry_bit(m->devid, DEV_ENTRY_EX); 791 + set_dev_entry_bit(devid, DEV_ENTRY_EX); 792 792 iommu->exclusion_start = m->range_start; 793 793 iommu->exclusion_length = m->range_length; 794 794 }
+2
drivers/iommu/amd_iommu_v2.c
··· 504 504 505 505 write = !!(fault->flags & PPR_FAULT_WRITE); 506 506 507 + down_read(&fault->state->mm->mmap_sem); 507 508 npages = get_user_pages(fault->state->task, fault->state->mm, 508 509 fault->address, 1, write, 0, &page, NULL); 510 + up_read(&fault->state->mm->mmap_sem); 509 511 510 512 if (npages == 1) { 511 513 put_page(page);
+12 -49
drivers/md/dm-crypt.c
··· 19 19 #include <linux/crypto.h> 20 20 #include <linux/workqueue.h> 21 21 #include <linux/backing-dev.h> 22 - #include <linux/percpu.h> 23 22 #include <linux/atomic.h> 24 23 #include <linux/scatterlist.h> 25 24 #include <asm/page.h> ··· 42 43 struct bvec_iter iter_out; 43 44 sector_t cc_sector; 44 45 atomic_t cc_pending; 46 + struct ablkcipher_request *req; 45 47 }; 46 48 47 49 /* ··· 111 111 enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID }; 112 112 113 113 /* 114 - * Duplicated per-CPU state for cipher. 115 - */ 116 - struct crypt_cpu { 117 - struct ablkcipher_request *req; 118 - }; 119 - 120 - /* 121 - * The fields in here must be read only after initialization, 122 - * changing state should be in crypt_cpu. 114 + * The fields in here must be read only after initialization. 123 115 */ 124 116 struct crypt_config { 125 117 struct dm_dev *dev; ··· 141 149 } iv_gen_private; 142 150 sector_t iv_offset; 143 151 unsigned int iv_size; 144 - 145 - /* 146 - * Duplicated per cpu state. Access through 147 - * per_cpu_ptr() only. 148 - */ 149 - struct crypt_cpu __percpu *cpu; 150 152 151 153 /* ESSIV: struct crypto_cipher *essiv_tfm */ 152 154 void *iv_private; ··· 177 191 static void clone_init(struct dm_crypt_io *, struct bio *); 178 192 static void kcryptd_queue_crypt(struct dm_crypt_io *io); 179 193 static u8 *iv_of_dmreq(struct crypt_config *cc, struct dm_crypt_request *dmreq); 180 - 181 - static struct crypt_cpu *this_crypt_config(struct crypt_config *cc) 182 - { 183 - return this_cpu_ptr(cc->cpu); 184 - } 185 194 186 195 /* 187 196 * Use this to access cipher attributes that are the same for each CPU. 
··· 884 903 static void crypt_alloc_req(struct crypt_config *cc, 885 904 struct convert_context *ctx) 886 905 { 887 - struct crypt_cpu *this_cc = this_crypt_config(cc); 888 906 unsigned key_index = ctx->cc_sector & (cc->tfms_count - 1); 889 907 890 - if (!this_cc->req) 891 - this_cc->req = mempool_alloc(cc->req_pool, GFP_NOIO); 908 + if (!ctx->req) 909 + ctx->req = mempool_alloc(cc->req_pool, GFP_NOIO); 892 910 893 - ablkcipher_request_set_tfm(this_cc->req, cc->tfms[key_index]); 894 - ablkcipher_request_set_callback(this_cc->req, 911 + ablkcipher_request_set_tfm(ctx->req, cc->tfms[key_index]); 912 + ablkcipher_request_set_callback(ctx->req, 895 913 CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP, 896 - kcryptd_async_done, dmreq_of_req(cc, this_cc->req)); 914 + kcryptd_async_done, dmreq_of_req(cc, ctx->req)); 897 915 } 898 916 899 917 /* ··· 901 921 static int crypt_convert(struct crypt_config *cc, 902 922 struct convert_context *ctx) 903 923 { 904 - struct crypt_cpu *this_cc = this_crypt_config(cc); 905 924 int r; 906 925 907 926 atomic_set(&ctx->cc_pending, 1); ··· 911 932 912 933 atomic_inc(&ctx->cc_pending); 913 934 914 - r = crypt_convert_block(cc, ctx, this_cc->req); 935 + r = crypt_convert_block(cc, ctx, ctx->req); 915 936 916 937 switch (r) { 917 938 /* async */ ··· 920 941 reinit_completion(&ctx->restart); 921 942 /* fall through*/ 922 943 case -EINPROGRESS: 923 - this_cc->req = NULL; 944 + ctx->req = NULL; 924 945 ctx->cc_sector++; 925 946 continue; 926 947 ··· 1019 1040 io->sector = sector; 1020 1041 io->error = 0; 1021 1042 io->base_io = NULL; 1043 + io->ctx.req = NULL; 1022 1044 atomic_set(&io->io_pending, 0); 1023 1045 1024 1046 return io; ··· 1045 1065 if (!atomic_dec_and_test(&io->io_pending)) 1046 1066 return; 1047 1067 1068 + if (io->ctx.req) 1069 + mempool_free(io->ctx.req, cc->req_pool); 1048 1070 mempool_free(io, cc->io_pool); 1049 1071 1050 1072 if (likely(!base_io)) ··· 1474 1492 static void crypt_dtr(struct dm_target *ti) 1475 1493 { 
1476 1494 struct crypt_config *cc = ti->private; 1477 - struct crypt_cpu *cpu_cc; 1478 - int cpu; 1479 1495 1480 1496 ti->private = NULL; 1481 1497 ··· 1484 1504 destroy_workqueue(cc->io_queue); 1485 1505 if (cc->crypt_queue) 1486 1506 destroy_workqueue(cc->crypt_queue); 1487 - 1488 - if (cc->cpu) 1489 - for_each_possible_cpu(cpu) { 1490 - cpu_cc = per_cpu_ptr(cc->cpu, cpu); 1491 - if (cpu_cc->req) 1492 - mempool_free(cpu_cc->req, cc->req_pool); 1493 - } 1494 1507 1495 1508 crypt_free_tfms(cc); 1496 1509 ··· 1502 1529 1503 1530 if (cc->dev) 1504 1531 dm_put_device(ti, cc->dev); 1505 - 1506 - if (cc->cpu) 1507 - free_percpu(cc->cpu); 1508 1532 1509 1533 kzfree(cc->cipher); 1510 1534 kzfree(cc->cipher_string); ··· 1557 1587 1558 1588 if (tmp) 1559 1589 DMWARN("Ignoring unexpected additional cipher options"); 1560 - 1561 - cc->cpu = __alloc_percpu(sizeof(*(cc->cpu)), 1562 - __alignof__(struct crypt_cpu)); 1563 - if (!cc->cpu) { 1564 - ti->error = "Cannot allocate per cpu state"; 1565 - goto bad_mem; 1566 - } 1567 1590 1568 1591 /* 1569 1592 * For compatibility with the original dm-crypt mapping format, if
+1 -1
drivers/md/dm-mpath.c
··· 1566 1566 } 1567 1567 if (m->pg_init_required) 1568 1568 __pg_init_all_paths(m); 1569 - spin_unlock_irqrestore(&m->lock, flags); 1570 1569 dm_table_run_md_queue_async(m->ti->table); 1570 + spin_unlock_irqrestore(&m->lock, flags); 1571 1571 } 1572 1572 1573 1573 return r ? : __blkdev_driver_ioctl(bdev, mode, cmd, arg);
+22 -1
drivers/md/dm-thin.c
··· 27 27 #define MAPPING_POOL_SIZE 1024 28 28 #define PRISON_CELLS 1024 29 29 #define COMMIT_PERIOD HZ 30 + #define NO_SPACE_TIMEOUT (HZ * 60) 30 31 31 32 DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(snapshot_copy_throttle, 32 33 "A percentage of time allocated for copy on write"); ··· 176 175 struct workqueue_struct *wq; 177 176 struct work_struct worker; 178 177 struct delayed_work waker; 178 + struct delayed_work no_space_timeout; 179 179 180 180 unsigned long last_commit_jiffies; 181 181 unsigned ref_count; ··· 937 935 { 938 936 int r; 939 937 940 - if (get_pool_mode(pool) != PM_WRITE) 938 + if (get_pool_mode(pool) >= PM_READ_ONLY) 941 939 return -EINVAL; 942 940 943 941 r = dm_pool_commit_metadata(pool->pmd); ··· 1592 1590 queue_delayed_work(pool->wq, &pool->waker, COMMIT_PERIOD); 1593 1591 } 1594 1592 1593 + /* 1594 + * We're holding onto IO to allow userland time to react. After the 1595 + * timeout either the pool will have been resized (and thus back in 1596 + * PM_WRITE mode), or we degrade to PM_READ_ONLY and start erroring IO. 
1597 + */ 1598 + static void do_no_space_timeout(struct work_struct *ws) 1599 + { 1600 + struct pool *pool = container_of(to_delayed_work(ws), struct pool, 1601 + no_space_timeout); 1602 + 1603 + if (get_pool_mode(pool) == PM_OUT_OF_DATA_SPACE && !pool->pf.error_if_no_space) 1604 + set_pool_mode(pool, PM_READ_ONLY); 1605 + } 1606 + 1595 1607 /*----------------------------------------------------------------*/ 1596 1608 1597 1609 struct noflush_work { ··· 1731 1715 pool->process_discard = process_discard; 1732 1716 pool->process_prepared_mapping = process_prepared_mapping; 1733 1717 pool->process_prepared_discard = process_prepared_discard_passdown; 1718 + 1719 + if (!pool->pf.error_if_no_space) 1720 + queue_delayed_work(pool->wq, &pool->no_space_timeout, NO_SPACE_TIMEOUT); 1734 1721 break; 1735 1722 1736 1723 case PM_WRITE: ··· 2119 2100 2120 2101 INIT_WORK(&pool->worker, do_worker); 2121 2102 INIT_DELAYED_WORK(&pool->waker, do_waker); 2103 + INIT_DELAYED_WORK(&pool->no_space_timeout, do_no_space_timeout); 2122 2104 spin_lock_init(&pool->lock); 2123 2105 bio_list_init(&pool->deferred_flush_bios); 2124 2106 INIT_LIST_HEAD(&pool->prepared_mappings); ··· 2682 2662 struct pool *pool = pt->pool; 2683 2663 2684 2664 cancel_delayed_work(&pool->waker); 2665 + cancel_delayed_work(&pool->no_space_timeout); 2685 2666 flush_workqueue(pool->wq); 2686 2667 (void) commit(pool); 2687 2668 }
+2 -1
drivers/md/md.c
··· 8516 8516 if (mddev_trylock(mddev)) { 8517 8517 if (mddev->pers) 8518 8518 __md_stop_writes(mddev); 8519 - mddev->safemode = 2; 8519 + if (mddev->persistent) 8520 + mddev->safemode = 2; 8520 8521 mddev_unlock(mddev); 8521 8522 } 8522 8523 need_delay = 1;
+7 -6
drivers/md/raid10.c
··· 1172 1172 int max_sectors; 1173 1173 int sectors; 1174 1174 1175 + /* 1176 + * Register the new request and wait if the reconstruction 1177 + * thread has put up a bar for new requests. 1178 + * Continue immediately if no resync is active currently. 1179 + */ 1180 + wait_barrier(conf); 1181 + 1175 1182 sectors = bio_sectors(bio); 1176 1183 while (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && 1177 1184 bio->bi_iter.bi_sector < conf->reshape_progress && ··· 1559 1552 1560 1553 md_write_start(mddev, bio); 1561 1554 1562 - /* 1563 - * Register the new request and wait if the reconstruction 1564 - * thread has put up a bar for new requests. 1565 - * Continue immediately if no resync is active currently. 1566 - */ 1567 - wait_barrier(conf); 1568 1555 1569 1556 do { 1570 1557
+1 -1
drivers/media/i2c/ov7670.c
··· 1109 1109 * windows that fall outside that. 1110 1110 */ 1111 1111 for (i = 0; i < n_win_sizes; i++) { 1112 - struct ov7670_win_size *win = &info->devtype->win_sizes[index]; 1112 + struct ov7670_win_size *win = &info->devtype->win_sizes[i]; 1113 1113 if (info->min_width && win->width < info->min_width) 1114 1114 continue; 1115 1115 if (info->min_height && win->height < info->min_height)
+1 -1
drivers/media/i2c/s5c73m3/s5c73m3-core.c
··· 1616 1616 if (ret < 0) 1617 1617 return -EINVAL; 1618 1618 1619 - node_ep = v4l2_of_get_next_endpoint(node, NULL); 1619 + node_ep = of_graph_get_next_endpoint(node, NULL); 1620 1620 if (!node_ep) { 1621 1621 dev_warn(dev, "no endpoint defined for node: %s\n", 1622 1622 node->full_name);
+1
drivers/media/media-device.c
··· 93 93 struct media_entity *ent; 94 94 struct media_entity_desc u_ent; 95 95 96 + memset(&u_ent, 0, sizeof(u_ent)); 96 97 if (copy_from_user(&u_ent.id, &uent->id, sizeof(u_ent.id))) 97 98 return -EFAULT; 98 99
+15 -1
drivers/media/platform/davinci/vpbe_display.c
··· 372 372 { 373 373 struct vpbe_fh *fh = vb2_get_drv_priv(vq); 374 374 struct vpbe_layer *layer = fh->layer; 375 + struct vpbe_display *disp = fh->disp_dev; 376 + unsigned long flags; 375 377 376 378 if (!vb2_is_streaming(vq)) 377 379 return 0; 378 380 379 381 /* release all active buffers */ 382 + spin_lock_irqsave(&disp->dma_queue_lock, flags); 383 + if (layer->cur_frm == layer->next_frm) { 384 + vb2_buffer_done(&layer->cur_frm->vb, VB2_BUF_STATE_ERROR); 385 + } else { 386 + if (layer->cur_frm != NULL) 387 + vb2_buffer_done(&layer->cur_frm->vb, 388 + VB2_BUF_STATE_ERROR); 389 + if (layer->next_frm != NULL) 390 + vb2_buffer_done(&layer->next_frm->vb, 391 + VB2_BUF_STATE_ERROR); 392 + } 393 + 380 394 while (!list_empty(&layer->dma_queue)) { 381 395 layer->next_frm = list_entry(layer->dma_queue.next, 382 396 struct vpbe_disp_buffer, list); 383 397 list_del(&layer->next_frm->list); 384 398 vb2_buffer_done(&layer->next_frm->vb, VB2_BUF_STATE_ERROR); 385 399 } 386 - 400 + spin_unlock_irqrestore(&disp->dma_queue_lock, flags); 387 401 return 0; 388 402 } 389 403
+2
drivers/media/platform/davinci/vpfe_capture.c
··· 734 734 } 735 735 vpfe_dev->io_usrs = 0; 736 736 vpfe_dev->numbuffers = config_params.numbuffers; 737 + videobuf_stop(&vpfe_dev->buffer_queue); 738 + videobuf_mmap_free(&vpfe_dev->buffer_queue); 737 739 } 738 740 739 741 /* Decrement device usrs counter */
+23 -11
drivers/media/platform/davinci/vpif_capture.c
··· 358 358 359 359 common = &ch->common[VPIF_VIDEO_INDEX]; 360 360 361 + /* Disable channel as per its device type and channel id */ 362 + if (VPIF_CHANNEL0_VIDEO == ch->channel_id) { 363 + enable_channel0(0); 364 + channel0_intr_enable(0); 365 + } 366 + if ((VPIF_CHANNEL1_VIDEO == ch->channel_id) || 367 + (2 == common->started)) { 368 + enable_channel1(0); 369 + channel1_intr_enable(0); 370 + } 371 + common->started = 0; 372 + 361 373 /* release all active buffers */ 362 374 spin_lock_irqsave(&common->irqlock, flags); 375 + if (common->cur_frm == common->next_frm) { 376 + vb2_buffer_done(&common->cur_frm->vb, VB2_BUF_STATE_ERROR); 377 + } else { 378 + if (common->cur_frm != NULL) 379 + vb2_buffer_done(&common->cur_frm->vb, 380 + VB2_BUF_STATE_ERROR); 381 + if (common->next_frm != NULL) 382 + vb2_buffer_done(&common->next_frm->vb, 383 + VB2_BUF_STATE_ERROR); 384 + } 385 + 363 386 while (!list_empty(&common->dma_queue)) { 364 387 common->next_frm = list_entry(common->dma_queue.next, 365 388 struct vpif_cap_buffer, list); ··· 956 933 if (fh->io_allowed[VPIF_VIDEO_INDEX]) { 957 934 /* Reset io_usrs member of channel object */ 958 935 common->io_usrs = 0; 959 - /* Disable channel as per its device type and channel id */ 960 - if (VPIF_CHANNEL0_VIDEO == ch->channel_id) { 961 - enable_channel0(0); 962 - channel0_intr_enable(0); 963 - } 964 - if ((VPIF_CHANNEL1_VIDEO == ch->channel_id) || 965 - (2 == common->started)) { 966 - enable_channel1(0); 967 - channel1_intr_enable(0); 968 - } 969 - common->started = 0; 970 936 /* Free buffers allocated */ 971 937 vb2_queue_release(&common->buffer_queue); 972 938 vb2_dma_contig_cleanup_ctx(common->alloc_ctx);
+23 -12
drivers/media/platform/davinci/vpif_display.c
··· 320 320 321 321 common = &ch->common[VPIF_VIDEO_INDEX]; 322 322 323 + /* Disable channel */ 324 + if (VPIF_CHANNEL2_VIDEO == ch->channel_id) { 325 + enable_channel2(0); 326 + channel2_intr_enable(0); 327 + } 328 + if ((VPIF_CHANNEL3_VIDEO == ch->channel_id) || 329 + (2 == common->started)) { 330 + enable_channel3(0); 331 + channel3_intr_enable(0); 332 + } 333 + common->started = 0; 334 + 323 335 /* release all active buffers */ 324 336 spin_lock_irqsave(&common->irqlock, flags); 337 + if (common->cur_frm == common->next_frm) { 338 + vb2_buffer_done(&common->cur_frm->vb, VB2_BUF_STATE_ERROR); 339 + } else { 340 + if (common->cur_frm != NULL) 341 + vb2_buffer_done(&common->cur_frm->vb, 342 + VB2_BUF_STATE_ERROR); 343 + if (common->next_frm != NULL) 344 + vb2_buffer_done(&common->next_frm->vb, 345 + VB2_BUF_STATE_ERROR); 346 + } 347 + 325 348 while (!list_empty(&common->dma_queue)) { 326 349 common->next_frm = list_entry(common->dma_queue.next, 327 350 struct vpif_disp_buffer, list); ··· 796 773 if (fh->io_allowed[VPIF_VIDEO_INDEX]) { 797 774 /* Reset io_usrs member of channel object */ 798 775 common->io_usrs = 0; 799 - /* Disable channel */ 800 - if (VPIF_CHANNEL2_VIDEO == ch->channel_id) { 801 - enable_channel2(0); 802 - channel2_intr_enable(0); 803 - } 804 - if ((VPIF_CHANNEL3_VIDEO == ch->channel_id) || 805 - (2 == common->started)) { 806 - enable_channel3(0); 807 - channel3_intr_enable(0); 808 - } 809 - common->started = 0; 810 - 811 776 /* Free buffers allocated */ 812 777 vb2_queue_release(&common->buffer_queue); 813 778 vb2_dma_contig_cleanup_ctx(common->alloc_ctx);
+1 -1
drivers/media/platform/exynos4-is/fimc-core.c
··· 122 122 }, { 123 123 .name = "YUV 4:2:2 planar, Y/Cb/Cr", 124 124 .fourcc = V4L2_PIX_FMT_YUV422P, 125 - .depth = { 12 }, 125 + .depth = { 16 }, 126 126 .color = FIMC_FMT_YCBYCR422, 127 127 .memplanes = 1, 128 128 .colplanes = 3,
+3 -3
drivers/media/tuners/fc2580.c
··· 195 195 196 196 f_ref = 2UL * priv->cfg->clock / r_val; 197 197 n_val = div_u64_rem(f_vco, f_ref, &k_val); 198 - k_val_reg = 1UL * k_val * (1 << 20) / f_ref; 198 + k_val_reg = div_u64(1ULL * k_val * (1 << 20), f_ref); 199 199 200 200 ret = fc2580_wr_reg(priv, 0x18, r18_val | ((k_val_reg >> 16) & 0xff)); 201 201 if (ret < 0) ··· 348 348 if (ret < 0) 349 349 goto err; 350 350 351 - ret = fc2580_wr_reg(priv, 0x37, 1UL * priv->cfg->clock * \ 352 - fc2580_if_filter_lut[i].mul / 1000000000); 351 + ret = fc2580_wr_reg(priv, 0x37, div_u64(1ULL * priv->cfg->clock * 352 + fc2580_if_filter_lut[i].mul, 1000000000)); 353 353 if (ret < 0) 354 354 goto err; 355 355
+1
drivers/media/tuners/fc2580_priv.h
··· 22 22 #define FC2580_PRIV_H 23 23 24 24 #include "fc2580.h" 25 + #include <linux/math64.h> 25 26 26 27 struct fc2580_reg_val { 27 28 u8 reg;
-1
drivers/media/usb/dvb-usb-v2/Makefile
··· 41 41 ccflags-y += -I$(srctree)/drivers/media/dvb-frontends 42 42 ccflags-y += -I$(srctree)/drivers/media/tuners 43 43 ccflags-y += -I$(srctree)/drivers/media/common 44 - ccflags-y += -I$(srctree)/drivers/staging/media/rtl2832u_sdr
+43 -5
drivers/media/usb/dvb-usb-v2/rtl28xxu.c
··· 24 24 25 25 #include "rtl2830.h" 26 26 #include "rtl2832.h" 27 - #include "rtl2832_sdr.h" 28 27 29 28 #include "qt1010.h" 30 29 #include "mt2060.h" ··· 34 35 #include "fc2580.h" 35 36 #include "tua9001.h" 36 37 #include "r820t.h" 38 + 39 + /* 40 + * RTL2832_SDR module is in staging. That logic is added in order to avoid any 41 + * hard dependency to drivers/staging/ directory as we want compile mainline 42 + * driver even whole staging directory is missing. 43 + */ 44 + #include <media/v4l2-subdev.h> 45 + 46 + #if IS_ENABLED(CONFIG_DVB_RTL2832_SDR) 47 + struct dvb_frontend *rtl2832_sdr_attach(struct dvb_frontend *fe, 48 + struct i2c_adapter *i2c, const struct rtl2832_config *cfg, 49 + struct v4l2_subdev *sd); 50 + #else 51 + static inline struct dvb_frontend *rtl2832_sdr_attach(struct dvb_frontend *fe, 52 + struct i2c_adapter *i2c, const struct rtl2832_config *cfg, 53 + struct v4l2_subdev *sd) 54 + { 55 + return NULL; 56 + } 57 + #endif 58 + 59 + #ifdef CONFIG_MEDIA_ATTACH 60 + #define dvb_attach_sdr(FUNCTION, ARGS...) ({ \ 61 + void *__r = NULL; \ 62 + typeof(&FUNCTION) __a = symbol_request(FUNCTION); \ 63 + if (__a) { \ 64 + __r = (void *) __a(ARGS); \ 65 + if (__r == NULL) \ 66 + symbol_put(FUNCTION); \ 67 + } \ 68 + __r; \ 69 + }) 70 + 71 + #else 72 + #define dvb_attach_sdr(FUNCTION, ARGS...) 
({ \ 73 + FUNCTION(ARGS); \ 74 + }) 75 + 76 + #endif 37 77 38 78 static int rtl28xxu_disable_rc; 39 79 module_param_named(disable_rc, rtl28xxu_disable_rc, int, 0644); ··· 946 908 adap->fe[0]->ops.tuner_ops.get_rf_strength; 947 909 948 910 /* attach SDR */ 949 - dvb_attach(rtl2832_sdr_attach, adap->fe[0], &d->i2c_adap, 911 + dvb_attach_sdr(rtl2832_sdr_attach, adap->fe[0], &d->i2c_adap, 950 912 &rtl28xxu_rtl2832_fc0012_config, NULL); 951 913 break; 952 914 case TUNER_RTL2832_FC0013: ··· 958 920 adap->fe[0]->ops.tuner_ops.get_rf_strength; 959 921 960 922 /* attach SDR */ 961 - dvb_attach(rtl2832_sdr_attach, adap->fe[0], &d->i2c_adap, 923 + dvb_attach_sdr(rtl2832_sdr_attach, adap->fe[0], &d->i2c_adap, 962 924 &rtl28xxu_rtl2832_fc0013_config, NULL); 963 925 break; 964 926 case TUNER_RTL2832_E4000: { ··· 989 951 i2c_set_adapdata(i2c_adap_internal, d); 990 952 991 953 /* attach SDR */ 992 - dvb_attach(rtl2832_sdr_attach, adap->fe[0], 954 + dvb_attach_sdr(rtl2832_sdr_attach, adap->fe[0], 993 955 i2c_adap_internal, 994 956 &rtl28xxu_rtl2832_e4000_config, sd); 995 957 } ··· 1020 982 adap->fe[0]->ops.tuner_ops.get_rf_strength; 1021 983 1022 984 /* attach SDR */ 1023 - dvb_attach(rtl2832_sdr_attach, adap->fe[0], &d->i2c_adap, 985 + dvb_attach_sdr(rtl2832_sdr_attach, adap->fe[0], &d->i2c_adap, 1024 986 &rtl28xxu_rtl2832_r820t_config, NULL); 1025 987 break; 1026 988 case TUNER_RTL2832_R828D:
-2
drivers/media/usb/gspca/sonixb.c
··· 1430 1430 {USB_DEVICE(0x0c45, 0x600d), SB(PAS106, 101)}, 1431 1431 {USB_DEVICE(0x0c45, 0x6011), SB(OV6650, 101)}, 1432 1432 {USB_DEVICE(0x0c45, 0x6019), SB(OV7630, 101)}, 1433 - #if !IS_ENABLED(CONFIG_USB_SN9C102) 1434 1433 {USB_DEVICE(0x0c45, 0x6024), SB(TAS5130CXX, 102)}, 1435 1434 {USB_DEVICE(0x0c45, 0x6025), SB(TAS5130CXX, 102)}, 1436 - #endif 1437 1435 {USB_DEVICE(0x0c45, 0x6027), SB(OV7630, 101)}, /* Genius Eye 310 */ 1438 1436 {USB_DEVICE(0x0c45, 0x6028), SB(PAS202, 102)}, 1439 1437 {USB_DEVICE(0x0c45, 0x6029), SB(PAS106, 102)},
+7 -5
drivers/media/v4l2-core/v4l2-compat-ioctl32.c
··· 178 178 179 179 static int __get_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __user *up) 180 180 { 181 + if (get_user(kp->type, &up->type)) 182 + return -EFAULT; 183 + 181 184 switch (kp->type) { 182 185 case V4L2_BUF_TYPE_VIDEO_CAPTURE: 183 186 case V4L2_BUF_TYPE_VIDEO_OUTPUT: ··· 207 204 208 205 static int get_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __user *up) 209 206 { 210 - if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_format32)) || 211 - get_user(kp->type, &up->type)) 212 - return -EFAULT; 207 + if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_format32))) 208 + return -EFAULT; 213 209 return __get_v4l2_format32(kp, up); 214 210 } 215 211 216 212 static int get_v4l2_create32(struct v4l2_create_buffers *kp, struct v4l2_create_buffers32 __user *up) 217 213 { 218 214 if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_create_buffers32)) || 219 - copy_from_user(kp, up, offsetof(struct v4l2_create_buffers32, format.fmt))) 220 - return -EFAULT; 215 + copy_from_user(kp, up, offsetof(struct v4l2_create_buffers32, format))) 216 + return -EFAULT; 221 217 return __get_v4l2_format32(&kp->format, &up->format); 222 218 } 223 219
+13 -2
drivers/memory/mvebu-devbus.c
··· 108 108 node->full_name); 109 109 return err; 110 110 } 111 - /* Convert bit width to byte width */ 112 - r.bus_width /= 8; 111 + 112 + /* 113 + * The bus width is encoded into the register as 0 for 8 bits, 114 + * and 1 for 16 bits, so we do the necessary conversion here. 115 + */ 116 + if (r.bus_width == 8) 117 + r.bus_width = 0; 118 + else if (r.bus_width == 16) 119 + r.bus_width = 1; 120 + else { 121 + dev_err(devbus->dev, "invalid bus width %d\n", r.bus_width); 122 + return -EINVAL; 123 + } 113 124 114 125 err = get_timing_param_ps(devbus, node, "devbus,badr-skew-ps", 115 126 &r.badr_skew);
+4
drivers/of/base.c
··· 1831 1831 if (!found) 1832 1832 return -ENODEV; 1833 1833 1834 + /* At early boot, bail out and defer setup to of_init() */ 1835 + if (!of_kset) 1836 + return found ? 0 : -ENODEV; 1837 + 1834 1838 /* Update the sysfs attribute */ 1835 1839 sysfs_remove_bin_file(&np->kobj, &oldprop->attr); 1836 1840 __of_add_property_sysfs(np, newprop);
+76 -16
drivers/pci/host/pci-mvebu.c
··· 293 293 return PCIBIOS_SUCCESSFUL; 294 294 } 295 295 296 + /* 297 + * Remove windows, starting from the largest ones to the smallest 298 + * ones. 299 + */ 300 + static void mvebu_pcie_del_windows(struct mvebu_pcie_port *port, 301 + phys_addr_t base, size_t size) 302 + { 303 + while (size) { 304 + size_t sz = 1 << (fls(size) - 1); 305 + 306 + mvebu_mbus_del_window(base, sz); 307 + base += sz; 308 + size -= sz; 309 + } 310 + } 311 + 312 + /* 313 + * MBus windows can only have a power of two size, but PCI BARs do not 314 + * have this constraint. Therefore, we have to split the PCI BAR into 315 + * areas each having a power of two size. We start from the largest 316 + * one (i.e highest order bit set in the size). 317 + */ 318 + static void mvebu_pcie_add_windows(struct mvebu_pcie_port *port, 319 + unsigned int target, unsigned int attribute, 320 + phys_addr_t base, size_t size, 321 + phys_addr_t remap) 322 + { 323 + size_t size_mapped = 0; 324 + 325 + while (size) { 326 + size_t sz = 1 << (fls(size) - 1); 327 + int ret; 328 + 329 + ret = mvebu_mbus_add_window_remap_by_id(target, attribute, base, 330 + sz, remap); 331 + if (ret) { 332 + dev_err(&port->pcie->pdev->dev, 333 + "Could not create MBus window at 0x%x, size 0x%x: %d\n", 334 + base, sz, ret); 335 + mvebu_pcie_del_windows(port, base - size_mapped, 336 + size_mapped); 337 + return; 338 + } 339 + 340 + size -= sz; 341 + size_mapped += sz; 342 + base += sz; 343 + if (remap != MVEBU_MBUS_NO_REMAP) 344 + remap += sz; 345 + } 346 + } 347 + 296 348 static void mvebu_pcie_handle_iobase_change(struct mvebu_pcie_port *port) 297 349 { 298 350 phys_addr_t iobase; ··· 356 304 357 305 /* If a window was configured, remove it */ 358 306 if (port->iowin_base) { 359 - mvebu_mbus_del_window(port->iowin_base, 360 - port->iowin_size); 307 + mvebu_pcie_del_windows(port, port->iowin_base, 308 + port->iowin_size); 361 309 port->iowin_base = 0; 362 310 port->iowin_size = 0; 363 311 } ··· 383 331 port->iowin_base = 
port->pcie->io.start + iobase; 384 332 port->iowin_size = ((0xFFF | ((port->bridge.iolimit & 0xF0) << 8) | 385 333 (port->bridge.iolimitupper << 16)) - 386 - iobase); 334 + iobase) + 1; 387 335 388 - mvebu_mbus_add_window_remap_by_id(port->io_target, port->io_attr, 389 - port->iowin_base, port->iowin_size, 390 - iobase); 336 + mvebu_pcie_add_windows(port, port->io_target, port->io_attr, 337 + port->iowin_base, port->iowin_size, 338 + iobase); 391 339 } 392 340 393 341 static void mvebu_pcie_handle_membase_change(struct mvebu_pcie_port *port) ··· 398 346 399 347 /* If a window was configured, remove it */ 400 348 if (port->memwin_base) { 401 - mvebu_mbus_del_window(port->memwin_base, 402 - port->memwin_size); 349 + mvebu_pcie_del_windows(port, port->memwin_base, 350 + port->memwin_size); 403 351 port->memwin_base = 0; 404 352 port->memwin_size = 0; 405 353 } ··· 416 364 port->memwin_base = ((port->bridge.membase & 0xFFF0) << 16); 417 365 port->memwin_size = 418 366 (((port->bridge.memlimit & 0xFFF0) << 16) | 0xFFFFF) - 419 - port->memwin_base; 367 + port->memwin_base + 1; 420 368 421 - mvebu_mbus_add_window_by_id(port->mem_target, port->mem_attr, 422 - port->memwin_base, port->memwin_size); 369 + mvebu_pcie_add_windows(port, port->mem_target, port->mem_attr, 370 + port->memwin_base, port->memwin_size, 371 + MVEBU_MBUS_NO_REMAP); 423 372 } 424 373 425 374 /* ··· 796 743 797 744 /* 798 745 * On the PCI-to-PCI bridge side, the I/O windows must have at 799 - * least a 64 KB size and be aligned on their size, and the 800 - * memory windows must have at least a 1 MB size and be 801 - * aligned on their size 746 + * least a 64 KB size and the memory windows must have at 747 + * least a 1 MB size. Moreover, MBus windows need to have a 748 + * base address aligned on their size, and their size must be 749 + * a power of two. This means that if the BAR doesn't have a 750 + * power of two size, several MBus windows will actually be 751 + * created. 
We need to ensure that the biggest MBus window 752 + * (which will be the first one) is aligned on its size, which 753 + * explains the rounddown_pow_of_two() being done here. 802 754 */ 803 755 if (res->flags & IORESOURCE_IO) 804 - return round_up(start, max_t(resource_size_t, SZ_64K, size)); 756 + return round_up(start, max_t(resource_size_t, SZ_64K, 757 + rounddown_pow_of_two(size))); 805 758 else if (res->flags & IORESOURCE_MEM) 806 - return round_up(start, max_t(resource_size_t, SZ_1M, size)); 759 + return round_up(start, max_t(resource_size_t, SZ_1M, 760 + rounddown_pow_of_two(size))); 807 761 else 808 762 return start; 809 763 }
+2 -2
drivers/pci/hotplug/shpchp_ctrl.c
··· 282 282 return WRONG_BUS_FREQUENCY; 283 283 } 284 284 285 - bsp = ctrl->pci_dev->bus->cur_bus_speed; 286 - msp = ctrl->pci_dev->bus->max_bus_speed; 285 + bsp = ctrl->pci_dev->subordinate->cur_bus_speed; 286 + msp = ctrl->pci_dev->subordinate->max_bus_speed; 287 287 288 288 /* Check if there are other slots or devices on the same bus */ 289 289 if (!list_empty(&ctrl->pci_dev->subordinate->devices))
+3 -2
drivers/pci/pci.c
··· 3067 3067 if (!pci_is_pcie(dev)) 3068 3068 return 1; 3069 3069 3070 - return pci_wait_for_pending(dev, PCI_EXP_DEVSTA, PCI_EXP_DEVSTA_TRPND); 3070 + return pci_wait_for_pending(dev, pci_pcie_cap(dev) + PCI_EXP_DEVSTA, 3071 + PCI_EXP_DEVSTA_TRPND); 3071 3072 } 3072 3073 EXPORT_SYMBOL(pci_wait_for_pending_transaction); 3073 3074 ··· 3110 3109 return 0; 3111 3110 3112 3111 /* Wait for Transaction Pending bit clean */ 3113 - if (pci_wait_for_pending(dev, PCI_AF_STATUS, PCI_AF_STATUS_TP)) 3112 + if (pci_wait_for_pending(dev, pos + PCI_AF_STATUS, PCI_AF_STATUS_TP)) 3114 3113 goto clear; 3115 3114 3116 3115 dev_err(&dev->dev, "transaction is not cleared; "
+3
drivers/rtc/rtc-hym8563.c
··· 569 569 if (IS_ERR(hym8563->rtc)) 570 570 return PTR_ERR(hym8563->rtc); 571 571 572 + /* the hym8563 alarm only supports a minute accuracy */ 573 + hym8563->rtc->uie_unsupported = 1; 574 + 572 575 #ifdef CONFIG_COMMON_CLK 573 576 hym8563_clkout_register_clk(hym8563); 574 577 #endif
+8 -6
drivers/sh/Makefile
··· 1 1 # 2 2 # Makefile for the SuperH specific drivers. 3 3 # 4 - obj-y := intc/ 4 + obj-$(CONFIG_SUPERH) += intc/ 5 + obj-$(CONFIG_ARCH_SHMOBILE_LEGACY) += intc/ 6 + ifneq ($(CONFIG_COMMON_CLK),y) 7 + obj-$(CONFIG_HAVE_CLK) += clk/ 8 + endif 9 + obj-$(CONFIG_MAPLE) += maple/ 10 + obj-$(CONFIG_SUPERHYWAY) += superhyway/ 5 11 6 - obj-$(CONFIG_HAVE_CLK) += clk/ 7 - obj-$(CONFIG_MAPLE) += maple/ 8 - obj-$(CONFIG_SUPERHYWAY) += superhyway/ 9 - 10 - obj-y += pm_runtime.o 12 + obj-y += pm_runtime.o
+19 -1
drivers/sh/pm_runtime.c
··· 50 50 .con_ids = { NULL, }, 51 51 }; 52 52 53 + static bool default_pm_on; 54 + 53 55 static int __init sh_pm_runtime_init(void) 54 56 { 57 + if (IS_ENABLED(CONFIG_ARCH_SHMOBILE_MULTI)) { 58 + if (!of_machine_is_compatible("renesas,emev2") && 59 + !of_machine_is_compatible("renesas,r7s72100") && 60 + !of_machine_is_compatible("renesas,r8a73a4") && 61 + !of_machine_is_compatible("renesas,r8a7740") && 62 + !of_machine_is_compatible("renesas,r8a7778") && 63 + !of_machine_is_compatible("renesas,r8a7779") && 64 + !of_machine_is_compatible("renesas,r8a7790") && 65 + !of_machine_is_compatible("renesas,r8a7791") && 66 + !of_machine_is_compatible("renesas,sh7372") && 67 + !of_machine_is_compatible("renesas,sh73a0")) 68 + return 0; 69 + } 70 + 71 + default_pm_on = true; 55 72 pm_clk_add_notifier(&platform_bus_type, &platform_bus_notifier); 56 73 return 0; 57 74 } ··· 76 59 77 60 static int __init sh_pm_runtime_late_init(void) 78 61 { 79 - pm_genpd_poweroff_unused(); 62 + if (default_pm_on) 63 + pm_genpd_poweroff_unused(); 80 64 return 0; 81 65 } 82 66 late_initcall(sh_pm_runtime_late_init);
-16
drivers/spi/spi-pxa2xx-dma.c
··· 29 29 struct sg_table *sgt; 30 30 void *buf, *pbuf; 31 31 32 - /* 33 - * Some DMA controllers have problems transferring buffers that are 34 - * not multiple of 4 bytes. So we truncate the transfer so that it 35 - * is suitable for such controllers, and handle the trailing bytes 36 - * manually after the DMA completes. 37 - * 38 - * REVISIT: It would be better if this information could be 39 - * retrieved directly from the DMA device in a similar way than 40 - * ->copy_align etc. is done. 41 - */ 42 - len = ALIGN(drv_data->len, 4); 43 - 44 32 if (dir == DMA_TO_DEVICE) { 45 33 dmadev = drv_data->tx_chan->device->dev; 46 34 sgt = &drv_data->tx_sgt; ··· 132 144 if (!error) { 133 145 pxa2xx_spi_unmap_dma_buffers(drv_data); 134 146 135 - /* Handle the last bytes of unaligned transfer */ 136 147 drv_data->tx += drv_data->tx_map_len; 137 - drv_data->write(drv_data); 138 - 139 148 drv_data->rx += drv_data->rx_map_len; 140 - drv_data->read(drv_data); 141 149 142 150 msg->actual_length += drv_data->len; 143 151 msg->state = pxa2xx_spi_next_transfer(drv_data);
+1 -1
drivers/spi/spi-qup.c
··· 734 734 int ret; 735 735 736 736 ret = pm_runtime_get_sync(&pdev->dev); 737 - if (ret) 737 + if (ret < 0) 738 738 return ret; 739 739 740 740 ret = spi_qup_set_state(controller, QUP_STATE_RESET);
+77 -47
drivers/spi/spi.c
··· 580 580 spi->master->set_cs(spi, !enable); 581 581 } 582 582 583 + #ifdef CONFIG_HAS_DMA 583 584 static int spi_map_buf(struct spi_master *master, struct device *dev, 584 585 struct sg_table *sgt, void *buf, size_t len, 585 586 enum dma_data_direction dir) ··· 638 637 } 639 638 } 640 639 641 - static int spi_map_msg(struct spi_master *master, struct spi_message *msg) 640 + static int __spi_map_msg(struct spi_master *master, struct spi_message *msg) 642 641 { 643 642 struct device *tx_dev, *rx_dev; 644 643 struct spi_transfer *xfer; 645 - void *tmp; 646 - unsigned int max_tx, max_rx; 647 644 int ret; 648 - 649 - if (master->flags & (SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX)) { 650 - max_tx = 0; 651 - max_rx = 0; 652 - 653 - list_for_each_entry(xfer, &msg->transfers, transfer_list) { 654 - if ((master->flags & SPI_MASTER_MUST_TX) && 655 - !xfer->tx_buf) 656 - max_tx = max(xfer->len, max_tx); 657 - if ((master->flags & SPI_MASTER_MUST_RX) && 658 - !xfer->rx_buf) 659 - max_rx = max(xfer->len, max_rx); 660 - } 661 - 662 - if (max_tx) { 663 - tmp = krealloc(master->dummy_tx, max_tx, 664 - GFP_KERNEL | GFP_DMA); 665 - if (!tmp) 666 - return -ENOMEM; 667 - master->dummy_tx = tmp; 668 - memset(tmp, 0, max_tx); 669 - } 670 - 671 - if (max_rx) { 672 - tmp = krealloc(master->dummy_rx, max_rx, 673 - GFP_KERNEL | GFP_DMA); 674 - if (!tmp) 675 - return -ENOMEM; 676 - master->dummy_rx = tmp; 677 - } 678 - 679 - if (max_tx || max_rx) { 680 - list_for_each_entry(xfer, &msg->transfers, 681 - transfer_list) { 682 - if (!xfer->tx_buf) 683 - xfer->tx_buf = master->dummy_tx; 684 - if (!xfer->rx_buf) 685 - xfer->rx_buf = master->dummy_rx; 686 - } 687 - } 688 - } 689 645 690 646 if (!master->can_dma) 691 647 return 0; ··· 699 741 } 700 742 701 743 return 0; 744 + } 745 + #else /* !CONFIG_HAS_DMA */ 746 + static inline int __spi_map_msg(struct spi_master *master, 747 + struct spi_message *msg) 748 + { 749 + return 0; 750 + } 751 + 752 + static inline int spi_unmap_msg(struct spi_master 
*master, 753 + struct spi_message *msg) 754 + { 755 + return 0; 756 + } 757 + #endif /* !CONFIG_HAS_DMA */ 758 + 759 + static int spi_map_msg(struct spi_master *master, struct spi_message *msg) 760 + { 761 + struct spi_transfer *xfer; 762 + void *tmp; 763 + unsigned int max_tx, max_rx; 764 + 765 + if (master->flags & (SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX)) { 766 + max_tx = 0; 767 + max_rx = 0; 768 + 769 + list_for_each_entry(xfer, &msg->transfers, transfer_list) { 770 + if ((master->flags & SPI_MASTER_MUST_TX) && 771 + !xfer->tx_buf) 772 + max_tx = max(xfer->len, max_tx); 773 + if ((master->flags & SPI_MASTER_MUST_RX) && 774 + !xfer->rx_buf) 775 + max_rx = max(xfer->len, max_rx); 776 + } 777 + 778 + if (max_tx) { 779 + tmp = krealloc(master->dummy_tx, max_tx, 780 + GFP_KERNEL | GFP_DMA); 781 + if (!tmp) 782 + return -ENOMEM; 783 + master->dummy_tx = tmp; 784 + memset(tmp, 0, max_tx); 785 + } 786 + 787 + if (max_rx) { 788 + tmp = krealloc(master->dummy_rx, max_rx, 789 + GFP_KERNEL | GFP_DMA); 790 + if (!tmp) 791 + return -ENOMEM; 792 + master->dummy_rx = tmp; 793 + } 794 + 795 + if (max_tx || max_rx) { 796 + list_for_each_entry(xfer, &msg->transfers, 797 + transfer_list) { 798 + if (!xfer->tx_buf) 799 + xfer->tx_buf = master->dummy_tx; 800 + if (!xfer->rx_buf) 801 + xfer->rx_buf = master->dummy_rx; 802 + } 803 + } 804 + } 805 + 806 + return __spi_map_msg(master, msg); 702 807 } 703 808 704 809 /* ··· 1172 1151 { 1173 1152 int ret; 1174 1153 1175 - master->queued = true; 1176 1154 master->transfer = spi_queued_transfer; 1177 1155 if (!master->transfer_one_message) 1178 1156 master->transfer_one_message = spi_transfer_one_message; ··· 1182 1162 dev_err(&master->dev, "problem initializing queue\n"); 1183 1163 goto err_init_queue; 1184 1164 } 1165 + master->queued = true; 1185 1166 ret = spi_start_queue(master); 1186 1167 if (ret) { 1187 1168 dev_err(&master->dev, "problem starting queue\n"); ··· 1192 1171 return 0; 1193 1172 1194 1173 err_start_queue: 1195 - 
err_init_queue: 1196 1174 spi_destroy_queue(master); 1175 + err_init_queue: 1197 1176 return ret; 1198 1177 } 1199 1178 ··· 1777 1756 */ 1778 1757 int spi_setup(struct spi_device *spi) 1779 1758 { 1780 - unsigned bad_bits; 1759 + unsigned bad_bits, ugly_bits; 1781 1760 int status = 0; 1782 1761 1783 1762 /* check mode to prevent that DUAL and QUAD set at the same time ··· 1797 1776 * that aren't supported with their current master 1798 1777 */ 1799 1778 bad_bits = spi->mode & ~spi->master->mode_bits; 1779 + ugly_bits = bad_bits & 1780 + (SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD); 1781 + if (ugly_bits) { 1782 + dev_warn(&spi->dev, 1783 + "setup: ignoring unsupported mode bits %x\n", 1784 + ugly_bits); 1785 + spi->mode &= ~ugly_bits; 1786 + bad_bits &= ~ugly_bits; 1787 + } 1800 1788 if (bad_bits) { 1801 1789 dev_err(&spi->dev, "setup: unsupported mode bits %x\n", 1802 1790 bad_bits);
+6 -1
drivers/staging/imx-drm/imx-drm-core.c
··· 517 517 of_node_put(port); 518 518 if (port == imx_crtc->port) { 519 519 ret = of_graph_parse_endpoint(ep, &endpoint); 520 - return ret ? ret : endpoint.id; 520 + return ret ? ret : endpoint.port; 521 521 } 522 522 } while (ep); 523 523 ··· 673 673 for_each_child_of_node(port, ep) { 674 674 remote = of_graph_get_remote_port_parent(ep); 675 675 if (!remote || !of_device_is_available(remote)) { 676 + of_node_put(remote); 677 + continue; 678 + } else if (!of_device_is_available(remote->parent)) { 679 + dev_warn(&pdev->dev, "parent device of %s is not available\n", 680 + remote->full_name); 676 681 of_node_put(remote); 677 682 continue; 678 683 }
+1 -1
drivers/staging/imx-drm/imx-tve.c
··· 582 582 tve->dev = dev; 583 583 spin_lock_init(&tve->lock); 584 584 585 - ddc_node = of_parse_phandle(np, "i2c-ddc-bus", 0); 585 + ddc_node = of_parse_phandle(np, "ddc-i2c-bus", 0); 586 586 if (ddc_node) { 587 587 tve->ddc = of_find_i2c_adapter_by_node(ddc_node); 588 588 of_node_put(ddc_node);
+11 -2
drivers/staging/media/davinci_vpfe/vpfe_video.c
··· 1247 1247 struct vpfe_fh *fh = vb2_get_drv_priv(vq); 1248 1248 struct vpfe_video_device *video = fh->video; 1249 1249 1250 - if (!vb2_is_streaming(vq)) 1251 - return 0; 1252 1250 /* release all active buffers */ 1251 + if (video->cur_frm == video->next_frm) { 1252 + vb2_buffer_done(&video->cur_frm->vb, VB2_BUF_STATE_ERROR); 1253 + } else { 1254 + if (video->cur_frm != NULL) 1255 + vb2_buffer_done(&video->cur_frm->vb, 1256 + VB2_BUF_STATE_ERROR); 1257 + if (video->next_frm != NULL) 1258 + vb2_buffer_done(&video->next_frm->vb, 1259 + VB2_BUF_STATE_ERROR); 1260 + } 1261 + 1253 1262 while (!list_empty(&video->dma_queue)) { 1254 1263 video->next_frm = list_entry(video->dma_queue.next, 1255 1264 struct vpfe_cap_buffer, list);
-2
drivers/staging/media/sn9c102/sn9c102_devtable.h
··· 48 48 { SN9C102_USB_DEVICE(0x0c45, 0x600d, BRIDGE_SN9C102), }, 49 49 /* { SN9C102_USB_DEVICE(0x0c45, 0x6011, BRIDGE_SN9C102), }, OV6650 */ 50 50 { SN9C102_USB_DEVICE(0x0c45, 0x6019, BRIDGE_SN9C102), }, 51 - #endif 52 51 { SN9C102_USB_DEVICE(0x0c45, 0x6024, BRIDGE_SN9C102), }, 53 52 { SN9C102_USB_DEVICE(0x0c45, 0x6025, BRIDGE_SN9C102), }, 54 - #if !defined CONFIG_USB_GSPCA_SONIXB && !defined CONFIG_USB_GSPCA_SONIXB_MODULE 55 53 { SN9C102_USB_DEVICE(0x0c45, 0x6028, BRIDGE_SN9C102), }, 56 54 { SN9C102_USB_DEVICE(0x0c45, 0x6029, BRIDGE_SN9C102), }, 57 55 { SN9C102_USB_DEVICE(0x0c45, 0x602a, BRIDGE_SN9C102), },
-2
drivers/staging/rtl8723au/os_dep/os_intfs.c
··· 953 953 #endif /* CONFIG_8723AU_P2P */ 954 954 955 955 rtw_scan_abort23a(padapter); 956 - /* set this at the end */ 957 - padapter->rtw_wdev->iftype = NL80211_IFTYPE_MONITOR; 958 956 959 957 RT_TRACE(_module_os_intfs_c_, _drv_info_, ("-871x_drv - drv_close\n")); 960 958 DBG_8723A("-871x_drv - drv_close, bup =%d\n", padapter->bup);
+1 -1
drivers/staging/rtl8723au/os_dep/usb_ops_linux.c
··· 26 26 if (addr == RECV_BULK_IN_ADDR) { 27 27 pipe = usb_rcvbulkpipe(pusbd, pdvobj->RtInPipe[0]); 28 28 } else if (addr == RECV_INT_IN_ADDR) { 29 - pipe = usb_rcvbulkpipe(pusbd, pdvobj->RtInPipe[1]); 29 + pipe = usb_rcvintpipe(pusbd, pdvobj->RtInPipe[1]); 30 30 } else if (addr < HW_QUEUE_ENTRY) { 31 31 ep_num = pdvobj->Queue2Pipe[addr]; 32 32 pipe = usb_sndbulkpipe(pusbd, ep_num);
+3 -1
drivers/target/iscsi/iscsi_target.c
··· 1593 1593 * Initiator is expecting a NopIN ping reply.. 1594 1594 */ 1595 1595 if (hdr->itt != RESERVED_ITT) { 1596 - BUG_ON(!cmd); 1596 + if (!cmd) 1597 + return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR, 1598 + (unsigned char *)hdr); 1597 1599 1598 1600 spin_lock_bh(&conn->cmd_lock); 1599 1601 list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
+1
drivers/target/iscsi/iscsi_target_core.h
··· 775 775 int np_ip_proto; 776 776 int np_sock_type; 777 777 enum np_thread_state_table np_thread_state; 778 + bool enabled; 778 779 enum iscsi_timer_flags_table np_login_timer_flags; 779 780 u32 np_exports; 780 781 enum np_flags_table np_flags;
+27 -1
drivers/target/iscsi/iscsi_target_login.c
··· 436 436 } 437 437 off = mrdsl % PAGE_SIZE; 438 438 if (!off) 439 - return 0; 439 + goto check_prot; 440 440 441 441 if (mrdsl < PAGE_SIZE) 442 442 mrdsl = PAGE_SIZE; ··· 451 451 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, 452 452 ISCSI_LOGIN_STATUS_NO_RESOURCES); 453 453 return -1; 454 + } 455 + /* 456 + * ISER currently requires that ImmediateData + Unsolicited 457 + * Data be disabled when protection / signature MRs are enabled. 458 + */ 459 + check_prot: 460 + if (sess->se_sess->sup_prot_ops & 461 + (TARGET_PROT_DOUT_STRIP | TARGET_PROT_DOUT_PASS | 462 + TARGET_PROT_DOUT_INSERT)) { 463 + 464 + sprintf(buf, "ImmediateData=No"); 465 + if (iscsi_change_param_value(buf, conn->param_list, 0) < 0) { 466 + iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, 467 + ISCSI_LOGIN_STATUS_NO_RESOURCES); 468 + return -1; 469 + } 470 + 471 + sprintf(buf, "InitialR2T=Yes"); 472 + if (iscsi_change_param_value(buf, conn->param_list, 0) < 0) { 473 + iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, 474 + ISCSI_LOGIN_STATUS_NO_RESOURCES); 475 + return -1; 476 + } 477 + pr_debug("Forcing ImmediateData=No + InitialR2T=Yes for" 478 + " T10-PI enabled ISER session\n"); 454 479 } 455 480 } 456 481 ··· 1009 984 } 1010 985 1011 986 np->np_transport = t; 987 + np->enabled = true; 1012 988 return 0; 1013 989 } 1014 990
+1
drivers/target/iscsi/iscsi_target_tpg.c
··· 184 184 return; 185 185 } 186 186 187 + tpg_np->tpg_np->enabled = false; 187 188 iscsit_reset_np_thread(tpg_np->tpg_np, tpg_np, tpg, shutdown); 188 189 } 189 190
+8 -4
drivers/target/target_core_device.c
··· 798 798 pr_err("emulate_write_cache not supported for pSCSI\n"); 799 799 return -EINVAL; 800 800 } 801 - if (dev->transport->get_write_cache) { 802 - pr_warn("emulate_write_cache cannot be changed when underlying" 803 - " HW reports WriteCacheEnabled, ignoring request\n"); 804 - return 0; 801 + if (flag && 802 + dev->transport->get_write_cache) { 803 + pr_err("emulate_write_cache not supported for this device\n"); 804 + return -EINVAL; 805 805 } 806 806 807 807 dev->dev_attrib.emulate_write_cache = flag; ··· 936 936 return 0; 937 937 } 938 938 if (!dev->transport->init_prot || !dev->transport->free_prot) { 939 + /* 0 is only allowed value for non-supporting backends */ 940 + if (flag == 0) 941 + return 0; 942 + 939 943 pr_err("DIF protection not supported by backend: %s\n", 940 944 dev->transport->name); 941 945 return -ENOSYS;
+1 -1
drivers/target/target_core_transport.c
··· 1113 1113 init_completion(&cmd->cmd_wait_comp); 1114 1114 init_completion(&cmd->task_stop_comp); 1115 1115 spin_lock_init(&cmd->t_state_lock); 1116 + kref_init(&cmd->cmd_kref); 1116 1117 cmd->transport_state = CMD_T_DEV_ACTIVE; 1117 1118 1118 1119 cmd->se_tfo = tfo; ··· 2358 2357 unsigned long flags; 2359 2358 int ret = 0; 2360 2359 2361 - kref_init(&se_cmd->cmd_kref); 2362 2360 /* 2363 2361 * Add a second kref if the fabric caller is expecting to handle 2364 2362 * fabric acknowledgement that requires two target_put_sess_cmd()
+4 -4
drivers/target/tcm_fc/tfc_cmd.c
··· 90 90 { 91 91 struct fc_frame *fp; 92 92 struct fc_lport *lport; 93 - struct se_session *se_sess; 93 + struct ft_sess *sess; 94 94 95 95 if (!cmd) 96 96 return; 97 - se_sess = cmd->sess->se_sess; 97 + sess = cmd->sess; 98 98 fp = cmd->req_frame; 99 99 lport = fr_dev(fp); 100 100 if (fr_seq(fp)) 101 101 lport->tt.seq_release(fr_seq(fp)); 102 102 fc_frame_free(fp); 103 - percpu_ida_free(&se_sess->sess_tag_pool, cmd->se_cmd.map_tag); 104 - ft_sess_put(cmd->sess); /* undo get from lookup at recv */ 103 + percpu_ida_free(&sess->se_sess->sess_tag_pool, cmd->se_cmd.map_tag); 104 + ft_sess_put(sess); /* undo get from lookup at recv */ 105 105 } 106 106 107 107 void ft_release_cmd(struct se_cmd *se_cmd)
+30 -11
drivers/xen/events/events_fifo.c
··· 66 66 static event_word_t *event_array[MAX_EVENT_ARRAY_PAGES] __read_mostly; 67 67 static unsigned event_array_pages __read_mostly; 68 68 69 + /* 70 + * sync_set_bit() and friends must be unsigned long aligned on non-x86 71 + * platforms. 72 + */ 73 + #if !defined(CONFIG_X86) && BITS_PER_LONG > 32 74 + 75 + #define BM(w) (unsigned long *)((unsigned long)w & ~0x7UL) 76 + #define EVTCHN_FIFO_BIT(b, w) \ 77 + (((unsigned long)w & 0x4UL) ? (EVTCHN_FIFO_ ##b + 32) : EVTCHN_FIFO_ ##b) 78 + 79 + #else 80 + 69 81 #define BM(w) ((unsigned long *)(w)) 82 + #define EVTCHN_FIFO_BIT(b, w) EVTCHN_FIFO_ ##b 83 + 84 + #endif 70 85 71 86 static inline event_word_t *event_word_from_port(unsigned port) 72 87 { ··· 176 161 static void evtchn_fifo_clear_pending(unsigned port) 177 162 { 178 163 event_word_t *word = event_word_from_port(port); 179 - sync_clear_bit(EVTCHN_FIFO_PENDING, BM(word)); 164 + sync_clear_bit(EVTCHN_FIFO_BIT(PENDING, word), BM(word)); 180 165 } 181 166 182 167 static void evtchn_fifo_set_pending(unsigned port) 183 168 { 184 169 event_word_t *word = event_word_from_port(port); 185 - sync_set_bit(EVTCHN_FIFO_PENDING, BM(word)); 170 + sync_set_bit(EVTCHN_FIFO_BIT(PENDING, word), BM(word)); 186 171 } 187 172 188 173 static bool evtchn_fifo_is_pending(unsigned port) 189 174 { 190 175 event_word_t *word = event_word_from_port(port); 191 - return sync_test_bit(EVTCHN_FIFO_PENDING, BM(word)); 176 + return sync_test_bit(EVTCHN_FIFO_BIT(PENDING, word), BM(word)); 192 177 } 193 178 194 179 static bool evtchn_fifo_test_and_set_mask(unsigned port) 195 180 { 196 181 event_word_t *word = event_word_from_port(port); 197 - return sync_test_and_set_bit(EVTCHN_FIFO_MASKED, BM(word)); 182 + return sync_test_and_set_bit(EVTCHN_FIFO_BIT(MASKED, word), BM(word)); 198 183 } 199 184 200 185 static void evtchn_fifo_mask(unsigned port) 201 186 { 202 187 event_word_t *word = event_word_from_port(port); 203 - sync_set_bit(EVTCHN_FIFO_MASKED, BM(word)); 188 + 
sync_set_bit(EVTCHN_FIFO_BIT(MASKED, word), BM(word)); 204 189 } 205 190 191 + static bool evtchn_fifo_is_masked(unsigned port) 192 + { 193 + event_word_t *word = event_word_from_port(port); 194 + return sync_test_bit(EVTCHN_FIFO_BIT(MASKED, word), BM(word)); 195 + } 206 196 /* 207 197 * Clear MASKED, spinning if BUSY is set. 208 198 */ ··· 231 211 BUG_ON(!irqs_disabled()); 232 212 233 213 clear_masked(word); 234 - if (sync_test_bit(EVTCHN_FIFO_PENDING, BM(word))) { 214 + if (evtchn_fifo_is_pending(port)) { 235 215 struct evtchn_unmask unmask = { .port = port }; 236 216 (void)HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &unmask); 237 217 } ··· 263 243 264 244 static void consume_one_event(unsigned cpu, 265 245 struct evtchn_fifo_control_block *control_block, 266 - unsigned priority, uint32_t *ready) 246 + unsigned priority, unsigned long *ready) 267 247 { 268 248 struct evtchn_fifo_queue *q = &per_cpu(cpu_queue, cpu); 269 249 uint32_t head; ··· 293 273 * copy of the ready word. 294 274 */ 295 275 if (head == 0) 296 - clear_bit(priority, BM(ready)); 276 + clear_bit(priority, ready); 297 277 298 - if (sync_test_bit(EVTCHN_FIFO_PENDING, BM(word)) 299 - && !sync_test_bit(EVTCHN_FIFO_MASKED, BM(word))) 278 + if (evtchn_fifo_is_pending(port) && !evtchn_fifo_is_masked(port)) 300 279 handle_irq_for_port(port); 301 280 302 281 q->head[priority] = head; ··· 304 285 static void evtchn_fifo_handle_events(unsigned cpu) 305 286 { 306 287 struct evtchn_fifo_control_block *control_block; 307 - uint32_t ready; 288 + unsigned long ready; 308 289 unsigned q; 309 290 310 291 control_block = per_cpu(cpu_control_block, cpu);
+5 -1
fs/btrfs/ioctl.c
··· 3120 3120 } else if (type == BTRFS_FILE_EXTENT_INLINE) { 3121 3121 u64 skip = 0; 3122 3122 u64 trim = 0; 3123 + u64 aligned_end = 0; 3124 + 3123 3125 if (off > key.offset) { 3124 3126 skip = off - key.offset; 3125 3127 new_key.offset += skip; ··· 3138 3136 size -= skip + trim; 3139 3137 datal -= skip + trim; 3140 3138 3139 + aligned_end = ALIGN(new_key.offset + datal, 3140 + root->sectorsize); 3141 3141 ret = btrfs_drop_extents(trans, root, inode, 3142 3142 new_key.offset, 3143 - new_key.offset + datal, 3143 + aligned_end, 3144 3144 1); 3145 3145 if (ret) { 3146 3146 if (ret != -EOPNOTSUPP)
+1 -1
fs/btrfs/send.c
··· 1668 1668 goto out; 1669 1669 } 1670 1670 1671 - if (key.type == BTRFS_INODE_REF_KEY) { 1671 + if (found_key.type == BTRFS_INODE_REF_KEY) { 1672 1672 struct btrfs_inode_ref *iref; 1673 1673 iref = btrfs_item_ptr(path->nodes[0], path->slots[0], 1674 1674 struct btrfs_inode_ref);
+3
fs/cifs/inode.c
··· 1737 1737 if (cifs_i->time == 0) 1738 1738 return true; 1739 1739 1740 + if (!cifs_sb->actimeo) 1741 + return true; 1742 + 1740 1743 if (!time_in_range(jiffies, cifs_i->time, 1741 1744 cifs_i->time + cifs_sb->actimeo)) 1742 1745 return true;
+3 -3
fs/exec.c
··· 657 657 unsigned long rlim_stack; 658 658 659 659 #ifdef CONFIG_STACK_GROWSUP 660 - /* Limit stack size to 1GB */ 660 + /* Limit stack size */ 661 661 stack_base = rlimit_max(RLIMIT_STACK); 662 - if (stack_base > (1 << 30)) 663 - stack_base = 1 << 30; 662 + if (stack_base > STACK_SIZE_MAX) 663 + stack_base = STACK_SIZE_MAX; 664 664 665 665 /* Make sure we didn't let the argument array grow too large. */ 666 666 if (vma->vm_end - vma->vm_start > stack_base)
+10 -7
fs/kernfs/file.c
··· 610 610 static int kernfs_fop_open(struct inode *inode, struct file *file) 611 611 { 612 612 struct kernfs_node *kn = file->f_path.dentry->d_fsdata; 613 + struct kernfs_root *root = kernfs_root(kn); 613 614 const struct kernfs_ops *ops; 614 615 struct kernfs_open_file *of; 615 616 bool has_read, has_write, has_mmap; ··· 625 624 has_write = ops->write || ops->mmap; 626 625 has_mmap = ops->mmap; 627 626 628 - /* check perms and supported operations */ 629 - if ((file->f_mode & FMODE_WRITE) && 630 - (!(inode->i_mode & S_IWUGO) || !has_write)) 631 - goto err_out; 627 + /* see the flag definition for details */ 628 + if (root->flags & KERNFS_ROOT_EXTRA_OPEN_PERM_CHECK) { 629 + if ((file->f_mode & FMODE_WRITE) && 630 + (!(inode->i_mode & S_IWUGO) || !has_write)) 631 + goto err_out; 632 632 633 - if ((file->f_mode & FMODE_READ) && 634 - (!(inode->i_mode & S_IRUGO) || !has_read)) 635 - goto err_out; 633 + if ((file->f_mode & FMODE_READ) && 634 + (!(inode->i_mode & S_IRUGO) || !has_read)) 635 + goto err_out; 636 + } 636 637 637 638 /* allocate a kernfs_open_file for the file */ 638 639 error = -ENOMEM;
+24 -12
fs/locks.c
··· 389 389 fl->fl_ops = NULL; 390 390 fl->fl_lmops = NULL; 391 391 392 - /* Ensure that fl->fl_filp has compatible f_mode */ 393 - switch (l->l_type) { 394 - case F_RDLCK: 395 - if (!(filp->f_mode & FMODE_READ)) 396 - return -EBADF; 397 - break; 398 - case F_WRLCK: 399 - if (!(filp->f_mode & FMODE_WRITE)) 400 - return -EBADF; 401 - break; 402 - } 403 - 404 392 return assign_type(fl, l->l_type); 405 393 } 406 394 ··· 2022 2034 return error; 2023 2035 } 2024 2036 2037 + /* Ensure that fl->fl_filp has compatible f_mode for F_SETLK calls */ 2038 + static int 2039 + check_fmode_for_setlk(struct file_lock *fl) 2040 + { 2041 + switch (fl->fl_type) { 2042 + case F_RDLCK: 2043 + if (!(fl->fl_file->f_mode & FMODE_READ)) 2044 + return -EBADF; 2045 + break; 2046 + case F_WRLCK: 2047 + if (!(fl->fl_file->f_mode & FMODE_WRITE)) 2048 + return -EBADF; 2049 + } 2050 + return 0; 2051 + } 2052 + 2025 2053 /* Apply the lock described by l to an open file descriptor. 2026 2054 * This implements both the F_SETLK and F_SETLKW commands of fcntl(). 2027 2055 */ ··· 2072 2068 2073 2069 again: 2074 2070 error = flock_to_posix_lock(filp, file_lock, &flock); 2071 + if (error) 2072 + goto out; 2073 + 2074 + error = check_fmode_for_setlk(file_lock); 2075 2075 if (error) 2076 2076 goto out; 2077 2077 ··· 2211 2203 2212 2204 again: 2213 2205 error = flock64_to_posix_lock(filp, file_lock, &flock); 2206 + if (error) 2207 + goto out; 2208 + 2209 + error = check_fmode_for_setlk(file_lock); 2214 2210 if (error) 2215 2211 goto out; 2216 2212
+9 -8
fs/nfsd/nfs4acl.c
··· 402 402 * by uid/gid. */ 403 403 int i, j; 404 404 405 - if (pacl->a_count <= 4) 406 - return; /* no users or groups */ 405 + /* no users or groups */ 406 + if (!pacl || pacl->a_count <= 4) 407 + return; 408 + 407 409 i = 1; 408 410 while (pacl->a_entries[i].e_tag == ACL_USER) 409 411 i++; ··· 532 530 533 531 /* 534 532 * ACLs with no ACEs are treated differently in the inheritable 535 - * and effective cases: when there are no inheritable ACEs, we 536 - * set a zero-length default posix acl: 533 + * and effective cases: when there are no inheritable ACEs, 534 + * calls ->set_acl with a NULL ACL structure. 537 535 */ 538 - if (state->empty && (flags & NFS4_ACL_TYPE_DEFAULT)) { 539 - pacl = posix_acl_alloc(0, GFP_KERNEL); 540 - return pacl ? pacl : ERR_PTR(-ENOMEM); 541 - } 536 + if (state->empty && (flags & NFS4_ACL_TYPE_DEFAULT)) 537 + return NULL; 538 + 542 539 /* 543 540 * When there are no effective ACEs, the following will end 544 541 * up setting a 3-element effective posix ACL with all
+13 -12
fs/nfsd/nfs4state.c
··· 1078 1078 return NULL; 1079 1079 } 1080 1080 clp->cl_name.len = name.len; 1081 + INIT_LIST_HEAD(&clp->cl_sessions); 1082 + idr_init(&clp->cl_stateids); 1083 + atomic_set(&clp->cl_refcount, 0); 1084 + clp->cl_cb_state = NFSD4_CB_UNKNOWN; 1085 + INIT_LIST_HEAD(&clp->cl_idhash); 1086 + INIT_LIST_HEAD(&clp->cl_openowners); 1087 + INIT_LIST_HEAD(&clp->cl_delegations); 1088 + INIT_LIST_HEAD(&clp->cl_lru); 1089 + INIT_LIST_HEAD(&clp->cl_callbacks); 1090 + INIT_LIST_HEAD(&clp->cl_revoked); 1091 + spin_lock_init(&clp->cl_lock); 1092 + rpc_init_wait_queue(&clp->cl_cb_waitq, "Backchannel slot table"); 1081 1093 return clp; 1082 1094 } 1083 1095 ··· 1107 1095 WARN_ON_ONCE(atomic_read(&ses->se_ref)); 1108 1096 free_session(ses); 1109 1097 } 1098 + rpc_destroy_wait_queue(&clp->cl_cb_waitq); 1110 1099 free_svc_cred(&clp->cl_cred); 1111 1100 kfree(clp->cl_name.data); 1112 1101 idr_destroy(&clp->cl_stateids); ··· 1360 1347 if (clp == NULL) 1361 1348 return NULL; 1362 1349 1363 - INIT_LIST_HEAD(&clp->cl_sessions); 1364 1350 ret = copy_cred(&clp->cl_cred, &rqstp->rq_cred); 1365 1351 if (ret) { 1366 1352 spin_lock(&nn->client_lock); ··· 1367 1355 spin_unlock(&nn->client_lock); 1368 1356 return NULL; 1369 1357 } 1370 - idr_init(&clp->cl_stateids); 1371 - atomic_set(&clp->cl_refcount, 0); 1372 - clp->cl_cb_state = NFSD4_CB_UNKNOWN; 1373 - INIT_LIST_HEAD(&clp->cl_idhash); 1374 - INIT_LIST_HEAD(&clp->cl_openowners); 1375 - INIT_LIST_HEAD(&clp->cl_delegations); 1376 - INIT_LIST_HEAD(&clp->cl_lru); 1377 - INIT_LIST_HEAD(&clp->cl_callbacks); 1378 - INIT_LIST_HEAD(&clp->cl_revoked); 1379 - spin_lock_init(&clp->cl_lock); 1380 1358 nfsd4_init_callback(&clp->cl_cb_null); 1381 1359 clp->cl_time = get_seconds(); 1382 1360 clear_bit(0, &clp->cl_cb_slot_busy); 1383 - rpc_init_wait_queue(&clp->cl_cb_waitq, "Backchannel slot table"); 1384 1361 copy_verf(clp, verf); 1385 1362 rpc_copy_addr((struct sockaddr *) &clp->cl_addr, sa); 1386 1363 gen_confirm(clp);
+2 -1
fs/sysfs/file.c
··· 47 47 ssize_t count; 48 48 char *buf; 49 49 50 - /* acquire buffer and ensure that it's >= PAGE_SIZE */ 50 + /* acquire buffer and ensure that it's >= PAGE_SIZE and clear */ 51 51 count = seq_get_buf(sf, &buf); 52 52 if (count < PAGE_SIZE) { 53 53 seq_commit(sf, -1); 54 54 return 0; 55 55 } 56 + memset(buf, 0, PAGE_SIZE); 56 57 57 58 /* 58 59 * Invoke show(). Control may reach here via seq file lseek even
+2 -1
fs/sysfs/mount.c
··· 63 63 { 64 64 int err; 65 65 66 - sysfs_root = kernfs_create_root(NULL, 0, NULL); 66 + sysfs_root = kernfs_create_root(NULL, KERNFS_ROOT_EXTRA_OPEN_PERM_CHECK, 67 + NULL); 67 68 if (IS_ERR(sysfs_root)) 68 69 return PTR_ERR(sysfs_root); 69 70
+1 -1
fs/xfs/xfs_export.c
··· 237 237 238 238 if (!lsn) 239 239 return 0; 240 - return _xfs_log_force_lsn(mp, lsn, XFS_LOG_SYNC, NULL); 240 + return -_xfs_log_force_lsn(mp, lsn, XFS_LOG_SYNC, NULL); 241 241 } 242 242 243 243 const struct export_operations xfs_export_operations = {
+4 -4
fs/xfs/xfs_file.c
··· 155 155 156 156 if (!lsn) 157 157 return 0; 158 - return _xfs_log_force_lsn(mp, lsn, XFS_LOG_SYNC, NULL); 158 + return -_xfs_log_force_lsn(mp, lsn, XFS_LOG_SYNC, NULL); 159 159 } 160 160 161 161 STATIC int ··· 295 295 xfs_rw_ilock(ip, XFS_IOLOCK_EXCL); 296 296 297 297 if (inode->i_mapping->nrpages) { 298 - ret = -filemap_write_and_wait_range( 298 + ret = filemap_write_and_wait_range( 299 299 VFS_I(ip)->i_mapping, 300 300 pos, -1); 301 301 if (ret) { ··· 837 837 unsigned blksize_mask = (1 << inode->i_blkbits) - 1; 838 838 839 839 if (offset & blksize_mask || len & blksize_mask) { 840 - error = -EINVAL; 840 + error = EINVAL; 841 841 goto out_unlock; 842 842 } 843 843 ··· 846 846 * in which case it is effectively a truncate operation 847 847 */ 848 848 if (offset + len >= i_size_read(inode)) { 849 - error = -EINVAL; 849 + error = EINVAL; 850 850 goto out_unlock; 851 851 } 852 852
+6 -6
fs/xfs/xfs_iops.c
··· 72 72 int error = 0; 73 73 74 74 for (xattr = xattr_array; xattr->name != NULL; xattr++) { 75 - error = xfs_attr_set(ip, xattr->name, xattr->value, 76 - xattr->value_len, ATTR_SECURE); 75 + error = -xfs_attr_set(ip, xattr->name, xattr->value, 76 + xattr->value_len, ATTR_SECURE); 77 77 if (error < 0) 78 78 break; 79 79 } ··· 93 93 struct inode *dir, 94 94 const struct qstr *qstr) 95 95 { 96 - return security_inode_init_security(inode, dir, qstr, 97 - &xfs_initxattrs, NULL); 96 + return -security_inode_init_security(inode, dir, qstr, 97 + &xfs_initxattrs, NULL); 98 98 } 99 99 100 100 static void ··· 173 173 174 174 #ifdef CONFIG_XFS_POSIX_ACL 175 175 if (default_acl) { 176 - error = xfs_set_acl(inode, default_acl, ACL_TYPE_DEFAULT); 176 + error = -xfs_set_acl(inode, default_acl, ACL_TYPE_DEFAULT); 177 177 if (error) 178 178 goto out_cleanup_inode; 179 179 } 180 180 if (acl) { 181 - error = xfs_set_acl(inode, acl, ACL_TYPE_ACCESS); 181 + error = -xfs_set_acl(inode, acl, ACL_TYPE_ACCESS); 182 182 if (error) 183 183 goto out_cleanup_inode; 184 184 }
+14 -12
fs/xfs/xfs_qm.c
··· 843 843 844 844 qinf = mp->m_quotainfo = kmem_zalloc(sizeof(xfs_quotainfo_t), KM_SLEEP); 845 845 846 - if ((error = list_lru_init(&qinf->qi_lru))) { 847 - kmem_free(qinf); 848 - mp->m_quotainfo = NULL; 849 - return error; 850 - } 846 + error = -list_lru_init(&qinf->qi_lru); 847 + if (error) 848 + goto out_free_qinf; 851 849 852 850 /* 853 851 * See if quotainodes are setup, and if not, allocate them, 854 852 * and change the superblock accordingly. 855 853 */ 856 - if ((error = xfs_qm_init_quotainos(mp))) { 857 - list_lru_destroy(&qinf->qi_lru); 858 - kmem_free(qinf); 859 - mp->m_quotainfo = NULL; 860 - return error; 861 - } 854 + error = xfs_qm_init_quotainos(mp); 855 + if (error) 856 + goto out_free_lru; 862 857 863 858 INIT_RADIX_TREE(&qinf->qi_uquota_tree, GFP_NOFS); 864 859 INIT_RADIX_TREE(&qinf->qi_gquota_tree, GFP_NOFS); ··· 913 918 qinf->qi_isoftlimit = be64_to_cpu(ddqp->d_ino_softlimit); 914 919 qinf->qi_rtbhardlimit = be64_to_cpu(ddqp->d_rtb_hardlimit); 915 920 qinf->qi_rtbsoftlimit = be64_to_cpu(ddqp->d_rtb_softlimit); 916 - 921 + 917 922 xfs_qm_dqdestroy(dqp); 918 923 } else { 919 924 qinf->qi_btimelimit = XFS_QM_BTIMELIMIT; ··· 930 935 qinf->qi_shrinker.flags = SHRINKER_NUMA_AWARE; 931 936 register_shrinker(&qinf->qi_shrinker); 932 937 return 0; 938 + 939 + out_free_lru: 940 + list_lru_destroy(&qinf->qi_lru); 941 + out_free_qinf: 942 + kmem_free(qinf); 943 + mp->m_quotainfo = NULL; 944 + return error; 933 945 } 934 946 935 947
+2 -2
fs/xfs/xfs_super.c
··· 1433 1433 if (error) 1434 1434 goto out_free_fsname; 1435 1435 1436 - error = xfs_init_mount_workqueues(mp); 1436 + error = -xfs_init_mount_workqueues(mp); 1437 1437 if (error) 1438 1438 goto out_close_devices; 1439 1439 1440 - error = xfs_icsb_init_counters(mp); 1440 + error = -xfs_icsb_init_counters(mp); 1441 1441 if (error) 1442 1442 goto out_destroy_workqueues; 1443 1443
+1 -1
include/asm-generic/resource.h
··· 12 12 [RLIMIT_CPU] = { RLIM_INFINITY, RLIM_INFINITY }, \ 13 13 [RLIMIT_FSIZE] = { RLIM_INFINITY, RLIM_INFINITY }, \ 14 14 [RLIMIT_DATA] = { RLIM_INFINITY, RLIM_INFINITY }, \ 15 - [RLIMIT_STACK] = { _STK_LIM, _STK_LIM_MAX }, \ 15 + [RLIMIT_STACK] = { _STK_LIM, RLIM_INFINITY }, \ 16 16 [RLIMIT_CORE] = { 0, RLIM_INFINITY }, \ 17 17 [RLIMIT_RSS] = { RLIM_INFINITY, RLIM_INFINITY }, \ 18 18 [RLIMIT_NPROC] = { 0, 0 }, \
include/dt-bindings/clk/at91.h include/dt-bindings/clock/at91.h
+15
include/linux/cgroup.h
··· 473 473 }; 474 474 475 475 extern struct cgroup_root cgrp_dfl_root; 476 + extern struct css_set init_css_set; 476 477 477 478 static inline bool cgroup_on_dfl(const struct cgroup *cgrp) 478 479 { ··· 699 698 int subsys_id) 700 699 { 701 700 return task_css_check(task, subsys_id, false); 701 + } 702 + 703 + /** 704 + * task_css_is_root - test whether a task belongs to the root css 705 + * @task: the target task 706 + * @subsys_id: the target subsystem ID 707 + * 708 + * Test whether @task belongs to the root css on the specified subsystem. 709 + * May be invoked in any context. 710 + */ 711 + static inline bool task_css_is_root(struct task_struct *task, int subsys_id) 712 + { 713 + return task_css_check(task, subsys_id, true) == 714 + init_css_set.subsys[subsys_id]; 702 715 } 703 716 704 717 static inline struct cgroup *task_cgroup(struct task_struct *task,
+3
include/linux/firewire.h
··· 367 367 return tag << 14 | channel << 8 | sy; 368 368 } 369 369 370 + void fw_schedule_bus_reset(struct fw_card *card, bool delayed, 371 + bool short_reset); 372 + 370 373 struct fw_descriptor { 371 374 struct list_head link; 372 375 size_t length;
+5
include/linux/interrupt.h
··· 272 272 return -EINVAL; 273 273 } 274 274 275 + static inline int irq_force_affinity(unsigned int irq, const struct cpumask *cpumask) 276 + { 277 + return 0; 278 + } 279 + 275 280 static inline int irq_can_set_affinity(unsigned int irq) 276 281 { 277 282 return 0;
+18 -1
include/linux/kernfs.h
··· 50 50 51 51 /* @flags for kernfs_create_root() */ 52 52 enum kernfs_root_flag { 53 - KERNFS_ROOT_CREATE_DEACTIVATED = 0x0001, 53 + /* 54 + * kernfs_nodes are created in the deactivated state and invisible. 55 + * They require explicit kernfs_activate() to become visible. This 56 + * can be used to make related nodes become visible atomically 57 + * after all nodes are created successfully. 58 + */ 59 + KERNFS_ROOT_CREATE_DEACTIVATED = 0x0001, 60 + 61 + /* 62 + * For regular flies, if the opener has CAP_DAC_OVERRIDE, open(2) 63 + * succeeds regardless of the RW permissions. sysfs had an extra 64 + * layer of enforcement where open(2) fails with -EACCES regardless 65 + * of CAP_DAC_OVERRIDE if the permission doesn't have the 66 + * respective read or write access at all (none of S_IRUGO or 67 + * S_IWUGO) or the respective operation isn't implemented. The 68 + * following flag enables that behavior. 69 + */ 70 + KERNFS_ROOT_EXTRA_OPEN_PERM_CHECK = 0x0002, 54 71 }; 55 72 56 73 /* type-specific structures for kernfs_node union members */
+1 -1
include/linux/of.h
··· 349 349 350 350 #else /* CONFIG_OF */ 351 351 352 - static inline const char* of_node_full_name(struct device_node *np) 352 + static inline const char* of_node_full_name(const struct device_node *np) 353 353 { 354 354 return "<no-node>"; 355 355 }
-7
include/uapi/asm-generic/resource.h
··· 57 57 # define RLIM_INFINITY (~0UL) 58 58 #endif 59 59 60 - /* 61 - * RLIMIT_STACK default maximum - some architectures override it: 62 - */ 63 - #ifndef _STK_LIM_MAX 64 - # define _STK_LIM_MAX RLIM_INFINITY 65 - #endif 66 - 67 60 68 61 #endif /* _UAPI_ASM_GENERIC_RESOURCE_H */
+3 -1
include/uapi/asm-generic/unistd.h
··· 697 697 __SYSCALL(__NR_sched_setattr, sys_sched_setattr) 698 698 #define __NR_sched_getattr 275 699 699 __SYSCALL(__NR_sched_getattr, sys_sched_getattr) 700 + #define __NR_renameat2 276 701 + __SYSCALL(__NR_renameat2, sys_renameat2) 700 702 701 703 #undef __NR_syscalls 702 - #define __NR_syscalls 276 704 + #define __NR_syscalls 277 703 705 704 706 /* 705 707 * All syscalls below here should go away really,
+12
include/uapi/linux/audit.h
··· 331 331 #define AUDIT_FAIL_PRINTK 1 332 332 #define AUDIT_FAIL_PANIC 2 333 333 334 + /* 335 + * These bits disambiguate different calling conventions that share an 336 + * ELF machine type, bitness, and endianness 337 + */ 338 + #define __AUDIT_ARCH_CONVENTION_MASK 0x30000000 339 + #define __AUDIT_ARCH_CONVENTION_MIPS64_N32 0x20000000 340 + 334 341 /* distinguish syscall tables */ 335 342 #define __AUDIT_ARCH_64BIT 0x80000000 336 343 #define __AUDIT_ARCH_LE 0x40000000 344 + 337 345 #define AUDIT_ARCH_ALPHA (EM_ALPHA|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE) 338 346 #define AUDIT_ARCH_ARM (EM_ARM|__AUDIT_ARCH_LE) 339 347 #define AUDIT_ARCH_ARMEB (EM_ARM) ··· 354 346 #define AUDIT_ARCH_MIPS (EM_MIPS) 355 347 #define AUDIT_ARCH_MIPSEL (EM_MIPS|__AUDIT_ARCH_LE) 356 348 #define AUDIT_ARCH_MIPS64 (EM_MIPS|__AUDIT_ARCH_64BIT) 349 + #define AUDIT_ARCH_MIPS64N32 (EM_MIPS|__AUDIT_ARCH_64BIT|\ 350 + __AUDIT_ARCH_CONVENTION_MIPS64_N32) 357 351 #define AUDIT_ARCH_MIPSEL64 (EM_MIPS|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE) 352 + #define AUDIT_ARCH_MIPSEL64N32 (EM_MIPS|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE\ 353 + __AUDIT_ARCH_CONVENTION_MIPS64_N32) 358 354 #define AUDIT_ARCH_OPENRISC (EM_OPENRISC) 359 355 #define AUDIT_ARCH_PARISC (EM_PARISC) 360 356 #define AUDIT_ARCH_PARISC64 (EM_PARISC|__AUDIT_ARCH_64BIT)
+3 -1
include/uapi/sound/asound.h
··· 94 94 SNDRV_HWDEP_IFACE_HDA, /* HD-audio */ 95 95 SNDRV_HWDEP_IFACE_USB_STREAM, /* direct access to usb stream */ 96 96 SNDRV_HWDEP_IFACE_FW_DICE, /* TC DICE FireWire device */ 97 + SNDRV_HWDEP_IFACE_FW_FIREWORKS, /* Echo Audio Fireworks based device */ 98 + SNDRV_HWDEP_IFACE_FW_BEBOB, /* BridgeCo BeBoB based device */ 97 99 98 100 /* Don't forget to change the following: */ 99 - SNDRV_HWDEP_IFACE_LAST = SNDRV_HWDEP_IFACE_FW_DICE 101 + SNDRV_HWDEP_IFACE_LAST = SNDRV_HWDEP_IFACE_FW_BEBOB 100 102 }; 101 103 102 104 struct snd_hwdep_info {
+22 -1
include/uapi/sound/firewire.h
··· 2 2 #define _UAPI_SOUND_FIREWIRE_H_INCLUDED 3 3 4 4 #include <linux/ioctl.h> 5 + #include <linux/types.h> 5 6 6 7 /* events can be read() from the hwdep device */ 7 8 8 9 #define SNDRV_FIREWIRE_EVENT_LOCK_STATUS 0x000010cc 9 10 #define SNDRV_FIREWIRE_EVENT_DICE_NOTIFICATION 0xd1ce004e 11 + #define SNDRV_FIREWIRE_EVENT_EFW_RESPONSE 0x4e617475 10 12 11 13 struct snd_firewire_event_common { 12 14 unsigned int type; /* SNDRV_FIREWIRE_EVENT_xxx */ ··· 24 22 unsigned int notification; /* DICE-specific bits */ 25 23 }; 26 24 25 + #define SND_EFW_TRANSACTION_USER_SEQNUM_MAX ((__u32)((__u16)~0) - 1) 26 + /* each field should be in big endian */ 27 + struct snd_efw_transaction { 28 + __be32 length; 29 + __be32 version; 30 + __be32 seqnum; 31 + __be32 category; 32 + __be32 command; 33 + __be32 status; 34 + __be32 params[0]; 35 + }; 36 + struct snd_firewire_event_efw_response { 37 + unsigned int type; 38 + __be32 response[0]; /* some responses */ 39 + }; 40 + 27 41 union snd_firewire_event { 28 42 struct snd_firewire_event_common common; 29 43 struct snd_firewire_event_lock_status lock_status; 30 44 struct snd_firewire_event_dice_notification dice_notification; 45 + struct snd_firewire_event_efw_response efw_response; 31 46 }; 32 47 33 48 ··· 53 34 #define SNDRV_FIREWIRE_IOCTL_UNLOCK _IO('H', 0xfa) 54 35 55 36 #define SNDRV_FIREWIRE_TYPE_DICE 1 56 - /* Fireworks, AV/C, RME, MOTU, ... */ 37 + #define SNDRV_FIREWIRE_TYPE_FIREWORKS 2 38 + #define SNDRV_FIREWIRE_TYPE_BEBOB 3 39 + /* AV/C, RME, MOTU, ... */ 57 40 58 41 struct snd_firewire_get_info { 59 42 unsigned int type; /* SNDRV_FIREWIRE_TYPE_xxx */
+5 -5
kernel/cgroup.c
··· 348 348 * reference-counted, to improve performance when child cgroups 349 349 * haven't been created. 350 350 */ 351 - static struct css_set init_css_set = { 351 + struct css_set init_css_set = { 352 352 .refcount = ATOMIC_INIT(1), 353 353 .cgrp_links = LIST_HEAD_INIT(init_css_set.cgrp_links), 354 354 .tasks = LIST_HEAD_INIT(init_css_set.tasks), ··· 1495 1495 */ 1496 1496 if (!use_task_css_set_links) 1497 1497 cgroup_enable_task_cg_lists(); 1498 - retry: 1498 + 1499 1499 mutex_lock(&cgroup_tree_mutex); 1500 1500 mutex_lock(&cgroup_mutex); 1501 1501 ··· 1503 1503 ret = parse_cgroupfs_options(data, &opts); 1504 1504 if (ret) 1505 1505 goto out_unlock; 1506 - 1506 + retry: 1507 1507 /* look for a matching existing root */ 1508 1508 if (!opts.subsys_mask && !opts.none && !opts.name) { 1509 1509 cgrp_dfl_root_visible = true; ··· 1562 1562 if (!atomic_inc_not_zero(&root->cgrp.refcnt)) { 1563 1563 mutex_unlock(&cgroup_mutex); 1564 1564 mutex_unlock(&cgroup_tree_mutex); 1565 - kfree(opts.release_agent); 1566 - kfree(opts.name); 1567 1565 msleep(10); 1566 + mutex_lock(&cgroup_tree_mutex); 1567 + mutex_lock(&cgroup_mutex); 1568 1568 goto retry; 1569 1569 } 1570 1570
+49 -67
kernel/cgroup_freezer.c
··· 21 21 #include <linux/uaccess.h> 22 22 #include <linux/freezer.h> 23 23 #include <linux/seq_file.h> 24 + #include <linux/mutex.h> 24 25 25 26 /* 26 27 * A cgroup is freezing if any FREEZING flags are set. FREEZING_SELF is ··· 43 42 struct freezer { 44 43 struct cgroup_subsys_state css; 45 44 unsigned int state; 46 - spinlock_t lock; 47 45 }; 46 + 47 + static DEFINE_MUTEX(freezer_mutex); 48 48 49 49 static inline struct freezer *css_freezer(struct cgroup_subsys_state *css) 50 50 { ··· 95 93 if (!freezer) 96 94 return ERR_PTR(-ENOMEM); 97 95 98 - spin_lock_init(&freezer->lock); 99 96 return &freezer->css; 100 97 } 101 98 ··· 111 110 struct freezer *freezer = css_freezer(css); 112 111 struct freezer *parent = parent_freezer(freezer); 113 112 114 - /* 115 - * The following double locking and freezing state inheritance 116 - * guarantee that @cgroup can never escape ancestors' freezing 117 - * states. See css_for_each_descendant_pre() for details. 118 - */ 119 - if (parent) 120 - spin_lock_irq(&parent->lock); 121 - spin_lock_nested(&freezer->lock, SINGLE_DEPTH_NESTING); 113 + mutex_lock(&freezer_mutex); 122 114 123 115 freezer->state |= CGROUP_FREEZER_ONLINE; 124 116 ··· 120 126 atomic_inc(&system_freezing_cnt); 121 127 } 122 128 123 - spin_unlock(&freezer->lock); 124 - if (parent) 125 - spin_unlock_irq(&parent->lock); 126 - 129 + mutex_unlock(&freezer_mutex); 127 130 return 0; 128 131 } 129 132 ··· 135 144 { 136 145 struct freezer *freezer = css_freezer(css); 137 146 138 - spin_lock_irq(&freezer->lock); 147 + mutex_lock(&freezer_mutex); 139 148 140 149 if (freezer->state & CGROUP_FREEZING) 141 150 atomic_dec(&system_freezing_cnt); 142 151 143 152 freezer->state = 0; 144 153 145 - spin_unlock_irq(&freezer->lock); 154 + mutex_unlock(&freezer_mutex); 146 155 } 147 156 148 157 static void freezer_css_free(struct cgroup_subsys_state *css) ··· 166 175 struct task_struct *task; 167 176 bool clear_frozen = false; 168 177 169 - spin_lock_irq(&freezer->lock); 178 + 
mutex_lock(&freezer_mutex); 170 179 171 180 /* 172 181 * Make the new tasks conform to the current state of @new_css. ··· 188 197 } 189 198 } 190 199 191 - spin_unlock_irq(&freezer->lock); 192 - 193 - /* 194 - * Propagate FROZEN clearing upwards. We may race with 195 - * update_if_frozen(), but as long as both work bottom-up, either 196 - * update_if_frozen() sees child's FROZEN cleared or we clear the 197 - * parent's FROZEN later. No parent w/ !FROZEN children can be 198 - * left FROZEN. 199 - */ 200 + /* propagate FROZEN clearing upwards */ 200 201 while (clear_frozen && (freezer = parent_freezer(freezer))) { 201 - spin_lock_irq(&freezer->lock); 202 202 freezer->state &= ~CGROUP_FROZEN; 203 203 clear_frozen = freezer->state & CGROUP_FREEZING; 204 - spin_unlock_irq(&freezer->lock); 205 204 } 205 + 206 + mutex_unlock(&freezer_mutex); 206 207 } 207 208 208 209 /** ··· 211 228 { 212 229 struct freezer *freezer; 213 230 214 - rcu_read_lock(); 215 - freezer = task_freezer(task); 216 - 217 231 /* 218 232 * The root cgroup is non-freezable, so we can skip locking the 219 233 * freezer. This is safe regardless of race with task migration. ··· 218 238 * to do. If we lost and root is the new cgroup, noop is still the 219 239 * right thing to do. 220 240 */ 221 - if (!parent_freezer(freezer)) 222 - goto out; 241 + if (task_css_is_root(task, freezer_cgrp_id)) 242 + return; 223 243 224 - /* 225 - * Grab @freezer->lock and freeze @task after verifying @task still 226 - * belongs to @freezer and it's freezing. The former is for the 227 - * case where we have raced against task migration and lost and 228 - * @task is already in a different cgroup which may not be frozen. 229 - * This isn't strictly necessary as freeze_task() is allowed to be 230 - * called spuriously but let's do it anyway for, if nothing else, 231 - * documentation. 
232 - */ 233 - spin_lock_irq(&freezer->lock); 234 - if (freezer == task_freezer(task) && (freezer->state & CGROUP_FREEZING)) 244 + mutex_lock(&freezer_mutex); 245 + rcu_read_lock(); 246 + 247 + freezer = task_freezer(task); 248 + if (freezer->state & CGROUP_FREEZING) 235 249 freeze_task(task); 236 - spin_unlock_irq(&freezer->lock); 237 - out: 250 + 238 251 rcu_read_unlock(); 252 + mutex_unlock(&freezer_mutex); 239 253 } 240 254 241 255 /** ··· 255 281 struct css_task_iter it; 256 282 struct task_struct *task; 257 283 258 - WARN_ON_ONCE(!rcu_read_lock_held()); 259 - 260 - spin_lock_irq(&freezer->lock); 284 + lockdep_assert_held(&freezer_mutex); 261 285 262 286 if (!(freezer->state & CGROUP_FREEZING) || 263 287 (freezer->state & CGROUP_FROZEN)) 264 - goto out_unlock; 288 + return; 265 289 266 290 /* are all (live) children frozen? */ 291 + rcu_read_lock(); 267 292 css_for_each_child(pos, css) { 268 293 struct freezer *child = css_freezer(pos); 269 294 270 295 if ((child->state & CGROUP_FREEZER_ONLINE) && 271 - !(child->state & CGROUP_FROZEN)) 272 - goto out_unlock; 296 + !(child->state & CGROUP_FROZEN)) { 297 + rcu_read_unlock(); 298 + return; 299 + } 273 300 } 301 + rcu_read_unlock(); 274 302 275 303 /* are all tasks frozen? 
*/ 276 304 css_task_iter_start(css, &it); ··· 293 317 freezer->state |= CGROUP_FROZEN; 294 318 out_iter_end: 295 319 css_task_iter_end(&it); 296 - out_unlock: 297 - spin_unlock_irq(&freezer->lock); 298 320 } 299 321 300 322 static int freezer_read(struct seq_file *m, void *v) 301 323 { 302 324 struct cgroup_subsys_state *css = seq_css(m), *pos; 303 325 326 + mutex_lock(&freezer_mutex); 304 327 rcu_read_lock(); 305 328 306 329 /* update states bottom-up */ 307 - css_for_each_descendant_post(pos, css) 330 + css_for_each_descendant_post(pos, css) { 331 + if (!css_tryget(pos)) 332 + continue; 333 + rcu_read_unlock(); 334 + 308 335 update_if_frozen(pos); 309 336 337 + rcu_read_lock(); 338 + css_put(pos); 339 + } 340 + 310 341 rcu_read_unlock(); 342 + mutex_unlock(&freezer_mutex); 311 343 312 344 seq_puts(m, freezer_state_strs(css_freezer(css)->state)); 313 345 seq_putc(m, '\n'); ··· 357 373 unsigned int state) 358 374 { 359 375 /* also synchronizes against task migration, see freezer_attach() */ 360 - lockdep_assert_held(&freezer->lock); 376 + lockdep_assert_held(&freezer_mutex); 361 377 362 378 if (!(freezer->state & CGROUP_FREEZER_ONLINE)) 363 379 return; ··· 398 414 * descendant will try to inherit its parent's FREEZING state as 399 415 * CGROUP_FREEZING_PARENT. 400 416 */ 417 + mutex_lock(&freezer_mutex); 401 418 rcu_read_lock(); 402 419 css_for_each_descendant_pre(pos, &freezer->css) { 403 420 struct freezer *pos_f = css_freezer(pos); 404 421 struct freezer *parent = parent_freezer(pos_f); 405 422 406 - spin_lock_irq(&pos_f->lock); 423 + if (!css_tryget(pos)) 424 + continue; 425 + rcu_read_unlock(); 407 426 408 - if (pos_f == freezer) { 427 + if (pos_f == freezer) 409 428 freezer_apply_state(pos_f, freeze, 410 429 CGROUP_FREEZING_SELF); 411 - } else { 412 - /* 413 - * Our update to @parent->state is already visible 414 - * which is all we need. No need to lock @parent. 415 - * For more info on synchronization, see 416 - * freezer_post_create(). 
417 - */ 430 + else 418 431 freezer_apply_state(pos_f, 419 432 parent->state & CGROUP_FREEZING, 420 433 CGROUP_FREEZING_PARENT); 421 - } 422 434 423 - spin_unlock_irq(&pos_f->lock); 435 + rcu_read_lock(); 436 + css_put(pos); 424 437 } 425 438 rcu_read_unlock(); 439 + mutex_unlock(&freezer_mutex); 426 440 } 427 441 428 442 static int freezer_write(struct cgroup_subsys_state *css, struct cftype *cft,
+4 -4
kernel/hrtimer.c
··· 990 990 /* Remove an active timer from the queue: */ 991 991 ret = remove_hrtimer(timer, base); 992 992 993 - /* Switch the timer base, if necessary: */ 994 - new_base = switch_hrtimer_base(timer, base, mode & HRTIMER_MODE_PINNED); 995 - 996 993 if (mode & HRTIMER_MODE_REL) { 997 - tim = ktime_add_safe(tim, new_base->get_time()); 994 + tim = ktime_add_safe(tim, base->get_time()); 998 995 /* 999 996 * CONFIG_TIME_LOW_RES is a temporary way for architectures 1000 997 * to signal that they simply return xtime in ··· 1005 1008 } 1006 1009 1007 1010 hrtimer_set_expires_range_ns(timer, tim, delta_ns); 1011 + 1012 + /* Switch the timer base, if necessary: */ 1013 + new_base = switch_hrtimer_base(timer, base, mode & HRTIMER_MODE_PINNED); 1008 1014 1009 1015 timer_stats_hrtimer_set_start_info(timer); 1010 1016
+30 -6
kernel/workqueue.c
··· 1916 1916 1917 1917 /* mayday mayday mayday */ 1918 1918 if (list_empty(&pwq->mayday_node)) { 1919 + /* 1920 + * If @pwq is for an unbound wq, its base ref may be put at 1921 + * any time due to an attribute change. Pin @pwq until the 1922 + * rescuer is done with it. 1923 + */ 1924 + get_pwq(pwq); 1919 1925 list_add_tail(&pwq->mayday_node, &wq->maydays); 1920 1926 wake_up_process(wq->rescuer->task); 1921 1927 } ··· 2404 2398 struct worker *rescuer = __rescuer; 2405 2399 struct workqueue_struct *wq = rescuer->rescue_wq; 2406 2400 struct list_head *scheduled = &rescuer->scheduled; 2401 + bool should_stop; 2407 2402 2408 2403 set_user_nice(current, RESCUER_NICE_LEVEL); 2409 2404 ··· 2416 2409 repeat: 2417 2410 set_current_state(TASK_INTERRUPTIBLE); 2418 2411 2419 - if (kthread_should_stop()) { 2420 - __set_current_state(TASK_RUNNING); 2421 - rescuer->task->flags &= ~PF_WQ_WORKER; 2422 - return 0; 2423 - } 2412 + /* 2413 + * By the time the rescuer is requested to stop, the workqueue 2414 + * shouldn't have any work pending, but @wq->maydays may still have 2415 + * pwq(s) queued. This can happen by non-rescuer workers consuming 2416 + * all the work items before the rescuer got to them. Go through 2417 + * @wq->maydays processing before acting on should_stop so that the 2418 + * list is always empty on exit. 2419 + */ 2420 + should_stop = kthread_should_stop(); 2424 2421 2425 2422 /* see whether any pwq is asking for help */ 2426 2423 spin_lock_irq(&wq_mayday_lock); ··· 2456 2445 process_scheduled_works(rescuer); 2457 2446 2458 2447 /* 2448 + * Put the reference grabbed by send_mayday(). @pool won't 2449 + * go away while we're holding its lock. 2450 + */ 2451 + put_pwq(pwq); 2452 + 2453 + /* 2459 2454 * Leave this pool. If keep_working() is %true, notify a 2460 2455 * regular worker; otherwise, we end up with 0 concurrency 2461 2456 * and stalling the execution. 
··· 2475 2458 } 2476 2459 2477 2460 spin_unlock_irq(&wq_mayday_lock); 2461 + 2462 + if (should_stop) { 2463 + __set_current_state(TASK_RUNNING); 2464 + rescuer->task->flags &= ~PF_WQ_WORKER; 2465 + return 0; 2466 + } 2478 2467 2479 2468 /* rescuers should never participate in concurrency management */ 2480 2469 WARN_ON_ONCE(!(rescuer->flags & WORKER_NOT_RUNNING)); ··· 4123 4100 if (!pwq) { 4124 4101 pr_warning("workqueue: allocation failed while updating NUMA affinity of \"%s\"\n", 4125 4102 wq->name); 4126 - goto out_unlock; 4103 + mutex_lock(&wq->mutex); 4104 + goto use_dfl_pwq; 4127 4105 } 4128 4106 4129 4107 /*
+15
mm/Kconfig
··· 581 581 582 582 config GENERIC_EARLY_IOREMAP 583 583 bool 584 + 585 + config MAX_STACK_SIZE_MB 586 + int "Maximum user stack size for 32-bit processes (MB)" 587 + default 80 588 + range 8 256 if METAG 589 + range 8 2048 590 + depends on STACK_GROWSUP && (!64BIT || COMPAT) 591 + help 592 + This is the maximum stack size in Megabytes in the VM layout of 32-bit 593 + user processes when the stack grows upwards (currently only on parisc 594 + and metag arch). The stack will be located at the highest memory 595 + address minus the given value, unless the RLIMIT_STACK hard limit is 596 + changed to a smaller value in which case that is used. 597 + 598 + A sane initial value is 80 MB.
+2 -2
mm/kmemleak.c
··· 1775 1775 int i; 1776 1776 unsigned long flags; 1777 1777 1778 - kmemleak_early_log = 0; 1779 - 1780 1778 #ifdef CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF 1781 1779 if (!kmemleak_skip_disable) { 1780 + kmemleak_early_log = 0; 1782 1781 kmemleak_disable(); 1783 1782 return; 1784 1783 } ··· 1795 1796 1796 1797 /* the kernel is still in UP mode, so disabling the IRQs is enough */ 1797 1798 local_irq_save(flags); 1799 + kmemleak_early_log = 0; 1798 1800 if (kmemleak_error) { 1799 1801 local_irq_restore(flags); 1800 1802 return;
+8 -1
mm/mremap.c
··· 194 194 break; 195 195 if (pmd_trans_huge(*old_pmd)) { 196 196 int err = 0; 197 - if (extent == HPAGE_PMD_SIZE) 197 + if (extent == HPAGE_PMD_SIZE) { 198 + VM_BUG_ON(vma->vm_file || !vma->anon_vma); 199 + /* See comment in move_ptes() */ 200 + if (need_rmap_locks) 201 + anon_vma_lock_write(vma->anon_vma); 198 202 err = move_huge_pmd(vma, new_vma, old_addr, 199 203 new_addr, old_end, 200 204 old_pmd, new_pmd); 205 + if (need_rmap_locks) 206 + anon_vma_unlock_write(vma->anon_vma); 207 + } 201 208 if (err > 0) { 202 209 need_flush = true; 203 210 continue;
+1 -1
mm/percpu.c
··· 610 610 chunk->map = pcpu_mem_zalloc(PCPU_DFL_MAP_ALLOC * 611 611 sizeof(chunk->map[0])); 612 612 if (!chunk->map) { 613 - kfree(chunk); 613 + pcpu_mem_free(chunk, pcpu_chunk_struct_size); 614 614 return NULL; 615 615 } 616 616
+19 -1
net/ceph/messenger.c
··· 557 557 return r; 558 558 } 559 559 560 - static int ceph_tcp_sendpage(struct socket *sock, struct page *page, 560 + static int __ceph_tcp_sendpage(struct socket *sock, struct page *page, 561 561 int offset, size_t size, bool more) 562 562 { 563 563 int flags = MSG_DONTWAIT | MSG_NOSIGNAL | (more ? MSG_MORE : MSG_EOR); ··· 570 570 return ret; 571 571 } 572 572 573 + static int ceph_tcp_sendpage(struct socket *sock, struct page *page, 574 + int offset, size_t size, bool more) 575 + { 576 + int ret; 577 + struct kvec iov; 578 + 579 + /* sendpage cannot properly handle pages with page_count == 0, 580 + * we need to fallback to sendmsg if that's the case */ 581 + if (page_count(page) >= 1) 582 + return __ceph_tcp_sendpage(sock, page, offset, size, more); 583 + 584 + iov.iov_base = kmap(page) + offset; 585 + iov.iov_len = size; 586 + ret = ceph_tcp_sendmsg(sock, &iov, 1, size, more); 587 + kunmap(page); 588 + 589 + return ret; 590 + } 573 591 574 592 /* 575 593 * Shutdown/close the socket for the given connection.
+5
net/ceph/osdmap.c
··· 329 329 dout("crush decode tunable chooseleaf_descend_once = %d", 330 330 c->chooseleaf_descend_once); 331 331 332 + ceph_decode_need(p, end, sizeof(u8), done); 333 + c->chooseleaf_vary_r = ceph_decode_8(p); 334 + dout("crush decode tunable chooseleaf_vary_r = %d", 335 + c->chooseleaf_vary_r); 336 + 332 337 done: 333 338 dout("crush_decode success\n"); 334 339 return c;
+4 -1
scripts/checksyscalls.sh
··· 25 25 #define __IGNORE_rmdir /* unlinkat */ 26 26 #define __IGNORE_lchown /* fchownat */ 27 27 #define __IGNORE_access /* faccessat */ 28 - #define __IGNORE_rename /* renameat */ 28 + #define __IGNORE_rename /* renameat2 */ 29 29 #define __IGNORE_readlink /* readlinkat */ 30 30 #define __IGNORE_symlink /* symlinkat */ 31 31 #define __IGNORE_utimes /* futimesat */ ··· 36 36 #define __IGNORE_stat64 /* fstatat64 */ 37 37 #define __IGNORE_lstat64 /* fstatat64 */ 38 38 #endif 39 + 40 + /* Missing flags argument */ 41 + #define __IGNORE_renameat /* renameat2 */ 39 42 40 43 /* CLOEXEC flag */ 41 44 #define __IGNORE_pipe /* pipe2 */
+163 -47
security/device_cgroup.c
··· 306 306 } 307 307 308 308 /** 309 - * may_access - verifies if a new exception is part of what is allowed 310 - * by a dev cgroup based on the default policy + 311 - * exceptions. This is used to make sure a child cgroup 312 - * won't have more privileges than its parent or to 313 - * verify if a certain access is allowed. 314 - * @dev_cgroup: dev cgroup to be tested against 315 - * @refex: new exception 316 - * @behavior: behavior of the exception 309 + * match_exception - iterates the exception list trying to find a complete match 310 + * @exceptions: list of exceptions 311 + * @type: device type (DEV_BLOCK or DEV_CHAR) 312 + * @major: device file major number, ~0 to match all 313 + * @minor: device file minor number, ~0 to match all 314 + * @access: permission mask (ACC_READ, ACC_WRITE, ACC_MKNOD) 315 + * 316 + * It is considered a complete match if an exception is found that will 317 + * contain the entire range of provided parameters. 318 + * 319 + * Return: true in case it matches an exception completely 317 320 */ 318 - static bool may_access(struct dev_cgroup *dev_cgroup, 319 - struct dev_exception_item *refex, 320 - enum devcg_behavior behavior) 321 + static bool match_exception(struct list_head *exceptions, short type, 322 + u32 major, u32 minor, short access) 321 323 { 322 324 struct dev_exception_item *ex; 325 + 326 + list_for_each_entry_rcu(ex, exceptions, list) { 327 + if ((type & DEV_BLOCK) && !(ex->type & DEV_BLOCK)) 328 + continue; 329 + if ((type & DEV_CHAR) && !(ex->type & DEV_CHAR)) 330 + continue; 331 + if (ex->major != ~0 && ex->major != major) 332 + continue; 333 + if (ex->minor != ~0 && ex->minor != minor) 334 + continue; 335 + /* provided access cannot have more than the exception rule */ 336 + if (access & (~ex->access)) 337 + continue; 338 + return true; 339 + } 340 + return false; 341 + } 342 + 343 + /** 344 + * match_exception_partial - iterates the exception list trying to find a partial match 345 + * @exceptions: list of 
exceptions 346 + * @type: device type (DEV_BLOCK or DEV_CHAR) 347 + * @major: device file major number, ~0 to match all 348 + * @minor: device file minor number, ~0 to match all 349 + * @access: permission mask (ACC_READ, ACC_WRITE, ACC_MKNOD) 350 + * 351 + * It is considered a partial match if an exception's range is found to 352 + * contain *any* of the devices specified by provided parameters. This is 353 + * used to make sure no extra access is being granted that is forbidden by 354 + * any of the exception list. 355 + * 356 + * Return: true in case the provided range mat matches an exception completely 357 + */ 358 + static bool match_exception_partial(struct list_head *exceptions, short type, 359 + u32 major, u32 minor, short access) 360 + { 361 + struct dev_exception_item *ex; 362 + 363 + list_for_each_entry_rcu(ex, exceptions, list) { 364 + if ((type & DEV_BLOCK) && !(ex->type & DEV_BLOCK)) 365 + continue; 366 + if ((type & DEV_CHAR) && !(ex->type & DEV_CHAR)) 367 + continue; 368 + /* 369 + * We must be sure that both the exception and the provided 370 + * range aren't masking all devices 371 + */ 372 + if (ex->major != ~0 && major != ~0 && ex->major != major) 373 + continue; 374 + if (ex->minor != ~0 && minor != ~0 && ex->minor != minor) 375 + continue; 376 + /* 377 + * In order to make sure the provided range isn't matching 378 + * an exception, all its access bits shouldn't match the 379 + * exception's access bits 380 + */ 381 + if (!(access & ex->access)) 382 + continue; 383 + return true; 384 + } 385 + return false; 386 + } 387 + 388 + /** 389 + * verify_new_ex - verifies if a new exception is allowed by parent cgroup's permissions 390 + * @dev_cgroup: dev cgroup to be tested against 391 + * @refex: new exception 392 + * @behavior: behavior of the exception's dev_cgroup 393 + * 394 + * This is used to make sure a child cgroup won't have more privileges 395 + * than its parent 396 + */ 397 + static bool verify_new_ex(struct dev_cgroup *dev_cgroup, 398 
+ struct dev_exception_item *refex, 399 + enum devcg_behavior behavior) 400 + { 323 401 bool match = false; 324 402 325 403 rcu_lockdep_assert(rcu_read_lock_held() || 326 404 lockdep_is_held(&devcgroup_mutex), 327 - "device_cgroup::may_access() called without proper synchronization"); 328 - 329 - list_for_each_entry_rcu(ex, &dev_cgroup->exceptions, list) { 330 - if ((refex->type & DEV_BLOCK) && !(ex->type & DEV_BLOCK)) 331 - continue; 332 - if ((refex->type & DEV_CHAR) && !(ex->type & DEV_CHAR)) 333 - continue; 334 - if (ex->major != ~0 && ex->major != refex->major) 335 - continue; 336 - if (ex->minor != ~0 && ex->minor != refex->minor) 337 - continue; 338 - if (refex->access & (~ex->access)) 339 - continue; 340 - match = true; 341 - break; 342 - } 405 + "device_cgroup:verify_new_ex called without proper synchronization"); 343 406 344 407 if (dev_cgroup->behavior == DEVCG_DEFAULT_ALLOW) { 345 408 if (behavior == DEVCG_DEFAULT_ALLOW) { 346 - /* the exception will deny access to certain devices */ 409 + /* 410 + * new exception in the child doesn't matter, only 411 + * adding extra restrictions 412 + */ 347 413 return true; 348 414 } else { 349 - /* the exception will allow access to certain devices */ 415 + /* 416 + * new exception in the child will add more devices 417 + * that can be acessed, so it can't match any of 418 + * parent's exceptions, even slightly 419 + */ 420 + match = match_exception_partial(&dev_cgroup->exceptions, 421 + refex->type, 422 + refex->major, 423 + refex->minor, 424 + refex->access); 425 + 350 426 if (match) 351 - /* 352 - * a new exception allowing access shouldn't 353 - * match an parent's exception 354 - */ 355 427 return false; 356 428 return true; 357 429 } 358 430 } else { 359 - /* only behavior == DEVCG_DEFAULT_DENY allowed here */ 431 + /* 432 + * Only behavior == DEVCG_DEFAULT_DENY allowed here, therefore 433 + * the new exception will add access to more devices and must 434 + * be contained completely in an parent's exception to 
be 435 + * allowed 436 + */ 437 + match = match_exception(&dev_cgroup->exceptions, refex->type, 438 + refex->major, refex->minor, 439 + refex->access); 440 + 360 441 if (match) 361 442 /* parent has an exception that matches the proposed */ 362 443 return true; ··· 459 378 460 379 if (!parent) 461 380 return 1; 462 - return may_access(parent, ex, childcg->behavior); 381 + return verify_new_ex(parent, ex, childcg->behavior); 382 + } 383 + 384 + /** 385 + * parent_allows_removal - verify if it's ok to remove an exception 386 + * @childcg: child cgroup from where the exception will be removed 387 + * @ex: exception being removed 388 + * 389 + * When removing an exception in cgroups with default ALLOW policy, it must 390 + * be checked if removing it will give the child cgroup more access than the 391 + * parent. 392 + * 393 + * Return: true if it's ok to remove exception, false otherwise 394 + */ 395 + static bool parent_allows_removal(struct dev_cgroup *childcg, 396 + struct dev_exception_item *ex) 397 + { 398 + struct dev_cgroup *parent = css_to_devcgroup(css_parent(&childcg->css)); 399 + 400 + if (!parent) 401 + return true; 402 + 403 + /* It's always allowed to remove access to devices */ 404 + if (childcg->behavior == DEVCG_DEFAULT_DENY) 405 + return true; 406 + 407 + /* 408 + * Make sure you're not removing part or a whole exception existing in 409 + * the parent cgroup 410 + */ 411 + return !match_exception_partial(&parent->exceptions, ex->type, 412 + ex->major, ex->minor, ex->access); 463 413 } 464 414 465 415 /** ··· 728 616 729 617 switch (filetype) { 730 618 case DEVCG_ALLOW: 731 - if (!parent_has_perm(devcgroup, &ex)) 732 - return -EPERM; 733 619 /* 734 620 * If the default policy is to allow by default, try to remove 735 621 * an matching exception instead. 
And be silent about it: we 736 622 * don't want to break compatibility 737 623 */ 738 624 if (devcgroup->behavior == DEVCG_DEFAULT_ALLOW) { 625 + /* Check if the parent allows removing it first */ 626 + if (!parent_allows_removal(devcgroup, &ex)) 627 + return -EPERM; 739 628 dev_exception_rm(devcgroup, &ex); 740 - return 0; 629 + break; 741 630 } 631 + 632 + if (!parent_has_perm(devcgroup, &ex)) 633 + return -EPERM; 742 634 rc = dev_exception_add(devcgroup, &ex); 743 635 break; 744 636 case DEVCG_DENY: ··· 820 704 short access) 821 705 { 822 706 struct dev_cgroup *dev_cgroup; 823 - struct dev_exception_item ex; 824 - int rc; 825 - 826 - memset(&ex, 0, sizeof(ex)); 827 - ex.type = type; 828 - ex.major = major; 829 - ex.minor = minor; 830 - ex.access = access; 707 + bool rc; 831 708 832 709 rcu_read_lock(); 833 710 dev_cgroup = task_devcgroup(current); 834 - rc = may_access(dev_cgroup, &ex, dev_cgroup->behavior); 711 + if (dev_cgroup->behavior == DEVCG_DEFAULT_ALLOW) 712 + /* Can't match any of the exceptions, even partially */ 713 + rc = !match_exception_partial(&dev_cgroup->exceptions, 714 + type, major, minor, access); 715 + else 716 + /* Need to match completely one exception to be allowed */ 717 + rc = match_exception(&dev_cgroup->exceptions, type, major, 718 + minor, access); 835 719 rcu_read_unlock(); 836 720 837 721 if (!rc)
+59
sound/firewire/Kconfig
··· 61 61 To compile this driver as a module, choose M here: the module 62 62 will be called snd-scs1x. 63 63 64 + config SND_FIREWORKS 65 + tristate "Echo Fireworks board module support" 66 + select SND_FIREWIRE_LIB 67 + select SND_RAWMIDI 68 + select SND_PCM 69 + select SND_HWDEP 70 + help 71 + Say Y here to include support for FireWire devices based 72 + on Echo Digital Audio Fireworks board: 73 + * Mackie Onyx 400F/1200F 74 + * Echo AudioFire12/8(until 2009 July) 75 + * Echo AudioFire2/4/Pre8/8(since 2009 July) 76 + * Echo Fireworks 8/HDMI 77 + * Gibson Robot Interface Pack/GoldTop 78 + 79 + To compile this driver as a module, choose M here: the module 80 + will be called snd-fireworks. 81 + 82 + config SND_BEBOB 83 + tristate "BridgeCo DM1000/DM1100/DM1500 with BeBoB firmware" 84 + select SND_FIREWIRE_LIB 85 + select SND_RAWMIDI 86 + select SND_PCM 87 + select SND_HWDEP 88 + help 89 + Say Y here to include support for FireWire devices based 90 + on BridgeCo DM1000/DM1100/DM1500 with BeBoB firmware: 91 + * Edirol FA-66/FA-101 92 + * PreSonus FIREBOX/FIREPOD/FP10/Inspire1394 93 + * BridgeCo RDAudio1/Audio5 94 + * Mackie Onyx 1220/1620/1640 (Firewire I/O Card) 95 + * Mackie d.2 (Firewire Option) 96 + * Stanton FinalScratch 2 (ScratchAmp) 97 + * Tascam IF-FW/DM 98 + * Behringer XENIX UFX 1204/1604 99 + * Behringer Digital Mixer X32 series (X-UF Card) 100 + * Apogee Rosetta 200/400 (X-FireWire card) 101 + * Apogee DA/AD/DD-16X (X-FireWire card) 102 + * Apogee Ensemble 103 + * ESI Quotafire610 104 + * AcousticReality eARMasterOne 105 + * CME MatrixKFW 106 + * Phonic Helix Board 12 MkII/18 MkII/24 MkII 107 + * Phonic Helix Board 12 Universal/18 Universal/24 Universal 108 + * Lynx Aurora 8/16 (LT-FW) 109 + * ICON FireXon 110 + * PrismSound Orpheus/ADA-8XR 111 + * TerraTec PHASE 24 FW/PHASE X24 FW/PHASE 88 Rack FW 112 + * Terratec EWS MIC2/EWS MIC4 113 + * Terratec Aureon 7.1 Firewire 114 + * Yamaha GO44/GO46 115 + * Focusrite Saffire/Saffire LE/SaffirePro10 
IO/SaffirePro26 IO 116 + * M-Audio Firewire410/AudioPhile/Solo 117 + * M-Audio Ozonic/NRV10/ProfireLightBridge 118 + * M-Audio Firewire 1814/ProjectMix IO 119 + 120 + To compile this driver as a module, choose M here: the module 121 + will be called snd-bebob. 122 + 64 123 endif # SND_FIREWIRE
+2
sound/firewire/Makefile
··· 10 10 obj-$(CONFIG_SND_FIREWIRE_SPEAKERS) += snd-firewire-speakers.o 11 11 obj-$(CONFIG_SND_ISIGHT) += snd-isight.o 12 12 obj-$(CONFIG_SND_SCS1X) += snd-scs1x.o 13 + obj-$(CONFIG_SND_FIREWORKS) += fireworks/ 14 + obj-$(CONFIG_SND_BEBOB) += bebob/
+551 -252
sound/firewire/amdtp.c
··· 11 11 #include <linux/firewire.h> 12 12 #include <linux/module.h> 13 13 #include <linux/slab.h> 14 + #include <linux/sched.h> 14 15 #include <sound/pcm.h> 16 + #include <sound/pcm_params.h> 17 + #include <sound/rawmidi.h> 15 18 #include "amdtp.h" 16 19 17 20 #define TICKS_PER_CYCLE 3072 ··· 23 20 24 21 #define TRANSFER_DELAY_TICKS 0x2e00 /* 479.17 µs */ 25 22 23 + /* isochronous header parameters */ 24 + #define ISO_DATA_LENGTH_SHIFT 16 26 25 #define TAG_CIP 1 27 26 27 + /* common isochronous packet header parameters */ 28 28 #define CIP_EOH (1u << 31) 29 + #define CIP_EOH_MASK 0x80000000 29 30 #define CIP_FMT_AM (0x10 << 24) 30 - #define AMDTP_FDF_AM824 (0 << 19) 31 - #define AMDTP_FDF_SFC_SHIFT 16 31 + #define CIP_FMT_MASK 0x3f000000 32 + #define CIP_SYT_MASK 0x0000ffff 33 + #define CIP_SYT_NO_INFO 0xffff 34 + #define CIP_FDF_MASK 0x00ff0000 35 + #define CIP_FDF_SFC_SHIFT 16 36 + 37 + /* 38 + * Audio and Music transfer protocol specific parameters 39 + * only "Clock-based rate control mode" is supported 40 + */ 41 + #define AMDTP_FDF_AM824 (0 << (CIP_FDF_SFC_SHIFT + 3)) 42 + #define AMDTP_FDF_NO_DATA 0xff 43 + #define AMDTP_DBS_MASK 0x00ff0000 44 + #define AMDTP_DBS_SHIFT 16 45 + #define AMDTP_DBC_MASK 0x000000ff 32 46 33 47 /* TODO: make these configurable */ 34 48 #define INTERRUPT_INTERVAL 16 35 49 #define QUEUE_LENGTH 48 36 50 51 + #define IN_PACKET_HEADER_SIZE 4 52 + #define OUT_PACKET_HEADER_SIZE 0 53 + 37 54 static void pcm_period_tasklet(unsigned long data); 38 55 39 56 /** 40 - * amdtp_out_stream_init - initialize an AMDTP output stream structure 41 - * @s: the AMDTP output stream to initialize 57 + * amdtp_stream_init - initialize an AMDTP stream structure 58 + * @s: the AMDTP stream to initialize 42 59 * @unit: the target of the stream 60 + * @dir: the direction of stream 43 61 * @flags: the packet transmission method to use 44 62 */ 45 - int amdtp_out_stream_init(struct amdtp_out_stream *s, struct fw_unit *unit, 46 - enum cip_out_flags flags) 63 + 
int amdtp_stream_init(struct amdtp_stream *s, struct fw_unit *unit, 64 + enum amdtp_stream_direction dir, enum cip_flags flags) 47 65 { 48 66 s->unit = fw_unit_get(unit); 67 + s->direction = dir; 49 68 s->flags = flags; 50 69 s->context = ERR_PTR(-1); 51 70 mutex_init(&s->mutex); 52 71 tasklet_init(&s->period_tasklet, pcm_period_tasklet, (unsigned long)s); 53 72 s->packet_index = 0; 54 73 74 + init_waitqueue_head(&s->callback_wait); 75 + s->callbacked = false; 76 + s->sync_slave = NULL; 77 + 78 + s->rx_blocks_for_midi = UINT_MAX; 79 + 55 80 return 0; 56 81 } 57 - EXPORT_SYMBOL(amdtp_out_stream_init); 82 + EXPORT_SYMBOL(amdtp_stream_init); 58 83 59 84 /** 60 - * amdtp_out_stream_destroy - free stream resources 61 - * @s: the AMDTP output stream to destroy 85 + * amdtp_stream_destroy - free stream resources 86 + * @s: the AMDTP stream to destroy 62 87 */ 63 - void amdtp_out_stream_destroy(struct amdtp_out_stream *s) 88 + void amdtp_stream_destroy(struct amdtp_stream *s) 64 89 { 65 - WARN_ON(amdtp_out_stream_running(s)); 90 + WARN_ON(amdtp_stream_running(s)); 66 91 mutex_destroy(&s->mutex); 67 92 fw_unit_put(s->unit); 68 93 } 69 - EXPORT_SYMBOL(amdtp_out_stream_destroy); 94 + EXPORT_SYMBOL(amdtp_stream_destroy); 70 95 71 96 const unsigned int amdtp_syt_intervals[CIP_SFC_COUNT] = { 72 97 [CIP_SFC_32000] = 8, ··· 107 76 }; 108 77 EXPORT_SYMBOL(amdtp_syt_intervals); 109 78 79 + const unsigned int amdtp_rate_table[CIP_SFC_COUNT] = { 80 + [CIP_SFC_32000] = 32000, 81 + [CIP_SFC_44100] = 44100, 82 + [CIP_SFC_48000] = 48000, 83 + [CIP_SFC_88200] = 88200, 84 + [CIP_SFC_96000] = 96000, 85 + [CIP_SFC_176400] = 176400, 86 + [CIP_SFC_192000] = 192000, 87 + }; 88 + EXPORT_SYMBOL(amdtp_rate_table); 89 + 110 90 /** 111 - * amdtp_out_stream_set_parameters - set stream parameters 112 - * @s: the AMDTP output stream to configure 91 + * amdtp_stream_add_pcm_hw_constraints - add hw constraints for PCM substream 92 + * @s: the AMDTP stream, which must be initialized. 
93 + * @runtime: the PCM substream runtime 94 + */ 95 + int amdtp_stream_add_pcm_hw_constraints(struct amdtp_stream *s, 96 + struct snd_pcm_runtime *runtime) 97 + { 98 + int err; 99 + 100 + /* AM824 in IEC 61883-6 can deliver 24bit data */ 101 + err = snd_pcm_hw_constraint_msbits(runtime, 0, 32, 24); 102 + if (err < 0) 103 + goto end; 104 + 105 + /* 106 + * Currently firewire-lib processes 16 packets in one software 107 + * interrupt callback. This equals to 2msec but actually the 108 + * interval of the interrupts has a jitter. 109 + * Additionally, even if adding a constraint to fit period size to 110 + * 2msec, actual calculated frames per period doesn't equal to 2msec, 111 + * depending on sampling rate. 112 + * Anyway, the interval to call snd_pcm_period_elapsed() cannot 2msec. 113 + * Here let us use 5msec for safe period interrupt. 114 + */ 115 + err = snd_pcm_hw_constraint_minmax(runtime, 116 + SNDRV_PCM_HW_PARAM_PERIOD_TIME, 117 + 5000, UINT_MAX); 118 + if (err < 0) 119 + goto end; 120 + 121 + /* Non-Blocking stream has no more constraints */ 122 + if (!(s->flags & CIP_BLOCKING)) 123 + goto end; 124 + 125 + /* 126 + * One AMDTP packet can include some frames. In blocking mode, the 127 + * number equals to SYT_INTERVAL. So the number is 8, 16 or 32, 128 + * depending on its sampling rate. For accurate period interrupt, it's 129 + * preferrable to aligh period/buffer sizes to current SYT_INTERVAL. 130 + * 131 + * TODO: These constraints can be improved with propper rules. 132 + * Currently apply LCM of SYT_INTEVALs. 
133 + */ 134 + err = snd_pcm_hw_constraint_step(runtime, 0, 135 + SNDRV_PCM_HW_PARAM_PERIOD_SIZE, 32); 136 + if (err < 0) 137 + goto end; 138 + err = snd_pcm_hw_constraint_step(runtime, 0, 139 + SNDRV_PCM_HW_PARAM_BUFFER_SIZE, 32); 140 + end: 141 + return err; 142 + } 143 + EXPORT_SYMBOL(amdtp_stream_add_pcm_hw_constraints); 144 + 145 + /** 146 + * amdtp_stream_set_parameters - set stream parameters 147 + * @s: the AMDTP stream to configure 113 148 * @rate: the sample rate 114 149 * @pcm_channels: the number of PCM samples in each data block, to be encoded 115 150 * as AM824 multi-bit linear audio ··· 184 87 * The parameters must be set before the stream is started, and must not be 185 88 * changed while the stream is running. 186 89 */ 187 - void amdtp_out_stream_set_parameters(struct amdtp_out_stream *s, 188 - unsigned int rate, 189 - unsigned int pcm_channels, 190 - unsigned int midi_ports) 90 + void amdtp_stream_set_parameters(struct amdtp_stream *s, 91 + unsigned int rate, 92 + unsigned int pcm_channels, 93 + unsigned int midi_ports) 191 94 { 192 - static const unsigned int rates[] = { 193 - [CIP_SFC_32000] = 32000, 194 - [CIP_SFC_44100] = 44100, 195 - [CIP_SFC_48000] = 48000, 196 - [CIP_SFC_88200] = 88200, 197 - [CIP_SFC_96000] = 96000, 198 - [CIP_SFC_176400] = 176400, 199 - [CIP_SFC_192000] = 192000, 200 - }; 201 - unsigned int sfc; 95 + unsigned int i, sfc, midi_channels; 202 96 203 - if (WARN_ON(amdtp_out_stream_running(s))) 97 + midi_channels = DIV_ROUND_UP(midi_ports, 8); 98 + 99 + if (WARN_ON(amdtp_stream_running(s)) | 100 + WARN_ON(pcm_channels > AMDTP_MAX_CHANNELS_FOR_PCM) | 101 + WARN_ON(midi_channels > AMDTP_MAX_CHANNELS_FOR_MIDI)) 204 102 return; 205 103 206 - for (sfc = 0; sfc < CIP_SFC_COUNT; ++sfc) 207 - if (rates[sfc] == rate) 104 + for (sfc = 0; sfc < ARRAY_SIZE(amdtp_rate_table); ++sfc) 105 + if (amdtp_rate_table[sfc] == rate) 208 106 goto sfc_found; 209 107 WARN_ON(1); 210 108 return; 211 109 212 110 sfc_found: 213 - s->dual_wire = (s->flags 
& CIP_HI_DUALWIRE) && sfc > CIP_SFC_96000; 214 - if (s->dual_wire) { 215 - sfc -= 2; 216 - rate /= 2; 217 - pcm_channels *= 2; 218 - } 219 - s->sfc = sfc; 220 - s->data_block_quadlets = pcm_channels + DIV_ROUND_UP(midi_ports, 8); 221 111 s->pcm_channels = pcm_channels; 112 + s->sfc = sfc; 113 + s->data_block_quadlets = s->pcm_channels + midi_channels; 222 114 s->midi_ports = midi_ports; 223 115 224 116 s->syt_interval = amdtp_syt_intervals[sfc]; ··· 217 131 if (s->flags & CIP_BLOCKING) 218 132 /* additional buffering needed to adjust for no-data packets */ 219 133 s->transfer_delay += TICKS_PER_SECOND * s->syt_interval / rate; 134 + 135 + /* init the position map for PCM and MIDI channels */ 136 + for (i = 0; i < pcm_channels; i++) 137 + s->pcm_positions[i] = i; 138 + s->midi_position = s->pcm_channels; 220 139 } 221 - EXPORT_SYMBOL(amdtp_out_stream_set_parameters); 140 + EXPORT_SYMBOL(amdtp_stream_set_parameters); 222 141 223 142 /** 224 - * amdtp_out_stream_get_max_payload - get the stream's packet size 225 - * @s: the AMDTP output stream 143 + * amdtp_stream_get_max_payload - get the stream's packet size 144 + * @s: the AMDTP stream 226 145 * 227 146 * This function must not be called before the stream has been configured 228 - * with amdtp_out_stream_set_parameters(). 147 + * with amdtp_stream_set_parameters(). 
229 148 */ 230 - unsigned int amdtp_out_stream_get_max_payload(struct amdtp_out_stream *s) 149 + unsigned int amdtp_stream_get_max_payload(struct amdtp_stream *s) 231 150 { 232 151 return 8 + s->syt_interval * s->data_block_quadlets * 4; 233 152 } 234 - EXPORT_SYMBOL(amdtp_out_stream_get_max_payload); 153 + EXPORT_SYMBOL(amdtp_stream_get_max_payload); 235 154 236 - static void amdtp_write_s16(struct amdtp_out_stream *s, 155 + static void amdtp_write_s16(struct amdtp_stream *s, 237 156 struct snd_pcm_substream *pcm, 238 157 __be32 *buffer, unsigned int frames); 239 - static void amdtp_write_s32(struct amdtp_out_stream *s, 158 + static void amdtp_write_s32(struct amdtp_stream *s, 240 159 struct snd_pcm_substream *pcm, 241 160 __be32 *buffer, unsigned int frames); 242 - static void amdtp_write_s16_dualwire(struct amdtp_out_stream *s, 243 - struct snd_pcm_substream *pcm, 244 - __be32 *buffer, unsigned int frames); 245 - static void amdtp_write_s32_dualwire(struct amdtp_out_stream *s, 246 - struct snd_pcm_substream *pcm, 247 - __be32 *buffer, unsigned int frames); 161 + static void amdtp_read_s32(struct amdtp_stream *s, 162 + struct snd_pcm_substream *pcm, 163 + __be32 *buffer, unsigned int frames); 248 164 249 165 /** 250 - * amdtp_out_stream_set_pcm_format - set the PCM format 251 - * @s: the AMDTP output stream to configure 166 + * amdtp_stream_set_pcm_format - set the PCM format 167 + * @s: the AMDTP stream to configure 252 168 * @format: the format of the ALSA PCM device 253 169 * 254 170 * The sample format must be set after the other paramters (rate/PCM channels/ 255 171 * MIDI) and before the stream is started, and must not be changed while the 256 172 * stream is running. 
257 173 */ 258 - void amdtp_out_stream_set_pcm_format(struct amdtp_out_stream *s, 259 - snd_pcm_format_t format) 174 + void amdtp_stream_set_pcm_format(struct amdtp_stream *s, 175 + snd_pcm_format_t format) 260 176 { 261 - if (WARN_ON(amdtp_out_stream_running(s))) 177 + if (WARN_ON(amdtp_stream_pcm_running(s))) 262 178 return; 263 179 264 180 switch (format) { ··· 268 180 WARN_ON(1); 269 181 /* fall through */ 270 182 case SNDRV_PCM_FORMAT_S16: 271 - if (s->dual_wire) 272 - s->transfer_samples = amdtp_write_s16_dualwire; 273 - else 183 + if (s->direction == AMDTP_OUT_STREAM) { 274 184 s->transfer_samples = amdtp_write_s16; 275 - break; 185 + break; 186 + } 187 + WARN_ON(1); 188 + /* fall through */ 276 189 case SNDRV_PCM_FORMAT_S32: 277 - if (s->dual_wire) 278 - s->transfer_samples = amdtp_write_s32_dualwire; 279 - else 190 + if (s->direction == AMDTP_OUT_STREAM) 280 191 s->transfer_samples = amdtp_write_s32; 192 + else 193 + s->transfer_samples = amdtp_read_s32; 281 194 break; 282 195 } 283 196 } 284 - EXPORT_SYMBOL(amdtp_out_stream_set_pcm_format); 197 + EXPORT_SYMBOL(amdtp_stream_set_pcm_format); 285 198 286 199 /** 287 - * amdtp_out_stream_pcm_prepare - prepare PCM device for running 288 - * @s: the AMDTP output stream 200 + * amdtp_stream_pcm_prepare - prepare PCM device for running 201 + * @s: the AMDTP stream 289 202 * 290 203 * This function should be called from the PCM device's .prepare callback. 
291 204 */ 292 - void amdtp_out_stream_pcm_prepare(struct amdtp_out_stream *s) 205 + void amdtp_stream_pcm_prepare(struct amdtp_stream *s) 293 206 { 294 207 tasklet_kill(&s->period_tasklet); 295 208 s->pcm_buffer_pointer = 0; 296 209 s->pcm_period_pointer = 0; 297 210 s->pointer_flush = true; 298 211 } 299 - EXPORT_SYMBOL(amdtp_out_stream_pcm_prepare); 212 + EXPORT_SYMBOL(amdtp_stream_pcm_prepare); 300 213 301 - static unsigned int calculate_data_blocks(struct amdtp_out_stream *s) 214 + static unsigned int calculate_data_blocks(struct amdtp_stream *s) 302 215 { 303 216 unsigned int phase, data_blocks; 304 217 305 - if (!cip_sfc_is_base_44100(s->sfc)) { 218 + if (s->flags & CIP_BLOCKING) 219 + data_blocks = s->syt_interval; 220 + else if (!cip_sfc_is_base_44100(s->sfc)) { 306 221 /* Sample_rate / 8000 is an integer, and precomputed. */ 307 222 data_blocks = s->data_block_state; 308 223 } else { ··· 334 243 return data_blocks; 335 244 } 336 245 337 - static unsigned int calculate_syt(struct amdtp_out_stream *s, 246 + static unsigned int calculate_syt(struct amdtp_stream *s, 338 247 unsigned int cycle) 339 248 { 340 249 unsigned int syt_offset, phase, index, syt; ··· 371 280 syt = (cycle + syt_offset / TICKS_PER_CYCLE) << 12; 372 281 syt += syt_offset % TICKS_PER_CYCLE; 373 282 374 - return syt & 0xffff; 283 + return syt & CIP_SYT_MASK; 375 284 } else { 376 - return 0xffff; /* no info */ 285 + return CIP_SYT_NO_INFO; 377 286 } 378 287 } 379 288 380 - static void amdtp_write_s32(struct amdtp_out_stream *s, 289 + static void amdtp_write_s32(struct amdtp_stream *s, 381 290 struct snd_pcm_substream *pcm, 382 291 __be32 *buffer, unsigned int frames) 383 292 { 384 293 struct snd_pcm_runtime *runtime = pcm->runtime; 385 - unsigned int channels, remaining_frames, frame_step, i, c; 294 + unsigned int channels, remaining_frames, i, c; 386 295 const u32 *src; 387 296 388 297 channels = s->pcm_channels; 389 298 src = (void *)runtime->dma_area + 390 299 frames_to_bytes(runtime, 
s->pcm_buffer_pointer); 391 300 remaining_frames = runtime->buffer_size - s->pcm_buffer_pointer; 392 - frame_step = s->data_block_quadlets - channels; 393 301 394 302 for (i = 0; i < frames; ++i) { 395 303 for (c = 0; c < channels; ++c) { 396 - *buffer = cpu_to_be32((*src >> 8) | 0x40000000); 304 + buffer[s->pcm_positions[c]] = 305 + cpu_to_be32((*src >> 8) | 0x40000000); 397 306 src++; 398 - buffer++; 399 307 } 400 - buffer += frame_step; 308 + buffer += s->data_block_quadlets; 401 309 if (--remaining_frames == 0) 402 310 src = (void *)runtime->dma_area; 403 311 } 404 312 } 405 313 406 - static void amdtp_write_s16(struct amdtp_out_stream *s, 314 + static void amdtp_write_s16(struct amdtp_stream *s, 407 315 struct snd_pcm_substream *pcm, 408 316 __be32 *buffer, unsigned int frames) 409 317 { 410 318 struct snd_pcm_runtime *runtime = pcm->runtime; 411 - unsigned int channels, remaining_frames, frame_step, i, c; 319 + unsigned int channels, remaining_frames, i, c; 412 320 const u16 *src; 413 321 414 322 channels = s->pcm_channels; 415 323 src = (void *)runtime->dma_area + 416 324 frames_to_bytes(runtime, s->pcm_buffer_pointer); 417 325 remaining_frames = runtime->buffer_size - s->pcm_buffer_pointer; 418 - frame_step = s->data_block_quadlets - channels; 419 326 420 327 for (i = 0; i < frames; ++i) { 421 328 for (c = 0; c < channels; ++c) { 422 - *buffer = cpu_to_be32((*src << 8) | 0x40000000); 329 + buffer[s->pcm_positions[c]] = 330 + cpu_to_be32((*src << 8) | 0x40000000); 423 331 src++; 424 - buffer++; 425 332 } 426 - buffer += frame_step; 333 + buffer += s->data_block_quadlets; 427 334 if (--remaining_frames == 0) 428 335 src = (void *)runtime->dma_area; 429 336 } 430 337 } 431 338 432 - static void amdtp_write_s32_dualwire(struct amdtp_out_stream *s, 433 - struct snd_pcm_substream *pcm, 434 - __be32 *buffer, unsigned int frames) 339 + static void amdtp_read_s32(struct amdtp_stream *s, 340 + struct snd_pcm_substream *pcm, 341 + __be32 *buffer, unsigned int frames) 
435 342 { 436 343 struct snd_pcm_runtime *runtime = pcm->runtime; 437 - unsigned int channels, frame_adjust_1, frame_adjust_2, i, c; 438 - const u32 *src; 344 + unsigned int channels, remaining_frames, i, c; 345 + u32 *dst; 439 346 440 347 channels = s->pcm_channels; 441 - src = (void *)runtime->dma_area + 442 - s->pcm_buffer_pointer * (runtime->frame_bits / 8); 443 - frame_adjust_1 = channels - 1; 444 - frame_adjust_2 = 1 - (s->data_block_quadlets - channels); 348 + dst = (void *)runtime->dma_area + 349 + frames_to_bytes(runtime, s->pcm_buffer_pointer); 350 + remaining_frames = runtime->buffer_size - s->pcm_buffer_pointer; 445 351 446 - channels /= 2; 447 352 for (i = 0; i < frames; ++i) { 448 353 for (c = 0; c < channels; ++c) { 449 - *buffer = cpu_to_be32((*src >> 8) | 0x40000000); 450 - src++; 451 - buffer += 2; 354 + *dst = be32_to_cpu(buffer[s->pcm_positions[c]]) << 8; 355 + dst++; 452 356 } 453 - buffer -= frame_adjust_1; 454 - for (c = 0; c < channels; ++c) { 455 - *buffer = cpu_to_be32((*src >> 8) | 0x40000000); 456 - src++; 457 - buffer += 2; 458 - } 459 - buffer -= frame_adjust_2; 357 + buffer += s->data_block_quadlets; 358 + if (--remaining_frames == 0) 359 + dst = (void *)runtime->dma_area; 460 360 } 461 361 } 462 362 463 - static void amdtp_write_s16_dualwire(struct amdtp_out_stream *s, 464 - struct snd_pcm_substream *pcm, 465 - __be32 *buffer, unsigned int frames) 466 - { 467 - struct snd_pcm_runtime *runtime = pcm->runtime; 468 - unsigned int channels, frame_adjust_1, frame_adjust_2, i, c; 469 - const u16 *src; 470 - 471 - channels = s->pcm_channels; 472 - src = (void *)runtime->dma_area + 473 - s->pcm_buffer_pointer * (runtime->frame_bits / 8); 474 - frame_adjust_1 = channels - 1; 475 - frame_adjust_2 = 1 - (s->data_block_quadlets - channels); 476 - 477 - channels /= 2; 478 - for (i = 0; i < frames; ++i) { 479 - for (c = 0; c < channels; ++c) { 480 - *buffer = cpu_to_be32((*src << 8) | 0x40000000); 481 - src++; 482 - buffer += 2; 483 - } 484 - 
buffer -= frame_adjust_1; 485 - for (c = 0; c < channels; ++c) { 486 - *buffer = cpu_to_be32((*src << 8) | 0x40000000); 487 - src++; 488 - buffer += 2; 489 - } 490 - buffer -= frame_adjust_2; 491 - } 492 - } 493 - 494 - static void amdtp_fill_pcm_silence(struct amdtp_out_stream *s, 363 + static void amdtp_fill_pcm_silence(struct amdtp_stream *s, 495 364 __be32 *buffer, unsigned int frames) 496 365 { 497 366 unsigned int i, c; 498 367 499 368 for (i = 0; i < frames; ++i) { 500 369 for (c = 0; c < s->pcm_channels; ++c) 501 - buffer[c] = cpu_to_be32(0x40000000); 370 + buffer[s->pcm_positions[c]] = cpu_to_be32(0x40000000); 502 371 buffer += s->data_block_quadlets; 503 372 } 504 373 } 505 374 506 - static void amdtp_fill_midi(struct amdtp_out_stream *s, 375 + static void amdtp_fill_midi(struct amdtp_stream *s, 507 376 __be32 *buffer, unsigned int frames) 508 377 { 509 - unsigned int i; 378 + unsigned int f, port; 379 + u8 *b; 510 380 511 - for (i = 0; i < frames; ++i) 512 - buffer[s->pcm_channels + i * s->data_block_quadlets] = 513 - cpu_to_be32(0x80000000); 381 + for (f = 0; f < frames; f++) { 382 + buffer[s->midi_position] = 0; 383 + b = (u8 *)&buffer[s->midi_position]; 384 + 385 + port = (s->data_block_counter + f) % 8; 386 + if ((f >= s->rx_blocks_for_midi) || 387 + (s->midi[port] == NULL) || 388 + (snd_rawmidi_transmit(s->midi[port], b + 1, 1) <= 0)) 389 + b[0] = 0x80; 390 + else 391 + b[0] = 0x81; 392 + 393 + buffer += s->data_block_quadlets; 394 + } 514 395 } 515 396 516 - static void queue_out_packet(struct amdtp_out_stream *s, unsigned int cycle) 397 + static void amdtp_pull_midi(struct amdtp_stream *s, 398 + __be32 *buffer, unsigned int frames) 399 + { 400 + unsigned int f, port; 401 + int len; 402 + u8 *b; 403 + 404 + for (f = 0; f < frames; f++) { 405 + port = (s->data_block_counter + f) % 8; 406 + b = (u8 *)&buffer[s->midi_position]; 407 + 408 + len = b[0] - 0x80; 409 + if ((1 <= len) && (len <= 3) && (s->midi[port])) 410 + 
snd_rawmidi_receive(s->midi[port], b + 1, len); 411 + 412 + buffer += s->data_block_quadlets; 413 + } 414 + } 415 + 416 + static void update_pcm_pointers(struct amdtp_stream *s, 417 + struct snd_pcm_substream *pcm, 418 + unsigned int frames) 419 + { unsigned int ptr; 420 + 421 + ptr = s->pcm_buffer_pointer + frames; 422 + if (ptr >= pcm->runtime->buffer_size) 423 + ptr -= pcm->runtime->buffer_size; 424 + ACCESS_ONCE(s->pcm_buffer_pointer) = ptr; 425 + 426 + s->pcm_period_pointer += frames; 427 + if (s->pcm_period_pointer >= pcm->runtime->period_size) { 428 + s->pcm_period_pointer -= pcm->runtime->period_size; 429 + s->pointer_flush = false; 430 + tasklet_hi_schedule(&s->period_tasklet); 431 + } 432 + } 433 + 434 + static void pcm_period_tasklet(unsigned long data) 435 + { 436 + struct amdtp_stream *s = (void *)data; 437 + struct snd_pcm_substream *pcm = ACCESS_ONCE(s->pcm); 438 + 439 + if (pcm) 440 + snd_pcm_period_elapsed(pcm); 441 + } 442 + 443 + static int queue_packet(struct amdtp_stream *s, 444 + unsigned int header_length, 445 + unsigned int payload_length, bool skip) 446 + { 447 + struct fw_iso_packet p = {0}; 448 + int err = 0; 449 + 450 + if (IS_ERR(s->context)) 451 + goto end; 452 + 453 + p.interrupt = IS_ALIGNED(s->packet_index + 1, INTERRUPT_INTERVAL); 454 + p.tag = TAG_CIP; 455 + p.header_length = header_length; 456 + p.payload_length = (!skip) ? 
payload_length : 0; 457 + p.skip = skip; 458 + err = fw_iso_context_queue(s->context, &p, &s->buffer.iso_buffer, 459 + s->buffer.packets[s->packet_index].offset); 460 + if (err < 0) { 461 + dev_err(&s->unit->device, "queueing error: %d\n", err); 462 + goto end; 463 + } 464 + 465 + if (++s->packet_index >= QUEUE_LENGTH) 466 + s->packet_index = 0; 467 + end: 468 + return err; 469 + } 470 + 471 + static inline int queue_out_packet(struct amdtp_stream *s, 472 + unsigned int payload_length, bool skip) 473 + { 474 + return queue_packet(s, OUT_PACKET_HEADER_SIZE, 475 + payload_length, skip); 476 + } 477 + 478 + static inline int queue_in_packet(struct amdtp_stream *s) 479 + { 480 + return queue_packet(s, IN_PACKET_HEADER_SIZE, 481 + amdtp_stream_get_max_payload(s), false); 482 + } 483 + 484 + static void handle_out_packet(struct amdtp_stream *s, unsigned int syt) 517 485 { 518 486 __be32 *buffer; 519 - unsigned int index, data_blocks, syt, ptr; 487 + unsigned int data_blocks, payload_length; 520 488 struct snd_pcm_substream *pcm; 521 - struct fw_iso_packet packet; 522 - int err; 523 489 524 490 if (s->packet_index < 0) 525 491 return; 526 - index = s->packet_index; 527 492 528 493 /* this module generate empty packet for 'no data' */ 529 - syt = calculate_syt(s, cycle); 530 - if (!(s->flags & CIP_BLOCKING)) 494 + if (!(s->flags & CIP_BLOCKING) || (syt != CIP_SYT_NO_INFO)) 531 495 data_blocks = calculate_data_blocks(s); 532 - else if (syt != 0xffff) 533 - data_blocks = s->syt_interval; 534 496 else 535 497 data_blocks = 0; 536 498 537 - buffer = s->buffer.packets[index].buffer; 499 + buffer = s->buffer.packets[s->packet_index].buffer; 538 500 buffer[0] = cpu_to_be32(ACCESS_ONCE(s->source_node_id_field) | 539 - (s->data_block_quadlets << 16) | 501 + (s->data_block_quadlets << AMDTP_DBS_SHIFT) | 540 502 s->data_block_counter); 541 503 buffer[1] = cpu_to_be32(CIP_EOH | CIP_FMT_AM | AMDTP_FDF_AM824 | 542 - (s->sfc << AMDTP_FDF_SFC_SHIFT) | syt); 504 + (s->sfc << 
CIP_FDF_SFC_SHIFT) | syt); 543 505 buffer += 2; 544 506 545 507 pcm = ACCESS_ONCE(s->pcm); ··· 605 461 606 462 s->data_block_counter = (s->data_block_counter + data_blocks) & 0xff; 607 463 608 - packet.payload_length = 8 + data_blocks * 4 * s->data_block_quadlets; 609 - packet.interrupt = IS_ALIGNED(index + 1, INTERRUPT_INTERVAL); 610 - packet.skip = 0; 611 - packet.tag = TAG_CIP; 612 - packet.sy = 0; 613 - packet.header_length = 0; 614 - 615 - err = fw_iso_context_queue(s->context, &packet, &s->buffer.iso_buffer, 616 - s->buffer.packets[index].offset); 617 - if (err < 0) { 618 - dev_err(&s->unit->device, "queueing error: %d\n", err); 464 + payload_length = 8 + data_blocks * 4 * s->data_block_quadlets; 465 + if (queue_out_packet(s, payload_length, false) < 0) { 619 466 s->packet_index = -1; 620 - amdtp_out_stream_pcm_abort(s); 467 + amdtp_stream_pcm_abort(s); 621 468 return; 622 469 } 623 470 624 - if (++index >= QUEUE_LENGTH) 625 - index = 0; 626 - s->packet_index = index; 627 - 628 - if (pcm) { 629 - if (s->dual_wire) 630 - data_blocks *= 2; 631 - 632 - ptr = s->pcm_buffer_pointer + data_blocks; 633 - if (ptr >= pcm->runtime->buffer_size) 634 - ptr -= pcm->runtime->buffer_size; 635 - ACCESS_ONCE(s->pcm_buffer_pointer) = ptr; 636 - 637 - s->pcm_period_pointer += data_blocks; 638 - if (s->pcm_period_pointer >= pcm->runtime->period_size) { 639 - s->pcm_period_pointer -= pcm->runtime->period_size; 640 - s->pointer_flush = false; 641 - tasklet_hi_schedule(&s->period_tasklet); 642 - } 643 - } 471 + if (pcm) 472 + update_pcm_pointers(s, pcm, data_blocks); 644 473 } 645 474 646 - static void pcm_period_tasklet(unsigned long data) 475 + static void handle_in_packet(struct amdtp_stream *s, 476 + unsigned int payload_quadlets, 477 + __be32 *buffer) 647 478 { 648 - struct amdtp_out_stream *s = (void *)data; 649 - struct snd_pcm_substream *pcm = ACCESS_ONCE(s->pcm); 479 + u32 cip_header[2]; 480 + unsigned int data_blocks, data_block_quadlets, data_block_counter, 481 + 
dbc_interval; 482 + struct snd_pcm_substream *pcm = NULL; 483 + bool lost; 484 + 485 + cip_header[0] = be32_to_cpu(buffer[0]); 486 + cip_header[1] = be32_to_cpu(buffer[1]); 487 + 488 + /* 489 + * This module supports 'Two-quadlet CIP header with SYT field'. 490 + * For convenience, also check FMT field is AM824 or not. 491 + */ 492 + if (((cip_header[0] & CIP_EOH_MASK) == CIP_EOH) || 493 + ((cip_header[1] & CIP_EOH_MASK) != CIP_EOH) || 494 + ((cip_header[1] & CIP_FMT_MASK) != CIP_FMT_AM)) { 495 + dev_info_ratelimited(&s->unit->device, 496 + "Invalid CIP header for AMDTP: %08X:%08X\n", 497 + cip_header[0], cip_header[1]); 498 + goto end; 499 + } 500 + 501 + /* Calculate data blocks */ 502 + if (payload_quadlets < 3 || 503 + ((cip_header[1] & CIP_FDF_MASK) == 504 + (AMDTP_FDF_NO_DATA << CIP_FDF_SFC_SHIFT))) { 505 + data_blocks = 0; 506 + } else { 507 + data_block_quadlets = 508 + (cip_header[0] & AMDTP_DBS_MASK) >> AMDTP_DBS_SHIFT; 509 + /* avoid division by zero */ 510 + if (data_block_quadlets == 0) { 511 + dev_info_ratelimited(&s->unit->device, 512 + "Detect invalid value in dbs field: %08X\n", 513 + cip_header[0]); 514 + goto err; 515 + } 516 + if (s->flags & CIP_WRONG_DBS) 517 + data_block_quadlets = s->data_block_quadlets; 518 + 519 + data_blocks = (payload_quadlets - 2) / data_block_quadlets; 520 + } 521 + 522 + /* Check data block counter continuity */ 523 + data_block_counter = cip_header[0] & AMDTP_DBC_MASK; 524 + if (data_blocks == 0 && (s->flags & CIP_EMPTY_HAS_WRONG_DBC) && 525 + s->data_block_counter != UINT_MAX) 526 + data_block_counter = s->data_block_counter; 527 + 528 + if (((s->flags & CIP_SKIP_DBC_ZERO_CHECK) && data_block_counter == 0) || 529 + (s->data_block_counter == UINT_MAX)) { 530 + lost = false; 531 + } else if (!(s->flags & CIP_DBC_IS_END_EVENT)) { 532 + lost = data_block_counter != s->data_block_counter; 533 + } else { 534 + if ((data_blocks > 0) && (s->tx_dbc_interval > 0)) 535 + dbc_interval = s->tx_dbc_interval; 536 + else 537 + 
dbc_interval = data_blocks; 538 + 539 + lost = data_block_counter != 540 + ((s->data_block_counter + dbc_interval) & 0xff); 541 + } 542 + 543 + if (lost) { 544 + dev_info(&s->unit->device, 545 + "Detect discontinuity of CIP: %02X %02X\n", 546 + s->data_block_counter, data_block_counter); 547 + goto err; 548 + } 549 + 550 + if (data_blocks > 0) { 551 + buffer += 2; 552 + 553 + pcm = ACCESS_ONCE(s->pcm); 554 + if (pcm) 555 + s->transfer_samples(s, pcm, buffer, data_blocks); 556 + 557 + if (s->midi_ports) 558 + amdtp_pull_midi(s, buffer, data_blocks); 559 + } 560 + 561 + if (s->flags & CIP_DBC_IS_END_EVENT) 562 + s->data_block_counter = data_block_counter; 563 + else 564 + s->data_block_counter = 565 + (data_block_counter + data_blocks) & 0xff; 566 + end: 567 + if (queue_in_packet(s) < 0) 568 + goto err; 650 569 651 570 if (pcm) 652 - snd_pcm_period_elapsed(pcm); 571 + update_pcm_pointers(s, pcm, data_blocks); 572 + 573 + return; 574 + err: 575 + s->packet_index = -1; 576 + amdtp_stream_pcm_abort(s); 653 577 } 654 578 655 - static void out_packet_callback(struct fw_iso_context *context, u32 cycle, 656 - size_t header_length, void *header, void *data) 579 + static void out_stream_callback(struct fw_iso_context *context, u32 cycle, 580 + size_t header_length, void *header, 581 + void *private_data) 657 582 { 658 - struct amdtp_out_stream *s = data; 659 - unsigned int i, packets = header_length / 4; 583 + struct amdtp_stream *s = private_data; 584 + unsigned int i, syt, packets = header_length / 4; 660 585 661 586 /* 662 587 * Compute the cycle of the last queued packet. 
··· 734 521 */ 735 522 cycle += QUEUE_LENGTH - packets; 736 523 737 - for (i = 0; i < packets; ++i) 738 - queue_out_packet(s, ++cycle); 524 + for (i = 0; i < packets; ++i) { 525 + syt = calculate_syt(s, ++cycle); 526 + handle_out_packet(s, syt); 527 + } 739 528 fw_iso_context_queue_flush(s->context); 740 529 } 741 530 742 - static int queue_initial_skip_packets(struct amdtp_out_stream *s) 531 + static void in_stream_callback(struct fw_iso_context *context, u32 cycle, 532 + size_t header_length, void *header, 533 + void *private_data) 743 534 { 744 - struct fw_iso_packet skip_packet = { 745 - .skip = 1, 746 - }; 747 - unsigned int i; 748 - int err; 535 + struct amdtp_stream *s = private_data; 536 + unsigned int p, syt, packets, payload_quadlets; 537 + __be32 *buffer, *headers = header; 749 538 750 - for (i = 0; i < QUEUE_LENGTH; ++i) { 751 - skip_packet.interrupt = IS_ALIGNED(s->packet_index + 1, 752 - INTERRUPT_INTERVAL); 753 - err = fw_iso_context_queue(s->context, &skip_packet, NULL, 0); 754 - if (err < 0) 755 - return err; 756 - if (++s->packet_index >= QUEUE_LENGTH) 757 - s->packet_index = 0; 539 + /* The number of packets in buffer */ 540 + packets = header_length / IN_PACKET_HEADER_SIZE; 541 + 542 + for (p = 0; p < packets; p++) { 543 + if (s->packet_index < 0) 544 + break; 545 + 546 + buffer = s->buffer.packets[s->packet_index].buffer; 547 + 548 + /* Process sync slave stream */ 549 + if (s->sync_slave && s->sync_slave->callbacked) { 550 + syt = be32_to_cpu(buffer[1]) & CIP_SYT_MASK; 551 + handle_out_packet(s->sync_slave, syt); 552 + } 553 + 554 + /* The number of quadlets in this packet */ 555 + payload_quadlets = 556 + (be32_to_cpu(headers[p]) >> ISO_DATA_LENGTH_SHIFT) / 4; 557 + handle_in_packet(s, payload_quadlets, buffer); 758 558 } 759 559 760 - return 0; 560 + /* Queueing error or detecting discontinuity */ 561 + if (s->packet_index < 0) { 562 + /* Abort sync slave. 
*/ 563 + if (s->sync_slave) { 564 + s->sync_slave->packet_index = -1; 565 + amdtp_stream_pcm_abort(s->sync_slave); 566 + } 567 + return; 568 + } 569 + 570 + /* when sync to device, flush the packets for slave stream */ 571 + if (s->sync_slave && s->sync_slave->callbacked) 572 + fw_iso_context_queue_flush(s->sync_slave->context); 573 + 574 + fw_iso_context_queue_flush(s->context); 575 + } 576 + 577 + /* processing is done by master callback */ 578 + static void slave_stream_callback(struct fw_iso_context *context, u32 cycle, 579 + size_t header_length, void *header, 580 + void *private_data) 581 + { 582 + return; 583 + } 584 + 585 + /* this is executed one time */ 586 + static void amdtp_stream_first_callback(struct fw_iso_context *context, 587 + u32 cycle, size_t header_length, 588 + void *header, void *private_data) 589 + { 590 + struct amdtp_stream *s = private_data; 591 + 592 + /* 593 + * For in-stream, first packet has come. 594 + * For out-stream, prepared to transmit first packet 595 + */ 596 + s->callbacked = true; 597 + wake_up(&s->callback_wait); 598 + 599 + if (s->direction == AMDTP_IN_STREAM) 600 + context->callback.sc = in_stream_callback; 601 + else if ((s->flags & CIP_BLOCKING) && (s->flags & CIP_SYNC_TO_DEVICE)) 602 + context->callback.sc = slave_stream_callback; 603 + else 604 + context->callback.sc = out_stream_callback; 605 + 606 + context->callback.sc(context, cycle, header_length, header, s); 761 607 } 762 608 763 609 /** 764 - * amdtp_out_stream_start - start sending packets 765 - * @s: the AMDTP output stream to start 610 + * amdtp_stream_start - start transferring packets 611 + * @s: the AMDTP stream to start 766 612 * @channel: the isochronous channel on the bus 767 613 * @speed: firewire speed code 768 614 * 769 615 * The stream cannot be started until it has been configured with 770 - * amdtp_out_stream_set_parameters() and amdtp_out_stream_set_pcm_format(), 771 - * and it must be started before any PCM or MIDI device can be started. 
616 + * amdtp_stream_set_parameters() and it must be started before any PCM or MIDI 617 + * device can be started. 772 618 */ 773 - int amdtp_out_stream_start(struct amdtp_out_stream *s, int channel, int speed) 619 + int amdtp_stream_start(struct amdtp_stream *s, int channel, int speed) 774 620 { 775 621 static const struct { 776 622 unsigned int data_block; ··· 843 571 [CIP_SFC_88200] = { 0, 67 }, 844 572 [CIP_SFC_176400] = { 0, 67 }, 845 573 }; 846 - int err; 574 + unsigned int header_size; 575 + enum dma_data_direction dir; 576 + int type, tag, err; 847 577 848 578 mutex_lock(&s->mutex); 849 579 850 - if (WARN_ON(amdtp_out_stream_running(s) || 851 - (!s->pcm_channels && !s->midi_ports))) { 580 + if (WARN_ON(amdtp_stream_running(s) || 581 + (s->data_block_quadlets < 1))) { 852 582 err = -EBADFD; 853 583 goto err_unlock; 854 584 } 855 585 586 + if (s->direction == AMDTP_IN_STREAM && 587 + s->flags & CIP_SKIP_INIT_DBC_CHECK) 588 + s->data_block_counter = UINT_MAX; 589 + else 590 + s->data_block_counter = 0; 856 591 s->data_block_state = initial_state[s->sfc].data_block; 857 592 s->syt_offset_state = initial_state[s->sfc].syt_offset; 858 593 s->last_syt_offset = TICKS_PER_CYCLE; 859 594 595 + /* initialize packet buffer */ 596 + if (s->direction == AMDTP_IN_STREAM) { 597 + dir = DMA_FROM_DEVICE; 598 + type = FW_ISO_CONTEXT_RECEIVE; 599 + header_size = IN_PACKET_HEADER_SIZE; 600 + } else { 601 + dir = DMA_TO_DEVICE; 602 + type = FW_ISO_CONTEXT_TRANSMIT; 603 + header_size = OUT_PACKET_HEADER_SIZE; 604 + } 860 605 err = iso_packets_buffer_init(&s->buffer, s->unit, QUEUE_LENGTH, 861 - amdtp_out_stream_get_max_payload(s), 862 - DMA_TO_DEVICE); 606 + amdtp_stream_get_max_payload(s), dir); 863 607 if (err < 0) 864 608 goto err_unlock; 865 609 866 610 s->context = fw_iso_context_create(fw_parent_device(s->unit)->card, 867 - FW_ISO_CONTEXT_TRANSMIT, 868 - channel, speed, 0, 869 - out_packet_callback, s); 611 + type, channel, speed, header_size, 612 + 
amdtp_stream_first_callback, s); 870 613 if (IS_ERR(s->context)) { 871 614 err = PTR_ERR(s->context); 872 615 if (err == -EBUSY) 873 616 dev_err(&s->unit->device, 874 - "no free output stream on this controller\n"); 617 + "no free stream on this controller\n"); 875 618 goto err_buffer; 876 619 } 877 620 878 - amdtp_out_stream_update(s); 621 + amdtp_stream_update(s); 879 622 880 623 s->packet_index = 0; 881 - s->data_block_counter = 0; 882 - err = queue_initial_skip_packets(s); 883 - if (err < 0) 884 - goto err_context; 624 + do { 625 + if (s->direction == AMDTP_IN_STREAM) 626 + err = queue_in_packet(s); 627 + else 628 + err = queue_out_packet(s, 0, true); 629 + if (err < 0) 630 + goto err_context; 631 + } while (s->packet_index > 0); 885 632 886 - err = fw_iso_context_start(s->context, -1, 0, 0); 633 + /* NOTE: TAG1 matches CIP. This just affects in stream. */ 634 + tag = FW_ISO_CONTEXT_MATCH_TAG1; 635 + if (s->flags & CIP_EMPTY_WITH_TAG0) 636 + tag |= FW_ISO_CONTEXT_MATCH_TAG0; 637 + 638 + s->callbacked = false; 639 + err = fw_iso_context_start(s->context, -1, 0, tag); 887 640 if (err < 0) 888 641 goto err_context; 889 642 ··· 926 629 927 630 return err; 928 631 } 929 - EXPORT_SYMBOL(amdtp_out_stream_start); 632 + EXPORT_SYMBOL(amdtp_stream_start); 930 633 931 634 /** 932 - * amdtp_out_stream_pcm_pointer - get the PCM buffer position 933 - * @s: the AMDTP output stream that transports the PCM data 635 + * amdtp_stream_pcm_pointer - get the PCM buffer position 636 + * @s: the AMDTP stream that transports the PCM data 934 637 * 935 638 * Returns the current buffer position, in frames. 
936 639 */ 937 - unsigned long amdtp_out_stream_pcm_pointer(struct amdtp_out_stream *s) 640 + unsigned long amdtp_stream_pcm_pointer(struct amdtp_stream *s) 938 641 { 939 642 /* this optimization is allowed to be racy */ 940 - if (s->pointer_flush) 643 + if (s->pointer_flush && amdtp_stream_running(s)) 941 644 fw_iso_context_flush_completions(s->context); 942 645 else 943 646 s->pointer_flush = true; 944 647 945 648 return ACCESS_ONCE(s->pcm_buffer_pointer); 946 649 } 947 - EXPORT_SYMBOL(amdtp_out_stream_pcm_pointer); 650 + EXPORT_SYMBOL(amdtp_stream_pcm_pointer); 948 651 949 652 /** 950 - * amdtp_out_stream_update - update the stream after a bus reset 951 - * @s: the AMDTP output stream 653 + * amdtp_stream_update - update the stream after a bus reset 654 + * @s: the AMDTP stream 952 655 */ 953 - void amdtp_out_stream_update(struct amdtp_out_stream *s) 656 + void amdtp_stream_update(struct amdtp_stream *s) 954 657 { 955 658 ACCESS_ONCE(s->source_node_id_field) = 956 659 (fw_parent_device(s->unit)->card->node_id & 0x3f) << 24; 957 660 } 958 - EXPORT_SYMBOL(amdtp_out_stream_update); 661 + EXPORT_SYMBOL(amdtp_stream_update); 959 662 960 663 /** 961 - * amdtp_out_stream_stop - stop sending packets 962 - * @s: the AMDTP output stream to stop 664 + * amdtp_stream_stop - stop sending packets 665 + * @s: the AMDTP stream to stop 963 666 * 964 667 * All PCM and MIDI devices of the stream must be stopped before the stream 965 668 * itself can be stopped. 
966 669 */ 967 - void amdtp_out_stream_stop(struct amdtp_out_stream *s) 670 + void amdtp_stream_stop(struct amdtp_stream *s) 968 671 { 969 672 mutex_lock(&s->mutex); 970 673 971 - if (!amdtp_out_stream_running(s)) { 674 + if (!amdtp_stream_running(s)) { 972 675 mutex_unlock(&s->mutex); 973 676 return; 974 677 } ··· 979 682 s->context = ERR_PTR(-1); 980 683 iso_packets_buffer_destroy(&s->buffer, s->unit); 981 684 685 + s->callbacked = false; 686 + 982 687 mutex_unlock(&s->mutex); 983 688 } 984 - EXPORT_SYMBOL(amdtp_out_stream_stop); 689 + EXPORT_SYMBOL(amdtp_stream_stop); 985 690 986 691 /** 987 - * amdtp_out_stream_pcm_abort - abort the running PCM device 692 + * amdtp_stream_pcm_abort - abort the running PCM device 988 693 * @s: the AMDTP stream about to be stopped 989 694 * 990 695 * If the isochronous stream needs to be stopped asynchronously, call this 991 696 * function first to stop the PCM device. 992 697 */ 993 - void amdtp_out_stream_pcm_abort(struct amdtp_out_stream *s) 698 + void amdtp_stream_pcm_abort(struct amdtp_stream *s) 994 699 { 995 700 struct snd_pcm_substream *pcm; 996 701 ··· 1004 705 snd_pcm_stream_unlock_irq(pcm); 1005 706 } 1006 707 } 1007 - EXPORT_SYMBOL(amdtp_out_stream_pcm_abort); 708 + EXPORT_SYMBOL(amdtp_stream_pcm_abort);
+163 -37
sound/firewire/amdtp.h
··· 8 8 #include "packets-buffer.h" 9 9 10 10 /** 11 - * enum cip_out_flags - describes details of the streaming protocol 11 + * enum cip_flags - describes details of the streaming protocol 12 12 * @CIP_NONBLOCKING: In non-blocking mode, each packet contains 13 13 * sample_rate/8000 samples, with rounding up or down to adjust 14 14 * for clock skew and left-over fractional samples. This should ··· 16 16 * @CIP_BLOCKING: In blocking mode, each packet contains either zero or 17 17 * SYT_INTERVAL samples, with these two types alternating so that 18 18 * the overall sample rate comes out right. 19 - * @CIP_HI_DUALWIRE: At rates above 96 kHz, pretend that the stream runs 20 - * at half the actual sample rate with twice the number of channels; 21 - * two samples of a channel are stored consecutively in the packet. 22 - * Requires blocking mode and SYT_INTERVAL-aligned PCM buffer size. 19 + * @CIP_SYNC_TO_DEVICE: In sync to device mode, time stamp in out packets is 20 + * generated by in packets. Defaultly this driver generates timestamp. 21 + * @CIP_EMPTY_WITH_TAG0: Only for in-stream. Empty in-packets have TAG0. 22 + * @CIP_DBC_IS_END_EVENT: Only for in-stream. The value of dbc in an in-packet 23 + * corresponds to the end of event in the packet. Out of IEC 61883. 24 + * @CIP_WRONG_DBS: Only for in-stream. The value of dbs is wrong in in-packets. 25 + * The value of data_block_quadlets is used instead of reported value. 26 + * @SKIP_DBC_ZERO_CHECK: Only for in-stream. Packets with zero in dbc is 27 + * skipped for detecting discontinuity. 28 + * @CIP_SKIP_INIT_DBC_CHECK: Only for in-stream. The value of dbc in first 29 + * packet is not continuous from an initial value. 30 + * @CIP_EMPTY_HAS_WRONG_DBC: Only for in-stream. The value of dbc in empty 31 + * packet is wrong but the others are correct. 
23 32 */ 24 - enum cip_out_flags { 25 - CIP_NONBLOCKING = 0x00, 26 - CIP_BLOCKING = 0x01, 27 - CIP_HI_DUALWIRE = 0x02, 33 + enum cip_flags { 34 + CIP_NONBLOCKING = 0x00, 35 + CIP_BLOCKING = 0x01, 36 + CIP_SYNC_TO_DEVICE = 0x02, 37 + CIP_EMPTY_WITH_TAG0 = 0x04, 38 + CIP_DBC_IS_END_EVENT = 0x08, 39 + CIP_WRONG_DBS = 0x10, 40 + CIP_SKIP_DBC_ZERO_CHECK = 0x20, 41 + CIP_SKIP_INIT_DBC_CHECK = 0x40, 42 + CIP_EMPTY_HAS_WRONG_DBC = 0x80, 28 43 }; 29 44 30 45 /** ··· 56 41 CIP_SFC_COUNT 57 42 }; 58 43 44 + #define AMDTP_IN_PCM_FORMAT_BITS SNDRV_PCM_FMTBIT_S32 45 + 59 46 #define AMDTP_OUT_PCM_FORMAT_BITS (SNDRV_PCM_FMTBIT_S16 | \ 60 47 SNDRV_PCM_FMTBIT_S32) 48 + 49 + 50 + /* 51 + * This module supports maximum 64 PCM channels for one PCM stream 52 + * This is for our convenience. 53 + */ 54 + #define AMDTP_MAX_CHANNELS_FOR_PCM 64 55 + 56 + /* 57 + * AMDTP packet can include channels for MIDI conformant data. 58 + * Each MIDI conformant data channel includes 8 MPX-MIDI data stream. 59 + * Each MPX-MIDI data stream includes one data stream from/to MIDI ports. 60 + * 61 + * This module supports maximum 1 MIDI conformant data channels. 62 + * Then this AMDTP packets can transfer maximum 8 MIDI data streams. 
63 + */ 64 + #define AMDTP_MAX_CHANNELS_FOR_MIDI 1 61 65 62 66 struct fw_unit; 63 67 struct fw_iso_context; 64 68 struct snd_pcm_substream; 69 + struct snd_pcm_runtime; 70 + struct snd_rawmidi_substream; 65 71 66 - struct amdtp_out_stream { 72 + enum amdtp_stream_direction { 73 + AMDTP_OUT_STREAM = 0, 74 + AMDTP_IN_STREAM 75 + }; 76 + 77 + struct amdtp_stream { 67 78 struct fw_unit *unit; 68 - enum cip_out_flags flags; 79 + enum cip_flags flags; 80 + enum amdtp_stream_direction direction; 69 81 struct fw_iso_context *context; 70 82 struct mutex mutex; 71 83 72 84 enum cip_sfc sfc; 73 - bool dual_wire; 74 85 unsigned int data_block_quadlets; 75 86 unsigned int pcm_channels; 76 87 unsigned int midi_ports; 77 - void (*transfer_samples)(struct amdtp_out_stream *s, 88 + void (*transfer_samples)(struct amdtp_stream *s, 78 89 struct snd_pcm_substream *pcm, 79 90 __be32 *buffer, unsigned int frames); 91 + u8 pcm_positions[AMDTP_MAX_CHANNELS_FOR_PCM]; 92 + u8 midi_position; 80 93 81 94 unsigned int syt_interval; 82 95 unsigned int transfer_delay; ··· 125 82 unsigned int pcm_buffer_pointer; 126 83 unsigned int pcm_period_pointer; 127 84 bool pointer_flush; 85 + 86 + struct snd_rawmidi_substream *midi[AMDTP_MAX_CHANNELS_FOR_MIDI * 8]; 87 + 88 + /* quirk: fixed interval of dbc between previos/current packets. 
*/ 89 + unsigned int tx_dbc_interval; 90 + 91 + /* quirk: the first count of data blocks in an rx packet for MIDI */ 92 + unsigned int rx_blocks_for_midi; 93 + 94 + bool callbacked; 95 + wait_queue_head_t callback_wait; 96 + struct amdtp_stream *sync_slave; 128 97 }; 129 98 130 - int amdtp_out_stream_init(struct amdtp_out_stream *s, struct fw_unit *unit, 131 - enum cip_out_flags flags); 132 - void amdtp_out_stream_destroy(struct amdtp_out_stream *s); 99 + int amdtp_stream_init(struct amdtp_stream *s, struct fw_unit *unit, 100 + enum amdtp_stream_direction dir, 101 + enum cip_flags flags); 102 + void amdtp_stream_destroy(struct amdtp_stream *s); 133 103 134 - void amdtp_out_stream_set_parameters(struct amdtp_out_stream *s, 135 - unsigned int rate, 136 - unsigned int pcm_channels, 137 - unsigned int midi_ports); 138 - unsigned int amdtp_out_stream_get_max_payload(struct amdtp_out_stream *s); 104 + void amdtp_stream_set_parameters(struct amdtp_stream *s, 105 + unsigned int rate, 106 + unsigned int pcm_channels, 107 + unsigned int midi_ports); 108 + unsigned int amdtp_stream_get_max_payload(struct amdtp_stream *s); 139 109 140 - int amdtp_out_stream_start(struct amdtp_out_stream *s, int channel, int speed); 141 - void amdtp_out_stream_update(struct amdtp_out_stream *s); 142 - void amdtp_out_stream_stop(struct amdtp_out_stream *s); 110 + int amdtp_stream_start(struct amdtp_stream *s, int channel, int speed); 111 + void amdtp_stream_update(struct amdtp_stream *s); 112 + void amdtp_stream_stop(struct amdtp_stream *s); 143 113 144 - void amdtp_out_stream_set_pcm_format(struct amdtp_out_stream *s, 145 - snd_pcm_format_t format); 146 - void amdtp_out_stream_pcm_prepare(struct amdtp_out_stream *s); 147 - unsigned long amdtp_out_stream_pcm_pointer(struct amdtp_out_stream *s); 148 - void amdtp_out_stream_pcm_abort(struct amdtp_out_stream *s); 114 + int amdtp_stream_add_pcm_hw_constraints(struct amdtp_stream *s, 115 + struct snd_pcm_runtime *runtime); 116 + void 
amdtp_stream_set_pcm_format(struct amdtp_stream *s, 117 + snd_pcm_format_t format); 118 + void amdtp_stream_pcm_prepare(struct amdtp_stream *s); 119 + unsigned long amdtp_stream_pcm_pointer(struct amdtp_stream *s); 120 + void amdtp_stream_pcm_abort(struct amdtp_stream *s); 149 121 150 122 extern const unsigned int amdtp_syt_intervals[CIP_SFC_COUNT]; 123 + extern const unsigned int amdtp_rate_table[CIP_SFC_COUNT]; 151 124 152 - static inline bool amdtp_out_stream_running(struct amdtp_out_stream *s) 125 + /** 126 + * amdtp_stream_running - check stream is running or not 127 + * @s: the AMDTP stream 128 + * 129 + * If this function returns true, the stream is running. 130 + */ 131 + static inline bool amdtp_stream_running(struct amdtp_stream *s) 153 132 { 154 133 return !IS_ERR(s->context); 155 134 } 156 135 157 136 /** 158 - * amdtp_out_streaming_error - check for streaming error 159 - * @s: the AMDTP output stream 137 + * amdtp_streaming_error - check for streaming error 138 + * @s: the AMDTP stream 160 139 * 161 140 * If this function returns true, the stream's packet queue has stopped due to 162 141 * an asynchronous error. 163 142 */ 164 - static inline bool amdtp_out_streaming_error(struct amdtp_out_stream *s) 143 + static inline bool amdtp_streaming_error(struct amdtp_stream *s) 165 144 { 166 145 return s->packet_index < 0; 167 146 } 168 147 169 148 /** 170 - * amdtp_out_stream_pcm_trigger - start/stop playback from a PCM device 171 - * @s: the AMDTP output stream 149 + * amdtp_stream_pcm_running - check PCM substream is running or not 150 + * @s: the AMDTP stream 151 + * 152 + * If this function returns true, PCM substream in the AMDTP stream is running. 
153 + */ 154 + static inline bool amdtp_stream_pcm_running(struct amdtp_stream *s) 155 + { 156 + return !!s->pcm; 157 + } 158 + 159 + /** 160 + * amdtp_stream_pcm_trigger - start/stop playback from a PCM device 161 + * @s: the AMDTP stream 172 162 * @pcm: the PCM device to be started, or %NULL to stop the current device 173 163 * 174 164 * Call this function on a running isochronous stream to enable the actual 175 165 * transmission of PCM data. This function should be called from the PCM 176 166 * device's .trigger callback. 177 167 */ 178 - static inline void amdtp_out_stream_pcm_trigger(struct amdtp_out_stream *s, 179 - struct snd_pcm_substream *pcm) 168 + static inline void amdtp_stream_pcm_trigger(struct amdtp_stream *s, 169 + struct snd_pcm_substream *pcm) 180 170 { 181 171 ACCESS_ONCE(s->pcm) = pcm; 172 + } 173 + 174 + /** 175 + * amdtp_stream_midi_trigger - start/stop playback/capture with a MIDI device 176 + * @s: the AMDTP stream 177 + * @port: index of MIDI port 178 + * @midi: the MIDI device to be started, or %NULL to stop the current device 179 + * 180 + * Call this function on a running isochronous stream to enable the actual 181 + * transmission of MIDI data. This function should be called from the MIDI 182 + * device's .trigger callback. 
183 + */ 184 + static inline void amdtp_stream_midi_trigger(struct amdtp_stream *s, 185 + unsigned int port, 186 + struct snd_rawmidi_substream *midi) 187 + { 188 + if (port < s->midi_ports) 189 + ACCESS_ONCE(s->midi[port]) = midi; 182 190 } 183 191 184 192 static inline bool cip_sfc_is_base_44100(enum cip_sfc sfc) 185 193 { 186 194 return sfc & 1; 195 + } 196 + 197 + static inline void amdtp_stream_set_sync(enum cip_flags sync_mode, 198 + struct amdtp_stream *master, 199 + struct amdtp_stream *slave) 200 + { 201 + if (sync_mode == CIP_SYNC_TO_DEVICE) { 202 + master->flags |= CIP_SYNC_TO_DEVICE; 203 + slave->flags |= CIP_SYNC_TO_DEVICE; 204 + master->sync_slave = slave; 205 + } else { 206 + master->flags &= ~CIP_SYNC_TO_DEVICE; 207 + slave->flags &= ~CIP_SYNC_TO_DEVICE; 208 + master->sync_slave = NULL; 209 + } 210 + 211 + slave->sync_slave = NULL; 212 + } 213 + 214 + /** 215 + * amdtp_stream_wait_callback - sleep till callbacked or timeout 216 + * @s: the AMDTP stream 217 + * @timeout: msec till timeout 218 + * 219 + * If this function return false, the AMDTP stream should be stopped. 220 + */ 221 + static inline bool amdtp_stream_wait_callback(struct amdtp_stream *s, 222 + unsigned int timeout) 223 + { 224 + return wait_event_timeout(s->callback_wait, 225 + s->callbacked == true, 226 + msecs_to_jiffies(timeout)) > 0; 187 227 } 188 228 189 229 #endif
+4
sound/firewire/bebob/Makefile
··· 1 + snd-bebob-objs := bebob_command.o bebob_stream.o bebob_proc.o bebob_midi.o \ 2 + bebob_pcm.o bebob_hwdep.o bebob_terratec.o bebob_yamaha.o \ 3 + bebob_focusrite.o bebob_maudio.o bebob.o 4 + obj-m += snd-bebob.o
+471
sound/firewire/bebob/bebob.c
··· 1 + /* 2 + * bebob.c - a part of driver for BeBoB based devices 3 + * 4 + * Copyright (c) 2013-2014 Takashi Sakamoto 5 + * 6 + * Licensed under the terms of the GNU General Public License, version 2. 7 + */ 8 + 9 + /* 10 + * BeBoB is 'BridgeCo enhanced Breakout Box'. This is installed to firewire 11 + * devices with DM1000/DM1100/DM1500 chipset. It gives common way for host 12 + * system to handle BeBoB based devices. 13 + */ 14 + 15 + #include "bebob.h" 16 + 17 + MODULE_DESCRIPTION("BridgeCo BeBoB driver"); 18 + MODULE_AUTHOR("Takashi Sakamoto <o-takashi@sakamocchi.jp>"); 19 + MODULE_LICENSE("GPL v2"); 20 + 21 + static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; 22 + static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; 23 + static bool enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_PNP; 24 + 25 + module_param_array(index, int, NULL, 0444); 26 + MODULE_PARM_DESC(index, "card index"); 27 + module_param_array(id, charp, NULL, 0444); 28 + MODULE_PARM_DESC(id, "ID string"); 29 + module_param_array(enable, bool, NULL, 0444); 30 + MODULE_PARM_DESC(enable, "enable BeBoB sound card"); 31 + 32 + static DEFINE_MUTEX(devices_mutex); 33 + static DECLARE_BITMAP(devices_used, SNDRV_CARDS); 34 + 35 + /* Offsets from information register. 
*/ 36 + #define INFO_OFFSET_GUID 0x10 37 + #define INFO_OFFSET_HW_MODEL_ID 0x18 38 + #define INFO_OFFSET_HW_MODEL_REVISION 0x1c 39 + 40 + #define VEN_EDIROL 0x000040ab 41 + #define VEN_PRESONUS 0x00000a92 42 + #define VEN_BRIDGECO 0x000007f5 43 + #define VEN_MACKIE 0x0000000f 44 + #define VEN_STANTON 0x00001260 45 + #define VEN_TASCAM 0x0000022e 46 + #define VEN_BEHRINGER 0x00001564 47 + #define VEN_APOGEE 0x000003db 48 + #define VEN_ESI 0x00000f1b 49 + #define VEN_ACOUSTIC 0x00000002 50 + #define VEN_CME 0x0000000a 51 + #define VEN_PHONIC 0x00001496 52 + #define VEN_LYNX 0x000019e5 53 + #define VEN_ICON 0x00001a9e 54 + #define VEN_PRISMSOUND 0x00001198 55 + #define VEN_TERRATEC 0x00000aac 56 + #define VEN_YAMAHA 0x0000a0de 57 + #define VEN_FOCUSRITE 0x0000130e 58 + #define VEN_MAUDIO1 0x00000d6c 59 + #define VEN_MAUDIO2 0x000007f5 60 + 61 + #define MODEL_FOCUSRITE_SAFFIRE_BOTH 0x00000000 62 + #define MODEL_MAUDIO_AUDIOPHILE_BOTH 0x00010060 63 + #define MODEL_MAUDIO_FW1814 0x00010071 64 + #define MODEL_MAUDIO_PROJECTMIX 0x00010091 65 + 66 + static int 67 + name_device(struct snd_bebob *bebob, unsigned int vendor_id) 68 + { 69 + struct fw_device *fw_dev = fw_parent_device(bebob->unit); 70 + char vendor[24] = {0}; 71 + char model[32] = {0}; 72 + u32 hw_id; 73 + u32 data[2] = {0}; 74 + u32 revision; 75 + int err; 76 + 77 + /* get vendor name from root directory */ 78 + err = fw_csr_string(fw_dev->config_rom + 5, CSR_VENDOR, 79 + vendor, sizeof(vendor)); 80 + if (err < 0) 81 + goto end; 82 + 83 + /* get model name from unit directory */ 84 + err = fw_csr_string(bebob->unit->directory, CSR_MODEL, 85 + model, sizeof(model)); 86 + if (err < 0) 87 + goto end; 88 + 89 + /* get hardware id */ 90 + err = snd_bebob_read_quad(bebob->unit, INFO_OFFSET_HW_MODEL_ID, 91 + &hw_id); 92 + if (err < 0) 93 + goto end; 94 + 95 + /* get hardware revision */ 96 + err = snd_bebob_read_quad(bebob->unit, INFO_OFFSET_HW_MODEL_REVISION, 97 + &revision); 98 + if (err < 0) 99 + goto end; 100 + 
101 + /* get GUID */ 102 + err = snd_bebob_read_block(bebob->unit, INFO_OFFSET_GUID, 103 + data, sizeof(data)); 104 + if (err < 0) 105 + goto end; 106 + 107 + strcpy(bebob->card->driver, "BeBoB"); 108 + strcpy(bebob->card->shortname, model); 109 + strcpy(bebob->card->mixername, model); 110 + snprintf(bebob->card->longname, sizeof(bebob->card->longname), 111 + "%s %s (id:%d, rev:%d), GUID %08x%08x at %s, S%d", 112 + vendor, model, hw_id, revision, 113 + data[0], data[1], dev_name(&bebob->unit->device), 114 + 100 << fw_dev->max_speed); 115 + end: 116 + return err; 117 + } 118 + 119 + static void 120 + bebob_card_free(struct snd_card *card) 121 + { 122 + struct snd_bebob *bebob = card->private_data; 123 + 124 + if (bebob->card_index >= 0) { 125 + mutex_lock(&devices_mutex); 126 + clear_bit(bebob->card_index, devices_used); 127 + mutex_unlock(&devices_mutex); 128 + } 129 + 130 + mutex_destroy(&bebob->mutex); 131 + } 132 + 133 + static const struct snd_bebob_spec * 134 + get_saffire_spec(struct fw_unit *unit) 135 + { 136 + char name[24] = {0}; 137 + 138 + if (fw_csr_string(unit->directory, CSR_MODEL, name, sizeof(name)) < 0) 139 + return NULL; 140 + 141 + if (strcmp(name, "SaffireLE") == 0) 142 + return &saffire_le_spec; 143 + else 144 + return &saffire_spec; 145 + } 146 + 147 + static bool 148 + check_audiophile_booted(struct fw_unit *unit) 149 + { 150 + char name[24] = {0}; 151 + 152 + if (fw_csr_string(unit->directory, CSR_MODEL, name, sizeof(name)) < 0) 153 + return false; 154 + 155 + return strncmp(name, "FW Audiophile Bootloader", 15) != 0; 156 + } 157 + 158 + static int 159 + bebob_probe(struct fw_unit *unit, 160 + const struct ieee1394_device_id *entry) 161 + { 162 + struct snd_card *card; 163 + struct snd_bebob *bebob; 164 + const struct snd_bebob_spec *spec; 165 + unsigned int card_index; 166 + int err; 167 + 168 + mutex_lock(&devices_mutex); 169 + 170 + for (card_index = 0; card_index < SNDRV_CARDS; card_index++) { 171 + if (!test_bit(card_index, 
devices_used) && enable[card_index]) 172 + break; 173 + } 174 + if (card_index >= SNDRV_CARDS) { 175 + err = -ENOENT; 176 + goto end; 177 + } 178 + 179 + if ((entry->vendor_id == VEN_FOCUSRITE) && 180 + (entry->model_id == MODEL_FOCUSRITE_SAFFIRE_BOTH)) 181 + spec = get_saffire_spec(unit); 182 + else if ((entry->vendor_id == VEN_MAUDIO1) && 183 + (entry->model_id == MODEL_MAUDIO_AUDIOPHILE_BOTH) && 184 + !check_audiophile_booted(unit)) 185 + spec = NULL; 186 + else 187 + spec = (const struct snd_bebob_spec *)entry->driver_data; 188 + 189 + if (spec == NULL) { 190 + if ((entry->vendor_id == VEN_MAUDIO1) || 191 + (entry->vendor_id == VEN_MAUDIO2)) 192 + err = snd_bebob_maudio_load_firmware(unit); 193 + else 194 + err = -ENOSYS; 195 + goto end; 196 + } 197 + 198 + err = snd_card_new(&unit->device, index[card_index], id[card_index], 199 + THIS_MODULE, sizeof(struct snd_bebob), &card); 200 + if (err < 0) 201 + goto end; 202 + bebob = card->private_data; 203 + bebob->card_index = card_index; 204 + set_bit(card_index, devices_used); 205 + card->private_free = bebob_card_free; 206 + 207 + bebob->card = card; 208 + bebob->unit = unit; 209 + bebob->spec = spec; 210 + mutex_init(&bebob->mutex); 211 + spin_lock_init(&bebob->lock); 212 + init_waitqueue_head(&bebob->hwdep_wait); 213 + 214 + err = name_device(bebob, entry->vendor_id); 215 + if (err < 0) 216 + goto error; 217 + 218 + if ((entry->vendor_id == VEN_MAUDIO1) && 219 + (entry->model_id == MODEL_MAUDIO_FW1814)) 220 + err = snd_bebob_maudio_special_discover(bebob, true); 221 + else if ((entry->vendor_id == VEN_MAUDIO1) && 222 + (entry->model_id == MODEL_MAUDIO_PROJECTMIX)) 223 + err = snd_bebob_maudio_special_discover(bebob, false); 224 + else 225 + err = snd_bebob_stream_discover(bebob); 226 + if (err < 0) 227 + goto error; 228 + 229 + snd_bebob_proc_init(bebob); 230 + 231 + if ((bebob->midi_input_ports > 0) || 232 + (bebob->midi_output_ports > 0)) { 233 + err = snd_bebob_create_midi_devices(bebob); 234 + if (err < 0) 
235 + goto error; 236 + } 237 + 238 + err = snd_bebob_create_pcm_devices(bebob); 239 + if (err < 0) 240 + goto error; 241 + 242 + err = snd_bebob_create_hwdep_device(bebob); 243 + if (err < 0) 244 + goto error; 245 + 246 + err = snd_bebob_stream_init_duplex(bebob); 247 + if (err < 0) 248 + goto error; 249 + 250 + if (!bebob->maudio_special_quirk) { 251 + err = snd_card_register(card); 252 + if (err < 0) { 253 + snd_bebob_stream_destroy_duplex(bebob); 254 + goto error; 255 + } 256 + } else { 257 + /* 258 + * This is a workaround. This bus reset seems to have an effect 259 + * to make devices correctly handling transactions. Without 260 + * this, the devices have gap_count mismatch. This causes much 261 + * failure of transaction. 262 + * 263 + * Just after registration, user-land application receive 264 + * signals from dbus and starts I/Os. To avoid I/Os till the 265 + * future bus reset, registration is done in next update(). 266 + */ 267 + bebob->deferred_registration = true; 268 + fw_schedule_bus_reset(fw_parent_device(bebob->unit)->card, 269 + false, true); 270 + } 271 + 272 + dev_set_drvdata(&unit->device, bebob); 273 + end: 274 + mutex_unlock(&devices_mutex); 275 + return err; 276 + error: 277 + mutex_unlock(&devices_mutex); 278 + snd_card_free(card); 279 + return err; 280 + } 281 + 282 + static void 283 + bebob_update(struct fw_unit *unit) 284 + { 285 + struct snd_bebob *bebob = dev_get_drvdata(&unit->device); 286 + 287 + if (bebob == NULL) 288 + return; 289 + 290 + fcp_bus_reset(bebob->unit); 291 + snd_bebob_stream_update_duplex(bebob); 292 + 293 + if (bebob->deferred_registration) { 294 + if (snd_card_register(bebob->card) < 0) { 295 + snd_bebob_stream_destroy_duplex(bebob); 296 + snd_card_free(bebob->card); 297 + } 298 + bebob->deferred_registration = false; 299 + } 300 + } 301 + 302 + static void bebob_remove(struct fw_unit *unit) 303 + { 304 + struct snd_bebob *bebob = dev_get_drvdata(&unit->device); 305 + 306 + if (bebob == NULL) 307 + return; 308 + 
309 + kfree(bebob->maudio_special_quirk); 310 + 311 + snd_bebob_stream_destroy_duplex(bebob); 312 + snd_card_disconnect(bebob->card); 313 + snd_card_free_when_closed(bebob->card); 314 + } 315 + 316 + static struct snd_bebob_rate_spec normal_rate_spec = { 317 + .get = &snd_bebob_stream_get_rate, 318 + .set = &snd_bebob_stream_set_rate 319 + }; 320 + static const struct snd_bebob_spec spec_normal = { 321 + .clock = NULL, 322 + .rate = &normal_rate_spec, 323 + .meter = NULL 324 + }; 325 + 326 + static const struct ieee1394_device_id bebob_id_table[] = { 327 + /* Edirol, FA-66 */ 328 + SND_BEBOB_DEV_ENTRY(VEN_EDIROL, 0x00010049, &spec_normal), 329 + /* Edirol, FA-101 */ 330 + SND_BEBOB_DEV_ENTRY(VEN_EDIROL, 0x00010048, &spec_normal), 331 + /* Presonus, FIREBOX */ 332 + SND_BEBOB_DEV_ENTRY(VEN_PRESONUS, 0x00010000, &spec_normal), 333 + /* PreSonus, FIREPOD/FP10 */ 334 + SND_BEBOB_DEV_ENTRY(VEN_PRESONUS, 0x00010066, &spec_normal), 335 + /* PreSonus, Inspire1394 */ 336 + SND_BEBOB_DEV_ENTRY(VEN_PRESONUS, 0x00010001, &spec_normal), 337 + /* BridgeCo, RDAudio1 */ 338 + SND_BEBOB_DEV_ENTRY(VEN_BRIDGECO, 0x00010048, &spec_normal), 339 + /* BridgeCo, Audio5 */ 340 + SND_BEBOB_DEV_ENTRY(VEN_BRIDGECO, 0x00010049, &spec_normal), 341 + /* Mackie, Onyx 1220/1620/1640 (Firewire I/O Card) */ 342 + SND_BEBOB_DEV_ENTRY(VEN_MACKIE, 0x00010065, &spec_normal), 343 + /* Mackie, d.2 (Firewire Option) */ 344 + SND_BEBOB_DEV_ENTRY(VEN_MACKIE, 0x00010067, &spec_normal), 345 + /* Stanton, ScratchAmp */ 346 + SND_BEBOB_DEV_ENTRY(VEN_STANTON, 0x00000001, &spec_normal), 347 + /* Tascam, IF-FW DM */ 348 + SND_BEBOB_DEV_ENTRY(VEN_TASCAM, 0x00010067, &spec_normal), 349 + /* Behringer, XENIX UFX 1204 */ 350 + SND_BEBOB_DEV_ENTRY(VEN_BEHRINGER, 0x00001204, &spec_normal), 351 + /* Behringer, XENIX UFX 1604 */ 352 + SND_BEBOB_DEV_ENTRY(VEN_BEHRINGER, 0x00001604, &spec_normal), 353 + /* Behringer, Digital Mixer X32 series (X-UF Card) */ 354 + SND_BEBOB_DEV_ENTRY(VEN_BEHRINGER, 0x00000006, &spec_normal), 
355 + /* Apogee Electronics, Rosetta 200/400 (X-FireWire card) */ 356 + /* Apogee Electronics, DA/AD/DD-16X (X-FireWire card) */ 357 + SND_BEBOB_DEV_ENTRY(VEN_APOGEE, 0x00010048, &spec_normal), 358 + /* Apogee Electronics, Ensemble */ 359 + SND_BEBOB_DEV_ENTRY(VEN_APOGEE, 0x00001eee, &spec_normal), 360 + /* ESI, Quatafire610 */ 361 + SND_BEBOB_DEV_ENTRY(VEN_ESI, 0x00010064, &spec_normal), 362 + /* AcousticReality, eARMasterOne */ 363 + SND_BEBOB_DEV_ENTRY(VEN_ACOUSTIC, 0x00000002, &spec_normal), 364 + /* CME, MatrixKFW */ 365 + SND_BEBOB_DEV_ENTRY(VEN_CME, 0x00030000, &spec_normal), 366 + /* Phonic, Helix Board 12 MkII */ 367 + SND_BEBOB_DEV_ENTRY(VEN_PHONIC, 0x00050000, &spec_normal), 368 + /* Phonic, Helix Board 18 MkII */ 369 + SND_BEBOB_DEV_ENTRY(VEN_PHONIC, 0x00060000, &spec_normal), 370 + /* Phonic, Helix Board 24 MkII */ 371 + SND_BEBOB_DEV_ENTRY(VEN_PHONIC, 0x00070000, &spec_normal), 372 + /* Phonic, Helix Board 12 Universal/18 Universal/24 Universal */ 373 + SND_BEBOB_DEV_ENTRY(VEN_PHONIC, 0x00000000, &spec_normal), 374 + /* Lynx, Aurora 8/16 (LT-FW) */ 375 + SND_BEBOB_DEV_ENTRY(VEN_LYNX, 0x00000001, &spec_normal), 376 + /* ICON, FireXon */ 377 + SND_BEBOB_DEV_ENTRY(VEN_ICON, 0x00000001, &spec_normal), 378 + /* PrismSound, Orpheus */ 379 + SND_BEBOB_DEV_ENTRY(VEN_PRISMSOUND, 0x00010048, &spec_normal), 380 + /* PrismSound, ADA-8XR */ 381 + SND_BEBOB_DEV_ENTRY(VEN_PRISMSOUND, 0x0000ada8, &spec_normal), 382 + /* TerraTec Electronic GmbH, PHASE 88 Rack FW */ 383 + SND_BEBOB_DEV_ENTRY(VEN_TERRATEC, 0x00000003, &phase88_rack_spec), 384 + /* TerraTec Electronic GmbH, PHASE 24 FW */ 385 + SND_BEBOB_DEV_ENTRY(VEN_TERRATEC, 0x00000004, &phase24_series_spec), 386 + /* TerraTec Electronic GmbH, Phase X24 FW */ 387 + SND_BEBOB_DEV_ENTRY(VEN_TERRATEC, 0x00000007, &phase24_series_spec), 388 + /* TerraTec Electronic GmbH, EWS MIC2/MIC8 */ 389 + SND_BEBOB_DEV_ENTRY(VEN_TERRATEC, 0x00000005, &spec_normal), 390 + /* Terratec Electronic GmbH, Aureon 7.1 Firewire */ 391 + 
SND_BEBOB_DEV_ENTRY(VEN_TERRATEC, 0x00000002, &spec_normal), 392 + /* Yamaha, GO44 */ 393 + SND_BEBOB_DEV_ENTRY(VEN_YAMAHA, 0x0010000b, &yamaha_go_spec), 394 + /* YAMAHA, GO46 */ 395 + SND_BEBOB_DEV_ENTRY(VEN_YAMAHA, 0x0010000c, &yamaha_go_spec), 396 + /* Focusrite, SaffirePro 26 I/O */ 397 + SND_BEBOB_DEV_ENTRY(VEN_FOCUSRITE, 0x00000003, &saffirepro_26_spec), 398 + /* Focusrite, SaffirePro 10 I/O */ 399 + SND_BEBOB_DEV_ENTRY(VEN_FOCUSRITE, 0x00000006, &saffirepro_10_spec), 400 + /* Focusrite, Saffire(no label and LE) */ 401 + SND_BEBOB_DEV_ENTRY(VEN_FOCUSRITE, MODEL_FOCUSRITE_SAFFIRE_BOTH, 402 + &saffire_spec), 403 + /* M-Audio, Firewire 410 */ 404 + SND_BEBOB_DEV_ENTRY(VEN_MAUDIO2, 0x00010058, NULL), /* bootloader */ 405 + SND_BEBOB_DEV_ENTRY(VEN_MAUDIO2, 0x00010046, &maudio_fw410_spec), 406 + /* M-Audio, Firewire Audiophile */ 407 + SND_BEBOB_DEV_ENTRY(VEN_MAUDIO1, MODEL_MAUDIO_AUDIOPHILE_BOTH, 408 + &maudio_audiophile_spec), 409 + /* M-Audio, Firewire Solo */ 410 + SND_BEBOB_DEV_ENTRY(VEN_MAUDIO1, 0x00010062, &maudio_solo_spec), 411 + /* M-Audio, Ozonic */ 412 + SND_BEBOB_DEV_ENTRY(VEN_MAUDIO1, 0x0000000a, &maudio_ozonic_spec), 413 + /* M-Audio NRV10 */ 414 + SND_BEBOB_DEV_ENTRY(VEN_MAUDIO1, 0x00010081, &maudio_nrv10_spec), 415 + /* M-Audio, ProFireLightbridge */ 416 + SND_BEBOB_DEV_ENTRY(VEN_MAUDIO1, 0x000100a1, &spec_normal), 417 + /* Firewire 1814 */ 418 + SND_BEBOB_DEV_ENTRY(VEN_MAUDIO1, 0x00010070, NULL), /* bootloader */ 419 + SND_BEBOB_DEV_ENTRY(VEN_MAUDIO1, MODEL_MAUDIO_FW1814, 420 + &maudio_special_spec), 421 + /* M-Audio ProjectMix */ 422 + SND_BEBOB_DEV_ENTRY(VEN_MAUDIO1, MODEL_MAUDIO_PROJECTMIX, 423 + &maudio_special_spec), 424 + /* IDs are unknown but able to be supported */ 425 + /* Apogee, Mini-ME Firewire */ 426 + /* Apogee, Mini-DAC Firewire */ 427 + /* Behringer, F-Control Audio 1616 */ 428 + /* Behringer, F-Control Audio 610 */ 429 + /* Cakawalk, Sonar Power Studio 66 */ 430 + /* CME, UF400e */ 431 + /* ESI, Quotafire XL */ 432 + /* 
Infrasonic, DewX */ 433 + /* Infrasonic, Windy6 */ 434 + /* Mackie, Digital X Bus x.200 */ 435 + /* Mackie, Digital X Bus x.400 */ 436 + /* Phonic, HB 12 */ 437 + /* Phonic, HB 24 */ 438 + /* Phonic, HB 18 */ 439 + /* Phonic, FireFly 202 */ 440 + /* Phonic, FireFly 302 */ 441 + /* Rolf Spuler, Firewire Guitar */ 442 + {} 443 + }; 444 + MODULE_DEVICE_TABLE(ieee1394, bebob_id_table); 445 + 446 + static struct fw_driver bebob_driver = { 447 + .driver = { 448 + .owner = THIS_MODULE, 449 + .name = "snd-bebob", 450 + .bus = &fw_bus_type, 451 + }, 452 + .probe = bebob_probe, 453 + .update = bebob_update, 454 + .remove = bebob_remove, 455 + .id_table = bebob_id_table, 456 + }; 457 + 458 + static int __init 459 + snd_bebob_init(void) 460 + { 461 + return driver_register(&bebob_driver.driver); 462 + } 463 + 464 + static void __exit 465 + snd_bebob_exit(void) 466 + { 467 + driver_unregister(&bebob_driver.driver); 468 + } 469 + 470 + module_init(snd_bebob_init); 471 + module_exit(snd_bebob_exit);
+257
sound/firewire/bebob/bebob.h
··· 1 + /* 2 + * bebob.h - a part of driver for BeBoB based devices 3 + * 4 + * Copyright (c) 2013-2014 Takashi Sakamoto 5 + * 6 + * Licensed under the terms of the GNU General Public License, version 2. 7 + */ 8 + 9 + #ifndef SOUND_BEBOB_H_INCLUDED 10 + #define SOUND_BEBOB_H_INCLUDED 11 + 12 + #include <linux/compat.h> 13 + #include <linux/device.h> 14 + #include <linux/firewire.h> 15 + #include <linux/firewire-constants.h> 16 + #include <linux/module.h> 17 + #include <linux/mod_devicetable.h> 18 + #include <linux/delay.h> 19 + #include <linux/slab.h> 20 + 21 + #include <sound/core.h> 22 + #include <sound/initval.h> 23 + #include <sound/info.h> 24 + #include <sound/rawmidi.h> 25 + #include <sound/pcm.h> 26 + #include <sound/pcm_params.h> 27 + #include <sound/firewire.h> 28 + #include <sound/hwdep.h> 29 + 30 + #include "../lib.h" 31 + #include "../fcp.h" 32 + #include "../packets-buffer.h" 33 + #include "../iso-resources.h" 34 + #include "../amdtp.h" 35 + #include "../cmp.h" 36 + 37 + /* basic register addresses on DM1000/DM1100/DM1500 */ 38 + #define BEBOB_ADDR_REG_INFO 0xffffc8020000ULL 39 + #define BEBOB_ADDR_REG_REQ 0xffffc8021000ULL 40 + 41 + struct snd_bebob; 42 + 43 + #define SND_BEBOB_STRM_FMT_ENTRIES 7 44 + struct snd_bebob_stream_formation { 45 + unsigned int pcm; 46 + unsigned int midi; 47 + }; 48 + /* this is a lookup table for index of stream formations */ 49 + extern const unsigned int snd_bebob_rate_table[SND_BEBOB_STRM_FMT_ENTRIES]; 50 + 51 + /* device specific operations */ 52 + #define SND_BEBOB_CLOCK_INTERNAL "Internal" 53 + struct snd_bebob_clock_spec { 54 + unsigned int num; 55 + char *const *labels; 56 + int (*get)(struct snd_bebob *bebob, unsigned int *id); 57 + }; 58 + struct snd_bebob_rate_spec { 59 + int (*get)(struct snd_bebob *bebob, unsigned int *rate); 60 + int (*set)(struct snd_bebob *bebob, unsigned int rate); 61 + }; 62 + struct snd_bebob_meter_spec { 63 + unsigned int num; 64 + char *const *labels; 65 + int (*get)(struct snd_bebob 
*bebob, u32 *target, unsigned int size); 66 + }; 67 + struct snd_bebob_spec { 68 + struct snd_bebob_clock_spec *clock; 69 + struct snd_bebob_rate_spec *rate; 70 + struct snd_bebob_meter_spec *meter; 71 + }; 72 + 73 + struct snd_bebob { 74 + struct snd_card *card; 75 + struct fw_unit *unit; 76 + int card_index; 77 + 78 + struct mutex mutex; 79 + spinlock_t lock; 80 + 81 + const struct snd_bebob_spec *spec; 82 + 83 + unsigned int midi_input_ports; 84 + unsigned int midi_output_ports; 85 + 86 + /* for bus reset quirk */ 87 + struct completion bus_reset; 88 + bool connected; 89 + 90 + struct amdtp_stream *master; 91 + struct amdtp_stream tx_stream; 92 + struct amdtp_stream rx_stream; 93 + struct cmp_connection out_conn; 94 + struct cmp_connection in_conn; 95 + atomic_t capture_substreams; 96 + atomic_t playback_substreams; 97 + 98 + struct snd_bebob_stream_formation 99 + tx_stream_formations[SND_BEBOB_STRM_FMT_ENTRIES]; 100 + struct snd_bebob_stream_formation 101 + rx_stream_formations[SND_BEBOB_STRM_FMT_ENTRIES]; 102 + 103 + int sync_input_plug; 104 + 105 + /* for uapi */ 106 + int dev_lock_count; 107 + bool dev_lock_changed; 108 + wait_queue_head_t hwdep_wait; 109 + 110 + /* for M-Audio special devices */ 111 + void *maudio_special_quirk; 112 + bool deferred_registration; 113 + }; 114 + 115 + static inline int 116 + snd_bebob_read_block(struct fw_unit *unit, u64 addr, void *buf, int size) 117 + { 118 + return snd_fw_transaction(unit, TCODE_READ_BLOCK_REQUEST, 119 + BEBOB_ADDR_REG_INFO + addr, 120 + buf, size, 0); 121 + } 122 + 123 + static inline int 124 + snd_bebob_read_quad(struct fw_unit *unit, u64 addr, u32 *buf) 125 + { 126 + return snd_fw_transaction(unit, TCODE_READ_QUADLET_REQUEST, 127 + BEBOB_ADDR_REG_INFO + addr, 128 + (void *)buf, sizeof(u32), 0); 129 + } 130 + 131 + /* AV/C Audio Subunit Specification 1.0 (Oct 2000, 1394TA) */ 132 + int avc_audio_set_selector(struct fw_unit *unit, unsigned int subunit_id, 133 + unsigned int fb_id, unsigned int num); 134 + 
int avc_audio_get_selector(struct fw_unit *unit, unsigned int subunit_id, 135 + unsigned int fb_id, unsigned int *num); 136 + 137 + /* 138 + * AVC command extensions, AV/C Unit and Subunit, Revision 17 139 + * (Nov 2003, BridgeCo) 140 + */ 141 + #define AVC_BRIDGECO_ADDR_BYTES 6 142 + enum avc_bridgeco_plug_dir { 143 + AVC_BRIDGECO_PLUG_DIR_IN = 0x00, 144 + AVC_BRIDGECO_PLUG_DIR_OUT = 0x01 145 + }; 146 + enum avc_bridgeco_plug_mode { 147 + AVC_BRIDGECO_PLUG_MODE_UNIT = 0x00, 148 + AVC_BRIDGECO_PLUG_MODE_SUBUNIT = 0x01, 149 + AVC_BRIDGECO_PLUG_MODE_FUNCTION_BLOCK = 0x02 150 + }; 151 + enum avc_bridgeco_plug_unit { 152 + AVC_BRIDGECO_PLUG_UNIT_ISOC = 0x00, 153 + AVC_BRIDGECO_PLUG_UNIT_EXT = 0x01, 154 + AVC_BRIDGECO_PLUG_UNIT_ASYNC = 0x02 155 + }; 156 + enum avc_bridgeco_plug_type { 157 + AVC_BRIDGECO_PLUG_TYPE_ISOC = 0x00, 158 + AVC_BRIDGECO_PLUG_TYPE_ASYNC = 0x01, 159 + AVC_BRIDGECO_PLUG_TYPE_MIDI = 0x02, 160 + AVC_BRIDGECO_PLUG_TYPE_SYNC = 0x03, 161 + AVC_BRIDGECO_PLUG_TYPE_ANA = 0x04, 162 + AVC_BRIDGECO_PLUG_TYPE_DIG = 0x05 163 + }; 164 + static inline void 165 + avc_bridgeco_fill_unit_addr(u8 buf[AVC_BRIDGECO_ADDR_BYTES], 166 + enum avc_bridgeco_plug_dir dir, 167 + enum avc_bridgeco_plug_unit unit, 168 + unsigned int pid) 169 + { 170 + buf[0] = 0xff; /* Unit */ 171 + buf[1] = dir; 172 + buf[2] = AVC_BRIDGECO_PLUG_MODE_UNIT; 173 + buf[3] = unit; 174 + buf[4] = 0xff & pid; 175 + buf[5] = 0xff; /* reserved */ 176 + } 177 + static inline void 178 + avc_bridgeco_fill_msu_addr(u8 buf[AVC_BRIDGECO_ADDR_BYTES], 179 + enum avc_bridgeco_plug_dir dir, 180 + unsigned int pid) 181 + { 182 + buf[0] = 0x60; /* Music subunit */ 183 + buf[1] = dir; 184 + buf[2] = AVC_BRIDGECO_PLUG_MODE_SUBUNIT; 185 + buf[3] = 0xff & pid; 186 + buf[4] = 0xff; /* reserved */ 187 + buf[5] = 0xff; /* reserved */ 188 + } 189 + int avc_bridgeco_get_plug_ch_pos(struct fw_unit *unit, 190 + u8 addr[AVC_BRIDGECO_ADDR_BYTES], 191 + u8 *buf, unsigned int len); 192 + int avc_bridgeco_get_plug_type(struct 
fw_unit *unit, 193 + u8 addr[AVC_BRIDGECO_ADDR_BYTES], 194 + enum avc_bridgeco_plug_type *type); 195 + int avc_bridgeco_get_plug_section_type(struct fw_unit *unit, 196 + u8 addr[AVC_BRIDGECO_ADDR_BYTES], 197 + unsigned int id, u8 *type); 198 + int avc_bridgeco_get_plug_input(struct fw_unit *unit, 199 + u8 addr[AVC_BRIDGECO_ADDR_BYTES], 200 + u8 input[7]); 201 + int avc_bridgeco_get_plug_strm_fmt(struct fw_unit *unit, 202 + u8 addr[AVC_BRIDGECO_ADDR_BYTES], u8 *buf, 203 + unsigned int *len, unsigned int eid); 204 + 205 + /* for AMDTP streaming */ 206 + int snd_bebob_stream_get_rate(struct snd_bebob *bebob, unsigned int *rate); 207 + int snd_bebob_stream_set_rate(struct snd_bebob *bebob, unsigned int rate); 208 + int snd_bebob_stream_check_internal_clock(struct snd_bebob *bebob, 209 + bool *internal); 210 + int snd_bebob_stream_discover(struct snd_bebob *bebob); 211 + int snd_bebob_stream_map(struct snd_bebob *bebob, 212 + struct amdtp_stream *stream); 213 + int snd_bebob_stream_init_duplex(struct snd_bebob *bebob); 214 + int snd_bebob_stream_start_duplex(struct snd_bebob *bebob, unsigned int rate); 215 + void snd_bebob_stream_stop_duplex(struct snd_bebob *bebob); 216 + void snd_bebob_stream_update_duplex(struct snd_bebob *bebob); 217 + void snd_bebob_stream_destroy_duplex(struct snd_bebob *bebob); 218 + 219 + void snd_bebob_stream_lock_changed(struct snd_bebob *bebob); 220 + int snd_bebob_stream_lock_try(struct snd_bebob *bebob); 221 + void snd_bebob_stream_lock_release(struct snd_bebob *bebob); 222 + 223 + void snd_bebob_proc_init(struct snd_bebob *bebob); 224 + 225 + int snd_bebob_create_midi_devices(struct snd_bebob *bebob); 226 + 227 + int snd_bebob_create_pcm_devices(struct snd_bebob *bebob); 228 + 229 + int snd_bebob_create_hwdep_device(struct snd_bebob *bebob); 230 + 231 + /* model specific operations */ 232 + extern struct snd_bebob_spec phase88_rack_spec; 233 + extern struct snd_bebob_spec phase24_series_spec; 234 + extern struct snd_bebob_spec 
yamaha_go_spec; 235 + extern struct snd_bebob_spec saffirepro_26_spec; 236 + extern struct snd_bebob_spec saffirepro_10_spec; 237 + extern struct snd_bebob_spec saffire_le_spec; 238 + extern struct snd_bebob_spec saffire_spec; 239 + extern struct snd_bebob_spec maudio_fw410_spec; 240 + extern struct snd_bebob_spec maudio_audiophile_spec; 241 + extern struct snd_bebob_spec maudio_solo_spec; 242 + extern struct snd_bebob_spec maudio_ozonic_spec; 243 + extern struct snd_bebob_spec maudio_nrv10_spec; 244 + extern struct snd_bebob_spec maudio_special_spec; 245 + int snd_bebob_maudio_special_discover(struct snd_bebob *bebob, bool is1814); 246 + int snd_bebob_maudio_load_firmware(struct fw_unit *unit); 247 + 248 + #define SND_BEBOB_DEV_ENTRY(vendor, model, data) \ 249 + { \ 250 + .match_flags = IEEE1394_MATCH_VENDOR_ID | \ 251 + IEEE1394_MATCH_MODEL_ID, \ 252 + .vendor_id = vendor, \ 253 + .model_id = model, \ 254 + .driver_data = (kernel_ulong_t)data \ 255 + } 256 + 257 + #endif
+282
sound/firewire/bebob/bebob_command.c
··· 1 + /* 2 + * bebob_command.c - driver for BeBoB based devices 3 + * 4 + * Copyright (c) 2013-2014 Takashi Sakamoto 5 + * 6 + * Licensed under the terms of the GNU General Public License, version 2. 7 + */ 8 + 9 + #include "./bebob.h" 10 + 11 + int avc_audio_set_selector(struct fw_unit *unit, unsigned int subunit_id, 12 + unsigned int fb_id, unsigned int num) 13 + { 14 + u8 *buf; 15 + int err; 16 + 17 + buf = kzalloc(12, GFP_KERNEL); 18 + if (buf == NULL) 19 + return -ENOMEM; 20 + 21 + buf[0] = 0x00; /* AV/C CONTROL */ 22 + buf[1] = 0x08 | (0x07 & subunit_id); /* AUDIO SUBUNIT ID */ 23 + buf[2] = 0xb8; /* FUNCTION BLOCK */ 24 + buf[3] = 0x80; /* type is 'selector'*/ 25 + buf[4] = 0xff & fb_id; /* function block id */ 26 + buf[5] = 0x10; /* control attribute is CURRENT */ 27 + buf[6] = 0x02; /* selector length is 2 */ 28 + buf[7] = 0xff & num; /* input function block plug number */ 29 + buf[8] = 0x01; /* control selector is SELECTOR_CONTROL */ 30 + 31 + err = fcp_avc_transaction(unit, buf, 12, buf, 12, 32 + BIT(1) | BIT(2) | BIT(3) | BIT(4) | BIT(5) | 33 + BIT(6) | BIT(7) | BIT(8)); 34 + if (err > 0 && err < 9) 35 + err = -EIO; 36 + else if (buf[0] == 0x08) /* NOT IMPLEMENTED */ 37 + err = -ENOSYS; 38 + else if (buf[0] == 0x0a) /* REJECTED */ 39 + err = -EINVAL; 40 + else if (err > 0) 41 + err = 0; 42 + 43 + kfree(buf); 44 + return err; 45 + } 46 + 47 + int avc_audio_get_selector(struct fw_unit *unit, unsigned int subunit_id, 48 + unsigned int fb_id, unsigned int *num) 49 + { 50 + u8 *buf; 51 + int err; 52 + 53 + buf = kzalloc(12, GFP_KERNEL); 54 + if (buf == NULL) 55 + return -ENOMEM; 56 + 57 + buf[0] = 0x01; /* AV/C STATUS */ 58 + buf[1] = 0x08 | (0x07 & subunit_id); /* AUDIO SUBUNIT ID */ 59 + buf[2] = 0xb8; /* FUNCTION BLOCK */ 60 + buf[3] = 0x80; /* type is 'selector'*/ 61 + buf[4] = 0xff & fb_id; /* function block id */ 62 + buf[5] = 0x10; /* control attribute is CURRENT */ 63 + buf[6] = 0x02; /* selector length is 2 */ 64 + buf[7] = 0xff; /* input function 
block plug number */ 65 + buf[8] = 0x01; /* control selector is SELECTOR_CONTROL */ 66 + 67 + err = fcp_avc_transaction(unit, buf, 12, buf, 12, 68 + BIT(1) | BIT(2) | BIT(3) | BIT(4) | BIT(5) | 69 + BIT(6) | BIT(8)); 70 + if (err > 0 && err < 9) 71 + err = -EIO; 72 + else if (buf[0] == 0x08) /* NOT IMPLEMENTED */ 73 + err = -ENOSYS; 74 + else if (buf[0] == 0x0a) /* REJECTED */ 75 + err = -EINVAL; 76 + else if (buf[0] == 0x0b) /* IN TRANSITION */ 77 + err = -EAGAIN; 78 + if (err < 0) 79 + goto end; 80 + 81 + *num = buf[7]; 82 + err = 0; 83 + end: 84 + kfree(buf); 85 + return err; 86 + } 87 + 88 + static inline void 89 + avc_bridgeco_fill_extension_addr(u8 *buf, u8 *addr) 90 + { 91 + buf[1] = addr[0]; 92 + memcpy(buf + 4, addr + 1, 5); 93 + } 94 + 95 + static inline void 96 + avc_bridgeco_fill_plug_info_extension_command(u8 *buf, u8 *addr, 97 + unsigned int itype) 98 + { 99 + buf[0] = 0x01; /* AV/C STATUS */ 100 + buf[2] = 0x02; /* AV/C GENERAL PLUG INFO */ 101 + buf[3] = 0xc0; /* BridgeCo extension */ 102 + avc_bridgeco_fill_extension_addr(buf, addr); 103 + buf[9] = itype; /* info type */ 104 + } 105 + 106 + int avc_bridgeco_get_plug_type(struct fw_unit *unit, 107 + u8 addr[AVC_BRIDGECO_ADDR_BYTES], 108 + enum avc_bridgeco_plug_type *type) 109 + { 110 + u8 *buf; 111 + int err; 112 + 113 + buf = kzalloc(12, GFP_KERNEL); 114 + if (buf == NULL) 115 + return -ENOMEM; 116 + 117 + /* Info type is 'plug type'. 
*/ 118 + avc_bridgeco_fill_plug_info_extension_command(buf, addr, 0x00); 119 + 120 + err = fcp_avc_transaction(unit, buf, 12, buf, 12, 121 + BIT(1) | BIT(2) | BIT(3) | BIT(4) | BIT(5) | 122 + BIT(6) | BIT(7) | BIT(9)); 123 + if ((err >= 0) && (err < 8)) 124 + err = -EIO; 125 + else if (buf[0] == 0x08) /* NOT IMPLEMENTED */ 126 + err = -ENOSYS; 127 + else if (buf[0] == 0x0a) /* REJECTED */ 128 + err = -EINVAL; 129 + else if (buf[0] == 0x0b) /* IN TRANSITION */ 130 + err = -EAGAIN; 131 + if (err < 0) 132 + goto end; 133 + 134 + *type = buf[10]; 135 + err = 0; 136 + end: 137 + kfree(buf); 138 + return err; 139 + } 140 + 141 + int avc_bridgeco_get_plug_ch_pos(struct fw_unit *unit, 142 + u8 addr[AVC_BRIDGECO_ADDR_BYTES], 143 + u8 *buf, unsigned int len) 144 + { 145 + int err; 146 + 147 + /* Info type is 'channel position'. */ 148 + avc_bridgeco_fill_plug_info_extension_command(buf, addr, 0x03); 149 + 150 + err = fcp_avc_transaction(unit, buf, 12, buf, 256, 151 + BIT(1) | BIT(2) | BIT(3) | BIT(4) | 152 + BIT(5) | BIT(6) | BIT(7) | BIT(9)); 153 + if ((err >= 0) && (err < 8)) 154 + err = -EIO; 155 + else if (buf[0] == 0x08) /* NOT IMPLEMENTED */ 156 + err = -ENOSYS; 157 + else if (buf[0] == 0x0a) /* REJECTED */ 158 + err = -EINVAL; 159 + else if (buf[0] == 0x0b) /* IN TRANSITION */ 160 + err = -EAGAIN; 161 + if (err < 0) 162 + goto end; 163 + 164 + /* Pick up specific data. */ 165 + memmove(buf, buf + 10, err - 10); 166 + err = 0; 167 + end: 168 + return err; 169 + } 170 + 171 + int avc_bridgeco_get_plug_section_type(struct fw_unit *unit, 172 + u8 addr[AVC_BRIDGECO_ADDR_BYTES], 173 + unsigned int id, u8 *type) 174 + { 175 + u8 *buf; 176 + int err; 177 + 178 + /* section info includes charactors but this module don't need it */ 179 + buf = kzalloc(12, GFP_KERNEL); 180 + if (buf == NULL) 181 + return -ENOMEM; 182 + 183 + /* Info type is 'section info'. 
*/ 184 + avc_bridgeco_fill_plug_info_extension_command(buf, addr, 0x07); 185 + buf[10] = 0xff & ++id; /* section id */ 186 + 187 + err = fcp_avc_transaction(unit, buf, 12, buf, 12, 188 + BIT(1) | BIT(2) | BIT(3) | BIT(4) | BIT(5) | 189 + BIT(6) | BIT(7) | BIT(9) | BIT(10)); 190 + if ((err >= 0) && (err < 8)) 191 + err = -EIO; 192 + else if (buf[0] == 0x08) /* NOT IMPLEMENTED */ 193 + err = -ENOSYS; 194 + else if (buf[0] == 0x0a) /* REJECTED */ 195 + err = -EINVAL; 196 + else if (buf[0] == 0x0b) /* IN TRANSITION */ 197 + err = -EAGAIN; 198 + if (err < 0) 199 + goto end; 200 + 201 + *type = buf[11]; 202 + err = 0; 203 + end: 204 + kfree(buf); 205 + return err; 206 + } 207 + 208 + int avc_bridgeco_get_plug_input(struct fw_unit *unit, 209 + u8 addr[AVC_BRIDGECO_ADDR_BYTES], u8 input[7]) 210 + { 211 + int err; 212 + u8 *buf; 213 + 214 + buf = kzalloc(18, GFP_KERNEL); 215 + if (buf == NULL) 216 + return -ENOMEM; 217 + 218 + /* Info type is 'plug input'. */ 219 + avc_bridgeco_fill_plug_info_extension_command(buf, addr, 0x05); 220 + 221 + err = fcp_avc_transaction(unit, buf, 16, buf, 16, 222 + BIT(1) | BIT(2) | BIT(3) | BIT(4) | BIT(5) | 223 + BIT(6) | BIT(7)); 224 + if ((err >= 0) && (err < 8)) 225 + err = -EIO; 226 + else if (buf[0] == 0x08) /* NOT IMPLEMENTED */ 227 + err = -ENOSYS; 228 + else if (buf[0] == 0x0a) /* REJECTED */ 229 + err = -EINVAL; 230 + else if (buf[0] == 0x0b) /* IN TRANSITION */ 231 + err = -EAGAIN; 232 + if (err < 0) 233 + goto end; 234 + 235 + memcpy(input, buf + 10, 5); 236 + err = 0; 237 + end: 238 + kfree(buf); 239 + return err; 240 + } 241 + 242 + int avc_bridgeco_get_plug_strm_fmt(struct fw_unit *unit, 243 + u8 addr[AVC_BRIDGECO_ADDR_BYTES], u8 *buf, 244 + unsigned int *len, unsigned int eid) 245 + { 246 + int err; 247 + 248 + /* check given buffer */ 249 + if ((buf == NULL) || (*len < 12)) { 250 + err = -EINVAL; 251 + goto end; 252 + } 253 + 254 + buf[0] = 0x01; /* AV/C STATUS */ 255 + buf[2] = 0x2f; /* AV/C STREAM FORMAT SUPPORT */ 256 + 
buf[3] = 0xc1; /* Bridgeco extension - List Request */ 257 + avc_bridgeco_fill_extension_addr(buf, addr); 258 + buf[10] = 0xff & eid; /* Entry ID */ 259 + 260 + err = fcp_avc_transaction(unit, buf, 12, buf, *len, 261 + BIT(1) | BIT(2) | BIT(3) | BIT(4) | BIT(5) | 262 + BIT(6) | BIT(7) | BIT(10)); 263 + if ((err >= 0) && (err < 12)) 264 + err = -EIO; 265 + else if (buf[0] == 0x08) /* NOT IMPLEMENTED */ 266 + err = -ENOSYS; 267 + else if (buf[0] == 0x0a) /* REJECTED */ 268 + err = -EINVAL; 269 + else if (buf[0] == 0x0b) /* IN TRANSITION */ 270 + err = -EAGAIN; 271 + else if (buf[10] != eid) 272 + err = -EIO; 273 + if (err < 0) 274 + goto end; 275 + 276 + /* Pick up 'stream format info'. */ 277 + memmove(buf, buf + 11, err - 11); 278 + *len = err - 11; 279 + err = 0; 280 + end: 281 + return err; 282 + }
+279
sound/firewire/bebob/bebob_focusrite.c
··· 1 + /* 2 + * bebob_focusrite.c - a part of driver for BeBoB based devices 3 + * 4 + * Copyright (c) 2013-2014 Takashi Sakamoto 5 + * 6 + * Licensed under the terms of the GNU General Public License, version 2. 7 + */ 8 + 9 + #include "./bebob.h" 10 + 11 + #define ANA_IN "Analog In" 12 + #define DIG_IN "Digital In" 13 + #define ANA_OUT "Analog Out" 14 + #define DIG_OUT "Digital Out" 15 + #define STM_IN "Stream In" 16 + 17 + #define SAFFIRE_ADDRESS_BASE 0x000100000000ULL 18 + 19 + #define SAFFIRE_OFFSET_CLOCK_SOURCE 0x00f8 20 + #define SAFFIREPRO_OFFSET_CLOCK_SOURCE 0x0174 21 + 22 + /* whether sync to external device or not */ 23 + #define SAFFIRE_OFFSET_CLOCK_SYNC_EXT 0x013c 24 + #define SAFFIRE_LE_OFFSET_CLOCK_SYNC_EXT 0x0432 25 + #define SAFFIREPRO_OFFSET_CLOCK_SYNC_EXT 0x0164 26 + 27 + #define SAFFIRE_CLOCK_SOURCE_INTERNAL 0 28 + #define SAFFIRE_CLOCK_SOURCE_SPDIF 1 29 + 30 + /* '1' is absent, why... */ 31 + #define SAFFIREPRO_CLOCK_SOURCE_INTERNAL 0 32 + #define SAFFIREPRO_CLOCK_SOURCE_SPDIF 2 33 + #define SAFFIREPRO_CLOCK_SOURCE_ADAT1 3 34 + #define SAFFIREPRO_CLOCK_SOURCE_ADAT2 4 35 + #define SAFFIREPRO_CLOCK_SOURCE_WORDCLOCK 5 36 + 37 + /* S/PDIF, ADAT1, ADAT2 is enabled or not. 
three quadlets */ 38 + #define SAFFIREPRO_ENABLE_DIG_IFACES 0x01a4 39 + 40 + /* saffirepro has its own parameter for sampling frequency */ 41 + #define SAFFIREPRO_RATE_NOREBOOT 0x01cc 42 + /* index is the value for this register */ 43 + static const unsigned int rates[] = { 44 + [0] = 0, 45 + [1] = 44100, 46 + [2] = 48000, 47 + [3] = 88200, 48 + [4] = 96000, 49 + [5] = 176400, 50 + [6] = 192000 51 + }; 52 + 53 + /* saffire(no label)/saffire LE has metering */ 54 + #define SAFFIRE_OFFSET_METER 0x0100 55 + #define SAFFIRE_LE_OFFSET_METER 0x0168 56 + 57 + static inline int 58 + saffire_read_block(struct snd_bebob *bebob, u64 offset, 59 + u32 *buf, unsigned int size) 60 + { 61 + unsigned int i; 62 + int err; 63 + __be32 *tmp = (__be32 *)buf; 64 + 65 + err = snd_fw_transaction(bebob->unit, TCODE_READ_BLOCK_REQUEST, 66 + SAFFIRE_ADDRESS_BASE + offset, 67 + tmp, size, 0); 68 + if (err < 0) 69 + goto end; 70 + 71 + for (i = 0; i < size / sizeof(u32); i++) 72 + buf[i] = be32_to_cpu(tmp[i]); 73 + end: 74 + return err; 75 + } 76 + 77 + static inline int 78 + saffire_read_quad(struct snd_bebob *bebob, u64 offset, u32 *value) 79 + { 80 + int err; 81 + __be32 tmp; 82 + 83 + err = snd_fw_transaction(bebob->unit, TCODE_READ_QUADLET_REQUEST, 84 + SAFFIRE_ADDRESS_BASE + offset, 85 + &tmp, sizeof(__be32), 0); 86 + if (err < 0) 87 + goto end; 88 + 89 + *value = be32_to_cpu(tmp); 90 + end: 91 + return err; 92 + } 93 + 94 + static inline int 95 + saffire_write_quad(struct snd_bebob *bebob, u64 offset, u32 value) 96 + { 97 + __be32 data = cpu_to_be32(value); 98 + 99 + return snd_fw_transaction(bebob->unit, TCODE_WRITE_QUADLET_REQUEST, 100 + SAFFIRE_ADDRESS_BASE + offset, 101 + &data, sizeof(__be32), 0); 102 + } 103 + 104 + static char *const saffirepro_26_clk_src_labels[] = { 105 + SND_BEBOB_CLOCK_INTERNAL, "S/PDIF", "ADAT1", "ADAT2", "Word Clock" 106 + }; 107 + 108 + static char *const saffirepro_10_clk_src_labels[] = { 109 + SND_BEBOB_CLOCK_INTERNAL, "S/PDIF", "Word Clock" 110 + }; 111 
+ static int 112 + saffirepro_both_clk_freq_get(struct snd_bebob *bebob, unsigned int *rate) 113 + { 114 + u32 id; 115 + int err; 116 + 117 + err = saffire_read_quad(bebob, SAFFIREPRO_RATE_NOREBOOT, &id); 118 + if (err < 0) 119 + goto end; 120 + if (id >= ARRAY_SIZE(rates)) 121 + err = -EIO; 122 + else 123 + *rate = rates[id]; 124 + end: 125 + return err; 126 + } 127 + static int 128 + saffirepro_both_clk_freq_set(struct snd_bebob *bebob, unsigned int rate) 129 + { 130 + u32 id; 131 + 132 + for (id = 0; id < ARRAY_SIZE(rates); id++) { 133 + if (rates[id] == rate) 134 + break; 135 + } 136 + if (id == ARRAY_SIZE(rates)) 137 + return -EINVAL; 138 + 139 + return saffire_write_quad(bebob, SAFFIREPRO_RATE_NOREBOOT, id); 140 + } 141 + static int 142 + saffirepro_both_clk_src_get(struct snd_bebob *bebob, unsigned int *id) 143 + { 144 + int err; 145 + u32 value; 146 + 147 + err = saffire_read_quad(bebob, SAFFIREPRO_OFFSET_CLOCK_SOURCE, &value); 148 + if (err < 0) 149 + goto end; 150 + 151 + if (bebob->spec->clock->labels == saffirepro_10_clk_src_labels) { 152 + if (value == SAFFIREPRO_CLOCK_SOURCE_WORDCLOCK) 153 + *id = 2; 154 + else if (value == SAFFIREPRO_CLOCK_SOURCE_SPDIF) 155 + *id = 1; 156 + } else if (value > 1) { 157 + *id = value - 1; 158 + } 159 + end: 160 + return err; 161 + } 162 + 163 + struct snd_bebob_spec saffire_le_spec; 164 + static char *const saffire_both_clk_src_labels[] = { 165 + SND_BEBOB_CLOCK_INTERNAL, "S/PDIF" 166 + }; 167 + static int 168 + saffire_both_clk_src_get(struct snd_bebob *bebob, unsigned int *id) 169 + { 170 + int err; 171 + u32 value; 172 + 173 + err = saffire_read_quad(bebob, SAFFIRE_OFFSET_CLOCK_SOURCE, &value); 174 + if (err >= 0) 175 + *id = 0xff & value; 176 + 177 + return err; 178 + }; 179 + static char *const saffire_le_meter_labels[] = { 180 + ANA_IN, ANA_IN, DIG_IN, 181 + ANA_OUT, ANA_OUT, ANA_OUT, ANA_OUT, 182 + STM_IN, STM_IN 183 + }; 184 + static char *const saffire_meter_labels[] = { 185 + ANA_IN, ANA_IN, 186 + STM_IN, 
STM_IN, STM_IN, STM_IN, STM_IN, 187 + }; 188 + static int 189 + saffire_meter_get(struct snd_bebob *bebob, u32 *buf, unsigned int size) 190 + { 191 + struct snd_bebob_meter_spec *spec = bebob->spec->meter; 192 + unsigned int channels; 193 + u64 offset; 194 + int err; 195 + 196 + if (spec->labels == saffire_le_meter_labels) 197 + offset = SAFFIRE_LE_OFFSET_METER; 198 + else 199 + offset = SAFFIRE_OFFSET_METER; 200 + 201 + channels = spec->num * 2; 202 + if (size < channels * sizeof(u32)) 203 + return -EIO; 204 + 205 + err = saffire_read_block(bebob, offset, buf, size); 206 + if (err >= 0 && spec->labels == saffire_le_meter_labels) { 207 + swap(buf[1], buf[3]); 208 + swap(buf[2], buf[3]); 209 + swap(buf[3], buf[4]); 210 + 211 + swap(buf[7], buf[10]); 212 + swap(buf[8], buf[10]); 213 + swap(buf[9], buf[11]); 214 + swap(buf[11], buf[12]); 215 + 216 + swap(buf[15], buf[16]); 217 + } 218 + 219 + return err; 220 + } 221 + 222 + static struct snd_bebob_rate_spec saffirepro_both_rate_spec = { 223 + .get = &saffirepro_both_clk_freq_get, 224 + .set = &saffirepro_both_clk_freq_set, 225 + }; 226 + /* Saffire Pro 26 I/O */ 227 + static struct snd_bebob_clock_spec saffirepro_26_clk_spec = { 228 + .num = ARRAY_SIZE(saffirepro_26_clk_src_labels), 229 + .labels = saffirepro_26_clk_src_labels, 230 + .get = &saffirepro_both_clk_src_get, 231 + }; 232 + struct snd_bebob_spec saffirepro_26_spec = { 233 + .clock = &saffirepro_26_clk_spec, 234 + .rate = &saffirepro_both_rate_spec, 235 + .meter = NULL 236 + }; 237 + /* Saffire Pro 10 I/O */ 238 + static struct snd_bebob_clock_spec saffirepro_10_clk_spec = { 239 + .num = ARRAY_SIZE(saffirepro_10_clk_src_labels), 240 + .labels = saffirepro_10_clk_src_labels, 241 + .get = &saffirepro_both_clk_src_get, 242 + }; 243 + struct snd_bebob_spec saffirepro_10_spec = { 244 + .clock = &saffirepro_10_clk_spec, 245 + .rate = &saffirepro_both_rate_spec, 246 + .meter = NULL 247 + }; 248 + 249 + static struct snd_bebob_rate_spec saffire_both_rate_spec = { 
250 + .get = &snd_bebob_stream_get_rate, 251 + .set = &snd_bebob_stream_set_rate, 252 + }; 253 + static struct snd_bebob_clock_spec saffire_both_clk_spec = { 254 + .num = ARRAY_SIZE(saffire_both_clk_src_labels), 255 + .labels = saffire_both_clk_src_labels, 256 + .get = &saffire_both_clk_src_get, 257 + }; 258 + /* Saffire LE */ 259 + static struct snd_bebob_meter_spec saffire_le_meter_spec = { 260 + .num = ARRAY_SIZE(saffire_le_meter_labels), 261 + .labels = saffire_le_meter_labels, 262 + .get = &saffire_meter_get, 263 + }; 264 + struct snd_bebob_spec saffire_le_spec = { 265 + .clock = &saffire_both_clk_spec, 266 + .rate = &saffire_both_rate_spec, 267 + .meter = &saffire_le_meter_spec 268 + }; 269 + /* Saffire */ 270 + static struct snd_bebob_meter_spec saffire_meter_spec = { 271 + .num = ARRAY_SIZE(saffire_meter_labels), 272 + .labels = saffire_meter_labels, 273 + .get = &saffire_meter_get, 274 + }; 275 + struct snd_bebob_spec saffire_spec = { 276 + .clock = &saffire_both_clk_spec, 277 + .rate = &saffire_both_rate_spec, 278 + .meter = &saffire_meter_spec 279 + };
+199
sound/firewire/bebob/bebob_hwdep.c
/*
 * bebob_hwdep.c - a part of driver for BeBoB based devices
 *
 * Copyright (c) 2013-2014 Takashi Sakamoto
 *
 * Licensed under the terms of the GNU General Public License, version 2.
 */

/*
 * This code gives three functionalities:
 *
 * 1. get FireWire node information
 * 2. get notification about starting/stopping stream
 * 3. lock/unlock stream
 */

#include "bebob.h"

/*
 * Block until the device-lock state changes, then report the new state to
 * user space as a SNDRV_FIREWIRE_EVENT_LOCK_STATUS event.  Returns the
 * number of bytes copied, -ERESTARTSYS when interrupted by a signal, or
 * -EFAULT on a bad user buffer.
 */
static long
hwdep_read(struct snd_hwdep *hwdep, char __user *buf, long count,
	   loff_t *offset)
{
	struct snd_bebob *bebob = hwdep->private_data;
	DEFINE_WAIT(wait);
	union snd_firewire_event event;

	spin_lock_irq(&bebob->lock);

	/* Sleep until another context toggles dev_lock_changed. */
	while (!bebob->dev_lock_changed) {
		prepare_to_wait(&bebob->hwdep_wait, &wait, TASK_INTERRUPTIBLE);
		spin_unlock_irq(&bebob->lock);
		schedule();
		finish_wait(&bebob->hwdep_wait, &wait);
		if (signal_pending(current))
			return -ERESTARTSYS;
		spin_lock_irq(&bebob->lock);
	}

	memset(&event, 0, sizeof(event));
	/* Always true after the loop; kept as a guard for future event types. */
	if (bebob->dev_lock_changed) {
		event.lock_status.type = SNDRV_FIREWIRE_EVENT_LOCK_STATUS;
		event.lock_status.status = (bebob->dev_lock_count > 0);
		bebob->dev_lock_changed = false;

		count = min_t(long, count, sizeof(event.lock_status));
	}

	spin_unlock_irq(&bebob->lock);

	if (copy_to_user(buf, &event, count))
		return -EFAULT;

	return count;
}

/* Report POLLIN/POLLRDNORM when a lock-status event is pending. */
static unsigned int
hwdep_poll(struct snd_hwdep *hwdep, struct file *file, poll_table *wait)
{
	struct snd_bebob *bebob = hwdep->private_data;
	unsigned int events;

	poll_wait(file, &bebob->hwdep_wait, wait);

	spin_lock_irq(&bebob->lock);
	if (bebob->dev_lock_changed)
		events = POLLIN | POLLRDNORM;
	else
		events = 0;
	spin_unlock_irq(&bebob->lock);

	return events;
}

/* Copy card index, GUID and device name of the FireWire node to user space. */
static int
hwdep_get_info(struct snd_bebob *bebob, void __user *arg)
{
	struct fw_device *dev = fw_parent_device(bebob->unit);
	struct snd_firewire_get_info info;

	memset(&info, 0, sizeof(info));
	info.type = SNDRV_FIREWIRE_TYPE_BEBOB;
	info.card = dev->card->index;
	/* The 64-bit GUID occupies quadlets 3 and 4 of the config ROM. */
	*(__be32 *)&info.guid[0] = cpu_to_be32(dev->config_rom[3]);
	*(__be32 *)&info.guid[4] = cpu_to_be32(dev->config_rom[4]);
	strlcpy(info.device_name, dev_name(&dev->device),
		sizeof(info.device_name));

	if (copy_to_user(arg, &info, sizeof(info)))
		return -EFAULT;

	return 0;
}

/*
 * Take the exclusive user-space lock.  dev_lock_count == -1 marks "locked
 * by hwdep", as opposed to a positive count of kernel stream users; -EBUSY
 * when any user (kernel or hwdep) already holds it.
 */
static int
hwdep_lock(struct snd_bebob *bebob)
{
	int err;

	spin_lock_irq(&bebob->lock);

	if (bebob->dev_lock_count == 0) {
		bebob->dev_lock_count = -1;
		err = 0;
	} else {
		err = -EBUSY;
	}

	spin_unlock_irq(&bebob->lock);

	return err;
}

/* Release the lock taken by hwdep_lock(); -EBADFD if it was not held. */
static int
hwdep_unlock(struct snd_bebob *bebob)
{
	int err;

	spin_lock_irq(&bebob->lock);

	if (bebob->dev_lock_count == -1) {
		bebob->dev_lock_count = 0;
		err = 0;
	} else {
		err = -EBADFD;
	}

	spin_unlock_irq(&bebob->lock);

	return err;
}

/* On close, drop a hwdep lock the process may have left behind. */
static int
hwdep_release(struct snd_hwdep *hwdep, struct file *file)
{
	struct snd_bebob *bebob = hwdep->private_data;

	spin_lock_irq(&bebob->lock);
	if (bebob->dev_lock_count == -1)
		bebob->dev_lock_count = 0;
	spin_unlock_irq(&bebob->lock);

	return 0;
}

/* Dispatch the firewire hwdep ioctls; -ENOIOCTLCMD for unknown commands. */
static int
hwdep_ioctl(struct snd_hwdep *hwdep, struct file *file,
	    unsigned int cmd, unsigned long arg)
{
	struct snd_bebob *bebob = hwdep->private_data;

	switch (cmd) {
	case SNDRV_FIREWIRE_IOCTL_GET_INFO:
		return hwdep_get_info(bebob, (void __user *)arg);
	case SNDRV_FIREWIRE_IOCTL_LOCK:
		return hwdep_lock(bebob);
	case SNDRV_FIREWIRE_IOCTL_UNLOCK:
		return hwdep_unlock(bebob);
	default:
		return -ENOIOCTLCMD;
	}
}

#ifdef CONFIG_COMPAT
/* 32-bit user space passes a compat pointer; translate and reuse hwdep_ioctl(). */
static int
hwdep_compat_ioctl(struct snd_hwdep *hwdep, struct file *file,
		   unsigned int cmd, unsigned long arg)
{
	return hwdep_ioctl(hwdep, file, cmd,
			   (unsigned long)compat_ptr(arg));
}
#else
#define hwdep_compat_ioctl NULL
#endif

static const struct snd_hwdep_ops hwdep_ops = {
	.read		= hwdep_read,
	.release	= hwdep_release,
	.poll		= hwdep_poll,
	.ioctl		= hwdep_ioctl,
	.ioctl_compat	= hwdep_compat_ioctl,
};

/* Create the hwdep device node used for lock/notification by user space. */
int snd_bebob_create_hwdep_device(struct snd_bebob *bebob)
{
	struct snd_hwdep *hwdep;
	int err;

	err = snd_hwdep_new(bebob->card, "BeBoB", 0, &hwdep);
	if (err < 0)
		goto end;
	strcpy(hwdep->name, "BeBoB");
	hwdep->iface = SNDRV_HWDEP_IFACE_FW_BEBOB;
	hwdep->ops = hwdep_ops;
	hwdep->private_data = bebob;
	hwdep->exclusive = true;
end:
	return err;
}
+792
sound/firewire/bebob/bebob_maudio.c
··· 1 + /* 2 + * bebob_maudio.c - a part of driver for BeBoB based devices 3 + * 4 + * Copyright (c) 2013-2014 Takashi Sakamoto 5 + * 6 + * Licensed under the terms of the GNU General Public License, version 2. 7 + */ 8 + 9 + #include "./bebob.h" 10 + #include <sound/control.h> 11 + 12 + /* 13 + * Just powering on, Firewire 410/Audiophile/1814 and ProjectMix I/O wait to 14 + * download firmware blob. To enable these devices, drivers should upload 15 + * firmware blob and send a command to initialize configuration to factory 16 + * settings when completing uploading. Then these devices generate bus reset 17 + * and are recognized as new devices with the firmware. 18 + * 19 + * But with firmware version 5058 or later, the firmware is stored to flash 20 + * memory in the device and drivers can tell bootloader to load the firmware 21 + * by sending a cue. This cue must be sent one time. 22 + * 23 + * For streaming, both of output and input streams are needed for Firewire 410 24 + * and Ozonic. The single stream is OK for the other devices even if the clock 25 + * source is not SYT-Match (I note no devices use SYT-Match). 26 + * 27 + * Without streaming, the devices except for Firewire Audiophile can mix any 28 + * input and output. For this reason, Audiophile cannot be used as standalone 29 + * mixer. 30 + * 31 + * Firewire 1814 and ProjectMix I/O uses special firmware. It will be freezed 32 + * when receiving any commands which the firmware can't understand. These 33 + * devices utilize completely different system to control. It is some 34 + * write-transaction directly into a certain address. All of addresses for mixer 35 + * functionality is between 0xffc700700000 to 0xffc70070009c. 
36 + */ 37 + 38 + /* Offset from information register */ 39 + #define INFO_OFFSET_SW_DATE 0x20 40 + 41 + /* Bootloader Protocol Version 1 */ 42 + #define MAUDIO_BOOTLOADER_CUE1 0x00000001 43 + /* 44 + * Initializing configuration to factory settings (= 0x1101), (swapped in line), 45 + * Command code is zero (= 0x00), 46 + * the number of operands is zero (= 0x00)(at least significant byte) 47 + */ 48 + #define MAUDIO_BOOTLOADER_CUE2 0x01110000 49 + /* padding */ 50 + #define MAUDIO_BOOTLOADER_CUE3 0x00000000 51 + 52 + #define MAUDIO_SPECIFIC_ADDRESS 0xffc700000000ULL 53 + 54 + #define METER_OFFSET 0x00600000 55 + 56 + /* some device has sync info after metering data */ 57 + #define METER_SIZE_SPECIAL 84 /* with sync info */ 58 + #define METER_SIZE_FW410 76 /* with sync info */ 59 + #define METER_SIZE_AUDIOPHILE 60 /* with sync info */ 60 + #define METER_SIZE_SOLO 52 /* with sync info */ 61 + #define METER_SIZE_OZONIC 48 62 + #define METER_SIZE_NRV10 80 63 + 64 + /* labels for metering */ 65 + #define ANA_IN "Analog In" 66 + #define ANA_OUT "Analog Out" 67 + #define DIG_IN "Digital In" 68 + #define SPDIF_IN "S/PDIF In" 69 + #define ADAT_IN "ADAT In" 70 + #define DIG_OUT "Digital Out" 71 + #define SPDIF_OUT "S/PDIF Out" 72 + #define ADAT_OUT "ADAT Out" 73 + #define STRM_IN "Stream In" 74 + #define AUX_OUT "Aux Out" 75 + #define HP_OUT "HP Out" 76 + /* for NRV */ 77 + #define UNKNOWN_METER "Unknown" 78 + 79 + struct special_params { 80 + bool is1814; 81 + unsigned int clk_src; 82 + unsigned int dig_in_fmt; 83 + unsigned int dig_out_fmt; 84 + unsigned int clk_lock; 85 + struct snd_ctl_elem_id *ctl_id_sync; 86 + }; 87 + 88 + /* 89 + * For some M-Audio devices, this module just send cue to load firmware. After 90 + * loading, the device generates bus reset and newly detected. 91 + * 92 + * If we make any transactions to load firmware, the operation may failed. 
93 + */ 94 + int snd_bebob_maudio_load_firmware(struct fw_unit *unit) 95 + { 96 + struct fw_device *device = fw_parent_device(unit); 97 + int err, rcode; 98 + u64 date; 99 + __be32 cues[3] = { 100 + MAUDIO_BOOTLOADER_CUE1, 101 + MAUDIO_BOOTLOADER_CUE2, 102 + MAUDIO_BOOTLOADER_CUE3 103 + }; 104 + 105 + /* check date of software used to build */ 106 + err = snd_bebob_read_block(unit, INFO_OFFSET_SW_DATE, 107 + &date, sizeof(u64)); 108 + if (err < 0) 109 + goto end; 110 + /* 111 + * firmware version 5058 or later has date later than "20070401", but 112 + * 'date' is not null-terminated. 113 + */ 114 + if (date < 0x3230303730343031LL) { 115 + dev_err(&unit->device, 116 + "Use firmware version 5058 or later\n"); 117 + err = -ENOSYS; 118 + goto end; 119 + } 120 + 121 + rcode = fw_run_transaction(device->card, TCODE_WRITE_BLOCK_REQUEST, 122 + device->node_id, device->generation, 123 + device->max_speed, BEBOB_ADDR_REG_REQ, 124 + cues, sizeof(cues)); 125 + if (rcode != RCODE_COMPLETE) { 126 + dev_err(&unit->device, 127 + "Failed to send a cue to load firmware\n"); 128 + err = -EIO; 129 + } 130 + end: 131 + return err; 132 + } 133 + 134 + static inline int 135 + get_meter(struct snd_bebob *bebob, void *buf, unsigned int size) 136 + { 137 + return snd_fw_transaction(bebob->unit, TCODE_READ_BLOCK_REQUEST, 138 + MAUDIO_SPECIFIC_ADDRESS + METER_OFFSET, 139 + buf, size, 0); 140 + } 141 + 142 + static int 143 + check_clk_sync(struct snd_bebob *bebob, unsigned int size, bool *sync) 144 + { 145 + int err; 146 + u8 *buf; 147 + 148 + buf = kmalloc(size, GFP_KERNEL); 149 + if (buf == NULL) 150 + return -ENOMEM; 151 + 152 + err = get_meter(bebob, buf, size); 153 + if (err < 0) 154 + goto end; 155 + 156 + /* if synced, this value is the same as SFC of FDF in CIP header */ 157 + *sync = (buf[size - 2] != 0xff); 158 + end: 159 + kfree(buf); 160 + return err; 161 + } 162 + 163 + /* 164 + * dig_fmt: 0x00:S/PDIF, 0x01:ADAT 165 + * clk_lock: 0x00:unlock, 0x01:lock 166 + */ 167 + static int 
168 + avc_maudio_set_special_clk(struct snd_bebob *bebob, unsigned int clk_src, 169 + unsigned int dig_in_fmt, unsigned int dig_out_fmt, 170 + unsigned int clk_lock) 171 + { 172 + struct special_params *params = bebob->maudio_special_quirk; 173 + int err; 174 + u8 *buf; 175 + 176 + if (amdtp_stream_running(&bebob->rx_stream) || 177 + amdtp_stream_running(&bebob->tx_stream)) 178 + return -EBUSY; 179 + 180 + buf = kmalloc(12, GFP_KERNEL); 181 + if (buf == NULL) 182 + return -ENOMEM; 183 + 184 + buf[0] = 0x00; /* CONTROL */ 185 + buf[1] = 0xff; /* UNIT */ 186 + buf[2] = 0x00; /* vendor dependent */ 187 + buf[3] = 0x04; /* company ID high */ 188 + buf[4] = 0x00; /* company ID middle */ 189 + buf[5] = 0x04; /* company ID low */ 190 + buf[6] = 0xff & clk_src; /* clock source */ 191 + buf[7] = 0xff & dig_in_fmt; /* input digital format */ 192 + buf[8] = 0xff & dig_out_fmt; /* output digital format */ 193 + buf[9] = 0xff & clk_lock; /* lock these settings */ 194 + buf[10] = 0x00; /* padding */ 195 + buf[11] = 0x00; /* padding */ 196 + 197 + err = fcp_avc_transaction(bebob->unit, buf, 12, buf, 12, 198 + BIT(1) | BIT(2) | BIT(3) | BIT(4) | 199 + BIT(5) | BIT(6) | BIT(7) | BIT(8) | 200 + BIT(9)); 201 + if ((err > 0) && (err < 10)) 202 + err = -EIO; 203 + else if (buf[0] == 0x08) /* NOT IMPLEMENTED */ 204 + err = -ENOSYS; 205 + else if (buf[0] == 0x0a) /* REJECTED */ 206 + err = -EINVAL; 207 + if (err < 0) 208 + goto end; 209 + 210 + params->clk_src = buf[6]; 211 + params->dig_in_fmt = buf[7]; 212 + params->dig_out_fmt = buf[8]; 213 + params->clk_lock = buf[9]; 214 + 215 + if (params->ctl_id_sync) 216 + snd_ctl_notify(bebob->card, SNDRV_CTL_EVENT_MASK_VALUE, 217 + params->ctl_id_sync); 218 + 219 + err = 0; 220 + end: 221 + kfree(buf); 222 + return err; 223 + } 224 + static void 225 + special_stream_formation_set(struct snd_bebob *bebob) 226 + { 227 + static const unsigned int ch_table[2][2][3] = { 228 + /* AMDTP_OUT_STREAM */ 229 + { { 6, 6, 4 }, /* SPDIF */ 230 + { 12, 8, 4 } 
}, /* ADAT */ 231 + /* AMDTP_IN_STREAM */ 232 + { { 10, 10, 2 }, /* SPDIF */ 233 + { 16, 12, 2 } } /* ADAT */ 234 + }; 235 + struct special_params *params = bebob->maudio_special_quirk; 236 + unsigned int i, max; 237 + 238 + max = SND_BEBOB_STRM_FMT_ENTRIES - 1; 239 + if (!params->is1814) 240 + max -= 2; 241 + 242 + for (i = 0; i < max; i++) { 243 + bebob->tx_stream_formations[i + 1].pcm = 244 + ch_table[AMDTP_IN_STREAM][params->dig_in_fmt][i / 2]; 245 + bebob->tx_stream_formations[i + 1].midi = 1; 246 + 247 + bebob->rx_stream_formations[i + 1].pcm = 248 + ch_table[AMDTP_OUT_STREAM][params->dig_out_fmt][i / 2]; 249 + bebob->rx_stream_formations[i + 1].midi = 1; 250 + } 251 + } 252 + 253 + static int add_special_controls(struct snd_bebob *bebob); 254 + int 255 + snd_bebob_maudio_special_discover(struct snd_bebob *bebob, bool is1814) 256 + { 257 + struct special_params *params; 258 + int err; 259 + 260 + params = kzalloc(sizeof(struct special_params), GFP_KERNEL); 261 + if (params == NULL) 262 + return -ENOMEM; 263 + 264 + mutex_lock(&bebob->mutex); 265 + 266 + bebob->maudio_special_quirk = (void *)params; 267 + params->is1814 = is1814; 268 + 269 + /* initialize these parameters because driver is not allowed to ask */ 270 + bebob->rx_stream.context = ERR_PTR(-1); 271 + bebob->tx_stream.context = ERR_PTR(-1); 272 + err = avc_maudio_set_special_clk(bebob, 0x03, 0x00, 0x00, 0x00); 273 + if (err < 0) { 274 + dev_err(&bebob->unit->device, 275 + "fail to initialize clock params: %d\n", err); 276 + goto end; 277 + } 278 + 279 + err = add_special_controls(bebob); 280 + if (err < 0) 281 + goto end; 282 + 283 + special_stream_formation_set(bebob); 284 + 285 + if (params->is1814) { 286 + bebob->midi_input_ports = 1; 287 + bebob->midi_output_ports = 1; 288 + } else { 289 + bebob->midi_input_ports = 2; 290 + bebob->midi_output_ports = 2; 291 + } 292 + end: 293 + if (err < 0) { 294 + kfree(params); 295 + bebob->maudio_special_quirk = NULL; 296 + } 297 + 
mutex_unlock(&bebob->mutex); 298 + return err; 299 + } 300 + 301 + /* Input plug shows actual rate. Output plug is needless for this purpose. */ 302 + static int special_get_rate(struct snd_bebob *bebob, unsigned int *rate) 303 + { 304 + int err, trials; 305 + 306 + trials = 0; 307 + do { 308 + err = avc_general_get_sig_fmt(bebob->unit, rate, 309 + AVC_GENERAL_PLUG_DIR_IN, 0); 310 + } while (err == -EAGAIN && ++trials < 3); 311 + 312 + return err; 313 + } 314 + static int special_set_rate(struct snd_bebob *bebob, unsigned int rate) 315 + { 316 + struct special_params *params = bebob->maudio_special_quirk; 317 + int err; 318 + 319 + err = avc_general_set_sig_fmt(bebob->unit, rate, 320 + AVC_GENERAL_PLUG_DIR_OUT, 0); 321 + if (err < 0) 322 + goto end; 323 + 324 + /* 325 + * Just after changing sampling rate for output, a followed command 326 + * for input is easy to fail. This is a workaround fot this issue. 327 + */ 328 + msleep(100); 329 + 330 + err = avc_general_set_sig_fmt(bebob->unit, rate, 331 + AVC_GENERAL_PLUG_DIR_IN, 0); 332 + if (err < 0) 333 + goto end; 334 + 335 + if (params->ctl_id_sync) 336 + snd_ctl_notify(bebob->card, SNDRV_CTL_EVENT_MASK_VALUE, 337 + params->ctl_id_sync); 338 + end: 339 + return err; 340 + } 341 + 342 + /* Clock source control for special firmware */ 343 + static char *const special_clk_labels[] = { 344 + SND_BEBOB_CLOCK_INTERNAL " with Digital Mute", "Digital", 345 + "Word Clock", SND_BEBOB_CLOCK_INTERNAL}; 346 + static int special_clk_get(struct snd_bebob *bebob, unsigned int *id) 347 + { 348 + struct special_params *params = bebob->maudio_special_quirk; 349 + *id = params->clk_src; 350 + return 0; 351 + } 352 + static int special_clk_ctl_info(struct snd_kcontrol *kctl, 353 + struct snd_ctl_elem_info *einf) 354 + { 355 + einf->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED; 356 + einf->count = 1; 357 + einf->value.enumerated.items = ARRAY_SIZE(special_clk_labels); 358 + 359 + if (einf->value.enumerated.item >= einf->value.enumerated.items) 
360 + einf->value.enumerated.item = einf->value.enumerated.items - 1; 361 + 362 + strcpy(einf->value.enumerated.name, 363 + special_clk_labels[einf->value.enumerated.item]); 364 + 365 + return 0; 366 + } 367 + static int special_clk_ctl_get(struct snd_kcontrol *kctl, 368 + struct snd_ctl_elem_value *uval) 369 + { 370 + struct snd_bebob *bebob = snd_kcontrol_chip(kctl); 371 + struct special_params *params = bebob->maudio_special_quirk; 372 + uval->value.enumerated.item[0] = params->clk_src; 373 + return 0; 374 + } 375 + static int special_clk_ctl_put(struct snd_kcontrol *kctl, 376 + struct snd_ctl_elem_value *uval) 377 + { 378 + struct snd_bebob *bebob = snd_kcontrol_chip(kctl); 379 + struct special_params *params = bebob->maudio_special_quirk; 380 + int err, id; 381 + 382 + mutex_lock(&bebob->mutex); 383 + 384 + id = uval->value.enumerated.item[0]; 385 + if (id >= ARRAY_SIZE(special_clk_labels)) 386 + return 0; 387 + 388 + err = avc_maudio_set_special_clk(bebob, id, 389 + params->dig_in_fmt, 390 + params->dig_out_fmt, 391 + params->clk_lock); 392 + mutex_unlock(&bebob->mutex); 393 + 394 + return err >= 0; 395 + } 396 + static struct snd_kcontrol_new special_clk_ctl = { 397 + .name = "Clock Source", 398 + .iface = SNDRV_CTL_ELEM_IFACE_MIXER, 399 + .access = SNDRV_CTL_ELEM_ACCESS_READWRITE, 400 + .info = special_clk_ctl_info, 401 + .get = special_clk_ctl_get, 402 + .put = special_clk_ctl_put 403 + }; 404 + 405 + /* Clock synchronization control for special firmware */ 406 + static int special_sync_ctl_info(struct snd_kcontrol *kctl, 407 + struct snd_ctl_elem_info *einf) 408 + { 409 + einf->type = SNDRV_CTL_ELEM_TYPE_BOOLEAN; 410 + einf->count = 1; 411 + einf->value.integer.min = 0; 412 + einf->value.integer.max = 1; 413 + 414 + return 0; 415 + } 416 + static int special_sync_ctl_get(struct snd_kcontrol *kctl, 417 + struct snd_ctl_elem_value *uval) 418 + { 419 + struct snd_bebob *bebob = snd_kcontrol_chip(kctl); 420 + int err; 421 + bool synced = 0; 422 + 423 + err = 
check_clk_sync(bebob, METER_SIZE_SPECIAL, &synced); 424 + if (err >= 0) 425 + uval->value.integer.value[0] = synced; 426 + 427 + return 0; 428 + } 429 + static struct snd_kcontrol_new special_sync_ctl = { 430 + .name = "Sync Status", 431 + .iface = SNDRV_CTL_ELEM_IFACE_MIXER, 432 + .access = SNDRV_CTL_ELEM_ACCESS_READ, 433 + .info = special_sync_ctl_info, 434 + .get = special_sync_ctl_get, 435 + }; 436 + 437 + /* Digital interface control for special firmware */ 438 + static char *const special_dig_iface_labels[] = { 439 + "S/PDIF Optical", "S/PDIF Coaxial", "ADAT Optical" 440 + }; 441 + static int special_dig_in_iface_ctl_info(struct snd_kcontrol *kctl, 442 + struct snd_ctl_elem_info *einf) 443 + { 444 + einf->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED; 445 + einf->count = 1; 446 + einf->value.enumerated.items = ARRAY_SIZE(special_dig_iface_labels); 447 + 448 + if (einf->value.enumerated.item >= einf->value.enumerated.items) 449 + einf->value.enumerated.item = einf->value.enumerated.items - 1; 450 + 451 + strcpy(einf->value.enumerated.name, 452 + special_dig_iface_labels[einf->value.enumerated.item]); 453 + 454 + return 0; 455 + } 456 + static int special_dig_in_iface_ctl_get(struct snd_kcontrol *kctl, 457 + struct snd_ctl_elem_value *uval) 458 + { 459 + struct snd_bebob *bebob = snd_kcontrol_chip(kctl); 460 + struct special_params *params = bebob->maudio_special_quirk; 461 + unsigned int dig_in_iface; 462 + int err, val; 463 + 464 + mutex_lock(&bebob->mutex); 465 + 466 + err = avc_audio_get_selector(bebob->unit, 0x00, 0x04, 467 + &dig_in_iface); 468 + if (err < 0) { 469 + dev_err(&bebob->unit->device, 470 + "fail to get digital input interface: %d\n", err); 471 + goto end; 472 + } 473 + 474 + /* encoded id for user value */ 475 + val = (params->dig_in_fmt << 1) | (dig_in_iface & 0x01); 476 + 477 + /* for ADAT Optical */ 478 + if (val > 2) 479 + val = 2; 480 + 481 + uval->value.enumerated.item[0] = val; 482 + end: 483 + mutex_unlock(&bebob->mutex); 484 + return err; 
485 + } 486 + static int special_dig_in_iface_ctl_set(struct snd_kcontrol *kctl, 487 + struct snd_ctl_elem_value *uval) 488 + { 489 + struct snd_bebob *bebob = snd_kcontrol_chip(kctl); 490 + struct special_params *params = bebob->maudio_special_quirk; 491 + unsigned int id, dig_in_fmt, dig_in_iface; 492 + int err; 493 + 494 + mutex_lock(&bebob->mutex); 495 + 496 + id = uval->value.enumerated.item[0]; 497 + 498 + /* decode user value */ 499 + dig_in_fmt = (id >> 1) & 0x01; 500 + dig_in_iface = id & 0x01; 501 + 502 + err = avc_maudio_set_special_clk(bebob, 503 + params->clk_src, 504 + dig_in_fmt, 505 + params->dig_out_fmt, 506 + params->clk_lock); 507 + if ((err < 0) || (params->dig_in_fmt > 0)) /* ADAT */ 508 + goto end; 509 + 510 + err = avc_audio_set_selector(bebob->unit, 0x00, 0x04, dig_in_iface); 511 + if (err < 0) 512 + dev_err(&bebob->unit->device, 513 + "fail to set digital input interface: %d\n", err); 514 + end: 515 + special_stream_formation_set(bebob); 516 + mutex_unlock(&bebob->mutex); 517 + return err; 518 + } 519 + static struct snd_kcontrol_new special_dig_in_iface_ctl = { 520 + .name = "Digital Input Interface", 521 + .iface = SNDRV_CTL_ELEM_IFACE_MIXER, 522 + .access = SNDRV_CTL_ELEM_ACCESS_READWRITE, 523 + .info = special_dig_in_iface_ctl_info, 524 + .get = special_dig_in_iface_ctl_get, 525 + .put = special_dig_in_iface_ctl_set 526 + }; 527 + 528 + static int special_dig_out_iface_ctl_info(struct snd_kcontrol *kctl, 529 + struct snd_ctl_elem_info *einf) 530 + { 531 + einf->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED; 532 + einf->count = 1; 533 + einf->value.enumerated.items = ARRAY_SIZE(special_dig_iface_labels) - 1; 534 + 535 + if (einf->value.enumerated.item >= einf->value.enumerated.items) 536 + einf->value.enumerated.item = einf->value.enumerated.items - 1; 537 + 538 + strcpy(einf->value.enumerated.name, 539 + special_dig_iface_labels[einf->value.enumerated.item + 1]); 540 + 541 + return 0; 542 + } 543 + static int 
special_dig_out_iface_ctl_get(struct snd_kcontrol *kctl, 544 + struct snd_ctl_elem_value *uval) 545 + { 546 + struct snd_bebob *bebob = snd_kcontrol_chip(kctl); 547 + struct special_params *params = bebob->maudio_special_quirk; 548 + mutex_lock(&bebob->mutex); 549 + uval->value.enumerated.item[0] = params->dig_out_fmt; 550 + mutex_unlock(&bebob->mutex); 551 + return 0; 552 + } 553 + static int special_dig_out_iface_ctl_set(struct snd_kcontrol *kctl, 554 + struct snd_ctl_elem_value *uval) 555 + { 556 + struct snd_bebob *bebob = snd_kcontrol_chip(kctl); 557 + struct special_params *params = bebob->maudio_special_quirk; 558 + unsigned int id; 559 + int err; 560 + 561 + mutex_lock(&bebob->mutex); 562 + 563 + id = uval->value.enumerated.item[0]; 564 + 565 + err = avc_maudio_set_special_clk(bebob, 566 + params->clk_src, 567 + params->dig_in_fmt, 568 + id, params->clk_lock); 569 + if (err >= 0) 570 + special_stream_formation_set(bebob); 571 + 572 + mutex_unlock(&bebob->mutex); 573 + return err; 574 + } 575 + static struct snd_kcontrol_new special_dig_out_iface_ctl = { 576 + .name = "Digital Output Interface", 577 + .iface = SNDRV_CTL_ELEM_IFACE_MIXER, 578 + .access = SNDRV_CTL_ELEM_ACCESS_READWRITE, 579 + .info = special_dig_out_iface_ctl_info, 580 + .get = special_dig_out_iface_ctl_get, 581 + .put = special_dig_out_iface_ctl_set 582 + }; 583 + 584 + static int add_special_controls(struct snd_bebob *bebob) 585 + { 586 + struct snd_kcontrol *kctl; 587 + struct special_params *params = bebob->maudio_special_quirk; 588 + int err; 589 + 590 + kctl = snd_ctl_new1(&special_clk_ctl, bebob); 591 + err = snd_ctl_add(bebob->card, kctl); 592 + if (err < 0) 593 + goto end; 594 + 595 + kctl = snd_ctl_new1(&special_sync_ctl, bebob); 596 + err = snd_ctl_add(bebob->card, kctl); 597 + if (err < 0) 598 + goto end; 599 + params->ctl_id_sync = &kctl->id; 600 + 601 + kctl = snd_ctl_new1(&special_dig_in_iface_ctl, bebob); 602 + err = snd_ctl_add(bebob->card, kctl); 603 + if (err < 0) 604 + 
goto end; 605 + 606 + kctl = snd_ctl_new1(&special_dig_out_iface_ctl, bebob); 607 + err = snd_ctl_add(bebob->card, kctl); 608 + end: 609 + return err; 610 + } 611 + 612 + /* Hardware metering for special firmware */ 613 + static char *const special_meter_labels[] = { 614 + ANA_IN, ANA_IN, ANA_IN, ANA_IN, 615 + SPDIF_IN, 616 + ADAT_IN, ADAT_IN, ADAT_IN, ADAT_IN, 617 + ANA_OUT, ANA_OUT, 618 + SPDIF_OUT, 619 + ADAT_OUT, ADAT_OUT, ADAT_OUT, ADAT_OUT, 620 + HP_OUT, HP_OUT, 621 + AUX_OUT 622 + }; 623 + static int 624 + special_meter_get(struct snd_bebob *bebob, u32 *target, unsigned int size) 625 + { 626 + u16 *buf; 627 + unsigned int i, c, channels; 628 + int err; 629 + 630 + channels = ARRAY_SIZE(special_meter_labels) * 2; 631 + if (size < channels * sizeof(u32)) 632 + return -EINVAL; 633 + 634 + /* omit last 4 bytes because it's clock info. */ 635 + buf = kmalloc(METER_SIZE_SPECIAL - 4, GFP_KERNEL); 636 + if (buf == NULL) 637 + return -ENOMEM; 638 + 639 + err = get_meter(bebob, (void *)buf, METER_SIZE_SPECIAL - 4); 640 + if (err < 0) 641 + goto end; 642 + 643 + /* Its format is u16 and some channels are unknown. */ 644 + i = 0; 645 + for (c = 2; c < channels + 2; c++) 646 + target[i++] = be16_to_cpu(buf[c]) << 16; 647 + end: 648 + kfree(buf); 649 + return err; 650 + } 651 + 652 + /* last 4 bytes are omitted because it's clock info. */ 653 + static char *const fw410_meter_labels[] = { 654 + ANA_IN, DIG_IN, 655 + ANA_OUT, ANA_OUT, ANA_OUT, ANA_OUT, DIG_OUT, 656 + HP_OUT 657 + }; 658 + static char *const audiophile_meter_labels[] = { 659 + ANA_IN, DIG_IN, 660 + ANA_OUT, ANA_OUT, DIG_OUT, 661 + HP_OUT, AUX_OUT, 662 + }; 663 + static char *const solo_meter_labels[] = { 664 + ANA_IN, DIG_IN, 665 + STRM_IN, STRM_IN, 666 + ANA_OUT, DIG_OUT 667 + }; 668 + 669 + /* no clock info */ 670 + static char *const ozonic_meter_labels[] = { 671 + ANA_IN, ANA_IN, 672 + STRM_IN, STRM_IN, 673 + ANA_OUT, ANA_OUT 674 + }; 675 + /* TODO: need testers. 
these positions are based on authour's assumption */ 676 + static char *const nrv10_meter_labels[] = { 677 + ANA_IN, ANA_IN, ANA_IN, ANA_IN, 678 + DIG_IN, 679 + ANA_OUT, ANA_OUT, ANA_OUT, ANA_OUT, 680 + DIG_IN 681 + }; 682 + static int 683 + normal_meter_get(struct snd_bebob *bebob, u32 *buf, unsigned int size) 684 + { 685 + struct snd_bebob_meter_spec *spec = bebob->spec->meter; 686 + unsigned int c, channels; 687 + int err; 688 + 689 + channels = spec->num * 2; 690 + if (size < channels * sizeof(u32)) 691 + return -EINVAL; 692 + 693 + err = get_meter(bebob, (void *)buf, size); 694 + if (err < 0) 695 + goto end; 696 + 697 + for (c = 0; c < channels; c++) 698 + be32_to_cpus(&buf[c]); 699 + 700 + /* swap stream channels because inverted */ 701 + if (spec->labels == solo_meter_labels) { 702 + swap(buf[4], buf[6]); 703 + swap(buf[5], buf[7]); 704 + } 705 + end: 706 + return err; 707 + } 708 + 709 + /* for special customized devices */ 710 + static struct snd_bebob_rate_spec special_rate_spec = { 711 + .get = &special_get_rate, 712 + .set = &special_set_rate, 713 + }; 714 + static struct snd_bebob_clock_spec special_clk_spec = { 715 + .num = ARRAY_SIZE(special_clk_labels), 716 + .labels = special_clk_labels, 717 + .get = &special_clk_get, 718 + }; 719 + static struct snd_bebob_meter_spec special_meter_spec = { 720 + .num = ARRAY_SIZE(special_meter_labels), 721 + .labels = special_meter_labels, 722 + .get = &special_meter_get 723 + }; 724 + struct snd_bebob_spec maudio_special_spec = { 725 + .clock = &special_clk_spec, 726 + .rate = &special_rate_spec, 727 + .meter = &special_meter_spec 728 + }; 729 + 730 + /* Firewire 410 specification */ 731 + static struct snd_bebob_rate_spec usual_rate_spec = { 732 + .get = &snd_bebob_stream_get_rate, 733 + .set = &snd_bebob_stream_set_rate, 734 + }; 735 + static struct snd_bebob_meter_spec fw410_meter_spec = { 736 + .num = ARRAY_SIZE(fw410_meter_labels), 737 + .labels = fw410_meter_labels, 738 + .get = &normal_meter_get 739 + }; 
740 + struct snd_bebob_spec maudio_fw410_spec = { 741 + .clock = NULL, 742 + .rate = &usual_rate_spec, 743 + .meter = &fw410_meter_spec 744 + }; 745 + 746 + /* Firewire Audiophile specification */ 747 + static struct snd_bebob_meter_spec audiophile_meter_spec = { 748 + .num = ARRAY_SIZE(audiophile_meter_labels), 749 + .labels = audiophile_meter_labels, 750 + .get = &normal_meter_get 751 + }; 752 + struct snd_bebob_spec maudio_audiophile_spec = { 753 + .clock = NULL, 754 + .rate = &usual_rate_spec, 755 + .meter = &audiophile_meter_spec 756 + }; 757 + 758 + /* Firewire Solo specification */ 759 + static struct snd_bebob_meter_spec solo_meter_spec = { 760 + .num = ARRAY_SIZE(solo_meter_labels), 761 + .labels = solo_meter_labels, 762 + .get = &normal_meter_get 763 + }; 764 + struct snd_bebob_spec maudio_solo_spec = { 765 + .clock = NULL, 766 + .rate = &usual_rate_spec, 767 + .meter = &solo_meter_spec 768 + }; 769 + 770 + /* Ozonic specification */ 771 + static struct snd_bebob_meter_spec ozonic_meter_spec = { 772 + .num = ARRAY_SIZE(ozonic_meter_labels), 773 + .labels = ozonic_meter_labels, 774 + .get = &normal_meter_get 775 + }; 776 + struct snd_bebob_spec maudio_ozonic_spec = { 777 + .clock = NULL, 778 + .rate = &usual_rate_spec, 779 + .meter = &ozonic_meter_spec 780 + }; 781 + 782 + /* NRV10 specification */ 783 + static struct snd_bebob_meter_spec nrv10_meter_spec = { 784 + .num = ARRAY_SIZE(nrv10_meter_labels), 785 + .labels = nrv10_meter_labels, 786 + .get = &normal_meter_get 787 + }; 788 + struct snd_bebob_spec maudio_nrv10_spec = { 789 + .clock = NULL, 790 + .rate = &usual_rate_spec, 791 + .meter = &nrv10_meter_spec 792 + };
+168
sound/firewire/bebob/bebob_midi.c
··· 1 + /* 2 + * bebob_midi.c - a part of driver for BeBoB based devices 3 + * 4 + * Copyright (c) 2013-2014 Takashi Sakamoto 5 + * 6 + * Licensed under the terms of the GNU General Public License, version 2. 7 + */ 8 + 9 + #include "bebob.h" 10 + 11 + static int midi_capture_open(struct snd_rawmidi_substream *substream) 12 + { 13 + struct snd_bebob *bebob = substream->rmidi->private_data; 14 + int err; 15 + 16 + err = snd_bebob_stream_lock_try(bebob); 17 + if (err < 0) 18 + goto end; 19 + 20 + atomic_inc(&bebob->capture_substreams); 21 + err = snd_bebob_stream_start_duplex(bebob, 0); 22 + if (err < 0) 23 + snd_bebob_stream_lock_release(bebob); 24 + end: 25 + return err; 26 + } 27 + 28 + static int midi_playback_open(struct snd_rawmidi_substream *substream) 29 + { 30 + struct snd_bebob *bebob = substream->rmidi->private_data; 31 + int err; 32 + 33 + err = snd_bebob_stream_lock_try(bebob); 34 + if (err < 0) 35 + goto end; 36 + 37 + atomic_inc(&bebob->playback_substreams); 38 + err = snd_bebob_stream_start_duplex(bebob, 0); 39 + if (err < 0) 40 + snd_bebob_stream_lock_release(bebob); 41 + end: 42 + return err; 43 + } 44 + 45 + static int midi_capture_close(struct snd_rawmidi_substream *substream) 46 + { 47 + struct snd_bebob *bebob = substream->rmidi->private_data; 48 + 49 + atomic_dec(&bebob->capture_substreams); 50 + snd_bebob_stream_stop_duplex(bebob); 51 + 52 + snd_bebob_stream_lock_release(bebob); 53 + return 0; 54 + } 55 + 56 + static int midi_playback_close(struct snd_rawmidi_substream *substream) 57 + { 58 + struct snd_bebob *bebob = substream->rmidi->private_data; 59 + 60 + atomic_dec(&bebob->playback_substreams); 61 + snd_bebob_stream_stop_duplex(bebob); 62 + 63 + snd_bebob_stream_lock_release(bebob); 64 + return 0; 65 + } 66 + 67 + static void midi_capture_trigger(struct snd_rawmidi_substream *substrm, int up) 68 + { 69 + struct snd_bebob *bebob = substrm->rmidi->private_data; 70 + unsigned long flags; 71 + 72 + spin_lock_irqsave(&bebob->lock, flags); 73 + 
74 + if (up) 75 + amdtp_stream_midi_trigger(&bebob->tx_stream, 76 + substrm->number, substrm); 77 + else 78 + amdtp_stream_midi_trigger(&bebob->tx_stream, 79 + substrm->number, NULL); 80 + 81 + spin_unlock_irqrestore(&bebob->lock, flags); 82 + } 83 + 84 + static void midi_playback_trigger(struct snd_rawmidi_substream *substrm, int up) 85 + { 86 + struct snd_bebob *bebob = substrm->rmidi->private_data; 87 + unsigned long flags; 88 + 89 + spin_lock_irqsave(&bebob->lock, flags); 90 + 91 + if (up) 92 + amdtp_stream_midi_trigger(&bebob->rx_stream, 93 + substrm->number, substrm); 94 + else 95 + amdtp_stream_midi_trigger(&bebob->rx_stream, 96 + substrm->number, NULL); 97 + 98 + spin_unlock_irqrestore(&bebob->lock, flags); 99 + } 100 + 101 + static struct snd_rawmidi_ops midi_capture_ops = { 102 + .open = midi_capture_open, 103 + .close = midi_capture_close, 104 + .trigger = midi_capture_trigger, 105 + }; 106 + 107 + static struct snd_rawmidi_ops midi_playback_ops = { 108 + .open = midi_playback_open, 109 + .close = midi_playback_close, 110 + .trigger = midi_playback_trigger, 111 + }; 112 + 113 + static void set_midi_substream_names(struct snd_bebob *bebob, 114 + struct snd_rawmidi_str *str) 115 + { 116 + struct snd_rawmidi_substream *subs; 117 + 118 + list_for_each_entry(subs, &str->substreams, list) { 119 + snprintf(subs->name, sizeof(subs->name), 120 + "%s MIDI %d", 121 + bebob->card->shortname, subs->number + 1); 122 + } 123 + } 124 + 125 + int snd_bebob_create_midi_devices(struct snd_bebob *bebob) 126 + { 127 + struct snd_rawmidi *rmidi; 128 + struct snd_rawmidi_str *str; 129 + int err; 130 + 131 + /* create midi ports */ 132 + err = snd_rawmidi_new(bebob->card, bebob->card->driver, 0, 133 + bebob->midi_output_ports, bebob->midi_input_ports, 134 + &rmidi); 135 + if (err < 0) 136 + return err; 137 + 138 + snprintf(rmidi->name, sizeof(rmidi->name), 139 + "%s MIDI", bebob->card->shortname); 140 + rmidi->private_data = bebob; 141 + 142 + if (bebob->midi_input_ports > 0) { 
143 + rmidi->info_flags |= SNDRV_RAWMIDI_INFO_INPUT; 144 + 145 + snd_rawmidi_set_ops(rmidi, SNDRV_RAWMIDI_STREAM_INPUT, 146 + &midi_capture_ops); 147 + 148 + str = &rmidi->streams[SNDRV_RAWMIDI_STREAM_INPUT]; 149 + 150 + set_midi_substream_names(bebob, str); 151 + } 152 + 153 + if (bebob->midi_output_ports > 0) { 154 + rmidi->info_flags |= SNDRV_RAWMIDI_INFO_OUTPUT; 155 + 156 + snd_rawmidi_set_ops(rmidi, SNDRV_RAWMIDI_STREAM_OUTPUT, 157 + &midi_playback_ops); 158 + 159 + str = &rmidi->streams[SNDRV_RAWMIDI_STREAM_OUTPUT]; 160 + 161 + set_midi_substream_names(bebob, str); 162 + } 163 + 164 + if ((bebob->midi_output_ports > 0) && (bebob->midi_input_ports > 0)) 165 + rmidi->info_flags |= SNDRV_RAWMIDI_INFO_DUPLEX; 166 + 167 + return 0; 168 + }
+378
sound/firewire/bebob/bebob_pcm.c
··· 1 + /* 2 + * bebob_pcm.c - a part of driver for BeBoB based devices 3 + * 4 + * Copyright (c) 2013-2014 Takashi Sakamoto 5 + * 6 + * Licensed under the terms of the GNU General Public License, version 2. 7 + */ 8 + 9 + #include "./bebob.h" 10 + 11 + static int 12 + hw_rule_rate(struct snd_pcm_hw_params *params, struct snd_pcm_hw_rule *rule) 13 + { 14 + struct snd_bebob_stream_formation *formations = rule->private; 15 + struct snd_interval *r = 16 + hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE); 17 + const struct snd_interval *c = 18 + hw_param_interval_c(params, SNDRV_PCM_HW_PARAM_CHANNELS); 19 + struct snd_interval t = { 20 + .min = UINT_MAX, .max = 0, .integer = 1 21 + }; 22 + unsigned int i; 23 + 24 + for (i = 0; i < SND_BEBOB_STRM_FMT_ENTRIES; i++) { 25 + /* entry is invalid */ 26 + if (formations[i].pcm == 0) 27 + continue; 28 + 29 + if (!snd_interval_test(c, formations[i].pcm)) 30 + continue; 31 + 32 + t.min = min(t.min, snd_bebob_rate_table[i]); 33 + t.max = max(t.max, snd_bebob_rate_table[i]); 34 + 35 + } 36 + return snd_interval_refine(r, &t); 37 + } 38 + 39 + static int 40 + hw_rule_channels(struct snd_pcm_hw_params *params, struct snd_pcm_hw_rule *rule) 41 + { 42 + struct snd_bebob_stream_formation *formations = rule->private; 43 + struct snd_interval *c = 44 + hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS); 45 + const struct snd_interval *r = 46 + hw_param_interval_c(params, SNDRV_PCM_HW_PARAM_RATE); 47 + struct snd_interval t = { 48 + .min = UINT_MAX, .max = 0, .integer = 1 49 + }; 50 + 51 + unsigned int i; 52 + 53 + for (i = 0; i < SND_BEBOB_STRM_FMT_ENTRIES; i++) { 54 + /* entry is invalid */ 55 + if (formations[i].pcm == 0) 56 + continue; 57 + 58 + if (!snd_interval_test(r, snd_bebob_rate_table[i])) 59 + continue; 60 + 61 + t.min = min(t.min, formations[i].pcm); 62 + t.max = max(t.max, formations[i].pcm); 63 + } 64 + 65 + return snd_interval_refine(c, &t); 66 + } 67 + 68 + static void 69 + limit_channels_and_rates(struct 
snd_pcm_hardware *hw, 70 + struct snd_bebob_stream_formation *formations) 71 + { 72 + unsigned int i; 73 + 74 + hw->channels_min = UINT_MAX; 75 + hw->channels_max = 0; 76 + 77 + hw->rate_min = UINT_MAX; 78 + hw->rate_max = 0; 79 + hw->rates = 0; 80 + 81 + for (i = 0; i < SND_BEBOB_STRM_FMT_ENTRIES; i++) { 82 + /* entry has no PCM channels */ 83 + if (formations[i].pcm == 0) 84 + continue; 85 + 86 + hw->channels_min = min(hw->channels_min, formations[i].pcm); 87 + hw->channels_max = max(hw->channels_max, formations[i].pcm); 88 + 89 + hw->rate_min = min(hw->rate_min, snd_bebob_rate_table[i]); 90 + hw->rate_max = max(hw->rate_max, snd_bebob_rate_table[i]); 91 + hw->rates |= snd_pcm_rate_to_rate_bit(snd_bebob_rate_table[i]); 92 + } 93 + } 94 + 95 + static void 96 + limit_period_and_buffer(struct snd_pcm_hardware *hw) 97 + { 98 + hw->periods_min = 2; /* SNDRV_PCM_INFO_BATCH */ 99 + hw->periods_max = UINT_MAX; 100 + 101 + hw->period_bytes_min = 4 * hw->channels_max; /* bytes for a frame */ 102 + 103 + /* Just to prevent from allocating much pages. 
*/ 104 + hw->period_bytes_max = hw->period_bytes_min * 2048; 105 + hw->buffer_bytes_max = hw->period_bytes_max * hw->periods_min; 106 + } 107 + 108 + static int 109 + pcm_init_hw_params(struct snd_bebob *bebob, 110 + struct snd_pcm_substream *substream) 111 + { 112 + struct snd_pcm_runtime *runtime = substream->runtime; 113 + struct amdtp_stream *s; 114 + struct snd_bebob_stream_formation *formations; 115 + int err; 116 + 117 + runtime->hw.info = SNDRV_PCM_INFO_BATCH | 118 + SNDRV_PCM_INFO_BLOCK_TRANSFER | 119 + SNDRV_PCM_INFO_INTERLEAVED | 120 + SNDRV_PCM_INFO_JOINT_DUPLEX | 121 + SNDRV_PCM_INFO_MMAP | 122 + SNDRV_PCM_INFO_MMAP_VALID; 123 + 124 + if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) { 125 + runtime->hw.formats = AMDTP_IN_PCM_FORMAT_BITS; 126 + s = &bebob->tx_stream; 127 + formations = bebob->tx_stream_formations; 128 + } else { 129 + runtime->hw.formats = AMDTP_OUT_PCM_FORMAT_BITS; 130 + s = &bebob->rx_stream; 131 + formations = bebob->rx_stream_formations; 132 + } 133 + 134 + limit_channels_and_rates(&runtime->hw, formations); 135 + limit_period_and_buffer(&runtime->hw); 136 + 137 + err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS, 138 + hw_rule_channels, formations, 139 + SNDRV_PCM_HW_PARAM_RATE, -1); 140 + if (err < 0) 141 + goto end; 142 + 143 + err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_RATE, 144 + hw_rule_rate, formations, 145 + SNDRV_PCM_HW_PARAM_CHANNELS, -1); 146 + if (err < 0) 147 + goto end; 148 + 149 + err = amdtp_stream_add_pcm_hw_constraints(s, runtime); 150 + end: 151 + return err; 152 + } 153 + 154 + static int 155 + pcm_open(struct snd_pcm_substream *substream) 156 + { 157 + struct snd_bebob *bebob = substream->private_data; 158 + struct snd_bebob_rate_spec *spec = bebob->spec->rate; 159 + unsigned int sampling_rate; 160 + bool internal; 161 + int err; 162 + 163 + err = snd_bebob_stream_lock_try(bebob); 164 + if (err < 0) 165 + goto end; 166 + 167 + err = pcm_init_hw_params(bebob, substream); 168 + if 
(err < 0) 169 + goto err_locked; 170 + 171 + err = snd_bebob_stream_check_internal_clock(bebob, &internal); 172 + if (err < 0) 173 + goto err_locked; 174 + 175 + /* 176 + * When source of clock is internal or any PCM stream are running, 177 + * the available sampling rate is limited at current sampling rate. 178 + */ 179 + if (!internal || 180 + amdtp_stream_pcm_running(&bebob->tx_stream) || 181 + amdtp_stream_pcm_running(&bebob->rx_stream)) { 182 + err = spec->get(bebob, &sampling_rate); 183 + if (err < 0) { 184 + dev_err(&bebob->unit->device, 185 + "fail to get sampling rate: %d\n", err); 186 + goto err_locked; 187 + } 188 + 189 + substream->runtime->hw.rate_min = sampling_rate; 190 + substream->runtime->hw.rate_max = sampling_rate; 191 + } 192 + 193 + snd_pcm_set_sync(substream); 194 + end: 195 + return err; 196 + err_locked: 197 + snd_bebob_stream_lock_release(bebob); 198 + return err; 199 + } 200 + 201 + static int 202 + pcm_close(struct snd_pcm_substream *substream) 203 + { 204 + struct snd_bebob *bebob = substream->private_data; 205 + snd_bebob_stream_lock_release(bebob); 206 + return 0; 207 + } 208 + 209 + static int 210 + pcm_capture_hw_params(struct snd_pcm_substream *substream, 211 + struct snd_pcm_hw_params *hw_params) 212 + { 213 + struct snd_bebob *bebob = substream->private_data; 214 + 215 + if (substream->runtime->status->state == SNDRV_PCM_STATE_OPEN) 216 + atomic_inc(&bebob->capture_substreams); 217 + amdtp_stream_set_pcm_format(&bebob->tx_stream, 218 + params_format(hw_params)); 219 + return snd_pcm_lib_alloc_vmalloc_buffer(substream, 220 + params_buffer_bytes(hw_params)); 221 + } 222 + static int 223 + pcm_playback_hw_params(struct snd_pcm_substream *substream, 224 + struct snd_pcm_hw_params *hw_params) 225 + { 226 + struct snd_bebob *bebob = substream->private_data; 227 + 228 + if (substream->runtime->status->state == SNDRV_PCM_STATE_OPEN) 229 + atomic_inc(&bebob->playback_substreams); 230 + amdtp_stream_set_pcm_format(&bebob->rx_stream, 231 + 
params_format(hw_params)); 232 + return snd_pcm_lib_alloc_vmalloc_buffer(substream, 233 + params_buffer_bytes(hw_params)); 234 + } 235 + 236 + static int 237 + pcm_capture_hw_free(struct snd_pcm_substream *substream) 238 + { 239 + struct snd_bebob *bebob = substream->private_data; 240 + 241 + if (substream->runtime->status->state != SNDRV_PCM_STATE_OPEN) 242 + atomic_dec(&bebob->capture_substreams); 243 + 244 + snd_bebob_stream_stop_duplex(bebob); 245 + 246 + return snd_pcm_lib_free_vmalloc_buffer(substream); 247 + } 248 + static int 249 + pcm_playback_hw_free(struct snd_pcm_substream *substream) 250 + { 251 + struct snd_bebob *bebob = substream->private_data; 252 + 253 + if (substream->runtime->status->state != SNDRV_PCM_STATE_OPEN) 254 + atomic_dec(&bebob->playback_substreams); 255 + 256 + snd_bebob_stream_stop_duplex(bebob); 257 + 258 + return snd_pcm_lib_free_vmalloc_buffer(substream); 259 + } 260 + 261 + static int 262 + pcm_capture_prepare(struct snd_pcm_substream *substream) 263 + { 264 + struct snd_bebob *bebob = substream->private_data; 265 + struct snd_pcm_runtime *runtime = substream->runtime; 266 + int err; 267 + 268 + err = snd_bebob_stream_start_duplex(bebob, runtime->rate); 269 + if (err >= 0) 270 + amdtp_stream_pcm_prepare(&bebob->tx_stream); 271 + 272 + return err; 273 + } 274 + static int 275 + pcm_playback_prepare(struct snd_pcm_substream *substream) 276 + { 277 + struct snd_bebob *bebob = substream->private_data; 278 + struct snd_pcm_runtime *runtime = substream->runtime; 279 + int err; 280 + 281 + err = snd_bebob_stream_start_duplex(bebob, runtime->rate); 282 + if (err >= 0) 283 + amdtp_stream_pcm_prepare(&bebob->rx_stream); 284 + 285 + return err; 286 + } 287 + 288 + static int 289 + pcm_capture_trigger(struct snd_pcm_substream *substream, int cmd) 290 + { 291 + struct snd_bebob *bebob = substream->private_data; 292 + 293 + switch (cmd) { 294 + case SNDRV_PCM_TRIGGER_START: 295 + amdtp_stream_pcm_trigger(&bebob->tx_stream, substream); 296 + 
break; 297 + case SNDRV_PCM_TRIGGER_STOP: 298 + amdtp_stream_pcm_trigger(&bebob->tx_stream, NULL); 299 + break; 300 + default: 301 + return -EINVAL; 302 + } 303 + 304 + return 0; 305 + } 306 + static int 307 + pcm_playback_trigger(struct snd_pcm_substream *substream, int cmd) 308 + { 309 + struct snd_bebob *bebob = substream->private_data; 310 + 311 + switch (cmd) { 312 + case SNDRV_PCM_TRIGGER_START: 313 + amdtp_stream_pcm_trigger(&bebob->rx_stream, substream); 314 + break; 315 + case SNDRV_PCM_TRIGGER_STOP: 316 + amdtp_stream_pcm_trigger(&bebob->rx_stream, NULL); 317 + break; 318 + default: 319 + return -EINVAL; 320 + } 321 + 322 + return 0; 323 + } 324 + 325 + static snd_pcm_uframes_t 326 + pcm_capture_pointer(struct snd_pcm_substream *sbstrm) 327 + { 328 + struct snd_bebob *bebob = sbstrm->private_data; 329 + return amdtp_stream_pcm_pointer(&bebob->tx_stream); 330 + } 331 + static snd_pcm_uframes_t 332 + pcm_playback_pointer(struct snd_pcm_substream *sbstrm) 333 + { 334 + struct snd_bebob *bebob = sbstrm->private_data; 335 + return amdtp_stream_pcm_pointer(&bebob->rx_stream); 336 + } 337 + 338 + static const struct snd_pcm_ops pcm_capture_ops = { 339 + .open = pcm_open, 340 + .close = pcm_close, 341 + .ioctl = snd_pcm_lib_ioctl, 342 + .hw_params = pcm_capture_hw_params, 343 + .hw_free = pcm_capture_hw_free, 344 + .prepare = pcm_capture_prepare, 345 + .trigger = pcm_capture_trigger, 346 + .pointer = pcm_capture_pointer, 347 + .page = snd_pcm_lib_get_vmalloc_page, 348 + }; 349 + static const struct snd_pcm_ops pcm_playback_ops = { 350 + .open = pcm_open, 351 + .close = pcm_close, 352 + .ioctl = snd_pcm_lib_ioctl, 353 + .hw_params = pcm_playback_hw_params, 354 + .hw_free = pcm_playback_hw_free, 355 + .prepare = pcm_playback_prepare, 356 + .trigger = pcm_playback_trigger, 357 + .pointer = pcm_playback_pointer, 358 + .page = snd_pcm_lib_get_vmalloc_page, 359 + .mmap = snd_pcm_lib_mmap_vmalloc, 360 + }; 361 + 362 + int snd_bebob_create_pcm_devices(struct snd_bebob 
*bebob) 363 + { 364 + struct snd_pcm *pcm; 365 + int err; 366 + 367 + err = snd_pcm_new(bebob->card, bebob->card->driver, 0, 1, 1, &pcm); 368 + if (err < 0) 369 + goto end; 370 + 371 + pcm->private_data = bebob; 372 + snprintf(pcm->name, sizeof(pcm->name), 373 + "%s PCM", bebob->card->shortname); 374 + snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &pcm_playback_ops); 375 + snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &pcm_capture_ops); 376 + end: 377 + return err; 378 + }
+196
sound/firewire/bebob/bebob_proc.c
··· 1 + /* 2 + * bebob_proc.c - a part of driver for BeBoB based devices 3 + * 4 + * Copyright (c) 2013-2014 Takashi Sakamoto 5 + * 6 + * Licensed under the terms of the GNU General Public License, version 2. 7 + */ 8 + 9 + #include "./bebob.h" 10 + 11 + /* contents of information register */ 12 + struct hw_info { 13 + u64 manufacturer; 14 + u32 protocol_ver; 15 + u32 bld_ver; 16 + u32 guid[2]; 17 + u32 model_id; 18 + u32 model_rev; 19 + u64 fw_date; 20 + u64 fw_time; 21 + u32 fw_id; 22 + u32 fw_ver; 23 + u32 base_addr; 24 + u32 max_size; 25 + u64 bld_date; 26 + u64 bld_time; 27 + /* may not used in product 28 + u64 dbg_date; 29 + u64 dbg_time; 30 + u32 dbg_id; 31 + u32 dbg_version; 32 + */ 33 + } __packed; 34 + 35 + static void 36 + proc_read_hw_info(struct snd_info_entry *entry, 37 + struct snd_info_buffer *buffer) 38 + { 39 + struct snd_bebob *bebob = entry->private_data; 40 + struct hw_info *info; 41 + 42 + info = kzalloc(sizeof(struct hw_info), GFP_KERNEL); 43 + if (info == NULL) 44 + return; 45 + 46 + if (snd_bebob_read_block(bebob->unit, 0, 47 + info, sizeof(struct hw_info)) < 0) 48 + goto end; 49 + 50 + snd_iprintf(buffer, "Manufacturer:\t%.8s\n", 51 + (char *)&info->manufacturer); 52 + snd_iprintf(buffer, "Protocol Ver:\t%d\n", info->protocol_ver); 53 + snd_iprintf(buffer, "Build Ver:\t%d\n", info->bld_ver); 54 + snd_iprintf(buffer, "GUID:\t\t0x%.8X%.8X\n", 55 + info->guid[0], info->guid[1]); 56 + snd_iprintf(buffer, "Model ID:\t0x%02X\n", info->model_id); 57 + snd_iprintf(buffer, "Model Rev:\t%d\n", info->model_rev); 58 + snd_iprintf(buffer, "Firmware Date:\t%.8s\n", (char *)&info->fw_date); 59 + snd_iprintf(buffer, "Firmware Time:\t%.8s\n", (char *)&info->fw_time); 60 + snd_iprintf(buffer, "Firmware ID:\t0x%X\n", info->fw_id); 61 + snd_iprintf(buffer, "Firmware Ver:\t%d\n", info->fw_ver); 62 + snd_iprintf(buffer, "Base Addr:\t0x%X\n", info->base_addr); 63 + snd_iprintf(buffer, "Max Size:\t%d\n", info->max_size); 64 + snd_iprintf(buffer, "Loader 
Date:\t%.8s\n", (char *)&info->bld_date); 65 + snd_iprintf(buffer, "Loader Time:\t%.8s\n", (char *)&info->bld_time); 66 + 67 + end: 68 + kfree(info); 69 + } 70 + 71 + static void 72 + proc_read_meters(struct snd_info_entry *entry, 73 + struct snd_info_buffer *buffer) 74 + { 75 + struct snd_bebob *bebob = entry->private_data; 76 + struct snd_bebob_meter_spec *spec = bebob->spec->meter; 77 + u32 *buf; 78 + unsigned int i, c, channels, size; 79 + 80 + if (spec == NULL) 81 + return; 82 + 83 + channels = spec->num * 2; 84 + size = channels * sizeof(u32); 85 + buf = kmalloc(size, GFP_KERNEL); 86 + if (buf == NULL) 87 + return; 88 + 89 + if (spec->get(bebob, buf, size) < 0) 90 + goto end; 91 + 92 + for (i = 0, c = 1; i < channels; i++) { 93 + snd_iprintf(buffer, "%s %d:\t%d\n", 94 + spec->labels[i / 2], c++, buf[i]); 95 + if ((i + 1 < channels - 1) && 96 + (strcmp(spec->labels[i / 2], 97 + spec->labels[(i + 1) / 2]) != 0)) 98 + c = 1; 99 + } 100 + end: 101 + kfree(buf); 102 + } 103 + 104 + static void 105 + proc_read_formation(struct snd_info_entry *entry, 106 + struct snd_info_buffer *buffer) 107 + { 108 + struct snd_bebob *bebob = entry->private_data; 109 + struct snd_bebob_stream_formation *formation; 110 + unsigned int i; 111 + 112 + snd_iprintf(buffer, "Output Stream from device:\n"); 113 + snd_iprintf(buffer, "\tRate\tPCM\tMIDI\n"); 114 + formation = bebob->tx_stream_formations; 115 + for (i = 0; i < SND_BEBOB_STRM_FMT_ENTRIES; i++) { 116 + snd_iprintf(buffer, 117 + "\t%d\t%d\t%d\n", snd_bebob_rate_table[i], 118 + formation[i].pcm, formation[i].midi); 119 + } 120 + 121 + snd_iprintf(buffer, "Input Stream to device:\n"); 122 + snd_iprintf(buffer, "\tRate\tPCM\tMIDI\n"); 123 + formation = bebob->rx_stream_formations; 124 + for (i = 0; i < SND_BEBOB_STRM_FMT_ENTRIES; i++) { 125 + snd_iprintf(buffer, 126 + "\t%d\t%d\t%d\n", snd_bebob_rate_table[i], 127 + formation[i].pcm, formation[i].midi); 128 + } 129 + } 130 + 131 + static void 132 + proc_read_clock(struct 
snd_info_entry *entry, 133 + struct snd_info_buffer *buffer) 134 + { 135 + struct snd_bebob *bebob = entry->private_data; 136 + struct snd_bebob_rate_spec *rate_spec = bebob->spec->rate; 137 + struct snd_bebob_clock_spec *clk_spec = bebob->spec->clock; 138 + unsigned int rate, id; 139 + bool internal; 140 + 141 + if (rate_spec->get(bebob, &rate) >= 0) 142 + snd_iprintf(buffer, "Sampling rate: %d\n", rate); 143 + 144 + if (clk_spec) { 145 + if (clk_spec->get(bebob, &id) >= 0) 146 + snd_iprintf(buffer, "Clock Source: %s\n", 147 + clk_spec->labels[id]); 148 + } else { 149 + if (snd_bebob_stream_check_internal_clock(bebob, 150 + &internal) >= 0) 151 + snd_iprintf(buffer, "Clock Source: %s (MSU-dest: %d)\n", 152 + (internal) ? "Internal" : "External", 153 + bebob->sync_input_plug); 154 + } 155 + } 156 + 157 + static void 158 + add_node(struct snd_bebob *bebob, struct snd_info_entry *root, const char *name, 159 + void (*op)(struct snd_info_entry *e, struct snd_info_buffer *b)) 160 + { 161 + struct snd_info_entry *entry; 162 + 163 + entry = snd_info_create_card_entry(bebob->card, name, root); 164 + if (entry == NULL) 165 + return; 166 + 167 + snd_info_set_text_ops(entry, bebob, op); 168 + if (snd_info_register(entry) < 0) 169 + snd_info_free_entry(entry); 170 + } 171 + 172 + void snd_bebob_proc_init(struct snd_bebob *bebob) 173 + { 174 + struct snd_info_entry *root; 175 + 176 + /* 177 + * All nodes are automatically removed at snd_card_disconnect(), 178 + * by following to link list. 
179 + */ 180 + root = snd_info_create_card_entry(bebob->card, "firewire", 181 + bebob->card->proc_root); 182 + if (root == NULL) 183 + return; 184 + root->mode = S_IFDIR | S_IRUGO | S_IXUGO; 185 + if (snd_info_register(root) < 0) { 186 + snd_info_free_entry(root); 187 + return; 188 + } 189 + 190 + add_node(bebob, root, "clock", proc_read_clock); 191 + add_node(bebob, root, "firmware", proc_read_hw_info); 192 + add_node(bebob, root, "formation", proc_read_formation); 193 + 194 + if (bebob->spec->meter != NULL) 195 + add_node(bebob, root, "meter", proc_read_meters); 196 + }
+1021
sound/firewire/bebob/bebob_stream.c
··· 1 + /* 2 + * bebob_stream.c - a part of driver for BeBoB based devices 3 + * 4 + * Copyright (c) 2013-2014 Takashi Sakamoto 5 + * 6 + * Licensed under the terms of the GNU General Public License, version 2. 7 + */ 8 + 9 + #include "./bebob.h" 10 + 11 + #define CALLBACK_TIMEOUT 1000 12 + #define FW_ISO_RESOURCE_DELAY 1000 13 + 14 + /* 15 + * NOTE; 16 + * For BeBoB streams, Both of input and output CMP connection are important. 17 + * 18 + * For most devices, each CMP connection starts to transmit/receive a 19 + * corresponding stream. But for a few devices, both of CMP connection needs 20 + * to start transmitting stream. An example is 'M-Audio Firewire 410'. 21 + */ 22 + 23 + /* 128 is an arbitrary length but it seems to be enough */ 24 + #define FORMAT_MAXIMUM_LENGTH 128 25 + 26 + const unsigned int snd_bebob_rate_table[SND_BEBOB_STRM_FMT_ENTRIES] = { 27 + [0] = 32000, 28 + [1] = 44100, 29 + [2] = 48000, 30 + [3] = 88200, 31 + [4] = 96000, 32 + [5] = 176400, 33 + [6] = 192000, 34 + }; 35 + 36 + /* 37 + * See: Table 51: Extended Stream Format Info ‘Sampling Frequency’ 38 + * in Additional AVC commands (Nov 2003, BridgeCo) 39 + */ 40 + static const unsigned int bridgeco_freq_table[] = { 41 + [0] = 0x02, 42 + [1] = 0x03, 43 + [2] = 0x04, 44 + [3] = 0x0a, 45 + [4] = 0x05, 46 + [5] = 0x06, 47 + [6] = 0x07, 48 + }; 49 + 50 + static unsigned int 51 + get_formation_index(unsigned int rate) 52 + { 53 + unsigned int i; 54 + 55 + for (i = 0; i < ARRAY_SIZE(snd_bebob_rate_table); i++) { 56 + if (snd_bebob_rate_table[i] == rate) 57 + return i; 58 + } 59 + return -EINVAL; 60 + } 61 + 62 + int 63 + snd_bebob_stream_get_rate(struct snd_bebob *bebob, unsigned int *curr_rate) 64 + { 65 + unsigned int tx_rate, rx_rate, trials; 66 + int err; 67 + 68 + trials = 0; 69 + do { 70 + err = avc_general_get_sig_fmt(bebob->unit, &tx_rate, 71 + AVC_GENERAL_PLUG_DIR_OUT, 0); 72 + } while (err == -EAGAIN && ++trials < 3); 73 + if (err < 0) 74 + goto end; 75 + 76 + trials = 0; 77 + do { 78 + 
err = avc_general_get_sig_fmt(bebob->unit, &rx_rate, 79 + AVC_GENERAL_PLUG_DIR_IN, 0); 80 + } while (err == -EAGAIN && ++trials < 3); 81 + if (err < 0) 82 + goto end; 83 + 84 + *curr_rate = rx_rate; 85 + if (rx_rate == tx_rate) 86 + goto end; 87 + 88 + /* synchronize receive stream rate to transmit stream rate */ 89 + err = avc_general_set_sig_fmt(bebob->unit, rx_rate, 90 + AVC_GENERAL_PLUG_DIR_IN, 0); 91 + end: 92 + return err; 93 + } 94 + 95 + int 96 + snd_bebob_stream_set_rate(struct snd_bebob *bebob, unsigned int rate) 97 + { 98 + int err; 99 + 100 + err = avc_general_set_sig_fmt(bebob->unit, rate, 101 + AVC_GENERAL_PLUG_DIR_OUT, 0); 102 + if (err < 0) 103 + goto end; 104 + 105 + err = avc_general_set_sig_fmt(bebob->unit, rate, 106 + AVC_GENERAL_PLUG_DIR_IN, 0); 107 + if (err < 0) 108 + goto end; 109 + 110 + /* 111 + * Some devices need a bit time for transition. 112 + * 300msec is got by some experiments. 113 + */ 114 + msleep(300); 115 + end: 116 + return err; 117 + } 118 + 119 + int 120 + snd_bebob_stream_check_internal_clock(struct snd_bebob *bebob, bool *internal) 121 + { 122 + struct snd_bebob_clock_spec *clk_spec = bebob->spec->clock; 123 + u8 addr[AVC_BRIDGECO_ADDR_BYTES], input[7]; 124 + unsigned int id; 125 + int err = 0; 126 + 127 + *internal = false; 128 + 129 + /* 1.The device has its own operation to switch source of clock */ 130 + if (clk_spec) { 131 + err = clk_spec->get(bebob, &id); 132 + if (err < 0) 133 + dev_err(&bebob->unit->device, 134 + "fail to get clock source: %d\n", err); 135 + else if (strncmp(clk_spec->labels[id], SND_BEBOB_CLOCK_INTERNAL, 136 + strlen(SND_BEBOB_CLOCK_INTERNAL)) == 0) 137 + *internal = true; 138 + goto end; 139 + } 140 + 141 + /* 142 + * 2.The device don't support to switch source of clock then assumed 143 + * to use internal clock always 144 + */ 145 + if (bebob->sync_input_plug < 0) { 146 + *internal = true; 147 + goto end; 148 + } 149 + 150 + /* 151 + * 3.The device supports to switch source of clock by an usual 
way. 152 + * Let's check input for 'Music Sub Unit Sync Input' plug. 153 + */ 154 + avc_bridgeco_fill_msu_addr(addr, AVC_BRIDGECO_PLUG_DIR_IN, 155 + bebob->sync_input_plug); 156 + err = avc_bridgeco_get_plug_input(bebob->unit, addr, input); 157 + if (err < 0) { 158 + dev_err(&bebob->unit->device, 159 + "fail to get an input for MSU in plug %d: %d\n", 160 + bebob->sync_input_plug, err); 161 + goto end; 162 + } 163 + 164 + /* 165 + * If there are no input plugs, all of fields are 0xff. 166 + * Here check the first field. This field is used for direction. 167 + */ 168 + if (input[0] == 0xff) { 169 + *internal = true; 170 + goto end; 171 + } 172 + 173 + /* 174 + * If source of clock is internal CSR, Music Sub Unit Sync Input is 175 + * a destination of Music Sub Unit Sync Output. 176 + */ 177 + *internal = ((input[0] == AVC_BRIDGECO_PLUG_DIR_OUT) && 178 + (input[1] == AVC_BRIDGECO_PLUG_MODE_SUBUNIT) && 179 + (input[2] == 0x0c) && 180 + (input[3] == 0x00)); 181 + end: 182 + return err; 183 + } 184 + 185 + static unsigned int 186 + map_data_channels(struct snd_bebob *bebob, struct amdtp_stream *s) 187 + { 188 + unsigned int sec, sections, ch, channels; 189 + unsigned int pcm, midi, location; 190 + unsigned int stm_pos, sec_loc, pos; 191 + u8 *buf, addr[AVC_BRIDGECO_ADDR_BYTES], type; 192 + enum avc_bridgeco_plug_dir dir; 193 + int err; 194 + 195 + /* 196 + * The length of return value of this command cannot be expected. Here 197 + * use the maximum length of FCP. 
198 + */ 199 + buf = kzalloc(256, GFP_KERNEL); 200 + if (buf == NULL) 201 + return -ENOMEM; 202 + 203 + if (s == &bebob->tx_stream) 204 + dir = AVC_BRIDGECO_PLUG_DIR_OUT; 205 + else 206 + dir = AVC_BRIDGECO_PLUG_DIR_IN; 207 + 208 + avc_bridgeco_fill_unit_addr(addr, dir, AVC_BRIDGECO_PLUG_UNIT_ISOC, 0); 209 + err = avc_bridgeco_get_plug_ch_pos(bebob->unit, addr, buf, 256); 210 + if (err < 0) { 211 + dev_err(&bebob->unit->device, 212 + "fail to get channel position for isoc %s plug 0: %d\n", 213 + (dir == AVC_BRIDGECO_PLUG_DIR_IN) ? "in" : "out", 214 + err); 215 + goto end; 216 + } 217 + pos = 0; 218 + 219 + /* positions in I/O buffer */ 220 + pcm = 0; 221 + midi = 0; 222 + 223 + /* the number of sections in AMDTP packet */ 224 + sections = buf[pos++]; 225 + 226 + for (sec = 0; sec < sections; sec++) { 227 + /* type of this section */ 228 + avc_bridgeco_fill_unit_addr(addr, dir, 229 + AVC_BRIDGECO_PLUG_UNIT_ISOC, 0); 230 + err = avc_bridgeco_get_plug_section_type(bebob->unit, addr, 231 + sec, &type); 232 + if (err < 0) { 233 + dev_err(&bebob->unit->device, 234 + "fail to get section type for isoc %s plug 0: %d\n", 235 + (dir == AVC_BRIDGECO_PLUG_DIR_IN) ? "in" : 236 + "out", 237 + err); 238 + goto end; 239 + } 240 + /* NoType */ 241 + if (type == 0xff) { 242 + err = -ENOSYS; 243 + goto end; 244 + } 245 + 246 + /* the number of channels in this section */ 247 + channels = buf[pos++]; 248 + 249 + for (ch = 0; ch < channels; ch++) { 250 + /* position of this channel in AMDTP packet */ 251 + stm_pos = buf[pos++] - 1; 252 + /* location of this channel in this section */ 253 + sec_loc = buf[pos++] - 1; 254 + 255 + /* 256 + * Basically the number of location is within the 257 + * number of channels in this section. But some models 258 + * of M-Audio don't follow this. Its location for MIDI 259 + * is the position of MIDI channels in AMDTP packet. 
260 + */ 261 + if (sec_loc >= channels) 262 + sec_loc = ch; 263 + 264 + switch (type) { 265 + /* for MIDI conformant data channel */ 266 + case 0x0a: 267 + /* AMDTP_MAX_CHANNELS_FOR_MIDI is 1. */ 268 + if ((midi > 0) && (stm_pos != midi)) { 269 + err = -ENOSYS; 270 + goto end; 271 + } 272 + s->midi_position = stm_pos; 273 + midi = stm_pos; 274 + break; 275 + /* for PCM data channel */ 276 + case 0x01: /* Headphone */ 277 + case 0x02: /* Microphone */ 278 + case 0x03: /* Line */ 279 + case 0x04: /* SPDIF */ 280 + case 0x05: /* ADAT */ 281 + case 0x06: /* TDIF */ 282 + case 0x07: /* MADI */ 283 + /* for undefined/changeable signal */ 284 + case 0x08: /* Analog */ 285 + case 0x09: /* Digital */ 286 + default: 287 + location = pcm + sec_loc; 288 + if (location >= AMDTP_MAX_CHANNELS_FOR_PCM) { 289 + err = -ENOSYS; 290 + goto end; 291 + } 292 + s->pcm_positions[location] = stm_pos; 293 + break; 294 + } 295 + } 296 + 297 + if (type != 0x0a) 298 + pcm += channels; 299 + else 300 + midi += channels; 301 + } 302 + end: 303 + kfree(buf); 304 + return err; 305 + } 306 + 307 + static int 308 + init_both_connections(struct snd_bebob *bebob) 309 + { 310 + int err; 311 + 312 + err = cmp_connection_init(&bebob->in_conn, 313 + bebob->unit, CMP_INPUT, 0); 314 + if (err < 0) 315 + goto end; 316 + 317 + err = cmp_connection_init(&bebob->out_conn, 318 + bebob->unit, CMP_OUTPUT, 0); 319 + if (err < 0) 320 + cmp_connection_destroy(&bebob->in_conn); 321 + end: 322 + return err; 323 + } 324 + 325 + static int 326 + check_connection_used_by_others(struct snd_bebob *bebob, struct amdtp_stream *s) 327 + { 328 + struct cmp_connection *conn; 329 + bool used; 330 + int err; 331 + 332 + if (s == &bebob->tx_stream) 333 + conn = &bebob->out_conn; 334 + else 335 + conn = &bebob->in_conn; 336 + 337 + err = cmp_connection_check_used(conn, &used); 338 + if ((err >= 0) && used && !amdtp_stream_running(s)) { 339 + dev_err(&bebob->unit->device, 340 + "Connection established by others: %cPCR[%d]\n", 341 + 
(conn->direction == CMP_OUTPUT) ? 'o' : 'i', 342 + conn->pcr_index); 343 + err = -EBUSY; 344 + } 345 + 346 + return err; 347 + } 348 + 349 + static int 350 + make_both_connections(struct snd_bebob *bebob, unsigned int rate) 351 + { 352 + int index, pcm_channels, midi_channels, err = 0; 353 + 354 + if (bebob->connected) 355 + goto end; 356 + 357 + /* confirm params for both streams */ 358 + index = get_formation_index(rate); 359 + pcm_channels = bebob->tx_stream_formations[index].pcm; 360 + midi_channels = bebob->tx_stream_formations[index].midi; 361 + amdtp_stream_set_parameters(&bebob->tx_stream, 362 + rate, pcm_channels, midi_channels * 8); 363 + pcm_channels = bebob->rx_stream_formations[index].pcm; 364 + midi_channels = bebob->rx_stream_formations[index].midi; 365 + amdtp_stream_set_parameters(&bebob->rx_stream, 366 + rate, pcm_channels, midi_channels * 8); 367 + 368 + /* establish connections for both streams */ 369 + err = cmp_connection_establish(&bebob->out_conn, 370 + amdtp_stream_get_max_payload(&bebob->tx_stream)); 371 + if (err < 0) 372 + goto end; 373 + err = cmp_connection_establish(&bebob->in_conn, 374 + amdtp_stream_get_max_payload(&bebob->rx_stream)); 375 + if (err < 0) { 376 + cmp_connection_break(&bebob->out_conn); 377 + goto end; 378 + } 379 + 380 + bebob->connected = true; 381 + end: 382 + return err; 383 + } 384 + 385 + static void 386 + break_both_connections(struct snd_bebob *bebob) 387 + { 388 + cmp_connection_break(&bebob->in_conn); 389 + cmp_connection_break(&bebob->out_conn); 390 + 391 + bebob->connected = false; 392 + 393 + /* These models seems to be in transition state for a longer time. 
*/ 394 + if (bebob->maudio_special_quirk != NULL) 395 + msleep(200); 396 + } 397 + 398 + static void 399 + destroy_both_connections(struct snd_bebob *bebob) 400 + { 401 + break_both_connections(bebob); 402 + 403 + cmp_connection_destroy(&bebob->in_conn); 404 + cmp_connection_destroy(&bebob->out_conn); 405 + } 406 + 407 + static int 408 + get_sync_mode(struct snd_bebob *bebob, enum cip_flags *sync_mode) 409 + { 410 + /* currently this module doesn't support SYT-Match mode */ 411 + *sync_mode = CIP_SYNC_TO_DEVICE; 412 + return 0; 413 + } 414 + 415 + static int 416 + start_stream(struct snd_bebob *bebob, struct amdtp_stream *stream, 417 + unsigned int rate) 418 + { 419 + struct cmp_connection *conn; 420 + int err = 0; 421 + 422 + if (stream == &bebob->rx_stream) 423 + conn = &bebob->in_conn; 424 + else 425 + conn = &bebob->out_conn; 426 + 427 + /* channel mapping */ 428 + if (bebob->maudio_special_quirk == NULL) { 429 + err = map_data_channels(bebob, stream); 430 + if (err < 0) 431 + goto end; 432 + } 433 + 434 + /* start amdtp stream */ 435 + err = amdtp_stream_start(stream, 436 + conn->resources.channel, 437 + conn->speed); 438 + end: 439 + return err; 440 + } 441 + 442 + int snd_bebob_stream_init_duplex(struct snd_bebob *bebob) 443 + { 444 + int err; 445 + 446 + err = init_both_connections(bebob); 447 + if (err < 0) 448 + goto end; 449 + 450 + err = amdtp_stream_init(&bebob->tx_stream, bebob->unit, 451 + AMDTP_IN_STREAM, CIP_BLOCKING); 452 + if (err < 0) { 453 + amdtp_stream_destroy(&bebob->tx_stream); 454 + destroy_both_connections(bebob); 455 + goto end; 456 + } 457 + /* See comments in next function */ 458 + init_completion(&bebob->bus_reset); 459 + bebob->tx_stream.flags |= CIP_SKIP_INIT_DBC_CHECK; 460 + /* 461 + * At high sampling rate, M-Audio special firmware transmits empty 462 + * packet with the value of dbc incremented by 8 but the others are 463 + * valid to IEC 61883-1. 
464 + */ 465 + if (bebob->maudio_special_quirk) 466 + bebob->tx_stream.flags |= CIP_EMPTY_HAS_WRONG_DBC; 467 + 468 + err = amdtp_stream_init(&bebob->rx_stream, bebob->unit, 469 + AMDTP_OUT_STREAM, CIP_BLOCKING); 470 + if (err < 0) { 471 + amdtp_stream_destroy(&bebob->tx_stream); 472 + amdtp_stream_destroy(&bebob->rx_stream); 473 + destroy_both_connections(bebob); 474 + } 475 + /* 476 + * The firmware for these devices ignore MIDI messages in more than 477 + * first 8 data blocks of an received AMDTP packet. 478 + */ 479 + if (bebob->spec == &maudio_fw410_spec || 480 + bebob->spec == &maudio_special_spec) 481 + bebob->rx_stream.rx_blocks_for_midi = 8; 482 + end: 483 + return err; 484 + } 485 + 486 + int snd_bebob_stream_start_duplex(struct snd_bebob *bebob, unsigned int rate) 487 + { 488 + struct snd_bebob_rate_spec *rate_spec = bebob->spec->rate; 489 + struct amdtp_stream *master, *slave; 490 + atomic_t *slave_substreams; 491 + enum cip_flags sync_mode; 492 + unsigned int curr_rate; 493 + bool updated = false; 494 + int err = 0; 495 + 496 + /* 497 + * Normal BeBoB firmware has a quirk at bus reset to transmits packets 498 + * with discontinuous value in dbc field. 499 + * 500 + * This 'struct completion' is used to call .update() at first to update 501 + * connections/streams. Next following codes handle streaming error. 
502 + */ 503 + if (amdtp_streaming_error(&bebob->tx_stream)) { 504 + if (completion_done(&bebob->bus_reset)) 505 + reinit_completion(&bebob->bus_reset); 506 + 507 + updated = (wait_for_completion_interruptible_timeout( 508 + &bebob->bus_reset, 509 + msecs_to_jiffies(FW_ISO_RESOURCE_DELAY)) > 0); 510 + } 511 + 512 + mutex_lock(&bebob->mutex); 513 + 514 + /* Need no substreams */ 515 + if (atomic_read(&bebob->playback_substreams) == 0 && 516 + atomic_read(&bebob->capture_substreams) == 0) 517 + goto end; 518 + 519 + err = get_sync_mode(bebob, &sync_mode); 520 + if (err < 0) 521 + goto end; 522 + if (sync_mode == CIP_SYNC_TO_DEVICE) { 523 + master = &bebob->tx_stream; 524 + slave = &bebob->rx_stream; 525 + slave_substreams = &bebob->playback_substreams; 526 + } else { 527 + master = &bebob->rx_stream; 528 + slave = &bebob->tx_stream; 529 + slave_substreams = &bebob->capture_substreams; 530 + } 531 + 532 + /* 533 + * Considering JACK/FFADO streaming: 534 + * TODO: This can be removed hwdep functionality becomes popular. 535 + */ 536 + err = check_connection_used_by_others(bebob, master); 537 + if (err < 0) 538 + goto end; 539 + 540 + /* 541 + * packet queueing error or detecting discontinuity 542 + * 543 + * At bus reset, connections should not be broken here. So streams need 544 + * to be re-started. This is a reason to use SKIP_INIT_DBC_CHECK flag. 
545 + */ 546 + if (amdtp_streaming_error(master)) 547 + amdtp_stream_stop(master); 548 + if (amdtp_streaming_error(slave)) 549 + amdtp_stream_stop(slave); 550 + if (!updated && 551 + !amdtp_stream_running(master) && !amdtp_stream_running(slave)) 552 + break_both_connections(bebob); 553 + 554 + /* stop streams if rate is different */ 555 + err = rate_spec->get(bebob, &curr_rate); 556 + if (err < 0) { 557 + dev_err(&bebob->unit->device, 558 + "fail to get sampling rate: %d\n", err); 559 + goto end; 560 + } 561 + if (rate == 0) 562 + rate = curr_rate; 563 + if (rate != curr_rate) { 564 + amdtp_stream_stop(master); 565 + amdtp_stream_stop(slave); 566 + break_both_connections(bebob); 567 + } 568 + 569 + /* master should be always running */ 570 + if (!amdtp_stream_running(master)) { 571 + amdtp_stream_set_sync(sync_mode, master, slave); 572 + bebob->master = master; 573 + 574 + /* 575 + * NOTE: 576 + * If establishing connections at first, Yamaha GO46 577 + * (and maybe Terratec X24) don't generate sound. 578 + * 579 + * For firmware customized by M-Audio, refer to next NOTE. 580 + */ 581 + if (bebob->maudio_special_quirk == NULL) { 582 + err = rate_spec->set(bebob, rate); 583 + if (err < 0) { 584 + dev_err(&bebob->unit->device, 585 + "fail to set sampling rate: %d\n", 586 + err); 587 + goto end; 588 + } 589 + } 590 + 591 + err = make_both_connections(bebob, rate); 592 + if (err < 0) 593 + goto end; 594 + 595 + err = start_stream(bebob, master, rate); 596 + if (err < 0) { 597 + dev_err(&bebob->unit->device, 598 + "fail to run AMDTP master stream:%d\n", err); 599 + break_both_connections(bebob); 600 + goto end; 601 + } 602 + 603 + /* 604 + * NOTE: 605 + * The firmware customized by M-Audio uses these commands to 606 + * start transmitting stream. This is not usual way. 
607 + */ 608 + if (bebob->maudio_special_quirk != NULL) { 609 + err = rate_spec->set(bebob, rate); 610 + if (err < 0) { 611 + dev_err(&bebob->unit->device, 612 + "fail to ensure sampling rate: %d\n", 613 + err); 614 + amdtp_stream_stop(master); 615 + break_both_connections(bebob); 616 + goto end; 617 + } 618 + } 619 + 620 + /* wait first callback */ 621 + if (!amdtp_stream_wait_callback(master, CALLBACK_TIMEOUT)) { 622 + amdtp_stream_stop(master); 623 + break_both_connections(bebob); 624 + err = -ETIMEDOUT; 625 + goto end; 626 + } 627 + } 628 + 629 + /* start slave if needed */ 630 + if (atomic_read(slave_substreams) > 0 && !amdtp_stream_running(slave)) { 631 + err = start_stream(bebob, slave, rate); 632 + if (err < 0) { 633 + dev_err(&bebob->unit->device, 634 + "fail to run AMDTP slave stream:%d\n", err); 635 + amdtp_stream_stop(master); 636 + break_both_connections(bebob); 637 + goto end; 638 + } 639 + 640 + /* wait first callback */ 641 + if (!amdtp_stream_wait_callback(slave, CALLBACK_TIMEOUT)) { 642 + amdtp_stream_stop(slave); 643 + amdtp_stream_stop(master); 644 + break_both_connections(bebob); 645 + err = -ETIMEDOUT; 646 + } 647 + } 648 + end: 649 + mutex_unlock(&bebob->mutex); 650 + return err; 651 + } 652 + 653 + void snd_bebob_stream_stop_duplex(struct snd_bebob *bebob) 654 + { 655 + struct amdtp_stream *master, *slave; 656 + atomic_t *master_substreams, *slave_substreams; 657 + 658 + mutex_lock(&bebob->mutex); 659 + 660 + if (bebob->master == &bebob->rx_stream) { 661 + slave = &bebob->tx_stream; 662 + master = &bebob->rx_stream; 663 + slave_substreams = &bebob->capture_substreams; 664 + master_substreams = &bebob->playback_substreams; 665 + } else { 666 + slave = &bebob->rx_stream; 667 + master = &bebob->tx_stream; 668 + slave_substreams = &bebob->playback_substreams; 669 + master_substreams = &bebob->capture_substreams; 670 + } 671 + 672 + if (atomic_read(slave_substreams) == 0) { 673 + amdtp_stream_pcm_abort(slave); 674 + amdtp_stream_stop(slave); 675 
+ 676 + if (atomic_read(master_substreams) == 0) { 677 + amdtp_stream_pcm_abort(master); 678 + amdtp_stream_stop(master); 679 + break_both_connections(bebob); 680 + } 681 + } 682 + 683 + mutex_unlock(&bebob->mutex); 684 + } 685 + 686 + void snd_bebob_stream_update_duplex(struct snd_bebob *bebob) 687 + { 688 + /* vs. XRUN recovery due to discontinuity at bus reset */ 689 + mutex_lock(&bebob->mutex); 690 + 691 + if ((cmp_connection_update(&bebob->in_conn) < 0) || 692 + (cmp_connection_update(&bebob->out_conn) < 0)) { 693 + amdtp_stream_pcm_abort(&bebob->rx_stream); 694 + amdtp_stream_pcm_abort(&bebob->tx_stream); 695 + amdtp_stream_stop(&bebob->rx_stream); 696 + amdtp_stream_stop(&bebob->tx_stream); 697 + break_both_connections(bebob); 698 + } else { 699 + amdtp_stream_update(&bebob->rx_stream); 700 + amdtp_stream_update(&bebob->tx_stream); 701 + } 702 + 703 + /* wake up stream_start_duplex() */ 704 + if (!completion_done(&bebob->bus_reset)) 705 + complete_all(&bebob->bus_reset); 706 + 707 + mutex_unlock(&bebob->mutex); 708 + } 709 + 710 + void snd_bebob_stream_destroy_duplex(struct snd_bebob *bebob) 711 + { 712 + mutex_lock(&bebob->mutex); 713 + 714 + amdtp_stream_pcm_abort(&bebob->rx_stream); 715 + amdtp_stream_pcm_abort(&bebob->tx_stream); 716 + 717 + amdtp_stream_stop(&bebob->rx_stream); 718 + amdtp_stream_stop(&bebob->tx_stream); 719 + 720 + amdtp_stream_destroy(&bebob->rx_stream); 721 + amdtp_stream_destroy(&bebob->tx_stream); 722 + 723 + destroy_both_connections(bebob); 724 + 725 + mutex_unlock(&bebob->mutex); 726 + } 727 + 728 + /* 729 + * See: Table 50: Extended Stream Format Info Format Hierarchy Level 2’ 730 + * in Additional AVC commands (Nov 2003, BridgeCo) 731 + * Also 'Clause 12 AM824 sequence adaption layers' in IEC 61883-6:2005 732 + */ 733 + static int 734 + parse_stream_formation(u8 *buf, unsigned int len, 735 + struct snd_bebob_stream_formation *formation) 736 + { 737 + unsigned int i, e, channels, format; 738 + 739 + /* 740 + * this module can 
support a hierarchy combination that: 741 + * Root: Audio and Music (0x90) 742 + * Level 1: AM824 Compound (0x40) 743 + */ 744 + if ((buf[0] != 0x90) || (buf[1] != 0x40)) 745 + return -ENOSYS; 746 + 747 + /* check sampling rate */ 748 + for (i = 0; i < ARRAY_SIZE(bridgeco_freq_table); i++) { 749 + if (buf[2] == bridgeco_freq_table[i]) 750 + break; 751 + } 752 + if (i == sizeof(bridgeco_freq_table)) 753 + return -ENOSYS; 754 + 755 + /* Avoid double count by different entries for the same rate. */ 756 + memset(&formation[i], 0, sizeof(struct snd_bebob_stream_formation)); 757 + 758 + for (e = 0; e < buf[4]; e++) { 759 + channels = buf[5 + e * 2]; 760 + format = buf[6 + e * 2]; 761 + 762 + switch (format) { 763 + /* IEC 60958 Conformant, currently handled as MBLA */ 764 + case 0x00: 765 + /* Multi bit linear audio */ 766 + case 0x06: /* Raw */ 767 + formation[i].pcm += channels; 768 + break; 769 + /* MIDI Conformant */ 770 + case 0x0d: 771 + formation[i].midi += channels; 772 + break; 773 + /* IEC 61937-3 to 7 */ 774 + case 0x01: 775 + case 0x02: 776 + case 0x03: 777 + case 0x04: 778 + case 0x05: 779 + /* Multi bit linear audio */ 780 + case 0x07: /* DVD-Audio */ 781 + case 0x0c: /* High Precision */ 782 + /* One Bit Audio */ 783 + case 0x08: /* (Plain) Raw */ 784 + case 0x09: /* (Plain) SACD */ 785 + case 0x0a: /* (Encoded) Raw */ 786 + case 0x0b: /* (Encoded) SACD */ 787 + /* Synchronization Stream (Stereo Raw audio) */ 788 + case 0x40: 789 + /* Don't care */ 790 + case 0xff: 791 + default: 792 + return -ENOSYS; /* not supported */ 793 + } 794 + } 795 + 796 + if (formation[i].pcm > AMDTP_MAX_CHANNELS_FOR_PCM || 797 + formation[i].midi > AMDTP_MAX_CHANNELS_FOR_MIDI) 798 + return -ENOSYS; 799 + 800 + return 0; 801 + } 802 + 803 + static int 804 + fill_stream_formations(struct snd_bebob *bebob, enum avc_bridgeco_plug_dir dir, 805 + unsigned short pid) 806 + { 807 + u8 *buf; 808 + struct snd_bebob_stream_formation *formations; 809 + unsigned int len, eid; 810 + u8 
addr[AVC_BRIDGECO_ADDR_BYTES]; 811 + int err; 812 + 813 + buf = kmalloc(FORMAT_MAXIMUM_LENGTH, GFP_KERNEL); 814 + if (buf == NULL) 815 + return -ENOMEM; 816 + 817 + if (dir == AVC_BRIDGECO_PLUG_DIR_IN) 818 + formations = bebob->rx_stream_formations; 819 + else 820 + formations = bebob->tx_stream_formations; 821 + 822 + for (eid = 0; eid < SND_BEBOB_STRM_FMT_ENTRIES; eid++) { 823 + len = FORMAT_MAXIMUM_LENGTH; 824 + avc_bridgeco_fill_unit_addr(addr, dir, 825 + AVC_BRIDGECO_PLUG_UNIT_ISOC, pid); 826 + err = avc_bridgeco_get_plug_strm_fmt(bebob->unit, addr, buf, 827 + &len, eid); 828 + /* No entries remained. */ 829 + if (err == -EINVAL && eid > 0) { 830 + err = 0; 831 + break; 832 + } else if (err < 0) { 833 + dev_err(&bebob->unit->device, 834 + "fail to get stream format %d for isoc %s plug %d:%d\n", 835 + eid, 836 + (dir == AVC_BRIDGECO_PLUG_DIR_IN) ? "in" : 837 + "out", 838 + pid, err); 839 + break; 840 + } 841 + 842 + err = parse_stream_formation(buf, len, formations); 843 + if (err < 0) 844 + break; 845 + } 846 + 847 + kfree(buf); 848 + return err; 849 + } 850 + 851 + static int 852 + seek_msu_sync_input_plug(struct snd_bebob *bebob) 853 + { 854 + u8 plugs[AVC_PLUG_INFO_BUF_BYTES], addr[AVC_BRIDGECO_ADDR_BYTES]; 855 + unsigned int i; 856 + enum avc_bridgeco_plug_type type; 857 + int err; 858 + 859 + /* Get the number of Music Sub Unit for both direction. 
*/ 860 + err = avc_general_get_plug_info(bebob->unit, 0x0c, 0x00, 0x00, plugs); 861 + if (err < 0) { 862 + dev_err(&bebob->unit->device, 863 + "fail to get info for MSU in/out plugs: %d\n", 864 + err); 865 + goto end; 866 + } 867 + 868 + /* seek destination plugs for 'MSU sync input' */ 869 + bebob->sync_input_plug = -1; 870 + for (i = 0; i < plugs[0]; i++) { 871 + avc_bridgeco_fill_msu_addr(addr, AVC_BRIDGECO_PLUG_DIR_IN, i); 872 + err = avc_bridgeco_get_plug_type(bebob->unit, addr, &type); 873 + if (err < 0) { 874 + dev_err(&bebob->unit->device, 875 + "fail to get type for MSU in plug %d: %d\n", 876 + i, err); 877 + goto end; 878 + } 879 + 880 + if (type == AVC_BRIDGECO_PLUG_TYPE_SYNC) { 881 + bebob->sync_input_plug = i; 882 + break; 883 + } 884 + } 885 + end: 886 + return err; 887 + } 888 + 889 + int snd_bebob_stream_discover(struct snd_bebob *bebob) 890 + { 891 + struct snd_bebob_clock_spec *clk_spec = bebob->spec->clock; 892 + u8 plugs[AVC_PLUG_INFO_BUF_BYTES], addr[AVC_BRIDGECO_ADDR_BYTES]; 893 + enum avc_bridgeco_plug_type type; 894 + unsigned int i; 895 + int err; 896 + 897 + /* the number of plugs for isoc in/out, ext in/out */ 898 + err = avc_general_get_plug_info(bebob->unit, 0x1f, 0x07, 0x00, plugs); 899 + if (err < 0) { 900 + dev_err(&bebob->unit->device, 901 + "fail to get info for isoc/external in/out plugs: %d\n", 902 + err); 903 + goto end; 904 + } 905 + 906 + /* 907 + * This module supports at least one isoc input plug and one isoc 908 + * output plug. 
909 + */ 910 + if ((plugs[0] == 0) || (plugs[1] == 0)) { 911 + err = -ENOSYS; 912 + goto end; 913 + } 914 + 915 + avc_bridgeco_fill_unit_addr(addr, AVC_BRIDGECO_PLUG_DIR_IN, 916 + AVC_BRIDGECO_PLUG_UNIT_ISOC, 0); 917 + err = avc_bridgeco_get_plug_type(bebob->unit, addr, &type); 918 + if (err < 0) { 919 + dev_err(&bebob->unit->device, 920 + "fail to get type for isoc in plug 0: %d\n", err); 921 + goto end; 922 + } else if (type != AVC_BRIDGECO_PLUG_TYPE_ISOC) { 923 + err = -ENOSYS; 924 + goto end; 925 + } 926 + err = fill_stream_formations(bebob, AVC_BRIDGECO_PLUG_DIR_IN, 0); 927 + if (err < 0) 928 + goto end; 929 + 930 + avc_bridgeco_fill_unit_addr(addr, AVC_BRIDGECO_PLUG_DIR_OUT, 931 + AVC_BRIDGECO_PLUG_UNIT_ISOC, 0); 932 + err = avc_bridgeco_get_plug_type(bebob->unit, addr, &type); 933 + if (err < 0) { 934 + dev_err(&bebob->unit->device, 935 + "fail to get type for isoc out plug 0: %d\n", err); 936 + goto end; 937 + } else if (type != AVC_BRIDGECO_PLUG_TYPE_ISOC) { 938 + err = -ENOSYS; 939 + goto end; 940 + } 941 + err = fill_stream_formations(bebob, AVC_BRIDGECO_PLUG_DIR_OUT, 0); 942 + if (err < 0) 943 + goto end; 944 + 945 + /* count external input plugs for MIDI */ 946 + bebob->midi_input_ports = 0; 947 + for (i = 0; i < plugs[2]; i++) { 948 + avc_bridgeco_fill_unit_addr(addr, AVC_BRIDGECO_PLUG_DIR_IN, 949 + AVC_BRIDGECO_PLUG_UNIT_EXT, i); 950 + err = avc_bridgeco_get_plug_type(bebob->unit, addr, &type); 951 + if (err < 0) { 952 + dev_err(&bebob->unit->device, 953 + "fail to get type for external in plug %d: %d\n", 954 + i, err); 955 + goto end; 956 + } else if (type == AVC_BRIDGECO_PLUG_TYPE_MIDI) { 957 + bebob->midi_input_ports++; 958 + } 959 + } 960 + 961 + /* count external output plugs for MIDI */ 962 + bebob->midi_output_ports = 0; 963 + for (i = 0; i < plugs[3]; i++) { 964 + avc_bridgeco_fill_unit_addr(addr, AVC_BRIDGECO_PLUG_DIR_OUT, 965 + AVC_BRIDGECO_PLUG_UNIT_EXT, i); 966 + err = avc_bridgeco_get_plug_type(bebob->unit, addr, &type); 967 + if (err < 
0) { 968 + dev_err(&bebob->unit->device, 969 + "fail to get type for external out plug %d: %d\n", 970 + i, err); 971 + goto end; 972 + } else if (type == AVC_BRIDGECO_PLUG_TYPE_MIDI) { 973 + bebob->midi_output_ports++; 974 + } 975 + } 976 + 977 + /* for check source of clock later */ 978 + if (!clk_spec) 979 + err = seek_msu_sync_input_plug(bebob); 980 + end: 981 + return err; 982 + } 983 + 984 + void snd_bebob_stream_lock_changed(struct snd_bebob *bebob) 985 + { 986 + bebob->dev_lock_changed = true; 987 + wake_up(&bebob->hwdep_wait); 988 + } 989 + 990 + int snd_bebob_stream_lock_try(struct snd_bebob *bebob) 991 + { 992 + int err; 993 + 994 + spin_lock_irq(&bebob->lock); 995 + 996 + /* user land lock this */ 997 + if (bebob->dev_lock_count < 0) { 998 + err = -EBUSY; 999 + goto end; 1000 + } 1001 + 1002 + /* this is the first time */ 1003 + if (bebob->dev_lock_count++ == 0) 1004 + snd_bebob_stream_lock_changed(bebob); 1005 + err = 0; 1006 + end: 1007 + spin_unlock_irq(&bebob->lock); 1008 + return err; 1009 + } 1010 + 1011 + void snd_bebob_stream_lock_release(struct snd_bebob *bebob) 1012 + { 1013 + spin_lock_irq(&bebob->lock); 1014 + 1015 + if (WARN_ON(bebob->dev_lock_count <= 0)) 1016 + goto end; 1017 + if (--bebob->dev_lock_count == 0) 1018 + snd_bebob_stream_lock_changed(bebob); 1019 + end: 1020 + spin_unlock_irq(&bebob->lock); 1021 + }
+68
sound/firewire/bebob/bebob_terratec.c
··· 1 + /* 2 + * bebob_terratec.c - a part of driver for BeBoB based devices 3 + * 4 + * Copyright (c) 2013-2014 Takashi Sakamoto 5 + * 6 + * Licensed under the terms of the GNU General Public License, version 2. 7 + */ 8 + 9 + #include "./bebob.h" 10 + 11 + static char *const phase88_rack_clk_src_labels[] = { 12 + SND_BEBOB_CLOCK_INTERNAL, "Digital In", "Word Clock" 13 + }; 14 + static int 15 + phase88_rack_clk_src_get(struct snd_bebob *bebob, unsigned int *id) 16 + { 17 + unsigned int enable_ext, enable_word; 18 + int err; 19 + 20 + err = avc_audio_get_selector(bebob->unit, 0, 0, &enable_ext); 21 + if (err < 0) 22 + goto end; 23 + err = avc_audio_get_selector(bebob->unit, 0, 0, &enable_word); 24 + if (err < 0) 25 + goto end; 26 + 27 + *id = (enable_ext & 0x01) | ((enable_word & 0x01) << 1); 28 + end: 29 + return err; 30 + } 31 + 32 + static char *const phase24_series_clk_src_labels[] = { 33 + SND_BEBOB_CLOCK_INTERNAL, "Digital In" 34 + }; 35 + static int 36 + phase24_series_clk_src_get(struct snd_bebob *bebob, unsigned int *id) 37 + { 38 + return avc_audio_get_selector(bebob->unit, 0, 4, id); 39 + } 40 + 41 + static struct snd_bebob_rate_spec phase_series_rate_spec = { 42 + .get = &snd_bebob_stream_get_rate, 43 + .set = &snd_bebob_stream_set_rate, 44 + }; 45 + 46 + /* PHASE 88 Rack FW */ 47 + static struct snd_bebob_clock_spec phase88_rack_clk = { 48 + .num = ARRAY_SIZE(phase88_rack_clk_src_labels), 49 + .labels = phase88_rack_clk_src_labels, 50 + .get = &phase88_rack_clk_src_get, 51 + }; 52 + struct snd_bebob_spec phase88_rack_spec = { 53 + .clock = &phase88_rack_clk, 54 + .rate = &phase_series_rate_spec, 55 + .meter = NULL 56 + }; 57 + 58 + /* 'PHASE 24 FW' and 'PHASE X24 FW' */ 59 + static struct snd_bebob_clock_spec phase24_series_clk = { 60 + .num = ARRAY_SIZE(phase24_series_clk_src_labels), 61 + .labels = phase24_series_clk_src_labels, 62 + .get = &phase24_series_clk_src_get, 63 + }; 64 + struct snd_bebob_spec phase24_series_spec = { 65 + .clock = 
&phase24_series_clk, 66 + .rate = &phase_series_rate_spec, 67 + .meter = NULL 68 + };
+50
sound/firewire/bebob/bebob_yamaha.c
··· 1 + /* 2 + * bebob_yamaha.c - a part of driver for BeBoB based devices 3 + * 4 + * Copyright (c) 2013-2014 Takashi Sakamoto 5 + * 6 + * Licensed under the terms of the GNU General Public License, version 2. 7 + */ 8 + 9 + #include "./bebob.h" 10 + 11 + /* 12 + * NOTE: 13 + * Yamaha GO44 is not designed to be used as stand-alone mixer. So any streams 14 + * must be accompanied. If changing the state, a LED on the device starts to 15 + * blink and its sync status is false. In this state, the device sounds nothing 16 + * even if streaming. To start streaming at the current sampling rate is only 17 + * way to revocer this state. GO46 is better for stand-alone mixer. 18 + * 19 + * Both of them have a capability to change its sampling rate up to 192.0kHz. 20 + * At 192.0kHz, the device reports 4 PCM-in, 1 MIDI-in, 6 PCM-out, 1 MIDI-out. 21 + * But Yamaha's driver reduce 2 PCM-in, 1 MIDI-in, 2 PCM-out, 1 MIDI-out to use 22 + * 'Extended Stream Format Information Command - Single Request' in 'Additional 23 + * AVC commands' defined by BridgeCo. 24 + * This ALSA driver don't do this because a bit tiresome. Then isochronous 25 + * streaming with many asynchronous transactions brings sounds with noises. 26 + * Unfortunately current 'ffado-mixer' generated many asynchronous transaction 27 + * to observe device's state, mainly check cmp connection and signal format. I 28 + * reccomend users to close ffado-mixer at 192.0kHz if mixer is needless. 
29 + */ 30 + 31 + static char *const clk_src_labels[] = {SND_BEBOB_CLOCK_INTERNAL, "SPDIF"}; 32 + static int 33 + clk_src_get(struct snd_bebob *bebob, unsigned int *id) 34 + { 35 + return avc_audio_get_selector(bebob->unit, 0, 4, id); 36 + } 37 + static struct snd_bebob_clock_spec clock_spec = { 38 + .num = ARRAY_SIZE(clk_src_labels), 39 + .labels = clk_src_labels, 40 + .get = &clk_src_get, 41 + }; 42 + static struct snd_bebob_rate_spec rate_spec = { 43 + .get = &snd_bebob_stream_get_rate, 44 + .set = &snd_bebob_stream_set_rate, 45 + }; 46 + struct snd_bebob_spec yamaha_go_spec = { 47 + .clock = &clock_spec, 48 + .rate = &rate_spec, 49 + .meter = NULL 50 + };
+153 -50
sound/firewire/cmp.c
··· 14 14 #include "iso-resources.h" 15 15 #include "cmp.h" 16 16 17 - #define IMPR_SPEED_MASK 0xc0000000 18 - #define IMPR_SPEED_SHIFT 30 19 - #define IMPR_XSPEED_MASK 0x00000060 20 - #define IMPR_XSPEED_SHIFT 5 21 - #define IMPR_PLUGS_MASK 0x0000001f 17 + /* MPR common fields */ 18 + #define MPR_SPEED_MASK 0xc0000000 19 + #define MPR_SPEED_SHIFT 30 20 + #define MPR_XSPEED_MASK 0x00000060 21 + #define MPR_XSPEED_SHIFT 5 22 + #define MPR_PLUGS_MASK 0x0000001f 22 23 23 - #define IPCR_ONLINE 0x80000000 24 - #define IPCR_BCAST_CONN 0x40000000 25 - #define IPCR_P2P_CONN_MASK 0x3f000000 26 - #define IPCR_P2P_CONN_SHIFT 24 27 - #define IPCR_CHANNEL_MASK 0x003f0000 28 - #define IPCR_CHANNEL_SHIFT 16 24 + /* PCR common fields */ 25 + #define PCR_ONLINE 0x80000000 26 + #define PCR_BCAST_CONN 0x40000000 27 + #define PCR_P2P_CONN_MASK 0x3f000000 28 + #define PCR_P2P_CONN_SHIFT 24 29 + #define PCR_CHANNEL_MASK 0x003f0000 30 + #define PCR_CHANNEL_SHIFT 16 31 + 32 + /* oPCR specific fields */ 33 + #define OPCR_XSPEED_MASK 0x00C00000 34 + #define OPCR_XSPEED_SHIFT 22 35 + #define OPCR_SPEED_MASK 0x0000C000 36 + #define OPCR_SPEED_SHIFT 14 37 + #define OPCR_OVERHEAD_ID_MASK 0x00003C00 38 + #define OPCR_OVERHEAD_ID_SHIFT 10 29 39 30 40 enum bus_reset_handling { 31 41 ABORT_ON_BUS_RESET, ··· 49 39 50 40 va_start(va, fmt); 51 41 dev_err(&c->resources.unit->device, "%cPCR%u: %pV", 52 - 'i', c->pcr_index, &(struct va_format){ fmt, &va }); 42 + (c->direction == CMP_INPUT) ? 
'i' : 'o', 43 + c->pcr_index, &(struct va_format){ fmt, &va }); 53 44 va_end(va); 45 + } 46 + 47 + static u64 mpr_address(struct cmp_connection *c) 48 + { 49 + if (c->direction == CMP_INPUT) 50 + return CSR_REGISTER_BASE + CSR_IMPR; 51 + else 52 + return CSR_REGISTER_BASE + CSR_OMPR; 53 + } 54 + 55 + static u64 pcr_address(struct cmp_connection *c) 56 + { 57 + if (c->direction == CMP_INPUT) 58 + return CSR_REGISTER_BASE + CSR_IPCR(c->pcr_index); 59 + else 60 + return CSR_REGISTER_BASE + CSR_OPCR(c->pcr_index); 54 61 } 55 62 56 63 static int pcr_modify(struct cmp_connection *c, ··· 85 58 86 59 err = snd_fw_transaction( 87 60 c->resources.unit, TCODE_LOCK_COMPARE_SWAP, 88 - CSR_REGISTER_BASE + CSR_IPCR(c->pcr_index), 89 - buffer, 8, 61 + pcr_address(c), buffer, 8, 90 62 FW_FIXED_GENERATION | c->resources.generation); 91 63 92 64 if (err < 0) { ··· 114 88 * cmp_connection_init - initializes a connection manager 115 89 * @c: the connection manager to initialize 116 90 * @unit: a unit of the target device 117 - * @ipcr_index: the index of the iPCR on the target device 91 + * @pcr_index: the index of the iPCR/oPCR on the target device 118 92 */ 119 93 int cmp_connection_init(struct cmp_connection *c, 120 94 struct fw_unit *unit, 121 - unsigned int ipcr_index) 95 + enum cmp_direction direction, 96 + unsigned int pcr_index) 122 97 { 123 - __be32 impr_be; 124 - u32 impr; 98 + __be32 mpr_be; 99 + u32 mpr; 125 100 int err; 126 101 102 + c->direction = direction; 127 103 err = snd_fw_transaction(unit, TCODE_READ_QUADLET_REQUEST, 128 - CSR_REGISTER_BASE + CSR_IMPR, 129 - &impr_be, 4, 0); 104 + mpr_address(c), &mpr_be, 4, 0); 130 105 if (err < 0) 131 106 return err; 132 - impr = be32_to_cpu(impr_be); 107 + mpr = be32_to_cpu(mpr_be); 133 108 134 - if (ipcr_index >= (impr & IMPR_PLUGS_MASK)) 109 + if (pcr_index >= (mpr & MPR_PLUGS_MASK)) 135 110 return -EINVAL; 136 111 137 112 err = fw_iso_resources_init(&c->resources, unit); ··· 142 115 c->connected = false; 143 116 
mutex_init(&c->mutex); 144 117 c->last_pcr_value = cpu_to_be32(0x80000000); 145 - c->pcr_index = ipcr_index; 146 - c->max_speed = (impr & IMPR_SPEED_MASK) >> IMPR_SPEED_SHIFT; 118 + c->pcr_index = pcr_index; 119 + c->max_speed = (mpr & MPR_SPEED_MASK) >> MPR_SPEED_SHIFT; 147 120 if (c->max_speed == SCODE_BETA) 148 - c->max_speed += (impr & IMPR_XSPEED_MASK) >> IMPR_XSPEED_SHIFT; 121 + c->max_speed += (mpr & MPR_XSPEED_MASK) >> MPR_XSPEED_SHIFT; 149 122 150 123 return 0; 151 124 } 152 125 EXPORT_SYMBOL(cmp_connection_init); 126 + 127 + /** 128 + * cmp_connection_check_used - check connection is already esablished or not 129 + * @c: the connection manager to be checked 130 + */ 131 + int cmp_connection_check_used(struct cmp_connection *c, bool *used) 132 + { 133 + __be32 pcr; 134 + int err; 135 + 136 + err = snd_fw_transaction( 137 + c->resources.unit, TCODE_READ_QUADLET_REQUEST, 138 + pcr_address(c), &pcr, 4, 0); 139 + if (err >= 0) 140 + *used = !!(pcr & cpu_to_be32(PCR_BCAST_CONN | 141 + PCR_P2P_CONN_MASK)); 142 + 143 + return err; 144 + } 145 + EXPORT_SYMBOL(cmp_connection_check_used); 153 146 154 147 /** 155 148 * cmp_connection_destroy - free connection manager resources ··· 186 139 187 140 static __be32 ipcr_set_modify(struct cmp_connection *c, __be32 ipcr) 188 141 { 189 - ipcr &= ~cpu_to_be32(IPCR_BCAST_CONN | 190 - IPCR_P2P_CONN_MASK | 191 - IPCR_CHANNEL_MASK); 192 - ipcr |= cpu_to_be32(1 << IPCR_P2P_CONN_SHIFT); 193 - ipcr |= cpu_to_be32(c->resources.channel << IPCR_CHANNEL_SHIFT); 142 + ipcr &= ~cpu_to_be32(PCR_BCAST_CONN | 143 + PCR_P2P_CONN_MASK | 144 + PCR_CHANNEL_MASK); 145 + ipcr |= cpu_to_be32(1 << PCR_P2P_CONN_SHIFT); 146 + ipcr |= cpu_to_be32(c->resources.channel << PCR_CHANNEL_SHIFT); 194 147 195 148 return ipcr; 196 149 } 197 150 198 - static int ipcr_set_check(struct cmp_connection *c, __be32 ipcr) 151 + static int get_overhead_id(struct cmp_connection *c) 199 152 { 200 - if (ipcr & cpu_to_be32(IPCR_BCAST_CONN | 201 - IPCR_P2P_CONN_MASK)) { 153 
+ int id; 154 + 155 + /* 156 + * apply "oPCR overhead ID encoding" 157 + * the encoding table can convert up to 512. 158 + * here the value over 512 is converted as the same way as 512. 159 + */ 160 + for (id = 1; id < 16; id++) { 161 + if (c->resources.bandwidth_overhead < (id << 5)) 162 + break; 163 + } 164 + if (id == 16) 165 + id = 0; 166 + 167 + return id; 168 + } 169 + 170 + static __be32 opcr_set_modify(struct cmp_connection *c, __be32 opcr) 171 + { 172 + unsigned int spd, xspd; 173 + 174 + /* generate speed and extended speed field value */ 175 + if (c->speed > SCODE_400) { 176 + spd = SCODE_800; 177 + xspd = c->speed - SCODE_800; 178 + } else { 179 + spd = c->speed; 180 + xspd = 0; 181 + } 182 + 183 + opcr &= ~cpu_to_be32(PCR_BCAST_CONN | 184 + PCR_P2P_CONN_MASK | 185 + OPCR_XSPEED_MASK | 186 + PCR_CHANNEL_MASK | 187 + OPCR_SPEED_MASK | 188 + OPCR_OVERHEAD_ID_MASK); 189 + opcr |= cpu_to_be32(1 << PCR_P2P_CONN_SHIFT); 190 + opcr |= cpu_to_be32(xspd << OPCR_XSPEED_SHIFT); 191 + opcr |= cpu_to_be32(c->resources.channel << PCR_CHANNEL_SHIFT); 192 + opcr |= cpu_to_be32(spd << OPCR_SPEED_SHIFT); 193 + opcr |= cpu_to_be32(get_overhead_id(c) << OPCR_OVERHEAD_ID_SHIFT); 194 + 195 + return opcr; 196 + } 197 + 198 + static int pcr_set_check(struct cmp_connection *c, __be32 pcr) 199 + { 200 + if (pcr & cpu_to_be32(PCR_BCAST_CONN | 201 + PCR_P2P_CONN_MASK)) { 202 202 cmp_error(c, "plug is already in use\n"); 203 203 return -EBUSY; 204 204 } 205 - if (!(ipcr & cpu_to_be32(IPCR_ONLINE))) { 205 + if (!(pcr & cpu_to_be32(PCR_ONLINE))) { 206 206 cmp_error(c, "plug is not on-line\n"); 207 207 return -ECONNREFUSED; 208 208 } ··· 264 170 * 265 171 * This function establishes a point-to-point connection from the local 266 172 * computer to the target by allocating isochronous resources (channel and 267 - * bandwidth) and setting the target's input plug control register. When this 268 - * function succeeds, the caller is responsible for starting transmitting 269 - * packets. 
173 + * bandwidth) and setting the target's input/output plug control register. 174 + * When this function succeeds, the caller is responsible for starting 175 + * transmitting packets. 270 176 */ 271 177 int cmp_connection_establish(struct cmp_connection *c, 272 178 unsigned int max_payload_bytes) ··· 287 193 if (err < 0) 288 194 goto err_mutex; 289 195 290 - err = pcr_modify(c, ipcr_set_modify, ipcr_set_check, 291 - ABORT_ON_BUS_RESET); 196 + if (c->direction == CMP_OUTPUT) 197 + err = pcr_modify(c, opcr_set_modify, pcr_set_check, 198 + ABORT_ON_BUS_RESET); 199 + else 200 + err = pcr_modify(c, ipcr_set_modify, pcr_set_check, 201 + ABORT_ON_BUS_RESET); 202 + 292 203 if (err == -EAGAIN) { 293 204 fw_iso_resources_free(&c->resources); 294 205 goto retry_after_bus_reset; ··· 320 221 * cmp_connection_update - update the connection after a bus reset 321 222 * @c: the connection manager 322 223 * 323 - * This function must be called from the driver's .update handler to reestablish 324 - * any connection that might have been active. 224 + * This function must be called from the driver's .update handler to 225 + * reestablish any connection that might have been active. 325 226 * 326 227 * Returns zero on success, or a negative error code. On an error, the 327 228 * connection is broken and the caller must stop transmitting iso packets. 
··· 341 242 if (err < 0) 342 243 goto err_unconnect; 343 244 344 - err = pcr_modify(c, ipcr_set_modify, ipcr_set_check, 345 - SUCCEED_ON_BUS_RESET); 245 + if (c->direction == CMP_OUTPUT) 246 + err = pcr_modify(c, opcr_set_modify, pcr_set_check, 247 + SUCCEED_ON_BUS_RESET); 248 + else 249 + err = pcr_modify(c, ipcr_set_modify, pcr_set_check, 250 + SUCCEED_ON_BUS_RESET); 251 + 346 252 if (err < 0) 347 253 goto err_resources; 348 254 ··· 365 261 } 366 262 EXPORT_SYMBOL(cmp_connection_update); 367 263 368 - 369 - static __be32 ipcr_break_modify(struct cmp_connection *c, __be32 ipcr) 264 + static __be32 pcr_break_modify(struct cmp_connection *c, __be32 pcr) 370 265 { 371 - return ipcr & ~cpu_to_be32(IPCR_BCAST_CONN | IPCR_P2P_CONN_MASK); 266 + return pcr & ~cpu_to_be32(PCR_BCAST_CONN | PCR_P2P_CONN_MASK); 372 267 } 373 268 374 269 /** 375 270 * cmp_connection_break - break the connection to the target 376 271 * @c: the connection manager 377 272 * 378 - * This function deactives the connection in the target's input plug control 379 - * register, and frees the isochronous resources of the connection. Before 380 - * calling this function, the caller should cease transmitting packets. 273 + * This function deactives the connection in the target's input/output plug 274 + * control register, and frees the isochronous resources of the connection. 275 + * Before calling this function, the caller should cease transmitting packets. 381 276 */ 382 277 void cmp_connection_break(struct cmp_connection *c) 383 278 { ··· 389 286 return; 390 287 } 391 288 392 - err = pcr_modify(c, ipcr_break_modify, NULL, SUCCEED_ON_BUS_RESET); 289 + err = pcr_modify(c, pcr_break_modify, NULL, SUCCEED_ON_BUS_RESET); 393 290 if (err < 0) 394 291 cmp_error(c, "plug is still connected\n"); 395 292
+11 -3
sound/firewire/cmp.h
··· 7 7 8 8 struct fw_unit; 9 9 10 + enum cmp_direction { 11 + CMP_INPUT = 0, 12 + CMP_OUTPUT, 13 + }; 14 + 10 15 /** 11 16 * struct cmp_connection - manages an isochronous connection to a device 12 17 * @speed: the connection's actual speed 13 18 * 14 - * This structure manages (using CMP) an isochronous stream from the local 15 - * computer to a device's input plug (iPCR). 19 + * This structure manages (using CMP) an isochronous stream between the local 20 + * computer and a device's input plug (iPCR) and output plug (oPCR). 16 21 * 17 22 * There is no corresponding oPCR created on the local computer, so it is not 18 23 * possible to overlay connections on top of this one. ··· 31 26 __be32 last_pcr_value; 32 27 unsigned int pcr_index; 33 28 unsigned int max_speed; 29 + enum cmp_direction direction; 34 30 }; 35 31 36 32 int cmp_connection_init(struct cmp_connection *connection, 37 33 struct fw_unit *unit, 38 - unsigned int ipcr_index); 34 + enum cmp_direction direction, 35 + unsigned int pcr_index); 36 + int cmp_connection_check_used(struct cmp_connection *connection, bool *used); 39 37 void cmp_connection_destroy(struct cmp_connection *connection); 40 38 41 39 int cmp_connection_establish(struct cmp_connection *connection,
+47 -41
sound/firewire/dice.c
··· 51 51 wait_queue_head_t hwdep_wait; 52 52 u32 notification_bits; 53 53 struct fw_iso_resources resources; 54 - struct amdtp_out_stream stream; 54 + struct amdtp_stream stream; 55 55 }; 56 56 57 57 MODULE_DESCRIPTION("DICE driver"); ··· 420 420 if (err < 0) 421 421 goto err_lock; 422 422 423 - err = snd_pcm_hw_constraint_step(runtime, 0, 424 - SNDRV_PCM_HW_PARAM_PERIOD_SIZE, 32); 425 - if (err < 0) 426 - goto err_lock; 427 - err = snd_pcm_hw_constraint_step(runtime, 0, 428 - SNDRV_PCM_HW_PARAM_BUFFER_SIZE, 32); 429 - if (err < 0) 430 - goto err_lock; 431 - 432 - err = snd_pcm_hw_constraint_minmax(runtime, 433 - SNDRV_PCM_HW_PARAM_PERIOD_TIME, 434 - 5000, UINT_MAX); 435 - if (err < 0) 436 - goto err_lock; 437 - 438 - err = snd_pcm_hw_constraint_msbits(runtime, 0, 32, 24); 423 + err = amdtp_stream_add_pcm_hw_constraints(&dice->stream, runtime); 439 424 if (err < 0) 440 425 goto err_lock; 441 426 ··· 445 460 { 446 461 int err; 447 462 448 - if (amdtp_out_stream_running(&dice->stream)) 463 + if (amdtp_stream_running(&dice->stream)) 449 464 return 0; 450 465 451 - err = amdtp_out_stream_start(&dice->stream, dice->resources.channel, 452 - fw_parent_device(dice->unit)->max_speed); 466 + err = amdtp_stream_start(&dice->stream, dice->resources.channel, 467 + fw_parent_device(dice->unit)->max_speed); 453 468 if (err < 0) 454 469 return err; 455 470 456 471 err = dice_enable_set(dice); 457 472 if (err < 0) { 458 - amdtp_out_stream_stop(&dice->stream); 473 + amdtp_stream_stop(&dice->stream); 459 474 return err; 460 475 } 461 476 ··· 469 484 470 485 if (!dice->resources.allocated) { 471 486 err = fw_iso_resources_allocate(&dice->resources, 472 - amdtp_out_stream_get_max_payload(&dice->stream), 487 + amdtp_stream_get_max_payload(&dice->stream), 473 488 fw_parent_device(dice->unit)->max_speed); 474 489 if (err < 0) 475 490 goto error; ··· 501 516 502 517 static void dice_stream_stop_packets(struct dice *dice) 503 518 { 504 - if (amdtp_out_stream_running(&dice->stream)) { 519 + 
if (amdtp_stream_running(&dice->stream)) { 505 520 dice_enable_clear(dice); 506 - amdtp_out_stream_stop(&dice->stream); 521 + amdtp_stream_stop(&dice->stream); 507 522 } 508 523 } 509 524 ··· 548 563 struct snd_pcm_hw_params *hw_params) 549 564 { 550 565 struct dice *dice = substream->private_data; 551 - unsigned int rate_index, mode; 566 + unsigned int rate_index, mode, rate, channels, i; 552 567 int err; 553 568 554 569 mutex_lock(&dice->mutex); ··· 560 575 if (err < 0) 561 576 return err; 562 577 563 - rate_index = rate_to_index(params_rate(hw_params)); 578 + rate = params_rate(hw_params); 579 + rate_index = rate_to_index(rate); 564 580 err = dice_change_rate(dice, rate_index << CLOCK_RATE_SHIFT); 565 581 if (err < 0) 566 582 return err; 567 583 584 + /* 585 + * At rates above 96 kHz, pretend that the stream runs at half the 586 + * actual sample rate with twice the number of channels; two samples 587 + * of a channel are stored consecutively in the packet. Requires 588 + * blocking mode and PCM buffer size should be aligned to SYT_INTERVAL. 
589 + */ 590 + channels = params_channels(hw_params); 591 + if (rate_index > 4) { 592 + if (channels > AMDTP_MAX_CHANNELS_FOR_PCM / 2) { 593 + err = -ENOSYS; 594 + return err; 595 + } 596 + 597 + for (i = 0; i < channels; i++) { 598 + dice->stream.pcm_positions[i * 2] = i; 599 + dice->stream.pcm_positions[i * 2 + 1] = i + channels; 600 + } 601 + 602 + rate /= 2; 603 + channels *= 2; 604 + } 605 + 568 606 mode = rate_index_to_mode(rate_index); 569 - amdtp_out_stream_set_parameters(&dice->stream, 570 - params_rate(hw_params), 571 - params_channels(hw_params), 572 - dice->rx_midi_ports[mode]); 573 - amdtp_out_stream_set_pcm_format(&dice->stream, 574 - params_format(hw_params)); 607 + amdtp_stream_set_parameters(&dice->stream, rate, channels, 608 + dice->rx_midi_ports[mode]); 609 + amdtp_stream_set_pcm_format(&dice->stream, 610 + params_format(hw_params)); 575 611 576 612 return 0; 577 613 } ··· 615 609 616 610 mutex_lock(&dice->mutex); 617 611 618 - if (amdtp_out_streaming_error(&dice->stream)) 612 + if (amdtp_streaming_error(&dice->stream)) 619 613 dice_stream_stop_packets(dice); 620 614 621 615 err = dice_stream_start(dice); ··· 626 620 627 621 mutex_unlock(&dice->mutex); 628 622 629 - amdtp_out_stream_pcm_prepare(&dice->stream); 623 + amdtp_stream_pcm_prepare(&dice->stream); 630 624 631 625 return 0; 632 626 } ··· 646 640 default: 647 641 return -EINVAL; 648 642 } 649 - amdtp_out_stream_pcm_trigger(&dice->stream, pcm); 643 + amdtp_stream_pcm_trigger(&dice->stream, pcm); 650 644 651 645 return 0; 652 646 } ··· 655 649 { 656 650 struct dice *dice = substream->private_data; 657 651 658 - return amdtp_out_stream_pcm_pointer(&dice->stream); 652 + return amdtp_stream_pcm_pointer(&dice->stream); 659 653 } 660 654 661 655 static int dice_create_pcm(struct dice *dice) ··· 1110 1104 { 1111 1105 struct dice *dice = card->private_data; 1112 1106 1113 - amdtp_out_stream_destroy(&dice->stream); 1107 + amdtp_stream_destroy(&dice->stream); 1114 1108 
fw_core_remove_address_handler(&dice->notification_handler); 1115 1109 mutex_destroy(&dice->mutex); 1116 1110 } ··· 1366 1360 goto err_owner; 1367 1361 dice->resources.channels_mask = 0x00000000ffffffffuLL; 1368 1362 1369 - err = amdtp_out_stream_init(&dice->stream, unit, 1370 - CIP_BLOCKING | CIP_HI_DUALWIRE); 1363 + err = amdtp_stream_init(&dice->stream, unit, AMDTP_OUT_STREAM, 1364 + CIP_BLOCKING); 1371 1365 if (err < 0) 1372 1366 goto err_resources; 1373 1367 ··· 1423 1417 { 1424 1418 struct dice *dice = dev_get_drvdata(&unit->device); 1425 1419 1426 - amdtp_out_stream_pcm_abort(&dice->stream); 1420 + amdtp_stream_pcm_abort(&dice->stream); 1427 1421 1428 1422 snd_card_disconnect(dice->card); 1429 1423 ··· 1449 1443 * to stop so that the application can restart them in an orderly 1450 1444 * manner. 1451 1445 */ 1452 - amdtp_out_stream_pcm_abort(&dice->stream); 1446 + amdtp_stream_pcm_abort(&dice->stream); 1453 1447 1454 1448 mutex_lock(&dice->mutex); 1455 1449
+182 -7
sound/firewire/fcp.c
··· 10 10 #include <linux/firewire-constants.h> 11 11 #include <linux/list.h> 12 12 #include <linux/module.h> 13 + #include <linux/slab.h> 13 14 #include <linux/sched.h> 14 15 #include <linux/spinlock.h> 15 16 #include <linux/wait.h> 16 17 #include <linux/delay.h> 17 18 #include "fcp.h" 18 19 #include "lib.h" 20 + #include "amdtp.h" 19 21 20 22 #define CTS_AVC 0x00 21 23 22 24 #define ERROR_RETRIES 3 23 25 #define ERROR_DELAY_MS 5 24 26 #define FCP_TIMEOUT_MS 125 27 + 28 + int avc_general_set_sig_fmt(struct fw_unit *unit, unsigned int rate, 29 + enum avc_general_plug_dir dir, 30 + unsigned short pid) 31 + { 32 + unsigned int sfc; 33 + u8 *buf; 34 + bool flag; 35 + int err; 36 + 37 + flag = false; 38 + for (sfc = 0; sfc < CIP_SFC_COUNT; sfc++) { 39 + if (amdtp_rate_table[sfc] == rate) { 40 + flag = true; 41 + break; 42 + } 43 + } 44 + if (!flag) 45 + return -EINVAL; 46 + 47 + buf = kzalloc(8, GFP_KERNEL); 48 + if (buf == NULL) 49 + return -ENOMEM; 50 + 51 + buf[0] = 0x00; /* AV/C CONTROL */ 52 + buf[1] = 0xff; /* UNIT */ 53 + if (dir == AVC_GENERAL_PLUG_DIR_IN) 54 + buf[2] = 0x19; /* INPUT PLUG SIGNAL FORMAT */ 55 + else 56 + buf[2] = 0x18; /* OUTPUT PLUG SIGNAL FORMAT */ 57 + buf[3] = 0xff & pid; /* plug id */ 58 + buf[4] = 0x90; /* EOH_1, Form_1, FMT. AM824 */ 59 + buf[5] = 0x07 & sfc; /* FDF-hi. AM824, frequency */ 60 + buf[6] = 0xff; /* FDF-mid. AM824, SYT hi (not used)*/ 61 + buf[7] = 0xff; /* FDF-low. 
AM824, SYT lo (not used) */ 62 + 63 + /* do transaction and check buf[1-5] are the same against command */ 64 + err = fcp_avc_transaction(unit, buf, 8, buf, 8, 65 + BIT(1) | BIT(2) | BIT(3) | BIT(4) | BIT(5)); 66 + if (err >= 0 && err < 8) 67 + err = -EIO; 68 + else if (buf[0] == 0x08) /* NOT IMPLEMENTED */ 69 + err = -ENOSYS; 70 + else if (buf[0] == 0x0a) /* REJECTED */ 71 + err = -EINVAL; 72 + if (err < 0) 73 + goto end; 74 + 75 + err = 0; 76 + end: 77 + kfree(buf); 78 + return err; 79 + } 80 + EXPORT_SYMBOL(avc_general_set_sig_fmt); 81 + 82 + int avc_general_get_sig_fmt(struct fw_unit *unit, unsigned int *rate, 83 + enum avc_general_plug_dir dir, 84 + unsigned short pid) 85 + { 86 + unsigned int sfc; 87 + u8 *buf; 88 + int err; 89 + 90 + buf = kzalloc(8, GFP_KERNEL); 91 + if (buf == NULL) 92 + return -ENOMEM; 93 + 94 + buf[0] = 0x01; /* AV/C STATUS */ 95 + buf[1] = 0xff; /* Unit */ 96 + if (dir == AVC_GENERAL_PLUG_DIR_IN) 97 + buf[2] = 0x19; /* INPUT PLUG SIGNAL FORMAT */ 98 + else 99 + buf[2] = 0x18; /* OUTPUT PLUG SIGNAL FORMAT */ 100 + buf[3] = 0xff & pid; /* plug id */ 101 + buf[4] = 0x90; /* EOH_1, Form_1, FMT. AM824 */ 102 + buf[5] = 0xff; /* FDF-hi. AM824, frequency */ 103 + buf[6] = 0xff; /* FDF-mid. AM824, SYT hi (not used) */ 104 + buf[7] = 0xff; /* FDF-low. 
AM824, SYT lo (not used) */ 105 + 106 + /* do transaction and check buf[1-4] are the same against command */ 107 + err = fcp_avc_transaction(unit, buf, 8, buf, 8, 108 + BIT(1) | BIT(2) | BIT(3) | BIT(4)); 109 + if (err >= 0 && err < 8) 110 + err = -EIO; 111 + else if (buf[0] == 0x08) /* NOT IMPLEMENTED */ 112 + err = -ENOSYS; 113 + else if (buf[0] == 0x0a) /* REJECTED */ 114 + err = -EINVAL; 115 + else if (buf[0] == 0x0b) /* IN TRANSITION */ 116 + err = -EAGAIN; 117 + if (err < 0) 118 + goto end; 119 + 120 + /* check sfc field and pick up rate */ 121 + sfc = 0x07 & buf[5]; 122 + if (sfc >= CIP_SFC_COUNT) { 123 + err = -EAGAIN; /* also in transition */ 124 + goto end; 125 + } 126 + 127 + *rate = amdtp_rate_table[sfc]; 128 + err = 0; 129 + end: 130 + kfree(buf); 131 + return err; 132 + } 133 + EXPORT_SYMBOL(avc_general_get_sig_fmt); 134 + 135 + int avc_general_get_plug_info(struct fw_unit *unit, unsigned int subunit_type, 136 + unsigned int subunit_id, unsigned int subfunction, 137 + u8 info[AVC_PLUG_INFO_BUF_BYTES]) 138 + { 139 + u8 *buf; 140 + int err; 141 + 142 + /* extended subunit in spec.4.2 is not supported */ 143 + if ((subunit_type == 0x1E) || (subunit_id == 5)) 144 + return -EINVAL; 145 + 146 + buf = kzalloc(8, GFP_KERNEL); 147 + if (buf == NULL) 148 + return -ENOMEM; 149 + 150 + buf[0] = 0x01; /* AV/C STATUS */ 151 + /* UNIT or Subunit, Functionblock */ 152 + buf[1] = ((subunit_type & 0x1f) << 3) | (subunit_id & 0x7); 153 + buf[2] = 0x02; /* PLUG INFO */ 154 + buf[3] = 0xff & subfunction; 155 + 156 + err = fcp_avc_transaction(unit, buf, 8, buf, 8, BIT(1) | BIT(2)); 157 + if (err >= 0 && err < 8) 158 + err = -EIO; 159 + else if (buf[0] == 0x08) /* NOT IMPLEMENTED */ 160 + err = -ENOSYS; 161 + else if (buf[0] == 0x0a) /* REJECTED */ 162 + err = -EINVAL; 163 + else if (buf[0] == 0x0b) /* IN TRANSITION */ 164 + err = -EAGAIN; 165 + if (err < 0) 166 + goto end; 167 + 168 + info[0] = buf[4]; 169 + info[1] = buf[5]; 170 + info[2] = buf[6]; 171 + info[3] = buf[7]; 
172 + 173 + err = 0; 174 + end: 175 + kfree(buf); 176 + return err; 177 + } 178 + EXPORT_SYMBOL(avc_general_get_plug_info); 25 179 26 180 static DEFINE_SPINLOCK(transactions_lock); 27 181 static LIST_HEAD(transactions); ··· 184 30 STATE_PENDING, 185 31 STATE_BUS_RESET, 186 32 STATE_COMPLETE, 33 + STATE_DEFERRED, 187 34 }; 188 35 189 36 struct fcp_transaction { ··· 195 40 unsigned int response_match_bytes; 196 41 enum fcp_state state; 197 42 wait_queue_head_t wait; 43 + bool deferrable; 198 44 }; 199 45 200 46 /** ··· 237 81 t.state = STATE_PENDING; 238 82 init_waitqueue_head(&t.wait); 239 83 84 + if (*(const u8 *)command == 0x00 || *(const u8 *)command == 0x03) 85 + t.deferrable = true; 86 + 240 87 spin_lock_irq(&transactions_lock); 241 88 list_add_tail(&t.list, &transactions); 242 89 spin_unlock_irq(&transactions_lock); ··· 252 93 (void *)command, command_size, 0); 253 94 if (ret < 0) 254 95 break; 255 - 96 + deferred: 256 97 wait_event_timeout(t.wait, t.state != STATE_PENDING, 257 98 msecs_to_jiffies(FCP_TIMEOUT_MS)); 258 99 259 - if (t.state == STATE_COMPLETE) { 100 + if (t.state == STATE_DEFERRED) { 101 + /* 102 + * 'AV/C General Specification' define no time limit 103 + * on command completion once an INTERIM response has 104 + * been sent. but we promise to finish this function 105 + * for a caller. Here we use FCP_TIMEOUT_MS for next 106 + * interval. This is not in the specification. 
107 + */ 108 + t.state = STATE_PENDING; 109 + goto deferred; 110 + } else if (t.state == STATE_COMPLETE) { 260 111 ret = t.response_size; 261 112 break; 262 113 } else if (t.state == STATE_BUS_RESET) { ··· 301 132 spin_lock_irq(&transactions_lock); 302 133 list_for_each_entry(t, &transactions, list) { 303 134 if (t->unit == unit && 304 - t->state == STATE_PENDING) { 135 + (t->state == STATE_PENDING || 136 + t->state == STATE_DEFERRED)) { 305 137 t->state = STATE_BUS_RESET; 306 138 wake_up(&t->wait); 307 139 } ··· 356 186 357 187 if (t->state == STATE_PENDING && 358 188 is_matching_response(t, data, length)) { 359 - t->state = STATE_COMPLETE; 360 - t->response_size = min((unsigned int)length, 361 - t->response_size); 362 - memcpy(t->response_buffer, data, t->response_size); 189 + if (t->deferrable && *(const u8 *)data == 0x0f) { 190 + t->state = STATE_DEFERRED; 191 + } else { 192 + t->state = STATE_COMPLETE; 193 + t->response_size = min_t(unsigned int, length, 194 + t->response_size); 195 + memcpy(t->response_buffer, data, 196 + t->response_size); 197 + } 363 198 wake_up(&t->wait); 364 199 } 365 200 }
+21
sound/firewire/fcp.h
··· 1 1 #ifndef SOUND_FIREWIRE_FCP_H_INCLUDED 2 2 #define SOUND_FIREWIRE_FCP_H_INCLUDED 3 3 4 + #define AVC_PLUG_INFO_BUF_BYTES 4 5 + 4 6 struct fw_unit; 7 + 8 + /* 9 + * AV/C Digital Interface Command Set General Specification 4.2 10 + * (Sep 2004, 1394TA) 11 + */ 12 + enum avc_general_plug_dir { 13 + AVC_GENERAL_PLUG_DIR_IN = 0, 14 + AVC_GENERAL_PLUG_DIR_OUT = 1, 15 + AVC_GENERAL_PLUG_DIR_COUNT 16 + }; 17 + int avc_general_set_sig_fmt(struct fw_unit *unit, unsigned int rate, 18 + enum avc_general_plug_dir dir, 19 + unsigned short plug); 20 + int avc_general_get_sig_fmt(struct fw_unit *unit, unsigned int *rate, 21 + enum avc_general_plug_dir dir, 22 + unsigned short plug); 23 + int avc_general_get_plug_info(struct fw_unit *unit, unsigned int subunit_type, 24 + unsigned int subunit_id, unsigned int subfunction, 25 + u8 info[AVC_PLUG_INFO_BUF_BYTES]); 5 26 6 27 int fcp_avc_transaction(struct fw_unit *unit, 7 28 const void *command, unsigned int command_size,
+4
sound/firewire/fireworks/Makefile
··· 1 + snd-fireworks-objs := fireworks_transaction.o fireworks_command.o \ 2 + fireworks_stream.o fireworks_proc.o fireworks_midi.o \ 3 + fireworks_pcm.o fireworks_hwdep.o fireworks.o 4 + obj-m += snd-fireworks.o
+357
sound/firewire/fireworks/fireworks.c
··· 1 + /* 2 + * fireworks.c - a part of driver for Fireworks based devices 3 + * 4 + * Copyright (c) 2009-2010 Clemens Ladisch 5 + * Copyright (c) 2013-2014 Takashi Sakamoto 6 + * 7 + * Licensed under the terms of the GNU General Public License, version 2. 8 + */ 9 + 10 + /* 11 + * Fireworks is a board module which Echo Audio produced. This module consists 12 + * of three chipsets: 13 + * - Communication chipset for IEEE1394 PHY/Link and IEC 61883-1/6 14 + * - DSP or/and FPGA for signal processing 15 + * - Flash Memory to store firmwares 16 + */ 17 + 18 + #include "fireworks.h" 19 + 20 + MODULE_DESCRIPTION("Echo Fireworks driver"); 21 + MODULE_AUTHOR("Takashi Sakamoto <o-takashi@sakamocchi.jp>"); 22 + MODULE_LICENSE("GPL v2"); 23 + 24 + static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; 25 + static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; 26 + static bool enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_PNP; 27 + unsigned int snd_efw_resp_buf_size = 1024; 28 + bool snd_efw_resp_buf_debug = false; 29 + 30 + module_param_array(index, int, NULL, 0444); 31 + MODULE_PARM_DESC(index, "card index"); 32 + module_param_array(id, charp, NULL, 0444); 33 + MODULE_PARM_DESC(id, "ID string"); 34 + module_param_array(enable, bool, NULL, 0444); 35 + MODULE_PARM_DESC(enable, "enable Fireworks sound card"); 36 + module_param_named(resp_buf_size, snd_efw_resp_buf_size, uint, 0444); 37 + MODULE_PARM_DESC(resp_buf_size, 38 + "response buffer size (max 4096, default 1024)"); 39 + module_param_named(resp_buf_debug, snd_efw_resp_buf_debug, bool, 0444); 40 + MODULE_PARM_DESC(resp_buf_debug, "store all responses to buffer"); 41 + 42 + static DEFINE_MUTEX(devices_mutex); 43 + static DECLARE_BITMAP(devices_used, SNDRV_CARDS); 44 + 45 + #define VENDOR_LOUD 0x000ff2 46 + #define MODEL_MACKIE_400F 0x00400f 47 + #define MODEL_MACKIE_1200F 0x01200f 48 + 49 + #define VENDOR_ECHO 0x001486 50 + #define MODEL_ECHO_AUDIOFIRE_12 0x00af12 51 + #define MODEL_ECHO_AUDIOFIRE_12HD 0x0af12d 52 + #define 
MODEL_ECHO_AUDIOFIRE_12_APPLE 0x0af12a 53 + /* This is applied for AudioFire8 (until 2009 July) */ 54 + #define MODEL_ECHO_AUDIOFIRE_8 0x000af8 55 + #define MODEL_ECHO_AUDIOFIRE_2 0x000af2 56 + #define MODEL_ECHO_AUDIOFIRE_4 0x000af4 57 + /* AudioFire9 is applied for AudioFire8(since 2009 July) and AudioFirePre8 */ 58 + #define MODEL_ECHO_AUDIOFIRE_9 0x000af9 59 + /* unknown as product */ 60 + #define MODEL_ECHO_FIREWORKS_8 0x0000f8 61 + #define MODEL_ECHO_FIREWORKS_HDMI 0x00afd1 62 + 63 + #define VENDOR_GIBSON 0x00075b 64 + /* for Robot Interface Pack of Dark Fire, Dusk Tiger, Les Paul Standard 2010 */ 65 + #define MODEL_GIBSON_RIP 0x00afb2 66 + /* unknown as product */ 67 + #define MODEL_GIBSON_GOLDTOP 0x00afb9 68 + 69 + /* part of hardware capability flags */ 70 + #define FLAG_RESP_ADDR_CHANGABLE 0 71 + 72 + static int 73 + get_hardware_info(struct snd_efw *efw) 74 + { 75 + struct fw_device *fw_dev = fw_parent_device(efw->unit); 76 + struct snd_efw_hwinfo *hwinfo; 77 + char version[12] = {0}; 78 + int err; 79 + 80 + hwinfo = kzalloc(sizeof(struct snd_efw_hwinfo), GFP_KERNEL); 81 + if (hwinfo == NULL) 82 + return -ENOMEM; 83 + 84 + err = snd_efw_command_get_hwinfo(efw, hwinfo); 85 + if (err < 0) 86 + goto end; 87 + 88 + /* firmware version for communication chipset */ 89 + snprintf(version, sizeof(version), "%u.%u", 90 + (hwinfo->arm_version >> 24) & 0xff, 91 + (hwinfo->arm_version >> 16) & 0xff); 92 + if (err < 0) 93 + goto end; 94 + efw->firmware_version = hwinfo->arm_version; 95 + 96 + strcpy(efw->card->driver, "Fireworks"); 97 + strcpy(efw->card->shortname, hwinfo->model_name); 98 + strcpy(efw->card->mixername, hwinfo->model_name); 99 + snprintf(efw->card->longname, sizeof(efw->card->longname), 100 + "%s %s v%s, GUID %08x%08x at %s, S%d", 101 + hwinfo->vendor_name, hwinfo->model_name, version, 102 + hwinfo->guid_hi, hwinfo->guid_lo, 103 + dev_name(&efw->unit->device), 100 << fw_dev->max_speed); 104 + if (err < 0) 105 + goto end; 106 + 107 + if (hwinfo->flags 
& BIT(FLAG_RESP_ADDR_CHANGABLE)) 108 + efw->resp_addr_changable = true; 109 + 110 + efw->supported_sampling_rate = 0; 111 + if ((hwinfo->min_sample_rate <= 22050) 112 + && (22050 <= hwinfo->max_sample_rate)) 113 + efw->supported_sampling_rate |= SNDRV_PCM_RATE_22050; 114 + if ((hwinfo->min_sample_rate <= 32000) 115 + && (32000 <= hwinfo->max_sample_rate)) 116 + efw->supported_sampling_rate |= SNDRV_PCM_RATE_32000; 117 + if ((hwinfo->min_sample_rate <= 44100) 118 + && (44100 <= hwinfo->max_sample_rate)) 119 + efw->supported_sampling_rate |= SNDRV_PCM_RATE_44100; 120 + if ((hwinfo->min_sample_rate <= 48000) 121 + && (48000 <= hwinfo->max_sample_rate)) 122 + efw->supported_sampling_rate |= SNDRV_PCM_RATE_48000; 123 + if ((hwinfo->min_sample_rate <= 88200) 124 + && (88200 <= hwinfo->max_sample_rate)) 125 + efw->supported_sampling_rate |= SNDRV_PCM_RATE_88200; 126 + if ((hwinfo->min_sample_rate <= 96000) 127 + && (96000 <= hwinfo->max_sample_rate)) 128 + efw->supported_sampling_rate |= SNDRV_PCM_RATE_96000; 129 + if ((hwinfo->min_sample_rate <= 176400) 130 + && (176400 <= hwinfo->max_sample_rate)) 131 + efw->supported_sampling_rate |= SNDRV_PCM_RATE_176400; 132 + if ((hwinfo->min_sample_rate <= 192000) 133 + && (192000 <= hwinfo->max_sample_rate)) 134 + efw->supported_sampling_rate |= SNDRV_PCM_RATE_192000; 135 + 136 + /* the number of MIDI ports, not of MIDI conformant data channels */ 137 + if (hwinfo->midi_out_ports > SND_EFW_MAX_MIDI_OUT_PORTS || 138 + hwinfo->midi_in_ports > SND_EFW_MAX_MIDI_IN_PORTS) { 139 + err = -EIO; 140 + goto end; 141 + } 142 + efw->midi_out_ports = hwinfo->midi_out_ports; 143 + efw->midi_in_ports = hwinfo->midi_in_ports; 144 + 145 + if (hwinfo->amdtp_tx_pcm_channels > AMDTP_MAX_CHANNELS_FOR_PCM || 146 + hwinfo->amdtp_tx_pcm_channels_2x > AMDTP_MAX_CHANNELS_FOR_PCM || 147 + hwinfo->amdtp_tx_pcm_channels_4x > AMDTP_MAX_CHANNELS_FOR_PCM || 148 + hwinfo->amdtp_rx_pcm_channels > AMDTP_MAX_CHANNELS_FOR_PCM || 149 + hwinfo->amdtp_rx_pcm_channels_2x 
> AMDTP_MAX_CHANNELS_FOR_PCM || 150 + hwinfo->amdtp_rx_pcm_channels_4x > AMDTP_MAX_CHANNELS_FOR_PCM) { 151 + err = -ENOSYS; 152 + goto end; 153 + } 154 + efw->pcm_capture_channels[0] = hwinfo->amdtp_tx_pcm_channels; 155 + efw->pcm_capture_channels[1] = hwinfo->amdtp_tx_pcm_channels_2x; 156 + efw->pcm_capture_channels[2] = hwinfo->amdtp_tx_pcm_channels_4x; 157 + efw->pcm_playback_channels[0] = hwinfo->amdtp_rx_pcm_channels; 158 + efw->pcm_playback_channels[1] = hwinfo->amdtp_rx_pcm_channels_2x; 159 + efw->pcm_playback_channels[2] = hwinfo->amdtp_rx_pcm_channels_4x; 160 + 161 + /* Hardware metering. */ 162 + if (hwinfo->phys_in_grp_count > HWINFO_MAX_CAPS_GROUPS || 163 + hwinfo->phys_out_grp_count > HWINFO_MAX_CAPS_GROUPS) { 164 + return -EIO; 165 + goto end; 166 + } 167 + efw->phys_in = hwinfo->phys_in; 168 + efw->phys_out = hwinfo->phys_out; 169 + efw->phys_in_grp_count = hwinfo->phys_in_grp_count; 170 + efw->phys_out_grp_count = hwinfo->phys_out_grp_count; 171 + memcpy(&efw->phys_in_grps, hwinfo->phys_in_grps, 172 + sizeof(struct snd_efw_phys_grp) * hwinfo->phys_in_grp_count); 173 + memcpy(&efw->phys_out_grps, hwinfo->phys_out_grps, 174 + sizeof(struct snd_efw_phys_grp) * hwinfo->phys_out_grp_count); 175 + end: 176 + kfree(hwinfo); 177 + return err; 178 + } 179 + 180 + static void 181 + efw_card_free(struct snd_card *card) 182 + { 183 + struct snd_efw *efw = card->private_data; 184 + 185 + if (efw->card_index >= 0) { 186 + mutex_lock(&devices_mutex); 187 + clear_bit(efw->card_index, devices_used); 188 + mutex_unlock(&devices_mutex); 189 + } 190 + 191 + mutex_destroy(&efw->mutex); 192 + kfree(efw->resp_buf); 193 + } 194 + 195 + static int 196 + efw_probe(struct fw_unit *unit, 197 + const struct ieee1394_device_id *entry) 198 + { 199 + struct snd_card *card; 200 + struct snd_efw *efw; 201 + int card_index, err; 202 + 203 + mutex_lock(&devices_mutex); 204 + 205 + /* check registered cards */ 206 + for (card_index = 0; card_index < SNDRV_CARDS; ++card_index) { 207 + 
if (!test_bit(card_index, devices_used) && enable[card_index]) 208 + break; 209 + } 210 + if (card_index >= SNDRV_CARDS) { 211 + err = -ENOENT; 212 + goto end; 213 + } 214 + 215 + err = snd_card_new(&unit->device, index[card_index], id[card_index], 216 + THIS_MODULE, sizeof(struct snd_efw), &card); 217 + if (err < 0) 218 + goto end; 219 + efw = card->private_data; 220 + efw->card_index = card_index; 221 + set_bit(card_index, devices_used); 222 + card->private_free = efw_card_free; 223 + 224 + efw->card = card; 225 + efw->unit = unit; 226 + mutex_init(&efw->mutex); 227 + spin_lock_init(&efw->lock); 228 + init_waitqueue_head(&efw->hwdep_wait); 229 + 230 + /* prepare response buffer */ 231 + snd_efw_resp_buf_size = clamp(snd_efw_resp_buf_size, 232 + SND_EFW_RESPONSE_MAXIMUM_BYTES, 4096U); 233 + efw->resp_buf = kzalloc(snd_efw_resp_buf_size, GFP_KERNEL); 234 + if (efw->resp_buf == NULL) { 235 + err = -ENOMEM; 236 + goto error; 237 + } 238 + efw->pull_ptr = efw->push_ptr = efw->resp_buf; 239 + snd_efw_transaction_add_instance(efw); 240 + 241 + err = get_hardware_info(efw); 242 + if (err < 0) 243 + goto error; 244 + if (entry->model_id == MODEL_ECHO_AUDIOFIRE_9) 245 + efw->is_af9 = true; 246 + 247 + snd_efw_proc_init(efw); 248 + 249 + if (efw->midi_out_ports || efw->midi_in_ports) { 250 + err = snd_efw_create_midi_devices(efw); 251 + if (err < 0) 252 + goto error; 253 + } 254 + 255 + err = snd_efw_create_pcm_devices(efw); 256 + if (err < 0) 257 + goto error; 258 + 259 + err = snd_efw_create_hwdep_device(efw); 260 + if (err < 0) 261 + goto error; 262 + 263 + err = snd_efw_stream_init_duplex(efw); 264 + if (err < 0) 265 + goto error; 266 + 267 + err = snd_card_register(card); 268 + if (err < 0) { 269 + snd_efw_stream_destroy_duplex(efw); 270 + goto error; 271 + } 272 + 273 + dev_set_drvdata(&unit->device, efw); 274 + end: 275 + mutex_unlock(&devices_mutex); 276 + return err; 277 + error: 278 + snd_efw_transaction_remove_instance(efw); 279 + mutex_unlock(&devices_mutex); 
280 + snd_card_free(card); 281 + return err; 282 + } 283 + 284 + static void efw_update(struct fw_unit *unit) 285 + { 286 + struct snd_efw *efw = dev_get_drvdata(&unit->device); 287 + 288 + snd_efw_transaction_bus_reset(efw->unit); 289 + snd_efw_stream_update_duplex(efw); 290 + } 291 + 292 + static void efw_remove(struct fw_unit *unit) 293 + { 294 + struct snd_efw *efw = dev_get_drvdata(&unit->device); 295 + 296 + snd_efw_stream_destroy_duplex(efw); 297 + snd_efw_transaction_remove_instance(efw); 298 + 299 + snd_card_disconnect(efw->card); 300 + snd_card_free_when_closed(efw->card); 301 + } 302 + 303 + static const struct ieee1394_device_id efw_id_table[] = { 304 + SND_EFW_DEV_ENTRY(VENDOR_LOUD, MODEL_MACKIE_400F), 305 + SND_EFW_DEV_ENTRY(VENDOR_LOUD, MODEL_MACKIE_1200F), 306 + SND_EFW_DEV_ENTRY(VENDOR_ECHO, MODEL_ECHO_AUDIOFIRE_8), 307 + SND_EFW_DEV_ENTRY(VENDOR_ECHO, MODEL_ECHO_AUDIOFIRE_12), 308 + SND_EFW_DEV_ENTRY(VENDOR_ECHO, MODEL_ECHO_AUDIOFIRE_12HD), 309 + SND_EFW_DEV_ENTRY(VENDOR_ECHO, MODEL_ECHO_AUDIOFIRE_12_APPLE), 310 + SND_EFW_DEV_ENTRY(VENDOR_ECHO, MODEL_ECHO_AUDIOFIRE_2), 311 + SND_EFW_DEV_ENTRY(VENDOR_ECHO, MODEL_ECHO_AUDIOFIRE_4), 312 + SND_EFW_DEV_ENTRY(VENDOR_ECHO, MODEL_ECHO_AUDIOFIRE_9), 313 + SND_EFW_DEV_ENTRY(VENDOR_ECHO, MODEL_ECHO_FIREWORKS_8), 314 + SND_EFW_DEV_ENTRY(VENDOR_ECHO, MODEL_ECHO_FIREWORKS_HDMI), 315 + SND_EFW_DEV_ENTRY(VENDOR_GIBSON, MODEL_GIBSON_RIP), 316 + SND_EFW_DEV_ENTRY(VENDOR_GIBSON, MODEL_GIBSON_GOLDTOP), 317 + {} 318 + }; 319 + MODULE_DEVICE_TABLE(ieee1394, efw_id_table); 320 + 321 + static struct fw_driver efw_driver = { 322 + .driver = { 323 + .owner = THIS_MODULE, 324 + .name = "snd-fireworks", 325 + .bus = &fw_bus_type, 326 + }, 327 + .probe = efw_probe, 328 + .update = efw_update, 329 + .remove = efw_remove, 330 + .id_table = efw_id_table, 331 + }; 332 + 333 + static int __init snd_efw_init(void) 334 + { 335 + int err; 336 + 337 + err = snd_efw_transaction_register(); 338 + if (err < 0) 339 + goto end; 340 + 341 + 
err = driver_register(&efw_driver.driver); 342 + if (err < 0) 343 + snd_efw_transaction_unregister(); 344 + 345 + end: 346 + return err; 347 + } 348 + 349 + static void __exit snd_efw_exit(void) 350 + { 351 + snd_efw_transaction_unregister(); 352 + driver_unregister(&efw_driver.driver); 353 + mutex_destroy(&devices_mutex); 354 + } 355 + 356 + module_init(snd_efw_init); 357 + module_exit(snd_efw_exit);
+233
sound/firewire/fireworks/fireworks.h
··· 1 + /* 2 + * fireworks.h - a part of driver for Fireworks based devices 3 + * 4 + * Copyright (c) 2009-2010 Clemens Ladisch 5 + * Copyright (c) 2013-2014 Takashi Sakamoto 6 + * 7 + * Licensed under the terms of the GNU General Public License, version 2. 8 + */ 9 + #ifndef SOUND_FIREWORKS_H_INCLUDED 10 + #define SOUND_FIREWORKS_H_INCLUDED 11 + 12 + #include <linux/compat.h> 13 + #include <linux/device.h> 14 + #include <linux/firewire.h> 15 + #include <linux/firewire-constants.h> 16 + #include <linux/module.h> 17 + #include <linux/mod_devicetable.h> 18 + #include <linux/delay.h> 19 + #include <linux/slab.h> 20 + 21 + #include <sound/core.h> 22 + #include <sound/initval.h> 23 + #include <sound/pcm.h> 24 + #include <sound/info.h> 25 + #include <sound/rawmidi.h> 26 + #include <sound/pcm_params.h> 27 + #include <sound/firewire.h> 28 + #include <sound/hwdep.h> 29 + 30 + #include "../packets-buffer.h" 31 + #include "../iso-resources.h" 32 + #include "../amdtp.h" 33 + #include "../cmp.h" 34 + #include "../lib.h" 35 + 36 + #define SND_EFW_MAX_MIDI_OUT_PORTS 2 37 + #define SND_EFW_MAX_MIDI_IN_PORTS 2 38 + 39 + #define SND_EFW_MULTIPLIER_MODES 3 40 + #define HWINFO_NAME_SIZE_BYTES 32 41 + #define HWINFO_MAX_CAPS_GROUPS 8 42 + 43 + /* 44 + * This should be greater than maximum bytes for EFW response content. 45 + * Currently response against command for isochronous channel mapping is 46 + * confirmed to be the maximum one. But for flexibility, use maximum data 47 + * payload for asynchronous primary packets at S100 (Cable base rate) in 48 + * IEEE Std 1394-1995. 
49 + */ 50 + #define SND_EFW_RESPONSE_MAXIMUM_BYTES 0x200U 51 + 52 + extern unsigned int snd_efw_resp_buf_size; 53 + extern bool snd_efw_resp_buf_debug; 54 + 55 + struct snd_efw_phys_grp { 56 + u8 type; /* see enum snd_efw_grp_type */ 57 + u8 count; 58 + } __packed; 59 + 60 + struct snd_efw { 61 + struct snd_card *card; 62 + struct fw_unit *unit; 63 + int card_index; 64 + 65 + struct mutex mutex; 66 + spinlock_t lock; 67 + 68 + /* for transaction */ 69 + u32 seqnum; 70 + bool resp_addr_changable; 71 + 72 + /* for quirks */ 73 + bool is_af9; 74 + u32 firmware_version; 75 + 76 + unsigned int midi_in_ports; 77 + unsigned int midi_out_ports; 78 + 79 + unsigned int supported_sampling_rate; 80 + unsigned int pcm_capture_channels[SND_EFW_MULTIPLIER_MODES]; 81 + unsigned int pcm_playback_channels[SND_EFW_MULTIPLIER_MODES]; 82 + 83 + struct amdtp_stream *master; 84 + struct amdtp_stream tx_stream; 85 + struct amdtp_stream rx_stream; 86 + struct cmp_connection out_conn; 87 + struct cmp_connection in_conn; 88 + atomic_t capture_substreams; 89 + atomic_t playback_substreams; 90 + 91 + /* hardware metering parameters */ 92 + unsigned int phys_out; 93 + unsigned int phys_in; 94 + unsigned int phys_out_grp_count; 95 + unsigned int phys_in_grp_count; 96 + struct snd_efw_phys_grp phys_out_grps[HWINFO_MAX_CAPS_GROUPS]; 97 + struct snd_efw_phys_grp phys_in_grps[HWINFO_MAX_CAPS_GROUPS]; 98 + 99 + /* for uapi */ 100 + int dev_lock_count; 101 + bool dev_lock_changed; 102 + wait_queue_head_t hwdep_wait; 103 + 104 + /* response queue */ 105 + u8 *resp_buf; 106 + u8 *pull_ptr; 107 + u8 *push_ptr; 108 + unsigned int resp_queues; 109 + }; 110 + 111 + int snd_efw_transaction_cmd(struct fw_unit *unit, 112 + const void *cmd, unsigned int size); 113 + int snd_efw_transaction_run(struct fw_unit *unit, 114 + const void *cmd, unsigned int cmd_size, 115 + void *resp, unsigned int resp_size); 116 + int snd_efw_transaction_register(void); 117 + void snd_efw_transaction_unregister(void); 118 + void 
snd_efw_transaction_bus_reset(struct fw_unit *unit); 119 + void snd_efw_transaction_add_instance(struct snd_efw *efw); 120 + void snd_efw_transaction_remove_instance(struct snd_efw *efw); 121 + 122 + struct snd_efw_hwinfo { 123 + u32 flags; 124 + u32 guid_hi; 125 + u32 guid_lo; 126 + u32 type; 127 + u32 version; 128 + char vendor_name[HWINFO_NAME_SIZE_BYTES]; 129 + char model_name[HWINFO_NAME_SIZE_BYTES]; 130 + u32 supported_clocks; 131 + u32 amdtp_rx_pcm_channels; 132 + u32 amdtp_tx_pcm_channels; 133 + u32 phys_out; 134 + u32 phys_in; 135 + u32 phys_out_grp_count; 136 + struct snd_efw_phys_grp phys_out_grps[HWINFO_MAX_CAPS_GROUPS]; 137 + u32 phys_in_grp_count; 138 + struct snd_efw_phys_grp phys_in_grps[HWINFO_MAX_CAPS_GROUPS]; 139 + u32 midi_out_ports; 140 + u32 midi_in_ports; 141 + u32 max_sample_rate; 142 + u32 min_sample_rate; 143 + u32 dsp_version; 144 + u32 arm_version; 145 + u32 mixer_playback_channels; 146 + u32 mixer_capture_channels; 147 + u32 fpga_version; 148 + u32 amdtp_rx_pcm_channels_2x; 149 + u32 amdtp_tx_pcm_channels_2x; 150 + u32 amdtp_rx_pcm_channels_4x; 151 + u32 amdtp_tx_pcm_channels_4x; 152 + u32 reserved[16]; 153 + } __packed; 154 + enum snd_efw_grp_type { 155 + SND_EFW_CH_TYPE_ANALOG = 0, 156 + SND_EFW_CH_TYPE_SPDIF = 1, 157 + SND_EFW_CH_TYPE_ADAT = 2, 158 + SND_EFW_CH_TYPE_SPDIF_OR_ADAT = 3, 159 + SND_EFW_CH_TYPE_ANALOG_MIRRORING = 4, 160 + SND_EFW_CH_TYPE_HEADPHONES = 5, 161 + SND_EFW_CH_TYPE_I2S = 6, 162 + SND_EFW_CH_TYPE_GUITAR = 7, 163 + SND_EFW_CH_TYPE_PIEZO_GUITAR = 8, 164 + SND_EFW_CH_TYPE_GUITAR_STRING = 9, 165 + SND_EFW_CH_TYPE_VIRTUAL = 0x10000, 166 + SND_EFW_CH_TYPE_DUMMY 167 + }; 168 + struct snd_efw_phys_meters { 169 + u32 status; /* guitar state/midi signal/clock input detect */ 170 + u32 reserved0; 171 + u32 reserved1; 172 + u32 reserved2; 173 + u32 reserved3; 174 + u32 out_meters; 175 + u32 in_meters; 176 + u32 reserved4; 177 + u32 reserved5; 178 + u32 values[0]; 179 + } __packed; 180 + enum snd_efw_clock_source { 181 + 
	SND_EFW_CLOCK_SOURCE_INTERNAL = 0,
	SND_EFW_CLOCK_SOURCE_SYTMATCH = 1,
	SND_EFW_CLOCK_SOURCE_WORDCLOCK = 2,
	SND_EFW_CLOCK_SOURCE_SPDIF = 3,
	SND_EFW_CLOCK_SOURCE_ADAT_1 = 4,
	SND_EFW_CLOCK_SOURCE_ADAT_2 = 5,
	SND_EFW_CLOCK_SOURCE_CONTINUOUS = 6 /* internal variable clock */
};

/* mode of transmitted packets (see snd_efw_command_set_tx_mode()) */
enum snd_efw_transport_mode {
	SND_EFW_TRANSPORT_MODE_WINDOWS = 0,
	SND_EFW_TRANSPORT_MODE_IEC61883 = 1,
};

/* EFW command wrappers (fireworks_command.c) */
int snd_efw_command_set_resp_addr(struct snd_efw *efw,
				  u16 addr_high, u32 addr_low);
int snd_efw_command_set_tx_mode(struct snd_efw *efw,
				enum snd_efw_transport_mode mode);
int snd_efw_command_get_hwinfo(struct snd_efw *efw,
			       struct snd_efw_hwinfo *hwinfo);
int snd_efw_command_get_phys_meters(struct snd_efw *efw,
				    struct snd_efw_phys_meters *meters,
				    unsigned int len);
int snd_efw_command_get_clock_source(struct snd_efw *efw,
				     enum snd_efw_clock_source *source);
int snd_efw_command_get_sampling_rate(struct snd_efw *efw, unsigned int *rate);
int snd_efw_command_set_sampling_rate(struct snd_efw *efw, unsigned int rate);

/* duplex AMDTP stream management and stream locking */
int snd_efw_stream_init_duplex(struct snd_efw *efw);
int snd_efw_stream_start_duplex(struct snd_efw *efw, unsigned int rate);
void snd_efw_stream_stop_duplex(struct snd_efw *efw);
void snd_efw_stream_update_duplex(struct snd_efw *efw);
void snd_efw_stream_destroy_duplex(struct snd_efw *efw);
void snd_efw_stream_lock_changed(struct snd_efw *efw);
int snd_efw_stream_lock_try(struct snd_efw *efw);
void snd_efw_stream_lock_release(struct snd_efw *efw);

/* proc interface */
void snd_efw_proc_init(struct snd_efw *efw);

/* rawmidi devices (fireworks_midi.c) */
int snd_efw_create_midi_devices(struct snd_efw *efw);

/* PCM devices (fireworks_pcm.c) */
int snd_efw_create_pcm_devices(struct snd_efw *efw);
int snd_efw_get_multiplier_mode(unsigned int sampling_rate, unsigned int *mode);

/* hwdep device (fireworks_hwdep.c) */
int snd_efw_create_hwdep_device(struct snd_efw *efw);

/* device entry matched by IEEE 1394 vendor and model ID */
#define SND_EFW_DEV_ENTRY(vendor, model) \
{ \
	.match_flags = IEEE1394_MATCH_VENDOR_ID | \
		       IEEE1394_MATCH_MODEL_ID, \
	.vendor_id = vendor,\
	.model_id = model \
}

#endif
+372
sound/firewire/fireworks/fireworks_command.c
··· 1 + /* 2 + * fireworks_command.c - a part of driver for Fireworks based devices 3 + * 4 + * Copyright (c) 2013-2014 Takashi Sakamoto 5 + * 6 + * Licensed under the terms of the GNU General Public License, version 2. 7 + */ 8 + 9 + #include "./fireworks.h" 10 + 11 + /* 12 + * This driver uses transaction version 1 or later to use extended hardware 13 + * information. Then too old devices are not available. 14 + * 15 + * Each commands are not required to have continuous sequence numbers. This 16 + * number is just used to match command and response. 17 + * 18 + * This module support a part of commands. Please see FFADO if you want to see 19 + * whole commands. But there are some commands which FFADO don't implement. 20 + * 21 + * Fireworks also supports AV/C general commands and AV/C Stream Format 22 + * Information commands. But this module don't use them. 23 + */ 24 + 25 + #define KERNEL_SEQNUM_MIN (SND_EFW_TRANSACTION_USER_SEQNUM_MAX + 2) 26 + #define KERNEL_SEQNUM_MAX ((u32)~0) 27 + 28 + /* for clock source and sampling rate */ 29 + struct efc_clock { 30 + u32 source; 31 + u32 sampling_rate; 32 + u32 index; 33 + }; 34 + 35 + /* command categories */ 36 + enum efc_category { 37 + EFC_CAT_HWINFO = 0, 38 + EFC_CAT_TRANSPORT = 2, 39 + EFC_CAT_HWCTL = 3, 40 + }; 41 + 42 + /* hardware info category commands */ 43 + enum efc_cmd_hwinfo { 44 + EFC_CMD_HWINFO_GET_CAPS = 0, 45 + EFC_CMD_HWINFO_GET_POLLED = 1, 46 + EFC_CMD_HWINFO_SET_RESP_ADDR = 2 47 + }; 48 + 49 + enum efc_cmd_transport { 50 + EFC_CMD_TRANSPORT_SET_TX_MODE = 0 51 + }; 52 + 53 + /* hardware control category commands */ 54 + enum efc_cmd_hwctl { 55 + EFC_CMD_HWCTL_SET_CLOCK = 0, 56 + EFC_CMD_HWCTL_GET_CLOCK = 1, 57 + EFC_CMD_HWCTL_IDENTIFY = 5 58 + }; 59 + 60 + /* return values in response */ 61 + enum efr_status { 62 + EFR_STATUS_OK = 0, 63 + EFR_STATUS_BAD = 1, 64 + EFR_STATUS_BAD_COMMAND = 2, 65 + EFR_STATUS_COMM_ERR = 3, 66 + EFR_STATUS_BAD_QUAD_COUNT = 4, 67 + EFR_STATUS_UNSUPPORTED = 5, 68 + 
EFR_STATUS_1394_TIMEOUT = 6, 69 + EFR_STATUS_DSP_TIMEOUT = 7, 70 + EFR_STATUS_BAD_RATE = 8, 71 + EFR_STATUS_BAD_CLOCK = 9, 72 + EFR_STATUS_BAD_CHANNEL = 10, 73 + EFR_STATUS_BAD_PAN = 11, 74 + EFR_STATUS_FLASH_BUSY = 12, 75 + EFR_STATUS_BAD_MIRROR = 13, 76 + EFR_STATUS_BAD_LED = 14, 77 + EFR_STATUS_BAD_PARAMETER = 15, 78 + EFR_STATUS_INCOMPLETE = 0x80000000 79 + }; 80 + 81 + static const char *const efr_status_names[] = { 82 + [EFR_STATUS_OK] = "OK", 83 + [EFR_STATUS_BAD] = "bad", 84 + [EFR_STATUS_BAD_COMMAND] = "bad command", 85 + [EFR_STATUS_COMM_ERR] = "comm err", 86 + [EFR_STATUS_BAD_QUAD_COUNT] = "bad quad count", 87 + [EFR_STATUS_UNSUPPORTED] = "unsupported", 88 + [EFR_STATUS_1394_TIMEOUT] = "1394 timeout", 89 + [EFR_STATUS_DSP_TIMEOUT] = "DSP timeout", 90 + [EFR_STATUS_BAD_RATE] = "bad rate", 91 + [EFR_STATUS_BAD_CLOCK] = "bad clock", 92 + [EFR_STATUS_BAD_CHANNEL] = "bad channel", 93 + [EFR_STATUS_BAD_PAN] = "bad pan", 94 + [EFR_STATUS_FLASH_BUSY] = "flash busy", 95 + [EFR_STATUS_BAD_MIRROR] = "bad mirror", 96 + [EFR_STATUS_BAD_LED] = "bad LED", 97 + [EFR_STATUS_BAD_PARAMETER] = "bad parameter", 98 + [EFR_STATUS_BAD_PARAMETER + 1] = "incomplete" 99 + }; 100 + 101 + static int 102 + efw_transaction(struct snd_efw *efw, unsigned int category, 103 + unsigned int command, 104 + const __be32 *params, unsigned int param_bytes, 105 + const __be32 *resp, unsigned int resp_bytes) 106 + { 107 + struct snd_efw_transaction *header; 108 + __be32 *buf; 109 + u32 seqnum; 110 + unsigned int buf_bytes, cmd_bytes; 111 + int err; 112 + 113 + /* calculate buffer size*/ 114 + buf_bytes = sizeof(struct snd_efw_transaction) + 115 + max(param_bytes, resp_bytes); 116 + 117 + /* keep buffer */ 118 + buf = kzalloc(buf_bytes, GFP_KERNEL); 119 + if (buf == NULL) 120 + return -ENOMEM; 121 + 122 + /* to keep consistency of sequence number */ 123 + spin_lock(&efw->lock); 124 + if ((efw->seqnum < KERNEL_SEQNUM_MIN) || 125 + (efw->seqnum >= KERNEL_SEQNUM_MAX - 2)) 126 + efw->seqnum = 
KERNEL_SEQNUM_MIN; 127 + else 128 + efw->seqnum += 2; 129 + seqnum = efw->seqnum; 130 + spin_unlock(&efw->lock); 131 + 132 + /* fill transaction header fields */ 133 + cmd_bytes = sizeof(struct snd_efw_transaction) + param_bytes; 134 + header = (struct snd_efw_transaction *)buf; 135 + header->length = cpu_to_be32(cmd_bytes / sizeof(__be32)); 136 + header->version = cpu_to_be32(1); 137 + header->seqnum = cpu_to_be32(seqnum); 138 + header->category = cpu_to_be32(category); 139 + header->command = cpu_to_be32(command); 140 + header->status = 0; 141 + 142 + /* fill transaction command parameters */ 143 + memcpy(header->params, params, param_bytes); 144 + 145 + err = snd_efw_transaction_run(efw->unit, buf, cmd_bytes, 146 + buf, buf_bytes); 147 + if (err < 0) 148 + goto end; 149 + 150 + /* check transaction header fields */ 151 + if ((be32_to_cpu(header->version) < 1) || 152 + (be32_to_cpu(header->category) != category) || 153 + (be32_to_cpu(header->command) != command) || 154 + (be32_to_cpu(header->status) != EFR_STATUS_OK)) { 155 + dev_err(&efw->unit->device, "EFW command failed [%u/%u]: %s\n", 156 + be32_to_cpu(header->category), 157 + be32_to_cpu(header->command), 158 + efr_status_names[be32_to_cpu(header->status)]); 159 + err = -EIO; 160 + goto end; 161 + } 162 + 163 + if (resp == NULL) 164 + goto end; 165 + 166 + /* fill transaction response parameters */ 167 + memset((void *)resp, 0, resp_bytes); 168 + resp_bytes = min_t(unsigned int, resp_bytes, 169 + be32_to_cpu(header->length) * sizeof(__be32) - 170 + sizeof(struct snd_efw_transaction)); 171 + memcpy((void *)resp, &buf[6], resp_bytes); 172 + end: 173 + kfree(buf); 174 + return err; 175 + } 176 + 177 + /* 178 + * The address in host system for transaction response is changable when the 179 + * device supports. struct hwinfo.flags includes its flag. The default is 180 + * MEMORY_SPACE_EFW_RESPONSE. 
181 + */ 182 + int snd_efw_command_set_resp_addr(struct snd_efw *efw, 183 + u16 addr_high, u32 addr_low) 184 + { 185 + __be32 addr[2]; 186 + 187 + addr[0] = cpu_to_be32(addr_high); 188 + addr[1] = cpu_to_be32(addr_low); 189 + 190 + if (!efw->resp_addr_changable) 191 + return -ENOSYS; 192 + 193 + return efw_transaction(efw, EFC_CAT_HWCTL, 194 + EFC_CMD_HWINFO_SET_RESP_ADDR, 195 + addr, sizeof(addr), NULL, 0); 196 + } 197 + 198 + /* 199 + * This is for timestamp processing. In Windows mode, all 32bit fields of second 200 + * CIP header in AMDTP transmit packet is used for 'presentation timestamp'. In 201 + * 'no data' packet the value of this field is 0x90ffffff. 202 + */ 203 + int snd_efw_command_set_tx_mode(struct snd_efw *efw, 204 + enum snd_efw_transport_mode mode) 205 + { 206 + __be32 param = cpu_to_be32(mode); 207 + return efw_transaction(efw, EFC_CAT_TRANSPORT, 208 + EFC_CMD_TRANSPORT_SET_TX_MODE, 209 + &param, sizeof(param), NULL, 0); 210 + } 211 + 212 + int snd_efw_command_get_hwinfo(struct snd_efw *efw, 213 + struct snd_efw_hwinfo *hwinfo) 214 + { 215 + int err; 216 + 217 + err = efw_transaction(efw, EFC_CAT_HWINFO, 218 + EFC_CMD_HWINFO_GET_CAPS, 219 + NULL, 0, (__be32 *)hwinfo, sizeof(*hwinfo)); 220 + if (err < 0) 221 + goto end; 222 + 223 + be32_to_cpus(&hwinfo->flags); 224 + be32_to_cpus(&hwinfo->guid_hi); 225 + be32_to_cpus(&hwinfo->guid_lo); 226 + be32_to_cpus(&hwinfo->type); 227 + be32_to_cpus(&hwinfo->version); 228 + be32_to_cpus(&hwinfo->supported_clocks); 229 + be32_to_cpus(&hwinfo->amdtp_rx_pcm_channels); 230 + be32_to_cpus(&hwinfo->amdtp_tx_pcm_channels); 231 + be32_to_cpus(&hwinfo->phys_out); 232 + be32_to_cpus(&hwinfo->phys_in); 233 + be32_to_cpus(&hwinfo->phys_out_grp_count); 234 + be32_to_cpus(&hwinfo->phys_in_grp_count); 235 + be32_to_cpus(&hwinfo->midi_out_ports); 236 + be32_to_cpus(&hwinfo->midi_in_ports); 237 + be32_to_cpus(&hwinfo->max_sample_rate); 238 + be32_to_cpus(&hwinfo->min_sample_rate); 239 + be32_to_cpus(&hwinfo->dsp_version); 
240 + be32_to_cpus(&hwinfo->arm_version); 241 + be32_to_cpus(&hwinfo->mixer_playback_channels); 242 + be32_to_cpus(&hwinfo->mixer_capture_channels); 243 + be32_to_cpus(&hwinfo->fpga_version); 244 + be32_to_cpus(&hwinfo->amdtp_rx_pcm_channels_2x); 245 + be32_to_cpus(&hwinfo->amdtp_tx_pcm_channels_2x); 246 + be32_to_cpus(&hwinfo->amdtp_rx_pcm_channels_4x); 247 + be32_to_cpus(&hwinfo->amdtp_tx_pcm_channels_4x); 248 + 249 + /* ensure terminated */ 250 + hwinfo->vendor_name[HWINFO_NAME_SIZE_BYTES - 1] = '\0'; 251 + hwinfo->model_name[HWINFO_NAME_SIZE_BYTES - 1] = '\0'; 252 + end: 253 + return err; 254 + } 255 + 256 + int snd_efw_command_get_phys_meters(struct snd_efw *efw, 257 + struct snd_efw_phys_meters *meters, 258 + unsigned int len) 259 + { 260 + __be32 *buf = (__be32 *)meters; 261 + unsigned int i; 262 + int err; 263 + 264 + err = efw_transaction(efw, EFC_CAT_HWINFO, 265 + EFC_CMD_HWINFO_GET_POLLED, 266 + NULL, 0, (__be32 *)meters, len); 267 + if (err >= 0) 268 + for (i = 0; i < len / sizeof(u32); i++) 269 + be32_to_cpus(&buf[i]); 270 + 271 + return err; 272 + } 273 + 274 + static int 275 + command_get_clock(struct snd_efw *efw, struct efc_clock *clock) 276 + { 277 + int err; 278 + 279 + err = efw_transaction(efw, EFC_CAT_HWCTL, 280 + EFC_CMD_HWCTL_GET_CLOCK, 281 + NULL, 0, 282 + (__be32 *)clock, sizeof(struct efc_clock)); 283 + if (err >= 0) { 284 + be32_to_cpus(&clock->source); 285 + be32_to_cpus(&clock->sampling_rate); 286 + be32_to_cpus(&clock->index); 287 + } 288 + 289 + return err; 290 + } 291 + 292 + /* give UINT_MAX if set nothing */ 293 + static int 294 + command_set_clock(struct snd_efw *efw, 295 + unsigned int source, unsigned int rate) 296 + { 297 + struct efc_clock clock = {0}; 298 + int err; 299 + 300 + /* check arguments */ 301 + if ((source == UINT_MAX) && (rate == UINT_MAX)) { 302 + err = -EINVAL; 303 + goto end; 304 + } 305 + 306 + /* get current status */ 307 + err = command_get_clock(efw, &clock); 308 + if (err < 0) 309 + goto end; 310 + 311 + 
/* no need */ 312 + if ((clock.source == source) && (clock.sampling_rate == rate)) 313 + goto end; 314 + 315 + /* set params */ 316 + if ((source != UINT_MAX) && (clock.source != source)) 317 + clock.source = source; 318 + if ((rate != UINT_MAX) && (clock.sampling_rate != rate)) 319 + clock.sampling_rate = rate; 320 + clock.index = 0; 321 + 322 + cpu_to_be32s(&clock.source); 323 + cpu_to_be32s(&clock.sampling_rate); 324 + cpu_to_be32s(&clock.index); 325 + 326 + err = efw_transaction(efw, EFC_CAT_HWCTL, 327 + EFC_CMD_HWCTL_SET_CLOCK, 328 + (__be32 *)&clock, sizeof(struct efc_clock), 329 + NULL, 0); 330 + if (err < 0) 331 + goto end; 332 + 333 + /* 334 + * With firmware version 5.8, just after changing clock state, these 335 + * parameters are not immediately retrieved by get command. In my 336 + * trial, there needs to be 100msec to get changed parameters. 337 + */ 338 + msleep(150); 339 + end: 340 + return err; 341 + } 342 + 343 + int snd_efw_command_get_clock_source(struct snd_efw *efw, 344 + enum snd_efw_clock_source *source) 345 + { 346 + int err; 347 + struct efc_clock clock = {0}; 348 + 349 + err = command_get_clock(efw, &clock); 350 + if (err >= 0) 351 + *source = clock.source; 352 + 353 + return err; 354 + } 355 + 356 + int snd_efw_command_get_sampling_rate(struct snd_efw *efw, unsigned int *rate) 357 + { 358 + int err; 359 + struct efc_clock clock = {0}; 360 + 361 + err = command_get_clock(efw, &clock); 362 + if (err >= 0) 363 + *rate = clock.sampling_rate; 364 + 365 + return err; 366 + } 367 + 368 + int snd_efw_command_set_sampling_rate(struct snd_efw *efw, unsigned int rate) 369 + { 370 + return command_set_clock(efw, UINT_MAX, rate); 371 + } 372 +
+298
sound/firewire/fireworks/fireworks_hwdep.c
/*
 * fireworks_hwdep.c - a part of driver for Fireworks based devices
 *
 * Copyright (c) 2013-2014 Takashi Sakamoto
 *
 * Licensed under the terms of the GNU General Public License, version 2.
 */

/*
 * This code has five functionalities:
 *
 * 1. get information about the firewire node
 * 2. get notification about starting/stopping stream
 * 3. lock/unlock streaming
 * 4. transmit command of EFW transaction
 * 5. receive response of EFW transaction
 */

#include "fireworks.h"

/*
 * Drain queued EFW responses from the driver's ring buffer into the user's
 * buffer, prefixed by a SNDRV_FIREWIRE_EVENT_EFW_RESPONSE type marker.
 *
 * Caller holds efw->lock (taken with spin_lock_irq() in hwdep_read()).
 *
 * NOTE(review): copy_to_user() may fault and sleep, yet this runs in atomic
 * context under that spinlock — copying to user space here should be
 * confirmed/reworked so the copy happens outside the lock.
 */
static long
hwdep_read_resp_buf(struct snd_efw *efw, char __user *buf, long remained,
		    loff_t *offset)
{
	unsigned int length, till_end, type;
	struct snd_efw_transaction *t;
	long count = 0;

	/* the user buffer must have room for the marker plus one header */
	if (remained < sizeof(type) + sizeof(struct snd_efw_transaction))
		return -ENOSPC;

	/* data type is SNDRV_FIREWIRE_EVENT_EFW_RESPONSE */
	type = SNDRV_FIREWIRE_EVENT_EFW_RESPONSE;
	if (copy_to_user(buf, &type, sizeof(type)))
		return -EFAULT;
	remained -= sizeof(type);
	buf += sizeof(type);

	/* write into buffer as many responses as possible */
	while (efw->resp_queues > 0) {
		t = (struct snd_efw_transaction *)(efw->pull_ptr);
		length = be32_to_cpu(t->length) * sizeof(__be32);

		/* confirm enough space for this response */
		if (remained < length)
			break;

		/* copy from ring buffer to user buffer */
		while (length > 0) {
			/* bytes until the ring buffer wraps around */
			till_end = snd_efw_resp_buf_size -
				(unsigned int)(efw->pull_ptr - efw->resp_buf);
			till_end = min_t(unsigned int, length, till_end);

			if (copy_to_user(buf, efw->pull_ptr, till_end))
				return -EFAULT;

			efw->pull_ptr += till_end;
			if (efw->pull_ptr >= efw->resp_buf +
					     snd_efw_resp_buf_size)
				efw->pull_ptr = efw->resp_buf;

			length -= till_end;
			buf += till_end;
			count += till_end;
			remained -= till_end;
		}

		efw->resp_queues--;
	}

	return count;
}

/*
 * Report a change of the stream-lock status as a
 * SNDRV_FIREWIRE_EVENT_LOCK_STATUS event and clear the 'changed' flag.
 *
 * Caller holds efw->lock; the same copy_to_user()-in-atomic-context caveat
 * as hwdep_read_resp_buf() applies.
 */
static long
hwdep_read_locked(struct snd_efw *efw, char __user *buf, long count,
		  loff_t *offset)
{
	union snd_firewire_event event;

	memset(&event, 0, sizeof(event));

	event.lock_status.type = SNDRV_FIREWIRE_EVENT_LOCK_STATUS;
	event.lock_status.status = (efw->dev_lock_count > 0);
	efw->dev_lock_changed = false;

	count = min_t(long, count, sizeof(event.lock_status));

	if (copy_to_user(buf, &event, count))
		return -EFAULT;

	return count;
}

/*
 * Blocking read: sleep until either the lock status changes or an EFW
 * response is queued, then deliver whichever event is pending. Lock status
 * changes take precedence over queued responses.
 */
static long
hwdep_read(struct snd_hwdep *hwdep, char __user *buf, long count,
	   loff_t *offset)
{
	struct snd_efw *efw = hwdep->private_data;
	DEFINE_WAIT(wait);

	spin_lock_irq(&efw->lock);

	while ((!efw->dev_lock_changed) && (efw->resp_queues == 0)) {
		prepare_to_wait(&efw->hwdep_wait, &wait, TASK_INTERRUPTIBLE);
		spin_unlock_irq(&efw->lock);
		schedule();
		finish_wait(&efw->hwdep_wait, &wait);
		if (signal_pending(current))
			return -ERESTARTSYS;
		spin_lock_irq(&efw->lock);
	}

	if (efw->dev_lock_changed)
		count = hwdep_read_locked(efw, buf, count, offset);
	else if (efw->resp_queues > 0)
		count = hwdep_read_resp_buf(efw, buf, count, offset);

	spin_unlock_irq(&efw->lock);

	return count;
}

/*
 * Transmit one user-built EFW command. Sequence numbers above
 * SND_EFW_TRANSACTION_USER_SEQNUM_MAX are reserved for the kernel and
 * rejected with -EINVAL.
 */
static long
hwdep_write(struct snd_hwdep *hwdep, const char __user *data, long count,
	    loff_t *offset)
{
	struct snd_efw *efw = hwdep->private_data;
	u32 seqnum;
	u8 *buf;

	if (count < sizeof(struct snd_efw_transaction) ||
	    SND_EFW_RESPONSE_MAXIMUM_BYTES < count)
		return -EINVAL;

	buf = memdup_user(data, count);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	/* check seqnum is not for kernel-land */
	seqnum = be32_to_cpu(((struct snd_efw_transaction *)buf)->seqnum);
	if (seqnum > SND_EFW_TRANSACTION_USER_SEQNUM_MAX) {
		count = -EINVAL;
		goto end;
	}

	if (snd_efw_transaction_cmd(efw->unit, buf, count) < 0)
		count = -EIO;
end:
	kfree(buf);
	return count;
}

/* Writing is always possible; reading only when an event is pending. */
static unsigned int
hwdep_poll(struct snd_hwdep *hwdep, struct file *file, poll_table *wait)
{
	struct snd_efw *efw = hwdep->private_data;
	unsigned int events;

	poll_wait(file, &efw->hwdep_wait, wait);

	spin_lock_irq(&efw->lock);
	if (efw->dev_lock_changed || (efw->resp_queues > 0))
		events = POLLIN | POLLRDNORM;
	else
		events = 0;
	spin_unlock_irq(&efw->lock);

	return events | POLLOUT;
}

/* SNDRV_FIREWIRE_IOCTL_GET_INFO: identify the node behind this hwdep. */
static int
hwdep_get_info(struct snd_efw *efw, void __user *arg)
{
	struct fw_device *dev = fw_parent_device(efw->unit);
	struct snd_firewire_get_info info;

	memset(&info, 0, sizeof(info));
	info.type = SNDRV_FIREWIRE_TYPE_FIREWORKS;
	info.card = dev->card->index;
	/* GUID is quadlets 3 and 4 of the config ROM, kept big-endian */
	*(__be32 *)&info.guid[0] = cpu_to_be32(dev->config_rom[3]);
	*(__be32 *)&info.guid[4] = cpu_to_be32(dev->config_rom[4]);
	strlcpy(info.device_name, dev_name(&dev->device),
		sizeof(info.device_name));

	if (copy_to_user(arg, &info, sizeof(info)))
		return -EFAULT;

	return 0;
}

/*
 * Take the exclusive hwdep lock; dev_lock_count == -1 marks "locked via
 * hwdep". Fails with -EBUSY while the lock is already held
 * (dev_lock_count != 0).
 */
static int
hwdep_lock(struct snd_efw *efw)
{
	int err;

	spin_lock_irq(&efw->lock);

	if (efw->dev_lock_count == 0) {
		efw->dev_lock_count = -1;
		err = 0;
	} else {
		err = -EBUSY;
	}

	spin_unlock_irq(&efw->lock);

	return err;
}

/* Release the exclusive hwdep lock taken by hwdep_lock(). */
static int
hwdep_unlock(struct snd_efw *efw)
{
	int err;

	spin_lock_irq(&efw->lock);

	if (efw->dev_lock_count == -1) {
		efw->dev_lock_count = 0;
		err = 0;
	} else {
		err = -EBADFD;
	}

	spin_unlock_irq(&efw->lock);

	return err;
}

/* Drop a still-held exclusive lock when the hwdep node is closed. */
static int
hwdep_release(struct snd_hwdep *hwdep, struct file *file)
{
	struct snd_efw *efw = hwdep->private_data;

	spin_lock_irq(&efw->lock);
	if (efw->dev_lock_count == -1)
		efw->dev_lock_count = 0;
	spin_unlock_irq(&efw->lock);

	return 0;
}

static int
hwdep_ioctl(struct snd_hwdep *hwdep, struct file *file,
	    unsigned int cmd, unsigned long arg)
{
	struct snd_efw *efw = hwdep->private_data;

	switch (cmd) {
	case SNDRV_FIREWIRE_IOCTL_GET_INFO:
		return hwdep_get_info(efw, (void __user *)arg);
	case SNDRV_FIREWIRE_IOCTL_LOCK:
		return hwdep_lock(efw);
	case SNDRV_FIREWIRE_IOCTL_UNLOCK:
		return hwdep_unlock(efw);
	default:
		return -ENOIOCTLCMD;
	}
}

#ifdef CONFIG_COMPAT
static int
hwdep_compat_ioctl(struct snd_hwdep *hwdep, struct file *file,
		   unsigned int cmd, unsigned long arg)
{
	return hwdep_ioctl(hwdep, file, cmd,
			   (unsigned long)compat_ptr(arg));
}
#else
#define hwdep_compat_ioctl NULL
#endif

static const struct snd_hwdep_ops hwdep_ops = {
	.read		= hwdep_read,
	.write		= hwdep_write,
	.release	= hwdep_release,
	.poll		= hwdep_poll,
	.ioctl		= hwdep_ioctl,
	.ioctl_compat	= hwdep_compat_ioctl,
};

/* Register one exclusive hwdep node named "Fireworks" on the card. */
int snd_efw_create_hwdep_device(struct snd_efw *efw)
{
	struct snd_hwdep *hwdep;
	int err;

	err = snd_hwdep_new(efw->card, "Fireworks", 0, &hwdep);
	if (err < 0)
		goto end;
	strcpy(hwdep->name, "Fireworks");
	hwdep->iface = SNDRV_HWDEP_IFACE_FW_FIREWORKS;
	hwdep->ops = hwdep_ops;
	hwdep->private_data = efw;
	hwdep->exclusive = true;
end:
	return err;
}
+168
sound/firewire/fireworks/fireworks_midi.c
··· 1 + /* 2 + * fireworks_midi.c - a part of driver for Fireworks based devices 3 + * 4 + * Copyright (c) 2009-2010 Clemens Ladisch 5 + * Copyright (c) 2013-2014 Takashi Sakamoto 6 + * 7 + * Licensed under the terms of the GNU General Public License, version 2. 8 + */ 9 + #include "fireworks.h" 10 + 11 + static int midi_capture_open(struct snd_rawmidi_substream *substream) 12 + { 13 + struct snd_efw *efw = substream->rmidi->private_data; 14 + int err; 15 + 16 + err = snd_efw_stream_lock_try(efw); 17 + if (err < 0) 18 + goto end; 19 + 20 + atomic_inc(&efw->capture_substreams); 21 + err = snd_efw_stream_start_duplex(efw, 0); 22 + if (err < 0) 23 + snd_efw_stream_lock_release(efw); 24 + 25 + end: 26 + return err; 27 + } 28 + 29 + static int midi_playback_open(struct snd_rawmidi_substream *substream) 30 + { 31 + struct snd_efw *efw = substream->rmidi->private_data; 32 + int err; 33 + 34 + err = snd_efw_stream_lock_try(efw); 35 + if (err < 0) 36 + goto end; 37 + 38 + atomic_inc(&efw->playback_substreams); 39 + err = snd_efw_stream_start_duplex(efw, 0); 40 + if (err < 0) 41 + snd_efw_stream_lock_release(efw); 42 + end: 43 + return err; 44 + } 45 + 46 + static int midi_capture_close(struct snd_rawmidi_substream *substream) 47 + { 48 + struct snd_efw *efw = substream->rmidi->private_data; 49 + 50 + atomic_dec(&efw->capture_substreams); 51 + snd_efw_stream_stop_duplex(efw); 52 + 53 + snd_efw_stream_lock_release(efw); 54 + return 0; 55 + } 56 + 57 + static int midi_playback_close(struct snd_rawmidi_substream *substream) 58 + { 59 + struct snd_efw *efw = substream->rmidi->private_data; 60 + 61 + atomic_dec(&efw->playback_substreams); 62 + snd_efw_stream_stop_duplex(efw); 63 + 64 + snd_efw_stream_lock_release(efw); 65 + return 0; 66 + } 67 + 68 + static void midi_capture_trigger(struct snd_rawmidi_substream *substrm, int up) 69 + { 70 + struct snd_efw *efw = substrm->rmidi->private_data; 71 + unsigned long flags; 72 + 73 + spin_lock_irqsave(&efw->lock, flags); 74 + 75 + if 
(up) 76 + amdtp_stream_midi_trigger(&efw->tx_stream, 77 + substrm->number, substrm); 78 + else 79 + amdtp_stream_midi_trigger(&efw->tx_stream, 80 + substrm->number, NULL); 81 + 82 + spin_unlock_irqrestore(&efw->lock, flags); 83 + } 84 + 85 + static void midi_playback_trigger(struct snd_rawmidi_substream *substrm, int up) 86 + { 87 + struct snd_efw *efw = substrm->rmidi->private_data; 88 + unsigned long flags; 89 + 90 + spin_lock_irqsave(&efw->lock, flags); 91 + 92 + if (up) 93 + amdtp_stream_midi_trigger(&efw->rx_stream, 94 + substrm->number, substrm); 95 + else 96 + amdtp_stream_midi_trigger(&efw->rx_stream, 97 + substrm->number, NULL); 98 + 99 + spin_unlock_irqrestore(&efw->lock, flags); 100 + } 101 + 102 + static struct snd_rawmidi_ops midi_capture_ops = { 103 + .open = midi_capture_open, 104 + .close = midi_capture_close, 105 + .trigger = midi_capture_trigger, 106 + }; 107 + 108 + static struct snd_rawmidi_ops midi_playback_ops = { 109 + .open = midi_playback_open, 110 + .close = midi_playback_close, 111 + .trigger = midi_playback_trigger, 112 + }; 113 + 114 + static void set_midi_substream_names(struct snd_efw *efw, 115 + struct snd_rawmidi_str *str) 116 + { 117 + struct snd_rawmidi_substream *subs; 118 + 119 + list_for_each_entry(subs, &str->substreams, list) { 120 + snprintf(subs->name, sizeof(subs->name), 121 + "%s MIDI %d", efw->card->shortname, subs->number + 1); 122 + } 123 + } 124 + 125 + int snd_efw_create_midi_devices(struct snd_efw *efw) 126 + { 127 + struct snd_rawmidi *rmidi; 128 + struct snd_rawmidi_str *str; 129 + int err; 130 + 131 + /* create midi ports */ 132 + err = snd_rawmidi_new(efw->card, efw->card->driver, 0, 133 + efw->midi_out_ports, efw->midi_in_ports, 134 + &rmidi); 135 + if (err < 0) 136 + return err; 137 + 138 + snprintf(rmidi->name, sizeof(rmidi->name), 139 + "%s MIDI", efw->card->shortname); 140 + rmidi->private_data = efw; 141 + 142 + if (efw->midi_in_ports > 0) { 143 + rmidi->info_flags |= SNDRV_RAWMIDI_INFO_INPUT; 144 + 145 + 
snd_rawmidi_set_ops(rmidi, SNDRV_RAWMIDI_STREAM_INPUT, 146 + &midi_capture_ops); 147 + 148 + str = &rmidi->streams[SNDRV_RAWMIDI_STREAM_INPUT]; 149 + 150 + set_midi_substream_names(efw, str); 151 + } 152 + 153 + if (efw->midi_out_ports > 0) { 154 + rmidi->info_flags |= SNDRV_RAWMIDI_INFO_OUTPUT; 155 + 156 + snd_rawmidi_set_ops(rmidi, SNDRV_RAWMIDI_STREAM_OUTPUT, 157 + &midi_playback_ops); 158 + 159 + str = &rmidi->streams[SNDRV_RAWMIDI_STREAM_OUTPUT]; 160 + 161 + set_midi_substream_names(efw, str); 162 + } 163 + 164 + if ((efw->midi_out_ports > 0) && (efw->midi_in_ports > 0)) 165 + rmidi->info_flags |= SNDRV_RAWMIDI_INFO_DUPLEX; 166 + 167 + return 0; 168 + }
+403
sound/firewire/fireworks/fireworks_pcm.c
··· 1 + /* 2 + * fireworks_pcm.c - a part of driver for Fireworks based devices 3 + * 4 + * Copyright (c) 2009-2010 Clemens Ladisch 5 + * Copyright (c) 2013-2014 Takashi Sakamoto 6 + * 7 + * Licensed under the terms of the GNU General Public License, version 2. 8 + */ 9 + #include "./fireworks.h" 10 + 11 + /* 12 + * NOTE: 13 + * Fireworks changes its AMDTP channels for PCM data according to its sampling 14 + * rate. There are three modes. Here _XX is either _rx or _tx. 15 + * 0: 32.0- 48.0 kHz then snd_efw_hwinfo.amdtp_XX_pcm_channels applied 16 + * 1: 88.2- 96.0 kHz then snd_efw_hwinfo.amdtp_XX_pcm_channels_2x applied 17 + * 2: 176.4-192.0 kHz then snd_efw_hwinfo.amdtp_XX_pcm_channels_4x applied 18 + * 19 + * The number of PCM channels for analog input and output are always fixed but 20 + * the number of PCM channels for digital input and output are differed. 21 + * 22 + * Additionally, according to "AudioFire Owner's Manual Version 2.2", in some 23 + * model, the number of PCM channels for digital input has more restriction 24 + * depending on which digital interface is selected. 25 + * - S/PDIF coaxial and optical : use input 1-2 26 + * - ADAT optical at 32.0-48.0 kHz : use input 1-8 27 + * - ADAT optical at 88.2-96.0 kHz : use input 1-4 (S/MUX format) 28 + * 29 + * The data in AMDTP channels for blank PCM channels are zero. 
30 + */ 31 + static const unsigned int freq_table[] = { 32 + /* multiplier mode 0 */ 33 + [0] = 32000, 34 + [1] = 44100, 35 + [2] = 48000, 36 + /* multiplier mode 1 */ 37 + [3] = 88200, 38 + [4] = 96000, 39 + /* multiplier mode 2 */ 40 + [5] = 176400, 41 + [6] = 192000, 42 + }; 43 + 44 + static inline unsigned int 45 + get_multiplier_mode_with_index(unsigned int index) 46 + { 47 + return ((int)index - 1) / 2; 48 + } 49 + 50 + int snd_efw_get_multiplier_mode(unsigned int sampling_rate, unsigned int *mode) 51 + { 52 + unsigned int i; 53 + 54 + for (i = 0; i < ARRAY_SIZE(freq_table); i++) { 55 + if (freq_table[i] == sampling_rate) { 56 + *mode = get_multiplier_mode_with_index(i); 57 + return 0; 58 + } 59 + } 60 + 61 + return -EINVAL; 62 + } 63 + 64 + static int 65 + hw_rule_rate(struct snd_pcm_hw_params *params, struct snd_pcm_hw_rule *rule) 66 + { 67 + unsigned int *pcm_channels = rule->private; 68 + struct snd_interval *r = 69 + hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE); 70 + const struct snd_interval *c = 71 + hw_param_interval_c(params, SNDRV_PCM_HW_PARAM_CHANNELS); 72 + struct snd_interval t = { 73 + .min = UINT_MAX, .max = 0, .integer = 1 74 + }; 75 + unsigned int i, mode; 76 + 77 + for (i = 0; i < ARRAY_SIZE(freq_table); i++) { 78 + mode = get_multiplier_mode_with_index(i); 79 + if (!snd_interval_test(c, pcm_channels[mode])) 80 + continue; 81 + 82 + t.min = min(t.min, freq_table[i]); 83 + t.max = max(t.max, freq_table[i]); 84 + } 85 + 86 + return snd_interval_refine(r, &t); 87 + } 88 + 89 + static int 90 + hw_rule_channels(struct snd_pcm_hw_params *params, struct snd_pcm_hw_rule *rule) 91 + { 92 + unsigned int *pcm_channels = rule->private; 93 + struct snd_interval *c = 94 + hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS); 95 + const struct snd_interval *r = 96 + hw_param_interval_c(params, SNDRV_PCM_HW_PARAM_RATE); 97 + struct snd_interval t = { 98 + .min = UINT_MAX, .max = 0, .integer = 1 99 + }; 100 + unsigned int i, mode; 101 + 102 + for (i 
= 0; i < ARRAY_SIZE(freq_table); i++) { 103 + mode = get_multiplier_mode_with_index(i); 104 + if (!snd_interval_test(r, freq_table[i])) 105 + continue; 106 + 107 + t.min = min(t.min, pcm_channels[mode]); 108 + t.max = max(t.max, pcm_channels[mode]); 109 + } 110 + 111 + return snd_interval_refine(c, &t); 112 + } 113 + 114 + static void 115 + limit_channels(struct snd_pcm_hardware *hw, unsigned int *pcm_channels) 116 + { 117 + unsigned int i, mode; 118 + 119 + hw->channels_min = UINT_MAX; 120 + hw->channels_max = 0; 121 + 122 + for (i = 0; i < ARRAY_SIZE(freq_table); i++) { 123 + mode = get_multiplier_mode_with_index(i); 124 + if (pcm_channels[mode] == 0) 125 + continue; 126 + 127 + hw->channels_min = min(hw->channels_min, pcm_channels[mode]); 128 + hw->channels_max = max(hw->channels_max, pcm_channels[mode]); 129 + } 130 + } 131 + 132 + static void 133 + limit_period_and_buffer(struct snd_pcm_hardware *hw) 134 + { 135 + hw->periods_min = 2; /* SNDRV_PCM_INFO_BATCH */ 136 + hw->periods_max = UINT_MAX; 137 + 138 + hw->period_bytes_min = 4 * hw->channels_max; /* bytes for a frame */ 139 + 140 + /* Just to prevent from allocating much pages. 
*/ 141 + hw->period_bytes_max = hw->period_bytes_min * 2048; 142 + hw->buffer_bytes_max = hw->period_bytes_max * hw->periods_min; 143 + } 144 + 145 + static int 146 + pcm_init_hw_params(struct snd_efw *efw, 147 + struct snd_pcm_substream *substream) 148 + { 149 + struct snd_pcm_runtime *runtime = substream->runtime; 150 + struct amdtp_stream *s; 151 + unsigned int *pcm_channels; 152 + int err; 153 + 154 + runtime->hw.info = SNDRV_PCM_INFO_BATCH | 155 + SNDRV_PCM_INFO_BLOCK_TRANSFER | 156 + SNDRV_PCM_INFO_INTERLEAVED | 157 + SNDRV_PCM_INFO_JOINT_DUPLEX | 158 + SNDRV_PCM_INFO_MMAP | 159 + SNDRV_PCM_INFO_MMAP_VALID; 160 + 161 + if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) { 162 + runtime->hw.formats = AMDTP_IN_PCM_FORMAT_BITS; 163 + s = &efw->tx_stream; 164 + pcm_channels = efw->pcm_capture_channels; 165 + } else { 166 + runtime->hw.formats = AMDTP_OUT_PCM_FORMAT_BITS; 167 + s = &efw->rx_stream; 168 + pcm_channels = efw->pcm_playback_channels; 169 + } 170 + 171 + /* limit rates */ 172 + runtime->hw.rates = efw->supported_sampling_rate, 173 + snd_pcm_limit_hw_rates(runtime); 174 + 175 + limit_channels(&runtime->hw, pcm_channels); 176 + limit_period_and_buffer(&runtime->hw); 177 + 178 + err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS, 179 + hw_rule_channels, pcm_channels, 180 + SNDRV_PCM_HW_PARAM_RATE, -1); 181 + if (err < 0) 182 + goto end; 183 + 184 + err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_RATE, 185 + hw_rule_rate, pcm_channels, 186 + SNDRV_PCM_HW_PARAM_CHANNELS, -1); 187 + if (err < 0) 188 + goto end; 189 + 190 + err = amdtp_stream_add_pcm_hw_constraints(s, runtime); 191 + end: 192 + return err; 193 + } 194 + 195 + static int pcm_open(struct snd_pcm_substream *substream) 196 + { 197 + struct snd_efw *efw = substream->private_data; 198 + unsigned int sampling_rate; 199 + enum snd_efw_clock_source clock_source; 200 + int err; 201 + 202 + err = snd_efw_stream_lock_try(efw); 203 + if (err < 0) 204 + goto end; 205 + 206 + err = 
pcm_init_hw_params(efw, substream); 207 + if (err < 0) 208 + goto err_locked; 209 + 210 + err = snd_efw_command_get_clock_source(efw, &clock_source); 211 + if (err < 0) 212 + goto err_locked; 213 + 214 + /* 215 + * When source of clock is not internal or any PCM streams are running, 216 + * available sampling rate is limited at current sampling rate. 217 + */ 218 + if ((clock_source != SND_EFW_CLOCK_SOURCE_INTERNAL) || 219 + amdtp_stream_pcm_running(&efw->tx_stream) || 220 + amdtp_stream_pcm_running(&efw->rx_stream)) { 221 + err = snd_efw_command_get_sampling_rate(efw, &sampling_rate); 222 + if (err < 0) 223 + goto err_locked; 224 + substream->runtime->hw.rate_min = sampling_rate; 225 + substream->runtime->hw.rate_max = sampling_rate; 226 + } 227 + 228 + snd_pcm_set_sync(substream); 229 + end: 230 + return err; 231 + err_locked: 232 + snd_efw_stream_lock_release(efw); 233 + return err; 234 + } 235 + 236 + static int pcm_close(struct snd_pcm_substream *substream) 237 + { 238 + struct snd_efw *efw = substream->private_data; 239 + snd_efw_stream_lock_release(efw); 240 + return 0; 241 + } 242 + 243 + static int pcm_capture_hw_params(struct snd_pcm_substream *substream, 244 + struct snd_pcm_hw_params *hw_params) 245 + { 246 + struct snd_efw *efw = substream->private_data; 247 + 248 + if (substream->runtime->status->state == SNDRV_PCM_STATE_OPEN) 249 + atomic_inc(&efw->capture_substreams); 250 + amdtp_stream_set_pcm_format(&efw->tx_stream, params_format(hw_params)); 251 + 252 + return snd_pcm_lib_alloc_vmalloc_buffer(substream, 253 + params_buffer_bytes(hw_params)); 254 + } 255 + static int pcm_playback_hw_params(struct snd_pcm_substream *substream, 256 + struct snd_pcm_hw_params *hw_params) 257 + { 258 + struct snd_efw *efw = substream->private_data; 259 + 260 + if (substream->runtime->status->state == SNDRV_PCM_STATE_OPEN) 261 + atomic_inc(&efw->playback_substreams); 262 + amdtp_stream_set_pcm_format(&efw->rx_stream, params_format(hw_params)); 263 + 264 + return 
snd_pcm_lib_alloc_vmalloc_buffer(substream, 265 + params_buffer_bytes(hw_params)); 266 + } 267 + 268 + static int pcm_capture_hw_free(struct snd_pcm_substream *substream) 269 + { 270 + struct snd_efw *efw = substream->private_data; 271 + 272 + if (substream->runtime->status->state != SNDRV_PCM_STATE_OPEN) 273 + atomic_dec(&efw->capture_substreams); 274 + 275 + snd_efw_stream_stop_duplex(efw); 276 + 277 + return snd_pcm_lib_free_vmalloc_buffer(substream); 278 + } 279 + static int pcm_playback_hw_free(struct snd_pcm_substream *substream) 280 + { 281 + struct snd_efw *efw = substream->private_data; 282 + 283 + if (substream->runtime->status->state != SNDRV_PCM_STATE_OPEN) 284 + atomic_dec(&efw->playback_substreams); 285 + 286 + snd_efw_stream_stop_duplex(efw); 287 + 288 + return snd_pcm_lib_free_vmalloc_buffer(substream); 289 + } 290 + 291 + static int pcm_capture_prepare(struct snd_pcm_substream *substream) 292 + { 293 + struct snd_efw *efw = substream->private_data; 294 + struct snd_pcm_runtime *runtime = substream->runtime; 295 + int err; 296 + 297 + err = snd_efw_stream_start_duplex(efw, runtime->rate); 298 + if (err >= 0) 299 + amdtp_stream_pcm_prepare(&efw->tx_stream); 300 + 301 + return err; 302 + } 303 + static int pcm_playback_prepare(struct snd_pcm_substream *substream) 304 + { 305 + struct snd_efw *efw = substream->private_data; 306 + struct snd_pcm_runtime *runtime = substream->runtime; 307 + int err; 308 + 309 + err = snd_efw_stream_start_duplex(efw, runtime->rate); 310 + if (err >= 0) 311 + amdtp_stream_pcm_prepare(&efw->rx_stream); 312 + 313 + return err; 314 + } 315 + 316 + static int pcm_capture_trigger(struct snd_pcm_substream *substream, int cmd) 317 + { 318 + struct snd_efw *efw = substream->private_data; 319 + 320 + switch (cmd) { 321 + case SNDRV_PCM_TRIGGER_START: 322 + amdtp_stream_pcm_trigger(&efw->tx_stream, substream); 323 + break; 324 + case SNDRV_PCM_TRIGGER_STOP: 325 + amdtp_stream_pcm_trigger(&efw->tx_stream, NULL); 326 + break; 327 + 
default: 328 + return -EINVAL; 329 + } 330 + 331 + return 0; 332 + } 333 + static int pcm_playback_trigger(struct snd_pcm_substream *substream, int cmd) 334 + { 335 + struct snd_efw *efw = substream->private_data; 336 + 337 + switch (cmd) { 338 + case SNDRV_PCM_TRIGGER_START: 339 + amdtp_stream_pcm_trigger(&efw->rx_stream, substream); 340 + break; 341 + case SNDRV_PCM_TRIGGER_STOP: 342 + amdtp_stream_pcm_trigger(&efw->rx_stream, NULL); 343 + break; 344 + default: 345 + return -EINVAL; 346 + } 347 + 348 + return 0; 349 + } 350 + 351 + static snd_pcm_uframes_t pcm_capture_pointer(struct snd_pcm_substream *sbstrm) 352 + { 353 + struct snd_efw *efw = sbstrm->private_data; 354 + return amdtp_stream_pcm_pointer(&efw->tx_stream); 355 + } 356 + static snd_pcm_uframes_t pcm_playback_pointer(struct snd_pcm_substream *sbstrm) 357 + { 358 + struct snd_efw *efw = sbstrm->private_data; 359 + return amdtp_stream_pcm_pointer(&efw->rx_stream); 360 + } 361 + 362 + static const struct snd_pcm_ops pcm_capture_ops = { 363 + .open = pcm_open, 364 + .close = pcm_close, 365 + .ioctl = snd_pcm_lib_ioctl, 366 + .hw_params = pcm_capture_hw_params, 367 + .hw_free = pcm_capture_hw_free, 368 + .prepare = pcm_capture_prepare, 369 + .trigger = pcm_capture_trigger, 370 + .pointer = pcm_capture_pointer, 371 + .page = snd_pcm_lib_get_vmalloc_page, 372 + }; 373 + 374 + static const struct snd_pcm_ops pcm_playback_ops = { 375 + .open = pcm_open, 376 + .close = pcm_close, 377 + .ioctl = snd_pcm_lib_ioctl, 378 + .hw_params = pcm_playback_hw_params, 379 + .hw_free = pcm_playback_hw_free, 380 + .prepare = pcm_playback_prepare, 381 + .trigger = pcm_playback_trigger, 382 + .pointer = pcm_playback_pointer, 383 + .page = snd_pcm_lib_get_vmalloc_page, 384 + .mmap = snd_pcm_lib_mmap_vmalloc, 385 + }; 386 + 387 + int snd_efw_create_pcm_devices(struct snd_efw *efw) 388 + { 389 + struct snd_pcm *pcm; 390 + int err; 391 + 392 + err = snd_pcm_new(efw->card, efw->card->driver, 0, 1, 1, &pcm); 393 + if (err < 0) 394 + 
goto end; 395 + 396 + pcm->private_data = efw; 397 + snprintf(pcm->name, sizeof(pcm->name), "%s PCM", efw->card->shortname); 398 + snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &pcm_playback_ops); 399 + snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &pcm_capture_ops); 400 + end: 401 + return err; 402 + } 403 +
+232
sound/firewire/fireworks/fireworks_proc.c
··· 1 + /* 2 + * fireworks_proc.c - a part of driver for Fireworks based devices 3 + * 4 + * Copyright (c) 2009-2010 Clemens Ladisch 5 + * Copyright (c) 2013-2014 Takashi Sakamoto 6 + * 7 + * Licensed under the terms of the GNU General Public License, version 2. 8 + */ 9 + 10 + #include "./fireworks.h" 11 + 12 + static inline const char* 13 + get_phys_name(struct snd_efw_phys_grp *grp, bool input) 14 + { 15 + const char *const ch_type[] = { 16 + "Analog", "S/PDIF", "ADAT", "S/PDIF or ADAT", "Mirroring", 17 + "Headphones", "I2S", "Guitar", "Pirzo Guitar", "Guitar String", 18 + }; 19 + 20 + if (grp->type < ARRAY_SIZE(ch_type)) 21 + return ch_type[grp->type]; 22 + else if (input) 23 + return "Input"; 24 + else 25 + return "Output"; 26 + } 27 + 28 + static void 29 + proc_read_hwinfo(struct snd_info_entry *entry, struct snd_info_buffer *buffer) 30 + { 31 + struct snd_efw *efw = entry->private_data; 32 + unsigned short i; 33 + struct snd_efw_hwinfo *hwinfo; 34 + 35 + hwinfo = kmalloc(sizeof(struct snd_efw_hwinfo), GFP_KERNEL); 36 + if (hwinfo == NULL) 37 + return; 38 + 39 + if (snd_efw_command_get_hwinfo(efw, hwinfo) < 0) 40 + goto end; 41 + 42 + snd_iprintf(buffer, "guid_hi: 0x%X\n", hwinfo->guid_hi); 43 + snd_iprintf(buffer, "guid_lo: 0x%X\n", hwinfo->guid_lo); 44 + snd_iprintf(buffer, "type: 0x%X\n", hwinfo->type); 45 + snd_iprintf(buffer, "version: 0x%X\n", hwinfo->version); 46 + snd_iprintf(buffer, "vendor_name: %s\n", hwinfo->vendor_name); 47 + snd_iprintf(buffer, "model_name: %s\n", hwinfo->model_name); 48 + 49 + snd_iprintf(buffer, "dsp_version: 0x%X\n", hwinfo->dsp_version); 50 + snd_iprintf(buffer, "arm_version: 0x%X\n", hwinfo->arm_version); 51 + snd_iprintf(buffer, "fpga_version: 0x%X\n", hwinfo->fpga_version); 52 + 53 + snd_iprintf(buffer, "flags: 0x%X\n", hwinfo->flags); 54 + 55 + snd_iprintf(buffer, "max_sample_rate: 0x%X\n", hwinfo->max_sample_rate); 56 + snd_iprintf(buffer, "min_sample_rate: 0x%X\n", hwinfo->min_sample_rate); 57 + snd_iprintf(buffer, 
"supported_clock: 0x%X\n", 58 + hwinfo->supported_clocks); 59 + 60 + snd_iprintf(buffer, "phys out: 0x%X\n", hwinfo->phys_out); 61 + snd_iprintf(buffer, "phys in: 0x%X\n", hwinfo->phys_in); 62 + 63 + snd_iprintf(buffer, "phys in grps: 0x%X\n", 64 + hwinfo->phys_in_grp_count); 65 + for (i = 0; i < hwinfo->phys_in_grp_count; i++) { 66 + snd_iprintf(buffer, 67 + "phys in grp[0x%d]: type 0x%d, count 0x%d\n", 68 + i, hwinfo->phys_out_grps[i].type, 69 + hwinfo->phys_out_grps[i].count); 70 + } 71 + 72 + snd_iprintf(buffer, "phys out grps: 0x%X\n", 73 + hwinfo->phys_out_grp_count); 74 + for (i = 0; i < hwinfo->phys_out_grp_count; i++) { 75 + snd_iprintf(buffer, 76 + "phys out grps[0x%d]: type 0x%d, count 0x%d\n", 77 + i, hwinfo->phys_out_grps[i].type, 78 + hwinfo->phys_out_grps[i].count); 79 + } 80 + 81 + snd_iprintf(buffer, "amdtp rx pcm channels 1x: 0x%X\n", 82 + hwinfo->amdtp_rx_pcm_channels); 83 + snd_iprintf(buffer, "amdtp tx pcm channels 1x: 0x%X\n", 84 + hwinfo->amdtp_tx_pcm_channels); 85 + snd_iprintf(buffer, "amdtp rx pcm channels 2x: 0x%X\n", 86 + hwinfo->amdtp_rx_pcm_channels_2x); 87 + snd_iprintf(buffer, "amdtp tx pcm channels 2x: 0x%X\n", 88 + hwinfo->amdtp_tx_pcm_channels_2x); 89 + snd_iprintf(buffer, "amdtp rx pcm channels 4x: 0x%X\n", 90 + hwinfo->amdtp_rx_pcm_channels_4x); 91 + snd_iprintf(buffer, "amdtp tx pcm channels 4x: 0x%X\n", 92 + hwinfo->amdtp_tx_pcm_channels_4x); 93 + 94 + snd_iprintf(buffer, "midi out ports: 0x%X\n", hwinfo->midi_out_ports); 95 + snd_iprintf(buffer, "midi in ports: 0x%X\n", hwinfo->midi_in_ports); 96 + 97 + snd_iprintf(buffer, "mixer playback channels: 0x%X\n", 98 + hwinfo->mixer_playback_channels); 99 + snd_iprintf(buffer, "mixer capture channels: 0x%X\n", 100 + hwinfo->mixer_capture_channels); 101 + end: 102 + kfree(hwinfo); 103 + } 104 + 105 + static void 106 + proc_read_clock(struct snd_info_entry *entry, struct snd_info_buffer *buffer) 107 + { 108 + struct snd_efw *efw = entry->private_data; 109 + enum snd_efw_clock_source 
clock_source; 110 + unsigned int sampling_rate; 111 + 112 + if (snd_efw_command_get_clock_source(efw, &clock_source) < 0) 113 + return; 114 + 115 + if (snd_efw_command_get_sampling_rate(efw, &sampling_rate) < 0) 116 + return; 117 + 118 + snd_iprintf(buffer, "Clock Source: %d\n", clock_source); 119 + snd_iprintf(buffer, "Sampling Rate: %d\n", sampling_rate); 120 + } 121 + 122 + /* 123 + * NOTE: 124 + * dB = 20 * log10(linear / 0x01000000) 125 + * -144.0 dB when linear is 0 126 + */ 127 + static void 128 + proc_read_phys_meters(struct snd_info_entry *entry, 129 + struct snd_info_buffer *buffer) 130 + { 131 + struct snd_efw *efw = entry->private_data; 132 + struct snd_efw_phys_meters *meters; 133 + unsigned int g, c, m, max, size; 134 + const char *name; 135 + u32 *linear; 136 + int err; 137 + 138 + size = sizeof(struct snd_efw_phys_meters) + 139 + (efw->phys_in + efw->phys_out) * sizeof(u32); 140 + meters = kzalloc(size, GFP_KERNEL); 141 + if (meters == NULL) 142 + return; 143 + 144 + err = snd_efw_command_get_phys_meters(efw, meters, size); 145 + if (err < 0) 146 + goto end; 147 + 148 + snd_iprintf(buffer, "Physical Meters:\n"); 149 + 150 + m = 0; 151 + max = min(efw->phys_out, meters->out_meters); 152 + linear = meters->values; 153 + snd_iprintf(buffer, " %d Outputs:\n", max); 154 + for (g = 0; g < efw->phys_out_grp_count; g++) { 155 + name = get_phys_name(&efw->phys_out_grps[g], false); 156 + for (c = 0; c < efw->phys_out_grps[g].count; c++) { 157 + if (m < max) 158 + snd_iprintf(buffer, "\t%s [%d]: %d\n", 159 + name, c, linear[m++]); 160 + } 161 + } 162 + 163 + m = 0; 164 + max = min(efw->phys_in, meters->in_meters); 165 + linear = meters->values + meters->out_meters; 166 + snd_iprintf(buffer, " %d Inputs:\n", max); 167 + for (g = 0; g < efw->phys_in_grp_count; g++) { 168 + name = get_phys_name(&efw->phys_in_grps[g], true); 169 + for (c = 0; c < efw->phys_in_grps[g].count; c++) 170 + if (m < max) 171 + snd_iprintf(buffer, "\t%s [%d]: %d\n", 172 + name, c, 
linear[m++]); 173 + } 174 + end: 175 + kfree(meters); 176 + } 177 + 178 + static void 179 + proc_read_queues_state(struct snd_info_entry *entry, 180 + struct snd_info_buffer *buffer) 181 + { 182 + struct snd_efw *efw = entry->private_data; 183 + unsigned int consumed; 184 + 185 + if (efw->pull_ptr > efw->push_ptr) 186 + consumed = snd_efw_resp_buf_size - 187 + (unsigned int)(efw->pull_ptr - efw->push_ptr); 188 + else 189 + consumed = (unsigned int)(efw->push_ptr - efw->pull_ptr); 190 + 191 + snd_iprintf(buffer, "%d %d/%d\n", 192 + efw->resp_queues, consumed, snd_efw_resp_buf_size); 193 + } 194 + 195 + static void 196 + add_node(struct snd_efw *efw, struct snd_info_entry *root, const char *name, 197 + void (*op)(struct snd_info_entry *e, struct snd_info_buffer *b)) 198 + { 199 + struct snd_info_entry *entry; 200 + 201 + entry = snd_info_create_card_entry(efw->card, name, root); 202 + if (entry == NULL) 203 + return; 204 + 205 + snd_info_set_text_ops(entry, efw, op); 206 + if (snd_info_register(entry) < 0) 207 + snd_info_free_entry(entry); 208 + } 209 + 210 + void snd_efw_proc_init(struct snd_efw *efw) 211 + { 212 + struct snd_info_entry *root; 213 + 214 + /* 215 + * All nodes are automatically removed at snd_card_disconnect(), 216 + * by following to link list. 217 + */ 218 + root = snd_info_create_card_entry(efw->card, "firewire", 219 + efw->card->proc_root); 220 + if (root == NULL) 221 + return; 222 + root->mode = S_IFDIR | S_IRUGO | S_IXUGO; 223 + if (snd_info_register(root) < 0) { 224 + snd_info_free_entry(root); 225 + return; 226 + } 227 + 228 + add_node(efw, root, "clock", proc_read_clock); 229 + add_node(efw, root, "firmware", proc_read_hwinfo); 230 + add_node(efw, root, "meters", proc_read_phys_meters); 231 + add_node(efw, root, "queues", proc_read_queues_state); 232 + }
+372
sound/firewire/fireworks/fireworks_stream.c
··· 1 + /* 2 + * fireworks_stream.c - a part of driver for Fireworks based devices 3 + * 4 + * Copyright (c) 2013-2014 Takashi Sakamoto 5 + * 6 + * Licensed under the terms of the GNU General Public License, version 2. 7 + */ 8 + #include "./fireworks.h" 9 + 10 + #define CALLBACK_TIMEOUT 100 11 + 12 + static int 13 + init_stream(struct snd_efw *efw, struct amdtp_stream *stream) 14 + { 15 + struct cmp_connection *conn; 16 + enum cmp_direction c_dir; 17 + enum amdtp_stream_direction s_dir; 18 + int err; 19 + 20 + if (stream == &efw->tx_stream) { 21 + conn = &efw->out_conn; 22 + c_dir = CMP_OUTPUT; 23 + s_dir = AMDTP_IN_STREAM; 24 + } else { 25 + conn = &efw->in_conn; 26 + c_dir = CMP_INPUT; 27 + s_dir = AMDTP_OUT_STREAM; 28 + } 29 + 30 + err = cmp_connection_init(conn, efw->unit, c_dir, 0); 31 + if (err < 0) 32 + goto end; 33 + 34 + err = amdtp_stream_init(stream, efw->unit, s_dir, CIP_BLOCKING); 35 + if (err < 0) { 36 + amdtp_stream_destroy(stream); 37 + cmp_connection_destroy(conn); 38 + } 39 + end: 40 + return err; 41 + } 42 + 43 + static void 44 + stop_stream(struct snd_efw *efw, struct amdtp_stream *stream) 45 + { 46 + amdtp_stream_pcm_abort(stream); 47 + amdtp_stream_stop(stream); 48 + 49 + if (stream == &efw->tx_stream) 50 + cmp_connection_break(&efw->out_conn); 51 + else 52 + cmp_connection_break(&efw->in_conn); 53 + } 54 + 55 + static int 56 + start_stream(struct snd_efw *efw, struct amdtp_stream *stream, 57 + unsigned int sampling_rate) 58 + { 59 + struct cmp_connection *conn; 60 + unsigned int mode, pcm_channels, midi_ports; 61 + int err; 62 + 63 + err = snd_efw_get_multiplier_mode(sampling_rate, &mode); 64 + if (err < 0) 65 + goto end; 66 + if (stream == &efw->tx_stream) { 67 + conn = &efw->out_conn; 68 + pcm_channels = efw->pcm_capture_channels[mode]; 69 + midi_ports = efw->midi_out_ports; 70 + } else { 71 + conn = &efw->in_conn; 72 + pcm_channels = efw->pcm_playback_channels[mode]; 73 + midi_ports = efw->midi_in_ports; 74 + } 75 + 76 + 
amdtp_stream_set_parameters(stream, sampling_rate, 77 + pcm_channels, midi_ports); 78 + 79 + /* establish connection via CMP */ 80 + err = cmp_connection_establish(conn, 81 + amdtp_stream_get_max_payload(stream)); 82 + if (err < 0) 83 + goto end; 84 + 85 + /* start amdtp stream */ 86 + err = amdtp_stream_start(stream, 87 + conn->resources.channel, 88 + conn->speed); 89 + if (err < 0) { 90 + stop_stream(efw, stream); 91 + goto end; 92 + } 93 + 94 + /* wait first callback */ 95 + if (!amdtp_stream_wait_callback(stream, CALLBACK_TIMEOUT)) { 96 + stop_stream(efw, stream); 97 + err = -ETIMEDOUT; 98 + } 99 + end: 100 + return err; 101 + } 102 + 103 + static void 104 + destroy_stream(struct snd_efw *efw, struct amdtp_stream *stream) 105 + { 106 + stop_stream(efw, stream); 107 + 108 + amdtp_stream_destroy(stream); 109 + 110 + if (stream == &efw->tx_stream) 111 + cmp_connection_destroy(&efw->out_conn); 112 + else 113 + cmp_connection_destroy(&efw->in_conn); 114 + } 115 + 116 + static int 117 + get_sync_mode(struct snd_efw *efw, enum cip_flags *sync_mode) 118 + { 119 + enum snd_efw_clock_source clock_source; 120 + int err; 121 + 122 + err = snd_efw_command_get_clock_source(efw, &clock_source); 123 + if (err < 0) 124 + return err; 125 + 126 + if (clock_source == SND_EFW_CLOCK_SOURCE_SYTMATCH) 127 + return -ENOSYS; 128 + 129 + *sync_mode = CIP_SYNC_TO_DEVICE; 130 + return 0; 131 + } 132 + 133 + static int 134 + check_connection_used_by_others(struct snd_efw *efw, struct amdtp_stream *s) 135 + { 136 + struct cmp_connection *conn; 137 + bool used; 138 + int err; 139 + 140 + if (s == &efw->tx_stream) 141 + conn = &efw->out_conn; 142 + else 143 + conn = &efw->in_conn; 144 + 145 + err = cmp_connection_check_used(conn, &used); 146 + if ((err >= 0) && used && !amdtp_stream_running(s)) { 147 + dev_err(&efw->unit->device, 148 + "Connection established by others: %cPCR[%d]\n", 149 + (conn->direction == CMP_OUTPUT) ? 
'o' : 'i', 150 + conn->pcr_index); 151 + err = -EBUSY; 152 + } 153 + 154 + return err; 155 + } 156 + 157 + int snd_efw_stream_init_duplex(struct snd_efw *efw) 158 + { 159 + int err; 160 + 161 + err = init_stream(efw, &efw->tx_stream); 162 + if (err < 0) 163 + goto end; 164 + /* Fireworks transmits NODATA packets with TAG0. */ 165 + efw->tx_stream.flags |= CIP_EMPTY_WITH_TAG0; 166 + /* Fireworks has its own meaning for dbc. */ 167 + efw->tx_stream.flags |= CIP_DBC_IS_END_EVENT; 168 + /* Fireworks reset dbc at bus reset. */ 169 + efw->tx_stream.flags |= CIP_SKIP_DBC_ZERO_CHECK; 170 + /* AudioFire9 always reports wrong dbs. */ 171 + if (efw->is_af9) 172 + efw->tx_stream.flags |= CIP_WRONG_DBS; 173 + /* Firmware version 5.5 reports fixed interval for dbc. */ 174 + if (efw->firmware_version == 0x5050000) 175 + efw->tx_stream.tx_dbc_interval = 8; 176 + 177 + err = init_stream(efw, &efw->rx_stream); 178 + if (err < 0) { 179 + destroy_stream(efw, &efw->tx_stream); 180 + goto end; 181 + } 182 + /* 183 + * Fireworks ignores MIDI messages in more than first 8 data 184 + * blocks of an received AMDTP packet. 185 + */ 186 + efw->rx_stream.rx_blocks_for_midi = 8; 187 + 188 + /* set IEC61883 compliant mode (actually not fully compliant...) 
*/ 189 + err = snd_efw_command_set_tx_mode(efw, SND_EFW_TRANSPORT_MODE_IEC61883); 190 + if (err < 0) { 191 + destroy_stream(efw, &efw->tx_stream); 192 + destroy_stream(efw, &efw->rx_stream); 193 + } 194 + end: 195 + return err; 196 + } 197 + 198 + int snd_efw_stream_start_duplex(struct snd_efw *efw, unsigned int rate) 199 + { 200 + struct amdtp_stream *master, *slave; 201 + atomic_t *slave_substreams; 202 + enum cip_flags sync_mode; 203 + unsigned int curr_rate; 204 + int err = 0; 205 + 206 + mutex_lock(&efw->mutex); 207 + 208 + /* Need no substreams */ 209 + if ((atomic_read(&efw->playback_substreams) == 0) && 210 + (atomic_read(&efw->capture_substreams) == 0)) 211 + goto end; 212 + 213 + err = get_sync_mode(efw, &sync_mode); 214 + if (err < 0) 215 + goto end; 216 + if (sync_mode == CIP_SYNC_TO_DEVICE) { 217 + master = &efw->tx_stream; 218 + slave = &efw->rx_stream; 219 + slave_substreams = &efw->playback_substreams; 220 + } else { 221 + master = &efw->rx_stream; 222 + slave = &efw->tx_stream; 223 + slave_substreams = &efw->capture_substreams; 224 + } 225 + 226 + /* 227 + * Considering JACK/FFADO streaming: 228 + * TODO: This can be removed hwdep functionality becomes popular. 
229 + */ 230 + err = check_connection_used_by_others(efw, master); 231 + if (err < 0) 232 + goto end; 233 + 234 + /* packet queueing error */ 235 + if (amdtp_streaming_error(slave)) 236 + stop_stream(efw, slave); 237 + if (amdtp_streaming_error(master)) 238 + stop_stream(efw, master); 239 + 240 + /* stop streams if rate is different */ 241 + err = snd_efw_command_get_sampling_rate(efw, &curr_rate); 242 + if (err < 0) 243 + goto end; 244 + if (rate == 0) 245 + rate = curr_rate; 246 + if (rate != curr_rate) { 247 + stop_stream(efw, slave); 248 + stop_stream(efw, master); 249 + } 250 + 251 + /* master should be always running */ 252 + if (!amdtp_stream_running(master)) { 253 + amdtp_stream_set_sync(sync_mode, master, slave); 254 + efw->master = master; 255 + 256 + err = snd_efw_command_set_sampling_rate(efw, rate); 257 + if (err < 0) 258 + goto end; 259 + 260 + err = start_stream(efw, master, rate); 261 + if (err < 0) { 262 + dev_err(&efw->unit->device, 263 + "fail to start AMDTP master stream:%d\n", err); 264 + goto end; 265 + } 266 + } 267 + 268 + /* start slave if needed */ 269 + if (atomic_read(slave_substreams) > 0 && !amdtp_stream_running(slave)) { 270 + err = start_stream(efw, slave, rate); 271 + if (err < 0) { 272 + dev_err(&efw->unit->device, 273 + "fail to start AMDTP slave stream:%d\n", err); 274 + stop_stream(efw, master); 275 + } 276 + } 277 + end: 278 + mutex_unlock(&efw->mutex); 279 + return err; 280 + } 281 + 282 + void snd_efw_stream_stop_duplex(struct snd_efw *efw) 283 + { 284 + struct amdtp_stream *master, *slave; 285 + atomic_t *master_substreams, *slave_substreams; 286 + 287 + mutex_lock(&efw->mutex); 288 + 289 + if (efw->master == &efw->rx_stream) { 290 + slave = &efw->tx_stream; 291 + master = &efw->rx_stream; 292 + slave_substreams = &efw->capture_substreams; 293 + master_substreams = &efw->playback_substreams; 294 + } else { 295 + slave = &efw->rx_stream; 296 + master = &efw->tx_stream; 297 + slave_substreams = &efw->playback_substreams; 298 + 
master_substreams = &efw->capture_substreams; 299 + } 300 + 301 + if (atomic_read(slave_substreams) == 0) { 302 + stop_stream(efw, slave); 303 + 304 + if (atomic_read(master_substreams) == 0) 305 + stop_stream(efw, master); 306 + } 307 + 308 + mutex_unlock(&efw->mutex); 309 + } 310 + 311 + void snd_efw_stream_update_duplex(struct snd_efw *efw) 312 + { 313 + if ((cmp_connection_update(&efw->out_conn) < 0) || 314 + (cmp_connection_update(&efw->in_conn) < 0)) { 315 + mutex_lock(&efw->mutex); 316 + stop_stream(efw, &efw->rx_stream); 317 + stop_stream(efw, &efw->tx_stream); 318 + mutex_unlock(&efw->mutex); 319 + } else { 320 + amdtp_stream_update(&efw->rx_stream); 321 + amdtp_stream_update(&efw->tx_stream); 322 + } 323 + } 324 + 325 + void snd_efw_stream_destroy_duplex(struct snd_efw *efw) 326 + { 327 + mutex_lock(&efw->mutex); 328 + 329 + destroy_stream(efw, &efw->rx_stream); 330 + destroy_stream(efw, &efw->tx_stream); 331 + 332 + mutex_unlock(&efw->mutex); 333 + } 334 + 335 + void snd_efw_stream_lock_changed(struct snd_efw *efw) 336 + { 337 + efw->dev_lock_changed = true; 338 + wake_up(&efw->hwdep_wait); 339 + } 340 + 341 + int snd_efw_stream_lock_try(struct snd_efw *efw) 342 + { 343 + int err; 344 + 345 + spin_lock_irq(&efw->lock); 346 + 347 + /* user land lock this */ 348 + if (efw->dev_lock_count < 0) { 349 + err = -EBUSY; 350 + goto end; 351 + } 352 + 353 + /* this is the first time */ 354 + if (efw->dev_lock_count++ == 0) 355 + snd_efw_stream_lock_changed(efw); 356 + err = 0; 357 + end: 358 + spin_unlock_irq(&efw->lock); 359 + return err; 360 + } 361 + 362 + void snd_efw_stream_lock_release(struct snd_efw *efw) 363 + { 364 + spin_lock_irq(&efw->lock); 365 + 366 + if (WARN_ON(efw->dev_lock_count <= 0)) 367 + goto end; 368 + if (--efw->dev_lock_count == 0) 369 + snd_efw_stream_lock_changed(efw); 370 + end: 371 + spin_unlock_irq(&efw->lock); 372 + }
+326
sound/firewire/fireworks/fireworks_transaction.c
··· 1 + /* 2 + * fireworks_transaction.c - a part of driver for Fireworks based devices 3 + * 4 + * Copyright (c) 2013-2014 Takashi Sakamoto 5 + * 6 + * Licensed under the terms of the GNU General Public License, version 2. 7 + */ 8 + 9 + /* 10 + * Fireworks have its own transaction. The transaction can be delivered by AV/C 11 + * Vendor Specific command. But at least Windows driver and firmware version 5.5 12 + * or later don't use it. 13 + * 14 + * Transaction substance: 15 + * At first, 6 data exist. Following to the 6 data, parameters for each 16 + * commands exists. All of parameters are 32 bit alighed to big endian. 17 + * data[0]: Length of transaction substance 18 + * data[1]: Transaction version 19 + * data[2]: Sequence number. This is incremented by the device 20 + * data[3]: transaction category 21 + * data[4]: transaction command 22 + * data[5]: return value in response. 23 + * data[6-]: parameters 24 + * 25 + * Transaction address: 26 + * command: 0xecc000000000 27 + * response: 0xecc080000000 (default) 28 + * 29 + * I note that the address for response can be changed by command. But this 30 + * module uses the default address. 
31 + */ 32 + #include "./fireworks.h" 33 + 34 + #define MEMORY_SPACE_EFW_COMMAND 0xecc000000000ULL 35 + #define MEMORY_SPACE_EFW_RESPONSE 0xecc080000000ULL 36 + 37 + #define ERROR_RETRIES 3 38 + #define ERROR_DELAY_MS 5 39 + #define EFC_TIMEOUT_MS 125 40 + 41 + static DEFINE_SPINLOCK(instances_lock); 42 + static struct snd_efw *instances[SNDRV_CARDS] = SNDRV_DEFAULT_PTR; 43 + 44 + static DEFINE_SPINLOCK(transaction_queues_lock); 45 + static LIST_HEAD(transaction_queues); 46 + 47 + enum transaction_queue_state { 48 + STATE_PENDING, 49 + STATE_BUS_RESET, 50 + STATE_COMPLETE 51 + }; 52 + 53 + struct transaction_queue { 54 + struct list_head list; 55 + struct fw_unit *unit; 56 + void *buf; 57 + unsigned int size; 58 + u32 seqnum; 59 + enum transaction_queue_state state; 60 + wait_queue_head_t wait; 61 + }; 62 + 63 + int snd_efw_transaction_cmd(struct fw_unit *unit, 64 + const void *cmd, unsigned int size) 65 + { 66 + return snd_fw_transaction(unit, TCODE_WRITE_BLOCK_REQUEST, 67 + MEMORY_SPACE_EFW_COMMAND, 68 + (void *)cmd, size, 0); 69 + } 70 + 71 + int snd_efw_transaction_run(struct fw_unit *unit, 72 + const void *cmd, unsigned int cmd_size, 73 + void *resp, unsigned int resp_size) 74 + { 75 + struct transaction_queue t; 76 + unsigned int tries; 77 + int ret; 78 + 79 + t.unit = unit; 80 + t.buf = resp; 81 + t.size = resp_size; 82 + t.seqnum = be32_to_cpu(((struct snd_efw_transaction *)cmd)->seqnum) + 1; 83 + t.state = STATE_PENDING; 84 + init_waitqueue_head(&t.wait); 85 + 86 + spin_lock_irq(&transaction_queues_lock); 87 + list_add_tail(&t.list, &transaction_queues); 88 + spin_unlock_irq(&transaction_queues_lock); 89 + 90 + tries = 0; 91 + do { 92 + ret = snd_efw_transaction_cmd(t.unit, (void *)cmd, cmd_size); 93 + if (ret < 0) 94 + break; 95 + 96 + wait_event_timeout(t.wait, t.state != STATE_PENDING, 97 + msecs_to_jiffies(EFC_TIMEOUT_MS)); 98 + 99 + if (t.state == STATE_COMPLETE) { 100 + ret = t.size; 101 + break; 102 + } else if (t.state == STATE_BUS_RESET) { 103 + 
msleep(ERROR_DELAY_MS); 104 + } else if (++tries >= ERROR_RETRIES) { 105 + dev_err(&t.unit->device, "EFW transaction timed out\n"); 106 + ret = -EIO; 107 + break; 108 + } 109 + } while (1); 110 + 111 + spin_lock_irq(&transaction_queues_lock); 112 + list_del(&t.list); 113 + spin_unlock_irq(&transaction_queues_lock); 114 + 115 + return ret; 116 + } 117 + 118 + static void 119 + copy_resp_to_buf(struct snd_efw *efw, void *data, size_t length, int *rcode) 120 + { 121 + size_t capacity, till_end; 122 + struct snd_efw_transaction *t; 123 + 124 + spin_lock_irq(&efw->lock); 125 + 126 + t = (struct snd_efw_transaction *)data; 127 + length = min_t(size_t, t->length * sizeof(t->length), length); 128 + 129 + if (efw->push_ptr < efw->pull_ptr) 130 + capacity = (unsigned int)(efw->pull_ptr - efw->push_ptr); 131 + else 132 + capacity = snd_efw_resp_buf_size - 133 + (unsigned int)(efw->push_ptr - efw->pull_ptr); 134 + 135 + /* confirm enough space for this response */ 136 + if (capacity < length) { 137 + *rcode = RCODE_CONFLICT_ERROR; 138 + goto end; 139 + } 140 + 141 + /* copy to ring buffer */ 142 + while (length > 0) { 143 + till_end = snd_efw_resp_buf_size - 144 + (unsigned int)(efw->push_ptr - efw->resp_buf); 145 + till_end = min_t(unsigned int, length, till_end); 146 + 147 + memcpy(efw->push_ptr, data, till_end); 148 + 149 + efw->push_ptr += till_end; 150 + if (efw->push_ptr >= efw->resp_buf + snd_efw_resp_buf_size) 151 + efw->push_ptr = efw->resp_buf; 152 + 153 + length -= till_end; 154 + data += till_end; 155 + } 156 + 157 + /* for hwdep */ 158 + efw->resp_queues++; 159 + wake_up(&efw->hwdep_wait); 160 + 161 + *rcode = RCODE_COMPLETE; 162 + end: 163 + spin_unlock_irq(&efw->lock); 164 + } 165 + 166 + static void 167 + handle_resp_for_user(struct fw_card *card, int generation, int source, 168 + void *data, size_t length, int *rcode) 169 + { 170 + struct fw_device *device; 171 + struct snd_efw *efw; 172 + unsigned int i; 173 + 174 + spin_lock_irq(&instances_lock); 175 + 176 + 
for (i = 0; i < SNDRV_CARDS; i++) { 177 + efw = instances[i]; 178 + if (efw == NULL) 179 + continue; 180 + device = fw_parent_device(efw->unit); 181 + if ((device->card != card) || 182 + (device->generation != generation)) 183 + continue; 184 + smp_rmb(); /* node id vs. generation */ 185 + if (device->node_id != source) 186 + continue; 187 + 188 + break; 189 + } 190 + if (i == SNDRV_CARDS) 191 + goto end; 192 + 193 + copy_resp_to_buf(efw, data, length, rcode); 194 + end: 195 + spin_unlock_irq(&instances_lock); 196 + } 197 + 198 + static void 199 + handle_resp_for_kernel(struct fw_card *card, int generation, int source, 200 + void *data, size_t length, int *rcode, u32 seqnum) 201 + { 202 + struct fw_device *device; 203 + struct transaction_queue *t; 204 + unsigned long flags; 205 + 206 + spin_lock_irqsave(&transaction_queues_lock, flags); 207 + list_for_each_entry(t, &transaction_queues, list) { 208 + device = fw_parent_device(t->unit); 209 + if ((device->card != card) || 210 + (device->generation != generation)) 211 + continue; 212 + smp_rmb(); /* node_id vs. 
generation */ 213 + if (device->node_id != source) 214 + continue; 215 + 216 + if ((t->state == STATE_PENDING) && (t->seqnum == seqnum)) { 217 + t->state = STATE_COMPLETE; 218 + t->size = min_t(unsigned int, length, t->size); 219 + memcpy(t->buf, data, t->size); 220 + wake_up(&t->wait); 221 + *rcode = RCODE_COMPLETE; 222 + } 223 + } 224 + spin_unlock_irqrestore(&transaction_queues_lock, flags); 225 + } 226 + 227 + static void 228 + efw_response(struct fw_card *card, struct fw_request *request, 229 + int tcode, int destination, int source, 230 + int generation, unsigned long long offset, 231 + void *data, size_t length, void *callback_data) 232 + { 233 + int rcode, dummy; 234 + u32 seqnum; 235 + 236 + rcode = RCODE_TYPE_ERROR; 237 + if (length < sizeof(struct snd_efw_transaction)) { 238 + rcode = RCODE_DATA_ERROR; 239 + goto end; 240 + } else if (offset != MEMORY_SPACE_EFW_RESPONSE) { 241 + rcode = RCODE_ADDRESS_ERROR; 242 + goto end; 243 + } 244 + 245 + seqnum = be32_to_cpu(((struct snd_efw_transaction *)data)->seqnum); 246 + if (seqnum > SND_EFW_TRANSACTION_USER_SEQNUM_MAX + 1) { 247 + handle_resp_for_kernel(card, generation, source, 248 + data, length, &rcode, seqnum); 249 + if (snd_efw_resp_buf_debug) 250 + handle_resp_for_user(card, generation, source, 251 + data, length, &dummy); 252 + } else { 253 + handle_resp_for_user(card, generation, source, 254 + data, length, &rcode); 255 + } 256 + end: 257 + fw_send_response(card, request, rcode); 258 + } 259 + 260 + void snd_efw_transaction_add_instance(struct snd_efw *efw) 261 + { 262 + unsigned int i; 263 + 264 + spin_lock_irq(&instances_lock); 265 + 266 + for (i = 0; i < SNDRV_CARDS; i++) { 267 + if (instances[i] != NULL) 268 + continue; 269 + instances[i] = efw; 270 + break; 271 + } 272 + 273 + spin_unlock_irq(&instances_lock); 274 + } 275 + 276 + void snd_efw_transaction_remove_instance(struct snd_efw *efw) 277 + { 278 + unsigned int i; 279 + 280 + spin_lock_irq(&instances_lock); 281 + 282 + for (i = 0; i < 
SNDRV_CARDS; i++) { 283 + if (instances[i] != efw) 284 + continue; 285 + instances[i] = NULL; 286 + } 287 + 288 + spin_unlock_irq(&instances_lock); 289 + } 290 + 291 + void snd_efw_transaction_bus_reset(struct fw_unit *unit) 292 + { 293 + struct transaction_queue *t; 294 + 295 + spin_lock_irq(&transaction_queues_lock); 296 + list_for_each_entry(t, &transaction_queues, list) { 297 + if ((t->unit == unit) && 298 + (t->state == STATE_PENDING)) { 299 + t->state = STATE_BUS_RESET; 300 + wake_up(&t->wait); 301 + } 302 + } 303 + spin_unlock_irq(&transaction_queues_lock); 304 + } 305 + 306 + static struct fw_address_handler resp_register_handler = { 307 + .length = SND_EFW_RESPONSE_MAXIMUM_BYTES, 308 + .address_callback = efw_response 309 + }; 310 + 311 + int snd_efw_transaction_register(void) 312 + { 313 + static const struct fw_address_region resp_register_region = { 314 + .start = MEMORY_SPACE_EFW_RESPONSE, 315 + .end = MEMORY_SPACE_EFW_RESPONSE + 316 + SND_EFW_RESPONSE_MAXIMUM_BYTES 317 + }; 318 + return fw_core_add_address_handler(&resp_register_handler, 319 + &resp_register_region); 320 + } 321 + 322 + void snd_efw_transaction_unregister(void) 323 + { 324 + WARN_ON(!list_empty(&transaction_queues)); 325 + fw_core_remove_address_handler(&resp_register_handler); 326 + }
+31 -69
sound/firewire/speakers.c
··· 51 51 const struct device_info *device_info; 52 52 struct mutex mutex; 53 53 struct cmp_connection connection; 54 - struct amdtp_out_stream stream; 54 + struct amdtp_stream stream; 55 55 bool mute; 56 56 s16 volume[6]; 57 57 s16 volume_min; ··· 167 167 if (err < 0) 168 168 return err; 169 169 170 - err = snd_pcm_hw_constraint_minmax(runtime, 171 - SNDRV_PCM_HW_PARAM_PERIOD_TIME, 172 - 5000, UINT_MAX); 173 - if (err < 0) 174 - return err; 175 - 176 - err = snd_pcm_hw_constraint_msbits(runtime, 0, 32, 24); 170 + err = amdtp_stream_add_pcm_hw_constraints(&fwspk->stream, runtime); 177 171 if (err < 0) 178 172 return err; 179 173 ··· 181 187 182 188 static void fwspk_stop_stream(struct fwspk *fwspk) 183 189 { 184 - if (amdtp_out_stream_running(&fwspk->stream)) { 185 - amdtp_out_stream_stop(&fwspk->stream); 190 + if (amdtp_stream_running(&fwspk->stream)) { 191 + amdtp_stream_stop(&fwspk->stream); 186 192 cmp_connection_break(&fwspk->connection); 187 193 } 188 - } 189 - 190 - static int fwspk_set_rate(struct fwspk *fwspk, unsigned int sfc) 191 - { 192 - u8 *buf; 193 - int err; 194 - 195 - buf = kmalloc(8, GFP_KERNEL); 196 - if (!buf) 197 - return -ENOMEM; 198 - 199 - buf[0] = 0x00; /* AV/C, CONTROL */ 200 - buf[1] = 0xff; /* unit */ 201 - buf[2] = 0x19; /* INPUT PLUG SIGNAL FORMAT */ 202 - buf[3] = 0x00; /* plug 0 */ 203 - buf[4] = 0x90; /* format: audio */ 204 - buf[5] = 0x00 | sfc; /* AM824, frequency */ 205 - buf[6] = 0xff; /* SYT (not used) */ 206 - buf[7] = 0xff; 207 - 208 - err = fcp_avc_transaction(fwspk->unit, buf, 8, buf, 8, 209 - BIT(1) | BIT(2) | BIT(3) | BIT(4) | BIT(5)); 210 - if (err < 0) 211 - goto error; 212 - if (err < 6 || buf[0] != 0x09 /* ACCEPTED */) { 213 - dev_err(&fwspk->unit->device, "failed to set sample rate\n"); 214 - err = -EIO; 215 - goto error; 216 - } 217 - 218 - err = 0; 219 - 220 - error: 221 - kfree(buf); 222 - 223 - return err; 224 194 } 225 195 226 196 static int fwspk_hw_params(struct snd_pcm_substream *substream, ··· 202 244 if 
(err < 0) 203 245 goto error; 204 246 205 - amdtp_out_stream_set_parameters(&fwspk->stream, 206 - params_rate(hw_params), 207 - params_channels(hw_params), 208 - 0); 247 + amdtp_stream_set_parameters(&fwspk->stream, 248 + params_rate(hw_params), 249 + params_channels(hw_params), 250 + 0); 209 251 210 - amdtp_out_stream_set_pcm_format(&fwspk->stream, 211 - params_format(hw_params)); 252 + amdtp_stream_set_pcm_format(&fwspk->stream, 253 + params_format(hw_params)); 212 254 213 - err = fwspk_set_rate(fwspk, fwspk->stream.sfc); 214 - if (err < 0) 255 + err = avc_general_set_sig_fmt(fwspk->unit, params_rate(hw_params), 256 + AVC_GENERAL_PLUG_DIR_IN, 0); 257 + if (err < 0) { 258 + dev_err(&fwspk->unit->device, "failed to set sample rate\n"); 215 259 goto err_buffer; 260 + } 216 261 217 262 return 0; 218 263 ··· 243 282 244 283 mutex_lock(&fwspk->mutex); 245 284 246 - if (amdtp_out_streaming_error(&fwspk->stream)) 285 + if (amdtp_streaming_error(&fwspk->stream)) 247 286 fwspk_stop_stream(fwspk); 248 287 249 - if (!amdtp_out_stream_running(&fwspk->stream)) { 288 + if (!amdtp_stream_running(&fwspk->stream)) { 250 289 err = cmp_connection_establish(&fwspk->connection, 251 - amdtp_out_stream_get_max_payload(&fwspk->stream)); 290 + amdtp_stream_get_max_payload(&fwspk->stream)); 252 291 if (err < 0) 253 292 goto err_mutex; 254 293 255 - err = amdtp_out_stream_start(&fwspk->stream, 256 - fwspk->connection.resources.channel, 257 - fwspk->connection.speed); 294 + err = amdtp_stream_start(&fwspk->stream, 295 + fwspk->connection.resources.channel, 296 + fwspk->connection.speed); 258 297 if (err < 0) 259 298 goto err_connection; 260 299 } 261 300 262 301 mutex_unlock(&fwspk->mutex); 263 302 264 - amdtp_out_stream_pcm_prepare(&fwspk->stream); 303 + amdtp_stream_pcm_prepare(&fwspk->stream); 265 304 266 305 return 0; 267 306 ··· 288 327 default: 289 328 return -EINVAL; 290 329 } 291 - amdtp_out_stream_pcm_trigger(&fwspk->stream, pcm); 330 + amdtp_stream_pcm_trigger(&fwspk->stream, pcm); 
292 331 return 0; 293 332 } 294 333 ··· 296 335 { 297 336 struct fwspk *fwspk = substream->private_data; 298 337 299 - return amdtp_out_stream_pcm_pointer(&fwspk->stream); 338 + return amdtp_stream_pcm_pointer(&fwspk->stream); 300 339 } 301 340 302 341 static int fwspk_create_pcm(struct fwspk *fwspk) ··· 614 653 { 615 654 struct fwspk *fwspk = card->private_data; 616 655 617 - amdtp_out_stream_destroy(&fwspk->stream); 656 + amdtp_stream_destroy(&fwspk->stream); 618 657 cmp_connection_destroy(&fwspk->connection); 619 658 fw_unit_put(fwspk->unit); 620 659 mutex_destroy(&fwspk->mutex); ··· 640 679 fwspk->unit = fw_unit_get(unit); 641 680 fwspk->device_info = (const struct device_info *)id->driver_data; 642 681 643 - err = cmp_connection_init(&fwspk->connection, unit, 0); 682 + err = cmp_connection_init(&fwspk->connection, unit, CMP_INPUT, 0); 644 683 if (err < 0) 645 684 goto err_unit; 646 685 647 - err = amdtp_out_stream_init(&fwspk->stream, unit, CIP_NONBLOCKING); 686 + err = amdtp_stream_init(&fwspk->stream, unit, AMDTP_OUT_STREAM, 687 + CIP_NONBLOCKING); 648 688 if (err < 0) 649 689 goto err_connection; 650 690 ··· 695 733 fcp_bus_reset(fwspk->unit); 696 734 697 735 if (cmp_connection_update(&fwspk->connection) < 0) { 698 - amdtp_out_stream_pcm_abort(&fwspk->stream); 736 + amdtp_stream_pcm_abort(&fwspk->stream); 699 737 mutex_lock(&fwspk->mutex); 700 738 fwspk_stop_stream(fwspk); 701 739 mutex_unlock(&fwspk->mutex); 702 740 return; 703 741 } 704 742 705 - amdtp_out_stream_update(&fwspk->stream); 743 + amdtp_stream_update(&fwspk->stream); 706 744 } 707 745 708 746 static void fwspk_remove(struct fw_unit *unit) 709 747 { 710 748 struct fwspk *fwspk = dev_get_drvdata(&unit->device); 711 749 712 - amdtp_out_stream_pcm_abort(&fwspk->stream); 750 + amdtp_stream_pcm_abort(&fwspk->stream); 713 751 snd_card_disconnect(fwspk->card); 714 752 715 753 mutex_lock(&fwspk->mutex);
+8 -6
sound/isa/sb/sb_mixer.c
··· 818 818 return err; 819 819 break; 820 820 case SB_HW_DT019X: 821 - if ((err = snd_sbmixer_init(chip, 822 - snd_dt019x_controls, 823 - ARRAY_SIZE(snd_dt019x_controls), 824 - snd_dt019x_init_values, 825 - ARRAY_SIZE(snd_dt019x_init_values), 826 - "DT019X")) < 0) 821 + err = snd_sbmixer_init(chip, 822 + snd_dt019x_controls, 823 + ARRAY_SIZE(snd_dt019x_controls), 824 + snd_dt019x_init_values, 825 + ARRAY_SIZE(snd_dt019x_init_values), 826 + "DT019X"); 827 + if (err < 0) 828 + return err; 827 829 break; 828 830 default: 829 831 strcpy(card->mixername, "???");
+6
sound/pci/hda/hda_intel.c
··· 1367 1367 /* initialize streams */ 1368 1368 azx_init_stream(chip); 1369 1369 1370 + /* workaround for Broadwell HDMI: the first stream is broken, 1371 + * so mask it by keeping it as if opened 1372 + */ 1373 + if (pci->vendor == 0x8086 && pci->device == 0x160c) 1374 + chip->azx_dev[0].opened = 1; 1375 + 1370 1376 /* initialize chip */ 1371 1377 azx_init_pci(chip); 1372 1378 azx_init_chip(chip, (probe_only[dev] & 2) == 0);
+2
sound/pci/hda/patch_hdmi.c
··· 3329 3329 { .id = 0x10de0051, .name = "GPU 51 HDMI/DP", .patch = patch_nvhdmi }, 3330 3330 { .id = 0x10de0060, .name = "GPU 60 HDMI/DP", .patch = patch_nvhdmi }, 3331 3331 { .id = 0x10de0067, .name = "MCP67 HDMI", .patch = patch_nvhdmi_2ch }, 3332 + { .id = 0x10de0071, .name = "GPU 71 HDMI/DP", .patch = patch_nvhdmi }, 3332 3333 { .id = 0x10de8001, .name = "MCP73 HDMI", .patch = patch_nvhdmi_2ch }, 3333 3334 { .id = 0x11069f80, .name = "VX900 HDMI/DP", .patch = patch_via_hdmi }, 3334 3335 { .id = 0x11069f81, .name = "VX900 HDMI/DP", .patch = patch_via_hdmi }, ··· 3386 3385 MODULE_ALIAS("snd-hda-codec-id:10de0051"); 3387 3386 MODULE_ALIAS("snd-hda-codec-id:10de0060"); 3388 3387 MODULE_ALIAS("snd-hda-codec-id:10de0067"); 3388 + MODULE_ALIAS("snd-hda-codec-id:10de0071"); 3389 3389 MODULE_ALIAS("snd-hda-codec-id:10de8001"); 3390 3390 MODULE_ALIAS("snd-hda-codec-id:11069f80"); 3391 3391 MODULE_ALIAS("snd-hda-codec-id:11069f81");
+3
sound/pci/hda/patch_realtek.c
··· 4760 4760 SND_PCI_QUIRK(0x1028, 0x0653, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE), 4761 4761 SND_PCI_QUIRK(0x1028, 0x0657, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE), 4762 4762 SND_PCI_QUIRK(0x1028, 0x0658, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE), 4763 + SND_PCI_QUIRK(0x1028, 0x065c, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE), 4763 4764 SND_PCI_QUIRK(0x1028, 0x065f, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE), 4764 4765 SND_PCI_QUIRK(0x1028, 0x0662, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE), 4765 4766 SND_PCI_QUIRK(0x1028, 0x0667, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE), ··· 4769 4768 SND_PCI_QUIRK(0x1028, 0x0674, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE), 4770 4769 SND_PCI_QUIRK(0x1028, 0x067e, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE), 4771 4770 SND_PCI_QUIRK(0x1028, 0x067f, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE), 4771 + SND_PCI_QUIRK(0x1028, 0x0680, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE), 4772 + SND_PCI_QUIRK(0x1028, 0x0684, "Dell", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE), 4772 4773 SND_PCI_QUIRK(0x1028, 0x15cc, "Dell X5 Precision", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE), 4773 4774 SND_PCI_QUIRK(0x1028, 0x15cd, "Dell X5 Precision", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE), 4774 4775 SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),