Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'sh/dmaengine'

Conflicts:
arch/sh/drivers/dma/dma-sh.c

+922 -562
+6 -6
Documentation/ABI/testing/ima_policy
··· 20 20 lsm: [[subj_user=] [subj_role=] [subj_type=] 21 21 [obj_user=] [obj_role=] [obj_type=]] 22 22 23 - base: func:= [BPRM_CHECK][FILE_MMAP][INODE_PERMISSION] 23 + base: func:= [BPRM_CHECK][FILE_MMAP][FILE_CHECK] 24 24 mask:= [MAY_READ] [MAY_WRITE] [MAY_APPEND] [MAY_EXEC] 25 25 fsmagic:= hex value 26 26 uid:= decimal value ··· 40 40 41 41 measure func=BPRM_CHECK 42 42 measure func=FILE_MMAP mask=MAY_EXEC 43 - measure func=INODE_PERM mask=MAY_READ uid=0 43 + measure func=FILE_CHECK mask=MAY_READ uid=0 44 44 45 45 The default policy measures all executables in bprm_check, 46 46 all files mmapped executable in file_mmap, and all files 47 - open for read by root in inode_permission. 47 + open for read by root in do_filp_open. 48 48 49 49 Examples of LSM specific definitions: 50 50 ··· 54 54 55 55 dont_measure obj_type=var_log_t 56 56 dont_measure obj_type=auditd_log_t 57 - measure subj_user=system_u func=INODE_PERM mask=MAY_READ 58 - measure subj_role=system_r func=INODE_PERM mask=MAY_READ 57 + measure subj_user=system_u func=FILE_CHECK mask=MAY_READ 58 + measure subj_role=system_r func=FILE_CHECK mask=MAY_READ 59 59 60 60 Smack: 61 - measure subj_user=_ func=INODE_PERM mask=MAY_READ 61 + measure subj_user=_ func=FILE_CHECK mask=MAY_READ
+1 -1
Makefile
··· 1 1 VERSION = 2 2 2 PATCHLEVEL = 6 3 3 SUBLEVEL = 33 4 - EXTRAVERSION = -rc6 4 + EXTRAVERSION = -rc7 5 5 NAME = Man-Eating Seals of Antiquity 6 6 7 7 # *DOCUMENTATION*
+4 -1
arch/sh/drivers/dma/dma-sh.c
··· 52 52 * 53 53 * iterations to complete the transfer. 54 54 */ 55 + static unsigned int ts_shift[] = TS_SHIFT; 55 56 static inline unsigned int calc_xmit_shift(struct dma_channel *chan) 56 57 { 57 58 u32 chcr = __raw_readl(dma_base_addr[chan->chan] + CHCR); 59 + int cnt = ((chcr & CHCR_TS_LOW_MASK) >> CHCR_TS_LOW_SHIFT) | 60 + ((chcr & CHCR_TS_HIGH_MASK) >> CHCR_TS_HIGH_SHIFT); 58 61 59 - return ts_shift[(chcr & CHCR_TS_MASK)>>CHCR_TS_SHIFT]; 62 + return ts_shift[cnt]; 60 63 } 61 64 62 65 /*
+41 -2
arch/sh/include/asm/dma-sh.h
··· 64 64 #define ACK_L 0x00010000 65 65 #define DM_INC 0x00004000 66 66 #define DM_DEC 0x00008000 67 + #define DM_FIX 0x0000c000 67 68 #define SM_INC 0x00001000 68 69 #define SM_DEC 0x00002000 70 + #define SM_FIX 0x00003000 69 71 #define RS_IN 0x00000200 70 72 #define RS_OUT 0x00000300 71 73 #define TS_BLK 0x00000040 ··· 85 83 * Define the default configuration for dual address memory-memory transfer. 86 84 * The 0x400 value represents auto-request, external->external. 87 85 */ 88 - #define RS_DUAL (DM_INC | SM_INC | 0x400 | TS_32) 86 + #define RS_DUAL (DM_INC | SM_INC | 0x400 | TS_INDEX2VAL(XMIT_SZ_32BIT)) 89 87 90 88 /* DMA base address */ 91 89 static u32 dma_base_addr[] __maybe_unused = { ··· 125 123 */ 126 124 #define SHDMA_MIX_IRQ (1 << 1) 127 125 #define SHDMA_DMAOR1 (1 << 2) 128 - #define SHDMA_DMAE1 (1 << 3) 126 + #define SHDMA_DMAE1 (1 << 3) 127 + 128 + enum sh_dmae_slave_chan_id { 129 + SHDMA_SLAVE_SCIF0_TX, 130 + SHDMA_SLAVE_SCIF0_RX, 131 + SHDMA_SLAVE_SCIF1_TX, 132 + SHDMA_SLAVE_SCIF1_RX, 133 + SHDMA_SLAVE_SCIF2_TX, 134 + SHDMA_SLAVE_SCIF2_RX, 135 + SHDMA_SLAVE_SCIF3_TX, 136 + SHDMA_SLAVE_SCIF3_RX, 137 + SHDMA_SLAVE_SCIF4_TX, 138 + SHDMA_SLAVE_SCIF4_RX, 139 + SHDMA_SLAVE_SCIF5_TX, 140 + SHDMA_SLAVE_SCIF5_RX, 141 + SHDMA_SLAVE_SIUA_TX, 142 + SHDMA_SLAVE_SIUA_RX, 143 + SHDMA_SLAVE_SIUB_TX, 144 + SHDMA_SLAVE_SIUB_RX, 145 + SHDMA_SLAVE_NUMBER, /* Must stay last */ 146 + }; 147 + 148 + struct sh_dmae_slave_config { 149 + enum sh_dmae_slave_chan_id slave_id; 150 + dma_addr_t addr; 151 + u32 chcr; 152 + char mid_rid; 153 + }; 129 154 130 155 struct sh_dmae_pdata { 131 156 unsigned int mode; 157 + struct sh_dmae_slave_config *config; 158 + int config_num; 159 + }; 160 + 161 + struct device; 162 + 163 + struct sh_dmae_slave { 164 + enum sh_dmae_slave_chan_id slave_id; /* Set by the platform */ 165 + struct device *dma_dev; /* Set by the platform */ 166 + struct sh_dmae_slave_config *config; /* Set by the driver */ 132 167 }; 133 168 134 169 #endif /* 
__DMA_SH_H */
+12 -8
arch/sh/include/cpu-sh3/cpu/dma.h
··· 20 20 #define TS_32 0x00000010 21 21 #define TS_128 0x00000018 22 22 23 - #define CHCR_TS_MASK 0x18 24 - #define CHCR_TS_SHIFT 3 23 + #define CHCR_TS_LOW_MASK 0x18 24 + #define CHCR_TS_LOW_SHIFT 3 25 + #define CHCR_TS_HIGH_MASK 0 26 + #define CHCR_TS_HIGH_SHIFT 0 25 27 26 28 #define DMAOR_INIT DMAOR_DME 27 29 ··· 38 36 XMIT_SZ_128BIT, 39 37 }; 40 38 41 - static unsigned int ts_shift[] __maybe_unused = { 42 - [XMIT_SZ_8BIT] = 0, 43 - [XMIT_SZ_16BIT] = 1, 44 - [XMIT_SZ_32BIT] = 2, 45 - [XMIT_SZ_128BIT] = 4, 46 - }; 39 + #define TS_SHIFT { \ 40 + [XMIT_SZ_8BIT] = 0, \ 41 + [XMIT_SZ_16BIT] = 1, \ 42 + [XMIT_SZ_32BIT] = 2, \ 43 + [XMIT_SZ_128BIT] = 4, \ 44 + } 45 + 46 + #define TS_INDEX2VAL(i) (((i) & 3) << CHCR_TS_LOW_SHIFT) 47 47 48 48 #endif /* __ASM_CPU_SH3_DMA_H */
+79 -29
arch/sh/include/cpu-sh4/cpu/dma-sh4a.h
··· 2 2 #define __ASM_SH_CPU_SH4_DMA_SH7780_H 3 3 4 4 #if defined(CONFIG_CPU_SUBTYPE_SH7343) || \ 5 - defined(CONFIG_CPU_SUBTYPE_SH7722) || \ 6 5 defined(CONFIG_CPU_SUBTYPE_SH7730) 7 6 #define DMTE0_IRQ 48 8 7 #define DMTE4_IRQ 76 9 8 #define DMAE0_IRQ 78 /* DMA Error IRQ*/ 10 9 #define SH_DMAC_BASE0 0xFE008020 11 - #define SH_DMARS_BASE 0xFE009000 10 + #define SH_DMARS_BASE0 0xFE009000 11 + #define CHCR_TS_LOW_MASK 0x00000018 12 + #define CHCR_TS_LOW_SHIFT 3 13 + #define CHCR_TS_HIGH_MASK 0 14 + #define CHCR_TS_HIGH_SHIFT 0 15 + #elif defined(CONFIG_CPU_SUBTYPE_SH7722) 16 + #define DMTE0_IRQ 48 17 + #define DMTE4_IRQ 76 18 + #define DMAE0_IRQ 78 /* DMA Error IRQ*/ 19 + #define SH_DMAC_BASE0 0xFE008020 20 + #define SH_DMARS_BASE0 0xFE009000 21 + #define CHCR_TS_LOW_MASK 0x00000018 22 + #define CHCR_TS_LOW_SHIFT 3 23 + #define CHCR_TS_HIGH_MASK 0x00300000 24 + #define CHCR_TS_HIGH_SHIFT 20 12 25 #elif defined(CONFIG_CPU_SUBTYPE_SH7763) || \ 13 26 defined(CONFIG_CPU_SUBTYPE_SH7764) 14 27 #define DMTE0_IRQ 34 15 28 #define DMTE4_IRQ 44 16 29 #define DMAE0_IRQ 38 17 30 #define SH_DMAC_BASE0 0xFF608020 18 - #define SH_DMARS_BASE 0xFF609000 19 - #elif defined(CONFIG_CPU_SUBTYPE_SH7723) || \ 20 - defined(CONFIG_CPU_SUBTYPE_SH7724) 31 + #define SH_DMARS_BASE0 0xFF609000 32 + #define CHCR_TS_LOW_MASK 0x00000018 33 + #define CHCR_TS_LOW_SHIFT 3 34 + #define CHCR_TS_HIGH_MASK 0 35 + #define CHCR_TS_HIGH_SHIFT 0 36 + #elif defined(CONFIG_CPU_SUBTYPE_SH7723) 21 37 #define DMTE0_IRQ 48 /* DMAC0A*/ 22 38 #define DMTE4_IRQ 76 /* DMAC0B */ 23 39 #define DMTE6_IRQ 40 ··· 45 29 #define DMAE1_IRQ 74 /* DMA Error IRQ*/ 46 30 #define SH_DMAC_BASE0 0xFE008020 47 31 #define SH_DMAC_BASE1 0xFDC08020 48 - #define SH_DMARS_BASE 0xFDC09000 32 + #define SH_DMARS_BASE0 0xFDC09000 33 + #define CHCR_TS_LOW_MASK 0x00000018 34 + #define CHCR_TS_LOW_SHIFT 3 35 + #define CHCR_TS_HIGH_MASK 0 36 + #define CHCR_TS_HIGH_SHIFT 0 37 + #elif defined(CONFIG_CPU_SUBTYPE_SH7724) 38 + #define DMTE0_IRQ 48 /* 
DMAC0A*/ 39 + #define DMTE4_IRQ 76 /* DMAC0B */ 40 + #define DMTE6_IRQ 40 41 + #define DMTE8_IRQ 42 /* DMAC1A */ 42 + #define DMTE9_IRQ 43 43 + #define DMTE10_IRQ 72 /* DMAC1B */ 44 + #define DMTE11_IRQ 73 45 + #define DMAE0_IRQ 78 /* DMA Error IRQ*/ 46 + #define DMAE1_IRQ 74 /* DMA Error IRQ*/ 47 + #define SH_DMAC_BASE0 0xFE008020 48 + #define SH_DMAC_BASE1 0xFDC08020 49 + #define SH_DMARS_BASE0 0xFE009000 50 + #define SH_DMARS_BASE1 0xFDC09000 51 + #define CHCR_TS_LOW_MASK 0x00000018 52 + #define CHCR_TS_LOW_SHIFT 3 53 + #define CHCR_TS_HIGH_MASK 0x00600000 54 + #define CHCR_TS_HIGH_SHIFT 21 49 55 #elif defined(CONFIG_CPU_SUBTYPE_SH7780) 50 56 #define DMTE0_IRQ 34 51 57 #define DMTE4_IRQ 44 ··· 79 41 #define DMAE0_IRQ 38 /* DMA Error IRQ */ 80 42 #define SH_DMAC_BASE0 0xFC808020 81 43 #define SH_DMAC_BASE1 0xFC818020 82 - #define SH_DMARS_BASE 0xFC809000 44 + #define SH_DMARS_BASE0 0xFC809000 45 + #define CHCR_TS_LOW_MASK 0x00000018 46 + #define CHCR_TS_LOW_SHIFT 3 47 + #define CHCR_TS_HIGH_MASK 0 48 + #define CHCR_TS_HIGH_SHIFT 0 83 49 #else /* SH7785 */ 84 50 #define DMTE0_IRQ 33 85 51 #define DMTE4_IRQ 37 ··· 96 54 #define DMAE1_IRQ 58 /* DMA Error IRQ1 */ 97 55 #define SH_DMAC_BASE0 0xFC808020 98 56 #define SH_DMAC_BASE1 0xFCC08020 99 - #define SH_DMARS_BASE 0xFC809000 57 + #define SH_DMARS_BASE0 0xFC809000 58 + #define CHCR_TS_LOW_MASK 0x00000018 59 + #define CHCR_TS_LOW_SHIFT 3 60 + #define CHCR_TS_HIGH_MASK 0 61 + #define CHCR_TS_HIGH_SHIFT 0 100 62 #endif 101 63 102 - #define REQ_HE 0x000000C0 103 - #define REQ_H 0x00000080 104 - #define REQ_LE 0x00000040 105 - #define TM_BURST 0x0000020 106 - #define TS_8 0x00000000 107 - #define TS_16 0x00000008 108 - #define TS_32 0x00000010 109 - #define TS_16BLK 0x00000018 110 - #define TS_32BLK 0x00100000 64 + #define REQ_HE 0x000000C0 65 + #define REQ_H 0x00000080 66 + #define REQ_LE 0x00000040 67 + #define TM_BURST 0x00000020 111 68 112 69 /* 113 70 * The SuperH DMAC supports a number of transmit sizes, we list 
them here, ··· 115 74 * Defaults to a 64-bit transfer size. 116 75 */ 117 76 enum { 118 - XMIT_SZ_8BIT, 119 - XMIT_SZ_16BIT, 120 - XMIT_SZ_32BIT, 121 - XMIT_SZ_128BIT, 122 - XMIT_SZ_256BIT, 77 + XMIT_SZ_8BIT = 0, 78 + XMIT_SZ_16BIT = 1, 79 + XMIT_SZ_32BIT = 2, 80 + XMIT_SZ_64BIT = 7, 81 + XMIT_SZ_128BIT = 3, 82 + XMIT_SZ_256BIT = 4, 83 + XMIT_SZ_128BIT_BLK = 0xb, 84 + XMIT_SZ_256BIT_BLK = 0xc, 123 85 }; 124 86 125 87 /* 126 88 * The DMA count is defined as the number of bytes to transfer. 127 89 */ 128 - static unsigned int ts_shift[] __maybe_unused = { 129 - [XMIT_SZ_8BIT] = 0, 130 - [XMIT_SZ_16BIT] = 1, 131 - [XMIT_SZ_32BIT] = 2, 132 - [XMIT_SZ_128BIT] = 4, 133 - [XMIT_SZ_256BIT] = 5, 134 - }; 90 + #define TS_SHIFT { \ 91 + [XMIT_SZ_8BIT] = 0, \ 92 + [XMIT_SZ_16BIT] = 1, \ 93 + [XMIT_SZ_32BIT] = 2, \ 94 + [XMIT_SZ_64BIT] = 3, \ 95 + [XMIT_SZ_128BIT] = 4, \ 96 + [XMIT_SZ_256BIT] = 5, \ 97 + [XMIT_SZ_128BIT_BLK] = 4, \ 98 + [XMIT_SZ_256BIT_BLK] = 5, \ 99 + } 100 + 101 + #define TS_INDEX2VAL(i) ((((i) & 3) << CHCR_TS_LOW_SHIFT) | \ 102 + ((((i) >> 2) & 3) << CHCR_TS_HIGH_SHIFT)) 135 103 136 104 #endif /* __ASM_SH_CPU_SH4_DMA_SH7780_H */
+19 -16
arch/sh/include/cpu-sh4/cpu/dma.h
··· 6 6 #ifdef CONFIG_CPU_SH4A 7 7 8 8 #define DMAOR_INIT (DMAOR_DME) 9 - #define CHCR_TS_MASK 0x18 10 - #define CHCR_TS_SHIFT 3 11 9 12 10 #include <cpu/dma-sh4a.h> 13 11 #else /* CONFIG_CPU_SH4A */ ··· 27 29 #define TS_32 0x00000030 28 30 #define TS_64 0x00000000 29 31 30 - #define CHCR_TS_MASK 0x70 31 - #define CHCR_TS_SHIFT 4 32 + #define CHCR_TS_LOW_MASK 0x70 33 + #define CHCR_TS_LOW_SHIFT 4 34 + #define CHCR_TS_HIGH_MASK 0 35 + #define CHCR_TS_HIGH_SHIFT 0 32 36 33 37 #define DMAOR_COD 0x00000008 34 38 ··· 41 41 * Defaults to a 64-bit transfer size. 42 42 */ 43 43 enum { 44 - XMIT_SZ_64BIT, 45 - XMIT_SZ_8BIT, 46 - XMIT_SZ_16BIT, 47 - XMIT_SZ_32BIT, 48 - XMIT_SZ_256BIT, 44 + XMIT_SZ_8BIT = 1, 45 + XMIT_SZ_16BIT = 2, 46 + XMIT_SZ_32BIT = 3, 47 + XMIT_SZ_64BIT = 0, 48 + XMIT_SZ_256BIT = 4, 49 49 }; 50 50 51 51 /* 52 52 * The DMA count is defined as the number of bytes to transfer. 53 53 */ 54 - static unsigned int ts_shift[] __maybe_unused = { 55 - [XMIT_SZ_64BIT] = 3, 56 - [XMIT_SZ_8BIT] = 0, 57 - [XMIT_SZ_16BIT] = 1, 58 - [XMIT_SZ_32BIT] = 2, 59 - [XMIT_SZ_256BIT] = 5, 60 - }; 54 + #define TS_SHIFT { \ 55 + [XMIT_SZ_8BIT] = 0, \ 56 + [XMIT_SZ_16BIT] = 1, \ 57 + [XMIT_SZ_32BIT] = 2, \ 58 + [XMIT_SZ_64BIT] = 3, \ 59 + [XMIT_SZ_256BIT] = 5, \ 60 + } 61 + 62 + #define TS_INDEX2VAL(i) (((i) & 7) << CHCR_TS_LOW_SHIFT) 63 + 61 64 #endif 62 65 63 66 #endif /* __ASM_CPU_SH4_DMA_H */
+15
drivers/ata/ahci.c
··· 2868 2868 }, 2869 2869 .driver_data = "F.23", /* cutoff BIOS version */ 2870 2870 }, 2871 + /* 2872 + * Acer eMachines G725 has the same problem. BIOS 2873 + * V1.03 is known to be broken. V3.04 is known to 2874 + * work. Inbetween, there are V1.06, V2.06 and V3.03 2875 + * that we don't have much idea about. For now, 2876 + * blacklist anything older than V3.04. 2877 + */ 2878 + { 2879 + .ident = "G725", 2880 + .matches = { 2881 + DMI_MATCH(DMI_SYS_VENDOR, "eMachines"), 2882 + DMI_MATCH(DMI_PRODUCT_NAME, "eMachines G725"), 2883 + }, 2884 + .driver_data = "V3.04", /* cutoff BIOS version */ 2885 + }, 2871 2886 { } /* terminate list */ 2872 2887 }; 2873 2888 const struct dmi_system_id *dmi = dmi_first_match(sysids);
+1 -1
drivers/ata/libata-scsi.c
··· 2875 2875 * write indication (used for PIO/DMA setup), result TF is 2876 2876 * copied back and we don't whine too much about its failure. 2877 2877 */ 2878 - tf->flags = ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; 2878 + tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; 2879 2879 if (scmd->sc_data_direction == DMA_TO_DEVICE) 2880 2880 tf->flags |= ATA_TFLAG_WRITE; 2881 2881
+3
drivers/ata/libata-sff.c
··· 893 893 do_write); 894 894 } 895 895 896 + if (!do_write) 897 + flush_dcache_page(page); 898 + 896 899 qc->curbytes += qc->sect_size; 897 900 qc->cursg_ofs += qc->sect_size; 898 901
+3 -1
drivers/char/tty_io.c
··· 1951 1951 pid = task_pid(current); 1952 1952 type = PIDTYPE_PID; 1953 1953 } 1954 - retval = __f_setown(filp, pid, type, 0); 1954 + get_pid(pid); 1955 1955 spin_unlock_irqrestore(&tty->ctrl_lock, flags); 1956 + retval = __f_setown(filp, pid, type, 0); 1957 + put_pid(pid); 1956 1958 if (retval) 1957 1959 goto out; 1958 1960 } else {
+294 -111
drivers/dma/shdma.c
··· 48 48 */ 49 49 #define RS_DEFAULT (RS_DUAL) 50 50 51 + /* A bitmask with bits enough for enum sh_dmae_slave_chan_id */ 52 + static unsigned long sh_dmae_slave_used[BITS_TO_LONGS(SHDMA_SLAVE_NUMBER)]; 53 + 51 54 static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all); 52 55 53 56 #define SH_DMAC_CHAN_BASE(id) (dma_base_addr[id]) 54 57 static void sh_dmae_writel(struct sh_dmae_chan *sh_dc, u32 data, u32 reg) 55 58 { 56 - ctrl_outl(data, (SH_DMAC_CHAN_BASE(sh_dc->id) + reg)); 59 + ctrl_outl(data, SH_DMAC_CHAN_BASE(sh_dc->id) + reg); 57 60 } 58 61 59 62 static u32 sh_dmae_readl(struct sh_dmae_chan *sh_dc, u32 reg) 60 63 { 61 - return ctrl_inl((SH_DMAC_CHAN_BASE(sh_dc->id) + reg)); 62 - } 63 - 64 - static void dmae_init(struct sh_dmae_chan *sh_chan) 65 - { 66 - u32 chcr = RS_DEFAULT; /* default is DUAL mode */ 67 - sh_dmae_writel(sh_chan, chcr, CHCR); 64 + return ctrl_inl(SH_DMAC_CHAN_BASE(sh_dc->id) + reg); 68 65 } 69 66 70 67 /* ··· 92 95 return 0; 93 96 } 94 97 95 - static int dmae_is_busy(struct sh_dmae_chan *sh_chan) 98 + static bool dmae_is_busy(struct sh_dmae_chan *sh_chan) 96 99 { 97 100 u32 chcr = sh_dmae_readl(sh_chan, CHCR); 98 - if (chcr & CHCR_DE) { 99 - if (!(chcr & CHCR_TE)) 100 - return -EBUSY; /* working */ 101 - } 102 - return 0; /* waiting */ 101 + 102 + if ((chcr & (CHCR_DE | CHCR_TE)) == CHCR_DE) 103 + return true; /* working */ 104 + 105 + return false; /* waiting */ 103 106 } 104 107 105 - static inline unsigned int calc_xmit_shift(struct sh_dmae_chan *sh_chan) 108 + static unsigned int ts_shift[] = TS_SHIFT; 109 + static inline unsigned int calc_xmit_shift(u32 chcr) 106 110 { 107 - u32 chcr = sh_dmae_readl(sh_chan, CHCR); 108 - return ts_shift[(chcr & CHCR_TS_MASK) >> CHCR_TS_SHIFT]; 111 + int cnt = ((chcr & CHCR_TS_LOW_MASK) >> CHCR_TS_LOW_SHIFT) | 112 + ((chcr & CHCR_TS_HIGH_MASK) >> CHCR_TS_HIGH_SHIFT); 113 + 114 + return ts_shift[cnt]; 109 115 } 110 116 111 117 static void dmae_set_reg(struct sh_dmae_chan *sh_chan, 
struct sh_dmae_regs *hw) 112 118 { 113 119 sh_dmae_writel(sh_chan, hw->sar, SAR); 114 120 sh_dmae_writel(sh_chan, hw->dar, DAR); 115 - sh_dmae_writel(sh_chan, hw->tcr >> calc_xmit_shift(sh_chan), TCR); 121 + sh_dmae_writel(sh_chan, hw->tcr >> sh_chan->xmit_shift, TCR); 116 122 } 117 123 118 124 static void dmae_start(struct sh_dmae_chan *sh_chan) ··· 123 123 u32 chcr = sh_dmae_readl(sh_chan, CHCR); 124 124 125 125 chcr |= CHCR_DE | CHCR_IE; 126 - sh_dmae_writel(sh_chan, chcr, CHCR); 126 + sh_dmae_writel(sh_chan, chcr & ~CHCR_TE, CHCR); 127 127 } 128 128 129 129 static void dmae_halt(struct sh_dmae_chan *sh_chan) ··· 134 134 sh_dmae_writel(sh_chan, chcr, CHCR); 135 135 } 136 136 137 + static void dmae_init(struct sh_dmae_chan *sh_chan) 138 + { 139 + u32 chcr = RS_DEFAULT; /* default is DUAL mode */ 140 + sh_chan->xmit_shift = calc_xmit_shift(chcr); 141 + sh_dmae_writel(sh_chan, chcr, CHCR); 142 + } 143 + 137 144 static int dmae_set_chcr(struct sh_dmae_chan *sh_chan, u32 val) 138 145 { 139 - int ret = dmae_is_busy(sh_chan); 140 146 /* When DMA was working, can not set data to CHCR */ 141 - if (ret) 142 - return ret; 147 + if (dmae_is_busy(sh_chan)) 148 + return -EBUSY; 143 149 150 + sh_chan->xmit_shift = calc_xmit_shift(val); 144 151 sh_dmae_writel(sh_chan, val, CHCR); 152 + 145 153 return 0; 146 154 } 147 155 148 - #define DMARS1_ADDR 0x04 149 - #define DMARS2_ADDR 0x08 150 - #define DMARS_SHIFT 8 151 - #define DMARS_CHAN_MSK 0x01 156 + #define DMARS_SHIFT 8 157 + #define DMARS_CHAN_MSK 0x01 152 158 static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val) 153 159 { 154 160 u32 addr; 155 161 int shift = 0; 156 - int ret = dmae_is_busy(sh_chan); 157 - if (ret) 158 - return ret; 162 + 163 + if (dmae_is_busy(sh_chan)) 164 + return -EBUSY; 159 165 160 166 if (sh_chan->id & DMARS_CHAN_MSK) 161 167 shift = DMARS_SHIFT; 162 168 163 - switch (sh_chan->id) { 164 - /* DMARS0 */ 165 - case 0: 166 - case 1: 167 - addr = SH_DMARS_BASE; 168 - break; 169 - /* DMARS1 */ 170 
- case 2: 171 - case 3: 172 - addr = (SH_DMARS_BASE + DMARS1_ADDR); 173 - break; 174 - /* DMARS2 */ 175 - case 4: 176 - case 5: 177 - addr = (SH_DMARS_BASE + DMARS2_ADDR); 178 - break; 179 - default: 169 + if (sh_chan->id < 6) 170 + /* DMA0RS0 - DMA0RS2 */ 171 + addr = SH_DMARS_BASE0 + (sh_chan->id / 2) * 4; 172 + #ifdef SH_DMARS_BASE1 173 + else if (sh_chan->id < 12) 174 + /* DMA1RS0 - DMA1RS2 */ 175 + addr = SH_DMARS_BASE1 + ((sh_chan->id - 6) / 2) * 4; 176 + #endif 177 + else 180 178 return -EINVAL; 181 - } 182 179 183 - ctrl_outw((val << shift) | 184 - (ctrl_inw(addr) & (shift ? 0xFF00 : 0x00FF)), 185 - addr); 180 + ctrl_outw((val << shift) | (ctrl_inw(addr) & (0xFF00 >> shift)), addr); 186 181 187 182 return 0; 188 183 } ··· 245 250 return NULL; 246 251 } 247 252 253 + static struct sh_dmae_slave_config *sh_dmae_find_slave( 254 + struct sh_dmae_chan *sh_chan, enum sh_dmae_slave_chan_id slave_id) 255 + { 256 + struct dma_device *dma_dev = sh_chan->common.device; 257 + struct sh_dmae_device *shdev = container_of(dma_dev, 258 + struct sh_dmae_device, common); 259 + struct sh_dmae_pdata *pdata = &shdev->pdata; 260 + int i; 261 + 262 + if ((unsigned)slave_id >= SHDMA_SLAVE_NUMBER) 263 + return NULL; 264 + 265 + for (i = 0; i < pdata->config_num; i++) 266 + if (pdata->config[i].slave_id == slave_id) 267 + return pdata->config + i; 268 + 269 + return NULL; 270 + } 271 + 248 272 static int sh_dmae_alloc_chan_resources(struct dma_chan *chan) 249 273 { 250 274 struct sh_dmae_chan *sh_chan = to_sh_chan(chan); 251 275 struct sh_desc *desc; 276 + struct sh_dmae_slave *param = chan->private; 277 + 278 + /* 279 + * This relies on the guarantee from dmaengine that alloc_chan_resources 280 + * never runs concurrently with itself or free_chan_resources. 
281 + */ 282 + if (param) { 283 + struct sh_dmae_slave_config *cfg; 284 + 285 + cfg = sh_dmae_find_slave(sh_chan, param->slave_id); 286 + if (!cfg) 287 + return -EINVAL; 288 + 289 + if (test_and_set_bit(param->slave_id, sh_dmae_slave_used)) 290 + return -EBUSY; 291 + 292 + param->config = cfg; 293 + 294 + dmae_set_dmars(sh_chan, cfg->mid_rid); 295 + dmae_set_chcr(sh_chan, cfg->chcr); 296 + } else { 297 + if ((sh_dmae_readl(sh_chan, CHCR) & 0x700) != 0x400) 298 + dmae_set_chcr(sh_chan, RS_DEFAULT); 299 + } 252 300 253 301 spin_lock_bh(&sh_chan->desc_lock); 254 302 while (sh_chan->descs_allocated < NR_DESCS_PER_CHANNEL) { ··· 324 286 struct sh_desc *desc, *_desc; 325 287 LIST_HEAD(list); 326 288 289 + dmae_halt(sh_chan); 290 + 327 291 /* Prepared and not submitted descriptors can still be on the queue */ 328 292 if (!list_empty(&sh_chan->ld_queue)) 329 293 sh_dmae_chan_ld_cleanup(sh_chan, true); 294 + 295 + if (chan->private) { 296 + /* The caller is holding dma_list_mutex */ 297 + struct sh_dmae_slave *param = chan->private; 298 + clear_bit(param->slave_id, sh_dmae_slave_used); 299 + } 330 300 331 301 spin_lock_bh(&sh_chan->desc_lock); 332 302 ··· 347 301 kfree(desc); 348 302 } 349 303 350 - static struct dma_async_tx_descriptor *sh_dmae_prep_memcpy( 351 - struct dma_chan *chan, dma_addr_t dma_dest, dma_addr_t dma_src, 352 - size_t len, unsigned long flags) 304 + /** 305 + * sh_dmae_add_desc - get, set up and return one transfer descriptor 306 + * @sh_chan: DMA channel 307 + * @flags: DMA transfer flags 308 + * @dest: destination DMA address, incremented when direction equals 309 + * DMA_FROM_DEVICE or DMA_BIDIRECTIONAL 310 + * @src: source DMA address, incremented when direction equals 311 + * DMA_TO_DEVICE or DMA_BIDIRECTIONAL 312 + * @len: DMA transfer length 313 + * @first: if NULL, set to the current descriptor and cookie set to -EBUSY 314 + * @direction: needed for slave DMA to decide which address to keep constant, 315 + * equals DMA_BIDIRECTIONAL for MEMCPY 
316 + * Returns 0 or an error 317 + * Locks: called with desc_lock held 318 + */ 319 + static struct sh_desc *sh_dmae_add_desc(struct sh_dmae_chan *sh_chan, 320 + unsigned long flags, dma_addr_t *dest, dma_addr_t *src, size_t *len, 321 + struct sh_desc **first, enum dma_data_direction direction) 353 322 { 354 - struct sh_dmae_chan *sh_chan; 355 - struct sh_desc *first = NULL, *prev = NULL, *new; 323 + struct sh_desc *new; 356 324 size_t copy_size; 325 + 326 + if (!*len) 327 + return NULL; 328 + 329 + /* Allocate the link descriptor from the free list */ 330 + new = sh_dmae_get_desc(sh_chan); 331 + if (!new) { 332 + dev_err(sh_chan->dev, "No free link descriptor available\n"); 333 + return NULL; 334 + } 335 + 336 + copy_size = min(*len, (size_t)SH_DMA_TCR_MAX + 1); 337 + 338 + new->hw.sar = *src; 339 + new->hw.dar = *dest; 340 + new->hw.tcr = copy_size; 341 + 342 + if (!*first) { 343 + /* First desc */ 344 + new->async_tx.cookie = -EBUSY; 345 + *first = new; 346 + } else { 347 + /* Other desc - invisible to the user */ 348 + new->async_tx.cookie = -EINVAL; 349 + } 350 + 351 + dev_dbg(sh_chan->dev, 352 + "chaining (%u/%u)@%x -> %x with %p, cookie %d, shift %d\n", 353 + copy_size, *len, *src, *dest, &new->async_tx, 354 + new->async_tx.cookie, sh_chan->xmit_shift); 355 + 356 + new->mark = DESC_PREPARED; 357 + new->async_tx.flags = flags; 358 + new->direction = direction; 359 + 360 + *len -= copy_size; 361 + if (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE) 362 + *src += copy_size; 363 + if (direction == DMA_BIDIRECTIONAL || direction == DMA_FROM_DEVICE) 364 + *dest += copy_size; 365 + 366 + return new; 367 + } 368 + 369 + /* 370 + * sh_dmae_prep_sg - prepare transfer descriptors from an SG list 371 + * 372 + * Common routine for public (MEMCPY) and slave DMA. The MEMCPY case is also 373 + * converted to scatter-gather to guarantee consistent locking and a correct 374 + * list manipulation. 
For slave DMA direction carries the usual meaning, and, 375 + * logically, the SG list is RAM and the addr variable contains slave address, 376 + * e.g., the FIFO I/O register. For MEMCPY direction equals DMA_BIDIRECTIONAL 377 + * and the SG list contains only one element and points at the source buffer. 378 + */ 379 + static struct dma_async_tx_descriptor *sh_dmae_prep_sg(struct sh_dmae_chan *sh_chan, 380 + struct scatterlist *sgl, unsigned int sg_len, dma_addr_t *addr, 381 + enum dma_data_direction direction, unsigned long flags) 382 + { 383 + struct scatterlist *sg; 384 + struct sh_desc *first = NULL, *new = NULL /* compiler... */; 357 385 LIST_HEAD(tx_list); 358 - int chunks = (len + SH_DMA_TCR_MAX) / (SH_DMA_TCR_MAX + 1); 386 + int chunks = 0; 387 + int i; 359 388 360 - if (!chan) 389 + if (!sg_len) 361 390 return NULL; 362 391 363 - if (!len) 364 - return NULL; 365 - 366 - sh_chan = to_sh_chan(chan); 392 + for_each_sg(sgl, sg, sg_len, i) 393 + chunks += (sg_dma_len(sg) + SH_DMA_TCR_MAX) / 394 + (SH_DMA_TCR_MAX + 1); 367 395 368 396 /* Have to lock the whole loop to protect against concurrent release */ 369 397 spin_lock_bh(&sh_chan->desc_lock); ··· 453 333 * only during this function, then they are immediately spliced 454 334 * back onto the free list in form of a chain 455 335 */ 456 - do { 457 - /* Allocate the link descriptor from the free list */ 458 - new = sh_dmae_get_desc(sh_chan); 459 - if (!new) { 460 - dev_err(sh_chan->dev, 461 - "No free memory for link descriptor\n"); 462 - list_for_each_entry(new, &tx_list, node) 463 - new->mark = DESC_IDLE; 464 - list_splice(&tx_list, &sh_chan->ld_free); 465 - spin_unlock_bh(&sh_chan->desc_lock); 466 - return NULL; 467 - } 336 + for_each_sg(sgl, sg, sg_len, i) { 337 + dma_addr_t sg_addr = sg_dma_address(sg); 338 + size_t len = sg_dma_len(sg); 468 339 469 - copy_size = min(len, (size_t)SH_DMA_TCR_MAX + 1); 340 + if (!len) 341 + goto err_get_desc; 470 342 471 - new->hw.sar = dma_src; 472 - new->hw.dar = dma_dest; 
473 - new->hw.tcr = copy_size; 474 - if (!first) { 475 - /* First desc */ 476 - new->async_tx.cookie = -EBUSY; 477 - first = new; 478 - } else { 479 - /* Other desc - invisible to the user */ 480 - new->async_tx.cookie = -EINVAL; 481 - } 343 + do { 344 + dev_dbg(sh_chan->dev, "Add SG #%d@%p[%d], dma %llx\n", 345 + i, sg, len, (unsigned long long)sg_addr); 482 346 483 - dev_dbg(sh_chan->dev, 484 - "chaining %u of %u with %p, dst %x, cookie %d\n", 485 - copy_size, len, &new->async_tx, dma_dest, 486 - new->async_tx.cookie); 347 + if (direction == DMA_FROM_DEVICE) 348 + new = sh_dmae_add_desc(sh_chan, flags, 349 + &sg_addr, addr, &len, &first, 350 + direction); 351 + else 352 + new = sh_dmae_add_desc(sh_chan, flags, 353 + addr, &sg_addr, &len, &first, 354 + direction); 355 + if (!new) 356 + goto err_get_desc; 487 357 488 - new->mark = DESC_PREPARED; 489 - new->async_tx.flags = flags; 490 - new->chunks = chunks--; 491 - 492 - prev = new; 493 - len -= copy_size; 494 - dma_src += copy_size; 495 - dma_dest += copy_size; 496 - /* Insert the link descriptor to the LD ring */ 497 - list_add_tail(&new->node, &tx_list); 498 - } while (len); 358 + new->chunks = chunks--; 359 + list_add_tail(&new->node, &tx_list); 360 + } while (len); 361 + } 499 362 500 363 if (new != first) 501 364 new->async_tx.cookie = -ENOSPC; ··· 489 386 spin_unlock_bh(&sh_chan->desc_lock); 490 387 491 388 return &first->async_tx; 389 + 390 + err_get_desc: 391 + list_for_each_entry(new, &tx_list, node) 392 + new->mark = DESC_IDLE; 393 + list_splice(&tx_list, &sh_chan->ld_free); 394 + 395 + spin_unlock_bh(&sh_chan->desc_lock); 396 + 397 + return NULL; 398 + } 399 + 400 + static struct dma_async_tx_descriptor *sh_dmae_prep_memcpy( 401 + struct dma_chan *chan, dma_addr_t dma_dest, dma_addr_t dma_src, 402 + size_t len, unsigned long flags) 403 + { 404 + struct sh_dmae_chan *sh_chan; 405 + struct scatterlist sg; 406 + 407 + if (!chan || !len) 408 + return NULL; 409 + 410 + chan->private = NULL; 411 + 412 + 
sh_chan = to_sh_chan(chan); 413 + 414 + sg_init_table(&sg, 1); 415 + sg_set_page(&sg, pfn_to_page(PFN_DOWN(dma_src)), len, 416 + offset_in_page(dma_src)); 417 + sg_dma_address(&sg) = dma_src; 418 + sg_dma_len(&sg) = len; 419 + 420 + return sh_dmae_prep_sg(sh_chan, &sg, 1, &dma_dest, DMA_BIDIRECTIONAL, 421 + flags); 422 + } 423 + 424 + static struct dma_async_tx_descriptor *sh_dmae_prep_slave_sg( 425 + struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len, 426 + enum dma_data_direction direction, unsigned long flags) 427 + { 428 + struct sh_dmae_slave *param; 429 + struct sh_dmae_chan *sh_chan; 430 + 431 + if (!chan) 432 + return NULL; 433 + 434 + sh_chan = to_sh_chan(chan); 435 + param = chan->private; 436 + 437 + /* Someone calling slave DMA on a public channel? */ 438 + if (!param || !sg_len) { 439 + dev_warn(sh_chan->dev, "%s: bad parameter: %p, %d, %d\n", 440 + __func__, param, sg_len, param ? param->slave_id : -1); 441 + return NULL; 442 + } 443 + 444 + /* 445 + * if (param != NULL), this is a successfully requested slave channel, 446 + * therefore param->config != NULL too. 
447 + */ 448 + return sh_dmae_prep_sg(sh_chan, sgl, sg_len, &param->config->addr, 449 + direction, flags); 450 + } 451 + 452 + static void sh_dmae_terminate_all(struct dma_chan *chan) 453 + { 454 + struct sh_dmae_chan *sh_chan = to_sh_chan(chan); 455 + 456 + if (!chan) 457 + return; 458 + 459 + sh_dmae_chan_ld_cleanup(sh_chan, true); 492 460 } 493 461 494 462 static dma_async_tx_callback __ld_cleanup(struct sh_dmae_chan *sh_chan, bool all) ··· 593 419 cookie = tx->cookie; 594 420 595 421 if (desc->mark == DESC_COMPLETED && desc->chunks == 1) { 596 - BUG_ON(sh_chan->completed_cookie != desc->cookie - 1); 422 + if (sh_chan->completed_cookie != desc->cookie - 1) 423 + dev_dbg(sh_chan->dev, 424 + "Completing cookie %d, expected %d\n", 425 + desc->cookie, 426 + sh_chan->completed_cookie + 1); 597 427 sh_chan->completed_cookie = desc->cookie; 598 428 } 599 429 ··· 670 492 return; 671 493 } 672 494 673 - /* Find the first un-transfer desciptor */ 495 + /* Find the first not transferred desciptor */ 674 496 list_for_each_entry(sd, &sh_chan->ld_queue, node) 675 497 if (sd->mark == DESC_SUBMITTED) { 676 498 /* Get the ld start address from ld_queue */ ··· 737 559 738 560 /* IRQ Multi */ 739 561 if (shdev->pdata.mode & SHDMA_MIX_IRQ) { 740 - int cnt = 0; 562 + int __maybe_unused cnt = 0; 741 563 switch (irq) { 742 564 #if defined(DMTE6_IRQ) && defined(DMAE1_IRQ) 743 565 case DMTE6_IRQ: ··· 774 596 struct sh_dmae_chan *sh_chan = (struct sh_dmae_chan *)data; 775 597 struct sh_desc *desc; 776 598 u32 sar_buf = sh_dmae_readl(sh_chan, SAR); 599 + u32 dar_buf = sh_dmae_readl(sh_chan, DAR); 777 600 778 601 spin_lock(&sh_chan->desc_lock); 779 602 list_for_each_entry(desc, &sh_chan->ld_queue, node) { 780 - if ((desc->hw.sar + desc->hw.tcr) == sar_buf && 781 - desc->mark == DESC_SUBMITTED) { 603 + if (desc->mark == DESC_SUBMITTED && 604 + ((desc->direction == DMA_FROM_DEVICE && 605 + (desc->hw.dar + desc->hw.tcr) == dar_buf) || 606 + (desc->hw.sar + desc->hw.tcr) == sar_buf)) { 782 607 
dev_dbg(sh_chan->dev, "done #%d@%p dst %u\n", 783 608 desc->async_tx.cookie, &desc->async_tx, 784 609 desc->hw.dar); ··· 854 673 } 855 674 856 675 snprintf(new_sh_chan->dev_id, sizeof(new_sh_chan->dev_id), 857 - "sh-dmae%d", new_sh_chan->id); 676 + "sh-dmae%d", new_sh_chan->id); 858 677 859 678 /* set up channel irq */ 860 679 err = request_irq(irq, &sh_dmae_interrupt, irqflags, ··· 864 683 "with return %d\n", id, err); 865 684 goto err_no_irq; 866 685 } 867 - 868 - /* CHCR register control function */ 869 - new_sh_chan->set_chcr = dmae_set_chcr; 870 - /* DMARS register control function */ 871 - new_sh_chan->set_dmars = dmae_set_dmars; 872 686 873 687 shdev->chan[id] = new_sh_chan; 874 688 return 0; ··· 935 759 INIT_LIST_HEAD(&shdev->common.channels); 936 760 937 761 dma_cap_set(DMA_MEMCPY, shdev->common.cap_mask); 762 + dma_cap_set(DMA_SLAVE, shdev->common.cap_mask); 763 + 938 764 shdev->common.device_alloc_chan_resources 939 765 = sh_dmae_alloc_chan_resources; 940 766 shdev->common.device_free_chan_resources = sh_dmae_free_chan_resources; 941 767 shdev->common.device_prep_dma_memcpy = sh_dmae_prep_memcpy; 942 768 shdev->common.device_is_tx_complete = sh_dmae_is_complete; 943 769 shdev->common.device_issue_pending = sh_dmae_memcpy_issue_pending; 770 + 771 + /* Compulsory for DMA_SLAVE fields */ 772 + shdev->common.device_prep_slave_sg = sh_dmae_prep_slave_sg; 773 + shdev->common.device_terminate_all = sh_dmae_terminate_all; 774 + 944 775 shdev->common.dev = &pdev->dev; 945 776 /* Default transfer size of 32 bytes requires 32-byte alignment */ 946 777 shdev->common.copy_align = 5;
+2 -5
drivers/dma/shdma.h
··· 29 29 struct sh_dmae_regs hw; 30 30 struct list_head node; 31 31 struct dma_async_tx_descriptor async_tx; 32 + enum dma_data_direction direction; 32 33 dma_cookie_t cookie; 33 34 int chunks; 34 35 int mark; ··· 46 45 struct device *dev; /* Channel device */ 47 46 struct tasklet_struct tasklet; /* Tasklet */ 48 47 int descs_allocated; /* desc count */ 48 + int xmit_shift; /* log_2(bytes_per_xfer) */ 49 49 int id; /* Raw id of this channel */ 50 50 char dev_id[16]; /* unique name per DMAC of channel */ 51 - 52 - /* Set chcr */ 53 - int (*set_chcr)(struct sh_dmae_chan *sh_chan, u32 regs); 54 - /* Set DMA resource */ 55 - int (*set_dmars)(struct sh_dmae_chan *sh_chan, u16 res); 56 51 }; 57 52 58 53 struct sh_dmae_device {
+1 -1
drivers/gpu/drm/ati_pcigart.c
··· 113 113 114 114 if (pci_set_dma_mask(dev->pdev, gart_info->table_mask)) { 115 115 DRM_ERROR("fail to set dma mask to 0x%Lx\n", 116 - gart_info->table_mask); 116 + (unsigned long long)gart_info->table_mask); 117 117 ret = 1; 118 118 goto done; 119 119 }
+1 -1
drivers/gpu/drm/i915/i915_drv.c
··· 120 120 121 121 const static struct intel_device_info intel_pineview_info = { 122 122 .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .is_i9xx = 1, 123 - .has_pipe_cxsr = 1, 123 + .need_gfx_hws = 1, 124 124 .has_hotplug = 1, 125 125 }; 126 126
+9 -2
drivers/gpu/drm/i915/i915_gem.c
··· 3564 3564 uint32_t reloc_count = 0, i; 3565 3565 int ret = 0; 3566 3566 3567 + if (relocs == NULL) 3568 + return 0; 3569 + 3567 3570 for (i = 0; i < buffer_count; i++) { 3568 3571 struct drm_i915_gem_relocation_entry __user *user_relocs; 3569 3572 int unwritten; ··· 3656 3653 struct drm_gem_object *batch_obj; 3657 3654 struct drm_i915_gem_object *obj_priv; 3658 3655 struct drm_clip_rect *cliprects = NULL; 3659 - struct drm_i915_gem_relocation_entry *relocs; 3656 + struct drm_i915_gem_relocation_entry *relocs = NULL; 3660 3657 int ret = 0, ret2, i, pinned = 0; 3661 3658 uint64_t exec_offset; 3662 3659 uint32_t seqno, flush_domains, reloc_index; ··· 3725 3722 if (object_list[i] == NULL) { 3726 3723 DRM_ERROR("Invalid object handle %d at index %d\n", 3727 3724 exec_list[i].handle, i); 3725 + /* prevent error path from reading uninitialized data */ 3726 + args->buffer_count = i + 1; 3728 3727 ret = -EBADF; 3729 3728 goto err; 3730 3729 } ··· 3735 3730 if (obj_priv->in_execbuffer) { 3736 3731 DRM_ERROR("Object %p appears more than once in object list\n", 3737 3732 object_list[i]); 3733 + /* prevent error path from reading uninitialized data */ 3734 + args->buffer_count = i + 1; 3738 3735 ret = -EBADF; 3739 3736 goto err; 3740 3737 } ··· 3933 3926 3934 3927 mutex_unlock(&dev->struct_mutex); 3935 3928 3929 + pre_mutex_err: 3936 3930 /* Copy the updated relocations out regardless of current error 3937 3931 * state. Failure to update the relocs would mean that the next 3938 3932 * time userland calls execbuf, it would do so with presumed offset ··· 3948 3940 ret = ret2; 3949 3941 } 3950 3942 3951 - pre_mutex_err: 3952 3943 drm_free_large(object_list); 3953 3944 kfree(cliprects); 3954 3945
+30 -12
drivers/gpu/drm/i915/i915_irq.c
··· 309 309 if (de_iir & DE_GSE) 310 310 ironlake_opregion_gse_intr(dev); 311 311 312 + if (de_iir & DE_PLANEA_FLIP_DONE) 313 + intel_prepare_page_flip(dev, 0); 314 + 315 + if (de_iir & DE_PLANEB_FLIP_DONE) 316 + intel_prepare_page_flip(dev, 1); 317 + 318 + if (de_iir & DE_PIPEA_VBLANK) { 319 + drm_handle_vblank(dev, 0); 320 + intel_finish_page_flip(dev, 0); 321 + } 322 + 323 + if (de_iir & DE_PIPEB_VBLANK) { 324 + drm_handle_vblank(dev, 1); 325 + intel_finish_page_flip(dev, 1); 326 + } 327 + 312 328 /* check event from PCH */ 313 329 if ((de_iir & DE_PCH_EVENT) && 314 330 (pch_iir & SDE_HOTPLUG_MASK)) { ··· 860 844 if (!(pipeconf & PIPEACONF_ENABLE)) 861 845 return -EINVAL; 862 846 863 - if (IS_IRONLAKE(dev)) 864 - return 0; 865 - 866 847 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); 867 - if (IS_I965G(dev)) 848 + if (IS_IRONLAKE(dev)) 849 + ironlake_enable_display_irq(dev_priv, (pipe == 0) ? 850 + DE_PIPEA_VBLANK: DE_PIPEB_VBLANK); 851 + else if (IS_I965G(dev)) 868 852 i915_enable_pipestat(dev_priv, pipe, 869 853 PIPE_START_VBLANK_INTERRUPT_ENABLE); 870 854 else ··· 882 866 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 883 867 unsigned long irqflags; 884 868 885 - if (IS_IRONLAKE(dev)) 886 - return; 887 - 888 869 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); 889 - i915_disable_pipestat(dev_priv, pipe, 890 - PIPE_VBLANK_INTERRUPT_ENABLE | 891 - PIPE_START_VBLANK_INTERRUPT_ENABLE); 870 + if (IS_IRONLAKE(dev)) 871 + ironlake_disable_display_irq(dev_priv, (pipe == 0) ? 
872 + DE_PIPEA_VBLANK: DE_PIPEB_VBLANK); 873 + else 874 + i915_disable_pipestat(dev_priv, pipe, 875 + PIPE_VBLANK_INTERRUPT_ENABLE | 876 + PIPE_START_VBLANK_INTERRUPT_ENABLE); 892 877 spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags); 893 878 } 894 879 ··· 1032 1015 { 1033 1016 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1034 1017 /* enable kind of interrupts always enabled */ 1035 - u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT; 1018 + u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | 1019 + DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE; 1036 1020 u32 render_mask = GT_USER_INTERRUPT; 1037 1021 u32 hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG | 1038 1022 SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG; 1039 1023 1040 1024 dev_priv->irq_mask_reg = ~display_mask; 1041 - dev_priv->de_irq_enable_reg = display_mask; 1025 + dev_priv->de_irq_enable_reg = display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK; 1042 1026 1043 1027 /* should always can generate irq */ 1044 1028 I915_WRITE(DEIIR, I915_READ(DEIIR));
+3
drivers/gpu/drm/i915/intel_crt.c
··· 157 157 adpa = I915_READ(PCH_ADPA); 158 158 159 159 adpa &= ~ADPA_CRT_HOTPLUG_MASK; 160 + /* disable HPD first */ 161 + I915_WRITE(PCH_ADPA, adpa); 162 + (void)I915_READ(PCH_ADPA); 160 163 161 164 adpa |= (ADPA_CRT_HOTPLUG_PERIOD_128 | 162 165 ADPA_CRT_HOTPLUG_WARMUP_10MS |
+31 -2
drivers/gpu/drm/i915/intel_display.c
··· 1638 1638 case DRM_MODE_DPMS_OFF: 1639 1639 DRM_DEBUG_KMS("crtc %d dpms off\n", pipe); 1640 1640 1641 + drm_vblank_off(dev, pipe); 1641 1642 /* Disable display plane */ 1642 1643 temp = I915_READ(dspcntr_reg); 1643 1644 if ((temp & DISPLAY_PLANE_ENABLE) != 0) { ··· 2520 2519 sr_entries = roundup(sr_entries / cacheline_size, 1); 2521 2520 DRM_DEBUG("self-refresh entries: %d\n", sr_entries); 2522 2521 I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN); 2522 + } else { 2523 + /* Turn off self refresh if both pipes are enabled */ 2524 + I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF) 2525 + & ~FW_BLC_SELF_EN); 2523 2526 } 2524 2527 2525 2528 DRM_DEBUG("Setting FIFO watermarks - A: %d, B: %d, SR %d\n", ··· 2567 2562 srwm = 1; 2568 2563 srwm &= 0x3f; 2569 2564 I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN); 2565 + } else { 2566 + /* Turn off self refresh if both pipes are enabled */ 2567 + I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF) 2568 + & ~FW_BLC_SELF_EN); 2570 2569 } 2571 2570 2572 2571 DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n", ··· 2639 2630 if (srwm < 0) 2640 2631 srwm = 1; 2641 2632 I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN | (srwm & 0x3f)); 2633 + } else { 2634 + /* Turn off self refresh if both pipes are enabled */ 2635 + I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF) 2636 + & ~FW_BLC_SELF_EN); 2642 2637 } 2643 2638 2644 2639 DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n", ··· 3997 3984 spin_lock_irqsave(&dev->event_lock, flags); 3998 3985 work = intel_crtc->unpin_work; 3999 3986 if (work == NULL || !work->pending) { 3987 + if (work && !work->pending) { 3988 + obj_priv = work->obj->driver_private; 3989 + DRM_DEBUG_DRIVER("flip finish: %p (%d) not pending?\n", 3990 + obj_priv, 3991 + atomic_read(&obj_priv->pending_flip)); 3992 + } 4000 3993 spin_unlock_irqrestore(&dev->event_lock, flags); 4001 3994 return; 4002 3995 } ··· 4024 4005 spin_unlock_irqrestore(&dev->event_lock, flags); 4025 4006 4026 4007 obj_priv = 
work->obj->driver_private; 4027 - if (atomic_dec_and_test(&obj_priv->pending_flip)) 4008 + 4009 + /* Initial scanout buffer will have a 0 pending flip count */ 4010 + if ((atomic_read(&obj_priv->pending_flip) == 0) || 4011 + atomic_dec_and_test(&obj_priv->pending_flip)) 4028 4012 DRM_WAKEUP(&dev_priv->pending_flip_queue); 4029 4013 schedule_work(&work->work); 4030 4014 } ··· 4040 4018 unsigned long flags; 4041 4019 4042 4020 spin_lock_irqsave(&dev->event_lock, flags); 4043 - if (intel_crtc->unpin_work) 4021 + if (intel_crtc->unpin_work) { 4044 4022 intel_crtc->unpin_work->pending = 1; 4023 + } else { 4024 + DRM_DEBUG_DRIVER("preparing flip with no unpin work?\n"); 4025 + } 4045 4026 spin_unlock_irqrestore(&dev->event_lock, flags); 4046 4027 } 4047 4028 ··· 4078 4053 /* We borrow the event spin lock for protecting unpin_work */ 4079 4054 spin_lock_irqsave(&dev->event_lock, flags); 4080 4055 if (intel_crtc->unpin_work) { 4056 + DRM_DEBUG_DRIVER("flip queue: crtc already busy\n"); 4081 4057 spin_unlock_irqrestore(&dev->event_lock, flags); 4082 4058 kfree(work); 4083 4059 mutex_unlock(&dev->struct_mutex); ··· 4092 4066 4093 4067 ret = intel_pin_and_fence_fb_obj(dev, obj); 4094 4068 if (ret != 0) { 4069 + DRM_DEBUG_DRIVER("flip queue: %p pin & fence failed\n", 4070 + obj->driver_private); 4095 4071 kfree(work); 4072 + intel_crtc->unpin_work = NULL; 4096 4073 mutex_unlock(&dev->struct_mutex); 4097 4074 return ret; 4098 4075 }
+9 -2
drivers/gpu/drm/i915/intel_lvds.c
··· 611 611 { 612 612 .ident = "Samsung SX20S", 613 613 .matches = { 614 - DMI_MATCH(DMI_SYS_VENDOR, "Phoenix Technologies LTD"), 614 + DMI_MATCH(DMI_SYS_VENDOR, "Samsung Electronics"), 615 615 DMI_MATCH(DMI_BOARD_NAME, "SX20S"), 616 616 }, 617 617 }, ··· 620 620 .matches = { 621 621 DMI_MATCH(DMI_SYS_VENDOR, "Acer"), 622 622 DMI_MATCH(DMI_PRODUCT_NAME, "Aspire one"), 623 + }, 624 + }, 625 + { 626 + .ident = "Aspire 1810T", 627 + .matches = { 628 + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), 629 + DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 1810T"), 623 630 }, 624 631 }, 625 632 { ··· 650 643 { 651 644 enum drm_connector_status status = connector_status_connected; 652 645 653 - if (!acpi_lid_open() && !dmi_check_system(bad_lid_status)) 646 + if (!dmi_check_system(bad_lid_status) && !acpi_lid_open()) 654 647 status = connector_status_disconnected; 655 648 656 649 return status;
+8
drivers/gpu/drm/i915/intel_sdvo.c
··· 2345 2345 connector->connector_type = DRM_MODE_CONNECTOR_VGA; 2346 2346 intel_output->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) | 2347 2347 (1 << INTEL_ANALOG_CLONE_BIT); 2348 + } else if (flags & SDVO_OUTPUT_CVBS0) { 2349 + 2350 + sdvo_priv->controlled_output = SDVO_OUTPUT_CVBS0; 2351 + encoder->encoder_type = DRM_MODE_ENCODER_TVDAC; 2352 + connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO; 2353 + sdvo_priv->is_tv = true; 2354 + intel_output->needs_tv_clock = true; 2355 + intel_output->clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT; 2348 2356 } else if (flags & SDVO_OUTPUT_LVDS0) { 2349 2357 2350 2358 sdvo_priv->controlled_output = SDVO_OUTPUT_LVDS0;
+9 -5
drivers/gpu/drm/radeon/r100.c
··· 354 354 return RREG32(RADEON_CRTC2_CRNT_FRAME); 355 355 } 356 356 357 + /* Who ever call radeon_fence_emit should call ring_lock and ask 358 + * for enough space (today caller are ib schedule and buffer move) */ 357 359 void r100_fence_ring_emit(struct radeon_device *rdev, 358 360 struct radeon_fence *fence) 359 361 { 360 - /* Who ever call radeon_fence_emit should call ring_lock and ask 361 - * for enough space (today caller are ib schedule and buffer move) */ 362 + /* We have to make sure that caches are flushed before 363 + * CPU might read something from VRAM. */ 364 + radeon_ring_write(rdev, PACKET0(RADEON_RB3D_DSTCACHE_CTLSTAT, 0)); 365 + radeon_ring_write(rdev, RADEON_RB3D_DC_FLUSH_ALL); 366 + radeon_ring_write(rdev, PACKET0(RADEON_RB3D_ZCACHE_CTLSTAT, 0)); 367 + radeon_ring_write(rdev, RADEON_RB3D_ZC_FLUSH_ALL); 362 368 /* Wait until IDLE & CLEAN */ 363 369 radeon_ring_write(rdev, PACKET0(0x1720, 0)); 364 370 radeon_ring_write(rdev, (1 << 16) | (1 << 17)); ··· 3375 3369 3376 3370 void r100_fini(struct radeon_device *rdev) 3377 3371 { 3378 - r100_suspend(rdev); 3379 3372 r100_cp_fini(rdev); 3380 3373 r100_wb_fini(rdev); 3381 3374 r100_ib_fini(rdev); ··· 3486 3481 if (r) { 3487 3482 /* Somethings want wront with the accel init stop accel */ 3488 3483 dev_err(rdev->dev, "Disabling GPU acceleration\n"); 3489 - r100_suspend(rdev); 3490 3484 r100_cp_fini(rdev); 3491 3485 r100_wb_fini(rdev); 3492 3486 r100_ib_fini(rdev); 3487 + radeon_irq_kms_fini(rdev); 3493 3488 if (rdev->flags & RADEON_IS_PCI) 3494 3489 r100_pci_gart_fini(rdev); 3495 - radeon_irq_kms_fini(rdev); 3496 3490 rdev->accel_working = false; 3497 3491 } 3498 3492 return 0;
+9 -7
drivers/gpu/drm/radeon/r300.c
··· 506 506 507 507 /* DDR for all card after R300 & IGP */ 508 508 rdev->mc.vram_is_ddr = true; 509 + 509 510 tmp = RREG32(RADEON_MEM_CNTL); 510 - if (tmp & R300_MEM_NUM_CHANNELS_MASK) { 511 - rdev->mc.vram_width = 128; 512 - } else { 513 - rdev->mc.vram_width = 64; 511 + tmp &= R300_MEM_NUM_CHANNELS_MASK; 512 + switch (tmp) { 513 + case 0: rdev->mc.vram_width = 64; break; 514 + case 1: rdev->mc.vram_width = 128; break; 515 + case 2: rdev->mc.vram_width = 256; break; 516 + default: rdev->mc.vram_width = 128; break; 514 517 } 515 518 516 519 r100_vram_init_sizes(rdev); ··· 1330 1327 1331 1328 void r300_fini(struct radeon_device *rdev) 1332 1329 { 1333 - r300_suspend(rdev); 1334 1330 r100_cp_fini(rdev); 1335 1331 r100_wb_fini(rdev); 1336 1332 r100_ib_fini(rdev); ··· 1420 1418 if (r) { 1421 1419 /* Somethings want wront with the accel init stop accel */ 1422 1420 dev_err(rdev->dev, "Disabling GPU acceleration\n"); 1423 - r300_suspend(rdev); 1424 1421 r100_cp_fini(rdev); 1425 1422 r100_wb_fini(rdev); 1426 1423 r100_ib_fini(rdev); 1424 + radeon_irq_kms_fini(rdev); 1427 1425 if (rdev->flags & RADEON_IS_PCIE) 1428 1426 rv370_pcie_gart_fini(rdev); 1429 1427 if (rdev->flags & RADEON_IS_PCI) 1430 1428 r100_pci_gart_fini(rdev); 1431 - radeon_irq_kms_fini(rdev); 1429 + radeon_agp_fini(rdev); 1432 1430 rdev->accel_working = false; 1433 1431 } 1434 1432 return 0;
+1 -2
drivers/gpu/drm/radeon/r420.c
··· 389 389 if (r) { 390 390 /* Somethings want wront with the accel init stop accel */ 391 391 dev_err(rdev->dev, "Disabling GPU acceleration\n"); 392 - r420_suspend(rdev); 393 392 r100_cp_fini(rdev); 394 393 r100_wb_fini(rdev); 395 394 r100_ib_fini(rdev); 395 + radeon_irq_kms_fini(rdev); 396 396 if (rdev->flags & RADEON_IS_PCIE) 397 397 rv370_pcie_gart_fini(rdev); 398 398 if (rdev->flags & RADEON_IS_PCI) 399 399 r100_pci_gart_fini(rdev); 400 400 radeon_agp_fini(rdev); 401 - radeon_irq_kms_fini(rdev); 402 401 rdev->accel_working = false; 403 402 } 404 403 return 0;
+1 -2
drivers/gpu/drm/radeon/r520.c
··· 294 294 if (r) { 295 295 /* Somethings want wront with the accel init stop accel */ 296 296 dev_err(rdev->dev, "Disabling GPU acceleration\n"); 297 - rv515_suspend(rdev); 298 297 r100_cp_fini(rdev); 299 298 r100_wb_fini(rdev); 300 299 r100_ib_fini(rdev); 300 + radeon_irq_kms_fini(rdev); 301 301 rv370_pcie_gart_fini(rdev); 302 302 radeon_agp_fini(rdev); 303 - radeon_irq_kms_fini(rdev); 304 303 rdev->accel_working = false; 305 304 } 306 305 return 0;
+34 -14
drivers/gpu/drm/radeon/r600.c
··· 1654 1654 rdev->cp.align_mask = 16 - 1; 1655 1655 } 1656 1656 1657 + void r600_cp_fini(struct radeon_device *rdev) 1658 + { 1659 + r600_cp_stop(rdev); 1660 + radeon_ring_fini(rdev); 1661 + } 1662 + 1657 1663 1658 1664 /* 1659 1665 * GPU scratch registers helpers function. ··· 1867 1861 return r; 1868 1862 } 1869 1863 r600_gpu_init(rdev); 1864 + r = r600_blit_init(rdev); 1865 + if (r) { 1866 + r600_blit_fini(rdev); 1867 + rdev->asic->copy = NULL; 1868 + dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r); 1869 + } 1870 1870 /* pin copy shader into vram */ 1871 1871 if (rdev->r600_blit.shader_obj) { 1872 1872 r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false); ··· 2057 2045 r = r600_pcie_gart_init(rdev); 2058 2046 if (r) 2059 2047 return r; 2060 - r = r600_blit_init(rdev); 2061 - if (r) { 2062 - r600_blit_fini(rdev); 2063 - rdev->asic->copy = NULL; 2064 - dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r); 2065 - } 2066 2048 2067 2049 rdev->accel_working = true; 2068 2050 r = r600_startup(rdev); 2069 2051 if (r) { 2070 - r600_suspend(rdev); 2052 + dev_err(rdev->dev, "disabling GPU acceleration\n"); 2053 + r600_cp_fini(rdev); 2071 2054 r600_wb_fini(rdev); 2072 - radeon_ring_fini(rdev); 2055 + r600_irq_fini(rdev); 2056 + radeon_irq_kms_fini(rdev); 2073 2057 r600_pcie_gart_fini(rdev); 2074 2058 rdev->accel_working = false; 2075 2059 } ··· 2091 2083 2092 2084 void r600_fini(struct radeon_device *rdev) 2093 2085 { 2094 - /* Suspend operations */ 2095 - r600_suspend(rdev); 2096 - 2097 2086 r600_audio_fini(rdev); 2098 2087 r600_blit_fini(rdev); 2088 + r600_cp_fini(rdev); 2089 + r600_wb_fini(rdev); 2099 2090 r600_irq_fini(rdev); 2100 2091 radeon_irq_kms_fini(rdev); 2101 - radeon_ring_fini(rdev); 2102 - r600_wb_fini(rdev); 2103 2092 r600_pcie_gart_fini(rdev); 2093 + radeon_agp_fini(rdev); 2104 2094 radeon_gem_fini(rdev); 2105 2095 radeon_fence_driver_fini(rdev); 2106 2096 radeon_clocks_fini(rdev); 2107 - radeon_agp_fini(rdev); 
2108 2097 radeon_bo_fini(rdev); 2109 2098 radeon_atombios_fini(rdev); 2110 2099 kfree(rdev->bios); ··· 2904 2899 #else 2905 2900 return 0; 2906 2901 #endif 2902 + } 2903 + 2904 + /** 2905 + * r600_ioctl_wait_idle - flush host path cache on wait idle ioctl 2906 + * rdev: radeon device structure 2907 + * bo: buffer object struct which userspace is waiting for idle 2908 + * 2909 + * Some R6XX/R7XX doesn't seems to take into account HDP flush performed 2910 + * through ring buffer, this leads to corruption in rendering, see 2911 + * http://bugzilla.kernel.org/show_bug.cgi?id=15186 to avoid this we 2912 + * directly perform HDP flush by writing register through MMIO. 2913 + */ 2914 + void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo) 2915 + { 2916 + WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1); 2907 2917 }
+1 -1
drivers/gpu/drm/radeon/r600_audio.c
··· 35 35 */ 36 36 static int r600_audio_chipset_supported(struct radeon_device *rdev) 37 37 { 38 - return rdev->family >= CHIP_R600 38 + return (rdev->family >= CHIP_R600 && rdev->family < CHIP_RV710) 39 39 || rdev->family == CHIP_RS600 40 40 || rdev->family == CHIP_RS690 41 41 || rdev->family == CHIP_RS740;
+8
drivers/gpu/drm/radeon/radeon.h
··· 661 661 void (*hpd_fini)(struct radeon_device *rdev); 662 662 bool (*hpd_sense)(struct radeon_device *rdev, enum radeon_hpd_id hpd); 663 663 void (*hpd_set_polarity)(struct radeon_device *rdev, enum radeon_hpd_id hpd); 664 + /* ioctl hw specific callback. Some hw might want to perform special 665 + * operation on specific ioctl. For instance on wait idle some hw 666 + * might want to perform and HDP flush through MMIO as it seems that 667 + * some R6XX/R7XX hw doesn't take HDP flush into account if programmed 668 + * through ring. 669 + */ 670 + void (*ioctl_wait_idle)(struct radeon_device *rdev, struct radeon_bo *bo); 664 671 }; 665 672 666 673 /* ··· 1150 1143 extern void r600_cp_stop(struct radeon_device *rdev); 1151 1144 extern void r600_ring_init(struct radeon_device *rdev, unsigned ring_size); 1152 1145 extern int r600_cp_resume(struct radeon_device *rdev); 1146 + extern void r600_cp_fini(struct radeon_device *rdev); 1153 1147 extern int r600_count_pipe_bits(uint32_t val); 1154 1148 extern int r600_gart_clear_page(struct radeon_device *rdev, int i); 1155 1149 extern int r600_mc_wait_for_idle(struct radeon_device *rdev);
+11
drivers/gpu/drm/radeon/radeon_asic.h
··· 117 117 .hpd_fini = &r100_hpd_fini, 118 118 .hpd_sense = &r100_hpd_sense, 119 119 .hpd_set_polarity = &r100_hpd_set_polarity, 120 + .ioctl_wait_idle = NULL, 120 121 }; 121 122 122 123 ··· 177 176 .hpd_fini = &r100_hpd_fini, 178 177 .hpd_sense = &r100_hpd_sense, 179 178 .hpd_set_polarity = &r100_hpd_set_polarity, 179 + .ioctl_wait_idle = NULL, 180 180 }; 181 181 182 182 /* ··· 221 219 .hpd_fini = &r100_hpd_fini, 222 220 .hpd_sense = &r100_hpd_sense, 223 221 .hpd_set_polarity = &r100_hpd_set_polarity, 222 + .ioctl_wait_idle = NULL, 224 223 }; 225 224 226 225 ··· 270 267 .hpd_fini = &r100_hpd_fini, 271 268 .hpd_sense = &r100_hpd_sense, 272 269 .hpd_set_polarity = &r100_hpd_set_polarity, 270 + .ioctl_wait_idle = NULL, 273 271 }; 274 272 275 273 ··· 327 323 .hpd_fini = &rs600_hpd_fini, 328 324 .hpd_sense = &rs600_hpd_sense, 329 325 .hpd_set_polarity = &rs600_hpd_set_polarity, 326 + .ioctl_wait_idle = NULL, 330 327 }; 331 328 332 329 ··· 375 370 .hpd_fini = &rs600_hpd_fini, 376 371 .hpd_sense = &rs600_hpd_sense, 377 372 .hpd_set_polarity = &rs600_hpd_set_polarity, 373 + .ioctl_wait_idle = NULL, 378 374 }; 379 375 380 376 ··· 427 421 .hpd_fini = &rs600_hpd_fini, 428 422 .hpd_sense = &rs600_hpd_sense, 429 423 .hpd_set_polarity = &rs600_hpd_set_polarity, 424 + .ioctl_wait_idle = NULL, 430 425 }; 431 426 432 427 ··· 470 463 .hpd_fini = &rs600_hpd_fini, 471 464 .hpd_sense = &rs600_hpd_sense, 472 465 .hpd_set_polarity = &rs600_hpd_set_polarity, 466 + .ioctl_wait_idle = NULL, 473 467 }; 474 468 475 469 /* ··· 512 504 bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd); 513 505 void r600_hpd_set_polarity(struct radeon_device *rdev, 514 506 enum radeon_hpd_id hpd); 507 + extern void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo); 515 508 516 509 static struct radeon_asic r600_asic = { 517 510 .init = &r600_init, ··· 547 538 .hpd_fini = &r600_hpd_fini, 548 539 .hpd_sense = &r600_hpd_sense, 549 540 .hpd_set_polarity = 
&r600_hpd_set_polarity, 541 + .ioctl_wait_idle = r600_ioctl_wait_idle, 550 542 }; 551 543 552 544 /* ··· 592 582 .hpd_fini = &r600_hpd_fini, 593 583 .hpd_sense = &r600_hpd_sense, 594 584 .hpd_set_polarity = &r600_hpd_set_polarity, 585 + .ioctl_wait_idle = r600_ioctl_wait_idle, 595 586 }; 596 587 597 588 #endif
+1 -2
drivers/gpu/drm/radeon/radeon_combios.c
··· 971 971 lvds->native_mode.vdisplay); 972 972 973 973 lvds->panel_vcc_delay = RBIOS16(lcd_info + 0x2c); 974 - if (lvds->panel_vcc_delay > 2000 || lvds->panel_vcc_delay < 0) 975 - lvds->panel_vcc_delay = 2000; 974 + lvds->panel_vcc_delay = min_t(u16, lvds->panel_vcc_delay, 2000); 976 975 977 976 lvds->panel_pwr_delay = RBIOS8(lcd_info + 0x24); 978 977 lvds->panel_digon_delay = RBIOS16(lcd_info + 0x38) & 0xf;
+1 -1
drivers/gpu/drm/radeon/radeon_connectors.c
··· 1343 1343 radeon_connector->dac_load_detect = false; 1344 1344 drm_connector_attach_property(&radeon_connector->base, 1345 1345 rdev->mode_info.load_detect_property, 1346 - 1); 1346 + radeon_connector->dac_load_detect); 1347 1347 drm_connector_attach_property(&radeon_connector->base, 1348 1348 rdev->mode_info.tv_std_property, 1349 1349 radeon_combios_get_tv_info(rdev));
+3
drivers/gpu/drm/radeon/radeon_gem.c
··· 308 308 } 309 309 robj = gobj->driver_private; 310 310 r = radeon_bo_wait(robj, NULL, false); 311 + /* callback hw specific functions if any */ 312 + if (robj->rdev->asic->ioctl_wait_idle) 313 + robj->rdev->asic->ioctl_wait_idle(robj->rdev, robj); 311 314 mutex_lock(&dev->struct_mutex); 312 315 drm_gem_object_unreference(gobj); 313 316 mutex_unlock(&dev->struct_mutex);
+21 -7
drivers/gpu/drm/radeon/rs400.c
··· 223 223 return 0; 224 224 } 225 225 226 + int rs400_mc_wait_for_idle(struct radeon_device *rdev) 227 + { 228 + unsigned i; 229 + uint32_t tmp; 230 + 231 + for (i = 0; i < rdev->usec_timeout; i++) { 232 + /* read MC_STATUS */ 233 + tmp = RREG32(0x0150); 234 + if (tmp & (1 << 2)) { 235 + return 0; 236 + } 237 + DRM_UDELAY(1); 238 + } 239 + return -1; 240 + } 241 + 226 242 void rs400_gpu_init(struct radeon_device *rdev) 227 243 { 228 244 /* FIXME: HDP same place on rs400 ? */ 229 245 r100_hdp_reset(rdev); 230 246 /* FIXME: is this correct ? */ 231 247 r420_pipes_init(rdev); 232 - if (r300_mc_wait_for_idle(rdev)) { 233 - printk(KERN_WARNING "Failed to wait MC idle while " 234 - "programming pipes. Bad things might happen.\n"); 248 + if (rs400_mc_wait_for_idle(rdev)) { 249 + printk(KERN_WARNING "rs400: Failed to wait MC idle while " 250 + "programming pipes. Bad things might happen. %08x\n", RREG32(0x150)); 235 251 } 236 252 } 237 253 ··· 386 370 r100_mc_stop(rdev, &save); 387 371 388 372 /* Wait for mc idle */ 389 - if (r300_mc_wait_for_idle(rdev)) 390 - dev_warn(rdev->dev, "Wait MC idle timeout before updating MC.\n"); 373 + if (rs400_mc_wait_for_idle(rdev)) 374 + dev_warn(rdev->dev, "rs400: Wait MC idle timeout before updating MC.\n"); 391 375 WREG32(R_000148_MC_FB_LOCATION, 392 376 S_000148_MC_FB_START(rdev->mc.vram_start >> 16) | 393 377 S_000148_MC_FB_TOP(rdev->mc.vram_end >> 16)); ··· 464 448 465 449 void rs400_fini(struct radeon_device *rdev) 466 450 { 467 - rs400_suspend(rdev); 468 451 r100_cp_fini(rdev); 469 452 r100_wb_fini(rdev); 470 453 r100_ib_fini(rdev); ··· 542 527 if (r) { 543 528 /* Somethings want wront with the accel init stop accel */ 544 529 dev_err(rdev->dev, "Disabling GPU acceleration\n"); 545 - rs400_suspend(rdev); 546 530 r100_cp_fini(rdev); 547 531 r100_wb_fini(rdev); 548 532 r100_ib_fini(rdev);
-2
drivers/gpu/drm/radeon/rs600.c
··· 610 610 611 611 void rs600_fini(struct radeon_device *rdev) 612 612 { 613 - rs600_suspend(rdev); 614 613 r100_cp_fini(rdev); 615 614 r100_wb_fini(rdev); 616 615 r100_ib_fini(rdev); ··· 688 689 if (r) { 689 690 /* Somethings want wront with the accel init stop accel */ 690 691 dev_err(rdev->dev, "Disabling GPU acceleration\n"); 691 - rs600_suspend(rdev); 692 692 r100_cp_fini(rdev); 693 693 r100_wb_fini(rdev); 694 694 r100_ib_fini(rdev);
-2
drivers/gpu/drm/radeon/rs690.c
··· 676 676 677 677 void rs690_fini(struct radeon_device *rdev) 678 678 { 679 - rs690_suspend(rdev); 680 679 r100_cp_fini(rdev); 681 680 r100_wb_fini(rdev); 682 681 r100_ib_fini(rdev); ··· 755 756 if (r) { 756 757 /* Somethings want wront with the accel init stop accel */ 757 758 dev_err(rdev->dev, "Disabling GPU acceleration\n"); 758 - rs690_suspend(rdev); 759 759 r100_cp_fini(rdev); 760 760 r100_wb_fini(rdev); 761 761 r100_ib_fini(rdev);
+1 -3
drivers/gpu/drm/radeon/rv515.c
··· 537 537 538 538 void rv515_fini(struct radeon_device *rdev) 539 539 { 540 - rv515_suspend(rdev); 541 540 r100_cp_fini(rdev); 542 541 r100_wb_fini(rdev); 543 542 r100_ib_fini(rdev); ··· 614 615 if (r) { 615 616 /* Somethings want wront with the accel init stop accel */ 616 617 dev_err(rdev->dev, "Disabling GPU acceleration\n"); 617 - rv515_suspend(rdev); 618 618 r100_cp_fini(rdev); 619 619 r100_wb_fini(rdev); 620 620 r100_ib_fini(rdev); 621 + radeon_irq_kms_fini(rdev); 621 622 rv370_pcie_gart_fini(rdev); 622 623 radeon_agp_fini(rdev); 623 - radeon_irq_kms_fini(rdev); 624 624 rdev->accel_working = false; 625 625 } 626 626 return 0;
+12 -12
drivers/gpu/drm/radeon/rv770.c
··· 887 887 return r; 888 888 } 889 889 rv770_gpu_init(rdev); 890 + r = r600_blit_init(rdev); 891 + if (r) { 892 + r600_blit_fini(rdev); 893 + rdev->asic->copy = NULL; 894 + dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r); 895 + } 890 896 /* pin copy shader into vram */ 891 897 if (rdev->r600_blit.shader_obj) { 892 898 r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false); ··· 1061 1055 r = r600_pcie_gart_init(rdev); 1062 1056 if (r) 1063 1057 return r; 1064 - r = r600_blit_init(rdev); 1065 - if (r) { 1066 - r600_blit_fini(rdev); 1067 - rdev->asic->copy = NULL; 1068 - dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r); 1069 - } 1070 1058 1071 1059 rdev->accel_working = true; 1072 1060 r = rv770_startup(rdev); 1073 1061 if (r) { 1074 - rv770_suspend(rdev); 1062 + dev_err(rdev->dev, "disabling GPU acceleration\n"); 1063 + r600_cp_fini(rdev); 1075 1064 r600_wb_fini(rdev); 1076 - radeon_ring_fini(rdev); 1065 + r600_irq_fini(rdev); 1066 + radeon_irq_kms_fini(rdev); 1077 1067 rv770_pcie_gart_fini(rdev); 1078 1068 rdev->accel_working = false; 1079 1069 } ··· 1091 1089 1092 1090 void rv770_fini(struct radeon_device *rdev) 1093 1091 { 1094 - rv770_suspend(rdev); 1095 - 1096 1092 r600_blit_fini(rdev); 1093 + r600_cp_fini(rdev); 1094 + r600_wb_fini(rdev); 1097 1095 r600_irq_fini(rdev); 1098 1096 radeon_irq_kms_fini(rdev); 1099 - radeon_ring_fini(rdev); 1100 - r600_wb_fini(rdev); 1101 1097 rv770_pcie_gart_fini(rdev); 1102 1098 radeon_gem_fini(rdev); 1103 1099 radeon_fence_driver_fini(rdev);
+1 -1
drivers/hwmon/adt7462.c
··· 179 179 * 180 180 * Some, but not all, of these voltages have low/high limits. 181 181 */ 182 - #define ADT7462_VOLT_COUNT 12 182 + #define ADT7462_VOLT_COUNT 13 183 183 184 184 #define ADT7462_VENDOR 0x41 185 185 #define ADT7462_DEVICE 0x62
+11 -12
drivers/hwmon/lm78.c
··· 851 851 static int __init lm78_isa_found(unsigned short address) 852 852 { 853 853 int val, save, found = 0; 854 + int port; 854 855 855 - /* We have to request the region in two parts because some 856 - boards declare base+4 to base+7 as a PNP device */ 857 - if (!request_region(address, 4, "lm78")) { 858 - pr_debug("lm78: Failed to request low part of region\n"); 859 - return 0; 860 - } 861 - if (!request_region(address + 4, 4, "lm78")) { 862 - pr_debug("lm78: Failed to request high part of region\n"); 863 - release_region(address, 4); 864 - return 0; 856 + /* Some boards declare base+0 to base+7 as a PNP device, some base+4 857 + * to base+7 and some base+5 to base+6. So we better request each port 858 + * individually for the probing phase. */ 859 + for (port = address; port < address + LM78_EXTENT; port++) { 860 + if (!request_region(port, 1, "lm78")) { 861 + pr_debug("lm78: Failed to request port 0x%x\n", port); 862 + goto release; 863 + } 865 864 } 866 865 867 866 #define REALLY_SLOW_IO ··· 924 925 val & 0x80 ? "LM79" : "LM78", (int)address); 925 926 926 927 release: 927 - release_region(address + 4, 4); 928 - release_region(address, 4); 928 + for (port--; port >= address; port--) 929 + release_region(port, 1); 929 930 return found; 930 931 } 931 932
+12 -12
drivers/hwmon/w83781d.c
··· 1793 1793 w83781d_isa_found(unsigned short address) 1794 1794 { 1795 1795 int val, save, found = 0; 1796 + int port; 1796 1797 1797 - /* We have to request the region in two parts because some 1798 - boards declare base+4 to base+7 as a PNP device */ 1799 - if (!request_region(address, 4, "w83781d")) { 1800 - pr_debug("w83781d: Failed to request low part of region\n"); 1801 - return 0; 1802 - } 1803 - if (!request_region(address + 4, 4, "w83781d")) { 1804 - pr_debug("w83781d: Failed to request high part of region\n"); 1805 - release_region(address, 4); 1806 - return 0; 1798 + /* Some boards declare base+0 to base+7 as a PNP device, some base+4 1799 + * to base+7 and some base+5 to base+6. So we better request each port 1800 + * individually for the probing phase. */ 1801 + for (port = address; port < address + W83781D_EXTENT; port++) { 1802 + if (!request_region(port, 1, "w83781d")) { 1803 + pr_debug("w83781d: Failed to request port 0x%x\n", 1804 + port); 1805 + goto release; 1806 + } 1807 1807 } 1808 1808 1809 1809 #define REALLY_SLOW_IO ··· 1877 1877 val == 0x30 ? "W83782D" : "W83781D", (int)address); 1878 1878 1879 1879 release: 1880 - release_region(address + 4, 4); 1881 - release_region(address, 4); 1880 + for (port--; port >= address; port--) 1881 + release_region(port, 1); 1882 1882 return found; 1883 1883 } 1884 1884
+17
drivers/pci/quirks.c
··· 338 338 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_S3, PCI_DEVICE_ID_S3_868, quirk_s3_64M); 339 339 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_S3, PCI_DEVICE_ID_S3_968, quirk_s3_64M); 340 340 341 + /* 342 + * Some CS5536 BIOSes (for example, the Soekris NET5501 board w/ comBIOS 343 + * ver. 1.33 20070103) don't set the correct ISA PCI region header info. 344 + * BAR0 should be 8 bytes; instead, it may be set to something like 8k 345 + * (which conflicts w/ BAR1's memory range). 346 + */ 347 + static void __devinit quirk_cs5536_vsa(struct pci_dev *dev) 348 + { 349 + if (pci_resource_len(dev, 0) != 8) { 350 + struct resource *res = &dev->resource[0]; 351 + res->end = res->start + 8 - 1; 352 + dev_info(&dev->dev, "CS5536 ISA bridge bug detected " 353 + "(incorrect header); workaround applied.\n"); 354 + } 355 + } 356 + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_ISA, quirk_cs5536_vsa); 357 + 341 358 static void __devinit quirk_io_region(struct pci_dev *dev, unsigned region, 342 359 unsigned size, int nr, const char *name) 343 360 {
+1
fs/befs/linuxvfs.c
··· 873 873 brelse(bh); 874 874 875 875 unacquire_priv_sbp: 876 + kfree(befs_sb->mount_opts.iocharset); 876 877 kfree(sb->s_fs_info); 877 878 878 879 unacquire_none:
+4 -3
fs/block_dev.c
··· 246 246 if (!sb) 247 247 goto out; 248 248 if (sb->s_flags & MS_RDONLY) { 249 - deactivate_locked_super(sb); 249 + sb->s_frozen = SB_FREEZE_TRANS; 250 + up_write(&sb->s_umount); 250 251 mutex_unlock(&bdev->bd_fsfreeze_mutex); 251 252 return sb; 252 253 } ··· 308 307 BUG_ON(sb->s_bdev != bdev); 309 308 down_write(&sb->s_umount); 310 309 if (sb->s_flags & MS_RDONLY) 311 - goto out_deactivate; 310 + goto out_unfrozen; 312 311 313 312 if (sb->s_op->unfreeze_fs) { 314 313 error = sb->s_op->unfreeze_fs(sb); ··· 322 321 } 323 322 } 324 323 324 + out_unfrozen: 325 325 sb->s_frozen = SB_UNFROZEN; 326 326 smp_wmb(); 327 327 wake_up(&sb->s_wait_unfrozen); 328 328 329 - out_deactivate: 330 329 if (sb) 331 330 deactivate_locked_super(sb); 332 331 out_unlock:
+6 -1
fs/btrfs/disk-io.c
··· 1982 1982 1983 1983 if (!(sb->s_flags & MS_RDONLY)) { 1984 1984 ret = btrfs_recover_relocation(tree_root); 1985 - BUG_ON(ret); 1985 + if (ret < 0) { 1986 + printk(KERN_WARNING 1987 + "btrfs: failed to recover relocation\n"); 1988 + err = -EINVAL; 1989 + goto fail_trans_kthread; 1990 + } 1986 1991 } 1987 1992 1988 1993 location.objectid = BTRFS_FS_TREE_OBJECTID;
+4 -4
fs/btrfs/extent-tree.c
··· 5402 5402 int ret; 5403 5403 5404 5404 while (level >= 0) { 5405 - if (path->slots[level] >= 5406 - btrfs_header_nritems(path->nodes[level])) 5407 - break; 5408 - 5409 5405 ret = walk_down_proc(trans, root, path, wc, lookup_info); 5410 5406 if (ret > 0) 5411 5407 break; 5412 5408 5413 5409 if (level == 0) 5410 + break; 5411 + 5412 + if (path->slots[level] >= 5413 + btrfs_header_nritems(path->nodes[level])) 5414 5414 break; 5415 5415 5416 5416 ret = do_walk_down(trans, root, path, wc, &lookup_info);
+1 -2
fs/btrfs/extent_io.c
··· 3165 3165 spin_unlock(&tree->buffer_lock); 3166 3166 goto free_eb; 3167 3167 } 3168 - spin_unlock(&tree->buffer_lock); 3169 - 3170 3168 /* add one reference for the tree */ 3171 3169 atomic_inc(&eb->refs); 3170 + spin_unlock(&tree->buffer_lock); 3172 3171 return eb; 3173 3172 3174 3173 free_eb:
+1 -1
fs/btrfs/file.c
··· 1133 1133 } 1134 1134 mutex_lock(&dentry->d_inode->i_mutex); 1135 1135 out: 1136 - return ret > 0 ? EIO : ret; 1136 + return ret > 0 ? -EIO : ret; 1137 1137 } 1138 1138 1139 1139 static const struct vm_operations_struct btrfs_file_vm_ops = {
+5 -45
fs/btrfs/inode.c
··· 1681 1681 * before we start the transaction. It limits the amount of btree 1682 1682 * reads required while inside the transaction. 1683 1683 */ 1684 - static noinline void reada_csum(struct btrfs_root *root, 1685 - struct btrfs_path *path, 1686 - struct btrfs_ordered_extent *ordered_extent) 1687 - { 1688 - struct btrfs_ordered_sum *sum; 1689 - u64 bytenr; 1690 - 1691 - sum = list_entry(ordered_extent->list.next, struct btrfs_ordered_sum, 1692 - list); 1693 - bytenr = sum->sums[0].bytenr; 1694 - 1695 - /* 1696 - * we don't care about the results, the point of this search is 1697 - * just to get the btree leaves into ram 1698 - */ 1699 - btrfs_lookup_csum(NULL, root->fs_info->csum_root, path, bytenr, 0); 1700 - } 1701 - 1702 1684 /* as ordered data IO finishes, this gets called so we can finish 1703 1685 * an ordered extent if the range of bytes in the file it covers are 1704 1686 * fully written. ··· 1691 1709 struct btrfs_trans_handle *trans; 1692 1710 struct btrfs_ordered_extent *ordered_extent = NULL; 1693 1711 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; 1694 - struct btrfs_path *path; 1695 1712 int compressed = 0; 1696 1713 int ret; 1697 1714 ··· 1698 1717 if (!ret) 1699 1718 return 0; 1700 1719 1701 - /* 1702 - * before we join the transaction, try to do some of our IO. 1703 - * This will limit the amount of IO that we have to do with 1704 - * the transaction running. We're unlikely to need to do any 1705 - * IO if the file extents are new, the disk_i_size checks 1706 - * covers the most common case. 
1707 - */ 1708 - if (start < BTRFS_I(inode)->disk_i_size) { 1709 - path = btrfs_alloc_path(); 1710 - if (path) { 1711 - ret = btrfs_lookup_file_extent(NULL, root, path, 1712 - inode->i_ino, 1713 - start, 0); 1714 - ordered_extent = btrfs_lookup_ordered_extent(inode, 1715 - start); 1716 - if (!list_empty(&ordered_extent->list)) { 1717 - btrfs_release_path(root, path); 1718 - reada_csum(root, path, ordered_extent); 1719 - } 1720 - btrfs_free_path(path); 1721 - } 1722 - } 1723 - 1724 - if (!ordered_extent) 1725 - ordered_extent = btrfs_lookup_ordered_extent(inode, start); 1720 + ordered_extent = btrfs_lookup_ordered_extent(inode, start); 1726 1721 BUG_ON(!ordered_extent); 1722 + 1727 1723 if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) { 1728 1724 BUG_ON(!list_empty(&ordered_extent->list)); 1729 1725 ret = btrfs_ordered_update_i_size(inode, 0, ordered_extent); ··· 5799 5841 inode->i_ctime = CURRENT_TIME; 5800 5842 BTRFS_I(inode)->flags |= BTRFS_INODE_PREALLOC; 5801 5843 if (!(mode & FALLOC_FL_KEEP_SIZE) && 5802 - cur_offset > inode->i_size) { 5844 + (actual_len > inode->i_size) && 5845 + (cur_offset > inode->i_size)) { 5846 + 5803 5847 if (cur_offset > actual_len) 5804 5848 i_size = actual_len; 5805 5849 else
+2 -1
fs/btrfs/relocation.c
··· 3764 3764 BTRFS_DATA_RELOC_TREE_OBJECTID); 3765 3765 if (IS_ERR(fs_root)) 3766 3766 err = PTR_ERR(fs_root); 3767 - btrfs_orphan_cleanup(fs_root); 3767 + else 3768 + btrfs_orphan_cleanup(fs_root); 3768 3769 } 3769 3770 return err; 3770 3771 }
+2 -4
fs/fcntl.c
··· 199 199 static void f_modown(struct file *filp, struct pid *pid, enum pid_type type, 200 200 int force) 201 201 { 202 - unsigned long flags; 203 - 204 - write_lock_irqsave(&filp->f_owner.lock, flags); 202 + write_lock_irq(&filp->f_owner.lock); 205 203 if (force || !filp->f_owner.pid) { 206 204 put_pid(filp->f_owner.pid); 207 205 filp->f_owner.pid = get_pid(pid); ··· 211 213 filp->f_owner.euid = cred->euid; 212 214 } 213 215 } 214 - write_unlock_irqrestore(&filp->f_owner.lock, flags); 216 + write_unlock_irq(&filp->f_owner.lock); 215 217 } 216 218 217 219 int __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
+1
fs/file_table.c
··· 253 253 if (file->f_op && file->f_op->release) 254 254 file->f_op->release(inode, file); 255 255 security_file_free(file); 256 + ima_file_free(file); 256 257 if (unlikely(S_ISCHR(inode->i_mode) && inode->i_cdev != NULL)) 257 258 cdev_put(inode->i_cdev); 258 259 fops_put(file->f_op);
+2 -4
fs/namei.c
··· 1736 1736 if (nd.root.mnt) 1737 1737 path_put(&nd.root); 1738 1738 if (!IS_ERR(filp)) { 1739 - error = ima_path_check(&filp->f_path, filp->f_mode & 1740 - (MAY_READ | MAY_WRITE | MAY_EXEC)); 1739 + error = ima_file_check(filp, acc_mode); 1741 1740 if (error) { 1742 1741 fput(filp); 1743 1742 filp = ERR_PTR(error); ··· 1796 1797 } 1797 1798 filp = nameidata_to_filp(&nd); 1798 1799 if (!IS_ERR(filp)) { 1799 - error = ima_path_check(&filp->f_path, filp->f_mode & 1800 - (MAY_READ | MAY_WRITE | MAY_EXEC)); 1800 + error = ima_file_check(filp, acc_mode); 1801 1801 if (error) { 1802 1802 fput(filp); 1803 1803 filp = ERR_PTR(error);
+1 -1
fs/nfsd/vfs.c
··· 752 752 flags, current_cred()); 753 753 if (IS_ERR(*filp)) 754 754 host_err = PTR_ERR(*filp); 755 + host_err = ima_file_check(*filp, access); 755 756 out_nfserr: 756 757 err = nfserrno(host_err); 757 758 out: ··· 2128 2127 */ 2129 2128 path.mnt = exp->ex_path.mnt; 2130 2129 path.dentry = dentry; 2131 - err = ima_path_check(&path, acc & (MAY_READ | MAY_WRITE | MAY_EXEC)); 2132 2130 nfsd_out: 2133 2131 return err? nfserrno(err) : 0; 2134 2132 }
+2 -2
include/linux/ata.h
··· 647 647 return id[ATA_ID_SECTOR_SIZE] & (1 << 13); 648 648 } 649 649 650 - static inline u8 ata_id_logical_per_physical_sectors(const u16 *id) 650 + static inline u16 ata_id_logical_per_physical_sectors(const u16 *id) 651 651 { 652 - return id[ATA_ID_SECTOR_SIZE] & 0xf; 652 + return 1 << (id[ATA_ID_SECTOR_SIZE] & 0xf); 653 653 } 654 654 655 655 static inline int ata_id_has_lba48(const u16 *id)
+2
include/linux/compiler.h
··· 15 15 # define __acquire(x) __context__(x,1) 16 16 # define __release(x) __context__(x,-1) 17 17 # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0) 18 + # define __percpu __attribute__((noderef, address_space(3))) 18 19 extern void __chk_user_ptr(const volatile void __user *); 19 20 extern void __chk_io_ptr(const volatile void __iomem *); 20 21 #else ··· 33 32 # define __acquire(x) (void)0 34 33 # define __release(x) (void)0 35 34 # define __cond_lock(x,c) (c) 35 + # define __percpu 36 36 #endif 37 37 38 38 #ifdef __KERNEL__
+2 -2
include/linux/ima.h
··· 17 17 extern int ima_bprm_check(struct linux_binprm *bprm); 18 18 extern int ima_inode_alloc(struct inode *inode); 19 19 extern void ima_inode_free(struct inode *inode); 20 - extern int ima_path_check(struct path *path, int mask); 20 + extern int ima_file_check(struct file *file, int mask); 21 21 extern void ima_file_free(struct file *file); 22 22 extern int ima_file_mmap(struct file *file, unsigned long prot); 23 23 extern void ima_counts_get(struct file *file); ··· 38 38 return; 39 39 } 40 40 41 - static inline int ima_path_check(struct path *path, int mask) 41 + static inline int ima_file_check(struct file *file, int mask) 42 42 { 43 43 return 0; 44 44 }
+1 -1
init/main.c
··· 657 657 proc_caches_init(); 658 658 buffer_init(); 659 659 key_init(); 660 + radix_tree_init(); 660 661 security_init(); 661 662 vfs_caches_init(totalram_pages); 662 - radix_tree_init(); 663 663 signals_init(); 664 664 /* rootfs populating might need page-writeback */ 665 665 page_writeback_init();
+3
mm/migrate.c
··· 912 912 goto out_pm; 913 913 914 914 err = -ENODEV; 915 + if (node < 0 || node >= MAX_NUMNODES) 916 + goto out_pm; 917 + 915 918 if (!node_state(node, N_HIGH_MEMORY)) 916 919 goto out_pm; 917 920
+1 -2
security/integrity/ima/ima.h
··· 65 65 const char *cause, int result, int info); 66 66 67 67 /* Internal IMA function definitions */ 68 - void ima_iintcache_init(void); 69 68 int ima_init(void); 70 69 void ima_cleanup(void); 71 70 int ima_fs_init(void); ··· 130 131 void iint_rcu_free(struct rcu_head *rcu); 131 132 132 133 /* IMA policy related functions */ 133 - enum ima_hooks { PATH_CHECK = 1, FILE_MMAP, BPRM_CHECK }; 134 + enum ima_hooks { FILE_CHECK = 1, FILE_MMAP, BPRM_CHECK }; 134 135 135 136 int ima_match_policy(struct inode *inode, enum ima_hooks func, int mask); 136 137 void ima_init_policy(void);
+2 -2
security/integrity/ima/ima_api.c
··· 95 95 * ima_must_measure - measure decision based on policy. 96 96 * @inode: pointer to inode to measure 97 97 * @mask: contains the permission mask (MAY_READ, MAY_WRITE, MAY_EXECUTE) 98 - * @function: calling function (PATH_CHECK, BPRM_CHECK, FILE_MMAP) 98 + * @function: calling function (FILE_CHECK, BPRM_CHECK, FILE_MMAP) 99 99 * 100 100 * The policy is defined in terms of keypairs: 101 101 * subj=, obj=, type=, func=, mask=, fsmagic= 102 102 * subj,obj, and type: are LSM specific. 103 - * func: PATH_CHECK | BPRM_CHECK | FILE_MMAP 103 + * func: FILE_CHECK | BPRM_CHECK | FILE_MMAP 104 104 * mask: contains the permission mask 105 105 * fsmagic: hex value 106 106 *
+3 -6
security/integrity/ima/ima_iint.c
··· 52 52 struct ima_iint_cache *iint = NULL; 53 53 int rc = 0; 54 54 55 - if (!ima_initialized) 56 - return 0; 57 - 58 55 iint = kmem_cache_alloc(iint_cache, GFP_NOFS); 59 56 if (!iint) 60 57 return -ENOMEM; ··· 115 118 { 116 119 struct ima_iint_cache *iint; 117 120 118 - if (!ima_initialized) 119 - return; 120 121 spin_lock(&ima_iint_lock); 121 122 iint = radix_tree_delete(&ima_iint_store, (unsigned long)inode); 122 123 spin_unlock(&ima_iint_lock); ··· 136 141 kref_set(&iint->refcount, 1); 137 142 } 138 143 139 - void __init ima_iintcache_init(void) 144 + static int __init ima_iintcache_init(void) 140 145 { 141 146 iint_cache = 142 147 kmem_cache_create("iint_cache", sizeof(struct ima_iint_cache), 0, 143 148 SLAB_PANIC, init_once); 149 + return 0; 144 150 } 151 + security_initcall(ima_iintcache_init);
+93 -146
security/integrity/ima/ima_main.c
··· 14 14 * 15 15 * File: ima_main.c 16 16 * implements the IMA hooks: ima_bprm_check, ima_file_mmap, 17 - * and ima_path_check. 17 + * and ima_file_check. 18 18 */ 19 19 #include <linux/module.h> 20 20 #include <linux/file.h> ··· 84 84 return found; 85 85 } 86 86 87 + /* ima_read_write_check - reflect possible reading/writing errors in the PCR. 88 + * 89 + * When opening a file for read, if the file is already open for write, 90 + * the file could change, resulting in a file measurement error. 91 + * 92 + * Opening a file for write, if the file is already open for read, results 93 + * in a time of measure, time of use (ToMToU) error. 94 + * 95 + * In either case invalidate the PCR. 96 + */ 97 + enum iint_pcr_error { TOMTOU, OPEN_WRITERS }; 98 + static void ima_read_write_check(enum iint_pcr_error error, 99 + struct ima_iint_cache *iint, 100 + struct inode *inode, 101 + const unsigned char *filename) 102 + { 103 + switch (error) { 104 + case TOMTOU: 105 + if (iint->readcount > 0) 106 + ima_add_violation(inode, filename, "invalid_pcr", 107 + "ToMToU"); 108 + break; 109 + case OPEN_WRITERS: 110 + if (iint->writecount > 0) 111 + ima_add_violation(inode, filename, "invalid_pcr", 112 + "open_writers"); 113 + break; 114 + } 115 + } 116 + 87 117 /* 88 118 * Update the counts given an fmode_t 89 119 */ ··· 126 96 iint->readcount++; 127 97 if (mode & FMODE_WRITE) 128 98 iint->writecount++; 99 + } 100 + 101 + /* 102 + * ima_counts_get - increment file counts 103 + * 104 + * Maintain read/write counters for all files, but only 105 + * invalidate the PCR for measured files: 106 + * - Opening a file for write when already open for read, 107 + * results in a time of measure, time of use (ToMToU) error. 108 + * - Opening a file for read when already open for write, 109 + * could result in a file measurement error. 
110 + * 111 + */ 112 + void ima_counts_get(struct file *file) 113 + { 114 + struct dentry *dentry = file->f_path.dentry; 115 + struct inode *inode = dentry->d_inode; 116 + fmode_t mode = file->f_mode; 117 + struct ima_iint_cache *iint; 118 + int rc; 119 + 120 + if (!ima_initialized || !S_ISREG(inode->i_mode)) 121 + return; 122 + iint = ima_iint_find_get(inode); 123 + if (!iint) 124 + return; 125 + mutex_lock(&iint->mutex); 126 + rc = ima_must_measure(iint, inode, MAY_READ, FILE_CHECK); 127 + if (rc < 0) 128 + goto out; 129 + 130 + if (mode & FMODE_WRITE) { 131 + ima_read_write_check(TOMTOU, iint, inode, dentry->d_name.name); 132 + goto out; 133 + } 134 + ima_read_write_check(OPEN_WRITERS, iint, inode, dentry->d_name.name); 135 + out: 136 + ima_inc_counts(iint, file->f_mode); 137 + mutex_unlock(&iint->mutex); 138 + 139 + kref_put(&iint->refcount, iint_free); 129 140 } 130 141 131 142 /* ··· 224 153 kref_put(&iint->refcount, iint_free); 225 154 } 226 155 227 - /* ima_read_write_check - reflect possible reading/writing errors in the PCR. 228 - * 229 - * When opening a file for read, if the file is already open for write, 230 - * the file could change, resulting in a file measurement error. 231 - * 232 - * Opening a file for write, if the file is already open for read, results 233 - * in a time of measure, time of use (ToMToU) error. 234 - * 235 - * In either case invalidate the PCR. 
236 - */ 237 - enum iint_pcr_error { TOMTOU, OPEN_WRITERS }; 238 - static void ima_read_write_check(enum iint_pcr_error error, 239 - struct ima_iint_cache *iint, 240 - struct inode *inode, 241 - const unsigned char *filename) 242 - { 243 - switch (error) { 244 - case TOMTOU: 245 - if (iint->readcount > 0) 246 - ima_add_violation(inode, filename, "invalid_pcr", 247 - "ToMToU"); 248 - break; 249 - case OPEN_WRITERS: 250 - if (iint->writecount > 0) 251 - ima_add_violation(inode, filename, "invalid_pcr", 252 - "open_writers"); 253 - break; 254 - } 255 - } 256 - 257 - static int get_path_measurement(struct ima_iint_cache *iint, struct file *file, 258 - const unsigned char *filename) 259 - { 260 - int rc = 0; 261 - 262 - ima_inc_counts(iint, file->f_mode); 263 - 264 - rc = ima_collect_measurement(iint, file); 265 - if (!rc) 266 - ima_store_measurement(iint, file, filename); 267 - return rc; 268 - } 269 - 270 - /** 271 - * ima_path_check - based on policy, collect/store measurement. 272 - * @path: contains a pointer to the path to be measured 273 - * @mask: contains MAY_READ, MAY_WRITE or MAY_EXECUTE 274 - * 275 - * Measure the file being open for readonly, based on the 276 - * ima_must_measure() policy decision. 277 - * 278 - * Keep read/write counters for all files, but only 279 - * invalidate the PCR for measured files: 280 - * - Opening a file for write when already open for read, 281 - * results in a time of measure, time of use (ToMToU) error. 282 - * - Opening a file for read when already open for write, 283 - * could result in a file measurement error. 284 - * 285 - * Always return 0 and audit dentry_open failures. 286 - * (Return code will be based upon measurement appraisal.) 
287 - */ 288 - int ima_path_check(struct path *path, int mask) 289 - { 290 - struct inode *inode = path->dentry->d_inode; 291 - struct ima_iint_cache *iint; 292 - struct file *file = NULL; 293 - int rc; 294 - 295 - if (!ima_initialized || !S_ISREG(inode->i_mode)) 296 - return 0; 297 - iint = ima_iint_find_get(inode); 298 - if (!iint) 299 - return 0; 300 - 301 - mutex_lock(&iint->mutex); 302 - 303 - rc = ima_must_measure(iint, inode, MAY_READ, PATH_CHECK); 304 - if (rc < 0) 305 - goto out; 306 - 307 - if ((mask & MAY_WRITE) || (mask == 0)) 308 - ima_read_write_check(TOMTOU, iint, inode, 309 - path->dentry->d_name.name); 310 - 311 - if ((mask & (MAY_WRITE | MAY_READ | MAY_EXEC)) != MAY_READ) 312 - goto out; 313 - 314 - ima_read_write_check(OPEN_WRITERS, iint, inode, 315 - path->dentry->d_name.name); 316 - if (!(iint->flags & IMA_MEASURED)) { 317 - struct dentry *dentry = dget(path->dentry); 318 - struct vfsmount *mnt = mntget(path->mnt); 319 - 320 - file = dentry_open(dentry, mnt, O_RDONLY | O_LARGEFILE, 321 - current_cred()); 322 - if (IS_ERR(file)) { 323 - int audit_info = 0; 324 - 325 - integrity_audit_msg(AUDIT_INTEGRITY_PCR, inode, 326 - dentry->d_name.name, 327 - "add_measurement", 328 - "dentry_open failed", 329 - 1, audit_info); 330 - file = NULL; 331 - goto out; 332 - } 333 - rc = get_path_measurement(iint, file, dentry->d_name.name); 334 - } 335 - out: 336 - mutex_unlock(&iint->mutex); 337 - if (file) 338 - fput(file); 339 - kref_put(&iint->refcount, iint_free); 340 - return 0; 341 - } 342 - EXPORT_SYMBOL_GPL(ima_path_check); 343 - 344 156 static int process_measurement(struct file *file, const unsigned char *filename, 345 157 int mask, int function) 346 158 { ··· 250 296 kref_put(&iint->refcount, iint_free); 251 297 return rc; 252 298 } 253 - 254 - /* 255 - * ima_counts_get - increment file counts 256 - * 257 - * - for IPC shm and shmat file. 258 - * - for nfsd exported files. 
259 - 	 * 260 - 	 * Increment the counts for these files to prevent unnecessary 261 - 	 * imbalance messages. 262 - 	 */ 263 - 	void ima_counts_get(struct file *file) 264 - 	{ 265 - 		struct inode *inode = file->f_dentry->d_inode; 266 - 		struct ima_iint_cache *iint; 267 - 268 - 		if (!ima_initialized || !S_ISREG(inode->i_mode)) 269 - 			return; 270 - 		iint = ima_iint_find_get(inode); 271 - 		if (!iint) 272 - 			return; 273 - 		mutex_lock(&iint->mutex); 274 - 		ima_inc_counts(iint, file->f_mode); 275 - 		mutex_unlock(&iint->mutex); 276 - 277 - 		kref_put(&iint->refcount, iint_free); 278 - 	} 279 - 	EXPORT_SYMBOL_GPL(ima_counts_get); 280 299 281 300 	/** 282 301 	 * ima_file_mmap - based on policy, collect/store measurement. ··· 296 369 		return 0; 297 370 	} 298 371 372 + 	/** 373 + 	 * ima_file_check - based on policy, collect/store measurement. 374 + 	 * @file: pointer to the file to be measured 375 + 	 * @mask: contains MAY_READ, MAY_WRITE or MAY_EXECUTE 376 + 	 * 377 + 	 * Measure files based on the ima_must_measure() policy decision. 378 + 	 * 379 + 	 * Always return 0 and audit dentry_open failures. 380 + 	 * (Return code will be based upon measurement appraisal.) 381 + 	 */ 382 + 	int ima_file_check(struct file *file, int mask) 383 + 	{ 384 + 		int rc; 385 + 386 + 		rc = process_measurement(file, file->f_dentry->d_name.name, 387 + 				mask & (MAY_READ | MAY_WRITE | MAY_EXEC), 388 + 				FILE_CHECK); 389 + 		return 0; 390 + 	} 391 + 	EXPORT_SYMBOL_GPL(ima_file_check); 392 + 299 393 	static int __init init_ima(void) 300 394 	{ 301 395 	int error; 302 396 303 - 	ima_iintcache_init(); 304 397 	error = ima_init(); 305 398 	ima_initialized = 1; 306 399 	return error;
+6 -3
security/integrity/ima/ima_policy.c
··· 67 67 .flags = IMA_FUNC | IMA_MASK}, 68 68 {.action = MEASURE,.func = BPRM_CHECK,.mask = MAY_EXEC, 69 69 .flags = IMA_FUNC | IMA_MASK}, 70 - {.action = MEASURE,.func = PATH_CHECK,.mask = MAY_READ,.uid = 0, 70 + {.action = MEASURE,.func = FILE_CHECK,.mask = MAY_READ,.uid = 0, 71 71 .flags = IMA_FUNC | IMA_MASK | IMA_UID}, 72 72 }; 73 73 ··· 282 282 break; 283 283 case Opt_func: 284 284 audit_log_format(ab, "func=%s ", args[0].from); 285 - if (strcmp(args[0].from, "PATH_CHECK") == 0) 286 - entry->func = PATH_CHECK; 285 + if (strcmp(args[0].from, "FILE_CHECK") == 0) 286 + entry->func = FILE_CHECK; 287 + /* PATH_CHECK is for backwards compat */ 288 + else if (strcmp(args[0].from, "PATH_CHECK") == 0) 289 + entry->func = FILE_CHECK; 287 290 else if (strcmp(args[0].from, "FILE_MMAP") == 0) 288 291 entry->func = FILE_MMAP; 289 292 else if (strcmp(args[0].from, "BPRM_CHECK") == 0)
-2
security/security.c
··· 666 666 void security_file_free(struct file *file) 667 667 { 668 668 security_ops->file_free_security(file); 669 - if (file->f_dentry) 670 - ima_file_free(file); 671 669 } 672 670 673 671 int security_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+2 -13
sound/pci/ctxfi/ctatc.c
··· 166 166 167 167 static unsigned long atc_get_ptp_phys(struct ct_atc *atc, int index) 168 168 { 169 - struct ct_vm *vm; 170 - void *kvirt_addr; 171 - unsigned long phys_addr; 172 - 173 - vm = atc->vm; 174 - kvirt_addr = vm->get_ptp_virt(vm, index); 175 - if (kvirt_addr == NULL) 176 - phys_addr = (~0UL); 177 - else 178 - phys_addr = virt_to_phys(kvirt_addr); 179 - 180 - return phys_addr; 169 + return atc->vm->get_ptp_phys(atc->vm, index); 181 170 } 182 171 183 172 static unsigned int convert_format(snd_pcm_format_t snd_format) ··· 1658 1669 } 1659 1670 1660 1671 /* Set up device virtual memory management object */ 1661 - err = ct_vm_create(&atc->vm); 1672 + err = ct_vm_create(&atc->vm, pci); 1662 1673 if (err < 0) 1663 1674 goto error1; 1664 1675
+18 -20
sound/pci/ctxfi/ctvmem.c
··· 138 138 return NULL; 139 139 } 140 140 141 - ptp = vm->ptp[0]; 141 + ptp = (unsigned long *)vm->ptp[0].area; 142 142 pte_start = (block->addr >> CT_PAGE_SHIFT); 143 143 pages = block->size >> CT_PAGE_SHIFT; 144 144 for (i = 0; i < pages; i++) { ··· 158 158 } 159 159 160 160 /* * 161 - * return the host (kmalloced) addr of the @index-th device 162 - * page talbe page on success, or NULL on failure. 163 - * The first returned NULL indicates the termination. 161 + * return the host physical addr of the @index-th device 162 + * page table page on success, or ~0UL on failure. 163 + * The first returned ~0UL indicates the termination. 164 164 * */ 165 - static void * 166 - ct_get_ptp_virt(struct ct_vm *vm, int index) 165 + static dma_addr_t 166 + ct_get_ptp_phys(struct ct_vm *vm, int index) 167 167 { 168 - void *addr; 168 + dma_addr_t addr; 169 169 170 - addr = (index >= CT_PTP_NUM) ? NULL : vm->ptp[index]; 170 + addr = (index >= CT_PTP_NUM) ? ~0UL : vm->ptp[index].addr; 171 171 172 172 return addr; 173 173 } 174 174 175 - int ct_vm_create(struct ct_vm **rvm) 175 + int ct_vm_create(struct ct_vm **rvm, struct pci_dev *pci) 176 176 { 177 177 struct ct_vm *vm; 178 178 struct ct_vm_block *block; 179 - int i; 179 + int i, err = 0; 180 180 181 181 *rvm = NULL; 182 182 ··· 188 188 189 189 /* Allocate page table pages */ 190 190 for (i = 0; i < CT_PTP_NUM; i++) { 191 - vm->ptp[i] = kmalloc(PAGE_SIZE, GFP_KERNEL); 192 - if (!vm->ptp[i]) 191 + err = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, 192 + snd_dma_pci_data(pci), 193 + PAGE_SIZE, &vm->ptp[i]); 194 + if (err < 0) 193 195 break; 194 196 } 195 - if (!i) { 197 + if (err < 0) { 196 198 /* no page table pages are allocated */ 197 - kfree(vm); 199 + ct_vm_destroy(vm); 198 200 return -ENOMEM; 199 201 } 200 202 vm->size = CT_ADDRS_PER_PAGE * i; 201 - /* Initialise remaining ptps */ 202 - for (; i < CT_PTP_NUM; i++) 203 - vm->ptp[i] = NULL; 204 - 205 203 vm->map = ct_vm_map; 206 204 vm->unmap = ct_vm_unmap; 207 - vm->get_ptp_virt = 
ct_get_ptp_virt; 205 + vm->get_ptp_phys = ct_get_ptp_phys; 208 206 INIT_LIST_HEAD(&vm->unused); 209 207 INIT_LIST_HEAD(&vm->used); 210 208 block = kzalloc(sizeof(*block), GFP_KERNEL); ··· 240 242 241 243 /* free allocated page table pages */ 242 244 for (i = 0; i < CT_PTP_NUM; i++) 243 - kfree(vm->ptp[i]); 245 + snd_dma_free_pages(&vm->ptp[i]); 244 246 245 247 vm->size = 0; 246 248
+5 -3
sound/pci/ctxfi/ctvmem.h
··· 22 22 23 23 #include <linux/mutex.h> 24 24 #include <linux/list.h> 25 + #include <linux/pci.h> 26 + #include <sound/memalloc.h> 25 27 26 28 /* The chip can handle the page table of 4k pages 27 29 * (emu20k1 can handle even 8k pages, but we don't use it right now) ··· 43 41 44 42 /* Virtual memory management object for card device */ 45 43 struct ct_vm { 46 - void *ptp[CT_PTP_NUM]; /* Device page table pages */ 44 + struct snd_dma_buffer ptp[CT_PTP_NUM]; /* Device page table pages */ 47 45 unsigned int size; /* Available addr space in bytes */ 48 46 struct list_head unused; /* List of unused blocks */ 49 47 struct list_head used; /* List of used blocks */ ··· 54 52 int size); 55 53 /* Unmap device logical addr area. */ 56 54 void (*unmap)(struct ct_vm *, struct ct_vm_block *block); 57 - void *(*get_ptp_virt)(struct ct_vm *vm, int index); 55 + dma_addr_t (*get_ptp_phys)(struct ct_vm *vm, int index); 58 56 }; 59 57 60 - int ct_vm_create(struct ct_vm **rvm); 58 + int ct_vm_create(struct ct_vm **rvm, struct pci_dev *pci); 61 59 void ct_vm_destroy(struct ct_vm *vm); 62 60 63 61 #endif /* CTVMEM_H */
+18 -3
sound/pci/hda/hda_intel.c
··· 426 426 427 427 /* flags */ 428 428 int position_fix; 429 + int poll_count; 429 430 unsigned int running :1; 430 431 unsigned int initialized :1; 431 432 unsigned int single_cmd :1; ··· 507 506 #define get_azx_dev(substream) (substream->runtime->private_data) 508 507 509 508 static int azx_acquire_irq(struct azx *chip, int do_disconnect); 510 - 509 + static int azx_send_cmd(struct hda_bus *bus, unsigned int val); 511 510 /* 512 511 * Interface for HD codec 513 512 */ ··· 665 664 { 666 665 struct azx *chip = bus->private_data; 667 666 unsigned long timeout; 667 + int do_poll = 0; 668 668 669 669 again: 670 670 timeout = jiffies + msecs_to_jiffies(1000); 671 671 for (;;) { 672 - if (chip->polling_mode) { 672 + if (chip->polling_mode || do_poll) { 673 673 spin_lock_irq(&chip->reg_lock); 674 674 azx_update_rirb(chip); 675 675 spin_unlock_irq(&chip->reg_lock); ··· 678 676 if (!chip->rirb.cmds[addr]) { 679 677 smp_rmb(); 680 678 bus->rirb_error = 0; 679 + 680 + if (!do_poll) 681 + chip->poll_count = 0; 681 682 return chip->rirb.res[addr]; /* the last value */ 682 683 } 683 684 if (time_after(jiffies, timeout)) ··· 692 687 cond_resched(); 693 688 } 694 689 } 690 + 691 + if (!chip->polling_mode && chip->poll_count < 2) { 692 + snd_printdd(SFX "azx_get_response timeout, " 693 + "polling the codec once: last cmd=0x%08x\n", 694 + chip->last_cmd[addr]); 695 + do_poll = 1; 696 + chip->poll_count++; 697 + goto again; 698 + } 699 + 695 700 696 701 if (!chip->polling_mode) { 697 702 snd_printk(KERN_WARNING SFX "azx_get_response timeout, " ··· 2058 2043 { 2059 2044 if (request_irq(chip->pci->irq, azx_interrupt, 2060 2045 chip->msi ? 0 : IRQF_SHARED, 2061 - "HDA Intel", chip)) { 2046 + "hda_intel", chip)) { 2062 2047 printk(KERN_ERR "hda-intel: unable to grab IRQ %d, " 2063 2048 "disabling device\n", chip->pci->irq); 2064 2049 if (do_disconnect)
+7 -5
sound/pci/ice1712/aureon.c
··· 703 703 { 704 704 unsigned char nvol; 705 705 706 - if ((master & WM_VOL_MUTE) || (vol & WM_VOL_MUTE)) 706 + if ((master & WM_VOL_MUTE) || (vol & WM_VOL_MUTE)) { 707 707 nvol = 0; 708 - else 708 + } else { 709 709 nvol = ((vol % WM_VOL_CNT) * (master % WM_VOL_CNT)) / 710 710 WM_VOL_MAX; 711 + nvol += 0x1b; 712 + } 711 713 712 714 wm_put(ice, index, nvol); 713 715 wm_put_nocache(ice, index, 0x180 | nvol); ··· 780 778 for (ch = 0; ch < 2; ch++) { 781 779 unsigned int vol = ucontrol->value.integer.value[ch]; 782 780 if (vol > WM_VOL_MAX) 783 - continue; 781 + vol = WM_VOL_MAX; 784 782 vol |= spec->master[ch] & WM_VOL_MUTE; 785 783 if (vol != spec->master[ch]) { 786 784 int dac; ··· 836 834 for (i = 0; i < voices; i++) { 837 835 unsigned int vol = ucontrol->value.integer.value[i]; 838 836 if (vol > WM_VOL_MAX) 839 - continue; 840 - vol |= spec->vol[ofs+i]; 837 + vol = WM_VOL_MAX; 838 + vol |= spec->vol[ofs+i] & WM_VOL_MUTE; 841 839 if (vol != spec->vol[ofs+i]) { 842 840 spec->vol[ofs+i] = vol; 843 841 idx = WM_DAC_ATTEN + ofs + i;
+1
sound/soc/omap/omap3pandora.c
··· 145 145 }; 146 146 147 147 static const struct snd_soc_dapm_route omap3pandora_out_map[] = { 148 + {"PCM DAC", NULL, "APLL Enable"}, 148 149 {"Headphone Amplifier", NULL, "PCM DAC"}, 149 150 {"Line Out", NULL, "PCM DAC"}, 150 151 {"Headphone Jack", NULL, "Headphone Amplifier"},