Auto-update from upstream

Len Brown 5b2db367 d3e4cefc

+672 -1215
+2
arch/arm/kernel/module.c
···
 			break;

 		case R_ARM_PC24:
+		case R_ARM_CALL:
+		case R_ARM_JUMP24:
 			offset = (*(u32 *)loc & 0x00ffffff) << 2;
 			if (offset & 0x02000000)
 				offset -= 0x04000000;
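
Note: newer toolchains emit R_ARM_CALL (for BL) and R_ARM_JUMP24 (for B) where older ones emitted R_ARM_PC24; all three store the same signed 24-bit word offset in the low bits of the instruction, so they can share one case. A standalone sketch of the decode/relocate/re-encode cycle visible in the hunk above (patch_branch, insn, loc and sym are illustrative names, not the kernel's):

    #include <stdint.h>

    /* Illustrative sketch, not the kernel function: pull the signed
     * 24-bit word offset out of an ARM branch instruction, adjust it
     * against the symbol, and write it back.  The masking and
     * sign-extension mirror the hunk above; the implicit -8 pipeline
     * bias stays baked into the stored offset throughout.
     */
    static uint32_t patch_branch(uint32_t insn, uint32_t loc, uint32_t sym)
    {
        int32_t offset = (insn & 0x00ffffff) << 2;  /* low 24 bits, in bytes */

        if (offset & 0x02000000)                    /* sign-extend bit 25 */
            offset -= 0x04000000;

        offset += sym - loc;                        /* relocate */

        return (insn & 0xff000000) | ((offset >> 2) & 0x00ffffff);
    }
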
+5 -4
arch/arm/mach-pxa/pm.c
···
 	PSPR = 0;

 	/* restore registers */
+	RESTORE_GPLEVEL(0); RESTORE_GPLEVEL(1); RESTORE_GPLEVEL(2);
+	RESTORE(GPDR0); RESTORE(GPDR1); RESTORE(GPDR2);
 	RESTORE(GAFR0_L); RESTORE(GAFR0_U);
 	RESTORE(GAFR1_L); RESTORE(GAFR1_U);
 	RESTORE(GAFR2_L); RESTORE(GAFR2_U);
-	RESTORE_GPLEVEL(0); RESTORE_GPLEVEL(1); RESTORE_GPLEVEL(2);
-	RESTORE(GPDR0); RESTORE(GPDR1); RESTORE(GPDR2);
 	RESTORE(GRER0); RESTORE(GRER1); RESTORE(GRER2);
 	RESTORE(GFER0); RESTORE(GFER1); RESTORE(GFER2);
 	RESTORE(PGSR0); RESTORE(PGSR1); RESTORE(PGSR2);

 #ifdef CONFIG_PXA27x
 	RESTORE(MDREFR);
-	RESTORE(GAFR3_L); RESTORE(GAFR3_U); RESTORE_GPLEVEL(3);
-	RESTORE(GPDR3); RESTORE(GRER3); RESTORE(GFER3); RESTORE(PGSR3);
+	RESTORE_GPLEVEL(3); RESTORE(GPDR3);
+	RESTORE(GAFR3_L); RESTORE(GAFR3_U);
+	RESTORE(GRER3); RESTORE(GFER3); RESTORE(PGSR3);
 	RESTORE(PWER); RESTORE(PCFR); RESTORE(PRER);
 	RESTORE(PFER); RESTORE(PKWR);
 #endif
+2
arch/ia64/kernel/process.c
···
 	/* drop floating-point and debug-register state if it exists: */
 	current->thread.flags &= ~(IA64_THREAD_FPH_VALID | IA64_THREAD_DBG_VALID);
 	ia64_drop_fpu(current);
+#ifdef CONFIG_IA32_SUPPORT
 	if (IS_IA32_PROCESS(ia64_task_regs(current))) {
 		ia32_drop_partial_page_list(current);
 		current->thread.task_size = IA32_PAGE_OFFSET;
 		set_fs(USER_DS);
 	}
+#endif
 }

 /*
+1 -9
drivers/ide/Kconfig
···
 	depends on SOC_AU1200 && BLK_DEV_IDE_AU1XXX
 endchoice

-config BLK_DEV_IDE_AU1XXX_BURSTABLE_ON
-	bool "Enable burstable Mode on DbDMA"
-	default false
-	depends BLK_DEV_IDE_AU1XXX
-	help
-	  This option enable the burstable Flag on DbDMA controller
-	  (cf. "AMD Alchemy 'Au1200' Processor Data Book - PRELIMINARY").
-
 config BLK_DEV_IDE_AU1XXX_SEQTS_PER_RQ
 	int "Maximum transfer size (KB) per request (up to 128)"
 	default "128"
···
 config BLK_DEV_MPC8xx_IDE
 	bool "MPC8xx IDE support"
-	depends on 8xx
+	depends on 8xx && IDE=y && BLK_DEV_IDE=y
 	help
 	  This option provides support for IDE on Motorola MPC8xx Systems.
 	  Please see 'Type of MPC8xx IDE interface' for details.
-7
drivers/ide/ide-cd.c
···
 	struct cdrom_info *info = drive->driver_data;

 	info->dma = 0;
-	info->cmd = 0;
 	info->start_seek = jiffies;
 	return cdrom_start_packet_command(drive, 0, cdrom_start_seek_continuation);
 }
···
 	if ((rq->sector & (sectors_per_frame - 1)) ||
 	    (rq->nr_sectors & (sectors_per_frame - 1)))
 		info->dma = 0;
-
-	info->cmd = READ;

 	/* Start sending the read request to the drive. */
 	return cdrom_start_packet_command(drive, 32768, cdrom_start_read_continuation);
···
 	struct cdrom_info *info = drive->driver_data;

 	info->dma = 0;
-	info->cmd = 0;
 	rq->flags &= ~REQ_FAILED;
 	len = rq->data_len;
···
 	/* use dma, if possible. we don't need to check more, since we
 	 * know that the transfer is always (at least!) frame aligned */
 	info->dma = drive->using_dma ? 1 : 0;
-	info->cmd = WRITE;

 	info->devinfo.media_written = 1;
···
 	rq->flags |= REQ_QUIET;

 	info->dma = 0;
-	info->cmd = 0;

 	/*
 	 * sg request
···
 		int mask = drive->queue->dma_alignment;
 		unsigned long addr = (unsigned long) page_address(bio_page(rq->bio));

-		info->cmd = rq_data_dir(rq);
 		info->dma = drive->using_dma;

 		/*
-1
drivers/ide/ide-cd.h
···
 	struct request request_sense_request;
 	int dma;
-	int cmd;
 	unsigned long last_block;
 	unsigned long start_seek;
 	/* Buffer to hold mechanism status and changer slot table. */
+2 -2
drivers/ide/ide-disk.c
···
 	struct ide_disk_obj *idkp = drive->driver_data;
 	struct gendisk *g = idkp->disk;

-	ide_cacheflush_p(drive);
-
 	ide_unregister_subdriver(drive, idkp->driver);

 	del_gendisk(g);
+
+	ide_cacheflush_p(drive);

 	ide_disk_put(idkp);
+6 -9
drivers/ide/ide-dma.c
···
 #include <asm/io.h>
 #include <asm/irq.h>

-struct drive_list_entry {
-	const char *id_model;
-	const char *id_firmware;
-};
-
 static const struct drive_list_entry drive_whitelist [] = {

 	{ "Micropolis 2112A"	,	"ALL"	},
···
 };

 /**
- *	in_drive_list		-	look for drive in black/white list
+ *	ide_in_drive_list	-	look for drive in black/white list
  *	@id: drive identifier
  *	@drive_table: list to inspect
  *
···
  *	Returns 1 if the drive is found in the table.
  */

-static int in_drive_list(struct hd_driveid *id, const struct drive_list_entry *drive_table)
+int ide_in_drive_list(struct hd_driveid *id, const struct drive_list_entry *drive_table)
 {
 	for ( ; drive_table->id_model ; drive_table++)
 		if ((!strcmp(drive_table->id_model, id->model)) &&
···
 			return 1;
 	return 0;
 }
+
+EXPORT_SYMBOL_GPL(ide_in_drive_list);

 /**
  *	ide_dma_intr	-	IDE DMA interrupt handler
···
 {
 	struct hd_driveid *id = drive->id;

-	int blacklist = in_drive_list(id, drive_blacklist);
+	int blacklist = ide_in_drive_list(id, drive_blacklist);
 	if (blacklist) {
 		printk(KERN_WARNING "%s: Disabling (U)DMA for %s (blacklisted)\n",
 		       drive->name, id->model);
···
 int __ide_dma_good_drive (ide_drive_t *drive)
 {
 	struct hd_driveid *id = drive->id;
-	return in_drive_list(id, drive_whitelist);
+	return ide_in_drive_list(id, drive_whitelist);
 }

 EXPORT_SYMBOL(__ide_dma_good_drive);
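
The helper loses its static qualifier and gains an ide_ prefix so other drivers can share the black/white-list lookup instead of carrying a private copy; the au1xxx-ide driver below drops exactly such a duplicate. A hedged sketch of how a caller uses the newly exported helper (my_dma_white_list and my_drive_is_whitelisted are made up for illustration):

    /* A table is terminated by a NULL model, and "ALL" matches any
     * firmware revision, exactly as in drive_whitelist[] above.
     */
    static const struct drive_list_entry my_dma_white_list[] = {
        { "Micropolis 2112A",	"ALL"	},	/* entry borrowed from drive_whitelist */
        { NULL,			NULL	}
    };

    static int my_drive_is_whitelisted(ide_drive_t *drive)
    {
        return ide_in_drive_list(drive->id, my_dma_white_list);
    }
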
+3
drivers/ide/mips/Makefile
···
 obj-$(CONFIG_BLK_DEV_IDE_SWARM)		+= swarm.o
+obj-$(CONFIG_BLK_DEV_IDE_AU1XXX)	+= au1xxx-ide.o
+
+EXTRA_CFLAGS := -Idrivers/ide
+501 -935
drivers/ide/mips/au1xxx-ide.c
···
  */
 #undef REALLY_SLOW_IO /* most systems can safely undef this */

-#include <linux/config.h> /* for CONFIG_BLK_DEV_IDEPCI */
 #include <linux/types.h>
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/delay.h>
-#include <linux/timer.h>
-#include <linux/mm.h>
-#include <linux/ioport.h>
-#include <linux/hdreg.h>
+#include <linux/platform_device.h>
+
 #include <linux/init.h>
 #include <linux/ide.h>
 #include <linux/sysdev.h>

 #include <linux/dma-mapping.h>

+#include "ide-timing.h"
+
 #include <asm/io.h>
 #include <asm/mach-au1x00/au1xxx.h>
 #include <asm/mach-au1x00/au1xxx_dbdma.h>
-
-#if CONFIG_PM
-#include <asm/mach-au1x00/au1xxx_pm.h>
-#endif

 #include <asm/mach-au1x00/au1xxx_ide.h>

 #define DRV_NAME "au1200-ide"
 #define DRV_VERSION "1.0"
-#define DRV_AUTHOR "AMD PCS / Pete Popov <ppopov@embeddedalley.com>"
-#define DRV_DESC "Au1200 IDE"
+#define DRV_AUTHOR "Enrico Walther <enrico.walther@amd.com> / Pete Popov <ppopov@embeddedalley.com>"
+
+/* enable the burstmode in the dbdma */
+#define IDE_AU1XXX_BURSTMODE 1

 static _auide_hwif auide_hwif;
-static spinlock_t ide_tune_drive_spin_lock = SPIN_LOCK_UNLOCKED;
-static spinlock_t ide_tune_chipset_spin_lock = SPIN_LOCK_UNLOCKED;
-static int dbdma_init_done = 0;
+static int dbdma_init_done;

-/*
- * local I/O functions
- */
-u8 auide_inb(unsigned long port)
-{
-    return (au_readb(port));
-}
-
-u16 auide_inw(unsigned long port)
-{
-    return (au_readw(port));
-}
-
-u32 auide_inl(unsigned long port)
-{
-    return (au_readl(port));
-}
+#if defined(CONFIG_BLK_DEV_IDE_AU1XXX_PIO_DBDMA)

 void auide_insw(unsigned long port, void *addr, u32 count)
 {
-#if defined(CONFIG_BLK_DEV_IDE_AU1XXX_PIO_DBDMA)
+    _auide_hwif *ahwif = &auide_hwif;
+    chan_tab_t *ctp;
+    au1x_ddma_desc_t *dp;

-    _auide_hwif *ahwif = &auide_hwif;
-    chan_tab_t *ctp;
-    au1x_ddma_desc_t *dp;
-
-    if(!put_dest_flags(ahwif->rx_chan, (void*)addr, count << 1,
-                       DDMA_FLAGS_NOIE)) {
-        printk(KERN_ERR "%s failed %d\n", __FUNCTION__, __LINE__);
-        return;
-    }
-    ctp = *((chan_tab_t **)ahwif->rx_chan);
-    dp = ctp->cur_ptr;
-    while (dp->dscr_cmd0 & DSCR_CMD0_V)
-        ;
-    ctp->cur_ptr = au1xxx_ddma_get_nextptr_virt(dp);
-#else
-    while (count--)
-    {
-        *(u16 *)addr = au_readw(port);
-        addr +=2 ;
-    }
-#endif
-}
-
-void auide_insl(unsigned long port, void *addr, u32 count)
-{
-    while (count--)
-    {
-        *(u32 *)addr = au_readl(port);
-        /* NOTE: For IDE interfaces over PCMCIA,
-         * 32-bit access does not work
-         */
-        addr += 4;
-    }
-}
-
-void auide_outb(u8 addr, unsigned long port)
-{
-    return (au_writeb(addr, port));
-}
-
-void auide_outbsync(ide_drive_t *drive, u8 addr, unsigned long port)
-{
-    return (au_writeb(addr, port));
-}
-
-void auide_outw(u16 addr, unsigned long port)
-{
-    return (au_writew(addr, port));
-}
-
-void auide_outl(u32 addr, unsigned long port)
-{
-    return (au_writel(addr, port));
+    if(!put_dest_flags(ahwif->rx_chan, (void*)addr, count << 1,
+                       DDMA_FLAGS_NOIE)) {
+        printk(KERN_ERR "%s failed %d\n", __FUNCTION__, __LINE__);
+        return;
+    }
+    ctp = *((chan_tab_t **)ahwif->rx_chan);
+    dp = ctp->cur_ptr;
+    while (dp->dscr_cmd0 & DSCR_CMD0_V)
+        ;
+    ctp->cur_ptr = au1xxx_ddma_get_nextptr_virt(dp);
 }

 void auide_outsw(unsigned long port, void *addr, u32 count)
 {
-#if defined(CONFIG_BLK_DEV_IDE_AU1XXX_PIO_DBDMA)
-    _auide_hwif *ahwif = &auide_hwif;
-    chan_tab_t *ctp;
-    au1x_ddma_desc_t *dp;
+    _auide_hwif *ahwif = &auide_hwif;
+    chan_tab_t *ctp;
+    au1x_ddma_desc_t *dp;

-    if(!put_source_flags(ahwif->tx_chan, (void*)addr,
-                         count << 1, DDMA_FLAGS_NOIE)) {
-        printk(KERN_ERR "%s failed %d\n", __FUNCTION__, __LINE__);
-        return;
-    }
-    ctp = *((chan_tab_t **)ahwif->tx_chan);
-    dp = ctp->cur_ptr;
-    while (dp->dscr_cmd0 & DSCR_CMD0_V)
-        ;
-    ctp->cur_ptr = au1xxx_ddma_get_nextptr_virt(dp);
-#else
-    while (count--)
-    {
-        au_writew(*(u16 *)addr, port);
-        addr += 2;
-    }
+    if(!put_source_flags(ahwif->tx_chan, (void*)addr,
+                         count << 1, DDMA_FLAGS_NOIE)) {
+        printk(KERN_ERR "%s failed %d\n", __FUNCTION__, __LINE__);
+        return;
+    }
+    ctp = *((chan_tab_t **)ahwif->tx_chan);
+    dp = ctp->cur_ptr;
+    while (dp->dscr_cmd0 & DSCR_CMD0_V)
+        ;
+    ctp->cur_ptr = au1xxx_ddma_get_nextptr_virt(dp);
+}
+
 #endif
-}
-
-void auide_outsl(unsigned long port, void *addr, u32 count)
-{
-    while (count--)
-    {
-        au_writel(*(u32 *)addr, port);
-        /* NOTE: For IDE interfaces over PCMCIA,
-         * 32-bit access does not work
-         */
-        addr += 4;
-    }
-}

 static void auide_tune_drive(ide_drive_t *drive, byte pio)
 {
-    int mem_sttime;
-    int mem_stcfg;
-    unsigned long flags;
-    u8 speed;
+    int mem_sttime;
+    int mem_stcfg;
+    u8 speed;

-    /* get the best pio mode for the drive */
-    pio = ide_get_best_pio_mode(drive, pio, 4, NULL);
+    /* get the best pio mode for the drive */
+    pio = ide_get_best_pio_mode(drive, pio, 4, NULL);

-    printk("%s: setting Au1XXX IDE to PIO mode%d\n",
-           drive->name, pio);
+    printk(KERN_INFO "%s: setting Au1XXX IDE to PIO mode%d\n",
+           drive->name, pio);

-    spin_lock_irqsave(&ide_tune_drive_spin_lock, flags);
+    mem_sttime = 0;
+    mem_stcfg = au_readl(MEM_STCFG2);

-    mem_sttime = 0;
-    mem_stcfg = au_readl(MEM_STCFG2);
+    /* set pio mode! */
+    switch(pio) {
+    case 0:
+        mem_sttime = SBC_IDE_TIMING(PIO0);

-    /* set pio mode! */
-    switch(pio) {
-    case 0:
-        /* set timing parameters for RCS2# */
-        mem_sttime = SBC_IDE_PIO0_TWCS
-            | SBC_IDE_PIO0_TCSH
-            | SBC_IDE_PIO0_TCSOFF
-            | SBC_IDE_PIO0_TWP
-            | SBC_IDE_PIO0_TCSW
-            | SBC_IDE_PIO0_TPM
-            | SBC_IDE_PIO0_TA;
-        /* set configuration for RCS2# */
-        mem_stcfg |= TS_MASK;
-        mem_stcfg &= ~TCSOE_MASK;
-        mem_stcfg &= ~TOECS_MASK;
-        mem_stcfg |= SBC_IDE_PIO0_TCSOE | SBC_IDE_PIO0_TOECS;
+        /* set configuration for RCS2# */
+        mem_stcfg |= TS_MASK;
+        mem_stcfg &= ~TCSOE_MASK;
+        mem_stcfg &= ~TOECS_MASK;
+        mem_stcfg |= SBC_IDE_PIO0_TCSOE | SBC_IDE_PIO0_TOECS;
+        break;

-        au_writel(mem_sttime,MEM_STTIME2);
-        au_writel(mem_stcfg,MEM_STCFG2);
-        break;
+    case 1:
+        mem_sttime = SBC_IDE_TIMING(PIO1);

-    case 1:
-        /* set timing parameters for RCS2# */
-        mem_sttime = SBC_IDE_PIO1_TWCS
-            | SBC_IDE_PIO1_TCSH
-            | SBC_IDE_PIO1_TCSOFF
-            | SBC_IDE_PIO1_TWP
-            | SBC_IDE_PIO1_TCSW
-            | SBC_IDE_PIO1_TPM
-            | SBC_IDE_PIO1_TA;
-        /* set configuration for RCS2# */
-        mem_stcfg |= TS_MASK;
-        mem_stcfg &= ~TCSOE_MASK;
-        mem_stcfg &= ~TOECS_MASK;
-        mem_stcfg |= SBC_IDE_PIO1_TCSOE | SBC_IDE_PIO1_TOECS;
-        break;
+        /* set configuration for RCS2# */
+        mem_stcfg |= TS_MASK;
+        mem_stcfg &= ~TCSOE_MASK;
+        mem_stcfg &= ~TOECS_MASK;
+        mem_stcfg |= SBC_IDE_PIO1_TCSOE | SBC_IDE_PIO1_TOECS;
+        break;

-    case 2:
-        /* set timing parameters for RCS2# */
-        mem_sttime = SBC_IDE_PIO2_TWCS
-            | SBC_IDE_PIO2_TCSH
-            | SBC_IDE_PIO2_TCSOFF
-            | SBC_IDE_PIO2_TWP
-            | SBC_IDE_PIO2_TCSW
-            | SBC_IDE_PIO2_TPM
-            | SBC_IDE_PIO2_TA;
-        /* set configuration for RCS2# */
-        mem_stcfg &= ~TS_MASK;
-        mem_stcfg &= ~TCSOE_MASK;
-        mem_stcfg &= ~TOECS_MASK;
-        mem_stcfg |= SBC_IDE_PIO2_TCSOE | SBC_IDE_PIO2_TOECS;
-        break;
+    case 2:
+        mem_sttime = SBC_IDE_TIMING(PIO2);

+        /* set configuration for RCS2# */
+        mem_stcfg &= ~TS_MASK;
+        mem_stcfg &= ~TCSOE_MASK;
+        mem_stcfg &= ~TOECS_MASK;
+        mem_stcfg |= SBC_IDE_PIO2_TCSOE | SBC_IDE_PIO2_TOECS;
+        break;

-    case 3:
-        /* set timing parameters for RCS2# */
-        mem_sttime = SBC_IDE_PIO3_TWCS
-            | SBC_IDE_PIO3_TCSH
-            | SBC_IDE_PIO3_TCSOFF
-            | SBC_IDE_PIO3_TWP
-            | SBC_IDE_PIO3_TCSW
-            | SBC_IDE_PIO3_TPM
-            | SBC_IDE_PIO3_TA;
-        /* set configuration for RCS2# */
-        mem_stcfg |= TS_MASK;
-        mem_stcfg &= ~TS_MASK;
-        mem_stcfg &= ~TCSOE_MASK;
-        mem_stcfg &= ~TOECS_MASK;
-        mem_stcfg |= SBC_IDE_PIO3_TCSOE | SBC_IDE_PIO3_TOECS;
+    case 3:
+        mem_sttime = SBC_IDE_TIMING(PIO3);

-        break;
+        /* set configuration for RCS2# */
+        mem_stcfg &= ~TS_MASK;
+        mem_stcfg &= ~TCSOE_MASK;
+        mem_stcfg &= ~TOECS_MASK;
+        mem_stcfg |= SBC_IDE_PIO3_TCSOE | SBC_IDE_PIO3_TOECS;

-    case 4:
-        /* set timing parameters for RCS2# */
-        mem_sttime = SBC_IDE_PIO4_TWCS
-            | SBC_IDE_PIO4_TCSH
-            | SBC_IDE_PIO4_TCSOFF
-            | SBC_IDE_PIO4_TWP
-            | SBC_IDE_PIO4_TCSW
-            | SBC_IDE_PIO4_TPM
-            | SBC_IDE_PIO4_TA;
-        /* set configuration for RCS2# */
-        mem_stcfg &= ~TS_MASK;
-        mem_stcfg &= ~TCSOE_MASK;
-        mem_stcfg &= ~TOECS_MASK;
-        mem_stcfg |= SBC_IDE_PIO4_TCSOE | SBC_IDE_PIO4_TOECS;
-        break;
-    }
+        break;

-    au_writel(mem_sttime,MEM_STTIME2);
-    au_writel(mem_stcfg,MEM_STCFG2);
+    case 4:
+        mem_sttime = SBC_IDE_TIMING(PIO4);

-    spin_unlock_irqrestore(&ide_tune_drive_spin_lock, flags);
+        /* set configuration for RCS2# */
+        mem_stcfg &= ~TS_MASK;
+        mem_stcfg &= ~TCSOE_MASK;
+        mem_stcfg &= ~TOECS_MASK;
+        mem_stcfg |= SBC_IDE_PIO4_TCSOE | SBC_IDE_PIO4_TOECS;
+        break;
+    }

-    speed = pio + XFER_PIO_0;
-    ide_config_drive_speed(drive, speed);
+    au_writel(mem_sttime,MEM_STTIME2);
+    au_writel(mem_stcfg,MEM_STCFG2);
+
+    speed = pio + XFER_PIO_0;
+    ide_config_drive_speed(drive, speed);
 }

 static int auide_tune_chipset (ide_drive_t *drive, u8 speed)
 {
-    u8 mode = 0;
-    int mem_sttime;
-    int mem_stcfg;
-    unsigned long flags;
-#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA
-    struct hd_driveid *id = drive->id;
+    int mem_sttime;
+    int mem_stcfg;
+    unsigned long mode;

-    /*
-     * Now see what the current drive is capable of,
-     * selecting UDMA only if the mate said it was ok.
-     */
-    if (id && (id->capability & 1) && drive->autodma &&
-        !__ide_dma_bad_drive(drive)) {
-        if (!mode && (id->field_valid & 2) && (id->dma_mword & 7)) {
-            if (id->dma_mword & 4)
-                mode = XFER_MW_DMA_2;
-            else if (id->dma_mword & 2)
-                mode = XFER_MW_DMA_1;
-            else if (id->dma_mword & 1)
-                mode = XFER_MW_DMA_0;
-        }
-    }
+#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA
+    if (ide_use_dma(drive))
+        mode = ide_dma_speed(drive, 0);
 #endif

-    spin_lock_irqsave(&ide_tune_chipset_spin_lock, flags);
+    mem_sttime = 0;
+    mem_stcfg = au_readl(MEM_STCFG2);

-    mem_sttime = 0;
-    mem_stcfg = au_readl(MEM_STCFG2);
-
-    switch(speed) {
-    case XFER_PIO_4:
-    case XFER_PIO_3:
-    case XFER_PIO_2:
-    case XFER_PIO_1:
-    case XFER_PIO_0:
-        auide_tune_drive(drive, (speed - XFER_PIO_0));
-        break;
+    if (speed >= XFER_PIO_0 && speed <= XFER_PIO_4) {
+        auide_tune_drive(drive, speed - XFER_PIO_0);
+        return 0;
+    }
+
+    switch(speed) {
 #ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA
-    case XFER_MW_DMA_2:
-        /* set timing parameters for RCS2# */
-        mem_sttime = SBC_IDE_MDMA2_TWCS
-            | SBC_IDE_MDMA2_TCSH
-            | SBC_IDE_MDMA2_TCSOFF
-            | SBC_IDE_MDMA2_TWP
-            | SBC_IDE_MDMA2_TCSW
-            | SBC_IDE_MDMA2_TPM
-            | SBC_IDE_MDMA2_TA;
-        /* set configuration for RCS2# */
-        mem_stcfg &= ~TS_MASK;
-        mem_stcfg &= ~TCSOE_MASK;
-        mem_stcfg &= ~TOECS_MASK;
-        mem_stcfg |= SBC_IDE_MDMA2_TCSOE | SBC_IDE_MDMA2_TOECS;
+    case XFER_MW_DMA_2:
+        mem_sttime = SBC_IDE_TIMING(MDMA2);

-        mode = XFER_MW_DMA_2;
-        break;
-    case XFER_MW_DMA_1:
-        /* set timing parameters for RCS2# */
-        mem_sttime = SBC_IDE_MDMA1_TWCS
-            | SBC_IDE_MDMA1_TCSH
-            | SBC_IDE_MDMA1_TCSOFF
-            | SBC_IDE_MDMA1_TWP
-            | SBC_IDE_MDMA1_TCSW
-            | SBC_IDE_MDMA1_TPM
-            | SBC_IDE_MDMA1_TA;
-        /* set configuration for RCS2# */
-        mem_stcfg &= ~TS_MASK;
-        mem_stcfg &= ~TCSOE_MASK;
-        mem_stcfg &= ~TOECS_MASK;
-        mem_stcfg |= SBC_IDE_MDMA1_TCSOE | SBC_IDE_MDMA1_TOECS;
+        /* set configuration for RCS2# */
+        mem_stcfg &= ~TS_MASK;
+        mem_stcfg &= ~TCSOE_MASK;
+        mem_stcfg &= ~TOECS_MASK;
+        mem_stcfg |= SBC_IDE_MDMA2_TCSOE | SBC_IDE_MDMA2_TOECS;

-        mode = XFER_MW_DMA_1;
-        break;
-    case XFER_MW_DMA_0:
-        /* set timing parameters for RCS2# */
-        mem_sttime = SBC_IDE_MDMA0_TWCS
-            | SBC_IDE_MDMA0_TCSH
-            | SBC_IDE_MDMA0_TCSOFF
-            | SBC_IDE_MDMA0_TWP
-            | SBC_IDE_MDMA0_TCSW
-            | SBC_IDE_MDMA0_TPM
-            | SBC_IDE_MDMA0_TA;
-        /* set configuration for RCS2# */
-        mem_stcfg |= TS_MASK;
-        mem_stcfg &= ~TCSOE_MASK;
-        mem_stcfg &= ~TOECS_MASK;
-        mem_stcfg |= SBC_IDE_MDMA0_TCSOE | SBC_IDE_MDMA0_TOECS;
+        mode = XFER_MW_DMA_2;
+        break;
+    case XFER_MW_DMA_1:
+        mem_sttime = SBC_IDE_TIMING(MDMA1);

-        mode = XFER_MW_DMA_0;
-        break;
+        /* set configuration for RCS2# */
+        mem_stcfg &= ~TS_MASK;
+        mem_stcfg &= ~TCSOE_MASK;
+        mem_stcfg &= ~TOECS_MASK;
+        mem_stcfg |= SBC_IDE_MDMA1_TCSOE | SBC_IDE_MDMA1_TOECS;
+
+        mode = XFER_MW_DMA_1;
+        break;
+    case XFER_MW_DMA_0:
+        mem_sttime = SBC_IDE_TIMING(MDMA0);
+
+        /* set configuration for RCS2# */
+        mem_stcfg |= TS_MASK;
+        mem_stcfg &= ~TCSOE_MASK;
+        mem_stcfg &= ~TOECS_MASK;
+        mem_stcfg |= SBC_IDE_MDMA0_TCSOE | SBC_IDE_MDMA0_TOECS;
+
+        mode = XFER_MW_DMA_0;
+        break;
 #endif
-    default:
-        return 1;
-    }
+    default:
+        return 1;
+    }
+
+    if (ide_config_drive_speed(drive, mode))
+        return 1;

-    /*
-     * Tell the drive to switch to the new mode; abort on failure.
-     */
-    if (!mode || ide_config_drive_speed(drive, mode))
-    {
-        return 1; /* failure */
-    }
+    au_writel(mem_sttime,MEM_STTIME2);
+    au_writel(mem_stcfg,MEM_STCFG2);

-
-    au_writel(mem_sttime,MEM_STTIME2);
-    au_writel(mem_stcfg,MEM_STCFG2);
-
-    spin_unlock_irqrestore(&ide_tune_chipset_spin_lock, flags);
-
-    return 0;
+    return 0;
 }

 /*
  * Multi-Word DMA + DbDMA functions
  */
-#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA

-static int in_drive_list(struct hd_driveid *id,
-                         const struct drive_list_entry *drive_table)
-{
-    for ( ; drive_table->id_model ; drive_table++){
-        if ((!strcmp(drive_table->id_model, id->model)) &&
-            ((strstr(drive_table->id_firmware, id->fw_rev)) ||
-             (!strcmp(drive_table->id_firmware, "ALL")))
-           )
-            return 1;
-    }
-    return 0;
-}
+#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA

 static int auide_build_sglist(ide_drive_t *drive, struct request *rq)
 {
-    ide_hwif_t *hwif = drive->hwif;
-    _auide_hwif *ahwif = (_auide_hwif*)hwif->hwif_data;
-    struct scatterlist *sg = hwif->sg_table;
+    ide_hwif_t *hwif = drive->hwif;
+    _auide_hwif *ahwif = (_auide_hwif*)hwif->hwif_data;
+    struct scatterlist *sg = hwif->sg_table;

-    ide_map_sg(drive, rq);
+    ide_map_sg(drive, rq);

-    if (rq_data_dir(rq) == READ)
-        hwif->sg_dma_direction = DMA_FROM_DEVICE;
-    else
-        hwif->sg_dma_direction = DMA_TO_DEVICE;
+    if (rq_data_dir(rq) == READ)
+        hwif->sg_dma_direction = DMA_FROM_DEVICE;
+    else
+        hwif->sg_dma_direction = DMA_TO_DEVICE;

-    return dma_map_sg(ahwif->dev, sg, hwif->sg_nents,
-                      hwif->sg_dma_direction);
+    return dma_map_sg(ahwif->dev, sg, hwif->sg_nents,
+                      hwif->sg_dma_direction);
 }

 static int auide_build_dmatable(ide_drive_t *drive)
 {
-    int i, iswrite, count = 0;
-    ide_hwif_t *hwif = HWIF(drive);
+    int i, iswrite, count = 0;
+    ide_hwif_t *hwif = HWIF(drive);

-    struct request *rq = HWGROUP(drive)->rq;
+    struct request *rq = HWGROUP(drive)->rq;

-    _auide_hwif *ahwif = (_auide_hwif*)hwif->hwif_data;
-    struct scatterlist *sg;
+    _auide_hwif *ahwif = (_auide_hwif*)hwif->hwif_data;
+    struct scatterlist *sg;

-    iswrite = (rq_data_dir(rq) == WRITE);
-    /* Save for interrupt context */
-    ahwif->drive = drive;
+    iswrite = (rq_data_dir(rq) == WRITE);
+    /* Save for interrupt context */
+    ahwif->drive = drive;

-    /* Build sglist */
-    hwif->sg_nents = i = auide_build_sglist(drive, rq);
+    /* Build sglist */
+    hwif->sg_nents = i = auide_build_sglist(drive, rq);

-    if (!i)
-        return 0;
+    if (!i)
+        return 0;

-    /* fill the descriptors */
-    sg = hwif->sg_table;
-    while (i && sg_dma_len(sg)) {
-        u32 cur_addr;
-        u32 cur_len;
+    /* fill the descriptors */
+    sg = hwif->sg_table;
+    while (i && sg_dma_len(sg)) {
+        u32 cur_addr;
+        u32 cur_len;

-        cur_addr = sg_dma_address(sg);
-        cur_len = sg_dma_len(sg);
+        cur_addr = sg_dma_address(sg);
+        cur_len = sg_dma_len(sg);

-        while (cur_len) {
-            u32 flags = DDMA_FLAGS_NOIE;
-            unsigned int tc = (cur_len < 0xfe00)? cur_len: 0xfe00;
+        while (cur_len) {
+            u32 flags = DDMA_FLAGS_NOIE;
+            unsigned int tc = (cur_len < 0xfe00)? cur_len: 0xfe00;

-            if (++count >= PRD_ENTRIES) {
-                printk(KERN_WARNING "%s: DMA table too small\n",
-                       drive->name);
-                goto use_pio_instead;
-            }
+            if (++count >= PRD_ENTRIES) {
+                printk(KERN_WARNING "%s: DMA table too small\n",
+                       drive->name);
+                goto use_pio_instead;
+            }

-            /* Lets enable intr for the last descriptor only */
-            if (1==i)
-                flags = DDMA_FLAGS_IE;
-            else
-                flags = DDMA_FLAGS_NOIE;
+            /* Lets enable intr for the last descriptor only */
+            if (1==i)
+                flags = DDMA_FLAGS_IE;
+            else
+                flags = DDMA_FLAGS_NOIE;

-            if (iswrite) {
-                if(!put_source_flags(ahwif->tx_chan,
-                                     (void*)(page_address(sg->page)
-                                             + sg->offset),
-                                     tc, flags)) {
-                    printk(KERN_ERR "%s failed %d\n",
-                           __FUNCTION__, __LINE__);
+            if (iswrite) {
+                if(!put_source_flags(ahwif->tx_chan,
+                                     (void*)(page_address(sg->page)
+                                             + sg->offset),
+                                     tc, flags)) {
+                    printk(KERN_ERR "%s failed %d\n",
+                           __FUNCTION__, __LINE__);
                 }
-            } else
+            } else
             {
-                if(!put_dest_flags(ahwif->rx_chan,
-                                   (void*)(page_address(sg->page)
-                                           + sg->offset),
-                                   tc, flags)) {
-                    printk(KERN_ERR "%s failed %d\n",
-                           __FUNCTION__, __LINE__);
+                if(!put_dest_flags(ahwif->rx_chan,
+                                   (void*)(page_address(sg->page)
+                                           + sg->offset),
+                                   tc, flags)) {
+                    printk(KERN_ERR "%s failed %d\n",
+                           __FUNCTION__, __LINE__);
                 }
-            }
+            }

-            cur_addr += tc;
-            cur_len -= tc;
-        }
-        sg++;
-        i--;
-    }
+            cur_addr += tc;
+            cur_len -= tc;
+        }
+        sg++;
+        i--;
+    }

-    if (count)
-        return 1;
+    if (count)
+        return 1;

-use_pio_instead:
-    dma_unmap_sg(ahwif->dev,
-                 hwif->sg_table,
-                 hwif->sg_nents,
-                 hwif->sg_dma_direction);
+ use_pio_instead:
+    dma_unmap_sg(ahwif->dev,
+                 hwif->sg_table,
+                 hwif->sg_nents,
+                 hwif->sg_dma_direction);

-    return 0; /* revert to PIO for this request */
+    return 0; /* revert to PIO for this request */
 }

 static int auide_dma_end(ide_drive_t *drive)
 {
-    ide_hwif_t *hwif = HWIF(drive);
-    _auide_hwif *ahwif = (_auide_hwif*)hwif->hwif_data;
+    ide_hwif_t *hwif = HWIF(drive);
+    _auide_hwif *ahwif = (_auide_hwif*)hwif->hwif_data;

-    if (hwif->sg_nents) {
-        dma_unmap_sg(ahwif->dev, hwif->sg_table, hwif->sg_nents,
-                     hwif->sg_dma_direction);
-        hwif->sg_nents = 0;
-    }
+    if (hwif->sg_nents) {
+        dma_unmap_sg(ahwif->dev, hwif->sg_table, hwif->sg_nents,
+                     hwif->sg_dma_direction);
+        hwif->sg_nents = 0;
+    }

-    return 0;
+    return 0;
 }

 static void auide_dma_start(ide_drive_t *drive )
 {
-// printk("%s\n", __FUNCTION__);
 }

-ide_startstop_t auide_dma_intr(ide_drive_t *drive)
-{
-    //printk("%s\n", __FUNCTION__);
-
-    u8 stat = 0, dma_stat = 0;
-
-    dma_stat = HWIF(drive)->ide_dma_end(drive);
-    stat = HWIF(drive)->INB(IDE_STATUS_REG);    /* get drive status */
-    if (OK_STAT(stat,DRIVE_READY,drive->bad_wstat|DRQ_STAT)) {
-        if (!dma_stat) {
-            struct request *rq = HWGROUP(drive)->rq;
-
-            ide_end_request(drive, 1, rq->nr_sectors);
-            return ide_stopped;
-        }
-        printk(KERN_ERR "%s: dma_intr: bad DMA status (dma_stat=%x)\n",
-               drive->name, dma_stat);
-    }
-    return ide_error(drive, "dma_intr", stat);
-}

 static void auide_dma_exec_cmd(ide_drive_t *drive, u8 command)
 {
-    //printk("%s\n", __FUNCTION__);
-
-    /* issue cmd to drive */
-    ide_execute_command(drive, command, &auide_dma_intr,
-                        (2*WAIT_CMD), NULL);
+    /* issue cmd to drive */
+    ide_execute_command(drive, command, &ide_dma_intr,
+                        (2*WAIT_CMD), NULL);
 }

 static int auide_dma_setup(ide_drive_t *drive)
 {
-    // printk("%s\n", __FUNCTION__);
+    struct request *rq = HWGROUP(drive)->rq;

-    if (drive->media != ide_disk)
-        return 1;
+    if (!auide_build_dmatable(drive)) {
+        ide_map_sg(drive, rq);
+        return 1;
+    }

-    if (!auide_build_dmatable(drive))
-        /* try PIO instead of DMA */
-        return 1;
-
-    drive->waiting_for_dma = 1;
-
-    return 0;
+    drive->waiting_for_dma = 1;
+    return 0;
 }

 static int auide_dma_check(ide_drive_t *drive)
 {
-    // printk("%s\n", __FUNCTION__);
+    u8 speed;

 #ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA
-    if( !dbdma_init_done ){
-        auide_hwif.white_list = in_drive_list(drive->id,
-                                              dma_white_list);
-        auide_hwif.black_list = in_drive_list(drive->id,
-                                              dma_black_list);
-        auide_hwif.drive = drive;
-        auide_ddma_init(&auide_hwif);
-        dbdma_init_done = 1;
-    }
+
+    if( dbdma_init_done == 0 ){
+        auide_hwif.white_list = ide_in_drive_list(drive->id,
+                                                  dma_white_list);
+        auide_hwif.black_list = ide_in_drive_list(drive->id,
+                                                  dma_black_list);
+        auide_hwif.drive = drive;
+        auide_ddma_init(&auide_hwif);
+        dbdma_init_done = 1;
+    }
 #endif

-    /* Is the drive in our DMA black list? */
-    if ( auide_hwif.black_list ) {
-        drive->using_dma = 0;
-        printk("%s found in dma_blacklist[]! Disabling DMA.\n",
-               drive->id->model);
-    }
-    else
-        drive->using_dma = 1;
+    /* Is the drive in our DMA black list? */
+
+    if ( auide_hwif.black_list ) {
+        drive->using_dma = 0;
+
+        /* Borrowed the warning message from ide-dma.c */
+
+        printk(KERN_WARNING "%s: Disabling DMA for %s (blacklisted)\n",
+               drive->name, drive->id->model);
+    }
+    else
+        drive->using_dma = 1;

-    return HWIF(drive)->ide_dma_host_on(drive);
+    speed = ide_find_best_mode(drive, XFER_PIO | XFER_MWDMA);
+
+    if (drive->autodma && (speed & XFER_MODE) != XFER_PIO)
+        return HWIF(drive)->ide_dma_on(drive);
+
+    return HWIF(drive)->ide_dma_off_quietly(drive);
 }

 static int auide_dma_test_irq(ide_drive_t *drive)
 {
-    // printk("%s\n", __FUNCTION__);
-
-    if (!drive->waiting_for_dma)
-        printk(KERN_WARNING "%s: ide_dma_test_irq \
-                called while not waiting\n", drive->name);
+    if (drive->waiting_for_dma == 0)
+        printk(KERN_WARNING "%s: ide_dma_test_irq \
+                called while not waiting\n", drive->name);

-    /* If dbdma didn't execute the STOP command yet, the
-     * active bit is still set
+    /* If dbdma didn't execute the STOP command yet, the
+     * active bit is still set
      */
-    drive->waiting_for_dma++;
-    if (drive->waiting_for_dma >= DMA_WAIT_TIMEOUT) {
-        printk(KERN_WARNING "%s: timeout waiting for ddma to \
-                complete\n", drive->name);
-        return 1;
-    }
-    udelay(10);
-    return 0;
+    drive->waiting_for_dma++;
+    if (drive->waiting_for_dma >= DMA_WAIT_TIMEOUT) {
+        printk(KERN_WARNING "%s: timeout waiting for ddma to \
+                complete\n", drive->name);
+        return 1;
+    }
+    udelay(10);
+    return 0;
 }

 static int auide_dma_host_on(ide_drive_t *drive)
 {
-    // printk("%s\n", __FUNCTION__);
-    return 0;
+    return 0;
 }

 static int auide_dma_on(ide_drive_t *drive)
 {
-    // printk("%s\n", __FUNCTION__);
-    drive->using_dma = 1;
-    return auide_dma_host_on(drive);
+    drive->using_dma = 1;
+    return auide_dma_host_on(drive);
 }


 static int auide_dma_host_off(ide_drive_t *drive)
 {
-    // printk("%s\n", __FUNCTION__);
-    return 0;
+    return 0;
 }

 static int auide_dma_off_quietly(ide_drive_t *drive)
 {
-    // printk("%s\n", __FUNCTION__);
-    drive->using_dma = 0;
-    return auide_dma_host_off(drive);
+    drive->using_dma = 0;
+    return auide_dma_host_off(drive);
 }

 static int auide_dma_lostirq(ide_drive_t *drive)
 {
-    // printk("%s\n", __FUNCTION__);
-
-    printk(KERN_ERR "%s: IRQ lost\n", drive->name);
-    return 0;
+    printk(KERN_ERR "%s: IRQ lost\n", drive->name);
+    return 0;
 }

 static void auide_ddma_tx_callback(int irq, void *param, struct pt_regs *regs)
 {
-    // printk("%s\n", __FUNCTION__);
-
-    _auide_hwif *ahwif = (_auide_hwif*)param;
-    ahwif->drive->waiting_for_dma = 0;
-    return;
+    _auide_hwif *ahwif = (_auide_hwif*)param;
+    ahwif->drive->waiting_for_dma = 0;
 }

 static void auide_ddma_rx_callback(int irq, void *param, struct pt_regs *regs)
 {
-    // printk("%s\n", __FUNCTION__);
-
-    _auide_hwif *ahwif = (_auide_hwif*)param;
-    ahwif->drive->waiting_for_dma = 0;
-    return;
+    _auide_hwif *ahwif = (_auide_hwif*)param;
+    ahwif->drive->waiting_for_dma = 0;
 }
+
+#endif /* end CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA */
+
+static void auide_init_dbdma_dev(dbdev_tab_t *dev, u32 dev_id, u32 tsize, u32 devwidth, u32 flags)
+{
+    dev->dev_id          = dev_id;
+    dev->dev_physaddr    = (u32)AU1XXX_ATA_PHYS_ADDR;
+    dev->dev_intlevel    = 0;
+    dev->dev_intpolarity = 0;
+    dev->dev_tsize       = tsize;
+    dev->dev_devwidth    = devwidth;
+    dev->dev_flags       = flags;
+}
+
+#if defined(CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA)

 static int auide_dma_timeout(ide_drive_t *drive)
 {
 // printk("%s\n", __FUNCTION__);

-    printk(KERN_ERR "%s: DMA timeout occurred: ", drive->name);
+    printk(KERN_ERR "%s: DMA timeout occurred: ", drive->name);

-    if (HWIF(drive)->ide_dma_test_irq(drive))
-        return 0;
+    if (HWIF(drive)->ide_dma_test_irq(drive))
+        return 0;

-    return HWIF(drive)->ide_dma_end(drive);
+    return HWIF(drive)->ide_dma_end(drive);
 }
-#endif /* end CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA */
+

+static int auide_ddma_init(_auide_hwif *auide) {
+
+    dbdev_tab_t source_dev_tab, target_dev_tab;
+    u32 dev_id, tsize, devwidth, flags;
+    ide_hwif_t *hwif = auide->hwif;

+    dev_id = AU1XXX_ATA_DDMA_REQ;
+
+    if (auide->white_list || auide->black_list) {
+        tsize = 8;
+        devwidth = 32;
+    }
+    else {
+        tsize = 1;
+        devwidth = 16;
+
+        printk(KERN_ERR "au1xxx-ide: %s is not on ide driver whitelist.\n",auide_hwif.drive->id->model);
+        printk(KERN_ERR "            please read 'Documentation/mips/AU1xxx_IDE.README'");
+    }
+
+#ifdef IDE_AU1XXX_BURSTMODE
+    flags = DEV_FLAGS_SYNC | DEV_FLAGS_BURSTABLE;
+#else
+    flags = DEV_FLAGS_SYNC;
+#endif
+
+    /* setup dev_tab for tx channel */
+    auide_init_dbdma_dev( &source_dev_tab,
+                          dev_id,
+                          tsize, devwidth, DEV_FLAGS_OUT | flags);
+    auide->tx_dev_id = au1xxx_ddma_add_device( &source_dev_tab );
+
+    auide_init_dbdma_dev( &source_dev_tab,
+                          dev_id,
+                          tsize, devwidth, DEV_FLAGS_IN | flags);
+    auide->rx_dev_id = au1xxx_ddma_add_device( &source_dev_tab );
+
+    /* We also need to add a target device for the DMA */
+    auide_init_dbdma_dev( &target_dev_tab,
+                          (u32)DSCR_CMD0_ALWAYS,
+                          tsize, devwidth, DEV_FLAGS_ANYUSE);
+    auide->target_dev_id = au1xxx_ddma_add_device(&target_dev_tab);
+
+    /* Get a channel for TX */
+    auide->tx_chan = au1xxx_dbdma_chan_alloc(auide->target_dev_id,
+                                             auide->tx_dev_id,
+                                             auide_ddma_tx_callback,
+                                             (void*)auide);
+
+    /* Get a channel for RX */
+    auide->rx_chan = au1xxx_dbdma_chan_alloc(auide->rx_dev_id,
+                                             auide->target_dev_id,
+                                             auide_ddma_rx_callback,
+                                             (void*)auide);
+
+    auide->tx_desc_head = (void*)au1xxx_dbdma_ring_alloc(auide->tx_chan,
+                                                         NUM_DESCRIPTORS);
+    auide->rx_desc_head = (void*)au1xxx_dbdma_ring_alloc(auide->rx_chan,
+                                                         NUM_DESCRIPTORS);
+
+    hwif->dmatable_cpu = dma_alloc_coherent(auide->dev,
+                                            PRD_ENTRIES * PRD_BYTES, /* 1 Page */
+                                            &hwif->dmatable_dma, GFP_KERNEL);
+
+    au1xxx_dbdma_start( auide->tx_chan );
+    au1xxx_dbdma_start( auide->rx_chan );
+
+    return 0;
+}
+#else
+
 static int auide_ddma_init( _auide_hwif *auide )
 {
-    // printk("%s\n", __FUNCTION__);
+    dbdev_tab_t source_dev_tab;
+    int flags;

-    dbdev_tab_t source_dev_tab;
-#if defined(CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA)
-    dbdev_tab_t target_dev_tab;
-    ide_hwif_t *hwif = auide->hwif;
-    char warning_output [2][80];
-    int i;
-#endif
-
-    /* Add our custom device to DDMA device table */
-    /* Create our new device entries in the table */
-#if defined(CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA)
-    source_dev_tab.dev_id = AU1XXX_ATA_DDMA_REQ;
-
-    if( auide->white_list || auide->black_list ){
-        source_dev_tab.dev_tsize = 8;
-        source_dev_tab.dev_devwidth = 32;
-        source_dev_tab.dev_physaddr = (u32)AU1XXX_ATA_PHYS_ADDR;
-        source_dev_tab.dev_intlevel = 0;
-        source_dev_tab.dev_intpolarity = 0;
-
-        /* init device table for target - static bus controller - */
-        target_dev_tab.dev_id = DSCR_CMD0_ALWAYS;
-        target_dev_tab.dev_tsize = 8;
-        target_dev_tab.dev_devwidth = 32;
-        target_dev_tab.dev_physaddr = (u32)AU1XXX_ATA_PHYS_ADDR;
-        target_dev_tab.dev_intlevel = 0;
-        target_dev_tab.dev_intpolarity = 0;
-        target_dev_tab.dev_flags = DEV_FLAGS_ANYUSE;
-    }
-    else{
-        source_dev_tab.dev_tsize = 1;
-        source_dev_tab.dev_devwidth = 16;
-        source_dev_tab.dev_physaddr = (u32)AU1XXX_ATA_PHYS_ADDR;
-        source_dev_tab.dev_intlevel = 0;
-        source_dev_tab.dev_intpolarity = 0;
-
-        /* init device table for target - static bus controller - */
-        target_dev_tab.dev_id = DSCR_CMD0_ALWAYS;
-        target_dev_tab.dev_tsize = 1;
-        target_dev_tab.dev_devwidth = 16;
-        target_dev_tab.dev_physaddr = (u32)AU1XXX_ATA_PHYS_ADDR;
-        target_dev_tab.dev_intlevel = 0;
-        target_dev_tab.dev_intpolarity = 0;
-        target_dev_tab.dev_flags = DEV_FLAGS_ANYUSE;
-
-        sprintf(&warning_output[0][0],
-                "%s is not on ide driver white list.",
-                auide_hwif.drive->id->model);
-        for ( i=strlen(&warning_output[0][0]) ; i<76; i++ ){
-            sprintf(&warning_output[0][i]," ");
-        }
-
-        sprintf(&warning_output[1][0],
-                "To add %s please read 'Documentation/mips/AU1xxx_IDE.README'.",
-                auide_hwif.drive->id->model);
-        for ( i=strlen(&warning_output[1][0]) ; i<76; i++ ){
-            sprintf(&warning_output[1][i]," ");
-        }
-
-        printk("\n****************************************");
-        printk("****************************************\n");
-        printk("* %s *\n",&warning_output[0][0]);
-        printk("* Switch to safe MWDMA Mode!            ");
-        printk(" *\n");
-        printk("* %s *\n",&warning_output[1][0]);
-        printk("****************************************");
-        printk("****************************************\n\n");
-    }
+#ifdef IDE_AU1XXX_BURSTMODE
+    flags = DEV_FLAGS_SYNC | DEV_FLAGS_BURSTABLE;
 #else
-    source_dev_tab.dev_id = DSCR_CMD0_ALWAYS;
-    source_dev_tab.dev_tsize = 8;
-    source_dev_tab.dev_devwidth = 32;
-    source_dev_tab.dev_physaddr = (u32)AU1XXX_ATA_PHYS_ADDR;
-    source_dev_tab.dev_intlevel = 0;
-    source_dev_tab.dev_intpolarity = 0;
+    flags = DEV_FLAGS_SYNC;
 #endif

-#if CONFIG_BLK_DEV_IDE_AU1XXX_BURSTABLE_ON
-    /* set flags for tx channel */
-    source_dev_tab.dev_flags = DEV_FLAGS_OUT
-        | DEV_FLAGS_SYNC
-        | DEV_FLAGS_BURSTABLE;
-    auide->tx_dev_id = au1xxx_ddma_add_device( &source_dev_tab );
-    /* set flags for rx channel */
-    source_dev_tab.dev_flags = DEV_FLAGS_IN
-        | DEV_FLAGS_SYNC
-        | DEV_FLAGS_BURSTABLE;
-    auide->rx_dev_id = au1xxx_ddma_add_device( &source_dev_tab );
-#else
-    /* set flags for tx channel */
-    source_dev_tab.dev_flags = DEV_FLAGS_OUT | DEV_FLAGS_SYNC;
-    auide->tx_dev_id = au1xxx_ddma_add_device( &source_dev_tab );
-    /* set flags for rx channel */
-    source_dev_tab.dev_flags = DEV_FLAGS_IN | DEV_FLAGS_SYNC;
-    auide->rx_dev_id = au1xxx_ddma_add_device( &source_dev_tab );
-#endif
+    /* setup dev_tab for tx channel */
+    auide_init_dbdma_dev( &source_dev_tab,
+                          (u32)DSCR_CMD0_ALWAYS,
+                          8, 32, DEV_FLAGS_OUT | flags);
+    auide->tx_dev_id = au1xxx_ddma_add_device( &source_dev_tab );

-#if defined(CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA)
-
-    auide->target_dev_id = au1xxx_ddma_add_device(&target_dev_tab);
-
-    /* Get a channel for TX */
-    auide->tx_chan = au1xxx_dbdma_chan_alloc(auide->target_dev_id,
-                                             auide->tx_dev_id,
-                                             auide_ddma_tx_callback,
-                                             (void*)auide);
-    /* Get a channel for RX */
-    auide->rx_chan = au1xxx_dbdma_chan_alloc(auide->rx_dev_id,
-                                             auide->target_dev_id,
-                                             auide_ddma_rx_callback,
-                                             (void*)auide);
-#else /* CONFIG_BLK_DEV_IDE_AU1XXX_PIO_DBDMA */
-    /*
-     * Note: if call back is not enabled, update ctp->cur_ptr manually
-     */
-    auide->tx_chan = au1xxx_dbdma_chan_alloc(DSCR_CMD0_ALWAYS,
-                                             auide->tx_dev_id,
-                                             NULL,
-                                             (void*)auide);
-    auide->rx_chan = au1xxx_dbdma_chan_alloc(auide->rx_dev_id,
-                                             DSCR_CMD0_ALWAYS,
-                                             NULL,
-                                             (void*)auide);
-#endif
-    auide->tx_desc_head = (void*)au1xxx_dbdma_ring_alloc(auide->tx_chan,
-                                                         NUM_DESCRIPTORS);
-    auide->rx_desc_head = (void*)au1xxx_dbdma_ring_alloc(auide->rx_chan,
-                                                         NUM_DESCRIPTORS);
-
-#if defined(CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA)
-    hwif->dmatable_cpu = dma_alloc_coherent(auide->dev,
-                                            PRD_ENTRIES * PRD_BYTES, /* 1 Page */
-                                            &hwif->dmatable_dma, GFP_KERNEL);
-
-    auide->sg_table = kmalloc(sizeof(struct scatterlist) * PRD_ENTRIES,
-                              GFP_KERNEL|GFP_DMA);
-    if (auide->sg_table == NULL) {
-        return -ENOMEM;
-    }
-#endif
-    au1xxx_dbdma_start( auide->tx_chan );
-    au1xxx_dbdma_start( auide->rx_chan );
-    return 0;
+    auide_init_dbdma_dev( &source_dev_tab,
+                          (u32)DSCR_CMD0_ALWAYS,
+                          8, 32, DEV_FLAGS_IN | flags);
+    auide->rx_dev_id = au1xxx_ddma_add_device( &source_dev_tab );
+
+    /* Get a channel for TX */
+    auide->tx_chan = au1xxx_dbdma_chan_alloc(DSCR_CMD0_ALWAYS,
+                                             auide->tx_dev_id,
+                                             NULL,
+                                             (void*)auide);
+
+    /* Get a channel for RX */
+    auide->rx_chan = au1xxx_dbdma_chan_alloc(auide->rx_dev_id,
+                                             DSCR_CMD0_ALWAYS,
+                                             NULL,
+                                             (void*)auide);
+
+    auide->tx_desc_head = (void*)au1xxx_dbdma_ring_alloc(auide->tx_chan,
+                                                         NUM_DESCRIPTORS);
+    auide->rx_desc_head = (void*)au1xxx_dbdma_ring_alloc(auide->rx_chan,
+                                                         NUM_DESCRIPTORS);
+
+    au1xxx_dbdma_start( auide->tx_chan );
+    au1xxx_dbdma_start( auide->rx_chan );
+
+    return 0;
 }
+#endif

 static void auide_setup_ports(hw_regs_t *hw, _auide_hwif *ahwif)
 {
-    int i;
-#define ide_ioreg_t unsigned long
-    ide_ioreg_t *ata_regs = hw->io_ports;
+    int i;
+    unsigned long *ata_regs = hw->io_ports;

-    /* fixme */
-    for (i = 0; i < IDE_CONTROL_OFFSET; i++) {
-        *ata_regs++ = (ide_ioreg_t) ahwif->regbase
-            + (ide_ioreg_t)(i << AU1XXX_ATA_REG_OFFSET);
-    }
+    /* FIXME? */
+    for (i = 0; i < IDE_CONTROL_OFFSET; i++) {
+        *ata_regs++ = ahwif->regbase + (i << AU1XXX_ATA_REG_OFFSET);
+    }

-    /* set the Alternative Status register */
-    *ata_regs = (ide_ioreg_t) ahwif->regbase
-        + (ide_ioreg_t)(14 << AU1XXX_ATA_REG_OFFSET);
+    /* set the Alternative Status register */
+    *ata_regs = ahwif->regbase + (14 << AU1XXX_ATA_REG_OFFSET);
 }

 static int au_ide_probe(struct device *dev)
 {
     struct platform_device *pdev = to_platform_device(dev);
-    _auide_hwif *ahwif = &auide_hwif;
-    ide_hwif_t *hwif;
+    _auide_hwif *ahwif = &auide_hwif;
+    ide_hwif_t *hwif;
     struct resource *res;
     int ret = 0;

 #if defined(CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA)
-    char *mode = "MWDMA2";
+    char *mode = "MWDMA2";
 #elif defined(CONFIG_BLK_DEV_IDE_AU1XXX_PIO_DBDMA)
-    char *mode = "PIO+DDMA(offload)";
+    char *mode = "PIO+DDMA(offload)";
 #endif

-    memset(&auide_hwif, 0, sizeof(_auide_hwif));
-    auide_hwif.dev = 0;
+    memset(&auide_hwif, 0, sizeof(_auide_hwif));
+    auide_hwif.dev = 0;

     ahwif->dev = dev;
     ahwif->irq = platform_get_irq(pdev, 0);
···
         goto out;
     }

-    if (!request_mem_region (res->start, res->end-res->start, pdev->name)) {
+    if (!request_mem_region (res->start, res->end-res->start, pdev->name)) {
         pr_debug("%s: request_mem_region failed\n", DRV_NAME);
-        ret = -EBUSY;
+        ret = -EBUSY;
         goto out;
-    }
+    }

     ahwif->regbase = (u32)ioremap(res->start, res->end-res->start);
     if (ahwif->regbase == 0) {
···
         goto out;
     }

-    hwif = &ide_hwifs[pdev->id];
-    hw_regs_t *hw = &hwif->hw;
-    hwif->irq = hw->irq = ahwif->irq;
-    hwif->chipset = ide_au1xxx;
+    /* FIXME: This might possibly break PCMCIA IDE devices */

-    auide_setup_ports(hw, ahwif);
+    hwif = &ide_hwifs[pdev->id];
+    hw_regs_t *hw = &hwif->hw;
+    hwif->irq = hw->irq = ahwif->irq;
+    hwif->chipset = ide_au1xxx;
+
+    auide_setup_ports(hw, ahwif);
     memcpy(hwif->io_ports, hw->io_ports, sizeof(hwif->io_ports));

-#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_SEQTS_PER_RQ
-    hwif->rqsize = CONFIG_BLK_DEV_IDE_AU1XXX_SEQTS_PER_RQ;
-    hwif->rqsize = ((hwif->rqsize > AU1XXX_ATA_RQSIZE)
-                    || (hwif->rqsize < 32)) ? AU1XXX_ATA_RQSIZE : hwif->rqsize;
-#else /* if kernel config is not set */
-    hwif->rqsize = AU1XXX_ATA_RQSIZE;
-#endif
-
-    hwif->ultra_mask = 0x0;  /* Disable Ultra DMA */
+    hwif->ultra_mask = 0x0;  /* Disable Ultra DMA */
 #ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA
-    hwif->mwdma_mask = 0x07; /* Multimode-2 DMA */
-    hwif->swdma_mask = 0x07;
+    hwif->mwdma_mask = 0x07; /* Multimode-2 DMA */
+    hwif->swdma_mask = 0x00;
 #else
-    hwif->mwdma_mask = 0x0;
-    hwif->swdma_mask = 0x0;
+    hwif->mwdma_mask = 0x0;
+    hwif->swdma_mask = 0x0;
 #endif
-    //hwif->noprobe = !hwif->io_ports[IDE_DATA_OFFSET];
-    hwif->noprobe = 0;
-    hwif->drives[0].unmask = 1;
-    hwif->drives[1].unmask = 1;

-    /* hold should be on in all cases */
-    hwif->hold = 1;
-    hwif->mmio = 2;
+    hwif->noprobe = 0;
+    hwif->drives[0].unmask = 1;
+    hwif->drives[1].unmask = 1;

-    /* set up local I/O function entry points */
-    hwif->INB = auide_inb;
-    hwif->INW = auide_inw;
-    hwif->INL = auide_inl;
-    hwif->INSW = auide_insw;
-    hwif->INSL = auide_insl;
-    hwif->OUTB = auide_outb;
-    hwif->OUTBSYNC = auide_outbsync;
-    hwif->OUTW = auide_outw;
-    hwif->OUTL = auide_outl;
-    hwif->OUTSW = auide_outsw;
-    hwif->OUTSL = auide_outsl;
+    /* hold should be on in all cases */
+    hwif->hold = 1;
+    hwif->mmio = 2;

-    hwif->tuneproc = &auide_tune_drive;
-    hwif->speedproc = &auide_tune_chipset;
+    /* If the user has selected DDMA assisted copies,
+       then set up a few local I/O function entry points
+    */
+
+#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_PIO_DBDMA
+    hwif->INSW = auide_insw;
+    hwif->OUTSW = auide_outsw;
+#endif
+
+    hwif->tuneproc = &auide_tune_drive;
+    hwif->speedproc = &auide_tune_chipset;

 #ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA
-    hwif->ide_dma_off_quietly = &auide_dma_off_quietly;
-    hwif->ide_dma_timeout = &auide_dma_timeout;
+    hwif->ide_dma_off_quietly = &auide_dma_off_quietly;
+    hwif->ide_dma_timeout = &auide_dma_timeout;

-    hwif->ide_dma_check = &auide_dma_check;
-    hwif->dma_exec_cmd = &auide_dma_exec_cmd;
-    hwif->dma_start = &auide_dma_start;
-    hwif->ide_dma_end = &auide_dma_end;
-    hwif->dma_setup = &auide_dma_setup;
-    hwif->ide_dma_test_irq = &auide_dma_test_irq;
-    hwif->ide_dma_host_off = &auide_dma_host_off;
-    hwif->ide_dma_host_on = &auide_dma_host_on;
-    hwif->ide_dma_lostirq = &auide_dma_lostirq;
-    hwif->ide_dma_on = &auide_dma_on;
+    hwif->ide_dma_check = &auide_dma_check;
+    hwif->dma_exec_cmd = &auide_dma_exec_cmd;
+    hwif->dma_start = &auide_dma_start;
+    hwif->ide_dma_end = &auide_dma_end;
+    hwif->dma_setup = &auide_dma_setup;
+    hwif->ide_dma_test_irq = &auide_dma_test_irq;
+    hwif->ide_dma_host_off = &auide_dma_host_off;
+    hwif->ide_dma_host_on = &auide_dma_host_on;
+    hwif->ide_dma_lostirq = &auide_dma_lostirq;
+    hwif->ide_dma_on = &auide_dma_on;

-    hwif->autodma = 1;
-    hwif->drives[0].autodma = hwif->autodma;
-    hwif->drives[1].autodma = hwif->autodma;
-    hwif->atapi_dma = 1;
-    hwif->drives[0].using_dma = 1;
-    hwif->drives[1].using_dma = 1;
+    hwif->autodma = 1;
+    hwif->drives[0].autodma = hwif->autodma;
+    hwif->drives[1].autodma = hwif->autodma;
+    hwif->atapi_dma = 1;
+
 #else /* !CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA */
-    hwif->autodma = 0;
-    hwif->channel = 0;
-    hwif->hold = 1;
-    hwif->select_data = 0;    /* no chipset-specific code */
-    hwif->config_data = 0;    /* no chipset-specific code */
+    hwif->autodma = 0;
+    hwif->channel = 0;
+    hwif->hold = 1;
+    hwif->select_data = 0;    /* no chipset-specific code */
+    hwif->config_data = 0;    /* no chipset-specific code */

-    hwif->drives[0].autodma = 0;
-    hwif->drives[0].drive_data = 0;    /* no drive data */
-    hwif->drives[0].using_dma = 0;
-    hwif->drives[0].waiting_for_dma = 0;
-    hwif->drives[0].autotune = 1;    /* 1=autotune, 2=noautotune, 0=default */
-    /* secondary hdd not supported */
-    hwif->drives[1].autodma = 0;
-
-    hwif->drives[1].drive_data = 0;
-    hwif->drives[1].using_dma = 0;
-    hwif->drives[1].waiting_for_dma = 0;
-    hwif->drives[1].autotune = 2;    /* 1=autotune, 2=noautotune, 0=default */
+    hwif->drives[0].autodma = 0;
+    hwif->drives[0].autotune = 1;    /* 1=autotune, 2=noautotune, 0=default */
 #endif
-    hwif->drives[0].io_32bit = 0;    /* 0=16-bit, 1=32-bit, 2/3=32bit+sync */
-    hwif->drives[1].io_32bit = 0;    /* 0=16-bit, 1=32-bit, 2/3=32bit+sync */
+    hwif->drives[0].no_io_32bit = 1;

-    /*Register Driver with PM Framework*/
-#ifdef CONFIG_PM
-    auide_hwif.pm.lock = SPIN_LOCK_UNLOCKED;
-    auide_hwif.pm.stopped = 0;
+    auide_hwif.hwif = hwif;
+    hwif->hwif_data = &auide_hwif;

-    auide_hwif.pm.dev = new_au1xxx_power_device( "ide",
-                                                 &au1200ide_pm_callback,
-                                                 NULL);
-    if ( auide_hwif.pm.dev == NULL )
-        printk(KERN_INFO "Unable to create a power management \
-                device entry for the au1200-IDE.\n");
-    else
-        printk(KERN_INFO "Power management device entry for the \
-                au1200-IDE loaded.\n");
-#endif
-
-    auide_hwif.hwif = hwif;
-    hwif->hwif_data = &auide_hwif;
-
-#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_PIO_DBDMA
-    auide_ddma_init(&auide_hwif);
-    dbdma_init_done = 1;
+#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_PIO_DBDMA
+    auide_ddma_init(&auide_hwif);
+    dbdma_init_done = 1;
 #endif

     probe_hwif_init(hwif);
     dev_set_drvdata(dev, hwif);

-    printk(KERN_INFO "Au1xxx IDE(builtin) configured for %s\n", mode );
+    printk(KERN_INFO "Au1xxx IDE(builtin) configured for %s\n", mode );

-out:
-    return ret;
+ out:
+    return ret;
 }

 static int au_ide_remove(struct device *dev)
···
     struct platform_device *pdev = to_platform_device(dev);
     struct resource *res;
     ide_hwif_t *hwif = dev_get_drvdata(dev);
-    _auide_hwif *ahwif = &auide_hwif;
+    _auide_hwif *ahwif = &auide_hwif;

     ide_unregister(hwif - ide_hwifs);
···
     return driver_register(&au1200_ide_driver);
 }

-static void __init au_ide_exit(void)
+static void __exit au_ide_exit(void)
 {
     driver_unregister(&au1200_ide_driver);
 }
-
-#ifdef CONFIG_PM
-int au1200ide_pm_callback( au1xxx_power_dev_t *dev,\
-                           au1xxx_request_t request, void *data) {
-
-    unsigned int d, err = 0;
-    unsigned long flags;
-
-    spin_lock_irqsave(auide_hwif.pm.lock, flags);
-
-    switch (request){
-    case AU1XXX_PM_SLEEP:
-        err = au1xxxide_pm_sleep(dev);
-        break;
-    case AU1XXX_PM_WAKEUP:
-        d = *((unsigned int*)data);
-        if ( d > 0 && d <= 99) {
-            err = au1xxxide_pm_standby(dev);
-        }
-        else {
-            err = au1xxxide_pm_resume(dev);
-        }
-        break;
-    case AU1XXX_PM_GETSTATUS:
-        err = au1xxxide_pm_getstatus(dev);
-        break;
-    case AU1XXX_PM_ACCESS:
-        err = au1xxxide_pm_access(dev);
-        break;
-    case AU1XXX_PM_IDLE:
-        err = au1xxxide_pm_idle(dev);
-        break;
-    case AU1XXX_PM_CLEANUP:
-        err = au1xxxide_pm_cleanup(dev);
-        break;
-    default:
-        err = -1;
-        break;
-    }
-
-    spin_unlock_irqrestore(auide_hwif.pm.lock, flags);
-
-    return err;
-}
-
-static int au1xxxide_pm_standby( au1xxx_power_dev_t *dev ) {
-    return 0;
-}
-
-static int au1xxxide_pm_sleep( au1xxx_power_dev_t *dev ) {
-
-    int retval;
-    ide_hwif_t *hwif = auide_hwif.hwif;
-    struct request rq;
-    struct request_pm_state rqpm;
-    ide_task_t args;
-
-    if(auide_hwif.pm.stopped)
-        return -1;
-
-    /*
-     * wait until hard disc is ready
-     */
-    if ( wait_for_ready(&hwif->drives[0], 35000) ) {
-        printk("Wait for drive sleep timeout!\n");
-        retval = -1;
-    }
-
-    /*
-     * sequenz to tell the high level ide driver that pm is resuming
-     */
-    memset(&rq, 0, sizeof(rq));
-    memset(&rqpm, 0, sizeof(rqpm));
-    memset(&args, 0, sizeof(args));
-    rq.flags = REQ_PM_SUSPEND;
-    rq.special = &args;
-    rq.pm = &rqpm;
-    rqpm.pm_step = ide_pm_state_start_suspend;
-    rqpm.pm_state = PMSG_SUSPEND;
-
-    retval = ide_do_drive_cmd(&hwif->drives[0], &rq, ide_wait);
-
-    if (wait_for_ready (&hwif->drives[0], 35000)) {
-        printk("Wait for drive sleep timeout!\n");
-        retval = -1;
-    }
-
-    /*
-     * stop dbdma channels
-     */
-    au1xxx_dbdma_reset(auide_hwif.tx_chan);
-    au1xxx_dbdma_reset(auide_hwif.rx_chan);
-
-    auide_hwif.pm.stopped = 1;
-
-    return retval;
-}
-
-static int au1xxxide_pm_resume( au1xxx_power_dev_t *dev ) {
-
-    int retval;
-    ide_hwif_t *hwif = auide_hwif.hwif;
-    struct request rq;
-    struct request_pm_state rqpm;
-    ide_task_t args;
-
-    if(!auide_hwif.pm.stopped)
-        return -1;
-
-    /*
-     * start dbdma channels
-     */
-    au1xxx_dbdma_start(auide_hwif.tx_chan);
-    au1xxx_dbdma_start(auide_hwif.rx_chan);
-
-    /*
-     * wait until hard disc is ready
-     */
-    if (wait_for_ready ( &hwif->drives[0], 35000)) {
-        printk("Wait for drive wake up timeout!\n");
-        retval = -1;
-    }
-
-    /*
-     * sequenz to tell the high level ide driver that pm is resuming
-     */
-    memset(&rq, 0, sizeof(rq));
-    memset(&rqpm, 0, sizeof(rqpm));
-    memset(&args, 0, sizeof(args));
-    rq.flags = REQ_PM_RESUME;
-    rq.special = &args;
-    rq.pm = &rqpm;
-    rqpm.pm_step = ide_pm_state_start_resume;
-    rqpm.pm_state = PMSG_ON;
-
-    retval = ide_do_drive_cmd(&hwif->drives[0], &rq, ide_head_wait);
-
-    /*
-     * wait for hard disc
-     */
-    if ( wait_for_ready(&hwif->drives[0], 35000) ) {
-        printk("Wait for drive wake up timeout!\n");
-        retval = -1;
-    }
-
-    auide_hwif.pm.stopped = 0;
-
-    return retval;
-}
-
-static int au1xxxide_pm_getstatus( au1xxx_power_dev_t *dev ) {
-    return dev->cur_state;
-}
-
-static int au1xxxide_pm_access( au1xxx_power_dev_t *dev ) {
-    if (dev->cur_state != AWAKE_STATE)
-        return 0;
-    else
-        return -1;
-}
-
-static int au1xxxide_pm_idle( au1xxx_power_dev_t *dev ) {
-    return 0;
-}
-
-static int au1xxxide_pm_cleanup( au1xxx_power_dev_t *dev ) {
-    return 0;
-}
-#endif /* CONFIG_PM */

 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("AU1200 IDE driver");
+7 -1
drivers/ide/pci/sgiioc4.c
··· 622 622 ide_hwif_t *hwif;
623 623 int h;
624 624 
625 + /*
626 + * Find an empty HWIF; if none available, return -ENOMEM.
627 + */
625 628 for (h = 0; h < MAX_HWIFS; ++h) {
626 629 hwif = &ide_hwifs[h];
627 - /* Find an empty HWIF */
628 630 if (hwif->chipset == ide_unknown)
629 631 break;
632 + }
633 + if (h == MAX_HWIFS) {
634 + printk(KERN_ERR "%s: too many IDE interfaces, no room in table\n", d->name);
635 + return -ENOMEM;
630 636 }
631 637 
632 638 /* Get the CmdBlk and CtrlBlk Base Registers */
+1
drivers/ide/pci/via82cxxx.c
··· 80 80 u16 flags;
81 81 } via_isa_bridges[] = {
82 82 { "vt6410", PCI_DEVICE_ID_VIA_6410, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST },
83 + { "vt8251", PCI_DEVICE_ID_VIA_8251, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST },
83 84 { "vt8237", PCI_DEVICE_ID_VIA_8237, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST },
84 85 { "vt8235", PCI_DEVICE_ID_VIA_8235, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST },
85 86 { "vt8233a", PCI_DEVICE_ID_VIA_8233A, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST },
+1 -1
drivers/input/mouse/alps.c
··· 42 42 { { 0x53, 0x02, 0x14 }, 0xf8, 0xf8, 0 },
43 43 { { 0x63, 0x02, 0x0a }, 0xf8, 0xf8, 0 },
44 44 { { 0x63, 0x02, 0x14 }, 0xf8, 0xf8, 0 },
45 - { { 0x63, 0x02, 0x28 }, 0xf8, 0xf8, 0 },
45 + { { 0x63, 0x02, 0x28 }, 0xf8, 0xf8, ALPS_FW_BK_2 }, /* Fujitsu Siemens S6010 */
46 46 { { 0x63, 0x02, 0x3c }, 0x8f, 0x8f, ALPS_WHEEL }, /* Toshiba Satellite S2400-103 */
47 47 { { 0x63, 0x02, 0x50 }, 0xef, 0xef, ALPS_FW_BK_1 }, /* NEC Versa L320 */
48 48 { { 0x63, 0x02, 0x64 }, 0xf8, 0xf8, 0 },
+9 -1
drivers/mmc/mmc.c
··· 679 679 }
680 680 
681 681 /*
682 - * Apply power to the MMC stack.
682 + * Apply power to the MMC stack. This is a two-stage process.
683 + * First, we enable power to the card without the clock running.
684 + * We then wait a bit for the power to stabilise. Finally,
685 + * enable the bus drivers and clock to the card.
686 + *
687 + * We must _NOT_ enable the clock prior to power stabilising.
688 + *
689 + * If a host does all the power sequencing itself, ignore the
690 + * initial MMC_POWER_UP stage.
683 691 */
684 692 static void mmc_power_up(struct mmc_host *host)
685 693 {
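
The sequencing the new comment describes is worth spelling out. Below is a minimal sketch of the two-stage power-up, assuming the mmc_set_ios()/mmc_delay() helpers and the struct mmc_ios fields used elsewhere in this file; the hunk above only shows the comment, not the function body, so treat this as illustrative rather than the actual implementation:

	static void mmc_power_up_sketch(struct mmc_host *host)
	{
		/* Stage 1: apply power while the clock is stopped. */
		host->ios.power_mode = MMC_POWER_UP;
		host->ios.clock = 0;
		mmc_set_ios(host);

		/* Let the supply settle before any clock edges reach the card. */
		mmc_delay(1);

		/* Stage 2: enable the bus drivers and start the clock. */
		host->ios.power_mode = MMC_POWER_ON;
		host->ios.clock = host->f_min;
		mmc_set_ios(host);

		mmc_delay(2);
	}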
+1 -1
drivers/scsi/ibmvscsi/ibmvscsi.h
··· 100 100 void ibmvscsi_release_crq_queue(struct crq_queue *queue,
101 101 struct ibmvscsi_host_data *hostdata,
102 102 int max_requests);
103 - void ibmvscsi_reset_crq_queue(struct crq_queue *queue,
103 + int ibmvscsi_reset_crq_queue(struct crq_queue *queue,
104 104 struct ibmvscsi_host_data *hostdata);
105 105 
106 106 void ibmvscsi_handle_crq(struct viosrp_crq *crq,
+2 -1
drivers/scsi/ibmvscsi/iseries_vscsi.c
··· 117 117 *
118 118 * no-op for iSeries
119 119 */
120 - void ibmvscsi_reset_crq_queue(struct crq_queue *queue,
120 + int ibmvscsi_reset_crq_queue(struct crq_queue *queue,
121 121 struct ibmvscsi_host_data *hostdata)
122 122 {
123 + return 0;
123 124 }
124 125 
125 126 /**
+7 -1
drivers/scsi/ibmvscsi/rpa_vscsi.c
··· 230 230 rc = plpar_hcall_norets(H_REG_CRQ,
231 231 vdev->unit_address,
232 232 queue->msg_token, PAGE_SIZE);
233 + if (rc == H_Resource)
234 + /* maybe kexecing and resource is busy. try a reset */
235 + rc = ibmvscsi_reset_crq_queue(queue,
236 + hostdata);
237 + 
233 238 if (rc == 2) {
234 239 /* Adapter is good, but other end is not ready */
235 240 printk(KERN_WARNING "ibmvscsi: Partner adapter not ready\n");
··· 286 281 * @hostdata: ibmvscsi_host_data of host
287 282 *
288 283 */
289 - void ibmvscsi_reset_crq_queue(struct crq_queue *queue,
284 + int ibmvscsi_reset_crq_queue(struct crq_queue *queue,
290 285 struct ibmvscsi_host_data *hostdata)
291 286 {
292 287 int rc;
··· 314 309 printk(KERN_WARNING
315 310 "ibmvscsi: couldn't register crq--rc 0x%x\n", rc);
316 311 }
312 + return rc;
317 313 }
+1 -1
drivers/scsi/megaraid.c
··· 664 664 sg->offset;
665 665 } else
666 666 buf = cmd->request_buffer;
667 - memset(cmd->request_buffer, 0, cmd->cmnd[4]);
667 + memset(buf, 0, cmd->cmnd[4]);
668 668 if (cmd->use_sg) {
669 669 struct scatterlist *sg;
670 670 
+1 -9
drivers/scsi/qla2xxx/qla_def.h
··· 2476 2476 */
2477 2477 #define LOOP_TRANSITION(ha) \
2478 2478 (test_bit(ISP_ABORT_NEEDED, &ha->dpc_flags) || \
2479 - test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags))
2480 - 
2481 - #define LOOP_NOT_READY(ha) \
2482 - ((test_bit(ISP_ABORT_NEEDED, &ha->dpc_flags) || \
2483 - test_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags) || \
2484 - test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags) || \
2485 - test_bit(LOOP_RESYNC_ACTIVE, &ha->dpc_flags)) || \
2479 + test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags) || \
2486 2480 atomic_read(&ha->loop_state) == LOOP_DOWN)
2487 - 
2488 - #define LOOP_RDY(ha) (!LOOP_NOT_READY(ha))
2489 2481 
2490 2482 #define TGT_Q(ha, t) (ha->otgt[t])
2491 2483 
+3 -3
drivers/scsi/qla2xxx/qla_init.c
··· 1259 1259 rval = qla2x00_get_adapter_id(ha,
1260 1260 &loop_id, &al_pa, &area, &domain, &topo);
1261 1261 if (rval != QLA_SUCCESS) {
1262 - if (LOOP_NOT_READY(ha) || atomic_read(&ha->loop_down_timer) ||
1262 + if (LOOP_TRANSITION(ha) || atomic_read(&ha->loop_down_timer) ||
1263 1263 (rval == QLA_COMMAND_ERROR && loop_id == 0x7)) {
1264 1264 DEBUG2(printk("%s(%ld) Loop is in a transition state\n",
1265 1265 __func__, ha->host_no));
··· 1796 1796 }
1797 1797 
1798 1798 if (rval == QLA_SUCCESS && test_bit(RSCN_UPDATE, &flags)) {
1799 - if (LOOP_NOT_READY(ha)) {
1799 + if (LOOP_TRANSITION(ha)) {
1800 1800 rval = QLA_FUNCTION_FAILED;
1801 1801 } else {
1802 1802 rval = qla2x00_configure_fabric(ha);
··· 2369 2369 if (qla2x00_is_reserved_id(ha, loop_id))
2370 2370 continue;
2371 2371 
2372 - if (atomic_read(&ha->loop_down_timer) || LOOP_NOT_READY(ha))
2372 + if (atomic_read(&ha->loop_down_timer) || LOOP_TRANSITION(ha))
2373 2373 break;
2374 2374 
2375 2375 if (swl != NULL) {
+15
drivers/scsi/qla2xxx/qla_isr.c
··· 909 909 resid = resid_len;
910 910 cp->resid = resid;
911 911 CMD_RESID_LEN(cp) = resid;
912 + 
913 + if (!lscsi_status &&
914 + ((unsigned)(cp->request_bufflen - resid) <
915 + cp->underflow)) {
916 + qla_printk(KERN_INFO, ha,
917 + "scsi(%ld:%d:%d:%d): Mid-layer underflow "
918 + "detected (%x of %x bytes)...returning "
919 + "error status.\n", ha->host_no,
920 + cp->device->channel, cp->device->id,
921 + cp->device->lun, resid,
922 + cp->request_bufflen);
923 + 
924 + cp->result = DID_ERROR << 16;
925 + break;
926 + }
912 927 }
913 928 cp->result = DID_OK << 16 | lscsi_status;
914 929 
+6 -1
drivers/scsi/scsi_error.c
··· 422 422 **/
423 423 static void scsi_eh_done(struct scsi_cmnd *scmd)
424 424 {
425 + struct completion *eh_action;
426 + 
425 427 SCSI_LOG_ERROR_RECOVERY(3,
426 428 printk("%s scmd: %p result: %x\n",
427 429 __FUNCTION__, scmd, scmd->result));
428 - complete(scmd->device->host->eh_action);
430 + 
431 + eh_action = scmd->device->host->eh_action;
432 + if (eh_action)
433 + complete(eh_action);
429 434 }
430 435 
431 436 /**
+21 -12
drivers/scsi/scsi_lib.c
··· 1085 1085 scsi_io_completion(cmd, cmd->result == 0 ? cmd->bufflen : 0, 0);
1086 1086 }
1087 1087 
1088 + void scsi_setup_blk_pc_cmnd(struct scsi_cmnd *cmd, int retries)
1089 + {
1090 + struct request *req = cmd->request;
1091 + 
1092 + BUG_ON(sizeof(req->cmd) > sizeof(cmd->cmnd));
1093 + memcpy(cmd->cmnd, req->cmd, sizeof(cmd->cmnd));
1094 + cmd->cmd_len = req->cmd_len;
1095 + if (!req->data_len)
1096 + cmd->sc_data_direction = DMA_NONE;
1097 + else if (rq_data_dir(req) == WRITE)
1098 + cmd->sc_data_direction = DMA_TO_DEVICE;
1099 + else
1100 + cmd->sc_data_direction = DMA_FROM_DEVICE;
1101 + 
1102 + cmd->transfersize = req->data_len;
1103 + cmd->allowed = retries;
1104 + cmd->timeout_per_command = req->timeout;
1105 + }
1106 + EXPORT_SYMBOL_GPL(scsi_setup_blk_pc_cmnd);
1107 + 
1088 1108 static int scsi_prep_fn(struct request_queue *q, struct request *req)
1089 1109 {
1090 1110 struct scsi_device *sdev = q->queuedata;
··· 1240 1220 goto kill;
1241 1221 }
1242 1222 } else {
1243 - memcpy(cmd->cmnd, req->cmd, sizeof(cmd->cmnd));
1244 - cmd->cmd_len = req->cmd_len;
1245 - if (rq_data_dir(req) == WRITE)
1246 - cmd->sc_data_direction = DMA_TO_DEVICE;
1247 - else if (req->data_len)
1248 - cmd->sc_data_direction = DMA_FROM_DEVICE;
1249 - else
1250 - cmd->sc_data_direction = DMA_NONE;
1251 - 
1252 - cmd->transfersize = req->data_len;
1253 - cmd->allowed = 3;
1254 - cmd->timeout_per_command = req->timeout;
1223 + scsi_setup_blk_pc_cmnd(cmd, 3);
1255 1224 cmd->done = scsi_generic_done;
1256 1225 }
1257 1226 }
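
The new helper centralises the REQ_BLOCK_PC setup that each upper-layer driver used to open-code, and the sd, sr and st hunks below shrink to a single call as a result. Roughly, a ULD's init_command hook now reduces to the following sketch (my_init_command and my_intr are hypothetical names; the retry count is per-driver):

	static int my_init_command(struct scsi_cmnd *cmd)
	{
		if (!(cmd->request->flags & REQ_BLOCK_PC))
			return 0;	/* not a passthrough request */

		/* cdb copy, data direction, transfersize, retries, timeout */
		scsi_setup_blk_pc_cmnd(cmd, 3);
		cmd->done = my_intr;	/* driver-specific completion routine */
		return 1;
	}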
+1 -15
drivers/scsi/sd.c
··· 245 245 * SG_IO from block layer already setup, just copy cdb basically
246 246 */
247 247 if (blk_pc_request(rq)) {
248 - if (sizeof(rq->cmd) > sizeof(SCpnt->cmnd))
249 - return 0;
250 - 
251 - memcpy(SCpnt->cmnd, rq->cmd, sizeof(SCpnt->cmnd));
252 - SCpnt->cmd_len = rq->cmd_len;
253 - if (rq_data_dir(rq) == WRITE)
254 - SCpnt->sc_data_direction = DMA_TO_DEVICE;
255 - else if (rq->data_len)
256 - SCpnt->sc_data_direction = DMA_FROM_DEVICE;
257 - else
258 - SCpnt->sc_data_direction = DMA_NONE;
259 - 
260 - this_count = rq->data_len;
248 + scsi_setup_blk_pc_cmnd(SCpnt, SD_PASSTHROUGH_RETRIES);
261 249 if (rq->timeout)
262 250 timeout = rq->timeout;
263 251 
264 - SCpnt->transfersize = rq->data_len;
265 - SCpnt->allowed = SD_PASSTHROUGH_RETRIES;
266 252 goto queue;
267 253 }
268 254 
+3 -17
drivers/scsi/sr.c
··· 320 320 * these are already setup, just copy cdb basically
321 321 */
322 322 if (SCpnt->request->flags & REQ_BLOCK_PC) {
323 - struct request *rq = SCpnt->request;
323 + scsi_setup_blk_pc_cmnd(SCpnt, MAX_RETRIES);
324 324 
325 - if (sizeof(rq->cmd) > sizeof(SCpnt->cmnd))
326 - return 0;
325 + if (SCpnt->timeout_per_command)
326 + timeout = SCpnt->timeout_per_command;
327 327 
328 - memcpy(SCpnt->cmnd, rq->cmd, sizeof(SCpnt->cmnd));
329 - SCpnt->cmd_len = rq->cmd_len;
330 - if (!rq->data_len)
331 - SCpnt->sc_data_direction = DMA_NONE;
332 - else if (rq_data_dir(rq) == WRITE)
333 - SCpnt->sc_data_direction = DMA_TO_DEVICE;
334 - else
335 - SCpnt->sc_data_direction = DMA_FROM_DEVICE;
336 - 
337 - this_count = rq->data_len;
338 - if (rq->timeout)
339 - timeout = rq->timeout;
340 - 
341 - SCpnt->transfersize = rq->data_len;
342 328 goto queue;
343 329 }
344 330 
+1 -18
drivers/scsi/st.c
··· 4194 4194 */
4195 4195 static int st_init_command(struct scsi_cmnd *SCpnt)
4196 4196 {
4197 - struct request *rq;
4198 - 
4199 4197 if (!(SCpnt->request->flags & REQ_BLOCK_PC))
4200 4198 return 0;
4201 4199 
4202 - rq = SCpnt->request;
4203 - if (sizeof(rq->cmd) > sizeof(SCpnt->cmnd))
4204 - return 0;
4205 - 
4206 - memcpy(SCpnt->cmnd, rq->cmd, sizeof(SCpnt->cmnd));
4207 - SCpnt->cmd_len = rq->cmd_len;
4208 - 
4209 - if (rq_data_dir(rq) == WRITE)
4210 - SCpnt->sc_data_direction = DMA_TO_DEVICE;
4211 - else if (rq->data_len)
4212 - SCpnt->sc_data_direction = DMA_FROM_DEVICE;
4213 - else
4214 - SCpnt->sc_data_direction = DMA_NONE;
4215 - 
4216 - SCpnt->timeout_per_command = rq->timeout;
4217 - SCpnt->transfersize = rq->data_len;
4200 + scsi_setup_blk_pc_cmnd(SCpnt, 0);
4218 4201 SCpnt->done = st_intr;
4219 4202 return 1;
4220 4203 }
+2 -2
drivers/scsi/sym53c8xx_2/sym_hipd.c
··· 1405 1405 goal->iu = 0;
1406 1406 goal->dt = 0;
1407 1407 goal->qas = 0;
1408 - goal->period = 0;
1409 1408 goal->offset = 0;
1410 1409 return;
1411 1410 }
··· 1464 1465 * Many devices implement PPR in a buggy way, so only use it if we
1465 1466 * really want to.
1466 1467 */
1467 - if (goal->iu || goal->dt || goal->qas || (goal->period < 0xa)) {
1468 + if (goal->offset &&
1469 + (goal->iu || goal->dt || goal->qas || (goal->period < 0xa))) {
1468 1470 nego = NS_PPR;
1469 1471 } else if (spi_width(starget) != goal->width) {
1470 1472 nego = NS_WIDE;
+18 -8
fs/reiserfs/inode.c
··· 32 32 JOURNAL_PER_BALANCE_CNT * 2 +
33 33 2 * REISERFS_QUOTA_INIT_BLOCKS(inode->i_sb);
34 34 struct reiserfs_transaction_handle th;
35 + int err;
35 36 
36 37 truncate_inode_pages(&inode->i_data, 0);
37 38 
··· 50 49 }
51 50 reiserfs_update_inode_transaction(inode);
52 51 
53 - if (reiserfs_delete_object(&th, inode)) {
54 - up(&inode->i_sem);
55 - goto out;
56 - }
52 + err = reiserfs_delete_object(&th, inode);
57 53 
58 54 /* Do quota update inside a transaction for journaled quotas. We must do that
59 55 * after delete_object so that quota updates go into the same transaction as
60 56 * stat data deletion */
61 - DQUOT_FREE_INODE(inode);
57 + if (!err)
58 + DQUOT_FREE_INODE(inode);
62 59 
63 60 if (journal_end(&th, inode->i_sb, jbegin_count)) {
64 61 up(&inode->i_sem);
··· 64 65 }
65 66 
66 67 up(&inode->i_sem);
68 + 
69 + /* check return value from reiserfs_delete_object after
70 + * ending the transaction
71 + */
72 + if (err)
73 + goto out;
67 74 
68 75 /* all items of file are deleted, so we can remove "save" link */
69 76 remove_save_link(inode, 0 /* not truncate */ ); /* we can't do anything
··· 2104 2099 struct page *page = NULL;
2105 2100 int error;
2106 2101 struct buffer_head *bh = NULL;
2102 + int err2;
2107 2103 
2108 2104 reiserfs_write_lock(p_s_inode->i_sb);
2109 2105 
··· 2142 2136 transaction of truncating gets committed - on reboot the file
2143 2137 either appears truncated properly or not truncated at all */
2144 2138 add_save_link(&th, p_s_inode, 1);
2145 - error = reiserfs_do_truncate(&th, p_s_inode, page, update_timestamps);
2146 - if (error)
2147 - goto out;
2139 + err2 = reiserfs_do_truncate(&th, p_s_inode, page, update_timestamps);
2148 2140 error =
2149 2141 journal_end(&th, p_s_inode->i_sb, JOURNAL_PER_BALANCE_CNT * 2 + 1);
2150 2142 if (error)
2151 2143 goto out;
2152 2144 
2145 + /* check reiserfs_do_truncate after ending the transaction */
2146 + if (err2) {
2147 + error = err2;
2148 + goto out;
2149 + }
2150 + 
2153 2151 if (update_timestamps) {
2154 2152 error = remove_save_link(p_s_inode, 1 /* truncate */ );
2155 2153 if (error)
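
Both hunks above apply the same pattern: an error from work done inside the transaction is recorded but only acted on after journal_end(), so the transaction is always closed. In outline, with do_work() standing in for reiserfs_delete_object() or reiserfs_do_truncate():

	err = do_work(&th);		/* may fail, but the handle stays open */
	ret = journal_end(&th, sb, jbegin_count);
	if (ret)
		return ret;		/* journal problems take precedence */
	if (err)
		return err;		/* now safe to report the earlier failure */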
+14 -4
fs/reiserfs/journal.c
··· 1039 1039 }
1040 1040 atomic_dec(&journal->j_async_throttle);
1041 1041 
1042 + /* We're skipping the commit if there's an error */
1043 + if (retval || reiserfs_is_journal_aborted(journal))
1044 + barrier = 0;
1045 + 
1042 1046 /* wait on everything written so far before writing the commit
1043 1047 * if we are in barrier mode, send the commit down now
1044 1048 */
··· 1081 1077 BUG_ON(atomic_read(&(jl->j_commit_left)) != 1);
1082 1078 
1083 1079 if (!barrier) {
1084 - if (buffer_dirty(jl->j_commit_bh))
1085 - BUG();
1086 - mark_buffer_dirty(jl->j_commit_bh);
1087 - sync_dirty_buffer(jl->j_commit_bh);
1080 + /* If there was a write error in the journal - we can't commit
1081 + * this transaction - it will be invalid and, if successful,
1082 + * will just end up propagating the write error out to
1083 + * the file system. */
1084 + if (likely(!retval && !reiserfs_is_journal_aborted(journal))) {
1085 + if (buffer_dirty(jl->j_commit_bh))
1086 + BUG();
1087 + mark_buffer_dirty(jl->j_commit_bh);
1088 + sync_dirty_buffer(jl->j_commit_bh);
1089 + }
1088 1090 } else
1089 1091 wait_on_buffer(jl->j_commit_bh);
1090 1092 
+2
include/asm-arm/elf.h
··· 22 22 #define R_ARM_NONE 0
23 23 #define R_ARM_PC24 1
24 24 #define R_ARM_ABS32 2
25 + #define R_ARM_CALL 28
26 + #define R_ARM_JUMP24 29
25 27 
26 28 #define ELF_NGREG (sizeof (struct pt_regs) / sizeof(elf_greg_t))
27 29 typedef elf_greg_t elf_gregset_t[ELF_NGREG];
+8 -14
include/asm-mips/mach-au1x00/au1xxx_ide.h
··· 74 74 u8 white_list, black_list;
75 75 struct dbdma_cmd *dma_table_cpu;
76 76 dma_addr_t dma_table_dma;
77 - struct scatterlist *sg_table;
78 - int sg_nents;
79 - int sg_dma_direction;
80 77 #endif
81 78 struct device *dev;
82 79 int irq;
··· 84 87 } _auide_hwif;
85 88 
86 89 #ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA
87 - struct drive_list_entry {
88 - const char * id_model;
89 - const char * id_firmware;
90 - };
91 - 
92 90 /* HD white list */
93 91 static const struct drive_list_entry dma_white_list [] = {
94 92 /*
··· 159 167 * Multi-Word DMA + DbDMA functions
160 168 */
161 169 #ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA
162 - 
163 - static int in_drive_list(struct hd_driveid *id,
164 - const struct drive_list_entry *drive_table);
165 170 static int auide_build_sglist(ide_drive_t *drive, struct request *rq);
166 171 static int auide_build_dmatable(ide_drive_t *drive);
167 172 static int auide_dma_end(ide_drive_t *drive);
168 - static void auide_dma_start(ide_drive_t *drive );
169 173 ide_startstop_t auide_dma_intr (ide_drive_t *drive);
170 174 static void auide_dma_exec_cmd(ide_drive_t *drive, u8 command);
171 175 static int auide_dma_setup(ide_drive_t *drive);
··· 176 188 static void auide_ddma_rx_callback(int irq, void *param,
177 189 struct pt_regs *regs);
178 190 static int auide_dma_off_quietly(ide_drive_t *drive);
179 - static int auide_dma_timeout(ide_drive_t *drive);
180 - 
181 191 #endif /* end CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA */
182 192 
183 193 /*******************************************************************************
··· 285 299 #define SBC_IDE_MDMA2_TPM (0x00<<6)
286 300 #define SBC_IDE_MDMA2_TA (0x12<<0)
287 301 
302 + #define SBC_IDE_TIMING(mode) \
303 + SBC_IDE_##mode##_TWCS | \
304 + SBC_IDE_##mode##_TCSH | \
305 + SBC_IDE_##mode##_TCSOFF | \
306 + SBC_IDE_##mode##_TWP | \
307 + SBC_IDE_##mode##_TCSW | \
308 + SBC_IDE_##mode##_TPM | \
309 + SBC_IDE_##mode##_TA
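
The new SBC_IDE_TIMING() macro token-pastes a mode name into each of the seven per-field timing constants and ORs them together, so a caller can program a whole timing set in one expression. A hypothetical use in the driver's speed-tuning path (MEM_STTIME2 is the Au1200 static-bus timing register; this exact call site is an assumption, not part of the hunk):

	u32 mem_sttime = 0;

	switch (speed) {
	case XFER_PIO_4:
		mem_sttime = SBC_IDE_TIMING(PIO4);	/* ORs all SBC_IDE_PIO4_* fields */
		break;
	case XFER_MW_DMA_2:
		mem_sttime = SBC_IDE_TIMING(MDMA2);	/* ORs all SBC_IDE_MDMA2_* fields */
		break;
	}
	au_writel(mem_sttime, MEM_STTIME2);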
+6 -125
include/linux/ide.h
··· 23 23 #include <asm/io.h>
24 24 #include <asm/semaphore.h>
25 25 
26 - /*
27 - * This is the multiple IDE interface driver, as evolved from hd.c.
28 - * It supports up to four IDE interfaces, on one or more IRQs (usually 14 & 15).
29 - * There can be up to two drives per interface, as per the ATA-2 spec.
30 - *
31 - * Primary i/f: ide0: major=3; (hda) minor=0; (hdb) minor=64
32 - * Secondary i/f: ide1: major=22; (hdc or hd1a) minor=0; (hdd or hd1b) minor=64
33 - * Tertiary i/f: ide2: major=33; (hde) minor=0; (hdf) minor=64
34 - * Quaternary i/f: ide3: major=34; (hdg) minor=0; (hdh) minor=64
35 - */
36 26 
37 27 /******************************************************************************
38 28 * IDE driver configuration options (play with these as desired):
39 29 *
··· 181 192 #define WAIT_WORSTCASE (30*HZ) /* 30sec - worst case when spinning up */
182 193 #define WAIT_CMD (10*HZ) /* 10sec - maximum wait for an IRQ to happen */
183 194 #define WAIT_MIN_SLEEP (2*HZ/100) /* 20msec - minimum sleep time */
184 - 
185 - #define HOST(hwif,chipset) \
186 - { \
187 - return ((hwif)->chipset == chipset) ? 1 : 0; \
188 - }
189 195 
190 196 /*
191 197 * Check for an interrupt and acknowledge the interrupt status
··· 375 391 } ata_nsector_t, ata_data_t, atapi_bcount_t, ata_index_t;
376 392 
377 393 /*
378 - * ATA-IDE Error Register
379 - *
380 - * mark : Bad address mark
381 - * tzero : Couldn't find track 0
382 - * abrt : Aborted Command
383 - * mcr : Media Change Request
384 - * id : ID field not found
385 - * mce : Media Change Event
386 - * ecc : Uncorrectable ECC error
387 - * bdd : dual meaing
388 - */
389 - typedef union {
390 - unsigned all :8;
391 - struct {
392 - #if defined(__LITTLE_ENDIAN_BITFIELD)
393 - unsigned mark :1;
394 - unsigned tzero :1;
395 - unsigned abrt :1;
396 - unsigned mcr :1;
397 - unsigned id :1;
398 - unsigned mce :1;
399 - unsigned ecc :1;
400 - unsigned bdd :1;
401 - #elif defined(__BIG_ENDIAN_BITFIELD)
402 - unsigned bdd :1;
403 - unsigned ecc :1;
404 - unsigned mce :1;
405 - unsigned id :1;
406 - unsigned mcr :1;
407 - unsigned abrt :1;
408 - unsigned tzero :1;
409 - unsigned mark :1;
410 - #else
411 - #error "Please fix <asm/byteorder.h>"
412 - #endif
413 - } b;
414 - } ata_error_t;
415 - 
416 - /*
417 394 * ATA-IDE Select Register, aka Device-Head
418 395 *
419 396 * head : always zeros here
··· 447 502 #endif
448 503 } b;
449 504 } ata_status_t, atapi_status_t;
450 - 
451 - /*
452 - * ATA-IDE Control Register
453 - *
454 - * bit0 : Should be set to zero
455 - * nIEN : device INTRQ to host
456 - * SRST : host soft reset bit
457 - * bit3 : ATA-2 thingy, Should be set to 1
458 - * reserved456 : Reserved
459 - * HOB : 48-bit address ordering, High Ordered Bit
460 - */
461 - typedef union {
462 - unsigned all : 8;
463 - struct {
464 - #if defined(__LITTLE_ENDIAN_BITFIELD)
465 - unsigned bit0 : 1;
466 - unsigned nIEN : 1;
467 - unsigned SRST : 1;
468 - unsigned bit3 : 1;
469 - unsigned reserved456 : 3;
470 - unsigned HOB : 1;
471 - #elif defined(__BIG_ENDIAN_BITFIELD)
472 - unsigned HOB : 1;
473 - unsigned reserved456 : 3;
474 - unsigned bit3 : 1;
475 - unsigned SRST : 1;
476 - unsigned nIEN : 1;
477 - unsigned bit0 : 1;
478 - #else
479 - #error "Please fix <asm/byteorder.h>"
480 - #endif
481 - } b;
482 - } ata_control_t;
483 505 
484 506 /*
485 507 * ATAPI Feature Register
··· 528 616 #endif
529 617 } b;
530 618 } atapi_error_t;
531 - 
532 - /*
533 - * ATAPI floppy Drive Select Register
534 - *
535 - * sam_lun : Logical unit number
536 - * reserved3 : Reserved
537 - * drv : The responding drive will be drive 0 (0) or drive 1 (1)
538 - * one5 : Should be set to 1
539 - * reserved6 : Reserved
540 - * one7 : Should be set to 1
541 - */
542 - typedef union {
543 - unsigned all :8;
544 - struct {
545 - #if defined(__LITTLE_ENDIAN_BITFIELD)
546 - unsigned sam_lun :3;
547 - unsigned reserved3 :1;
548 - unsigned drv :1;
549 - unsigned one5 :1;
550 - unsigned reserved6 :1;
551 - unsigned one7 :1;
552 - #elif defined(__BIG_ENDIAN_BITFIELD)
553 - unsigned one7 :1;
554 - unsigned reserved6 :1;
555 - unsigned one5 :1;
556 - unsigned drv :1;
557 - unsigned reserved3 :1;
558 - unsigned sam_lun :3;
559 - #else
560 - #error "Please fix <asm/byteorder.h>"
561 - #endif
562 - } b;
563 - } atapi_select_t;
564 619 
565 620 /*
566 621 * Status returned from various ide_ functions
··· 980 1101 int (*end_request)(ide_drive_t *, int, int);
981 1102 ide_startstop_t (*error)(ide_drive_t *, struct request *rq, u8, u8);
982 1103 ide_startstop_t (*abort)(ide_drive_t *, struct request *rq);
983 - int (*ioctl)(ide_drive_t *, struct inode *, struct file *, unsigned int, unsigned long);
984 1104 ide_proc_entry_t *proc;
985 - void (*ata_prebuilder)(ide_drive_t *);
986 - void (*atapi_prebuilder)(ide_drive_t *);
987 1105 struct device_driver gen_driver;
988 1106 } ide_driver_t;
989 1107 
··· 1174 1298 extern void ide_timer_expiry(unsigned long);
1175 1299 extern irqreturn_t ide_intr(int irq, void *dev_id, struct pt_regs *regs);
1176 1300 extern void do_ide_request(request_queue_t *);
1177 - extern void ide_init_subdrivers(void);
1178 1301 
1179 1302 void ide_init_disk(struct gendisk *, ide_drive_t *);
1180 1303 
··· 1246 1371 #define GOOD_DMA_DRIVE 1
1247 1372 
1248 1373 #ifdef CONFIG_BLK_DEV_IDEDMA
1374 + struct drive_list_entry {
1375 + const char *id_model;
1376 + const char *id_firmware;
1377 + };
1378 + 
1379 + int ide_in_drive_list(struct hd_driveid *, const struct drive_list_entry *);
1249 1380 int __ide_dma_bad_drive(ide_drive_t *);
1250 1381 int __ide_dma_good_drive(ide_drive_t *);
1251 1382 int ide_use_dma(ide_drive_t *);
+1
include/linux/pci_ids.h
··· 1244 1244 #define PCI_DEVICE_ID_VIA_8378_0 0x3205
1245 1245 #define PCI_DEVICE_ID_VIA_8783_0 0x3208
1246 1246 #define PCI_DEVICE_ID_VIA_8237 0x3227
1247 + #define PCI_DEVICE_ID_VIA_8251 0x3287
1247 1248 #define PCI_DEVICE_ID_VIA_3296_0 0x0296
1248 1249 #define PCI_DEVICE_ID_VIA_8231 0x8231
1249 1250 #define PCI_DEVICE_ID_VIA_8231_4 0x8235
+1
include/scsi/scsi_cmnd.h
··· 151 151 extern void scsi_put_command(struct scsi_cmnd *);
152 152 extern void scsi_io_completion(struct scsi_cmnd *, unsigned int, unsigned int);
153 153 extern void scsi_finish_command(struct scsi_cmnd *cmd);
154 + extern void scsi_setup_blk_pc_cmnd(struct scsi_cmnd *cmd, int retries);
154 155 
155 156 #endif /* _SCSI_SCSI_CMND_H */
+14 -12
init/Kconfig
··· 256 256 
257 257 source "usr/Kconfig"
258 258 
259 + config CC_OPTIMIZE_FOR_SIZE
260 + bool "Optimize for size (Look out for broken compilers!)"
261 + default y
262 + depends on ARM || H8300 || EXPERIMENTAL
263 + depends on !SPARC64
264 + help
265 + Enabling this option will pass "-Os" instead of "-O2" to gcc
266 + resulting in a smaller kernel.
267 + 
268 + WARNING: some versions of gcc may generate incorrect code with this
269 + option. If problems are observed, a gcc upgrade may be needed.
270 + 
271 + If unsure, say N.
272 + 
259 273 menuconfig EMBEDDED
260 274 bool "Configure standard kernel features (for small systems)"
261 275 help
··· 351 337 help
352 338 Disabling this option will cause the kernel to be built without
353 339 support for epoll family of system calls.
354 - 
355 - config CC_OPTIMIZE_FOR_SIZE
356 - bool "Optimize for size"
357 - default y if ARM || H8300
358 - help
359 - Enabling this option will pass "-Os" instead of "-O2" to gcc
360 - resulting in a smaller kernel.
361 - 
362 - WARNING: some versions of gcc may generate incorrect code with this
363 - option. If problems are observed, a gcc upgrade may be needed.
364 - 
365 - If unsure, say N.
366 - 
367 341 config SHMEM
368 342 bool "Use full shmem filesystem" if EMBEDDED
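
The option itself only selects a compiler flag; the mechanism lives in the top-level Makefile, which reads roughly as follows (a sketch for orientation, not part of this patch):

	ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE
	CFLAGS		+= -Os
	else
	CFLAGS		+= -O2
	endif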
+3
net/8021q/vlan_dev.c
··· 165 165 
166 166 skb_pull(skb, VLAN_HLEN); /* take off the VLAN header (4 bytes currently) */
167 167 
168 + /* Need to correct hardware checksum */
169 + skb_postpull_rcsum(skb, vhdr, VLAN_HLEN);
170 + 
168 171 /* Ok, lets check to make sure the device (dev) we
169 172 * came in on is what this VLAN is attached to.
170 173 */
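
Both this hunk and the ip_gre one below hinge on what skb_postpull_rcsum() does: when the NIC supplied a full packet checksum (CHECKSUM_HW in this era), any bytes pulled off the head of the skb must be subtracted from skb->csum, or later checksum verification will fail. For reference, the helper in skbuff.h is essentially:

	static inline void skb_postpull_rcsum(struct sk_buff *skb,
					      const void *start, unsigned int len)
	{
		/* Subtract the checksum of the pulled region from the
		 * hardware-computed total; a no-op for other csum modes. */
		if (skb->ip_summed == CHECKSUM_HW)
			skb->csum = csum_sub(skb->csum, csum_partial(start, len, 0));
	}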
+1 -1
net/ipv4/ip_gre.c
··· 618 618 
619 619 skb->mac.raw = skb->nh.raw;
620 620 skb->nh.raw = __pskb_pull(skb, offset);
621 - skb_postpull_rcsum(skb, skb->mac.raw, offset);
621 + skb_postpull_rcsum(skb, skb->h.raw, offset);
622 622 memset(&(IPCB(skb)->opt), 0, sizeof(struct ip_options));
623 623 skb->pkt_type = PACKET_HOST;
624 624 #ifdef CONFIG_NET_IPGRE_BROADCAST