···
 	/* drop floating-point and debug-register state if it exists: */
 	current->thread.flags &= ~(IA64_THREAD_FPH_VALID | IA64_THREAD_DBG_VALID);
 	ia64_drop_fpu(current);
+#ifdef CONFIG_IA32_SUPPORT
 	if (IS_IA32_PROCESS(ia64_task_regs(current))) {
 		ia32_drop_partial_page_list(current);
 		current->thread.task_size = IA32_PAGE_OFFSET;
 		set_fs(USER_DS);
 	}
+#endif
 }
 
 /*
+1-9
drivers/ide/Kconfig
···
 	depends on SOC_AU1200 && BLK_DEV_IDE_AU1XXX
 endchoice
 
-config BLK_DEV_IDE_AU1XXX_BURSTABLE_ON
-	bool "Enable burstable Mode on DbDMA"
-	default false
-	depends BLK_DEV_IDE_AU1XXX
-	help
-	  This option enable the burstable Flag on DbDMA controller
-	  (cf. "AMD Alchemy 'Au1200' Processor Data Book - PRELIMINARY").
-
 config BLK_DEV_IDE_AU1XXX_SEQTS_PER_RQ
 	int "Maximum transfer size (KB) per request (up to 128)"
 	default "128"
···
 
 config BLK_DEV_MPC8xx_IDE
 	bool "MPC8xx IDE support"
-	depends on 8xx
+	depends on 8xx && IDE=y && BLK_DEV_IDE=y
 	help
 	  This option provides support for IDE on Motorola MPC8xx Systems.
 	  Please see 'Type of MPC8xx IDE interface' for details.
-7
drivers/ide/ide-cd.c
···
 	struct cdrom_info *info = drive->driver_data;
 
 	info->dma = 0;
-	info->cmd = 0;
 	info->start_seek = jiffies;
 	return cdrom_start_packet_command(drive, 0, cdrom_start_seek_continuation);
 }
···
 	if ((rq->sector & (sectors_per_frame - 1)) ||
 	    (rq->nr_sectors & (sectors_per_frame - 1)))
 		info->dma = 0;
-
-	info->cmd = READ;
 
 	/* Start sending the read request to the drive. */
 	return cdrom_start_packet_command(drive, 32768, cdrom_start_read_continuation);
···
 	struct cdrom_info *info = drive->driver_data;
 
 	info->dma = 0;
-	info->cmd = 0;
 	rq->flags &= ~REQ_FAILED;
 	len = rq->data_len;
···
 	/* use dma, if possible. we don't need to check more, since we
 	 * know that the transfer is always (at least!) frame aligned */
 	info->dma = drive->using_dma ? 1 : 0;
-	info->cmd = WRITE;
 
 	info->devinfo.media_written = 1;
···
 	rq->flags |= REQ_QUIET;
 
 	info->dma = 0;
-	info->cmd = 0;
 
 	/*
 	 * sg request
···
 		int mask = drive->queue->dma_alignment;
 		unsigned long addr = (unsigned long) page_address(bio_page(rq->bio));
 
-		info->cmd = rq_data_dir(rq);
 		info->dma = drive->using_dma;
 
 		/*
-1
drivers/ide/ide-cd.h
···
 
 	struct request request_sense_request;
 	int dma;
-	int cmd;
 	unsigned long last_block;
 	unsigned long start_seek;
 	/* Buffer to hold mechanism status and changer slot table. */
···31 */32#undef REALLY_SLOW_IO /* most systems can safely undef this */3334-#include <linux/config.h> /* for CONFIG_BLK_DEV_IDEPCI */35#include <linux/types.h>36#include <linux/module.h>37#include <linux/kernel.h>38#include <linux/delay.h>39-#include <linux/timer.h>40-#include <linux/mm.h>41-#include <linux/ioport.h>42-#include <linux/hdreg.h>43#include <linux/init.h>44#include <linux/ide.h>45#include <linux/sysdev.h>4647#include <linux/dma-mapping.h>480049#include <asm/io.h>50#include <asm/mach-au1x00/au1xxx.h>51#include <asm/mach-au1x00/au1xxx_dbdma.h>52-53-#if CONFIG_PM54-#include <asm/mach-au1x00/au1xxx_pm.h>55-#endif5657#include <asm/mach-au1x00/au1xxx_ide.h>5859#define DRV_NAME "au1200-ide"60#define DRV_VERSION "1.0"61-#define DRV_AUTHOR "AMD PCS / Pete Popov <ppopov@embeddedalley.com>"62-#define DRV_DESC "Au1200 IDE"006364static _auide_hwif auide_hwif;65-static spinlock_t ide_tune_drive_spin_lock = SPIN_LOCK_UNLOCKED;66-static spinlock_t ide_tune_chipset_spin_lock = SPIN_LOCK_UNLOCKED;67-static int dbdma_init_done = 0;6869-/*70- * local I/O functions71- */72-u8 auide_inb(unsigned long port)73-{74- return (au_readb(port));75-}76-77-u16 auide_inw(unsigned long port)78-{79- return (au_readw(port));80-}81-82-u32 auide_inl(unsigned long port)83-{84- return (au_readl(port));85-}8687void auide_insw(unsigned long port, void *addr, u32 count)88{89-#if defined(CONFIG_BLK_DEV_IDE_AU1XXX_PIO_DBDMA)009091- _auide_hwif *ahwif = &auide_hwif;92- chan_tab_t *ctp;93- au1x_ddma_desc_t *dp;94-95- if(!put_dest_flags(ahwif->rx_chan, (void*)addr, count << 1,96- DDMA_FLAGS_NOIE)) {97- printk(KERN_ERR "%s failed %d\n", __FUNCTION__, __LINE__);98- return;99- }100- ctp = *((chan_tab_t **)ahwif->rx_chan);101- dp = ctp->cur_ptr;102- while (dp->dscr_cmd0 & DSCR_CMD0_V)103- ;104- ctp->cur_ptr = au1xxx_ddma_get_nextptr_virt(dp);105-#else106- while (count--)107- {108- *(u16 *)addr = au_readw(port);109- addr +=2 ;110- }111-#endif112-}113-114-void auide_insl(unsigned long port, void *addr, u32 count)115-{116- while (count--)117- {118- *(u32 *)addr = au_readl(port);119- /* NOTE: For IDE interfaces over PCMCIA,120- * 32-bit access does not work121- */122- addr += 4;123- }124-}125-126-void auide_outb(u8 addr, unsigned long port)127-{128- return (au_writeb(addr, port));129-}130-131-void auide_outbsync(ide_drive_t *drive, u8 addr, unsigned long port)132-{133- return (au_writeb(addr, port));134-}135-136-void auide_outw(u16 addr, unsigned long port)137-{138- return (au_writew(addr, port));139-}140-141-void auide_outl(u32 addr, unsigned long port)142-{143- return (au_writel(addr, port));144}145146void auide_outsw(unsigned long port, void *addr, u32 count)147{148-#if defined(CONFIG_BLK_DEV_IDE_AU1XXX_PIO_DBDMA)149- _auide_hwif *ahwif = &auide_hwif;150- chan_tab_t *ctp;151- au1x_ddma_desc_t *dp;152153- if(!put_source_flags(ahwif->tx_chan, (void*)addr,154- count << 1, DDMA_FLAGS_NOIE)) {155- printk(KERN_ERR "%s failed %d\n", __FUNCTION__, __LINE__);156- return;157- }158- ctp = *((chan_tab_t **)ahwif->tx_chan);159- dp = ctp->cur_ptr;160- while (dp->dscr_cmd0 & DSCR_CMD0_V)161- ;162- ctp->cur_ptr = au1xxx_ddma_get_nextptr_virt(dp);163-#else164- while (count--)165- {166- au_writew(*(u16 *)addr, port);167- addr += 2;168- }169#endif170-}171-172-void auide_outsl(unsigned long port, void *addr, u32 count)173-{174- while (count--)175- {176- au_writel(*(u32 *)addr, port);177- /* NOTE: For IDE interfaces over PCMCIA,178- * 32-bit access does not work179- */180- addr += 4;181- }182-}183184static void auide_tune_drive(ide_drive_t *drive, 
byte pio)185{186- int mem_sttime;187- int mem_stcfg;188- unsigned long flags;189- u8 speed;190191- /* get the best pio mode for the drive */192- pio = ide_get_best_pio_mode(drive, pio, 4, NULL);193194- printk("%s: setting Au1XXX IDE to PIO mode%d\n",195- drive->name, pio);196197- spin_lock_irqsave(&ide_tune_drive_spin_lock, flags);0198199- mem_sttime = 0;200- mem_stcfg = au_readl(MEM_STCFG2);00201202- /* set pio mode! */203- switch(pio) {204- case 0:205- /* set timing parameters for RCS2# */206- mem_sttime = SBC_IDE_PIO0_TWCS207- | SBC_IDE_PIO0_TCSH208- | SBC_IDE_PIO0_TCSOFF209- | SBC_IDE_PIO0_TWP210- | SBC_IDE_PIO0_TCSW211- | SBC_IDE_PIO0_TPM212- | SBC_IDE_PIO0_TA;213- /* set configuration for RCS2# */214- mem_stcfg |= TS_MASK;215- mem_stcfg &= ~TCSOE_MASK;216- mem_stcfg &= ~TOECS_MASK;217- mem_stcfg |= SBC_IDE_PIO0_TCSOE | SBC_IDE_PIO0_TOECS;218219- au_writel(mem_sttime,MEM_STTIME2);220- au_writel(mem_stcfg,MEM_STCFG2);221- break;222223- case 1:224- /* set timing parameters for RCS2# */225- mem_sttime = SBC_IDE_PIO1_TWCS226- | SBC_IDE_PIO1_TCSH227- | SBC_IDE_PIO1_TCSOFF228- | SBC_IDE_PIO1_TWP229- | SBC_IDE_PIO1_TCSW230- | SBC_IDE_PIO1_TPM231- | SBC_IDE_PIO1_TA;232- /* set configuration for RCS2# */233- mem_stcfg |= TS_MASK;234- mem_stcfg &= ~TCSOE_MASK;235- mem_stcfg &= ~TOECS_MASK;236- mem_stcfg |= SBC_IDE_PIO1_TCSOE | SBC_IDE_PIO1_TOECS;237- break;238239- case 2:240- /* set timing parameters for RCS2# */241- mem_sttime = SBC_IDE_PIO2_TWCS242- | SBC_IDE_PIO2_TCSH243- | SBC_IDE_PIO2_TCSOFF244- | SBC_IDE_PIO2_TWP245- | SBC_IDE_PIO2_TCSW246- | SBC_IDE_PIO2_TPM247- | SBC_IDE_PIO2_TA;248- /* set configuration for RCS2# */249- mem_stcfg &= ~TS_MASK;250- mem_stcfg &= ~TCSOE_MASK;251- mem_stcfg &= ~TOECS_MASK;252- mem_stcfg |= SBC_IDE_PIO2_TCSOE | SBC_IDE_PIO2_TOECS;253- break;254255- case 3:256- /* set timing parameters for RCS2# */257- mem_sttime = SBC_IDE_PIO3_TWCS258- | SBC_IDE_PIO3_TCSH259- | SBC_IDE_PIO3_TCSOFF260- | SBC_IDE_PIO3_TWP261- | SBC_IDE_PIO3_TCSW262- | SBC_IDE_PIO3_TPM263- | SBC_IDE_PIO3_TA;264- /* set configuration for RCS2# */265- mem_stcfg |= TS_MASK;266- mem_stcfg &= ~TS_MASK;267- mem_stcfg &= ~TCSOE_MASK;268- mem_stcfg &= ~TOECS_MASK;269- mem_stcfg |= SBC_IDE_PIO3_TCSOE | SBC_IDE_PIO3_TOECS;270271- break;0272273- case 4:274- /* set timing parameters for RCS2# */275- mem_sttime = SBC_IDE_PIO4_TWCS276- | SBC_IDE_PIO4_TCSH277- | SBC_IDE_PIO4_TCSOFF278- | SBC_IDE_PIO4_TWP279- | SBC_IDE_PIO4_TCSW280- | SBC_IDE_PIO4_TPM281- | SBC_IDE_PIO4_TA;282- /* set configuration for RCS2# */283- mem_stcfg &= ~TS_MASK;284- mem_stcfg &= ~TCSOE_MASK;285- mem_stcfg &= ~TOECS_MASK;286- mem_stcfg |= SBC_IDE_PIO4_TCSOE | SBC_IDE_PIO4_TOECS;287- break;288- }289290- au_writel(mem_sttime,MEM_STTIME2);291- au_writel(mem_stcfg,MEM_STCFG2);292293- spin_unlock_irqrestore(&ide_tune_drive_spin_lock, flags);0294295- speed = pio + XFER_PIO_0;296- ide_config_drive_speed(drive, speed);00000000000297}298299static int auide_tune_chipset (ide_drive_t *drive, u8 speed)300{301- u8 mode = 0;302- int mem_sttime;303- int mem_stcfg;304- unsigned long flags;305-#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA306- struct hd_driveid *id = drive->id;307308- /*309- * Now see what the current drive is capable of,310- * selecting UDMA only if the mate said it was ok.311- */312- if (id && (id->capability & 1) && drive->autodma &&313- !__ide_dma_bad_drive(drive)) {314- if (!mode && (id->field_valid & 2) && (id->dma_mword & 7)) {315- if (id->dma_mword & 4)316- mode = XFER_MW_DMA_2;317- else if (id->dma_mword & 2)318- mode = 
XFER_MW_DMA_1;319- else if (id->dma_mword & 1)320- mode = XFER_MW_DMA_0;321- }322- }323#endif324325- spin_lock_irqsave(&ide_tune_chipset_spin_lock, flags);0326327- mem_sttime = 0;328- mem_stcfg = au_readl(MEM_STCFG2);329-330- switch(speed) {331- case XFER_PIO_4:332- case XFER_PIO_3:333- case XFER_PIO_2:334- case XFER_PIO_1:335- case XFER_PIO_0:336- auide_tune_drive(drive, (speed - XFER_PIO_0));337- break;338#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA339- case XFER_MW_DMA_2:340- /* set timing parameters for RCS2# */341- mem_sttime = SBC_IDE_MDMA2_TWCS342- | SBC_IDE_MDMA2_TCSH343- | SBC_IDE_MDMA2_TCSOFF344- | SBC_IDE_MDMA2_TWP345- | SBC_IDE_MDMA2_TCSW346- | SBC_IDE_MDMA2_TPM347- | SBC_IDE_MDMA2_TA;348- /* set configuration for RCS2# */349- mem_stcfg &= ~TS_MASK;350- mem_stcfg &= ~TCSOE_MASK;351- mem_stcfg &= ~TOECS_MASK;352- mem_stcfg |= SBC_IDE_MDMA2_TCSOE | SBC_IDE_MDMA2_TOECS;353354- mode = XFER_MW_DMA_2;355- break;356- case XFER_MW_DMA_1:357- /* set timing parameters for RCS2# */358- mem_sttime = SBC_IDE_MDMA1_TWCS359- | SBC_IDE_MDMA1_TCSH360- | SBC_IDE_MDMA1_TCSOFF361- | SBC_IDE_MDMA1_TWP362- | SBC_IDE_MDMA1_TCSW363- | SBC_IDE_MDMA1_TPM364- | SBC_IDE_MDMA1_TA;365- /* set configuration for RCS2# */366- mem_stcfg &= ~TS_MASK;367- mem_stcfg &= ~TCSOE_MASK;368- mem_stcfg &= ~TOECS_MASK;369- mem_stcfg |= SBC_IDE_MDMA1_TCSOE | SBC_IDE_MDMA1_TOECS;370371- mode = XFER_MW_DMA_1;372- break;373- case XFER_MW_DMA_0:374- /* set timing parameters for RCS2# */375- mem_sttime = SBC_IDE_MDMA0_TWCS376- | SBC_IDE_MDMA0_TCSH377- | SBC_IDE_MDMA0_TCSOFF378- | SBC_IDE_MDMA0_TWP379- | SBC_IDE_MDMA0_TCSW380- | SBC_IDE_MDMA0_TPM381- | SBC_IDE_MDMA0_TA;382- /* set configuration for RCS2# */383- mem_stcfg |= TS_MASK;384- mem_stcfg &= ~TCSOE_MASK;385- mem_stcfg &= ~TOECS_MASK;386- mem_stcfg |= SBC_IDE_MDMA0_TCSOE | SBC_IDE_MDMA0_TOECS;387388- mode = XFER_MW_DMA_0;389- break;00000000000000000390#endif391- default:392- return 1;393- }000394395- /*396- * Tell the drive to switch to the new mode; abort on failure.397- */398- if (!mode || ide_config_drive_speed(drive, mode))399- {400- return 1; /* failure */401- }402403-404- au_writel(mem_sttime,MEM_STTIME2);405- au_writel(mem_stcfg,MEM_STCFG2);406-407- spin_unlock_irqrestore(&ide_tune_chipset_spin_lock, flags);408-409- return 0;410}411412/*413 * Multi-Word DMA + DbDMA functions414 */415-#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA416417-static int in_drive_list(struct hd_driveid *id,418- const struct drive_list_entry *drive_table)419-{420- for ( ; drive_table->id_model ; drive_table++){421- if ((!strcmp(drive_table->id_model, id->model)) &&422- ((strstr(drive_table->id_firmware, id->fw_rev)) ||423- (!strcmp(drive_table->id_firmware, "ALL")))424- )425- return 1;426- }427- return 0;428-}429430static int auide_build_sglist(ide_drive_t *drive, struct request *rq)431{432- ide_hwif_t *hwif = drive->hwif;433- _auide_hwif *ahwif = (_auide_hwif*)hwif->hwif_data;434- struct scatterlist *sg = hwif->sg_table;435436- ide_map_sg(drive, rq);437438- if (rq_data_dir(rq) == READ)439- hwif->sg_dma_direction = DMA_FROM_DEVICE;440- else441- hwif->sg_dma_direction = DMA_TO_DEVICE;442443- return dma_map_sg(ahwif->dev, sg, hwif->sg_nents,444- hwif->sg_dma_direction);445}446447static int auide_build_dmatable(ide_drive_t *drive)448{449- int i, iswrite, count = 0;450- ide_hwif_t *hwif = HWIF(drive);451452- struct request *rq = HWGROUP(drive)->rq;453454- _auide_hwif *ahwif = (_auide_hwif*)hwif->hwif_data;455- struct scatterlist *sg;456457- iswrite = (rq_data_dir(rq) == WRITE);458- /* Save for 
interrupt context */459- ahwif->drive = drive;460461- /* Build sglist */462- hwif->sg_nents = i = auide_build_sglist(drive, rq);463464- if (!i)465- return 0;466467- /* fill the descriptors */468- sg = hwif->sg_table;469- while (i && sg_dma_len(sg)) {470- u32 cur_addr;471- u32 cur_len;472473- cur_addr = sg_dma_address(sg);474- cur_len = sg_dma_len(sg);475476- while (cur_len) {477- u32 flags = DDMA_FLAGS_NOIE;478- unsigned int tc = (cur_len < 0xfe00)? cur_len: 0xfe00;479480- if (++count >= PRD_ENTRIES) {481- printk(KERN_WARNING "%s: DMA table too small\n",482- drive->name);483- goto use_pio_instead;484- }485486- /* Lets enable intr for the last descriptor only */487- if (1==i)488- flags = DDMA_FLAGS_IE;489- else490- flags = DDMA_FLAGS_NOIE;491492- if (iswrite) {493- if(!put_source_flags(ahwif->tx_chan,494- (void*)(page_address(sg->page)495- + sg->offset),496- tc, flags)) {497- printk(KERN_ERR "%s failed %d\n",498- __FUNCTION__, __LINE__);499 }500- } else501 {502- if(!put_dest_flags(ahwif->rx_chan,503- (void*)(page_address(sg->page)504- + sg->offset),505- tc, flags)) {506- printk(KERN_ERR "%s failed %d\n",507- __FUNCTION__, __LINE__);508 }509- }510511- cur_addr += tc;512- cur_len -= tc;513- }514- sg++;515- i--;516- }517518- if (count)519- return 1;520521-use_pio_instead:522- dma_unmap_sg(ahwif->dev,523- hwif->sg_table,524- hwif->sg_nents,525- hwif->sg_dma_direction);526527- return 0; /* revert to PIO for this request */528}529530static int auide_dma_end(ide_drive_t *drive)531{532- ide_hwif_t *hwif = HWIF(drive);533- _auide_hwif *ahwif = (_auide_hwif*)hwif->hwif_data;534535- if (hwif->sg_nents) {536- dma_unmap_sg(ahwif->dev, hwif->sg_table, hwif->sg_nents,537- hwif->sg_dma_direction);538- hwif->sg_nents = 0;539- }540541- return 0;542}543544static void auide_dma_start(ide_drive_t *drive )545{546-// printk("%s\n", __FUNCTION__);547}548549-ide_startstop_t auide_dma_intr(ide_drive_t *drive)550-{551- //printk("%s\n", __FUNCTION__);552-553- u8 stat = 0, dma_stat = 0;554-555- dma_stat = HWIF(drive)->ide_dma_end(drive);556- stat = HWIF(drive)->INB(IDE_STATUS_REG); /* get drive status */557- if (OK_STAT(stat,DRIVE_READY,drive->bad_wstat|DRQ_STAT)) {558- if (!dma_stat) {559- struct request *rq = HWGROUP(drive)->rq;560-561- ide_end_request(drive, 1, rq->nr_sectors);562- return ide_stopped;563- }564- printk(KERN_ERR "%s: dma_intr: bad DMA status (dma_stat=%x)\n",565- drive->name, dma_stat);566- }567- return ide_error(drive, "dma_intr", stat);568-}569570static void auide_dma_exec_cmd(ide_drive_t *drive, u8 command)571{572- //printk("%s\n", __FUNCTION__);573-574- /* issue cmd to drive */575- ide_execute_command(drive, command, &auide_dma_intr,576- (2*WAIT_CMD), NULL);577}578579static int auide_dma_setup(ide_drive_t *drive)580-{581-// printk("%s\n", __FUNCTION__);582583- if (drive->media != ide_disk)584- return 1;00585586- if (!auide_build_dmatable(drive))587- /* try PIO instead of DMA */588- return 1;589-590- drive->waiting_for_dma = 1;591-592- return 0;593}594595static int auide_dma_check(ide_drive_t *drive)596{597-// printk("%s\n", __FUNCTION__);598599#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA600- if( !dbdma_init_done ){601- auide_hwif.white_list = in_drive_list(drive->id,602- dma_white_list);603- auide_hwif.black_list = in_drive_list(drive->id,604- dma_black_list);605- auide_hwif.drive = drive;606- auide_ddma_init(&auide_hwif);607- dbdma_init_done = 1;608- }0609#endif610611- /* Is the drive in our DMA black list? 
*/612- if ( auide_hwif.black_list ) {613- drive->using_dma = 0;614- printk("%s found in dma_blacklist[]! Disabling DMA.\n",615- drive->id->model);616- }617- else618- drive->using_dma = 1;619620- return HWIF(drive)->ide_dma_host_on(drive);0000000000000000621}622623static int auide_dma_test_irq(ide_drive_t *drive)624-{625-// printk("%s\n", __FUNCTION__);626-627- if (!drive->waiting_for_dma)628- printk(KERN_WARNING "%s: ide_dma_test_irq \629 called while not waiting\n", drive->name);630631- /* If dbdma didn't execute the STOP command yet, the632- * active bit is still set633 */634- drive->waiting_for_dma++;635- if (drive->waiting_for_dma >= DMA_WAIT_TIMEOUT) {636- printk(KERN_WARNING "%s: timeout waiting for ddma to \637 complete\n", drive->name);638- return 1;639- }640- udelay(10);641- return 0;642}643644static int auide_dma_host_on(ide_drive_t *drive)645{646-// printk("%s\n", __FUNCTION__);647- return 0;648}649650static int auide_dma_on(ide_drive_t *drive)651{652-// printk("%s\n", __FUNCTION__);653- drive->using_dma = 1;654- return auide_dma_host_on(drive);655}656657658static int auide_dma_host_off(ide_drive_t *drive)659{660-// printk("%s\n", __FUNCTION__);661- return 0;662}663664static int auide_dma_off_quietly(ide_drive_t *drive)665{666-// printk("%s\n", __FUNCTION__);667- drive->using_dma = 0;668- return auide_dma_host_off(drive);669}670671static int auide_dma_lostirq(ide_drive_t *drive)672{673-// printk("%s\n", __FUNCTION__);674-675- printk(KERN_ERR "%s: IRQ lost\n", drive->name);676- return 0;677}678679static void auide_ddma_tx_callback(int irq, void *param, struct pt_regs *regs)680{681-// printk("%s\n", __FUNCTION__);682-683- _auide_hwif *ahwif = (_auide_hwif*)param;684- ahwif->drive->waiting_for_dma = 0;685- return;686}687688static void auide_ddma_rx_callback(int irq, void *param, struct pt_regs *regs)689{690-// printk("%s\n", __FUNCTION__);691-692- _auide_hwif *ahwif = (_auide_hwif*)param;693- ahwif->drive->waiting_for_dma = 0;694- return;695}000000000000000696697static int auide_dma_timeout(ide_drive_t *drive)698{699// printk("%s\n", __FUNCTION__);700701- printk(KERN_ERR "%s: DMA timeout occurred: ", drive->name);702703- if (HWIF(drive)->ide_dma_test_irq(drive))704- return 0;705706- return HWIF(drive)->ide_dma_end(drive);707}708-#endif /* end CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA */7090000071000000000000000000000000000000000000000000000000000000000000000000711static int auide_ddma_init( _auide_hwif *auide )712{713-// printk("%s\n", __FUNCTION__);0714715- dbdev_tab_t source_dev_tab;716-#if defined(CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA)717- dbdev_tab_t target_dev_tab;718- ide_hwif_t *hwif = auide->hwif;719- char warning_output [2][80];720- int i;721-#endif722-723- /* Add our custom device to DDMA device table */724- /* Create our new device entries in the table */725-#if defined(CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA)726- source_dev_tab.dev_id = AU1XXX_ATA_DDMA_REQ;727-728- if( auide->white_list || auide->black_list ){729- source_dev_tab.dev_tsize = 8;730- source_dev_tab.dev_devwidth = 32;731- source_dev_tab.dev_physaddr = (u32)AU1XXX_ATA_PHYS_ADDR;732- source_dev_tab.dev_intlevel = 0;733- source_dev_tab.dev_intpolarity = 0;734-735- /* init device table for target - static bus controller - */736- target_dev_tab.dev_id = DSCR_CMD0_ALWAYS;737- target_dev_tab.dev_tsize = 8;738- target_dev_tab.dev_devwidth = 32;739- target_dev_tab.dev_physaddr = (u32)AU1XXX_ATA_PHYS_ADDR;740- target_dev_tab.dev_intlevel = 0;741- target_dev_tab.dev_intpolarity = 0;742- target_dev_tab.dev_flags = 
DEV_FLAGS_ANYUSE;743- }744- else{745- source_dev_tab.dev_tsize = 1;746- source_dev_tab.dev_devwidth = 16;747- source_dev_tab.dev_physaddr = (u32)AU1XXX_ATA_PHYS_ADDR;748- source_dev_tab.dev_intlevel = 0;749- source_dev_tab.dev_intpolarity = 0;750-751- /* init device table for target - static bus controller - */752- target_dev_tab.dev_id = DSCR_CMD0_ALWAYS;753- target_dev_tab.dev_tsize = 1;754- target_dev_tab.dev_devwidth = 16;755- target_dev_tab.dev_physaddr = (u32)AU1XXX_ATA_PHYS_ADDR;756- target_dev_tab.dev_intlevel = 0;757- target_dev_tab.dev_intpolarity = 0;758- target_dev_tab.dev_flags = DEV_FLAGS_ANYUSE;759-760- sprintf(&warning_output[0][0],761- "%s is not on ide driver white list.",762- auide_hwif.drive->id->model);763- for ( i=strlen(&warning_output[0][0]) ; i<76; i++ ){764- sprintf(&warning_output[0][i]," ");765- }766-767- sprintf(&warning_output[1][0],768- "To add %s please read 'Documentation/mips/AU1xxx_IDE.README'.",769- auide_hwif.drive->id->model);770- for ( i=strlen(&warning_output[1][0]) ; i<76; i++ ){771- sprintf(&warning_output[1][i]," ");772- }773-774- printk("\n****************************************");775- printk("****************************************\n");776- printk("* %s *\n",&warning_output[0][0]);777- printk("* Switch to safe MWDMA Mode! ");778- printk(" *\n");779- printk("* %s *\n",&warning_output[1][0]);780- printk("****************************************");781- printk("****************************************\n\n");782- }783#else784- source_dev_tab.dev_id = DSCR_CMD0_ALWAYS;785- source_dev_tab.dev_tsize = 8;786- source_dev_tab.dev_devwidth = 32;787- source_dev_tab.dev_physaddr = (u32)AU1XXX_ATA_PHYS_ADDR;788- source_dev_tab.dev_intlevel = 0;789- source_dev_tab.dev_intpolarity = 0;790#endif791792-#if CONFIG_BLK_DEV_IDE_AU1XXX_BURSTABLE_ON793- /* set flags for tx channel */794- source_dev_tab.dev_flags = DEV_FLAGS_OUT795- | DEV_FLAGS_SYNC796- | DEV_FLAGS_BURSTABLE;797- auide->tx_dev_id = au1xxx_ddma_add_device( &source_dev_tab );798- /* set flags for rx channel */799- source_dev_tab.dev_flags = DEV_FLAGS_IN800- | DEV_FLAGS_SYNC801- | DEV_FLAGS_BURSTABLE;802- auide->rx_dev_id = au1xxx_ddma_add_device( &source_dev_tab );803-#else804- /* set flags for tx channel */805- source_dev_tab.dev_flags = DEV_FLAGS_OUT | DEV_FLAGS_SYNC;806- auide->tx_dev_id = au1xxx_ddma_add_device( &source_dev_tab );807- /* set flags for rx channel */808- source_dev_tab.dev_flags = DEV_FLAGS_IN | DEV_FLAGS_SYNC;809- auide->rx_dev_id = au1xxx_ddma_add_device( &source_dev_tab );810-#endif811812-#if defined(CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA)813-814- auide->target_dev_id = au1xxx_ddma_add_device(&target_dev_tab);815-816- /* Get a channel for TX */817- auide->tx_chan = au1xxx_dbdma_chan_alloc(auide->target_dev_id,818- auide->tx_dev_id,819- auide_ddma_tx_callback,820- (void*)auide);821- /* Get a channel for RX */822- auide->rx_chan = au1xxx_dbdma_chan_alloc(auide->rx_dev_id,823- auide->target_dev_id,824- auide_ddma_rx_callback,825- (void*)auide);826-#else /* CONFIG_BLK_DEV_IDE_AU1XXX_PIO_DBDMA */827- /*828- * Note: if call back is not enabled, update ctp->cur_ptr manually829- */830- auide->tx_chan = au1xxx_dbdma_chan_alloc(DSCR_CMD0_ALWAYS,831- auide->tx_dev_id,832- NULL,833- (void*)auide);834- auide->rx_chan = au1xxx_dbdma_chan_alloc(auide->rx_dev_id,835- DSCR_CMD0_ALWAYS,836- NULL,837- (void*)auide);838-#endif839- auide->tx_desc_head = (void*)au1xxx_dbdma_ring_alloc(auide->tx_chan,840- NUM_DESCRIPTORS);841- auide->rx_desc_head = (void*)au1xxx_dbdma_ring_alloc(auide->rx_chan,842- 
NUM_DESCRIPTORS);843-844-#if defined(CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA)845- hwif->dmatable_cpu = dma_alloc_coherent(auide->dev,846- PRD_ENTRIES * PRD_BYTES, /* 1 Page */847- &hwif->dmatable_dma, GFP_KERNEL);848-849- auide->sg_table = kmalloc(sizeof(struct scatterlist) * PRD_ENTRIES,850- GFP_KERNEL|GFP_DMA);851- if (auide->sg_table == NULL) {852- return -ENOMEM;853- }854-#endif855- au1xxx_dbdma_start( auide->tx_chan );856- au1xxx_dbdma_start( auide->rx_chan );857- return 0;858}0859860static void auide_setup_ports(hw_regs_t *hw, _auide_hwif *ahwif)861{862- int i;863-#define ide_ioreg_t unsigned long864- ide_ioreg_t *ata_regs = hw->io_ports;865866- /* fixme */867- for (i = 0; i < IDE_CONTROL_OFFSET; i++) {868- *ata_regs++ = (ide_ioreg_t) ahwif->regbase869- + (ide_ioreg_t)(i << AU1XXX_ATA_REG_OFFSET);870- }871872- /* set the Alternative Status register */873- *ata_regs = (ide_ioreg_t) ahwif->regbase874- + (ide_ioreg_t)(14 << AU1XXX_ATA_REG_OFFSET);875}876877static int au_ide_probe(struct device *dev)878{879 struct platform_device *pdev = to_platform_device(dev);880- _auide_hwif *ahwif = &auide_hwif;881- ide_hwif_t *hwif;882 struct resource *res;883 int ret = 0;884885#if defined(CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA)886- char *mode = "MWDMA2";887#elif defined(CONFIG_BLK_DEV_IDE_AU1XXX_PIO_DBDMA)888- char *mode = "PIO+DDMA(offload)";889#endif890891- memset(&auide_hwif, 0, sizeof(_auide_hwif));892- auide_hwif.dev = 0;893894 ahwif->dev = dev;895 ahwif->irq = platform_get_irq(pdev, 0);···675 goto out;676 }677678- if (!request_mem_region (res->start, res->end-res->start, pdev->name)) {679 pr_debug("%s: request_mem_region failed\n", DRV_NAME);680- ret = -EBUSY;681 goto out;682- }683684 ahwif->regbase = (u32)ioremap(res->start, res->end-res->start);685 if (ahwif->regbase == 0) {···687 goto out;688 }689690- hwif = &ide_hwifs[pdev->id];691- hw_regs_t *hw = &hwif->hw;692- hwif->irq = hw->irq = ahwif->irq;693- hwif->chipset = ide_au1xxx;694695- auide_setup_ports(hw, ahwif);00000696 memcpy(hwif->io_ports, hw->io_ports, sizeof(hwif->io_ports));697698-#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_SEQTS_PER_RQ699- hwif->rqsize = CONFIG_BLK_DEV_IDE_AU1XXX_SEQTS_PER_RQ;700- hwif->rqsize = ((hwif->rqsize > AU1XXX_ATA_RQSIZE)701- || (hwif->rqsize < 32)) ? 
AU1XXX_ATA_RQSIZE : hwif->rqsize;702-#else /* if kernel config is not set */703- hwif->rqsize = AU1XXX_ATA_RQSIZE;704-#endif705-706- hwif->ultra_mask = 0x0; /* Disable Ultra DMA */707#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA708- hwif->mwdma_mask = 0x07; /* Multimode-2 DMA */709- hwif->swdma_mask = 0x07;710#else711- hwif->mwdma_mask = 0x0;712- hwif->swdma_mask = 0x0;713#endif714- //hwif->noprobe = !hwif->io_ports[IDE_DATA_OFFSET];715- hwif->noprobe = 0;716- hwif->drives[0].unmask = 1;717- hwif->drives[1].unmask = 1;718719- /* hold should be on in all cases */720- hwif->hold = 1;721- hwif->mmio = 2;722723- /* set up local I/O function entry points */724- hwif->INB = auide_inb;725- hwif->INW = auide_inw;726- hwif->INL = auide_inl;727- hwif->INSW = auide_insw;728- hwif->INSL = auide_insl;729- hwif->OUTB = auide_outb;730- hwif->OUTBSYNC = auide_outbsync;731- hwif->OUTW = auide_outw;732- hwif->OUTL = auide_outl;733- hwif->OUTSW = auide_outsw;734- hwif->OUTSL = auide_outsl;735736- hwif->tuneproc = &auide_tune_drive;737- hwif->speedproc = &auide_tune_chipset;000000000738739#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA740- hwif->ide_dma_off_quietly = &auide_dma_off_quietly;741- hwif->ide_dma_timeout = &auide_dma_timeout;742743- hwif->ide_dma_check = &auide_dma_check;744- hwif->dma_exec_cmd = &auide_dma_exec_cmd;745- hwif->dma_start = &auide_dma_start;746- hwif->ide_dma_end = &auide_dma_end;747- hwif->dma_setup = &auide_dma_setup;748- hwif->ide_dma_test_irq = &auide_dma_test_irq;749- hwif->ide_dma_host_off = &auide_dma_host_off;750- hwif->ide_dma_host_on = &auide_dma_host_on;751- hwif->ide_dma_lostirq = &auide_dma_lostirq;752- hwif->ide_dma_on = &auide_dma_on;753754- hwif->autodma = 1;755- hwif->drives[0].autodma = hwif->autodma;756- hwif->drives[1].autodma = hwif->autodma;757- hwif->atapi_dma = 1;758- hwif->drives[0].using_dma = 1;759- hwif->drives[1].using_dma = 1;760#else /* !CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA */761- hwif->autodma = 0;762- hwif->channel = 0;763- hwif->hold = 1;764- hwif->select_data = 0; /* no chipset-specific code */765- hwif->config_data = 0; /* no chipset-specific code */766767- hwif->drives[0].autodma = 0;768- hwif->drives[0].drive_data = 0; /* no drive data */769- hwif->drives[0].using_dma = 0;770- hwif->drives[0].waiting_for_dma = 0;771- hwif->drives[0].autotune = 1; /* 1=autotune, 2=noautotune, 0=default */772- /* secondary hdd not supported */773- hwif->drives[1].autodma = 0;774-775- hwif->drives[1].drive_data = 0;776- hwif->drives[1].using_dma = 0;777- hwif->drives[1].waiting_for_dma = 0;778- hwif->drives[1].autotune = 2; /* 1=autotune, 2=noautotune, 0=default */779#endif780- hwif->drives[0].io_32bit = 0; /* 0=16-bit, 1=32-bit, 2/3=32bit+sync */781- hwif->drives[1].io_32bit = 0; /* 0=16-bit, 1=32-bit, 2/3=32bit+sync */782783- /*Register Driver with PM Framework*/784-#ifdef CONFIG_PM785- auide_hwif.pm.lock = SPIN_LOCK_UNLOCKED;786- auide_hwif.pm.stopped = 0;787788- auide_hwif.pm.dev = new_au1xxx_power_device( "ide",789- &au1200ide_pm_callback,790- NULL);791- if ( auide_hwif.pm.dev == NULL )792- printk(KERN_INFO "Unable to create a power management \793- device entry for the au1200-IDE.\n");794- else795- printk(KERN_INFO "Power management device entry for the \796- au1200-IDE loaded.\n");797-#endif798-799- auide_hwif.hwif = hwif;800- hwif->hwif_data = &auide_hwif;801-802-#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_PIO_DBDMA803- auide_ddma_init(&auide_hwif);804- dbdma_init_done = 1;805#endif806807 probe_hwif_init(hwif);808 dev_set_drvdata(dev, hwif);809810- printk(KERN_INFO 
"Au1xxx IDE(builtin) configured for %s\n", mode );811812-out:813- return ret;814}815816static int au_ide_remove(struct device *dev)···780 struct platform_device *pdev = to_platform_device(dev);781 struct resource *res;782 ide_hwif_t *hwif = dev_get_drvdata(dev);783- _auide_hwif *ahwif = &auide_hwif;784785 ide_unregister(hwif - ide_hwifs);786···804 return driver_register(&au1200_ide_driver);805}806807-static void __init au_ide_exit(void)808{809 driver_unregister(&au1200_ide_driver);810}811-812-#ifdef CONFIG_PM813-int au1200ide_pm_callback( au1xxx_power_dev_t *dev,\814- au1xxx_request_t request, void *data) {815-816- unsigned int d, err = 0;817- unsigned long flags;818-819- spin_lock_irqsave(auide_hwif.pm.lock, flags);820-821- switch (request){822- case AU1XXX_PM_SLEEP:823- err = au1xxxide_pm_sleep(dev);824- break;825- case AU1XXX_PM_WAKEUP:826- d = *((unsigned int*)data);827- if ( d > 0 && d <= 99) {828- err = au1xxxide_pm_standby(dev);829- }830- else {831- err = au1xxxide_pm_resume(dev);832- }833- break;834- case AU1XXX_PM_GETSTATUS:835- err = au1xxxide_pm_getstatus(dev);836- break;837- case AU1XXX_PM_ACCESS:838- err = au1xxxide_pm_access(dev);839- break;840- case AU1XXX_PM_IDLE:841- err = au1xxxide_pm_idle(dev);842- break;843- case AU1XXX_PM_CLEANUP:844- err = au1xxxide_pm_cleanup(dev);845- break;846- default:847- err = -1;848- break;849- }850-851- spin_unlock_irqrestore(auide_hwif.pm.lock, flags);852-853- return err;854-}855-856-static int au1xxxide_pm_standby( au1xxx_power_dev_t *dev ) {857- return 0;858-}859-860-static int au1xxxide_pm_sleep( au1xxx_power_dev_t *dev ) {861-862- int retval;863- ide_hwif_t *hwif = auide_hwif.hwif;864- struct request rq;865- struct request_pm_state rqpm;866- ide_task_t args;867-868- if(auide_hwif.pm.stopped)869- return -1;870-871- /*872- * wait until hard disc is ready873- */874- if ( wait_for_ready(&hwif->drives[0], 35000) ) {875- printk("Wait for drive sleep timeout!\n");876- retval = -1;877- }878-879- /*880- * sequenz to tell the high level ide driver that pm is resuming881- */882- memset(&rq, 0, sizeof(rq));883- memset(&rqpm, 0, sizeof(rqpm));884- memset(&args, 0, sizeof(args));885- rq.flags = REQ_PM_SUSPEND;886- rq.special = &args;887- rq.pm = &rqpm;888- rqpm.pm_step = ide_pm_state_start_suspend;889- rqpm.pm_state = PMSG_SUSPEND;890-891- retval = ide_do_drive_cmd(&hwif->drives[0], &rq, ide_wait);892-893- if (wait_for_ready (&hwif->drives[0], 35000)) {894- printk("Wait for drive sleep timeout!\n");895- retval = -1;896- }897-898- /*899- * stop dbdma channels900- */901- au1xxx_dbdma_reset(auide_hwif.tx_chan);902- au1xxx_dbdma_reset(auide_hwif.rx_chan);903-904- auide_hwif.pm.stopped = 1;905-906- return retval;907-}908-909-static int au1xxxide_pm_resume( au1xxx_power_dev_t *dev ) {910-911- int retval;912- ide_hwif_t *hwif = auide_hwif.hwif;913- struct request rq;914- struct request_pm_state rqpm;915- ide_task_t args;916-917- if(!auide_hwif.pm.stopped)918- return -1;919-920- /*921- * start dbdma channels922- */923- au1xxx_dbdma_start(auide_hwif.tx_chan);924- au1xxx_dbdma_start(auide_hwif.rx_chan);925-926- /*927- * wait until hard disc is ready928- */929- if (wait_for_ready ( &hwif->drives[0], 35000)) {930- printk("Wait for drive wake up timeout!\n");931- retval = -1;932- }933-934- /*935- * sequenz to tell the high level ide driver that pm is resuming936- */937- memset(&rq, 0, sizeof(rq));938- memset(&rqpm, 0, sizeof(rqpm));939- memset(&args, 0, sizeof(args));940- rq.flags = REQ_PM_RESUME;941- rq.special = &args;942- rq.pm = &rqpm;943- rqpm.pm_step = 
ide_pm_state_start_resume;944- rqpm.pm_state = PMSG_ON;945-946- retval = ide_do_drive_cmd(&hwif->drives[0], &rq, ide_head_wait);947-948- /*949- * wait for hard disc950- */951- if ( wait_for_ready(&hwif->drives[0], 35000) ) {952- printk("Wait for drive wake up timeout!\n");953- retval = -1;954- }955-956- auide_hwif.pm.stopped = 0;957-958- return retval;959-}960-961-static int au1xxxide_pm_getstatus( au1xxx_power_dev_t *dev ) {962- return dev->cur_state;963-}964-965-static int au1xxxide_pm_access( au1xxx_power_dev_t *dev ) {966- if (dev->cur_state != AWAKE_STATE)967- return 0;968- else969- return -1;970-}971-972-static int au1xxxide_pm_idle( au1xxx_power_dev_t *dev ) {973- return 0;974-}975-976-static int au1xxxide_pm_cleanup( au1xxx_power_dev_t *dev ) {977- return 0;978-}979-#endif /* CONFIG_PM */980981MODULE_LICENSE("GPL");982MODULE_DESCRIPTION("AU1200 IDE driver");
···31 */32#undef REALLY_SLOW_IO /* most systems can safely undef this */33034#include <linux/types.h>35#include <linux/module.h>36#include <linux/kernel.h>37#include <linux/delay.h>38+#include <linux/platform_device.h>39+0040#include <linux/init.h>41#include <linux/ide.h>42#include <linux/sysdev.h>4344#include <linux/dma-mapping.h>4546+#include "ide-timing.h"47+48#include <asm/io.h>49#include <asm/mach-au1x00/au1xxx.h>50#include <asm/mach-au1x00/au1xxx_dbdma.h>00005152#include <asm/mach-au1x00/au1xxx_ide.h>5354#define DRV_NAME "au1200-ide"55#define DRV_VERSION "1.0"56+#define DRV_AUTHOR "Enrico Walther <enrico.walther@amd.com> / Pete Popov <ppopov@embeddedalley.com>"57+58+/* enable the burstmode in the dbdma */59+#define IDE_AU1XXX_BURSTMODE 16061static _auide_hwif auide_hwif;62+static int dbdma_init_done;006364+#if defined(CONFIG_BLK_DEV_IDE_AU1XXX_PIO_DBDMA)00000000000000006566void auide_insw(unsigned long port, void *addr, u32 count)67{68+ _auide_hwif *ahwif = &auide_hwif;69+ chan_tab_t *ctp;70+ au1x_ddma_desc_t *dp;7172+ if(!put_dest_flags(ahwif->rx_chan, (void*)addr, count << 1, 73+ DDMA_FLAGS_NOIE)) {74+ printk(KERN_ERR "%s failed %d\n", __FUNCTION__, __LINE__);75+ return;76+ }77+ ctp = *((chan_tab_t **)ahwif->rx_chan);78+ dp = ctp->cur_ptr;79+ while (dp->dscr_cmd0 & DSCR_CMD0_V)80+ ;81+ ctp->cur_ptr = au1xxx_ddma_get_nextptr_virt(dp);000000000000000000000000000000000000000000082}8384void auide_outsw(unsigned long port, void *addr, u32 count)85{86+ _auide_hwif *ahwif = &auide_hwif;87+ chan_tab_t *ctp;88+ au1x_ddma_desc_t *dp;08990+ if(!put_source_flags(ahwif->tx_chan, (void*)addr,91+ count << 1, DDMA_FLAGS_NOIE)) {92+ printk(KERN_ERR "%s failed %d\n", __FUNCTION__, __LINE__);93+ return;94+ }95+ ctp = *((chan_tab_t **)ahwif->tx_chan);96+ dp = ctp->cur_ptr;97+ while (dp->dscr_cmd0 & DSCR_CMD0_V)98+ ;99+ ctp->cur_ptr = au1xxx_ddma_get_nextptr_virt(dp);100+}101+0000102#endif0000000000000103104static void auide_tune_drive(ide_drive_t *drive, byte pio)105{106+ int mem_sttime;107+ int mem_stcfg;108+ u8 speed;0109110+ /* get the best pio mode for the drive */111+ pio = ide_get_best_pio_mode(drive, pio, 4, NULL);112113+ printk(KERN_INFO "%s: setting Au1XXX IDE to PIO mode%d\n",114+ drive->name, pio);115116+ mem_sttime = 0;117+ mem_stcfg = au_readl(MEM_STCFG2);118119+ /* set pio mode! 
*/120+ switch(pio) {121+ case 0:122+ mem_sttime = SBC_IDE_TIMING(PIO0);123124+ /* set configuration for RCS2# */125+ mem_stcfg |= TS_MASK;126+ mem_stcfg &= ~TCSOE_MASK;127+ mem_stcfg &= ~TOECS_MASK;128+ mem_stcfg |= SBC_IDE_PIO0_TCSOE | SBC_IDE_PIO0_TOECS;129+ break;0000000000130131+ case 1:132+ mem_sttime = SBC_IDE_TIMING(PIO1);0133134+ /* set configuration for RCS2# */135+ mem_stcfg |= TS_MASK;136+ mem_stcfg &= ~TCSOE_MASK;137+ mem_stcfg &= ~TOECS_MASK;138+ mem_stcfg |= SBC_IDE_PIO1_TCSOE | SBC_IDE_PIO1_TOECS;139+ break;000000000140141+ case 2:142+ mem_sttime = SBC_IDE_TIMING(PIO2);0000000000000143144+ /* set configuration for RCS2# */145+ mem_stcfg &= ~TS_MASK;146+ mem_stcfg &= ~TCSOE_MASK;147+ mem_stcfg &= ~TOECS_MASK;148+ mem_stcfg |= SBC_IDE_PIO2_TCSOE | SBC_IDE_PIO2_TOECS;149+ break;000000000150151+ case 3:152+ mem_sttime = SBC_IDE_TIMING(PIO3);153154+ /* set configuration for RCS2# */155+ mem_stcfg &= ~TS_MASK;156+ mem_stcfg &= ~TCSOE_MASK;157+ mem_stcfg &= ~TOECS_MASK;158+ mem_stcfg |= SBC_IDE_PIO3_TCSOE | SBC_IDE_PIO3_TOECS;00000000000159160+ break;0161162+ case 4:163+ mem_sttime = SBC_IDE_TIMING(PIO4);164165+ /* set configuration for RCS2# */166+ mem_stcfg &= ~TS_MASK;167+ mem_stcfg &= ~TCSOE_MASK;168+ mem_stcfg &= ~TOECS_MASK;169+ mem_stcfg |= SBC_IDE_PIO4_TCSOE | SBC_IDE_PIO4_TOECS;170+ break;171+ }172+173+ au_writel(mem_sttime,MEM_STTIME2);174+ au_writel(mem_stcfg,MEM_STCFG2);175+176+ speed = pio + XFER_PIO_0;177+ ide_config_drive_speed(drive, speed);178}179180static int auide_tune_chipset (ide_drive_t *drive, u8 speed)181{182+ int mem_sttime;183+ int mem_stcfg;184+ unsigned long mode;000185186+#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA187+ if (ide_use_dma(drive))188+ mode = ide_dma_speed(drive, 0);000000000000189#endif190191+ mem_sttime = 0;192+ mem_stcfg = au_readl(MEM_STCFG2);193194+ if (speed >= XFER_PIO_0 && speed <= XFER_PIO_4) {195+ auide_tune_drive(drive, speed - XFER_PIO_0);196+ return 0;197+ }198+199+ switch(speed) {00000200#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA201+ case XFER_MW_DMA_2:202+ mem_sttime = SBC_IDE_TIMING(MDMA2);000000000000203204+ /* set configuration for RCS2# */205+ mem_stcfg &= ~TS_MASK;206+ mem_stcfg &= ~TCSOE_MASK;207+ mem_stcfg &= ~TOECS_MASK;208+ mem_stcfg |= SBC_IDE_MDMA2_TCSOE | SBC_IDE_MDMA2_TOECS;00000000000209210+ mode = XFER_MW_DMA_2;211+ break;212+ case XFER_MW_DMA_1:213+ mem_sttime = SBC_IDE_TIMING(MDMA1);000000000000214215+ /* set configuration for RCS2# */216+ mem_stcfg &= ~TS_MASK;217+ mem_stcfg &= ~TCSOE_MASK;218+ mem_stcfg &= ~TOECS_MASK;219+ mem_stcfg |= SBC_IDE_MDMA1_TCSOE | SBC_IDE_MDMA1_TOECS;220+221+ mode = XFER_MW_DMA_1;222+ break;223+ case XFER_MW_DMA_0:224+ mem_sttime = SBC_IDE_TIMING(MDMA0);225+226+ /* set configuration for RCS2# */227+ mem_stcfg |= TS_MASK;228+ mem_stcfg &= ~TCSOE_MASK;229+ mem_stcfg &= ~TOECS_MASK;230+ mem_stcfg |= SBC_IDE_MDMA0_TCSOE | SBC_IDE_MDMA0_TOECS;231+232+ mode = XFER_MW_DMA_0;233+ break;234#endif235+ default:236+ return 1;237+ }238+239+ if (ide_config_drive_speed(drive, mode))240+ return 1;241242+ au_writel(mem_sttime,MEM_STTIME2);243+ au_writel(mem_stcfg,MEM_STCFG2);00000244245+ return 0;000000246}247248/*249 * Multi-Word DMA + DbDMA functions250 */0251252+#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA00000000000253254static int auide_build_sglist(ide_drive_t *drive, struct request *rq)255{256+ ide_hwif_t *hwif = drive->hwif;257+ _auide_hwif *ahwif = (_auide_hwif*)hwif->hwif_data;258+ struct scatterlist *sg = hwif->sg_table;259260+ ide_map_sg(drive, rq);261262+ if (rq_data_dir(rq) == 
READ)263+ hwif->sg_dma_direction = DMA_FROM_DEVICE;264+ else265+ hwif->sg_dma_direction = DMA_TO_DEVICE;266267+ return dma_map_sg(ahwif->dev, sg, hwif->sg_nents,268+ hwif->sg_dma_direction);269}270271static int auide_build_dmatable(ide_drive_t *drive)272{273+ int i, iswrite, count = 0;274+ ide_hwif_t *hwif = HWIF(drive);275276+ struct request *rq = HWGROUP(drive)->rq;277278+ _auide_hwif *ahwif = (_auide_hwif*)hwif->hwif_data;279+ struct scatterlist *sg;280281+ iswrite = (rq_data_dir(rq) == WRITE);282+ /* Save for interrupt context */283+ ahwif->drive = drive;284285+ /* Build sglist */286+ hwif->sg_nents = i = auide_build_sglist(drive, rq);287288+ if (!i)289+ return 0;290291+ /* fill the descriptors */292+ sg = hwif->sg_table;293+ while (i && sg_dma_len(sg)) {294+ u32 cur_addr;295+ u32 cur_len;296297+ cur_addr = sg_dma_address(sg);298+ cur_len = sg_dma_len(sg);299300+ while (cur_len) {301+ u32 flags = DDMA_FLAGS_NOIE;302+ unsigned int tc = (cur_len < 0xfe00)? cur_len: 0xfe00;303304+ if (++count >= PRD_ENTRIES) {305+ printk(KERN_WARNING "%s: DMA table too small\n",306+ drive->name);307+ goto use_pio_instead;308+ }309310+ /* Lets enable intr for the last descriptor only */311+ if (1==i)312+ flags = DDMA_FLAGS_IE;313+ else314+ flags = DDMA_FLAGS_NOIE;315316+ if (iswrite) {317+ if(!put_source_flags(ahwif->tx_chan, 318+ (void*)(page_address(sg->page) 319+ + sg->offset), 320+ tc, flags)) { 321+ printk(KERN_ERR "%s failed %d\n", 322+ __FUNCTION__, __LINE__);323 }324+ } else 325 {326+ if(!put_dest_flags(ahwif->rx_chan, 327+ (void*)(page_address(sg->page) 328+ + sg->offset), 329+ tc, flags)) { 330+ printk(KERN_ERR "%s failed %d\n", 331+ __FUNCTION__, __LINE__);332 }333+ }334335+ cur_addr += tc;336+ cur_len -= tc;337+ }338+ sg++;339+ i--;340+ }341342+ if (count)343+ return 1;344345+ use_pio_instead:346+ dma_unmap_sg(ahwif->dev,347+ hwif->sg_table,348+ hwif->sg_nents,349+ hwif->sg_dma_direction);350351+ return 0; /* revert to PIO for this request */352}353354static int auide_dma_end(ide_drive_t *drive)355{356+ ide_hwif_t *hwif = HWIF(drive);357+ _auide_hwif *ahwif = (_auide_hwif*)hwif->hwif_data;358359+ if (hwif->sg_nents) {360+ dma_unmap_sg(ahwif->dev, hwif->sg_table, hwif->sg_nents,361+ hwif->sg_dma_direction);362+ hwif->sg_nents = 0;363+ }364365+ return 0;366}367368static void auide_dma_start(ide_drive_t *drive )369{0370}37100000000000000000000372373static void auide_dma_exec_cmd(ide_drive_t *drive, u8 command)374{375+ /* issue cmd to drive */376+ ide_execute_command(drive, command, &ide_dma_intr,377+ (2*WAIT_CMD), NULL);00378}379380static int auide_dma_setup(ide_drive_t *drive)381+{ 382+ struct request *rq = HWGROUP(drive)->rq;383384+ if (!auide_build_dmatable(drive)) {385+ ide_map_sg(drive, rq);386+ return 1;387+ }388389+ drive->waiting_for_dma = 1;390+ return 0;00000391}392393static int auide_dma_check(ide_drive_t *drive)394{395+ u8 speed;396397#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA398+399+ if( dbdma_init_done == 0 ){400+ auide_hwif.white_list = ide_in_drive_list(drive->id,401+ dma_white_list);402+ auide_hwif.black_list = ide_in_drive_list(drive->id,403+ dma_black_list);404+ auide_hwif.drive = drive;405+ auide_ddma_init(&auide_hwif);406+ dbdma_init_done = 1;407+ }408#endif409410+ /* Is the drive in our DMA black list? 
*/0000000411412+ if ( auide_hwif.black_list ) {413+ drive->using_dma = 0;414+415+ /* Borrowed the warning message from ide-dma.c */416+417+ printk(KERN_WARNING "%s: Disabling DMA for %s (blacklisted)\n",418+ drive->name, drive->id->model); 419+ }420+ else421+ drive->using_dma = 1;422+423+ speed = ide_find_best_mode(drive, XFER_PIO | XFER_MWDMA);424+425+ if (drive->autodma && (speed & XFER_MODE) != XFER_PIO)426+ return HWIF(drive)->ide_dma_on(drive);427+428+ return HWIF(drive)->ide_dma_off_quietly(drive);429}430431static int auide_dma_test_irq(ide_drive_t *drive)432+{ 433+ if (drive->waiting_for_dma == 0)434+ printk(KERN_WARNING "%s: ide_dma_test_irq \00435 called while not waiting\n", drive->name);436437+ /* If dbdma didn't execute the STOP command yet, the438+ * active bit is still set439 */440+ drive->waiting_for_dma++;441+ if (drive->waiting_for_dma >= DMA_WAIT_TIMEOUT) {442+ printk(KERN_WARNING "%s: timeout waiting for ddma to \443 complete\n", drive->name);444+ return 1;445+ }446+ udelay(10);447+ return 0;448}449450static int auide_dma_host_on(ide_drive_t *drive)451{452+ return 0;0453}454455static int auide_dma_on(ide_drive_t *drive)456{457+ drive->using_dma = 1;458+ return auide_dma_host_on(drive);0459}460461462static int auide_dma_host_off(ide_drive_t *drive)463{464+ return 0;0465}466467static int auide_dma_off_quietly(ide_drive_t *drive)468{469+ drive->using_dma = 0;470+ return auide_dma_host_off(drive);0471}472473static int auide_dma_lostirq(ide_drive_t *drive)474{475+ printk(KERN_ERR "%s: IRQ lost\n", drive->name);476+ return 0;00477}478479static void auide_ddma_tx_callback(int irq, void *param, struct pt_regs *regs)480{481+ _auide_hwif *ahwif = (_auide_hwif*)param;482+ ahwif->drive->waiting_for_dma = 0;000483}484485static void auide_ddma_rx_callback(int irq, void *param, struct pt_regs *regs)486{487+ _auide_hwif *ahwif = (_auide_hwif*)param;488+ ahwif->drive->waiting_for_dma = 0;000489}490+491+#endif /* end CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA */492+493+static void auide_init_dbdma_dev(dbdev_tab_t *dev, u32 dev_id, u32 tsize, u32 devwidth, u32 flags)494+{495+ dev->dev_id = dev_id;496+ dev->dev_physaddr = (u32)AU1XXX_ATA_PHYS_ADDR;497+ dev->dev_intlevel = 0;498+ dev->dev_intpolarity = 0;499+ dev->dev_tsize = tsize;500+ dev->dev_devwidth = devwidth;501+ dev->dev_flags = flags;502+}503+504+#if defined(CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA)505506static int auide_dma_timeout(ide_drive_t *drive)507{508// printk("%s\n", __FUNCTION__);509510+ printk(KERN_ERR "%s: DMA timeout occurred: ", drive->name);511512+ if (HWIF(drive)->ide_dma_test_irq(drive))513+ return 0;514515+ return HWIF(drive)->ide_dma_end(drive);516}517+518519+static int auide_ddma_init(_auide_hwif *auide) {520+521+ dbdev_tab_t source_dev_tab, target_dev_tab;522+ u32 dev_id, tsize, devwidth, flags;523+ ide_hwif_t *hwif = auide->hwif;524525+ dev_id = AU1XXX_ATA_DDMA_REQ;526+527+ if (auide->white_list || auide->black_list) {528+ tsize = 8;529+ devwidth = 32;530+ }531+ else { 532+ tsize = 1;533+ devwidth = 16;534+535+ printk(KERN_ERR "au1xxx-ide: %s is not on ide driver whitelist.\n",auide_hwif.drive->id->model);536+ printk(KERN_ERR " please read 'Documentation/mips/AU1xxx_IDE.README'");537+ }538+539+#ifdef IDE_AU1XXX_BURSTMODE 540+ flags = DEV_FLAGS_SYNC | DEV_FLAGS_BURSTABLE;541+#else542+ flags = DEV_FLAGS_SYNC;543+#endif544+545+ /* setup dev_tab for tx channel */546+ auide_init_dbdma_dev( &source_dev_tab,547+ dev_id,548+ tsize, devwidth, DEV_FLAGS_OUT | flags);549+ auide->tx_dev_id = au1xxx_ddma_add_device( &source_dev_tab 
);550+551+ auide_init_dbdma_dev( &source_dev_tab,552+ dev_id,553+ tsize, devwidth, DEV_FLAGS_IN | flags);554+ auide->rx_dev_id = au1xxx_ddma_add_device( &source_dev_tab );555+556+ /* We also need to add a target device for the DMA */557+ auide_init_dbdma_dev( &target_dev_tab,558+ (u32)DSCR_CMD0_ALWAYS,559+ tsize, devwidth, DEV_FLAGS_ANYUSE);560+ auide->target_dev_id = au1xxx_ddma_add_device(&target_dev_tab); 561+562+ /* Get a channel for TX */563+ auide->tx_chan = au1xxx_dbdma_chan_alloc(auide->target_dev_id,564+ auide->tx_dev_id,565+ auide_ddma_tx_callback,566+ (void*)auide);567+568+ /* Get a channel for RX */569+ auide->rx_chan = au1xxx_dbdma_chan_alloc(auide->rx_dev_id,570+ auide->target_dev_id,571+ auide_ddma_rx_callback,572+ (void*)auide);573+574+ auide->tx_desc_head = (void*)au1xxx_dbdma_ring_alloc(auide->tx_chan,575+ NUM_DESCRIPTORS);576+ auide->rx_desc_head = (void*)au1xxx_dbdma_ring_alloc(auide->rx_chan,577+ NUM_DESCRIPTORS);578+579+ hwif->dmatable_cpu = dma_alloc_coherent(auide->dev,580+ PRD_ENTRIES * PRD_BYTES, /* 1 Page */581+ &hwif->dmatable_dma, GFP_KERNEL);582+583+ au1xxx_dbdma_start( auide->tx_chan );584+ au1xxx_dbdma_start( auide->rx_chan );585+586+ return 0;587+} 588+#else589+590static int auide_ddma_init( _auide_hwif *auide )591{592+ dbdev_tab_t source_dev_tab;593+ int flags;594595+#ifdef IDE_AU1XXX_BURSTMODE 596+ flags = DEV_FLAGS_SYNC | DEV_FLAGS_BURSTABLE;000000000000000000000000000000000000000000000000000000000000000000597#else598+ flags = DEV_FLAGS_SYNC;00000599#endif600601+ /* setup dev_tab for tx channel */602+ auide_init_dbdma_dev( &source_dev_tab,603+ (u32)DSCR_CMD0_ALWAYS,604+ 8, 32, DEV_FLAGS_OUT | flags);605+ auide->tx_dev_id = au1xxx_ddma_add_device( &source_dev_tab );00000000000000606607+ auide_init_dbdma_dev( &source_dev_tab,608+ (u32)DSCR_CMD0_ALWAYS,609+ 8, 32, DEV_FLAGS_IN | flags);610+ auide->rx_dev_id = au1xxx_ddma_add_device( &source_dev_tab );611+612+ /* Get a channel for TX */613+ auide->tx_chan = au1xxx_dbdma_chan_alloc(DSCR_CMD0_ALWAYS,614+ auide->tx_dev_id,615+ NULL,616+ (void*)auide);617+618+ /* Get a channel for RX */619+ auide->rx_chan = au1xxx_dbdma_chan_alloc(auide->rx_dev_id,620+ DSCR_CMD0_ALWAYS,621+ NULL,622+ (void*)auide);623+624+ auide->tx_desc_head = (void*)au1xxx_dbdma_ring_alloc(auide->tx_chan,625+ NUM_DESCRIPTORS);626+ auide->rx_desc_head = (void*)au1xxx_dbdma_ring_alloc(auide->rx_chan,627+ NUM_DESCRIPTORS);628+629+ au1xxx_dbdma_start( auide->tx_chan );630+ au1xxx_dbdma_start( auide->rx_chan );631+632+ return 0;00000000000000000000633}634+#endif635636static void auide_setup_ports(hw_regs_t *hw, _auide_hwif *ahwif)637{638+ int i;639+ unsigned long *ata_regs = hw->io_ports;0640641+ /* FIXME? 
*/642+ for (i = 0; i < IDE_CONTROL_OFFSET; i++) {643+ *ata_regs++ = ahwif->regbase + (i << AU1XXX_ATA_REG_OFFSET);644+ }0645646+ /* set the Alternative Status register */647+ *ata_regs = ahwif->regbase + (14 << AU1XXX_ATA_REG_OFFSET);0648}649650static int au_ide_probe(struct device *dev)651{652 struct platform_device *pdev = to_platform_device(dev);653+ _auide_hwif *ahwif = &auide_hwif;654+ ide_hwif_t *hwif;655 struct resource *res;656 int ret = 0;657658#if defined(CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA)659+ char *mode = "MWDMA2";660#elif defined(CONFIG_BLK_DEV_IDE_AU1XXX_PIO_DBDMA)661+ char *mode = "PIO+DDMA(offload)";662#endif663664+ memset(&auide_hwif, 0, sizeof(_auide_hwif));665+ auide_hwif.dev = 0;666667 ahwif->dev = dev;668 ahwif->irq = platform_get_irq(pdev, 0);···902 goto out;903 }904905+ if (!request_mem_region (res->start, res->end-res->start, pdev->name)) {906 pr_debug("%s: request_mem_region failed\n", DRV_NAME);907+ ret = -EBUSY;908 goto out;909+ }910911 ahwif->regbase = (u32)ioremap(res->start, res->end-res->start);912 if (ahwif->regbase == 0) {···914 goto out;915 }916917+ /* FIXME: This might possibly break PCMCIA IDE devices */000918919+ hwif = &ide_hwifs[pdev->id];920+ hw_regs_t *hw = &hwif->hw;921+ hwif->irq = hw->irq = ahwif->irq;922+ hwif->chipset = ide_au1xxx;923+924+ auide_setup_ports(hw, ahwif);925 memcpy(hwif->io_ports, hw->io_ports, sizeof(hwif->io_ports));926927+ hwif->ultra_mask = 0x0; /* Disable Ultra DMA */00000000928#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA929+ hwif->mwdma_mask = 0x07; /* Multimode-2 DMA */930+ hwif->swdma_mask = 0x00;931#else932+ hwif->mwdma_mask = 0x0;933+ hwif->swdma_mask = 0x0;934#endif0000935936+ hwif->noprobe = 0;937+ hwif->drives[0].unmask = 1;938+ hwif->drives[1].unmask = 1;939940+ /* hold should be on in all cases */941+ hwif->hold = 1;942+ hwif->mmio = 2;000000000943944+ /* If the user has selected DDMA assisted copies,945+ then set up a few local I/O function entry points 946+ */947+948+#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_PIO_DBDMA 949+ hwif->INSW = auide_insw;950+ hwif->OUTSW = auide_outsw;951+#endif952+953+ hwif->tuneproc = &auide_tune_drive;954+ hwif->speedproc = &auide_tune_chipset;955956#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA957+ hwif->ide_dma_off_quietly = &auide_dma_off_quietly;958+ hwif->ide_dma_timeout = &auide_dma_timeout;959960+ hwif->ide_dma_check = &auide_dma_check;961+ hwif->dma_exec_cmd = &auide_dma_exec_cmd;962+ hwif->dma_start = &auide_dma_start;963+ hwif->ide_dma_end = &auide_dma_end;964+ hwif->dma_setup = &auide_dma_setup;965+ hwif->ide_dma_test_irq = &auide_dma_test_irq;966+ hwif->ide_dma_host_off = &auide_dma_host_off;967+ hwif->ide_dma_host_on = &auide_dma_host_on;968+ hwif->ide_dma_lostirq = &auide_dma_lostirq;969+ hwif->ide_dma_on = &auide_dma_on;970971+ hwif->autodma = 1;972+ hwif->drives[0].autodma = hwif->autodma;973+ hwif->drives[1].autodma = hwif->autodma;974+ hwif->atapi_dma = 1;975+0976#else /* !CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA */977+ hwif->autodma = 0;978+ hwif->channel = 0;979+ hwif->hold = 1;980+ hwif->select_data = 0; /* no chipset-specific code */981+ hwif->config_data = 0; /* no chipset-specific code */982983+ hwif->drives[0].autodma = 0;984+ hwif->drives[0].autotune = 1; /* 1=autotune, 2=noautotune, 0=default */0000000000985#endif986+ hwif->drives[0].no_io_32bit = 1; 0987988+ auide_hwif.hwif = hwif;989+ hwif->hwif_data = &auide_hwif;00990991+#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_PIO_DBDMA 992+ auide_ddma_init(&auide_hwif);993+ dbdma_init_done = 1;00000000000000994#endif995996 
probe_hwif_init(hwif);997 dev_set_drvdata(dev, hwif);998999+ printk(KERN_INFO "Au1xxx IDE(builtin) configured for %s\n", mode );10001001+ out:1002+ return ret;1003}10041005static int au_ide_remove(struct device *dev)···1045 struct platform_device *pdev = to_platform_device(dev);1046 struct resource *res;1047 ide_hwif_t *hwif = dev_get_drvdata(dev);1048+ _auide_hwif *ahwif = &auide_hwif;10491050 ide_unregister(hwif - ide_hwifs);1051···1069 return driver_register(&au1200_ide_driver);1070}10711072+static void __exit au_ide_exit(void)1073{1074 driver_unregister(&au1200_ide_driver);1075}000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000010761077MODULE_LICENSE("GPL");1078MODULE_DESCRIPTION("AU1200 IDE driver");
+7-1
drivers/ide/pci/sgiioc4.c
···
 	ide_hwif_t *hwif;
 	int h;
 
+	/*
+	 * Find an empty HWIF; if none available, return -ENOMEM.
+	 */
 	for (h = 0; h < MAX_HWIFS; ++h) {
 		hwif = &ide_hwifs[h];
-		/* Find an empty HWIF */
 		if (hwif->chipset == ide_unknown)
 			break;
+	}
+	if (h == MAX_HWIFS) {
+		printk(KERN_ERR "%s: too many IDE interfaces, no room in table\n", d->name);
+		return -ENOMEM;
 	}
 
 	/* Get the CmdBlk and CtrlBlk Base Registers */
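The new check simply refuses to probe when ide_hwifs[] has no free slot left. Purely as an illustration of that claim-or-fail pattern (hypothetical names, not the sgiioc4 code itself), a minimal sketch:

#include <stdio.h>

#define MAX_SLOTS 8

struct slot { int in_use; };
static struct slot table[MAX_SLOTS];

/* Scan the fixed-size table; return a free slot, or NULL when it is
 * exhausted, which the caller turns into -ENOMEM as in the hunk above. */
static struct slot *claim_slot(void)
{
	int i;

	for (i = 0; i < MAX_SLOTS; i++)
		if (!table[i].in_use)
			break;
	if (i == MAX_SLOTS)
		return NULL;
	table[i].in_use = 1;
	return &table[i];
}

int main(void)
{
	printf("first free slot: %p\n", (void *)claim_slot());
	return 0;
}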
···
 }
 
 /*
- * Apply power to the MMC stack.
+ * Apply power to the MMC stack. This is a two-stage process.
+ * First, we enable power to the card without the clock running.
+ * We then wait a bit for the power to stabilise. Finally,
+ * enable the bus drivers and clock to the card.
+ *
+ * We must _NOT_ enable the clock prior to power stablising.
+ *
+ * If a host does all the power sequencing itself, ignore the
+ * initial MMC_POWER_UP stage.
  */
 static void mmc_power_up(struct mmc_host *host)
 {
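The comment spells out an ordering constraint: apply power, wait for it to stabilise, and only then start the clock. A minimal sketch of that ordering, using hypothetical stand-in helpers rather than the MMC core's real hooks:

#include <stdio.h>

/* Hypothetical stand-ins for host-controller hooks; they only log here. */
static void set_card_power(int on) { printf("power %s\n", on ? "on" : "off"); }
static void set_card_clock(int on) { printf("clock %s\n", on ? "on" : "off"); }
static void wait_ms(int ms)        { (void)ms; /* a real driver would sleep */ }

/* Follow the two-stage order described above: power, settle, then clock. */
static void power_up_sketch(void)
{
	set_card_power(1);	/* stage 1: apply VDD while the clock is stopped */
	wait_ms(10);		/* give the supply time to stabilise */
	set_card_clock(1);	/* stage 2: enable the bus drivers and clock */
}

int main(void)
{
	power_up_sketch();
	return 0;
}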
···4194 */4195static int st_init_command(struct scsi_cmnd *SCpnt)4196{004197 if (!(SCpnt->request->flags & REQ_BLOCK_PC))4198 return 0;41994200+ scsi_setup_blk_pc_cmnd(SCpnt, 0);0000000000000004201 SCpnt->done = st_intr;4202 return 1;4203}
+2-2
drivers/scsi/sym53c8xx_2/sym_hipd.c
···1405 goal->iu = 0;1406 goal->dt = 0;1407 goal->qas = 0;1408- goal->period = 0;1409 goal->offset = 0;1410 return;1411 }···1464 * Many devices implement PPR in a buggy way, so only use it if we1465 * really want to.1466 */1467- if (goal->iu || goal->dt || goal->qas || (goal->period < 0xa)) {01468 nego = NS_PPR;1469 } else if (spi_width(starget) != goal->width) {1470 nego = NS_WIDE;
···1405 goal->iu = 0;1406 goal->dt = 0;1407 goal->qas = 0;01408 goal->offset = 0;1409 return;1410 }···1465 * Many devices implement PPR in a buggy way, so only use it if we1466 * really want to.1467 */1468+ if (goal->offset &&1469+ (goal->iu || goal->dt || goal->qas || (goal->period < 0xa))) {1470 nego = NS_PPR;1471 } else if (spi_width(starget) != goal->width) {1472 nego = NS_WIDE;
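The second hunk ties PPR to a non-zero offset as well as to the IU/DT/QAS/fast-period conditions, so a target that is running asynchronously is never offered a PPR message that many devices implement badly. A standalone sketch of that selection order follows; the enum, struct and field names are illustrative stand-ins rather than the sym53c8xx_2 definitions.

#include <stdio.h>

enum nego { NS_NONE, NS_PPR, NS_WIDE, NS_SYNC };

/* Illustrative transfer goal: mirrors the fields tested above. */
struct goal { int iu, dt, qas, period, offset, width; };

/* Prefer PPR only when we actually want synchronous transfers (offset
 * non-zero) and a PPR-only feature or a fast period is requested;
 * otherwise fall back to plain wide or sync negotiation. */
static enum nego pick_negotiation(const struct goal *g,
                                  int cur_width, int cur_offset, int cur_period)
{
        if (g->offset &&
            (g->iu || g->dt || g->qas || g->period < 0xa))
                return NS_PPR;
        if (cur_width != g->width)
                return NS_WIDE;
        if (cur_period != g->period || cur_offset != g->offset)
                return NS_SYNC;
        return NS_NONE;
}

int main(void)
{
        struct goal async = { .iu = 1, .period = 0x9, .offset = 0  };
        struct goal sync  = { .iu = 1, .period = 0x9, .offset = 16 };

        printf("async goal -> %s\n",
               pick_negotiation(&async, 0, 0, 0x9) == NS_PPR ? "PPR" : "no PPR");
        printf("sync goal  -> %s\n",
               pick_negotiation(&sync, 0, 0, 0x9) == NS_PPR ? "PPR" : "no PPR");
        return 0;
}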
+18-8
fs/reiserfs/inode.c
···32 JOURNAL_PER_BALANCE_CNT * 2 +33 2 * REISERFS_QUOTA_INIT_BLOCKS(inode->i_sb);34 struct reiserfs_transaction_handle th;03536 truncate_inode_pages(&inode->i_data, 0);37···50 }51 reiserfs_update_inode_transaction(inode);5253- if (reiserfs_delete_object(&th, inode)) {54- up(&inode->i_sem);55- goto out;56- }5758 /* Do quota update inside a transaction for journaled quotas. We must do that59 * after delete_object so that quota updates go into the same transaction as60 * stat data deletion */61- DQUOT_FREE_INODE(inode);06263 if (journal_end(&th, inode->i_sb, jbegin_count)) {64 up(&inode->i_sem);···64 }6566 up(&inode->i_sem);0000006768 /* all items of file are deleted, so we can remove "save" link */69 remove_save_link(inode, 0 /* not truncate */ ); /* we can't do anything···2104 struct page *page = NULL;2105 int error;2106 struct buffer_head *bh = NULL;021072108 reiserfs_write_lock(p_s_inode->i_sb);2109···2142 transaction of truncating gets committed - on reboot the file2143 either appears truncated properly or not truncated at all */2144 add_save_link(&th, p_s_inode, 1);2145- error = reiserfs_do_truncate(&th, p_s_inode, page, update_timestamps);2146- if (error)2147- goto out;2148 error =2149 journal_end(&th, p_s_inode->i_sb, JOURNAL_PER_BALANCE_CNT * 2 + 1);2150 if (error)2151 goto out;21520000002153 if (update_timestamps) {2154 error = remove_save_link(p_s_inode, 1 /* truncate */ );2155 if (error)
···32 JOURNAL_PER_BALANCE_CNT * 2 +33 2 * REISERFS_QUOTA_INIT_BLOCKS(inode->i_sb);34 struct reiserfs_transaction_handle th;35+ int err;3637 truncate_inode_pages(&inode->i_data, 0);38···49 }50 reiserfs_update_inode_transaction(inode);5152+ err = reiserfs_delete_object(&th, inode);0005354 /* Do quota update inside a transaction for journaled quotas. We must do that55 * after delete_object so that quota updates go into the same transaction as56 * stat data deletion */57+ if (!err) 58+ DQUOT_FREE_INODE(inode);5960 if (journal_end(&th, inode->i_sb, jbegin_count)) {61 up(&inode->i_sem);···65 }6667 up(&inode->i_sem);68+69+ /* check return value from reiserfs_delete_object after70+ * ending the transaction71+ */72+ if (err)73+ goto out;7475 /* all items of file are deleted, so we can remove "save" link */76 remove_save_link(inode, 0 /* not truncate */ ); /* we can't do anything···2099 struct page *page = NULL;2100 int error;2101 struct buffer_head *bh = NULL;2102+ int err2;21032104 reiserfs_write_lock(p_s_inode->i_sb);2105···2136 transaction of truncating gets committed - on reboot the file2137 either appears truncated properly or not truncated at all */2138 add_save_link(&th, p_s_inode, 1);2139+ err2 = reiserfs_do_truncate(&th, p_s_inode, page, update_timestamps);002140 error =2141 journal_end(&th, p_s_inode->i_sb, JOURNAL_PER_BALANCE_CNT * 2 + 1);2142 if (error)2143 goto out;21442145+ /* check reiserfs_do_truncate after ending the transaction */2146+ if (err2) {2147+ error = err2;2148+ goto out;2149+ }2150+2151 if (update_timestamps) {2152 error = remove_save_link(p_s_inode, 1 /* truncate */ );2153 if (error)
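Both hunks use the same discipline: save the return value of the call that may fail, unconditionally end the transaction and drop i_sem, and only then act on the saved error. A compile-alone C sketch of that ordering is below; do_operation(), end_transaction() and unlock() are invented placeholders, not reiserfs functions.

#include <stdio.h>

static int do_operation(void)    { return -5; /* pretend the delete failed with -EIO */ }
static int end_transaction(void) { printf("transaction closed\n"); return 0; }
static void unlock(void)         { printf("lock released\n"); }

/* Record the failure, finish the transaction and release the lock first,
 * and only then branch on the saved error. */
static int delete_with_deferred_error_check(void)
{
        int err, ret;

        err = do_operation();

        ret = end_transaction();
        unlock();

        if (ret)
                return ret;
        if (err)
                return err;
        return 0;
}

int main(void)
{
        printf("result: %d\n", delete_with_deferred_error_check());
        return 0;
}

Returning before end_transaction(), as the old error paths effectively did, is exactly what could leave a transaction unfinished on failure.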
+14-4
fs/reiserfs/journal.c
···1039 }1040 atomic_dec(&journal->j_async_throttle);104100001042 /* wait on everything written so far before writing the commit1043 * if we are in barrier mode, send the commit down now1044 */···1081 BUG_ON(atomic_read(&(jl->j_commit_left)) != 1);10821083 if (!barrier) {1084- if (buffer_dirty(jl->j_commit_bh))1085- BUG();1086- mark_buffer_dirty(jl->j_commit_bh);1087- sync_dirty_buffer(jl->j_commit_bh);0000001088 } else1089 wait_on_buffer(jl->j_commit_bh);1090
···1039 }1040 atomic_dec(&journal->j_async_throttle);10411042+ /* We're skipping the commit if there's an error */1043+ if (retval || reiserfs_is_journal_aborted(journal))1044+ barrier = 0;1045+1046 /* wait on everything written so far before writing the commit1047 * if we are in barrier mode, send the commit down now1048 */···1077 BUG_ON(atomic_read(&(jl->j_commit_left)) != 1);10781079 if (!barrier) {1080+ /* If there was a write error in the journal - we can't commit1081+ * this transaction - it will be invalid and, if successful,1082+ * will just end up propagating the write error out to1083+ * the file system. */1084+ if (likely(!retval && !reiserfs_is_journal_aborted (journal))) {1085+ if (buffer_dirty(jl->j_commit_bh))1086+ BUG();1087+ mark_buffer_dirty(jl->j_commit_bh) ;1088+ sync_dirty_buffer(jl->j_commit_bh) ;1089+ }1090 } else1091 wait_on_buffer(jl->j_commit_bh);1092
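The effect of the new checks is that a commit block is never written for a transaction that has already hit a write error or a journal abort, since a commit record would make that broken transaction look valid during replay. A small standalone sketch of the gate follows; the flags and helpers are illustrative, not the reiserfs journal API.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative state: "some journal write failed" / "journal was aborted". */
static bool write_error;
static bool journal_aborted;

static void write_commit_block(void) { printf("commit block written\n"); }

/* Only commit a transaction we still believe is intact. */
static void maybe_commit(void)
{
        if (write_error || journal_aborted) {
                printf("error detected, commit block skipped\n");
                return;
        }
        write_commit_block();
}

int main(void)
{
        maybe_commit();          /* healthy journal: commit is written  */
        write_error = true;
        maybe_commit();          /* after an I/O error: commit withheld */
        return 0;
}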
···256257source "usr/Kconfig"25800000000000000259menuconfig EMBEDDED260 bool "Configure standard kernel features (for small systems)"261 help···351 help352 Disabling this option will cause the kernel to be built without353 support for epoll family of system calls.354-355-config CC_OPTIMIZE_FOR_SIZE356- bool "Optimize for size"357- default y if ARM || H8300358- help359- Enabling this option will pass "-Os" instead of "-O2" to gcc360- resulting in a smaller kernel.361-362- WARNING: some versions of gcc may generate incorrect code with this363- option. If problems are observed, a gcc upgrade may be needed.364-365- If unsure, say N.366367config SHMEM368 bool "Use full shmem filesystem" if EMBEDDED
···256257source "usr/Kconfig"258259+config CC_OPTIMIZE_FOR_SIZE260+ bool "Optimize for size (Look out for broken compilers!)"261+ default y262+ depends on ARM || H8300 || EXPERIMENTAL263+ depends on !SPARC64264+ help265+ Enabling this option will pass "-Os" instead of "-O2" to gcc266+ resulting in a smaller kernel.267+268+ WARNING: some versions of gcc may generate incorrect code with this269+ option. If problems are observed, a gcc upgrade may be needed.270+271+ If unsure, say N.272+273menuconfig EMBEDDED274 bool "Configure standard kernel features (for small systems)"275 help···337 help338 Disabling this option will cause the kernel to be built without339 support for epoll family of system calls.000000000000340341config SHMEM342 bool "Use full shmem filesystem" if EMBEDDED
+3
net/8021q/vlan_dev.c
···165166 skb_pull(skb, VLAN_HLEN); /* take off the VLAN header (4 bytes currently) */167000168 /* Ok, lets check to make sure the device (dev) we169 * came in on is what this VLAN is attached to.170 */
···165166 skb_pull(skb, VLAN_HLEN); /* take off the VLAN header (4 bytes currently) */167168+ /* Need to correct hardware checksum */169+ skb_postpull_rcsum(skb, vhdr, VLAN_HLEN);170+171 /* Ok, lets check to make sure the device (dev) we172 * came in on is what this VLAN is attached to.173 */
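The added skb_postpull_rcsum() call matters because a hardware-computed receive checksum covers the frame as it arrived; after the 4-byte VLAN header is pulled, that stored sum has to be reduced by the checksum of the removed bytes or later verification will fail. The standalone sketch below demonstrates the arithmetic with a plain 16-bit ones'-complement sum; ocsum() and postpull_sum() are illustrative helpers, not the kernel's csum routines.

#include <stdint.h>
#include <stdio.h>

/* Plain 16-bit ones'-complement sum over a byte buffer. */
static uint16_t ocsum(const uint8_t *buf, size_t len)
{
        uint32_t sum = 0;
        size_t i;

        for (i = 0; i + 1 < len; i += 2)
                sum += ((uint32_t)buf[i] << 8) | buf[i + 1];
        if (len & 1)
                sum += (uint32_t)buf[len - 1] << 8;
        while (sum >> 16)
                sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)sum;
}

/* After pulling hdr_len bytes off the front, subtract their contribution
 * from the previously computed sum (ones'-complement subtraction). */
static uint16_t postpull_sum(uint16_t full, const uint8_t *pulled, size_t hdr_len)
{
        uint32_t diff = (uint32_t)full + (uint16_t)~ocsum(pulled, hdr_len);

        while (diff >> 16)
                diff = (diff & 0xffff) + (diff >> 16);
        return (uint16_t)diff;
}

int main(void)
{
        uint8_t frame[] = { 0x81, 0x00, 0x00, 0x2a,                /* 4-byte "VLAN header" */
                            0x45, 0x00, 0x00, 0x1c, 0xde, 0xad };  /* payload */
        uint16_t full    = ocsum(frame, sizeof(frame));
        uint16_t payload = ocsum(frame + 4, sizeof(frame) - 4);

        printf("adjusted=%04x direct=%04x\n",
               postpull_sum(full, frame, 4), payload);
        return 0;
}

Both printed values come out identical, which is the invariant the hardware-checksum adjustment is meant to preserve once the header bytes are gone.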