Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

at v2.6.33-rc5 1761 lines 48 kB view raw
1/* 2 * File: drivers/ata/pata_bf54x.c 3 * Author: Sonic Zhang <sonic.zhang@analog.com> 4 * 5 * Created: 6 * Description: PATA Driver for blackfin 54x 7 * 8 * Modified: 9 * Copyright 2007 Analog Devices Inc. 10 * 11 * Bugs: Enter bugs at http://blackfin.uclinux.org/ 12 * 13 * This program is free software; you can redistribute it and/or modify 14 * it under the terms of the GNU General Public License as published by 15 * the Free Software Foundation; either version 2 of the License, or 16 * (at your option) any later version. 17 * 18 * This program is distributed in the hope that it will be useful, 19 * but WITHOUT ANY WARRANTY; without even the implied warranty of 20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 21 * GNU General Public License for more details. 22 * 23 * You should have received a copy of the GNU General Public License 24 * along with this program; if not, see the file COPYING, or write 25 * to the Free Software Foundation, Inc., 26 * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA 27 */ 28 29#include <linux/kernel.h> 30#include <linux/module.h> 31#include <linux/pci.h> 32#include <linux/init.h> 33#include <linux/blkdev.h> 34#include <linux/delay.h> 35#include <linux/device.h> 36#include <scsi/scsi_host.h> 37#include <linux/libata.h> 38#include <linux/platform_device.h> 39#include <asm/dma.h> 40#include <asm/gpio.h> 41#include <asm/portmux.h> 42 43#define DRV_NAME "pata-bf54x" 44#define DRV_VERSION "0.9" 45 46#define ATA_REG_CTRL 0x0E 47#define ATA_REG_ALTSTATUS ATA_REG_CTRL 48 49/* These are the offset of the controller's registers */ 50#define ATAPI_OFFSET_CONTROL 0x00 51#define ATAPI_OFFSET_STATUS 0x04 52#define ATAPI_OFFSET_DEV_ADDR 0x08 53#define ATAPI_OFFSET_DEV_TXBUF 0x0c 54#define ATAPI_OFFSET_DEV_RXBUF 0x10 55#define ATAPI_OFFSET_INT_MASK 0x14 56#define ATAPI_OFFSET_INT_STATUS 0x18 57#define ATAPI_OFFSET_XFER_LEN 0x1c 58#define ATAPI_OFFSET_LINE_STATUS 0x20 59#define ATAPI_OFFSET_SM_STATE 0x24 60#define 
ATAPI_OFFSET_TERMINATE 0x28 61#define ATAPI_OFFSET_PIO_TFRCNT 0x2c 62#define ATAPI_OFFSET_DMA_TFRCNT 0x30 63#define ATAPI_OFFSET_UMAIN_TFRCNT 0x34 64#define ATAPI_OFFSET_UDMAOUT_TFRCNT 0x38 65#define ATAPI_OFFSET_REG_TIM_0 0x40 66#define ATAPI_OFFSET_PIO_TIM_0 0x44 67#define ATAPI_OFFSET_PIO_TIM_1 0x48 68#define ATAPI_OFFSET_MULTI_TIM_0 0x50 69#define ATAPI_OFFSET_MULTI_TIM_1 0x54 70#define ATAPI_OFFSET_MULTI_TIM_2 0x58 71#define ATAPI_OFFSET_ULTRA_TIM_0 0x60 72#define ATAPI_OFFSET_ULTRA_TIM_1 0x64 73#define ATAPI_OFFSET_ULTRA_TIM_2 0x68 74#define ATAPI_OFFSET_ULTRA_TIM_3 0x6c 75 76 77#define ATAPI_GET_CONTROL(base)\ 78 bfin_read16(base + ATAPI_OFFSET_CONTROL) 79#define ATAPI_SET_CONTROL(base, val)\ 80 bfin_write16(base + ATAPI_OFFSET_CONTROL, val) 81#define ATAPI_GET_STATUS(base)\ 82 bfin_read16(base + ATAPI_OFFSET_STATUS) 83#define ATAPI_GET_DEV_ADDR(base)\ 84 bfin_read16(base + ATAPI_OFFSET_DEV_ADDR) 85#define ATAPI_SET_DEV_ADDR(base, val)\ 86 bfin_write16(base + ATAPI_OFFSET_DEV_ADDR, val) 87#define ATAPI_GET_DEV_TXBUF(base)\ 88 bfin_read16(base + ATAPI_OFFSET_DEV_TXBUF) 89#define ATAPI_SET_DEV_TXBUF(base, val)\ 90 bfin_write16(base + ATAPI_OFFSET_DEV_TXBUF, val) 91#define ATAPI_GET_DEV_RXBUF(base)\ 92 bfin_read16(base + ATAPI_OFFSET_DEV_RXBUF) 93#define ATAPI_SET_DEV_RXBUF(base, val)\ 94 bfin_write16(base + ATAPI_OFFSET_DEV_RXBUF, val) 95#define ATAPI_GET_INT_MASK(base)\ 96 bfin_read16(base + ATAPI_OFFSET_INT_MASK) 97#define ATAPI_SET_INT_MASK(base, val)\ 98 bfin_write16(base + ATAPI_OFFSET_INT_MASK, val) 99#define ATAPI_GET_INT_STATUS(base)\ 100 bfin_read16(base + ATAPI_OFFSET_INT_STATUS) 101#define ATAPI_SET_INT_STATUS(base, val)\ 102 bfin_write16(base + ATAPI_OFFSET_INT_STATUS, val) 103#define ATAPI_GET_XFER_LEN(base)\ 104 bfin_read16(base + ATAPI_OFFSET_XFER_LEN) 105#define ATAPI_SET_XFER_LEN(base, val)\ 106 bfin_write16(base + ATAPI_OFFSET_XFER_LEN, val) 107#define ATAPI_GET_LINE_STATUS(base)\ 108 bfin_read16(base + ATAPI_OFFSET_LINE_STATUS) 109#define 
ATAPI_GET_SM_STATE(base)\ 110 bfin_read16(base + ATAPI_OFFSET_SM_STATE) 111#define ATAPI_GET_TERMINATE(base)\ 112 bfin_read16(base + ATAPI_OFFSET_TERMINATE) 113#define ATAPI_SET_TERMINATE(base, val)\ 114 bfin_write16(base + ATAPI_OFFSET_TERMINATE, val) 115#define ATAPI_GET_PIO_TFRCNT(base)\ 116 bfin_read16(base + ATAPI_OFFSET_PIO_TFRCNT) 117#define ATAPI_GET_DMA_TFRCNT(base)\ 118 bfin_read16(base + ATAPI_OFFSET_DMA_TFRCNT) 119#define ATAPI_GET_UMAIN_TFRCNT(base)\ 120 bfin_read16(base + ATAPI_OFFSET_UMAIN_TFRCNT) 121#define ATAPI_GET_UDMAOUT_TFRCNT(base)\ 122 bfin_read16(base + ATAPI_OFFSET_UDMAOUT_TFRCNT) 123#define ATAPI_GET_REG_TIM_0(base)\ 124 bfin_read16(base + ATAPI_OFFSET_REG_TIM_0) 125#define ATAPI_SET_REG_TIM_0(base, val)\ 126 bfin_write16(base + ATAPI_OFFSET_REG_TIM_0, val) 127#define ATAPI_GET_PIO_TIM_0(base)\ 128 bfin_read16(base + ATAPI_OFFSET_PIO_TIM_0) 129#define ATAPI_SET_PIO_TIM_0(base, val)\ 130 bfin_write16(base + ATAPI_OFFSET_PIO_TIM_0, val) 131#define ATAPI_GET_PIO_TIM_1(base)\ 132 bfin_read16(base + ATAPI_OFFSET_PIO_TIM_1) 133#define ATAPI_SET_PIO_TIM_1(base, val)\ 134 bfin_write16(base + ATAPI_OFFSET_PIO_TIM_1, val) 135#define ATAPI_GET_MULTI_TIM_0(base)\ 136 bfin_read16(base + ATAPI_OFFSET_MULTI_TIM_0) 137#define ATAPI_SET_MULTI_TIM_0(base, val)\ 138 bfin_write16(base + ATAPI_OFFSET_MULTI_TIM_0, val) 139#define ATAPI_GET_MULTI_TIM_1(base)\ 140 bfin_read16(base + ATAPI_OFFSET_MULTI_TIM_1) 141#define ATAPI_SET_MULTI_TIM_1(base, val)\ 142 bfin_write16(base + ATAPI_OFFSET_MULTI_TIM_1, val) 143#define ATAPI_GET_MULTI_TIM_2(base)\ 144 bfin_read16(base + ATAPI_OFFSET_MULTI_TIM_2) 145#define ATAPI_SET_MULTI_TIM_2(base, val)\ 146 bfin_write16(base + ATAPI_OFFSET_MULTI_TIM_2, val) 147#define ATAPI_GET_ULTRA_TIM_0(base)\ 148 bfin_read16(base + ATAPI_OFFSET_ULTRA_TIM_0) 149#define ATAPI_SET_ULTRA_TIM_0(base, val)\ 150 bfin_write16(base + ATAPI_OFFSET_ULTRA_TIM_0, val) 151#define ATAPI_GET_ULTRA_TIM_1(base)\ 152 bfin_read16(base + 
ATAPI_OFFSET_ULTRA_TIM_1) 153#define ATAPI_SET_ULTRA_TIM_1(base, val)\ 154 bfin_write16(base + ATAPI_OFFSET_ULTRA_TIM_1, val) 155#define ATAPI_GET_ULTRA_TIM_2(base)\ 156 bfin_read16(base + ATAPI_OFFSET_ULTRA_TIM_2) 157#define ATAPI_SET_ULTRA_TIM_2(base, val)\ 158 bfin_write16(base + ATAPI_OFFSET_ULTRA_TIM_2, val) 159#define ATAPI_GET_ULTRA_TIM_3(base)\ 160 bfin_read16(base + ATAPI_OFFSET_ULTRA_TIM_3) 161#define ATAPI_SET_ULTRA_TIM_3(base, val)\ 162 bfin_write16(base + ATAPI_OFFSET_ULTRA_TIM_3, val) 163 164/** 165 * PIO Mode - Frequency compatibility 166 */ 167/* mode: 0 1 2 3 4 */ 168static const u32 pio_fsclk[] = 169{ 33333333, 33333333, 33333333, 33333333, 33333333 }; 170 171/** 172 * MDMA Mode - Frequency compatibility 173 */ 174/* mode: 0 1 2 */ 175static const u32 mdma_fsclk[] = { 33333333, 33333333, 33333333 }; 176 177/** 178 * UDMA Mode - Frequency compatibility 179 * 180 * UDMA5 - 100 MB/s - SCLK = 133 MHz 181 * UDMA4 - 66 MB/s - SCLK >= 80 MHz 182 * UDMA3 - 44.4 MB/s - SCLK >= 50 MHz 183 * UDMA2 - 33 MB/s - SCLK >= 40 MHz 184 */ 185/* mode: 0 1 2 3 4 5 */ 186static const u32 udma_fsclk[] = 187{ 33333333, 33333333, 40000000, 50000000, 80000000, 133333333 }; 188 189/** 190 * Register transfer timing table 191 */ 192/* mode: 0 1 2 3 4 */ 193/* Cycle Time */ 194static const u32 reg_t0min[] = { 600, 383, 330, 180, 120 }; 195/* DIOR/DIOW to end cycle */ 196static const u32 reg_t2min[] = { 290, 290, 290, 70, 25 }; 197/* DIOR/DIOW asserted pulse width */ 198static const u32 reg_teocmin[] = { 290, 290, 290, 80, 70 }; 199 200/** 201 * PIO timing table 202 */ 203/* mode: 0 1 2 3 4 */ 204/* Cycle Time */ 205static const u32 pio_t0min[] = { 600, 383, 240, 180, 120 }; 206/* Address valid to DIOR/DIORW */ 207static const u32 pio_t1min[] = { 70, 50, 30, 30, 25 }; 208/* DIOR/DIOW to end cycle */ 209static const u32 pio_t2min[] = { 165, 125, 100, 80, 70 }; 210/* DIOR/DIOW asserted pulse width */ 211static const u32 pio_teocmin[] = { 165, 125, 100, 70, 25 }; 212/* DIOW data 
hold */ 213static const u32 pio_t4min[] = { 30, 20, 15, 10, 10 }; 214 215/* ****************************************************************** 216 * Multiword DMA timing table 217 * ****************************************************************** 218 */ 219/* mode: 0 1 2 */ 220/* Cycle Time */ 221static const u32 mdma_t0min[] = { 480, 150, 120 }; 222/* DIOR/DIOW asserted pulse width */ 223static const u32 mdma_tdmin[] = { 215, 80, 70 }; 224/* DMACK to read data released */ 225static const u32 mdma_thmin[] = { 20, 15, 10 }; 226/* DIOR/DIOW to DMACK hold */ 227static const u32 mdma_tjmin[] = { 20, 5, 5 }; 228/* DIOR negated pulse width */ 229static const u32 mdma_tkrmin[] = { 50, 50, 25 }; 230/* DIOR negated pulse width */ 231static const u32 mdma_tkwmin[] = { 215, 50, 25 }; 232/* CS[1:0] valid to DIOR/DIOW */ 233static const u32 mdma_tmmin[] = { 50, 30, 25 }; 234/* DMACK to read data released */ 235static const u32 mdma_tzmax[] = { 20, 25, 25 }; 236 237/** 238 * Ultra DMA timing table 239 */ 240/* mode: 0 1 2 3 4 5 */ 241static const u32 udma_tcycmin[] = { 112, 73, 54, 39, 25, 17 }; 242static const u32 udma_tdvsmin[] = { 70, 48, 31, 20, 7, 5 }; 243static const u32 udma_tenvmax[] = { 70, 70, 70, 55, 55, 50 }; 244static const u32 udma_trpmin[] = { 160, 125, 100, 100, 100, 85 }; 245static const u32 udma_tmin[] = { 5, 5, 5, 5, 3, 3 }; 246 247 248static const u32 udma_tmlimin = 20; 249static const u32 udma_tzahmin = 20; 250static const u32 udma_tenvmin = 20; 251static const u32 udma_tackmin = 20; 252static const u32 udma_tssmin = 50; 253 254/** 255 * 256 * Function: num_clocks_min 257 * 258 * Description: 259 * calculate number of SCLK cycles to meet minimum timing 260 */ 261static unsigned short num_clocks_min(unsigned long tmin, 262 unsigned long fsclk) 263{ 264 unsigned long tmp ; 265 unsigned short result; 266 267 tmp = tmin * (fsclk/1000/1000) / 1000; 268 result = (unsigned short)tmp; 269 if ((tmp*1000*1000) < (tmin*(fsclk/1000))) { 270 result++; 271 } 272 273 
return result; 274} 275 276/** 277 * bfin_set_piomode - Initialize host controller PATA PIO timings 278 * @ap: Port whose timings we are configuring 279 * @adev: um 280 * 281 * Set PIO mode for device. 282 * 283 * LOCKING: 284 * None (inherited from caller). 285 */ 286 287static void bfin_set_piomode(struct ata_port *ap, struct ata_device *adev) 288{ 289 int mode = adev->pio_mode - XFER_PIO_0; 290 void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr; 291 unsigned int fsclk = get_sclk(); 292 unsigned short teoc_reg, t2_reg, teoc_pio; 293 unsigned short t4_reg, t2_pio, t1_reg; 294 unsigned short n0, n6, t6min = 5; 295 296 /* the most restrictive timing value is t6 and tc, the DIOW - data hold 297 * If one SCLK pulse is longer than this minimum value then register 298 * transfers cannot be supported at this frequency. 299 */ 300 n6 = num_clocks_min(t6min, fsclk); 301 if (mode >= 0 && mode <= 4 && n6 >= 1) { 302 dev_dbg(adev->link->ap->dev, "set piomode: mode=%d, fsclk=%ud\n", mode, fsclk); 303 /* calculate the timing values for register transfers. */ 304 while (mode > 0 && pio_fsclk[mode] > fsclk) 305 mode--; 306 307 /* DIOR/DIOW to end cycle time */ 308 t2_reg = num_clocks_min(reg_t2min[mode], fsclk); 309 /* DIOR/DIOW asserted pulse width */ 310 teoc_reg = num_clocks_min(reg_teocmin[mode], fsclk); 311 /* Cycle Time */ 312 n0 = num_clocks_min(reg_t0min[mode], fsclk); 313 314 /* increase t2 until we meed the minimum cycle length */ 315 if (t2_reg + teoc_reg < n0) 316 t2_reg = n0 - teoc_reg; 317 318 /* calculate the timing values for pio transfers. 
*/ 319 320 /* DIOR/DIOW to end cycle time */ 321 t2_pio = num_clocks_min(pio_t2min[mode], fsclk); 322 /* DIOR/DIOW asserted pulse width */ 323 teoc_pio = num_clocks_min(pio_teocmin[mode], fsclk); 324 /* Cycle Time */ 325 n0 = num_clocks_min(pio_t0min[mode], fsclk); 326 327 /* increase t2 until we meed the minimum cycle length */ 328 if (t2_pio + teoc_pio < n0) 329 t2_pio = n0 - teoc_pio; 330 331 /* Address valid to DIOR/DIORW */ 332 t1_reg = num_clocks_min(pio_t1min[mode], fsclk); 333 334 /* DIOW data hold */ 335 t4_reg = num_clocks_min(pio_t4min[mode], fsclk); 336 337 ATAPI_SET_REG_TIM_0(base, (teoc_reg<<8 | t2_reg)); 338 ATAPI_SET_PIO_TIM_0(base, (t4_reg<<12 | t2_pio<<4 | t1_reg)); 339 ATAPI_SET_PIO_TIM_1(base, teoc_pio); 340 if (mode > 2) { 341 ATAPI_SET_CONTROL(base, 342 ATAPI_GET_CONTROL(base) | IORDY_EN); 343 } else { 344 ATAPI_SET_CONTROL(base, 345 ATAPI_GET_CONTROL(base) & ~IORDY_EN); 346 } 347 348 /* Disable host ATAPI PIO interrupts */ 349 ATAPI_SET_INT_MASK(base, ATAPI_GET_INT_MASK(base) 350 & ~(PIO_DONE_MASK | HOST_TERM_XFER_MASK)); 351 SSYNC(); 352 } 353} 354 355/** 356 * bfin_set_dmamode - Initialize host controller PATA DMA timings 357 * @ap: Port whose timings we are configuring 358 * @adev: um 359 * 360 * Set UDMA mode for device. 361 * 362 * LOCKING: 363 * None (inherited from caller). 364 */ 365 366static void bfin_set_dmamode(struct ata_port *ap, struct ata_device *adev) 367{ 368 int mode; 369 void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr; 370 unsigned long fsclk = get_sclk(); 371 unsigned short tenv, tack, tcyc_tdvs, tdvs, tmli, tss, trp, tzah; 372 unsigned short tm, td, tkr, tkw, teoc, th; 373 unsigned short n0, nf, tfmin = 5; 374 unsigned short nmin, tcyc; 375 376 mode = adev->dma_mode - XFER_UDMA_0; 377 if (mode >= 0 && mode <= 5) { 378 dev_dbg(adev->link->ap->dev, "set udmamode: mode=%d\n", mode); 379 /* the most restrictive timing value is t6 and tc, 380 * the DIOW - data hold. 
If one SCLK pulse is longer 381 * than this minimum value then register 382 * transfers cannot be supported at this frequency. 383 */ 384 while (mode > 0 && udma_fsclk[mode] > fsclk) 385 mode--; 386 387 nmin = num_clocks_min(udma_tmin[mode], fsclk); 388 if (nmin >= 1) { 389 /* calculate the timing values for Ultra DMA. */ 390 tdvs = num_clocks_min(udma_tdvsmin[mode], fsclk); 391 tcyc = num_clocks_min(udma_tcycmin[mode], fsclk); 392 tcyc_tdvs = 2; 393 394 /* increase tcyc - tdvs (tcyc_tdvs) until we meed 395 * the minimum cycle length 396 */ 397 if (tdvs + tcyc_tdvs < tcyc) 398 tcyc_tdvs = tcyc - tdvs; 399 400 /* Mow assign the values required for the timing 401 * registers 402 */ 403 if (tcyc_tdvs < 2) 404 tcyc_tdvs = 2; 405 406 if (tdvs < 2) 407 tdvs = 2; 408 409 tack = num_clocks_min(udma_tackmin, fsclk); 410 tss = num_clocks_min(udma_tssmin, fsclk); 411 tmli = num_clocks_min(udma_tmlimin, fsclk); 412 tzah = num_clocks_min(udma_tzahmin, fsclk); 413 trp = num_clocks_min(udma_trpmin[mode], fsclk); 414 tenv = num_clocks_min(udma_tenvmin, fsclk); 415 if (tenv <= udma_tenvmax[mode]) { 416 ATAPI_SET_ULTRA_TIM_0(base, (tenv<<8 | tack)); 417 ATAPI_SET_ULTRA_TIM_1(base, 418 (tcyc_tdvs<<8 | tdvs)); 419 ATAPI_SET_ULTRA_TIM_2(base, (tmli<<8 | tss)); 420 ATAPI_SET_ULTRA_TIM_3(base, (trp<<8 | tzah)); 421 422 /* Enable host ATAPI Untra DMA interrupts */ 423 ATAPI_SET_INT_MASK(base, 424 ATAPI_GET_INT_MASK(base) 425 | UDMAIN_DONE_MASK 426 | UDMAOUT_DONE_MASK 427 | UDMAIN_TERM_MASK 428 | UDMAOUT_TERM_MASK); 429 } 430 } 431 } 432 433 mode = adev->dma_mode - XFER_MW_DMA_0; 434 if (mode >= 0 && mode <= 2) { 435 dev_dbg(adev->link->ap->dev, "set mdmamode: mode=%d\n", mode); 436 /* the most restrictive timing value is tf, the DMACK to 437 * read data released. If one SCLK pulse is longer than 438 * this maximum value then the MDMA mode 439 * cannot be supported at this frequency. 
440 */ 441 while (mode > 0 && mdma_fsclk[mode] > fsclk) 442 mode--; 443 444 nf = num_clocks_min(tfmin, fsclk); 445 if (nf >= 1) { 446 /* calculate the timing values for Multi-word DMA. */ 447 448 /* DIOR/DIOW asserted pulse width */ 449 td = num_clocks_min(mdma_tdmin[mode], fsclk); 450 451 /* DIOR negated pulse width */ 452 tkw = num_clocks_min(mdma_tkwmin[mode], fsclk); 453 454 /* Cycle Time */ 455 n0 = num_clocks_min(mdma_t0min[mode], fsclk); 456 457 /* increase tk until we meed the minimum cycle length */ 458 if (tkw + td < n0) 459 tkw = n0 - td; 460 461 /* DIOR negated pulse width - read */ 462 tkr = num_clocks_min(mdma_tkrmin[mode], fsclk); 463 /* CS{1:0] valid to DIOR/DIOW */ 464 tm = num_clocks_min(mdma_tmmin[mode], fsclk); 465 /* DIOR/DIOW to DMACK hold */ 466 teoc = num_clocks_min(mdma_tjmin[mode], fsclk); 467 /* DIOW Data hold */ 468 th = num_clocks_min(mdma_thmin[mode], fsclk); 469 470 ATAPI_SET_MULTI_TIM_0(base, (tm<<8 | td)); 471 ATAPI_SET_MULTI_TIM_1(base, (tkr<<8 | tkw)); 472 ATAPI_SET_MULTI_TIM_2(base, (teoc<<8 | th)); 473 474 /* Enable host ATAPI Multi DMA interrupts */ 475 ATAPI_SET_INT_MASK(base, ATAPI_GET_INT_MASK(base) 476 | MULTI_DONE_MASK | MULTI_TERM_MASK); 477 SSYNC(); 478 } 479 } 480 return; 481} 482 483/** 484 * 485 * Function: wait_complete 486 * 487 * Description: Waits the interrupt from device 488 * 489 */ 490static inline void wait_complete(void __iomem *base, unsigned short mask) 491{ 492 unsigned short status; 493 unsigned int i = 0; 494 495#define PATA_BF54X_WAIT_TIMEOUT 10000 496 497 for (i = 0; i < PATA_BF54X_WAIT_TIMEOUT; i++) { 498 status = ATAPI_GET_INT_STATUS(base) & mask; 499 if (status) 500 break; 501 } 502 503 ATAPI_SET_INT_STATUS(base, mask); 504} 505 506/** 507 * 508 * Function: write_atapi_register 509 * 510 * Description: Writes to ATA Device Resgister 511 * 512 */ 513 514static void write_atapi_register(void __iomem *base, 515 unsigned long ata_reg, unsigned short value) 516{ 517 /* Program the ATA_DEV_TXBUF register 
with write data (to be 518 * written into the device). 519 */ 520 ATAPI_SET_DEV_TXBUF(base, value); 521 522 /* Program the ATA_DEV_ADDR register with address of the 523 * device register (0x01 to 0x0F). 524 */ 525 ATAPI_SET_DEV_ADDR(base, ata_reg); 526 527 /* Program the ATA_CTRL register with dir set to write (1) 528 */ 529 ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base) | XFER_DIR)); 530 531 /* ensure PIO DMA is not set */ 532 ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base) & ~PIO_USE_DMA)); 533 534 /* and start the transfer */ 535 ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base) | PIO_START)); 536 537 /* Wait for the interrupt to indicate the end of the transfer. 538 * (We need to wait on and clear rhe ATA_DEV_INT interrupt status) 539 */ 540 wait_complete(base, PIO_DONE_INT); 541} 542 543/** 544 * 545 * Function: read_atapi_register 546 * 547 *Description: Reads from ATA Device Resgister 548 * 549 */ 550 551static unsigned short read_atapi_register(void __iomem *base, 552 unsigned long ata_reg) 553{ 554 /* Program the ATA_DEV_ADDR register with address of the 555 * device register (0x01 to 0x0F). 556 */ 557 ATAPI_SET_DEV_ADDR(base, ata_reg); 558 559 /* Program the ATA_CTRL register with dir set to read (0) and 560 */ 561 ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base) & ~XFER_DIR)); 562 563 /* ensure PIO DMA is not set */ 564 ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base) & ~PIO_USE_DMA)); 565 566 /* and start the transfer */ 567 ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base) | PIO_START)); 568 569 /* Wait for the interrupt to indicate the end of the transfer. 570 * (PIO_DONE interrupt is set and it doesn't seem to matter 571 * that we don't clear it) 572 */ 573 wait_complete(base, PIO_DONE_INT); 574 575 /* Read the ATA_DEV_RXBUF register with write data (to be 576 * written into the device). 
577 */ 578 return ATAPI_GET_DEV_RXBUF(base); 579} 580 581/** 582 * 583 * Function: write_atapi_register_data 584 * 585 * Description: Writes to ATA Device Resgister 586 * 587 */ 588 589static void write_atapi_data(void __iomem *base, 590 int len, unsigned short *buf) 591{ 592 int i; 593 594 /* Set transfer length to 1 */ 595 ATAPI_SET_XFER_LEN(base, 1); 596 597 /* Program the ATA_DEV_ADDR register with address of the 598 * ATA_REG_DATA 599 */ 600 ATAPI_SET_DEV_ADDR(base, ATA_REG_DATA); 601 602 /* Program the ATA_CTRL register with dir set to write (1) 603 */ 604 ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base) | XFER_DIR)); 605 606 /* ensure PIO DMA is not set */ 607 ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base) & ~PIO_USE_DMA)); 608 609 for (i = 0; i < len; i++) { 610 /* Program the ATA_DEV_TXBUF register with write data (to be 611 * written into the device). 612 */ 613 ATAPI_SET_DEV_TXBUF(base, buf[i]); 614 615 /* and start the transfer */ 616 ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base) | PIO_START)); 617 618 /* Wait for the interrupt to indicate the end of the transfer. 
619 * (We need to wait on and clear rhe ATA_DEV_INT 620 * interrupt status) 621 */ 622 wait_complete(base, PIO_DONE_INT); 623 } 624} 625 626/** 627 * 628 * Function: read_atapi_register_data 629 * 630 * Description: Reads from ATA Device Resgister 631 * 632 */ 633 634static void read_atapi_data(void __iomem *base, 635 int len, unsigned short *buf) 636{ 637 int i; 638 639 /* Set transfer length to 1 */ 640 ATAPI_SET_XFER_LEN(base, 1); 641 642 /* Program the ATA_DEV_ADDR register with address of the 643 * ATA_REG_DATA 644 */ 645 ATAPI_SET_DEV_ADDR(base, ATA_REG_DATA); 646 647 /* Program the ATA_CTRL register with dir set to read (0) and 648 */ 649 ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base) & ~XFER_DIR)); 650 651 /* ensure PIO DMA is not set */ 652 ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base) & ~PIO_USE_DMA)); 653 654 for (i = 0; i < len; i++) { 655 /* and start the transfer */ 656 ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base) | PIO_START)); 657 658 /* Wait for the interrupt to indicate the end of the transfer. 659 * (PIO_DONE interrupt is set and it doesn't seem to matter 660 * that we don't clear it) 661 */ 662 wait_complete(base, PIO_DONE_INT); 663 664 /* Read the ATA_DEV_RXBUF register with write data (to be 665 * written into the device). 666 */ 667 buf[i] = ATAPI_GET_DEV_RXBUF(base); 668 } 669} 670 671/** 672 * bfin_tf_load - send taskfile registers to host controller 673 * @ap: Port to which output is sent 674 * @tf: ATA taskfile register set 675 * 676 * Note: Original code is ata_sff_tf_load(). 
677 */ 678 679static void bfin_tf_load(struct ata_port *ap, const struct ata_taskfile *tf) 680{ 681 void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr; 682 unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR; 683 684 if (tf->ctl != ap->last_ctl) { 685 write_atapi_register(base, ATA_REG_CTRL, tf->ctl); 686 ap->last_ctl = tf->ctl; 687 ata_wait_idle(ap); 688 } 689 690 if (is_addr) { 691 if (tf->flags & ATA_TFLAG_LBA48) { 692 write_atapi_register(base, ATA_REG_FEATURE, 693 tf->hob_feature); 694 write_atapi_register(base, ATA_REG_NSECT, 695 tf->hob_nsect); 696 write_atapi_register(base, ATA_REG_LBAL, tf->hob_lbal); 697 write_atapi_register(base, ATA_REG_LBAM, tf->hob_lbam); 698 write_atapi_register(base, ATA_REG_LBAH, tf->hob_lbah); 699 dev_dbg(ap->dev, "hob: feat 0x%X nsect 0x%X, lba 0x%X " 700 "0x%X 0x%X\n", 701 tf->hob_feature, 702 tf->hob_nsect, 703 tf->hob_lbal, 704 tf->hob_lbam, 705 tf->hob_lbah); 706 } 707 708 write_atapi_register(base, ATA_REG_FEATURE, tf->feature); 709 write_atapi_register(base, ATA_REG_NSECT, tf->nsect); 710 write_atapi_register(base, ATA_REG_LBAL, tf->lbal); 711 write_atapi_register(base, ATA_REG_LBAM, tf->lbam); 712 write_atapi_register(base, ATA_REG_LBAH, tf->lbah); 713 dev_dbg(ap->dev, "feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n", 714 tf->feature, 715 tf->nsect, 716 tf->lbal, 717 tf->lbam, 718 tf->lbah); 719 } 720 721 if (tf->flags & ATA_TFLAG_DEVICE) { 722 write_atapi_register(base, ATA_REG_DEVICE, tf->device); 723 dev_dbg(ap->dev, "device 0x%X\n", tf->device); 724 } 725 726 ata_wait_idle(ap); 727} 728 729/** 730 * bfin_check_status - Read device status reg & clear interrupt 731 * @ap: port where the device is 732 * 733 * Note: Original code is ata_check_status(). 
734 */ 735 736static u8 bfin_check_status(struct ata_port *ap) 737{ 738 void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr; 739 return read_atapi_register(base, ATA_REG_STATUS); 740} 741 742/** 743 * bfin_tf_read - input device's ATA taskfile shadow registers 744 * @ap: Port from which input is read 745 * @tf: ATA taskfile register set for storing input 746 * 747 * Note: Original code is ata_sff_tf_read(). 748 */ 749 750static void bfin_tf_read(struct ata_port *ap, struct ata_taskfile *tf) 751{ 752 void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr; 753 754 tf->command = bfin_check_status(ap); 755 tf->feature = read_atapi_register(base, ATA_REG_ERR); 756 tf->nsect = read_atapi_register(base, ATA_REG_NSECT); 757 tf->lbal = read_atapi_register(base, ATA_REG_LBAL); 758 tf->lbam = read_atapi_register(base, ATA_REG_LBAM); 759 tf->lbah = read_atapi_register(base, ATA_REG_LBAH); 760 tf->device = read_atapi_register(base, ATA_REG_DEVICE); 761 762 if (tf->flags & ATA_TFLAG_LBA48) { 763 write_atapi_register(base, ATA_REG_CTRL, tf->ctl | ATA_HOB); 764 tf->hob_feature = read_atapi_register(base, ATA_REG_ERR); 765 tf->hob_nsect = read_atapi_register(base, ATA_REG_NSECT); 766 tf->hob_lbal = read_atapi_register(base, ATA_REG_LBAL); 767 tf->hob_lbam = read_atapi_register(base, ATA_REG_LBAM); 768 tf->hob_lbah = read_atapi_register(base, ATA_REG_LBAH); 769 } 770} 771 772/** 773 * bfin_exec_command - issue ATA command to host controller 774 * @ap: port to which command is being issued 775 * @tf: ATA taskfile register set 776 * 777 * Note: Original code is ata_sff_exec_command(). 
778 */ 779 780static void bfin_exec_command(struct ata_port *ap, 781 const struct ata_taskfile *tf) 782{ 783 void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr; 784 dev_dbg(ap->dev, "ata%u: cmd 0x%X\n", ap->print_id, tf->command); 785 786 write_atapi_register(base, ATA_REG_CMD, tf->command); 787 ata_sff_pause(ap); 788} 789 790/** 791 * bfin_check_altstatus - Read device alternate status reg 792 * @ap: port where the device is 793 */ 794 795static u8 bfin_check_altstatus(struct ata_port *ap) 796{ 797 void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr; 798 return read_atapi_register(base, ATA_REG_ALTSTATUS); 799} 800 801/** 802 * bfin_dev_select - Select device 0/1 on ATA bus 803 * @ap: ATA channel to manipulate 804 * @device: ATA device (numbered from zero) to select 805 * 806 * Note: Original code is ata_sff_dev_select(). 807 */ 808 809static void bfin_dev_select(struct ata_port *ap, unsigned int device) 810{ 811 void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr; 812 u8 tmp; 813 814 if (device == 0) 815 tmp = ATA_DEVICE_OBS; 816 else 817 tmp = ATA_DEVICE_OBS | ATA_DEV1; 818 819 write_atapi_register(base, ATA_REG_DEVICE, tmp); 820 ata_sff_pause(ap); 821} 822 823/** 824 * bfin_bmdma_setup - Set up IDE DMA transaction 825 * @qc: Info associated with this ATA transaction. 826 * 827 * Note: Original code is ata_bmdma_setup(). 
828 */ 829 830static void bfin_bmdma_setup(struct ata_queued_cmd *qc) 831{ 832 unsigned short config = WDSIZE_16; 833 struct scatterlist *sg; 834 unsigned int si; 835 836 dev_dbg(qc->ap->dev, "in atapi dma setup\n"); 837 /* Program the ATA_CTRL register with dir */ 838 if (qc->tf.flags & ATA_TFLAG_WRITE) { 839 /* fill the ATAPI DMA controller */ 840 set_dma_config(CH_ATAPI_TX, config); 841 set_dma_x_modify(CH_ATAPI_TX, 2); 842 for_each_sg(qc->sg, sg, qc->n_elem, si) { 843 set_dma_start_addr(CH_ATAPI_TX, sg_dma_address(sg)); 844 set_dma_x_count(CH_ATAPI_TX, sg_dma_len(sg) >> 1); 845 } 846 } else { 847 config |= WNR; 848 /* fill the ATAPI DMA controller */ 849 set_dma_config(CH_ATAPI_RX, config); 850 set_dma_x_modify(CH_ATAPI_RX, 2); 851 for_each_sg(qc->sg, sg, qc->n_elem, si) { 852 set_dma_start_addr(CH_ATAPI_RX, sg_dma_address(sg)); 853 set_dma_x_count(CH_ATAPI_RX, sg_dma_len(sg) >> 1); 854 } 855 } 856} 857 858/** 859 * bfin_bmdma_start - Start an IDE DMA transaction 860 * @qc: Info associated with this ATA transaction. 861 * 862 * Note: Original code is ata_bmdma_start(). 863 */ 864 865static void bfin_bmdma_start(struct ata_queued_cmd *qc) 866{ 867 struct ata_port *ap = qc->ap; 868 void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr; 869 struct scatterlist *sg; 870 unsigned int si; 871 872 dev_dbg(qc->ap->dev, "in atapi dma start\n"); 873 if (!(ap->udma_mask || ap->mwdma_mask)) 874 return; 875 876 /* start ATAPI DMA controller*/ 877 if (qc->tf.flags & ATA_TFLAG_WRITE) { 878 /* 879 * On blackfin arch, uncacheable memory is not 880 * allocated with flag GFP_DMA. DMA buffer from 881 * common kenel code should be flushed if WB 882 * data cache is enabled. Otherwise, this loop 883 * is an empty loop and optimized out. 
884 */ 885 for_each_sg(qc->sg, sg, qc->n_elem, si) { 886 flush_dcache_range(sg_dma_address(sg), 887 sg_dma_address(sg) + sg_dma_len(sg)); 888 } 889 enable_dma(CH_ATAPI_TX); 890 dev_dbg(qc->ap->dev, "enable udma write\n"); 891 892 /* Send ATA DMA write command */ 893 bfin_exec_command(ap, &qc->tf); 894 895 /* set ATA DMA write direction */ 896 ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base) 897 | XFER_DIR)); 898 } else { 899 enable_dma(CH_ATAPI_RX); 900 dev_dbg(qc->ap->dev, "enable udma read\n"); 901 902 /* Send ATA DMA read command */ 903 bfin_exec_command(ap, &qc->tf); 904 905 /* set ATA DMA read direction */ 906 ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base) 907 & ~XFER_DIR)); 908 } 909 910 /* Reset all transfer count */ 911 ATAPI_SET_CONTROL(base, ATAPI_GET_CONTROL(base) | TFRCNT_RST); 912 913 /* Set ATAPI state machine contorl in terminate sequence */ 914 ATAPI_SET_CONTROL(base, ATAPI_GET_CONTROL(base) | END_ON_TERM); 915 916 /* Set transfer length to buffer len */ 917 for_each_sg(qc->sg, sg, qc->n_elem, si) { 918 ATAPI_SET_XFER_LEN(base, (sg_dma_len(sg) >> 1)); 919 } 920 921 /* Enable ATA DMA operation*/ 922 if (ap->udma_mask) 923 ATAPI_SET_CONTROL(base, ATAPI_GET_CONTROL(base) 924 | ULTRA_START); 925 else 926 ATAPI_SET_CONTROL(base, ATAPI_GET_CONTROL(base) 927 | MULTI_START); 928} 929 930/** 931 * bfin_bmdma_stop - Stop IDE DMA transfer 932 * @qc: Command we are ending DMA for 933 */ 934 935static void bfin_bmdma_stop(struct ata_queued_cmd *qc) 936{ 937 struct ata_port *ap = qc->ap; 938 struct scatterlist *sg; 939 unsigned int si; 940 941 dev_dbg(qc->ap->dev, "in atapi dma stop\n"); 942 if (!(ap->udma_mask || ap->mwdma_mask)) 943 return; 944 945 /* stop ATAPI DMA controller*/ 946 if (qc->tf.flags & ATA_TFLAG_WRITE) 947 disable_dma(CH_ATAPI_TX); 948 else { 949 disable_dma(CH_ATAPI_RX); 950 if (ap->hsm_task_state & HSM_ST_LAST) { 951 /* 952 * On blackfin arch, uncacheable memory is not 953 * allocated with flag GFP_DMA. 
DMA buffer from 954 * common kenel code should be invalidated if 955 * data cache is enabled. Otherwise, this loop 956 * is an empty loop and optimized out. 957 */ 958 for_each_sg(qc->sg, sg, qc->n_elem, si) { 959 invalidate_dcache_range( 960 sg_dma_address(sg), 961 sg_dma_address(sg) 962 + sg_dma_len(sg)); 963 } 964 } 965 } 966} 967 968/** 969 * bfin_devchk - PATA device presence detection 970 * @ap: ATA channel to examine 971 * @device: Device to examine (starting at zero) 972 * 973 * Note: Original code is ata_devchk(). 974 */ 975 976static unsigned int bfin_devchk(struct ata_port *ap, 977 unsigned int device) 978{ 979 void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr; 980 u8 nsect, lbal; 981 982 bfin_dev_select(ap, device); 983 984 write_atapi_register(base, ATA_REG_NSECT, 0x55); 985 write_atapi_register(base, ATA_REG_LBAL, 0xaa); 986 987 write_atapi_register(base, ATA_REG_NSECT, 0xaa); 988 write_atapi_register(base, ATA_REG_LBAL, 0x55); 989 990 write_atapi_register(base, ATA_REG_NSECT, 0x55); 991 write_atapi_register(base, ATA_REG_LBAL, 0xaa); 992 993 nsect = read_atapi_register(base, ATA_REG_NSECT); 994 lbal = read_atapi_register(base, ATA_REG_LBAL); 995 996 if ((nsect == 0x55) && (lbal == 0xaa)) 997 return 1; /* we found a device */ 998 999 return 0; /* nothing found */ 1000} 1001 1002/** 1003 * bfin_bus_post_reset - PATA device post reset 1004 * 1005 * Note: Original code is ata_bus_post_reset(). 
1006 */ 1007 1008static void bfin_bus_post_reset(struct ata_port *ap, unsigned int devmask) 1009{ 1010 void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr; 1011 unsigned int dev0 = devmask & (1 << 0); 1012 unsigned int dev1 = devmask & (1 << 1); 1013 unsigned long deadline; 1014 1015 /* if device 0 was found in ata_devchk, wait for its 1016 * BSY bit to clear 1017 */ 1018 if (dev0) 1019 ata_sff_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT); 1020 1021 /* if device 1 was found in ata_devchk, wait for 1022 * register access, then wait for BSY to clear 1023 */ 1024 deadline = ata_deadline(jiffies, ATA_TMOUT_BOOT); 1025 while (dev1) { 1026 u8 nsect, lbal; 1027 1028 bfin_dev_select(ap, 1); 1029 nsect = read_atapi_register(base, ATA_REG_NSECT); 1030 lbal = read_atapi_register(base, ATA_REG_LBAL); 1031 if ((nsect == 1) && (lbal == 1)) 1032 break; 1033 if (time_after(jiffies, deadline)) { 1034 dev1 = 0; 1035 break; 1036 } 1037 msleep(50); /* give drive a breather */ 1038 } 1039 if (dev1) 1040 ata_sff_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT); 1041 1042 /* is all this really necessary? */ 1043 bfin_dev_select(ap, 0); 1044 if (dev1) 1045 bfin_dev_select(ap, 1); 1046 if (dev0) 1047 bfin_dev_select(ap, 0); 1048} 1049 1050/** 1051 * bfin_bus_softreset - PATA device software reset 1052 * 1053 * Note: Original code is ata_bus_softreset(). 1054 */ 1055 1056static unsigned int bfin_bus_softreset(struct ata_port *ap, 1057 unsigned int devmask) 1058{ 1059 void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr; 1060 1061 /* software reset. causes dev0 to be selected */ 1062 write_atapi_register(base, ATA_REG_CTRL, ap->ctl); 1063 udelay(20); 1064 write_atapi_register(base, ATA_REG_CTRL, ap->ctl | ATA_SRST); 1065 udelay(20); 1066 write_atapi_register(base, ATA_REG_CTRL, ap->ctl); 1067 1068 /* spec mandates ">= 2ms" before checking status. 
	 * We wait 150ms, because that was the magic delay used for
	 * ATAPI devices in Hale Landis's ATADRVR, for the period of time
	 * between when the ATA command register is written, and then
	 * status is checked.  Because waiting for "a while" before
	 * checking status is fine, post SRST, we perform this magic
	 * delay here as well.
	 *
	 * Old drivers/ide uses the 2mS rule and then waits for ready
	 */
	msleep(150);

	/* Before we perform post reset processing we want to see if
	 * the bus shows 0xFF because the odd clown forgets the D7
	 * pulldown resistor.
	 */
	if (bfin_check_status(ap) == 0xFF)
		return 0;

	bfin_bus_post_reset(ap, devmask);

	return 0;
}

/**
 *	bfin_softreset - reset host port via ATA SRST
 *	@link: ATA link to reset
 *	@classes: resulting classes of attached devices
 *	@deadline: deadline jiffies for the operation
 *
 *	Returns 0 on success, -EIO when the bus softreset fails.
 *
 *	Note: Original code is ata_sff_softreset().
 */

static int bfin_softreset(struct ata_link *link, unsigned int *classes,
			  unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
	unsigned int devmask = 0, err_mask;
	u8 err;

	/* determine if device 0/1 are present */
	if (bfin_devchk(ap, 0))
		devmask |= (1 << 0);
	if (slave_possible && bfin_devchk(ap, 1))
		devmask |= (1 << 1);

	/* select device 0 again */
	bfin_dev_select(ap, 0);

	/* issue bus reset */
	err_mask = bfin_bus_softreset(ap, devmask);
	if (err_mask) {
		ata_port_printk(ap, KERN_ERR, "SRST failed (err_mask=0x%x)\n",
				err_mask);
		return -EIO;
	}

	/* determine by signature whether we have ATA or ATAPI devices */
	classes[0] = ata_sff_dev_classify(&ap->link.device[0],
				devmask & (1 << 0), &err);
	/* err == 0x81 means device 0 failed diagnostics; in that case
	 * device 1's signature cannot be trusted, so skip it.
	 */
	if (slave_possible && err != 0x81)
		classes[1] = ata_sff_dev_classify(&ap->link.device[1],
					devmask & (1 << 1), &err);

	return 0;
}

/**
 *	bfin_bmdma_status - Read IDE DMA status
 *	@ap: Port associated with this ATA transaction.
 *
 *	Translates the controller's ATAPI status/interrupt-status bits
 *	into the generic SFF BMDMA status bits libata expects
 *	(ATA_DMA_ACTIVE / ATA_DMA_INTR / ATA_DMA_ERR).
 */

static unsigned char bfin_bmdma_status(struct ata_port *ap)
{
	unsigned char host_stat = 0;
	void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
	unsigned short int_status = ATAPI_GET_INT_STATUS(base);

	if (ATAPI_GET_STATUS(base) & (MULTI_XFER_ON|ULTRA_XFER_ON))
		host_stat |= ATA_DMA_ACTIVE;
	if (int_status & (MULTI_DONE_INT|UDMAIN_DONE_INT|UDMAOUT_DONE_INT|
		ATAPI_DEV_INT))
		host_stat |= ATA_DMA_INTR;
	/* any terminate interrupt is reported as both error and intr */
	if (int_status & (MULTI_TERM_INT|UDMAIN_TERM_INT|UDMAOUT_TERM_INT))
		host_stat |= ATA_DMA_ERR|ATA_DMA_INTR;

	dev_dbg(ap->dev, "ATAPI: host_stat=0x%x\n", host_stat);

	return host_stat;
}

/**
 *	bfin_data_xfer - Transfer data by PIO
 *	@dev: device for this I/O
 *	@buf: data buffer
 *	@buflen: buffer length in bytes
 *	@rw: READ or WRITE
 *
 *	Transfers @buflen bytes 16 bits at a time; an odd trailing byte
 *	is handled via a padded bounce word.  Returns the number of
 *	bytes consumed, rounded up to an even count.
 *
 *	Note: Original code is ata_sff_data_xfer().
 */

static unsigned int bfin_data_xfer(struct ata_device *dev, unsigned char *buf,
				   unsigned int buflen, int rw)
{
	struct ata_port *ap = dev->link->ap;
	void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
	unsigned int words = buflen >> 1;
	unsigned short *buf16 = (u16 *)buf;

	/* Transfer multiple of 2 bytes */
	if (rw == READ)
		read_atapi_data(base, words, buf16);
	else
		write_atapi_data(base, words, buf16);

	/* Transfer trailing 1 byte, if any.
	 */
	if (unlikely(buflen & 0x01)) {
		unsigned short align_buf[1] = { 0 };
		unsigned char *trailing_buf = buf + buflen - 1;

		if (rw == READ) {
			read_atapi_data(base, 1, align_buf);
			memcpy(trailing_buf, align_buf, 1);
		} else {
			memcpy(align_buf, trailing_buf, 1);
			write_atapi_data(base, 1, align_buf);
		}
		words++;
	}

	return words << 1;
}

/**
 *	bfin_irq_clear - Clear ATAPI interrupt.
 *	@ap: Port associated with this ATA transaction.
 *
 *	Writes the current interrupt status plus every DMA
 *	done/terminate and device-interrupt bit back to the
 *	interrupt-status register — presumably write-1-to-clear
 *	semantics (hardware manual should confirm).
 *
 *	Note: Original code is ata_sff_irq_clear().
 */

static void bfin_irq_clear(struct ata_port *ap)
{
	void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;

	dev_dbg(ap->dev, "in atapi irq clear\n");
	ATAPI_SET_INT_STATUS(base, ATAPI_GET_INT_STATUS(base)|ATAPI_DEV_INT
		| MULTI_DONE_INT | UDMAIN_DONE_INT | UDMAOUT_DONE_INT
		| MULTI_TERM_INT | UDMAIN_TERM_INT | UDMAOUT_TERM_INT);
}

/**
 *	bfin_irq_on - Enable interrupts on a port.
 *	@ap: Port on which interrupts are enabled.
 *
 *	Clears ATA_NIEN in the device control register, waits for the
 *	device to go idle and acks any stale interrupt.  Returns the
 *	status from ata_wait_idle().
 *
 *	Note: Original code is ata_sff_irq_on().
 */

static unsigned char bfin_irq_on(struct ata_port *ap)
{
	void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
	u8 tmp;

	dev_dbg(ap->dev, "in atapi irq on\n");
	ap->ctl &= ~ATA_NIEN;
	ap->last_ctl = ap->ctl;

	write_atapi_register(base, ATA_REG_CTRL, ap->ctl);
	tmp = ata_wait_idle(ap);

	bfin_irq_clear(ap);

	return tmp;
}

/**
 *	bfin_freeze - Freeze DMA controller port
 *	@ap: port to freeze
 *
 *	Note: Original code is ata_sff_freeze().
 */

static void bfin_freeze(struct ata_port *ap)
{
	void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;

	dev_dbg(ap->dev, "in atapi dma freeze\n");
	/* mask device interrupts via ATA_NIEN */
	ap->ctl |= ATA_NIEN;
	ap->last_ctl = ap->ctl;

	write_atapi_register(base, ATA_REG_CTRL, ap->ctl);

	/* Under certain circumstances, some controllers raise IRQ on
	 * ATA_NIEN manipulation.  Also, many controllers fail to mask
	 * previously pending IRQ on ATA_NIEN assertion.  Clear it.
	 */
	ap->ops->sff_check_status(ap);

	bfin_irq_clear(ap);
}

/**
 *	bfin_thaw - Thaw DMA controller port
 *	@ap: port to thaw
 *
 *	Note: Original code is ata_sff_thaw().
 */

/* NOTE(review): not static, yet only referenced through bfin_pata_ops in
 * this file — looks like it could be made static; confirm no external user.
 */
void bfin_thaw(struct ata_port *ap)
{
	dev_dbg(ap->dev, "in atapi dma thaw\n");
	/* read status to clear any pending INTRQ, then unmask */
	bfin_check_status(ap);
	bfin_irq_on(ap);
}

/**
 *	bfin_postreset - standard postreset callback
 *	@link: the target ata_link
 *	@classes: classes of attached devices
 *
 *	Note: Original code is ata_sff_postreset().
 */

static void bfin_postreset(struct ata_link *link, unsigned int *classes)
{
	struct ata_port *ap = link->ap;
	void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;

	/* re-enable interrupts */
	bfin_irq_on(ap);

	/* is double-select really necessary?
	 */
	if (classes[0] != ATA_DEV_NONE)
		bfin_dev_select(ap, 1);
	if (classes[1] != ATA_DEV_NONE)
		bfin_dev_select(ap, 0);

	/* bail out if no device is present */
	if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
		return;
	}

	/* set up device control */
	write_atapi_register(base, ATA_REG_CTRL, ap->ctl);
}

/* Release the RX/TX DMA channels acquired in bfin_port_start(), if any. */
static void bfin_port_stop(struct ata_port *ap)
{
	dev_dbg(ap->dev, "in atapi port stop\n");
	if (ap->udma_mask != 0 || ap->mwdma_mask != 0) {
		free_dma(CH_ATAPI_RX);
		free_dma(CH_ATAPI_TX);
	}
}

/* Acquire the ATAPI RX/TX DMA channels when any DMA mode is enabled.
 * On failure the DMA masks are cleared and the port falls back to PIO;
 * always returns 0 so the port still comes up.
 */
static int bfin_port_start(struct ata_port *ap)
{
	dev_dbg(ap->dev, "in atapi port start\n");
	if (!(ap->udma_mask || ap->mwdma_mask))
		return 0;

	if (request_dma(CH_ATAPI_RX, "BFIN ATAPI RX DMA") >= 0) {
		if (request_dma(CH_ATAPI_TX,
			"BFIN ATAPI TX DMA") >= 0)
			return 0;

		/* TX failed — release the RX channel we already hold */
		free_dma(CH_ATAPI_RX);
	}

	ap->udma_mask = 0;
	ap->mwdma_mask = 0;
	dev_err(ap->dev, "Unable to request ATAPI DMA!"
		" Continue in PIO mode.\n");

	return 0;
}

/* Per-port interrupt service routine; returns 1 if the IRQ was handled,
 * 0 if it was not ours (idle IRQ).
 */
static unsigned int bfin_ata_host_intr(struct ata_port *ap,
				   struct ata_queued_cmd *qc)
{
	struct ata_eh_info *ehi = &ap->link.eh_info;
	u8 status, host_stat = 0;

	VPRINTK("ata%u: protocol %d task_state %d\n",
		ap->print_id, qc->tf.protocol, ap->hsm_task_state);

	/* Check whether we are expecting interrupt in this state */
	switch (ap->hsm_task_state) {
	case HSM_ST_FIRST:
		/* Some pre-ATAPI-4 devices assert INTRQ
		 * at this state when ready to receive CDB.
		 */

		/* Check the ATA_DFLAG_CDB_INTR flag is enough here.
		 * The flag was turned on only for atapi devices.
		 * No need to check is_atapi_taskfile(&qc->tf) again.
		 */
		if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
			goto idle_irq;
		break;
	case HSM_ST_LAST:
		if (qc->tf.protocol == ATA_PROT_DMA ||
		    qc->tf.protocol == ATAPI_PROT_DMA) {
			/* check status of DMA engine */
			host_stat = ap->ops->bmdma_status(ap);
			VPRINTK("ata%u: host_stat 0x%X\n",
				ap->print_id, host_stat);

			/* if it's not our irq... */
			if (!(host_stat & ATA_DMA_INTR))
				goto idle_irq;

			/* before we do anything else, clear DMA-Start bit */
			ap->ops->bmdma_stop(qc);

			if (unlikely(host_stat & ATA_DMA_ERR)) {
				/* error when transfering data to/from memory */
				qc->err_mask |= AC_ERR_HOST_BUS;
				ap->hsm_task_state = HSM_ST_ERR;
			}
		}
		break;
	case HSM_ST:
		break;
	default:
		goto idle_irq;
	}

	/* check altstatus */
	status = ap->ops->sff_check_altstatus(ap);
	if (status & ATA_BUSY)
		goto busy_ata;

	/* check main status, clearing INTRQ */
	status = ap->ops->sff_check_status(ap);
	if (unlikely(status & ATA_BUSY))
		goto busy_ata;

	/* ack bmdma irq events */
	ap->ops->sff_irq_clear(ap);

	ata_sff_hsm_move(ap, qc, status, 0);

	if (unlikely(qc->err_mask) && (qc->tf.protocol == ATA_PROT_DMA ||
		qc->tf.protocol == ATAPI_PROT_DMA))
		ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);

busy_ata:
	return 1;	/* irq handled */

idle_irq:
	ap->stats.idle_irq++;

#ifdef ATA_IRQ_TRAP
	if ((ap->stats.idle_irq % 1000) == 0) {
		ap->ops->irq_ack(ap, 0);	/* debug trap */
		ata_port_printk(ap, KERN_WARNING, "irq trap\n");
		return 1;
	}
#endif
	return 0;	/* irq not handled */
}

/* Shared-IRQ top-level handler: walk every port of the host and service
 * any active, non-polling queued command.
 */
static irqreturn_t bfin_ata_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	unsigned int i;
	unsigned int handled = 0;
	unsigned long flags;

	/* TODO: make _irqsave conditional on x86
	   PCI IDE legacy mode */
	spin_lock_irqsave(&host->lock, flags);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap;

		ap = host->ports[i];
		if (ap &&
		    !(ap->flags & ATA_FLAG_DISABLED)) {
			struct ata_queued_cmd *qc;

			qc = ata_qc_from_tag(ap, ap->link.active_tag);
			/* polling commands are completed by the HSM
			 * poller, not by this interrupt path
			 */
			if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) &&
			    (qc->flags & ATA_QCFLAG_ACTIVE))
				handled |= bfin_ata_host_intr(ap, qc);
		}
	}

	spin_unlock_irqrestore(&host->lock, flags);

	return IRQ_RETVAL(handled);
}


static struct scsi_host_template bfin_sht = {
	ATA_BASE_SHT(DRV_NAME),
	.sg_tablesize		= SG_NONE,
	.dma_boundary		= ATA_DMA_BOUNDARY,
};

/* SFF port operations, overriding the generic helpers with the
 * BF54x ATAPI register accessors defined above.
 */
static struct ata_port_operations bfin_pata_ops = {
	.inherits		= &ata_sff_port_ops,

	.set_piomode		= bfin_set_piomode,
	.set_dmamode		= bfin_set_dmamode,

	.sff_tf_load		= bfin_tf_load,
	.sff_tf_read		= bfin_tf_read,
	.sff_exec_command	= bfin_exec_command,
	.sff_check_status	= bfin_check_status,
	.sff_check_altstatus	= bfin_check_altstatus,
	.sff_dev_select		= bfin_dev_select,

	.bmdma_setup		= bfin_bmdma_setup,
	.bmdma_start		= bfin_bmdma_start,
	.bmdma_stop		= bfin_bmdma_stop,
	.bmdma_status		= bfin_bmdma_status,
	.sff_data_xfer		= bfin_data_xfer,

	.qc_prep		= ata_noop_qc_prep,

	.freeze			= bfin_freeze,
	.thaw			= bfin_thaw,
	.softreset		= bfin_softreset,
	.postreset		= bfin_postreset,

	.sff_irq_clear		= bfin_irq_clear,
	.sff_irq_on		= bfin_irq_on,

	.port_start		= bfin_port_start,
	.port_stop		= bfin_port_stop,
};

/* DMA masks start at 0; bfin_atapi_init() and the probe routine adjust
 * them according to the module parameter and the system clock.
 */
static struct ata_port_info bfin_port_info[] = {
	{
		.flags		= ATA_FLAG_SLAVE_POSS
				| ATA_FLAG_MMIO
				| ATA_FLAG_NO_LEGACY,
		.pio_mask	= ATA_PIO4,
		.mwdma_mask	= 0,
		.udma_mask	= 0,
		.port_ops	= &bfin_pata_ops,
	},
};

/**
 *	bfin_reset_controller - initialize
 *	BF54x ATAPI controller.
 *
 *	Pulses DEV_RST, busy-waits for BSY to clear, then leaves only
 *	the ATAPI device interrupt enabled.  Returns non-zero if the
 *	busy-wait timed out.
 */

static int bfin_reset_controller(struct ata_host *host)
{
	void __iomem *base = (void __iomem *)host->ports[0]->ioaddr.ctl_addr;
	int count;
	unsigned short status;

	/* Disable all ATAPI interrupts */
	ATAPI_SET_INT_MASK(base, 0);
	SSYNC();

	/* Assert the RESET signal 25us*/
	ATAPI_SET_CONTROL(base, ATAPI_GET_CONTROL(base) | DEV_RST);
	udelay(30);

	/* Negate the RESET signal for 2ms*/
	ATAPI_SET_CONTROL(base, ATAPI_GET_CONTROL(base) & ~DEV_RST);
	msleep(2);

	/* Wait on Busy flag to clear */
	count = 10000000;
	do {
		status = read_atapi_register(base, ATA_REG_STATUS);
	} while (--count && (status & ATA_BUSY));

	/* Enable only ATAPI Device interrupt */
	ATAPI_SET_INT_MASK(base, 1);
	SSYNC();

	/* non-zero (failure) when count reached zero, i.e. timeout */
	return (!count);
}

/**
 * atapi_io_port - define atapi peripheral port pins.
 *
 * Zero-terminated list passed to peripheral_request_list() at probe
 * time and peripheral_free_list() on remove/failure.
 */
static unsigned short atapi_io_port[] = {
	P_ATAPI_RESET,
	P_ATAPI_DIOR,
	P_ATAPI_DIOW,
	P_ATAPI_CS0,
	P_ATAPI_CS1,
	P_ATAPI_DMACK,
	P_ATAPI_DMARQ,
	P_ATAPI_INTRQ,
	P_ATAPI_IORDY,
	P_ATAPI_D0A,
	P_ATAPI_D1A,
	P_ATAPI_D2A,
	P_ATAPI_D3A,
	P_ATAPI_D4A,
	P_ATAPI_D5A,
	P_ATAPI_D6A,
	P_ATAPI_D7A,
	P_ATAPI_D8A,
	P_ATAPI_D9A,
	P_ATAPI_D10A,
	P_ATAPI_D11A,
	P_ATAPI_D12A,
	P_ATAPI_D13A,
	P_ATAPI_D14A,
	P_ATAPI_D15A,
	P_ATAPI_A0A,
	P_ATAPI_A1A,
	P_ATAPI_A2A,
	0
};

/**
 *	bfin_atapi_probe - attach a bfin atapi interface
 *	@pdev: platform device
 *
 *	Register a bfin atapi interface.
 *
 *
 *	Platform devices are expected to contain 2 resources per port:
 *
 *	- Register base (IORESOURCE_MEM)
 *	- IRQ (IORESOURCE_IRQ)
 *
 */
static int __devinit bfin_atapi_probe(struct platform_device *pdev)
{
	int board_idx = 0;
	struct resource *res;
	struct ata_host *host;
	unsigned int fsclk = get_sclk();
	int udma_mode = 5;
	const struct ata_port_info *ppi[] =
		{ &bfin_port_info[board_idx], NULL };

	/*
	 * Simple resource validation ..
	 */
	if (unlikely(pdev->num_resources != 2)) {
		dev_err(&pdev->dev, "invalid number of resources\n");
		return -EINVAL;
	}

	/*
	 * Get the register base first
	 */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res == NULL)
		return -EINVAL;

	/* Drop every UDMA mode whose clock requirement exceeds the
	 * system clock, shrinking the advertised udma_mask to match.
	 */
	while (bfin_port_info[board_idx].udma_mask > 0 &&
			udma_fsclk[udma_mode] > fsclk) {
		udma_mode--;
		bfin_port_info[board_idx].udma_mask >>= 1;
	}

	/*
	 * Now that that's out of the way, wire up the port..
1628 */ 1629 host = ata_host_alloc_pinfo(&pdev->dev, ppi, 1); 1630 if (!host) 1631 return -ENOMEM; 1632 1633 host->ports[0]->ioaddr.ctl_addr = (void *)res->start; 1634 1635 if (peripheral_request_list(atapi_io_port, "atapi-io-port")) { 1636 dev_err(&pdev->dev, "Requesting Peripherals faild\n"); 1637 return -EFAULT; 1638 } 1639 1640 if (bfin_reset_controller(host)) { 1641 peripheral_free_list(atapi_io_port); 1642 dev_err(&pdev->dev, "Fail to reset ATAPI device\n"); 1643 return -EFAULT; 1644 } 1645 1646 if (ata_host_activate(host, platform_get_irq(pdev, 0), 1647 bfin_ata_interrupt, IRQF_SHARED, &bfin_sht) != 0) { 1648 peripheral_free_list(atapi_io_port); 1649 dev_err(&pdev->dev, "Fail to attach ATAPI device\n"); 1650 return -ENODEV; 1651 } 1652 1653 dev_set_drvdata(&pdev->dev, host); 1654 1655 return 0; 1656} 1657 1658/** 1659 * bfin_atapi_remove - unplug a bfin atapi interface 1660 * @pdev: platform device 1661 * 1662 * A bfin atapi device has been unplugged. Perform the needed 1663 * cleanup. Also called on module unload for any active devices. 
1664 */ 1665static int __devexit bfin_atapi_remove(struct platform_device *pdev) 1666{ 1667 struct device *dev = &pdev->dev; 1668 struct ata_host *host = dev_get_drvdata(dev); 1669 1670 ata_host_detach(host); 1671 dev_set_drvdata(&pdev->dev, NULL); 1672 1673 peripheral_free_list(atapi_io_port); 1674 1675 return 0; 1676} 1677 1678#ifdef CONFIG_PM 1679static int bfin_atapi_suspend(struct platform_device *pdev, pm_message_t state) 1680{ 1681 struct ata_host *host = dev_get_drvdata(&pdev->dev); 1682 if (host) 1683 return ata_host_suspend(host, state); 1684 else 1685 return 0; 1686} 1687 1688static int bfin_atapi_resume(struct platform_device *pdev) 1689{ 1690 struct ata_host *host = dev_get_drvdata(&pdev->dev); 1691 int ret; 1692 1693 if (host) { 1694 ret = bfin_reset_controller(host); 1695 if (ret) { 1696 printk(KERN_ERR DRV_NAME ": Error during HW init\n"); 1697 return ret; 1698 } 1699 ata_host_resume(host); 1700 } 1701 1702 return 0; 1703} 1704#else 1705#define bfin_atapi_suspend NULL 1706#define bfin_atapi_resume NULL 1707#endif 1708 1709static struct platform_driver bfin_atapi_driver = { 1710 .probe = bfin_atapi_probe, 1711 .remove = __devexit_p(bfin_atapi_remove), 1712 .suspend = bfin_atapi_suspend, 1713 .resume = bfin_atapi_resume, 1714 .driver = { 1715 .name = DRV_NAME, 1716 .owner = THIS_MODULE, 1717 }, 1718}; 1719 1720#define ATAPI_MODE_SIZE 10 1721static char bfin_atapi_mode[ATAPI_MODE_SIZE]; 1722 1723static int __init bfin_atapi_init(void) 1724{ 1725 pr_info("register bfin atapi driver\n"); 1726 1727 switch(bfin_atapi_mode[0]) { 1728 case 'p': 1729 case 'P': 1730 break; 1731 case 'm': 1732 case 'M': 1733 bfin_port_info[0].mwdma_mask = ATA_MWDMA2; 1734 break; 1735 default: 1736 bfin_port_info[0].udma_mask = ATA_UDMA5; 1737 }; 1738 1739 return platform_driver_register(&bfin_atapi_driver); 1740} 1741 1742static void __exit bfin_atapi_exit(void) 1743{ 1744 platform_driver_unregister(&bfin_atapi_driver); 1745} 1746 1747module_init(bfin_atapi_init); 
module_exit(bfin_atapi_exit);
/*
 * ATAPI mode (module parameter "bfin_atapi_mode", parsed in
 * bfin_atapi_init() by its first character):
 * pio/PIO
 * udma/UDMA (default)
 * mwdma/MWDMA
 */
module_param_string(bfin_atapi_mode, bfin_atapi_mode, ATAPI_MODE_SIZE, 0);

MODULE_AUTHOR("Sonic Zhang <sonic.zhang@analog.com>");
MODULE_DESCRIPTION("PATA driver for blackfin 54x ATAPI controller");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_ALIAS("platform:" DRV_NAME);