Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/drzeus/mmc

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/drzeus/mmc: (68 commits)
sdio_uart: Fix SDIO break control to now return success or an error
mmc: host driver for Ricoh Bay1Controllers
sdio: sdio_io.c Fix sparse warnings
sdio: fix the use of hard coded timeout value.
mmc: OLPC: update vdd/powerup quirk comment
mmc: fix spares errors of sdhci.c
mmc: remove multiwrite capability
wbsd: fix bad dma_addr_t conversion
atmel-mci: Driver for Atmel on-chip MMC controllers
mmc: fix sdio_io sparse errors
mmc: wbsd.c fix shadowing of 'dma' variable
MMC: S3C24XX: Refuse incorrectly aligned transfers
MMC: S3C24XX: Add maintainer entry
MMC: S3C24XX: Update error debugging.
MMC: S3C24XX: Add media presence test to request handling.
MMC: S3C24XX: Fix use of msecs where jiffies are needed
MMC: S3C24XX: Add MODULE_ALIAS() entries for the platform devices
MMC: S3C24XX: Fix s3c2410_dma_request() return code check.
MMC: S3C24XX: Allow card-detect on non-IRQ capable pin
MMC: S3C24XX: Ensure host->mrq->data is valid
...

Manually fixed up bogus executable bits on drivers/mmc/core/sdio_io.c
and include/linux/mmc/sdio_func.h when merging.

+6086 -1352
+16 -1
MAINTAINERS
··· 348 348 S: Maintained 349 349 350 350 ALCHEMY AU1XX0 MMC DRIVER 351 - S: Orphan 351 + P: Manuel Lauss 352 + M: manuel.lauss@gmail.com 353 + S: Maintained 352 354 353 355 ALI1563 I2C DRIVER 354 356 P: Rudolf Marek ··· 3561 3559 W: http://www.ibm.com/developerworks/linux/linux390/ 3562 3560 S: Supported 3563 3561 3562 + S3C24XX SD/MMC Driver 3563 + P: Ben Dooks 3564 + M: ben-linux@fluff.org 3565 + L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only) 3566 + L: linux-kernel@vger.kernel.org 3567 + S: Supported 3568 + 3564 3569 SAA7146 VIDEO4LINUX-2 DRIVER 3565 3570 P: Michael Hunold 3566 3571 M: michael@mihu.de ··· 3638 3629 SCx200 HRT CLOCKSOURCE DRIVER 3639 3630 P: Jim Cromie 3640 3631 M: jim.cromie@gmail.com 3632 + S: Maintained 3633 + 3634 + SDRICOH_CS MMC/SD HOST CONTROLLER INTERFACE DRIVER 3635 + P: Sascha Sommer 3636 + M: saschasommer@freenet.de 3637 + L: sdricohcs-devel@lists.sourceforge.net (subscribers-only) 3641 3638 S: Maintained 3642 3639 3643 3640 SECURITY CONTACT
+7
arch/avr32/boards/atngw100/setup.c
··· 19 19 #include <linux/leds.h> 20 20 #include <linux/spi/spi.h> 21 21 22 + #include <asm/atmel-mci.h> 22 23 #include <asm/io.h> 23 24 #include <asm/setup.h> 24 25 ··· 50 49 .max_speed_hz = 10000000, 51 50 .chip_select = 0, 52 51 }, 52 + }; 53 + 54 + static struct mci_platform_data __initdata mci0_data = { 55 + .detect_pin = GPIO_PIN_PC(25), 56 + .wp_pin = GPIO_PIN_PE(0), 53 57 }; 54 58 55 59 /* ··· 176 170 set_hw_addr(at32_add_device_eth(1, &eth_data[1])); 177 171 178 172 at32_add_device_spi(0, spi0_board_info, ARRAY_SIZE(spi0_board_info)); 173 + at32_add_device_mci(0, &mci0_data); 179 174 at32_add_device_usba(0, NULL); 180 175 181 176 for (i = 0; i < ARRAY_SIZE(ngw_leds); i++) {
+3
arch/avr32/boards/atstk1000/atstk1002.c
··· 234 234 #ifdef CONFIG_BOARD_ATSTK100X_SPI1 235 235 at32_add_device_spi(1, spi1_board_info, ARRAY_SIZE(spi1_board_info)); 236 236 #endif 237 + #ifndef CONFIG_BOARD_ATSTK1002_SW2_CUSTOM 238 + at32_add_device_mci(0, NULL); 239 + #endif 237 240 #ifdef CONFIG_BOARD_ATSTK1002_SW5_CUSTOM 238 241 set_hw_addr(at32_add_device_eth(1, &eth_data[1])); 239 242 #else
+25 -5
arch/avr32/mach-at32ap/at32ap700x.c
··· 14 14 #include <linux/spi/spi.h> 15 15 #include <linux/usb/atmel_usba_udc.h> 16 16 17 + #include <asm/atmel-mci.h> 17 18 #include <asm/io.h> 18 19 #include <asm/irq.h> 19 20 ··· 1279 1278 .index = 9, 1280 1279 }; 1281 1280 1282 - struct platform_device *__init at32_add_device_mci(unsigned int id) 1281 + struct platform_device *__init 1282 + at32_add_device_mci(unsigned int id, struct mci_platform_data *data) 1283 1283 { 1284 - struct platform_device *pdev; 1284 + struct mci_platform_data _data; 1285 + struct platform_device *pdev; 1286 + struct dw_dma_slave *dws; 1285 1287 1286 1288 if (id != 0) 1287 1289 return NULL; 1288 1290 1289 1291 pdev = platform_device_alloc("atmel_mci", id); 1290 1292 if (!pdev) 1291 - return NULL; 1293 + goto fail; 1292 1294 1293 1295 if (platform_device_add_resources(pdev, atmel_mci0_resource, 1294 1296 ARRAY_SIZE(atmel_mci0_resource))) 1295 - goto err_add_resources; 1297 + goto fail; 1298 + 1299 + if (!data) { 1300 + data = &_data; 1301 + memset(data, 0, sizeof(struct mci_platform_data)); 1302 + } 1303 + 1304 + if (platform_device_add_data(pdev, data, 1305 + sizeof(struct mci_platform_data))) 1306 + goto fail; 1296 1307 1297 1308 select_peripheral(PA(10), PERIPH_A, 0); /* CLK */ 1298 1309 select_peripheral(PA(11), PERIPH_A, 0); /* CMD */ ··· 1313 1300 select_peripheral(PA(14), PERIPH_A, 0); /* DATA2 */ 1314 1301 select_peripheral(PA(15), PERIPH_A, 0); /* DATA3 */ 1315 1302 1303 + if (data) { 1304 + if (data->detect_pin != GPIO_PIN_NONE) 1305 + at32_select_gpio(data->detect_pin, 0); 1306 + if (data->wp_pin != GPIO_PIN_NONE) 1307 + at32_select_gpio(data->wp_pin, 0); 1308 + } 1309 + 1316 1310 atmel_mci0_pclk.dev = &pdev->dev; 1317 1311 1318 1312 platform_device_add(pdev); 1319 1313 return pdev; 1320 1314 1321 - err_add_resources: 1315 + fail: 1322 1316 platform_device_put(pdev); 1323 1317 return NULL; 1324 1318 }
+27 -31
drivers/mmc/card/block.c
··· 2 2 * Block driver for media (i.e., flash cards) 3 3 * 4 4 * Copyright 2002 Hewlett-Packard Company 5 - * Copyright 2005-2007 Pierre Ossman 5 + * Copyright 2005-2008 Pierre Ossman 6 6 * 7 7 * Use consistent with the GNU GPL is permitted, 8 8 * provided that this copyright notice is ··· 237 237 if (brq.data.blocks > card->host->max_blk_count) 238 238 brq.data.blocks = card->host->max_blk_count; 239 239 240 - /* 241 - * If the host doesn't support multiple block writes, force 242 - * block writes to single block. SD cards are excepted from 243 - * this rule as they support querying the number of 244 - * successfully written sectors. 245 - */ 246 - if (rq_data_dir(req) != READ && 247 - !(card->host->caps & MMC_CAP_MULTIWRITE) && 248 - !mmc_card_sd(card)) 249 - brq.data.blocks = 1; 250 - 251 240 if (brq.data.blocks > 1) { 252 241 /* SPI multiblock writes terminate using a special 253 242 * token, not a STOP_TRANSMISSION request. ··· 285 296 286 297 mmc_queue_bounce_post(mq); 287 298 299 + /* 300 + * Check for errors here, but don't jump to cmd_err 301 + * until later as we need to wait for the card to leave 302 + * programming mode even when things go wrong. 303 + */ 288 304 if (brq.cmd.error) { 289 305 printk(KERN_ERR "%s: error %d sending read/write command\n", 290 306 req->rq_disk->disk_name, brq.cmd.error); 291 - goto cmd_err; 292 307 } 293 308 294 309 if (brq.data.error) { 295 310 printk(KERN_ERR "%s: error %d transferring data\n", 296 311 req->rq_disk->disk_name, brq.data.error); 297 - goto cmd_err; 298 312 } 299 313 300 314 if (brq.stop.error) { 301 315 printk(KERN_ERR "%s: error %d sending stop command\n", 302 316 req->rq_disk->disk_name, brq.stop.error); 303 - goto cmd_err; 304 317 } 305 318 306 319 if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ) { ··· 335 344 #endif 336 345 } 337 346 347 + if (brq.cmd.error || brq.data.error || brq.stop.error) 348 + goto cmd_err; 349 + 338 350 /* 339 351 * A block was successfully transferred. 
340 352 */ ··· 356 362 * mark the known good sectors as ok. 357 363 * 358 364 * If the card is not SD, we can still ok written sectors 359 - * if the controller can do proper error reporting. 365 + * as reported by the controller (which might be less than 366 + * the real number of written sectors, but never more). 360 367 * 361 368 * For reads we just fail the entire chunk as that should 362 369 * be safe in all cases. 363 370 */ 364 - if (rq_data_dir(req) != READ && mmc_card_sd(card)) { 365 - u32 blocks; 366 - unsigned int bytes; 371 + if (rq_data_dir(req) != READ) { 372 + if (mmc_card_sd(card)) { 373 + u32 blocks; 374 + unsigned int bytes; 367 375 368 - blocks = mmc_sd_num_wr_blocks(card); 369 - if (blocks != (u32)-1) { 370 - if (card->csd.write_partial) 371 - bytes = blocks << md->block_bits; 372 - else 373 - bytes = blocks << 9; 376 + blocks = mmc_sd_num_wr_blocks(card); 377 + if (blocks != (u32)-1) { 378 + if (card->csd.write_partial) 379 + bytes = blocks << md->block_bits; 380 + else 381 + bytes = blocks << 9; 382 + spin_lock_irq(&md->lock); 383 + ret = __blk_end_request(req, 0, bytes); 384 + spin_unlock_irq(&md->lock); 385 + } 386 + } else { 374 387 spin_lock_irq(&md->lock); 375 - ret = __blk_end_request(req, 0, bytes); 388 + ret = __blk_end_request(req, 0, brq.data.bytes_xfered); 376 389 spin_unlock_irq(&md->lock); 377 390 } 378 - } else if (rq_data_dir(req) != READ && 379 - (card->host->caps & MMC_CAP_MULTIWRITE)) { 380 - spin_lock_irq(&md->lock); 381 - ret = __blk_end_request(req, 0, brq.data.bytes_xfered); 382 - spin_unlock_irq(&md->lock); 383 391 } 384 392 385 393 mmc_release_host(card->host);
+383 -200
drivers/mmc/card/mmc_test.c
··· 1 1 /* 2 2 * linux/drivers/mmc/card/mmc_test.c 3 3 * 4 - * Copyright 2007 Pierre Ossman 4 + * Copyright 2007-2008 Pierre Ossman 5 5 * 6 6 * This program is free software; you can redistribute it and/or modify 7 7 * it under the terms of the GNU General Public License as published by ··· 26 26 struct mmc_test_card { 27 27 struct mmc_card *card; 28 28 29 + u8 scratch[BUFFER_SIZE]; 29 30 u8 *buffer; 30 31 }; 31 32 32 33 /*******************************************************************/ 33 - /* Helper functions */ 34 + /* General helper functions */ 34 35 /*******************************************************************/ 35 36 37 + /* 38 + * Configure correct block size in card 39 + */ 36 40 static int mmc_test_set_blksize(struct mmc_test_card *test, unsigned size) 37 41 { 38 42 struct mmc_command cmd; ··· 52 48 return 0; 53 49 } 54 50 55 - static int __mmc_test_transfer(struct mmc_test_card *test, int write, 56 - unsigned broken_xfer, u8 *buffer, unsigned addr, 57 - unsigned blocks, unsigned blksz) 51 + /* 52 + * Fill in the mmc_request structure given a set of transfer parameters. 53 + */ 54 + static void mmc_test_prepare_mrq(struct mmc_test_card *test, 55 + struct mmc_request *mrq, struct scatterlist *sg, unsigned sg_len, 56 + unsigned dev_addr, unsigned blocks, unsigned blksz, int write) 57 + { 58 + BUG_ON(!mrq || !mrq->cmd || !mrq->data || !mrq->stop); 59 + 60 + if (blocks > 1) { 61 + mrq->cmd->opcode = write ? 62 + MMC_WRITE_MULTIPLE_BLOCK : MMC_READ_MULTIPLE_BLOCK; 63 + } else { 64 + mrq->cmd->opcode = write ? 65 + MMC_WRITE_BLOCK : MMC_READ_SINGLE_BLOCK; 66 + } 67 + 68 + mrq->cmd->arg = dev_addr; 69 + mrq->cmd->flags = MMC_RSP_R1 | MMC_CMD_ADTC; 70 + 71 + if (blocks == 1) 72 + mrq->stop = NULL; 73 + else { 74 + mrq->stop->opcode = MMC_STOP_TRANSMISSION; 75 + mrq->stop->arg = 0; 76 + mrq->stop->flags = MMC_RSP_R1B | MMC_CMD_AC; 77 + } 78 + 79 + mrq->data->blksz = blksz; 80 + mrq->data->blocks = blocks; 81 + mrq->data->flags = write ? 
MMC_DATA_WRITE : MMC_DATA_READ; 82 + mrq->data->sg = sg; 83 + mrq->data->sg_len = sg_len; 84 + 85 + mmc_set_data_timeout(mrq->data, test->card); 86 + } 87 + 88 + /* 89 + * Wait for the card to finish the busy state 90 + */ 91 + static int mmc_test_wait_busy(struct mmc_test_card *test) 58 92 { 59 93 int ret, busy; 60 - 61 - struct mmc_request mrq; 62 94 struct mmc_command cmd; 63 - struct mmc_command stop; 64 - struct mmc_data data; 65 - 66 - struct scatterlist sg; 67 - 68 - memset(&mrq, 0, sizeof(struct mmc_request)); 69 - 70 - mrq.cmd = &cmd; 71 - mrq.data = &data; 72 - 73 - memset(&cmd, 0, sizeof(struct mmc_command)); 74 - 75 - if (broken_xfer) { 76 - if (blocks > 1) { 77 - cmd.opcode = write ? 78 - MMC_WRITE_BLOCK : MMC_READ_SINGLE_BLOCK; 79 - } else { 80 - cmd.opcode = MMC_SEND_STATUS; 81 - } 82 - } else { 83 - if (blocks > 1) { 84 - cmd.opcode = write ? 85 - MMC_WRITE_MULTIPLE_BLOCK : MMC_READ_MULTIPLE_BLOCK; 86 - } else { 87 - cmd.opcode = write ? 88 - MMC_WRITE_BLOCK : MMC_READ_SINGLE_BLOCK; 89 - } 90 - } 91 - 92 - if (broken_xfer && blocks == 1) 93 - cmd.arg = test->card->rca << 16; 94 - else 95 - cmd.arg = addr; 96 - cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC; 97 - 98 - memset(&stop, 0, sizeof(struct mmc_command)); 99 - 100 - if (!broken_xfer && (blocks > 1)) { 101 - stop.opcode = MMC_STOP_TRANSMISSION; 102 - stop.arg = 0; 103 - stop.flags = MMC_RSP_R1B | MMC_CMD_AC; 104 - 105 - mrq.stop = &stop; 106 - } 107 - 108 - memset(&data, 0, sizeof(struct mmc_data)); 109 - 110 - data.blksz = blksz; 111 - data.blocks = blocks; 112 - data.flags = write ? 
MMC_DATA_WRITE : MMC_DATA_READ; 113 - data.sg = &sg; 114 - data.sg_len = 1; 115 - 116 - sg_init_one(&sg, buffer, blocks * blksz); 117 - 118 - mmc_set_data_timeout(&data, test->card); 119 - 120 - mmc_wait_for_req(test->card->host, &mrq); 121 - 122 - ret = 0; 123 - 124 - if (broken_xfer) { 125 - if (!ret && cmd.error) 126 - ret = cmd.error; 127 - if (!ret && data.error == 0) 128 - ret = RESULT_FAIL; 129 - if (!ret && data.error != -ETIMEDOUT) 130 - ret = data.error; 131 - if (!ret && stop.error) 132 - ret = stop.error; 133 - if (blocks > 1) { 134 - if (!ret && data.bytes_xfered > blksz) 135 - ret = RESULT_FAIL; 136 - } else { 137 - if (!ret && data.bytes_xfered > 0) 138 - ret = RESULT_FAIL; 139 - } 140 - } else { 141 - if (!ret && cmd.error) 142 - ret = cmd.error; 143 - if (!ret && data.error) 144 - ret = data.error; 145 - if (!ret && stop.error) 146 - ret = stop.error; 147 - if (!ret && data.bytes_xfered != blocks * blksz) 148 - ret = RESULT_FAIL; 149 - } 150 - 151 - if (ret == -EINVAL) 152 - ret = RESULT_UNSUP_HOST; 153 95 154 96 busy = 0; 155 97 do { 156 - int ret2; 157 - 158 98 memset(&cmd, 0, sizeof(struct mmc_command)); 159 99 160 100 cmd.opcode = MMC_SEND_STATUS; 161 101 cmd.arg = test->card->rca << 16; 162 102 cmd.flags = MMC_RSP_R1 | MMC_CMD_AC; 163 103 164 - ret2 = mmc_wait_for_cmd(test->card->host, &cmd, 0); 165 - if (ret2) 104 + ret = mmc_wait_for_cmd(test->card->host, &cmd, 0); 105 + if (ret) 166 106 break; 167 107 168 108 if (!busy && !(cmd.resp[0] & R1_READY_FOR_DATA)) { ··· 120 172 return ret; 121 173 } 122 174 123 - static int mmc_test_transfer(struct mmc_test_card *test, int write, 124 - u8 *buffer, unsigned addr, unsigned blocks, unsigned blksz) 175 + /* 176 + * Transfer a single sector of kernel addressable data 177 + */ 178 + static int mmc_test_buffer_transfer(struct mmc_test_card *test, 179 + u8 *buffer, unsigned addr, unsigned blksz, int write) 125 180 { 126 - return __mmc_test_transfer(test, write, 0, buffer, 127 - addr, blocks, blksz); 181 + 
int ret; 182 + 183 + struct mmc_request mrq; 184 + struct mmc_command cmd; 185 + struct mmc_command stop; 186 + struct mmc_data data; 187 + 188 + struct scatterlist sg; 189 + 190 + memset(&mrq, 0, sizeof(struct mmc_request)); 191 + memset(&cmd, 0, sizeof(struct mmc_command)); 192 + memset(&data, 0, sizeof(struct mmc_data)); 193 + memset(&stop, 0, sizeof(struct mmc_command)); 194 + 195 + mrq.cmd = &cmd; 196 + mrq.data = &data; 197 + mrq.stop = &stop; 198 + 199 + sg_init_one(&sg, buffer, blksz); 200 + 201 + mmc_test_prepare_mrq(test, &mrq, &sg, 1, addr, 1, blksz, write); 202 + 203 + mmc_wait_for_req(test->card->host, &mrq); 204 + 205 + if (cmd.error) 206 + return cmd.error; 207 + if (data.error) 208 + return data.error; 209 + 210 + ret = mmc_test_wait_busy(test); 211 + if (ret) 212 + return ret; 213 + 214 + return 0; 128 215 } 129 216 130 - static int mmc_test_prepare_verify(struct mmc_test_card *test, int write) 217 + /*******************************************************************/ 218 + /* Test preparation and cleanup */ 219 + /*******************************************************************/ 220 + 221 + /* 222 + * Fill the first couple of sectors of the card with known data 223 + * so that bad reads/writes can be detected 224 + */ 225 + static int __mmc_test_prepare(struct mmc_test_card *test, int write) 131 226 { 132 227 int ret, i; 133 228 ··· 179 188 return ret; 180 189 181 190 if (write) 182 - memset(test->buffer, 0xDF, BUFFER_SIZE); 191 + memset(test->buffer, 0xDF, 512); 183 192 else { 184 - for (i = 0;i < BUFFER_SIZE;i++) 193 + for (i = 0;i < 512;i++) 185 194 test->buffer[i] = i; 186 195 } 187 196 188 197 for (i = 0;i < BUFFER_SIZE / 512;i++) { 189 - ret = mmc_test_transfer(test, 1, test->buffer + i * 512, 190 - i * 512, 1, 512); 198 + ret = mmc_test_buffer_transfer(test, test->buffer, i * 512, 512, 1); 191 199 if (ret) 192 200 return ret; 193 201 } ··· 194 204 return 0; 195 205 } 196 206 197 - static int mmc_test_prepare_verify_write(struct 
mmc_test_card *test) 207 + static int mmc_test_prepare_write(struct mmc_test_card *test) 198 208 { 199 - return mmc_test_prepare_verify(test, 1); 209 + return __mmc_test_prepare(test, 1); 200 210 } 201 211 202 - static int mmc_test_prepare_verify_read(struct mmc_test_card *test) 212 + static int mmc_test_prepare_read(struct mmc_test_card *test) 203 213 { 204 - return mmc_test_prepare_verify(test, 0); 214 + return __mmc_test_prepare(test, 0); 205 215 } 206 216 207 - static int mmc_test_verified_transfer(struct mmc_test_card *test, int write, 208 - u8 *buffer, unsigned addr, unsigned blocks, unsigned blksz) 217 + static int mmc_test_cleanup(struct mmc_test_card *test) 209 218 { 210 - int ret, i, sectors; 219 + int ret, i; 211 220 212 - /* 213 - * It is assumed that the above preparation has been done. 214 - */ 221 + ret = mmc_test_set_blksize(test, 512); 222 + if (ret) 223 + return ret; 215 224 216 - memset(test->buffer, 0, BUFFER_SIZE); 225 + memset(test->buffer, 0, 512); 226 + 227 + for (i = 0;i < BUFFER_SIZE / 512;i++) { 228 + ret = mmc_test_buffer_transfer(test, test->buffer, i * 512, 512, 1); 229 + if (ret) 230 + return ret; 231 + } 232 + 233 + return 0; 234 + } 235 + 236 + /*******************************************************************/ 237 + /* Test execution helpers */ 238 + /*******************************************************************/ 239 + 240 + /* 241 + * Modifies the mmc_request to perform the "short transfer" tests 242 + */ 243 + static void mmc_test_prepare_broken_mrq(struct mmc_test_card *test, 244 + struct mmc_request *mrq, int write) 245 + { 246 + BUG_ON(!mrq || !mrq->cmd || !mrq->data); 247 + 248 + if (mrq->data->blocks > 1) { 249 + mrq->cmd->opcode = write ? 
250 + MMC_WRITE_BLOCK : MMC_READ_SINGLE_BLOCK; 251 + mrq->stop = NULL; 252 + } else { 253 + mrq->cmd->opcode = MMC_SEND_STATUS; 254 + mrq->cmd->arg = test->card->rca << 16; 255 + } 256 + } 257 + 258 + /* 259 + * Checks that a normal transfer didn't have any errors 260 + */ 261 + static int mmc_test_check_result(struct mmc_test_card *test, 262 + struct mmc_request *mrq) 263 + { 264 + int ret; 265 + 266 + BUG_ON(!mrq || !mrq->cmd || !mrq->data); 267 + 268 + ret = 0; 269 + 270 + if (!ret && mrq->cmd->error) 271 + ret = mrq->cmd->error; 272 + if (!ret && mrq->data->error) 273 + ret = mrq->data->error; 274 + if (!ret && mrq->stop && mrq->stop->error) 275 + ret = mrq->stop->error; 276 + if (!ret && mrq->data->bytes_xfered != 277 + mrq->data->blocks * mrq->data->blksz) 278 + ret = RESULT_FAIL; 279 + 280 + if (ret == -EINVAL) 281 + ret = RESULT_UNSUP_HOST; 282 + 283 + return ret; 284 + } 285 + 286 + /* 287 + * Checks that a "short transfer" behaved as expected 288 + */ 289 + static int mmc_test_check_broken_result(struct mmc_test_card *test, 290 + struct mmc_request *mrq) 291 + { 292 + int ret; 293 + 294 + BUG_ON(!mrq || !mrq->cmd || !mrq->data); 295 + 296 + ret = 0; 297 + 298 + if (!ret && mrq->cmd->error) 299 + ret = mrq->cmd->error; 300 + if (!ret && mrq->data->error == 0) 301 + ret = RESULT_FAIL; 302 + if (!ret && mrq->data->error != -ETIMEDOUT) 303 + ret = mrq->data->error; 304 + if (!ret && mrq->stop && mrq->stop->error) 305 + ret = mrq->stop->error; 306 + if (mrq->data->blocks > 1) { 307 + if (!ret && mrq->data->bytes_xfered > mrq->data->blksz) 308 + ret = RESULT_FAIL; 309 + } else { 310 + if (!ret && mrq->data->bytes_xfered > 0) 311 + ret = RESULT_FAIL; 312 + } 313 + 314 + if (ret == -EINVAL) 315 + ret = RESULT_UNSUP_HOST; 316 + 317 + return ret; 318 + } 319 + 320 + /* 321 + * Tests a basic transfer with certain parameters 322 + */ 323 + static int mmc_test_simple_transfer(struct mmc_test_card *test, 324 + struct scatterlist *sg, unsigned sg_len, unsigned dev_addr, 
325 + unsigned blocks, unsigned blksz, int write) 326 + { 327 + struct mmc_request mrq; 328 + struct mmc_command cmd; 329 + struct mmc_command stop; 330 + struct mmc_data data; 331 + 332 + memset(&mrq, 0, sizeof(struct mmc_request)); 333 + memset(&cmd, 0, sizeof(struct mmc_command)); 334 + memset(&data, 0, sizeof(struct mmc_data)); 335 + memset(&stop, 0, sizeof(struct mmc_command)); 336 + 337 + mrq.cmd = &cmd; 338 + mrq.data = &data; 339 + mrq.stop = &stop; 340 + 341 + mmc_test_prepare_mrq(test, &mrq, sg, sg_len, dev_addr, 342 + blocks, blksz, write); 343 + 344 + mmc_wait_for_req(test->card->host, &mrq); 345 + 346 + mmc_test_wait_busy(test); 347 + 348 + return mmc_test_check_result(test, &mrq); 349 + } 350 + 351 + /* 352 + * Tests a transfer where the card will fail completely or partly 353 + */ 354 + static int mmc_test_broken_transfer(struct mmc_test_card *test, 355 + unsigned blocks, unsigned blksz, int write) 356 + { 357 + struct mmc_request mrq; 358 + struct mmc_command cmd; 359 + struct mmc_command stop; 360 + struct mmc_data data; 361 + 362 + struct scatterlist sg; 363 + 364 + memset(&mrq, 0, sizeof(struct mmc_request)); 365 + memset(&cmd, 0, sizeof(struct mmc_command)); 366 + memset(&data, 0, sizeof(struct mmc_data)); 367 + memset(&stop, 0, sizeof(struct mmc_command)); 368 + 369 + mrq.cmd = &cmd; 370 + mrq.data = &data; 371 + mrq.stop = &stop; 372 + 373 + sg_init_one(&sg, test->buffer, blocks * blksz); 374 + 375 + mmc_test_prepare_mrq(test, &mrq, &sg, 1, 0, blocks, blksz, write); 376 + mmc_test_prepare_broken_mrq(test, &mrq, write); 377 + 378 + mmc_wait_for_req(test->card->host, &mrq); 379 + 380 + mmc_test_wait_busy(test); 381 + 382 + return mmc_test_check_broken_result(test, &mrq); 383 + } 384 + 385 + /* 386 + * Does a complete transfer test where data is also validated 387 + * 388 + * Note: mmc_test_prepare() must have been done before this call 389 + */ 390 + static int mmc_test_transfer(struct mmc_test_card *test, 391 + struct scatterlist *sg, unsigned 
sg_len, unsigned dev_addr, 392 + unsigned blocks, unsigned blksz, int write) 393 + { 394 + int ret, i; 395 + unsigned long flags; 217 396 218 397 if (write) { 219 398 for (i = 0;i < blocks * blksz;i++) 220 - buffer[i] = i; 399 + test->scratch[i] = i; 400 + } else { 401 + memset(test->scratch, 0, BUFFER_SIZE); 221 402 } 403 + local_irq_save(flags); 404 + sg_copy_from_buffer(sg, sg_len, test->scratch, BUFFER_SIZE); 405 + local_irq_restore(flags); 222 406 223 407 ret = mmc_test_set_blksize(test, blksz); 224 408 if (ret) 225 409 return ret; 226 410 227 - ret = mmc_test_transfer(test, write, buffer, addr, blocks, blksz); 411 + ret = mmc_test_simple_transfer(test, sg, sg_len, dev_addr, 412 + blocks, blksz, write); 228 413 if (ret) 229 414 return ret; 230 415 231 416 if (write) { 417 + int sectors; 418 + 232 419 ret = mmc_test_set_blksize(test, 512); 233 420 if (ret) 234 421 return ret; ··· 420 253 memset(test->buffer, 0, sectors * 512); 421 254 422 255 for (i = 0;i < sectors;i++) { 423 - ret = mmc_test_transfer(test, 0, 256 + ret = mmc_test_buffer_transfer(test, 424 257 test->buffer + i * 512, 425 - addr + i * 512, 1, 512); 258 + dev_addr + i * 512, 512, 0); 426 259 if (ret) 427 260 return ret; 428 261 } ··· 437 270 return RESULT_FAIL; 438 271 } 439 272 } else { 273 + local_irq_save(flags); 274 + sg_copy_to_buffer(sg, sg_len, test->scratch, BUFFER_SIZE); 275 + local_irq_restore(flags); 440 276 for (i = 0;i < blocks * blksz;i++) { 441 - if (buffer[i] != (u8)i) 277 + if (test->scratch[i] != (u8)i) 442 278 return RESULT_FAIL; 443 279 } 444 - } 445 - 446 - return 0; 447 - } 448 - 449 - static int mmc_test_cleanup_verify(struct mmc_test_card *test) 450 - { 451 - int ret, i; 452 - 453 - ret = mmc_test_set_blksize(test, 512); 454 - if (ret) 455 - return ret; 456 - 457 - memset(test->buffer, 0, BUFFER_SIZE); 458 - 459 - for (i = 0;i < BUFFER_SIZE / 512;i++) { 460 - ret = mmc_test_transfer(test, 1, test->buffer + i * 512, 461 - i * 512, 1, 512); 462 - if (ret) 463 - return ret; 
464 280 } 465 281 466 282 return 0; ··· 464 314 static int mmc_test_basic_write(struct mmc_test_card *test) 465 315 { 466 316 int ret; 317 + struct scatterlist sg; 467 318 468 319 ret = mmc_test_set_blksize(test, 512); 469 320 if (ret) 470 321 return ret; 471 322 472 - ret = mmc_test_transfer(test, 1, test->buffer, 0, 1, 512); 323 + sg_init_one(&sg, test->buffer, 512); 324 + 325 + ret = mmc_test_simple_transfer(test, &sg, 1, 0, 1, 512, 1); 473 326 if (ret) 474 327 return ret; 475 328 ··· 482 329 static int mmc_test_basic_read(struct mmc_test_card *test) 483 330 { 484 331 int ret; 332 + struct scatterlist sg; 485 333 486 334 ret = mmc_test_set_blksize(test, 512); 487 335 if (ret) 488 336 return ret; 489 337 490 - ret = mmc_test_transfer(test, 0, test->buffer, 0, 1, 512); 338 + sg_init_one(&sg, test->buffer, 512); 339 + 340 + ret = mmc_test_simple_transfer(test, &sg, 1, 0, 1, 512, 1); 491 341 if (ret) 492 342 return ret; 493 343 ··· 500 344 static int mmc_test_verify_write(struct mmc_test_card *test) 501 345 { 502 346 int ret; 347 + struct scatterlist sg; 503 348 504 - ret = mmc_test_verified_transfer(test, 1, test->buffer, 0, 1, 512); 349 + sg_init_one(&sg, test->buffer, 512); 350 + 351 + ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1); 505 352 if (ret) 506 353 return ret; 507 354 ··· 514 355 static int mmc_test_verify_read(struct mmc_test_card *test) 515 356 { 516 357 int ret; 358 + struct scatterlist sg; 517 359 518 - ret = mmc_test_verified_transfer(test, 0, test->buffer, 0, 1, 512); 360 + sg_init_one(&sg, test->buffer, 512); 361 + 362 + ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0); 519 363 if (ret) 520 364 return ret; 521 365 ··· 529 367 { 530 368 int ret; 531 369 unsigned int size; 370 + struct scatterlist sg; 532 371 533 372 if (test->card->host->max_blk_count == 1) 534 373 return RESULT_UNSUP_HOST; ··· 542 379 if (size < 1024) 543 380 return RESULT_UNSUP_HOST; 544 381 545 - ret = mmc_test_verified_transfer(test, 1, test->buffer, 0, 546 - size / 
512, 512); 382 + sg_init_one(&sg, test->buffer, size); 383 + 384 + ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 1); 547 385 if (ret) 548 386 return ret; 549 387 ··· 555 391 { 556 392 int ret; 557 393 unsigned int size; 394 + struct scatterlist sg; 558 395 559 396 if (test->card->host->max_blk_count == 1) 560 397 return RESULT_UNSUP_HOST; ··· 568 403 if (size < 1024) 569 404 return RESULT_UNSUP_HOST; 570 405 571 - ret = mmc_test_verified_transfer(test, 0, test->buffer, 0, 572 - size / 512, 512); 406 + sg_init_one(&sg, test->buffer, size); 407 + 408 + ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 0); 573 409 if (ret) 574 410 return ret; 575 411 ··· 580 414 static int mmc_test_pow2_write(struct mmc_test_card *test) 581 415 { 582 416 int ret, i; 417 + struct scatterlist sg; 583 418 584 419 if (!test->card->csd.write_partial) 585 420 return RESULT_UNSUP_CARD; 586 421 587 422 for (i = 1; i < 512;i <<= 1) { 588 - ret = mmc_test_verified_transfer(test, 1, 589 - test->buffer, 0, 1, i); 423 + sg_init_one(&sg, test->buffer, i); 424 + ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 1); 590 425 if (ret) 591 426 return ret; 592 427 } ··· 598 431 static int mmc_test_pow2_read(struct mmc_test_card *test) 599 432 { 600 433 int ret, i; 434 + struct scatterlist sg; 601 435 602 436 if (!test->card->csd.read_partial) 603 437 return RESULT_UNSUP_CARD; 604 438 605 439 for (i = 1; i < 512;i <<= 1) { 606 - ret = mmc_test_verified_transfer(test, 0, 607 - test->buffer, 0, 1, i); 440 + sg_init_one(&sg, test->buffer, i); 441 + ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 0); 608 442 if (ret) 609 443 return ret; 610 444 } ··· 616 448 static int mmc_test_weird_write(struct mmc_test_card *test) 617 449 { 618 450 int ret, i; 451 + struct scatterlist sg; 619 452 620 453 if (!test->card->csd.write_partial) 621 454 return RESULT_UNSUP_CARD; 622 455 623 456 for (i = 3; i < 512;i += 7) { 624 - ret = mmc_test_verified_transfer(test, 1, 625 - test->buffer, 0, 1, i); 457 + 
sg_init_one(&sg, test->buffer, i); 458 + ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 1); 626 459 if (ret) 627 460 return ret; 628 461 } ··· 634 465 static int mmc_test_weird_read(struct mmc_test_card *test) 635 466 { 636 467 int ret, i; 468 + struct scatterlist sg; 637 469 638 470 if (!test->card->csd.read_partial) 639 471 return RESULT_UNSUP_CARD; 640 472 641 473 for (i = 3; i < 512;i += 7) { 642 - ret = mmc_test_verified_transfer(test, 0, 643 - test->buffer, 0, 1, i); 474 + sg_init_one(&sg, test->buffer, i); 475 + ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 0); 644 476 if (ret) 645 477 return ret; 646 478 } ··· 652 482 static int mmc_test_align_write(struct mmc_test_card *test) 653 483 { 654 484 int ret, i; 485 + struct scatterlist sg; 655 486 656 487 for (i = 1;i < 4;i++) { 657 - ret = mmc_test_verified_transfer(test, 1, test->buffer + i, 658 - 0, 1, 512); 488 + sg_init_one(&sg, test->buffer + i, 512); 489 + ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1); 659 490 if (ret) 660 491 return ret; 661 492 } ··· 667 496 static int mmc_test_align_read(struct mmc_test_card *test) 668 497 { 669 498 int ret, i; 499 + struct scatterlist sg; 670 500 671 501 for (i = 1;i < 4;i++) { 672 - ret = mmc_test_verified_transfer(test, 0, test->buffer + i, 673 - 0, 1, 512); 502 + sg_init_one(&sg, test->buffer + i, 512); 503 + ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0); 674 504 if (ret) 675 505 return ret; 676 506 } ··· 683 511 { 684 512 int ret, i; 685 513 unsigned int size; 514 + struct scatterlist sg; 686 515 687 516 if (test->card->host->max_blk_count == 1) 688 517 return RESULT_UNSUP_HOST; ··· 697 524 return RESULT_UNSUP_HOST; 698 525 699 526 for (i = 1;i < 4;i++) { 700 - ret = mmc_test_verified_transfer(test, 1, test->buffer + i, 701 - 0, size / 512, 512); 527 + sg_init_one(&sg, test->buffer + i, size); 528 + ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 1); 702 529 if (ret) 703 530 return ret; 704 531 } ··· 710 537 { 711 538 int ret, i; 712 539 
unsigned int size; 540 + struct scatterlist sg; 713 541 714 542 if (test->card->host->max_blk_count == 1) 715 543 return RESULT_UNSUP_HOST; ··· 724 550 return RESULT_UNSUP_HOST; 725 551 726 552 for (i = 1;i < 4;i++) { 727 - ret = mmc_test_verified_transfer(test, 0, test->buffer + i, 728 - 0, size / 512, 512); 553 + sg_init_one(&sg, test->buffer + i, size); 554 + ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 0); 729 555 if (ret) 730 556 return ret; 731 557 } ··· 741 567 if (ret) 742 568 return ret; 743 569 744 - ret = __mmc_test_transfer(test, 1, 1, test->buffer, 0, 1, 512); 570 + ret = mmc_test_broken_transfer(test, 1, 512, 1); 745 571 if (ret) 746 572 return ret; 747 573 ··· 756 582 if (ret) 757 583 return ret; 758 584 759 - ret = __mmc_test_transfer(test, 0, 1, test->buffer, 0, 1, 512); 585 + ret = mmc_test_broken_transfer(test, 1, 512, 0); 760 586 if (ret) 761 587 return ret; 762 588 ··· 774 600 if (ret) 775 601 return ret; 776 602 777 - ret = __mmc_test_transfer(test, 1, 1, test->buffer, 0, 2, 512); 603 + ret = mmc_test_broken_transfer(test, 2, 512, 1); 778 604 if (ret) 779 605 return ret; 780 606 ··· 792 618 if (ret) 793 619 return ret; 794 620 795 - ret = __mmc_test_transfer(test, 0, 1, test->buffer, 0, 2, 512); 621 + ret = mmc_test_broken_transfer(test, 2, 512, 0); 796 622 if (ret) 797 623 return ret; 798 624 ··· 812 638 813 639 { 814 640 .name = "Basic write (with data verification)", 815 - .prepare = mmc_test_prepare_verify_write, 641 + .prepare = mmc_test_prepare_write, 816 642 .run = mmc_test_verify_write, 817 - .cleanup = mmc_test_cleanup_verify, 643 + .cleanup = mmc_test_cleanup, 818 644 }, 819 645 820 646 { 821 647 .name = "Basic read (with data verification)", 822 - .prepare = mmc_test_prepare_verify_read, 648 + .prepare = mmc_test_prepare_read, 823 649 .run = mmc_test_verify_read, 824 - .cleanup = mmc_test_cleanup_verify, 650 + .cleanup = mmc_test_cleanup, 825 651 }, 826 652 827 653 { 828 654 .name = "Multi-block write", 829 - .prepare = 
mmc_test_prepare_verify_write, 655 + .prepare = mmc_test_prepare_write, 830 656 .run = mmc_test_multi_write, 831 - .cleanup = mmc_test_cleanup_verify, 657 + .cleanup = mmc_test_cleanup, 832 658 }, 833 659 834 660 { 835 661 .name = "Multi-block read", 836 - .prepare = mmc_test_prepare_verify_read, 662 + .prepare = mmc_test_prepare_read, 837 663 .run = mmc_test_multi_read, 838 - .cleanup = mmc_test_cleanup_verify, 664 + .cleanup = mmc_test_cleanup, 839 665 }, 840 666 841 667 { 842 668 .name = "Power of two block writes", 843 - .prepare = mmc_test_prepare_verify_write, 669 + .prepare = mmc_test_prepare_write, 844 670 .run = mmc_test_pow2_write, 845 - .cleanup = mmc_test_cleanup_verify, 671 + .cleanup = mmc_test_cleanup, 846 672 }, 847 673 848 674 { 849 675 .name = "Power of two block reads", 850 - .prepare = mmc_test_prepare_verify_read, 676 + .prepare = mmc_test_prepare_read, 851 677 .run = mmc_test_pow2_read, 852 - .cleanup = mmc_test_cleanup_verify, 678 + .cleanup = mmc_test_cleanup, 853 679 }, 854 680 855 681 { 856 682 .name = "Weird sized block writes", 857 - .prepare = mmc_test_prepare_verify_write, 683 + .prepare = mmc_test_prepare_write, 858 684 .run = mmc_test_weird_write, 859 - .cleanup = mmc_test_cleanup_verify, 685 + .cleanup = mmc_test_cleanup, 860 686 }, 861 687 862 688 { 863 689 .name = "Weird sized block reads", 864 - .prepare = mmc_test_prepare_verify_read, 690 + .prepare = mmc_test_prepare_read, 865 691 .run = mmc_test_weird_read, 866 - .cleanup = mmc_test_cleanup_verify, 692 + .cleanup = mmc_test_cleanup, 867 693 }, 868 694 869 695 { 870 696 .name = "Badly aligned write", 871 - .prepare = mmc_test_prepare_verify_write, 697 + .prepare = mmc_test_prepare_write, 872 698 .run = mmc_test_align_write, 873 - .cleanup = mmc_test_cleanup_verify, 699 + .cleanup = mmc_test_cleanup, 874 700 }, 875 701 876 702 { 877 703 .name = "Badly aligned read", 878 - .prepare = mmc_test_prepare_verify_read, 704 + .prepare = mmc_test_prepare_read, 879 705 .run = 
mmc_test_align_read, 880 - .cleanup = mmc_test_cleanup_verify, 706 + .cleanup = mmc_test_cleanup, 881 707 }, 882 708 883 709 { 884 710 .name = "Badly aligned multi-block write", 885 - .prepare = mmc_test_prepare_verify_write, 711 + .prepare = mmc_test_prepare_write, 886 712 .run = mmc_test_align_multi_write, 887 - .cleanup = mmc_test_cleanup_verify, 713 + .cleanup = mmc_test_cleanup, 888 714 }, 889 715 890 716 { 891 717 .name = "Badly aligned multi-block read", 892 - .prepare = mmc_test_prepare_verify_read, 718 + .prepare = mmc_test_prepare_read, 893 719 .run = mmc_test_align_multi_read, 894 - .cleanup = mmc_test_cleanup_verify, 720 + .cleanup = mmc_test_cleanup, 895 721 }, 896 722 897 723 { ··· 917 743 918 744 static struct mutex mmc_test_lock; 919 745 920 - static void mmc_test_run(struct mmc_test_card *test) 746 + static void mmc_test_run(struct mmc_test_card *test, int testcase) 921 747 { 922 748 int i, ret; 923 749 ··· 927 753 mmc_claim_host(test->card->host); 928 754 929 755 for (i = 0;i < ARRAY_SIZE(mmc_test_cases);i++) { 756 + if (testcase && ((i + 1) != testcase)) 757 + continue; 758 + 930 759 printk(KERN_INFO "%s: Test case %d. 
%s...\n", 931 760 mmc_hostname(test->card->host), i + 1, 932 761 mmc_test_cases[i].name); ··· 1001 824 { 1002 825 struct mmc_card *card; 1003 826 struct mmc_test_card *test; 827 + int testcase; 1004 828 1005 829 card = container_of(dev, struct mmc_card, dev); 830 + 831 + testcase = simple_strtol(buf, NULL, 10); 1006 832 1007 833 test = kzalloc(sizeof(struct mmc_test_card), GFP_KERNEL); 1008 834 if (!test) ··· 1016 836 test->buffer = kzalloc(BUFFER_SIZE, GFP_KERNEL); 1017 837 if (test->buffer) { 1018 838 mutex_lock(&mmc_test_lock); 1019 - mmc_test_run(test); 839 + mmc_test_run(test, testcase); 1020 840 mutex_unlock(&mmc_test_lock); 1021 841 } 1022 842 ··· 1031 851 static int mmc_test_probe(struct mmc_card *card) 1032 852 { 1033 853 int ret; 854 + 855 + if ((card->type != MMC_TYPE_MMC) && (card->type != MMC_TYPE_SD)) 856 + return -ENODEV; 1034 857 1035 858 mutex_init(&mmc_test_lock); 1036 859
+6 -3
drivers/mmc/card/sdio_uart.c
··· 885 885 sdio_uart_release_func(port); 886 886 } 887 887 888 - static void sdio_uart_break_ctl(struct tty_struct *tty, int break_state) 888 + static int sdio_uart_break_ctl(struct tty_struct *tty, int break_state) 889 889 { 890 890 struct sdio_uart_port *port = tty->driver_data; 891 + int result; 891 892 892 - if (sdio_uart_claim_func(port) != 0) 893 - return; 893 + result = sdio_uart_claim_func(port); 894 + if (result != 0) 895 + return result; 894 896 895 897 if (break_state == -1) 896 898 port->lcr |= UART_LCR_SBC; ··· 901 899 sdio_out(port, UART_LCR, port->lcr); 902 900 903 901 sdio_uart_release_func(port); 902 + return 0; 904 903 } 905 904 906 905 static int sdio_uart_tiocmget(struct tty_struct *tty, struct file *file)
+37 -4
drivers/mmc/core/core.c
··· 3 3 * 4 4 * Copyright (C) 2003-2004 Russell King, All Rights Reserved. 5 5 * SD support Copyright (C) 2004 Ian Molton, All Rights Reserved. 6 - * Copyright (C) 2005-2007 Pierre Ossman, All Rights Reserved. 6 + * Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved. 7 7 * MMCv4 support Copyright (C) 2006 Philip Langdale, All Rights Reserved. 8 8 * 9 9 * This program is free software; you can redistribute it and/or modify ··· 293 293 } 294 294 } 295 295 EXPORT_SYMBOL(mmc_set_data_timeout); 296 + 297 + /** 298 + * mmc_align_data_size - pads a transfer size to a more optimal value 299 + * @card: the MMC card associated with the data transfer 300 + * @sz: original transfer size 301 + * 302 + * Pads the original data size with a number of extra bytes in 303 + * order to avoid controller bugs and/or performance hits 304 + * (e.g. some controllers revert to PIO for certain sizes). 305 + * 306 + * Returns the improved size, which might be unmodified. 307 + * 308 + * Note that this function is only relevant when issuing a 309 + * single scatter gather entry. 310 + */ 311 + unsigned int mmc_align_data_size(struct mmc_card *card, unsigned int sz) 312 + { 313 + /* 314 + * FIXME: We don't have a system for the controller to tell 315 + * the core about its problems yet, so for now we just 32-bit 316 + * align the size. 
317 + */ 318 + sz = ((sz + 3) / 4) * 4; 319 + 320 + return sz; 321 + } 322 + EXPORT_SYMBOL(mmc_align_data_size); 296 323 297 324 /** 298 325 * __mmc_claim_host - exclusively claim a host ··· 665 638 */ 666 639 mmc_bus_put(host); 667 640 641 + if (host->ops->get_cd && host->ops->get_cd(host) == 0) 642 + goto out; 643 + 668 644 mmc_claim_host(host); 669 645 670 646 mmc_power_up(host); ··· 682 652 if (!err) { 683 653 if (mmc_attach_sdio(host, ocr)) 684 654 mmc_power_off(host); 685 - return; 655 + goto out; 686 656 } 687 657 688 658 /* ··· 692 662 if (!err) { 693 663 if (mmc_attach_sd(host, ocr)) 694 664 mmc_power_off(host); 695 - return; 665 + goto out; 696 666 } 697 667 698 668 /* ··· 702 672 if (!err) { 703 673 if (mmc_attach_mmc(host, ocr)) 704 674 mmc_power_off(host); 705 - return; 675 + goto out; 706 676 } 707 677 708 678 mmc_release_host(host); ··· 713 683 714 684 mmc_bus_put(host); 715 685 } 686 + out: 687 + if (host->caps & MMC_CAP_NEEDS_POLL) 688 + mmc_schedule_delayed_work(&host->detect, HZ); 716 689 } 717 690 718 691 void mmc_start_host(struct mmc_host *host)
+1 -1
drivers/mmc/core/mmc.c
··· 288 288 /* 289 289 * Handle the detection and initialisation of a card. 290 290 * 291 - * In the case of a resume, "curcard" will contain the card 291 + * In the case of a resume, "oldcard" will contain the card 292 292 * we're trying to reinitialise. 293 293 */ 294 294 static int mmc_init_card(struct mmc_host *host, u32 ocr,
+3 -3
drivers/mmc/core/sd.c
··· 326 326 /* 327 327 * Handle the detection and initialisation of a card. 328 328 * 329 - * In the case of a resume, "curcard" will contain the card 329 + * In the case of a resume, "oldcard" will contain the card 330 330 * we're trying to reinitialise. 331 331 */ 332 332 static int mmc_sd_init_card(struct mmc_host *host, u32 ocr, ··· 494 494 * Check if read-only switch is active. 495 495 */ 496 496 if (!oldcard) { 497 - if (!host->ops->get_ro) { 497 + if (!host->ops->get_ro || host->ops->get_ro(host) < 0) { 498 498 printk(KERN_WARNING "%s: host does not " 499 499 "support reading read-only " 500 500 "switch. assuming write-enable.\n", 501 501 mmc_hostname(host)); 502 502 } else { 503 - if (host->ops->get_ro(host)) 503 + if (host->ops->get_ro(host) > 0) 504 504 mmc_card_set_readonly(card); 505 505 } 506 506 }
+6
drivers/mmc/core/sdio_cis.c
··· 129 129 /* TPLFE_MAX_BLK_SIZE */ 130 130 func->max_blksize = buf[12] | (buf[13] << 8); 131 131 132 + /* TPLFE_ENABLE_TIMEOUT_VAL, present in ver 1.1 and above */ 133 + if (vsn > SDIO_SDIO_REV_1_00) 134 + func->enable_timeout = (buf[28] | (buf[29] << 8)) * 10; 135 + else 136 + func->enable_timeout = jiffies_to_msecs(HZ); 137 + 132 138 return 0; 133 139 } 134 140
+128 -39
drivers/mmc/core/sdio_io.c
··· 1 1 /* 2 2 * linux/drivers/mmc/core/sdio_io.c 3 3 * 4 - * Copyright 2007 Pierre Ossman 4 + * Copyright 2007-2008 Pierre Ossman 5 5 * 6 6 * This program is free software; you can redistribute it and/or modify 7 7 * it under the terms of the GNU General Public License as published by ··· 76 76 if (ret) 77 77 goto err; 78 78 79 - /* 80 - * FIXME: This should timeout based on information in the CIS, 81 - * but we don't have card to parse that yet. 82 - */ 83 - timeout = jiffies + HZ; 79 + timeout = jiffies + msecs_to_jiffies(func->enable_timeout); 84 80 85 81 while (1) { 86 82 ret = mmc_io_rw_direct(func->card, 0, 0, SDIO_CCCR_IORx, 0, &reg); ··· 163 167 return -EINVAL; 164 168 165 169 if (blksz == 0) { 166 - blksz = min(min( 167 - func->max_blksize, 168 - func->card->host->max_blk_size), 169 - 512u); 170 + blksz = min(func->max_blksize, func->card->host->max_blk_size); 171 + blksz = min(blksz, 512u); 170 172 } 171 173 172 174 ret = mmc_io_rw_direct(func->card, 1, 0, ··· 180 186 func->cur_blksize = blksz; 181 187 return 0; 182 188 } 183 - 184 189 EXPORT_SYMBOL_GPL(sdio_set_block_size); 190 + 191 + /* 192 + * Calculate the maximum byte mode transfer size 193 + */ 194 + static inline unsigned int sdio_max_byte_size(struct sdio_func *func) 195 + { 196 + unsigned mval = min(func->card->host->max_seg_size, 197 + func->card->host->max_blk_size); 198 + mval = min(mval, func->max_blksize); 199 + return min(mval, 512u); /* maximum size for byte mode */ 200 + } 201 + 202 + /** 203 + * sdio_align_size - pads a transfer size to a more optimal value 204 + * @func: SDIO function 205 + * @sz: original transfer size 206 + * 207 + * Pads the original data size with a number of extra bytes in 208 + * order to avoid controller bugs and/or performance hits 209 + * (e.g. some controllers revert to PIO for certain sizes). 210 + * 211 + * If possible, it will also adjust the size so that it can be 212 + * handled in just a single request. 
213 + * 214 + * Returns the improved size, which might be unmodified. 215 + */ 216 + unsigned int sdio_align_size(struct sdio_func *func, unsigned int sz) 217 + { 218 + unsigned int orig_sz; 219 + unsigned int blk_sz, byte_sz; 220 + unsigned chunk_sz; 221 + 222 + orig_sz = sz; 223 + 224 + /* 225 + * Do a first check with the controller, in case it 226 + * wants to increase the size up to a point where it 227 + * might need more than one block. 228 + */ 229 + sz = mmc_align_data_size(func->card, sz); 230 + 231 + /* 232 + * If we can still do this with just a byte transfer, then 233 + * we're done. 234 + */ 235 + if (sz <= sdio_max_byte_size(func)) 236 + return sz; 237 + 238 + if (func->card->cccr.multi_block) { 239 + /* 240 + * Check if the transfer is already block aligned 241 + */ 242 + if ((sz % func->cur_blksize) == 0) 243 + return sz; 244 + 245 + /* 246 + * Realign it so that it can be done with one request, 247 + * and recheck if the controller still likes it. 248 + */ 249 + blk_sz = ((sz + func->cur_blksize - 1) / 250 + func->cur_blksize) * func->cur_blksize; 251 + blk_sz = mmc_align_data_size(func->card, blk_sz); 252 + 253 + /* 254 + * This value is only good if it is still just 255 + * one request. 256 + */ 257 + if ((blk_sz % func->cur_blksize) == 0) 258 + return blk_sz; 259 + 260 + /* 261 + * We failed to do one request, but at least try to 262 + * pad the remainder properly. 
263 + */ 264 + byte_sz = mmc_align_data_size(func->card, 265 + sz % func->cur_blksize); 266 + if (byte_sz <= sdio_max_byte_size(func)) { 267 + blk_sz = sz / func->cur_blksize; 268 + return blk_sz * func->cur_blksize + byte_sz; 269 + } 270 + } else { 271 + /* 272 + * We need multiple requests, so first check that the 273 + * controller can handle the chunk size; 274 + */ 275 + chunk_sz = mmc_align_data_size(func->card, 276 + sdio_max_byte_size(func)); 277 + if (chunk_sz == sdio_max_byte_size(func)) { 278 + /* 279 + * Fix up the size of the remainder (if any) 280 + */ 281 + byte_sz = orig_sz % chunk_sz; 282 + if (byte_sz) { 283 + byte_sz = mmc_align_data_size(func->card, 284 + byte_sz); 285 + } 286 + 287 + return (orig_sz / chunk_sz) * chunk_sz + byte_sz; 288 + } 289 + } 290 + 291 + /* 292 + * The controller is simply incapable of transferring the size 293 + * we want in decent manner, so just return the original size. 294 + */ 295 + return orig_sz; 296 + } 297 + EXPORT_SYMBOL_GPL(sdio_align_size); 185 298 186 299 /* Split an arbitrarily sized data transfer into several 187 300 * IO_RW_EXTENDED commands. */ ··· 300 199 int ret; 301 200 302 201 /* Do the bulk of the transfer using block mode (if supported). */ 303 - if (func->card->cccr.multi_block) { 202 + if (func->card->cccr.multi_block && (size > sdio_max_byte_size(func))) { 304 203 /* Blocks per command is limited by host count, host transfer 305 204 * size (we only use a single sg entry) and the maximum for 306 205 * IO_RW_EXTENDED of 511 blocks. */ 307 - max_blocks = min(min( 308 - func->card->host->max_blk_count, 309 - func->card->host->max_seg_size / func->cur_blksize), 310 - 511u); 206 + max_blocks = min(func->card->host->max_blk_count, 207 + func->card->host->max_seg_size / func->cur_blksize); 208 + max_blocks = min(max_blocks, 511u); 311 209 312 210 while (remainder > func->cur_blksize) { 313 211 unsigned blocks; ··· 331 231 332 232 /* Write the remainder using byte mode. 
*/ 333 233 while (remainder > 0) { 334 - size = remainder; 335 - if (size > func->cur_blksize) 336 - size = func->cur_blksize; 337 - if (size > 512) 338 - size = 512; /* maximum size for byte mode */ 234 + size = min(remainder, sdio_max_byte_size(func)); 339 235 340 236 ret = mmc_io_rw_extended(func->card, write, func->num, addr, 341 237 incr_addr, buf, 1, size); ··· 356 260 * function. If there is a problem reading the address, 0xff 357 261 * is returned and @err_ret will contain the error code. 358 262 */ 359 - unsigned char sdio_readb(struct sdio_func *func, unsigned int addr, 360 - int *err_ret) 263 + u8 sdio_readb(struct sdio_func *func, unsigned int addr, int *err_ret) 361 264 { 362 265 int ret; 363 - unsigned char val; 266 + u8 val; 364 267 365 268 BUG_ON(!func); 366 269 ··· 388 293 * function. @err_ret will contain the status of the actual 389 294 * transfer. 390 295 */ 391 - void sdio_writeb(struct sdio_func *func, unsigned char b, unsigned int addr, 392 - int *err_ret) 296 + void sdio_writeb(struct sdio_func *func, u8 b, unsigned int addr, int *err_ret) 393 297 { 394 298 int ret; 395 299 ··· 449 355 { 450 356 return sdio_io_rw_ext_helper(func, 0, addr, 0, dst, count); 451 357 } 452 - 453 358 EXPORT_SYMBOL_GPL(sdio_readsb); 454 359 455 360 /** ··· 478 385 * function. If there is a problem reading the address, 0xffff 479 386 * is returned and @err_ret will contain the error code. 480 387 */ 481 - unsigned short sdio_readw(struct sdio_func *func, unsigned int addr, 482 - int *err_ret) 388 + u16 sdio_readw(struct sdio_func *func, unsigned int addr, int *err_ret) 483 389 { 484 390 int ret; 485 391 ··· 492 400 return 0xFFFF; 493 401 } 494 402 495 - return le16_to_cpu(*(u16*)func->tmpbuf); 403 + return le16_to_cpup((__le16 *)func->tmpbuf); 496 404 } 497 405 EXPORT_SYMBOL_GPL(sdio_readw); 498 406 ··· 507 415 * function. @err_ret will contain the status of the actual 508 416 * transfer. 
509 417 */ 510 - void sdio_writew(struct sdio_func *func, unsigned short b, unsigned int addr, 511 - int *err_ret) 418 + void sdio_writew(struct sdio_func *func, u16 b, unsigned int addr, int *err_ret) 512 419 { 513 420 int ret; 514 421 515 - *(u16*)func->tmpbuf = cpu_to_le16(b); 422 + *(__le16 *)func->tmpbuf = cpu_to_le16(b); 516 423 517 424 ret = sdio_memcpy_toio(func, addr, func->tmpbuf, 2); 518 425 if (err_ret) ··· 530 439 * 0xffffffff is returned and @err_ret will contain the error 531 440 * code. 532 441 */ 533 - unsigned long sdio_readl(struct sdio_func *func, unsigned int addr, 534 - int *err_ret) 442 + u32 sdio_readl(struct sdio_func *func, unsigned int addr, int *err_ret) 535 443 { 536 444 int ret; 537 445 ··· 544 454 return 0xFFFFFFFF; 545 455 } 546 456 547 - return le32_to_cpu(*(u32*)func->tmpbuf); 457 + return le32_to_cpup((__le32 *)func->tmpbuf); 548 458 } 549 459 EXPORT_SYMBOL_GPL(sdio_readl); 550 460 ··· 559 469 * function. @err_ret will contain the status of the actual 560 470 * transfer. 561 471 */ 562 - void sdio_writel(struct sdio_func *func, unsigned long b, unsigned int addr, 563 - int *err_ret) 472 + void sdio_writel(struct sdio_func *func, u32 b, unsigned int addr, int *err_ret) 564 473 { 565 474 int ret; 566 475 567 - *(u32*)func->tmpbuf = cpu_to_le32(b); 476 + *(__le32 *)func->tmpbuf = cpu_to_le32(b); 568 477 569 478 ret = sdio_memcpy_toio(func, addr, func->tmpbuf, 4); 570 479 if (err_ret)
+47 -3
drivers/mmc/host/Kconfig
··· 26 26 27 27 config MMC_SDHCI 28 28 tristate "Secure Digital Host Controller Interface support" 29 - depends on PCI 29 + depends on HAS_DMA 30 30 help 31 - This select the generic Secure Digital Host Controller Interface. 31 + This selects the generic Secure Digital Host Controller Interface. 32 32 It is used by manufacturers such as Texas Instruments(R), Ricoh(R) 33 33 and Toshiba(R). Most controllers found in laptops are of this type. 34 + 35 + If you have a controller with this interface, say Y or M here. You 36 + also need to enable an appropriate bus interface. 37 + 38 + If unsure, say N. 39 + 40 + config MMC_SDHCI_PCI 41 + tristate "SDHCI support on PCI bus" 42 + depends on MMC_SDHCI && PCI 43 + help 44 + This selects the PCI Secure Digital Host Controller Interface. 45 + Most controllers found today are PCI devices. 46 + 34 47 If you have a controller with this interface, say Y or M here. 35 48 36 49 If unsure, say N. 37 50 38 51 config MMC_RICOH_MMC 39 52 tristate "Ricoh MMC Controller Disabler (EXPERIMENTAL)" 40 - depends on PCI && EXPERIMENTAL && MMC_SDHCI 53 + depends on MMC_SDHCI_PCI 41 54 help 42 55 This selects the disabler for the Ricoh MMC Controller. This 43 56 proprietary controller is unnecessary because the SDHCI driver ··· 104 91 105 92 If unsure, say N. 106 93 94 + config MMC_ATMELMCI 95 + tristate "Atmel Multimedia Card Interface support" 96 + depends on AVR32 97 + help 98 + This selects the Atmel Multimedia Card Interface driver. If 99 + you have an AT32 (AVR32) platform with a Multimedia Card 100 + slot, say Y or M here. 101 + 102 + If unsure, say N. 103 + 107 104 config MMC_IMX 108 105 tristate "Motorola i.MX Multimedia Card Interface support" 109 106 depends on ARCH_IMX ··· 152 129 working on many systems without dedicated MMC/SD controllers. 153 130 154 131 If unsure, or if your system has no SPI master driver, say N. 
132 + 133 + config MMC_S3C 134 + tristate "Samsung S3C SD/MMC Card Interface support" 135 + depends on ARCH_S3C2410 && MMC 136 + help 137 + This selects a driver for the MCI interface found in 138 + Samsung's S3C2410, S3C2412, S3C2440, S3C2442 CPUs. 139 + If you have a board based on one of those and a MMC/SD 140 + slot, say Y or M here. 141 + 142 + If unsure, say N. 143 + 144 + config MMC_SDRICOH_CS 145 + tristate "MMC/SD driver for Ricoh Bay1Controllers (EXPERIMENTAL)" 146 + depends on EXPERIMENTAL && MMC && PCI && PCMCIA 147 + help 148 + Say Y here if your Notebook reports a Ricoh Bay1Controller PCMCIA 149 + card whenever you insert a MMC or SD card into the card slot. 150 + 151 + To compile this driver as a module, choose M here: the 152 + module will be called sdricoh_cs. 155 153
+4
drivers/mmc/host/Makefile
··· 10 10 obj-$(CONFIG_MMC_PXA) += pxamci.o 11 11 obj-$(CONFIG_MMC_IMX) += imxmmc.o 12 12 obj-$(CONFIG_MMC_SDHCI) += sdhci.o 13 + obj-$(CONFIG_MMC_SDHCI_PCI) += sdhci-pci.o 13 14 obj-$(CONFIG_MMC_RICOH_MMC) += ricoh_mmc.o 14 15 obj-$(CONFIG_MMC_WBSD) += wbsd.o 15 16 obj-$(CONFIG_MMC_AU1X) += au1xmmc.o 16 17 obj-$(CONFIG_MMC_OMAP) += omap.o 17 18 obj-$(CONFIG_MMC_AT91) += at91_mci.o 19 + obj-$(CONFIG_MMC_ATMELMCI) += atmel-mci.o 18 20 obj-$(CONFIG_MMC_TIFM_SD) += tifm_sd.o 19 21 obj-$(CONFIG_MMC_SPI) += mmc_spi.o 22 + obj-$(CONFIG_MMC_S3C) += s3cmci.o 23 + obj-$(CONFIG_MMC_SDRICOH_CS) += sdricoh_cs.o 20 24
+208 -51
drivers/mmc/host/at91_mci.c
··· 125 125 126 126 /* Latest in the scatterlist that has been enabled for transfer */ 127 127 int transfer_index; 128 + 129 + /* Timer for timeouts */ 130 + struct timer_list timer; 128 131 }; 132 + 133 + /* 134 + * Reset the controller and restore most of the state 135 + */ 136 + static void at91_reset_host(struct at91mci_host *host) 137 + { 138 + unsigned long flags; 139 + u32 mr; 140 + u32 sdcr; 141 + u32 dtor; 142 + u32 imr; 143 + 144 + local_irq_save(flags); 145 + imr = at91_mci_read(host, AT91_MCI_IMR); 146 + 147 + at91_mci_write(host, AT91_MCI_IDR, 0xffffffff); 148 + 149 + /* save current state */ 150 + mr = at91_mci_read(host, AT91_MCI_MR) & 0x7fff; 151 + sdcr = at91_mci_read(host, AT91_MCI_SDCR); 152 + dtor = at91_mci_read(host, AT91_MCI_DTOR); 153 + 154 + /* reset the controller */ 155 + at91_mci_write(host, AT91_MCI_CR, AT91_MCI_MCIDIS | AT91_MCI_SWRST); 156 + 157 + /* restore state */ 158 + at91_mci_write(host, AT91_MCI_CR, AT91_MCI_MCIEN); 159 + at91_mci_write(host, AT91_MCI_MR, mr); 160 + at91_mci_write(host, AT91_MCI_SDCR, sdcr); 161 + at91_mci_write(host, AT91_MCI_DTOR, dtor); 162 + at91_mci_write(host, AT91_MCI_IER, imr); 163 + 164 + /* make sure sdio interrupts will fire */ 165 + at91_mci_read(host, AT91_MCI_SR); 166 + 167 + local_irq_restore(flags); 168 + } 169 + 170 + static void at91_timeout_timer(unsigned long data) 171 + { 172 + struct at91mci_host *host; 173 + 174 + host = (struct at91mci_host *)data; 175 + 176 + if (host->request) { 177 + dev_err(host->mmc->parent, "Timeout waiting end of packet\n"); 178 + 179 + if (host->cmd && host->cmd->data) { 180 + host->cmd->data->error = -ETIMEDOUT; 181 + } else { 182 + if (host->cmd) 183 + host->cmd->error = -ETIMEDOUT; 184 + else 185 + host->request->cmd->error = -ETIMEDOUT; 186 + } 187 + 188 + at91_reset_host(host); 189 + mmc_request_done(host->mmc, host->request); 190 + } 191 + } 129 192 130 193 /* 131 194 * Copy from sg to a dma block - used for transfers ··· 198 135 unsigned int len, i, size; 
199 136 unsigned *dmabuf = host->buffer; 200 137 201 - size = host->total_length; 138 + size = data->blksz * data->blocks; 202 139 len = data->sg_len; 140 + 141 + /* AT91SAM926[0/3] Data Write Operation and number of bytes erratum */ 142 + if (cpu_is_at91sam9260() || cpu_is_at91sam9263()) 143 + if (host->total_length == 12) 144 + memset(dmabuf, 0, 12); 203 145 204 146 /* 205 147 * Just loop through all entries. Size might not ··· 227 159 228 160 for (index = 0; index < (amount / 4); index++) 229 161 *dmabuf++ = swab32(sgbuffer[index]); 230 - } 231 - else 162 + } else { 232 163 memcpy(dmabuf, sgbuffer, amount); 164 + dmabuf += amount; 165 + } 233 166 234 167 kunmap_atomic(sgbuffer, KM_BIO_SRC_IRQ); 235 168 ··· 302 233 303 234 if (i == 0) { 304 235 at91_mci_write(host, ATMEL_PDC_RPR, sg->dma_address); 305 - at91_mci_write(host, ATMEL_PDC_RCR, sg->length / 4); 236 + at91_mci_write(host, ATMEL_PDC_RCR, (data->blksz & 0x3) ? sg->length : sg->length / 4); 306 237 } 307 238 else { 308 239 at91_mci_write(host, ATMEL_PDC_RNPR, sg->dma_address); 309 - at91_mci_write(host, ATMEL_PDC_RNCR, sg->length / 4); 240 + at91_mci_write(host, ATMEL_PDC_RNCR, (data->blksz & 0x3) ? sg->length : sg->length / 4); 310 241 } 311 242 } 312 243 ··· 346 277 347 278 dma_unmap_page(NULL, sg->dma_address, sg->length, DMA_FROM_DEVICE); 348 279 349 - data->bytes_xfered += sg->length; 350 - 351 280 if (cpu_is_at91rm9200()) { /* AT91RM9200 errata */ 352 281 unsigned int *buffer; 353 282 int index; ··· 361 294 } 362 295 363 296 flush_dcache_page(sg_page(sg)); 297 + 298 + data->bytes_xfered += sg->length; 364 299 } 365 300 366 301 /* Is there another transfer to trigger? 
*/ ··· 403 334 at91_mci_write(host, AT91_MCI_IER, AT91_MCI_BLKE); 404 335 } else 405 336 at91_mci_write(host, AT91_MCI_IER, AT91_MCI_NOTBUSY); 406 - 407 - data->bytes_xfered = host->total_length; 408 337 } 338 + 339 + /* 340 + * Update bytes tranfered count during a write operation 341 + */ 342 + static void at91_mci_update_bytes_xfered(struct at91mci_host *host) 343 + { 344 + struct mmc_data *data; 345 + 346 + /* always deal with the effective request (and not the current cmd) */ 347 + 348 + if (host->request->cmd && host->request->cmd->error != 0) 349 + return; 350 + 351 + if (host->request->data) { 352 + data = host->request->data; 353 + if (data->flags & MMC_DATA_WRITE) { 354 + /* card is in IDLE mode now */ 355 + pr_debug("-> bytes_xfered %d, total_length = %d\n", 356 + data->bytes_xfered, host->total_length); 357 + data->bytes_xfered = data->blksz * data->blocks; 358 + } 359 + } 360 + } 361 + 409 362 410 363 /*Handle after command sent ready*/ 411 364 static int at91_mci_handle_cmdrdy(struct at91mci_host *host) ··· 441 350 } else return 1; 442 351 } else if (host->cmd->data->flags & MMC_DATA_WRITE) { 443 352 /*After sendding multi-block-write command, start DMA transfer*/ 444 - at91_mci_write(host, AT91_MCI_IER, AT91_MCI_TXBUFE); 445 - at91_mci_write(host, AT91_MCI_IER, AT91_MCI_BLKE); 353 + at91_mci_write(host, AT91_MCI_IER, AT91_MCI_TXBUFE | AT91_MCI_BLKE); 446 354 at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_TXTEN); 447 355 } 448 356 ··· 520 430 521 431 if (data) { 522 432 523 - if ( data->blksz & 0x3 ) { 524 - pr_debug("Unsupported block size\n"); 525 - cmd->error = -EINVAL; 526 - mmc_request_done(host->mmc, host->request); 527 - return; 433 + if (cpu_is_at91rm9200() || cpu_is_at91sam9261()) { 434 + if (data->blksz & 0x3) { 435 + pr_debug("Unsupported block size\n"); 436 + cmd->error = -EINVAL; 437 + mmc_request_done(host->mmc, host->request); 438 + return; 439 + } 440 + if (data->flags & MMC_DATA_STREAM) { 441 + pr_debug("Stream commands not 
supported\n"); 442 + cmd->error = -EINVAL; 443 + mmc_request_done(host->mmc, host->request); 444 + return; 445 + } 528 446 } 529 447 530 448 block_length = data->blksz; ··· 579 481 ier = AT91_MCI_CMDRDY; 580 482 } else { 581 483 /* zero block length and PDC mode */ 582 - mr = at91_mci_read(host, AT91_MCI_MR) & 0x7fff; 583 - at91_mci_write(host, AT91_MCI_MR, mr | (block_length << 16) | AT91_MCI_PDCMODE); 484 + mr = at91_mci_read(host, AT91_MCI_MR) & 0x5fff; 485 + mr |= (data->blksz & 0x3) ? AT91_MCI_PDCFBYTE : 0; 486 + mr |= (block_length << 16); 487 + mr |= AT91_MCI_PDCMODE; 488 + at91_mci_write(host, AT91_MCI_MR, mr); 489 + 490 + if (!(cpu_is_at91rm9200() || cpu_is_at91sam9261())) 491 + at91_mci_write(host, AT91_MCI_BLKR, 492 + AT91_MCI_BLKR_BCNT(blocks) | 493 + AT91_MCI_BLKR_BLKLEN(block_length)); 584 494 585 495 /* 586 496 * Disable the PDC controller ··· 614 508 * Handle a write 615 509 */ 616 510 host->total_length = block_length * blocks; 511 + /* 512 + * AT91SAM926[0/3] Data Write Operation and 513 + * number of bytes erratum 514 + */ 515 + if (cpu_is_at91sam9260 () || cpu_is_at91sam9263()) 516 + if (host->total_length < 12) 517 + host->total_length = 12; 617 518 host->buffer = dma_alloc_coherent(NULL, 618 519 host->total_length, 619 520 &host->physical_address, GFP_KERNEL); ··· 630 517 pr_debug("Transmitting %d bytes\n", host->total_length); 631 518 632 519 at91_mci_write(host, ATMEL_PDC_TPR, host->physical_address); 633 - at91_mci_write(host, ATMEL_PDC_TCR, host->total_length / 4); 520 + at91_mci_write(host, ATMEL_PDC_TCR, (data->blksz & 0x3) ? 
521 + host->total_length : host->total_length / 4); 522 + 634 523 ier = AT91_MCI_CMDRDY; 635 524 } 636 525 } ··· 667 552 else if ((!(host->flags & FL_SENT_STOP)) && host->request->stop) { 668 553 host->flags |= FL_SENT_STOP; 669 554 at91_mci_send_command(host, host->request->stop); 670 - } 671 - else 555 + } else { 556 + del_timer(&host->timer); 557 + /* the at91rm9200 mci controller hangs after some transfers, 558 + * and the workaround is to reset it after each transfer. 559 + */ 560 + if (cpu_is_at91rm9200()) 561 + at91_reset_host(host); 672 562 mmc_request_done(host->mmc, host->request); 563 + } 673 564 } 674 565 675 566 /* 676 567 * Handle a command that has been completed 677 568 */ 678 - static void at91_mci_completed_command(struct at91mci_host *host) 569 + static void at91_mci_completed_command(struct at91mci_host *host, unsigned int status) 679 570 { 680 571 struct mmc_command *cmd = host->cmd; 681 - unsigned int status; 572 + struct mmc_data *data = cmd->data; 682 573 683 - at91_mci_write(host, AT91_MCI_IDR, 0xffffffff); 574 + at91_mci_write(host, AT91_MCI_IDR, 0xffffffff & ~(AT91_MCI_SDIOIRQA | AT91_MCI_SDIOIRQB)); 684 575 685 576 cmd->resp[0] = at91_mci_read(host, AT91_MCI_RSPR(0)); 686 577 cmd->resp[1] = at91_mci_read(host, AT91_MCI_RSPR(1)); ··· 698 577 host->buffer = NULL; 699 578 } 700 579 701 - status = at91_mci_read(host, AT91_MCI_SR); 702 - 703 - pr_debug("Status = %08X [%08X %08X %08X %08X]\n", 704 - status, cmd->resp[0], cmd->resp[1], cmd->resp[2], cmd->resp[3]); 580 + pr_debug("Status = %08X/%08x [%08X %08X %08X %08X]\n", 581 + status, at91_mci_read(host, AT91_MCI_SR), 582 + cmd->resp[0], cmd->resp[1], cmd->resp[2], cmd->resp[3]); 705 583 706 584 if (status & AT91_MCI_ERRORS) { 707 585 if ((status & AT91_MCI_RCRCE) && !(mmc_resp_type(cmd) & MMC_RSP_CRC)) { 708 586 cmd->error = 0; 709 587 } 710 588 else { 711 - if (status & (AT91_MCI_RTOE | AT91_MCI_DTOE)) 712 - cmd->error = -ETIMEDOUT; 713 - else if (status & (AT91_MCI_RCRCE | 
AT91_MCI_DCRCE)) 714 - cmd->error = -EILSEQ; 715 - else 716 - cmd->error = -EIO; 589 + if (status & (AT91_MCI_DTOE | AT91_MCI_DCRCE)) { 590 + if (data) { 591 + if (status & AT91_MCI_DTOE) 592 + data->error = -ETIMEDOUT; 593 + else if (status & AT91_MCI_DCRCE) 594 + data->error = -EILSEQ; 595 + } 596 + } else { 597 + if (status & AT91_MCI_RTOE) 598 + cmd->error = -ETIMEDOUT; 599 + else if (status & AT91_MCI_RCRCE) 600 + cmd->error = -EILSEQ; 601 + else 602 + cmd->error = -EIO; 603 + } 717 604 718 - pr_debug("Error detected and set to %d (cmd = %d, retries = %d)\n", 719 - cmd->error, cmd->opcode, cmd->retries); 605 + pr_debug("Error detected and set to %d/%d (cmd = %d, retries = %d)\n", 606 + cmd->error, data ? data->error : 0, 607 + cmd->opcode, cmd->retries); 720 608 } 721 609 } 722 610 else ··· 742 612 struct at91mci_host *host = mmc_priv(mmc); 743 613 host->request = mrq; 744 614 host->flags = 0; 615 + 616 + mod_timer(&host->timer, jiffies + HZ); 745 617 746 618 at91_mci_process_next(host); 747 619 } ··· 868 736 869 737 if (int_status & AT91_MCI_NOTBUSY) { 870 738 pr_debug("Card is ready\n"); 739 + at91_mci_update_bytes_xfered(host); 871 740 completed = 1; 872 741 } 873 742 ··· 877 744 878 745 if (int_status & AT91_MCI_BLKE) { 879 746 pr_debug("Block transfer has ended\n"); 880 - completed = 1; 747 + if (host->request->data && host->request->data->blocks > 1) { 748 + /* multi block write : complete multi write 749 + * command and send stop */ 750 + completed = 1; 751 + } else { 752 + at91_mci_write(host, AT91_MCI_IER, AT91_MCI_NOTBUSY); 753 + } 881 754 } 755 + 756 + if (int_status & AT91_MCI_SDIOIRQA) 757 + mmc_signal_sdio_irq(host->mmc); 758 + 759 + if (int_status & AT91_MCI_SDIOIRQB) 760 + mmc_signal_sdio_irq(host->mmc); 882 761 883 762 if (int_status & AT91_MCI_TXRDY) 884 763 pr_debug("Ready to transmit\n"); ··· 906 761 907 762 if (completed) { 908 763 pr_debug("Completed command\n"); 909 - at91_mci_write(host, AT91_MCI_IDR, 0xffffffff); 910 - 
at91_mci_completed_command(host); 764 + at91_mci_write(host, AT91_MCI_IDR, 0xffffffff & ~(AT91_MCI_SDIOIRQA | AT91_MCI_SDIOIRQB)); 765 + at91_mci_completed_command(host, int_status); 911 766 } else 912 - at91_mci_write(host, AT91_MCI_IDR, int_status); 767 + at91_mci_write(host, AT91_MCI_IDR, int_status & ~(AT91_MCI_SDIOIRQA | AT91_MCI_SDIOIRQB)); 913 768 914 769 return IRQ_HANDLED; 915 770 } ··· 938 793 939 794 static int at91_mci_get_ro(struct mmc_host *mmc) 940 795 { 941 - int read_only = 0; 942 796 struct at91mci_host *host = mmc_priv(mmc); 943 797 944 - if (host->board->wp_pin) { 945 - read_only = gpio_get_value(host->board->wp_pin); 946 - printk(KERN_WARNING "%s: card is %s\n", mmc_hostname(mmc), 947 - (read_only ? "read-only" : "read-write") ); 948 - } 949 - else { 950 - printk(KERN_WARNING "%s: host does not support reading read-only " 951 - "switch. Assuming write-enable.\n", mmc_hostname(mmc)); 952 - } 953 - return read_only; 798 + if (host->board->wp_pin) 799 + return !!gpio_get_value(host->board->wp_pin); 800 + /* 801 + * Board doesn't support read only detection; let the mmc core 802 + * decide what to do. 803 + */ 804 + return -ENOSYS; 805 + } 806 + 807 + static void at91_mci_enable_sdio_irq(struct mmc_host *mmc, int enable) 808 + { 809 + struct at91mci_host *host = mmc_priv(mmc); 810 + 811 + pr_debug("%s: sdio_irq %c : %s\n", mmc_hostname(host->mmc), 812 + host->board->slot_b ? 'B':'A', enable ? "enable" : "disable"); 813 + at91_mci_write(host, enable ? AT91_MCI_IER : AT91_MCI_IDR, 814 + host->board->slot_b ? 
AT91_MCI_SDIOIRQB : AT91_MCI_SDIOIRQA); 815 + 954 816 } 955 817 956 818 static const struct mmc_host_ops at91_mci_ops = { 957 819 .request = at91_mci_request, 958 820 .set_ios = at91_mci_set_ios, 959 821 .get_ro = at91_mci_get_ro, 822 + .enable_sdio_irq = at91_mci_enable_sdio_irq, 960 823 }; 961 824 962 825 /* ··· 995 842 mmc->f_min = 375000; 996 843 mmc->f_max = 25000000; 997 844 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; 845 + mmc->caps = MMC_CAP_SDIO_IRQ; 998 846 999 847 mmc->max_blk_size = 4095; 1000 848 mmc->max_blk_count = mmc->max_req_size; ··· 1089 935 1090 936 mmc_add_host(mmc); 1091 937 938 + setup_timer(&host->timer, at91_timeout_timer, (unsigned long)host); 939 + 1092 940 /* 1093 941 * monitor card insertion/removal if we can 1094 942 */ ··· 1151 995 } 1152 996 1153 997 at91_mci_disable(host); 998 + del_timer_sync(&host->timer); 1154 999 mmc_remove_host(mmc); 1155 1000 free_irq(host->irq, host); 1156 1001
+91
drivers/mmc/host/atmel-mci-regs.h
··· 1 + /* 2 + * Atmel MultiMedia Card Interface driver 3 + * 4 + * Copyright (C) 2004-2006 Atmel Corporation 5 + * 6 + * This program is free software; you can redistribute it and/or modify 7 + * it under the terms of the GNU General Public License version 2 as 8 + * published by the Free Software Foundation. 9 + */ 10 + #ifndef __DRIVERS_MMC_ATMEL_MCI_H__ 11 + #define __DRIVERS_MMC_ATMEL_MCI_H__ 12 + 13 + /* MCI Register Definitions */ 14 + #define MCI_CR 0x0000 /* Control */ 15 + # define MCI_CR_MCIEN ( 1 << 0) /* MCI Enable */ 16 + # define MCI_CR_MCIDIS ( 1 << 1) /* MCI Disable */ 17 + # define MCI_CR_SWRST ( 1 << 7) /* Software Reset */ 18 + #define MCI_MR 0x0004 /* Mode */ 19 + # define MCI_MR_CLKDIV(x) ((x) << 0) /* Clock Divider */ 20 + # define MCI_MR_RDPROOF ( 1 << 11) /* Read Proof */ 21 + # define MCI_MR_WRPROOF ( 1 << 12) /* Write Proof */ 22 + #define MCI_DTOR 0x0008 /* Data Timeout */ 23 + # define MCI_DTOCYC(x) ((x) << 0) /* Data Timeout Cycles */ 24 + # define MCI_DTOMUL(x) ((x) << 4) /* Data Timeout Multiplier */ 25 + #define MCI_SDCR 0x000c /* SD Card / SDIO */ 26 + # define MCI_SDCSEL_SLOT_A ( 0 << 0) /* Select SD slot A */ 27 + # define MCI_SDCSEL_SLOT_B ( 1 << 0) /* Select SD slot A */ 28 + # define MCI_SDCBUS_1BIT ( 0 << 7) /* 1-bit data bus */ 29 + # define MCI_SDCBUS_4BIT ( 1 << 7) /* 4-bit data bus */ 30 + #define MCI_ARGR 0x0010 /* Command Argument */ 31 + #define MCI_CMDR 0x0014 /* Command */ 32 + # define MCI_CMDR_CMDNB(x) ((x) << 0) /* Command Opcode */ 33 + # define MCI_CMDR_RSPTYP_NONE ( 0 << 6) /* No response */ 34 + # define MCI_CMDR_RSPTYP_48BIT ( 1 << 6) /* 48-bit response */ 35 + # define MCI_CMDR_RSPTYP_136BIT ( 2 << 6) /* 136-bit response */ 36 + # define MCI_CMDR_SPCMD_INIT ( 1 << 8) /* Initialization command */ 37 + # define MCI_CMDR_SPCMD_SYNC ( 2 << 8) /* Synchronized command */ 38 + # define MCI_CMDR_SPCMD_INT ( 4 << 8) /* Interrupt command */ 39 + # define MCI_CMDR_SPCMD_INTRESP ( 5 << 8) /* Interrupt response */ 40 + # 
define MCI_CMDR_OPDCMD ( 1 << 11) /* Open Drain */ 41 + # define MCI_CMDR_MAXLAT_5CYC ( 0 << 12) /* Max latency 5 cycles */ 42 + # define MCI_CMDR_MAXLAT_64CYC ( 1 << 12) /* Max latency 64 cycles */ 43 + # define MCI_CMDR_START_XFER ( 1 << 16) /* Start data transfer */ 44 + # define MCI_CMDR_STOP_XFER ( 2 << 16) /* Stop data transfer */ 45 + # define MCI_CMDR_TRDIR_WRITE ( 0 << 18) /* Write data */ 46 + # define MCI_CMDR_TRDIR_READ ( 1 << 18) /* Read data */ 47 + # define MCI_CMDR_BLOCK ( 0 << 19) /* Single-block transfer */ 48 + # define MCI_CMDR_MULTI_BLOCK ( 1 << 19) /* Multi-block transfer */ 49 + # define MCI_CMDR_STREAM ( 2 << 19) /* MMC Stream transfer */ 50 + # define MCI_CMDR_SDIO_BYTE ( 4 << 19) /* SDIO Byte transfer */ 51 + # define MCI_CMDR_SDIO_BLOCK ( 5 << 19) /* SDIO Block transfer */ 52 + # define MCI_CMDR_SDIO_SUSPEND ( 1 << 24) /* SDIO Suspend Command */ 53 + # define MCI_CMDR_SDIO_RESUME ( 2 << 24) /* SDIO Resume Command */ 54 + #define MCI_BLKR 0x0018 /* Block */ 55 + # define MCI_BCNT(x) ((x) << 0) /* Data Block Count */ 56 + # define MCI_BLKLEN(x) ((x) << 16) /* Data Block Length */ 57 + #define MCI_RSPR 0x0020 /* Response 0 */ 58 + #define MCI_RSPR1 0x0024 /* Response 1 */ 59 + #define MCI_RSPR2 0x0028 /* Response 2 */ 60 + #define MCI_RSPR3 0x002c /* Response 3 */ 61 + #define MCI_RDR 0x0030 /* Receive Data */ 62 + #define MCI_TDR 0x0034 /* Transmit Data */ 63 + #define MCI_SR 0x0040 /* Status */ 64 + #define MCI_IER 0x0044 /* Interrupt Enable */ 65 + #define MCI_IDR 0x0048 /* Interrupt Disable */ 66 + #define MCI_IMR 0x004c /* Interrupt Mask */ 67 + # define MCI_CMDRDY ( 1 << 0) /* Command Ready */ 68 + # define MCI_RXRDY ( 1 << 1) /* Receiver Ready */ 69 + # define MCI_TXRDY ( 1 << 2) /* Transmitter Ready */ 70 + # define MCI_BLKE ( 1 << 3) /* Data Block Ended */ 71 + # define MCI_DTIP ( 1 << 4) /* Data Transfer In Progress */ 72 + # define MCI_NOTBUSY ( 1 << 5) /* Data Not Busy */ 73 + # define MCI_SDIOIRQA ( 1 << 8) /* SDIO IRQ in slot A 
*/ 74 + # define MCI_SDIOIRQB ( 1 << 9) /* SDIO IRQ in slot B */ 75 + # define MCI_RINDE ( 1 << 16) /* Response Index Error */ 76 + # define MCI_RDIRE ( 1 << 17) /* Response Direction Error */ 77 + # define MCI_RCRCE ( 1 << 18) /* Response CRC Error */ 78 + # define MCI_RENDE ( 1 << 19) /* Response End Bit Error */ 79 + # define MCI_RTOE ( 1 << 20) /* Response Time-Out Error */ 80 + # define MCI_DCRCE ( 1 << 21) /* Data CRC Error */ 81 + # define MCI_DTOE ( 1 << 22) /* Data Time-Out Error */ 82 + # define MCI_OVRE ( 1 << 30) /* RX Overrun Error */ 83 + # define MCI_UNRE ( 1 << 31) /* TX Underrun Error */ 84 + 85 + /* Register access macros */ 86 + #define mci_readl(port,reg) \ 87 + __raw_readl((port)->regs + MCI_##reg) 88 + #define mci_writel(port,reg,value) \ 89 + __raw_writel((value), (port)->regs + MCI_##reg) 90 + 91 + #endif /* __DRIVERS_MMC_ATMEL_MCI_H__ */
+981
drivers/mmc/host/atmel-mci.c
··· 1 + /* 2 + * Atmel MultiMedia Card Interface driver 3 + * 4 + * Copyright (C) 2004-2008 Atmel Corporation 5 + * 6 + * This program is free software; you can redistribute it and/or modify 7 + * it under the terms of the GNU General Public License version 2 as 8 + * published by the Free Software Foundation. 9 + */ 10 + #include <linux/blkdev.h> 11 + #include <linux/clk.h> 12 + #include <linux/device.h> 13 + #include <linux/init.h> 14 + #include <linux/interrupt.h> 15 + #include <linux/ioport.h> 16 + #include <linux/module.h> 17 + #include <linux/platform_device.h> 18 + #include <linux/scatterlist.h> 19 + 20 + #include <linux/mmc/host.h> 21 + 22 + #include <asm/atmel-mci.h> 23 + #include <asm/io.h> 24 + #include <asm/unaligned.h> 25 + 26 + #include <asm/arch/board.h> 27 + #include <asm/arch/gpio.h> 28 + 29 + #include "atmel-mci-regs.h" 30 + 31 + #define ATMCI_DATA_ERROR_FLAGS (MCI_DCRCE | MCI_DTOE | MCI_OVRE | MCI_UNRE) 32 + 33 + enum { 34 + EVENT_CMD_COMPLETE = 0, 35 + EVENT_DATA_ERROR, 36 + EVENT_DATA_COMPLETE, 37 + EVENT_STOP_SENT, 38 + EVENT_STOP_COMPLETE, 39 + EVENT_XFER_COMPLETE, 40 + }; 41 + 42 + struct atmel_mci { 43 + struct mmc_host *mmc; 44 + void __iomem *regs; 45 + 46 + struct scatterlist *sg; 47 + unsigned int pio_offset; 48 + 49 + struct mmc_request *mrq; 50 + struct mmc_command *cmd; 51 + struct mmc_data *data; 52 + 53 + u32 cmd_status; 54 + u32 data_status; 55 + u32 stop_status; 56 + u32 stop_cmdr; 57 + 58 + u32 mode_reg; 59 + u32 sdc_reg; 60 + 61 + struct tasklet_struct tasklet; 62 + unsigned long pending_events; 63 + unsigned long completed_events; 64 + 65 + int present; 66 + int detect_pin; 67 + int wp_pin; 68 + 69 + /* For detect pin debouncing */ 70 + struct timer_list detect_timer; 71 + 72 + unsigned long bus_hz; 73 + unsigned long mapbase; 74 + struct clk *mck; 75 + struct platform_device *pdev; 76 + }; 77 + 78 + #define atmci_is_completed(host, event) \ 79 + test_bit(event, &host->completed_events) 80 + #define 
atmci_test_and_clear_pending(host, event) \ 81 + test_and_clear_bit(event, &host->pending_events) 82 + #define atmci_test_and_set_completed(host, event) \ 83 + test_and_set_bit(event, &host->completed_events) 84 + #define atmci_set_completed(host, event) \ 85 + set_bit(event, &host->completed_events) 86 + #define atmci_set_pending(host, event) \ 87 + set_bit(event, &host->pending_events) 88 + #define atmci_clear_pending(host, event) \ 89 + clear_bit(event, &host->pending_events) 90 + 91 + 92 + static void atmci_enable(struct atmel_mci *host) 93 + { 94 + clk_enable(host->mck); 95 + mci_writel(host, CR, MCI_CR_MCIEN); 96 + mci_writel(host, MR, host->mode_reg); 97 + mci_writel(host, SDCR, host->sdc_reg); 98 + } 99 + 100 + static void atmci_disable(struct atmel_mci *host) 101 + { 102 + mci_writel(host, CR, MCI_CR_SWRST); 103 + 104 + /* Stall until write is complete, then disable the bus clock */ 105 + mci_readl(host, SR); 106 + clk_disable(host->mck); 107 + } 108 + 109 + static inline unsigned int ns_to_clocks(struct atmel_mci *host, 110 + unsigned int ns) 111 + { 112 + return (ns * (host->bus_hz / 1000000) + 999) / 1000; 113 + } 114 + 115 + static void atmci_set_timeout(struct atmel_mci *host, 116 + struct mmc_data *data) 117 + { 118 + static unsigned dtomul_to_shift[] = { 119 + 0, 4, 7, 8, 10, 12, 16, 20 120 + }; 121 + unsigned timeout; 122 + unsigned dtocyc; 123 + unsigned dtomul; 124 + 125 + timeout = ns_to_clocks(host, data->timeout_ns) + data->timeout_clks; 126 + 127 + for (dtomul = 0; dtomul < 8; dtomul++) { 128 + unsigned shift = dtomul_to_shift[dtomul]; 129 + dtocyc = (timeout + (1 << shift) - 1) >> shift; 130 + if (dtocyc < 15) 131 + break; 132 + } 133 + 134 + if (dtomul >= 8) { 135 + dtomul = 7; 136 + dtocyc = 15; 137 + } 138 + 139 + dev_vdbg(&host->mmc->class_dev, "setting timeout to %u cycles\n", 140 + dtocyc << dtomul_to_shift[dtomul]); 141 + mci_writel(host, DTOR, (MCI_DTOMUL(dtomul) | MCI_DTOCYC(dtocyc))); 142 + } 143 + 144 + /* 145 + * Return mask with 
command flags to be enabled for this command. 146 + */ 147 + static u32 atmci_prepare_command(struct mmc_host *mmc, 148 + struct mmc_command *cmd) 149 + { 150 + struct mmc_data *data; 151 + u32 cmdr; 152 + 153 + cmd->error = -EINPROGRESS; 154 + 155 + cmdr = MCI_CMDR_CMDNB(cmd->opcode); 156 + 157 + if (cmd->flags & MMC_RSP_PRESENT) { 158 + if (cmd->flags & MMC_RSP_136) 159 + cmdr |= MCI_CMDR_RSPTYP_136BIT; 160 + else 161 + cmdr |= MCI_CMDR_RSPTYP_48BIT; 162 + } 163 + 164 + /* 165 + * This should really be MAXLAT_5 for CMD2 and ACMD41, but 166 + * it's too difficult to determine whether this is an ACMD or 167 + * not. Better make it 64. 168 + */ 169 + cmdr |= MCI_CMDR_MAXLAT_64CYC; 170 + 171 + if (mmc->ios.bus_mode == MMC_BUSMODE_OPENDRAIN) 172 + cmdr |= MCI_CMDR_OPDCMD; 173 + 174 + data = cmd->data; 175 + if (data) { 176 + cmdr |= MCI_CMDR_START_XFER; 177 + if (data->flags & MMC_DATA_STREAM) 178 + cmdr |= MCI_CMDR_STREAM; 179 + else if (data->blocks > 1) 180 + cmdr |= MCI_CMDR_MULTI_BLOCK; 181 + else 182 + cmdr |= MCI_CMDR_BLOCK; 183 + 184 + if (data->flags & MMC_DATA_READ) 185 + cmdr |= MCI_CMDR_TRDIR_READ; 186 + } 187 + 188 + return cmdr; 189 + } 190 + 191 + static void atmci_start_command(struct atmel_mci *host, 192 + struct mmc_command *cmd, 193 + u32 cmd_flags) 194 + { 195 + /* Must read host->cmd after testing event flags */ 196 + smp_rmb(); 197 + WARN_ON(host->cmd); 198 + host->cmd = cmd; 199 + 200 + dev_vdbg(&host->mmc->class_dev, 201 + "start command: ARGR=0x%08x CMDR=0x%08x\n", 202 + cmd->arg, cmd_flags); 203 + 204 + mci_writel(host, ARGR, cmd->arg); 205 + mci_writel(host, CMDR, cmd_flags); 206 + } 207 + 208 + static void send_stop_cmd(struct mmc_host *mmc, struct mmc_data *data) 209 + { 210 + struct atmel_mci *host = mmc_priv(mmc); 211 + 212 + atmci_start_command(host, data->stop, host->stop_cmdr); 213 + mci_writel(host, IER, MCI_CMDRDY); 214 + } 215 + 216 + static void atmci_request_end(struct mmc_host *mmc, struct mmc_request *mrq) 217 + { 218 + struct 
atmel_mci *host = mmc_priv(mmc); 219 + 220 + WARN_ON(host->cmd || host->data); 221 + host->mrq = NULL; 222 + 223 + atmci_disable(host); 224 + 225 + mmc_request_done(mmc, mrq); 226 + } 227 + 228 + /* 229 + * Returns a mask of interrupt flags to be enabled after the whole 230 + * request has been prepared. 231 + */ 232 + static u32 atmci_submit_data(struct mmc_host *mmc, struct mmc_data *data) 233 + { 234 + struct atmel_mci *host = mmc_priv(mmc); 235 + u32 iflags; 236 + 237 + data->error = -EINPROGRESS; 238 + 239 + WARN_ON(host->data); 240 + host->sg = NULL; 241 + host->data = data; 242 + 243 + mci_writel(host, BLKR, MCI_BCNT(data->blocks) 244 + | MCI_BLKLEN(data->blksz)); 245 + dev_vdbg(&mmc->class_dev, "BLKR=0x%08x\n", 246 + MCI_BCNT(data->blocks) | MCI_BLKLEN(data->blksz)); 247 + 248 + iflags = ATMCI_DATA_ERROR_FLAGS; 249 + host->sg = data->sg; 250 + host->pio_offset = 0; 251 + if (data->flags & MMC_DATA_READ) 252 + iflags |= MCI_RXRDY; 253 + else 254 + iflags |= MCI_TXRDY; 255 + 256 + return iflags; 257 + } 258 + 259 + static void atmci_request(struct mmc_host *mmc, struct mmc_request *mrq) 260 + { 261 + struct atmel_mci *host = mmc_priv(mmc); 262 + struct mmc_data *data; 263 + struct mmc_command *cmd; 264 + u32 iflags; 265 + u32 cmdflags = 0; 266 + 267 + iflags = mci_readl(host, IMR); 268 + if (iflags) 269 + dev_warn(&mmc->class_dev, "WARNING: IMR=0x%08x\n", 270 + mci_readl(host, IMR)); 271 + 272 + WARN_ON(host->mrq != NULL); 273 + 274 + /* 275 + * We may "know" the card is gone even though there's still an 276 + * electrical connection. If so, we really need to communicate 277 + * this to the MMC core since there won't be any more 278 + * interrupts as the card is completely removed. Otherwise, 279 + * the MMC core might believe the card is still there even 280 + * though the card was just removed very slowly. 
281 + */ 282 + if (!host->present) { 283 + mrq->cmd->error = -ENOMEDIUM; 284 + mmc_request_done(mmc, mrq); 285 + return; 286 + } 287 + 288 + host->mrq = mrq; 289 + host->pending_events = 0; 290 + host->completed_events = 0; 291 + 292 + atmci_enable(host); 293 + 294 + /* We don't support multiple blocks of weird lengths. */ 295 + data = mrq->data; 296 + if (data) { 297 + if (data->blocks > 1 && data->blksz & 3) 298 + goto fail; 299 + atmci_set_timeout(host, data); 300 + } 301 + 302 + iflags = MCI_CMDRDY; 303 + cmd = mrq->cmd; 304 + cmdflags = atmci_prepare_command(mmc, cmd); 305 + atmci_start_command(host, cmd, cmdflags); 306 + 307 + if (data) 308 + iflags |= atmci_submit_data(mmc, data); 309 + 310 + if (mrq->stop) { 311 + host->stop_cmdr = atmci_prepare_command(mmc, mrq->stop); 312 + host->stop_cmdr |= MCI_CMDR_STOP_XFER; 313 + if (!(data->flags & MMC_DATA_WRITE)) 314 + host->stop_cmdr |= MCI_CMDR_TRDIR_READ; 315 + if (data->flags & MMC_DATA_STREAM) 316 + host->stop_cmdr |= MCI_CMDR_STREAM; 317 + else 318 + host->stop_cmdr |= MCI_CMDR_MULTI_BLOCK; 319 + } 320 + 321 + /* 322 + * We could have enabled interrupts earlier, but I suspect 323 + * that would open up a nice can of interesting race 324 + * conditions (e.g. command and data complete, but stop not 325 + * prepared yet.) 
326 + */ 327 + mci_writel(host, IER, iflags); 328 + 329 + return; 330 + 331 + fail: 332 + atmci_disable(host); 333 + host->mrq = NULL; 334 + mrq->cmd->error = -EINVAL; 335 + mmc_request_done(mmc, mrq); 336 + } 337 + 338 + static void atmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) 339 + { 340 + struct atmel_mci *host = mmc_priv(mmc); 341 + 342 + if (ios->clock) { 343 + u32 clkdiv; 344 + 345 + /* Set clock rate */ 346 + clkdiv = DIV_ROUND_UP(host->bus_hz, 2 * ios->clock) - 1; 347 + if (clkdiv > 255) { 348 + dev_warn(&mmc->class_dev, 349 + "clock %u too slow; using %lu\n", 350 + ios->clock, host->bus_hz / (2 * 256)); 351 + clkdiv = 255; 352 + } 353 + 354 + host->mode_reg = MCI_MR_CLKDIV(clkdiv) | MCI_MR_WRPROOF 355 + | MCI_MR_RDPROOF; 356 + } 357 + 358 + switch (ios->bus_width) { 359 + case MMC_BUS_WIDTH_1: 360 + host->sdc_reg = 0; 361 + break; 362 + case MMC_BUS_WIDTH_4: 363 + host->sdc_reg = MCI_SDCBUS_4BIT; 364 + break; 365 + } 366 + 367 + switch (ios->power_mode) { 368 + case MMC_POWER_ON: 369 + /* Send init sequence (74 clock cycles) */ 370 + atmci_enable(host); 371 + mci_writel(host, CMDR, MCI_CMDR_SPCMD_INIT); 372 + while (!(mci_readl(host, SR) & MCI_CMDRDY)) 373 + cpu_relax(); 374 + atmci_disable(host); 375 + break; 376 + default: 377 + /* 378 + * TODO: None of the currently available AVR32-based 379 + * boards allow MMC power to be turned off. Implement 380 + * power control when this can be tested properly. 381 + */ 382 + break; 383 + } 384 + } 385 + 386 + static int atmci_get_ro(struct mmc_host *mmc) 387 + { 388 + int read_only = 0; 389 + struct atmel_mci *host = mmc_priv(mmc); 390 + 391 + if (host->wp_pin >= 0) { 392 + read_only = gpio_get_value(host->wp_pin); 393 + dev_dbg(&mmc->class_dev, "card is %s\n", 394 + read_only ? "read-only" : "read-write"); 395 + } else { 396 + dev_dbg(&mmc->class_dev, 397 + "no pin for checking read-only switch." 
398 + " Assuming write-enable.\n"); 399 + } 400 + 401 + return read_only; 402 + } 403 + 404 + static struct mmc_host_ops atmci_ops = { 405 + .request = atmci_request, 406 + .set_ios = atmci_set_ios, 407 + .get_ro = atmci_get_ro, 408 + }; 409 + 410 + static void atmci_command_complete(struct atmel_mci *host, 411 + struct mmc_command *cmd, u32 status) 412 + { 413 + /* Read the response from the card (up to 16 bytes) */ 414 + cmd->resp[0] = mci_readl(host, RSPR); 415 + cmd->resp[1] = mci_readl(host, RSPR); 416 + cmd->resp[2] = mci_readl(host, RSPR); 417 + cmd->resp[3] = mci_readl(host, RSPR); 418 + 419 + if (status & MCI_RTOE) 420 + cmd->error = -ETIMEDOUT; 421 + else if ((cmd->flags & MMC_RSP_CRC) && (status & MCI_RCRCE)) 422 + cmd->error = -EILSEQ; 423 + else if (status & (MCI_RINDE | MCI_RDIRE | MCI_RENDE)) 424 + cmd->error = -EIO; 425 + else 426 + cmd->error = 0; 427 + 428 + if (cmd->error) { 429 + dev_dbg(&host->mmc->class_dev, 430 + "command error: status=0x%08x\n", status); 431 + 432 + if (cmd->data) { 433 + host->data = NULL; 434 + mci_writel(host, IDR, MCI_NOTBUSY 435 + | MCI_TXRDY | MCI_RXRDY 436 + | ATMCI_DATA_ERROR_FLAGS); 437 + } 438 + } 439 + } 440 + 441 + static void atmci_detect_change(unsigned long data) 442 + { 443 + struct atmel_mci *host = (struct atmel_mci *)data; 444 + struct mmc_request *mrq = host->mrq; 445 + int present; 446 + 447 + /* 448 + * atmci_remove() sets detect_pin to -1 before freeing the 449 + * interrupt. We must not re-enable the interrupt if it has 450 + * been freed. 451 + */ 452 + smp_rmb(); 453 + if (host->detect_pin < 0) 454 + return; 455 + 456 + enable_irq(gpio_to_irq(host->detect_pin)); 457 + present = !gpio_get_value(host->detect_pin); 458 + 459 + dev_vdbg(&host->pdev->dev, "detect change: %d (was %d)\n", 460 + present, host->present); 461 + 462 + if (present != host->present) { 463 + dev_dbg(&host->mmc->class_dev, "card %s\n", 464 + present ? 
"inserted" : "removed"); 465 + host->present = present; 466 + 467 + /* Reset controller if card is gone */ 468 + if (!present) { 469 + mci_writel(host, CR, MCI_CR_SWRST); 470 + mci_writel(host, IDR, ~0UL); 471 + mci_writel(host, CR, MCI_CR_MCIEN); 472 + } 473 + 474 + /* Clean up queue if present */ 475 + if (mrq) { 476 + /* 477 + * Reset controller to terminate any ongoing 478 + * commands or data transfers. 479 + */ 480 + mci_writel(host, CR, MCI_CR_SWRST); 481 + 482 + if (!atmci_is_completed(host, EVENT_CMD_COMPLETE)) 483 + mrq->cmd->error = -ENOMEDIUM; 484 + 485 + if (mrq->data && !atmci_is_completed(host, 486 + EVENT_DATA_COMPLETE)) { 487 + host->data = NULL; 488 + mrq->data->error = -ENOMEDIUM; 489 + } 490 + if (mrq->stop && !atmci_is_completed(host, 491 + EVENT_STOP_COMPLETE)) 492 + mrq->stop->error = -ENOMEDIUM; 493 + 494 + host->cmd = NULL; 495 + atmci_request_end(host->mmc, mrq); 496 + } 497 + 498 + mmc_detect_change(host->mmc, 0); 499 + } 500 + } 501 + 502 + static void atmci_tasklet_func(unsigned long priv) 503 + { 504 + struct mmc_host *mmc = (struct mmc_host *)priv; 505 + struct atmel_mci *host = mmc_priv(mmc); 506 + struct mmc_request *mrq = host->mrq; 507 + struct mmc_data *data = host->data; 508 + 509 + dev_vdbg(&mmc->class_dev, 510 + "tasklet: pending/completed/mask %lx/%lx/%x\n", 511 + host->pending_events, host->completed_events, 512 + mci_readl(host, IMR)); 513 + 514 + if (atmci_test_and_clear_pending(host, EVENT_CMD_COMPLETE)) { 515 + /* 516 + * host->cmd must be set to NULL before the interrupt 517 + * handler sees EVENT_CMD_COMPLETE 518 + */ 519 + host->cmd = NULL; 520 + smp_wmb(); 521 + atmci_set_completed(host, EVENT_CMD_COMPLETE); 522 + atmci_command_complete(host, mrq->cmd, host->cmd_status); 523 + 524 + if (!mrq->cmd->error && mrq->stop 525 + && atmci_is_completed(host, EVENT_XFER_COMPLETE) 526 + && !atmci_test_and_set_completed(host, 527 + EVENT_STOP_SENT)) 528 + send_stop_cmd(host->mmc, mrq->data); 529 + } 530 + if 
(atmci_test_and_clear_pending(host, EVENT_STOP_COMPLETE)) { 531 + /* 532 + * host->cmd must be set to NULL before the interrupt 533 + * handler sees EVENT_STOP_COMPLETE 534 + */ 535 + host->cmd = NULL; 536 + smp_wmb(); 537 + atmci_set_completed(host, EVENT_STOP_COMPLETE); 538 + atmci_command_complete(host, mrq->stop, host->stop_status); 539 + } 540 + if (atmci_test_and_clear_pending(host, EVENT_DATA_ERROR)) { 541 + u32 status = host->data_status; 542 + 543 + dev_vdbg(&mmc->class_dev, "data error: status=%08x\n", status); 544 + 545 + atmci_set_completed(host, EVENT_DATA_ERROR); 546 + atmci_set_completed(host, EVENT_DATA_COMPLETE); 547 + 548 + if (status & MCI_DTOE) { 549 + dev_dbg(&mmc->class_dev, 550 + "data timeout error\n"); 551 + data->error = -ETIMEDOUT; 552 + } else if (status & MCI_DCRCE) { 553 + dev_dbg(&mmc->class_dev, "data CRC error\n"); 554 + data->error = -EILSEQ; 555 + } else { 556 + dev_dbg(&mmc->class_dev, 557 + "data FIFO error (status=%08x)\n", 558 + status); 559 + data->error = -EIO; 560 + } 561 + 562 + if (host->present && data->stop 563 + && atmci_is_completed(host, EVENT_CMD_COMPLETE) 564 + && !atmci_test_and_set_completed( 565 + host, EVENT_STOP_SENT)) 566 + send_stop_cmd(host->mmc, data); 567 + 568 + host->data = NULL; 569 + } 570 + if (atmci_test_and_clear_pending(host, EVENT_DATA_COMPLETE)) { 571 + atmci_set_completed(host, EVENT_DATA_COMPLETE); 572 + 573 + if (!atmci_is_completed(host, EVENT_DATA_ERROR)) { 574 + data->bytes_xfered = data->blocks * data->blksz; 575 + data->error = 0; 576 + } 577 + 578 + host->data = NULL; 579 + } 580 + 581 + if (host->mrq && !host->cmd && !host->data) 582 + atmci_request_end(mmc, host->mrq); 583 + } 584 + 585 + static void atmci_read_data_pio(struct atmel_mci *host) 586 + { 587 + struct scatterlist *sg = host->sg; 588 + void *buf = sg_virt(sg); 589 + unsigned int offset = host->pio_offset; 590 + struct mmc_data *data = host->data; 591 + u32 value; 592 + u32 status; 593 + unsigned int nbytes = 0; 594 + 595 + 
do { 596 + value = mci_readl(host, RDR); 597 + if (likely(offset + 4 <= sg->length)) { 598 + put_unaligned(value, (u32 *)(buf + offset)); 599 + 600 + offset += 4; 601 + nbytes += 4; 602 + 603 + if (offset == sg->length) { 604 + host->sg = sg = sg_next(sg); 605 + if (!sg) 606 + goto done; 607 + 608 + offset = 0; 609 + buf = sg_virt(sg); 610 + } 611 + } else { 612 + unsigned int remaining = sg->length - offset; 613 + memcpy(buf + offset, &value, remaining); 614 + nbytes += remaining; 615 + 616 + flush_dcache_page(sg_page(sg)); 617 + host->sg = sg = sg_next(sg); 618 + if (!sg) 619 + goto done; 620 + 621 + offset = 4 - remaining; 622 + buf = sg_virt(sg); 623 + memcpy(buf, (u8 *)&value + remaining, offset); 624 + nbytes += offset; 625 + } 626 + 627 + status = mci_readl(host, SR); 628 + if (status & ATMCI_DATA_ERROR_FLAGS) { 629 + mci_writel(host, IDR, (MCI_NOTBUSY | MCI_RXRDY 630 + | ATMCI_DATA_ERROR_FLAGS)); 631 + host->data_status = status; 632 + atmci_set_pending(host, EVENT_DATA_ERROR); 633 + tasklet_schedule(&host->tasklet); 634 + break; 635 + } 636 + } while (status & MCI_RXRDY); 637 + 638 + host->pio_offset = offset; 639 + data->bytes_xfered += nbytes; 640 + 641 + return; 642 + 643 + done: 644 + mci_writel(host, IDR, MCI_RXRDY); 645 + mci_writel(host, IER, MCI_NOTBUSY); 646 + data->bytes_xfered += nbytes; 647 + atmci_set_completed(host, EVENT_XFER_COMPLETE); 648 + if (data->stop && atmci_is_completed(host, EVENT_CMD_COMPLETE) 649 + && !atmci_test_and_set_completed(host, EVENT_STOP_SENT)) 650 + send_stop_cmd(host->mmc, data); 651 + } 652 + 653 + static void atmci_write_data_pio(struct atmel_mci *host) 654 + { 655 + struct scatterlist *sg = host->sg; 656 + void *buf = sg_virt(sg); 657 + unsigned int offset = host->pio_offset; 658 + struct mmc_data *data = host->data; 659 + u32 value; 660 + u32 status; 661 + unsigned int nbytes = 0; 662 + 663 + do { 664 + if (likely(offset + 4 <= sg->length)) { 665 + value = get_unaligned((u32 *)(buf + offset)); 666 + 
mci_writel(host, TDR, value); 667 + 668 + offset += 4; 669 + nbytes += 4; 670 + if (offset == sg->length) { 671 + host->sg = sg = sg_next(sg); 672 + if (!sg) 673 + goto done; 674 + 675 + offset = 0; 676 + buf = sg_virt(sg); 677 + } 678 + } else { 679 + unsigned int remaining = sg->length - offset; 680 + 681 + value = 0; 682 + memcpy(&value, buf + offset, remaining); 683 + nbytes += remaining; 684 + 685 + host->sg = sg = sg_next(sg); 686 + if (!sg) { 687 + mci_writel(host, TDR, value); 688 + goto done; 689 + } 690 + 691 + offset = 4 - remaining; 692 + buf = sg_virt(sg); 693 + memcpy((u8 *)&value + remaining, buf, offset); 694 + mci_writel(host, TDR, value); 695 + nbytes += offset; 696 + } 697 + 698 + status = mci_readl(host, SR); 699 + if (status & ATMCI_DATA_ERROR_FLAGS) { 700 + mci_writel(host, IDR, (MCI_NOTBUSY | MCI_TXRDY 701 + | ATMCI_DATA_ERROR_FLAGS)); 702 + host->data_status = status; 703 + atmci_set_pending(host, EVENT_DATA_ERROR); 704 + tasklet_schedule(&host->tasklet); 705 + break; 706 + } 707 + } while (status & MCI_TXRDY); 708 + 709 + host->pio_offset = offset; 710 + data->bytes_xfered += nbytes; 711 + 712 + return; 713 + 714 + done: 715 + mci_writel(host, IDR, MCI_TXRDY); 716 + mci_writel(host, IER, MCI_NOTBUSY); 717 + data->bytes_xfered += nbytes; 718 + atmci_set_completed(host, EVENT_XFER_COMPLETE); 719 + if (data->stop && atmci_is_completed(host, EVENT_CMD_COMPLETE) 720 + && !atmci_test_and_set_completed(host, EVENT_STOP_SENT)) 721 + send_stop_cmd(host->mmc, data); 722 + } 723 + 724 + static void atmci_cmd_interrupt(struct mmc_host *mmc, u32 status) 725 + { 726 + struct atmel_mci *host = mmc_priv(mmc); 727 + 728 + mci_writel(host, IDR, MCI_CMDRDY); 729 + 730 + if (atmci_is_completed(host, EVENT_STOP_SENT)) { 731 + host->stop_status = status; 732 + atmci_set_pending(host, EVENT_STOP_COMPLETE); 733 + } else { 734 + host->cmd_status = status; 735 + atmci_set_pending(host, EVENT_CMD_COMPLETE); 736 + } 737 + 738 + tasklet_schedule(&host->tasklet); 739 + 
} 740 + 741 + static irqreturn_t atmci_interrupt(int irq, void *dev_id) 742 + { 743 + struct mmc_host *mmc = dev_id; 744 + struct atmel_mci *host = mmc_priv(mmc); 745 + u32 status, mask, pending; 746 + unsigned int pass_count = 0; 747 + 748 + spin_lock(&mmc->lock); 749 + 750 + do { 751 + status = mci_readl(host, SR); 752 + mask = mci_readl(host, IMR); 753 + pending = status & mask; 754 + if (!pending) 755 + break; 756 + 757 + if (pending & ATMCI_DATA_ERROR_FLAGS) { 758 + mci_writel(host, IDR, ATMCI_DATA_ERROR_FLAGS 759 + | MCI_RXRDY | MCI_TXRDY); 760 + pending &= mci_readl(host, IMR); 761 + host->data_status = status; 762 + atmci_set_pending(host, EVENT_DATA_ERROR); 763 + tasklet_schedule(&host->tasklet); 764 + } 765 + if (pending & MCI_NOTBUSY) { 766 + mci_writel(host, IDR, (MCI_NOTBUSY 767 + | ATMCI_DATA_ERROR_FLAGS)); 768 + atmci_set_pending(host, EVENT_DATA_COMPLETE); 769 + tasklet_schedule(&host->tasklet); 770 + } 771 + if (pending & MCI_RXRDY) 772 + atmci_read_data_pio(host); 773 + if (pending & MCI_TXRDY) 774 + atmci_write_data_pio(host); 775 + 776 + if (pending & MCI_CMDRDY) 777 + atmci_cmd_interrupt(mmc, status); 778 + } while (pass_count++ < 5); 779 + 780 + spin_unlock(&mmc->lock); 781 + 782 + return pass_count ? IRQ_HANDLED : IRQ_NONE; 783 + } 784 + 785 + static irqreturn_t atmci_detect_interrupt(int irq, void *dev_id) 786 + { 787 + struct mmc_host *mmc = dev_id; 788 + struct atmel_mci *host = mmc_priv(mmc); 789 + 790 + /* 791 + * Disable interrupts until the pin has stabilized and check 792 + * the state then. Use mod_timer() since we may be in the 793 + * middle of the timer routine when this interrupt triggers. 
794 + */ 795 + disable_irq_nosync(irq); 796 + mod_timer(&host->detect_timer, jiffies + msecs_to_jiffies(20)); 797 + 798 + return IRQ_HANDLED; 799 + } 800 + 801 + static int __init atmci_probe(struct platform_device *pdev) 802 + { 803 + struct mci_platform_data *pdata; 804 + struct atmel_mci *host; 805 + struct mmc_host *mmc; 806 + struct resource *regs; 807 + int irq; 808 + int ret; 809 + 810 + regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); 811 + if (!regs) 812 + return -ENXIO; 813 + pdata = pdev->dev.platform_data; 814 + if (!pdata) 815 + return -ENXIO; 816 + irq = platform_get_irq(pdev, 0); 817 + if (irq < 0) 818 + return irq; 819 + 820 + mmc = mmc_alloc_host(sizeof(struct atmel_mci), &pdev->dev); 821 + if (!mmc) 822 + return -ENOMEM; 823 + 824 + host = mmc_priv(mmc); 825 + host->pdev = pdev; 826 + host->mmc = mmc; 827 + host->detect_pin = pdata->detect_pin; 828 + host->wp_pin = pdata->wp_pin; 829 + 830 + host->mck = clk_get(&pdev->dev, "mci_clk"); 831 + if (IS_ERR(host->mck)) { 832 + ret = PTR_ERR(host->mck); 833 + goto err_clk_get; 834 + } 835 + 836 + ret = -ENOMEM; 837 + host->regs = ioremap(regs->start, regs->end - regs->start + 1); 838 + if (!host->regs) 839 + goto err_ioremap; 840 + 841 + clk_enable(host->mck); 842 + mci_writel(host, CR, MCI_CR_SWRST); 843 + host->bus_hz = clk_get_rate(host->mck); 844 + clk_disable(host->mck); 845 + 846 + host->mapbase = regs->start; 847 + 848 + mmc->ops = &atmci_ops; 849 + mmc->f_min = (host->bus_hz + 511) / 512; 850 + mmc->f_max = host->bus_hz / 2; 851 + mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; 852 + mmc->caps |= MMC_CAP_4_BIT_DATA; 853 + 854 + mmc->max_hw_segs = 64; 855 + mmc->max_phys_segs = 64; 856 + mmc->max_req_size = 32768 * 512; 857 + mmc->max_blk_size = 32768; 858 + mmc->max_blk_count = 512; 859 + 860 + tasklet_init(&host->tasklet, atmci_tasklet_func, (unsigned long)mmc); 861 + 862 + ret = request_irq(irq, atmci_interrupt, 0, pdev->dev.bus_id, mmc); 863 + if (ret) 864 + goto err_request_irq; 865 + 
866 + /* Assume card is present if we don't have a detect pin */ 867 + host->present = 1; 868 + if (host->detect_pin >= 0) { 869 + if (gpio_request(host->detect_pin, "mmc_detect")) { 870 + dev_dbg(&mmc->class_dev, "no detect pin available\n"); 871 + host->detect_pin = -1; 872 + } else { 873 + host->present = !gpio_get_value(host->detect_pin); 874 + } 875 + } 876 + if (host->wp_pin >= 0) { 877 + if (gpio_request(host->wp_pin, "mmc_wp")) { 878 + dev_dbg(&mmc->class_dev, "no WP pin available\n"); 879 + host->wp_pin = -1; 880 + } 881 + } 882 + 883 + platform_set_drvdata(pdev, host); 884 + 885 + mmc_add_host(mmc); 886 + 887 + if (host->detect_pin >= 0) { 888 + setup_timer(&host->detect_timer, atmci_detect_change, 889 + (unsigned long)host); 890 + 891 + ret = request_irq(gpio_to_irq(host->detect_pin), 892 + atmci_detect_interrupt, 893 + IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING, 894 + "mmc-detect", mmc); 895 + if (ret) { 896 + dev_dbg(&mmc->class_dev, 897 + "could not request IRQ %d for detect pin\n", 898 + gpio_to_irq(host->detect_pin)); 899 + gpio_free(host->detect_pin); 900 + host->detect_pin = -1; 901 + } 902 + } 903 + 904 + dev_info(&mmc->class_dev, 905 + "Atmel MCI controller at 0x%08lx irq %d\n", 906 + host->mapbase, irq); 907 + 908 + return 0; 909 + 910 + err_request_irq: 911 + iounmap(host->regs); 912 + err_ioremap: 913 + clk_put(host->mck); 914 + err_clk_get: 915 + mmc_free_host(mmc); 916 + return ret; 917 + } 918 + 919 + static int __exit atmci_remove(struct platform_device *pdev) 920 + { 921 + struct atmel_mci *host = platform_get_drvdata(pdev); 922 + 923 + platform_set_drvdata(pdev, NULL); 924 + 925 + if (host) { 926 + if (host->detect_pin >= 0) { 927 + int pin = host->detect_pin; 928 + 929 + /* Make sure the timer doesn't enable the interrupt */ 930 + host->detect_pin = -1; 931 + smp_wmb(); 932 + 933 + free_irq(gpio_to_irq(pin), host->mmc); 934 + del_timer_sync(&host->detect_timer); 935 + gpio_free(pin); 936 + } 937 + 938 + mmc_remove_host(host->mmc); 939 
+ 940 + clk_enable(host->mck); 941 + mci_writel(host, IDR, ~0UL); 942 + mci_writel(host, CR, MCI_CR_MCIDIS); 943 + mci_readl(host, SR); 944 + clk_disable(host->mck); 945 + 946 + if (host->wp_pin >= 0) 947 + gpio_free(host->wp_pin); 948 + 949 + free_irq(platform_get_irq(pdev, 0), host->mmc); 950 + iounmap(host->regs); 951 + 952 + clk_put(host->mck); 953 + 954 + mmc_free_host(host->mmc); 955 + } 956 + return 0; 957 + } 958 + 959 + static struct platform_driver atmci_driver = { 960 + .remove = __exit_p(atmci_remove), 961 + .driver = { 962 + .name = "atmel_mci", 963 + }, 964 + }; 965 + 966 + static int __init atmci_init(void) 967 + { 968 + return platform_driver_probe(&atmci_driver, atmci_probe); 969 + } 970 + 971 + static void __exit atmci_exit(void) 972 + { 973 + platform_driver_unregister(&atmci_driver); 974 + } 975 + 976 + module_init(atmci_init); 977 + module_exit(atmci_exit); 978 + 979 + MODULE_DESCRIPTION("Atmel Multimedia Card Interface driver"); 980 + MODULE_AUTHOR("Haavard Skinnemoen <haavard.skinnemoen@atmel.com>"); 981 + MODULE_LICENSE("GPL v2");
+485 -339
drivers/mmc/host/au1xmmc.c
··· 21 21 * published by the Free Software Foundation. 22 22 */ 23 23 24 - /* Why is a timer used to detect insert events? 24 + /* Why don't we use the SD controllers' carddetect feature? 25 25 * 26 26 * From the AU1100 MMC application guide: 27 27 * If the Au1100-based design is intended to support both MultiMediaCards ··· 30 30 * In doing so, a MMC card never enters SPI-mode communications, 31 31 * but now the SecureDigital card-detect feature of CD/DAT3 is ineffective 32 32 * (the low to high transition will not occur). 33 - * 34 - * So we use the timer to check the status manually. 35 33 */ 36 34 37 35 #include <linux/module.h> ··· 39 41 #include <linux/interrupt.h> 40 42 #include <linux/dma-mapping.h> 41 43 #include <linux/scatterlist.h> 42 - 44 + #include <linux/leds.h> 43 45 #include <linux/mmc/host.h> 46 + 44 47 #include <asm/io.h> 45 48 #include <asm/mach-au1x00/au1000.h> 46 49 #include <asm/mach-au1x00/au1xxx_dbdma.h> 47 50 #include <asm/mach-au1x00/au1100_mmc.h> 48 51 49 - #include <au1xxx.h> 50 - #include "au1xmmc.h" 51 - 52 52 #define DRIVER_NAME "au1xxx-mmc" 53 53 54 54 /* Set this to enable special debugging macros */ 55 + /* #define DEBUG */ 55 56 56 57 #ifdef DEBUG 57 - #define DBG(fmt, idx, args...) printk("au1xx(%d): DEBUG: " fmt, idx, ##args) 58 + #define DBG(fmt, idx, args...) \ 59 + printk(KERN_DEBUG "au1xmmc(%d): DEBUG: " fmt, idx, ##args) 58 60 #else 59 - #define DBG(fmt, idx, args...) 61 + #define DBG(fmt, idx, args...) do {} while (0) 60 62 #endif 61 63 62 - const struct { 64 + /* Hardware definitions */ 65 + #define AU1XMMC_DESCRIPTOR_COUNT 1 66 + #define AU1XMMC_DESCRIPTOR_SIZE 2048 67 + 68 + #define AU1XMMC_OCR (MMC_VDD_27_28 | MMC_VDD_28_29 | MMC_VDD_29_30 | \ 69 + MMC_VDD_30_31 | MMC_VDD_31_32 | MMC_VDD_32_33 | \ 70 + MMC_VDD_33_34 | MMC_VDD_34_35 | MMC_VDD_35_36) 71 + 72 + /* This gives us a hard value for the stop command that we can write directly 73 + * to the command register. 
74 + */ 75 + #define STOP_CMD \ 76 + (SD_CMD_RT_1B | SD_CMD_CT_7 | (0xC << SD_CMD_CI_SHIFT) | SD_CMD_GO) 77 + 78 + /* This is the set of interrupts that we configure by default. */ 79 + #define AU1XMMC_INTERRUPTS \ 80 + (SD_CONFIG_SC | SD_CONFIG_DT | SD_CONFIG_RAT | \ 81 + SD_CONFIG_CR | SD_CONFIG_I) 82 + 83 + /* The poll event (looking for insert/remove events runs twice a second. */ 84 + #define AU1XMMC_DETECT_TIMEOUT (HZ/2) 85 + 86 + struct au1xmmc_host { 87 + struct mmc_host *mmc; 88 + struct mmc_request *mrq; 89 + 90 + u32 flags; 63 91 u32 iobase; 64 - u32 tx_devid, rx_devid; 65 - u16 bcsrpwr; 66 - u16 bcsrstatus; 67 - u16 wpstatus; 68 - } au1xmmc_card_table[] = { 69 - { SD0_BASE, DSCR_CMD0_SDMS_TX0, DSCR_CMD0_SDMS_RX0, 70 - BCSR_BOARD_SD0PWR, BCSR_INT_SD0INSERT, BCSR_STATUS_SD0WP }, 71 - #ifndef CONFIG_MIPS_DB1200 72 - { SD1_BASE, DSCR_CMD0_SDMS_TX1, DSCR_CMD0_SDMS_RX1, 73 - BCSR_BOARD_DS1PWR, BCSR_INT_SD1INSERT, BCSR_STATUS_SD1WP } 74 - #endif 92 + u32 clock; 93 + u32 bus_width; 94 + u32 power_mode; 95 + 96 + int status; 97 + 98 + struct { 99 + int len; 100 + int dir; 101 + } dma; 102 + 103 + struct { 104 + int index; 105 + int offset; 106 + int len; 107 + } pio; 108 + 109 + u32 tx_chan; 110 + u32 rx_chan; 111 + 112 + int irq; 113 + 114 + struct tasklet_struct finish_task; 115 + struct tasklet_struct data_task; 116 + struct au1xmmc_platform_data *platdata; 117 + struct platform_device *pdev; 118 + struct resource *ioarea; 75 119 }; 76 120 77 - #define AU1XMMC_CONTROLLER_COUNT (ARRAY_SIZE(au1xmmc_card_table)) 121 + /* Status flags used by the host structure */ 122 + #define HOST_F_XMIT 0x0001 123 + #define HOST_F_RECV 0x0002 124 + #define HOST_F_DMA 0x0010 125 + #define HOST_F_ACTIVE 0x0100 126 + #define HOST_F_STOP 0x1000 78 127 79 - /* This array stores pointers for the hosts (used by the IRQ handler) */ 80 - struct au1xmmc_host *au1xmmc_hosts[AU1XMMC_CONTROLLER_COUNT]; 81 - static int dma = 1; 128 + #define HOST_S_IDLE 0x0001 129 + #define HOST_S_CMD 
0x0002 130 + #define HOST_S_DATA 0x0003 131 + #define HOST_S_STOP 0x0004 82 132 83 - #ifdef MODULE 84 - module_param(dma, bool, 0); 85 - MODULE_PARM_DESC(dma, "Use DMA engine for data transfers (0 = disabled)"); 86 - #endif 133 + /* Easy access macros */ 134 + #define HOST_STATUS(h) ((h)->iobase + SD_STATUS) 135 + #define HOST_CONFIG(h) ((h)->iobase + SD_CONFIG) 136 + #define HOST_ENABLE(h) ((h)->iobase + SD_ENABLE) 137 + #define HOST_TXPORT(h) ((h)->iobase + SD_TXPORT) 138 + #define HOST_RXPORT(h) ((h)->iobase + SD_RXPORT) 139 + #define HOST_CMDARG(h) ((h)->iobase + SD_CMDARG) 140 + #define HOST_BLKSIZE(h) ((h)->iobase + SD_BLKSIZE) 141 + #define HOST_CMD(h) ((h)->iobase + SD_CMD) 142 + #define HOST_CONFIG2(h) ((h)->iobase + SD_CONFIG2) 143 + #define HOST_TIMEOUT(h) ((h)->iobase + SD_TIMEOUT) 144 + #define HOST_DEBUG(h) ((h)->iobase + SD_DEBUG) 145 + 146 + #define DMA_CHANNEL(h) \ 147 + (((h)->flags & HOST_F_XMIT) ? (h)->tx_chan : (h)->rx_chan) 87 148 88 149 static inline void IRQ_ON(struct au1xmmc_host *host, u32 mask) 89 150 { ··· 176 119 177 120 static inline void SEND_STOP(struct au1xmmc_host *host) 178 121 { 179 - 180 - /* We know the value of CONFIG2, so avoid a read we don't need */ 181 - u32 mask = SD_CONFIG2_EN; 122 + u32 config2; 182 123 183 124 WARN_ON(host->status != HOST_S_DATA); 184 125 host->status = HOST_S_STOP; 185 126 186 - au_writel(mask | SD_CONFIG2_DF, HOST_CONFIG2(host)); 127 + config2 = au_readl(HOST_CONFIG2(host)); 128 + au_writel(config2 | SD_CONFIG2_DF, HOST_CONFIG2(host)); 187 129 au_sync(); 188 130 189 131 /* Send the stop commmand */ ··· 191 135 192 136 static void au1xmmc_set_power(struct au1xmmc_host *host, int state) 193 137 { 194 - 195 - u32 val = au1xmmc_card_table[host->id].bcsrpwr; 196 - 197 - bcsr->board &= ~val; 198 - if (state) bcsr->board |= val; 199 - 200 - au_sync_delay(1); 138 + if (host->platdata && host->platdata->set_power) 139 + host->platdata->set_power(host->mmc, state); 201 140 } 202 141 203 - static inline int 
au1xmmc_card_inserted(struct au1xmmc_host *host) 142 + static int au1xmmc_card_inserted(struct mmc_host *mmc) 204 143 { 205 - return (bcsr->sig_status & au1xmmc_card_table[host->id].bcsrstatus) 206 - ? 1 : 0; 144 + struct au1xmmc_host *host = mmc_priv(mmc); 145 + 146 + if (host->platdata && host->platdata->card_inserted) 147 + return !!host->platdata->card_inserted(host->mmc); 148 + 149 + return -ENOSYS; 207 150 } 208 151 209 152 static int au1xmmc_card_readonly(struct mmc_host *mmc) 210 153 { 211 154 struct au1xmmc_host *host = mmc_priv(mmc); 212 - return (bcsr->status & au1xmmc_card_table[host->id].wpstatus) 213 - ? 1 : 0; 155 + 156 + if (host->platdata && host->platdata->card_readonly) 157 + return !!host->platdata->card_readonly(mmc); 158 + 159 + return -ENOSYS; 214 160 } 215 161 216 162 static void au1xmmc_finish_request(struct au1xmmc_host *host) 217 163 { 218 - 219 164 struct mmc_request *mrq = host->mrq; 220 165 221 166 host->mrq = NULL; 222 - host->flags &= HOST_F_ACTIVE; 167 + host->flags &= HOST_F_ACTIVE | HOST_F_DMA; 223 168 224 169 host->dma.len = 0; 225 170 host->dma.dir = 0; ··· 230 173 host->pio.len = 0; 231 174 232 175 host->status = HOST_S_IDLE; 233 - 234 - bcsr->disk_leds |= (1 << 8); 235 176 236 177 mmc_request_done(host->mmc, mrq); 237 178 } ··· 290 235 au_sync(); 291 236 292 237 /* Wait for the command to go on the line */ 293 - 294 - while(1) { 295 - if (!(au_readl(HOST_CMD(host)) & SD_CMD_GO)) 296 - break; 297 - } 238 + while (au_readl(HOST_CMD(host)) & SD_CMD_GO) 239 + /* nop */; 298 240 299 241 /* Wait for the command to come back */ 300 - 301 242 if (wait) { 302 243 u32 status = au_readl(HOST_STATUS(host)); 303 244 304 - while(!(status & SD_STATUS_CR)) 245 + while (!(status & SD_STATUS_CR)) 305 246 status = au_readl(HOST_STATUS(host)); 306 247 307 248 /* Clear the CR status */ ··· 311 260 312 261 static void au1xmmc_data_complete(struct au1xmmc_host *host, u32 status) 313 262 { 314 - 315 263 struct mmc_request *mrq = host->mrq; 316 264 
struct mmc_data *data; 317 265 u32 crc; 318 266 319 - WARN_ON(host->status != HOST_S_DATA && host->status != HOST_S_STOP); 267 + WARN_ON((host->status != HOST_S_DATA) && (host->status != HOST_S_STOP)); 320 268 321 269 if (host->mrq == NULL) 322 270 return; ··· 326 276 status = au_readl(HOST_STATUS(host)); 327 277 328 278 /* The transaction is really over when the SD_STATUS_DB bit is clear */ 329 - 330 - while((host->flags & HOST_F_XMIT) && (status & SD_STATUS_DB)) 279 + while ((host->flags & HOST_F_XMIT) && (status & SD_STATUS_DB)) 331 280 status = au_readl(HOST_STATUS(host)); 332 281 333 282 data->error = 0; 334 283 dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len, host->dma.dir); 335 284 336 285 /* Process any errors */ 337 - 338 286 crc = (status & (SD_STATUS_WC | SD_STATUS_RC)); 339 287 if (host->flags & HOST_F_XMIT) 340 288 crc |= ((status & 0x07) == 0x02) ? 0 : 1; ··· 347 299 348 300 if (!data->error) { 349 301 if (host->flags & HOST_F_DMA) { 302 + #ifdef CONFIG_SOC_AU1200 /* DBDMA */ 350 303 u32 chan = DMA_CHANNEL(host); 351 304 352 - chan_tab_t *c = *((chan_tab_t **) chan); 305 + chan_tab_t *c = *((chan_tab_t **)chan); 353 306 au1x_dma_chan_t *cp = c->chan_ptr; 354 307 data->bytes_xfered = cp->ddma_bytecnt; 355 - } 356 - else 308 + #endif 309 + } else 357 310 data->bytes_xfered = 358 - (data->blocks * data->blksz) - 359 - host->pio.len; 311 + (data->blocks * data->blksz) - host->pio.len; 360 312 } 361 313 362 314 au1xmmc_finish_request(host); ··· 364 316 365 317 static void au1xmmc_tasklet_data(unsigned long param) 366 318 { 367 - struct au1xmmc_host *host = (struct au1xmmc_host *) param; 319 + struct au1xmmc_host *host = (struct au1xmmc_host *)param; 368 320 369 321 u32 status = au_readl(HOST_STATUS(host)); 370 322 au1xmmc_data_complete(host, status); ··· 374 326 375 327 static void au1xmmc_send_pio(struct au1xmmc_host *host) 376 328 { 377 - 378 - struct mmc_data *data = 0; 379 - int sg_len, max, count = 0; 380 - unsigned char *sg_ptr; 381 - u32 
status = 0; 329 + struct mmc_data *data; 330 + int sg_len, max, count; 331 + unsigned char *sg_ptr, val; 332 + u32 status; 382 333 struct scatterlist *sg; 383 334 384 335 data = host->mrq->data; ··· 392 345 /* This is the space left inside the buffer */ 393 346 sg_len = data->sg[host->pio.index].length - host->pio.offset; 394 347 395 - /* Check to if we need less then the size of the sg_buffer */ 396 - 348 + /* Check if we need less than the size of the sg_buffer */ 397 349 max = (sg_len > host->pio.len) ? host->pio.len : sg_len; 398 - if (max > AU1XMMC_MAX_TRANSFER) max = AU1XMMC_MAX_TRANSFER; 350 + if (max > AU1XMMC_MAX_TRANSFER) 351 + max = AU1XMMC_MAX_TRANSFER; 399 352 400 - for(count = 0; count < max; count++ ) { 401 - unsigned char val; 402 - 353 + for (count = 0; count < max; count++) { 403 354 status = au_readl(HOST_STATUS(host)); 404 355 405 356 if (!(status & SD_STATUS_TH)) ··· 405 360 406 361 val = *sg_ptr++; 407 362 408 - au_writel((unsigned long) val, HOST_TXPORT(host)); 363 + au_writel((unsigned long)val, HOST_TXPORT(host)); 409 364 au_sync(); 410 365 } 411 366 ··· 429 384 430 385 static void au1xmmc_receive_pio(struct au1xmmc_host *host) 431 386 { 432 - 433 - struct mmc_data *data = 0; 434 - int sg_len = 0, max = 0, count = 0; 435 - unsigned char *sg_ptr = 0; 436 - u32 status = 0; 387 + struct mmc_data *data; 388 + int max, count, sg_len = 0; 389 + unsigned char *sg_ptr = NULL; 390 + u32 status, val; 437 391 struct scatterlist *sg; 438 392 439 393 data = host->mrq->data; ··· 449 405 /* This is the space left inside the buffer */ 450 406 sg_len = sg_dma_len(&data->sg[host->pio.index]) - host->pio.offset; 451 407 452 - /* Check to if we need less then the size of the sg_buffer */ 453 - if (sg_len < max) max = sg_len; 408 + /* Check if we need less than the size of the sg_buffer */ 409 + if (sg_len < max) 410 + max = sg_len; 454 411 } 455 412 456 413 if (max > AU1XMMC_MAX_TRANSFER) 457 414 max = AU1XMMC_MAX_TRANSFER; 458 415 459 - for(count = 0; count < 
max; count++ ) { 460 - u32 val; 416 + for (count = 0; count < max; count++) { 461 417 status = au_readl(HOST_STATUS(host)); 462 418 463 419 if (!(status & SD_STATUS_NE)) 464 420 break; 465 421 466 422 if (status & SD_STATUS_RC) { 467 - DBG("RX CRC Error [%d + %d].\n", host->id, 423 + DBG("RX CRC Error [%d + %d].\n", host->pdev->id, 468 424 host->pio.len, count); 469 425 break; 470 426 } 471 427 472 428 if (status & SD_STATUS_RO) { 473 - DBG("RX Overrun [%d + %d]\n", host->id, 429 + DBG("RX Overrun [%d + %d]\n", host->pdev->id, 474 430 host->pio.len, count); 475 431 break; 476 432 } 477 433 else if (status & SD_STATUS_RU) { 478 - DBG("RX Underrun [%d + %d]\n", host->id, 434 + DBG("RX Underrun [%d + %d]\n", host->pdev->id, 479 435 host->pio.len, count); 480 436 break; 481 437 } ··· 483 439 val = au_readl(HOST_RXPORT(host)); 484 440 485 441 if (sg_ptr) 486 - *sg_ptr++ = (unsigned char) (val & 0xFF); 442 + *sg_ptr++ = (unsigned char)(val & 0xFF); 487 443 } 488 444 489 445 host->pio.len -= count; ··· 495 451 } 496 452 497 453 if (host->pio.len == 0) { 498 - //IRQ_OFF(host, SD_CONFIG_RA | SD_CONFIG_RF); 454 + /* IRQ_OFF(host, SD_CONFIG_RA | SD_CONFIG_RF); */ 499 455 IRQ_OFF(host, SD_CONFIG_NE); 500 456 501 457 if (host->flags & HOST_F_STOP) ··· 505 461 } 506 462 } 507 463 508 - /* static void au1xmmc_cmd_complete 509 - This is called when a command has been completed - grab the response 510 - and check for errors. Then start the data transfer if it is indicated. 511 - */ 512 - 464 + /* This is called when a command has been completed - grab the response 465 + * and check for errors. Then start the data transfer if it is indicated. 
466 + */ 513 467 static void au1xmmc_cmd_complete(struct au1xmmc_host *host, u32 status) 514 468 { 515 - 516 469 struct mmc_request *mrq = host->mrq; 517 470 struct mmc_command *cmd; 518 - int trans; 471 + u32 r[4]; 472 + int i, trans; 519 473 520 474 if (!host->mrq) 521 475 return; ··· 523 481 524 482 if (cmd->flags & MMC_RSP_PRESENT) { 525 483 if (cmd->flags & MMC_RSP_136) { 526 - u32 r[4]; 527 - int i; 528 - 529 484 r[0] = au_readl(host->iobase + SD_RESP3); 530 485 r[1] = au_readl(host->iobase + SD_RESP2); 531 486 r[2] = au_readl(host->iobase + SD_RESP1); ··· 530 491 531 492 /* The CRC is omitted from the response, so really 532 493 * we only got 120 bytes, but the engine expects 533 - * 128 bits, so we have to shift things up 494 + * 128 bits, so we have to shift things up. 534 495 */ 535 - 536 - for(i = 0; i < 4; i++) { 496 + for (i = 0; i < 4; i++) { 537 497 cmd->resp[i] = (r[i] & 0x00FFFFFF) << 8; 538 498 if (i != 3) 539 499 cmd->resp[i] |= (r[i + 1] & 0xFF000000) >> 24; ··· 543 505 * our response omits the CRC, our data ends up 544 506 * being shifted 8 bits to the right. In this case, 545 507 * that means that the OSR data starts at bit 31, 546 - * so we can just read RESP0 and return that 508 + * so we can just read RESP0 and return that. 
547 509 */ 548 510 cmd->resp[0] = au_readl(host->iobase + SD_RESP0); 549 511 } 550 512 } 551 513 552 514 /* Figure out errors */ 553 - 554 515 if (status & (SD_STATUS_SC | SD_STATUS_WC | SD_STATUS_RC)) 555 516 cmd->error = -EILSEQ; 556 517 557 518 trans = host->flags & (HOST_F_XMIT | HOST_F_RECV); 558 519 559 520 if (!trans || cmd->error) { 560 - 561 - IRQ_OFF(host, SD_CONFIG_TH | SD_CONFIG_RA|SD_CONFIG_RF); 521 + IRQ_OFF(host, SD_CONFIG_TH | SD_CONFIG_RA | SD_CONFIG_RF); 562 522 tasklet_schedule(&host->finish_task); 563 523 return; 564 524 } ··· 564 528 host->status = HOST_S_DATA; 565 529 566 530 if (host->flags & HOST_F_DMA) { 531 + #ifdef CONFIG_SOC_AU1200 /* DBDMA */ 567 532 u32 channel = DMA_CHANNEL(host); 568 533 569 534 /* Start the DMA as soon as the buffer gets something in it */ ··· 577 540 } 578 541 579 542 au1xxx_dbdma_start(channel); 543 + #endif 580 544 } 581 545 } 582 546 583 547 static void au1xmmc_set_clock(struct au1xmmc_host *host, int rate) 584 548 { 585 - 586 549 unsigned int pbus = get_au1x00_speed(); 587 550 unsigned int divisor; 588 551 u32 config; 589 552 590 553 /* From databook: 591 - divisor = ((((cpuclock / sbus_divisor) / 2) / mmcclock) / 2) - 1 592 - */ 593 - 554 + * divisor = ((((cpuclock / sbus_divisor) / 2) / mmcclock) / 2) - 1 555 + */ 594 556 pbus /= ((au_readl(SYS_POWERCTRL) & 0x3) + 2); 595 557 pbus /= 2; 596 - 597 558 divisor = ((pbus / rate) / 2) - 1; 598 559 599 560 config = au_readl(HOST_CONFIG(host)); ··· 603 568 au_sync(); 604 569 } 605 570 606 - static int 607 - au1xmmc_prepare_data(struct au1xmmc_host *host, struct mmc_data *data) 571 + static int au1xmmc_prepare_data(struct au1xmmc_host *host, 572 + struct mmc_data *data) 608 573 { 609 - 610 574 int datalen = data->blocks * data->blksz; 611 - 612 - if (dma != 0) 613 - host->flags |= HOST_F_DMA; 614 575 615 576 if (data->flags & MMC_DATA_READ) 616 577 host->flags |= HOST_F_RECV; ··· 627 596 au_writel(data->blksz - 1, HOST_BLKSIZE(host)); 628 597 629 598 if (host->flags 
& HOST_F_DMA) { 599 + #ifdef CONFIG_SOC_AU1200 /* DBDMA */ 630 600 int i; 631 601 u32 channel = DMA_CHANNEL(host); 632 602 633 603 au1xxx_dbdma_stop(channel); 634 604 635 - for(i = 0; i < host->dma.len; i++) { 605 + for (i = 0; i < host->dma.len; i++) { 636 606 u32 ret = 0, flags = DDMA_FLAGS_NOIE; 637 607 struct scatterlist *sg = &data->sg[i]; 638 608 int sg_len = sg->length; ··· 643 611 if (i == host->dma.len - 1) 644 612 flags = DDMA_FLAGS_IE; 645 613 646 - if (host->flags & HOST_F_XMIT){ 647 - ret = au1xxx_dbdma_put_source_flags(channel, 648 - (void *) sg_virt(sg), len, flags); 649 - } 650 - else { 651 - ret = au1xxx_dbdma_put_dest_flags(channel, 652 - (void *) sg_virt(sg), 653 - len, flags); 614 + if (host->flags & HOST_F_XMIT) { 615 + ret = au1xxx_dbdma_put_source_flags(channel, 616 + (void *)sg_virt(sg), len, flags); 617 + } else { 618 + ret = au1xxx_dbdma_put_dest_flags(channel, 619 + (void *)sg_virt(sg), len, flags); 654 620 } 655 621 656 - if (!ret) 622 + if (!ret) 657 623 goto dataerr; 658 624 659 625 datalen -= len; 660 626 } 661 - } 662 - else { 627 + #endif 628 + } else { 663 629 host->pio.index = 0; 664 630 host->pio.offset = 0; 665 631 host->pio.len = datalen; ··· 666 636 IRQ_ON(host, SD_CONFIG_TH); 667 637 else 668 638 IRQ_ON(host, SD_CONFIG_NE); 669 - //IRQ_ON(host, SD_CONFIG_RA|SD_CONFIG_RF); 639 + /* IRQ_ON(host, SD_CONFIG_RA | SD_CONFIG_RF); */ 670 640 } 671 641 672 642 return 0; 673 643 674 - dataerr: 675 - dma_unmap_sg(mmc_dev(host->mmc),data->sg,data->sg_len,host->dma.dir); 644 + dataerr: 645 + dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len, 646 + host->dma.dir); 676 647 return -ETIMEDOUT; 677 648 } 678 649 679 - /* static void au1xmmc_request 680 - This actually starts a command or data transaction 681 - */ 682 - 650 + /* This actually starts a command or data transaction */ 683 651 static void au1xmmc_request(struct mmc_host* mmc, struct mmc_request* mrq) 684 652 { 685 - 686 653 struct au1xmmc_host *host = mmc_priv(mmc); 687 - 
unsigned int flags = 0; 688 654 int ret = 0; 689 655 690 656 WARN_ON(irqs_disabled()); ··· 689 663 host->mrq = mrq; 690 664 host->status = HOST_S_CMD; 691 665 692 - bcsr->disk_leds &= ~(1 << 8); 666 + /* fail request immediately if no card is present */ 667 + if (0 == au1xmmc_card_inserted(mmc)) { 668 + mrq->cmd->error = -ENOMEDIUM; 669 + au1xmmc_finish_request(host); 670 + return; 671 + } 693 672 694 673 if (mrq->data) { 695 674 FLUSH_FIFO(host); 696 - flags = mrq->data->flags; 697 675 ret = au1xmmc_prepare_data(host, mrq->data); 698 676 } 699 677 ··· 712 682 713 683 static void au1xmmc_reset_controller(struct au1xmmc_host *host) 714 684 { 715 - 716 685 /* Apply the clock */ 717 686 au_writel(SD_ENABLE_CE, HOST_ENABLE(host)); 718 687 au_sync_delay(1); ··· 741 712 } 742 713 743 714 744 - static void au1xmmc_set_ios(struct mmc_host* mmc, struct mmc_ios* ios) 715 + static void au1xmmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) 745 716 { 746 717 struct au1xmmc_host *host = mmc_priv(mmc); 718 + u32 config2; 747 719 748 720 if (ios->power_mode == MMC_POWER_OFF) 749 721 au1xmmc_set_power(host, 0); ··· 756 726 au1xmmc_set_clock(host, ios->clock); 757 727 host->clock = ios->clock; 758 728 } 729 + 730 + config2 = au_readl(HOST_CONFIG2(host)); 731 + switch (ios->bus_width) { 732 + case MMC_BUS_WIDTH_4: 733 + config2 |= SD_CONFIG2_WB; 734 + break; 735 + case MMC_BUS_WIDTH_1: 736 + config2 &= ~SD_CONFIG2_WB; 737 + break; 738 + } 739 + au_writel(config2, HOST_CONFIG2(host)); 740 + au_sync(); 759 741 } 760 742 761 - static void au1xmmc_dma_callback(int irq, void *dev_id) 743 + #define STATUS_TIMEOUT (SD_STATUS_RAT | SD_STATUS_DT) 744 + #define STATUS_DATA_IN (SD_STATUS_NE) 745 + #define STATUS_DATA_OUT (SD_STATUS_TH) 746 + 747 + static irqreturn_t au1xmmc_irq(int irq, void *dev_id) 762 748 { 763 - struct au1xmmc_host *host = (struct au1xmmc_host *) dev_id; 749 + struct au1xmmc_host *host = dev_id; 750 + u32 status; 751 + 752 + status = au_readl(HOST_STATUS(host)); 753 + 
754 + if (!(status & SD_STATUS_I)) 755 + return IRQ_NONE; /* not ours */ 756 + 757 + if (status & SD_STATUS_SI) /* SDIO */ 758 + mmc_signal_sdio_irq(host->mmc); 759 + 760 + if (host->mrq && (status & STATUS_TIMEOUT)) { 761 + if (status & SD_STATUS_RAT) 762 + host->mrq->cmd->error = -ETIMEDOUT; 763 + else if (status & SD_STATUS_DT) 764 + host->mrq->data->error = -ETIMEDOUT; 765 + 766 + /* In PIO mode, interrupts might still be enabled */ 767 + IRQ_OFF(host, SD_CONFIG_NE | SD_CONFIG_TH); 768 + 769 + /* IRQ_OFF(host, SD_CONFIG_TH | SD_CONFIG_RA | SD_CONFIG_RF); */ 770 + tasklet_schedule(&host->finish_task); 771 + } 772 + #if 0 773 + else if (status & SD_STATUS_DD) { 774 + /* Sometimes we get a DD before a NE in PIO mode */ 775 + if (!(host->flags & HOST_F_DMA) && (status & SD_STATUS_NE)) 776 + au1xmmc_receive_pio(host); 777 + else { 778 + au1xmmc_data_complete(host, status); 779 + /* tasklet_schedule(&host->data_task); */ 780 + } 781 + } 782 + #endif 783 + else if (status & SD_STATUS_CR) { 784 + if (host->status == HOST_S_CMD) 785 + au1xmmc_cmd_complete(host, status); 786 + 787 + } else if (!(host->flags & HOST_F_DMA)) { 788 + if ((host->flags & HOST_F_XMIT) && (status & STATUS_DATA_OUT)) 789 + au1xmmc_send_pio(host); 790 + else if ((host->flags & HOST_F_RECV) && (status & STATUS_DATA_IN)) 791 + au1xmmc_receive_pio(host); 792 + 793 + } else if (status & 0x203F3C70) { 794 + DBG("Unhandled status %8.8x\n", host->pdev->id, 795 + status); 796 + } 797 + 798 + au_writel(status, HOST_STATUS(host)); 799 + au_sync(); 800 + 801 + return IRQ_HANDLED; 802 + } 803 + 804 + #ifdef CONFIG_SOC_AU1200 805 + /* 8bit memory DMA device */ 806 + static dbdev_tab_t au1xmmc_mem_dbdev = { 807 + .dev_id = DSCR_CMD0_ALWAYS, 808 + .dev_flags = DEV_FLAGS_ANYUSE, 809 + .dev_tsize = 0, 810 + .dev_devwidth = 8, 811 + .dev_physaddr = 0x00000000, 812 + .dev_intlevel = 0, 813 + .dev_intpolarity = 0, 814 + }; 815 + static int memid; 816 + 817 + static void au1xmmc_dbdma_callback(int irq, void *dev_id) 
818 + { 819 + struct au1xmmc_host *host = (struct au1xmmc_host *)dev_id; 764 820 765 821 /* Avoid spurious interrupts */ 766 - 767 822 if (!host->mrq) 768 823 return; 769 824 ··· 858 743 tasklet_schedule(&host->data_task); 859 744 } 860 745 861 - #define STATUS_TIMEOUT (SD_STATUS_RAT | SD_STATUS_DT) 862 - #define STATUS_DATA_IN (SD_STATUS_NE) 863 - #define STATUS_DATA_OUT (SD_STATUS_TH) 864 - 865 - static irqreturn_t au1xmmc_irq(int irq, void *dev_id) 746 + static int au1xmmc_dbdma_init(struct au1xmmc_host *host) 866 747 { 748 + struct resource *res; 749 + int txid, rxid; 867 750 868 - u32 status; 869 - int i, ret = 0; 751 + res = platform_get_resource(host->pdev, IORESOURCE_DMA, 0); 752 + if (!res) 753 + return -ENODEV; 754 + txid = res->start; 870 755 871 - disable_irq(AU1100_SD_IRQ); 756 + res = platform_get_resource(host->pdev, IORESOURCE_DMA, 1); 757 + if (!res) 758 + return -ENODEV; 759 + rxid = res->start; 872 760 873 - for(i = 0; i < AU1XMMC_CONTROLLER_COUNT; i++) { 874 - struct au1xmmc_host * host = au1xmmc_hosts[i]; 875 - u32 handled = 1; 761 + if (!memid) 762 + return -ENODEV; 876 763 877 - status = au_readl(HOST_STATUS(host)); 764 + host->tx_chan = au1xxx_dbdma_chan_alloc(memid, txid, 765 + au1xmmc_dbdma_callback, (void *)host); 766 + if (!host->tx_chan) { 767 + dev_err(&host->pdev->dev, "cannot allocate TX DMA\n"); 768 + return -ENODEV; 769 + } 878 770 879 - if (host->mrq && (status & STATUS_TIMEOUT)) { 880 - if (status & SD_STATUS_RAT) 881 - host->mrq->cmd->error = -ETIMEDOUT; 771 + host->rx_chan = au1xxx_dbdma_chan_alloc(rxid, memid, 772 + au1xmmc_dbdma_callback, (void *)host); 773 + if (!host->rx_chan) { 774 + dev_err(&host->pdev->dev, "cannot allocate RX DMA\n"); 775 + au1xxx_dbdma_chan_free(host->tx_chan); 776 + return -ENODEV; 777 + } 882 778 883 - else if (status & SD_STATUS_DT) 884 - host->mrq->data->error = -ETIMEDOUT; 779 + au1xxx_dbdma_set_devwidth(host->tx_chan, 8); 780 + au1xxx_dbdma_set_devwidth(host->rx_chan, 8); 885 781 886 - /* In PIO 
mode, interrupts might still be enabled */ 887 - IRQ_OFF(host, SD_CONFIG_NE | SD_CONFIG_TH); 782 + au1xxx_dbdma_ring_alloc(host->tx_chan, AU1XMMC_DESCRIPTOR_COUNT); 783 + au1xxx_dbdma_ring_alloc(host->rx_chan, AU1XMMC_DESCRIPTOR_COUNT); 888 784 889 - //IRQ_OFF(host, SD_CONFIG_TH|SD_CONFIG_RA|SD_CONFIG_RF); 890 - tasklet_schedule(&host->finish_task); 891 - } 892 - #if 0 893 - else if (status & SD_STATUS_DD) { 785 + /* DBDMA is good to go */ 786 + host->flags |= HOST_F_DMA; 894 787 895 - /* Sometimes we get a DD before a NE in PIO mode */ 788 + return 0; 789 + } 896 790 897 - if (!(host->flags & HOST_F_DMA) && 898 - (status & SD_STATUS_NE)) 899 - au1xmmc_receive_pio(host); 900 - else { 901 - au1xmmc_data_complete(host, status); 902 - //tasklet_schedule(&host->data_task); 903 - } 904 - } 791 + static void au1xmmc_dbdma_shutdown(struct au1xmmc_host *host) 792 + { 793 + if (host->flags & HOST_F_DMA) { 794 + host->flags &= ~HOST_F_DMA; 795 + au1xxx_dbdma_chan_free(host->tx_chan); 796 + au1xxx_dbdma_chan_free(host->rx_chan); 797 + } 798 + } 905 799 #endif 906 - else if (status & (SD_STATUS_CR)) { 907 - if (host->status == HOST_S_CMD) 908 - au1xmmc_cmd_complete(host,status); 909 - } 910 - else if (!(host->flags & HOST_F_DMA)) { 911 - if ((host->flags & HOST_F_XMIT) && 912 - (status & STATUS_DATA_OUT)) 913 - au1xmmc_send_pio(host); 914 - else if ((host->flags & HOST_F_RECV) && 915 - (status & STATUS_DATA_IN)) 916 - au1xmmc_receive_pio(host); 917 - } 918 - else if (status & 0x203FBC70) { 919 - DBG("Unhandled status %8.8x\n", host->id, status); 920 - handled = 0; 921 - } 922 800 923 - au_writel(status, HOST_STATUS(host)); 924 - au_sync(); 925 - 926 - ret |= handled; 927 - } 928 - 929 - enable_irq(AU1100_SD_IRQ); 930 - return ret; 931 - } 932 - 933 - static void au1xmmc_poll_event(unsigned long arg) 801 + static void au1xmmc_enable_sdio_irq(struct mmc_host *mmc, int en) 934 802 { 935 - struct au1xmmc_host *host = (struct au1xmmc_host *) arg; 803 + struct au1xmmc_host *host = 
mmc_priv(mmc); 936 804 937 - int card = au1xmmc_card_inserted(host); 938 - int controller = (host->flags & HOST_F_ACTIVE) ? 1 : 0; 939 - 940 - if (card != controller) { 941 - host->flags &= ~HOST_F_ACTIVE; 942 - if (card) host->flags |= HOST_F_ACTIVE; 943 - mmc_detect_change(host->mmc, 0); 944 - } 945 - 946 - if (host->mrq != NULL) { 947 - u32 status = au_readl(HOST_STATUS(host)); 948 - DBG("PENDING - %8.8x\n", host->id, status); 949 - } 950 - 951 - mod_timer(&host->timer, jiffies + AU1XMMC_DETECT_TIMEOUT); 952 - } 953 - 954 - static dbdev_tab_t au1xmmc_mem_dbdev = 955 - { 956 - DSCR_CMD0_ALWAYS, DEV_FLAGS_ANYUSE, 0, 8, 0x00000000, 0, 0 957 - }; 958 - 959 - static void au1xmmc_init_dma(struct au1xmmc_host *host) 960 - { 961 - 962 - u32 rxchan, txchan; 963 - 964 - int txid = au1xmmc_card_table[host->id].tx_devid; 965 - int rxid = au1xmmc_card_table[host->id].rx_devid; 966 - 967 - /* DSCR_CMD0_ALWAYS has a stride of 32 bits, we need a stride 968 - of 8 bits. And since devices are shared, we need to create 969 - our own to avoid freaking out other devices 970 - */ 971 - 972 - int memid = au1xxx_ddma_add_device(&au1xmmc_mem_dbdev); 973 - 974 - txchan = au1xxx_dbdma_chan_alloc(memid, txid, 975 - au1xmmc_dma_callback, (void *) host); 976 - 977 - rxchan = au1xxx_dbdma_chan_alloc(rxid, memid, 978 - au1xmmc_dma_callback, (void *) host); 979 - 980 - au1xxx_dbdma_set_devwidth(txchan, 8); 981 - au1xxx_dbdma_set_devwidth(rxchan, 8); 982 - 983 - au1xxx_dbdma_ring_alloc(txchan, AU1XMMC_DESCRIPTOR_COUNT); 984 - au1xxx_dbdma_ring_alloc(rxchan, AU1XMMC_DESCRIPTOR_COUNT); 985 - 986 - host->tx_chan = txchan; 987 - host->rx_chan = rxchan; 805 + if (en) 806 + IRQ_ON(host, SD_CONFIG_SI); 807 + else 808 + IRQ_OFF(host, SD_CONFIG_SI); 988 809 } 989 810 990 811 static const struct mmc_host_ops au1xmmc_ops = { 991 812 .request = au1xmmc_request, 992 813 .set_ios = au1xmmc_set_ios, 993 814 .get_ro = au1xmmc_card_readonly, 815 + .get_cd = au1xmmc_card_inserted, 816 + .enable_sdio_irq = 
au1xmmc_enable_sdio_irq, 994 817 }; 995 818 996 819 static int __devinit au1xmmc_probe(struct platform_device *pdev) 997 820 { 821 + struct mmc_host *mmc; 822 + struct au1xmmc_host *host; 823 + struct resource *r; 824 + int ret; 998 825 999 - int i, ret = 0; 826 + mmc = mmc_alloc_host(sizeof(struct au1xmmc_host), &pdev->dev); 827 + if (!mmc) { 828 + dev_err(&pdev->dev, "no memory for mmc_host\n"); 829 + ret = -ENOMEM; 830 + goto out0; 831 + } 1000 832 1001 - /* THe interrupt is shared among all controllers */ 1002 - ret = request_irq(AU1100_SD_IRQ, au1xmmc_irq, IRQF_DISABLED, "MMC", 0); 833 + host = mmc_priv(mmc); 834 + host->mmc = mmc; 835 + host->platdata = pdev->dev.platform_data; 836 + host->pdev = pdev; 1003 837 838 + ret = -ENODEV; 839 + r = platform_get_resource(pdev, IORESOURCE_MEM, 0); 840 + if (!r) { 841 + dev_err(&pdev->dev, "no mmio defined\n"); 842 + goto out1; 843 + } 844 + 845 + host->ioarea = request_mem_region(r->start, r->end - r->start + 1, 846 + pdev->name); 847 + if (!host->ioarea) { 848 + dev_err(&pdev->dev, "mmio already in use\n"); 849 + goto out1; 850 + } 851 + 852 + host->iobase = (unsigned long)ioremap(r->start, 0x3c); 853 + if (!host->iobase) { 854 + dev_err(&pdev->dev, "cannot remap mmio\n"); 855 + goto out2; 856 + } 857 + 858 + r = platform_get_resource(pdev, IORESOURCE_IRQ, 0); 859 + if (!r) { 860 + dev_err(&pdev->dev, "no IRQ defined\n"); 861 + goto out3; 862 + } 863 + 864 + host->irq = r->start; 865 + /* IRQ is shared among both SD controllers */ 866 + ret = request_irq(host->irq, au1xmmc_irq, IRQF_SHARED, 867 + DRIVER_NAME, host); 1004 868 if (ret) { 1005 - printk(DRIVER_NAME "ERROR: Couldn't get int %d: %d\n", 1006 - AU1100_SD_IRQ, ret); 1007 - return -ENXIO; 869 + dev_err(&pdev->dev, "cannot grab IRQ\n"); 870 + goto out3; 1008 871 } 1009 872 1010 - disable_irq(AU1100_SD_IRQ); 873 + mmc->ops = &au1xmmc_ops; 1011 874 1012 - for(i = 0; i < AU1XMMC_CONTROLLER_COUNT; i++) { 1013 - struct mmc_host *mmc = mmc_alloc_host(sizeof(struct 
au1xmmc_host), &pdev->dev); 1014 - struct au1xmmc_host *host = 0; 875 + mmc->f_min = 450000; 876 + mmc->f_max = 24000000; 1015 877 1016 - if (!mmc) { 1017 - printk(DRIVER_NAME "ERROR: no mem for host %d\n", i); 1018 - au1xmmc_hosts[i] = 0; 1019 - continue; 878 + mmc->max_seg_size = AU1XMMC_DESCRIPTOR_SIZE; 879 + mmc->max_phys_segs = AU1XMMC_DESCRIPTOR_COUNT; 880 + 881 + mmc->max_blk_size = 2048; 882 + mmc->max_blk_count = 512; 883 + 884 + mmc->ocr_avail = AU1XMMC_OCR; 885 + mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_SDIO_IRQ; 886 + 887 + host->status = HOST_S_IDLE; 888 + 889 + /* board-specific carddetect setup, if any */ 890 + if (host->platdata && host->platdata->cd_setup) { 891 + ret = host->platdata->cd_setup(mmc, 1); 892 + if (ret) { 893 + dev_warn(&pdev->dev, "board CD setup failed\n"); 894 + mmc->caps |= MMC_CAP_NEEDS_POLL; 1020 895 } 896 + } else 897 + mmc->caps |= MMC_CAP_NEEDS_POLL; 1021 898 1022 - mmc->ops = &au1xmmc_ops; 899 + tasklet_init(&host->data_task, au1xmmc_tasklet_data, 900 + (unsigned long)host); 1023 901 1024 - mmc->f_min = 450000; 1025 - mmc->f_max = 24000000; 902 + tasklet_init(&host->finish_task, au1xmmc_tasklet_finish, 903 + (unsigned long)host); 1026 904 1027 - mmc->max_seg_size = AU1XMMC_DESCRIPTOR_SIZE; 1028 - mmc->max_phys_segs = AU1XMMC_DESCRIPTOR_COUNT; 905 + #ifdef CONFIG_SOC_AU1200 906 + ret = au1xmmc_dbdma_init(host); 907 + if (ret) 908 + printk(KERN_INFO DRIVER_NAME ": DBDMA init failed; using PIO\n"); 909 + #endif 1029 910 1030 - mmc->max_blk_size = 2048; 1031 - mmc->max_blk_count = 512; 911 + #ifdef CONFIG_LEDS_CLASS 912 + if (host->platdata && host->platdata->led) { 913 + struct led_classdev *led = host->platdata->led; 914 + led->name = mmc_hostname(mmc); 915 + led->brightness = LED_OFF; 916 + led->default_trigger = mmc_hostname(mmc); 917 + ret = led_classdev_register(mmc_dev(mmc), led); 918 + if (ret) 919 + goto out5; 920 + } 921 + #endif 1032 922 1033 - mmc->ocr_avail = AU1XMMC_OCR; 923 + au1xmmc_reset_controller(host); 1034 
924 1035 - host = mmc_priv(mmc); 1036 - host->mmc = mmc; 1037 - 1038 - host->id = i; 1039 - host->iobase = au1xmmc_card_table[host->id].iobase; 1040 - host->clock = 0; 1041 - host->power_mode = MMC_POWER_OFF; 1042 - 1043 - host->flags = au1xmmc_card_inserted(host) ? HOST_F_ACTIVE : 0; 1044 - host->status = HOST_S_IDLE; 1045 - 1046 - init_timer(&host->timer); 1047 - 1048 - host->timer.function = au1xmmc_poll_event; 1049 - host->timer.data = (unsigned long) host; 1050 - host->timer.expires = jiffies + AU1XMMC_DETECT_TIMEOUT; 1051 - 1052 - tasklet_init(&host->data_task, au1xmmc_tasklet_data, 1053 - (unsigned long) host); 1054 - 1055 - tasklet_init(&host->finish_task, au1xmmc_tasklet_finish, 1056 - (unsigned long) host); 1057 - 1058 - spin_lock_init(&host->lock); 1059 - 1060 - if (dma != 0) 1061 - au1xmmc_init_dma(host); 1062 - 1063 - au1xmmc_reset_controller(host); 1064 - 1065 - mmc_add_host(mmc); 1066 - au1xmmc_hosts[i] = host; 1067 - 1068 - add_timer(&host->timer); 1069 - 1070 - printk(KERN_INFO DRIVER_NAME ": MMC Controller %d set up at %8.8X (mode=%s)\n", 1071 - host->id, host->iobase, dma ? "dma" : "pio"); 925 + ret = mmc_add_host(mmc); 926 + if (ret) { 927 + dev_err(&pdev->dev, "cannot add mmc host\n"); 928 + goto out6; 1072 929 } 1073 930 1074 - enable_irq(AU1100_SD_IRQ); 931 + platform_set_drvdata(pdev, mmc); 1075 932 1076 - return 0; 933 + printk(KERN_INFO DRIVER_NAME ": MMC Controller %d set up at %8.8X" 934 + " (mode=%s)\n", pdev->id, host->iobase, 935 + host->flags & HOST_F_DMA ? 
"dma" : "pio"); 936 + 937 + return 0; /* all ok */ 938 + 939 + out6: 940 + #ifdef CONFIG_LEDS_CLASS 941 + if (host->platdata && host->platdata->led) 942 + led_classdev_unregister(host->platdata->led); 943 + out5: 944 + #endif 945 + au_writel(0, HOST_ENABLE(host)); 946 + au_writel(0, HOST_CONFIG(host)); 947 + au_writel(0, HOST_CONFIG2(host)); 948 + au_sync(); 949 + 950 + #ifdef CONFIG_SOC_AU1200 951 + au1xmmc_dbdma_shutdown(host); 952 + #endif 953 + 954 + tasklet_kill(&host->data_task); 955 + tasklet_kill(&host->finish_task); 956 + 957 + if (host->platdata && host->platdata->cd_setup && 958 + !(mmc->caps & MMC_CAP_NEEDS_POLL)) 959 + host->platdata->cd_setup(mmc, 0); 960 + 961 + free_irq(host->irq, host); 962 + out3: 963 + iounmap((void *)host->iobase); 964 + out2: 965 + release_resource(host->ioarea); 966 + kfree(host->ioarea); 967 + out1: 968 + mmc_free_host(mmc); 969 + out0: 970 + return ret; 1077 971 } 1078 972 1079 973 static int __devexit au1xmmc_remove(struct platform_device *pdev) 1080 974 { 975 + struct mmc_host *mmc = platform_get_drvdata(pdev); 976 + struct au1xmmc_host *host; 1081 977 1082 - int i; 978 + if (mmc) { 979 + host = mmc_priv(mmc); 1083 980 1084 - disable_irq(AU1100_SD_IRQ); 981 + mmc_remove_host(mmc); 1085 982 1086 - for(i = 0; i < AU1XMMC_CONTROLLER_COUNT; i++) { 1087 - struct au1xmmc_host *host = au1xmmc_hosts[i]; 1088 - if (!host) continue; 983 + #ifdef CONFIG_LEDS_CLASS 984 + if (host->platdata && host->platdata->led) 985 + led_classdev_unregister(host->platdata->led); 986 + #endif 987 + 988 + if (host->platdata && host->platdata->cd_setup && 989 + !(mmc->caps & MMC_CAP_NEEDS_POLL)) 990 + host->platdata->cd_setup(mmc, 0); 991 + 992 + au_writel(0, HOST_ENABLE(host)); 993 + au_writel(0, HOST_CONFIG(host)); 994 + au_writel(0, HOST_CONFIG2(host)); 995 + au_sync(); 1089 996 1090 997 tasklet_kill(&host->data_task); 1091 998 tasklet_kill(&host->finish_task); 1092 999 1093 - del_timer_sync(&host->timer); 1000 + #ifdef CONFIG_SOC_AU1200 1001 + 
au1xmmc_dbdma_shutdown(host); 1002 + #endif 1094 1003 au1xmmc_set_power(host, 0); 1095 1004 1096 - mmc_remove_host(host->mmc); 1005 + free_irq(host->irq, host); 1006 + iounmap((void *)host->iobase); 1007 + release_resource(host->ioarea); 1008 + kfree(host->ioarea); 1097 1009 1098 - au1xxx_dbdma_chan_free(host->tx_chan); 1099 - au1xxx_dbdma_chan_free(host->rx_chan); 1100 - 1101 - au_writel(0x0, HOST_ENABLE(host)); 1102 - au_sync(); 1010 + mmc_free_host(mmc); 1103 1011 } 1104 - 1105 - free_irq(AU1100_SD_IRQ, 0); 1106 1012 return 0; 1107 1013 } 1108 1014 ··· 1140 1004 1141 1005 static int __init au1xmmc_init(void) 1142 1006 { 1007 + #ifdef CONFIG_SOC_AU1200 1008 + /* DSCR_CMD0_ALWAYS has a stride of 32 bits, we need a stride 1009 + * of 8 bits. And since devices are shared, we need to create 1010 + * our own to avoid freaking out other devices. 1011 + */ 1012 + memid = au1xxx_ddma_add_device(&au1xmmc_mem_dbdev); 1013 + if (!memid) 1014 + printk(KERN_ERR "au1xmmc: cannot add memory dbdma dev\n"); 1015 + #endif 1143 1016 return platform_driver_register(&au1xmmc_driver); 1144 1017 } 1145 1018 1146 1019 static void __exit au1xmmc_exit(void) 1147 1020 { 1021 + #ifdef CONFIG_SOC_AU1200 1022 + if (memid) 1023 + au1xxx_ddma_del_device(memid); 1024 + #endif 1148 1025 platform_driver_unregister(&au1xmmc_driver); 1149 1026 } 1150 1027 1151 1028 module_init(au1xmmc_init); 1152 1029 module_exit(au1xmmc_exit); 1153 1030 1154 - #ifdef MODULE 1155 1031 MODULE_AUTHOR("Advanced Micro Devices, Inc"); 1156 1032 MODULE_DESCRIPTION("MMC/SD driver for the Alchemy Au1XXX"); 1157 1033 MODULE_LICENSE("GPL"); 1158 1034 MODULE_ALIAS("platform:au1xxx-mmc"); 1159 - #endif 1160 -
-96
drivers/mmc/host/au1xmmc.h
··· 1 - #ifndef _AU1XMMC_H_ 2 - #define _AU1XMMC_H_ 3 - 4 - /* Hardware definitions */ 5 - 6 - #define AU1XMMC_DESCRIPTOR_COUNT 1 7 - #define AU1XMMC_DESCRIPTOR_SIZE 2048 8 - 9 - #define AU1XMMC_OCR ( MMC_VDD_27_28 | MMC_VDD_28_29 | MMC_VDD_29_30 | \ 10 - MMC_VDD_30_31 | MMC_VDD_31_32 | MMC_VDD_32_33 | \ 11 - MMC_VDD_33_34 | MMC_VDD_34_35 | MMC_VDD_35_36) 12 - 13 - /* Easy access macros */ 14 - 15 - #define HOST_STATUS(h) ((h)->iobase + SD_STATUS) 16 - #define HOST_CONFIG(h) ((h)->iobase + SD_CONFIG) 17 - #define HOST_ENABLE(h) ((h)->iobase + SD_ENABLE) 18 - #define HOST_TXPORT(h) ((h)->iobase + SD_TXPORT) 19 - #define HOST_RXPORT(h) ((h)->iobase + SD_RXPORT) 20 - #define HOST_CMDARG(h) ((h)->iobase + SD_CMDARG) 21 - #define HOST_BLKSIZE(h) ((h)->iobase + SD_BLKSIZE) 22 - #define HOST_CMD(h) ((h)->iobase + SD_CMD) 23 - #define HOST_CONFIG2(h) ((h)->iobase + SD_CONFIG2) 24 - #define HOST_TIMEOUT(h) ((h)->iobase + SD_TIMEOUT) 25 - #define HOST_DEBUG(h) ((h)->iobase + SD_DEBUG) 26 - 27 - #define DMA_CHANNEL(h) \ 28 - ( ((h)->flags & HOST_F_XMIT) ? 
(h)->tx_chan : (h)->rx_chan) 29 - 30 - /* This gives us a hard value for the stop command that we can write directly 31 - * to the command register 32 - */ 33 - 34 - #define STOP_CMD (SD_CMD_RT_1B|SD_CMD_CT_7|(0xC << SD_CMD_CI_SHIFT)|SD_CMD_GO) 35 - 36 - /* This is the set of interrupts that we configure by default */ 37 - 38 - #if 0 39 - #define AU1XMMC_INTERRUPTS (SD_CONFIG_SC | SD_CONFIG_DT | SD_CONFIG_DD | \ 40 - SD_CONFIG_RAT | SD_CONFIG_CR | SD_CONFIG_I) 41 - #endif 42 - 43 - #define AU1XMMC_INTERRUPTS (SD_CONFIG_SC | SD_CONFIG_DT | \ 44 - SD_CONFIG_RAT | SD_CONFIG_CR | SD_CONFIG_I) 45 - /* The poll event (looking for insert/remove events runs twice a second */ 46 - #define AU1XMMC_DETECT_TIMEOUT (HZ/2) 47 - 48 - struct au1xmmc_host { 49 - struct mmc_host *mmc; 50 - struct mmc_request *mrq; 51 - 52 - u32 id; 53 - 54 - u32 flags; 55 - u32 iobase; 56 - u32 clock; 57 - u32 bus_width; 58 - u32 power_mode; 59 - 60 - int status; 61 - 62 - struct { 63 - int len; 64 - int dir; 65 - } dma; 66 - 67 - struct { 68 - int index; 69 - int offset; 70 - int len; 71 - } pio; 72 - 73 - u32 tx_chan; 74 - u32 rx_chan; 75 - 76 - struct timer_list timer; 77 - struct tasklet_struct finish_task; 78 - struct tasklet_struct data_task; 79 - 80 - spinlock_t lock; 81 - }; 82 - 83 - /* Status flags used by the host structure */ 84 - 85 - #define HOST_F_XMIT 0x0001 86 - #define HOST_F_RECV 0x0002 87 - #define HOST_F_DMA 0x0010 88 - #define HOST_F_ACTIVE 0x0100 89 - #define HOST_F_STOP 0x1000 90 - 91 - #define HOST_S_IDLE 0x0001 92 - #define HOST_S_CMD 0x0002 93 - #define HOST_S_DATA 0x0003 94 - #define HOST_S_STOP 0x0004 95 - 96 - #endif
+6 -3
drivers/mmc/host/imxmmc.c
··· 892 892 struct imxmci_host *host = mmc_priv(mmc); 893 893 894 894 if (host->pdata && host->pdata->get_ro) 895 - return host->pdata->get_ro(mmc_dev(mmc)); 896 - /* Host doesn't support read only detection so assume writeable */ 897 - return 0; 895 + return !!host->pdata->get_ro(mmc_dev(mmc)); 896 + /* 897 + * Board doesn't support read only detection; let the mmc core 898 + * decide what to do. 899 + */ 900 + return -ENOSYS; 898 901 } 899 902 900 903
+24 -9
drivers/mmc/host/mmc_spi.c
··· 1126 1126 struct mmc_spi_host *host = mmc_priv(mmc); 1127 1127 1128 1128 if (host->pdata && host->pdata->get_ro) 1129 - return host->pdata->get_ro(mmc->parent); 1130 - /* board doesn't support read only detection; assume writeable */ 1131 - return 0; 1129 + return !!host->pdata->get_ro(mmc->parent); 1130 + /* 1131 + * Board doesn't support read only detection; let the mmc core 1132 + * decide what to do. 1133 + */ 1134 + return -ENOSYS; 1132 1135 } 1133 1136 1137 + static int mmc_spi_get_cd(struct mmc_host *mmc) 1138 + { 1139 + struct mmc_spi_host *host = mmc_priv(mmc); 1140 + 1141 + if (host->pdata && host->pdata->get_cd) 1142 + return !!host->pdata->get_cd(mmc->parent); 1143 + return -ENOSYS; 1144 + } 1134 1145 1135 1146 static const struct mmc_host_ops mmc_spi_ops = { 1136 1147 .request = mmc_spi_request, 1137 1148 .set_ios = mmc_spi_set_ios, 1138 1149 .get_ro = mmc_spi_get_ro, 1150 + .get_cd = mmc_spi_get_cd, 1139 1151 }; 1140 1152 1141 1153 ··· 1252 1240 mmc->ops = &mmc_spi_ops; 1253 1241 mmc->max_blk_size = MMC_SPI_BLOCKSIZE; 1254 1242 1255 - /* As long as we keep track of the number of successfully 1256 - * transmitted blocks, we're good for multiwrite. 1257 - */ 1258 - mmc->caps = MMC_CAP_SPI | MMC_CAP_MULTIWRITE; 1243 + mmc->caps = MMC_CAP_SPI; 1259 1244 1260 1245 /* SPI doesn't need the lowspeed device identification thing for 1261 1246 * MMC or SD cards, since it never comes up in open drain mode. ··· 1328 1319 goto fail_glue_init; 1329 1320 } 1330 1321 1322 + /* pass platform capabilities, if any */ 1323 + if (host->pdata) 1324 + mmc->caps |= host->pdata->caps; 1325 + 1331 1326 status = mmc_add_host(mmc); 1332 1327 if (status != 0) 1333 1328 goto fail_add_host; 1334 1329 1335 - dev_info(&spi->dev, "SD/MMC host %s%s%s%s\n", 1330 + dev_info(&spi->dev, "SD/MMC host %s%s%s%s%s\n", 1336 1331 mmc->class_dev.bus_id, 1337 1332 host->dma_dev ? "" : ", no DMA", 1338 1333 (host->pdata && host->pdata->get_ro) 1339 1334 ? 
"" : ", no WP", 1340 1335 (host->pdata && host->pdata->setpower) 1341 - ? "" : ", no poweroff"); 1336 + ? "" : ", no poweroff", 1337 + (mmc->caps & MMC_CAP_NEEDS_POLL) 1338 + ? ", cd polling" : ""); 1342 1339 return 0; 1343 1340 1344 1341 fail_add_host:
-1
drivers/mmc/host/mmci.c
··· 535 535 mmc->f_min = (host->mclk + 511) / 512; 536 536 mmc->f_max = min(host->mclk, fmax); 537 537 mmc->ocr_avail = plat->ocr_mask; 538 - mmc->caps = MMC_CAP_MULTIWRITE; 539 538 540 539 /* 541 540 * We can do SGIO
+1 -1
drivers/mmc/host/omap.c
··· 1317 1317 1318 1318 host->slots[id] = slot; 1319 1319 1320 - mmc->caps = MMC_CAP_MULTIWRITE; 1320 + mmc->caps = 0; 1321 1321 if (host->pdata->conf.wire4) 1322 1322 mmc->caps |= MMC_CAP_4_BIT_DATA; 1323 1323
+6 -3
drivers/mmc/host/pxamci.c
··· 374 374 struct pxamci_host *host = mmc_priv(mmc); 375 375 376 376 if (host->pdata && host->pdata->get_ro) 377 - return host->pdata->get_ro(mmc_dev(mmc)); 378 - /* Host doesn't support read only detection so assume writeable */ 379 - return 0; 377 + return !!host->pdata->get_ro(mmc_dev(mmc)); 378 + /* 379 + * Board doesn't support read only detection; let the mmc core 380 + * decide what to do. 381 + */ 382 + return -ENOSYS; 380 383 } 381 384 382 385 static void pxamci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
+1446
drivers/mmc/host/s3cmci.c
··· 1 + /* 2 + * linux/drivers/mmc/s3cmci.h - Samsung S3C MCI driver 3 + * 4 + * Copyright (C) 2004-2006 maintech GmbH, Thomas Kleffel <tk@maintech.de> 5 + * 6 + * This program is free software; you can redistribute it and/or modify 7 + * it under the terms of the GNU General Public License version 2 as 8 + * published by the Free Software Foundation. 9 + */ 10 + 11 + #include <linux/module.h> 12 + #include <linux/dma-mapping.h> 13 + #include <linux/clk.h> 14 + #include <linux/mmc/host.h> 15 + #include <linux/platform_device.h> 16 + #include <linux/irq.h> 17 + #include <linux/io.h> 18 + 19 + #include <asm/dma.h> 20 + 21 + #include <asm/arch/regs-sdi.h> 22 + #include <asm/arch/regs-gpio.h> 23 + 24 + #include <asm/plat-s3c24xx/mci.h> 25 + 26 + #include "s3cmci.h" 27 + 28 + #define DRIVER_NAME "s3c-mci" 29 + 30 + enum dbg_channels { 31 + dbg_err = (1 << 0), 32 + dbg_debug = (1 << 1), 33 + dbg_info = (1 << 2), 34 + dbg_irq = (1 << 3), 35 + dbg_sg = (1 << 4), 36 + dbg_dma = (1 << 5), 37 + dbg_pio = (1 << 6), 38 + dbg_fail = (1 << 7), 39 + dbg_conf = (1 << 8), 40 + }; 41 + 42 + static const int dbgmap_err = dbg_err | dbg_fail; 43 + static const int dbgmap_info = dbg_info | dbg_conf; 44 + static const int dbgmap_debug = dbg_debug; 45 + 46 + #define dbg(host, channels, args...) 
\ 47 + do { \ 48 + if (dbgmap_err & channels) \ 49 + dev_err(&host->pdev->dev, args); \ 50 + else if (dbgmap_info & channels) \ 51 + dev_info(&host->pdev->dev, args); \ 52 + else if (dbgmap_debug & channels) \ 53 + dev_dbg(&host->pdev->dev, args); \ 54 + } while (0) 55 + 56 + #define RESSIZE(ressource) (((ressource)->end - (ressource)->start)+1) 57 + 58 + static struct s3c2410_dma_client s3cmci_dma_client = { 59 + .name = "s3c-mci", 60 + }; 61 + 62 + static void finalize_request(struct s3cmci_host *host); 63 + static void s3cmci_send_request(struct mmc_host *mmc); 64 + static void s3cmci_reset(struct s3cmci_host *host); 65 + 66 + #ifdef CONFIG_MMC_DEBUG 67 + 68 + static void dbg_dumpregs(struct s3cmci_host *host, char *prefix) 69 + { 70 + u32 con, pre, cmdarg, cmdcon, cmdsta, r0, r1, r2, r3, timer, bsize; 71 + u32 datcon, datcnt, datsta, fsta, imask; 72 + 73 + con = readl(host->base + S3C2410_SDICON); 74 + pre = readl(host->base + S3C2410_SDIPRE); 75 + cmdarg = readl(host->base + S3C2410_SDICMDARG); 76 + cmdcon = readl(host->base + S3C2410_SDICMDCON); 77 + cmdsta = readl(host->base + S3C2410_SDICMDSTAT); 78 + r0 = readl(host->base + S3C2410_SDIRSP0); 79 + r1 = readl(host->base + S3C2410_SDIRSP1); 80 + r2 = readl(host->base + S3C2410_SDIRSP2); 81 + r3 = readl(host->base + S3C2410_SDIRSP3); 82 + timer = readl(host->base + S3C2410_SDITIMER); 83 + bsize = readl(host->base + S3C2410_SDIBSIZE); 84 + datcon = readl(host->base + S3C2410_SDIDCON); 85 + datcnt = readl(host->base + S3C2410_SDIDCNT); 86 + datsta = readl(host->base + S3C2410_SDIDSTA); 87 + fsta = readl(host->base + S3C2410_SDIFSTA); 88 + imask = readl(host->base + host->sdiimsk); 89 + 90 + dbg(host, dbg_debug, "%s CON:[%08x] PRE:[%08x] TMR:[%08x]\n", 91 + prefix, con, pre, timer); 92 + 93 + dbg(host, dbg_debug, "%s CCON:[%08x] CARG:[%08x] CSTA:[%08x]\n", 94 + prefix, cmdcon, cmdarg, cmdsta); 95 + 96 + dbg(host, dbg_debug, "%s DCON:[%08x] FSTA:[%08x]" 97 + " DSTA:[%08x] DCNT:[%08x]\n", 98 + prefix, datcon, fsta, 
datsta, datcnt); 99 + 100 + dbg(host, dbg_debug, "%s R0:[%08x] R1:[%08x]" 101 + " R2:[%08x] R3:[%08x]\n", 102 + prefix, r0, r1, r2, r3); 103 + } 104 + 105 + static void prepare_dbgmsg(struct s3cmci_host *host, struct mmc_command *cmd, 106 + int stop) 107 + { 108 + snprintf(host->dbgmsg_cmd, 300, 109 + "#%u%s op:%i arg:0x%08x flags:0x08%x retries:%u", 110 + host->ccnt, (stop ? " (STOP)" : ""), 111 + cmd->opcode, cmd->arg, cmd->flags, cmd->retries); 112 + 113 + if (cmd->data) { 114 + snprintf(host->dbgmsg_dat, 300, 115 + "#%u bsize:%u blocks:%u bytes:%u", 116 + host->dcnt, cmd->data->blksz, 117 + cmd->data->blocks, 118 + cmd->data->blocks * cmd->data->blksz); 119 + } else { 120 + host->dbgmsg_dat[0] = '\0'; 121 + } 122 + } 123 + 124 + static void dbg_dumpcmd(struct s3cmci_host *host, struct mmc_command *cmd, 125 + int fail) 126 + { 127 + unsigned int dbglvl = fail ? dbg_fail : dbg_debug; 128 + 129 + if (!cmd) 130 + return; 131 + 132 + if (cmd->error == 0) { 133 + dbg(host, dbglvl, "CMD[OK] %s R0:0x%08x\n", 134 + host->dbgmsg_cmd, cmd->resp[0]); 135 + } else { 136 + dbg(host, dbglvl, "CMD[ERR %i] %s Status:%s\n", 137 + cmd->error, host->dbgmsg_cmd, host->status); 138 + } 139 + 140 + if (!cmd->data) 141 + return; 142 + 143 + if (cmd->data->error == 0) { 144 + dbg(host, dbglvl, "DAT[OK] %s\n", host->dbgmsg_dat); 145 + } else { 146 + dbg(host, dbglvl, "DAT[ERR %i] %s DCNT:0x%08x\n", 147 + cmd->data->error, host->dbgmsg_dat, 148 + readl(host->base + S3C2410_SDIDCNT)); 149 + } 150 + } 151 + #else 152 + static void dbg_dumpcmd(struct s3cmci_host *host, 153 + struct mmc_command *cmd, int fail) { } 154 + 155 + static void prepare_dbgmsg(struct s3cmci_host *host, struct mmc_command *cmd, 156 + int stop) { } 157 + 158 + static void dbg_dumpregs(struct s3cmci_host *host, char *prefix) { } 159 + 160 + #endif /* CONFIG_MMC_DEBUG */ 161 + 162 + static inline u32 enable_imask(struct s3cmci_host *host, u32 imask) 163 + { 164 + u32 newmask; 165 + 166 + newmask = readl(host->base + 
host->sdiimsk); 167 + newmask |= imask; 168 + 169 + writel(newmask, host->base + host->sdiimsk); 170 + 171 + return newmask; 172 + } 173 + 174 + static inline u32 disable_imask(struct s3cmci_host *host, u32 imask) 175 + { 176 + u32 newmask; 177 + 178 + newmask = readl(host->base + host->sdiimsk); 179 + newmask &= ~imask; 180 + 181 + writel(newmask, host->base + host->sdiimsk); 182 + 183 + return newmask; 184 + } 185 + 186 + static inline void clear_imask(struct s3cmci_host *host) 187 + { 188 + writel(0, host->base + host->sdiimsk); 189 + } 190 + 191 + static inline int get_data_buffer(struct s3cmci_host *host, 192 + u32 *words, u32 **pointer) 193 + { 194 + struct scatterlist *sg; 195 + 196 + if (host->pio_active == XFER_NONE) 197 + return -EINVAL; 198 + 199 + if ((!host->mrq) || (!host->mrq->data)) 200 + return -EINVAL; 201 + 202 + if (host->pio_sgptr >= host->mrq->data->sg_len) { 203 + dbg(host, dbg_debug, "no more buffers (%i/%i)\n", 204 + host->pio_sgptr, host->mrq->data->sg_len); 205 + return -EBUSY; 206 + } 207 + sg = &host->mrq->data->sg[host->pio_sgptr]; 208 + 209 + *words = sg->length >> 2; 210 + *pointer = sg_virt(sg); 211 + 212 + host->pio_sgptr++; 213 + 214 + dbg(host, dbg_sg, "new buffer (%i/%i)\n", 215 + host->pio_sgptr, host->mrq->data->sg_len); 216 + 217 + return 0; 218 + } 219 + 220 + static inline u32 fifo_count(struct s3cmci_host *host) 221 + { 222 + u32 fifostat = readl(host->base + S3C2410_SDIFSTA); 223 + 224 + fifostat &= S3C2410_SDIFSTA_COUNTMASK; 225 + return fifostat >> 2; 226 + } 227 + 228 + static inline u32 fifo_free(struct s3cmci_host *host) 229 + { 230 + u32 fifostat = readl(host->base + S3C2410_SDIFSTA); 231 + 232 + fifostat &= S3C2410_SDIFSTA_COUNTMASK; 233 + return (63 - fifostat) >> 2; 234 + } 235 + 236 + static void do_pio_read(struct s3cmci_host *host) 237 + { 238 + int res; 239 + u32 fifo; 240 + void __iomem *from_ptr; 241 + 242 + /* write real prescaler to host, it might be set slow to fix */ 243 + writel(host->prescaler, 
host->base + S3C2410_SDIPRE); 244 + 245 + from_ptr = host->base + host->sdidata; 246 + 247 + while ((fifo = fifo_count(host))) { 248 + if (!host->pio_words) { 249 + res = get_data_buffer(host, &host->pio_words, 250 + &host->pio_ptr); 251 + if (res) { 252 + host->pio_active = XFER_NONE; 253 + host->complete_what = COMPLETION_FINALIZE; 254 + 255 + dbg(host, dbg_pio, "pio_read(): " 256 + "complete (no more data).\n"); 257 + return; 258 + } 259 + 260 + dbg(host, dbg_pio, 261 + "pio_read(): new target: [%i]@[%p]\n", 262 + host->pio_words, host->pio_ptr); 263 + } 264 + 265 + dbg(host, dbg_pio, 266 + "pio_read(): fifo:[%02i] buffer:[%03i] dcnt:[%08X]\n", 267 + fifo, host->pio_words, 268 + readl(host->base + S3C2410_SDIDCNT)); 269 + 270 + if (fifo > host->pio_words) 271 + fifo = host->pio_words; 272 + 273 + host->pio_words -= fifo; 274 + host->pio_count += fifo; 275 + 276 + while (fifo--) 277 + *(host->pio_ptr++) = readl(from_ptr); 278 + } 279 + 280 + if (!host->pio_words) { 281 + res = get_data_buffer(host, &host->pio_words, &host->pio_ptr); 282 + if (res) { 283 + dbg(host, dbg_pio, 284 + "pio_read(): complete (no more buffers).\n"); 285 + host->pio_active = XFER_NONE; 286 + host->complete_what = COMPLETION_FINALIZE; 287 + 288 + return; 289 + } 290 + } 291 + 292 + enable_imask(host, 293 + S3C2410_SDIIMSK_RXFIFOHALF | S3C2410_SDIIMSK_RXFIFOLAST); 294 + } 295 + 296 + static void do_pio_write(struct s3cmci_host *host) 297 + { 298 + void __iomem *to_ptr; 299 + int res; 300 + u32 fifo; 301 + 302 + to_ptr = host->base + host->sdidata; 303 + 304 + while ((fifo = fifo_free(host))) { 305 + if (!host->pio_words) { 306 + res = get_data_buffer(host, &host->pio_words, 307 + &host->pio_ptr); 308 + if (res) { 309 + dbg(host, dbg_pio, 310 + "pio_write(): complete (no more data).\n"); 311 + host->pio_active = XFER_NONE; 312 + 313 + return; 314 + } 315 + 316 + dbg(host, dbg_pio, 317 + "pio_write(): new source: [%i]@[%p]\n", 318 + host->pio_words, host->pio_ptr); 319 + 320 + } 321 + 322 + 
if (fifo > host->pio_words) 323 + fifo = host->pio_words; 324 + 325 + host->pio_words -= fifo; 326 + host->pio_count += fifo; 327 + 328 + while (fifo--) 329 + writel(*(host->pio_ptr++), to_ptr); 330 + } 331 + 332 + enable_imask(host, S3C2410_SDIIMSK_TXFIFOHALF); 333 + } 334 + 335 + static void pio_tasklet(unsigned long data) 336 + { 337 + struct s3cmci_host *host = (struct s3cmci_host *) data; 338 + 339 + 340 + disable_irq(host->irq); 341 + 342 + if (host->pio_active == XFER_WRITE) 343 + do_pio_write(host); 344 + 345 + if (host->pio_active == XFER_READ) 346 + do_pio_read(host); 347 + 348 + if (host->complete_what == COMPLETION_FINALIZE) { 349 + clear_imask(host); 350 + if (host->pio_active != XFER_NONE) { 351 + dbg(host, dbg_err, "unfinished %s " 352 + "- pio_count:[%u] pio_words:[%u]\n", 353 + (host->pio_active == XFER_READ) ? "read" : "write", 354 + host->pio_count, host->pio_words); 355 + 356 + if (host->mrq->data) 357 + host->mrq->data->error = -EINVAL; 358 + } 359 + 360 + finalize_request(host); 361 + } else 362 + enable_irq(host->irq); 363 + } 364 + 365 + /* 366 + * ISR for SDI Interface IRQ 367 + * Communication between driver and ISR works as follows: 368 + * host->mrq points to current request 369 + * host->complete_what Indicates when the request is considered done 370 + * COMPLETION_CMDSENT when the command was sent 371 + * COMPLETION_RSPFIN when a response was received 372 + * COMPLETION_XFERFINISH when the data transfer is finished 373 + * COMPLETION_XFERFINISH_RSPFIN both of the above. 
374 + * host->complete_request is the completion-object the driver waits for 375 + * 376 + * 1) Driver sets up host->mrq and host->complete_what 377 + * 2) Driver prepares the transfer 378 + * 3) Driver enables interrupts 379 + * 4) Driver starts transfer 380 + * 5) Driver waits for host->complete_rquest 381 + * 6) ISR checks for request status (errors and success) 382 + * 6) ISR sets host->mrq->cmd->error and host->mrq->data->error 383 + * 7) ISR completes host->complete_request 384 + * 8) ISR disables interrupts 385 + * 9) Driver wakes up and takes care of the request 386 + * 387 + * Note: "->error"-fields are expected to be set to 0 before the request 388 + * was issued by mmc.c - therefore they are only set, when an error 389 + * contition comes up 390 + */ 391 + 392 + static irqreturn_t s3cmci_irq(int irq, void *dev_id) 393 + { 394 + struct s3cmci_host *host = dev_id; 395 + struct mmc_command *cmd; 396 + u32 mci_csta, mci_dsta, mci_fsta, mci_dcnt, mci_imsk; 397 + u32 mci_cclear, mci_dclear; 398 + unsigned long iflags; 399 + 400 + spin_lock_irqsave(&host->complete_lock, iflags); 401 + 402 + mci_csta = readl(host->base + S3C2410_SDICMDSTAT); 403 + mci_dsta = readl(host->base + S3C2410_SDIDSTA); 404 + mci_dcnt = readl(host->base + S3C2410_SDIDCNT); 405 + mci_fsta = readl(host->base + S3C2410_SDIFSTA); 406 + mci_imsk = readl(host->base + host->sdiimsk); 407 + mci_cclear = 0; 408 + mci_dclear = 0; 409 + 410 + if ((host->complete_what == COMPLETION_NONE) || 411 + (host->complete_what == COMPLETION_FINALIZE)) { 412 + host->status = "nothing to complete"; 413 + clear_imask(host); 414 + goto irq_out; 415 + } 416 + 417 + if (!host->mrq) { 418 + host->status = "no active mrq"; 419 + clear_imask(host); 420 + goto irq_out; 421 + } 422 + 423 + cmd = host->cmd_is_stop ? 
host->mrq->stop : host->mrq->cmd; 424 + 425 + if (!cmd) { 426 + host->status = "no active cmd"; 427 + clear_imask(host); 428 + goto irq_out; 429 + } 430 + 431 + if (!host->dodma) { 432 + if ((host->pio_active == XFER_WRITE) && 433 + (mci_fsta & S3C2410_SDIFSTA_TFDET)) { 434 + 435 + disable_imask(host, S3C2410_SDIIMSK_TXFIFOHALF); 436 + tasklet_schedule(&host->pio_tasklet); 437 + host->status = "pio tx"; 438 + } 439 + 440 + if ((host->pio_active == XFER_READ) && 441 + (mci_fsta & S3C2410_SDIFSTA_RFDET)) { 442 + 443 + disable_imask(host, 444 + S3C2410_SDIIMSK_RXFIFOHALF | 445 + S3C2410_SDIIMSK_RXFIFOLAST); 446 + 447 + tasklet_schedule(&host->pio_tasklet); 448 + host->status = "pio rx"; 449 + } 450 + } 451 + 452 + if (mci_csta & S3C2410_SDICMDSTAT_CMDTIMEOUT) { 453 + dbg(host, dbg_err, "CMDSTAT: error CMDTIMEOUT\n"); 454 + cmd->error = -ETIMEDOUT; 455 + host->status = "error: command timeout"; 456 + goto fail_transfer; 457 + } 458 + 459 + if (mci_csta & S3C2410_SDICMDSTAT_CMDSENT) { 460 + if (host->complete_what == COMPLETION_CMDSENT) { 461 + host->status = "ok: command sent"; 462 + goto close_transfer; 463 + } 464 + 465 + mci_cclear |= S3C2410_SDICMDSTAT_CMDSENT; 466 + } 467 + 468 + if (mci_csta & S3C2410_SDICMDSTAT_CRCFAIL) { 469 + if (cmd->flags & MMC_RSP_CRC) { 470 + if (host->mrq->cmd->flags & MMC_RSP_136) { 471 + dbg(host, dbg_irq, 472 + "fixup: ignore CRC fail with long rsp\n"); 473 + } else { 474 + /* note, we used to fail the transfer 475 + * here, but it seems that this is just 476 + * the hardware getting it wrong. 
477 + * 478 + * cmd->error = -EILSEQ; 479 + * host->status = "error: bad command crc"; 480 + * goto fail_transfer; 481 + */ 482 + } 483 + } 484 + 485 + mci_cclear |= S3C2410_SDICMDSTAT_CRCFAIL; 486 + } 487 + 488 + if (mci_csta & S3C2410_SDICMDSTAT_RSPFIN) { 489 + if (host->complete_what == COMPLETION_RSPFIN) { 490 + host->status = "ok: command response received"; 491 + goto close_transfer; 492 + } 493 + 494 + if (host->complete_what == COMPLETION_XFERFINISH_RSPFIN) 495 + host->complete_what = COMPLETION_XFERFINISH; 496 + 497 + mci_cclear |= S3C2410_SDICMDSTAT_RSPFIN; 498 + } 499 + 500 + /* errors handled after this point are only relevant 501 + when a data transfer is in progress */ 502 + 503 + if (!cmd->data) 504 + goto clear_status_bits; 505 + 506 + /* Check for FIFO failure */ 507 + if (host->is2440) { 508 + if (mci_fsta & S3C2440_SDIFSTA_FIFOFAIL) { 509 + dbg(host, dbg_err, "FIFO failure\n"); 510 + host->mrq->data->error = -EILSEQ; 511 + host->status = "error: 2440 fifo failure"; 512 + goto fail_transfer; 513 + } 514 + } else { 515 + if (mci_dsta & S3C2410_SDIDSTA_FIFOFAIL) { 516 + dbg(host, dbg_err, "FIFO failure\n"); 517 + cmd->data->error = -EILSEQ; 518 + host->status = "error: fifo failure"; 519 + goto fail_transfer; 520 + } 521 + } 522 + 523 + if (mci_dsta & S3C2410_SDIDSTA_RXCRCFAIL) { 524 + dbg(host, dbg_err, "bad data crc (outgoing)\n"); 525 + cmd->data->error = -EILSEQ; 526 + host->status = "error: bad data crc (outgoing)"; 527 + goto fail_transfer; 528 + } 529 + 530 + if (mci_dsta & S3C2410_SDIDSTA_CRCFAIL) { 531 + dbg(host, dbg_err, "bad data crc (incoming)\n"); 532 + cmd->data->error = -EILSEQ; 533 + host->status = "error: bad data crc (incoming)"; 534 + goto fail_transfer; 535 + } 536 + 537 + if (mci_dsta & S3C2410_SDIDSTA_DATATIMEOUT) { 538 + dbg(host, dbg_err, "data timeout\n"); 539 + cmd->data->error = -ETIMEDOUT; 540 + host->status = "error: data timeout"; 541 + goto fail_transfer; 542 + } 543 + 544 + if (mci_dsta & S3C2410_SDIDSTA_XFERFINISH) 
{ 545 + if (host->complete_what == COMPLETION_XFERFINISH) { 546 + host->status = "ok: data transfer completed"; 547 + goto close_transfer; 548 + } 549 + 550 + if (host->complete_what == COMPLETION_XFERFINISH_RSPFIN) 551 + host->complete_what = COMPLETION_RSPFIN; 552 + 553 + mci_dclear |= S3C2410_SDIDSTA_XFERFINISH; 554 + } 555 + 556 + clear_status_bits: 557 + writel(mci_cclear, host->base + S3C2410_SDICMDSTAT); 558 + writel(mci_dclear, host->base + S3C2410_SDIDSTA); 559 + 560 + goto irq_out; 561 + 562 + fail_transfer: 563 + host->pio_active = XFER_NONE; 564 + 565 + close_transfer: 566 + host->complete_what = COMPLETION_FINALIZE; 567 + 568 + clear_imask(host); 569 + tasklet_schedule(&host->pio_tasklet); 570 + 571 + goto irq_out; 572 + 573 + irq_out: 574 + dbg(host, dbg_irq, 575 + "csta:0x%08x dsta:0x%08x fsta:0x%08x dcnt:0x%08x status:%s.\n", 576 + mci_csta, mci_dsta, mci_fsta, mci_dcnt, host->status); 577 + 578 + spin_unlock_irqrestore(&host->complete_lock, iflags); 579 + return IRQ_HANDLED; 580 + 581 + } 582 + 583 + /* 584 + * ISR for the CardDetect Pin 585 + */ 586 + 587 + static irqreturn_t s3cmci_irq_cd(int irq, void *dev_id) 588 + { 589 + struct s3cmci_host *host = (struct s3cmci_host *)dev_id; 590 + 591 + dbg(host, dbg_irq, "card detect\n"); 592 + 593 + mmc_detect_change(host->mmc, msecs_to_jiffies(500)); 594 + 595 + return IRQ_HANDLED; 596 + } 597 + 598 + void s3cmci_dma_done_callback(struct s3c2410_dma_chan *dma_ch, void *buf_id, 599 + int size, enum s3c2410_dma_buffresult result) 600 + { 601 + struct s3cmci_host *host = buf_id; 602 + unsigned long iflags; 603 + u32 mci_csta, mci_dsta, mci_fsta, mci_dcnt; 604 + 605 + mci_csta = readl(host->base + S3C2410_SDICMDSTAT); 606 + mci_dsta = readl(host->base + S3C2410_SDIDSTA); 607 + mci_fsta = readl(host->base + S3C2410_SDIFSTA); 608 + mci_dcnt = readl(host->base + S3C2410_SDIDCNT); 609 + 610 + BUG_ON(!host->mrq); 611 + BUG_ON(!host->mrq->data); 612 + BUG_ON(!host->dmatogo); 613 + 614 + 
spin_lock_irqsave(&host->complete_lock, iflags); 615 + 616 + if (result != S3C2410_RES_OK) { 617 + dbg(host, dbg_fail, "DMA FAILED: csta=0x%08x dsta=0x%08x " 618 + "fsta=0x%08x dcnt:0x%08x result:0x%08x toGo:%u\n", 619 + mci_csta, mci_dsta, mci_fsta, 620 + mci_dcnt, result, host->dmatogo); 621 + 622 + goto fail_request; 623 + } 624 + 625 + host->dmatogo--; 626 + if (host->dmatogo) { 627 + dbg(host, dbg_dma, "DMA DONE Size:%i DSTA:[%08x] " 628 + "DCNT:[%08x] toGo:%u\n", 629 + size, mci_dsta, mci_dcnt, host->dmatogo); 630 + 631 + goto out; 632 + } 633 + 634 + dbg(host, dbg_dma, "DMA FINISHED Size:%i DSTA:%08x DCNT:%08x\n", 635 + size, mci_dsta, mci_dcnt); 636 + 637 + host->complete_what = COMPLETION_FINALIZE; 638 + 639 + out: 640 + tasklet_schedule(&host->pio_tasklet); 641 + spin_unlock_irqrestore(&host->complete_lock, iflags); 642 + return; 643 + 644 + fail_request: 645 + host->mrq->data->error = -EINVAL; 646 + host->complete_what = COMPLETION_FINALIZE; 647 + writel(0, host->base + host->sdiimsk); 648 + goto out; 649 + 650 + } 651 + 652 + static void finalize_request(struct s3cmci_host *host) 653 + { 654 + struct mmc_request *mrq = host->mrq; 655 + struct mmc_command *cmd = host->cmd_is_stop ? mrq->stop : mrq->cmd; 656 + int debug_as_failure = 0; 657 + 658 + if (host->complete_what != COMPLETION_FINALIZE) 659 + return; 660 + 661 + if (!mrq) 662 + return; 663 + 664 + if (cmd->data && (cmd->error == 0) && 665 + (cmd->data->error == 0)) { 666 + if (host->dodma && (!host->dma_complete)) { 667 + dbg(host, dbg_dma, "DMA Missing!\n"); 668 + return; 669 + } 670 + } 671 + 672 + /* Read response from controller. 
*/ 673 + cmd->resp[0] = readl(host->base + S3C2410_SDIRSP0); 674 + cmd->resp[1] = readl(host->base + S3C2410_SDIRSP1); 675 + cmd->resp[2] = readl(host->base + S3C2410_SDIRSP2); 676 + cmd->resp[3] = readl(host->base + S3C2410_SDIRSP3); 677 + 678 + writel(host->prescaler, host->base + S3C2410_SDIPRE); 679 + 680 + if (cmd->error) 681 + debug_as_failure = 1; 682 + 683 + if (cmd->data && cmd->data->error) 684 + debug_as_failure = 1; 685 + 686 + dbg_dumpcmd(host, cmd, debug_as_failure); 687 + 688 + /* Cleanup controller */ 689 + writel(0, host->base + S3C2410_SDICMDARG); 690 + writel(S3C2410_SDIDCON_STOP, host->base + S3C2410_SDIDCON); 691 + writel(0, host->base + S3C2410_SDICMDCON); 692 + writel(0, host->base + host->sdiimsk); 693 + 694 + if (cmd->data && cmd->error) 695 + cmd->data->error = cmd->error; 696 + 697 + if (cmd->data && cmd->data->stop && (!host->cmd_is_stop)) { 698 + host->cmd_is_stop = 1; 699 + s3cmci_send_request(host->mmc); 700 + return; 701 + } 702 + 703 + /* If we have no data transfer we are finished here */ 704 + if (!mrq->data) 705 + goto request_done; 706 + 707 + /* Calulate the amout of bytes transfer if there was no error */ 708 + if (mrq->data->error == 0) { 709 + mrq->data->bytes_xfered = 710 + (mrq->data->blocks * mrq->data->blksz); 711 + } else { 712 + mrq->data->bytes_xfered = 0; 713 + } 714 + 715 + /* If we had an error while transfering data we flush the 716 + * DMA channel and the fifo to clear out any garbage. */ 717 + if (mrq->data->error != 0) { 718 + if (host->dodma) 719 + s3c2410_dma_ctrl(host->dma, S3C2410_DMAOP_FLUSH); 720 + 721 + if (host->is2440) { 722 + /* Clear failure register and reset fifo. 
*/ 723 + writel(S3C2440_SDIFSTA_FIFORESET | 724 + S3C2440_SDIFSTA_FIFOFAIL, 725 + host->base + S3C2410_SDIFSTA); 726 + } else { 727 + u32 mci_con; 728 + 729 + /* reset fifo */ 730 + mci_con = readl(host->base + S3C2410_SDICON); 731 + mci_con |= S3C2410_SDICON_FIFORESET; 732 + 733 + writel(mci_con, host->base + S3C2410_SDICON); 734 + } 735 + } 736 + 737 + request_done: 738 + host->complete_what = COMPLETION_NONE; 739 + host->mrq = NULL; 740 + mmc_request_done(host->mmc, mrq); 741 + } 742 + 743 + 744 + void s3cmci_dma_setup(struct s3cmci_host *host, enum s3c2410_dmasrc source) 745 + { 746 + static enum s3c2410_dmasrc last_source = -1; 747 + static int setup_ok; 748 + 749 + if (last_source == source) 750 + return; 751 + 752 + last_source = source; 753 + 754 + s3c2410_dma_devconfig(host->dma, source, 3, 755 + host->mem->start + host->sdidata); 756 + 757 + if (!setup_ok) { 758 + s3c2410_dma_config(host->dma, 4, 759 + (S3C2410_DCON_HWTRIG | S3C2410_DCON_CH0_SDI)); 760 + s3c2410_dma_set_buffdone_fn(host->dma, 761 + s3cmci_dma_done_callback); 762 + s3c2410_dma_setflags(host->dma, S3C2410_DMAF_AUTOSTART); 763 + setup_ok = 1; 764 + } 765 + } 766 + 767 + static void s3cmci_send_command(struct s3cmci_host *host, 768 + struct mmc_command *cmd) 769 + { 770 + u32 ccon, imsk; 771 + 772 + imsk = S3C2410_SDIIMSK_CRCSTATUS | S3C2410_SDIIMSK_CMDTIMEOUT | 773 + S3C2410_SDIIMSK_RESPONSEND | S3C2410_SDIIMSK_CMDSENT | 774 + S3C2410_SDIIMSK_RESPONSECRC; 775 + 776 + enable_imask(host, imsk); 777 + 778 + if (cmd->data) 779 + host->complete_what = COMPLETION_XFERFINISH_RSPFIN; 780 + else if (cmd->flags & MMC_RSP_PRESENT) 781 + host->complete_what = COMPLETION_RSPFIN; 782 + else 783 + host->complete_what = COMPLETION_CMDSENT; 784 + 785 + writel(cmd->arg, host->base + S3C2410_SDICMDARG); 786 + 787 + ccon = cmd->opcode & S3C2410_SDICMDCON_INDEX; 788 + ccon |= S3C2410_SDICMDCON_SENDERHOST | S3C2410_SDICMDCON_CMDSTART; 789 + 790 + if (cmd->flags & MMC_RSP_PRESENT) 791 + ccon |= 
S3C2410_SDICMDCON_WAITRSP; 792 + 793 + if (cmd->flags & MMC_RSP_136) 794 + ccon |= S3C2410_SDICMDCON_LONGRSP; 795 + 796 + writel(ccon, host->base + S3C2410_SDICMDCON); 797 + } 798 + 799 + static int s3cmci_setup_data(struct s3cmci_host *host, struct mmc_data *data) 800 + { 801 + u32 dcon, imsk, stoptries = 3; 802 + 803 + /* write DCON register */ 804 + 805 + if (!data) { 806 + writel(0, host->base + S3C2410_SDIDCON); 807 + return 0; 808 + } 809 + 810 + if ((data->blksz & 3) != 0) { 811 + /* We cannot deal with unaligned blocks with more than 812 + * one block being transfered. */ 813 + 814 + if (data->blocks > 1) 815 + return -EINVAL; 816 + 817 + /* No support yet for non-word block transfers. */ 818 + return -EINVAL; 819 + } 820 + 821 + while (readl(host->base + S3C2410_SDIDSTA) & 822 + (S3C2410_SDIDSTA_TXDATAON | S3C2410_SDIDSTA_RXDATAON)) { 823 + 824 + dbg(host, dbg_err, 825 + "mci_setup_data() transfer stillin progress.\n"); 826 + 827 + writel(S3C2410_SDIDCON_STOP, host->base + S3C2410_SDIDCON); 828 + s3cmci_reset(host); 829 + 830 + if ((stoptries--) == 0) { 831 + dbg_dumpregs(host, "DRF"); 832 + return -EINVAL; 833 + } 834 + } 835 + 836 + dcon = data->blocks & S3C2410_SDIDCON_BLKNUM_MASK; 837 + 838 + if (host->dodma) 839 + dcon |= S3C2410_SDIDCON_DMAEN; 840 + 841 + if (host->bus_width == MMC_BUS_WIDTH_4) 842 + dcon |= S3C2410_SDIDCON_WIDEBUS; 843 + 844 + if (!(data->flags & MMC_DATA_STREAM)) 845 + dcon |= S3C2410_SDIDCON_BLOCKMODE; 846 + 847 + if (data->flags & MMC_DATA_WRITE) { 848 + dcon |= S3C2410_SDIDCON_TXAFTERRESP; 849 + dcon |= S3C2410_SDIDCON_XFER_TXSTART; 850 + } 851 + 852 + if (data->flags & MMC_DATA_READ) { 853 + dcon |= S3C2410_SDIDCON_RXAFTERCMD; 854 + dcon |= S3C2410_SDIDCON_XFER_RXSTART; 855 + } 856 + 857 + if (host->is2440) { 858 + dcon |= S3C2440_SDIDCON_DS_WORD; 859 + dcon |= S3C2440_SDIDCON_DATSTART; 860 + } 861 + 862 + writel(dcon, host->base + S3C2410_SDIDCON); 863 + 864 + /* write BSIZE register */ 865 + 866 + writel(data->blksz, 
host->base + S3C2410_SDIBSIZE); 867 + 868 + /* add to IMASK register */ 869 + imsk = S3C2410_SDIIMSK_FIFOFAIL | S3C2410_SDIIMSK_DATACRC | 870 + S3C2410_SDIIMSK_DATATIMEOUT | S3C2410_SDIIMSK_DATAFINISH; 871 + 872 + enable_imask(host, imsk); 873 + 874 + /* write TIMER register */ 875 + 876 + if (host->is2440) { 877 + writel(0x007FFFFF, host->base + S3C2410_SDITIMER); 878 + } else { 879 + writel(0x0000FFFF, host->base + S3C2410_SDITIMER); 880 + 881 + /* FIX: set slow clock to prevent timeouts on read */ 882 + if (data->flags & MMC_DATA_READ) 883 + writel(0xFF, host->base + S3C2410_SDIPRE); 884 + } 885 + 886 + return 0; 887 + } 888 + 889 + #define BOTH_DIR (MMC_DATA_WRITE | MMC_DATA_READ) 890 + 891 + static int s3cmci_prepare_pio(struct s3cmci_host *host, struct mmc_data *data) 892 + { 893 + int rw = (data->flags & MMC_DATA_WRITE) ? 1 : 0; 894 + 895 + BUG_ON((data->flags & BOTH_DIR) == BOTH_DIR); 896 + 897 + host->pio_sgptr = 0; 898 + host->pio_words = 0; 899 + host->pio_count = 0; 900 + host->pio_active = rw ? XFER_WRITE : XFER_READ; 901 + 902 + if (rw) { 903 + do_pio_write(host); 904 + enable_imask(host, S3C2410_SDIIMSK_TXFIFOHALF); 905 + } else { 906 + enable_imask(host, S3C2410_SDIIMSK_RXFIFOHALF 907 + | S3C2410_SDIIMSK_RXFIFOLAST); 908 + } 909 + 910 + return 0; 911 + } 912 + 913 + static int s3cmci_prepare_dma(struct s3cmci_host *host, struct mmc_data *data) 914 + { 915 + int dma_len, i; 916 + int rw = (data->flags & MMC_DATA_WRITE) ? 1 : 0; 917 + 918 + BUG_ON((data->flags & BOTH_DIR) == BOTH_DIR); 919 + 920 + s3cmci_dma_setup(host, rw ? S3C2410_DMASRC_MEM : S3C2410_DMASRC_HW); 921 + s3c2410_dma_ctrl(host->dma, S3C2410_DMAOP_FLUSH); 922 + 923 + dma_len = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len, 924 + (rw) ? 
DMA_TO_DEVICE : DMA_FROM_DEVICE); 925 + 926 + if (dma_len == 0) 927 + return -ENOMEM; 928 + 929 + host->dma_complete = 0; 930 + host->dmatogo = dma_len; 931 + 932 + for (i = 0; i < dma_len; i++) { 933 + int res; 934 + 935 + dbg(host, dbg_dma, "enqueue %i:%u@%u\n", i, 936 + sg_dma_address(&data->sg[i]), 937 + sg_dma_len(&data->sg[i])); 938 + 939 + res = s3c2410_dma_enqueue(host->dma, (void *) host, 940 + sg_dma_address(&data->sg[i]), 941 + sg_dma_len(&data->sg[i])); 942 + 943 + if (res) { 944 + s3c2410_dma_ctrl(host->dma, S3C2410_DMAOP_FLUSH); 945 + return -EBUSY; 946 + } 947 + } 948 + 949 + s3c2410_dma_ctrl(host->dma, S3C2410_DMAOP_START); 950 + 951 + return 0; 952 + } 953 + 954 + static void s3cmci_send_request(struct mmc_host *mmc) 955 + { 956 + struct s3cmci_host *host = mmc_priv(mmc); 957 + struct mmc_request *mrq = host->mrq; 958 + struct mmc_command *cmd = host->cmd_is_stop ? mrq->stop : mrq->cmd; 959 + 960 + host->ccnt++; 961 + prepare_dbgmsg(host, cmd, host->cmd_is_stop); 962 + 963 + /* Clear command, data and fifo status registers 964 + Fifo clear only necessary on 2440, but doesn't hurt on 2410 965 + */ 966 + writel(0xFFFFFFFF, host->base + S3C2410_SDICMDSTAT); 967 + writel(0xFFFFFFFF, host->base + S3C2410_SDIDSTA); 968 + writel(0xFFFFFFFF, host->base + S3C2410_SDIFSTA); 969 + 970 + if (cmd->data) { 971 + int res = s3cmci_setup_data(host, cmd->data); 972 + 973 + host->dcnt++; 974 + 975 + if (res) { 976 + dbg(host, dbg_err, "setup data error %d\n", res); 977 + cmd->error = res; 978 + cmd->data->error = res; 979 + 980 + mmc_request_done(mmc, mrq); 981 + return; 982 + } 983 + 984 + if (host->dodma) 985 + res = s3cmci_prepare_dma(host, cmd->data); 986 + else 987 + res = s3cmci_prepare_pio(host, cmd->data); 988 + 989 + if (res) { 990 + dbg(host, dbg_err, "data prepare error %d\n", res); 991 + cmd->error = res; 992 + cmd->data->error = res; 993 + 994 + mmc_request_done(mmc, mrq); 995 + return; 996 + } 997 + } 998 + 999 + /* Send command */ 1000 + 
s3cmci_send_command(host, cmd); 1001 + 1002 + /* Enable Interrupt */ 1003 + enable_irq(host->irq); 1004 + } 1005 + 1006 + static int s3cmci_card_present(struct s3cmci_host *host) 1007 + { 1008 + struct s3c24xx_mci_pdata *pdata = host->pdata; 1009 + int ret; 1010 + 1011 + if (pdata->gpio_detect == 0) 1012 + return -ENOSYS; 1013 + 1014 + ret = s3c2410_gpio_getpin(pdata->gpio_detect) ? 0 : 1; 1015 + return ret ^ pdata->detect_invert; 1016 + } 1017 + 1018 + static void s3cmci_request(struct mmc_host *mmc, struct mmc_request *mrq) 1019 + { 1020 + struct s3cmci_host *host = mmc_priv(mmc); 1021 + 1022 + host->status = "mmc request"; 1023 + host->cmd_is_stop = 0; 1024 + host->mrq = mrq; 1025 + 1026 + if (s3cmci_card_present(host) == 0) { 1027 + dbg(host, dbg_err, "%s: no medium present\n", __func__); 1028 + host->mrq->cmd->error = -ENOMEDIUM; 1029 + mmc_request_done(mmc, mrq); 1030 + } else 1031 + s3cmci_send_request(mmc); 1032 + } 1033 + 1034 + static void s3cmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) 1035 + { 1036 + struct s3cmci_host *host = mmc_priv(mmc); 1037 + u32 mci_psc, mci_con; 1038 + 1039 + /* Set the power state */ 1040 + 1041 + mci_con = readl(host->base + S3C2410_SDICON); 1042 + 1043 + switch (ios->power_mode) { 1044 + case MMC_POWER_ON: 1045 + case MMC_POWER_UP: 1046 + s3c2410_gpio_cfgpin(S3C2410_GPE5, S3C2410_GPE5_SDCLK); 1047 + s3c2410_gpio_cfgpin(S3C2410_GPE6, S3C2410_GPE6_SDCMD); 1048 + s3c2410_gpio_cfgpin(S3C2410_GPE7, S3C2410_GPE7_SDDAT0); 1049 + s3c2410_gpio_cfgpin(S3C2410_GPE8, S3C2410_GPE8_SDDAT1); 1050 + s3c2410_gpio_cfgpin(S3C2410_GPE9, S3C2410_GPE9_SDDAT2); 1051 + s3c2410_gpio_cfgpin(S3C2410_GPE10, S3C2410_GPE10_SDDAT3); 1052 + 1053 + if (host->pdata->set_power) 1054 + host->pdata->set_power(ios->power_mode, ios->vdd); 1055 + 1056 + if (!host->is2440) 1057 + mci_con |= S3C2410_SDICON_FIFORESET; 1058 + 1059 + break; 1060 + 1061 + case MMC_POWER_OFF: 1062 + default: 1063 + s3c2410_gpio_setpin(S3C2410_GPE5, 0); 1064 + 
s3c2410_gpio_cfgpin(S3C2410_GPE5, S3C2410_GPE5_OUTP); 1065 + 1066 + if (host->is2440) 1067 + mci_con |= S3C2440_SDICON_SDRESET; 1068 + 1069 + if (host->pdata->set_power) 1070 + host->pdata->set_power(ios->power_mode, ios->vdd); 1071 + 1072 + break; 1073 + } 1074 + 1075 + /* Set clock */ 1076 + for (mci_psc = 0; mci_psc < 255; mci_psc++) { 1077 + host->real_rate = host->clk_rate / (host->clk_div*(mci_psc+1)); 1078 + 1079 + if (host->real_rate <= ios->clock) 1080 + break; 1081 + } 1082 + 1083 + if (mci_psc > 255) 1084 + mci_psc = 255; 1085 + 1086 + host->prescaler = mci_psc; 1087 + writel(host->prescaler, host->base + S3C2410_SDIPRE); 1088 + 1089 + /* If requested clock is 0, real_rate will be 0, too */ 1090 + if (ios->clock == 0) 1091 + host->real_rate = 0; 1092 + 1093 + /* Set CLOCK_ENABLE */ 1094 + if (ios->clock) 1095 + mci_con |= S3C2410_SDICON_CLOCKTYPE; 1096 + else 1097 + mci_con &= ~S3C2410_SDICON_CLOCKTYPE; 1098 + 1099 + writel(mci_con, host->base + S3C2410_SDICON); 1100 + 1101 + if ((ios->power_mode == MMC_POWER_ON) || 1102 + (ios->power_mode == MMC_POWER_UP)) { 1103 + dbg(host, dbg_conf, "running at %lukHz (requested: %ukHz).\n", 1104 + host->real_rate/1000, ios->clock/1000); 1105 + } else { 1106 + dbg(host, dbg_conf, "powered down.\n"); 1107 + } 1108 + 1109 + host->bus_width = ios->bus_width; 1110 + } 1111 + 1112 + static void s3cmci_reset(struct s3cmci_host *host) 1113 + { 1114 + u32 con = readl(host->base + S3C2410_SDICON); 1115 + 1116 + con |= S3C2440_SDICON_SDRESET; 1117 + writel(con, host->base + S3C2410_SDICON); 1118 + } 1119 + 1120 + static int s3cmci_get_ro(struct mmc_host *mmc) 1121 + { 1122 + struct s3cmci_host *host = mmc_priv(mmc); 1123 + struct s3c24xx_mci_pdata *pdata = host->pdata; 1124 + int ret; 1125 + 1126 + if (pdata->gpio_wprotect == 0) 1127 + return 0; 1128 + 1129 + ret = s3c2410_gpio_getpin(pdata->gpio_wprotect); 1130 + 1131 + if (pdata->wprotect_invert) 1132 + ret = !ret; 1133 + 1134 + return ret; 1135 + } 1136 + 1137 + static 
struct mmc_host_ops s3cmci_ops = { 1138 + .request = s3cmci_request, 1139 + .set_ios = s3cmci_set_ios, 1140 + .get_ro = s3cmci_get_ro, 1141 + }; 1142 + 1143 + static struct s3c24xx_mci_pdata s3cmci_def_pdata = { 1144 + /* This is currently here to avoid a number of if (host->pdata) 1145 + * checks. Any zero fields to ensure reaonable defaults are picked. */ 1146 + }; 1147 + 1148 + static int __devinit s3cmci_probe(struct platform_device *pdev, int is2440) 1149 + { 1150 + struct s3cmci_host *host; 1151 + struct mmc_host *mmc; 1152 + int ret; 1153 + 1154 + mmc = mmc_alloc_host(sizeof(struct s3cmci_host), &pdev->dev); 1155 + if (!mmc) { 1156 + ret = -ENOMEM; 1157 + goto probe_out; 1158 + } 1159 + 1160 + host = mmc_priv(mmc); 1161 + host->mmc = mmc; 1162 + host->pdev = pdev; 1163 + host->is2440 = is2440; 1164 + 1165 + host->pdata = pdev->dev.platform_data; 1166 + if (!host->pdata) { 1167 + pdev->dev.platform_data = &s3cmci_def_pdata; 1168 + host->pdata = &s3cmci_def_pdata; 1169 + } 1170 + 1171 + spin_lock_init(&host->complete_lock); 1172 + tasklet_init(&host->pio_tasklet, pio_tasklet, (unsigned long) host); 1173 + 1174 + if (is2440) { 1175 + host->sdiimsk = S3C2440_SDIIMSK; 1176 + host->sdidata = S3C2440_SDIDATA; 1177 + host->clk_div = 1; 1178 + } else { 1179 + host->sdiimsk = S3C2410_SDIIMSK; 1180 + host->sdidata = S3C2410_SDIDATA; 1181 + host->clk_div = 2; 1182 + } 1183 + 1184 + host->dodma = 0; 1185 + host->complete_what = COMPLETION_NONE; 1186 + host->pio_active = XFER_NONE; 1187 + 1188 + host->dma = S3CMCI_DMA; 1189 + 1190 + host->mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1191 + if (!host->mem) { 1192 + dev_err(&pdev->dev, 1193 + "failed to get io memory region resouce.\n"); 1194 + 1195 + ret = -ENOENT; 1196 + goto probe_free_host; 1197 + } 1198 + 1199 + host->mem = request_mem_region(host->mem->start, 1200 + RESSIZE(host->mem), pdev->name); 1201 + 1202 + if (!host->mem) { 1203 + dev_err(&pdev->dev, "failed to request io memory region.\n"); 1204 + ret 
= -ENOENT; 1205 + goto probe_free_host; 1206 + } 1207 + 1208 + host->base = ioremap(host->mem->start, RESSIZE(host->mem)); 1209 + if (host->base == 0) { 1210 + dev_err(&pdev->dev, "failed to ioremap() io memory region.\n"); 1211 + ret = -EINVAL; 1212 + goto probe_free_mem_region; 1213 + } 1214 + 1215 + host->irq = platform_get_irq(pdev, 0); 1216 + if (host->irq == 0) { 1217 + dev_err(&pdev->dev, "failed to get interrupt resouce.\n"); 1218 + ret = -EINVAL; 1219 + goto probe_iounmap; 1220 + } 1221 + 1222 + if (request_irq(host->irq, s3cmci_irq, 0, DRIVER_NAME, host)) { 1223 + dev_err(&pdev->dev, "failed to request mci interrupt.\n"); 1224 + ret = -ENOENT; 1225 + goto probe_iounmap; 1226 + } 1227 + 1228 + /* We get spurious interrupts even when we have set the IMSK 1229 + * register to ignore everything, so use disable_irq() to make 1230 + * ensure we don't lock the system with un-serviceable requests. */ 1231 + 1232 + disable_irq(host->irq); 1233 + 1234 + host->irq_cd = s3c2410_gpio_getirq(host->pdata->gpio_detect); 1235 + 1236 + if (host->irq_cd >= 0) { 1237 + if (request_irq(host->irq_cd, s3cmci_irq_cd, 1238 + IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING, 1239 + DRIVER_NAME, host)) { 1240 + dev_err(&pdev->dev, "can't get card detect irq.\n"); 1241 + ret = -ENOENT; 1242 + goto probe_free_irq; 1243 + } 1244 + } else { 1245 + dev_warn(&pdev->dev, "host detect has no irq available\n"); 1246 + s3c2410_gpio_cfgpin(host->pdata->gpio_detect, 1247 + S3C2410_GPIO_INPUT); 1248 + } 1249 + 1250 + if (host->pdata->gpio_wprotect) 1251 + s3c2410_gpio_cfgpin(host->pdata->gpio_wprotect, 1252 + S3C2410_GPIO_INPUT); 1253 + 1254 + if (s3c2410_dma_request(S3CMCI_DMA, &s3cmci_dma_client, NULL) < 0) { 1255 + dev_err(&pdev->dev, "unable to get DMA channel.\n"); 1256 + ret = -EBUSY; 1257 + goto probe_free_irq_cd; 1258 + } 1259 + 1260 + host->clk = clk_get(&pdev->dev, "sdi"); 1261 + if (IS_ERR(host->clk)) { 1262 + dev_err(&pdev->dev, "failed to find clock source.\n"); 1263 + ret = 
PTR_ERR(host->clk); 1264 + host->clk = NULL; 1265 + goto probe_free_host; 1266 + } 1267 + 1268 + ret = clk_enable(host->clk); 1269 + if (ret) { 1270 + dev_err(&pdev->dev, "failed to enable clock source.\n"); 1271 + goto clk_free; 1272 + } 1273 + 1274 + host->clk_rate = clk_get_rate(host->clk); 1275 + 1276 + mmc->ops = &s3cmci_ops; 1277 + mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; 1278 + mmc->caps = MMC_CAP_4_BIT_DATA; 1279 + mmc->f_min = host->clk_rate / (host->clk_div * 256); 1280 + mmc->f_max = host->clk_rate / host->clk_div; 1281 + 1282 + if (host->pdata->ocr_avail) 1283 + mmc->ocr_avail = host->pdata->ocr_avail; 1284 + 1285 + mmc->max_blk_count = 4095; 1286 + mmc->max_blk_size = 4095; 1287 + mmc->max_req_size = 4095 * 512; 1288 + mmc->max_seg_size = mmc->max_req_size; 1289 + 1290 + mmc->max_phys_segs = 128; 1291 + mmc->max_hw_segs = 128; 1292 + 1293 + dbg(host, dbg_debug, 1294 + "probe: mode:%s mapped mci_base:%p irq:%u irq_cd:%u dma:%u.\n", 1295 + (host->is2440?"2440":""), 1296 + host->base, host->irq, host->irq_cd, host->dma); 1297 + 1298 + ret = mmc_add_host(mmc); 1299 + if (ret) { 1300 + dev_err(&pdev->dev, "failed to add mmc host.\n"); 1301 + goto free_dmabuf; 1302 + } 1303 + 1304 + platform_set_drvdata(pdev, mmc); 1305 + dev_info(&pdev->dev, "initialisation done.\n"); 1306 + 1307 + return 0; 1308 + 1309 + free_dmabuf: 1310 + clk_disable(host->clk); 1311 + 1312 + clk_free: 1313 + clk_put(host->clk); 1314 + 1315 + probe_free_irq_cd: 1316 + if (host->irq_cd >= 0) 1317 + free_irq(host->irq_cd, host); 1318 + 1319 + probe_free_irq: 1320 + free_irq(host->irq, host); 1321 + 1322 + probe_iounmap: 1323 + iounmap(host->base); 1324 + 1325 + probe_free_mem_region: 1326 + release_mem_region(host->mem->start, RESSIZE(host->mem)); 1327 + 1328 + probe_free_host: 1329 + mmc_free_host(mmc); 1330 + probe_out: 1331 + return ret; 1332 + } 1333 + 1334 + static int __devexit s3cmci_remove(struct platform_device *pdev) 1335 + { 1336 + struct mmc_host *mmc = 
platform_get_drvdata(pdev); 1337 + struct s3cmci_host *host = mmc_priv(mmc); 1338 + 1339 + mmc_remove_host(mmc); 1340 + 1341 + clk_disable(host->clk); 1342 + clk_put(host->clk); 1343 + 1344 + tasklet_disable(&host->pio_tasklet); 1345 + s3c2410_dma_free(S3CMCI_DMA, &s3cmci_dma_client); 1346 + 1347 + if (host->irq_cd >= 0) 1348 + free_irq(host->irq_cd, host); 1349 + free_irq(host->irq, host); 1350 + 1351 + iounmap(host->base); 1352 + release_mem_region(host->mem->start, RESSIZE(host->mem)); 1353 + 1354 + mmc_free_host(mmc); 1355 + return 0; 1356 + } 1357 + 1358 + static int __devinit s3cmci_probe_2410(struct platform_device *dev) 1359 + { 1360 + return s3cmci_probe(dev, 0); 1361 + } 1362 + 1363 + static int __devinit s3cmci_probe_2412(struct platform_device *dev) 1364 + { 1365 + return s3cmci_probe(dev, 1); 1366 + } 1367 + 1368 + static int __devinit s3cmci_probe_2440(struct platform_device *dev) 1369 + { 1370 + return s3cmci_probe(dev, 1); 1371 + } 1372 + 1373 + #ifdef CONFIG_PM 1374 + 1375 + static int s3cmci_suspend(struct platform_device *dev, pm_message_t state) 1376 + { 1377 + struct mmc_host *mmc = platform_get_drvdata(dev); 1378 + 1379 + return mmc_suspend_host(mmc, state); 1380 + } 1381 + 1382 + static int s3cmci_resume(struct platform_device *dev) 1383 + { 1384 + struct mmc_host *mmc = platform_get_drvdata(dev); 1385 + 1386 + return mmc_resume_host(mmc); 1387 + } 1388 + 1389 + #else /* CONFIG_PM */ 1390 + #define s3cmci_suspend NULL 1391 + #define s3cmci_resume NULL 1392 + #endif /* CONFIG_PM */ 1393 + 1394 + 1395 + static struct platform_driver s3cmci_driver_2410 = { 1396 + .driver.name = "s3c2410-sdi", 1397 + .driver.owner = THIS_MODULE, 1398 + .probe = s3cmci_probe_2410, 1399 + .remove = __devexit_p(s3cmci_remove), 1400 + .suspend = s3cmci_suspend, 1401 + .resume = s3cmci_resume, 1402 + }; 1403 + 1404 + static struct platform_driver s3cmci_driver_2412 = { 1405 + .driver.name = "s3c2412-sdi", 1406 + .driver.owner = THIS_MODULE, 1407 + .probe = 
s3cmci_probe_2412, 1408 + .remove = __devexit_p(s3cmci_remove), 1409 + .suspend = s3cmci_suspend, 1410 + .resume = s3cmci_resume, 1411 + }; 1412 + 1413 + static struct platform_driver s3cmci_driver_2440 = { 1414 + .driver.name = "s3c2440-sdi", 1415 + .driver.owner = THIS_MODULE, 1416 + .probe = s3cmci_probe_2440, 1417 + .remove = __devexit_p(s3cmci_remove), 1418 + .suspend = s3cmci_suspend, 1419 + .resume = s3cmci_resume, 1420 + }; 1421 + 1422 + 1423 + static int __init s3cmci_init(void) 1424 + { 1425 + platform_driver_register(&s3cmci_driver_2410); 1426 + platform_driver_register(&s3cmci_driver_2412); 1427 + platform_driver_register(&s3cmci_driver_2440); 1428 + return 0; 1429 + } 1430 + 1431 + static void __exit s3cmci_exit(void) 1432 + { 1433 + platform_driver_unregister(&s3cmci_driver_2410); 1434 + platform_driver_unregister(&s3cmci_driver_2412); 1435 + platform_driver_unregister(&s3cmci_driver_2440); 1436 + } 1437 + 1438 + module_init(s3cmci_init); 1439 + module_exit(s3cmci_exit); 1440 + 1441 + MODULE_DESCRIPTION("Samsung S3C MMC/SD Card Interface driver"); 1442 + MODULE_LICENSE("GPL v2"); 1443 + MODULE_AUTHOR("Thomas Kleffel <tk@maintech.de>"); 1444 + MODULE_ALIAS("platform:s3c2410-sdi"); 1445 + MODULE_ALIAS("platform:s3c2412-sdi"); 1446 + MODULE_ALIAS("platform:s3c2440-sdi");
+70
drivers/mmc/host/s3cmci.h
··· 1 + /* 2 + * linux/drivers/mmc/s3cmci.h - Samsung S3C MCI driver 3 + * 4 + * Copyright (C) 2004-2006 Thomas Kleffel, All Rights Reserved. 5 + * 6 + * This program is free software; you can redistribute it and/or modify 7 + * it under the terms of the GNU General Public License version 2 as 8 + * published by the Free Software Foundation. 9 + */ 10 + 11 + /* FIXME: DMA Resource management ?! */ 12 + #define S3CMCI_DMA 0 13 + 14 + enum s3cmci_waitfor { 15 + COMPLETION_NONE, 16 + COMPLETION_FINALIZE, 17 + COMPLETION_CMDSENT, 18 + COMPLETION_RSPFIN, 19 + COMPLETION_XFERFINISH, 20 + COMPLETION_XFERFINISH_RSPFIN, 21 + }; 22 + 23 + struct s3cmci_host { 24 + struct platform_device *pdev; 25 + struct s3c24xx_mci_pdata *pdata; 26 + struct mmc_host *mmc; 27 + struct resource *mem; 28 + struct clk *clk; 29 + void __iomem *base; 30 + int irq; 31 + int irq_cd; 32 + int dma; 33 + 34 + unsigned long clk_rate; 35 + unsigned long clk_div; 36 + unsigned long real_rate; 37 + u8 prescaler; 38 + 39 + int is2440; 40 + unsigned sdiimsk; 41 + unsigned sdidata; 42 + int dodma; 43 + int dmatogo; 44 + 45 + struct mmc_request *mrq; 46 + int cmd_is_stop; 47 + 48 + spinlock_t complete_lock; 49 + enum s3cmci_waitfor complete_what; 50 + 51 + int dma_complete; 52 + 53 + u32 pio_sgptr; 54 + u32 pio_words; 55 + u32 pio_count; 56 + u32 *pio_ptr; 57 + #define XFER_NONE 0 58 + #define XFER_READ 1 59 + #define XFER_WRITE 2 60 + u32 pio_active; 61 + 62 + int bus_width; 63 + 64 + char dbgmsg_cmd[301]; 65 + char dbgmsg_dat[301]; 66 + char *status; 67 + 68 + unsigned int ccnt, dcnt; 69 + struct tasklet_struct pio_tasklet; 70 + };
+732
drivers/mmc/host/sdhci-pci.c
··· 1 + /* linux/drivers/mmc/host/sdhci-pci.c - SDHCI on PCI bus interface 2 + * 3 + * Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved. 4 + * 5 + * This program is free software; you can redistribute it and/or modify 6 + * it under the terms of the GNU General Public License as published by 7 + * the Free Software Foundation; either version 2 of the License, or (at 8 + * your option) any later version. 9 + * 10 + * Thanks to the following companies for their support: 11 + * 12 + * - JMicron (hardware and technical support) 13 + */ 14 + 15 + #include <linux/delay.h> 16 + #include <linux/highmem.h> 17 + #include <linux/pci.h> 18 + #include <linux/dma-mapping.h> 19 + 20 + #include <linux/mmc/host.h> 21 + 22 + #include <asm/scatterlist.h> 23 + #include <asm/io.h> 24 + 25 + #include "sdhci.h" 26 + 27 + /* 28 + * PCI registers 29 + */ 30 + 31 + #define PCI_SDHCI_IFPIO 0x00 32 + #define PCI_SDHCI_IFDMA 0x01 33 + #define PCI_SDHCI_IFVENDOR 0x02 34 + 35 + #define PCI_SLOT_INFO 0x40 /* 8 bits */ 36 + #define PCI_SLOT_INFO_SLOTS(x) ((x >> 4) & 7) 37 + #define PCI_SLOT_INFO_FIRST_BAR_MASK 0x07 38 + 39 + #define MAX_SLOTS 8 40 + 41 + struct sdhci_pci_chip; 42 + struct sdhci_pci_slot; 43 + 44 + struct sdhci_pci_fixes { 45 + unsigned int quirks; 46 + 47 + int (*probe)(struct sdhci_pci_chip*); 48 + 49 + int (*probe_slot)(struct sdhci_pci_slot*); 50 + void (*remove_slot)(struct sdhci_pci_slot*, int); 51 + 52 + int (*suspend)(struct sdhci_pci_chip*, 53 + pm_message_t); 54 + int (*resume)(struct sdhci_pci_chip*); 55 + }; 56 + 57 + struct sdhci_pci_slot { 58 + struct sdhci_pci_chip *chip; 59 + struct sdhci_host *host; 60 + 61 + int pci_bar; 62 + }; 63 + 64 + struct sdhci_pci_chip { 65 + struct pci_dev *pdev; 66 + 67 + unsigned int quirks; 68 + const struct sdhci_pci_fixes *fixes; 69 + 70 + int num_slots; /* Slots on controller */ 71 + struct sdhci_pci_slot *slots[MAX_SLOTS]; /* Pointers to host slots */ 72 + }; 73 + 74 + 75 + 
/*****************************************************************************\ 76 + * * 77 + * Hardware specific quirk handling * 78 + * * 79 + \*****************************************************************************/ 80 + 81 + static int ricoh_probe(struct sdhci_pci_chip *chip) 82 + { 83 + if (chip->pdev->subsystem_vendor == PCI_VENDOR_ID_IBM) 84 + chip->quirks |= SDHCI_QUIRK_CLOCK_BEFORE_RESET; 85 + 86 + if (chip->pdev->subsystem_vendor == PCI_VENDOR_ID_SAMSUNG) 87 + chip->quirks |= SDHCI_QUIRK_NO_CARD_NO_RESET; 88 + 89 + return 0; 90 + } 91 + 92 + static const struct sdhci_pci_fixes sdhci_ricoh = { 93 + .probe = ricoh_probe, 94 + .quirks = SDHCI_QUIRK_32BIT_DMA_ADDR, 95 + }; 96 + 97 + static const struct sdhci_pci_fixes sdhci_ene_712 = { 98 + .quirks = SDHCI_QUIRK_SINGLE_POWER_WRITE | 99 + SDHCI_QUIRK_BROKEN_DMA, 100 + }; 101 + 102 + static const struct sdhci_pci_fixes sdhci_ene_714 = { 103 + .quirks = SDHCI_QUIRK_SINGLE_POWER_WRITE | 104 + SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS | 105 + SDHCI_QUIRK_BROKEN_DMA, 106 + }; 107 + 108 + static const struct sdhci_pci_fixes sdhci_cafe = { 109 + .quirks = SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER | 110 + SDHCI_QUIRK_BROKEN_TIMEOUT_VAL, 111 + }; 112 + 113 + static int jmicron_pmos(struct sdhci_pci_chip *chip, int on) 114 + { 115 + u8 scratch; 116 + int ret; 117 + 118 + ret = pci_read_config_byte(chip->pdev, 0xAE, &scratch); 119 + if (ret) 120 + return ret; 121 + 122 + /* 123 + * Turn PMOS on [bit 0], set over current detection to 2.4 V 124 + * [bit 1:2] and enable over current debouncing [bit 6]. 
125 + */ 126 + if (on) 127 + scratch |= 0x47; 128 + else 129 + scratch &= ~0x47; 130 + 131 + ret = pci_write_config_byte(chip->pdev, 0xAE, scratch); 132 + if (ret) 133 + return ret; 134 + 135 + return 0; 136 + } 137 + 138 + static int jmicron_probe(struct sdhci_pci_chip *chip) 139 + { 140 + int ret; 141 + 142 + if (chip->pdev->revision == 0) { 143 + chip->quirks |= SDHCI_QUIRK_32BIT_DMA_ADDR | 144 + SDHCI_QUIRK_32BIT_DMA_SIZE | 145 + SDHCI_QUIRK_32BIT_ADMA_SIZE | 146 + SDHCI_QUIRK_RESET_AFTER_REQUEST; 147 + } 148 + 149 + /* 150 + * JMicron chips can have two interfaces to the same hardware 151 + * in order to work around limitations in Microsoft's driver. 152 + * We need to make sure we only bind to one of them. 153 + * 154 + * This code assumes two things: 155 + * 156 + * 1. The PCI code adds subfunctions in order. 157 + * 158 + * 2. The MMC interface has a lower subfunction number 159 + * than the SD interface. 160 + */ 161 + if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_SD) { 162 + struct pci_dev *sd_dev; 163 + 164 + sd_dev = NULL; 165 + while ((sd_dev = pci_get_device(PCI_VENDOR_ID_JMICRON, 166 + PCI_DEVICE_ID_JMICRON_JMB38X_MMC, sd_dev)) != NULL) { 167 + if ((PCI_SLOT(chip->pdev->devfn) == 168 + PCI_SLOT(sd_dev->devfn)) && 169 + (chip->pdev->bus == sd_dev->bus)) 170 + break; 171 + } 172 + 173 + if (sd_dev) { 174 + pci_dev_put(sd_dev); 175 + dev_info(&chip->pdev->dev, "Refusing to bind to " 176 + "secondary interface.\n"); 177 + return -ENODEV; 178 + } 179 + } 180 + 181 + /* 182 + * JMicron chips need a bit of a nudge to enable the power 183 + * output pins. 
184 + */ 185 + ret = jmicron_pmos(chip, 1); 186 + if (ret) { 187 + dev_err(&chip->pdev->dev, "Failure enabling card power\n"); 188 + return ret; 189 + } 190 + 191 + return 0; 192 + } 193 + 194 + static void jmicron_enable_mmc(struct sdhci_host *host, int on) 195 + { 196 + u8 scratch; 197 + 198 + scratch = readb(host->ioaddr + 0xC0); 199 + 200 + if (on) 201 + scratch |= 0x01; 202 + else 203 + scratch &= ~0x01; 204 + 205 + writeb(scratch, host->ioaddr + 0xC0); 206 + } 207 + 208 + static int jmicron_probe_slot(struct sdhci_pci_slot *slot) 209 + { 210 + if (slot->chip->pdev->revision == 0) { 211 + u16 version; 212 + 213 + version = readl(slot->host->ioaddr + SDHCI_HOST_VERSION); 214 + version = (version & SDHCI_VENDOR_VER_MASK) >> 215 + SDHCI_VENDOR_VER_SHIFT; 216 + 217 + /* 218 + * Older versions of the chip have lots of nasty glitches 219 + * in the ADMA engine. It's best just to avoid it 220 + * completely. 221 + */ 222 + if (version < 0xAC) 223 + slot->host->quirks |= SDHCI_QUIRK_BROKEN_ADMA; 224 + } 225 + 226 + /* 227 + * The secondary interface requires a bit set to get the 228 + * interrupts. 
229 + */ 230 + if (slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC) 231 + jmicron_enable_mmc(slot->host, 1); 232 + 233 + return 0; 234 + } 235 + 236 + static void jmicron_remove_slot(struct sdhci_pci_slot *slot, int dead) 237 + { 238 + if (dead) 239 + return; 240 + 241 + if (slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC) 242 + jmicron_enable_mmc(slot->host, 0); 243 + } 244 + 245 + static int jmicron_suspend(struct sdhci_pci_chip *chip, pm_message_t state) 246 + { 247 + int i; 248 + 249 + if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC) { 250 + for (i = 0;i < chip->num_slots;i++) 251 + jmicron_enable_mmc(chip->slots[i]->host, 0); 252 + } 253 + 254 + return 0; 255 + } 256 + 257 + static int jmicron_resume(struct sdhci_pci_chip *chip) 258 + { 259 + int ret, i; 260 + 261 + if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC) { 262 + for (i = 0;i < chip->num_slots;i++) 263 + jmicron_enable_mmc(chip->slots[i]->host, 1); 264 + } 265 + 266 + ret = jmicron_pmos(chip, 1); 267 + if (ret) { 268 + dev_err(&chip->pdev->dev, "Failure enabling card power\n"); 269 + return ret; 270 + } 271 + 272 + return 0; 273 + } 274 + 275 + static const struct sdhci_pci_fixes sdhci_jmicron = { 276 + .probe = jmicron_probe, 277 + 278 + .probe_slot = jmicron_probe_slot, 279 + .remove_slot = jmicron_remove_slot, 280 + 281 + .suspend = jmicron_suspend, 282 + .resume = jmicron_resume, 283 + }; 284 + 285 + static const struct pci_device_id pci_ids[] __devinitdata = { 286 + { 287 + .vendor = PCI_VENDOR_ID_RICOH, 288 + .device = PCI_DEVICE_ID_RICOH_R5C822, 289 + .subvendor = PCI_ANY_ID, 290 + .subdevice = PCI_ANY_ID, 291 + .driver_data = (kernel_ulong_t)&sdhci_ricoh, 292 + }, 293 + 294 + { 295 + .vendor = PCI_VENDOR_ID_ENE, 296 + .device = PCI_DEVICE_ID_ENE_CB712_SD, 297 + .subvendor = PCI_ANY_ID, 298 + .subdevice = PCI_ANY_ID, 299 + .driver_data = (kernel_ulong_t)&sdhci_ene_712, 300 + }, 301 + 302 + { 303 + .vendor = PCI_VENDOR_ID_ENE, 304 + .device = 
PCI_DEVICE_ID_ENE_CB712_SD_2, 305 + .subvendor = PCI_ANY_ID, 306 + .subdevice = PCI_ANY_ID, 307 + .driver_data = (kernel_ulong_t)&sdhci_ene_712, 308 + }, 309 + 310 + { 311 + .vendor = PCI_VENDOR_ID_ENE, 312 + .device = PCI_DEVICE_ID_ENE_CB714_SD, 313 + .subvendor = PCI_ANY_ID, 314 + .subdevice = PCI_ANY_ID, 315 + .driver_data = (kernel_ulong_t)&sdhci_ene_714, 316 + }, 317 + 318 + { 319 + .vendor = PCI_VENDOR_ID_ENE, 320 + .device = PCI_DEVICE_ID_ENE_CB714_SD_2, 321 + .subvendor = PCI_ANY_ID, 322 + .subdevice = PCI_ANY_ID, 323 + .driver_data = (kernel_ulong_t)&sdhci_ene_714, 324 + }, 325 + 326 + { 327 + .vendor = PCI_VENDOR_ID_MARVELL, 328 + .device = PCI_DEVICE_ID_MARVELL_CAFE_SD, 329 + .subvendor = PCI_ANY_ID, 330 + .subdevice = PCI_ANY_ID, 331 + .driver_data = (kernel_ulong_t)&sdhci_cafe, 332 + }, 333 + 334 + { 335 + .vendor = PCI_VENDOR_ID_JMICRON, 336 + .device = PCI_DEVICE_ID_JMICRON_JMB38X_SD, 337 + .subvendor = PCI_ANY_ID, 338 + .subdevice = PCI_ANY_ID, 339 + .driver_data = (kernel_ulong_t)&sdhci_jmicron, 340 + }, 341 + 342 + { 343 + .vendor = PCI_VENDOR_ID_JMICRON, 344 + .device = PCI_DEVICE_ID_JMICRON_JMB38X_MMC, 345 + .subvendor = PCI_ANY_ID, 346 + .subdevice = PCI_ANY_ID, 347 + .driver_data = (kernel_ulong_t)&sdhci_jmicron, 348 + }, 349 + 350 + { /* Generic SD host controller */ 351 + PCI_DEVICE_CLASS((PCI_CLASS_SYSTEM_SDHCI << 8), 0xFFFF00) 352 + }, 353 + 354 + { /* end: all zeroes */ }, 355 + }; 356 + 357 + MODULE_DEVICE_TABLE(pci, pci_ids); 358 + 359 + /*****************************************************************************\ 360 + * * 361 + * SDHCI core callbacks * 362 + * * 363 + \*****************************************************************************/ 364 + 365 + static int sdhci_pci_enable_dma(struct sdhci_host *host) 366 + { 367 + struct sdhci_pci_slot *slot; 368 + struct pci_dev *pdev; 369 + int ret; 370 + 371 + slot = sdhci_priv(host); 372 + pdev = slot->chip->pdev; 373 + 374 + if (((pdev->class & 0xFFFF00) == (PCI_CLASS_SYSTEM_SDHCI 
<< 8)) && 375 + ((pdev->class & 0x0000FF) != PCI_SDHCI_IFDMA) && 376 + (host->flags & SDHCI_USE_DMA)) { 377 + dev_warn(&pdev->dev, "Will use DMA mode even though HW " 378 + "doesn't fully claim to support it.\n"); 379 + } 380 + 381 + ret = pci_set_dma_mask(pdev, DMA_32BIT_MASK); 382 + if (ret) 383 + return ret; 384 + 385 + pci_set_master(pdev); 386 + 387 + return 0; 388 + } 389 + 390 + static struct sdhci_ops sdhci_pci_ops = { 391 + .enable_dma = sdhci_pci_enable_dma, 392 + }; 393 + 394 + /*****************************************************************************\ 395 + * * 396 + * Suspend/resume * 397 + * * 398 + \*****************************************************************************/ 399 + 400 + #ifdef CONFIG_PM 401 + 402 + static int sdhci_pci_suspend (struct pci_dev *pdev, pm_message_t state) 403 + { 404 + struct sdhci_pci_chip *chip; 405 + struct sdhci_pci_slot *slot; 406 + int i, ret; 407 + 408 + chip = pci_get_drvdata(pdev); 409 + if (!chip) 410 + return 0; 411 + 412 + for (i = 0;i < chip->num_slots;i++) { 413 + slot = chip->slots[i]; 414 + if (!slot) 415 + continue; 416 + 417 + ret = sdhci_suspend_host(slot->host, state); 418 + 419 + if (ret) { 420 + for (i--;i >= 0;i--) 421 + sdhci_resume_host(chip->slots[i]->host); 422 + return ret; 423 + } 424 + } 425 + 426 + if (chip->fixes && chip->fixes->suspend) { 427 + ret = chip->fixes->suspend(chip, state); 428 + if (ret) { 429 + for (i = chip->num_slots - 1;i >= 0;i--) 430 + sdhci_resume_host(chip->slots[i]->host); 431 + return ret; 432 + } 433 + } 434 + 435 + pci_save_state(pdev); 436 + pci_enable_wake(pdev, pci_choose_state(pdev, state), 0); 437 + pci_disable_device(pdev); 438 + pci_set_power_state(pdev, pci_choose_state(pdev, state)); 439 + 440 + return 0; 441 + } 442 + 443 + static int sdhci_pci_resume (struct pci_dev *pdev) 444 + { 445 + struct sdhci_pci_chip *chip; 446 + struct sdhci_pci_slot *slot; 447 + int i, ret; 448 + 449 + chip = pci_get_drvdata(pdev); 450 + if (!chip) 451 + return 0; 452 + 
453 + pci_set_power_state(pdev, PCI_D0); 454 + pci_restore_state(pdev); 455 + ret = pci_enable_device(pdev); 456 + if (ret) 457 + return ret; 458 + 459 + if (chip->fixes && chip->fixes->resume) { 460 + ret = chip->fixes->resume(chip); 461 + if (ret) 462 + return ret; 463 + } 464 + 465 + for (i = 0;i < chip->num_slots;i++) { 466 + slot = chip->slots[i]; 467 + if (!slot) 468 + continue; 469 + 470 + ret = sdhci_resume_host(slot->host); 471 + if (ret) 472 + return ret; 473 + } 474 + 475 + return 0; 476 + } 477 + 478 + #else /* CONFIG_PM */ 479 + 480 + #define sdhci_pci_suspend NULL 481 + #define sdhci_pci_resume NULL 482 + 483 + #endif /* CONFIG_PM */ 484 + 485 + /*****************************************************************************\ 486 + * * 487 + * Device probing/removal * 488 + * * 489 + \*****************************************************************************/ 490 + 491 + static struct sdhci_pci_slot * __devinit sdhci_pci_probe_slot( 492 + struct pci_dev *pdev, struct sdhci_pci_chip *chip, int bar) 493 + { 494 + struct sdhci_pci_slot *slot; 495 + struct sdhci_host *host; 496 + 497 + resource_size_t addr; 498 + 499 + int ret; 500 + 501 + if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM)) { 502 + dev_err(&pdev->dev, "BAR %d is not iomem. Aborting.\n", bar); 503 + return ERR_PTR(-ENODEV); 504 + } 505 + 506 + if (pci_resource_len(pdev, bar) != 0x100) { 507 + dev_err(&pdev->dev, "Invalid iomem size. You may " 508 + "experience problems.\n"); 509 + } 510 + 511 + if ((pdev->class & 0x0000FF) == PCI_SDHCI_IFVENDOR) { 512 + dev_err(&pdev->dev, "Vendor specific interface. Aborting.\n"); 513 + return ERR_PTR(-ENODEV); 514 + } 515 + 516 + if ((pdev->class & 0x0000FF) > PCI_SDHCI_IFVENDOR) { 517 + dev_err(&pdev->dev, "Unknown interface. 
Aborting.\n"); 518 + return ERR_PTR(-ENODEV); 519 + } 520 + 521 + host = sdhci_alloc_host(&pdev->dev, sizeof(struct sdhci_pci_slot)); 522 + if (IS_ERR(host)) { 523 + ret = PTR_ERR(host); 524 + goto unmap; 525 + } 526 + 527 + slot = sdhci_priv(host); 528 + 529 + slot->chip = chip; 530 + slot->host = host; 531 + slot->pci_bar = bar; 532 + 533 + host->hw_name = "PCI"; 534 + host->ops = &sdhci_pci_ops; 535 + host->quirks = chip->quirks; 536 + 537 + host->irq = pdev->irq; 538 + 539 + ret = pci_request_region(pdev, bar, mmc_hostname(host->mmc)); 540 + if (ret) { 541 + dev_err(&pdev->dev, "cannot request region\n"); 542 + return ERR_PTR(ret); 543 + } 544 + 545 + addr = pci_resource_start(pdev, bar); 546 + host->ioaddr = ioremap_nocache(addr, pci_resource_len(pdev, bar)); 547 + if (!host->ioaddr) { 548 + dev_err(&pdev->dev, "failed to remap registers\n"); 549 + goto release; 550 + } 551 + 552 + if (chip->fixes && chip->fixes->probe_slot) { 553 + ret = chip->fixes->probe_slot(slot); 554 + if (ret) 555 + goto unmap; 556 + } 557 + 558 + ret = sdhci_add_host(host); 559 + if (ret) 560 + goto remove; 561 + 562 + return slot; 563 + 564 + remove: 565 + if (chip->fixes && chip->fixes->remove_slot) 566 + chip->fixes->remove_slot(slot, 0); 567 + 568 + unmap: 569 + iounmap(host->ioaddr); 570 + 571 + release: 572 + pci_release_region(pdev, bar); 573 + sdhci_free_host(host); 574 + 575 + return ERR_PTR(ret); 576 + } 577 + 578 + static void sdhci_pci_remove_slot(struct sdhci_pci_slot *slot) 579 + { 580 + int dead; 581 + u32 scratch; 582 + 583 + dead = 0; 584 + scratch = readl(slot->host->ioaddr + SDHCI_INT_STATUS); 585 + if (scratch == (u32)-1) 586 + dead = 1; 587 + 588 + sdhci_remove_host(slot->host, dead); 589 + 590 + if (slot->chip->fixes && slot->chip->fixes->remove_slot) 591 + slot->chip->fixes->remove_slot(slot, dead); 592 + 593 + pci_release_region(slot->chip->pdev, slot->pci_bar); 594 + 595 + sdhci_free_host(slot->host); 596 + } 597 + 598 + static int __devinit 
sdhci_pci_probe(struct pci_dev *pdev, 599 + const struct pci_device_id *ent) 600 + { 601 + struct sdhci_pci_chip *chip; 602 + struct sdhci_pci_slot *slot; 603 + 604 + u8 slots, rev, first_bar; 605 + int ret, i; 606 + 607 + BUG_ON(pdev == NULL); 608 + BUG_ON(ent == NULL); 609 + 610 + pci_read_config_byte(pdev, PCI_CLASS_REVISION, &rev); 611 + 612 + dev_info(&pdev->dev, "SDHCI controller found [%04x:%04x] (rev %x)\n", 613 + (int)pdev->vendor, (int)pdev->device, (int)rev); 614 + 615 + ret = pci_read_config_byte(pdev, PCI_SLOT_INFO, &slots); 616 + if (ret) 617 + return ret; 618 + 619 + slots = PCI_SLOT_INFO_SLOTS(slots) + 1; 620 + dev_dbg(&pdev->dev, "found %d slot(s)\n", slots); 621 + if (slots == 0) 622 + return -ENODEV; 623 + 624 + BUG_ON(slots > MAX_SLOTS); 625 + 626 + ret = pci_read_config_byte(pdev, PCI_SLOT_INFO, &first_bar); 627 + if (ret) 628 + return ret; 629 + 630 + first_bar &= PCI_SLOT_INFO_FIRST_BAR_MASK; 631 + 632 + if (first_bar > 5) { 633 + dev_err(&pdev->dev, "Invalid first BAR. 
Aborting.\n"); 634 + return -ENODEV; 635 + } 636 + 637 + ret = pci_enable_device(pdev); 638 + if (ret) 639 + return ret; 640 + 641 + chip = kzalloc(sizeof(struct sdhci_pci_chip), GFP_KERNEL); 642 + if (!chip) { 643 + ret = -ENOMEM; 644 + goto err; 645 + } 646 + 647 + chip->pdev = pdev; 648 + chip->fixes = (const struct sdhci_pci_fixes*)ent->driver_data; 649 + if (chip->fixes) 650 + chip->quirks = chip->fixes->quirks; 651 + chip->num_slots = slots; 652 + 653 + pci_set_drvdata(pdev, chip); 654 + 655 + if (chip->fixes && chip->fixes->probe) { 656 + ret = chip->fixes->probe(chip); 657 + if (ret) 658 + goto free; 659 + } 660 + 661 + for (i = 0;i < slots;i++) { 662 + slot = sdhci_pci_probe_slot(pdev, chip, first_bar + i); 663 + if (IS_ERR(slot)) { 664 + for (i--;i >= 0;i--) 665 + sdhci_pci_remove_slot(chip->slots[i]); 666 + ret = PTR_ERR(slot); 667 + goto free; 668 + } 669 + 670 + chip->slots[i] = slot; 671 + } 672 + 673 + return 0; 674 + 675 + free: 676 + pci_set_drvdata(pdev, NULL); 677 + kfree(chip); 678 + 679 + err: 680 + pci_disable_device(pdev); 681 + return ret; 682 + } 683 + 684 + static void __devexit sdhci_pci_remove(struct pci_dev *pdev) 685 + { 686 + int i; 687 + struct sdhci_pci_chip *chip; 688 + 689 + chip = pci_get_drvdata(pdev); 690 + 691 + if (chip) { 692 + for (i = 0;i < chip->num_slots; i++) 693 + sdhci_pci_remove_slot(chip->slots[i]); 694 + 695 + pci_set_drvdata(pdev, NULL); 696 + kfree(chip); 697 + } 698 + 699 + pci_disable_device(pdev); 700 + } 701 + 702 + static struct pci_driver sdhci_driver = { 703 + .name = "sdhci-pci", 704 + .id_table = pci_ids, 705 + .probe = sdhci_pci_probe, 706 + .remove = __devexit_p(sdhci_pci_remove), 707 + .suspend = sdhci_pci_suspend, 708 + .resume = sdhci_pci_resume, 709 + }; 710 + 711 + /*****************************************************************************\ 712 + * * 713 + * Driver init/exit * 714 + * * 715 + \*****************************************************************************/ 716 + 717 + static int 
__init sdhci_drv_init(void) 718 + { 719 + return pci_register_driver(&sdhci_driver); 720 + } 721 + 722 + static void __exit sdhci_drv_exit(void) 723 + { 724 + pci_unregister_driver(&sdhci_driver); 725 + } 726 + 727 + module_init(sdhci_drv_init); 728 + module_exit(sdhci_drv_exit); 729 + 730 + MODULE_AUTHOR("Pierre Ossman <drzeus@drzeus.cx>"); 731 + MODULE_DESCRIPTION("Secure Digital Host Controller Interface PCI driver"); 732 + MODULE_LICENSE("GPL");
+540 -472
drivers/mmc/host/sdhci.c
··· 15 15 16 16 #include <linux/delay.h> 17 17 #include <linux/highmem.h> 18 - #include <linux/pci.h> 18 + #include <linux/io.h> 19 19 #include <linux/dma-mapping.h> 20 20 #include <linux/scatterlist.h> 21 21 ··· 31 31 pr_debug(DRIVER_NAME " [%s()]: " f, __func__,## x) 32 32 33 33 static unsigned int debug_quirks = 0; 34 - 35 - /* 36 - * Different quirks to handle when the hardware deviates from a strict 37 - * interpretation of the SDHCI specification. 38 - */ 39 - 40 - /* Controller doesn't honor resets unless we touch the clock register */ 41 - #define SDHCI_QUIRK_CLOCK_BEFORE_RESET (1<<0) 42 - /* Controller has bad caps bits, but really supports DMA */ 43 - #define SDHCI_QUIRK_FORCE_DMA (1<<1) 44 - /* Controller doesn't like to be reset when there is no card inserted. */ 45 - #define SDHCI_QUIRK_NO_CARD_NO_RESET (1<<2) 46 - /* Controller doesn't like clearing the power reg before a change */ 47 - #define SDHCI_QUIRK_SINGLE_POWER_WRITE (1<<3) 48 - /* Controller has flaky internal state so reset it on each ios change */ 49 - #define SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS (1<<4) 50 - /* Controller has an unusable DMA engine */ 51 - #define SDHCI_QUIRK_BROKEN_DMA (1<<5) 52 - /* Controller can only DMA from 32-bit aligned addresses */ 53 - #define SDHCI_QUIRK_32BIT_DMA_ADDR (1<<6) 54 - /* Controller can only DMA chunk sizes that are a multiple of 32 bits */ 55 - #define SDHCI_QUIRK_32BIT_DMA_SIZE (1<<7) 56 - /* Controller needs to be reset after each request to stay stable */ 57 - #define SDHCI_QUIRK_RESET_AFTER_REQUEST (1<<8) 58 - /* Controller needs voltage and power writes to happen separately */ 59 - #define SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER (1<<9) 60 - /* Controller has an off-by-one issue with timeout value */ 61 - #define SDHCI_QUIRK_INCR_TIMEOUT_CONTROL (1<<10) 62 - 63 - static const struct pci_device_id pci_ids[] __devinitdata = { 64 - { 65 - .vendor = PCI_VENDOR_ID_RICOH, 66 - .device = PCI_DEVICE_ID_RICOH_R5C822, 67 - .subvendor = PCI_VENDOR_ID_IBM, 68 - 
.subdevice = PCI_ANY_ID, 69 - .driver_data = SDHCI_QUIRK_CLOCK_BEFORE_RESET | 70 - SDHCI_QUIRK_FORCE_DMA, 71 - }, 72 - 73 - { 74 - .vendor = PCI_VENDOR_ID_RICOH, 75 - .device = PCI_DEVICE_ID_RICOH_R5C822, 76 - .subvendor = PCI_VENDOR_ID_SAMSUNG, 77 - .subdevice = PCI_ANY_ID, 78 - .driver_data = SDHCI_QUIRK_FORCE_DMA | 79 - SDHCI_QUIRK_NO_CARD_NO_RESET, 80 - }, 81 - 82 - { 83 - .vendor = PCI_VENDOR_ID_RICOH, 84 - .device = PCI_DEVICE_ID_RICOH_R5C822, 85 - .subvendor = PCI_ANY_ID, 86 - .subdevice = PCI_ANY_ID, 87 - .driver_data = SDHCI_QUIRK_FORCE_DMA, 88 - }, 89 - 90 - { 91 - .vendor = PCI_VENDOR_ID_TI, 92 - .device = PCI_DEVICE_ID_TI_XX21_XX11_SD, 93 - .subvendor = PCI_ANY_ID, 94 - .subdevice = PCI_ANY_ID, 95 - .driver_data = SDHCI_QUIRK_FORCE_DMA, 96 - }, 97 - 98 - { 99 - .vendor = PCI_VENDOR_ID_ENE, 100 - .device = PCI_DEVICE_ID_ENE_CB712_SD, 101 - .subvendor = PCI_ANY_ID, 102 - .subdevice = PCI_ANY_ID, 103 - .driver_data = SDHCI_QUIRK_SINGLE_POWER_WRITE | 104 - SDHCI_QUIRK_BROKEN_DMA, 105 - }, 106 - 107 - { 108 - .vendor = PCI_VENDOR_ID_ENE, 109 - .device = PCI_DEVICE_ID_ENE_CB712_SD_2, 110 - .subvendor = PCI_ANY_ID, 111 - .subdevice = PCI_ANY_ID, 112 - .driver_data = SDHCI_QUIRK_SINGLE_POWER_WRITE | 113 - SDHCI_QUIRK_BROKEN_DMA, 114 - }, 115 - 116 - { 117 - .vendor = PCI_VENDOR_ID_ENE, 118 - .device = PCI_DEVICE_ID_ENE_CB714_SD, 119 - .subvendor = PCI_ANY_ID, 120 - .subdevice = PCI_ANY_ID, 121 - .driver_data = SDHCI_QUIRK_SINGLE_POWER_WRITE | 122 - SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS | 123 - SDHCI_QUIRK_BROKEN_DMA, 124 - }, 125 - 126 - { 127 - .vendor = PCI_VENDOR_ID_ENE, 128 - .device = PCI_DEVICE_ID_ENE_CB714_SD_2, 129 - .subvendor = PCI_ANY_ID, 130 - .subdevice = PCI_ANY_ID, 131 - .driver_data = SDHCI_QUIRK_SINGLE_POWER_WRITE | 132 - SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS | 133 - SDHCI_QUIRK_BROKEN_DMA, 134 - }, 135 - 136 - { 137 - .vendor = PCI_VENDOR_ID_MARVELL, 138 - .device = PCI_DEVICE_ID_MARVELL_CAFE_SD, 139 - .subvendor = PCI_ANY_ID, 140 - .subdevice = 
PCI_ANY_ID, 141 - .driver_data = SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER | 142 - SDHCI_QUIRK_INCR_TIMEOUT_CONTROL, 143 - }, 144 - 145 - { 146 - .vendor = PCI_VENDOR_ID_JMICRON, 147 - .device = PCI_DEVICE_ID_JMICRON_JMB38X_SD, 148 - .subvendor = PCI_ANY_ID, 149 - .subdevice = PCI_ANY_ID, 150 - .driver_data = SDHCI_QUIRK_32BIT_DMA_ADDR | 151 - SDHCI_QUIRK_32BIT_DMA_SIZE | 152 - SDHCI_QUIRK_RESET_AFTER_REQUEST, 153 - }, 154 - 155 - { /* Generic SD host controller */ 156 - PCI_DEVICE_CLASS((PCI_CLASS_SYSTEM_SDHCI << 8), 0xFFFF00) 157 - }, 158 - 159 - { /* end: all zeroes */ }, 160 - }; 161 - 162 - MODULE_DEVICE_TABLE(pci, pci_ids); 163 34 164 35 static void sdhci_prepare_data(struct sdhci_host *, struct mmc_data *); 165 36 static void sdhci_finish_data(struct sdhci_host *); ··· 86 215 { 87 216 unsigned long timeout; 88 217 89 - if (host->chip->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) { 218 + if (host->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) { 90 219 if (!(readl(host->ioaddr + SDHCI_PRESENT_STATE) & 91 220 SDHCI_CARD_PRESENT)) 92 221 return; ··· 124 253 SDHCI_INT_END_BIT | SDHCI_INT_CRC | SDHCI_INT_TIMEOUT | 125 254 SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT | 126 255 SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL | 127 - SDHCI_INT_DMA_END | SDHCI_INT_DATA_END | SDHCI_INT_RESPONSE; 256 + SDHCI_INT_DMA_END | SDHCI_INT_DATA_END | SDHCI_INT_RESPONSE | 257 + SDHCI_INT_ADMA_ERROR; 128 258 129 259 writel(intmask, host->ioaddr + SDHCI_INT_ENABLE); 130 260 writel(intmask, host->ioaddr + SDHCI_SIGNAL_ENABLE); ··· 315 443 DBG("PIO transfer complete.\n"); 316 444 } 317 445 318 - static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_data *data) 446 + static char *sdhci_kmap_atomic(struct scatterlist *sg, unsigned long *flags) 447 + { 448 + local_irq_save(*flags); 449 + return kmap_atomic(sg_page(sg), KM_BIO_SRC_IRQ) + sg->offset; 450 + } 451 + 452 + static void sdhci_kunmap_atomic(void *buffer, unsigned long *flags) 453 + { 454 + kunmap_atomic(buffer, KM_BIO_SRC_IRQ); 455 + 
local_irq_restore(*flags); 456 + } 457 + 458 + static int sdhci_adma_table_pre(struct sdhci_host *host, 459 + struct mmc_data *data) 460 + { 461 + int direction; 462 + 463 + u8 *desc; 464 + u8 *align; 465 + dma_addr_t addr; 466 + dma_addr_t align_addr; 467 + int len, offset; 468 + 469 + struct scatterlist *sg; 470 + int i; 471 + char *buffer; 472 + unsigned long flags; 473 + 474 + /* 475 + * The spec does not specify endianness of descriptor table. 476 + * We currently guess that it is LE. 477 + */ 478 + 479 + if (data->flags & MMC_DATA_READ) 480 + direction = DMA_FROM_DEVICE; 481 + else 482 + direction = DMA_TO_DEVICE; 483 + 484 + /* 485 + * The ADMA descriptor table is mapped further down as we 486 + * need to fill it with data first. 487 + */ 488 + 489 + host->align_addr = dma_map_single(mmc_dev(host->mmc), 490 + host->align_buffer, 128 * 4, direction); 491 + if (dma_mapping_error(host->align_addr)) 492 + goto fail; 493 + BUG_ON(host->align_addr & 0x3); 494 + 495 + host->sg_count = dma_map_sg(mmc_dev(host->mmc), 496 + data->sg, data->sg_len, direction); 497 + if (host->sg_count == 0) 498 + goto unmap_align; 499 + 500 + desc = host->adma_desc; 501 + align = host->align_buffer; 502 + 503 + align_addr = host->align_addr; 504 + 505 + for_each_sg(data->sg, sg, host->sg_count, i) { 506 + addr = sg_dma_address(sg); 507 + len = sg_dma_len(sg); 508 + 509 + /* 510 + * The SDHCI specification states that ADMA 511 + * addresses must be 32-bit aligned. If they 512 + * aren't, then we use a bounce buffer for 513 + * the (up to three) bytes that screw up the 514 + * alignment. 
515 + */ 516 + offset = (4 - (addr & 0x3)) & 0x3; 517 + if (offset) { 518 + if (data->flags & MMC_DATA_WRITE) { 519 + buffer = sdhci_kmap_atomic(sg, &flags); 520 + memcpy(align, buffer, offset); 521 + sdhci_kunmap_atomic(buffer, &flags); 522 + } 523 + 524 + desc[7] = (align_addr >> 24) & 0xff; 525 + desc[6] = (align_addr >> 16) & 0xff; 526 + desc[5] = (align_addr >> 8) & 0xff; 527 + desc[4] = (align_addr >> 0) & 0xff; 528 + 529 + BUG_ON(offset > 65536); 530 + 531 + desc[3] = (offset >> 8) & 0xff; 532 + desc[2] = (offset >> 0) & 0xff; 533 + 534 + desc[1] = 0x00; 535 + desc[0] = 0x21; /* tran, valid */ 536 + 537 + align += 4; 538 + align_addr += 4; 539 + 540 + desc += 8; 541 + 542 + addr += offset; 543 + len -= offset; 544 + } 545 + 546 + desc[7] = (addr >> 24) & 0xff; 547 + desc[6] = (addr >> 16) & 0xff; 548 + desc[5] = (addr >> 8) & 0xff; 549 + desc[4] = (addr >> 0) & 0xff; 550 + 551 + BUG_ON(len > 65536); 552 + 553 + desc[3] = (len >> 8) & 0xff; 554 + desc[2] = (len >> 0) & 0xff; 555 + 556 + desc[1] = 0x00; 557 + desc[0] = 0x21; /* tran, valid */ 558 + 559 + desc += 8; 560 + 561 + /* 562 + * If this triggers then we have a calculation bug 563 + * somewhere. :/ 564 + */ 565 + WARN_ON((desc - host->adma_desc) > (128 * 2 + 1) * 4); 566 + } 567 + 568 + /* 569 + * Add a terminating entry. 570 + */ 571 + desc[7] = 0; 572 + desc[6] = 0; 573 + desc[5] = 0; 574 + desc[4] = 0; 575 + 576 + desc[3] = 0; 577 + desc[2] = 0; 578 + 579 + desc[1] = 0x00; 580 + desc[0] = 0x03; /* nop, end, valid */ 581 + 582 + /* 583 + * Resync align buffer as we might have changed it. 
584 + */ 585 + if (data->flags & MMC_DATA_WRITE) { 586 + dma_sync_single_for_device(mmc_dev(host->mmc), 587 + host->align_addr, 128 * 4, direction); 588 + } 589 + 590 + host->adma_addr = dma_map_single(mmc_dev(host->mmc), 591 + host->adma_desc, (128 * 2 + 1) * 4, DMA_TO_DEVICE); 592 + if (dma_mapping_error(host->align_addr)) 593 + goto unmap_entries; 594 + BUG_ON(host->adma_addr & 0x3); 595 + 596 + return 0; 597 + 598 + unmap_entries: 599 + dma_unmap_sg(mmc_dev(host->mmc), data->sg, 600 + data->sg_len, direction); 601 + unmap_align: 602 + dma_unmap_single(mmc_dev(host->mmc), host->align_addr, 603 + 128 * 4, direction); 604 + fail: 605 + return -EINVAL; 606 + } 607 + 608 + static void sdhci_adma_table_post(struct sdhci_host *host, 609 + struct mmc_data *data) 610 + { 611 + int direction; 612 + 613 + struct scatterlist *sg; 614 + int i, size; 615 + u8 *align; 616 + char *buffer; 617 + unsigned long flags; 618 + 619 + if (data->flags & MMC_DATA_READ) 620 + direction = DMA_FROM_DEVICE; 621 + else 622 + direction = DMA_TO_DEVICE; 623 + 624 + dma_unmap_single(mmc_dev(host->mmc), host->adma_addr, 625 + (128 * 2 + 1) * 4, DMA_TO_DEVICE); 626 + 627 + dma_unmap_single(mmc_dev(host->mmc), host->align_addr, 628 + 128 * 4, direction); 629 + 630 + if (data->flags & MMC_DATA_READ) { 631 + dma_sync_sg_for_cpu(mmc_dev(host->mmc), data->sg, 632 + data->sg_len, direction); 633 + 634 + align = host->align_buffer; 635 + 636 + for_each_sg(data->sg, sg, host->sg_count, i) { 637 + if (sg_dma_address(sg) & 0x3) { 638 + size = 4 - (sg_dma_address(sg) & 0x3); 639 + 640 + buffer = sdhci_kmap_atomic(sg, &flags); 641 + memcpy(buffer, align, size); 642 + sdhci_kunmap_atomic(buffer, &flags); 643 + 644 + align += 4; 645 + } 646 + } 647 + } 648 + 649 + dma_unmap_sg(mmc_dev(host->mmc), data->sg, 650 + data->sg_len, direction); 651 + } 652 + 653 + static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_data *data) 319 654 { 320 655 u8 count; 321 656 unsigned target_timeout, current_timeout; 
322 657 323 - WARN_ON(host->data); 324 - 325 - if (data == NULL) 326 - return; 327 - 328 - /* Sanity checks */ 329 - BUG_ON(data->blksz * data->blocks > 524288); 330 - BUG_ON(data->blksz > host->mmc->max_blk_size); 331 - BUG_ON(data->blocks > 65535); 332 - 333 - host->data = data; 334 - host->data_early = 0; 658 + /* 659 + * If the host controller provides us with an incorrect timeout 660 + * value, just skip the check and use 0xE. The hardware may take 661 + * longer to time out, but that's much better than having a too-short 662 + * timeout value. 663 + */ 664 + if ((host->quirks & SDHCI_QUIRK_BROKEN_TIMEOUT_VAL)) 665 + return 0xE; 335 666 336 667 /* timeout in us */ 337 668 target_timeout = data->timeout_ns / 1000 + ··· 559 484 break; 560 485 } 561 486 562 - /* 563 - * Compensate for an off-by-one error in the CaFe hardware; otherwise, 564 - * a too-small count gives us interrupt timeouts. 565 - */ 566 - if ((host->chip->quirks & SDHCI_QUIRK_INCR_TIMEOUT_CONTROL)) 567 - count++; 568 - 569 487 if (count >= 0xF) { 570 488 printk(KERN_WARNING "%s: Too large timeout requested!\n", 571 489 mmc_hostname(host->mmc)); 572 490 count = 0xE; 573 491 } 574 492 493 + return count; 494 + } 495 + 496 + static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_data *data) 497 + { 498 + u8 count; 499 + u8 ctrl; 500 + int ret; 501 + 502 + WARN_ON(host->data); 503 + 504 + if (data == NULL) 505 + return; 506 + 507 + /* Sanity checks */ 508 + BUG_ON(data->blksz * data->blocks > 524288); 509 + BUG_ON(data->blksz > host->mmc->max_blk_size); 510 + BUG_ON(data->blocks > 65535); 511 + 512 + host->data = data; 513 + host->data_early = 0; 514 + 515 + count = sdhci_calc_timeout(host, data); 575 516 writeb(count, host->ioaddr + SDHCI_TIMEOUT_CONTROL); 576 517 577 518 if (host->flags & SDHCI_USE_DMA) 578 519 host->flags |= SDHCI_REQ_USE_DMA; 579 520 580 - if (unlikely((host->flags & SDHCI_REQ_USE_DMA) && 581 - (host->chip->quirks & SDHCI_QUIRK_32BIT_DMA_SIZE) && 582 - ((data->blksz * 
data->blocks) & 0x3))) { 583 - DBG("Reverting to PIO because of transfer size (%d)\n", 584 - data->blksz * data->blocks); 585 - host->flags &= ~SDHCI_REQ_USE_DMA; 521 + /* 522 + * FIXME: This doesn't account for merging when mapping the 523 + * scatterlist. 524 + */ 525 + if (host->flags & SDHCI_REQ_USE_DMA) { 526 + int broken, i; 527 + struct scatterlist *sg; 528 + 529 + broken = 0; 530 + if (host->flags & SDHCI_USE_ADMA) { 531 + if (host->quirks & SDHCI_QUIRK_32BIT_ADMA_SIZE) 532 + broken = 1; 533 + } else { 534 + if (host->quirks & SDHCI_QUIRK_32BIT_DMA_SIZE) 535 + broken = 1; 536 + } 537 + 538 + if (unlikely(broken)) { 539 + for_each_sg(data->sg, sg, data->sg_len, i) { 540 + if (sg->length & 0x3) { 541 + DBG("Reverting to PIO because of " 542 + "transfer size (%d)\n", 543 + sg->length); 544 + host->flags &= ~SDHCI_REQ_USE_DMA; 545 + break; 546 + } 547 + } 548 + } 586 549 } 587 550 588 551 /* 589 552 * The assumption here being that alignment is the same after 590 553 * translation to device address space. 591 554 */ 592 - if (unlikely((host->flags & SDHCI_REQ_USE_DMA) && 593 - (host->chip->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR) && 594 - (data->sg->offset & 0x3))) { 595 - DBG("Reverting to PIO because of bad alignment\n"); 596 - host->flags &= ~SDHCI_REQ_USE_DMA; 555 + if (host->flags & SDHCI_REQ_USE_DMA) { 556 + int broken, i; 557 + struct scatterlist *sg; 558 + 559 + broken = 0; 560 + if (host->flags & SDHCI_USE_ADMA) { 561 + /* 562 + * As we use 3 byte chunks to work around 563 + * alignment problems, we need to check this 564 + * quirk. 
565 + */ 566 + if (host->quirks & SDHCI_QUIRK_32BIT_ADMA_SIZE) 567 + broken = 1; 568 + } else { 569 + if (host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR) 570 + broken = 1; 571 + } 572 + 573 + if (unlikely(broken)) { 574 + for_each_sg(data->sg, sg, data->sg_len, i) { 575 + if (sg->offset & 0x3) { 576 + DBG("Reverting to PIO because of " 577 + "bad alignment\n"); 578 + host->flags &= ~SDHCI_REQ_USE_DMA; 579 + break; 580 + } 581 + } 582 + } 597 583 } 598 584 599 585 if (host->flags & SDHCI_REQ_USE_DMA) { 600 - int count; 586 + if (host->flags & SDHCI_USE_ADMA) { 587 + ret = sdhci_adma_table_pre(host, data); 588 + if (ret) { 589 + /* 590 + * This only happens when someone fed 591 + * us an invalid request. 592 + */ 593 + WARN_ON(1); 594 + host->flags &= ~SDHCI_USE_DMA; 595 + } else { 596 + writel(host->adma_addr, 597 + host->ioaddr + SDHCI_ADMA_ADDRESS); 598 + } 599 + } else { 600 + int sg_cnt; 601 601 602 - count = pci_map_sg(host->chip->pdev, data->sg, data->sg_len, 603 - (data->flags & MMC_DATA_READ)?PCI_DMA_FROMDEVICE:PCI_DMA_TODEVICE); 604 - BUG_ON(count != 1); 602 + sg_cnt = dma_map_sg(mmc_dev(host->mmc), 603 + data->sg, data->sg_len, 604 + (data->flags & MMC_DATA_READ) ? 605 + DMA_FROM_DEVICE : 606 + DMA_TO_DEVICE); 607 + if (sg_cnt == 0) { 608 + /* 609 + * This only happens when someone fed 610 + * us an invalid request. 611 + */ 612 + WARN_ON(1); 613 + host->flags &= ~SDHCI_USE_DMA; 614 + } else { 615 + WARN_ON(count != 1); 616 + writel(sg_dma_address(data->sg), 617 + host->ioaddr + SDHCI_DMA_ADDRESS); 618 + } 619 + } 620 + } 605 621 606 - writel(sg_dma_address(data->sg), host->ioaddr + SDHCI_DMA_ADDRESS); 607 - } else { 622 + /* 623 + * Always adjust the DMA selection as some controllers 624 + * (e.g. JMicron) can't do PIO properly when the selection 625 + * is ADMA. 
626 + */ 627 + if (host->version >= SDHCI_SPEC_200) { 628 + ctrl = readb(host->ioaddr + SDHCI_HOST_CONTROL); 629 + ctrl &= ~SDHCI_CTRL_DMA_MASK; 630 + if ((host->flags & SDHCI_REQ_USE_DMA) && 631 + (host->flags & SDHCI_USE_ADMA)) 632 + ctrl |= SDHCI_CTRL_ADMA32; 633 + else 634 + ctrl |= SDHCI_CTRL_SDMA; 635 + writeb(ctrl, host->ioaddr + SDHCI_HOST_CONTROL); 636 + } 637 + 638 + if (!(host->flags & SDHCI_REQ_USE_DMA)) { 608 639 host->cur_sg = data->sg; 609 640 host->num_sg = data->sg_len; 610 641 ··· 748 567 static void sdhci_finish_data(struct sdhci_host *host) 749 568 { 750 569 struct mmc_data *data; 751 - u16 blocks; 752 570 753 571 BUG_ON(!host->data); 754 572 ··· 755 575 host->data = NULL; 756 576 757 577 if (host->flags & SDHCI_REQ_USE_DMA) { 758 - pci_unmap_sg(host->chip->pdev, data->sg, data->sg_len, 759 - (data->flags & MMC_DATA_READ)?PCI_DMA_FROMDEVICE:PCI_DMA_TODEVICE); 578 + if (host->flags & SDHCI_USE_ADMA) 579 + sdhci_adma_table_post(host, data); 580 + else { 581 + dma_unmap_sg(mmc_dev(host->mmc), data->sg, 582 + data->sg_len, (data->flags & MMC_DATA_READ) ? 583 + DMA_FROM_DEVICE : DMA_TO_DEVICE); 584 + } 760 585 } 761 586 762 587 /* 763 - * Controller doesn't count down when in single block mode. 588 + * The specification states that the block count register must 589 + * be updated, but it does not specify at what point in the 590 + * data flow. That makes the register entirely useless to read 591 + * back so we have to assume that nothing made it to the card 592 + * in the event of an error. 764 593 */ 765 - if (data->blocks == 1) 766 - blocks = (data->error == 0) ? 
0 : 1; 594 + if (data->error) 595 + data->bytes_xfered = 0; 767 596 else 768 - blocks = readw(host->ioaddr + SDHCI_BLOCK_COUNT); 769 - data->bytes_xfered = data->blksz * (data->blocks - blocks); 770 - 771 - if (!data->error && blocks) { 772 - printk(KERN_ERR "%s: Controller signalled completion even " 773 - "though there were blocks left.\n", 774 - mmc_hostname(host->mmc)); 775 - data->error = -EIO; 776 - } 597 + data->bytes_xfered = data->blksz * data->blocks; 777 598 778 599 if (data->stop) { 779 600 /* ··· 956 775 * Spec says that we should clear the power reg before setting 957 776 * a new value. Some controllers don't seem to like this though. 958 777 */ 959 - if (!(host->chip->quirks & SDHCI_QUIRK_SINGLE_POWER_WRITE)) 778 + if (!(host->quirks & SDHCI_QUIRK_SINGLE_POWER_WRITE)) 960 779 writeb(0, host->ioaddr + SDHCI_POWER_CONTROL); 961 780 962 781 pwr = SDHCI_POWER_ON; ··· 978 797 } 979 798 980 799 /* 981 - * At least the CaFe chip gets confused if we set the voltage 800 + * At least the Marvell CaFe chip gets confused if we set the voltage 982 801 * and set turn on power at the same time, so set the voltage first. 983 802 */ 984 - if ((host->chip->quirks & SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER)) 803 + if ((host->quirks & SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER)) 985 804 writeb(pwr & ~SDHCI_POWER_ON, 986 805 host->ioaddr + SDHCI_POWER_CONTROL); 987 806 ··· 1014 833 1015 834 host->mrq = mrq; 1016 835 1017 - if (!(readl(host->ioaddr + SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT)) { 836 + if (!(readl(host->ioaddr + SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT) 837 + || (host->flags & SDHCI_DEVICE_DEAD)) { 1018 838 host->mrq->cmd->error = -ENOMEDIUM; 1019 839 tasklet_schedule(&host->finish_tasklet); 1020 840 } else ··· 1034 852 host = mmc_priv(mmc); 1035 853 1036 854 spin_lock_irqsave(&host->lock, flags); 855 + 856 + if (host->flags & SDHCI_DEVICE_DEAD) 857 + goto out; 1037 858 1038 859 /* 1039 860 * Reset the chip on each power off. 
··· 1073 888 * signalling timeout and CRC errors even on CMD0. Resetting 1074 889 * it on each ios seems to solve the problem. 1075 890 */ 1076 - if(host->chip->quirks & SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS) 891 + if(host->quirks & SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS) 1077 892 sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA); 1078 893 894 + out: 1079 895 mmiowb(); 1080 896 spin_unlock_irqrestore(&host->lock, flags); 1081 897 } ··· 1091 905 1092 906 spin_lock_irqsave(&host->lock, flags); 1093 907 1094 - present = readl(host->ioaddr + SDHCI_PRESENT_STATE); 908 + if (host->flags & SDHCI_DEVICE_DEAD) 909 + present = 0; 910 + else 911 + present = readl(host->ioaddr + SDHCI_PRESENT_STATE); 1095 912 1096 913 spin_unlock_irqrestore(&host->lock, flags); 1097 914 ··· 1111 922 1112 923 spin_lock_irqsave(&host->lock, flags); 1113 924 925 + if (host->flags & SDHCI_DEVICE_DEAD) 926 + goto out; 927 + 1114 928 ier = readl(host->ioaddr + SDHCI_INT_ENABLE); 1115 929 1116 930 ier &= ~SDHCI_INT_CARD_INT; ··· 1123 931 writel(ier, host->ioaddr + SDHCI_INT_ENABLE); 1124 932 writel(ier, host->ioaddr + SDHCI_SIGNAL_ENABLE); 1125 933 934 + out: 1126 935 mmiowb(); 1127 936 1128 937 spin_unlock_irqrestore(&host->lock, flags); ··· 1189 996 * The controller needs a reset of internal state machines 1190 997 * upon error conditions. 
1191 998 */ 1192 - if (mrq->cmd->error || 1193 - (mrq->data && (mrq->data->error || 1194 - (mrq->data->stop && mrq->data->stop->error))) || 1195 - (host->chip->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST)) { 999 + if (!(host->flags & SDHCI_DEVICE_DEAD) && 1000 + (mrq->cmd->error || 1001 + (mrq->data && (mrq->data->error || 1002 + (mrq->data->stop && mrq->data->stop->error))) || 1003 + (host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST))) { 1196 1004 1197 1005 /* Some controllers need this kick or reset won't work here */ 1198 - if (host->chip->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET) { 1006 + if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET) { 1199 1007 unsigned int clock; 1200 1008 1201 1009 /* This is to force an update */ ··· 1310 1116 host->data->error = -ETIMEDOUT; 1311 1117 else if (intmask & (SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_END_BIT)) 1312 1118 host->data->error = -EILSEQ; 1119 + else if (intmask & SDHCI_INT_ADMA_ERROR) 1120 + host->data->error = -EIO; 1313 1121 1314 1122 if (host->data->error) 1315 1123 sdhci_finish_data(host); ··· 1430 1234 1431 1235 #ifdef CONFIG_PM 1432 1236 1433 - static int sdhci_suspend (struct pci_dev *pdev, pm_message_t state) 1237 + int sdhci_suspend_host(struct sdhci_host *host, pm_message_t state) 1434 1238 { 1435 - struct sdhci_chip *chip; 1436 - int i, ret; 1239 + int ret; 1437 1240 1438 - chip = pci_get_drvdata(pdev); 1439 - if (!chip) 1440 - return 0; 1441 - 1442 - DBG("Suspending...\n"); 1443 - 1444 - for (i = 0;i < chip->num_slots;i++) { 1445 - if (!chip->hosts[i]) 1446 - continue; 1447 - ret = mmc_suspend_host(chip->hosts[i]->mmc, state); 1448 - if (ret) { 1449 - for (i--;i >= 0;i--) 1450 - mmc_resume_host(chip->hosts[i]->mmc); 1451 - return ret; 1452 - } 1453 - } 1454 - 1455 - pci_save_state(pdev); 1456 - pci_enable_wake(pdev, pci_choose_state(pdev, state), 0); 1457 - 1458 - for (i = 0;i < chip->num_slots;i++) { 1459 - if (!chip->hosts[i]) 1460 - continue; 1461 - free_irq(chip->hosts[i]->irq, chip->hosts[i]); 1462 - } 1463 
- 1464 - pci_disable_device(pdev); 1465 - pci_set_power_state(pdev, pci_choose_state(pdev, state)); 1466 - 1467 - return 0; 1468 - } 1469 - 1470 - static int sdhci_resume (struct pci_dev *pdev) 1471 - { 1472 - struct sdhci_chip *chip; 1473 - int i, ret; 1474 - 1475 - chip = pci_get_drvdata(pdev); 1476 - if (!chip) 1477 - return 0; 1478 - 1479 - DBG("Resuming...\n"); 1480 - 1481 - pci_set_power_state(pdev, PCI_D0); 1482 - pci_restore_state(pdev); 1483 - ret = pci_enable_device(pdev); 1241 + ret = mmc_suspend_host(host->mmc, state); 1484 1242 if (ret) 1485 1243 return ret; 1486 1244 1487 - for (i = 0;i < chip->num_slots;i++) { 1488 - if (!chip->hosts[i]) 1489 - continue; 1490 - if (chip->hosts[i]->flags & SDHCI_USE_DMA) 1491 - pci_set_master(pdev); 1492 - ret = request_irq(chip->hosts[i]->irq, sdhci_irq, 1493 - IRQF_SHARED, mmc_hostname(chip->hosts[i]->mmc), 1494 - chip->hosts[i]); 1495 - if (ret) 1496 - return ret; 1497 - sdhci_init(chip->hosts[i]); 1498 - mmiowb(); 1499 - ret = mmc_resume_host(chip->hosts[i]->mmc); 1500 - if (ret) 1501 - return ret; 1502 - } 1245 + free_irq(host->irq, host); 1503 1246 1504 1247 return 0; 1505 1248 } 1506 1249 1507 - #else /* CONFIG_PM */ 1250 + EXPORT_SYMBOL_GPL(sdhci_suspend_host); 1508 1251 1509 - #define sdhci_suspend NULL 1510 - #define sdhci_resume NULL 1252 + int sdhci_resume_host(struct sdhci_host *host) 1253 + { 1254 + int ret; 1255 + 1256 + if (host->flags & SDHCI_USE_DMA) { 1257 + if (host->ops->enable_dma) 1258 + host->ops->enable_dma(host); 1259 + } 1260 + 1261 + ret = request_irq(host->irq, sdhci_irq, IRQF_SHARED, 1262 + mmc_hostname(host->mmc), host); 1263 + if (ret) 1264 + return ret; 1265 + 1266 + sdhci_init(host); 1267 + mmiowb(); 1268 + 1269 + ret = mmc_resume_host(host->mmc); 1270 + if (ret) 1271 + return ret; 1272 + 1273 + return 0; 1274 + } 1275 + 1276 + EXPORT_SYMBOL_GPL(sdhci_resume_host); 1511 1277 1512 1278 #endif /* CONFIG_PM */ 1513 1279 1514 1280 
/*****************************************************************************\ 1515 1281 * * 1516 - * Device probing/removal * 1282 + * Device allocation/registration * 1517 1283 * * 1518 1284 \*****************************************************************************/ 1519 1285 1520 - static int __devinit sdhci_probe_slot(struct pci_dev *pdev, int slot) 1286 + struct sdhci_host *sdhci_alloc_host(struct device *dev, 1287 + size_t priv_size) 1521 1288 { 1522 - int ret; 1523 - unsigned int version; 1524 - struct sdhci_chip *chip; 1525 1289 struct mmc_host *mmc; 1526 1290 struct sdhci_host *host; 1527 1291 1528 - u8 first_bar; 1529 - unsigned int caps; 1292 + WARN_ON(dev == NULL); 1530 1293 1531 - chip = pci_get_drvdata(pdev); 1532 - BUG_ON(!chip); 1533 - 1534 - ret = pci_read_config_byte(pdev, PCI_SLOT_INFO, &first_bar); 1535 - if (ret) 1536 - return ret; 1537 - 1538 - first_bar &= PCI_SLOT_INFO_FIRST_BAR_MASK; 1539 - 1540 - if (first_bar > 5) { 1541 - printk(KERN_ERR DRIVER_NAME ": Invalid first BAR. Aborting.\n"); 1542 - return -ENODEV; 1543 - } 1544 - 1545 - if (!(pci_resource_flags(pdev, first_bar + slot) & IORESOURCE_MEM)) { 1546 - printk(KERN_ERR DRIVER_NAME ": BAR is not iomem. Aborting.\n"); 1547 - return -ENODEV; 1548 - } 1549 - 1550 - if (pci_resource_len(pdev, first_bar + slot) != 0x100) { 1551 - printk(KERN_ERR DRIVER_NAME ": Invalid iomem size. " 1552 - "You may experience problems.\n"); 1553 - } 1554 - 1555 - if ((pdev->class & 0x0000FF) == PCI_SDHCI_IFVENDOR) { 1556 - printk(KERN_ERR DRIVER_NAME ": Vendor specific interface. Aborting.\n"); 1557 - return -ENODEV; 1558 - } 1559 - 1560 - if ((pdev->class & 0x0000FF) > PCI_SDHCI_IFVENDOR) { 1561 - printk(KERN_ERR DRIVER_NAME ": Unknown interface. 
Aborting.\n"); 1562 - return -ENODEV; 1563 - } 1564 - 1565 - mmc = mmc_alloc_host(sizeof(struct sdhci_host), &pdev->dev); 1294 + mmc = mmc_alloc_host(sizeof(struct sdhci_host) + priv_size, dev); 1566 1295 if (!mmc) 1567 - return -ENOMEM; 1296 + return ERR_PTR(-ENOMEM); 1568 1297 1569 1298 host = mmc_priv(mmc); 1570 1299 host->mmc = mmc; 1571 1300 1572 - host->chip = chip; 1573 - chip->hosts[slot] = host; 1301 + return host; 1302 + } 1574 1303 1575 - host->bar = first_bar + slot; 1304 + EXPORT_SYMBOL_GPL(sdhci_alloc_host); 1576 1305 1577 - host->addr = pci_resource_start(pdev, host->bar); 1578 - host->irq = pdev->irq; 1306 + int sdhci_add_host(struct sdhci_host *host) 1307 + { 1308 + struct mmc_host *mmc; 1309 + unsigned int caps; 1310 + int ret; 1579 1311 1580 - DBG("slot %d at 0x%08lx, irq %d\n", slot, host->addr, host->irq); 1312 + WARN_ON(host == NULL); 1313 + if (host == NULL) 1314 + return -EINVAL; 1581 1315 1582 - ret = pci_request_region(pdev, host->bar, mmc_hostname(mmc)); 1583 - if (ret) 1584 - goto free; 1316 + mmc = host->mmc; 1585 1317 1586 - host->ioaddr = ioremap_nocache(host->addr, 1587 - pci_resource_len(pdev, host->bar)); 1588 - if (!host->ioaddr) { 1589 - ret = -ENOMEM; 1590 - goto release; 1591 - } 1318 + if (debug_quirks) 1319 + host->quirks = debug_quirks; 1592 1320 1593 1321 sdhci_reset(host, SDHCI_RESET_ALL); 1594 1322 1595 - version = readw(host->ioaddr + SDHCI_HOST_VERSION); 1596 - version = (version & SDHCI_SPEC_VER_MASK) >> SDHCI_SPEC_VER_SHIFT; 1597 - if (version > 1) { 1323 + host->version = readw(host->ioaddr + SDHCI_HOST_VERSION); 1324 + host->version = (host->version & SDHCI_SPEC_VER_MASK) 1325 + >> SDHCI_SPEC_VER_SHIFT; 1326 + if (host->version > SDHCI_SPEC_200) { 1598 1327 printk(KERN_ERR "%s: Unknown controller version (%d). 
" 1599 1328 "You may experience problems.\n", mmc_hostname(mmc), 1600 - version); 1329 + host->version); 1601 1330 } 1602 1331 1603 1332 caps = readl(host->ioaddr + SDHCI_CAPABILITIES); 1604 1333 1605 - if (chip->quirks & SDHCI_QUIRK_FORCE_DMA) 1334 + if (host->quirks & SDHCI_QUIRK_FORCE_DMA) 1606 1335 host->flags |= SDHCI_USE_DMA; 1607 1336 else if (!(caps & SDHCI_CAN_DO_DMA)) 1608 1337 DBG("Controller doesn't have DMA capability\n"); 1609 1338 else 1610 1339 host->flags |= SDHCI_USE_DMA; 1611 1340 1612 - if ((chip->quirks & SDHCI_QUIRK_BROKEN_DMA) && 1341 + if ((host->quirks & SDHCI_QUIRK_BROKEN_DMA) && 1613 1342 (host->flags & SDHCI_USE_DMA)) { 1614 1343 DBG("Disabling DMA as it is marked broken\n"); 1615 1344 host->flags &= ~SDHCI_USE_DMA; 1616 1345 } 1617 1346 1618 - if (((pdev->class & 0x0000FF) != PCI_SDHCI_IFDMA) && 1619 - (host->flags & SDHCI_USE_DMA)) { 1620 - printk(KERN_WARNING "%s: Will use DMA " 1621 - "mode even though HW doesn't fully " 1622 - "claim to support it.\n", mmc_hostname(mmc)); 1347 + if (host->flags & SDHCI_USE_DMA) { 1348 + if ((host->version >= SDHCI_SPEC_200) && 1349 + (caps & SDHCI_CAN_DO_ADMA2)) 1350 + host->flags |= SDHCI_USE_ADMA; 1351 + } 1352 + 1353 + if ((host->quirks & SDHCI_QUIRK_BROKEN_ADMA) && 1354 + (host->flags & SDHCI_USE_ADMA)) { 1355 + DBG("Disabling ADMA as it is marked broken\n"); 1356 + host->flags &= ~SDHCI_USE_ADMA; 1623 1357 } 1624 1358 1625 1359 if (host->flags & SDHCI_USE_DMA) { 1626 - if (pci_set_dma_mask(pdev, DMA_32BIT_MASK)) { 1627 - printk(KERN_WARNING "%s: No suitable DMA available. " 1628 - "Falling back to PIO.\n", mmc_hostname(mmc)); 1629 - host->flags &= ~SDHCI_USE_DMA; 1360 + if (host->ops->enable_dma) { 1361 + if (host->ops->enable_dma(host)) { 1362 + printk(KERN_WARNING "%s: No suitable DMA " 1363 + "available. 
Falling back to PIO.\n", 1364 + mmc_hostname(mmc)); 1365 + host->flags &= ~(SDHCI_USE_DMA | SDHCI_USE_ADMA); 1366 + } 1630 1367 } 1631 1368 } 1632 1369 1633 - if (host->flags & SDHCI_USE_DMA) 1634 - pci_set_master(pdev); 1635 - else /* XXX: Hack to get MMC layer to avoid highmem */ 1636 - pdev->dma_mask = 0; 1370 + if (host->flags & SDHCI_USE_ADMA) { 1371 + /* 1372 + * We need to allocate descriptors for all sg entries 1373 + * (128) and potentially one alignment transfer for 1374 + * each of those entries. 1375 + */ 1376 + host->adma_desc = kmalloc((128 * 2 + 1) * 4, GFP_KERNEL); 1377 + host->align_buffer = kmalloc(128 * 4, GFP_KERNEL); 1378 + if (!host->adma_desc || !host->align_buffer) { 1379 + kfree(host->adma_desc); 1380 + kfree(host->align_buffer); 1381 + printk(KERN_WARNING "%s: Unable to allocate ADMA " 1382 + "buffers. Falling back to standard DMA.\n", 1383 + mmc_hostname(mmc)); 1384 + host->flags &= ~SDHCI_USE_ADMA; 1385 + } 1386 + } 1387 + 1388 + /* XXX: Hack to get MMC layer to avoid highmem */ 1389 + if (!(host->flags & SDHCI_USE_DMA)) 1390 + mmc_dev(host->mmc)->dma_mask = NULL; 1637 1391 1638 1392 host->max_clk = 1639 1393 (caps & SDHCI_CLOCK_BASE_MASK) >> SDHCI_CLOCK_BASE_SHIFT; 1640 1394 if (host->max_clk == 0) { 1641 1395 printk(KERN_ERR "%s: Hardware doesn't specify base clock " 1642 1396 "frequency.\n", mmc_hostname(mmc)); 1643 - ret = -ENODEV; 1644 - goto unmap; 1397 + return -ENODEV; 1645 1398 } 1646 1399 host->max_clk *= 1000000; 1647 1400 ··· 1599 1454 if (host->timeout_clk == 0) { 1600 1455 printk(KERN_ERR "%s: Hardware doesn't specify timeout clock " 1601 1456 "frequency.\n", mmc_hostname(mmc)); 1602 - ret = -ENODEV; 1603 - goto unmap; 1457 + return -ENODEV; 1604 1458 } 1605 1459 if (caps & SDHCI_TIMEOUT_CLK_UNIT) 1606 1460 host->timeout_clk *= 1000; ··· 1610 1466 mmc->ops = &sdhci_ops; 1611 1467 mmc->f_min = host->max_clk / 256; 1612 1468 mmc->f_max = host->max_clk; 1613 - mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_MULTIWRITE | 
MMC_CAP_SDIO_IRQ; 1469 + mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_SDIO_IRQ; 1614 1470 1615 1471 if (caps & SDHCI_CAN_DO_HISPD) 1616 1472 mmc->caps |= MMC_CAP_SD_HIGHSPEED; ··· 1626 1482 if (mmc->ocr_avail == 0) { 1627 1483 printk(KERN_ERR "%s: Hardware doesn't report any " 1628 1484 "support voltages.\n", mmc_hostname(mmc)); 1629 - ret = -ENODEV; 1630 - goto unmap; 1485 + return -ENODEV; 1631 1486 } 1632 1487 1633 1488 spin_lock_init(&host->lock); 1634 1489 1635 1490 /* 1636 - * Maximum number of segments. Hardware cannot do scatter lists. 1491 + * Maximum number of segments. Depends on if the hardware 1492 + * can do scatter/gather or not. 1637 1493 */ 1638 - if (host->flags & SDHCI_USE_DMA) 1494 + if (host->flags & SDHCI_USE_ADMA) 1495 + mmc->max_hw_segs = 128; 1496 + else if (host->flags & SDHCI_USE_DMA) 1639 1497 mmc->max_hw_segs = 1; 1640 - else 1641 - mmc->max_hw_segs = 16; 1642 - mmc->max_phys_segs = 16; 1498 + else /* PIO */ 1499 + mmc->max_hw_segs = 128; 1500 + mmc->max_phys_segs = 128; 1643 1501 1644 1502 /* 1645 1503 * Maximum number of sectors in one transfer. Limited by DMA boundary ··· 1651 1505 1652 1506 /* 1653 1507 * Maximum segment size. Could be one segment with the maximum number 1654 - * of bytes. 1508 + * of bytes. When doing hardware scatter/gather, each entry cannot 1509 + * be larger than 64 KiB though. 1655 1510 */ 1656 - mmc->max_seg_size = mmc->max_req_size; 1511 + if (host->flags & SDHCI_USE_ADMA) 1512 + mmc->max_seg_size = 65536; 1513 + else 1514 + mmc->max_seg_size = mmc->max_req_size; 1657 1515 1658 1516 /* 1659 1517 * Maximum block size. 
This varies from controller to controller and ··· 1703 1553 host->led.default_trigger = mmc_hostname(mmc); 1704 1554 host->led.brightness_set = sdhci_led_control; 1705 1555 1706 - ret = led_classdev_register(&pdev->dev, &host->led); 1556 + ret = led_classdev_register(mmc_dev(mmc), &host->led); 1707 1557 if (ret) 1708 1558 goto reset; 1709 1559 #endif ··· 1712 1562 1713 1563 mmc_add_host(mmc); 1714 1564 1715 - printk(KERN_INFO "%s: SDHCI at 0x%08lx irq %d %s\n", 1716 - mmc_hostname(mmc), host->addr, host->irq, 1565 + printk(KERN_INFO "%s: SDHCI controller on %s [%s] using %s%s\n", 1566 + mmc_hostname(mmc), host->hw_name, mmc_dev(mmc)->bus_id, 1567 + (host->flags & SDHCI_USE_ADMA)?"A":"", 1717 1568 (host->flags & SDHCI_USE_DMA)?"DMA":"PIO"); 1718 1569 1719 1570 return 0; ··· 1727 1576 untasklet: 1728 1577 tasklet_kill(&host->card_tasklet); 1729 1578 tasklet_kill(&host->finish_tasklet); 1730 - unmap: 1731 - iounmap(host->ioaddr); 1732 - release: 1733 - pci_release_region(pdev, host->bar); 1734 - free: 1735 - mmc_free_host(mmc); 1736 1579 1737 1580 return ret; 1738 1581 } 1739 1582 1740 - static void sdhci_remove_slot(struct pci_dev *pdev, int slot) 1583 + EXPORT_SYMBOL_GPL(sdhci_add_host); 1584 + 1585 + void sdhci_remove_host(struct sdhci_host *host, int dead) 1741 1586 { 1742 - struct sdhci_chip *chip; 1743 - struct mmc_host *mmc; 1744 - struct sdhci_host *host; 1587 + unsigned long flags; 1745 1588 1746 - chip = pci_get_drvdata(pdev); 1747 - host = chip->hosts[slot]; 1748 - mmc = host->mmc; 1589 + if (dead) { 1590 + spin_lock_irqsave(&host->lock, flags); 1749 1591 1750 - chip->hosts[slot] = NULL; 1592 + host->flags |= SDHCI_DEVICE_DEAD; 1751 1593 1752 - mmc_remove_host(mmc); 1594 + if (host->mrq) { 1595 + printk(KERN_ERR "%s: Controller removed during " 1596 + " transfer!\n", mmc_hostname(host->mmc)); 1597 + 1598 + host->mrq->cmd->error = -ENOMEDIUM; 1599 + tasklet_schedule(&host->finish_tasklet); 1600 + } 1601 + 1602 + spin_unlock_irqrestore(&host->lock, flags); 
1603 + } 1604 + 1605 + mmc_remove_host(host->mmc); 1753 1606 1754 1607 #ifdef CONFIG_LEDS_CLASS 1755 1608 led_classdev_unregister(&host->led); 1756 1609 #endif 1757 1610 1758 - sdhci_reset(host, SDHCI_RESET_ALL); 1611 + if (!dead) 1612 + sdhci_reset(host, SDHCI_RESET_ALL); 1759 1613 1760 1614 free_irq(host->irq, host); 1761 1615 ··· 1769 1613 tasklet_kill(&host->card_tasklet); 1770 1614 tasklet_kill(&host->finish_tasklet); 1771 1615 1772 - iounmap(host->ioaddr); 1616 + kfree(host->adma_desc); 1617 + kfree(host->align_buffer); 1773 1618 1774 - pci_release_region(pdev, host->bar); 1775 - 1776 - mmc_free_host(mmc); 1619 + host->adma_desc = NULL; 1620 + host->align_buffer = NULL; 1777 1621 } 1778 1622 1779 - static int __devinit sdhci_probe(struct pci_dev *pdev, 1780 - const struct pci_device_id *ent) 1623 + EXPORT_SYMBOL_GPL(sdhci_remove_host); 1624 + 1625 + void sdhci_free_host(struct sdhci_host *host) 1781 1626 { 1782 - int ret, i; 1783 - u8 slots, rev; 1784 - struct sdhci_chip *chip; 1785 - 1786 - BUG_ON(pdev == NULL); 1787 - BUG_ON(ent == NULL); 1788 - 1789 - pci_read_config_byte(pdev, PCI_CLASS_REVISION, &rev); 1790 - 1791 - printk(KERN_INFO DRIVER_NAME 1792 - ": SDHCI controller found at %s [%04x:%04x] (rev %x)\n", 1793 - pci_name(pdev), (int)pdev->vendor, (int)pdev->device, 1794 - (int)rev); 1795 - 1796 - ret = pci_read_config_byte(pdev, PCI_SLOT_INFO, &slots); 1797 - if (ret) 1798 - return ret; 1799 - 1800 - slots = PCI_SLOT_INFO_SLOTS(slots) + 1; 1801 - DBG("found %d slot(s)\n", slots); 1802 - if (slots == 0) 1803 - return -ENODEV; 1804 - 1805 - ret = pci_enable_device(pdev); 1806 - if (ret) 1807 - return ret; 1808 - 1809 - chip = kzalloc(sizeof(struct sdhci_chip) + 1810 - sizeof(struct sdhci_host*) * slots, GFP_KERNEL); 1811 - if (!chip) { 1812 - ret = -ENOMEM; 1813 - goto err; 1814 - } 1815 - 1816 - chip->pdev = pdev; 1817 - chip->quirks = ent->driver_data; 1818 - 1819 - if (debug_quirks) 1820 - chip->quirks = debug_quirks; 1821 - 1822 - chip->num_slots = 
slots; 1823 - pci_set_drvdata(pdev, chip); 1824 - 1825 - for (i = 0;i < slots;i++) { 1826 - ret = sdhci_probe_slot(pdev, i); 1827 - if (ret) { 1828 - for (i--;i >= 0;i--) 1829 - sdhci_remove_slot(pdev, i); 1830 - goto free; 1831 - } 1832 - } 1833 - 1834 - return 0; 1835 - 1836 - free: 1837 - pci_set_drvdata(pdev, NULL); 1838 - kfree(chip); 1839 - 1840 - err: 1841 - pci_disable_device(pdev); 1842 - return ret; 1627 + mmc_free_host(host->mmc); 1843 1628 } 1844 1629 1845 - static void __devexit sdhci_remove(struct pci_dev *pdev) 1846 - { 1847 - int i; 1848 - struct sdhci_chip *chip; 1849 - 1850 - chip = pci_get_drvdata(pdev); 1851 - 1852 - if (chip) { 1853 - for (i = 0;i < chip->num_slots;i++) 1854 - sdhci_remove_slot(pdev, i); 1855 - 1856 - pci_set_drvdata(pdev, NULL); 1857 - 1858 - kfree(chip); 1859 - } 1860 - 1861 - pci_disable_device(pdev); 1862 - } 1863 - 1864 - static struct pci_driver sdhci_driver = { 1865 - .name = DRIVER_NAME, 1866 - .id_table = pci_ids, 1867 - .probe = sdhci_probe, 1868 - .remove = __devexit_p(sdhci_remove), 1869 - .suspend = sdhci_suspend, 1870 - .resume = sdhci_resume, 1871 - }; 1630 + EXPORT_SYMBOL_GPL(sdhci_free_host); 1872 1631 1873 1632 /*****************************************************************************\ 1874 1633 * * ··· 1797 1726 ": Secure Digital Host Controller Interface driver\n"); 1798 1727 printk(KERN_INFO DRIVER_NAME ": Copyright(c) Pierre Ossman\n"); 1799 1728 1800 - return pci_register_driver(&sdhci_driver); 1729 + return 0; 1801 1730 } 1802 1731 1803 1732 static void __exit sdhci_drv_exit(void) 1804 1733 { 1805 - DBG("Exiting\n"); 1806 - 1807 - pci_unregister_driver(&sdhci_driver); 1808 1734 } 1809 1735 1810 1736 module_init(sdhci_drv_init); ··· 1810 1742 module_param(debug_quirks, uint, 0444); 1811 1743 1812 1744 MODULE_AUTHOR("Pierre Ossman <drzeus@drzeus.cx>"); 1813 - MODULE_DESCRIPTION("Secure Digital Host Controller Interface driver"); 1745 + MODULE_DESCRIPTION("Secure Digital Host Controller Interface core 
driver"); 1814 1746 MODULE_LICENSE("GPL"); 1815 1747 1816 1748 MODULE_PARM_DESC(debug_quirks, "Force certain quirks.");
+94 -26
drivers/mmc/host/sdhci.h
··· 10 10 */ 11 11 12 12 /* 13 - * PCI registers 14 - */ 15 - 16 - #define PCI_SDHCI_IFPIO 0x00 17 - #define PCI_SDHCI_IFDMA 0x01 18 - #define PCI_SDHCI_IFVENDOR 0x02 19 - 20 - #define PCI_SLOT_INFO 0x40 /* 8 bits */ 21 - #define PCI_SLOT_INFO_SLOTS(x) ((x >> 4) & 7) 22 - #define PCI_SLOT_INFO_FIRST_BAR_MASK 0x07 23 - 24 - /* 25 13 * Controller registers 26 14 */ 27 15 ··· 60 72 #define SDHCI_CTRL_LED 0x01 61 73 #define SDHCI_CTRL_4BITBUS 0x02 62 74 #define SDHCI_CTRL_HISPD 0x04 75 + #define SDHCI_CTRL_DMA_MASK 0x18 76 + #define SDHCI_CTRL_SDMA 0x00 77 + #define SDHCI_CTRL_ADMA1 0x08 78 + #define SDHCI_CTRL_ADMA32 0x10 79 + #define SDHCI_CTRL_ADMA64 0x18 63 80 64 81 #define SDHCI_POWER_CONTROL 0x29 65 82 #define SDHCI_POWER_ON 0x01 ··· 110 117 #define SDHCI_INT_DATA_END_BIT 0x00400000 111 118 #define SDHCI_INT_BUS_POWER 0x00800000 112 119 #define SDHCI_INT_ACMD12ERR 0x01000000 120 + #define SDHCI_INT_ADMA_ERROR 0x02000000 113 121 114 122 #define SDHCI_INT_NORMAL_MASK 0x00007FFF 115 123 #define SDHCI_INT_ERROR_MASK 0xFFFF8000 ··· 134 140 #define SDHCI_CLOCK_BASE_SHIFT 8 135 141 #define SDHCI_MAX_BLOCK_MASK 0x00030000 136 142 #define SDHCI_MAX_BLOCK_SHIFT 16 143 + #define SDHCI_CAN_DO_ADMA2 0x00080000 144 + #define SDHCI_CAN_DO_ADMA1 0x00100000 137 145 #define SDHCI_CAN_DO_HISPD 0x00200000 138 146 #define SDHCI_CAN_DO_DMA 0x00400000 139 147 #define SDHCI_CAN_VDD_330 0x01000000 140 148 #define SDHCI_CAN_VDD_300 0x02000000 141 149 #define SDHCI_CAN_VDD_180 0x04000000 150 + #define SDHCI_CAN_64BIT 0x10000000 142 151 143 152 /* 44-47 reserved for more caps */ 144 153 ··· 149 152 150 153 /* 4C-4F reserved for more max current */ 151 154 152 - /* 50-FB reserved */ 155 + #define SDHCI_SET_ACMD12_ERROR 0x50 156 + #define SDHCI_SET_INT_ERROR 0x52 157 + 158 + #define SDHCI_ADMA_ERROR 0x54 159 + 160 + /* 55-57 reserved */ 161 + 162 + #define SDHCI_ADMA_ADDRESS 0x58 163 + 164 + /* 60-FB reserved */ 153 165 154 166 #define SDHCI_SLOT_INT_STATUS 0xFC 155 167 ··· 167 161 #define 
SDHCI_VENDOR_VER_SHIFT 8 168 162 #define SDHCI_SPEC_VER_MASK 0x00FF 169 163 #define SDHCI_SPEC_VER_SHIFT 0 164 + #define SDHCI_SPEC_100 0 165 + #define SDHCI_SPEC_200 1 170 166 171 - struct sdhci_chip; 167 + struct sdhci_ops; 172 168 173 169 struct sdhci_host { 174 - struct sdhci_chip *chip; 170 + /* Data set by hardware interface driver */ 171 + const char *hw_name; /* Hardware bus name */ 172 + 173 + unsigned int quirks; /* Deviations from spec. */ 174 + 175 + /* Controller doesn't honor resets unless we touch the clock register */ 176 + #define SDHCI_QUIRK_CLOCK_BEFORE_RESET (1<<0) 177 + /* Controller has bad caps bits, but really supports DMA */ 178 + #define SDHCI_QUIRK_FORCE_DMA (1<<1) 179 + /* Controller doesn't like to be reset when there is no card inserted. */ 180 + #define SDHCI_QUIRK_NO_CARD_NO_RESET (1<<2) 181 + /* Controller doesn't like clearing the power reg before a change */ 182 + #define SDHCI_QUIRK_SINGLE_POWER_WRITE (1<<3) 183 + /* Controller has flaky internal state so reset it on each ios change */ 184 + #define SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS (1<<4) 185 + /* Controller has an unusable DMA engine */ 186 + #define SDHCI_QUIRK_BROKEN_DMA (1<<5) 187 + /* Controller has an unusable ADMA engine */ 188 + #define SDHCI_QUIRK_BROKEN_ADMA (1<<6) 189 + /* Controller can only DMA from 32-bit aligned addresses */ 190 + #define SDHCI_QUIRK_32BIT_DMA_ADDR (1<<7) 191 + /* Controller can only DMA chunk sizes that are a multiple of 32 bits */ 192 + #define SDHCI_QUIRK_32BIT_DMA_SIZE (1<<8) 193 + /* Controller can only ADMA chunks that are a multiple of 32 bits */ 194 + #define SDHCI_QUIRK_32BIT_ADMA_SIZE (1<<9) 195 + /* Controller needs to be reset after each request to stay stable */ 196 + #define SDHCI_QUIRK_RESET_AFTER_REQUEST (1<<10) 197 + /* Controller needs voltage and power writes to happen separately */ 198 + #define SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER (1<<11) 199 + /* Controller provides an incorrect timeout value for transfers */ 200 + #define 
SDHCI_QUIRK_BROKEN_TIMEOUT_VAL (1<<12) 201 + 202 + int irq; /* Device IRQ */ 203 + void __iomem * ioaddr; /* Mapped address */ 204 + 205 + const struct sdhci_ops *ops; /* Low level hw interface */ 206 + 207 + /* Internal data */ 175 208 struct mmc_host *mmc; /* MMC structure */ 176 209 177 210 #ifdef CONFIG_LEDS_CLASS ··· 221 176 222 177 int flags; /* Host attributes */ 223 178 #define SDHCI_USE_DMA (1<<0) /* Host is DMA capable */ 224 - #define SDHCI_REQ_USE_DMA (1<<1) /* Use DMA for this req. */ 179 + #define SDHCI_USE_ADMA (1<<1) /* Host is ADMA capable */ 180 + #define SDHCI_REQ_USE_DMA (1<<2) /* Use DMA for this req. */ 181 + #define SDHCI_DEVICE_DEAD (1<<3) /* Device unresponsive */ 182 + 183 + unsigned int version; /* SDHCI spec. version */ 225 184 226 185 unsigned int max_clk; /* Max possible freq (MHz) */ 227 186 unsigned int timeout_clk; /* Timeout freq (KHz) */ ··· 243 194 int offset; /* Offset into current sg */ 244 195 int remain; /* Bytes left in current */ 245 196 246 - int irq; /* Device IRQ */ 247 - int bar; /* PCI BAR index */ 248 - unsigned long addr; /* Bus address */ 249 - void __iomem * ioaddr; /* Mapped address */ 197 + int sg_count; /* Mapped sg entries */ 198 + 199 + u8 *adma_desc; /* ADMA descriptor table */ 200 + u8 *align_buffer; /* Bounce buffer */ 201 + 202 + dma_addr_t adma_addr; /* Mapped ADMA descr. 
table */ 203 + dma_addr_t align_addr; /* Mapped bounce buffer */ 250 204 251 205 struct tasklet_struct card_tasklet; /* Tasklet structures */ 252 206 struct tasklet_struct finish_tasklet; 253 207 254 208 struct timer_list timer; /* Timer for timeouts */ 209 + 210 + unsigned long private[0] ____cacheline_aligned; 255 211 }; 256 212 257 - struct sdhci_chip { 258 - struct pci_dev *pdev; 259 213 260 - unsigned long quirks; 261 - 262 - int num_slots; /* Slots on controller */ 263 - struct sdhci_host *hosts[0]; /* Pointers to hosts */ 214 + struct sdhci_ops { 215 + int (*enable_dma)(struct sdhci_host *host); 264 216 }; 217 + 218 + 219 + extern struct sdhci_host *sdhci_alloc_host(struct device *dev, 220 + size_t priv_size); 221 + extern void sdhci_free_host(struct sdhci_host *host); 222 + 223 + static inline void *sdhci_priv(struct sdhci_host *host) 224 + { 225 + return (void *)host->private; 226 + } 227 + 228 + extern int sdhci_add_host(struct sdhci_host *host); 229 + extern void sdhci_remove_host(struct sdhci_host *host, int dead); 230 + 231 + #ifdef CONFIG_PM 232 + extern int sdhci_suspend_host(struct sdhci_host *host, pm_message_t state); 233 + extern int sdhci_resume_host(struct sdhci_host *host); 234 + #endif
+575
drivers/mmc/host/sdricoh_cs.c
··· 1 + /* 2 + * sdricoh_cs.c - driver for Ricoh Secure Digital Card Readers that can be 3 + * found on some Ricoh RL5c476 II cardbus bridge 4 + * 5 + * Copyright (C) 2006 - 2008 Sascha Sommer <saschasommer@freenet.de> 6 + * 7 + * This program is free software; you can redistribute it and/or modify 8 + * it under the terms of the GNU General Public License as published by 9 + * the Free Software Foundation; either version 2 of the License, or 10 + * (at your option) any later version. 11 + * 12 + * This program is distributed in the hope that it will be useful, 13 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 14 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 + * GNU General Public License for more details. 16 + * 17 + * You should have received a copy of the GNU General Public License 18 + * along with this program; if not, write to the Free Software 19 + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 20 + * 21 + */ 22 + 23 + /* 24 + #define DEBUG 25 + #define VERBOSE_DEBUG 26 + */ 27 + #include <linux/delay.h> 28 + #include <linux/highmem.h> 29 + #include <linux/pci.h> 30 + #include <linux/ioport.h> 31 + #include <linux/scatterlist.h> 32 + #include <linux/version.h> 33 + 34 + #include <pcmcia/cs_types.h> 35 + #include <pcmcia/cs.h> 36 + #include <pcmcia/cistpl.h> 37 + #include <pcmcia/ds.h> 38 + #include <linux/io.h> 39 + 40 + #include <linux/mmc/host.h> 41 + 42 + #define DRIVER_NAME "sdricoh_cs" 43 + 44 + static unsigned int switchlocked; 45 + 46 + /* i/o region */ 47 + #define SDRICOH_PCI_REGION 0 48 + #define SDRICOH_PCI_REGION_SIZE 0x1000 49 + 50 + /* registers */ 51 + #define R104_VERSION 0x104 52 + #define R200_CMD 0x200 53 + #define R204_CMD_ARG 0x204 54 + #define R208_DATAIO 0x208 55 + #define R20C_RESP 0x20c 56 + #define R21C_STATUS 0x21c 57 + #define R2E0_INIT 0x2e0 58 + #define R2E4_STATUS_RESP 0x2e4 59 + #define R2F0_RESET 0x2f0 60 + #define R224_MODE 0x224 61 + #define R226_BLOCKSIZE 0x226 62 + 
#define R228_POWER 0x228 63 + #define R230_DATA 0x230 64 + 65 + /* flags for the R21C_STATUS register */ 66 + #define STATUS_CMD_FINISHED 0x00000001 67 + #define STATUS_TRANSFER_FINISHED 0x00000004 68 + #define STATUS_CARD_INSERTED 0x00000020 69 + #define STATUS_CARD_LOCKED 0x00000080 70 + #define STATUS_CMD_TIMEOUT 0x00400000 71 + #define STATUS_READY_TO_READ 0x01000000 72 + #define STATUS_READY_TO_WRITE 0x02000000 73 + #define STATUS_BUSY 0x40000000 74 + 75 + /* timeouts */ 76 + #define INIT_TIMEOUT 100 77 + #define CMD_TIMEOUT 100000 78 + #define TRANSFER_TIMEOUT 100000 79 + #define BUSY_TIMEOUT 32767 80 + 81 + /* list of supported pcmcia devices */ 82 + static struct pcmcia_device_id pcmcia_ids[] = { 83 + /* vendor and device strings followed by their crc32 hashes */ 84 + PCMCIA_DEVICE_PROD_ID12("RICOH", "Bay1Controller", 0xd9f522ed, 85 + 0xc3901202), 86 + PCMCIA_DEVICE_NULL, 87 + }; 88 + 89 + MODULE_DEVICE_TABLE(pcmcia, pcmcia_ids); 90 + 91 + /* mmc privdata */ 92 + struct sdricoh_host { 93 + struct device *dev; 94 + struct mmc_host *mmc; /* MMC structure */ 95 + unsigned char __iomem *iobase; 96 + struct pci_dev *pci_dev; 97 + int app_cmd; 98 + }; 99 + 100 + /***************** register i/o helper functions *****************************/ 101 + 102 + static inline unsigned int sdricoh_readl(struct sdricoh_host *host, 103 + unsigned int reg) 104 + { 105 + unsigned int value = readl(host->iobase + reg); 106 + dev_vdbg(host->dev, "rl %x 0x%x\n", reg, value); 107 + return value; 108 + } 109 + 110 + static inline void sdricoh_writel(struct sdricoh_host *host, unsigned int reg, 111 + unsigned int value) 112 + { 113 + writel(value, host->iobase + reg); 114 + dev_vdbg(host->dev, "wl %x 0x%x\n", reg, value); 115 + 116 + } 117 + 118 + static inline unsigned int sdricoh_readw(struct sdricoh_host *host, 119 + unsigned int reg) 120 + { 121 + unsigned int value = readw(host->iobase + reg); 122 + dev_vdbg(host->dev, "rb %x 0x%x\n", reg, value); 123 + return value; 124 + } 125 
+ 126 + static inline void sdricoh_writew(struct sdricoh_host *host, unsigned int reg, 127 + unsigned short value) 128 + { 129 + writew(value, host->iobase + reg); 130 + dev_vdbg(host->dev, "ww %x 0x%x\n", reg, value); 131 + } 132 + 133 + static inline unsigned int sdricoh_readb(struct sdricoh_host *host, 134 + unsigned int reg) 135 + { 136 + unsigned int value = readb(host->iobase + reg); 137 + dev_vdbg(host->dev, "rb %x 0x%x\n", reg, value); 138 + return value; 139 + } 140 + 141 + static int sdricoh_query_status(struct sdricoh_host *host, unsigned int wanted, 142 + unsigned int timeout){ 143 + unsigned int loop; 144 + unsigned int status = 0; 145 + struct device *dev = host->dev; 146 + for (loop = 0; loop < timeout; loop++) { 147 + status = sdricoh_readl(host, R21C_STATUS); 148 + sdricoh_writel(host, R2E4_STATUS_RESP, status); 149 + if (status & wanted) 150 + break; 151 + } 152 + 153 + if (loop == timeout) { 154 + dev_err(dev, "query_status: timeout waiting for %x\n", wanted); 155 + return -ETIMEDOUT; 156 + } 157 + 158 + /* do not do this check in the loop as some commands fail otherwise */ 159 + if (status & 0x7F0000) { 160 + dev_err(dev, "waiting for status bit %x failed\n", wanted); 161 + return -EINVAL; 162 + } 163 + return 0; 164 + 165 + } 166 + 167 + static int sdricoh_mmc_cmd(struct sdricoh_host *host, unsigned char opcode, 168 + unsigned int arg) 169 + { 170 + unsigned int status; 171 + int result = 0; 172 + unsigned int loop = 0; 173 + /* reset status reg? 
*/ 174 + sdricoh_writel(host, R21C_STATUS, 0x18); 175 + /* fill parameters */ 176 + sdricoh_writel(host, R204_CMD_ARG, arg); 177 + sdricoh_writel(host, R200_CMD, (0x10000 << 8) | opcode); 178 + /* wait for command completion */ 179 + if (opcode) { 180 + for (loop = 0; loop < CMD_TIMEOUT; loop++) { 181 + status = sdricoh_readl(host, R21C_STATUS); 182 + sdricoh_writel(host, R2E4_STATUS_RESP, status); 183 + if (status & STATUS_CMD_FINISHED) 184 + break; 185 + } 186 + /* don't check for timeout in the loop it is not always 187 + reset correctly 188 + */ 189 + if (loop == CMD_TIMEOUT || status & STATUS_CMD_TIMEOUT) 190 + result = -ETIMEDOUT; 191 + 192 + } 193 + 194 + return result; 195 + 196 + } 197 + 198 + static int sdricoh_reset(struct sdricoh_host *host) 199 + { 200 + dev_dbg(host->dev, "reset\n"); 201 + sdricoh_writel(host, R2F0_RESET, 0x10001); 202 + sdricoh_writel(host, R2E0_INIT, 0x10000); 203 + if (sdricoh_readl(host, R2E0_INIT) != 0x10000) 204 + return -EIO; 205 + sdricoh_writel(host, R2E0_INIT, 0x10007); 206 + 207 + sdricoh_writel(host, R224_MODE, 0x2000000); 208 + sdricoh_writel(host, R228_POWER, 0xe0); 209 + 210 + 211 + /* status register ? 
*/ 212 + sdricoh_writel(host, R21C_STATUS, 0x18); 213 + 214 + return 0; 215 + } 216 + 217 + static int sdricoh_blockio(struct sdricoh_host *host, int read, 218 + u8 *buf, int len) 219 + { 220 + int size; 221 + u32 data = 0; 222 + /* wait until the data is available */ 223 + if (read) { 224 + if (sdricoh_query_status(host, STATUS_READY_TO_READ, 225 + TRANSFER_TIMEOUT)) 226 + return -ETIMEDOUT; 227 + sdricoh_writel(host, R21C_STATUS, 0x18); 228 + /* read data */ 229 + while (len) { 230 + data = sdricoh_readl(host, R230_DATA); 231 + size = min(len, 4); 232 + len -= size; 233 + while (size) { 234 + *buf = data & 0xFF; 235 + buf++; 236 + data >>= 8; 237 + size--; 238 + } 239 + } 240 + } else { 241 + if (sdricoh_query_status(host, STATUS_READY_TO_WRITE, 242 + TRANSFER_TIMEOUT)) 243 + return -ETIMEDOUT; 244 + sdricoh_writel(host, R21C_STATUS, 0x18); 245 + /* write data */ 246 + while (len) { 247 + size = min(len, 4); 248 + len -= size; 249 + while (size) { 250 + data >>= 8; 251 + data |= (u32)*buf << 24; 252 + buf++; 253 + size--; 254 + } 255 + sdricoh_writel(host, R230_DATA, data); 256 + } 257 + } 258 + 259 + if (len) 260 + return -EIO; 261 + 262 + return 0; 263 + } 264 + 265 + static void sdricoh_request(struct mmc_host *mmc, struct mmc_request *mrq) 266 + { 267 + struct sdricoh_host *host = mmc_priv(mmc); 268 + struct mmc_command *cmd = mrq->cmd; 269 + struct mmc_data *data = cmd->data; 270 + struct device *dev = host->dev; 271 + unsigned char opcode = cmd->opcode; 272 + int i; 273 + 274 + dev_dbg(dev, "=============================\n"); 275 + dev_dbg(dev, "sdricoh_request opcode=%i\n", opcode); 276 + 277 + sdricoh_writel(host, R21C_STATUS, 0x18); 278 + 279 + /* MMC_APP_CMDs need some special handling */ 280 + if (host->app_cmd) { 281 + opcode |= 64; 282 + host->app_cmd = 0; 283 + } else if (opcode == 55) 284 + host->app_cmd = 1; 285 + 286 + /* read/write commands seem to require this */ 287 + if (data) { 288 + sdricoh_writew(host, R226_BLOCKSIZE, data->blksz); 289 + 
sdricoh_writel(host, R208_DATAIO, 0); 290 + } 291 + 292 + cmd->error = sdricoh_mmc_cmd(host, opcode, cmd->arg); 293 + 294 + /* read response buffer */ 295 + if (cmd->flags & MMC_RSP_PRESENT) { 296 + if (cmd->flags & MMC_RSP_136) { 297 + /* CRC is stripped so we need to do some shifting. */ 298 + for (i = 0; i < 4; i++) { 299 + cmd->resp[i] = 300 + sdricoh_readl(host, 301 + R20C_RESP + (3 - i) * 4) << 8; 302 + if (i != 3) 303 + cmd->resp[i] |= 304 + sdricoh_readb(host, R20C_RESP + 305 + (3 - i) * 4 - 1); 306 + } 307 + } else 308 + cmd->resp[0] = sdricoh_readl(host, R20C_RESP); 309 + } 310 + 311 + /* transfer data */ 312 + if (data && cmd->error == 0) { 313 + dev_dbg(dev, "transfer: blksz %i blocks %i sg_len %i " 314 + "sg length %i\n", data->blksz, data->blocks, 315 + data->sg_len, data->sg->length); 316 + 317 + /* enter data reading mode */ 318 + sdricoh_writel(host, R21C_STATUS, 0x837f031e); 319 + for (i = 0; i < data->blocks; i++) { 320 + size_t len = data->blksz; 321 + u8 *buf; 322 + struct page *page; 323 + int result; 324 + page = sg_page(data->sg); 325 + 326 + buf = kmap(page) + data->sg->offset + (len * i); 327 + result = 328 + sdricoh_blockio(host, 329 + data->flags & MMC_DATA_READ, buf, len); 330 + kunmap(page); 331 + flush_dcache_page(page); 332 + if (result) { 333 + dev_err(dev, "sdricoh_request: cmd %i " 334 + "block transfer failed\n", cmd->opcode); 335 + cmd->error = result; 336 + break; 337 + } else 338 + data->bytes_xfered += len; 339 + } 340 + 341 + sdricoh_writel(host, R208_DATAIO, 1); 342 + 343 + if (sdricoh_query_status(host, STATUS_TRANSFER_FINISHED, 344 + TRANSFER_TIMEOUT)) { 345 + dev_err(dev, "sdricoh_request: transfer end error\n"); 346 + cmd->error = -EINVAL; 347 + } 348 + } 349 + /* FIXME check busy flag */ 350 + 351 + mmc_request_done(mmc, mrq); 352 + dev_dbg(dev, "=============================\n"); 353 + } 354 + 355 + static void sdricoh_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) 356 + { 357 + struct sdricoh_host *host = 
mmc_priv(mmc); 358 + dev_dbg(host->dev, "set_ios\n"); 359 + 360 + if (ios->power_mode == MMC_POWER_ON) { 361 + sdricoh_writel(host, R228_POWER, 0xc0e0); 362 + 363 + if (ios->bus_width == MMC_BUS_WIDTH_4) { 364 + sdricoh_writel(host, R224_MODE, 0x2000300); 365 + sdricoh_writel(host, R228_POWER, 0x40e0); 366 + } else { 367 + sdricoh_writel(host, R224_MODE, 0x2000340); 368 + } 369 + 370 + } else if (ios->power_mode == MMC_POWER_UP) { 371 + sdricoh_writel(host, R224_MODE, 0x2000320); 372 + sdricoh_writel(host, R228_POWER, 0xe0); 373 + } 374 + } 375 + 376 + static int sdricoh_get_ro(struct mmc_host *mmc) 377 + { 378 + struct sdricoh_host *host = mmc_priv(mmc); 379 + unsigned int status; 380 + 381 + status = sdricoh_readl(host, R21C_STATUS); 382 + sdricoh_writel(host, R2E4_STATUS_RESP, status); 383 + 384 + /* some notebooks seem to have the locked flag switched */ 385 + if (switchlocked) 386 + return !(status & STATUS_CARD_LOCKED); 387 + 388 + return (status & STATUS_CARD_LOCKED); 389 + } 390 + 391 + static struct mmc_host_ops sdricoh_ops = { 392 + .request = sdricoh_request, 393 + .set_ios = sdricoh_set_ios, 394 + .get_ro = sdricoh_get_ro, 395 + }; 396 + 397 + /* initialize the control and register it to the mmc framework */ 398 + static int sdricoh_init_mmc(struct pci_dev *pci_dev, 399 + struct pcmcia_device *pcmcia_dev) 400 + { 401 + int result = 0; 402 + void __iomem *iobase = NULL; 403 + struct mmc_host *mmc = NULL; 404 + struct sdricoh_host *host = NULL; 405 + struct device *dev = &pcmcia_dev->dev; 406 + /* map iomem */ 407 + if (pci_resource_len(pci_dev, SDRICOH_PCI_REGION) != 408 + SDRICOH_PCI_REGION_SIZE) { 409 + dev_dbg(dev, "unexpected pci resource len\n"); 410 + return -ENODEV; 411 + } 412 + iobase = 413 + pci_iomap(pci_dev, SDRICOH_PCI_REGION, SDRICOH_PCI_REGION_SIZE); 414 + if (!iobase) { 415 + dev_err(dev, "unable to map iobase\n"); 416 + return -ENODEV; 417 + } 418 + /* check version? 
*/ 419 + if (readl(iobase + R104_VERSION) != 0x4000) { 420 + dev_dbg(dev, "no supported mmc controller found\n"); 421 + result = -ENODEV; 422 + goto err; 423 + } 424 + /* allocate privdata */ 425 + mmc = pcmcia_dev->priv = 426 + mmc_alloc_host(sizeof(struct sdricoh_host), &pcmcia_dev->dev); 427 + if (!mmc) { 428 + dev_err(dev, "mmc_alloc_host failed\n"); 429 + result = -ENOMEM; 430 + goto err; 431 + } 432 + host = mmc_priv(mmc); 433 + 434 + host->iobase = iobase; 435 + host->dev = dev; 436 + host->pci_dev = pci_dev; 437 + 438 + mmc->ops = &sdricoh_ops; 439 + 440 + /* FIXME: frequency and voltage handling is done by the controller 441 + */ 442 + mmc->f_min = 450000; 443 + mmc->f_max = 24000000; 444 + mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; 445 + mmc->caps |= MMC_CAP_4_BIT_DATA; 446 + 447 + mmc->max_seg_size = 1024 * 512; 448 + mmc->max_blk_size = 512; 449 + 450 + /* reset the controler */ 451 + if (sdricoh_reset(host)) { 452 + dev_dbg(dev, "could not reset\n"); 453 + result = -EIO; 454 + goto err; 455 + 456 + } 457 + 458 + result = mmc_add_host(mmc); 459 + 460 + if (!result) { 461 + dev_dbg(dev, "mmc host registered\n"); 462 + return 0; 463 + } 464 + 465 + err: 466 + if (iobase) 467 + iounmap(iobase); 468 + if (mmc) 469 + mmc_free_host(mmc); 470 + 471 + return result; 472 + } 473 + 474 + /* search for supported mmc controllers */ 475 + static int sdricoh_pcmcia_probe(struct pcmcia_device *pcmcia_dev) 476 + { 477 + struct pci_dev *pci_dev = NULL; 478 + 479 + dev_info(&pcmcia_dev->dev, "Searching MMC controller for pcmcia device" 480 + " %s %s ...\n", pcmcia_dev->prod_id[0], pcmcia_dev->prod_id[1]); 481 + 482 + /* search pci cardbus bridge that contains the mmc controler */ 483 + /* the io region is already claimed by yenta_socket... 
*/ 484 + while ((pci_dev = 485 + pci_get_device(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_RL5C476, 486 + pci_dev))) { 487 + /* try to init the device */ 488 + if (!sdricoh_init_mmc(pci_dev, pcmcia_dev)) { 489 + dev_info(&pcmcia_dev->dev, "MMC controller found\n"); 490 + return 0; 491 + } 492 + 493 + } 494 + dev_err(&pcmcia_dev->dev, "No MMC controller was found.\n"); 495 + return -ENODEV; 496 + } 497 + 498 + static void sdricoh_pcmcia_detach(struct pcmcia_device *link) 499 + { 500 + struct mmc_host *mmc = link->priv; 501 + 502 + dev_dbg(&link->dev, "detach\n"); 503 + 504 + /* remove mmc host */ 505 + if (mmc) { 506 + struct sdricoh_host *host = mmc_priv(mmc); 507 + mmc_remove_host(mmc); 508 + pci_iounmap(host->pci_dev, host->iobase); 509 + pci_dev_put(host->pci_dev); 510 + mmc_free_host(mmc); 511 + } 512 + pcmcia_disable_device(link); 513 + 514 + } 515 + 516 + #ifdef CONFIG_PM 517 + static int sdricoh_pcmcia_suspend(struct pcmcia_device *link) 518 + { 519 + struct mmc_host *mmc = link->priv; 520 + dev_dbg(&link->dev, "suspend\n"); 521 + mmc_suspend_host(mmc, PMSG_SUSPEND); 522 + return 0; 523 + } 524 + 525 + static int sdricoh_pcmcia_resume(struct pcmcia_device *link) 526 + { 527 + struct mmc_host *mmc = link->priv; 528 + dev_dbg(&link->dev, "resume\n"); 529 + sdricoh_reset(mmc_priv(mmc)); 530 + mmc_resume_host(mmc); 531 + return 0; 532 + } 533 + #else 534 + #define sdricoh_pcmcia_suspend NULL 535 + #define sdricoh_pcmcia_resume NULL 536 + #endif 537 + 538 + static struct pcmcia_driver sdricoh_driver = { 539 + .drv = { 540 + .name = DRIVER_NAME, 541 + }, 542 + .probe = sdricoh_pcmcia_probe, 543 + .remove = sdricoh_pcmcia_detach, 544 + .id_table = pcmcia_ids, 545 + .suspend = sdricoh_pcmcia_suspend, 546 + .resume = sdricoh_pcmcia_resume, 547 + }; 548 + 549 + /*****************************************************************************\ 550 + * * 551 + * Driver init/exit * 552 + * * 553 + \*****************************************************************************/ 
554 + 555 + static int __init sdricoh_drv_init(void) 556 + { 557 + return pcmcia_register_driver(&sdricoh_driver); 558 + } 559 + 560 + static void __exit sdricoh_drv_exit(void) 561 + { 562 + pcmcia_unregister_driver(&sdricoh_driver); 563 + } 564 + 565 + module_init(sdricoh_drv_init); 566 + module_exit(sdricoh_drv_exit); 567 + 568 + module_param(switchlocked, uint, 0444); 569 + 570 + MODULE_AUTHOR("Sascha Sommer <saschasommer@freenet.de>"); 571 + MODULE_DESCRIPTION("Ricoh PCMCIA Secure Digital Interface driver"); 572 + MODULE_LICENSE("GPL"); 573 + 574 + MODULE_PARM_DESC(switchlocked, "Switch the cards locked status." 575 + "Use this when unlocked cards are shown readonly (default 0)");
+1 -1
drivers/mmc/host/tifm_sd.c
··· 973 973 974 974 mmc->ops = &tifm_sd_ops; 975 975 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; 976 - mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_MULTIWRITE; 976 + mmc->caps = MMC_CAP_4_BIT_DATA; 977 977 mmc->f_min = 20000000 / 60; 978 978 mmc->f_max = 24000000; 979 979
+19 -19
drivers/mmc/host/wbsd.c
··· 68 68 69 69 static const int valid_ids[] = { 70 70 0x7112, 71 - }; 71 + }; 72 72 73 73 #ifdef CONFIG_PNP 74 - static unsigned int nopnp = 0; 74 + static unsigned int param_nopnp = 0; 75 75 #else 76 - static const unsigned int nopnp = 1; 76 + static const unsigned int param_nopnp = 1; 77 77 #endif 78 - static unsigned int io = 0x248; 79 - static unsigned int irq = 6; 80 - static int dma = 2; 78 + static unsigned int param_io = 0x248; 79 + static unsigned int param_irq = 6; 80 + static int param_dma = 2; 81 81 82 82 /* 83 83 * Basic functions ··· 939 939 940 940 spin_unlock_bh(&host->lock); 941 941 942 - return csr & WBSD_WRPT; 942 + return !!(csr & WBSD_WRPT); 943 943 } 944 944 945 945 static const struct mmc_host_ops wbsd_ops = { ··· 1219 1219 mmc->f_min = 375000; 1220 1220 mmc->f_max = 24000000; 1221 1221 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; 1222 - mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_MULTIWRITE; 1222 + mmc->caps = MMC_CAP_4_BIT_DATA; 1223 1223 1224 1224 spin_lock_init(&host->lock); 1225 1225 ··· 1420 1420 1421 1421 dma_unmap_single(mmc_dev(host->mmc), host->dma_addr, 1422 1422 WBSD_DMA_SIZE, DMA_BIDIRECTIONAL); 1423 - host->dma_addr = (dma_addr_t)NULL; 1423 + host->dma_addr = 0; 1424 1424 1425 1425 kfree(host->dma_buffer); 1426 1426 host->dma_buffer = NULL; ··· 1445 1445 1446 1446 host->dma = -1; 1447 1447 host->dma_buffer = NULL; 1448 - host->dma_addr = (dma_addr_t)NULL; 1448 + host->dma_addr = 0; 1449 1449 } 1450 1450 1451 1451 /* ··· 1765 1765 static int __devinit wbsd_probe(struct platform_device *dev) 1766 1766 { 1767 1767 /* Use the module parameters for resources */ 1768 - return wbsd_init(&dev->dev, io, irq, dma, 0); 1768 + return wbsd_init(&dev->dev, param_io, param_irq, param_dma, 0); 1769 1769 } 1770 1770 1771 1771 static int __devexit wbsd_remove(struct platform_device *dev) ··· 1979 1979 1980 1980 #ifdef CONFIG_PNP 1981 1981 1982 - if (!nopnp) { 1982 + if (!param_nopnp) { 1983 1983 result = pnp_register_driver(&wbsd_pnp_driver); 
1984 1984 if (result < 0) 1985 1985 return result; 1986 1986 } 1987 1987 #endif /* CONFIG_PNP */ 1988 1988 1989 - if (nopnp) { 1989 + if (param_nopnp) { 1990 1990 result = platform_driver_register(&wbsd_driver); 1991 1991 if (result < 0) 1992 1992 return result; ··· 2012 2012 { 2013 2013 #ifdef CONFIG_PNP 2014 2014 2015 - if (!nopnp) 2015 + if (!param_nopnp) 2016 2016 pnp_unregister_driver(&wbsd_pnp_driver); 2017 2017 2018 2018 #endif /* CONFIG_PNP */ 2019 2019 2020 - if (nopnp) { 2020 + if (param_nopnp) { 2021 2021 platform_device_unregister(wbsd_device); 2022 2022 2023 2023 platform_driver_unregister(&wbsd_driver); ··· 2029 2029 module_init(wbsd_drv_init); 2030 2030 module_exit(wbsd_drv_exit); 2031 2031 #ifdef CONFIG_PNP 2032 - module_param(nopnp, uint, 0444); 2032 + module_param_named(nopnp, param_nopnp, uint, 0444); 2033 2033 #endif 2034 - module_param(io, uint, 0444); 2035 - module_param(irq, uint, 0444); 2036 - module_param(dma, int, 0444); 2034 + module_param_named(io, param_io, uint, 0444); 2035 + module_param_named(irq, param_irq, uint, 0444); 2036 + module_param_named(dma, param_dma, int, 0444); 2037 2037 2038 2038 MODULE_LICENSE("GPL"); 2039 2039 MODULE_AUTHOR("Pierre Ossman <drzeus@drzeus.cx>");
+7 -13
drivers/net/wireless/libertas/if_sdio.c
··· 1 1 /* 2 2 * linux/drivers/net/wireless/libertas/if_sdio.c 3 3 * 4 - * Copyright 2007 Pierre Ossman 4 + * Copyright 2007-2008 Pierre Ossman 5 5 * 6 6 * Inspired by if_cs.c, Copyright 2007 Holger Schurig 7 7 * ··· 266 266 267 267 /* 268 268 * The transfer must be in one transaction or the firmware 269 - * goes suicidal. 269 + * goes suicidal. There's no way to guarantee that for all 270 + * controllers, but we can at least try. 270 271 */ 271 - chunk = size; 272 - if ((chunk > card->func->cur_blksize) || (chunk > 512)) { 273 - chunk = (chunk + card->func->cur_blksize - 1) / 274 - card->func->cur_blksize * card->func->cur_blksize; 275 - } 272 + chunk = sdio_align_size(card->func, size); 276 273 277 274 ret = sdio_readsb(card->func, card->buffer, card->ioport, chunk); 278 275 if (ret) ··· 693 696 694 697 /* 695 698 * The transfer must be in one transaction or the firmware 696 - * goes suicidal. 699 + * goes suicidal. There's no way to guarantee that for all 700 + * controllers, but we can at least try. 697 701 */ 698 - size = nb + 4; 699 - if ((size > card->func->cur_blksize) || (size > 512)) { 700 - size = (size + card->func->cur_blksize - 1) / 701 - card->func->cur_blksize * card->func->cur_blksize; 702 - } 702 + size = sdio_align_size(card->func, nb + 4); 703 703 704 704 packet = kzalloc(sizeof(struct if_sdio_packet) + size, 705 705 GFP_ATOMIC);
+4
include/asm-arm/arch-at91/at91_mci.h
··· 75 75 #define AT91_MCI_TRTYP_MULTIPLE (1 << 19) 76 76 #define AT91_MCI_TRTYP_STREAM (2 << 19) 77 77 78 + #define AT91_MCI_BLKR 0x18 /* Block Register */ 79 + #define AT91_MCI_BLKR_BCNT(n) ((0xffff & (n)) << 0) /* Block count */ 80 + #define AT91_MCI_BLKR_BLKLEN(n) ((0xffff & (n)) << 16) /* Block length */ 81 + 78 82 #define AT91_MCI_RSPR(n) (0x20 + ((n) * 4)) /* Response Registers 0-3 */ 79 83 #define AT91_MCR_RDR 0x30 /* Receive Data Register */ 80 84 #define AT91_MCR_TDR 0x34 /* Transmit Data Register */
+17 -3
include/asm-arm/arch-s3c2410/regs-sdi.h
··· 28 28 #define S3C2410_SDIDCNT (0x30) 29 29 #define S3C2410_SDIDSTA (0x34) 30 30 #define S3C2410_SDIFSTA (0x38) 31 + 31 32 #define S3C2410_SDIDATA (0x3C) 32 33 #define S3C2410_SDIIMSK (0x40) 33 34 35 + #define S3C2440_SDIDATA (0x40) 36 + #define S3C2440_SDIIMSK (0x3C) 37 + 38 + #define S3C2440_SDICON_SDRESET (1<<8) 39 + #define S3C2440_SDICON_MMCCLOCK (1<<5) 34 40 #define S3C2410_SDICON_BYTEORDER (1<<4) 35 41 #define S3C2410_SDICON_SDIOIRQ (1<<3) 36 42 #define S3C2410_SDICON_RWAITEN (1<<2) ··· 48 42 #define S3C2410_SDICMDCON_LONGRSP (1<<10) 49 43 #define S3C2410_SDICMDCON_WAITRSP (1<<9) 50 44 #define S3C2410_SDICMDCON_CMDSTART (1<<8) 51 - #define S3C2410_SDICMDCON_INDEX (0xff) 45 + #define S3C2410_SDICMDCON_SENDERHOST (1<<6) 46 + #define S3C2410_SDICMDCON_INDEX (0x3f) 52 47 53 48 #define S3C2410_SDICMDSTAT_CRCFAIL (1<<12) 54 49 #define S3C2410_SDICMDSTAT_CMDSENT (1<<11) ··· 58 51 #define S3C2410_SDICMDSTAT_XFERING (1<<8) 59 52 #define S3C2410_SDICMDSTAT_INDEX (0xff) 60 53 54 + #define S3C2440_SDIDCON_DS_BYTE (0<<22) 55 + #define S3C2440_SDIDCON_DS_HALFWORD (1<<22) 56 + #define S3C2440_SDIDCON_DS_WORD (2<<22) 61 57 #define S3C2410_SDIDCON_IRQPERIOD (1<<21) 62 58 #define S3C2410_SDIDCON_TXAFTERRESP (1<<20) 63 59 #define S3C2410_SDIDCON_RXAFTERCMD (1<<19) ··· 69 59 #define S3C2410_SDIDCON_WIDEBUS (1<<16) 70 60 #define S3C2410_SDIDCON_DMAEN (1<<15) 71 61 #define S3C2410_SDIDCON_STOP (1<<14) 62 + #define S3C2440_SDIDCON_DATSTART (1<<14) 72 63 #define S3C2410_SDIDCON_DATMODE (3<<12) 73 64 #define S3C2410_SDIDCON_BLKNUM (0x7ff) 74 65 ··· 79 68 #define S3C2410_SDIDCON_XFER_RXSTART (2<<12) 80 69 #define S3C2410_SDIDCON_XFER_TXSTART (3<<12) 81 70 71 + #define S3C2410_SDIDCON_BLKNUM_MASK (0xFFF) 82 72 #define S3C2410_SDIDCNT_BLKNUM_SHIFT (12) 83 73 84 74 #define S3C2410_SDIDSTA_RDYWAITREQ (1<<10) ··· 94 82 #define S3C2410_SDIDSTA_TXDATAON (1<<1) 95 83 #define S3C2410_SDIDSTA_RXDATAON (1<<0) 96 84 85 + #define S3C2440_SDIFSTA_FIFORESET (1<<16) 86 + #define 
S3C2440_SDIFSTA_FIFOFAIL (3<<14) /* 3 is correct (2 bits) */ 97 87 #define S3C2410_SDIFSTA_TFDET (1<<13) 98 88 #define S3C2410_SDIFSTA_RFDET (1<<12) 99 - #define S3C2410_SDIFSTA_TXHALF (1<<11) 100 - #define S3C2410_SDIFSTA_TXEMPTY (1<<10) 89 + #define S3C2410_SDIFSTA_TFHALF (1<<11) 90 + #define S3C2410_SDIFSTA_TFEMPTY (1<<10) 101 91 #define S3C2410_SDIFSTA_RFLAST (1<<9) 102 92 #define S3C2410_SDIFSTA_RFFULL (1<<8) 103 93 #define S3C2410_SDIFSTA_RFHALF (1<<7)
+15
include/asm-arm/plat-s3c24xx/mci.h
··· 1 + #ifndef _ARCH_MCI_H 2 + #define _ARCH_MCI_H 3 + 4 + struct s3c24xx_mci_pdata { 5 + unsigned int wprotect_invert : 1; 6 + unsigned int detect_invert : 1; /* set => detect active high. */ 7 + 8 + unsigned int gpio_detect; 9 + unsigned int gpio_wprotect; 10 + unsigned long ocr_avail; 11 + void (*set_power)(unsigned char power_mode, 12 + unsigned short vdd); 13 + }; 14 + 15 + #endif /* _ARCH_MCI_H */
+5 -1
include/asm-avr32/arch-at32ap/board.h
··· 77 77 struct platform_device *at32_add_device_twi(unsigned int id, 78 78 struct i2c_board_info *b, 79 79 unsigned int n); 80 - struct platform_device *at32_add_device_mci(unsigned int id); 80 + 81 + struct mci_platform_data; 82 + struct platform_device * 83 + at32_add_device_mci(unsigned int id, struct mci_platform_data *data); 84 + 81 85 struct platform_device *at32_add_device_ac97c(unsigned int id); 82 86 struct platform_device *at32_add_device_abdac(unsigned int id); 83 87 struct platform_device *at32_add_device_psif(unsigned int id);
+9
include/asm-avr32/atmel-mci.h
··· 1 + #ifndef __ASM_AVR32_ATMEL_MCI_H 2 + #define __ASM_AVR32_ATMEL_MCI_H 3 + 4 + struct mci_platform_data { 5 + int detect_pin; 6 + int wp_pin; 7 + }; 8 + 9 + #endif /* __ASM_AVR32_ATMEL_MCI_H */
+8 -8
include/asm-mips/mach-au1x00/au1100_mmc.h
··· 38 38 #ifndef __ASM_AU1100_MMC_H 39 39 #define __ASM_AU1100_MMC_H 40 40 41 + #include <linux/leds.h> 41 42 42 - #define NUM_AU1100_MMC_CONTROLLERS 2 43 - 44 - #if defined(CONFIG_SOC_AU1100) 45 - #define AU1100_SD_IRQ AU1100_SD_INT 46 - #elif defined(CONFIG_SOC_AU1200) 47 - #define AU1100_SD_IRQ AU1200_SD_INT 48 - #endif 49 - 43 + struct au1xmmc_platform_data { 44 + int(*cd_setup)(void *mmc_host, int on); 45 + int(*card_inserted)(void *mmc_host); 46 + int(*card_readonly)(void *mmc_host); 47 + void(*set_power)(void *mmc_host, int state); 48 + struct led_classdev *led; 49 + }; 50 50 51 51 #define SD0_BASE 0xB0600000 52 52 #define SD1_BASE 0xB0680000
+1
include/linux/mmc/core.h
··· 135 135 struct mmc_command *, int); 136 136 137 137 extern void mmc_set_data_timeout(struct mmc_data *, const struct mmc_card *); 138 + extern unsigned int mmc_align_data_size(struct mmc_card *, unsigned int); 138 139 139 140 extern int __mmc_claim_host(struct mmc_host *host, atomic_t *abort); 140 141 extern void mmc_release_host(struct mmc_host *host);
+27 -5
include/linux/mmc/host.h
··· 51 51 52 52 struct mmc_host_ops { 53 53 void (*request)(struct mmc_host *host, struct mmc_request *req); 54 + /* 55 + * Avoid calling these three functions too often or in a "fast path", 56 + * since underlying controller might implement them in an expensive 57 + * and/or slow way. 58 + * 59 + * Also note that these functions might sleep, so don't call them 60 + * in the atomic contexts! 61 + * 62 + * Return values for the get_ro callback should be: 63 + * 0 for a read/write card 64 + * 1 for a read-only card 65 + * -ENOSYS when not supported (equal to NULL callback) 66 + * or a negative errno value when something bad happened 67 + * 68 + * Return values for the get_cd callback should be: 69 + * 0 for an absent card 70 + * 1 for a present card 71 + * -ENOSYS when not supported (equal to NULL callback) 72 + * or a negative errno value when something bad happened 73 + */ 54 74 void (*set_ios)(struct mmc_host *host, struct mmc_ios *ios); 55 75 int (*get_ro)(struct mmc_host *host); 76 + int (*get_cd)(struct mmc_host *host); 77 + 56 78 void (*enable_sdio_irq)(struct mmc_host *host, int enable); 57 79 }; 58 80 ··· 111 89 unsigned long caps; /* Host capabilities */ 112 90 113 91 #define MMC_CAP_4_BIT_DATA (1 << 0) /* Can the host do 4 bit transfers */ 114 - #define MMC_CAP_MULTIWRITE (1 << 1) /* Can accurately report bytes sent to card on error */ 115 - #define MMC_CAP_MMC_HIGHSPEED (1 << 2) /* Can do MMC high-speed timing */ 116 - #define MMC_CAP_SD_HIGHSPEED (1 << 3) /* Can do SD high-speed timing */ 117 - #define MMC_CAP_SDIO_IRQ (1 << 4) /* Can signal pending SDIO IRQs */ 118 - #define MMC_CAP_SPI (1 << 5) /* Talks only SPI protocols */ 92 + #define MMC_CAP_MMC_HIGHSPEED (1 << 1) /* Can do MMC high-speed timing */ 93 + #define MMC_CAP_SD_HIGHSPEED (1 << 2) /* Can do SD high-speed timing */ 94 + #define MMC_CAP_SDIO_IRQ (1 << 3) /* Can signal pending SDIO IRQs */ 95 + #define MMC_CAP_SPI (1 << 4) /* Talks only SPI protocols */ 96 + #define MMC_CAP_NEEDS_POLL (1 << 
5) /* Needs polling for card-detection */ 119 97 120 98 /* host specific block data */ 121 99 unsigned int max_seg_size; /* see blk_queue_max_segment_size */
-1
include/linux/mmc/mmc.h
··· 16 16 * Based strongly on code by: 17 17 * 18 18 * Author: Yong-iL Joh <tolkien@mizi.com> 19 - * Date : $Date: 2002/06/18 12:37:30 $ 20 19 * 21 20 * Author: Andrew Christian 22 21 * 15 May 2002
+11 -10
include/linux/mmc/sdio_func.h
··· 1 1 /* 2 2 * include/linux/mmc/sdio_func.h 3 3 * 4 - * Copyright 2007 Pierre Ossman 4 + * Copyright 2007-2008 Pierre Ossman 5 5 * 6 6 * This program is free software; you can redistribute it and/or modify 7 7 * it under the terms of the GNU General Public License as published by ··· 45 45 46 46 unsigned max_blksize; /* maximum block size */ 47 47 unsigned cur_blksize; /* current block size */ 48 + 49 + unsigned enable_timeout; /* max enable timeout in msec */ 48 50 49 51 unsigned int state; /* function state */ 50 52 #define SDIO_STATE_PRESENT (1<<0) /* present in sysfs */ ··· 122 120 extern int sdio_claim_irq(struct sdio_func *func, sdio_irq_handler_t *handler); 123 121 extern int sdio_release_irq(struct sdio_func *func); 124 122 125 - extern unsigned char sdio_readb(struct sdio_func *func, 126 - unsigned int addr, int *err_ret); 127 - extern unsigned short sdio_readw(struct sdio_func *func, 128 - unsigned int addr, int *err_ret); 129 - extern unsigned long sdio_readl(struct sdio_func *func, 130 - unsigned int addr, int *err_ret); 123 + extern unsigned int sdio_align_size(struct sdio_func *func, unsigned int sz); 124 + 125 + extern u8 sdio_readb(struct sdio_func *func, unsigned int addr, int *err_ret); 126 + extern u16 sdio_readw(struct sdio_func *func, unsigned int addr, int *err_ret); 127 + extern u32 sdio_readl(struct sdio_func *func, unsigned int addr, int *err_ret); 131 128 132 129 extern int sdio_memcpy_fromio(struct sdio_func *func, void *dst, 133 130 unsigned int addr, int count); 134 131 extern int sdio_readsb(struct sdio_func *func, void *dst, 135 132 unsigned int addr, int count); 136 133 137 - extern void sdio_writeb(struct sdio_func *func, unsigned char b, 134 + extern void sdio_writeb(struct sdio_func *func, u8 b, 138 135 unsigned int addr, int *err_ret); 139 - extern void sdio_writew(struct sdio_func *func, unsigned short b, 136 + extern void sdio_writew(struct sdio_func *func, u16 b, 140 137 unsigned int addr, int *err_ret); 141 - extern void 
sdio_writel(struct sdio_func *func, unsigned long b, 138 + extern void sdio_writel(struct sdio_func *func, u32 b, 142 139 unsigned int addr, int *err_ret); 143 140 144 141 extern int sdio_memcpy_toio(struct sdio_func *func, unsigned int addr,
+1
include/linux/pci_ids.h
··· 2190 2190 #define PCI_DEVICE_ID_JMICRON_JMB366 0x2366 2191 2191 #define PCI_DEVICE_ID_JMICRON_JMB368 0x2368 2192 2192 #define PCI_DEVICE_ID_JMICRON_JMB38X_SD 0x2381 2193 + #define PCI_DEVICE_ID_JMICRON_JMB38X_MMC 0x2382 2193 2194 #define PCI_DEVICE_ID_JMICRON_JMB38X_MS 0x2383 2194 2195 2195 2196 #define PCI_VENDOR_ID_KORENIX 0x1982
+9
include/linux/spi/mmc_spi.h
··· 23 23 /* sense switch on sd cards */ 24 24 int (*get_ro)(struct device *); 25 25 26 + /* 27 + * If board does not use CD interrupts, driver can optimize polling 28 + * using this function. 29 + */ 30 + int (*get_cd)(struct device *); 31 + 32 + /* Capabilities to pass into mmc core (e.g. MMC_CAP_NEEDS_POLL). */ 33 + unsigned long caps; 34 + 26 35 /* how long to debounce card detect, in msecs */ 27 36 u16 detect_delay; 28 37