linux/drivers/mmc/core/host.c at v3.2-rc5 (399 lines, 10 kB)
/*
 *  linux/drivers/mmc/core/host.c
 *
 *  Copyright (C) 2003 Russell King, All Rights Reserved.
 *  Copyright (C) 2007-2008 Pierre Ossman
 *  Copyright (C) 2010 Linus Walleij
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  MMC host class device management
 */

#include <linux/device.h>
#include <linux/err.h>
#include <linux/idr.h>
#include <linux/pagemap.h>
#include <linux/export.h>
#include <linux/leds.h>
#include <linux/slab.h>
#include <linux/suspend.h>

#include <linux/mmc/host.h>
#include <linux/mmc/card.h>

#include "core.h"
#include "host.h"

#define cls_dev_to_mmc_host(d) container_of(d, struct mmc_host, class_dev)

static void mmc_host_classdev_release(struct device *dev)
{
        struct mmc_host *host = cls_dev_to_mmc_host(dev);
        kfree(host);
}

static struct class mmc_host_class = {
        .name           = "mmc_host",
        .dev_release    = mmc_host_classdev_release,
};

int mmc_register_host_class(void)
{
        return class_register(&mmc_host_class);
}

void mmc_unregister_host_class(void)
{
        class_unregister(&mmc_host_class);
}

static DEFINE_IDR(mmc_host_idr);
static DEFINE_SPINLOCK(mmc_host_lock);

#ifdef CONFIG_MMC_CLKGATE

/*
 * Enabling clock gating will make the core call out to the host
 * once up and once down when it performs a request or card operation
 * intermingled in any fashion. The driver will see this through
 * set_ios() operations with ios.clock field set to 0 to gate (disable)
 * the block clock, and to the old frequency to enable it again.
 */
static void mmc_host_clk_gate_delayed(struct mmc_host *host)
{
        unsigned long tick_ns;
        unsigned long freq = host->ios.clock;
        unsigned long flags;

        if (!freq) {
                pr_debug("%s: frequency set to 0 in disable function, "
                         "this means the clock is already disabled.\n",
                         mmc_hostname(host));
                return;
        }
        /*
         * New requests may have appeared while we were scheduling,
         * then there is no reason to delay the check before
         * clk_disable().
         */
        spin_lock_irqsave(&host->clk_lock, flags);

        /*
         * Delay n bus cycles (at least 8 from MMC spec) before attempting
         * to disable the MCI block clock. The reference count may have
         * gone up again after this delay due to rescheduling!
         */
        if (!host->clk_requests) {
                spin_unlock_irqrestore(&host->clk_lock, flags);
                tick_ns = DIV_ROUND_UP(1000000000, freq);
                ndelay(host->clk_delay * tick_ns);
        } else {
                /* New users appeared while waiting for this work */
                spin_unlock_irqrestore(&host->clk_lock, flags);
                return;
        }
        mutex_lock(&host->clk_gate_mutex);
        spin_lock_irqsave(&host->clk_lock, flags);
        if (!host->clk_requests) {
                spin_unlock_irqrestore(&host->clk_lock, flags);
                /* This will set host->ios.clock to 0 */
                mmc_gate_clock(host);
                spin_lock_irqsave(&host->clk_lock, flags);
                pr_debug("%s: gated MCI clock\n", mmc_hostname(host));
        }
        spin_unlock_irqrestore(&host->clk_lock, flags);
        mutex_unlock(&host->clk_gate_mutex);
}

/*
 * Internal work. Work to disable the clock at some later point.
 */
static void mmc_host_clk_gate_work(struct work_struct *work)
{
        struct mmc_host *host = container_of(work, struct mmc_host,
                                              clk_gate_work);

        mmc_host_clk_gate_delayed(host);
}

/**
 * mmc_host_clk_hold - ungate hardware MCI clocks
 * @host: host to ungate.
 *
 * Makes sure the host ios.clock is restored to a non-zero value
 * past this call. Increase clock reference count and ungate clock
 * if we're the first user.
 */
void mmc_host_clk_hold(struct mmc_host *host)
{
        unsigned long flags;

        mutex_lock(&host->clk_gate_mutex);
        spin_lock_irqsave(&host->clk_lock, flags);
        if (host->clk_gated) {
                spin_unlock_irqrestore(&host->clk_lock, flags);
                mmc_ungate_clock(host);
                spin_lock_irqsave(&host->clk_lock, flags);
                pr_debug("%s: ungated MCI clock\n", mmc_hostname(host));
        }
        host->clk_requests++;
        spin_unlock_irqrestore(&host->clk_lock, flags);
        mutex_unlock(&host->clk_gate_mutex);
}

/**
 * mmc_host_may_gate_card - check if this card may be gated
 * @card: card to check.
 */
static bool mmc_host_may_gate_card(struct mmc_card *card)
{
        /* If there is no card we may gate it */
        if (!card)
                return true;
        /*
         * Don't gate SDIO cards! These need to be clocked at all times
         * since they may be independent systems generating interrupts
         * and other events. The clock requests counter from the core will
         * go down to zero since the core does not need it, but we will not
         * gate the clock, because there is somebody out there that may still
         * be using it.
         */
        return !(card->quirks & MMC_QUIRK_BROKEN_CLK_GATING);
}

/**
 * mmc_host_clk_release - gate off hardware MCI clocks
 * @host: host to gate.
 *
 * Calls the host driver with ios.clock set to zero as often as possible
 * in order to gate off hardware MCI clocks. Decrease clock reference
 * count and schedule disabling of clock.
 */
void mmc_host_clk_release(struct mmc_host *host)
{
        unsigned long flags;

        spin_lock_irqsave(&host->clk_lock, flags);
        host->clk_requests--;
        if (mmc_host_may_gate_card(host->card) &&
            !host->clk_requests)
                queue_work(system_nrt_wq, &host->clk_gate_work);
        spin_unlock_irqrestore(&host->clk_lock, flags);
}

/**
 * mmc_host_clk_rate - get current clock frequency setting
 * @host: host to get the clock frequency for.
 *
 * Returns current clock frequency regardless of gating.
 */
unsigned int mmc_host_clk_rate(struct mmc_host *host)
{
        unsigned long freq;
        unsigned long flags;

        spin_lock_irqsave(&host->clk_lock, flags);
        if (host->clk_gated)
                freq = host->clk_old;
        else
                freq = host->ios.clock;
        spin_unlock_irqrestore(&host->clk_lock, flags);
        return freq;
}

/**
 * mmc_host_clk_init - set up clock gating code
 * @host: host with potential clock to control
 */
static inline void mmc_host_clk_init(struct mmc_host *host)
{
        host->clk_requests = 0;
        /* Hold MCI clock for 8 cycles by default */
        host->clk_delay = 8;
        host->clk_gated = false;
        INIT_WORK(&host->clk_gate_work, mmc_host_clk_gate_work);
        spin_lock_init(&host->clk_lock);
        mutex_init(&host->clk_gate_mutex);
}

/**
 * mmc_host_clk_exit - shut down clock gating code
 * @host: host with potential clock to control
 */
static inline void mmc_host_clk_exit(struct mmc_host *host)
{
        /*
         * Wait for any outstanding gate and then make sure we're
         * ungated before exiting.
         */
        if (cancel_work_sync(&host->clk_gate_work))
                mmc_host_clk_gate_delayed(host);
        if (host->clk_gated)
                mmc_host_clk_hold(host);
        /* There should be only one user now */
        WARN_ON(host->clk_requests > 1);
}

#else

static inline void mmc_host_clk_init(struct mmc_host *host)
{
}

static inline void mmc_host_clk_exit(struct mmc_host *host)
{
}

#endif

/**
 * mmc_alloc_host - initialise the per-host structure.
 * @extra: sizeof private data structure
 * @dev: pointer to host device model structure
 *
 * Initialise the per-host structure.
 */
struct mmc_host *mmc_alloc_host(int extra, struct device *dev)
{
        int err;
        struct mmc_host *host;

        if (!idr_pre_get(&mmc_host_idr, GFP_KERNEL))
                return NULL;

        host = kzalloc(sizeof(struct mmc_host) + extra, GFP_KERNEL);
        if (!host)
                return NULL;

        spin_lock(&mmc_host_lock);
        err = idr_get_new(&mmc_host_idr, host, &host->index);
        spin_unlock(&mmc_host_lock);
        if (err)
                goto free;

        dev_set_name(&host->class_dev, "mmc%d", host->index);

        host->parent = dev;
        host->class_dev.parent = dev;
        host->class_dev.class = &mmc_host_class;
        device_initialize(&host->class_dev);

        mmc_host_clk_init(host);

        spin_lock_init(&host->lock);
        init_waitqueue_head(&host->wq);
        INIT_DELAYED_WORK(&host->detect, mmc_rescan);
        INIT_DELAYED_WORK_DEFERRABLE(&host->disable, mmc_host_deeper_disable);
#ifdef CONFIG_PM
        host->pm_notify.notifier_call = mmc_pm_notify;
#endif

        /*
         * By default, hosts do not support SGIO or large requests.
         * They have to set these according to their abilities.
         */
        host->max_segs = 1;
        host->max_seg_size = PAGE_CACHE_SIZE;

        host->max_req_size = PAGE_CACHE_SIZE;
        host->max_blk_size = 512;
        host->max_blk_count = PAGE_CACHE_SIZE / 512;

        /*
         * Enable runtime power management by default. This flag was added due
         * to runtime power management causing disruption for some users, but
         * the power on/off code has been improved since then.
         *
         * We'll enable this flag by default as an experiment, and if no
         * problems are reported, we will follow up later and remove the flag
         * altogether.
         */
        host->caps = MMC_CAP_POWER_OFF_CARD;

        return host;

free:
        kfree(host);
        return NULL;
}

EXPORT_SYMBOL(mmc_alloc_host);

/**
 * mmc_add_host - initialise host hardware
 * @host: mmc host
 *
 * Register the host with the driver model. The host must be
 * prepared to start servicing requests before this function
 * completes.
 */
int mmc_add_host(struct mmc_host *host)
{
        int err;

        WARN_ON((host->caps & MMC_CAP_SDIO_IRQ) &&
                !host->ops->enable_sdio_irq);

        err = device_add(&host->class_dev);
        if (err)
                return err;

        led_trigger_register_simple(dev_name(&host->class_dev), &host->led);

#ifdef CONFIG_DEBUG_FS
        mmc_add_host_debugfs(host);
#endif

        mmc_start_host(host);
        register_pm_notifier(&host->pm_notify);

        return 0;
}

EXPORT_SYMBOL(mmc_add_host);

/**
 * mmc_remove_host - remove host hardware
 * @host: mmc host
 *
 * Unregister and remove all cards associated with this host,
 * and power down the MMC bus. No new requests will be issued
 * after this function has returned.
 */
void mmc_remove_host(struct mmc_host *host)
{
        unregister_pm_notifier(&host->pm_notify);
        mmc_stop_host(host);

#ifdef CONFIG_DEBUG_FS
        mmc_remove_host_debugfs(host);
#endif

        device_del(&host->class_dev);

        led_trigger_unregister_simple(host->led);

        mmc_host_clk_exit(host);
}

EXPORT_SYMBOL(mmc_remove_host);

/**
 * mmc_free_host - free the host structure
 * @host: mmc host
 *
 * Free the host once all references to it have been dropped.
 */
void mmc_free_host(struct mmc_host *host)
{
        spin_lock(&mmc_host_lock);
        idr_remove(&mmc_host_idr, host->index);
        spin_unlock(&mmc_host_lock);

        put_device(&host->class_dev);
}

EXPORT_SYMBOL(mmc_free_host);
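The exported alloc/add/remove/free functions above form the life cycle a host controller driver walks through. The sketch below (not part of host.c) shows that typical driver-side usage for a hypothetical platform device; the "foo" names, the stubbed request/set_ios bodies, and the clock/voltage values are illustrative placeholders, while the mmc_* calls and struct mmc_host fields are the real API from this file and <linux/mmc/host.h>.

/*
 * Editor's sketch, not part of the original file. All "foo" identifiers
 * are hypothetical; platform_driver registration boilerplate is omitted.
 */
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/mmc/host.h>

struct foo_host {
        struct mmc_host *mmc;   /* back-pointer to the class device */
        /* register base, clocks, IRQ, etc. would live here */
};

static void foo_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
        /* a real driver starts the command/data transfer here */
}

static void foo_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
        /* a real driver programs clock, bus width and power here */
}

static const struct mmc_host_ops foo_mmc_ops = {
        .request        = foo_mmc_request,
        .set_ios        = foo_mmc_set_ios,
};

static int foo_mmc_probe(struct platform_device *pdev)
{
        struct mmc_host *mmc;
        struct foo_host *host;
        int ret;

        /* 'extra' bytes become the driver-private area behind the host */
        mmc = mmc_alloc_host(sizeof(struct foo_host), &pdev->dev);
        if (!mmc)
                return -ENOMEM;

        host = mmc_priv(mmc);
        host->mmc = mmc;

        mmc->ops = &foo_mmc_ops;
        mmc->f_min = 400000;            /* 400 kHz for card initialisation */
        mmc->f_max = 52000000;          /* illustrative controller maximum */
        mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
        mmc->caps |= MMC_CAP_4_BIT_DATA;

        /* the host must be ready to service requests before this returns */
        ret = mmc_add_host(mmc);
        if (ret) {
                mmc_free_host(mmc);
                return ret;
        }

        platform_set_drvdata(pdev, mmc);
        return 0;
}

static int foo_mmc_remove(struct platform_device *pdev)
{
        struct mmc_host *mmc = platform_get_drvdata(pdev);

        mmc_remove_host(mmc);   /* removes cards, stops new requests */
        mmc_free_host(mmc);     /* drops the idr slot and last reference */
        return 0;
}

Note the teardown ordering: mmc_remove_host() must run before mmc_free_host(), since the latter releases the idr index and drops the class_dev reference whose release callback kfree()s the host structure.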