Commit | Line | Data |
---|---|---|
b93931a6 PO |
1 | /* |
2 | * linux/drivers/mmc/core/host.c | |
3 | * | |
4 | * Copyright (C) 2003 Russell King, All Rights Reserved. | |
ff3112f5 | 5 | * Copyright (C) 2007-2008 Pierre Ossman |
04566831 | 6 | * Copyright (C) 2010 Linus Walleij |
b93931a6 PO |
7 | * |
8 | * This program is free software; you can redistribute it and/or modify | |
9 | * it under the terms of the GNU General Public License version 2 as | |
10 | * published by the Free Software Foundation. | |
11 | * | |
12 | * MMC host class device management | |
13 | */ | |
14 | ||
15 | #include <linux/device.h> | |
16 | #include <linux/err.h> | |
17 | #include <linux/idr.h> | |
18 | #include <linux/pagemap.h> | |
3ef77af1 | 19 | #include <linux/export.h> |
af8350c7 | 20 | #include <linux/leds.h> |
5a0e3ad6 | 21 | #include <linux/slab.h> |
4c2ef25f | 22 | #include <linux/suspend.h> |
b93931a6 PO |
23 | |
24 | #include <linux/mmc/host.h> | |
04566831 | 25 | #include <linux/mmc/card.h> |
b93931a6 PO |
26 | |
27 | #include "core.h" | |
28 | #include "host.h" | |
29 | ||
30 | #define cls_dev_to_mmc_host(d) container_of(d, struct mmc_host, class_dev) | |
31 | ||
/*
 * Class-device release callback: runs when the last reference to the
 * class device is dropped (see put_device() in mmc_free_host()) and
 * frees the enclosing mmc_host allocated by mmc_alloc_host().
 */
static void mmc_host_classdev_release(struct device *dev)
{
	kfree(cls_dev_to_mmc_host(dev));
}
37 | ||
/* The "mmc_host" device class; hosts show up as /sys/class/mmc_host/mmcN. */
static struct class mmc_host_class = {
	.name		= "mmc_host",
	.dev_release	= mmc_host_classdev_release,
};
42 | ||
/* Register the mmc_host class with the driver core (called at core init). */
int mmc_register_host_class(void)
{
	return class_register(&mmc_host_class);
}
47 | ||
/* Tear down the mmc_host class (called at core exit). */
void mmc_unregister_host_class(void)
{
	class_unregister(&mmc_host_class);
}
52 | ||
/* IDR handing out the mmcN host indices; mmc_host_lock serializes access. */
static DEFINE_IDR(mmc_host_idr);
static DEFINE_SPINLOCK(mmc_host_lock);
55 | ||
04566831 | 56 | #ifdef CONFIG_MMC_CLKGATE |
597dd9d7 SRT |
/*
 * sysfs "clkgate_delay" read: print the gating delay (in ms — it is fed
 * to msecs_to_jiffies() in mmc_host_clk_release()) as a decimal string.
 */
static ssize_t clkgate_delay_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mmc_host *host = cls_dev_to_mmc_host(dev);
	return snprintf(buf, PAGE_SIZE, "%lu\n", host->clkgate_delay);
}
63 | ||
64 | static ssize_t clkgate_delay_store(struct device *dev, | |
65 | struct device_attribute *attr, const char *buf, size_t count) | |
66 | { | |
67 | struct mmc_host *host = cls_dev_to_mmc_host(dev); | |
68 | unsigned long flags, value; | |
69 | ||
70 | if (kstrtoul(buf, 0, &value)) | |
71 | return -EINVAL; | |
72 | ||
73 | spin_lock_irqsave(&host->clk_lock, flags); | |
74 | host->clkgate_delay = value; | |
75 | spin_unlock_irqrestore(&host->clk_lock, flags); | |
597dd9d7 SRT |
76 | return count; |
77 | } | |
04566831 LW |
78 | |
/*
 * Enabling clock gating will make the core call out to the host
 * once up and once down when it performs a request or card operation
 * intermingled in any fashion. The driver will see this through
 * set_ios() operations with ios.clock field set to 0 to gate (disable)
 * the block clock, and to the old frequency to enable it again.
 */
static void mmc_host_clk_gate_delayed(struct mmc_host *host)
{
	unsigned long tick_ns;
	unsigned long freq = host->ios.clock;
	unsigned long flags;

	/* Already gated: ios.clock is forced to 0 by mmc_gate_clock() */
	if (!freq) {
		pr_debug("%s: frequency set to 0 in disable function, "
			"this means the clock is already disabled.\n",
			mmc_hostname(host));
		return;
	}
	/*
	 * New requests may have appeared while we were scheduling,
	 * then there is no reason to delay the check before
	 * clk_disable().
	 */
	spin_lock_irqsave(&host->clk_lock, flags);

	/*
	 * Delay n bus cycles (at least 8 from MMC spec) before attempting
	 * to disable the MCI block clock. The reference count may have
	 * gone up again after this delay due to rescheduling!
	 */
	if (!host->clk_requests) {
		/* Drop the spinlock for the busy-wait; tick_ns = one bus cycle */
		spin_unlock_irqrestore(&host->clk_lock, flags);
		tick_ns = DIV_ROUND_UP(1000000000, freq);
		ndelay(host->clk_delay * tick_ns);
	} else {
		/* New users appeared while waiting for this work */
		spin_unlock_irqrestore(&host->clk_lock, flags);
		return;
	}
	/* Serialize against mmc_host_clk_hold(); recheck refcount under it */
	mutex_lock(&host->clk_gate_mutex);
	spin_lock_irqsave(&host->clk_lock, flags);
	if (!host->clk_requests) {
		/* mmc_gate_clock() is called unlocked; NOTE(review): it
		 * presumably may sleep in set_ios — confirm */
		spin_unlock_irqrestore(&host->clk_lock, flags);
		/* This will set host->ios.clock to 0 */
		mmc_gate_clock(host);
		spin_lock_irqsave(&host->clk_lock, flags);
		pr_debug("%s: gated MCI clock\n", mmc_hostname(host));
	}
	spin_unlock_irqrestore(&host->clk_lock, flags);
	mutex_unlock(&host->clk_gate_mutex);
}
131 | ||
/*
 * Internal work. Work to disable the clock at some later point.
 */
static void mmc_host_clk_gate_work(struct work_struct *work)
{
	/* Recover the owning host from the embedded delayed-work item */
	struct mmc_host *host = container_of(work, struct mmc_host,
					      clk_gate_work.work);

	mmc_host_clk_gate_delayed(host);
}
142 | ||
/**
 * mmc_host_clk_hold - ungate hardware MCI clocks
 * @host: host to ungate.
 *
 * Makes sure the host ios.clock is restored to a non-zero value
 * past this call. Increase clock reference count and ungate clock
 * if we're the first user.
 */
void mmc_host_clk_hold(struct mmc_host *host)
{
	unsigned long flags;

	/* cancel any clock gating work scheduled by mmc_host_clk_release() */
	cancel_delayed_work_sync(&host->clk_gate_work);
	/* Serialize ungate against the gating path in clk_gate_delayed() */
	mutex_lock(&host->clk_gate_mutex);
	spin_lock_irqsave(&host->clk_lock, flags);
	if (host->clk_gated) {
		/* Spinlock is dropped across mmc_ungate_clock();
		 * NOTE(review): presumably because set_ios may sleep — confirm */
		spin_unlock_irqrestore(&host->clk_lock, flags);
		mmc_ungate_clock(host);
		spin_lock_irqsave(&host->clk_lock, flags);
		pr_debug("%s: ungated MCI clock\n", mmc_hostname(host));
	}
	/* Count ourselves as a user; balanced by mmc_host_clk_release() */
	host->clk_requests++;
	spin_unlock_irqrestore(&host->clk_lock, flags);
	mutex_unlock(&host->clk_gate_mutex);
}
169 | ||
170 | /** | |
171 | * mmc_host_may_gate_card - check if this card may be gated | |
172 | * @card: card to check. | |
173 | */ | |
174 | static bool mmc_host_may_gate_card(struct mmc_card *card) | |
175 | { | |
176 | /* If there is no card we may gate it */ | |
177 | if (!card) | |
178 | return true; | |
179 | /* | |
180 | * Don't gate SDIO cards! These need to be clocked at all times | |
181 | * since they may be independent systems generating interrupts | |
182 | * and other events. The clock requests counter from the core will | |
183 | * go down to zero since the core does not need it, but we will not | |
184 | * gate the clock, because there is somebody out there that may still | |
185 | * be using it. | |
186 | */ | |
db993500 | 187 | return !(card->quirks & MMC_QUIRK_BROKEN_CLK_GATING); |
04566831 LW |
188 | } |
189 | ||
/**
 * mmc_host_clk_release - gate off hardware MCI clocks
 * @host: host to gate.
 *
 * Calls the host driver with ios.clock set to zero as often as possible
 * in order to gate off hardware MCI clocks. Decrease clock reference
 * count and schedule disabling of clock.
 */
void mmc_host_clk_release(struct mmc_host *host)
{
	unsigned long flags;

	spin_lock_irqsave(&host->clk_lock, flags);
	host->clk_requests--;
	/*
	 * Only schedule gating when the last user is gone and the card
	 * tolerates it; the actual gate runs clkgate_delay ms later via
	 * mmc_host_clk_gate_work() on the non-reentrant workqueue.
	 */
	if (mmc_host_may_gate_card(host->card) &&
	    !host->clk_requests)
		queue_delayed_work(system_nrt_wq, &host->clk_gate_work,
				msecs_to_jiffies(host->clkgate_delay));
	spin_unlock_irqrestore(&host->clk_lock, flags);
}
210 | ||
211 | /** | |
212 | * mmc_host_clk_rate - get current clock frequency setting | |
213 | * @host: host to get the clock frequency for. | |
214 | * | |
215 | * Returns current clock frequency regardless of gating. | |
216 | */ | |
217 | unsigned int mmc_host_clk_rate(struct mmc_host *host) | |
218 | { | |
219 | unsigned long freq; | |
220 | unsigned long flags; | |
221 | ||
222 | spin_lock_irqsave(&host->clk_lock, flags); | |
223 | if (host->clk_gated) | |
224 | freq = host->clk_old; | |
225 | else | |
226 | freq = host->ios.clock; | |
227 | spin_unlock_irqrestore(&host->clk_lock, flags); | |
228 | return freq; | |
229 | } | |
230 | ||
/**
 * mmc_host_clk_init - set up clock gating code
 * @host: host with potential clock to control
 */
static inline void mmc_host_clk_init(struct mmc_host *host)
{
	host->clk_requests = 0;
	/* Hold MCI clock for 8 cycles by default */
	host->clk_delay = 8;
	/*
	 * Default clock gating delay is 0ms to avoid wasting power.
	 * This value can be tuned by writing into sysfs entry.
	 */
	host->clkgate_delay = 0;
	host->clk_gated = false;
	INIT_DELAYED_WORK(&host->clk_gate_work, mmc_host_clk_gate_work);
	spin_lock_init(&host->clk_lock);
	mutex_init(&host->clk_gate_mutex);
}
250 | ||
/**
 * mmc_host_clk_exit - shut down clock gating code
 * @host: host with potential clock to control
 */
static inline void mmc_host_clk_exit(struct mmc_host *host)
{
	/*
	 * Wait for any outstanding gate and then make sure we're
	 * ungated before exiting.
	 */
	if (cancel_delayed_work_sync(&host->clk_gate_work))
		/* Pending work was cancelled: perform the gating synchronously */
		mmc_host_clk_gate_delayed(host);
	if (host->clk_gated)
		/* Leave the clock running; this also bumps clk_requests */
		mmc_host_clk_hold(host);
	/* There should be only one user now */
	WARN_ON(host->clk_requests > 1);
}
268 | ||
/*
 * Create the "clkgate_delay" sysfs attribute (readable by all, writable
 * by root) exposing host->clkgate_delay via the show/store handlers above.
 */
static inline void mmc_host_clk_sysfs_init(struct mmc_host *host)
{
	host->clkgate_delay_attr.show = clkgate_delay_show;
	host->clkgate_delay_attr.store = clkgate_delay_store;
	sysfs_attr_init(&host->clkgate_delay_attr.attr);
	host->clkgate_delay_attr.attr.name = "clkgate_delay";
	host->clkgate_delay_attr.attr.mode = S_IRUGO | S_IWUSR;
	/* Creation failure is non-fatal: log it and carry on without the knob */
	if (device_create_file(&host->class_dev, &host->clkgate_delay_attr))
		pr_err("%s: Failed to create clkgate_delay sysfs entry\n",
				mmc_hostname(host));
}
04566831 LW |
#else

/* CONFIG_MMC_CLKGATE disabled: the clock-gating hooks compile to no-ops. */

static inline void mmc_host_clk_init(struct mmc_host *host)
{
}

static inline void mmc_host_clk_exit(struct mmc_host *host)
{
}

static inline void mmc_host_clk_sysfs_init(struct mmc_host *host)
{
}

#endif
295 | ||
b93931a6 PO |
/**
 * mmc_alloc_host - initialise the per-host structure.
 * @extra: sizeof private data structure
 * @dev: pointer to host device model structure
 *
 * Initialise the per-host structure.
 *
 * Returns the new host (private data follows the struct, reachable via
 * mmc_priv()), or NULL on allocation / index-allocation failure.
 */
struct mmc_host *mmc_alloc_host(int extra, struct device *dev)
{
	int err;
	struct mmc_host *host;

	/* Preload IDR memory so idr_get_new() below cannot fail for lack of it */
	if (!idr_pre_get(&mmc_host_idr, GFP_KERNEL))
		return NULL;

	host = kzalloc(sizeof(struct mmc_host) + extra, GFP_KERNEL);
	if (!host)
		return NULL;

	/* Allocate the mmcN index under the IDR lock */
	spin_lock(&mmc_host_lock);
	err = idr_get_new(&mmc_host_idr, host, &host->index);
	spin_unlock(&mmc_host_lock);
	if (err)
		goto free;

	dev_set_name(&host->class_dev, "mmc%d", host->index);

	host->parent = dev;
	host->class_dev.parent = dev;
	host->class_dev.class = &mmc_host_class;
	device_initialize(&host->class_dev);

	mmc_host_clk_init(host);

	/* No card-detect IRQ wired up yet */
	host->slot.cd_irq = -EINVAL;

	spin_lock_init(&host->lock);
	init_waitqueue_head(&host->wq);
	INIT_DELAYED_WORK(&host->detect, mmc_rescan);
#ifdef CONFIG_PM
	host->pm_notify.notifier_call = mmc_pm_notify;
#endif

	/*
	 * By default, hosts do not support SGIO or large requests.
	 * They have to set these according to their abilities.
	 */
	host->max_segs = 1;
	host->max_seg_size = PAGE_CACHE_SIZE;

	host->max_req_size = PAGE_CACHE_SIZE;
	host->max_blk_size = 512;
	host->max_blk_count = PAGE_CACHE_SIZE / 512;

	return host;

free:
	kfree(host);
	return NULL;
}

EXPORT_SYMBOL(mmc_alloc_host);
358 | ||
/**
 * mmc_add_host - initialise host hardware
 * @host: mmc host
 *
 * Register the host with the driver model. The host must be
 * prepared to start servicing requests before this function
 * completes.
 */
int mmc_add_host(struct mmc_host *host)
{
	int err;

	/* A host claiming SDIO IRQ support must supply the callback for it */
	WARN_ON((host->caps & MMC_CAP_SDIO_IRQ) &&
		!host->ops->enable_sdio_irq);

	err = device_add(&host->class_dev);
	if (err)
		return err;

	/* LED trigger named after the class device ("mmcN") */
	led_trigger_register_simple(dev_name(&host->class_dev), &host->led);

#ifdef CONFIG_DEBUG_FS
	mmc_add_host_debugfs(host);
#endif
	mmc_host_clk_sysfs_init(host);

	/* Start scanning for cards, then listen for suspend/resume events */
	mmc_start_host(host);
	register_pm_notifier(&host->pm_notify);

	return 0;
}

EXPORT_SYMBOL(mmc_add_host);
392 | ||
/**
 * mmc_remove_host - remove host hardware
 * @host: mmc host
 *
 * Unregister and remove all cards associated with this host,
 * and power down the MMC bus. No new requests will be issued
 * after this function has returned.
 */
void mmc_remove_host(struct mmc_host *host)
{
	/* Stop listening for PM events before tearing the host down */
	unregister_pm_notifier(&host->pm_notify);
	mmc_stop_host(host);

#ifdef CONFIG_DEBUG_FS
	mmc_remove_host_debugfs(host);
#endif

	device_del(&host->class_dev);

	led_trigger_unregister_simple(host->led);

	/* Flush pending gating work and leave the clock ungated */
	mmc_host_clk_exit(host);
}

EXPORT_SYMBOL(mmc_remove_host);
418 | ||
/**
 * mmc_free_host - free the host structure
 * @host: mmc host
 *
 * Free the host once all references to it have been dropped.
 */
void mmc_free_host(struct mmc_host *host)
{
	/* Return the mmcN index to the IDR for reuse */
	spin_lock(&mmc_host_lock);
	idr_remove(&mmc_host_idr, host->index);
	spin_unlock(&mmc_host_lock);

	/* Final put: mmc_host_classdev_release() kfrees the structure */
	put_device(&host->class_dev);
}

EXPORT_SYMBOL(mmc_free_host);