/*
 * OMAP2 McSPI controller driver
 *
 * Copyright (C) 2005, 2006 Nokia Corporation
 * Author:	Samuel Ortiz <samuel.ortiz@nokia.com> and
 *		Juha Yrjölä <juha.yrjola@nokia.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
#include <linux/of_device.h>

#include <linux/spi/spi.h>

#include <plat/dma.h>
#include <plat/clock.h>
#include <plat/mcspi.h>

#define OMAP2_MCSPI_MAX_FREQ		48000000
#define SPI_AUTOSUSPEND_TIMEOUT		2000

#define OMAP2_MCSPI_REVISION		0x00
#define OMAP2_MCSPI_SYSSTATUS		0x14
#define OMAP2_MCSPI_IRQSTATUS		0x18
#define OMAP2_MCSPI_IRQENABLE		0x1c
#define OMAP2_MCSPI_WAKEUPENABLE	0x20
#define OMAP2_MCSPI_SYST		0x24
#define OMAP2_MCSPI_MODULCTRL		0x28

/* per-channel banks, 0x14 bytes each, first is: */
#define OMAP2_MCSPI_CHCONF0		0x2c
#define OMAP2_MCSPI_CHSTAT0		0x30
#define OMAP2_MCSPI_CHCTRL0		0x34
#define OMAP2_MCSPI_TX0			0x38
#define OMAP2_MCSPI_RX0			0x3c

/* per-register bitmasks: */

#define OMAP2_MCSPI_MODULCTRL_SINGLE	BIT(0)
#define OMAP2_MCSPI_MODULCTRL_MS	BIT(2)
#define OMAP2_MCSPI_MODULCTRL_STEST	BIT(3)

#define OMAP2_MCSPI_CHCONF_PHA		BIT(0)
#define OMAP2_MCSPI_CHCONF_POL		BIT(1)
#define OMAP2_MCSPI_CHCONF_CLKD_MASK	(0x0f << 2)
#define OMAP2_MCSPI_CHCONF_EPOL		BIT(6)
#define OMAP2_MCSPI_CHCONF_WL_MASK	(0x1f << 7)
#define OMAP2_MCSPI_CHCONF_TRM_RX_ONLY	BIT(12)
#define OMAP2_MCSPI_CHCONF_TRM_TX_ONLY	BIT(13)
#define OMAP2_MCSPI_CHCONF_TRM_MASK	(0x03 << 12)
#define OMAP2_MCSPI_CHCONF_DMAW		BIT(14)
#define OMAP2_MCSPI_CHCONF_DMAR		BIT(15)
#define OMAP2_MCSPI_CHCONF_DPE0		BIT(16)
#define OMAP2_MCSPI_CHCONF_DPE1		BIT(17)
#define OMAP2_MCSPI_CHCONF_IS		BIT(18)
#define OMAP2_MCSPI_CHCONF_TURBO	BIT(19)
#define OMAP2_MCSPI_CHCONF_FORCE	BIT(20)

#define OMAP2_MCSPI_CHSTAT_RXS		BIT(0)
#define OMAP2_MCSPI_CHSTAT_TXS		BIT(1)
#define OMAP2_MCSPI_CHSTAT_EOT		BIT(2)

#define OMAP2_MCSPI_CHCTRL_EN		BIT(0)

#define OMAP2_MCSPI_WAKEUPENABLE_WKEN	BIT(0)

/* We have 2 DMA channels per CS, one for RX and one for TX */
struct omap2_mcspi_dma {
	int dma_tx_channel;
	int dma_rx_channel;

	int dma_tx_sync_dev;
	int dma_rx_sync_dev;

	struct completion dma_tx_completion;
	struct completion dma_rx_completion;
};

/* use PIO for small transfers, avoiding DMA setup/teardown overhead and
 * cache operations; better heuristics consider wordsize and bitrate.
 */
#define DMA_MIN_BYTES			160


/*
 * Used for context save and restore, structure members to be updated whenever
 * corresponding registers are modified.
 */
struct omap2_mcspi_regs {
	u32 modulctrl;
	u32 wakeupenable;
	struct list_head cs;
};

struct omap2_mcspi {
	struct spi_master *master;
	/* Virtual base address of the controller */
	void __iomem *base;
	unsigned long phys;
	/* SPI1 has 4 channels, while SPI2 has 2 */
	struct omap2_mcspi_dma *dma_channels;
	struct device *dev;
	struct omap2_mcspi_regs ctx;
};

struct omap2_mcspi_cs {
	void __iomem *base;
	unsigned long phys;
	int word_len;
	struct list_head node;
	/* Context save and restore shadow register */
	u32 chconf0;
};

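/* Set or clear @mask in @val, depending on whether @set is non-zero */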
#define MOD_REG_BIT(val, mask, set) do { \
	if (set) \
		val |= mask; \
	else \
		val &= ~mask; \
} while (0)

static inline void mcspi_write_reg(struct spi_master *master,
		int idx, u32 val)
{
	struct omap2_mcspi *mcspi = spi_master_get_devdata(master);

	__raw_writel(val, mcspi->base + idx);
}

static inline u32 mcspi_read_reg(struct spi_master *master, int idx)
{
	struct omap2_mcspi *mcspi = spi_master_get_devdata(master);

	return __raw_readl(mcspi->base + idx);
}

static inline void mcspi_write_cs_reg(const struct spi_device *spi,
		int idx, u32 val)
{
	struct omap2_mcspi_cs *cs = spi->controller_state;

	__raw_writel(val, cs->base + idx);
}

static inline u32 mcspi_read_cs_reg(const struct spi_device *spi, int idx)
{
	struct omap2_mcspi_cs *cs = spi->controller_state;

	return __raw_readl(cs->base + idx);
}

static inline u32 mcspi_cached_chconf0(const struct spi_device *spi)
{
	struct omap2_mcspi_cs *cs = spi->controller_state;

	return cs->chconf0;
}

static inline void mcspi_write_chconf0(const struct spi_device *spi, u32 val)
{
	struct omap2_mcspi_cs *cs = spi->controller_state;

	cs->chconf0 = val;
	mcspi_write_cs_reg(spi, OMAP2_MCSPI_CHCONF0, val);
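	/* Read back to flush the posted write before returning */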
	mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHCONF0);
}

static void omap2_mcspi_set_dma_req(const struct spi_device *spi,
		int is_read, int enable)
{
	u32 l, rw;

	l = mcspi_cached_chconf0(spi);

	if (is_read) /* 1 is read, 0 write */
		rw = OMAP2_MCSPI_CHCONF_DMAR;
	else
		rw = OMAP2_MCSPI_CHCONF_DMAW;

	MOD_REG_BIT(l, rw, enable);
	mcspi_write_chconf0(spi, l);
}

static void omap2_mcspi_set_enable(const struct spi_device *spi, int enable)
{
	u32 l;

	l = enable ? OMAP2_MCSPI_CHCTRL_EN : 0;
	mcspi_write_cs_reg(spi, OMAP2_MCSPI_CHCTRL0, l);
	/* Flush the posted write */
	mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHCTRL0);
}

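/* Assert or deassert the chip select line via the channel's FORCE bit */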
static void omap2_mcspi_force_cs(struct spi_device *spi, int cs_active)
{
	u32 l;

	l = mcspi_cached_chconf0(spi);
	MOD_REG_BIT(l, OMAP2_MCSPI_CHCONF_FORCE, cs_active);
	mcspi_write_chconf0(spi, l);
}

static void omap2_mcspi_set_master_mode(struct spi_master *master)
{
	struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
	struct omap2_mcspi_regs *ctx = &mcspi->ctx;
	u32 l;

	/*
	 * Setup when switching from (reset default) slave mode
	 * to single-channel master mode
	 */
	l = mcspi_read_reg(master, OMAP2_MCSPI_MODULCTRL);
	MOD_REG_BIT(l, OMAP2_MCSPI_MODULCTRL_STEST, 0);
	MOD_REG_BIT(l, OMAP2_MCSPI_MODULCTRL_MS, 0);
	MOD_REG_BIT(l, OMAP2_MCSPI_MODULCTRL_SINGLE, 1);
	mcspi_write_reg(master, OMAP2_MCSPI_MODULCTRL, l);

	ctx->modulctrl = l;
}

static void omap2_mcspi_restore_ctx(struct omap2_mcspi *mcspi)
{
	struct spi_master *spi_cntrl = mcspi->master;
	struct omap2_mcspi_regs *ctx = &mcspi->ctx;
	struct omap2_mcspi_cs *cs;

	/* McSPI: context restore */
	mcspi_write_reg(spi_cntrl, OMAP2_MCSPI_MODULCTRL, ctx->modulctrl);
	mcspi_write_reg(spi_cntrl, OMAP2_MCSPI_WAKEUPENABLE, ctx->wakeupenable);

	list_for_each_entry(cs, &ctx->cs, node)
		__raw_writel(cs->chconf0, cs->base + OMAP2_MCSPI_CHCONF0);
}

static void omap2_mcspi_disable_clocks(struct omap2_mcspi *mcspi)
{
	pm_runtime_mark_last_busy(mcspi->dev);
	pm_runtime_put_autosuspend(mcspi->dev);
}

static int omap2_mcspi_enable_clocks(struct omap2_mcspi *mcspi)
{
	return pm_runtime_get_sync(mcspi->dev);
}

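/*
 * Keep the controller runtime-active while the message pump is running;
 * the autosuspend timer lets it idle again after the last message.
 */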
static int omap2_prepare_transfer(struct spi_master *master)
{
	struct omap2_mcspi *mcspi = spi_master_get_devdata(master);

	pm_runtime_get_sync(mcspi->dev);
	return 0;
}

static int omap2_unprepare_transfer(struct spi_master *master)
{
	struct omap2_mcspi *mcspi = spi_master_get_devdata(master);

	pm_runtime_mark_last_busy(mcspi->dev);
	pm_runtime_put_autosuspend(mcspi->dev);
	return 0;
}

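/*
 * Busy-wait, for up to one second, until @bit is set in the register at @reg.
 * Returns 0 on success and -1 on timeout.
 */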
static int mcspi_wait_for_reg_bit(void __iomem *reg, unsigned long bit)
{
	unsigned long timeout;

	timeout = jiffies + msecs_to_jiffies(1000);
	while (!(__raw_readl(reg) & bit)) {
		if (time_after(jiffies, timeout))
			return -1;
		cpu_relax();
	}
	return 0;
}

static unsigned
omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer)
{
	struct omap2_mcspi *mcspi;
	struct omap2_mcspi_cs *cs = spi->controller_state;
	struct omap2_mcspi_dma *mcspi_dma;
	unsigned int count, c;
	unsigned long base, tx_reg, rx_reg;
	int word_len, data_type, element_count;
	int elements = 0;
	u32 l;
	u8 *rx;
	const u8 *tx;
	void __iomem *chstat_reg;

	mcspi = spi_master_get_devdata(spi->master);
	mcspi_dma = &mcspi->dma_channels[spi->chip_select];
	l = mcspi_cached_chconf0(spi);

	chstat_reg = cs->base + OMAP2_MCSPI_CHSTAT0;

	count = xfer->len;
	c = count;
	word_len = cs->word_len;

	base = cs->phys;
	tx_reg = base + OMAP2_MCSPI_TX0;
	rx_reg = base + OMAP2_MCSPI_RX0;
	rx = xfer->rx_buf;
	tx = xfer->tx_buf;

	if (word_len <= 8) {
		data_type = OMAP_DMA_DATA_TYPE_S8;
		element_count = count;
	} else if (word_len <= 16) {
		data_type = OMAP_DMA_DATA_TYPE_S16;
		element_count = count >> 1;
	} else /* word_len <= 32 */ {
		data_type = OMAP_DMA_DATA_TYPE_S32;
		element_count = count >> 2;
	}

	if (tx != NULL) {
		omap_set_dma_transfer_params(mcspi_dma->dma_tx_channel,
				data_type, element_count, 1,
				OMAP_DMA_SYNC_ELEMENT,
				mcspi_dma->dma_tx_sync_dev, 0);

		omap_set_dma_dest_params(mcspi_dma->dma_tx_channel, 0,
				OMAP_DMA_AMODE_CONSTANT,
				tx_reg, 0, 0);

		omap_set_dma_src_params(mcspi_dma->dma_tx_channel, 0,
				OMAP_DMA_AMODE_POST_INC,
				xfer->tx_dma, 0, 0);
	}

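	/*
	 * For RX, hand all but the last word (the last two in turbo mode) to
	 * the DMA engine; the remaining word(s) are drained by PIO further
	 * down, after the channel has been disabled, so the controller does
	 * not raise another DMA request at the end of an RX-only transfer.
	 */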
	if (rx != NULL) {
		elements = element_count - 1;
		if (l & OMAP2_MCSPI_CHCONF_TURBO)
			elements--;

		omap_set_dma_transfer_params(mcspi_dma->dma_rx_channel,
				data_type, elements, 1,
				OMAP_DMA_SYNC_ELEMENT,
				mcspi_dma->dma_rx_sync_dev, 1);

		omap_set_dma_src_params(mcspi_dma->dma_rx_channel, 0,
				OMAP_DMA_AMODE_CONSTANT,
				rx_reg, 0, 0);

		omap_set_dma_dest_params(mcspi_dma->dma_rx_channel, 0,
				OMAP_DMA_AMODE_POST_INC,
				xfer->rx_dma, 0, 0);
	}

	if (tx != NULL) {
		omap_start_dma(mcspi_dma->dma_tx_channel);
		omap2_mcspi_set_dma_req(spi, 0, 1);
	}

	if (rx != NULL) {
		omap_start_dma(mcspi_dma->dma_rx_channel);
		omap2_mcspi_set_dma_req(spi, 1, 1);
	}

	if (tx != NULL) {
		wait_for_completion(&mcspi_dma->dma_tx_completion);
		dma_unmap_single(mcspi->dev, xfer->tx_dma, count,
				DMA_TO_DEVICE);

		/* for TX_ONLY mode, be sure all words have shifted out */
		if (rx == NULL) {
			if (mcspi_wait_for_reg_bit(chstat_reg,
					OMAP2_MCSPI_CHSTAT_TXS) < 0)
				dev_err(&spi->dev, "TXS timed out\n");
			else if (mcspi_wait_for_reg_bit(chstat_reg,
					OMAP2_MCSPI_CHSTAT_EOT) < 0)
				dev_err(&spi->dev, "EOT timed out\n");
		}
	}

	if (rx != NULL) {
		wait_for_completion(&mcspi_dma->dma_rx_completion);
		dma_unmap_single(mcspi->dev, xfer->rx_dma, count,
				DMA_FROM_DEVICE);
		omap2_mcspi_set_enable(spi, 0);

		if (l & OMAP2_MCSPI_CHCONF_TURBO) {

			if (likely(mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHSTAT0)
					& OMAP2_MCSPI_CHSTAT_RXS)) {
				u32 w;

				w = mcspi_read_cs_reg(spi, OMAP2_MCSPI_RX0);
				if (word_len <= 8)
					((u8 *)xfer->rx_buf)[elements++] = w;
				else if (word_len <= 16)
					((u16 *)xfer->rx_buf)[elements++] = w;
				else /* word_len <= 32 */
					((u32 *)xfer->rx_buf)[elements++] = w;
			} else {
				dev_err(&spi->dev,
					"DMA RX penultimate word empty");
				count -= (word_len <= 8) ? 2 :
					(word_len <= 16) ? 4 :
					/* word_len <= 32 */ 8;
				omap2_mcspi_set_enable(spi, 1);
				return count;
			}
		}

		if (likely(mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHSTAT0)
				& OMAP2_MCSPI_CHSTAT_RXS)) {
			u32 w;

			w = mcspi_read_cs_reg(spi, OMAP2_MCSPI_RX0);
			if (word_len <= 8)
				((u8 *)xfer->rx_buf)[elements] = w;
			else if (word_len <= 16)
				((u16 *)xfer->rx_buf)[elements] = w;
			else /* word_len <= 32 */
				((u32 *)xfer->rx_buf)[elements] = w;
		} else {
			dev_err(&spi->dev, "DMA RX last word empty");
			count -= (word_len <= 8) ? 1 :
				(word_len <= 16) ? 2 :
				/* word_len <= 32 */ 4;
		}
		omap2_mcspi_set_enable(spi, 1);
	}
	return count;
}

static unsigned
omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer)
{
	struct omap2_mcspi *mcspi;
	struct omap2_mcspi_cs *cs = spi->controller_state;
	unsigned int count, c;
	u32 l;
	void __iomem *base = cs->base;
	void __iomem *tx_reg;
	void __iomem *rx_reg;
	void __iomem *chstat_reg;
	int word_len;

	mcspi = spi_master_get_devdata(spi->master);
	count = xfer->len;
	c = count;
	word_len = cs->word_len;

	l = mcspi_cached_chconf0(spi);

	/* We store the pre-calculated register addresses on stack to speed
	 * up the transfer loop. */
	tx_reg = base + OMAP2_MCSPI_TX0;
	rx_reg = base + OMAP2_MCSPI_RX0;
	chstat_reg = base + OMAP2_MCSPI_CHSTAT0;

	if (c < (word_len>>3))
		return 0;

	if (word_len <= 8) {
		u8 *rx;
		const u8 *tx;

		rx = xfer->rx_buf;
		tx = xfer->tx_buf;

		do {
			c -= 1;
			if (tx != NULL) {
				if (mcspi_wait_for_reg_bit(chstat_reg,
						OMAP2_MCSPI_CHSTAT_TXS) < 0) {
					dev_err(&spi->dev, "TXS timed out\n");
					goto out;
				}
				dev_vdbg(&spi->dev, "write-%d %02x\n",
						word_len, *tx);
				__raw_writel(*tx++, tx_reg);
			}
			if (rx != NULL) {
				if (mcspi_wait_for_reg_bit(chstat_reg,
						OMAP2_MCSPI_CHSTAT_RXS) < 0) {
					dev_err(&spi->dev, "RXS timed out\n");
					goto out;
				}

				if (c == 1 && tx == NULL &&
				    (l & OMAP2_MCSPI_CHCONF_TURBO)) {
					omap2_mcspi_set_enable(spi, 0);
					*rx++ = __raw_readl(rx_reg);
					dev_vdbg(&spi->dev, "read-%d %02x\n",
						word_len, *(rx - 1));
					if (mcspi_wait_for_reg_bit(chstat_reg,
						OMAP2_MCSPI_CHSTAT_RXS) < 0) {
						dev_err(&spi->dev,
							"RXS timed out\n");
						goto out;
					}
					c = 0;
				} else if (c == 0 && tx == NULL) {
					omap2_mcspi_set_enable(spi, 0);
				}

				*rx++ = __raw_readl(rx_reg);
				dev_vdbg(&spi->dev, "read-%d %02x\n",
						word_len, *(rx - 1));
			}
		} while (c);
	} else if (word_len <= 16) {
		u16 *rx;
		const u16 *tx;

		rx = xfer->rx_buf;
		tx = xfer->tx_buf;
		do {
			c -= 2;
			if (tx != NULL) {
				if (mcspi_wait_for_reg_bit(chstat_reg,
						OMAP2_MCSPI_CHSTAT_TXS) < 0) {
					dev_err(&spi->dev, "TXS timed out\n");
					goto out;
				}
				dev_vdbg(&spi->dev, "write-%d %04x\n",
						word_len, *tx);
				__raw_writel(*tx++, tx_reg);
			}
			if (rx != NULL) {
				if (mcspi_wait_for_reg_bit(chstat_reg,
						OMAP2_MCSPI_CHSTAT_RXS) < 0) {
					dev_err(&spi->dev, "RXS timed out\n");
					goto out;
				}

				if (c == 2 && tx == NULL &&
				    (l & OMAP2_MCSPI_CHCONF_TURBO)) {
					omap2_mcspi_set_enable(spi, 0);
					*rx++ = __raw_readl(rx_reg);
					dev_vdbg(&spi->dev, "read-%d %04x\n",
						word_len, *(rx - 1));
					if (mcspi_wait_for_reg_bit(chstat_reg,
						OMAP2_MCSPI_CHSTAT_RXS) < 0) {
						dev_err(&spi->dev,
							"RXS timed out\n");
						goto out;
					}
					c = 0;
				} else if (c == 0 && tx == NULL) {
					omap2_mcspi_set_enable(spi, 0);
				}

				*rx++ = __raw_readl(rx_reg);
				dev_vdbg(&spi->dev, "read-%d %04x\n",
						word_len, *(rx - 1));
			}
		} while (c >= 2);
	} else if (word_len <= 32) {
		u32 *rx;
		const u32 *tx;

		rx = xfer->rx_buf;
		tx = xfer->tx_buf;
		do {
			c -= 4;
			if (tx != NULL) {
				if (mcspi_wait_for_reg_bit(chstat_reg,
						OMAP2_MCSPI_CHSTAT_TXS) < 0) {
					dev_err(&spi->dev, "TXS timed out\n");
					goto out;
				}
				dev_vdbg(&spi->dev, "write-%d %08x\n",
						word_len, *tx);
				__raw_writel(*tx++, tx_reg);
			}
			if (rx != NULL) {
				if (mcspi_wait_for_reg_bit(chstat_reg,
						OMAP2_MCSPI_CHSTAT_RXS) < 0) {
					dev_err(&spi->dev, "RXS timed out\n");
					goto out;
				}

				if (c == 4 && tx == NULL &&
				    (l & OMAP2_MCSPI_CHCONF_TURBO)) {
					omap2_mcspi_set_enable(spi, 0);
					*rx++ = __raw_readl(rx_reg);
					dev_vdbg(&spi->dev, "read-%d %08x\n",
						word_len, *(rx - 1));
					if (mcspi_wait_for_reg_bit(chstat_reg,
						OMAP2_MCSPI_CHSTAT_RXS) < 0) {
						dev_err(&spi->dev,
							"RXS timed out\n");
						goto out;
					}
					c = 0;
				} else if (c == 0 && tx == NULL) {
					omap2_mcspi_set_enable(spi, 0);
				}

				*rx++ = __raw_readl(rx_reg);
				dev_vdbg(&spi->dev, "read-%d %08x\n",
						word_len, *(rx - 1));
			}
		} while (c >= 4);
	}

	/* for TX_ONLY mode, be sure all words have shifted out */
	if (xfer->rx_buf == NULL) {
		if (mcspi_wait_for_reg_bit(chstat_reg,
				OMAP2_MCSPI_CHSTAT_TXS) < 0) {
			dev_err(&spi->dev, "TXS timed out\n");
		} else if (mcspi_wait_for_reg_bit(chstat_reg,
				OMAP2_MCSPI_CHSTAT_EOT) < 0)
			dev_err(&spi->dev, "EOT timed out\n");

		/* Disable the channel to purge any RX data received during a
		 * TX_ONLY transfer; otherwise that stale RX data would corrupt
		 * the immediately following RX_ONLY transfer.
		 */
		omap2_mcspi_set_enable(spi, 0);
	}
out:
	omap2_mcspi_set_enable(spi, 1);
	return count - c;
}

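/*
 * Pick the smallest divider exponent (0..15) such that the 48 MHz reference
 * clock divided by 2^div does not exceed the requested speed; cap at 15.
 */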
static u32 omap2_mcspi_calc_divisor(u32 speed_hz)
{
	u32 div;

	for (div = 0; div < 15; div++)
		if (speed_hz >= (OMAP2_MCSPI_MAX_FREQ >> div))
			return div;

	return 15;
}

/* called only when no transfer is active to this device */
static int omap2_mcspi_setup_transfer(struct spi_device *spi,
		struct spi_transfer *t)
{
	struct omap2_mcspi_cs *cs = spi->controller_state;
	struct omap2_mcspi *mcspi;
	struct spi_master *spi_cntrl;
	u32 l = 0, div = 0;
	u8 word_len = spi->bits_per_word;
	u32 speed_hz = spi->max_speed_hz;

	mcspi = spi_master_get_devdata(spi->master);
	spi_cntrl = mcspi->master;

	if (t != NULL && t->bits_per_word)
		word_len = t->bits_per_word;

	cs->word_len = word_len;

	if (t && t->speed_hz)
		speed_hz = t->speed_hz;

	speed_hz = min_t(u32, speed_hz, OMAP2_MCSPI_MAX_FREQ);
	div = omap2_mcspi_calc_divisor(speed_hz);

	l = mcspi_cached_chconf0(spi);

	/* standard 4-wire master mode: SCK, MOSI/out, MISO/in, nCS
	 * REVISIT: this controller could support SPI_3WIRE mode.
	 */
	l &= ~(OMAP2_MCSPI_CHCONF_IS|OMAP2_MCSPI_CHCONF_DPE1);
	l |= OMAP2_MCSPI_CHCONF_DPE0;

	/* wordlength */
	l &= ~OMAP2_MCSPI_CHCONF_WL_MASK;
	l |= (word_len - 1) << 7;

	/* set chipselect polarity; manage with FORCE */
	if (!(spi->mode & SPI_CS_HIGH))
		l |= OMAP2_MCSPI_CHCONF_EPOL;	/* active-low; normal */
	else
		l &= ~OMAP2_MCSPI_CHCONF_EPOL;

	/* set clock divisor */
	l &= ~OMAP2_MCSPI_CHCONF_CLKD_MASK;
	l |= div << 2;

	/* set SPI mode 0..3 */
	if (spi->mode & SPI_CPOL)
		l |= OMAP2_MCSPI_CHCONF_POL;
	else
		l &= ~OMAP2_MCSPI_CHCONF_POL;
	if (spi->mode & SPI_CPHA)
		l |= OMAP2_MCSPI_CHCONF_PHA;
	else
		l &= ~OMAP2_MCSPI_CHCONF_PHA;

	mcspi_write_chconf0(spi, l);

	dev_dbg(&spi->dev, "setup: speed %d, sample %s edge, clk %s\n",
			OMAP2_MCSPI_MAX_FREQ >> div,
			(spi->mode & SPI_CPHA) ? "trailing" : "leading",
			(spi->mode & SPI_CPOL) ? "inverted" : "normal");

	return 0;
}

static void omap2_mcspi_dma_rx_callback(int lch, u16 ch_status, void *data)
{
	struct spi_device *spi = data;
	struct omap2_mcspi *mcspi;
	struct omap2_mcspi_dma *mcspi_dma;

	mcspi = spi_master_get_devdata(spi->master);
	mcspi_dma = &(mcspi->dma_channels[spi->chip_select]);

	complete(&mcspi_dma->dma_rx_completion);

	/* We must disable the DMA RX request */
	omap2_mcspi_set_dma_req(spi, 1, 0);
}

static void omap2_mcspi_dma_tx_callback(int lch, u16 ch_status, void *data)
{
	struct spi_device *spi = data;
	struct omap2_mcspi *mcspi;
	struct omap2_mcspi_dma *mcspi_dma;

	mcspi = spi_master_get_devdata(spi->master);
	mcspi_dma = &(mcspi->dma_channels[spi->chip_select]);

	complete(&mcspi_dma->dma_tx_completion);

	/* We must disable the DMA TX request */
	omap2_mcspi_set_dma_req(spi, 0, 0);
}

static int omap2_mcspi_request_dma(struct spi_device *spi)
{
	struct spi_master *master = spi->master;
	struct omap2_mcspi *mcspi;
	struct omap2_mcspi_dma *mcspi_dma;

	mcspi = spi_master_get_devdata(master);
	mcspi_dma = mcspi->dma_channels + spi->chip_select;

	if (omap_request_dma(mcspi_dma->dma_rx_sync_dev, "McSPI RX",
			omap2_mcspi_dma_rx_callback, spi,
			&mcspi_dma->dma_rx_channel)) {
		dev_err(&spi->dev, "no RX DMA channel for McSPI\n");
		return -EAGAIN;
	}

	if (omap_request_dma(mcspi_dma->dma_tx_sync_dev, "McSPI TX",
			omap2_mcspi_dma_tx_callback, spi,
			&mcspi_dma->dma_tx_channel)) {
		omap_free_dma(mcspi_dma->dma_rx_channel);
		mcspi_dma->dma_rx_channel = -1;
		dev_err(&spi->dev, "no TX DMA channel for McSPI\n");
		return -EAGAIN;
	}

	init_completion(&mcspi_dma->dma_rx_completion);
	init_completion(&mcspi_dma->dma_tx_completion);

	return 0;
}

static int omap2_mcspi_setup(struct spi_device *spi)
{
	int ret;
	struct omap2_mcspi *mcspi = spi_master_get_devdata(spi->master);
	struct omap2_mcspi_regs *ctx = &mcspi->ctx;
	struct omap2_mcspi_dma *mcspi_dma;
	struct omap2_mcspi_cs *cs = spi->controller_state;

	if (spi->bits_per_word < 4 || spi->bits_per_word > 32) {
		dev_dbg(&spi->dev, "setup: unsupported %d bit words\n",
			spi->bits_per_word);
		return -EINVAL;
	}

	mcspi_dma = &mcspi->dma_channels[spi->chip_select];

	if (!cs) {
		cs = kzalloc(sizeof *cs, GFP_KERNEL);
		if (!cs)
			return -ENOMEM;
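		/* Per-channel register banks are 0x14 bytes apart */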
		cs->base = mcspi->base + spi->chip_select * 0x14;
		cs->phys = mcspi->phys + spi->chip_select * 0x14;
		cs->chconf0 = 0;
		spi->controller_state = cs;
		/* Link this to context save list */
		list_add_tail(&cs->node, &ctx->cs);
	}

	if (mcspi_dma->dma_rx_channel == -1
			|| mcspi_dma->dma_tx_channel == -1) {
		ret = omap2_mcspi_request_dma(spi);
		if (ret < 0)
			return ret;
	}

	ret = omap2_mcspi_enable_clocks(mcspi);
	if (ret < 0)
		return ret;

	ret = omap2_mcspi_setup_transfer(spi, NULL);
	omap2_mcspi_disable_clocks(mcspi);

	return ret;
}

static void omap2_mcspi_cleanup(struct spi_device *spi)
{
	struct omap2_mcspi *mcspi;
	struct omap2_mcspi_dma *mcspi_dma;
	struct omap2_mcspi_cs *cs;

	mcspi = spi_master_get_devdata(spi->master);

	if (spi->controller_state) {
		/* Unlink controller state from context save list */
		cs = spi->controller_state;
		list_del(&cs->node);

		kfree(cs);
	}

	if (spi->chip_select < spi->master->num_chipselect) {
		mcspi_dma = &mcspi->dma_channels[spi->chip_select];

		if (mcspi_dma->dma_rx_channel != -1) {
			omap_free_dma(mcspi_dma->dma_rx_channel);
			mcspi_dma->dma_rx_channel = -1;
		}
		if (mcspi_dma->dma_tx_channel != -1) {
			omap_free_dma(mcspi_dma->dma_tx_channel);
			mcspi_dma->dma_tx_channel = -1;
		}
	}
}

static void omap2_mcspi_work(struct omap2_mcspi *mcspi, struct spi_message *m)
{

	/* We only enable one channel at a time -- the one whose message is
	 * at the head of the queue -- although this controller would gladly
	 * arbitrate among multiple channels. This corresponds to "single
	 * channel" master mode. As a side effect, we need to manage the
	 * chipselect with the FORCE bit ... CS != channel enable.
	 */

	struct spi_device *spi;
	struct spi_transfer *t = NULL;
	int cs_active = 0;
	struct omap2_mcspi_cs *cs;
	struct omap2_mcspi_device_config *cd;
	int par_override = 0;
	int status = 0;
	u32 chconf;

	spi = m->spi;
	cs = spi->controller_state;
	cd = spi->controller_data;

	omap2_mcspi_set_enable(spi, 1);
	list_for_each_entry(t, &m->transfers, transfer_list) {
		if (t->tx_buf == NULL && t->rx_buf == NULL && t->len) {
			status = -EINVAL;
			break;
		}
		if (par_override || t->speed_hz || t->bits_per_word) {
			par_override = 1;
			status = omap2_mcspi_setup_transfer(spi, t);
			if (status < 0)
				break;
			if (!t->speed_hz && !t->bits_per_word)
				par_override = 0;
		}

		if (!cs_active) {
			omap2_mcspi_force_cs(spi, 1);
			cs_active = 1;
		}

		chconf = mcspi_cached_chconf0(spi);
		chconf &= ~OMAP2_MCSPI_CHCONF_TRM_MASK;
		chconf &= ~OMAP2_MCSPI_CHCONF_TURBO;

		if (t->tx_buf == NULL)
			chconf |= OMAP2_MCSPI_CHCONF_TRM_RX_ONLY;
		else if (t->rx_buf == NULL)
			chconf |= OMAP2_MCSPI_CHCONF_TRM_TX_ONLY;

		if (cd && cd->turbo_mode && t->tx_buf == NULL) {
			/* Turbo mode is for more than one word */
			if (t->len > ((cs->word_len + 7) >> 3))
				chconf |= OMAP2_MCSPI_CHCONF_TURBO;
		}

		mcspi_write_chconf0(spi, chconf);

		if (t->len) {
			unsigned count;

			/* RX_ONLY mode needs dummy data in TX reg */
			if (t->tx_buf == NULL)
				__raw_writel(0, cs->base
						+ OMAP2_MCSPI_TX0);

			if (m->is_dma_mapped || t->len >= DMA_MIN_BYTES)
				count = omap2_mcspi_txrx_dma(spi, t);
			else
				count = omap2_mcspi_txrx_pio(spi, t);
			m->actual_length += count;

			if (count != t->len) {
				status = -EIO;
				break;
			}
		}

		if (t->delay_usecs)
			udelay(t->delay_usecs);

		/* ignore the "leave it on after last xfer" hint */
		if (t->cs_change) {
			omap2_mcspi_force_cs(spi, 0);
			cs_active = 0;
		}
	}
	/* Restore defaults if they were overridden */
	if (par_override) {
		par_override = 0;
		status = omap2_mcspi_setup_transfer(spi, NULL);
	}

	if (cs_active)
		omap2_mcspi_force_cs(spi, 0);

	omap2_mcspi_set_enable(spi, 0);

	m->status = status;

}

static int omap2_mcspi_transfer_one_message(struct spi_master *master,
		struct spi_message *m)
{
	struct omap2_mcspi *mcspi;
	struct spi_transfer *t;

	mcspi = spi_master_get_devdata(master);
	m->actual_length = 0;
	m->status = 0;

	/* reject invalid messages and transfers */
	if (list_empty(&m->transfers))
		return -EINVAL;
	list_for_each_entry(t, &m->transfers, transfer_list) {
		const void *tx_buf = t->tx_buf;
		void *rx_buf = t->rx_buf;
		unsigned len = t->len;

		if (t->speed_hz > OMAP2_MCSPI_MAX_FREQ
				|| (len && !(rx_buf || tx_buf))
				|| (t->bits_per_word &&
					(t->bits_per_word < 4
					|| t->bits_per_word > 32))) {
			dev_dbg(mcspi->dev, "transfer: %d Hz, %d %s%s, %d bpw\n",
					t->speed_hz,
					len,
					tx_buf ? "tx" : "",
					rx_buf ? "rx" : "",
					t->bits_per_word);
			return -EINVAL;
		}
		if (t->speed_hz && t->speed_hz < (OMAP2_MCSPI_MAX_FREQ >> 15)) {
			dev_dbg(mcspi->dev, "speed_hz %d below minimum %d Hz\n",
					t->speed_hz,
					OMAP2_MCSPI_MAX_FREQ >> 15);
			return -EINVAL;
		}

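		/* Only transfers long enough to take the DMA path need their
		 * buffers mapped here; short or pre-mapped ones are skipped. */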
		if (m->is_dma_mapped || len < DMA_MIN_BYTES)
			continue;

		if (tx_buf != NULL) {
			t->tx_dma = dma_map_single(mcspi->dev, (void *) tx_buf,
					len, DMA_TO_DEVICE);
			if (dma_mapping_error(mcspi->dev, t->tx_dma)) {
				dev_dbg(mcspi->dev, "dma %cX %d bytes error\n",
						'T', len);
				return -EINVAL;
			}
		}
		if (rx_buf != NULL) {
			t->rx_dma = dma_map_single(mcspi->dev, rx_buf, t->len,
					DMA_FROM_DEVICE);
			if (dma_mapping_error(mcspi->dev, t->rx_dma)) {
				dev_dbg(mcspi->dev, "dma %cX %d bytes error\n",
						'R', len);
				if (tx_buf != NULL)
					dma_unmap_single(mcspi->dev, t->tx_dma,
							len, DMA_TO_DEVICE);
				return -EINVAL;
			}
		}
	}

	omap2_mcspi_work(mcspi, m);
	spi_finalize_current_message(master);
	return 0;
}

static int __devinit omap2_mcspi_master_setup(struct omap2_mcspi *mcspi)
{
	struct spi_master *master = mcspi->master;
	struct omap2_mcspi_regs *ctx = &mcspi->ctx;
	int ret = 0;

	ret = omap2_mcspi_enable_clocks(mcspi);
	if (ret < 0)
		return ret;

	mcspi_write_reg(master, OMAP2_MCSPI_WAKEUPENABLE,
			OMAP2_MCSPI_WAKEUPENABLE_WKEN);
	ctx->wakeupenable = OMAP2_MCSPI_WAKEUPENABLE_WKEN;

	omap2_mcspi_set_master_mode(master);
	omap2_mcspi_disable_clocks(mcspi);
	return 0;
}

static int omap_mcspi_runtime_resume(struct device *dev)
{
	struct omap2_mcspi *mcspi;
	struct spi_master *master;

	master = dev_get_drvdata(dev);
	mcspi = spi_master_get_devdata(master);
	omap2_mcspi_restore_ctx(mcspi);

	return 0;
}

static struct omap2_mcspi_platform_config omap2_pdata = {
	.regs_offset = 0,
};

static struct omap2_mcspi_platform_config omap4_pdata = {
	.regs_offset = OMAP4_MCSPI_REG_OFFSET,
};

static const struct of_device_id omap_mcspi_of_match[] = {
	{
		.compatible = "ti,omap2-mcspi",
		.data = &omap2_pdata,
	},
	{
		.compatible = "ti,omap4-mcspi",
		.data = &omap4_pdata,
	},
	{ },
};
MODULE_DEVICE_TABLE(of, omap_mcspi_of_match);

static int __devinit omap2_mcspi_probe(struct platform_device *pdev)
{
	struct spi_master *master;
	struct omap2_mcspi_platform_config *pdata;
	struct omap2_mcspi *mcspi;
	struct resource *r;
	int status = 0, i;
	u32 regs_offset = 0;
	static int bus_num = 1;
	struct device_node *node = pdev->dev.of_node;
	const struct of_device_id *match;

	master = spi_alloc_master(&pdev->dev, sizeof *mcspi);
	if (master == NULL) {
		dev_dbg(&pdev->dev, "master allocation failed\n");
		return -ENOMEM;
	}

	/* the spi->mode bits understood by this driver: */
	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;

	master->setup = omap2_mcspi_setup;
	master->prepare_transfer_hardware = omap2_prepare_transfer;
	master->unprepare_transfer_hardware = omap2_unprepare_transfer;
	master->transfer_one_message = omap2_mcspi_transfer_one_message;
	master->cleanup = omap2_mcspi_cleanup;
	master->dev.of_node = node;

	match = of_match_device(omap_mcspi_of_match, &pdev->dev);
	if (match) {
		u32 num_cs = 1; /* default number of chipselect */
		pdata = match->data;

		of_property_read_u32(node, "ti,spi-num-cs", &num_cs);
		master->num_chipselect = num_cs;
		master->bus_num = bus_num++;
	} else {
		pdata = pdev->dev.platform_data;
		master->num_chipselect = pdata->num_cs;
		if (pdev->id != -1)
			master->bus_num = pdev->id;
	}
	regs_offset = pdata->regs_offset;

	dev_set_drvdata(&pdev->dev, master);

	mcspi = spi_master_get_devdata(master);
	mcspi->master = master;

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (r == NULL) {
		status = -ENODEV;
		goto free_master;
	}

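	/* Account for the SoC-specific register offset (non-zero on OMAP4) */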
	r->start += regs_offset;
	r->end += regs_offset;
	mcspi->phys = r->start;

	mcspi->base = devm_request_and_ioremap(&pdev->dev, r);
	if (!mcspi->base) {
		dev_dbg(&pdev->dev, "can't ioremap MCSPI\n");
		status = -ENOMEM;
		goto free_master;
	}

	mcspi->dev = &pdev->dev;

	INIT_LIST_HEAD(&mcspi->ctx.cs);

	mcspi->dma_channels = kcalloc(master->num_chipselect,
			sizeof(struct omap2_mcspi_dma),
			GFP_KERNEL);

	if (mcspi->dma_channels == NULL)
		goto free_master;

	for (i = 0; i < master->num_chipselect; i++) {
		char dma_ch_name[14];
		struct resource *dma_res;

		sprintf(dma_ch_name, "rx%d", i);
		dma_res = platform_get_resource_byname(pdev, IORESOURCE_DMA,
				dma_ch_name);
		if (!dma_res) {
			dev_dbg(&pdev->dev, "cannot get DMA RX channel\n");
			status = -ENODEV;
			break;
		}

		mcspi->dma_channels[i].dma_rx_channel = -1;
		mcspi->dma_channels[i].dma_rx_sync_dev = dma_res->start;
		sprintf(dma_ch_name, "tx%d", i);
		dma_res = platform_get_resource_byname(pdev, IORESOURCE_DMA,
				dma_ch_name);
		if (!dma_res) {
			dev_dbg(&pdev->dev, "cannot get DMA TX channel\n");
			status = -ENODEV;
			break;
		}

		mcspi->dma_channels[i].dma_tx_channel = -1;
		mcspi->dma_channels[i].dma_tx_sync_dev = dma_res->start;
	}

	if (status < 0)
		goto dma_chnl_free;

	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_set_autosuspend_delay(&pdev->dev, SPI_AUTOSUSPEND_TIMEOUT);
	pm_runtime_enable(&pdev->dev);

	if (status || omap2_mcspi_master_setup(mcspi) < 0)
		goto disable_pm;

	status = spi_register_master(master);
	if (status < 0)
		goto err_spi_register;

	return status;

err_spi_register:
	spi_master_put(master);
disable_pm:
	pm_runtime_disable(&pdev->dev);
dma_chnl_free:
	kfree(mcspi->dma_channels);
free_master:
	kfree(master);
	platform_set_drvdata(pdev, NULL);
	return status;
}

static int __devexit omap2_mcspi_remove(struct platform_device *pdev)
{
	struct spi_master *master;
	struct omap2_mcspi *mcspi;
	struct omap2_mcspi_dma *dma_channels;

	master = dev_get_drvdata(&pdev->dev);
	mcspi = spi_master_get_devdata(master);
	dma_channels = mcspi->dma_channels;

	omap2_mcspi_disable_clocks(mcspi);
	pm_runtime_disable(&pdev->dev);

	spi_unregister_master(master);
	kfree(dma_channels);
	platform_set_drvdata(pdev, NULL);

	return 0;
}

/* work with hotplug and coldplug */
MODULE_ALIAS("platform:omap2_mcspi");

#ifdef CONFIG_SUSPEND
/*
 * When the SPI controller wakes up from off-mode, CS is in the active state.
 * If it was in the inactive state when the driver was suspended, force it
 * back to the inactive state on wake-up.
 */
static int omap2_mcspi_resume(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
	struct omap2_mcspi_regs *ctx = &mcspi->ctx;
	struct omap2_mcspi_cs *cs;

	omap2_mcspi_enable_clocks(mcspi);
	list_for_each_entry(cs, &ctx->cs, node) {
		if ((cs->chconf0 & OMAP2_MCSPI_CHCONF_FORCE) == 0) {
			/*
			 * We need to toggle the CS state for the OMAP to take
			 * this change into account.
			 */
			MOD_REG_BIT(cs->chconf0, OMAP2_MCSPI_CHCONF_FORCE, 1);
			__raw_writel(cs->chconf0, cs->base + OMAP2_MCSPI_CHCONF0);
			MOD_REG_BIT(cs->chconf0, OMAP2_MCSPI_CHCONF_FORCE, 0);
			__raw_writel(cs->chconf0, cs->base + OMAP2_MCSPI_CHCONF0);
		}
	}
	omap2_mcspi_disable_clocks(mcspi);
	return 0;
}
#else
#define omap2_mcspi_resume	NULL
#endif

static const struct dev_pm_ops omap2_mcspi_pm_ops = {
	.resume = omap2_mcspi_resume,
	.runtime_resume = omap_mcspi_runtime_resume,
};

static struct platform_driver omap2_mcspi_driver = {
	.driver = {
		.name = "omap2_mcspi",
		.owner = THIS_MODULE,
		.pm = &omap2_mcspi_pm_ops,
		.of_match_table = omap_mcspi_of_match,
	},
	.probe = omap2_mcspi_probe,
	.remove = __devexit_p(omap2_mcspi_remove),
};

module_platform_driver(omap2_mcspi_driver);
MODULE_LICENSE("GPL");