/* spi/s3c64xx: Prevent unnecessary map-unmap */
1/* linux/drivers/spi/spi_s3c64xx.c
2 *
3 * Copyright (C) 2009 Samsung Electronics Ltd.
4 * Jaswinder Singh <jassi.brar@samsung.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19 */
20
21#include <linux/init.h>
22#include <linux/module.h>
23#include <linux/workqueue.h>
24#include <linux/delay.h>
25#include <linux/clk.h>
26#include <linux/dma-mapping.h>
27#include <linux/platform_device.h>
28#include <linux/spi/spi.h>
29
30#include <mach/dma.h>
e6b873c9 31#include <plat/s3c64xx-spi.h>
32
33/* Registers and bit-fields */
34
35#define S3C64XX_SPI_CH_CFG 0x00
36#define S3C64XX_SPI_CLK_CFG 0x04
37#define S3C64XX_SPI_MODE_CFG 0x08
38#define S3C64XX_SPI_SLAVE_SEL 0x0C
39#define S3C64XX_SPI_INT_EN 0x10
40#define S3C64XX_SPI_STATUS 0x14
41#define S3C64XX_SPI_TX_DATA 0x18
42#define S3C64XX_SPI_RX_DATA 0x1C
43#define S3C64XX_SPI_PACKET_CNT 0x20
44#define S3C64XX_SPI_PENDING_CLR 0x24
45#define S3C64XX_SPI_SWAP_CFG 0x28
46#define S3C64XX_SPI_FB_CLK 0x2C
47
48#define S3C64XX_SPI_CH_HS_EN (1<<6) /* High Speed Enable */
49#define S3C64XX_SPI_CH_SW_RST (1<<5)
50#define S3C64XX_SPI_CH_SLAVE (1<<4)
51#define S3C64XX_SPI_CPOL_L (1<<3)
52#define S3C64XX_SPI_CPHA_B (1<<2)
53#define S3C64XX_SPI_CH_RXCH_ON (1<<1)
54#define S3C64XX_SPI_CH_TXCH_ON (1<<0)
55
56#define S3C64XX_SPI_CLKSEL_SRCMSK (3<<9)
57#define S3C64XX_SPI_CLKSEL_SRCSHFT 9
58#define S3C64XX_SPI_ENCLK_ENABLE (1<<8)
59#define S3C64XX_SPI_PSR_MASK 0xff
60
61#define S3C64XX_SPI_MODE_CH_TSZ_BYTE (0<<29)
62#define S3C64XX_SPI_MODE_CH_TSZ_HALFWORD (1<<29)
63#define S3C64XX_SPI_MODE_CH_TSZ_WORD (2<<29)
64#define S3C64XX_SPI_MODE_CH_TSZ_MASK (3<<29)
65#define S3C64XX_SPI_MODE_BUS_TSZ_BYTE (0<<17)
66#define S3C64XX_SPI_MODE_BUS_TSZ_HALFWORD (1<<17)
67#define S3C64XX_SPI_MODE_BUS_TSZ_WORD (2<<17)
68#define S3C64XX_SPI_MODE_BUS_TSZ_MASK (3<<17)
69#define S3C64XX_SPI_MODE_RXDMA_ON (1<<2)
70#define S3C64XX_SPI_MODE_TXDMA_ON (1<<1)
71#define S3C64XX_SPI_MODE_4BURST (1<<0)
72
73#define S3C64XX_SPI_SLAVE_AUTO (1<<1)
74#define S3C64XX_SPI_SLAVE_SIG_INACT (1<<0)
75
76#define S3C64XX_SPI_ACT(c) writel(0, (c)->regs + S3C64XX_SPI_SLAVE_SEL)
77
78#define S3C64XX_SPI_DEACT(c) writel(S3C64XX_SPI_SLAVE_SIG_INACT, \
79 (c)->regs + S3C64XX_SPI_SLAVE_SEL)
80
81#define S3C64XX_SPI_INT_TRAILING_EN (1<<6)
82#define S3C64XX_SPI_INT_RX_OVERRUN_EN (1<<5)
83#define S3C64XX_SPI_INT_RX_UNDERRUN_EN (1<<4)
84#define S3C64XX_SPI_INT_TX_OVERRUN_EN (1<<3)
85#define S3C64XX_SPI_INT_TX_UNDERRUN_EN (1<<2)
86#define S3C64XX_SPI_INT_RX_FIFORDY_EN (1<<1)
87#define S3C64XX_SPI_INT_TX_FIFORDY_EN (1<<0)
88
89#define S3C64XX_SPI_ST_RX_OVERRUN_ERR (1<<5)
90#define S3C64XX_SPI_ST_RX_UNDERRUN_ERR (1<<4)
91#define S3C64XX_SPI_ST_TX_OVERRUN_ERR (1<<3)
92#define S3C64XX_SPI_ST_TX_UNDERRUN_ERR (1<<2)
93#define S3C64XX_SPI_ST_RX_FIFORDY (1<<1)
94#define S3C64XX_SPI_ST_TX_FIFORDY (1<<0)
95
96#define S3C64XX_SPI_PACKET_CNT_EN (1<<16)
97
98#define S3C64XX_SPI_PND_TX_UNDERRUN_CLR (1<<4)
99#define S3C64XX_SPI_PND_TX_OVERRUN_CLR (1<<3)
100#define S3C64XX_SPI_PND_RX_UNDERRUN_CLR (1<<2)
101#define S3C64XX_SPI_PND_RX_OVERRUN_CLR (1<<1)
102#define S3C64XX_SPI_PND_TRAILING_CLR (1<<0)
103
104#define S3C64XX_SPI_SWAP_RX_HALF_WORD (1<<7)
105#define S3C64XX_SPI_SWAP_RX_BYTE (1<<6)
106#define S3C64XX_SPI_SWAP_RX_BIT (1<<5)
107#define S3C64XX_SPI_SWAP_RX_EN (1<<4)
108#define S3C64XX_SPI_SWAP_TX_HALF_WORD (1<<3)
109#define S3C64XX_SPI_SWAP_TX_BYTE (1<<2)
110#define S3C64XX_SPI_SWAP_TX_BIT (1<<1)
111#define S3C64XX_SPI_SWAP_TX_EN (1<<0)
112
113#define S3C64XX_SPI_FBCLK_MSK (3<<0)
114
115#define S3C64XX_SPI_ST_TRLCNTZ(v, i) ((((v) >> (i)->rx_lvl_offset) & \
116 (((i)->fifo_lvl_mask + 1))) \
117 ? 1 : 0)
118
119#define S3C64XX_SPI_ST_TX_DONE(v, i) ((((v) >> (i)->rx_lvl_offset) & \
120 (((i)->fifo_lvl_mask + 1) << 1)) \
121 ? 1 : 0)
122#define TX_FIFO_LVL(v, i) (((v) >> 6) & (i)->fifo_lvl_mask)
123#define RX_FIFO_LVL(v, i) (((v) >> (i)->rx_lvl_offset) & (i)->fifo_lvl_mask)
124
125#define S3C64XX_SPI_MAX_TRAILCNT 0x3ff
126#define S3C64XX_SPI_TRAILCNT_OFF 19
127
128#define S3C64XX_SPI_TRAILCNT S3C64XX_SPI_MAX_TRAILCNT
129
130#define msecs_to_loops(t) (loops_per_jiffy / 1000 * HZ * t)
131
132#define SUSPND (1<<0)
133#define SPIBUSY (1<<1)
134#define RXBUSY (1<<2)
135#define TXBUSY (1<<3)
136
137/**
138 * struct s3c64xx_spi_driver_data - Runtime info holder for SPI driver.
139 * @clk: Pointer to the spi clock.
b0d5d6e5 140 * @src_clk: Pointer to the clock used to generate SPI signals.
230d42d4
JB
141 * @master: Pointer to the SPI Protocol master.
142 * @workqueue: Work queue for the SPI xfer requests.
143 * @cntrlr_info: Platform specific data for the controller this driver manages.
144 * @tgl_spi: Pointer to the last CS left untoggled by the cs_change hint.
145 * @work: Work
146 * @queue: To log SPI xfer requests.
147 * @lock: Controller specific lock.
148 * @state: Set of FLAGS to indicate status.
149 * @rx_dmach: Controller's DMA channel for Rx.
150 * @tx_dmach: Controller's DMA channel for Tx.
151 * @sfr_start: BUS address of SPI controller regs.
152 * @regs: Pointer to ioremap'ed controller registers.
153 * @xfer_completion: To indicate completion of xfer task.
154 * @cur_mode: Stores the active configuration of the controller.
155 * @cur_bpw: Stores the active bits per word settings.
156 * @cur_speed: Stores the active xfer clock speed.
157 */
158struct s3c64xx_spi_driver_data {
159 void __iomem *regs;
160 struct clk *clk;
b0d5d6e5 161 struct clk *src_clk;
230d42d4
JB
162 struct platform_device *pdev;
163 struct spi_master *master;
164 struct workqueue_struct *workqueue;
ad7de729 165 struct s3c64xx_spi_info *cntrlr_info;
230d42d4
JB
166 struct spi_device *tgl_spi;
167 struct work_struct work;
168 struct list_head queue;
169 spinlock_t lock;
170 enum dma_ch rx_dmach;
171 enum dma_ch tx_dmach;
172 unsigned long sfr_start;
173 struct completion xfer_completion;
174 unsigned state;
175 unsigned cur_mode, cur_bpw;
176 unsigned cur_speed;
177};
178
179static struct s3c2410_dma_client s3c64xx_spi_dma_client = {
180 .name = "samsung-spi-dma",
181};
182
183static void flush_fifo(struct s3c64xx_spi_driver_data *sdd)
184{
ad7de729 185 struct s3c64xx_spi_info *sci = sdd->cntrlr_info;
230d42d4
JB
186 void __iomem *regs = sdd->regs;
187 unsigned long loops;
188 u32 val;
189
190 writel(0, regs + S3C64XX_SPI_PACKET_CNT);
191
192 val = readl(regs + S3C64XX_SPI_CH_CFG);
193 val |= S3C64XX_SPI_CH_SW_RST;
194 val &= ~S3C64XX_SPI_CH_HS_EN;
195 writel(val, regs + S3C64XX_SPI_CH_CFG);
196
197 /* Flush TxFIFO*/
198 loops = msecs_to_loops(1);
199 do {
200 val = readl(regs + S3C64XX_SPI_STATUS);
201 } while (TX_FIFO_LVL(val, sci) && loops--);
202
be7852a8
MB
203 if (loops == 0)
204 dev_warn(&sdd->pdev->dev, "Timed out flushing TX FIFO\n");
205
230d42d4
JB
206 /* Flush RxFIFO*/
207 loops = msecs_to_loops(1);
208 do {
209 val = readl(regs + S3C64XX_SPI_STATUS);
210 if (RX_FIFO_LVL(val, sci))
211 readl(regs + S3C64XX_SPI_RX_DATA);
212 else
213 break;
214 } while (loops--);
215
be7852a8
MB
216 if (loops == 0)
217 dev_warn(&sdd->pdev->dev, "Timed out flushing RX FIFO\n");
218
230d42d4
JB
219 val = readl(regs + S3C64XX_SPI_CH_CFG);
220 val &= ~S3C64XX_SPI_CH_SW_RST;
221 writel(val, regs + S3C64XX_SPI_CH_CFG);
222
223 val = readl(regs + S3C64XX_SPI_MODE_CFG);
224 val &= ~(S3C64XX_SPI_MODE_TXDMA_ON | S3C64XX_SPI_MODE_RXDMA_ON);
225 writel(val, regs + S3C64XX_SPI_MODE_CFG);
226
227 val = readl(regs + S3C64XX_SPI_CH_CFG);
228 val &= ~(S3C64XX_SPI_CH_RXCH_ON | S3C64XX_SPI_CH_TXCH_ON);
229 writel(val, regs + S3C64XX_SPI_CH_CFG);
230}
231
232static void enable_datapath(struct s3c64xx_spi_driver_data *sdd,
233 struct spi_device *spi,
234 struct spi_transfer *xfer, int dma_mode)
235{
ad7de729 236 struct s3c64xx_spi_info *sci = sdd->cntrlr_info;
230d42d4
JB
237 void __iomem *regs = sdd->regs;
238 u32 modecfg, chcfg;
239
240 modecfg = readl(regs + S3C64XX_SPI_MODE_CFG);
241 modecfg &= ~(S3C64XX_SPI_MODE_TXDMA_ON | S3C64XX_SPI_MODE_RXDMA_ON);
242
243 chcfg = readl(regs + S3C64XX_SPI_CH_CFG);
244 chcfg &= ~S3C64XX_SPI_CH_TXCH_ON;
245
246 if (dma_mode) {
247 chcfg &= ~S3C64XX_SPI_CH_RXCH_ON;
248 } else {
249 /* Always shift in data in FIFO, even if xfer is Tx only,
250 * this helps setting PCKT_CNT value for generating clocks
251 * as exactly needed.
252 */
253 chcfg |= S3C64XX_SPI_CH_RXCH_ON;
254 writel(((xfer->len * 8 / sdd->cur_bpw) & 0xffff)
255 | S3C64XX_SPI_PACKET_CNT_EN,
256 regs + S3C64XX_SPI_PACKET_CNT);
257 }
258
259 if (xfer->tx_buf != NULL) {
260 sdd->state |= TXBUSY;
261 chcfg |= S3C64XX_SPI_CH_TXCH_ON;
262 if (dma_mode) {
263 modecfg |= S3C64XX_SPI_MODE_TXDMA_ON;
264 s3c2410_dma_config(sdd->tx_dmach, 1);
265 s3c2410_dma_enqueue(sdd->tx_dmach, (void *)sdd,
266 xfer->tx_dma, xfer->len);
267 s3c2410_dma_ctrl(sdd->tx_dmach, S3C2410_DMAOP_START);
268 } else {
269 unsigned char *buf = (unsigned char *) xfer->tx_buf;
270 int i = 0;
271 while (i < xfer->len)
272 writeb(buf[i++], regs + S3C64XX_SPI_TX_DATA);
273 }
274 }
275
276 if (xfer->rx_buf != NULL) {
277 sdd->state |= RXBUSY;
278
279 if (sci->high_speed && sdd->cur_speed >= 30000000UL
280 && !(sdd->cur_mode & SPI_CPHA))
281 chcfg |= S3C64XX_SPI_CH_HS_EN;
282
283 if (dma_mode) {
284 modecfg |= S3C64XX_SPI_MODE_RXDMA_ON;
285 chcfg |= S3C64XX_SPI_CH_RXCH_ON;
286 writel(((xfer->len * 8 / sdd->cur_bpw) & 0xffff)
287 | S3C64XX_SPI_PACKET_CNT_EN,
288 regs + S3C64XX_SPI_PACKET_CNT);
289 s3c2410_dma_config(sdd->rx_dmach, 1);
290 s3c2410_dma_enqueue(sdd->rx_dmach, (void *)sdd,
291 xfer->rx_dma, xfer->len);
292 s3c2410_dma_ctrl(sdd->rx_dmach, S3C2410_DMAOP_START);
293 }
294 }
295
296 writel(modecfg, regs + S3C64XX_SPI_MODE_CFG);
297 writel(chcfg, regs + S3C64XX_SPI_CH_CFG);
298}
299
300static inline void enable_cs(struct s3c64xx_spi_driver_data *sdd,
301 struct spi_device *spi)
302{
303 struct s3c64xx_spi_csinfo *cs;
304
305 if (sdd->tgl_spi != NULL) { /* If last device toggled after mssg */
306 if (sdd->tgl_spi != spi) { /* if last mssg on diff device */
307 /* Deselect the last toggled device */
308 cs = sdd->tgl_spi->controller_data;
fa0fcde6
JB
309 cs->set_level(cs->line,
310 spi->mode & SPI_CS_HIGH ? 0 : 1);
230d42d4
JB
311 }
312 sdd->tgl_spi = NULL;
313 }
314
315 cs = spi->controller_data;
fa0fcde6 316 cs->set_level(cs->line, spi->mode & SPI_CS_HIGH ? 1 : 0);
230d42d4
JB
317}
318
319static int wait_for_xfer(struct s3c64xx_spi_driver_data *sdd,
320 struct spi_transfer *xfer, int dma_mode)
321{
ad7de729 322 struct s3c64xx_spi_info *sci = sdd->cntrlr_info;
230d42d4
JB
323 void __iomem *regs = sdd->regs;
324 unsigned long val;
325 int ms;
326
327 /* millisecs to xfer 'len' bytes @ 'cur_speed' */
328 ms = xfer->len * 8 * 1000 / sdd->cur_speed;
9d8f86b5 329 ms += 10; /* some tolerance */
230d42d4
JB
330
331 if (dma_mode) {
332 val = msecs_to_jiffies(ms) + 10;
333 val = wait_for_completion_timeout(&sdd->xfer_completion, val);
334 } else {
c3f139b6 335 u32 status;
230d42d4
JB
336 val = msecs_to_loops(ms);
337 do {
c3f139b6
JB
338 status = readl(regs + S3C64XX_SPI_STATUS);
339 } while (RX_FIFO_LVL(status, sci) < xfer->len && --val);
230d42d4
JB
340 }
341
342 if (!val)
343 return -EIO;
344
345 if (dma_mode) {
346 u32 status;
347
348 /*
349 * DmaTx returns after simply writing data in the FIFO,
350 * w/o waiting for real transmission on the bus to finish.
351 * DmaRx returns only after Dma read data from FIFO which
352 * needs bus transmission to finish, so we don't worry if
353 * Xfer involved Rx(with or without Tx).
354 */
355 if (xfer->rx_buf == NULL) {
356 val = msecs_to_loops(10);
357 status = readl(regs + S3C64XX_SPI_STATUS);
358 while ((TX_FIFO_LVL(status, sci)
359 || !S3C64XX_SPI_ST_TX_DONE(status, sci))
360 && --val) {
361 cpu_relax();
362 status = readl(regs + S3C64XX_SPI_STATUS);
363 }
364
365 if (!val)
366 return -EIO;
367 }
368 } else {
369 unsigned char *buf;
370 int i;
371
372 /* If it was only Tx */
373 if (xfer->rx_buf == NULL) {
374 sdd->state &= ~TXBUSY;
375 return 0;
376 }
377
378 i = 0;
379 buf = xfer->rx_buf;
380 while (i < xfer->len)
381 buf[i++] = readb(regs + S3C64XX_SPI_RX_DATA);
382
383 sdd->state &= ~RXBUSY;
384 }
385
386 return 0;
387}
388
389static inline void disable_cs(struct s3c64xx_spi_driver_data *sdd,
390 struct spi_device *spi)
391{
392 struct s3c64xx_spi_csinfo *cs = spi->controller_data;
393
394 if (sdd->tgl_spi == spi)
395 sdd->tgl_spi = NULL;
396
fa0fcde6 397 cs->set_level(cs->line, spi->mode & SPI_CS_HIGH ? 0 : 1);
230d42d4
JB
398}
399
400static void s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd)
401{
230d42d4
JB
402 void __iomem *regs = sdd->regs;
403 u32 val;
404
405 /* Disable Clock */
406 val = readl(regs + S3C64XX_SPI_CLK_CFG);
407 val &= ~S3C64XX_SPI_ENCLK_ENABLE;
408 writel(val, regs + S3C64XX_SPI_CLK_CFG);
409
410 /* Set Polarity and Phase */
411 val = readl(regs + S3C64XX_SPI_CH_CFG);
412 val &= ~(S3C64XX_SPI_CH_SLAVE |
413 S3C64XX_SPI_CPOL_L |
414 S3C64XX_SPI_CPHA_B);
415
416 if (sdd->cur_mode & SPI_CPOL)
417 val |= S3C64XX_SPI_CPOL_L;
418
419 if (sdd->cur_mode & SPI_CPHA)
420 val |= S3C64XX_SPI_CPHA_B;
421
422 writel(val, regs + S3C64XX_SPI_CH_CFG);
423
424 /* Set Channel & DMA Mode */
425 val = readl(regs + S3C64XX_SPI_MODE_CFG);
426 val &= ~(S3C64XX_SPI_MODE_BUS_TSZ_MASK
427 | S3C64XX_SPI_MODE_CH_TSZ_MASK);
428
429 switch (sdd->cur_bpw) {
430 case 32:
431 val |= S3C64XX_SPI_MODE_BUS_TSZ_WORD;
432 break;
433 case 16:
434 val |= S3C64XX_SPI_MODE_BUS_TSZ_HALFWORD;
435 break;
436 default:
437 val |= S3C64XX_SPI_MODE_BUS_TSZ_BYTE;
438 break;
439 }
440 val |= S3C64XX_SPI_MODE_CH_TSZ_BYTE; /* Always 8bits wide */
441
442 writel(val, regs + S3C64XX_SPI_MODE_CFG);
443
444 /* Configure Clock */
445 val = readl(regs + S3C64XX_SPI_CLK_CFG);
446 val &= ~S3C64XX_SPI_PSR_MASK;
b0d5d6e5 447 val |= ((clk_get_rate(sdd->src_clk) / sdd->cur_speed / 2 - 1)
230d42d4
JB
448 & S3C64XX_SPI_PSR_MASK);
449 writel(val, regs + S3C64XX_SPI_CLK_CFG);
450
451 /* Enable Clock */
452 val = readl(regs + S3C64XX_SPI_CLK_CFG);
453 val |= S3C64XX_SPI_ENCLK_ENABLE;
454 writel(val, regs + S3C64XX_SPI_CLK_CFG);
455}
456
8944f4f3
MB
457static void s3c64xx_spi_dma_rxcb(struct s3c2410_dma_chan *chan, void *buf_id,
458 int size, enum s3c2410_dma_buffresult res)
230d42d4
JB
459{
460 struct s3c64xx_spi_driver_data *sdd = buf_id;
461 unsigned long flags;
462
463 spin_lock_irqsave(&sdd->lock, flags);
464
465 if (res == S3C2410_RES_OK)
466 sdd->state &= ~RXBUSY;
467 else
468 dev_err(&sdd->pdev->dev, "DmaAbrtRx-%d\n", size);
469
470 /* If the other done */
471 if (!(sdd->state & TXBUSY))
472 complete(&sdd->xfer_completion);
473
474 spin_unlock_irqrestore(&sdd->lock, flags);
475}
476
8944f4f3
MB
477static void s3c64xx_spi_dma_txcb(struct s3c2410_dma_chan *chan, void *buf_id,
478 int size, enum s3c2410_dma_buffresult res)
230d42d4
JB
479{
480 struct s3c64xx_spi_driver_data *sdd = buf_id;
481 unsigned long flags;
482
483 spin_lock_irqsave(&sdd->lock, flags);
484
485 if (res == S3C2410_RES_OK)
486 sdd->state &= ~TXBUSY;
487 else
488 dev_err(&sdd->pdev->dev, "DmaAbrtTx-%d \n", size);
489
490 /* If the other done */
491 if (!(sdd->state & RXBUSY))
492 complete(&sdd->xfer_completion);
493
494 spin_unlock_irqrestore(&sdd->lock, flags);
495}
496
497#define XFER_DMAADDR_INVALID DMA_BIT_MASK(32)
498
499static int s3c64xx_spi_map_mssg(struct s3c64xx_spi_driver_data *sdd,
500 struct spi_message *msg)
501{
e02ddd44 502 struct s3c64xx_spi_info *sci = sdd->cntrlr_info;
230d42d4
JB
503 struct device *dev = &sdd->pdev->dev;
504 struct spi_transfer *xfer;
505
506 if (msg->is_dma_mapped)
507 return 0;
508
509 /* First mark all xfer unmapped */
510 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
511 xfer->rx_dma = XFER_DMAADDR_INVALID;
512 xfer->tx_dma = XFER_DMAADDR_INVALID;
513 }
514
515 /* Map until end or first fail */
516 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
517
e02ddd44
JB
518 if (xfer->len <= ((sci->fifo_lvl_mask >> 1) + 1))
519 continue;
520
230d42d4 521 if (xfer->tx_buf != NULL) {
251ee478
JB
522 xfer->tx_dma = dma_map_single(dev,
523 (void *)xfer->tx_buf, xfer->len,
524 DMA_TO_DEVICE);
230d42d4
JB
525 if (dma_mapping_error(dev, xfer->tx_dma)) {
526 dev_err(dev, "dma_map_single Tx failed\n");
527 xfer->tx_dma = XFER_DMAADDR_INVALID;
528 return -ENOMEM;
529 }
530 }
531
532 if (xfer->rx_buf != NULL) {
533 xfer->rx_dma = dma_map_single(dev, xfer->rx_buf,
534 xfer->len, DMA_FROM_DEVICE);
535 if (dma_mapping_error(dev, xfer->rx_dma)) {
536 dev_err(dev, "dma_map_single Rx failed\n");
537 dma_unmap_single(dev, xfer->tx_dma,
538 xfer->len, DMA_TO_DEVICE);
539 xfer->tx_dma = XFER_DMAADDR_INVALID;
540 xfer->rx_dma = XFER_DMAADDR_INVALID;
541 return -ENOMEM;
542 }
543 }
544 }
545
546 return 0;
547}
548
549static void s3c64xx_spi_unmap_mssg(struct s3c64xx_spi_driver_data *sdd,
550 struct spi_message *msg)
551{
e02ddd44 552 struct s3c64xx_spi_info *sci = sdd->cntrlr_info;
230d42d4
JB
553 struct device *dev = &sdd->pdev->dev;
554 struct spi_transfer *xfer;
555
556 if (msg->is_dma_mapped)
557 return;
558
559 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
560
e02ddd44
JB
561 if (xfer->len <= ((sci->fifo_lvl_mask >> 1) + 1))
562 continue;
563
230d42d4
JB
564 if (xfer->rx_buf != NULL
565 && xfer->rx_dma != XFER_DMAADDR_INVALID)
566 dma_unmap_single(dev, xfer->rx_dma,
567 xfer->len, DMA_FROM_DEVICE);
568
569 if (xfer->tx_buf != NULL
570 && xfer->tx_dma != XFER_DMAADDR_INVALID)
571 dma_unmap_single(dev, xfer->tx_dma,
572 xfer->len, DMA_TO_DEVICE);
573 }
574}
575
576static void handle_msg(struct s3c64xx_spi_driver_data *sdd,
577 struct spi_message *msg)
578{
ad7de729 579 struct s3c64xx_spi_info *sci = sdd->cntrlr_info;
230d42d4
JB
580 struct spi_device *spi = msg->spi;
581 struct s3c64xx_spi_csinfo *cs = spi->controller_data;
582 struct spi_transfer *xfer;
583 int status = 0, cs_toggle = 0;
584 u32 speed;
585 u8 bpw;
586
587 /* If Master's(controller) state differs from that needed by Slave */
588 if (sdd->cur_speed != spi->max_speed_hz
589 || sdd->cur_mode != spi->mode
590 || sdd->cur_bpw != spi->bits_per_word) {
591 sdd->cur_bpw = spi->bits_per_word;
592 sdd->cur_speed = spi->max_speed_hz;
593 sdd->cur_mode = spi->mode;
594 s3c64xx_spi_config(sdd);
595 }
596
597 /* Map all the transfers if needed */
598 if (s3c64xx_spi_map_mssg(sdd, msg)) {
599 dev_err(&spi->dev,
600 "Xfer: Unable to map message buffers!\n");
601 status = -ENOMEM;
602 goto out;
603 }
604
605 /* Configure feedback delay */
606 writel(cs->fb_delay & 0x3, sdd->regs + S3C64XX_SPI_FB_CLK);
607
608 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
609
610 unsigned long flags;
611 int use_dma;
612
613 INIT_COMPLETION(sdd->xfer_completion);
614
615 /* Only BPW and Speed may change across transfers */
616 bpw = xfer->bits_per_word ? : spi->bits_per_word;
617 speed = xfer->speed_hz ? : spi->max_speed_hz;
618
619 if (bpw != sdd->cur_bpw || speed != sdd->cur_speed) {
620 sdd->cur_bpw = bpw;
621 sdd->cur_speed = speed;
622 s3c64xx_spi_config(sdd);
623 }
624
625 /* Polling method for xfers not bigger than FIFO capacity */
626 if (xfer->len <= ((sci->fifo_lvl_mask >> 1) + 1))
627 use_dma = 0;
628 else
629 use_dma = 1;
630
631 spin_lock_irqsave(&sdd->lock, flags);
632
633 /* Pending only which is to be done */
634 sdd->state &= ~RXBUSY;
635 sdd->state &= ~TXBUSY;
636
637 enable_datapath(sdd, spi, xfer, use_dma);
638
639 /* Slave Select */
640 enable_cs(sdd, spi);
641
642 /* Start the signals */
643 S3C64XX_SPI_ACT(sdd);
644
645 spin_unlock_irqrestore(&sdd->lock, flags);
646
647 status = wait_for_xfer(sdd, xfer, use_dma);
648
649 /* Quiese the signals */
650 S3C64XX_SPI_DEACT(sdd);
651
652 if (status) {
8a349d4b
JP
653 dev_err(&spi->dev, "I/O Error: "
654 "rx-%d tx-%d res:rx-%c tx-%c len-%d\n",
230d42d4
JB
655 xfer->rx_buf ? 1 : 0, xfer->tx_buf ? 1 : 0,
656 (sdd->state & RXBUSY) ? 'f' : 'p',
657 (sdd->state & TXBUSY) ? 'f' : 'p',
658 xfer->len);
659
660 if (use_dma) {
661 if (xfer->tx_buf != NULL
662 && (sdd->state & TXBUSY))
663 s3c2410_dma_ctrl(sdd->tx_dmach,
664 S3C2410_DMAOP_FLUSH);
665 if (xfer->rx_buf != NULL
666 && (sdd->state & RXBUSY))
667 s3c2410_dma_ctrl(sdd->rx_dmach,
668 S3C2410_DMAOP_FLUSH);
669 }
670
671 goto out;
672 }
673
674 if (xfer->delay_usecs)
675 udelay(xfer->delay_usecs);
676
677 if (xfer->cs_change) {
678 /* Hint that the next mssg is gonna be
679 for the same device */
680 if (list_is_last(&xfer->transfer_list,
681 &msg->transfers))
682 cs_toggle = 1;
683 else
684 disable_cs(sdd, spi);
685 }
686
687 msg->actual_length += xfer->len;
688
689 flush_fifo(sdd);
690 }
691
692out:
693 if (!cs_toggle || status)
694 disable_cs(sdd, spi);
695 else
696 sdd->tgl_spi = spi;
697
698 s3c64xx_spi_unmap_mssg(sdd, msg);
699
700 msg->status = status;
701
702 if (msg->complete)
703 msg->complete(msg->context);
704}
705
706static int acquire_dma(struct s3c64xx_spi_driver_data *sdd)
707{
708 if (s3c2410_dma_request(sdd->rx_dmach,
709 &s3c64xx_spi_dma_client, NULL) < 0) {
710 dev_err(&sdd->pdev->dev, "cannot get RxDMA\n");
711 return 0;
712 }
713 s3c2410_dma_set_buffdone_fn(sdd->rx_dmach, s3c64xx_spi_dma_rxcb);
714 s3c2410_dma_devconfig(sdd->rx_dmach, S3C2410_DMASRC_HW,
715 sdd->sfr_start + S3C64XX_SPI_RX_DATA);
716
717 if (s3c2410_dma_request(sdd->tx_dmach,
718 &s3c64xx_spi_dma_client, NULL) < 0) {
719 dev_err(&sdd->pdev->dev, "cannot get TxDMA\n");
720 s3c2410_dma_free(sdd->rx_dmach, &s3c64xx_spi_dma_client);
721 return 0;
722 }
723 s3c2410_dma_set_buffdone_fn(sdd->tx_dmach, s3c64xx_spi_dma_txcb);
724 s3c2410_dma_devconfig(sdd->tx_dmach, S3C2410_DMASRC_MEM,
725 sdd->sfr_start + S3C64XX_SPI_TX_DATA);
726
727 return 1;
728}
729
730static void s3c64xx_spi_work(struct work_struct *work)
731{
732 struct s3c64xx_spi_driver_data *sdd = container_of(work,
733 struct s3c64xx_spi_driver_data, work);
734 unsigned long flags;
735
736 /* Acquire DMA channels */
737 while (!acquire_dma(sdd))
738 msleep(10);
739
740 spin_lock_irqsave(&sdd->lock, flags);
741
742 while (!list_empty(&sdd->queue)
743 && !(sdd->state & SUSPND)) {
744
745 struct spi_message *msg;
746
747 msg = container_of(sdd->queue.next, struct spi_message, queue);
748
749 list_del_init(&msg->queue);
750
751 /* Set Xfer busy flag */
752 sdd->state |= SPIBUSY;
753
754 spin_unlock_irqrestore(&sdd->lock, flags);
755
756 handle_msg(sdd, msg);
757
758 spin_lock_irqsave(&sdd->lock, flags);
759
760 sdd->state &= ~SPIBUSY;
761 }
762
763 spin_unlock_irqrestore(&sdd->lock, flags);
764
765 /* Free DMA channels */
766 s3c2410_dma_free(sdd->tx_dmach, &s3c64xx_spi_dma_client);
767 s3c2410_dma_free(sdd->rx_dmach, &s3c64xx_spi_dma_client);
768}
769
770static int s3c64xx_spi_transfer(struct spi_device *spi,
771 struct spi_message *msg)
772{
773 struct s3c64xx_spi_driver_data *sdd;
774 unsigned long flags;
775
776 sdd = spi_master_get_devdata(spi->master);
777
778 spin_lock_irqsave(&sdd->lock, flags);
779
780 if (sdd->state & SUSPND) {
781 spin_unlock_irqrestore(&sdd->lock, flags);
782 return -ESHUTDOWN;
783 }
784
785 msg->status = -EINPROGRESS;
786 msg->actual_length = 0;
787
788 list_add_tail(&msg->queue, &sdd->queue);
789
790 queue_work(sdd->workqueue, &sdd->work);
791
792 spin_unlock_irqrestore(&sdd->lock, flags);
793
794 return 0;
795}
796
797/*
798 * Here we only check the validity of requested configuration
799 * and save the configuration in a local data-structure.
800 * The controller is actually configured only just before we
801 * get a message to transfer.
802 */
803static int s3c64xx_spi_setup(struct spi_device *spi)
804{
805 struct s3c64xx_spi_csinfo *cs = spi->controller_data;
806 struct s3c64xx_spi_driver_data *sdd;
ad7de729 807 struct s3c64xx_spi_info *sci;
230d42d4
JB
808 struct spi_message *msg;
809 u32 psr, speed;
810 unsigned long flags;
811 int err = 0;
812
813 if (cs == NULL || cs->set_level == NULL) {
814 dev_err(&spi->dev, "No CS for SPI(%d)\n", spi->chip_select);
815 return -ENODEV;
816 }
817
818 sdd = spi_master_get_devdata(spi->master);
819 sci = sdd->cntrlr_info;
820
821 spin_lock_irqsave(&sdd->lock, flags);
822
823 list_for_each_entry(msg, &sdd->queue, queue) {
824 /* Is some mssg is already queued for this device */
825 if (msg->spi == spi) {
826 dev_err(&spi->dev,
827 "setup: attempt while mssg in queue!\n");
828 spin_unlock_irqrestore(&sdd->lock, flags);
829 return -EBUSY;
830 }
831 }
832
833 if (sdd->state & SUSPND) {
834 spin_unlock_irqrestore(&sdd->lock, flags);
835 dev_err(&spi->dev,
836 "setup: SPI-%d not active!\n", spi->master->bus_num);
837 return -ESHUTDOWN;
838 }
839
840 spin_unlock_irqrestore(&sdd->lock, flags);
841
842 if (spi->bits_per_word != 8
843 && spi->bits_per_word != 16
844 && spi->bits_per_word != 32) {
845 dev_err(&spi->dev, "setup: %dbits/wrd not supported!\n",
846 spi->bits_per_word);
847 err = -EINVAL;
848 goto setup_exit;
849 }
850
851 /* Check if we can provide the requested rate */
b0d5d6e5 852 speed = clk_get_rate(sdd->src_clk) / 2 / (0 + 1); /* Max possible */
230d42d4
JB
853
854 if (spi->max_speed_hz > speed)
855 spi->max_speed_hz = speed;
856
b0d5d6e5 857 psr = clk_get_rate(sdd->src_clk) / 2 / spi->max_speed_hz - 1;
230d42d4
JB
858 psr &= S3C64XX_SPI_PSR_MASK;
859 if (psr == S3C64XX_SPI_PSR_MASK)
860 psr--;
861
b0d5d6e5 862 speed = clk_get_rate(sdd->src_clk) / 2 / (psr + 1);
230d42d4
JB
863 if (spi->max_speed_hz < speed) {
864 if (psr+1 < S3C64XX_SPI_PSR_MASK) {
865 psr++;
866 } else {
867 err = -EINVAL;
868 goto setup_exit;
869 }
870 }
871
b0d5d6e5 872 speed = clk_get_rate(sdd->src_clk) / 2 / (psr + 1);
230d42d4
JB
873 if (spi->max_speed_hz >= speed)
874 spi->max_speed_hz = speed;
875 else
876 err = -EINVAL;
877
878setup_exit:
879
880 /* setup() returns with device de-selected */
881 disable_cs(sdd, spi);
882
883 return err;
884}
885
886static void s3c64xx_spi_hwinit(struct s3c64xx_spi_driver_data *sdd, int channel)
887{
ad7de729 888 struct s3c64xx_spi_info *sci = sdd->cntrlr_info;
230d42d4
JB
889 void __iomem *regs = sdd->regs;
890 unsigned int val;
891
892 sdd->cur_speed = 0;
893
894 S3C64XX_SPI_DEACT(sdd);
895
896 /* Disable Interrupts - we use Polling if not DMA mode */
897 writel(0, regs + S3C64XX_SPI_INT_EN);
898
899 writel(sci->src_clk_nr << S3C64XX_SPI_CLKSEL_SRCSHFT,
900 regs + S3C64XX_SPI_CLK_CFG);
901 writel(0, regs + S3C64XX_SPI_MODE_CFG);
902 writel(0, regs + S3C64XX_SPI_PACKET_CNT);
903
904 /* Clear any irq pending bits */
905 writel(readl(regs + S3C64XX_SPI_PENDING_CLR),
906 regs + S3C64XX_SPI_PENDING_CLR);
907
908 writel(0, regs + S3C64XX_SPI_SWAP_CFG);
909
910 val = readl(regs + S3C64XX_SPI_MODE_CFG);
911 val &= ~S3C64XX_SPI_MODE_4BURST;
912 val &= ~(S3C64XX_SPI_MAX_TRAILCNT << S3C64XX_SPI_TRAILCNT_OFF);
913 val |= (S3C64XX_SPI_TRAILCNT << S3C64XX_SPI_TRAILCNT_OFF);
914 writel(val, regs + S3C64XX_SPI_MODE_CFG);
915
916 flush_fifo(sdd);
917}
918
919static int __init s3c64xx_spi_probe(struct platform_device *pdev)
920{
921 struct resource *mem_res, *dmatx_res, *dmarx_res;
922 struct s3c64xx_spi_driver_data *sdd;
ad7de729 923 struct s3c64xx_spi_info *sci;
230d42d4
JB
924 struct spi_master *master;
925 int ret;
926
927 if (pdev->id < 0) {
928 dev_err(&pdev->dev,
929 "Invalid platform device id-%d\n", pdev->id);
930 return -ENODEV;
931 }
932
933 if (pdev->dev.platform_data == NULL) {
934 dev_err(&pdev->dev, "platform_data missing!\n");
935 return -ENODEV;
936 }
937
cc0fc0bb
MB
938 sci = pdev->dev.platform_data;
939 if (!sci->src_clk_name) {
940 dev_err(&pdev->dev,
941 "Board init must call s3c64xx_spi_set_info()\n");
942 return -EINVAL;
943 }
944
230d42d4
JB
945 /* Check for availability of necessary resource */
946
947 dmatx_res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
948 if (dmatx_res == NULL) {
949 dev_err(&pdev->dev, "Unable to get SPI-Tx dma resource\n");
950 return -ENXIO;
951 }
952
953 dmarx_res = platform_get_resource(pdev, IORESOURCE_DMA, 1);
954 if (dmarx_res == NULL) {
955 dev_err(&pdev->dev, "Unable to get SPI-Rx dma resource\n");
956 return -ENXIO;
957 }
958
959 mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
960 if (mem_res == NULL) {
961 dev_err(&pdev->dev, "Unable to get SPI MEM resource\n");
962 return -ENXIO;
963 }
964
965 master = spi_alloc_master(&pdev->dev,
966 sizeof(struct s3c64xx_spi_driver_data));
967 if (master == NULL) {
968 dev_err(&pdev->dev, "Unable to allocate SPI Master\n");
969 return -ENOMEM;
970 }
971
230d42d4
JB
972 platform_set_drvdata(pdev, master);
973
974 sdd = spi_master_get_devdata(master);
975 sdd->master = master;
976 sdd->cntrlr_info = sci;
977 sdd->pdev = pdev;
978 sdd->sfr_start = mem_res->start;
979 sdd->tx_dmach = dmatx_res->start;
980 sdd->rx_dmach = dmarx_res->start;
981
982 sdd->cur_bpw = 8;
983
984 master->bus_num = pdev->id;
985 master->setup = s3c64xx_spi_setup;
986 master->transfer = s3c64xx_spi_transfer;
987 master->num_chipselect = sci->num_cs;
988 master->dma_alignment = 8;
989 /* the spi->mode bits understood by this driver: */
990 master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
991
992 if (request_mem_region(mem_res->start,
993 resource_size(mem_res), pdev->name) == NULL) {
994 dev_err(&pdev->dev, "Req mem region failed\n");
995 ret = -ENXIO;
996 goto err0;
997 }
998
999 sdd->regs = ioremap(mem_res->start, resource_size(mem_res));
1000 if (sdd->regs == NULL) {
1001 dev_err(&pdev->dev, "Unable to remap IO\n");
1002 ret = -ENXIO;
1003 goto err1;
1004 }
1005
1006 if (sci->cfg_gpio == NULL || sci->cfg_gpio(pdev)) {
1007 dev_err(&pdev->dev, "Unable to config gpio\n");
1008 ret = -EBUSY;
1009 goto err2;
1010 }
1011
1012 /* Setup clocks */
1013 sdd->clk = clk_get(&pdev->dev, "spi");
1014 if (IS_ERR(sdd->clk)) {
1015 dev_err(&pdev->dev, "Unable to acquire clock 'spi'\n");
1016 ret = PTR_ERR(sdd->clk);
1017 goto err3;
1018 }
1019
1020 if (clk_enable(sdd->clk)) {
1021 dev_err(&pdev->dev, "Couldn't enable clock 'spi'\n");
1022 ret = -EBUSY;
1023 goto err4;
1024 }
1025
b0d5d6e5
JB
1026 sdd->src_clk = clk_get(&pdev->dev, sci->src_clk_name);
1027 if (IS_ERR(sdd->src_clk)) {
230d42d4
JB
1028 dev_err(&pdev->dev,
1029 "Unable to acquire clock '%s'\n", sci->src_clk_name);
b0d5d6e5 1030 ret = PTR_ERR(sdd->src_clk);
230d42d4
JB
1031 goto err5;
1032 }
1033
b0d5d6e5 1034 if (clk_enable(sdd->src_clk)) {
230d42d4
JB
1035 dev_err(&pdev->dev, "Couldn't enable clock '%s'\n",
1036 sci->src_clk_name);
1037 ret = -EBUSY;
1038 goto err6;
1039 }
1040
1041 sdd->workqueue = create_singlethread_workqueue(
1042 dev_name(master->dev.parent));
1043 if (sdd->workqueue == NULL) {
1044 dev_err(&pdev->dev, "Unable to create workqueue\n");
1045 ret = -ENOMEM;
1046 goto err7;
1047 }
1048
1049 /* Setup Deufult Mode */
1050 s3c64xx_spi_hwinit(sdd, pdev->id);
1051
1052 spin_lock_init(&sdd->lock);
1053 init_completion(&sdd->xfer_completion);
1054 INIT_WORK(&sdd->work, s3c64xx_spi_work);
1055 INIT_LIST_HEAD(&sdd->queue);
1056
1057 if (spi_register_master(master)) {
1058 dev_err(&pdev->dev, "cannot register SPI master\n");
1059 ret = -EBUSY;
1060 goto err8;
1061 }
1062
8a349d4b
JP
1063 dev_dbg(&pdev->dev, "Samsung SoC SPI Driver loaded for Bus SPI-%d "
1064 "with %d Slaves attached\n",
230d42d4 1065 pdev->id, master->num_chipselect);
8a349d4b 1066 dev_dbg(&pdev->dev, "\tIOmem=[0x%x-0x%x]\tDMA=[Rx-%d, Tx-%d]\n",
230d42d4
JB
1067 mem_res->end, mem_res->start,
1068 sdd->rx_dmach, sdd->tx_dmach);
1069
1070 return 0;
1071
1072err8:
1073 destroy_workqueue(sdd->workqueue);
1074err7:
b0d5d6e5 1075 clk_disable(sdd->src_clk);
230d42d4 1076err6:
b0d5d6e5 1077 clk_put(sdd->src_clk);
230d42d4
JB
1078err5:
1079 clk_disable(sdd->clk);
1080err4:
1081 clk_put(sdd->clk);
1082err3:
1083err2:
1084 iounmap((void *) sdd->regs);
1085err1:
1086 release_mem_region(mem_res->start, resource_size(mem_res));
1087err0:
1088 platform_set_drvdata(pdev, NULL);
1089 spi_master_put(master);
1090
1091 return ret;
1092}
1093
1094static int s3c64xx_spi_remove(struct platform_device *pdev)
1095{
1096 struct spi_master *master = spi_master_get(platform_get_drvdata(pdev));
1097 struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
230d42d4
JB
1098 struct resource *mem_res;
1099 unsigned long flags;
1100
1101 spin_lock_irqsave(&sdd->lock, flags);
1102 sdd->state |= SUSPND;
1103 spin_unlock_irqrestore(&sdd->lock, flags);
1104
1105 while (sdd->state & SPIBUSY)
1106 msleep(10);
1107
1108 spi_unregister_master(master);
1109
1110 destroy_workqueue(sdd->workqueue);
1111
b0d5d6e5
JB
1112 clk_disable(sdd->src_clk);
1113 clk_put(sdd->src_clk);
230d42d4
JB
1114
1115 clk_disable(sdd->clk);
1116 clk_put(sdd->clk);
1117
1118 iounmap((void *) sdd->regs);
1119
1120 mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
ef6c680d
JB
1121 if (mem_res != NULL)
1122 release_mem_region(mem_res->start, resource_size(mem_res));
230d42d4
JB
1123
1124 platform_set_drvdata(pdev, NULL);
1125 spi_master_put(master);
1126
1127 return 0;
1128}
1129
1130#ifdef CONFIG_PM
1131static int s3c64xx_spi_suspend(struct platform_device *pdev, pm_message_t state)
1132{
1133 struct spi_master *master = spi_master_get(platform_get_drvdata(pdev));
1134 struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
230d42d4
JB
1135 unsigned long flags;
1136
1137 spin_lock_irqsave(&sdd->lock, flags);
1138 sdd->state |= SUSPND;
1139 spin_unlock_irqrestore(&sdd->lock, flags);
1140
1141 while (sdd->state & SPIBUSY)
1142 msleep(10);
1143
1144 /* Disable the clock */
b0d5d6e5 1145 clk_disable(sdd->src_clk);
230d42d4
JB
1146 clk_disable(sdd->clk);
1147
1148 sdd->cur_speed = 0; /* Output Clock is stopped */
1149
1150 return 0;
1151}
1152
1153static int s3c64xx_spi_resume(struct platform_device *pdev)
1154{
1155 struct spi_master *master = spi_master_get(platform_get_drvdata(pdev));
1156 struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
ad7de729 1157 struct s3c64xx_spi_info *sci = sdd->cntrlr_info;
230d42d4
JB
1158 unsigned long flags;
1159
1160 sci->cfg_gpio(pdev);
1161
1162 /* Enable the clock */
b0d5d6e5 1163 clk_enable(sdd->src_clk);
230d42d4
JB
1164 clk_enable(sdd->clk);
1165
1166 s3c64xx_spi_hwinit(sdd, pdev->id);
1167
1168 spin_lock_irqsave(&sdd->lock, flags);
1169 sdd->state &= ~SUSPND;
1170 spin_unlock_irqrestore(&sdd->lock, flags);
1171
1172 return 0;
1173}
1174#else
1175#define s3c64xx_spi_suspend NULL
1176#define s3c64xx_spi_resume NULL
1177#endif /* CONFIG_PM */
1178
1179static struct platform_driver s3c64xx_spi_driver = {
1180 .driver = {
1181 .name = "s3c64xx-spi",
1182 .owner = THIS_MODULE,
1183 },
1184 .remove = s3c64xx_spi_remove,
1185 .suspend = s3c64xx_spi_suspend,
1186 .resume = s3c64xx_spi_resume,
1187};
1188MODULE_ALIAS("platform:s3c64xx-spi");
1189
1190static int __init s3c64xx_spi_init(void)
1191{
1192 return platform_driver_probe(&s3c64xx_spi_driver, s3c64xx_spi_probe);
1193}
d2a787fc 1194subsys_initcall(s3c64xx_spi_init);
230d42d4
JB
1195
1196static void __exit s3c64xx_spi_exit(void)
1197{
1198 platform_driver_unregister(&s3c64xx_spi_driver);
1199}
1200module_exit(s3c64xx_spi_exit);
1201
1202MODULE_AUTHOR("Jaswinder Singh <jassi.brar@samsung.com>");
1203MODULE_DESCRIPTION("S3C64XX SPI Controller Driver");
1204MODULE_LICENSE("GPL");