spi: davinci: remove unnecessary typecast
[deliverable/linux.git] / drivers / spi / davinci_spi.c
CommitLineData
358934a6
SP
1/*
2 * Copyright (C) 2009 Texas Instruments.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 */
18
19#include <linux/interrupt.h>
20#include <linux/io.h>
21#include <linux/gpio.h>
22#include <linux/module.h>
23#include <linux/delay.h>
24#include <linux/platform_device.h>
25#include <linux/err.h>
26#include <linux/clk.h>
27#include <linux/dma-mapping.h>
28#include <linux/spi/spi.h>
29#include <linux/spi/spi_bitbang.h>
5a0e3ad6 30#include <linux/slab.h>
358934a6
SP
31
32#include <mach/spi.h>
33#include <mach/edma.h>
34
/* marker for an absent optional DMA platform resource */
#define SPI_NO_RESOURCE		((resource_size_t)-1)

#define SPI_MAX_CHIPSELECT	2

/* SPIDEF value: all chip-select lines at their inactive default level */
#define CS_DEFAULT		0xFF

#define SPI_BUFSIZ		(SMP_CACHE_BYTES + 1)
/* EDMA element sizes, in bytes */
#define DAVINCI_DMA_DATA_TYPE_S8	0x01
#define DAVINCI_DMA_DATA_TYPE_S16	0x02
#define DAVINCI_DMA_DATA_TYPE_S32	0x04

/* SPIFMTn per-chipselect format register bits */
#define SPIFMT_PHASE_MASK	BIT(16)
#define SPIFMT_POLARITY_MASK	BIT(17)
#define SPIFMT_DISTIMER_MASK	BIT(18)
#define SPIFMT_SHIFTDIR_MASK	BIT(20)
#define SPIFMT_WAITENA_MASK	BIT(21)
#define SPIFMT_PARITYENA_MASK	BIT(22)
#define SPIFMT_ODD_PARITY_MASK	BIT(23)
#define SPIFMT_WDELAY_MASK	0x3f000000u
#define SPIFMT_WDELAY_SHIFT	24
#define SPIFMT_CHARLEN_MASK	0x0000001Fu

/* SPIPC0: pin-control bits routing pads to SPI functions */
#define SPIPC0_DIFUN_MASK	BIT(11)		/* MISO */
#define SPIPC0_DOFUN_MASK	BIT(10)		/* MOSI */
#define SPIPC0_CLKFUN_MASK	BIT(9)		/* CLK */
#define SPIPC0_SPIENA_MASK	BIT(8)		/* nREADY */

/* mask covering every interrupt-enable bit in SPIINT */
#define SPIINT_MASKALL		0x0101035F
/* interrupt-level selections written to SPILVL */
#define SPI_INTLVL_1		0x000001FFu
#define SPI_INTLVL_0		0x00000000u

/* SPIDAT1 */
#define SPIDAT1_CSHOLD_SHIFT	28
#define SPIDAT1_CSNR_SHIFT	16
/* SPIGCR1 */
#define SPIGCR1_CLKMOD_MASK	BIT(1)
#define SPIGCR1_MASTER_MASK	BIT(0)
#define SPIGCR1_LOOPBACK_MASK	BIT(16)
#define SPIGCR1_SPIENA_MASK	BIT(24)

/* SPIBUF */
#define SPIBUF_TXFULL_MASK	BIT(29)
#define SPIBUF_RXEMPTY_MASK	BIT(31)

/* Error Masks (SPIFLG status register) */
#define SPIFLG_DLEN_ERR_MASK		BIT(0)
#define SPIFLG_TIMEOUT_MASK		BIT(1)
#define SPIFLG_PARERR_MASK		BIT(2)
#define SPIFLG_DESYNC_MASK		BIT(3)
#define SPIFLG_BITERR_MASK		BIT(4)
#define SPIFLG_OVRRUN_MASK		BIT(6)
#define SPIFLG_RX_INTR_MASK		BIT(8)
#define SPIFLG_TX_INTR_MASK		BIT(9)
#define SPIFLG_BUF_INIT_ACTIVE_MASK	BIT(24)

/* SPIINT interrupt-enable bits */
#define SPIINT_BITERR_INTR	BIT(4)
#define SPIINT_OVRRUN_INTR	BIT(6)
#define SPIINT_RX_INTR		BIT(8)
#define SPIINT_TX_INTR		BIT(9)
#define SPIINT_DMA_REQ_EN	BIT(16)

/* SPIDELAY field positions */
#define SPI_T2CDELAY_SHIFT	16
#define SPI_C2TDELAY_SHIFT	24

/* SPI Controller registers (byte offsets from the ioremapped base) */
#define SPIGCR0		0x00
#define SPIGCR1		0x04
#define SPIINT		0x08
#define SPILVL		0x0c
#define SPIFLG		0x10
#define SPIPC0		0x14
#define SPIDAT1		0x3c
#define SPIBUF		0x40
#define SPIDELAY	0x48
#define SPIDEF		0x4c
#define SPIFMT0		0x50
/* Per-chipselect state cached by the driver */
struct davinci_spi_slave {
	u32	cmd_to_write;
	u32	clk_ctrl_to_write;
	u32	bytes_per_word;		/* 1 or 2, derived from bits_per_word */
	u8	active_cs;
};
119
/* We have 2 DMA channels per CS, one for RX and one for TX */
struct davinci_spi_dma {
	int			dma_tx_channel;	/* -1 while unallocated */
	int			dma_rx_channel;	/* -1 while unallocated */
	int			dma_tx_sync_dev;
	int			dma_rx_sync_dev;
	enum dma_event_q	eventq;

	struct completion	dma_tx_completion;
	struct completion	dma_rx_completion;
};
131
/* SPI Controller driver's private data. */
struct davinci_spi {
	struct spi_bitbang	bitbang;
	struct clk		*clk;

	u8			version;	/* set from pdata->version at probe */
	resource_size_t		pbase;		/* physical base, used for EDMA src/dst */
	void __iomem		*base;		/* ioremapped register window */
	size_t			region_size;
	u32			irq;
	struct completion	done;

	const void		*tx;		/* cursor into the current TX buffer */
	void			*rx;		/* cursor into the current RX buffer */
	u8			*tmp_buf;	/* dummy TX source for RX-only DMA */
	int			count;		/* transfer length, in words */
	struct davinci_spi_dma	*dma_channels;	/* NULL in PIO mode */
	struct davinci_spi_platform_data *pdata;

	/* word-size specific FIFO accessors, chosen in setup_transfer() */
	void			(*get_rx)(u32 rx_data, struct davinci_spi *);
	u32			(*get_tx)(struct davinci_spi *);

	struct davinci_spi_slave slave[SPI_MAX_CHIPSELECT];
};

/* nonzero when the controller was probed in EDMA mode */
static unsigned use_dma;
158
159static void davinci_spi_rx_buf_u8(u32 data, struct davinci_spi *davinci_spi)
160{
161 u8 *rx = davinci_spi->rx;
162
163 *rx++ = (u8)data;
164 davinci_spi->rx = rx;
165}
166
167static void davinci_spi_rx_buf_u16(u32 data, struct davinci_spi *davinci_spi)
168{
169 u16 *rx = davinci_spi->rx;
170
171 *rx++ = (u16)data;
172 davinci_spi->rx = rx;
173}
174
175static u32 davinci_spi_tx_buf_u8(struct davinci_spi *davinci_spi)
176{
177 u32 data;
178 const u8 *tx = davinci_spi->tx;
179
180 data = *tx++;
181 davinci_spi->tx = tx;
182 return data;
183}
184
185static u32 davinci_spi_tx_buf_u16(struct davinci_spi *davinci_spi)
186{
187 u32 data;
188 const u16 *tx = davinci_spi->tx;
189
190 data = *tx++;
191 davinci_spi->tx = tx;
192 return data;
193}
194
195static inline void set_io_bits(void __iomem *addr, u32 bits)
196{
197 u32 v = ioread32(addr);
198
199 v |= bits;
200 iowrite32(v, addr);
201}
202
203static inline void clear_io_bits(void __iomem *addr, u32 bits)
204{
205 u32 v = ioread32(addr);
206
207 v &= ~bits;
208 iowrite32(v, addr);
209}
210
211static inline void set_fmt_bits(void __iomem *addr, u32 bits, int cs_num)
212{
213 set_io_bits(addr + SPIFMT0 + (0x4 * cs_num), bits);
214}
215
216static inline void clear_fmt_bits(void __iomem *addr, u32 bits, int cs_num)
217{
218 clear_io_bits(addr + SPIFMT0 + (0x4 * cs_num), bits);
219}
220
221static void davinci_spi_set_dma_req(const struct spi_device *spi, int enable)
222{
223 struct davinci_spi *davinci_spi = spi_master_get_devdata(spi->master);
224
225 if (enable)
226 set_io_bits(davinci_spi->base + SPIINT, SPIINT_DMA_REQ_EN);
227 else
228 clear_io_bits(davinci_spi->base + SPIINT, SPIINT_DMA_REQ_EN);
229}
230
231/*
232 * Interface to control the chip select signal
233 */
234static void davinci_spi_chipselect(struct spi_device *spi, int value)
235{
236 struct davinci_spi *davinci_spi;
237 struct davinci_spi_platform_data *pdata;
238 u32 data1_reg_val = 0;
239
240 davinci_spi = spi_master_get_devdata(spi->master);
241 pdata = davinci_spi->pdata;
242
243 /*
244 * Board specific chip select logic decides the polarity and cs
245 * line for the controller
246 */
247 if (value == BITBANG_CS_INACTIVE) {
248 set_io_bits(davinci_spi->base + SPIDEF, CS_DEFAULT);
249
250 data1_reg_val |= CS_DEFAULT << SPIDAT1_CSNR_SHIFT;
251 iowrite32(data1_reg_val, davinci_spi->base + SPIDAT1);
252
253 while ((ioread32(davinci_spi->base + SPIBUF)
254 & SPIBUF_RXEMPTY_MASK) == 0)
255 cpu_relax();
256 }
257}
258
/**
 * davinci_spi_setup_transfer - This functions will determine transfer method
 * @spi: spi device on which data transfer to be done
 * @t: spi transfer in which transfer info is filled
 *
 * This function determines data transfer method (8/16/32 bit transfer).
 * It will also set the SPI Clock Control register according to
 * SPI slave device freq.
 */
static int davinci_spi_setup_transfer(struct spi_device *spi,
		struct spi_transfer *t)
{
	struct davinci_spi *davinci_spi;
	u8 bits_per_word = 0;
	u32 hz = 0, prescale = 0, clkspeed;

	davinci_spi = spi_master_get_devdata(spi->master);

	/* per-transfer overrides, when a transfer is supplied */
	if (t) {
		bits_per_word = t->bits_per_word;
		hz = t->speed_hz;
	}

	/* if bits_per_word is not set then set it default */
	if (!bits_per_word)
		bits_per_word = spi->bits_per_word;

	/*
	 * Assign function pointer to appropriate transfer method
	 * 8bit, 16bit or 32bit transfer
	 */
	if (bits_per_word <= 8 && bits_per_word >= 2) {
		davinci_spi->get_rx = davinci_spi_rx_buf_u8;
		davinci_spi->get_tx = davinci_spi_tx_buf_u8;
		davinci_spi->slave[spi->chip_select].bytes_per_word = 1;
	} else if (bits_per_word <= 16 && bits_per_word >= 2) {
		davinci_spi->get_rx = davinci_spi_rx_buf_u16;
		davinci_spi->get_tx = davinci_spi_tx_buf_u16;
		davinci_spi->slave[spi->chip_select].bytes_per_word = 2;
	} else
		return -EINVAL;

	if (!hz)
		hz = spi->max_speed_hz;

	/* program the word length into this chipselect's SPIFMTn */
	clear_fmt_bits(davinci_spi->base, SPIFMT_CHARLEN_MASK,
			spi->chip_select);
	set_fmt_bits(davinci_spi->base, bits_per_word & 0x1f,
			spi->chip_select);

	/*
	 * Compute the 8-bit clock prescaler (bits 8..15 of SPIFMTn),
	 * clamping the requested rate to the achievable [clk/256, clk/2]
	 * range before dividing.
	 */
	clkspeed = clk_get_rate(davinci_spi->clk);
	if (hz > clkspeed / 2)
		prescale = 1 << 8;
	if (hz < clkspeed / 256)
		prescale = 255 << 8;
	if (!prescale)
		prescale = ((clkspeed / hz - 1) << 8) & 0x0000ff00;

	clear_fmt_bits(davinci_spi->base, 0x0000ff00, spi->chip_select);
	set_fmt_bits(davinci_spi->base, prescale, spi->chip_select);

	return 0;
}
323
324static void davinci_spi_dma_rx_callback(unsigned lch, u16 ch_status, void *data)
325{
326 struct spi_device *spi = (struct spi_device *)data;
327 struct davinci_spi *davinci_spi;
328 struct davinci_spi_dma *davinci_spi_dma;
358934a6
SP
329
330 davinci_spi = spi_master_get_devdata(spi->master);
331 davinci_spi_dma = &(davinci_spi->dma_channels[spi->chip_select]);
358934a6
SP
332
333 if (ch_status == DMA_COMPLETE)
334 edma_stop(davinci_spi_dma->dma_rx_channel);
335 else
336 edma_clean_channel(davinci_spi_dma->dma_rx_channel);
337
338 complete(&davinci_spi_dma->dma_rx_completion);
339 /* We must disable the DMA RX request */
340 davinci_spi_set_dma_req(spi, 0);
341}
342
343static void davinci_spi_dma_tx_callback(unsigned lch, u16 ch_status, void *data)
344{
345 struct spi_device *spi = (struct spi_device *)data;
346 struct davinci_spi *davinci_spi;
347 struct davinci_spi_dma *davinci_spi_dma;
358934a6
SP
348
349 davinci_spi = spi_master_get_devdata(spi->master);
350 davinci_spi_dma = &(davinci_spi->dma_channels[spi->chip_select]);
358934a6
SP
351
352 if (ch_status == DMA_COMPLETE)
353 edma_stop(davinci_spi_dma->dma_tx_channel);
354 else
355 edma_clean_channel(davinci_spi_dma->dma_tx_channel);
356
357 complete(&davinci_spi_dma->dma_tx_completion);
358 /* We must disable the DMA TX request */
359 davinci_spi_set_dma_req(spi, 0);
360}
361
362static int davinci_spi_request_dma(struct spi_device *spi)
363{
364 struct davinci_spi *davinci_spi;
365 struct davinci_spi_dma *davinci_spi_dma;
358934a6
SP
366 struct device *sdev;
367 int r;
368
369 davinci_spi = spi_master_get_devdata(spi->master);
370 davinci_spi_dma = &davinci_spi->dma_channels[spi->chip_select];
358934a6
SP
371 sdev = davinci_spi->bitbang.master->dev.parent;
372
373 r = edma_alloc_channel(davinci_spi_dma->dma_rx_sync_dev,
374 davinci_spi_dma_rx_callback, spi,
375 davinci_spi_dma->eventq);
376 if (r < 0) {
377 dev_dbg(sdev, "Unable to request DMA channel for SPI RX\n");
378 return -EAGAIN;
379 }
380 davinci_spi_dma->dma_rx_channel = r;
381 r = edma_alloc_channel(davinci_spi_dma->dma_tx_sync_dev,
382 davinci_spi_dma_tx_callback, spi,
383 davinci_spi_dma->eventq);
384 if (r < 0) {
385 edma_free_channel(davinci_spi_dma->dma_rx_channel);
386 davinci_spi_dma->dma_rx_channel = -1;
387 dev_dbg(sdev, "Unable to request DMA channel for SPI TX\n");
388 return -EAGAIN;
389 }
390 davinci_spi_dma->dma_tx_channel = r;
391
392 return 0;
393}
394
/**
 * davinci_spi_setup - This functions will set default transfer method
 * @spi: spi device on which data transfer to be done
 *
 * This functions sets the default transfer method: it validates the
 * requested clock range, lazily allocates DMA channels, programs the
 * per-chipselect SPIFMTn mode bits from spi->mode and platform data,
 * then delegates word-size/clock setup to davinci_spi_setup_transfer().
 */
static int davinci_spi_setup(struct spi_device *spi)
{
	int retval;
	struct davinci_spi *davinci_spi;
	struct davinci_spi_dma *davinci_spi_dma;
	struct device *sdev;

	davinci_spi = spi_master_get_devdata(spi->master);
	sdev = davinci_spi->bitbang.master->dev.parent;

	/* if bits per word length is zero then set it default 8 */
	if (!spi->bits_per_word)
		spi->bits_per_word = 8;

	davinci_spi->slave[spi->chip_select].cmd_to_write = 0;

	/* lazily request the EDMA channel pair for this chipselect */
	if (use_dma && davinci_spi->dma_channels) {
		davinci_spi_dma = &davinci_spi->dma_channels[spi->chip_select];

		if ((davinci_spi_dma->dma_rx_channel == -1)
				|| (davinci_spi_dma->dma_tx_channel == -1)) {
			retval = davinci_spi_request_dma(spi);
			if (retval < 0)
				return retval;
		}
	}

	/*
	 * SPI in DaVinci and DA8xx operate between
	 * 600 KHz and 50 MHz
	 */
	if (spi->max_speed_hz < 600000 || spi->max_speed_hz > 50000000) {
		dev_dbg(sdev, "Operating frequency is not in acceptable "
				"range\n");
		return -EINVAL;
	}

	/*
	 * Set up SPIFMTn register, unique to this chipselect.
	 *
	 * NOTE: we could do all of these with one write.  Also, some
	 * of the "version 2" features are found in chips that don't
	 * support all of them...
	 */
	if (spi->mode & SPI_LSB_FIRST)
		set_fmt_bits(davinci_spi->base, SPIFMT_SHIFTDIR_MASK,
				spi->chip_select);
	else
		clear_fmt_bits(davinci_spi->base, SPIFMT_SHIFTDIR_MASK,
				spi->chip_select);

	if (spi->mode & SPI_CPOL)
		set_fmt_bits(davinci_spi->base, SPIFMT_POLARITY_MASK,
				spi->chip_select);
	else
		clear_fmt_bits(davinci_spi->base, SPIFMT_POLARITY_MASK,
				spi->chip_select);

	/* note: the hardware phase bit is the inverse of SPI_CPHA */
	if (!(spi->mode & SPI_CPHA))
		set_fmt_bits(davinci_spi->base, SPIFMT_PHASE_MASK,
				spi->chip_select);
	else
		clear_fmt_bits(davinci_spi->base, SPIFMT_PHASE_MASK,
				spi->chip_select);

	/*
	 * Version 1 hardware supports two basic SPI modes:
	 *  - Standard SPI mode uses 4 pins, with chipselect
	 *  - 3 pin SPI is a 4 pin variant without CS (SPI_NO_CS)
	 *	(distinct from SPI_3WIRE, with just one data wire;
	 *	or similar variants without MOSI or without MISO)
	 *
	 * Version 2 hardware supports an optional handshaking signal,
	 * so it can support two more modes:
	 *  - 5 pin SPI variant is standard SPI plus SPI_READY
	 *  - 4 pin with enable is (SPI_READY | SPI_NO_CS)
	 */

	if (davinci_spi->version == SPI_VERSION_2) {
		/* inter-word delay, parity, WAIT and timer options all
		 * come from board platform data */
		clear_fmt_bits(davinci_spi->base, SPIFMT_WDELAY_MASK,
				spi->chip_select);
		set_fmt_bits(davinci_spi->base,
				(davinci_spi->pdata->wdelay
				<< SPIFMT_WDELAY_SHIFT)
				& SPIFMT_WDELAY_MASK,
				spi->chip_select);

		if (davinci_spi->pdata->odd_parity)
			set_fmt_bits(davinci_spi->base,
					SPIFMT_ODD_PARITY_MASK,
					spi->chip_select);
		else
			clear_fmt_bits(davinci_spi->base,
					SPIFMT_ODD_PARITY_MASK,
					spi->chip_select);

		if (davinci_spi->pdata->parity_enable)
			set_fmt_bits(davinci_spi->base,
					SPIFMT_PARITYENA_MASK,
					spi->chip_select);
		else
			clear_fmt_bits(davinci_spi->base,
					SPIFMT_PARITYENA_MASK,
					spi->chip_select);

		if (davinci_spi->pdata->wait_enable)
			set_fmt_bits(davinci_spi->base,
					SPIFMT_WAITENA_MASK,
					spi->chip_select);
		else
			clear_fmt_bits(davinci_spi->base,
					SPIFMT_WAITENA_MASK,
					spi->chip_select);

		if (davinci_spi->pdata->timer_disable)
			set_fmt_bits(davinci_spi->base,
					SPIFMT_DISTIMER_MASK,
					spi->chip_select);
		else
			clear_fmt_bits(davinci_spi->base,
					SPIFMT_DISTIMER_MASK,
					spi->chip_select);
	}

	retval = davinci_spi_setup_transfer(spi, NULL);

	return retval;
}
529
530static void davinci_spi_cleanup(struct spi_device *spi)
531{
532 struct davinci_spi *davinci_spi = spi_master_get_devdata(spi->master);
533 struct davinci_spi_dma *davinci_spi_dma;
534
535 davinci_spi_dma = &davinci_spi->dma_channels[spi->chip_select];
536
537 if (use_dma && davinci_spi->dma_channels) {
538 davinci_spi_dma = &davinci_spi->dma_channels[spi->chip_select];
539
540 if ((davinci_spi_dma->dma_rx_channel != -1)
541 && (davinci_spi_dma->dma_tx_channel != -1)) {
542 edma_free_channel(davinci_spi_dma->dma_tx_channel);
543 edma_free_channel(davinci_spi_dma->dma_rx_channel);
544 }
545 }
546}
547
548static int davinci_spi_bufs_prep(struct spi_device *spi,
549 struct davinci_spi *davinci_spi)
550{
551 int op_mode = 0;
552
553 /*
554 * REVISIT unless devices disagree about SPI_LOOP or
555 * SPI_READY (SPI_NO_CS only allows one device!), this
556 * should not need to be done before each message...
557 * optimize for both flags staying cleared.
558 */
559
560 op_mode = SPIPC0_DIFUN_MASK
561 | SPIPC0_DOFUN_MASK
562 | SPIPC0_CLKFUN_MASK;
563 if (!(spi->mode & SPI_NO_CS))
564 op_mode |= 1 << spi->chip_select;
565 if (spi->mode & SPI_READY)
566 op_mode |= SPIPC0_SPIENA_MASK;
567
568 iowrite32(op_mode, davinci_spi->base + SPIPC0);
569
570 if (spi->mode & SPI_LOOP)
571 set_io_bits(davinci_spi->base + SPIGCR1,
572 SPIGCR1_LOOPBACK_MASK);
573 else
574 clear_io_bits(davinci_spi->base + SPIGCR1,
575 SPIGCR1_LOOPBACK_MASK);
576
577 return 0;
578}
579
580static int davinci_spi_check_error(struct davinci_spi *davinci_spi,
581 int int_status)
582{
583 struct device *sdev = davinci_spi->bitbang.master->dev.parent;
584
585 if (int_status & SPIFLG_TIMEOUT_MASK) {
586 dev_dbg(sdev, "SPI Time-out Error\n");
587 return -ETIMEDOUT;
588 }
589 if (int_status & SPIFLG_DESYNC_MASK) {
590 dev_dbg(sdev, "SPI Desynchronization Error\n");
591 return -EIO;
592 }
593 if (int_status & SPIFLG_BITERR_MASK) {
594 dev_dbg(sdev, "SPI Bit error\n");
595 return -EIO;
596 }
597
598 if (davinci_spi->version == SPI_VERSION_2) {
599 if (int_status & SPIFLG_DLEN_ERR_MASK) {
600 dev_dbg(sdev, "SPI Data Length Error\n");
601 return -EIO;
602 }
603 if (int_status & SPIFLG_PARERR_MASK) {
604 dev_dbg(sdev, "SPI Parity Error\n");
605 return -EIO;
606 }
607 if (int_status & SPIFLG_OVRRUN_MASK) {
608 dev_dbg(sdev, "SPI Data Overrun error\n");
609 return -EIO;
610 }
611 if (int_status & SPIFLG_TX_INTR_MASK) {
612 dev_dbg(sdev, "SPI TX intr bit set\n");
613 return -EIO;
614 }
615 if (int_status & SPIFLG_BUF_INIT_ACTIVE_MASK) {
616 dev_dbg(sdev, "SPI Buffer Init Active\n");
617 return -EBUSY;
618 }
619 }
620
621 return 0;
622}
623
/**
 * davinci_spi_bufs_pio - functions which will handle transfer data
 * @spi: spi device on which data transfer to be done
 * @t: spi transfer in which transfer info is filled
 *
 * This function will put data to be transferred into data register
 * of SPI controller and then wait until the completion will be marked
 * by the IRQ Handler.
 */
static int davinci_spi_bufs_pio(struct spi_device *spi, struct spi_transfer *t)
{
	struct davinci_spi *davinci_spi;
	int int_status, count, ret;
	u8 conv, tmp;
	u32 tx_data, data1_reg_val;
	u32 buf_val, flg_val;
	struct davinci_spi_platform_data *pdata;

	davinci_spi = spi_master_get_devdata(spi->master);
	pdata = davinci_spi->pdata;

	davinci_spi->tx = t->tx_buf;
	davinci_spi->rx = t->rx_buf;

	/* convert len to words based on bits_per_word */
	conv = davinci_spi->slave[spi->chip_select].bytes_per_word;
	davinci_spi->count = t->len / conv;

	INIT_COMPLETION(davinci_spi->done);

	ret = davinci_spi_bufs_prep(spi, davinci_spi);
	if (ret)
		return ret;

	/* Enable SPI */
	set_io_bits(davinci_spi->base + SPIGCR1, SPIGCR1_SPIENA_MASK);

	/* program chip-select-to-transmit delays from platform data */
	iowrite32(0 | (pdata->c2tdelay << SPI_C2TDELAY_SHIFT) |
			(pdata->t2cdelay << SPI_T2CDELAY_SHIFT),
			davinci_spi->base + SPIDELAY);

	count = davinci_spi->count;
	data1_reg_val = pdata->cs_hold << SPIDAT1_CSHOLD_SHIFT;
	/* tmp has every CS bit set except this device's (active low) */
	tmp = ~(0x1 << spi->chip_select);

	clear_io_bits(davinci_spi->base + SPIDEF, ~tmp);

	data1_reg_val |= tmp << SPIDAT1_CSNR_SHIFT;

	/* drain any stale data from the receive buffer */
	while ((ioread32(davinci_spi->base + SPIBUF)
			& SPIBUF_RXEMPTY_MASK) == 0)
		cpu_relax();

	/* Determine the command to execute READ or WRITE */
	if (t->tx_buf) {
		/* polled TX: mask all interrupts */
		clear_io_bits(davinci_spi->base + SPIINT, SPIINT_MASKALL);

		while (1) {
			tx_data = davinci_spi->get_tx(davinci_spi);

			/* merge the word into the low 16 bits of SPIDAT1 */
			data1_reg_val &= ~(0xFFFF);
			data1_reg_val |= (0xFFFF & tx_data);

			buf_val = ioread32(davinci_spi->base + SPIBUF);
			if ((buf_val & SPIBUF_TXFULL_MASK) == 0) {
				iowrite32(data1_reg_val,
						davinci_spi->base + SPIDAT1);

				count--;
			}
			/* wait for the word to come back */
			while (ioread32(davinci_spi->base + SPIBUF)
					& SPIBUF_RXEMPTY_MASK)
				cpu_relax();

			/* getting the returned byte */
			if (t->rx_buf) {
				buf_val = ioread32(davinci_spi->base + SPIBUF);
				davinci_spi->get_rx(buf_val, davinci_spi);
			}
			if (count <= 0)
				break;
		}
	} else {
		if (pdata->poll_mode) {
			while (1) {
				/* keeps the serial clock going */
				if ((ioread32(davinci_spi->base + SPIBUF)
						& SPIBUF_TXFULL_MASK) == 0)
					iowrite32(data1_reg_val,
						davinci_spi->base + SPIDAT1);

				while (ioread32(davinci_spi->base + SPIBUF) &
						SPIBUF_RXEMPTY_MASK)
					cpu_relax();

				flg_val = ioread32(davinci_spi->base + SPIFLG);
				buf_val = ioread32(davinci_spi->base + SPIBUF);

				davinci_spi->get_rx(buf_val, davinci_spi);

				count--;
				if (count <= 0)
					break;
			}
		} else {	/* Receive in Interrupt mode */
			int i;

			for (i = 0; i < davinci_spi->count; i++) {
				/* re-arm error + RX interrupts per word */
				set_io_bits(davinci_spi->base + SPIINT,
						SPIINT_BITERR_INTR
						| SPIINT_OVRRUN_INTR
						| SPIINT_RX_INTR);

				iowrite32(data1_reg_val,
						davinci_spi->base + SPIDAT1);

				/* wait until the IRQ handler consumed it */
				while (ioread32(davinci_spi->base + SPIINT) &
						SPIINT_RX_INTR)
					cpu_relax();
			}
			/* drop CSHOLD and CS field for the final write */
			iowrite32((data1_reg_val & 0x0ffcffff),
					davinci_spi->base + SPIDAT1);
		}
	}

	/*
	 * Check for bit error, desync error,parity error,timeout error and
	 * receive overflow errors
	 */
	int_status = ioread32(davinci_spi->base + SPIFLG);

	ret = davinci_spi_check_error(davinci_spi, int_status);
	if (ret != 0)
		return ret;

	/* SPI Framework maintains the count only in bytes so convert back */
	davinci_spi->count *= conv;

	return t->len;
}
764
/*
 * DAVINCI_DMA_DATA_TYPE_{S8,S16,S32} are already defined near the top of
 * this file; the identical duplicate definitions that used to sit here
 * have been removed.
 */
/*
 * davinci_spi_bufs_dma - transfer a message using the EDMA channels.
 *
 * Maps the TX buffer (or tmp_buf, to generate clocking for RX-only
 * transfers) and optionally the RX buffer for DMA, programs both EDMA
 * channels, starts them, and blocks on their completions. Returns
 * t->len on success or a negative errno.
 */
static int davinci_spi_bufs_dma(struct spi_device *spi, struct spi_transfer *t)
{
	struct davinci_spi *davinci_spi;
	int int_status = 0;
	int count, temp_count;
	u8 conv = 1;
	u8 tmp;
	u32 data1_reg_val;
	struct davinci_spi_dma *davinci_spi_dma;
	int word_len, data_type, ret;
	unsigned long tx_reg, rx_reg;
	struct davinci_spi_platform_data *pdata;
	struct device *sdev;

	davinci_spi = spi_master_get_devdata(spi->master);
	pdata = davinci_spi->pdata;
	sdev = davinci_spi->bitbang.master->dev.parent;

	davinci_spi_dma = &davinci_spi->dma_channels[spi->chip_select];

	/* EDMA needs the *physical* addresses of the data registers */
	tx_reg = (unsigned long)davinci_spi->pbase + SPIDAT1;
	rx_reg = (unsigned long)davinci_spi->pbase + SPIBUF;

	davinci_spi->tx = t->tx_buf;
	davinci_spi->rx = t->rx_buf;

	/* convert len to words based on bits_per_word */
	conv = davinci_spi->slave[spi->chip_select].bytes_per_word;
	davinci_spi->count = t->len / conv;

	INIT_COMPLETION(davinci_spi->done);

	init_completion(&davinci_spi_dma->dma_rx_completion);
	init_completion(&davinci_spi_dma->dma_tx_completion);

	word_len = conv * 8;

	/* pick the EDMA element size matching the word length */
	if (word_len <= 8)
		data_type = DAVINCI_DMA_DATA_TYPE_S8;
	else if (word_len <= 16)
		data_type = DAVINCI_DMA_DATA_TYPE_S16;
	else if (word_len <= 32)
		data_type = DAVINCI_DMA_DATA_TYPE_S32;
	else
		return -EINVAL;

	ret = davinci_spi_bufs_prep(spi, davinci_spi);
	if (ret)
		return ret;

	/* Put delay val if required */
	iowrite32(0 | (pdata->c2tdelay << SPI_C2TDELAY_SHIFT) |
			(pdata->t2cdelay << SPI_T2CDELAY_SHIFT),
			davinci_spi->base + SPIDELAY);

	count = davinci_spi->count;	/* the number of elements */
	data1_reg_val = pdata->cs_hold << SPIDAT1_CSHOLD_SHIFT;

	/* CS default = 0xFF */
	tmp = ~(0x1 << spi->chip_select);

	clear_io_bits(davinci_spi->base + SPIDEF, ~tmp);

	data1_reg_val |= tmp << SPIDAT1_CSNR_SHIFT;

	/* disable all interrupts for dma transfers */
	clear_io_bits(davinci_spi->base + SPIINT, SPIINT_MASKALL);
	/* Disable SPI to write configuration bits in SPIDAT */
	clear_io_bits(davinci_spi->base + SPIGCR1, SPIGCR1_SPIENA_MASK);
	iowrite32(data1_reg_val, davinci_spi->base + SPIDAT1);
	/* Enable SPI */
	set_io_bits(davinci_spi->base + SPIGCR1, SPIGCR1_SPIENA_MASK);

	/* drain any stale data from the receive buffer */
	while ((ioread32(davinci_spi->base + SPIBUF)
			& SPIBUF_RXEMPTY_MASK) == 0)
		cpu_relax();

	if (t->tx_buf) {
		t->tx_dma = dma_map_single(&spi->dev, (void *)t->tx_buf, count,
				DMA_TO_DEVICE);
		if (dma_mapping_error(&spi->dev, t->tx_dma)) {
			dev_dbg(sdev, "Unable to DMA map a %d bytes"
				" TX buffer\n", count);
			return -ENOMEM;
		}
		temp_count = count;
	} else {
		/* We need TX clocking for RX transaction */
		t->tx_dma = dma_map_single(&spi->dev,
				(void *)davinci_spi->tmp_buf, count + 1,
				DMA_TO_DEVICE);
		if (dma_mapping_error(&spi->dev, t->tx_dma)) {
			dev_dbg(sdev, "Unable to DMA map a %d bytes"
				" TX tmp buffer\n", count);
			return -ENOMEM;
		}
		temp_count = count + 1;
	}

	/* TX channel: memory -> SPIDAT1, one element per event */
	edma_set_transfer_params(davinci_spi_dma->dma_tx_channel,
			data_type, temp_count, 1, 0, ASYNC);
	edma_set_dest(davinci_spi_dma->dma_tx_channel, tx_reg, INCR, W8BIT);
	edma_set_src(davinci_spi_dma->dma_tx_channel, t->tx_dma, INCR, W8BIT);
	edma_set_src_index(davinci_spi_dma->dma_tx_channel, data_type, 0);
	edma_set_dest_index(davinci_spi_dma->dma_tx_channel, 0, 0);

	if (t->rx_buf) {
		/* initiate transaction */
		iowrite32(data1_reg_val, davinci_spi->base + SPIDAT1);

		t->rx_dma = dma_map_single(&spi->dev, (void *)t->rx_buf, count,
				DMA_FROM_DEVICE);
		if (dma_mapping_error(&spi->dev, t->rx_dma)) {
			dev_dbg(sdev, "Couldn't DMA map a %d bytes RX buffer\n",
					count);
			if (t->tx_buf != NULL)
				dma_unmap_single(NULL, t->tx_dma,
						count, DMA_TO_DEVICE);
			return -ENOMEM;
		}
		/* RX channel: SPIBUF -> memory, one element per event */
		edma_set_transfer_params(davinci_spi_dma->dma_rx_channel,
				data_type, count, 1, 0, ASYNC);
		edma_set_src(davinci_spi_dma->dma_rx_channel,
				rx_reg, INCR, W8BIT);
		edma_set_dest(davinci_spi_dma->dma_rx_channel,
				t->rx_dma, INCR, W8BIT);
		edma_set_src_index(davinci_spi_dma->dma_rx_channel, 0, 0);
		edma_set_dest_index(davinci_spi_dma->dma_rx_channel,
				data_type, 0);
	}

	if ((t->tx_buf) || (t->rx_buf))
		edma_start(davinci_spi_dma->dma_tx_channel);

	if (t->rx_buf)
		edma_start(davinci_spi_dma->dma_rx_channel);

	/* let the controller raise DMA requests; callbacks stop them */
	if ((t->rx_buf) || (t->tx_buf))
		davinci_spi_set_dma_req(spi, 1);

	if (t->tx_buf)
		wait_for_completion_interruptible(
				&davinci_spi_dma->dma_tx_completion);

	if (t->rx_buf)
		wait_for_completion_interruptible(
				&davinci_spi_dma->dma_rx_completion);

	dma_unmap_single(NULL, t->tx_dma, temp_count, DMA_TO_DEVICE);

	if (t->rx_buf)
		dma_unmap_single(NULL, t->rx_dma, count, DMA_FROM_DEVICE);

	/*
	 * Check for bit error, desync error,parity error,timeout error and
	 * receive overflow errors
	 */
	int_status = ioread32(davinci_spi->base + SPIFLG);

	ret = davinci_spi_check_error(davinci_spi, int_status);
	if (ret != 0)
		return ret;

	/* SPI Framework maintains the count only in bytes so convert back */
	davinci_spi->count *= conv;

	return t->len;
}
938
939/**
940 * davinci_spi_irq - IRQ handler for DaVinci SPI
941 * @irq: IRQ number for this SPI Master
942 * @context_data: structure for SPI Master controller davinci_spi
943 */
944static irqreturn_t davinci_spi_irq(s32 irq, void *context_data)
945{
946 struct davinci_spi *davinci_spi = context_data;
947 u32 int_status, rx_data = 0;
948 irqreturn_t ret = IRQ_NONE;
949
950 int_status = ioread32(davinci_spi->base + SPIFLG);
951
952 while ((int_status & SPIFLG_RX_INTR_MASK)) {
953 if (likely(int_status & SPIFLG_RX_INTR_MASK)) {
954 ret = IRQ_HANDLED;
955
956 rx_data = ioread32(davinci_spi->base + SPIBUF);
957 davinci_spi->get_rx(rx_data, davinci_spi);
958
959 /* Disable Receive Interrupt */
960 iowrite32(~(SPIINT_RX_INTR | SPIINT_TX_INTR),
961 davinci_spi->base + SPIINT);
962 } else
963 (void)davinci_spi_check_error(davinci_spi, int_status);
964
965 int_status = ioread32(davinci_spi->base + SPIFLG);
966 }
967
968 return ret;
969}
970
971/**
972 * davinci_spi_probe - probe function for SPI Master Controller
973 * @pdev: platform_device structure which contains plateform specific data
974 */
975static int davinci_spi_probe(struct platform_device *pdev)
976{
977 struct spi_master *master;
978 struct davinci_spi *davinci_spi;
979 struct davinci_spi_platform_data *pdata;
980 struct resource *r, *mem;
981 resource_size_t dma_rx_chan = SPI_NO_RESOURCE;
982 resource_size_t dma_tx_chan = SPI_NO_RESOURCE;
983 resource_size_t dma_eventq = SPI_NO_RESOURCE;
984 int i = 0, ret = 0;
985
986 pdata = pdev->dev.platform_data;
987 if (pdata == NULL) {
988 ret = -ENODEV;
989 goto err;
990 }
991
992 master = spi_alloc_master(&pdev->dev, sizeof(struct davinci_spi));
993 if (master == NULL) {
994 ret = -ENOMEM;
995 goto err;
996 }
997
998 dev_set_drvdata(&pdev->dev, master);
999
1000 davinci_spi = spi_master_get_devdata(master);
1001 if (davinci_spi == NULL) {
1002 ret = -ENOENT;
1003 goto free_master;
1004 }
1005
1006 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1007 if (r == NULL) {
1008 ret = -ENOENT;
1009 goto free_master;
1010 }
1011
1012 davinci_spi->pbase = r->start;
1013 davinci_spi->region_size = resource_size(r);
1014 davinci_spi->pdata = pdata;
1015
1016 mem = request_mem_region(r->start, davinci_spi->region_size,
1017 pdev->name);
1018 if (mem == NULL) {
1019 ret = -EBUSY;
1020 goto free_master;
1021 }
1022
50356dd7 1023 davinci_spi->base = ioremap(r->start, davinci_spi->region_size);
358934a6
SP
1024 if (davinci_spi->base == NULL) {
1025 ret = -ENOMEM;
1026 goto release_region;
1027 }
1028
1029 davinci_spi->irq = platform_get_irq(pdev, 0);
1030 if (davinci_spi->irq <= 0) {
1031 ret = -EINVAL;
1032 goto unmap_io;
1033 }
1034
1035 ret = request_irq(davinci_spi->irq, davinci_spi_irq, IRQF_DISABLED,
1036 dev_name(&pdev->dev), davinci_spi);
1037 if (ret)
1038 goto unmap_io;
1039
1040 /* Allocate tmp_buf for tx_buf */
1041 davinci_spi->tmp_buf = kzalloc(SPI_BUFSIZ, GFP_KERNEL);
1042 if (davinci_spi->tmp_buf == NULL) {
1043 ret = -ENOMEM;
1044 goto irq_free;
1045 }
1046
1047 davinci_spi->bitbang.master = spi_master_get(master);
1048 if (davinci_spi->bitbang.master == NULL) {
1049 ret = -ENODEV;
1050 goto free_tmp_buf;
1051 }
1052
1053 davinci_spi->clk = clk_get(&pdev->dev, NULL);
1054 if (IS_ERR(davinci_spi->clk)) {
1055 ret = -ENODEV;
1056 goto put_master;
1057 }
1058 clk_enable(davinci_spi->clk);
1059
358934a6
SP
1060 master->bus_num = pdev->id;
1061 master->num_chipselect = pdata->num_chipselect;
1062 master->setup = davinci_spi_setup;
1063 master->cleanup = davinci_spi_cleanup;
1064
1065 davinci_spi->bitbang.chipselect = davinci_spi_chipselect;
1066 davinci_spi->bitbang.setup_transfer = davinci_spi_setup_transfer;
1067
1068 davinci_spi->version = pdata->version;
1069 use_dma = pdata->use_dma;
1070
1071 davinci_spi->bitbang.flags = SPI_NO_CS | SPI_LSB_FIRST | SPI_LOOP;
1072 if (davinci_spi->version == SPI_VERSION_2)
1073 davinci_spi->bitbang.flags |= SPI_READY;
1074
1075 if (use_dma) {
778e261e
BN
1076 r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
1077 if (r)
1078 dma_rx_chan = r->start;
1079 r = platform_get_resource(pdev, IORESOURCE_DMA, 1);
1080 if (r)
1081 dma_tx_chan = r->start;
1082 r = platform_get_resource(pdev, IORESOURCE_DMA, 2);
1083 if (r)
1084 dma_eventq = r->start;
358934a6
SP
1085 }
1086
1087 if (!use_dma ||
1088 dma_rx_chan == SPI_NO_RESOURCE ||
1089 dma_tx_chan == SPI_NO_RESOURCE ||
1090 dma_eventq == SPI_NO_RESOURCE) {
1091 davinci_spi->bitbang.txrx_bufs = davinci_spi_bufs_pio;
1092 use_dma = 0;
1093 } else {
1094 davinci_spi->bitbang.txrx_bufs = davinci_spi_bufs_dma;
1095 davinci_spi->dma_channels = kzalloc(master->num_chipselect
1096 * sizeof(struct davinci_spi_dma), GFP_KERNEL);
1097 if (davinci_spi->dma_channels == NULL) {
1098 ret = -ENOMEM;
1099 goto free_clk;
1100 }
1101
1102 for (i = 0; i < master->num_chipselect; i++) {
1103 davinci_spi->dma_channels[i].dma_rx_channel = -1;
1104 davinci_spi->dma_channels[i].dma_rx_sync_dev =
1105 dma_rx_chan;
1106 davinci_spi->dma_channels[i].dma_tx_channel = -1;
1107 davinci_spi->dma_channels[i].dma_tx_sync_dev =
1108 dma_tx_chan;
1109 davinci_spi->dma_channels[i].eventq = dma_eventq;
1110 }
1111 dev_info(&pdev->dev, "DaVinci SPI driver in EDMA mode\n"
1112 "Using RX channel = %d , TX channel = %d and "
1113 "event queue = %d", dma_rx_chan, dma_tx_chan,
1114 dma_eventq);
1115 }
1116
1117 davinci_spi->get_rx = davinci_spi_rx_buf_u8;
1118 davinci_spi->get_tx = davinci_spi_tx_buf_u8;
1119
1120 init_completion(&davinci_spi->done);
1121
1122 /* Reset In/OUT SPI module */
1123 iowrite32(0, davinci_spi->base + SPIGCR0);
1124 udelay(100);
1125 iowrite32(1, davinci_spi->base + SPIGCR0);
1126
1127 /* Clock internal */
1128 if (davinci_spi->pdata->clk_internal)
1129 set_io_bits(davinci_spi->base + SPIGCR1,
1130 SPIGCR1_CLKMOD_MASK);
1131 else
1132 clear_io_bits(davinci_spi->base + SPIGCR1,
1133 SPIGCR1_CLKMOD_MASK);
1134
1135 /* master mode default */
1136 set_io_bits(davinci_spi->base + SPIGCR1, SPIGCR1_MASTER_MASK);
1137
1138 if (davinci_spi->pdata->intr_level)
1139 iowrite32(SPI_INTLVL_1, davinci_spi->base + SPILVL);
1140 else
1141 iowrite32(SPI_INTLVL_0, davinci_spi->base + SPILVL);
1142
1143 ret = spi_bitbang_start(&davinci_spi->bitbang);
1144 if (ret)
1145 goto free_clk;
1146
3b740b10 1147 dev_info(&pdev->dev, "Controller at 0x%p\n", davinci_spi->base);
358934a6
SP
1148
1149 if (!pdata->poll_mode)
1150 dev_info(&pdev->dev, "Operating in interrupt mode"
1151 " using IRQ %d\n", davinci_spi->irq);
1152
1153 return ret;
1154
1155free_clk:
1156 clk_disable(davinci_spi->clk);
1157 clk_put(davinci_spi->clk);
1158put_master:
1159 spi_master_put(master);
1160free_tmp_buf:
1161 kfree(davinci_spi->tmp_buf);
1162irq_free:
1163 free_irq(davinci_spi->irq, davinci_spi);
1164unmap_io:
1165 iounmap(davinci_spi->base);
1166release_region:
1167 release_mem_region(davinci_spi->pbase, davinci_spi->region_size);
1168free_master:
1169 kfree(master);
1170err:
1171 return ret;
1172}
1173
1174/**
1175 * davinci_spi_remove - remove function for SPI Master Controller
1176 * @pdev: platform_device structure which contains plateform specific data
1177 *
1178 * This function will do the reverse action of davinci_spi_probe function
1179 * It will free the IRQ and SPI controller's memory region.
1180 * It will also call spi_bitbang_stop to destroy the work queue which was
1181 * created by spi_bitbang_start.
1182 */
1183static int __exit davinci_spi_remove(struct platform_device *pdev)
1184{
1185 struct davinci_spi *davinci_spi;
1186 struct spi_master *master;
1187
1188 master = dev_get_drvdata(&pdev->dev);
1189 davinci_spi = spi_master_get_devdata(master);
1190
1191 spi_bitbang_stop(&davinci_spi->bitbang);
1192
1193 clk_disable(davinci_spi->clk);
1194 clk_put(davinci_spi->clk);
1195 spi_master_put(master);
1196 kfree(davinci_spi->tmp_buf);
1197 free_irq(davinci_spi->irq, davinci_spi);
1198 iounmap(davinci_spi->base);
1199 release_mem_region(davinci_spi->pbase, davinci_spi->region_size);
1200
1201 return 0;
1202}
1203
1204static struct platform_driver davinci_spi_driver = {
1205 .driver.name = "spi_davinci",
1206 .remove = __exit_p(davinci_spi_remove),
1207};
1208
/*
 * Module entry point: register the platform driver, binding
 * davinci_spi_probe to any matching "spi_davinci" device.
 * platform_driver_probe() registers and probes in one step;
 * it fails if no device binds at registration time.
 */
static int __init davinci_spi_init(void)
{
	return platform_driver_probe(&davinci_spi_driver, davinci_spi_probe);
}
module_init(davinci_spi_init);
1214
/*
 * Module exit point: unregister the platform driver, which causes
 * davinci_spi_remove() to run for any still-bound device.
 */
static void __exit davinci_spi_exit(void)
{
	platform_driver_unregister(&davinci_spi_driver);
}
module_exit(davinci_spi_exit);
1220
1221MODULE_DESCRIPTION("TI DaVinci SPI Master Controller Driver");
1222MODULE_LICENSE("GPL");
This page took 0.123136 seconds and 5 git commands to generate.