spi: davinci: make chip-select specific parameters really chip-select specific
[deliverable/linux.git] drivers/spi/davinci_spi.c
1 /*
2 * Copyright (C) 2009 Texas Instruments.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 */
18
19 #include <linux/interrupt.h>
20 #include <linux/io.h>
21 #include <linux/gpio.h>
22 #include <linux/module.h>
23 #include <linux/delay.h>
24 #include <linux/platform_device.h>
25 #include <linux/err.h>
26 #include <linux/clk.h>
27 #include <linux/dma-mapping.h>
28 #include <linux/spi/spi.h>
29 #include <linux/spi/spi_bitbang.h>
30 #include <linux/slab.h>
31
32 #include <mach/spi.h>
33 #include <mach/edma.h>
34
35 #define SPI_NO_RESOURCE ((resource_size_t)-1)
36
37 #define SPI_MAX_CHIPSELECT 2
38
39 #define CS_DEFAULT 0xFF
40
41 #define SPI_BUFSIZ (SMP_CACHE_BYTES + 1)
42 #define DAVINCI_DMA_DATA_TYPE_S8 0x01
43 #define DAVINCI_DMA_DATA_TYPE_S16 0x02
44 #define DAVINCI_DMA_DATA_TYPE_S32 0x04
45
46 #define SPIFMT_PHASE_MASK BIT(16)
47 #define SPIFMT_POLARITY_MASK BIT(17)
48 #define SPIFMT_DISTIMER_MASK BIT(18)
49 #define SPIFMT_SHIFTDIR_MASK BIT(20)
50 #define SPIFMT_WAITENA_MASK BIT(21)
51 #define SPIFMT_PARITYENA_MASK BIT(22)
52 #define SPIFMT_ODD_PARITY_MASK BIT(23)
53 #define SPIFMT_WDELAY_MASK 0x3f000000u
54 #define SPIFMT_WDELAY_SHIFT 24
55 #define SPIFMT_CHARLEN_MASK 0x0000001Fu
56 #define SPIFMT_PRESCALE_SHIFT 8
57
58
59 /* SPIPC0 */
60 #define SPIPC0_DIFUN_MASK BIT(11) /* MISO */
61 #define SPIPC0_DOFUN_MASK BIT(10) /* MOSI */
62 #define SPIPC0_CLKFUN_MASK BIT(9) /* CLK */
63 #define SPIPC0_SPIENA_MASK BIT(8) /* nREADY */
64
65 #define SPIINT_MASKALL 0x0101035F
66 #define SPI_INTLVL_1 0x000001FFu
67 #define SPI_INTLVL_0 0x00000000u
68
69 /* SPIDAT1 (upper 16 bit defines) */
70 #define SPIDAT1_CSHOLD_MASK BIT(12)
71
72 /* SPIGCR1 */
73 #define SPIGCR1_CLKMOD_MASK BIT(1)
74 #define SPIGCR1_MASTER_MASK BIT(0)
75 #define SPIGCR1_LOOPBACK_MASK BIT(16)
76 #define SPIGCR1_SPIENA_MASK BIT(24)
77
78 /* SPIBUF */
79 #define SPIBUF_TXFULL_MASK BIT(29)
80 #define SPIBUF_RXEMPTY_MASK BIT(31)
81
82 /* Error Masks */
83 #define SPIFLG_DLEN_ERR_MASK BIT(0)
84 #define SPIFLG_TIMEOUT_MASK BIT(1)
85 #define SPIFLG_PARERR_MASK BIT(2)
86 #define SPIFLG_DESYNC_MASK BIT(3)
87 #define SPIFLG_BITERR_MASK BIT(4)
88 #define SPIFLG_OVRRUN_MASK BIT(6)
89 #define SPIFLG_RX_INTR_MASK BIT(8)
90 #define SPIFLG_TX_INTR_MASK BIT(9)
91 #define SPIFLG_BUF_INIT_ACTIVE_MASK BIT(24)
92
93 #define SPIINT_BITERR_INTR BIT(4)
94 #define SPIINT_OVRRUN_INTR BIT(6)
95 #define SPIINT_RX_INTR BIT(8)
96 #define SPIINT_TX_INTR BIT(9)
97 #define SPIINT_DMA_REQ_EN BIT(16)
98
99 #define SPI_T2CDELAY_SHIFT 16
100 #define SPI_C2TDELAY_SHIFT 24
101
102 /* SPI Controller registers */
103 #define SPIGCR0 0x00
104 #define SPIGCR1 0x04
105 #define SPIINT 0x08
106 #define SPILVL 0x0c
107 #define SPIFLG 0x10
108 #define SPIPC0 0x14
109 #define SPIDAT1 0x3c
110 #define SPIBUF 0x40
111 #define SPIDELAY 0x48
112 #define SPIDEF 0x4c
113 #define SPIFMT0 0x50
114
115 struct davinci_spi_slave {
116 u32 cmd_to_write;
117 u32 clk_ctrl_to_write;
118 u32 bytes_per_word;
119 u8 active_cs;
120 };
121
122 /* We have 2 DMA channels per CS, one for RX and one for TX */
123 struct davinci_spi_dma {
124 int dma_tx_channel;
125 int dma_rx_channel;
126 int dma_tx_sync_dev;
127 int dma_rx_sync_dev;
128 enum dma_event_q eventq;
129
130 struct completion dma_tx_completion;
131 struct completion dma_rx_completion;
132 };
133
134 /* SPI Controller driver's private data. */
135 struct davinci_spi {
136 struct spi_bitbang bitbang;
137 struct clk *clk;
138
139 u8 version;
140 resource_size_t pbase;
141 void __iomem *base;
142 size_t region_size;
 143         int                     irq;    /* signed so platform_get_irq() errors stay detectable */
144 struct completion done;
145
146 const void *tx;
147 void *rx;
148 u8 *tmp_buf;
149 int count;
150 struct davinci_spi_dma *dma_channels;
151 struct davinci_spi_platform_data *pdata;
152
153 void (*get_rx)(u32 rx_data, struct davinci_spi *);
154 u32 (*get_tx)(struct davinci_spi *);
155
156 struct davinci_spi_slave slave[SPI_MAX_CHIPSELECT];
157 };
158
159 static struct davinci_spi_config davinci_spi_default_cfg;
160
161 static unsigned use_dma;
162
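/*
 * Per-word FIFO copy helpers: the transfer routines call get_rx()/get_tx()
 * once per SPI word, advancing the rx/tx pointers by one u8 or u16 element.
 */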
163 static void davinci_spi_rx_buf_u8(u32 data, struct davinci_spi *davinci_spi)
164 {
165 u8 *rx = davinci_spi->rx;
166
167 *rx++ = (u8)data;
168 davinci_spi->rx = rx;
169 }
170
171 static void davinci_spi_rx_buf_u16(u32 data, struct davinci_spi *davinci_spi)
172 {
173 u16 *rx = davinci_spi->rx;
174
175 *rx++ = (u16)data;
176 davinci_spi->rx = rx;
177 }
178
179 static u32 davinci_spi_tx_buf_u8(struct davinci_spi *davinci_spi)
180 {
181 u32 data;
182 const u8 *tx = davinci_spi->tx;
183
184 data = *tx++;
185 davinci_spi->tx = tx;
186 return data;
187 }
188
189 static u32 davinci_spi_tx_buf_u16(struct davinci_spi *davinci_spi)
190 {
191 u32 data;
192 const u16 *tx = davinci_spi->tx;
193
194 data = *tx++;
195 davinci_spi->tx = tx;
196 return data;
197 }
198
199 static inline void set_io_bits(void __iomem *addr, u32 bits)
200 {
201 u32 v = ioread32(addr);
202
203 v |= bits;
204 iowrite32(v, addr);
205 }
206
207 static inline void clear_io_bits(void __iomem *addr, u32 bits)
208 {
209 u32 v = ioread32(addr);
210
211 v &= ~bits;
212 iowrite32(v, addr);
213 }
214
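/*
 * The driver programs one SPIFMTn register per chip select; the registers
 * are laid out contiguously from SPIFMT0, so the offset is SPIFMT0 + 4 * cs_num.
 */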
215 static inline void set_fmt_bits(void __iomem *addr, u32 bits, int cs_num)
216 {
217 set_io_bits(addr + SPIFMT0 + (0x4 * cs_num), bits);
218 }
219
220 static inline void clear_fmt_bits(void __iomem *addr, u32 bits, int cs_num)
221 {
222 clear_io_bits(addr + SPIFMT0 + (0x4 * cs_num), bits);
223 }
224
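/* Enable or disable the controller's DMA request generation via SPIINT. */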
225 static void davinci_spi_set_dma_req(const struct spi_device *spi, int enable)
226 {
227 struct davinci_spi *davinci_spi = spi_master_get_devdata(spi->master);
228
229 if (enable)
230 set_io_bits(davinci_spi->base + SPIINT, SPIINT_DMA_REQ_EN);
231 else
232 clear_io_bits(davinci_spi->base + SPIINT, SPIINT_DMA_REQ_EN);
233 }
234
235 /*
236 * Interface to control the chip select signal
237 */
238 static void davinci_spi_chipselect(struct spi_device *spi, int value)
239 {
240 struct davinci_spi *davinci_spi;
241 struct davinci_spi_platform_data *pdata;
242 u8 chip_sel = spi->chip_select;
243 u16 spidat1_cfg = CS_DEFAULT;
244 bool gpio_chipsel = false;
245
246 davinci_spi = spi_master_get_devdata(spi->master);
247 pdata = davinci_spi->pdata;
248
249 if (pdata->chip_sel && chip_sel < pdata->num_chipselect &&
250 pdata->chip_sel[chip_sel] != SPI_INTERN_CS)
251 gpio_chipsel = true;
252
253 /*
254 * Board specific chip select logic decides the polarity and cs
255 * line for the controller
256 */
257 if (gpio_chipsel) {
258 if (value == BITBANG_CS_ACTIVE)
259 gpio_set_value(pdata->chip_sel[chip_sel], 0);
260 else
261 gpio_set_value(pdata->chip_sel[chip_sel], 1);
262 } else {
263 if (value == BITBANG_CS_ACTIVE) {
264 spidat1_cfg |= SPIDAT1_CSHOLD_MASK;
265 spidat1_cfg &= ~(0x1 << chip_sel);
266 }
267
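		/*
		 * Only the upper half-word of SPIDAT1 (CSHOLD and the CS
		 * field) needs to change here; on this little-endian SoC a
		 * 16-bit write at offset SPIDAT1 + 2 updates it without
		 * touching the data half-word.
		 */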
268 iowrite16(spidat1_cfg, davinci_spi->base + SPIDAT1 + 2);
269 }
270 }
271
272 /**
273 * davinci_spi_get_prescale - Calculates the correct prescale value
 274  * @max_speed_hz: the maximum rate the SPI clock can run at
275 *
276 * This function calculates the prescale value that generates a clock rate
277 * less than or equal to the specified maximum.
278 *
 279  * Returns: calculated prescale - 1 for easy programming into SPI registers,
 280  * or a negative error number if a valid prescaler cannot be obtained.
281 */
282 static inline int davinci_spi_get_prescale(struct davinci_spi *davinci_spi,
283 u32 max_speed_hz)
284 {
285 int ret;
286
287 ret = DIV_ROUND_UP(clk_get_rate(davinci_spi->clk), max_speed_hz);
288
289 if (ret < 3 || ret > 256)
290 return -EINVAL;
291
292 return ret - 1;
293 }
294
295 /**
 296  * davinci_spi_setup_transfer - This function will determine the transfer method
 297  * @spi: spi device on which data transfer is to be done
298 * @t: spi transfer in which transfer info is filled
299 *
 300  * This function determines the data transfer method (8 or 16 bit transfer).
 301  * It also programs the per-chip-select clock prescaler according to the
 302  * SPI slave device frequency.
303 */
304 static int davinci_spi_setup_transfer(struct spi_device *spi,
305 struct spi_transfer *t)
306 {
307
308 struct davinci_spi *davinci_spi;
309 u8 bits_per_word = 0;
 310         u32 hz = 0;
 	int prescale = 0;	/* signed: davinci_spi_get_prescale() may return -EINVAL */
311
312 davinci_spi = spi_master_get_devdata(spi->master);
313
314 if (t) {
315 bits_per_word = t->bits_per_word;
316 hz = t->speed_hz;
317 }
318
 319         /* if bits_per_word is not set, fall back to the device default */
320 if (!bits_per_word)
321 bits_per_word = spi->bits_per_word;
322
323 /*
324 * Assign function pointer to appropriate transfer method
325 * 8bit, 16bit or 32bit transfer
326 */
327 if (bits_per_word <= 8 && bits_per_word >= 2) {
328 davinci_spi->get_rx = davinci_spi_rx_buf_u8;
329 davinci_spi->get_tx = davinci_spi_tx_buf_u8;
330 davinci_spi->slave[spi->chip_select].bytes_per_word = 1;
331 } else if (bits_per_word <= 16 && bits_per_word >= 2) {
332 davinci_spi->get_rx = davinci_spi_rx_buf_u16;
333 davinci_spi->get_tx = davinci_spi_tx_buf_u16;
334 davinci_spi->slave[spi->chip_select].bytes_per_word = 2;
335 } else
336 return -EINVAL;
337
338 if (!hz)
339 hz = spi->max_speed_hz;
340
341 prescale = davinci_spi_get_prescale(davinci_spi, hz);
342 if (prescale < 0)
343 return prescale;
344
345 clear_fmt_bits(davinci_spi->base, SPIFMT_CHARLEN_MASK,
346 spi->chip_select);
347 set_fmt_bits(davinci_spi->base, bits_per_word & 0x1f,
348 spi->chip_select);
349
350 clear_fmt_bits(davinci_spi->base, 0x0000ff00, spi->chip_select);
351 set_fmt_bits(davinci_spi->base,
352 prescale << SPIFMT_PRESCALE_SHIFT, spi->chip_select);
353
354 return 0;
355 }
356
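/*
 * EDMA completion callbacks: on success stop the channel, on error clean it
 * up, then complete the corresponding RX/TX completion and drop the DMA
 * request enable so the controller stops generating further events.
 */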
357 static void davinci_spi_dma_rx_callback(unsigned lch, u16 ch_status, void *data)
358 {
359 struct spi_device *spi = (struct spi_device *)data;
360 struct davinci_spi *davinci_spi;
361 struct davinci_spi_dma *davinci_spi_dma;
362
363 davinci_spi = spi_master_get_devdata(spi->master);
364 davinci_spi_dma = &(davinci_spi->dma_channels[spi->chip_select]);
365
366 if (ch_status == DMA_COMPLETE)
367 edma_stop(davinci_spi_dma->dma_rx_channel);
368 else
369 edma_clean_channel(davinci_spi_dma->dma_rx_channel);
370
371 complete(&davinci_spi_dma->dma_rx_completion);
372 /* We must disable the DMA RX request */
373 davinci_spi_set_dma_req(spi, 0);
374 }
375
376 static void davinci_spi_dma_tx_callback(unsigned lch, u16 ch_status, void *data)
377 {
378 struct spi_device *spi = (struct spi_device *)data;
379 struct davinci_spi *davinci_spi;
380 struct davinci_spi_dma *davinci_spi_dma;
381
382 davinci_spi = spi_master_get_devdata(spi->master);
383 davinci_spi_dma = &(davinci_spi->dma_channels[spi->chip_select]);
384
385 if (ch_status == DMA_COMPLETE)
386 edma_stop(davinci_spi_dma->dma_tx_channel);
387 else
388 edma_clean_channel(davinci_spi_dma->dma_tx_channel);
389
390 complete(&davinci_spi_dma->dma_tx_completion);
391 /* We must disable the DMA TX request */
392 davinci_spi_set_dma_req(spi, 0);
393 }
394
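/* Allocate the RX and TX EDMA channels associated with this chip select. */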
395 static int davinci_spi_request_dma(struct spi_device *spi)
396 {
397 struct davinci_spi *davinci_spi;
398 struct davinci_spi_dma *davinci_spi_dma;
399 struct device *sdev;
400 int r;
401
402 davinci_spi = spi_master_get_devdata(spi->master);
403 davinci_spi_dma = &davinci_spi->dma_channels[spi->chip_select];
404 sdev = davinci_spi->bitbang.master->dev.parent;
405
406 r = edma_alloc_channel(davinci_spi_dma->dma_rx_sync_dev,
407 davinci_spi_dma_rx_callback, spi,
408 davinci_spi_dma->eventq);
409 if (r < 0) {
410 dev_dbg(sdev, "Unable to request DMA channel for SPI RX\n");
411 return -EAGAIN;
412 }
413 davinci_spi_dma->dma_rx_channel = r;
414 r = edma_alloc_channel(davinci_spi_dma->dma_tx_sync_dev,
415 davinci_spi_dma_tx_callback, spi,
416 davinci_spi_dma->eventq);
417 if (r < 0) {
418 edma_free_channel(davinci_spi_dma->dma_rx_channel);
419 davinci_spi_dma->dma_rx_channel = -1;
420 dev_dbg(sdev, "Unable to request DMA channel for SPI TX\n");
421 return -EAGAIN;
422 }
423 davinci_spi_dma->dma_tx_channel = r;
424
425 return 0;
426 }
427
428 /**
 429  * davinci_spi_setup - This function will set the default transfer method
 430  * @spi: spi device on which data transfer is to be done
 431  *
 432  * This function sets the default transfer method.
433 */
434 static int davinci_spi_setup(struct spi_device *spi)
435 {
436 int retval;
437 struct davinci_spi *davinci_spi;
438 struct davinci_spi_dma *davinci_spi_dma;
439 struct davinci_spi_config *spicfg;
440
441 davinci_spi = spi_master_get_devdata(spi->master);
442 spicfg = (struct davinci_spi_config *)spi->controller_data;
443 if (!spicfg)
444 spicfg = &davinci_spi_default_cfg;
445
 446         /* if bits per word is zero, default to 8 */
447 if (!spi->bits_per_word)
448 spi->bits_per_word = 8;
449
450 davinci_spi->slave[spi->chip_select].cmd_to_write = 0;
451
452 if (use_dma && davinci_spi->dma_channels) {
453 davinci_spi_dma = &davinci_spi->dma_channels[spi->chip_select];
454
455 if ((davinci_spi_dma->dma_rx_channel == -1)
456 || (davinci_spi_dma->dma_tx_channel == -1)) {
457 retval = davinci_spi_request_dma(spi);
458 if (retval < 0)
459 return retval;
460 }
461 }
462
463 /*
464 * Set up SPIFMTn register, unique to this chipselect.
465 *
466 * NOTE: we could do all of these with one write. Also, some
467 * of the "version 2" features are found in chips that don't
468 * support all of them...
469 */
470 if (spi->mode & SPI_LSB_FIRST)
471 set_fmt_bits(davinci_spi->base, SPIFMT_SHIFTDIR_MASK,
472 spi->chip_select);
473 else
474 clear_fmt_bits(davinci_spi->base, SPIFMT_SHIFTDIR_MASK,
475 spi->chip_select);
476
477 if (spi->mode & SPI_CPOL)
478 set_fmt_bits(davinci_spi->base, SPIFMT_POLARITY_MASK,
479 spi->chip_select);
480 else
481 clear_fmt_bits(davinci_spi->base, SPIFMT_POLARITY_MASK,
482 spi->chip_select);
483
484 if (!(spi->mode & SPI_CPHA))
485 set_fmt_bits(davinci_spi->base, SPIFMT_PHASE_MASK,
486 spi->chip_select);
487 else
488 clear_fmt_bits(davinci_spi->base, SPIFMT_PHASE_MASK,
489 spi->chip_select);
490
491 /*
492 * Version 1 hardware supports two basic SPI modes:
493 * - Standard SPI mode uses 4 pins, with chipselect
494 * - 3 pin SPI is a 4 pin variant without CS (SPI_NO_CS)
495 * (distinct from SPI_3WIRE, with just one data wire;
496 * or similar variants without MOSI or without MISO)
497 *
498 * Version 2 hardware supports an optional handshaking signal,
499 * so it can support two more modes:
500 * - 5 pin SPI variant is standard SPI plus SPI_READY
501 * - 4 pin with enable is (SPI_READY | SPI_NO_CS)
502 */
503
504 if (davinci_spi->version == SPI_VERSION_2) {
505
506 clear_fmt_bits(davinci_spi->base, SPIFMT_WDELAY_MASK,
507 spi->chip_select);
508 set_fmt_bits(davinci_spi->base,
509 (spicfg->wdelay << SPIFMT_WDELAY_SHIFT) &
510 SPIFMT_WDELAY_MASK, spi->chip_select);
511
512 if (spicfg->odd_parity)
513 set_fmt_bits(davinci_spi->base, SPIFMT_ODD_PARITY_MASK,
514 spi->chip_select);
515 else
516 clear_fmt_bits(davinci_spi->base,
517 SPIFMT_ODD_PARITY_MASK,
518 spi->chip_select);
519
520 if (spicfg->parity_enable)
521 set_fmt_bits(davinci_spi->base, SPIFMT_PARITYENA_MASK,
522 spi->chip_select);
523 else
524 clear_fmt_bits(davinci_spi->base, SPIFMT_PARITYENA_MASK,
525 spi->chip_select);
526
527 if (spicfg->timer_disable)
528 set_fmt_bits(davinci_spi->base, SPIFMT_DISTIMER_MASK,
529 spi->chip_select);
530 else
531 clear_fmt_bits(davinci_spi->base, SPIFMT_DISTIMER_MASK,
532 spi->chip_select);
533
534 if (spi->mode & SPI_READY)
535 set_fmt_bits(davinci_spi->base,
536 SPIFMT_WAITENA_MASK,
537 spi->chip_select);
538 else
539 clear_fmt_bits(davinci_spi->base,
540 SPIFMT_WAITENA_MASK,
541 spi->chip_select);
542
543 }
544
545 retval = davinci_spi_setup_transfer(spi, NULL);
546
547 return retval;
548 }
549
550 static void davinci_spi_cleanup(struct spi_device *spi)
551 {
552 struct davinci_spi *davinci_spi = spi_master_get_devdata(spi->master);
553 struct davinci_spi_dma *davinci_spi_dma;
554
555 davinci_spi_dma = &davinci_spi->dma_channels[spi->chip_select];
556
557 if (use_dma && davinci_spi->dma_channels) {
558 davinci_spi_dma = &davinci_spi->dma_channels[spi->chip_select];
559
560 if ((davinci_spi_dma->dma_rx_channel != -1)
561 && (davinci_spi_dma->dma_tx_channel != -1)) {
562 edma_free_channel(davinci_spi_dma->dma_tx_channel);
563 edma_free_channel(davinci_spi_dma->dma_rx_channel);
564 }
565 }
566 }
567
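/*
 * Per-message preparation: program the SPIPC0 pin functions (including the
 * internal chip-select pin when it is used) and set or clear loopback mode.
 */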
568 static int davinci_spi_bufs_prep(struct spi_device *spi,
569 struct davinci_spi *davinci_spi)
570 {
571 struct davinci_spi_platform_data *pdata;
572 int op_mode = 0;
573
574 /*
575 * REVISIT unless devices disagree about SPI_LOOP or
576 * SPI_READY (SPI_NO_CS only allows one device!), this
577 * should not need to be done before each message...
578 * optimize for both flags staying cleared.
579 */
580
581 op_mode = SPIPC0_DIFUN_MASK
582 | SPIPC0_DOFUN_MASK
583 | SPIPC0_CLKFUN_MASK;
584 if (!(spi->mode & SPI_NO_CS)) {
585 pdata = davinci_spi->pdata;
586 if (!pdata->chip_sel ||
587 pdata->chip_sel[spi->chip_select] == SPI_INTERN_CS)
588 op_mode |= 1 << spi->chip_select;
589 }
590 if (spi->mode & SPI_READY)
591 op_mode |= SPIPC0_SPIENA_MASK;
592
593 iowrite32(op_mode, davinci_spi->base + SPIPC0);
594
595 if (spi->mode & SPI_LOOP)
596 set_io_bits(davinci_spi->base + SPIGCR1,
597 SPIGCR1_LOOPBACK_MASK);
598 else
599 clear_io_bits(davinci_spi->base + SPIGCR1,
600 SPIGCR1_LOOPBACK_MASK);
601
602 return 0;
603 }
604
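/* Translate SPIFLG error status bits into errno values. */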
605 static int davinci_spi_check_error(struct davinci_spi *davinci_spi,
606 int int_status)
607 {
608 struct device *sdev = davinci_spi->bitbang.master->dev.parent;
609
610 if (int_status & SPIFLG_TIMEOUT_MASK) {
611 dev_dbg(sdev, "SPI Time-out Error\n");
612 return -ETIMEDOUT;
613 }
614 if (int_status & SPIFLG_DESYNC_MASK) {
615 dev_dbg(sdev, "SPI Desynchronization Error\n");
616 return -EIO;
617 }
618 if (int_status & SPIFLG_BITERR_MASK) {
619 dev_dbg(sdev, "SPI Bit error\n");
620 return -EIO;
621 }
622
623 if (davinci_spi->version == SPI_VERSION_2) {
624 if (int_status & SPIFLG_DLEN_ERR_MASK) {
625 dev_dbg(sdev, "SPI Data Length Error\n");
626 return -EIO;
627 }
628 if (int_status & SPIFLG_PARERR_MASK) {
629 dev_dbg(sdev, "SPI Parity Error\n");
630 return -EIO;
631 }
632 if (int_status & SPIFLG_OVRRUN_MASK) {
633 dev_dbg(sdev, "SPI Data Overrun error\n");
634 return -EIO;
635 }
636 if (int_status & SPIFLG_TX_INTR_MASK) {
637 dev_dbg(sdev, "SPI TX intr bit set\n");
638 return -EIO;
639 }
640 if (int_status & SPIFLG_BUF_INIT_ACTIVE_MASK) {
641 dev_dbg(sdev, "SPI Buffer Init Active\n");
642 return -EBUSY;
643 }
644 }
645
646 return 0;
647 }
648
649 /**
 650  * davinci_spi_bufs_pio - PIO mode function which handles data transfer
 651  * @spi: spi device on which data transfer is to be done
 652  * @t: spi transfer in which transfer info is filled
 653  *
 654  * This function puts the data to be transferred into the data register
 655  * of the SPI controller and then waits for completion, either by polling
 656  * SPIBUF or with help from the IRQ handler.
657 */
658 static int davinci_spi_bufs_pio(struct spi_device *spi, struct spi_transfer *t)
659 {
660 struct davinci_spi *davinci_spi;
661 int int_status, count, ret;
662 u8 conv;
663 u32 tx_data, data1_reg_val;
664 u32 buf_val, flg_val;
665 struct davinci_spi_platform_data *pdata;
666 struct davinci_spi_config *spicfg;
667
668 davinci_spi = spi_master_get_devdata(spi->master);
669 pdata = davinci_spi->pdata;
670 spicfg = (struct davinci_spi_config *)spi->controller_data;
671 if (!spicfg)
672 spicfg = &davinci_spi_default_cfg;
673
674 davinci_spi->tx = t->tx_buf;
675 davinci_spi->rx = t->rx_buf;
676
677 /* convert len to words based on bits_per_word */
678 conv = davinci_spi->slave[spi->chip_select].bytes_per_word;
679 davinci_spi->count = t->len / conv;
680
681 data1_reg_val = ioread32(davinci_spi->base + SPIDAT1);
682
683 INIT_COMPLETION(davinci_spi->done);
684
685 ret = davinci_spi_bufs_prep(spi, davinci_spi);
686 if (ret)
687 return ret;
688
689 /* Enable SPI */
690 set_io_bits(davinci_spi->base + SPIGCR1, SPIGCR1_SPIENA_MASK);
691
692 iowrite32((spicfg->c2tdelay << SPI_C2TDELAY_SHIFT) |
693 (spicfg->t2cdelay << SPI_T2CDELAY_SHIFT),
694 davinci_spi->base + SPIDELAY);
695
696 count = davinci_spi->count;
697
 698         /* Determine whether to run a write (with optional read-back) or a read-only transfer */
699 if (t->tx_buf) {
700 clear_io_bits(davinci_spi->base + SPIINT, SPIINT_MASKALL);
701
702 while (1) {
703 tx_data = davinci_spi->get_tx(davinci_spi);
704
705 data1_reg_val &= ~(0xFFFF);
706 data1_reg_val |= (0xFFFF & tx_data);
707
708 buf_val = ioread32(davinci_spi->base + SPIBUF);
709 if ((buf_val & SPIBUF_TXFULL_MASK) == 0) {
710 iowrite32(data1_reg_val,
711 davinci_spi->base + SPIDAT1);
712
713 count--;
714 }
715 while (ioread32(davinci_spi->base + SPIBUF)
716 & SPIBUF_RXEMPTY_MASK)
717 cpu_relax();
718
719 /* getting the returned byte */
720 if (t->rx_buf) {
721 buf_val = ioread32(davinci_spi->base + SPIBUF);
722 davinci_spi->get_rx(buf_val, davinci_spi);
723 }
724 if (count <= 0)
725 break;
726 }
727 } else {
728 if (pdata->poll_mode) {
729 while (1) {
730 /* keeps the serial clock going */
731 if ((ioread32(davinci_spi->base + SPIBUF)
732 & SPIBUF_TXFULL_MASK) == 0)
733 iowrite32(data1_reg_val,
734 davinci_spi->base + SPIDAT1);
735
736 while (ioread32(davinci_spi->base + SPIBUF) &
737 SPIBUF_RXEMPTY_MASK)
738 cpu_relax();
739
740 flg_val = ioread32(davinci_spi->base + SPIFLG);
741 buf_val = ioread32(davinci_spi->base + SPIBUF);
742
743 davinci_spi->get_rx(buf_val, davinci_spi);
744
745 count--;
746 if (count <= 0)
747 break;
748 }
749 } else { /* Receive in Interrupt mode */
750 int i;
751
752 for (i = 0; i < davinci_spi->count; i++) {
753 set_io_bits(davinci_spi->base + SPIINT,
754 SPIINT_BITERR_INTR
755 | SPIINT_OVRRUN_INTR
756 | SPIINT_RX_INTR);
757
758 iowrite32(data1_reg_val,
759 davinci_spi->base + SPIDAT1);
760
761 while (ioread32(davinci_spi->base + SPIINT) &
762 SPIINT_RX_INTR)
763 cpu_relax();
764 }
765 iowrite32((data1_reg_val & 0x0ffcffff),
766 davinci_spi->base + SPIDAT1);
767 }
768 }
769
770 /*
 771          * Check for bit error, desync error, parity error, timeout error and
772 * receive overflow errors
773 */
774 int_status = ioread32(davinci_spi->base + SPIFLG);
775
776 ret = davinci_spi_check_error(davinci_spi, int_status);
777 if (ret != 0)
778 return ret;
779
780 /* SPI Framework maintains the count only in bytes so convert back */
781 davinci_spi->count *= conv;
782
783 return t->len;
784 }
785
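/*
 * EDMA transfer path: map the buffers, program one param set for TX and
 * (optionally) one for RX, start the channels, enable the controller's DMA
 * requests and wait for both completions before checking SPIFLG for errors.
 */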
790 static int davinci_spi_bufs_dma(struct spi_device *spi, struct spi_transfer *t)
791 {
792 struct davinci_spi *davinci_spi;
793 int int_status = 0;
794 int count, temp_count;
795 u8 conv = 1;
796 u32 data1_reg_val;
797 struct davinci_spi_dma *davinci_spi_dma;
798 int word_len, data_type, ret;
799 unsigned long tx_reg, rx_reg;
800 struct davinci_spi_config *spicfg;
801 struct device *sdev;
802
803 davinci_spi = spi_master_get_devdata(spi->master);
804 sdev = davinci_spi->bitbang.master->dev.parent;
805 spicfg = (struct davinci_spi_config *)spi->controller_data;
806 if (!spicfg)
807 spicfg = &davinci_spi_default_cfg;
808
809 davinci_spi_dma = &davinci_spi->dma_channels[spi->chip_select];
810
811 tx_reg = (unsigned long)davinci_spi->pbase + SPIDAT1;
812 rx_reg = (unsigned long)davinci_spi->pbase + SPIBUF;
813
814 davinci_spi->tx = t->tx_buf;
815 davinci_spi->rx = t->rx_buf;
816
817 /* convert len to words based on bits_per_word */
818 conv = davinci_spi->slave[spi->chip_select].bytes_per_word;
819 davinci_spi->count = t->len / conv;
820
821 data1_reg_val = ioread32(davinci_spi->base + SPIDAT1);
822
823 INIT_COMPLETION(davinci_spi->done);
824
825 init_completion(&davinci_spi_dma->dma_rx_completion);
826 init_completion(&davinci_spi_dma->dma_tx_completion);
827
828 word_len = conv * 8;
829
830 if (word_len <= 8)
831 data_type = DAVINCI_DMA_DATA_TYPE_S8;
832 else if (word_len <= 16)
833 data_type = DAVINCI_DMA_DATA_TYPE_S16;
834 else if (word_len <= 32)
835 data_type = DAVINCI_DMA_DATA_TYPE_S32;
836 else
837 return -EINVAL;
838
839 ret = davinci_spi_bufs_prep(spi, davinci_spi);
840 if (ret)
841 return ret;
842
843 /* Put delay val if required */
844 iowrite32((spicfg->c2tdelay << SPI_C2TDELAY_SHIFT) |
845 (spicfg->t2cdelay << SPI_T2CDELAY_SHIFT),
846 davinci_spi->base + SPIDELAY);
847
848 count = davinci_spi->count; /* the number of elements */
849
850 /* disable all interrupts for dma transfers */
851 clear_io_bits(davinci_spi->base + SPIINT, SPIINT_MASKALL);
852 /* Disable SPI to write configuration bits in SPIDAT */
853 clear_io_bits(davinci_spi->base + SPIGCR1, SPIGCR1_SPIENA_MASK);
854 /* Enable SPI */
855 set_io_bits(davinci_spi->base + SPIGCR1, SPIGCR1_SPIENA_MASK);
856
857 if (t->tx_buf) {
858 t->tx_dma = dma_map_single(&spi->dev, (void *)t->tx_buf, count,
859 DMA_TO_DEVICE);
860 if (dma_mapping_error(&spi->dev, t->tx_dma)) {
861 dev_dbg(sdev, "Unable to DMA map a %d bytes"
862 " TX buffer\n", count);
863 return -ENOMEM;
864 }
865 temp_count = count;
866 } else {
867 /* We need TX clocking for RX transaction */
868 t->tx_dma = dma_map_single(&spi->dev,
869 (void *)davinci_spi->tmp_buf, count + 1,
870 DMA_TO_DEVICE);
871 if (dma_mapping_error(&spi->dev, t->tx_dma)) {
872 dev_dbg(sdev, "Unable to DMA map a %d bytes"
873 " TX tmp buffer\n", count);
874 return -ENOMEM;
875 }
876 temp_count = count + 1;
877 }
878
879 edma_set_transfer_params(davinci_spi_dma->dma_tx_channel,
880 data_type, temp_count, 1, 0, ASYNC);
881 edma_set_dest(davinci_spi_dma->dma_tx_channel, tx_reg, INCR, W8BIT);
882 edma_set_src(davinci_spi_dma->dma_tx_channel, t->tx_dma, INCR, W8BIT);
883 edma_set_src_index(davinci_spi_dma->dma_tx_channel, data_type, 0);
884 edma_set_dest_index(davinci_spi_dma->dma_tx_channel, 0, 0);
885
886 if (t->rx_buf) {
887 /* initiate transaction */
888 iowrite32(data1_reg_val, davinci_spi->base + SPIDAT1);
889
890 t->rx_dma = dma_map_single(&spi->dev, (void *)t->rx_buf, count,
891 DMA_FROM_DEVICE);
892 if (dma_mapping_error(&spi->dev, t->rx_dma)) {
893 dev_dbg(sdev, "Couldn't DMA map a %d bytes RX buffer\n",
894 count);
895 if (t->tx_buf != NULL)
 896                                 dma_unmap_single(&spi->dev, t->tx_dma,
 897                                                 count, DMA_TO_DEVICE);
898 return -ENOMEM;
899 }
900 edma_set_transfer_params(davinci_spi_dma->dma_rx_channel,
901 data_type, count, 1, 0, ASYNC);
902 edma_set_src(davinci_spi_dma->dma_rx_channel,
903 rx_reg, INCR, W8BIT);
904 edma_set_dest(davinci_spi_dma->dma_rx_channel,
905 t->rx_dma, INCR, W8BIT);
906 edma_set_src_index(davinci_spi_dma->dma_rx_channel, 0, 0);
907 edma_set_dest_index(davinci_spi_dma->dma_rx_channel,
908 data_type, 0);
909 }
910
911 if ((t->tx_buf) || (t->rx_buf))
912 edma_start(davinci_spi_dma->dma_tx_channel);
913
914 if (t->rx_buf)
915 edma_start(davinci_spi_dma->dma_rx_channel);
916
917 if ((t->rx_buf) || (t->tx_buf))
918 davinci_spi_set_dma_req(spi, 1);
919
920 if (t->tx_buf)
921 wait_for_completion_interruptible(
922 &davinci_spi_dma->dma_tx_completion);
923
924 if (t->rx_buf)
925 wait_for_completion_interruptible(
926 &davinci_spi_dma->dma_rx_completion);
927
 928         dma_unmap_single(&spi->dev, t->tx_dma, temp_count, DMA_TO_DEVICE);
 929 
 930         if (t->rx_buf)
 931                 dma_unmap_single(&spi->dev, t->rx_dma, count, DMA_FROM_DEVICE);
932
933 /*
 934          * Check for bit error, desync error, parity error, timeout error and
935 * receive overflow errors
936 */
937 int_status = ioread32(davinci_spi->base + SPIFLG);
938
939 ret = davinci_spi_check_error(davinci_spi, int_status);
940 if (ret != 0)
941 return ret;
942
943 /* SPI Framework maintains the count only in bytes so convert back */
944 davinci_spi->count *= conv;
945
946 return t->len;
947 }
948
949 /**
950 * davinci_spi_irq - IRQ handler for DaVinci SPI
951 * @irq: IRQ number for this SPI Master
952 * @context_data: structure for SPI Master controller davinci_spi
953 */
954 static irqreturn_t davinci_spi_irq(s32 irq, void *context_data)
955 {
956 struct davinci_spi *davinci_spi = context_data;
957 u32 int_status, rx_data = 0;
958 irqreturn_t ret = IRQ_NONE;
959
960 int_status = ioread32(davinci_spi->base + SPIFLG);
961
962 while ((int_status & SPIFLG_RX_INTR_MASK)) {
963 if (likely(int_status & SPIFLG_RX_INTR_MASK)) {
964 ret = IRQ_HANDLED;
965
966 rx_data = ioread32(davinci_spi->base + SPIBUF);
967 davinci_spi->get_rx(rx_data, davinci_spi);
968
969 /* Disable Receive Interrupt */
970 iowrite32(~(SPIINT_RX_INTR | SPIINT_TX_INTR),
971 davinci_spi->base + SPIINT);
972 } else
973 (void)davinci_spi_check_error(davinci_spi, int_status);
974
975 int_status = ioread32(davinci_spi->base + SPIFLG);
976 }
977
978 return ret;
979 }
980
981 /**
982 * davinci_spi_probe - probe function for SPI Master Controller
 983  * @pdev: platform_device structure which contains platform specific data
984 */
985 static int davinci_spi_probe(struct platform_device *pdev)
986 {
987 struct spi_master *master;
988 struct davinci_spi *davinci_spi;
989 struct davinci_spi_platform_data *pdata;
990 struct resource *r, *mem;
991 resource_size_t dma_rx_chan = SPI_NO_RESOURCE;
992 resource_size_t dma_tx_chan = SPI_NO_RESOURCE;
993 resource_size_t dma_eventq = SPI_NO_RESOURCE;
994 int i = 0, ret = 0;
995
996 pdata = pdev->dev.platform_data;
997 if (pdata == NULL) {
998 ret = -ENODEV;
999 goto err;
1000 }
1001
1002 master = spi_alloc_master(&pdev->dev, sizeof(struct davinci_spi));
1003 if (master == NULL) {
1004 ret = -ENOMEM;
1005 goto err;
1006 }
1007
1008 dev_set_drvdata(&pdev->dev, master);
1009
1010 davinci_spi = spi_master_get_devdata(master);
1011 if (davinci_spi == NULL) {
1012 ret = -ENOENT;
1013 goto free_master;
1014 }
1015
1016 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1017 if (r == NULL) {
1018 ret = -ENOENT;
1019 goto free_master;
1020 }
1021
1022 davinci_spi->pbase = r->start;
1023 davinci_spi->region_size = resource_size(r);
1024 davinci_spi->pdata = pdata;
1025
1026 mem = request_mem_region(r->start, davinci_spi->region_size,
1027 pdev->name);
1028 if (mem == NULL) {
1029 ret = -EBUSY;
1030 goto free_master;
1031 }
1032
1033 davinci_spi->base = ioremap(r->start, davinci_spi->region_size);
1034 if (davinci_spi->base == NULL) {
1035 ret = -ENOMEM;
1036 goto release_region;
1037 }
1038
1039 davinci_spi->irq = platform_get_irq(pdev, 0);
1040 if (davinci_spi->irq <= 0) {
1041 ret = -EINVAL;
1042 goto unmap_io;
1043 }
1044
1045 ret = request_irq(davinci_spi->irq, davinci_spi_irq, IRQF_DISABLED,
1046 dev_name(&pdev->dev), davinci_spi);
1047 if (ret)
1048 goto unmap_io;
1049
1050 /* Allocate tmp_buf for tx_buf */
1051 davinci_spi->tmp_buf = kzalloc(SPI_BUFSIZ, GFP_KERNEL);
1052 if (davinci_spi->tmp_buf == NULL) {
1053 ret = -ENOMEM;
1054 goto irq_free;
1055 }
1056
1057 davinci_spi->bitbang.master = spi_master_get(master);
1058 if (davinci_spi->bitbang.master == NULL) {
1059 ret = -ENODEV;
1060 goto free_tmp_buf;
1061 }
1062
1063 davinci_spi->clk = clk_get(&pdev->dev, NULL);
1064 if (IS_ERR(davinci_spi->clk)) {
1065 ret = -ENODEV;
1066 goto put_master;
1067 }
1068 clk_enable(davinci_spi->clk);
1069
1070 master->bus_num = pdev->id;
1071 master->num_chipselect = pdata->num_chipselect;
1072 master->setup = davinci_spi_setup;
1073 master->cleanup = davinci_spi_cleanup;
1074
1075 davinci_spi->bitbang.chipselect = davinci_spi_chipselect;
1076 davinci_spi->bitbang.setup_transfer = davinci_spi_setup_transfer;
1077
1078 davinci_spi->version = pdata->version;
1079 use_dma = pdata->use_dma;
1080
1081 davinci_spi->bitbang.flags = SPI_NO_CS | SPI_LSB_FIRST | SPI_LOOP;
1082 if (davinci_spi->version == SPI_VERSION_2)
1083 davinci_spi->bitbang.flags |= SPI_READY;
1084
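	/*
	 * DMA resources, as provided by the platform device:
	 * index 0 = RX DMA event, index 1 = TX DMA event,
	 * index 2 = EDMA event queue.
	 */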
1085 if (use_dma) {
1086 r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
1087 if (r)
1088 dma_rx_chan = r->start;
1089 r = platform_get_resource(pdev, IORESOURCE_DMA, 1);
1090 if (r)
1091 dma_tx_chan = r->start;
1092 r = platform_get_resource(pdev, IORESOURCE_DMA, 2);
1093 if (r)
1094 dma_eventq = r->start;
1095 }
1096
1097 if (!use_dma ||
1098 dma_rx_chan == SPI_NO_RESOURCE ||
1099 dma_tx_chan == SPI_NO_RESOURCE ||
1100 dma_eventq == SPI_NO_RESOURCE) {
1101 davinci_spi->bitbang.txrx_bufs = davinci_spi_bufs_pio;
1102 use_dma = 0;
1103 } else {
1104 davinci_spi->bitbang.txrx_bufs = davinci_spi_bufs_dma;
1105 davinci_spi->dma_channels = kzalloc(master->num_chipselect
1106 * sizeof(struct davinci_spi_dma), GFP_KERNEL);
1107 if (davinci_spi->dma_channels == NULL) {
1108 ret = -ENOMEM;
1109 goto free_clk;
1110 }
1111
1112 for (i = 0; i < master->num_chipselect; i++) {
1113 davinci_spi->dma_channels[i].dma_rx_channel = -1;
1114 davinci_spi->dma_channels[i].dma_rx_sync_dev =
1115 dma_rx_chan;
1116 davinci_spi->dma_channels[i].dma_tx_channel = -1;
1117 davinci_spi->dma_channels[i].dma_tx_sync_dev =
1118 dma_tx_chan;
1119 davinci_spi->dma_channels[i].eventq = dma_eventq;
1120 }
1121                 dev_info(&pdev->dev, "DaVinci SPI driver in EDMA mode\n"
1122                                 "Using RX channel = %d, TX channel = %d and "
1123                                 "event queue = %d\n", dma_rx_chan, dma_tx_chan,
1124                                 dma_eventq);
1125 }
1126
1127 davinci_spi->get_rx = davinci_spi_rx_buf_u8;
1128 davinci_spi->get_tx = davinci_spi_tx_buf_u8;
1129
1130 init_completion(&davinci_spi->done);
1131
1132 /* Reset In/OUT SPI module */
1133 iowrite32(0, davinci_spi->base + SPIGCR0);
1134 udelay(100);
1135 iowrite32(1, davinci_spi->base + SPIGCR0);
1136
1137 /* initialize chip selects */
1138 if (pdata->chip_sel) {
1139 for (i = 0; i < pdata->num_chipselect; i++) {
1140 if (pdata->chip_sel[i] != SPI_INTERN_CS)
1141 gpio_direction_output(pdata->chip_sel[i], 1);
1142 }
1143 }
1144
1145 /* Clock internal */
1146 if (davinci_spi->pdata->clk_internal)
1147 set_io_bits(davinci_spi->base + SPIGCR1,
1148 SPIGCR1_CLKMOD_MASK);
1149 else
1150 clear_io_bits(davinci_spi->base + SPIGCR1,
1151 SPIGCR1_CLKMOD_MASK);
1152
1153 iowrite32(CS_DEFAULT, davinci_spi->base + SPIDEF);
1154
1155 /* master mode default */
1156 set_io_bits(davinci_spi->base + SPIGCR1, SPIGCR1_MASTER_MASK);
1157
1158 if (davinci_spi->pdata->intr_level)
1159 iowrite32(SPI_INTLVL_1, davinci_spi->base + SPILVL);
1160 else
1161 iowrite32(SPI_INTLVL_0, davinci_spi->base + SPILVL);
1162
1163 ret = spi_bitbang_start(&davinci_spi->bitbang);
1164 if (ret)
1165 goto free_clk;
1166
1167 dev_info(&pdev->dev, "Controller at 0x%p\n", davinci_spi->base);
1168
1169 if (!pdata->poll_mode)
1170 dev_info(&pdev->dev, "Operating in interrupt mode"
1171 " using IRQ %d\n", davinci_spi->irq);
1172
1173 return ret;
1174
1175 free_clk:
1176 clk_disable(davinci_spi->clk);
1177 clk_put(davinci_spi->clk);
1178 put_master:
1179 spi_master_put(master);
1180 free_tmp_buf:
1181 kfree(davinci_spi->tmp_buf);
1182 irq_free:
1183 free_irq(davinci_spi->irq, davinci_spi);
1184 unmap_io:
1185 iounmap(davinci_spi->base);
1186 release_region:
1187 release_mem_region(davinci_spi->pbase, davinci_spi->region_size);
1188 free_master:
1189 kfree(master);
1190 err:
1191 return ret;
1192 }
1193
1194 /**
1195 * davinci_spi_remove - remove function for SPI Master Controller
1196  * @pdev: platform_device structure which contains platform specific data
1197  *
1198  * This function does the reverse of what davinci_spi_probe does.
1199 * It will free the IRQ and SPI controller's memory region.
1200 * It will also call spi_bitbang_stop to destroy the work queue which was
1201 * created by spi_bitbang_start.
1202 */
1203 static int __exit davinci_spi_remove(struct platform_device *pdev)
1204 {
1205 struct davinci_spi *davinci_spi;
1206 struct spi_master *master;
1207
1208 master = dev_get_drvdata(&pdev->dev);
1209 davinci_spi = spi_master_get_devdata(master);
1210
1211 spi_bitbang_stop(&davinci_spi->bitbang);
1212
1213 clk_disable(davinci_spi->clk);
1214 clk_put(davinci_spi->clk);
1215 spi_master_put(master);
1216 kfree(davinci_spi->tmp_buf);
1217 free_irq(davinci_spi->irq, davinci_spi);
1218 iounmap(davinci_spi->base);
1219 release_mem_region(davinci_spi->pbase, davinci_spi->region_size);
1220
1221 return 0;
1222 }
1223
1224 static struct platform_driver davinci_spi_driver = {
1225 .driver.name = "spi_davinci",
1226 .remove = __exit_p(davinci_spi_remove),
1227 };
1228
1229 static int __init davinci_spi_init(void)
1230 {
1231 return platform_driver_probe(&davinci_spi_driver, davinci_spi_probe);
1232 }
1233 module_init(davinci_spi_init);
1234
1235 static void __exit davinci_spi_exit(void)
1236 {
1237 platform_driver_unregister(&davinci_spi_driver);
1238 }
1239 module_exit(davinci_spi_exit);
1240
1241 MODULE_DESCRIPTION("TI DaVinci SPI Master Controller Driver");
1242 MODULE_LICENSE("GPL");