/*
 * Copyright (C) 2005 Stephen Street / StreetFire Sound Labs
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/ioport.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/spi/pxa2xx_spi.h>
#include <linux/dma-mapping.h>
#include <linux/spi/spi.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
#include <linux/gpio.h>
#include <linux/slab.h>
#include <linux/clk.h>

#include <asm/io.h>
#include <asm/irq.h>
#include <asm/delay.h>

MODULE_AUTHOR("Stephen Street");
MODULE_DESCRIPTION("PXA2xx SSP SPI Controller");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:pxa2xx-spi");

#define MAX_BUSES 3

#define TIMOUT_DFLT		1000

#define DMA_INT_MASK		(DCSR_ENDINTR | DCSR_STARTINTR | DCSR_BUSERR)
#define RESET_DMA_CHANNEL	(DCSR_NODESC | DMA_INT_MASK)
#define IS_DMA_ALIGNED(x)	IS_ALIGNED((unsigned long)(x), DMA_ALIGNMENT)
#define MAX_DMA_LEN		8191
#define DMA_ALIGNMENT		8

/*
 * for testing SSCR1 changes that require SSP restart, basically
 * everything except the service and interrupt enables, the pxa270 developer
 * manual says only SSCR1_SCFR, SSCR1_SPH, SSCR1_SPO need to be in this
 * list, but the PXA255 dev man says all bits without really meaning the
 * service and interrupt enables
 */
#define SSCR1_CHANGE_MASK (SSCR1_TTELP | SSCR1_TTE | SSCR1_SCFR \
				| SSCR1_ECRA | SSCR1_ECRB | SSCR1_SCLKDIR \
				| SSCR1_SFRMDIR | SSCR1_RWOT | SSCR1_TRAIL \
				| SSCR1_IFS | SSCR1_STRF | SSCR1_EFWR \
				| SSCR1_RFT | SSCR1_TFT | SSCR1_MWDS \
				| SSCR1_SPH | SSCR1_SPO | SSCR1_LBM)

#define DEFINE_SSP_REG(reg, off) \
static inline u32 read_##reg(void const __iomem *p) \
{ return __raw_readl(p + (off)); } \
\
static inline void write_##reg(u32 v, void __iomem *p) \
{ __raw_writel(v, p + (off)); }

DEFINE_SSP_REG(SSCR0, 0x00)
DEFINE_SSP_REG(SSCR1, 0x04)
DEFINE_SSP_REG(SSSR, 0x08)
DEFINE_SSP_REG(SSITR, 0x0c)
DEFINE_SSP_REG(SSDR, 0x10)
DEFINE_SSP_REG(SSTO, 0x28)
DEFINE_SSP_REG(SSPSP, 0x2c)

#define START_STATE ((void*)0)
#define RUNNING_STATE ((void*)1)
#define DONE_STATE ((void*)2)
#define ERROR_STATE ((void*)-1)

struct driver_data {
	/* Driver model hookup */
	struct platform_device *pdev;

	/* SSP Info */
	struct ssp_device *ssp;

	/* SPI framework hookup */
	enum pxa_ssp_type ssp_type;
	struct spi_master *master;

	/* PXA hookup */
	struct pxa2xx_spi_master *master_info;

	/* DMA setup stuff */
	int rx_channel;
	int tx_channel;
	u32 *null_dma_buf;

	/* SSP register addresses */
	void __iomem *ioaddr;
	u32 ssdr_physical;

	/* SSP masks */
	u32 dma_cr1;
	u32 int_cr1;
	u32 clear_sr;
	u32 mask_sr;

	/* Maximum clock rate */
	unsigned long max_clk_rate;

	/* Message Transfer pump */
	struct tasklet_struct pump_transfers;

	/* Current message transfer state info */
	struct spi_message *cur_msg;
	struct spi_transfer *cur_transfer;
	struct chip_data *cur_chip;
	size_t len;
	void *tx;
	void *tx_end;
	void *rx;
	void *rx_end;
	int dma_mapped;
	dma_addr_t rx_dma;
	dma_addr_t tx_dma;
	size_t rx_map_len;
	size_t tx_map_len;
	u8 n_bytes;
	u32 dma_width;
	int (*write)(struct driver_data *drv_data);
	int (*read)(struct driver_data *drv_data);
	irqreturn_t (*transfer_handler)(struct driver_data *drv_data);
	void (*cs_control)(u32 command);
};

struct chip_data {
	u32 cr0;
	u32 cr1;
	u32 psp;
	u32 timeout;
	u8 n_bytes;
	u32 dma_width;
	u32 dma_burst_size;
	u32 threshold;
	u32 dma_threshold;
	u8 enable_dma;
	u8 bits_per_word;
	u32 speed_hz;
	union {
		int gpio_cs;
		unsigned int frm;
	};
	int gpio_cs_inverted;
	int (*write)(struct driver_data *drv_data);
	int (*read)(struct driver_data *drv_data);
	void (*cs_control)(u32 command);
};

static void cs_assert(struct driver_data *drv_data)
{
	struct chip_data *chip = drv_data->cur_chip;

	if (drv_data->ssp_type == CE4100_SSP) {
		write_SSSR(drv_data->cur_chip->frm, drv_data->ioaddr);
		return;
	}

	if (chip->cs_control) {
		chip->cs_control(PXA2XX_CS_ASSERT);
		return;
	}

	if (gpio_is_valid(chip->gpio_cs))
		gpio_set_value(chip->gpio_cs, chip->gpio_cs_inverted);
}

static void cs_deassert(struct driver_data *drv_data)
{
	struct chip_data *chip = drv_data->cur_chip;

	if (drv_data->ssp_type == CE4100_SSP)
		return;

	if (chip->cs_control) {
		chip->cs_control(PXA2XX_CS_DEASSERT);
		return;
	}

	if (gpio_is_valid(chip->gpio_cs))
		gpio_set_value(chip->gpio_cs, !chip->gpio_cs_inverted);
}

static void write_SSSR_CS(struct driver_data *drv_data, u32 val)
{
	void __iomem *reg = drv_data->ioaddr;

	if (drv_data->ssp_type == CE4100_SSP)
		val |= read_SSSR(reg) & SSSR_ALT_FRM_MASK;

	write_SSSR(val, reg);
}

static int pxa25x_ssp_comp(struct driver_data *drv_data)
{
	if (drv_data->ssp_type == PXA25x_SSP)
		return 1;
	if (drv_data->ssp_type == CE4100_SSP)
		return 1;
	return 0;
}

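/*
 * Drain any leftover data from the receive FIFO and wait for the SSP to go
 * idle, then clear the receiver overrun status.  Returns the remaining loop
 * budget; a return value of 0 means the port never went idle in time.
 */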
static int flush(struct driver_data *drv_data)
{
	unsigned long limit = loops_per_jiffy << 1;

	void __iomem *reg = drv_data->ioaddr;

	do {
		while (read_SSSR(reg) & SSSR_RNE) {
			read_SSDR(reg);
		}
	} while ((read_SSSR(reg) & SSSR_BSY) && --limit);
	write_SSSR_CS(drv_data, SSSR_ROR);

	return limit;
}

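/*
 * PIO FIFO helpers: the *_writer() functions push one sample into the TX
 * FIFO and return 0 when the FIFO is full or the buffer is exhausted, while
 * the *_reader() functions drain the RX FIFO and return non-zero once the
 * whole receive buffer has been filled.  The null_* variants are used when
 * a transfer has no TX and/or no RX buffer.
 */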
static int null_writer(struct driver_data *drv_data)
{
	void __iomem *reg = drv_data->ioaddr;
	u8 n_bytes = drv_data->n_bytes;

	if (((read_SSSR(reg) & SSSR_TFL_MASK) == SSSR_TFL_MASK)
		|| (drv_data->tx == drv_data->tx_end))
		return 0;

	write_SSDR(0, reg);
	drv_data->tx += n_bytes;

	return 1;
}

static int null_reader(struct driver_data *drv_data)
{
	void __iomem *reg = drv_data->ioaddr;
	u8 n_bytes = drv_data->n_bytes;

	while ((read_SSSR(reg) & SSSR_RNE)
		&& (drv_data->rx < drv_data->rx_end)) {
		read_SSDR(reg);
		drv_data->rx += n_bytes;
	}

	return drv_data->rx == drv_data->rx_end;
}

static int u8_writer(struct driver_data *drv_data)
{
	void __iomem *reg = drv_data->ioaddr;

	if (((read_SSSR(reg) & SSSR_TFL_MASK) == SSSR_TFL_MASK)
		|| (drv_data->tx == drv_data->tx_end))
		return 0;

	write_SSDR(*(u8 *)(drv_data->tx), reg);
	++drv_data->tx;

	return 1;
}

static int u8_reader(struct driver_data *drv_data)
{
	void __iomem *reg = drv_data->ioaddr;

	while ((read_SSSR(reg) & SSSR_RNE)
		&& (drv_data->rx < drv_data->rx_end)) {
		*(u8 *)(drv_data->rx) = read_SSDR(reg);
		++drv_data->rx;
	}

	return drv_data->rx == drv_data->rx_end;
}

static int u16_writer(struct driver_data *drv_data)
{
	void __iomem *reg = drv_data->ioaddr;

	if (((read_SSSR(reg) & SSSR_TFL_MASK) == SSSR_TFL_MASK)
		|| (drv_data->tx == drv_data->tx_end))
		return 0;

	write_SSDR(*(u16 *)(drv_data->tx), reg);
	drv_data->tx += 2;

	return 1;
}

static int u16_reader(struct driver_data *drv_data)
{
	void __iomem *reg = drv_data->ioaddr;

	while ((read_SSSR(reg) & SSSR_RNE)
		&& (drv_data->rx < drv_data->rx_end)) {
		*(u16 *)(drv_data->rx) = read_SSDR(reg);
		drv_data->rx += 2;
	}

	return drv_data->rx == drv_data->rx_end;
}

static int u32_writer(struct driver_data *drv_data)
{
	void __iomem *reg = drv_data->ioaddr;

	if (((read_SSSR(reg) & SSSR_TFL_MASK) == SSSR_TFL_MASK)
		|| (drv_data->tx == drv_data->tx_end))
		return 0;

	write_SSDR(*(u32 *)(drv_data->tx), reg);
	drv_data->tx += 4;

	return 1;
}

static int u32_reader(struct driver_data *drv_data)
{
	void __iomem *reg = drv_data->ioaddr;

	while ((read_SSSR(reg) & SSSR_RNE)
		&& (drv_data->rx < drv_data->rx_end)) {
		*(u32 *)(drv_data->rx) = read_SSDR(reg);
		drv_data->rx += 4;
	}

	return drv_data->rx == drv_data->rx_end;
}

static void *next_transfer(struct driver_data *drv_data)
{
	struct spi_message *msg = drv_data->cur_msg;
	struct spi_transfer *trans = drv_data->cur_transfer;

	/* Move to next transfer */
	if (trans->transfer_list.next != &msg->transfers) {
		drv_data->cur_transfer =
			list_entry(trans->transfer_list.next,
					struct spi_transfer,
					transfer_list);
		return RUNNING_STATE;
	} else
		return DONE_STATE;
}

static int map_dma_buffers(struct driver_data *drv_data)
{
	struct spi_message *msg = drv_data->cur_msg;
	struct device *dev = &msg->spi->dev;

	if (!drv_data->cur_chip->enable_dma)
		return 0;

	if (msg->is_dma_mapped)
		return drv_data->rx_dma && drv_data->tx_dma;

	if (!IS_DMA_ALIGNED(drv_data->rx) || !IS_DMA_ALIGNED(drv_data->tx))
		return 0;

	/* Modify setup if rx buffer is null */
	if (drv_data->rx == NULL) {
		*drv_data->null_dma_buf = 0;
		drv_data->rx = drv_data->null_dma_buf;
		drv_data->rx_map_len = 4;
	} else
		drv_data->rx_map_len = drv_data->len;


	/* Modify setup if tx buffer is null */
	if (drv_data->tx == NULL) {
		*drv_data->null_dma_buf = 0;
		drv_data->tx = drv_data->null_dma_buf;
		drv_data->tx_map_len = 4;
	} else
		drv_data->tx_map_len = drv_data->len;

	/* Stream map the tx buffer. Always do DMA_TO_DEVICE first
	 * so we flush the cache *before* invalidating it, in case
	 * the tx and rx buffers overlap.
	 */
	drv_data->tx_dma = dma_map_single(dev, drv_data->tx,
					drv_data->tx_map_len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, drv_data->tx_dma))
		return 0;

	/* Stream map the rx buffer */
	drv_data->rx_dma = dma_map_single(dev, drv_data->rx,
					drv_data->rx_map_len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, drv_data->rx_dma)) {
		dma_unmap_single(dev, drv_data->tx_dma,
					drv_data->tx_map_len, DMA_TO_DEVICE);
		return 0;
	}

	return 1;
}

static void unmap_dma_buffers(struct driver_data *drv_data)
{
	struct device *dev;

	if (!drv_data->dma_mapped)
		return;

	if (!drv_data->cur_msg->is_dma_mapped) {
		dev = &drv_data->cur_msg->spi->dev;
		dma_unmap_single(dev, drv_data->rx_dma,
					drv_data->rx_map_len, DMA_FROM_DEVICE);
		dma_unmap_single(dev, drv_data->tx_dma,
					drv_data->tx_map_len, DMA_TO_DEVICE);
	}

	drv_data->dma_mapped = 0;
}

/* caller already set message->status; dma and pio irqs are blocked */
static void giveback(struct driver_data *drv_data)
{
	struct spi_transfer *last_transfer;
	struct spi_message *msg;

	msg = drv_data->cur_msg;
	drv_data->cur_msg = NULL;
	drv_data->cur_transfer = NULL;

	last_transfer = list_entry(msg->transfers.prev,
					struct spi_transfer,
					transfer_list);

	/* Delay if requested before any change in chip select */
	if (last_transfer->delay_usecs)
		udelay(last_transfer->delay_usecs);

	/* Drop chip select UNLESS cs_change is true or we are returning
	 * a message with an error, or next message is for another chip
	 */
	if (!last_transfer->cs_change)
		cs_deassert(drv_data);
	else {
		struct spi_message *next_msg;

		/* Holding of cs was hinted, but we need to make sure
		 * the next message is for the same chip.  Don't waste
		 * time with the following tests unless this was hinted.
		 *
		 * We cannot postpone this until pump_messages, because
		 * after calling msg->complete (below) the driver that
		 * sent the current message could be unloaded, which
		 * could invalidate the cs_control() callback...
		 */

		/* get a pointer to the next message, if any */
		next_msg = spi_get_next_queued_message(drv_data->master);

		/* see if the next and current messages point
		 * to the same chip
		 */
		if (next_msg && next_msg->spi != msg->spi)
			next_msg = NULL;
		if (!next_msg || msg->state == ERROR_STATE)
			cs_deassert(drv_data);
	}

	spi_finalize_current_message(drv_data->master);
	drv_data->cur_chip = NULL;
}

static int wait_ssp_rx_stall(void const __iomem *ioaddr)
{
	unsigned long limit = loops_per_jiffy << 1;

	while ((read_SSSR(ioaddr) & SSSR_BSY) && --limit)
		cpu_relax();

	return limit;
}

static int wait_dma_channel_stop(int channel)
{
	unsigned long limit = loops_per_jiffy << 1;

	while (!(DCSR(channel) & DCSR_STOPSTATE) && --limit)
		cpu_relax();

	return limit;
}

static void dma_error_stop(struct driver_data *drv_data, const char *msg)
{
	void __iomem *reg = drv_data->ioaddr;

	/* Stop and reset */
	DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL;
	DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL;
	write_SSSR_CS(drv_data, drv_data->clear_sr);
	write_SSCR1(read_SSCR1(reg) & ~drv_data->dma_cr1, reg);
	if (!pxa25x_ssp_comp(drv_data))
		write_SSTO(0, reg);
	flush(drv_data);
	write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg);

	unmap_dma_buffers(drv_data);

	dev_err(&drv_data->pdev->dev, "%s\n", msg);

	drv_data->cur_msg->state = ERROR_STATE;
	tasklet_schedule(&drv_data->pump_transfers);
}

static void dma_transfer_complete(struct driver_data *drv_data)
{
	void __iomem *reg = drv_data->ioaddr;
	struct spi_message *msg = drv_data->cur_msg;

	/* Clear and disable interrupts on SSP and DMA channels */
	write_SSCR1(read_SSCR1(reg) & ~drv_data->dma_cr1, reg);
	write_SSSR_CS(drv_data, drv_data->clear_sr);
	DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL;
	DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL;

	if (wait_dma_channel_stop(drv_data->rx_channel) == 0)
		dev_err(&drv_data->pdev->dev,
			"dma_handler: dma rx channel stop failed\n");

	if (wait_ssp_rx_stall(drv_data->ioaddr) == 0)
		dev_err(&drv_data->pdev->dev,
			"dma_transfer: ssp rx stall failed\n");

	unmap_dma_buffers(drv_data);

	/* update the buffer pointer for the amount completed in dma */
	drv_data->rx += drv_data->len -
			(DCMD(drv_data->rx_channel) & DCMD_LENGTH);

	/* read trailing data from fifo, it does not matter how many
	 * bytes are in the fifo just read until buffer is full
	 * or fifo is empty, whichever occurs first */
	drv_data->read(drv_data);

	/* return count of what was actually read */
	msg->actual_length += drv_data->len -
				(drv_data->rx_end - drv_data->rx);

	/* Transfer delays and chip select release are
	 * handled in pump_transfers or giveback
	 */

	/* Move to next transfer */
	msg->state = next_transfer(drv_data);

	/* Schedule transfer tasklet */
	tasklet_schedule(&drv_data->pump_transfers);
}

static void dma_handler(int channel, void *data)
{
	struct driver_data *drv_data = data;
	u32 irq_status = DCSR(channel) & DMA_INT_MASK;

	if (irq_status & DCSR_BUSERR) {

		if (channel == drv_data->tx_channel)
			dma_error_stop(drv_data,
					"dma_handler: "
					"bad bus address on tx channel");
		else
			dma_error_stop(drv_data,
					"dma_handler: "
					"bad bus address on rx channel");
		return;
	}

	/* PXA25x_SSP has no timeout interrupt, wait for trailing bytes */
	if ((channel == drv_data->tx_channel)
		&& (irq_status & DCSR_ENDINTR)
		&& (drv_data->ssp_type == PXA25x_SSP)) {

		/* Wait for rx to stall */
		if (wait_ssp_rx_stall(drv_data->ioaddr) == 0)
			dev_err(&drv_data->pdev->dev,
				"dma_handler: ssp rx stall failed\n");

		/* finish this transfer, start the next */
		dma_transfer_complete(drv_data);
	}
}

static irqreturn_t dma_transfer(struct driver_data *drv_data)
{
	u32 irq_status;
	void __iomem *reg = drv_data->ioaddr;

	irq_status = read_SSSR(reg) & drv_data->mask_sr;
	if (irq_status & SSSR_ROR) {
		dma_error_stop(drv_data, "dma_transfer: fifo overrun");
		return IRQ_HANDLED;
	}

	/* Check for false positive timeout */
	if ((irq_status & SSSR_TINT)
		&& (DCSR(drv_data->tx_channel) & DCSR_RUN)) {
		write_SSSR(SSSR_TINT, reg);
		return IRQ_HANDLED;
	}

	if (irq_status & SSSR_TINT || drv_data->rx == drv_data->rx_end) {

		/* Clear and disable timeout interrupt, do the rest in
		 * dma_transfer_complete */
		if (!pxa25x_ssp_comp(drv_data))
			write_SSTO(0, reg);

		/* finish this transfer, start the next */
		dma_transfer_complete(drv_data);

		return IRQ_HANDLED;
	}

	/* Oops, problem detected */
	return IRQ_NONE;
}

static void reset_sccr1(struct driver_data *drv_data)
{
	void __iomem *reg = drv_data->ioaddr;
	struct chip_data *chip = drv_data->cur_chip;
	u32 sccr1_reg;

	sccr1_reg = read_SSCR1(reg) & ~drv_data->int_cr1;
	sccr1_reg &= ~SSCR1_RFT;
	sccr1_reg |= chip->threshold;
	write_SSCR1(sccr1_reg, reg);
}

static void int_error_stop(struct driver_data *drv_data, const char *msg)
{
	void __iomem *reg = drv_data->ioaddr;

	/* Stop and reset SSP */
	write_SSSR_CS(drv_data, drv_data->clear_sr);
	reset_sccr1(drv_data);
	if (!pxa25x_ssp_comp(drv_data))
		write_SSTO(0, reg);
	flush(drv_data);
	write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg);

	dev_err(&drv_data->pdev->dev, "%s\n", msg);

	drv_data->cur_msg->state = ERROR_STATE;
	tasklet_schedule(&drv_data->pump_transfers);
}

static void int_transfer_complete(struct driver_data *drv_data)
{
	void __iomem *reg = drv_data->ioaddr;

	/* Stop SSP */
	write_SSSR_CS(drv_data, drv_data->clear_sr);
	reset_sccr1(drv_data);
	if (!pxa25x_ssp_comp(drv_data))
		write_SSTO(0, reg);

	/* Update the total bytes transferred with the count actually read */
	drv_data->cur_msg->actual_length += drv_data->len -
				(drv_data->rx_end - drv_data->rx);

	/* Transfer delays and chip select release are
	 * handled in pump_transfers or giveback
	 */

	/* Move to next transfer */
	drv_data->cur_msg->state = next_transfer(drv_data);

	/* Schedule transfer tasklet */
	tasklet_schedule(&drv_data->pump_transfers);
}

static irqreturn_t interrupt_transfer(struct driver_data *drv_data)
{
	void __iomem *reg = drv_data->ioaddr;

	u32 irq_mask = (read_SSCR1(reg) & SSCR1_TIE) ?
			drv_data->mask_sr : drv_data->mask_sr & ~SSSR_TFS;

	u32 irq_status = read_SSSR(reg) & irq_mask;

	if (irq_status & SSSR_ROR) {
		int_error_stop(drv_data, "interrupt_transfer: fifo overrun");
		return IRQ_HANDLED;
	}

	if (irq_status & SSSR_TINT) {
		write_SSSR(SSSR_TINT, reg);
		if (drv_data->read(drv_data)) {
			int_transfer_complete(drv_data);
			return IRQ_HANDLED;
		}
	}

	/* Drain rx fifo, fill tx fifo and prevent overruns */
	do {
		if (drv_data->read(drv_data)) {
			int_transfer_complete(drv_data);
			return IRQ_HANDLED;
		}
	} while (drv_data->write(drv_data));

	if (drv_data->read(drv_data)) {
		int_transfer_complete(drv_data);
		return IRQ_HANDLED;
	}

	if (drv_data->tx == drv_data->tx_end) {
		u32 bytes_left;
		u32 sccr1_reg;

		sccr1_reg = read_SSCR1(reg);
		sccr1_reg &= ~SSCR1_TIE;

		/*
		 * PXA25x_SSP has no timeout, set up rx threshold for the
		 * remaining RX bytes.
		 */
		if (pxa25x_ssp_comp(drv_data)) {

			sccr1_reg &= ~SSCR1_RFT;

			bytes_left = drv_data->rx_end - drv_data->rx;
			switch (drv_data->n_bytes) {
			case 4:
				bytes_left >>= 1;
			case 2:
				bytes_left >>= 1;
			}

			if (bytes_left > RX_THRESH_DFLT)
				bytes_left = RX_THRESH_DFLT;

			sccr1_reg |= SSCR1_RxTresh(bytes_left);
		}
		write_SSCR1(sccr1_reg, reg);
	}

	/* We did something */
	return IRQ_HANDLED;
}

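/*
 * Primary (possibly shared) SSP interrupt handler: it only claims the IRQ
 * when a status bit we actually unmasked is set, and otherwise defers to the
 * PIO or DMA transfer handler selected in pump_transfers().
 */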
static irqreturn_t ssp_int(int irq, void *dev_id)
{
	struct driver_data *drv_data = dev_id;
	void __iomem *reg = drv_data->ioaddr;
	u32 sccr1_reg = read_SSCR1(reg);
	u32 mask = drv_data->mask_sr;
	u32 status;

	status = read_SSSR(reg);

	/* Ignore possible writes if we don't need to write */
	if (!(sccr1_reg & SSCR1_TIE))
		mask &= ~SSSR_TFS;

	if (!(status & mask))
		return IRQ_NONE;

	if (!drv_data->cur_msg) {

		write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg);
		write_SSCR1(read_SSCR1(reg) & ~drv_data->int_cr1, reg);
		if (!pxa25x_ssp_comp(drv_data))
			write_SSTO(0, reg);
		write_SSSR_CS(drv_data, drv_data->clear_sr);

		dev_err(&drv_data->pdev->dev, "bad message state "
			"in interrupt handler\n");

		/* Never fail */
		return IRQ_HANDLED;
	}

	return drv_data->transfer_handler(drv_data);
}

static int set_dma_burst_and_threshold(struct chip_data *chip,
				struct spi_device *spi,
				u8 bits_per_word, u32 *burst_code,
				u32 *threshold)
{
	struct pxa2xx_spi_chip *chip_info =
			(struct pxa2xx_spi_chip *)spi->controller_data;
	int bytes_per_word;
	int burst_bytes;
	int thresh_words;
	int req_burst_size;
	int retval = 0;

	/* Set the threshold (in registers) to equal the same amount of data
	 * as represented by burst size (in bytes).  The computation below
	 * is (burst_size rounded up to nearest 8 byte, word or long word)
	 * divided by (bytes/register); the tx threshold is the inverse of
	 * the rx, so that there will always be enough data in the rx fifo
	 * to satisfy a burst, and there will always be enough space in the
	 * tx fifo to accept a burst (a tx burst will overwrite the fifo if
	 * there is not enough space), there must always remain enough empty
	 * space in the rx fifo for any data loaded to the tx fifo.
	 * Whenever burst_size (in bytes) equals bits/word, the fifo threshold
	 * will be 8, or half the fifo;
	 * The threshold can only be set to 2, 4 or 8, but not 16, because
	 * to burst 16 to the tx fifo, the fifo would have to be empty;
	 * however, the minimum fifo trigger level is 1, and the tx will
	 * request service when the fifo is at this level, with only 15 spaces.
	 */

	/* find bytes/word */
	if (bits_per_word <= 8)
		bytes_per_word = 1;
	else if (bits_per_word <= 16)
		bytes_per_word = 2;
	else
		bytes_per_word = 4;

	/* use struct pxa2xx_spi_chip->dma_burst_size if available */
	if (chip_info)
		req_burst_size = chip_info->dma_burst_size;
	else {
		switch (chip->dma_burst_size) {
		default:
			/* if the default burst size is not set,
			 * do it now */
			chip->dma_burst_size = DCMD_BURST8;
		case DCMD_BURST8:
			req_burst_size = 8;
			break;
		case DCMD_BURST16:
			req_burst_size = 16;
			break;
		case DCMD_BURST32:
			req_burst_size = 32;
			break;
		}
	}
	if (req_burst_size <= 8) {
		*burst_code = DCMD_BURST8;
		burst_bytes = 8;
	} else if (req_burst_size <= 16) {
		if (bytes_per_word == 1) {
			/* don't burst more than 1/2 the fifo */
			*burst_code = DCMD_BURST8;
			burst_bytes = 8;
			retval = 1;
		} else {
			*burst_code = DCMD_BURST16;
			burst_bytes = 16;
		}
	} else {
		if (bytes_per_word == 1) {
			/* don't burst more than 1/2 the fifo */
			*burst_code = DCMD_BURST8;
			burst_bytes = 8;
			retval = 1;
		} else if (bytes_per_word == 2) {
			/* don't burst more than 1/2 the fifo */
			*burst_code = DCMD_BURST16;
			burst_bytes = 16;
			retval = 1;
		} else {
			*burst_code = DCMD_BURST32;
			burst_bytes = 32;
		}
	}

	thresh_words = burst_bytes / bytes_per_word;

	/* thresh_words will be between 2 and 8 */
	*threshold = (SSCR1_RxTresh(thresh_words) & SSCR1_RFT)
			| (SSCR1_TxTresh(16-thresh_words) & SSCR1_TFT);

	return retval;
}

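/*
 * Convert a requested transfer rate into the SSCR0 serial clock rate (SCR)
 * field.  On PXA25x and CE4100 the bit rate is ssp_clk / (2 * (SCR + 1))
 * with an 8-bit SCR field; on later SSPs it is ssp_clk / (SCR + 1) with a
 * 12-bit field, which is why the two divider formulas below differ.
 */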
static unsigned int ssp_get_clk_div(struct driver_data *drv_data, int rate)
{
	unsigned long ssp_clk = drv_data->max_clk_rate;
	const struct ssp_device *ssp = drv_data->ssp;

	rate = min_t(int, ssp_clk, rate);

	if (ssp->type == PXA25x_SSP || ssp->type == CE4100_SSP)
		return ((ssp_clk / (2 * rate) - 1) & 0xff) << 8;
	else
		return ((ssp_clk / rate - 1) & 0xfff) << 8;
}

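/*
 * pump_transfers() is the tasklet that walks the current spi_message through
 * its START/RUNNING/DONE/ERROR states: it programs SSCR0/SSCR1 for the next
 * spi_transfer, picks DMA or PIO (and the matching interrupt handler), and
 * finally asserts chip select and enables service requests.
 */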
static void pump_transfers(unsigned long data)
{
	struct driver_data *drv_data = (struct driver_data *)data;
	struct spi_message *message = NULL;
	struct spi_transfer *transfer = NULL;
	struct spi_transfer *previous = NULL;
	struct chip_data *chip = NULL;
	void __iomem *reg = drv_data->ioaddr;
	u32 clk_div = 0;
	u8 bits = 0;
	u32 speed = 0;
	u32 cr0;
	u32 cr1;
	u32 dma_thresh = drv_data->cur_chip->dma_threshold;
	u32 dma_burst = drv_data->cur_chip->dma_burst_size;

	/* Get current state information */
	message = drv_data->cur_msg;
	transfer = drv_data->cur_transfer;
	chip = drv_data->cur_chip;

	/* Handle for abort */
	if (message->state == ERROR_STATE) {
		message->status = -EIO;
		giveback(drv_data);
		return;
	}

	/* Handle end of message */
	if (message->state == DONE_STATE) {
		message->status = 0;
		giveback(drv_data);
		return;
	}

	/* Delay if requested at end of transfer before CS change */
	if (message->state == RUNNING_STATE) {
		previous = list_entry(transfer->transfer_list.prev,
					struct spi_transfer,
					transfer_list);
		if (previous->delay_usecs)
			udelay(previous->delay_usecs);

		/* Drop chip select only if cs_change is requested */
		if (previous->cs_change)
			cs_deassert(drv_data);
	}

	/* Check for transfers that need multiple DMA segments */
	if (transfer->len > MAX_DMA_LEN && chip->enable_dma) {

		/* reject already-mapped transfers; PIO won't always work */
		if (message->is_dma_mapped
				|| transfer->rx_dma || transfer->tx_dma) {
			dev_err(&drv_data->pdev->dev,
				"pump_transfers: mapped transfer length "
				"of %u is greater than %d\n",
				transfer->len, MAX_DMA_LEN);
			message->status = -EINVAL;
			giveback(drv_data);
			return;
		}

		/* warn ... we force this to PIO mode */
		if (printk_ratelimit())
			dev_warn(&message->spi->dev, "pump_transfers: "
				"DMA disabled for transfer length %ld "
				"greater than %d\n",
				(long)drv_data->len, MAX_DMA_LEN);
	}

	/* Setup the transfer state based on the type of transfer */
	if (flush(drv_data) == 0) {
		dev_err(&drv_data->pdev->dev, "pump_transfers: flush failed\n");
		message->status = -EIO;
		giveback(drv_data);
		return;
	}
	drv_data->n_bytes = chip->n_bytes;
	drv_data->dma_width = chip->dma_width;
	drv_data->tx = (void *)transfer->tx_buf;
	drv_data->tx_end = drv_data->tx + transfer->len;
	drv_data->rx = transfer->rx_buf;
	drv_data->rx_end = drv_data->rx + transfer->len;
	drv_data->rx_dma = transfer->rx_dma;
	drv_data->tx_dma = transfer->tx_dma;
	drv_data->len = transfer->len & DCMD_LENGTH;
	drv_data->write = drv_data->tx ? chip->write : null_writer;
	drv_data->read = drv_data->rx ? chip->read : null_reader;

	/* Change speed and bits per word on a per-transfer basis */
	cr0 = chip->cr0;
	if (transfer->speed_hz || transfer->bits_per_word) {

		bits = chip->bits_per_word;
		speed = chip->speed_hz;

		if (transfer->speed_hz)
			speed = transfer->speed_hz;

		if (transfer->bits_per_word)
			bits = transfer->bits_per_word;

		clk_div = ssp_get_clk_div(drv_data, speed);

		if (bits <= 8) {
			drv_data->n_bytes = 1;
			drv_data->dma_width = DCMD_WIDTH1;
			drv_data->read = drv_data->read != null_reader ?
						u8_reader : null_reader;
			drv_data->write = drv_data->write != null_writer ?
						u8_writer : null_writer;
		} else if (bits <= 16) {
			drv_data->n_bytes = 2;
			drv_data->dma_width = DCMD_WIDTH2;
			drv_data->read = drv_data->read != null_reader ?
						u16_reader : null_reader;
			drv_data->write = drv_data->write != null_writer ?
						u16_writer : null_writer;
		} else if (bits <= 32) {
			drv_data->n_bytes = 4;
			drv_data->dma_width = DCMD_WIDTH4;
			drv_data->read = drv_data->read != null_reader ?
						u32_reader : null_reader;
			drv_data->write = drv_data->write != null_writer ?
						u32_writer : null_writer;
		}
		/* if bits/word is changed in dma mode, then must check the
		 * thresholds and burst also */
		if (chip->enable_dma) {
			if (set_dma_burst_and_threshold(chip, message->spi,
							bits, &dma_burst,
							&dma_thresh))
				if (printk_ratelimit())
					dev_warn(&message->spi->dev,
						"pump_transfers: "
						"DMA burst size reduced to "
						"match bits_per_word\n");
		}

		cr0 = clk_div
			| SSCR0_Motorola
			| SSCR0_DataSize(bits > 16 ? bits - 16 : bits)
			| SSCR0_SSE
			| (bits > 16 ? SSCR0_EDSS : 0);
	}

	message->state = RUNNING_STATE;

	/* Try to map dma buffer and do a dma transfer if successful, but
	 * only if the length is non-zero and less than MAX_DMA_LEN.
	 *
	 * Zero-length non-descriptor DMA is illegal on PXA2xx; force use
	 * of PIO instead.  Care is needed above because the transfer may
	 * have been passed with buffers that are already dma mapped.
	 * A zero-length transfer in PIO mode will not try to write/read
	 * to/from the buffers
	 *
	 * REVISIT large transfers are exactly where we most want to be
	 * using DMA.  If this happens much, split those transfers into
	 * multiple DMA segments rather than forcing PIO.
	 */
	drv_data->dma_mapped = 0;
	if (drv_data->len > 0 && drv_data->len <= MAX_DMA_LEN)
		drv_data->dma_mapped = map_dma_buffers(drv_data);
	if (drv_data->dma_mapped) {

		/* Ensure we have the correct interrupt handler */
		drv_data->transfer_handler = dma_transfer;

		/* Setup rx DMA Channel */
		DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL;
		DSADR(drv_data->rx_channel) = drv_data->ssdr_physical;
		DTADR(drv_data->rx_channel) = drv_data->rx_dma;
		if (drv_data->rx == drv_data->null_dma_buf)
			/* No target address increment */
			DCMD(drv_data->rx_channel) = DCMD_FLOWSRC
							| drv_data->dma_width
							| dma_burst
							| drv_data->len;
		else
			DCMD(drv_data->rx_channel) = DCMD_INCTRGADDR
							| DCMD_FLOWSRC
							| drv_data->dma_width
							| dma_burst
							| drv_data->len;

		/* Setup tx DMA Channel */
		DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL;
		DSADR(drv_data->tx_channel) = drv_data->tx_dma;
		DTADR(drv_data->tx_channel) = drv_data->ssdr_physical;
		if (drv_data->tx == drv_data->null_dma_buf)
			/* No source address increment */
			DCMD(drv_data->tx_channel) = DCMD_FLOWTRG
							| drv_data->dma_width
							| dma_burst
							| drv_data->len;
		else
			DCMD(drv_data->tx_channel) = DCMD_INCSRCADDR
							| DCMD_FLOWTRG
							| drv_data->dma_width
							| dma_burst
							| drv_data->len;

		/* Enable dma end irqs on SSP to detect end of transfer */
		if (drv_data->ssp_type == PXA25x_SSP)
			DCMD(drv_data->tx_channel) |= DCMD_ENDIRQEN;

		/* Clear status and start DMA engine */
		cr1 = chip->cr1 | dma_thresh | drv_data->dma_cr1;
		write_SSSR(drv_data->clear_sr, reg);
		DCSR(drv_data->rx_channel) |= DCSR_RUN;
		DCSR(drv_data->tx_channel) |= DCSR_RUN;
	} else {
		/* Ensure we have the correct interrupt handler */
		drv_data->transfer_handler = interrupt_transfer;

		/* Clear status */
		cr1 = chip->cr1 | chip->threshold | drv_data->int_cr1;
		write_SSSR_CS(drv_data, drv_data->clear_sr);
	}

	/* see if we need to reload the config registers */
	if ((read_SSCR0(reg) != cr0)
		|| (read_SSCR1(reg) & SSCR1_CHANGE_MASK) !=
			(cr1 & SSCR1_CHANGE_MASK)) {

		/* stop the SSP, and update the other bits */
		write_SSCR0(cr0 & ~SSCR0_SSE, reg);
		if (!pxa25x_ssp_comp(drv_data))
			write_SSTO(chip->timeout, reg);
		/* first set CR1 without interrupt and service enables */
		write_SSCR1(cr1 & SSCR1_CHANGE_MASK, reg);
		/* restart the SSP */
		write_SSCR0(cr0, reg);

	} else {
		if (!pxa25x_ssp_comp(drv_data))
			write_SSTO(chip->timeout, reg);
	}

	cs_assert(drv_data);

	/* after chip select, release the data by enabling service
	 * requests and interrupts, without changing any mode bits */
	write_SSCR1(cr1, reg);
}

static int pxa2xx_spi_transfer_one_message(struct spi_master *master,
					   struct spi_message *msg)
{
	struct driver_data *drv_data = spi_master_get_devdata(master);

	drv_data->cur_msg = msg;
	/* Initial message state */
	drv_data->cur_msg->state = START_STATE;
	drv_data->cur_transfer = list_entry(drv_data->cur_msg->transfers.next,
						struct spi_transfer,
						transfer_list);

	/* prepare to setup the SSP, in pump_transfers, using the per
	 * chip configuration */
	drv_data->cur_chip = spi_get_ctldata(drv_data->cur_msg->spi);

	/* Mark as busy and launch transfers */
	tasklet_schedule(&drv_data->pump_transfers);
	return 0;
}

static int setup_cs(struct spi_device *spi, struct chip_data *chip,
		    struct pxa2xx_spi_chip *chip_info)
{
	int err = 0;

	if (chip == NULL || chip_info == NULL)
		return 0;

	/* NOTE: setup() can be called multiple times, possibly with
	 * different chip_info, release previously requested GPIO
	 */
	if (gpio_is_valid(chip->gpio_cs))
		gpio_free(chip->gpio_cs);

	/* If (*cs_control) is provided, ignore GPIO chip select */
	if (chip_info->cs_control) {
		chip->cs_control = chip_info->cs_control;
		return 0;
	}

	if (gpio_is_valid(chip_info->gpio_cs)) {
		err = gpio_request(chip_info->gpio_cs, "SPI_CS");
		if (err) {
			dev_err(&spi->dev, "failed to request chip select "
				"GPIO%d\n", chip_info->gpio_cs);
			return err;
		}

		chip->gpio_cs = chip_info->gpio_cs;
		chip->gpio_cs_inverted = spi->mode & SPI_CS_HIGH;

		err = gpio_direction_output(chip->gpio_cs,
					!chip->gpio_cs_inverted);
	}

	return err;
}

1217
e0c9905e
SS
1218static int setup(struct spi_device *spi)
1219{
1220 struct pxa2xx_spi_chip *chip_info = NULL;
1221 struct chip_data *chip;
1222 struct driver_data *drv_data = spi_master_get_devdata(spi->master);
1223 unsigned int clk_div;
f1f640a9
VS
1224 uint tx_thres = TX_THRESH_DFLT;
1225 uint rx_thres = RX_THRESH_DFLT;
e0c9905e 1226
2a8626a9 1227 if (!pxa25x_ssp_comp(drv_data)
8d94cc50
SS
1228 && (spi->bits_per_word < 4 || spi->bits_per_word > 32)) {
1229 dev_err(&spi->dev, "failed setup: ssp_type=%d, bits/wrd=%d "
1230 "b/w not 4-32 for type non-PXA25x_SSP\n",
1231 drv_data->ssp_type, spi->bits_per_word);
e0c9905e 1232 return -EINVAL;
2a8626a9 1233 } else if (pxa25x_ssp_comp(drv_data)
8d94cc50
SS
1234 && (spi->bits_per_word < 4
1235 || spi->bits_per_word > 16)) {
1236 dev_err(&spi->dev, "failed setup: ssp_type=%d, bits/wrd=%d "
1237 "b/w not 4-16 for type PXA25x_SSP\n",
1238 drv_data->ssp_type, spi->bits_per_word);
e0c9905e 1239 return -EINVAL;
8d94cc50 1240 }
e0c9905e 1241
8d94cc50 1242 /* Only alloc on first setup */
e0c9905e 1243 chip = spi_get_ctldata(spi);
8d94cc50 1244 if (!chip) {
e0c9905e 1245 chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL);
8d94cc50
SS
1246 if (!chip) {
1247 dev_err(&spi->dev,
1248 "failed setup: can't allocate chip data\n");
e0c9905e 1249 return -ENOMEM;
8d94cc50 1250 }
e0c9905e 1251
2a8626a9
SAS
1252 if (drv_data->ssp_type == CE4100_SSP) {
1253 if (spi->chip_select > 4) {
1254 dev_err(&spi->dev, "failed setup: "
1255 "cs number must not be > 4.\n");
1256 kfree(chip);
1257 return -EINVAL;
1258 }
1259
1260 chip->frm = spi->chip_select;
1261 } else
1262 chip->gpio_cs = -1;
e0c9905e 1263 chip->enable_dma = 0;
f1f640a9 1264 chip->timeout = TIMOUT_DFLT;
e0c9905e
SS
1265 chip->dma_burst_size = drv_data->master_info->enable_dma ?
1266 DCMD_BURST8 : 0;
e0c9905e
SS
1267 }
1268
8d94cc50
SS
1269 /* protocol drivers may change the chip settings, so...
1270 * if chip_info exists, use it */
1271 chip_info = spi->controller_data;
1272
e0c9905e 1273 /* chip_info isn't always needed */
8d94cc50 1274 chip->cr1 = 0;
e0c9905e 1275 if (chip_info) {
f1f640a9
VS
1276 if (chip_info->timeout)
1277 chip->timeout = chip_info->timeout;
1278 if (chip_info->tx_threshold)
1279 tx_thres = chip_info->tx_threshold;
1280 if (chip_info->rx_threshold)
1281 rx_thres = chip_info->rx_threshold;
1282 chip->enable_dma = drv_data->master_info->enable_dma;
e0c9905e 1283 chip->dma_threshold = 0;
e0c9905e
SS
1284 if (chip_info->enable_loopback)
1285 chip->cr1 = SSCR1_LBM;
1286 }
1287
f1f640a9
VS
1288 chip->threshold = (SSCR1_RxTresh(rx_thres) & SSCR1_RFT) |
1289 (SSCR1_TxTresh(tx_thres) & SSCR1_TFT);
1290
8d94cc50
SS
1291 /* set dma burst and threshold outside of chip_info path so that if
1292 * chip_info goes away after setting chip->enable_dma, the
1293 * burst and threshold can still respond to changes in bits_per_word */
1294 if (chip->enable_dma) {
1295 /* set up legal burst and threshold for dma */
1296 if (set_dma_burst_and_threshold(chip, spi, spi->bits_per_word,
1297 &chip->dma_burst_size,
1298 &chip->dma_threshold)) {
1299 dev_warn(&spi->dev, "in setup: DMA burst size reduced "
1300 "to match bits_per_word\n");
1301 }
1302 }
1303
3343b7a6 1304 clk_div = ssp_get_clk_div(drv_data, spi->max_speed_hz);
9708c121 1305 chip->speed_hz = spi->max_speed_hz;
e0c9905e
SS
1306
1307 chip->cr0 = clk_div
1308 | SSCR0_Motorola
5daa3ba0
SS
1309 | SSCR0_DataSize(spi->bits_per_word > 16 ?
1310 spi->bits_per_word - 16 : spi->bits_per_word)
e0c9905e
SS
1311 | SSCR0_SSE
1312 | (spi->bits_per_word > 16 ? SSCR0_EDSS : 0);
7f6ee1ad
JC
1313 chip->cr1 &= ~(SSCR1_SPO | SSCR1_SPH);
1314 chip->cr1 |= (((spi->mode & SPI_CPHA) != 0) ? SSCR1_SPH : 0)
1315 | (((spi->mode & SPI_CPOL) != 0) ? SSCR1_SPO : 0);
e0c9905e
SS
1316
1317 /* NOTE: PXA25x_SSP _could_ use external clocking ... */
2a8626a9 1318 if (!pxa25x_ssp_comp(drv_data))
7d077197 1319 dev_dbg(&spi->dev, "%ld Hz actual, %s\n",
3343b7a6 1320 drv_data->max_clk_rate
c9840daa
EM
1321 / (1 + ((chip->cr0 & SSCR0_SCR(0xfff)) >> 8)),
1322 chip->enable_dma ? "DMA" : "PIO");
e0c9905e 1323 else
7d077197 1324 dev_dbg(&spi->dev, "%ld Hz actual, %s\n",
3343b7a6 1325 drv_data->max_clk_rate / 2
c9840daa
EM
1326 / (1 + ((chip->cr0 & SSCR0_SCR(0x0ff)) >> 8)),
1327 chip->enable_dma ? "DMA" : "PIO");
e0c9905e
SS
1328
1329 if (spi->bits_per_word <= 8) {
1330 chip->n_bytes = 1;
1331 chip->dma_width = DCMD_WIDTH1;
1332 chip->read = u8_reader;
1333 chip->write = u8_writer;
1334 } else if (spi->bits_per_word <= 16) {
1335 chip->n_bytes = 2;
1336 chip->dma_width = DCMD_WIDTH2;
1337 chip->read = u16_reader;
1338 chip->write = u16_writer;
1339 } else if (spi->bits_per_word <= 32) {
1340 chip->cr0 |= SSCR0_EDSS;
1341 chip->n_bytes = 4;
1342 chip->dma_width = DCMD_WIDTH4;
1343 chip->read = u32_reader;
1344 chip->write = u32_writer;
1345 } else {
1346 dev_err(&spi->dev, "invalid wordsize\n");
e0c9905e
SS
1347 return -ENODEV;
1348 }
9708c121 1349 chip->bits_per_word = spi->bits_per_word;
e0c9905e
SS
1350
1351 spi_set_ctldata(spi, chip);
1352
2a8626a9
SAS
1353 if (drv_data->ssp_type == CE4100_SSP)
1354 return 0;
1355
a7bb3909 1356 return setup_cs(spi, chip, chip_info);
e0c9905e
SS
1357}
1358
static void cleanup(struct spi_device *spi)
{
	struct chip_data *chip = spi_get_ctldata(spi);
	struct driver_data *drv_data = spi_master_get_devdata(spi->master);

	if (!chip)
		return;

	if (drv_data->ssp_type != CE4100_SSP && gpio_is_valid(chip->gpio_cs))
		gpio_free(chip->gpio_cs);

	kfree(chip);
}

static int pxa2xx_spi_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct pxa2xx_spi_master *platform_info;
	struct spi_master *master;
	struct driver_data *drv_data;
	struct ssp_device *ssp;
	int status;

	platform_info = dev_get_platdata(dev);
	if (!platform_info) {
		dev_err(&pdev->dev, "missing platform data\n");
		return -ENODEV;
	}

	ssp = pxa_ssp_request(pdev->id, pdev->name);
	if (!ssp)
		ssp = &platform_info->ssp;

	if (!ssp->mmio_base) {
		dev_err(&pdev->dev, "failed to get ssp\n");
		return -ENODEV;
	}

	/* Allocate master with space for drv_data and null dma buffer */
	master = spi_alloc_master(dev, sizeof(struct driver_data) + 16);
	if (!master) {
		dev_err(&pdev->dev, "cannot alloc spi_master\n");
		pxa_ssp_free(ssp);
		return -ENOMEM;
	}
	drv_data = spi_master_get_devdata(master);
	drv_data->master = master;
	drv_data->master_info = platform_info;
	drv_data->pdev = pdev;
	drv_data->ssp = ssp;

	master->dev.parent = &pdev->dev;
	master->dev.of_node = pdev->dev.of_node;
	/* the spi->mode bits understood by this driver: */
	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;

	master->bus_num = ssp->port_id;
	master->num_chipselect = platform_info->num_chipselect;
	master->dma_alignment = DMA_ALIGNMENT;
	master->cleanup = cleanup;
	master->setup = setup;
	master->transfer_one_message = pxa2xx_spi_transfer_one_message;

	drv_data->ssp_type = ssp->type;
	drv_data->null_dma_buf = (u32 *)PTR_ALIGN(&drv_data[1], DMA_ALIGNMENT);

	drv_data->ioaddr = ssp->mmio_base;
	drv_data->ssdr_physical = ssp->phys_base + SSDR;
	if (pxa25x_ssp_comp(drv_data)) {
		drv_data->int_cr1 = SSCR1_TIE | SSCR1_RIE;
		drv_data->dma_cr1 = 0;
		drv_data->clear_sr = SSSR_ROR;
		drv_data->mask_sr = SSSR_RFS | SSSR_TFS | SSSR_ROR;
	} else {
		drv_data->int_cr1 = SSCR1_TIE | SSCR1_RIE | SSCR1_TINTE;
		drv_data->dma_cr1 = SSCR1_TSRE | SSCR1_RSRE | SSCR1_TINTE;
		drv_data->clear_sr = SSSR_ROR | SSSR_TINT;
		drv_data->mask_sr = SSSR_TINT | SSSR_RFS | SSSR_TFS | SSSR_ROR;
	}

	status = request_irq(ssp->irq, ssp_int, IRQF_SHARED, dev_name(dev),
			     drv_data);
	if (status < 0) {
		dev_err(&pdev->dev, "cannot get IRQ %d\n", ssp->irq);
		goto out_error_master_alloc;
	}

	/* Setup DMA if requested */
	drv_data->tx_channel = -1;
	drv_data->rx_channel = -1;
	if (platform_info->enable_dma) {

		/* Get two DMA channels (rx and tx) */
		drv_data->rx_channel = pxa_request_dma("pxa2xx_spi_ssp_rx",
							DMA_PRIO_HIGH,
							dma_handler,
							drv_data);
		if (drv_data->rx_channel < 0) {
			dev_err(dev, "problem (%d) requesting rx channel\n",
				drv_data->rx_channel);
			status = -ENODEV;
			goto out_error_irq_alloc;
		}
		drv_data->tx_channel = pxa_request_dma("pxa2xx_spi_ssp_tx",
							DMA_PRIO_MEDIUM,
							dma_handler,
							drv_data);
		if (drv_data->tx_channel < 0) {
			dev_err(dev, "problem (%d) requesting tx channel\n",
				drv_data->tx_channel);
			status = -ENODEV;
			goto out_error_dma_alloc;
		}

		DRCMR(ssp->drcmr_rx) = DRCMR_MAPVLD | drv_data->rx_channel;
		DRCMR(ssp->drcmr_tx) = DRCMR_MAPVLD | drv_data->tx_channel;
	}

	/* Enable SOC clock */
	clk_prepare_enable(ssp->clk);

	drv_data->max_clk_rate = clk_get_rate(ssp->clk);

	/* Load default SSP configuration */
	write_SSCR0(0, drv_data->ioaddr);
	write_SSCR1(SSCR1_RxTresh(RX_THRESH_DFLT) |
				SSCR1_TxTresh(TX_THRESH_DFLT),
				drv_data->ioaddr);
	write_SSCR0(SSCR0_SCR(2)
			| SSCR0_Motorola
			| SSCR0_DataSize(8),
			drv_data->ioaddr);
	if (!pxa25x_ssp_comp(drv_data))
		write_SSTO(0, drv_data->ioaddr);
	write_SSPSP(0, drv_data->ioaddr);

	tasklet_init(&drv_data->pump_transfers, pump_transfers,
		     (unsigned long)drv_data);

	/* Register with the SPI framework */
	platform_set_drvdata(pdev, drv_data);
	status = spi_register_master(master);
	if (status != 0) {
		dev_err(&pdev->dev, "problem registering spi master\n");
		goto out_error_clock_enabled;
	}

	return status;

out_error_clock_enabled:
	clk_disable_unprepare(ssp->clk);

out_error_dma_alloc:
	if (drv_data->tx_channel != -1)
		pxa_free_dma(drv_data->tx_channel);
	if (drv_data->rx_channel != -1)
		pxa_free_dma(drv_data->rx_channel);

out_error_irq_alloc:
	free_irq(ssp->irq, drv_data);

out_error_master_alloc:
	spi_master_put(master);
	pxa_ssp_free(ssp);
	return status;
}

static int pxa2xx_spi_remove(struct platform_device *pdev)
{
	struct driver_data *drv_data = platform_get_drvdata(pdev);
	struct ssp_device *ssp;

	if (!drv_data)
		return 0;
	ssp = drv_data->ssp;

	/* Disable the SSP at the peripheral and SOC level */
	write_SSCR0(0, drv_data->ioaddr);
	clk_disable_unprepare(ssp->clk);

	/* Release DMA */
	if (drv_data->master_info->enable_dma) {
		DRCMR(ssp->drcmr_rx) = 0;
		DRCMR(ssp->drcmr_tx) = 0;
		pxa_free_dma(drv_data->tx_channel);
		pxa_free_dma(drv_data->rx_channel);
	}

	/* Release IRQ */
	free_irq(ssp->irq, drv_data);

	/* Release SSP */
	pxa_ssp_free(ssp);

	/* Disconnect from the SPI framework */
	spi_unregister_master(drv_data->master);

	/* Prevent double remove */
	platform_set_drvdata(pdev, NULL);

	return 0;
}

static void pxa2xx_spi_shutdown(struct platform_device *pdev)
{
	int status = 0;

	if ((status = pxa2xx_spi_remove(pdev)) != 0)
		dev_err(&pdev->dev, "shutdown failed with %d\n", status);
}

#ifdef CONFIG_PM
static int pxa2xx_spi_suspend(struct device *dev)
{
	struct driver_data *drv_data = dev_get_drvdata(dev);
	struct ssp_device *ssp = drv_data->ssp;
	int status = 0;

	status = spi_master_suspend(drv_data->master);
	if (status != 0)
		return status;
	write_SSCR0(0, drv_data->ioaddr);
	clk_disable_unprepare(ssp->clk);

	return 0;
}

static int pxa2xx_spi_resume(struct device *dev)
{
	struct driver_data *drv_data = dev_get_drvdata(dev);
	struct ssp_device *ssp = drv_data->ssp;
	int status = 0;

	if (drv_data->rx_channel != -1)
		DRCMR(drv_data->ssp->drcmr_rx) =
			DRCMR_MAPVLD | drv_data->rx_channel;
	if (drv_data->tx_channel != -1)
		DRCMR(drv_data->ssp->drcmr_tx) =
			DRCMR_MAPVLD | drv_data->tx_channel;

	/* Enable the SSP clock */
	clk_prepare_enable(ssp->clk);

	/* Start the queue running */
	status = spi_master_resume(drv_data->master);
	if (status != 0) {
		dev_err(dev, "problem starting queue (%d)\n", status);
		return status;
	}

	return 0;
}

static const struct dev_pm_ops pxa2xx_spi_pm_ops = {
	.suspend	= pxa2xx_spi_suspend,
	.resume		= pxa2xx_spi_resume,
};
#endif

static struct platform_driver driver = {
	.driver = {
		.name	= "pxa2xx-spi",
		.owner	= THIS_MODULE,
#ifdef CONFIG_PM
		.pm	= &pxa2xx_spi_pm_ops,
#endif
	},
	.probe = pxa2xx_spi_probe,
	.remove = pxa2xx_spi_remove,
	.shutdown = pxa2xx_spi_shutdown,
};

static int __init pxa2xx_spi_init(void)
{
	return platform_driver_register(&driver);
}
subsys_initcall(pxa2xx_spi_init);

static void __exit pxa2xx_spi_exit(void)
{
	platform_driver_unregister(&driver);
}
module_exit(pxa2xx_spi_exit);