spi: spi_bfin: change handling of communication parameters
drivers/spi/spi_bfin5xx.c
1/*
2 * File: drivers/spi/bfin5xx_spi.c
3 * Maintainer:
4 * Bryan Wu <bryan.wu@analog.com>
5 * Original Author:
6 * Luke Yang (Analog Devices Inc.)
7 *
8 * Created: March 10, 2006
9 * Description: SPI controller driver for Blackfin BF5xx
10 * Bugs: Enter bugs at http://blackfin.uclinux.org/
11 *
12 * Modified:
13 * March 10, 2006 bfin5xx_spi.c Created. (Luke Yang)
14 * August 7, 2006 added full duplex mode (Axel Weiss & Luke Yang)
15 * July 17, 2007 add support for BF54x SPI0 controller (Bryan Wu)
16 * July 30, 2007 add platform_resource interface to support multi-port
17 * SPI controller (Bryan Wu)
18 *
19 * Copyright 2004-2007 Analog Devices Inc.
20 *
21 * This program is free software; you can redistribute it and/or modify
22 * it under the terms of the GNU General Public License as published by
23 * the Free Software Foundation; either version 2, or (at your option)
24 * any later version.
25 *
26 * This program is distributed in the hope that it will be useful,
27 * but WITHOUT ANY WARRANTY; without even the implied warranty of
28 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
29 * GNU General Public License for more details.
30 *
31 * You should have received a copy of the GNU General Public License
32 * along with this program; see the file COPYING.
33 * If not, write to the Free Software Foundation,
34 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
35 */
36
37#include <linux/init.h>
38#include <linux/module.h>
39#include <linux/delay.h>
40#include <linux/device.h>
41#include <linux/io.h>
42#include <linux/ioport.h>
43#include <linux/irq.h>
44#include <linux/errno.h>
45#include <linux/interrupt.h>
46#include <linux/platform_device.h>
47#include <linux/dma-mapping.h>
48#include <linux/spi/spi.h>
49#include <linux/workqueue.h>
50
51#include <asm/dma.h>
52#include <asm/portmux.h>
53#include <asm/bfin5xx_spi.h>
54
55#define DRV_NAME "bfin-spi"
56#define DRV_AUTHOR "Bryan Wu, Luke Yang"
57#define DRV_DESC "Blackfin BF5xx on-chip SPI Controller Driver"
58#define DRV_VERSION "1.0"
59
60MODULE_AUTHOR(DRV_AUTHOR);
61MODULE_DESCRIPTION(DRV_DESC);
62MODULE_LICENSE("GPL");
63
64#define IS_DMA_ALIGNED(x) (((u32)(x)&0x07)==0)
65
66static u32 spi_dma_ch;
67static u32 spi_regs_base;
68
69#define DEFINE_SPI_REG(reg, off) \
70static inline u16 read_##reg(void) \
71	{ return bfin_read16(spi_regs_base + off); } \
72static inline void write_##reg(u16 v) \
73	{ bfin_write16(spi_regs_base + off, v); }
74
75DEFINE_SPI_REG(CTRL, 0x00)
76DEFINE_SPI_REG(FLAG, 0x04)
77DEFINE_SPI_REG(STAT, 0x08)
78DEFINE_SPI_REG(TDBR, 0x0C)
79DEFINE_SPI_REG(RDBR, 0x10)
80DEFINE_SPI_REG(BAUD, 0x14)
81DEFINE_SPI_REG(SHAW, 0x18)
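/*
 * For reference: each DEFINE_SPI_REG(reg, off) line above expands to a
 * read_<reg>()/write_<reg>() accessor pair, e.g. DEFINE_SPI_REG(CTRL, 0x00)
 * becomes
 *
 *	static inline u16 read_CTRL(void)
 *	{ return bfin_read16(spi_regs_base + 0x00); }
 *	static inline void write_CTRL(u16 v)
 *	{ bfin_write16(spi_regs_base + 0x00, v); }
 *
 * so the rest of the driver touches the controller registers relative to
 * whatever base address probe() ioremap()ed into spi_regs_base.
 */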
82#define START_STATE ((void*)0)
83#define RUNNING_STATE ((void*)1)
84#define DONE_STATE ((void*)2)
85#define ERROR_STATE ((void*)-1)
86#define QUEUE_RUNNING 0
87#define QUEUE_STOPPED 1
88int dma_requested;
89
90struct driver_data {
91 /* Driver model hookup */
92 struct platform_device *pdev;
93
94 /* SPI framework hookup */
95 struct spi_master *master;
96
97 /* BFIN hookup */
98 struct bfin5xx_spi_master *master_info;
99
100 /* Driver message queue */
101 struct workqueue_struct *workqueue;
102 struct work_struct pump_messages;
103 spinlock_t lock;
104 struct list_head queue;
105 int busy;
106 int run;
107
108 /* Message Transfer pump */
109 struct tasklet_struct pump_transfers;
110
111 /* Current message transfer state info */
112 struct spi_message *cur_msg;
113 struct spi_transfer *cur_transfer;
114 struct chip_data *cur_chip;
115 size_t len_in_bytes;
116 size_t len;
117 void *tx;
118 void *tx_end;
119 void *rx;
120 void *rx_end;
121 int dma_mapped;
122 dma_addr_t rx_dma;
123 dma_addr_t tx_dma;
124 size_t rx_map_len;
125 size_t tx_map_len;
126 u8 n_bytes;
127	int cs_change;
128 void (*write) (struct driver_data *);
129 void (*read) (struct driver_data *);
130 void (*duplex) (struct driver_data *);
131};
132
133struct chip_data {
134 u16 ctl_reg;
135 u16 baud;
136 u16 flag;
137
138 u8 chip_select_num;
139 u8 n_bytes;
140	u8 width;		/* 0 or 1 */
141 u8 enable_dma;
142 u8 bits_per_word; /* 8 or 16 */
143 u8 cs_change_per_word;
144 u8 cs_chg_udelay;
145 void (*write) (struct driver_data *);
146 void (*read) (struct driver_data *);
147 void (*duplex) (struct driver_data *);
148};
149
150static void bfin_spi_enable(struct driver_data *drv_data)
151{
152 u16 cr;
153
154 cr = read_CTRL();
155 write_CTRL(cr | BIT_CTL_ENABLE);
156}
157
158static void bfin_spi_disable(struct driver_data *drv_data)
159{
160 u16 cr;
161
162 cr = read_CTRL();
163 write_CTRL(cr & (~BIT_CTL_ENABLE));
164}
165
166/* Calculate the SPI_BAUD register value based on input HZ */
167static u16 hz_to_spi_baud(u32 speed_hz)
168{
169 u_long sclk = get_sclk();
170 u16 spi_baud = (sclk / (2 * speed_hz));
171
172 if ((sclk % (2 * speed_hz)) > 0)
173 spi_baud++;
174
175 return spi_baud;
176}
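/*
 * Worked example (illustrative numbers): with a system clock of
 * sclk = 100 MHz and a requested speed_hz of 8 MHz,
 *
 *	spi_baud = 100000000 / (2 * 8000000) = 6, remainder 4000000
 *
 * so the non-zero remainder bumps spi_baud to 7, giving an actual SCK of
 * 100 MHz / (2 * 7) ~= 7.14 MHz -- rounding the divisor up keeps the
 * resulting clock at or below the requested speed.
 */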
177
178static int flush(struct driver_data *drv_data)
179{
180 unsigned long limit = loops_per_jiffy << 1;
181
182 /* wait for stop and clear stat */
183 while (!(read_STAT() & BIT_STAT_SPIF) && limit--)
184 continue;
185
186 write_STAT(BIT_STAT_CLR);
187
188 return limit;
189}
190
191/* Chip select operation functions for cs_change flag */
192static void cs_active(struct chip_data *chip)
193{
194 u16 flag = read_FLAG();
195
196 flag |= chip->flag;
197 flag &= ~(chip->flag << 8);
198
199 write_FLAG(flag);
200}
201
202static void cs_deactive(struct chip_data *chip)
203{
204 u16 flag = read_FLAG();
205
206 flag |= (chip->flag << 8);
207
208 write_FLAG(flag);
209}
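/*
 * How the two helpers above drive SPI_FLG (a sketch based on the way setup()
 * builds chip->flag): the low byte of the register carries the slave-select
 * enables (FLSx) and the high byte the output values (FLGx), which are
 * active low.  cs_active() ORs in chip->flag to keep FLSx set and then
 * clears the matching FLGx bit (chip->flag << 8) to assert the select;
 * cs_deactive() sets that FLGx bit again to release the line.
 */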
210
211#define MAX_SPI_SSEL 7
212
213/* stop controller and re-config current chip */
214static int restore_state(struct driver_data *drv_data)
215{
216 struct chip_data *chip = drv_data->cur_chip;
5fec5b5a 217 int ret = 0;
12e17c42 218
219 /* Clear status and disable clock */
220 write_STAT(BIT_STAT_CLR);
221 bfin_spi_disable(drv_data);
88b40369 222 dev_dbg(&drv_data->pdev->dev, "restoring spi ctl state\n");
a5f6abd4 223
5fec5b5a 224 /* Load the registers */
cc487e73 225 cs_deactive(chip);
5fec5b5a 226 write_BAUD(chip->baud);
227 chip->ctl_reg &= (~BIT_CTL_TIMOD);
228 chip->ctl_reg |= (chip->width << 8);
229 write_CTRL(chip->ctl_reg);
230
231 bfin_spi_enable(drv_data);
5fec5b5a 232
233 if (ret)
234 dev_dbg(&drv_data->pdev->dev,
235 ": request chip select number %d failed\n",
236 chip->chip_select_num);
237
238 return ret;
239}
240
241/* used to kick off transfer in rx mode */
242static unsigned short dummy_read(void)
243{
244 unsigned short tmp;
245 tmp = read_RDBR();
246 return tmp;
247}
248
249static void null_writer(struct driver_data *drv_data)
250{
251 u8 n_bytes = drv_data->n_bytes;
252
253 while (drv_data->tx < drv_data->tx_end) {
254 write_TDBR(0);
255 while ((read_STAT() & BIT_STAT_TXS))
256 continue;
257 drv_data->tx += n_bytes;
258 }
259}
260
261static void null_reader(struct driver_data *drv_data)
262{
263 u8 n_bytes = drv_data->n_bytes;
264 dummy_read();
265
266 while (drv_data->rx < drv_data->rx_end) {
267 while (!(read_STAT() & BIT_STAT_RXS))
268 continue;
269 dummy_read();
270 drv_data->rx += n_bytes;
271 }
272}
273
274static void u8_writer(struct driver_data *drv_data)
275{
131b17d4 276 dev_dbg(&drv_data->pdev->dev,
88b40369 277 "cr8-s is 0x%x\n", read_STAT());
cc487e73 278
279 while (drv_data->tx < drv_data->tx_end) {
280 write_TDBR(*(u8 *) (drv_data->tx));
281 while (read_STAT() & BIT_STAT_TXS)
282 continue;
283 ++drv_data->tx;
284 }
285
286 /* poll for SPI completion before returning */
287 while (!(read_STAT() & BIT_STAT_SPIF))
288 continue;
289}
290
291static void u8_cs_chg_writer(struct driver_data *drv_data)
292{
293 struct chip_data *chip = drv_data->cur_chip;
294
295 while (drv_data->tx < drv_data->tx_end) {
fad91c89 296 cs_active(chip);
297
298 write_TDBR(*(u8 *) (drv_data->tx));
299 while (read_STAT() & BIT_STAT_TXS)
300 continue;
fad91c89 301 cs_deactive(chip);
5fec5b5a 302
303 if (chip->cs_chg_udelay)
304 udelay(chip->cs_chg_udelay);
305 ++drv_data->tx;
306 }
5fec5b5a 307
308 /* poll for SPI completion before returning */
309 while (!(read_STAT() & BIT_STAT_SPIF))
310 continue;
311}
312
313static void u8_reader(struct driver_data *drv_data)
314{
131b17d4 315 dev_dbg(&drv_data->pdev->dev,
88b40369 316 "cr-8 is 0x%x\n", read_STAT());
317
318 /* clear TDBR buffer before read(else it will be shifted out) */
319 write_TDBR(0xFFFF);
320
321 dummy_read();
cc487e73 322
323 while (drv_data->rx < drv_data->rx_end - 1) {
324 while (!(read_STAT() & BIT_STAT_RXS))
325 continue;
326 *(u8 *) (drv_data->rx) = read_RDBR();
327 ++drv_data->rx;
328 }
329
330 while (!(read_STAT() & BIT_STAT_RXS))
331 continue;
332 *(u8 *) (drv_data->rx) = read_SHAW();
333 ++drv_data->rx;
334}
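/*
 * Note on the final read above: the last word is taken from the RDBR shadow
 * register (SHAW) rather than RDBR itself.  The reasoning, inferred from the
 * code and hedged accordingly, is that reading RDBR can initiate another
 * transfer on this controller, so using the shadow copy for the final byte
 * avoids clocking out an extra, unwanted frame (see the Blackfin HRM for the
 * authoritative description).
 */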
335
336static void u8_cs_chg_reader(struct driver_data *drv_data)
337{
338 struct chip_data *chip = drv_data->cur_chip;
339
340 /* clear TDBR buffer before read(else it will be shifted out) */
341 write_TDBR(0xFFFF);
a5f6abd4 342
343 cs_active(chip);
344 dummy_read();
345
346 while (drv_data->rx < drv_data->rx_end - 1) {
fad91c89 347 cs_deactive(chip);
5fec5b5a 348
349 if (chip->cs_chg_udelay)
350 udelay(chip->cs_chg_udelay);
351
352 while (!(read_STAT() & BIT_STAT_RXS))
353 continue;
354 cs_active(chip);
355 *(u8 *) (drv_data->rx) = read_RDBR();
356 ++drv_data->rx;
357 }
fad91c89 358 cs_deactive(chip);
5fec5b5a 359
360 while (!(read_STAT() & BIT_STAT_RXS))
361 continue;
362 *(u8 *) (drv_data->rx) = read_SHAW();
363 ++drv_data->rx;
364}
365
366static void u8_duplex(struct driver_data *drv_data)
367{
368 /* in duplex mode, clk is triggered by writing of TDBR */
369 while (drv_data->rx < drv_data->rx_end) {
370 write_TDBR(*(u8 *) (drv_data->tx));
cc487e73 371 while (read_STAT() & BIT_STAT_TXS)
372 continue;
373 while (!(read_STAT() & BIT_STAT_RXS))
374 continue;
375 *(u8 *) (drv_data->rx) = read_RDBR();
376 ++drv_data->rx;
377 ++drv_data->tx;
378 }
379
380 /* poll for SPI completion before returning */
381 while (!(read_STAT() & BIT_STAT_SPIF))
382 continue;
383}
384
385static void u8_cs_chg_duplex(struct driver_data *drv_data)
386{
387 struct chip_data *chip = drv_data->cur_chip;
388
389 while (drv_data->rx < drv_data->rx_end) {
fad91c89 390 cs_active(chip);
5fec5b5a 391
a5f6abd4 392 write_TDBR(*(u8 *) (drv_data->tx));
cc487e73 393 while (read_STAT() & BIT_STAT_TXS)
394 continue;
395 while (!(read_STAT() & BIT_STAT_RXS))
396 continue;
397 *(u8 *) (drv_data->rx) = read_RDBR();
fad91c89 398 cs_deactive(chip);
5fec5b5a 399
400 if (chip->cs_chg_udelay)
401 udelay(chip->cs_chg_udelay);
402 ++drv_data->rx;
403 ++drv_data->tx;
404 }
405
406 /* poll for SPI completion before returning */
407 while (!(read_STAT() & BIT_STAT_SPIF))
408 continue;
409}
410
411static void u16_writer(struct driver_data *drv_data)
412{
131b17d4 413 dev_dbg(&drv_data->pdev->dev,
414 "cr16 is 0x%x\n", read_STAT());
415
416 while (drv_data->tx < drv_data->tx_end) {
417 write_TDBR(*(u16 *) (drv_data->tx));
418 while ((read_STAT() & BIT_STAT_TXS))
419 continue;
420 drv_data->tx += 2;
421 }
422
423 /* poll for SPI completion before returning */
424 while (!(read_STAT() & BIT_STAT_SPIF))
425 continue;
426}
427
428static void u16_cs_chg_writer(struct driver_data *drv_data)
429{
430 struct chip_data *chip = drv_data->cur_chip;
431
432 while (drv_data->tx < drv_data->tx_end) {
fad91c89 433 cs_active(chip);
434
435 write_TDBR(*(u16 *) (drv_data->tx));
436 while ((read_STAT() & BIT_STAT_TXS))
437 continue;
fad91c89 438 cs_deactive(chip);
5fec5b5a 439
440 if (chip->cs_chg_udelay)
441 udelay(chip->cs_chg_udelay);
442 drv_data->tx += 2;
443 }
444
445 /* poll for SPI completion before returning */
446 while (!(read_STAT() & BIT_STAT_SPIF))
447 continue;
448}
449
450static void u16_reader(struct driver_data *drv_data)
451{
452 dev_dbg(&drv_data->pdev->dev,
453 "cr-16 is 0x%x\n", read_STAT());
454
455 /* clear TDBR buffer before read(else it will be shifted out) */
456 write_TDBR(0xFFFF);
457
458 dummy_read();
459
460 while (drv_data->rx < (drv_data->rx_end - 2)) {
461 while (!(read_STAT() & BIT_STAT_RXS))
462 continue;
463 *(u16 *) (drv_data->rx) = read_RDBR();
464 drv_data->rx += 2;
465 }
466
467 while (!(read_STAT() & BIT_STAT_RXS))
468 continue;
469 *(u16 *) (drv_data->rx) = read_SHAW();
470 drv_data->rx += 2;
471}
472
473static void u16_cs_chg_reader(struct driver_data *drv_data)
474{
475 struct chip_data *chip = drv_data->cur_chip;
476
477 /* clear TDBR buffer before read(else it will be shifted out) */
478 write_TDBR(0xFFFF);
a5f6abd4 479
480 cs_active(chip);
481 dummy_read();
482
483 while (drv_data->rx < drv_data->rx_end) {
fad91c89 484 cs_deactive(chip);
5fec5b5a 485
486 if (chip->cs_chg_udelay)
487 udelay(chip->cs_chg_udelay);
488
489 while (!(read_STAT() & BIT_STAT_RXS))
490 continue;
491 cs_active(chip);
492 *(u16 *) (drv_data->rx) = read_RDBR();
493 drv_data->rx += 2;
494 }
fad91c89 495 cs_deactive(chip);
496
497 while (!(read_STAT() & BIT_STAT_RXS))
498 continue;
499 *(u16 *) (drv_data->rx) = read_SHAW();
500 drv_data->rx += 2;
501}
502
503static void u16_duplex(struct driver_data *drv_data)
504{
505 /* in duplex mode, clk is triggered by writing of TDBR */
506 while (drv_data->tx < drv_data->tx_end) {
507 write_TDBR(*(u16 *) (drv_data->tx));
cc487e73 508 while (read_STAT() & BIT_STAT_TXS)
509 continue;
510 while (!(read_STAT() & BIT_STAT_RXS))
511 continue;
512 *(u16 *) (drv_data->rx) = read_RDBR();
513 drv_data->rx += 2;
514 drv_data->tx += 2;
515 }
516
517 /* poll for SPI completion before returning */
518 while (!(read_STAT() & BIT_STAT_SPIF))
519 continue;
520}
521
522static void u16_cs_chg_duplex(struct driver_data *drv_data)
523{
524 struct chip_data *chip = drv_data->cur_chip;
525
526 while (drv_data->tx < drv_data->tx_end) {
fad91c89 527 cs_active(chip);
528
529 write_TDBR(*(u16 *) (drv_data->tx));
cc487e73 530 while (read_STAT() & BIT_STAT_TXS)
531 continue;
532 while (!(read_STAT() & BIT_STAT_RXS))
533 continue;
534 *(u16 *) (drv_data->rx) = read_RDBR();
fad91c89 535 cs_deactive(chip);
5fec5b5a 536
537 if (chip->cs_chg_udelay)
538 udelay(chip->cs_chg_udelay);
539 drv_data->rx += 2;
540 drv_data->tx += 2;
541 }
542
543 /* poll for SPI completion before returning */
544 while (!(read_STAT() & BIT_STAT_SPIF))
545 continue;
546}
547
548/* test if there is more transfer to be done */
549static void *next_transfer(struct driver_data *drv_data)
550{
551 struct spi_message *msg = drv_data->cur_msg;
552 struct spi_transfer *trans = drv_data->cur_transfer;
553
554 /* Move to next transfer */
555 if (trans->transfer_list.next != &msg->transfers) {
556 drv_data->cur_transfer =
557 list_entry(trans->transfer_list.next,
558 struct spi_transfer, transfer_list);
559 return RUNNING_STATE;
560 } else
561 return DONE_STATE;
562}
563
564/*
565 * caller already set message->status;
566 * dma and pio irqs are blocked give finished message back
567 */
568static void giveback(struct driver_data *drv_data)
569{
fad91c89 570 struct chip_data *chip = drv_data->cur_chip;
571 struct spi_transfer *last_transfer;
572 unsigned long flags;
573 struct spi_message *msg;
574
575 spin_lock_irqsave(&drv_data->lock, flags);
576 msg = drv_data->cur_msg;
577 drv_data->cur_msg = NULL;
578 drv_data->cur_transfer = NULL;
579 drv_data->cur_chip = NULL;
580 queue_work(drv_data->workqueue, &drv_data->pump_messages);
581 spin_unlock_irqrestore(&drv_data->lock, flags);
582
583 last_transfer = list_entry(msg->transfers.prev,
584 struct spi_transfer, transfer_list);
585
586 msg->state = NULL;
587
588	/* disable the chip select signal, but do not stop the SPI in autobuffer mode */
589 if (drv_data->tx_dma != 0xFFFF) {
fad91c89 590 cs_deactive(chip);
591 bfin_spi_disable(drv_data);
592 }
593
594 if (!drv_data->cs_change)
595 cs_deactive(chip);
596
597 if (msg->complete)
598 msg->complete(msg->context);
599}
600
88b40369 601static irqreturn_t dma_irq_handler(int irq, void *dev_id)
602{
603 struct driver_data *drv_data = (struct driver_data *)dev_id;
604 struct spi_message *msg = drv_data->cur_msg;
fad91c89 605 struct chip_data *chip = drv_data->cur_chip;
a5f6abd4 606
88b40369 607 dev_dbg(&drv_data->pdev->dev, "in dma_irq_handler\n");
a32c691d 608 clear_dma_irqstat(spi_dma_ch);
a5f6abd4 609
d6fe89b0 610 /* Wait for DMA to complete */
a32c691d 611 while (get_dma_curr_irqstat(spi_dma_ch) & DMA_RUN)
612 continue;
613
a5f6abd4 614 /*
615 * wait for the last transaction shifted out. HRM states:
616 * at this point there may still be data in the SPI DMA FIFO waiting
617 * to be transmitted ... software needs to poll TXS in the SPI_STAT
618 * register until it goes low for 2 successive reads
619 */
620 if (drv_data->tx != NULL) {
621 while ((read_STAT() & TXS) ||
622 (read_STAT() & TXS))
623 continue;
624 }
625
a32c691d 626 while (!(read_STAT() & SPIF))
627 continue;
628
629 msg->actual_length += drv_data->len_in_bytes;
630
631 if (drv_data->cs_change)
632 cs_deactive(chip);
633
634 /* Move to next transfer */
635 msg->state = next_transfer(drv_data);
636
637 /* Schedule transfer tasklet */
638 tasklet_schedule(&drv_data->pump_transfers);
639
640 /* free the irq handler before next transfer */
641 dev_dbg(&drv_data->pdev->dev,
642 "disable dma channel irq%d\n",
643 spi_dma_ch);
644 dma_disable_irq(spi_dma_ch);
645
646 return IRQ_HANDLED;
647}
648
649static void pump_transfers(unsigned long data)
650{
651 struct driver_data *drv_data = (struct driver_data *)data;
652 struct spi_message *message = NULL;
653 struct spi_transfer *transfer = NULL;
654 struct spi_transfer *previous = NULL;
655 struct chip_data *chip = NULL;
656 u8 width;
657 u16 cr, dma_width, dma_config;
658 u32 tranf_success = 1;
659
660 /* Get current state information */
661 message = drv_data->cur_msg;
662 transfer = drv_data->cur_transfer;
663 chip = drv_data->cur_chip;
664 /*
665 * if msg is error or done, report it back using complete() callback
666 */
667
668 /* Handle for abort */
669 if (message->state == ERROR_STATE) {
670 message->status = -EIO;
671 giveback(drv_data);
672 return;
673 }
674
675 /* Handle end of message */
676 if (message->state == DONE_STATE) {
677 message->status = 0;
678 giveback(drv_data);
679 return;
680 }
681
682 /* Delay if requested at end of transfer */
683 if (message->state == RUNNING_STATE) {
684 previous = list_entry(transfer->transfer_list.prev,
685 struct spi_transfer, transfer_list);
686 if (previous->delay_usecs)
687 udelay(previous->delay_usecs);
688 }
689
690 /* Setup the transfer state based on the type of transfer */
691 if (flush(drv_data) == 0) {
692 dev_err(&drv_data->pdev->dev, "pump_transfers: flush failed\n");
693 message->status = -EIO;
694 giveback(drv_data);
695 return;
696 }
697
698 if (transfer->tx_buf != NULL) {
699 drv_data->tx = (void *)transfer->tx_buf;
700 drv_data->tx_end = drv_data->tx + transfer->len;
701 dev_dbg(&drv_data->pdev->dev, "tx_buf is %p, tx_end is %p\n",
702 transfer->tx_buf, drv_data->tx_end);
703 } else {
704 drv_data->tx = NULL;
705 }
706
707 if (transfer->rx_buf != NULL) {
708 drv_data->rx = transfer->rx_buf;
709 drv_data->rx_end = drv_data->rx + transfer->len;
710 dev_dbg(&drv_data->pdev->dev, "rx_buf is %p, rx_end is %p\n",
711 transfer->rx_buf, drv_data->rx_end);
712 } else {
713 drv_data->rx = NULL;
714 }
715
716 drv_data->rx_dma = transfer->rx_dma;
717 drv_data->tx_dma = transfer->tx_dma;
718 drv_data->len_in_bytes = transfer->len;
fad91c89 719 drv_data->cs_change = transfer->cs_change;
720
721 width = chip->width;
722 if (width == CFG_SPI_WORDSIZE16) {
723 drv_data->len = (transfer->len) >> 1;
724 } else {
725 drv_data->len = transfer->len;
726 }
727 drv_data->write = drv_data->tx ? chip->write : null_writer;
728 drv_data->read = drv_data->rx ? chip->read : null_reader;
729 drv_data->duplex = chip->duplex ? chip->duplex : null_writer;
730	dev_dbg(&drv_data->pdev->dev, "transfer: "
731		"drv_data->write is %p, chip->write is %p, null_wr is %p\n",
732		drv_data->write, chip->write, null_writer);
733
734	/* speed and width have been set per message */
735 message->state = RUNNING_STATE;
736 dma_config = 0;
737
738 write_STAT(BIT_STAT_CLR);
739 cr = (read_CTRL() & (~BIT_CTL_TIMOD));
fad91c89 740 cs_active(chip);
a5f6abd4 741
742 dev_dbg(&drv_data->pdev->dev,
743 "now pumping a transfer: width is %d, len is %d\n",
744 width, transfer->len);
745
746 /*
747 * Try to map dma buffer and do a dma transfer if
748 * successful use different way to r/w according to
749 * drv_data->cur_chip->enable_dma
750 */
751 if (drv_data->cur_chip->enable_dma && drv_data->len > 6) {
752
753 disable_dma(spi_dma_ch);
754 clear_dma_irqstat(spi_dma_ch);
755
756 /* config dma channel */
88b40369 757 dev_dbg(&drv_data->pdev->dev, "doing dma transfer\n");
a5f6abd4 758 if (width == CFG_SPI_WORDSIZE16) {
759 set_dma_x_count(spi_dma_ch, drv_data->len);
760 set_dma_x_modify(spi_dma_ch, 2);
761 dma_width = WDSIZE_16;
762 } else {
763 set_dma_x_count(spi_dma_ch, drv_data->len);
764 set_dma_x_modify(spi_dma_ch, 1);
765 dma_width = WDSIZE_8;
766 }
767
768 /* dirty hack for autobuffer DMA mode */
769 if (drv_data->tx_dma == 0xFFFF) {
770 dev_dbg(&drv_data->pdev->dev,
771 "doing autobuffer DMA out.\n");
a5f6abd4 772
773 /* set SPI transfer mode */
774 write_CTRL(cr | CFG_SPI_DMAWRITE);
775
776 /* no irq in autobuffer mode */
777 dma_config =
778 (DMAFLOW_AUTO | RESTART | dma_width | DI_EN);
779 set_dma_config(spi_dma_ch, dma_config);
780 set_dma_start_addr(spi_dma_ch,
781 (unsigned long)drv_data->tx);
782 enable_dma(spi_dma_ch);
783
784 /* just return here, there can only be one transfer in this mode */
785 message->status = 0;
786 giveback(drv_data);
787 return;
788 }
789
790 /* In dma mode, rx or tx must be NULL in one transfer */
791 if (drv_data->rx != NULL) {
792 /* set transfer mode, and enable SPI */
88b40369 793 dev_dbg(&drv_data->pdev->dev, "doing DMA in.\n");
a5f6abd4 794
795 /* set SPI transfer mode */
796 write_CTRL(cr | CFG_SPI_DMAREAD);
797
798			/* clear tx reg so former data is not shifted out */
cc487e73 799 write_TDBR(0xFFFF);
a5f6abd4 800
a32c691d 801 set_dma_x_count(spi_dma_ch, drv_data->len);
802
803 /* start dma */
a32c691d 804 dma_enable_irq(spi_dma_ch);
a5f6abd4 805 dma_config = (WNR | RESTART | dma_width | DI_EN);
806 set_dma_config(spi_dma_ch, dma_config);
807 set_dma_start_addr(spi_dma_ch,
808 (unsigned long)drv_data->rx);
809 enable_dma(spi_dma_ch);
a5f6abd4 810
a5f6abd4 811 } else if (drv_data->tx != NULL) {
88b40369 812 dev_dbg(&drv_data->pdev->dev, "doing DMA out.\n");
a5f6abd4 813
814 /* set SPI transfer mode */
815 write_CTRL(cr | CFG_SPI_DMAWRITE);
816
a5f6abd4 817 /* start dma */
a32c691d 818 dma_enable_irq(spi_dma_ch);
a5f6abd4 819 dma_config = (RESTART | dma_width | DI_EN);
820 set_dma_config(spi_dma_ch, dma_config);
821 set_dma_start_addr(spi_dma_ch,
822 (unsigned long)drv_data->tx);
823 enable_dma(spi_dma_ch);
824 }
825 } else {
826 /* IO mode write then read */
88b40369 827 dev_dbg(&drv_data->pdev->dev, "doing IO transfer\n");
a5f6abd4 828
829 if (drv_data->tx != NULL && drv_data->rx != NULL) {
830 /* full duplex mode */
831 BUG_ON((drv_data->tx_end - drv_data->tx) !=
832 (drv_data->rx_end - drv_data->rx));
833 dev_dbg(&drv_data->pdev->dev,
834 "IO duplex: cr is 0x%x\n", cr);
a5f6abd4 835
836 /* set SPI transfer mode */
837 write_CTRL(cr | CFG_SPI_WRITE);
838
839 drv_data->duplex(drv_data);
840
841 if (drv_data->tx != drv_data->tx_end)
842 tranf_success = 0;
843 } else if (drv_data->tx != NULL) {
844 /* write only half duplex */
131b17d4 845 dev_dbg(&drv_data->pdev->dev,
88b40369 846 "IO write: cr is 0x%x\n", cr);
a5f6abd4 847
848 /* set SPI transfer mode */
849 write_CTRL(cr | CFG_SPI_WRITE);
850
851 drv_data->write(drv_data);
852
853 if (drv_data->tx != drv_data->tx_end)
854 tranf_success = 0;
855 } else if (drv_data->rx != NULL) {
856 /* read only half duplex */
131b17d4 857 dev_dbg(&drv_data->pdev->dev,
88b40369 858 "IO read: cr is 0x%x\n", cr);
a5f6abd4 859
860 /* set SPI transfer mode */
861 write_CTRL(cr | CFG_SPI_READ);
862
863 drv_data->read(drv_data);
864 if (drv_data->rx != drv_data->rx_end)
865 tranf_success = 0;
866 }
867
868 if (!tranf_success) {
131b17d4 869 dev_dbg(&drv_data->pdev->dev,
88b40369 870 "IO write error!\n");
871 message->state = ERROR_STATE;
872 } else {
873			/* Update total bytes transferred */
874 message->actual_length += drv_data->len;
875
876 /* Move to next transfer of this msg */
877 message->state = next_transfer(drv_data);
878 }
879
880 /* Schedule next transfer tasklet */
881 tasklet_schedule(&drv_data->pump_transfers);
882
883 }
884}
885
886/* pop a msg from queue and kick off real transfer */
887static void pump_messages(struct work_struct *work)
888{
131b17d4 889 struct driver_data *drv_data;
890 unsigned long flags;
891
892 drv_data = container_of(work, struct driver_data, pump_messages);
893
894 /* Lock queue and check for queue work */
895 spin_lock_irqsave(&drv_data->lock, flags);
896 if (list_empty(&drv_data->queue) || drv_data->run == QUEUE_STOPPED) {
897 /* pumper kicked off but no work to do */
898 drv_data->busy = 0;
899 spin_unlock_irqrestore(&drv_data->lock, flags);
900 return;
901 }
902
903 /* Make sure we are not already running a message */
904 if (drv_data->cur_msg) {
905 spin_unlock_irqrestore(&drv_data->lock, flags);
906 return;
907 }
908
909 /* Extract head of queue */
910 drv_data->cur_msg = list_entry(drv_data->queue.next,
911 struct spi_message, queue);
912
913 /* Setup the SSP using the per chip configuration */
914 drv_data->cur_chip = spi_get_ctldata(drv_data->cur_msg->spi);
915 if (restore_state(drv_data)) {
916 spin_unlock_irqrestore(&drv_data->lock, flags);
917 return;
918 };
919
920 list_del_init(&drv_data->cur_msg->queue);
921
922 /* Initial message state */
923 drv_data->cur_msg->state = START_STATE;
924 drv_data->cur_transfer = list_entry(drv_data->cur_msg->transfers.next,
925 struct spi_transfer, transfer_list);
926
927 dev_dbg(&drv_data->pdev->dev, "got a message to pump, "
928 "state is set to: baud %d, flag 0x%x, ctl 0x%x\n",
929 drv_data->cur_chip->baud, drv_data->cur_chip->flag,
930 drv_data->cur_chip->ctl_reg);
931
932 dev_dbg(&drv_data->pdev->dev,
933 "the first transfer len is %d\n",
934 drv_data->cur_transfer->len);
935
936 /* Mark as busy and launch transfers */
937 tasklet_schedule(&drv_data->pump_transfers);
938
939 drv_data->busy = 1;
940 spin_unlock_irqrestore(&drv_data->lock, flags);
941}
942
943/*
944 * got a msg to transfer, queue it in drv_data->queue.
945 * And kick off message pumper
946 */
947static int transfer(struct spi_device *spi, struct spi_message *msg)
948{
949 struct driver_data *drv_data = spi_master_get_devdata(spi->master);
950 unsigned long flags;
951
952 spin_lock_irqsave(&drv_data->lock, flags);
953
954 if (drv_data->run == QUEUE_STOPPED) {
955 spin_unlock_irqrestore(&drv_data->lock, flags);
956 return -ESHUTDOWN;
957 }
958
959 msg->actual_length = 0;
960 msg->status = -EINPROGRESS;
961 msg->state = START_STATE;
962
88b40369 963 dev_dbg(&spi->dev, "adding an msg in transfer() \n");
964 list_add_tail(&msg->queue, &drv_data->queue);
965
966 if (drv_data->run == QUEUE_RUNNING && !drv_data->busy)
967 queue_work(drv_data->workqueue, &drv_data->pump_messages);
968
969 spin_unlock_irqrestore(&drv_data->lock, flags);
970
971 return 0;
972}
973
974#define MAX_SPI_SSEL 7
975
976static u16 ssel[3][MAX_SPI_SSEL] = {
977 {P_SPI0_SSEL1, P_SPI0_SSEL2, P_SPI0_SSEL3,
978 P_SPI0_SSEL4, P_SPI0_SSEL5,
979 P_SPI0_SSEL6, P_SPI0_SSEL7},
980
981 {P_SPI1_SSEL1, P_SPI1_SSEL2, P_SPI1_SSEL3,
982 P_SPI1_SSEL4, P_SPI1_SSEL5,
983 P_SPI1_SSEL6, P_SPI1_SSEL7},
984
985 {P_SPI2_SSEL1, P_SPI2_SSEL2, P_SPI2_SSEL3,
986 P_SPI2_SSEL4, P_SPI2_SSEL5,
987 P_SPI2_SSEL6, P_SPI2_SSEL7},
988};
989
990/* first setup for new devices */
991static int setup(struct spi_device *spi)
992{
993 struct bfin5xx_spi_chip *chip_info = NULL;
994 struct chip_data *chip;
995 struct driver_data *drv_data = spi_master_get_devdata(spi->master);
996 u8 spi_flg;
997
998 /* Abort device setup if requested features are not supported */
999 if (spi->mode & ~(SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST)) {
1000 dev_err(&spi->dev, "requested mode not fully supported\n");
1001 return -EINVAL;
1002 }
1003
1004 /* Zero (the default) here means 8 bits */
1005 if (!spi->bits_per_word)
1006 spi->bits_per_word = 8;
1007
1008 if (spi->bits_per_word != 8 && spi->bits_per_word != 16)
1009 return -EINVAL;
1010
1011 /* Only alloc (or use chip_info) on first setup */
1012 chip = spi_get_ctldata(spi);
1013 if (chip == NULL) {
1014 chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL);
1015 if (!chip)
1016 return -ENOMEM;
1017
1018 chip->enable_dma = 0;
1019 chip_info = spi->controller_data;
1020 }
1021
1022 /* chip_info isn't always needed */
1023 if (chip_info) {
1024 /* Make sure people stop trying to set fields via ctl_reg
1025 * when they should actually be using common SPI framework.
1026 * Currently we let through: WOM EMISO PSSE GM SZ TIMOD.
1027 * Not sure if a user actually needs/uses any of these,
1028 * but let's assume (for now) they do.
1029 */
1030 if (chip_info->ctl_reg & (SPE|MSTR|CPOL|CPHA|LSBF|SIZE)) {
1031 dev_err(&spi->dev, "do not set bits in ctl_reg "
1032 "that the SPI framework manages\n");
1033 return -EINVAL;
1034 }
1035
1036 chip->enable_dma = chip_info->enable_dma != 0
1037 && drv_data->master_info->enable_dma;
1038 chip->ctl_reg = chip_info->ctl_reg;
1039 chip->bits_per_word = chip_info->bits_per_word;
1040 chip->cs_change_per_word = chip_info->cs_change_per_word;
1041 chip->cs_chg_udelay = chip_info->cs_chg_udelay;
1042 }
1043
1044 /* translate common spi framework into our register */
1045 if (spi->mode & SPI_CPOL)
1046 chip->ctl_reg |= CPOL;
1047 if (spi->mode & SPI_CPHA)
1048 chip->ctl_reg |= CPHA;
1049 if (spi->mode & SPI_LSB_FIRST)
1050 chip->ctl_reg |= LSBF;
1051 /* we dont support running in slave mode (yet?) */
1052 chip->ctl_reg |= MSTR;
1053
1054 /*
1055 * if any one SPI chip is registered and wants DMA, request the
1056 * DMA channel for it
1057 */
1058 if (chip->enable_dma && !dma_requested) {
1059 /* register dma irq handler */
a32c691d 1060 if (request_dma(spi_dma_ch, "BF53x_SPI_DMA") < 0) {
1061 dev_dbg(&spi->dev,
1062 "Unable to request BlackFin SPI DMA channel\n");
1063 return -ENODEV;
1064 }
1065 if (set_dma_callback(spi_dma_ch, (void *)dma_irq_handler,
1066 drv_data) < 0) {
88b40369 1067 dev_dbg(&spi->dev, "Unable to set dma callback\n");
1068 return -EPERM;
1069 }
a32c691d 1070 dma_disable_irq(spi_dma_ch);
1071 dma_requested = 1;
1072 }
1073
1074 /*
1075 * Notice: for blackfin, the speed_hz is the value of register
1076 * SPI_BAUD, not the real baudrate
1077 */
1078 chip->baud = hz_to_spi_baud(spi->max_speed_hz);
1079 spi_flg = ~(1 << (spi->chip_select));
1080 chip->flag = ((u16) spi_flg << 8) | (1 << (spi->chip_select));
1081 chip->chip_select_num = spi->chip_select;
1082
1083 switch (chip->bits_per_word) {
1084 case 8:
1085 chip->n_bytes = 1;
1086 chip->width = CFG_SPI_WORDSIZE8;
1087 chip->read = chip->cs_change_per_word ?
1088 u8_cs_chg_reader : u8_reader;
1089 chip->write = chip->cs_change_per_word ?
1090 u8_cs_chg_writer : u8_writer;
1091 chip->duplex = chip->cs_change_per_word ?
1092 u8_cs_chg_duplex : u8_duplex;
1093 break;
1094
1095 case 16:
1096 chip->n_bytes = 2;
1097 chip->width = CFG_SPI_WORDSIZE16;
1098 chip->read = chip->cs_change_per_word ?
1099 u16_cs_chg_reader : u16_reader;
1100 chip->write = chip->cs_change_per_word ?
1101 u16_cs_chg_writer : u16_writer;
1102 chip->duplex = chip->cs_change_per_word ?
1103 u16_cs_chg_duplex : u16_duplex;
1104 break;
1105
1106 default:
1107 dev_err(&spi->dev, "%d bits_per_word is not supported\n",
1108 chip->bits_per_word);
1109 kfree(chip);
1110 return -ENODEV;
1111 }
1112
898eb71c 1113 dev_dbg(&spi->dev, "setup spi chip %s, width is %d, dma is %d\n",
a5f6abd4 1114 spi->modalias, chip->width, chip->enable_dma);
88b40369 1115 dev_dbg(&spi->dev, "ctl_reg is 0x%x, flag_reg is 0x%x\n",
1116 chip->ctl_reg, chip->flag);
1117
1118 spi_set_ctldata(spi, chip);
1119
1120 dev_dbg(&spi->dev, "chip select number is %d\n", chip->chip_select_num);
1121 if ((chip->chip_select_num > 0)
1122 && (chip->chip_select_num <= spi->master->num_chipselect))
1123 peripheral_request(ssel[spi->master->bus_num]
1124 [chip->chip_select_num-1], DRV_NAME);
1125
1126 return 0;
1127}
1128
1129/*
1130 * callback for spi framework.
1131 * clean driver specific data
1132 */
88b40369 1133static void cleanup(struct spi_device *spi)
a5f6abd4 1134{
27bb9e79 1135 struct chip_data *chip = spi_get_ctldata(spi);
a5f6abd4 1136
1137 if ((chip->chip_select_num > 0)
1138 && (chip->chip_select_num <= spi->master->num_chipselect))
1139 peripheral_free(ssel[spi->master->bus_num]
1140 [chip->chip_select_num-1]);
1141
1142 kfree(chip);
1143}
1144
1145static inline int init_queue(struct driver_data *drv_data)
1146{
1147 INIT_LIST_HEAD(&drv_data->queue);
1148 spin_lock_init(&drv_data->lock);
1149
1150 drv_data->run = QUEUE_STOPPED;
1151 drv_data->busy = 0;
1152
1153 /* init transfer tasklet */
1154 tasklet_init(&drv_data->pump_transfers,
1155 pump_transfers, (unsigned long)drv_data);
1156
1157 /* init messages workqueue */
1158 INIT_WORK(&drv_data->pump_messages, pump_messages);
1159 drv_data->workqueue =
49dce689 1160 create_singlethread_workqueue(drv_data->master->dev.parent->bus_id);
1161 if (drv_data->workqueue == NULL)
1162 return -EBUSY;
1163
1164 return 0;
1165}
1166
1167static inline int start_queue(struct driver_data *drv_data)
1168{
1169 unsigned long flags;
1170
1171 spin_lock_irqsave(&drv_data->lock, flags);
1172
1173 if (drv_data->run == QUEUE_RUNNING || drv_data->busy) {
1174 spin_unlock_irqrestore(&drv_data->lock, flags);
1175 return -EBUSY;
1176 }
1177
1178 drv_data->run = QUEUE_RUNNING;
1179 drv_data->cur_msg = NULL;
1180 drv_data->cur_transfer = NULL;
1181 drv_data->cur_chip = NULL;
1182 spin_unlock_irqrestore(&drv_data->lock, flags);
1183
1184 queue_work(drv_data->workqueue, &drv_data->pump_messages);
1185
1186 return 0;
1187}
1188
1189static inline int stop_queue(struct driver_data *drv_data)
1190{
1191 unsigned long flags;
1192 unsigned limit = 500;
1193 int status = 0;
1194
1195 spin_lock_irqsave(&drv_data->lock, flags);
1196
1197 /*
1198 * This is a bit lame, but is optimized for the common execution path.
1199 * A wait_queue on the drv_data->busy could be used, but then the common
1200 * execution path (pump_messages) would be required to call wake_up or
1201 * friends on every SPI message. Do this instead
1202 */
1203 drv_data->run = QUEUE_STOPPED;
1204 while (!list_empty(&drv_data->queue) && drv_data->busy && limit--) {
1205 spin_unlock_irqrestore(&drv_data->lock, flags);
1206 msleep(10);
1207 spin_lock_irqsave(&drv_data->lock, flags);
1208 }
1209
1210 if (!list_empty(&drv_data->queue) || drv_data->busy)
1211 status = -EBUSY;
1212
1213 spin_unlock_irqrestore(&drv_data->lock, flags);
1214
1215 return status;
1216}
1217
1218static inline int destroy_queue(struct driver_data *drv_data)
1219{
1220 int status;
1221
1222 status = stop_queue(drv_data);
1223 if (status != 0)
1224 return status;
1225
1226 destroy_workqueue(drv_data->workqueue);
1227
1228 return 0;
1229}
1230
7c4ef094 1231static int setup_pin_mux(int action, int bus_num)
1232{
1233
1234 u16 pin_req[3][4] = {
1235 {P_SPI0_SCK, P_SPI0_MISO, P_SPI0_MOSI, 0},
1236 {P_SPI1_SCK, P_SPI1_MISO, P_SPI1_MOSI, 0},
1237 {P_SPI2_SCK, P_SPI2_MISO, P_SPI2_MOSI, 0},
1238 };
1239
1240 if (action) {
7c4ef094 1241 if (peripheral_request_list(pin_req[bus_num], DRV_NAME))
1242 return -EFAULT;
1243 } else {
7c4ef094 1244 peripheral_free_list(pin_req[bus_num]);
1245 }
1246
1247 return 0;
1248}
1249
1250static int __init bfin5xx_spi_probe(struct platform_device *pdev)
1251{
1252 struct device *dev = &pdev->dev;
1253 struct bfin5xx_spi_master *platform_info;
1254 struct spi_master *master;
1255 struct driver_data *drv_data = 0;
a32c691d 1256 struct resource *res;
1257 int status = 0;
1258
1259 platform_info = dev->platform_data;
1260
1261 /* Allocate master with space for drv_data */
1262 master = spi_alloc_master(dev, sizeof(struct driver_data) + 16);
1263 if (!master) {
1264 dev_err(&pdev->dev, "can not alloc spi_master\n");
1265 return -ENOMEM;
1266 }
131b17d4 1267
1268 drv_data = spi_master_get_devdata(master);
1269 drv_data->master = master;
1270 drv_data->master_info = platform_info;
1271 drv_data->pdev = pdev;
1272
1273 master->bus_num = pdev->id;
1274 master->num_chipselect = platform_info->num_chipselect;
1275 master->cleanup = cleanup;
1276 master->setup = setup;
1277 master->transfer = transfer;
1278
1279 /* Find and map our resources */
1280 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1281 if (res == NULL) {
1282 dev_err(dev, "Cannot get IORESOURCE_MEM\n");
1283 status = -ENOENT;
1284 goto out_error_get_res;
1285 }
1286
1287 spi_regs_base = (u32) ioremap(res->start, (res->end - res->start)+1);
1288 if (!spi_regs_base) {
1289 dev_err(dev, "Cannot map IO\n");
1290 status = -ENXIO;
1291 goto out_error_ioremap;
1292 }
1293
1294 spi_dma_ch = platform_get_irq(pdev, 0);
1295	if ((int) spi_dma_ch < 0) {	/* platform_get_irq() returns a signed error code */
1296 dev_err(dev, "No DMA channel specified\n");
1297 status = -ENOENT;
1298 goto out_error_no_dma_ch;
1299 }
1300
1301 /* Initial and start queue */
1302 status = init_queue(drv_data);
1303 if (status != 0) {
a32c691d 1304 dev_err(dev, "problem initializing queue\n");
1305 goto out_error_queue_alloc;
1306 }
a32c691d 1307
1308 status = start_queue(drv_data);
1309 if (status != 0) {
a32c691d 1310 dev_err(dev, "problem starting queue\n");
1311 goto out_error_queue_alloc;
1312 }
1313
1314 /* Register with the SPI framework */
1315 platform_set_drvdata(pdev, drv_data);
1316 status = spi_register_master(master);
1317 if (status != 0) {
a32c691d 1318 dev_err(dev, "problem registering spi master\n");
1319 goto out_error_queue_alloc;
1320 }
a32c691d 1321
1322 if (setup_pin_mux(1, master->bus_num)) {
1323 dev_err(&pdev->dev, ": Requesting Peripherals failed\n");
1324 goto out_error;
1325 }
1326
1327 dev_info(dev, "%s, Version %s, regs_base @ 0x%08x\n",
1328 DRV_DESC, DRV_VERSION, spi_regs_base);
1329 return status;
1330
cc2f81a6 1331out_error_queue_alloc:
a5f6abd4 1332 destroy_queue(drv_data);
1333out_error_no_dma_ch:
1334 iounmap((void *) spi_regs_base);
1335out_error_ioremap:
1336out_error_get_res:
cc2f81a6 1337out_error:
a5f6abd4 1338 spi_master_put(master);
cc2f81a6 1339
1340 return status;
1341}
1342
1343/* stop hardware and remove the driver */
1344static int __devexit bfin5xx_spi_remove(struct platform_device *pdev)
1345{
1346 struct driver_data *drv_data = platform_get_drvdata(pdev);
1347 int status = 0;
1348
1349 if (!drv_data)
1350 return 0;
1351
1352 /* Remove the queue */
1353 status = destroy_queue(drv_data);
1354 if (status != 0)
1355 return status;
1356
1357 /* Disable the SSP at the peripheral and SOC level */
1358 bfin_spi_disable(drv_data);
1359
1360 /* Release DMA */
1361 if (drv_data->master_info->enable_dma) {
1362 if (dma_channel_active(spi_dma_ch))
1363 free_dma(spi_dma_ch);
1364 }
1365
1366 /* Disconnect from the SPI framework */
1367 spi_unregister_master(drv_data->master);
1368
7c4ef094 1369 setup_pin_mux(0, drv_data->master->bus_num);
cc2f81a6 1370
1371 /* Prevent double remove */
1372 platform_set_drvdata(pdev, NULL);
1373
1374 return 0;
1375}
1376
1377#ifdef CONFIG_PM
1378static int bfin5xx_spi_suspend(struct platform_device *pdev, pm_message_t state)
1379{
1380 struct driver_data *drv_data = platform_get_drvdata(pdev);
1381 int status = 0;
1382
1383 status = stop_queue(drv_data);
1384 if (status != 0)
1385 return status;
1386
1387 /* stop hardware */
1388 bfin_spi_disable(drv_data);
1389
1390 return 0;
1391}
1392
1393static int bfin5xx_spi_resume(struct platform_device *pdev)
1394{
1395 struct driver_data *drv_data = platform_get_drvdata(pdev);
1396 int status = 0;
1397
1398 /* Enable the SPI interface */
1399 bfin_spi_enable(drv_data);
1400
1401 /* Start the queue running */
1402 status = start_queue(drv_data);
1403 if (status != 0) {
1404 dev_err(&pdev->dev, "problem starting queue (%d)\n", status);
1405 return status;
1406 }
1407
1408 return 0;
1409}
1410#else
1411#define bfin5xx_spi_suspend NULL
1412#define bfin5xx_spi_resume NULL
1413#endif /* CONFIG_PM */
1414
fc3ba952 1415MODULE_ALIAS("bfin-spi-master"); /* for platform bus hotplug */
a5f6abd4 1416static struct platform_driver bfin5xx_spi_driver = {
fc3ba952 1417 .driver = {
a32c691d 1418 .name = DRV_NAME,
1419 .owner = THIS_MODULE,
1420 },
1421 .suspend = bfin5xx_spi_suspend,
1422 .resume = bfin5xx_spi_resume,
1423 .remove = __devexit_p(bfin5xx_spi_remove),
1424};
1425
1426static int __init bfin5xx_spi_init(void)
1427{
88b40369 1428 return platform_driver_probe(&bfin5xx_spi_driver, bfin5xx_spi_probe);
a5f6abd4 1429}
1430module_init(bfin5xx_spi_init);
1431
1432static void __exit bfin5xx_spi_exit(void)
1433{
1434 platform_driver_unregister(&bfin5xx_spi_driver);
1435}
a5f6abd4 1436module_exit(bfin5xx_spi_exit);