/*
 * IMG SPFI controller driver
 *
 * Copyright (C) 2007,2008,2013 Imagination Technologies Ltd.
 * Copyright (C) 2014 Google, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
#include <linux/spinlock.h>

#define SPFI_DEVICE_PARAMETER(x)	(0x00 + 0x4 * (x))
#define SPFI_DEVICE_PARAMETER_BITCLK_SHIFT	24
#define SPFI_DEVICE_PARAMETER_BITCLK_MASK	0xff
#define SPFI_DEVICE_PARAMETER_CSSETUP_SHIFT	16
#define SPFI_DEVICE_PARAMETER_CSSETUP_MASK	0xff
#define SPFI_DEVICE_PARAMETER_CSHOLD_SHIFT	8
#define SPFI_DEVICE_PARAMETER_CSHOLD_MASK	0xff
#define SPFI_DEVICE_PARAMETER_CSDELAY_SHIFT	0
#define SPFI_DEVICE_PARAMETER_CSDELAY_MASK	0xff

#define SPFI_CONTROL			0x14
#define SPFI_CONTROL_CONTINUE		BIT(12)
#define SPFI_CONTROL_SOFT_RESET		BIT(11)
#define SPFI_CONTROL_SEND_DMA		BIT(10)
#define SPFI_CONTROL_GET_DMA		BIT(9)
#define SPFI_CONTROL_TMODE_SHIFT	5
#define SPFI_CONTROL_TMODE_MASK		0x7
#define SPFI_CONTROL_TMODE_SINGLE	0
#define SPFI_CONTROL_TMODE_DUAL		1
#define SPFI_CONTROL_TMODE_QUAD		2
#define SPFI_CONTROL_SPFI_EN		BIT(0)

#define SPFI_TRANSACTION		0x18
#define SPFI_TRANSACTION_TSIZE_SHIFT	16
#define SPFI_TRANSACTION_TSIZE_MASK	0xffff

#define SPFI_PORT_STATE			0x1c
#define SPFI_PORT_STATE_DEV_SEL_SHIFT	20
#define SPFI_PORT_STATE_DEV_SEL_MASK	0x7
#define SPFI_PORT_STATE_CK_POL(x)	BIT(19 - (x))
#define SPFI_PORT_STATE_CK_PHASE(x)	BIT(14 - (x))
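/*
 * CK_POL(x)/CK_PHASE(x) give each chip select its own clock polarity and
 * phase bit; img_spfi_prepare() sets them from the SPI device's
 * SPI_CPOL/SPI_CPHA mode flags.
 */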

#define SPFI_TX_32BIT_VALID_DATA	0x20
#define SPFI_TX_8BIT_VALID_DATA		0x24
#define SPFI_RX_32BIT_VALID_DATA	0x28
#define SPFI_RX_8BIT_VALID_DATA		0x2c

#define SPFI_INTERRUPT_STATUS		0x30
#define SPFI_INTERRUPT_ENABLE		0x34
#define SPFI_INTERRUPT_CLEAR		0x38
#define SPFI_INTERRUPT_IACCESS		BIT(12)
#define SPFI_INTERRUPT_GDEX8BIT		BIT(11)
#define SPFI_INTERRUPT_ALLDONETRIG	BIT(9)
#define SPFI_INTERRUPT_GDFUL		BIT(8)
#define SPFI_INTERRUPT_GDHF		BIT(7)
#define SPFI_INTERRUPT_GDEX32BIT	BIT(6)
#define SPFI_INTERRUPT_GDTRIG		BIT(5)
#define SPFI_INTERRUPT_SDFUL		BIT(3)
#define SPFI_INTERRUPT_SDHF		BIT(2)
#define SPFI_INTERRUPT_SDE		BIT(1)
#define SPFI_INTERRUPT_SDTRIG		BIT(0)

/*
 * There are four parallel FIFOs of 16 bytes each. The word buffer
 * (*_32BIT_VALID_DATA) accesses all four FIFOs at once, resulting in an
 * effective FIFO size of 64 bytes. The byte buffer (*_8BIT_VALID_DATA)
 * accesses only a single FIFO, resulting in an effective FIFO size of
 * 16 bytes.
 */
#define SPFI_32BIT_FIFO_SIZE		64
#define SPFI_8BIT_FIFO_SIZE		16
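/*
 * These sizes also set the PIO/DMA threshold: img_spfi_can_dma() below
 * uses PIO for any transfer that fits within the 64-byte word FIFO and
 * only uses DMA for larger transfers.
 */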

struct img_spfi {
	struct device *dev;
	struct spi_master *master;
	spinlock_t lock;

	void __iomem *regs;
	phys_addr_t phys;
	int irq;
	struct clk *spfi_clk;
	struct clk *sys_clk;

	struct dma_chan *rx_ch;
	struct dma_chan *tx_ch;
	bool tx_dma_busy;
	bool rx_dma_busy;
};

static inline u32 spfi_readl(struct img_spfi *spfi, u32 reg)
{
	return readl(spfi->regs + reg);
}

static inline void spfi_writel(struct img_spfi *spfi, u32 val, u32 reg)
{
	writel(val, spfi->regs + reg);
}

static inline void spfi_start(struct img_spfi *spfi)
{
	u32 val;

	val = spfi_readl(spfi, SPFI_CONTROL);
	val |= SPFI_CONTROL_SPFI_EN;
	spfi_writel(spfi, val, SPFI_CONTROL);
}

static inline void spfi_reset(struct img_spfi *spfi)
{
	spfi_writel(spfi, SPFI_CONTROL_SOFT_RESET, SPFI_CONTROL);
	spfi_writel(spfi, 0, SPFI_CONTROL);
}

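/*
 * Poll for the "all done" trigger, which the controller raises once the
 * programmed transaction has fully completed, and soft-reset the block if
 * it has not arrived within 50 ms.
 */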
static int spfi_wait_all_done(struct img_spfi *spfi)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(50);

	while (time_before(jiffies, timeout)) {
		u32 status = spfi_readl(spfi, SPFI_INTERRUPT_STATUS);

		if (status & SPFI_INTERRUPT_ALLDONETRIG) {
			spfi_writel(spfi, SPFI_INTERRUPT_ALLDONETRIG,
				    SPFI_INTERRUPT_CLEAR);
			return 0;
		}
		cpu_relax();
	}

	dev_err(spfi->dev, "Timed out waiting for transaction to complete\n");
	spfi_reset(spfi);

	return -ETIMEDOUT;
}

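/*
 * PIO helpers: fill the TX FIFO until the "send FIFO full" (SDFUL) status
 * is set, or drain the RX FIFO while data is available (GDEX32BIT/GDEX8BIT).
 * The 32-bit variants move four bytes per register access; all four helpers
 * return the number of bytes actually transferred.
 */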
static unsigned int spfi_pio_write32(struct img_spfi *spfi, const u32 *buf,
				     unsigned int max)
{
	unsigned int count = 0;
	u32 status;

	while (count < max / 4) {
		spfi_writel(spfi, SPFI_INTERRUPT_SDFUL, SPFI_INTERRUPT_CLEAR);
		status = spfi_readl(spfi, SPFI_INTERRUPT_STATUS);
		if (status & SPFI_INTERRUPT_SDFUL)
			break;
		spfi_writel(spfi, buf[count], SPFI_TX_32BIT_VALID_DATA);
		count++;
	}

	return count * 4;
}

static unsigned int spfi_pio_write8(struct img_spfi *spfi, const u8 *buf,
				    unsigned int max)
{
	unsigned int count = 0;
	u32 status;

	while (count < max) {
		spfi_writel(spfi, SPFI_INTERRUPT_SDFUL, SPFI_INTERRUPT_CLEAR);
		status = spfi_readl(spfi, SPFI_INTERRUPT_STATUS);
		if (status & SPFI_INTERRUPT_SDFUL)
			break;
		spfi_writel(spfi, buf[count], SPFI_TX_8BIT_VALID_DATA);
		count++;
	}

	return count;
}

static unsigned int spfi_pio_read32(struct img_spfi *spfi, u32 *buf,
				    unsigned int max)
{
	unsigned int count = 0;
	u32 status;

	while (count < max / 4) {
		spfi_writel(spfi, SPFI_INTERRUPT_GDEX32BIT,
			    SPFI_INTERRUPT_CLEAR);
		status = spfi_readl(spfi, SPFI_INTERRUPT_STATUS);
		if (!(status & SPFI_INTERRUPT_GDEX32BIT))
			break;
		buf[count] = spfi_readl(spfi, SPFI_RX_32BIT_VALID_DATA);
		count++;
	}

	return count * 4;
}

static unsigned int spfi_pio_read8(struct img_spfi *spfi, u8 *buf,
				   unsigned int max)
{
	unsigned int count = 0;
	u32 status;

	while (count < max) {
		spfi_writel(spfi, SPFI_INTERRUPT_GDEX8BIT,
			    SPFI_INTERRUPT_CLEAR);
		status = spfi_readl(spfi, SPFI_INTERRUPT_STATUS);
		if (!(status & SPFI_INTERRUPT_GDEX8BIT))
			break;
		buf[count] = spfi_readl(spfi, SPFI_RX_8BIT_VALID_DATA);
		count++;
	}

	return count;
}

static int img_spfi_start_pio(struct spi_master *master,
			      struct spi_device *spi,
			      struct spi_transfer *xfer)
{
	struct img_spfi *spfi = spi_master_get_devdata(spi->master);
	unsigned int tx_bytes = 0, rx_bytes = 0;
	const void *tx_buf = xfer->tx_buf;
	void *rx_buf = xfer->rx_buf;
	unsigned long timeout;
	int ret;

	if (tx_buf)
		tx_bytes = xfer->len;
	if (rx_buf)
		rx_bytes = xfer->len;

	spfi_start(spfi);

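	/*
	 * Budget roughly the time the transfer should take on the wire
	 * (len bytes * 8 bits at speed_hz, converted to milliseconds)
	 * plus a 100 ms margin before declaring the PIO loop timed out.
	 */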
	timeout = jiffies +
		msecs_to_jiffies(xfer->len * 8 * 1000 / xfer->speed_hz + 100);
	while ((tx_bytes > 0 || rx_bytes > 0) &&
	       time_before(jiffies, timeout)) {
		unsigned int tx_count, rx_count;

		if (tx_bytes >= 4)
			tx_count = spfi_pio_write32(spfi, tx_buf, tx_bytes);
		else
			tx_count = spfi_pio_write8(spfi, tx_buf, tx_bytes);

		if (rx_bytes >= 4)
			rx_count = spfi_pio_read32(spfi, rx_buf, rx_bytes);
		else
			rx_count = spfi_pio_read8(spfi, rx_buf, rx_bytes);

		tx_buf += tx_count;
		rx_buf += rx_count;
		tx_bytes -= tx_count;
		rx_bytes -= rx_count;

		cpu_relax();
	}

	ret = spfi_wait_all_done(spfi);
	if (ret < 0)
		return ret;

	if (rx_bytes > 0 || tx_bytes > 0) {
		dev_err(spfi->dev, "PIO transfer timed out\n");
		return -ETIMEDOUT;
	}

	return 0;
}

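/*
 * DMA completion callbacks: the SPI core is only told the transfer is
 * finished once both the TX and RX channels (when both are in use) have
 * completed, with spfi->lock protecting the two busy flags.
 */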
static void img_spfi_dma_rx_cb(void *data)
{
	struct img_spfi *spfi = data;
	unsigned long flags;

	spfi_wait_all_done(spfi);

	spin_lock_irqsave(&spfi->lock, flags);
	spfi->rx_dma_busy = false;
	if (!spfi->tx_dma_busy)
		spi_finalize_current_transfer(spfi->master);
	spin_unlock_irqrestore(&spfi->lock, flags);
}

static void img_spfi_dma_tx_cb(void *data)
{
	struct img_spfi *spfi = data;
	unsigned long flags;

	spfi_wait_all_done(spfi);

	spin_lock_irqsave(&spfi->lock, flags);
	spfi->tx_dma_busy = false;
	if (!spfi->rx_dma_busy)
		spi_finalize_current_transfer(spfi->master);
	spin_unlock_irqrestore(&spfi->lock, flags);
}

static int img_spfi_start_dma(struct spi_master *master,
			      struct spi_device *spi,
			      struct spi_transfer *xfer)
{
	struct img_spfi *spfi = spi_master_get_devdata(spi->master);
	struct dma_async_tx_descriptor *rxdesc = NULL, *txdesc = NULL;
	struct dma_slave_config rxconf, txconf;

	spfi->rx_dma_busy = false;
	spfi->tx_dma_busy = false;

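	/*
	 * Transfers that are a multiple of 4 bytes are serviced through the
	 * 32-bit FIFO port; any other length falls back to the 8-bit port.
	 * Both directions use a DMA burst size of 4.
	 */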
	if (xfer->rx_buf) {
		rxconf.direction = DMA_DEV_TO_MEM;
		if (xfer->len % 4 == 0) {
			rxconf.src_addr = spfi->phys + SPFI_RX_32BIT_VALID_DATA;
			rxconf.src_addr_width = 4;
			rxconf.src_maxburst = 4;
		} else {
			rxconf.src_addr = spfi->phys + SPFI_RX_8BIT_VALID_DATA;
			rxconf.src_addr_width = 1;
			rxconf.src_maxburst = 4;
		}
		dmaengine_slave_config(spfi->rx_ch, &rxconf);

		rxdesc = dmaengine_prep_slave_sg(spfi->rx_ch, xfer->rx_sg.sgl,
						 xfer->rx_sg.nents,
						 DMA_DEV_TO_MEM,
						 DMA_PREP_INTERRUPT);
		if (!rxdesc)
			goto stop_dma;

		rxdesc->callback = img_spfi_dma_rx_cb;
		rxdesc->callback_param = spfi;
	}

	if (xfer->tx_buf) {
		txconf.direction = DMA_MEM_TO_DEV;
		if (xfer->len % 4 == 0) {
			txconf.dst_addr = spfi->phys + SPFI_TX_32BIT_VALID_DATA;
			txconf.dst_addr_width = 4;
			txconf.dst_maxburst = 4;
		} else {
			txconf.dst_addr = spfi->phys + SPFI_TX_8BIT_VALID_DATA;
			txconf.dst_addr_width = 1;
			txconf.dst_maxburst = 4;
		}
		dmaengine_slave_config(spfi->tx_ch, &txconf);

		txdesc = dmaengine_prep_slave_sg(spfi->tx_ch, xfer->tx_sg.sgl,
						 xfer->tx_sg.nents,
						 DMA_MEM_TO_DEV,
						 DMA_PREP_INTERRUPT);
		if (!txdesc)
			goto stop_dma;

		txdesc->callback = img_spfi_dma_tx_cb;
		txdesc->callback_param = spfi;
	}

	if (xfer->rx_buf) {
		spfi->rx_dma_busy = true;
		dmaengine_submit(rxdesc);
		dma_async_issue_pending(spfi->rx_ch);
	}

	spfi_start(spfi);

	if (xfer->tx_buf) {
		spfi->tx_dma_busy = true;
		dmaengine_submit(txdesc);
		dma_async_issue_pending(spfi->tx_ch);
	}

	return 1;

stop_dma:
	dmaengine_terminate_all(spfi->rx_ch);
	dmaengine_terminate_all(spfi->tx_ch);
	return -EIO;
}

static void img_spfi_handle_err(struct spi_master *master,
				struct spi_message *msg)
{
	struct img_spfi *spfi = spi_master_get_devdata(master);
	unsigned long flags;

	/*
	 * Stop all DMA and reset the controller if the previous transaction
	 * timed out and never completed its DMA.
	 */
	spin_lock_irqsave(&spfi->lock, flags);
	if (spfi->tx_dma_busy || spfi->rx_dma_busy) {
		spfi->tx_dma_busy = false;
		spfi->rx_dma_busy = false;

		dmaengine_terminate_all(spfi->tx_ch);
		dmaengine_terminate_all(spfi->rx_ch);
	}
	spin_unlock_irqrestore(&spfi->lock, flags);
}

static int img_spfi_prepare(struct spi_master *master, struct spi_message *msg)
{
	struct img_spfi *spfi = spi_master_get_devdata(master);
	u32 val;

	val = spfi_readl(spfi, SPFI_PORT_STATE);
	if (msg->spi->mode & SPI_CPHA)
		val |= SPFI_PORT_STATE_CK_PHASE(msg->spi->chip_select);
	else
		val &= ~SPFI_PORT_STATE_CK_PHASE(msg->spi->chip_select);
	if (msg->spi->mode & SPI_CPOL)
		val |= SPFI_PORT_STATE_CK_POL(msg->spi->chip_select);
	else
		val &= ~SPFI_PORT_STATE_CK_POL(msg->spi->chip_select);
	spfi_writel(spfi, val, SPFI_PORT_STATE);

	return 0;
}

static int img_spfi_unprepare(struct spi_master *master,
			      struct spi_message *msg)
{
	struct img_spfi *spfi = spi_master_get_devdata(master);

	spfi_reset(spfi);

	return 0;
}

static int img_spfi_setup(struct spi_device *spi)
{
	int ret;

	ret = gpio_request_one(spi->cs_gpio, (spi->mode & SPI_CS_HIGH) ?
			       GPIOF_OUT_INIT_LOW : GPIOF_OUT_INIT_HIGH,
			       dev_name(&spi->dev));
	if (ret)
		dev_err(&spi->dev, "can't request chipselect gpio %d\n",
			spi->cs_gpio);

	return ret;
}

static void img_spfi_cleanup(struct spi_device *spi)
{
	gpio_free(spi->cs_gpio);
}

static void img_spfi_config(struct spi_master *master, struct spi_device *spi,
			    struct spi_transfer *xfer)
{
	struct img_spfi *spfi = spi_master_get_devdata(spi->master);
	u32 val, div;

	/*
	 * output = spfi_clk * (BITCLK / 512), where BITCLK must be a
	 * power of 2 up to 128
	 */
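	/*
	 * Worked example with hypothetical rates: for a 50 MHz spfi_clk and
	 * a requested 5 MHz, div = DIV_ROUND_UP(50000000, 5000000) = 10 and
	 * get_count_order(10) = 4, so BITCLK = clamp(512 / 16, 1, 128) = 32,
	 * giving an output clock of 50 MHz * 32 / 512 = 3.125 MHz, the
	 * largest power-of-two setting that does not exceed the request.
	 */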
	div = DIV_ROUND_UP(clk_get_rate(spfi->spfi_clk), xfer->speed_hz);
	div = clamp(512 / (1 << get_count_order(div)), 1, 128);

	val = spfi_readl(spfi, SPFI_DEVICE_PARAMETER(spi->chip_select));
	val &= ~(SPFI_DEVICE_PARAMETER_BITCLK_MASK <<
		 SPFI_DEVICE_PARAMETER_BITCLK_SHIFT);
	val |= div << SPFI_DEVICE_PARAMETER_BITCLK_SHIFT;
	spfi_writel(spfi, val, SPFI_DEVICE_PARAMETER(spi->chip_select));

	spfi_writel(spfi, xfer->len << SPFI_TRANSACTION_TSIZE_SHIFT,
		    SPFI_TRANSACTION);

	val = spfi_readl(spfi, SPFI_CONTROL);
	val &= ~(SPFI_CONTROL_SEND_DMA | SPFI_CONTROL_GET_DMA);
	if (xfer->tx_buf)
		val |= SPFI_CONTROL_SEND_DMA;
	if (xfer->rx_buf)
		val |= SPFI_CONTROL_GET_DMA;
	val &= ~(SPFI_CONTROL_TMODE_MASK << SPFI_CONTROL_TMODE_SHIFT);
	if (xfer->tx_nbits == SPI_NBITS_DUAL &&
	    xfer->rx_nbits == SPI_NBITS_DUAL)
		val |= SPFI_CONTROL_TMODE_DUAL << SPFI_CONTROL_TMODE_SHIFT;
	else if (xfer->tx_nbits == SPI_NBITS_QUAD &&
		 xfer->rx_nbits == SPI_NBITS_QUAD)
		val |= SPFI_CONTROL_TMODE_QUAD << SPFI_CONTROL_TMODE_SHIFT;
	spfi_writel(spfi, val, SPFI_CONTROL);
}

static int img_spfi_transfer_one(struct spi_master *master,
				 struct spi_device *spi,
				 struct spi_transfer *xfer)
{
	struct img_spfi *spfi = spi_master_get_devdata(spi->master);
	int ret;

	if (xfer->len > SPFI_TRANSACTION_TSIZE_MASK) {
		dev_err(spfi->dev,
			"Transfer length (%d) is greater than the max supported (%d)",
			xfer->len, SPFI_TRANSACTION_TSIZE_MASK);
		return -EINVAL;
	}

	img_spfi_config(master, spi, xfer);
	if (master->can_dma && master->can_dma(master, spi, xfer))
		ret = img_spfi_start_dma(master, spi, xfer);
	else
		ret = img_spfi_start_pio(master, spi, xfer);

	return ret;
}

static bool img_spfi_can_dma(struct spi_master *master, struct spi_device *spi,
			     struct spi_transfer *xfer)
{
	if (xfer->len > SPFI_32BIT_FIFO_SIZE)
		return true;
	return false;
}

static irqreturn_t img_spfi_irq(int irq, void *dev_id)
{
	struct img_spfi *spfi = (struct img_spfi *)dev_id;
	u32 status;

	status = spfi_readl(spfi, SPFI_INTERRUPT_STATUS);
	if (status & SPFI_INTERRUPT_IACCESS) {
		spfi_writel(spfi, SPFI_INTERRUPT_IACCESS, SPFI_INTERRUPT_CLEAR);
		dev_err(spfi->dev, "Illegal access interrupt");
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}

static int img_spfi_probe(struct platform_device *pdev)
{
	struct spi_master *master;
	struct img_spfi *spfi;
	struct resource *res;
	int ret;

	master = spi_alloc_master(&pdev->dev, sizeof(*spfi));
	if (!master)
		return -ENOMEM;
	platform_set_drvdata(pdev, master);

	spfi = spi_master_get_devdata(master);
	spfi->dev = &pdev->dev;
	spfi->master = master;
	spin_lock_init(&spfi->lock);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	spfi->regs = devm_ioremap_resource(spfi->dev, res);
	if (IS_ERR(spfi->regs)) {
		ret = PTR_ERR(spfi->regs);
		goto put_spi;
	}
	spfi->phys = res->start;

	spfi->irq = platform_get_irq(pdev, 0);
	if (spfi->irq < 0) {
		ret = spfi->irq;
		goto put_spi;
	}
	ret = devm_request_irq(spfi->dev, spfi->irq, img_spfi_irq,
			       IRQ_TYPE_LEVEL_HIGH, dev_name(spfi->dev), spfi);
	if (ret)
		goto put_spi;

	spfi->sys_clk = devm_clk_get(spfi->dev, "sys");
	if (IS_ERR(spfi->sys_clk)) {
		ret = PTR_ERR(spfi->sys_clk);
		goto put_spi;
	}
	spfi->spfi_clk = devm_clk_get(spfi->dev, "spfi");
	if (IS_ERR(spfi->spfi_clk)) {
		ret = PTR_ERR(spfi->spfi_clk);
		goto put_spi;
	}

	ret = clk_prepare_enable(spfi->sys_clk);
	if (ret)
		goto put_spi;
	ret = clk_prepare_enable(spfi->spfi_clk);
	if (ret)
		goto disable_pclk;

	spfi_reset(spfi);
	/*
	 * Only enable the error (IACCESS) interrupt. In PIO mode we'll
	 * poll the status of the FIFOs.
	 */
	spfi_writel(spfi, SPFI_INTERRUPT_IACCESS, SPFI_INTERRUPT_ENABLE);

	master->auto_runtime_pm = true;
	master->bus_num = pdev->id;
	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_TX_DUAL | SPI_RX_DUAL;
	if (of_property_read_bool(spfi->dev->of_node, "img,supports-quad-mode"))
		master->mode_bits |= SPI_TX_QUAD | SPI_RX_QUAD;
	master->dev.of_node = pdev->dev.of_node;
	master->bits_per_word_mask = SPI_BPW_MASK(32) | SPI_BPW_MASK(8);
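	/*
	 * With the BITCLK divider above limited to powers of two between 1
	 * and 128, the usable bit clock spans spfi_clk / 512 (BITCLK = 1)
	 * up to spfi_clk / 4 (BITCLK = 128).
	 */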
	master->max_speed_hz = clk_get_rate(spfi->spfi_clk) / 4;
	master->min_speed_hz = clk_get_rate(spfi->spfi_clk) / 512;

	master->setup = img_spfi_setup;
	master->cleanup = img_spfi_cleanup;
	master->transfer_one = img_spfi_transfer_one;
	master->prepare_message = img_spfi_prepare;
	master->unprepare_message = img_spfi_unprepare;
	master->handle_err = img_spfi_handle_err;

	spfi->tx_ch = dma_request_slave_channel(spfi->dev, "tx");
	spfi->rx_ch = dma_request_slave_channel(spfi->dev, "rx");
	if (!spfi->tx_ch || !spfi->rx_ch) {
		if (spfi->tx_ch)
			dma_release_channel(spfi->tx_ch);
		if (spfi->rx_ch)
			dma_release_channel(spfi->rx_ch);
		dev_warn(spfi->dev, "Failed to get DMA channels, falling back to PIO mode\n");
	} else {
		master->dma_tx = spfi->tx_ch;
		master->dma_rx = spfi->rx_ch;
		master->can_dma = img_spfi_can_dma;
	}

	pm_runtime_set_active(spfi->dev);
	pm_runtime_enable(spfi->dev);

	ret = devm_spi_register_master(spfi->dev, master);
	if (ret)
		goto disable_pm;

	return 0;

disable_pm:
	pm_runtime_disable(spfi->dev);
	if (spfi->rx_ch)
		dma_release_channel(spfi->rx_ch);
	if (spfi->tx_ch)
		dma_release_channel(spfi->tx_ch);
	clk_disable_unprepare(spfi->spfi_clk);
disable_pclk:
	clk_disable_unprepare(spfi->sys_clk);
put_spi:
	spi_master_put(master);

	return ret;
}

static int img_spfi_remove(struct platform_device *pdev)
{
	struct spi_master *master = platform_get_drvdata(pdev);
	struct img_spfi *spfi = spi_master_get_devdata(master);

	if (spfi->tx_ch)
		dma_release_channel(spfi->tx_ch);
	if (spfi->rx_ch)
		dma_release_channel(spfi->rx_ch);

	pm_runtime_disable(spfi->dev);
	if (!pm_runtime_status_suspended(spfi->dev)) {
		clk_disable_unprepare(spfi->spfi_clk);
		clk_disable_unprepare(spfi->sys_clk);
	}

	spi_master_put(master);

	return 0;
}

#ifdef CONFIG_PM
static int img_spfi_runtime_suspend(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct img_spfi *spfi = spi_master_get_devdata(master);

	clk_disable_unprepare(spfi->spfi_clk);
	clk_disable_unprepare(spfi->sys_clk);

	return 0;
}

static int img_spfi_runtime_resume(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct img_spfi *spfi = spi_master_get_devdata(master);
	int ret;

	ret = clk_prepare_enable(spfi->sys_clk);
	if (ret)
		return ret;
	ret = clk_prepare_enable(spfi->spfi_clk);
	if (ret) {
		clk_disable_unprepare(spfi->sys_clk);
		return ret;
	}

	return 0;
}
#endif /* CONFIG_PM */

#ifdef CONFIG_PM_SLEEP
static int img_spfi_suspend(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);

	return spi_master_suspend(master);
}

static int img_spfi_resume(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct img_spfi *spfi = spi_master_get_devdata(master);
	int ret;

	/* pm_runtime_get_sync() returns 1 if the device was already active */
	ret = pm_runtime_get_sync(dev);
	if (ret < 0)
		return ret;
	spfi_reset(spfi);
	pm_runtime_put(dev);

	return spi_master_resume(master);
}
#endif /* CONFIG_PM_SLEEP */

static const struct dev_pm_ops img_spfi_pm_ops = {
	SET_RUNTIME_PM_OPS(img_spfi_runtime_suspend, img_spfi_runtime_resume,
			   NULL)
	SET_SYSTEM_SLEEP_PM_OPS(img_spfi_suspend, img_spfi_resume)
};

static const struct of_device_id img_spfi_of_match[] = {
	{ .compatible = "img,spfi", },
	{ },
};
MODULE_DEVICE_TABLE(of, img_spfi_of_match);
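/*
 * Illustrative device tree node (hypothetical address, interrupt and
 * phandles, not taken from any real board file), showing the resources
 * probe() looks up: the "sys" and "spfi" clocks, the "tx" and "rx" DMA
 * channels, per-device cs-gpios and the optional img,supports-quad-mode
 * property:
 *
 *	spi@18100f00 {
 *		compatible = "img,spfi";
 *		reg = <0x18100f00 0x100>;
 *		interrupts = <22>;
 *		clocks = <&clk_sys>, <&clk_spfi>;
 *		clock-names = "sys", "spfi";
 *		dmas = <&dma_controller 9>, <&dma_controller 10>;
 *		dma-names = "rx", "tx";
 *		cs-gpios = <&gpio0 0 0>;
 *		img,supports-quad-mode;
 *	};
 */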

static struct platform_driver img_spfi_driver = {
	.driver = {
		.name = "img-spfi",
		.pm = &img_spfi_pm_ops,
		.of_match_table = of_match_ptr(img_spfi_of_match),
	},
	.probe = img_spfi_probe,
	.remove = img_spfi_remove,
};
module_platform_driver(img_spfi_driver);

MODULE_DESCRIPTION("IMG SPFI controller driver");
MODULE_AUTHOR("Andrew Bresticker <abrestic@chromium.org>");
MODULE_LICENSE("GPL v2");