/*
 * IMG SPFI controller driver
 *
 * Copyright (C) 2007,2008,2013 Imagination Technologies Ltd.
 * Copyright (C) 2014 Google, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 */
11 | ||
12 | #include <linux/clk.h> | |
13 | #include <linux/delay.h> | |
14 | #include <linux/dmaengine.h> | |
15 | #include <linux/interrupt.h> | |
16 | #include <linux/io.h> | |
17 | #include <linux/irq.h> | |
18 | #include <linux/module.h> | |
19 | #include <linux/of.h> | |
20 | #include <linux/platform_device.h> | |
21 | #include <linux/pm_runtime.h> | |
22 | #include <linux/scatterlist.h> | |
23 | #include <linux/slab.h> | |
24 | #include <linux/spi/spi.h> | |
25 | #include <linux/spinlock.h> | |
26 | ||
/* Register map: offsets and bit-field definitions for the IMG SPFI block. */

/* Per-chip-select parameters: bit-clock divider and CS timing (8 bits each). */
#define SPFI_DEVICE_PARAMETER(x)		(0x00 + 0x4 * (x))
#define SPFI_DEVICE_PARAMETER_BITCLK_SHIFT	24
#define SPFI_DEVICE_PARAMETER_BITCLK_MASK	0xff
#define SPFI_DEVICE_PARAMETER_CSSETUP_SHIFT	16
#define SPFI_DEVICE_PARAMETER_CSSETUP_MASK	0xff
#define SPFI_DEVICE_PARAMETER_CSHOLD_SHIFT	8
#define SPFI_DEVICE_PARAMETER_CSHOLD_MASK	0xff
#define SPFI_DEVICE_PARAMETER_CSDELAY_SHIFT	0
#define SPFI_DEVICE_PARAMETER_CSDELAY_MASK	0xff

/* Global control: enable, soft reset, DMA requests, transfer mode. */
#define SPFI_CONTROL				0x14
#define SPFI_CONTROL_CONTINUE			BIT(12)
#define SPFI_CONTROL_SOFT_RESET			BIT(11)
#define SPFI_CONTROL_SEND_DMA			BIT(10)
#define SPFI_CONTROL_GET_DMA			BIT(9)
#define SPFI_CONTROL_TMODE_SHIFT		5
#define SPFI_CONTROL_TMODE_MASK			0x7
#define SPFI_CONTROL_TMODE_SINGLE		0
#define SPFI_CONTROL_TMODE_DUAL			1
#define SPFI_CONTROL_TMODE_QUAD			2
#define SPFI_CONTROL_SPFI_EN			BIT(0)

/* Transfer size in bytes; TSIZE is a 16-bit field. */
#define SPFI_TRANSACTION			0x18
#define SPFI_TRANSACTION_TSIZE_SHIFT		16
#define SPFI_TRANSACTION_TSIZE_MASK		0xffff

/* Device select plus per-device clock polarity/phase bits. */
#define SPFI_PORT_STATE				0x1c
#define SPFI_PORT_STATE_DEV_SEL_SHIFT		20
#define SPFI_PORT_STATE_DEV_SEL_MASK		0x7
#define SPFI_PORT_STATE_CK_POL(x)		BIT(19 - (x))
#define SPFI_PORT_STATE_CK_PHASE(x)		BIT(14 - (x))

/* FIFO data ports: word-wide and byte-wide access windows. */
#define SPFI_TX_32BIT_VALID_DATA		0x20
#define SPFI_TX_8BIT_VALID_DATA			0x24
#define SPFI_RX_32BIT_VALID_DATA		0x28
#define SPFI_RX_8BIT_VALID_DATA			0x2c

/* Interrupt status/enable/clear registers and their event bits. */
#define SPFI_INTERRUPT_STATUS			0x30
#define SPFI_INTERRUPT_ENABLE			0x34
#define SPFI_INTERRUPT_CLEAR			0x38
#define SPFI_INTERRUPT_IACCESS			BIT(12)
#define SPFI_INTERRUPT_GDEX8BIT			BIT(11)
#define SPFI_INTERRUPT_ALLDONETRIG		BIT(9)
#define SPFI_INTERRUPT_GDFUL			BIT(8)
#define SPFI_INTERRUPT_GDHF			BIT(7)
#define SPFI_INTERRUPT_GDEX32BIT		BIT(6)
#define SPFI_INTERRUPT_GDTRIG			BIT(5)
#define SPFI_INTERRUPT_SDFUL			BIT(3)
#define SPFI_INTERRUPT_SDHF			BIT(2)
#define SPFI_INTERRUPT_SDE			BIT(1)
#define SPFI_INTERRUPT_SDTRIG			BIT(0)

/*
 * There are four parallel FIFOs of 16 bytes each.  The word buffer
 * (*_32BIT_VALID_DATA) accesses all four FIFOs at once, resulting in an
 * effective FIFO size of 64 bytes.  The byte buffer (*_8BIT_VALID_DATA)
 * accesses only a single FIFO, resulting in an effective FIFO size of
 * 16 bytes.
 */
#define SPFI_32BIT_FIFO_SIZE			64
#define SPFI_8BIT_FIFO_SIZE			16
88 | ||
/* Driver state for one SPFI controller instance. */
struct img_spfi {
	struct device *dev;
	struct spi_master *master;
	spinlock_t lock;		/* protects tx_dma_busy / rx_dma_busy */

	void __iomem *regs;		/* mapped register base */
	phys_addr_t phys;		/* physical register base, used for DMA slave addresses */
	int irq;
	struct clk *spfi_clk;		/* "spfi" clock: source of the bit clock */
	struct clk *sys_clk;		/* "sys" clock: register interface clock */

	struct dma_chan *rx_ch;		/* NULL when running PIO-only */
	struct dma_chan *tx_ch;		/* NULL when running PIO-only */
	bool tx_dma_busy;		/* TX DMA submitted, completion callback pending */
	bool rx_dma_busy;		/* RX DMA submitted, completion callback pending */
};
105 | ||
/* Read a 32-bit SPFI register at byte offset @reg. */
static inline u32 spfi_readl(struct img_spfi *spfi, u32 reg)
{
	return readl(spfi->regs + reg);
}
110 | ||
/* Write @val to the 32-bit SPFI register at byte offset @reg. */
static inline void spfi_writel(struct img_spfi *spfi, u32 val, u32 reg)
{
	writel(val, spfi->regs + reg);
}
115 | ||
116 | static inline void spfi_start(struct img_spfi *spfi) | |
117 | { | |
118 | u32 val; | |
119 | ||
120 | val = spfi_readl(spfi, SPFI_CONTROL); | |
121 | val |= SPFI_CONTROL_SPFI_EN; | |
122 | spfi_writel(spfi, val, SPFI_CONTROL); | |
123 | } | |
124 | ||
125 | static inline void spfi_stop(struct img_spfi *spfi) | |
126 | { | |
127 | u32 val; | |
128 | ||
129 | val = spfi_readl(spfi, SPFI_CONTROL); | |
130 | val &= ~SPFI_CONTROL_SPFI_EN; | |
131 | spfi_writel(spfi, val, SPFI_CONTROL); | |
132 | } | |
133 | ||
/*
 * Pulse the soft-reset bit.  The 1us delay gives the controller time to
 * latch the reset before the bit is cleared again; clearing CONTROL also
 * leaves the block disabled.
 */
static inline void spfi_reset(struct img_spfi *spfi)
{
	spfi_writel(spfi, SPFI_CONTROL_SOFT_RESET, SPFI_CONTROL);
	udelay(1);
	spfi_writel(spfi, 0, SPFI_CONTROL);
}
140 | ||
/*
 * Wait (up to 10ms) for the TX FIFO to drain, i.e. for the SDE
 * ("send data empty") status bit to assert.  On timeout the controller
 * is soft-reset, which discards whatever is stuck in the FIFO.
 */
static void spfi_flush_tx_fifo(struct img_spfi *spfi)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(10);

	/* clear any stale SDE indication before polling */
	spfi_writel(spfi, SPFI_INTERRUPT_SDE, SPFI_INTERRUPT_CLEAR);
	while (time_before(jiffies, timeout)) {
		if (spfi_readl(spfi, SPFI_INTERRUPT_STATUS) &
		    SPFI_INTERRUPT_SDE)
			return;
		cpu_relax();
	}

	dev_err(spfi->dev, "Timed out waiting for FIFO to drain\n");
	spfi_reset(spfi);
}
156 | ||
/*
 * Push up to @max bytes from @buf into the TX FIFO through the 32-bit
 * data port, stopping early if the FIFO fills (SDFUL).  Returns the
 * number of bytes written (always a multiple of 4).
 */
static unsigned int spfi_pio_write32(struct img_spfi *spfi, const u32 *buf,
				     unsigned int max)
{
	unsigned int count = 0;
	u32 status;

	while (count < max) {
		/* clear SDFUL, then re-read it to get the current FIFO state */
		spfi_writel(spfi, SPFI_INTERRUPT_SDFUL, SPFI_INTERRUPT_CLEAR);
		status = spfi_readl(spfi, SPFI_INTERRUPT_STATUS);
		if (status & SPFI_INTERRUPT_SDFUL)
			break;
		/* count is in bytes; buf is indexed in 32-bit words */
		spfi_writel(spfi, buf[count / 4], SPFI_TX_32BIT_VALID_DATA);
		count += 4;
	}

	return count;
}
174 | ||
175 | static unsigned int spfi_pio_write8(struct img_spfi *spfi, const u8 *buf, | |
176 | unsigned int max) | |
177 | { | |
178 | unsigned int count = 0; | |
179 | u32 status; | |
180 | ||
181 | while (count < max) { | |
182 | spfi_writel(spfi, SPFI_INTERRUPT_SDFUL, SPFI_INTERRUPT_CLEAR); | |
183 | status = spfi_readl(spfi, SPFI_INTERRUPT_STATUS); | |
184 | if (status & SPFI_INTERRUPT_SDFUL) | |
185 | break; | |
186 | spfi_writel(spfi, buf[count], SPFI_TX_8BIT_VALID_DATA); | |
187 | count++; | |
188 | } | |
189 | ||
190 | return count; | |
191 | } | |
192 | ||
/*
 * Pull up to @max bytes from the RX FIFO through the 32-bit data port
 * into @buf, stopping when no full word is available (GDEX32BIT clear).
 * Returns the number of bytes read (always a multiple of 4).
 */
static unsigned int spfi_pio_read32(struct img_spfi *spfi, u32 *buf,
				    unsigned int max)
{
	unsigned int count = 0;
	u32 status;

	while (count < max) {
		/* clear GDEX32BIT, then re-read to see if a word is ready */
		spfi_writel(spfi, SPFI_INTERRUPT_GDEX32BIT,
			    SPFI_INTERRUPT_CLEAR);
		status = spfi_readl(spfi, SPFI_INTERRUPT_STATUS);
		if (!(status & SPFI_INTERRUPT_GDEX32BIT))
			break;
		/* count is in bytes; buf is indexed in 32-bit words */
		buf[count / 4] = spfi_readl(spfi, SPFI_RX_32BIT_VALID_DATA);
		count += 4;
	}

	return count;
}
211 | ||
212 | static unsigned int spfi_pio_read8(struct img_spfi *spfi, u8 *buf, | |
213 | unsigned int max) | |
214 | { | |
215 | unsigned int count = 0; | |
216 | u32 status; | |
217 | ||
218 | while (count < max) { | |
219 | spfi_writel(spfi, SPFI_INTERRUPT_GDEX8BIT, | |
220 | SPFI_INTERRUPT_CLEAR); | |
221 | status = spfi_readl(spfi, SPFI_INTERRUPT_STATUS); | |
222 | if (!(status & SPFI_INTERRUPT_GDEX8BIT)) | |
223 | break; | |
224 | buf[count] = spfi_readl(spfi, SPFI_RX_8BIT_VALID_DATA); | |
225 | count++; | |
226 | } | |
227 | ||
228 | return count; | |
229 | } | |
230 | ||
/*
 * Run a transfer entirely by polled PIO.  Returns 0 on completion or
 * -ETIMEDOUT (after resetting the controller) if the FIFOs stall.
 */
static int img_spfi_start_pio(struct spi_master *master,
			       struct spi_device *spi,
			       struct spi_transfer *xfer)
{
	struct img_spfi *spfi = spi_master_get_devdata(spi->master);
	unsigned int tx_bytes = 0, rx_bytes = 0;
	const void *tx_buf = xfer->tx_buf;
	void *rx_buf = xfer->rx_buf;
	unsigned long timeout;

	if (tx_buf)
		tx_bytes = xfer->len;
	if (rx_buf)
		rx_bytes = xfer->len;

	spfi_start(spfi);

	/*
	 * Budget the time the transfer takes at the requested bit rate,
	 * plus a 100ms margin.  Assumes xfer->speed_hz is non-zero
	 * (normally guaranteed by the SPI core) -- NOTE(review): confirm.
	 */
	timeout = jiffies +
		msecs_to_jiffies(xfer->len * 8 * 1000 / xfer->speed_hz + 100);
	while ((tx_bytes > 0 || rx_bytes > 0) &&
	       time_before(jiffies, timeout)) {
		unsigned int tx_count, rx_count;

		/* only 8- and 32-bit words are advertised via bits_per_word_mask */
		switch (xfer->bits_per_word) {
		case 32:
			tx_count = spfi_pio_write32(spfi, tx_buf, tx_bytes);
			rx_count = spfi_pio_read32(spfi, rx_buf, rx_bytes);
			break;
		case 8:
		default:
			tx_count = spfi_pio_write8(spfi, tx_buf, tx_bytes);
			rx_count = spfi_pio_read8(spfi, rx_buf, rx_bytes);
			break;
		}

		/* void-pointer arithmetic (byte-sized) is a GCC extension */
		tx_buf += tx_count;
		rx_buf += rx_count;
		tx_bytes -= tx_count;
		rx_bytes -= rx_count;

		cpu_relax();
	}

	if (rx_bytes > 0 || tx_bytes > 0) {
		dev_err(spfi->dev, "PIO transfer timed out\n");
		spfi_reset(spfi);
		return -ETIMEDOUT;
	}

	/* all TX data queued; wait for it to actually leave the FIFO */
	if (tx_buf)
		spfi_flush_tx_fifo(spfi);
	spfi_stop(spfi);

	return 0;
}
286 | ||
/*
 * RX DMA completion callback.  Whichever direction finishes last (under
 * spfi->lock) stops the controller and finalizes the transfer.
 */
static void img_spfi_dma_rx_cb(void *data)
{
	struct img_spfi *spfi = data;
	unsigned long flags;

	spin_lock_irqsave(&spfi->lock, flags);

	spfi->rx_dma_busy = false;
	if (!spfi->tx_dma_busy) {
		spfi_stop(spfi);
		spi_finalize_current_transfer(spfi->master);
	}

	spin_unlock_irqrestore(&spfi->lock, flags);
}
302 | ||
/*
 * TX DMA completion callback.  Waits for the TX FIFO to drain first
 * (DMA completion only means the data reached the FIFO), then, if RX is
 * also done, stops the controller and finalizes the transfer.
 */
static void img_spfi_dma_tx_cb(void *data)
{
	struct img_spfi *spfi = data;
	unsigned long flags;

	/* must be done outside the spinlock: it polls with a 10ms timeout */
	spfi_flush_tx_fifo(spfi);

	spin_lock_irqsave(&spfi->lock, flags);

	spfi->tx_dma_busy = false;
	if (!spfi->rx_dma_busy) {
		spfi_stop(spfi);
		spi_finalize_current_transfer(spfi->master);
	}

	spin_unlock_irqrestore(&spfi->lock, flags);
}
320 | ||
321 | static int img_spfi_start_dma(struct spi_master *master, | |
322 | struct spi_device *spi, | |
323 | struct spi_transfer *xfer) | |
324 | { | |
325 | struct img_spfi *spfi = spi_master_get_devdata(spi->master); | |
326 | struct dma_async_tx_descriptor *rxdesc = NULL, *txdesc = NULL; | |
327 | struct dma_slave_config rxconf, txconf; | |
328 | ||
329 | spfi->rx_dma_busy = false; | |
330 | spfi->tx_dma_busy = false; | |
331 | ||
332 | if (xfer->rx_buf) { | |
333 | rxconf.direction = DMA_DEV_TO_MEM; | |
334 | switch (xfer->bits_per_word) { | |
335 | case 32: | |
336 | rxconf.src_addr = spfi->phys + SPFI_RX_32BIT_VALID_DATA; | |
337 | rxconf.src_addr_width = 4; | |
338 | rxconf.src_maxburst = 4; | |
339 | break; | |
340 | case 8: | |
341 | default: | |
342 | rxconf.src_addr = spfi->phys + SPFI_RX_8BIT_VALID_DATA; | |
343 | rxconf.src_addr_width = 1; | |
76fe5e95 | 344 | rxconf.src_maxburst = 4; |
deba2580 AB |
345 | } |
346 | dmaengine_slave_config(spfi->rx_ch, &rxconf); | |
347 | ||
348 | rxdesc = dmaengine_prep_slave_sg(spfi->rx_ch, xfer->rx_sg.sgl, | |
349 | xfer->rx_sg.nents, | |
350 | DMA_DEV_TO_MEM, | |
351 | DMA_PREP_INTERRUPT); | |
352 | if (!rxdesc) | |
353 | goto stop_dma; | |
354 | ||
355 | rxdesc->callback = img_spfi_dma_rx_cb; | |
356 | rxdesc->callback_param = spfi; | |
357 | } | |
358 | ||
359 | if (xfer->tx_buf) { | |
360 | txconf.direction = DMA_MEM_TO_DEV; | |
361 | switch (xfer->bits_per_word) { | |
362 | case 32: | |
363 | txconf.dst_addr = spfi->phys + SPFI_TX_32BIT_VALID_DATA; | |
364 | txconf.dst_addr_width = 4; | |
365 | txconf.dst_maxburst = 4; | |
366 | break; | |
367 | case 8: | |
368 | default: | |
369 | txconf.dst_addr = spfi->phys + SPFI_TX_8BIT_VALID_DATA; | |
370 | txconf.dst_addr_width = 1; | |
76fe5e95 | 371 | txconf.dst_maxburst = 4; |
deba2580 AB |
372 | break; |
373 | } | |
374 | dmaengine_slave_config(spfi->tx_ch, &txconf); | |
375 | ||
376 | txdesc = dmaengine_prep_slave_sg(spfi->tx_ch, xfer->tx_sg.sgl, | |
377 | xfer->tx_sg.nents, | |
378 | DMA_MEM_TO_DEV, | |
379 | DMA_PREP_INTERRUPT); | |
380 | if (!txdesc) | |
381 | goto stop_dma; | |
382 | ||
383 | txdesc->callback = img_spfi_dma_tx_cb; | |
384 | txdesc->callback_param = spfi; | |
385 | } | |
386 | ||
387 | if (xfer->rx_buf) { | |
388 | spfi->rx_dma_busy = true; | |
389 | dmaengine_submit(rxdesc); | |
390 | dma_async_issue_pending(spfi->rx_ch); | |
391 | } | |
392 | ||
c0e7dc21 AB |
393 | spfi_start(spfi); |
394 | ||
deba2580 AB |
395 | if (xfer->tx_buf) { |
396 | spfi->tx_dma_busy = true; | |
397 | dmaengine_submit(txdesc); | |
398 | dma_async_issue_pending(spfi->tx_ch); | |
399 | } | |
400 | ||
deba2580 AB |
401 | return 1; |
402 | ||
403 | stop_dma: | |
404 | dmaengine_terminate_all(spfi->rx_ch); | |
405 | dmaengine_terminate_all(spfi->tx_ch); | |
406 | return -EIO; | |
407 | } | |
408 | ||
/*
 * Program the per-transfer configuration: bit-clock divider, DMA request
 * enables, transfer mode (single/dual/quad), clock polarity/phase for the
 * addressed chip select, and the transfer size.
 */
static void img_spfi_config(struct spi_master *master, struct spi_device *spi,
			    struct spi_transfer *xfer)
{
	struct img_spfi *spfi = spi_master_get_devdata(spi->master);
	u32 val, div;

	/*
	 * output = spfi_clk * (BITCLK / 512), where BITCLK must be a
	 * power of 2 up to 256 (where 255 == 256 since BITCLK is 8 bits)
	 */
	div = DIV_ROUND_UP(master->max_speed_hz, xfer->speed_hz);
	div = clamp(512 / (1 << get_count_order(div)), 1, 255);

	/* replace only the BITCLK field; CS timing fields are preserved */
	val = spfi_readl(spfi, SPFI_DEVICE_PARAMETER(spi->chip_select));
	val &= ~(SPFI_DEVICE_PARAMETER_BITCLK_MASK <<
		 SPFI_DEVICE_PARAMETER_BITCLK_SHIFT);
	val |= div << SPFI_DEVICE_PARAMETER_BITCLK_SHIFT;
	spfi_writel(spfi, val, SPFI_DEVICE_PARAMETER(spi->chip_select));

	val = spfi_readl(spfi, SPFI_CONTROL);
	val &= ~(SPFI_CONTROL_SEND_DMA | SPFI_CONTROL_GET_DMA);
	if (xfer->tx_buf)
		val |= SPFI_CONTROL_SEND_DMA;
	if (xfer->rx_buf)
		val |= SPFI_CONTROL_GET_DMA;
	/* dual/quad only when both directions agree; otherwise single */
	val &= ~(SPFI_CONTROL_TMODE_MASK << SPFI_CONTROL_TMODE_SHIFT);
	if (xfer->tx_nbits == SPI_NBITS_DUAL &&
	    xfer->rx_nbits == SPI_NBITS_DUAL)
		val |= SPFI_CONTROL_TMODE_DUAL << SPFI_CONTROL_TMODE_SHIFT;
	else if (xfer->tx_nbits == SPI_NBITS_QUAD &&
		 xfer->rx_nbits == SPI_NBITS_QUAD)
		val |= SPFI_CONTROL_TMODE_QUAD << SPFI_CONTROL_TMODE_SHIFT;
	/* keep CS asserted between transfers unless this is the last one */
	val &= ~SPFI_CONTROL_CONTINUE;
	if (!xfer->cs_change && !list_is_last(&xfer->transfer_list,
					      &master->cur_msg->transfers))
		val |= SPFI_CONTROL_CONTINUE;
	spfi_writel(spfi, val, SPFI_CONTROL);

	/* CPOL/CPHA bits are per chip select within PORT_STATE */
	val = spfi_readl(spfi, SPFI_PORT_STATE);
	if (spi->mode & SPI_CPHA)
		val |= SPFI_PORT_STATE_CK_PHASE(spi->chip_select);
	else
		val &= ~SPFI_PORT_STATE_CK_PHASE(spi->chip_select);
	if (spi->mode & SPI_CPOL)
		val |= SPFI_PORT_STATE_CK_POL(spi->chip_select);
	else
		val &= ~SPFI_PORT_STATE_CK_POL(spi->chip_select);
	spfi_writel(spfi, val, SPFI_PORT_STATE);

	/* transfer size in bytes; TSIZE is only 16 bits wide */
	spfi_writel(spfi, xfer->len << SPFI_TRANSACTION_TSIZE_SHIFT,
		    SPFI_TRANSACTION);
}
461 | ||
462 | static int img_spfi_transfer_one(struct spi_master *master, | |
463 | struct spi_device *spi, | |
464 | struct spi_transfer *xfer) | |
465 | { | |
466 | struct img_spfi *spfi = spi_master_get_devdata(spi->master); | |
467 | bool dma_reset = false; | |
468 | unsigned long flags; | |
469 | int ret; | |
470 | ||
471 | /* | |
472 | * Stop all DMA and reset the controller if the previous transaction | |
473 | * timed-out and never completed it's DMA. | |
474 | */ | |
475 | spin_lock_irqsave(&spfi->lock, flags); | |
476 | if (spfi->tx_dma_busy || spfi->rx_dma_busy) { | |
477 | dev_err(spfi->dev, "SPI DMA still busy\n"); | |
478 | dma_reset = true; | |
479 | } | |
480 | spin_unlock_irqrestore(&spfi->lock, flags); | |
481 | ||
482 | if (dma_reset) { | |
483 | dmaengine_terminate_all(spfi->tx_ch); | |
484 | dmaengine_terminate_all(spfi->rx_ch); | |
485 | spfi_reset(spfi); | |
486 | } | |
487 | ||
488 | img_spfi_config(master, spi, xfer); | |
489 | if (master->can_dma && master->can_dma(master, spi, xfer)) | |
490 | ret = img_spfi_start_dma(master, spi, xfer); | |
491 | else | |
492 | ret = img_spfi_start_pio(master, spi, xfer); | |
493 | ||
494 | return ret; | |
495 | } | |
496 | ||
497 | static void img_spfi_set_cs(struct spi_device *spi, bool enable) | |
498 | { | |
499 | struct img_spfi *spfi = spi_master_get_devdata(spi->master); | |
500 | u32 val; | |
501 | ||
502 | val = spfi_readl(spfi, SPFI_PORT_STATE); | |
503 | val &= ~(SPFI_PORT_STATE_DEV_SEL_MASK << SPFI_PORT_STATE_DEV_SEL_SHIFT); | |
504 | val |= spi->chip_select << SPFI_PORT_STATE_DEV_SEL_SHIFT; | |
505 | spfi_writel(spfi, val, SPFI_PORT_STATE); | |
506 | } | |
507 | ||
508 | static bool img_spfi_can_dma(struct spi_master *master, struct spi_device *spi, | |
509 | struct spi_transfer *xfer) | |
510 | { | |
511 | if (xfer->bits_per_word == 8 && xfer->len > SPFI_8BIT_FIFO_SIZE) | |
512 | return true; | |
513 | if (xfer->bits_per_word == 32 && xfer->len > SPFI_32BIT_FIFO_SIZE) | |
514 | return true; | |
515 | return false; | |
516 | } | |
517 | ||
518 | static irqreturn_t img_spfi_irq(int irq, void *dev_id) | |
519 | { | |
520 | struct img_spfi *spfi = (struct img_spfi *)dev_id; | |
521 | u32 status; | |
522 | ||
523 | status = spfi_readl(spfi, SPFI_INTERRUPT_STATUS); | |
524 | if (status & SPFI_INTERRUPT_IACCESS) { | |
525 | spfi_writel(spfi, SPFI_INTERRUPT_IACCESS, SPFI_INTERRUPT_CLEAR); | |
526 | dev_err(spfi->dev, "Illegal access interrupt"); | |
527 | return IRQ_HANDLED; | |
528 | } | |
529 | ||
530 | return IRQ_NONE; | |
531 | } | |
532 | ||
/*
 * Probe: map registers, grab IRQ and clocks, reset the controller,
 * optionally acquire DMA channels (falling back to PIO), and register
 * the SPI master.  Error paths unwind in reverse order via gotos.
 */
static int img_spfi_probe(struct platform_device *pdev)
{
	struct spi_master *master;
	struct img_spfi *spfi;
	struct resource *res;
	int ret;

	master = spi_alloc_master(&pdev->dev, sizeof(*spfi));
	if (!master)
		return -ENOMEM;
	platform_set_drvdata(pdev, master);

	spfi = spi_master_get_devdata(master);
	spfi->dev = &pdev->dev;
	spfi->master = master;
	spin_lock_init(&spfi->lock);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	spfi->regs = devm_ioremap_resource(spfi->dev, res);
	if (IS_ERR(spfi->regs)) {
		ret = PTR_ERR(spfi->regs);
		goto put_spi;
	}
	/* physical base is needed to build DMA slave addresses */
	spfi->phys = res->start;

	spfi->irq = platform_get_irq(pdev, 0);
	if (spfi->irq < 0) {
		ret = spfi->irq;
		goto put_spi;
	}
	/*
	 * NOTE(review): the IRQ is requested before the clocks are enabled
	 * and the controller is reset -- confirm the handler is safe if it
	 * fires this early (it only reads/clears INTERRUPT registers).
	 */
	ret = devm_request_irq(spfi->dev, spfi->irq, img_spfi_irq,
			       IRQ_TYPE_LEVEL_HIGH, dev_name(spfi->dev), spfi);
	if (ret)
		goto put_spi;

	spfi->sys_clk = devm_clk_get(spfi->dev, "sys");
	if (IS_ERR(spfi->sys_clk)) {
		ret = PTR_ERR(spfi->sys_clk);
		goto put_spi;
	}
	spfi->spfi_clk = devm_clk_get(spfi->dev, "spfi");
	if (IS_ERR(spfi->spfi_clk)) {
		ret = PTR_ERR(spfi->spfi_clk);
		goto put_spi;
	}

	ret = clk_prepare_enable(spfi->sys_clk);
	if (ret)
		goto put_spi;
	ret = clk_prepare_enable(spfi->spfi_clk);
	if (ret)
		goto disable_pclk;

	spfi_reset(spfi);
	/*
	 * Only enable the error (IACCESS) interrupt.  In PIO mode we'll
	 * poll the status of the FIFOs.
	 */
	spfi_writel(spfi, SPFI_INTERRUPT_IACCESS, SPFI_INTERRUPT_ENABLE);

	master->auto_runtime_pm = true;
	master->bus_num = pdev->id;
	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_TX_DUAL | SPI_RX_DUAL;
	if (of_property_read_bool(spfi->dev->of_node, "img,supports-quad-mode"))
		master->mode_bits |= SPI_TX_QUAD | SPI_RX_QUAD;
	master->num_chipselect = 5;
	master->dev.of_node = pdev->dev.of_node;
	master->bits_per_word_mask = SPI_BPW_MASK(32) | SPI_BPW_MASK(8);
	/* BITCLK divides the input clock by at most 512 (see img_spfi_config) */
	master->max_speed_hz = clk_get_rate(spfi->spfi_clk);
	master->min_speed_hz = master->max_speed_hz / 512;

	master->set_cs = img_spfi_set_cs;
	master->transfer_one = img_spfi_transfer_one;

	/* DMA is optional: fall back to PIO if either channel is missing */
	spfi->tx_ch = dma_request_slave_channel(spfi->dev, "tx");
	spfi->rx_ch = dma_request_slave_channel(spfi->dev, "rx");
	if (!spfi->tx_ch || !spfi->rx_ch) {
		if (spfi->tx_ch)
			dma_release_channel(spfi->tx_ch);
		if (spfi->rx_ch)
			dma_release_channel(spfi->rx_ch);
		dev_warn(spfi->dev, "Failed to get DMA channels, falling back to PIO mode\n");
	} else {
		master->dma_tx = spfi->tx_ch;
		master->dma_rx = spfi->rx_ch;
		master->can_dma = img_spfi_can_dma;
	}

	/* clocks are already on, so mark the device active before enabling RPM */
	pm_runtime_set_active(spfi->dev);
	pm_runtime_enable(spfi->dev);

	ret = devm_spi_register_master(spfi->dev, master);
	if (ret)
		goto disable_pm;

	return 0;

disable_pm:
	pm_runtime_disable(spfi->dev);
	if (spfi->rx_ch)
		dma_release_channel(spfi->rx_ch);
	if (spfi->tx_ch)
		dma_release_channel(spfi->tx_ch);
	clk_disable_unprepare(spfi->spfi_clk);
disable_pclk:
	clk_disable_unprepare(spfi->sys_clk);
put_spi:
	spi_master_put(master);

	return ret;
}
644 | ||
/*
 * Remove: release DMA channels, disable runtime PM, and turn the clocks
 * off unless runtime suspend already did so.
 */
static int img_spfi_remove(struct platform_device *pdev)
{
	struct spi_master *master = platform_get_drvdata(pdev);
	struct img_spfi *spfi = spi_master_get_devdata(master);

	if (spfi->tx_ch)
		dma_release_channel(spfi->tx_ch);
	if (spfi->rx_ch)
		dma_release_channel(spfi->rx_ch);

	pm_runtime_disable(spfi->dev);
	/* avoid a double clk_disable if runtime suspend already ran */
	if (!pm_runtime_status_suspended(spfi->dev)) {
		clk_disable_unprepare(spfi->spfi_clk);
		clk_disable_unprepare(spfi->sys_clk);
	}

	spi_master_put(master);

	return 0;
}
665 | ||
#ifdef CONFIG_PM
/* Runtime suspend: gate both clocks while the controller is idle. */
static int img_spfi_runtime_suspend(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct img_spfi *spfi = spi_master_get_devdata(master);

	clk_disable_unprepare(spfi->spfi_clk);
	clk_disable_unprepare(spfi->sys_clk);

	return 0;
}
677 | ||
/* Runtime resume: re-enable the clocks, sys before spfi (reverse of suspend). */
static int img_spfi_runtime_resume(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct img_spfi *spfi = spi_master_get_devdata(master);
	int ret;

	ret = clk_prepare_enable(spfi->sys_clk);
	if (ret)
		return ret;
	ret = clk_prepare_enable(spfi->spfi_clk);
	if (ret) {
		/* undo the first enable so the clocks stay balanced */
		clk_disable_unprepare(spfi->sys_clk);
		return ret;
	}

	return 0;
}
#endif /* CONFIG_PM */
696 | |
#ifdef CONFIG_PM_SLEEP
/* System sleep: let the SPI core quiesce the message queue. */
static int img_spfi_suspend(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);

	return spi_master_suspend(master);
}
704 | ||
705 | static int img_spfi_resume(struct device *dev) | |
706 | { | |
707 | struct spi_master *master = dev_get_drvdata(dev); | |
708 | struct img_spfi *spfi = spi_master_get_devdata(master); | |
709 | int ret; | |
710 | ||
711 | ret = pm_runtime_get_sync(dev); | |
712 | if (ret) | |
713 | return ret; | |
714 | spfi_reset(spfi); | |
715 | pm_runtime_put(dev); | |
716 | ||
717 | return spi_master_resume(master); | |
718 | } | |
719 | #endif /* CONFIG_PM_SLEEP */ | |
720 | ||
/* Runtime PM gates the clocks; system sleep defers to the SPI core. */
static const struct dev_pm_ops img_spfi_pm_ops = {
	SET_RUNTIME_PM_OPS(img_spfi_runtime_suspend, img_spfi_runtime_resume,
			   NULL)
	SET_SYSTEM_SLEEP_PM_OPS(img_spfi_suspend, img_spfi_resume)
};
726 | ||
/* Device-tree match table. */
static const struct of_device_id img_spfi_of_match[] = {
	{ .compatible = "img,spfi", },
	{ },
};
MODULE_DEVICE_TABLE(of, img_spfi_of_match);
732 | ||
/* Platform driver glue. */
static struct platform_driver img_spfi_driver = {
	.driver = {
		.name = "img-spfi",
		.pm = &img_spfi_pm_ops,
		.of_match_table = of_match_ptr(img_spfi_of_match),
	},
	.probe = img_spfi_probe,
	.remove = img_spfi_remove,
};
module_platform_driver(img_spfi_driver);
743 | ||
744 | MODULE_DESCRIPTION("IMG SPFI controller driver"); | |
745 | MODULE_AUTHOR("Andrew Bresticker <abrestic@chromium.org>"); | |
746 | MODULE_LICENSE("GPL v2"); |