drivers/spi/spi-qup.c
/*
 * Copyright (c) 2008-2014, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/spi/spi.h>

#define QUP_CONFIG		0x0000
#define QUP_STATE		0x0004
#define QUP_IO_M_MODES		0x0008
#define QUP_SW_RESET		0x000c
#define QUP_OPERATIONAL		0x0018
#define QUP_ERROR_FLAGS		0x001c
#define QUP_ERROR_FLAGS_EN	0x0020
#define QUP_OPERATIONAL_MASK	0x0028
#define QUP_HW_VERSION		0x0030
#define QUP_MX_OUTPUT_CNT	0x0100
#define QUP_OUTPUT_FIFO		0x0110
#define QUP_MX_WRITE_CNT	0x0150
#define QUP_MX_INPUT_CNT	0x0200
#define QUP_MX_READ_CNT		0x0208
#define QUP_INPUT_FIFO		0x0218

#define SPI_CONFIG		0x0300
#define SPI_IO_CONTROL		0x0304
#define SPI_ERROR_FLAGS		0x0308
#define SPI_ERROR_FLAGS_EN	0x030c

/* QUP_CONFIG fields */
#define QUP_CONFIG_SPI_MODE		(1 << 8)
#define QUP_CONFIG_CLOCK_AUTO_GATE	BIT(13)
#define QUP_CONFIG_NO_INPUT		BIT(7)
#define QUP_CONFIG_NO_OUTPUT		BIT(6)
#define QUP_CONFIG_N			0x001f

/* QUP_STATE fields */
#define QUP_STATE_VALID		BIT(2)
#define QUP_STATE_RESET		0
#define QUP_STATE_RUN		1
#define QUP_STATE_PAUSE		3
#define QUP_STATE_MASK		3
#define QUP_STATE_CLEAR		2

#define QUP_HW_VERSION_2_1_1	0x20010001

/* QUP_IO_M_MODES fields */
#define QUP_IO_M_PACK_EN		BIT(15)
#define QUP_IO_M_UNPACK_EN		BIT(14)
#define QUP_IO_M_INPUT_MODE_MASK_SHIFT	12
#define QUP_IO_M_OUTPUT_MODE_MASK_SHIFT	10
#define QUP_IO_M_INPUT_MODE_MASK	(3 << QUP_IO_M_INPUT_MODE_MASK_SHIFT)
#define QUP_IO_M_OUTPUT_MODE_MASK	(3 << QUP_IO_M_OUTPUT_MODE_MASK_SHIFT)

#define QUP_IO_M_OUTPUT_BLOCK_SIZE(x)	(((x) & (0x03 << 0)) >> 0)
#define QUP_IO_M_OUTPUT_FIFO_SIZE(x)	(((x) & (0x07 << 2)) >> 2)
#define QUP_IO_M_INPUT_BLOCK_SIZE(x)	(((x) & (0x03 << 5)) >> 5)
#define QUP_IO_M_INPUT_FIFO_SIZE(x)	(((x) & (0x07 << 7)) >> 7)

#define QUP_IO_M_MODE_FIFO	0
#define QUP_IO_M_MODE_BLOCK	1
#define QUP_IO_M_MODE_DMOV	2
#define QUP_IO_M_MODE_BAM	3

/* QUP_OPERATIONAL fields */
#define QUP_OP_MAX_INPUT_DONE_FLAG	BIT(11)
#define QUP_OP_MAX_OUTPUT_DONE_FLAG	BIT(10)
#define QUP_OP_IN_SERVICE_FLAG		BIT(9)
#define QUP_OP_OUT_SERVICE_FLAG		BIT(8)
#define QUP_OP_IN_FIFO_FULL		BIT(7)
#define QUP_OP_OUT_FIFO_FULL		BIT(6)
#define QUP_OP_IN_FIFO_NOT_EMPTY	BIT(5)
#define QUP_OP_OUT_FIFO_NOT_EMPTY	BIT(4)

/* QUP_ERROR_FLAGS and QUP_ERROR_FLAGS_EN fields */
#define QUP_ERROR_OUTPUT_OVER_RUN	BIT(5)
#define QUP_ERROR_INPUT_UNDER_RUN	BIT(4)
#define QUP_ERROR_OUTPUT_UNDER_RUN	BIT(3)
#define QUP_ERROR_INPUT_OVER_RUN	BIT(2)

/* SPI_CONFIG fields */
#define SPI_CONFIG_HS_MODE	BIT(10)
#define SPI_CONFIG_INPUT_FIRST	BIT(9)
#define SPI_CONFIG_LOOPBACK	BIT(8)

/* SPI_IO_CONTROL fields */
#define SPI_IO_C_FORCE_CS		BIT(11)
#define SPI_IO_C_CLK_IDLE_HIGH		BIT(10)
#define SPI_IO_C_MX_CS_MODE		BIT(8)
#define SPI_IO_C_CS_N_POLARITY_0	BIT(4)
#define SPI_IO_C_CS_SELECT(x)		(((x) & 3) << 2)
#define SPI_IO_C_CS_SELECT_MASK		0x000c
#define SPI_IO_C_TRISTATE_CS		BIT(1)
#define SPI_IO_C_NO_TRI_STATE		BIT(0)

/* SPI_ERROR_FLAGS and SPI_ERROR_FLAGS_EN fields */
#define SPI_ERROR_CLK_OVER_RUN		BIT(1)
#define SPI_ERROR_CLK_UNDER_RUN		BIT(0)

#define SPI_NUM_CHIPSELECTS	4

/* high speed mode is when the bus rate is greater than 26 MHz */
#define SPI_HS_MIN_RATE		26000000
#define SPI_MAX_RATE		50000000

#define SPI_DELAY_THRESHOLD	1
#define SPI_DELAY_RETRY		10

struct spi_qup {
	void __iomem		*base;
	struct device		*dev;
	struct clk		*cclk;	/* core clock */
	struct clk		*iclk;	/* interface clock */
	int			irq;
	spinlock_t		lock;

	int			in_fifo_sz;
	int			out_fifo_sz;
	int			in_blk_sz;
	int			out_blk_sz;

	struct spi_transfer	*xfer;
	struct completion	done;
	int			error;
	int			w_size;	/* bytes per SPI word */
	int			tx_bytes;
	int			rx_bytes;
};


static inline bool spi_qup_is_valid_state(struct spi_qup *controller)
{
	u32 opstate = readl_relaxed(controller->base + QUP_STATE);

	return opstate & QUP_STATE_VALID;
}

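/*
 * Request a new state from the QUP state machine: wait (with bounded
 * retries) until the hardware reports STATE_VALID, program the requested
 * state, then wait for it to become valid again.
 */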
static int spi_qup_set_state(struct spi_qup *controller, u32 state)
{
	unsigned long loop;
	u32 cur_state;

	loop = 0;
	while (!spi_qup_is_valid_state(controller)) {

		usleep_range(SPI_DELAY_THRESHOLD, SPI_DELAY_THRESHOLD * 2);

		if (++loop > SPI_DELAY_RETRY)
			return -EIO;
	}

	if (loop)
		dev_dbg(controller->dev, "invalid state for %ld loops, state %d\n",
			loop, state);

	cur_state = readl_relaxed(controller->base + QUP_STATE);
	/*
	 * Per spec: for PAUSE_STATE to RESET_STATE, two writes
	 * of (b10) are required
	 */
	if (((cur_state & QUP_STATE_MASK) == QUP_STATE_PAUSE) &&
	    (state == QUP_STATE_RESET)) {
		writel_relaxed(QUP_STATE_CLEAR, controller->base + QUP_STATE);
		writel_relaxed(QUP_STATE_CLEAR, controller->base + QUP_STATE);
	} else {
		cur_state &= ~QUP_STATE_MASK;
		cur_state |= state;
		writel_relaxed(cur_state, controller->base + QUP_STATE);
	}

	loop = 0;
	while (!spi_qup_is_valid_state(controller)) {

		usleep_range(SPI_DELAY_THRESHOLD, SPI_DELAY_THRESHOLD * 2);

		if (++loop > SPI_DELAY_RETRY)
			return -EIO;
	}

	return 0;
}

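/*
 * Drain the input FIFO: each 32-bit FIFO word carries one SPI word,
 * unpacked most-significant byte first into the rx buffer according to
 * the configured bytes-per-word (w_size).
 */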
static void spi_qup_fifo_read(struct spi_qup *controller,
			      struct spi_transfer *xfer)
{
	u8 *rx_buf = xfer->rx_buf;
	u32 word, state;
	int idx, shift, w_size;

	w_size = controller->w_size;

	while (controller->rx_bytes < xfer->len) {

		state = readl_relaxed(controller->base + QUP_OPERATIONAL);
		if (0 == (state & QUP_OP_IN_FIFO_NOT_EMPTY))
			break;

		word = readl_relaxed(controller->base + QUP_INPUT_FIFO);

		if (!rx_buf) {
			controller->rx_bytes += w_size;
			continue;
		}

		for (idx = 0; idx < w_size; idx++, controller->rx_bytes++) {
			/*
			 * The data format depends on bytes per SPI word:
			 * 4 bytes: 0x12345678
			 * 2 bytes: 0x00001234
			 * 1 byte : 0x00000012
			 */
			shift = BITS_PER_BYTE;
			shift *= (w_size - idx - 1);
			rx_buf[controller->rx_bytes] = word >> shift;
		}
	}
}

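/*
 * Fill the output FIFO: pack tx bytes into the most significant bytes of
 * each 32-bit FIFO word until the transfer has been written out or the
 * FIFO reports full.
 */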
static void spi_qup_fifo_write(struct spi_qup *controller,
			       struct spi_transfer *xfer)
{
	const u8 *tx_buf = xfer->tx_buf;
	u32 word, state, data;
	int idx, w_size;

	w_size = controller->w_size;

	while (controller->tx_bytes < xfer->len) {

		state = readl_relaxed(controller->base + QUP_OPERATIONAL);
		if (state & QUP_OP_OUT_FIFO_FULL)
			break;

		word = 0;
		for (idx = 0; idx < w_size; idx++, controller->tx_bytes++) {

			if (!tx_buf) {
				controller->tx_bytes += w_size;
				break;
			}

			data = tx_buf[controller->tx_bytes];
			word |= data << (BITS_PER_BYTE * (3 - idx));
		}

		writel_relaxed(word, controller->base + QUP_OUTPUT_FIFO);
	}
}

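/*
 * Interrupt handler: acknowledge the error and operational status bits,
 * service the FIFOs on the IN/OUT service flags, and complete the transfer
 * once all rx bytes have arrived or an error was seen.
 */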
static irqreturn_t spi_qup_qup_irq(int irq, void *dev_id)
{
	struct spi_qup *controller = dev_id;
	struct spi_transfer *xfer;
	u32 opflags, qup_err, spi_err;
	unsigned long flags;
	int error = 0;

	spin_lock_irqsave(&controller->lock, flags);
	xfer = controller->xfer;
	controller->xfer = NULL;
	spin_unlock_irqrestore(&controller->lock, flags);

	qup_err = readl_relaxed(controller->base + QUP_ERROR_FLAGS);
	spi_err = readl_relaxed(controller->base + SPI_ERROR_FLAGS);
	opflags = readl_relaxed(controller->base + QUP_OPERATIONAL);

	writel_relaxed(qup_err, controller->base + QUP_ERROR_FLAGS);
	writel_relaxed(spi_err, controller->base + SPI_ERROR_FLAGS);
	writel_relaxed(opflags, controller->base + QUP_OPERATIONAL);

	if (!xfer) {
		dev_err_ratelimited(controller->dev, "unexpected irq %08x %08x %08x\n",
				    qup_err, spi_err, opflags);
		return IRQ_HANDLED;
	}

	if (qup_err) {
		if (qup_err & QUP_ERROR_OUTPUT_OVER_RUN)
			dev_warn(controller->dev, "OUTPUT_OVER_RUN\n");
		if (qup_err & QUP_ERROR_INPUT_UNDER_RUN)
			dev_warn(controller->dev, "INPUT_UNDER_RUN\n");
		if (qup_err & QUP_ERROR_OUTPUT_UNDER_RUN)
			dev_warn(controller->dev, "OUTPUT_UNDER_RUN\n");
		if (qup_err & QUP_ERROR_INPUT_OVER_RUN)
			dev_warn(controller->dev, "INPUT_OVER_RUN\n");

		error = -EIO;
	}

	if (spi_err) {
		if (spi_err & SPI_ERROR_CLK_OVER_RUN)
			dev_warn(controller->dev, "CLK_OVER_RUN\n");
		if (spi_err & SPI_ERROR_CLK_UNDER_RUN)
			dev_warn(controller->dev, "CLK_UNDER_RUN\n");

		error = -EIO;
	}

	if (opflags & QUP_OP_IN_SERVICE_FLAG)
		spi_qup_fifo_read(controller, xfer);

	if (opflags & QUP_OP_OUT_SERVICE_FLAG)
		spi_qup_fifo_write(controller, xfer);

	spin_lock_irqsave(&controller->lock, flags);
	controller->error = error;
	controller->xfer = xfer;
	spin_unlock_irqrestore(&controller->lock, flags);

	if (controller->rx_bytes == xfer->len || error)
		complete(&controller->done);

	return IRQ_HANDLED;
}


/* configure the clock rate, I/O mode, SPI mode and bits per word for one transfer */
static int spi_qup_io_config(struct spi_device *spi, struct spi_transfer *xfer)
{
	struct spi_qup *controller = spi_master_get_devdata(spi->master);
	u32 config, iomode, mode;
	int ret, n_words, w_size;

	if (spi->mode & SPI_LOOP && xfer->len > controller->in_fifo_sz) {
		dev_err(controller->dev, "transfer too big for loopback: %d > %d\n",
			xfer->len, controller->in_fifo_sz);
		return -EIO;
	}

	ret = clk_set_rate(controller->cclk, xfer->speed_hz);
	if (ret) {
		dev_err(controller->dev, "failed to set frequency %d",
			xfer->speed_hz);
		return -EIO;
	}

	if (spi_qup_set_state(controller, QUP_STATE_RESET)) {
		dev_err(controller->dev, "cannot set RESET state\n");
		return -EIO;
	}

	w_size = 4;
	if (xfer->bits_per_word <= 8)
		w_size = 1;
	else if (xfer->bits_per_word <= 16)
		w_size = 2;

	n_words = xfer->len / w_size;
	controller->w_size = w_size;

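	/*
	 * Pick FIFO mode when the whole transfer fits in the input FIFO,
	 * otherwise fall back to BLOCK mode.  FIFO mode uses the READ/WRITE
	 * count registers; BLOCK mode uses the INPUT/OUTPUT count registers.
	 */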
	if (n_words <= (controller->in_fifo_sz / sizeof(u32))) {
		mode = QUP_IO_M_MODE_FIFO;
		writel_relaxed(n_words, controller->base + QUP_MX_READ_CNT);
		writel_relaxed(n_words, controller->base + QUP_MX_WRITE_CNT);
		/* must be zero for FIFO */
		writel_relaxed(0, controller->base + QUP_MX_INPUT_CNT);
		writel_relaxed(0, controller->base + QUP_MX_OUTPUT_CNT);
	} else {
		mode = QUP_IO_M_MODE_BLOCK;
		writel_relaxed(n_words, controller->base + QUP_MX_INPUT_CNT);
		writel_relaxed(n_words, controller->base + QUP_MX_OUTPUT_CNT);
		/* must be zero for BLOCK and BAM */
		writel_relaxed(0, controller->base + QUP_MX_READ_CNT);
		writel_relaxed(0, controller->base + QUP_MX_WRITE_CNT);
	}

	iomode = readl_relaxed(controller->base + QUP_IO_M_MODES);
	/* Set input and output transfer mode */
	iomode &= ~(QUP_IO_M_INPUT_MODE_MASK | QUP_IO_M_OUTPUT_MODE_MASK);
	iomode &= ~(QUP_IO_M_PACK_EN | QUP_IO_M_UNPACK_EN);
	iomode |= (mode << QUP_IO_M_OUTPUT_MODE_MASK_SHIFT);
	iomode |= (mode << QUP_IO_M_INPUT_MODE_MASK_SHIFT);

	writel_relaxed(iomode, controller->base + QUP_IO_M_MODES);

	config = readl_relaxed(controller->base + SPI_CONFIG);

	if (spi->mode & SPI_LOOP)
		config |= SPI_CONFIG_LOOPBACK;
	else
		config &= ~SPI_CONFIG_LOOPBACK;

	if (spi->mode & SPI_CPHA)
		config &= ~SPI_CONFIG_INPUT_FIRST;
	else
		config |= SPI_CONFIG_INPUT_FIRST;

	/*
	 * HS_MODE improves signal stability at high SPI clock rates,
	 * but is invalid in loopback mode.
	 */
	if ((xfer->speed_hz >= SPI_HS_MIN_RATE) && !(spi->mode & SPI_LOOP))
		config |= SPI_CONFIG_HS_MODE;
	else
		config &= ~SPI_CONFIG_HS_MODE;

	writel_relaxed(config, controller->base + SPI_CONFIG);

	config = readl_relaxed(controller->base + QUP_CONFIG);
	config &= ~(QUP_CONFIG_NO_INPUT | QUP_CONFIG_NO_OUTPUT | QUP_CONFIG_N);
	config |= xfer->bits_per_word - 1;
	config |= QUP_CONFIG_SPI_MODE;
	writel_relaxed(config, controller->base + QUP_CONFIG);

	writel_relaxed(0, controller->base + QUP_OPERATIONAL_MASK);
	return 0;
}

static int spi_qup_transfer_one(struct spi_master *master,
				struct spi_device *spi,
				struct spi_transfer *xfer)
{
	struct spi_qup *controller = spi_master_get_devdata(master);
	unsigned long timeout, flags;
	int ret = -EIO;

	ret = spi_qup_io_config(spi, xfer);
	if (ret)
		return ret;

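	/*
	 * Allow roughly 100 times the nominal transfer time (xfer->len * 8
	 * bits at xfer->speed_hz) before giving up on the completion.
	 */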
	timeout = DIV_ROUND_UP(xfer->speed_hz, MSEC_PER_SEC);
	timeout = DIV_ROUND_UP(xfer->len * 8, timeout);
	timeout = 100 * msecs_to_jiffies(timeout);

	reinit_completion(&controller->done);

	spin_lock_irqsave(&controller->lock, flags);
	controller->xfer = xfer;
	controller->error = 0;
	controller->rx_bytes = 0;
	controller->tx_bytes = 0;
	spin_unlock_irqrestore(&controller->lock, flags);

	if (spi_qup_set_state(controller, QUP_STATE_RUN)) {
		dev_warn(controller->dev, "cannot set RUN state\n");
		ret = -EIO;
		goto exit;
	}

	if (spi_qup_set_state(controller, QUP_STATE_PAUSE)) {
		dev_warn(controller->dev, "cannot set PAUSE state\n");
		ret = -EIO;
		goto exit;
	}

	spi_qup_fifo_write(controller, xfer);

	if (spi_qup_set_state(controller, QUP_STATE_RUN)) {
		dev_warn(controller->dev, "cannot set EXECUTE state\n");
		ret = -EIO;
		goto exit;
	}

	if (!wait_for_completion_timeout(&controller->done, timeout))
		ret = -ETIMEDOUT;
exit:
	spi_qup_set_state(controller, QUP_STATE_RESET);
	spin_lock_irqsave(&controller->lock, flags);
	controller->xfer = NULL;
	if (!ret)
		ret = controller->error;
	spin_unlock_irqrestore(&controller->lock, flags);
	return ret;
}

static int spi_qup_probe(struct platform_device *pdev)
{
	struct spi_master *master;
	struct clk *iclk, *cclk;
	struct spi_qup *controller;
	struct resource *res;
	struct device *dev;
	void __iomem *base;
	u32 data, max_freq, iomode;
	int ret, irq, size;

	dev = &pdev->dev;
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	cclk = devm_clk_get(dev, "core");
	if (IS_ERR(cclk))
		return PTR_ERR(cclk);

	iclk = devm_clk_get(dev, "iface");
	if (IS_ERR(iclk))
		return PTR_ERR(iclk);

	/* spi-max-frequency is an optional property */
	if (of_property_read_u32(dev->of_node, "spi-max-frequency", &max_freq))
		max_freq = SPI_MAX_RATE;

	if (!max_freq || max_freq > SPI_MAX_RATE) {
		dev_err(dev, "invalid clock frequency %d\n", max_freq);
		return -ENXIO;
	}

	ret = clk_prepare_enable(cclk);
	if (ret) {
		dev_err(dev, "cannot enable core clock\n");
		return ret;
	}

	ret = clk_prepare_enable(iclk);
	if (ret) {
		clk_disable_unprepare(cclk);
		dev_err(dev, "cannot enable iface clock\n");
		return ret;
	}

	data = readl_relaxed(base + QUP_HW_VERSION);

	if (data < QUP_HW_VERSION_2_1_1) {
		clk_disable_unprepare(cclk);
		clk_disable_unprepare(iclk);
		dev_err(dev, "v.%08x is not supported\n", data);
		return -ENXIO;
	}

	master = spi_alloc_master(dev, sizeof(struct spi_qup));
	if (!master) {
		clk_disable_unprepare(cclk);
		clk_disable_unprepare(iclk);
		dev_err(dev, "cannot allocate master\n");
		return -ENOMEM;
	}

	/* use the num-cs property unless it is absent or out of range */
	if (of_property_read_u16(dev->of_node, "num-cs",
				 &master->num_chipselect) ||
	    (master->num_chipselect > SPI_NUM_CHIPSELECTS))
		master->num_chipselect = SPI_NUM_CHIPSELECTS;

	master->bus_num = pdev->id;
	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP;
	master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
	master->max_speed_hz = max_freq;
	master->transfer_one = spi_qup_transfer_one;
	master->dev.of_node = pdev->dev.of_node;
	master->auto_runtime_pm = true;

	platform_set_drvdata(pdev, master);

	controller = spi_master_get_devdata(master);

	controller->dev = dev;
	controller->base = base;
	controller->iclk = iclk;
	controller->cclk = cclk;
	controller->irq = irq;

	spin_lock_init(&controller->lock);
	init_completion(&controller->done);

	iomode = readl_relaxed(base + QUP_IO_M_MODES);

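	/*
	 * The IO_M_MODES register reports the hardware block and FIFO sizes:
	 * the block-size field is in units of 16 bytes (0 means 4 bytes) and
	 * the FIFO holds block_size * 2^(field + 1) bytes.
	 */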
	size = QUP_IO_M_OUTPUT_BLOCK_SIZE(iomode);
	if (size)
		controller->out_blk_sz = size * 16;
	else
		controller->out_blk_sz = 4;

	size = QUP_IO_M_INPUT_BLOCK_SIZE(iomode);
	if (size)
		controller->in_blk_sz = size * 16;
	else
		controller->in_blk_sz = 4;

	size = QUP_IO_M_OUTPUT_FIFO_SIZE(iomode);
	controller->out_fifo_sz = controller->out_blk_sz * (2 << size);

	size = QUP_IO_M_INPUT_FIFO_SIZE(iomode);
	controller->in_fifo_sz = controller->in_blk_sz * (2 << size);

	dev_info(dev, "v.%08x IN:block:%d, fifo:%d, OUT:block:%d, fifo:%d\n",
		 data, controller->in_blk_sz, controller->in_fifo_sz,
		 controller->out_blk_sz, controller->out_fifo_sz);

	writel_relaxed(1, base + QUP_SW_RESET);

	ret = spi_qup_set_state(controller, QUP_STATE_RESET);
	if (ret) {
		dev_err(dev, "cannot set RESET state\n");
		goto error;
	}

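	/*
	 * Clear the operational, I/O mode and interrupt-mask registers, enable
	 * reporting of SPI clock under/over-run errors, and keep the outputs
	 * driven (no tri-state) with an otherwise default SPI configuration.
	 */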
	writel_relaxed(0, base + QUP_OPERATIONAL);
	writel_relaxed(0, base + QUP_IO_M_MODES);
	writel_relaxed(0, base + QUP_OPERATIONAL_MASK);
	writel_relaxed(SPI_ERROR_CLK_UNDER_RUN | SPI_ERROR_CLK_OVER_RUN,
		       base + SPI_ERROR_FLAGS_EN);

	writel_relaxed(0, base + SPI_CONFIG);
	writel_relaxed(SPI_IO_C_NO_TRI_STATE, base + SPI_IO_CONTROL);

	ret = devm_request_irq(dev, irq, spi_qup_qup_irq,
			       IRQF_TRIGGER_HIGH, pdev->name, controller);
	if (ret)
		goto error;

	pm_runtime_set_autosuspend_delay(dev, MSEC_PER_SEC);
	pm_runtime_use_autosuspend(dev);
	pm_runtime_set_active(dev);
	pm_runtime_enable(dev);

	ret = devm_spi_register_master(dev, master);
	if (ret)
		goto disable_pm;

	return 0;

disable_pm:
	pm_runtime_disable(&pdev->dev);
error:
	clk_disable_unprepare(cclk);
	clk_disable_unprepare(iclk);
	spi_master_put(master);
	return ret;
}

#ifdef CONFIG_PM_RUNTIME
static int spi_qup_pm_suspend_runtime(struct device *device)
{
	struct spi_master *master = dev_get_drvdata(device);
	struct spi_qup *controller = spi_master_get_devdata(master);
	u32 config;

	/* Enable clock auto-gating */
	config = readl(controller->base + QUP_CONFIG);
	config |= QUP_CONFIG_CLOCK_AUTO_GATE;
	writel_relaxed(config, controller->base + QUP_CONFIG);
	return 0;
}

static int spi_qup_pm_resume_runtime(struct device *device)
{
	struct spi_master *master = dev_get_drvdata(device);
	struct spi_qup *controller = spi_master_get_devdata(master);
	u32 config;

	/* Disable clock auto-gating */
	config = readl_relaxed(controller->base + QUP_CONFIG);
	config &= ~QUP_CONFIG_CLOCK_AUTO_GATE;
	writel_relaxed(config, controller->base + QUP_CONFIG);
	return 0;
}
#endif /* CONFIG_PM_RUNTIME */

#ifdef CONFIG_PM_SLEEP
static int spi_qup_suspend(struct device *device)
{
	struct spi_master *master = dev_get_drvdata(device);
	struct spi_qup *controller = spi_master_get_devdata(master);
	int ret;

	ret = spi_master_suspend(master);
	if (ret)
		return ret;

	ret = spi_qup_set_state(controller, QUP_STATE_RESET);
	if (ret)
		return ret;

	clk_disable_unprepare(controller->cclk);
	clk_disable_unprepare(controller->iclk);
	return 0;
}

static int spi_qup_resume(struct device *device)
{
	struct spi_master *master = dev_get_drvdata(device);
	struct spi_qup *controller = spi_master_get_devdata(master);
	int ret;

	ret = clk_prepare_enable(controller->iclk);
	if (ret)
		return ret;

	ret = clk_prepare_enable(controller->cclk);
	if (ret)
		return ret;

	ret = spi_qup_set_state(controller, QUP_STATE_RESET);
	if (ret)
		return ret;

	return spi_master_resume(master);
}
#endif /* CONFIG_PM_SLEEP */

static int spi_qup_remove(struct platform_device *pdev)
{
	struct spi_master *master = dev_get_drvdata(&pdev->dev);
	struct spi_qup *controller = spi_master_get_devdata(master);
	int ret;

	ret = pm_runtime_get_sync(&pdev->dev);
	if (ret < 0)
		return ret;

	ret = spi_qup_set_state(controller, QUP_STATE_RESET);
	if (ret)
		return ret;

	clk_disable_unprepare(controller->cclk);
	clk_disable_unprepare(controller->iclk);

	pm_runtime_put_noidle(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	return 0;
}

static const struct of_device_id spi_qup_dt_match[] = {
	{ .compatible = "qcom,spi-qup-v2.1.1", },
	{ .compatible = "qcom,spi-qup-v2.2.1", },
	{ }
};
MODULE_DEVICE_TABLE(of, spi_qup_dt_match);

static const struct dev_pm_ops spi_qup_dev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(spi_qup_suspend, spi_qup_resume)
	SET_RUNTIME_PM_OPS(spi_qup_pm_suspend_runtime,
			   spi_qup_pm_resume_runtime,
			   NULL)
};

static struct platform_driver spi_qup_driver = {
	.driver = {
		.name		= "spi_qup",
		.owner		= THIS_MODULE,
		.pm		= &spi_qup_dev_pm_ops,
		.of_match_table	= spi_qup_dt_match,
	},
	.probe = spi_qup_probe,
	.remove = spi_qup_remove,
};
module_platform_driver(spi_qup_driver);

MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:spi_qup");