/* OMAP SSI port driver.
 *
 * Copyright (C) 2010 Nokia Corporation. All rights reserved.
 * Copyright (C) 2014 Sebastian Reichel <sre@kernel.org>
 *
 * Contact: Carlos Chinea <carlos.chinea@nokia.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 */

#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/pm_runtime.h>
#include <linux/delay.h>

#include <linux/gpio/consumer.h>
#include <linux/pinctrl/consumer.h>
#include <linux/debugfs.h>

#include "omap_ssi_regs.h"
#include "omap_ssi.h"
static inline int hsi_dummy_msg(struct hsi_msg *msg __maybe_unused)
{
	return 0;
}

static inline int hsi_dummy_cl(struct hsi_client *cl __maybe_unused)
{
	return 0;
}

static inline unsigned int ssi_wakein(struct hsi_port *port)
{
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	return gpiod_get_value(omap_port->wake_gpio);
}
#ifdef CONFIG_DEBUG_FS
static void ssi_debug_remove_port(struct hsi_port *port)
{
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);

	debugfs_remove_recursive(omap_port->dir);
}
static int ssi_debug_port_show(struct seq_file *m, void *p __maybe_unused)
{
	struct hsi_port *port = m->private;
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	void __iomem *base = omap_ssi->sys;
	unsigned int ch;

	pm_runtime_get_sync(omap_port->pdev);
	if (omap_port->wake_irq > 0)
		seq_printf(m, "CAWAKE\t\t: %d\n", ssi_wakein(port));
	seq_printf(m, "WAKE\t\t: 0x%08x\n",
				readl(base + SSI_WAKE_REG(port->num)));
	seq_printf(m, "MPU_ENABLE_IRQ%d\t: 0x%08x\n", 0,
			readl(base + SSI_MPU_ENABLE_REG(port->num, 0)));
	seq_printf(m, "MPU_STATUS_IRQ%d\t: 0x%08x\n", 0,
			readl(base + SSI_MPU_STATUS_REG(port->num, 0)));
	/* SST */
	base = omap_port->sst_base;
	seq_puts(m, "\nSST\n===\n");
	seq_printf(m, "ID SST\t\t: 0x%08x\n",
				readl(base + SSI_SST_ID_REG));
	seq_printf(m, "MODE\t\t: 0x%08x\n",
				readl(base + SSI_SST_MODE_REG));
	seq_printf(m, "FRAMESIZE\t: 0x%08x\n",
				readl(base + SSI_SST_FRAMESIZE_REG));
	seq_printf(m, "DIVISOR\t\t: 0x%08x\n",
				readl(base + SSI_SST_DIVISOR_REG));
	seq_printf(m, "CHANNELS\t: 0x%08x\n",
				readl(base + SSI_SST_CHANNELS_REG));
	seq_printf(m, "ARBMODE\t\t: 0x%08x\n",
				readl(base + SSI_SST_ARBMODE_REG));
	seq_printf(m, "TXSTATE\t\t: 0x%08x\n",
				readl(base + SSI_SST_TXSTATE_REG));
	seq_printf(m, "BUFSTATE\t: 0x%08x\n",
				readl(base + SSI_SST_BUFSTATE_REG));
	seq_printf(m, "BREAK\t\t: 0x%08x\n",
				readl(base + SSI_SST_BREAK_REG));
	for (ch = 0; ch < omap_port->channels; ch++) {
		seq_printf(m, "BUFFER_CH%d\t: 0x%08x\n", ch,
				readl(base + SSI_SST_BUFFER_CH_REG(ch)));
	}
	/* SSR */
	base = omap_port->ssr_base;
	seq_puts(m, "\nSSR\n===\n");
	seq_printf(m, "ID SSR\t\t: 0x%08x\n",
				readl(base + SSI_SSR_ID_REG));
	seq_printf(m, "MODE\t\t: 0x%08x\n",
				readl(base + SSI_SSR_MODE_REG));
	seq_printf(m, "FRAMESIZE\t: 0x%08x\n",
				readl(base + SSI_SSR_FRAMESIZE_REG));
	seq_printf(m, "CHANNELS\t: 0x%08x\n",
				readl(base + SSI_SSR_CHANNELS_REG));
	seq_printf(m, "TIMEOUT\t\t: 0x%08x\n",
				readl(base + SSI_SSR_TIMEOUT_REG));
	seq_printf(m, "RXSTATE\t\t: 0x%08x\n",
				readl(base + SSI_SSR_RXSTATE_REG));
	seq_printf(m, "BUFSTATE\t: 0x%08x\n",
				readl(base + SSI_SSR_BUFSTATE_REG));
	seq_printf(m, "BREAK\t\t: 0x%08x\n",
				readl(base + SSI_SSR_BREAK_REG));
	seq_printf(m, "ERROR\t\t: 0x%08x\n",
				readl(base + SSI_SSR_ERROR_REG));
	seq_printf(m, "ERRORACK\t: 0x%08x\n",
				readl(base + SSI_SSR_ERRORACK_REG));
	for (ch = 0; ch < omap_port->channels; ch++) {
		seq_printf(m, "BUFFER_CH%d\t: 0x%08x\n", ch,
				readl(base + SSI_SSR_BUFFER_CH_REG(ch)));
	}
	pm_runtime_put_sync(omap_port->pdev);

	return 0;
}
static int ssi_port_regs_open(struct inode *inode, struct file *file)
{
	return single_open(file, ssi_debug_port_show, inode->i_private);
}

static const struct file_operations ssi_port_regs_fops = {
	.open		= ssi_port_regs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
static int ssi_div_get(void *data, u64 *val)
{
	struct hsi_port *port = data;
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);

	pm_runtime_get_sync(omap_port->pdev);
	*val = readl(omap_port->sst_base + SSI_SST_DIVISOR_REG);
	pm_runtime_put_sync(omap_port->pdev);

	return 0;
}
static int ssi_div_set(void *data, u64 val)
{
	struct hsi_port *port = data;
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);

	if (val > 127)
		return -EINVAL;

	pm_runtime_get_sync(omap_port->pdev);
	writel(val, omap_port->sst_base + SSI_SST_DIVISOR_REG);
	omap_port->sst.divisor = val;
	pm_runtime_put_sync(omap_port->pdev);

	return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(ssi_sst_div_fops, ssi_div_get, ssi_div_set, "%llu\n");
static int ssi_debug_add_port(struct omap_ssi_port *omap_port,
			      struct dentry *dir)
{
	struct hsi_port *port = to_hsi_port(omap_port->dev);

	dir = debugfs_create_dir(dev_name(omap_port->dev), dir);
	if (!dir)
		return -ENOMEM;
	omap_port->dir = dir;
	debugfs_create_file("regs", S_IRUGO, dir, port, &ssi_port_regs_fops);
	dir = debugfs_create_dir("sst", dir);
	if (!dir)
		return -ENOMEM;
	debugfs_create_file("divisor", S_IRUGO | S_IWUSR, dir, port,
			    &ssi_sst_div_fops);

	return 0;
}
#endif
static int ssi_claim_lch(struct hsi_msg *msg)
{
	struct hsi_port *port = hsi_get_port(msg->cl);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	int lch;

	for (lch = 0; lch < SSI_MAX_GDD_LCH; lch++)
		if (!omap_ssi->gdd_trn[lch].msg) {
			omap_ssi->gdd_trn[lch].msg = msg;
			omap_ssi->gdd_trn[lch].sg = msg->sgt.sgl;
			return lch;
		}

	return -EBUSY;
}
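
/*
 * Note on GDD channel allocation: the scan above hands out the first free
 * logical channel and records both the message and its scatterlist in
 * omap_ssi->gdd_trn[], so the DMA completion path can map a GDD interrupt
 * back to the hsi_msg that owns it. A -EBUSY return simply means all
 * SSI_MAX_GDD_LCH channels are in flight; the caller (ssi_start_transfer())
 * then falls back to PIO for this message.
 */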
static int ssi_start_dma(struct hsi_msg *msg, int lch)
{
	struct hsi_port *port = hsi_get_port(msg->cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	void __iomem *gdd = omap_ssi->gdd;
	int err;
	u16 csdp;
	u16 ccr;
	u32 s_addr;
	u32 d_addr;
	u32 tmp;

	if (msg->ttype == HSI_MSG_READ) {
		err = dma_map_sg(&ssi->device, msg->sgt.sgl, msg->sgt.nents,
							DMA_FROM_DEVICE);
		if (err < 0) {
			dev_dbg(&ssi->device, "DMA map SG failed !\n");
			return err;
		}
		csdp = SSI_DST_BURST_4x32_BIT | SSI_DST_MEMORY_PORT |
			SSI_SRC_SINGLE_ACCESS0 | SSI_SRC_PERIPHERAL_PORT |
			SSI_DATA_TYPE_S32;
		ccr = msg->channel + 0x10 + (port->num * 8); /* Sync */
		ccr |= SSI_DST_AMODE_POSTINC | SSI_SRC_AMODE_CONST |
			SSI_CCR_ENABLE;
		s_addr = omap_port->ssr_dma +
					SSI_SSR_BUFFER_CH_REG(msg->channel);
		d_addr = sg_dma_address(msg->sgt.sgl);
	} else {
		err = dma_map_sg(&ssi->device, msg->sgt.sgl, msg->sgt.nents,
							DMA_TO_DEVICE);
		if (err < 0) {
			dev_dbg(&ssi->device, "DMA map SG failed !\n");
			return err;
		}
		csdp = SSI_SRC_BURST_4x32_BIT | SSI_SRC_MEMORY_PORT |
			SSI_DST_SINGLE_ACCESS0 | SSI_DST_PERIPHERAL_PORT |
			SSI_DATA_TYPE_S32;
		ccr = (msg->channel + 1 + (port->num * 8)) & 0xf; /* Sync */
		ccr |= SSI_SRC_AMODE_POSTINC | SSI_DST_AMODE_CONST |
			SSI_CCR_ENABLE;
		s_addr = sg_dma_address(msg->sgt.sgl);
		d_addr = omap_port->sst_dma +
					SSI_SST_BUFFER_CH_REG(msg->channel);
	}
	dev_dbg(&ssi->device, "lch %d csdp %08x ccr %04x s_addr %08x d_addr %08x\n",
		lch, csdp, ccr, s_addr, d_addr);

	/* Hold clocks during the transfer */
	pm_runtime_get_sync(omap_port->pdev);

	writew_relaxed(csdp, gdd + SSI_GDD_CSDP_REG(lch));
	writew_relaxed(SSI_BLOCK_IE | SSI_TOUT_IE, gdd + SSI_GDD_CICR_REG(lch));
	writel_relaxed(d_addr, gdd + SSI_GDD_CDSA_REG(lch));
	writel_relaxed(s_addr, gdd + SSI_GDD_CSSA_REG(lch));
	writew_relaxed(SSI_BYTES_TO_FRAMES(msg->sgt.sgl->length),
						gdd + SSI_GDD_CEN_REG(lch));

	spin_lock_bh(&omap_ssi->lock);
	tmp = readl(omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
	tmp |= SSI_GDD_LCH(lch);
	writel_relaxed(tmp, omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
	spin_unlock_bh(&omap_ssi->lock);
	writew(ccr, gdd + SSI_GDD_CCR_REG(lch));
	msg->status = HSI_STATUS_PROCEEDING;

	return 0;
}
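
/*
 * Illustrative GDD programming walk-through (the numbers are examples, not
 * taken from a real trace): for a read on channel 2, assuming port->num == 1,
 * the hardware sync code computed above is 2 + 0x10 + (1 * 8) = 0x1a, the
 * source is the SSR buffer register of channel 2 and the destination is the
 * scatterlist's DMA address. All descriptor registers (CSDP/CICR/CDSA/CSSA/
 * CEN) use relaxed writes; the final CCR write that enables the channel is
 * the only non-relaxed one, so the channel cannot start before its
 * configuration is posted.
 */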
static int ssi_start_pio(struct hsi_msg *msg)
{
	struct hsi_port *port = hsi_get_port(msg->cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	u32 val;

	pm_runtime_get_sync(omap_port->pdev);
	if (msg->ttype == HSI_MSG_WRITE) {
		val = SSI_DATAACCEPT(msg->channel);
		/* Hold clocks for pio writes */
		pm_runtime_get_sync(omap_port->pdev);
	} else {
		val = SSI_DATAAVAILABLE(msg->channel) | SSI_ERROROCCURED;
	}
	dev_dbg(&port->device, "Single %s transfer\n",
						msg->ttype ? "write" : "read");
	val |= readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	writel(val, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	pm_runtime_put_sync(omap_port->pdev);
	msg->actual_len = 0;
	msg->status = HSI_STATUS_PROCEEDING;

	return 0;
}
static int ssi_start_transfer(struct list_head *queue)
{
	struct hsi_msg *msg;
	int lch = -1;

	if (list_empty(queue))
		return 0;
	msg = list_first_entry(queue, struct hsi_msg, link);
	if (msg->status != HSI_STATUS_QUEUED)
		return 0;
	if ((msg->sgt.nents) && (msg->sgt.sgl->length > sizeof(u32)))
		lch = ssi_claim_lch(msg);
	if (lch >= 0)
		return ssi_start_dma(msg, lch);
	else
		return ssi_start_pio(msg);
}
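
/*
 * The DMA/PIO split above: a message is only worth a GDD channel when it has
 * a scatterlist and more than one 32-bit frame to move (sgl->length >
 * sizeof(u32)). Anything smaller, or anything left over when ssi_claim_lch()
 * returns a negative channel number, is serviced through the FIFO registers
 * in PIO mode instead.
 */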
static int ssi_async_break(struct hsi_msg *msg)
{
	struct hsi_port *port = hsi_get_port(msg->cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	int err = 0;
	u32 tmp;

	pm_runtime_get_sync(omap_port->pdev);
	if (msg->ttype == HSI_MSG_WRITE) {
		if (omap_port->sst.mode != SSI_MODE_FRAME) {
			err = -EINVAL;
			goto out;
		}
		writel(1, omap_port->sst_base + SSI_SST_BREAK_REG);
		msg->status = HSI_STATUS_COMPLETED;
		msg->complete(msg);
	} else {
		if (omap_port->ssr.mode != SSI_MODE_FRAME) {
			err = -EINVAL;
			goto out;
		}
		spin_lock_bh(&omap_port->lock);
		tmp = readl(omap_ssi->sys +
					SSI_MPU_ENABLE_REG(port->num, 0));
		writel(tmp | SSI_BREAKDETECTED,
			omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
		msg->status = HSI_STATUS_PROCEEDING;
		list_add_tail(&msg->link, &omap_port->brkqueue);
		spin_unlock_bh(&omap_port->lock);
	}
out:
	pm_runtime_put_sync(omap_port->pdev);

	return err;
}
static int ssi_async(struct hsi_msg *msg)
{
	struct hsi_port *port = hsi_get_port(msg->cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct list_head *queue;
	int err = 0;

	BUG_ON(!msg);

	if (msg->sgt.nents > 1)
		return -ENOSYS; /* TODO: Add sg support */

	if (msg->break_frame)
		return ssi_async_break(msg);

	if (msg->ttype) {
		BUG_ON(msg->channel >= omap_port->sst.channels);
		queue = &omap_port->txqueue[msg->channel];
	} else {
		BUG_ON(msg->channel >= omap_port->ssr.channels);
		queue = &omap_port->rxqueue[msg->channel];
	}
	msg->status = HSI_STATUS_QUEUED;
	spin_lock_bh(&omap_port->lock);
	list_add_tail(&msg->link, queue);
	err = ssi_start_transfer(queue);
	if (err < 0) {
		list_del(&msg->link);
		msg->status = HSI_STATUS_ERROR;
	}
	spin_unlock_bh(&omap_port->lock);
	dev_dbg(&port->device, "msg status %d ttype %d ch %d\n",
				msg->status, msg->ttype, msg->channel);

	return err;
}
static u32 ssi_calculate_div(struct hsi_controller *ssi)
{
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	u32 tx_fckrate = (u32) omap_ssi->fck_rate;

	/* / 2 : SSI TX clock is always half of the SSI functional clock */
	tx_fckrate >>= 1;
	/* Round down when tx_fckrate % omap_ssi->max_speed == 0 */
	tx_fckrate--;
	dev_dbg(&ssi->device, "TX div %d for fck_rate %lu Khz speed %d Kb/s\n",
		tx_fckrate / omap_ssi->max_speed, omap_ssi->fck_rate,
		omap_ssi->max_speed);

	return tx_fckrate / omap_ssi->max_speed;
}
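
/*
 * Worked example for the divisor math above (illustrative numbers): with a
 * 96000 KHz functional clock the TX reference is 48000 KHz after the shift.
 * For max_speed == 4000 Kb/s the plain quotient 48000 / 4000 would be 12,
 * but the tx_fckrate-- makes it 47999 / 4000 == 11, which is exactly the
 * "round down when tx_fckrate % max_speed == 0" behaviour the comment in
 * the function asks for; non-dividing rates are unaffected by the decrement.
 */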
static void ssi_flush_queue(struct list_head *queue, struct hsi_client *cl)
{
	struct list_head *node, *tmp;
	struct hsi_msg *msg;

	list_for_each_safe(node, tmp, queue) {
		msg = list_entry(node, struct hsi_msg, link);
		if ((cl) && (cl != msg->cl))
			continue;
		list_del(node);
		pr_debug("flush queue: ch %d, msg %p len %d type %d ctxt %p\n",
			msg->channel, msg, msg->sgt.sgl->length,
					msg->ttype, msg->context);
		if (msg->destructor)
			msg->destructor(msg);
		else
			hsi_free_msg(msg);
	}
}
static int ssi_setup(struct hsi_client *cl)
{
	struct hsi_port *port = to_hsi_port(cl->device.parent);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	void __iomem *sst = omap_port->sst_base;
	void __iomem *ssr = omap_port->ssr_base;
	u32 div;
	u32 val;
	int err = 0;

	pm_runtime_get_sync(omap_port->pdev);
	spin_lock_bh(&omap_port->lock);
	if (cl->tx_cfg.speed)
		omap_ssi->max_speed = cl->tx_cfg.speed;
	div = ssi_calculate_div(ssi);
	if (div > SSI_MAX_DIVISOR) {
		dev_err(&cl->device, "Invalid TX speed %d Mb/s (div %d)\n",
						cl->tx_cfg.speed, div);
		err = -EINVAL;
		goto out;
	}
	/* Set TX/RX module to sleep to stop TX/RX during cfg update */
	writel_relaxed(SSI_MODE_SLEEP, sst + SSI_SST_MODE_REG);
	writel_relaxed(SSI_MODE_SLEEP, ssr + SSI_SSR_MODE_REG);
	/* Flush posted write */
	val = readl(ssr + SSI_SSR_MODE_REG);
	/* TX */
	writel_relaxed(31, sst + SSI_SST_FRAMESIZE_REG);
	writel_relaxed(div, sst + SSI_SST_DIVISOR_REG);
	writel_relaxed(cl->tx_cfg.num_hw_channels, sst + SSI_SST_CHANNELS_REG);
	writel_relaxed(cl->tx_cfg.arb_mode, sst + SSI_SST_ARBMODE_REG);
	writel_relaxed(cl->tx_cfg.mode, sst + SSI_SST_MODE_REG);
	/* RX */
	writel_relaxed(31, ssr + SSI_SSR_FRAMESIZE_REG);
	writel_relaxed(cl->rx_cfg.num_hw_channels, ssr + SSI_SSR_CHANNELS_REG);
	writel_relaxed(0, ssr + SSI_SSR_TIMEOUT_REG);
	/* Cleanup the break queue if we leave FRAME mode */
	if ((omap_port->ssr.mode == SSI_MODE_FRAME) &&
		(cl->rx_cfg.mode != SSI_MODE_FRAME))
		ssi_flush_queue(&omap_port->brkqueue, cl);
	writel_relaxed(cl->rx_cfg.mode, ssr + SSI_SSR_MODE_REG);
	omap_port->channels = max(cl->rx_cfg.num_hw_channels,
				  cl->tx_cfg.num_hw_channels);
	/* Shadow registering for OFF mode */
	/* SST */
	omap_port->sst.divisor = div;
	omap_port->sst.frame_size = 31;
	omap_port->sst.channels = cl->tx_cfg.num_hw_channels;
	omap_port->sst.arb_mode = cl->tx_cfg.arb_mode;
	omap_port->sst.mode = cl->tx_cfg.mode;
	/* SSR */
	omap_port->ssr.frame_size = 31;
	omap_port->ssr.timeout = 0;
	omap_port->ssr.channels = cl->rx_cfg.num_hw_channels;
	omap_port->ssr.mode = cl->rx_cfg.mode;
out:
	spin_unlock_bh(&omap_port->lock);
	pm_runtime_put_sync(omap_port->pdev);

	return err;
}
static int ssi_flush(struct hsi_client *cl)
{
	struct hsi_port *port = hsi_get_port(cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	struct hsi_msg *msg;
	void __iomem *sst = omap_port->sst_base;
	void __iomem *ssr = omap_port->ssr_base;
	unsigned int i;
	u32 err;

	pm_runtime_get_sync(omap_port->pdev);
	spin_lock_bh(&omap_port->lock);

	/* stop all ssi communication */
	pinctrl_pm_select_idle_state(omap_port->pdev);
	udelay(1); /* wait for racing frames */

	/* Stop all DMA transfers */
	for (i = 0; i < SSI_MAX_GDD_LCH; i++) {
		msg = omap_ssi->gdd_trn[i].msg;
		if (!msg || (port != hsi_get_port(msg->cl)))
			continue;
		writew_relaxed(0, omap_ssi->gdd + SSI_GDD_CCR_REG(i));
		if (msg->ttype == HSI_MSG_READ)
			pm_runtime_put_sync(omap_port->pdev);
		omap_ssi->gdd_trn[i].msg = NULL;
	}
	/* Flush all SST buffers */
	writel_relaxed(0, sst + SSI_SST_BUFSTATE_REG);
	writel_relaxed(0, sst + SSI_SST_TXSTATE_REG);
	/* Flush all SSR buffers */
	writel_relaxed(0, ssr + SSI_SSR_RXSTATE_REG);
	writel_relaxed(0, ssr + SSI_SSR_BUFSTATE_REG);
	/* Flush all errors */
	err = readl(ssr + SSI_SSR_ERROR_REG);
	writel_relaxed(err, ssr + SSI_SSR_ERRORACK_REG);
	/* Flush break */
	writel_relaxed(0, ssr + SSI_SSR_BREAK_REG);
	/* Clear interrupts */
	writel_relaxed(0, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	writel_relaxed(0xffffff00,
			omap_ssi->sys + SSI_MPU_STATUS_REG(port->num, 0));
	writel_relaxed(0, omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
	writel(0xff, omap_ssi->sys + SSI_GDD_MPU_IRQ_STATUS_REG);
	/* Dequeue all pending requests */
	for (i = 0; i < omap_port->channels; i++) {
		/* Release write clocks */
		if (!list_empty(&omap_port->txqueue[i]))
			pm_runtime_put_sync(omap_port->pdev);
		ssi_flush_queue(&omap_port->txqueue[i], NULL);
		ssi_flush_queue(&omap_port->rxqueue[i], NULL);
	}
	ssi_flush_queue(&omap_port->brkqueue, NULL);

	/* Resume SSI communication */
	pinctrl_pm_select_default_state(omap_port->pdev);

	spin_unlock_bh(&omap_port->lock);
	pm_runtime_put_sync(omap_port->pdev);

	return 0;
}
static int ssi_start_tx(struct hsi_client *cl)
{
	struct hsi_port *port = hsi_get_port(cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);

	dev_dbg(&port->device, "Wake out high %d\n", omap_port->wk_refcount);

	spin_lock_bh(&omap_port->wk_lock);
	if (omap_port->wk_refcount++) {
		spin_unlock_bh(&omap_port->wk_lock);
		return 0;
	}
	pm_runtime_get_sync(omap_port->pdev); /* Grab clocks */
	writel(SSI_WAKE(0), omap_ssi->sys + SSI_SET_WAKE_REG(port->num));
	spin_unlock_bh(&omap_port->wk_lock);

	return 0;
}
static int ssi_stop_tx(struct hsi_client *cl)
{
	struct hsi_port *port = hsi_get_port(cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);

	dev_dbg(&port->device, "Wake out low %d\n", omap_port->wk_refcount);

	spin_lock_bh(&omap_port->wk_lock);
	BUG_ON(!omap_port->wk_refcount);
	if (--omap_port->wk_refcount) {
		spin_unlock_bh(&omap_port->wk_lock);
		return 0;
	}
	writel(SSI_WAKE(0), omap_ssi->sys + SSI_CLEAR_WAKE_REG(port->num));
	pm_runtime_put_sync(omap_port->pdev); /* Release clocks */
	spin_unlock_bh(&omap_port->wk_lock);

	return 0;
}
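
/*
 * ssi_start_tx()/ssi_stop_tx() pairing: wk_refcount counts the clients that
 * currently need the TX wake line. Only the 0 -> 1 transition raises the
 * wake line and takes a pm_runtime reference, and only the 1 -> 0 transition
 * drops them again, so nested start/stop calls from several clients are
 * safe; the BUG_ON above catches an unbalanced stop.
 */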
static void ssi_transfer(struct omap_ssi_port *omap_port,
			 struct list_head *queue)
{
	struct hsi_msg *msg;
	int err = -1;

	spin_lock_bh(&omap_port->lock);
	while (err < 0) {
		err = ssi_start_transfer(queue);
		if (err < 0) {
			msg = list_first_entry(queue, struct hsi_msg, link);
			msg->status = HSI_STATUS_ERROR;
			msg->actual_len = 0;
			list_del(&msg->link);
			spin_unlock_bh(&omap_port->lock);
			msg->complete(msg);
			spin_lock_bh(&omap_port->lock);
		}
	}
	spin_unlock_bh(&omap_port->lock);
}
static void ssi_cleanup_queues(struct hsi_client *cl)
{
	struct hsi_port *port = hsi_get_port(cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	struct hsi_msg *msg;
	unsigned int i;
	u32 rxbufstate = 0;
	u32 txbufstate = 0;
	u32 status = SSI_ERROROCCURED;
	u32 tmp;

	ssi_flush_queue(&omap_port->brkqueue, cl);
	if (list_empty(&omap_port->brkqueue))
		status |= SSI_BREAKDETECTED;

	for (i = 0; i < omap_port->channels; i++) {
		if (list_empty(&omap_port->txqueue[i]))
			continue;
		msg = list_first_entry(&omap_port->txqueue[i], struct hsi_msg,
									link);
		if ((msg->cl == cl) && (msg->status == HSI_STATUS_PROCEEDING)) {
			txbufstate |= (1 << i);
			status |= SSI_DATAACCEPT(i);
			/* Release the clocks writes, also GDD ones */
			pm_runtime_put_sync(omap_port->pdev);
		}
		ssi_flush_queue(&omap_port->txqueue[i], cl);
	}
	for (i = 0; i < omap_port->channels; i++) {
		if (list_empty(&omap_port->rxqueue[i]))
			continue;
		msg = list_first_entry(&omap_port->rxqueue[i], struct hsi_msg,
									link);
		if ((msg->cl == cl) && (msg->status == HSI_STATUS_PROCEEDING)) {
			rxbufstate |= (1 << i);
			status |= SSI_DATAAVAILABLE(i);
		}
		ssi_flush_queue(&omap_port->rxqueue[i], cl);
		/* Check if we keep the error detection interrupt armed */
		if (!list_empty(&omap_port->rxqueue[i]))
			status &= ~SSI_ERROROCCURED;
	}
	/* Cleanup write buffers */
	tmp = readl(omap_port->sst_base + SSI_SST_BUFSTATE_REG);
	tmp &= ~txbufstate;
	writel_relaxed(tmp, omap_port->sst_base + SSI_SST_BUFSTATE_REG);
	/* Cleanup read buffers */
	tmp = readl(omap_port->ssr_base + SSI_SSR_BUFSTATE_REG);
	tmp &= ~rxbufstate;
	writel_relaxed(tmp, omap_port->ssr_base + SSI_SSR_BUFSTATE_REG);
	/* Disarm and ack pending interrupts */
	tmp = readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	tmp &= ~status;
	writel_relaxed(tmp, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	writel_relaxed(status, omap_ssi->sys +
			SSI_MPU_STATUS_REG(port->num, 0));
}
static void ssi_cleanup_gdd(struct hsi_controller *ssi, struct hsi_client *cl)
{
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	struct hsi_port *port = hsi_get_port(cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_msg *msg;
	unsigned int i;
	u32 val = 0;
	u32 tmp;

	for (i = 0; i < SSI_MAX_GDD_LCH; i++) {
		msg = omap_ssi->gdd_trn[i].msg;
		if ((!msg) || (msg->cl != cl))
			continue;
		writew_relaxed(0, omap_ssi->gdd + SSI_GDD_CCR_REG(i));
		val |= (1 << i);
		/*
		 * Clock references for write will be handled in
		 * ssi_cleanup_queues
		 */
		if (msg->ttype == HSI_MSG_READ)
			pm_runtime_put_sync(omap_port->pdev);
		omap_ssi->gdd_trn[i].msg = NULL;
	}
	tmp = readl_relaxed(omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
	tmp &= ~val;
	writel_relaxed(tmp, omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
	writel(val, omap_ssi->sys + SSI_GDD_MPU_IRQ_STATUS_REG);
}
static int ssi_set_port_mode(struct omap_ssi_port *omap_port, u32 mode)
{
	writel(mode, omap_port->sst_base + SSI_SST_MODE_REG);
	writel(mode, omap_port->ssr_base + SSI_SSR_MODE_REG);
	/* OCP barrier */
	mode = readl(omap_port->ssr_base + SSI_SSR_MODE_REG);

	return 0;
}
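
/*
 * The readback of SSI_SSR_MODE_REG above is not dead code: both mode writes
 * are posted on the interconnect, and reading back the last register written
 * forces them to complete before the caller proceeds (the same "flush posted
 * write" trick ssi_setup() uses after putting both modules to sleep).
 */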
static int ssi_release(struct hsi_client *cl)
{
	struct hsi_port *port = hsi_get_port(cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);

	spin_lock_bh(&omap_port->lock);
	pm_runtime_get_sync(omap_port->pdev);
	/* Stop all the pending DMA requests for that client */
	ssi_cleanup_gdd(ssi, cl);
	/* Now cleanup all the queues */
	ssi_cleanup_queues(cl);
	pm_runtime_put_sync(omap_port->pdev);
	/* If it is the last client of the port, do extra checks and cleanup */
	if (port->claimed <= 1) {
		/*
		 * Drop the clock reference for the incoming wake line
		 * if it is still kept high by the other side.
		 */
		if (test_and_clear_bit(SSI_WAKE_EN, &omap_port->flags))
			pm_runtime_put_sync(omap_port->pdev);
		pm_runtime_get_sync(omap_port->pdev);
		/* Stop any SSI TX/RX without a client */
		ssi_set_port_mode(omap_port, SSI_MODE_SLEEP);
		omap_port->sst.mode = SSI_MODE_SLEEP;
		omap_port->ssr.mode = SSI_MODE_SLEEP;
		pm_runtime_put_sync(omap_port->pdev);
		WARN_ON(omap_port->wk_refcount != 0);
	}
	spin_unlock_bh(&omap_port->lock);

	return 0;
}
static void ssi_error(struct hsi_port *port)
{
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	struct hsi_msg *msg;
	unsigned int i;
	u32 err;
	u32 val;
	u32 tmp;

	/* ACK error */
	err = readl(omap_port->ssr_base + SSI_SSR_ERROR_REG);
	dev_err(&port->device, "SSI error: 0x%02x\n", err);
	if (!err) {
		dev_dbg(&port->device, "spurious SSI error ignored!\n");
		return;
	}
	spin_lock(&omap_ssi->lock);
	/* Cancel all GDD read transfers */
	for (i = 0, val = 0; i < SSI_MAX_GDD_LCH; i++) {
		msg = omap_ssi->gdd_trn[i].msg;
		if ((msg) && (msg->ttype == HSI_MSG_READ)) {
			writew_relaxed(0, omap_ssi->gdd + SSI_GDD_CCR_REG(i));
			val |= (1 << i);
			omap_ssi->gdd_trn[i].msg = NULL;
		}
	}
	tmp = readl(omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
	tmp &= ~val;
	writel_relaxed(tmp, omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
	spin_unlock(&omap_ssi->lock);
	/* Cancel all PIO read transfers */
	spin_lock(&omap_port->lock);
	tmp = readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	tmp &= 0xfeff00ff; /* Disable error & all dataavailable interrupts */
	writel_relaxed(tmp, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	/* ACK error */
	writel_relaxed(err, omap_port->ssr_base + SSI_SSR_ERRORACK_REG);
	writel_relaxed(SSI_ERROROCCURED,
			omap_ssi->sys + SSI_MPU_STATUS_REG(port->num, 0));
	/* Signal the error to all current pending read requests */
	for (i = 0; i < omap_port->channels; i++) {
		if (list_empty(&omap_port->rxqueue[i]))
			continue;
		msg = list_first_entry(&omap_port->rxqueue[i], struct hsi_msg,
									link);
		list_del(&msg->link);
		msg->status = HSI_STATUS_ERROR;
		spin_unlock(&omap_port->lock);
		msg->complete(msg);
		/* Now restart queued reads if any */
		ssi_transfer(omap_port, &omap_port->rxqueue[i]);
		spin_lock(&omap_port->lock);
	}
	spin_unlock(&omap_port->lock);
}
static void ssi_break_complete(struct hsi_port *port)
{
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	struct hsi_msg *msg;
	struct hsi_msg *tmp;
	u32 val;

	dev_dbg(&port->device, "HWBREAK received\n");

	spin_lock(&omap_port->lock);
	val = readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	val &= ~SSI_BREAKDETECTED;
	writel_relaxed(val, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	writel_relaxed(0, omap_port->ssr_base + SSI_SSR_BREAK_REG);
	writel(SSI_BREAKDETECTED,
			omap_ssi->sys + SSI_MPU_STATUS_REG(port->num, 0));
	spin_unlock(&omap_port->lock);

	list_for_each_entry_safe(msg, tmp, &omap_port->brkqueue, link) {
		msg->status = HSI_STATUS_COMPLETED;
		spin_lock(&omap_port->lock);
		list_del(&msg->link);
		spin_unlock(&omap_port->lock);
		msg->complete(msg);
	}
}
static void ssi_pio_complete(struct hsi_port *port, struct list_head *queue)
{
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_msg *msg;
	u32 *buf;
	u32 reg;
	u32 val;

	spin_lock(&omap_port->lock);
	msg = list_first_entry(queue, struct hsi_msg, link);
	if ((!msg->sgt.nents) || (!msg->sgt.sgl->length)) {
		msg->actual_len = 0;
		msg->status = HSI_STATUS_PENDING;
	}
	if (msg->ttype == HSI_MSG_WRITE)
		val = SSI_DATAACCEPT(msg->channel);
	else
		val = SSI_DATAAVAILABLE(msg->channel);
	if (msg->status == HSI_STATUS_PROCEEDING) {
		buf = sg_virt(msg->sgt.sgl) + msg->actual_len;
		if (msg->ttype == HSI_MSG_WRITE)
			writel(*buf, omap_port->sst_base +
					SSI_SST_BUFFER_CH_REG(msg->channel));
		else
			*buf = readl(omap_port->ssr_base +
					SSI_SSR_BUFFER_CH_REG(msg->channel));
		dev_dbg(&port->device, "ch %d ttype %d 0x%08x\n", msg->channel,
							msg->ttype, *buf);
		msg->actual_len += sizeof(*buf);
		if (msg->actual_len >= msg->sgt.sgl->length)
			msg->status = HSI_STATUS_COMPLETED;
		/*
		 * Wait for the last written frame to be really sent before
		 * we call the complete callback
		 */
		if ((msg->status == HSI_STATUS_PROCEEDING) ||
				((msg->status == HSI_STATUS_COMPLETED) &&
					(msg->ttype == HSI_MSG_WRITE))) {
			writel(val, omap_ssi->sys +
					SSI_MPU_STATUS_REG(port->num, 0));
			spin_unlock(&omap_port->lock);

			return;
		}
	}
	/* Transfer completed at this point */
	reg = readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	if (msg->ttype == HSI_MSG_WRITE) {
		/* Release clocks for write transfer */
		pm_runtime_put_sync(omap_port->pdev);
	}
	reg &= ~val;
	writel_relaxed(reg, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	writel_relaxed(val, omap_ssi->sys + SSI_MPU_STATUS_REG(port->num, 0));
	list_del(&msg->link);
	spin_unlock(&omap_port->lock);
	msg->complete(msg);
	ssi_transfer(omap_port, queue);
}
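
/*
 * PIO transfers move exactly one 32-bit frame per DATAACCEPT/DATAAVAILABLE
 * interrupt: each ssi_pio_complete() call copies sizeof(u32) bytes and
 * advances msg->actual_len, so an N-byte request costs roughly N / 4
 * interrupts. Completed writes deliberately wait for one more interrupt, so
 * the last frame has really left the FIFO before msg->complete() runs.
 */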
static void ssi_pio_tasklet(unsigned long ssi_port)
{
	struct hsi_port *port = (struct hsi_port *)ssi_port;
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	void __iomem *sys = omap_ssi->sys;
	unsigned int ch;
	u32 status_reg;

	pm_runtime_get_sync(omap_port->pdev);
	status_reg = readl(sys + SSI_MPU_STATUS_REG(port->num, 0));
	status_reg &= readl(sys + SSI_MPU_ENABLE_REG(port->num, 0));

	for (ch = 0; ch < omap_port->channels; ch++) {
		if (status_reg & SSI_DATAACCEPT(ch))
			ssi_pio_complete(port, &omap_port->txqueue[ch]);
		if (status_reg & SSI_DATAAVAILABLE(ch))
			ssi_pio_complete(port, &omap_port->rxqueue[ch]);
	}
	if (status_reg & SSI_BREAKDETECTED)
		ssi_break_complete(port);
	if (status_reg & SSI_ERROROCCURED)
		ssi_error(port);

	status_reg = readl(sys + SSI_MPU_STATUS_REG(port->num, 0));
	status_reg &= readl(sys + SSI_MPU_ENABLE_REG(port->num, 0));
	pm_runtime_put_sync(omap_port->pdev);

	if (status_reg)
		tasklet_hi_schedule(&omap_port->pio_tasklet);
	else
		enable_irq(omap_port->irq);
}
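
/*
 * Interrupt handling model: ssi_pio_isr() below only masks the line with
 * disable_irq_nosync() and schedules the tasklet above; all register access
 * and completion callbacks run in softirq context. If enabled status bits
 * are still set after a pass, the tasklet reschedules itself instead of
 * re-enabling the IRQ, keeping the hard-IRQ path minimal.
 */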
static irqreturn_t ssi_pio_isr(int irq, void *port)
{
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);

	tasklet_hi_schedule(&omap_port->pio_tasklet);
	disable_irq_nosync(irq);

	return IRQ_HANDLED;
}
static irqreturn_t ssi_wake_thread(int irq __maybe_unused, void *ssi_port)
{
	struct hsi_port *port = (struct hsi_port *)ssi_port;
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);

	if (ssi_wakein(port)) {
		/*
		 * We can have a quick High-Low-High transition in the line.
		 * In such a case if we have long interrupt latencies,
		 * we can miss the low event or get twice a high event.
		 * This workaround will avoid breaking the clock reference
		 * count when such a situation occurs.
		 */
		if (!test_and_set_bit(SSI_WAKE_EN, &omap_port->flags))
			pm_runtime_get_sync(omap_port->pdev);
		dev_dbg(&ssi->device, "Wake in high\n");
		if (omap_port->wktest) { /* FIXME: HACK ! To be removed */
			writel(SSI_WAKE(0),
				omap_ssi->sys + SSI_SET_WAKE_REG(port->num));
		}
		hsi_event(port, HSI_EVENT_START_RX);
	} else {
		dev_dbg(&ssi->device, "Wake in low\n");
		if (omap_port->wktest) { /* FIXME: HACK ! To be removed */
			writel(SSI_WAKE(0),
				omap_ssi->sys + SSI_CLEAR_WAKE_REG(port->num));
		}
		hsi_event(port, HSI_EVENT_STOP_RX);
		if (test_and_clear_bit(SSI_WAKE_EN, &omap_port->flags))
			pm_runtime_put_sync(omap_port->pdev);
	}

	return IRQ_HANDLED;
}
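
/*
 * CAWAKE handling: the threaded handler above samples the GPIO level rather
 * than trusting the edge that fired, and the SSI_WAKE_EN test_and_set/clear
 * bit keeps the pm_runtime reference count balanced even if a short
 * High-Low-High glitch delivers two "high" (or two "low") invocations in a
 * row.
 */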
static int ssi_port_irq(struct hsi_port *port, struct platform_device *pd)
{
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	int err;

	err = platform_get_irq(pd, 0);
	if (err < 0) {
		dev_err(&port->device, "Port IRQ resource missing\n");
		return err;
	}
	omap_port->irq = err;
	tasklet_init(&omap_port->pio_tasklet, ssi_pio_tasklet,
							(unsigned long)port);
	err = devm_request_irq(&port->device, omap_port->irq, ssi_pio_isr,
						0, "mpu_irq0", port);
	if (err < 0)
		dev_err(&port->device, "Request IRQ %d failed (%d)\n",
							omap_port->irq, err);
	return err;
}
static int ssi_wake_irq(struct hsi_port *port, struct platform_device *pd)
{
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	int cawake_irq;
	int err;

	if (!omap_port->wake_gpio) {
		omap_port->wake_irq = -1;
		return 0;
	}

	cawake_irq = gpiod_to_irq(omap_port->wake_gpio);
	omap_port->wake_irq = cawake_irq;

	err = devm_request_threaded_irq(&port->device, cawake_irq, NULL,
		ssi_wake_thread,
		IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
		"SSI cawake", port);
	if (err < 0)
		dev_err(&port->device, "Request Wake in IRQ %d failed %d\n",
			cawake_irq, err);
	err = enable_irq_wake(cawake_irq);
	if (err < 0)
		dev_err(&port->device, "Enable wake on the wakeline in irq %d failed %d\n",
			cawake_irq, err);

	return err;
}
static void ssi_queues_init(struct omap_ssi_port *omap_port)
{
	unsigned int ch;

	for (ch = 0; ch < SSI_MAX_CHANNELS; ch++) {
		INIT_LIST_HEAD(&omap_port->txqueue[ch]);
		INIT_LIST_HEAD(&omap_port->rxqueue[ch]);
	}
	INIT_LIST_HEAD(&omap_port->brkqueue);
}
static int ssi_port_get_iomem(struct platform_device *pd,
		const char *name, void __iomem **pbase, dma_addr_t *phy)
{
	struct hsi_port *port = platform_get_drvdata(pd);
	struct resource *mem;
	struct resource *ioarea;
	void __iomem *base;

	mem = platform_get_resource_byname(pd, IORESOURCE_MEM, name);
	if (!mem) {
		dev_err(&pd->dev, "IO memory region missing (%s)\n", name);
		return -ENXIO;
	}
	ioarea = devm_request_mem_region(&port->device, mem->start,
					resource_size(mem), dev_name(&pd->dev));
	if (!ioarea) {
		dev_err(&pd->dev, "%s IO memory region request failed\n",
								mem->name);
		return -ENXIO;
	}
	base = devm_ioremap(&port->device, mem->start, resource_size(mem));
	if (!base) {
		dev_err(&pd->dev, "%s IO remap failed\n", mem->name);
		return -ENXIO;
	}
	*pbase = base;

	if (phy)
		*phy = mem->start;

	return 0;
}
static int ssi_port_probe(struct platform_device *pd)
{
	struct device_node *np = pd->dev.of_node;
	struct hsi_port *port;
	struct omap_ssi_port *omap_port;
	struct hsi_controller *ssi = dev_get_drvdata(pd->dev.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	struct gpio_desc *cawake_gpio = NULL;
	u32 port_id;
	int err;

	dev_dbg(&pd->dev, "init ssi port...\n");

	if (!ssi->port || !omap_ssi->port) {
		dev_err(&pd->dev, "ssi controller not initialized!\n");
		err = -ENODEV;
		goto error;
	}

	/* get id of first uninitialized port in controller */
	for (port_id = 0; port_id < ssi->num_ports && omap_ssi->port[port_id];
		port_id++)
		;

	if (port_id >= ssi->num_ports) {
		dev_err(&pd->dev, "port id out of range!\n");
		err = -ENODEV;
		goto error;
	}

	port = ssi->port[port_id];

	if (!np) {
		dev_err(&pd->dev, "missing device tree data\n");
		err = -EINVAL;
		goto error;
	}

	cawake_gpio = devm_gpiod_get(&pd->dev, "ti,ssi-cawake", GPIOD_IN);
	if (IS_ERR(cawake_gpio)) {
		err = PTR_ERR(cawake_gpio);
		dev_err(&pd->dev, "couldn't get cawake gpio (err=%d)!\n", err);
		goto error;
	}

	omap_port = devm_kzalloc(&port->device, sizeof(*omap_port), GFP_KERNEL);
	if (!omap_port) {
		err = -ENOMEM;
		goto error;
	}
	omap_port->wake_gpio = cawake_gpio;
	omap_port->pdev = &pd->dev;
	omap_port->port_id = port_id;

	/* initialize HSI port */
	port->async	= ssi_async;
	port->setup	= ssi_setup;
	port->flush	= ssi_flush;
	port->start_tx	= ssi_start_tx;
	port->stop_tx	= ssi_stop_tx;
	port->release	= ssi_release;
	hsi_port_set_drvdata(port, omap_port);
	omap_ssi->port[port_id] = omap_port;

	platform_set_drvdata(pd, port);

	err = ssi_port_get_iomem(pd, "tx", &omap_port->sst_base,
		&omap_port->sst_dma);
	if (err < 0)
		goto error;
	err = ssi_port_get_iomem(pd, "rx", &omap_port->ssr_base,
		&omap_port->ssr_dma);
	if (err < 0)
		goto error;

	err = ssi_port_irq(port, pd);
	if (err < 0)
		goto error;
	err = ssi_wake_irq(port, pd);
	if (err < 0)
		goto error;

	ssi_queues_init(omap_port);
	spin_lock_init(&omap_port->lock);
	spin_lock_init(&omap_port->wk_lock);
	omap_port->dev = &port->device;

	pm_runtime_irq_safe(omap_port->pdev);
	pm_runtime_enable(omap_port->pdev);

#ifdef CONFIG_DEBUG_FS
	err = ssi_debug_add_port(omap_port, omap_ssi->dir);
	if (err < 0) {
		pm_runtime_disable(omap_port->pdev);
		goto error;
	}
#endif

	hsi_add_clients_from_dt(port, np);

	dev_info(&pd->dev, "ssi port %u successfully initialized\n", port_id);

	return 0;

error:
	return err;
}
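
/*
 * For reference, a device tree node matching what this probe expects might
 * look like the hypothetical fragment below: "tx"/"rx" named reg ranges
 * consumed by ssi_port_get_iomem(), one MPU IRQ for ssi_port_irq(), and an
 * optional "ti,ssi-cawake" GPIO. Addresses and cells here are made up for
 * illustration; consult the real omap3 SSI bindings for exact values.
 *
 *	ssi_port1: ssi-port@4805a000 {
 *		compatible = "ti,omap3-ssi-port";
 *		reg = <0x4805a000 0x800>, <0x4805b000 0x800>;
 *		reg-names = "tx", "rx";
 *		interrupts = <67>;
 *		ti,ssi-cawake-gpio = <&gpio5 23 GPIO_ACTIVE_HIGH>;
 *	};
 */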
static int ssi_port_remove(struct platform_device *pd)
{
	struct hsi_port *port = platform_get_drvdata(pd);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);

#ifdef CONFIG_DEBUG_FS
	ssi_debug_remove_port(port);
#endif

	hsi_port_unregister_clients(port);

	tasklet_kill(&omap_port->pio_tasklet);

	port->async	= hsi_dummy_msg;
	port->setup	= hsi_dummy_cl;
	port->flush	= hsi_dummy_cl;
	port->start_tx	= hsi_dummy_cl;
	port->stop_tx	= hsi_dummy_cl;
	port->release	= hsi_dummy_cl;

	omap_ssi->port[omap_port->port_id] = NULL;
	platform_set_drvdata(pd, NULL);
	pm_runtime_disable(&pd->dev);

	return 0;
}
static int ssi_restore_divisor(struct omap_ssi_port *omap_port)
{
	writel_relaxed(omap_port->sst.divisor,
				omap_port->sst_base + SSI_SST_DIVISOR_REG);

	return 0;
}
void omap_ssi_port_update_fclk(struct hsi_controller *ssi,
			       struct omap_ssi_port *omap_port)
{
	/* update divisor */
	u32 div = ssi_calculate_div(ssi);

	omap_port->sst.divisor = div;
	ssi_restore_divisor(omap_port);
}
#ifdef CONFIG_PM
static int ssi_save_port_ctx(struct omap_ssi_port *omap_port)
{
	struct hsi_port *port = to_hsi_port(omap_port->dev);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);

	omap_port->sys_mpu_enable = readl(omap_ssi->sys +
					SSI_MPU_ENABLE_REG(port->num, 0));

	return 0;
}
static int ssi_restore_port_ctx(struct omap_ssi_port *omap_port)
{
	struct hsi_port *port = to_hsi_port(omap_port->dev);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	void __iomem *base;

	writel_relaxed(omap_port->sys_mpu_enable,
			omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));

	/* SST context */
	base = omap_port->sst_base;
	writel_relaxed(omap_port->sst.frame_size, base + SSI_SST_FRAMESIZE_REG);
	writel_relaxed(omap_port->sst.channels, base + SSI_SST_CHANNELS_REG);
	writel_relaxed(omap_port->sst.arb_mode, base + SSI_SST_ARBMODE_REG);

	/* SSR context */
	base = omap_port->ssr_base;
	writel_relaxed(omap_port->ssr.frame_size, base + SSI_SSR_FRAMESIZE_REG);
	writel_relaxed(omap_port->ssr.channels, base + SSI_SSR_CHANNELS_REG);
	writel_relaxed(omap_port->ssr.timeout, base + SSI_SSR_TIMEOUT_REG);

	return 0;
}
static int ssi_restore_port_mode(struct omap_ssi_port *omap_port)
{
	u32 mode;

	writel_relaxed(omap_port->sst.mode,
				omap_port->sst_base + SSI_SST_MODE_REG);
	writel_relaxed(omap_port->ssr.mode,
				omap_port->ssr_base + SSI_SSR_MODE_REG);
	/* OCP barrier */
	mode = readl(omap_port->ssr_base + SSI_SSR_MODE_REG);

	return 0;
}
static int omap_ssi_port_runtime_suspend(struct device *dev)
{
	struct hsi_port *port = dev_get_drvdata(dev);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);

	dev_dbg(dev, "port runtime suspend!\n");

	ssi_set_port_mode(omap_port, SSI_MODE_SLEEP);
	if (omap_ssi->get_loss)
		omap_port->loss_count =
				omap_ssi->get_loss(ssi->device.parent);
	ssi_save_port_ctx(omap_port);

	return 0;
}
static int omap_ssi_port_runtime_resume(struct device *dev)
{
	struct hsi_port *port = dev_get_drvdata(dev);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);

	dev_dbg(dev, "port runtime resume!\n");

	if ((omap_ssi->get_loss) && (omap_port->loss_count ==
				omap_ssi->get_loss(ssi->device.parent)))
		goto mode; /* We always need to restore the mode & TX divisor */

	ssi_restore_port_ctx(omap_port);

mode:
	ssi_restore_divisor(omap_port);
	ssi_restore_port_mode(omap_port);

	return 0;
}
static const struct dev_pm_ops omap_ssi_port_pm_ops = {
	SET_RUNTIME_PM_OPS(omap_ssi_port_runtime_suspend,
		omap_ssi_port_runtime_resume, NULL)
};

#define DEV_PM_OPS	(&omap_ssi_port_pm_ops)
#else
#define DEV_PM_OPS	NULL
#endif
#ifdef CONFIG_OF
static const struct of_device_id omap_ssi_port_of_match[] = {
	{ .compatible = "ti,omap3-ssi-port", },
	{},
};
MODULE_DEVICE_TABLE(of, omap_ssi_port_of_match);
#else
#define omap_ssi_port_of_match NULL
#endif
struct platform_driver ssi_port_pdriver = {
	.probe	= ssi_port_probe,
	.remove	= ssi_port_remove,
	.driver	= {
		.name		= "omap_ssi_port",
		.of_match_table	= omap_ssi_port_of_match,
		.pm		= DEV_PM_OPS,
	},
};