drivers/tty/serial/atmel_serial.c
1 /*
2 * Driver for Atmel AT91 / AT32 Serial ports
3 * Copyright (C) 2003 Rick Bronson
4 *
5 * Based on drivers/char/serial_sa1100.c, by Deep Blue Solutions Ltd.
6 * Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o.
7 *
8 * DMA support added by Chip Coldwell.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23 *
24 */
25 #include <linux/tty.h>
26 #include <linux/ioport.h>
27 #include <linux/slab.h>
28 #include <linux/init.h>
29 #include <linux/serial.h>
30 #include <linux/clk.h>
31 #include <linux/console.h>
32 #include <linux/sysrq.h>
33 #include <linux/tty_flip.h>
34 #include <linux/platform_device.h>
35 #include <linux/of.h>
36 #include <linux/of_device.h>
37 #include <linux/of_gpio.h>
38 #include <linux/dma-mapping.h>
39 #include <linux/dmaengine.h>
40 #include <linux/atmel_pdc.h>
41 #include <linux/atmel_serial.h>
42 #include <linux/uaccess.h>
43 #include <linux/platform_data/atmel.h>
44 #include <linux/timer.h>
45 #include <linux/gpio.h>
46 #include <linux/gpio/consumer.h>
47 #include <linux/err.h>
48 #include <linux/irq.h>
49 #include <linux/suspend.h>
50
51 #include <asm/io.h>
52 #include <asm/ioctls.h>
53
54 #define PDC_BUFFER_SIZE 512
55 /* Revisit: We should calculate this based on the actual port settings */
56 #define PDC_RX_TIMEOUT (3 * 10) /* 3 bytes (10 bit periods each) */
57
58 /* The minimum number of data the FIFOs should be able to contain */
59 #define ATMEL_MIN_FIFO_SIZE 8
60 /*
61 * These two offsets are subtracted from the RX FIFO size to define the RTS
62 * high and low thresholds
63 */
64 #define ATMEL_RTS_HIGH_OFFSET 16
65 #define ATMEL_RTS_LOW_OFFSET 20
66
67 #if defined(CONFIG_SERIAL_ATMEL_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
68 #define SUPPORT_SYSRQ
69 #endif
70
71 #include <linux/serial_core.h>
72
73 #include "serial_mctrl_gpio.h"
74
75 static void atmel_start_rx(struct uart_port *port);
76 static void atmel_stop_rx(struct uart_port *port);
77
78 #ifdef CONFIG_SERIAL_ATMEL_TTYAT
79
80 /* Use device name ttyAT, major 204 and minor 154-169. This is necessary if we
81 * need to coexist with the 8250 driver, for instance when an external 16C550
82 * UART is present. */
83 #define SERIAL_ATMEL_MAJOR 204
84 #define MINOR_START 154
85 #define ATMEL_DEVICENAME "ttyAT"
86
87 #else
88
89 /* Use device name ttyS, major 4, minor 64-68. This is the usual serial port
90 * name, but it is legally reserved for the 8250 driver. */
91 #define SERIAL_ATMEL_MAJOR TTY_MAJOR
92 #define MINOR_START 64
93 #define ATMEL_DEVICENAME "ttyS"
94
95 #endif
96
97 #define ATMEL_ISR_PASS_LIMIT 256
98
99 struct atmel_dma_buffer {
100 unsigned char *buf;
101 dma_addr_t dma_addr;
102 unsigned int dma_size;
103 unsigned int ofs;
104 };
105
106 struct atmel_uart_char {
107 u16 status;
108 u16 ch;
109 };
110
111 #define ATMEL_SERIAL_RINGSIZE 1024
112
113 /*
114 * at91: 6 USARTs and one DBGU port (SAM9260)
115 * avr32: 4
116 */
117 #define ATMEL_MAX_UART 7
118
119 /*
120 * We wrap our port structure around the generic uart_port.
121 */
122 struct atmel_uart_port {
123 struct uart_port uart; /* uart */
124 struct clk *clk; /* uart clock */
125 int may_wakeup; /* cached value of device_may_wakeup for times we need to disable it */
126 u32 backup_imr; /* IMR saved during suspend */
127 int break_active; /* break being received */
128
129 bool use_dma_rx; /* enable DMA receiver */
130 bool use_pdc_rx; /* enable PDC receiver */
131 short pdc_rx_idx; /* current PDC RX buffer */
132 struct atmel_dma_buffer pdc_rx[2]; /* PDC receiver */
133
134 bool use_dma_tx; /* enable DMA transmitter */
135 bool use_pdc_tx; /* enable PDC transmitter */
136 struct atmel_dma_buffer pdc_tx; /* PDC transmitter */
137
138 spinlock_t lock_tx; /* port lock */
139 spinlock_t lock_rx; /* port lock */
140 struct dma_chan *chan_tx;
141 struct dma_chan *chan_rx;
142 struct dma_async_tx_descriptor *desc_tx;
143 struct dma_async_tx_descriptor *desc_rx;
144 dma_cookie_t cookie_tx;
145 dma_cookie_t cookie_rx;
146 struct scatterlist sg_tx;
147 struct scatterlist sg_rx;
148 struct tasklet_struct tasklet;
149 unsigned int irq_status;
150 unsigned int irq_status_prev;
151 unsigned int status_change;
152 unsigned int tx_len;
153
154 struct circ_buf rx_ring;
155
156 struct mctrl_gpios *gpios;
157 unsigned int tx_done_mask;
158 u32 fifo_size;
159 u32 rts_high;
160 u32 rts_low;
161 bool ms_irq_enabled;
162 bool is_usart; /* usart or uart */
163 struct timer_list uart_timer; /* uart timer */
164
165 bool suspended;
166 unsigned int pending;
167 unsigned int pending_status;
168 spinlock_t lock_suspended;
169
170 int (*prepare_rx)(struct uart_port *port);
171 int (*prepare_tx)(struct uart_port *port);
172 void (*schedule_rx)(struct uart_port *port);
173 void (*schedule_tx)(struct uart_port *port);
174 void (*release_rx)(struct uart_port *port);
175 void (*release_tx)(struct uart_port *port);
176 };
177
178 static struct atmel_uart_port atmel_ports[ATMEL_MAX_UART];
179 static DECLARE_BITMAP(atmel_ports_in_use, ATMEL_MAX_UART);
180
181 #ifdef SUPPORT_SYSRQ
182 static struct console atmel_console;
183 #endif
184
185 #if defined(CONFIG_OF)
186 static const struct of_device_id atmel_serial_dt_ids[] = {
187 { .compatible = "atmel,at91rm9200-usart" },
188 { .compatible = "atmel,at91sam9260-usart" },
189 { /* sentinel */ }
190 };
191 #endif
192
193 static inline struct atmel_uart_port *
194 to_atmel_uart_port(struct uart_port *uart)
195 {
196 return container_of(uart, struct atmel_uart_port, uart);
197 }
198
199 static inline u32 atmel_uart_readl(struct uart_port *port, u32 reg)
200 {
201 return __raw_readl(port->membase + reg);
202 }
203
204 static inline void atmel_uart_writel(struct uart_port *port, u32 reg, u32 value)
205 {
206 __raw_writel(value, port->membase + reg);
207 }
208
209 #ifdef CONFIG_AVR32
210
211 /* AVR32 cannot handle 8- or 16-bit I/O accesses, only 32-bit I/O accesses */
212 static inline u8 atmel_uart_read_char(struct uart_port *port)
213 {
214 return __raw_readl(port->membase + ATMEL_US_RHR);
215 }
216
217 static inline void atmel_uart_write_char(struct uart_port *port, u8 value)
218 {
219 __raw_writel(value, port->membase + ATMEL_US_THR);
220 }
221
222 #else
223
224 static inline u8 atmel_uart_read_char(struct uart_port *port)
225 {
226 return __raw_readb(port->membase + ATMEL_US_RHR);
227 }
228
229 static inline void atmel_uart_write_char(struct uart_port *port, u8 value)
230 {
231 __raw_writeb(value, port->membase + ATMEL_US_THR);
232 }
233
234 #endif
235
236 #ifdef CONFIG_SERIAL_ATMEL_PDC
237 static bool atmel_use_pdc_rx(struct uart_port *port)
238 {
239 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
240
241 return atmel_port->use_pdc_rx;
242 }
243
244 static bool atmel_use_pdc_tx(struct uart_port *port)
245 {
246 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
247
248 return atmel_port->use_pdc_tx;
249 }
250 #else
251 static bool atmel_use_pdc_rx(struct uart_port *port)
252 {
253 return false;
254 }
255
256 static bool atmel_use_pdc_tx(struct uart_port *port)
257 {
258 return false;
259 }
260 #endif
261
262 static bool atmel_use_dma_tx(struct uart_port *port)
263 {
264 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
265
266 return atmel_port->use_dma_tx;
267 }
268
269 static bool atmel_use_dma_rx(struct uart_port *port)
270 {
271 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
272
273 return atmel_port->use_dma_rx;
274 }
275
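/* Combine the hardware CSR flags with any GPIO-driven modem lines into one status word. */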
276 static unsigned int atmel_get_lines_status(struct uart_port *port)
277 {
278 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
279 unsigned int status, ret = 0;
280
281 status = atmel_uart_readl(port, ATMEL_US_CSR);
282
283 mctrl_gpio_get(atmel_port->gpios, &ret);
284
285 if (!IS_ERR_OR_NULL(mctrl_gpio_to_gpiod(atmel_port->gpios,
286 UART_GPIO_CTS))) {
287 if (ret & TIOCM_CTS)
288 status &= ~ATMEL_US_CTS;
289 else
290 status |= ATMEL_US_CTS;
291 }
292
293 if (!IS_ERR_OR_NULL(mctrl_gpio_to_gpiod(atmel_port->gpios,
294 UART_GPIO_DSR))) {
295 if (ret & TIOCM_DSR)
296 status &= ~ATMEL_US_DSR;
297 else
298 status |= ATMEL_US_DSR;
299 }
300
301 if (!IS_ERR_OR_NULL(mctrl_gpio_to_gpiod(atmel_port->gpios,
302 UART_GPIO_RI))) {
303 if (ret & TIOCM_RI)
304 status &= ~ATMEL_US_RI;
305 else
306 status |= ATMEL_US_RI;
307 }
308
309 if (!IS_ERR_OR_NULL(mctrl_gpio_to_gpiod(atmel_port->gpios,
310 UART_GPIO_DCD))) {
311 if (ret & TIOCM_CD)
312 status &= ~ATMEL_US_DCD;
313 else
314 status |= ATMEL_US_DCD;
315 }
316
317 return status;
318 }
319
320 /* Enable or disable the rs485 support */
321 static int atmel_config_rs485(struct uart_port *port,
322 struct serial_rs485 *rs485conf)
323 {
324 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
325 unsigned int mode;
326
327 /* Disable interrupts */
328 atmel_uart_writel(port, ATMEL_US_IDR, atmel_port->tx_done_mask);
329
330 mode = atmel_uart_readl(port, ATMEL_US_MR);
331
332 /* Resetting serial mode to RS232 (0x0) */
333 mode &= ~ATMEL_US_USMODE;
334
335 port->rs485 = *rs485conf;
336
337 if (rs485conf->flags & SER_RS485_ENABLED) {
338 dev_dbg(port->dev, "Setting UART to RS485\n");
339 atmel_port->tx_done_mask = ATMEL_US_TXEMPTY;
340 atmel_uart_writel(port, ATMEL_US_TTGR,
341 rs485conf->delay_rts_after_send);
342 mode |= ATMEL_US_USMODE_RS485;
343 } else {
344 dev_dbg(port->dev, "Setting UART to RS232\n");
345 if (atmel_use_pdc_tx(port))
346 atmel_port->tx_done_mask = ATMEL_US_ENDTX |
347 ATMEL_US_TXBUFE;
348 else
349 atmel_port->tx_done_mask = ATMEL_US_TXRDY;
350 }
351 atmel_uart_writel(port, ATMEL_US_MR, mode);
352
353 /* Enable interrupts */
354 atmel_uart_writel(port, ATMEL_US_IER, atmel_port->tx_done_mask);
355
356 return 0;
357 }
358
359 /*
360 * Return TIOCSER_TEMT when the transmitter FIFO and shift register are empty.
361 */
362 static u_int atmel_tx_empty(struct uart_port *port)
363 {
364 return (atmel_uart_readl(port, ATMEL_US_CSR) & ATMEL_US_TXEMPTY) ?
365 TIOCSER_TEMT :
366 0;
367 }
368
369 /*
370 * Set state of the modem control output lines
371 */
372 static void atmel_set_mctrl(struct uart_port *port, u_int mctrl)
373 {
374 unsigned int control = 0;
375 unsigned int mode = atmel_uart_readl(port, ATMEL_US_MR);
376 unsigned int rts_paused, rts_ready;
377 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
378
379 /* override mode to RS485 if needed, otherwise keep the current mode */
380 if (port->rs485.flags & SER_RS485_ENABLED) {
381 atmel_uart_writel(port, ATMEL_US_TTGR,
382 port->rs485.delay_rts_after_send);
383 mode &= ~ATMEL_US_USMODE;
384 mode |= ATMEL_US_USMODE_RS485;
385 }
386
387 /* set the RTS line state according to the mode */
388 if ((mode & ATMEL_US_USMODE) == ATMEL_US_USMODE_HWHS) {
389 /* force RTS line to high level */
390 rts_paused = ATMEL_US_RTSEN;
391
392 /* give the control of the RTS line back to the hardware */
393 rts_ready = ATMEL_US_RTSDIS;
394 } else {
395 /* force RTS line to high level */
396 rts_paused = ATMEL_US_RTSDIS;
397
398 /* force RTS line to low level */
399 rts_ready = ATMEL_US_RTSEN;
400 }
401
402 if (mctrl & TIOCM_RTS)
403 control |= rts_ready;
404 else
405 control |= rts_paused;
406
407 if (mctrl & TIOCM_DTR)
408 control |= ATMEL_US_DTREN;
409 else
410 control |= ATMEL_US_DTRDIS;
411
412 atmel_uart_writel(port, ATMEL_US_CR, control);
413
414 mctrl_gpio_set(atmel_port->gpios, mctrl);
415
416 /* Local loopback mode? */
417 mode &= ~ATMEL_US_CHMODE;
418 if (mctrl & TIOCM_LOOP)
419 mode |= ATMEL_US_CHMODE_LOC_LOOP;
420 else
421 mode |= ATMEL_US_CHMODE_NORMAL;
422
423 atmel_uart_writel(port, ATMEL_US_MR, mode);
424 }
425
426 /*
427 * Get state of the modem control input lines
428 */
429 static u_int atmel_get_mctrl(struct uart_port *port)
430 {
431 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
432 unsigned int ret = 0, status;
433
434 status = atmel_uart_readl(port, ATMEL_US_CSR);
435
436 /*
437 * The control signals are active low.
438 */
439 if (!(status & ATMEL_US_DCD))
440 ret |= TIOCM_CD;
441 if (!(status & ATMEL_US_CTS))
442 ret |= TIOCM_CTS;
443 if (!(status & ATMEL_US_DSR))
444 ret |= TIOCM_DSR;
445 if (!(status & ATMEL_US_RI))
446 ret |= TIOCM_RI;
447
448 return mctrl_gpio_get(atmel_port->gpios, &ret);
449 }
450
451 /*
452 * Stop transmitting.
453 */
454 static void atmel_stop_tx(struct uart_port *port)
455 {
456 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
457
458 if (atmel_use_pdc_tx(port)) {
459 /* disable PDC transmit */
460 atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTDIS);
461 }
462 /* Disable interrupts */
463 atmel_uart_writel(port, ATMEL_US_IDR, atmel_port->tx_done_mask);
464
465 if ((port->rs485.flags & SER_RS485_ENABLED) &&
466 !(port->rs485.flags & SER_RS485_RX_DURING_TX))
467 atmel_start_rx(port);
468 }
469
470 /*
471 * Start transmitting.
472 */
473 static void atmel_start_tx(struct uart_port *port)
474 {
475 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
476
477 if (atmel_use_pdc_tx(port)) {
478 if (atmel_uart_readl(port, ATMEL_PDC_PTSR) & ATMEL_PDC_TXTEN)
479 /* The transmitter is already running. Yes, we
480 really need this.*/
481 return;
482
483 if ((port->rs485.flags & SER_RS485_ENABLED) &&
484 !(port->rs485.flags & SER_RS485_RX_DURING_TX))
485 atmel_stop_rx(port);
486
487 /* re-enable PDC transmit */
488 atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTEN);
489 }
490 /* Enable interrupts */
491 atmel_uart_writel(port, ATMEL_US_IER, atmel_port->tx_done_mask);
492 }
493
494 /*
495 * start receiving - port is in process of being opened.
496 */
497 static void atmel_start_rx(struct uart_port *port)
498 {
499 /* reset status and receiver */
500 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA);
501
502 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RXEN);
503
504 if (atmel_use_pdc_rx(port)) {
505 /* enable PDC controller */
506 atmel_uart_writel(port, ATMEL_US_IER,
507 ATMEL_US_ENDRX | ATMEL_US_TIMEOUT |
508 port->read_status_mask);
509 atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_RXTEN);
510 } else {
511 atmel_uart_writel(port, ATMEL_US_IER, ATMEL_US_RXRDY);
512 }
513 }
514
515 /*
516 * Stop receiving - port is in process of being closed.
517 */
518 static void atmel_stop_rx(struct uart_port *port)
519 {
520 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RXDIS);
521
522 if (atmel_use_pdc_rx(port)) {
523 /* disable PDC receive */
524 atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS);
525 atmel_uart_writel(port, ATMEL_US_IDR,
526 ATMEL_US_ENDRX | ATMEL_US_TIMEOUT |
527 port->read_status_mask);
528 } else {
529 atmel_uart_writel(port, ATMEL_US_IDR, ATMEL_US_RXRDY);
530 }
531 }
532
533 /*
534 * Enable modem status interrupts
535 */
536 static void atmel_enable_ms(struct uart_port *port)
537 {
538 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
539 uint32_t ier = 0;
540
541 /*
542 * Interrupt should not be enabled twice
543 */
544 if (atmel_port->ms_irq_enabled)
545 return;
546
547 atmel_port->ms_irq_enabled = true;
548
549 if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_CTS))
550 ier |= ATMEL_US_CTSIC;
551
552 if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_DSR))
553 ier |= ATMEL_US_DSRIC;
554
555 if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_RI))
556 ier |= ATMEL_US_RIIC;
557
558 if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_DCD))
559 ier |= ATMEL_US_DCDIC;
560
561 atmel_uart_writel(port, ATMEL_US_IER, ier);
562
563 mctrl_gpio_enable_ms(atmel_port->gpios);
564 }
565
566 /*
567 * Disable modem status interrupts
568 */
569 static void atmel_disable_ms(struct uart_port *port)
570 {
571 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
572 uint32_t idr = 0;
573
574 /*
575 * Interrupt should not be disabled twice
576 */
577 if (!atmel_port->ms_irq_enabled)
578 return;
579
580 atmel_port->ms_irq_enabled = false;
581
582 mctrl_gpio_disable_ms(atmel_port->gpios);
583
584 if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_CTS))
585 idr |= ATMEL_US_CTSIC;
586
587 if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_DSR))
588 idr |= ATMEL_US_DSRIC;
589
590 if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_RI))
591 idr |= ATMEL_US_RIIC;
592
593 if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_DCD))
594 idr |= ATMEL_US_DCDIC;
595
596 atmel_uart_writel(port, ATMEL_US_IDR, idr);
597 }
598
599 /*
600 * Control the transmission of a break signal
601 */
602 static void atmel_break_ctl(struct uart_port *port, int break_state)
603 {
604 if (break_state != 0)
605 /* start break */
606 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_STTBRK);
607 else
608 /* stop break */
609 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_STPBRK);
610 }
611
612 /*
613 * Stores the incoming character in the ring buffer
614 */
615 static void
616 atmel_buffer_rx_char(struct uart_port *port, unsigned int status,
617 unsigned int ch)
618 {
619 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
620 struct circ_buf *ring = &atmel_port->rx_ring;
621 struct atmel_uart_char *c;
622
623 if (!CIRC_SPACE(ring->head, ring->tail, ATMEL_SERIAL_RINGSIZE))
624 /* Buffer overflow, ignore char */
625 return;
626
627 c = &((struct atmel_uart_char *)ring->buf)[ring->head];
628 c->status = status;
629 c->ch = ch;
630
631 /* Make sure the character is stored before we update head. */
632 smp_wmb();
633
634 ring->head = (ring->head + 1) & (ATMEL_SERIAL_RINGSIZE - 1);
635 }
636
637 /*
638 * Deal with parity, framing and overrun errors.
639 */
640 static void atmel_pdc_rxerr(struct uart_port *port, unsigned int status)
641 {
642 /* clear error */
643 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA);
644
645 if (status & ATMEL_US_RXBRK) {
646 /* ignore side-effect */
647 status &= ~(ATMEL_US_PARE | ATMEL_US_FRAME);
648 port->icount.brk++;
649 }
650 if (status & ATMEL_US_PARE)
651 port->icount.parity++;
652 if (status & ATMEL_US_FRAME)
653 port->icount.frame++;
654 if (status & ATMEL_US_OVRE)
655 port->icount.overrun++;
656 }
657
658 /*
659 * Characters received (called from interrupt handler)
660 */
661 static void atmel_rx_chars(struct uart_port *port)
662 {
663 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
664 unsigned int status, ch;
665
666 status = atmel_uart_readl(port, ATMEL_US_CSR);
667 while (status & ATMEL_US_RXRDY) {
668 ch = atmel_uart_read_char(port);
669
670 /*
671 * note that the error handling code is
672 * out of the main execution path
673 */
674 if (unlikely(status & (ATMEL_US_PARE | ATMEL_US_FRAME
675 | ATMEL_US_OVRE | ATMEL_US_RXBRK)
676 || atmel_port->break_active)) {
677
678 /* clear error */
679 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA);
680
681 if (status & ATMEL_US_RXBRK
682 && !atmel_port->break_active) {
683 atmel_port->break_active = 1;
684 atmel_uart_writel(port, ATMEL_US_IER,
685 ATMEL_US_RXBRK);
686 } else {
687 /*
688 * This is either the end-of-break
689 * condition or we've received at
690 * least one character without RXBRK
691 * being set. In both cases, the next
692 * RXBRK will indicate start-of-break.
693 */
694 atmel_uart_writel(port, ATMEL_US_IDR,
695 ATMEL_US_RXBRK);
696 status &= ~ATMEL_US_RXBRK;
697 atmel_port->break_active = 0;
698 }
699 }
700
701 atmel_buffer_rx_char(port, status, ch);
702 status = atmel_uart_readl(port, ATMEL_US_CSR);
703 }
704
705 tasklet_schedule(&atmel_port->tasklet);
706 }
707
708 /*
709 * Transmit characters (called from tasklet with TXRDY interrupt
710 * disabled)
711 */
712 static void atmel_tx_chars(struct uart_port *port)
713 {
714 struct circ_buf *xmit = &port->state->xmit;
715 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
716
717 if (port->x_char &&
718 (atmel_uart_readl(port, ATMEL_US_CSR) & atmel_port->tx_done_mask)) {
719 atmel_uart_write_char(port, port->x_char);
720 port->icount.tx++;
721 port->x_char = 0;
722 }
723 if (uart_circ_empty(xmit) || uart_tx_stopped(port))
724 return;
725
726 while (atmel_uart_readl(port, ATMEL_US_CSR) &
727 atmel_port->tx_done_mask) {
728 atmel_uart_write_char(port, xmit->buf[xmit->tail]);
729 xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
730 port->icount.tx++;
731 if (uart_circ_empty(xmit))
732 break;
733 }
734
735 if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
736 uart_write_wakeup(port);
737
738 if (!uart_circ_empty(xmit))
739 /* Enable interrupts */
740 atmel_uart_writel(port, ATMEL_US_IER,
741 atmel_port->tx_done_mask);
742 }
743
744 static void atmel_complete_tx_dma(void *arg)
745 {
746 struct atmel_uart_port *atmel_port = arg;
747 struct uart_port *port = &atmel_port->uart;
748 struct circ_buf *xmit = &port->state->xmit;
749 struct dma_chan *chan = atmel_port->chan_tx;
750 unsigned long flags;
751
752 spin_lock_irqsave(&port->lock, flags);
753
754 if (chan)
755 dmaengine_terminate_all(chan);
756 xmit->tail += atmel_port->tx_len;
757 xmit->tail &= UART_XMIT_SIZE - 1;
758
759 port->icount.tx += atmel_port->tx_len;
760
761 spin_lock_irq(&atmel_port->lock_tx);
762 async_tx_ack(atmel_port->desc_tx);
763 atmel_port->cookie_tx = -EINVAL;
764 atmel_port->desc_tx = NULL;
765 spin_unlock_irq(&atmel_port->lock_tx);
766
767 if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
768 uart_write_wakeup(port);
769
770 /*
771 * xmit is a circular buffer so, if we have just sent data from
772 * xmit->tail to the end of xmit->buf, now we have to transmit the
773 * remaining data from the beginning of xmit->buf to xmit->head.
774 */
775 if (!uart_circ_empty(xmit))
776 tasklet_schedule(&atmel_port->tasklet);
777
778 spin_unlock_irqrestore(&port->lock, flags);
779 }
780
781 static void atmel_release_tx_dma(struct uart_port *port)
782 {
783 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
784 struct dma_chan *chan = atmel_port->chan_tx;
785
786 if (chan) {
787 dmaengine_terminate_all(chan);
788 dma_release_channel(chan);
789 dma_unmap_sg(port->dev, &atmel_port->sg_tx, 1,
790 DMA_TO_DEVICE);
791 }
792
793 atmel_port->desc_tx = NULL;
794 atmel_port->chan_tx = NULL;
795 atmel_port->cookie_tx = -EINVAL;
796 }
797
798 /*
799 * Called from tasklet with the TXRDY interrupt disabled.
800 */
801 static void atmel_tx_dma(struct uart_port *port)
802 {
803 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
804 struct circ_buf *xmit = &port->state->xmit;
805 struct dma_chan *chan = atmel_port->chan_tx;
806 struct dma_async_tx_descriptor *desc;
807 struct scatterlist sgl[2], *sg, *sg_tx = &atmel_port->sg_tx;
808 unsigned int tx_len, part1_len, part2_len, sg_len;
809 dma_addr_t phys_addr;
810
811 /* Make sure we have an idle channel */
812 if (atmel_port->desc_tx != NULL)
813 return;
814
815 if (!uart_circ_empty(xmit) && !uart_tx_stopped(port)) {
816 /*
817 * DMA is idle now.
818 * Port xmit buffer is already mapped,
819 * and it is one page... Just adjust
820 * offsets and lengths. Since it is a circular buffer,
821 * we have to transmit till the end, and then the rest.
822 * Take the port lock to get a
823 * consistent xmit buffer state.
824 */
825 tx_len = CIRC_CNT_TO_END(xmit->head,
826 xmit->tail,
827 UART_XMIT_SIZE);
828
829 if (atmel_port->fifo_size) {
830 /* multi data mode */
831 part1_len = (tx_len & ~0x3); /* DWORD access */
832 part2_len = (tx_len & 0x3); /* BYTE access */
833 } else {
834 /* single data (legacy) mode */
835 part1_len = 0;
836 part2_len = tx_len; /* BYTE access only */
837 }
838
839 sg_init_table(sgl, 2);
840 sg_len = 0;
841 phys_addr = sg_dma_address(sg_tx) + xmit->tail;
842 if (part1_len) {
843 sg = &sgl[sg_len++];
844 sg_dma_address(sg) = phys_addr;
845 sg_dma_len(sg) = part1_len;
846
847 phys_addr += part1_len;
848 }
849
850 if (part2_len) {
851 sg = &sgl[sg_len++];
852 sg_dma_address(sg) = phys_addr;
853 sg_dma_len(sg) = part2_len;
854 }
855
856 /*
857 * save tx_len so atmel_complete_tx_dma() will increase
858 * xmit->tail correctly
859 */
860 atmel_port->tx_len = tx_len;
861
862 desc = dmaengine_prep_slave_sg(chan,
863 sgl,
864 sg_len,
865 DMA_MEM_TO_DEV,
866 DMA_PREP_INTERRUPT |
867 DMA_CTRL_ACK);
868 if (!desc) {
869 dev_err(port->dev, "Failed to send via dma!\n");
870 return;
871 }
872
873 dma_sync_sg_for_device(port->dev, sg_tx, 1, DMA_TO_DEVICE);
874
875 atmel_port->desc_tx = desc;
876 desc->callback = atmel_complete_tx_dma;
877 desc->callback_param = atmel_port;
878 atmel_port->cookie_tx = dmaengine_submit(desc);
879
880 } else {
881 if (port->rs485.flags & SER_RS485_ENABLED) {
882 /* DMA done, stop TX, start RX for RS485 */
883 atmel_start_rx(port);
884 }
885 }
886
887 if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
888 uart_write_wakeup(port);
889 }
890
891 static int atmel_prepare_tx_dma(struct uart_port *port)
892 {
893 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
894 dma_cap_mask_t mask;
895 struct dma_slave_config config;
896 int ret, nent;
897
898 dma_cap_zero(mask);
899 dma_cap_set(DMA_SLAVE, mask);
900
901 atmel_port->chan_tx = dma_request_slave_channel(port->dev, "tx");
902 if (atmel_port->chan_tx == NULL)
903 goto chan_err;
904 dev_info(port->dev, "using %s for tx DMA transfers\n",
905 dma_chan_name(atmel_port->chan_tx));
906
907 spin_lock_init(&atmel_port->lock_tx);
908 sg_init_table(&atmel_port->sg_tx, 1);
909 /* UART circular tx buffer is an aligned page. */
910 BUG_ON(!PAGE_ALIGNED(port->state->xmit.buf));
911 sg_set_page(&atmel_port->sg_tx,
912 virt_to_page(port->state->xmit.buf),
913 UART_XMIT_SIZE,
914 (unsigned long)port->state->xmit.buf & ~PAGE_MASK);
915 nent = dma_map_sg(port->dev,
916 &atmel_port->sg_tx,
917 1,
918 DMA_TO_DEVICE);
919
920 if (!nent) {
921 dev_dbg(port->dev, "need to release resource of dma\n");
922 goto chan_err;
923 } else {
924 dev_dbg(port->dev, "%s: mapped %d@%p to %pad\n", __func__,
925 sg_dma_len(&atmel_port->sg_tx),
926 port->state->xmit.buf,
927 &sg_dma_address(&atmel_port->sg_tx));
928 }
929
930 /* Configure the slave DMA */
931 memset(&config, 0, sizeof(config));
932 config.direction = DMA_MEM_TO_DEV;
933 config.dst_addr_width = (atmel_port->fifo_size) ?
934 DMA_SLAVE_BUSWIDTH_4_BYTES :
935 DMA_SLAVE_BUSWIDTH_1_BYTE;
936 config.dst_addr = port->mapbase + ATMEL_US_THR;
937 config.dst_maxburst = 1;
938
939 ret = dmaengine_slave_config(atmel_port->chan_tx,
940 &config);
941 if (ret) {
942 dev_err(port->dev, "DMA tx slave configuration failed\n");
943 goto chan_err;
944 }
945
946 return 0;
947
948 chan_err:
949 dev_err(port->dev, "TX channel not available, switch to pio\n");
950 atmel_port->use_dma_tx = 0;
951 if (atmel_port->chan_tx)
952 atmel_release_tx_dma(port);
953 return -EINVAL;
954 }
955
956 static void atmel_complete_rx_dma(void *arg)
957 {
958 struct uart_port *port = arg;
959 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
960
961 tasklet_schedule(&atmel_port->tasklet);
962 }
963
964 static void atmel_release_rx_dma(struct uart_port *port)
965 {
966 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
967 struct dma_chan *chan = atmel_port->chan_rx;
968
969 if (chan) {
970 dmaengine_terminate_all(chan);
971 dma_release_channel(chan);
972 dma_unmap_sg(port->dev, &atmel_port->sg_rx, 1,
973 DMA_FROM_DEVICE);
974 }
975
976 atmel_port->desc_rx = NULL;
977 atmel_port->chan_rx = NULL;
978 atmel_port->cookie_rx = -EINVAL;
979 }
980
981 static void atmel_rx_from_dma(struct uart_port *port)
982 {
983 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
984 struct tty_port *tport = &port->state->port;
985 struct circ_buf *ring = &atmel_port->rx_ring;
986 struct dma_chan *chan = atmel_port->chan_rx;
987 struct dma_tx_state state;
988 enum dma_status dmastat;
989 size_t count;
990
991
992 /* Reset the UART timeout early so that we don't miss one */
993 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_STTTO);
994 dmastat = dmaengine_tx_status(chan,
995 atmel_port->cookie_rx,
996 &state);
997 /* Restart a new tasklet if DMA status is error */
998 if (dmastat == DMA_ERROR) {
999 dev_dbg(port->dev, "Get residue error, restart tasklet\n");
1000 atmel_uart_writel(port, ATMEL_US_IER, ATMEL_US_TIMEOUT);
1001 tasklet_schedule(&atmel_port->tasklet);
1002 return;
1003 }
1004
1005 /* CPU claims ownership of RX DMA buffer */
1006 dma_sync_sg_for_cpu(port->dev,
1007 &atmel_port->sg_rx,
1008 1,
1009 DMA_FROM_DEVICE);
1010
1011 /*
1012 * ring->head points to the end of data already written by the DMA.
1013 * ring->tail points to the beginning of data to be read by the
1014 * framework.
1015 * The current transfer size should not be larger than the dma buffer
1016 * length.
1017 */
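/* state.residue is the number of bytes the DMA engine has not written yet. */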
1018 ring->head = sg_dma_len(&atmel_port->sg_rx) - state.residue;
1019 BUG_ON(ring->head > sg_dma_len(&atmel_port->sg_rx));
1020 /*
1021 * At this point ring->head may point to the first byte right after the
1022 * last byte of the dma buffer:
1023 * 0 <= ring->head <= sg_dma_len(&atmel_port->sg_rx)
1024 *
1025 * However, ring->tail must always point inside the dma buffer:
1026 * 0 <= ring->tail <= sg_dma_len(&atmel_port->sg_rx) - 1
1027 *
1028 * Since we use a ring buffer, we have to handle the case
1029 * where head is lower than tail. In such a case, we first read from
1030 * tail to the end of the buffer then reset tail.
1031 */
1032 if (ring->head < ring->tail) {
1033 count = sg_dma_len(&atmel_port->sg_rx) - ring->tail;
1034
1035 tty_insert_flip_string(tport, ring->buf + ring->tail, count);
1036 ring->tail = 0;
1037 port->icount.rx += count;
1038 }
1039
1040 /* Finally we read data from tail to head */
1041 if (ring->tail < ring->head) {
1042 count = ring->head - ring->tail;
1043
1044 tty_insert_flip_string(tport, ring->buf + ring->tail, count);
1045 /* Wrap ring->head if needed */
1046 if (ring->head >= sg_dma_len(&atmel_port->sg_rx))
1047 ring->head = 0;
1048 ring->tail = ring->head;
1049 port->icount.rx += count;
1050 }
1051
1052 /* USART retrieves ownership of RX DMA buffer */
1053 dma_sync_sg_for_device(port->dev,
1054 &atmel_port->sg_rx,
1055 1,
1056 DMA_FROM_DEVICE);
1057
1058 /*
1059 * Drop the lock here since it might end up calling
1060 * uart_start(), which takes the lock.
1061 */
1062 spin_unlock(&port->lock);
1063 tty_flip_buffer_push(tport);
1064 spin_lock(&port->lock);
1065
1066 atmel_uart_writel(port, ATMEL_US_IER, ATMEL_US_TIMEOUT);
1067 }
1068
1069 static int atmel_prepare_rx_dma(struct uart_port *port)
1070 {
1071 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1072 struct dma_async_tx_descriptor *desc;
1073 dma_cap_mask_t mask;
1074 struct dma_slave_config config;
1075 struct circ_buf *ring;
1076 int ret, nent;
1077
1078 ring = &atmel_port->rx_ring;
1079
1080 dma_cap_zero(mask);
1081 dma_cap_set(DMA_CYCLIC, mask);
1082
1083 atmel_port->chan_rx = dma_request_slave_channel(port->dev, "rx");
1084 if (atmel_port->chan_rx == NULL)
1085 goto chan_err;
1086 dev_info(port->dev, "using %s for rx DMA transfers\n",
1087 dma_chan_name(atmel_port->chan_rx));
1088
1089 spin_lock_init(&atmel_port->lock_rx);
1090 sg_init_table(&atmel_port->sg_rx, 1);
1091 /* UART circular rx buffer is an aligned page. */
1092 BUG_ON(!PAGE_ALIGNED(ring->buf));
1093 sg_set_page(&atmel_port->sg_rx,
1094 virt_to_page(ring->buf),
1095 sizeof(struct atmel_uart_char) * ATMEL_SERIAL_RINGSIZE,
1096 (unsigned long)ring->buf & ~PAGE_MASK);
1097 nent = dma_map_sg(port->dev,
1098 &atmel_port->sg_rx,
1099 1,
1100 DMA_FROM_DEVICE);
1101
1102 if (!nent) {
1103 dev_dbg(port->dev, "need to release resource of dma\n");
1104 goto chan_err;
1105 } else {
1106 dev_dbg(port->dev, "%s: mapped %d@%p to %pad\n", __func__,
1107 sg_dma_len(&atmel_port->sg_rx),
1108 ring->buf,
1109 &sg_dma_address(&atmel_port->sg_rx));
1110 }
1111
1112 /* Configure the slave DMA */
1113 memset(&config, 0, sizeof(config));
1114 config.direction = DMA_DEV_TO_MEM;
1115 config.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
1116 config.src_addr = port->mapbase + ATMEL_US_RHR;
1117 config.src_maxburst = 1;
1118
1119 ret = dmaengine_slave_config(atmel_port->chan_rx,
1120 &config);
1121 if (ret) {
1122 dev_err(port->dev, "DMA rx slave configuration failed\n");
1123 goto chan_err;
1124 }
1125 /*
1126 * Prepare a cyclic dma transfer with 2 descriptors,
1127 * each one covering half of the ring buffer
1128 */
1129 desc = dmaengine_prep_dma_cyclic(atmel_port->chan_rx,
1130 sg_dma_address(&atmel_port->sg_rx),
1131 sg_dma_len(&atmel_port->sg_rx),
1132 sg_dma_len(&atmel_port->sg_rx)/2,
1133 DMA_DEV_TO_MEM,
1134 DMA_PREP_INTERRUPT);
1135 desc->callback = atmel_complete_rx_dma;
1136 desc->callback_param = port;
1137 atmel_port->desc_rx = desc;
1138 atmel_port->cookie_rx = dmaengine_submit(desc);
1139
1140 return 0;
1141
1142 chan_err:
1143 dev_err(port->dev, "RX channel not available, switch to pio\n");
1144 atmel_port->use_dma_rx = 0;
1145 if (atmel_port->chan_rx)
1146 atmel_release_rx_dma(port);
1147 return -EINVAL;
1148 }
1149
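/* Polling fallback for plain UARTs without a receiver time-out: schedule the tasklet periodically. */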
1150 static void atmel_uart_timer_callback(unsigned long data)
1151 {
1152 struct uart_port *port = (void *)data;
1153 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1154
1155 tasklet_schedule(&atmel_port->tasklet);
1156 mod_timer(&atmel_port->uart_timer, jiffies + uart_poll_timeout(port));
1157 }
1158
1159 /*
1160 * receive interrupt handler.
1161 */
1162 static void
1163 atmel_handle_receive(struct uart_port *port, unsigned int pending)
1164 {
1165 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1166
1167 if (atmel_use_pdc_rx(port)) {
1168 /*
1169 * PDC receive. Just schedule the tasklet and let it
1170 * figure out the details.
1171 *
1172 * TODO: We're not handling error flags correctly at
1173 * the moment.
1174 */
1175 if (pending & (ATMEL_US_ENDRX | ATMEL_US_TIMEOUT)) {
1176 atmel_uart_writel(port, ATMEL_US_IDR,
1177 (ATMEL_US_ENDRX | ATMEL_US_TIMEOUT));
1178 tasklet_schedule(&atmel_port->tasklet);
1179 }
1180
1181 if (pending & (ATMEL_US_RXBRK | ATMEL_US_OVRE |
1182 ATMEL_US_FRAME | ATMEL_US_PARE))
1183 atmel_pdc_rxerr(port, pending);
1184 }
1185
1186 if (atmel_use_dma_rx(port)) {
1187 if (pending & ATMEL_US_TIMEOUT) {
1188 atmel_uart_writel(port, ATMEL_US_IDR,
1189 ATMEL_US_TIMEOUT);
1190 tasklet_schedule(&atmel_port->tasklet);
1191 }
1192 }
1193
1194 /* Interrupt receive */
1195 if (pending & ATMEL_US_RXRDY)
1196 atmel_rx_chars(port);
1197 else if (pending & ATMEL_US_RXBRK) {
1198 /*
1199 * End of break detected. If it came along with a
1200 * character, atmel_rx_chars will handle it.
1201 */
1202 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA);
1203 atmel_uart_writel(port, ATMEL_US_IDR, ATMEL_US_RXBRK);
1204 atmel_port->break_active = 0;
1205 }
1206 }
1207
1208 /*
1209 * transmit interrupt handler. (Transmit is IRQF_NODELAY safe)
1210 */
1211 static void
1212 atmel_handle_transmit(struct uart_port *port, unsigned int pending)
1213 {
1214 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1215
1216 if (pending & atmel_port->tx_done_mask) {
1217 /* Either PDC or interrupt transmission */
1218 atmel_uart_writel(port, ATMEL_US_IDR,
1219 atmel_port->tx_done_mask);
1220 tasklet_schedule(&atmel_port->tasklet);
1221 }
1222 }
1223
1224 /*
1225 * status flags interrupt handler.
1226 */
1227 static void
1228 atmel_handle_status(struct uart_port *port, unsigned int pending,
1229 unsigned int status)
1230 {
1231 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1232
1233 if (pending & (ATMEL_US_RIIC | ATMEL_US_DSRIC | ATMEL_US_DCDIC
1234 | ATMEL_US_CTSIC)) {
1235 atmel_port->irq_status = status;
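/* keep only the modem-line bits that changed since the previous interrupt */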
1236 atmel_port->status_change = atmel_port->irq_status ^
1237 atmel_port->irq_status_prev;
1238 atmel_port->irq_status_prev = status;
1239 tasklet_schedule(&atmel_port->tasklet);
1240 }
1241 }
1242
1243 /*
1244 * Interrupt handler
1245 */
1246 static irqreturn_t atmel_interrupt(int irq, void *dev_id)
1247 {
1248 struct uart_port *port = dev_id;
1249 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1250 unsigned int status, pending, mask, pass_counter = 0;
1251
1252 spin_lock(&atmel_port->lock_suspended);
1253
1254 do {
1255 status = atmel_get_lines_status(port);
1256 mask = atmel_uart_readl(port, ATMEL_US_IMR);
1257 pending = status & mask;
1258 if (!pending)
1259 break;
1260
1261 if (atmel_port->suspended) {
1262 atmel_port->pending |= pending;
1263 atmel_port->pending_status = status;
1264 atmel_uart_writel(port, ATMEL_US_IDR, mask);
1265 pm_system_wakeup();
1266 break;
1267 }
1268
1269 atmel_handle_receive(port, pending);
1270 atmel_handle_status(port, pending, status);
1271 atmel_handle_transmit(port, pending);
1272 } while (pass_counter++ < ATMEL_ISR_PASS_LIMIT);
1273
1274 spin_unlock(&atmel_port->lock_suspended);
1275
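/* pass_counter is only non-zero if at least one pass handled pending interrupts */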
1276 return pass_counter ? IRQ_HANDLED : IRQ_NONE;
1277 }
1278
1279 static void atmel_release_tx_pdc(struct uart_port *port)
1280 {
1281 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1282 struct atmel_dma_buffer *pdc = &atmel_port->pdc_tx;
1283
1284 dma_unmap_single(port->dev,
1285 pdc->dma_addr,
1286 pdc->dma_size,
1287 DMA_TO_DEVICE);
1288 }
1289
1290 /*
1291 * Called from tasklet with ENDTX and TXBUFE interrupts disabled.
1292 */
1293 static void atmel_tx_pdc(struct uart_port *port)
1294 {
1295 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1296 struct circ_buf *xmit = &port->state->xmit;
1297 struct atmel_dma_buffer *pdc = &atmel_port->pdc_tx;
1298 int count;
1299
1300 /* nothing left to transmit? */
1301 if (atmel_uart_readl(port, ATMEL_PDC_TCR))
1302 return;
1303
1304 xmit->tail += pdc->ofs;
1305 xmit->tail &= UART_XMIT_SIZE - 1;
1306
1307 port->icount.tx += pdc->ofs;
1308 pdc->ofs = 0;
1309
1310 /* more to transmit - setup next transfer */
1311
1312 /* disable PDC transmit */
1313 atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTDIS);
1314
1315 if (!uart_circ_empty(xmit) && !uart_tx_stopped(port)) {
1316 dma_sync_single_for_device(port->dev,
1317 pdc->dma_addr,
1318 pdc->dma_size,
1319 DMA_TO_DEVICE);
1320
1321 count = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
1322 pdc->ofs = count;
1323
1324 atmel_uart_writel(port, ATMEL_PDC_TPR,
1325 pdc->dma_addr + xmit->tail);
1326 atmel_uart_writel(port, ATMEL_PDC_TCR, count);
1327 /* re-enable PDC transmit */
1328 atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTEN);
1329 /* Enable interrupts */
1330 atmel_uart_writel(port, ATMEL_US_IER,
1331 atmel_port->tx_done_mask);
1332 } else {
1333 if ((port->rs485.flags & SER_RS485_ENABLED) &&
1334 !(port->rs485.flags & SER_RS485_RX_DURING_TX)) {
1335 /* DMA done, stop TX, start RX for RS485 */
1336 atmel_start_rx(port);
1337 }
1338 }
1339
1340 if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
1341 uart_write_wakeup(port);
1342 }
1343
1344 static int atmel_prepare_tx_pdc(struct uart_port *port)
1345 {
1346 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1347 struct atmel_dma_buffer *pdc = &atmel_port->pdc_tx;
1348 struct circ_buf *xmit = &port->state->xmit;
1349
1350 pdc->buf = xmit->buf;
1351 pdc->dma_addr = dma_map_single(port->dev,
1352 pdc->buf,
1353 UART_XMIT_SIZE,
1354 DMA_TO_DEVICE);
1355 pdc->dma_size = UART_XMIT_SIZE;
1356 pdc->ofs = 0;
1357
1358 return 0;
1359 }
1360
1361 static void atmel_rx_from_ring(struct uart_port *port)
1362 {
1363 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1364 struct circ_buf *ring = &atmel_port->rx_ring;
1365 unsigned int flg;
1366 unsigned int status;
1367
1368 while (ring->head != ring->tail) {
1369 struct atmel_uart_char c;
1370
1371 /* Make sure c is loaded after head. */
1372 smp_rmb();
1373
1374 c = ((struct atmel_uart_char *)ring->buf)[ring->tail];
1375
1376 ring->tail = (ring->tail + 1) & (ATMEL_SERIAL_RINGSIZE - 1);
1377
1378 port->icount.rx++;
1379 status = c.status;
1380 flg = TTY_NORMAL;
1381
1382 /*
1383 * note that the error handling code is
1384 * out of the main execution path
1385 */
1386 if (unlikely(status & (ATMEL_US_PARE | ATMEL_US_FRAME
1387 | ATMEL_US_OVRE | ATMEL_US_RXBRK))) {
1388 if (status & ATMEL_US_RXBRK) {
1389 /* ignore side-effect */
1390 status &= ~(ATMEL_US_PARE | ATMEL_US_FRAME);
1391
1392 port->icount.brk++;
1393 if (uart_handle_break(port))
1394 continue;
1395 }
1396 if (status & ATMEL_US_PARE)
1397 port->icount.parity++;
1398 if (status & ATMEL_US_FRAME)
1399 port->icount.frame++;
1400 if (status & ATMEL_US_OVRE)
1401 port->icount.overrun++;
1402
1403 status &= port->read_status_mask;
1404
1405 if (status & ATMEL_US_RXBRK)
1406 flg = TTY_BREAK;
1407 else if (status & ATMEL_US_PARE)
1408 flg = TTY_PARITY;
1409 else if (status & ATMEL_US_FRAME)
1410 flg = TTY_FRAME;
1411 }
1412
1413
1414 if (uart_handle_sysrq_char(port, c.ch))
1415 continue;
1416
1417 uart_insert_char(port, status, ATMEL_US_OVRE, c.ch, flg);
1418 }
1419
1420 /*
1421 * Drop the lock here since it might end up calling
1422 * uart_start(), which takes the lock.
1423 */
1424 spin_unlock(&port->lock);
1425 tty_flip_buffer_push(&port->state->port);
1426 spin_lock(&port->lock);
1427 }
1428
1429 static void atmel_release_rx_pdc(struct uart_port *port)
1430 {
1431 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1432 int i;
1433
1434 for (i = 0; i < 2; i++) {
1435 struct atmel_dma_buffer *pdc = &atmel_port->pdc_rx[i];
1436
1437 dma_unmap_single(port->dev,
1438 pdc->dma_addr,
1439 pdc->dma_size,
1440 DMA_FROM_DEVICE);
1441 kfree(pdc->buf);
1442 }
1443 }
1444
1445 static void atmel_rx_from_pdc(struct uart_port *port)
1446 {
1447 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1448 struct tty_port *tport = &port->state->port;
1449 struct atmel_dma_buffer *pdc;
1450 int rx_idx = atmel_port->pdc_rx_idx;
1451 unsigned int head;
1452 unsigned int tail;
1453 unsigned int count;
1454
1455 do {
1456 /* Reset the UART timeout early so that we don't miss one */
1457 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_STTTO);
1458
1459 pdc = &atmel_port->pdc_rx[rx_idx];
1460 head = atmel_uart_readl(port, ATMEL_PDC_RPR) - pdc->dma_addr;
1461 tail = pdc->ofs;
1462
1463 /* If the PDC has switched buffers, RPR won't contain
1464 * any address within the current buffer. Since head
1465 * is unsigned, we just need a one-way comparison to
1466 * find out.
1467 *
1468 * In this case, we just need to consume the entire
1469 * buffer and resubmit it for DMA. This will clear the
1470 * ENDRX bit as well, so that we can safely re-enable
1471 * all interrupts below.
1472 */
1473 head = min(head, pdc->dma_size);
1474
1475 if (likely(head != tail)) {
1476 dma_sync_single_for_cpu(port->dev, pdc->dma_addr,
1477 pdc->dma_size, DMA_FROM_DEVICE);
1478
1479 /*
1480 * head will only wrap around when we recycle
1481 * the DMA buffer, and when that happens, we
1482 * explicitly set tail to 0. So head will
1483 * always be greater than tail.
1484 */
1485 count = head - tail;
1486
1487 tty_insert_flip_string(tport, pdc->buf + pdc->ofs,
1488 count);
1489
1490 dma_sync_single_for_device(port->dev, pdc->dma_addr,
1491 pdc->dma_size, DMA_FROM_DEVICE);
1492
1493 port->icount.rx += count;
1494 pdc->ofs = head;
1495 }
1496
1497 /*
1498 * If the current buffer is full, we need to check if
1499 * the next one contains any additional data.
1500 */
1501 if (head >= pdc->dma_size) {
1502 pdc->ofs = 0;
1503 atmel_uart_writel(port, ATMEL_PDC_RNPR, pdc->dma_addr);
1504 atmel_uart_writel(port, ATMEL_PDC_RNCR, pdc->dma_size);
1505
1506 rx_idx = !rx_idx;
1507 atmel_port->pdc_rx_idx = rx_idx;
1508 }
1509 } while (head >= pdc->dma_size);
1510
1511 /*
1512 * Drop the lock here since it might end up calling
1513 * uart_start(), which takes the lock.
1514 */
1515 spin_unlock(&port->lock);
1516 tty_flip_buffer_push(tport);
1517 spin_lock(&port->lock);
1518
1519 atmel_uart_writel(port, ATMEL_US_IER,
1520 ATMEL_US_ENDRX | ATMEL_US_TIMEOUT);
1521 }
1522
1523 static int atmel_prepare_rx_pdc(struct uart_port *port)
1524 {
1525 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1526 int i;
1527
1528 for (i = 0; i < 2; i++) {
1529 struct atmel_dma_buffer *pdc = &atmel_port->pdc_rx[i];
1530
1531 pdc->buf = kmalloc(PDC_BUFFER_SIZE, GFP_KERNEL);
1532 if (pdc->buf == NULL) {
1533 if (i != 0) {
1534 dma_unmap_single(port->dev,
1535 atmel_port->pdc_rx[0].dma_addr,
1536 PDC_BUFFER_SIZE,
1537 DMA_FROM_DEVICE);
1538 kfree(atmel_port->pdc_rx[0].buf);
1539 }
1540 atmel_port->use_pdc_rx = 0;
1541 return -ENOMEM;
1542 }
1543 pdc->dma_addr = dma_map_single(port->dev,
1544 pdc->buf,
1545 PDC_BUFFER_SIZE,
1546 DMA_FROM_DEVICE);
1547 pdc->dma_size = PDC_BUFFER_SIZE;
1548 pdc->ofs = 0;
1549 }
1550
1551 atmel_port->pdc_rx_idx = 0;
1552
1553 atmel_uart_writel(port, ATMEL_PDC_RPR, atmel_port->pdc_rx[0].dma_addr);
1554 atmel_uart_writel(port, ATMEL_PDC_RCR, PDC_BUFFER_SIZE);
1555
1556 atmel_uart_writel(port, ATMEL_PDC_RNPR,
1557 atmel_port->pdc_rx[1].dma_addr);
1558 atmel_uart_writel(port, ATMEL_PDC_RNCR, PDC_BUFFER_SIZE);
1559
1560 return 0;
1561 }
1562
1563 /*
1564 * tasklet handling tty stuff outside the interrupt handler.
1565 */
1566 static void atmel_tasklet_func(unsigned long data)
1567 {
1568 struct uart_port *port = (struct uart_port *)data;
1569 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1570 unsigned int status = atmel_port->irq_status;
1571 unsigned int status_change = atmel_port->status_change;
1572
1573 /* The interrupt handler does not take the lock */
1574 spin_lock(&port->lock);
1575
1576 atmel_port->schedule_tx(port);
1577
1578 if (status_change & (ATMEL_US_RI | ATMEL_US_DSR
1579 | ATMEL_US_DCD | ATMEL_US_CTS)) {
1580 /* TODO: All reads to CSR will clear these interrupts! */
1581 if (status_change & ATMEL_US_RI)
1582 port->icount.rng++;
1583 if (status_change & ATMEL_US_DSR)
1584 port->icount.dsr++;
1585 if (status_change & ATMEL_US_DCD)
1586 uart_handle_dcd_change(port, !(status & ATMEL_US_DCD));
1587 if (status_change & ATMEL_US_CTS)
1588 uart_handle_cts_change(port, !(status & ATMEL_US_CTS));
1589
1590 wake_up_interruptible(&port->state->port.delta_msr_wait);
1591
1592 atmel_port->status_change = 0;
1593 }
1594
1595 atmel_port->schedule_rx(port);
1596
1597 spin_unlock(&port->lock);
1598 }
1599
1600 static void atmel_init_property(struct atmel_uart_port *atmel_port,
1601 struct platform_device *pdev)
1602 {
1603 struct device_node *np = pdev->dev.of_node;
1604 struct atmel_uart_data *pdata = dev_get_platdata(&pdev->dev);
1605
1606 if (np) {
1607 /* DMA/PDC usage specification */
1608 if (of_get_property(np, "atmel,use-dma-rx", NULL)) {
1609 if (of_get_property(np, "dmas", NULL)) {
1610 atmel_port->use_dma_rx = true;
1611 atmel_port->use_pdc_rx = false;
1612 } else {
1613 atmel_port->use_dma_rx = false;
1614 atmel_port->use_pdc_rx = true;
1615 }
1616 } else {
1617 atmel_port->use_dma_rx = false;
1618 atmel_port->use_pdc_rx = false;
1619 }
1620
1621 if (of_get_property(np, "atmel,use-dma-tx", NULL)) {
1622 if (of_get_property(np, "dmas", NULL)) {
1623 atmel_port->use_dma_tx = true;
1624 atmel_port->use_pdc_tx = false;
1625 } else {
1626 atmel_port->use_dma_tx = false;
1627 atmel_port->use_pdc_tx = true;
1628 }
1629 } else {
1630 atmel_port->use_dma_tx = false;
1631 atmel_port->use_pdc_tx = false;
1632 }
1633
1634 } else {
1635 atmel_port->use_pdc_rx = pdata->use_dma_rx;
1636 atmel_port->use_pdc_tx = pdata->use_dma_tx;
1637 atmel_port->use_dma_rx = false;
1638 atmel_port->use_dma_tx = false;
1639 }
1640
1641 }
1642
1643 static void atmel_init_rs485(struct uart_port *port,
1644 struct platform_device *pdev)
1645 {
1646 struct device_node *np = pdev->dev.of_node;
1647 struct atmel_uart_data *pdata = dev_get_platdata(&pdev->dev);
1648
1649 if (np) {
1650 struct serial_rs485 *rs485conf = &port->rs485;
1651 u32 rs485_delay[2];
1652 /* rs485 properties */
1653 if (of_property_read_u32_array(np, "rs485-rts-delay",
1654 rs485_delay, 2) == 0) {
1655 rs485conf->delay_rts_before_send = rs485_delay[0];
1656 rs485conf->delay_rts_after_send = rs485_delay[1];
1657 rs485conf->flags = 0;
1658 }
1659
1660 if (of_get_property(np, "rs485-rx-during-tx", NULL))
1661 rs485conf->flags |= SER_RS485_RX_DURING_TX;
1662
1663 if (of_get_property(np, "linux,rs485-enabled-at-boot-time",
1664 NULL))
1665 rs485conf->flags |= SER_RS485_ENABLED;
1666 } else {
1667 port->rs485 = pdata->rs485;
1668 }
1669
1670 }
1671
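/* Select the RX/TX helper callbacks matching the configured transfer mode: DMA, PDC or PIO. */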
1672 static void atmel_set_ops(struct uart_port *port)
1673 {
1674 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1675
1676 if (atmel_use_dma_rx(port)) {
1677 atmel_port->prepare_rx = &atmel_prepare_rx_dma;
1678 atmel_port->schedule_rx = &atmel_rx_from_dma;
1679 atmel_port->release_rx = &atmel_release_rx_dma;
1680 } else if (atmel_use_pdc_rx(port)) {
1681 atmel_port->prepare_rx = &atmel_prepare_rx_pdc;
1682 atmel_port->schedule_rx = &atmel_rx_from_pdc;
1683 atmel_port->release_rx = &atmel_release_rx_pdc;
1684 } else {
1685 atmel_port->prepare_rx = NULL;
1686 atmel_port->schedule_rx = &atmel_rx_from_ring;
1687 atmel_port->release_rx = NULL;
1688 }
1689
1690 if (atmel_use_dma_tx(port)) {
1691 atmel_port->prepare_tx = &atmel_prepare_tx_dma;
1692 atmel_port->schedule_tx = &atmel_tx_dma;
1693 atmel_port->release_tx = &atmel_release_tx_dma;
1694 } else if (atmel_use_pdc_tx(port)) {
1695 atmel_port->prepare_tx = &atmel_prepare_tx_pdc;
1696 atmel_port->schedule_tx = &atmel_tx_pdc;
1697 atmel_port->release_tx = &atmel_release_tx_pdc;
1698 } else {
1699 atmel_port->prepare_tx = NULL;
1700 atmel_port->schedule_tx = &atmel_tx_chars;
1701 atmel_port->release_tx = NULL;
1702 }
1703 }
1704
1705 /*
1706 * Get the IP name: usart or uart
1707 */
1708 static void atmel_get_ip_name(struct uart_port *port)
1709 {
1710 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1711 int name = atmel_uart_readl(port, ATMEL_US_NAME);
1712 u32 version;
1713 int usart, uart;
1714 /* ASCII "USAR" (usart) and "DBGU" (uart) */
1715 usart = 0x55534152;
1716 uart = 0x44424755;
1717
1718 atmel_port->is_usart = false;
1719
1720 if (name == usart) {
1721 dev_dbg(port->dev, "This is usart\n");
1722 atmel_port->is_usart = true;
1723 } else if (name == uart) {
1724 dev_dbg(port->dev, "This is uart\n");
1725 atmel_port->is_usart = false;
1726 } else {
1727 /* fallback for older SoCs: use version field */
1728 version = atmel_uart_readl(port, ATMEL_US_VERSION);
1729 switch (version) {
1730 case 0x302:
1731 case 0x10213:
1732 dev_dbg(port->dev, "This version is usart\n");
1733 atmel_port->is_usart = true;
1734 break;
1735 case 0x203:
1736 case 0x10202:
1737 dev_dbg(port->dev, "This version is uart\n");
1738 atmel_port->is_usart = false;
1739 break;
1740 default:
1741 dev_err(port->dev, "Not supported ip name nor version, set to uart\n");
1742 }
1743 }
1744 }
1745
1746 /*
1747 * Perform initialization and enable port for reception
1748 */
1749 static int atmel_startup(struct uart_port *port)
1750 {
1751 struct platform_device *pdev = to_platform_device(port->dev);
1752 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1753 struct tty_struct *tty = port->state->port.tty;
1754 int retval;
1755
1756 /*
1757 * Ensure that no interrupts are enabled otherwise when
1758 * request_irq() is called we could get stuck trying to
1759 * handle an unexpected interrupt
1760 */
1761 atmel_uart_writel(port, ATMEL_US_IDR, -1);
1762 atmel_port->ms_irq_enabled = false;
1763
1764 /*
1765 * Allocate the IRQ
1766 */
1767 retval = request_irq(port->irq, atmel_interrupt,
1768 IRQF_SHARED | IRQF_COND_SUSPEND,
1769 tty ? tty->name : "atmel_serial", port);
1770 if (retval) {
1771 dev_err(port->dev, "atmel_startup - Can't get irq\n");
1772 return retval;
1773 }
1774
1775 tasklet_enable(&atmel_port->tasklet);
1776
1777 /*
1778 * Initialize DMA (if necessary)
1779 */
1780 atmel_init_property(atmel_port, pdev);
1781 atmel_set_ops(port);
1782
1783 if (atmel_port->prepare_rx) {
1784 retval = atmel_port->prepare_rx(port);
1785 if (retval < 0)
1786 atmel_set_ops(port);
1787 }
1788
1789 if (atmel_port->prepare_tx) {
1790 retval = atmel_port->prepare_tx(port);
1791 if (retval < 0)
1792 atmel_set_ops(port);
1793 }
1794
1795 /*
1796 * Enable FIFO when available
1797 */
1798 if (atmel_port->fifo_size) {
1799 unsigned int txrdym = ATMEL_US_ONE_DATA;
1800 unsigned int rxrdym = ATMEL_US_ONE_DATA;
1801 unsigned int fmr;
1802
1803 atmel_uart_writel(port, ATMEL_US_CR,
1804 ATMEL_US_FIFOEN |
1805 ATMEL_US_RXFCLR |
1806 ATMEL_US_TXFLCLR);
1807
1808 if (atmel_use_dma_tx(port))
1809 txrdym = ATMEL_US_FOUR_DATA;
1810
1811 fmr = ATMEL_US_TXRDYM(txrdym) | ATMEL_US_RXRDYM(rxrdym);
1812 if (atmel_port->rts_high &&
1813 atmel_port->rts_low)
1814 fmr |= ATMEL_US_FRTSC |
1815 ATMEL_US_RXFTHRES(atmel_port->rts_high) |
1816 ATMEL_US_RXFTHRES2(atmel_port->rts_low);
1817
1818 atmel_uart_writel(port, ATMEL_US_FMR, fmr);
1819 }
1820
1821 /* Save current CSR for comparison in atmel_tasklet_func() */
1822 atmel_port->irq_status_prev = atmel_get_lines_status(port);
1823 atmel_port->irq_status = atmel_port->irq_status_prev;
1824
1825 /*
1826 * Finally, enable the serial port
1827 */
1828 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA | ATMEL_US_RSTRX);
1829 /* enable xmit & rcvr */
1830 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN | ATMEL_US_RXEN);
1831
1832 setup_timer(&atmel_port->uart_timer,
1833 atmel_uart_timer_callback,
1834 (unsigned long)port);
1835
1836 if (atmel_use_pdc_rx(port)) {
1837 /* set UART timeout */
1838 if (!atmel_port->is_usart) {
1839 mod_timer(&atmel_port->uart_timer,
1840 jiffies + uart_poll_timeout(port));
1841 /* set USART timeout */
1842 } else {
1843 atmel_uart_writel(port, ATMEL_US_RTOR, PDC_RX_TIMEOUT);
1844 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_STTTO);
1845
1846 atmel_uart_writel(port, ATMEL_US_IER,
1847 ATMEL_US_ENDRX | ATMEL_US_TIMEOUT);
1848 }
1849 /* enable PDC controller */
1850 atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_RXTEN);
1851 } else if (atmel_use_dma_rx(port)) {
1852 /* set UART timeout */
1853 if (!atmel_port->is_usart) {
1854 mod_timer(&atmel_port->uart_timer,
1855 jiffies + uart_poll_timeout(port));
1856 /* set USART timeout */
1857 } else {
1858 atmel_uart_writel(port, ATMEL_US_RTOR, PDC_RX_TIMEOUT);
1859 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_STTTO);
1860
1861 atmel_uart_writel(port, ATMEL_US_IER,
1862 ATMEL_US_TIMEOUT);
1863 }
1864 } else {
1865 /* enable receive only */
1866 atmel_uart_writel(port, ATMEL_US_IER, ATMEL_US_RXRDY);
1867 }
1868
1869 return 0;
1870 }
1871
1872 /*
1873 * Flush any TX data submitted for DMA. Called when the TX circular
1874 * buffer is reset.
1875 */
1876 static void atmel_flush_buffer(struct uart_port *port)
1877 {
1878 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1879
1880 if (atmel_use_pdc_tx(port)) {
1881 atmel_uart_writel(port, ATMEL_PDC_TCR, 0);
1882 atmel_port->pdc_tx.ofs = 0;
1883 }
1884 }
1885
1886 /*
1887 * Disable the port
1888 */
1889 static void atmel_shutdown(struct uart_port *port)
1890 {
1891 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1892
1893 /*
1894 * Prevent any tasklets being scheduled during
1895 * cleanup
1896 */
1897 del_timer_sync(&atmel_port->uart_timer);
1898
1899 /*
1900 * Clear out any scheduled tasklets before
1901 * we destroy the buffers
1902 */
1903 tasklet_disable(&atmel_port->tasklet);
1904 tasklet_kill(&atmel_port->tasklet);
1905
1906 /*
1907 * Ensure everything is stopped and
1908 * disable all interrupts, port and break condition.
1909 */
1910 atmel_stop_rx(port);
1911 atmel_stop_tx(port);
1912
1913 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA);
1914 atmel_uart_writel(port, ATMEL_US_IDR, -1);
1915
1916
1917 /*
1918 * Shut-down the DMA.
1919 */
1920 if (atmel_port->release_rx)
1921 atmel_port->release_rx(port);
1922 if (atmel_port->release_tx)
1923 atmel_port->release_tx(port);
1924
1925 /*
1926 * Reset ring buffer pointers
1927 */
1928 atmel_port->rx_ring.head = 0;
1929 atmel_port->rx_ring.tail = 0;
1930
1931 /*
1932 * Free the interrupts
1933 */
1934 free_irq(port->irq, port);
1935
1936 atmel_port->ms_irq_enabled = false;
1937
1938 atmel_flush_buffer(port);
1939 }
1940
1941 /*
1942 * Power / Clock management.
1943 */
1944 static void atmel_serial_pm(struct uart_port *port, unsigned int state,
1945 unsigned int oldstate)
1946 {
1947 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1948
1949 switch (state) {
1950 case 0:
1951 /*
1952 * Enable the peripheral clock for this serial port.
1953 * This is called on uart_open() or a resume event.
1954 */
1955 clk_prepare_enable(atmel_port->clk);
1956
1957 /* re-enable interrupts if we disabled some on suspend */
1958 atmel_uart_writel(port, ATMEL_US_IER, atmel_port->backup_imr);
1959 break;
1960 case 3:
1961 /* Back up the interrupt mask and disable all interrupts */
1962 atmel_port->backup_imr = atmel_uart_readl(port, ATMEL_US_IMR);
1963 atmel_uart_writel(port, ATMEL_US_IDR, -1);
1964
1965 /*
1966 * Disable the peripheral clock for this serial port.
1967 * This is called on uart_close() or a suspend event.
1968 */
1969 clk_disable_unprepare(atmel_port->clk);
1970 break;
1971 default:
1972 dev_err(port->dev, "atmel_serial: unknown pm %d\n", state);
1973 }
1974 }
1975
1976 /*
1977 * Change the port parameters
1978 */
1979 static void atmel_set_termios(struct uart_port *port, struct ktermios *termios,
1980 struct ktermios *old)
1981 {
1982 unsigned long flags;
1983 unsigned int old_mode, mode, imr, quot, baud;
1984
1985 /* save the current mode register */
1986 mode = old_mode = atmel_uart_readl(port, ATMEL_US_MR);
1987
1988 /* reset the mode, clock divisor, parity, stop bits and data size */
1989 mode &= ~(ATMEL_US_USCLKS | ATMEL_US_CHRL | ATMEL_US_NBSTOP |
1990 ATMEL_US_PAR | ATMEL_US_USMODE);
1991
1992 baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk / 16);
1993 quot = uart_get_divisor(port, baud);
1994
1995 if (quot > 65535) { /* BRGR is 16-bit, so switch to slower clock */
1996 quot /= 8;
1997 mode |= ATMEL_US_USCLKS_MCK_DIV8;
1998 }
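/*
 * Worked example (hypothetical clock, for illustration only): with a
 * 132 MHz uartclk at 300 baud, quot = 132000000 / (16 * 300) = 27500,
 * which does not fit in the 16-bit divisor, so quot becomes
 * 27500 / 8 = 3437 and the slower MCK/8 source clock is selected.
 */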
1999
2000 /* byte size */
2001 switch (termios->c_cflag & CSIZE) {
2002 case CS5:
2003 mode |= ATMEL_US_CHRL_5;
2004 break;
2005 case CS6:
2006 mode |= ATMEL_US_CHRL_6;
2007 break;
2008 case CS7:
2009 mode |= ATMEL_US_CHRL_7;
2010 break;
2011 default:
2012 mode |= ATMEL_US_CHRL_8;
2013 break;
2014 }
2015
2016 /* stop bits */
2017 if (termios->c_cflag & CSTOPB)
2018 mode |= ATMEL_US_NBSTOP_2;
2019
2020 /* parity */
2021 if (termios->c_cflag & PARENB) {
2022 /* Mark or Space parity */
2023 if (termios->c_cflag & CMSPAR) {
2024 if (termios->c_cflag & PARODD)
2025 mode |= ATMEL_US_PAR_MARK;
2026 else
2027 mode |= ATMEL_US_PAR_SPACE;
2028 } else if (termios->c_cflag & PARODD)
2029 mode |= ATMEL_US_PAR_ODD;
2030 else
2031 mode |= ATMEL_US_PAR_EVEN;
2032 } else
2033 mode |= ATMEL_US_PAR_NONE;
2034
2035 spin_lock_irqsave(&port->lock, flags);
2036
2037 port->read_status_mask = ATMEL_US_OVRE;
2038 if (termios->c_iflag & INPCK)
2039 port->read_status_mask |= (ATMEL_US_FRAME | ATMEL_US_PARE);
2040 if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
2041 port->read_status_mask |= ATMEL_US_RXBRK;
2042
2043 if (atmel_use_pdc_rx(port))
2044 /* need to enable error interrupts */
2045 atmel_uart_writel(port, ATMEL_US_IER, port->read_status_mask);
2046
2047 /*
2048 * Characters to ignore
2049 */
2050 port->ignore_status_mask = 0;
2051 if (termios->c_iflag & IGNPAR)
2052 port->ignore_status_mask |= (ATMEL_US_FRAME | ATMEL_US_PARE);
2053 if (termios->c_iflag & IGNBRK) {
2054 port->ignore_status_mask |= ATMEL_US_RXBRK;
2055 /*
2056 * If we're ignoring parity and break indicators,
2057 * ignore overruns too (for real raw support).
2058 */
2059 if (termios->c_iflag & IGNPAR)
2060 port->ignore_status_mask |= ATMEL_US_OVRE;
2061 }
2062 /* TODO: Ignore all characters if CREAD is not set. */
2063
2064 /* update the per-port timeout */
2065 uart_update_timeout(port, termios->c_cflag, baud);
2066
2067 /*
2068 * save/disable interrupts. The tty layer will ensure that the
2069 * transmitter is empty if requested by the caller, so there's
2070 * no need to wait for it here.
2071 */
2072 imr = atmel_uart_readl(port, ATMEL_US_IMR);
2073 atmel_uart_writel(port, ATMEL_US_IDR, -1);
2074
2075 /* disable receiver and transmitter */
2076 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXDIS | ATMEL_US_RXDIS);
2077
2078 /* mode */
2079 if (port->rs485.flags & SER_RS485_ENABLED) {
2080 atmel_uart_writel(port, ATMEL_US_TTGR,
2081 port->rs485.delay_rts_after_send);
2082 mode |= ATMEL_US_USMODE_RS485;
2083 } else if (termios->c_cflag & CRTSCTS) {
2084 /* RS232 with hardware handshake (RTS/CTS) */
2085 mode |= ATMEL_US_USMODE_HWHS;
2086 } else {
2087 /* RS232 without hardware handshake */
2088 mode |= ATMEL_US_USMODE_NORMAL;
2089 }
2090
2091 /* set the mode, clock divisor, parity, stop bits and data size */
2092 atmel_uart_writel(port, ATMEL_US_MR, mode);
2093
2094 /*
2095 * when switching the mode, set the RTS line state according to the
2096 * new mode, otherwise keep the former state
2097 */
2098 if ((old_mode & ATMEL_US_USMODE) != (mode & ATMEL_US_USMODE)) {
2099 unsigned int rts_state;
2100
2101 if ((mode & ATMEL_US_USMODE) == ATMEL_US_USMODE_HWHS) {
2102 /* let the hardware control the RTS line */
2103 rts_state = ATMEL_US_RTSDIS;
2104 } else {
2105 /* force RTS line to low level */
2106 rts_state = ATMEL_US_RTSEN;
2107 }
2108
2109 atmel_uart_writel(port, ATMEL_US_CR, rts_state);
2110 }
2111
2112 /* set the baud rate */
2113 atmel_uart_writel(port, ATMEL_US_BRGR, quot);
2114 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA | ATMEL_US_RSTRX);
2115 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN | ATMEL_US_RXEN);
2116
2117 /* restore interrupts */
2118 atmel_uart_writel(port, ATMEL_US_IER, imr);
2119
2120 /* CTS flow-control and modem-status interrupts */
2121 if (UART_ENABLE_MS(port, termios->c_cflag))
2122 atmel_enable_ms(port);
2123 else
2124 atmel_disable_ms(port);
2125
2126 spin_unlock_irqrestore(&port->lock, flags);
2127 }
2128
2129 static void atmel_set_ldisc(struct uart_port *port, struct ktermios *termios)
2130 {
2131 if (termios->c_line == N_PPS) {
2132 port->flags |= UPF_HARDPPS_CD;
2133 spin_lock_irq(&port->lock);
2134 atmel_enable_ms(port);
2135 spin_unlock_irq(&port->lock);
2136 } else {
2137 port->flags &= ~UPF_HARDPPS_CD;
2138 if (!UART_ENABLE_MS(port, termios->c_cflag)) {
2139 spin_lock_irq(&port->lock);
2140 atmel_disable_ms(port);
2141 spin_unlock_irq(&port->lock);
2142 }
2143 }
2144 }
2145
2146 /*
2147 * Return string describing the specified port
2148 */
2149 static const char *atmel_type(struct uart_port *port)
2150 {
2151 return (port->type == PORT_ATMEL) ? "ATMEL_SERIAL" : NULL;
2152 }
2153
2154 /*
2155 * Release the memory region(s) being used by 'port'.
2156 */
2157 static void atmel_release_port(struct uart_port *port)
2158 {
2159 struct platform_device *pdev = to_platform_device(port->dev);
2160 int size = pdev->resource[0].end - pdev->resource[0].start + 1;
2161
2162 release_mem_region(port->mapbase, size);
2163
2164 if (port->flags & UPF_IOREMAP) {
2165 iounmap(port->membase);
2166 port->membase = NULL;
2167 }
2168 }
2169
2170 /*
2171 * Request the memory region(s) being used by 'port'.
2172 */
2173 static int atmel_request_port(struct uart_port *port)
2174 {
2175 struct platform_device *pdev = to_platform_device(port->dev);
2176 int size = pdev->resource[0].end - pdev->resource[0].start + 1;
2177
2178 if (!request_mem_region(port->mapbase, size, "atmel_serial"))
2179 return -EBUSY;
2180
2181 if (port->flags & UPF_IOREMAP) {
2182 port->membase = ioremap(port->mapbase, size);
2183 if (port->membase == NULL) {
2184 release_mem_region(port->mapbase, size);
2185 return -ENOMEM;
2186 }
2187 }
2188
2189 return 0;
2190 }
2191
2192 /*
2193 * Configure/autoconfigure the port.
2194 */
2195 static void atmel_config_port(struct uart_port *port, int flags)
2196 {
2197 if (flags & UART_CONFIG_TYPE) {
2198 port->type = PORT_ATMEL;
2199 atmel_request_port(port);
2200 }
2201 }
2202
2203 /*
2204 * Verify the new serial_struct (for TIOCSSERIAL).
2205 */
2206 static int atmel_verify_port(struct uart_port *port, struct serial_struct *ser)
2207 {
2208 int ret = 0;
2209 if (ser->type != PORT_UNKNOWN && ser->type != PORT_ATMEL)
2210 ret = -EINVAL;
2211 if (port->irq != ser->irq)
2212 ret = -EINVAL;
2213 if (ser->io_type != SERIAL_IO_MEM)
2214 ret = -EINVAL;
2215 if (port->uartclk / 16 != ser->baud_base)
2216 ret = -EINVAL;
2217 if (port->mapbase != (unsigned long)ser->iomem_base)
2218 ret = -EINVAL;
2219 if (port->iobase != ser->port)
2220 ret = -EINVAL;
2221 if (ser->hub6 != 0)
2222 ret = -EINVAL;
2223 return ret;
2224 }
2225
2226 #ifdef CONFIG_CONSOLE_POLL
2227 static int atmel_poll_get_char(struct uart_port *port)
2228 {
2229 while (!(atmel_uart_readl(port, ATMEL_US_CSR) & ATMEL_US_RXRDY))
2230 cpu_relax();
2231
2232 return atmel_uart_read_char(port);
2233 }
2234
2235 static void atmel_poll_put_char(struct uart_port *port, unsigned char ch)
2236 {
2237 while (!(atmel_uart_readl(port, ATMEL_US_CSR) & ATMEL_US_TXRDY))
2238 cpu_relax();
2239
2240 atmel_uart_write_char(port, ch);
2241 }
2242 #endif
2243
2244 static struct uart_ops atmel_pops = {
2245 .tx_empty = atmel_tx_empty,
2246 .set_mctrl = atmel_set_mctrl,
2247 .get_mctrl = atmel_get_mctrl,
2248 .stop_tx = atmel_stop_tx,
2249 .start_tx = atmel_start_tx,
2250 .stop_rx = atmel_stop_rx,
2251 .enable_ms = atmel_enable_ms,
2252 .break_ctl = atmel_break_ctl,
2253 .startup = atmel_startup,
2254 .shutdown = atmel_shutdown,
2255 .flush_buffer = atmel_flush_buffer,
2256 .set_termios = atmel_set_termios,
2257 .set_ldisc = atmel_set_ldisc,
2258 .type = atmel_type,
2259 .release_port = atmel_release_port,
2260 .request_port = atmel_request_port,
2261 .config_port = atmel_config_port,
2262 .verify_port = atmel_verify_port,
2263 .pm = atmel_serial_pm,
2264 #ifdef CONFIG_CONSOLE_POLL
2265 .poll_get_char = atmel_poll_get_char,
2266 .poll_put_char = atmel_poll_put_char,
2267 #endif
2268 };
2269
2270 /*
2271 * Configure the port from the platform device resource info.
2272 */
2273 static int atmel_init_port(struct atmel_uart_port *atmel_port,
2274 struct platform_device *pdev)
2275 {
2276 int ret;
2277 struct uart_port *port = &atmel_port->uart;
2278 struct atmel_uart_data *pdata = dev_get_platdata(&pdev->dev);
2279
2280 atmel_init_property(atmel_port, pdev);
2281 atmel_set_ops(port);
2282
2283 atmel_init_rs485(port, pdev);
2284
2285 port->iotype = UPIO_MEM;
2286 port->flags = UPF_BOOT_AUTOCONF;
2287 port->ops = &atmel_pops;
2288 port->fifosize = 1;
2289 port->dev = &pdev->dev;
2290 port->mapbase = pdev->resource[0].start;
2291 port->irq = pdev->resource[1].start;
2292 port->rs485_config = atmel_config_rs485;
2293
2294 tasklet_init(&atmel_port->tasklet, atmel_tasklet_func,
2295 (unsigned long)port);
2296 tasklet_disable(&atmel_port->tasklet);
2297
2298 memset(&atmel_port->rx_ring, 0, sizeof(atmel_port->rx_ring));
2299
2300 if (pdata && pdata->regs) {
2301 /* Already mapped by setup code */
2302 port->membase = pdata->regs;
2303 } else {
2304 port->flags |= UPF_IOREMAP;
2305 port->membase = NULL;
2306 }
2307
2308 /* for console, the clock could already be configured */
2309 if (!atmel_port->clk) {
2310 atmel_port->clk = clk_get(&pdev->dev, "usart");
2311 if (IS_ERR(atmel_port->clk)) {
2312 ret = PTR_ERR(atmel_port->clk);
2313 atmel_port->clk = NULL;
2314 return ret;
2315 }
2316 ret = clk_prepare_enable(atmel_port->clk);
2317 if (ret) {
2318 clk_put(atmel_port->clk);
2319 atmel_port->clk = NULL;
2320 return ret;
2321 }
2322 port->uartclk = clk_get_rate(atmel_port->clk);
2323 clk_disable_unprepare(atmel_port->clk);
2324 /* only enable clock when USART is in use */
2325 }
2326
2327 /* Use TXEMPTY interrupt when in rs485 mode, else TXRDY or ENDTX|TXBUFE */
2328 if (port->rs485.flags & SER_RS485_ENABLED)
2329 atmel_port->tx_done_mask = ATMEL_US_TXEMPTY;
2330 else if (atmel_use_pdc_tx(port)) {
2331 port->fifosize = PDC_BUFFER_SIZE;
2332 atmel_port->tx_done_mask = ATMEL_US_ENDTX | ATMEL_US_TXBUFE;
2333 } else {
2334 atmel_port->tx_done_mask = ATMEL_US_TXRDY;
2335 }
2336
2337 return 0;
2338 }
2339
2340 struct platform_device *atmel_default_console_device; /* the serial console device */
2341
2342 #ifdef CONFIG_SERIAL_ATMEL_CONSOLE
2343 static void atmel_console_putchar(struct uart_port *port, int ch)
2344 {
2345 while (!(atmel_uart_readl(port, ATMEL_US_CSR) & ATMEL_US_TXRDY))
2346 cpu_relax();
2347 atmel_uart_write_char(port, ch);
2348 }
2349
2350 /*
2351 * Interrupts are disabled on entry
2352 */
2353 static void atmel_console_write(struct console *co, const char *s, u_int count)
2354 {
2355 struct uart_port *port = &atmel_ports[co->index].uart;
2356 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
2357 unsigned int status, imr;
2358 unsigned int pdc_tx;
2359
2360 /*
2361 * First, save IMR and then disable interrupts
2362 */
2363 imr = atmel_uart_readl(port, ATMEL_US_IMR);
2364 atmel_uart_writel(port, ATMEL_US_IDR,
2365 ATMEL_US_RXRDY | atmel_port->tx_done_mask);
2366
2367 /* Store PDC transmit status and disable it */
2368 pdc_tx = atmel_uart_readl(port, ATMEL_PDC_PTSR) & ATMEL_PDC_TXTEN;
2369 atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTDIS);
2370
2371 uart_console_write(port, s, count, atmel_console_putchar);
2372
2373 /*
2374 * Finally, wait for transmitter to become empty
2375 * and restore IMR
2376 */
2377 do {
2378 status = atmel_uart_readl(port, ATMEL_US_CSR);
2379 } while (!(status & ATMEL_US_TXRDY));
2380
2381 /* Restore PDC transmit status */
2382 if (pdc_tx)
2383 atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTEN);
2384
2385 /* set interrupts back the way they were */
2386 atmel_uart_writel(port, ATMEL_US_IER, imr);
2387 }
2388
2389 /*
2390 * If the port was already initialized (e.g. by a boot loader),
2391 * try to determine the current setup.
2392 */
2393 static void __init atmel_console_get_options(struct uart_port *port, int *baud,
2394 int *parity, int *bits)
2395 {
2396 unsigned int mr, quot;
2397
2398 /*
2399 * If the baud rate generator isn't running, the port wasn't
2400 * initialized by the boot loader.
2401 */
2402 quot = atmel_uart_readl(port, ATMEL_US_BRGR) & ATMEL_US_CD;
2403 if (!quot)
2404 return;
2405
2406 mr = atmel_uart_readl(port, ATMEL_US_MR) & ATMEL_US_CHRL;
2407 if (mr == ATMEL_US_CHRL_8)
2408 *bits = 8;
2409 else
2410 *bits = 7;
2411
2412 mr = atmel_uart_readl(port, ATMEL_US_MR) & ATMEL_US_PAR;
2413 if (mr == ATMEL_US_PAR_EVEN)
2414 *parity = 'e';
2415 else if (mr == ATMEL_US_PAR_ODD)
2416 *parity = 'o';
2417
2418 /*
2419 * The serial core only rounds down when matching this to a
2420 * supported baud rate. Make sure we don't end up slightly
2421 * lower than one of those, as it would make us fall through
2422 * to a much lower baud rate than we really want.
2423 */
2424 *baud = port->uartclk / (16 * (quot - 1));
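/*
 * For instance, with a hypothetical 66 MHz uartclk and quot = 36
 * (nominally ~114583 baud): 66000000 / (16 * 35) ~= 117857, which
 * rounds down to 115200, whereas dividing by quot itself would give
 * ~114583 and fall through to 57600.
 */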
2425 }
2426
2427 static int __init atmel_console_setup(struct console *co, char *options)
2428 {
2429 int ret;
2430 struct uart_port *port = &atmel_ports[co->index].uart;
2431 int baud = 115200;
2432 int bits = 8;
2433 int parity = 'n';
2434 int flow = 'n';
2435
2436 if (port->membase == NULL) {
2437 /* Port not initialized yet - delay setup */
2438 return -ENODEV;
2439 }
2440
2441 ret = clk_prepare_enable(atmel_ports[co->index].clk);
2442 if (ret)
2443 return ret;
2444
2445 atmel_uart_writel(port, ATMEL_US_IDR, -1);
2446 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA | ATMEL_US_RSTRX);
2447 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN | ATMEL_US_RXEN);
2448
2449 if (options)
2450 uart_parse_options(options, &baud, &parity, &bits, &flow);
2451 else
2452 atmel_console_get_options(port, &baud, &parity, &bits);
2453
2454 return uart_set_options(port, co, baud, parity, bits, flow);
2455 }
2456
2457 static struct uart_driver atmel_uart;
2458
2459 static struct console atmel_console = {
2460 .name = ATMEL_DEVICENAME,
2461 .write = atmel_console_write,
2462 .device = uart_console_device,
2463 .setup = atmel_console_setup,
2464 .flags = CON_PRINTBUFFER,
2465 .index = -1,
2466 .data = &atmel_uart,
2467 };
2468
2469 #define ATMEL_CONSOLE_DEVICE (&atmel_console)
2470
2471 /*
2472 * Early console initialization (before the VM subsystem is initialized).
2473 */
2474 static int __init atmel_console_init(void)
2475 {
2476 int ret;
2477 if (atmel_default_console_device) {
2478 struct atmel_uart_data *pdata =
2479 dev_get_platdata(&atmel_default_console_device->dev);
2480 int id = pdata->num;
2481 struct atmel_uart_port *port = &atmel_ports[id];
2482
2483 port->backup_imr = 0;
2484 port->uart.line = id;
2485
2486 add_preferred_console(ATMEL_DEVICENAME, id, NULL);
2487 ret = atmel_init_port(port, atmel_default_console_device);
2488 if (ret)
2489 return ret;
2490 register_console(&atmel_console);
2491 }
2492
2493 return 0;
2494 }
2495
2496 console_initcall(atmel_console_init);
2497
2498 /*
2499 * Late console initialization.
2500 */
2501 static int __init atmel_late_console_init(void)
2502 {
2503 if (atmel_default_console_device
2504 && !(atmel_console.flags & CON_ENABLED))
2505 register_console(&atmel_console);
2506
2507 return 0;
2508 }
2509
2510 core_initcall(atmel_late_console_init);
2511
2512 static inline bool atmel_is_console_port(struct uart_port *port)
2513 {
2514 return port->cons && port->cons->index == port->line;
2515 }
2516
2517 #else
2518 #define ATMEL_CONSOLE_DEVICE NULL
2519
2520 static inline bool atmel_is_console_port(struct uart_port *port)
2521 {
2522 return false;
2523 }
2524 #endif
2525
2526 static struct uart_driver atmel_uart = {
2527 .owner = THIS_MODULE,
2528 .driver_name = "atmel_serial",
2529 .dev_name = ATMEL_DEVICENAME,
2530 .major = SERIAL_ATMEL_MAJOR,
2531 .minor = MINOR_START,
2532 .nr = ATMEL_MAX_UART,
2533 .cons = ATMEL_CONSOLE_DEVICE,
2534 };
2535
2536 #ifdef CONFIG_PM
2537 static bool atmel_serial_clk_will_stop(void)
2538 {
2539 #ifdef CONFIG_ARCH_AT91
2540 return at91_suspend_entering_slow_clock();
2541 #else
2542 return false;
2543 #endif
2544 }
2545
2546 static int atmel_serial_suspend(struct platform_device *pdev,
2547 pm_message_t state)
2548 {
2549 struct uart_port *port = platform_get_drvdata(pdev);
2550 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
2551
2552 if (atmel_is_console_port(port) && console_suspend_enabled) {
2553 /* Drain the TX shifter */
2554 while (!(atmel_uart_readl(port, ATMEL_US_CSR) &
2555 ATMEL_US_TXEMPTY))
2556 cpu_relax();
2557 }
2558
2559 /* we cannot wake up if we're running on the slow clock */
2560 atmel_port->may_wakeup = device_may_wakeup(&pdev->dev);
2561 if (atmel_serial_clk_will_stop()) {
2562 unsigned long flags;
2563
2564 spin_lock_irqsave(&atmel_port->lock_suspended, flags);
2565 atmel_port->suspended = true;
2566 spin_unlock_irqrestore(&atmel_port->lock_suspended, flags);
2567 device_set_wakeup_enable(&pdev->dev, 0);
2568 }
2569
2570 uart_suspend_port(&atmel_uart, port);
2571
2572 return 0;
2573 }
2574
2575 static int atmel_serial_resume(struct platform_device *pdev)
2576 {
2577 struct uart_port *port = platform_get_drvdata(pdev);
2578 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
2579 unsigned long flags;
2580
2581 spin_lock_irqsave(&atmel_port->lock_suspended, flags);
2582 if (atmel_port->pending) {
2583 atmel_handle_receive(port, atmel_port->pending);
2584 atmel_handle_status(port, atmel_port->pending,
2585 atmel_port->pending_status);
2586 atmel_handle_transmit(port, atmel_port->pending);
2587 atmel_port->pending = 0;
2588 }
2589 atmel_port->suspended = false;
2590 spin_unlock_irqrestore(&atmel_port->lock_suspended, flags);
2591
2592 uart_resume_port(&atmel_uart, port);
2593 device_set_wakeup_enable(&pdev->dev, atmel_port->may_wakeup);
2594
2595 return 0;
2596 }
2597 #else
2598 #define atmel_serial_suspend NULL
2599 #define atmel_serial_resume NULL
2600 #endif
2601
2602 static void atmel_serial_probe_fifos(struct atmel_uart_port *port,
2603 struct platform_device *pdev)
2604 {
2605 port->fifo_size = 0;
2606 port->rts_low = 0;
2607 port->rts_high = 0;
2608
2609 if (of_property_read_u32(pdev->dev.of_node,
2610 "atmel,fifo-size",
2611 &port->fifo_size))
2612 return;
2613
2614 if (!port->fifo_size)
2615 return;
2616
2617 if (port->fifo_size < ATMEL_MIN_FIFO_SIZE) {
2618 port->fifo_size = 0;
2619 dev_err(&pdev->dev, "Invalid FIFO size\n");
2620 return;
2621 }
2622
2623 /*
2624 * 0 <= rts_low <= rts_high <= fifo_size
2625 * Once their CTS line is asserted by the remote peer, some x86 UARTs tend
2626 * to flush their internal TX FIFO (commonly up to 16 data) before they
2627 * actually stop sending new data. So we try to set the RTS High
2628 * Threshold to a reasonably high value that respects this empirical
2629 * 16-data rule when possible.
2630 */
2631 port->rts_high = max_t(int, port->fifo_size >> 1,
2632 port->fifo_size - ATMEL_RTS_HIGH_OFFSET);
2633 port->rts_low = max_t(int, port->fifo_size >> 2,
2634 port->fifo_size - ATMEL_RTS_LOW_OFFSET);
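/*
 * For example, with a hypothetical 32-data FIFO:
 *   rts_high = max(32 >> 1, 32 - ATMEL_RTS_HIGH_OFFSET) = max(16, 16) = 16
 *   rts_low  = max(32 >> 2, 32 - ATMEL_RTS_LOW_OFFSET)  = max(8, 12)  = 12
 */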
2635
2636 dev_info(&pdev->dev, "Using FIFO (%u data)\n",
2637 port->fifo_size);
2638 dev_dbg(&pdev->dev, "RTS High Threshold : %2u data\n",
2639 port->rts_high);
2640 dev_dbg(&pdev->dev, "RTS Low Threshold : %2u data\n",
2641 port->rts_low);
2642 }
2643
2644 static int atmel_serial_probe(struct platform_device *pdev)
2645 {
2646 struct atmel_uart_port *port;
2647 struct device_node *np = pdev->dev.of_node;
2648 struct atmel_uart_data *pdata = dev_get_platdata(&pdev->dev);
2649 void *data;
2650 int ret = -ENODEV;
2651 bool rs485_enabled;
2652
2653 BUILD_BUG_ON(ATMEL_SERIAL_RINGSIZE & (ATMEL_SERIAL_RINGSIZE - 1));
2654
2655 if (np)
2656 ret = of_alias_get_id(np, "serial");
2657 else
2658 if (pdata)
2659 ret = pdata->num;
2660
2661 if (ret < 0)
2662 /* port id found neither in platform data nor in device-tree
2663 * aliases: auto-enumerate it */
2664 ret = find_first_zero_bit(atmel_ports_in_use, ATMEL_MAX_UART);
2665
2666 if (ret >= ATMEL_MAX_UART) {
2667 ret = -ENODEV;
2668 goto err;
2669 }
2670
2671 if (test_and_set_bit(ret, atmel_ports_in_use)) {
2672 /* port already in use */
2673 ret = -EBUSY;
2674 goto err;
2675 }
2676
2677 port = &atmel_ports[ret];
2678 port->backup_imr = 0;
2679 port->uart.line = ret;
2680 atmel_serial_probe_fifos(port, pdev);
2681
2682 spin_lock_init(&port->lock_suspended);
2683
2684 ret = atmel_init_port(port, pdev);
2685 if (ret)
2686 goto err_clear_bit;
2687
2688 port->gpios = mctrl_gpio_init(&port->uart, 0);
2689 if (IS_ERR(port->gpios)) {
2690 ret = PTR_ERR(port->gpios);
2691 goto err_clear_bit;
2692 }
2693
2694 if (!atmel_use_pdc_rx(&port->uart)) {
2695 ret = -ENOMEM;
2696 data = kmalloc(sizeof(struct atmel_uart_char)
2697 * ATMEL_SERIAL_RINGSIZE, GFP_KERNEL);
2698 if (!data)
2699 goto err_alloc_ring;
2700 port->rx_ring.buf = data;
2701 }
2702
2703 rs485_enabled = port->uart.rs485.flags & SER_RS485_ENABLED;
2704
2705 ret = uart_add_one_port(&atmel_uart, &port->uart);
2706 if (ret)
2707 goto err_add_port;
2708
2709 #ifdef CONFIG_SERIAL_ATMEL_CONSOLE
2710 if (atmel_is_console_port(&port->uart)
2711 && ATMEL_CONSOLE_DEVICE->flags & CON_ENABLED) {
2712 /*
2713 * The serial core enabled the clock for us, so undo
2714 * the clk_prepare_enable() in atmel_console_setup()
2715 */
2716 clk_disable_unprepare(port->clk);
2717 }
2718 #endif
2719
2720 device_init_wakeup(&pdev->dev, 1);
2721 platform_set_drvdata(pdev, port);
2722
2723 /*
2724 * The peripheral clock has been disabled by atmel_init_port():
2725 * enable it before accessing I/O registers
2726 */
2727 clk_prepare_enable(port->clk);
2728
2729 if (rs485_enabled) {
2730 atmel_uart_writel(&port->uart, ATMEL_US_MR,
2731 ATMEL_US_USMODE_NORMAL);
2732 atmel_uart_writel(&port->uart, ATMEL_US_CR, ATMEL_US_RTSEN);
2733 }
2734
2735 /*
2736 * Get port name of usart or uart
2737 */
2738 atmel_get_ip_name(&port->uart);
2739
2740 /*
2741 * The peripheral clock can now safely be disabled until the port
2742 * is used
2743 */
2744 clk_disable_unprepare(port->clk);
2745
2746 return 0;
2747
2748 err_add_port:
2749 kfree(port->rx_ring.buf);
2750 port->rx_ring.buf = NULL;
2751 err_alloc_ring:
2752 if (!atmel_is_console_port(&port->uart)) {
2753 clk_put(port->clk);
2754 port->clk = NULL;
2755 }
2756 err_clear_bit:
2757 clear_bit(port->uart.line, atmel_ports_in_use);
2758 err:
2759 return ret;
2760 }
2761
2762 static struct platform_driver atmel_serial_driver = {
2763 .probe = atmel_serial_probe,
2764 .suspend = atmel_serial_suspend,
2765 .resume = atmel_serial_resume,
2766 .driver = {
2767 .name = "atmel_usart",
2768 .of_match_table = of_match_ptr(atmel_serial_dt_ids),
2769 .suppress_bind_attrs = true,
2770 },
2771 };
2772
2773 static int __init atmel_serial_init(void)
2774 {
2775 int ret;
2776
2777 ret = uart_register_driver(&atmel_uart);
2778 if (ret)
2779 return ret;
2780
2781 ret = platform_driver_register(&atmel_serial_driver);
2782 if (ret)
2783 uart_unregister_driver(&atmel_uart);
2784
2785 return ret;
2786 }
2787 device_initcall(atmel_serial_init);