/*
 * SuperH IrDA Driver
 *
 * Copyright (C) 2010 Renesas Solutions Corp.
 * Kuninori Morimoto <morimoto.kuninori@renesas.com>
 *
 * Based on sh_sir.c
 * Copyright (C) 2009 Renesas Solutions Corp.
 * Copyright 2006-2009 Analog Devices Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

/*
 * CAUTION
 *
 * This driver is very simple, so it does not yet support:
 * - MIR/FIR
 * - DMA transfers
 * - FIFO mode
 */
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <net/irda/wrapper.h>
#include <net/irda/irda_device.h>

#define DRIVER_NAME "sh_irda"

#if defined(CONFIG_ARCH_SH7367) || defined(CONFIG_ARCH_SH7377)
#define __IRDARAM_LEN	0x13FF
#else
#define __IRDARAM_LEN	0x1039
#endif

#define IRTMR		0x1F00 /* Transfer mode */
#define IRCFR		0x1F02 /* Configuration */
#define IRCTR		0x1F04 /* IR control */
#define IRTFLR		0x1F20 /* Transmit frame length */
#define IRTCTR		0x1F22 /* Transmit control */
#define IRRFLR		0x1F40 /* Receive frame length */
#define IRRCTR		0x1F42 /* Receive control */
#define SIRISR		0x1F60 /* SIR-UART mode interrupt source */
#define SIRIMR		0x1F62 /* SIR-UART mode interrupt mask */
#define SIRICR		0x1F64 /* SIR-UART mode interrupt clear */
#define SIRBCR		0x1F68 /* SIR-UART mode baud rate count */
#define MFIRISR		0x1F70 /* MIR/FIR mode interrupt source */
#define MFIRIMR		0x1F72 /* MIR/FIR mode interrupt mask */
#define MFIRICR		0x1F74 /* MIR/FIR mode interrupt clear */
#define CRCCTR		0x1F80 /* CRC engine control */
#define CRCIR		0x1F86 /* CRC engine input data */
#define CRCCR		0x1F8A /* CRC engine calculation */
#define CRCOR		0x1F8E /* CRC engine output data */
#define FIFOCP		0x1FC0 /* FIFO current pointer */
#define FIFOFP		0x1FC2 /* FIFO follow pointer */
#define FIFORSMSK	0x1FC4 /* FIFO receive status mask */
#define FIFORSOR	0x1FC6 /* FIFO receive status OR */
#define FIFOSEL		0x1FC8 /* FIFO select */
#define FIFORS		0x1FCA /* FIFO receive status */
#define FIFORFL		0x1FCC /* FIFO receive frame length */
#define FIFORAMCP	0x1FCE /* FIFO RAM current pointer */
#define FIFORAMFP	0x1FD0 /* FIFO RAM follow pointer */
#define BIFCTL		0x1FD2 /* BUS interface control */
#define IRDARAM		0x0000 /* IrDA buffer RAM */
#define IRDARAM_LEN	__IRDARAM_LEN /* - 8/16/32 (read-only for 32) */

/* IRTMR */
#define TMD_MASK	(0x3 << 14) /* Transfer Mode */
#define TMD_SIR		(0x0 << 14)
#define TMD_MIR		(0x3 << 14)
#define TMD_FIR		(0x2 << 14)

#define FIFORIM		(1 << 8) /* FIFO receive interrupt mask */
#define MIM		(1 << 4) /* MIR/FIR Interrupt Mask */
#define SIM		(1 << 0) /* SIR Interrupt Mask */
#define xIM_MASK	(FIFORIM | MIM | SIM)

/* IRCFR */
#define RTO_SHIFT	8 /* shift for Receive Timeout */
#define RTO		(0x3 << RTO_SHIFT)

/* IRTCTR */
#define ARMOD		(1 << 15) /* Auto-Receive Mode */
#define TE		(1 << 0) /* Transmit Enable */

/* IRRFLR */
#define RFL_MASK	(0x1FFF) /* mask for Receive Frame Length */

/* IRRCTR */
#define RE		(1 << 0) /* Receive Enable */

/*
 * SIRISR, SIRIMR, SIRICR,
 * MFIRISR, MFIRIMR, MFIRICR
 */
#define FRE		(1 << 15) /* Frame Receive End */
#define TROV		(1 << 11) /* Transfer Area Overflow */
#define xIR_9		(1 << 9)
#define TOT		xIR_9 /* for SIR Timeout */
#define ABTD		xIR_9 /* for MIR/FIR Abort Detection */
#define xIR_8		(1 << 8)
#define FER		xIR_8 /* for SIR Framing Error */
#define CRCER		xIR_8 /* for MIR/FIR CRC error */
#define FTE		(1 << 7) /* Frame Transmit End */
#define xIR_MASK	(FRE | TROV | xIR_9 | xIR_8 | FTE)

/* SIRBCR */
#define BRC_MASK	(0x3F) /* mask for Baud Rate Count */

/* CRCCTR */
#define CRC_RST		(1 << 15) /* CRC Engine Reset */
#define CRC_CT_MASK	0x0FFF /* mask for CRC Engine Input Data Count */

/* CRCIR */
#define CRC_IN_MASK	0x0FFF /* mask for CRC Engine Input Data */

/************************************************************************


			enum / structure


************************************************************************/
enum sh_irda_mode {
	SH_IRDA_NONE = 0,
	SH_IRDA_SIR,
	SH_IRDA_MIR,
	SH_IRDA_FIR,
};

struct sh_irda_self;
struct sh_irda_xir_func {
	int (*xir_fre)	(struct sh_irda_self *self);
	int (*xir_trov)	(struct sh_irda_self *self);
	int (*xir_9)	(struct sh_irda_self *self);
	int (*xir_8)	(struct sh_irda_self *self);
	int (*xir_fte)	(struct sh_irda_self *self);
};
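
/*
 * One handler table exists per transfer mode (sir_func, mfir_func and
 * xir_func below); the interrupt handler dispatches through whichever
 * table is currently installed in self->xir_func.
 */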
142 | ||
143 | struct sh_irda_self { | |
144 | void __iomem *membase; | |
145 | unsigned int irq; | |
146 | struct clk *clk; | |
147 | ||
148 | struct net_device *ndev; | |
149 | ||
150 | struct irlap_cb *irlap; | |
151 | struct qos_info qos; | |
152 | ||
153 | iobuff_t tx_buff; | |
154 | iobuff_t rx_buff; | |
155 | ||
156 | enum sh_irda_mode mode; | |
157 | spinlock_t lock; | |
158 | ||
159 | struct sh_irda_xir_func *xir_func; | |
160 | }; | |
161 | ||
162 | /************************************************************************ | |
163 | ||
164 | ||
165 | common function | |
166 | ||
167 | ||
168 | ************************************************************************/ | |
169 | static void sh_irda_write(struct sh_irda_self *self, u32 offset, u16 data) | |
170 | { | |
171 | unsigned long flags; | |
172 | ||
173 | spin_lock_irqsave(&self->lock, flags); | |
174 | iowrite16(data, self->membase + offset); | |
175 | spin_unlock_irqrestore(&self->lock, flags); | |
176 | } | |
177 | ||
178 | static u16 sh_irda_read(struct sh_irda_self *self, u32 offset) | |
179 | { | |
180 | unsigned long flags; | |
181 | u16 ret; | |
182 | ||
183 | spin_lock_irqsave(&self->lock, flags); | |
184 | ret = ioread16(self->membase + offset); | |
185 | spin_unlock_irqrestore(&self->lock, flags); | |
186 | ||
187 | return ret; | |
188 | } | |
189 | ||
static void sh_irda_update_bits(struct sh_irda_self *self, u32 offset,
				u16 mask, u16 data)
{
	unsigned long flags;
	u16 old, new;

	spin_lock_irqsave(&self->lock, flags);
	old = ioread16(self->membase + offset);
	new = (old & ~mask) | data;
	if (old != new)
		iowrite16(new, self->membase + offset);
	spin_unlock_irqrestore(&self->lock, flags);
}
203 | ||
204 | /************************************************************************ | |
205 | ||
206 | ||
207 | mode function | |
208 | ||
209 | ||
210 | ************************************************************************/ | |
211 | /*===================================== | |
212 | * | |
213 | * common | |
214 | * | |
215 | *=====================================*/ | |
216 | static void sh_irda_rcv_ctrl(struct sh_irda_self *self, int enable) | |
217 | { | |
218 | struct device *dev = &self->ndev->dev; | |
219 | ||
220 | sh_irda_update_bits(self, IRRCTR, RE, enable ? RE : 0); | |
221 | dev_dbg(dev, "recv %s\n", enable ? "enable" : "disable"); | |
222 | } | |
223 | ||
224 | static int sh_irda_set_timeout(struct sh_irda_self *self, int interval) | |
225 | { | |
226 | struct device *dev = &self->ndev->dev; | |
227 | ||
228 | if (SH_IRDA_SIR != self->mode) | |
229 | interval = 0; | |
230 | ||
231 | if (interval < 0 || interval > 2) { | |
232 | dev_err(dev, "unsupported timeout interval\n"); | |
233 | return -EINVAL; | |
234 | } | |
235 | ||
236 | sh_irda_update_bits(self, IRCFR, RTO, interval << RTO_SHIFT); | |
237 | return 0; | |
238 | } | |
239 | ||
static int sh_irda_set_baudrate(struct sh_irda_self *self, int baudrate)
{
	struct device *dev = &self->ndev->dev;
	u16 val;

	if (baudrate < 0)
		return 0;

	if (SH_IRDA_SIR != self->mode) {
		dev_err(dev, "it is not SIR mode\n");
		return -EINVAL;
	}

	/*
	 * Baud rate (bits/s) =
	 *	(48 MHz / 26) / ((baud rate counter value + 1) x 16)
	 */
	val = (48000000 / 26 / 16 / baudrate) - 1;
	dev_dbg(dev, "baudrate = %d, val = 0x%02x\n", baudrate, val);

	sh_irda_update_bits(self, SIRBCR, BRC_MASK, val);

	return 0;
}
264 | ||
265 | static int xir_get_rcv_length(struct sh_irda_self *self) | |
266 | { | |
267 | return RFL_MASK & sh_irda_read(self, IRRFLR); | |
268 | } | |
269 | ||
270 | /*===================================== | |
271 | * | |
272 | * NONE MODE | |
273 | * | |
274 | *=====================================*/ | |
275 | static int xir_fre(struct sh_irda_self *self) | |
276 | { | |
277 | struct device *dev = &self->ndev->dev; | |
278 | dev_err(dev, "none mode: frame recv\n"); | |
279 | return 0; | |
280 | } | |
281 | ||
282 | static int xir_trov(struct sh_irda_self *self) | |
283 | { | |
284 | struct device *dev = &self->ndev->dev; | |
285 | dev_err(dev, "none mode: buffer ram over\n"); | |
286 | return 0; | |
287 | } | |
288 | ||
289 | static int xir_9(struct sh_irda_self *self) | |
290 | { | |
291 | struct device *dev = &self->ndev->dev; | |
292 | dev_err(dev, "none mode: time over\n"); | |
293 | return 0; | |
294 | } | |
295 | ||
296 | static int xir_8(struct sh_irda_self *self) | |
297 | { | |
298 | struct device *dev = &self->ndev->dev; | |
299 | dev_err(dev, "none mode: framing error\n"); | |
300 | return 0; | |
301 | } | |
302 | ||
303 | static int xir_fte(struct sh_irda_self *self) | |
304 | { | |
305 | struct device *dev = &self->ndev->dev; | |
306 | dev_err(dev, "none mode: frame transmit end\n"); | |
307 | return 0; | |
308 | } | |
309 | ||
310 | static struct sh_irda_xir_func xir_func = { | |
311 | .xir_fre = xir_fre, | |
312 | .xir_trov = xir_trov, | |
313 | .xir_9 = xir_9, | |
314 | .xir_8 = xir_8, | |
315 | .xir_fte = xir_fte, | |
316 | }; | |
317 | ||
/*=====================================
 *
 *		MIR/FIR MODE
 *
 * MIR/FIR are not supported yet
 *=====================================*/
static struct sh_irda_xir_func mfir_func = {
	.xir_fre	= xir_fre,
	.xir_trov	= xir_trov,
	.xir_9		= xir_9,
	.xir_8		= xir_8,
	.xir_fte	= xir_fte,
};

/*=====================================
 *
 *		SIR MODE
 *
 *=====================================*/
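/*
 * IRDARAM is read 16 bits at a time; each word is split into bytes and
 * fed through async_unwrap_char() to rebuild the received frame in
 * rx_buff.
 */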
static int sir_fre(struct sh_irda_self *self)
{
	struct device *dev = &self->ndev->dev;
	u16 data16;
	u8  *data = (u8 *)&data16;
	int len = xir_get_rcv_length(self);
	int i, j;

	if (len > IRDARAM_LEN)
		len = IRDARAM_LEN;

	dev_dbg(dev, "frame recv length = %d\n", len);

	for (i = 0; i < len; i++) {
		j = i % 2;
		if (!j)
			data16 = sh_irda_read(self, IRDARAM + i);

		async_unwrap_char(self->ndev, &self->ndev->stats,
				  &self->rx_buff, data[j]);
	}
	self->ndev->last_rx = jiffies;

	sh_irda_rcv_ctrl(self, 1);

	return 0;
}
364 | ||
365 | static int sir_trov(struct sh_irda_self *self) | |
366 | { | |
367 | struct device *dev = &self->ndev->dev; | |
368 | ||
369 | dev_err(dev, "buffer ram over\n"); | |
370 | sh_irda_rcv_ctrl(self, 1); | |
371 | return 0; | |
372 | } | |
373 | ||
374 | static int sir_tot(struct sh_irda_self *self) | |
375 | { | |
376 | struct device *dev = &self->ndev->dev; | |
377 | ||
378 | dev_err(dev, "time over\n"); | |
379 | sh_irda_set_baudrate(self, 9600); | |
380 | sh_irda_rcv_ctrl(self, 1); | |
381 | return 0; | |
382 | } | |
383 | ||
384 | static int sir_fer(struct sh_irda_self *self) | |
385 | { | |
386 | struct device *dev = &self->ndev->dev; | |
387 | ||
388 | dev_err(dev, "framing error\n"); | |
389 | sh_irda_rcv_ctrl(self, 1); | |
390 | return 0; | |
391 | } | |
392 | ||
393 | static int sir_fte(struct sh_irda_self *self) | |
394 | { | |
395 | struct device *dev = &self->ndev->dev; | |
396 | ||
397 | dev_dbg(dev, "frame transmit end\n"); | |
398 | netif_wake_queue(self->ndev); | |
399 | ||
400 | return 0; | |
401 | } | |
402 | ||
403 | static struct sh_irda_xir_func sir_func = { | |
404 | .xir_fre = sir_fre, | |
405 | .xir_trov = sir_trov, | |
406 | .xir_9 = sir_tot, | |
407 | .xir_8 = sir_fer, | |
408 | .xir_fte = sir_fte, | |
409 | }; | |
410 | ||
static void sh_irda_set_mode(struct sh_irda_self *self, enum sh_irda_mode mode)
{
	struct device *dev = &self->ndev->dev;
	struct sh_irda_xir_func	*func;
	const char *name;
	u16 data;

	switch (mode) {
	case SH_IRDA_SIR:
		name = "SIR";
		data = TMD_SIR;
		func = &sir_func;
		break;
	case SH_IRDA_MIR:
		name = "MIR";
		data = TMD_MIR;
		func = &mfir_func;
		break;
	case SH_IRDA_FIR:
		name = "FIR";
		data = TMD_FIR;
		func = &mfir_func;
		break;
	default:
		name = "NONE";
		data = 0;
		func = &xir_func;
		break;
	}

	self->mode	= mode;
	self->xir_func	= func;
	sh_irda_update_bits(self, IRTMR, TMD_MASK, data);

	dev_dbg(dev, "switch to %s mode\n", name);
}
447 | ||
448 | /************************************************************************ | |
449 | ||
450 | ||
451 | irq function | |
452 | ||
453 | ||
454 | ************************************************************************/ | |
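/*
 * All sources are first masked and any pending status cleared; then only
 * the master mask bit and the interrupt sources belonging to the current
 * transfer mode are unmasked.
 */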
static void sh_irda_set_irq_mask(struct sh_irda_self *self)
{
	u16 tmr_hole;
	u16 xir_reg;

	/* set all mask */
	sh_irda_update_bits(self, IRTMR,   xIM_MASK, xIM_MASK);
	sh_irda_update_bits(self, SIRIMR,  xIR_MASK, xIR_MASK);
	sh_irda_update_bits(self, MFIRIMR, xIR_MASK, xIR_MASK);

	/* clear irq */
	sh_irda_update_bits(self, SIRICR,  xIR_MASK, xIR_MASK);
	sh_irda_update_bits(self, MFIRICR, xIR_MASK, xIR_MASK);

	switch (self->mode) {
	case SH_IRDA_SIR:
		tmr_hole	= SIM;
		xir_reg		= SIRIMR;
		break;
	case SH_IRDA_MIR:
	case SH_IRDA_FIR:
		tmr_hole	= MIM;
		xir_reg		= MFIRIMR;
		break;
	default:
		tmr_hole	= 0;
		xir_reg		= 0;
		break;
	}

	/* open mask */
	if (xir_reg) {
		sh_irda_update_bits(self, IRTMR, tmr_hole, 0);
		sh_irda_update_bits(self, xir_reg, xIR_MASK, 0);
	}
}

static irqreturn_t sh_irda_irq(int irq, void *dev_id)
{
	struct sh_irda_self *self = dev_id;
	struct sh_irda_xir_func	*func = self->xir_func;
	u16 isr = sh_irda_read(self, SIRISR);

	/* clear irq */
	sh_irda_write(self, SIRICR, isr);

	if (isr & FRE)
		func->xir_fre(self);
	if (isr & TROV)
		func->xir_trov(self);
	if (isr & xIR_9)
		func->xir_9(self);
	if (isr & xIR_8)
		func->xir_8(self);
	if (isr & FTE)
		func->xir_fte(self);

	return IRQ_HANDLED;
}
514 | ||
515 | /************************************************************************ | |
516 | ||
517 | ||
518 | CRC function | |
519 | ||
520 | ||
521 | ************************************************************************/ | |
522 | static void sh_irda_crc_reset(struct sh_irda_self *self) | |
523 | { | |
524 | sh_irda_write(self, CRCCTR, CRC_RST); | |
525 | } | |
526 | ||
527 | static void sh_irda_crc_add(struct sh_irda_self *self, u16 data) | |
528 | { | |
529 | sh_irda_write(self, CRCIR, data & CRC_IN_MASK); | |
530 | } | |
531 | ||
532 | static u16 sh_irda_crc_cnt(struct sh_irda_self *self) | |
533 | { | |
534 | return CRC_CT_MASK & sh_irda_read(self, CRCCTR); | |
535 | } | |
536 | ||
537 | static u16 sh_irda_crc_out(struct sh_irda_self *self) | |
538 | { | |
539 | return sh_irda_read(self, CRCOR); | |
540 | } | |
541 | ||
static int sh_irda_crc_init(struct sh_irda_self *self)
{
	struct device *dev = &self->ndev->dev;
	int ret = -EIO;
	u16 val;

	sh_irda_crc_reset(self);

	sh_irda_crc_add(self, 0xCC);
	sh_irda_crc_add(self, 0xF5);
	sh_irda_crc_add(self, 0xF1);
	sh_irda_crc_add(self, 0xA7);

	val = sh_irda_crc_cnt(self);
	if (4 != val) {
		dev_err(dev, "CRC count error %x\n", val);
		goto crc_init_out;
	}

	val = sh_irda_crc_out(self);
	if (0x51DF != val) {
		dev_err(dev, "CRC result error %x\n", val);
		goto crc_init_out;
	}

	ret = 0;

crc_init_out:

	sh_irda_crc_reset(self);
	return ret;
}
574 | ||
575 | /************************************************************************ | |
576 | ||
577 | ||
578 | iobuf function | |
579 | ||
580 | ||
581 | ************************************************************************/ | |
582 | static void sh_irda_remove_iobuf(struct sh_irda_self *self) | |
583 | { | |
584 | kfree(self->rx_buff.head); | |
585 | ||
586 | self->tx_buff.head = NULL; | |
587 | self->tx_buff.data = NULL; | |
588 | self->rx_buff.head = NULL; | |
589 | self->rx_buff.data = NULL; | |
590 | } | |
591 | ||
static int sh_irda_init_iobuf(struct sh_irda_self *self, int rxsize, int txsize)
{
	if (self->rx_buff.head ||
	    self->tx_buff.head) {
		dev_err(&self->ndev->dev, "iobuff already exists\n");
		return -EINVAL;
	}

	/* rx_buff */
	self->rx_buff.head = kmalloc(rxsize, GFP_KERNEL);
	if (!self->rx_buff.head)
		return -ENOMEM;

	self->rx_buff.truesize	= rxsize;
	self->rx_buff.in_frame	= FALSE;
	self->rx_buff.state	= OUTSIDE_FRAME;
	self->rx_buff.data	= self->rx_buff.head;

	/* tx_buff */
	self->tx_buff.head	= self->membase + IRDARAM;
	self->tx_buff.truesize	= IRDARAM_LEN;

	return 0;
}
616 | ||
617 | /************************************************************************ | |
618 | ||
619 | ||
620 | net_device_ops function | |
621 | ||
622 | ||
623 | ************************************************************************/ | |
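/*
 * TX path: stop the queue and disable reception, wrap the frame straight
 * into IRDARAM, then kick the transfer with ARMOD | TE.  The queue is
 * woken again from the frame-transmit-end interrupt (sir_fte).
 */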
static int sh_irda_hard_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct sh_irda_self *self = netdev_priv(ndev);
	struct device *dev = &self->ndev->dev;
	int speed = irda_get_next_speed(skb);
	int ret;

	dev_dbg(dev, "hard xmit\n");

	netif_stop_queue(ndev);
	sh_irda_rcv_ctrl(self, 0);

	ret = sh_irda_set_baudrate(self, speed);
	if (ret < 0)
		return ret;

	self->tx_buff.len = 0;
	if (skb->len) {
		unsigned long flags;

		spin_lock_irqsave(&self->lock, flags);
		self->tx_buff.len = async_wrap_skb(skb,
						   self->tx_buff.head,
						   self->tx_buff.truesize);
		spin_unlock_irqrestore(&self->lock, flags);

		if (self->tx_buff.len > self->tx_buff.truesize)
			self->tx_buff.len = self->tx_buff.truesize;

		sh_irda_write(self, IRTFLR, self->tx_buff.len);
		sh_irda_write(self, IRTCTR, ARMOD | TE);
	}

	dev_kfree_skb(skb);

	return 0;
}
661 | ||
662 | static int sh_irda_ioctl(struct net_device *ndev, struct ifreq *ifreq, int cmd) | |
663 | { | |
664 | /* | |
665 | * FIXME | |
666 | * | |
667 | * This function is needed for irda framework. | |
668 | * But nothing to do now | |
669 | */ | |
670 | return 0; | |
671 | } | |
672 | ||
673 | static struct net_device_stats *sh_irda_stats(struct net_device *ndev) | |
674 | { | |
675 | struct sh_irda_self *self = netdev_priv(ndev); | |
676 | ||
677 | return &self->ndev->stats; | |
678 | } | |
679 | ||
680 | static int sh_irda_open(struct net_device *ndev) | |
681 | { | |
682 | struct sh_irda_self *self = netdev_priv(ndev); | |
683 | int err; | |
684 | ||
685 | clk_enable(self->clk); | |
686 | err = sh_irda_crc_init(self); | |
687 | if (err) | |
688 | goto open_err; | |
689 | ||
690 | sh_irda_set_mode(self, SH_IRDA_SIR); | |
691 | sh_irda_set_timeout(self, 2); | |
692 | sh_irda_set_baudrate(self, 9600); | |
693 | ||
694 | self->irlap = irlap_open(ndev, &self->qos, DRIVER_NAME); | |
695 | if (!self->irlap) { | |
696 | err = -ENODEV; | |
697 | goto open_err; | |
698 | } | |
699 | ||
700 | netif_start_queue(ndev); | |
701 | sh_irda_rcv_ctrl(self, 1); | |
702 | sh_irda_set_irq_mask(self); | |
703 | ||
704 | dev_info(&ndev->dev, "opened\n"); | |
705 | ||
706 | return 0; | |
707 | ||
708 | open_err: | |
709 | clk_disable(self->clk); | |
710 | ||
711 | return err; | |
712 | } | |
713 | ||
714 | static int sh_irda_stop(struct net_device *ndev) | |
715 | { | |
716 | struct sh_irda_self *self = netdev_priv(ndev); | |
717 | ||
718 | /* Stop IrLAP */ | |
719 | if (self->irlap) { | |
720 | irlap_close(self->irlap); | |
721 | self->irlap = NULL; | |
722 | } | |
723 | ||
724 | netif_stop_queue(ndev); | |
725 | ||
726 | dev_info(&ndev->dev, "stoped\n"); | |
727 | ||
728 | return 0; | |
729 | } | |
730 | ||
731 | static const struct net_device_ops sh_irda_ndo = { | |
732 | .ndo_open = sh_irda_open, | |
733 | .ndo_stop = sh_irda_stop, | |
734 | .ndo_start_xmit = sh_irda_hard_xmit, | |
735 | .ndo_do_ioctl = sh_irda_ioctl, | |
736 | .ndo_get_stats = sh_irda_stats, | |
737 | }; | |
738 | ||
739 | /************************************************************************ | |
740 | ||
741 | ||
742 | platform_driver function | |
743 | ||
744 | ||
745 | ************************************************************************/ | |
static int __devinit sh_irda_probe(struct platform_device *pdev)
{
	struct net_device *ndev;
	struct sh_irda_self *self;
	struct resource *res;
	char clk_name[8];
	int irq;
	int err = -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	irq = platform_get_irq(pdev, 0);
	if (!res || irq < 0) {
		dev_err(&pdev->dev, "Not enough platform resources.\n");
		goto exit;
	}

	ndev = alloc_irdadev(sizeof(*self));
	if (!ndev)
		goto exit;

	self = netdev_priv(ndev);
	self->membase = ioremap_nocache(res->start, resource_size(res));
	if (!self->membase) {
		err = -ENXIO;
		dev_err(&pdev->dev, "Unable to ioremap.\n");
		goto err_mem_1;
	}

	err = sh_irda_init_iobuf(self, IRDA_SKB_MAX_MTU, IRDA_SIR_MAX_FRAME);
	if (err)
		goto err_mem_2;

	snprintf(clk_name, sizeof(clk_name), "irda%d", pdev->id);
	self->clk = clk_get(&pdev->dev, clk_name);
	if (IS_ERR(self->clk)) {
		dev_err(&pdev->dev, "cannot get clock \"%s\"\n", clk_name);
		err = PTR_ERR(self->clk);
		goto err_mem_3;
	}

	irda_init_max_qos_capabilies(&self->qos);

	ndev->netdev_ops	= &sh_irda_ndo;
	ndev->irq		= irq;

	self->ndev			= ndev;
	self->qos.baud_rate.bits	&= IR_9600; /* FIXME */
	self->qos.min_turn_time.bits	= 1; /* 10 ms or more */
	spin_lock_init(&self->lock);

	irda_qos_bits_to_value(&self->qos);

	err = register_netdev(ndev);
	if (err)
		goto err_mem_4;

	platform_set_drvdata(pdev, ndev);

	err = request_irq(irq, sh_irda_irq, IRQF_DISABLED, "sh_irda", self);
	if (err) {
		dev_warn(&pdev->dev, "Unable to attach sh_irda interrupt\n");
		goto err_irq;
	}

	dev_info(&pdev->dev, "SuperH IrDA probed\n");

	goto exit;

err_irq:
	unregister_netdev(ndev);
err_mem_4:
	clk_put(self->clk);
err_mem_3:
	sh_irda_remove_iobuf(self);
err_mem_2:
	iounmap(self->membase);
err_mem_1:
	free_netdev(ndev);
exit:
	return err;
}
823 | ||
824 | static int __devexit sh_irda_remove(struct platform_device *pdev) | |
825 | { | |
826 | struct net_device *ndev = platform_get_drvdata(pdev); | |
827 | struct sh_irda_self *self = netdev_priv(ndev); | |
828 | ||
829 | if (!self) | |
830 | return 0; | |
831 | ||
832 | unregister_netdev(ndev); | |
833 | clk_put(self->clk); | |
834 | sh_irda_remove_iobuf(self); | |
835 | iounmap(self->membase); | |
836 | free_netdev(ndev); | |
837 | platform_set_drvdata(pdev, NULL); | |
838 | ||
839 | return 0; | |
840 | } | |
841 | ||
842 | static struct platform_driver sh_irda_driver = { | |
843 | .probe = sh_irda_probe, | |
844 | .remove = __devexit_p(sh_irda_remove), | |
845 | .driver = { | |
846 | .name = DRIVER_NAME, | |
847 | }, | |
848 | }; | |
849 | ||
850 | static int __init sh_irda_init(void) | |
851 | { | |
852 | return platform_driver_register(&sh_irda_driver); | |
853 | } | |
854 | ||
855 | static void __exit sh_irda_exit(void) | |
856 | { | |
857 | platform_driver_unregister(&sh_irda_driver); | |
858 | } | |
859 | ||
860 | module_init(sh_irda_init); | |
861 | module_exit(sh_irda_exit); | |
862 | ||
863 | MODULE_AUTHOR("Kuninori Morimoto <morimoto.kuninori@renesas.com>"); | |
864 | MODULE_DESCRIPTION("SuperH IrDA driver"); | |
865 | MODULE_LICENSE("GPL"); |