/*********************************************************************
 *
 *	sir_dev.c:	irda sir network device
 *
 *	Copyright (c) 2002 Martin Diehl
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License as
 *	published by the Free Software Foundation; either version 2 of
 *	the License, or (at your option) any later version.
 *
 ********************************************************************/

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/delay.h>

#include <net/irda/irda.h>
#include <net/irda/wrapper.h>
#include <net/irda/irda_device.h>

#include "sir-dev.h"


static struct workqueue_struct *irda_sir_wq;

/* STATE MACHINE */

/* substate handler of the config-fsm to handle the cases where we want
 * to wait for transmit completion before changing the port configuration
 */

static int sirdev_tx_complete_fsm(struct sir_dev *dev)
{
	struct sir_fsm *fsm = &dev->fsm;
	unsigned next_state, delay;
	unsigned bytes_left;

	do {
		next_state = fsm->substate;	/* default: stay in current substate */
		delay = 0;

		switch(fsm->substate) {

		case SIRDEV_STATE_WAIT_XMIT:
			if (dev->drv->chars_in_buffer)
				bytes_left = dev->drv->chars_in_buffer(dev);
			else
				bytes_left = 0;
			if (!bytes_left) {
				next_state = SIRDEV_STATE_WAIT_UNTIL_SENT;
				break;
			}

			if (dev->speed > 115200)
				delay = (bytes_left*8*10000) / (dev->speed/100);
			else if (dev->speed > 0)
				delay = (bytes_left*10*10000) / (dev->speed/100);
			else
				delay = 0;
			/* expected delay (usec) until remaining bytes are sent */
			if (delay < 100) {
				udelay(delay);
				delay = 0;
				break;
			}
			/* sleep some longer delay (msec) */
			delay = (delay+999) / 1000;
			break;

		case SIRDEV_STATE_WAIT_UNTIL_SENT:
			/* block until the underlying hardware buffers are empty */
			if (dev->drv->wait_until_sent)
				dev->drv->wait_until_sent(dev);
			next_state = SIRDEV_STATE_TX_DONE;
			break;

		case SIRDEV_STATE_TX_DONE:
			return 0;

		default:
			IRDA_ERROR("%s - undefined state\n", __func__);
			return -EINVAL;
		}
		fsm->substate = next_state;
	} while (delay == 0);
	return delay;
}
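
/* Editor's note (not part of the original source): a worked example of the
 * delay computation above. At 9600 baud each byte takes 10 bit times, so with
 * 12 bytes still buffered the expected drain time is
 *
 *	delay = (12 * 10 * 10000) / (9600 / 100) = 1200000 / 96 = 12500 usec
 *
 * Since that is >= 100 usec we do not busy-wait; the delay is rounded up to
 * (12500 + 999) / 1000 = 13 and returned, and sirdev_config_fsm() sleeps that
 * many milliseconds via the delayed workqueue before checking again.
 */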

/*
 * Function sirdev_config_fsm
 *
 * State machine to handle the configuration of the device (and attached dongle, if any).
 * This handler is scheduled for execution in kIrDAd context, so we can sleep.
 * However, kIrDAd is shared by all sir_dev devices, so we had better not sleep there
 * too long. Instead, for longer delays we start a timer to get rescheduled later.
 * On entry, fsm->sem is always locked and the netdev xmit queue stopped.
 * Both must be unlocked/restarted on completion - but only on final exit.
 */

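/* Editor's sketch (not in the original source): for the common case of a
 * speed change scheduled with SIRDEV_STATE_SET_SPEED, the state machine below
 * typically walks through
 *
 *	SET_SPEED -> DONGLE_CHECK (substates WAIT_XMIT -> WAIT_UNTIL_SENT ->
 *	TX_DONE) -> [DONGLE_RESET -> DONGLE_SPEED, only with a dongle attached]
 *	-> PORT_SPEED -> DONE -> COMPLETE
 *
 * re-queueing itself on the delayed workqueue whenever a handler asks for a
 * nonzero delay.
 */
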
static void sirdev_config_fsm(struct work_struct *work)
{
	struct sir_dev *dev = container_of(work, struct sir_dev, fsm.work.work);
	struct sir_fsm *fsm = &dev->fsm;
	int next_state;
	int ret = -1;
	unsigned delay;

	IRDA_DEBUG(2, "%s(), <%ld>\n", __func__, jiffies);

	do {
		IRDA_DEBUG(3, "%s - state=0x%04x / substate=0x%04x\n",
			   __func__, fsm->state, fsm->substate);

		next_state = fsm->state;
		delay = 0;

		switch(fsm->state) {

		case SIRDEV_STATE_DONGLE_OPEN:
			if (dev->dongle_drv != NULL) {
				ret = sirdev_put_dongle(dev);
				if (ret) {
					fsm->result = -EINVAL;
					next_state = SIRDEV_STATE_ERROR;
					break;
				}
			}

			/* Initialize dongle */
			ret = sirdev_get_dongle(dev, fsm->param);
			if (ret) {
				fsm->result = ret;
				next_state = SIRDEV_STATE_ERROR;
				break;
			}

			/* Dongles are powered through the modem control lines which
			 * were just set during open. Before resetting, let's wait for
			 * the power to stabilize. This is what some dongle drivers did
			 * in open before, while others didn't - should be safe anyway.
			 */

			delay = 50;
			fsm->substate = SIRDEV_STATE_DONGLE_RESET;
			next_state = SIRDEV_STATE_DONGLE_RESET;

			fsm->param = 9600;

			break;

		case SIRDEV_STATE_DONGLE_CLOSE:
			/* shouldn't we just treat this as success? */
			if (dev->dongle_drv == NULL) {
				fsm->result = -EINVAL;
				next_state = SIRDEV_STATE_ERROR;
				break;
			}

			ret = sirdev_put_dongle(dev);
			if (ret) {
				fsm->result = ret;
				next_state = SIRDEV_STATE_ERROR;
				break;
			}
			next_state = SIRDEV_STATE_DONE;
			break;

		case SIRDEV_STATE_SET_DTR_RTS:
			ret = sirdev_set_dtr_rts(dev,
				(fsm->param&0x02) ? TRUE : FALSE,
				(fsm->param&0x01) ? TRUE : FALSE);
			next_state = SIRDEV_STATE_DONE;
			break;

		case SIRDEV_STATE_SET_SPEED:
			fsm->substate = SIRDEV_STATE_WAIT_XMIT;
			next_state = SIRDEV_STATE_DONGLE_CHECK;
			break;

		case SIRDEV_STATE_DONGLE_CHECK:
			ret = sirdev_tx_complete_fsm(dev);
			if (ret < 0) {
				fsm->result = ret;
				next_state = SIRDEV_STATE_ERROR;
				break;
			}
			if ((delay=ret) != 0)
				break;

			if (dev->dongle_drv) {
				fsm->substate = SIRDEV_STATE_DONGLE_RESET;
				next_state = SIRDEV_STATE_DONGLE_RESET;
			}
			else {
				dev->speed = fsm->param;
				next_state = SIRDEV_STATE_PORT_SPEED;
			}
			break;

		case SIRDEV_STATE_DONGLE_RESET:
			if (dev->dongle_drv->reset) {
				ret = dev->dongle_drv->reset(dev);
				if (ret < 0) {
					fsm->result = ret;
					next_state = SIRDEV_STATE_ERROR;
					break;
				}
			}
			else
				ret = 0;
			if ((delay=ret) == 0) {
				/* set serial port according to dongle default speed */
				if (dev->drv->set_speed)
					dev->drv->set_speed(dev, dev->speed);
				fsm->substate = SIRDEV_STATE_DONGLE_SPEED;
				next_state = SIRDEV_STATE_DONGLE_SPEED;
			}
			break;

		case SIRDEV_STATE_DONGLE_SPEED:
			if (dev->dongle_drv->reset) {
				ret = dev->dongle_drv->set_speed(dev, fsm->param);
				if (ret < 0) {
					fsm->result = ret;
					next_state = SIRDEV_STATE_ERROR;
					break;
				}
			}
			else
				ret = 0;
			if ((delay=ret) == 0)
				next_state = SIRDEV_STATE_PORT_SPEED;
			break;

		case SIRDEV_STATE_PORT_SPEED:
			/* Finally we are ready to change the serial port speed */
			if (dev->drv->set_speed)
				dev->drv->set_speed(dev, dev->speed);
			dev->new_speed = 0;
			next_state = SIRDEV_STATE_DONE;
			break;

		case SIRDEV_STATE_DONE:
			/* Signal network layer so it can send more frames */
			netif_wake_queue(dev->netdev);
			next_state = SIRDEV_STATE_COMPLETE;
			break;

		default:
			IRDA_ERROR("%s - undefined state\n", __func__);
			fsm->result = -EINVAL;
			/* fall thru */

		case SIRDEV_STATE_ERROR:
			IRDA_ERROR("%s - error: %d\n", __func__, fsm->result);

#if 0	/* don't enable this before we have netdev->tx_timeout to recover */
			netif_stop_queue(dev->netdev);
#else
			netif_wake_queue(dev->netdev);
#endif
			/* fall thru */

		case SIRDEV_STATE_COMPLETE:
			/* config change finished, so we are not busy any longer */
			sirdev_enable_rx(dev);
			up(&fsm->sem);
			return;
		}
		fsm->state = next_state;
	} while(!delay);

	queue_delayed_work(irda_sir_wq, &fsm->work, msecs_to_jiffies(delay));
}

/* schedule some device configuration task for execution by kIrDAd
 * on behalf of the above state machine.
 * can be called from process or interrupt/tasklet context.
 */

int sirdev_schedule_request(struct sir_dev *dev, int initial_state, unsigned param)
{
	struct sir_fsm *fsm = &dev->fsm;

	IRDA_DEBUG(2, "%s - state=0x%04x / param=%u\n", __func__,
		   initial_state, param);

	if (down_trylock(&fsm->sem)) {
		if (in_interrupt() || in_atomic() || irqs_disabled()) {
			IRDA_DEBUG(1, "%s(), state machine busy!\n", __func__);
			return -EWOULDBLOCK;
		} else
			down(&fsm->sem);
	}

	if (fsm->state == SIRDEV_STATE_DEAD) {
		/* race with sirdev_close should never happen */
		IRDA_ERROR("%s(), instance staled!\n", __func__);
		up(&fsm->sem);
		return -ESTALE;		/* or better EPIPE? */
	}

	netif_stop_queue(dev->netdev);
	atomic_set(&dev->enable_rx, 0);

	fsm->state = initial_state;
	fsm->param = param;
	fsm->result = 0;

	INIT_DELAYED_WORK(&fsm->work, sirdev_config_fsm);
	queue_delayed_work(irda_sir_wq, &fsm->work, 0);
	return 0;
}
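
/* Editor's note (not part of the original source): callers normally do not
 * invoke sirdev_schedule_request() directly.  sir-dev.h wraps it in small
 * inline helpers, roughly along these lines (sketch - see the header for the
 * authoritative definitions):
 *
 *	static inline int sirdev_schedule_speed(struct sir_dev *dev, unsigned speed)
 *	{
 *		return sirdev_schedule_request(dev, SIRDEV_STATE_SET_SPEED, speed);
 *	}
 *
 * with analogous wrappers for SIRDEV_STATE_DONGLE_OPEN, SIRDEV_STATE_DONGLE_CLOSE
 * and SIRDEV_STATE_SET_DTR_RTS (the latter packing dtr into bit 1 and rts into
 * bit 0 of param, matching the decoding in the SET_DTR_RTS case above).
 */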


/***************************************************************************/

void sirdev_enable_rx(struct sir_dev *dev)
{
	if (unlikely(atomic_read(&dev->enable_rx)))
		return;

	/* flush rx-buffer - should also help in case of problems with echo cancellation */
	dev->rx_buff.data = dev->rx_buff.head;
	dev->rx_buff.len = 0;
	dev->rx_buff.in_frame = FALSE;
	dev->rx_buff.state = OUTSIDE_FRAME;
	atomic_set(&dev->enable_rx, 1);
}

static int sirdev_is_receiving(struct sir_dev *dev)
{
	if (!atomic_read(&dev->enable_rx))
		return 0;

	return dev->rx_buff.state != OUTSIDE_FRAME;
}

int sirdev_set_dongle(struct sir_dev *dev, IRDA_DONGLE type)
{
	int err;

	IRDA_DEBUG(3, "%s : requesting dongle %d.\n", __func__, type);

	err = sirdev_schedule_dongle_open(dev, type);
	if (unlikely(err))
		return err;
	down(&dev->fsm.sem);		/* block until config change completed */
	err = dev->fsm.result;
	up(&dev->fsm.sem);
	return err;
}
EXPORT_SYMBOL(sirdev_set_dongle);

/* used by dongle drivers for dongle programming */

int sirdev_raw_write(struct sir_dev *dev, const char *buf, int len)
{
	unsigned long flags;
	int ret;

	if (unlikely(len > dev->tx_buff.truesize))
		return -ENOSPC;

	spin_lock_irqsave(&dev->tx_lock, flags);	/* serialize with other tx operations */
	while (dev->tx_buff.len > 0) {			/* wait until tx idle */
		spin_unlock_irqrestore(&dev->tx_lock, flags);
		msleep(10);
		spin_lock_irqsave(&dev->tx_lock, flags);
	}

	dev->tx_buff.data = dev->tx_buff.head;
	memcpy(dev->tx_buff.data, buf, len);
	dev->tx_buff.len = len;

	ret = dev->drv->do_write(dev, dev->tx_buff.data, dev->tx_buff.len);
	if (ret > 0) {
		IRDA_DEBUG(3, "%s(), raw-tx started\n", __func__);

		dev->tx_buff.data += ret;
		dev->tx_buff.len -= ret;
		dev->raw_tx = 1;
		ret = len;		/* all data is going to be sent */
	}
	spin_unlock_irqrestore(&dev->tx_lock, flags);
	return ret;
}
EXPORT_SYMBOL(sirdev_raw_write);
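
/* Editor's sketch (hypothetical, not from any real dongle driver) of how a
 * dongle's reset/programming hook might use the raw helpers.  Note that the
 * fsm treats a non-negative return value of the reset hook as a delay in msec,
 * and that rx is disabled while the fsm runs, so sirdev_raw_read() sees the
 * unparsed reply bytes:
 *
 *	static int example_dongle_reset(struct sir_dev *dev)
 *	{
 *		static const char cmd[] = { 0x01, 0x02 };	// hypothetical command
 *		char resp[4];
 *		int n;
 *
 *		if (sirdev_raw_write(dev, cmd, sizeof(cmd)) != sizeof(cmd))
 *			return -EIO;
 *		msleep(20);			// give the dongle time to answer
 *		n = sirdev_raw_read(dev, resp, sizeof(resp));
 *		return (n > 0) ? 0 : -EIO;	// 0: no extra delay requested
 *	}
 */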

/* seems some dongle drivers may need this */

int sirdev_raw_read(struct sir_dev *dev, char *buf, int len)
{
	int count;

	if (atomic_read(&dev->enable_rx))
		return -EIO;		/* fail if we expect irda-frames */

	count = (len < dev->rx_buff.len) ? len : dev->rx_buff.len;

	if (count > 0) {
		memcpy(buf, dev->rx_buff.data, count);
		dev->rx_buff.data += count;
		dev->rx_buff.len -= count;
	}

	/* remaining stuff gets flushed when re-enabling normal rx */

	return count;
}
EXPORT_SYMBOL(sirdev_raw_read);

int sirdev_set_dtr_rts(struct sir_dev *dev, int dtr, int rts)
{
	int ret = -ENXIO;
	if (dev->drv->set_dtr_rts)
		ret = dev->drv->set_dtr_rts(dev, dtr, rts);
	return ret;
}
EXPORT_SYMBOL(sirdev_set_dtr_rts);

/**********************************************************************/

/* called from client driver - likely with bh-context - to indicate
 * it made some progress with transmission. Hence we send the next
 * chunk, if any, or complete the skb otherwise
 */

void sirdev_write_complete(struct sir_dev *dev)
{
	unsigned long flags;
	struct sk_buff *skb;
	int actual = 0;
	int err;

	spin_lock_irqsave(&dev->tx_lock, flags);

	IRDA_DEBUG(3, "%s() - dev->tx_buff.len = %d\n",
		   __func__, dev->tx_buff.len);

	if (likely(dev->tx_buff.len > 0)) {
		/* Write data left in transmit buffer */
		actual = dev->drv->do_write(dev, dev->tx_buff.data, dev->tx_buff.len);

		if (likely(actual>0)) {
			dev->tx_buff.data += actual;
			dev->tx_buff.len -= actual;
		}
		else if (unlikely(actual<0)) {
			/* could be dropped later when we have tx_timeout to recover */
			IRDA_ERROR("%s: drv->do_write failed (%d)\n",
				   __func__, actual);
			if ((skb=dev->tx_skb) != NULL) {
				dev->tx_skb = NULL;
				dev_kfree_skb_any(skb);
				dev->netdev->stats.tx_errors++;
				dev->netdev->stats.tx_dropped++;
			}
			dev->tx_buff.len = 0;
		}
		if (dev->tx_buff.len > 0)
			goto done;	/* more data to send later */
	}

	if (unlikely(dev->raw_tx != 0)) {
		/* in raw mode we are just done now after the buffer was sent
		 * completely. Since this was requested by some dongle driver
		 * running under the control of the irda-thread we must take
		 * care here not to re-enable the queue. The queue will be
		 * restarted when the irda-thread has completed the request.
		 */

		IRDA_DEBUG(3, "%s(), raw-tx done\n", __func__);
		dev->raw_tx = 0;
		goto done;	/* no post-frame handling in raw mode */
	}

	/* we have finished now sending this skb.
	 * update statistics and free the skb.
	 * finally we check and trigger a pending speed change, if any.
	 * if not we switch to rx mode and wake the queue for further
	 * packets.
	 * note the scheduled speed request blocks until the lower
	 * client driver and the corresponding hardware have really
	 * finished sending all data (e.g. until the xmit fifo is drained)
	 * before the speed change gets finally done and the queue
	 * re-activated.
	 */

	IRDA_DEBUG(5, "%s(), finished with frame!\n", __func__);

	if ((skb=dev->tx_skb) != NULL) {
		dev->tx_skb = NULL;
		dev->netdev->stats.tx_packets++;
		dev->netdev->stats.tx_bytes += skb->len;
		dev_kfree_skb_any(skb);
	}

	if (unlikely(dev->new_speed > 0)) {
		IRDA_DEBUG(5, "%s(), Changing speed!\n", __func__);
		err = sirdev_schedule_speed(dev, dev->new_speed);
		if (unlikely(err)) {
			/* should never happen
			 * forget the speed change and hope the stack recovers
			 */
			IRDA_ERROR("%s - schedule speed change failed: %d\n",
				   __func__, err);
			netif_wake_queue(dev->netdev);
		}
		/* else: success
		 *	speed change in progress now
		 *	on completion dev->new_speed gets cleared,
		 *	rx-reenabled and the queue restarted
		 */
	}
	else {
		sirdev_enable_rx(dev);
		netif_wake_queue(dev->netdev);
	}

done:
	spin_unlock_irqrestore(&dev->tx_lock, flags);
}
EXPORT_SYMBOL(sirdev_write_complete);

/* called from client driver - likely with bh-context - to give us
 * some more received bytes. We put them into the rx-buffer,
 * normally unwrapping and building LAP-skb's (unless rx disabled)
 */

int sirdev_receive(struct sir_dev *dev, const unsigned char *cp, size_t count)
{
	if (!dev || !dev->netdev) {
		IRDA_WARNING("%s(), not ready yet!\n", __func__);
		return -1;
	}

	if (!dev->irlap) {
		IRDA_WARNING("%s - too early: %p / %zd!\n",
			     __func__, cp, count);
		return -1;
	}

	if (cp==NULL) {
		/* error already at lower level receive
		 * just update stats and set media busy
		 */
		irda_device_set_media_busy(dev->netdev, TRUE);
		dev->netdev->stats.rx_dropped++;
		IRDA_DEBUG(0, "%s; rx-drop: %zd\n", __func__, count);
		return 0;
	}

	/* Read the characters into the buffer */
	if (likely(atomic_read(&dev->enable_rx))) {
		while (count--)
			/* Unwrap and destuff one byte */
			async_unwrap_char(dev->netdev, &dev->netdev->stats,
					  &dev->rx_buff, *cp++);
	} else {
		while (count--) {
			/* rx not enabled: save the raw bytes and never
			 * trigger any netif_rx. The received bytes are flushed
			 * later when we re-enable rx but might be read meanwhile
			 * by the dongle driver.
			 */
			dev->rx_buff.data[dev->rx_buff.len++] = *cp++;

			/* What should we do when the buffer is full? */
			if (unlikely(dev->rx_buff.len == dev->rx_buff.truesize))
				dev->rx_buff.len = 0;
		}
	}

	return 0;
}
EXPORT_SYMBOL(sirdev_receive);
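
/* Editor's sketch (hypothetical chip driver, not from the original source):
 * a client driver typically feeds both hooks above from its interrupt or
 * bottom-half path, for example
 *
 *	static irqreturn_t example_chip_interrupt(int irq, void *dev_id)
 *	{
 *		struct sir_dev *dev = dev_id;
 *		unsigned char buf[16];
 *		int n = example_chip_read_fifo(dev, buf, sizeof(buf));	// hypothetical helper
 *
 *		if (n > 0)
 *			sirdev_receive(dev, buf, n);	// unwrap into LAP skbs (or raw-buffer them)
 *		if (example_chip_tx_fifo_has_room(dev))	// hypothetical helper
 *			sirdev_write_complete(dev);	// push the next tx chunk
 *		return IRQ_HANDLED;
 *	}
 */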

/**********************************************************************/

/* callbacks from network layer */

static netdev_tx_t sirdev_hard_xmit(struct sk_buff *skb,
				    struct net_device *ndev)
{
	struct sir_dev *dev = netdev_priv(ndev);
	unsigned long flags;
	int actual = 0;
	int err;
	s32 speed;

	IRDA_ASSERT(dev != NULL, return NETDEV_TX_OK;);

	netif_stop_queue(ndev);

	IRDA_DEBUG(3, "%s(), skb->len = %d\n", __func__, skb->len);

	speed = irda_get_next_speed(skb);
	if ((speed != dev->speed) && (speed != -1)) {
		if (!skb->len) {
			err = sirdev_schedule_speed(dev, speed);
			if (unlikely(err == -EWOULDBLOCK)) {
				/* Failed to initiate the speed change, likely the fsm
				 * is still busy (pretty unlikely, but...)
				 * We refuse to accept the skb and return with the queue
				 * stopped so the network layer will retry after the
				 * fsm completes and wakes the queue.
				 */
				return NETDEV_TX_BUSY;
			}
			else if (unlikely(err)) {
				/* other fatal error - forget the speed change and
				 * hope the stack will recover somehow
				 */
				netif_start_queue(ndev);
			}
			/* else: success
			 *	speed change in progress now
			 *	on completion the queue gets restarted
			 */

			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		} else
			dev->new_speed = speed;
	}

	/* Init tx buffer */
	dev->tx_buff.data = dev->tx_buff.head;

	/* Check problems */
	if(spin_is_locked(&dev->tx_lock)) {
		IRDA_DEBUG(3, "%s(), write not completed\n", __func__);
	}

	/* serialize with write completion */
	spin_lock_irqsave(&dev->tx_lock, flags);

	/* Copy skb to tx_buff while wrapping, stuffing and making CRC */
	dev->tx_buff.len = async_wrap_skb(skb, dev->tx_buff.data, dev->tx_buff.truesize);

	/* transmission will start now - disable receive.
	 * if we are just in the middle of an incoming frame,
	 * treat it as collision. probably it's a good idea to
	 * reset the rx_buf OUTSIDE_FRAME in this case too?
	 */
	atomic_set(&dev->enable_rx, 0);
	if (unlikely(sirdev_is_receiving(dev)))
		dev->netdev->stats.collisions++;

	actual = dev->drv->do_write(dev, dev->tx_buff.data, dev->tx_buff.len);

	if (likely(actual > 0)) {
		dev->tx_skb = skb;
		dev->tx_buff.data += actual;
		dev->tx_buff.len -= actual;
	}
	else if (unlikely(actual < 0)) {
		/* could be dropped later when we have tx_timeout to recover */
		IRDA_ERROR("%s: drv->do_write failed (%d)\n",
			   __func__, actual);
		dev_kfree_skb_any(skb);
		dev->netdev->stats.tx_errors++;
		dev->netdev->stats.tx_dropped++;
		netif_wake_queue(ndev);
	}
	spin_unlock_irqrestore(&dev->tx_lock, flags);

	return NETDEV_TX_OK;
}

/* called from network layer with rtnl hold */

static int sirdev_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
{
	struct if_irda_req *irq = (struct if_irda_req *) rq;
	struct sir_dev *dev = netdev_priv(ndev);
	int ret = 0;

	IRDA_ASSERT(dev != NULL, return -1;);

	IRDA_DEBUG(3, "%s(), %s, (cmd=0x%X)\n", __func__, ndev->name, cmd);

	switch (cmd) {
	case SIOCSBANDWIDTH: /* Set bandwidth */
		if (!capable(CAP_NET_ADMIN))
			ret = -EPERM;
		else
			ret = sirdev_schedule_speed(dev, irq->ifr_baudrate);
		/* cannot sleep here for completion
		 * we are called from network layer with rtnl hold
		 */
		break;

	case SIOCSDONGLE: /* Set dongle */
		if (!capable(CAP_NET_ADMIN))
			ret = -EPERM;
		else
			ret = sirdev_schedule_dongle_open(dev, irq->ifr_dongle);
		/* cannot sleep here for completion
		 * we are called from network layer with rtnl hold
		 */
		break;

	case SIOCSMEDIABUSY: /* Set media busy */
		if (!capable(CAP_NET_ADMIN))
			ret = -EPERM;
		else
			irda_device_set_media_busy(dev->netdev, TRUE);
		break;

	case SIOCGRECEIVING: /* Check if we are receiving right now */
		irq->ifr_receiving = sirdev_is_receiving(dev);
		break;

	case SIOCSDTRRTS:
		if (!capable(CAP_NET_ADMIN))
			ret = -EPERM;
		else
			ret = sirdev_schedule_dtr_rts(dev, irq->ifr_dtr, irq->ifr_rts);
		/* cannot sleep here for completion
		 * we are called from network layer with rtnl hold
		 */
		break;

	case SIOCSMODE:
#if 0
		if (!capable(CAP_NET_ADMIN))
			ret = -EPERM;
		else
			ret = sirdev_schedule_mode(dev, irq->ifr_mode);
		/* cannot sleep here for completion
		 * we are called from network layer with rtnl hold
		 */
		break;
#endif
	default:
		ret = -EOPNOTSUPP;
	}

	return ret;
}

/* ----------------------------------------------------------------------------- */

#define SIRBUF_ALLOCSIZE 4269	/* worst case size of a wrapped IrLAP frame */

static int sirdev_alloc_buffers(struct sir_dev *dev)
{
	dev->tx_buff.truesize = SIRBUF_ALLOCSIZE;
	dev->rx_buff.truesize = IRDA_SKB_MAX_MTU;

	/* Bootstrap ZeroCopy Rx */
	dev->rx_buff.skb = __netdev_alloc_skb(dev->netdev, dev->rx_buff.truesize,
					      GFP_KERNEL);
	if (dev->rx_buff.skb == NULL)
		return -ENOMEM;
	skb_reserve(dev->rx_buff.skb, 1);
	dev->rx_buff.head = dev->rx_buff.skb->data;

	dev->tx_buff.head = kmalloc(dev->tx_buff.truesize, GFP_KERNEL);
	if (dev->tx_buff.head == NULL) {
		kfree_skb(dev->rx_buff.skb);
		dev->rx_buff.skb = NULL;
		dev->rx_buff.head = NULL;
		return -ENOMEM;
	}

	dev->tx_buff.data = dev->tx_buff.head;
	dev->rx_buff.data = dev->rx_buff.head;
	dev->tx_buff.len = 0;
	dev->rx_buff.len = 0;

	dev->rx_buff.in_frame = FALSE;
	dev->rx_buff.state = OUTSIDE_FRAME;
	return 0;
};

static void sirdev_free_buffers(struct sir_dev *dev)
{
	kfree_skb(dev->rx_buff.skb);
	kfree(dev->tx_buff.head);
	dev->rx_buff.head = dev->tx_buff.head = NULL;
	dev->rx_buff.skb = NULL;
}

static int sirdev_open(struct net_device *ndev)
{
	struct sir_dev *dev = netdev_priv(ndev);
	const struct sir_driver *drv = dev->drv;

	if (!drv)
		return -ENODEV;

	/* increase the reference count of the driver module before doing serious stuff */
	if (!try_module_get(drv->owner))
		return -ESTALE;

	IRDA_DEBUG(2, "%s()\n", __func__);

	if (sirdev_alloc_buffers(dev))
		goto errout_dec;

	if (!dev->drv->start_dev || dev->drv->start_dev(dev))
		goto errout_free;

	sirdev_enable_rx(dev);
	dev->raw_tx = 0;

	netif_start_queue(ndev);
	dev->irlap = irlap_open(ndev, &dev->qos, dev->hwname);
	if (!dev->irlap)
		goto errout_stop;

	netif_wake_queue(ndev);

	IRDA_DEBUG(2, "%s - done, speed = %d\n", __func__, dev->speed);

	return 0;

errout_stop:
	atomic_set(&dev->enable_rx, 0);
	if (dev->drv->stop_dev)
		dev->drv->stop_dev(dev);
errout_free:
	sirdev_free_buffers(dev);
errout_dec:
	module_put(drv->owner);
	return -EAGAIN;
}

static int sirdev_close(struct net_device *ndev)
{
	struct sir_dev *dev = netdev_priv(ndev);
	const struct sir_driver *drv;

//	IRDA_DEBUG(0, "%s\n", __func__);

	netif_stop_queue(ndev);

	down(&dev->fsm.sem);		/* block on pending config completion */

	atomic_set(&dev->enable_rx, 0);

	if (unlikely(!dev->irlap))
		goto out;
	irlap_close(dev->irlap);
	dev->irlap = NULL;

	drv = dev->drv;
	if (unlikely(!drv || !dev->priv))
		goto out;

	if (drv->stop_dev)
		drv->stop_dev(dev);

	sirdev_free_buffers(dev);
	module_put(drv->owner);

out:
	dev->speed = 0;
	up(&dev->fsm.sem);
	return 0;
}

static const struct net_device_ops sirdev_ops = {
	.ndo_start_xmit	= sirdev_hard_xmit,
	.ndo_open	= sirdev_open,
	.ndo_stop	= sirdev_close,
	.ndo_do_ioctl	= sirdev_ioctl,
};
/* ----------------------------------------------------------------------------- */

struct sir_dev * sirdev_get_instance(const struct sir_driver *drv, const char *name)
{
	struct net_device *ndev;
	struct sir_dev *dev;

	IRDA_DEBUG(0, "%s - %s\n", __func__, name);

	/* instead of adding tests to protect against drv->do_write==NULL
	 * at several places we refuse to create a sir_dev instance for
	 * drivers which don't implement do_write.
	 */
	if (!drv || !drv->do_write)
		return NULL;

	/*
	 *  Allocate new instance of the device
	 */
	ndev = alloc_irdadev(sizeof(*dev));
	if (ndev == NULL) {
		IRDA_ERROR("%s - Can't allocate memory for IrDA control block!\n", __func__);
		goto out;
	}
	dev = netdev_priv(ndev);

	irda_init_max_qos_capabilies(&dev->qos);
	dev->qos.baud_rate.bits = IR_9600|IR_19200|IR_38400|IR_57600|IR_115200;
	dev->qos.min_turn_time.bits = drv->qos_mtt_bits;
	irda_qos_bits_to_value(&dev->qos);

	strncpy(dev->hwname, name, sizeof(dev->hwname)-1);

	atomic_set(&dev->enable_rx, 0);
	dev->tx_skb = NULL;

	spin_lock_init(&dev->tx_lock);
	init_MUTEX(&dev->fsm.sem);

	dev->drv = drv;
	dev->netdev = ndev;

	/* Override the network functions we need to use */
	ndev->netdev_ops = &sirdev_ops;

	if (register_netdev(ndev)) {
		IRDA_ERROR("%s(), register_netdev() failed!\n", __func__);
		goto out_freenetdev;
	}

	return dev;

out_freenetdev:
	free_netdev(ndev);
out:
	return NULL;
}
EXPORT_SYMBOL(sirdev_get_instance);

int sirdev_put_instance(struct sir_dev *dev)
{
	int err = 0;

	IRDA_DEBUG(0, "%s\n", __func__);

	atomic_set(&dev->enable_rx, 0);

	netif_carrier_off(dev->netdev);
	netif_device_detach(dev->netdev);

	if (dev->dongle_drv)
		err = sirdev_schedule_dongle_close(dev);
	if (err)
		IRDA_ERROR("%s - error %d\n", __func__, err);

	sirdev_close(dev->netdev);

	down(&dev->fsm.sem);
	dev->fsm.state = SIRDEV_STATE_DEAD;	/* mark staled */
	dev->dongle_drv = NULL;
	dev->priv = NULL;
	up(&dev->fsm.sem);

	/* Remove netdevice */
	unregister_netdev(dev->netdev);

	free_netdev(dev->netdev);

	return 0;
}
EXPORT_SYMBOL(sirdev_put_instance);
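
/* Editor's sketch of the registration flow (hypothetical driver; only fields
 * actually referenced in this file are shown, values are illustrative):
 *
 *	static struct sir_driver example_sir_driver = {
 *		.owner			= THIS_MODULE,
 *		.qos_mtt_bits		= 0x07,		// example min-turn-time mask
 *		.do_write		= example_do_write,
 *		.chars_in_buffer	= example_chars_in_buffer,
 *		.wait_until_sent	= example_wait_until_sent,
 *		.set_speed		= example_set_speed,
 *		.set_dtr_rts		= example_set_dtr_rts,
 *		.start_dev		= example_start_dev,
 *		.stop_dev		= example_stop_dev,
 *	};
 *
 * On probe the low-level driver would call
 *	dev = sirdev_get_instance(&example_sir_driver, "example");
 * and on removal
 *	sirdev_put_instance(dev);
 * do_write is mandatory (see the check in sirdev_get_instance() above); the
 * remaining hooks are optional and are tested for NULL before use.
 */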

static int __init sir_wq_init(void)
{
	irda_sir_wq = create_singlethread_workqueue("irda_sir_wq");
	if (!irda_sir_wq)
		return -ENOMEM;
	return 0;
}

static void __exit sir_wq_exit(void)
{
	destroy_workqueue(irda_sir_wq);
}

module_init(sir_wq_init);
module_exit(sir_wq_exit);

MODULE_AUTHOR("Martin Diehl <info@mdiehl.de>");
MODULE_DESCRIPTION("IrDA SIR core");
MODULE_LICENSE("GPL");