/*********************************************************************
 *
 * sir_dev.c:	irda sir network device
 *
 * Copyright (c) 2002 Martin Diehl
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of
 * the License, or (at your option) any later version.
 *
 ********************************************************************/

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/delay.h>

#include <net/irda/irda.h>
#include <net/irda/wrapper.h>
#include <net/irda/irda_device.h>

#include "sir-dev.h"


static struct workqueue_struct *irda_sir_wq;

/* STATE MACHINE */

/* substate handler of the config-fsm to handle the cases where we want
 * to wait for transmit completion before changing the port configuration
 */

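/* Return convention, as relied upon by the caller below: 0 once the
 * transmission has fully completed, a positive delay in msec after which
 * we want to be rescheduled, or a negative errno. Substate flow:
 *
 *   WAIT_XMIT -> WAIT_UNTIL_SENT -> TX_DONE
 */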
static int sirdev_tx_complete_fsm(struct sir_dev *dev)
{
	struct sir_fsm *fsm = &dev->fsm;
	unsigned next_state, delay;
	unsigned bytes_left;

	do {
		next_state = fsm->substate;	/* default: stay in current substate */
		delay = 0;

		switch (fsm->substate) {

		case SIRDEV_STATE_WAIT_XMIT:
			if (dev->drv->chars_in_buffer)
				bytes_left = dev->drv->chars_in_buffer(dev);
			else
				bytes_left = 0;
			if (!bytes_left) {
				next_state = SIRDEV_STATE_WAIT_UNTIL_SENT;
				break;
			}

			if (dev->speed > 115200)
				delay = (bytes_left*8*10000) / (dev->speed/100);
			else if (dev->speed > 0)
				delay = (bytes_left*10*10000) / (dev->speed/100);
			else
				delay = 0;
			/* expected delay (usec) until remaining bytes are sent */
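			/* A sketch of the arithmetic: one byte takes
			 * bits_per_byte/speed seconds on the wire, i.e.
			 * bytes_left*bits*10000/(speed/100) usec. 10 bits
			 * per byte (8N1 framing) at SIR rates, 8 bits
			 * above 115200 where the MIR/FIR encodings have
			 * no start/stop bits. E.g. at 115200 baud one
			 * byte takes 10*10000/1152 ~= 87 usec.
			 */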
			if (delay < 100) {
				udelay(delay);
				delay = 0;
				break;
			}
			/* sleep some longer delay (msec) */
			delay = (delay+999) / 1000;
			break;

		case SIRDEV_STATE_WAIT_UNTIL_SENT:
			/* block until the underlying hardware buffers are empty */
			if (dev->drv->wait_until_sent)
				dev->drv->wait_until_sent(dev);
			next_state = SIRDEV_STATE_TX_DONE;
			break;

		case SIRDEV_STATE_TX_DONE:
			return 0;

		default:
			IRDA_ERROR("%s - undefined state\n", __FUNCTION__);
			return -EINVAL;
		}
		fsm->substate = next_state;
	} while (delay == 0);
	return delay;
}

/*
 * Function sirdev_config_fsm
 *
 * State machine to handle the configuration of the device (and attached
 * dongle, if any). This handler is scheduled for execution in kIrDAd
 * context, so we can sleep. However, kIrDAd is shared by all sir_dev
 * devices, so we had better not sleep there too long. Instead, for longer
 * delays we start a timer to reschedule us later.
 * On entry, fsm->sem is always locked and the netdev xmit queue stopped.
 * Both must be unlocked/restarted on completion - but only on final exit.
 */
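
/* A sketch of the typical flow for a speed change request (the exact
 * path depends on whether a dongle driver is attached):
 *
 *   SET_SPEED -> DONGLE_CHECK -> [DONGLE_RESET -> DONGLE_SPEED] ->
 *   PORT_SPEED -> DONE -> COMPLETE
 */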

static void sirdev_config_fsm(struct work_struct *work)
{
	struct sir_dev *dev = container_of(work, struct sir_dev, fsm.work.work);
	struct sir_fsm *fsm = &dev->fsm;
	int next_state;
	int ret = -1;
	unsigned delay;

	IRDA_DEBUG(2, "%s(), <%ld>\n", __FUNCTION__, jiffies);

	do {
		IRDA_DEBUG(3, "%s - state=0x%04x / substate=0x%04x\n",
			__FUNCTION__, fsm->state, fsm->substate);

		next_state = fsm->state;
		delay = 0;

		switch (fsm->state) {

		case SIRDEV_STATE_DONGLE_OPEN:
			if (dev->dongle_drv != NULL) {
				ret = sirdev_put_dongle(dev);
				if (ret) {
					fsm->result = -EINVAL;
					next_state = SIRDEV_STATE_ERROR;
					break;
				}
			}

			/* Initialize dongle */
			ret = sirdev_get_dongle(dev, fsm->param);
			if (ret) {
				fsm->result = ret;
				next_state = SIRDEV_STATE_ERROR;
				break;
			}

			/* Dongles are powered through the modem control lines which
			 * were just set during open. Before resetting, let's wait for
			 * the power to stabilize. This is what some dongle drivers did
			 * in open before, while others didn't - should be safe anyway.
			 */

			delay = 50;
			fsm->substate = SIRDEV_STATE_DONGLE_RESET;
			next_state = SIRDEV_STATE_DONGLE_RESET;

			fsm->param = 9600;

			break;

		case SIRDEV_STATE_DONGLE_CLOSE:
			/* shouldn't we just treat this as success? */
			if (dev->dongle_drv == NULL) {
				fsm->result = -EINVAL;
				next_state = SIRDEV_STATE_ERROR;
				break;
			}

			ret = sirdev_put_dongle(dev);
			if (ret) {
				fsm->result = ret;
				next_state = SIRDEV_STATE_ERROR;
				break;
			}
			next_state = SIRDEV_STATE_DONE;
			break;

		case SIRDEV_STATE_SET_DTR_RTS:
			ret = sirdev_set_dtr_rts(dev,
				(fsm->param&0x02) ? TRUE : FALSE,
				(fsm->param&0x01) ? TRUE : FALSE);
			next_state = SIRDEV_STATE_DONE;
			break;

		case SIRDEV_STATE_SET_SPEED:
			fsm->substate = SIRDEV_STATE_WAIT_XMIT;
			next_state = SIRDEV_STATE_DONGLE_CHECK;
			break;

		case SIRDEV_STATE_DONGLE_CHECK:
			ret = sirdev_tx_complete_fsm(dev);
			if (ret < 0) {
				fsm->result = ret;
				next_state = SIRDEV_STATE_ERROR;
				break;
			}
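			/* ret > 0 is the msec delay suggested by the
			 * tx-complete fsm above: reschedule and re-check
			 * once it has expired
			 */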
			if ((delay = ret) != 0)
				break;

			if (dev->dongle_drv) {
				fsm->substate = SIRDEV_STATE_DONGLE_RESET;
				next_state = SIRDEV_STATE_DONGLE_RESET;
			}
			else {
				dev->speed = fsm->param;
				next_state = SIRDEV_STATE_PORT_SPEED;
			}
			break;

		case SIRDEV_STATE_DONGLE_RESET:
			if (dev->dongle_drv->reset) {
				ret = dev->dongle_drv->reset(dev);
				if (ret < 0) {
					fsm->result = ret;
					next_state = SIRDEV_STATE_ERROR;
					break;
				}
			}
			else
				ret = 0;
			if ((delay = ret) == 0) {
				/* set serial port according to dongle default speed */
				if (dev->drv->set_speed)
					dev->drv->set_speed(dev, dev->speed);
				fsm->substate = SIRDEV_STATE_DONGLE_SPEED;
				next_state = SIRDEV_STATE_DONGLE_SPEED;
			}
			break;

		case SIRDEV_STATE_DONGLE_SPEED:
			if (dev->dongle_drv->set_speed) {
				ret = dev->dongle_drv->set_speed(dev, fsm->param);
				if (ret < 0) {
					fsm->result = ret;
					next_state = SIRDEV_STATE_ERROR;
					break;
				}
			}
			else
				ret = 0;
			if ((delay = ret) == 0)
				next_state = SIRDEV_STATE_PORT_SPEED;
			break;

		case SIRDEV_STATE_PORT_SPEED:
			/* Finally we are ready to change the serial port speed */
			if (dev->drv->set_speed)
				dev->drv->set_speed(dev, dev->speed);
			dev->new_speed = 0;
			next_state = SIRDEV_STATE_DONE;
			break;

		case SIRDEV_STATE_DONE:
			/* Signal network layer so it can send more frames */
			netif_wake_queue(dev->netdev);
			next_state = SIRDEV_STATE_COMPLETE;
			break;

		default:
			IRDA_ERROR("%s - undefined state\n", __FUNCTION__);
			fsm->result = -EINVAL;
			/* fall thru */

		case SIRDEV_STATE_ERROR:
			IRDA_ERROR("%s - error: %d\n", __FUNCTION__, fsm->result);

#if 0	/* don't enable this before we have netdev->tx_timeout to recover */
			netif_stop_queue(dev->netdev);
#else
			netif_wake_queue(dev->netdev);
#endif
			/* fall thru */

		case SIRDEV_STATE_COMPLETE:
			/* config change finished, so we are not busy any longer */
			sirdev_enable_rx(dev);
			up(&fsm->sem);
			return;
		}
		fsm->state = next_state;
	} while (!delay);

	queue_delayed_work(irda_sir_wq, &fsm->work, msecs_to_jiffies(delay));
}

/* schedule some device configuration task for execution by kIrDAd
 * on behalf of the above state machine.
 * can be called from process or interrupt/tasklet context.
 */

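/* Example (a sketch - the sirdev_schedule_speed/_dongle_open/_dtr_rts
 * helpers used elsewhere in this file are, presumably, thin wrappers in
 * sir-dev.h around this): requesting a speed change boils down to
 *
 *	sirdev_schedule_request(dev, SIRDEV_STATE_SET_SPEED, 19200);
 */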
int sirdev_schedule_request(struct sir_dev *dev, int initial_state, unsigned param)
{
	struct sir_fsm *fsm = &dev->fsm;

	IRDA_DEBUG(2, "%s - state=0x%04x / param=%u\n", __FUNCTION__, initial_state, param);

	if (down_trylock(&fsm->sem)) {
		if (in_interrupt() || in_atomic() || irqs_disabled()) {
			IRDA_DEBUG(1, "%s(), state machine busy!\n", __FUNCTION__);
			return -EWOULDBLOCK;
		} else
			down(&fsm->sem);
	}

	if (fsm->state == SIRDEV_STATE_DEAD) {
		/* race with sirdev_close should never happen */
		IRDA_ERROR("%s(), instance stale!\n", __FUNCTION__);
		up(&fsm->sem);
		return -ESTALE;		/* or better EPIPE? */
	}

	netif_stop_queue(dev->netdev);
	atomic_set(&dev->enable_rx, 0);

	fsm->state = initial_state;
	fsm->param = param;
	fsm->result = 0;

	INIT_DELAYED_WORK(&fsm->work, sirdev_config_fsm);
	queue_delayed_work(irda_sir_wq, &fsm->work, 0);
	return 0;
}

/***************************************************************************/

void sirdev_enable_rx(struct sir_dev *dev)
{
	if (unlikely(atomic_read(&dev->enable_rx)))
		return;

	/* flush rx-buffer - should also help in case of problems with echo cancellation */
	dev->rx_buff.data = dev->rx_buff.head;
	dev->rx_buff.len = 0;
	dev->rx_buff.in_frame = FALSE;
	dev->rx_buff.state = OUTSIDE_FRAME;
	atomic_set(&dev->enable_rx, 1);
}

static int sirdev_is_receiving(struct sir_dev *dev)
{
	if (!atomic_read(&dev->enable_rx))
		return 0;

	return (dev->rx_buff.state != OUTSIDE_FRAME);
}

int sirdev_set_dongle(struct sir_dev *dev, IRDA_DONGLE type)
{
	int err;

	IRDA_DEBUG(3, "%s : requesting dongle %d.\n", __FUNCTION__, type);

	err = sirdev_schedule_dongle_open(dev, type);
	if (unlikely(err))
		return err;
	down(&dev->fsm.sem);		/* block until config change completed */
	err = dev->fsm.result;
	up(&dev->fsm.sem);
	return err;
}
EXPORT_SYMBOL(sirdev_set_dongle);

/* used by dongle drivers for dongle programming */

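/* A sketch of the intended use (the command bytes are made up): a dongle
 * driver running in kIrDAd context sends a programming string with
 *
 *	sirdev_raw_write(dev, cmd, len);
 *
 * and, while rx is still disabled, later picks up the reply bytes via
 * sirdev_raw_read(). Leftover bytes get flushed when rx is re-enabled.
 */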
int sirdev_raw_write(struct sir_dev *dev, const char *buf, int len)
{
	unsigned long flags;
	int ret;

	if (unlikely(len > dev->tx_buff.truesize))
		return -ENOSPC;

	spin_lock_irqsave(&dev->tx_lock, flags);	/* serialize with other tx operations */
	while (dev->tx_buff.len > 0) {			/* wait until tx idle */
		spin_unlock_irqrestore(&dev->tx_lock, flags);
		msleep(10);
		spin_lock_irqsave(&dev->tx_lock, flags);
	}

	dev->tx_buff.data = dev->tx_buff.head;
	memcpy(dev->tx_buff.data, buf, len);
	dev->tx_buff.len = len;

	ret = dev->drv->do_write(dev, dev->tx_buff.data, dev->tx_buff.len);
	if (ret > 0) {
		IRDA_DEBUG(3, "%s(), raw-tx started\n", __FUNCTION__);

		dev->tx_buff.data += ret;
		dev->tx_buff.len -= ret;
		dev->raw_tx = 1;
		ret = len;		/* all data is going to be sent */
	}
	spin_unlock_irqrestore(&dev->tx_lock, flags);
	return ret;
}
EXPORT_SYMBOL(sirdev_raw_write);

/* seems some dongle drivers may need this */

int sirdev_raw_read(struct sir_dev *dev, char *buf, int len)
{
	int count;

	if (atomic_read(&dev->enable_rx))
		return -EIO;		/* fail if we expect irda-frames */

	count = (len < dev->rx_buff.len) ? len : dev->rx_buff.len;

	if (count > 0) {
		memcpy(buf, dev->rx_buff.data, count);
		dev->rx_buff.data += count;
		dev->rx_buff.len -= count;
	}

	/* remaining stuff gets flushed when re-enabling normal rx */

	return count;
}
EXPORT_SYMBOL(sirdev_raw_read);

int sirdev_set_dtr_rts(struct sir_dev *dev, int dtr, int rts)
{
	int ret = -ENXIO;
	if (dev->drv->set_dtr_rts)
		ret = dev->drv->set_dtr_rts(dev, dtr, rts);
	return ret;
}
EXPORT_SYMBOL(sirdev_set_dtr_rts);

/**********************************************************************/

/* called from client driver - likely with bh-context - to indicate
 * it made some progress with transmission. Hence we send the next
 * chunk, if any, or complete the skb otherwise
 */
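/* Note on locking: dev->tx_lock serializes this completion path with
 * sirdev_hard_xmit() and sirdev_raw_write(), which fill the very same
 * tx_buff.
 */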

void sirdev_write_complete(struct sir_dev *dev)
{
	unsigned long flags;
	struct sk_buff *skb;
	int actual = 0;
	int err;

	spin_lock_irqsave(&dev->tx_lock, flags);

	IRDA_DEBUG(3, "%s() - dev->tx_buff.len = %d\n",
		   __FUNCTION__, dev->tx_buff.len);

	if (likely(dev->tx_buff.len > 0)) {
		/* Write data left in transmit buffer */
		actual = dev->drv->do_write(dev, dev->tx_buff.data, dev->tx_buff.len);

		if (likely(actual > 0)) {
			dev->tx_buff.data += actual;
			dev->tx_buff.len -= actual;
		}
		else if (unlikely(actual < 0)) {
			/* could be dropped later when we have tx_timeout to recover */
			IRDA_ERROR("%s: drv->do_write failed (%d)\n",
				   __FUNCTION__, actual);
			if ((skb = dev->tx_skb) != NULL) {
				dev->tx_skb = NULL;
				dev_kfree_skb_any(skb);
				dev->stats.tx_errors++;
				dev->stats.tx_dropped++;
			}
			dev->tx_buff.len = 0;
		}
		if (dev->tx_buff.len > 0)
			goto done;	/* more data to send later */
	}

	if (unlikely(dev->raw_tx != 0)) {
		/* in raw mode we are just done now after the buffer was sent
		 * completely. Since this was requested by some dongle driver
		 * running under the control of the irda-thread we must take
		 * care here not to re-enable the queue. The queue will be
		 * restarted when the irda-thread has completed the request.
		 */

		IRDA_DEBUG(3, "%s(), raw-tx done\n", __FUNCTION__);
		dev->raw_tx = 0;
		goto done;	/* no post-frame handling in raw mode */
	}

	/* we have finished now sending this skb.
	 * update statistics and free the skb.
	 * finally we check and trigger a pending speed change, if any.
	 * if not we switch to rx mode and wake the queue for further
	 * packets.
	 * note that the scheduled speed request blocks until the lower
	 * client driver and the corresponding hardware have really
	 * finished sending all data (e.g. the xmit fifo is drained)
	 * before the speed change gets finally done and the queue
	 * re-activated.
	 */

	IRDA_DEBUG(5, "%s(), finished with frame!\n", __FUNCTION__);

	if ((skb = dev->tx_skb) != NULL) {
		dev->tx_skb = NULL;
		dev->stats.tx_packets++;
		dev->stats.tx_bytes += skb->len;
		dev_kfree_skb_any(skb);
	}

	if (unlikely(dev->new_speed > 0)) {
		IRDA_DEBUG(5, "%s(), Changing speed!\n", __FUNCTION__);
		err = sirdev_schedule_speed(dev, dev->new_speed);
		if (unlikely(err)) {
			/* should never happen
			 * forget the speed change and hope the stack recovers
			 */
			IRDA_ERROR("%s - schedule speed change failed: %d\n",
				   __FUNCTION__, err);
			netif_wake_queue(dev->netdev);
		}
		/* else: success
		 * speed change in progress now
		 * on completion dev->new_speed gets cleared,
		 * rx-reenabled and the queue restarted
		 */
	}
	else {
		sirdev_enable_rx(dev);
		netif_wake_queue(dev->netdev);
	}

done:
	spin_unlock_irqrestore(&dev->tx_lock, flags);
}
EXPORT_SYMBOL(sirdev_write_complete);

/* called from client driver - likely with bh-context - to give us
 * some more received bytes. We put them into the rx-buffer,
 * normally unwrapping and building LAP-skb's (unless rx disabled)
 */

int sirdev_receive(struct sir_dev *dev, const unsigned char *cp, size_t count)
{
	if (!dev || !dev->netdev) {
		IRDA_WARNING("%s(), not ready yet!\n", __FUNCTION__);
		return -1;
	}

	if (!dev->irlap) {
		IRDA_WARNING("%s - too early: %p / %zd!\n",
			     __FUNCTION__, cp, count);
		return -1;
	}

	if (cp == NULL) {
		/* error already at lower level receive
		 * just update stats and set media busy
		 */
		irda_device_set_media_busy(dev->netdev, TRUE);
		dev->stats.rx_dropped++;
		IRDA_DEBUG(0, "%s; rx-drop: %zd\n", __FUNCTION__, count);
		return 0;
	}

	/* Read the characters into the buffer */
	if (likely(atomic_read(&dev->enable_rx))) {
		while (count--)
			/* Unwrap and destuff one byte */
			async_unwrap_char(dev->netdev, &dev->stats,
					  &dev->rx_buff, *cp++);
	} else {
		while (count--) {
			/* rx not enabled: save the raw bytes and never
			 * trigger any netif_rx. The received bytes are flushed
			 * later when we re-enable rx but might be read meanwhile
			 * by the dongle driver.
			 */
			dev->rx_buff.data[dev->rx_buff.len++] = *cp++;

			/* What should we do when the buffer is full? */
			if (unlikely(dev->rx_buff.len == dev->rx_buff.truesize))
				dev->rx_buff.len = 0;
		}
	}

	return 0;
}
EXPORT_SYMBOL(sirdev_receive);

/**********************************************************************/

/* callbacks from network layer */

static struct net_device_stats *sirdev_get_stats(struct net_device *ndev)
{
	struct sir_dev *dev = ndev->priv;

	return (dev) ? &dev->stats : NULL;
}

static int sirdev_hard_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct sir_dev *dev = ndev->priv;
	unsigned long flags;
	int actual = 0;
	int err;
	s32 speed;

	IRDA_ASSERT(dev != NULL, return 0;);

	netif_stop_queue(ndev);

	IRDA_DEBUG(3, "%s(), skb->len = %d\n", __FUNCTION__, skb->len);

	speed = irda_get_next_speed(skb);
	if ((speed != dev->speed) && (speed != -1)) {
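		/* A zero-length skb carries nothing but an IrLAP speed
		 * request - apply it via the fsm right away. For a data
		 * frame we only remember the new speed and change it after
		 * the frame has been sent (see sirdev_write_complete)
		 */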
		if (!skb->len) {
			err = sirdev_schedule_speed(dev, speed);
			if (unlikely(err == -EWOULDBLOCK)) {
				/* Failed to initiate the speed change because
				 * the fsm is still busy (pretty unlikely, but
				 * possible). We refuse to accept the skb and
				 * return with the queue stopped so the network
				 * layer will retry after the fsm completes and
				 * wakes the queue.
				 */
				return 1;
			}
			else if (unlikely(err)) {
				/* other fatal error - forget the speed change and
				 * hope the stack will recover somehow
				 */
				netif_start_queue(ndev);
			}
			/* else: success
			 * speed change in progress now
			 * on completion the queue gets restarted
			 */

			dev_kfree_skb_any(skb);
			return 0;
		} else
			dev->new_speed = speed;
	}

	/* Init tx buffer */
	dev->tx_buff.data = dev->tx_buff.head;

	/* Check problems */
	if (spin_is_locked(&dev->tx_lock)) {
		IRDA_DEBUG(3, "%s(), write not completed\n", __FUNCTION__);
	}

	/* serialize with write completion */
	spin_lock_irqsave(&dev->tx_lock, flags);

	/* Copy skb to tx_buff while wrapping, stuffing and making CRC */
	dev->tx_buff.len = async_wrap_skb(skb, dev->tx_buff.data, dev->tx_buff.truesize);

	/* transmission will start now - disable receive.
	 * if we are just in the middle of an incoming frame,
	 * treat it as collision. probably it would be a good idea to
	 * reset the rx_buff to OUTSIDE_FRAME in this case too?
	 */
	atomic_set(&dev->enable_rx, 0);
	if (unlikely(sirdev_is_receiving(dev)))
		dev->stats.collisions++;

	actual = dev->drv->do_write(dev, dev->tx_buff.data, dev->tx_buff.len);

	if (likely(actual > 0)) {
		dev->tx_skb = skb;
		ndev->trans_start = jiffies;
		dev->tx_buff.data += actual;
		dev->tx_buff.len -= actual;
	}
	else if (unlikely(actual < 0)) {
		/* could be dropped later when we have tx_timeout to recover */
		IRDA_ERROR("%s: drv->do_write failed (%d)\n",
			   __FUNCTION__, actual);
		dev_kfree_skb_any(skb);
		dev->stats.tx_errors++;
		dev->stats.tx_dropped++;
		netif_wake_queue(ndev);
	}
	spin_unlock_irqrestore(&dev->tx_lock, flags);

	return 0;
}

/* called from network layer with rtnl hold */

static int sirdev_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
{
	struct if_irda_req *irq = (struct if_irda_req *) rq;
	struct sir_dev *dev = ndev->priv;
	int ret = 0;

	IRDA_ASSERT(dev != NULL, return -1;);

	IRDA_DEBUG(3, "%s(), %s, (cmd=0x%X)\n", __FUNCTION__, ndev->name, cmd);

	switch (cmd) {
	case SIOCSBANDWIDTH: /* Set bandwidth */
		if (!capable(CAP_NET_ADMIN))
			ret = -EPERM;
		else
			ret = sirdev_schedule_speed(dev, irq->ifr_baudrate);
		/* cannot sleep here for completion
		 * we are called from network layer with rtnl hold
		 */
		break;

	case SIOCSDONGLE: /* Set dongle */
		if (!capable(CAP_NET_ADMIN))
			ret = -EPERM;
		else
			ret = sirdev_schedule_dongle_open(dev, irq->ifr_dongle);
		/* cannot sleep here for completion
		 * we are called from network layer with rtnl hold
		 */
		break;

	case SIOCSMEDIABUSY: /* Set media busy */
		if (!capable(CAP_NET_ADMIN))
			ret = -EPERM;
		else
			irda_device_set_media_busy(dev->netdev, TRUE);
		break;

	case SIOCGRECEIVING: /* Check if we are receiving right now */
		irq->ifr_receiving = sirdev_is_receiving(dev);
		break;

	case SIOCSDTRRTS:
		if (!capable(CAP_NET_ADMIN))
			ret = -EPERM;
		else
			ret = sirdev_schedule_dtr_rts(dev, irq->ifr_dtr, irq->ifr_rts);
		/* cannot sleep here for completion
		 * we are called from network layer with rtnl hold
		 */
		break;

	case SIOCSMODE:
#if 0
		if (!capable(CAP_NET_ADMIN))
			ret = -EPERM;
		else
			ret = sirdev_schedule_mode(dev, irq->ifr_mode);
		/* cannot sleep here for completion
		 * we are called from network layer with rtnl hold
		 */
		break;
#endif
	default:
		ret = -EOPNOTSUPP;
	}

	return ret;
}

/* ----------------------------------------------------------------------------- */

#define SIRBUF_ALLOCSIZE 4269	/* worst case size of a wrapped IrLAP frame */
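/* (presumably: every payload and FCS byte may get escaped to two bytes
 * by the SIR wrapper, plus BOF/EOF delimiters and leading XBOF chars -
 * hence roughly twice the maximum IrLAP frame size plus framing overhead)
 */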

static int sirdev_alloc_buffers(struct sir_dev *dev)
{
	dev->tx_buff.truesize = SIRBUF_ALLOCSIZE;
	dev->rx_buff.truesize = IRDA_SKB_MAX_MTU;

	/* Bootstrap ZeroCopy Rx */
	dev->rx_buff.skb = __dev_alloc_skb(dev->rx_buff.truesize, GFP_KERNEL);
	if (dev->rx_buff.skb == NULL)
		return -ENOMEM;
	skb_reserve(dev->rx_buff.skb, 1);
	dev->rx_buff.head = dev->rx_buff.skb->data;

	dev->tx_buff.head = kmalloc(dev->tx_buff.truesize, GFP_KERNEL);
	if (dev->tx_buff.head == NULL) {
		kfree_skb(dev->rx_buff.skb);
		dev->rx_buff.skb = NULL;
		dev->rx_buff.head = NULL;
		return -ENOMEM;
	}

	dev->tx_buff.data = dev->tx_buff.head;
	dev->rx_buff.data = dev->rx_buff.head;
	dev->tx_buff.len = 0;
	dev->rx_buff.len = 0;

	dev->rx_buff.in_frame = FALSE;
	dev->rx_buff.state = OUTSIDE_FRAME;
	return 0;
}

static void sirdev_free_buffers(struct sir_dev *dev)
{
	if (dev->rx_buff.skb)
		kfree_skb(dev->rx_buff.skb);
	kfree(dev->tx_buff.head);
	dev->rx_buff.head = dev->tx_buff.head = NULL;
	dev->rx_buff.skb = NULL;
}

static int sirdev_open(struct net_device *ndev)
{
	struct sir_dev *dev = ndev->priv;
	const struct sir_driver *drv = dev->drv;

	if (!drv)
		return -ENODEV;

	/* increase the reference count of the driver module before doing serious stuff */
	if (!try_module_get(drv->owner))
		return -ESTALE;

	IRDA_DEBUG(2, "%s()\n", __FUNCTION__);

	if (sirdev_alloc_buffers(dev))
		goto errout_dec;

	if (!dev->drv->start_dev || dev->drv->start_dev(dev))
		goto errout_free;

	sirdev_enable_rx(dev);
	dev->raw_tx = 0;

	netif_start_queue(ndev);
	dev->irlap = irlap_open(ndev, &dev->qos, dev->hwname);
	if (!dev->irlap)
		goto errout_stop;

	netif_wake_queue(ndev);

	IRDA_DEBUG(2, "%s - done, speed = %d\n", __FUNCTION__, dev->speed);

	return 0;

errout_stop:
	atomic_set(&dev->enable_rx, 0);
	if (dev->drv->stop_dev)
		dev->drv->stop_dev(dev);
errout_free:
	sirdev_free_buffers(dev);
errout_dec:
	module_put(drv->owner);
	return -EAGAIN;
}

static int sirdev_close(struct net_device *ndev)
{
	struct sir_dev *dev = ndev->priv;
	const struct sir_driver *drv;

	/* IRDA_DEBUG(0, "%s\n", __FUNCTION__); */

	netif_stop_queue(ndev);

	down(&dev->fsm.sem);		/* block on pending config completion */

	atomic_set(&dev->enable_rx, 0);

	if (unlikely(!dev->irlap))
		goto out;
	irlap_close(dev->irlap);
	dev->irlap = NULL;

	drv = dev->drv;
	if (unlikely(!drv || !dev->priv))
		goto out;

	if (drv->stop_dev)
		drv->stop_dev(dev);

	sirdev_free_buffers(dev);
	module_put(drv->owner);

out:
	dev->speed = 0;
	up(&dev->fsm.sem);
	return 0;
}

/* ----------------------------------------------------------------------------- */

struct sir_dev * sirdev_get_instance(const struct sir_driver *drv, const char *name)
{
	struct net_device *ndev;
	struct sir_dev *dev;

	IRDA_DEBUG(0, "%s - %s\n", __FUNCTION__, name);

	/* instead of adding tests to protect against drv->do_write==NULL
	 * at several places we refuse to create a sir_dev instance for
	 * drivers which don't implement do_write.
	 */
	if (!drv || !drv->do_write)
		return NULL;

	/*
	 * Allocate new instance of the device
	 */
	ndev = alloc_irdadev(sizeof(*dev));
	if (ndev == NULL) {
		IRDA_ERROR("%s - Can't allocate memory for IrDA control block!\n", __FUNCTION__);
		goto out;
	}
	dev = ndev->priv;

	irda_init_max_qos_capabilies(&dev->qos);
	dev->qos.baud_rate.bits = IR_9600|IR_19200|IR_38400|IR_57600|IR_115200;
	dev->qos.min_turn_time.bits = drv->qos_mtt_bits;
	irda_qos_bits_to_value(&dev->qos);

	strncpy(dev->hwname, name, sizeof(dev->hwname)-1);

	atomic_set(&dev->enable_rx, 0);
	dev->tx_skb = NULL;

	spin_lock_init(&dev->tx_lock);
	init_MUTEX(&dev->fsm.sem);

	dev->drv = drv;
	dev->netdev = ndev;

	/* Override the network functions we need to use */
	ndev->hard_start_xmit = sirdev_hard_xmit;
	ndev->open = sirdev_open;
	ndev->stop = sirdev_close;
	ndev->get_stats = sirdev_get_stats;
	ndev->do_ioctl = sirdev_ioctl;

	if (register_netdev(ndev)) {
		IRDA_ERROR("%s(), register_netdev() failed!\n", __FUNCTION__);
		goto out_freenetdev;
	}

	return dev;

out_freenetdev:
	free_netdev(ndev);
out:
	return NULL;
}
EXPORT_SYMBOL(sirdev_get_instance);
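
/* A sketch of the client driver side (all names here are illustrative,
 * not from this file): a port driver fills in a struct sir_driver with
 * the callbacks used above and registers at probe time, e.g.
 *
 *	static struct sir_driver mydongle_sir_drv = {
 *		.owner		= THIS_MODULE,
 *		.qos_mtt_bits	= 0x07,		(made-up value)
 *		.do_write	= mydongle_do_write,
 *		.set_speed	= mydongle_set_speed,
 *		.set_dtr_rts	= mydongle_set_dtr_rts,
 *	};
 *
 *	dev = sirdev_get_instance(&mydongle_sir_drv, "mydongle");
 *
 * and calls sirdev_put_instance(dev) on removal.
 */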

int sirdev_put_instance(struct sir_dev *dev)
{
	int err = 0;

	IRDA_DEBUG(0, "%s\n", __FUNCTION__);

	atomic_set(&dev->enable_rx, 0);

	netif_carrier_off(dev->netdev);
	netif_device_detach(dev->netdev);

	if (dev->dongle_drv)
		err = sirdev_schedule_dongle_close(dev);
	if (err)
		IRDA_ERROR("%s - error %d\n", __FUNCTION__, err);

	sirdev_close(dev->netdev);

	down(&dev->fsm.sem);
	dev->fsm.state = SIRDEV_STATE_DEAD;	/* mark stale */
	dev->dongle_drv = NULL;
	dev->priv = NULL;
	up(&dev->fsm.sem);

	/* Remove netdevice */
	unregister_netdev(dev->netdev);

	free_netdev(dev->netdev);

	return 0;
}
EXPORT_SYMBOL(sirdev_put_instance);

static int __init sir_wq_init(void)
{
	irda_sir_wq = create_singlethread_workqueue("irda_sir_wq");
	if (!irda_sir_wq)
		return -ENOMEM;
	return 0;
}

static void __exit sir_wq_exit(void)
{
	destroy_workqueue(irda_sir_wq);
}

module_init(sir_wq_init);
module_exit(sir_wq_exit);

MODULE_AUTHOR("Martin Diehl <info@mdiehl.de>");
MODULE_DESCRIPTION("IrDA SIR core");
MODULE_LICENSE("GPL");