/*
 * drivers/isdn/mISDN/hwchannel.c (from deliverable/linux.git)
 * Snapshot taken at commit "workqueue: deprecate flush[_delayed]_work_sync()".
 */
1 /*
2 *
3 * Author Karsten Keil <kkeil@novell.com>
4 *
5 * Copyright 2008 by Karsten Keil <kkeil@novell.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 */
17
18 #include <linux/gfp.h>
19 #include <linux/module.h>
20 #include <linux/mISDNhw.h>
21
22 static void
23 dchannel_bh(struct work_struct *ws)
24 {
25 struct dchannel *dch = container_of(ws, struct dchannel, workq);
26 struct sk_buff *skb;
27 int err;
28
29 if (test_and_clear_bit(FLG_RECVQUEUE, &dch->Flags)) {
30 while ((skb = skb_dequeue(&dch->rqueue))) {
31 if (likely(dch->dev.D.peer)) {
32 err = dch->dev.D.recv(dch->dev.D.peer, skb);
33 if (err)
34 dev_kfree_skb(skb);
35 } else
36 dev_kfree_skb(skb);
37 }
38 }
39 if (test_and_clear_bit(FLG_PHCHANGE, &dch->Flags)) {
40 if (dch->phfunc)
41 dch->phfunc(dch);
42 }
43 }
44
45 static void
46 bchannel_bh(struct work_struct *ws)
47 {
48 struct bchannel *bch = container_of(ws, struct bchannel, workq);
49 struct sk_buff *skb;
50 int err;
51
52 if (test_and_clear_bit(FLG_RECVQUEUE, &bch->Flags)) {
53 while ((skb = skb_dequeue(&bch->rqueue))) {
54 bch->rcount--;
55 if (likely(bch->ch.peer)) {
56 err = bch->ch.recv(bch->ch.peer, skb);
57 if (err)
58 dev_kfree_skb(skb);
59 } else
60 dev_kfree_skb(skb);
61 }
62 }
63 }
64
65 int
66 mISDN_initdchannel(struct dchannel *ch, int maxlen, void *phf)
67 {
68 test_and_set_bit(FLG_HDLC, &ch->Flags);
69 ch->maxlen = maxlen;
70 ch->hw = NULL;
71 ch->rx_skb = NULL;
72 ch->tx_skb = NULL;
73 ch->tx_idx = 0;
74 ch->phfunc = phf;
75 skb_queue_head_init(&ch->squeue);
76 skb_queue_head_init(&ch->rqueue);
77 INIT_LIST_HEAD(&ch->dev.bchannels);
78 INIT_WORK(&ch->workq, dchannel_bh);
79 return 0;
80 }
81 EXPORT_SYMBOL(mISDN_initdchannel);
82
83 int
84 mISDN_initbchannel(struct bchannel *ch, unsigned short maxlen,
85 unsigned short minlen)
86 {
87 ch->Flags = 0;
88 ch->minlen = minlen;
89 ch->next_minlen = minlen;
90 ch->init_minlen = minlen;
91 ch->maxlen = maxlen;
92 ch->next_maxlen = maxlen;
93 ch->init_maxlen = maxlen;
94 ch->hw = NULL;
95 ch->rx_skb = NULL;
96 ch->tx_skb = NULL;
97 ch->tx_idx = 0;
98 skb_queue_head_init(&ch->rqueue);
99 ch->rcount = 0;
100 ch->next_skb = NULL;
101 INIT_WORK(&ch->workq, bchannel_bh);
102 return 0;
103 }
104 EXPORT_SYMBOL(mISDN_initbchannel);
105
106 int
107 mISDN_freedchannel(struct dchannel *ch)
108 {
109 if (ch->tx_skb) {
110 dev_kfree_skb(ch->tx_skb);
111 ch->tx_skb = NULL;
112 }
113 if (ch->rx_skb) {
114 dev_kfree_skb(ch->rx_skb);
115 ch->rx_skb = NULL;
116 }
117 skb_queue_purge(&ch->squeue);
118 skb_queue_purge(&ch->rqueue);
119 flush_work(&ch->workq);
120 return 0;
121 }
122 EXPORT_SYMBOL(mISDN_freedchannel);
123
124 void
125 mISDN_clear_bchannel(struct bchannel *ch)
126 {
127 if (ch->tx_skb) {
128 dev_kfree_skb(ch->tx_skb);
129 ch->tx_skb = NULL;
130 }
131 ch->tx_idx = 0;
132 if (ch->rx_skb) {
133 dev_kfree_skb(ch->rx_skb);
134 ch->rx_skb = NULL;
135 }
136 if (ch->next_skb) {
137 dev_kfree_skb(ch->next_skb);
138 ch->next_skb = NULL;
139 }
140 test_and_clear_bit(FLG_TX_BUSY, &ch->Flags);
141 test_and_clear_bit(FLG_TX_NEXT, &ch->Flags);
142 test_and_clear_bit(FLG_ACTIVE, &ch->Flags);
143 test_and_clear_bit(FLG_FILLEMPTY, &ch->Flags);
144 test_and_clear_bit(FLG_TX_EMPTY, &ch->Flags);
145 test_and_clear_bit(FLG_RX_OFF, &ch->Flags);
146 ch->dropcnt = 0;
147 ch->minlen = ch->init_minlen;
148 ch->next_minlen = ch->init_minlen;
149 ch->maxlen = ch->init_maxlen;
150 ch->next_maxlen = ch->init_maxlen;
151 }
152 EXPORT_SYMBOL(mISDN_clear_bchannel);
153
154 int
155 mISDN_freebchannel(struct bchannel *ch)
156 {
157 mISDN_clear_bchannel(ch);
158 skb_queue_purge(&ch->rqueue);
159 ch->rcount = 0;
160 flush_work(&ch->workq);
161 return 0;
162 }
163 EXPORT_SYMBOL(mISDN_freebchannel);
164
165 int
166 mISDN_ctrl_bchannel(struct bchannel *bch, struct mISDN_ctrl_req *cq)
167 {
168 int ret = 0;
169
170 switch (cq->op) {
171 case MISDN_CTRL_GETOP:
172 cq->op = MISDN_CTRL_RX_BUFFER | MISDN_CTRL_FILL_EMPTY |
173 MISDN_CTRL_RX_OFF;
174 break;
175 case MISDN_CTRL_FILL_EMPTY:
176 if (cq->p1) {
177 memset(bch->fill, cq->p2 & 0xff, MISDN_BCH_FILL_SIZE);
178 test_and_set_bit(FLG_FILLEMPTY, &bch->Flags);
179 } else {
180 test_and_clear_bit(FLG_FILLEMPTY, &bch->Flags);
181 }
182 break;
183 case MISDN_CTRL_RX_OFF:
184 /* read back dropped byte count */
185 cq->p2 = bch->dropcnt;
186 if (cq->p1)
187 test_and_set_bit(FLG_RX_OFF, &bch->Flags);
188 else
189 test_and_clear_bit(FLG_RX_OFF, &bch->Flags);
190 bch->dropcnt = 0;
191 break;
192 case MISDN_CTRL_RX_BUFFER:
193 if (cq->p2 > MISDN_CTRL_RX_SIZE_IGNORE)
194 bch->next_maxlen = cq->p2;
195 if (cq->p1 > MISDN_CTRL_RX_SIZE_IGNORE)
196 bch->next_minlen = cq->p1;
197 /* we return the old values */
198 cq->p1 = bch->minlen;
199 cq->p2 = bch->maxlen;
200 break;
201 default:
202 pr_info("mISDN unhandled control %x operation\n", cq->op);
203 ret = -EINVAL;
204 break;
205 }
206 return ret;
207 }
208 EXPORT_SYMBOL(mISDN_ctrl_bchannel);
209
/* Extract the SAPI/TEI address from the first two octets of a LAPD
 * frame: SAPI lives in bits 2..7 of octet 0, TEI in bits 1..7 of
 * octet 1. The result packs the TEI into bits 8..15 and the SAPI
 * into bits 0..7. */
static inline u_int
get_sapi_tei(u_char *p)
{
	u_int sapi = p[0] >> 2;
	u_int tei = p[1] >> 1;

	return (tei << 8) | sapi;
}
219
220 void
221 recv_Dchannel(struct dchannel *dch)
222 {
223 struct mISDNhead *hh;
224
225 if (dch->rx_skb->len < 2) { /* at least 2 for sapi / tei */
226 dev_kfree_skb(dch->rx_skb);
227 dch->rx_skb = NULL;
228 return;
229 }
230 hh = mISDN_HEAD_P(dch->rx_skb);
231 hh->prim = PH_DATA_IND;
232 hh->id = get_sapi_tei(dch->rx_skb->data);
233 skb_queue_tail(&dch->rqueue, dch->rx_skb);
234 dch->rx_skb = NULL;
235 schedule_event(dch, FLG_RECVQUEUE);
236 }
237 EXPORT_SYMBOL(recv_Dchannel);
238
239 void
240 recv_Echannel(struct dchannel *ech, struct dchannel *dch)
241 {
242 struct mISDNhead *hh;
243
244 if (ech->rx_skb->len < 2) { /* at least 2 for sapi / tei */
245 dev_kfree_skb(ech->rx_skb);
246 ech->rx_skb = NULL;
247 return;
248 }
249 hh = mISDN_HEAD_P(ech->rx_skb);
250 hh->prim = PH_DATA_E_IND;
251 hh->id = get_sapi_tei(ech->rx_skb->data);
252 skb_queue_tail(&dch->rqueue, ech->rx_skb);
253 ech->rx_skb = NULL;
254 schedule_event(dch, FLG_RECVQUEUE);
255 }
256 EXPORT_SYMBOL(recv_Echannel);
257
258 void
259 recv_Bchannel(struct bchannel *bch, unsigned int id, bool force)
260 {
261 struct mISDNhead *hh;
262
263 /* if allocation did fail upper functions still may call us */
264 if (unlikely(!bch->rx_skb))
265 return;
266 if (unlikely(!bch->rx_skb->len)) {
267 /* we have no data to send - this may happen after recovery
268 * from overflow or too small allocation.
269 * We need to free the buffer here */
270 dev_kfree_skb(bch->rx_skb);
271 bch->rx_skb = NULL;
272 } else {
273 if (test_bit(FLG_TRANSPARENT, &bch->Flags) &&
274 (bch->rx_skb->len < bch->minlen) && !force)
275 return;
276 hh = mISDN_HEAD_P(bch->rx_skb);
277 hh->prim = PH_DATA_IND;
278 hh->id = id;
279 if (bch->rcount >= 64) {
280 printk(KERN_WARNING
281 "B%d receive queue overflow - flushing!\n",
282 bch->nr);
283 skb_queue_purge(&bch->rqueue);
284 }
285 bch->rcount++;
286 skb_queue_tail(&bch->rqueue, bch->rx_skb);
287 bch->rx_skb = NULL;
288 schedule_event(bch, FLG_RECVQUEUE);
289 }
290 }
291 EXPORT_SYMBOL(recv_Bchannel);
292
293 void
294 recv_Dchannel_skb(struct dchannel *dch, struct sk_buff *skb)
295 {
296 skb_queue_tail(&dch->rqueue, skb);
297 schedule_event(dch, FLG_RECVQUEUE);
298 }
299 EXPORT_SYMBOL(recv_Dchannel_skb);
300
301 void
302 recv_Bchannel_skb(struct bchannel *bch, struct sk_buff *skb)
303 {
304 if (bch->rcount >= 64) {
305 printk(KERN_WARNING "B-channel %p receive queue overflow, "
306 "flushing!\n", bch);
307 skb_queue_purge(&bch->rqueue);
308 bch->rcount = 0;
309 }
310 bch->rcount++;
311 skb_queue_tail(&bch->rqueue, skb);
312 schedule_event(bch, FLG_RECVQUEUE);
313 }
314 EXPORT_SYMBOL(recv_Bchannel_skb);
315
316 static void
317 confirm_Dsend(struct dchannel *dch)
318 {
319 struct sk_buff *skb;
320
321 skb = _alloc_mISDN_skb(PH_DATA_CNF, mISDN_HEAD_ID(dch->tx_skb),
322 0, NULL, GFP_ATOMIC);
323 if (!skb) {
324 printk(KERN_ERR "%s: no skb id %x\n", __func__,
325 mISDN_HEAD_ID(dch->tx_skb));
326 return;
327 }
328 skb_queue_tail(&dch->rqueue, skb);
329 schedule_event(dch, FLG_RECVQUEUE);
330 }
331
332 int
333 get_next_dframe(struct dchannel *dch)
334 {
335 dch->tx_idx = 0;
336 dch->tx_skb = skb_dequeue(&dch->squeue);
337 if (dch->tx_skb) {
338 confirm_Dsend(dch);
339 return 1;
340 }
341 dch->tx_skb = NULL;
342 test_and_clear_bit(FLG_TX_BUSY, &dch->Flags);
343 return 0;
344 }
345 EXPORT_SYMBOL(get_next_dframe);
346
347 static void
348 confirm_Bsend(struct bchannel *bch)
349 {
350 struct sk_buff *skb;
351
352 if (bch->rcount >= 64) {
353 printk(KERN_WARNING "B-channel %p receive queue overflow, "
354 "flushing!\n", bch);
355 skb_queue_purge(&bch->rqueue);
356 bch->rcount = 0;
357 }
358 skb = _alloc_mISDN_skb(PH_DATA_CNF, mISDN_HEAD_ID(bch->tx_skb),
359 0, NULL, GFP_ATOMIC);
360 if (!skb) {
361 printk(KERN_ERR "%s: no skb id %x\n", __func__,
362 mISDN_HEAD_ID(bch->tx_skb));
363 return;
364 }
365 bch->rcount++;
366 skb_queue_tail(&bch->rqueue, skb);
367 schedule_event(bch, FLG_RECVQUEUE);
368 }
369
370 int
371 get_next_bframe(struct bchannel *bch)
372 {
373 bch->tx_idx = 0;
374 if (test_bit(FLG_TX_NEXT, &bch->Flags)) {
375 bch->tx_skb = bch->next_skb;
376 if (bch->tx_skb) {
377 bch->next_skb = NULL;
378 test_and_clear_bit(FLG_TX_NEXT, &bch->Flags);
379 /* confirm imediately to allow next data */
380 confirm_Bsend(bch);
381 return 1;
382 } else {
383 test_and_clear_bit(FLG_TX_NEXT, &bch->Flags);
384 printk(KERN_WARNING "B TX_NEXT without skb\n");
385 }
386 }
387 bch->tx_skb = NULL;
388 test_and_clear_bit(FLG_TX_BUSY, &bch->Flags);
389 return 0;
390 }
391 EXPORT_SYMBOL(get_next_bframe);
392
393 void
394 queue_ch_frame(struct mISDNchannel *ch, u_int pr, int id, struct sk_buff *skb)
395 {
396 struct mISDNhead *hh;
397
398 if (!skb) {
399 _queue_data(ch, pr, id, 0, NULL, GFP_ATOMIC);
400 } else {
401 if (ch->peer) {
402 hh = mISDN_HEAD_P(skb);
403 hh->prim = pr;
404 hh->id = id;
405 if (!ch->recv(ch->peer, skb))
406 return;
407 }
408 dev_kfree_skb(skb);
409 }
410 }
411 EXPORT_SYMBOL(queue_ch_frame);
412
413 int
414 dchannel_senddata(struct dchannel *ch, struct sk_buff *skb)
415 {
416 /* check oversize */
417 if (skb->len <= 0) {
418 printk(KERN_WARNING "%s: skb too small\n", __func__);
419 return -EINVAL;
420 }
421 if (skb->len > ch->maxlen) {
422 printk(KERN_WARNING "%s: skb too large(%d/%d)\n",
423 __func__, skb->len, ch->maxlen);
424 return -EINVAL;
425 }
426 /* HW lock must be obtained */
427 if (test_and_set_bit(FLG_TX_BUSY, &ch->Flags)) {
428 skb_queue_tail(&ch->squeue, skb);
429 return 0;
430 } else {
431 /* write to fifo */
432 ch->tx_skb = skb;
433 ch->tx_idx = 0;
434 return 1;
435 }
436 }
437 EXPORT_SYMBOL(dchannel_senddata);
438
439 int
440 bchannel_senddata(struct bchannel *ch, struct sk_buff *skb)
441 {
442
443 /* check oversize */
444 if (skb->len <= 0) {
445 printk(KERN_WARNING "%s: skb too small\n", __func__);
446 return -EINVAL;
447 }
448 if (skb->len > ch->maxlen) {
449 printk(KERN_WARNING "%s: skb too large(%d/%d)\n",
450 __func__, skb->len, ch->maxlen);
451 return -EINVAL;
452 }
453 /* HW lock must be obtained */
454 /* check for pending next_skb */
455 if (ch->next_skb) {
456 printk(KERN_WARNING
457 "%s: next_skb exist ERROR (skb->len=%d next_skb->len=%d)\n",
458 __func__, skb->len, ch->next_skb->len);
459 return -EBUSY;
460 }
461 if (test_and_set_bit(FLG_TX_BUSY, &ch->Flags)) {
462 test_and_set_bit(FLG_TX_NEXT, &ch->Flags);
463 ch->next_skb = skb;
464 return 0;
465 } else {
466 /* write to fifo */
467 ch->tx_skb = skb;
468 ch->tx_idx = 0;
469 confirm_Bsend(ch);
470 return 1;
471 }
472 }
473 EXPORT_SYMBOL(bchannel_senddata);
474
475 /* The function allocates a new receive skb on demand with a size for the
476 * requirements of the current protocol. It returns the tailroom of the
477 * receive skb or an error.
478 */
479 int
480 bchannel_get_rxbuf(struct bchannel *bch, int reqlen)
481 {
482 int len;
483
484 if (bch->rx_skb) {
485 len = skb_tailroom(bch->rx_skb);
486 if (len < reqlen) {
487 pr_warning("B%d no space for %d (only %d) bytes\n",
488 bch->nr, reqlen, len);
489 if (test_bit(FLG_TRANSPARENT, &bch->Flags)) {
490 /* send what we have now and try a new buffer */
491 recv_Bchannel(bch, 0, true);
492 } else {
493 /* on HDLC we have to drop too big frames */
494 return -EMSGSIZE;
495 }
496 } else {
497 return len;
498 }
499 }
500 /* update current min/max length first */
501 if (unlikely(bch->maxlen != bch->next_maxlen))
502 bch->maxlen = bch->next_maxlen;
503 if (unlikely(bch->minlen != bch->next_minlen))
504 bch->minlen = bch->next_minlen;
505 if (unlikely(reqlen > bch->maxlen))
506 return -EMSGSIZE;
507 if (test_bit(FLG_TRANSPARENT, &bch->Flags)) {
508 if (reqlen >= bch->minlen) {
509 len = reqlen;
510 } else {
511 len = 2 * bch->minlen;
512 if (len > bch->maxlen)
513 len = bch->maxlen;
514 }
515 } else {
516 /* with HDLC we do not know the length yet */
517 len = bch->maxlen;
518 }
519 bch->rx_skb = mI_alloc_skb(len, GFP_ATOMIC);
520 if (!bch->rx_skb) {
521 pr_warning("B%d receive no memory for %d bytes\n",
522 bch->nr, len);
523 len = -ENOMEM;
524 }
525 return len;
526 }
527 EXPORT_SYMBOL(bchannel_get_rxbuf);
/* (cgit footer) This page took 0.044578 seconds and 5 git commands to generate. */