Bluetooth: Add new l2cap_chan struct members for high speed channels
[deliverable/linux.git] / net / bluetooth / l2cap_core.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
8
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
10
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
14
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
23
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
27 */
28
29 /* Bluetooth L2CAP core. */
30
31 #include <linux/module.h>
32
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
35
36 #include <net/bluetooth/bluetooth.h>
37 #include <net/bluetooth/hci_core.h>
38 #include <net/bluetooth/l2cap.h>
39 #include <net/bluetooth/smp.h>
40 #include <net/bluetooth/a2mp.h>
41
42 bool disable_ertm;
43
44 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
45 static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP, };
46
47 static LIST_HEAD(chan_list);
48 static DEFINE_RWLOCK(chan_list_lock);
49
50 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
51 u8 code, u8 ident, u16 dlen, void *data);
52 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
53 void *data);
54 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
55 static void l2cap_send_disconn_req(struct l2cap_conn *conn,
56 struct l2cap_chan *chan, int err);
57
58 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
59 struct sk_buff_head *skbs, u8 event);
60
61 /* ---- L2CAP channels ---- */
62
63 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
64 u16 cid)
65 {
66 struct l2cap_chan *c;
67
68 list_for_each_entry(c, &conn->chan_l, list) {
69 if (c->dcid == cid)
70 return c;
71 }
72 return NULL;
73 }
74
75 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
76 u16 cid)
77 {
78 struct l2cap_chan *c;
79
80 list_for_each_entry(c, &conn->chan_l, list) {
81 if (c->scid == cid)
82 return c;
83 }
84 return NULL;
85 }
86
87 /* Find channel with given SCID.
88 * Returns locked channel. */
89 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
90 u16 cid)
91 {
92 struct l2cap_chan *c;
93
94 mutex_lock(&conn->chan_lock);
95 c = __l2cap_get_chan_by_scid(conn, cid);
96 if (c)
97 l2cap_chan_lock(c);
98 mutex_unlock(&conn->chan_lock);
99
100 return c;
101 }
102
103 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
104 u8 ident)
105 {
106 struct l2cap_chan *c;
107
108 list_for_each_entry(c, &conn->chan_l, list) {
109 if (c->ident == ident)
110 return c;
111 }
112 return NULL;
113 }
114
115 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
116 {
117 struct l2cap_chan *c;
118
119 list_for_each_entry(c, &chan_list, global_l) {
120 if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
121 return c;
122 }
123 return NULL;
124 }
125
/* Bind a channel to a PSM for the given source address.
 *
 * If @psm is non-zero it is used directly, failing with -EADDRINUSE if
 * already bound on @src. If @psm is zero, the first free odd PSM in the
 * dynamic range 0x1001-0x10ff is allocated; -EINVAL if none is free.
 */
int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
{
	int err;

	write_lock(&chan_list_lock);

	if (psm && __l2cap_global_chan_by_addr(psm, src)) {
		err = -EADDRINUSE;
		goto done;
	}

	if (psm) {
		chan->psm = psm;
		chan->sport = psm;
		err = 0;
	} else {
		u16 p;

		err = -EINVAL;
		/* Valid PSMs are odd; step by 2 through the dynamic range. */
		for (p = 0x1001; p < 0x1100; p += 2)
			if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
				chan->psm = cpu_to_le16(p);
				chan->sport = cpu_to_le16(p);
				err = 0;
				break;
			}
	}

done:
	write_unlock(&chan_list_lock);
	return err;
}
158
159 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
160 {
161 write_lock(&chan_list_lock);
162
163 chan->scid = scid;
164
165 write_unlock(&chan_list_lock);
166
167 return 0;
168 }
169
170 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
171 {
172 u16 cid = L2CAP_CID_DYN_START;
173
174 for (; cid < L2CAP_CID_DYN_END; cid++) {
175 if (!__l2cap_get_chan_by_scid(conn, cid))
176 return cid;
177 }
178
179 return 0;
180 }
181
/* Transition a channel to a new BT_* state and notify the channel ops.
 * Lockless variant: caller must already hold the socket lock.
 */
static void __l2cap_state_change(struct l2cap_chan *chan, int state)
{
	BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
	       state_to_string(state));

	chan->state = state;
	chan->ops->state_change(chan, state);
}
190
/* Locked wrapper for __l2cap_state_change(): takes the socket lock
 * around the state transition.
 */
static void l2cap_state_change(struct l2cap_chan *chan, int state)
{
	struct sock *sk = chan->sk;

	lock_sock(sk);
	__l2cap_state_change(chan, state);
	release_sock(sk);
}
199
/* Record an error on the channel's socket. Caller holds the sock lock. */
static inline void __l2cap_chan_set_err(struct l2cap_chan *chan, int err)
{
	struct sock *sk = chan->sk;

	sk->sk_err = err;
}
206
/* Locked wrapper for __l2cap_chan_set_err(). */
static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
{
	struct sock *sk = chan->sk;

	lock_sock(sk);
	__l2cap_chan_set_err(chan, err);
	release_sock(sk);
}
215
/* Arm the ERTM retransmission timer, but only if the monitor timer is
 * not already pending (the two are mutually exclusive) and a retransmit
 * timeout has been negotiated.
 */
static void __set_retrans_timer(struct l2cap_chan *chan)
{
	if (!delayed_work_pending(&chan->monitor_timer) &&
	    chan->retrans_timeout) {
		l2cap_set_timer(chan, &chan->retrans_timer,
				msecs_to_jiffies(chan->retrans_timeout));
	}
}
224
/* Arm the ERTM monitor timer, cancelling any pending retransmission
 * timer first (monitor supersedes retransmission while polling).
 */
static void __set_monitor_timer(struct l2cap_chan *chan)
{
	__clear_retrans_timer(chan);
	if (chan->monitor_timeout) {
		l2cap_set_timer(chan, &chan->monitor_timer,
				msecs_to_jiffies(chan->monitor_timeout));
	}
}
233
234 static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
235 u16 seq)
236 {
237 struct sk_buff *skb;
238
239 skb_queue_walk(head, skb) {
240 if (bt_cb(skb)->control.txseq == seq)
241 return skb;
242 }
243
244 return NULL;
245 }
246
247 /* ---- L2CAP sequence number lists ---- */
248
249 /* For ERTM, ordered lists of sequence numbers must be tracked for
250 * SREJ requests that are received and for frames that are to be
251 * retransmitted. These seq_list functions implement a singly-linked
252 * list in an array, where membership in the list can also be checked
253 * in constant time. Items can also be added to the tail of the list
254 * and removed from the head in constant time, without further memory
255 * allocs or frees.
256 */
257
/* Initialize a sequence list sized for @size entries.
 * Returns 0 on success or -ENOMEM if the backing array cannot be
 * allocated. The list starts empty (head/tail CLEAR).
 */
static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
{
	size_t alloc_size, i;

	/* Allocated size is a power of 2 to map sequence numbers
	 * (which may be up to 14 bits) in to a smaller array that is
	 * sized for the negotiated ERTM transmit windows.
	 */
	alloc_size = roundup_pow_of_two(size);

	seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
	if (!seq_list->list)
		return -ENOMEM;

	/* Power-of-two size makes (seq & mask) the array index. */
	seq_list->mask = alloc_size - 1;
	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
	for (i = 0; i < alloc_size; i++)
		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;

	return 0;
}
280
/* Release the backing array of a sequence list. */
static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
{
	kfree(seq_list->list);
}
285
/* Constant-time membership test: a slot equal to CLEAR means the
 * sequence number is not on the list.
 */
static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
					   u16 seq)
{
	/* Constant-time check for list membership */
	return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
}
292
/* Remove @seq from the singly-linked-in-array list.
 *
 * Removing the head is O(1); removing an interior entry walks the list
 * to find the predecessor. Returns @seq on success, or
 * L2CAP_SEQ_LIST_CLEAR if the list is empty or @seq is not present.
 */
static u16 l2cap_seq_list_remove(struct l2cap_seq_list *seq_list, u16 seq)
{
	u16 mask = seq_list->mask;

	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR) {
		/* In case someone tries to pop the head of an empty list */
		return L2CAP_SEQ_LIST_CLEAR;
	} else if (seq_list->head == seq) {
		/* Head can be removed in constant time */
		seq_list->head = seq_list->list[seq & mask];
		seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;

		/* If the old head was also the tail, the list is now empty. */
		if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
			seq_list->head = L2CAP_SEQ_LIST_CLEAR;
			seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
		}
	} else {
		/* Walk the list to find the sequence number */
		u16 prev = seq_list->head;
		while (seq_list->list[prev & mask] != seq) {
			prev = seq_list->list[prev & mask];
			if (prev == L2CAP_SEQ_LIST_TAIL)
				return L2CAP_SEQ_LIST_CLEAR;
		}

		/* Unlink the number from the list and clear it */
		seq_list->list[prev & mask] = seq_list->list[seq & mask];
		seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
		if (seq_list->tail == seq)
			seq_list->tail = prev;
	}
	return seq;
}
326
/* Pop and return the head of the list (CLEAR if the list is empty). */
static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
{
	/* Remove the head in constant time */
	return l2cap_seq_list_remove(seq_list, seq_list->head);
}
332
/* Empty the list, resetting every slot plus head and tail to CLEAR.
 * A no-op when the list is already empty.
 */
static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
{
	u16 i;

	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
		return;

	for (i = 0; i <= seq_list->mask; i++)
		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;

	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
}
346
/* Append @seq to the tail of the list in constant time.
 * Silently ignores sequence numbers already on the list.
 */
static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
{
	u16 mask = seq_list->mask;

	/* All appends happen in constant time */

	/* Already a member: nothing to do. */
	if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
		return;

	if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
		seq_list->head = seq;
	else
		seq_list->list[seq_list->tail & mask] = seq;

	seq_list->tail = seq;
	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
}
364
/* Delayed-work handler for chan_timer: the channel timed out.
 *
 * Chooses a close reason based on the channel state, then closes the
 * channel. Lock order is conn->chan_lock before the channel lock.
 * Drops the reference taken when the timer was armed.
 */
static void l2cap_chan_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       chan_timer.work);
	struct l2cap_conn *conn = chan->conn;
	int reason;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(chan);

	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (chan->state == BT_CONNECT &&
		 chan->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	l2cap_chan_close(chan, reason);

	l2cap_chan_unlock(chan);

	/* ops->close may sleep; called with only conn->chan_lock held. */
	chan->ops->close(chan);
	mutex_unlock(&conn->chan_lock);

	l2cap_chan_put(chan);
}
394
/* Allocate and initialize a new channel, link it into the global
 * channel list, and return it with one reference held.
 * Returns NULL on allocation failure.
 */
struct l2cap_chan *l2cap_chan_create(void)
{
	struct l2cap_chan *chan;

	chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
	if (!chan)
		return NULL;

	mutex_init(&chan->lock);

	write_lock(&chan_list_lock);
	list_add(&chan->global_l, &chan_list);
	write_unlock(&chan_list_lock);

	INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);

	chan->state = BT_OPEN;

	kref_init(&chan->kref);

	/* This flag is cleared in l2cap_chan_ready() */
	set_bit(CONF_NOT_COMPLETE, &chan->conf_state);

	BT_DBG("chan %p", chan);

	return chan;
}
422
/* kref release callback: unlink the channel from the global list and
 * free it. Invoked when the last reference is dropped via
 * l2cap_chan_put().
 */
static void l2cap_chan_destroy(struct kref *kref)
{
	struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);

	BT_DBG("chan %p", chan);

	write_lock(&chan_list_lock);
	list_del(&chan->global_l);
	write_unlock(&chan_list_lock);

	kfree(chan);
}
435
/* Take an additional reference on a channel. */
void l2cap_chan_hold(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));

	kref_get(&c->kref);
}
442
/* Drop a channel reference; frees the channel via l2cap_chan_destroy()
 * when the count reaches zero.
 */
void l2cap_chan_put(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));

	kref_put(&c->kref, l2cap_chan_destroy);
}
449
/* Apply spec-default ERTM/security parameters to a fresh channel. */
void l2cap_chan_set_defaults(struct l2cap_chan *chan)
{
	chan->fcs = L2CAP_FCS_CRC16;
	chan->max_tx = L2CAP_DEFAULT_MAX_TX;
	chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
	chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
	chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
	chan->sec_level = BT_SECURITY_LOW;

	set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
}
461
/* Attach a channel to a connection and assign CIDs/MTUs according to
 * the channel type. Caller must hold conn->chan_lock. Takes a channel
 * reference that is dropped in l2cap_chan_del().
 */
void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
	       __le16_to_cpu(chan->psm), chan->dcid);

	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	chan->conn = conn;

	switch (chan->chan_type) {
	case L2CAP_CHAN_CONN_ORIENTED:
		if (conn->hcon->type == LE_LINK) {
			/* LE connection */
			chan->omtu = L2CAP_DEFAULT_MTU;
			chan->scid = L2CAP_CID_LE_DATA;
			chan->dcid = L2CAP_CID_LE_DATA;
		} else {
			/* Alloc CID for connection-oriented socket */
			chan->scid = l2cap_alloc_cid(conn);
			chan->omtu = L2CAP_DEFAULT_MTU;
		}
		break;

	case L2CAP_CHAN_CONN_LESS:
		/* Connectionless socket */
		chan->scid = L2CAP_CID_CONN_LESS;
		chan->dcid = L2CAP_CID_CONN_LESS;
		chan->omtu = L2CAP_DEFAULT_MTU;
		break;

	case L2CAP_CHAN_CONN_FIX_A2MP:
		/* AMP manager fixed channel */
		chan->scid = L2CAP_CID_A2MP;
		chan->dcid = L2CAP_CID_A2MP;
		chan->omtu = L2CAP_A2MP_DEFAULT_MTU;
		chan->imtu = L2CAP_A2MP_DEFAULT_MTU;
		break;

	default:
		/* Raw socket can send/recv signalling messages only */
		chan->scid = L2CAP_CID_SIGNALING;
		chan->dcid = L2CAP_CID_SIGNALING;
		chan->omtu = L2CAP_DEFAULT_MTU;
	}

	/* Default best-effort extended flow spec parameters. */
	chan->local_id = L2CAP_BESTEFFORT_ID;
	chan->local_stype = L2CAP_SERV_BESTEFFORT;
	chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
	chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
	chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
	chan->local_flush_to = L2CAP_EFS_DEFAULT_FLUSH_TO;

	l2cap_chan_hold(chan);

	list_add(&chan->list, &conn->chan_l);
}
517
/* Locked wrapper for __l2cap_chan_add(). */
void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	mutex_lock(&conn->chan_lock);
	__l2cap_chan_add(conn, chan);
	mutex_unlock(&conn->chan_lock);
}
524
525 void l2cap_chan_del(struct l2cap_chan *chan, int err)
526 {
527 struct l2cap_conn *conn = chan->conn;
528
529 __clear_chan_timer(chan);
530
531 BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
532
533 if (conn) {
534 struct amp_mgr *mgr = conn->hcon->amp_mgr;
535 /* Delete from channel list */
536 list_del(&chan->list);
537
538 l2cap_chan_put(chan);
539
540 chan->conn = NULL;
541
542 if (chan->chan_type != L2CAP_CHAN_CONN_FIX_A2MP)
543 hci_conn_put(conn->hcon);
544
545 if (mgr && mgr->bredr_chan == chan)
546 mgr->bredr_chan = NULL;
547 }
548
549 chan->ops->teardown(chan, err);
550
551 if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
552 return;
553
554 switch(chan->mode) {
555 case L2CAP_MODE_BASIC:
556 break;
557
558 case L2CAP_MODE_ERTM:
559 __clear_retrans_timer(chan);
560 __clear_monitor_timer(chan);
561 __clear_ack_timer(chan);
562
563 skb_queue_purge(&chan->srej_q);
564
565 l2cap_seq_list_free(&chan->srej_list);
566 l2cap_seq_list_free(&chan->retrans_list);
567
568 /* fall through */
569
570 case L2CAP_MODE_STREAMING:
571 skb_queue_purge(&chan->tx_q);
572 break;
573 }
574
575 return;
576 }
577
/* Close a channel, driving the appropriate shutdown for its state.
 *
 * Connected/configuring ACL channels get a Disconnect Request with a
 * channel timer; a pending incoming connection (BT_CONNECT2) is
 * rejected with a Connection Response first. All other states tear the
 * channel down directly.
 */
void l2cap_chan_close(struct l2cap_chan *chan, int reason)
{
	struct l2cap_conn *conn = chan->conn;
	struct sock *sk = chan->sk;

	BT_DBG("chan %p state %s sk %p", chan, state_to_string(chan->state),
	       sk);

	switch (chan->state) {
	case BT_LISTEN:
		chan->ops->teardown(chan, 0);
		break;

	case BT_CONNECTED:
	case BT_CONFIG:
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
		    conn->hcon->type == ACL_LINK) {
			__set_chan_timer(chan, sk->sk_sndtimeo);
			l2cap_send_disconn_req(conn, chan, reason);
		} else
			l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT2:
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
		    conn->hcon->type == ACL_LINK) {
			struct l2cap_conn_rsp rsp;
			__u16 result;

			/* Security block if setup was deferred, else bad PSM. */
			if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags))
				result = L2CAP_CR_SEC_BLOCK;
			else
				result = L2CAP_CR_BAD_PSM;
			l2cap_state_change(chan, BT_DISCONN);

			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);
			rsp.result = cpu_to_le16(result);
			rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);
		}

		l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT:
	case BT_DISCONN:
		l2cap_chan_del(chan, reason);
		break;

	default:
		chan->ops->teardown(chan, 0);
		break;
	}
}
634
/* Map the channel type/PSM and security level to an HCI authentication
 * requirement. Raw channels use dedicated bonding, SDP never bonds
 * (and low security is promoted to BT_SECURITY_SDP), everything else
 * uses general bonding.
 */
static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
{
	if (chan->chan_type == L2CAP_CHAN_RAW) {
		switch (chan->sec_level) {
		case BT_SECURITY_HIGH:
			return HCI_AT_DEDICATED_BONDING_MITM;
		case BT_SECURITY_MEDIUM:
			return HCI_AT_DEDICATED_BONDING;
		default:
			return HCI_AT_NO_BONDING;
		}
	} else if (chan->psm == __constant_cpu_to_le16(L2CAP_PSM_SDP)) {
		if (chan->sec_level == BT_SECURITY_LOW)
			chan->sec_level = BT_SECURITY_SDP;

		if (chan->sec_level == BT_SECURITY_HIGH)
			return HCI_AT_NO_BONDING_MITM;
		else
			return HCI_AT_NO_BONDING;
	} else {
		switch (chan->sec_level) {
		case BT_SECURITY_HIGH:
			return HCI_AT_GENERAL_BONDING_MITM;
		case BT_SECURITY_MEDIUM:
			return HCI_AT_GENERAL_BONDING;
		default:
			return HCI_AT_NO_BONDING;
		}
	}
}
665
/* Service level security */
/* Check (and, if needed, initiate) link security appropriate for this
 * channel. Returns the hci_conn_security() result: non-zero when the
 * link already satisfies the requirement.
 */
int l2cap_chan_check_security(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	__u8 auth_type;

	auth_type = l2cap_get_auth_type(chan);

	return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
}
676
/* Allocate the next signalling command identifier for this connection,
 * cycling through the kernel-reserved range 1-128.
 */
static u8 l2cap_get_ident(struct l2cap_conn *conn)
{
	u8 id;

	/* Get next available identificator.
	 * 1 - 128 are used by kernel.
	 * 129 - 199 are reserved.
	 * 200 - 254 are used by utilities like l2ping, etc.
	 */

	spin_lock(&conn->lock);

	if (++conn->tx_ident > 128)
		conn->tx_ident = 1;

	id = conn->tx_ident;

	spin_unlock(&conn->lock);

	return id;
}
698
/* Build and transmit a signalling command over the connection's ACL
 * link at maximum priority. Silently drops the command if the skb
 * cannot be built.
 */
static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
			   void *data)
{
	struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
	u8 flags;

	BT_DBG("code 0x%2.2x", code);

	if (!skb)
		return;

	/* Prefer non-flushable packets when the controller supports them. */
	if (lmp_no_flush_capable(conn->hcon->hdev))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
	skb->priority = HCI_PRIO_MAX;

	hci_send_acl(conn->hchan, skb, flags);
}
720
/* Transmit a data skb on the channel's ACL link, honouring the
 * channel's flushable and force-active flags.
 */
static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct hci_conn *hcon = chan->conn->hcon;
	u16 flags;

	BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
	       skb->priority);

	if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
	    lmp_no_flush_capable(hcon->hdev))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
	hci_send_acl(chan->conn->hchan, skb, flags);
}
738
/* Decode a 16-bit enhanced control field into the generic l2cap_ctrl
 * representation. Fields not valid for the frame type are zeroed.
 */
static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
{
	control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
	control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;

	if (enh & L2CAP_CTRL_FRAME_TYPE) {
		/* S-Frame */
		control->sframe = 1;
		control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
		control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;

		control->sar = 0;
		control->txseq = 0;
	} else {
		/* I-Frame */
		control->sframe = 0;
		control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
		control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;

		control->poll = 0;
		control->super = 0;
	}
}
762
/* Decode a 32-bit extended control field into the generic l2cap_ctrl
 * representation. Fields not valid for the frame type are zeroed.
 */
static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
{
	control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
	control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;

	if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
		/* S-Frame */
		control->sframe = 1;
		control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
		control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;

		control->sar = 0;
		control->txseq = 0;
	} else {
		/* I-Frame */
		control->sframe = 0;
		control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
		control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;

		control->poll = 0;
		control->super = 0;
	}
}
786
/* Unpack the control field at the start of @skb into bt_cb(skb),
 * choosing the enhanced or extended layout from the channel flags, and
 * pull the consumed bytes off the skb.
 */
static inline void __unpack_control(struct l2cap_chan *chan,
				    struct sk_buff *skb)
{
	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
		__unpack_extended_control(get_unaligned_le32(skb->data),
					  &bt_cb(skb)->control);
		skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
	} else {
		__unpack_enhanced_control(get_unaligned_le16(skb->data),
					  &bt_cb(skb)->control);
		skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
	}
}
800
/* Encode an l2cap_ctrl into the 32-bit extended control field layout. */
static u32 __pack_extended_control(struct l2cap_ctrl *control)
{
	u32 packed;

	packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
	packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;

	if (control->sframe) {
		packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
		packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
		packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
	} else {
		packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
		packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
	}

	return packed;
}
819
/* Encode an l2cap_ctrl into the 16-bit enhanced control field layout. */
static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
{
	u16 packed;

	packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
	packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;

	if (control->sframe) {
		packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
		packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
		packed |= L2CAP_CTRL_FRAME_TYPE;
	} else {
		packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
		packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
	}

	return packed;
}
838
/* Write the packed control field into @skb just after the basic L2CAP
 * header, using the layout selected by the channel's EXT_CTRL flag.
 */
static inline void __pack_control(struct l2cap_chan *chan,
				  struct l2cap_ctrl *control,
				  struct sk_buff *skb)
{
	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
		put_unaligned_le32(__pack_extended_control(control),
				   skb->data + L2CAP_HDR_SIZE);
	} else {
		put_unaligned_le16(__pack_enhanced_control(control),
				   skb->data + L2CAP_HDR_SIZE);
	}
}
851
852 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
853 {
854 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
855 return L2CAP_EXT_HDR_SIZE;
856 else
857 return L2CAP_ENH_HDR_SIZE;
858 }
859
/* Build an S-frame PDU carrying the already-packed @control value.
 * Appends an FCS when CRC16 is negotiated. Returns the skb or an
 * ERR_PTR on allocation failure.
 */
static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
					       u32 control)
{
	struct sk_buff *skb;
	struct l2cap_hdr *lh;
	int hlen = __ertm_hdr_size(chan);

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	skb = bt_skb_alloc(hlen, GFP_KERNEL);

	if (!skb)
		return ERR_PTR(-ENOMEM);

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	/* FCS covers header + control as assembled so far. */
	if (chan->fcs == L2CAP_FCS_CRC16) {
		u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
		put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
	}

	skb->priority = HCI_PRIO_MAX;
	return skb;
}
892
/* Send an ERTM supervisory frame described by @control.
 *
 * Sets the F-bit if one is owed and this is not a poll, updates
 * RNR-sent state, and (for non-SREJ frames) records the acked sequence
 * and cancels the pending ack timer.
 */
static void l2cap_send_sframe(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;
	u32 control_field;

	BT_DBG("chan %p, control %p", chan, control);

	if (!control->sframe)
		return;

	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
	    !control->poll)
		control->final = 1;

	if (control->super == L2CAP_SUPER_RR)
		clear_bit(CONN_RNR_SENT, &chan->conn_state);
	else if (control->super == L2CAP_SUPER_RNR)
		set_bit(CONN_RNR_SENT, &chan->conn_state);

	if (control->super != L2CAP_SUPER_SREJ) {
		chan->last_acked_seq = control->reqseq;
		__clear_ack_timer(chan);
	}

	BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
	       control->final, control->poll, control->super);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		control_field = __pack_extended_control(control);
	else
		control_field = __pack_enhanced_control(control);

	skb = l2cap_create_sframe_pdu(chan, control_field);
	if (!IS_ERR(skb))
		l2cap_do_send(chan, skb);
}
930
/* Send an RR (or RNR when locally busy) supervisory frame,
 * acknowledging up to buffer_seq. @poll sets the P-bit.
 */
static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
{
	struct l2cap_ctrl control;

	BT_DBG("chan %p, poll %d", chan, poll);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.poll = poll;

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
		control.super = L2CAP_SUPER_RNR;
	else
		control.super = L2CAP_SUPER_RR;

	control.reqseq = chan->buffer_seq;
	l2cap_send_sframe(chan, &control);
}
949
/* True when no Connection Request is outstanding for this channel. */
static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
{
	return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
}
954
955 static bool __amp_capable(struct l2cap_chan *chan)
956 {
957 struct l2cap_conn *conn = chan->conn;
958
959 if (enable_hs &&
960 chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED &&
961 conn->fixed_chan_mask & L2CAP_FC_A2MP)
962 return true;
963 else
964 return false;
965 }
966
/* Issue an L2CAP Connection Request for this channel and mark the
 * connect as pending until the response arrives.
 */
void l2cap_send_conn_req(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_conn_req req;

	req.scid = cpu_to_le16(chan->scid);
	req.psm = chan->psm;

	chan->ident = l2cap_get_ident(conn);

	set_bit(CONF_CONNECT_PEND, &chan->conf_state);

	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
}
981
/* Mark a channel fully connected: clear configuration state (including
 * CONF_NOT_COMPLETE set at creation), stop the channel timer, and
 * notify the owner.
 */
static void l2cap_chan_ready(struct l2cap_chan *chan)
{
	/* This clears all conf flags, including CONF_NOT_COMPLETE */
	chan->conf_state = 0;
	__clear_chan_timer(chan);

	chan->state = BT_CONNECTED;

	chan->ops->ready(chan);
}
992
/* Begin channel establishment: discover AMP controllers first when the
 * channel can use high speed, otherwise go straight to a Connection
 * Request on BR/EDR.
 */
static void l2cap_start_connection(struct l2cap_chan *chan)
{
	if (!__amp_capable(chan)) {
		l2cap_send_conn_req(chan);
		return;
	}

	BT_DBG("chan %p AMP capable: discover AMPs", chan);
	a2mp_discover_amp(chan);
}
1002
/* Kick off connection setup for a channel.
 *
 * LE channels are ready immediately. On BR/EDR, an Information Request
 * for the feature mask is sent first if not done yet; once features are
 * known and security passes, the connection is started.
 */
static void l2cap_do_start(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;

	if (conn->hcon->type == LE_LINK) {
		l2cap_chan_ready(chan);
		return;
	}

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
		/* Wait until the feature exchange has completed. */
		if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
			return;

		if (l2cap_chan_check_security(chan) &&
		    __l2cap_no_conn_pending(chan)) {
			l2cap_start_connection(chan);
		}
	} else {
		struct l2cap_info_req req;
		req.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

		l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
			       sizeof(req), &req);
	}
}
1033
1034 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1035 {
1036 u32 local_feat_mask = l2cap_feat_mask;
1037 if (!disable_ertm)
1038 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1039
1040 switch (mode) {
1041 case L2CAP_MODE_ERTM:
1042 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1043 case L2CAP_MODE_STREAMING:
1044 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
1045 default:
1046 return 0x00;
1047 }
1048 }
1049
/* Send a Disconnect Request for the channel and move it to BT_DISCONN,
 * recording @err on the socket. ERTM timers are cancelled first; the
 * A2MP fixed channel changes state without sending a request.
 */
static void l2cap_send_disconn_req(struct l2cap_conn *conn,
				   struct l2cap_chan *chan, int err)
{
	struct sock *sk = chan->sk;
	struct l2cap_disconn_req req;

	if (!conn)
		return;

	if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);
	}

	if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
		l2cap_state_change(chan, BT_DISCONN);
		return;
	}

	req.dcid = cpu_to_le16(chan->dcid);
	req.scid = cpu_to_le16(chan->scid);
	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
		       sizeof(req), &req);

	lock_sock(sk);
	__l2cap_state_change(chan, BT_DISCONN);
	__l2cap_chan_set_err(chan, err);
	release_sock(sk);
}
1080
1081 /* ---- L2CAP connections ---- */
/* Drive pending channels forward once the connection is usable.
 *
 * Outgoing channels in BT_CONNECT that pass security are started (or
 * closed if their mode is unsupported by the remote); incoming channels
 * in BT_CONNECT2 are answered with success, authorization-pending
 * (deferred setup), or authentication-pending, and configuration is
 * begun on success. Iterates under conn->chan_lock with each channel
 * individually locked.
 */
static void l2cap_conn_start(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan, *tmp;

	BT_DBG("conn %p", conn);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
		struct sock *sk = chan->sk;

		l2cap_chan_lock(chan);

		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			if (!l2cap_chan_check_security(chan) ||
			    !__l2cap_no_conn_pending(chan)) {
				l2cap_chan_unlock(chan);
				continue;
			}

			/* State-2 devices cannot fall back to basic mode. */
			if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
			    && test_bit(CONF_STATE2_DEVICE,
					&chan->conf_state)) {
				l2cap_chan_close(chan, ECONNRESET);
				l2cap_chan_unlock(chan);
				continue;
			}

			l2cap_start_connection(chan);

		} else if (chan->state == BT_CONNECT2) {
			struct l2cap_conn_rsp rsp;
			char buf[128];
			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);

			if (l2cap_chan_check_security(chan)) {
				lock_sock(sk);
				if (test_bit(BT_SK_DEFER_SETUP,
					     &bt_sk(sk)->flags)) {
					rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
					rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
					chan->ops->defer(chan);

				} else {
					__l2cap_state_change(chan, BT_CONFIG);
					rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
					rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
				}
				release_sock(sk);
			} else {
				rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
				rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
			}

			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			/* Only start configuration once, and only on success. */
			if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
			    rsp.result != L2CAP_CR_SUCCESS) {
				l2cap_chan_unlock(chan);
				continue;
			}

			set_bit(CONF_REQ_SENT, &chan->conf_state);
			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
				       l2cap_build_conf_req(chan, buf), buf);
			chan->num_conf_req++;
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
1162
/* Find socket with cid and source/destination bdaddr.
 * Returns closest match, locked.
 */
/* Exact address matches win immediately; otherwise the last channel
 * whose wildcard (BDADDR_ANY) addresses are compatible is returned.
 * @state of 0 matches any state.
 */
static struct l2cap_chan *l2cap_global_chan_by_scid(int state, u16 cid,
						    bdaddr_t *src,
						    bdaddr_t *dst)
{
	struct l2cap_chan *c, *c1 = NULL;

	read_lock(&chan_list_lock);

	list_for_each_entry(c, &chan_list, global_l) {
		struct sock *sk = c->sk;

		if (state && c->state != state)
			continue;

		if (c->scid == cid) {
			int src_match, dst_match;
			int src_any, dst_any;

			/* Exact match. */
			src_match = !bacmp(&bt_sk(sk)->src, src);
			dst_match = !bacmp(&bt_sk(sk)->dst, dst);
			if (src_match && dst_match) {
				read_unlock(&chan_list_lock);
				return c;
			}

			/* Closest match */
			src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
			dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
			if ((src_match && dst_any) || (src_any && dst_match) ||
			    (src_any && dst_any))
				c1 = c;
		}
	}

	read_unlock(&chan_list_lock);

	return c1;
}
1205
/* Handle an incoming LE connection: if a parent channel is listening
 * on the LE data CID, spawn a child channel for the new link.
 */
static void l2cap_le_conn_ready(struct l2cap_conn *conn)
{
	struct sock *parent, *sk;
	struct l2cap_chan *chan, *pchan;

	BT_DBG("");

	/* Check if we have socket listening on cid */
	pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
					  conn->src, conn->dst);
	if (!pchan)
		return;

	parent = pchan->sk;

	/* Keep the parent socket locked while the child channel is
	 * created and attached, so the listener cannot change under us.
	 */
	lock_sock(parent);

	chan = pchan->ops->new_connection(pchan);
	if (!chan)
		goto clean;

	sk = chan->sk;

	/* Keep the underlying link alive while the channel exists. */
	hci_conn_hold(conn->hcon);
	conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;

	bacpy(&bt_sk(sk)->src, conn->src);
	bacpy(&bt_sk(sk)->dst, conn->dst);

	l2cap_chan_add(conn, chan);

	l2cap_chan_ready(chan);

clean:
	release_sock(parent);
}
1242
/* Called when the underlying HCI link is up: move every channel on
 * this connection forward (security for LE, config start for BR/EDR).
 */
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan;
	struct hci_conn *hcon = conn->hcon;

	BT_DBG("conn %p", conn);

	/* Incoming LE link: look for a listener and spawn a channel. */
	if (!hcon->out && hcon->type == LE_LINK)
		l2cap_le_conn_ready(conn);

	/* Outgoing LE link: raise security before channels proceed. */
	if (hcon->out && hcon->type == LE_LINK)
		smp_conn_security(hcon, hcon->pending_sec_level);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {

		l2cap_chan_lock(chan);

		/* The A2MP fixed channel manages itself; skip it here. */
		if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (hcon->type == LE_LINK) {
			/* LE channels are ready once security is in place. */
			if (smp_conn_security(hcon, chan->sec_level))
				l2cap_chan_ready(chan);

		} else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			/* Connectionless/raw channels need no config
			 * exchange: mark them connected right away.
			 */
			struct sock *sk = chan->sk;
			__clear_chan_timer(chan);
			lock_sock(sk);
			__l2cap_state_change(chan, BT_CONNECTED);
			sk->sk_state_change(sk);
			release_sock(sk);

		} else if (chan->state == BT_CONNECT)
			l2cap_do_start(chan);

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
1287
1288 /* Notify sockets that we cannot guaranty reliability anymore */
1289 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1290 {
1291 struct l2cap_chan *chan;
1292
1293 BT_DBG("conn %p", conn);
1294
1295 mutex_lock(&conn->chan_lock);
1296
1297 list_for_each_entry(chan, &conn->chan_l, list) {
1298 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1299 l2cap_chan_set_err(chan, err);
1300 }
1301
1302 mutex_unlock(&conn->chan_lock);
1303 }
1304
1305 static void l2cap_info_timeout(struct work_struct *work)
1306 {
1307 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1308 info_timer.work);
1309
1310 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
1311 conn->info_ident = 0;
1312
1313 l2cap_conn_start(conn);
1314 }
1315
/* Tear down an L2CAP connection: kill every channel, stop pending
 * timers and free the connection object.
 */
static void l2cap_conn_del(struct hci_conn *hcon, int err)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan, *l;

	if (!conn)
		return;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	kfree_skb(conn->rx_skb);

	mutex_lock(&conn->chan_lock);

	/* Kill channels */
	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
		/* Take an extra reference: l2cap_chan_del() unlinks the
		 * channel, and we still need it alive for close()/put.
		 */
		l2cap_chan_hold(chan);
		l2cap_chan_lock(chan);

		l2cap_chan_del(chan, err);

		l2cap_chan_unlock(chan);

		/* close() is invoked outside the channel lock. */
		chan->ops->close(chan);
		l2cap_chan_put(chan);
	}

	mutex_unlock(&conn->chan_lock);

	hci_chan_del(conn->hchan);

	/* The info timer is only armed after the request was sent. */
	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		cancel_delayed_work_sync(&conn->info_timer);

	/* LE only: stop the SMP security timer and free its context. */
	if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) {
		cancel_delayed_work_sync(&conn->security_timer);
		smp_chan_destroy(conn);
	}

	hcon->l2cap_data = NULL;
	kfree(conn);
}
1358
1359 static void security_timeout(struct work_struct *work)
1360 {
1361 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1362 security_timer.work);
1363
1364 BT_DBG("conn %p", conn);
1365
1366 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags)) {
1367 smp_chan_destroy(conn);
1368 l2cap_conn_del(conn->hcon, ETIMEDOUT);
1369 }
1370 }
1371
/* Allocate and initialize the L2CAP connection object for @hcon.
 * Returns the existing connection if one is already attached (or
 * when @status is non-zero), NULL on failure.
 */
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct hci_chan *hchan;

	if (conn || status)
		return conn;

	hchan = hci_chan_create(hcon);
	if (!hchan)
		return NULL;

	conn = kzalloc(sizeof(struct l2cap_conn), GFP_KERNEL);
	if (!conn) {
		hci_chan_del(hchan);
		return NULL;
	}

	hcon->l2cap_data = conn;
	conn->hcon = hcon;
	conn->hchan = hchan;

	BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);

	/* Pick the MTU matching the transport of the underlying link. */
	switch (hcon->type) {
	case AMP_LINK:
		conn->mtu = hcon->hdev->block_mtu;
		break;

	case LE_LINK:
		if (hcon->hdev->le_mtu) {
			conn->mtu = hcon->hdev->le_mtu;
			break;
		}
		/* fall through */

	default:
		conn->mtu = hcon->hdev->acl_mtu;
		break;
	}

	conn->src = &hcon->hdev->bdaddr;
	conn->dst = &hcon->dst;

	conn->feat_mask = 0;

	spin_lock_init(&conn->lock);
	mutex_init(&conn->chan_lock);

	INIT_LIST_HEAD(&conn->chan_l);

	/* LE links run SMP; BR/EDR links exchange an info request. */
	if (hcon->type == LE_LINK)
		INIT_DELAYED_WORK(&conn->security_timer, security_timeout);
	else
		INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);

	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	return conn;
}
1432
1433 /* ---- Socket interface ---- */
1434
/* Find a channel with the given psm and source/destination bdaddr.
 * Returns an exact match if one exists, otherwise the closest
 * wildcard (BDADDR_ANY) match.
 */
1438 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
1439 bdaddr_t *src,
1440 bdaddr_t *dst)
1441 {
1442 struct l2cap_chan *c, *c1 = NULL;
1443
1444 read_lock(&chan_list_lock);
1445
1446 list_for_each_entry(c, &chan_list, global_l) {
1447 struct sock *sk = c->sk;
1448
1449 if (state && c->state != state)
1450 continue;
1451
1452 if (c->psm == psm) {
1453 int src_match, dst_match;
1454 int src_any, dst_any;
1455
1456 /* Exact match. */
1457 src_match = !bacmp(&bt_sk(sk)->src, src);
1458 dst_match = !bacmp(&bt_sk(sk)->dst, dst);
1459 if (src_match && dst_match) {
1460 read_unlock(&chan_list_lock);
1461 return c;
1462 }
1463
1464 /* Closest match */
1465 src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
1466 dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
1467 if ((src_match && dst_any) || (src_any && dst_match) ||
1468 (src_any && dst_any))
1469 c1 = c;
1470 }
1471 }
1472
1473 read_unlock(&chan_list_lock);
1474
1475 return c1;
1476 }
1477
/* Initiate an outgoing connection for @chan to @dst, creating the
 * underlying HCI link if necessary. Returns 0 on success (or when a
 * connection attempt is already in progress), a negative errno
 * otherwise.
 */
int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
		       bdaddr_t *dst, u8 dst_type)
{
	struct sock *sk = chan->sk;
	bdaddr_t *src = &bt_sk(sk)->src;
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	__u8 auth_type;
	int err;

	BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", src, dst,
	       dst_type, __le16_to_cpu(psm));

	hdev = hci_get_route(dst, src);
	if (!hdev)
		return -EHOSTUNREACH;

	hci_dev_lock(hdev);

	l2cap_chan_lock(chan);

	/* PSM must be odd and lsb of upper byte must be 0 */
	if ((__le16_to_cpu(psm) & 0x0101) != 0x0001 && !cid &&
	    chan->chan_type != L2CAP_CHAN_RAW) {
		err = -EINVAL;
		goto done;
	}

	/* Connection-oriented channels need either a PSM or a CID. */
	if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !(psm || cid)) {
		err = -EINVAL;
		goto done;
	}

	/* ERTM/streaming are only valid when not disabled by module param. */
	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		break;
	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		if (!disable_ertm)
			break;
		/* fall through */
	default:
		err = -ENOTSUPP;
		goto done;
	}

	switch (chan->state) {
	case BT_CONNECT:
	case BT_CONNECT2:
	case BT_CONFIG:
		/* Already connecting */
		err = 0;
		goto done;

	case BT_CONNECTED:
		/* Already connected */
		err = -EISCONN;
		goto done;

	case BT_OPEN:
	case BT_BOUND:
		/* Can connect */
		break;

	default:
		err = -EBADFD;
		goto done;
	}

	/* Set destination address and psm */
	lock_sock(sk);
	bacpy(&bt_sk(sk)->dst, dst);
	release_sock(sk);

	chan->psm = psm;
	chan->dcid = cid;

	auth_type = l2cap_get_auth_type(chan);

	/* The LE data CID selects an LE link; everything else goes
	 * over ACL.
	 */
	if (chan->dcid == L2CAP_CID_LE_DATA)
		hcon = hci_connect(hdev, LE_LINK, dst, dst_type,
				   chan->sec_level, auth_type);
	else
		hcon = hci_connect(hdev, ACL_LINK, dst, dst_type,
				   chan->sec_level, auth_type);

	if (IS_ERR(hcon)) {
		err = PTR_ERR(hcon);
		goto done;
	}

	conn = l2cap_conn_add(hcon, 0);
	if (!conn) {
		hci_conn_put(hcon);
		err = -ENOMEM;
		goto done;
	}

	if (hcon->type == LE_LINK) {
		err = 0;

		/* Only a single channel is allowed on an LE link. */
		if (!list_empty(&conn->chan_l)) {
			err = -EBUSY;
			hci_conn_put(hcon);
		}

		if (err)
			goto done;
	}

	/* Update source addr of the socket */
	bacpy(src, conn->src);

	/* NOTE(review): the channel lock is dropped around
	 * l2cap_chan_add(), presumably because it takes
	 * conn->chan_lock which nests outside the channel lock —
	 * confirm against the lock ordering used elsewhere.
	 */
	l2cap_chan_unlock(chan);
	l2cap_chan_add(conn, chan);
	l2cap_chan_lock(chan);

	l2cap_state_change(chan, BT_CONNECT);
	__set_chan_timer(chan, sk->sk_sndtimeo);

	/* The link may already be up; move straight to the next phase. */
	if (hcon->state == BT_CONNECTED) {
		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			__clear_chan_timer(chan);
			if (l2cap_chan_check_security(chan))
				l2cap_state_change(chan, BT_CONNECTED);
		} else
			l2cap_do_start(chan);
	}

	err = 0;

done:
	l2cap_chan_unlock(chan);
	hci_dev_unlock(hdev);
	hci_dev_put(hdev);
	return err;
}
1616
/* Sleep until all outstanding ERTM I-frames have been acknowledged,
 * the channel loses its connection, the socket reports an error, or
 * a signal arrives. Called with the socket locked; the lock is
 * dropped while sleeping. Returns 0 on success, negative errno.
 */
int __l2cap_wait_ack(struct sock *sk)
{
	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;
	int timeo = HZ/5;

	add_wait_queue(sk_sleep(sk), &wait);
	set_current_state(TASK_INTERRUPTIBLE);
	while (chan->unacked_frames > 0 && chan->conn) {
		if (!timeo)
			timeo = HZ/5;

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}

		/* Let the receive path run while we wait. */
		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);
		set_current_state(TASK_INTERRUPTIBLE);

		err = sock_error(sk);
		if (err)
			break;
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);
	return err;
}
1648
1649 static void l2cap_monitor_timeout(struct work_struct *work)
1650 {
1651 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1652 monitor_timer.work);
1653
1654 BT_DBG("chan %p", chan);
1655
1656 l2cap_chan_lock(chan);
1657
1658 if (!chan->conn) {
1659 l2cap_chan_unlock(chan);
1660 l2cap_chan_put(chan);
1661 return;
1662 }
1663
1664 l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);
1665
1666 l2cap_chan_unlock(chan);
1667 l2cap_chan_put(chan);
1668 }
1669
1670 static void l2cap_retrans_timeout(struct work_struct *work)
1671 {
1672 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1673 retrans_timer.work);
1674
1675 BT_DBG("chan %p", chan);
1676
1677 l2cap_chan_lock(chan);
1678
1679 if (!chan->conn) {
1680 l2cap_chan_unlock(chan);
1681 l2cap_chan_put(chan);
1682 return;
1683 }
1684
1685 l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
1686 l2cap_chan_unlock(chan);
1687 l2cap_chan_put(chan);
1688 }
1689
1690 static void l2cap_streaming_send(struct l2cap_chan *chan,
1691 struct sk_buff_head *skbs)
1692 {
1693 struct sk_buff *skb;
1694 struct l2cap_ctrl *control;
1695
1696 BT_DBG("chan %p, skbs %p", chan, skbs);
1697
1698 skb_queue_splice_tail_init(skbs, &chan->tx_q);
1699
1700 while (!skb_queue_empty(&chan->tx_q)) {
1701
1702 skb = skb_dequeue(&chan->tx_q);
1703
1704 bt_cb(skb)->control.retries = 1;
1705 control = &bt_cb(skb)->control;
1706
1707 control->reqseq = 0;
1708 control->txseq = chan->next_tx_seq;
1709
1710 __pack_control(chan, control, skb);
1711
1712 if (chan->fcs == L2CAP_FCS_CRC16) {
1713 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1714 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1715 }
1716
1717 l2cap_do_send(chan, skb);
1718
1719 BT_DBG("Sent txseq %u", control->txseq);
1720
1721 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1722 chan->frames_sent++;
1723 }
1724 }
1725
/* Transmit queued I-frames while in the XMIT state, up to the
 * remote's transmit window. Returns the number of frames sent, 0 if
 * the remote is busy, or -ENOTCONN.
 */
static int l2cap_ertm_send(struct l2cap_chan *chan)
{
	struct sk_buff *skb, *tx_skb;
	struct l2cap_ctrl *control;
	int sent = 0;

	BT_DBG("chan %p", chan);

	if (chan->state != BT_CONNECTED)
		return -ENOTCONN;

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return 0;

	while (chan->tx_send_head &&
	       chan->unacked_frames < chan->remote_tx_win &&
	       chan->tx_state == L2CAP_TX_STATE_XMIT) {

		skb = chan->tx_send_head;

		bt_cb(skb)->control.retries = 1;
		control = &bt_cb(skb)->control;

		/* Piggy-back an F-bit if one is owed to the remote. */
		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control->final = 1;

		/* Every I-frame also acks everything received so far. */
		control->reqseq = chan->buffer_seq;
		chan->last_acked_seq = chan->buffer_seq;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		/* Clone after data has been modified. Data is assumed to be
		   read-only (for locking purposes) on cloned sk_buffs.
		 */
		tx_skb = skb_clone(skb, GFP_KERNEL);

		if (!tx_skb)
			break;

		__set_retrans_timer(chan);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->unacked_frames++;
		chan->frames_sent++;
		sent++;

		/* Advance tx_send_head; the original stays queued for
		 * possible retransmission.
		 */
		if (skb_queue_is_last(&chan->tx_q, skb))
			chan->tx_send_head = NULL;
		else
			chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);

		l2cap_do_send(chan, tx_skb);
		BT_DBG("Sent txseq %u", control->txseq);
	}

	BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
	       chan->unacked_frames, skb_queue_len(&chan->tx_q));

	return sent;
}
1792
/* Retransmit every frame currently queued on chan->retrans_list.
 * Enforces the max_tx retry limit and disconnects the channel when
 * it is exceeded.
 */
static void l2cap_ertm_resend(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	struct sk_buff *skb;
	struct sk_buff *tx_skb;
	u16 seq;

	BT_DBG("chan %p", chan);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
		seq = l2cap_seq_list_pop(&chan->retrans_list);

		skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
		if (!skb) {
			BT_DBG("Error: Can't retransmit seq %d, frame missing",
			       seq);
			continue;
		}

		bt_cb(skb)->control.retries++;
		/* Work on a local copy so the queued frame's control
		 * fields are not modified in place.
		 */
		control = bt_cb(skb)->control;

		if (chan->max_tx != 0 &&
		    bt_cb(skb)->control.retries > chan->max_tx) {
			BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
			l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		/* Refresh reqseq/final for the current receive state. */
		control.reqseq = chan->buffer_seq;
		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control.final = 1;
		else
			control.final = 0;

		if (skb_cloned(skb)) {
			/* Cloned sk_buffs are read-only, so we need a
			 * writeable copy
			 */
			tx_skb = skb_copy(skb, GFP_KERNEL);
		} else {
			tx_skb = skb_clone(skb, GFP_KERNEL);
		}

		if (!tx_skb) {
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		/* Update skb contents */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
			put_unaligned_le32(__pack_extended_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		} else {
			put_unaligned_le16(__pack_enhanced_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		}

		/* Recompute the FCS over the rewritten frame. */
		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) tx_skb->data, tx_skb->len);
			put_unaligned_le16(fcs, skb_put(tx_skb,
							L2CAP_FCS_SIZE));
		}

		l2cap_do_send(chan, tx_skb);

		BT_DBG("Resent txseq %d", control.txseq);

		chan->last_acked_seq = chan->buffer_seq;
	}
}
1868
/* Retransmit the single frame requested by control->reqseq. */
static void l2cap_retransmit(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);

	l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
	l2cap_ertm_resend(chan);
}
1877
/* Retransmit all unacked frames starting at control->reqseq (REJ
 * style recovery). A set P-bit schedules an F-bit in the reply.
 */
static void l2cap_retransmit_all(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	if (control->poll)
		set_bit(CONN_SEND_FBIT, &chan->conn_state);

	l2cap_seq_list_clear(&chan->retrans_list);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	if (chan->unacked_frames) {
		/* Skip ahead to the first frame the remote asked for
		 * (or the first not-yet-sent frame, whichever is first).
		 */
		skb_queue_walk(&chan->tx_q, skb) {
			if (bt_cb(skb)->control.txseq == control->reqseq ||
			    skb == chan->tx_send_head)
				break;
		}

		/* Queue every already-sent frame from there for resend. */
		skb_queue_walk_from(&chan->tx_q, skb) {
			if (skb == chan->tx_send_head)
				break;

			l2cap_seq_list_append(&chan->retrans_list,
					      bt_cb(skb)->control.txseq);
		}

		l2cap_ertm_resend(chan);
	}
}
1911
/* Acknowledge received I-frames. Sends an RNR while locally busy;
 * otherwise tries to piggy-back the ack on outgoing data and only
 * emits an explicit RR once the backlog reaches 3/4 of the ack
 * window, leaving smaller backlogs to the ack timer.
 */
static void l2cap_send_ack(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
					 chan->last_acked_seq);
	int threshold;

	BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
	       chan, chan->last_acked_seq, chan->buffer_seq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    chan->rx_state == L2CAP_RX_STATE_RECV) {
		/* Locally busy: tell the remote to stop sending. */
		__clear_ack_timer(chan);
		control.super = L2CAP_SUPER_RNR;
		control.reqseq = chan->buffer_seq;
		l2cap_send_sframe(chan, &control);
	} else {
		if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
			l2cap_ertm_send(chan);
			/* If any i-frames were sent, they included an ack */
			if (chan->buffer_seq == chan->last_acked_seq)
				frames_to_ack = 0;
		}

		/* Ack now if the window is 3/4ths full.
		 * Calculate without mul or div
		 */
		threshold = chan->ack_win;
		threshold += threshold << 1;
		threshold >>= 2;

		BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
		       threshold);

		if (frames_to_ack >= threshold) {
			__clear_ack_timer(chan);
			control.super = L2CAP_SUPER_RR;
			control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &control);
			frames_to_ack = 0;
		}

		/* Anything still unacked is left to the ack timer. */
		if (frames_to_ack)
			__set_ack_timer(chan);
	}
}
1961
/* Copy @len bytes of user data from @msg into @skb: the first @count
 * bytes fill the skb's linear area, the remainder goes into a chain
 * of continuation sk_buffs on the frag_list, each bounded by the
 * connection MTU. Returns bytes copied or a negative errno.
 */
static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
					 struct msghdr *msg, int len,
					 int count, struct sk_buff *skb)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff **frag;
	int sent = 0;

	if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
		return -EFAULT;

	sent += count;
	len -= count;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		struct sk_buff *tmp;

		count = min_t(unsigned int, conn->mtu, len);

		tmp = chan->ops->alloc_skb(chan, count,
					   msg->msg_flags & MSG_DONTWAIT);
		if (IS_ERR(tmp))
			return PTR_ERR(tmp);

		/* Link the fragment before copying so the caller can
		 * free the whole chain through @skb on failure.
		 */
		*frag = tmp;

		if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
			return -EFAULT;

		(*frag)->priority = skb->priority;

		sent += count;
		len -= count;

		/* Keep the parent skb's length accounting in sync. */
		skb->len += (*frag)->len;
		skb->data_len += (*frag)->len;

		frag = &(*frag)->next;
	}

	return sent;
}
2006
2007 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
2008 struct msghdr *msg, size_t len,
2009 u32 priority)
2010 {
2011 struct l2cap_conn *conn = chan->conn;
2012 struct sk_buff *skb;
2013 int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
2014 struct l2cap_hdr *lh;
2015
2016 BT_DBG("chan %p len %zu priority %u", chan, len, priority);
2017
2018 count = min_t(unsigned int, (conn->mtu - hlen), len);
2019
2020 skb = chan->ops->alloc_skb(chan, count + hlen,
2021 msg->msg_flags & MSG_DONTWAIT);
2022 if (IS_ERR(skb))
2023 return skb;
2024
2025 skb->priority = priority;
2026
2027 /* Create L2CAP header */
2028 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2029 lh->cid = cpu_to_le16(chan->dcid);
2030 lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
2031 put_unaligned(chan->psm, skb_put(skb, L2CAP_PSMLEN_SIZE));
2032
2033 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2034 if (unlikely(err < 0)) {
2035 kfree_skb(skb);
2036 return ERR_PTR(err);
2037 }
2038 return skb;
2039 }
2040
2041 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
2042 struct msghdr *msg, size_t len,
2043 u32 priority)
2044 {
2045 struct l2cap_conn *conn = chan->conn;
2046 struct sk_buff *skb;
2047 int err, count;
2048 struct l2cap_hdr *lh;
2049
2050 BT_DBG("chan %p len %zu", chan, len);
2051
2052 count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
2053
2054 skb = chan->ops->alloc_skb(chan, count + L2CAP_HDR_SIZE,
2055 msg->msg_flags & MSG_DONTWAIT);
2056 if (IS_ERR(skb))
2057 return skb;
2058
2059 skb->priority = priority;
2060
2061 /* Create L2CAP header */
2062 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2063 lh->cid = cpu_to_le16(chan->dcid);
2064 lh->len = cpu_to_le16(len);
2065
2066 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2067 if (unlikely(err < 0)) {
2068 kfree_skb(skb);
2069 return ERR_PTR(err);
2070 }
2071 return skb;
2072 }
2073
/* Allocate and fill one ERTM/streaming I-frame PDU. The control
 * field is zeroed here and filled in later by the transmit path;
 * @sdulen is non-zero only for the SAR start fragment.
 */
static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
					       struct msghdr *msg, size_t len,
					       u16 sdulen)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	/* Header size depends on extended vs enhanced control field. */
	hlen = __ertm_hdr_size(chan);

	if (sdulen)
		hlen += L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, count + hlen,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	/* Control header is populated later */
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	bt_cb(skb)->control.fcs = chan->fcs;
	bt_cb(skb)->control.retries = 0;
	return skb;
}
2127
/* Split an SDU from @msg into I-frame PDUs queued on @seg_queue.
 * The first PDU of a segmented SDU carries the SDU length (SAR
 * start); subsequent ones are continue/end fragments. Returns 0 or
 * a negative errno (the queue is purged on failure).
 */
static int l2cap_segment_sdu(struct l2cap_chan *chan,
			     struct sk_buff_head *seg_queue,
			     struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	u16 sdu_len;
	size_t pdu_len;
	u8 sar;

	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);

	/* It is critical that ERTM PDUs fit in a single HCI fragment,
	 * so fragmented skbs are not used. The HCI layer's handling
	 * of fragmented skbs is not compatible with ERTM's queueing.
	 */

	/* PDU size is derived from the HCI MTU */
	pdu_len = chan->conn->mtu;

	pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);

	/* Adjust for largest possible L2CAP overhead. */
	if (chan->fcs)
		pdu_len -= L2CAP_FCS_SIZE;

	pdu_len -= __ertm_hdr_size(chan);

	/* Remote device may have requested smaller PDUs */
	pdu_len = min_t(size_t, pdu_len, chan->remote_mps);

	if (len <= pdu_len) {
		/* Whole SDU fits one PDU: no SDU length field needed. */
		sar = L2CAP_SAR_UNSEGMENTED;
		sdu_len = 0;
		pdu_len = len;
	} else {
		sar = L2CAP_SAR_START;
		sdu_len = len;
		/* Start fragment loses payload to the SDU length field. */
		pdu_len -= L2CAP_SDULEN_SIZE;
	}

	while (len > 0) {
		skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);

		if (IS_ERR(skb)) {
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		bt_cb(skb)->control.sar = sar;
		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;
		if (sdu_len) {
			/* Only the first fragment carries the SDU length;
			 * later ones regain that payload space.
			 */
			sdu_len = 0;
			pdu_len += L2CAP_SDULEN_SIZE;
		}

		if (len <= pdu_len) {
			sar = L2CAP_SAR_END;
			pdu_len = len;
		} else {
			sar = L2CAP_SAR_CONTINUE;
		}
	}

	return 0;
}
2195
2196 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
2197 u32 priority)
2198 {
2199 struct sk_buff *skb;
2200 int err;
2201 struct sk_buff_head seg_queue;
2202
2203 /* Connectionless channel */
2204 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
2205 skb = l2cap_create_connless_pdu(chan, msg, len, priority);
2206 if (IS_ERR(skb))
2207 return PTR_ERR(skb);
2208
2209 l2cap_do_send(chan, skb);
2210 return len;
2211 }
2212
2213 switch (chan->mode) {
2214 case L2CAP_MODE_BASIC:
2215 /* Check outgoing MTU */
2216 if (len > chan->omtu)
2217 return -EMSGSIZE;
2218
2219 /* Create a basic PDU */
2220 skb = l2cap_create_basic_pdu(chan, msg, len, priority);
2221 if (IS_ERR(skb))
2222 return PTR_ERR(skb);
2223
2224 l2cap_do_send(chan, skb);
2225 err = len;
2226 break;
2227
2228 case L2CAP_MODE_ERTM:
2229 case L2CAP_MODE_STREAMING:
2230 /* Check outgoing MTU */
2231 if (len > chan->omtu) {
2232 err = -EMSGSIZE;
2233 break;
2234 }
2235
2236 __skb_queue_head_init(&seg_queue);
2237
2238 /* Do segmentation before calling in to the state machine,
2239 * since it's possible to block while waiting for memory
2240 * allocation.
2241 */
2242 err = l2cap_segment_sdu(chan, &seg_queue, msg, len);
2243
2244 /* The channel could have been closed while segmenting,
2245 * check that it is still connected.
2246 */
2247 if (chan->state != BT_CONNECTED) {
2248 __skb_queue_purge(&seg_queue);
2249 err = -ENOTCONN;
2250 }
2251
2252 if (err)
2253 break;
2254
2255 if (chan->mode == L2CAP_MODE_ERTM)
2256 l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
2257 else
2258 l2cap_streaming_send(chan, &seg_queue);
2259
2260 err = len;
2261
2262 /* If the skbs were not queued for sending, they'll still be in
2263 * seg_queue and need to be purged.
2264 */
2265 __skb_queue_purge(&seg_queue);
2266 break;
2267
2268 default:
2269 BT_DBG("bad state %1.1x", chan->mode);
2270 err = -EBADFD;
2271 }
2272
2273 return err;
2274 }
2275
2276 static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
2277 {
2278 struct l2cap_ctrl control;
2279 u16 seq;
2280
2281 BT_DBG("chan %p, txseq %u", chan, txseq);
2282
2283 memset(&control, 0, sizeof(control));
2284 control.sframe = 1;
2285 control.super = L2CAP_SUPER_SREJ;
2286
2287 for (seq = chan->expected_tx_seq; seq != txseq;
2288 seq = __next_seq(chan, seq)) {
2289 if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
2290 control.reqseq = seq;
2291 l2cap_send_sframe(chan, &control);
2292 l2cap_seq_list_append(&chan->srej_list, seq);
2293 }
2294 }
2295
2296 chan->expected_tx_seq = __next_seq(chan, txseq);
2297 }
2298
2299 static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2300 {
2301 struct l2cap_ctrl control;
2302
2303 BT_DBG("chan %p", chan);
2304
2305 if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2306 return;
2307
2308 memset(&control, 0, sizeof(control));
2309 control.sframe = 1;
2310 control.super = L2CAP_SUPER_SREJ;
2311 control.reqseq = chan->srej_list.tail;
2312 l2cap_send_sframe(chan, &control);
2313 }
2314
/* Re-send SREJ frames for sequence numbers on the srej_list up to
 * (but not including) @txseq. Entries are popped and re-appended, so
 * the initial head is captured to guarantee a single pass.
 */
static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
{
	struct l2cap_ctrl control;
	u16 initial_head;
	u16 seq;

	BT_DBG("chan %p, txseq %u", chan, txseq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;

	/* Capture initial list head to allow only one pass through the list. */
	initial_head = chan->srej_list.head;

	do {
		seq = l2cap_seq_list_pop(&chan->srej_list);
		if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
			break;

		control.reqseq = seq;
		l2cap_send_sframe(chan, &control);
		l2cap_seq_list_append(&chan->srej_list, seq);
	} while (chan->srej_list.head != initial_head);
}
2340
/* Process an incoming acknowledgment: free every transmitted frame
 * up to (but not including) @reqseq and stop the retransmission
 * timer once nothing remains unacked.
 */
static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
{
	struct sk_buff *acked_skb;
	u16 ackseq;

	BT_DBG("chan %p, reqseq %u", chan, reqseq);

	/* Nothing new is being acknowledged. */
	if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
		return;

	BT_DBG("expected_ack_seq %u, unacked_frames %u",
	       chan->expected_ack_seq, chan->unacked_frames);

	for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
	     ackseq = __next_seq(chan, ackseq)) {

		acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
		if (acked_skb) {
			skb_unlink(acked_skb, &chan->tx_q);
			kfree_skb(acked_skb);
			chan->unacked_frames--;
		}
	}

	chan->expected_ack_seq = reqseq;

	if (chan->unacked_frames == 0)
		__clear_retrans_timer(chan);

	BT_DBG("unacked_frames %u", chan->unacked_frames);
}
2372
2373 static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2374 {
2375 BT_DBG("chan %p", chan);
2376
2377 chan->expected_tx_seq = chan->buffer_seq;
2378 l2cap_seq_list_clear(&chan->srej_list);
2379 skb_queue_purge(&chan->srej_q);
2380 chan->rx_state = L2CAP_RX_STATE_RECV;
2381 }
2382
/* ERTM transmit state machine, XMIT state: frames may be sent
 * immediately as events arrive.
 */
static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
				struct l2cap_ctrl *control,
				struct sk_buff_head *skbs, u8 event)
{
	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
	       event);

	switch (event) {
	case L2CAP_EV_DATA_REQUEST:
		/* Queue the new frames and transmit what the window allows. */
		if (chan->tx_send_head == NULL)
			chan->tx_send_head = skb_peek(skbs);

		skb_queue_splice_tail_init(skbs, &chan->tx_q);
		l2cap_ertm_send(chan);
		break;
	case L2CAP_EV_LOCAL_BUSY_DETECTED:
		BT_DBG("Enter LOCAL_BUSY");
		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
			/* The SREJ_SENT state must be aborted if we are to
			 * enter the LOCAL_BUSY state.
			 */
			l2cap_abort_rx_srej_sent(chan);
		}

		l2cap_send_ack(chan);

		break;
	case L2CAP_EV_LOCAL_BUSY_CLEAR:
		BT_DBG("Exit LOCAL_BUSY");
		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
			struct l2cap_ctrl local_control;

			/* An RNR went out while busy: poll the remote so
			 * it learns we can receive again.
			 */
			memset(&local_control, 0, sizeof(local_control));
			local_control.sframe = 1;
			local_control.super = L2CAP_SUPER_RR;
			local_control.poll = 1;
			local_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &local_control);

			chan->retry_count = 1;
			__set_monitor_timer(chan);
			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		}
		break;
	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
		l2cap_process_reqseq(chan, control->reqseq);
		break;
	case L2CAP_EV_EXPLICIT_POLL:
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		__clear_ack_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RETRANS_TO:
		/* Retransmission timer fired: poll the remote and wait
		 * for the F-bit response in WAIT_F.
		 */
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RECV_FBIT:
		/* Nothing to process */
		break;
	default:
		break;
	}
}
2454
2455 static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2456 struct l2cap_ctrl *control,
2457 struct sk_buff_head *skbs, u8 event)
2458 {
2459 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2460 event);
2461
2462 switch (event) {
2463 case L2CAP_EV_DATA_REQUEST:
2464 if (chan->tx_send_head == NULL)
2465 chan->tx_send_head = skb_peek(skbs);
2466 /* Queue data, but don't send. */
2467 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2468 break;
2469 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2470 BT_DBG("Enter LOCAL_BUSY");
2471 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2472
2473 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2474 /* The SREJ_SENT state must be aborted if we are to
2475 * enter the LOCAL_BUSY state.
2476 */
2477 l2cap_abort_rx_srej_sent(chan);
2478 }
2479
2480 l2cap_send_ack(chan);
2481
2482 break;
2483 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2484 BT_DBG("Exit LOCAL_BUSY");
2485 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2486
2487 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2488 struct l2cap_ctrl local_control;
2489 memset(&local_control, 0, sizeof(local_control));
2490 local_control.sframe = 1;
2491 local_control.super = L2CAP_SUPER_RR;
2492 local_control.poll = 1;
2493 local_control.reqseq = chan->buffer_seq;
2494 l2cap_send_sframe(chan, &local_control);
2495
2496 chan->retry_count = 1;
2497 __set_monitor_timer(chan);
2498 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2499 }
2500 break;
2501 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2502 l2cap_process_reqseq(chan, control->reqseq);
2503
2504 /* Fall through */
2505
2506 case L2CAP_EV_RECV_FBIT:
2507 if (control && control->final) {
2508 __clear_monitor_timer(chan);
2509 if (chan->unacked_frames > 0)
2510 __set_retrans_timer(chan);
2511 chan->retry_count = 0;
2512 chan->tx_state = L2CAP_TX_STATE_XMIT;
2513 BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state);
2514 }
2515 break;
2516 case L2CAP_EV_EXPLICIT_POLL:
2517 /* Ignore */
2518 break;
2519 case L2CAP_EV_MONITOR_TO:
2520 if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
2521 l2cap_send_rr_or_rnr(chan, 1);
2522 __set_monitor_timer(chan);
2523 chan->retry_count++;
2524 } else {
2525 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
2526 }
2527 break;
2528 default:
2529 break;
2530 }
2531 }
2532
2533 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
2534 struct sk_buff_head *skbs, u8 event)
2535 {
2536 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2537 chan, control, skbs, event, chan->tx_state);
2538
2539 switch (chan->tx_state) {
2540 case L2CAP_TX_STATE_XMIT:
2541 l2cap_tx_state_xmit(chan, control, skbs, event);
2542 break;
2543 case L2CAP_TX_STATE_WAIT_F:
2544 l2cap_tx_state_wait_f(chan, control, skbs, event);
2545 break;
2546 default:
2547 /* Ignore event */
2548 break;
2549 }
2550 }
2551
/* Feed a received frame's reqseq/F-bit acknowledgement information
 * into the transmit state machine.
 */
static void l2cap_pass_to_tx(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);
	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
}
2558
/* Feed only the F-bit of a received frame into the transmit state
 * machine (no new acknowledgement number to process).
 */
static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
				  struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);
	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
}
2565
/* Copy frame to all raw sockets on that connection.  Holds
 * conn->chan_lock while walking the channel list; each raw channel
 * gets its own clone of the skb.
 */
static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct sk_buff *nskb;
	struct l2cap_chan *chan;

	BT_DBG("conn %p", conn);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		struct sock *sk = chan->sk;
		if (chan->chan_type != L2CAP_CHAN_RAW)
			continue;

		/* Don't send frame to the socket it came from */
		if (skb->sk == sk)
			continue;
		nskb = skb_clone(skb, GFP_KERNEL);
		if (!nskb)
			continue;

		/* recv() takes ownership on success; free the clone only
		 * when delivery fails.
		 */
		if (chan->ops->recv(chan, nskb))
			kfree_skb(nskb);
	}

	mutex_unlock(&conn->chan_lock);
}
2594
/* ---- L2CAP signalling commands ---- */

/* Allocate and build a signalling command PDU: L2CAP header on the
 * signalling CID (LE or BR/EDR), command header, then 'dlen' bytes of
 * payload.  Payload that does not fit in a single conn->mtu-sized skb
 * is carried in continuation fragments chained on frag_list (these
 * have no L2CAP header).  Returns NULL on allocation failure.
 */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
				       u8 ident, u16 dlen, void *data)
{
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	int len, count;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
	       conn, code, ident, dlen);

	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, conn->mtu, len);

	skb = bt_skb_alloc(count, GFP_KERNEL);
	if (!skb)
		return NULL;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);

	/* LE links use a dedicated signalling channel */
	if (conn->hcon->type == LE_LINK)
		lh->cid = __constant_cpu_to_le16(L2CAP_CID_LE_SIGNALING);
	else
		lh->cid = __constant_cpu_to_le16(L2CAP_CID_SIGNALING);

	cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->code = code;
	cmd->ident = ident;
	cmd->len = cpu_to_le16(dlen);

	if (dlen) {
		/* Fill the remainder of the first skb with payload */
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		memcpy(skb_put(skb, count), data, count);
		data += count;
	}

	len -= skb->len;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_alloc(count, GFP_KERNEL);
		if (!*frag)
			goto fail;

		memcpy(skb_put(*frag, count), data, count);

		len -= count;
		data += count;

		frag = &(*frag)->next;
	}

	return skb;

fail:
	/* Frees the head skb and any fragments already chained to it */
	kfree_skb(skb);
	return NULL;
}
2658
/* Parse one configuration option at *ptr and advance *ptr past it.
 * Reports the option's type and payload length, and its value: 1, 2
 * and 4 byte payloads are decoded as little-endian integers, any
 * other length is returned as a pointer to the raw payload.
 * Returns the total size consumed (header + payload).
 *
 * NOTE(review): opt->len comes from the remote device and is not
 * bounded against the space remaining in the caller's buffer here;
 * callers rely solely on their own length accounting — verify every
 * call site bounds the walk correctly.
 */
static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
				     unsigned long *val)
{
	struct l2cap_conf_opt *opt = *ptr;
	int len;

	len = L2CAP_CONF_OPT_SIZE + opt->len;
	*ptr += len;

	*type = opt->type;
	*olen = opt->len;

	switch (opt->len) {
	case 1:
		*val = *((u8 *) opt->val);
		break;

	case 2:
		*val = get_unaligned_le16(opt->val);
		break;

	case 4:
		*val = get_unaligned_le32(opt->val);
		break;

	default:
		/* Variable-length option: hand back a pointer to the data */
		*val = (unsigned long) opt->val;
		break;
	}

	BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
	return len;
}
2692
2693 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2694 {
2695 struct l2cap_conf_opt *opt = *ptr;
2696
2697 BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);
2698
2699 opt->type = type;
2700 opt->len = len;
2701
2702 switch (len) {
2703 case 1:
2704 *((u8 *) opt->val) = val;
2705 break;
2706
2707 case 2:
2708 put_unaligned_le16(val, opt->val);
2709 break;
2710
2711 case 4:
2712 put_unaligned_le32(val, opt->val);
2713 break;
2714
2715 default:
2716 memcpy(opt->val, (void *) val, len);
2717 break;
2718 }
2719
2720 *ptr += L2CAP_CONF_OPT_SIZE + len;
2721 }
2722
/* Append an Extended Flow Specification option describing this
 * channel's local QoS parameters.  ERTM channels advertise their
 * negotiated identifiers and default latency/flush values; streaming
 * channels advertise a fixed best-effort spec.  Other modes get no
 * EFS option.
 */
static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
{
	struct l2cap_conf_efs efs;

	switch (chan->mode) {
	case L2CAP_MODE_ERTM:
		efs.id = chan->local_id;
		efs.stype = chan->local_stype;
		efs.msdu = cpu_to_le16(chan->local_msdu);
		efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
		efs.acc_lat = __constant_cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
		efs.flush_to = __constant_cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
		break;

	case L2CAP_MODE_STREAMING:
		efs.id = 1;
		efs.stype = L2CAP_SERV_BESTEFFORT;
		efs.msdu = cpu_to_le16(chan->local_msdu);
		efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
		efs.acc_lat = 0;
		efs.flush_to = 0;
		break;

	default:
		/* No EFS for basic mode */
		return;
	}

	l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
			   (unsigned long) &efs);
}
2753
/* Delayed-work handler for the ack timer: if any received I-frames
 * are still unacknowledged, send an RR/RNR to ack them.  Drops a
 * channel reference at the end (presumably the one taken when the
 * timer was armed — verify against the timer setup path).
 */
static void l2cap_ack_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       ack_timer.work);
	u16 frames_to_ack;

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	/* Frames received since the last acknowledgement we sent */
	frames_to_ack = __seq_offset(chan, chan->buffer_seq,
				     chan->last_acked_seq);

	if (frames_to_ack)
		l2cap_send_rr_or_rnr(chan, 0);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
2773
/* Initialize per-channel ERTM/streaming state: sequence numbers, SDU
 * reassembly state, the tx queue, and AMP move-tracking fields.  For
 * ERTM channels this additionally sets up the rx/tx state machines,
 * the retransmission/monitor/ack timers and the SREJ/retransmit
 * sequence lists.  Returns 0 on success or a negative errno if the
 * sequence lists cannot be allocated.
 */
int l2cap_ertm_init(struct l2cap_chan *chan)
{
	int err;

	chan->next_tx_seq = 0;
	chan->expected_tx_seq = 0;
	chan->expected_ack_seq = 0;
	chan->unacked_frames = 0;
	chan->buffer_seq = 0;
	chan->frames_sent = 0;
	chan->last_acked_seq = 0;
	chan->sdu = NULL;
	chan->sdu_last_frag = NULL;
	chan->sdu_len = 0;

	skb_queue_head_init(&chan->tx_q);

	/* High-speed (AMP) channel move state starts out idle */
	chan->local_amp_id = 0;
	chan->move_id = 0;
	chan->move_state = L2CAP_MOVE_STABLE;
	chan->move_role = L2CAP_MOVE_ROLE_NONE;

	/* Streaming mode needs none of the ERTM machinery below */
	if (chan->mode != L2CAP_MODE_ERTM)
		return 0;

	chan->rx_state = L2CAP_RX_STATE_RECV;
	chan->tx_state = L2CAP_TX_STATE_XMIT;

	INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
	INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
	INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);

	skb_queue_head_init(&chan->srej_q);

	err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
	if (err < 0)
		return err;

	/* On failure, undo the srej_list allocation above */
	err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
	if (err < 0)
		l2cap_seq_list_free(&chan->srej_list);

	return err;
}
2818
2819 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2820 {
2821 switch (mode) {
2822 case L2CAP_MODE_STREAMING:
2823 case L2CAP_MODE_ERTM:
2824 if (l2cap_mode_supported(mode, remote_feat_mask))
2825 return mode;
2826 /* fall through */
2827 default:
2828 return L2CAP_MODE_BASIC;
2829 }
2830 }
2831
/* Extended window size is usable only when high-speed support is
 * enabled and the remote advertises the feature.
 */
static inline bool __l2cap_ews_supported(struct l2cap_chan *chan)
{
	return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
}
2836
/* Extended flow specification is usable only when high-speed support
 * is enabled and the remote advertises the feature.
 */
static inline bool __l2cap_efs_supported(struct l2cap_chan *chan)
{
	return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
}
2841
2842 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
2843 {
2844 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
2845 __l2cap_ews_supported(chan)) {
2846 /* use extended control field */
2847 set_bit(FLAG_EXT_CTRL, &chan->flags);
2848 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
2849 } else {
2850 chan->tx_win = min_t(u16, chan->tx_win,
2851 L2CAP_DEFAULT_TX_WINDOW);
2852 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
2853 }
2854 chan->ack_win = chan->tx_win;
2855 }
2856
/* Build a Configure Request payload for 'chan' into 'data'.  On the
 * first config exchange the channel mode is selected (downgrading to
 * basic mode when the remote lacks ERTM/streaming support), then the
 * MTU, RFC, FCS, EFS and extended-window options are emitted as
 * applicable.  Returns the number of bytes written to 'data'.
 */
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
{
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = chan->mode };
	void *ptr = req->data;
	u16 size;

	BT_DBG("chan %p", chan);

	/* Mode selection happens only before any config traffic */
	if (chan->num_conf_req || chan->num_conf_rsp)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		/* "State 2" devices keep their configured mode as-is */
		if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
			break;

		if (__l2cap_efs_supported(chan))
			set_bit(FLAG_EFS_ENABLE, &chan->flags);

		/* fall through */
	default:
		chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
		break;
	}

done:
	/* MTU option is only needed when it differs from the default */
	if (chan->imtu != L2CAP_DEFAULT_MTU)
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		/* An explicit basic-mode RFC option is only useful when
		 * the remote could have assumed another mode.
		 */
		if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
		    !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
			break;

		rfc.mode = L2CAP_MODE_BASIC;
		rfc.txwin_size = 0;
		rfc.max_transmit = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size = 0;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);
		break;

	case L2CAP_MODE_ERTM:
		rfc.mode = L2CAP_MODE_ERTM;
		rfc.max_transmit = chan->max_tx;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;

		/* PDU must fit in the HCI MTU after ERTM header, SDU
		 * length field and FCS are accounted for.
		 */
		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_txwin_setup(chan);

		/* The RFC option's window field is limited to the
		 * standard range; larger windows go in the EWS option.
		 */
		rfc.txwin_size = min_t(u16, chan->tx_win,
				       L2CAP_DEFAULT_TX_WINDOW);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan);

		if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		if (chan->fcs == L2CAP_FCS_NONE ||
		    test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
			chan->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
		}

		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
					   chan->tx_win);
		break;

	case L2CAP_MODE_STREAMING:
		l2cap_txwin_setup(chan);
		rfc.mode = L2CAP_MODE_STREAMING;
		rfc.txwin_size = 0;
		rfc.max_transmit = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;

		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan);

		if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		if (chan->fcs == L2CAP_FCS_NONE ||
		    test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
			chan->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
		}
		break;
	}

	req->dcid = cpu_to_le16(chan->dcid);
	req->flags = __constant_cpu_to_le16(0);

	return ptr - data;
}
2976
/* Parse the remote's Configure Request (accumulated in
 * chan->conf_req) and build the Configure Response payload into
 * 'data'.  Returns the number of bytes written, or a negative errno
 * when the connection must be refused (incompatible mode, EFS
 * mismatch, or unsupported extended window).
 */
static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
{
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;
	void *req = chan->conf_req;
	int len = chan->conf_len;
	int type, hint, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;
	u8 remote_efs = 0;
	u16 mtu = L2CAP_DEFAULT_MTU;
	u16 result = L2CAP_CONF_SUCCESS;
	u16 size;

	BT_DBG("chan %p", chan);

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);

		hint = type & L2CAP_CONF_HINT;
		type &= L2CAP_CONF_MASK;

		switch (type) {
		case L2CAP_CONF_MTU:
			mtu = val;
			break;

		case L2CAP_CONF_FLUSH_TO:
			chan->flush_to = val;
			break;

		case L2CAP_CONF_QOS:
			break;

		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *) val, olen);
			break;

		case L2CAP_CONF_FCS:
			if (val == L2CAP_FCS_NONE)
				set_bit(CONF_NO_FCS_RECV, &chan->conf_state);
			break;

		case L2CAP_CONF_EFS:
			/* Only trust an EFS option of the expected size.
			 * Previously remote_efs was set unconditionally, so
			 * a short option left 'efs' uninitialized while the
			 * code below still read it (remote-triggerable use
			 * of uninitialized stack memory).
			 */
			if (olen == sizeof(efs)) {
				remote_efs = 1;
				memcpy(&efs, (void *) val, olen);
			}
			break;

		case L2CAP_CONF_EWS:
			/* Extended window requires high-speed support */
			if (!enable_hs)
				return -ECONNREFUSED;

			set_bit(FLAG_EXT_CTRL, &chan->flags);
			set_bit(CONF_EWS_RECV, &chan->conf_state);
			chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
			chan->remote_tx_win = val;
			break;

		default:
			if (hint)
				break;

			/* List unknown non-hint options in the response */
			result = L2CAP_CONF_UNKNOWN;
			*((u8 *) ptr++) = type;
			break;
		}
	}

	if (chan->num_conf_rsp || chan->num_conf_req > 1)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
			chan->mode = l2cap_select_mode(rfc.mode,
						       chan->conn->feat_mask);
			break;
		}

		if (remote_efs) {
			if (__l2cap_efs_supported(chan))
				set_bit(FLAG_EFS_ENABLE, &chan->flags);
			else
				return -ECONNREFUSED;
		}

		if (chan->mode != rfc.mode)
			return -ECONNREFUSED;

		break;
	}

done:
	if (chan->mode != rfc.mode) {
		result = L2CAP_CONF_UNACCEPT;
		rfc.mode = chan->mode;

		/* Mode still disagrees after a full round trip: give up */
		if (chan->num_conf_rsp == 1)
			return -ECONNREFUSED;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);
	}

	if (result == L2CAP_CONF_SUCCESS) {
		/* Configure output options and let the other side know
		 * which ones we don't like. */

		if (mtu < L2CAP_DEFAULT_MIN_MTU)
			result = L2CAP_CONF_UNACCEPT;
		else {
			chan->omtu = mtu;
			set_bit(CONF_MTU_DONE, &chan->conf_state);
		}
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);

		if (remote_efs) {
			/* Service types must match unless one side is
			 * "no traffic".
			 */
			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != chan->local_stype) {

				result = L2CAP_CONF_UNACCEPT;

				if (chan->num_conf_req >= 1)
					return -ECONNREFUSED;

				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs);
			} else {
				/* Send PENDING Conf Rsp */
				result = L2CAP_CONF_PENDING;
				set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
			}
		}

		switch (rfc.mode) {
		case L2CAP_MODE_BASIC:
			chan->fcs = L2CAP_FCS_NONE;
			set_bit(CONF_MODE_DONE, &chan->conf_state);
			break;

		case L2CAP_MODE_ERTM:
			/* With EWS the window came from the EWS option, and
			 * the RFC field is reset to the standard default.
			 */
			if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
				chan->remote_tx_win = rfc.txwin_size;
			else
				rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;

			chan->remote_max_tx = rfc.max_transmit;

			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			rfc.retrans_timeout =
				__constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
			rfc.monitor_timeout =
				__constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					   sizeof(rfc), (unsigned long) &rfc);

			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->remote_id = efs.id;
				chan->remote_stype = efs.stype;
				chan->remote_msdu = le16_to_cpu(efs.msdu);
				chan->remote_flush_to =
					le32_to_cpu(efs.flush_to);
				chan->remote_acc_lat =
					le32_to_cpu(efs.acc_lat);
				chan->remote_sdu_itime =
					le32_to_cpu(efs.sdu_itime);
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs);
			}
			break;

		case L2CAP_MODE_STREAMING:
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
					   (unsigned long) &rfc);

			break;

		default:
			result = L2CAP_CONF_UNACCEPT;

			memset(&rfc, 0, sizeof(rfc));
			rfc.mode = chan->mode;
		}

		if (result == L2CAP_CONF_SUCCESS)
			set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
	}
	rsp->scid = cpu_to_le16(chan->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags = __constant_cpu_to_le16(0);

	return ptr - data;
}
3193
3194 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
3195 void *data, u16 *result)
3196 {
3197 struct l2cap_conf_req *req = data;
3198 void *ptr = req->data;
3199 int type, olen;
3200 unsigned long val;
3201 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3202 struct l2cap_conf_efs efs;
3203
3204 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
3205
3206 while (len >= L2CAP_CONF_OPT_SIZE) {
3207 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3208
3209 switch (type) {
3210 case L2CAP_CONF_MTU:
3211 if (val < L2CAP_DEFAULT_MIN_MTU) {
3212 *result = L2CAP_CONF_UNACCEPT;
3213 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3214 } else
3215 chan->imtu = val;
3216 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
3217 break;
3218
3219 case L2CAP_CONF_FLUSH_TO:
3220 chan->flush_to = val;
3221 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
3222 2, chan->flush_to);
3223 break;
3224
3225 case L2CAP_CONF_RFC:
3226 if (olen == sizeof(rfc))
3227 memcpy(&rfc, (void *)val, olen);
3228
3229 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
3230 rfc.mode != chan->mode)
3231 return -ECONNREFUSED;
3232
3233 chan->fcs = 0;
3234
3235 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3236 sizeof(rfc), (unsigned long) &rfc);
3237 break;
3238
3239 case L2CAP_CONF_EWS:
3240 chan->ack_win = min_t(u16, val, chan->ack_win);
3241 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3242 chan->tx_win);
3243 break;
3244
3245 case L2CAP_CONF_EFS:
3246 if (olen == sizeof(efs))
3247 memcpy(&efs, (void *)val, olen);
3248
3249 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3250 efs.stype != L2CAP_SERV_NOTRAFIC &&
3251 efs.stype != chan->local_stype)
3252 return -ECONNREFUSED;
3253
3254 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
3255 (unsigned long) &efs);
3256 break;
3257 }
3258 }
3259
3260 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
3261 return -ECONNREFUSED;
3262
3263 chan->mode = rfc.mode;
3264
3265 if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
3266 switch (rfc.mode) {
3267 case L2CAP_MODE_ERTM:
3268 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3269 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3270 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3271 if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
3272 chan->ack_win = min_t(u16, chan->ack_win,
3273 rfc.txwin_size);
3274
3275 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3276 chan->local_msdu = le16_to_cpu(efs.msdu);
3277 chan->local_sdu_itime =
3278 le32_to_cpu(efs.sdu_itime);
3279 chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
3280 chan->local_flush_to =
3281 le32_to_cpu(efs.flush_to);
3282 }
3283 break;
3284
3285 case L2CAP_MODE_STREAMING:
3286 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3287 }
3288 }
3289
3290 req->dcid = cpu_to_le16(chan->dcid);
3291 req->flags = __constant_cpu_to_le16(0);
3292
3293 return ptr - data;
3294 }
3295
/* Fill in a minimal Configure Response header (scid, result, flags)
 * with no options, returning the number of bytes written.
 */
static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
				u16 result, u16 flags)
{
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;

	BT_DBG("chan %p", chan);

	rsp->scid = cpu_to_le16(chan->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags = cpu_to_le16(flags);

	return ptr - data;
}
3310
/* Complete a previously deferred Connect Request: send the success
 * Connect Response and, if a Configure Request has not yet been
 * sent for this channel, send the first one.
 */
void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
{
	struct l2cap_conn_rsp rsp;
	struct l2cap_conn *conn = chan->conn;
	u8 buf[128];

	rsp.scid = cpu_to_le16(chan->dcid);
	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
	rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);

	/* Only the first caller sends the Configure Request */
	if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
		return;

	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
		       l2cap_build_conf_req(chan, buf), buf);
	chan->num_conf_req++;
}
3330
/* Extract the final RFC (and extended window) parameters from a
 * remote Configure Response and apply them to the channel.  Only
 * meaningful for ERTM and streaming channels.
 */
static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
{
	int type, olen;
	unsigned long val;
	/* Use sane default values in case a misbehaving remote device
	 * did not send an RFC or extended window size option.
	 */
	u16 txwin_ext = chan->ack_win;
	struct l2cap_conf_rfc rfc = {
		.mode = chan->mode,
		.retrans_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
		.monitor_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
		.max_pdu_size = cpu_to_le16(chan->imtu),
		.txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
	};

	BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);

	if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
		return;

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);

		switch (type) {
		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *)val, olen);
			break;
		case L2CAP_CONF_EWS:
			txwin_ext = val;
			break;
		}
	}

	switch (rfc.mode) {
	case L2CAP_MODE_ERTM:
		chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
		chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
		chan->mps = le16_to_cpu(rfc.max_pdu_size);
		/* The ack window comes from EWS when extended control is
		 * in use, from the RFC option otherwise.
		 */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
		else
			chan->ack_win = min_t(u16, chan->ack_win,
					      rfc.txwin_size);
		break;
	case L2CAP_MODE_STREAMING:
		chan->mps = le16_to_cpu(rfc.max_pdu_size);
	}
}
3381
/* Handle an incoming Command Reject.  If it rejects our outstanding
 * Information Request, stop waiting for the feature mask and proceed
 * with connection setup using what we have.
 */
static inline int l2cap_command_rej(struct l2cap_conn *conn,
				    struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;

	if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
		return 0;

	if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
	    cmd->ident == conn->info_ident) {
		cancel_delayed_work(&conn->info_timer);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
	}

	return 0;
}
3402
/* Handle an incoming Connect Request: find a listening channel for
 * the PSM, perform security checks, create the new channel and send
 * the Connect Response (rsp_code).  May also kick off the feature
 * mask exchange and the first Configure Request.
 */
static void l2cap_connect(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd,
			  u8 *data, u8 rsp_code, u8 amp_id)
{
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct l2cap_chan *chan = NULL, *pchan;
	struct sock *parent, *sk = NULL;
	int result, status = L2CAP_CS_NO_INFO;

	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src, conn->dst);
	if (!pchan) {
		result = L2CAP_CR_BAD_PSM;
		goto sendresp;
	}

	parent = pchan->sk;

	/* Lock order: conn->chan_lock before the parent socket lock */
	mutex_lock(&conn->chan_lock);
	lock_sock(parent);

	/* Check if the ACL is secure enough (if not SDP) */
	if (psm != __constant_cpu_to_le16(L2CAP_PSM_SDP) &&
	    !hci_conn_check_link_mode(conn->hcon)) {
		conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
		result = L2CAP_CR_SEC_BLOCK;
		goto response;
	}

	result = L2CAP_CR_NO_MEM;

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid))
		goto response;

	chan = pchan->ops->new_connection(pchan);
	if (!chan)
		goto response;

	sk = chan->sk;

	hci_conn_hold(conn->hcon);

	bacpy(&bt_sk(sk)->src, conn->src);
	bacpy(&bt_sk(sk)->dst, conn->dst);
	chan->psm  = psm;
	chan->dcid = scid;

	__l2cap_chan_add(conn, chan);

	dcid = chan->scid;

	__set_chan_timer(chan, sk->sk_sndtimeo);

	chan->ident = cmd->ident;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
		if (l2cap_chan_check_security(chan)) {
			if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) {
				/* Userspace defers accept(); answer PENDING */
				__l2cap_state_change(chan, BT_CONNECT2);
				result = L2CAP_CR_PEND;
				status = L2CAP_CS_AUTHOR_PEND;
				chan->ops->defer(chan);
			} else {
				__l2cap_state_change(chan, BT_CONFIG);
				result = L2CAP_CR_SUCCESS;
				status = L2CAP_CS_NO_INFO;
			}
		} else {
			/* Security procedure still in progress */
			__l2cap_state_change(chan, BT_CONNECT2);
			result = L2CAP_CR_PEND;
			status = L2CAP_CS_AUTHEN_PEND;
		}
	} else {
		/* Feature mask not known yet; answer PENDING for now */
		__l2cap_state_change(chan, BT_CONNECT2);
		result = L2CAP_CR_PEND;
		status = L2CAP_CS_NO_INFO;
	}

response:
	release_sock(parent);
	mutex_unlock(&conn->chan_lock);

sendresp:
	rsp.scid   = cpu_to_le16(scid);
	rsp.dcid   = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);

	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
		/* Start the feature mask exchange now so the pending
		 * connection can be completed once it finishes.
		 */
		struct l2cap_info_req info;
		info.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

		l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
			       sizeof(info), &info);
	}

	if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
	    result == L2CAP_CR_SUCCESS) {
		u8 buf[128];
		set_bit(CONF_REQ_SENT, &chan->conf_state);
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf), buf);
		chan->num_conf_req++;
	}
}
3520
/* Signalling dispatch entry for a BR/EDR Connect Request (no AMP) */
static int l2cap_connect_req(struct l2cap_conn *conn,
			     struct l2cap_cmd_hdr *cmd, u8 *data)
{
	l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
	return 0;
}
3527
/* Handle an incoming Connect Response.  The channel is looked up by
 * our source CID when the peer supplied one, otherwise by the ident
 * of the original request.  On success, move to BT_CONFIG and send
 * the first Configure Request; on failure, tear the channel down.
 */
static inline int l2cap_connect_rsp(struct l2cap_conn *conn,
				    struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct l2cap_chan *chan;
	u8 req[128];
	int err;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
	       dcid, scid, result, status);

	mutex_lock(&conn->chan_lock);

	if (scid) {
		chan = __l2cap_get_chan_by_scid(conn, scid);
		if (!chan) {
			err = -EFAULT;
			goto unlock;
		}
	} else {
		/* Pending responses carry no scid; match on ident */
		chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
		if (!chan) {
			err = -EFAULT;
			goto unlock;
		}
	}

	err = 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_SUCCESS:
		l2cap_state_change(chan, BT_CONFIG);
		chan->ident = 0;
		chan->dcid = dcid;
		clear_bit(CONF_CONNECT_PEND, &chan->conf_state);

		/* Only send the first Configure Request once */
		if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
			break;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, req), req);
		chan->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		set_bit(CONF_CONNECT_PEND, &chan->conf_state);
		break;

	default:
		/* Any other result code means the peer refused */
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	l2cap_chan_unlock(chan);

unlock:
	mutex_unlock(&conn->chan_lock);

	return err;
}
3596
3597 static inline void set_default_fcs(struct l2cap_chan *chan)
3598 {
3599 /* FCS is enabled only in ERTM or streaming mode, if one or both
3600 * sides request it.
3601 */
3602 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
3603 chan->fcs = L2CAP_FCS_NONE;
3604 else if (!test_bit(CONF_NO_FCS_RECV, &chan->conf_state))
3605 chan->fcs = L2CAP_FCS_CRC16;
3606 }
3607
3608 static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
3609 u8 ident, u16 flags)
3610 {
3611 struct l2cap_conn *conn = chan->conn;
3612
3613 BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
3614 flags);
3615
3616 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3617 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3618
3619 l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
3620 l2cap_build_conf_rsp(chan, data,
3621 L2CAP_CONF_SUCCESS, flags), data);
3622 }
3623
/* Handle an incoming Configure Request for one of our channels.
 * Configuration options may be fragmented across several requests via
 * the continuation flag; partial option data accumulates in
 * chan->conf_req until the final fragment arrives, then the whole set
 * is parsed and answered.
 */
static inline int l2cap_config_req(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rsp[64];
	struct l2cap_chan *chan;
	int len, err = 0;

	dcid = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	chan = l2cap_get_chan_by_scid(conn, dcid);
	if (!chan)
		return -ENOENT;

	/* Configuration is only legal while connecting/configuring. */
	if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
		struct l2cap_cmd_rej_cid rej;

		rej.reason = __constant_cpu_to_le16(L2CAP_REJ_INVALID_CID);
		rej.scid = cpu_to_le16(chan->scid);
		rej.dcid = cpu_to_le16(chan->dcid);

		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
			       sizeof(rej), &rej);
		goto unlock;
	}

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (len < 0 || chan->conf_len + len > sizeof(chan->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
			       L2CAP_CONF_REJECT, flags), rsp);
		goto unlock;
	}

	/* Store config. */
	memcpy(chan->conf_req + chan->conf_len, req->data, len);
	chan->conf_len += len;

	if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
			       L2CAP_CONF_SUCCESS, flags), rsp);
		goto unlock;
	}

	/* Complete config. */
	len = l2cap_parse_conf_req(chan, rsp);
	if (len < 0) {
		l2cap_send_disconn_req(conn, chan, ECONNRESET);
		goto unlock;
	}

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
	chan->num_conf_rsp++;

	/* Reset config buffer. */
	chan->conf_len = 0;

	if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
		goto unlock;

	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
		/* Both directions configured: bring the channel up. */
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan->conn, chan, -err);
		else
			l2cap_chan_ready(chan);

		goto unlock;
	}

	/* Our own Configure Request, if we haven't sent one yet. */
	if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
		u8 buf[64];
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf), buf);
		chan->num_conf_req++;
	}

	/* Got Conf Rsp PENDING from remote side and assume we sent
	   Conf Rsp PENDING in the code above */
	if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
	    test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {

		/* check compatibility */

		/* Send rsp for BR/EDR channel */
		if (!chan->ctrl_id)
			l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
		else
			chan->ident = cmd->ident;
	}

unlock:
	l2cap_chan_unlock(chan);
	return err;
}
3732
/* Handle a Configure Response from the remote side.  On UNACCEPT a
 * fresh Configure Request with adjusted options is sent (bounded by
 * L2CAP_CONF_MAX_CONF_RSP retries); exceeding the retry limit falls
 * through to the disconnect path.
 */
static inline int l2cap_config_rsp(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
	u16 scid, flags, result;
	struct l2cap_chan *chan;
	int len = le16_to_cpu(cmd->len) - sizeof(*rsp);
	int err = 0;

	scid = __le16_to_cpu(rsp->scid);
	flags = __le16_to_cpu(rsp->flags);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
	       result, len);

	chan = l2cap_get_chan_by_scid(conn, scid);
	if (!chan)
		return 0;

	switch (result) {
	case L2CAP_CONF_SUCCESS:
		l2cap_conf_rfc_get(chan, rsp->data, len);
		clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
		break;

	case L2CAP_CONF_PENDING:
		/* Remote needs more time; if we are also pending (EFS
		 * negotiation), parse what we have and respond.
		 */
		set_bit(CONF_REM_CONF_PEND, &chan->conf_state);

		if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
			char buf[64];

			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
						   buf, &result);
			if (len < 0) {
				l2cap_send_disconn_req(conn, chan, ECONNRESET);
				goto done;
			}

			/* check compatibility */

			if (!chan->ctrl_id)
				l2cap_send_efs_conf_rsp(chan, buf, cmd->ident,
							0);
			else
				chan->ident = cmd->ident;
		}
		goto done;

	case L2CAP_CONF_UNACCEPT:
		if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
			char req[64];

			if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
				l2cap_send_disconn_req(conn, chan, ECONNRESET);
				goto done;
			}

			/* throw out any old stored conf requests */
			result = L2CAP_CONF_SUCCESS;
			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
						   req, &result);
			if (len < 0) {
				l2cap_send_disconn_req(conn, chan, ECONNRESET);
				goto done;
			}

			l2cap_send_cmd(conn, l2cap_get_ident(conn),
				       L2CAP_CONF_REQ, len, req);
			chan->num_conf_req++;
			if (result != L2CAP_CONF_SUCCESS)
				goto done;
			break;
		}
		/* fall through - retry limit exceeded, disconnect */

	default:
		l2cap_chan_set_err(chan, ECONNRESET);

		__set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
		l2cap_send_disconn_req(conn, chan, ECONNRESET);
		goto done;
	}

	if (flags & L2CAP_CONF_FLAG_CONTINUATION)
		goto done;

	set_bit(CONF_INPUT_DONE, &chan->conf_state);

	if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
		/* Both directions configured: bring the channel up. */
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan->conn, chan, -err);
		else
			l2cap_chan_ready(chan);
	}

done:
	l2cap_chan_unlock(chan);
	return err;
}
3838
/* Handle an incoming Disconnection Request: acknowledge it, shut down
 * the socket and remove the channel.  An extra reference is held across
 * l2cap_chan_del()/close() so the channel stays valid until we are
 * finished with it.
 */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct l2cap_chan *chan;
	struct sock *sk;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	mutex_lock(&conn->chan_lock);

	/* The request's dcid names OUR side of the channel. */
	chan = __l2cap_get_chan_by_scid(conn, dcid);
	if (!chan) {
		mutex_unlock(&conn->chan_lock);
		return 0;
	}

	l2cap_chan_lock(chan);

	sk = chan->sk;

	/* Echo the CIDs back, swapped to the peer's point of view. */
	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.scid = cpu_to_le16(chan->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	lock_sock(sk);
	sk->sk_shutdown = SHUTDOWN_MASK;
	release_sock(sk);

	/* Keep a reference while tearing the channel down. */
	l2cap_chan_hold(chan);
	l2cap_chan_del(chan, ECONNRESET);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);

	return 0;
}
3885
/* Handle a Disconnection Response: the peer confirmed our disconnect
 * request, so remove the channel cleanly (error 0).
 */
static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
	u16 dcid, scid;
	struct l2cap_chan *chan;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);

	mutex_lock(&conn->chan_lock);

	chan = __l2cap_get_chan_by_scid(conn, scid);
	if (!chan) {
		mutex_unlock(&conn->chan_lock);
		return 0;
	}

	l2cap_chan_lock(chan);

	/* Keep a reference while tearing the channel down. */
	l2cap_chan_hold(chan);
	l2cap_chan_del(chan, 0);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);

	return 0;
}
3920
/* Handle an Information Request: report our feature mask or fixed
 * channel support; anything else gets a NOTSUPP response.  Note that
 * the global l2cap_fixed_chan[] is updated in place according to the
 * current enable_hs setting.
 */
static inline int l2cap_information_req(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_info_req *req = (struct l2cap_info_req *) data;
	u16 type;

	type = __le16_to_cpu(req->type);

	BT_DBG("type 0x%4.4x", type);

	if (type == L2CAP_IT_FEAT_MASK) {
		u8 buf[8];
		u32 feat_mask = l2cap_feat_mask;
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
		rsp->type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
		rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
		if (!disable_ertm)
			feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
				| L2CAP_FEAT_FCS;
		if (enable_hs)
			feat_mask |= L2CAP_FEAT_EXT_FLOW
				| L2CAP_FEAT_EXT_WINDOW;

		put_unaligned_le32(feat_mask, rsp->data);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
			       buf);
	} else if (type == L2CAP_IT_FIXED_CHAN) {
		u8 buf[12];
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;

		/* Advertise the A2MP fixed channel only with high speed
		 * support enabled.
		 */
		if (enable_hs)
			l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
		else
			l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;

		rsp->type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);
		rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
		memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
			       buf);
	} else {
		struct l2cap_info_rsp rsp;
		rsp.type = cpu_to_le16(type);
		rsp.result = __constant_cpu_to_le16(L2CAP_IR_NOTSUPP);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
			       &rsp);
	}

	return 0;
}
3971
/* Handle an Information Response during connection setup.  Info
 * req/rsp exchanges are not bound to a channel, so the command ident
 * and request-done state are checked explicitly.  Once the feature
 * mask (and, when supported, the fixed channel mask) has been fetched,
 * pending channels are started via l2cap_conn_start().
 */
static inline int l2cap_information_rsp(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
	u16 type, result;

	type = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);

	/* L2CAP Info req/rsp are unbound to channels, add extra checks */
	if (cmd->ident != conn->info_ident ||
	    conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
		return 0;

	cancel_delayed_work(&conn->info_timer);

	if (result != L2CAP_IR_SUCCESS) {
		/* Peer can't answer: give up on discovery and proceed. */
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);

		return 0;
	}

	switch (type) {
	case L2CAP_IT_FEAT_MASK:
		conn->feat_mask = get_unaligned_le32(rsp->data);

		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
			/* Chase the fixed channel mask next. */
			struct l2cap_info_req req;
			req.type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);

			conn->info_ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, conn->info_ident,
				       L2CAP_INFO_REQ, sizeof(req), &req);
		} else {
			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
			conn->info_ident = 0;

			l2cap_conn_start(conn);
		}
		break;

	case L2CAP_IT_FIXED_CHAN:
		conn->fixed_chan_mask = rsp->data[0];
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
		break;
	}

	return 0;
}
4030
/* Handle an AMP Create Channel Request.  High speed channel creation
 * is not implemented yet, so every request is rejected with "no
 * resources" after validation.
 */
static inline int l2cap_create_channel_req(struct l2cap_conn *conn,
					   struct l2cap_cmd_hdr *cmd,
					   u16 cmd_len, void *data)
{
	struct l2cap_create_chan_req *req = data;
	struct l2cap_create_chan_rsp rsp;
	u16 psm, scid;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	if (!enable_hs)
		return -EINVAL;

	psm = le16_to_cpu(req->psm);
	scid = le16_to_cpu(req->scid);

	BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);

	/* Placeholder: Always reject */
	rsp.dcid = 0;
	rsp.scid = cpu_to_le16(scid);
	rsp.result = __constant_cpu_to_le16(L2CAP_CR_NO_MEM);
	rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
		       sizeof(rsp), &rsp);

	return 0;
}
4061
/* A Create Channel Response carries the same fields as a Connection
 * Response, so reuse that handler unchanged.
 */
static inline int l2cap_create_channel_rsp(struct l2cap_conn *conn,
					   struct l2cap_cmd_hdr *cmd,
					   void *data)
{
	BT_DBG("conn %p", conn);

	return l2cap_connect_rsp(conn, cmd, data);
}
4070
4071 static void l2cap_send_move_chan_rsp(struct l2cap_conn *conn, u8 ident,
4072 u16 icid, u16 result)
4073 {
4074 struct l2cap_move_chan_rsp rsp;
4075
4076 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
4077
4078 rsp.icid = cpu_to_le16(icid);
4079 rsp.result = cpu_to_le16(result);
4080
4081 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_RSP, sizeof(rsp), &rsp);
4082 }
4083
4084 static void l2cap_send_move_chan_cfm(struct l2cap_conn *conn,
4085 struct l2cap_chan *chan,
4086 u16 icid, u16 result)
4087 {
4088 struct l2cap_move_chan_cfm cfm;
4089 u8 ident;
4090
4091 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
4092
4093 ident = l2cap_get_ident(conn);
4094 if (chan)
4095 chan->ident = ident;
4096
4097 cfm.icid = cpu_to_le16(icid);
4098 cfm.result = cpu_to_le16(result);
4099
4100 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM, sizeof(cfm), &cfm);
4101 }
4102
4103 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4104 u16 icid)
4105 {
4106 struct l2cap_move_chan_cfm_rsp rsp;
4107
4108 BT_DBG("icid 0x%4.4x", icid);
4109
4110 rsp.icid = cpu_to_le16(icid);
4111 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
4112 }
4113
4114 static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
4115 struct l2cap_cmd_hdr *cmd,
4116 u16 cmd_len, void *data)
4117 {
4118 struct l2cap_move_chan_req *req = data;
4119 u16 icid = 0;
4120 u16 result = L2CAP_MR_NOT_ALLOWED;
4121
4122 if (cmd_len != sizeof(*req))
4123 return -EPROTO;
4124
4125 icid = le16_to_cpu(req->icid);
4126
4127 BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);
4128
4129 if (!enable_hs)
4130 return -EINVAL;
4131
4132 /* Placeholder: Always refuse */
4133 l2cap_send_move_chan_rsp(conn, cmd->ident, icid, result);
4134
4135 return 0;
4136 }
4137
4138 static inline int l2cap_move_channel_rsp(struct l2cap_conn *conn,
4139 struct l2cap_cmd_hdr *cmd,
4140 u16 cmd_len, void *data)
4141 {
4142 struct l2cap_move_chan_rsp *rsp = data;
4143 u16 icid, result;
4144
4145 if (cmd_len != sizeof(*rsp))
4146 return -EPROTO;
4147
4148 icid = le16_to_cpu(rsp->icid);
4149 result = le16_to_cpu(rsp->result);
4150
4151 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
4152
4153 /* Placeholder: Always unconfirmed */
4154 l2cap_send_move_chan_cfm(conn, NULL, icid, L2CAP_MC_UNCONFIRMED);
4155
4156 return 0;
4157 }
4158
4159 static inline int l2cap_move_channel_confirm(struct l2cap_conn *conn,
4160 struct l2cap_cmd_hdr *cmd,
4161 u16 cmd_len, void *data)
4162 {
4163 struct l2cap_move_chan_cfm *cfm = data;
4164 u16 icid, result;
4165
4166 if (cmd_len != sizeof(*cfm))
4167 return -EPROTO;
4168
4169 icid = le16_to_cpu(cfm->icid);
4170 result = le16_to_cpu(cfm->result);
4171
4172 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
4173
4174 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
4175
4176 return 0;
4177 }
4178
4179 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
4180 struct l2cap_cmd_hdr *cmd,
4181 u16 cmd_len, void *data)
4182 {
4183 struct l2cap_move_chan_cfm_rsp *rsp = data;
4184 u16 icid;
4185
4186 if (cmd_len != sizeof(*rsp))
4187 return -EPROTO;
4188
4189 icid = le16_to_cpu(rsp->icid);
4190
4191 BT_DBG("icid 0x%4.4x", icid);
4192
4193 return 0;
4194 }
4195
4196 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
4197 u16 to_multiplier)
4198 {
4199 u16 max_latency;
4200
4201 if (min > max || min < 6 || max > 3200)
4202 return -EINVAL;
4203
4204 if (to_multiplier < 10 || to_multiplier > 3200)
4205 return -EINVAL;
4206
4207 if (max >= to_multiplier * 8)
4208 return -EINVAL;
4209
4210 max_latency = (to_multiplier * 8 / max) - 1;
4211 if (latency > 499 || latency > max_latency)
4212 return -EINVAL;
4213
4214 return 0;
4215 }
4216
/* Handle an LE Connection Parameter Update Request.  Only valid when
 * we are the master; the parameters are validated and, if acceptable,
 * pushed down to the controller via hci_le_conn_update().
 */
static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
					      struct l2cap_cmd_hdr *cmd,
					      u8 *data)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_conn_param_update_req *req;
	struct l2cap_conn_param_update_rsp rsp;
	u16 min, max, latency, to_multiplier, cmd_len;
	int err;

	/* Only the master may be asked to update parameters. */
	if (!(hcon->link_mode & HCI_LM_MASTER))
		return -EINVAL;

	cmd_len = __le16_to_cpu(cmd->len);
	if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
		return -EPROTO;

	req = (struct l2cap_conn_param_update_req *) data;
	min = __le16_to_cpu(req->min);
	max = __le16_to_cpu(req->max);
	latency = __le16_to_cpu(req->latency);
	to_multiplier = __le16_to_cpu(req->to_multiplier);

	BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
	       min, max, latency, to_multiplier);

	memset(&rsp, 0, sizeof(rsp));

	err = l2cap_check_conn_param(min, max, latency, to_multiplier);
	if (err)
		rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
	else
		rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
		       sizeof(rsp), &rsp);

	/* Apply the accepted parameters on the controller. */
	if (!err)
		hci_le_conn_update(hcon, min, max, latency, to_multiplier);

	return 0;
}
4259
/* Dispatch a single BR/EDR signaling command to its handler.  Returns
 * a negative error for unknown or malformed commands, which causes the
 * caller to send a Command Reject.
 */
static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
				      struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				      u8 *data)
{
	int err = 0;

	switch (cmd->code) {
	case L2CAP_COMMAND_REJ:
		l2cap_command_rej(conn, cmd, data);
		break;

	case L2CAP_CONN_REQ:
		err = l2cap_connect_req(conn, cmd, data);
		break;

	case L2CAP_CONN_RSP:
	case L2CAP_CREATE_CHAN_RSP:
		/* Both responses share the same layout and handling. */
		err = l2cap_connect_rsp(conn, cmd, data);
		break;

	case L2CAP_CONF_REQ:
		err = l2cap_config_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONF_RSP:
		err = l2cap_config_rsp(conn, cmd, data);
		break;

	case L2CAP_DISCONN_REQ:
		err = l2cap_disconnect_req(conn, cmd, data);
		break;

	case L2CAP_DISCONN_RSP:
		err = l2cap_disconnect_rsp(conn, cmd, data);
		break;

	case L2CAP_ECHO_REQ:
		/* Echo the payload straight back. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
		break;

	case L2CAP_ECHO_RSP:
		break;

	case L2CAP_INFO_REQ:
		err = l2cap_information_req(conn, cmd, data);
		break;

	case L2CAP_INFO_RSP:
		err = l2cap_information_rsp(conn, cmd, data);
		break;

	case L2CAP_CREATE_CHAN_REQ:
		err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_REQ:
		err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_RSP:
		err = l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_CFM:
		err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_CFM_RSP:
		err = l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
		break;

	default:
		BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
		err = -EINVAL;
		break;
	}

	return err;
}
4339
4340 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
4341 struct l2cap_cmd_hdr *cmd, u8 *data)
4342 {
4343 switch (cmd->code) {
4344 case L2CAP_COMMAND_REJ:
4345 return 0;
4346
4347 case L2CAP_CONN_PARAM_UPDATE_REQ:
4348 return l2cap_conn_param_update_req(conn, cmd, data);
4349
4350 case L2CAP_CONN_PARAM_UPDATE_RSP:
4351 return 0;
4352
4353 default:
4354 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
4355 return -EINVAL;
4356 }
4357 }
4358
/* Process all signaling commands packed in one skb on the signaling
 * channel.  Each command is validated, routed to the BR/EDR or LE
 * dispatcher, and failures are answered with a Command Reject.
 * Consumes the skb.
 */
static inline void l2cap_sig_channel(struct l2cap_conn *conn,
				     struct sk_buff *skb)
{
	u8 *data = skb->data;
	int len = skb->len;
	struct l2cap_cmd_hdr cmd;
	int err;

	l2cap_raw_recv(conn, skb);

	while (len >= L2CAP_CMD_HDR_SIZE) {
		u16 cmd_len;
		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
		data += L2CAP_CMD_HDR_SIZE;
		len -= L2CAP_CMD_HDR_SIZE;

		cmd_len = le16_to_cpu(cmd.len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len,
		       cmd.ident);

		/* Ident 0 is reserved; a length past the buffer means a
		 * truncated or corrupted packet - stop parsing.
		 */
		if (cmd_len > len || !cmd.ident) {
			BT_DBG("corrupted command");
			break;
		}

		if (conn->hcon->type == LE_LINK)
			err = l2cap_le_sig_cmd(conn, &cmd, data);
		else
			err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);

		if (err) {
			struct l2cap_cmd_rej_unk rej;

			BT_ERR("Wrong link type (%d)", err);

			/* FIXME: Map err to a valid reason */
			rej.reason = __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ,
				       sizeof(rej), &rej);
		}

		data += cmd_len;
		len -= cmd_len;
	}

	kfree_skb(skb);
}
4407
/* Verify and strip the FCS trailer of a received ERTM/streaming frame.
 * Returns 0 when the checksum matches (or FCS is disabled), -EBADMSG
 * on mismatch.
 */
static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
{
	u16 our_fcs, rcv_fcs;
	int hdr_size;

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		hdr_size = L2CAP_EXT_HDR_SIZE;
	else
		hdr_size = L2CAP_ENH_HDR_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16) {
		/* Trim the FCS off the skb; the trimmed bytes are still
		 * readable at skb->data + skb->len afterwards.
		 */
		skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
		rcv_fcs = get_unaligned_le16(skb->data + skb->len);
		/* The CRC covers the L2CAP header preceding skb->data. */
		our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);

		if (our_fcs != rcv_fcs)
			return -EBADMSG;
	}
	return 0;
}
4428
/* Answer a poll (P-bit) from the remote: send an RNR if locally busy,
 * then any pending I-frames, and finally an RR if nothing else carried
 * the required F-bit.
 */
static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;

	BT_DBG("chan %p", chan);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.final = 1;
	control.reqseq = chan->buffer_seq;
	set_bit(CONN_SEND_FBIT, &chan->conn_state);

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		control.super = L2CAP_SUPER_RNR;
		l2cap_send_sframe(chan, &control);
	}

	if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
	    chan->unacked_frames > 0)
		__set_retrans_timer(chan);

	/* Send pending iframes */
	l2cap_ertm_send(chan);

	if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
		/* F-bit wasn't sent in an s-frame or i-frame yet, so
		 * send it now.
		 */
		control.super = L2CAP_SUPER_RR;
		l2cap_send_sframe(chan, &control);
	}
}
4462
/* Append new_frag to skb's frag_list, updating *last_frag (tail
 * pointer) and skb's length/truesize accounting to cover the fragment.
 */
static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
			    struct sk_buff **last_frag)
{
	/* skb->len reflects data in skb as well as all fragments
	 * skb->data_len reflects only data in fragments
	 */
	if (!skb_has_frag_list(skb))
		skb_shinfo(skb)->frag_list = new_frag;

	new_frag->next = NULL;

	(*last_frag)->next = new_frag;
	*last_frag = new_frag;

	skb->len += new_frag->len;
	skb->data_len += new_frag->len;
	skb->truesize += new_frag->truesize;
}
4481
/* Reassemble a segmented SDU from ERTM/streaming I-frames according to
 * the control field's SAR bits.  On success the skb (or the completed
 * SDU) is handed to chan->ops->recv(), which takes ownership; on any
 * error both the skb and any partial SDU are freed.
 *
 * Note: skb is set to NULL locally once ownership moves to chan->sdu,
 * so the error path below never double-frees it.
 */
static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
				struct l2cap_ctrl *control)
{
	int err = -EINVAL;

	switch (control->sar) {
	case L2CAP_SAR_UNSEGMENTED:
		/* A pending partial SDU makes an unsegmented frame invalid. */
		if (chan->sdu)
			break;

		err = chan->ops->recv(chan, skb);
		break;

	case L2CAP_SAR_START:
		/* A second START while reassembling is a protocol error. */
		if (chan->sdu)
			break;

		chan->sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, L2CAP_SDULEN_SIZE);

		if (chan->sdu_len > chan->imtu) {
			err = -EMSGSIZE;
			break;
		}

		/* First fragment must be shorter than the full SDU. */
		if (skb->len >= chan->sdu_len)
			break;

		chan->sdu = skb;
		chan->sdu_last_frag = skb;

		skb = NULL;
		err = 0;
		break;

	case L2CAP_SAR_CONTINUE:
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* Running total must stay below the announced SDU length. */
		if (chan->sdu->len >= chan->sdu_len)
			break;

		err = 0;
		break;

	case L2CAP_SAR_END:
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* Final length must match exactly what START announced. */
		if (chan->sdu->len != chan->sdu_len)
			break;

		err = chan->ops->recv(chan, chan->sdu);

		if (!err) {
			/* Reassembly complete */
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;
		}
		break;
	}

	if (err) {
		kfree_skb(skb);
		kfree_skb(chan->sdu);
		chan->sdu = NULL;
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;
	}

	return err;
}
4563
4564 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
4565 {
4566 u8 event;
4567
4568 if (chan->mode != L2CAP_MODE_ERTM)
4569 return;
4570
4571 event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
4572 l2cap_tx(chan, NULL, NULL, event);
4573 }
4574
/* Drain the SREJ queue: pass consecutively sequenced frames to
 * l2cap_reassemble_sdu() until a gap (or local busy) stops progress.
 * When the queue empties, return to the normal RECV state and ack.
 */
static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
{
	int err = 0;
	/* Pass sequential frames to l2cap_reassemble_sdu()
	 * until a gap is encountered.
	 */

	BT_DBG("chan %p", chan);

	while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		struct sk_buff *skb;
		BT_DBG("Searching for skb with txseq %d (queue len %d)",
		       chan->buffer_seq, skb_queue_len(&chan->srej_q));

		skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);

		if (!skb)
			break;

		skb_unlink(skb, &chan->srej_q);
		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
		err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->control);
		if (err)
			break;
	}

	if (skb_queue_empty(&chan->srej_q)) {
		chan->rx_state = L2CAP_RX_STATE_RECV;
		l2cap_send_ack(chan);
	}

	return err;
}
4608
/* Handle a received SREJ S-frame: retransmit the single requested
 * I-frame.  CONN_SREJ_ACT guards against retransmitting the same frame
 * twice when both a poll response and a final arrive for one SREJ.
 */
static void l2cap_handle_srej(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* reqseq equal to next_tx_seq requests a frame never sent. */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	if (skb == NULL) {
		BT_DBG("Seq %d not available for retransmission",
		       control->reqseq);
		return;
	}

	if (chan->max_tx != 0 && bt_cb(skb)->control.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	if (control->poll) {
		/* Poll demands an F-bit in our reply. */
		l2cap_pass_to_tx(chan, control);

		set_bit(CONN_SEND_FBIT, &chan->conn_state);
		l2cap_retransmit(chan, control);
		l2cap_ertm_send(chan);

		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
			set_bit(CONN_SREJ_ACT, &chan->conn_state);
			chan->srej_save_reqseq = control->reqseq;
		}
	} else {
		l2cap_pass_to_tx_fbit(chan, control);

		if (control->final) {
			/* Skip the retransmit if this final answers an
			 * SREJ we already acted on for the same seq.
			 */
			if (chan->srej_save_reqseq != control->reqseq ||
			    !test_and_clear_bit(CONN_SREJ_ACT,
						&chan->conn_state))
				l2cap_retransmit(chan, control);
		} else {
			l2cap_retransmit(chan, control);
			if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
				set_bit(CONN_SREJ_ACT, &chan->conn_state);
				chan->srej_save_reqseq = control->reqseq;
			}
		}
	}
}
4666
/* Handle a received REJ S-frame: retransmit everything from reqseq
 * onward.  CONN_REJ_ACT prevents a second full retransmit when the
 * matching final arrives after we already acted on the REJ.
 */
static void l2cap_handle_rej(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* reqseq equal to next_tx_seq rejects a frame never sent. */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	if (chan->max_tx && skb &&
	    bt_cb(skb)->control.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	l2cap_pass_to_tx(chan, control);

	if (control->final) {
		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
			l2cap_retransmit_all(chan, control);
	} else {
		l2cap_retransmit_all(chan, control);
		l2cap_ertm_send(chan);
		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
			set_bit(CONN_REJ_ACT, &chan->conn_state);
	}
}
4703
/* Classify a received I-frame's txseq relative to the current receive
 * window and SREJ state.  Returns one of the L2CAP_TXSEQ_* verdicts
 * (expected, duplicate, unexpected, or invalid/ignorable) that drives
 * the ERTM receive state machine.
 */
static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
{
	BT_DBG("chan %p, txseq %d", chan, txseq);

	BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
	       chan->expected_tx_seq);

	if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			/* See notes below regarding "double poll" and
			 * invalid packets.
			 */
			if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
				BT_DBG("Invalid/Ignore - after SREJ");
				return L2CAP_TXSEQ_INVALID_IGNORE;
			} else {
				BT_DBG("Invalid - in window after SREJ sent");
				return L2CAP_TXSEQ_INVALID;
			}
		}

		if (chan->srej_list.head == txseq) {
			BT_DBG("Expected SREJ");
			return L2CAP_TXSEQ_EXPECTED_SREJ;
		}

		if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
			BT_DBG("Duplicate SREJ - txseq already stored");
			return L2CAP_TXSEQ_DUPLICATE_SREJ;
		}

		if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
			BT_DBG("Unexpected SREJ - not requested");
			return L2CAP_TXSEQ_UNEXPECTED_SREJ;
		}
	}

	if (chan->expected_tx_seq == txseq) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		} else {
			BT_DBG("Expected");
			return L2CAP_TXSEQ_EXPECTED;
		}
	}

	if (__seq_offset(chan, txseq, chan->last_acked_seq) <
	    __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
		BT_DBG("Duplicate - expected_tx_seq later than txseq");
		return L2CAP_TXSEQ_DUPLICATE;
	}

	if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
		/* A source of invalid packets is a "double poll" condition,
		 * where delays cause us to send multiple poll packets. If
		 * the remote stack receives and processes both polls,
		 * sequence numbers can wrap around in such a way that a
		 * resent frame has a sequence number that looks like new data
		 * with a sequence gap. This would trigger an erroneous SREJ
		 * request.
		 *
		 * Fortunately, this is impossible with a tx window that's
		 * less than half of the maximum sequence number, which allows
		 * invalid frames to be safely ignored.
		 *
		 * With tx window sizes greater than half of the tx window
		 * maximum, the frame is invalid and cannot be ignored. This
		 * causes a disconnect.
		 */

		if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
			BT_DBG("Invalid/Ignore - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID_IGNORE;
		} else {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		}
	} else {
		BT_DBG("Unexpected - txseq indicates missing frames");
		return L2CAP_TXSEQ_UNEXPECTED;
	}
}
4789
4790 static int l2cap_rx_state_recv(struct l2cap_chan *chan,
4791 struct l2cap_ctrl *control,
4792 struct sk_buff *skb, u8 event)
4793 {
4794 int err = 0;
4795 bool skb_in_use = 0;
4796
4797 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
4798 event);
4799
4800 switch (event) {
4801 case L2CAP_EV_RECV_IFRAME:
4802 switch (l2cap_classify_txseq(chan, control->txseq)) {
4803 case L2CAP_TXSEQ_EXPECTED:
4804 l2cap_pass_to_tx(chan, control);
4805
4806 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4807 BT_DBG("Busy, discarding expected seq %d",
4808 control->txseq);
4809 break;
4810 }
4811
4812 chan->expected_tx_seq = __next_seq(chan,
4813 control->txseq);
4814
4815 chan->buffer_seq = chan->expected_tx_seq;
4816 skb_in_use = 1;
4817
4818 err = l2cap_reassemble_sdu(chan, skb, control);
4819 if (err)
4820 break;
4821
4822 if (control->final) {
4823 if (!test_and_clear_bit(CONN_REJ_ACT,
4824 &chan->conn_state)) {
4825 control->final = 0;
4826 l2cap_retransmit_all(chan, control);
4827 l2cap_ertm_send(chan);
4828 }
4829 }
4830
4831 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
4832 l2cap_send_ack(chan);
4833 break;
4834 case L2CAP_TXSEQ_UNEXPECTED:
4835 l2cap_pass_to_tx(chan, control);
4836
4837 /* Can't issue SREJ frames in the local busy state.
4838 * Drop this frame, it will be seen as missing
4839 * when local busy is exited.
4840 */
4841 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4842 BT_DBG("Busy, discarding unexpected seq %d",
4843 control->txseq);
4844 break;
4845 }
4846
4847 /* There was a gap in the sequence, so an SREJ
4848 * must be sent for each missing frame. The
4849 * current frame is stored for later use.
4850 */
4851 skb_queue_tail(&chan->srej_q, skb);
4852 skb_in_use = 1;
4853 BT_DBG("Queued %p (queue len %d)", skb,
4854 skb_queue_len(&chan->srej_q));
4855
4856 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
4857 l2cap_seq_list_clear(&chan->srej_list);
4858 l2cap_send_srej(chan, control->txseq);
4859
4860 chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
4861 break;
4862 case L2CAP_TXSEQ_DUPLICATE:
4863 l2cap_pass_to_tx(chan, control);
4864 break;
4865 case L2CAP_TXSEQ_INVALID_IGNORE:
4866 break;
4867 case L2CAP_TXSEQ_INVALID:
4868 default:
4869 l2cap_send_disconn_req(chan->conn, chan,
4870 ECONNRESET);
4871 break;
4872 }
4873 break;
4874 case L2CAP_EV_RECV_RR:
4875 l2cap_pass_to_tx(chan, control);
4876 if (control->final) {
4877 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4878
4879 if (!test_and_clear_bit(CONN_REJ_ACT,
4880 &chan->conn_state)) {
4881 control->final = 0;
4882 l2cap_retransmit_all(chan, control);
4883 }
4884
4885 l2cap_ertm_send(chan);
4886 } else if (control->poll) {
4887 l2cap_send_i_or_rr_or_rnr(chan);
4888 } else {
4889 if (test_and_clear_bit(CONN_REMOTE_BUSY,
4890 &chan->conn_state) &&
4891 chan->unacked_frames)
4892 __set_retrans_timer(chan);
4893
4894 l2cap_ertm_send(chan);
4895 }
4896 break;
4897 case L2CAP_EV_RECV_RNR:
4898 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4899 l2cap_pass_to_tx(chan, control);
4900 if (control && control->poll) {
4901 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4902 l2cap_send_rr_or_rnr(chan, 0);
4903 }
4904 __clear_retrans_timer(chan);
4905 l2cap_seq_list_clear(&chan->retrans_list);
4906 break;
4907 case L2CAP_EV_RECV_REJ:
4908 l2cap_handle_rej(chan, control);
4909 break;
4910 case L2CAP_EV_RECV_SREJ:
4911 l2cap_handle_srej(chan, control);
4912 break;
4913 default:
4914 break;
4915 }
4916
4917 if (skb && !skb_in_use) {
4918 BT_DBG("Freeing %p", skb);
4919 kfree_skb(skb);
4920 }
4921
4922 return err;
4923 }
4924
4925 static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
4926 struct l2cap_ctrl *control,
4927 struct sk_buff *skb, u8 event)
4928 {
4929 int err = 0;
4930 u16 txseq = control->txseq;
4931 bool skb_in_use = 0;
4932
4933 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
4934 event);
4935
4936 switch (event) {
4937 case L2CAP_EV_RECV_IFRAME:
4938 switch (l2cap_classify_txseq(chan, txseq)) {
4939 case L2CAP_TXSEQ_EXPECTED:
4940 /* Keep frame for reassembly later */
4941 l2cap_pass_to_tx(chan, control);
4942 skb_queue_tail(&chan->srej_q, skb);
4943 skb_in_use = 1;
4944 BT_DBG("Queued %p (queue len %d)", skb,
4945 skb_queue_len(&chan->srej_q));
4946
4947 chan->expected_tx_seq = __next_seq(chan, txseq);
4948 break;
4949 case L2CAP_TXSEQ_EXPECTED_SREJ:
4950 l2cap_seq_list_pop(&chan->srej_list);
4951
4952 l2cap_pass_to_tx(chan, control);
4953 skb_queue_tail(&chan->srej_q, skb);
4954 skb_in_use = 1;
4955 BT_DBG("Queued %p (queue len %d)", skb,
4956 skb_queue_len(&chan->srej_q));
4957
4958 err = l2cap_rx_queued_iframes(chan);
4959 if (err)
4960 break;
4961
4962 break;
4963 case L2CAP_TXSEQ_UNEXPECTED:
4964 /* Got a frame that can't be reassembled yet.
4965 * Save it for later, and send SREJs to cover
4966 * the missing frames.
4967 */
4968 skb_queue_tail(&chan->srej_q, skb);
4969 skb_in_use = 1;
4970 BT_DBG("Queued %p (queue len %d)", skb,
4971 skb_queue_len(&chan->srej_q));
4972
4973 l2cap_pass_to_tx(chan, control);
4974 l2cap_send_srej(chan, control->txseq);
4975 break;
4976 case L2CAP_TXSEQ_UNEXPECTED_SREJ:
4977 /* This frame was requested with an SREJ, but
4978 * some expected retransmitted frames are
4979 * missing. Request retransmission of missing
4980 * SREJ'd frames.
4981 */
4982 skb_queue_tail(&chan->srej_q, skb);
4983 skb_in_use = 1;
4984 BT_DBG("Queued %p (queue len %d)", skb,
4985 skb_queue_len(&chan->srej_q));
4986
4987 l2cap_pass_to_tx(chan, control);
4988 l2cap_send_srej_list(chan, control->txseq);
4989 break;
4990 case L2CAP_TXSEQ_DUPLICATE_SREJ:
4991 /* We've already queued this frame. Drop this copy. */
4992 l2cap_pass_to_tx(chan, control);
4993 break;
4994 case L2CAP_TXSEQ_DUPLICATE:
4995 /* Expecting a later sequence number, so this frame
4996 * was already received. Ignore it completely.
4997 */
4998 break;
4999 case L2CAP_TXSEQ_INVALID_IGNORE:
5000 break;
5001 case L2CAP_TXSEQ_INVALID:
5002 default:
5003 l2cap_send_disconn_req(chan->conn, chan,
5004 ECONNRESET);
5005 break;
5006 }
5007 break;
5008 case L2CAP_EV_RECV_RR:
5009 l2cap_pass_to_tx(chan, control);
5010 if (control->final) {
5011 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5012
5013 if (!test_and_clear_bit(CONN_REJ_ACT,
5014 &chan->conn_state)) {
5015 control->final = 0;
5016 l2cap_retransmit_all(chan, control);
5017 }
5018
5019 l2cap_ertm_send(chan);
5020 } else if (control->poll) {
5021 if (test_and_clear_bit(CONN_REMOTE_BUSY,
5022 &chan->conn_state) &&
5023 chan->unacked_frames) {
5024 __set_retrans_timer(chan);
5025 }
5026
5027 set_bit(CONN_SEND_FBIT, &chan->conn_state);
5028 l2cap_send_srej_tail(chan);
5029 } else {
5030 if (test_and_clear_bit(CONN_REMOTE_BUSY,
5031 &chan->conn_state) &&
5032 chan->unacked_frames)
5033 __set_retrans_timer(chan);
5034
5035 l2cap_send_ack(chan);
5036 }
5037 break;
5038 case L2CAP_EV_RECV_RNR:
5039 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5040 l2cap_pass_to_tx(chan, control);
5041 if (control->poll) {
5042 l2cap_send_srej_tail(chan);
5043 } else {
5044 struct l2cap_ctrl rr_control;
5045 memset(&rr_control, 0, sizeof(rr_control));
5046 rr_control.sframe = 1;
5047 rr_control.super = L2CAP_SUPER_RR;
5048 rr_control.reqseq = chan->buffer_seq;
5049 l2cap_send_sframe(chan, &rr_control);
5050 }
5051
5052 break;
5053 case L2CAP_EV_RECV_REJ:
5054 l2cap_handle_rej(chan, control);
5055 break;
5056 case L2CAP_EV_RECV_SREJ:
5057 l2cap_handle_srej(chan, control);
5058 break;
5059 }
5060
5061 if (skb && !skb_in_use) {
5062 BT_DBG("Freeing %p", skb);
5063 kfree_skb(skb);
5064 }
5065
5066 return err;
5067 }
5068
5069 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
5070 {
5071 /* Make sure reqseq is for a packet that has been sent but not acked */
5072 u16 unacked;
5073
5074 unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
5075 return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
5076 }
5077
5078 static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
5079 struct sk_buff *skb, u8 event)
5080 {
5081 int err = 0;
5082
5083 BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
5084 control, skb, event, chan->rx_state);
5085
5086 if (__valid_reqseq(chan, control->reqseq)) {
5087 switch (chan->rx_state) {
5088 case L2CAP_RX_STATE_RECV:
5089 err = l2cap_rx_state_recv(chan, control, skb, event);
5090 break;
5091 case L2CAP_RX_STATE_SREJ_SENT:
5092 err = l2cap_rx_state_srej_sent(chan, control, skb,
5093 event);
5094 break;
5095 default:
5096 /* shut it down */
5097 break;
5098 }
5099 } else {
5100 BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
5101 control->reqseq, chan->next_tx_seq,
5102 chan->expected_ack_seq);
5103 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
5104 }
5105
5106 return err;
5107 }
5108
5109 static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
5110 struct sk_buff *skb)
5111 {
5112 int err = 0;
5113
5114 BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
5115 chan->rx_state);
5116
5117 if (l2cap_classify_txseq(chan, control->txseq) ==
5118 L2CAP_TXSEQ_EXPECTED) {
5119 l2cap_pass_to_tx(chan, control);
5120
5121 BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
5122 __next_seq(chan, chan->buffer_seq));
5123
5124 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
5125
5126 l2cap_reassemble_sdu(chan, skb, control);
5127 } else {
5128 if (chan->sdu) {
5129 kfree_skb(chan->sdu);
5130 chan->sdu = NULL;
5131 }
5132 chan->sdu_last_frag = NULL;
5133 chan->sdu_len = 0;
5134
5135 if (skb) {
5136 BT_DBG("Freeing %p", skb);
5137 kfree_skb(skb);
5138 }
5139 }
5140
5141 chan->last_acked_seq = control->txseq;
5142 chan->expected_tx_seq = __next_seq(chan, control->txseq);
5143
5144 return err;
5145 }
5146
5147 static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
5148 {
5149 struct l2cap_ctrl *control = &bt_cb(skb)->control;
5150 u16 len;
5151 u8 event;
5152
5153 __unpack_control(chan, skb);
5154
5155 len = skb->len;
5156
5157 /*
5158 * We can just drop the corrupted I-frame here.
5159 * Receiver will miss it and start proper recovery
5160 * procedures and ask for retransmission.
5161 */
5162 if (l2cap_check_fcs(chan, skb))
5163 goto drop;
5164
5165 if (!control->sframe && control->sar == L2CAP_SAR_START)
5166 len -= L2CAP_SDULEN_SIZE;
5167
5168 if (chan->fcs == L2CAP_FCS_CRC16)
5169 len -= L2CAP_FCS_SIZE;
5170
5171 if (len > chan->mps) {
5172 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
5173 goto drop;
5174 }
5175
5176 if (!control->sframe) {
5177 int err;
5178
5179 BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
5180 control->sar, control->reqseq, control->final,
5181 control->txseq);
5182
5183 /* Validate F-bit - F=0 always valid, F=1 only
5184 * valid in TX WAIT_F
5185 */
5186 if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
5187 goto drop;
5188
5189 if (chan->mode != L2CAP_MODE_STREAMING) {
5190 event = L2CAP_EV_RECV_IFRAME;
5191 err = l2cap_rx(chan, control, skb, event);
5192 } else {
5193 err = l2cap_stream_rx(chan, control, skb);
5194 }
5195
5196 if (err)
5197 l2cap_send_disconn_req(chan->conn, chan,
5198 ECONNRESET);
5199 } else {
5200 const u8 rx_func_to_event[4] = {
5201 L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
5202 L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
5203 };
5204
5205 /* Only I-frames are expected in streaming mode */
5206 if (chan->mode == L2CAP_MODE_STREAMING)
5207 goto drop;
5208
5209 BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
5210 control->reqseq, control->final, control->poll,
5211 control->super);
5212
5213 if (len != 0) {
5214 BT_ERR("%d", len);
5215 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
5216 goto drop;
5217 }
5218
5219 /* Validate F and P bits */
5220 if (control->final && (control->poll ||
5221 chan->tx_state != L2CAP_TX_STATE_WAIT_F))
5222 goto drop;
5223
5224 event = rx_func_to_event[control->super];
5225 if (l2cap_rx(chan, control, skb, event))
5226 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
5227 }
5228
5229 return 0;
5230
5231 drop:
5232 kfree_skb(skb);
5233 return 0;
5234 }
5235
/* Deliver an inbound frame on a dynamically allocated CID to the owning
 * channel.  For L2CAP_CID_A2MP an A2MP channel may be created on demand;
 * frames for unknown CIDs are dropped.
 *
 * NOTE(review): l2cap_get_chan_by_scid() appears to return the channel
 * already locked - the l2cap_chan_unlock() at "done" pairs with it, and
 * the A2MP branch takes the lock explicitly.  Confirm against the
 * helper's definition.
 */
static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
			       struct sk_buff *skb)
{
	struct l2cap_chan *chan;

	chan = l2cap_get_chan_by_scid(conn, cid);
	if (!chan) {
		if (cid == L2CAP_CID_A2MP) {
			chan = a2mp_channel_create(conn, skb);
			if (!chan) {
				kfree_skb(skb);
				return;
			}

			l2cap_chan_lock(chan);
		} else {
			BT_DBG("unknown cid 0x%4.4x", cid);
			/* Drop packet and return */
			kfree_skb(skb);
			return;
		}
	}

	BT_DBG("chan %p, len %d", chan, skb->len);

	/* Only fully established channels accept data */
	if (chan->state != BT_CONNECTED)
		goto drop;

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism. */

		if (chan->imtu < skb->len)
			goto drop;

		/* recv() returning 0 means it consumed the skb */
		if (!chan->ops->recv(chan, skb))
			goto done;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* l2cap_data_rcv() consumes the skb on every path */
		l2cap_data_rcv(chan, skb);
		goto done;

	default:
		BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
		break;
	}

drop:
	kfree_skb(skb);

done:
	l2cap_chan_unlock(chan);
}
5294
5295 static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
5296 struct sk_buff *skb)
5297 {
5298 struct l2cap_chan *chan;
5299
5300 chan = l2cap_global_chan_by_psm(0, psm, conn->src, conn->dst);
5301 if (!chan)
5302 goto drop;
5303
5304 BT_DBG("chan %p, len %d", chan, skb->len);
5305
5306 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
5307 goto drop;
5308
5309 if (chan->imtu < skb->len)
5310 goto drop;
5311
5312 if (!chan->ops->recv(chan, skb))
5313 return;
5314
5315 drop:
5316 kfree_skb(skb);
5317 }
5318
5319 static void l2cap_att_channel(struct l2cap_conn *conn, u16 cid,
5320 struct sk_buff *skb)
5321 {
5322 struct l2cap_chan *chan;
5323
5324 chan = l2cap_global_chan_by_scid(0, cid, conn->src, conn->dst);
5325 if (!chan)
5326 goto drop;
5327
5328 BT_DBG("chan %p, len %d", chan, skb->len);
5329
5330 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
5331 goto drop;
5332
5333 if (chan->imtu < skb->len)
5334 goto drop;
5335
5336 if (!chan->ops->recv(chan, skb))
5337 return;
5338
5339 drop:
5340 kfree_skb(skb);
5341 }
5342
5343 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
5344 {
5345 struct l2cap_hdr *lh = (void *) skb->data;
5346 u16 cid, len;
5347 __le16 psm;
5348
5349 skb_pull(skb, L2CAP_HDR_SIZE);
5350 cid = __le16_to_cpu(lh->cid);
5351 len = __le16_to_cpu(lh->len);
5352
5353 if (len != skb->len) {
5354 kfree_skb(skb);
5355 return;
5356 }
5357
5358 BT_DBG("len %d, cid 0x%4.4x", len, cid);
5359
5360 switch (cid) {
5361 case L2CAP_CID_LE_SIGNALING:
5362 case L2CAP_CID_SIGNALING:
5363 l2cap_sig_channel(conn, skb);
5364 break;
5365
5366 case L2CAP_CID_CONN_LESS:
5367 psm = get_unaligned((__le16 *) skb->data);
5368 skb_pull(skb, L2CAP_PSMLEN_SIZE);
5369 l2cap_conless_channel(conn, psm, skb);
5370 break;
5371
5372 case L2CAP_CID_LE_DATA:
5373 l2cap_att_channel(conn, cid, skb);
5374 break;
5375
5376 case L2CAP_CID_SMP:
5377 if (smp_sig_channel(conn, skb))
5378 l2cap_conn_del(conn->hcon, EACCES);
5379 break;
5380
5381 default:
5382 l2cap_data_channel(conn, cid, skb);
5383 break;
5384 }
5385 }
5386
5387 /* ---- L2CAP interface with lower layer (HCI) ---- */
5388
5389 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
5390 {
5391 int exact = 0, lm1 = 0, lm2 = 0;
5392 struct l2cap_chan *c;
5393
5394 BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);
5395
5396 /* Find listening sockets and check their link_mode */
5397 read_lock(&chan_list_lock);
5398 list_for_each_entry(c, &chan_list, global_l) {
5399 struct sock *sk = c->sk;
5400
5401 if (c->state != BT_LISTEN)
5402 continue;
5403
5404 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
5405 lm1 |= HCI_LM_ACCEPT;
5406 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
5407 lm1 |= HCI_LM_MASTER;
5408 exact++;
5409 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
5410 lm2 |= HCI_LM_ACCEPT;
5411 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
5412 lm2 |= HCI_LM_MASTER;
5413 }
5414 }
5415 read_unlock(&chan_list_lock);
5416
5417 return exact ? lm1 : lm2;
5418 }
5419
5420 void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
5421 {
5422 struct l2cap_conn *conn;
5423
5424 BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);
5425
5426 if (!status) {
5427 conn = l2cap_conn_add(hcon, status);
5428 if (conn)
5429 l2cap_conn_ready(conn);
5430 } else
5431 l2cap_conn_del(hcon, bt_to_errno(status));
5432
5433 }
5434
5435 int l2cap_disconn_ind(struct hci_conn *hcon)
5436 {
5437 struct l2cap_conn *conn = hcon->l2cap_data;
5438
5439 BT_DBG("hcon %p", hcon);
5440
5441 if (!conn)
5442 return HCI_ERROR_REMOTE_USER_TERM;
5443 return conn->disc_reason;
5444 }
5445
5446 void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
5447 {
5448 BT_DBG("hcon %p reason %d", hcon, reason);
5449
5450 l2cap_conn_del(hcon, bt_to_errno(reason));
5451 }
5452
5453 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
5454 {
5455 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
5456 return;
5457
5458 if (encrypt == 0x00) {
5459 if (chan->sec_level == BT_SECURITY_MEDIUM) {
5460 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
5461 } else if (chan->sec_level == BT_SECURITY_HIGH)
5462 l2cap_chan_close(chan, ECONNREFUSED);
5463 } else {
5464 if (chan->sec_level == BT_SECURITY_MEDIUM)
5465 __clear_chan_timer(chan);
5466 }
5467 }
5468
/* HCI callback: authentication/encryption state of the link changed.
 *
 * @status:  0 on success, otherwise an HCI error code
 * @encrypt: new encryption state of the link
 *
 * Walks every channel on the connection (under chan_lock, taking each
 * channel lock in turn) and advances or aborts its setup depending on
 * its state.  Always returns 0.
 */
int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan;

	if (!conn)
		return 0;

	BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);

	if (hcon->type == LE_LINK) {
		/* LE link encrypted: distribute SMP keys and stop the
		 * pairing security timer.
		 */
		if (!status && encrypt)
			smp_distribute_keys(conn, 0);
		cancel_delayed_work(&conn->security_timer);
	}

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
		       state_to_string(chan->state));

		/* A2MP fixed channels don't track link security here */
		if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
			l2cap_chan_unlock(chan);
			continue;
		}

		/* LE data channels become ready once encryption is up */
		if (chan->scid == L2CAP_CID_LE_DATA) {
			if (!status && encrypt) {
				chan->sec_level = hcon->sec_level;
				l2cap_chan_ready(chan);
			}

			l2cap_chan_unlock(chan);
			continue;
		}

		/* Connection procedure already pending; nothing to do */
		if (test_bit(CONF_CONNECT_PEND, &chan->conf_state)) {
			l2cap_chan_unlock(chan);
			continue;
		}

		/* Established channels: wake the socket and react to the
		 * encryption change (may start teardown).
		 */
		if (!status && (chan->state == BT_CONNECTED ||
				chan->state == BT_CONFIG)) {
			struct sock *sk = chan->sk;

			clear_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags);
			sk->sk_state_change(sk);

			l2cap_check_encryption(chan, encrypt);
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			/* Security done for an outgoing channel: either
			 * send the connect request or arm teardown.
			 */
			if (!status) {
				l2cap_start_connection(chan);
			} else {
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
			}
		} else if (chan->state == BT_CONNECT2) {
			/* Incoming channel was waiting on security; now
			 * answer the peer's connect request.
			 */
			struct sock *sk = chan->sk;
			struct l2cap_conn_rsp rsp;
			__u16 res, stat;

			lock_sock(sk);

			if (!status) {
				if (test_bit(BT_SK_DEFER_SETUP,
					     &bt_sk(sk)->flags)) {
					res = L2CAP_CR_PEND;
					stat = L2CAP_CS_AUTHOR_PEND;
					chan->ops->defer(chan);
				} else {
					__l2cap_state_change(chan, BT_CONFIG);
					res = L2CAP_CR_SUCCESS;
					stat = L2CAP_CS_NO_INFO;
				}
			} else {
				__l2cap_state_change(chan, BT_DISCONN);
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
				res = L2CAP_CR_SEC_BLOCK;
				stat = L2CAP_CS_NO_INFO;
			}

			release_sock(sk);

			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);
			rsp.result = cpu_to_le16(res);
			rsp.status = cpu_to_le16(stat);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			/* On acceptance, immediately start configuration */
			if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
			    res == L2CAP_CR_SUCCESS) {
				char buf[128];
				set_bit(CONF_REQ_SENT, &chan->conf_state);
				l2cap_send_cmd(conn, l2cap_get_ident(conn),
					       L2CAP_CONF_REQ,
					       l2cap_build_conf_req(chan, buf),
					       buf);
				chan->num_conf_req++;
			}
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);

	return 0;
}
5584
/* HCI callback: inbound ACL data, possibly fragmented.
 *
 * Reassembles L2CAP frames that span multiple ACL packets using
 * conn->rx_skb/rx_len, and hands complete frames to l2cap_recv_frame().
 * Any protocol inconsistency (bad lengths, unexpected start/cont
 * fragments) marks the connection unreliable and drops the fragment.
 *
 * Always returns 0; @skb is consumed on every path.
 */
int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_hdr *hdr;
	int len;

	/* For AMP controller do not create l2cap conn */
	if (!conn && hcon->hdev->dev_type != HCI_BREDR)
		goto drop;

	if (!conn)
		conn = l2cap_conn_add(hcon, 0);

	if (!conn)
		goto drop;

	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);

	switch (flags) {
	case ACL_START:
	case ACL_START_NO_FLUSH:
	case ACL_COMPLETE:
		/* A start fragment while reassembly is in progress means
		 * the previous frame was truncated - discard it.
		 */
		if (conn->rx_len) {
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
		}

		/* Start fragment always begin with Basic L2CAP header */
		if (skb->len < L2CAP_HDR_SIZE) {
			BT_ERR("Frame is too short (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		hdr = (struct l2cap_hdr *) skb->data;
		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);
			return 0;
		}

		BT_DBG("Start: total len %d, frag len %d", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %d, expected len %d)",
			       skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
		if (!conn->rx_skb)
			goto drop;

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
					  skb->len);
		conn->rx_len = len - skb->len;
		/* Fragment copied; fall through to drop: frees @skb */
		break;

	case ACL_CONT:
		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);

		if (!conn->rx_len) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %d, expected %d)",
			       skb->len, conn->rx_len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
					  skb->len);
		conn->rx_len -= skb->len;

		if (!conn->rx_len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, conn->rx_skb);
			conn->rx_skb = NULL;
		}
		/* Fragment copied; fall through to drop: frees @skb */
		break;
	}

drop:
	kfree_skb(skb);
	return 0;
}
5685
5686 static int l2cap_debugfs_show(struct seq_file *f, void *p)
5687 {
5688 struct l2cap_chan *c;
5689
5690 read_lock(&chan_list_lock);
5691
5692 list_for_each_entry(c, &chan_list, global_l) {
5693 struct sock *sk = c->sk;
5694
5695 seq_printf(f, "%pMR %pMR %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
5696 &bt_sk(sk)->src, &bt_sk(sk)->dst,
5697 c->state, __le16_to_cpu(c->psm),
5698 c->scid, c->dcid, c->imtu, c->omtu,
5699 c->sec_level, c->mode);
5700 }
5701
5702 read_unlock(&chan_list_lock);
5703
5704 return 0;
5705 }
5706
/* debugfs open callback: bind the seq_file show routine */
static int l2cap_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, l2cap_debugfs_show, inode->i_private);
}
5711
/* File operations for the "l2cap" debugfs entry (seq_file based) */
static const struct file_operations l2cap_debugfs_fops = {
	.open		= l2cap_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
5718
/* Handle for the "l2cap" file under the bluetooth debugfs directory */
static struct dentry *l2cap_debugfs;
5720
5721 int __init l2cap_init(void)
5722 {
5723 int err;
5724
5725 err = l2cap_init_sockets();
5726 if (err < 0)
5727 return err;
5728
5729 if (bt_debugfs) {
5730 l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
5731 NULL, &l2cap_debugfs_fops);
5732 if (!l2cap_debugfs)
5733 BT_ERR("Failed to create L2CAP debug file");
5734 }
5735
5736 return 0;
5737 }
5738
/* Module exit: remove the debugfs entry and unregister the socket
 * layer.  debugfs_remove() tolerates a NULL dentry.
 */
void l2cap_exit(void)
{
	debugfs_remove(l2cap_debugfs);
	l2cap_cleanup_sockets();
}
5744
5745 module_param(disable_ertm, bool, 0644);
5746 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");