Bluetooth: Remove extra l2cap_state_change(BT_CONNECTED)
[deliverable/linux.git] / net / bluetooth / l2cap_core.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
8
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
10
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
14
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
23
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
27 */
28
29 /* Bluetooth L2CAP core. */
30
31 #include <linux/module.h>
32
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
35
36 #include <net/bluetooth/bluetooth.h>
37 #include <net/bluetooth/hci_core.h>
38 #include <net/bluetooth/l2cap.h>
39 #include <net/bluetooth/smp.h>
40
41 bool disable_ertm;
42
43 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
44 static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP, };
45
46 static LIST_HEAD(chan_list);
47 static DEFINE_RWLOCK(chan_list_lock);
48
49 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
50 u8 code, u8 ident, u16 dlen, void *data);
51 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
52 void *data);
53 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
54 static void l2cap_send_disconn_req(struct l2cap_conn *conn,
55 struct l2cap_chan *chan, int err);
56
57 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
58 struct sk_buff_head *skbs, u8 event);
59
60 /* ---- L2CAP channels ---- */
61
62 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
63 {
64 struct l2cap_chan *c;
65
66 list_for_each_entry(c, &conn->chan_l, list) {
67 if (c->dcid == cid)
68 return c;
69 }
70 return NULL;
71 }
72
73 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
74 {
75 struct l2cap_chan *c;
76
77 list_for_each_entry(c, &conn->chan_l, list) {
78 if (c->scid == cid)
79 return c;
80 }
81 return NULL;
82 }
83
/* Find channel with given SCID.
 * Returns a channel that is locked via l2cap_chan_lock(); the caller
 * must unlock it.  Returns NULL if no channel matches. */
static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
{
	struct l2cap_chan *c;

	/* Lock the channel before dropping chan_lock so it cannot be
	 * removed between lookup and return. */
	mutex_lock(&conn->chan_lock);
	c = __l2cap_get_chan_by_scid(conn, cid);
	if (c)
		l2cap_chan_lock(c);
	mutex_unlock(&conn->chan_lock);

	return c;
}
98
99 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
100 {
101 struct l2cap_chan *c;
102
103 list_for_each_entry(c, &conn->chan_l, list) {
104 if (c->ident == ident)
105 return c;
106 }
107 return NULL;
108 }
109
/* Search the global channel list for a channel bound to source PSM
 * @psm on local address @src.  Caller must hold chan_list_lock.
 * Returns NULL when no channel is bound there.
 */
static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
{
	struct l2cap_chan *c;

	list_for_each_entry(c, &chan_list, global_l) {
		/* sport is the bound (source) PSM; bacmp() == 0 means
		 * the addresses are equal. */
		if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
			return c;
	}
	return NULL;
}
120
/* Bind @psm as the PSM and source PSM of @chan, or pick a free
 * dynamic PSM when @psm is 0.
 * Returns 0 on success, -EADDRINUSE when the requested PSM is already
 * bound for @src, or -EINVAL when the dynamic range is exhausted.
 */
int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
{
	int err;

	write_lock(&chan_list_lock);

	if (psm && __l2cap_global_chan_by_addr(psm, src)) {
		err = -EADDRINUSE;
		goto done;
	}

	if (psm) {
		chan->psm = psm;
		chan->sport = psm;
		err = 0;
	} else {
		u16 p;

		/* Scan odd values in the dynamic PSM range (step 2
		 * keeps the candidates odd). */
		err = -EINVAL;
		for (p = 0x1001; p < 0x1100; p += 2)
			if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
				chan->psm = cpu_to_le16(p);
				chan->sport = cpu_to_le16(p);
				err = 0;
				break;
			}
	}

done:
	write_unlock(&chan_list_lock);
	return err;
}
153
154 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
155 {
156 write_lock(&chan_list_lock);
157
158 chan->scid = scid;
159
160 write_unlock(&chan_list_lock);
161
162 return 0;
163 }
164
165 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
166 {
167 u16 cid = L2CAP_CID_DYN_START;
168
169 for (; cid < L2CAP_CID_DYN_END; cid++) {
170 if (!__l2cap_get_chan_by_scid(conn, cid))
171 return cid;
172 }
173
174 return 0;
175 }
176
/* Set the channel state and notify the channel owner through the
 * state_change callback.  Lockless variant: callers take the socket
 * lock themselves (see l2cap_state_change).
 */
static void __l2cap_state_change(struct l2cap_chan *chan, int state)
{
	BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
						state_to_string(state));

	chan->state = state;
	chan->ops->state_change(chan->data, state);
}
185
/* Locked wrapper around __l2cap_state_change(): takes and releases
 * the channel's socket lock around the state transition.
 */
static void l2cap_state_change(struct l2cap_chan *chan, int state)
{
	struct sock *sk = chan->sk;

	lock_sock(sk);
	__l2cap_state_change(chan, state);
	release_sock(sk);
}
194
/* Record @err on the channel's socket.  Lockless variant: callers
 * hold the socket lock (see l2cap_chan_set_err).
 */
static inline void __l2cap_chan_set_err(struct l2cap_chan *chan, int err)
{
	struct sock *sk = chan->sk;

	sk->sk_err = err;
}
201
/* Locked wrapper around __l2cap_chan_set_err(): takes the socket lock
 * while setting sk_err.
 */
static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
{
	struct sock *sk = chan->sk;

	lock_sock(sk);
	__l2cap_chan_set_err(chan, err);
	release_sock(sk);
}
210
/* Arm the ERTM retransmission timer — but only if the monitor timer
 * is not already pending and a retransmission timeout was negotiated.
 */
static void __set_retrans_timer(struct l2cap_chan *chan)
{
	if (!delayed_work_pending(&chan->monitor_timer) &&
	    chan->retrans_timeout) {
		l2cap_set_timer(chan, &chan->retrans_timer,
				msecs_to_jiffies(chan->retrans_timeout));
	}
}
219
/* Arm the ERTM monitor timer, cancelling any pending retransmission
 * timer first.  A zero monitor_timeout leaves the timer unarmed.
 */
static void __set_monitor_timer(struct l2cap_chan *chan)
{
	__clear_retrans_timer(chan);
	if (chan->monitor_timeout) {
		l2cap_set_timer(chan, &chan->monitor_timer,
				msecs_to_jiffies(chan->monitor_timeout));
	}
}
228
229 static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
230 u16 seq)
231 {
232 struct sk_buff *skb;
233
234 skb_queue_walk(head, skb) {
235 if (bt_cb(skb)->control.txseq == seq)
236 return skb;
237 }
238
239 return NULL;
240 }
241
242 /* ---- L2CAP sequence number lists ---- */
243
244 /* For ERTM, ordered lists of sequence numbers must be tracked for
245 * SREJ requests that are received and for frames that are to be
246 * retransmitted. These seq_list functions implement a singly-linked
247 * list in an array, where membership in the list can also be checked
248 * in constant time. Items can also be added to the tail of the list
249 * and removed from the head in constant time, without further memory
250 * allocs or frees.
251 */
252
253 static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
254 {
255 size_t alloc_size, i;
256
257 /* Allocated size is a power of 2 to map sequence numbers
258 * (which may be up to 14 bits) in to a smaller array that is
259 * sized for the negotiated ERTM transmit windows.
260 */
261 alloc_size = roundup_pow_of_two(size);
262
263 seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
264 if (!seq_list->list)
265 return -ENOMEM;
266
267 seq_list->mask = alloc_size - 1;
268 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
269 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
270 for (i = 0; i < alloc_size; i++)
271 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
272
273 return 0;
274 }
275
/* Free the array allocated by l2cap_seq_list_init().  Safe on a list
 * whose allocation failed or never happened (kfree(NULL) is a no-op).
 */
static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
{
	kfree(seq_list->list);
}
280
/* Test whether @seq is currently on the list. */
static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
					   u16 seq)
{
	/* Constant-time check for list membership: a slot holding
	 * anything other than CLEAR (a successor or the TAIL marker)
	 * means the sequence number is queued. */
	return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
}
287
/* Remove @seq from the list and return it, or L2CAP_SEQ_LIST_CLEAR if
 * the list is empty or @seq is not found.  Head removal is O(1);
 * removing an interior element walks the singly-linked slots.
 */
static u16 l2cap_seq_list_remove(struct l2cap_seq_list *seq_list, u16 seq)
{
	u16 mask = seq_list->mask;

	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR) {
		/* In case someone tries to pop the head of an empty list */
		return L2CAP_SEQ_LIST_CLEAR;
	} else if (seq_list->head == seq) {
		/* Head can be removed in constant time */
		seq_list->head = seq_list->list[seq & mask];
		seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;

		/* Removed the last element: reset to the empty state. */
		if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
			seq_list->head = L2CAP_SEQ_LIST_CLEAR;
			seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
		}
	} else {
		/* Walk the list to find the sequence number */
		u16 prev = seq_list->head;
		while (seq_list->list[prev & mask] != seq) {
			prev = seq_list->list[prev & mask];
			/* Hit the tail marker without finding @seq. */
			if (prev == L2CAP_SEQ_LIST_TAIL)
				return L2CAP_SEQ_LIST_CLEAR;
		}

		/* Unlink the number from the list and clear it */
		seq_list->list[prev & mask] = seq_list->list[seq & mask];
		seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
		if (seq_list->tail == seq)
			seq_list->tail = prev;
	}
	return seq;
}
321
/* Pop and return the head of the list, or L2CAP_SEQ_LIST_CLEAR when
 * the list is empty.
 */
static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
{
	/* Remove the head in constant time */
	return l2cap_seq_list_remove(seq_list, seq_list->head);
}
327
328 static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
329 {
330 u16 i;
331
332 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
333 return;
334
335 for (i = 0; i <= seq_list->mask; i++)
336 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
337
338 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
339 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
340 }
341
/* Append @seq to the tail of the list in constant time.  A sequence
 * number that is already on the list is ignored (its slot is not
 * CLEAR), preserving list integrity.
 */
static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
{
	u16 mask = seq_list->mask;

	/* All appends happen in constant time */

	if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
		return;

	if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
		/* First element becomes the head. */
		seq_list->head = seq;
	else
		/* Link the old tail slot to the new element. */
		seq_list->list[seq_list->tail & mask] = seq;

	seq_list->tail = seq;
	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
}
359
/* Delayed-work handler for the channel timer.  Closes the channel
 * with an error that depends on the state it timed out in, then drops
 * the channel reference held while the timer was armed.
 */
static void l2cap_chan_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       chan_timer.work);
	struct l2cap_conn *conn = chan->conn;
	int reason;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	/* Lock order: conn->chan_lock, then the channel lock. */
	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(chan);

	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (chan->state == BT_CONNECT &&
		 chan->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	l2cap_chan_close(chan, reason);

	l2cap_chan_unlock(chan);

	/* Notify the owner after the channel lock is dropped. */
	chan->ops->close(chan->data);
	mutex_unlock(&conn->chan_lock);

	l2cap_chan_put(chan);
}
389
/* Allocate a new channel, initialize it to BT_OPEN with one reference
 * held, and add it to the global channel list.
 * Returns NULL on allocation failure.
 */
struct l2cap_chan *l2cap_chan_create(void)
{
	struct l2cap_chan *chan;

	chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
	if (!chan)
		return NULL;

	mutex_init(&chan->lock);

	write_lock(&chan_list_lock);
	list_add(&chan->global_l, &chan_list);
	write_unlock(&chan_list_lock);

	INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);

	chan->state = BT_OPEN;

	atomic_set(&chan->refcnt, 1);

	/* This flag is cleared in l2cap_chan_ready() */
	set_bit(CONF_NOT_COMPLETE, &chan->conf_state);

	BT_DBG("chan %p", chan);

	return chan;
}
417
/* Unlink @chan from the global channel list and drop the reference
 * taken at creation time.
 */
void l2cap_chan_destroy(struct l2cap_chan *chan)
{
	write_lock(&chan_list_lock);
	list_del(&chan->global_l);
	write_unlock(&chan_list_lock);

	l2cap_chan_put(chan);
}
426
/* Reset @chan to the default FCS, ERTM window/retry and security
 * parameters used before configuration negotiates other values.
 */
void l2cap_chan_set_defaults(struct l2cap_chan *chan)
{
	chan->fcs  = L2CAP_FCS_CRC16;
	chan->max_tx = L2CAP_DEFAULT_MAX_TX;
	chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
	chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
	chan->sec_level = BT_SECURITY_LOW;

	set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
}
437
/* Attach @chan to @conn: assign CIDs/MTU by channel type, set the
 * default QoS parameters, take a channel reference for the list and
 * link the channel on conn->chan_l.
 * NOTE(review): callers appear to hold conn->chan_lock (see
 * l2cap_chan_add) — confirm for any direct callers.
 */
static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
	       __le16_to_cpu(chan->psm), chan->dcid);

	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	chan->conn = conn;

	switch (chan->chan_type) {
	case L2CAP_CHAN_CONN_ORIENTED:
		if (conn->hcon->type == LE_LINK) {
			/* LE connection */
			chan->omtu = L2CAP_LE_DEFAULT_MTU;
			chan->scid = L2CAP_CID_LE_DATA;
			chan->dcid = L2CAP_CID_LE_DATA;
		} else {
			/* Alloc CID for connection-oriented socket */
			chan->scid = l2cap_alloc_cid(conn);
			chan->omtu = L2CAP_DEFAULT_MTU;
		}
		break;

	case L2CAP_CHAN_CONN_LESS:
		/* Connectionless socket */
		chan->scid = L2CAP_CID_CONN_LESS;
		chan->dcid = L2CAP_CID_CONN_LESS;
		chan->omtu = L2CAP_DEFAULT_MTU;
		break;

	default:
		/* Raw socket can send/recv signalling messages only */
		chan->scid = L2CAP_CID_SIGNALING;
		chan->dcid = L2CAP_CID_SIGNALING;
		chan->omtu = L2CAP_DEFAULT_MTU;
	}

	/* Default best-effort QoS parameters. */
	chan->local_id = L2CAP_BESTEFFORT_ID;
	chan->local_stype = L2CAP_SERV_BESTEFFORT;
	chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
	chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
	chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
	chan->local_flush_to = L2CAP_DEFAULT_FLUSH_TO;

	/* Reference held by the connection's channel list. */
	l2cap_chan_hold(chan);

	list_add(&chan->list, &conn->chan_l);
}
486
/* Locked wrapper around __l2cap_chan_add(): takes conn->chan_lock
 * around the list insertion.
 */
static void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	mutex_lock(&conn->chan_lock);
	__l2cap_chan_add(conn, chan);
	mutex_unlock(&conn->chan_lock);
}
493
/* Detach @chan from its connection (if any) and mark it closed.
 * @err: errno propagated to the socket, 0 for a clean close.
 * Takes the socket lock itself; the channel lock (and, where a conn
 * exists, conn->chan_lock) is expected to be held by the caller — see
 * l2cap_chan_timeout / l2cap_chan_close call paths.
 */
static void l2cap_chan_del(struct l2cap_chan *chan, int err)
{
	struct sock *sk = chan->sk;
	struct l2cap_conn *conn = chan->conn;
	struct sock *parent = bt_sk(sk)->parent;

	__clear_chan_timer(chan);

	BT_DBG("chan %p, conn %p, err %d", chan, conn, err);

	if (conn) {
		/* Delete from channel list */
		list_del(&chan->list);

		/* Drop the list's reference; the caller still owns its own. */
		l2cap_chan_put(chan);

		chan->conn = NULL;
		hci_conn_put(conn->hcon);
	}

	lock_sock(sk);

	__l2cap_state_change(chan, BT_CLOSED);
	sock_set_flag(sk, SOCK_ZAPPED);

	if (err)
		__l2cap_chan_set_err(chan, err);

	if (parent) {
		/* Channel was never accepted: unlink it from the
		 * listening socket's accept queue. */
		bt_accept_unlink(sk);
		parent->sk_data_ready(parent, 0);
	} else
		sk->sk_state_change(sk);

	release_sock(sk);

	/* The ERTM/streaming resources below only exist once
	 * configuration completed (flag cleared in l2cap_chan_ready). */
	if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
		return;

	switch(chan->mode) {
	case L2CAP_MODE_BASIC:
		break;

	case L2CAP_MODE_ERTM:
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);

		skb_queue_purge(&chan->srej_q);

		l2cap_seq_list_free(&chan->srej_list);
		l2cap_seq_list_free(&chan->retrans_list);

		/* fall through */

	case L2CAP_MODE_STREAMING:
		skb_queue_purge(&chan->tx_q);
		break;
	}

	return;
}
556
/* Tear down every not-yet-accepted channel queued on listening socket
 * @parent, closing each with ECONNRESET.
 */
static void l2cap_chan_cleanup_listen(struct sock *parent)
{
	struct sock *sk;

	BT_DBG("parent %p", parent);

	/* Close not yet accepted channels */
	while ((sk = bt_accept_dequeue(parent, NULL))) {
		struct l2cap_chan *chan = l2cap_pi(sk)->chan;

		l2cap_chan_lock(chan);
		__clear_chan_timer(chan);
		l2cap_chan_close(chan, ECONNRESET);
		l2cap_chan_unlock(chan);

		/* Owner notification happens outside the channel lock. */
		chan->ops->close(chan->data);
	}
}
575
/* Close @chan, choosing the shutdown path by its current state:
 * listening channels drain their accept queue; established ACL
 * channels send a Disconnect request first; a half-open incoming
 * channel (BT_CONNECT2) is rejected with a Connect response.
 * @reason: errno delivered to the channel's socket.
 * Caller holds the channel lock; the socket lock is taken here as
 * needed.
 */
void l2cap_chan_close(struct l2cap_chan *chan, int reason)
{
	struct l2cap_conn *conn = chan->conn;
	struct sock *sk = chan->sk;

	BT_DBG("chan %p state %s sk %p", chan,
	       state_to_string(chan->state), sk);

	switch (chan->state) {
	case BT_LISTEN:
		lock_sock(sk);
		l2cap_chan_cleanup_listen(sk);

		__l2cap_state_change(chan, BT_CLOSED);
		sock_set_flag(sk, SOCK_ZAPPED);
		release_sock(sk);
		break;

	case BT_CONNECTED:
	case BT_CONFIG:
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
		    conn->hcon->type == ACL_LINK) {
			/* Orderly shutdown: request disconnection and
			 * let the timer finish the teardown if the
			 * peer never responds. */
			__set_chan_timer(chan, sk->sk_sndtimeo);
			l2cap_send_disconn_req(conn, chan, reason);
		} else
			l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT2:
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
		    conn->hcon->type == ACL_LINK) {
			struct l2cap_conn_rsp rsp;
			__u16 result;

			/* Reject the pending incoming connection. */
			if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags))
				result = L2CAP_CR_SEC_BLOCK;
			else
				result = L2CAP_CR_BAD_PSM;
			l2cap_state_change(chan, BT_DISCONN);

			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);
			rsp.result = cpu_to_le16(result);
			rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);
		}

		l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT:
	case BT_DISCONN:
		l2cap_chan_del(chan, reason);
		break;

	default:
		/* BT_OPEN/BT_BOUND etc.: nothing on the wire, just
		 * zap the socket. */
		lock_sock(sk);
		sock_set_flag(sk, SOCK_ZAPPED);
		release_sock(sk);
		break;
	}
}
639
640 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
641 {
642 if (chan->chan_type == L2CAP_CHAN_RAW) {
643 switch (chan->sec_level) {
644 case BT_SECURITY_HIGH:
645 return HCI_AT_DEDICATED_BONDING_MITM;
646 case BT_SECURITY_MEDIUM:
647 return HCI_AT_DEDICATED_BONDING;
648 default:
649 return HCI_AT_NO_BONDING;
650 }
651 } else if (chan->psm == __constant_cpu_to_le16(L2CAP_PSM_SDP)) {
652 if (chan->sec_level == BT_SECURITY_LOW)
653 chan->sec_level = BT_SECURITY_SDP;
654
655 if (chan->sec_level == BT_SECURITY_HIGH)
656 return HCI_AT_NO_BONDING_MITM;
657 else
658 return HCI_AT_NO_BONDING;
659 } else {
660 switch (chan->sec_level) {
661 case BT_SECURITY_HIGH:
662 return HCI_AT_GENERAL_BONDING_MITM;
663 case BT_SECURITY_MEDIUM:
664 return HCI_AT_GENERAL_BONDING;
665 default:
666 return HCI_AT_NO_BONDING;
667 }
668 }
669 }
670
/* Service level security */
/* Check (and, via hci_conn_security, possibly initiate) the security
 * procedure required for @chan's security level on its ACL link.
 * Returns the result of hci_conn_security().
 */
int l2cap_chan_check_security(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	__u8 auth_type;

	auth_type = l2cap_get_auth_type(chan);

	return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
}
681
/* Allocate the next signalling-command identifier for @conn,
 * wrapping within the kernel's 1-128 range under conn->lock.
 */
static u8 l2cap_get_ident(struct l2cap_conn *conn)
{
	u8 id;

	/* Get next available identificator.
	 *    1 - 128 are used by kernel.
	 *  129 - 199 are reserved.
	 *  200 - 254 are used by utilities like l2ping, etc.
	 */

	spin_lock(&conn->lock);

	if (++conn->tx_ident > 128)
		conn->tx_ident = 1;

	id = conn->tx_ident;

	spin_unlock(&conn->lock);

	return id;
}
703
/* Build an L2CAP signalling command and queue it on the connection's
 * HCI channel at maximum priority.  Silently drops the command if the
 * skb cannot be built.
 */
static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
{
	struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
	u8 flags;

	BT_DBG("code 0x%2.2x", code);

	if (!skb)
		return;

	/* Use a non-flushable packet boundary when the controller
	 * supports it. */
	if (lmp_no_flush_capable(conn->hcon->hdev))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
	skb->priority = HCI_PRIO_MAX;

	hci_send_acl(conn->hchan, skb, flags);
}
724
/* Hand a data skb for @chan to the HCI layer, choosing the packet
 * boundary flag from the channel's flushable setting and controller
 * capability.
 */
static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct hci_conn *hcon = chan->conn->hcon;
	u16 flags;

	BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
	       skb->priority);

	if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
	    lmp_no_flush_capable(hcon->hdev))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
	hci_send_acl(chan->conn->hchan, skb, flags);
}
742
743 static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
744 {
745 control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
746 control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
747
748 if (enh & L2CAP_CTRL_FRAME_TYPE) {
749 /* S-Frame */
750 control->sframe = 1;
751 control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
752 control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
753
754 control->sar = 0;
755 control->txseq = 0;
756 } else {
757 /* I-Frame */
758 control->sframe = 0;
759 control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
760 control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
761
762 control->poll = 0;
763 control->super = 0;
764 }
765 }
766
767 static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
768 {
769 control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
770 control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
771
772 if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
773 /* S-Frame */
774 control->sframe = 1;
775 control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
776 control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
777
778 control->sar = 0;
779 control->txseq = 0;
780 } else {
781 /* I-Frame */
782 control->sframe = 0;
783 control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
784 control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
785
786 control->poll = 0;
787 control->super = 0;
788 }
789 }
790
/* Pull the control field (extended or enhanced, per the channel's
 * FLAG_EXT_CTRL) off the front of @skb and decode it into the skb's
 * control block.
 */
static inline void __unpack_control(struct l2cap_chan *chan,
				    struct sk_buff *skb)
{
	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
		__unpack_extended_control(get_unaligned_le32(skb->data),
					  &bt_cb(skb)->control);
		skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
	} else {
		__unpack_enhanced_control(get_unaligned_le16(skb->data),
					  &bt_cb(skb)->control);
		skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
	}
}
804
805 static u32 __pack_extended_control(struct l2cap_ctrl *control)
806 {
807 u32 packed;
808
809 packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
810 packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
811
812 if (control->sframe) {
813 packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
814 packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
815 packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
816 } else {
817 packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
818 packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
819 }
820
821 return packed;
822 }
823
824 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
825 {
826 u16 packed;
827
828 packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
829 packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
830
831 if (control->sframe) {
832 packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
833 packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
834 packed |= L2CAP_CTRL_FRAME_TYPE;
835 } else {
836 packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
837 packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
838 }
839
840 return packed;
841 }
842
/* Write the packed control field (extended or enhanced, per the
 * channel's FLAG_EXT_CTRL) into @skb just after the L2CAP header.
 */
static inline void __pack_control(struct l2cap_chan *chan,
				  struct l2cap_ctrl *control,
				  struct sk_buff *skb)
{
	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
		put_unaligned_le32(__pack_extended_control(control),
				   skb->data + L2CAP_HDR_SIZE);
	} else {
		put_unaligned_le16(__pack_enhanced_control(control),
				   skb->data + L2CAP_HDR_SIZE);
	}
}
855
/* Build a complete S-frame PDU for @chan with the given packed
 * @control field: L2CAP header, control field (enhanced or extended)
 * and, when negotiated, a trailing CRC16 FCS.
 * Returns the skb or ERR_PTR(-ENOMEM).
 */
static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
					       u32 control)
{
	struct sk_buff *skb;
	struct l2cap_hdr *lh;
	int hlen;

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		hlen = L2CAP_EXT_HDR_SIZE;
	else
		hlen = L2CAP_ENH_HDR_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	skb = bt_skb_alloc(hlen, GFP_KERNEL);

	if (!skb)
		return ERR_PTR(-ENOMEM);

	/* Header length field excludes the basic L2CAP header itself. */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	if (chan->fcs == L2CAP_FCS_CRC16) {
		/* FCS covers everything written so far. */
		u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
		put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
	}

	skb->priority = HCI_PRIO_MAX;
	return skb;
}
893
/* Transmit the supervisory frame described by @control, updating the
 * channel's F-bit, RNR-sent and acknowledgment bookkeeping as a side
 * effect.  Non-S-frame controls are ignored.
 */
static void l2cap_send_sframe(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;
	u32 control_field;

	BT_DBG("chan %p, control %p", chan, control);

	if (!control->sframe)
		return;

	/* A pending F-bit is carried on the next non-poll S-frame. */
	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
	    !control->poll)
		control->final = 1;

	if (control->super == L2CAP_SUPER_RR)
		clear_bit(CONN_RNR_SENT, &chan->conn_state);
	else if (control->super == L2CAP_SUPER_RNR)
		set_bit(CONN_RNR_SENT, &chan->conn_state);

	/* Any frame except SREJ acknowledges up to reqseq, so the
	 * delayed ack is no longer needed. */
	if (control->super != L2CAP_SUPER_SREJ) {
		chan->last_acked_seq = control->reqseq;
		__clear_ack_timer(chan);
	}

	BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
	       control->final, control->poll, control->super);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		control_field = __pack_extended_control(control);
	else
		control_field = __pack_enhanced_control(control);

	skb = l2cap_create_sframe_pdu(chan, control_field);
	if (!IS_ERR(skb))
		l2cap_do_send(chan, skb);
}
931
932 static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
933 {
934 struct l2cap_ctrl control;
935
936 BT_DBG("chan %p, poll %d", chan, poll);
937
938 memset(&control, 0, sizeof(control));
939 control.sframe = 1;
940 control.poll = poll;
941
942 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
943 control.super = L2CAP_SUPER_RNR;
944 else
945 control.super = L2CAP_SUPER_RR;
946
947 control.reqseq = chan->buffer_seq;
948 l2cap_send_sframe(chan, &control);
949 }
950
951 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
952 {
953 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
954 }
955
/* Send an L2CAP Connect Request for @chan and mark the connect as
 * pending; the allocated command ident is kept in chan->ident so the
 * response can be matched.
 */
static void l2cap_send_conn_req(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_conn_req req;

	req.scid = cpu_to_le16(chan->scid);
	req.psm  = chan->psm;

	chan->ident = l2cap_get_ident(conn);

	set_bit(CONF_CONNECT_PEND, &chan->conf_state);

	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
}
970
/* Move @chan to BT_CONNECTED: clear all configuration flags and the
 * channel timer, then wake the socket (and the listening parent, if
 * the channel came from an accept queue).
 */
static void l2cap_chan_ready(struct l2cap_chan *chan)
{
	struct sock *sk = chan->sk;
	struct sock *parent;

	lock_sock(sk);

	parent = bt_sk(sk)->parent;

	BT_DBG("sk %p, parent %p", sk, parent);

	/* This clears all conf flags, including CONF_NOT_COMPLETE */
	chan->conf_state = 0;
	__clear_chan_timer(chan);

	__l2cap_state_change(chan, BT_CONNECTED);
	sk->sk_state_change(sk);

	if (parent)
		parent->sk_data_ready(parent, 0);

	release_sock(sk);
}
994
/* Kick off connection establishment for @chan.  LE channels become
 * ready immediately; for BR/EDR the remote feature mask must be known
 * first, so either a Connect Request is sent (features known, security
 * satisfied) or an Information Request is issued to fetch them.
 */
static void l2cap_do_start(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;

	if (conn->hcon->type == LE_LINK) {
		l2cap_chan_ready(chan);
		return;
	}

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
		/* Feature exchange still in flight: the Connect
		 * Request is deferred until it completes. */
		if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
			return;

		if (l2cap_chan_check_security(chan) &&
		    __l2cap_no_conn_pending(chan))
			l2cap_send_conn_req(chan);
	} else {
		struct l2cap_info_req req;
		req.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

		l2cap_send_cmd(conn, conn->info_ident,
			       L2CAP_INFO_REQ, sizeof(req), &req);
	}
}
1024
1025 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1026 {
1027 u32 local_feat_mask = l2cap_feat_mask;
1028 if (!disable_ertm)
1029 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1030
1031 switch (mode) {
1032 case L2CAP_MODE_ERTM:
1033 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1034 case L2CAP_MODE_STREAMING:
1035 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
1036 default:
1037 return 0x00;
1038 }
1039 }
1040
/* Send an L2CAP Disconnect Request for @chan, stop any ERTM timers,
 * and move the channel to BT_DISCONN with @err set on its socket.
 * A NULL @conn is a no-op.
 */
static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err)
{
	struct sock *sk = chan->sk;
	struct l2cap_disconn_req req;

	if (!conn)
		return;

	if (chan->mode == L2CAP_MODE_ERTM) {
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);
	}

	req.dcid = cpu_to_le16(chan->dcid);
	req.scid = cpu_to_le16(chan->scid);
	l2cap_send_cmd(conn, l2cap_get_ident(conn),
		       L2CAP_DISCONN_REQ, sizeof(req), &req);

	lock_sock(sk);
	__l2cap_state_change(chan, BT_DISCONN);
	__l2cap_chan_set_err(chan, err);
	release_sock(sk);
}
1065
1066 /* ---- L2CAP connections ---- */
/* Walk every connection-oriented channel on @conn and advance its
 * setup: BT_CONNECT channels get a Connect Request (or are closed if
 * their mode is unsupported), BT_CONNECT2 channels get a Connect
 * Response followed, on success, by the first Configure Request.
 * Typically run once security/feature information becomes available.
 */
static void l2cap_conn_start(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan, *tmp;

	BT_DBG("conn %p", conn);

	mutex_lock(&conn->chan_lock);

	/* _safe variant: l2cap_chan_close() below can unlink entries. */
	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
		struct sock *sk = chan->sk;

		l2cap_chan_lock(chan);

		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			/* Wait for security and for any outstanding
			 * Connect Request to resolve. */
			if (!l2cap_chan_check_security(chan) ||
			    !__l2cap_no_conn_pending(chan)) {
				l2cap_chan_unlock(chan);
				continue;
			}

			/* A state-2 device with an unsupported mode
			 * cannot fall back: abort the channel. */
			if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
			    && test_bit(CONF_STATE2_DEVICE,
					&chan->conf_state)) {
				l2cap_chan_close(chan, ECONNRESET);
				l2cap_chan_unlock(chan);
				continue;
			}

			l2cap_send_conn_req(chan);

		} else if (chan->state == BT_CONNECT2) {
			struct l2cap_conn_rsp rsp;
			char buf[128];
			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);

			if (l2cap_chan_check_security(chan)) {
				lock_sock(sk);
				if (test_bit(BT_SK_DEFER_SETUP,
					     &bt_sk(sk)->flags)) {
					/* Userspace must authorize:
					 * answer "pending" and wake
					 * the listener. */
					struct sock *parent = bt_sk(sk)->parent;
					rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
					rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
					if (parent)
						parent->sk_data_ready(parent, 0);

				} else {
					__l2cap_state_change(chan, BT_CONFIG);
					rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
					rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
				}
				release_sock(sk);
			} else {
				/* Security procedure still running. */
				rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
				rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
			}

			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			/* Only a successful response that has not yet
			 * been configured proceeds to config. */
			if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
			    rsp.result != L2CAP_CR_SUCCESS) {
				l2cap_chan_unlock(chan);
				continue;
			}

			set_bit(CONF_REQ_SENT, &chan->conf_state);
			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
				       l2cap_build_conf_req(chan, buf), buf);
			chan->num_conf_req++;
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
1149
/* Find socket with cid and source/destination bdaddr.
 * Returns an exact match when one exists, otherwise the closest
 * wildcard (BDADDR_ANY) match.  The returned channel is NOT locked;
 * only chan_list_lock is held during the search.
 */
static struct l2cap_chan *l2cap_global_chan_by_scid(int state, u16 cid,
						    bdaddr_t *src,
						    bdaddr_t *dst)
{
	struct l2cap_chan *c, *c1 = NULL;

	read_lock(&chan_list_lock);

	list_for_each_entry(c, &chan_list, global_l) {
		struct sock *sk = c->sk;

		/* state == 0 matches any state. */
		if (state && c->state != state)
			continue;

		if (c->scid == cid) {
			int src_match, dst_match;
			int src_any, dst_any;

			/* Exact match. */
			src_match = !bacmp(&bt_sk(sk)->src, src);
			dst_match = !bacmp(&bt_sk(sk)->dst, dst);
			if (src_match && dst_match) {
				read_unlock(&chan_list_lock);
				return c;
			}

			/* Closest match */
			src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
			dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
			if ((src_match && dst_any) || (src_any && dst_match) ||
			    (src_any && dst_any))
				c1 = c;
		}
	}

	read_unlock(&chan_list_lock);

	return c1;
}
1192
1193 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
1194 {
1195 struct sock *parent, *sk;
1196 struct l2cap_chan *chan, *pchan;
1197
1198 BT_DBG("");
1199
1200 /* Check if we have socket listening on cid */
1201 pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
1202 conn->src, conn->dst);
1203 if (!pchan)
1204 return;
1205
1206 parent = pchan->sk;
1207
1208 lock_sock(parent);
1209
1210 /* Check for backlog size */
1211 if (sk_acceptq_is_full(parent)) {
1212 BT_DBG("backlog full %d", parent->sk_ack_backlog);
1213 goto clean;
1214 }
1215
1216 chan = pchan->ops->new_connection(pchan->data);
1217 if (!chan)
1218 goto clean;
1219
1220 sk = chan->sk;
1221
1222 hci_conn_hold(conn->hcon);
1223
1224 bacpy(&bt_sk(sk)->src, conn->src);
1225 bacpy(&bt_sk(sk)->dst, conn->dst);
1226
1227 bt_accept_enqueue(parent, sk);
1228
1229 l2cap_chan_add(conn, chan);
1230
1231 __set_chan_timer(chan, sk->sk_sndtimeo);
1232
1233 __l2cap_state_change(chan, BT_CONNECTED);
1234 parent->sk_data_ready(parent, 0);
1235
1236 clean:
1237 release_sock(parent);
1238 }
1239
/* The underlying HCI link is up: kick protocol setup for every channel
 * already attached to this connection.
 */
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan;

	BT_DBG("conn %p", conn);

	/* Incoming LE link: a listener may be waiting on the LE data CID */
	if (!conn->hcon->out && conn->hcon->type == LE_LINK)
		l2cap_le_conn_ready(conn);

	/* Outgoing LE link: raise security to the pending level first */
	if (conn->hcon->out && conn->hcon->type == LE_LINK)
		smp_conn_security(conn, conn->hcon->pending_sec_level);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {

		l2cap_chan_lock(chan);

		if (conn->hcon->type == LE_LINK) {
			/* LE channels become ready once security is met */
			if (smp_conn_security(conn, chan->sec_level))
				l2cap_chan_ready(chan);

		} else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			/* Connectionless/raw channels need no L2CAP
			 * signalling; mark them connected right away.
			 */
			struct sock *sk = chan->sk;
			__clear_chan_timer(chan);
			lock_sock(sk);
			__l2cap_state_change(chan, BT_CONNECTED);
			sk->sk_state_change(sk);
			release_sock(sk);

		} else if (chan->state == BT_CONNECT)
			l2cap_do_start(chan);

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
1278
1279 /* Notify sockets that we cannot guaranty reliability anymore */
static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
{
	struct l2cap_chan *chan;

	BT_DBG("conn %p", conn);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		/* Only channels that insisted on reliability see the error */
		if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
			__l2cap_chan_set_err(chan, err);
	}

	mutex_unlock(&conn->chan_lock);
}
1295
/* Information request timed out: stop waiting for the remote feature
 * mask and start any pending connections with default features.
 */
static void l2cap_info_timeout(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       info_timer.work);

	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
	conn->info_ident = 0;

	l2cap_conn_start(conn);
}
1306
/* Tear down an L2CAP connection: kill every attached channel with the
 * given error and free all connection resources.
 */
static void l2cap_conn_del(struct hci_conn *hcon, int err)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan, *l;

	if (!conn)
		return;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	kfree_skb(conn->rx_skb);

	mutex_lock(&conn->chan_lock);

	/* Kill channels */
	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
		/* Hold a ref so the channel survives l2cap_chan_del()
		 * until the close callback has run.
		 */
		l2cap_chan_hold(chan);
		l2cap_chan_lock(chan);

		l2cap_chan_del(chan, err);

		l2cap_chan_unlock(chan);

		chan->ops->close(chan->data);
		l2cap_chan_put(chan);
	}

	mutex_unlock(&conn->chan_lock);

	hci_chan_del(conn->hchan);

	/* Flush any timer work still referencing conn before kfree */
	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		cancel_delayed_work_sync(&conn->info_timer);

	if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) {
		cancel_delayed_work_sync(&conn->security_timer);
		smp_chan_destroy(conn);
	}

	hcon->l2cap_data = NULL;
	kfree(conn);
}
1349
/* SMP security procedure took too long: drop the whole connection. */
static void security_timeout(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       security_timer.work);

	l2cap_conn_del(conn->hcon, ETIMEDOUT);
}
1357
/* Allocate and initialize the L2CAP connection object for an HCI link.
 * Returns the existing conn if one is already attached, NULL on
 * allocation failure or (conn == NULL) when status is non-zero.
 */
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct hci_chan *hchan;

	if (conn || status)
		return conn;

	hchan = hci_chan_create(hcon);
	if (!hchan)
		return NULL;

	conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
	if (!conn) {
		hci_chan_del(hchan);
		return NULL;
	}

	hcon->l2cap_data = conn;
	conn->hcon = hcon;
	conn->hchan = hchan;

	BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);

	/* LE links may use a dedicated (usually smaller) MTU */
	if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
		conn->mtu = hcon->hdev->le_mtu;
	else
		conn->mtu = hcon->hdev->acl_mtu;

	conn->src = &hcon->hdev->bdaddr;
	conn->dst = &hcon->dst;

	conn->feat_mask = 0;

	spin_lock_init(&conn->lock);
	mutex_init(&conn->chan_lock);

	INIT_LIST_HEAD(&conn->chan_l);

	/* security_timer and info_timer share storage in a union, so
	 * only the one matching the link type is initialized.
	 */
	if (hcon->type == LE_LINK)
		INIT_DELAYED_WORK(&conn->security_timer, security_timeout);
	else
		INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);

	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	return conn;
}
1406
1407 /* ---- Socket interface ---- */
1408
1409 /* Find socket with psm and source / destination bdaddr.
1410 * Returns closest match.
1411 */
1412 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
1413 bdaddr_t *src,
1414 bdaddr_t *dst)
1415 {
1416 struct l2cap_chan *c, *c1 = NULL;
1417
1418 read_lock(&chan_list_lock);
1419
1420 list_for_each_entry(c, &chan_list, global_l) {
1421 struct sock *sk = c->sk;
1422
1423 if (state && c->state != state)
1424 continue;
1425
1426 if (c->psm == psm) {
1427 int src_match, dst_match;
1428 int src_any, dst_any;
1429
1430 /* Exact match. */
1431 src_match = !bacmp(&bt_sk(sk)->src, src);
1432 dst_match = !bacmp(&bt_sk(sk)->dst, dst);
1433 if (src_match && dst_match) {
1434 read_unlock(&chan_list_lock);
1435 return c;
1436 }
1437
1438 /* Closest match */
1439 src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
1440 dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
1441 if ((src_match && dst_any) || (src_any && dst_match) ||
1442 (src_any && dst_any))
1443 c1 = c;
1444 }
1445 }
1446
1447 read_unlock(&chan_list_lock);
1448
1449 return c1;
1450 }
1451
/* Initiate an outgoing L2CAP connection on @chan to @dst (with address
 * type @dst_type), using @psm for connection-oriented channels or a
 * fixed @cid.  Creates or reuses the underlying HCI link.
 * Returns 0 on success (or if already connecting) and a negative errno
 * otherwise (-EISCONN if already connected).
 */
int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
		       bdaddr_t *dst, u8 dst_type)
{
	struct sock *sk = chan->sk;
	bdaddr_t *src = &bt_sk(sk)->src;
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	__u8 auth_type;
	int err;

	BT_DBG("%s -> %s (type %u) psm 0x%2.2x", batostr(src), batostr(dst),
	       dst_type, __le16_to_cpu(chan->psm));

	hdev = hci_get_route(dst, src);
	if (!hdev)
		return -EHOSTUNREACH;

	hci_dev_lock(hdev);

	l2cap_chan_lock(chan);

	/* PSM must be odd and lsb of upper byte must be 0 */
	if ((__le16_to_cpu(psm) & 0x0101) != 0x0001 && !cid &&
	    chan->chan_type != L2CAP_CHAN_RAW) {
		err = -EINVAL;
		goto done;
	}

	/* Connection-oriented channels need either a PSM or a fixed CID */
	if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !(psm || cid)) {
		err = -EINVAL;
		goto done;
	}

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		break;
	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		if (!disable_ertm)
			break;
		/* fall through */
	default:
		err = -ENOTSUPP;
		goto done;
	}

	lock_sock(sk);

	switch (sk->sk_state) {
	case BT_CONNECT:
	case BT_CONNECT2:
	case BT_CONFIG:
		/* Already connecting */
		err = 0;
		release_sock(sk);
		goto done;

	case BT_CONNECTED:
		/* Already connected */
		err = -EISCONN;
		release_sock(sk);
		goto done;

	case BT_OPEN:
	case BT_BOUND:
		/* Can connect */
		break;

	default:
		err = -EBADFD;
		release_sock(sk);
		goto done;
	}

	/* Set destination address and psm */
	bacpy(&bt_sk(sk)->dst, dst);

	release_sock(sk);

	chan->psm = psm;
	chan->dcid = cid;

	auth_type = l2cap_get_auth_type(chan);

	/* The fixed LE data CID selects an LE link; anything else ACL */
	if (chan->dcid == L2CAP_CID_LE_DATA)
		hcon = hci_connect(hdev, LE_LINK, dst, dst_type,
				   chan->sec_level, auth_type);
	else
		hcon = hci_connect(hdev, ACL_LINK, dst, dst_type,
				   chan->sec_level, auth_type);

	if (IS_ERR(hcon)) {
		err = PTR_ERR(hcon);
		goto done;
	}

	conn = l2cap_conn_add(hcon, 0);
	if (!conn) {
		hci_conn_put(hcon);
		err = -ENOMEM;
		goto done;
	}

	if (hcon->type == LE_LINK) {
		err = 0;

		/* Only one channel may ride an LE link at a time */
		if (!list_empty(&conn->chan_l)) {
			err = -EBUSY;
			hci_conn_put(hcon);
		}

		if (err)
			goto done;
	}

	/* Update source addr of the socket */
	bacpy(src, conn->src);

	/* l2cap_chan_add() takes conn->chan_lock, which nests outside
	 * the channel lock, so drop and retake the channel lock around it.
	 */
	l2cap_chan_unlock(chan);
	l2cap_chan_add(conn, chan);
	l2cap_chan_lock(chan);

	l2cap_state_change(chan, BT_CONNECT);
	__set_chan_timer(chan, sk->sk_sndtimeo);

	if (hcon->state == BT_CONNECTED) {
		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			__clear_chan_timer(chan);
			if (l2cap_chan_check_security(chan))
				l2cap_state_change(chan, BT_CONNECTED);
		} else
			l2cap_do_start(chan);
	}

	err = 0;

done:
	l2cap_chan_unlock(chan);
	hci_dev_unlock(hdev);
	hci_dev_put(hdev);
	return err;
}
1595
/* Block (interruptibly) until all outstanding ERTM I-frames on the
 * socket's channel have been acknowledged, or the channel goes away.
 * Called with the socket locked; the lock is dropped while sleeping.
 * Returns 0 on success or a negative errno (signal/socket error).
 */
int __l2cap_wait_ack(struct sock *sk)
{
	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;
	int timeo = HZ/5;

	add_wait_queue(sk_sleep(sk), &wait);
	set_current_state(TASK_INTERRUPTIBLE);
	while (chan->unacked_frames > 0 && chan->conn) {
		if (!timeo)
			timeo = HZ/5;

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}

		/* Drop the socket lock while waiting so acks can arrive */
		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);
		set_current_state(TASK_INTERRUPTIBLE);

		err = sock_error(sk);
		if (err)
			break;
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);
	return err;
}
1627
/* ERTM monitor timer expired: feed the event into the TX state
 * machine (resends poll frames while waiting for the final bit).
 */
static void l2cap_monitor_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       monitor_timer.work);

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	/* Channel may have been disconnected while the work was queued */
	if (!chan->conn) {
		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
		return;
	}

	l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
1648
/* ERTM retransmission timer expired: feed the event into the TX state
 * machine so unacked frames are polled for.
 */
static void l2cap_retrans_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       retrans_timer.work);

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	/* Channel may have been disconnected while the work was queued */
	if (!chan->conn) {
		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
		return;
	}

	l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
1668
/* Send all queued I-frames on a streaming-mode channel: fill in the
 * control field and optional FCS, then transmit in order.  Streaming
 * mode keeps no copies — frames are never retransmitted.
 */
static void l2cap_streaming_send(struct l2cap_chan *chan,
				 struct sk_buff_head *skbs)
{
	struct sk_buff *skb;
	struct l2cap_ctrl *control;

	BT_DBG("chan %p, skbs %p", chan, skbs);

	skb_queue_splice_tail_init(skbs, &chan->tx_q);

	while (!skb_queue_empty(&chan->tx_q)) {

		skb = skb_dequeue(&chan->tx_q);

		bt_cb(skb)->control.retries = 1;
		control = &bt_cb(skb)->control;

		/* Streaming frames carry no acknowledgement info */
		control->reqseq = 0;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		l2cap_do_send(chan, skb);

		BT_DBG("Sent txseq %d", (int)control->txseq);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->frames_sent++;
	}
}
1704
/* Transmit as many pending ERTM I-frames as the remote TX window
 * allows.  Originals stay on tx_q for possible retransmission; clones
 * are handed to the lower layer.  Returns the number of frames sent,
 * or -ENOTCONN if the channel is not connected.
 */
static int l2cap_ertm_send(struct l2cap_chan *chan)
{
	struct sk_buff *skb, *tx_skb;
	struct l2cap_ctrl *control;
	int sent = 0;

	BT_DBG("chan %p", chan);

	if (chan->state != BT_CONNECTED)
		return -ENOTCONN;

	/* Remote signalled RNR: hold transmission until it clears */
	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return 0;

	while (chan->tx_send_head &&
	       chan->unacked_frames < chan->remote_tx_win &&
	       chan->tx_state == L2CAP_TX_STATE_XMIT) {

		skb = chan->tx_send_head;

		bt_cb(skb)->control.retries = 1;
		control = &bt_cb(skb)->control;

		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control->final = 1;

		/* Each I-frame also acknowledges everything received */
		control->reqseq = chan->buffer_seq;
		chan->last_acked_seq = chan->buffer_seq;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		/* Clone after data has been modified. Data is assumed to be
		   read-only (for locking purposes) on cloned sk_buffs.
		 */
		tx_skb = skb_clone(skb, GFP_KERNEL);

		if (!tx_skb)
			break;

		__set_retrans_timer(chan);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->unacked_frames++;
		chan->frames_sent++;
		sent++;

		if (skb_queue_is_last(&chan->tx_q, skb))
			chan->tx_send_head = NULL;
		else
			chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);

		l2cap_do_send(chan, tx_skb);
		BT_DBG("Sent txseq %d", (int)control->txseq);
	}

	BT_DBG("Sent %d, %d unacked, %d in ERTM queue", sent,
	       (int) chan->unacked_frames, skb_queue_len(&chan->tx_q));

	return sent;
}
1771
/* Retransmit every frame whose sequence number sits on retrans_list.
 * Each frame's control field is rebuilt with the current reqseq before
 * sending; the retry limit (max_tx) triggers a disconnect when hit.
 */
static void l2cap_ertm_resend(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	struct sk_buff *skb;
	struct sk_buff *tx_skb;
	u16 seq;

	BT_DBG("chan %p", chan);

	/* Remote is busy: retrans_list is kept for when it clears */
	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
		seq = l2cap_seq_list_pop(&chan->retrans_list);

		skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
		if (!skb) {
			BT_DBG("Error: Can't retransmit seq %d, frame missing",
			       seq);
			continue;
		}

		bt_cb(skb)->control.retries++;
		control = bt_cb(skb)->control;

		/* max_tx == 0 means unlimited retries */
		if (chan->max_tx != 0 &&
		    bt_cb(skb)->control.retries > chan->max_tx) {
			BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
			l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		control.reqseq = chan->buffer_seq;
		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control.final = 1;
		else
			control.final = 0;

		if (skb_cloned(skb)) {
			/* Cloned sk_buffs are read-only, so we need a
			 * writeable copy
			 */
			tx_skb = skb_copy(skb, GFP_ATOMIC);
		} else {
			tx_skb = skb_clone(skb, GFP_ATOMIC);
		}

		if (!tx_skb) {
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		/* Update skb contents */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
			put_unaligned_le32(__pack_extended_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		} else {
			put_unaligned_le16(__pack_enhanced_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		}

		/* Recompute the FCS over the rebuilt control field */
		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) tx_skb->data, tx_skb->len);
			put_unaligned_le16(fcs, skb_put(tx_skb,
							L2CAP_FCS_SIZE));
		}

		l2cap_do_send(chan, tx_skb);

		BT_DBG("Resent txseq %d", control.txseq);

		chan->last_acked_seq = chan->buffer_seq;
	}
}
1847
/* Retransmit the single frame identified by control->reqseq. */
static void l2cap_retransmit(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);

	l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
	l2cap_ertm_resend(chan);
}
1856
/* Retransmit every unacked frame starting at control->reqseq (REJ or
 * poll recovery): rebuild retrans_list from the TX queue and resend.
 */
static void l2cap_retransmit_all(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	if (control->poll)
		set_bit(CONN_SEND_FBIT, &chan->conn_state);

	l2cap_seq_list_clear(&chan->retrans_list);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	if (chan->unacked_frames) {
		/* Find the first frame to resend (reqseq or queue head) */
		skb_queue_walk(&chan->tx_q, skb) {
			if (bt_cb(skb)->control.txseq == control->reqseq ||
			    skb == chan->tx_send_head)
				break;
		}

		/* Queue everything from there up to the unsent frames */
		skb_queue_walk_from(&chan->tx_q, skb) {
			if (skb == chan->tx_send_head)
				break;

			l2cap_seq_list_append(&chan->retrans_list,
					      bt_cb(skb)->control.txseq);
		}

		l2cap_ertm_resend(chan);
	}
}
1890
/* Acknowledge received I-frames: send RNR when locally busy, piggyback
 * acks on outgoing I-frames when possible, send an explicit RR once
 * the number of unacked frames reaches 3/4 of the TX window, and
 * otherwise defer via the ack timer.
 */
static void l2cap_send_ack(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
					 chan->last_acked_seq);
	int threshold;

	BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
	       chan, chan->last_acked_seq, chan->buffer_seq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    chan->rx_state == L2CAP_RX_STATE_RECV) {
		__clear_ack_timer(chan);
		control.super = L2CAP_SUPER_RNR;
		control.reqseq = chan->buffer_seq;
		l2cap_send_sframe(chan, &control);
	} else {
		if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
			l2cap_ertm_send(chan);
			/* If any i-frames were sent, they included an ack */
			if (chan->buffer_seq == chan->last_acked_seq)
				frames_to_ack = 0;
		}

		/* Ack now if the tx window is 3/4ths full.
		 * Calculate without mul or div
		 */
		threshold = chan->tx_win;
		threshold += threshold << 1;
		threshold >>= 2;

		BT_DBG("frames_to_ack %d, threshold %d", (int)frames_to_ack,
		       threshold);

		if (frames_to_ack >= threshold) {
			__clear_ack_timer(chan);
			control.super = L2CAP_SUPER_RR;
			control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &control);
			frames_to_ack = 0;
		}

		/* Anything still unacked will be picked up by the timer */
		if (frames_to_ack)
			__set_ack_timer(chan);
	}
}
1940
/* Copy @len bytes of user data from @msg into @skb, placing the first
 * @count bytes directly and chaining the remainder as HCI-MTU-sized
 * fragments on skb's frag_list.  Returns bytes copied or -EFAULT /
 * an allocation error.
 */
static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
					 struct msghdr *msg, int len,
					 int count, struct sk_buff *skb)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff **frag;
	int sent = 0;

	if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
		return -EFAULT;

	sent += count;
	len -= count;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		struct sk_buff *tmp;

		count = min_t(unsigned int, conn->mtu, len);

		tmp = chan->ops->alloc_skb(chan, count,
					   msg->msg_flags & MSG_DONTWAIT);
		if (IS_ERR(tmp))
			return PTR_ERR(tmp);

		*frag = tmp;

		if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
			return -EFAULT;

		(*frag)->priority = skb->priority;

		sent += count;
		len -= count;

		/* Keep the head skb's accounting in sync with the chain */
		skb->len += (*frag)->len;
		skb->data_len += (*frag)->len;

		frag = &(*frag)->next;
	}

	return sent;
}
1985
/* Build a connectionless (G-frame) PDU: L2CAP header + PSM + payload
 * copied from userspace.  Returns the skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
						 struct msghdr *msg, size_t len,
						 u32 priority)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %d priority %u", chan, (int)len, priority);

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, count + hlen,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	skb->priority = priority;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
	put_unaligned(chan->psm, skb_put(skb, L2CAP_PSMLEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
2019
/* Build a basic-mode (B-frame) PDU: plain L2CAP header + payload
 * copied from userspace.  Returns the skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
					      struct msghdr *msg, size_t len,
					      u32 priority)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %d", chan, (int)len);

	count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);

	skb = chan->ops->alloc_skb(chan, count + L2CAP_HDR_SIZE,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	skb->priority = priority;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len);

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
2052
/* Build an ERTM/streaming I-frame PDU.  @sdulen is non-zero only for
 * the first (SAR start) segment of a segmented SDU.  The control field
 * is zero-filled here and populated just before transmission.
 * Returns the skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
					       struct msghdr *msg, size_t len,
					       u16 sdulen)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %d", chan, (int)len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	/* Header size depends on enhanced vs extended control fields */
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		hlen = L2CAP_EXT_HDR_SIZE;
	else
		hlen = L2CAP_ENH_HDR_SIZE;

	if (sdulen)
		hlen += L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, count + hlen,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	/* Control header is populated later */
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	bt_cb(skb)->control.fcs = chan->fcs;
	bt_cb(skb)->control.retries = 0;
	return skb;
}
2109
/* Segment an SDU of @len bytes from @msg into I-frame PDUs sized to
 * fit the HCI MTU and the remote MPS, appending them to @seg_queue
 * with proper SAR markings (UNSEGMENTED, or START/CONTINUE/END).
 * On failure the queue is purged and a negative errno returned.
 */
static int l2cap_segment_sdu(struct l2cap_chan *chan,
			     struct sk_buff_head *seg_queue,
			     struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	u16 sdu_len;
	size_t pdu_len;
	int err = 0;
	u8 sar;

	BT_DBG("chan %p, msg %p, len %d", chan, msg, (int)len);

	/* It is critical that ERTM PDUs fit in a single HCI fragment,
	 * so fragmented skbs are not used.  The HCI layer's handling
	 * of fragmented skbs is not compatible with ERTM's queueing.
	 */

	/* PDU size is derived from the HCI MTU */
	pdu_len = chan->conn->mtu;

	pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);

	/* Adjust for largest possible L2CAP overhead. */
	if (chan->fcs)
		pdu_len -= L2CAP_FCS_SIZE;

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		pdu_len -= L2CAP_EXT_HDR_SIZE;
	else
		pdu_len -= L2CAP_ENH_HDR_SIZE;

	/* Remote device may have requested smaller PDUs */
	pdu_len = min_t(size_t, pdu_len, chan->remote_mps);

	if (len <= pdu_len) {
		sar = L2CAP_SAR_UNSEGMENTED;
		sdu_len = 0;
		pdu_len = len;
	} else {
		sar = L2CAP_SAR_START;
		sdu_len = len;
		/* Start segment also carries the 2-byte SDU length */
		pdu_len -= L2CAP_SDULEN_SIZE;
	}

	while (len > 0) {
		skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);

		if (IS_ERR(skb)) {
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		bt_cb(skb)->control.sar = sar;
		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;
		if (sdu_len) {
			/* Continuation segments reclaim the SDU length space */
			sdu_len = 0;
			pdu_len += L2CAP_SDULEN_SIZE;
		}

		if (len <= pdu_len) {
			sar = L2CAP_SAR_END;
			pdu_len = len;
		} else {
			sar = L2CAP_SAR_CONTINUE;
		}
	}

	return err;
}
2181
/* Send user data on @chan, dispatching on channel type and mode:
 * connectionless PDU, basic-mode B-frame, or ERTM/streaming segmented
 * I-frames.  Returns bytes sent on success or a negative errno.
 */
int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
		    u32 priority)
{
	struct sk_buff *skb;
	int err;
	struct sk_buff_head seg_queue;

	/* Connectionless channel */
	if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
		skb = l2cap_create_connless_pdu(chan, msg, len, priority);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		l2cap_do_send(chan, skb);
		return len;
	}

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		/* Create a basic PDU */
		skb = l2cap_create_basic_pdu(chan, msg, len, priority);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		l2cap_do_send(chan, skb);
		err = len;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* Check outgoing MTU */
		if (len > chan->omtu) {
			err = -EMSGSIZE;
			break;
		}

		__skb_queue_head_init(&seg_queue);

		/* Do segmentation before calling in to the state machine,
		 * since it's possible to block while waiting for memory
		 * allocation.
		 */
		err = l2cap_segment_sdu(chan, &seg_queue, msg, len);

		/* The channel could have been closed while segmenting,
		 * check that it is still connected.
		 */
		if (chan->state != BT_CONNECTED) {
			__skb_queue_purge(&seg_queue);
			err = -ENOTCONN;
		}

		if (err)
			break;

		if (chan->mode == L2CAP_MODE_ERTM)
			l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
		else
			l2cap_streaming_send(chan, &seg_queue);

		err = len;

		/* If the skbs were not queued for sending, they'll still be in
		 * seg_queue and need to be purged.
		 */
		__skb_queue_purge(&seg_queue);
		break;

	default:
		BT_DBG("bad state %1.1x", chan->mode);
		err = -EBADFD;
	}

	return err;
}
2261
/* Send an SREJ S-frame for every missing sequence number between
 * expected_tx_seq and @txseq that is not already buffered, recording
 * each on srej_list, then advance expected_tx_seq past @txseq.
 */
static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
{
	struct l2cap_ctrl control;
	u16 seq;

	BT_DBG("chan %p, txseq %d", chan, txseq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;

	for (seq = chan->expected_tx_seq; seq != txseq;
	     seq = __next_seq(chan, seq)) {
		/* Skip frames already received out of order */
		if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
			control.reqseq = seq;
			l2cap_send_sframe(chan, &control);
			l2cap_seq_list_append(&chan->srej_list, seq);
		}
	}

	chan->expected_tx_seq = __next_seq(chan, txseq);
}
2284
/* Re-send an SREJ for the most recently requested missing frame
 * (the tail of srej_list); no-op if the list is empty.
 */
static void l2cap_send_srej_tail(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;

	BT_DBG("chan %p", chan);

	if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
		return;

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;
	control.reqseq = chan->srej_list.tail;
	l2cap_send_sframe(chan, &control);
}
2300
/* Re-send SREJ frames for every outstanding missing sequence number up
 * to (but not including) @txseq, cycling each entry back onto
 * srej_list.  Bounded to a single pass over the list.
 */
static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
{
	struct l2cap_ctrl control;
	u16 initial_head;
	u16 seq;

	BT_DBG("chan %p, txseq %d", chan, txseq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;

	/* Capture initial list head to allow only one pass through the list. */
	initial_head = chan->srej_list.head;

	do {
		seq = l2cap_seq_list_pop(&chan->srej_list);
		if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
			break;

		control.reqseq = seq;
		l2cap_send_sframe(chan, &control);
		l2cap_seq_list_append(&chan->srej_list, seq);
	} while (chan->srej_list.head != initial_head);
}
2326
/* Process an incoming acknowledgement: drop every TX-queue frame with
 * a sequence number before @reqseq and update the unacked count,
 * stopping the retransmission timer when everything is acked.
 */
static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
{
	struct sk_buff *acked_skb;
	u16 ackseq;

	BT_DBG("chan %p, reqseq %d", chan, reqseq);

	/* Nothing outstanding, or this ack is a duplicate */
	if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
		return;

	BT_DBG("expected_ack_seq %d, unacked_frames %d",
	       chan->expected_ack_seq, chan->unacked_frames);

	for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
	     ackseq = __next_seq(chan, ackseq)) {

		acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
		if (acked_skb) {
			skb_unlink(acked_skb, &chan->tx_q);
			kfree_skb(acked_skb);
			chan->unacked_frames--;
		}
	}

	chan->expected_ack_seq = reqseq;

	if (chan->unacked_frames == 0)
		__clear_retrans_timer(chan);

	BT_DBG("unacked_frames %d", (int) chan->unacked_frames);
}
2358
/* Abandon selective-reject recovery: discard out-of-order frames,
 * rewind expected_tx_seq and return the RX machine to RECV state.
 */
static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
{
	BT_DBG("chan %p", chan);

	chan->expected_tx_seq = chan->buffer_seq;
	l2cap_seq_list_clear(&chan->srej_list);
	skb_queue_purge(&chan->srej_q);
	chan->rx_state = L2CAP_RX_STATE_RECV;
}
2368
/* ERTM TX state machine handler for the XMIT state: new data is sent
 * immediately; poll/timeout events transition to WAIT_F while the
 * remote is probed with a P-bit frame.
 */
static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
				struct l2cap_ctrl *control,
				struct sk_buff_head *skbs, u8 event)
{
	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
	       event);

	switch (event) {
	case L2CAP_EV_DATA_REQUEST:
		if (chan->tx_send_head == NULL)
			chan->tx_send_head = skb_peek(skbs);

		skb_queue_splice_tail_init(skbs, &chan->tx_q);
		l2cap_ertm_send(chan);
		break;
	case L2CAP_EV_LOCAL_BUSY_DETECTED:
		BT_DBG("Enter LOCAL_BUSY");
		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
			/* The SREJ_SENT state must be aborted if we are to
			 * enter the LOCAL_BUSY state.
			 */
			l2cap_abort_rx_srej_sent(chan);
		}

		/* l2cap_send_ack() emits RNR while locally busy */
		l2cap_send_ack(chan);

		break;
	case L2CAP_EV_LOCAL_BUSY_CLEAR:
		BT_DBG("Exit LOCAL_BUSY");
		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
			/* Poll the remote so it resumes transmission */
			struct l2cap_ctrl local_control;

			memset(&local_control, 0, sizeof(local_control));
			local_control.sframe = 1;
			local_control.super = L2CAP_SUPER_RR;
			local_control.poll = 1;
			local_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &local_control);

			chan->retry_count = 1;
			__set_monitor_timer(chan);
			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		}
		break;
	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
		l2cap_process_reqseq(chan, control->reqseq);
		break;
	case L2CAP_EV_EXPLICIT_POLL:
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		__clear_ack_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RETRANS_TO:
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RECV_FBIT:
		/* Nothing to process */
		break;
	default:
		break;
	}
}
2440
/* ERTM transmit-side state machine handler for the WAIT_F state.
 *
 * In WAIT_F we have sent a poll (P=1) and are waiting for a frame with
 * the final bit (F=1) from the peer.  New data is queued but not sent
 * until the final bit arrives and we return to XMIT.
 *
 * @chan:    channel whose tx_state is L2CAP_TX_STATE_WAIT_F
 * @control: control field of a received frame, for events derived from
 *           incoming traffic (may be NULL for locally generated events)
 * @skbs:    outbound frames, only used for L2CAP_EV_DATA_REQUEST
 * @event:   one of the L2CAP_EV_* transmit events
 */
static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
				  struct l2cap_ctrl *control,
				  struct sk_buff_head *skbs, u8 event)
{
	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
	       event);

	switch (event) {
	case L2CAP_EV_DATA_REQUEST:
		if (chan->tx_send_head == NULL)
			chan->tx_send_head = skb_peek(skbs);
		/* Queue data, but don't send. */
		skb_queue_splice_tail_init(skbs, &chan->tx_q);
		break;
	case L2CAP_EV_LOCAL_BUSY_DETECTED:
		BT_DBG("Enter LOCAL_BUSY");
		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
			/* The SREJ_SENT state must be aborted if we are to
			 * enter the LOCAL_BUSY state.
			 */
			l2cap_abort_rx_srej_sent(chan);
		}

		l2cap_send_ack(chan);

		break;
	case L2CAP_EV_LOCAL_BUSY_CLEAR:
		BT_DBG("Exit LOCAL_BUSY");
		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
			/* We had sent RNR; re-poll with RR and restart the
			 * monitor timer while staying in WAIT_F.
			 */
			struct l2cap_ctrl local_control;
			memset(&local_control, 0, sizeof(local_control));
			local_control.sframe = 1;
			local_control.super = L2CAP_SUPER_RR;
			local_control.poll = 1;
			local_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &local_control);

			chan->retry_count = 1;
			__set_monitor_timer(chan);
			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		}
		break;
	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
		l2cap_process_reqseq(chan, control->reqseq);

		/* Fall through */

	case L2CAP_EV_RECV_FBIT:
		/* The awaited final bit: stop the monitor timer, re-arm
		 * retransmission if frames are still unacked, and return
		 * to the XMIT state.
		 */
		if (control && control->final) {
			__clear_monitor_timer(chan);
			if (chan->unacked_frames > 0)
				__set_retrans_timer(chan);
			chan->retry_count = 0;
			chan->tx_state = L2CAP_TX_STATE_XMIT;
			BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state);
		}
		break;
	case L2CAP_EV_EXPLICIT_POLL:
		/* Ignore */
		break;
	case L2CAP_EV_MONITOR_TO:
		/* Monitor timer expired without a final bit: retry the poll
		 * up to max_tx times (0 means unlimited), then give up and
		 * disconnect the channel.
		 */
		if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
			l2cap_send_rr_or_rnr(chan, 1);
			__set_monitor_timer(chan);
			chan->retry_count++;
		} else {
			l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
		}
		break;
	default:
		break;
	}
}
2518
2519 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
2520 struct sk_buff_head *skbs, u8 event)
2521 {
2522 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2523 chan, control, skbs, event, chan->tx_state);
2524
2525 switch (chan->tx_state) {
2526 case L2CAP_TX_STATE_XMIT:
2527 l2cap_tx_state_xmit(chan, control, skbs, event);
2528 break;
2529 case L2CAP_TX_STATE_WAIT_F:
2530 l2cap_tx_state_wait_f(chan, control, skbs, event);
2531 break;
2532 default:
2533 /* Ignore event */
2534 break;
2535 }
2536 }
2537
/* Forward a received frame's reqseq and final bit to the transmit state
 * machine so outstanding I-frames can be acknowledged.
 */
static void l2cap_pass_to_tx(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);
	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
}
2544
/* Forward only a received frame's final bit to the transmit state
 * machine (no reqseq processing).
 */
static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
				  struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);
	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
}
2551
/* Copy frame to all raw sockets on that connection.
 *
 * Walks the connection's channel list under conn->chan_lock and hands a
 * clone of @skb to every raw channel except the one the frame originated
 * from.  The clone is freed here if the channel's recv callback rejects
 * it; otherwise the callback takes ownership.  The original @skb is not
 * consumed.
 */
static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct sk_buff *nskb;
	struct l2cap_chan *chan;

	BT_DBG("conn %p", conn);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		struct sock *sk = chan->sk;
		if (chan->chan_type != L2CAP_CHAN_RAW)
			continue;

		/* Don't send frame to the socket it came from */
		if (skb->sk == sk)
			continue;
		/* Clone may fail under memory pressure; skip this
		 * channel rather than fail the whole broadcast.
		 */
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (chan->ops->recv(chan->data, nskb))
			kfree_skb(nskb);
	}

	mutex_unlock(&conn->chan_lock);
}
2580
/* ---- L2CAP signalling commands ---- */

/* Build an skb carrying one signalling command.
 *
 * The first skb holds the L2CAP header (on the LE or BR/EDR signalling
 * CID, depending on link type), the command header, and as much of
 * @data as fits in conn->mtu.  Any remainder is split into continuation
 * fragments chained on frag_list; continuation fragments carry payload
 * only, no L2CAP header.
 *
 * Returns the skb, or NULL on allocation failure (any fragments already
 * allocated are freed along with the head skb).
 */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
					u8 code, u8 ident, u16 dlen, void *data)
{
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	int len, count;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
	       conn, code, ident, dlen);

	/* Total wire length; the first fragment is capped at the MTU */
	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, conn->mtu, len);

	skb = bt_skb_alloc(count, GFP_ATOMIC);
	if (!skb)
		return NULL;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);

	if (conn->hcon->type == LE_LINK)
		lh->cid = __constant_cpu_to_le16(L2CAP_CID_LE_SIGNALING);
	else
		lh->cid = __constant_cpu_to_le16(L2CAP_CID_SIGNALING);

	cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->code = code;
	cmd->ident = ident;
	cmd->len = cpu_to_le16(dlen);

	if (dlen) {
		/* Payload room left in the first skb after both headers */
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		memcpy(skb_put(skb, count), data, count);
		data += count;
	}

	len -= skb->len;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_alloc(count, GFP_ATOMIC);
		if (!*frag)
			goto fail;

		memcpy(skb_put(*frag, count), data, count);

		len -= count;
		data += count;

		frag = &(*frag)->next;
	}

	return skb;

fail:
	/* Frees the head skb and all fragments chained so far */
	kfree_skb(skb);
	return NULL;
}
2644
/* Extract one configuration option from a request/response buffer.
 *
 * Advances *ptr past the option and returns the number of bytes
 * consumed.  *type and *olen receive the option header fields; *val is
 * the option value for 1/2/4-byte options, or a pointer (cast to
 * unsigned long) into the buffer for any other length.
 *
 * NOTE(review): opt->len comes from the remote peer and is not
 * validated against the bytes remaining in the buffer here; callers
 * bound their loops by the total length but should verify that a
 * crafted over-long option cannot make *ptr/*val reference memory past
 * the end of the received data -- confirm against the callers.
 */
static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
{
	struct l2cap_conf_opt *opt = *ptr;
	int len;

	len = L2CAP_CONF_OPT_SIZE + opt->len;
	*ptr += len;

	*type = opt->type;
	*olen = opt->len;

	switch (opt->len) {
	case 1:
		*val = *((u8 *) opt->val);
		break;

	case 2:
		*val = get_unaligned_le16(opt->val);
		break;

	case 4:
		*val = get_unaligned_le32(opt->val);
		break;

	default:
		/* Variable-length option: hand back a pointer to the raw
		 * bytes instead of a decoded value.
		 */
		*val = (unsigned long) opt->val;
		break;
	}

	BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
	return len;
}
2677
/* Append one configuration option at *ptr and advance *ptr past it.
 *
 * For len of 1, 2 or 4, @val is the option value and is stored
 * little-endian; for any other length @val is interpreted as a pointer
 * to @len bytes to copy.  The caller must ensure the destination buffer
 * has at least L2CAP_CONF_OPT_SIZE + @len bytes of space remaining --
 * no bounds check is performed here.
 */
static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
{
	struct l2cap_conf_opt *opt = *ptr;

	BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);

	opt->type = type;
	opt->len = len;

	switch (len) {
	case 1:
		*((u8 *) opt->val) = val;
		break;

	case 2:
		put_unaligned_le16(val, opt->val);
		break;

	case 4:
		put_unaligned_le32(val, opt->val);
		break;

	default:
		/* Variable-length option: val is a pointer to the bytes */
		memcpy(opt->val, (void *) val, len);
		break;
	}

	*ptr += L2CAP_CONF_OPT_SIZE + len;
}
2707
/* Append an Extended Flow Specification (EFS) option built from the
 * channel's local parameters.  ERTM channels use the locally configured
 * identifier/service type and the spec's default access latency and
 * flush timeout; streaming channels use a fixed best-effort spec.  For
 * any other mode no option is added.
 */
static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
{
	struct l2cap_conf_efs efs;

	switch (chan->mode) {
	case L2CAP_MODE_ERTM:
		efs.id = chan->local_id;
		efs.stype = chan->local_stype;
		efs.msdu = cpu_to_le16(chan->local_msdu);
		efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
		efs.acc_lat = __constant_cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
		efs.flush_to = __constant_cpu_to_le32(L2CAP_DEFAULT_FLUSH_TO);
		break;

	case L2CAP_MODE_STREAMING:
		efs.id = 1;
		efs.stype = L2CAP_SERV_BESTEFFORT;
		efs.msdu = cpu_to_le16(chan->local_msdu);
		efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
		efs.acc_lat = 0;
		efs.flush_to = 0;
		break;

	default:
		return;
	}

	l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
			   (unsigned long) &efs);
}
2738
/* Delayed-work handler for the ERTM acknowledgement timer.
 *
 * If frames have been received but not yet acknowledged since the last
 * ack (buffer_seq has advanced past last_acked_seq), send an RR/RNR to
 * acknowledge them.  The final l2cap_chan_put() drops the reference
 * presumably taken when the timer was armed -- confirm against the
 * __set_ack_timer() path.
 */
static void l2cap_ack_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       ack_timer.work);
	u16 frames_to_ack;

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	frames_to_ack = __seq_offset(chan, chan->buffer_seq,
				     chan->last_acked_seq);

	if (frames_to_ack)
		l2cap_send_rr_or_rnr(chan, 0);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
2758
/* Reset per-channel sequence/SDU state and, for ERTM mode, set up the
 * retransmission machinery (timers, SREJ queue, sequence lists).
 *
 * Called once configuration is complete, for ERTM and streaming
 * channels.  For non-ERTM modes only the common state is reset.
 *
 * Returns 0 on success or a negative errno if a sequence list cannot
 * be allocated; on failure of the second list the first is freed so no
 * partial allocation is left behind.
 */
static inline int l2cap_ertm_init(struct l2cap_chan *chan)
{
	int err;

	chan->next_tx_seq = 0;
	chan->expected_tx_seq = 0;
	chan->expected_ack_seq = 0;
	chan->unacked_frames = 0;
	chan->buffer_seq = 0;
	chan->frames_sent = 0;
	chan->last_acked_seq = 0;
	chan->sdu = NULL;
	chan->sdu_last_frag = NULL;
	chan->sdu_len = 0;

	skb_queue_head_init(&chan->tx_q);

	/* Streaming mode needs none of the ERTM state below */
	if (chan->mode != L2CAP_MODE_ERTM)
		return 0;

	chan->rx_state = L2CAP_RX_STATE_RECV;
	chan->tx_state = L2CAP_TX_STATE_XMIT;

	INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
	INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
	INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);

	skb_queue_head_init(&chan->srej_q);

	err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
	if (err < 0)
		return err;

	err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
	if (err < 0)
		l2cap_seq_list_free(&chan->srej_list);

	return err;
}
2798
2799 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2800 {
2801 switch (mode) {
2802 case L2CAP_MODE_STREAMING:
2803 case L2CAP_MODE_ERTM:
2804 if (l2cap_mode_supported(mode, remote_feat_mask))
2805 return mode;
2806 /* fall through */
2807 default:
2808 return L2CAP_MODE_BASIC;
2809 }
2810 }
2811
2812 static inline bool __l2cap_ews_supported(struct l2cap_chan *chan)
2813 {
2814 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
2815 }
2816
2817 static inline bool __l2cap_efs_supported(struct l2cap_chan *chan)
2818 {
2819 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
2820 }
2821
2822 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
2823 {
2824 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
2825 __l2cap_ews_supported(chan)) {
2826 /* use extended control field */
2827 set_bit(FLAG_EXT_CTRL, &chan->flags);
2828 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
2829 } else {
2830 chan->tx_win = min_t(u16, chan->tx_win,
2831 L2CAP_DEFAULT_TX_WINDOW);
2832 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
2833 }
2834 }
2835
/* Build an outgoing Configuration Request for @chan into @data.
 *
 * On the first request only, the channel mode may be downgraded based
 * on the remote feature mask.  Options are then appended according to
 * the (possibly adjusted) mode: MTU (if non-default), RFC, optionally
 * EFS, FCS and EWS.  Returns the number of bytes written.
 *
 * The caller supplies the buffer; per existing call sites it must be
 * large enough for all options (typically 64-128 bytes).
 */
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
{
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = chan->mode };
	void *ptr = req->data;
	u16 size;

	BT_DBG("chan %p", chan);

	/* Mode selection happens only before any config exchange */
	if (chan->num_conf_req || chan->num_conf_rsp)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		/* "State 2" devices keep their configured mode as-is */
		if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
			break;

		if (__l2cap_efs_supported(chan))
			set_bit(FLAG_EFS_ENABLE, &chan->flags);

		/* fall through */
	default:
		chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
		break;
	}

done:
	if (chan->imtu != L2CAP_DEFAULT_MTU)
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		/* Omit the RFC option entirely when the remote supports
		 * neither ERTM nor streaming.
		 */
		if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
		    !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
			break;

		rfc.mode            = L2CAP_MODE_BASIC;
		rfc.txwin_size      = 0;
		rfc.max_transmit    = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size    = 0;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);
		break;

	case L2CAP_MODE_ERTM:
		rfc.mode            = L2CAP_MODE_ERTM;
		rfc.max_transmit    = chan->max_tx;
		/* Timeout values are set by the responder, not here */
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;

		/* Cap the PDU size so a full frame (with extended header,
		 * SDU length and FCS) still fits in the connection MTU.
		 */
		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE -
			     L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_txwin_setup(chan);

		rfc.txwin_size = min_t(u16, chan->tx_win,
				       L2CAP_DEFAULT_TX_WINDOW);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan);

		if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		if (chan->fcs == L2CAP_FCS_NONE ||
		    test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
			chan->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
		}

		/* Full (un-clamped) window goes in the EWS option when
		 * extended control is in use.
		 */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
					   chan->tx_win);
		break;

	case L2CAP_MODE_STREAMING:
		l2cap_txwin_setup(chan);
		rfc.mode            = L2CAP_MODE_STREAMING;
		rfc.txwin_size      = 0;
		rfc.max_transmit    = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;

		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE -
			     L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan);

		if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		if (chan->fcs == L2CAP_FCS_NONE ||
		    test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
			chan->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
		}
		break;
	}

	req->dcid  = cpu_to_le16(chan->dcid);
	req->flags = __constant_cpu_to_le16(0);

	return ptr - data;
}
2957
/* Parse the accumulated Configuration Request (chan->conf_req /
 * chan->conf_len) and build the Configuration Response into @data.
 *
 * First pass: walk the received options, recording MTU, flush timeout,
 * RFC, FCS, EFS and EWS values; unknown non-hint options are echoed
 * back with result L2CAP_CONF_UNKNOWN.  Then the channel mode is
 * reconciled with the remote's RFC, and the response options (MTU, RFC,
 * possibly EFS) are emitted.
 *
 * Returns the response length in bytes, or -ECONNREFUSED when the
 * request is incompatible (mode mismatch after a retry, EWS without
 * high-speed support, unacceptable EFS).
 */
static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
{
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;
	void *req = chan->conf_req;
	int len = chan->conf_len;
	int type, hint, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;
	u8 remote_efs = 0;
	u16 mtu = L2CAP_DEFAULT_MTU;
	u16 result = L2CAP_CONF_SUCCESS;
	u16 size;

	BT_DBG("chan %p", chan);

	/* First pass: collect the remote's requested options */
	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);

		hint = type & L2CAP_CONF_HINT;
		type &= L2CAP_CONF_MASK;

		switch (type) {
		case L2CAP_CONF_MTU:
			mtu = val;
			break;

		case L2CAP_CONF_FLUSH_TO:
			chan->flush_to = val;
			break;

		case L2CAP_CONF_QOS:
			/* QoS option is accepted but ignored */
			break;

		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *) val, olen);
			break;

		case L2CAP_CONF_FCS:
			if (val == L2CAP_FCS_NONE)
				set_bit(CONF_NO_FCS_RECV, &chan->conf_state);
			break;

		case L2CAP_CONF_EFS:
			remote_efs = 1;
			if (olen == sizeof(efs))
				memcpy(&efs, (void *) val, olen);
			break;

		case L2CAP_CONF_EWS:
			/* Extended window requires high-speed support */
			if (!enable_hs)
				return -ECONNREFUSED;

			set_bit(FLAG_EXT_CTRL, &chan->flags);
			set_bit(CONF_EWS_RECV, &chan->conf_state);
			chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
			chan->remote_tx_win = val;
			break;

		default:
			/* Hint options may be silently ignored; anything
			 * else unknown is reported back to the sender.
			 */
			if (hint)
				break;

			result = L2CAP_CONF_UNKNOWN;
			*((u8 *) ptr++) = type;
			break;
		}
	}

	/* Mode reconciliation only on the first exchange */
	if (chan->num_conf_rsp || chan->num_conf_req > 1)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
			chan->mode = l2cap_select_mode(rfc.mode,
						       chan->conn->feat_mask);
			break;
		}

		if (remote_efs) {
			if (__l2cap_efs_supported(chan))
				set_bit(FLAG_EFS_ENABLE, &chan->flags);
			else
				return -ECONNREFUSED;
		}

		/* State-2 devices insist on their configured mode */
		if (chan->mode != rfc.mode)
			return -ECONNREFUSED;

		break;
	}

done:
	if (chan->mode != rfc.mode) {
		result = L2CAP_CONF_UNACCEPT;
		rfc.mode = chan->mode;

		/* Give the remote one chance to retry with our mode */
		if (chan->num_conf_rsp == 1)
			return -ECONNREFUSED;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
				   sizeof(rfc), (unsigned long) &rfc);
	}

	if (result == L2CAP_CONF_SUCCESS) {
		/* Configure output options and let the other side know
		 * which ones we don't like. */

		if (mtu < L2CAP_DEFAULT_MIN_MTU)
			result = L2CAP_CONF_UNACCEPT;
		else {
			chan->omtu = mtu;
			set_bit(CONF_MTU_DONE, &chan->conf_state);
		}
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);

		if (remote_efs) {
			/* Service types must match unless one side is
			 * "no traffic".
			 */
			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != chan->local_stype) {

				result = L2CAP_CONF_UNACCEPT;

				if (chan->num_conf_req >= 1)
					return -ECONNREFUSED;

				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs);
			} else {
				/* Send PENDING Conf Rsp */
				result = L2CAP_CONF_PENDING;
				set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
			}
		}

		switch (rfc.mode) {
		case L2CAP_MODE_BASIC:
			chan->fcs = L2CAP_FCS_NONE;
			set_bit(CONF_MODE_DONE, &chan->conf_state);
			break;

		case L2CAP_MODE_ERTM:
			if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
				chan->remote_tx_win = rfc.txwin_size;
			else
				rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;

			chan->remote_max_tx = rfc.max_transmit;

			/* Clamp the remote PDU size to what fits in the
			 * connection MTU with full framing overhead.
			 */
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu -
				     L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE -
				     L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			/* As responder we dictate the timeout values */
			rfc.retrans_timeout =
				__constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
			rfc.monitor_timeout =
				__constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					   sizeof(rfc), (unsigned long) &rfc);

			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->remote_id = efs.id;
				chan->remote_stype = efs.stype;
				chan->remote_msdu = le16_to_cpu(efs.msdu);
				chan->remote_flush_to =
					le32_to_cpu(efs.flush_to);
				chan->remote_acc_lat =
					le32_to_cpu(efs.acc_lat);
				chan->remote_sdu_itime =
					le32_to_cpu(efs.sdu_itime);
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs), (unsigned long) &efs);
			}
			break;

		case L2CAP_MODE_STREAMING:
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu -
				     L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE -
				     L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					   sizeof(rfc), (unsigned long) &rfc);

			break;

		default:
			result = L2CAP_CONF_UNACCEPT;

			memset(&rfc, 0, sizeof(rfc));
			rfc.mode = chan->mode;
		}

		if (result == L2CAP_CONF_SUCCESS)
			set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
	}
	rsp->scid   = cpu_to_le16(chan->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags  = __constant_cpu_to_le16(0);

	return ptr - data;
}
3177
/* Parse a received Configuration Response and build the follow-up
 * Configuration Request into @data.
 *
 * @rsp/@len:  the response's option list from the remote
 * @data:      output buffer for the new request
 * @result:    in/out; may be downgraded to L2CAP_CONF_UNACCEPT when the
 *             remote proposed an MTU below the minimum
 *
 * Each option the remote returned is re-emitted in our new request,
 * adjusted where needed (MTU clamped up to the minimum, EWS clamped to
 * the extended window maximum).  On success/pending the negotiated
 * ERTM/streaming parameters are committed to the channel.
 *
 * Returns the request length in bytes, or -ECONNREFUSED on an
 * unacceptable RFC mode or EFS service type.
 */
static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, void *data, u16 *result)
{
	struct l2cap_conf_req *req = data;
	void *ptr = req->data;
	int type, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;

	BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);

		switch (type) {
		case L2CAP_CONF_MTU:
			if (val < L2CAP_DEFAULT_MIN_MTU) {
				*result = L2CAP_CONF_UNACCEPT;
				chan->imtu = L2CAP_DEFAULT_MIN_MTU;
			} else
				chan->imtu = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
			break;

		case L2CAP_CONF_FLUSH_TO:
			chan->flush_to = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
					   2, chan->flush_to);
			break;

		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *)val, olen);

			/* State-2 devices refuse any mode change */
			if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
			    rfc.mode != chan->mode)
				return -ECONNREFUSED;

			chan->fcs = 0;

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					   sizeof(rfc), (unsigned long) &rfc);
			break;

		case L2CAP_CONF_EWS:
			chan->tx_win = min_t(u16, val,
					     L2CAP_DEFAULT_EXT_WINDOW);
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
					   chan->tx_win);
			break;

		case L2CAP_CONF_EFS:
			if (olen == sizeof(efs))
				memcpy(&efs, (void *)val, olen);

			/* Service types must match unless one side is
			 * "no traffic".
			 */
			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != chan->local_stype)
				return -ECONNREFUSED;

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
					   sizeof(efs), (unsigned long) &efs);
			break;
		}
	}

	/* A basic-mode channel cannot be switched to another mode by
	 * the remote's response.
	 */
	if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
		return -ECONNREFUSED;

	chan->mode = rfc.mode;

	if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
		switch (rfc.mode) {
		case L2CAP_MODE_ERTM:
			chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
			chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
			chan->mps = le16_to_cpu(rfc.max_pdu_size);

			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->local_msdu = le16_to_cpu(efs.msdu);
				chan->local_sdu_itime =
					le32_to_cpu(efs.sdu_itime);
				chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
				chan->local_flush_to =
					le32_to_cpu(efs.flush_to);
			}
			break;

		case L2CAP_MODE_STREAMING:
			chan->mps = le16_to_cpu(rfc.max_pdu_size);
		}
	}

	req->dcid  = cpu_to_le16(chan->dcid);
	req->flags = __constant_cpu_to_le16(0);

	return ptr - data;
}
3276
3277 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data, u16 result, u16 flags)
3278 {
3279 struct l2cap_conf_rsp *rsp = data;
3280 void *ptr = rsp->data;
3281
3282 BT_DBG("chan %p", chan);
3283
3284 rsp->scid = cpu_to_le16(chan->dcid);
3285 rsp->result = cpu_to_le16(result);
3286 rsp->flags = cpu_to_le16(flags);
3287
3288 return ptr - data;
3289 }
3290
/* Send the deferred Connection Response (success, no further info) for
 * a channel whose acceptance was held back (e.g. deferred setup), then
 * kick off configuration by sending the first Configuration Request --
 * unless one was already sent, tracked by the CONF_REQ_SENT bit.
 */
void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
{
	struct l2cap_conn_rsp rsp;
	struct l2cap_conn *conn = chan->conn;
	u8 buf[128];

	rsp.scid   = cpu_to_le16(chan->dcid);
	rsp.dcid   = cpu_to_le16(chan->scid);
	rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
	rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
	l2cap_send_cmd(conn, chan->ident,
		       L2CAP_CONN_RSP, sizeof(rsp), &rsp);

	if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
		return;

	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
		       l2cap_build_conf_req(chan, buf), buf);
	chan->num_conf_req++;
}
3311
/* Pull the negotiated RFC parameters out of a final Configuration
 * Response and commit them to the channel.
 *
 * Only meaningful for ERTM and streaming channels; a misbehaving remote
 * that omitted the RFC option gets sane defaults instead.
 */
static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
{
	int type, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc;

	BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);

	if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
		return;

	/* Scan the option list for the RFC option */
	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);

		switch (type) {
		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *)val, olen);
			goto done;
		}
	}

	/* Use sane default values in case a misbehaving remote device
	 * did not send an RFC option.
	 */
	rfc.mode = chan->mode;
	rfc.retrans_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
	rfc.monitor_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
	rfc.max_pdu_size = cpu_to_le16(chan->imtu);

	BT_ERR("Expected RFC option was not found, using defaults");

done:
	switch (rfc.mode) {
	case L2CAP_MODE_ERTM:
		chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
		chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
		chan->mps = le16_to_cpu(rfc.max_pdu_size);
		break;
	case L2CAP_MODE_STREAMING:
		chan->mps = le16_to_cpu(rfc.max_pdu_size);
	}
}
3355
/* Handle an incoming Command Reject.
 *
 * Only "command not understood" rejects matter here: if the rejected
 * command was our outstanding Information Request (matched by ident),
 * treat feature-mask discovery as done and resume bringing up pending
 * channels.  Always returns 0.
 */
static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;

	if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
		return 0;

	if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
	    cmd->ident == conn->info_ident) {
		cancel_delayed_work(&conn->info_timer);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
	}

	return 0;
}
3375
/* Handle an incoming Connection Request.
 *
 * Looks up a listening channel for the requested PSM, applies security
 * and backlog checks, and either creates a new child channel (entering
 * BT_CONFIG or BT_CONNECT2 depending on security/deferred-setup state)
 * or rejects the request.  A Connection Response is always sent; a
 * feature-mask Information Request and/or the first Configuration
 * Request may follow depending on the outcome.
 *
 * Locking: takes conn->chan_lock and the parent socket lock for the
 * channel-creation section.  Always returns 0.
 */
static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct l2cap_chan *chan = NULL, *pchan;
	struct sock *parent, *sk = NULL;
	int result, status = L2CAP_CS_NO_INFO;

	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src, conn->dst);
	if (!pchan) {
		result = L2CAP_CR_BAD_PSM;
		goto sendresp;
	}

	parent = pchan->sk;

	mutex_lock(&conn->chan_lock);
	lock_sock(parent);

	/* Check if the ACL is secure enough (if not SDP) */
	if (psm != __constant_cpu_to_le16(L2CAP_PSM_SDP) &&
	    !hci_conn_check_link_mode(conn->hcon)) {
		conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
		result = L2CAP_CR_SEC_BLOCK;
		goto response;
	}

	result = L2CAP_CR_NO_MEM;

	/* Check for backlog size */
	if (sk_acceptq_is_full(parent)) {
		BT_DBG("backlog full %d", parent->sk_ack_backlog);
		goto response;
	}

	chan = pchan->ops->new_connection(pchan->data);
	if (!chan)
		goto response;

	sk = chan->sk;

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid)) {
		sock_set_flag(sk, SOCK_ZAPPED);
		chan->ops->close(chan->data);
		goto response;
	}

	hci_conn_hold(conn->hcon);

	bacpy(&bt_sk(sk)->src, conn->src);
	bacpy(&bt_sk(sk)->dst, conn->dst);
	chan->psm  = psm;
	chan->dcid = scid;

	bt_accept_enqueue(parent, sk);

	__l2cap_chan_add(conn, chan);

	dcid = chan->scid;

	__set_chan_timer(chan, sk->sk_sndtimeo);

	/* Remember the request ident for a possibly deferred response */
	chan->ident = cmd->ident;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
		if (l2cap_chan_check_security(chan)) {
			if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) {
				/* Userspace will accept/reject later */
				__l2cap_state_change(chan, BT_CONNECT2);
				result = L2CAP_CR_PEND;
				status = L2CAP_CS_AUTHOR_PEND;
				parent->sk_data_ready(parent, 0);
			} else {
				__l2cap_state_change(chan, BT_CONFIG);
				result = L2CAP_CR_SUCCESS;
				status = L2CAP_CS_NO_INFO;
			}
		} else {
			/* Security procedure still in progress */
			__l2cap_state_change(chan, BT_CONNECT2);
			result = L2CAP_CR_PEND;
			status = L2CAP_CS_AUTHEN_PEND;
		}
	} else {
		/* Feature mask not known yet; answer pending */
		__l2cap_state_change(chan, BT_CONNECT2);
		result = L2CAP_CR_PEND;
		status = L2CAP_CS_NO_INFO;
	}

response:
	release_sock(parent);
	mutex_unlock(&conn->chan_lock);

sendresp:
	rsp.scid   = cpu_to_le16(scid);
	rsp.dcid   = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);

	/* Kick off feature-mask discovery if it hasn't happened yet */
	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
		struct l2cap_info_req info;
		info.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

		l2cap_send_cmd(conn, conn->info_ident,
			       L2CAP_INFO_REQ, sizeof(info), &info);
	}

	/* Connection accepted outright: start configuration */
	if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
	    result == L2CAP_CR_SUCCESS) {
		u8 buf[128];
		set_bit(CONF_REQ_SENT, &chan->conf_state);
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf), buf);
		chan->num_conf_req++;
	}

	return 0;
}
3505
/* Handle an incoming Connection Response.
 *
 * The channel is found by our source CID if the remote supplied one,
 * otherwise by the ident of the original request.  On success the
 * channel moves to BT_CONFIG and the first Configuration Request is
 * sent (once); on "pending" we just note the pending state; any other
 * result tears the channel down with ECONNREFUSED.
 *
 * Returns 0, or -EFAULT if no matching channel exists.
 */
static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct l2cap_chan *chan;
	u8 req[128];
	int err;

	scid   = __le16_to_cpu(rsp->scid);
	dcid   = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
	       dcid, scid, result, status);

	mutex_lock(&conn->chan_lock);

	if (scid) {
		chan = __l2cap_get_chan_by_scid(conn, scid);
		if (!chan) {
			err = -EFAULT;
			goto unlock;
		}
	} else {
		chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
		if (!chan) {
			err = -EFAULT;
			goto unlock;
		}
	}

	err = 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_SUCCESS:
		l2cap_state_change(chan, BT_CONFIG);
		chan->ident = 0;
		chan->dcid = dcid;
		clear_bit(CONF_CONNECT_PEND, &chan->conf_state);

		/* Only the first success triggers a config request */
		if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
			break;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, req), req);
		chan->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		set_bit(CONF_CONNECT_PEND, &chan->conf_state);
		break;

	default:
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	l2cap_chan_unlock(chan);

unlock:
	mutex_unlock(&conn->chan_lock);

	return err;
}
3573
3574 static inline void set_default_fcs(struct l2cap_chan *chan)
3575 {
3576 /* FCS is enabled only in ERTM or streaming mode, if one or both
3577 * sides request it.
3578 */
3579 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
3580 chan->fcs = L2CAP_FCS_NONE;
3581 else if (!test_bit(CONF_NO_FCS_RECV, &chan->conf_state))
3582 chan->fcs = L2CAP_FCS_CRC16;
3583 }
3584
/* Handle an incoming L2CAP Configure Request.
 *
 * Option data may be split across several requests (continuation flag);
 * fragments are accumulated in chan->conf_req and only parsed once the
 * final fragment arrives.  When both directions are configured the
 * channel is brought up (ERTM state initialized if needed).
 *
 * Returns 0 on success or a negative errno; -ENOENT if no channel is
 * bound to the requested destination CID.
 */
static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rsp[64];
	struct l2cap_chan *chan;
	int len, err = 0;

	dcid = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	/* NOTE(review): lookup presumably returns with the channel locked —
	 * it is released at the unlock label below; confirm against the
	 * l2cap_get_chan_by_scid() definition.
	 */
	chan = l2cap_get_chan_by_scid(conn, dcid);
	if (!chan)
		return -ENOENT;

	/* Configuration is only valid while the channel is being set up */
	if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
		struct l2cap_cmd_rej_cid rej;

		rej.reason = __constant_cpu_to_le16(L2CAP_REJ_INVALID_CID);
		rej.scid = cpu_to_le16(chan->scid);
		rej.dcid = cpu_to_le16(chan->dcid);

		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
				sizeof(rej), &rej);
		goto unlock;
	}

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (len < 0 || chan->conf_len + len > sizeof(chan->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(chan, rsp,
					L2CAP_CONF_REJECT, flags), rsp);
		goto unlock;
	}

	/* Store config. */
	memcpy(chan->conf_req + chan->conf_len, req->data, len);
	chan->conf_len += len;

	if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(chan, rsp,
					L2CAP_CONF_SUCCESS, flags), rsp);
		goto unlock;
	}

	/* Complete config. */
	len = l2cap_parse_conf_req(chan, rsp);
	if (len < 0) {
		/* Unparseable options: tear the channel down */
		l2cap_send_disconn_req(conn, chan, ECONNRESET);
		goto unlock;
	}

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
	chan->num_conf_rsp++;

	/* Reset config buffer. */
	chan->conf_len = 0;

	if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
		goto unlock;

	/* Both directions configured: finish channel setup */
	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan->conn, chan, -err);
		else
			l2cap_chan_ready(chan);

		goto unlock;
	}

	/* We have answered the peer but not yet sent our own request */
	if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
		u8 buf[64];
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(chan, buf), buf);
		chan->num_conf_req++;
	}

	/* Got Conf Rsp PENDING from remote side and asume we sent
	   Conf Rsp PENDING in the code above */
	if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
			test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {

		/* check compatibility */

		clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
		set_bit(CONF_OUTPUT_DONE, &chan->conf_state);

		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
					l2cap_build_conf_rsp(chan, rsp,
						L2CAP_CONF_SUCCESS, flags), rsp);
	}

unlock:
	l2cap_chan_unlock(chan);
	return err;
}
3692
/* Handle an incoming L2CAP Configure Response.
 *
 * Depending on the result the channel either proceeds toward the open
 * state (SUCCESS), waits (PENDING), renegotiates options (UNACCEPT), or
 * is torn down.  When both directions are configured the channel is
 * brought up.  Returns 0 or a negative errno.
 */
static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
	u16 scid, flags, result;
	struct l2cap_chan *chan;
	int len = le16_to_cpu(cmd->len) - sizeof(*rsp);
	int err = 0;

	scid = __le16_to_cpu(rsp->scid);
	flags = __le16_to_cpu(rsp->flags);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
	       result, len);

	/* Unknown channel: silently ignore the response */
	chan = l2cap_get_chan_by_scid(conn, scid);
	if (!chan)
		return 0;

	switch (result) {
	case L2CAP_CONF_SUCCESS:
		l2cap_conf_rfc_get(chan, rsp->data, len);
		clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
		break;

	case L2CAP_CONF_PENDING:
		set_bit(CONF_REM_CONF_PEND, &chan->conf_state);

		if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
			char buf[64];

			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
								buf, &result);
			if (len < 0) {
				l2cap_send_disconn_req(conn, chan, ECONNRESET);
				goto done;
			}

			/* check compatibility */

			clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
			set_bit(CONF_OUTPUT_DONE, &chan->conf_state);

			l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
						l2cap_build_conf_rsp(chan, buf,
							L2CAP_CONF_SUCCESS, 0x0000), buf);
		}
		goto done;

	case L2CAP_CONF_UNACCEPT:
		if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
			char req[64];

			if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
				l2cap_send_disconn_req(conn, chan, ECONNRESET);
				goto done;
			}

			/* throw out any old stored conf requests */
			result = L2CAP_CONF_SUCCESS;
			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
								req, &result);
			if (len < 0) {
				l2cap_send_disconn_req(conn, chan, ECONNRESET);
				goto done;
			}

			l2cap_send_cmd(conn, l2cap_get_ident(conn),
						L2CAP_CONF_REQ, len, req);
			chan->num_conf_req++;
			if (result != L2CAP_CONF_SUCCESS)
				goto done;
			break;
		}
		/* Too many rejected negotiation rounds: deliberate
		 * fall-through to the disconnect path below.
		 */

	default:
		l2cap_chan_set_err(chan, ECONNRESET);

		__set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
		l2cap_send_disconn_req(conn, chan, ECONNRESET);
		goto done;
	}

	if (flags & L2CAP_CONF_FLAG_CONTINUATION)
		goto done;

	set_bit(CONF_INPUT_DONE, &chan->conf_state);

	/* Both directions configured: finish channel setup */
	if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan->conn, chan, -err);
		else
			l2cap_chan_ready(chan);
	}

done:
	l2cap_chan_unlock(chan);
	return err;
}
3798
/* Handle an incoming L2CAP Disconnection Request.
 *
 * Acknowledges with a Disconnection Response, shuts down the backing
 * socket and removes the channel.  An extra channel reference is held
 * across l2cap_chan_del()/close() so the channel cannot be freed while
 * still being referenced here.
 */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct l2cap_chan *chan;
	struct sock *sk;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	mutex_lock(&conn->chan_lock);

	/* The peer's dcid is our scid */
	chan = __l2cap_get_chan_by_scid(conn, dcid);
	if (!chan) {
		mutex_unlock(&conn->chan_lock);
		return 0;
	}

	l2cap_chan_lock(chan);

	sk = chan->sk;

	/* Response carries the CIDs swapped back to the peer's view */
	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.scid = cpu_to_le16(chan->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	lock_sock(sk);
	sk->sk_shutdown = SHUTDOWN_MASK;
	release_sock(sk);

	/* Keep the channel alive until ops->close() has run */
	l2cap_chan_hold(chan);
	l2cap_chan_del(chan, ECONNRESET);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan->data);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);

	return 0;
}
3844
/* Handle an incoming L2CAP Disconnection Response.
 *
 * Our earlier Disconnection Request was acknowledged: remove the
 * channel.  As in l2cap_disconnect_req(), an extra reference is held
 * across the delete/close sequence.
 */
static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
	u16 dcid, scid;
	struct l2cap_chan *chan;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);

	mutex_lock(&conn->chan_lock);

	chan = __l2cap_get_chan_by_scid(conn, scid);
	if (!chan) {
		mutex_unlock(&conn->chan_lock);
		return 0;
	}

	l2cap_chan_lock(chan);

	/* Keep the channel alive until ops->close() has run */
	l2cap_chan_hold(chan);
	l2cap_chan_del(chan, 0);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan->data);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);

	return 0;
}
3878
3879 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3880 {
3881 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
3882 u16 type;
3883
3884 type = __le16_to_cpu(req->type);
3885
3886 BT_DBG("type 0x%4.4x", type);
3887
3888 if (type == L2CAP_IT_FEAT_MASK) {
3889 u8 buf[8];
3890 u32 feat_mask = l2cap_feat_mask;
3891 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3892 rsp->type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
3893 rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
3894 if (!disable_ertm)
3895 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
3896 | L2CAP_FEAT_FCS;
3897 if (enable_hs)
3898 feat_mask |= L2CAP_FEAT_EXT_FLOW
3899 | L2CAP_FEAT_EXT_WINDOW;
3900
3901 put_unaligned_le32(feat_mask, rsp->data);
3902 l2cap_send_cmd(conn, cmd->ident,
3903 L2CAP_INFO_RSP, sizeof(buf), buf);
3904 } else if (type == L2CAP_IT_FIXED_CHAN) {
3905 u8 buf[12];
3906 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3907
3908 if (enable_hs)
3909 l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
3910 else
3911 l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;
3912
3913 rsp->type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3914 rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
3915 memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
3916 l2cap_send_cmd(conn, cmd->ident,
3917 L2CAP_INFO_RSP, sizeof(buf), buf);
3918 } else {
3919 struct l2cap_info_rsp rsp;
3920 rsp.type = cpu_to_le16(type);
3921 rsp.result = __constant_cpu_to_le16(L2CAP_IR_NOTSUPP);
3922 l2cap_send_cmd(conn, cmd->ident,
3923 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
3924 }
3925
3926 return 0;
3927 }
3928
/* Handle an incoming L2CAP Information Response.
 *
 * Part of the two-step feature discovery: after the feature mask is
 * received, a fixed-channel query may follow; once discovery finishes
 * (or fails) pending channels are started via l2cap_conn_start().
 */
static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
	u16 type, result;

	type = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);

	/* L2CAP Info req/rsp are unbound to channels, add extra checks */
	if (cmd->ident != conn->info_ident ||
			conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
		return 0;

	cancel_delayed_work(&conn->info_timer);

	/* Peer refused the query: finish discovery with what we have */
	if (result != L2CAP_IR_SUCCESS) {
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);

		return 0;
	}

	switch (type) {
	case L2CAP_IT_FEAT_MASK:
		conn->feat_mask = get_unaligned_le32(rsp->data);

		/* If supported, follow up with a fixed-channel query */
		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
			struct l2cap_info_req req;
			req.type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);

			conn->info_ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(req), &req);
		} else {
			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
			conn->info_ident = 0;

			l2cap_conn_start(conn);
		}
		break;

	case L2CAP_IT_FIXED_CHAN:
		conn->fixed_chan_mask = rsp->data[0];
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
		break;
	}

	return 0;
}
3986
3987 static inline int l2cap_create_channel_req(struct l2cap_conn *conn,
3988 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3989 void *data)
3990 {
3991 struct l2cap_create_chan_req *req = data;
3992 struct l2cap_create_chan_rsp rsp;
3993 u16 psm, scid;
3994
3995 if (cmd_len != sizeof(*req))
3996 return -EPROTO;
3997
3998 if (!enable_hs)
3999 return -EINVAL;
4000
4001 psm = le16_to_cpu(req->psm);
4002 scid = le16_to_cpu(req->scid);
4003
4004 BT_DBG("psm %d, scid %d, amp_id %d", psm, scid, req->amp_id);
4005
4006 /* Placeholder: Always reject */
4007 rsp.dcid = 0;
4008 rsp.scid = cpu_to_le16(scid);
4009 rsp.result = __constant_cpu_to_le16(L2CAP_CR_NO_MEM);
4010 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
4011
4012 l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
4013 sizeof(rsp), &rsp);
4014
4015 return 0;
4016 }
4017
/* Handle an AMP Create Channel Response.
 *
 * The response layout matches a regular Connect Response, so processing
 * is delegated to l2cap_connect_rsp().
 */
static inline int l2cap_create_channel_rsp(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, void *data)
{
	BT_DBG("conn %p", conn);

	return l2cap_connect_rsp(conn, cmd, data);
}
4025
4026 static void l2cap_send_move_chan_rsp(struct l2cap_conn *conn, u8 ident,
4027 u16 icid, u16 result)
4028 {
4029 struct l2cap_move_chan_rsp rsp;
4030
4031 BT_DBG("icid %d, result %d", icid, result);
4032
4033 rsp.icid = cpu_to_le16(icid);
4034 rsp.result = cpu_to_le16(result);
4035
4036 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_RSP, sizeof(rsp), &rsp);
4037 }
4038
4039 static void l2cap_send_move_chan_cfm(struct l2cap_conn *conn,
4040 struct l2cap_chan *chan, u16 icid, u16 result)
4041 {
4042 struct l2cap_move_chan_cfm cfm;
4043 u8 ident;
4044
4045 BT_DBG("icid %d, result %d", icid, result);
4046
4047 ident = l2cap_get_ident(conn);
4048 if (chan)
4049 chan->ident = ident;
4050
4051 cfm.icid = cpu_to_le16(icid);
4052 cfm.result = cpu_to_le16(result);
4053
4054 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM, sizeof(cfm), &cfm);
4055 }
4056
4057 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4058 u16 icid)
4059 {
4060 struct l2cap_move_chan_cfm_rsp rsp;
4061
4062 BT_DBG("icid %d", icid);
4063
4064 rsp.icid = cpu_to_le16(icid);
4065 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
4066 }
4067
4068 static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
4069 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
4070 {
4071 struct l2cap_move_chan_req *req = data;
4072 u16 icid = 0;
4073 u16 result = L2CAP_MR_NOT_ALLOWED;
4074
4075 if (cmd_len != sizeof(*req))
4076 return -EPROTO;
4077
4078 icid = le16_to_cpu(req->icid);
4079
4080 BT_DBG("icid %d, dest_amp_id %d", icid, req->dest_amp_id);
4081
4082 if (!enable_hs)
4083 return -EINVAL;
4084
4085 /* Placeholder: Always refuse */
4086 l2cap_send_move_chan_rsp(conn, cmd->ident, icid, result);
4087
4088 return 0;
4089 }
4090
4091 static inline int l2cap_move_channel_rsp(struct l2cap_conn *conn,
4092 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
4093 {
4094 struct l2cap_move_chan_rsp *rsp = data;
4095 u16 icid, result;
4096
4097 if (cmd_len != sizeof(*rsp))
4098 return -EPROTO;
4099
4100 icid = le16_to_cpu(rsp->icid);
4101 result = le16_to_cpu(rsp->result);
4102
4103 BT_DBG("icid %d, result %d", icid, result);
4104
4105 /* Placeholder: Always unconfirmed */
4106 l2cap_send_move_chan_cfm(conn, NULL, icid, L2CAP_MC_UNCONFIRMED);
4107
4108 return 0;
4109 }
4110
4111 static inline int l2cap_move_channel_confirm(struct l2cap_conn *conn,
4112 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
4113 {
4114 struct l2cap_move_chan_cfm *cfm = data;
4115 u16 icid, result;
4116
4117 if (cmd_len != sizeof(*cfm))
4118 return -EPROTO;
4119
4120 icid = le16_to_cpu(cfm->icid);
4121 result = le16_to_cpu(cfm->result);
4122
4123 BT_DBG("icid %d, result %d", icid, result);
4124
4125 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
4126
4127 return 0;
4128 }
4129
4130 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
4131 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
4132 {
4133 struct l2cap_move_chan_cfm_rsp *rsp = data;
4134 u16 icid;
4135
4136 if (cmd_len != sizeof(*rsp))
4137 return -EPROTO;
4138
4139 icid = le16_to_cpu(rsp->icid);
4140
4141 BT_DBG("icid %d", icid);
4142
4143 return 0;
4144 }
4145
4146 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
4147 u16 to_multiplier)
4148 {
4149 u16 max_latency;
4150
4151 if (min > max || min < 6 || max > 3200)
4152 return -EINVAL;
4153
4154 if (to_multiplier < 10 || to_multiplier > 3200)
4155 return -EINVAL;
4156
4157 if (max >= to_multiplier * 8)
4158 return -EINVAL;
4159
4160 max_latency = (to_multiplier * 8 / max) - 1;
4161 if (latency > 499 || latency > max_latency)
4162 return -EINVAL;
4163
4164 return 0;
4165 }
4166
/* Handle an LE Connection Parameter Update Request.
 *
 * Only valid when we are the master of the link.  The requested
 * parameters are validated with l2cap_check_conn_param(); a response is
 * always sent, and on acceptance the controller is asked to apply the
 * new parameters.
 */
static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_conn_param_update_req *req;
	struct l2cap_conn_param_update_rsp rsp;
	u16 min, max, latency, to_multiplier, cmd_len;
	int err;

	/* Slaves must not process this request */
	if (!(hcon->link_mode & HCI_LM_MASTER))
		return -EINVAL;

	cmd_len = __le16_to_cpu(cmd->len);
	if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
		return -EPROTO;

	req = (struct l2cap_conn_param_update_req *) data;
	min = __le16_to_cpu(req->min);
	max = __le16_to_cpu(req->max);
	latency = __le16_to_cpu(req->latency);
	to_multiplier = __le16_to_cpu(req->to_multiplier);

	BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
						min, max, latency, to_multiplier);

	memset(&rsp, 0, sizeof(rsp));

	err = l2cap_check_conn_param(min, max, latency, to_multiplier);
	if (err)
		rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
	else
		rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
							sizeof(rsp), &rsp);

	/* Apply the accepted parameters at the HCI level */
	if (!err)
		hci_le_conn_update(hcon, min, max, latency, to_multiplier);

	return 0;
}
4208
/* Dispatch one BR/EDR signaling command to its handler.
 *
 * Echo requests are answered inline; echo responses are ignored.
 * Returns the handler's result, or -EINVAL for unknown command codes
 * (which causes the caller to send a command reject).
 */
static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
			struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
	int err = 0;

	switch (cmd->code) {
	case L2CAP_COMMAND_REJ:
		l2cap_command_rej(conn, cmd, data);
		break;

	case L2CAP_CONN_REQ:
		err = l2cap_connect_req(conn, cmd, data);
		break;

	case L2CAP_CONN_RSP:
		err = l2cap_connect_rsp(conn, cmd, data);
		break;

	case L2CAP_CONF_REQ:
		err = l2cap_config_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONF_RSP:
		err = l2cap_config_rsp(conn, cmd, data);
		break;

	case L2CAP_DISCONN_REQ:
		err = l2cap_disconnect_req(conn, cmd, data);
		break;

	case L2CAP_DISCONN_RSP:
		err = l2cap_disconnect_rsp(conn, cmd, data);
		break;

	case L2CAP_ECHO_REQ:
		/* Echo the payload straight back */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
		break;

	case L2CAP_ECHO_RSP:
		break;

	case L2CAP_INFO_REQ:
		err = l2cap_information_req(conn, cmd, data);
		break;

	case L2CAP_INFO_RSP:
		err = l2cap_information_rsp(conn, cmd, data);
		break;

	case L2CAP_CREATE_CHAN_REQ:
		err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CREATE_CHAN_RSP:
		err = l2cap_create_channel_rsp(conn, cmd, data);
		break;

	case L2CAP_MOVE_CHAN_REQ:
		err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_RSP:
		err = l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_CFM:
		err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_CFM_RSP:
		err = l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
		break;

	default:
		BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
		err = -EINVAL;
		break;
	}

	return err;
}
4290
4291 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
4292 struct l2cap_cmd_hdr *cmd, u8 *data)
4293 {
4294 switch (cmd->code) {
4295 case L2CAP_COMMAND_REJ:
4296 return 0;
4297
4298 case L2CAP_CONN_PARAM_UPDATE_REQ:
4299 return l2cap_conn_param_update_req(conn, cmd, data);
4300
4301 case L2CAP_CONN_PARAM_UPDATE_RSP:
4302 return 0;
4303
4304 default:
4305 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
4306 return -EINVAL;
4307 }
4308 }
4309
/* Process an skb received on the L2CAP signaling channel.
 *
 * A single signaling PDU may carry several commands back to back; each
 * is dispatched to the BR/EDR or LE handler depending on the link type.
 * A failing handler results in a command reject being sent.  Consumes
 * the skb.
 */
static inline void l2cap_sig_channel(struct l2cap_conn *conn,
				     struct sk_buff *skb)
{
	u8 *data = skb->data;
	int len = skb->len;
	struct l2cap_cmd_hdr cmd;
	int err;

	/* Give raw sockets a copy before processing */
	l2cap_raw_recv(conn, skb);

	while (len >= L2CAP_CMD_HDR_SIZE) {
		u16 cmd_len;
		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
		data += L2CAP_CMD_HDR_SIZE;
		len -= L2CAP_CMD_HDR_SIZE;

		cmd_len = le16_to_cpu(cmd.len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);

		/* A command claiming more data than remains, or with a
		 * zero ident, is corrupt — stop parsing this PDU.
		 */
		if (cmd_len > len || !cmd.ident) {
			BT_DBG("corrupted command");
			break;
		}

		if (conn->hcon->type == LE_LINK)
			err = l2cap_le_sig_cmd(conn, &cmd, data);
		else
			err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);

		if (err) {
			struct l2cap_cmd_rej_unk rej;

			BT_ERR("Wrong link type (%d)", err);

			/* FIXME: Map err to a valid reason */
			rej.reason = __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
		}

		data += cmd_len;
		len -= cmd_len;
	}

	kfree_skb(skb);
}
4356
4357 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
4358 {
4359 u16 our_fcs, rcv_fcs;
4360 int hdr_size;
4361
4362 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
4363 hdr_size = L2CAP_EXT_HDR_SIZE;
4364 else
4365 hdr_size = L2CAP_ENH_HDR_SIZE;
4366
4367 if (chan->fcs == L2CAP_FCS_CRC16) {
4368 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
4369 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
4370 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
4371
4372 if (our_fcs != rcv_fcs)
4373 return -EBADMSG;
4374 }
4375 return 0;
4376 }
4377
4378 static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
4379 {
4380 struct l2cap_ctrl control;
4381
4382 BT_DBG("chan %p", chan);
4383
4384 memset(&control, 0, sizeof(control));
4385 control.sframe = 1;
4386 control.final = 1;
4387 control.reqseq = chan->buffer_seq;
4388 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4389
4390 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4391 control.super = L2CAP_SUPER_RNR;
4392 l2cap_send_sframe(chan, &control);
4393 }
4394
4395 if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
4396 chan->unacked_frames > 0)
4397 __set_retrans_timer(chan);
4398
4399 /* Send pending iframes */
4400 l2cap_ertm_send(chan);
4401
4402 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
4403 test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
4404 /* F-bit wasn't sent in an s-frame or i-frame yet, so
4405 * send it now.
4406 */
4407 control.super = L2CAP_SUPER_RR;
4408 l2cap_send_sframe(chan, &control);
4409 }
4410 }
4411
4412 static void append_skb_frag(struct sk_buff *skb,
4413 struct sk_buff *new_frag, struct sk_buff **last_frag)
4414 {
4415 /* skb->len reflects data in skb as well as all fragments
4416 * skb->data_len reflects only data in fragments
4417 */
4418 if (!skb_has_frag_list(skb))
4419 skb_shinfo(skb)->frag_list = new_frag;
4420
4421 new_frag->next = NULL;
4422
4423 (*last_frag)->next = new_frag;
4424 *last_frag = new_frag;
4425
4426 skb->len += new_frag->len;
4427 skb->data_len += new_frag->len;
4428 skb->truesize += new_frag->truesize;
4429 }
4430
/* Reassemble a segmented SDU from an incoming I-frame.
 *
 * Unsegmented frames are delivered immediately; START/CONTINUE/END
 * frames accumulate into chan->sdu until the declared length is
 * reached.  On delivery or storage the skb's ownership transfers away
 * (skb is set to NULL); on any error both the frame and any partial
 * SDU are freed.  Returns 0 on success, negative errno otherwise.
 */
static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
				struct l2cap_ctrl *control)
{
	int err = -EINVAL;

	switch (control->sar) {
	case L2CAP_SAR_UNSEGMENTED:
		/* A pending partial SDU makes an unsegmented frame invalid */
		if (chan->sdu)
			break;

		err = chan->ops->recv(chan->data, skb);
		break;

	case L2CAP_SAR_START:
		/* A second START while reassembling is invalid */
		if (chan->sdu)
			break;

		chan->sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, L2CAP_SDULEN_SIZE);

		if (chan->sdu_len > chan->imtu) {
			err = -EMSGSIZE;
			break;
		}

		/* START must not already contain the whole SDU */
		if (skb->len >= chan->sdu_len)
			break;

		chan->sdu = skb;
		chan->sdu_last_frag = skb;

		skb = NULL;
		err = 0;
		break;

	case L2CAP_SAR_CONTINUE:
		/* CONTINUE without a preceding START is invalid */
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* Overflowing the declared SDU length is invalid */
		if (chan->sdu->len >= chan->sdu_len)
			break;

		err = 0;
		break;

	case L2CAP_SAR_END:
		/* END without a preceding START is invalid */
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* Total must match the length announced in START */
		if (chan->sdu->len != chan->sdu_len)
			break;

		err = chan->ops->recv(chan->data, chan->sdu);

		if (!err) {
			/* Reassembly complete */
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;
		}
		break;
	}

	if (err) {
		/* Drop both the offending frame and any partial SDU */
		kfree_skb(skb);
		kfree_skb(chan->sdu);
		chan->sdu = NULL;
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;
	}

	return err;
}
4512
4513 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
4514 {
4515 u8 event;
4516
4517 if (chan->mode != L2CAP_MODE_ERTM)
4518 return;
4519
4520 event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
4521 l2cap_tx(chan, NULL, NULL, event);
4522 }
4523
/* Drain the SREJ queue of frames that have become in-sequence.
 *
 * Returns 0 when the queue was drained (or local busy stopped us),
 * otherwise the error from reassembly.  Once the queue is empty the
 * receiver returns to the normal RECV state and acks the peer.
 */
static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
{
	int err = 0;
	/* Pass sequential frames to l2cap_reassemble_sdu()
	 * until a gap is encountered.
	 */

	BT_DBG("chan %p", chan);

	while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		struct sk_buff *skb;
		BT_DBG("Searching for skb with txseq %d (queue len %d)",
		       chan->buffer_seq, skb_queue_len(&chan->srej_q));

		skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);

		/* Gap found: the missing frame hasn't arrived yet */
		if (!skb)
			break;

		skb_unlink(skb, &chan->srej_q);
		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
		err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->control);
		if (err)
			break;
	}

	/* All missing frames recovered: leave the SREJ_SENT state */
	if (skb_queue_empty(&chan->srej_q)) {
		chan->rx_state = L2CAP_RX_STATE_RECV;
		l2cap_send_ack(chan);
	}

	return err;
}
4557
/* Handle a received SREJ (selective reject) S-frame.
 *
 * Retransmits the single requested I-frame, subject to validity and
 * retry-limit checks; an invalid reqseq or exceeded retry limit tears
 * the connection down.  P/F bit handling tracks the SREJ so a later
 * duplicate (after a poll) is not retransmitted twice.
 */
static void l2cap_handle_srej(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* reqseq == next_tx_seq would select a frame never sent */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	if (skb == NULL) {
		BT_DBG("Seq %d not available for retransmission",
		       control->reqseq);
		return;
	}

	if (chan->max_tx != 0 && bt_cb(skb)->control.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	if (control->poll) {
		l2cap_pass_to_tx(chan, control);

		/* Poll demands a final bit in our reply */
		set_bit(CONN_SEND_FBIT, &chan->conn_state);
		l2cap_retransmit(chan, control);
		l2cap_ertm_send(chan);

		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
			set_bit(CONN_SREJ_ACT, &chan->conn_state);
			chan->srej_save_reqseq = control->reqseq;
		}
	} else {
		l2cap_pass_to_tx_fbit(chan, control);

		if (control->final) {
			/* Skip the retransmit if this final answers an
			 * SREJ we already acted on for the same seq.
			 */
			if (chan->srej_save_reqseq != control->reqseq ||
			    !test_and_clear_bit(CONN_SREJ_ACT,
						&chan->conn_state))
				l2cap_retransmit(chan, control);
		} else {
			l2cap_retransmit(chan, control);
			if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
				set_bit(CONN_SREJ_ACT, &chan->conn_state);
				chan->srej_save_reqseq = control->reqseq;
			}
		}
	}
}
4615
/* Handle a received REJ (reject) S-frame.
 *
 * Retransmits all unacknowledged I-frames starting at reqseq, subject
 * to validity and retry-limit checks; an invalid reqseq or exceeded
 * retry limit tears the connection down.  CONN_REJ_ACT suppresses a
 * duplicate retransmission when the matching final arrives later.
 */
static void l2cap_handle_rej(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* reqseq == next_tx_seq would reject a frame never sent */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	if (chan->max_tx && skb &&
	    bt_cb(skb)->control.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	l2cap_pass_to_tx(chan, control);

	if (control->final) {
		/* Only retransmit if this final doesn't answer a REJ we
		 * already acted on.
		 */
		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
			l2cap_retransmit_all(chan, control);
	} else {
		l2cap_retransmit_all(chan, control);
		l2cap_ertm_send(chan);
		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
			set_bit(CONN_REJ_ACT, &chan->conn_state);
	}
}
4652
/* Classify the txseq of a received I-frame relative to receiver state.
 *
 * Returns one of the L2CAP_TXSEQ_* codes (expected, duplicate,
 * unexpected/missing-frames, SREJ-related variants, or invalid) which
 * drives the rx state machine.  All comparisons are done with modular
 * sequence arithmetic relative to last_acked_seq.
 */
static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
{
	BT_DBG("chan %p, txseq %d", chan, txseq);

	BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
	       chan->expected_tx_seq);

	if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
				chan->tx_win) {
			/* See notes below regarding "double poll" and
			 * invalid packets.
			 */
			if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
				BT_DBG("Invalid/Ignore - after SREJ");
				return L2CAP_TXSEQ_INVALID_IGNORE;
			} else {
				BT_DBG("Invalid - in window after SREJ sent");
				return L2CAP_TXSEQ_INVALID;
			}
		}

		if (chan->srej_list.head == txseq) {
			BT_DBG("Expected SREJ");
			return L2CAP_TXSEQ_EXPECTED_SREJ;
		}

		if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
			BT_DBG("Duplicate SREJ - txseq already stored");
			return L2CAP_TXSEQ_DUPLICATE_SREJ;
		}

		if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
			BT_DBG("Unexpected SREJ - not requested");
			return L2CAP_TXSEQ_UNEXPECTED_SREJ;
		}
	}

	if (chan->expected_tx_seq == txseq) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
			chan->tx_win) {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		} else {
			BT_DBG("Expected");
			return L2CAP_TXSEQ_EXPECTED;
		}
	}

	/* txseq before expected_tx_seq means a frame we already acked */
	if (__seq_offset(chan, txseq, chan->last_acked_seq) <
		__seq_offset(chan, chan->expected_tx_seq,
			     chan->last_acked_seq)){
		BT_DBG("Duplicate - expected_tx_seq later than txseq");
		return L2CAP_TXSEQ_DUPLICATE;
	}

	if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
		/* A source of invalid packets is a "double poll" condition,
		 * where delays cause us to send multiple poll packets. If
		 * the remote stack receives and processes both polls,
		 * sequence numbers can wrap around in such a way that a
		 * resent frame has a sequence number that looks like new data
		 * with a sequence gap. This would trigger an erroneous SREJ
		 * request.
		 *
		 * Fortunately, this is impossible with a tx window that's
		 * less than half of the maximum sequence number, which allows
		 * invalid frames to be safely ignored.
		 *
		 * With tx window sizes greater than half of the tx window
		 * maximum, the frame is invalid and cannot be ignored. This
		 * causes a disconnect.
		 */

		if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
			BT_DBG("Invalid/Ignore - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID_IGNORE;
		} else {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		}
	} else {
		BT_DBG("Unexpected - txseq indicates missing frames");
		return L2CAP_TXSEQ_UNEXPECTED;
	}
}
4739
/* Handle an incoming ERTM event while the channel is in
 * L2CAP_RX_STATE_RECV (the normal receive state, no SREJ outstanding).
 *
 * @chan:    ERTM channel the frame arrived on
 * @control: decoded control field of the received frame
 * @skb:     frame payload; ownership passes to this function — it is
 *           freed before returning unless queued or consumed by
 *           reassembly (tracked via skb_in_use)
 * @event:   L2CAP_EV_RECV_* event derived from the frame type
 *
 * Returns 0, or a negative error from SDU reassembly.
 */
static int l2cap_rx_state_recv(struct l2cap_chan *chan,
			       struct l2cap_ctrl *control,
			       struct sk_buff *skb, u8 event)
{
	int err = 0;
	bool skb_in_use = 0;	/* set once skb has been queued or reassembled */

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, control->txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			/* Process the piggybacked ack info first. */
			l2cap_pass_to_tx(chan, control);

			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				/* While locally busy, even expected frames
				 * are dropped; the peer will retransmit
				 * after local busy is exited.
				 */
				BT_DBG("Busy, discarding expected seq %d",
				       control->txseq);
				break;
			}

			chan->expected_tx_seq = __next_seq(chan,
							   control->txseq);

			chan->buffer_seq = chan->expected_tx_seq;
			skb_in_use = 1;

			err = l2cap_reassemble_sdu(chan, skb, control);
			if (err)
				break;

			if (control->final) {
				/* F-bit answers an outstanding poll; if no
				 * REJ-driven retransmission already handled
				 * it, retransmit and resume sending.
				 */
				if (!test_and_clear_bit(CONN_REJ_ACT,
							&chan->conn_state)) {
					control->final = 0;
					l2cap_retransmit_all(chan, control);
					l2cap_ertm_send(chan);
				}
			}

			if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
				l2cap_send_ack(chan);
			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			l2cap_pass_to_tx(chan, control);

			/* Can't issue SREJ frames in the local busy state.
			 * Drop this frame, it will be seen as missing
			 * when local busy is exited.
			 */
			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding unexpected seq %d",
				       control->txseq);
				break;
			}

			/* There was a gap in the sequence, so an SREJ
			 * must be sent for each missing frame. The
			 * current frame is stored for later use.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = 1;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			clear_bit(CONN_SREJ_ACT, &chan->conn_state);
			l2cap_seq_list_clear(&chan->srej_list);
			l2cap_send_srej(chan, control->txseq);

			/* SREJ recovery continues in the SREJ_SENT state. */
			chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			/* Already received: only the ack info is useful. */
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(chan->conn, chan,
					       ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			/* Retransmit only if a REJ did not already
			 * trigger retransmission for this poll.
			 */
			if (!test_and_clear_bit(CONN_REJ_ACT,
						&chan->conn_state)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			l2cap_send_i_or_rr_or_rnr(chan);
		} else {
			/* Plain RR: remote is no longer busy; restart the
			 * retransmission timer if frames remain unacked.
			 */
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_ertm_send(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		/* Remote is busy: stop retransmitting until it recovers. */
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		if (control && control->poll) {
			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_rr_or_rnr(chan, 0);
		}
		__clear_retrans_timer(chan);
		l2cap_seq_list_clear(&chan->retrans_list);
		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	default:
		break;
	}

	if (skb && !skb_in_use) {
		/* Frame was neither queued nor consumed by reassembly. */
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
4874
/* Handle an incoming ERTM event while in L2CAP_RX_STATE_SREJ_SENT,
 * i.e. one or more SREJ requests are outstanding and srej_q holds
 * frames received ahead of the missing ones.
 *
 * @chan:    ERTM channel
 * @control: decoded control field of the received frame
 * @skb:     frame payload; freed before returning unless queued
 *           (tracked via skb_in_use)
 * @event:   L2CAP_EV_RECV_* event
 *
 * Returns 0, or a negative error from processing queued I-frames.
 */
static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
				    struct l2cap_ctrl *control,
				    struct sk_buff *skb, u8 event)
{
	int err = 0;
	u16 txseq = control->txseq;
	bool skb_in_use = 0;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			/* Keep frame for reassembly later */
			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = 1;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			chan->expected_tx_seq = __next_seq(chan, txseq);
			break;
		case L2CAP_TXSEQ_EXPECTED_SREJ:
			/* This is the next retransmission we asked for:
			 * remove it from the SREJ list and try to flush
			 * the queued out-of-order frames.
			 */
			l2cap_seq_list_pop(&chan->srej_list);

			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = 1;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			err = l2cap_rx_queued_iframes(chan);
			if (err)
				break;

			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			/* Got a frame that can't be reassembled yet.
			 * Save it for later, and send SREJs to cover
			 * the missing frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = 1;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_UNEXPECTED_SREJ:
			/* This frame was requested with an SREJ, but
			 * some expected retransmitted frames are
			 * missing. Request retransmission of missing
			 * SREJ'd frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = 1;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej_list(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_DUPLICATE_SREJ:
			/* We've already queued this frame. Drop this copy. */
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			/* Expecting a later sequence number, so this frame
			 * was already received. Ignore it completely.
			 */
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(chan->conn, chan,
					       ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			/* F-bit answers our poll; retransmit unless a REJ
			 * already triggered retransmission.
			 */
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			if (!test_and_clear_bit(CONN_REJ_ACT,
						&chan->conn_state)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			/* Peer is polling: answer with the tail of the
			 * outstanding SREJ list, F-bit set.
			 */
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames) {
				__set_retrans_timer(chan);
			}

			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_srej_tail(chan);
		} else {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_send_ack(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		/* Remote went busy during SREJ recovery. */
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		if (control->poll) {
			l2cap_send_srej_tail(chan);
		} else {
			/* Acknowledge the RNR with a plain RR carrying
			 * our current buffer_seq.
			 */
			struct l2cap_ctrl rr_control;
			memset(&rr_control, 0, sizeof(rr_control));
			rr_control.sframe = 1;
			rr_control.super = L2CAP_SUPER_RR;
			rr_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &rr_control);
		}

		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	}

	if (skb && !skb_in_use) {
		/* Frame was not queued for later reassembly. */
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
5018
5019 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
5020 {
5021 /* Make sure reqseq is for a packet that has been sent but not acked */
5022 u16 unacked;
5023
5024 unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
5025 return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
5026 }
5027
5028 static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
5029 struct sk_buff *skb, u8 event)
5030 {
5031 int err = 0;
5032
5033 BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
5034 control, skb, event, chan->rx_state);
5035
5036 if (__valid_reqseq(chan, control->reqseq)) {
5037 switch (chan->rx_state) {
5038 case L2CAP_RX_STATE_RECV:
5039 err = l2cap_rx_state_recv(chan, control, skb, event);
5040 break;
5041 case L2CAP_RX_STATE_SREJ_SENT:
5042 err = l2cap_rx_state_srej_sent(chan, control, skb,
5043 event);
5044 break;
5045 default:
5046 /* shut it down */
5047 break;
5048 }
5049 } else {
5050 BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
5051 control->reqseq, chan->next_tx_seq,
5052 chan->expected_ack_seq);
5053 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
5054 }
5055
5056 return err;
5057 }
5058
5059 static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
5060 struct sk_buff *skb)
5061 {
5062 int err = 0;
5063
5064 BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
5065 chan->rx_state);
5066
5067 if (l2cap_classify_txseq(chan, control->txseq) ==
5068 L2CAP_TXSEQ_EXPECTED) {
5069 l2cap_pass_to_tx(chan, control);
5070
5071 BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
5072 __next_seq(chan, chan->buffer_seq));
5073
5074 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
5075
5076 l2cap_reassemble_sdu(chan, skb, control);
5077 } else {
5078 if (chan->sdu) {
5079 kfree_skb(chan->sdu);
5080 chan->sdu = NULL;
5081 }
5082 chan->sdu_last_frag = NULL;
5083 chan->sdu_len = 0;
5084
5085 if (skb) {
5086 BT_DBG("Freeing %p", skb);
5087 kfree_skb(skb);
5088 }
5089 }
5090
5091 chan->last_acked_seq = control->txseq;
5092 chan->expected_tx_seq = __next_seq(chan, control->txseq);
5093
5094 return err;
5095 }
5096
5097 static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
5098 {
5099 struct l2cap_ctrl *control = &bt_cb(skb)->control;
5100 u16 len;
5101 u8 event;
5102
5103 __unpack_control(chan, skb);
5104
5105 len = skb->len;
5106
5107 /*
5108 * We can just drop the corrupted I-frame here.
5109 * Receiver will miss it and start proper recovery
5110 * procedures and ask for retransmission.
5111 */
5112 if (l2cap_check_fcs(chan, skb))
5113 goto drop;
5114
5115 if (!control->sframe && control->sar == L2CAP_SAR_START)
5116 len -= L2CAP_SDULEN_SIZE;
5117
5118 if (chan->fcs == L2CAP_FCS_CRC16)
5119 len -= L2CAP_FCS_SIZE;
5120
5121 if (len > chan->mps) {
5122 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
5123 goto drop;
5124 }
5125
5126 if (!control->sframe) {
5127 int err;
5128
5129 BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
5130 control->sar, control->reqseq, control->final,
5131 control->txseq);
5132
5133 /* Validate F-bit - F=0 always valid, F=1 only
5134 * valid in TX WAIT_F
5135 */
5136 if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
5137 goto drop;
5138
5139 if (chan->mode != L2CAP_MODE_STREAMING) {
5140 event = L2CAP_EV_RECV_IFRAME;
5141 err = l2cap_rx(chan, control, skb, event);
5142 } else {
5143 err = l2cap_stream_rx(chan, control, skb);
5144 }
5145
5146 if (err)
5147 l2cap_send_disconn_req(chan->conn, chan,
5148 ECONNRESET);
5149 } else {
5150 const u8 rx_func_to_event[4] = {
5151 L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
5152 L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
5153 };
5154
5155 /* Only I-frames are expected in streaming mode */
5156 if (chan->mode == L2CAP_MODE_STREAMING)
5157 goto drop;
5158
5159 BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
5160 control->reqseq, control->final, control->poll,
5161 control->super);
5162
5163 if (len != 0) {
5164 BT_ERR("%d", len);
5165 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
5166 goto drop;
5167 }
5168
5169 /* Validate F and P bits */
5170 if (control->final && (control->poll ||
5171 chan->tx_state != L2CAP_TX_STATE_WAIT_F))
5172 goto drop;
5173
5174 event = rx_func_to_event[control->super];
5175 if (l2cap_rx(chan, control, skb, event))
5176 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
5177 }
5178
5179 return 0;
5180
5181 drop:
5182 kfree_skb(skb);
5183 return 0;
5184 }
5185
/* Route an incoming frame on a connection-oriented channel (identified
 * by its source CID) according to the channel's mode.
 *
 * Takes ownership of @skb.  Always returns 0.
 *
 * NOTE(review): l2cap_get_chan_by_scid appears to return the channel
 * locked — it is unlocked at "done" below; confirm against its
 * definition elsewhere in this file.
 */
static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
{
	struct l2cap_chan *chan;

	chan = l2cap_get_chan_by_scid(conn, cid);
	if (!chan) {
		BT_DBG("unknown cid 0x%4.4x", cid);
		/* Drop packet and return */
		kfree_skb(skb);
		return 0;
	}

	BT_DBG("chan %p, len %d", chan, skb->len);

	if (chan->state != BT_CONNECTED)
		goto drop;

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism. */

		if (chan->imtu < skb->len)
			goto drop;

		/* recv() returning 0 means the skb was consumed. */
		if (!chan->ops->recv(chan->data, skb))
			goto done;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* The rx state machine takes ownership of the skb. */
		l2cap_data_rcv(chan, skb);
		goto done;

	default:
		BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
		break;
	}

drop:
	kfree_skb(skb);

done:
	l2cap_chan_unlock(chan);

	return 0;
}
5235
5236 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
5237 {
5238 struct l2cap_chan *chan;
5239
5240 chan = l2cap_global_chan_by_psm(0, psm, conn->src, conn->dst);
5241 if (!chan)
5242 goto drop;
5243
5244 BT_DBG("chan %p, len %d", chan, skb->len);
5245
5246 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
5247 goto drop;
5248
5249 if (chan->imtu < skb->len)
5250 goto drop;
5251
5252 if (!chan->ops->recv(chan->data, skb))
5253 return 0;
5254
5255 drop:
5256 kfree_skb(skb);
5257
5258 return 0;
5259 }
5260
5261 static inline int l2cap_att_channel(struct l2cap_conn *conn, u16 cid,
5262 struct sk_buff *skb)
5263 {
5264 struct l2cap_chan *chan;
5265
5266 chan = l2cap_global_chan_by_scid(0, cid, conn->src, conn->dst);
5267 if (!chan)
5268 goto drop;
5269
5270 BT_DBG("chan %p, len %d", chan, skb->len);
5271
5272 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
5273 goto drop;
5274
5275 if (chan->imtu < skb->len)
5276 goto drop;
5277
5278 if (!chan->ops->recv(chan->data, skb))
5279 return 0;
5280
5281 drop:
5282 kfree_skb(skb);
5283
5284 return 0;
5285 }
5286
/* Demultiplex a complete, reassembled L2CAP frame to the handler for
 * its channel ID.  Takes ownership of @skb.
 */
static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct l2cap_hdr *lh = (void *) skb->data;
	u16 cid, len;
	__le16 psm;

	skb_pull(skb, L2CAP_HDR_SIZE);
	cid = __le16_to_cpu(lh->cid);
	len = __le16_to_cpu(lh->len);

	/* Header length must match the actual payload length. */
	if (len != skb->len) {
		kfree_skb(skb);
		return;
	}

	BT_DBG("len %d, cid 0x%4.4x", len, cid);

	switch (cid) {
	case L2CAP_CID_LE_SIGNALING:
	case L2CAP_CID_SIGNALING:
		l2cap_sig_channel(conn, skb);
		break;

	case L2CAP_CID_CONN_LESS:
		/* Connectionless frames carry the PSM in the first two
		 * payload bytes.
		 */
		psm = get_unaligned((__le16 *) skb->data);
		skb_pull(skb, 2);
		l2cap_conless_channel(conn, psm, skb);
		break;

	case L2CAP_CID_LE_DATA:
		l2cap_att_channel(conn, cid, skb);
		break;

	case L2CAP_CID_SMP:
		/* A rejected SMP packet tears down the whole connection. */
		if (smp_sig_channel(conn, skb))
			l2cap_conn_del(conn->hcon, EACCES);
		break;

	default:
		l2cap_data_channel(conn, cid, skb);
		break;
	}
}
5330
5331 /* ---- L2CAP interface with lower layer (HCI) ---- */
5332
5333 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
5334 {
5335 int exact = 0, lm1 = 0, lm2 = 0;
5336 struct l2cap_chan *c;
5337
5338 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
5339
5340 /* Find listening sockets and check their link_mode */
5341 read_lock(&chan_list_lock);
5342 list_for_each_entry(c, &chan_list, global_l) {
5343 struct sock *sk = c->sk;
5344
5345 if (c->state != BT_LISTEN)
5346 continue;
5347
5348 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
5349 lm1 |= HCI_LM_ACCEPT;
5350 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
5351 lm1 |= HCI_LM_MASTER;
5352 exact++;
5353 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
5354 lm2 |= HCI_LM_ACCEPT;
5355 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
5356 lm2 |= HCI_LM_MASTER;
5357 }
5358 }
5359 read_unlock(&chan_list_lock);
5360
5361 return exact ? lm1 : lm2;
5362 }
5363
5364 int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
5365 {
5366 struct l2cap_conn *conn;
5367
5368 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
5369
5370 if (!status) {
5371 conn = l2cap_conn_add(hcon, status);
5372 if (conn)
5373 l2cap_conn_ready(conn);
5374 } else
5375 l2cap_conn_del(hcon, bt_to_errno(status));
5376
5377 return 0;
5378 }
5379
5380 int l2cap_disconn_ind(struct hci_conn *hcon)
5381 {
5382 struct l2cap_conn *conn = hcon->l2cap_data;
5383
5384 BT_DBG("hcon %p", hcon);
5385
5386 if (!conn)
5387 return HCI_ERROR_REMOTE_USER_TERM;
5388 return conn->disc_reason;
5389 }
5390
/* HCI callback: the ACL link has been disconnected; tear down the
 * whole L2CAP connection with the mapped errno.  Always returns 0.
 */
int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
{
	BT_DBG("hcon %p reason %d", hcon, reason);

	l2cap_conn_del(hcon, bt_to_errno(reason));
	return 0;
}
5398
5399 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
5400 {
5401 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
5402 return;
5403
5404 if (encrypt == 0x00) {
5405 if (chan->sec_level == BT_SECURITY_MEDIUM) {
5406 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
5407 } else if (chan->sec_level == BT_SECURITY_HIGH)
5408 l2cap_chan_close(chan, ECONNREFUSED);
5409 } else {
5410 if (chan->sec_level == BT_SECURITY_MEDIUM)
5411 __clear_chan_timer(chan);
5412 }
5413 }
5414
/* HCI callback: the security (authentication/encryption) state of the
 * underlying link changed.  Walk every channel on the connection and
 * advance or tear down its state machine accordingly.
 *
 * @status:  0 on success, otherwise an error code from HCI
 * @encrypt: new encryption state of the link (0x00 = off)
 *
 * Always returns 0.
 */
int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan;

	if (!conn)
		return 0;

	BT_DBG("conn %p", conn);

	if (hcon->type == LE_LINK) {
		/* On LE, successful encryption triggers SMP key
		 * distribution; either way the security timer ends.
		 */
		if (!status && encrypt)
			smp_distribute_keys(conn, 0);
		cancel_delayed_work(&conn->security_timer);
	}

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		BT_DBG("chan->scid %d", chan->scid);

		if (chan->scid == L2CAP_CID_LE_DATA) {
			/* The LE data channel becomes ready once the link
			 * is encrypted; adopt the link's security level.
			 */
			if (!status && encrypt) {
				chan->sec_level = hcon->sec_level;
				l2cap_chan_ready(chan);
			}

			l2cap_chan_unlock(chan);
			continue;
		}

		/* Channels still waiting for their connect response are
		 * handled when that response arrives.
		 */
		if (test_bit(CONF_CONNECT_PEND, &chan->conf_state)) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (!status && (chan->state == BT_CONNECTED ||
				chan->state == BT_CONFIG)) {
			/* Security upgrade succeeded on an established
			 * channel: resume the suspended socket.
			 */
			struct sock *sk = chan->sk;

			clear_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags);
			sk->sk_state_change(sk);

			l2cap_check_encryption(chan, encrypt);
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			/* Outgoing connection was waiting on security. */
			if (!status) {
				l2cap_send_conn_req(chan);
			} else {
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
			}
		} else if (chan->state == BT_CONNECT2) {
			/* Incoming connection was waiting on security:
			 * answer the peer's connection request now.
			 */
			struct sock *sk = chan->sk;
			struct l2cap_conn_rsp rsp;
			__u16 res, stat;

			lock_sock(sk);

			if (!status) {
				if (test_bit(BT_SK_DEFER_SETUP,
					     &bt_sk(sk)->flags)) {
					/* Userspace must authorize first:
					 * answer "pending" and wake the
					 * listening parent socket.
					 */
					struct sock *parent = bt_sk(sk)->parent;
					res = L2CAP_CR_PEND;
					stat = L2CAP_CS_AUTHOR_PEND;
					if (parent)
						parent->sk_data_ready(parent, 0);
				} else {
					__l2cap_state_change(chan, BT_CONFIG);
					res = L2CAP_CR_SUCCESS;
					stat = L2CAP_CS_NO_INFO;
				}
			} else {
				/* Security failed: reject and arm the
				 * disconnect timer.
				 */
				__l2cap_state_change(chan, BT_DISCONN);
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
				res = L2CAP_CR_SEC_BLOCK;
				stat = L2CAP_CS_NO_INFO;
			}

			release_sock(sk);

			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);
			rsp.result = cpu_to_le16(res);
			rsp.status = cpu_to_le16(stat);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			/* On success, kick off configuration if our config
			 * request has not already been sent.
			 */
			if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
			    res == L2CAP_CR_SUCCESS) {
				char buf[128];
				set_bit(CONF_REQ_SENT, &chan->conf_state);
				l2cap_send_cmd(conn, l2cap_get_ident(conn),
					       L2CAP_CONF_REQ,
					       l2cap_build_conf_req(chan, buf),
					       buf);
				chan->num_conf_req++;
			}
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);

	return 0;
}
5526
/* HCI callback: an ACL data packet arrived for this connection.
 * Reassembles L2CAP frames that were fragmented across multiple ACL
 * packets; complete frames are handed to l2cap_recv_frame().
 *
 * @flags: ACL packet-boundary flags; ACL_CONT marks a continuation
 *         fragment, otherwise this is a start fragment.
 *
 * Always consumes @skb.  Returns 0.
 */
int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	/* Data may arrive before the connection was set up elsewhere;
	 * create the L2CAP connection on demand.
	 */
	if (!conn)
		conn = l2cap_conn_add(hcon, 0);

	if (!conn)
		goto drop;

	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);

	if (!(flags & ACL_CONT)) {
		struct l2cap_hdr *hdr;
		int len;

		/* A start fragment while reassembly is in progress means
		 * the previous frame was truncated; discard it.
		 */
		if (conn->rx_len) {
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
		}

		/* Start fragment always begin with Basic L2CAP header */
		if (skb->len < L2CAP_HDR_SIZE) {
			BT_ERR("Frame is too short (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		hdr = (struct l2cap_hdr *) skb->data;
		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);
			return 0;
		}

		BT_DBG("Start: total len %d, frag len %d", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %d, expected len %d)",
			       skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!conn->rx_skb)
			goto drop;

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
					  skb->len);
		/* Bytes still expected before the frame is complete. */
		conn->rx_len = len - skb->len;
	} else {
		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);

		if (!conn->rx_len) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %d, expected %d)",
			       skb->len, conn->rx_len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
					  skb->len);
		conn->rx_len -= skb->len;

		if (!conn->rx_len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, conn->rx_skb);
			conn->rx_skb = NULL;
		}
	}

drop:
	kfree_skb(skb);
	return 0;
}
5618
5619 static int l2cap_debugfs_show(struct seq_file *f, void *p)
5620 {
5621 struct l2cap_chan *c;
5622
5623 read_lock(&chan_list_lock);
5624
5625 list_for_each_entry(c, &chan_list, global_l) {
5626 struct sock *sk = c->sk;
5627
5628 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
5629 batostr(&bt_sk(sk)->src),
5630 batostr(&bt_sk(sk)->dst),
5631 c->state, __le16_to_cpu(c->psm),
5632 c->scid, c->dcid, c->imtu, c->omtu,
5633 c->sec_level, c->mode);
5634 }
5635
5636 read_unlock(&chan_list_lock);
5637
5638 return 0;
5639 }
5640
/* debugfs open: bind the seq_file show routine for the channel list. */
static int l2cap_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, l2cap_debugfs_show, inode->i_private);
}
5645
/* File operations for the "l2cap" debugfs entry (read-only seq_file). */
static const struct file_operations l2cap_debugfs_fops = {
	.open = l2cap_debugfs_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

/* Dentry of the debugfs file, kept for removal in l2cap_exit(). */
static struct dentry *l2cap_debugfs;
5654
5655 int __init l2cap_init(void)
5656 {
5657 int err;
5658
5659 err = l2cap_init_sockets();
5660 if (err < 0)
5661 return err;
5662
5663 if (bt_debugfs) {
5664 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
5665 bt_debugfs, NULL, &l2cap_debugfs_fops);
5666 if (!l2cap_debugfs)
5667 BT_ERR("Failed to create L2CAP debug file");
5668 }
5669
5670 return 0;
5671 }
5672
/* Module teardown: remove the debugfs entry and unregister sockets. */
void l2cap_exit(void)
{
	debugfs_remove(l2cap_debugfs);
	l2cap_cleanup_sockets();
}
5678
/* Expose disable_ertm as a module parameter (0644: root-writable via
 * sysfs, world-readable).
 */
module_param(disable_ertm, bool, 0644);
MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");
This page took 0.152768 seconds and 6 git commands to generate.