Bluetooth: A2MP: Handling fixed channels
[deliverable/linux.git] / net / bluetooth / l2cap_core.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
8
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
10
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
14
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
23
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
27 */
28
29 /* Bluetooth L2CAP core. */
30
31 #include <linux/module.h>
32
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
35
36 #include <net/bluetooth/bluetooth.h>
37 #include <net/bluetooth/hci_core.h>
38 #include <net/bluetooth/l2cap.h>
39 #include <net/bluetooth/smp.h>
40
41 bool disable_ertm;
42
43 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
44 static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP, };
45
46 static LIST_HEAD(chan_list);
47 static DEFINE_RWLOCK(chan_list_lock);
48
49 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
50 u8 code, u8 ident, u16 dlen, void *data);
51 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
52 void *data);
53 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
54 static void l2cap_send_disconn_req(struct l2cap_conn *conn,
55 struct l2cap_chan *chan, int err);
56
57 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
58 struct sk_buff_head *skbs, u8 event);
59
60 /* ---- L2CAP channels ---- */
61
62 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
63 {
64 struct l2cap_chan *c;
65
66 list_for_each_entry(c, &conn->chan_l, list) {
67 if (c->dcid == cid)
68 return c;
69 }
70 return NULL;
71 }
72
73 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
74 {
75 struct l2cap_chan *c;
76
77 list_for_each_entry(c, &conn->chan_l, list) {
78 if (c->scid == cid)
79 return c;
80 }
81 return NULL;
82 }
83
/* Find channel with given SCID.
 * Returns a locked channel (or NULL): the channel lock is taken
 * while conn->chan_lock is still held, so the caller receives a
 * channel that cannot be concurrently torn down under it.
 */
static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
{
	struct l2cap_chan *c;

	mutex_lock(&conn->chan_lock);
	c = __l2cap_get_chan_by_scid(conn, cid);
	if (c)
		l2cap_chan_lock(c);
	mutex_unlock(&conn->chan_lock);

	return c;
}
98
99 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
100 {
101 struct l2cap_chan *c;
102
103 list_for_each_entry(c, &conn->chan_l, list) {
104 if (c->ident == ident)
105 return c;
106 }
107 return NULL;
108 }
109
110 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
111 {
112 struct l2cap_chan *c;
113
114 list_for_each_entry(c, &chan_list, global_l) {
115 if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
116 return c;
117 }
118 return NULL;
119 }
120
121 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
122 {
123 int err;
124
125 write_lock(&chan_list_lock);
126
127 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
128 err = -EADDRINUSE;
129 goto done;
130 }
131
132 if (psm) {
133 chan->psm = psm;
134 chan->sport = psm;
135 err = 0;
136 } else {
137 u16 p;
138
139 err = -EINVAL;
140 for (p = 0x1001; p < 0x1100; p += 2)
141 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
142 chan->psm = cpu_to_le16(p);
143 chan->sport = cpu_to_le16(p);
144 err = 0;
145 break;
146 }
147 }
148
149 done:
150 write_unlock(&chan_list_lock);
151 return err;
152 }
153
154 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
155 {
156 write_lock(&chan_list_lock);
157
158 chan->scid = scid;
159
160 write_unlock(&chan_list_lock);
161
162 return 0;
163 }
164
165 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
166 {
167 u16 cid = L2CAP_CID_DYN_START;
168
169 for (; cid < L2CAP_CID_DYN_END; cid++) {
170 if (!__l2cap_get_chan_by_scid(conn, cid))
171 return cid;
172 }
173
174 return 0;
175 }
176
/* Move @chan to @state and notify the owner through the
 * state_change callback. Lockless variant — callers are expected to
 * hold the socket lock (see the l2cap_state_change() wrapper).
 */
static void __l2cap_state_change(struct l2cap_chan *chan, int state)
{
	BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
	       state_to_string(state));

	chan->state = state;
	chan->ops->state_change(chan, state);
}
185
/* Locked wrapper around __l2cap_state_change(): takes the socket
 * lock for the duration of the state transition.
 */
static void l2cap_state_change(struct l2cap_chan *chan, int state)
{
	struct sock *sk = chan->sk;

	lock_sock(sk);
	__l2cap_state_change(chan, state);
	release_sock(sk);
}
194
/* Record @err on the channel's socket. Lockless variant — use
 * l2cap_chan_set_err() when the socket lock is not already held.
 */
static inline void __l2cap_chan_set_err(struct l2cap_chan *chan, int err)
{
	struct sock *sk = chan->sk;

	sk->sk_err = err;
}
201
/* Locked wrapper around __l2cap_chan_set_err(): sets sk->sk_err
 * under the socket lock.
 */
static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
{
	struct sock *sk = chan->sk;

	lock_sock(sk);
	__l2cap_chan_set_err(chan, err);
	release_sock(sk);
}
210
/* Arm the ERTM retransmission timer. It is started only when the
 * monitor timer is not already pending (the two are mutually
 * exclusive) and a non-zero retransmission timeout is configured.
 */
static void __set_retrans_timer(struct l2cap_chan *chan)
{
	if (!delayed_work_pending(&chan->monitor_timer) &&
	    chan->retrans_timeout) {
		l2cap_set_timer(chan, &chan->retrans_timer,
				msecs_to_jiffies(chan->retrans_timeout));
	}
}
219
/* Switch from retransmission to monitor timing: the retransmission
 * timer is always stopped, and the monitor timer is armed if a
 * non-zero monitor timeout is configured.
 */
static void __set_monitor_timer(struct l2cap_chan *chan)
{
	__clear_retrans_timer(chan);
	if (chan->monitor_timeout) {
		l2cap_set_timer(chan, &chan->monitor_timer,
				msecs_to_jiffies(chan->monitor_timeout));
	}
}
228
229 static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
230 u16 seq)
231 {
232 struct sk_buff *skb;
233
234 skb_queue_walk(head, skb) {
235 if (bt_cb(skb)->control.txseq == seq)
236 return skb;
237 }
238
239 return NULL;
240 }
241
242 /* ---- L2CAP sequence number lists ---- */
243
244 /* For ERTM, ordered lists of sequence numbers must be tracked for
245 * SREJ requests that are received and for frames that are to be
246 * retransmitted. These seq_list functions implement a singly-linked
247 * list in an array, where membership in the list can also be checked
248 * in constant time. Items can also be added to the tail of the list
249 * and removed from the head in constant time, without further memory
250 * allocs or frees.
251 */
252
/* Allocate and reset the backing array for a sequence-number list.
 * @size: largest number of entries the list must hold (the
 *        negotiated ERTM transmit window).
 * Returns 0 on success or -ENOMEM.
 */
static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
{
	size_t alloc_size, i;

	/* Allocated size is a power of 2 to map sequence numbers
	 * (which may be up to 14 bits) in to a smaller array that is
	 * sized for the negotiated ERTM transmit windows.
	 */
	alloc_size = roundup_pow_of_two(size);

	seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
	if (!seq_list->list)
		return -ENOMEM;

	/* Every slot starts out "not a member" */
	seq_list->mask = alloc_size - 1;
	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
	for (i = 0; i < alloc_size; i++)
		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;

	return 0;
}
275
/* Release the array allocated by l2cap_seq_list_init(). */
static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
{
	kfree(seq_list->list);
}
280
/* Test whether @seq is on the list: a slot holds either a link to
 * the next member (or the tail marker) when @seq is present, or
 * L2CAP_SEQ_LIST_CLEAR when it is not.
 */
static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
					   u16 seq)
{
	/* Constant-time check for list membership */
	return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
}
287
/* Remove @seq from the list. Head removal is O(1); removing an
 * interior element walks the singly-linked chain to find the
 * predecessor. Returns @seq on success, or L2CAP_SEQ_LIST_CLEAR if
 * the list is empty or @seq is not on it.
 */
static u16 l2cap_seq_list_remove(struct l2cap_seq_list *seq_list, u16 seq)
{
	u16 mask = seq_list->mask;

	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR) {
		/* In case someone tries to pop the head of an empty list */
		return L2CAP_SEQ_LIST_CLEAR;
	} else if (seq_list->head == seq) {
		/* Head can be removed in constant time */
		seq_list->head = seq_list->list[seq & mask];
		seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;

		/* Removing the last element empties the list entirely */
		if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
			seq_list->head = L2CAP_SEQ_LIST_CLEAR;
			seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
		}
	} else {
		/* Walk the list to find the sequence number */
		u16 prev = seq_list->head;
		while (seq_list->list[prev & mask] != seq) {
			prev = seq_list->list[prev & mask];
			if (prev == L2CAP_SEQ_LIST_TAIL)
				return L2CAP_SEQ_LIST_CLEAR;
		}

		/* Unlink the number from the list and clear it */
		seq_list->list[prev & mask] = seq_list->list[seq & mask];
		seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
		if (seq_list->tail == seq)
			seq_list->tail = prev;
	}
	return seq;
}
321
/* Pop the first sequence number off the list; returns it, or
 * L2CAP_SEQ_LIST_CLEAR if the list is empty.
 */
static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
{
	/* Remove the head in constant time */
	return l2cap_seq_list_remove(seq_list, seq_list->head);
}
327
/* Empty the list by clearing every slot. The early return is valid
 * because head == L2CAP_SEQ_LIST_CLEAR implies no slot is in use.
 */
static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
{
	u16 i;

	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
		return;

	for (i = 0; i <= seq_list->mask; i++)
		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;

	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
}
341
/* Append @seq to the tail of the list in O(1). Appending a number
 * that is already a member is a no-op (its slot is non-CLEAR).
 */
static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
{
	u16 mask = seq_list->mask;

	/* All appends happen in constant time */

	if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
		return;

	/* Empty list: @seq becomes the head; otherwise link it after
	 * the current tail.
	 */
	if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
		seq_list->head = seq;
	else
		seq_list->list[seq_list->tail & mask] = seq;

	seq_list->tail = seq;
	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
}
359
/* Work item run when the channel timer expires: close the channel
 * with a reason derived from its current state, then drop the timer's
 * channel reference. Lock order is conn->chan_lock before the channel
 * lock, matching the rest of this file.
 */
static void l2cap_chan_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       chan_timer.work);
	struct l2cap_conn *conn = chan->conn;
	int reason;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(chan);

	/* A timeout while (almost) connected, or while connecting past
	 * the SDP security level, reads as a refused connection; any
	 * other state is a plain timeout.
	 */
	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (chan->state == BT_CONNECT &&
		 chan->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	l2cap_chan_close(chan, reason);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan);
	mutex_unlock(&conn->chan_lock);

	l2cap_chan_put(chan);
}
389
390 struct l2cap_chan *l2cap_chan_create(void)
391 {
392 struct l2cap_chan *chan;
393
394 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
395 if (!chan)
396 return NULL;
397
398 mutex_init(&chan->lock);
399
400 write_lock(&chan_list_lock);
401 list_add(&chan->global_l, &chan_list);
402 write_unlock(&chan_list_lock);
403
404 INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
405
406 chan->state = BT_OPEN;
407
408 atomic_set(&chan->refcnt, 1);
409
410 /* This flag is cleared in l2cap_chan_ready() */
411 set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
412
413 BT_DBG("chan %p", chan);
414
415 return chan;
416 }
417
/* Unlink @chan from the global channel list and drop the reference
 * taken in l2cap_chan_create().
 */
void l2cap_chan_destroy(struct l2cap_chan *chan)
{
	write_lock(&chan_list_lock);
	list_del(&chan->global_l);
	write_unlock(&chan_list_lock);

	l2cap_chan_put(chan);
}
426
/* Install protocol defaults on a freshly created channel: CRC16 FCS,
 * default ERTM retransmission limit and transmit window, lowest
 * security level, and the FLAG_FORCE_ACTIVE flag.
 */
void l2cap_chan_set_defaults(struct l2cap_chan *chan)
{
	chan->fcs  = L2CAP_FCS_CRC16;
	chan->max_tx = L2CAP_DEFAULT_MAX_TX;
	chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
	chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
	chan->sec_level = BT_SECURITY_LOW;

	set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
}
437
/* Attach @chan to @conn, assigning CIDs and MTUs according to the
 * channel type, then link it into the connection's channel list.
 * Lockless variant — see the l2cap_chan_add() wrapper for locking.
 */
static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
	       __le16_to_cpu(chan->psm), chan->dcid);

	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	chan->conn = conn;

	switch (chan->chan_type) {
	case L2CAP_CHAN_CONN_ORIENTED:
		if (conn->hcon->type == LE_LINK) {
			/* LE connection */
			chan->omtu = L2CAP_LE_DEFAULT_MTU;
			chan->scid = L2CAP_CID_LE_DATA;
			chan->dcid = L2CAP_CID_LE_DATA;
		} else {
			/* Alloc CID for connection-oriented socket */
			chan->scid = l2cap_alloc_cid(conn);
			chan->omtu = L2CAP_DEFAULT_MTU;
		}
		break;

	case L2CAP_CHAN_CONN_LESS:
		/* Connectionless socket */
		chan->scid = L2CAP_CID_CONN_LESS;
		chan->dcid = L2CAP_CID_CONN_LESS;
		chan->omtu = L2CAP_DEFAULT_MTU;
		break;

	case L2CAP_CHAN_CONN_FIX_A2MP:
		/* A2MP uses a fixed CID in both directions */
		chan->scid = L2CAP_CID_A2MP;
		chan->dcid = L2CAP_CID_A2MP;
		chan->omtu = L2CAP_A2MP_DEFAULT_MTU;
		chan->imtu = L2CAP_A2MP_DEFAULT_MTU;
		break;

	default:
		/* Raw socket can send/recv signalling messages only */
		chan->scid = L2CAP_CID_SIGNALING;
		chan->dcid = L2CAP_CID_SIGNALING;
		chan->omtu = L2CAP_DEFAULT_MTU;
	}

	/* Best-effort flow-spec defaults for the local side */
	chan->local_id		= L2CAP_BESTEFFORT_ID;
	chan->local_stype	= L2CAP_SERV_BESTEFFORT;
	chan->local_msdu	= L2CAP_DEFAULT_MAX_SDU_SIZE;
	chan->local_sdu_itime	= L2CAP_DEFAULT_SDU_ITIME;
	chan->local_acc_lat	= L2CAP_DEFAULT_ACC_LAT;
	chan->local_flush_to	= L2CAP_DEFAULT_FLUSH_TO;

	/* Reference for the conn->chan_l entry; dropped in
	 * l2cap_chan_del().
	 */
	l2cap_chan_hold(chan);

	list_add(&chan->list, &conn->chan_l);
}
493
/* Locked wrapper around __l2cap_chan_add(): attaches @chan to @conn
 * under conn->chan_lock.
 */
void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	mutex_lock(&conn->chan_lock);
	__l2cap_chan_add(conn, chan);
	mutex_unlock(&conn->chan_lock);
}
500
501 void l2cap_chan_del(struct l2cap_chan *chan, int err)
502 {
503 struct l2cap_conn *conn = chan->conn;
504
505 __clear_chan_timer(chan);
506
507 BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
508
509 if (conn) {
510 /* Delete from channel list */
511 list_del(&chan->list);
512
513 l2cap_chan_put(chan);
514
515 chan->conn = NULL;
516 hci_conn_put(conn->hcon);
517 }
518
519 if (chan->ops->teardown)
520 chan->ops->teardown(chan, err);
521
522 if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
523 return;
524
525 switch(chan->mode) {
526 case L2CAP_MODE_BASIC:
527 break;
528
529 case L2CAP_MODE_ERTM:
530 __clear_retrans_timer(chan);
531 __clear_monitor_timer(chan);
532 __clear_ack_timer(chan);
533
534 skb_queue_purge(&chan->srej_q);
535
536 l2cap_seq_list_free(&chan->srej_list);
537 l2cap_seq_list_free(&chan->retrans_list);
538
539 /* fall through */
540
541 case L2CAP_MODE_STREAMING:
542 skb_queue_purge(&chan->tx_q);
543 break;
544 }
545
546 return;
547 }
548
/* Close @chan according to its current state. Called with the
 * channel locked (see l2cap_chan_timeout()).
 */
void l2cap_chan_close(struct l2cap_chan *chan, int reason)
{
	struct l2cap_conn *conn = chan->conn;
	struct sock *sk = chan->sk;

	BT_DBG("chan %p state %s sk %p", chan,
	       state_to_string(chan->state), sk);

	switch (chan->state) {
	case BT_LISTEN:
		if (chan->ops->teardown)
			chan->ops->teardown(chan, 0);
		break;

	case BT_CONNECTED:
	case BT_CONFIG:
		/* Established connection-oriented ACL channels need a
		 * disconnect handshake: send the request and keep the
		 * channel alive under a timer until the response (or
		 * timeout). Everything else is deleted immediately.
		 */
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
		    conn->hcon->type == ACL_LINK) {
			__set_chan_timer(chan, sk->sk_sndtimeo);
			l2cap_send_disconn_req(conn, chan, reason);
		} else
			l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT2:
		/* The peer's connect request is still pending a
		 * response — reject it before deleting the channel.
		 */
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
		    conn->hcon->type == ACL_LINK) {
			struct l2cap_conn_rsp rsp;
			__u16 result;

			if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags))
				result = L2CAP_CR_SEC_BLOCK;
			else
				result = L2CAP_CR_BAD_PSM;
			l2cap_state_change(chan, BT_DISCONN);

			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);
			rsp.result = cpu_to_le16(result);
			rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);
		}

		l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT:
	case BT_DISCONN:
		l2cap_chan_del(chan, reason);
		break;

	default:
		if (chan->ops->teardown)
			chan->ops->teardown(chan, 0);
		break;
	}
}
607
608 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
609 {
610 if (chan->chan_type == L2CAP_CHAN_RAW) {
611 switch (chan->sec_level) {
612 case BT_SECURITY_HIGH:
613 return HCI_AT_DEDICATED_BONDING_MITM;
614 case BT_SECURITY_MEDIUM:
615 return HCI_AT_DEDICATED_BONDING;
616 default:
617 return HCI_AT_NO_BONDING;
618 }
619 } else if (chan->psm == __constant_cpu_to_le16(L2CAP_PSM_SDP)) {
620 if (chan->sec_level == BT_SECURITY_LOW)
621 chan->sec_level = BT_SECURITY_SDP;
622
623 if (chan->sec_level == BT_SECURITY_HIGH)
624 return HCI_AT_NO_BONDING_MITM;
625 else
626 return HCI_AT_NO_BONDING;
627 } else {
628 switch (chan->sec_level) {
629 case BT_SECURITY_HIGH:
630 return HCI_AT_GENERAL_BONDING_MITM;
631 case BT_SECURITY_MEDIUM:
632 return HCI_AT_GENERAL_BONDING;
633 default:
634 return HCI_AT_NO_BONDING;
635 }
636 }
637 }
638
/* Service level security: ask the HCI layer to enforce the channel's
 * security level on the underlying link, with authentication
 * requirements derived from the channel type and PSM.
 */
int l2cap_chan_check_security(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	__u8 auth_type;

	auth_type = l2cap_get_auth_type(chan);

	return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
}
649
650 static u8 l2cap_get_ident(struct l2cap_conn *conn)
651 {
652 u8 id;
653
654 /* Get next available identificator.
655 * 1 - 128 are used by kernel.
656 * 129 - 199 are reserved.
657 * 200 - 254 are used by utilities like l2ping, etc.
658 */
659
660 spin_lock(&conn->lock);
661
662 if (++conn->tx_ident > 128)
663 conn->tx_ident = 1;
664
665 id = conn->tx_ident;
666
667 spin_unlock(&conn->lock);
668
669 return id;
670 }
671
/* Build and transmit a signalling command on @conn at maximum HCI
 * priority, using non-flushable ACL packets when the controller
 * supports them. An allocation failure silently drops the command.
 */
static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
{
	struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
	u8 flags;

	BT_DBG("code 0x%2.2x", code);

	if (!skb)
		return;

	if (lmp_no_flush_capable(conn->hcon->hdev))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
	skb->priority = HCI_PRIO_MAX;

	hci_send_acl(conn->hchan, skb, flags);
}
692
/* Hand one data frame for @chan to the HCI layer, selecting the
 * flushable vs non-flushable ACL start flag from the channel flags
 * and the controller's capabilities.
 */
static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct hci_conn *hcon = chan->conn->hcon;
	u16 flags;

	BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
	       skb->priority);

	if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
	    lmp_no_flush_capable(hcon->hdev))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
	hci_send_acl(chan->conn->hchan, skb, flags);
}
710
711 static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
712 {
713 control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
714 control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
715
716 if (enh & L2CAP_CTRL_FRAME_TYPE) {
717 /* S-Frame */
718 control->sframe = 1;
719 control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
720 control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
721
722 control->sar = 0;
723 control->txseq = 0;
724 } else {
725 /* I-Frame */
726 control->sframe = 0;
727 control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
728 control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
729
730 control->poll = 0;
731 control->super = 0;
732 }
733 }
734
735 static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
736 {
737 control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
738 control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
739
740 if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
741 /* S-Frame */
742 control->sframe = 1;
743 control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
744 control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
745
746 control->sar = 0;
747 control->txseq = 0;
748 } else {
749 /* I-Frame */
750 control->sframe = 0;
751 control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
752 control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
753
754 control->poll = 0;
755 control->super = 0;
756 }
757 }
758
/* Strip and decode the control field at the head of @skb into
 * bt_cb(skb)->control, choosing the extended (32-bit) or enhanced
 * (16-bit) layout from the channel's FLAG_EXT_CTRL.
 */
static inline void __unpack_control(struct l2cap_chan *chan,
				    struct sk_buff *skb)
{
	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
		__unpack_extended_control(get_unaligned_le32(skb->data),
					  &bt_cb(skb)->control);
		skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
	} else {
		__unpack_enhanced_control(get_unaligned_le16(skb->data),
					  &bt_cb(skb)->control);
		skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
	}
}
772
773 static u32 __pack_extended_control(struct l2cap_ctrl *control)
774 {
775 u32 packed;
776
777 packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
778 packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
779
780 if (control->sframe) {
781 packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
782 packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
783 packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
784 } else {
785 packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
786 packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
787 }
788
789 return packed;
790 }
791
792 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
793 {
794 u16 packed;
795
796 packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
797 packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
798
799 if (control->sframe) {
800 packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
801 packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
802 packed |= L2CAP_CTRL_FRAME_TYPE;
803 } else {
804 packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
805 packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
806 }
807
808 return packed;
809 }
810
/* Encode @control into the control-field slot directly after the
 * basic L2CAP header in @skb.
 * NOTE(review): assumes skb->data already holds the L2CAP header
 * with room reserved for the control field — confirm at call sites.
 */
static inline void __pack_control(struct l2cap_chan *chan,
				  struct l2cap_ctrl *control,
				  struct sk_buff *skb)
{
	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
		put_unaligned_le32(__pack_extended_control(control),
				   skb->data + L2CAP_HDR_SIZE);
	} else {
		put_unaligned_le16(__pack_enhanced_control(control),
				   skb->data + L2CAP_HDR_SIZE);
	}
}
823
/* Allocate and fill a complete S-frame PDU for @chan: basic L2CAP
 * header, enhanced or extended control field, and optional CRC16
 * FCS. Returns the skb or ERR_PTR(-ENOMEM).
 */
static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
					       u32 control)
{
	struct sk_buff *skb;
	struct l2cap_hdr *lh;
	int hlen;

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		hlen = L2CAP_EXT_HDR_SIZE;
	else
		hlen = L2CAP_ENH_HDR_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	skb = bt_skb_alloc(hlen, GFP_KERNEL);

	if (!skb)
		return ERR_PTR(-ENOMEM);

	/* S-frames carry no payload: length is everything after the
	 * basic header (control field plus optional FCS).
	 */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	/* FCS covers the header and control field written so far */
	if (chan->fcs == L2CAP_FCS_CRC16) {
		u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
		put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
	}

	skb->priority = HCI_PRIO_MAX;
	return skb;
}
861
/* Build and send one ERTM S-frame described by @control, updating
 * the related per-channel bookkeeping (final-bit piggybacking,
 * RNR-sent tracking, and ack state for non-SREJ frames).
 */
static void l2cap_send_sframe(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;
	u32 control_field;

	BT_DBG("chan %p, control %p", chan, control);

	if (!control->sframe)
		return;

	/* Piggyback a pending F-bit on any non-poll S-frame */
	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
	    !control->poll)
		control->final = 1;

	/* Track whether our last supervisory frame advertised busy */
	if (control->super == L2CAP_SUPER_RR)
		clear_bit(CONN_RNR_SENT, &chan->conn_state);
	else if (control->super == L2CAP_SUPER_RNR)
		set_bit(CONN_RNR_SENT, &chan->conn_state);

	/* RR/RNR acknowledge up to reqseq, so the ack timer can be
	 * stopped; SREJ does not acknowledge.
	 */
	if (control->super != L2CAP_SUPER_SREJ) {
		chan->last_acked_seq = control->reqseq;
		__clear_ack_timer(chan);
	}

	BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
	       control->final, control->poll, control->super);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		control_field = __pack_extended_control(control);
	else
		control_field = __pack_enhanced_control(control);

	skb = l2cap_create_sframe_pdu(chan, control_field);
	if (!IS_ERR(skb))
		l2cap_do_send(chan, skb);
}
899
900 static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
901 {
902 struct l2cap_ctrl control;
903
904 BT_DBG("chan %p, poll %d", chan, poll);
905
906 memset(&control, 0, sizeof(control));
907 control.sframe = 1;
908 control.poll = poll;
909
910 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
911 control.super = L2CAP_SUPER_RNR;
912 else
913 control.super = L2CAP_SUPER_RR;
914
915 control.reqseq = chan->buffer_seq;
916 l2cap_send_sframe(chan, &control);
917 }
918
/* True when no connect request is currently outstanding for @chan. */
static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
{
	return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
}
923
/* Send an L2CAP connect request for @chan, recording the command
 * ident so the response can be matched and marking the connect as
 * pending via CONF_CONNECT_PEND.
 */
static void l2cap_send_conn_req(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_conn_req req;

	req.scid = cpu_to_le16(chan->scid);
	req.psm  = chan->psm;

	chan->ident = l2cap_get_ident(conn);

	set_bit(CONF_CONNECT_PEND, &chan->conf_state);

	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
}
938
/* Complete channel setup: clear all configuration state, stop the
 * channel timer, enter BT_CONNECTED and notify the owner.
 */
static void l2cap_chan_ready(struct l2cap_chan *chan)
{
	/* This clears all conf flags, including CONF_NOT_COMPLETE */
	chan->conf_state = 0;
	__clear_chan_timer(chan);

	chan->state = BT_CONNECTED;

	if (chan->ops->ready)
		chan->ops->ready(chan);
}
950
/* Kick off connection establishment for @chan. LE channels are
 * ready immediately. On BR/EDR the remote feature mask must be
 * known first: once the info exchange has completed, send a connect
 * request (subject to a security check); otherwise start the info
 * request cycle.
 */
static void l2cap_do_start(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;

	if (conn->hcon->type == LE_LINK) {
		l2cap_chan_ready(chan);
		return;
	}

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
		/* Info exchange in flight — l2cap_conn_start() will
		 * resume this channel when the response arrives.
		 */
		if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
			return;

		if (l2cap_chan_check_security(chan) &&
		    __l2cap_no_conn_pending(chan))
			l2cap_send_conn_req(chan);
	} else {
		struct l2cap_info_req req;
		req.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

		l2cap_send_cmd(conn, conn->info_ident,
			       L2CAP_INFO_REQ, sizeof(req), &req);
	}
}
980
981 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
982 {
983 u32 local_feat_mask = l2cap_feat_mask;
984 if (!disable_ertm)
985 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
986
987 switch (mode) {
988 case L2CAP_MODE_ERTM:
989 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
990 case L2CAP_MODE_STREAMING:
991 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
992 default:
993 return 0x00;
994 }
995 }
996
/* Initiate disconnection of @chan: stop any ERTM timers, send a
 * disconnect request (except for the A2MP fixed channel, which only
 * transitions to BT_DISCONN), and record @err on the socket under
 * the socket lock.
 */
static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err)
{
	struct sock *sk = chan->sk;
	struct l2cap_disconn_req req;

	if (!conn)
		return;

	if (chan->mode == L2CAP_MODE_ERTM) {
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);
	}

	/* Fixed A2MP channel: no disconnect PDU is sent */
	if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
		__l2cap_state_change(chan, BT_DISCONN);
		return;
	}

	req.dcid = cpu_to_le16(chan->dcid);
	req.scid = cpu_to_le16(chan->scid);
	l2cap_send_cmd(conn, l2cap_get_ident(conn),
		       L2CAP_DISCONN_REQ, sizeof(req), &req);

	lock_sock(sk);
	__l2cap_state_change(chan, BT_DISCONN);
	__l2cap_chan_set_err(chan, err);
	release_sock(sk);
}
1026
1027 /* ---- L2CAP connections ---- */
/* Resume connection establishment on every connection-oriented
 * channel of @conn — typically after the information exchange has
 * completed. BT_CONNECT channels get a connect request (or are
 * closed if their mode is unsupported and must not fall back);
 * BT_CONNECT2 channels get a connect response and, on success, the
 * first configure request.
 */
static void l2cap_conn_start(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan, *tmp;

	BT_DBG("conn %p", conn);

	mutex_lock(&conn->chan_lock);

	/* _safe: l2cap_chan_close() below can unlink entries */
	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
		struct sock *sk = chan->sk;

		l2cap_chan_lock(chan);

		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			if (!l2cap_chan_check_security(chan) ||
					!__l2cap_no_conn_pending(chan)) {
				l2cap_chan_unlock(chan);
				continue;
			}

			/* CONF_STATE2_DEVICE forbids falling back to
			 * basic mode, so an unsupported mode is fatal.
			 */
			if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
					&& test_bit(CONF_STATE2_DEVICE,
					&chan->conf_state)) {
				l2cap_chan_close(chan, ECONNRESET);
				l2cap_chan_unlock(chan);
				continue;
			}

			l2cap_send_conn_req(chan);

		} else if (chan->state == BT_CONNECT2) {
			struct l2cap_conn_rsp rsp;
			char buf[128];
			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);

			if (l2cap_chan_check_security(chan)) {
				lock_sock(sk);
				if (test_bit(BT_SK_DEFER_SETUP,
					     &bt_sk(sk)->flags)) {
					/* Wait for userspace accept() —
					 * respond "pending" and wake the
					 * listening socket.
					 */
					struct sock *parent = bt_sk(sk)->parent;
					rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
					rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
					if (parent)
						parent->sk_data_ready(parent, 0);

				} else {
					__l2cap_state_change(chan, BT_CONFIG);
					rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
					rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
				}
				release_sock(sk);
			} else {
				rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
				rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
			}

			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			/* Start configuration only on a successful
			 * response, and only once per channel.
			 */
			if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
					rsp.result != L2CAP_CR_SUCCESS) {
				l2cap_chan_unlock(chan);
				continue;
			}

			set_bit(CONF_REQ_SENT, &chan->conf_state);
			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
				       l2cap_build_conf_req(chan, buf), buf);
			chan->num_conf_req++;
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
1110
/* Find a channel in the global list by state, source CID and
 * source/destination bdaddr. An exact address match wins; otherwise
 * the closest wildcard (BDADDR_ANY) match is returned.
 * NOTE(review): despite the original comment, the returned channel
 * is NOT locked — only chan_list_lock is held during the search.
 */
static struct l2cap_chan *l2cap_global_chan_by_scid(int state, u16 cid,
						    bdaddr_t *src,
						    bdaddr_t *dst)
{
	struct l2cap_chan *c, *c1 = NULL;

	read_lock(&chan_list_lock);

	list_for_each_entry(c, &chan_list, global_l) {
		struct sock *sk = c->sk;

		if (state && c->state != state)
			continue;

		if (c->scid == cid) {
			int src_match, dst_match;
			int src_any, dst_any;

			/* Exact match. */
			src_match = !bacmp(&bt_sk(sk)->src, src);
			dst_match = !bacmp(&bt_sk(sk)->dst, dst);
			if (src_match && dst_match) {
				read_unlock(&chan_list_lock);
				return c;
			}

			/* Closest match */
			src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
			dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
			if ((src_match && dst_any) || (src_any && dst_match) ||
			    (src_any && dst_any))
				c1 = c;
		}
	}

	read_unlock(&chan_list_lock);

	return c1;
}
1153
/* Handle a new incoming LE connection: find a channel listening on
 * the LE data CID, create a child channel through the owner's
 * new_connection callback, copy the connection addresses, queue the
 * child on the listener's accept queue and mark it ready.
 */
static void l2cap_le_conn_ready(struct l2cap_conn *conn)
{
	struct sock *parent, *sk;
	struct l2cap_chan *chan, *pchan;

	BT_DBG("");

	/* Check if we have socket listening on cid */
	pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
					  conn->src, conn->dst);
	if (!pchan)
		return;

	parent = pchan->sk;

	lock_sock(parent);

	chan = pchan->ops->new_connection(pchan);
	if (!chan)
		goto clean;

	sk = chan->sk;

	hci_conn_hold(conn->hcon);

	bacpy(&bt_sk(sk)->src, conn->src);
	bacpy(&bt_sk(sk)->dst, conn->dst);

	bt_accept_enqueue(parent, sk);

	l2cap_chan_add(conn, chan);

	l2cap_chan_ready(chan);

clean:
	release_sock(parent);
}
1191
/* Called when the underlying HCI link is fully established.
 *
 * For LE links this accepts incoming connections and starts SMP
 * security on outgoing ones; for every attached channel it then moves
 * the channel forward: LE channels become ready once secure,
 * connectionless/raw channels are marked connected immediately, and
 * connection-oriented BR/EDR channels start the L2CAP connect
 * procedure.
 */
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan;

	BT_DBG("conn %p", conn);

	/* Incoming LE link: hand it to a listening socket, if any. */
	if (!conn->hcon->out && conn->hcon->type == LE_LINK)
		l2cap_le_conn_ready(conn);

	/* Outgoing LE link: kick off pairing at the pending level. */
	if (conn->hcon->out && conn->hcon->type == LE_LINK)
		smp_conn_security(conn, conn->hcon->pending_sec_level);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {

		l2cap_chan_lock(chan);

		/* A2MP fixed channels manage their own state. */
		if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (conn->hcon->type == LE_LINK) {
			/* LE data channels need no connect/config
			 * exchange: ready as soon as security holds.
			 */
			if (smp_conn_security(conn, chan->sec_level))
				l2cap_chan_ready(chan);

		} else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			/* No channel setup needed: connected now. */
			struct sock *sk = chan->sk;
			__clear_chan_timer(chan);
			lock_sock(sk);
			__l2cap_state_change(chan, BT_CONNECTED);
			sk->sk_state_change(sk);
			release_sock(sk);

		} else if (chan->state == BT_CONNECT)
			l2cap_do_start(chan);

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
1235
1236 /* Notify sockets that we cannot guaranty reliability anymore */
1237 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1238 {
1239 struct l2cap_chan *chan;
1240
1241 BT_DBG("conn %p", conn);
1242
1243 mutex_lock(&conn->chan_lock);
1244
1245 list_for_each_entry(chan, &conn->chan_l, list) {
1246 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1247 __l2cap_chan_set_err(chan, err);
1248 }
1249
1250 mutex_unlock(&conn->chan_lock);
1251 }
1252
1253 static void l2cap_info_timeout(struct work_struct *work)
1254 {
1255 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1256 info_timer.work);
1257
1258 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
1259 conn->info_ident = 0;
1260
1261 l2cap_conn_start(conn);
1262 }
1263
/* Tear down the L2CAP state of an HCI link.
 *
 * Kills every attached channel (reporting @err to its owner), cancels
 * any pending info/security work and frees the connection object.
 */
static void l2cap_conn_del(struct hci_conn *hcon, int err)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan, *l;

	if (!conn)
		return;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	/* Drop any partially reassembled frame. */
	kfree_skb(conn->rx_skb);

	mutex_lock(&conn->chan_lock);

	/* Kill channels */
	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
		/* Hold the channel so it survives l2cap_chan_del()
		 * unlinking it; close() is invoked with the channel
		 * lock already released.
		 */
		l2cap_chan_hold(chan);
		l2cap_chan_lock(chan);

		l2cap_chan_del(chan, err);

		l2cap_chan_unlock(chan);

		chan->ops->close(chan);
		l2cap_chan_put(chan);
	}

	mutex_unlock(&conn->chan_lock);

	hci_chan_del(conn->hchan);

	/* info_timer is only ever armed after a request was sent. */
	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		cancel_delayed_work_sync(&conn->info_timer);

	/* LE links with SMP pairing in flight also need the security
	 * timer stopped and the SMP context destroyed.
	 */
	if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) {
		cancel_delayed_work_sync(&conn->security_timer);
		smp_chan_destroy(conn);
	}

	hcon->l2cap_data = NULL;
	kfree(conn);
}
1306
1307 static void security_timeout(struct work_struct *work)
1308 {
1309 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1310 security_timer.work);
1311
1312 l2cap_conn_del(conn->hcon, ETIMEDOUT);
1313 }
1314
/* Allocate and attach L2CAP connection state to an HCI link.
 *
 * Returns the already-attached conn if one exists (or NULL when
 * @status reports a failed link), NULL on allocation failure,
 * otherwise the freshly initialised connection.
 */
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct hci_chan *hchan;

	if (conn || status)
		return conn;

	hchan = hci_chan_create(hcon);
	if (!hchan)
		return NULL;

	/* NOTE(review): GFP_ATOMIC suggests some callers cannot sleep
	 * here -- confirm against call sites before relaxing.
	 */
	conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
	if (!conn) {
		hci_chan_del(hchan);
		return NULL;
	}

	hcon->l2cap_data = conn;
	conn->hcon = hcon;
	conn->hchan = hchan;

	BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);

	/* LE links may advertise their own (separate) MTU. */
	if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
		conn->mtu = hcon->hdev->le_mtu;
	else
		conn->mtu = hcon->hdev->acl_mtu;

	conn->src = &hcon->hdev->bdaddr;
	conn->dst = &hcon->dst;

	conn->feat_mask = 0;

	spin_lock_init(&conn->lock);
	mutex_init(&conn->chan_lock);

	INIT_LIST_HEAD(&conn->chan_l);

	/* LE links use the SMP security timeout; BR/EDR links use the
	 * information-request timeout instead.
	 */
	if (hcon->type == LE_LINK)
		INIT_DELAYED_WORK(&conn->security_timer, security_timeout);
	else
		INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);

	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	return conn;
}
1363
1364 /* ---- Socket interface ---- */
1365
1366 /* Find socket with psm and source / destination bdaddr.
1367 * Returns closest match.
1368 */
1369 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
1370 bdaddr_t *src,
1371 bdaddr_t *dst)
1372 {
1373 struct l2cap_chan *c, *c1 = NULL;
1374
1375 read_lock(&chan_list_lock);
1376
1377 list_for_each_entry(c, &chan_list, global_l) {
1378 struct sock *sk = c->sk;
1379
1380 if (state && c->state != state)
1381 continue;
1382
1383 if (c->psm == psm) {
1384 int src_match, dst_match;
1385 int src_any, dst_any;
1386
1387 /* Exact match. */
1388 src_match = !bacmp(&bt_sk(sk)->src, src);
1389 dst_match = !bacmp(&bt_sk(sk)->dst, dst);
1390 if (src_match && dst_match) {
1391 read_unlock(&chan_list_lock);
1392 return c;
1393 }
1394
1395 /* Closest match */
1396 src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
1397 dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
1398 if ((src_match && dst_any) || (src_any && dst_match) ||
1399 (src_any && dst_any))
1400 c1 = c;
1401 }
1402 }
1403
1404 read_unlock(&chan_list_lock);
1405
1406 return c1;
1407 }
1408
/* Initiate an outgoing L2CAP connection on @chan to @dst.
 *
 * Validates PSM/CID and the channel's mode and state, creates (or
 * reuses) the underlying HCI link, attaches the channel to the
 * connection and starts the connect procedure.  Returns 0 on success
 * (also when a connect is already in progress) or a negative errno.
 */
int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
		       bdaddr_t *dst, u8 dst_type)
{
	struct sock *sk = chan->sk;
	bdaddr_t *src = &bt_sk(sk)->src;
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	__u8 auth_type;
	int err;

	BT_DBG("%s -> %s (type %u) psm 0x%2.2x", batostr(src), batostr(dst),
	       dst_type, __le16_to_cpu(chan->psm));

	hdev = hci_get_route(dst, src);
	if (!hdev)
		return -EHOSTUNREACH;

	hci_dev_lock(hdev);

	l2cap_chan_lock(chan);

	/* PSM must be odd and lsb of upper byte must be 0 */
	if ((__le16_to_cpu(psm) & 0x0101) != 0x0001 && !cid &&
	    chan->chan_type != L2CAP_CHAN_RAW) {
		err = -EINVAL;
		goto done;
	}

	/* Connection-oriented channels need a PSM or a fixed CID. */
	if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !(psm || cid)) {
		err = -EINVAL;
		goto done;
	}

	/* ERTM and streaming are rejected when disabled via the
	 * disable_ertm module parameter; unknown modes always are.
	 */
	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		break;
	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		if (!disable_ertm)
			break;
		/* fall through */
	default:
		err = -ENOTSUPP;
		goto done;
	}

	switch (chan->state) {
	case BT_CONNECT:
	case BT_CONNECT2:
	case BT_CONFIG:
		/* Already connecting */
		err = 0;
		goto done;

	case BT_CONNECTED:
		/* Already connected */
		err = -EISCONN;
		goto done;

	case BT_OPEN:
	case BT_BOUND:
		/* Can connect */
		break;

	default:
		err = -EBADFD;
		goto done;
	}

	/* Set destination address and psm */
	lock_sock(sk);
	bacpy(&bt_sk(sk)->dst, dst);
	release_sock(sk);

	chan->psm = psm;
	chan->dcid = cid;

	auth_type = l2cap_get_auth_type(chan);

	/* LE data channels ride an LE link; everything else uses ACL. */
	if (chan->dcid == L2CAP_CID_LE_DATA)
		hcon = hci_connect(hdev, LE_LINK, dst, dst_type,
				   chan->sec_level, auth_type);
	else
		hcon = hci_connect(hdev, ACL_LINK, dst, dst_type,
				   chan->sec_level, auth_type);

	if (IS_ERR(hcon)) {
		err = PTR_ERR(hcon);
		goto done;
	}

	conn = l2cap_conn_add(hcon, 0);
	if (!conn) {
		hci_conn_put(hcon);
		err = -ENOMEM;
		goto done;
	}

	if (hcon->type == LE_LINK) {
		err = 0;

		/* Only one channel is allowed on an LE link. */
		if (!list_empty(&conn->chan_l)) {
			err = -EBUSY;
			hci_conn_put(hcon);
		}

		if (err)
			goto done;
	}

	/* Update source addr of the socket */
	bacpy(src, conn->src);

	/* l2cap_chan_add() must be called without the channel lock,
	 * so drop and re-take it around the call.
	 */
	l2cap_chan_unlock(chan);
	l2cap_chan_add(conn, chan);
	l2cap_chan_lock(chan);

	l2cap_state_change(chan, BT_CONNECT);
	__set_chan_timer(chan, sk->sk_sndtimeo);

	/* The HCI link may already be up (e.g. shared with another
	 * channel); if so, advance immediately instead of waiting for
	 * the connect-complete event.
	 */
	if (hcon->state == BT_CONNECTED) {
		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			__clear_chan_timer(chan);
			if (l2cap_chan_check_security(chan))
				l2cap_state_change(chan, BT_CONNECTED);
		} else
			l2cap_do_start(chan);
	}

	err = 0;

done:
	l2cap_chan_unlock(chan);
	hci_dev_unlock(hdev);
	hci_dev_put(hdev);
	return err;
}
1547
/* Block until every outstanding ERTM frame has been acknowledged or
 * the channel loses its connection.
 *
 * Sleeps in HZ/5 slices with the socket lock dropped (so incoming
 * acks can be processed), re-checking unacked_frames each pass.
 * Stops early on a pending signal or a socket error.  Called with
 * the socket locked; returns 0 or a negative errno.
 */
int __l2cap_wait_ack(struct sock *sk)
{
	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;
	int timeo = HZ/5;

	add_wait_queue(sk_sleep(sk), &wait);
	set_current_state(TASK_INTERRUPTIBLE);
	while (chan->unacked_frames > 0 && chan->conn) {
		if (!timeo)
			timeo = HZ/5;

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}

		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);
		/* Must re-arm the state before the next loop check to
		 * avoid missing a wakeup.
		 */
		set_current_state(TASK_INTERRUPTIBLE);

		err = sock_error(sk);
		if (err)
			break;
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);
	return err;
}
1579
1580 static void l2cap_monitor_timeout(struct work_struct *work)
1581 {
1582 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1583 monitor_timer.work);
1584
1585 BT_DBG("chan %p", chan);
1586
1587 l2cap_chan_lock(chan);
1588
1589 if (!chan->conn) {
1590 l2cap_chan_unlock(chan);
1591 l2cap_chan_put(chan);
1592 return;
1593 }
1594
1595 l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);
1596
1597 l2cap_chan_unlock(chan);
1598 l2cap_chan_put(chan);
1599 }
1600
1601 static void l2cap_retrans_timeout(struct work_struct *work)
1602 {
1603 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1604 retrans_timer.work);
1605
1606 BT_DBG("chan %p", chan);
1607
1608 l2cap_chan_lock(chan);
1609
1610 if (!chan->conn) {
1611 l2cap_chan_unlock(chan);
1612 l2cap_chan_put(chan);
1613 return;
1614 }
1615
1616 l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
1617 l2cap_chan_unlock(chan);
1618 l2cap_chan_put(chan);
1619 }
1620
/* Streaming-mode transmit: push every queued segment immediately.
 *
 * No retransmission state is kept; each frame is numbered, FCS-
 * protected when configured, and sent straight away.
 */
static void l2cap_streaming_send(struct l2cap_chan *chan,
				 struct sk_buff_head *skbs)
{
	struct sk_buff *skb;
	struct l2cap_ctrl *control;

	BT_DBG("chan %p, skbs %p", chan, skbs);

	skb_queue_splice_tail_init(skbs, &chan->tx_q);

	while (!skb_queue_empty(&chan->tx_q)) {

		skb = skb_dequeue(&chan->tx_q);

		bt_cb(skb)->control.retries = 1;
		control = &bt_cb(skb)->control;

		/* reqseq is unused in streaming mode. */
		control->reqseq = 0;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		l2cap_do_send(chan, skb);

		BT_DBG("Sent txseq %d", (int)control->txseq);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->frames_sent++;
	}
}
1656
/* Transmit queued ERTM I-frames while the TX window has room.
 *
 * Returns the number of frames sent, 0 when the remote is busy or the
 * state machine is not in XMIT, or -ENOTCONN when the channel is down.
 */
static int l2cap_ertm_send(struct l2cap_chan *chan)
{
	struct sk_buff *skb, *tx_skb;
	struct l2cap_ctrl *control;
	int sent = 0;

	BT_DBG("chan %p", chan);

	if (chan->state != BT_CONNECTED)
		return -ENOTCONN;

	/* Remote signalled RNR: hold off until it clears. */
	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return 0;

	while (chan->tx_send_head &&
	       chan->unacked_frames < chan->remote_tx_win &&
	       chan->tx_state == L2CAP_TX_STATE_XMIT) {

		skb = chan->tx_send_head;

		bt_cb(skb)->control.retries = 1;
		control = &bt_cb(skb)->control;

		/* Piggyback the F-bit if one is owed to the remote. */
		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control->final = 1;

		/* Every I-frame also acks everything received so far. */
		control->reqseq = chan->buffer_seq;
		chan->last_acked_seq = chan->buffer_seq;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		/* Clone after data has been modified. Data is assumed to be
		   read-only (for locking purposes) on cloned sk_buffs.
		 */
		tx_skb = skb_clone(skb, GFP_KERNEL);

		if (!tx_skb)
			break;

		__set_retrans_timer(chan);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->unacked_frames++;
		chan->frames_sent++;
		sent++;

		/* Advance to the next never-sent frame, if any. */
		if (skb_queue_is_last(&chan->tx_q, skb))
			chan->tx_send_head = NULL;
		else
			chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);

		l2cap_do_send(chan, tx_skb);
		BT_DBG("Sent txseq %d", (int)control->txseq);
	}

	BT_DBG("Sent %d, %d unacked, %d in ERTM queue", sent,
	       (int) chan->unacked_frames, skb_queue_len(&chan->tx_q));

	return sent;
}
1723
/* Retransmit every frame queued on retrans_list.
 *
 * Pops each sequence number, enforces the max_tx retry limit
 * (disconnecting the channel when it is exceeded), refreshes the
 * reqseq/F-bit in a writable copy of the frame, re-applies the FCS
 * and sends it.  Does nothing while the remote is busy (RNR).
 */
static void l2cap_ertm_resend(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	struct sk_buff *skb;
	struct sk_buff *tx_skb;
	u16 seq;

	BT_DBG("chan %p", chan);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
		seq = l2cap_seq_list_pop(&chan->retrans_list);

		/* The frame may have been acked and freed meanwhile. */
		skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
		if (!skb) {
			BT_DBG("Error: Can't retransmit seq %d, frame missing",
			       seq);
			continue;
		}

		bt_cb(skb)->control.retries++;
		control = bt_cb(skb)->control;

		/* max_tx == 0 means unlimited retransmissions. */
		if (chan->max_tx != 0 &&
		    bt_cb(skb)->control.retries > chan->max_tx) {
			BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
			l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		/* Refresh the ack and F-bit for this retransmission. */
		control.reqseq = chan->buffer_seq;
		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control.final = 1;
		else
			control.final = 0;

		if (skb_cloned(skb)) {
			/* Cloned sk_buffs are read-only, so we need a
			 * writeable copy
			 */
			tx_skb = skb_copy(skb, GFP_ATOMIC);
		} else {
			tx_skb = skb_clone(skb, GFP_ATOMIC);
		}

		/* NOTE(review): GFP_ATOMIC here vs GFP_KERNEL in
		 * l2cap_ertm_send() -- confirm which context applies.
		 */
		if (!tx_skb) {
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		/* Update skb contents */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
			put_unaligned_le32(__pack_extended_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		} else {
			put_unaligned_le16(__pack_enhanced_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		}

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) tx_skb->data, tx_skb->len);
			put_unaligned_le16(fcs, skb_put(tx_skb,
							L2CAP_FCS_SIZE));
		}

		l2cap_do_send(chan, tx_skb);

		BT_DBG("Resent txseq %d", control.txseq);

		chan->last_acked_seq = chan->buffer_seq;
	}
}
1799
/* Retransmit the single frame named by control->reqseq (e.g. in
 * response to an incoming SREJ).
 */
static void l2cap_retransmit(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);

	l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
	l2cap_ertm_resend(chan);
}
1808
/* Retransmit everything outstanding starting at control->reqseq.
 *
 * Rebuilds retrans_list with every already-sent frame from the one
 * matching reqseq up to (not including) tx_send_head, then kicks the
 * resend engine.  Skipped entirely while the remote is busy (RNR).
 */
static void l2cap_retransmit_all(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* A poll (P=1) obliges us to set the F-bit on our response. */
	if (control->poll)
		set_bit(CONN_SEND_FBIT, &chan->conn_state);

	l2cap_seq_list_clear(&chan->retrans_list);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	if (chan->unacked_frames) {
		/* Locate the first frame to resend... */
		skb_queue_walk(&chan->tx_q, skb) {
			if (bt_cb(skb)->control.txseq == control->reqseq ||
			    skb == chan->tx_send_head)
				break;
		}

		/* ...then queue it and everything after it, up to the
		 * first never-sent frame.
		 */
		skb_queue_walk_from(&chan->tx_q, skb) {
			if (skb == chan->tx_send_head)
				break;

			l2cap_seq_list_append(&chan->retrans_list,
					      bt_cb(skb)->control.txseq);
		}

		l2cap_ertm_resend(chan);
	}
}
1842
/* Acknowledge received I-frames.
 *
 * While locally busy an RNR is sent instead of an RR.  Otherwise the
 * ack is piggybacked on outgoing I-frames when possible; an explicit
 * RR is only sent once the ack backlog reaches 3/4 of the TX window,
 * and smaller backlogs just (re)arm the ack timer.
 */
static void l2cap_send_ack(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
					 chan->last_acked_seq);
	int threshold;

	BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
	       chan, chan->last_acked_seq, chan->buffer_seq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    chan->rx_state == L2CAP_RX_STATE_RECV) {
		__clear_ack_timer(chan);
		control.super = L2CAP_SUPER_RNR;
		control.reqseq = chan->buffer_seq;
		l2cap_send_sframe(chan, &control);
	} else {
		if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
			l2cap_ertm_send(chan);
			/* If any i-frames were sent, they included an ack */
			if (chan->buffer_seq == chan->last_acked_seq)
				frames_to_ack = 0;
		}

		/* Ack now if the tx window is 3/4ths full.
		 * Calculate without mul or div
		 */
		threshold = chan->tx_win;
		threshold += threshold << 1;
		threshold >>= 2;

		BT_DBG("frames_to_ack %d, threshold %d", (int)frames_to_ack,
		       threshold);

		if (frames_to_ack >= threshold) {
			__clear_ack_timer(chan);
			control.super = L2CAP_SUPER_RR;
			control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &control);
			frames_to_ack = 0;
		}

		/* Batch the remaining acks via the ack timer. */
		if (frames_to_ack)
			__set_ack_timer(chan);
	}
}
1892
/* Copy payload from a userspace iovec into @skb.
 *
 * The first @count bytes land in @skb itself; any remainder (@len is
 * the total) is copied into newly allocated continuation skbs chained
 * on skb's frag_list, each at most conn->mtu bytes.  Returns bytes
 * copied, -EFAULT on a userspace copy fault, or the allocator's
 * error.
 */
static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
					 struct msghdr *msg, int len,
					 int count, struct sk_buff *skb)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff **frag;
	int sent = 0;

	if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
		return -EFAULT;

	sent += count;
	len -= count;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		struct sk_buff *tmp;

		count = min_t(unsigned int, conn->mtu, len);

		tmp = chan->ops->alloc_skb(chan, count,
					   msg->msg_flags & MSG_DONTWAIT);
		if (IS_ERR(tmp))
			return PTR_ERR(tmp);

		*frag = tmp;

		if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
			return -EFAULT;

		(*frag)->priority = skb->priority;

		sent += count;
		len -= count;

		/* Keep the head skb's accounting in sync with the chain. */
		skb->len += (*frag)->len;
		skb->data_len += (*frag)->len;

		frag = &(*frag)->next;
	}

	return sent;
}
1937
/* Build a connectionless-channel PDU: L2CAP header + PSM + payload,
 * fragmented to the HCI MTU via frag_list when necessary.
 */
static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
						 struct msghdr *msg, size_t len,
						 u32 priority)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %d priority %u", chan, (int)len, priority);

	/* First skb holds the header plus as much payload as fits. */
	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, count + hlen,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	skb->priority = priority;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
	put_unaligned(chan->psm, skb_put(skb, L2CAP_PSMLEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
1971
1972 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
1973 struct msghdr *msg, size_t len,
1974 u32 priority)
1975 {
1976 struct l2cap_conn *conn = chan->conn;
1977 struct sk_buff *skb;
1978 int err, count;
1979 struct l2cap_hdr *lh;
1980
1981 BT_DBG("chan %p len %d", chan, (int)len);
1982
1983 count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
1984
1985 skb = chan->ops->alloc_skb(chan, count + L2CAP_HDR_SIZE,
1986 msg->msg_flags & MSG_DONTWAIT);
1987 if (IS_ERR(skb))
1988 return skb;
1989
1990 skb->priority = priority;
1991
1992 /* Create L2CAP header */
1993 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1994 lh->cid = cpu_to_le16(chan->dcid);
1995 lh->len = cpu_to_le16(len);
1996
1997 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
1998 if (unlikely(err < 0)) {
1999 kfree_skb(skb);
2000 return ERR_PTR(err);
2001 }
2002 return skb;
2003 }
2004
/* Build a single ERTM/streaming I-frame PDU.
 *
 * Reserves room for the (enhanced or extended) control field, an
 * optional SDU-length field for the first segment of a segmented SDU
 * (@sdulen non-zero), and the FCS trailer when CRC16 is in use.  The
 * control field itself is zeroed here and filled in later by the
 * transmit path.
 */
static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
					       struct msghdr *msg, size_t len,
					       u16 sdulen)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %d", chan, (int)len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	/* Header size depends on the negotiated control-field format. */
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		hlen = L2CAP_EXT_HDR_SIZE;
	else
		hlen = L2CAP_ENH_HDR_SIZE;

	if (sdulen)
		hlen += L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, count + hlen,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	/* Control header is populated later */
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	bt_cb(skb)->control.fcs = chan->fcs;
	bt_cb(skb)->control.retries = 0;
	return skb;
}
2061
/* Split an outgoing SDU into I-frame PDUs queued on @seg_queue.
 *
 * PDU payload size is bounded by the HCI MTU (minus worst-case ERTM
 * overhead) and the remote's MPS.  Only the first segment of a
 * multi-PDU SDU carries the total SDU length; SAR bits mark each
 * segment as unsegmented/start/continue/end.  Returns 0 or a negative
 * errno (the queue is purged on failure).
 */
static int l2cap_segment_sdu(struct l2cap_chan *chan,
			     struct sk_buff_head *seg_queue,
			     struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	u16 sdu_len;
	size_t pdu_len;
	int err = 0;
	u8 sar;

	BT_DBG("chan %p, msg %p, len %d", chan, msg, (int)len);

	/* It is critical that ERTM PDUs fit in a single HCI fragment,
	 * so fragmented skbs are not used. The HCI layer's handling
	 * of fragmented skbs is not compatible with ERTM's queueing.
	 */

	/* PDU size is derived from the HCI MTU */
	pdu_len = chan->conn->mtu;

	pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);

	/* Adjust for largest possible L2CAP overhead. */
	if (chan->fcs)
		pdu_len -= L2CAP_FCS_SIZE;

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		pdu_len -= L2CAP_EXT_HDR_SIZE;
	else
		pdu_len -= L2CAP_ENH_HDR_SIZE;

	/* Remote device may have requested smaller PDUs */
	pdu_len = min_t(size_t, pdu_len, chan->remote_mps);

	if (len <= pdu_len) {
		/* Fits in one PDU: no SAR, no SDU-length field. */
		sar = L2CAP_SAR_UNSEGMENTED;
		sdu_len = 0;
		pdu_len = len;
	} else {
		sar = L2CAP_SAR_START;
		sdu_len = len;
		pdu_len -= L2CAP_SDULEN_SIZE;
	}

	while (len > 0) {
		skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);

		if (IS_ERR(skb)) {
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		bt_cb(skb)->control.sar = sar;
		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;
		if (sdu_len) {
			/* Only the START segment carried the SDU
			 * length; later segments get that space back.
			 */
			sdu_len = 0;
			pdu_len += L2CAP_SDULEN_SIZE;
		}

		if (len <= pdu_len) {
			sar = L2CAP_SAR_END;
			pdu_len = len;
		} else {
			sar = L2CAP_SAR_CONTINUE;
		}
	}

	return err;
}
2133
/* Entry point for sending user data on a channel.
 *
 * Connectionless channels send a single connless PDU; basic mode
 * sends one B-frame; ERTM/streaming segment the SDU first and hand
 * the segments to the TX state machine or the streaming transmitter.
 * Returns the number of bytes accepted or a negative errno.
 */
int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
		    u32 priority)
{
	struct sk_buff *skb;
	int err;
	struct sk_buff_head seg_queue;

	/* Connectionless channel */
	if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
		skb = l2cap_create_connless_pdu(chan, msg, len, priority);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		l2cap_do_send(chan, skb);
		return len;
	}

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		/* Create a basic PDU */
		skb = l2cap_create_basic_pdu(chan, msg, len, priority);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		l2cap_do_send(chan, skb);
		err = len;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* Check outgoing MTU */
		if (len > chan->omtu) {
			err = -EMSGSIZE;
			break;
		}

		__skb_queue_head_init(&seg_queue);

		/* Do segmentation before calling in to the state machine,
		 * since it's possible to block while waiting for memory
		 * allocation.
		 */
		err = l2cap_segment_sdu(chan, &seg_queue, msg, len);

		/* The channel could have been closed while segmenting,
		 * check that it is still connected.
		 */
		if (chan->state != BT_CONNECTED) {
			__skb_queue_purge(&seg_queue);
			err = -ENOTCONN;
		}

		if (err)
			break;

		if (chan->mode == L2CAP_MODE_ERTM)
			l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
		else
			l2cap_streaming_send(chan, &seg_queue);

		err = len;

		/* If the skbs were not queued for sending, they'll still be in
		 * seg_queue and need to be purged.
		 */
		__skb_queue_purge(&seg_queue);
		break;

	default:
		BT_DBG("bad state %1.1x", chan->mode);
		err = -EBADFD;
	}

	return err;
}
2213
/* SREJ every missing frame between expected_tx_seq and @txseq.
 *
 * Frames already buffered out-of-order in srej_q are skipped; each
 * requested sequence number is remembered on srej_list.
 */
static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
{
	struct l2cap_ctrl control;
	u16 seq;

	BT_DBG("chan %p, txseq %d", chan, txseq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;

	for (seq = chan->expected_tx_seq; seq != txseq;
	     seq = __next_seq(chan, seq)) {
		if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
			control.reqseq = seq;
			l2cap_send_sframe(chan, &control);
			l2cap_seq_list_append(&chan->srej_list, seq);
		}
	}

	chan->expected_tx_seq = __next_seq(chan, txseq);
}
2236
2237 static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2238 {
2239 struct l2cap_ctrl control;
2240
2241 BT_DBG("chan %p", chan);
2242
2243 if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2244 return;
2245
2246 memset(&control, 0, sizeof(control));
2247 control.sframe = 1;
2248 control.super = L2CAP_SUPER_SREJ;
2249 control.reqseq = chan->srej_list.tail;
2250 l2cap_send_sframe(chan, &control);
2251 }
2252
/* Re-issue SREJs for every sequence number still on srej_list.
 *
 * Each popped entry is re-sent and re-appended; the loop stops when
 * it reaches @txseq, exhausts the list, or wraps back to the captured
 * initial head (guaranteeing a single pass).
 */
static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
{
	struct l2cap_ctrl control;
	u16 initial_head;
	u16 seq;

	BT_DBG("chan %p, txseq %d", chan, txseq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;

	/* Capture initial list head to allow only one pass through the list. */
	initial_head = chan->srej_list.head;

	do {
		seq = l2cap_seq_list_pop(&chan->srej_list);
		if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
			break;

		control.reqseq = seq;
		l2cap_send_sframe(chan, &control);
		l2cap_seq_list_append(&chan->srej_list, seq);
	} while (chan->srej_list.head != initial_head);
}
2278
/* Process an incoming acknowledgement number.
 *
 * Frees every TX-queue frame from expected_ack_seq up to (but not
 * including) @reqseq and clears the retransmission timer once nothing
 * remains unacked.
 */
static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
{
	struct sk_buff *acked_skb;
	u16 ackseq;

	BT_DBG("chan %p, reqseq %d", chan, reqseq);

	/* Nothing outstanding, or nothing new to ack. */
	if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
		return;

	BT_DBG("expected_ack_seq %d, unacked_frames %d",
	       chan->expected_ack_seq, chan->unacked_frames);

	for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
	     ackseq = __next_seq(chan, ackseq)) {

		/* The frame may already be gone from the queue. */
		acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
		if (acked_skb) {
			skb_unlink(acked_skb, &chan->tx_q);
			kfree_skb(acked_skb);
			chan->unacked_frames--;
		}
	}

	chan->expected_ack_seq = reqseq;

	if (chan->unacked_frames == 0)
		__clear_retrans_timer(chan);

	BT_DBG("unacked_frames %d", (int) chan->unacked_frames);
}
2310
2311 static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2312 {
2313 BT_DBG("chan %p", chan);
2314
2315 chan->expected_tx_seq = chan->buffer_seq;
2316 l2cap_seq_list_clear(&chan->srej_list);
2317 skb_queue_purge(&chan->srej_q);
2318 chan->rx_state = L2CAP_RX_STATE_RECV;
2319 }
2320
/* ERTM transmit state machine: event handler for the XMIT state. */
static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
				struct l2cap_ctrl *control,
				struct sk_buff_head *skbs, u8 event)
{
	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
	       event);

	switch (event) {
	case L2CAP_EV_DATA_REQUEST:
		if (chan->tx_send_head == NULL)
			chan->tx_send_head = skb_peek(skbs);

		/* Queue the new segments and send what the window allows. */
		skb_queue_splice_tail_init(skbs, &chan->tx_q);
		l2cap_ertm_send(chan);
		break;
	case L2CAP_EV_LOCAL_BUSY_DETECTED:
		BT_DBG("Enter LOCAL_BUSY");
		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
			/* The SREJ_SENT state must be aborted if we are to
			 * enter the LOCAL_BUSY state.
			 */
			l2cap_abort_rx_srej_sent(chan);
		}

		/* l2cap_send_ack() sends RNR while LOCAL_BUSY is set. */
		l2cap_send_ack(chan);

		break;
	case L2CAP_EV_LOCAL_BUSY_CLEAR:
		BT_DBG("Exit LOCAL_BUSY");
		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
			/* We told the remote we were busy: poll it with
			 * RR(P=1) and wait for the final response.
			 */
			struct l2cap_ctrl local_control;

			memset(&local_control, 0, sizeof(local_control));
			local_control.sframe = 1;
			local_control.super = L2CAP_SUPER_RR;
			local_control.poll = 1;
			local_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &local_control);

			chan->retry_count = 1;
			__set_monitor_timer(chan);
			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		}
		break;
	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
		l2cap_process_reqseq(chan, control->reqseq);
		break;
	case L2CAP_EV_EXPLICIT_POLL:
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		__clear_ack_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RETRANS_TO:
		/* Retransmission timeout: poll the remote for its state. */
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RECV_FBIT:
		/* Nothing to process */
		break;
	default:
		break;
	}
}
2392
2393 static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2394 struct l2cap_ctrl *control,
2395 struct sk_buff_head *skbs, u8 event)
2396 {
2397 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2398 event);
2399
2400 switch (event) {
2401 case L2CAP_EV_DATA_REQUEST:
2402 if (chan->tx_send_head == NULL)
2403 chan->tx_send_head = skb_peek(skbs);
2404 /* Queue data, but don't send. */
2405 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2406 break;
2407 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2408 BT_DBG("Enter LOCAL_BUSY");
2409 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2410
2411 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2412 /* The SREJ_SENT state must be aborted if we are to
2413 * enter the LOCAL_BUSY state.
2414 */
2415 l2cap_abort_rx_srej_sent(chan);
2416 }
2417
2418 l2cap_send_ack(chan);
2419
2420 break;
2421 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2422 BT_DBG("Exit LOCAL_BUSY");
2423 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2424
2425 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2426 struct l2cap_ctrl local_control;
2427 memset(&local_control, 0, sizeof(local_control));
2428 local_control.sframe = 1;
2429 local_control.super = L2CAP_SUPER_RR;
2430 local_control.poll = 1;
2431 local_control.reqseq = chan->buffer_seq;
2432 l2cap_send_sframe(chan, &local_control);
2433
2434 chan->retry_count = 1;
2435 __set_monitor_timer(chan);
2436 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2437 }
2438 break;
2439 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2440 l2cap_process_reqseq(chan, control->reqseq);
2441
2442 /* Fall through */
2443
2444 case L2CAP_EV_RECV_FBIT:
2445 if (control && control->final) {
2446 __clear_monitor_timer(chan);
2447 if (chan->unacked_frames > 0)
2448 __set_retrans_timer(chan);
2449 chan->retry_count = 0;
2450 chan->tx_state = L2CAP_TX_STATE_XMIT;
2451 BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state);
2452 }
2453 break;
2454 case L2CAP_EV_EXPLICIT_POLL:
2455 /* Ignore */
2456 break;
2457 case L2CAP_EV_MONITOR_TO:
2458 if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
2459 l2cap_send_rr_or_rnr(chan, 1);
2460 __set_monitor_timer(chan);
2461 chan->retry_count++;
2462 } else {
2463 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
2464 }
2465 break;
2466 default:
2467 break;
2468 }
2469 }
2470
2471 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
2472 struct sk_buff_head *skbs, u8 event)
2473 {
2474 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2475 chan, control, skbs, event, chan->tx_state);
2476
2477 switch (chan->tx_state) {
2478 case L2CAP_TX_STATE_XMIT:
2479 l2cap_tx_state_xmit(chan, control, skbs, event);
2480 break;
2481 case L2CAP_TX_STATE_WAIT_F:
2482 l2cap_tx_state_wait_f(chan, control, skbs, event);
2483 break;
2484 default:
2485 /* Ignore event */
2486 break;
2487 }
2488 }
2489
2490 static void l2cap_pass_to_tx(struct l2cap_chan *chan,
2491 struct l2cap_ctrl *control)
2492 {
2493 BT_DBG("chan %p, control %p", chan, control);
2494 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
2495 }
2496
2497 static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
2498 struct l2cap_ctrl *control)
2499 {
2500 BT_DBG("chan %p, control %p", chan, control);
2501 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
2502 }
2503
2504 /* Copy frame to all raw sockets on that connection */
2505 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2506 {
2507 struct sk_buff *nskb;
2508 struct l2cap_chan *chan;
2509
2510 BT_DBG("conn %p", conn);
2511
2512 mutex_lock(&conn->chan_lock);
2513
2514 list_for_each_entry(chan, &conn->chan_l, list) {
2515 struct sock *sk = chan->sk;
2516 if (chan->chan_type != L2CAP_CHAN_RAW)
2517 continue;
2518
2519 /* Don't send frame to the socket it came from */
2520 if (skb->sk == sk)
2521 continue;
2522 nskb = skb_clone(skb, GFP_ATOMIC);
2523 if (!nskb)
2524 continue;
2525
2526 if (chan->ops->recv(chan, nskb))
2527 kfree_skb(nskb);
2528 }
2529
2530 mutex_unlock(&conn->chan_lock);
2531 }
2532
2533 /* ---- L2CAP signalling commands ---- */
/* Build a signalling command PDU: L2CAP header + command header + @data.
 *
 * The first skb carries both headers and as much of @data as fits within
 * conn->mtu; any remainder is chained as header-less continuation
 * fragments on frag_list, each at most conn->mtu bytes.
 *
 * Returns the skb chain, or NULL on allocation failure (a partially
 * built chain is freed before returning).
 */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
				       u8 code, u8 ident, u16 dlen, void *data)
{
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	int len, count;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
	       conn, code, ident, dlen);

	/* Total wire length; first fragment is capped at the link MTU */
	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, conn->mtu, len);

	skb = bt_skb_alloc(count, GFP_ATOMIC);
	if (!skb)
		return NULL;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);

	/* LE links use a dedicated signalling CID */
	if (conn->hcon->type == LE_LINK)
		lh->cid = __constant_cpu_to_le16(L2CAP_CID_LE_SIGNALING);
	else
		lh->cid = __constant_cpu_to_le16(L2CAP_CID_SIGNALING);

	cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->code = code;
	cmd->ident = ident;
	cmd->len = cpu_to_le16(dlen);

	if (dlen) {
		/* Fill the rest of the first fragment with payload */
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		memcpy(skb_put(skb, count), data, count);
		data += count;
	}

	/* Bytes of payload still to be placed in continuation fragments */
	len -= skb->len;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_alloc(count, GFP_ATOMIC);
		if (!*frag)
			goto fail;

		memcpy(skb_put(*frag, count), data, count);

		len -= count;
		data += count;

		frag = &(*frag)->next;
	}

	return skb;

fail:
	/* Frees the whole chain built so far, including fragments */
	kfree_skb(skb);
	return NULL;
}
2596
/* Decode one configuration option at *ptr and advance *ptr past it.
 *
 * @type and @olen receive the raw option type and payload length.  For
 * the standard 1/2/4-byte payloads @val receives the little-endian
 * decoded value; for any other length @val is set to a pointer to the
 * payload (cast to unsigned long) and the caller must copy from it.
 *
 * Returns the total bytes consumed (option header + payload), which
 * callers subtract from their remaining-length counter.
 *
 * NOTE(review): opt->len comes straight from the wire and is not checked
 * against the remaining buffer, so a crafted length can step *ptr past
 * the end of the received options — confirm all callers bound this.
 */
static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
{
	struct l2cap_conf_opt *opt = *ptr;
	int len;

	len = L2CAP_CONF_OPT_SIZE + opt->len;
	*ptr += len;

	*type = opt->type;
	*olen = opt->len;

	switch (opt->len) {
	case 1:
		*val = *((u8 *) opt->val);
		break;

	case 2:
		*val = get_unaligned_le16(opt->val);
		break;

	case 4:
		*val = get_unaligned_le32(opt->val);
		break;

	default:
		/* Non-scalar payload: hand back a pointer, not a value */
		*val = (unsigned long) opt->val;
		break;
	}

	BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
	return len;
}
2629
2630 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2631 {
2632 struct l2cap_conf_opt *opt = *ptr;
2633
2634 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
2635
2636 opt->type = type;
2637 opt->len = len;
2638
2639 switch (len) {
2640 case 1:
2641 *((u8 *) opt->val) = val;
2642 break;
2643
2644 case 2:
2645 put_unaligned_le16(val, opt->val);
2646 break;
2647
2648 case 4:
2649 put_unaligned_le32(val, opt->val);
2650 break;
2651
2652 default:
2653 memcpy(opt->val, (void *) val, len);
2654 break;
2655 }
2656
2657 *ptr += L2CAP_CONF_OPT_SIZE + len;
2658 }
2659
2660 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
2661 {
2662 struct l2cap_conf_efs efs;
2663
2664 switch (chan->mode) {
2665 case L2CAP_MODE_ERTM:
2666 efs.id = chan->local_id;
2667 efs.stype = chan->local_stype;
2668 efs.msdu = cpu_to_le16(chan->local_msdu);
2669 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2670 efs.acc_lat = __constant_cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
2671 efs.flush_to = __constant_cpu_to_le32(L2CAP_DEFAULT_FLUSH_TO);
2672 break;
2673
2674 case L2CAP_MODE_STREAMING:
2675 efs.id = 1;
2676 efs.stype = L2CAP_SERV_BESTEFFORT;
2677 efs.msdu = cpu_to_le16(chan->local_msdu);
2678 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2679 efs.acc_lat = 0;
2680 efs.flush_to = 0;
2681 break;
2682
2683 default:
2684 return;
2685 }
2686
2687 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
2688 (unsigned long) &efs);
2689 }
2690
2691 static void l2cap_ack_timeout(struct work_struct *work)
2692 {
2693 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
2694 ack_timer.work);
2695 u16 frames_to_ack;
2696
2697 BT_DBG("chan %p", chan);
2698
2699 l2cap_chan_lock(chan);
2700
2701 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
2702 chan->last_acked_seq);
2703
2704 if (frames_to_ack)
2705 l2cap_send_rr_or_rnr(chan, 0);
2706
2707 l2cap_chan_unlock(chan);
2708 l2cap_chan_put(chan);
2709 }
2710
2711 int l2cap_ertm_init(struct l2cap_chan *chan)
2712 {
2713 int err;
2714
2715 chan->next_tx_seq = 0;
2716 chan->expected_tx_seq = 0;
2717 chan->expected_ack_seq = 0;
2718 chan->unacked_frames = 0;
2719 chan->buffer_seq = 0;
2720 chan->frames_sent = 0;
2721 chan->last_acked_seq = 0;
2722 chan->sdu = NULL;
2723 chan->sdu_last_frag = NULL;
2724 chan->sdu_len = 0;
2725
2726 skb_queue_head_init(&chan->tx_q);
2727
2728 if (chan->mode != L2CAP_MODE_ERTM)
2729 return 0;
2730
2731 chan->rx_state = L2CAP_RX_STATE_RECV;
2732 chan->tx_state = L2CAP_TX_STATE_XMIT;
2733
2734 INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
2735 INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
2736 INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
2737
2738 skb_queue_head_init(&chan->srej_q);
2739
2740 err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
2741 if (err < 0)
2742 return err;
2743
2744 err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
2745 if (err < 0)
2746 l2cap_seq_list_free(&chan->srej_list);
2747
2748 return err;
2749 }
2750
2751 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2752 {
2753 switch (mode) {
2754 case L2CAP_MODE_STREAMING:
2755 case L2CAP_MODE_ERTM:
2756 if (l2cap_mode_supported(mode, remote_feat_mask))
2757 return mode;
2758 /* fall through */
2759 default:
2760 return L2CAP_MODE_BASIC;
2761 }
2762 }
2763
2764 static inline bool __l2cap_ews_supported(struct l2cap_chan *chan)
2765 {
2766 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
2767 }
2768
2769 static inline bool __l2cap_efs_supported(struct l2cap_chan *chan)
2770 {
2771 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
2772 }
2773
2774 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
2775 {
2776 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
2777 __l2cap_ews_supported(chan)) {
2778 /* use extended control field */
2779 set_bit(FLAG_EXT_CTRL, &chan->flags);
2780 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
2781 } else {
2782 chan->tx_win = min_t(u16, chan->tx_win,
2783 L2CAP_DEFAULT_TX_WINDOW);
2784 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
2785 }
2786 }
2787
/* Build an outgoing Configuration Request for @chan into @data.
 *
 * On the first request only, the desired mode is reconciled against the
 * remote's feature mask.  Then MTU, RFC, and (where applicable) EFS,
 * FCS and EWS options are appended.  Returns the number of bytes
 * written (header + options) for use as the command length.
 *
 * NOTE(review): options are written through an unbounded pointer; all
 * visible callers pass fixed buffers (64-128 bytes) — confirm the
 * worst-case option set fits.
 */
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
{
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = chan->mode };
	void *ptr = req->data;
	u16 size;

	BT_DBG("chan %p", chan);

	/* Mode negotiation only happens on the very first exchange */
	if (chan->num_conf_req || chan->num_conf_rsp)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		/* A "state 2" device keeps its requested mode as-is */
		if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
			break;

		if (__l2cap_efs_supported(chan))
			set_bit(FLAG_EFS_ENABLE, &chan->flags);

		/* fall through */
	default:
		/* Downgrade to whatever the remote actually supports */
		chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
		break;
	}

done:
	/* Only send MTU when it differs from the spec default */
	if (chan->imtu != L2CAP_DEFAULT_MTU)
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		/* Only send an explicit basic-mode RFC when the remote
		 * supports the other modes and might expect one.
		 */
		if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
		    !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
			break;

		rfc.mode = L2CAP_MODE_BASIC;
		rfc.txwin_size = 0;
		rfc.max_transmit = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size = 0;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);
		break;

	case L2CAP_MODE_ERTM:
		rfc.mode = L2CAP_MODE_ERTM;
		rfc.max_transmit = chan->max_tx;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;

		/* Cap the PDU so a maximal frame still fits the link MTU
		 * with extended header, SDU length and FCS overhead.
		 */
		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE -
			     L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_txwin_setup(chan);

		/* The RFC option field is limited to the standard window;
		 * larger windows go in the EWS option below.
		 */
		rfc.txwin_size = min_t(u16, chan->tx_win,
				       L2CAP_DEFAULT_TX_WINDOW);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan);

		if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		if (chan->fcs == L2CAP_FCS_NONE ||
		    test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
			chan->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
		}

		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
					   chan->tx_win);
		break;

	case L2CAP_MODE_STREAMING:
		l2cap_txwin_setup(chan);
		rfc.mode = L2CAP_MODE_STREAMING;
		rfc.txwin_size = 0;
		rfc.max_transmit = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;

		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE -
			     L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan);

		if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		if (chan->fcs == L2CAP_FCS_NONE ||
		    test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
			chan->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
		}
		break;
	}

	req->dcid = cpu_to_le16(chan->dcid);
	req->flags = __constant_cpu_to_le16(0);

	return ptr - data;
}
2909
/* Parse the accumulated Configuration Request in chan->conf_req and
 * build the Configuration Response into @data.
 *
 * First pass decodes each option (MTU, flush timeout, RFC, FCS, EFS,
 * EWS); unknown non-hint options produce an UNKNOWN result listing the
 * offending types.  The channel mode is then reconciled, and a response
 * with our accepted/adjusted values is emitted.
 *
 * Returns the response length in bytes, or -ECONNREFUSED when the
 * request is incompatible and the connection should be torn down.
 *
 * NOTE(review): the loop trusts lengths from l2cap_get_conf_opt and the
 * response is written through an unbounded ptr into the caller's fixed
 * buffer — confirm bounds at the call sites.
 */
static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
{
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;
	void *req = chan->conf_req;
	int len = chan->conf_len;
	int type, hint, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;
	u8 remote_efs = 0;
	u16 mtu = L2CAP_DEFAULT_MTU;
	u16 result = L2CAP_CONF_SUCCESS;
	u16 size;

	BT_DBG("chan %p", chan);

	/* Pass 1: decode every option the remote sent */
	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);

		/* Hint options may be ignored; mandatory ones may not */
		hint = type & L2CAP_CONF_HINT;
		type &= L2CAP_CONF_MASK;

		switch (type) {
		case L2CAP_CONF_MTU:
			mtu = val;
			break;

		case L2CAP_CONF_FLUSH_TO:
			chan->flush_to = val;
			break;

		case L2CAP_CONF_QOS:
			/* QoS option is accepted but not acted on */
			break;

		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *) val, olen);
			break;

		case L2CAP_CONF_FCS:
			if (val == L2CAP_FCS_NONE)
				set_bit(CONF_NO_FCS_RECV, &chan->conf_state);
			break;

		case L2CAP_CONF_EFS:
			remote_efs = 1;
			if (olen == sizeof(efs))
				memcpy(&efs, (void *) val, olen);
			break;

		case L2CAP_CONF_EWS:
			/* Extended windows are only valid with HS support */
			if (!enable_hs)
				return -ECONNREFUSED;

			set_bit(FLAG_EXT_CTRL, &chan->flags);
			set_bit(CONF_EWS_RECV, &chan->conf_state);
			chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
			chan->remote_tx_win = val;
			break;

		default:
			if (hint)
				break;

			/* Echo each unknown mandatory option type back */
			result = L2CAP_CONF_UNKNOWN;
			*((u8 *) ptr++) = type;
			break;
		}
	}

	/* Mode is only renegotiated on the first request/response */
	if (chan->num_conf_rsp || chan->num_conf_req > 1)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
			chan->mode = l2cap_select_mode(rfc.mode,
						       chan->conn->feat_mask);
			break;
		}

		if (remote_efs) {
			if (__l2cap_efs_supported(chan))
				set_bit(FLAG_EFS_ENABLE, &chan->flags);
			else
				return -ECONNREFUSED;
		}

		/* A state-2 device insists on its own mode */
		if (chan->mode != rfc.mode)
			return -ECONNREFUSED;

		break;
	}

done:
	if (chan->mode != rfc.mode) {
		result = L2CAP_CONF_UNACCEPT;
		rfc.mode = chan->mode;

		/* Second disagreement on mode is fatal */
		if (chan->num_conf_rsp == 1)
			return -ECONNREFUSED;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
				   sizeof(rfc), (unsigned long) &rfc);
	}

	if (result == L2CAP_CONF_SUCCESS) {
		/* Configure output options and let the other side know
		 * which ones we don't like. */

		if (mtu < L2CAP_DEFAULT_MIN_MTU)
			result = L2CAP_CONF_UNACCEPT;
		else {
			chan->omtu = mtu;
			set_bit(CONF_MTU_DONE, &chan->conf_state);
		}
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);

		if (remote_efs) {
			/* Service types must match unless one side is
			 * no-traffic.
			 */
			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != chan->local_stype) {

				result = L2CAP_CONF_UNACCEPT;

				if (chan->num_conf_req >= 1)
					return -ECONNREFUSED;

				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs);
			} else {
				/* Send PENDING Conf Rsp */
				result = L2CAP_CONF_PENDING;
				set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
			}
		}

		switch (rfc.mode) {
		case L2CAP_MODE_BASIC:
			chan->fcs = L2CAP_FCS_NONE;
			set_bit(CONF_MODE_DONE, &chan->conf_state);
			break;

		case L2CAP_MODE_ERTM:
			if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
				chan->remote_tx_win = rfc.txwin_size;
			else
				rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;

			chan->remote_max_tx = rfc.max_transmit;

			/* Clamp the remote's PDU size to what fits our
			 * link MTU after header/SDU-len/FCS overhead.
			 */
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu -
				     L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE -
				     L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			rfc.retrans_timeout =
				__constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
			rfc.monitor_timeout =
				__constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					   sizeof(rfc), (unsigned long) &rfc);

			/* NOTE(review): if FLAG_EFS_ENABLE was set locally
			 * but the remote never sent an EFS option
			 * (remote_efs == 0), efs below is uninitialized
			 * stack data — confirm this path is unreachable.
			 */
			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->remote_id = efs.id;
				chan->remote_stype = efs.stype;
				chan->remote_msdu = le16_to_cpu(efs.msdu);
				chan->remote_flush_to =
					le32_to_cpu(efs.flush_to);
				chan->remote_acc_lat =
					le32_to_cpu(efs.acc_lat);
				chan->remote_sdu_itime =
					le32_to_cpu(efs.sdu_itime);
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs), (unsigned long) &efs);
			}
			break;

		case L2CAP_MODE_STREAMING:
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu -
				     L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE -
				     L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					   sizeof(rfc), (unsigned long) &rfc);

			break;

		default:
			result = L2CAP_CONF_UNACCEPT;

			memset(&rfc, 0, sizeof(rfc));
			rfc.mode = chan->mode;
		}

		if (result == L2CAP_CONF_SUCCESS)
			set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
	}
	rsp->scid = cpu_to_le16(chan->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags = __constant_cpu_to_le16(0);

	return ptr - data;
}
3129
/* Parse a Configuration Response @rsp of @len bytes and build the
 * follow-up Configuration Request into @data, adopting the values the
 * remote adjusted (MTU, flush timeout, RFC, EWS, EFS).
 *
 * @result may be downgraded to UNACCEPT (e.g. MTU below minimum).
 * Returns the new request length, or -ECONNREFUSED for incompatible
 * mode or EFS service type.
 *
 * NOTE(review): each accepted option is re-appended to req->data with
 * no bound on the caller's buffer (callers pass fixed on-stack arrays);
 * a response carrying many options could overflow — verify limits at
 * the call sites.
 */
static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, void *data, u16 *result)
{
	struct l2cap_conf_req *req = data;
	void *ptr = req->data;
	int type, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;

	BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);

		switch (type) {
		case L2CAP_CONF_MTU:
			/* Refuse an MTU below the spec minimum but keep
			 * negotiating with our minimum instead.
			 */
			if (val < L2CAP_DEFAULT_MIN_MTU) {
				*result = L2CAP_CONF_UNACCEPT;
				chan->imtu = L2CAP_DEFAULT_MIN_MTU;
			} else
				chan->imtu = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
			break;

		case L2CAP_CONF_FLUSH_TO:
			chan->flush_to = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
					   2, chan->flush_to);
			break;

		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *)val, olen);

			/* A state-2 device cannot change mode mid-flight */
			if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
			    rfc.mode != chan->mode)
				return -ECONNREFUSED;

			chan->fcs = 0;

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					   sizeof(rfc), (unsigned long) &rfc);
			break;

		case L2CAP_CONF_EWS:
			chan->tx_win = min_t(u16, val,
					     L2CAP_DEFAULT_EXT_WINDOW);
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
					   chan->tx_win);
			break;

		case L2CAP_CONF_EFS:
			if (olen == sizeof(efs))
				memcpy(&efs, (void *)val, olen);

			/* Service types must agree unless one side is
			 * no-traffic.
			 */
			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != chan->local_stype)
				return -ECONNREFUSED;

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
					   sizeof(efs), (unsigned long) &efs);
			break;
		}
	}

	/* Basic mode was requested but the remote answered otherwise */
	if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
		return -ECONNREFUSED;

	chan->mode = rfc.mode;

	if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
		switch (rfc.mode) {
		case L2CAP_MODE_ERTM:
			chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
			chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
			chan->mps = le16_to_cpu(rfc.max_pdu_size);

			/* NOTE(review): efs is only initialized if the
			 * response carried a well-sized EFS option —
			 * confirm FLAG_EFS_ENABLE implies it did.
			 */
			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->local_msdu = le16_to_cpu(efs.msdu);
				chan->local_sdu_itime =
					le32_to_cpu(efs.sdu_itime);
				chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
				chan->local_flush_to =
					le32_to_cpu(efs.flush_to);
			}
			break;

		case L2CAP_MODE_STREAMING:
			chan->mps = le16_to_cpu(rfc.max_pdu_size);
		}
	}

	req->dcid = cpu_to_le16(chan->dcid);
	req->flags = __constant_cpu_to_le16(0);

	return ptr - data;
}
3228
3229 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data, u16 result, u16 flags)
3230 {
3231 struct l2cap_conf_rsp *rsp = data;
3232 void *ptr = rsp->data;
3233
3234 BT_DBG("chan %p", chan);
3235
3236 rsp->scid = cpu_to_le16(chan->dcid);
3237 rsp->result = cpu_to_le16(result);
3238 rsp->flags = cpu_to_le16(flags);
3239
3240 return ptr - data;
3241 }
3242
3243 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
3244 {
3245 struct l2cap_conn_rsp rsp;
3246 struct l2cap_conn *conn = chan->conn;
3247 u8 buf[128];
3248
3249 rsp.scid = cpu_to_le16(chan->dcid);
3250 rsp.dcid = cpu_to_le16(chan->scid);
3251 rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
3252 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
3253 l2cap_send_cmd(conn, chan->ident,
3254 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
3255
3256 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3257 return;
3258
3259 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3260 l2cap_build_conf_req(chan, buf), buf);
3261 chan->num_conf_req++;
3262 }
3263
3264 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
3265 {
3266 int type, olen;
3267 unsigned long val;
3268 struct l2cap_conf_rfc rfc;
3269
3270 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
3271
3272 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
3273 return;
3274
3275 while (len >= L2CAP_CONF_OPT_SIZE) {
3276 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3277
3278 switch (type) {
3279 case L2CAP_CONF_RFC:
3280 if (olen == sizeof(rfc))
3281 memcpy(&rfc, (void *)val, olen);
3282 goto done;
3283 }
3284 }
3285
3286 /* Use sane default values in case a misbehaving remote device
3287 * did not send an RFC option.
3288 */
3289 rfc.mode = chan->mode;
3290 rfc.retrans_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
3291 rfc.monitor_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
3292 rfc.max_pdu_size = cpu_to_le16(chan->imtu);
3293
3294 BT_ERR("Expected RFC option was not found, using defaults");
3295
3296 done:
3297 switch (rfc.mode) {
3298 case L2CAP_MODE_ERTM:
3299 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3300 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3301 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3302 break;
3303 case L2CAP_MODE_STREAMING:
3304 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3305 }
3306 }
3307
3308 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3309 {
3310 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
3311
3312 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
3313 return 0;
3314
3315 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
3316 cmd->ident == conn->info_ident) {
3317 cancel_delayed_work(&conn->info_timer);
3318
3319 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3320 conn->info_ident = 0;
3321
3322 l2cap_conn_start(conn);
3323 }
3324
3325 return 0;
3326 }
3327
/* Handle an incoming Connection Request.
 *
 * Looks up a listening channel for the requested PSM, performs security
 * checks, creates the new channel, and answers with a Connection
 * Response whose result/status reflect whether the connection is
 * accepted, pending (security or userspace authorization), or refused.
 * May also kick off the feature-mask information exchange and the first
 * Configuration Request.  Always returns 0.
 *
 * Locking: takes conn->chan_lock and the parent socket lock for the
 * channel-creation section.
 */
static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct l2cap_chan *chan = NULL, *pchan;
	struct sock *parent, *sk = NULL;
	int result, status = L2CAP_CS_NO_INFO;

	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src, conn->dst);
	if (!pchan) {
		result = L2CAP_CR_BAD_PSM;
		goto sendresp;
	}

	parent = pchan->sk;

	mutex_lock(&conn->chan_lock);
	lock_sock(parent);

	/* Check if the ACL is secure enough (if not SDP) */
	if (psm != __constant_cpu_to_le16(L2CAP_PSM_SDP) &&
	    !hci_conn_check_link_mode(conn->hcon)) {
		conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
		result = L2CAP_CR_SEC_BLOCK;
		goto response;
	}

	result = L2CAP_CR_NO_MEM;

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid))
		goto response;

	chan = pchan->ops->new_connection(pchan);
	if (!chan)
		goto response;

	sk = chan->sk;

	/* Keep the ACL alive for the lifetime of this channel */
	hci_conn_hold(conn->hcon);

	bacpy(&bt_sk(sk)->src, conn->src);
	bacpy(&bt_sk(sk)->dst, conn->dst);
	chan->psm = psm;
	chan->dcid = scid;

	bt_accept_enqueue(parent, sk);

	__l2cap_chan_add(conn, chan);

	/* Our freshly allocated source CID is the remote's dcid */
	dcid = chan->scid;

	__set_chan_timer(chan, sk->sk_sndtimeo);

	chan->ident = cmd->ident;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
		if (l2cap_chan_check_security(chan)) {
			if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) {
				/* Userspace must authorize before we
				 * complete the connection.
				 */
				__l2cap_state_change(chan, BT_CONNECT2);
				result = L2CAP_CR_PEND;
				status = L2CAP_CS_AUTHOR_PEND;
				parent->sk_data_ready(parent, 0);
			} else {
				__l2cap_state_change(chan, BT_CONFIG);
				result = L2CAP_CR_SUCCESS;
				status = L2CAP_CS_NO_INFO;
			}
		} else {
			/* Wait for authentication to finish first */
			__l2cap_state_change(chan, BT_CONNECT2);
			result = L2CAP_CR_PEND;
			status = L2CAP_CS_AUTHEN_PEND;
		}
	} else {
		/* Feature exchange not done yet: answer "pending" */
		__l2cap_state_change(chan, BT_CONNECT2);
		result = L2CAP_CR_PEND;
		status = L2CAP_CS_NO_INFO;
	}

response:
	release_sock(parent);
	mutex_unlock(&conn->chan_lock);

sendresp:
	rsp.scid = cpu_to_le16(scid);
	rsp.dcid = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);

	/* Start the feature-mask exchange if we have not yet done so */
	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
		struct l2cap_info_req info;
		info.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

		l2cap_send_cmd(conn, conn->info_ident,
			       L2CAP_INFO_REQ, sizeof(info), &info);
	}

	/* Accepted immediately: begin configuration right away */
	if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
	    result == L2CAP_CR_SUCCESS) {
		u8 buf[128];
		set_bit(CONF_REQ_SENT, &chan->conf_state);
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf), buf);
		chan->num_conf_req++;
	}

	return 0;
}
3448
/* Handle an incoming Connection Response.
 *
 * Looks up the local channel by source CID (or, while the remote has
 * not yet assigned one, by the command identifier) and acts on result:
 * success moves to BT_CONFIG and sends the first Configuration Request,
 * pending just marks the channel, anything else deletes it.
 *
 * Returns 0 on success or -EFAULT when no matching channel exists.
 */
static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct l2cap_chan *chan;
	u8 req[128];
	int err;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
	       dcid, scid, result, status);

	mutex_lock(&conn->chan_lock);

	if (scid) {
		chan = __l2cap_get_chan_by_scid(conn, scid);
		if (!chan) {
			err = -EFAULT;
			goto unlock;
		}
	} else {
		/* No scid yet (e.g. pending response): match on ident */
		chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
		if (!chan) {
			err = -EFAULT;
			goto unlock;
		}
	}

	err = 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_SUCCESS:
		l2cap_state_change(chan, BT_CONFIG);
		chan->ident = 0;
		chan->dcid = dcid;
		clear_bit(CONF_CONNECT_PEND, &chan->conf_state);

		/* Configuration request may only be sent once */
		if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
			break;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, req), req);
		chan->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		set_bit(CONF_CONNECT_PEND, &chan->conf_state);
		break;

	default:
		/* Refused (or unknown result): tear the channel down */
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	l2cap_chan_unlock(chan);

unlock:
	mutex_unlock(&conn->chan_lock);

	return err;
}
3516
3517 static inline void set_default_fcs(struct l2cap_chan *chan)
3518 {
3519 /* FCS is enabled only in ERTM or streaming mode, if one or both
3520 * sides request it.
3521 */
3522 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
3523 chan->fcs = L2CAP_FCS_NONE;
3524 else if (!test_bit(CONF_NO_FCS_RECV, &chan->conf_state))
3525 chan->fcs = L2CAP_FCS_CRC16;
3526 }
3527
/* Handle an incoming Configuration Request.
 *
 * Accumulates continuation fragments into chan->conf_req, and on the
 * final fragment parses the whole request, sends the Configuration
 * Response, and — once both directions are configured — finishes ERTM
 * setup and marks the channel ready.  Also handles the PENDING/PENDING
 * lockstep case by re-sending a success response.
 *
 * Returns 0 on success, -ENOENT if no channel matches the dcid, or the
 * (negative) result of the config parsing/ERTM init path.
 */
static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rsp[64];
	struct l2cap_chan *chan;
	int len, err = 0;

	dcid = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	/* Returns with the channel locked on success */
	chan = l2cap_get_chan_by_scid(conn, dcid);
	if (!chan)
		return -ENOENT;

	/* Config is only legal in CONFIG/CONNECT2; reject otherwise */
	if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
		struct l2cap_cmd_rej_cid rej;

		rej.reason = __constant_cpu_to_le16(L2CAP_REJ_INVALID_CID);
		rej.scid = cpu_to_le16(chan->scid);
		rej.dcid = cpu_to_le16(chan->dcid);

		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
			       sizeof(rej), &rej);
		goto unlock;
	}

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (len < 0 || chan->conf_len + len > sizeof(chan->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
						    L2CAP_CONF_REJECT, flags), rsp);
		goto unlock;
	}

	/* Store config. */
	memcpy(chan->conf_req + chan->conf_len, req->data, len);
	chan->conf_len += len;

	if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
						    L2CAP_CONF_SUCCESS, flags), rsp);
		goto unlock;
	}

	/* Complete config. */
	len = l2cap_parse_conf_req(chan, rsp);
	if (len < 0) {
		l2cap_send_disconn_req(conn, chan, ECONNRESET);
		goto unlock;
	}

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
	chan->num_conf_rsp++;

	/* Reset config buffer. */
	chan->conf_len = 0;

	if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
		goto unlock;

	/* Both directions configured: finish setup and go ready */
	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan->conn, chan, -err);
		else
			l2cap_chan_ready(chan);

		goto unlock;
	}

	if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
		u8 buf[64];
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf), buf);
		chan->num_conf_req++;
	}

	/* Got Conf Rsp PENDING from remote side and assume we sent
	   Conf Rsp PENDING in the code above */
	if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
	    test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {

		/* check compatibility */

		clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
		set_bit(CONF_OUTPUT_DONE, &chan->conf_state);

		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
						    L2CAP_CONF_SUCCESS, flags), rsp);
	}

unlock:
	l2cap_chan_unlock(chan);
	return err;
}
3635
/* Handle an incoming L2CAP Configure Response.
 *
 * Drives the local side of channel configuration according to the
 * response code: SUCCESS records the accepted options, PENDING defers
 * completion, UNACCEPT triggers a limited number of renegotiation
 * attempts, and anything else tears the channel down.
 *
 * NOTE(review): len is derived from cmd->len without an explicit bound
 * check against the actual received buffer here — presumably validated
 * by the caller's cmd_len/skb->len handling; confirm against
 * l2cap_sig_channel().
 */
static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
	u16 scid, flags, result;
	struct l2cap_chan *chan;
	int len = le16_to_cpu(cmd->len) - sizeof(*rsp);
	int err = 0;

	scid = __le16_to_cpu(rsp->scid);
	flags = __le16_to_cpu(rsp->flags);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
	       result, len);

	/* On success the channel is returned locked; released at "done" */
	chan = l2cap_get_chan_by_scid(conn, scid);
	if (!chan)
		return 0;

	switch (result) {
	case L2CAP_CONF_SUCCESS:
		l2cap_conf_rfc_get(chan, rsp->data, len);
		clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
		break;

	case L2CAP_CONF_PENDING:
		set_bit(CONF_REM_CONF_PEND, &chan->conf_state);

		if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
			char buf[64];

			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
						   buf, &result);
			if (len < 0) {
				l2cap_send_disconn_req(conn, chan, ECONNRESET);
				goto done;
			}

			/* check compatibility */

			clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
			set_bit(CONF_OUTPUT_DONE, &chan->conf_state);

			l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				       l2cap_build_conf_rsp(chan, buf,
				       L2CAP_CONF_SUCCESS, 0x0000), buf);
		}
		goto done;

	case L2CAP_CONF_UNACCEPT:
		if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
			char req[64];

			if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
				l2cap_send_disconn_req(conn, chan, ECONNRESET);
				goto done;
			}

			/* throw out any old stored conf requests */
			result = L2CAP_CONF_SUCCESS;
			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
						   req, &result);
			if (len < 0) {
				l2cap_send_disconn_req(conn, chan, ECONNRESET);
				goto done;
			}

			l2cap_send_cmd(conn, l2cap_get_ident(conn),
				       L2CAP_CONF_REQ, len, req);
			chan->num_conf_req++;
			if (result != L2CAP_CONF_SUCCESS)
				goto done;
			break;
		}
		/* Renegotiation limit exceeded: fall through and give up */

	default:
		l2cap_chan_set_err(chan, ECONNRESET);

		__set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
		l2cap_send_disconn_req(conn, chan, ECONNRESET);
		goto done;
	}

	/* More option fragments to come: wait for the final response */
	if (flags & L2CAP_CONF_FLAG_CONTINUATION)
		goto done;

	set_bit(CONF_INPUT_DONE, &chan->conf_state);

	if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
		/* Both directions configured: finalize and bring channel up */
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan->conn, chan, -err);
		else
			l2cap_chan_ready(chan);
	}

done:
	l2cap_chan_unlock(chan);
	return err;
}
3741
/* Handle an incoming L2CAP Disconnection Request.
 *
 * Acknowledges the request with a Disconnection Response, shuts down
 * the associated socket, and removes the channel from the connection.
 * An unknown destination CID is silently ignored (returns 0).
 *
 * Note the ordering: a reference is held across l2cap_chan_del() so the
 * channel stays valid for ops->close() after its lock is dropped.
 */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct l2cap_chan *chan;
	struct sock *sk;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	mutex_lock(&conn->chan_lock);

	/* The remote's dcid is our scid */
	chan = __l2cap_get_chan_by_scid(conn, dcid);
	if (!chan) {
		mutex_unlock(&conn->chan_lock);
		return 0;
	}

	l2cap_chan_lock(chan);

	sk = chan->sk;

	/* Mirror the CIDs back: our scid is the remote's dcid and vice versa */
	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.scid = cpu_to_le16(chan->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	lock_sock(sk);
	sk->sk_shutdown = SHUTDOWN_MASK;
	release_sock(sk);

	/* Keep the channel alive until ops->close() below */
	l2cap_chan_hold(chan);
	l2cap_chan_del(chan, ECONNRESET);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);

	return 0;
}
3787
/* Handle an incoming L2CAP Disconnection Response.
 *
 * Completes a locally initiated disconnect: the channel identified by
 * our source CID is removed from the connection and closed.  An unknown
 * source CID is silently ignored (returns 0).
 */
static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
	u16 dcid, scid;
	struct l2cap_chan *chan;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);

	mutex_lock(&conn->chan_lock);

	chan = __l2cap_get_chan_by_scid(conn, scid);
	if (!chan) {
		mutex_unlock(&conn->chan_lock);
		return 0;
	}

	l2cap_chan_lock(chan);

	/* Keep the channel alive until ops->close() below */
	l2cap_chan_hold(chan);
	l2cap_chan_del(chan, 0);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);

	return 0;
}
3821
3822 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3823 {
3824 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
3825 u16 type;
3826
3827 type = __le16_to_cpu(req->type);
3828
3829 BT_DBG("type 0x%4.4x", type);
3830
3831 if (type == L2CAP_IT_FEAT_MASK) {
3832 u8 buf[8];
3833 u32 feat_mask = l2cap_feat_mask;
3834 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3835 rsp->type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
3836 rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
3837 if (!disable_ertm)
3838 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
3839 | L2CAP_FEAT_FCS;
3840 if (enable_hs)
3841 feat_mask |= L2CAP_FEAT_EXT_FLOW
3842 | L2CAP_FEAT_EXT_WINDOW;
3843
3844 put_unaligned_le32(feat_mask, rsp->data);
3845 l2cap_send_cmd(conn, cmd->ident,
3846 L2CAP_INFO_RSP, sizeof(buf), buf);
3847 } else if (type == L2CAP_IT_FIXED_CHAN) {
3848 u8 buf[12];
3849 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3850
3851 if (enable_hs)
3852 l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
3853 else
3854 l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;
3855
3856 rsp->type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3857 rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
3858 memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
3859 l2cap_send_cmd(conn, cmd->ident,
3860 L2CAP_INFO_RSP, sizeof(buf), buf);
3861 } else {
3862 struct l2cap_info_rsp rsp;
3863 rsp.type = cpu_to_le16(type);
3864 rsp.result = __constant_cpu_to_le16(L2CAP_IR_NOTSUPP);
3865 l2cap_send_cmd(conn, cmd->ident,
3866 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
3867 }
3868
3869 return 0;
3870 }
3871
/* Handle an incoming L2CAP Information Response.
 *
 * Part of the connection's feature discovery sequence: after the
 * feature mask arrives, a follow-up request for the fixed channel map
 * is issued if the peer advertises fixed channel support; once
 * discovery completes (or fails), pending channels are started via
 * l2cap_conn_start().
 */
static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
	u16 type, result;

	type = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);

	/* L2CAP Info req/rsp are unbound to channels, add extra checks */
	if (cmd->ident != conn->info_ident ||
	    conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
		return 0;

	cancel_delayed_work(&conn->info_timer);

	if (result != L2CAP_IR_SUCCESS) {
		/* Peer refused; give up on discovery and start channels */
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);

		return 0;
	}

	switch (type) {
	case L2CAP_IT_FEAT_MASK:
		conn->feat_mask = get_unaligned_le32(rsp->data);

		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
			/* Chain a fixed channel query before finishing */
			struct l2cap_info_req req;
			req.type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);

			conn->info_ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, conn->info_ident,
				       L2CAP_INFO_REQ, sizeof(req), &req);
		} else {
			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
			conn->info_ident = 0;

			l2cap_conn_start(conn);
		}
		break;

	case L2CAP_IT_FIXED_CHAN:
		/* Only the first octet of the fixed channel map is kept */
		conn->fixed_chan_mask = rsp->data[0];
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
		break;
	}

	return 0;
}
3929
3930 static inline int l2cap_create_channel_req(struct l2cap_conn *conn,
3931 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3932 void *data)
3933 {
3934 struct l2cap_create_chan_req *req = data;
3935 struct l2cap_create_chan_rsp rsp;
3936 u16 psm, scid;
3937
3938 if (cmd_len != sizeof(*req))
3939 return -EPROTO;
3940
3941 if (!enable_hs)
3942 return -EINVAL;
3943
3944 psm = le16_to_cpu(req->psm);
3945 scid = le16_to_cpu(req->scid);
3946
3947 BT_DBG("psm %d, scid %d, amp_id %d", psm, scid, req->amp_id);
3948
3949 /* Placeholder: Always reject */
3950 rsp.dcid = 0;
3951 rsp.scid = cpu_to_le16(scid);
3952 rsp.result = __constant_cpu_to_le16(L2CAP_CR_NO_MEM);
3953 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
3954
3955 l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
3956 sizeof(rsp), &rsp);
3957
3958 return 0;
3959 }
3960
/* Handle an incoming L2CAP Create Channel Response (AMP).
 *
 * The response carries the same layout and semantics as a Connection
 * Response, so it is delegated to l2cap_connect_rsp().
 */
static inline int l2cap_create_channel_rsp(struct l2cap_conn *conn,
					   struct l2cap_cmd_hdr *cmd, void *data)
{
	BT_DBG("conn %p", conn);

	return l2cap_connect_rsp(conn, cmd, data);
}
3968
3969 static void l2cap_send_move_chan_rsp(struct l2cap_conn *conn, u8 ident,
3970 u16 icid, u16 result)
3971 {
3972 struct l2cap_move_chan_rsp rsp;
3973
3974 BT_DBG("icid %d, result %d", icid, result);
3975
3976 rsp.icid = cpu_to_le16(icid);
3977 rsp.result = cpu_to_le16(result);
3978
3979 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_RSP, sizeof(rsp), &rsp);
3980 }
3981
3982 static void l2cap_send_move_chan_cfm(struct l2cap_conn *conn,
3983 struct l2cap_chan *chan, u16 icid, u16 result)
3984 {
3985 struct l2cap_move_chan_cfm cfm;
3986 u8 ident;
3987
3988 BT_DBG("icid %d, result %d", icid, result);
3989
3990 ident = l2cap_get_ident(conn);
3991 if (chan)
3992 chan->ident = ident;
3993
3994 cfm.icid = cpu_to_le16(icid);
3995 cfm.result = cpu_to_le16(result);
3996
3997 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM, sizeof(cfm), &cfm);
3998 }
3999
4000 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4001 u16 icid)
4002 {
4003 struct l2cap_move_chan_cfm_rsp rsp;
4004
4005 BT_DBG("icid %d", icid);
4006
4007 rsp.icid = cpu_to_le16(icid);
4008 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
4009 }
4010
4011 static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
4012 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
4013 {
4014 struct l2cap_move_chan_req *req = data;
4015 u16 icid = 0;
4016 u16 result = L2CAP_MR_NOT_ALLOWED;
4017
4018 if (cmd_len != sizeof(*req))
4019 return -EPROTO;
4020
4021 icid = le16_to_cpu(req->icid);
4022
4023 BT_DBG("icid %d, dest_amp_id %d", icid, req->dest_amp_id);
4024
4025 if (!enable_hs)
4026 return -EINVAL;
4027
4028 /* Placeholder: Always refuse */
4029 l2cap_send_move_chan_rsp(conn, cmd->ident, icid, result);
4030
4031 return 0;
4032 }
4033
4034 static inline int l2cap_move_channel_rsp(struct l2cap_conn *conn,
4035 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
4036 {
4037 struct l2cap_move_chan_rsp *rsp = data;
4038 u16 icid, result;
4039
4040 if (cmd_len != sizeof(*rsp))
4041 return -EPROTO;
4042
4043 icid = le16_to_cpu(rsp->icid);
4044 result = le16_to_cpu(rsp->result);
4045
4046 BT_DBG("icid %d, result %d", icid, result);
4047
4048 /* Placeholder: Always unconfirmed */
4049 l2cap_send_move_chan_cfm(conn, NULL, icid, L2CAP_MC_UNCONFIRMED);
4050
4051 return 0;
4052 }
4053
4054 static inline int l2cap_move_channel_confirm(struct l2cap_conn *conn,
4055 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
4056 {
4057 struct l2cap_move_chan_cfm *cfm = data;
4058 u16 icid, result;
4059
4060 if (cmd_len != sizeof(*cfm))
4061 return -EPROTO;
4062
4063 icid = le16_to_cpu(cfm->icid);
4064 result = le16_to_cpu(cfm->result);
4065
4066 BT_DBG("icid %d, result %d", icid, result);
4067
4068 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
4069
4070 return 0;
4071 }
4072
4073 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
4074 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
4075 {
4076 struct l2cap_move_chan_cfm_rsp *rsp = data;
4077 u16 icid;
4078
4079 if (cmd_len != sizeof(*rsp))
4080 return -EPROTO;
4081
4082 icid = le16_to_cpu(rsp->icid);
4083
4084 BT_DBG("icid %d", icid);
4085
4086 return 0;
4087 }
4088
4089 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
4090 u16 to_multiplier)
4091 {
4092 u16 max_latency;
4093
4094 if (min > max || min < 6 || max > 3200)
4095 return -EINVAL;
4096
4097 if (to_multiplier < 10 || to_multiplier > 3200)
4098 return -EINVAL;
4099
4100 if (max >= to_multiplier * 8)
4101 return -EINVAL;
4102
4103 max_latency = (to_multiplier * 8 / max) - 1;
4104 if (latency > 499 || latency > max_latency)
4105 return -EINVAL;
4106
4107 return 0;
4108 }
4109
/* Handle an incoming LE Connection Parameter Update Request.
 *
 * Only the master may be asked to update parameters (the request comes
 * from the slave), so a request on a non-master link is rejected with
 * -EINVAL.  Valid parameters are accepted and pushed to the controller
 * via hci_le_conn_update(); invalid ones are refused on the wire.
 */
static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
					      struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_conn_param_update_req *req;
	struct l2cap_conn_param_update_rsp rsp;
	u16 min, max, latency, to_multiplier, cmd_len;
	int err;

	/* Only a master can apply the requested parameters */
	if (!(hcon->link_mode & HCI_LM_MASTER))
		return -EINVAL;

	cmd_len = __le16_to_cpu(cmd->len);
	if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
		return -EPROTO;

	req = (struct l2cap_conn_param_update_req *) data;
	min = __le16_to_cpu(req->min);
	max = __le16_to_cpu(req->max);
	latency = __le16_to_cpu(req->latency);
	to_multiplier = __le16_to_cpu(req->to_multiplier);

	BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
	       min, max, latency, to_multiplier);

	memset(&rsp, 0, sizeof(rsp));

	err = l2cap_check_conn_param(min, max, latency, to_multiplier);
	if (err)
		rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
	else
		rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
		       sizeof(rsp), &rsp);

	/* Apply accepted parameters on the controller */
	if (!err)
		hci_le_conn_update(hcon, min, max, latency, to_multiplier);

	return 0;
}
4151
/* Dispatch a single BR/EDR signaling command to its handler.
 *
 * Returns the handler's result; -EINVAL for an unknown command code,
 * which makes the caller send a Command Reject.
 */
static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
				      struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
	int err = 0;

	switch (cmd->code) {
	case L2CAP_COMMAND_REJ:
		l2cap_command_rej(conn, cmd, data);
		break;

	case L2CAP_CONN_REQ:
		err = l2cap_connect_req(conn, cmd, data);
		break;

	case L2CAP_CONN_RSP:
		err = l2cap_connect_rsp(conn, cmd, data);
		break;

	case L2CAP_CONF_REQ:
		err = l2cap_config_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONF_RSP:
		err = l2cap_config_rsp(conn, cmd, data);
		break;

	case L2CAP_DISCONN_REQ:
		err = l2cap_disconnect_req(conn, cmd, data);
		break;

	case L2CAP_DISCONN_RSP:
		err = l2cap_disconnect_rsp(conn, cmd, data);
		break;

	case L2CAP_ECHO_REQ:
		/* Echo the payload straight back */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
		break;

	case L2CAP_ECHO_RSP:
		break;

	case L2CAP_INFO_REQ:
		err = l2cap_information_req(conn, cmd, data);
		break;

	case L2CAP_INFO_RSP:
		err = l2cap_information_rsp(conn, cmd, data);
		break;

	case L2CAP_CREATE_CHAN_REQ:
		err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CREATE_CHAN_RSP:
		err = l2cap_create_channel_rsp(conn, cmd, data);
		break;

	case L2CAP_MOVE_CHAN_REQ:
		err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_RSP:
		err = l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_CFM:
		err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_CFM_RSP:
		err = l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
		break;

	default:
		BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
		err = -EINVAL;
		break;
	}

	return err;
}
4233
4234 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
4235 struct l2cap_cmd_hdr *cmd, u8 *data)
4236 {
4237 switch (cmd->code) {
4238 case L2CAP_COMMAND_REJ:
4239 return 0;
4240
4241 case L2CAP_CONN_PARAM_UPDATE_REQ:
4242 return l2cap_conn_param_update_req(conn, cmd, data);
4243
4244 case L2CAP_CONN_PARAM_UPDATE_RSP:
4245 return 0;
4246
4247 default:
4248 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
4249 return -EINVAL;
4250 }
4251 }
4252
/* Process an skb received on the L2CAP signaling channel.
 *
 * The payload may contain several concatenated commands; each is parsed
 * and dispatched to the BR/EDR or LE handler depending on the link
 * type.  A handler error is answered with a Command Reject.  Parsing
 * stops on a truncated command or a zero ident.  Consumes the skb.
 */
static inline void l2cap_sig_channel(struct l2cap_conn *conn,
				     struct sk_buff *skb)
{
	u8 *data = skb->data;
	int len = skb->len;
	struct l2cap_cmd_hdr cmd;
	int err;

	l2cap_raw_recv(conn, skb);

	while (len >= L2CAP_CMD_HDR_SIZE) {
		u16 cmd_len;
		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
		data += L2CAP_CMD_HDR_SIZE;
		len -= L2CAP_CMD_HDR_SIZE;

		cmd_len = le16_to_cpu(cmd.len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);

		/* A command must fit in the remaining data and carry a
		 * non-zero ident; otherwise stop parsing this skb.
		 */
		if (cmd_len > len || !cmd.ident) {
			BT_DBG("corrupted command");
			break;
		}

		if (conn->hcon->type == LE_LINK)
			err = l2cap_le_sig_cmd(conn, &cmd, data);
		else
			err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);

		if (err) {
			struct l2cap_cmd_rej_unk rej;

			BT_ERR("Wrong link type (%d)", err);

			/* FIXME: Map err to a valid reason */
			rej.reason = __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
		}

		/* Advance to the next command in the payload */
		data += cmd_len;
		len -= cmd_len;
	}

	kfree_skb(skb);
}
4299
/* Verify and strip the frame check sequence of a received frame.
 *
 * When the channel uses CRC16, the trailing FCS is trimmed off the skb
 * and compared against a CRC computed over the L2CAP header plus
 * payload.  (After skb_trim() the FCS bytes are still present in the
 * buffer just past the new tail, which is where they are read from.)
 * Returns 0 on success or when FCS is off, -EBADMSG on mismatch.
 */
static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
{
	u16 our_fcs, rcv_fcs;
	int hdr_size;

	/* The CRC also covers the control field, whose size depends on
	 * whether extended control fields are in use.
	 */
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		hdr_size = L2CAP_EXT_HDR_SIZE;
	else
		hdr_size = L2CAP_ENH_HDR_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16) {
		skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
		rcv_fcs = get_unaligned_le16(skb->data + skb->len);
		our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);

		if (our_fcs != rcv_fcs)
			return -EBADMSG;
	}
	return 0;
}
4320
/* Answer a poll by sending a final (F=1) frame.
 *
 * If we are locally busy an RNR is sent immediately; otherwise pending
 * I-frames are flushed and, when none of them carried the F-bit, an RR
 * is sent so the poll is always answered.
 */
static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;

	BT_DBG("chan %p", chan);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.final = 1;
	control.reqseq = chan->buffer_seq;
	set_bit(CONN_SEND_FBIT, &chan->conn_state);

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		control.super = L2CAP_SUPER_RNR;
		l2cap_send_sframe(chan, &control);
	}

	/* Remote just signalled it is no longer busy; resume the
	 * retransmission timer if frames are still unacknowledged.
	 */
	if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
	    chan->unacked_frames > 0)
		__set_retrans_timer(chan);

	/* Send pending iframes */
	l2cap_ertm_send(chan);

	if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
		/* F-bit wasn't sent in an s-frame or i-frame yet, so
		 * send it now.
		 */
		control.super = L2CAP_SUPER_RR;
		l2cap_send_sframe(chan, &control);
	}
}
4354
/* Append a fragment skb to the frag_list of a reassembly skb and
 * update its accounting.
 *
 * *last_frag tracks the tail of the fragment chain; for the first
 * fragment the caller is expected to have it pointing at the head skb
 * itself (see l2cap_reassemble_sdu, which sets sdu_last_frag = skb).
 */
static void append_skb_frag(struct sk_buff *skb,
			    struct sk_buff *new_frag, struct sk_buff **last_frag)
{
	/* skb->len reflects data in skb as well as all fragments
	 * skb->data_len reflects only data in fragments
	 */
	if (!skb_has_frag_list(skb))
		skb_shinfo(skb)->frag_list = new_frag;

	new_frag->next = NULL;

	(*last_frag)->next = new_frag;
	*last_frag = new_frag;

	/* Account the fragment in the head skb's totals */
	skb->len += new_frag->len;
	skb->data_len += new_frag->len;
	skb->truesize += new_frag->truesize;
}
4373
/* Reassemble a segmented SDU from an ERTM/streaming I-frame.
 *
 * skb ownership: on success the skb has either been handed to
 * ops->recv() or attached to the in-progress SDU (skb is set to NULL
 * locally to record the transfer).  On error both the skb and any
 * partial SDU are freed.  A frame whose SAR value contradicts the
 * current reassembly state (e.g. START while an SDU is pending, or
 * CONTINUE/END without one) is treated as an error (-EINVAL).
 */
static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
				struct l2cap_ctrl *control)
{
	int err = -EINVAL;

	switch (control->sar) {
	case L2CAP_SAR_UNSEGMENTED:
		/* A reassembly must not be in progress */
		if (chan->sdu)
			break;

		err = chan->ops->recv(chan, skb);
		break;

	case L2CAP_SAR_START:
		if (chan->sdu)
			break;

		/* First fragment carries the total SDU length */
		chan->sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, L2CAP_SDULEN_SIZE);

		if (chan->sdu_len > chan->imtu) {
			err = -EMSGSIZE;
			break;
		}

		/* A START fragment must not already contain the whole SDU */
		if (skb->len >= chan->sdu_len)
			break;

		chan->sdu = skb;
		chan->sdu_last_frag = skb;

		skb = NULL;
		err = 0;
		break;

	case L2CAP_SAR_CONTINUE:
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* Overrunning the announced SDU length is an error */
		if (chan->sdu->len >= chan->sdu_len)
			break;

		err = 0;
		break;

	case L2CAP_SAR_END:
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* The final length must match exactly what was announced */
		if (chan->sdu->len != chan->sdu_len)
			break;

		err = chan->ops->recv(chan, chan->sdu);

		if (!err) {
			/* Reassembly complete */
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;
		}
		break;
	}

	if (err) {
		/* Drop both the offending frame and any partial SDU */
		kfree_skb(skb);
		kfree_skb(chan->sdu);
		chan->sdu = NULL;
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;
	}

	return err;
}
4455
4456 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
4457 {
4458 u8 event;
4459
4460 if (chan->mode != L2CAP_MODE_ERTM)
4461 return;
4462
4463 event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
4464 l2cap_tx(chan, NULL, NULL, event);
4465 }
4466
/* Deliver consecutively-sequenced frames queued in srej_q.
 *
 * After SREJ recovery, frames may be buffered out of order; this pulls
 * frames matching buffer_seq one by one and reassembles them, stopping
 * at the first gap, reassembly error, or local busy.  When the queue
 * drains, the channel returns to the normal RECV state and the peer is
 * acked.
 */
static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
{
	int err = 0;
	/* Pass sequential frames to l2cap_reassemble_sdu()
	 * until a gap is encountered.
	 */

	BT_DBG("chan %p", chan);

	while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		struct sk_buff *skb;
		BT_DBG("Searching for skb with txseq %d (queue len %d)",
		       chan->buffer_seq, skb_queue_len(&chan->srej_q));

		skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);

		/* Gap found: the next expected frame has not arrived yet */
		if (!skb)
			break;

		skb_unlink(skb, &chan->srej_q);
		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
		err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->control);
		if (err)
			break;
	}

	if (skb_queue_empty(&chan->srej_q)) {
		/* Recovery finished: resume normal reception */
		chan->rx_state = L2CAP_RX_STATE_RECV;
		l2cap_send_ack(chan);
	}

	return err;
}
4500
/* Handle a received SREJ (selective reject) S-frame.
 *
 * The peer is asking for retransmission of the single frame at
 * control->reqseq.  Invalid requests (reqseq equal to next_tx_seq, or
 * retry limit exceeded) tear the channel down; an SREJ for a frame no
 * longer in the tx queue is ignored.  P/F-bit bookkeeping follows the
 * ERTM specification: a poll triggers an F-bit reply, and SREJ_ACT
 * suppresses a duplicate retransmission when the matching final
 * arrives.
 */
static void l2cap_handle_srej(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* The peer cannot reject a frame we have not sent yet */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	if (skb == NULL) {
		BT_DBG("Seq %d not available for retransmission",
		       control->reqseq);
		return;
	}

	/* max_tx == 0 means unlimited retransmissions */
	if (chan->max_tx != 0 && bt_cb(skb)->control.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	if (control->poll) {
		l2cap_pass_to_tx(chan, control);

		/* The poll must be answered with an F-bit */
		set_bit(CONN_SEND_FBIT, &chan->conn_state);
		l2cap_retransmit(chan, control);
		l2cap_ertm_send(chan);

		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
			set_bit(CONN_SREJ_ACT, &chan->conn_state);
			chan->srej_save_reqseq = control->reqseq;
		}
	} else {
		l2cap_pass_to_tx_fbit(chan, control);

		if (control->final) {
			/* Skip the retransmit only if this final matches
			 * the SREJ we already acted on.
			 */
			if (chan->srej_save_reqseq != control->reqseq ||
			    !test_and_clear_bit(CONN_SREJ_ACT,
						&chan->conn_state))
				l2cap_retransmit(chan, control);
		} else {
			l2cap_retransmit(chan, control);
			if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
				set_bit(CONN_SREJ_ACT, &chan->conn_state);
				chan->srej_save_reqseq = control->reqseq;
			}
		}
	}
}
4558
/* Handle a received REJ (reject) S-frame.
 *
 * The peer is asking for retransmission of everything from
 * control->reqseq onward.  Invalid requests (reqseq equal to
 * next_tx_seq, or retry limit exceeded on the rejected frame) tear the
 * channel down.  REJ_ACT suppresses a duplicate go-back-N when the
 * matching final arrives.
 */
static void l2cap_handle_rej(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* The peer cannot reject a frame we have not sent yet */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	/* max_tx == 0 means unlimited retransmissions */
	if (chan->max_tx && skb &&
	    bt_cb(skb)->control.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	l2cap_pass_to_tx(chan, control);

	if (control->final) {
		/* Retransmit only if this final was not already handled */
		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
			l2cap_retransmit_all(chan, control);
	} else {
		l2cap_retransmit_all(chan, control);
		l2cap_ertm_send(chan);
		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
			set_bit(CONN_REJ_ACT, &chan->conn_state);
	}
}
4595
/* Classify the sequence number of a received I-frame.
 *
 * Compares txseq against the receive window anchored at last_acked_seq
 * and against expected_tx_seq, returning one of the L2CAP_TXSEQ_*
 * classifications that drive the RX state machine: expected, duplicate,
 * unexpected (gap => SREJ needed), SREJ-related results while in
 * SREJ_SENT state, or invalid (ignorable only for small tx windows,
 * see the "double poll" discussion below).
 */
static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
{
	BT_DBG("chan %p, txseq %d", chan, txseq);

	BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
	       chan->expected_tx_seq);

	if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			/* See notes below regarding "double poll" and
			 * invalid packets.
			 */
			if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
				BT_DBG("Invalid/Ignore - after SREJ");
				return L2CAP_TXSEQ_INVALID_IGNORE;
			} else {
				BT_DBG("Invalid - in window after SREJ sent");
				return L2CAP_TXSEQ_INVALID;
			}
		}

		if (chan->srej_list.head == txseq) {
			BT_DBG("Expected SREJ");
			return L2CAP_TXSEQ_EXPECTED_SREJ;
		}

		if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
			BT_DBG("Duplicate SREJ - txseq already stored");
			return L2CAP_TXSEQ_DUPLICATE_SREJ;
		}

		if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
			BT_DBG("Unexpected SREJ - not requested");
			return L2CAP_TXSEQ_UNEXPECTED_SREJ;
		}
	}

	if (chan->expected_tx_seq == txseq) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		} else {
			BT_DBG("Expected");
			return L2CAP_TXSEQ_EXPECTED;
		}
	}

	/* txseq earlier than expected_tx_seq (modulo window) => already seen */
	if (__seq_offset(chan, txseq, chan->last_acked_seq) <
	    __seq_offset(chan, chan->expected_tx_seq,
			 chan->last_acked_seq)){
		BT_DBG("Duplicate - expected_tx_seq later than txseq");
		return L2CAP_TXSEQ_DUPLICATE;
	}

	if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
		/* A source of invalid packets is a "double poll" condition,
		 * where delays cause us to send multiple poll packets. If
		 * the remote stack receives and processes both polls,
		 * sequence numbers can wrap around in such a way that a
		 * resent frame has a sequence number that looks like new data
		 * with a sequence gap. This would trigger an erroneous SREJ
		 * request.
		 *
		 * Fortunately, this is impossible with a tx window that's
		 * less than half of the maximum sequence number, which allows
		 * invalid frames to be safely ignored.
		 *
		 * With tx window sizes greater than half of the tx window
		 * maximum, the frame is invalid and cannot be ignored. This
		 * causes a disconnect.
		 */

		if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
			BT_DBG("Invalid/Ignore - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID_IGNORE;
		} else {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		}
	} else {
		BT_DBG("Unexpected - txseq indicates missing frames");
		return L2CAP_TXSEQ_UNEXPECTED;
	}
}
4682
/* ERTM receive state machine: normal RECV state.
 *
 * Processes one RX event (I-frame or RR/RNR/REJ/SREJ S-frame).  Frame
 * ownership: the skb is freed here unless it was consumed (handed to
 * reassembly or queued on srej_q), which the handlers record by setting
 * skb_in_use.  A sequence gap switches the channel to SREJ_SENT and
 * starts selective-reject recovery.
 */
static int l2cap_rx_state_recv(struct l2cap_chan *chan,
			       struct l2cap_ctrl *control,
			       struct sk_buff *skb, u8 event)
{
	int err = 0;
	bool skb_in_use = 0;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, control->txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			l2cap_pass_to_tx(chan, control);

			/* While locally busy, expected frames are dropped;
			 * the peer will retransmit once busy is cleared.
			 */
			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding expected seq %d",
				       control->txseq);
				break;
			}

			chan->expected_tx_seq = __next_seq(chan,
							   control->txseq);

			chan->buffer_seq = chan->expected_tx_seq;
			skb_in_use = 1;

			err = l2cap_reassemble_sdu(chan, skb, control);
			if (err)
				break;

			if (control->final) {
				/* Answer to an outstanding poll: retransmit
				 * unless this final was already handled.
				 */
				if (!test_and_clear_bit(CONN_REJ_ACT,
							&chan->conn_state)) {
					control->final = 0;
					l2cap_retransmit_all(chan, control);
					l2cap_ertm_send(chan);
				}
			}

			if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
				l2cap_send_ack(chan);
			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			l2cap_pass_to_tx(chan, control);

			/* Can't issue SREJ frames in the local busy state.
			 * Drop this frame, it will be seen as missing
			 * when local busy is exited.
			 */
			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding unexpected seq %d",
				       control->txseq);
				break;
			}

			/* There was a gap in the sequence, so an SREJ
			 * must be sent for each missing frame. The
			 * current frame is stored for later use.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = 1;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			clear_bit(CONN_SREJ_ACT, &chan->conn_state);
			l2cap_seq_list_clear(&chan->srej_list);
			l2cap_send_srej(chan, control->txseq);

			chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			/* Already received: only the ack info is of use */
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(chan->conn, chan,
					       ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			if (!test_and_clear_bit(CONN_REJ_ACT,
						&chan->conn_state)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			l2cap_send_i_or_rr_or_rnr(chan);
		} else {
			/* Plain RR: remote is no longer busy, keep the
			 * retransmission timer alive for unacked frames.
			 */
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_ertm_send(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		if (control && control->poll) {
			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_rr_or_rnr(chan, 0);
		}
		/* Remote busy: stop retransmitting until it recovers */
		__clear_retrans_timer(chan);
		l2cap_seq_list_clear(&chan->retrans_list);
		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	default:
		break;
	}

	/* Frame was neither consumed by reassembly nor queued */
	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
4817
4818 static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
4819 struct l2cap_ctrl *control,
4820 struct sk_buff *skb, u8 event)
4821 {
4822 int err = 0;
4823 u16 txseq = control->txseq;
4824 bool skb_in_use = 0;
4825
4826 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
4827 event);
4828
4829 switch (event) {
4830 case L2CAP_EV_RECV_IFRAME:
4831 switch (l2cap_classify_txseq(chan, txseq)) {
4832 case L2CAP_TXSEQ_EXPECTED:
4833 /* Keep frame for reassembly later */
4834 l2cap_pass_to_tx(chan, control);
4835 skb_queue_tail(&chan->srej_q, skb);
4836 skb_in_use = 1;
4837 BT_DBG("Queued %p (queue len %d)", skb,
4838 skb_queue_len(&chan->srej_q));
4839
4840 chan->expected_tx_seq = __next_seq(chan, txseq);
4841 break;
4842 case L2CAP_TXSEQ_EXPECTED_SREJ:
4843 l2cap_seq_list_pop(&chan->srej_list);
4844
4845 l2cap_pass_to_tx(chan, control);
4846 skb_queue_tail(&chan->srej_q, skb);
4847 skb_in_use = 1;
4848 BT_DBG("Queued %p (queue len %d)", skb,
4849 skb_queue_len(&chan->srej_q));
4850
4851 err = l2cap_rx_queued_iframes(chan);
4852 if (err)
4853 break;
4854
4855 break;
4856 case L2CAP_TXSEQ_UNEXPECTED:
4857 /* Got a frame that can't be reassembled yet.
4858 * Save it for later, and send SREJs to cover
4859 * the missing frames.
4860 */
4861 skb_queue_tail(&chan->srej_q, skb);
4862 skb_in_use = 1;
4863 BT_DBG("Queued %p (queue len %d)", skb,
4864 skb_queue_len(&chan->srej_q));
4865
4866 l2cap_pass_to_tx(chan, control);
4867 l2cap_send_srej(chan, control->txseq);
4868 break;
4869 case L2CAP_TXSEQ_UNEXPECTED_SREJ:
4870 /* This frame was requested with an SREJ, but
4871 * some expected retransmitted frames are
4872 * missing. Request retransmission of missing
4873 * SREJ'd frames.
4874 */
4875 skb_queue_tail(&chan->srej_q, skb);
4876 skb_in_use = 1;
4877 BT_DBG("Queued %p (queue len %d)", skb,
4878 skb_queue_len(&chan->srej_q));
4879
4880 l2cap_pass_to_tx(chan, control);
4881 l2cap_send_srej_list(chan, control->txseq);
4882 break;
4883 case L2CAP_TXSEQ_DUPLICATE_SREJ:
4884 /* We've already queued this frame. Drop this copy. */
4885 l2cap_pass_to_tx(chan, control);
4886 break;
4887 case L2CAP_TXSEQ_DUPLICATE:
4888 /* Expecting a later sequence number, so this frame
4889 * was already received. Ignore it completely.
4890 */
4891 break;
4892 case L2CAP_TXSEQ_INVALID_IGNORE:
4893 break;
4894 case L2CAP_TXSEQ_INVALID:
4895 default:
4896 l2cap_send_disconn_req(chan->conn, chan,
4897 ECONNRESET);
4898 break;
4899 }
4900 break;
4901 case L2CAP_EV_RECV_RR:
4902 l2cap_pass_to_tx(chan, control);
4903 if (control->final) {
4904 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4905
4906 if (!test_and_clear_bit(CONN_REJ_ACT,
4907 &chan->conn_state)) {
4908 control->final = 0;
4909 l2cap_retransmit_all(chan, control);
4910 }
4911
4912 l2cap_ertm_send(chan);
4913 } else if (control->poll) {
4914 if (test_and_clear_bit(CONN_REMOTE_BUSY,
4915 &chan->conn_state) &&
4916 chan->unacked_frames) {
4917 __set_retrans_timer(chan);
4918 }
4919
4920 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4921 l2cap_send_srej_tail(chan);
4922 } else {
4923 if (test_and_clear_bit(CONN_REMOTE_BUSY,
4924 &chan->conn_state) &&
4925 chan->unacked_frames)
4926 __set_retrans_timer(chan);
4927
4928 l2cap_send_ack(chan);
4929 }
4930 break;
4931 case L2CAP_EV_RECV_RNR:
4932 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4933 l2cap_pass_to_tx(chan, control);
4934 if (control->poll) {
4935 l2cap_send_srej_tail(chan);
4936 } else {
4937 struct l2cap_ctrl rr_control;
4938 memset(&rr_control, 0, sizeof(rr_control));
4939 rr_control.sframe = 1;
4940 rr_control.super = L2CAP_SUPER_RR;
4941 rr_control.reqseq = chan->buffer_seq;
4942 l2cap_send_sframe(chan, &rr_control);
4943 }
4944
4945 break;
4946 case L2CAP_EV_RECV_REJ:
4947 l2cap_handle_rej(chan, control);
4948 break;
4949 case L2CAP_EV_RECV_SREJ:
4950 l2cap_handle_srej(chan, control);
4951 break;
4952 }
4953
4954 if (skb && !skb_in_use) {
4955 BT_DBG("Freeing %p", skb);
4956 kfree_skb(skb);
4957 }
4958
4959 return err;
4960 }
4961
4962 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
4963 {
4964 /* Make sure reqseq is for a packet that has been sent but not acked */
4965 u16 unacked;
4966
4967 unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
4968 return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
4969 }
4970
4971 static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
4972 struct sk_buff *skb, u8 event)
4973 {
4974 int err = 0;
4975
4976 BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
4977 control, skb, event, chan->rx_state);
4978
4979 if (__valid_reqseq(chan, control->reqseq)) {
4980 switch (chan->rx_state) {
4981 case L2CAP_RX_STATE_RECV:
4982 err = l2cap_rx_state_recv(chan, control, skb, event);
4983 break;
4984 case L2CAP_RX_STATE_SREJ_SENT:
4985 err = l2cap_rx_state_srej_sent(chan, control, skb,
4986 event);
4987 break;
4988 default:
4989 /* shut it down */
4990 break;
4991 }
4992 } else {
4993 BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
4994 control->reqseq, chan->next_tx_seq,
4995 chan->expected_ack_seq);
4996 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4997 }
4998
4999 return err;
5000 }
5001
5002 static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
5003 struct sk_buff *skb)
5004 {
5005 int err = 0;
5006
5007 BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
5008 chan->rx_state);
5009
5010 if (l2cap_classify_txseq(chan, control->txseq) ==
5011 L2CAP_TXSEQ_EXPECTED) {
5012 l2cap_pass_to_tx(chan, control);
5013
5014 BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
5015 __next_seq(chan, chan->buffer_seq));
5016
5017 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
5018
5019 l2cap_reassemble_sdu(chan, skb, control);
5020 } else {
5021 if (chan->sdu) {
5022 kfree_skb(chan->sdu);
5023 chan->sdu = NULL;
5024 }
5025 chan->sdu_last_frag = NULL;
5026 chan->sdu_len = 0;
5027
5028 if (skb) {
5029 BT_DBG("Freeing %p", skb);
5030 kfree_skb(skb);
5031 }
5032 }
5033
5034 chan->last_acked_seq = control->txseq;
5035 chan->expected_tx_seq = __next_seq(chan, control->txseq);
5036
5037 return err;
5038 }
5039
5040 static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
5041 {
5042 struct l2cap_ctrl *control = &bt_cb(skb)->control;
5043 u16 len;
5044 u8 event;
5045
5046 __unpack_control(chan, skb);
5047
5048 len = skb->len;
5049
5050 /*
5051 * We can just drop the corrupted I-frame here.
5052 * Receiver will miss it and start proper recovery
5053 * procedures and ask for retransmission.
5054 */
5055 if (l2cap_check_fcs(chan, skb))
5056 goto drop;
5057
5058 if (!control->sframe && control->sar == L2CAP_SAR_START)
5059 len -= L2CAP_SDULEN_SIZE;
5060
5061 if (chan->fcs == L2CAP_FCS_CRC16)
5062 len -= L2CAP_FCS_SIZE;
5063
5064 if (len > chan->mps) {
5065 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
5066 goto drop;
5067 }
5068
5069 if (!control->sframe) {
5070 int err;
5071
5072 BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
5073 control->sar, control->reqseq, control->final,
5074 control->txseq);
5075
5076 /* Validate F-bit - F=0 always valid, F=1 only
5077 * valid in TX WAIT_F
5078 */
5079 if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
5080 goto drop;
5081
5082 if (chan->mode != L2CAP_MODE_STREAMING) {
5083 event = L2CAP_EV_RECV_IFRAME;
5084 err = l2cap_rx(chan, control, skb, event);
5085 } else {
5086 err = l2cap_stream_rx(chan, control, skb);
5087 }
5088
5089 if (err)
5090 l2cap_send_disconn_req(chan->conn, chan,
5091 ECONNRESET);
5092 } else {
5093 const u8 rx_func_to_event[4] = {
5094 L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
5095 L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
5096 };
5097
5098 /* Only I-frames are expected in streaming mode */
5099 if (chan->mode == L2CAP_MODE_STREAMING)
5100 goto drop;
5101
5102 BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
5103 control->reqseq, control->final, control->poll,
5104 control->super);
5105
5106 if (len != 0) {
5107 BT_ERR("%d", len);
5108 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
5109 goto drop;
5110 }
5111
5112 /* Validate F and P bits */
5113 if (control->final && (control->poll ||
5114 chan->tx_state != L2CAP_TX_STATE_WAIT_F))
5115 goto drop;
5116
5117 event = rx_func_to_event[control->super];
5118 if (l2cap_rx(chan, control, skb, event))
5119 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
5120 }
5121
5122 return 0;
5123
5124 drop:
5125 kfree_skb(skb);
5126 return 0;
5127 }
5128
/* Deliver an incoming frame to the connection-oriented channel with the
 * given source CID.
 *
 * NOTE(review): l2cap_get_chan_by_scid() appears to return the channel
 * locked -- every successful-lookup exit path below runs through
 * l2cap_chan_unlock(); confirm against its definition.
 *
 * The skb is consumed on all paths: freed here, or ownership passed to
 * the channel's recv callback / the ERTM receive path.  Always returns 0.
 */
static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
{
	struct l2cap_chan *chan;

	chan = l2cap_get_chan_by_scid(conn, cid);
	if (!chan) {
		BT_DBG("unknown cid 0x%4.4x", cid);
		/* Drop packet and return */
		kfree_skb(skb);
		return 0;
	}

	BT_DBG("chan %p, len %d", chan, skb->len);

	if (chan->state != BT_CONNECTED)
		goto drop;

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism. */

		if (chan->imtu < skb->len)
			goto drop;

		/* recv returning 0 means it took ownership of the skb */
		if (!chan->ops->recv(chan, skb))
			goto done;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* l2cap_data_rcv() consumes the skb in all cases */
		l2cap_data_rcv(chan, skb);
		goto done;

	default:
		BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
		break;
	}

drop:
	kfree_skb(skb);

done:
	l2cap_chan_unlock(chan);

	return 0;
}
5178
5179 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
5180 {
5181 struct l2cap_chan *chan;
5182
5183 chan = l2cap_global_chan_by_psm(0, psm, conn->src, conn->dst);
5184 if (!chan)
5185 goto drop;
5186
5187 BT_DBG("chan %p, len %d", chan, skb->len);
5188
5189 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
5190 goto drop;
5191
5192 if (chan->imtu < skb->len)
5193 goto drop;
5194
5195 if (!chan->ops->recv(chan, skb))
5196 return 0;
5197
5198 drop:
5199 kfree_skb(skb);
5200
5201 return 0;
5202 }
5203
5204 static inline int l2cap_att_channel(struct l2cap_conn *conn, u16 cid,
5205 struct sk_buff *skb)
5206 {
5207 struct l2cap_chan *chan;
5208
5209 chan = l2cap_global_chan_by_scid(0, cid, conn->src, conn->dst);
5210 if (!chan)
5211 goto drop;
5212
5213 BT_DBG("chan %p, len %d", chan, skb->len);
5214
5215 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
5216 goto drop;
5217
5218 if (chan->imtu < skb->len)
5219 goto drop;
5220
5221 if (!chan->ops->recv(chan, skb))
5222 return 0;
5223
5224 drop:
5225 kfree_skb(skb);
5226
5227 return 0;
5228 }
5229
5230 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
5231 {
5232 struct l2cap_hdr *lh = (void *) skb->data;
5233 u16 cid, len;
5234 __le16 psm;
5235
5236 skb_pull(skb, L2CAP_HDR_SIZE);
5237 cid = __le16_to_cpu(lh->cid);
5238 len = __le16_to_cpu(lh->len);
5239
5240 if (len != skb->len) {
5241 kfree_skb(skb);
5242 return;
5243 }
5244
5245 BT_DBG("len %d, cid 0x%4.4x", len, cid);
5246
5247 switch (cid) {
5248 case L2CAP_CID_LE_SIGNALING:
5249 case L2CAP_CID_SIGNALING:
5250 l2cap_sig_channel(conn, skb);
5251 break;
5252
5253 case L2CAP_CID_CONN_LESS:
5254 psm = get_unaligned((__le16 *) skb->data);
5255 skb_pull(skb, L2CAP_PSMLEN_SIZE);
5256 l2cap_conless_channel(conn, psm, skb);
5257 break;
5258
5259 case L2CAP_CID_LE_DATA:
5260 l2cap_att_channel(conn, cid, skb);
5261 break;
5262
5263 case L2CAP_CID_SMP:
5264 if (smp_sig_channel(conn, skb))
5265 l2cap_conn_del(conn->hcon, EACCES);
5266 break;
5267
5268 default:
5269 l2cap_data_channel(conn, cid, skb);
5270 break;
5271 }
5272 }
5273
5274 /* ---- L2CAP interface with lower layer (HCI) ---- */
5275
5276 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
5277 {
5278 int exact = 0, lm1 = 0, lm2 = 0;
5279 struct l2cap_chan *c;
5280
5281 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
5282
5283 /* Find listening sockets and check their link_mode */
5284 read_lock(&chan_list_lock);
5285 list_for_each_entry(c, &chan_list, global_l) {
5286 struct sock *sk = c->sk;
5287
5288 if (c->state != BT_LISTEN)
5289 continue;
5290
5291 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
5292 lm1 |= HCI_LM_ACCEPT;
5293 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
5294 lm1 |= HCI_LM_MASTER;
5295 exact++;
5296 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
5297 lm2 |= HCI_LM_ACCEPT;
5298 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
5299 lm2 |= HCI_LM_MASTER;
5300 }
5301 }
5302 read_unlock(&chan_list_lock);
5303
5304 return exact ? lm1 : lm2;
5305 }
5306
5307 int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
5308 {
5309 struct l2cap_conn *conn;
5310
5311 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
5312
5313 if (!status) {
5314 conn = l2cap_conn_add(hcon, status);
5315 if (conn)
5316 l2cap_conn_ready(conn);
5317 } else
5318 l2cap_conn_del(hcon, bt_to_errno(status));
5319
5320 return 0;
5321 }
5322
5323 int l2cap_disconn_ind(struct hci_conn *hcon)
5324 {
5325 struct l2cap_conn *conn = hcon->l2cap_data;
5326
5327 BT_DBG("hcon %p", hcon);
5328
5329 if (!conn)
5330 return HCI_ERROR_REMOTE_USER_TERM;
5331 return conn->disc_reason;
5332 }
5333
/* HCI callback: ACL link went down; tear down all L2CAP state for it,
 * translating the HCI reason code to an errno for the upper layers.
 */
int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
{
	BT_DBG("hcon %p reason %d", hcon, reason);

	l2cap_conn_del(hcon, bt_to_errno(reason));
	return 0;
}
5341
5342 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
5343 {
5344 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
5345 return;
5346
5347 if (encrypt == 0x00) {
5348 if (chan->sec_level == BT_SECURITY_MEDIUM) {
5349 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
5350 } else if (chan->sec_level == BT_SECURITY_HIGH)
5351 l2cap_chan_close(chan, ECONNREFUSED);
5352 } else {
5353 if (chan->sec_level == BT_SECURITY_MEDIUM)
5354 __clear_chan_timer(chan);
5355 }
5356 }
5357
/* HCI callback: security (authentication/encryption) procedure finished
 * for the link.  Walk every channel on the connection and advance its
 * state machine accordingly.
 *
 * Locking: conn->chan_lock guards the channel list; each channel is
 * individually locked while it is being processed and unlocked before
 * the next iteration (note the `continue` paths unlock explicitly).
 *
 * Always returns 0.
 */
int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan;

	if (!conn)
		return 0;

	BT_DBG("conn %p", conn);

	/* On LE links, successful encryption triggers SMP key distribution
	 * and the pairing security timer is no longer needed.
	 */
	if (hcon->type == LE_LINK) {
		if (!status && encrypt)
			smp_distribute_keys(conn, 0);
		cancel_delayed_work(&conn->security_timer);
	}

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		BT_DBG("chan->scid %d", chan->scid);

		/* ATT fixed channel: becomes ready once the link encrypts. */
		if (chan->scid == L2CAP_CID_LE_DATA) {
			if (!status && encrypt) {
				chan->sec_level = hcon->sec_level;
				l2cap_chan_ready(chan);
			}

			l2cap_chan_unlock(chan);
			continue;
		}

		/* A connect request is already pending for this channel. */
		if (test_bit(CONF_CONNECT_PEND, &chan->conf_state)) {
			l2cap_chan_unlock(chan);
			continue;
		}

		/* Established channels just re-check their encryption
		 * requirements and wake any suspended socket writers.
		 */
		if (!status && (chan->state == BT_CONNECTED ||
				chan->state == BT_CONFIG)) {
			struct sock *sk = chan->sk;

			clear_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags);
			sk->sk_state_change(sk);

			l2cap_check_encryption(chan, encrypt);
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			/* Security done; now send the deferred conn req,
			 * or give up on this channel after a timeout.
			 */
			if (!status) {
				l2cap_send_conn_req(chan);
			} else {
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
			}
		} else if (chan->state == BT_CONNECT2) {
			/* Incoming connection was held for security; answer
			 * the remote's connect request now.
			 */
			struct sock *sk = chan->sk;
			struct l2cap_conn_rsp rsp;
			__u16 res, stat;

			lock_sock(sk);

			if (!status) {
				if (test_bit(BT_SK_DEFER_SETUP,
					     &bt_sk(sk)->flags)) {
					/* Userspace must still authorize:
					 * answer "pending" and wake the
					 * listening parent socket.
					 */
					struct sock *parent = bt_sk(sk)->parent;
					res = L2CAP_CR_PEND;
					stat = L2CAP_CS_AUTHOR_PEND;
					if (parent)
						parent->sk_data_ready(parent, 0);
				} else {
					__l2cap_state_change(chan, BT_CONFIG);
					res = L2CAP_CR_SUCCESS;
					stat = L2CAP_CS_NO_INFO;
				}
			} else {
				__l2cap_state_change(chan, BT_DISCONN);
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
				res = L2CAP_CR_SEC_BLOCK;
				stat = L2CAP_CS_NO_INFO;
			}

			release_sock(sk);

			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);
			rsp.result = cpu_to_le16(res);
			rsp.status = cpu_to_le16(stat);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			/* On acceptance, immediately start configuration. */
			if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
			    res == L2CAP_CR_SUCCESS) {
				char buf[128];
				set_bit(CONF_REQ_SENT, &chan->conf_state);
				l2cap_send_cmd(conn, l2cap_get_ident(conn),
					       L2CAP_CONF_REQ,
					       l2cap_build_conf_req(chan, buf),
					       buf);
				chan->num_conf_req++;
			}
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);

	return 0;
}
5469
/* HCI callback: an ACL data packet arrived for this link.  Reassemble
 * fragmented L2CAP frames (start fragment carries the Basic L2CAP header
 * with the total length; continuations are appended to conn->rx_skb)
 * and hand each complete frame to l2cap_recv_frame().
 *
 * Ownership: the incoming skb is always consumed here (it is only ever
 * a fragment buffer we copy out of), except on the complete-start-frame
 * fast path where it is passed on to l2cap_recv_frame() directly.
 * Always returns 0.
 */
int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	if (!conn)
		conn = l2cap_conn_add(hcon, 0);

	if (!conn)
		goto drop;

	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);

	if (!(flags & ACL_CONT)) {
		struct l2cap_hdr *hdr;
		int len;

		/* A new start frame while reassembly is in progress means
		 * the previous frame was truncated - discard it.
		 */
		if (conn->rx_len) {
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
		}

		/* Start fragment always begin with Basic L2CAP header */
		if (skb->len < L2CAP_HDR_SIZE) {
			BT_ERR("Frame is too short (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		hdr = (struct l2cap_hdr *) skb->data;
		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);
			return 0;
		}

		BT_DBG("Start: total len %d, frag len %d", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %d, expected len %d)",
			       skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!conn->rx_skb)
			goto drop;

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
					  skb->len);
		/* rx_len tracks how many bytes are still outstanding. */
		conn->rx_len = len - skb->len;
	} else {
		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);

		if (!conn->rx_len) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %d, expected %d)",
			       skb->len, conn->rx_len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
					  skb->len);
		conn->rx_len -= skb->len;

		if (!conn->rx_len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, conn->rx_skb);
			conn->rx_skb = NULL;
		}
	}

drop:
	kfree_skb(skb);
	return 0;
}
5561
5562 static int l2cap_debugfs_show(struct seq_file *f, void *p)
5563 {
5564 struct l2cap_chan *c;
5565
5566 read_lock(&chan_list_lock);
5567
5568 list_for_each_entry(c, &chan_list, global_l) {
5569 struct sock *sk = c->sk;
5570
5571 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
5572 batostr(&bt_sk(sk)->src),
5573 batostr(&bt_sk(sk)->dst),
5574 c->state, __le16_to_cpu(c->psm),
5575 c->scid, c->dcid, c->imtu, c->omtu,
5576 c->sec_level, c->mode);
5577 }
5578
5579 read_unlock(&chan_list_lock);
5580
5581 return 0;
5582 }
5583
/* debugfs open callback: wire the file to the seq_file show routine. */
static int l2cap_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, l2cap_debugfs_show, inode->i_private);
}
5588
/* File operations for the read-only L2CAP debugfs channel listing. */
static const struct file_operations l2cap_debugfs_fops = {
	.open = l2cap_debugfs_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

/* Dentry of the debugfs file; created in l2cap_init(), removed in
 * l2cap_exit().
 */
static struct dentry *l2cap_debugfs;
5597
5598 int __init l2cap_init(void)
5599 {
5600 int err;
5601
5602 err = l2cap_init_sockets();
5603 if (err < 0)
5604 return err;
5605
5606 if (bt_debugfs) {
5607 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
5608 bt_debugfs, NULL, &l2cap_debugfs_fops);
5609 if (!l2cap_debugfs)
5610 BT_ERR("Failed to create L2CAP debug file");
5611 }
5612
5613 return 0;
5614 }
5615
/* Module exit: remove the debugfs entry (safe if creation failed) and
 * unregister the L2CAP socket layer.
 */
void l2cap_exit(void)
{
	debugfs_remove(l2cap_debugfs);
	l2cap_cleanup_sockets();
}
5621
/* Allow disabling ERTM at load time (interoperability/debug aid). */
module_param(disable_ertm, bool, 0644);
MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");