Bluetooth: AMP: Handle complete frames in l2cap
[deliverable/linux.git] / net / bluetooth / l2cap_core.c
1/*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
8
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
10
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
14
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
23
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
27*/
28
29/* Bluetooth L2CAP core. */
30
31#include <linux/module.h>
32
33#include <linux/debugfs.h>
34#include <linux/crc16.h>
35
36#include <net/bluetooth/bluetooth.h>
37#include <net/bluetooth/hci_core.h>
38#include <net/bluetooth/l2cap.h>
39#include <net/bluetooth/smp.h>
40#include <net/bluetooth/a2mp.h>
41
42bool disable_ertm;
43
44static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
45static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP, };
46
47static LIST_HEAD(chan_list);
48static DEFINE_RWLOCK(chan_list_lock);
49
50static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
51 u8 code, u8 ident, u16 dlen, void *data);
52static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
53 void *data);
54static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
55static void l2cap_send_disconn_req(struct l2cap_conn *conn,
56 struct l2cap_chan *chan, int err);
57
58static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
59 struct sk_buff_head *skbs, u8 event);
60
61/* ---- L2CAP channels ---- */
62
63static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
64 u16 cid)
65{
66 struct l2cap_chan *c;
67
68 list_for_each_entry(c, &conn->chan_l, list) {
69 if (c->dcid == cid)
70 return c;
71 }
72 return NULL;
73}
74
75static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
76 u16 cid)
77{
78 struct l2cap_chan *c;
79
80 list_for_each_entry(c, &conn->chan_l, list) {
81 if (c->scid == cid)
82 return c;
83 }
84 return NULL;
85}
86
87/* Find channel with given SCID.
88 * Returns locked channel. */
89static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
90 u16 cid)
91{
92 struct l2cap_chan *c;
93
94 mutex_lock(&conn->chan_lock);
95 c = __l2cap_get_chan_by_scid(conn, cid);
96 if (c)
97 l2cap_chan_lock(c);
98 mutex_unlock(&conn->chan_lock);
99
100 return c;
101}
102
103static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
104 u8 ident)
105{
106 struct l2cap_chan *c;
107
108 list_for_each_entry(c, &conn->chan_l, list) {
109 if (c->ident == ident)
110 return c;
111 }
112 return NULL;
113}
114
115static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
116{
117 struct l2cap_chan *c;
118
119 list_for_each_entry(c, &chan_list, global_l) {
120 if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
121 return c;
122 }
123 return NULL;
124}
125
126int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
127{
128 int err;
129
130 write_lock(&chan_list_lock);
131
132 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
133 err = -EADDRINUSE;
134 goto done;
135 }
136
137 if (psm) {
138 chan->psm = psm;
139 chan->sport = psm;
140 err = 0;
141 } else {
142 u16 p;
143
144 err = -EINVAL;
145 for (p = 0x1001; p < 0x1100; p += 2)
146 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
147 chan->psm = cpu_to_le16(p);
148 chan->sport = cpu_to_le16(p);
149 err = 0;
150 break;
151 }
152 }
153
154done:
155 write_unlock(&chan_list_lock);
156 return err;
157}
158
159int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
160{
161 write_lock(&chan_list_lock);
162
163 chan->scid = scid;
164
165 write_unlock(&chan_list_lock);
166
167 return 0;
168}
169
170static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
171{
172 u16 cid = L2CAP_CID_DYN_START;
173
174 for (; cid < L2CAP_CID_DYN_END; cid++) {
175 if (!__l2cap_get_chan_by_scid(conn, cid))
176 return cid;
177 }
178
179 return 0;
180}
181
182static void __l2cap_state_change(struct l2cap_chan *chan, int state)
183{
184 BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
185 state_to_string(state));
186
187 chan->state = state;
188 chan->ops->state_change(chan, state);
189}
190
191static void l2cap_state_change(struct l2cap_chan *chan, int state)
192{
193 struct sock *sk = chan->sk;
194
195 lock_sock(sk);
196 __l2cap_state_change(chan, state);
197 release_sock(sk);
198}
199
200static inline void __l2cap_chan_set_err(struct l2cap_chan *chan, int err)
201{
202 struct sock *sk = chan->sk;
203
204 sk->sk_err = err;
205}
206
207static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
208{
209 struct sock *sk = chan->sk;
210
211 lock_sock(sk);
212 __l2cap_chan_set_err(chan, err);
213 release_sock(sk);
214}
215
216static void __set_retrans_timer(struct l2cap_chan *chan)
217{
218 if (!delayed_work_pending(&chan->monitor_timer) &&
219 chan->retrans_timeout) {
220 l2cap_set_timer(chan, &chan->retrans_timer,
221 msecs_to_jiffies(chan->retrans_timeout));
222 }
223}
224
225static void __set_monitor_timer(struct l2cap_chan *chan)
226{
227 __clear_retrans_timer(chan);
228 if (chan->monitor_timeout) {
229 l2cap_set_timer(chan, &chan->monitor_timer,
230 msecs_to_jiffies(chan->monitor_timeout));
231 }
232}
233
234static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
235 u16 seq)
236{
237 struct sk_buff *skb;
238
239 skb_queue_walk(head, skb) {
240 if (bt_cb(skb)->control.txseq == seq)
241 return skb;
242 }
243
244 return NULL;
245}
246
247/* ---- L2CAP sequence number lists ---- */
248
249/* For ERTM, ordered lists of sequence numbers must be tracked for
250 * SREJ requests that are received and for frames that are to be
251 * retransmitted. These seq_list functions implement a singly-linked
252 * list in an array, where membership in the list can also be checked
253 * in constant time. Items can also be added to the tail of the list
254 * and removed from the head in constant time, without further memory
255 * allocs or frees.
256 */
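/* A short usage sketch of the helpers below, assuming a seq_list sized
 * for a transmit window of 8 (hypothetical values):
 *
 *	struct l2cap_seq_list list;
 *
 *	l2cap_seq_list_init(&list, 8);		// 8 slots, mask 0x07
 *	l2cap_seq_list_append(&list, 2);	// head = tail = 2
 *	l2cap_seq_list_append(&list, 5);	// list[2] = 5, tail = 5
 *	l2cap_seq_list_contains(&list, 5);	// true, single array lookup
 *	l2cap_seq_list_pop(&list);		// returns 2, head becomes 5
 *	l2cap_seq_list_free(&list);
 */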
257
258static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
259{
260 size_t alloc_size, i;
261
262 /* Allocated size is a power of 2 to map sequence numbers
263 * (which may be up to 14 bits) in to a smaller array that is
264 * sized for the negotiated ERTM transmit windows.
265 */
266 alloc_size = roundup_pow_of_two(size);
267
268 seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
269 if (!seq_list->list)
270 return -ENOMEM;
271
272 seq_list->mask = alloc_size - 1;
273 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
274 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
275 for (i = 0; i < alloc_size; i++)
276 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
277
278 return 0;
279}
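/* For example (hypothetical window size): a negotiated tx_win of 63 is
 * rounded up to a 64-entry array with mask 0x3f, so a 14-bit sequence
 * number such as 1000 maps to slot 1000 & 0x3f == 40 in constant time.
 */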
280
281static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
282{
283 kfree(seq_list->list);
284}
285
286static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
287 u16 seq)
288{
289 /* Constant-time check for list membership */
290 return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
291}
292
293static u16 l2cap_seq_list_remove(struct l2cap_seq_list *seq_list, u16 seq)
294{
295 u16 mask = seq_list->mask;
296
297 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR) {
298 /* In case someone tries to pop the head of an empty list */
299 return L2CAP_SEQ_LIST_CLEAR;
300 } else if (seq_list->head == seq) {
301 /* Head can be removed in constant time */
302 seq_list->head = seq_list->list[seq & mask];
303 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
304
305 if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
306 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
307 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
308 }
309 } else {
310 /* Walk the list to find the sequence number */
311 u16 prev = seq_list->head;
312 while (seq_list->list[prev & mask] != seq) {
313 prev = seq_list->list[prev & mask];
314 if (prev == L2CAP_SEQ_LIST_TAIL)
315 return L2CAP_SEQ_LIST_CLEAR;
316 }
317
318 /* Unlink the number from the list and clear it */
319 seq_list->list[prev & mask] = seq_list->list[seq & mask];
320 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
321 if (seq_list->tail == seq)
322 seq_list->tail = prev;
323 }
324 return seq;
325}
326
327static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
328{
329 /* Remove the head in constant time */
330 return l2cap_seq_list_remove(seq_list, seq_list->head);
331}
332
333static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
334{
335 u16 i;
336
337 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
338 return;
339
340 for (i = 0; i <= seq_list->mask; i++)
341 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
342
343 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
344 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
345}
346
347static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
348{
349 u16 mask = seq_list->mask;
350
351 /* All appends happen in constant time */
352
353 if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
354 return;
355
356 if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
357 seq_list->head = seq;
358 else
359 seq_list->list[seq_list->tail & mask] = seq;
360
361 seq_list->tail = seq;
362 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
363}
364
365static void l2cap_chan_timeout(struct work_struct *work)
366{
367 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
368 chan_timer.work);
369 struct l2cap_conn *conn = chan->conn;
370 int reason;
371
372 BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
373
374 mutex_lock(&conn->chan_lock);
375 l2cap_chan_lock(chan);
376
377 if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
378 reason = ECONNREFUSED;
379 else if (chan->state == BT_CONNECT &&
380 chan->sec_level != BT_SECURITY_SDP)
381 reason = ECONNREFUSED;
382 else
383 reason = ETIMEDOUT;
384
385 l2cap_chan_close(chan, reason);
386
387 l2cap_chan_unlock(chan);
388
389 chan->ops->close(chan);
390 mutex_unlock(&conn->chan_lock);
391
392 l2cap_chan_put(chan);
393}
394
395struct l2cap_chan *l2cap_chan_create(void)
396{
397 struct l2cap_chan *chan;
398
399 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
400 if (!chan)
401 return NULL;
402
403 mutex_init(&chan->lock);
404
405 write_lock(&chan_list_lock);
406 list_add(&chan->global_l, &chan_list);
407 write_unlock(&chan_list_lock);
408
409 INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
410
411 chan->state = BT_OPEN;
412
413 kref_init(&chan->kref);
414
415 /* This flag is cleared in l2cap_chan_ready() */
416 set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
417
418 BT_DBG("chan %p", chan);
419
420 return chan;
421}
422
423static void l2cap_chan_destroy(struct kref *kref)
424{
425 struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);
426
427 BT_DBG("chan %p", chan);
428
429 write_lock(&chan_list_lock);
430 list_del(&chan->global_l);
431 write_unlock(&chan_list_lock);
432
433 kfree(chan);
434}
435
436void l2cap_chan_hold(struct l2cap_chan *c)
437{
438 BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));
439
440 kref_get(&c->kref);
441}
442
443void l2cap_chan_put(struct l2cap_chan *c)
444{
445 BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));
446
447 kref_put(&c->kref, l2cap_chan_destroy);
448}
449
450void l2cap_chan_set_defaults(struct l2cap_chan *chan)
451{
452 chan->fcs = L2CAP_FCS_CRC16;
453 chan->max_tx = L2CAP_DEFAULT_MAX_TX;
454 chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
455 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
456 chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
457 chan->sec_level = BT_SECURITY_LOW;
458
459 set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
460}
461
462void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
463{
464 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
465 __le16_to_cpu(chan->psm), chan->dcid);
466
467 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
468
469 chan->conn = conn;
470
471 switch (chan->chan_type) {
472 case L2CAP_CHAN_CONN_ORIENTED:
473 if (conn->hcon->type == LE_LINK) {
474 /* LE connection */
475 chan->omtu = L2CAP_DEFAULT_MTU;
476 chan->scid = L2CAP_CID_LE_DATA;
477 chan->dcid = L2CAP_CID_LE_DATA;
478 } else {
479 /* Alloc CID for connection-oriented socket */
480 chan->scid = l2cap_alloc_cid(conn);
481 chan->omtu = L2CAP_DEFAULT_MTU;
482 }
483 break;
484
485 case L2CAP_CHAN_CONN_LESS:
486 /* Connectionless socket */
487 chan->scid = L2CAP_CID_CONN_LESS;
488 chan->dcid = L2CAP_CID_CONN_LESS;
489 chan->omtu = L2CAP_DEFAULT_MTU;
490 break;
491
492 case L2CAP_CHAN_CONN_FIX_A2MP:
493 chan->scid = L2CAP_CID_A2MP;
494 chan->dcid = L2CAP_CID_A2MP;
495 chan->omtu = L2CAP_A2MP_DEFAULT_MTU;
496 chan->imtu = L2CAP_A2MP_DEFAULT_MTU;
497 break;
498
499 default:
500 /* Raw socket can send/recv signalling messages only */
501 chan->scid = L2CAP_CID_SIGNALING;
502 chan->dcid = L2CAP_CID_SIGNALING;
503 chan->omtu = L2CAP_DEFAULT_MTU;
504 }
505
506 chan->local_id = L2CAP_BESTEFFORT_ID;
507 chan->local_stype = L2CAP_SERV_BESTEFFORT;
508 chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
509 chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
510 chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
511 chan->local_flush_to = L2CAP_EFS_DEFAULT_FLUSH_TO;
512
513 l2cap_chan_hold(chan);
514
515 list_add(&chan->list, &conn->chan_l);
516}
517
518void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
519{
520 mutex_lock(&conn->chan_lock);
521 __l2cap_chan_add(conn, chan);
522 mutex_unlock(&conn->chan_lock);
523}
524
525void l2cap_chan_del(struct l2cap_chan *chan, int err)
526{
527 struct l2cap_conn *conn = chan->conn;
528
529 __clear_chan_timer(chan);
530
531 BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
532
533 if (conn) {
534 /* Delete from channel list */
535 list_del(&chan->list);
536
537 l2cap_chan_put(chan);
538
539 chan->conn = NULL;
540
541 if (chan->chan_type != L2CAP_CHAN_CONN_FIX_A2MP)
542 hci_conn_put(conn->hcon);
543 }
544
545 chan->ops->teardown(chan, err);
546
547 if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
548 return;
549
 550 switch (chan->mode) {
551 case L2CAP_MODE_BASIC:
552 break;
553
554 case L2CAP_MODE_ERTM:
555 __clear_retrans_timer(chan);
556 __clear_monitor_timer(chan);
557 __clear_ack_timer(chan);
558
559 skb_queue_purge(&chan->srej_q);
560
561 l2cap_seq_list_free(&chan->srej_list);
562 l2cap_seq_list_free(&chan->retrans_list);
563
564 /* fall through */
565
566 case L2CAP_MODE_STREAMING:
567 skb_queue_purge(&chan->tx_q);
568 break;
569 }
570
571 return;
572}
573
574void l2cap_chan_close(struct l2cap_chan *chan, int reason)
575{
576 struct l2cap_conn *conn = chan->conn;
577 struct sock *sk = chan->sk;
578
579 BT_DBG("chan %p state %s sk %p", chan, state_to_string(chan->state),
580 sk);
581
582 switch (chan->state) {
583 case BT_LISTEN:
584 chan->ops->teardown(chan, 0);
585 break;
586
587 case BT_CONNECTED:
588 case BT_CONFIG:
589 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
590 conn->hcon->type == ACL_LINK) {
591 __set_chan_timer(chan, sk->sk_sndtimeo);
592 l2cap_send_disconn_req(conn, chan, reason);
593 } else
594 l2cap_chan_del(chan, reason);
595 break;
596
597 case BT_CONNECT2:
598 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
599 conn->hcon->type == ACL_LINK) {
600 struct l2cap_conn_rsp rsp;
601 __u16 result;
602
603 if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags))
604 result = L2CAP_CR_SEC_BLOCK;
605 else
606 result = L2CAP_CR_BAD_PSM;
607 l2cap_state_change(chan, BT_DISCONN);
608
609 rsp.scid = cpu_to_le16(chan->dcid);
610 rsp.dcid = cpu_to_le16(chan->scid);
611 rsp.result = cpu_to_le16(result);
612 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
613 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
614 sizeof(rsp), &rsp);
615 }
616
617 l2cap_chan_del(chan, reason);
618 break;
619
620 case BT_CONNECT:
621 case BT_DISCONN:
622 l2cap_chan_del(chan, reason);
623 break;
624
625 default:
626 chan->ops->teardown(chan, 0);
627 break;
628 }
629}
630
631static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
632{
633 if (chan->chan_type == L2CAP_CHAN_RAW) {
634 switch (chan->sec_level) {
635 case BT_SECURITY_HIGH:
636 return HCI_AT_DEDICATED_BONDING_MITM;
637 case BT_SECURITY_MEDIUM:
638 return HCI_AT_DEDICATED_BONDING;
639 default:
640 return HCI_AT_NO_BONDING;
641 }
642 } else if (chan->psm == __constant_cpu_to_le16(L2CAP_PSM_SDP)) {
643 if (chan->sec_level == BT_SECURITY_LOW)
644 chan->sec_level = BT_SECURITY_SDP;
645
646 if (chan->sec_level == BT_SECURITY_HIGH)
647 return HCI_AT_NO_BONDING_MITM;
648 else
649 return HCI_AT_NO_BONDING;
650 } else {
651 switch (chan->sec_level) {
652 case BT_SECURITY_HIGH:
653 return HCI_AT_GENERAL_BONDING_MITM;
654 case BT_SECURITY_MEDIUM:
655 return HCI_AT_GENERAL_BONDING;
656 default:
657 return HCI_AT_NO_BONDING;
658 }
659 }
660}
661
662/* Service level security */
663int l2cap_chan_check_security(struct l2cap_chan *chan)
664{
665 struct l2cap_conn *conn = chan->conn;
666 __u8 auth_type;
667
668 auth_type = l2cap_get_auth_type(chan);
669
670 return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
671}
672
673static u8 l2cap_get_ident(struct l2cap_conn *conn)
674{
675 u8 id;
676
 677 /* Get next available identifier.
678 * 1 - 128 are used by kernel.
679 * 129 - 199 are reserved.
680 * 200 - 254 are used by utilities like l2ping, etc.
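 *
 * Successive calls therefore hand out 1, 2, ..., 128 and then wrap back
 * to 1; ident 0 is never used for locally generated requests.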
681 */
682
683 spin_lock(&conn->lock);
684
685 if (++conn->tx_ident > 128)
686 conn->tx_ident = 1;
687
688 id = conn->tx_ident;
689
690 spin_unlock(&conn->lock);
691
692 return id;
693}
694
695static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
696 void *data)
697{
698 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
699 u8 flags;
700
701 BT_DBG("code 0x%2.2x", code);
702
703 if (!skb)
704 return;
705
706 if (lmp_no_flush_capable(conn->hcon->hdev))
707 flags = ACL_START_NO_FLUSH;
708 else
709 flags = ACL_START;
710
711 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
712 skb->priority = HCI_PRIO_MAX;
713
714 hci_send_acl(conn->hchan, skb, flags);
715}
716
717static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
718{
719 struct hci_conn *hcon = chan->conn->hcon;
720 u16 flags;
721
722 BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
723 skb->priority);
724
725 if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
726 lmp_no_flush_capable(hcon->hdev))
727 flags = ACL_START_NO_FLUSH;
728 else
729 flags = ACL_START;
730
731 bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
732 hci_send_acl(chan->conn->hchan, skb, flags);
733}
734
735static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
736{
737 control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
738 control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
739
740 if (enh & L2CAP_CTRL_FRAME_TYPE) {
741 /* S-Frame */
742 control->sframe = 1;
743 control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
744 control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
745
746 control->sar = 0;
747 control->txseq = 0;
748 } else {
749 /* I-Frame */
750 control->sframe = 0;
751 control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
752 control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
753
754 control->poll = 0;
755 control->super = 0;
756 }
757}
758
759static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
760{
761 control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
762 control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
763
764 if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
765 /* S-Frame */
766 control->sframe = 1;
767 control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
768 control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
769
770 control->sar = 0;
771 control->txseq = 0;
772 } else {
773 /* I-Frame */
774 control->sframe = 0;
775 control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
776 control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
777
778 control->poll = 0;
779 control->super = 0;
780 }
781}
782
783static inline void __unpack_control(struct l2cap_chan *chan,
784 struct sk_buff *skb)
785{
786 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
787 __unpack_extended_control(get_unaligned_le32(skb->data),
788 &bt_cb(skb)->control);
789 skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
790 } else {
791 __unpack_enhanced_control(get_unaligned_le16(skb->data),
792 &bt_cb(skb)->control);
793 skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
794 }
795}
796
797static u32 __pack_extended_control(struct l2cap_ctrl *control)
798{
799 u32 packed;
800
801 packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
802 packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
803
804 if (control->sframe) {
805 packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
806 packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
807 packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
808 } else {
809 packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
810 packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
811 }
812
813 return packed;
814}
815
816static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
817{
818 u16 packed;
819
820 packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
821 packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
822
823 if (control->sframe) {
824 packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
825 packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
826 packed |= L2CAP_CTRL_FRAME_TYPE;
827 } else {
828 packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
829 packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
830 }
831
832 return packed;
833}
834
835static inline void __pack_control(struct l2cap_chan *chan,
836 struct l2cap_ctrl *control,
837 struct sk_buff *skb)
838{
839 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
840 put_unaligned_le32(__pack_extended_control(control),
841 skb->data + L2CAP_HDR_SIZE);
842 } else {
843 put_unaligned_le16(__pack_enhanced_control(control),
844 skb->data + L2CAP_HDR_SIZE);
845 }
846}
847
848static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
849{
850 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
851 return L2CAP_EXT_HDR_SIZE;
852 else
853 return L2CAP_ENH_HDR_SIZE;
854}
855
856static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
857 u32 control)
858{
859 struct sk_buff *skb;
860 struct l2cap_hdr *lh;
861 int hlen = __ertm_hdr_size(chan);
862
863 if (chan->fcs == L2CAP_FCS_CRC16)
864 hlen += L2CAP_FCS_SIZE;
865
866 skb = bt_skb_alloc(hlen, GFP_KERNEL);
867
868 if (!skb)
869 return ERR_PTR(-ENOMEM);
870
871 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
872 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
873 lh->cid = cpu_to_le16(chan->dcid);
874
875 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
876 put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
877 else
878 put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
879
880 if (chan->fcs == L2CAP_FCS_CRC16) {
881 u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
882 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
883 }
884
885 skb->priority = HCI_PRIO_MAX;
886 return skb;
887}
888
889static void l2cap_send_sframe(struct l2cap_chan *chan,
890 struct l2cap_ctrl *control)
891{
892 struct sk_buff *skb;
893 u32 control_field;
894
895 BT_DBG("chan %p, control %p", chan, control);
896
897 if (!control->sframe)
898 return;
899
900 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
901 !control->poll)
902 control->final = 1;
903
904 if (control->super == L2CAP_SUPER_RR)
905 clear_bit(CONN_RNR_SENT, &chan->conn_state);
906 else if (control->super == L2CAP_SUPER_RNR)
907 set_bit(CONN_RNR_SENT, &chan->conn_state);
908
909 if (control->super != L2CAP_SUPER_SREJ) {
910 chan->last_acked_seq = control->reqseq;
911 __clear_ack_timer(chan);
912 }
913
914 BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
915 control->final, control->poll, control->super);
916
917 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
918 control_field = __pack_extended_control(control);
919 else
920 control_field = __pack_enhanced_control(control);
921
922 skb = l2cap_create_sframe_pdu(chan, control_field);
923 if (!IS_ERR(skb))
924 l2cap_do_send(chan, skb);
925}
926
927static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
928{
929 struct l2cap_ctrl control;
930
931 BT_DBG("chan %p, poll %d", chan, poll);
932
933 memset(&control, 0, sizeof(control));
934 control.sframe = 1;
935 control.poll = poll;
936
937 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
938 control.super = L2CAP_SUPER_RNR;
939 else
940 control.super = L2CAP_SUPER_RR;
941
942 control.reqseq = chan->buffer_seq;
943 l2cap_send_sframe(chan, &control);
944}
945
946static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
947{
948 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
949}
950
951static bool __amp_capable(struct l2cap_chan *chan)
952{
953 struct l2cap_conn *conn = chan->conn;
954
955 if (enable_hs &&
956 chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED &&
957 conn->fixed_chan_mask & L2CAP_FC_A2MP)
958 return true;
959 else
960 return false;
961}
962
963void l2cap_send_conn_req(struct l2cap_chan *chan)
964{
965 struct l2cap_conn *conn = chan->conn;
966 struct l2cap_conn_req req;
967
968 req.scid = cpu_to_le16(chan->scid);
969 req.psm = chan->psm;
970
971 chan->ident = l2cap_get_ident(conn);
972
973 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
974
975 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
976}
977
978static void l2cap_chan_ready(struct l2cap_chan *chan)
979{
980 /* This clears all conf flags, including CONF_NOT_COMPLETE */
981 chan->conf_state = 0;
982 __clear_chan_timer(chan);
983
984 chan->state = BT_CONNECTED;
985
986 chan->ops->ready(chan);
987}
988
989static void l2cap_start_connection(struct l2cap_chan *chan)
990{
991 if (__amp_capable(chan)) {
992 BT_DBG("chan %p AMP capable: discover AMPs", chan);
993 a2mp_discover_amp(chan);
994 } else {
995 l2cap_send_conn_req(chan);
996 }
997}
998
999static void l2cap_do_start(struct l2cap_chan *chan)
1000{
1001 struct l2cap_conn *conn = chan->conn;
1002
1003 if (conn->hcon->type == LE_LINK) {
1004 l2cap_chan_ready(chan);
1005 return;
1006 }
1007
1008 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
1009 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
1010 return;
1011
1012 if (l2cap_chan_check_security(chan) &&
1013 __l2cap_no_conn_pending(chan)) {
1014 l2cap_start_connection(chan);
1015 }
1016 } else {
1017 struct l2cap_info_req req;
1018 req.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
1019
1020 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
1021 conn->info_ident = l2cap_get_ident(conn);
1022
1023 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
1024
1025 l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
1026 sizeof(req), &req);
1027 }
1028}
1029
1030static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1031{
1032 u32 local_feat_mask = l2cap_feat_mask;
1033 if (!disable_ertm)
1034 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1035
1036 switch (mode) {
1037 case L2CAP_MODE_ERTM:
1038 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1039 case L2CAP_MODE_STREAMING:
1040 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
1041 default:
1042 return 0x00;
1043 }
1044}
1045
1046static void l2cap_send_disconn_req(struct l2cap_conn *conn,
1047 struct l2cap_chan *chan, int err)
1048{
1049 struct sock *sk = chan->sk;
1050 struct l2cap_disconn_req req;
1051
1052 if (!conn)
1053 return;
1054
1055 if (chan->mode == L2CAP_MODE_ERTM) {
1056 __clear_retrans_timer(chan);
1057 __clear_monitor_timer(chan);
1058 __clear_ack_timer(chan);
1059 }
1060
1061 if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
1062 l2cap_state_change(chan, BT_DISCONN);
1063 return;
1064 }
1065
1066 req.dcid = cpu_to_le16(chan->dcid);
1067 req.scid = cpu_to_le16(chan->scid);
1068 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
1069 sizeof(req), &req);
1070
1071 lock_sock(sk);
1072 __l2cap_state_change(chan, BT_DISCONN);
1073 __l2cap_chan_set_err(chan, err);
1074 release_sock(sk);
1075}
1076
1077/* ---- L2CAP connections ---- */
1078static void l2cap_conn_start(struct l2cap_conn *conn)
1079{
1080 struct l2cap_chan *chan, *tmp;
1081
1082 BT_DBG("conn %p", conn);
1083
1084 mutex_lock(&conn->chan_lock);
1085
1086 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
1087 struct sock *sk = chan->sk;
1088
1089 l2cap_chan_lock(chan);
1090
1091 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1092 l2cap_chan_unlock(chan);
1093 continue;
1094 }
1095
1096 if (chan->state == BT_CONNECT) {
1097 if (!l2cap_chan_check_security(chan) ||
1098 !__l2cap_no_conn_pending(chan)) {
1099 l2cap_chan_unlock(chan);
1100 continue;
1101 }
1102
1103 if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
1104 && test_bit(CONF_STATE2_DEVICE,
1105 &chan->conf_state)) {
1106 l2cap_chan_close(chan, ECONNRESET);
1107 l2cap_chan_unlock(chan);
1108 continue;
1109 }
1110
1111 l2cap_start_connection(chan);
1112
1113 } else if (chan->state == BT_CONNECT2) {
1114 struct l2cap_conn_rsp rsp;
1115 char buf[128];
1116 rsp.scid = cpu_to_le16(chan->dcid);
1117 rsp.dcid = cpu_to_le16(chan->scid);
1118
1119 if (l2cap_chan_check_security(chan)) {
1120 lock_sock(sk);
1121 if (test_bit(BT_SK_DEFER_SETUP,
1122 &bt_sk(sk)->flags)) {
1123 rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
1124 rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
1125 chan->ops->defer(chan);
1126
1127 } else {
1128 __l2cap_state_change(chan, BT_CONFIG);
1129 rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
1130 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
1131 }
1132 release_sock(sk);
1133 } else {
1134 rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
1135 rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
1136 }
1137
1138 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
1139 sizeof(rsp), &rsp);
1140
1141 if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
1142 rsp.result != L2CAP_CR_SUCCESS) {
1143 l2cap_chan_unlock(chan);
1144 continue;
1145 }
1146
1147 set_bit(CONF_REQ_SENT, &chan->conf_state);
1148 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1149 l2cap_build_conf_req(chan, buf), buf);
1150 chan->num_conf_req++;
1151 }
1152
1153 l2cap_chan_unlock(chan);
1154 }
1155
1156 mutex_unlock(&conn->chan_lock);
1157}
1158
 1159/* Find channel with given cid and source/destination bdaddr.
 1160 * Returns closest match.
1161 */
1162static struct l2cap_chan *l2cap_global_chan_by_scid(int state, u16 cid,
1163 bdaddr_t *src,
1164 bdaddr_t *dst)
1165{
1166 struct l2cap_chan *c, *c1 = NULL;
1167
1168 read_lock(&chan_list_lock);
1169
1170 list_for_each_entry(c, &chan_list, global_l) {
1171 struct sock *sk = c->sk;
1172
1173 if (state && c->state != state)
1174 continue;
1175
1176 if (c->scid == cid) {
1177 int src_match, dst_match;
1178 int src_any, dst_any;
1179
1180 /* Exact match. */
1181 src_match = !bacmp(&bt_sk(sk)->src, src);
1182 dst_match = !bacmp(&bt_sk(sk)->dst, dst);
1183 if (src_match && dst_match) {
1184 read_unlock(&chan_list_lock);
1185 return c;
1186 }
1187
1188 /* Closest match */
1189 src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
1190 dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
1191 if ((src_match && dst_any) || (src_any && dst_match) ||
1192 (src_any && dst_any))
1193 c1 = c;
1194 }
1195 }
1196
1197 read_unlock(&chan_list_lock);
1198
1199 return c1;
1200}
1201
1202static void l2cap_le_conn_ready(struct l2cap_conn *conn)
1203{
1204 struct sock *parent, *sk;
1205 struct l2cap_chan *chan, *pchan;
1206
1207 BT_DBG("");
1208
1209 /* Check if we have socket listening on cid */
1210 pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
1211 conn->src, conn->dst);
1212 if (!pchan)
1213 return;
1214
1215 parent = pchan->sk;
1216
1217 lock_sock(parent);
1218
1219 chan = pchan->ops->new_connection(pchan);
1220 if (!chan)
1221 goto clean;
1222
1223 sk = chan->sk;
1224
1225 hci_conn_hold(conn->hcon);
1226 conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
1227
1228 bacpy(&bt_sk(sk)->src, conn->src);
1229 bacpy(&bt_sk(sk)->dst, conn->dst);
1230
1231 l2cap_chan_add(conn, chan);
1232
1233 l2cap_chan_ready(chan);
1234
1235clean:
1236 release_sock(parent);
1237}
1238
1239static void l2cap_conn_ready(struct l2cap_conn *conn)
1240{
1241 struct l2cap_chan *chan;
1242 struct hci_conn *hcon = conn->hcon;
1243
1244 BT_DBG("conn %p", conn);
1245
1246 if (!hcon->out && hcon->type == LE_LINK)
1247 l2cap_le_conn_ready(conn);
1248
1249 if (hcon->out && hcon->type == LE_LINK)
1250 smp_conn_security(hcon, hcon->pending_sec_level);
1251
1252 mutex_lock(&conn->chan_lock);
1253
1254 list_for_each_entry(chan, &conn->chan_l, list) {
1255
1256 l2cap_chan_lock(chan);
1257
1258 if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
1259 l2cap_chan_unlock(chan);
1260 continue;
1261 }
1262
1263 if (hcon->type == LE_LINK) {
1264 if (smp_conn_security(hcon, chan->sec_level))
1265 l2cap_chan_ready(chan);
1266
1267 } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1268 struct sock *sk = chan->sk;
1269 __clear_chan_timer(chan);
1270 lock_sock(sk);
1271 __l2cap_state_change(chan, BT_CONNECTED);
1272 sk->sk_state_change(sk);
1273 release_sock(sk);
1274
1275 } else if (chan->state == BT_CONNECT)
1276 l2cap_do_start(chan);
1277
1278 l2cap_chan_unlock(chan);
1279 }
1280
1281 mutex_unlock(&conn->chan_lock);
1282}
1283
 1284/* Notify sockets that we cannot guarantee reliability anymore */
1285static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1286{
1287 struct l2cap_chan *chan;
1288
1289 BT_DBG("conn %p", conn);
1290
1291 mutex_lock(&conn->chan_lock);
1292
1293 list_for_each_entry(chan, &conn->chan_l, list) {
1294 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1295 l2cap_chan_set_err(chan, err);
1296 }
1297
1298 mutex_unlock(&conn->chan_lock);
1299}
1300
1301static void l2cap_info_timeout(struct work_struct *work)
1302{
1303 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1304 info_timer.work);
1305
1306 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
1307 conn->info_ident = 0;
1308
1309 l2cap_conn_start(conn);
1310}
1311
1312static void l2cap_conn_del(struct hci_conn *hcon, int err)
1313{
1314 struct l2cap_conn *conn = hcon->l2cap_data;
1315 struct l2cap_chan *chan, *l;
1316
1317 if (!conn)
1318 return;
1319
1320 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
1321
1322 kfree_skb(conn->rx_skb);
1323
1324 mutex_lock(&conn->chan_lock);
1325
1326 /* Kill channels */
1327 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
1328 l2cap_chan_hold(chan);
1329 l2cap_chan_lock(chan);
1330
1331 l2cap_chan_del(chan, err);
1332
1333 l2cap_chan_unlock(chan);
1334
1335 chan->ops->close(chan);
1336 l2cap_chan_put(chan);
1337 }
1338
1339 mutex_unlock(&conn->chan_lock);
1340
1341 hci_chan_del(conn->hchan);
1342
1343 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1344 cancel_delayed_work_sync(&conn->info_timer);
1345
1346 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) {
1347 cancel_delayed_work_sync(&conn->security_timer);
1348 smp_chan_destroy(conn);
1349 }
1350
1351 hcon->l2cap_data = NULL;
1352 kfree(conn);
1353}
1354
1355static void security_timeout(struct work_struct *work)
1356{
1357 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1358 security_timer.work);
1359
1360 BT_DBG("conn %p", conn);
1361
1362 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags)) {
1363 smp_chan_destroy(conn);
1364 l2cap_conn_del(conn->hcon, ETIMEDOUT);
1365 }
1366}
1367
1368static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
1369{
1370 struct l2cap_conn *conn = hcon->l2cap_data;
1371 struct hci_chan *hchan;
1372
1373 if (conn || status)
1374 return conn;
1375
1376 hchan = hci_chan_create(hcon);
1377 if (!hchan)
1378 return NULL;
1379
1380 conn = kzalloc(sizeof(struct l2cap_conn), GFP_KERNEL);
1381 if (!conn) {
1382 hci_chan_del(hchan);
1383 return NULL;
1384 }
1385
1386 hcon->l2cap_data = conn;
1387 conn->hcon = hcon;
1388 conn->hchan = hchan;
1389
1390 BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
1391
1392 switch (hcon->type) {
1393 case AMP_LINK:
1394 conn->mtu = hcon->hdev->block_mtu;
1395 break;
1396
1397 case LE_LINK:
1398 if (hcon->hdev->le_mtu) {
1399 conn->mtu = hcon->hdev->le_mtu;
1400 break;
1401 }
1402 /* fall through */
1403
1404 default:
1405 conn->mtu = hcon->hdev->acl_mtu;
1406 break;
1407 }
1408
1409 conn->src = &hcon->hdev->bdaddr;
1410 conn->dst = &hcon->dst;
1411
1412 conn->feat_mask = 0;
1413
1414 spin_lock_init(&conn->lock);
1415 mutex_init(&conn->chan_lock);
1416
1417 INIT_LIST_HEAD(&conn->chan_l);
1418
1419 if (hcon->type == LE_LINK)
1420 INIT_DELAYED_WORK(&conn->security_timer, security_timeout);
1421 else
1422 INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
1423
1424 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
1425
1426 return conn;
1427}
1428
1429/* ---- Socket interface ---- */
1430
 1431/* Find channel with given psm and source / destination bdaddr.
1432 * Returns closest match.
1433 */
1434static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
1435 bdaddr_t *src,
1436 bdaddr_t *dst)
1437{
1438 struct l2cap_chan *c, *c1 = NULL;
1439
1440 read_lock(&chan_list_lock);
1441
1442 list_for_each_entry(c, &chan_list, global_l) {
1443 struct sock *sk = c->sk;
1444
1445 if (state && c->state != state)
1446 continue;
1447
1448 if (c->psm == psm) {
1449 int src_match, dst_match;
1450 int src_any, dst_any;
1451
1452 /* Exact match. */
1453 src_match = !bacmp(&bt_sk(sk)->src, src);
1454 dst_match = !bacmp(&bt_sk(sk)->dst, dst);
1455 if (src_match && dst_match) {
1456 read_unlock(&chan_list_lock);
1457 return c;
1458 }
1459
1460 /* Closest match */
1461 src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
1462 dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
1463 if ((src_match && dst_any) || (src_any && dst_match) ||
1464 (src_any && dst_any))
1465 c1 = c;
1466 }
1467 }
1468
1469 read_unlock(&chan_list_lock);
1470
1471 return c1;
1472}
1473
1474int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
1475 bdaddr_t *dst, u8 dst_type)
1476{
1477 struct sock *sk = chan->sk;
1478 bdaddr_t *src = &bt_sk(sk)->src;
1479 struct l2cap_conn *conn;
1480 struct hci_conn *hcon;
1481 struct hci_dev *hdev;
1482 __u8 auth_type;
1483 int err;
1484
1485 BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", src, dst,
1486 dst_type, __le16_to_cpu(psm));
1487
1488 hdev = hci_get_route(dst, src);
1489 if (!hdev)
1490 return -EHOSTUNREACH;
1491
1492 hci_dev_lock(hdev);
1493
1494 l2cap_chan_lock(chan);
1495
1496 /* PSM must be odd and lsb of upper byte must be 0 */
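	/* e.g. 0x0001 (SDP) or 0x1001 passes the check below, while an even
	 * value such as 0x0002 or one with bit 8 set such as 0x0103 fails
	 * with -EINVAL.
	 */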
1497 if ((__le16_to_cpu(psm) & 0x0101) != 0x0001 && !cid &&
1498 chan->chan_type != L2CAP_CHAN_RAW) {
1499 err = -EINVAL;
1500 goto done;
1501 }
1502
1503 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !(psm || cid)) {
1504 err = -EINVAL;
1505 goto done;
1506 }
1507
1508 switch (chan->mode) {
1509 case L2CAP_MODE_BASIC:
1510 break;
1511 case L2CAP_MODE_ERTM:
1512 case L2CAP_MODE_STREAMING:
1513 if (!disable_ertm)
1514 break;
1515 /* fall through */
1516 default:
1517 err = -ENOTSUPP;
1518 goto done;
1519 }
1520
1521 switch (chan->state) {
1522 case BT_CONNECT:
1523 case BT_CONNECT2:
1524 case BT_CONFIG:
1525 /* Already connecting */
1526 err = 0;
1527 goto done;
1528
1529 case BT_CONNECTED:
1530 /* Already connected */
1531 err = -EISCONN;
1532 goto done;
1533
1534 case BT_OPEN:
1535 case BT_BOUND:
1536 /* Can connect */
1537 break;
1538
1539 default:
1540 err = -EBADFD;
1541 goto done;
1542 }
1543
1544 /* Set destination address and psm */
1545 lock_sock(sk);
1546 bacpy(&bt_sk(sk)->dst, dst);
1547 release_sock(sk);
1548
1549 chan->psm = psm;
1550 chan->dcid = cid;
1551
1552 auth_type = l2cap_get_auth_type(chan);
1553
1554 if (chan->dcid == L2CAP_CID_LE_DATA)
1555 hcon = hci_connect(hdev, LE_LINK, dst, dst_type,
1556 chan->sec_level, auth_type);
1557 else
1558 hcon = hci_connect(hdev, ACL_LINK, dst, dst_type,
1559 chan->sec_level, auth_type);
1560
1561 if (IS_ERR(hcon)) {
1562 err = PTR_ERR(hcon);
1563 goto done;
1564 }
1565
1566 conn = l2cap_conn_add(hcon, 0);
1567 if (!conn) {
1568 hci_conn_put(hcon);
1569 err = -ENOMEM;
1570 goto done;
1571 }
1572
1573 if (hcon->type == LE_LINK) {
1574 err = 0;
1575
1576 if (!list_empty(&conn->chan_l)) {
1577 err = -EBUSY;
1578 hci_conn_put(hcon);
1579 }
1580
1581 if (err)
1582 goto done;
1583 }
1584
1585 /* Update source addr of the socket */
1586 bacpy(src, conn->src);
1587
1588 l2cap_chan_unlock(chan);
1589 l2cap_chan_add(conn, chan);
1590 l2cap_chan_lock(chan);
1591
1592 l2cap_state_change(chan, BT_CONNECT);
1593 __set_chan_timer(chan, sk->sk_sndtimeo);
1594
1595 if (hcon->state == BT_CONNECTED) {
1596 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1597 __clear_chan_timer(chan);
1598 if (l2cap_chan_check_security(chan))
1599 l2cap_state_change(chan, BT_CONNECTED);
1600 } else
1601 l2cap_do_start(chan);
1602 }
1603
1604 err = 0;
1605
1606done:
1607 l2cap_chan_unlock(chan);
1608 hci_dev_unlock(hdev);
1609 hci_dev_put(hdev);
1610 return err;
1611}
1612
1613int __l2cap_wait_ack(struct sock *sk)
1614{
1615 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
1616 DECLARE_WAITQUEUE(wait, current);
1617 int err = 0;
1618 int timeo = HZ/5;
1619
1620 add_wait_queue(sk_sleep(sk), &wait);
1621 set_current_state(TASK_INTERRUPTIBLE);
1622 while (chan->unacked_frames > 0 && chan->conn) {
1623 if (!timeo)
1624 timeo = HZ/5;
1625
1626 if (signal_pending(current)) {
1627 err = sock_intr_errno(timeo);
1628 break;
1629 }
1630
1631 release_sock(sk);
1632 timeo = schedule_timeout(timeo);
1633 lock_sock(sk);
1634 set_current_state(TASK_INTERRUPTIBLE);
1635
1636 err = sock_error(sk);
1637 if (err)
1638 break;
1639 }
1640 set_current_state(TASK_RUNNING);
1641 remove_wait_queue(sk_sleep(sk), &wait);
1642 return err;
1643}
1644
1645static void l2cap_monitor_timeout(struct work_struct *work)
1646{
1647 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1648 monitor_timer.work);
1649
1650 BT_DBG("chan %p", chan);
1651
1652 l2cap_chan_lock(chan);
1653
1654 if (!chan->conn) {
1655 l2cap_chan_unlock(chan);
1656 l2cap_chan_put(chan);
1657 return;
1658 }
1659
1660 l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);
1661
1662 l2cap_chan_unlock(chan);
1663 l2cap_chan_put(chan);
1664}
1665
1666static void l2cap_retrans_timeout(struct work_struct *work)
1667{
1668 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1669 retrans_timer.work);
1670
1671 BT_DBG("chan %p", chan);
1672
1673 l2cap_chan_lock(chan);
1674
1675 if (!chan->conn) {
1676 l2cap_chan_unlock(chan);
1677 l2cap_chan_put(chan);
1678 return;
1679 }
1680
1681 l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
1682 l2cap_chan_unlock(chan);
1683 l2cap_chan_put(chan);
1684}
1685
1686static void l2cap_streaming_send(struct l2cap_chan *chan,
1687 struct sk_buff_head *skbs)
1688{
1689 struct sk_buff *skb;
1690 struct l2cap_ctrl *control;
1691
1692 BT_DBG("chan %p, skbs %p", chan, skbs);
1693
1694 skb_queue_splice_tail_init(skbs, &chan->tx_q);
1695
1696 while (!skb_queue_empty(&chan->tx_q)) {
1697
1698 skb = skb_dequeue(&chan->tx_q);
1699
1700 bt_cb(skb)->control.retries = 1;
1701 control = &bt_cb(skb)->control;
1702
1703 control->reqseq = 0;
1704 control->txseq = chan->next_tx_seq;
1705
1706 __pack_control(chan, control, skb);
1707
1708 if (chan->fcs == L2CAP_FCS_CRC16) {
1709 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1710 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1711 }
1712
1713 l2cap_do_send(chan, skb);
1714
1715 BT_DBG("Sent txseq %u", control->txseq);
1716
1717 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1718 chan->frames_sent++;
1719 }
1720}
1721
1722static int l2cap_ertm_send(struct l2cap_chan *chan)
1723{
1724 struct sk_buff *skb, *tx_skb;
1725 struct l2cap_ctrl *control;
1726 int sent = 0;
1727
1728 BT_DBG("chan %p", chan);
1729
1730 if (chan->state != BT_CONNECTED)
1731 return -ENOTCONN;
1732
1733 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1734 return 0;
1735
1736 while (chan->tx_send_head &&
1737 chan->unacked_frames < chan->remote_tx_win &&
1738 chan->tx_state == L2CAP_TX_STATE_XMIT) {
1739
1740 skb = chan->tx_send_head;
1741
1742 bt_cb(skb)->control.retries = 1;
1743 control = &bt_cb(skb)->control;
1744
1745 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1746 control->final = 1;
1747
1748 control->reqseq = chan->buffer_seq;
1749 chan->last_acked_seq = chan->buffer_seq;
1750 control->txseq = chan->next_tx_seq;
1751
1752 __pack_control(chan, control, skb);
1753
1754 if (chan->fcs == L2CAP_FCS_CRC16) {
1755 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1756 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1757 }
1758
 1759 /* Clone after data has been modified. Data is assumed to be
 1760  * read-only (for locking purposes) on cloned sk_buffs.
 1761  */
1762 tx_skb = skb_clone(skb, GFP_KERNEL);
1763
1764 if (!tx_skb)
1765 break;
1766
1767 __set_retrans_timer(chan);
1768
1769 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1770 chan->unacked_frames++;
1771 chan->frames_sent++;
1772 sent++;
1773
1774 if (skb_queue_is_last(&chan->tx_q, skb))
1775 chan->tx_send_head = NULL;
1776 else
1777 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
1778
1779 l2cap_do_send(chan, tx_skb);
1780 BT_DBG("Sent txseq %u", control->txseq);
1781 }
1782
1783 BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
1784 chan->unacked_frames, skb_queue_len(&chan->tx_q));
1785
1786 return sent;
1787}
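/* For example, with a remote tx window of 5 and seven I-frames queued, the
 * loop above sends five frames and leaves tx_send_head at the sixth; the
 * rest go out on a later call, once acknowledgements bring unacked_frames
 * back under the window.
 */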
1788
1789static void l2cap_ertm_resend(struct l2cap_chan *chan)
1790{
1791 struct l2cap_ctrl control;
1792 struct sk_buff *skb;
1793 struct sk_buff *tx_skb;
1794 u16 seq;
1795
1796 BT_DBG("chan %p", chan);
1797
1798 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1799 return;
1800
1801 while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
1802 seq = l2cap_seq_list_pop(&chan->retrans_list);
1803
1804 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
1805 if (!skb) {
1806 BT_DBG("Error: Can't retransmit seq %d, frame missing",
1807 seq);
1808 continue;
1809 }
1810
1811 bt_cb(skb)->control.retries++;
1812 control = bt_cb(skb)->control;
1813
1814 if (chan->max_tx != 0 &&
1815 bt_cb(skb)->control.retries > chan->max_tx) {
1816 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
1817 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
1818 l2cap_seq_list_clear(&chan->retrans_list);
1819 break;
1820 }
1821
1822 control.reqseq = chan->buffer_seq;
1823 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1824 control.final = 1;
1825 else
1826 control.final = 0;
1827
1828 if (skb_cloned(skb)) {
1829 /* Cloned sk_buffs are read-only, so we need a
1830 * writeable copy
1831 */
1832 tx_skb = skb_copy(skb, GFP_KERNEL);
1833 } else {
1834 tx_skb = skb_clone(skb, GFP_KERNEL);
1835 }
1836
1837 if (!tx_skb) {
1838 l2cap_seq_list_clear(&chan->retrans_list);
1839 break;
1840 }
1841
1842 /* Update skb contents */
1843 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
1844 put_unaligned_le32(__pack_extended_control(&control),
1845 tx_skb->data + L2CAP_HDR_SIZE);
1846 } else {
1847 put_unaligned_le16(__pack_enhanced_control(&control),
1848 tx_skb->data + L2CAP_HDR_SIZE);
1849 }
1850
1851 if (chan->fcs == L2CAP_FCS_CRC16) {
1852 u16 fcs = crc16(0, (u8 *) tx_skb->data, tx_skb->len);
1853 put_unaligned_le16(fcs, skb_put(tx_skb,
1854 L2CAP_FCS_SIZE));
1855 }
1856
1857 l2cap_do_send(chan, tx_skb);
1858
1859 BT_DBG("Resent txseq %d", control.txseq);
1860
1861 chan->last_acked_seq = chan->buffer_seq;
1862 }
1863}
1864
1865static void l2cap_retransmit(struct l2cap_chan *chan,
1866 struct l2cap_ctrl *control)
1867{
1868 BT_DBG("chan %p, control %p", chan, control);
1869
1870 l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
1871 l2cap_ertm_resend(chan);
1872}
1873
1874static void l2cap_retransmit_all(struct l2cap_chan *chan,
1875 struct l2cap_ctrl *control)
1876{
1877 struct sk_buff *skb;
1878
1879 BT_DBG("chan %p, control %p", chan, control);
1880
1881 if (control->poll)
1882 set_bit(CONN_SEND_FBIT, &chan->conn_state);
1883
1884 l2cap_seq_list_clear(&chan->retrans_list);
1885
1886 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1887 return;
1888
1889 if (chan->unacked_frames) {
1890 skb_queue_walk(&chan->tx_q, skb) {
1891 if (bt_cb(skb)->control.txseq == control->reqseq ||
1892 skb == chan->tx_send_head)
1893 break;
1894 }
1895
1896 skb_queue_walk_from(&chan->tx_q, skb) {
1897 if (skb == chan->tx_send_head)
1898 break;
1899
1900 l2cap_seq_list_append(&chan->retrans_list,
1901 bt_cb(skb)->control.txseq);
1902 }
1903
1904 l2cap_ertm_resend(chan);
1905 }
1906}
1907
1908static void l2cap_send_ack(struct l2cap_chan *chan)
1909{
1910 struct l2cap_ctrl control;
1911 u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
1912 chan->last_acked_seq);
1913 int threshold;
1914
1915 BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
1916 chan, chan->last_acked_seq, chan->buffer_seq);
1917
1918 memset(&control, 0, sizeof(control));
1919 control.sframe = 1;
1920
1921 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
1922 chan->rx_state == L2CAP_RX_STATE_RECV) {
1923 __clear_ack_timer(chan);
1924 control.super = L2CAP_SUPER_RNR;
1925 control.reqseq = chan->buffer_seq;
1926 l2cap_send_sframe(chan, &control);
1927 } else {
1928 if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
1929 l2cap_ertm_send(chan);
1930 /* If any i-frames were sent, they included an ack */
1931 if (chan->buffer_seq == chan->last_acked_seq)
1932 frames_to_ack = 0;
1933 }
1934
 1935 /* Ack now if the window is 3/4 full.
1936 * Calculate without mul or div
1937 */
1938 threshold = chan->ack_win;
1939 threshold += threshold << 1;
1940 threshold >>= 2;
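		/* i.e. threshold == ack_win * 3 / 4; e.g. an ack_win of 8
		 * gives (8 + 16) >> 2 == 6 received-but-unacked frames
		 * before an RR acknowledging them is forced out.
		 */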
1941
1942 BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
1943 threshold);
1944
1945 if (frames_to_ack >= threshold) {
1946 __clear_ack_timer(chan);
1947 control.super = L2CAP_SUPER_RR;
1948 control.reqseq = chan->buffer_seq;
1949 l2cap_send_sframe(chan, &control);
1950 frames_to_ack = 0;
1951 }
1952
1953 if (frames_to_ack)
1954 __set_ack_timer(chan);
1955 }
1956}
1957
1958static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
1959 struct msghdr *msg, int len,
1960 int count, struct sk_buff *skb)
1961{
1962 struct l2cap_conn *conn = chan->conn;
1963 struct sk_buff **frag;
1964 int sent = 0;
1965
1966 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
1967 return -EFAULT;
1968
1969 sent += count;
1970 len -= count;
1971
1972 /* Continuation fragments (no L2CAP header) */
1973 frag = &skb_shinfo(skb)->frag_list;
1974 while (len) {
1975 struct sk_buff *tmp;
1976
1977 count = min_t(unsigned int, conn->mtu, len);
1978
1979 tmp = chan->ops->alloc_skb(chan, count,
1980 msg->msg_flags & MSG_DONTWAIT);
1981 if (IS_ERR(tmp))
1982 return PTR_ERR(tmp);
1983
1984 *frag = tmp;
1985
1986 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1987 return -EFAULT;
1988
1989 (*frag)->priority = skb->priority;
1990
1991 sent += count;
1992 len -= count;
1993
1994 skb->len += (*frag)->len;
1995 skb->data_len += (*frag)->len;
1996
1997 frag = &(*frag)->next;
1998 }
1999
2000 return sent;
2001}
2002
2003static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
2004 struct msghdr *msg, size_t len,
2005 u32 priority)
2006{
2007 struct l2cap_conn *conn = chan->conn;
2008 struct sk_buff *skb;
2009 int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
2010 struct l2cap_hdr *lh;
2011
2012 BT_DBG("chan %p len %zu priority %u", chan, len, priority);
2013
2014 count = min_t(unsigned int, (conn->mtu - hlen), len);
2015
2016 skb = chan->ops->alloc_skb(chan, count + hlen,
2017 msg->msg_flags & MSG_DONTWAIT);
2018 if (IS_ERR(skb))
2019 return skb;
2020
2021 skb->priority = priority;
2022
2023 /* Create L2CAP header */
2024 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2025 lh->cid = cpu_to_le16(chan->dcid);
2026 lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
2027 put_unaligned(chan->psm, skb_put(skb, L2CAP_PSMLEN_SIZE));
2028
2029 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2030 if (unlikely(err < 0)) {
2031 kfree_skb(skb);
2032 return ERR_PTR(err);
2033 }
2034 return skb;
2035}
2036
2037static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
2038 struct msghdr *msg, size_t len,
2039 u32 priority)
2040{
2041 struct l2cap_conn *conn = chan->conn;
2042 struct sk_buff *skb;
2043 int err, count;
2044 struct l2cap_hdr *lh;
2045
2046 BT_DBG("chan %p len %zu", chan, len);
2047
2048 count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
2049
2050 skb = chan->ops->alloc_skb(chan, count + L2CAP_HDR_SIZE,
2051 msg->msg_flags & MSG_DONTWAIT);
2052 if (IS_ERR(skb))
2053 return skb;
2054
2055 skb->priority = priority;
2056
2057 /* Create L2CAP header */
2058 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2059 lh->cid = cpu_to_le16(chan->dcid);
2060 lh->len = cpu_to_le16(len);
2061
2062 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2063 if (unlikely(err < 0)) {
2064 kfree_skb(skb);
2065 return ERR_PTR(err);
2066 }
2067 return skb;
2068}
2069
2070static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
2071 struct msghdr *msg, size_t len,
2072 u16 sdulen)
2073{
2074 struct l2cap_conn *conn = chan->conn;
2075 struct sk_buff *skb;
2076 int err, count, hlen;
2077 struct l2cap_hdr *lh;
2078
2079 BT_DBG("chan %p len %zu", chan, len);
2080
2081 if (!conn)
2082 return ERR_PTR(-ENOTCONN);
2083
2084 hlen = __ertm_hdr_size(chan);
2085
2086 if (sdulen)
2087 hlen += L2CAP_SDULEN_SIZE;
2088
2089 if (chan->fcs == L2CAP_FCS_CRC16)
2090 hlen += L2CAP_FCS_SIZE;
2091
2092 count = min_t(unsigned int, (conn->mtu - hlen), len);
2093
2094 skb = chan->ops->alloc_skb(chan, count + hlen,
2095 msg->msg_flags & MSG_DONTWAIT);
2096 if (IS_ERR(skb))
2097 return skb;
2098
2099 /* Create L2CAP header */
2100 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2101 lh->cid = cpu_to_le16(chan->dcid);
2102 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2103
2104 /* Control header is populated later */
2105 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2106 put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
2107 else
2108 put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
2109
2110 if (sdulen)
2111 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2112
2113 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2114 if (unlikely(err < 0)) {
2115 kfree_skb(skb);
2116 return ERR_PTR(err);
2117 }
2118
2119 bt_cb(skb)->control.fcs = chan->fcs;
2120 bt_cb(skb)->control.retries = 0;
2121 return skb;
2122}
2123
2124static int l2cap_segment_sdu(struct l2cap_chan *chan,
2125 struct sk_buff_head *seg_queue,
2126 struct msghdr *msg, size_t len)
2127{
2128 struct sk_buff *skb;
2129 u16 sdu_len;
2130 size_t pdu_len;
2131 u8 sar;
2132
2133 BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2134
2135 /* It is critical that ERTM PDUs fit in a single HCI fragment,
2136 * so fragmented skbs are not used. The HCI layer's handling
2137 * of fragmented skbs is not compatible with ERTM's queueing.
2138 */
2139
2140 /* PDU size is derived from the HCI MTU */
2141 pdu_len = chan->conn->mtu;
2142
2143 pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);
2144
2145 /* Adjust for largest possible L2CAP overhead. */
2146 if (chan->fcs)
2147 pdu_len -= L2CAP_FCS_SIZE;
2148
2149 pdu_len -= __ertm_hdr_size(chan);
2150
2151 /* Remote device may have requested smaller PDUs */
2152 pdu_len = min_t(size_t, pdu_len, chan->remote_mps);
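	/* Hypothetical example: with conn->mtu of 339, CRC16 enabled and the
	 * 6-byte enhanced header, each PDU may carry up to 331 bytes of SDU
	 * data (329 for the start PDU of a segmented SDU, which also holds
	 * the 2-byte SDU length), subject to the remote MPS cap above.
	 */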
2153
2154 if (len <= pdu_len) {
2155 sar = L2CAP_SAR_UNSEGMENTED;
2156 sdu_len = 0;
2157 pdu_len = len;
2158 } else {
2159 sar = L2CAP_SAR_START;
2160 sdu_len = len;
2161 pdu_len -= L2CAP_SDULEN_SIZE;
2162 }
2163
2164 while (len > 0) {
2165 skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);
2166
2167 if (IS_ERR(skb)) {
2168 __skb_queue_purge(seg_queue);
2169 return PTR_ERR(skb);
2170 }
2171
2172 bt_cb(skb)->control.sar = sar;
2173 __skb_queue_tail(seg_queue, skb);
2174
2175 len -= pdu_len;
2176 if (sdu_len) {
2177 sdu_len = 0;
2178 pdu_len += L2CAP_SDULEN_SIZE;
2179 }
2180
2181 if (len <= pdu_len) {
2182 sar = L2CAP_SAR_END;
2183 pdu_len = len;
2184 } else {
2185 sar = L2CAP_SAR_CONTINUE;
2186 }
2187 }
2188
2189 return 0;
2190}
2191
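/* Entry point for outgoing data on a channel.  Connectionless channels
 * get a single PDU, basic mode channels get one B-frame, and ERTM or
 * streaming channels have the SDU segmented first and then handed to
 * the TX state machine or sent directly.  Returns the number of bytes
 * accepted or a negative error code.
 */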
2192int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
2193 u32 priority)
2194{
2195 struct sk_buff *skb;
2196 int err;
2197 struct sk_buff_head seg_queue;
2198
2199 /* Connectionless channel */
2200 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
2201 skb = l2cap_create_connless_pdu(chan, msg, len, priority);
2202 if (IS_ERR(skb))
2203 return PTR_ERR(skb);
2204
2205 l2cap_do_send(chan, skb);
2206 return len;
2207 }
2208
2209 switch (chan->mode) {
2210 case L2CAP_MODE_BASIC:
2211 /* Check outgoing MTU */
2212 if (len > chan->omtu)
2213 return -EMSGSIZE;
2214
2215 /* Create a basic PDU */
2216 skb = l2cap_create_basic_pdu(chan, msg, len, priority);
2217 if (IS_ERR(skb))
2218 return PTR_ERR(skb);
2219
2220 l2cap_do_send(chan, skb);
2221 err = len;
2222 break;
2223
2224 case L2CAP_MODE_ERTM:
2225 case L2CAP_MODE_STREAMING:
2226 /* Check outgoing MTU */
2227 if (len > chan->omtu) {
2228 err = -EMSGSIZE;
2229 break;
2230 }
2231
2232 __skb_queue_head_init(&seg_queue);
2233
2234 /* Do segmentation before calling in to the state machine,
2235 * since it's possible to block while waiting for memory
2236 * allocation.
2237 */
2238 err = l2cap_segment_sdu(chan, &seg_queue, msg, len);
2239
2240 /* The channel could have been closed while segmenting,
2241 * check that it is still connected.
2242 */
2243 if (chan->state != BT_CONNECTED) {
2244 __skb_queue_purge(&seg_queue);
2245 err = -ENOTCONN;
2246 }
2247
2248 if (err)
2249 break;
2250
2251 if (chan->mode == L2CAP_MODE_ERTM)
2252 l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
2253 else
2254 l2cap_streaming_send(chan, &seg_queue);
2255
2256 err = len;
2257
2258 /* If the skbs were not queued for sending, they'll still be in
2259 * seg_queue and need to be purged.
2260 */
2261 __skb_queue_purge(&seg_queue);
2262 break;
2263
2264 default:
2265 BT_DBG("bad mode 0x%2.2x", chan->mode);
2266 err = -EBADFD;
2267 }
2268
2269 return err;
2270}
2271
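/* Request retransmission of every missing I-frame between the expected
 * sequence number and 'txseq'.  One SREJ S-frame is sent per missing
 * frame that is not already buffered in srej_q, and each requested
 * sequence number is remembered in srej_list.
 */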
2272static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
2273{
2274 struct l2cap_ctrl control;
2275 u16 seq;
2276
2277 BT_DBG("chan %p, txseq %u", chan, txseq);
2278
2279 memset(&control, 0, sizeof(control));
2280 control.sframe = 1;
2281 control.super = L2CAP_SUPER_SREJ;
2282
2283 for (seq = chan->expected_tx_seq; seq != txseq;
2284 seq = __next_seq(chan, seq)) {
2285 if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
2286 control.reqseq = seq;
2287 l2cap_send_sframe(chan, &control);
2288 l2cap_seq_list_append(&chan->srej_list, seq);
2289 }
2290 }
2291
2292 chan->expected_tx_seq = __next_seq(chan, txseq);
2293}
2294
2295static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2296{
2297 struct l2cap_ctrl control;
2298
2299 BT_DBG("chan %p", chan);
2300
2301 if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2302 return;
2303
2304 memset(&control, 0, sizeof(control));
2305 control.sframe = 1;
2306 control.super = L2CAP_SUPER_SREJ;
2307 control.reqseq = chan->srej_list.tail;
2308 l2cap_send_sframe(chan, &control);
2309}
2310
2311static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
2312{
2313 struct l2cap_ctrl control;
2314 u16 initial_head;
2315 u16 seq;
2316
2317 BT_DBG("chan %p, txseq %u", chan, txseq);
2318
2319 memset(&control, 0, sizeof(control));
2320 control.sframe = 1;
2321 control.super = L2CAP_SUPER_SREJ;
2322
2323 /* Capture initial list head to allow only one pass through the list. */
2324 initial_head = chan->srej_list.head;
2325
2326 do {
2327 seq = l2cap_seq_list_pop(&chan->srej_list);
2328 if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
2329 break;
2330
2331 control.reqseq = seq;
2332 l2cap_send_sframe(chan, &control);
2333 l2cap_seq_list_append(&chan->srej_list, seq);
2334 } while (chan->srej_list.head != initial_head);
2335}
2336
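/* The ReqSeq field of a received frame acknowledges all I-frames with
 * sequence numbers up to (but not including) 'reqseq'.  Drop those
 * frames from the TX queue and stop the retransmission timer once
 * nothing is left unacknowledged.
 */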
2337static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
2338{
2339 struct sk_buff *acked_skb;
2340 u16 ackseq;
2341
2342 BT_DBG("chan %p, reqseq %u", chan, reqseq);
2343
2344 if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
2345 return;
2346
2347 BT_DBG("expected_ack_seq %u, unacked_frames %u",
2348 chan->expected_ack_seq, chan->unacked_frames);
2349
2350 for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
2351 ackseq = __next_seq(chan, ackseq)) {
2352
2353 acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
2354 if (acked_skb) {
2355 skb_unlink(acked_skb, &chan->tx_q);
2356 kfree_skb(acked_skb);
2357 chan->unacked_frames--;
2358 }
2359 }
2360
2361 chan->expected_ack_seq = reqseq;
2362
2363 if (chan->unacked_frames == 0)
2364 __clear_retrans_timer(chan);
2365
2366 BT_DBG("unacked_frames %u", chan->unacked_frames);
2367}
2368
2369static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2370{
2371 BT_DBG("chan %p", chan);
2372
2373 chan->expected_tx_seq = chan->buffer_seq;
2374 l2cap_seq_list_clear(&chan->srej_list);
2375 skb_queue_purge(&chan->srej_q);
2376 chan->rx_state = L2CAP_RX_STATE_RECV;
2377}
2378
2379static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
2380 struct l2cap_ctrl *control,
2381 struct sk_buff_head *skbs, u8 event)
2382{
2383 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2384 event);
2385
2386 switch (event) {
2387 case L2CAP_EV_DATA_REQUEST:
2388 if (chan->tx_send_head == NULL)
2389 chan->tx_send_head = skb_peek(skbs);
2390
2391 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2392 l2cap_ertm_send(chan);
2393 break;
2394 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2395 BT_DBG("Enter LOCAL_BUSY");
2396 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2397
2398 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2399 /* The SREJ_SENT state must be aborted if we are to
2400 * enter the LOCAL_BUSY state.
2401 */
2402 l2cap_abort_rx_srej_sent(chan);
2403 }
2404
2405 l2cap_send_ack(chan);
2406
2407 break;
2408 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2409 BT_DBG("Exit LOCAL_BUSY");
2410 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2411
2412 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2413 struct l2cap_ctrl local_control;
2414
2415 memset(&local_control, 0, sizeof(local_control));
2416 local_control.sframe = 1;
2417 local_control.super = L2CAP_SUPER_RR;
2418 local_control.poll = 1;
2419 local_control.reqseq = chan->buffer_seq;
2420 l2cap_send_sframe(chan, &local_control);
2421
2422 chan->retry_count = 1;
2423 __set_monitor_timer(chan);
2424 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2425 }
2426 break;
2427 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2428 l2cap_process_reqseq(chan, control->reqseq);
2429 break;
2430 case L2CAP_EV_EXPLICIT_POLL:
2431 l2cap_send_rr_or_rnr(chan, 1);
2432 chan->retry_count = 1;
2433 __set_monitor_timer(chan);
2434 __clear_ack_timer(chan);
2435 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2436 break;
2437 case L2CAP_EV_RETRANS_TO:
2438 l2cap_send_rr_or_rnr(chan, 1);
2439 chan->retry_count = 1;
2440 __set_monitor_timer(chan);
2441 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2442 break;
2443 case L2CAP_EV_RECV_FBIT:
2444 /* Nothing to process */
2445 break;
2446 default:
2447 break;
2448 }
2449}
2450
2451static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2452 struct l2cap_ctrl *control,
2453 struct sk_buff_head *skbs, u8 event)
2454{
2455 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2456 event);
2457
2458 switch (event) {
2459 case L2CAP_EV_DATA_REQUEST:
2460 if (chan->tx_send_head == NULL)
2461 chan->tx_send_head = skb_peek(skbs);
2462 /* Queue data, but don't send. */
2463 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2464 break;
2465 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2466 BT_DBG("Enter LOCAL_BUSY");
2467 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2468
2469 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2470 /* The SREJ_SENT state must be aborted if we are to
2471 * enter the LOCAL_BUSY state.
2472 */
2473 l2cap_abort_rx_srej_sent(chan);
2474 }
2475
2476 l2cap_send_ack(chan);
2477
2478 break;
2479 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2480 BT_DBG("Exit LOCAL_BUSY");
2481 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2482
2483 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2484 struct l2cap_ctrl local_control;
2485 memset(&local_control, 0, sizeof(local_control));
2486 local_control.sframe = 1;
2487 local_control.super = L2CAP_SUPER_RR;
2488 local_control.poll = 1;
2489 local_control.reqseq = chan->buffer_seq;
2490 l2cap_send_sframe(chan, &local_control);
2491
2492 chan->retry_count = 1;
2493 __set_monitor_timer(chan);
2494 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2495 }
2496 break;
2497 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2498 l2cap_process_reqseq(chan, control->reqseq);
2499
2500 /* Fall through */
2501
2502 case L2CAP_EV_RECV_FBIT:
2503 if (control && control->final) {
2504 __clear_monitor_timer(chan);
2505 if (chan->unacked_frames > 0)
2506 __set_retrans_timer(chan);
2507 chan->retry_count = 0;
2508 chan->tx_state = L2CAP_TX_STATE_XMIT;
2509 BT_DBG("recv fbit tx_state 0x%2.2x", chan->tx_state);
2510 }
2511 break;
2512 case L2CAP_EV_EXPLICIT_POLL:
2513 /* Ignore */
2514 break;
2515 case L2CAP_EV_MONITOR_TO:
2516 if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
2517 l2cap_send_rr_or_rnr(chan, 1);
2518 __set_monitor_timer(chan);
2519 chan->retry_count++;
2520 } else {
2521 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
2522 }
2523 break;
2524 default:
2525 break;
2526 }
2527}
2528
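/* Dispatch a TX event to the ERTM transmit state machine.  XMIT is the
 * normal state in which new I-frames may be sent; WAIT_F is entered
 * after sending a poll (P=1) and only queues new data until a frame
 * with the F-bit set is received, while repeated monitor timeouts
 * eventually tear the channel down.
 */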
2529static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
2530 struct sk_buff_head *skbs, u8 event)
2531{
2532 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2533 chan, control, skbs, event, chan->tx_state);
2534
2535 switch (chan->tx_state) {
2536 case L2CAP_TX_STATE_XMIT:
2537 l2cap_tx_state_xmit(chan, control, skbs, event);
2538 break;
2539 case L2CAP_TX_STATE_WAIT_F:
2540 l2cap_tx_state_wait_f(chan, control, skbs, event);
2541 break;
2542 default:
2543 /* Ignore event */
2544 break;
2545 }
2546}
2547
2548static void l2cap_pass_to_tx(struct l2cap_chan *chan,
2549 struct l2cap_ctrl *control)
2550{
2551 BT_DBG("chan %p, control %p", chan, control);
2552 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
2553}
2554
2555static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
2556 struct l2cap_ctrl *control)
2557{
2558 BT_DBG("chan %p, control %p", chan, control);
2559 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
2560}
2561
2562/* Copy frame to all raw sockets on that connection */
2563static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2564{
2565 struct sk_buff *nskb;
2566 struct l2cap_chan *chan;
2567
2568 BT_DBG("conn %p", conn);
2569
2570 mutex_lock(&conn->chan_lock);
2571
2572 list_for_each_entry(chan, &conn->chan_l, list) {
2573 struct sock *sk = chan->sk;
2574 if (chan->chan_type != L2CAP_CHAN_RAW)
2575 continue;
2576
2577 /* Don't send frame to the socket it came from */
2578 if (skb->sk == sk)
2579 continue;
2580 nskb = skb_clone(skb, GFP_KERNEL);
2581 if (!nskb)
2582 continue;
2583
2584 if (chan->ops->recv(chan, nskb))
2585 kfree_skb(nskb);
2586 }
2587
2588 mutex_unlock(&conn->chan_lock);
2589}
2590
2591/* ---- L2CAP signalling commands ---- */
2592static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
2593 u8 ident, u16 dlen, void *data)
2594{
2595 struct sk_buff *skb, **frag;
2596 struct l2cap_cmd_hdr *cmd;
2597 struct l2cap_hdr *lh;
2598 int len, count;
2599
2600 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
2601 conn, code, ident, dlen);
2602
2603 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
2604 count = min_t(unsigned int, conn->mtu, len);
2605
2606 skb = bt_skb_alloc(count, GFP_KERNEL);
2607 if (!skb)
2608 return NULL;
2609
2610 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2611 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
2612
2613 if (conn->hcon->type == LE_LINK)
2614 lh->cid = __constant_cpu_to_le16(L2CAP_CID_LE_SIGNALING);
2615 else
2616 lh->cid = __constant_cpu_to_le16(L2CAP_CID_SIGNALING);
2617
2618 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
2619 cmd->code = code;
2620 cmd->ident = ident;
2621 cmd->len = cpu_to_le16(dlen);
2622
2623 if (dlen) {
2624 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
2625 memcpy(skb_put(skb, count), data, count);
2626 data += count;
2627 }
2628
2629 len -= skb->len;
2630
2631 /* Continuation fragments (no L2CAP header) */
2632 frag = &skb_shinfo(skb)->frag_list;
2633 while (len) {
2634 count = min_t(unsigned int, conn->mtu, len);
2635
2636 *frag = bt_skb_alloc(count, GFP_KERNEL);
2637 if (!*frag)
2638 goto fail;
2639
2640 memcpy(skb_put(*frag, count), data, count);
2641
2642 len -= count;
2643 data += count;
2644
2645 frag = &(*frag)->next;
2646 }
2647
2648 return skb;
2649
2650fail:
2651 kfree_skb(skb);
2652 return NULL;
2653}
2654
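/* Configuration options are encoded as a simple TLV sequence:
 *
 *   type (1 byte) | length (1 byte) | value ('length' bytes)
 *
 * l2cap_get_conf_opt() decodes one option and advances *ptr past it,
 * l2cap_add_conf_opt() appends one option at *ptr.
 */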
2655static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
2656 unsigned long *val)
2657{
2658 struct l2cap_conf_opt *opt = *ptr;
2659 int len;
2660
2661 len = L2CAP_CONF_OPT_SIZE + opt->len;
2662 *ptr += len;
2663
2664 *type = opt->type;
2665 *olen = opt->len;
2666
2667 switch (opt->len) {
2668 case 1:
2669 *val = *((u8 *) opt->val);
2670 break;
2671
2672 case 2:
2673 *val = get_unaligned_le16(opt->val);
2674 break;
2675
2676 case 4:
2677 *val = get_unaligned_le32(opt->val);
2678 break;
2679
2680 default:
2681 *val = (unsigned long) opt->val;
2682 break;
2683 }
2684
2685 BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
2686 return len;
2687}
2688
2689static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2690{
2691 struct l2cap_conf_opt *opt = *ptr;
2692
2693 BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);
2694
2695 opt->type = type;
2696 opt->len = len;
2697
2698 switch (len) {
2699 case 1:
2700 *((u8 *) opt->val) = val;
2701 break;
2702
2703 case 2:
2704 put_unaligned_le16(val, opt->val);
2705 break;
2706
2707 case 4:
2708 put_unaligned_le32(val, opt->val);
2709 break;
2710
2711 default:
2712 memcpy(opt->val, (void *) val, len);
2713 break;
2714 }
2715
2716 *ptr += L2CAP_CONF_OPT_SIZE + len;
2717}
2718
2719static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
2720{
2721 struct l2cap_conf_efs efs;
2722
2723 switch (chan->mode) {
2724 case L2CAP_MODE_ERTM:
2725 efs.id = chan->local_id;
2726 efs.stype = chan->local_stype;
2727 efs.msdu = cpu_to_le16(chan->local_msdu);
2728 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2729 efs.acc_lat = __constant_cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
2730 efs.flush_to = __constant_cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
2731 break;
2732
2733 case L2CAP_MODE_STREAMING:
2734 efs.id = 1;
2735 efs.stype = L2CAP_SERV_BESTEFFORT;
2736 efs.msdu = cpu_to_le16(chan->local_msdu);
2737 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2738 efs.acc_lat = 0;
2739 efs.flush_to = 0;
2740 break;
2741
2742 default:
2743 return;
2744 }
2745
2746 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
2747 (unsigned long) &efs);
2748}
2749
2750static void l2cap_ack_timeout(struct work_struct *work)
2751{
2752 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
2753 ack_timer.work);
2754 u16 frames_to_ack;
2755
2756 BT_DBG("chan %p", chan);
2757
2758 l2cap_chan_lock(chan);
2759
2760 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
2761 chan->last_acked_seq);
2762
2763 if (frames_to_ack)
2764 l2cap_send_rr_or_rnr(chan, 0);
2765
2766 l2cap_chan_unlock(chan);
2767 l2cap_chan_put(chan);
2768}
2769
2770int l2cap_ertm_init(struct l2cap_chan *chan)
2771{
2772 int err;
2773
2774 chan->next_tx_seq = 0;
2775 chan->expected_tx_seq = 0;
2776 chan->expected_ack_seq = 0;
2777 chan->unacked_frames = 0;
2778 chan->buffer_seq = 0;
2779 chan->frames_sent = 0;
2780 chan->last_acked_seq = 0;
2781 chan->sdu = NULL;
2782 chan->sdu_last_frag = NULL;
2783 chan->sdu_len = 0;
2784
2785 skb_queue_head_init(&chan->tx_q);
2786
2787 if (chan->mode != L2CAP_MODE_ERTM)
2788 return 0;
2789
2790 chan->rx_state = L2CAP_RX_STATE_RECV;
2791 chan->tx_state = L2CAP_TX_STATE_XMIT;
2792
2793 INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
2794 INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
2795 INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
2796
2797 skb_queue_head_init(&chan->srej_q);
2798
2799 err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
2800 if (err < 0)
2801 return err;
2802
2803 err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
2804 if (err < 0)
2805 l2cap_seq_list_free(&chan->srej_list);
2806
2807 return err;
2808}
2809
2810static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2811{
2812 switch (mode) {
2813 case L2CAP_MODE_STREAMING:
2814 case L2CAP_MODE_ERTM:
2815 if (l2cap_mode_supported(mode, remote_feat_mask))
2816 return mode;
2817 /* fall through */
2818 default:
2819 return L2CAP_MODE_BASIC;
2820 }
2821}
2822
2823static inline bool __l2cap_ews_supported(struct l2cap_chan *chan)
2824{
2825 return enable_hs && (chan->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW);
2826}
2827
2828static inline bool __l2cap_efs_supported(struct l2cap_chan *chan)
2829{
2830 return enable_hs && (chan->conn->feat_mask & L2CAP_FEAT_EXT_FLOW);
2831}
2832
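/* A transmit window larger than the default of 63 frames requires the
 * extended control field (and the extended window size option); cap
 * the window accordingly and mirror the result in ack_win.
 */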
2833static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
2834{
2835 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
2836 __l2cap_ews_supported(chan)) {
2837 /* use extended control field */
2838 set_bit(FLAG_EXT_CTRL, &chan->flags);
2839 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
2840 } else {
2841 chan->tx_win = min_t(u16, chan->tx_win,
2842 L2CAP_DEFAULT_TX_WINDOW);
2843 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
2844 }
2845 chan->ack_win = chan->tx_win;
2846}
2847
2848static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
2849{
2850 struct l2cap_conf_req *req = data;
2851 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
2852 void *ptr = req->data;
2853 u16 size;
2854
2855 BT_DBG("chan %p", chan);
2856
2857 if (chan->num_conf_req || chan->num_conf_rsp)
2858 goto done;
2859
2860 switch (chan->mode) {
2861 case L2CAP_MODE_STREAMING:
2862 case L2CAP_MODE_ERTM:
2863 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
2864 break;
2865
2866 if (__l2cap_efs_supported(chan))
2867 set_bit(FLAG_EFS_ENABLE, &chan->flags);
2868
2869 /* fall through */
2870 default:
2871 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
2872 break;
2873 }
2874
2875done:
2876 if (chan->imtu != L2CAP_DEFAULT_MTU)
2877 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
2878
2879 switch (chan->mode) {
2880 case L2CAP_MODE_BASIC:
2881 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
2882 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
2883 break;
2884
2885 rfc.mode = L2CAP_MODE_BASIC;
2886 rfc.txwin_size = 0;
2887 rfc.max_transmit = 0;
2888 rfc.retrans_timeout = 0;
2889 rfc.monitor_timeout = 0;
2890 rfc.max_pdu_size = 0;
2891
2892 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2893 (unsigned long) &rfc);
2894 break;
2895
2896 case L2CAP_MODE_ERTM:
2897 rfc.mode = L2CAP_MODE_ERTM;
2898 rfc.max_transmit = chan->max_tx;
2899 rfc.retrans_timeout = 0;
2900 rfc.monitor_timeout = 0;
2901
2902 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
2903 L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
2904 L2CAP_FCS_SIZE);
2905 rfc.max_pdu_size = cpu_to_le16(size);
2906
2907 l2cap_txwin_setup(chan);
2908
2909 rfc.txwin_size = min_t(u16, chan->tx_win,
2910 L2CAP_DEFAULT_TX_WINDOW);
2911
2912 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2913 (unsigned long) &rfc);
2914
2915 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
2916 l2cap_add_opt_efs(&ptr, chan);
2917
2918 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
2919 break;
2920
2921 if (chan->fcs == L2CAP_FCS_NONE ||
2922 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
2923 chan->fcs = L2CAP_FCS_NONE;
2924 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
2925 }
2926
2927 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2928 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
2929 chan->tx_win);
2930 break;
2931
2932 case L2CAP_MODE_STREAMING:
2933 l2cap_txwin_setup(chan);
2934 rfc.mode = L2CAP_MODE_STREAMING;
2935 rfc.txwin_size = 0;
2936 rfc.max_transmit = 0;
2937 rfc.retrans_timeout = 0;
2938 rfc.monitor_timeout = 0;
2939
2940 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
2941 L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
2942 L2CAP_FCS_SIZE);
2943 rfc.max_pdu_size = cpu_to_le16(size);
2944
2945 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2946 (unsigned long) &rfc);
2947
2948 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
2949 l2cap_add_opt_efs(&ptr, chan);
2950
2951 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
2952 break;
2953
2954 if (chan->fcs == L2CAP_FCS_NONE ||
2955 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
2956 chan->fcs = L2CAP_FCS_NONE;
2957 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
2958 }
2959 break;
2960 }
2961
2962 req->dcid = cpu_to_le16(chan->dcid);
2963 req->flags = __constant_cpu_to_le16(0);
2964
2965 return ptr - data;
2966}
2967
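/* Parse the peer's (possibly reassembled) Configuration Request stored
 * in chan->conf_req and build our Configuration Response in 'data'.
 * Returns the length of the response, or -ECONNREFUSED if the request
 * cannot be accepted at all.
 */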
2968static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
2969{
2970 struct l2cap_conf_rsp *rsp = data;
2971 void *ptr = rsp->data;
2972 void *req = chan->conf_req;
2973 int len = chan->conf_len;
2974 int type, hint, olen;
2975 unsigned long val;
2976 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2977 struct l2cap_conf_efs efs;
2978 u8 remote_efs = 0;
2979 u16 mtu = L2CAP_DEFAULT_MTU;
2980 u16 result = L2CAP_CONF_SUCCESS;
2981 u16 size;
2982
2983 BT_DBG("chan %p", chan);
2984
2985 while (len >= L2CAP_CONF_OPT_SIZE) {
2986 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
2987
2988 hint = type & L2CAP_CONF_HINT;
2989 type &= L2CAP_CONF_MASK;
2990
2991 switch (type) {
2992 case L2CAP_CONF_MTU:
2993 mtu = val;
2994 break;
2995
2996 case L2CAP_CONF_FLUSH_TO:
2997 chan->flush_to = val;
2998 break;
2999
3000 case L2CAP_CONF_QOS:
3001 break;
3002
3003 case L2CAP_CONF_RFC:
3004 if (olen == sizeof(rfc))
3005 memcpy(&rfc, (void *) val, olen);
3006 break;
3007
3008 case L2CAP_CONF_FCS:
3009 if (val == L2CAP_FCS_NONE)
3010 set_bit(CONF_NO_FCS_RECV, &chan->conf_state);
3011 break;
3012
3013 case L2CAP_CONF_EFS:
3014 remote_efs = 1;
3015 if (olen == sizeof(efs))
3016 memcpy(&efs, (void *) val, olen);
3017 break;
3018
3019 case L2CAP_CONF_EWS:
3020 if (!enable_hs)
3021 return -ECONNREFUSED;
3022
3023 set_bit(FLAG_EXT_CTRL, &chan->flags);
3024 set_bit(CONF_EWS_RECV, &chan->conf_state);
3025 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3026 chan->remote_tx_win = val;
3027 break;
3028
3029 default:
3030 if (hint)
3031 break;
3032
3033 result = L2CAP_CONF_UNKNOWN;
3034 *((u8 *) ptr++) = type;
3035 break;
3036 }
3037 }
3038
3039 if (chan->num_conf_rsp || chan->num_conf_req > 1)
3040 goto done;
3041
3042 switch (chan->mode) {
3043 case L2CAP_MODE_STREAMING:
3044 case L2CAP_MODE_ERTM:
3045 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
3046 chan->mode = l2cap_select_mode(rfc.mode,
3047 chan->conn->feat_mask);
3048 break;
3049 }
3050
3051 if (remote_efs) {
3052 if (__l2cap_efs_supported(chan))
3053 set_bit(FLAG_EFS_ENABLE, &chan->flags);
3054 else
3055 return -ECONNREFUSED;
3056 }
3057
3058 if (chan->mode != rfc.mode)
3059 return -ECONNREFUSED;
3060
3061 break;
3062 }
3063
3064done:
3065 if (chan->mode != rfc.mode) {
3066 result = L2CAP_CONF_UNACCEPT;
3067 rfc.mode = chan->mode;
3068
3069 if (chan->num_conf_rsp == 1)
3070 return -ECONNREFUSED;
3071
3072 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3073 (unsigned long) &rfc);
3074 }
3075
3076 if (result == L2CAP_CONF_SUCCESS) {
3077 /* Configure output options and let the other side know
3078 * which ones we don't like. */
3079
3080 if (mtu < L2CAP_DEFAULT_MIN_MTU)
3081 result = L2CAP_CONF_UNACCEPT;
3082 else {
3083 chan->omtu = mtu;
3084 set_bit(CONF_MTU_DONE, &chan->conf_state);
3085 }
3086 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
3087
3088 if (remote_efs) {
3089 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3090 efs.stype != L2CAP_SERV_NOTRAFIC &&
3091 efs.stype != chan->local_stype) {
3092
3093 result = L2CAP_CONF_UNACCEPT;
3094
3095 if (chan->num_conf_req >= 1)
3096 return -ECONNREFUSED;
3097
3098 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3099 sizeof(efs),
3100 (unsigned long) &efs);
3101 } else {
3102 /* Send PENDING Conf Rsp */
3103 result = L2CAP_CONF_PENDING;
3104 set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3105 }
3106 }
3107
3108 switch (rfc.mode) {
3109 case L2CAP_MODE_BASIC:
3110 chan->fcs = L2CAP_FCS_NONE;
3111 set_bit(CONF_MODE_DONE, &chan->conf_state);
3112 break;
3113
3114 case L2CAP_MODE_ERTM:
3115 if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
3116 chan->remote_tx_win = rfc.txwin_size;
3117 else
3118 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
3119
3120 chan->remote_max_tx = rfc.max_transmit;
3121
3122 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3123 chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3124 L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3125 rfc.max_pdu_size = cpu_to_le16(size);
3126 chan->remote_mps = size;
3127
3128 rfc.retrans_timeout =
3129 __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
3130 rfc.monitor_timeout =
3131 __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
3132
3133 set_bit(CONF_MODE_DONE, &chan->conf_state);
3134
3135 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3136 sizeof(rfc), (unsigned long) &rfc);
3137
3138 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3139 chan->remote_id = efs.id;
3140 chan->remote_stype = efs.stype;
3141 chan->remote_msdu = le16_to_cpu(efs.msdu);
3142 chan->remote_flush_to =
3143 le32_to_cpu(efs.flush_to);
3144 chan->remote_acc_lat =
3145 le32_to_cpu(efs.acc_lat);
3146 chan->remote_sdu_itime =
3147 le32_to_cpu(efs.sdu_itime);
3148 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3149 sizeof(efs),
3150 (unsigned long) &efs);
3151 }
3152 break;
3153
3154 case L2CAP_MODE_STREAMING:
3155 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3156 chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3157 L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3158 rfc.max_pdu_size = cpu_to_le16(size);
3159 chan->remote_mps = size;
3160
3161 set_bit(CONF_MODE_DONE, &chan->conf_state);
3162
3163 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3164 (unsigned long) &rfc);
3165
3166 break;
3167
3168 default:
3169 result = L2CAP_CONF_UNACCEPT;
3170
3171 memset(&rfc, 0, sizeof(rfc));
3172 rfc.mode = chan->mode;
3173 }
3174
3175 if (result == L2CAP_CONF_SUCCESS)
3176 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3177 }
3178 rsp->scid = cpu_to_le16(chan->dcid);
3179 rsp->result = cpu_to_le16(result);
3180 rsp->flags = __constant_cpu_to_le16(0);
3181
3182 return ptr - data;
3183}
3184
3185static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
3186 void *data, u16 *result)
3187{
3188 struct l2cap_conf_req *req = data;
3189 void *ptr = req->data;
3190 int type, olen;
3191 unsigned long val;
3192 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3193 struct l2cap_conf_efs efs;
3194
3195 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
3196
3197 while (len >= L2CAP_CONF_OPT_SIZE) {
3198 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3199
3200 switch (type) {
3201 case L2CAP_CONF_MTU:
3202 if (val < L2CAP_DEFAULT_MIN_MTU) {
3203 *result = L2CAP_CONF_UNACCEPT;
3204 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3205 } else
3206 chan->imtu = val;
3207 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
3208 break;
3209
3210 case L2CAP_CONF_FLUSH_TO:
3211 chan->flush_to = val;
3212 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
3213 2, chan->flush_to);
3214 break;
3215
3216 case L2CAP_CONF_RFC:
3217 if (olen == sizeof(rfc))
3218 memcpy(&rfc, (void *)val, olen);
3219
3220 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
3221 rfc.mode != chan->mode)
3222 return -ECONNREFUSED;
3223
3224 chan->fcs = 0;
3225
3226 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3227 sizeof(rfc), (unsigned long) &rfc);
3228 break;
3229
3230 case L2CAP_CONF_EWS:
3231 chan->ack_win = min_t(u16, val, chan->ack_win);
3232 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3233 chan->tx_win);
3234 break;
3235
3236 case L2CAP_CONF_EFS:
3237 if (olen == sizeof(efs))
3238 memcpy(&efs, (void *)val, olen);
3239
3240 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3241 efs.stype != L2CAP_SERV_NOTRAFIC &&
3242 efs.stype != chan->local_stype)
3243 return -ECONNREFUSED;
3244
3245 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
3246 (unsigned long) &efs);
3247 break;
3248 }
3249 }
3250
3251 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
3252 return -ECONNREFUSED;
3253
3254 chan->mode = rfc.mode;
3255
3256 if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
3257 switch (rfc.mode) {
3258 case L2CAP_MODE_ERTM:
3259 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3260 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3261 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3262 if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
3263 chan->ack_win = min_t(u16, chan->ack_win,
3264 rfc.txwin_size);
3265
3266 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3267 chan->local_msdu = le16_to_cpu(efs.msdu);
3268 chan->local_sdu_itime =
3269 le32_to_cpu(efs.sdu_itime);
3270 chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
3271 chan->local_flush_to =
3272 le32_to_cpu(efs.flush_to);
3273 }
3274 break;
3275
3276 case L2CAP_MODE_STREAMING:
3277 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3278 }
3279 }
3280
3281 req->dcid = cpu_to_le16(chan->dcid);
3282 req->flags = __constant_cpu_to_le16(0);
3283
3284 return ptr - data;
3285}
3286
3287static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
3288 u16 result, u16 flags)
3289{
3290 struct l2cap_conf_rsp *rsp = data;
3291 void *ptr = rsp->data;
3292
3293 BT_DBG("chan %p", chan);
3294
3295 rsp->scid = cpu_to_le16(chan->dcid);
3296 rsp->result = cpu_to_le16(result);
3297 rsp->flags = cpu_to_le16(flags);
3298
3299 return ptr - data;
3300}
3301
3302void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
3303{
3304 struct l2cap_conn_rsp rsp;
3305 struct l2cap_conn *conn = chan->conn;
3306 u8 buf[128];
3307
3308 rsp.scid = cpu_to_le16(chan->dcid);
3309 rsp.dcid = cpu_to_le16(chan->scid);
3310 rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
3311 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
3312 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
3313
3314 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3315 return;
3316
3317 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3318 l2cap_build_conf_req(chan, buf), buf);
3319 chan->num_conf_req++;
3320}
3321
3322static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
3323{
3324 int type, olen;
3325 unsigned long val;
3326 /* Use sane default values in case a misbehaving remote device
3327 * did not send an RFC or extended window size option.
3328 */
3329 u16 txwin_ext = chan->ack_win;
3330 struct l2cap_conf_rfc rfc = {
3331 .mode = chan->mode,
3332 .retrans_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
3333 .monitor_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
3334 .max_pdu_size = cpu_to_le16(chan->imtu),
3335 .txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
3336 };
3337
3338 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
3339
3340 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
3341 return;
3342
3343 while (len >= L2CAP_CONF_OPT_SIZE) {
3344 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3345
3346 switch (type) {
3347 case L2CAP_CONF_RFC:
3348 if (olen == sizeof(rfc))
3349 memcpy(&rfc, (void *)val, olen);
3350 break;
3351 case L2CAP_CONF_EWS:
3352 txwin_ext = val;
3353 break;
3354 }
3355 }
3356
3357 switch (rfc.mode) {
3358 case L2CAP_MODE_ERTM:
3359 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3360 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3361 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3362 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3363 chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
3364 else
3365 chan->ack_win = min_t(u16, chan->ack_win,
3366 rfc.txwin_size);
3367 break;
3368 case L2CAP_MODE_STREAMING:
3369 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3370 }
3371}
3372
3373static inline int l2cap_command_rej(struct l2cap_conn *conn,
3374 struct l2cap_cmd_hdr *cmd, u8 *data)
3375{
3376 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
3377
3378 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
3379 return 0;
3380
3381 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
3382 cmd->ident == conn->info_ident) {
3383 cancel_delayed_work(&conn->info_timer);
3384
3385 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3386 conn->info_ident = 0;
3387
3388 l2cap_conn_start(conn);
3389 }
3390
3391 return 0;
3392}
3393
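/* Handle an incoming Connection Request (also used for the AMP Create
 * Channel Request): look up a listening channel for the PSM, check the
 * link security, create a new channel and reply with a success,
 * pending or error result.  A pending reply may additionally trigger
 * an Information Request for the remote feature mask.
 */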
3394static void l2cap_connect(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd,
3395 u8 *data, u8 rsp_code, u8 amp_id)
3396{
3397 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
3398 struct l2cap_conn_rsp rsp;
3399 struct l2cap_chan *chan = NULL, *pchan;
3400 struct sock *parent, *sk = NULL;
3401 int result, status = L2CAP_CS_NO_INFO;
3402
3403 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
3404 __le16 psm = req->psm;
3405
3406 BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);
3407
3408 /* Check if we have socket listening on psm */
3409 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src, conn->dst);
3410 if (!pchan) {
3411 result = L2CAP_CR_BAD_PSM;
3412 goto sendresp;
3413 }
3414
3415 parent = pchan->sk;
3416
3417 mutex_lock(&conn->chan_lock);
3418 lock_sock(parent);
3419
3420 /* Check if the ACL is secure enough (if not SDP) */
3421 if (psm != __constant_cpu_to_le16(L2CAP_PSM_SDP) &&
3422 !hci_conn_check_link_mode(conn->hcon)) {
3423 conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
3424 result = L2CAP_CR_SEC_BLOCK;
3425 goto response;
3426 }
3427
3428 result = L2CAP_CR_NO_MEM;
3429
3430 /* Check if we already have channel with that dcid */
3431 if (__l2cap_get_chan_by_dcid(conn, scid))
3432 goto response;
3433
3434 chan = pchan->ops->new_connection(pchan);
3435 if (!chan)
3436 goto response;
3437
3438 sk = chan->sk;
3439
3440 hci_conn_hold(conn->hcon);
3441
3442 bacpy(&bt_sk(sk)->src, conn->src);
3443 bacpy(&bt_sk(sk)->dst, conn->dst);
3444 chan->psm = psm;
3445 chan->dcid = scid;
3446
3447 __l2cap_chan_add(conn, chan);
3448
3449 dcid = chan->scid;
3450
3451 __set_chan_timer(chan, sk->sk_sndtimeo);
3452
3453 chan->ident = cmd->ident;
3454
3455 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
3456 if (l2cap_chan_check_security(chan)) {
3457 if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) {
3458 __l2cap_state_change(chan, BT_CONNECT2);
3459 result = L2CAP_CR_PEND;
3460 status = L2CAP_CS_AUTHOR_PEND;
3461 chan->ops->defer(chan);
3462 } else {
3463 __l2cap_state_change(chan, BT_CONFIG);
3464 result = L2CAP_CR_SUCCESS;
3465 status = L2CAP_CS_NO_INFO;
3466 }
3467 } else {
3468 __l2cap_state_change(chan, BT_CONNECT2);
3469 result = L2CAP_CR_PEND;
3470 status = L2CAP_CS_AUTHEN_PEND;
3471 }
3472 } else {
3473 __l2cap_state_change(chan, BT_CONNECT2);
3474 result = L2CAP_CR_PEND;
3475 status = L2CAP_CS_NO_INFO;
3476 }
3477
3478response:
3479 release_sock(parent);
3480 mutex_unlock(&conn->chan_lock);
3481
3482sendresp:
3483 rsp.scid = cpu_to_le16(scid);
3484 rsp.dcid = cpu_to_le16(dcid);
3485 rsp.result = cpu_to_le16(result);
3486 rsp.status = cpu_to_le16(status);
3487 l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);
3488
3489 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
3490 struct l2cap_info_req info;
3491 info.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
3492
3493 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
3494 conn->info_ident = l2cap_get_ident(conn);
3495
3496 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
3497
3498 l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
3499 sizeof(info), &info);
3500 }
3501
3502 if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
3503 result == L2CAP_CR_SUCCESS) {
3504 u8 buf[128];
3505 set_bit(CONF_REQ_SENT, &chan->conf_state);
3506 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3507 l2cap_build_conf_req(chan, buf), buf);
3508 chan->num_conf_req++;
3509 }
3510}
3511
3512static int l2cap_connect_req(struct l2cap_conn *conn,
3513 struct l2cap_cmd_hdr *cmd, u8 *data)
3514{
3515 l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
3516 return 0;
3517}
3518
3519static inline int l2cap_connect_rsp(struct l2cap_conn *conn,
3520 struct l2cap_cmd_hdr *cmd, u8 *data)
3521{
3522 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
3523 u16 scid, dcid, result, status;
3524 struct l2cap_chan *chan;
3525 u8 req[128];
3526 int err;
3527
3528 scid = __le16_to_cpu(rsp->scid);
3529 dcid = __le16_to_cpu(rsp->dcid);
3530 result = __le16_to_cpu(rsp->result);
3531 status = __le16_to_cpu(rsp->status);
3532
3533 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
3534 dcid, scid, result, status);
3535
3536 mutex_lock(&conn->chan_lock);
3537
3538 if (scid) {
3539 chan = __l2cap_get_chan_by_scid(conn, scid);
3540 if (!chan) {
3541 err = -EFAULT;
3542 goto unlock;
3543 }
3544 } else {
3545 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
3546 if (!chan) {
3547 err = -EFAULT;
3548 goto unlock;
3549 }
3550 }
3551
3552 err = 0;
3553
3554 l2cap_chan_lock(chan);
3555
3556 switch (result) {
3557 case L2CAP_CR_SUCCESS:
3558 l2cap_state_change(chan, BT_CONFIG);
3559 chan->ident = 0;
3560 chan->dcid = dcid;
3561 clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
3562
3563 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3564 break;
3565
3566 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3567 l2cap_build_conf_req(chan, req), req);
3568 chan->num_conf_req++;
3569 break;
3570
3571 case L2CAP_CR_PEND:
3572 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
3573 break;
3574
3575 default:
3576 l2cap_chan_del(chan, ECONNREFUSED);
3577 break;
3578 }
3579
3580 l2cap_chan_unlock(chan);
3581
3582unlock:
3583 mutex_unlock(&conn->chan_lock);
3584
3585 return err;
3586}
3587
3588static inline void set_default_fcs(struct l2cap_chan *chan)
3589{
3590 /* FCS is enabled only in ERTM or streaming mode, if one or both
3591 * sides request it.
3592 */
3593 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
3594 chan->fcs = L2CAP_FCS_NONE;
3595 else if (!test_bit(CONF_NO_FCS_RECV, &chan->conf_state))
3596 chan->fcs = L2CAP_FCS_CRC16;
3597}
3598
3599static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
3600 u8 ident, u16 flags)
3601{
3602 struct l2cap_conn *conn = chan->conn;
3603
3604 BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
3605 flags);
3606
3607 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3608 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3609
3610 l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
3611 l2cap_build_conf_rsp(chan, data,
3612 L2CAP_CONF_SUCCESS, flags), data);
3613}
3614
3615static inline int l2cap_config_req(struct l2cap_conn *conn,
3616 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3617 u8 *data)
3618{
3619 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
3620 u16 dcid, flags;
3621 u8 rsp[64];
3622 struct l2cap_chan *chan;
3623 int len, err = 0;
3624
3625 dcid = __le16_to_cpu(req->dcid);
3626 flags = __le16_to_cpu(req->flags);
3627
3628 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
3629
3630 chan = l2cap_get_chan_by_scid(conn, dcid);
3631 if (!chan)
3632 return -ENOENT;
3633
3634 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
3635 struct l2cap_cmd_rej_cid rej;
3636
3637 rej.reason = __constant_cpu_to_le16(L2CAP_REJ_INVALID_CID);
3638 rej.scid = cpu_to_le16(chan->scid);
3639 rej.dcid = cpu_to_le16(chan->dcid);
3640
3641 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
3642 sizeof(rej), &rej);
3643 goto unlock;
3644 }
3645
3646 /* Reject if config buffer is too small. */
3647 len = cmd_len - sizeof(*req);
3648 if (len < 0 || chan->conf_len + len > sizeof(chan->conf_req)) {
3649 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3650 l2cap_build_conf_rsp(chan, rsp,
3651 L2CAP_CONF_REJECT, flags), rsp);
3652 goto unlock;
3653 }
3654
3655 /* Store config. */
3656 memcpy(chan->conf_req + chan->conf_len, req->data, len);
3657 chan->conf_len += len;
3658
3659 if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
3660 /* Incomplete config. Send empty response. */
3661 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3662 l2cap_build_conf_rsp(chan, rsp,
3663 L2CAP_CONF_SUCCESS, flags), rsp);
3664 goto unlock;
3665 }
3666
3667 /* Complete config. */
3668 len = l2cap_parse_conf_req(chan, rsp);
3669 if (len < 0) {
3670 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3671 goto unlock;
3672 }
3673
3674 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
3675 chan->num_conf_rsp++;
3676
3677 /* Reset config buffer. */
3678 chan->conf_len = 0;
3679
3680 if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
3681 goto unlock;
3682
3683 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
3684 set_default_fcs(chan);
3685
3686 if (chan->mode == L2CAP_MODE_ERTM ||
3687 chan->mode == L2CAP_MODE_STREAMING)
3688 err = l2cap_ertm_init(chan);
3689
3690 if (err < 0)
3691 l2cap_send_disconn_req(chan->conn, chan, -err);
3692 else
3693 l2cap_chan_ready(chan);
3694
3695 goto unlock;
3696 }
3697
3698 if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
3699 u8 buf[64];
3700 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3701 l2cap_build_conf_req(chan, buf), buf);
3702 chan->num_conf_req++;
3703 }
3704
3705 /* Got Conf Rsp PENDING from remote side and assume we sent
3706 Conf Rsp PENDING in the code above */
3707 if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
3708 test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
3709
3710 /* check compatibility */
3711
3712 l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
3713 }
3714
3715unlock:
3716 l2cap_chan_unlock(chan);
3717 return err;
3718}
3719
3720static inline int l2cap_config_rsp(struct l2cap_conn *conn,
3721 struct l2cap_cmd_hdr *cmd, u8 *data)
3722{
3723 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
3724 u16 scid, flags, result;
3725 struct l2cap_chan *chan;
3726 int len = le16_to_cpu(cmd->len) - sizeof(*rsp);
3727 int err = 0;
3728
3729 scid = __le16_to_cpu(rsp->scid);
3730 flags = __le16_to_cpu(rsp->flags);
3731 result = __le16_to_cpu(rsp->result);
3732
3733 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
3734 result, len);
3735
3736 chan = l2cap_get_chan_by_scid(conn, scid);
3737 if (!chan)
3738 return 0;
3739
3740 switch (result) {
3741 case L2CAP_CONF_SUCCESS:
3742 l2cap_conf_rfc_get(chan, rsp->data, len);
3743 clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
3744 break;
3745
3746 case L2CAP_CONF_PENDING:
3747 set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
3748
3749 if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
3750 char buf[64];
3751
3752 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
3753 buf, &result);
3754 if (len < 0) {
3755 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3756 goto done;
3757 }
3758
3759 /* check compatibility */
3760
3761 l2cap_send_efs_conf_rsp(chan, buf, cmd->ident, 0);
3762 }
3763 goto done;
3764
3765 case L2CAP_CONF_UNACCEPT:
3766 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
3767 char req[64];
3768
3769 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
3770 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3771 goto done;
3772 }
3773
3774 /* throw out any old stored conf requests */
3775 result = L2CAP_CONF_SUCCESS;
3776 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
3777 req, &result);
3778 if (len < 0) {
3779 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3780 goto done;
3781 }
3782
3783 l2cap_send_cmd(conn, l2cap_get_ident(conn),
3784 L2CAP_CONF_REQ, len, req);
3785 chan->num_conf_req++;
3786 if (result != L2CAP_CONF_SUCCESS)
3787 goto done;
3788 break;
3789 }
3790
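/* fall through */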
3791 default:
3792 l2cap_chan_set_err(chan, ECONNRESET);
3793
3794 __set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
3795 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3796 goto done;
3797 }
3798
3799 if (flags & L2CAP_CONF_FLAG_CONTINUATION)
3800 goto done;
3801
3802 set_bit(CONF_INPUT_DONE, &chan->conf_state);
3803
3804 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
3805 set_default_fcs(chan);
3806
3807 if (chan->mode == L2CAP_MODE_ERTM ||
3808 chan->mode == L2CAP_MODE_STREAMING)
3809 err = l2cap_ertm_init(chan);
3810
3811 if (err < 0)
3812 l2cap_send_disconn_req(chan->conn, chan, -err);
3813 else
3814 l2cap_chan_ready(chan);
3815 }
3816
3817done:
3818 l2cap_chan_unlock(chan);
3819 return err;
3820}
3821
3822static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
3823 struct l2cap_cmd_hdr *cmd, u8 *data)
3824{
3825 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
3826 struct l2cap_disconn_rsp rsp;
3827 u16 dcid, scid;
3828 struct l2cap_chan *chan;
3829 struct sock *sk;
3830
3831 scid = __le16_to_cpu(req->scid);
3832 dcid = __le16_to_cpu(req->dcid);
3833
3834 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
3835
3836 mutex_lock(&conn->chan_lock);
3837
3838 chan = __l2cap_get_chan_by_scid(conn, dcid);
3839 if (!chan) {
3840 mutex_unlock(&conn->chan_lock);
3841 return 0;
3842 }
3843
3844 l2cap_chan_lock(chan);
3845
3846 sk = chan->sk;
3847
3848 rsp.dcid = cpu_to_le16(chan->scid);
3849 rsp.scid = cpu_to_le16(chan->dcid);
3850 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
3851
3852 lock_sock(sk);
3853 sk->sk_shutdown = SHUTDOWN_MASK;
3854 release_sock(sk);
3855
3856 l2cap_chan_hold(chan);
3857 l2cap_chan_del(chan, ECONNRESET);
3858
3859 l2cap_chan_unlock(chan);
3860
3861 chan->ops->close(chan);
3862 l2cap_chan_put(chan);
3863
3864 mutex_unlock(&conn->chan_lock);
3865
3866 return 0;
3867}
3868
3869static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
3870 struct l2cap_cmd_hdr *cmd, u8 *data)
3871{
3872 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
3873 u16 dcid, scid;
3874 struct l2cap_chan *chan;
3875
3876 scid = __le16_to_cpu(rsp->scid);
3877 dcid = __le16_to_cpu(rsp->dcid);
3878
3879 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
3880
3881 mutex_lock(&conn->chan_lock);
3882
3883 chan = __l2cap_get_chan_by_scid(conn, scid);
3884 if (!chan) {
3885 mutex_unlock(&conn->chan_lock);
3886 return 0;
3887 }
3888
3889 l2cap_chan_lock(chan);
3890
3891 l2cap_chan_hold(chan);
3892 l2cap_chan_del(chan, 0);
3893
3894 l2cap_chan_unlock(chan);
3895
3896 chan->ops->close(chan);
3897 l2cap_chan_put(chan);
3898
3899 mutex_unlock(&conn->chan_lock);
3900
3901 return 0;
3902}
3903
3904static inline int l2cap_information_req(struct l2cap_conn *conn,
3905 struct l2cap_cmd_hdr *cmd, u8 *data)
3906{
3907 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
3908 u16 type;
3909
3910 type = __le16_to_cpu(req->type);
3911
3912 BT_DBG("type 0x%4.4x", type);
3913
3914 if (type == L2CAP_IT_FEAT_MASK) {
3915 u8 buf[8];
3916 u32 feat_mask = l2cap_feat_mask;
3917 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3918 rsp->type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
3919 rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
3920 if (!disable_ertm)
3921 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
3922 | L2CAP_FEAT_FCS;
3923 if (enable_hs)
3924 feat_mask |= L2CAP_FEAT_EXT_FLOW
3925 | L2CAP_FEAT_EXT_WINDOW;
3926
3927 put_unaligned_le32(feat_mask, rsp->data);
3928 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
3929 buf);
3930 } else if (type == L2CAP_IT_FIXED_CHAN) {
3931 u8 buf[12];
3932 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3933
3934 if (enable_hs)
3935 l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
3936 else
3937 l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;
3938
3939 rsp->type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3940 rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
3941 memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
3942 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
3943 buf);
3944 } else {
3945 struct l2cap_info_rsp rsp;
3946 rsp.type = cpu_to_le16(type);
3947 rsp.result = __constant_cpu_to_le16(L2CAP_IR_NOTSUPP);
3948 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
3949 &rsp);
3950 }
3951
3952 return 0;
3953}
3954
3955static inline int l2cap_information_rsp(struct l2cap_conn *conn,
3956 struct l2cap_cmd_hdr *cmd, u8 *data)
3957{
3958 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
3959 u16 type, result;
3960
3961 type = __le16_to_cpu(rsp->type);
3962 result = __le16_to_cpu(rsp->result);
3963
3964 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
3965
3966 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
3967 if (cmd->ident != conn->info_ident ||
3968 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
3969 return 0;
3970
3971 cancel_delayed_work(&conn->info_timer);
3972
3973 if (result != L2CAP_IR_SUCCESS) {
3974 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3975 conn->info_ident = 0;
3976
3977 l2cap_conn_start(conn);
3978
3979 return 0;
3980 }
3981
3982 switch (type) {
3983 case L2CAP_IT_FEAT_MASK:
3984 conn->feat_mask = get_unaligned_le32(rsp->data);
3985
3986 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
3987 struct l2cap_info_req req;
3988 req.type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3989
3990 conn->info_ident = l2cap_get_ident(conn);
3991
3992 l2cap_send_cmd(conn, conn->info_ident,
3993 L2CAP_INFO_REQ, sizeof(req), &req);
3994 } else {
3995 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3996 conn->info_ident = 0;
3997
3998 l2cap_conn_start(conn);
3999 }
4000 break;
4001
4002 case L2CAP_IT_FIXED_CHAN:
4003 conn->fixed_chan_mask = rsp->data[0];
4004 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4005 conn->info_ident = 0;
4006
4007 l2cap_conn_start(conn);
4008 break;
4009 }
4010
4011 return 0;
4012}
4013
4014static inline int l2cap_create_channel_req(struct l2cap_conn *conn,
4015 struct l2cap_cmd_hdr *cmd,
4016 u16 cmd_len, void *data)
4017{
4018 struct l2cap_create_chan_req *req = data;
4019 struct l2cap_create_chan_rsp rsp;
4020 u16 psm, scid;
4021
4022 if (cmd_len != sizeof(*req))
4023 return -EPROTO;
4024
4025 if (!enable_hs)
4026 return -EINVAL;
4027
4028 psm = le16_to_cpu(req->psm);
4029 scid = le16_to_cpu(req->scid);
4030
4031 BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);
4032
4033 /* Placeholder: Always reject */
4034 rsp.dcid = 0;
4035 rsp.scid = cpu_to_le16(scid);
4036 rsp.result = __constant_cpu_to_le16(L2CAP_CR_NO_MEM);
4037 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
4038
4039 l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
4040 sizeof(rsp), &rsp);
4041
4042 return 0;
4043}
4044
4045static inline int l2cap_create_channel_rsp(struct l2cap_conn *conn,
4046 struct l2cap_cmd_hdr *cmd,
4047 void *data)
4048{
4049 BT_DBG("conn %p", conn);
4050
4051 return l2cap_connect_rsp(conn, cmd, data);
4052}
4053
4054static void l2cap_send_move_chan_rsp(struct l2cap_conn *conn, u8 ident,
4055 u16 icid, u16 result)
4056{
4057 struct l2cap_move_chan_rsp rsp;
4058
4059 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
4060
4061 rsp.icid = cpu_to_le16(icid);
4062 rsp.result = cpu_to_le16(result);
4063
4064 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_RSP, sizeof(rsp), &rsp);
4065}
4066
4067static void l2cap_send_move_chan_cfm(struct l2cap_conn *conn,
4068 struct l2cap_chan *chan,
4069 u16 icid, u16 result)
4070{
4071 struct l2cap_move_chan_cfm cfm;
4072 u8 ident;
4073
4074 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
4075
4076 ident = l2cap_get_ident(conn);
4077 if (chan)
4078 chan->ident = ident;
4079
4080 cfm.icid = cpu_to_le16(icid);
4081 cfm.result = cpu_to_le16(result);
4082
4083 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM, sizeof(cfm), &cfm);
4084}
4085
4086static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4087 u16 icid)
4088{
4089 struct l2cap_move_chan_cfm_rsp rsp;
4090
4091 BT_DBG("icid 0x%4.4x", icid);
4092
4093 rsp.icid = cpu_to_le16(icid);
4094 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
4095}
4096
4097static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
4098 struct l2cap_cmd_hdr *cmd,
4099 u16 cmd_len, void *data)
4100{
4101 struct l2cap_move_chan_req *req = data;
4102 u16 icid = 0;
4103 u16 result = L2CAP_MR_NOT_ALLOWED;
4104
4105 if (cmd_len != sizeof(*req))
4106 return -EPROTO;
4107
4108 icid = le16_to_cpu(req->icid);
4109
4110 BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);
4111
4112 if (!enable_hs)
4113 return -EINVAL;
4114
4115 /* Placeholder: Always refuse */
4116 l2cap_send_move_chan_rsp(conn, cmd->ident, icid, result);
4117
4118 return 0;
4119}
4120
4121static inline int l2cap_move_channel_rsp(struct l2cap_conn *conn,
4122 struct l2cap_cmd_hdr *cmd,
4123 u16 cmd_len, void *data)
4124{
4125 struct l2cap_move_chan_rsp *rsp = data;
4126 u16 icid, result;
4127
4128 if (cmd_len != sizeof(*rsp))
4129 return -EPROTO;
4130
4131 icid = le16_to_cpu(rsp->icid);
4132 result = le16_to_cpu(rsp->result);
4133
4134 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
4135
4136 /* Placeholder: Always unconfirmed */
4137 l2cap_send_move_chan_cfm(conn, NULL, icid, L2CAP_MC_UNCONFIRMED);
4138
4139 return 0;
4140}
4141
4142static inline int l2cap_move_channel_confirm(struct l2cap_conn *conn,
4143 struct l2cap_cmd_hdr *cmd,
4144 u16 cmd_len, void *data)
4145{
4146 struct l2cap_move_chan_cfm *cfm = data;
4147 u16 icid, result;
4148
4149 if (cmd_len != sizeof(*cfm))
4150 return -EPROTO;
4151
4152 icid = le16_to_cpu(cfm->icid);
4153 result = le16_to_cpu(cfm->result);
4154
4155 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
4156
4157 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
4158
4159 return 0;
4160}
4161
4162static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
4163 struct l2cap_cmd_hdr *cmd,
4164 u16 cmd_len, void *data)
4165{
4166 struct l2cap_move_chan_cfm_rsp *rsp = data;
4167 u16 icid;
4168
4169 if (cmd_len != sizeof(*rsp))
4170 return -EPROTO;
4171
4172 icid = le16_to_cpu(rsp->icid);
4173
4174 BT_DBG("icid 0x%4.4x", icid);
4175
4176 return 0;
4177}
4178
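/* Sanity check LE connection parameters.  Units follow the LE spec:
 * min/max interval in 1.25 ms steps, timeout in 10 ms steps, latency
 * in connection events.  For instance, max = 80 (100 ms interval) and
 * to_multiplier = 100 (1 s timeout) allow a slave latency of at most
 * (100 * 8 / 80) - 1 = 9 events.
 */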
4179static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
4180 u16 to_multiplier)
4181{
4182 u16 max_latency;
4183
4184 if (min > max || min < 6 || max > 3200)
4185 return -EINVAL;
4186
4187 if (to_multiplier < 10 || to_multiplier > 3200)
4188 return -EINVAL;
4189
4190 if (max >= to_multiplier * 8)
4191 return -EINVAL;
4192
4193 max_latency = (to_multiplier * 8 / max) - 1;
4194 if (latency > 499 || latency > max_latency)
4195 return -EINVAL;
4196
4197 return 0;
4198}
4199
4200static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
4201 struct l2cap_cmd_hdr *cmd,
4202 u8 *data)
4203{
4204 struct hci_conn *hcon = conn->hcon;
4205 struct l2cap_conn_param_update_req *req;
4206 struct l2cap_conn_param_update_rsp rsp;
4207 u16 min, max, latency, to_multiplier, cmd_len;
4208 int err;
4209
4210 if (!(hcon->link_mode & HCI_LM_MASTER))
4211 return -EINVAL;
4212
4213 cmd_len = __le16_to_cpu(cmd->len);
4214 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
4215 return -EPROTO;
4216
4217 req = (struct l2cap_conn_param_update_req *) data;
4218 min = __le16_to_cpu(req->min);
4219 max = __le16_to_cpu(req->max);
4220 latency = __le16_to_cpu(req->latency);
4221 to_multiplier = __le16_to_cpu(req->to_multiplier);
4222
4223 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
4224 min, max, latency, to_multiplier);
4225
4226 memset(&rsp, 0, sizeof(rsp));
4227
4228 err = l2cap_check_conn_param(min, max, latency, to_multiplier);
4229 if (err)
4230 rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
4231 else
4232 rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
4233
4234 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
4235 sizeof(rsp), &rsp);
4236
4237 if (!err)
4238 hci_le_conn_update(hcon, min, max, latency, to_multiplier);
4239
4240 return 0;
4241}
4242
4243static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
4244 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4245 u8 *data)
4246{
4247 int err = 0;
4248
4249 switch (cmd->code) {
4250 case L2CAP_COMMAND_REJ:
4251 l2cap_command_rej(conn, cmd, data);
4252 break;
4253
4254 case L2CAP_CONN_REQ:
4255 err = l2cap_connect_req(conn, cmd, data);
4256 break;
4257
4258 case L2CAP_CONN_RSP:
4259 case L2CAP_CREATE_CHAN_RSP:
4260 err = l2cap_connect_rsp(conn, cmd, data);
4261 break;
4262
4263 case L2CAP_CONF_REQ:
4264 err = l2cap_config_req(conn, cmd, cmd_len, data);
4265 break;
4266
4267 case L2CAP_CONF_RSP:
4268 err = l2cap_config_rsp(conn, cmd, data);
4269 break;
4270
4271 case L2CAP_DISCONN_REQ:
4272 err = l2cap_disconnect_req(conn, cmd, data);
4273 break;
4274
4275 case L2CAP_DISCONN_RSP:
4276 err = l2cap_disconnect_rsp(conn, cmd, data);
4277 break;
4278
4279 case L2CAP_ECHO_REQ:
4280 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
4281 break;
4282
4283 case L2CAP_ECHO_RSP:
4284 break;
4285
4286 case L2CAP_INFO_REQ:
4287 err = l2cap_information_req(conn, cmd, data);
4288 break;
4289
4290 case L2CAP_INFO_RSP:
4291 err = l2cap_information_rsp(conn, cmd, data);
4292 break;
4293
4294 case L2CAP_CREATE_CHAN_REQ:
4295 err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
4296 break;
4297
4298 case L2CAP_MOVE_CHAN_REQ:
4299 err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
4300 break;
4301
4302 case L2CAP_MOVE_CHAN_RSP:
4303 err = l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
4304 break;
4305
4306 case L2CAP_MOVE_CHAN_CFM:
4307 err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
4308 break;
4309
4310 case L2CAP_MOVE_CHAN_CFM_RSP:
4311 err = l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
4312 break;
4313
4314 default:
4315 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
4316 err = -EINVAL;
4317 break;
4318 }
4319
4320 return err;
4321}
4322
4323static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
4324 struct l2cap_cmd_hdr *cmd, u8 *data)
4325{
4326 switch (cmd->code) {
4327 case L2CAP_COMMAND_REJ:
4328 return 0;
4329
4330 case L2CAP_CONN_PARAM_UPDATE_REQ:
4331 return l2cap_conn_param_update_req(conn, cmd, data);
4332
4333 case L2CAP_CONN_PARAM_UPDATE_RSP:
4334 return 0;
4335
4336 default:
4337 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
4338 return -EINVAL;
4339 }
4340}
4341
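/* Process a PDU received on a signaling channel. One PDU may carry
 * several commands; each command starts with a 4-byte header (code,
 * identifier, length) and is dispatched to the LE or BR/EDR handler
 * according to the link type. A Command Reject is sent back for any
 * command the handlers fail on, and parsing stops at the first
 * truncated command or one with a zero identifier.
 */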
4342static inline void l2cap_sig_channel(struct l2cap_conn *conn,
4343 struct sk_buff *skb)
4344{
4345 u8 *data = skb->data;
4346 int len = skb->len;
4347 struct l2cap_cmd_hdr cmd;
4348 int err;
4349
4350 l2cap_raw_recv(conn, skb);
4351
4352 while (len >= L2CAP_CMD_HDR_SIZE) {
4353 u16 cmd_len;
4354 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
4355 data += L2CAP_CMD_HDR_SIZE;
4356 len -= L2CAP_CMD_HDR_SIZE;
4357
4358 cmd_len = le16_to_cpu(cmd.len);
4359
4360 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len,
4361 cmd.ident);
4362
4363 if (cmd_len > len || !cmd.ident) {
4364 BT_DBG("corrupted command");
4365 break;
4366 }
4367
4368 if (conn->hcon->type == LE_LINK)
4369 err = l2cap_le_sig_cmd(conn, &cmd, data);
4370 else
4371 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
4372
4373 if (err) {
4374 struct l2cap_cmd_rej_unk rej;
4375
4376 BT_ERR("Could not process signaling command (%d)", err);
4377
4378 /* FIXME: Map err to a valid reason */
4379 rej.reason = __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
4380 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ,
4381 sizeof(rej), &rej);
4382 }
4383
4384 data += cmd_len;
4385 len -= cmd_len;
4386 }
4387
4388 kfree_skb(skb);
4389}
4390
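/* Verify the Frame Check Sequence of a received ERTM/streaming PDU.
 * When CRC16 is in use, the last two bytes of the frame hold the FCS,
 * computed over the L2CAP header (enhanced or extended control field)
 * and the payload. The FCS bytes are trimmed from the skb here; a
 * mismatch makes the caller drop the frame.
 */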
4391static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
4392{
4393 u16 our_fcs, rcv_fcs;
4394 int hdr_size;
4395
4396 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
4397 hdr_size = L2CAP_EXT_HDR_SIZE;
4398 else
4399 hdr_size = L2CAP_ENH_HDR_SIZE;
4400
4401 if (chan->fcs == L2CAP_FCS_CRC16) {
4402 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
4403 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
4404 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
4405
4406 if (our_fcs != rcv_fcs)
4407 return -EBADMSG;
4408 }
4409 return 0;
4410}
4411
4412static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
4413{
4414 struct l2cap_ctrl control;
4415
4416 BT_DBG("chan %p", chan);
4417
4418 memset(&control, 0, sizeof(control));
4419 control.sframe = 1;
4420 control.final = 1;
4421 control.reqseq = chan->buffer_seq;
4422 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4423
4424 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4425 control.super = L2CAP_SUPER_RNR;
4426 l2cap_send_sframe(chan, &control);
4427 }
4428
4429 if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
4430 chan->unacked_frames > 0)
4431 __set_retrans_timer(chan);
4432
4433 /* Send pending iframes */
4434 l2cap_ertm_send(chan);
4435
4436 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
4437 test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
4438 /* F-bit wasn't sent in an s-frame or i-frame yet, so
4439 * send it now.
4440 */
4441 control.super = L2CAP_SUPER_RR;
4442 l2cap_send_sframe(chan, &control);
4443 }
4444}
4445
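/* Chain a newly received fragment onto the SDU being reassembled. The
 * first fragment becomes the head of skb's frag_list; later fragments
 * are linked behind *last_frag, and skb's length accounting is updated
 * to cover the new data.
 */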
4446static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
4447 struct sk_buff **last_frag)
4448{
4449 /* skb->len reflects data in skb as well as all fragments
4450 * skb->data_len reflects only data in fragments
4451 */
4452 if (!skb_has_frag_list(skb))
4453 skb_shinfo(skb)->frag_list = new_frag;
4454
4455 new_frag->next = NULL;
4456
4457 (*last_frag)->next = new_frag;
4458 *last_frag = new_frag;
4459
4460 skb->len += new_frag->len;
4461 skb->data_len += new_frag->len;
4462 skb->truesize += new_frag->truesize;
4463}
4464
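/* Reassemble ERTM/streaming PDUs into SDUs according to the SAR bits.
 * Unsegmented PDUs are delivered directly. A start PDU carries the
 * total SDU length and opens reassembly, continuation PDUs are
 * appended, and the end PDU delivers the SDU once the accumulated
 * length matches the advertised one. Any inconsistency (unexpected SAR
 * value, oversized SDU, length mismatch) frees the partial SDU and
 * returns an error to the caller.
 */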
4465static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
4466 struct l2cap_ctrl *control)
4467{
4468 int err = -EINVAL;
4469
4470 switch (control->sar) {
4471 case L2CAP_SAR_UNSEGMENTED:
4472 if (chan->sdu)
4473 break;
4474
4475 err = chan->ops->recv(chan, skb);
4476 break;
4477
4478 case L2CAP_SAR_START:
4479 if (chan->sdu)
4480 break;
4481
4482 chan->sdu_len = get_unaligned_le16(skb->data);
4483 skb_pull(skb, L2CAP_SDULEN_SIZE);
4484
4485 if (chan->sdu_len > chan->imtu) {
4486 err = -EMSGSIZE;
4487 break;
4488 }
4489
4490 if (skb->len >= chan->sdu_len)
4491 break;
4492
4493 chan->sdu = skb;
4494 chan->sdu_last_frag = skb;
4495
4496 skb = NULL;
4497 err = 0;
4498 break;
4499
4500 case L2CAP_SAR_CONTINUE:
4501 if (!chan->sdu)
4502 break;
4503
4504 append_skb_frag(chan->sdu, skb,
4505 &chan->sdu_last_frag);
4506 skb = NULL;
4507
4508 if (chan->sdu->len >= chan->sdu_len)
4509 break;
4510
4511 err = 0;
4512 break;
4513
4514 case L2CAP_SAR_END:
4515 if (!chan->sdu)
4516 break;
4517
4518 append_skb_frag(chan->sdu, skb,
4519 &chan->sdu_last_frag);
4520 skb = NULL;
4521
4522 if (chan->sdu->len != chan->sdu_len)
4523 break;
4524
4525 err = chan->ops->recv(chan, chan->sdu);
4526
4527 if (!err) {
4528 /* Reassembly complete */
4529 chan->sdu = NULL;
4530 chan->sdu_last_frag = NULL;
4531 chan->sdu_len = 0;
4532 }
4533 break;
4534 }
4535
4536 if (err) {
4537 kfree_skb(skb);
4538 kfree_skb(chan->sdu);
4539 chan->sdu = NULL;
4540 chan->sdu_last_frag = NULL;
4541 chan->sdu_len = 0;
4542 }
4543
4544 return err;
4545}
4546
4547void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
4548{
4549 u8 event;
4550
4551 if (chan->mode != L2CAP_MODE_ERTM)
4552 return;
4553
4554 event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
4555 l2cap_tx(chan, NULL, NULL, event);
4556}
4557
4558static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
4559{
4560 int err = 0;
4561 /* Pass sequential frames to l2cap_reassemble_sdu()
4562 * until a gap is encountered.
4563 */
4564
4565 BT_DBG("chan %p", chan);
4566
4567 while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4568 struct sk_buff *skb;
4569 BT_DBG("Searching for skb with txseq %d (queue len %d)",
4570 chan->buffer_seq, skb_queue_len(&chan->srej_q));
4571
4572 skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);
4573
4574 if (!skb)
4575 break;
4576
4577 skb_unlink(skb, &chan->srej_q);
4578 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
4579 err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->control);
4580 if (err)
4581 break;
4582 }
4583
4584 if (skb_queue_empty(&chan->srej_q)) {
4585 chan->rx_state = L2CAP_RX_STATE_RECV;
4586 l2cap_send_ack(chan);
4587 }
4588
4589 return err;
4590}
4591
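/* Handle a received SREJ S-frame. The channel is disconnected if the
 * requested sequence number is invalid or the frame has already hit
 * the retransmission limit; requests for frames that are no longer
 * queued are ignored. A poll SREJ triggers a retransmission with the
 * F-bit set on the response, while a non-poll SREJ retransmits unless
 * this frame was already resent in answer to an earlier poll
 * (CONN_SREJ_ACT).
 */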
4592static void l2cap_handle_srej(struct l2cap_chan *chan,
4593 struct l2cap_ctrl *control)
4594{
4595 struct sk_buff *skb;
4596
4597 BT_DBG("chan %p, control %p", chan, control);
4598
4599 if (control->reqseq == chan->next_tx_seq) {
4600 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
4601 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4602 return;
4603 }
4604
4605 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
4606
4607 if (skb == NULL) {
4608 BT_DBG("Seq %d not available for retransmission",
4609 control->reqseq);
4610 return;
4611 }
4612
4613 if (chan->max_tx != 0 && bt_cb(skb)->control.retries >= chan->max_tx) {
4614 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
4615 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4616 return;
4617 }
4618
4619 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4620
4621 if (control->poll) {
4622 l2cap_pass_to_tx(chan, control);
4623
4624 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4625 l2cap_retransmit(chan, control);
4626 l2cap_ertm_send(chan);
4627
4628 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
4629 set_bit(CONN_SREJ_ACT, &chan->conn_state);
4630 chan->srej_save_reqseq = control->reqseq;
4631 }
4632 } else {
4633 l2cap_pass_to_tx_fbit(chan, control);
4634
4635 if (control->final) {
4636 if (chan->srej_save_reqseq != control->reqseq ||
4637 !test_and_clear_bit(CONN_SREJ_ACT,
4638 &chan->conn_state))
4639 l2cap_retransmit(chan, control);
4640 } else {
4641 l2cap_retransmit(chan, control);
4642 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
4643 set_bit(CONN_SREJ_ACT, &chan->conn_state);
4644 chan->srej_save_reqseq = control->reqseq;
4645 }
4646 }
4647 }
4648}
4649
4650static void l2cap_handle_rej(struct l2cap_chan *chan,
4651 struct l2cap_ctrl *control)
4652{
4653 struct sk_buff *skb;
4654
4655 BT_DBG("chan %p, control %p", chan, control);
4656
4657 if (control->reqseq == chan->next_tx_seq) {
4658 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
4659 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4660 return;
4661 }
4662
4663 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
4664
4665 if (chan->max_tx && skb &&
4666 bt_cb(skb)->control.retries >= chan->max_tx) {
4667 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
4668 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4669 return;
4670 }
4671
4672 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4673
4674 l2cap_pass_to_tx(chan, control);
4675
4676 if (control->final) {
4677 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
4678 l2cap_retransmit_all(chan, control);
4679 } else {
4680 l2cap_retransmit_all(chan, control);
4681 l2cap_ertm_send(chan);
4682 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
4683 set_bit(CONN_REJ_ACT, &chan->conn_state);
4684 }
4685}
4686
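/* Classify the TxSeq of a received I-frame relative to the receive
 * window and any outstanding SREJs: expected, a duplicate, a frame
 * that reveals a gap (unexpected), one of the frames we asked to have
 * retransmitted, or invalid. Frames outside the tx window are ignored
 * when the window is at most half of the sequence space (a "double
 * poll" can legitimately produce them); with larger windows they force
 * a disconnect.
 */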
4687static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
4688{
4689 BT_DBG("chan %p, txseq %d", chan, txseq);
4690
4691 BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
4692 chan->expected_tx_seq);
4693
4694 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
4695 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
4696 chan->tx_win) {
4697 /* See notes below regarding "double poll" and
4698 * invalid packets.
4699 */
4700 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
4701 BT_DBG("Invalid/Ignore - after SREJ");
4702 return L2CAP_TXSEQ_INVALID_IGNORE;
4703 } else {
4704 BT_DBG("Invalid - in window after SREJ sent");
4705 return L2CAP_TXSEQ_INVALID;
4706 }
4707 }
4708
4709 if (chan->srej_list.head == txseq) {
4710 BT_DBG("Expected SREJ");
4711 return L2CAP_TXSEQ_EXPECTED_SREJ;
4712 }
4713
4714 if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
4715 BT_DBG("Duplicate SREJ - txseq already stored");
4716 return L2CAP_TXSEQ_DUPLICATE_SREJ;
4717 }
4718
4719 if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
4720 BT_DBG("Unexpected SREJ - not requested");
4721 return L2CAP_TXSEQ_UNEXPECTED_SREJ;
4722 }
4723 }
4724
4725 if (chan->expected_tx_seq == txseq) {
4726 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
4727 chan->tx_win) {
4728 BT_DBG("Invalid - txseq outside tx window");
4729 return L2CAP_TXSEQ_INVALID;
4730 } else {
4731 BT_DBG("Expected");
4732 return L2CAP_TXSEQ_EXPECTED;
4733 }
4734 }
4735
4736 if (__seq_offset(chan, txseq, chan->last_acked_seq) <
4737 __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
4738 BT_DBG("Duplicate - expected_tx_seq later than txseq");
4739 return L2CAP_TXSEQ_DUPLICATE;
4740 }
4741
4742 if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
4743 /* A source of invalid packets is a "double poll" condition,
4744 * where delays cause us to send multiple poll packets. If
4745 * the remote stack receives and processes both polls,
4746 * sequence numbers can wrap around in such a way that a
4747 * resent frame has a sequence number that looks like new data
4748 * with a sequence gap. This would trigger an erroneous SREJ
4749 * request.
4750 *
 4751 * Fortunately, with a tx window no larger than half of the
 4752 * maximum sequence number this ambiguity cannot arise, so such
 4753 * invalid frames can be safely ignored.
4754 *
4755 * With tx window sizes greater than half of the tx window
4756 * maximum, the frame is invalid and cannot be ignored. This
4757 * causes a disconnect.
4758 */
4759
4760 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
4761 BT_DBG("Invalid/Ignore - txseq outside tx window");
4762 return L2CAP_TXSEQ_INVALID_IGNORE;
4763 } else {
4764 BT_DBG("Invalid - txseq outside tx window");
4765 return L2CAP_TXSEQ_INVALID;
4766 }
4767 } else {
4768 BT_DBG("Unexpected - txseq indicates missing frames");
4769 return L2CAP_TXSEQ_UNEXPECTED;
4770 }
4771}
4772
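/* ERTM receive state machine, RECV state. Expected I-frames are
 * reassembled and acked, a sequence gap queues the frame and sends
 * SREJs (moving to the SREJ_SENT state), and S-frames update the
 * transmit side (acks, retransmissions, remote busy handling).
 */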
4773static int l2cap_rx_state_recv(struct l2cap_chan *chan,
4774 struct l2cap_ctrl *control,
4775 struct sk_buff *skb, u8 event)
4776{
4777 int err = 0;
 4778 bool skb_in_use = false;
4779
4780 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
4781 event);
4782
4783 switch (event) {
4784 case L2CAP_EV_RECV_IFRAME:
4785 switch (l2cap_classify_txseq(chan, control->txseq)) {
4786 case L2CAP_TXSEQ_EXPECTED:
4787 l2cap_pass_to_tx(chan, control);
4788
4789 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4790 BT_DBG("Busy, discarding expected seq %d",
4791 control->txseq);
4792 break;
4793 }
4794
4795 chan->expected_tx_seq = __next_seq(chan,
4796 control->txseq);
4797
4798 chan->buffer_seq = chan->expected_tx_seq;
 4799 skb_in_use = true;
4800
4801 err = l2cap_reassemble_sdu(chan, skb, control);
4802 if (err)
4803 break;
4804
4805 if (control->final) {
4806 if (!test_and_clear_bit(CONN_REJ_ACT,
4807 &chan->conn_state)) {
4808 control->final = 0;
4809 l2cap_retransmit_all(chan, control);
4810 l2cap_ertm_send(chan);
4811 }
4812 }
4813
4814 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
4815 l2cap_send_ack(chan);
4816 break;
4817 case L2CAP_TXSEQ_UNEXPECTED:
4818 l2cap_pass_to_tx(chan, control);
4819
4820 /* Can't issue SREJ frames in the local busy state.
4821 * Drop this frame, it will be seen as missing
4822 * when local busy is exited.
4823 */
4824 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4825 BT_DBG("Busy, discarding unexpected seq %d",
4826 control->txseq);
4827 break;
4828 }
4829
4830 /* There was a gap in the sequence, so an SREJ
4831 * must be sent for each missing frame. The
4832 * current frame is stored for later use.
4833 */
4834 skb_queue_tail(&chan->srej_q, skb);
 4835 skb_in_use = true;
4836 BT_DBG("Queued %p (queue len %d)", skb,
4837 skb_queue_len(&chan->srej_q));
4838
4839 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
4840 l2cap_seq_list_clear(&chan->srej_list);
4841 l2cap_send_srej(chan, control->txseq);
4842
4843 chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
4844 break;
4845 case L2CAP_TXSEQ_DUPLICATE:
4846 l2cap_pass_to_tx(chan, control);
4847 break;
4848 case L2CAP_TXSEQ_INVALID_IGNORE:
4849 break;
4850 case L2CAP_TXSEQ_INVALID:
4851 default:
4852 l2cap_send_disconn_req(chan->conn, chan,
4853 ECONNRESET);
4854 break;
4855 }
4856 break;
4857 case L2CAP_EV_RECV_RR:
4858 l2cap_pass_to_tx(chan, control);
4859 if (control->final) {
4860 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4861
4862 if (!test_and_clear_bit(CONN_REJ_ACT,
4863 &chan->conn_state)) {
4864 control->final = 0;
4865 l2cap_retransmit_all(chan, control);
4866 }
4867
4868 l2cap_ertm_send(chan);
4869 } else if (control->poll) {
4870 l2cap_send_i_or_rr_or_rnr(chan);
4871 } else {
4872 if (test_and_clear_bit(CONN_REMOTE_BUSY,
4873 &chan->conn_state) &&
4874 chan->unacked_frames)
4875 __set_retrans_timer(chan);
4876
4877 l2cap_ertm_send(chan);
4878 }
4879 break;
4880 case L2CAP_EV_RECV_RNR:
4881 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4882 l2cap_pass_to_tx(chan, control);
4883 if (control && control->poll) {
4884 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4885 l2cap_send_rr_or_rnr(chan, 0);
4886 }
4887 __clear_retrans_timer(chan);
4888 l2cap_seq_list_clear(&chan->retrans_list);
4889 break;
4890 case L2CAP_EV_RECV_REJ:
4891 l2cap_handle_rej(chan, control);
4892 break;
4893 case L2CAP_EV_RECV_SREJ:
4894 l2cap_handle_srej(chan, control);
4895 break;
4896 default:
4897 break;
4898 }
4899
4900 if (skb && !skb_in_use) {
4901 BT_DBG("Freeing %p", skb);
4902 kfree_skb(skb);
4903 }
4904
4905 return err;
4906}
4907
4908static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
4909 struct l2cap_ctrl *control,
4910 struct sk_buff *skb, u8 event)
4911{
4912 int err = 0;
4913 u16 txseq = control->txseq;
 4914 bool skb_in_use = false;
4915
4916 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
4917 event);
4918
4919 switch (event) {
4920 case L2CAP_EV_RECV_IFRAME:
4921 switch (l2cap_classify_txseq(chan, txseq)) {
4922 case L2CAP_TXSEQ_EXPECTED:
4923 /* Keep frame for reassembly later */
4924 l2cap_pass_to_tx(chan, control);
4925 skb_queue_tail(&chan->srej_q, skb);
 4926 skb_in_use = true;
4927 BT_DBG("Queued %p (queue len %d)", skb,
4928 skb_queue_len(&chan->srej_q));
4929
4930 chan->expected_tx_seq = __next_seq(chan, txseq);
4931 break;
4932 case L2CAP_TXSEQ_EXPECTED_SREJ:
4933 l2cap_seq_list_pop(&chan->srej_list);
4934
4935 l2cap_pass_to_tx(chan, control);
4936 skb_queue_tail(&chan->srej_q, skb);
 4937 skb_in_use = true;
4938 BT_DBG("Queued %p (queue len %d)", skb,
4939 skb_queue_len(&chan->srej_q));
4940
4941 err = l2cap_rx_queued_iframes(chan);
4942 if (err)
4943 break;
4944
4945 break;
4946 case L2CAP_TXSEQ_UNEXPECTED:
4947 /* Got a frame that can't be reassembled yet.
4948 * Save it for later, and send SREJs to cover
4949 * the missing frames.
4950 */
4951 skb_queue_tail(&chan->srej_q, skb);
 4952 skb_in_use = true;
4953 BT_DBG("Queued %p (queue len %d)", skb,
4954 skb_queue_len(&chan->srej_q));
4955
4956 l2cap_pass_to_tx(chan, control);
4957 l2cap_send_srej(chan, control->txseq);
4958 break;
4959 case L2CAP_TXSEQ_UNEXPECTED_SREJ:
4960 /* This frame was requested with an SREJ, but
4961 * some expected retransmitted frames are
4962 * missing. Request retransmission of missing
4963 * SREJ'd frames.
4964 */
4965 skb_queue_tail(&chan->srej_q, skb);
 4966 skb_in_use = true;
4967 BT_DBG("Queued %p (queue len %d)", skb,
4968 skb_queue_len(&chan->srej_q));
4969
4970 l2cap_pass_to_tx(chan, control);
4971 l2cap_send_srej_list(chan, control->txseq);
4972 break;
4973 case L2CAP_TXSEQ_DUPLICATE_SREJ:
4974 /* We've already queued this frame. Drop this copy. */
4975 l2cap_pass_to_tx(chan, control);
4976 break;
4977 case L2CAP_TXSEQ_DUPLICATE:
4978 /* Expecting a later sequence number, so this frame
4979 * was already received. Ignore it completely.
4980 */
4981 break;
4982 case L2CAP_TXSEQ_INVALID_IGNORE:
4983 break;
4984 case L2CAP_TXSEQ_INVALID:
4985 default:
4986 l2cap_send_disconn_req(chan->conn, chan,
4987 ECONNRESET);
4988 break;
4989 }
4990 break;
4991 case L2CAP_EV_RECV_RR:
4992 l2cap_pass_to_tx(chan, control);
4993 if (control->final) {
4994 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4995
4996 if (!test_and_clear_bit(CONN_REJ_ACT,
4997 &chan->conn_state)) {
4998 control->final = 0;
4999 l2cap_retransmit_all(chan, control);
5000 }
5001
5002 l2cap_ertm_send(chan);
5003 } else if (control->poll) {
5004 if (test_and_clear_bit(CONN_REMOTE_BUSY,
5005 &chan->conn_state) &&
5006 chan->unacked_frames) {
5007 __set_retrans_timer(chan);
5008 }
5009
5010 set_bit(CONN_SEND_FBIT, &chan->conn_state);
5011 l2cap_send_srej_tail(chan);
5012 } else {
5013 if (test_and_clear_bit(CONN_REMOTE_BUSY,
5014 &chan->conn_state) &&
5015 chan->unacked_frames)
5016 __set_retrans_timer(chan);
5017
5018 l2cap_send_ack(chan);
5019 }
5020 break;
5021 case L2CAP_EV_RECV_RNR:
5022 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5023 l2cap_pass_to_tx(chan, control);
5024 if (control->poll) {
5025 l2cap_send_srej_tail(chan);
5026 } else {
5027 struct l2cap_ctrl rr_control;
5028 memset(&rr_control, 0, sizeof(rr_control));
5029 rr_control.sframe = 1;
5030 rr_control.super = L2CAP_SUPER_RR;
5031 rr_control.reqseq = chan->buffer_seq;
5032 l2cap_send_sframe(chan, &rr_control);
5033 }
5034
5035 break;
5036 case L2CAP_EV_RECV_REJ:
5037 l2cap_handle_rej(chan, control);
5038 break;
5039 case L2CAP_EV_RECV_SREJ:
5040 l2cap_handle_srej(chan, control);
5041 break;
5042 }
5043
5044 if (skb && !skb_in_use) {
5045 BT_DBG("Freeing %p", skb);
5046 kfree_skb(skb);
5047 }
5048
5049 return err;
5050}
5051
5052static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
5053{
5054 /* Make sure reqseq is for a packet that has been sent but not acked */
5055 u16 unacked;
5056
5057 unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
5058 return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
5059}
5060
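/* Entry point of the ERTM receive state machine. The ReqSeq of every
 * incoming frame must acknowledge only frames that were actually sent
 * and are still unacked; anything else is treated as a protocol error
 * and disconnects the channel. Valid frames are handed to the handler
 * for the current receive state.
 */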
5061static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
5062 struct sk_buff *skb, u8 event)
5063{
5064 int err = 0;
5065
5066 BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
5067 control, skb, event, chan->rx_state);
5068
5069 if (__valid_reqseq(chan, control->reqseq)) {
5070 switch (chan->rx_state) {
5071 case L2CAP_RX_STATE_RECV:
5072 err = l2cap_rx_state_recv(chan, control, skb, event);
5073 break;
5074 case L2CAP_RX_STATE_SREJ_SENT:
5075 err = l2cap_rx_state_srej_sent(chan, control, skb,
5076 event);
5077 break;
5078 default:
5079 /* shut it down */
5080 break;
5081 }
5082 } else {
 5083 BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d)",
5084 control->reqseq, chan->next_tx_seq,
5085 chan->expected_ack_seq);
5086 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
5087 }
5088
5089 return err;
5090}
5091
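/* Streaming mode receive path. Only the expected TxSeq is reassembled;
 * any other sequence number means frames were lost, so a partially
 * reassembled SDU is discarded along with the new frame. There are no
 * acknowledgements or retransmissions in this mode.
 */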
5092static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
5093 struct sk_buff *skb)
5094{
5095 int err = 0;
5096
5097 BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
5098 chan->rx_state);
5099
5100 if (l2cap_classify_txseq(chan, control->txseq) ==
5101 L2CAP_TXSEQ_EXPECTED) {
5102 l2cap_pass_to_tx(chan, control);
5103
5104 BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
5105 __next_seq(chan, chan->buffer_seq));
5106
5107 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
5108
5109 l2cap_reassemble_sdu(chan, skb, control);
5110 } else {
5111 if (chan->sdu) {
5112 kfree_skb(chan->sdu);
5113 chan->sdu = NULL;
5114 }
5115 chan->sdu_last_frag = NULL;
5116 chan->sdu_len = 0;
5117
5118 if (skb) {
5119 BT_DBG("Freeing %p", skb);
5120 kfree_skb(skb);
5121 }
5122 }
5123
5124 chan->last_acked_seq = control->txseq;
5125 chan->expected_tx_seq = __next_seq(chan, control->txseq);
5126
5127 return err;
5128}
5129
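/* Common receive path for ERTM and streaming channels. The control
 * field is unpacked, the FCS is verified (a bad FCS just drops the
 * frame so recovery can request a retransmission), and the payload is
 * checked against the negotiated MPS. I-frames are fed to the ERTM
 * state machine or the streaming handler; S-frames, which must carry
 * no payload, are mapped to RR/REJ/RNR/SREJ events. Other violations
 * trigger a disconnect request.
 */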
5130static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
5131{
5132 struct l2cap_ctrl *control = &bt_cb(skb)->control;
5133 u16 len;
5134 u8 event;
5135
5136 __unpack_control(chan, skb);
5137
5138 len = skb->len;
5139
5140 /*
5141 * We can just drop the corrupted I-frame here.
5142 * Receiver will miss it and start proper recovery
5143 * procedures and ask for retransmission.
5144 */
5145 if (l2cap_check_fcs(chan, skb))
5146 goto drop;
5147
5148 if (!control->sframe && control->sar == L2CAP_SAR_START)
5149 len -= L2CAP_SDULEN_SIZE;
5150
5151 if (chan->fcs == L2CAP_FCS_CRC16)
5152 len -= L2CAP_FCS_SIZE;
5153
5154 if (len > chan->mps) {
5155 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
5156 goto drop;
5157 }
5158
5159 if (!control->sframe) {
5160 int err;
5161
5162 BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
5163 control->sar, control->reqseq, control->final,
5164 control->txseq);
5165
5166 /* Validate F-bit - F=0 always valid, F=1 only
5167 * valid in TX WAIT_F
5168 */
5169 if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
5170 goto drop;
5171
5172 if (chan->mode != L2CAP_MODE_STREAMING) {
5173 event = L2CAP_EV_RECV_IFRAME;
5174 err = l2cap_rx(chan, control, skb, event);
5175 } else {
5176 err = l2cap_stream_rx(chan, control, skb);
5177 }
5178
5179 if (err)
5180 l2cap_send_disconn_req(chan->conn, chan,
5181 ECONNRESET);
5182 } else {
5183 const u8 rx_func_to_event[4] = {
5184 L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
5185 L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
5186 };
5187
5188 /* Only I-frames are expected in streaming mode */
5189 if (chan->mode == L2CAP_MODE_STREAMING)
5190 goto drop;
5191
5192 BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
5193 control->reqseq, control->final, control->poll,
5194 control->super);
5195
5196 if (len != 0) {
 5197 BT_ERR("Trailing bytes: %d in sframe", len);
5198 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
5199 goto drop;
5200 }
5201
5202 /* Validate F and P bits */
5203 if (control->final && (control->poll ||
5204 chan->tx_state != L2CAP_TX_STATE_WAIT_F))
5205 goto drop;
5206
5207 event = rx_func_to_event[control->super];
5208 if (l2cap_rx(chan, control, skb, event))
5209 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
5210 }
5211
5212 return 0;
5213
5214drop:
5215 kfree_skb(skb);
5216 return 0;
5217}
5218
5219static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
5220 struct sk_buff *skb)
5221{
5222 struct l2cap_chan *chan;
5223
5224 chan = l2cap_get_chan_by_scid(conn, cid);
5225 if (!chan) {
5226 if (cid == L2CAP_CID_A2MP) {
5227 chan = a2mp_channel_create(conn, skb);
5228 if (!chan) {
5229 kfree_skb(skb);
5230 return;
5231 }
5232
5233 l2cap_chan_lock(chan);
5234 } else {
5235 BT_DBG("unknown cid 0x%4.4x", cid);
5236 /* Drop packet and return */
5237 kfree_skb(skb);
5238 return;
5239 }
5240 }
5241
5242 BT_DBG("chan %p, len %d", chan, skb->len);
5243
5244 if (chan->state != BT_CONNECTED)
5245 goto drop;
5246
5247 switch (chan->mode) {
5248 case L2CAP_MODE_BASIC:
 5249 /* If the socket recv buffer overflows we drop data here,
 5250 * which is *bad* because L2CAP has to be reliable. But we
 5251 * don't have any other choice: Basic mode provides no flow
 5252 * control mechanism. */
5253
5254 if (chan->imtu < skb->len)
5255 goto drop;
5256
5257 if (!chan->ops->recv(chan, skb))
5258 goto done;
5259 break;
5260
5261 case L2CAP_MODE_ERTM:
5262 case L2CAP_MODE_STREAMING:
5263 l2cap_data_rcv(chan, skb);
5264 goto done;
5265
5266 default:
5267 BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
5268 break;
5269 }
5270
5271drop:
5272 kfree_skb(skb);
5273
5274done:
5275 l2cap_chan_unlock(chan);
5276}
5277
5278static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
5279 struct sk_buff *skb)
5280{
5281 struct l2cap_chan *chan;
5282
5283 chan = l2cap_global_chan_by_psm(0, psm, conn->src, conn->dst);
5284 if (!chan)
5285 goto drop;
5286
5287 BT_DBG("chan %p, len %d", chan, skb->len);
5288
5289 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
5290 goto drop;
5291
5292 if (chan->imtu < skb->len)
5293 goto drop;
5294
5295 if (!chan->ops->recv(chan, skb))
5296 return;
5297
5298drop:
5299 kfree_skb(skb);
5300}
5301
5302static void l2cap_att_channel(struct l2cap_conn *conn, u16 cid,
5303 struct sk_buff *skb)
5304{
5305 struct l2cap_chan *chan;
5306
5307 chan = l2cap_global_chan_by_scid(0, cid, conn->src, conn->dst);
5308 if (!chan)
5309 goto drop;
5310
5311 BT_DBG("chan %p, len %d", chan, skb->len);
5312
5313 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
5314 goto drop;
5315
5316 if (chan->imtu < skb->len)
5317 goto drop;
5318
5319 if (!chan->ops->recv(chan, skb))
5320 return;
5321
5322drop:
5323 kfree_skb(skb);
5324}
5325
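/* Demultiplex a complete L2CAP frame by destination CID: signaling
 * (BR/EDR and LE), the connectionless channel (which carries a PSM in
 * front of the payload), the LE attribute channel, SMP, and finally
 * the dynamically allocated data channels. Frames whose header length
 * does not match the payload are dropped up front.
 */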
5326static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
5327{
5328 struct l2cap_hdr *lh = (void *) skb->data;
5329 u16 cid, len;
5330 __le16 psm;
5331
5332 skb_pull(skb, L2CAP_HDR_SIZE);
5333 cid = __le16_to_cpu(lh->cid);
5334 len = __le16_to_cpu(lh->len);
5335
5336 if (len != skb->len) {
5337 kfree_skb(skb);
5338 return;
5339 }
5340
5341 BT_DBG("len %d, cid 0x%4.4x", len, cid);
5342
5343 switch (cid) {
5344 case L2CAP_CID_LE_SIGNALING:
5345 case L2CAP_CID_SIGNALING:
5346 l2cap_sig_channel(conn, skb);
5347 break;
5348
5349 case L2CAP_CID_CONN_LESS:
5350 psm = get_unaligned((__le16 *) skb->data);
5351 skb_pull(skb, L2CAP_PSMLEN_SIZE);
5352 l2cap_conless_channel(conn, psm, skb);
5353 break;
5354
5355 case L2CAP_CID_LE_DATA:
5356 l2cap_att_channel(conn, cid, skb);
5357 break;
5358
5359 case L2CAP_CID_SMP:
5360 if (smp_sig_channel(conn, skb))
5361 l2cap_conn_del(conn->hcon, EACCES);
5362 break;
5363
5364 default:
5365 l2cap_data_channel(conn, cid, skb);
5366 break;
5367 }
5368}
5369
5370/* ---- L2CAP interface with lower layer (HCI) ---- */
5371
5372int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
5373{
5374 int exact = 0, lm1 = 0, lm2 = 0;
5375 struct l2cap_chan *c;
5376
5377 BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);
5378
5379 /* Find listening sockets and check their link_mode */
5380 read_lock(&chan_list_lock);
5381 list_for_each_entry(c, &chan_list, global_l) {
5382 struct sock *sk = c->sk;
5383
5384 if (c->state != BT_LISTEN)
5385 continue;
5386
5387 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
5388 lm1 |= HCI_LM_ACCEPT;
5389 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
5390 lm1 |= HCI_LM_MASTER;
5391 exact++;
5392 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
5393 lm2 |= HCI_LM_ACCEPT;
5394 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
5395 lm2 |= HCI_LM_MASTER;
5396 }
5397 }
5398 read_unlock(&chan_list_lock);
5399
5400 return exact ? lm1 : lm2;
5401}
5402
5403void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
5404{
5405 struct l2cap_conn *conn;
5406
5407 BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);
5408
5409 if (!status) {
5410 conn = l2cap_conn_add(hcon, status);
5411 if (conn)
5412 l2cap_conn_ready(conn);
 5413 } else {
 5414 l2cap_conn_del(hcon, bt_to_errno(status));
 5415 }
5416}
5417
5418int l2cap_disconn_ind(struct hci_conn *hcon)
5419{
5420 struct l2cap_conn *conn = hcon->l2cap_data;
5421
5422 BT_DBG("hcon %p", hcon);
5423
5424 if (!conn)
5425 return HCI_ERROR_REMOTE_USER_TERM;
5426 return conn->disc_reason;
5427}
5428
5429void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
5430{
5431 BT_DBG("hcon %p reason %d", hcon, reason);
5432
5433 l2cap_conn_del(hcon, bt_to_errno(reason));
5434}
5435
5436static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
5437{
5438 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
5439 return;
5440
5441 if (encrypt == 0x00) {
5442 if (chan->sec_level == BT_SECURITY_MEDIUM) {
5443 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
5444 } else if (chan->sec_level == BT_SECURITY_HIGH)
5445 l2cap_chan_close(chan, ECONNREFUSED);
5446 } else {
5447 if (chan->sec_level == BT_SECURITY_MEDIUM)
5448 __clear_chan_timer(chan);
5449 }
5450}
5451
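/* Called when the security procedure (authentication/encryption) for
 * the underlying link completes. Each channel on the connection reacts
 * according to its state: LE data channels become ready once
 * encrypted, channels in BT_CONNECT (re)start the connection or get a
 * disconnect timer, and channels in BT_CONNECT2 answer the pending
 * Connection Request with success, pending authorization or a
 * security block.
 */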
5452int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
5453{
5454 struct l2cap_conn *conn = hcon->l2cap_data;
5455 struct l2cap_chan *chan;
5456
5457 if (!conn)
5458 return 0;
5459
5460 BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);
5461
5462 if (hcon->type == LE_LINK) {
5463 if (!status && encrypt)
5464 smp_distribute_keys(conn, 0);
5465 cancel_delayed_work(&conn->security_timer);
5466 }
5467
5468 mutex_lock(&conn->chan_lock);
5469
5470 list_for_each_entry(chan, &conn->chan_l, list) {
5471 l2cap_chan_lock(chan);
5472
5473 BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
5474 state_to_string(chan->state));
5475
5476 if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
5477 l2cap_chan_unlock(chan);
5478 continue;
5479 }
5480
5481 if (chan->scid == L2CAP_CID_LE_DATA) {
5482 if (!status && encrypt) {
5483 chan->sec_level = hcon->sec_level;
5484 l2cap_chan_ready(chan);
5485 }
5486
5487 l2cap_chan_unlock(chan);
5488 continue;
5489 }
5490
5491 if (test_bit(CONF_CONNECT_PEND, &chan->conf_state)) {
5492 l2cap_chan_unlock(chan);
5493 continue;
5494 }
5495
5496 if (!status && (chan->state == BT_CONNECTED ||
5497 chan->state == BT_CONFIG)) {
5498 struct sock *sk = chan->sk;
5499
5500 clear_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags);
5501 sk->sk_state_change(sk);
5502
5503 l2cap_check_encryption(chan, encrypt);
5504 l2cap_chan_unlock(chan);
5505 continue;
5506 }
5507
5508 if (chan->state == BT_CONNECT) {
5509 if (!status) {
5510 l2cap_start_connection(chan);
5511 } else {
5512 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
5513 }
5514 } else if (chan->state == BT_CONNECT2) {
5515 struct sock *sk = chan->sk;
5516 struct l2cap_conn_rsp rsp;
5517 __u16 res, stat;
5518
5519 lock_sock(sk);
5520
5521 if (!status) {
5522 if (test_bit(BT_SK_DEFER_SETUP,
5523 &bt_sk(sk)->flags)) {
5524 res = L2CAP_CR_PEND;
5525 stat = L2CAP_CS_AUTHOR_PEND;
5526 chan->ops->defer(chan);
5527 } else {
5528 __l2cap_state_change(chan, BT_CONFIG);
5529 res = L2CAP_CR_SUCCESS;
5530 stat = L2CAP_CS_NO_INFO;
5531 }
5532 } else {
5533 __l2cap_state_change(chan, BT_DISCONN);
5534 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
5535 res = L2CAP_CR_SEC_BLOCK;
5536 stat = L2CAP_CS_NO_INFO;
5537 }
5538
5539 release_sock(sk);
5540
5541 rsp.scid = cpu_to_le16(chan->dcid);
5542 rsp.dcid = cpu_to_le16(chan->scid);
5543 rsp.result = cpu_to_le16(res);
5544 rsp.status = cpu_to_le16(stat);
5545 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
5546 sizeof(rsp), &rsp);
5547
5548 if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
5549 res == L2CAP_CR_SUCCESS) {
5550 char buf[128];
5551 set_bit(CONF_REQ_SENT, &chan->conf_state);
5552 l2cap_send_cmd(conn, l2cap_get_ident(conn),
5553 L2CAP_CONF_REQ,
5554 l2cap_build_conf_req(chan, buf),
5555 buf);
5556 chan->num_conf_req++;
5557 }
5558 }
5559
5560 l2cap_chan_unlock(chan);
5561 }
5562
5563 mutex_unlock(&conn->chan_lock);
5564
5565 return 0;
5566}
5567
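/* Reassemble ACL data fragments into L2CAP frames. A start fragment
 * must contain at least the basic L2CAP header, which gives the total
 * frame length; if the fragment already holds the whole frame it is
 * processed immediately, otherwise an rx_skb is allocated and
 * continuation fragments are copied in until rx_len reaches zero.
 * Length mismatches mark the connection unreliable and drop the data.
 */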
5568int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
5569{
5570 struct l2cap_conn *conn = hcon->l2cap_data;
5571 struct l2cap_hdr *hdr;
5572 int len;
5573
5574 if (!conn)
5575 conn = l2cap_conn_add(hcon, 0);
5576
5577 if (!conn)
5578 goto drop;
5579
5580 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
5581
5582 switch (flags) {
5583 case ACL_START:
5584 case ACL_START_NO_FLUSH:
5585 case ACL_COMPLETE:
5586 if (conn->rx_len) {
5587 BT_ERR("Unexpected start frame (len %d)", skb->len);
5588 kfree_skb(conn->rx_skb);
5589 conn->rx_skb = NULL;
5590 conn->rx_len = 0;
5591 l2cap_conn_unreliable(conn, ECOMM);
5592 }
5593
 5594 /* A start fragment always begins with the Basic L2CAP header */
5595 if (skb->len < L2CAP_HDR_SIZE) {
5596 BT_ERR("Frame is too short (len %d)", skb->len);
5597 l2cap_conn_unreliable(conn, ECOMM);
5598 goto drop;
5599 }
5600
5601 hdr = (struct l2cap_hdr *) skb->data;
5602 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
5603
5604 if (len == skb->len) {
5605 /* Complete frame received */
5606 l2cap_recv_frame(conn, skb);
5607 return 0;
5608 }
5609
5610 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
5611
5612 if (skb->len > len) {
5613 BT_ERR("Frame is too long (len %d, expected len %d)",
5614 skb->len, len);
5615 l2cap_conn_unreliable(conn, ECOMM);
5616 goto drop;
5617 }
5618
5619 /* Allocate skb for the complete frame (with header) */
5620 conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
5621 if (!conn->rx_skb)
5622 goto drop;
5623
5624 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
5625 skb->len);
5626 conn->rx_len = len - skb->len;
5627 break;
5628
5629 case ACL_CONT:
5630 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
5631
5632 if (!conn->rx_len) {
5633 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
5634 l2cap_conn_unreliable(conn, ECOMM);
5635 goto drop;
5636 }
5637
5638 if (skb->len > conn->rx_len) {
5639 BT_ERR("Fragment is too long (len %d, expected %d)",
5640 skb->len, conn->rx_len);
5641 kfree_skb(conn->rx_skb);
5642 conn->rx_skb = NULL;
5643 conn->rx_len = 0;
5644 l2cap_conn_unreliable(conn, ECOMM);
5645 goto drop;
5646 }
5647
5648 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
5649 skb->len);
5650 conn->rx_len -= skb->len;
5651
5652 if (!conn->rx_len) {
5653 /* Complete frame received */
5654 l2cap_recv_frame(conn, conn->rx_skb);
5655 conn->rx_skb = NULL;
5656 }
5657 break;
5658 }
5659
5660drop:
5661 kfree_skb(skb);
5662 return 0;
5663}
5664
5665static int l2cap_debugfs_show(struct seq_file *f, void *p)
5666{
5667 struct l2cap_chan *c;
5668
5669 read_lock(&chan_list_lock);
5670
5671 list_for_each_entry(c, &chan_list, global_l) {
5672 struct sock *sk = c->sk;
5673
5674 seq_printf(f, "%pMR %pMR %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
5675 &bt_sk(sk)->src, &bt_sk(sk)->dst,
5676 c->state, __le16_to_cpu(c->psm),
5677 c->scid, c->dcid, c->imtu, c->omtu,
5678 c->sec_level, c->mode);
5679 }
5680
5681 read_unlock(&chan_list_lock);
5682
5683 return 0;
5684}
5685
5686static int l2cap_debugfs_open(struct inode *inode, struct file *file)
5687{
5688 return single_open(file, l2cap_debugfs_show, inode->i_private);
5689}
5690
5691static const struct file_operations l2cap_debugfs_fops = {
5692 .open = l2cap_debugfs_open,
5693 .read = seq_read,
5694 .llseek = seq_lseek,
5695 .release = single_release,
5696};
5697
5698static struct dentry *l2cap_debugfs;
5699
5700int __init l2cap_init(void)
5701{
5702 int err;
5703
5704 err = l2cap_init_sockets();
5705 if (err < 0)
5706 return err;
5707
5708 if (bt_debugfs) {
5709 l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
5710 NULL, &l2cap_debugfs_fops);
5711 if (!l2cap_debugfs)
5712 BT_ERR("Failed to create L2CAP debug file");
5713 }
5714
5715 return 0;
5716}
5717
5718void l2cap_exit(void)
5719{
5720 debugfs_remove(l2cap_debugfs);
5721 l2cap_cleanup_sockets();
5722}
5723
5724module_param(disable_ertm, bool, 0644);
5725MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");