Bluetooth: Handle physical link completion
net/bluetooth/l2cap_core.c
1/*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
8
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
10
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
14
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
23
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
27*/
28
29/* Bluetooth L2CAP core. */
30
31#include <linux/module.h>
32
33#include <linux/debugfs.h>
34#include <linux/crc16.h>
35
36#include <net/bluetooth/bluetooth.h>
37#include <net/bluetooth/hci_core.h>
38#include <net/bluetooth/l2cap.h>
39#include <net/bluetooth/smp.h>
40#include <net/bluetooth/a2mp.h>
41
42bool disable_ertm;
43
44static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
45static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP, };
46
47static LIST_HEAD(chan_list);
48static DEFINE_RWLOCK(chan_list_lock);
49
50static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
51 u8 code, u8 ident, u16 dlen, void *data);
52static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
53 void *data);
54static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
55static void l2cap_send_disconn_req(struct l2cap_conn *conn,
56 struct l2cap_chan *chan, int err);
57
58static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
59 struct sk_buff_head *skbs, u8 event);
60
61/* ---- L2CAP channels ---- */
62
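/* The __l2cap_get_chan_by_* helpers below walk conn->chan_l without
 * taking any locks; the corresponding l2cap_get_chan_by_* wrappers
 * take conn->chan_lock and return the matching channel locked.
 */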
63static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
64 u16 cid)
65{
66 struct l2cap_chan *c;
67
68 list_for_each_entry(c, &conn->chan_l, list) {
69 if (c->dcid == cid)
70 return c;
71 }
72 return NULL;
73}
74
75static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
76 u16 cid)
77{
78 struct l2cap_chan *c;
79
80 list_for_each_entry(c, &conn->chan_l, list) {
81 if (c->scid == cid)
82 return c;
83 }
84 return NULL;
85}
86
87/* Find channel with given SCID.
88 * Returns locked channel. */
89static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
90 u16 cid)
91{
92 struct l2cap_chan *c;
93
94 mutex_lock(&conn->chan_lock);
95 c = __l2cap_get_chan_by_scid(conn, cid);
96 if (c)
97 l2cap_chan_lock(c);
98 mutex_unlock(&conn->chan_lock);
99
100 return c;
101}
102
103/* Find channel with given DCID.
104 * Returns locked channel.
105 */
106static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
107 u16 cid)
108{
109 struct l2cap_chan *c;
110
111 mutex_lock(&conn->chan_lock);
112 c = __l2cap_get_chan_by_dcid(conn, cid);
113 if (c)
114 l2cap_chan_lock(c);
115 mutex_unlock(&conn->chan_lock);
116
117 return c;
118}
119
120static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
121 u8 ident)
122{
123 struct l2cap_chan *c;
124
125 list_for_each_entry(c, &conn->chan_l, list) {
126 if (c->ident == ident)
127 return c;
128 }
129 return NULL;
130}
131
132static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
133 u8 ident)
134{
135 struct l2cap_chan *c;
136
137 mutex_lock(&conn->chan_lock);
138 c = __l2cap_get_chan_by_ident(conn, ident);
139 if (c)
140 l2cap_chan_lock(c);
141 mutex_unlock(&conn->chan_lock);
142
143 return c;
144}
145
146static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
147{
148 struct l2cap_chan *c;
149
150 list_for_each_entry(c, &chan_list, global_l) {
151 if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
152 return c;
153 }
154 return NULL;
155}
156
157int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
158{
159 int err;
160
161 write_lock(&chan_list_lock);
162
163 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
164 err = -EADDRINUSE;
165 goto done;
166 }
167
168 if (psm) {
169 chan->psm = psm;
170 chan->sport = psm;
171 err = 0;
172 } else {
173 u16 p;
174
175 err = -EINVAL;
176 for (p = 0x1001; p < 0x1100; p += 2)
177 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
178 chan->psm = cpu_to_le16(p);
179 chan->sport = cpu_to_le16(p);
180 err = 0;
181 break;
182 }
183 }
184
185done:
186 write_unlock(&chan_list_lock);
187 return err;
188}
189
190int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
191{
192 write_lock(&chan_list_lock);
193
194 chan->scid = scid;
195
196 write_unlock(&chan_list_lock);
197
198 return 0;
199}
200
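/* Pick the first unused dynamic CID on this connection, or return 0
 * if the whole dynamic range is already in use.
 */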
201static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
202{
203 u16 cid = L2CAP_CID_DYN_START;
204
205 for (; cid < L2CAP_CID_DYN_END; cid++) {
206 if (!__l2cap_get_chan_by_scid(conn, cid))
207 return cid;
208 }
209
210 return 0;
211}
212
213static void __l2cap_state_change(struct l2cap_chan *chan, int state)
214{
215 BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
216 state_to_string(state));
217
218 chan->state = state;
219 chan->ops->state_change(chan, state);
220}
221
222static void l2cap_state_change(struct l2cap_chan *chan, int state)
223{
224 struct sock *sk = chan->sk;
225
226 lock_sock(sk);
227 __l2cap_state_change(chan, state);
228 release_sock(sk);
229}
230
231static inline void __l2cap_chan_set_err(struct l2cap_chan *chan, int err)
232{
233 struct sock *sk = chan->sk;
234
235 sk->sk_err = err;
236}
237
238static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
239{
240 struct sock *sk = chan->sk;
241
242 lock_sock(sk);
243 __l2cap_chan_set_err(chan, err);
244 release_sock(sk);
245}
246
247static void __set_retrans_timer(struct l2cap_chan *chan)
248{
249 if (!delayed_work_pending(&chan->monitor_timer) &&
250 chan->retrans_timeout) {
251 l2cap_set_timer(chan, &chan->retrans_timer,
252 msecs_to_jiffies(chan->retrans_timeout));
253 }
254}
255
256static void __set_monitor_timer(struct l2cap_chan *chan)
257{
258 __clear_retrans_timer(chan);
259 if (chan->monitor_timeout) {
260 l2cap_set_timer(chan, &chan->monitor_timer,
261 msecs_to_jiffies(chan->monitor_timeout));
262 }
263}
264
265static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
266 u16 seq)
267{
268 struct sk_buff *skb;
269
270 skb_queue_walk(head, skb) {
271 if (bt_cb(skb)->control.txseq == seq)
272 return skb;
273 }
274
275 return NULL;
276}
277
278/* ---- L2CAP sequence number lists ---- */
279
280/* For ERTM, ordered lists of sequence numbers must be tracked for
281 * SREJ requests that are received and for frames that are to be
282 * retransmitted. These seq_list functions implement a singly-linked
283 * list in an array, where membership in the list can also be checked
284 * in constant time. Items can also be added to the tail of the list
285 * and removed from the head in constant time, without further memory
286 * allocs or frees.
287 */
288
289static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
290{
291 size_t alloc_size, i;
292
293 /* Allocated size is a power of 2 to map sequence numbers
294 * (which may be up to 14 bits) in to a smaller array that is
295 * sized for the negotiated ERTM transmit windows.
296 */
297 alloc_size = roundup_pow_of_two(size);
298
299 seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
300 if (!seq_list->list)
301 return -ENOMEM;
302
303 seq_list->mask = alloc_size - 1;
304 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
305 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
306 for (i = 0; i < alloc_size; i++)
307 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
308
309 return 0;
310}
311
312static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
313{
314 kfree(seq_list->list);
315}
316
317static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
318 u16 seq)
319{
320 /* Constant-time check for list membership */
321 return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
322}
323
324static u16 l2cap_seq_list_remove(struct l2cap_seq_list *seq_list, u16 seq)
325{
326 u16 mask = seq_list->mask;
327
328 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR) {
329 /* In case someone tries to pop the head of an empty list */
330 return L2CAP_SEQ_LIST_CLEAR;
331 } else if (seq_list->head == seq) {
332 /* Head can be removed in constant time */
333 seq_list->head = seq_list->list[seq & mask];
334 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
335
336 if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
337 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
338 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
339 }
340 } else {
341 /* Walk the list to find the sequence number */
342 u16 prev = seq_list->head;
343 while (seq_list->list[prev & mask] != seq) {
344 prev = seq_list->list[prev & mask];
345 if (prev == L2CAP_SEQ_LIST_TAIL)
346 return L2CAP_SEQ_LIST_CLEAR;
347 }
348
349 /* Unlink the number from the list and clear it */
350 seq_list->list[prev & mask] = seq_list->list[seq & mask];
351 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
352 if (seq_list->tail == seq)
353 seq_list->tail = prev;
354 }
355 return seq;
356}
357
358static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
359{
360 /* Remove the head in constant time */
361 return l2cap_seq_list_remove(seq_list, seq_list->head);
362}
363
364static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
365{
366 u16 i;
367
368 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
369 return;
370
371 for (i = 0; i <= seq_list->mask; i++)
372 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
373
374 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
375 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
376}
377
378static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
379{
380 u16 mask = seq_list->mask;
381
382 /* All appends happen in constant time */
383
384 if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
385 return;
386
387 if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
388 seq_list->head = seq;
389 else
390 seq_list->list[seq_list->tail & mask] = seq;
391
392 seq_list->tail = seq;
393 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
394}
395
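/* Channel timer expiry: close the channel with ECONNREFUSED or
 * ETIMEDOUT depending on its current state, then drop the channel
 * reference taken when the timer was armed.
 */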
396static void l2cap_chan_timeout(struct work_struct *work)
397{
398 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
399 chan_timer.work);
400 struct l2cap_conn *conn = chan->conn;
401 int reason;
402
403 BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
404
405 mutex_lock(&conn->chan_lock);
406 l2cap_chan_lock(chan);
407
408 if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
409 reason = ECONNREFUSED;
410 else if (chan->state == BT_CONNECT &&
411 chan->sec_level != BT_SECURITY_SDP)
412 reason = ECONNREFUSED;
413 else
414 reason = ETIMEDOUT;
415
416 l2cap_chan_close(chan, reason);
417
418 l2cap_chan_unlock(chan);
419
420 chan->ops->close(chan);
421 mutex_unlock(&conn->chan_lock);
422
423 l2cap_chan_put(chan);
424}
425
426struct l2cap_chan *l2cap_chan_create(void)
427{
428 struct l2cap_chan *chan;
429
430 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
431 if (!chan)
432 return NULL;
433
434 mutex_init(&chan->lock);
435
436 write_lock(&chan_list_lock);
437 list_add(&chan->global_l, &chan_list);
438 write_unlock(&chan_list_lock);
439
440 INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
441
442 chan->state = BT_OPEN;
443
444 kref_init(&chan->kref);
445
446 /* This flag is cleared in l2cap_chan_ready() */
447 set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
448
449 BT_DBG("chan %p", chan);
450
451 return chan;
452}
453
454static void l2cap_chan_destroy(struct kref *kref)
455{
456 struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);
457
458 BT_DBG("chan %p", chan);
459
460 write_lock(&chan_list_lock);
461 list_del(&chan->global_l);
462 write_unlock(&chan_list_lock);
463
464 kfree(chan);
465}
466
467void l2cap_chan_hold(struct l2cap_chan *c)
468{
469 BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));
470
471 kref_get(&c->kref);
472}
473
474void l2cap_chan_put(struct l2cap_chan *c)
475{
476 BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));
477
478 kref_put(&c->kref, l2cap_chan_destroy);
479}
480
481void l2cap_chan_set_defaults(struct l2cap_chan *chan)
482{
483 chan->fcs = L2CAP_FCS_CRC16;
484 chan->max_tx = L2CAP_DEFAULT_MAX_TX;
485 chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
486 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
487 chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
488 chan->sec_level = BT_SECURITY_LOW;
489
490 set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
491}
492
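/* Bind a channel to a connection: assign CIDs and default MTUs based
 * on the channel type, set best-effort flow defaults and link the
 * channel into conn->chan_l. l2cap_chan_add() below is the variant
 * that takes conn->chan_lock around this.
 */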
493void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
494{
495 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
496 __le16_to_cpu(chan->psm), chan->dcid);
497
498 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
499
500 chan->conn = conn;
501
502 switch (chan->chan_type) {
503 case L2CAP_CHAN_CONN_ORIENTED:
504 if (conn->hcon->type == LE_LINK) {
505 /* LE connection */
506 chan->omtu = L2CAP_DEFAULT_MTU;
507 chan->scid = L2CAP_CID_LE_DATA;
508 chan->dcid = L2CAP_CID_LE_DATA;
509 } else {
510 /* Alloc CID for connection-oriented socket */
511 chan->scid = l2cap_alloc_cid(conn);
512 chan->omtu = L2CAP_DEFAULT_MTU;
513 }
514 break;
515
516 case L2CAP_CHAN_CONN_LESS:
517 /* Connectionless socket */
518 chan->scid = L2CAP_CID_CONN_LESS;
519 chan->dcid = L2CAP_CID_CONN_LESS;
520 chan->omtu = L2CAP_DEFAULT_MTU;
521 break;
522
523 case L2CAP_CHAN_CONN_FIX_A2MP:
524 chan->scid = L2CAP_CID_A2MP;
525 chan->dcid = L2CAP_CID_A2MP;
526 chan->omtu = L2CAP_A2MP_DEFAULT_MTU;
527 chan->imtu = L2CAP_A2MP_DEFAULT_MTU;
528 break;
529
530 default:
531 /* Raw socket can send/recv signalling messages only */
532 chan->scid = L2CAP_CID_SIGNALING;
533 chan->dcid = L2CAP_CID_SIGNALING;
534 chan->omtu = L2CAP_DEFAULT_MTU;
535 }
536
537 chan->local_id = L2CAP_BESTEFFORT_ID;
538 chan->local_stype = L2CAP_SERV_BESTEFFORT;
539 chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
540 chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
541 chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
542 chan->local_flush_to = L2CAP_EFS_DEFAULT_FLUSH_TO;
543
544 l2cap_chan_hold(chan);
545
546 list_add(&chan->list, &conn->chan_l);
547}
548
549void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
550{
551 mutex_lock(&conn->chan_lock);
552 __l2cap_chan_add(conn, chan);
553 mutex_unlock(&conn->chan_lock);
554}
555
556void l2cap_chan_del(struct l2cap_chan *chan, int err)
557{
558 struct l2cap_conn *conn = chan->conn;
559
560 __clear_chan_timer(chan);
561
562 BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
563
564 if (conn) {
565 struct amp_mgr *mgr = conn->hcon->amp_mgr;
566 /* Delete from channel list */
567 list_del(&chan->list);
568
569 l2cap_chan_put(chan);
570
571 chan->conn = NULL;
572
573 if (chan->chan_type != L2CAP_CHAN_CONN_FIX_A2MP)
574 hci_conn_put(conn->hcon);
575
576 if (mgr && mgr->bredr_chan == chan)
577 mgr->bredr_chan = NULL;
578 }
579
580 chan->ops->teardown(chan, err);
581
582 if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
583 return;
584
 585	switch (chan->mode) {
586 case L2CAP_MODE_BASIC:
587 break;
588
589 case L2CAP_MODE_ERTM:
590 __clear_retrans_timer(chan);
591 __clear_monitor_timer(chan);
592 __clear_ack_timer(chan);
593
594 skb_queue_purge(&chan->srej_q);
595
596 l2cap_seq_list_free(&chan->srej_list);
597 l2cap_seq_list_free(&chan->retrans_list);
598
599 /* fall through */
600
601 case L2CAP_MODE_STREAMING:
602 skb_queue_purge(&chan->tx_q);
603 break;
604 }
605
606 return;
607}
608
609void l2cap_chan_close(struct l2cap_chan *chan, int reason)
610{
611 struct l2cap_conn *conn = chan->conn;
612 struct sock *sk = chan->sk;
613
614 BT_DBG("chan %p state %s sk %p", chan, state_to_string(chan->state),
615 sk);
616
617 switch (chan->state) {
618 case BT_LISTEN:
619 chan->ops->teardown(chan, 0);
620 break;
621
622 case BT_CONNECTED:
623 case BT_CONFIG:
624 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
625 conn->hcon->type == ACL_LINK) {
626 __set_chan_timer(chan, sk->sk_sndtimeo);
627 l2cap_send_disconn_req(conn, chan, reason);
628 } else
629 l2cap_chan_del(chan, reason);
630 break;
631
632 case BT_CONNECT2:
633 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
634 conn->hcon->type == ACL_LINK) {
635 struct l2cap_conn_rsp rsp;
636 __u16 result;
637
638 if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags))
639 result = L2CAP_CR_SEC_BLOCK;
640 else
641 result = L2CAP_CR_BAD_PSM;
642 l2cap_state_change(chan, BT_DISCONN);
643
644 rsp.scid = cpu_to_le16(chan->dcid);
645 rsp.dcid = cpu_to_le16(chan->scid);
646 rsp.result = cpu_to_le16(result);
647 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
648 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
649 sizeof(rsp), &rsp);
650 }
651
652 l2cap_chan_del(chan, reason);
653 break;
654
655 case BT_CONNECT:
656 case BT_DISCONN:
657 l2cap_chan_del(chan, reason);
658 break;
659
660 default:
661 chan->ops->teardown(chan, 0);
662 break;
663 }
664}
665
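/* Map the channel type and requested security level to the HCI
 * authentication requirement used when securing the underlying link.
 */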
666static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
667{
668 if (chan->chan_type == L2CAP_CHAN_RAW) {
669 switch (chan->sec_level) {
670 case BT_SECURITY_HIGH:
671 return HCI_AT_DEDICATED_BONDING_MITM;
672 case BT_SECURITY_MEDIUM:
673 return HCI_AT_DEDICATED_BONDING;
674 default:
675 return HCI_AT_NO_BONDING;
676 }
677 } else if (chan->psm == __constant_cpu_to_le16(L2CAP_PSM_SDP)) {
678 if (chan->sec_level == BT_SECURITY_LOW)
679 chan->sec_level = BT_SECURITY_SDP;
680
681 if (chan->sec_level == BT_SECURITY_HIGH)
682 return HCI_AT_NO_BONDING_MITM;
683 else
684 return HCI_AT_NO_BONDING;
685 } else {
686 switch (chan->sec_level) {
687 case BT_SECURITY_HIGH:
688 return HCI_AT_GENERAL_BONDING_MITM;
689 case BT_SECURITY_MEDIUM:
690 return HCI_AT_GENERAL_BONDING;
691 default:
692 return HCI_AT_NO_BONDING;
693 }
694 }
695}
696
697/* Service level security */
698int l2cap_chan_check_security(struct l2cap_chan *chan)
699{
700 struct l2cap_conn *conn = chan->conn;
701 __u8 auth_type;
702
703 auth_type = l2cap_get_auth_type(chan);
704
705 return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
706}
707
708static u8 l2cap_get_ident(struct l2cap_conn *conn)
709{
710 u8 id;
711
 712	/* Get next available identifier.
713 * 1 - 128 are used by kernel.
714 * 129 - 199 are reserved.
715 * 200 - 254 are used by utilities like l2ping, etc.
716 */
717
718 spin_lock(&conn->lock);
719
720 if (++conn->tx_ident > 128)
721 conn->tx_ident = 1;
722
723 id = conn->tx_ident;
724
725 spin_unlock(&conn->lock);
726
727 return id;
728}
729
730static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
731 void *data)
732{
733 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
734 u8 flags;
735
736 BT_DBG("code 0x%2.2x", code);
737
738 if (!skb)
739 return;
740
741 if (lmp_no_flush_capable(conn->hcon->hdev))
742 flags = ACL_START_NO_FLUSH;
743 else
744 flags = ACL_START;
745
746 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
747 skb->priority = HCI_PRIO_MAX;
748
749 hci_send_acl(conn->hchan, skb, flags);
750}
751
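/* A channel move is in progress whenever move_state is anything other
 * than stable or waiting-to-prepare.
 */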
752static bool __chan_is_moving(struct l2cap_chan *chan)
753{
754 return chan->move_state != L2CAP_MOVE_STABLE &&
755 chan->move_state != L2CAP_MOVE_WAIT_PREPARE;
756}
757
758static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
759{
760 struct hci_conn *hcon = chan->conn->hcon;
761 u16 flags;
762
763 BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
764 skb->priority);
765
766 if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
767 lmp_no_flush_capable(hcon->hdev))
768 flags = ACL_START_NO_FLUSH;
769 else
770 flags = ACL_START;
771
772 bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
773 hci_send_acl(chan->conn->hchan, skb, flags);
774}
775
776static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
777{
778 control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
779 control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
780
781 if (enh & L2CAP_CTRL_FRAME_TYPE) {
782 /* S-Frame */
783 control->sframe = 1;
784 control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
785 control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
786
787 control->sar = 0;
788 control->txseq = 0;
789 } else {
790 /* I-Frame */
791 control->sframe = 0;
792 control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
793 control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
794
795 control->poll = 0;
796 control->super = 0;
797 }
798}
799
800static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
801{
802 control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
803 control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
804
805 if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
806 /* S-Frame */
807 control->sframe = 1;
808 control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
809 control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
810
811 control->sar = 0;
812 control->txseq = 0;
813 } else {
814 /* I-Frame */
815 control->sframe = 0;
816 control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
817 control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
818
819 control->poll = 0;
820 control->super = 0;
821 }
822}
823
824static inline void __unpack_control(struct l2cap_chan *chan,
825 struct sk_buff *skb)
826{
827 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
828 __unpack_extended_control(get_unaligned_le32(skb->data),
829 &bt_cb(skb)->control);
830 skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
831 } else {
832 __unpack_enhanced_control(get_unaligned_le16(skb->data),
833 &bt_cb(skb)->control);
834 skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
835 }
836}
837
838static u32 __pack_extended_control(struct l2cap_ctrl *control)
839{
840 u32 packed;
841
842 packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
843 packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
844
845 if (control->sframe) {
846 packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
847 packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
848 packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
849 } else {
850 packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
851 packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
852 }
853
854 return packed;
855}
856
857static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
858{
859 u16 packed;
860
861 packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
862 packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
863
864 if (control->sframe) {
865 packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
866 packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
867 packed |= L2CAP_CTRL_FRAME_TYPE;
868 } else {
869 packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
870 packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
871 }
872
873 return packed;
874}
875
876static inline void __pack_control(struct l2cap_chan *chan,
877 struct l2cap_ctrl *control,
878 struct sk_buff *skb)
879{
880 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
881 put_unaligned_le32(__pack_extended_control(control),
882 skb->data + L2CAP_HDR_SIZE);
883 } else {
884 put_unaligned_le16(__pack_enhanced_control(control),
885 skb->data + L2CAP_HDR_SIZE);
886 }
887}
888
889static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
890{
891 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
892 return L2CAP_EXT_HDR_SIZE;
893 else
894 return L2CAP_ENH_HDR_SIZE;
895}
896
897static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
898 u32 control)
899{
900 struct sk_buff *skb;
901 struct l2cap_hdr *lh;
902 int hlen = __ertm_hdr_size(chan);
903
904 if (chan->fcs == L2CAP_FCS_CRC16)
905 hlen += L2CAP_FCS_SIZE;
906
907 skb = bt_skb_alloc(hlen, GFP_KERNEL);
908
909 if (!skb)
910 return ERR_PTR(-ENOMEM);
911
912 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
913 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
914 lh->cid = cpu_to_le16(chan->dcid);
915
916 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
917 put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
918 else
919 put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
920
921 if (chan->fcs == L2CAP_FCS_CRC16) {
922 u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
923 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
924 }
925
926 skb->priority = HCI_PRIO_MAX;
927 return skb;
928}
929
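/* Build and send a single S-frame, folding in a pending F-bit,
 * tracking whether an RNR is outstanding and, for non-SREJ frames,
 * recording the acknowledged sequence number and stopping the ack
 * timer.
 */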
930static void l2cap_send_sframe(struct l2cap_chan *chan,
931 struct l2cap_ctrl *control)
932{
933 struct sk_buff *skb;
934 u32 control_field;
935
936 BT_DBG("chan %p, control %p", chan, control);
937
938 if (!control->sframe)
939 return;
940
941 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
942 !control->poll)
943 control->final = 1;
944
945 if (control->super == L2CAP_SUPER_RR)
946 clear_bit(CONN_RNR_SENT, &chan->conn_state);
947 else if (control->super == L2CAP_SUPER_RNR)
948 set_bit(CONN_RNR_SENT, &chan->conn_state);
949
950 if (control->super != L2CAP_SUPER_SREJ) {
951 chan->last_acked_seq = control->reqseq;
952 __clear_ack_timer(chan);
953 }
954
955 BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
956 control->final, control->poll, control->super);
957
958 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
959 control_field = __pack_extended_control(control);
960 else
961 control_field = __pack_enhanced_control(control);
962
963 skb = l2cap_create_sframe_pdu(chan, control_field);
964 if (!IS_ERR(skb))
965 l2cap_do_send(chan, skb);
966}
967
968static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
969{
970 struct l2cap_ctrl control;
971
972 BT_DBG("chan %p, poll %d", chan, poll);
973
974 memset(&control, 0, sizeof(control));
975 control.sframe = 1;
976 control.poll = poll;
977
978 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
979 control.super = L2CAP_SUPER_RNR;
980 else
981 control.super = L2CAP_SUPER_RR;
982
983 control.reqseq = chan->buffer_seq;
984 l2cap_send_sframe(chan, &control);
985}
986
987static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
988{
989 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
990}
991
992static bool __amp_capable(struct l2cap_chan *chan)
993{
994 struct l2cap_conn *conn = chan->conn;
995
996 if (enable_hs &&
997 chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED &&
998 conn->fixed_chan_mask & L2CAP_FC_A2MP)
999 return true;
1000 else
1001 return false;
1002}
1003
1004void l2cap_send_conn_req(struct l2cap_chan *chan)
1005{
1006 struct l2cap_conn *conn = chan->conn;
1007 struct l2cap_conn_req req;
1008
1009 req.scid = cpu_to_le16(chan->scid);
1010 req.psm = chan->psm;
1011
1012 chan->ident = l2cap_get_ident(conn);
1013
1014 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
1015
1016 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
1017}
1018
1019static void l2cap_send_create_chan_req(struct l2cap_chan *chan, u8 amp_id)
1020{
1021 struct l2cap_create_chan_req req;
1022 req.scid = cpu_to_le16(chan->scid);
1023 req.psm = chan->psm;
1024 req.amp_id = amp_id;
1025
1026 chan->ident = l2cap_get_ident(chan->conn);
1027
1028 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_REQ,
1029 sizeof(req), &req);
1030}
1031
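/* Prepare an ERTM channel for a channel move: stop all timers, reset
 * the retransmission bookkeeping and switch the receiver into the
 * move state with the remote side treated as busy.
 */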
1032static void l2cap_move_setup(struct l2cap_chan *chan)
1033{
1034 struct sk_buff *skb;
1035
1036 BT_DBG("chan %p", chan);
1037
1038 if (chan->mode != L2CAP_MODE_ERTM)
1039 return;
1040
1041 __clear_retrans_timer(chan);
1042 __clear_monitor_timer(chan);
1043 __clear_ack_timer(chan);
1044
1045 chan->retry_count = 0;
1046 skb_queue_walk(&chan->tx_q, skb) {
1047 if (bt_cb(skb)->control.retries)
1048 bt_cb(skb)->control.retries = 1;
1049 else
1050 break;
1051 }
1052
1053 chan->expected_tx_seq = chan->buffer_seq;
1054
1055 clear_bit(CONN_REJ_ACT, &chan->conn_state);
1056 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
1057 l2cap_seq_list_clear(&chan->retrans_list);
1058 l2cap_seq_list_clear(&chan->srej_list);
1059 skb_queue_purge(&chan->srej_q);
1060
1061 chan->tx_state = L2CAP_TX_STATE_XMIT;
1062 chan->rx_state = L2CAP_RX_STATE_MOVE;
1063
1064 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
1065}
1066
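/* Complete a channel move: return to the stable state and, for ERTM
 * channels, start the poll/final exchange that resynchronizes the
 * two sides.
 */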
1067static void l2cap_move_done(struct l2cap_chan *chan)
1068{
1069 u8 move_role = chan->move_role;
1070 BT_DBG("chan %p", chan);
1071
1072 chan->move_state = L2CAP_MOVE_STABLE;
1073 chan->move_role = L2CAP_MOVE_ROLE_NONE;
1074
1075 if (chan->mode != L2CAP_MODE_ERTM)
1076 return;
1077
1078 switch (move_role) {
1079 case L2CAP_MOVE_ROLE_INITIATOR:
1080 l2cap_tx(chan, NULL, NULL, L2CAP_EV_EXPLICIT_POLL);
1081 chan->rx_state = L2CAP_RX_STATE_WAIT_F;
1082 break;
1083 case L2CAP_MOVE_ROLE_RESPONDER:
1084 chan->rx_state = L2CAP_RX_STATE_WAIT_P;
1085 break;
1086 }
1087}
1088
1089static void l2cap_chan_ready(struct l2cap_chan *chan)
1090{
1091 /* This clears all conf flags, including CONF_NOT_COMPLETE */
1092 chan->conf_state = 0;
1093 __clear_chan_timer(chan);
1094
1095 chan->state = BT_CONNECTED;
1096
1097 chan->ops->ready(chan);
1098}
1099
1100static void l2cap_start_connection(struct l2cap_chan *chan)
1101{
1102 if (__amp_capable(chan)) {
1103 BT_DBG("chan %p AMP capable: discover AMPs", chan);
1104 a2mp_discover_amp(chan);
1105 } else {
1106 l2cap_send_conn_req(chan);
1107 }
1108}
1109
1110static void l2cap_do_start(struct l2cap_chan *chan)
1111{
1112 struct l2cap_conn *conn = chan->conn;
1113
1114 if (conn->hcon->type == LE_LINK) {
1115 l2cap_chan_ready(chan);
1116 return;
1117 }
1118
1119 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
1120 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
1121 return;
1122
1123 if (l2cap_chan_check_security(chan) &&
1124 __l2cap_no_conn_pending(chan)) {
1125 l2cap_start_connection(chan);
1126 }
1127 } else {
1128 struct l2cap_info_req req;
1129 req.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
1130
1131 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
1132 conn->info_ident = l2cap_get_ident(conn);
1133
1134 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
1135
1136 l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
1137 sizeof(req), &req);
1138 }
1139}
1140
1141static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1142{
1143 u32 local_feat_mask = l2cap_feat_mask;
1144 if (!disable_ertm)
1145 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1146
1147 switch (mode) {
1148 case L2CAP_MODE_ERTM:
1149 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1150 case L2CAP_MODE_STREAMING:
1151 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
1152 default:
1153 return 0x00;
1154 }
1155}
1156
1157static void l2cap_send_disconn_req(struct l2cap_conn *conn,
1158 struct l2cap_chan *chan, int err)
1159{
1160 struct sock *sk = chan->sk;
1161 struct l2cap_disconn_req req;
1162
1163 if (!conn)
1164 return;
1165
1166 if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
1167 __clear_retrans_timer(chan);
1168 __clear_monitor_timer(chan);
1169 __clear_ack_timer(chan);
1170 }
1171
1172 if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
1173 l2cap_state_change(chan, BT_DISCONN);
1174 return;
1175 }
1176
1177 req.dcid = cpu_to_le16(chan->dcid);
1178 req.scid = cpu_to_le16(chan->scid);
1179 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
1180 sizeof(req), &req);
1181
1182 lock_sock(sk);
1183 __l2cap_state_change(chan, BT_DISCONN);
1184 __l2cap_chan_set_err(chan, err);
1185 release_sock(sk);
1186}
1187
1188/* ---- L2CAP connections ---- */
1189static void l2cap_conn_start(struct l2cap_conn *conn)
1190{
1191 struct l2cap_chan *chan, *tmp;
1192
1193 BT_DBG("conn %p", conn);
1194
1195 mutex_lock(&conn->chan_lock);
1196
1197 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
1198 struct sock *sk = chan->sk;
1199
1200 l2cap_chan_lock(chan);
1201
1202 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1203 l2cap_chan_unlock(chan);
1204 continue;
1205 }
1206
1207 if (chan->state == BT_CONNECT) {
1208 if (!l2cap_chan_check_security(chan) ||
1209 !__l2cap_no_conn_pending(chan)) {
1210 l2cap_chan_unlock(chan);
1211 continue;
1212 }
1213
1214 if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
1215 && test_bit(CONF_STATE2_DEVICE,
1216 &chan->conf_state)) {
1217 l2cap_chan_close(chan, ECONNRESET);
1218 l2cap_chan_unlock(chan);
1219 continue;
1220 }
1221
1222 l2cap_start_connection(chan);
1223
1224 } else if (chan->state == BT_CONNECT2) {
1225 struct l2cap_conn_rsp rsp;
1226 char buf[128];
1227 rsp.scid = cpu_to_le16(chan->dcid);
1228 rsp.dcid = cpu_to_le16(chan->scid);
1229
1230 if (l2cap_chan_check_security(chan)) {
1231 lock_sock(sk);
1232 if (test_bit(BT_SK_DEFER_SETUP,
1233 &bt_sk(sk)->flags)) {
1234 rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
1235 rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
1236 chan->ops->defer(chan);
1237
1238 } else {
1239 __l2cap_state_change(chan, BT_CONFIG);
1240 rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
1241 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
1242 }
1243 release_sock(sk);
1244 } else {
1245 rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
1246 rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
1247 }
1248
1249 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
1250 sizeof(rsp), &rsp);
1251
1252 if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
1253 rsp.result != L2CAP_CR_SUCCESS) {
1254 l2cap_chan_unlock(chan);
1255 continue;
1256 }
1257
1258 set_bit(CONF_REQ_SENT, &chan->conf_state);
1259 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1260 l2cap_build_conf_req(chan, buf), buf);
1261 chan->num_conf_req++;
1262 }
1263
1264 l2cap_chan_unlock(chan);
1265 }
1266
1267 mutex_unlock(&conn->chan_lock);
1268}
1269
1270/* Find channel with given cid and source/destination bdaddr.
1271 * Returns closest match.
1272 */
1273static struct l2cap_chan *l2cap_global_chan_by_scid(int state, u16 cid,
1274 bdaddr_t *src,
1275 bdaddr_t *dst)
1276{
1277 struct l2cap_chan *c, *c1 = NULL;
1278
1279 read_lock(&chan_list_lock);
1280
1281 list_for_each_entry(c, &chan_list, global_l) {
1282 struct sock *sk = c->sk;
1283
1284 if (state && c->state != state)
1285 continue;
1286
1287 if (c->scid == cid) {
1288 int src_match, dst_match;
1289 int src_any, dst_any;
1290
1291 /* Exact match. */
1292 src_match = !bacmp(&bt_sk(sk)->src, src);
1293 dst_match = !bacmp(&bt_sk(sk)->dst, dst);
1294 if (src_match && dst_match) {
1295 read_unlock(&chan_list_lock);
1296 return c;
1297 }
1298
1299 /* Closest match */
1300 src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
1301 dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
1302 if ((src_match && dst_any) || (src_any && dst_match) ||
1303 (src_any && dst_any))
1304 c1 = c;
1305 }
1306 }
1307
1308 read_unlock(&chan_list_lock);
1309
1310 return c1;
1311}
1312
1313static void l2cap_le_conn_ready(struct l2cap_conn *conn)
1314{
1315 struct sock *parent, *sk;
1316 struct l2cap_chan *chan, *pchan;
1317
1318 BT_DBG("");
1319
1320 /* Check if we have socket listening on cid */
1321 pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
1322 conn->src, conn->dst);
1323 if (!pchan)
1324 return;
1325
1326 parent = pchan->sk;
1327
1328 lock_sock(parent);
1329
1330 chan = pchan->ops->new_connection(pchan);
1331 if (!chan)
1332 goto clean;
1333
1334 sk = chan->sk;
1335
1336 hci_conn_hold(conn->hcon);
1337 conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
1338
1339 bacpy(&bt_sk(sk)->src, conn->src);
1340 bacpy(&bt_sk(sk)->dst, conn->dst);
1341
1342 l2cap_chan_add(conn, chan);
1343
1344 l2cap_chan_ready(chan);
1345
1346clean:
1347 release_sock(parent);
1348}
1349
1350static void l2cap_conn_ready(struct l2cap_conn *conn)
1351{
1352 struct l2cap_chan *chan;
1353 struct hci_conn *hcon = conn->hcon;
1354
1355 BT_DBG("conn %p", conn);
1356
1357 if (!hcon->out && hcon->type == LE_LINK)
1358 l2cap_le_conn_ready(conn);
1359
1360 if (hcon->out && hcon->type == LE_LINK)
1361 smp_conn_security(hcon, hcon->pending_sec_level);
1362
1363 mutex_lock(&conn->chan_lock);
1364
1365 list_for_each_entry(chan, &conn->chan_l, list) {
1366
1367 l2cap_chan_lock(chan);
1368
1369 if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
1370 l2cap_chan_unlock(chan);
1371 continue;
1372 }
1373
1374 if (hcon->type == LE_LINK) {
1375 if (smp_conn_security(hcon, chan->sec_level))
1376 l2cap_chan_ready(chan);
1377
1378 } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1379 struct sock *sk = chan->sk;
1380 __clear_chan_timer(chan);
1381 lock_sock(sk);
1382 __l2cap_state_change(chan, BT_CONNECTED);
1383 sk->sk_state_change(sk);
1384 release_sock(sk);
1385
1386 } else if (chan->state == BT_CONNECT)
1387 l2cap_do_start(chan);
1388
1389 l2cap_chan_unlock(chan);
1390 }
1391
1392 mutex_unlock(&conn->chan_lock);
1393}
1394
1395/* Notify sockets that we cannot guarantee reliability anymore */
1396static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1397{
1398 struct l2cap_chan *chan;
1399
1400 BT_DBG("conn %p", conn);
1401
1402 mutex_lock(&conn->chan_lock);
1403
1404 list_for_each_entry(chan, &conn->chan_l, list) {
1405 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1406 l2cap_chan_set_err(chan, err);
1407 }
1408
1409 mutex_unlock(&conn->chan_lock);
1410}
1411
1412static void l2cap_info_timeout(struct work_struct *work)
1413{
1414 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1415 info_timer.work);
1416
1417 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
1418 conn->info_ident = 0;
1419
1420 l2cap_conn_start(conn);
1421}
1422
1423static void l2cap_conn_del(struct hci_conn *hcon, int err)
1424{
1425 struct l2cap_conn *conn = hcon->l2cap_data;
1426 struct l2cap_chan *chan, *l;
1427
1428 if (!conn)
1429 return;
1430
1431 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
1432
1433 kfree_skb(conn->rx_skb);
1434
1435 mutex_lock(&conn->chan_lock);
1436
1437 /* Kill channels */
1438 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
1439 l2cap_chan_hold(chan);
1440 l2cap_chan_lock(chan);
1441
1442 l2cap_chan_del(chan, err);
1443
1444 l2cap_chan_unlock(chan);
1445
1446 chan->ops->close(chan);
1447 l2cap_chan_put(chan);
1448 }
1449
1450 mutex_unlock(&conn->chan_lock);
1451
1452 hci_chan_del(conn->hchan);
1453
1454 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1455 cancel_delayed_work_sync(&conn->info_timer);
1456
1457 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) {
1458 cancel_delayed_work_sync(&conn->security_timer);
1459 smp_chan_destroy(conn);
1460 }
1461
1462 hcon->l2cap_data = NULL;
1463 kfree(conn);
1464}
1465
1466static void security_timeout(struct work_struct *work)
1467{
1468 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1469 security_timer.work);
1470
1471 BT_DBG("conn %p", conn);
1472
1473 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags)) {
1474 smp_chan_destroy(conn);
1475 l2cap_conn_del(conn->hcon, ETIMEDOUT);
1476 }
1477}
1478
1479static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
1480{
1481 struct l2cap_conn *conn = hcon->l2cap_data;
1482 struct hci_chan *hchan;
1483
1484 if (conn || status)
1485 return conn;
1486
1487 hchan = hci_chan_create(hcon);
1488 if (!hchan)
1489 return NULL;
1490
1491 conn = kzalloc(sizeof(struct l2cap_conn), GFP_KERNEL);
1492 if (!conn) {
1493 hci_chan_del(hchan);
1494 return NULL;
1495 }
1496
1497 hcon->l2cap_data = conn;
1498 conn->hcon = hcon;
1499 conn->hchan = hchan;
1500
1501 BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
1502
1503 switch (hcon->type) {
1504 case AMP_LINK:
1505 conn->mtu = hcon->hdev->block_mtu;
1506 break;
1507
1508 case LE_LINK:
1509 if (hcon->hdev->le_mtu) {
1510 conn->mtu = hcon->hdev->le_mtu;
1511 break;
1512 }
1513 /* fall through */
1514
1515 default:
1516 conn->mtu = hcon->hdev->acl_mtu;
1517 break;
1518 }
1519
1520 conn->src = &hcon->hdev->bdaddr;
1521 conn->dst = &hcon->dst;
1522
1523 conn->feat_mask = 0;
1524
1525 spin_lock_init(&conn->lock);
1526 mutex_init(&conn->chan_lock);
1527
1528 INIT_LIST_HEAD(&conn->chan_l);
1529
1530 if (hcon->type == LE_LINK)
1531 INIT_DELAYED_WORK(&conn->security_timer, security_timeout);
1532 else
1533 INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
1534
1535 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
1536
1537 return conn;
1538}
1539
1540/* ---- Socket interface ---- */
1541
1542/* Find channel with given psm and source / destination bdaddr.
1543 * Returns closest match.
1544 */
1545static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
1546 bdaddr_t *src,
1547 bdaddr_t *dst)
1548{
1549 struct l2cap_chan *c, *c1 = NULL;
1550
1551 read_lock(&chan_list_lock);
1552
1553 list_for_each_entry(c, &chan_list, global_l) {
1554 struct sock *sk = c->sk;
1555
1556 if (state && c->state != state)
1557 continue;
1558
1559 if (c->psm == psm) {
1560 int src_match, dst_match;
1561 int src_any, dst_any;
1562
1563 /* Exact match. */
1564 src_match = !bacmp(&bt_sk(sk)->src, src);
1565 dst_match = !bacmp(&bt_sk(sk)->dst, dst);
1566 if (src_match && dst_match) {
1567 read_unlock(&chan_list_lock);
1568 return c;
1569 }
1570
1571 /* Closest match */
1572 src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
1573 dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
1574 if ((src_match && dst_any) || (src_any && dst_match) ||
1575 (src_any && dst_any))
1576 c1 = c;
1577 }
1578 }
1579
1580 read_unlock(&chan_list_lock);
1581
1582 return c1;
1583}
1584
1585int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
1586 bdaddr_t *dst, u8 dst_type)
1587{
1588 struct sock *sk = chan->sk;
1589 bdaddr_t *src = &bt_sk(sk)->src;
1590 struct l2cap_conn *conn;
1591 struct hci_conn *hcon;
1592 struct hci_dev *hdev;
1593 __u8 auth_type;
1594 int err;
1595
1596 BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", src, dst,
1597 dst_type, __le16_to_cpu(psm));
1598
1599 hdev = hci_get_route(dst, src);
1600 if (!hdev)
1601 return -EHOSTUNREACH;
1602
1603 hci_dev_lock(hdev);
1604
1605 l2cap_chan_lock(chan);
1606
1607 /* PSM must be odd and lsb of upper byte must be 0 */
1608 if ((__le16_to_cpu(psm) & 0x0101) != 0x0001 && !cid &&
1609 chan->chan_type != L2CAP_CHAN_RAW) {
1610 err = -EINVAL;
1611 goto done;
1612 }
1613
1614 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !(psm || cid)) {
1615 err = -EINVAL;
1616 goto done;
1617 }
1618
1619 switch (chan->mode) {
1620 case L2CAP_MODE_BASIC:
1621 break;
1622 case L2CAP_MODE_ERTM:
1623 case L2CAP_MODE_STREAMING:
1624 if (!disable_ertm)
1625 break;
1626 /* fall through */
1627 default:
1628 err = -ENOTSUPP;
1629 goto done;
1630 }
1631
1632 switch (chan->state) {
1633 case BT_CONNECT:
1634 case BT_CONNECT2:
1635 case BT_CONFIG:
1636 /* Already connecting */
1637 err = 0;
1638 goto done;
1639
1640 case BT_CONNECTED:
1641 /* Already connected */
1642 err = -EISCONN;
1643 goto done;
1644
1645 case BT_OPEN:
1646 case BT_BOUND:
1647 /* Can connect */
1648 break;
1649
1650 default:
1651 err = -EBADFD;
1652 goto done;
1653 }
1654
1655 /* Set destination address and psm */
1656 lock_sock(sk);
1657 bacpy(&bt_sk(sk)->dst, dst);
1658 release_sock(sk);
1659
1660 chan->psm = psm;
1661 chan->dcid = cid;
1662
1663 auth_type = l2cap_get_auth_type(chan);
1664
1665 if (chan->dcid == L2CAP_CID_LE_DATA)
1666 hcon = hci_connect(hdev, LE_LINK, dst, dst_type,
1667 chan->sec_level, auth_type);
1668 else
1669 hcon = hci_connect(hdev, ACL_LINK, dst, dst_type,
1670 chan->sec_level, auth_type);
1671
1672 if (IS_ERR(hcon)) {
1673 err = PTR_ERR(hcon);
1674 goto done;
1675 }
1676
1677 conn = l2cap_conn_add(hcon, 0);
1678 if (!conn) {
1679 hci_conn_put(hcon);
1680 err = -ENOMEM;
1681 goto done;
1682 }
1683
1684 if (hcon->type == LE_LINK) {
1685 err = 0;
1686
1687 if (!list_empty(&conn->chan_l)) {
1688 err = -EBUSY;
1689 hci_conn_put(hcon);
1690 }
1691
1692 if (err)
1693 goto done;
1694 }
1695
1696 /* Update source addr of the socket */
1697 bacpy(src, conn->src);
1698
1699 l2cap_chan_unlock(chan);
1700 l2cap_chan_add(conn, chan);
1701 l2cap_chan_lock(chan);
1702
1703 l2cap_state_change(chan, BT_CONNECT);
1704 __set_chan_timer(chan, sk->sk_sndtimeo);
1705
1706 if (hcon->state == BT_CONNECTED) {
1707 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1708 __clear_chan_timer(chan);
1709 if (l2cap_chan_check_security(chan))
1710 l2cap_state_change(chan, BT_CONNECTED);
1711 } else
1712 l2cap_do_start(chan);
1713 }
1714
1715 err = 0;
1716
1717done:
1718 l2cap_chan_unlock(chan);
1719 hci_dev_unlock(hdev);
1720 hci_dev_put(hdev);
1721 return err;
1722}
1723
1724int __l2cap_wait_ack(struct sock *sk)
1725{
1726 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
1727 DECLARE_WAITQUEUE(wait, current);
1728 int err = 0;
1729 int timeo = HZ/5;
1730
1731 add_wait_queue(sk_sleep(sk), &wait);
1732 set_current_state(TASK_INTERRUPTIBLE);
1733 while (chan->unacked_frames > 0 && chan->conn) {
1734 if (!timeo)
1735 timeo = HZ/5;
1736
1737 if (signal_pending(current)) {
1738 err = sock_intr_errno(timeo);
1739 break;
1740 }
1741
1742 release_sock(sk);
1743 timeo = schedule_timeout(timeo);
1744 lock_sock(sk);
1745 set_current_state(TASK_INTERRUPTIBLE);
1746
1747 err = sock_error(sk);
1748 if (err)
1749 break;
1750 }
1751 set_current_state(TASK_RUNNING);
1752 remove_wait_queue(sk_sleep(sk), &wait);
1753 return err;
1754}
1755
1756static void l2cap_monitor_timeout(struct work_struct *work)
1757{
1758 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1759 monitor_timer.work);
1760
1761 BT_DBG("chan %p", chan);
1762
1763 l2cap_chan_lock(chan);
1764
1765 if (!chan->conn) {
1766 l2cap_chan_unlock(chan);
1767 l2cap_chan_put(chan);
1768 return;
1769 }
1770
1771 l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);
1772
1773 l2cap_chan_unlock(chan);
1774 l2cap_chan_put(chan);
1775}
1776
1777static void l2cap_retrans_timeout(struct work_struct *work)
1778{
1779 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1780 retrans_timer.work);
1781
1782 BT_DBG("chan %p", chan);
1783
1784 l2cap_chan_lock(chan);
1785
1786 if (!chan->conn) {
1787 l2cap_chan_unlock(chan);
1788 l2cap_chan_put(chan);
1789 return;
1790 }
1791
1792 l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
1793 l2cap_chan_unlock(chan);
1794 l2cap_chan_put(chan);
1795}
1796
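/* Streaming mode transmit: each queued PDU is sent immediately with
 * the next TX sequence number; nothing is kept for retransmission and
 * no acknowledgements are expected.
 */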
1797static void l2cap_streaming_send(struct l2cap_chan *chan,
1798 struct sk_buff_head *skbs)
1799{
1800 struct sk_buff *skb;
1801 struct l2cap_ctrl *control;
1802
1803 BT_DBG("chan %p, skbs %p", chan, skbs);
1804
1805 skb_queue_splice_tail_init(skbs, &chan->tx_q);
1806
1807 while (!skb_queue_empty(&chan->tx_q)) {
1808
1809 skb = skb_dequeue(&chan->tx_q);
1810
1811 bt_cb(skb)->control.retries = 1;
1812 control = &bt_cb(skb)->control;
1813
1814 control->reqseq = 0;
1815 control->txseq = chan->next_tx_seq;
1816
1817 __pack_control(chan, control, skb);
1818
1819 if (chan->fcs == L2CAP_FCS_CRC16) {
1820 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1821 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1822 }
1823
1824 l2cap_do_send(chan, skb);
1825
1826 BT_DBG("Sent txseq %u", control->txseq);
1827
1828 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1829 chan->frames_sent++;
1830 }
1831}
1832
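/* Send queued I-frames while the remote transmit window has room,
 * stamping each with the current reqseq, cloning it for transmission
 * and (re)starting the retransmission timer. Returns the number of
 * frames sent.
 */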
1833static int l2cap_ertm_send(struct l2cap_chan *chan)
1834{
1835 struct sk_buff *skb, *tx_skb;
1836 struct l2cap_ctrl *control;
1837 int sent = 0;
1838
1839 BT_DBG("chan %p", chan);
1840
1841 if (chan->state != BT_CONNECTED)
1842 return -ENOTCONN;
1843
1844 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1845 return 0;
1846
1847 while (chan->tx_send_head &&
1848 chan->unacked_frames < chan->remote_tx_win &&
1849 chan->tx_state == L2CAP_TX_STATE_XMIT) {
1850
1851 skb = chan->tx_send_head;
1852
1853 bt_cb(skb)->control.retries = 1;
1854 control = &bt_cb(skb)->control;
1855
1856 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1857 control->final = 1;
1858
1859 control->reqseq = chan->buffer_seq;
1860 chan->last_acked_seq = chan->buffer_seq;
1861 control->txseq = chan->next_tx_seq;
1862
1863 __pack_control(chan, control, skb);
1864
1865 if (chan->fcs == L2CAP_FCS_CRC16) {
1866 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1867 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1868 }
1869
1870		/* Clone after data has been modified. Data is assumed to be
1871		 * read-only (for locking purposes) on cloned sk_buffs.
1872 */
1873 tx_skb = skb_clone(skb, GFP_KERNEL);
1874
1875 if (!tx_skb)
1876 break;
1877
1878 __set_retrans_timer(chan);
1879
1880 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1881 chan->unacked_frames++;
1882 chan->frames_sent++;
1883 sent++;
1884
1885 if (skb_queue_is_last(&chan->tx_q, skb))
1886 chan->tx_send_head = NULL;
1887 else
1888 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
1889
1890 l2cap_do_send(chan, tx_skb);
1891 BT_DBG("Sent txseq %u", control->txseq);
1892 }
1893
1894 BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
1895 chan->unacked_frames, skb_queue_len(&chan->tx_q));
1896
1897 return sent;
1898}
1899
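/* Retransmit every sequence number on retrans_list, disconnecting the
 * channel if a frame exceeds the negotiated max_tx retry limit.
 */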
1900static void l2cap_ertm_resend(struct l2cap_chan *chan)
1901{
1902 struct l2cap_ctrl control;
1903 struct sk_buff *skb;
1904 struct sk_buff *tx_skb;
1905 u16 seq;
1906
1907 BT_DBG("chan %p", chan);
1908
1909 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1910 return;
1911
1912 while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
1913 seq = l2cap_seq_list_pop(&chan->retrans_list);
1914
1915 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
1916 if (!skb) {
1917 BT_DBG("Error: Can't retransmit seq %d, frame missing",
1918 seq);
1919 continue;
1920 }
1921
1922 bt_cb(skb)->control.retries++;
1923 control = bt_cb(skb)->control;
1924
1925 if (chan->max_tx != 0 &&
1926 bt_cb(skb)->control.retries > chan->max_tx) {
1927 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
1928 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
1929 l2cap_seq_list_clear(&chan->retrans_list);
1930 break;
1931 }
1932
1933 control.reqseq = chan->buffer_seq;
1934 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1935 control.final = 1;
1936 else
1937 control.final = 0;
1938
1939 if (skb_cloned(skb)) {
1940 /* Cloned sk_buffs are read-only, so we need a
1941 * writeable copy
1942 */
1943 tx_skb = skb_copy(skb, GFP_KERNEL);
1944 } else {
1945 tx_skb = skb_clone(skb, GFP_KERNEL);
1946 }
1947
1948 if (!tx_skb) {
1949 l2cap_seq_list_clear(&chan->retrans_list);
1950 break;
1951 }
1952
1953 /* Update skb contents */
1954 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
1955 put_unaligned_le32(__pack_extended_control(&control),
1956 tx_skb->data + L2CAP_HDR_SIZE);
1957 } else {
1958 put_unaligned_le16(__pack_enhanced_control(&control),
1959 tx_skb->data + L2CAP_HDR_SIZE);
1960 }
1961
1962 if (chan->fcs == L2CAP_FCS_CRC16) {
1963 u16 fcs = crc16(0, (u8 *) tx_skb->data, tx_skb->len);
1964 put_unaligned_le16(fcs, skb_put(tx_skb,
1965 L2CAP_FCS_SIZE));
1966 }
1967
1968 l2cap_do_send(chan, tx_skb);
1969
1970 BT_DBG("Resent txseq %d", control.txseq);
1971
1972 chan->last_acked_seq = chan->buffer_seq;
1973 }
1974}
1975
1976static void l2cap_retransmit(struct l2cap_chan *chan,
1977 struct l2cap_ctrl *control)
1978{
1979 BT_DBG("chan %p, control %p", chan, control);
1980
1981 l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
1982 l2cap_ertm_resend(chan);
1983}
1984
1985static void l2cap_retransmit_all(struct l2cap_chan *chan,
1986 struct l2cap_ctrl *control)
1987{
1988 struct sk_buff *skb;
1989
1990 BT_DBG("chan %p, control %p", chan, control);
1991
1992 if (control->poll)
1993 set_bit(CONN_SEND_FBIT, &chan->conn_state);
1994
1995 l2cap_seq_list_clear(&chan->retrans_list);
1996
1997 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1998 return;
1999
2000 if (chan->unacked_frames) {
2001 skb_queue_walk(&chan->tx_q, skb) {
2002 if (bt_cb(skb)->control.txseq == control->reqseq ||
2003 skb == chan->tx_send_head)
2004 break;
2005 }
2006
2007 skb_queue_walk_from(&chan->tx_q, skb) {
2008 if (skb == chan->tx_send_head)
2009 break;
2010
2011 l2cap_seq_list_append(&chan->retrans_list,
2012 bt_cb(skb)->control.txseq);
2013 }
2014
2015 l2cap_ertm_resend(chan);
2016 }
2017}
2018
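/* Acknowledge received I-frames: send an RNR while locally busy,
 * piggyback the ack on outgoing I-frames when possible, and otherwise
 * send an RR once roughly three quarters of the ack window is
 * outstanding (or rearm the ack timer).
 */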
2019static void l2cap_send_ack(struct l2cap_chan *chan)
2020{
2021 struct l2cap_ctrl control;
2022 u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
2023 chan->last_acked_seq);
2024 int threshold;
2025
2026 BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
2027 chan, chan->last_acked_seq, chan->buffer_seq);
2028
2029 memset(&control, 0, sizeof(control));
2030 control.sframe = 1;
2031
2032 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
2033 chan->rx_state == L2CAP_RX_STATE_RECV) {
2034 __clear_ack_timer(chan);
2035 control.super = L2CAP_SUPER_RNR;
2036 control.reqseq = chan->buffer_seq;
2037 l2cap_send_sframe(chan, &control);
2038 } else {
2039 if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
2040 l2cap_ertm_send(chan);
2041 /* If any i-frames were sent, they included an ack */
2042 if (chan->buffer_seq == chan->last_acked_seq)
2043 frames_to_ack = 0;
2044 }
2045
2046		/* Ack now if the window is 3/4ths full: without mul or div,
2047		 * win + (win << 1) is 3 * win and the >> 2 divides by 4.
2048		 */
2049 threshold = chan->ack_win;
2050 threshold += threshold << 1;
2051 threshold >>= 2;
2052
2053 BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
2054 threshold);
2055
2056 if (frames_to_ack >= threshold) {
2057 __clear_ack_timer(chan);
2058 control.super = L2CAP_SUPER_RR;
2059 control.reqseq = chan->buffer_seq;
2060 l2cap_send_sframe(chan, &control);
2061 frames_to_ack = 0;
2062 }
2063
2064 if (frames_to_ack)
2065 __set_ack_timer(chan);
2066 }
2067}
2068
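/* Copy user data from the msghdr iovec into skb, allocating
 * continuation fragments capped at the HCI MTU as needed. Returns the
 * number of bytes copied or a negative error.
 */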
2069static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
2070 struct msghdr *msg, int len,
2071 int count, struct sk_buff *skb)
2072{
2073 struct l2cap_conn *conn = chan->conn;
2074 struct sk_buff **frag;
2075 int sent = 0;
2076
2077 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
2078 return -EFAULT;
2079
2080 sent += count;
2081 len -= count;
2082
2083 /* Continuation fragments (no L2CAP header) */
2084 frag = &skb_shinfo(skb)->frag_list;
2085 while (len) {
2086 struct sk_buff *tmp;
2087
2088 count = min_t(unsigned int, conn->mtu, len);
2089
2090 tmp = chan->ops->alloc_skb(chan, count,
2091 msg->msg_flags & MSG_DONTWAIT);
2092 if (IS_ERR(tmp))
2093 return PTR_ERR(tmp);
2094
2095 *frag = tmp;
2096
2097 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
2098 return -EFAULT;
2099
2100 (*frag)->priority = skb->priority;
2101
2102 sent += count;
2103 len -= count;
2104
2105 skb->len += (*frag)->len;
2106 skb->data_len += (*frag)->len;
2107
2108 frag = &(*frag)->next;
2109 }
2110
2111 return sent;
2112}
2113
2114static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
2115 struct msghdr *msg, size_t len,
2116 u32 priority)
2117{
2118 struct l2cap_conn *conn = chan->conn;
2119 struct sk_buff *skb;
2120 int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
2121 struct l2cap_hdr *lh;
2122
2123 BT_DBG("chan %p len %zu priority %u", chan, len, priority);
2124
2125 count = min_t(unsigned int, (conn->mtu - hlen), len);
2126
2127 skb = chan->ops->alloc_skb(chan, count + hlen,
2128 msg->msg_flags & MSG_DONTWAIT);
2129 if (IS_ERR(skb))
2130 return skb;
2131
2132 skb->priority = priority;
2133
2134 /* Create L2CAP header */
2135 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2136 lh->cid = cpu_to_le16(chan->dcid);
2137 lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
2138 put_unaligned(chan->psm, skb_put(skb, L2CAP_PSMLEN_SIZE));
2139
2140 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2141 if (unlikely(err < 0)) {
2142 kfree_skb(skb);
2143 return ERR_PTR(err);
2144 }
2145 return skb;
2146}
2147
2148static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
2149 struct msghdr *msg, size_t len,
2150 u32 priority)
2151{
2152 struct l2cap_conn *conn = chan->conn;
2153 struct sk_buff *skb;
2154 int err, count;
2155 struct l2cap_hdr *lh;
2156
2157 BT_DBG("chan %p len %zu", chan, len);
2158
2159 count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
2160
2161 skb = chan->ops->alloc_skb(chan, count + L2CAP_HDR_SIZE,
2162 msg->msg_flags & MSG_DONTWAIT);
2163 if (IS_ERR(skb))
2164 return skb;
2165
2166 skb->priority = priority;
2167
2168 /* Create L2CAP header */
2169 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2170 lh->cid = cpu_to_le16(chan->dcid);
2171 lh->len = cpu_to_le16(len);
2172
2173 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2174 if (unlikely(err < 0)) {
2175 kfree_skb(skb);
2176 return ERR_PTR(err);
2177 }
2178 return skb;
2179}
2180
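/* Build a single ERTM/streaming I-frame: basic L2CAP header, a zeroed
 * 16 or 32 bit control field (filled in when the frame is actually
 * transmitted), an optional SDU length field for the first segment of
 * a segmented SDU, and the payload. The FCS is not appended here;
 * chan->fcs is only recorded in the skb control block.
 */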
2181static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
2182 struct msghdr *msg, size_t len,
2183 u16 sdulen)
2184{
2185 struct l2cap_conn *conn = chan->conn;
2186 struct sk_buff *skb;
2187 int err, count, hlen;
2188 struct l2cap_hdr *lh;
2189
2190 BT_DBG("chan %p len %zu", chan, len);
2191
2192 if (!conn)
2193 return ERR_PTR(-ENOTCONN);
2194
2195 hlen = __ertm_hdr_size(chan);
2196
2197 if (sdulen)
2198 hlen += L2CAP_SDULEN_SIZE;
2199
2200 if (chan->fcs == L2CAP_FCS_CRC16)
2201 hlen += L2CAP_FCS_SIZE;
2202
2203 count = min_t(unsigned int, (conn->mtu - hlen), len);
2204
2205 skb = chan->ops->alloc_skb(chan, count + hlen,
2206 msg->msg_flags & MSG_DONTWAIT);
2207 if (IS_ERR(skb))
2208 return skb;
2209
2210 /* Create L2CAP header */
2211 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2212 lh->cid = cpu_to_le16(chan->dcid);
2213 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2214
2215 /* Control header is populated later */
2216 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2217 put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
2218 else
2219 put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
2220
2221 if (sdulen)
2222 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2223
2224 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2225 if (unlikely(err < 0)) {
2226 kfree_skb(skb);
2227 return ERR_PTR(err);
2228 }
2229
2230 bt_cb(skb)->control.fcs = chan->fcs;
2231 bt_cb(skb)->control.retries = 0;
2232 return skb;
2233}
2234
2235static int l2cap_segment_sdu(struct l2cap_chan *chan,
2236 struct sk_buff_head *seg_queue,
2237 struct msghdr *msg, size_t len)
2238{
2239 struct sk_buff *skb;
2240 u16 sdu_len;
2241 size_t pdu_len;
2242 u8 sar;
2243
2244 BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2245
2246 /* It is critical that ERTM PDUs fit in a single HCI fragment,
2247 * so fragmented skbs are not used. The HCI layer's handling
2248 * of fragmented skbs is not compatible with ERTM's queueing.
2249 */
2250
2251 /* PDU size is derived from the HCI MTU */
2252 pdu_len = chan->conn->mtu;
2253
2254 pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);
2255
2256 /* Adjust for largest possible L2CAP overhead. */
2257 if (chan->fcs)
2258 pdu_len -= L2CAP_FCS_SIZE;
2259
2260 pdu_len -= __ertm_hdr_size(chan);
2261
2262 /* Remote device may have requested smaller PDUs */
2263 pdu_len = min_t(size_t, pdu_len, chan->remote_mps);
2264
2265 if (len <= pdu_len) {
2266 sar = L2CAP_SAR_UNSEGMENTED;
2267 sdu_len = 0;
2268 pdu_len = len;
2269 } else {
2270 sar = L2CAP_SAR_START;
2271 sdu_len = len;
2272 pdu_len -= L2CAP_SDULEN_SIZE;
2273 }
2274
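/* Illustration with hypothetical numbers: for a 1000 byte SDU and a
 * pdu_len of 498, the loop below emits a 496 byte SAR_START PDU
 * (2 bytes go to the SDU length field), a 498 byte SAR_CONTINUE PDU
 * and a 6 byte SAR_END PDU.
 */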
2275 while (len > 0) {
2276 skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);
2277
2278 if (IS_ERR(skb)) {
2279 __skb_queue_purge(seg_queue);
2280 return PTR_ERR(skb);
2281 }
2282
2283 bt_cb(skb)->control.sar = sar;
2284 __skb_queue_tail(seg_queue, skb);
2285
2286 len -= pdu_len;
2287 if (sdu_len) {
2288 sdu_len = 0;
2289 pdu_len += L2CAP_SDULEN_SIZE;
2290 }
2291
2292 if (len <= pdu_len) {
2293 sar = L2CAP_SAR_END;
2294 pdu_len = len;
2295 } else {
2296 sar = L2CAP_SAR_CONTINUE;
2297 }
2298 }
2299
2300 return 0;
2301}
2302
2303int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
2304 u32 priority)
2305{
2306 struct sk_buff *skb;
2307 int err;
2308 struct sk_buff_head seg_queue;
2309
2310 /* Connectionless channel */
2311 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
2312 skb = l2cap_create_connless_pdu(chan, msg, len, priority);
2313 if (IS_ERR(skb))
2314 return PTR_ERR(skb);
2315
2316 l2cap_do_send(chan, skb);
2317 return len;
2318 }
2319
2320 switch (chan->mode) {
2321 case L2CAP_MODE_BASIC:
2322 /* Check outgoing MTU */
2323 if (len > chan->omtu)
2324 return -EMSGSIZE;
2325
2326 /* Create a basic PDU */
2327 skb = l2cap_create_basic_pdu(chan, msg, len, priority);
2328 if (IS_ERR(skb))
2329 return PTR_ERR(skb);
2330
2331 l2cap_do_send(chan, skb);
2332 err = len;
2333 break;
2334
2335 case L2CAP_MODE_ERTM:
2336 case L2CAP_MODE_STREAMING:
2337 /* Check outgoing MTU */
2338 if (len > chan->omtu) {
2339 err = -EMSGSIZE;
2340 break;
2341 }
2342
2343 __skb_queue_head_init(&seg_queue);
2344
2345 /* Do segmentation before calling into the state machine,
2346 * since it's possible to block while waiting for memory
2347 * allocation.
2348 */
2349 err = l2cap_segment_sdu(chan, &seg_queue, msg, len);
2350
2351 /* The channel could have been closed while segmenting,
2352 * check that it is still connected.
2353 */
2354 if (chan->state != BT_CONNECTED) {
2355 __skb_queue_purge(&seg_queue);
2356 err = -ENOTCONN;
2357 }
2358
2359 if (err)
2360 break;
2361
2362 if (chan->mode == L2CAP_MODE_ERTM)
2363 l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
2364 else
2365 l2cap_streaming_send(chan, &seg_queue);
2366
2367 err = len;
2368
2369 /* If the skbs were not queued for sending, they'll still be in
2370 * seg_queue and need to be purged.
2371 */
2372 __skb_queue_purge(&seg_queue);
2373 break;
2374
2375 default:
2376 BT_DBG("bad mode %1.1x", chan->mode);
2377 err = -EBADFD;
2378 }
2379
2380 return err;
2381}
2382
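/* Request retransmission of every missing I-frame between the
 * expected sequence number and the one just received. For example
 * (hypothetical numbers), if expected_tx_seq is 5 and txseq is 8,
 * SREJ frames are sent for 5, 6 and 7 unless they already sit in
 * the srej_q, and each requested sequence is noted in srej_list.
 */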
2383static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
2384{
2385 struct l2cap_ctrl control;
2386 u16 seq;
2387
2388 BT_DBG("chan %p, txseq %u", chan, txseq);
2389
2390 memset(&control, 0, sizeof(control));
2391 control.sframe = 1;
2392 control.super = L2CAP_SUPER_SREJ;
2393
2394 for (seq = chan->expected_tx_seq; seq != txseq;
2395 seq = __next_seq(chan, seq)) {
2396 if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
2397 control.reqseq = seq;
2398 l2cap_send_sframe(chan, &control);
2399 l2cap_seq_list_append(&chan->srej_list, seq);
2400 }
2401 }
2402
2403 chan->expected_tx_seq = __next_seq(chan, txseq);
2404}
2405
2406static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2407{
2408 struct l2cap_ctrl control;
2409
2410 BT_DBG("chan %p", chan);
2411
2412 if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2413 return;
2414
2415 memset(&control, 0, sizeof(control));
2416 control.sframe = 1;
2417 control.super = L2CAP_SUPER_SREJ;
2418 control.reqseq = chan->srej_list.tail;
2419 l2cap_send_sframe(chan, &control);
2420}
2421
2422static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
2423{
2424 struct l2cap_ctrl control;
2425 u16 initial_head;
2426 u16 seq;
2427
2428 BT_DBG("chan %p, txseq %u", chan, txseq);
2429
2430 memset(&control, 0, sizeof(control));
2431 control.sframe = 1;
2432 control.super = L2CAP_SUPER_SREJ;
2433
2434 /* Capture initial list head to allow only one pass through the list. */
2435 initial_head = chan->srej_list.head;
2436
2437 do {
2438 seq = l2cap_seq_list_pop(&chan->srej_list);
2439 if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
2440 break;
2441
2442 control.reqseq = seq;
2443 l2cap_send_sframe(chan, &control);
2444 l2cap_seq_list_append(&chan->srej_list, seq);
2445 } while (chan->srej_list.head != initial_head);
2446}
2447
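/* A received reqseq acknowledges every outstanding I-frame with a
 * sequence number from expected_ack_seq up to (but not including)
 * reqseq: those frames are removed from tx_q, unacked_frames is
 * decremented, and the retransmission timer is stopped once nothing
 * remains unacknowledged.
 */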
2448static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
2449{
2450 struct sk_buff *acked_skb;
2451 u16 ackseq;
2452
2453 BT_DBG("chan %p, reqseq %u", chan, reqseq);
2454
2455 if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
2456 return;
2457
2458 BT_DBG("expected_ack_seq %u, unacked_frames %u",
2459 chan->expected_ack_seq, chan->unacked_frames);
2460
2461 for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
2462 ackseq = __next_seq(chan, ackseq)) {
2463
2464 acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
2465 if (acked_skb) {
2466 skb_unlink(acked_skb, &chan->tx_q);
2467 kfree_skb(acked_skb);
2468 chan->unacked_frames--;
2469 }
2470 }
2471
2472 chan->expected_ack_seq = reqseq;
2473
2474 if (chan->unacked_frames == 0)
2475 __clear_retrans_timer(chan);
2476
2477 BT_DBG("unacked_frames %u", chan->unacked_frames);
2478}
2479
2480static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2481{
2482 BT_DBG("chan %p", chan);
2483
2484 chan->expected_tx_seq = chan->buffer_seq;
2485 l2cap_seq_list_clear(&chan->srej_list);
2486 skb_queue_purge(&chan->srej_q);
2487 chan->rx_state = L2CAP_RX_STATE_RECV;
2488}
2489
2490static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
2491 struct l2cap_ctrl *control,
2492 struct sk_buff_head *skbs, u8 event)
2493{
2494 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2495 event);
2496
2497 switch (event) {
2498 case L2CAP_EV_DATA_REQUEST:
2499 if (chan->tx_send_head == NULL)
2500 chan->tx_send_head = skb_peek(skbs);
2501
2502 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2503 l2cap_ertm_send(chan);
2504 break;
2505 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2506 BT_DBG("Enter LOCAL_BUSY");
2507 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2508
2509 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2510 /* The SREJ_SENT state must be aborted if we are to
2511 * enter the LOCAL_BUSY state.
2512 */
2513 l2cap_abort_rx_srej_sent(chan);
2514 }
2515
2516 l2cap_send_ack(chan);
2517
2518 break;
2519 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2520 BT_DBG("Exit LOCAL_BUSY");
2521 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2522
2523 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2524 struct l2cap_ctrl local_control;
2525
2526 memset(&local_control, 0, sizeof(local_control));
2527 local_control.sframe = 1;
2528 local_control.super = L2CAP_SUPER_RR;
2529 local_control.poll = 1;
2530 local_control.reqseq = chan->buffer_seq;
2531 l2cap_send_sframe(chan, &local_control);
2532
2533 chan->retry_count = 1;
2534 __set_monitor_timer(chan);
2535 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2536 }
2537 break;
2538 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2539 l2cap_process_reqseq(chan, control->reqseq);
2540 break;
2541 case L2CAP_EV_EXPLICIT_POLL:
2542 l2cap_send_rr_or_rnr(chan, 1);
2543 chan->retry_count = 1;
2544 __set_monitor_timer(chan);
2545 __clear_ack_timer(chan);
2546 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2547 break;
2548 case L2CAP_EV_RETRANS_TO:
2549 l2cap_send_rr_or_rnr(chan, 1);
2550 chan->retry_count = 1;
2551 __set_monitor_timer(chan);
2552 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2553 break;
2554 case L2CAP_EV_RECV_FBIT:
2555 /* Nothing to process */
2556 break;
2557 default:
2558 break;
2559 }
2560}
2561
2562static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2563 struct l2cap_ctrl *control,
2564 struct sk_buff_head *skbs, u8 event)
2565{
2566 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2567 event);
2568
2569 switch (event) {
2570 case L2CAP_EV_DATA_REQUEST:
2571 if (chan->tx_send_head == NULL)
2572 chan->tx_send_head = skb_peek(skbs);
2573 /* Queue data, but don't send. */
2574 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2575 break;
2576 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2577 BT_DBG("Enter LOCAL_BUSY");
2578 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2579
2580 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2581 /* The SREJ_SENT state must be aborted if we are to
2582 * enter the LOCAL_BUSY state.
2583 */
2584 l2cap_abort_rx_srej_sent(chan);
2585 }
2586
2587 l2cap_send_ack(chan);
2588
2589 break;
2590 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2591 BT_DBG("Exit LOCAL_BUSY");
2592 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2593
2594 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2595 struct l2cap_ctrl local_control;
2596 memset(&local_control, 0, sizeof(local_control));
2597 local_control.sframe = 1;
2598 local_control.super = L2CAP_SUPER_RR;
2599 local_control.poll = 1;
2600 local_control.reqseq = chan->buffer_seq;
2601 l2cap_send_sframe(chan, &local_control);
2602
2603 chan->retry_count = 1;
2604 __set_monitor_timer(chan);
2605 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2606 }
2607 break;
2608 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2609 l2cap_process_reqseq(chan, control->reqseq);
2610
2611 /* Fall through */
2612
2613 case L2CAP_EV_RECV_FBIT:
2614 if (control && control->final) {
2615 __clear_monitor_timer(chan);
2616 if (chan->unacked_frames > 0)
2617 __set_retrans_timer(chan);
2618 chan->retry_count = 0;
2619 chan->tx_state = L2CAP_TX_STATE_XMIT;
2620 BT_DBG("recv fbit tx_state 0x%2.2x", chan->tx_state);
2621 }
2622 break;
2623 case L2CAP_EV_EXPLICIT_POLL:
2624 /* Ignore */
2625 break;
2626 case L2CAP_EV_MONITOR_TO:
2627 if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
2628 l2cap_send_rr_or_rnr(chan, 1);
2629 __set_monitor_timer(chan);
2630 chan->retry_count++;
2631 } else {
2632 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
2633 }
2634 break;
2635 default:
2636 break;
2637 }
2638}
2639
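/* ERTM transmit state machine entry point. In XMIT state new data is
 * queued and sent immediately; in WAIT_F state a poll is outstanding,
 * so new data is only queued and the state returns to XMIT once a
 * frame carrying the final (F) bit is received.
 */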
2640static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
2641 struct sk_buff_head *skbs, u8 event)
2642{
2643 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2644 chan, control, skbs, event, chan->tx_state);
2645
2646 switch (chan->tx_state) {
2647 case L2CAP_TX_STATE_XMIT:
2648 l2cap_tx_state_xmit(chan, control, skbs, event);
2649 break;
2650 case L2CAP_TX_STATE_WAIT_F:
2651 l2cap_tx_state_wait_f(chan, control, skbs, event);
2652 break;
2653 default:
2654 /* Ignore event */
2655 break;
2656 }
2657}
2658
2659static void l2cap_pass_to_tx(struct l2cap_chan *chan,
2660 struct l2cap_ctrl *control)
2661{
2662 BT_DBG("chan %p, control %p", chan, control);
2663 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
2664}
2665
2666static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
2667 struct l2cap_ctrl *control)
2668{
2669 BT_DBG("chan %p, control %p", chan, control);
2670 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
2671}
2672
2673/* Copy frame to all raw sockets on that connection */
2674static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2675{
2676 struct sk_buff *nskb;
2677 struct l2cap_chan *chan;
2678
2679 BT_DBG("conn %p", conn);
2680
2681 mutex_lock(&conn->chan_lock);
2682
2683 list_for_each_entry(chan, &conn->chan_l, list) {
2684 struct sock *sk = chan->sk;

2685 if (chan->chan_type != L2CAP_CHAN_RAW)
2686 continue;
2687
2688 /* Don't send frame to the socket it came from */
2689 if (skb->sk == sk)
2690 continue;
2691 nskb = skb_clone(skb, GFP_KERNEL);
2692 if (!nskb)
2693 continue;
2694
2695 if (chan->ops->recv(chan, nskb))
2696 kfree_skb(nskb);
2697 }
2698
2699 mutex_unlock(&conn->chan_lock);
2700}
2701
2702/* ---- L2CAP signalling commands ---- */
2703static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
2704 u8 ident, u16 dlen, void *data)
2705{
2706 struct sk_buff *skb, **frag;
2707 struct l2cap_cmd_hdr *cmd;
2708 struct l2cap_hdr *lh;
2709 int len, count;
2710
2711 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
2712 conn, code, ident, dlen);
2713
2714 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
2715 count = min_t(unsigned int, conn->mtu, len);
2716
2717 skb = bt_skb_alloc(count, GFP_KERNEL);
2718 if (!skb)
2719 return NULL;
2720
2721 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2722 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
2723
2724 if (conn->hcon->type == LE_LINK)
2725 lh->cid = __constant_cpu_to_le16(L2CAP_CID_LE_SIGNALING);
2726 else
2727 lh->cid = __constant_cpu_to_le16(L2CAP_CID_SIGNALING);
2728
2729 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
2730 cmd->code = code;
2731 cmd->ident = ident;
2732 cmd->len = cpu_to_le16(dlen);
2733
2734 if (dlen) {
2735 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
2736 memcpy(skb_put(skb, count), data, count);
2737 data += count;
2738 }
2739
2740 len -= skb->len;
2741
2742 /* Continuation fragments (no L2CAP header) */
2743 frag = &skb_shinfo(skb)->frag_list;
2744 while (len) {
2745 count = min_t(unsigned int, conn->mtu, len);
2746
2747 *frag = bt_skb_alloc(count, GFP_KERNEL);
2748 if (!*frag)
2749 goto fail;
2750
2751 memcpy(skb_put(*frag, count), data, count);
2752
2753 len -= count;
2754 data += count;
2755
2756 frag = &(*frag)->next;
2757 }
2758
2759 return skb;
2760
2761fail:
2762 kfree_skb(skb);
2763 return NULL;
2764}
2765
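/* Configuration options are type/length/value triples. The helper
 * below decodes one option and advances the parse pointer past it;
 * l2cap_add_conf_opt() is its encoding counterpart. For example, an
 * MTU option is encoded as type L2CAP_CONF_MTU, length 2 and a
 * little-endian 16 bit value.
 */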
2766static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
2767 unsigned long *val)
2768{
2769 struct l2cap_conf_opt *opt = *ptr;
2770 int len;
2771
2772 len = L2CAP_CONF_OPT_SIZE + opt->len;
2773 *ptr += len;
2774
2775 *type = opt->type;
2776 *olen = opt->len;
2777
2778 switch (opt->len) {
2779 case 1:
2780 *val = *((u8 *) opt->val);
2781 break;
2782
2783 case 2:
2784 *val = get_unaligned_le16(opt->val);
2785 break;
2786
2787 case 4:
2788 *val = get_unaligned_le32(opt->val);
2789 break;
2790
2791 default:
2792 *val = (unsigned long) opt->val;
2793 break;
2794 }
2795
2796 BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
2797 return len;
2798}
2799
2800static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2801{
2802 struct l2cap_conf_opt *opt = *ptr;
2803
2804 BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);
2805
2806 opt->type = type;
2807 opt->len = len;
2808
2809 switch (len) {
2810 case 1:
2811 *((u8 *) opt->val) = val;
2812 break;
2813
2814 case 2:
2815 put_unaligned_le16(val, opt->val);
2816 break;
2817
2818 case 4:
2819 put_unaligned_le32(val, opt->val);
2820 break;
2821
2822 default:
2823 memcpy(opt->val, (void *) val, len);
2824 break;
2825 }
2826
2827 *ptr += L2CAP_CONF_OPT_SIZE + len;
2828}
2829
2830static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
2831{
2832 struct l2cap_conf_efs efs;
2833
2834 switch (chan->mode) {
2835 case L2CAP_MODE_ERTM:
2836 efs.id = chan->local_id;
2837 efs.stype = chan->local_stype;
2838 efs.msdu = cpu_to_le16(chan->local_msdu);
2839 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2840 efs.acc_lat = __constant_cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
2841 efs.flush_to = __constant_cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
2842 break;
2843
2844 case L2CAP_MODE_STREAMING:
2845 efs.id = 1;
2846 efs.stype = L2CAP_SERV_BESTEFFORT;
2847 efs.msdu = cpu_to_le16(chan->local_msdu);
2848 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2849 efs.acc_lat = 0;
2850 efs.flush_to = 0;
2851 break;
2852
2853 default:
2854 return;
2855 }
2856
2857 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
2858 (unsigned long) &efs);
2859}
2860
2861static void l2cap_ack_timeout(struct work_struct *work)
2862{
2863 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
2864 ack_timer.work);
2865 u16 frames_to_ack;
2866
2867 BT_DBG("chan %p", chan);
2868
2869 l2cap_chan_lock(chan);
2870
2871 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
2872 chan->last_acked_seq);
2873
2874 if (frames_to_ack)
2875 l2cap_send_rr_or_rnr(chan, 0);
2876
2877 l2cap_chan_unlock(chan);
2878 l2cap_chan_put(chan);
2879}
2880
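/* Reset sequence counters, SDU reassembly state, the transmit queue
 * and the AMP move state for a freshly configured channel. Timers,
 * the SREJ queue and the sequence lists are only set up when the
 * channel actually runs in ERTM mode.
 */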
2881int l2cap_ertm_init(struct l2cap_chan *chan)
2882{
2883 int err;
2884
2885 chan->next_tx_seq = 0;
2886 chan->expected_tx_seq = 0;
2887 chan->expected_ack_seq = 0;
2888 chan->unacked_frames = 0;
2889 chan->buffer_seq = 0;
2890 chan->frames_sent = 0;
2891 chan->last_acked_seq = 0;
2892 chan->sdu = NULL;
2893 chan->sdu_last_frag = NULL;
2894 chan->sdu_len = 0;
2895
2896 skb_queue_head_init(&chan->tx_q);
2897
2898 chan->local_amp_id = 0;
2899 chan->move_id = 0;
2900 chan->move_state = L2CAP_MOVE_STABLE;
2901 chan->move_role = L2CAP_MOVE_ROLE_NONE;
2902
2903 if (chan->mode != L2CAP_MODE_ERTM)
2904 return 0;
2905
2906 chan->rx_state = L2CAP_RX_STATE_RECV;
2907 chan->tx_state = L2CAP_TX_STATE_XMIT;
2908
2909 INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
2910 INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
2911 INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
2912
2913 skb_queue_head_init(&chan->srej_q);
2914
2915 err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
2916 if (err < 0)
2917 return err;
2918
2919 err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
2920 if (err < 0)
2921 l2cap_seq_list_free(&chan->srej_list);
2922
2923 return err;
2924}
2925
2926static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2927{
2928 switch (mode) {
2929 case L2CAP_MODE_STREAMING:
2930 case L2CAP_MODE_ERTM:
2931 if (l2cap_mode_supported(mode, remote_feat_mask))
2932 return mode;
2933 /* fall through */
2934 default:
2935 return L2CAP_MODE_BASIC;
2936 }
2937}
2938
2939static inline bool __l2cap_ews_supported(struct l2cap_chan *chan)
2940{
2941 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
2942}
2943
2944static inline bool __l2cap_efs_supported(struct l2cap_chan *chan)
2945{
2946 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
2947}
2948
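/* If the requested transmit window exceeds L2CAP_DEFAULT_TX_WINDOW
 * and the extended window size option is available, switch to the
 * 32 bit extended control field; otherwise clamp the window to the
 * default. The ack window always mirrors the resulting tx_win.
 */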
2949static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
2950{
2951 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
2952 __l2cap_ews_supported(chan)) {
2953 /* use extended control field */
2954 set_bit(FLAG_EXT_CTRL, &chan->flags);
2955 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
2956 } else {
2957 chan->tx_win = min_t(u16, chan->tx_win,
2958 L2CAP_DEFAULT_TX_WINDOW);
2959 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
2960 }
2961 chan->ack_win = chan->tx_win;
2962}
2963
2964static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
2965{
2966 struct l2cap_conf_req *req = data;
2967 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
2968 void *ptr = req->data;
2969 u16 size;
2970
2971 BT_DBG("chan %p", chan);
2972
2973 if (chan->num_conf_req || chan->num_conf_rsp)
2974 goto done;
2975
2976 switch (chan->mode) {
2977 case L2CAP_MODE_STREAMING:
2978 case L2CAP_MODE_ERTM:
2979 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
2980 break;
2981
2982 if (__l2cap_efs_supported(chan))
2983 set_bit(FLAG_EFS_ENABLE, &chan->flags);
2984
2985 /* fall through */
2986 default:
2987 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
2988 break;
2989 }
2990
2991done:
2992 if (chan->imtu != L2CAP_DEFAULT_MTU)
2993 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
2994
2995 switch (chan->mode) {
2996 case L2CAP_MODE_BASIC:
2997 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
2998 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
2999 break;
3000
3001 rfc.mode = L2CAP_MODE_BASIC;
3002 rfc.txwin_size = 0;
3003 rfc.max_transmit = 0;
3004 rfc.retrans_timeout = 0;
3005 rfc.monitor_timeout = 0;
3006 rfc.max_pdu_size = 0;
3007
3008 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3009 (unsigned long) &rfc);
3010 break;
3011
3012 case L2CAP_MODE_ERTM:
3013 rfc.mode = L2CAP_MODE_ERTM;
3014 rfc.max_transmit = chan->max_tx;
3015 rfc.retrans_timeout = 0;
3016 rfc.monitor_timeout = 0;
3017
3018 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3019 L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3020 L2CAP_FCS_SIZE);
3021 rfc.max_pdu_size = cpu_to_le16(size);
3022
3023 l2cap_txwin_setup(chan);
3024
3025 rfc.txwin_size = min_t(u16, chan->tx_win,
3026 L2CAP_DEFAULT_TX_WINDOW);
3027
3028 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3029 (unsigned long) &rfc);
3030
3031 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3032 l2cap_add_opt_efs(&ptr, chan);
3033
3034 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
3035 break;
3036
3037 if (chan->fcs == L2CAP_FCS_NONE ||
3038 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
3039 chan->fcs = L2CAP_FCS_NONE;
3040 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
3041 }
3042
3043 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3044 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3045 chan->tx_win);
3046 break;
3047
3048 case L2CAP_MODE_STREAMING:
3049 l2cap_txwin_setup(chan);
3050 rfc.mode = L2CAP_MODE_STREAMING;
3051 rfc.txwin_size = 0;
3052 rfc.max_transmit = 0;
3053 rfc.retrans_timeout = 0;
3054 rfc.monitor_timeout = 0;
3055
3056 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3057 L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3058 L2CAP_FCS_SIZE);
3059 rfc.max_pdu_size = cpu_to_le16(size);
3060
3061 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3062 (unsigned long) &rfc);
3063
3064 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3065 l2cap_add_opt_efs(&ptr, chan);
3066
3067 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
3068 break;
3069
3070 if (chan->fcs == L2CAP_FCS_NONE ||
3071 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
3072 chan->fcs = L2CAP_FCS_NONE;
3073 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
3074 }
3075 break;
3076 }
3077
3078 req->dcid = cpu_to_le16(chan->dcid);
3079 req->flags = __constant_cpu_to_le16(0);
3080
3081 return ptr - data;
3082}
3083
3084static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
3085{
3086 struct l2cap_conf_rsp *rsp = data;
3087 void *ptr = rsp->data;
3088 void *req = chan->conf_req;
3089 int len = chan->conf_len;
3090 int type, hint, olen;
3091 unsigned long val;
3092 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3093 struct l2cap_conf_efs efs;
3094 u8 remote_efs = 0;
3095 u16 mtu = L2CAP_DEFAULT_MTU;
3096 u16 result = L2CAP_CONF_SUCCESS;
3097 u16 size;
3098
3099 BT_DBG("chan %p", chan);
3100
3101 while (len >= L2CAP_CONF_OPT_SIZE) {
3102 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
3103
3104 hint = type & L2CAP_CONF_HINT;
3105 type &= L2CAP_CONF_MASK;
3106
3107 switch (type) {
3108 case L2CAP_CONF_MTU:
3109 mtu = val;
3110 break;
3111
3112 case L2CAP_CONF_FLUSH_TO:
3113 chan->flush_to = val;
3114 break;
3115
3116 case L2CAP_CONF_QOS:
3117 break;
3118
3119 case L2CAP_CONF_RFC:
3120 if (olen == sizeof(rfc))
3121 memcpy(&rfc, (void *) val, olen);
3122 break;
3123
3124 case L2CAP_CONF_FCS:
3125 if (val == L2CAP_FCS_NONE)
3126 set_bit(CONF_NO_FCS_RECV, &chan->conf_state);
3127 break;
3128
3129 case L2CAP_CONF_EFS:
3130 remote_efs = 1;
3131 if (olen == sizeof(efs))
3132 memcpy(&efs, (void *) val, olen);
3133 break;
3134
3135 case L2CAP_CONF_EWS:
3136 if (!enable_hs)
3137 return -ECONNREFUSED;
3138
3139 set_bit(FLAG_EXT_CTRL, &chan->flags);
3140 set_bit(CONF_EWS_RECV, &chan->conf_state);
3141 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3142 chan->remote_tx_win = val;
3143 break;
3144
3145 default:
3146 if (hint)
3147 break;
3148
3149 result = L2CAP_CONF_UNKNOWN;
3150 *((u8 *) ptr++) = type;
3151 break;
3152 }
3153 }
3154
3155 if (chan->num_conf_rsp || chan->num_conf_req > 1)
3156 goto done;
3157
3158 switch (chan->mode) {
3159 case L2CAP_MODE_STREAMING:
3160 case L2CAP_MODE_ERTM:
3161 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
3162 chan->mode = l2cap_select_mode(rfc.mode,
3163 chan->conn->feat_mask);
3164 break;
3165 }
3166
3167 if (remote_efs) {
3168 if (__l2cap_efs_supported(chan))
3169 set_bit(FLAG_EFS_ENABLE, &chan->flags);
3170 else
3171 return -ECONNREFUSED;
3172 }
3173
3174 if (chan->mode != rfc.mode)
3175 return -ECONNREFUSED;
3176
3177 break;
3178 }
3179
3180done:
3181 if (chan->mode != rfc.mode) {
3182 result = L2CAP_CONF_UNACCEPT;
3183 rfc.mode = chan->mode;
3184
3185 if (chan->num_conf_rsp == 1)
3186 return -ECONNREFUSED;
3187
3188 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3189 (unsigned long) &rfc);
3190 }
3191
3192 if (result == L2CAP_CONF_SUCCESS) {
3193 /* Configure output options and let the other side know
3194 * which ones we don't like. */
3195
3196 if (mtu < L2CAP_DEFAULT_MIN_MTU)
3197 result = L2CAP_CONF_UNACCEPT;
3198 else {
3199 chan->omtu = mtu;
3200 set_bit(CONF_MTU_DONE, &chan->conf_state);
3201 }
3202 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
3203
3204 if (remote_efs) {
3205 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3206 efs.stype != L2CAP_SERV_NOTRAFIC &&
3207 efs.stype != chan->local_stype) {
3208
3209 result = L2CAP_CONF_UNACCEPT;
3210
3211 if (chan->num_conf_req >= 1)
3212 return -ECONNREFUSED;
3213
3214 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3215 sizeof(efs),
3216 (unsigned long) &efs);
3217 } else {
3218 /* Send PENDING Conf Rsp */
3219 result = L2CAP_CONF_PENDING;
3220 set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3221 }
3222 }
3223
3224 switch (rfc.mode) {
3225 case L2CAP_MODE_BASIC:
3226 chan->fcs = L2CAP_FCS_NONE;
3227 set_bit(CONF_MODE_DONE, &chan->conf_state);
3228 break;
3229
3230 case L2CAP_MODE_ERTM:
3231 if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
3232 chan->remote_tx_win = rfc.txwin_size;
3233 else
3234 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
3235
3236 chan->remote_max_tx = rfc.max_transmit;
3237
3238 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3239 chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3240 L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3241 rfc.max_pdu_size = cpu_to_le16(size);
3242 chan->remote_mps = size;
3243
3244 rfc.retrans_timeout =
3245 __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
3246 rfc.monitor_timeout =
3247 __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
3248
3249 set_bit(CONF_MODE_DONE, &chan->conf_state);
3250
3251 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3252 sizeof(rfc), (unsigned long) &rfc);
3253
3254 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3255 chan->remote_id = efs.id;
3256 chan->remote_stype = efs.stype;
3257 chan->remote_msdu = le16_to_cpu(efs.msdu);
3258 chan->remote_flush_to =
3259 le32_to_cpu(efs.flush_to);
3260 chan->remote_acc_lat =
3261 le32_to_cpu(efs.acc_lat);
3262 chan->remote_sdu_itime =
3263 le32_to_cpu(efs.sdu_itime);
3264 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3265 sizeof(efs),
3266 (unsigned long) &efs);
3267 }
3268 break;
3269
3270 case L2CAP_MODE_STREAMING:
3271 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3272 chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3273 L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3274 rfc.max_pdu_size = cpu_to_le16(size);
3275 chan->remote_mps = size;
3276
3277 set_bit(CONF_MODE_DONE, &chan->conf_state);
3278
3279 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3280 (unsigned long) &rfc);
3281
3282 break;
3283
3284 default:
3285 result = L2CAP_CONF_UNACCEPT;
3286
3287 memset(&rfc, 0, sizeof(rfc));
3288 rfc.mode = chan->mode;
3289 }
3290
3291 if (result == L2CAP_CONF_SUCCESS)
3292 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3293 }
3294 rsp->scid = cpu_to_le16(chan->dcid);
3295 rsp->result = cpu_to_le16(result);
3296 rsp->flags = __constant_cpu_to_le16(0);
3297
3298 return ptr - data;
3299}
3300
3301static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
3302 void *data, u16 *result)
3303{
3304 struct l2cap_conf_req *req = data;
3305 void *ptr = req->data;
3306 int type, olen;
3307 unsigned long val;
3308 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3309 struct l2cap_conf_efs efs;
3310
3311 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
3312
3313 while (len >= L2CAP_CONF_OPT_SIZE) {
3314 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3315
3316 switch (type) {
3317 case L2CAP_CONF_MTU:
3318 if (val < L2CAP_DEFAULT_MIN_MTU) {
3319 *result = L2CAP_CONF_UNACCEPT;
3320 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3321 } else
3322 chan->imtu = val;
3323 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
3324 break;
3325
3326 case L2CAP_CONF_FLUSH_TO:
3327 chan->flush_to = val;
3328 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
3329 2, chan->flush_to);
3330 break;
3331
3332 case L2CAP_CONF_RFC:
3333 if (olen == sizeof(rfc))
3334 memcpy(&rfc, (void *)val, olen);
3335
3336 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
3337 rfc.mode != chan->mode)
3338 return -ECONNREFUSED;
3339
3340 chan->fcs = 0;
3341
3342 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3343 sizeof(rfc), (unsigned long) &rfc);
3344 break;
3345
3346 case L2CAP_CONF_EWS:
3347 chan->ack_win = min_t(u16, val, chan->ack_win);
3348 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3349 chan->tx_win);
3350 break;
3351
3352 case L2CAP_CONF_EFS:
3353 if (olen == sizeof(efs))
3354 memcpy(&efs, (void *)val, olen);
3355
3356 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3357 efs.stype != L2CAP_SERV_NOTRAFIC &&
3358 efs.stype != chan->local_stype)
3359 return -ECONNREFUSED;
3360
3361 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
3362 (unsigned long) &efs);
3363 break;
3364 }
3365 }
3366
3367 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
3368 return -ECONNREFUSED;
3369
3370 chan->mode = rfc.mode;
3371
3372 if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
3373 switch (rfc.mode) {
3374 case L2CAP_MODE_ERTM:
3375 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3376 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3377 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3378 if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
3379 chan->ack_win = min_t(u16, chan->ack_win,
3380 rfc.txwin_size);
3381
3382 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3383 chan->local_msdu = le16_to_cpu(efs.msdu);
3384 chan->local_sdu_itime =
3385 le32_to_cpu(efs.sdu_itime);
3386 chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
3387 chan->local_flush_to =
3388 le32_to_cpu(efs.flush_to);
3389 }
3390 break;
3391
3392 case L2CAP_MODE_STREAMING:
3393 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3394 }
3395 }
3396
3397 req->dcid = cpu_to_le16(chan->dcid);
3398 req->flags = __constant_cpu_to_le16(0);
3399
3400 return ptr - data;
3401}
3402
3403static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
3404 u16 result, u16 flags)
3405{
3406 struct l2cap_conf_rsp *rsp = data;
3407 void *ptr = rsp->data;
3408
3409 BT_DBG("chan %p", chan);
3410
3411 rsp->scid = cpu_to_le16(chan->dcid);
3412 rsp->result = cpu_to_le16(result);
3413 rsp->flags = cpu_to_le16(flags);
3414
3415 return ptr - data;
3416}
3417
3418void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
3419{
3420 struct l2cap_conn_rsp rsp;
3421 struct l2cap_conn *conn = chan->conn;
3422 u8 buf[128];
3423
3424 rsp.scid = cpu_to_le16(chan->dcid);
3425 rsp.dcid = cpu_to_le16(chan->scid);
3426 rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
3427 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
3428 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
3429
3430 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3431 return;
3432
3433 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3434 l2cap_build_conf_req(chan, buf), buf);
3435 chan->num_conf_req++;
3436}
3437
3438static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
3439{
3440 int type, olen;
3441 unsigned long val;
3442 /* Use sane default values in case a misbehaving remote device
3443 * did not send an RFC or extended window size option.
3444 */
3445 u16 txwin_ext = chan->ack_win;
3446 struct l2cap_conf_rfc rfc = {
3447 .mode = chan->mode,
3448 .retrans_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
3449 .monitor_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
3450 .max_pdu_size = cpu_to_le16(chan->imtu),
3451 .txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
3452 };
3453
3454 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
3455
3456 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
3457 return;
3458
3459 while (len >= L2CAP_CONF_OPT_SIZE) {
3460 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3461
3462 switch (type) {
3463 case L2CAP_CONF_RFC:
3464 if (olen == sizeof(rfc))
3465 memcpy(&rfc, (void *)val, olen);
3466 break;
3467 case L2CAP_CONF_EWS:
3468 txwin_ext = val;
3469 break;
3470 }
3471 }
3472
3473 switch (rfc.mode) {
3474 case L2CAP_MODE_ERTM:
3475 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3476 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3477 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3478 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3479 chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
3480 else
3481 chan->ack_win = min_t(u16, chan->ack_win,
3482 rfc.txwin_size);
3483 break;
3484 case L2CAP_MODE_STREAMING:
3485 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3486 }
3487}
3488
3489static inline int l2cap_command_rej(struct l2cap_conn *conn,
3490 struct l2cap_cmd_hdr *cmd, u8 *data)
3491{
3492 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
3493
3494 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
3495 return 0;
3496
3497 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
3498 cmd->ident == conn->info_ident) {
3499 cancel_delayed_work(&conn->info_timer);
3500
3501 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3502 conn->info_ident = 0;
3503
3504 l2cap_conn_start(conn);
3505 }
3506
3507 return 0;
3508}
3509
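/* Handle an incoming Connection Request (or Create Channel Request,
 * depending on rsp_code): find a channel listening on the PSM, check
 * the security of the ACL link, create the new channel and answer
 * with success, pending or a refusal. If the information exchange has
 * not finished yet, the result is reported as pending and an
 * Information Request is issued.
 */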
3510static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
3511 struct l2cap_cmd_hdr *cmd,
3512 u8 *data, u8 rsp_code, u8 amp_id)
3513{
3514 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
3515 struct l2cap_conn_rsp rsp;
3516 struct l2cap_chan *chan = NULL, *pchan;
3517 struct sock *parent, *sk = NULL;
3518 int result, status = L2CAP_CS_NO_INFO;
3519
3520 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
3521 __le16 psm = req->psm;
3522
3523 BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);
3524
3525 /* Check if we have a socket listening on this PSM */
3526 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src, conn->dst);
3527 if (!pchan) {
3528 result = L2CAP_CR_BAD_PSM;
3529 goto sendresp;
3530 }
3531
3532 parent = pchan->sk;
3533
3534 mutex_lock(&conn->chan_lock);
3535 lock_sock(parent);
3536
3537 /* Check if the ACL is secure enough (if not SDP) */
3538 if (psm != __constant_cpu_to_le16(L2CAP_PSM_SDP) &&
3539 !hci_conn_check_link_mode(conn->hcon)) {
3540 conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
3541 result = L2CAP_CR_SEC_BLOCK;
3542 goto response;
3543 }
3544
3545 result = L2CAP_CR_NO_MEM;
3546
3547 /* Check if we already have a channel with that dcid */
3548 if (__l2cap_get_chan_by_dcid(conn, scid))
3549 goto response;
3550
3551 chan = pchan->ops->new_connection(pchan);
3552 if (!chan)
3553 goto response;
3554
3555 sk = chan->sk;
3556
3557 hci_conn_hold(conn->hcon);
3558
3559 bacpy(&bt_sk(sk)->src, conn->src);
3560 bacpy(&bt_sk(sk)->dst, conn->dst);
3561 chan->psm = psm;
3562 chan->dcid = scid;
3563 chan->local_amp_id = amp_id;
3564
3565 __l2cap_chan_add(conn, chan);
3566
3567 dcid = chan->scid;
3568
3569 __set_chan_timer(chan, sk->sk_sndtimeo);
3570
3571 chan->ident = cmd->ident;
3572
3573 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
3574 if (l2cap_chan_check_security(chan)) {
3575 if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) {
3576 __l2cap_state_change(chan, BT_CONNECT2);
3577 result = L2CAP_CR_PEND;
3578 status = L2CAP_CS_AUTHOR_PEND;
3579 chan->ops->defer(chan);
3580 } else {
3581 /* Force pending result for AMP controllers.
3582 * The connection will succeed after the
3583 * physical link is up.
3584 */
3585 if (amp_id) {
3586 __l2cap_state_change(chan, BT_CONNECT2);
3587 result = L2CAP_CR_PEND;
3588 } else {
3589 __l2cap_state_change(chan, BT_CONFIG);
3590 result = L2CAP_CR_SUCCESS;
3591 }
3592 status = L2CAP_CS_NO_INFO;
3593 }
3594 } else {
3595 __l2cap_state_change(chan, BT_CONNECT2);
3596 result = L2CAP_CR_PEND;
3597 status = L2CAP_CS_AUTHEN_PEND;
3598 }
3599 } else {
3600 __l2cap_state_change(chan, BT_CONNECT2);
3601 result = L2CAP_CR_PEND;
3602 status = L2CAP_CS_NO_INFO;
3603 }
3604
3605response:
3606 release_sock(parent);
3607 mutex_unlock(&conn->chan_lock);
3608
3609sendresp:
3610 rsp.scid = cpu_to_le16(scid);
3611 rsp.dcid = cpu_to_le16(dcid);
3612 rsp.result = cpu_to_le16(result);
3613 rsp.status = cpu_to_le16(status);
3614 l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);
3615
3616 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
3617 struct l2cap_info_req info;
3618 info.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
3619
3620 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
3621 conn->info_ident = l2cap_get_ident(conn);
3622
3623 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
3624
3625 l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
3626 sizeof(info), &info);
3627 }
3628
3629 if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
3630 result == L2CAP_CR_SUCCESS) {
3631 u8 buf[128];
3632 set_bit(CONF_REQ_SENT, &chan->conf_state);
3633 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3634 l2cap_build_conf_req(chan, buf), buf);
3635 chan->num_conf_req++;
3636 }
3637
3638 return chan;
3639}
3640
3641static int l2cap_connect_req(struct l2cap_conn *conn,
3642 struct l2cap_cmd_hdr *cmd, u8 *data)
3643{
3644 l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
3645 return 0;
3646}
3647
3648static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
3649 struct l2cap_cmd_hdr *cmd, u8 *data)
3650{
3651 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
3652 u16 scid, dcid, result, status;
3653 struct l2cap_chan *chan;
3654 u8 req[128];
3655 int err;
3656
3657 scid = __le16_to_cpu(rsp->scid);
3658 dcid = __le16_to_cpu(rsp->dcid);
3659 result = __le16_to_cpu(rsp->result);
3660 status = __le16_to_cpu(rsp->status);
3661
3662 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
3663 dcid, scid, result, status);
3664
3665 mutex_lock(&conn->chan_lock);
3666
3667 if (scid) {
3668 chan = __l2cap_get_chan_by_scid(conn, scid);
3669 if (!chan) {
3670 err = -EFAULT;
3671 goto unlock;
3672 }
3673 } else {
3674 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
3675 if (!chan) {
3676 err = -EFAULT;
3677 goto unlock;
3678 }
3679 }
3680
3681 err = 0;
3682
3683 l2cap_chan_lock(chan);
3684
3685 switch (result) {
3686 case L2CAP_CR_SUCCESS:
3687 l2cap_state_change(chan, BT_CONFIG);
3688 chan->ident = 0;
3689 chan->dcid = dcid;
3690 clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
3691
3692 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3693 break;
3694
3695 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3696 l2cap_build_conf_req(chan, req), req);
3697 chan->num_conf_req++;
3698 break;
3699
3700 case L2CAP_CR_PEND:
3701 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
3702 break;
3703
3704 default:
3705 l2cap_chan_del(chan, ECONNREFUSED);
3706 break;
3707 }
3708
3709 l2cap_chan_unlock(chan);
3710
3711unlock:
3712 mutex_unlock(&conn->chan_lock);
3713
3714 return err;
3715}
3716
3717static inline void set_default_fcs(struct l2cap_chan *chan)
3718{
3719 /* FCS is enabled only in ERTM or streaming mode, if one or both
3720 * sides request it.
3721 */
3722 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
3723 chan->fcs = L2CAP_FCS_NONE;
3724 else if (!test_bit(CONF_NO_FCS_RECV, &chan->conf_state))
3725 chan->fcs = L2CAP_FCS_CRC16;
3726}
3727
3728static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
3729 u8 ident, u16 flags)
3730{
3731 struct l2cap_conn *conn = chan->conn;
3732
3733 BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
3734 flags);
3735
3736 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3737 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3738
3739 l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
3740 l2cap_build_conf_rsp(chan, data,
3741 L2CAP_CONF_SUCCESS, flags), data);
3742}
3743
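/* Handle a Configure Request. Fragments flagged with the continuation
 * bit are accumulated in chan->conf_req; once the last fragment
 * arrives the full request is parsed, a response is sent, and, when
 * both directions are configured, ERTM state is initialised and the
 * channel is marked ready.
 */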
3744static inline int l2cap_config_req(struct l2cap_conn *conn,
3745 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3746 u8 *data)
3747{
3748 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
3749 u16 dcid, flags;
3750 u8 rsp[64];
3751 struct l2cap_chan *chan;
3752 int len, err = 0;
3753
3754 dcid = __le16_to_cpu(req->dcid);
3755 flags = __le16_to_cpu(req->flags);
3756
3757 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
3758
3759 chan = l2cap_get_chan_by_scid(conn, dcid);
3760 if (!chan)
3761 return -ENOENT;
3762
3763 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
3764 struct l2cap_cmd_rej_cid rej;
3765
3766 rej.reason = __constant_cpu_to_le16(L2CAP_REJ_INVALID_CID);
3767 rej.scid = cpu_to_le16(chan->scid);
3768 rej.dcid = cpu_to_le16(chan->dcid);
3769
3770 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
3771 sizeof(rej), &rej);
3772 goto unlock;
3773 }
3774
3775 /* Reject if config buffer is too small. */
3776 len = cmd_len - sizeof(*req);
3777 if (len < 0 || chan->conf_len + len > sizeof(chan->conf_req)) {
3778 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3779 l2cap_build_conf_rsp(chan, rsp,
3780 L2CAP_CONF_REJECT, flags), rsp);
3781 goto unlock;
3782 }
3783
3784 /* Store config. */
3785 memcpy(chan->conf_req + chan->conf_len, req->data, len);
3786 chan->conf_len += len;
3787
3788 if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
3789 /* Incomplete config. Send empty response. */
3790 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3791 l2cap_build_conf_rsp(chan, rsp,
3792 L2CAP_CONF_SUCCESS, flags), rsp);
3793 goto unlock;
3794 }
3795
3796 /* Complete config. */
3797 len = l2cap_parse_conf_req(chan, rsp);
3798 if (len < 0) {
3799 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3800 goto unlock;
3801 }
3802
3803 chan->ident = cmd->ident;
3804 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
3805 chan->num_conf_rsp++;
3806
3807 /* Reset config buffer. */
3808 chan->conf_len = 0;
3809
3810 if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
3811 goto unlock;
3812
3813 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
3814 set_default_fcs(chan);
3815
3816 if (chan->mode == L2CAP_MODE_ERTM ||
3817 chan->mode == L2CAP_MODE_STREAMING)
3818 err = l2cap_ertm_init(chan);
3819
3820 if (err < 0)
3821 l2cap_send_disconn_req(chan->conn, chan, -err);
3822 else
3823 l2cap_chan_ready(chan);
3824
3825 goto unlock;
3826 }
3827
3828 if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
3829 u8 buf[64];
3830 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3831 l2cap_build_conf_req(chan, buf), buf);
3832 chan->num_conf_req++;
3833 }
3834
3835 /* Got Conf Rsp PENDING from remote side and assume we sent
3836 * Conf Rsp PENDING in the code above */
3837 if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
3838 test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
3839
3840 /* check compatibility */
3841
3842 /* Send rsp for BR/EDR channel */
3843 if (!chan->ctrl_id)
3844 l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
3845 else
3846 chan->ident = cmd->ident;
3847 }
3848
3849unlock:
3850 l2cap_chan_unlock(chan);
3851 return err;
3852}
3853
3854static inline int l2cap_config_rsp(struct l2cap_conn *conn,
3855 struct l2cap_cmd_hdr *cmd, u8 *data)
3856{
3857 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
3858 u16 scid, flags, result;
3859 struct l2cap_chan *chan;
3860 int len = le16_to_cpu(cmd->len) - sizeof(*rsp);
3861 int err = 0;
3862
3863 scid = __le16_to_cpu(rsp->scid);
3864 flags = __le16_to_cpu(rsp->flags);
3865 result = __le16_to_cpu(rsp->result);
3866
3867 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
3868 result, len);
3869
3870 chan = l2cap_get_chan_by_scid(conn, scid);
3871 if (!chan)
3872 return 0;
3873
3874 switch (result) {
3875 case L2CAP_CONF_SUCCESS:
3876 l2cap_conf_rfc_get(chan, rsp->data, len);
3877 clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
3878 break;
3879
3880 case L2CAP_CONF_PENDING:
3881 set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
3882
3883 if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
3884 char buf[64];
3885
3886 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
3887 buf, &result);
3888 if (len < 0) {
3889 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3890 goto done;
3891 }
3892
3893 /* check compatibility */
3894
3895 if (!chan->ctrl_id)
3896 l2cap_send_efs_conf_rsp(chan, buf, cmd->ident,
3897 0);
3898 else
3899 chan->ident = cmd->ident;
3900 }
3901 goto done;
3902
3903 case L2CAP_CONF_UNACCEPT:
3904 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
3905 char req[64];
3906
3907 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
3908 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3909 goto done;
3910 }
3911
3912 /* throw out any old stored conf requests */
3913 result = L2CAP_CONF_SUCCESS;
3914 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
3915 req, &result);
3916 if (len < 0) {
3917 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3918 goto done;
3919 }
3920
3921 l2cap_send_cmd(conn, l2cap_get_ident(conn),
3922 L2CAP_CONF_REQ, len, req);
3923 chan->num_conf_req++;
3924 if (result != L2CAP_CONF_SUCCESS)
3925 goto done;
3926 break;
3927 }
3928
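/* Fall through from L2CAP_CONF_UNACCEPT when the retry limit
 * (L2CAP_CONF_MAX_CONF_RSP) has been exceeded.
 */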
3929 default:
3930 l2cap_chan_set_err(chan, ECONNRESET);
3931
3932 __set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
3933 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3934 goto done;
3935 }
3936
3937 if (flags & L2CAP_CONF_FLAG_CONTINUATION)
3938 goto done;
3939
3940 set_bit(CONF_INPUT_DONE, &chan->conf_state);
3941
3942 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
3943 set_default_fcs(chan);
3944
3945 if (chan->mode == L2CAP_MODE_ERTM ||
3946 chan->mode == L2CAP_MODE_STREAMING)
3947 err = l2cap_ertm_init(chan);
3948
3949 if (err < 0)
3950 l2cap_send_disconn_req(chan->conn, chan, -err);
3951 else
3952 l2cap_chan_ready(chan);
3953 }
3954
3955done:
3956 l2cap_chan_unlock(chan);
3957 return err;
3958}
3959
3960static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
3961 struct l2cap_cmd_hdr *cmd, u8 *data)
3962{
3963 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
3964 struct l2cap_disconn_rsp rsp;
3965 u16 dcid, scid;
3966 struct l2cap_chan *chan;
3967 struct sock *sk;
3968
3969 scid = __le16_to_cpu(req->scid);
3970 dcid = __le16_to_cpu(req->dcid);
3971
3972 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
3973
3974 mutex_lock(&conn->chan_lock);
3975
3976 chan = __l2cap_get_chan_by_scid(conn, dcid);
3977 if (!chan) {
3978 mutex_unlock(&conn->chan_lock);
3979 return 0;
3980 }
3981
3982 l2cap_chan_lock(chan);
3983
3984 sk = chan->sk;
3985
3986 rsp.dcid = cpu_to_le16(chan->scid);
3987 rsp.scid = cpu_to_le16(chan->dcid);
3988 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
3989
3990 lock_sock(sk);
3991 sk->sk_shutdown = SHUTDOWN_MASK;
3992 release_sock(sk);
3993
3994 l2cap_chan_hold(chan);
3995 l2cap_chan_del(chan, ECONNRESET);
3996
3997 l2cap_chan_unlock(chan);
3998
3999 chan->ops->close(chan);
4000 l2cap_chan_put(chan);
4001
4002 mutex_unlock(&conn->chan_lock);
4003
4004 return 0;
4005}
4006
4007static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
4008 struct l2cap_cmd_hdr *cmd, u8 *data)
4009{
4010 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
4011 u16 dcid, scid;
4012 struct l2cap_chan *chan;
4013
4014 scid = __le16_to_cpu(rsp->scid);
4015 dcid = __le16_to_cpu(rsp->dcid);
4016
4017 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
4018
4019 mutex_lock(&conn->chan_lock);
4020
4021 chan = __l2cap_get_chan_by_scid(conn, scid);
4022 if (!chan) {
4023 mutex_unlock(&conn->chan_lock);
4024 return 0;
4025 }
4026
4027 l2cap_chan_lock(chan);
4028
4029 l2cap_chan_hold(chan);
4030 l2cap_chan_del(chan, 0);
4031
4032 l2cap_chan_unlock(chan);
4033
4034 chan->ops->close(chan);
4035 l2cap_chan_put(chan);
4036
4037 mutex_unlock(&conn->chan_lock);
4038
4039 return 0;
4040}
4041
4042static inline int l2cap_information_req(struct l2cap_conn *conn,
4043 struct l2cap_cmd_hdr *cmd, u8 *data)
4044{
4045 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
4046 u16 type;
4047
4048 type = __le16_to_cpu(req->type);
4049
4050 BT_DBG("type 0x%4.4x", type);
4051
4052 if (type == L2CAP_IT_FEAT_MASK) {
4053 u8 buf[8];
4054 u32 feat_mask = l2cap_feat_mask;
4055 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4056 rsp->type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
4057 rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
4058 if (!disable_ertm)
4059 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
4060 | L2CAP_FEAT_FCS;
4061 if (enable_hs)
4062 feat_mask |= L2CAP_FEAT_EXT_FLOW
4063 | L2CAP_FEAT_EXT_WINDOW;
4064
4065 put_unaligned_le32(feat_mask, rsp->data);
4066 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4067 buf);
4068 } else if (type == L2CAP_IT_FIXED_CHAN) {
4069 u8 buf[12];
4070 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4071
4072 if (enable_hs)
4073 l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
4074 else
4075 l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;
4076
4077 rsp->type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4078 rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
4079 memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
4080 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4081 buf);
4082 } else {
4083 struct l2cap_info_rsp rsp;
4084 rsp.type = cpu_to_le16(type);
4085 rsp.result = __constant_cpu_to_le16(L2CAP_IR_NOTSUPP);
4086 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
4087 &rsp);
4088 }
4089
4090 return 0;
4091}
4092
4093static inline int l2cap_information_rsp(struct l2cap_conn *conn,
4094 struct l2cap_cmd_hdr *cmd, u8 *data)
4095{
4096 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
4097 u16 type, result;
4098
4099 type = __le16_to_cpu(rsp->type);
4100 result = __le16_to_cpu(rsp->result);
4101
4102 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
4103
4104 /* L2CAP Info req/rsp are not bound to any channel, so add extra checks */
4105 if (cmd->ident != conn->info_ident ||
4106 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
4107 return 0;
4108
4109 cancel_delayed_work(&conn->info_timer);
4110
4111 if (result != L2CAP_IR_SUCCESS) {
4112 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4113 conn->info_ident = 0;
4114
4115 l2cap_conn_start(conn);
4116
4117 return 0;
4118 }
4119
4120 switch (type) {
4121 case L2CAP_IT_FEAT_MASK:
4122 conn->feat_mask = get_unaligned_le32(rsp->data);
4123
4124 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
4125 struct l2cap_info_req req;
4126 req.type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4127
4128 conn->info_ident = l2cap_get_ident(conn);
4129
4130 l2cap_send_cmd(conn, conn->info_ident,
4131 L2CAP_INFO_REQ, sizeof(req), &req);
4132 } else {
4133 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4134 conn->info_ident = 0;
4135
4136 l2cap_conn_start(conn);
4137 }
4138 break;
4139
4140 case L2CAP_IT_FIXED_CHAN:
4141 conn->fixed_chan_mask = rsp->data[0];
4142 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4143 conn->info_ident = 0;
4144
4145 l2cap_conn_start(conn);
4146 break;
4147 }
4148
4149 return 0;
4150}
4151
4152static int l2cap_create_channel_req(struct l2cap_conn *conn,
4153 struct l2cap_cmd_hdr *cmd,
4154 u16 cmd_len, void *data)
4155{
4156 struct l2cap_create_chan_req *req = data;
4157 struct l2cap_chan *chan;
4158 u16 psm, scid;
4159
4160 if (cmd_len != sizeof(*req))
4161 return -EPROTO;
4162
4163 if (!enable_hs)
4164 return -EINVAL;
4165
4166 psm = le16_to_cpu(req->psm);
4167 scid = le16_to_cpu(req->scid);
4168
4169 BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);
4170
4171 if (req->amp_id) {
4172 struct hci_dev *hdev;
4173
4174 /* Validate AMP controller id */
4175 hdev = hci_dev_get(req->amp_id);
4176 if (!hdev || hdev->dev_type != HCI_AMP ||
4177 !test_bit(HCI_UP, &hdev->flags)) {
4178 struct l2cap_create_chan_rsp rsp;
4179
4180 rsp.dcid = 0;
4181 rsp.scid = cpu_to_le16(scid);
4182 rsp.result = __constant_cpu_to_le16(L2CAP_CR_BAD_AMP);
4183 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
4184
4185 l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
4186 sizeof(rsp), &rsp);
4187
4188 if (hdev)
4189 hci_dev_put(hdev);
4190
4191 return 0;
4192 }
4193
4194 hci_dev_put(hdev);
4195 }
4196
4197 chan = l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4198 req->amp_id);
4199
4200 return 0;
4201}
4202
4203static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id)
4204{
4205 struct l2cap_move_chan_req req;
4206 u8 ident;
4207
4208 BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id);
4209
4210 ident = l2cap_get_ident(chan->conn);
4211 chan->ident = ident;
4212
4213 req.icid = cpu_to_le16(chan->scid);
4214 req.dest_amp_id = dest_amp_id;
4215
4216 l2cap_send_cmd(chan->conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req),
4217 &req);
4218
4219 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4220}
4221
4222static void l2cap_send_move_chan_rsp(struct l2cap_chan *chan, u16 result)
4223{
4224 struct l2cap_move_chan_rsp rsp;
4225
4226 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4227
4228 rsp.icid = cpu_to_le16(chan->dcid);
4229 rsp.result = cpu_to_le16(result);
4230
4231 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_RSP,
4232 sizeof(rsp), &rsp);
4233}
4234
4235static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result)
4236{
4237 struct l2cap_move_chan_cfm cfm;
4238
4239 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4240
4241 chan->ident = l2cap_get_ident(chan->conn);
4242
4243 cfm.icid = cpu_to_le16(chan->scid);
4244 cfm.result = cpu_to_le16(result);
4245
4246 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_CFM,
4247 sizeof(cfm), &cfm);
4248
4249 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4250}
4251
4252static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid)
4253{
4254 struct l2cap_move_chan_cfm cfm;
4255
4256 BT_DBG("conn %p, icid 0x%4.4x", conn, icid);
4257
4258 cfm.icid = cpu_to_le16(icid);
4259 cfm.result = __constant_cpu_to_le16(L2CAP_MC_UNCONFIRMED);
4260
4261 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM,
4262 sizeof(cfm), &cfm);
4263}
4264
4265static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4266 u16 icid)
4267{
4268 struct l2cap_move_chan_cfm_rsp rsp;
4269
4270 BT_DBG("icid 0x%4.4x", icid);
4271
4272 rsp.icid = cpu_to_le16(icid);
4273 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
4274}
4275
4276static void __release_logical_link(struct l2cap_chan *chan)
4277{
4278 chan->hs_hchan = NULL;
4279 chan->hs_hcon = NULL;
4280
4281 /* Placeholder - release the logical link */
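/* Illustrative sketch only: once the AMP plumbing is in place, the
 * release is expected to save chan->hs_hchan before the pointers are
 * cleared above and hand it back to the controller, e.g. via an
 * hci_chan teardown helper such as hci_chan_del(); the exact helper
 * named here is an assumption, not an existing call path.
 */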
4282}
4283
4284static void l2cap_logical_fail(struct l2cap_chan *chan)
4285{
4286 /* Logical link setup failed */
4287 if (chan->state != BT_CONNECTED) {
4288 /* Create channel failure, disconnect */
4289 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4290 return;
4291 }
4292
4293 switch (chan->move_role) {
4294 case L2CAP_MOVE_ROLE_RESPONDER:
4295 l2cap_move_done(chan);
4296 l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_SUPP);
4297 break;
4298 case L2CAP_MOVE_ROLE_INITIATOR:
4299 if (chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_COMP ||
4300 chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_CFM) {
4301 /* Remote has only sent pending or
4302 * success responses, clean up
4303 */
4304 l2cap_move_done(chan);
4305 }
4306
4307 /* Other amp move states imply that the move
4308 * has already aborted
4309 */
4310 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
4311 break;
4312 }
4313}
4314
4315static void l2cap_logical_finish_create(struct l2cap_chan *chan,
4316 struct hci_chan *hchan)
4317{
4318 struct l2cap_conf_rsp rsp;
4319 u8 rsp_len;
4320
4321 chan->hs_hcon = hchan->conn;
4322 chan->hs_hcon->l2cap_data = chan->conn;
4323
4324 rsp_len = l2cap_build_conf_rsp(chan, &rsp,
4325 L2CAP_CONF_SUCCESS, 0);
4326 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CONF_RSP, rsp_len,
4327 &rsp);
4328 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
4329
4330 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
4331 int err = 0;
4332
4333 set_default_fcs(chan);
4334
4335 err = l2cap_ertm_init(chan);
4336 if (err < 0)
4337 l2cap_send_disconn_req(chan->conn, chan, -err);
4338 else
4339 l2cap_chan_ready(chan);
4340 }
4341}
4342
4343static void l2cap_logical_finish_move(struct l2cap_chan *chan,
4344 struct hci_chan *hchan)
4345{
4346 chan->hs_hcon = hchan->conn;
4347 chan->hs_hcon->l2cap_data = chan->conn;
4348
4349 BT_DBG("move_state %d", chan->move_state);
4350
4351 switch (chan->move_state) {
4352 case L2CAP_MOVE_WAIT_LOGICAL_COMP:
4353 /* Move confirm will be sent after a success
4354 * response is received
4355 */
4356 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4357 break;
4358 case L2CAP_MOVE_WAIT_LOGICAL_CFM:
4359 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4360 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
4361 } else if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
4362 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
4363 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
4364 } else if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4365 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4366 l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
4367 }
4368 break;
4369 default:
4370 /* Move was not in expected state, free the channel */
4371 __release_logical_link(chan);
4372
4373 chan->move_state = L2CAP_MOVE_STABLE;
4374 }
4375}
4376
4377/* Call with chan locked */
4378static void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
4379 u8 status)
4380{
4381 BT_DBG("chan %p, hchan %p, status %d", chan, hchan, status);
4382
4383 if (status) {
4384 l2cap_logical_fail(chan);
4385 __release_logical_link(chan);
4386 return;
4387 }
4388
4389 if (chan->state != BT_CONNECTED) {
4390 /* Ignore logical link if channel is on BR/EDR */
4391 if (chan->local_amp_id)
4392 l2cap_logical_finish_create(chan, hchan);
4393 } else {
4394 l2cap_logical_finish_move(chan, hchan);
4395 }
4396}
4397
4398static void l2cap_do_create(struct l2cap_chan *chan, int result,
4399 u8 local_amp_id, u8 remote_amp_id)
4400{
4401 if (!test_bit(CONF_CONNECT_PEND, &chan->conf_state)) {
4402 struct l2cap_conn_rsp rsp;
4403 char buf[128];
4404 rsp.scid = cpu_to_le16(chan->dcid);
4405 rsp.dcid = cpu_to_le16(chan->scid);
4406
4407 /* Incoming channel on AMP */
4408 if (result == L2CAP_CR_SUCCESS) {
4409 /* Send successful response */
4410 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
4411 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4412 } else {
4413 /* Send negative response */
4414 rsp.result = cpu_to_le16(L2CAP_CR_NO_MEM);
4415 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4416 }
4417
4418 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_RSP,
4419 sizeof(rsp), &rsp);
4420
4421 if (result == L2CAP_CR_SUCCESS) {
4422 __l2cap_state_change(chan, BT_CONFIG);
4423 set_bit(CONF_REQ_SENT, &chan->conf_state);
4424 l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
4425 L2CAP_CONF_REQ,
4426 l2cap_build_conf_req(chan, buf), buf);
4427 chan->num_conf_req++;
4428 }
4429 } else {
4430 /* Outgoing channel on AMP */
4431 if (result == L2CAP_CR_SUCCESS) {
4432 chan->local_amp_id = local_amp_id;
4433 l2cap_send_create_chan_req(chan, remote_amp_id);
4434 } else {
4435 /* Revert to BR/EDR connect */
4436 l2cap_send_conn_req(chan);
4437 }
4438 }
4439}
4440
4441static void l2cap_do_move_initiate(struct l2cap_chan *chan, u8 local_amp_id,
4442 u8 remote_amp_id)
4443{
4444 l2cap_move_setup(chan);
4445 chan->move_id = local_amp_id;
4446 chan->move_state = L2CAP_MOVE_WAIT_RSP;
4447
4448 l2cap_send_move_chan_req(chan, remote_amp_id);
4449}
4450
4451static void l2cap_do_move_respond(struct l2cap_chan *chan, int result)
4452{
4453 struct hci_chan *hchan = NULL;
4454
4455 /* Placeholder - get hci_chan for logical link */
4456
4457 if (hchan) {
4458 if (hchan->state == BT_CONNECTED) {
4459 /* Logical link is ready to go */
4460 chan->hs_hcon = hchan->conn;
4461 chan->hs_hcon->l2cap_data = chan->conn;
4462 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4463 l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
4464
4465 l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
4466 } else {
4467 /* Wait for logical link to be ready */
4468 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
4469 }
4470 } else {
4471 /* Logical link not available */
4472 l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_ALLOWED);
4473 }
4474}
4475
4476static void l2cap_do_move_cancel(struct l2cap_chan *chan, int result)
4477{
4478 if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4479 u8 rsp_result;
4480 if (result == -EINVAL)
4481 rsp_result = L2CAP_MR_BAD_ID;
4482 else
4483 rsp_result = L2CAP_MR_NOT_ALLOWED;
4484
4485 l2cap_send_move_chan_rsp(chan, rsp_result);
4486 }
4487
4488 chan->move_role = L2CAP_MOVE_ROLE_NONE;
4489 chan->move_state = L2CAP_MOVE_STABLE;
4490
4491 /* Restart data transmission */
4492 l2cap_ertm_send(chan);
4493}
4494
4495void l2cap_physical_cfm(struct l2cap_chan *chan, int result, u8 local_amp_id,
4496 u8 remote_amp_id)
4497{
4498 BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
4499 chan, result, local_amp_id, remote_amp_id);
4500
4501 l2cap_chan_lock(chan);
4502
4503 if (chan->state == BT_DISCONN || chan->state == BT_CLOSED) {
4504 l2cap_chan_unlock(chan);
4505 return;
4506 }
4507
4508 if (chan->state != BT_CONNECTED) {
4509 l2cap_do_create(chan, result, local_amp_id, remote_amp_id);
4510 } else if (result != L2CAP_MR_SUCCESS) {
4511 l2cap_do_move_cancel(chan, result);
4512 } else {
4513 switch (chan->move_role) {
4514 case L2CAP_MOVE_ROLE_INITIATOR:
4515 l2cap_do_move_initiate(chan, local_amp_id,
4516 remote_amp_id);
4517 break;
4518 case L2CAP_MOVE_ROLE_RESPONDER:
4519 l2cap_do_move_respond(chan, result);
4520 break;
4521 default:
4522 l2cap_do_move_cancel(chan, result);
4523 break;
4524 }
4525 }
4526
4527 l2cap_chan_unlock(chan);
4528}
4529
4530static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
4531 struct l2cap_cmd_hdr *cmd,
4532 u16 cmd_len, void *data)
4533{
4534 struct l2cap_move_chan_req *req = data;
4535 struct l2cap_move_chan_rsp rsp;
4536 struct l2cap_chan *chan;
4537 u16 icid = 0;
4538 u16 result = L2CAP_MR_NOT_ALLOWED;
4539
4540 if (cmd_len != sizeof(*req))
4541 return -EPROTO;
4542
4543 icid = le16_to_cpu(req->icid);
4544
4545 BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);
4546
4547 if (!enable_hs)
4548 return -EINVAL;
4549
4550 chan = l2cap_get_chan_by_dcid(conn, icid);
4551 if (!chan) {
4552 rsp.icid = cpu_to_le16(icid);
4553 rsp.result = __constant_cpu_to_le16(L2CAP_MR_NOT_ALLOWED);
4554 l2cap_send_cmd(conn, cmd->ident, L2CAP_MOVE_CHAN_RSP,
4555 sizeof(rsp), &rsp);
4556 return 0;
4557 }
4558
4559 chan->ident = cmd->ident;
4560
4561 if (chan->scid < L2CAP_CID_DYN_START ||
4562 chan->chan_policy == BT_CHANNEL_POLICY_BREDR_ONLY ||
4563 (chan->mode != L2CAP_MODE_ERTM &&
4564 chan->mode != L2CAP_MODE_STREAMING)) {
4565 result = L2CAP_MR_NOT_ALLOWED;
4566 goto send_move_response;
4567 }
4568
4569 if (chan->local_amp_id == req->dest_amp_id) {
4570 result = L2CAP_MR_SAME_ID;
4571 goto send_move_response;
4572 }
4573
4574 if (req->dest_amp_id) {
4575 struct hci_dev *hdev;
4576 hdev = hci_dev_get(req->dest_amp_id);
4577 if (!hdev || hdev->dev_type != HCI_AMP ||
4578 !test_bit(HCI_UP, &hdev->flags)) {
4579 if (hdev)
4580 hci_dev_put(hdev);
4581
4582 result = L2CAP_MR_BAD_ID;
4583 goto send_move_response;
4584 }
4585 hci_dev_put(hdev);
4586 }
4587
4588 /* Detect a move collision. Only send a collision response
4589 * if this side has "lost", otherwise proceed with the move.
4590 * The winner has the larger bd_addr.
4591 */
4592 if ((__chan_is_moving(chan) ||
4593 chan->move_role != L2CAP_MOVE_ROLE_NONE) &&
4594 bacmp(conn->src, conn->dst) > 0) {
4595 result = L2CAP_MR_COLLISION;
4596 goto send_move_response;
4597 }
4598
4599 chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
4600 l2cap_move_setup(chan);
4601 chan->move_id = req->dest_amp_id;
4602 icid = chan->dcid;
4603
4604 if (!req->dest_amp_id) {
4605 /* Moving to BR/EDR */
4606 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4607 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
4608 result = L2CAP_MR_PEND;
4609 } else {
4610 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4611 result = L2CAP_MR_SUCCESS;
4612 }
4613 } else {
4614 chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
4615 /* Placeholder - uncomment when amp functions are available */
4616 /*amp_accept_physical(chan, req->dest_amp_id);*/
4617 result = L2CAP_MR_PEND;
4618 }
4619
4620send_move_response:
4621 l2cap_send_move_chan_rsp(chan, result);
4622
4623 l2cap_chan_unlock(chan);
4624
4625 return 0;
4626}
4627
4628static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
4629{
4630 struct l2cap_chan *chan;
4631 struct hci_chan *hchan = NULL;
4632
4633 chan = l2cap_get_chan_by_scid(conn, icid);
4634 if (!chan) {
4635 l2cap_send_move_chan_cfm_icid(conn, icid);
4636 return;
4637 }
4638
4639 __clear_chan_timer(chan);
4640 if (result == L2CAP_MR_PEND)
4641 __set_chan_timer(chan, L2CAP_MOVE_ERTX_TIMEOUT);
4642
4643 switch (chan->move_state) {
4644 case L2CAP_MOVE_WAIT_LOGICAL_COMP:
4645 /* Move confirm will be sent when logical link
4646 * is complete.
4647 */
4648 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
4649 break;
4650 case L2CAP_MOVE_WAIT_RSP_SUCCESS:
4651 if (result == L2CAP_MR_PEND) {
4652 break;
4653 } else if (test_bit(CONN_LOCAL_BUSY,
4654 &chan->conn_state)) {
4655 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
4656 } else {
4657 /* Logical link is up or moving to BR/EDR,
4658 * proceed with move
4659 */
4660 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
4661 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
4662 }
4663 break;
4664 case L2CAP_MOVE_WAIT_RSP:
4665 /* Moving to AMP */
4666 if (result == L2CAP_MR_SUCCESS) {
4667 /* Remote is ready, send confirm immediately
4668 * after logical link is ready
4669 */
4670 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
4671 } else {
4672 /* Both logical link and move success
4673 * are required to confirm
4674 */
4675 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_COMP;
4676 }
4677
4678 /* Placeholder - get hci_chan for logical link */
4679 if (!hchan) {
4680 /* Logical link not available */
4681 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
4682 break;
4683 }
4684
4685 /* If the logical link is not yet connected, do not
4686 * send confirmation.
4687 */
4688 if (hchan->state != BT_CONNECTED)
4689 break;
4690
4691 /* Logical link is already ready to go */
4692
4693 chan->hs_hcon = hchan->conn;
4694 chan->hs_hcon->l2cap_data = chan->conn;
4695
4696 if (result == L2CAP_MR_SUCCESS) {
4697 /* Can confirm now */
4698 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
4699 } else {
4700 /* Now only need move success
4701 * to confirm
4702 */
4703 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4704 }
4705
4706 l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
4707 break;
4708 default:
4709 /* Any other amp move state means the move failed. */
4710 chan->move_id = chan->local_amp_id;
4711 l2cap_move_done(chan);
4712 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
4713 }
4714
4715 l2cap_chan_unlock(chan);
4716}
4717
4718static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
4719 u16 result)
4720{
4721 struct l2cap_chan *chan;
4722
4723 chan = l2cap_get_chan_by_ident(conn, ident);
4724 if (!chan) {
4725 /* Could not locate channel, icid is best guess */
4726 l2cap_send_move_chan_cfm_icid(conn, icid);
4727 return;
4728 }
4729
4730 __clear_chan_timer(chan);
4731
4732 if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
4733 if (result == L2CAP_MR_COLLISION) {
4734 chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
4735 } else {
4736 /* Cleanup - cancel move */
4737 chan->move_id = chan->local_amp_id;
4738 l2cap_move_done(chan);
4739 }
4740 }
4741
4742 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
4743
4744 l2cap_chan_unlock(chan);
4745}
4746
4747static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
4748 struct l2cap_cmd_hdr *cmd,
4749 u16 cmd_len, void *data)
4750{
4751 struct l2cap_move_chan_rsp *rsp = data;
4752 u16 icid, result;
4753
4754 if (cmd_len != sizeof(*rsp))
4755 return -EPROTO;
4756
4757 icid = le16_to_cpu(rsp->icid);
4758 result = le16_to_cpu(rsp->result);
4759
4760 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
4761
4762 if (result == L2CAP_MR_SUCCESS || result == L2CAP_MR_PEND)
4763 l2cap_move_continue(conn, icid, result);
4764 else
4765 l2cap_move_fail(conn, cmd->ident, icid, result);
4766
4767 return 0;
4768}
4769
4770static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
4771 struct l2cap_cmd_hdr *cmd,
4772 u16 cmd_len, void *data)
4773{
4774 struct l2cap_move_chan_cfm *cfm = data;
4775 struct l2cap_chan *chan;
4776 u16 icid, result;
4777
4778 if (cmd_len != sizeof(*cfm))
4779 return -EPROTO;
4780
4781 icid = le16_to_cpu(cfm->icid);
4782 result = le16_to_cpu(cfm->result);
4783
4784 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
4785
4786 chan = l2cap_get_chan_by_dcid(conn, icid);
4787 if (!chan) {
4788 /* Spec requires a response even if the icid was not found */
4789 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
4790 return 0;
4791 }
4792
4793 if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) {
4794 if (result == L2CAP_MC_CONFIRMED) {
4795 chan->local_amp_id = chan->move_id;
4796 if (!chan->local_amp_id)
4797 __release_logical_link(chan);
4798 } else {
4799 chan->move_id = chan->local_amp_id;
4800 }
4801
4802 l2cap_move_done(chan);
4803 }
4804
4805 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
4806
4807 l2cap_chan_unlock(chan);
4808
4809 return 0;
4810}
4811
4812static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
4813 struct l2cap_cmd_hdr *cmd,
4814 u16 cmd_len, void *data)
4815{
4816 struct l2cap_move_chan_cfm_rsp *rsp = data;
4817 struct l2cap_chan *chan;
4818 u16 icid;
4819
4820 if (cmd_len != sizeof(*rsp))
4821 return -EPROTO;
4822
4823 icid = le16_to_cpu(rsp->icid);
4824
4825 BT_DBG("icid 0x%4.4x", icid);
4826
4827 chan = l2cap_get_chan_by_scid(conn, icid);
4828 if (!chan)
4829 return 0;
4830
4831 __clear_chan_timer(chan);
4832
4833 if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) {
4834 chan->local_amp_id = chan->move_id;
4835
4836 if (!chan->local_amp_id && chan->hs_hchan)
4837 __release_logical_link(chan);
4838
4839 l2cap_move_done(chan);
4840 }
4841
4842 l2cap_chan_unlock(chan);
4843
4844 return 0;
4845}
4846
4847static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
4848 u16 to_multiplier)
4849{
4850 u16 max_latency;
4851
4852 if (min > max || min < 6 || max > 3200)
4853 return -EINVAL;
4854
4855 if (to_multiplier < 10 || to_multiplier > 3200)
4856 return -EINVAL;
4857
4858 if (max >= to_multiplier * 8)
4859 return -EINVAL;
4860
4861 max_latency = (to_multiplier * 8 / max) - 1;
4862 if (latency > 499 || latency > max_latency)
4863 return -EINVAL;
4864
4865 return 0;
4866}
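
/* Worked example for the checks above (illustrative values): with
 * min = 24 and max = 40 (a 30-50 ms connection interval in 1.25 ms
 * units), latency = 0 and to_multiplier = 400 (a 4 s supervision
 * timeout in 10 ms units):
 *
 *	max < to_multiplier * 8          ->  40 < 3200, ok
 *	max_latency = (400 * 8 / 40) - 1 ->  79
 *	latency <= 499 && latency <= 79  ->  ok
 *
 * so the request is accepted. The same request with latency = 100
 * would be rejected because it exceeds max_latency.
 */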
4867
4868static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
4869 struct l2cap_cmd_hdr *cmd,
4870 u8 *data)
4871{
4872 struct hci_conn *hcon = conn->hcon;
4873 struct l2cap_conn_param_update_req *req;
4874 struct l2cap_conn_param_update_rsp rsp;
4875 u16 min, max, latency, to_multiplier, cmd_len;
4876 int err;
4877
4878 if (!(hcon->link_mode & HCI_LM_MASTER))
4879 return -EINVAL;
4880
4881 cmd_len = __le16_to_cpu(cmd->len);
4882 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
4883 return -EPROTO;
4884
4885 req = (struct l2cap_conn_param_update_req *) data;
4886 min = __le16_to_cpu(req->min);
4887 max = __le16_to_cpu(req->max);
4888 latency = __le16_to_cpu(req->latency);
4889 to_multiplier = __le16_to_cpu(req->to_multiplier);
4890
4891 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
4892 min, max, latency, to_multiplier);
4893
4894 memset(&rsp, 0, sizeof(rsp));
4895
4896 err = l2cap_check_conn_param(min, max, latency, to_multiplier);
4897 if (err)
4898 rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
4899 else
4900 rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
4901
4902 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
4903 sizeof(rsp), &rsp);
4904
4905 if (!err)
4906 hci_le_conn_update(hcon, min, max, latency, to_multiplier);
4907
4908 return 0;
4909}
4910
4911static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
4912 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4913 u8 *data)
4914{
4915 int err = 0;
4916
4917 switch (cmd->code) {
4918 case L2CAP_COMMAND_REJ:
4919 l2cap_command_rej(conn, cmd, data);
4920 break;
4921
4922 case L2CAP_CONN_REQ:
4923 err = l2cap_connect_req(conn, cmd, data);
4924 break;
4925
4926 case L2CAP_CONN_RSP:
4927 case L2CAP_CREATE_CHAN_RSP:
4928 err = l2cap_connect_create_rsp(conn, cmd, data);
4929 break;
4930
4931 case L2CAP_CONF_REQ:
4932 err = l2cap_config_req(conn, cmd, cmd_len, data);
4933 break;
4934
4935 case L2CAP_CONF_RSP:
4936 err = l2cap_config_rsp(conn, cmd, data);
4937 break;
4938
4939 case L2CAP_DISCONN_REQ:
4940 err = l2cap_disconnect_req(conn, cmd, data);
4941 break;
4942
4943 case L2CAP_DISCONN_RSP:
4944 err = l2cap_disconnect_rsp(conn, cmd, data);
4945 break;
4946
4947 case L2CAP_ECHO_REQ:
4948 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
4949 break;
4950
4951 case L2CAP_ECHO_RSP:
4952 break;
4953
4954 case L2CAP_INFO_REQ:
4955 err = l2cap_information_req(conn, cmd, data);
4956 break;
4957
4958 case L2CAP_INFO_RSP:
4959 err = l2cap_information_rsp(conn, cmd, data);
4960 break;
4961
4962 case L2CAP_CREATE_CHAN_REQ:
4963 err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
4964 break;
4965
4966 case L2CAP_MOVE_CHAN_REQ:
4967 err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
4968 break;
4969
4970 case L2CAP_MOVE_CHAN_RSP:
4971 err = l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
4972 break;
4973
4974 case L2CAP_MOVE_CHAN_CFM:
4975 err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
4976 break;
4977
4978 case L2CAP_MOVE_CHAN_CFM_RSP:
4979 err = l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
4980 break;
4981
4982 default:
4983 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
4984 err = -EINVAL;
4985 break;
4986 }
4987
4988 return err;
4989}
4990
4991static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
4992 struct l2cap_cmd_hdr *cmd, u8 *data)
4993{
4994 switch (cmd->code) {
4995 case L2CAP_COMMAND_REJ:
4996 return 0;
4997
4998 case L2CAP_CONN_PARAM_UPDATE_REQ:
4999 return l2cap_conn_param_update_req(conn, cmd, data);
5000
5001 case L2CAP_CONN_PARAM_UPDATE_RSP:
5002 return 0;
5003
5004 default:
5005 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
5006 return -EINVAL;
5007 }
5008}
5009
5010static inline void l2cap_sig_channel(struct l2cap_conn *conn,
5011 struct sk_buff *skb)
5012{
5013 u8 *data = skb->data;
5014 int len = skb->len;
5015 struct l2cap_cmd_hdr cmd;
5016 int err;
5017
5018 l2cap_raw_recv(conn, skb);
5019
5020 while (len >= L2CAP_CMD_HDR_SIZE) {
5021 u16 cmd_len;
5022 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
5023 data += L2CAP_CMD_HDR_SIZE;
5024 len -= L2CAP_CMD_HDR_SIZE;
5025
5026 cmd_len = le16_to_cpu(cmd.len);
5027
5028 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len,
5029 cmd.ident);
5030
5031 if (cmd_len > len || !cmd.ident) {
5032 BT_DBG("corrupted command");
5033 break;
5034 }
5035
5036 if (conn->hcon->type == LE_LINK)
5037 err = l2cap_le_sig_cmd(conn, &cmd, data);
5038 else
5039 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
5040
5041 if (err) {
5042 struct l2cap_cmd_rej_unk rej;
5043
5044 BT_ERR("Wrong link type (%d)", err);
5045
5046 /* FIXME: Map err to a valid reason */
5047 rej.reason = __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
5048 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ,
5049 sizeof(rej), &rej);
5050 }
5051
5052 data += cmd_len;
5053 len -= cmd_len;
5054 }
5055
5056 kfree_skb(skb);
5057}
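
/* Each signaling command in the C-frame follows a 4-byte header:
 * code (1 octet), ident (1 octet) and len (2 octets, little endian),
 * then len octets of payload. As an illustrative example, the bytes
 * 0a 01 02 00 02 00 decode to an Information Request (code 0x0a,
 * ident 0x01, len 2) asking for the extended features mask
 * (type 0x0002), which l2cap_bredr_sig_cmd() dispatches to
 * l2cap_information_req().
 */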
5058
5059static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
5060{
5061 u16 our_fcs, rcv_fcs;
5062 int hdr_size;
5063
5064 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
5065 hdr_size = L2CAP_EXT_HDR_SIZE;
5066 else
5067 hdr_size = L2CAP_ENH_HDR_SIZE;
5068
5069 if (chan->fcs == L2CAP_FCS_CRC16) {
5070 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
5071 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
5072 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
5073
5074 if (our_fcs != rcv_fcs)
5075 return -EBADMSG;
5076 }
5077 return 0;
5078}
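
/* Example of the FCS handling above (enhanced, non-extended control
 * field assumed): the PDU arrives as [basic header][2-byte control]
 * [payload][2-byte FCS]. By the time this runs, the header and control
 * field have already been pulled, so skb->data points at the payload;
 * "skb->data - hdr_size" rewinds to the start of the basic header, the
 * CRC-16 is recomputed over header + control + payload and compared
 * with the two trailing octets that were just trimmed off.
 */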
5079
5080static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
5081{
5082 struct l2cap_ctrl control;
5083
5084 BT_DBG("chan %p", chan);
5085
5086 memset(&control, 0, sizeof(control));
5087 control.sframe = 1;
5088 control.final = 1;
5089 control.reqseq = chan->buffer_seq;
5090 set_bit(CONN_SEND_FBIT, &chan->conn_state);
5091
5092 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5093 control.super = L2CAP_SUPER_RNR;
5094 l2cap_send_sframe(chan, &control);
5095 }
5096
5097 if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
5098 chan->unacked_frames > 0)
5099 __set_retrans_timer(chan);
5100
5101 /* Send pending iframes */
5102 l2cap_ertm_send(chan);
5103
5104 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
5105 test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
5106 /* F-bit wasn't sent in an s-frame or i-frame yet, so
5107 * send it now.
5108 */
5109 control.super = L2CAP_SUPER_RR;
5110 l2cap_send_sframe(chan, &control);
5111 }
5112}
5113
5114static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
5115 struct sk_buff **last_frag)
5116{
5117 /* skb->len reflects data in skb as well as all fragments
5118 * skb->data_len reflects only data in fragments
5119 */
5120 if (!skb_has_frag_list(skb))
5121 skb_shinfo(skb)->frag_list = new_frag;
5122
5123 new_frag->next = NULL;
5124
5125 (*last_frag)->next = new_frag;
5126 *last_frag = new_frag;
5127
5128 skb->len += new_frag->len;
5129 skb->data_len += new_frag->len;
5130 skb->truesize += new_frag->truesize;
5131}
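
/* Example of the length bookkeeping above: appending a 100-byte
 * fragment to an SDU whose head skb holds 50 linear bytes leaves
 * skb->len at 150 (total bytes so far) while skb->data_len only grows
 * to 100, since data_len counts bytes living in the frag list rather
 * than in the head skb's linear area.
 */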
5132
5133static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
5134 struct l2cap_ctrl *control)
5135{
5136 int err = -EINVAL;
5137
5138 switch (control->sar) {
5139 case L2CAP_SAR_UNSEGMENTED:
5140 if (chan->sdu)
5141 break;
5142
5143 err = chan->ops->recv(chan, skb);
5144 break;
5145
5146 case L2CAP_SAR_START:
5147 if (chan->sdu)
5148 break;
5149
5150 chan->sdu_len = get_unaligned_le16(skb->data);
5151 skb_pull(skb, L2CAP_SDULEN_SIZE);
5152
5153 if (chan->sdu_len > chan->imtu) {
5154 err = -EMSGSIZE;
5155 break;
5156 }
5157
5158 if (skb->len >= chan->sdu_len)
5159 break;
5160
5161 chan->sdu = skb;
5162 chan->sdu_last_frag = skb;
5163
5164 skb = NULL;
5165 err = 0;
5166 break;
5167
5168 case L2CAP_SAR_CONTINUE:
5169 if (!chan->sdu)
5170 break;
5171
5172 append_skb_frag(chan->sdu, skb,
5173 &chan->sdu_last_frag);
5174 skb = NULL;
5175
5176 if (chan->sdu->len >= chan->sdu_len)
5177 break;
5178
5179 err = 0;
5180 break;
5181
5182 case L2CAP_SAR_END:
5183 if (!chan->sdu)
5184 break;
5185
5186 append_skb_frag(chan->sdu, skb,
5187 &chan->sdu_last_frag);
5188 skb = NULL;
5189
5190 if (chan->sdu->len != chan->sdu_len)
5191 break;
5192
5193 err = chan->ops->recv(chan, chan->sdu);
5194
5195 if (!err) {
5196 /* Reassembly complete */
5197 chan->sdu = NULL;
5198 chan->sdu_last_frag = NULL;
5199 chan->sdu_len = 0;
5200 }
5201 break;
5202 }
5203
5204 if (err) {
5205 kfree_skb(skb);
5206 kfree_skb(chan->sdu);
5207 chan->sdu = NULL;
5208 chan->sdu_last_frag = NULL;
5209 chan->sdu_len = 0;
5210 }
5211
5212 return err;
5213}
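
/* Reassembly walk-through (illustrative sizes): a 1000-byte SDU sent
 * with a 500-byte MPS typically arrives as
 *
 *	SAR_START    (2-byte SDU length = 1000, first chunk of data)
 *	SAR_CONTINUE (next chunk)
 *	SAR_END      (final chunk)
 *
 * The START frame seeds chan->sdu and chan->sdu_len, CONTINUE frames
 * are chained on with append_skb_frag(), and the END frame hands the
 * completed SDU to chan->ops->recv() once the accumulated length
 * matches sdu_len exactly.
 */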
5214
5215static int l2cap_resegment(struct l2cap_chan *chan)
5216{
5217 /* Placeholder */
5218 return 0;
5219}
5220
5221void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
5222{
5223 u8 event;
5224
5225 if (chan->mode != L2CAP_MODE_ERTM)
5226 return;
5227
5228 event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
5229 l2cap_tx(chan, NULL, NULL, event);
5230}
5231
5232static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
5233{
5234 int err = 0;
5235 /* Pass sequential frames to l2cap_reassemble_sdu()
5236 * until a gap is encountered.
5237 */
5238
5239 BT_DBG("chan %p", chan);
5240
5241 while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5242 struct sk_buff *skb;
5243 BT_DBG("Searching for skb with txseq %d (queue len %d)",
5244 chan->buffer_seq, skb_queue_len(&chan->srej_q));
5245
5246 skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);
5247
5248 if (!skb)
5249 break;
5250
5251 skb_unlink(skb, &chan->srej_q);
5252 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
5253 err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->control);
5254 if (err)
5255 break;
5256 }
5257
5258 if (skb_queue_empty(&chan->srej_q)) {
5259 chan->rx_state = L2CAP_RX_STATE_RECV;
5260 l2cap_send_ack(chan);
5261 }
5262
5263 return err;
5264}
5265
5266static void l2cap_handle_srej(struct l2cap_chan *chan,
5267 struct l2cap_ctrl *control)
5268{
5269 struct sk_buff *skb;
5270
5271 BT_DBG("chan %p, control %p", chan, control);
5272
5273 if (control->reqseq == chan->next_tx_seq) {
5274 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
5275 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
5276 return;
5277 }
5278
5279 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
5280
5281 if (skb == NULL) {
5282 BT_DBG("Seq %d not available for retransmission",
5283 control->reqseq);
5284 return;
5285 }
5286
5287 if (chan->max_tx != 0 && bt_cb(skb)->control.retries >= chan->max_tx) {
5288 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
5289 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
5290 return;
5291 }
5292
5293 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5294
5295 if (control->poll) {
5296 l2cap_pass_to_tx(chan, control);
5297
5298 set_bit(CONN_SEND_FBIT, &chan->conn_state);
5299 l2cap_retransmit(chan, control);
5300 l2cap_ertm_send(chan);
5301
5302 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
5303 set_bit(CONN_SREJ_ACT, &chan->conn_state);
5304 chan->srej_save_reqseq = control->reqseq;
5305 }
5306 } else {
5307 l2cap_pass_to_tx_fbit(chan, control);
5308
5309 if (control->final) {
5310 if (chan->srej_save_reqseq != control->reqseq ||
5311 !test_and_clear_bit(CONN_SREJ_ACT,
5312 &chan->conn_state))
5313 l2cap_retransmit(chan, control);
5314 } else {
5315 l2cap_retransmit(chan, control);
5316 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
5317 set_bit(CONN_SREJ_ACT, &chan->conn_state);
5318 chan->srej_save_reqseq = control->reqseq;
5319 }
5320 }
5321 }
5322}
5323
5324static void l2cap_handle_rej(struct l2cap_chan *chan,
5325 struct l2cap_ctrl *control)
5326{
5327 struct sk_buff *skb;
5328
5329 BT_DBG("chan %p, control %p", chan, control);
5330
5331 if (control->reqseq == chan->next_tx_seq) {
5332 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
5333 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
5334 return;
5335 }
5336
5337 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
5338
5339 if (chan->max_tx && skb &&
5340 bt_cb(skb)->control.retries >= chan->max_tx) {
5341 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
5342 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
5343 return;
5344 }
5345
5346 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5347
5348 l2cap_pass_to_tx(chan, control);
5349
5350 if (control->final) {
5351 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
5352 l2cap_retransmit_all(chan, control);
5353 } else {
5354 l2cap_retransmit_all(chan, control);
5355 l2cap_ertm_send(chan);
5356 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
5357 set_bit(CONN_REJ_ACT, &chan->conn_state);
5358 }
5359}
5360
5361static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
5362{
5363 BT_DBG("chan %p, txseq %d", chan, txseq);
5364
5365 BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
5366 chan->expected_tx_seq);
5367
5368 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
5369 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
5370 chan->tx_win) {
5371 /* See notes below regarding "double poll" and
5372 * invalid packets.
5373 */
5374 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
5375 BT_DBG("Invalid/Ignore - after SREJ");
5376 return L2CAP_TXSEQ_INVALID_IGNORE;
5377 } else {
5378 BT_DBG("Invalid - in window after SREJ sent");
5379 return L2CAP_TXSEQ_INVALID;
5380 }
5381 }
5382
5383 if (chan->srej_list.head == txseq) {
5384 BT_DBG("Expected SREJ");
5385 return L2CAP_TXSEQ_EXPECTED_SREJ;
5386 }
5387
5388 if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
5389 BT_DBG("Duplicate SREJ - txseq already stored");
5390 return L2CAP_TXSEQ_DUPLICATE_SREJ;
5391 }
5392
5393 if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
5394 BT_DBG("Unexpected SREJ - not requested");
5395 return L2CAP_TXSEQ_UNEXPECTED_SREJ;
5396 }
5397 }
5398
5399 if (chan->expected_tx_seq == txseq) {
5400 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
5401 chan->tx_win) {
5402 BT_DBG("Invalid - txseq outside tx window");
5403 return L2CAP_TXSEQ_INVALID;
5404 } else {
5405 BT_DBG("Expected");
5406 return L2CAP_TXSEQ_EXPECTED;
5407 }
5408 }
5409
5410 if (__seq_offset(chan, txseq, chan->last_acked_seq) <
5411 __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
5412 BT_DBG("Duplicate - expected_tx_seq later than txseq");
5413 return L2CAP_TXSEQ_DUPLICATE;
5414 }
5415
5416 if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
5417 /* A source of invalid packets is a "double poll" condition,
5418 * where delays cause us to send multiple poll packets. If
5419 * the remote stack receives and processes both polls,
5420 * sequence numbers can wrap around in such a way that a
5421 * resent frame has a sequence number that looks like new data
5422 * with a sequence gap. This would trigger an erroneous SREJ
5423 * request.
5424 *
5425 * Fortunately, this cannot happen as long as the tx window is
5426 * no larger than half of the sequence number space, so such
5427 * out-of-window frames can be safely ignored.
5428 *
5429 * With a tx window larger than half of the sequence number
5430 * space, the frame is invalid and cannot be ignored. This
5431 * causes a disconnect.
5432 */
5433
5434 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
5435 BT_DBG("Invalid/Ignore - txseq outside tx window");
5436 return L2CAP_TXSEQ_INVALID_IGNORE;
5437 } else {
5438 BT_DBG("Invalid - txseq outside tx window");
5439 return L2CAP_TXSEQ_INVALID;
5440 }
5441 } else {
5442 BT_DBG("Unexpected - txseq indicates missing frames");
5443 return L2CAP_TXSEQ_UNEXPECTED;
5444 }
5445}
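
/* Numeric example of the window check above: with tx_win = 10,
 * tx_win_max = 63 and last_acked_seq = 60, sequence numbers 60..63 and
 * 0..5 fall inside the window (offset < 10) and anything else is
 * outside it. Because 10 <= (63 + 1) / 2, an out-of-window txseq is
 * classified INVALID_IGNORE and silently dropped; only windows larger
 * than half the sequence space force a disconnect.
 */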
5446
5447static int l2cap_rx_state_recv(struct l2cap_chan *chan,
5448 struct l2cap_ctrl *control,
5449 struct sk_buff *skb, u8 event)
5450{
5451 int err = 0;
5452 bool skb_in_use = false;
5453
5454 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
5455 event);
5456
5457 switch (event) {
5458 case L2CAP_EV_RECV_IFRAME:
5459 switch (l2cap_classify_txseq(chan, control->txseq)) {
5460 case L2CAP_TXSEQ_EXPECTED:
5461 l2cap_pass_to_tx(chan, control);
5462
5463 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5464 BT_DBG("Busy, discarding expected seq %d",
5465 control->txseq);
5466 break;
5467 }
5468
5469 chan->expected_tx_seq = __next_seq(chan,
5470 control->txseq);
5471
5472 chan->buffer_seq = chan->expected_tx_seq;
5473 skb_in_use = true;
5474
5475 err = l2cap_reassemble_sdu(chan, skb, control);
5476 if (err)
5477 break;
5478
5479 if (control->final) {
5480 if (!test_and_clear_bit(CONN_REJ_ACT,
5481 &chan->conn_state)) {
5482 control->final = 0;
5483 l2cap_retransmit_all(chan, control);
5484 l2cap_ertm_send(chan);
5485 }
5486 }
5487
5488 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
5489 l2cap_send_ack(chan);
5490 break;
5491 case L2CAP_TXSEQ_UNEXPECTED:
5492 l2cap_pass_to_tx(chan, control);
5493
5494 /* Can't issue SREJ frames in the local busy state.
5495 * Drop this frame, it will be seen as missing
5496 * when local busy is exited.
5497 */
5498 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5499 BT_DBG("Busy, discarding unexpected seq %d",
5500 control->txseq);
5501 break;
5502 }
5503
5504 /* There was a gap in the sequence, so an SREJ
5505 * must be sent for each missing frame. The
5506 * current frame is stored for later use.
5507 */
5508 skb_queue_tail(&chan->srej_q, skb);
5509 skb_in_use = true;
5510 BT_DBG("Queued %p (queue len %d)", skb,
5511 skb_queue_len(&chan->srej_q));
5512
5513 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
5514 l2cap_seq_list_clear(&chan->srej_list);
5515 l2cap_send_srej(chan, control->txseq);
5516
5517 chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
5518 break;
5519 case L2CAP_TXSEQ_DUPLICATE:
5520 l2cap_pass_to_tx(chan, control);
5521 break;
5522 case L2CAP_TXSEQ_INVALID_IGNORE:
5523 break;
5524 case L2CAP_TXSEQ_INVALID:
5525 default:
5526 l2cap_send_disconn_req(chan->conn, chan,
5527 ECONNRESET);
5528 break;
5529 }
5530 break;
5531 case L2CAP_EV_RECV_RR:
5532 l2cap_pass_to_tx(chan, control);
5533 if (control->final) {
5534 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5535
5536 if (!test_and_clear_bit(CONN_REJ_ACT,
5537 &chan->conn_state)) {
5538 control->final = 0;
5539 l2cap_retransmit_all(chan, control);
5540 }
5541
5542 l2cap_ertm_send(chan);
5543 } else if (control->poll) {
5544 l2cap_send_i_or_rr_or_rnr(chan);
5545 } else {
5546 if (test_and_clear_bit(CONN_REMOTE_BUSY,
5547 &chan->conn_state) &&
5548 chan->unacked_frames)
5549 __set_retrans_timer(chan);
5550
5551 l2cap_ertm_send(chan);
5552 }
5553 break;
5554 case L2CAP_EV_RECV_RNR:
5555 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5556 l2cap_pass_to_tx(chan, control);
5557 if (control && control->poll) {
5558 set_bit(CONN_SEND_FBIT, &chan->conn_state);
5559 l2cap_send_rr_or_rnr(chan, 0);
5560 }
5561 __clear_retrans_timer(chan);
5562 l2cap_seq_list_clear(&chan->retrans_list);
5563 break;
5564 case L2CAP_EV_RECV_REJ:
5565 l2cap_handle_rej(chan, control);
5566 break;
5567 case L2CAP_EV_RECV_SREJ:
5568 l2cap_handle_srej(chan, control);
5569 break;
5570 default:
5571 break;
5572 }
5573
5574 if (skb && !skb_in_use) {
5575 BT_DBG("Freeing %p", skb);
5576 kfree_skb(skb);
5577 }
5578
5579 return err;
5580}
5581
5582static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
5583 struct l2cap_ctrl *control,
5584 struct sk_buff *skb, u8 event)
5585{
5586 int err = 0;
5587 u16 txseq = control->txseq;
5588 bool skb_in_use = false;
5589
5590 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
5591 event);
5592
5593 switch (event) {
5594 case L2CAP_EV_RECV_IFRAME:
5595 switch (l2cap_classify_txseq(chan, txseq)) {
5596 case L2CAP_TXSEQ_EXPECTED:
5597 /* Keep frame for reassembly later */
5598 l2cap_pass_to_tx(chan, control);
5599 skb_queue_tail(&chan->srej_q, skb);
5600 skb_in_use = true;
5601 BT_DBG("Queued %p (queue len %d)", skb,
5602 skb_queue_len(&chan->srej_q));
5603
5604 chan->expected_tx_seq = __next_seq(chan, txseq);
5605 break;
5606 case L2CAP_TXSEQ_EXPECTED_SREJ:
5607 l2cap_seq_list_pop(&chan->srej_list);
5608
5609 l2cap_pass_to_tx(chan, control);
5610 skb_queue_tail(&chan->srej_q, skb);
5611 skb_in_use = true;
5612 BT_DBG("Queued %p (queue len %d)", skb,
5613 skb_queue_len(&chan->srej_q));
5614
5615 err = l2cap_rx_queued_iframes(chan);
5616 if (err)
5617 break;
5618
5619 break;
5620 case L2CAP_TXSEQ_UNEXPECTED:
5621 /* Got a frame that can't be reassembled yet.
5622 * Save it for later, and send SREJs to cover
5623 * the missing frames.
5624 */
5625 skb_queue_tail(&chan->srej_q, skb);
5626 skb_in_use = true;
5627 BT_DBG("Queued %p (queue len %d)", skb,
5628 skb_queue_len(&chan->srej_q));
5629
5630 l2cap_pass_to_tx(chan, control);
5631 l2cap_send_srej(chan, control->txseq);
5632 break;
5633 case L2CAP_TXSEQ_UNEXPECTED_SREJ:
5634 /* This frame was requested with an SREJ, but
5635 * some expected retransmitted frames are
5636 * missing. Request retransmission of missing
5637 * SREJ'd frames.
5638 */
5639 skb_queue_tail(&chan->srej_q, skb);
5640 skb_in_use = true;
5641 BT_DBG("Queued %p (queue len %d)", skb,
5642 skb_queue_len(&chan->srej_q));
5643
5644 l2cap_pass_to_tx(chan, control);
5645 l2cap_send_srej_list(chan, control->txseq);
5646 break;
5647 case L2CAP_TXSEQ_DUPLICATE_SREJ:
5648 /* We've already queued this frame. Drop this copy. */
5649 l2cap_pass_to_tx(chan, control);
5650 break;
5651 case L2CAP_TXSEQ_DUPLICATE:
5652 /* Expecting a later sequence number, so this frame
5653 * was already received. Ignore it completely.
5654 */
5655 break;
5656 case L2CAP_TXSEQ_INVALID_IGNORE:
5657 break;
5658 case L2CAP_TXSEQ_INVALID:
5659 default:
5660 l2cap_send_disconn_req(chan->conn, chan,
5661 ECONNRESET);
5662 break;
5663 }
5664 break;
5665 case L2CAP_EV_RECV_RR:
5666 l2cap_pass_to_tx(chan, control);
5667 if (control->final) {
5668 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5669
5670 if (!test_and_clear_bit(CONN_REJ_ACT,
5671 &chan->conn_state)) {
5672 control->final = 0;
5673 l2cap_retransmit_all(chan, control);
5674 }
5675
5676 l2cap_ertm_send(chan);
5677 } else if (control->poll) {
5678 if (test_and_clear_bit(CONN_REMOTE_BUSY,
5679 &chan->conn_state) &&
5680 chan->unacked_frames) {
5681 __set_retrans_timer(chan);
5682 }
5683
5684 set_bit(CONN_SEND_FBIT, &chan->conn_state);
5685 l2cap_send_srej_tail(chan);
5686 } else {
5687 if (test_and_clear_bit(CONN_REMOTE_BUSY,
5688 &chan->conn_state) &&
5689 chan->unacked_frames)
5690 __set_retrans_timer(chan);
5691
5692 l2cap_send_ack(chan);
5693 }
5694 break;
5695 case L2CAP_EV_RECV_RNR:
5696 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5697 l2cap_pass_to_tx(chan, control);
5698 if (control->poll) {
5699 l2cap_send_srej_tail(chan);
5700 } else {
5701 struct l2cap_ctrl rr_control;
5702 memset(&rr_control, 0, sizeof(rr_control));
5703 rr_control.sframe = 1;
5704 rr_control.super = L2CAP_SUPER_RR;
5705 rr_control.reqseq = chan->buffer_seq;
5706 l2cap_send_sframe(chan, &rr_control);
5707 }
5708
5709 break;
5710 case L2CAP_EV_RECV_REJ:
5711 l2cap_handle_rej(chan, control);
5712 break;
5713 case L2CAP_EV_RECV_SREJ:
5714 l2cap_handle_srej(chan, control);
5715 break;
5716 }
5717
5718 if (skb && !skb_in_use) {
5719 BT_DBG("Freeing %p", skb);
5720 kfree_skb(skb);
5721 }
5722
5723 return err;
5724}
5725
5726static int l2cap_finish_move(struct l2cap_chan *chan)
5727{
5728 BT_DBG("chan %p", chan);
5729
5730 chan->rx_state = L2CAP_RX_STATE_RECV;
5731
5732 if (chan->hs_hcon)
5733 chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
5734 else
5735 chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
5736
5737 return l2cap_resegment(chan);
5738}
5739
5740static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
5741 struct l2cap_ctrl *control,
5742 struct sk_buff *skb, u8 event)
5743{
5744 int err;
5745
5746 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
5747 event);
5748
5749 if (!control->poll)
5750 return -EPROTO;
5751
5752 l2cap_process_reqseq(chan, control->reqseq);
5753
5754 if (!skb_queue_empty(&chan->tx_q))
5755 chan->tx_send_head = skb_peek(&chan->tx_q);
5756 else
5757 chan->tx_send_head = NULL;
5758
5759 /* Rewind next_tx_seq to the point expected
5760 * by the receiver.
5761 */
5762 chan->next_tx_seq = control->reqseq;
5763 chan->unacked_frames = 0;
5764
5765 err = l2cap_finish_move(chan);
5766 if (err)
5767 return err;
5768
5769 set_bit(CONN_SEND_FBIT, &chan->conn_state);
5770 l2cap_send_i_or_rr_or_rnr(chan);
5771
5772 if (event == L2CAP_EV_RECV_IFRAME)
5773 return -EPROTO;
5774
5775 return l2cap_rx_state_recv(chan, control, NULL, event);
5776}
5777
5778static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
5779 struct l2cap_ctrl *control,
5780 struct sk_buff *skb, u8 event)
5781{
5782 int err;
5783
5784 if (!control->final)
5785 return -EPROTO;
5786
5787 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5788
5789 chan->rx_state = L2CAP_RX_STATE_RECV;
5790 l2cap_process_reqseq(chan, control->reqseq);
5791
5792 if (!skb_queue_empty(&chan->tx_q))
5793 chan->tx_send_head = skb_peek(&chan->tx_q);
5794 else
5795 chan->tx_send_head = NULL;
5796
5797 /* Rewind next_tx_seq to the point expected
5798 * by the receiver.
5799 */
5800 chan->next_tx_seq = control->reqseq;
5801 chan->unacked_frames = 0;
5802
5803 if (chan->hs_hcon)
5804 chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
5805 else
5806 chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
5807
5808 err = l2cap_resegment(chan);
5809
5810 if (!err)
5811 err = l2cap_rx_state_recv(chan, control, skb, event);
5812
5813 return err;
5814}
5815
5816static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
5817{
5818 /* Make sure reqseq is for a packet that has been sent but not acked */
5819 u16 unacked;
5820
5821 unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
5822 return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
5823}
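
/* Example (modulo-64 sequence numbers): with next_tx_seq = 5 and
 * expected_ack_seq = 2 there are 3 unacked frames (2, 3 and 4). A
 * reqseq of 3 gives __seq_offset(5, 3) = 2 <= 3 and is accepted, while
 * a reqseq of 6 gives an offset of 63 and is rejected, since it would
 * acknowledge a frame that has not been sent yet.
 */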
5824
5825static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
5826 struct sk_buff *skb, u8 event)
5827{
5828 int err = 0;
5829
5830 BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
5831 control, skb, event, chan->rx_state);
5832
5833 if (__valid_reqseq(chan, control->reqseq)) {
5834 switch (chan->rx_state) {
5835 case L2CAP_RX_STATE_RECV:
5836 err = l2cap_rx_state_recv(chan, control, skb, event);
5837 break;
5838 case L2CAP_RX_STATE_SREJ_SENT:
5839 err = l2cap_rx_state_srej_sent(chan, control, skb,
5840 event);
5841 break;
5842 case L2CAP_RX_STATE_WAIT_P:
5843 err = l2cap_rx_state_wait_p(chan, control, skb, event);
5844 break;
5845 case L2CAP_RX_STATE_WAIT_F:
5846 err = l2cap_rx_state_wait_f(chan, control, skb, event);
5847 break;
5848 default:
5849 /* shut it down */
5850 break;
5851 }
5852 } else {
5853 BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
5854 control->reqseq, chan->next_tx_seq,
5855 chan->expected_ack_seq);
5856 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
5857 }
5858
5859 return err;
5860}
5861
5862static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
5863 struct sk_buff *skb)
5864{
5865 int err = 0;
5866
5867 BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
5868 chan->rx_state);
5869
5870 if (l2cap_classify_txseq(chan, control->txseq) ==
5871 L2CAP_TXSEQ_EXPECTED) {
5872 l2cap_pass_to_tx(chan, control);
5873
5874 BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
5875 __next_seq(chan, chan->buffer_seq));
5876
5877 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
5878
5879 l2cap_reassemble_sdu(chan, skb, control);
5880 } else {
5881 if (chan->sdu) {
5882 kfree_skb(chan->sdu);
5883 chan->sdu = NULL;
5884 }
5885 chan->sdu_last_frag = NULL;
5886 chan->sdu_len = 0;
5887
5888 if (skb) {
5889 BT_DBG("Freeing %p", skb);
5890 kfree_skb(skb);
5891 }
5892 }
5893
5894 chan->last_acked_seq = control->txseq;
5895 chan->expected_tx_seq = __next_seq(chan, control->txseq);
5896
5897 return err;
5898}
5899
5900static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
5901{
5902 struct l2cap_ctrl *control = &bt_cb(skb)->control;
5903 u16 len;
5904 u8 event;
5905
5906 __unpack_control(chan, skb);
5907
5908 len = skb->len;
5909
5910 /*
5911 * We can just drop the corrupted I-frame here.
5912 * Receiver will miss it and start proper recovery
5913 * procedures and ask for retransmission.
5914 */
5915 if (l2cap_check_fcs(chan, skb))
5916 goto drop;
5917
5918 if (!control->sframe && control->sar == L2CAP_SAR_START)
5919 len -= L2CAP_SDULEN_SIZE;
5920
5921 if (chan->fcs == L2CAP_FCS_CRC16)
5922 len -= L2CAP_FCS_SIZE;
5923
5924 if (len > chan->mps) {
5925 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
5926 goto drop;
5927 }
5928
5929 if (!control->sframe) {
5930 int err;
5931
5932 BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
5933 control->sar, control->reqseq, control->final,
5934 control->txseq);
5935
5936 /* Validate F-bit - F=0 always valid, F=1 only
5937 * valid in TX WAIT_F
5938 */
5939 if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
5940 goto drop;
5941
5942 if (chan->mode != L2CAP_MODE_STREAMING) {
5943 event = L2CAP_EV_RECV_IFRAME;
5944 err = l2cap_rx(chan, control, skb, event);
5945 } else {
5946 err = l2cap_stream_rx(chan, control, skb);
5947 }
5948
5949 if (err)
5950 l2cap_send_disconn_req(chan->conn, chan,
5951 ECONNRESET);
5952 } else {
5953 const u8 rx_func_to_event[4] = {
5954 L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
5955 L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
5956 };
5957
5958 /* Only I-frames are expected in streaming mode */
5959 if (chan->mode == L2CAP_MODE_STREAMING)
5960 goto drop;
5961
5962 BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
5963 control->reqseq, control->final, control->poll,
5964 control->super);
5965
5966 if (len != 0) {
5967 BT_ERR("Trailing bytes: %d in sframe", len);
5968 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
5969 goto drop;
5970 }
5971
5972 /* Validate F and P bits */
5973 if (control->final && (control->poll ||
5974 chan->tx_state != L2CAP_TX_STATE_WAIT_F))
5975 goto drop;
5976
5977 event = rx_func_to_event[control->super];
5978 if (l2cap_rx(chan, control, skb, event))
5979 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
5980 }
5981
5982 return 0;
5983
5984drop:
5985 kfree_skb(skb);
5986 return 0;
5987}
5988
5989static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
5990 struct sk_buff *skb)
5991{
5992 struct l2cap_chan *chan;
5993
5994 chan = l2cap_get_chan_by_scid(conn, cid);
5995 if (!chan) {
5996 if (cid == L2CAP_CID_A2MP) {
5997 chan = a2mp_channel_create(conn, skb);
5998 if (!chan) {
5999 kfree_skb(skb);
6000 return;
6001 }
6002
6003 l2cap_chan_lock(chan);
6004 } else {
6005 BT_DBG("unknown cid 0x%4.4x", cid);
6006 /* Drop packet and return */
6007 kfree_skb(skb);
6008 return;
6009 }
6010 }
6011
6012 BT_DBG("chan %p, len %d", chan, skb->len);
6013
6014 if (chan->state != BT_CONNECTED)
6015 goto drop;
6016
6017 switch (chan->mode) {
6018 case L2CAP_MODE_BASIC:
6019 /* If the socket recv buffer overflows we drop data here,
6020 * which is *bad* because L2CAP has to be reliable.
6021 * But we don't have any other choice: basic mode L2CAP
6022 * provides no flow control mechanism. */
6023
6024 if (chan->imtu < skb->len)
6025 goto drop;
6026
6027 if (!chan->ops->recv(chan, skb))
6028 goto done;
6029 break;
6030
6031 case L2CAP_MODE_ERTM:
6032 case L2CAP_MODE_STREAMING:
6033 l2cap_data_rcv(chan, skb);
6034 goto done;
6035
6036 default:
6037 BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
6038 break;
6039 }
6040
6041drop:
6042 kfree_skb(skb);
6043
6044done:
6045 l2cap_chan_unlock(chan);
6046}
6047
6048static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
6049 struct sk_buff *skb)
6050{
6051 struct l2cap_chan *chan;
6052
6053 chan = l2cap_global_chan_by_psm(0, psm, conn->src, conn->dst);
6054 if (!chan)
6055 goto drop;
6056
6057 BT_DBG("chan %p, len %d", chan, skb->len);
6058
6059 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
6060 goto drop;
6061
6062 if (chan->imtu < skb->len)
6063 goto drop;
6064
6065 if (!chan->ops->recv(chan, skb))
6066 return;
6067
6068drop:
6069 kfree_skb(skb);
6070}
6071
6072static void l2cap_att_channel(struct l2cap_conn *conn, u16 cid,
6073 struct sk_buff *skb)
6074{
6075 struct l2cap_chan *chan;
6076
6077 chan = l2cap_global_chan_by_scid(0, cid, conn->src, conn->dst);
6078 if (!chan)
6079 goto drop;
6080
6081 BT_DBG("chan %p, len %d", chan, skb->len);
6082
6083 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
6084 goto drop;
6085
6086 if (chan->imtu < skb->len)
6087 goto drop;
6088
6089 if (!chan->ops->recv(chan, skb))
6090 return;
6091
6092drop:
6093 kfree_skb(skb);
6094}
6095
6096static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
6097{
6098 struct l2cap_hdr *lh = (void *) skb->data;
6099 u16 cid, len;
6100 __le16 psm;
6101
6102 skb_pull(skb, L2CAP_HDR_SIZE);
6103 cid = __le16_to_cpu(lh->cid);
6104 len = __le16_to_cpu(lh->len);
6105
6106 if (len != skb->len) {
6107 kfree_skb(skb);
6108 return;
6109 }
6110
6111 BT_DBG("len %d, cid 0x%4.4x", len, cid);
6112
6113 switch (cid) {
6114 case L2CAP_CID_LE_SIGNALING:
6115 case L2CAP_CID_SIGNALING:
6116 l2cap_sig_channel(conn, skb);
6117 break;
6118
6119 case L2CAP_CID_CONN_LESS:
6120 psm = get_unaligned((__le16 *) skb->data);
6121 skb_pull(skb, L2CAP_PSMLEN_SIZE);
6122 l2cap_conless_channel(conn, psm, skb);
6123 break;
6124
6125 case L2CAP_CID_LE_DATA:
6126 l2cap_att_channel(conn, cid, skb);
6127 break;
6128
6129 case L2CAP_CID_SMP:
6130 if (smp_sig_channel(conn, skb))
6131 l2cap_conn_del(conn->hcon, EACCES);
6132 break;
6133
6134 default:
6135 l2cap_data_channel(conn, cid, skb);
6136 break;
6137 }
6138}
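
/* Demux example: a frame starting with the bytes 0c 00 40 00 carries a
 * basic header with len = 0x000c and cid = 0x0040, so its 12 payload
 * bytes are handed to l2cap_data_channel() for the dynamically
 * allocated channel 0x0040; cid 0x0001 would go to the BR/EDR
 * signaling channel instead.
 */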
6139
6140/* ---- L2CAP interface with lower layer (HCI) ---- */
6141
6142int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
6143{
6144 int exact = 0, lm1 = 0, lm2 = 0;
6145 struct l2cap_chan *c;
6146
6147 BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);
6148
6149 /* Find listening sockets and check their link_mode */
6150 read_lock(&chan_list_lock);
6151 list_for_each_entry(c, &chan_list, global_l) {
6152 struct sock *sk = c->sk;
6153
6154 if (c->state != BT_LISTEN)
6155 continue;
6156
6157 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
6158 lm1 |= HCI_LM_ACCEPT;
6159 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
6160 lm1 |= HCI_LM_MASTER;
6161 exact++;
6162 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
6163 lm2 |= HCI_LM_ACCEPT;
6164 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
6165 lm2 |= HCI_LM_MASTER;
6166 }
6167 }
6168 read_unlock(&chan_list_lock);
6169
6170 return exact ? lm1 : lm2;
6171}
6172
6173void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
6174{
6175 struct l2cap_conn *conn;
6176
6177 BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);
6178
6179 if (!status) {
6180 conn = l2cap_conn_add(hcon, status);
6181 if (conn)
6182 l2cap_conn_ready(conn);
6183 } else
6184 l2cap_conn_del(hcon, bt_to_errno(status));
6185
6186}
6187
6188int l2cap_disconn_ind(struct hci_conn *hcon)
6189{
6190 struct l2cap_conn *conn = hcon->l2cap_data;
6191
6192 BT_DBG("hcon %p", hcon);
6193
6194 if (!conn)
6195 return HCI_ERROR_REMOTE_USER_TERM;
6196 return conn->disc_reason;
6197}
6198
6199void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
6200{
6201 BT_DBG("hcon %p reason %d", hcon, reason);
6202
6203 l2cap_conn_del(hcon, bt_to_errno(reason));
6204}
6205
6206static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
6207{
6208 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
6209 return;
6210
6211 if (encrypt == 0x00) {
6212 if (chan->sec_level == BT_SECURITY_MEDIUM) {
6213 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
6214 } else if (chan->sec_level == BT_SECURITY_HIGH)
6215 l2cap_chan_close(chan, ECONNREFUSED);
6216 } else {
6217 if (chan->sec_level == BT_SECURITY_MEDIUM)
6218 __clear_chan_timer(chan);
6219 }
6220}
6221
6222int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
6223{
6224 struct l2cap_conn *conn = hcon->l2cap_data;
6225 struct l2cap_chan *chan;
6226
6227 if (!conn)
6228 return 0;
6229
6230 BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);
6231
6232 if (hcon->type == LE_LINK) {
6233 if (!status && encrypt)
6234 smp_distribute_keys(conn, 0);
6235 cancel_delayed_work(&conn->security_timer);
6236 }
6237
6238 mutex_lock(&conn->chan_lock);
6239
6240 list_for_each_entry(chan, &conn->chan_l, list) {
6241 l2cap_chan_lock(chan);
6242
6243 BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
6244 state_to_string(chan->state));
6245
6246 if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
6247 l2cap_chan_unlock(chan);
6248 continue;
6249 }
6250
6251 if (chan->scid == L2CAP_CID_LE_DATA) {
6252 if (!status && encrypt) {
6253 chan->sec_level = hcon->sec_level;
6254 l2cap_chan_ready(chan);
6255 }
6256
6257 l2cap_chan_unlock(chan);
6258 continue;
6259 }
6260
6261 if (test_bit(CONF_CONNECT_PEND, &chan->conf_state)) {
6262 l2cap_chan_unlock(chan);
6263 continue;
6264 }
6265
6266 if (!status && (chan->state == BT_CONNECTED ||
6267 chan->state == BT_CONFIG)) {
6268 struct sock *sk = chan->sk;
6269
6270 clear_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags);
6271 sk->sk_state_change(sk);
6272
6273 l2cap_check_encryption(chan, encrypt);
6274 l2cap_chan_unlock(chan);
6275 continue;
6276 }
6277
6278 if (chan->state == BT_CONNECT) {
6279 if (!status) {
6280 l2cap_start_connection(chan);
6281 } else {
6282 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
6283 }
6284 } else if (chan->state == BT_CONNECT2) {
6285 struct sock *sk = chan->sk;
6286 struct l2cap_conn_rsp rsp;
6287 __u16 res, stat;
6288
6289 lock_sock(sk);
6290
6291 if (!status) {
6292 if (test_bit(BT_SK_DEFER_SETUP,
6293 &bt_sk(sk)->flags)) {
6294 res = L2CAP_CR_PEND;
6295 stat = L2CAP_CS_AUTHOR_PEND;
6296 chan->ops->defer(chan);
6297 } else {
6298 __l2cap_state_change(chan, BT_CONFIG);
6299 res = L2CAP_CR_SUCCESS;
6300 stat = L2CAP_CS_NO_INFO;
6301 }
6302 } else {
6303 __l2cap_state_change(chan, BT_DISCONN);
6304 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
6305 res = L2CAP_CR_SEC_BLOCK;
6306 stat = L2CAP_CS_NO_INFO;
6307 }
6308
6309 release_sock(sk);
6310
6311 rsp.scid = cpu_to_le16(chan->dcid);
6312 rsp.dcid = cpu_to_le16(chan->scid);
6313 rsp.result = cpu_to_le16(res);
6314 rsp.status = cpu_to_le16(stat);
6315 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
6316 sizeof(rsp), &rsp);
6317
6318 if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
6319 res == L2CAP_CR_SUCCESS) {
6320 char buf[128];
6321 set_bit(CONF_REQ_SENT, &chan->conf_state);
6322 l2cap_send_cmd(conn, l2cap_get_ident(conn),
6323 L2CAP_CONF_REQ,
6324 l2cap_build_conf_req(chan, buf),
6325 buf);
6326 chan->num_conf_req++;
6327 }
6328 }
6329
6330 l2cap_chan_unlock(chan);
6331 }
6332
6333 mutex_unlock(&conn->chan_lock);
6334
6335 return 0;
6336}
6337
6338int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
6339{
6340 struct l2cap_conn *conn = hcon->l2cap_data;
6341 struct l2cap_hdr *hdr;
6342 int len;
6343
6344 /* For an AMP controller, do not create an l2cap conn */
6345 if (!conn && hcon->hdev->dev_type != HCI_BREDR)
6346 goto drop;
6347
6348 if (!conn)
6349 conn = l2cap_conn_add(hcon, 0);
6350
6351 if (!conn)
6352 goto drop;
6353
6354 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
6355
6356 switch (flags) {
6357 case ACL_START:
6358 case ACL_START_NO_FLUSH:
6359 case ACL_COMPLETE:
6360 if (conn->rx_len) {
6361 BT_ERR("Unexpected start frame (len %d)", skb->len);
6362 kfree_skb(conn->rx_skb);
6363 conn->rx_skb = NULL;
6364 conn->rx_len = 0;
6365 l2cap_conn_unreliable(conn, ECOMM);
6366 }
6367
6368 /* Start fragment always begin with Basic L2CAP header */
6369 if (skb->len < L2CAP_HDR_SIZE) {
6370 BT_ERR("Frame is too short (len %d)", skb->len);
6371 l2cap_conn_unreliable(conn, ECOMM);
6372 goto drop;
6373 }
6374
6375 hdr = (struct l2cap_hdr *) skb->data;
6376 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
6377
6378 if (len == skb->len) {
6379 /* Complete frame received */
6380 l2cap_recv_frame(conn, skb);
6381 return 0;
6382 }
6383
6384 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
6385
6386 if (skb->len > len) {
6387 BT_ERR("Frame is too long (len %d, expected len %d)",
6388 skb->len, len);
6389 l2cap_conn_unreliable(conn, ECOMM);
6390 goto drop;
6391 }
6392
6393 /* Allocate skb for the complete frame (with header) */
6394 conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
6395 if (!conn->rx_skb)
6396 goto drop;
6397
6398 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
6399 skb->len);
6400 conn->rx_len = len - skb->len;
6401 break;
6402
6403 case ACL_CONT:
6404 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
6405
6406 if (!conn->rx_len) {
6407 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
6408 l2cap_conn_unreliable(conn, ECOMM);
6409 goto drop;
6410 }
6411
6412 if (skb->len > conn->rx_len) {
6413 BT_ERR("Fragment is too long (len %d, expected %d)",
6414 skb->len, conn->rx_len);
6415 kfree_skb(conn->rx_skb);
6416 conn->rx_skb = NULL;
6417 conn->rx_len = 0;
6418 l2cap_conn_unreliable(conn, ECOMM);
6419 goto drop;
6420 }
6421
6422 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
6423 skb->len);
6424 conn->rx_len -= skb->len;
6425
6426 if (!conn->rx_len) {
6427 /* Complete frame received */
6428 l2cap_recv_frame(conn, conn->rx_skb);
6429 conn->rx_skb = NULL;
6430 }
6431 break;
6432 }
6433
6434drop:
6435 kfree_skb(skb);
6436 return 0;
6437}
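
/* Reassembly example (illustrative sizes): an L2CAP frame with a
 * 4-byte basic header and 996 bytes of payload (len = 1000) carried
 * over a 339-byte ACL MTU arrives as one ACL_START fragment followed
 * by two ACL_CONT fragments. The start fragment allocates rx_skb for
 * the full 1000-byte frame and sets rx_len to the outstanding
 * remainder (661); each continuation is copied in and decrements
 * rx_len until it reaches zero, at which point the complete frame is
 * passed to l2cap_recv_frame().
 */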
6438
6439static int l2cap_debugfs_show(struct seq_file *f, void *p)
6440{
6441 struct l2cap_chan *c;
6442
6443 read_lock(&chan_list_lock);
6444
6445 list_for_each_entry(c, &chan_list, global_l) {
6446 struct sock *sk = c->sk;
6447
6448 seq_printf(f, "%pMR %pMR %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
6449 &bt_sk(sk)->src, &bt_sk(sk)->dst,
6450 c->state, __le16_to_cpu(c->psm),
6451 c->scid, c->dcid, c->imtu, c->omtu,
6452 c->sec_level, c->mode);
6453 }
6454
6455 read_unlock(&chan_list_lock);
6456
6457 return 0;
6458}
6459
6460static int l2cap_debugfs_open(struct inode *inode, struct file *file)
6461{
6462 return single_open(file, l2cap_debugfs_show, inode->i_private);
6463}
6464
6465static const struct file_operations l2cap_debugfs_fops = {
6466 .open = l2cap_debugfs_open,
6467 .read = seq_read,
6468 .llseek = seq_lseek,
6469 .release = single_release,
6470};
6471
6472static struct dentry *l2cap_debugfs;
6473
6474int __init l2cap_init(void)
6475{
6476 int err;
6477
6478 err = l2cap_init_sockets();
6479 if (err < 0)
6480 return err;
6481
6482 if (bt_debugfs) {
6483 l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
6484 NULL, &l2cap_debugfs_fops);
6485 if (!l2cap_debugfs)
6486 BT_ERR("Failed to create L2CAP debug file");
6487 }
6488
6489 return 0;
6490}
6491
6492void l2cap_exit(void)
6493{
6494 debugfs_remove(l2cap_debugfs);
6495 l2cap_cleanup_sockets();
6496}
6497
6498module_param(disable_ertm, bool, 0644);
6499MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");