Bluetooth: Move amp.h header file into net/bluetooth/
[deliverable/linux.git] / net / bluetooth / l2cap_core.c
... / ...
CommitLineData
1/*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
8
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
10
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
14
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
23
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
27*/
28
29/* Bluetooth L2CAP core. */
30
31#include <linux/module.h>
32
33#include <linux/debugfs.h>
34#include <linux/crc16.h>
35
36#include <net/bluetooth/bluetooth.h>
37#include <net/bluetooth/hci_core.h>
38#include <net/bluetooth/l2cap.h>
39#include <net/bluetooth/smp.h>
40#include <net/bluetooth/a2mp.h>
41
42#include "amp.h"
43
/* Module parameter: when set, ERTM and streaming modes are never
 * advertised or negotiated (basic mode only). */
bool disable_ertm;

/* Feature mask reported in response to an information request. */
static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
/* Supported fixed channels: only the L2CAP signalling channel here. */
static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP, };

/* Global list of every L2CAP channel, protected by chan_list_lock. */
static LIST_HEAD(chan_list);
static DEFINE_RWLOCK(chan_list_lock);

/* Forward declarations for signalling helpers and the ERTM transmit
 * state machine defined later in this file. */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
				       u8 code, u8 ident, u16 dlen, void *data);
static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
			   void *data);
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);

static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
		     struct sk_buff_head *skbs, u8 event);
61
62/* ---- L2CAP channels ---- */
63
64static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
65 u16 cid)
66{
67 struct l2cap_chan *c;
68
69 list_for_each_entry(c, &conn->chan_l, list) {
70 if (c->dcid == cid)
71 return c;
72 }
73 return NULL;
74}
75
76static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
77 u16 cid)
78{
79 struct l2cap_chan *c;
80
81 list_for_each_entry(c, &conn->chan_l, list) {
82 if (c->scid == cid)
83 return c;
84 }
85 return NULL;
86}
87
88/* Find channel with given SCID.
89 * Returns locked channel. */
90static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
91 u16 cid)
92{
93 struct l2cap_chan *c;
94
95 mutex_lock(&conn->chan_lock);
96 c = __l2cap_get_chan_by_scid(conn, cid);
97 if (c)
98 l2cap_chan_lock(c);
99 mutex_unlock(&conn->chan_lock);
100
101 return c;
102}
103
104/* Find channel with given DCID.
105 * Returns locked channel.
106 */
107static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
108 u16 cid)
109{
110 struct l2cap_chan *c;
111
112 mutex_lock(&conn->chan_lock);
113 c = __l2cap_get_chan_by_dcid(conn, cid);
114 if (c)
115 l2cap_chan_lock(c);
116 mutex_unlock(&conn->chan_lock);
117
118 return c;
119}
120
121static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
122 u8 ident)
123{
124 struct l2cap_chan *c;
125
126 list_for_each_entry(c, &conn->chan_l, list) {
127 if (c->ident == ident)
128 return c;
129 }
130 return NULL;
131}
132
133static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
134 u8 ident)
135{
136 struct l2cap_chan *c;
137
138 mutex_lock(&conn->chan_lock);
139 c = __l2cap_get_chan_by_ident(conn, ident);
140 if (c)
141 l2cap_chan_lock(c);
142 mutex_unlock(&conn->chan_lock);
143
144 return c;
145}
146
147static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
148{
149 struct l2cap_chan *c;
150
151 list_for_each_entry(c, &chan_list, global_l) {
152 if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
153 return c;
154 }
155 return NULL;
156}
157
/* Bind a PSM to @chan, or allocate a dynamic one.
 *
 * A non-zero @psm is claimed as-is; -EADDRINUSE is returned if another
 * channel on the same source address already uses it.  With @psm == 0
 * the first free odd PSM in 0x1001-0x10ff is picked, or -EINVAL if the
 * range is exhausted.
 */
int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
{
	int err;

	write_lock(&chan_list_lock);

	if (psm && __l2cap_global_chan_by_addr(psm, src)) {
		err = -EADDRINUSE;
		goto done;
	}

	if (psm) {
		chan->psm = psm;
		chan->sport = psm;
		err = 0;
	} else {
		u16 p;

		/* Stride of 2 keeps the low octet odd, as required for
		 * valid dynamic PSM values. */
		err = -EINVAL;
		for (p = 0x1001; p < 0x1100; p += 2)
			if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
				chan->psm = cpu_to_le16(p);
				chan->sport = cpu_to_le16(p);
				err = 0;
				break;
			}
	}

done:
	write_unlock(&chan_list_lock);
	return err;
}
190
191int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
192{
193 write_lock(&chan_list_lock);
194
195 chan->scid = scid;
196
197 write_unlock(&chan_list_lock);
198
199 return 0;
200}
201
202static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
203{
204 u16 cid = L2CAP_CID_DYN_START;
205
206 for (; cid < L2CAP_CID_DYN_END; cid++) {
207 if (!__l2cap_get_chan_by_scid(conn, cid))
208 return cid;
209 }
210
211 return 0;
212}
213
/* Update the channel state and notify the owner through ops.
 * Caller holds the socket lock.
 */
static void __l2cap_state_change(struct l2cap_chan *chan, int state)
{
	BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
	       state_to_string(state));

	chan->state = state;
	chan->ops->state_change(chan, state);
}
222
/* Locked wrapper: takes the socket lock around __l2cap_state_change(). */
static void l2cap_state_change(struct l2cap_chan *chan, int state)
{
	struct sock *sk = chan->sk;

	lock_sock(sk);
	__l2cap_state_change(chan, state);
	release_sock(sk);
}
231
232static inline void __l2cap_chan_set_err(struct l2cap_chan *chan, int err)
233{
234 struct sock *sk = chan->sk;
235
236 sk->sk_err = err;
237}
238
/* Locked wrapper: takes the socket lock around __l2cap_chan_set_err(). */
static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
{
	struct sock *sk = chan->sk;

	lock_sock(sk);
	__l2cap_chan_set_err(chan, err);
	release_sock(sk);
}
247
/* (Re)arm the ERTM retransmission timer, but only when the monitor
 * timer is not already pending and a retransmit timeout has been
 * negotiated.
 */
static void __set_retrans_timer(struct l2cap_chan *chan)
{
	if (!delayed_work_pending(&chan->monitor_timer) &&
	    chan->retrans_timeout) {
		l2cap_set_timer(chan, &chan->retrans_timer,
				msecs_to_jiffies(chan->retrans_timeout));
	}
}
256
/* Switch from the retransmission timer to the ERTM monitor timer (the
 * two are mutually exclusive); arm it only when a monitor timeout was
 * negotiated.
 */
static void __set_monitor_timer(struct l2cap_chan *chan)
{
	__clear_retrans_timer(chan);
	if (chan->monitor_timeout) {
		l2cap_set_timer(chan, &chan->monitor_timer,
				msecs_to_jiffies(chan->monitor_timeout));
	}
}
265
266static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
267 u16 seq)
268{
269 struct sk_buff *skb;
270
271 skb_queue_walk(head, skb) {
272 if (bt_cb(skb)->control.txseq == seq)
273 return skb;
274 }
275
276 return NULL;
277}
278
279/* ---- L2CAP sequence number lists ---- */
280
281/* For ERTM, ordered lists of sequence numbers must be tracked for
282 * SREJ requests that are received and for frames that are to be
283 * retransmitted. These seq_list functions implement a singly-linked
284 * list in an array, where membership in the list can also be checked
285 * in constant time. Items can also be added to the tail of the list
286 * and removed from the head in constant time, without further memory
287 * allocs or frees.
288 */
289
290static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
291{
292 size_t alloc_size, i;
293
294 /* Allocated size is a power of 2 to map sequence numbers
295 * (which may be up to 14 bits) in to a smaller array that is
296 * sized for the negotiated ERTM transmit windows.
297 */
298 alloc_size = roundup_pow_of_two(size);
299
300 seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
301 if (!seq_list->list)
302 return -ENOMEM;
303
304 seq_list->mask = alloc_size - 1;
305 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
306 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
307 for (i = 0; i < alloc_size; i++)
308 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
309
310 return 0;
311}
312
/* Release the array allocated by l2cap_seq_list_init(). */
static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
{
	kfree(seq_list->list);
}
317
318static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
319 u16 seq)
320{
321 /* Constant-time check for list membership */
322 return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
323}
324
/* Remove @seq from the list and return it, or L2CAP_SEQ_LIST_CLEAR if
 * the list is empty or @seq is not found.  Removing the head is O(1);
 * removing an interior element walks the list to find its predecessor.
 */
static u16 l2cap_seq_list_remove(struct l2cap_seq_list *seq_list, u16 seq)
{
	u16 mask = seq_list->mask;

	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR) {
		/* In case someone tries to pop the head of an empty list */
		return L2CAP_SEQ_LIST_CLEAR;
	} else if (seq_list->head == seq) {
		/* Head can be removed in constant time */
		seq_list->head = seq_list->list[seq & mask];
		seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;

		/* The old head pointed at the tail sentinel: list is now
		 * empty. */
		if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
			seq_list->head = L2CAP_SEQ_LIST_CLEAR;
			seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
		}
	} else {
		/* Walk the list to find the sequence number */
		u16 prev = seq_list->head;
		while (seq_list->list[prev & mask] != seq) {
			prev = seq_list->list[prev & mask];
			if (prev == L2CAP_SEQ_LIST_TAIL)
				return L2CAP_SEQ_LIST_CLEAR;
		}

		/* Unlink the number from the list and clear it */
		seq_list->list[prev & mask] = seq_list->list[seq & mask];
		seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
		if (seq_list->tail == seq)
			seq_list->tail = prev;
	}
	return seq;
}
358
/* Pop and return the head of the list in constant time, or
 * L2CAP_SEQ_LIST_CLEAR when the list is empty.
 */
static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
{
	/* Remove the head in constant time */
	return l2cap_seq_list_remove(seq_list, seq_list->head);
}
364
365static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
366{
367 u16 i;
368
369 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
370 return;
371
372 for (i = 0; i <= seq_list->mask; i++)
373 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
374
375 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
376 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
377}
378
/* Append @seq to the tail of the list in constant time.  A sequence
 * number already on the list is left where it is (no duplicates).
 */
static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
{
	u16 mask = seq_list->mask;

	/* All appends happen in constant time */

	/* Already a member: slot is not CLEAR. */
	if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
		return;

	if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
		/* Empty list: new element becomes the head too. */
		seq_list->head = seq;
	else
		seq_list->list[seq_list->tail & mask] = seq;

	seq_list->tail = seq;
	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
}
396
/* Delayed work run when a channel's chan_timer expires: pick an error
 * reason based on the state the channel was stuck in, close it, and
 * drop the reference the timer held.
 */
static void l2cap_chan_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       chan_timer.work);
	struct l2cap_conn *conn = chan->conn;
	int reason;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	/* Lock ordering: conn->chan_lock before the channel lock. */
	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(chan);

	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (chan->state == BT_CONNECT &&
		 chan->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	l2cap_chan_close(chan, reason);

	l2cap_chan_unlock(chan);

	/* ops->close() runs without the channel lock held. */
	chan->ops->close(chan);
	mutex_unlock(&conn->chan_lock);

	l2cap_chan_put(chan);
}
426
/* Allocate and initialise a new channel with a single reference, and
 * link it onto the global channel list.  Returns NULL on allocation
 * failure.
 */
struct l2cap_chan *l2cap_chan_create(void)
{
	struct l2cap_chan *chan;

	/* NOTE(review): GFP_ATOMIC looks stricter than necessary here —
	 * confirm whether any caller really runs in atomic context. */
	chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
	if (!chan)
		return NULL;

	mutex_init(&chan->lock);

	write_lock(&chan_list_lock);
	list_add(&chan->global_l, &chan_list);
	write_unlock(&chan_list_lock);

	INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);

	chan->state = BT_OPEN;

	kref_init(&chan->kref);

	/* This flag is cleared in l2cap_chan_ready() */
	set_bit(CONF_NOT_COMPLETE, &chan->conf_state);

	BT_DBG("chan %p", chan);

	return chan;
}
454
/* kref release callback: unlink the channel from the global list and
 * free it.  Invoked from l2cap_chan_put() when the last ref drops.
 */
static void l2cap_chan_destroy(struct kref *kref)
{
	struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);

	BT_DBG("chan %p", chan);

	write_lock(&chan_list_lock);
	list_del(&chan->global_l);
	write_unlock(&chan_list_lock);

	kfree(chan);
}
467
/* Take an additional reference on the channel. */
void l2cap_chan_hold(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));

	kref_get(&c->kref);
}
474
/* Drop a reference; frees the channel via l2cap_chan_destroy() when the
 * count reaches zero.
 */
void l2cap_chan_put(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));

	kref_put(&c->kref, l2cap_chan_destroy);
}
481
/* Reset ERTM and security parameters to their specification defaults;
 * configuration may later negotiate other values.
 */
void l2cap_chan_set_defaults(struct l2cap_chan *chan)
{
	chan->fcs = L2CAP_FCS_CRC16;
	chan->max_tx = L2CAP_DEFAULT_MAX_TX;
	chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
	chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
	chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
	chan->sec_level = BT_SECURITY_LOW;

	set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
}
493
/* Attach @chan to @conn: assign CIDs and default MTUs according to the
 * channel type, take channel and hci_conn references, and link the
 * channel onto the connection's list.  Caller must hold
 * conn->chan_lock.
 */
void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
	       __le16_to_cpu(chan->psm), chan->dcid);

	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	chan->conn = conn;

	switch (chan->chan_type) {
	case L2CAP_CHAN_CONN_ORIENTED:
		if (conn->hcon->type == LE_LINK) {
			/* LE connection */
			chan->omtu = L2CAP_DEFAULT_MTU;
			/* The ATT fixed channel keeps its well-known CID;
			 * anything else gets a dynamic one. */
			if (chan->dcid == L2CAP_CID_ATT)
				chan->scid = L2CAP_CID_ATT;
			else
				chan->scid = l2cap_alloc_cid(conn);
		} else {
			/* Alloc CID for connection-oriented socket */
			chan->scid = l2cap_alloc_cid(conn);
			chan->omtu = L2CAP_DEFAULT_MTU;
		}
		break;

	case L2CAP_CHAN_CONN_LESS:
		/* Connectionless socket */
		chan->scid = L2CAP_CID_CONN_LESS;
		chan->dcid = L2CAP_CID_CONN_LESS;
		chan->omtu = L2CAP_DEFAULT_MTU;
		break;

	case L2CAP_CHAN_CONN_FIX_A2MP:
		chan->scid = L2CAP_CID_A2MP;
		chan->dcid = L2CAP_CID_A2MP;
		chan->omtu = L2CAP_A2MP_DEFAULT_MTU;
		chan->imtu = L2CAP_A2MP_DEFAULT_MTU;
		break;

	default:
		/* Raw socket can send/recv signalling messages only */
		chan->scid = L2CAP_CID_SIGNALING;
		chan->dcid = L2CAP_CID_SIGNALING;
		chan->omtu = L2CAP_DEFAULT_MTU;
	}

	/* Default (best-effort) local flow specification. */
	chan->local_id = L2CAP_BESTEFFORT_ID;
	chan->local_stype = L2CAP_SERV_BESTEFFORT;
	chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
	chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
	chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
	chan->local_flush_to = L2CAP_EFS_DEFAULT_FLUSH_TO;

	/* References released again in l2cap_chan_del(). */
	l2cap_chan_hold(chan);

	hci_conn_hold(conn->hcon);

	list_add(&chan->list, &conn->chan_l);
}
553
/* Locked wrapper: attach @chan to @conn under conn->chan_lock. */
void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	mutex_lock(&conn->chan_lock);
	__l2cap_chan_add(conn, chan);
	mutex_unlock(&conn->chan_lock);
}
560
561void l2cap_chan_del(struct l2cap_chan *chan, int err)
562{
563 struct l2cap_conn *conn = chan->conn;
564
565 __clear_chan_timer(chan);
566
567 BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
568
569 if (conn) {
570 struct amp_mgr *mgr = conn->hcon->amp_mgr;
571 /* Delete from channel list */
572 list_del(&chan->list);
573
574 l2cap_chan_put(chan);
575
576 chan->conn = NULL;
577
578 if (chan->chan_type != L2CAP_CHAN_CONN_FIX_A2MP)
579 hci_conn_drop(conn->hcon);
580
581 if (mgr && mgr->bredr_chan == chan)
582 mgr->bredr_chan = NULL;
583 }
584
585 if (chan->hs_hchan) {
586 struct hci_chan *hs_hchan = chan->hs_hchan;
587
588 BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan);
589 amp_disconnect_logical_link(hs_hchan);
590 }
591
592 chan->ops->teardown(chan, err);
593
594 if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
595 return;
596
597 switch(chan->mode) {
598 case L2CAP_MODE_BASIC:
599 break;
600
601 case L2CAP_MODE_ERTM:
602 __clear_retrans_timer(chan);
603 __clear_monitor_timer(chan);
604 __clear_ack_timer(chan);
605
606 skb_queue_purge(&chan->srej_q);
607
608 l2cap_seq_list_free(&chan->srej_list);
609 l2cap_seq_list_free(&chan->retrans_list);
610
611 /* fall through */
612
613 case L2CAP_MODE_STREAMING:
614 skb_queue_purge(&chan->tx_q);
615 break;
616 }
617
618 return;
619}
620
/* Close @chan with error @reason, choosing the shutdown path that
 * matches its current state: send a disconnect request for connected
 * BR/EDR channels, reject a pending incoming connection for BT_CONNECT2
 * channels, or tear down immediately otherwise.  Caller must hold
 * conn->chan_lock and the channel lock.
 */
void l2cap_chan_close(struct l2cap_chan *chan, int reason)
{
	struct l2cap_conn *conn = chan->conn;
	struct sock *sk = chan->sk;

	BT_DBG("chan %p state %s sk %p", chan, state_to_string(chan->state),
	       sk);

	switch (chan->state) {
	case BT_LISTEN:
		chan->ops->teardown(chan, 0);
		break;

	case BT_CONNECTED:
	case BT_CONFIG:
		/* Established BR/EDR channels go through the disconnect
		 * handshake; everything else is deleted directly. */
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
		    conn->hcon->type == ACL_LINK) {
			__set_chan_timer(chan, sk->sk_sndtimeo);
			l2cap_send_disconn_req(chan, reason);
		} else
			l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT2:
		/* Incoming connection still pending: reject it before
		 * deleting the channel. */
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
		    conn->hcon->type == ACL_LINK) {
			struct l2cap_conn_rsp rsp;
			__u16 result;

			if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags))
				result = L2CAP_CR_SEC_BLOCK;
			else
				result = L2CAP_CR_BAD_PSM;
			l2cap_state_change(chan, BT_DISCONN);

			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);
			rsp.result = cpu_to_le16(result);
			rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);
		}

		l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT:
	case BT_DISCONN:
		l2cap_chan_del(chan, reason);
		break;

	default:
		chan->ops->teardown(chan, 0);
		break;
	}
}
677
/* Map the channel type, PSM and requested security level to the HCI
 * authentication requirement used when securing the ACL link.  May
 * promote an SDP channel's security level from LOW to SDP as a side
 * effect.
 */
static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
{
	if (chan->chan_type == L2CAP_CHAN_RAW) {
		/* Raw (signalling-only) channels are used for dedicated
		 * bonding. */
		switch (chan->sec_level) {
		case BT_SECURITY_HIGH:
			return HCI_AT_DEDICATED_BONDING_MITM;
		case BT_SECURITY_MEDIUM:
			return HCI_AT_DEDICATED_BONDING;
		default:
			return HCI_AT_NO_BONDING;
		}
	} else if (chan->psm == __constant_cpu_to_le16(L2CAP_PSM_SDP)) {
		/* SDP never requires bonding. */
		if (chan->sec_level == BT_SECURITY_LOW)
			chan->sec_level = BT_SECURITY_SDP;

		if (chan->sec_level == BT_SECURITY_HIGH)
			return HCI_AT_NO_BONDING_MITM;
		else
			return HCI_AT_NO_BONDING;
	} else {
		switch (chan->sec_level) {
		case BT_SECURITY_HIGH:
			return HCI_AT_GENERAL_BONDING_MITM;
		case BT_SECURITY_MEDIUM:
			return HCI_AT_GENERAL_BONDING;
		default:
			return HCI_AT_NO_BONDING;
		}
	}
}
708
709/* Service level security */
710int l2cap_chan_check_security(struct l2cap_chan *chan)
711{
712 struct l2cap_conn *conn = chan->conn;
713 __u8 auth_type;
714
715 auth_type = l2cap_get_auth_type(chan);
716
717 return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
718}
719
/* Allocate the next signalling command identifier for @conn. */
static u8 l2cap_get_ident(struct l2cap_conn *conn)
{
	u8 id;

	/* Get next available identificator.
	 * 1 - 128 are used by kernel.
	 * 129 - 199 are reserved.
	 * 200 - 254 are used by utilities like l2ping, etc.
	 */

	spin_lock(&conn->lock);

	/* Wrap back to 1 after 128 so kernel idents stay in range. */
	if (++conn->tx_ident > 128)
		conn->tx_ident = 1;

	id = conn->tx_ident;

	spin_unlock(&conn->lock);

	return id;
}
741
/* Build and transmit one signalling command on the connection's
 * signalling channel.  Silently dropped if the skb cannot be allocated.
 */
static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
			   void *data)
{
	struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
	u8 flags;

	BT_DBG("code 0x%2.2x", code);

	if (!skb)
		return;

	/* Use non-flushable packets when the controller supports them. */
	if (lmp_no_flush_capable(conn->hcon->hdev))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	/* Signalling always forces the link active and gets top
	 * priority. */
	bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
	skb->priority = HCI_PRIO_MAX;

	hci_send_acl(conn->hchan, skb, flags);
}
763
764static bool __chan_is_moving(struct l2cap_chan *chan)
765{
766 return chan->move_state != L2CAP_MOVE_STABLE &&
767 chan->move_state != L2CAP_MOVE_WAIT_PREPARE;
768}
769
/* Queue one outgoing frame for @chan, routing it to the high-speed
 * (AMP) logical link when one is active and the channel is not
 * mid-move, otherwise to the BR/EDR ACL link.
 */
static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct hci_conn *hcon = chan->conn->hcon;
	u16 flags;

	BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
	       skb->priority);

	if (chan->hs_hcon && !__chan_is_moving(chan)) {
		if (chan->hs_hchan)
			hci_send_acl(chan->hs_hchan, skb, ACL_COMPLETE);
		else
			/* AMP connection exists but its HCI channel is
			 * gone: drop the frame. */
			kfree_skb(skb);

		return;
	}

	/* Honour the channel's flushable flag when choosing packet
	 * boundary flags. */
	if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
	    lmp_no_flush_capable(hcon->hdev))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
	hci_send_acl(chan->conn->hchan, skb, flags);
}
796
797static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
798{
799 control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
800 control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
801
802 if (enh & L2CAP_CTRL_FRAME_TYPE) {
803 /* S-Frame */
804 control->sframe = 1;
805 control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
806 control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
807
808 control->sar = 0;
809 control->txseq = 0;
810 } else {
811 /* I-Frame */
812 control->sframe = 0;
813 control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
814 control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
815
816 control->poll = 0;
817 control->super = 0;
818 }
819}
820
821static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
822{
823 control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
824 control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
825
826 if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
827 /* S-Frame */
828 control->sframe = 1;
829 control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
830 control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
831
832 control->sar = 0;
833 control->txseq = 0;
834 } else {
835 /* I-Frame */
836 control->sframe = 0;
837 control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
838 control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
839
840 control->poll = 0;
841 control->super = 0;
842 }
843}
844
845static inline void __unpack_control(struct l2cap_chan *chan,
846 struct sk_buff *skb)
847{
848 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
849 __unpack_extended_control(get_unaligned_le32(skb->data),
850 &bt_cb(skb)->control);
851 skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
852 } else {
853 __unpack_enhanced_control(get_unaligned_le16(skb->data),
854 &bt_cb(skb)->control);
855 skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
856 }
857}
858
859static u32 __pack_extended_control(struct l2cap_ctrl *control)
860{
861 u32 packed;
862
863 packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
864 packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
865
866 if (control->sframe) {
867 packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
868 packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
869 packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
870 } else {
871 packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
872 packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
873 }
874
875 return packed;
876}
877
878static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
879{
880 u16 packed;
881
882 packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
883 packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
884
885 if (control->sframe) {
886 packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
887 packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
888 packed |= L2CAP_CTRL_FRAME_TYPE;
889 } else {
890 packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
891 packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
892 }
893
894 return packed;
895}
896
897static inline void __pack_control(struct l2cap_chan *chan,
898 struct l2cap_ctrl *control,
899 struct sk_buff *skb)
900{
901 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
902 put_unaligned_le32(__pack_extended_control(control),
903 skb->data + L2CAP_HDR_SIZE);
904 } else {
905 put_unaligned_le16(__pack_enhanced_control(control),
906 skb->data + L2CAP_HDR_SIZE);
907 }
908}
909
910static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
911{
912 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
913 return L2CAP_EXT_HDR_SIZE;
914 else
915 return L2CAP_ENH_HDR_SIZE;
916}
917
/* Build a supervisory (S-frame) PDU carrying the pre-packed @control
 * field, appending an FCS when the channel uses CRC16.  Returns the skb
 * or ERR_PTR(-ENOMEM).
 */
static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
					       u32 control)
{
	struct sk_buff *skb;
	struct l2cap_hdr *lh;
	int hlen = __ertm_hdr_size(chan);

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	skb = bt_skb_alloc(hlen, GFP_KERNEL);

	if (!skb)
		return ERR_PTR(-ENOMEM);

	/* Basic header: payload length excludes the header itself. */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	/* FCS covers everything written so far. */
	if (chan->fcs == L2CAP_FCS_CRC16) {
		u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
		put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
	}

	skb->priority = HCI_PRIO_MAX;
	return skb;
}
950
/* Transmit an S-frame described by @control, updating F-bit, RNR and
 * acknowledgment bookkeeping on the way out.  No-ops for I-frame
 * controls or while the channel is moving between controllers.
 */
static void l2cap_send_sframe(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;
	u32 control_field;

	BT_DBG("chan %p, control %p", chan, control);

	if (!control->sframe)
		return;

	if (__chan_is_moving(chan))
		return;

	/* A pending F-bit is attached to the next non-poll S-frame. */
	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
	    !control->poll)
		control->final = 1;

	/* Track whether our last advertised state was busy (RNR). */
	if (control->super == L2CAP_SUPER_RR)
		clear_bit(CONN_RNR_SENT, &chan->conn_state);
	else if (control->super == L2CAP_SUPER_RNR)
		set_bit(CONN_RNR_SENT, &chan->conn_state);

	/* Any frame except SREJ acknowledges up to reqseq, so the ack
	 * timer can be stopped. */
	if (control->super != L2CAP_SUPER_SREJ) {
		chan->last_acked_seq = control->reqseq;
		__clear_ack_timer(chan);
	}

	BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
	       control->final, control->poll, control->super);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		control_field = __pack_extended_control(control);
	else
		control_field = __pack_enhanced_control(control);

	skb = l2cap_create_sframe_pdu(chan, control_field);
	if (!IS_ERR(skb))
		l2cap_do_send(chan, skb);
}
991
992static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
993{
994 struct l2cap_ctrl control;
995
996 BT_DBG("chan %p, poll %d", chan, poll);
997
998 memset(&control, 0, sizeof(control));
999 control.sframe = 1;
1000 control.poll = poll;
1001
1002 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
1003 control.super = L2CAP_SUPER_RNR;
1004 else
1005 control.super = L2CAP_SUPER_RR;
1006
1007 control.reqseq = chan->buffer_seq;
1008 l2cap_send_sframe(chan, &control);
1009}
1010
/* True when no connect request is outstanding for this channel. */
static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
{
	return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
}
1015
/* Decide whether this channel should try an AMP (high-speed)
 * controller: high-speed must be enabled on the connection, the remote
 * must support the A2MP fixed channel, at least one non-BR/EDR AMP
 * controller must be up locally, and the channel policy must prefer
 * AMP.
 */
static bool __amp_capable(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct hci_dev *hdev;
	bool amp_available = false;

	if (!conn->hs_enabled)
		return false;

	if (!(conn->fixed_chan_mask & L2CAP_FC_A2MP))
		return false;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		if (hdev->amp_type != AMP_TYPE_BREDR &&
		    test_bit(HCI_UP, &hdev->flags)) {
			amp_available = true;
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);

	if (chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED)
		return amp_available;

	return false;
}
1043
/* Placeholder for Extended Flow Specification validation; currently
 * accepts all EFS parameters unconditionally.
 */
static bool l2cap_check_efs(struct l2cap_chan *chan)
{
	/* Check EFS parameters */
	return true;
}
1049
1050void l2cap_send_conn_req(struct l2cap_chan *chan)
1051{
1052 struct l2cap_conn *conn = chan->conn;
1053 struct l2cap_conn_req req;
1054
1055 req.scid = cpu_to_le16(chan->scid);
1056 req.psm = chan->psm;
1057
1058 chan->ident = l2cap_get_ident(conn);
1059
1060 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
1061
1062 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
1063}
1064
1065static void l2cap_send_create_chan_req(struct l2cap_chan *chan, u8 amp_id)
1066{
1067 struct l2cap_create_chan_req req;
1068 req.scid = cpu_to_le16(chan->scid);
1069 req.psm = chan->psm;
1070 req.amp_id = amp_id;
1071
1072 chan->ident = l2cap_get_ident(chan->conn);
1073
1074 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_REQ,
1075 sizeof(req), &req);
1076}
1077
/* Prepare an ERTM channel for a controller move: stop all timers, mark
 * transmitted frames for one retransmission, drop SREJ state, and park
 * the state machines until the move completes.  No-op for non-ERTM
 * channels.
 */
static void l2cap_move_setup(struct l2cap_chan *chan)
{
	struct sk_buff *skb;

	BT_DBG("chan %p", chan);

	if (chan->mode != L2CAP_MODE_ERTM)
		return;

	__clear_retrans_timer(chan);
	__clear_monitor_timer(chan);
	__clear_ack_timer(chan);

	chan->retry_count = 0;
	/* Frames at the front of tx_q have already been transmitted
	 * (non-zero retries); reset those to a single retry.  The first
	 * unsent frame ends the walk. */
	skb_queue_walk(&chan->tx_q, skb) {
		if (bt_cb(skb)->control.retries)
			bt_cb(skb)->control.retries = 1;
		else
			break;
	}

	chan->expected_tx_seq = chan->buffer_seq;

	clear_bit(CONN_REJ_ACT, &chan->conn_state);
	clear_bit(CONN_SREJ_ACT, &chan->conn_state);
	l2cap_seq_list_clear(&chan->retrans_list);
	l2cap_seq_list_clear(&chan->srej_list);
	skb_queue_purge(&chan->srej_q);

	chan->tx_state = L2CAP_TX_STATE_XMIT;
	chan->rx_state = L2CAP_RX_STATE_MOVE;

	/* Hold off transmission until the move is resolved. */
	set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
}
1112
/* Finish a controller move: return the channel to the stable move
 * state and, for ERTM channels, resynchronise via a poll (initiator
 * waits for F-bit, responder waits for P-bit).
 */
static void l2cap_move_done(struct l2cap_chan *chan)
{
	u8 move_role = chan->move_role;
	BT_DBG("chan %p", chan);

	chan->move_state = L2CAP_MOVE_STABLE;
	chan->move_role = L2CAP_MOVE_ROLE_NONE;

	if (chan->mode != L2CAP_MODE_ERTM)
		return;

	switch (move_role) {
	case L2CAP_MOVE_ROLE_INITIATOR:
		l2cap_tx(chan, NULL, NULL, L2CAP_EV_EXPLICIT_POLL);
		chan->rx_state = L2CAP_RX_STATE_WAIT_F;
		break;
	case L2CAP_MOVE_ROLE_RESPONDER:
		chan->rx_state = L2CAP_RX_STATE_WAIT_P;
		break;
	}
}
1134
/* Move the channel to BT_CONNECTED and notify its owner. */
static void l2cap_chan_ready(struct l2cap_chan *chan)
{
	/* This clears all conf flags, including CONF_NOT_COMPLETE */
	chan->conf_state = 0;
	__clear_chan_timer(chan);

	chan->state = BT_CONNECTED;

	chan->ops->ready(chan);
}
1145
/* Kick off channel establishment: discover AMP controllers when the
 * channel may use high speed, otherwise send a plain connection
 * request.
 */
static void l2cap_start_connection(struct l2cap_chan *chan)
{
	if (!__amp_capable(chan)) {
		l2cap_send_conn_req(chan);
		return;
	}

	BT_DBG("chan %p AMP capable: discover AMPs", chan);
	a2mp_discover_amp(chan);
}
1155
/* Begin establishing @chan.  LE channels are ready immediately.  For
 * BR/EDR, the connection request is only sent once the remote's feature
 * mask is known and security is satisfied; otherwise a feature-mask
 * information request is issued first.
 */
static void l2cap_do_start(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;

	if (conn->hcon->type == LE_LINK) {
		l2cap_chan_ready(chan);
		return;
	}

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
		/* Feature exchange still in flight: the info response
		 * handler restarts pending channels. */
		if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
			return;

		if (l2cap_chan_check_security(chan) &&
		    __l2cap_no_conn_pending(chan)) {
			l2cap_start_connection(chan);
		}
	} else {
		struct l2cap_info_req req;
		req.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

		l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
			       sizeof(req), &req);
	}
}
1186
1187static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1188{
1189 u32 local_feat_mask = l2cap_feat_mask;
1190 if (!disable_ertm)
1191 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1192
1193 switch (mode) {
1194 case L2CAP_MODE_ERTM:
1195 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1196 case L2CAP_MODE_STREAMING:
1197 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
1198 default:
1199 return 0x00;
1200 }
1201}
1202
/* Send an L2CAP Disconnect Request for @chan and move it to BT_DISCONN,
 * recording @err as the channel error.  A2MP fixed channels have no
 * signalling exchange, so they just change state.
 */
static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
{
	struct sock *sk = chan->sk;
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_disconn_req req;

	if (!conn)
		return;

	/* Stop all ERTM timers before tearing the channel down */
	if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);
	}

	if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
		l2cap_state_change(chan, BT_DISCONN);
		return;
	}

	req.dcid = cpu_to_le16(chan->dcid);
	req.scid = cpu_to_le16(chan->scid);
	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
		       sizeof(req), &req);

	/* State and error are updated under the socket lock */
	lock_sock(sk);
	__l2cap_state_change(chan, BT_DISCONN);
	__l2cap_chan_set_err(chan, err);
	release_sock(sk);
}
1233
1234/* ---- L2CAP connections ---- */
/* Drive connection setup for every connection-oriented channel on @conn.
 *
 * Channels in BT_CONNECT get a Connect Request sent (or are closed if
 * their required mode is unsupported by the remote); channels in
 * BT_CONNECT2 get a Connect Response, possibly pending authorization or
 * authentication, followed by the first Configure Request on success.
 */
static void l2cap_conn_start(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan, *tmp;

	BT_DBG("conn %p", conn);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
		struct sock *sk = chan->sk;

		l2cap_chan_lock(chan);

		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			/* Wait for security; skip if a request is already
			 * outstanding.
			 */
			if (!l2cap_chan_check_security(chan) ||
			    !__l2cap_no_conn_pending(chan)) {
				l2cap_chan_unlock(chan);
				continue;
			}

			/* Close when the configured mode is unsupported by
			 * the remote and no fallback is allowed.
			 */
			if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
			    && test_bit(CONF_STATE2_DEVICE,
					&chan->conf_state)) {
				l2cap_chan_close(chan, ECONNRESET);
				l2cap_chan_unlock(chan);
				continue;
			}

			l2cap_start_connection(chan);

		} else if (chan->state == BT_CONNECT2) {
			struct l2cap_conn_rsp rsp;
			char buf[128];
			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);

			if (l2cap_chan_check_security(chan)) {
				lock_sock(sk);
				if (test_bit(BT_SK_DEFER_SETUP,
					     &bt_sk(sk)->flags)) {
					/* Userspace must authorize first */
					rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
					rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
					chan->ops->defer(chan);

				} else {
					__l2cap_state_change(chan, BT_CONFIG);
					rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
					rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
				}
				release_sock(sk);
			} else {
				rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
				rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
			}

			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			/* Only move on to configuration after a successful
			 * response, and only once.
			 */
			if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
			    rsp.result != L2CAP_CR_SUCCESS) {
				l2cap_chan_unlock(chan);
				continue;
			}

			set_bit(CONF_REQ_SENT, &chan->conf_state);
			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
				       l2cap_build_conf_req(chan, buf), buf);
			chan->num_conf_req++;
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
1315
/* Find socket with cid and source/destination bdaddr.
 * Returns closest match, locked.
 *
 * An exact address match wins immediately; otherwise the last channel
 * matching with BDADDR_ANY wildcards is remembered and returned.
 * @state of 0 matches any channel state.
 */
static struct l2cap_chan *l2cap_global_chan_by_scid(int state, u16 cid,
						    bdaddr_t *src,
						    bdaddr_t *dst)
{
	struct l2cap_chan *c, *c1 = NULL;

	read_lock(&chan_list_lock);

	list_for_each_entry(c, &chan_list, global_l) {
		struct sock *sk = c->sk;

		if (state && c->state != state)
			continue;

		if (c->scid == cid) {
			int src_match, dst_match;
			int src_any, dst_any;

			/* Exact match. */
			src_match = !bacmp(&bt_sk(sk)->src, src);
			dst_match = !bacmp(&bt_sk(sk)->dst, dst);
			if (src_match && dst_match) {
				read_unlock(&chan_list_lock);
				return c;
			}

			/* Closest match */
			src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
			dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
			if ((src_match && dst_any) || (src_any && dst_match) ||
			    (src_any && dst_any))
				c1 = c;
		}
	}

	read_unlock(&chan_list_lock);

	return c1;
}
1358
/* On a newly-ready LE link, spawn a child channel from any listening
 * ATT server socket and attach it to the connection.  An existing
 * client ATT channel on the same connection takes precedence.
 */
static void l2cap_le_conn_ready(struct l2cap_conn *conn)
{
	struct sock *parent;
	struct l2cap_chan *chan, *pchan;

	BT_DBG("");

	/* Check if we have socket listening on cid */
	pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_ATT,
					  conn->src, conn->dst);
	if (!pchan)
		return;

	/* Client ATT sockets should override the server one */
	if (__l2cap_get_chan_by_dcid(conn, L2CAP_CID_ATT))
		return;

	parent = pchan->sk;

	lock_sock(parent);

	chan = pchan->ops->new_connection(pchan);
	if (!chan)
		goto clean;

	chan->dcid = L2CAP_CID_ATT;

	/* The child inherits the connection's address pair */
	bacpy(&bt_sk(chan->sk)->src, conn->src);
	bacpy(&bt_sk(chan->sk)->dst, conn->dst);

	__l2cap_chan_add(conn, chan);

clean:
	release_sock(parent);
}
1394
/* Called when the underlying link is fully established.  Elevates LE
 * security if needed, hands LE links to l2cap_le_conn_ready(), and then
 * advances every attached channel: connectionless/raw channels become
 * connected immediately, connection-oriented ones continue setup.
 */
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan;
	struct hci_conn *hcon = conn->hcon;

	BT_DBG("conn %p", conn);

	/* For outgoing pairing which doesn't necessarily have an
	 * associated socket (e.g. mgmt_pair_device).
	 */
	if (hcon->out && hcon->type == LE_LINK)
		smp_conn_security(hcon, hcon->pending_sec_level);

	mutex_lock(&conn->chan_lock);

	if (hcon->type == LE_LINK)
		l2cap_le_conn_ready(conn);

	list_for_each_entry(chan, &conn->chan_l, list) {

		l2cap_chan_lock(chan);

		/* A2MP fixed channels are managed separately */
		if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (hcon->type == LE_LINK) {
			/* Only ready once the SMP security level holds */
			if (smp_conn_security(hcon, chan->sec_level))
				l2cap_chan_ready(chan);

		} else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			struct sock *sk = chan->sk;
			__clear_chan_timer(chan);
			lock_sock(sk);
			__l2cap_state_change(chan, BT_CONNECTED);
			sk->sk_state_change(sk);
			release_sock(sk);

		} else if (chan->state == BT_CONNECT) {
			l2cap_do_start(chan);
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
1443
1444/* Notify sockets that we cannot guaranty reliability anymore */
1445static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1446{
1447 struct l2cap_chan *chan;
1448
1449 BT_DBG("conn %p", conn);
1450
1451 mutex_lock(&conn->chan_lock);
1452
1453 list_for_each_entry(chan, &conn->chan_l, list) {
1454 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1455 l2cap_chan_set_err(chan, err);
1456 }
1457
1458 mutex_unlock(&conn->chan_lock);
1459}
1460
/* Information Request timed out without a response: mark the feature
 * exchange as done (with whatever defaults remain) and resume channel
 * setup so waiting channels are not stuck forever.
 */
static void l2cap_info_timeout(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       info_timer.work);

	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
	conn->info_ident = 0;

	l2cap_conn_start(conn);
}
1471
1472/*
1473 * l2cap_user
1474 * External modules can register l2cap_user objects on l2cap_conn. The ->probe
1475 * callback is called during registration. The ->remove callback is called
1476 * during unregistration.
 * An l2cap_user object is unregistered either explicitly, or implicitly when
 * the underlying l2cap_conn object is deleted. This guarantees that l2cap->hcon,
1479 * l2cap->hchan, .. are valid as long as the remove callback hasn't been called.
1480 * External modules must own a reference to the l2cap_conn object if they intend
1481 * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
1482 * any time if they don't.
1483 */
1484
/* Register @user on @conn, invoking user->probe() under the hci_dev
 * lock.  Returns 0 on success, -EINVAL if already registered, -ENODEV
 * if the connection has already been torn down, or probe()'s error.
 */
int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
{
	struct hci_dev *hdev = conn->hcon->hdev;
	int ret;

	/* We need to check whether l2cap_conn is registered. If it is not, we
	 * must not register the l2cap_user. l2cap_conn_del() unregisters
	 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
	 * relies on the parent hci_conn object to be locked. This itself relies
	 * on the hci_dev object to be locked. So we must lock the hci device
	 * here, too. */

	hci_dev_lock(hdev);

	/* Non-NULL list pointers mean the user is already registered */
	if (user->list.next || user->list.prev) {
		ret = -EINVAL;
		goto out_unlock;
	}

	/* conn->hchan is NULL after l2cap_conn_del() was called */
	if (!conn->hchan) {
		ret = -ENODEV;
		goto out_unlock;
	}

	ret = user->probe(conn, user);
	if (ret)
		goto out_unlock;

	list_add(&user->list, &conn->users);
	ret = 0;

out_unlock:
	hci_dev_unlock(hdev);
	return ret;
}
EXPORT_SYMBOL(l2cap_register_user);
1522
/* Unregister @user from @conn and invoke its ->remove() callback.
 * Safe to call for a user that was never registered (NULL list
 * pointers make it a no-op).  Runs under the hci_dev lock.
 */
void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
{
	struct hci_dev *hdev = conn->hcon->hdev;

	hci_dev_lock(hdev);

	if (!user->list.next || !user->list.prev)
		goto out_unlock;

	list_del(&user->list);
	/* NULL the pointers so re-registration is detectable */
	user->list.next = NULL;
	user->list.prev = NULL;
	user->remove(conn, user);

out_unlock:
	hci_dev_unlock(hdev);
}
EXPORT_SYMBOL(l2cap_unregister_user);
1541
/* Detach and notify every registered l2cap_user on @conn.  Re-reads the
 * list head each iteration since ->remove() may modify the list.
 */
static void l2cap_unregister_all_users(struct l2cap_conn *conn)
{
	struct l2cap_user *user;

	while (!list_empty(&conn->users)) {
		user = list_first_entry(&conn->users, struct l2cap_user, list);
		list_del(&user->list);
		user->list.next = NULL;
		user->list.prev = NULL;
		user->remove(conn, user);
	}
}
1554
/* Tear down the L2CAP connection attached to @hcon: notify users, kill
 * every channel with error @err, release the HCI channel and timers,
 * detach from the hci_conn and drop the connection reference.
 */
static void l2cap_conn_del(struct hci_conn *hcon, int err)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan, *l;

	if (!conn)
		return;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	/* Any partially reassembled frame is discarded */
	kfree_skb(conn->rx_skb);

	l2cap_unregister_all_users(conn);

	mutex_lock(&conn->chan_lock);

	/* Kill channels */
	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
		/* Hold a ref so the channel survives until ops->close() */
		l2cap_chan_hold(chan);
		l2cap_chan_lock(chan);

		l2cap_chan_del(chan, err);

		l2cap_chan_unlock(chan);

		chan->ops->close(chan);
		l2cap_chan_put(chan);
	}

	mutex_unlock(&conn->chan_lock);

	hci_chan_del(conn->hchan);

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		cancel_delayed_work_sync(&conn->info_timer);

	if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) {
		cancel_delayed_work_sync(&conn->security_timer);
		smp_chan_destroy(conn);
	}

	hcon->l2cap_data = NULL;
	/* NULL hchan signals l2cap_register_user() that we are gone */
	conn->hchan = NULL;
	l2cap_conn_put(conn);
}
1600
/* SMP pairing did not complete in time: destroy the SMP context and
 * tear the connection down with ETIMEDOUT.  The test_and_clear guards
 * against racing with a pairing that just finished.
 */
static void security_timeout(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       security_timer.work);

	BT_DBG("conn %p", conn);

	if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags)) {
		smp_chan_destroy(conn);
		l2cap_conn_del(conn->hcon, ETIMEDOUT);
	}
}
1613
/* Get or create the l2cap_conn for @hcon.  Returns the existing
 * connection if one is already attached, a newly initialised one
 * otherwise, or NULL on allocation failure.
 */
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct hci_chan *hchan;

	if (conn)
		return conn;

	hchan = hci_chan_create(hcon);
	if (!hchan)
		return NULL;

	conn = kzalloc(sizeof(struct l2cap_conn), GFP_KERNEL);
	if (!conn) {
		hci_chan_del(hchan);
		return NULL;
	}

	kref_init(&conn->ref);
	hcon->l2cap_data = conn;
	conn->hcon = hcon;
	/* Hold the hci_conn for the lifetime of this l2cap_conn;
	 * released in l2cap_conn_free().
	 */
	hci_conn_get(conn->hcon);
	conn->hchan = hchan;

	BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);

	/* LE links use the LE MTU when the controller advertises one */
	switch (hcon->type) {
	case LE_LINK:
		if (hcon->hdev->le_mtu) {
			conn->mtu = hcon->hdev->le_mtu;
			break;
		}
		/* fall through */
	default:
		conn->mtu = hcon->hdev->acl_mtu;
		break;
	}

	conn->src = &hcon->hdev->bdaddr;
	conn->dst = &hcon->dst;

	conn->feat_mask = 0;

	if (hcon->type == ACL_LINK)
		conn->hs_enabled = test_bit(HCI_HS_ENABLED,
					    &hcon->hdev->dev_flags);

	spin_lock_init(&conn->lock);
	mutex_init(&conn->chan_lock);

	INIT_LIST_HEAD(&conn->chan_l);
	INIT_LIST_HEAD(&conn->users);

	/* LE links pair via SMP (security timer); BR/EDR exchanges
	 * feature masks (info timer).  The two timers share storage.
	 */
	if (hcon->type == LE_LINK)
		INIT_DELAYED_WORK(&conn->security_timer, security_timeout);
	else
		INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);

	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	return conn;
}
1676
/* kref release callback: drop the hci_conn reference taken in
 * l2cap_conn_add() and free the connection object.
 */
static void l2cap_conn_free(struct kref *ref)
{
	struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);

	hci_conn_put(conn->hcon);
	kfree(conn);
}
1684
/* Take a reference on @conn; paired with l2cap_conn_put(). */
void l2cap_conn_get(struct l2cap_conn *conn)
{
	kref_get(&conn->ref);
}
EXPORT_SYMBOL(l2cap_conn_get);
1690
/* Drop a reference on @conn; frees it via l2cap_conn_free() at zero. */
void l2cap_conn_put(struct l2cap_conn *conn)
{
	kref_put(&conn->ref, l2cap_conn_free);
}
EXPORT_SYMBOL(l2cap_conn_put);
1696
1697/* ---- Socket interface ---- */
1698
/* Find socket with psm and source / destination bdaddr.
 * Returns closest match.
 *
 * Same matching policy as l2cap_global_chan_by_scid(): exact address
 * pair wins immediately, otherwise the last BDADDR_ANY wildcard match
 * is returned.  @state of 0 matches any channel state.
 */
static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
						   bdaddr_t *src,
						   bdaddr_t *dst)
{
	struct l2cap_chan *c, *c1 = NULL;

	read_lock(&chan_list_lock);

	list_for_each_entry(c, &chan_list, global_l) {
		struct sock *sk = c->sk;

		if (state && c->state != state)
			continue;

		if (c->psm == psm) {
			int src_match, dst_match;
			int src_any, dst_any;

			/* Exact match. */
			src_match = !bacmp(&bt_sk(sk)->src, src);
			dst_match = !bacmp(&bt_sk(sk)->dst, dst);
			if (src_match && dst_match) {
				read_unlock(&chan_list_lock);
				return c;
			}

			/* Closest match */
			src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
			dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
			if ((src_match && dst_any) || (src_any && dst_match) ||
			    (src_any && dst_any))
				c1 = c;
		}
	}

	read_unlock(&chan_list_lock);

	return c1;
}
1741
/* Initiate an outgoing L2CAP connection on @chan.
 *
 * @psm or @cid selects the remote service (PSM for connection-oriented
 * channels, fixed CID e.g. for ATT); @dst/@dst_type name the remote
 * device and decide whether an LE or ACL link is used.
 *
 * Returns 0 on success (including when a connect is already in
 * progress) or a negative errno.
 */
int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
		       bdaddr_t *dst, u8 dst_type)
{
	struct sock *sk = chan->sk;
	bdaddr_t *src = &bt_sk(sk)->src;
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	__u8 auth_type;
	int err;

	BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", src, dst,
	       dst_type, __le16_to_cpu(psm));

	hdev = hci_get_route(dst, src);
	if (!hdev)
		return -EHOSTUNREACH;

	hci_dev_lock(hdev);

	l2cap_chan_lock(chan);

	/* PSM must be odd and lsb of upper byte must be 0 */
	if ((__le16_to_cpu(psm) & 0x0101) != 0x0001 && !cid &&
	    chan->chan_type != L2CAP_CHAN_RAW) {
		err = -EINVAL;
		goto done;
	}

	/* Connection-oriented channels need a PSM or a fixed CID */
	if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !(psm || cid)) {
		err = -EINVAL;
		goto done;
	}

	/* ERTM/streaming only when not administratively disabled */
	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		break;
	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		if (!disable_ertm)
			break;
		/* fall through */
	default:
		err = -ENOTSUPP;
		goto done;
	}

	switch (chan->state) {
	case BT_CONNECT:
	case BT_CONNECT2:
	case BT_CONFIG:
		/* Already connecting */
		err = 0;
		goto done;

	case BT_CONNECTED:
		/* Already connected */
		err = -EISCONN;
		goto done;

	case BT_OPEN:
	case BT_BOUND:
		/* Can connect */
		break;

	default:
		err = -EBADFD;
		goto done;
	}

	/* Set destination address and psm */
	lock_sock(sk);
	bacpy(&bt_sk(sk)->dst, dst);
	release_sock(sk);

	chan->psm = psm;
	chan->dcid = cid;

	auth_type = l2cap_get_auth_type(chan);

	if (bdaddr_type_is_le(dst_type))
		hcon = hci_connect(hdev, LE_LINK, dst, dst_type,
				   chan->sec_level, auth_type);
	else
		hcon = hci_connect(hdev, ACL_LINK, dst, dst_type,
				   chan->sec_level, auth_type);

	if (IS_ERR(hcon)) {
		err = PTR_ERR(hcon);
		goto done;
	}

	conn = l2cap_conn_add(hcon);
	if (!conn) {
		hci_conn_drop(hcon);
		err = -ENOMEM;
		goto done;
	}

	/* A fixed CID may only be used by one channel per connection */
	if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
		hci_conn_drop(hcon);
		err = -EBUSY;
		goto done;
	}

	/* Update source addr of the socket */
	bacpy(src, conn->src);

	/* Drop the channel lock while attaching to the connection;
	 * presumably required by lock ordering with conn->chan_lock —
	 * NOTE(review): verify against l2cap_chan_add().
	 */
	l2cap_chan_unlock(chan);
	l2cap_chan_add(conn, chan);
	l2cap_chan_lock(chan);

	/* l2cap_chan_add takes its own ref so we can drop this one */
	hci_conn_drop(hcon);

	l2cap_state_change(chan, BT_CONNECT);
	__set_chan_timer(chan, sk->sk_sndtimeo);

	if (hcon->state == BT_CONNECTED) {
		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			__clear_chan_timer(chan);
			if (l2cap_chan_check_security(chan))
				l2cap_state_change(chan, BT_CONNECTED);
		} else
			l2cap_do_start(chan);
	}

	err = 0;

done:
	l2cap_chan_unlock(chan);
	hci_dev_unlock(hdev);
	hci_dev_put(hdev);
	return err;
}
1877
/* Block (interruptibly) until all outstanding ERTM frames on the
 * socket's channel have been acknowledged or the channel goes away.
 * Returns 0 on success, -EINTR-style errno on signal, or a pending
 * socket error.  Called with the socket lock held; drops it around
 * each sleep.
 */
int __l2cap_wait_ack(struct sock *sk)
{
	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;
	int timeo = HZ/5;

	add_wait_queue(sk_sleep(sk), &wait);
	set_current_state(TASK_INTERRUPTIBLE);
	while (chan->unacked_frames > 0 && chan->conn) {
		if (!timeo)
			timeo = HZ/5;

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}

		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);
		set_current_state(TASK_INTERRUPTIBLE);

		err = sock_error(sk);
		if (err)
			break;
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);
	return err;
}
1909
/* ERTM monitor timer expired: feed the MONITOR_TO event into the TX
 * state machine.  Drops the channel reference that was taken when the
 * timer was armed; bails out if the channel lost its connection.
 */
static void l2cap_monitor_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       monitor_timer.work);

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	if (!chan->conn) {
		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
		return;
	}

	l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
1930
/* ERTM retransmission timer expired: feed the RETRANS_TO event into the
 * TX state machine.  Mirrors l2cap_monitor_timeout(): drops the timer's
 * channel reference and bails out on a dead connection.
 */
static void l2cap_retrans_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       retrans_timer.work);

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	if (!chan->conn) {
		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
		return;
	}

	l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
1950
/* Transmit queued I-frames in streaming mode: every frame in @skbs is
 * stamped with the next tx sequence number, optionally trailed by a
 * CRC16 FCS, and sent immediately (no acknowledgements, no
 * retransmission).  Sending is suspended while an AMP move is ongoing.
 */
static void l2cap_streaming_send(struct l2cap_chan *chan,
				 struct sk_buff_head *skbs)
{
	struct sk_buff *skb;
	struct l2cap_ctrl *control;

	BT_DBG("chan %p, skbs %p", chan, skbs);

	if (__chan_is_moving(chan))
		return;

	skb_queue_splice_tail_init(skbs, &chan->tx_q);

	while (!skb_queue_empty(&chan->tx_q)) {

		skb = skb_dequeue(&chan->tx_q);

		bt_cb(skb)->control.retries = 1;
		control = &bt_cb(skb)->control;

		/* Streaming mode carries no acknowledgement info */
		control->reqseq = 0;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		l2cap_do_send(chan, skb);

		BT_DBG("Sent txseq %u", control->txseq);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->frames_sent++;
	}
}
1989
/* Transmit pending ERTM I-frames up to the remote's transmit window.
 *
 * Each frame gets the current reqseq (piggy-backed ack), the next
 * txseq, packed control field and optional FCS; a clone is sent so the
 * original stays queued for possible retransmission.  Returns the
 * number of frames sent, 0 when blocked (remote busy, AMP move, window
 * full), or -ENOTCONN.
 */
static int l2cap_ertm_send(struct l2cap_chan *chan)
{
	struct sk_buff *skb, *tx_skb;
	struct l2cap_ctrl *control;
	int sent = 0;

	BT_DBG("chan %p", chan);

	if (chan->state != BT_CONNECTED)
		return -ENOTCONN;

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return 0;

	if (__chan_is_moving(chan))
		return 0;

	while (chan->tx_send_head &&
	       chan->unacked_frames < chan->remote_tx_win &&
	       chan->tx_state == L2CAP_TX_STATE_XMIT) {

		skb = chan->tx_send_head;

		bt_cb(skb)->control.retries = 1;
		control = &bt_cb(skb)->control;

		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control->final = 1;

		/* Piggy-back an acknowledgement of everything received */
		control->reqseq = chan->buffer_seq;
		chan->last_acked_seq = chan->buffer_seq;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		/* Clone after data has been modified. Data is assumed to be
		   read-only (for locking purposes) on cloned sk_buffs.
		 */
		tx_skb = skb_clone(skb, GFP_KERNEL);

		if (!tx_skb)
			break;

		__set_retrans_timer(chan);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->unacked_frames++;
		chan->frames_sent++;
		sent++;

		/* Advance tx_send_head; the frame itself stays on tx_q
		 * until acknowledged.
		 */
		if (skb_queue_is_last(&chan->tx_q, skb))
			chan->tx_send_head = NULL;
		else
			chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);

		l2cap_do_send(chan, tx_skb);
		BT_DBG("Sent txseq %u", control->txseq);
	}

	BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
	       chan->unacked_frames, skb_queue_len(&chan->tx_q));

	return sent;
}
2059
2060static void l2cap_ertm_resend(struct l2cap_chan *chan)
2061{
2062 struct l2cap_ctrl control;
2063 struct sk_buff *skb;
2064 struct sk_buff *tx_skb;
2065 u16 seq;
2066
2067 BT_DBG("chan %p", chan);
2068
2069 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2070 return;
2071
2072 if (__chan_is_moving(chan))
2073 return;
2074
2075 while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
2076 seq = l2cap_seq_list_pop(&chan->retrans_list);
2077
2078 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
2079 if (!skb) {
2080 BT_DBG("Error: Can't retransmit seq %d, frame missing",
2081 seq);
2082 continue;
2083 }
2084
2085 bt_cb(skb)->control.retries++;
2086 control = bt_cb(skb)->control;
2087
2088 if (chan->max_tx != 0 &&
2089 bt_cb(skb)->control.retries > chan->max_tx) {
2090 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
2091 l2cap_send_disconn_req(chan, ECONNRESET);
2092 l2cap_seq_list_clear(&chan->retrans_list);
2093 break;
2094 }
2095
2096 control.reqseq = chan->buffer_seq;
2097 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
2098 control.final = 1;
2099 else
2100 control.final = 0;
2101
2102 if (skb_cloned(skb)) {
2103 /* Cloned sk_buffs are read-only, so we need a
2104 * writeable copy
2105 */
2106 tx_skb = skb_copy(skb, GFP_KERNEL);
2107 } else {
2108 tx_skb = skb_clone(skb, GFP_KERNEL);
2109 }
2110
2111 if (!tx_skb) {
2112 l2cap_seq_list_clear(&chan->retrans_list);
2113 break;
2114 }
2115
2116 /* Update skb contents */
2117 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
2118 put_unaligned_le32(__pack_extended_control(&control),
2119 tx_skb->data + L2CAP_HDR_SIZE);
2120 } else {
2121 put_unaligned_le16(__pack_enhanced_control(&control),
2122 tx_skb->data + L2CAP_HDR_SIZE);
2123 }
2124
2125 if (chan->fcs == L2CAP_FCS_CRC16) {
2126 u16 fcs = crc16(0, (u8 *) tx_skb->data, tx_skb->len);
2127 put_unaligned_le16(fcs, skb_put(tx_skb,
2128 L2CAP_FCS_SIZE));
2129 }
2130
2131 l2cap_do_send(chan, tx_skb);
2132
2133 BT_DBG("Resent txseq %d", control.txseq);
2134
2135 chan->last_acked_seq = chan->buffer_seq;
2136 }
2137}
2138
/* Retransmit the single frame named by control->reqseq: queue it on
 * the retransmit list and flush the list.
 */
static void l2cap_retransmit(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);

	l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
	l2cap_ertm_resend(chan);
}
2147
/* Retransmit every unacked frame starting at control->reqseq.
 *
 * Walks the tx queue to the first frame at reqseq (or the send head),
 * queues everything from there up to the send head for retransmission,
 * then flushes.  A P-bit in @control arms the F-bit for the response.
 */
static void l2cap_retransmit_all(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	if (control->poll)
		set_bit(CONN_SEND_FBIT, &chan->conn_state);

	/* Start from a clean list; reqseq supersedes older requests */
	l2cap_seq_list_clear(&chan->retrans_list);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	if (chan->unacked_frames) {
		/* Skip frames the remote has already acknowledged */
		skb_queue_walk(&chan->tx_q, skb) {
			if (bt_cb(skb)->control.txseq == control->reqseq ||
			    skb == chan->tx_send_head)
				break;
		}

		skb_queue_walk_from(&chan->tx_q, skb) {
			if (skb == chan->tx_send_head)
				break;

			l2cap_seq_list_append(&chan->retrans_list,
					      bt_cb(skb)->control.txseq);
		}

		l2cap_ertm_resend(chan);
	}
}
2181
/* Acknowledge received I-frames.
 *
 * If we are locally busy, an RNR S-frame is sent.  Otherwise pending
 * I-frames are flushed first (they ack implicitly); if unacked receive
 * credit still reaches 3/4 of the ack window, an explicit RR S-frame is
 * sent, else the ack timer is (re)armed to ack later.
 */
static void l2cap_send_ack(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
					 chan->last_acked_seq);
	int threshold;

	BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
	       chan, chan->last_acked_seq, chan->buffer_seq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    chan->rx_state == L2CAP_RX_STATE_RECV) {
		__clear_ack_timer(chan);
		control.super = L2CAP_SUPER_RNR;
		control.reqseq = chan->buffer_seq;
		l2cap_send_sframe(chan, &control);
	} else {
		if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
			l2cap_ertm_send(chan);
			/* If any i-frames were sent, they included an ack */
			if (chan->buffer_seq == chan->last_acked_seq)
				frames_to_ack = 0;
		}

		/* Ack now if the window is 3/4ths full.
		 * Calculate without mul or div
		 */
		threshold = chan->ack_win;
		threshold += threshold << 1;
		threshold >>= 2;

		BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
		       threshold);

		if (frames_to_ack >= threshold) {
			__clear_ack_timer(chan);
			control.super = L2CAP_SUPER_RR;
			control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &control);
			frames_to_ack = 0;
		}

		if (frames_to_ack)
			__set_ack_timer(chan);
	}
}
2231
/* Copy @len bytes of user iovec data into @skb, spilling anything that
 * does not fit into a chain of fragment skbs sized by the HCI MTU.
 * @count is how much fits into @skb itself.  Returns the number of
 * bytes consumed, or a negative errno (copy fault / allocation error).
 */
static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
					 struct msghdr *msg, int len,
					 int count, struct sk_buff *skb)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff **frag;
	int sent = 0;

	if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
		return -EFAULT;

	sent += count;
	len -= count;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		struct sk_buff *tmp;

		count = min_t(unsigned int, conn->mtu, len);

		tmp = chan->ops->alloc_skb(chan, count,
					   msg->msg_flags & MSG_DONTWAIT);
		if (IS_ERR(tmp))
			return PTR_ERR(tmp);

		*frag = tmp;

		if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
			return -EFAULT;

		(*frag)->priority = skb->priority;

		sent += count;
		len -= count;

		/* Keep the head skb's accounting in sync with the chain */
		skb->len += (*frag)->len;
		skb->data_len += (*frag)->len;

		frag = &(*frag)->next;
	}

	return sent;
}
2276
/* Build a connectionless (G-frame) PDU: basic L2CAP header followed by
 * the 2-byte PSM, then the user payload from @msg.  Returns the skb or
 * an ERR_PTR on allocation/copy failure.
 */
static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
						 struct msghdr *msg, size_t len,
						 u32 priority)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu priority %u", chan, len, priority);

	/* First fragment is bounded by the HCI MTU */
	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, count + hlen,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	skb->priority = priority;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
	put_unaligned(chan->psm, skb_put(skb, L2CAP_PSMLEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
2310
/* Build a basic-mode (B-frame) PDU: plain L2CAP header plus the user
 * payload from @msg.  Returns the skb or an ERR_PTR on failure.
 */
static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
					      struct msghdr *msg, size_t len,
					      u32 priority)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);

	skb = chan->ops->alloc_skb(chan, count + L2CAP_HDR_SIZE,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	skb->priority = priority;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len);

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
2343
/* Build an ERTM/streaming I-frame PDU: L2CAP header, zeroed control
 * field (filled in at send time), optional SDU length (@sdulen != 0 for
 * the first segment of a segmented SDU), and the payload from @msg.
 * Headroom for the FCS is reserved but the FCS itself is added later.
 * Returns the skb or an ERR_PTR on failure.
 */
static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
					       struct msghdr *msg, size_t len,
					       u16 sdulen)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	hlen = __ertm_hdr_size(chan);

	if (sdulen)
		hlen += L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, count + hlen,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	/* Control header is populated later */
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	bt_cb(skb)->control.fcs = chan->fcs;
	bt_cb(skb)->control.retries = 0;
	return skb;
}
2397
/* Segment an SDU of @len bytes from @msg into I-frame PDUs on
 * @seg_queue.
 *
 * The PDU size is derived from the HCI MTU (capped for BR/EDR and by
 * the remote MPS) minus worst-case L2CAP overhead.  A single-PDU SDU
 * is marked UNSEGMENTED; otherwise the first PDU carries the SDU
 * length with SAR=START, followed by CONTINUE and a final END PDU.
 * Returns 0 on success or a negative errno (the queue is purged on
 * failure).
 */
static int l2cap_segment_sdu(struct l2cap_chan *chan,
			     struct sk_buff_head *seg_queue,
			     struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	u16 sdu_len;
	size_t pdu_len;
	u8 sar;

	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);

	/* It is critical that ERTM PDUs fit in a single HCI fragment,
	 * so fragmented skbs are not used. The HCI layer's handling
	 * of fragmented skbs is not compatible with ERTM's queueing.
	 */

	/* PDU size is derived from the HCI MTU */
	pdu_len = chan->conn->mtu;

	/* Constrain PDU size for BR/EDR connections */
	if (!chan->hs_hcon)
		pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);

	/* Adjust for largest possible L2CAP overhead. */
	if (chan->fcs)
		pdu_len -= L2CAP_FCS_SIZE;

	pdu_len -= __ertm_hdr_size(chan);

	/* Remote device may have requested smaller PDUs */
	pdu_len = min_t(size_t, pdu_len, chan->remote_mps);

	if (len <= pdu_len) {
		sar = L2CAP_SAR_UNSEGMENTED;
		sdu_len = 0;
		pdu_len = len;
	} else {
		sar = L2CAP_SAR_START;
		sdu_len = len;
		/* START PDU also carries the 2-byte SDU length */
		pdu_len -= L2CAP_SDULEN_SIZE;
	}

	while (len > 0) {
		skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);

		if (IS_ERR(skb)) {
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		bt_cb(skb)->control.sar = sar;
		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;
		if (sdu_len) {
			/* Only the first PDU carries the SDU length */
			sdu_len = 0;
			pdu_len += L2CAP_SDULEN_SIZE;
		}

		if (len <= pdu_len) {
			sar = L2CAP_SAR_END;
			pdu_len = len;
		} else {
			sar = L2CAP_SAR_CONTINUE;
		}
	}

	return 0;
}
2467
/* Send an SDU on @chan, handling it according to the channel type
 * and mode: a single PDU for connectionless and basic-mode channels,
 * or segmentation followed by the ERTM state machine / streaming
 * sender for ERTM and streaming channels.
 *
 * Returns the number of bytes accepted for transmission, or a
 * negative errno: -EMSGSIZE if @len exceeds the outgoing MTU,
 * -ENOTCONN if the channel left BT_CONNECTED while segmenting,
 * -EBADFD for an unknown channel mode.
 */
int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
		    u32 priority)
{
	struct sk_buff *skb;
	int err;
	struct sk_buff_head seg_queue;

	/* Connectionless channel */
	if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
		skb = l2cap_create_connless_pdu(chan, msg, len, priority);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		l2cap_do_send(chan, skb);
		return len;
	}

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		/* Create a basic PDU */
		skb = l2cap_create_basic_pdu(chan, msg, len, priority);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		l2cap_do_send(chan, skb);
		err = len;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* Check outgoing MTU */
		if (len > chan->omtu) {
			err = -EMSGSIZE;
			break;
		}

		__skb_queue_head_init(&seg_queue);

		/* Do segmentation before calling in to the state machine,
		 * since it's possible to block while waiting for memory
		 * allocation.
		 */
		err = l2cap_segment_sdu(chan, &seg_queue, msg, len);

		/* The channel could have been closed while segmenting,
		 * check that it is still connected.
		 */
		if (chan->state != BT_CONNECTED) {
			__skb_queue_purge(&seg_queue);
			err = -ENOTCONN;
		}

		if (err)
			break;

		if (chan->mode == L2CAP_MODE_ERTM)
			l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
		else
			l2cap_streaming_send(chan, &seg_queue);

		err = len;

		/* If the skbs were not queued for sending, they'll still be in
		 * seg_queue and need to be purged.
		 */
		__skb_queue_purge(&seg_queue);
		break;

	default:
		BT_DBG("bad state %1.1x", chan->mode);
		err = -EBADFD;
	}

	return err;
}
2547
/* Send an SREJ S-frame for every missing sequence number between the
 * expected TX sequence and @txseq (exclusive) that is not already
 * buffered out-of-order in srej_q, recording each requested number in
 * srej_list so retransmissions can be matched on arrival.  Reception
 * then resumes after @txseq.
 */
static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
{
	struct l2cap_ctrl control;
	u16 seq;

	BT_DBG("chan %p, txseq %u", chan, txseq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;

	for (seq = chan->expected_tx_seq; seq != txseq;
	     seq = __next_seq(chan, seq)) {
		/* Skip frames that already arrived out of order */
		if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
			control.reqseq = seq;
			l2cap_send_sframe(chan, &control);
			l2cap_seq_list_append(&chan->srej_list, seq);
		}
	}

	chan->expected_tx_seq = __next_seq(chan, txseq);
}
2570
2571static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2572{
2573 struct l2cap_ctrl control;
2574
2575 BT_DBG("chan %p", chan);
2576
2577 if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2578 return;
2579
2580 memset(&control, 0, sizeof(control));
2581 control.sframe = 1;
2582 control.super = L2CAP_SUPER_SREJ;
2583 control.reqseq = chan->srej_list.tail;
2584 l2cap_send_sframe(chan, &control);
2585}
2586
/* Re-send SREJ S-frames for every outstanding sequence number in the
 * SREJ list except @txseq.  Each entry is popped, re-requested and
 * appended back, so the list is rotated exactly once; the loop stops
 * when it returns to the captured initial head, when @txseq is
 * reached, or when the list empties.
 */
static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
{
	struct l2cap_ctrl control;
	u16 initial_head;
	u16 seq;

	BT_DBG("chan %p, txseq %u", chan, txseq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;

	/* Capture initial list head to allow only one pass through the list. */
	initial_head = chan->srej_list.head;

	do {
		seq = l2cap_seq_list_pop(&chan->srej_list);
		if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
			break;

		control.reqseq = seq;
		l2cap_send_sframe(chan, &control);
		l2cap_seq_list_append(&chan->srej_list, seq);
	} while (chan->srej_list.head != initial_head);
}
2612
/* Process an incoming acknowledgment sequence number (ReqSeq): free
 * every transmitted-but-unacked frame up to (but not including)
 * @reqseq from the TX queue, advance expected_ack_seq, and stop the
 * retransmission timer once nothing remains unacknowledged.
 */
static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
{
	struct sk_buff *acked_skb;
	u16 ackseq;

	BT_DBG("chan %p, reqseq %u", chan, reqseq);

	/* Nothing outstanding, or this reqseq acks nothing new */
	if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
		return;

	BT_DBG("expected_ack_seq %u, unacked_frames %u",
	       chan->expected_ack_seq, chan->unacked_frames);

	for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
	     ackseq = __next_seq(chan, ackseq)) {

		acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
		if (acked_skb) {
			skb_unlink(acked_skb, &chan->tx_q);
			kfree_skb(acked_skb);
			chan->unacked_frames--;
		}
	}

	chan->expected_ack_seq = reqseq;

	if (chan->unacked_frames == 0)
		__clear_retrans_timer(chan);

	BT_DBG("unacked_frames %u", chan->unacked_frames);
}
2644
2645static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2646{
2647 BT_DBG("chan %p", chan);
2648
2649 chan->expected_tx_seq = chan->buffer_seq;
2650 l2cap_seq_list_clear(&chan->srej_list);
2651 skb_queue_purge(&chan->srej_q);
2652 chan->rx_state = L2CAP_RX_STATE_RECV;
2653}
2654
/* ERTM transmit state machine handler for the XMIT state.
 *
 * In XMIT the channel may transmit freely: new data is queued and
 * sent immediately, local-busy transitions are handled, incoming
 * ReqSeq values release acknowledged frames, and an explicit poll or
 * retransmission timeout moves the machine to WAIT_F.
 */
static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
				struct l2cap_ctrl *control,
				struct sk_buff_head *skbs, u8 event)
{
	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
	       event);

	switch (event) {
	case L2CAP_EV_DATA_REQUEST:
		/* Queue the new PDUs and transmit what the window allows */
		if (chan->tx_send_head == NULL)
			chan->tx_send_head = skb_peek(skbs);

		skb_queue_splice_tail_init(skbs, &chan->tx_q);
		l2cap_ertm_send(chan);
		break;
	case L2CAP_EV_LOCAL_BUSY_DETECTED:
		BT_DBG("Enter LOCAL_BUSY");
		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
			/* The SREJ_SENT state must be aborted if we are to
			 * enter the LOCAL_BUSY state.
			 */
			l2cap_abort_rx_srej_sent(chan);
		}

		l2cap_send_ack(chan);

		break;
	case L2CAP_EV_LOCAL_BUSY_CLEAR:
		BT_DBG("Exit LOCAL_BUSY");
		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
			/* We told the remote we were busy; poll with RR
			 * to resynchronize and wait for the F-bit reply.
			 */
			struct l2cap_ctrl local_control;

			memset(&local_control, 0, sizeof(local_control));
			local_control.sframe = 1;
			local_control.super = L2CAP_SUPER_RR;
			local_control.poll = 1;
			local_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &local_control);

			chan->retry_count = 1;
			__set_monitor_timer(chan);
			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		}
		break;
	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
		l2cap_process_reqseq(chan, control->reqseq);
		break;
	case L2CAP_EV_EXPLICIT_POLL:
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		__clear_ack_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RETRANS_TO:
		/* Retransmission timeout: poll the remote and await F-bit */
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RECV_FBIT:
		/* Nothing to process */
		break;
	default:
		break;
	}
}
2726
2727static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2728 struct l2cap_ctrl *control,
2729 struct sk_buff_head *skbs, u8 event)
2730{
2731 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2732 event);
2733
2734 switch (event) {
2735 case L2CAP_EV_DATA_REQUEST:
2736 if (chan->tx_send_head == NULL)
2737 chan->tx_send_head = skb_peek(skbs);
2738 /* Queue data, but don't send. */
2739 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2740 break;
2741 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2742 BT_DBG("Enter LOCAL_BUSY");
2743 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2744
2745 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2746 /* The SREJ_SENT state must be aborted if we are to
2747 * enter the LOCAL_BUSY state.
2748 */
2749 l2cap_abort_rx_srej_sent(chan);
2750 }
2751
2752 l2cap_send_ack(chan);
2753
2754 break;
2755 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2756 BT_DBG("Exit LOCAL_BUSY");
2757 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2758
2759 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2760 struct l2cap_ctrl local_control;
2761 memset(&local_control, 0, sizeof(local_control));
2762 local_control.sframe = 1;
2763 local_control.super = L2CAP_SUPER_RR;
2764 local_control.poll = 1;
2765 local_control.reqseq = chan->buffer_seq;
2766 l2cap_send_sframe(chan, &local_control);
2767
2768 chan->retry_count = 1;
2769 __set_monitor_timer(chan);
2770 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2771 }
2772 break;
2773 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2774 l2cap_process_reqseq(chan, control->reqseq);
2775
2776 /* Fall through */
2777
2778 case L2CAP_EV_RECV_FBIT:
2779 if (control && control->final) {
2780 __clear_monitor_timer(chan);
2781 if (chan->unacked_frames > 0)
2782 __set_retrans_timer(chan);
2783 chan->retry_count = 0;
2784 chan->tx_state = L2CAP_TX_STATE_XMIT;
2785 BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state);
2786 }
2787 break;
2788 case L2CAP_EV_EXPLICIT_POLL:
2789 /* Ignore */
2790 break;
2791 case L2CAP_EV_MONITOR_TO:
2792 if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
2793 l2cap_send_rr_or_rnr(chan, 1);
2794 __set_monitor_timer(chan);
2795 chan->retry_count++;
2796 } else {
2797 l2cap_send_disconn_req(chan, ECONNABORTED);
2798 }
2799 break;
2800 default:
2801 break;
2802 }
2803}
2804
2805static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
2806 struct sk_buff_head *skbs, u8 event)
2807{
2808 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2809 chan, control, skbs, event, chan->tx_state);
2810
2811 switch (chan->tx_state) {
2812 case L2CAP_TX_STATE_XMIT:
2813 l2cap_tx_state_xmit(chan, control, skbs, event);
2814 break;
2815 case L2CAP_TX_STATE_WAIT_F:
2816 l2cap_tx_state_wait_f(chan, control, skbs, event);
2817 break;
2818 default:
2819 /* Ignore event */
2820 break;
2821 }
2822}
2823
/* Feed the ReqSeq and F-bit of a received frame into the TX state
 * machine so acknowledged frames are released and timers updated.
 */
static void l2cap_pass_to_tx(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);
	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
}
2830
/* Feed only the F-bit of a received frame into the TX state machine
 * (no ReqSeq processing).
 */
static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
				  struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);
	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
}
2837
2838/* Copy frame to all raw sockets on that connection */
2839static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2840{
2841 struct sk_buff *nskb;
2842 struct l2cap_chan *chan;
2843
2844 BT_DBG("conn %p", conn);
2845
2846 mutex_lock(&conn->chan_lock);
2847
2848 list_for_each_entry(chan, &conn->chan_l, list) {
2849 struct sock *sk = chan->sk;
2850 if (chan->chan_type != L2CAP_CHAN_RAW)
2851 continue;
2852
2853 /* Don't send frame to the socket it came from */
2854 if (skb->sk == sk)
2855 continue;
2856 nskb = skb_clone(skb, GFP_KERNEL);
2857 if (!nskb)
2858 continue;
2859
2860 if (chan->ops->recv(chan, nskb))
2861 kfree_skb(nskb);
2862 }
2863
2864 mutex_unlock(&conn->chan_lock);
2865}
2866
/* ---- L2CAP signalling commands ---- */

/* Build a signalling command skb: L2CAP header + command header +
 * @dlen bytes of @data, addressed to the (LE or BR/EDR) signalling
 * CID.  If the payload does not fit in one skb of conn->mtu bytes,
 * the remainder is chained as header-less continuation fragments on
 * frag_list.
 *
 * Returns the skb, or NULL if the MTU is too small for the headers
 * or an allocation fails (any partial chain is freed).
 */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
				       u8 ident, u16 dlen, void *data)
{
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	int len, count;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
	       conn, code, ident, dlen);

	if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
		return NULL;

	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, conn->mtu, len);

	skb = bt_skb_alloc(count, GFP_KERNEL);
	if (!skb)
		return NULL;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);

	if (conn->hcon->type == LE_LINK)
		lh->cid = __constant_cpu_to_le16(L2CAP_CID_LE_SIGNALING);
	else
		lh->cid = __constant_cpu_to_le16(L2CAP_CID_SIGNALING);

	cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->code  = code;
	cmd->ident = ident;
	cmd->len   = cpu_to_le16(dlen);

	if (dlen) {
		/* Fill the first skb with as much payload as fits */
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		memcpy(skb_put(skb, count), data, count);
		data += count;
	}

	len -= skb->len;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_alloc(count, GFP_KERNEL);
		if (!*frag)
			goto fail;

		memcpy(skb_put(*frag, count), data, count);

		len  -= count;
		data += count;

		frag = &(*frag)->next;
	}

	return skb;

fail:
	kfree_skb(skb);
	return NULL;
}
2933
/* Parse one configuration option at *ptr and advance *ptr past it.
 *
 * The option type is stored in *type and its payload length in
 * *olen.  Payloads of 1, 2 or 4 bytes are decoded (little-endian)
 * into *val; any other length leaves *val pointing at the raw
 * payload bytes instead.
 *
 * Returns the total number of bytes consumed (header + payload).
 *
 * NOTE(review): opt->len comes from the remote and is not checked
 * here against the remaining buffer; callers bound their loop only
 * by the total option-block length.  Confirm a crafted opt->len
 * cannot make the walk read past the request buffer.
 */
static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
				     unsigned long *val)
{
	struct l2cap_conf_opt *opt = *ptr;
	int len;

	len = L2CAP_CONF_OPT_SIZE + opt->len;
	*ptr += len;

	*type = opt->type;
	*olen = opt->len;

	switch (opt->len) {
	case 1:
		*val = *((u8 *) opt->val);
		break;

	case 2:
		*val = get_unaligned_le16(opt->val);
		break;

	case 4:
		*val = get_unaligned_le32(opt->val);
		break;

	default:
		*val = (unsigned long) opt->val;
		break;
	}

	BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
	return len;
}
2967
/* Append one configuration option to the buffer at *ptr, advancing
 * *ptr past it.  Values of 1, 2 or 4 bytes are encoded little-endian
 * from @val; any other length treats @val as a pointer to @len raw
 * bytes to copy (the inverse of l2cap_get_conf_opt()).
 */
static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
{
	struct l2cap_conf_opt *opt = *ptr;

	BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);

	opt->type = type;
	opt->len  = len;

	switch (len) {
	case 1:
		*((u8 *) opt->val)  = val;
		break;

	case 2:
		put_unaligned_le16(val, opt->val);
		break;

	case 4:
		put_unaligned_le32(val, opt->val);
		break;

	default:
		memcpy(opt->val, (void *) val, len);
		break;
	}

	*ptr += L2CAP_CONF_OPT_SIZE + len;
}
2997
2998static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
2999{
3000 struct l2cap_conf_efs efs;
3001
3002 switch (chan->mode) {
3003 case L2CAP_MODE_ERTM:
3004 efs.id = chan->local_id;
3005 efs.stype = chan->local_stype;
3006 efs.msdu = cpu_to_le16(chan->local_msdu);
3007 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
3008 efs.acc_lat = __constant_cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
3009 efs.flush_to = __constant_cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
3010 break;
3011
3012 case L2CAP_MODE_STREAMING:
3013 efs.id = 1;
3014 efs.stype = L2CAP_SERV_BESTEFFORT;
3015 efs.msdu = cpu_to_le16(chan->local_msdu);
3016 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
3017 efs.acc_lat = 0;
3018 efs.flush_to = 0;
3019 break;
3020
3021 default:
3022 return;
3023 }
3024
3025 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
3026 (unsigned long) &efs);
3027}
3028
/* Delayed-work handler for the ERTM acknowledgment timer.
 *
 * If any in-order frames received since the last ack remain
 * unacknowledged, send an RR (or RNR when locally busy) so the
 * remote's retransmission timer is satisfied.  The final
 * l2cap_chan_put() pairs with a reference presumably taken when the
 * timer was scheduled -- confirm against __set_ack_timer's callers.
 */
static void l2cap_ack_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       ack_timer.work);
	u16 frames_to_ack;

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	/* Distance between what we've received and what we've acked */
	frames_to_ack = __seq_offset(chan, chan->buffer_seq,
				     chan->last_acked_seq);

	if (frames_to_ack)
		l2cap_send_rr_or_rnr(chan, 0);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
3048
/* Reset all sequence-number state, queues and AMP move state for a
 * newly configured channel.  For ERTM mode, additionally initialize
 * the RX/TX state machines, timers, the SREJ queue and the two
 * sequence lists.
 *
 * Returns 0 on success or a negative errno if a sequence-list
 * allocation fails (the first list is freed if the second fails).
 */
int l2cap_ertm_init(struct l2cap_chan *chan)
{
	int err;

	chan->next_tx_seq = 0;
	chan->expected_tx_seq = 0;
	chan->expected_ack_seq = 0;
	chan->unacked_frames = 0;
	chan->buffer_seq = 0;
	chan->frames_sent = 0;
	chan->last_acked_seq = 0;
	chan->sdu = NULL;
	chan->sdu_last_frag = NULL;
	chan->sdu_len = 0;

	skb_queue_head_init(&chan->tx_q);

	/* Channel starts on the BR/EDR controller with no AMP move
	 * in progress.
	 */
	chan->local_amp_id = AMP_ID_BREDR;
	chan->move_id = AMP_ID_BREDR;
	chan->move_state = L2CAP_MOVE_STABLE;
	chan->move_role = L2CAP_MOVE_ROLE_NONE;

	/* Streaming mode needs none of the ERTM machinery below */
	if (chan->mode != L2CAP_MODE_ERTM)
		return 0;

	chan->rx_state = L2CAP_RX_STATE_RECV;
	chan->tx_state = L2CAP_TX_STATE_XMIT;

	INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
	INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
	INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);

	skb_queue_head_init(&chan->srej_q);

	err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
	if (err < 0)
		return err;

	err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
	if (err < 0)
		/* Don't leak the first list on partial failure */
		l2cap_seq_list_free(&chan->srej_list);

	return err;
}
3093
3094static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
3095{
3096 switch (mode) {
3097 case L2CAP_MODE_STREAMING:
3098 case L2CAP_MODE_ERTM:
3099 if (l2cap_mode_supported(mode, remote_feat_mask))
3100 return mode;
3101 /* fall through */
3102 default:
3103 return L2CAP_MODE_BASIC;
3104 }
3105}
3106
3107static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
3108{
3109 return conn->hs_enabled && conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
3110}
3111
3112static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
3113{
3114 return conn->hs_enabled && conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
3115}
3116
/* Fill in the RFC retransmission and monitor timeouts for an
 * outgoing ERTM configuration.  On an AMP link both timeouts are
 * derived from the controller's best-effort flush timeout; on plain
 * BR/EDR the specification defaults are used.
 */
static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
				      struct l2cap_conf_rfc *rfc)
{
	if (chan->local_amp_id != AMP_ID_BREDR && chan->hs_hcon) {
		u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;

		/* Class 1 devices have must have ERTM timeouts
		 * exceeding the Link Supervision Timeout.  The
		 * default Link Supervision Timeout for AMP
		 * controllers is 10 seconds.
		 *
		 * Class 1 devices use 0xffffffff for their
		 * best-effort flush timeout, so the clamping logic
		 * will result in a timeout that meets the above
		 * requirement.  ERTM timeouts are 16-bit values, so
		 * the maximum timeout is 65.535 seconds.
		 */

		/* Convert timeout to milliseconds and round */
		ertm_to = DIV_ROUND_UP_ULL(ertm_to, 1000);

		/* This is the recommended formula for class 2 devices
		 * that start ERTM timers when packets are sent to the
		 * controller.
		 */
		ertm_to = 3 * ertm_to + 500;

		/* Clamp to the 16-bit field in the RFC option */
		if (ertm_to > 0xffff)
			ertm_to = 0xffff;

		rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);
		rfc->monitor_timeout = rfc->retrans_timeout;
	} else {
		rfc->retrans_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
		rfc->monitor_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
	}
}
3154
3155static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
3156{
3157 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
3158 __l2cap_ews_supported(chan->conn)) {
3159 /* use extended control field */
3160 set_bit(FLAG_EXT_CTRL, &chan->flags);
3161 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3162 } else {
3163 chan->tx_win = min_t(u16, chan->tx_win,
3164 L2CAP_DEFAULT_TX_WINDOW);
3165 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
3166 }
3167 chan->ack_win = chan->tx_win;
3168}
3169
/* Build the payload of an outgoing Configure Request into @data.
 *
 * On the first request the channel mode may be downgraded via
 * l2cap_select_mode() based on the remote feature mask; then MTU,
 * RFC, EFS, EWS and FCS options are appended as appropriate for the
 * final mode.
 *
 * Returns the number of bytes written to @data.
 */
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
{
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = chan->mode };
	void *ptr = req->data;
	u16 size;

	BT_DBG("chan %p", chan);

	/* Mode selection only happens on the very first exchange */
	if (chan->num_conf_req || chan->num_conf_rsp)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		/* State 2 devices never downgrade their mode */
		if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
			break;

		if (__l2cap_efs_supported(chan->conn))
			set_bit(FLAG_EFS_ENABLE, &chan->flags);

		/* fall through */
	default:
		chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
		break;
	}

done:
	/* MTU option is only needed when it differs from the default */
	if (chan->imtu != L2CAP_DEFAULT_MTU)
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		/* No RFC option needed if the remote knows neither
		 * ERTM nor streaming mode.
		 */
		if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
		    !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
			break;

		rfc.mode            = L2CAP_MODE_BASIC;
		rfc.txwin_size      = 0;
		rfc.max_transmit    = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size    = 0;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);
		break;

	case L2CAP_MODE_ERTM:
		rfc.mode            = L2CAP_MODE_ERTM;
		rfc.max_transmit    = chan->max_tx;

		__l2cap_set_ertm_timeouts(chan, &rfc);

		/* MPS must leave room for the worst-case L2CAP overhead */
		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_txwin_setup(chan);

		rfc.txwin_size = min_t(u16, chan->tx_win,
				       L2CAP_DEFAULT_TX_WINDOW);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan);

		/* Windows larger than the default need the EWS option */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
					   chan->tx_win);

		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
			if (chan->fcs == L2CAP_FCS_NONE ||
			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
				chan->fcs = L2CAP_FCS_NONE;
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
						   chan->fcs);
			}
		break;

	case L2CAP_MODE_STREAMING:
		l2cap_txwin_setup(chan);
		rfc.mode            = L2CAP_MODE_STREAMING;
		rfc.txwin_size      = 0;
		rfc.max_transmit    = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;

		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan);

		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
			if (chan->fcs == L2CAP_FCS_NONE ||
			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
				chan->fcs = L2CAP_FCS_NONE;
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
						   chan->fcs);
			}
		break;
	}

	req->dcid  = cpu_to_le16(chan->dcid);
	req->flags = __constant_cpu_to_le16(0);

	return ptr - data;
}
3287
/* Parse the remote's Configure Request (stored in chan->conf_req)
 * and build the Configure Response payload into @data.
 *
 * Negotiates MTU, flush timeout, RFC, FCS, EFS and EWS options;
 * unknown non-hint options are echoed back with L2CAP_CONF_UNKNOWN.
 *
 * Returns the response payload length, or -ECONNREFUSED when the
 * request cannot be reconciled with the local configuration.
 *
 * Fix: a truncated EFS option (olen != sizeof(efs)) previously still
 * set remote_efs, so the uninitialized on-stack efs struct was
 * validated, copied into channel state and echoed back to the peer
 * (uninitialized read / stack info leak).  remote_efs is now only
 * set for a well-formed option, and efs is zeroed as defense in
 * depth for the FLAG_EFS_ENABLE path.
 */
static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
{
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;
	void *req = chan->conf_req;
	int len = chan->conf_len;
	int type, hint, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;
	u8 remote_efs = 0;
	u16 mtu = L2CAP_DEFAULT_MTU;
	u16 result = L2CAP_CONF_SUCCESS;
	u16 size;

	BT_DBG("chan %p", chan);

	/* Never let uninitialized stack memory reach the peer or the
	 * channel state if no (valid) EFS option is received.
	 */
	memset(&efs, 0, sizeof(efs));

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);

		hint  = type & L2CAP_CONF_HINT;
		type &= L2CAP_CONF_MASK;

		switch (type) {
		case L2CAP_CONF_MTU:
			mtu = val;
			break;

		case L2CAP_CONF_FLUSH_TO:
			chan->flush_to = val;
			break;

		case L2CAP_CONF_QOS:
			break;

		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *) val, olen);
			break;

		case L2CAP_CONF_FCS:
			if (val == L2CAP_FCS_NONE)
				set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
			break;

		case L2CAP_CONF_EFS:
			/* Only treat EFS as present when well-formed;
			 * a truncated option must not be processed.
			 */
			if (olen == sizeof(efs)) {
				remote_efs = 1;
				memcpy(&efs, (void *) val, olen);
			}
			break;

		case L2CAP_CONF_EWS:
			if (!chan->conn->hs_enabled)
				return -ECONNREFUSED;

			set_bit(FLAG_EXT_CTRL, &chan->flags);
			set_bit(CONF_EWS_RECV, &chan->conf_state);
			chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
			chan->remote_tx_win = val;
			break;

		default:
			if (hint)
				break;

			result = L2CAP_CONF_UNKNOWN;
			*((u8 *) ptr++) = type;
			break;
		}
	}

	if (chan->num_conf_rsp || chan->num_conf_req > 1)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
			chan->mode = l2cap_select_mode(rfc.mode,
						       chan->conn->feat_mask);
			break;
		}

		if (remote_efs) {
			if (__l2cap_efs_supported(chan->conn))
				set_bit(FLAG_EFS_ENABLE, &chan->flags);
			else
				return -ECONNREFUSED;
		}

		if (chan->mode != rfc.mode)
			return -ECONNREFUSED;

		break;
	}

done:
	if (chan->mode != rfc.mode) {
		result = L2CAP_CONF_UNACCEPT;
		rfc.mode = chan->mode;

		if (chan->num_conf_rsp == 1)
			return -ECONNREFUSED;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);
	}

	if (result == L2CAP_CONF_SUCCESS) {
		/* Configure output options and let the other side know
		 * which ones we don't like. */

		if (mtu < L2CAP_DEFAULT_MIN_MTU)
			result = L2CAP_CONF_UNACCEPT;
		else {
			chan->omtu = mtu;
			set_bit(CONF_MTU_DONE, &chan->conf_state);
		}
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);

		if (remote_efs) {
			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != chan->local_stype) {

				result = L2CAP_CONF_UNACCEPT;

				if (chan->num_conf_req >= 1)
					return -ECONNREFUSED;

				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs);
			} else {
				/* Send PENDING Conf Rsp */
				result = L2CAP_CONF_PENDING;
				set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
			}
		}

		switch (rfc.mode) {
		case L2CAP_MODE_BASIC:
			chan->fcs = L2CAP_FCS_NONE;
			set_bit(CONF_MODE_DONE, &chan->conf_state);
			break;

		case L2CAP_MODE_ERTM:
			if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
				chan->remote_tx_win = rfc.txwin_size;
			else
				rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;

			chan->remote_max_tx = rfc.max_transmit;

			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			__l2cap_set_ertm_timeouts(chan, &rfc);

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					   sizeof(rfc), (unsigned long) &rfc);

			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->remote_id = efs.id;
				chan->remote_stype = efs.stype;
				chan->remote_msdu = le16_to_cpu(efs.msdu);
				chan->remote_flush_to =
					le32_to_cpu(efs.flush_to);
				chan->remote_acc_lat =
					le32_to_cpu(efs.acc_lat);
				chan->remote_sdu_itime =
					le32_to_cpu(efs.sdu_itime);
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs);
			}
			break;

		case L2CAP_MODE_STREAMING:
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
					   (unsigned long) &rfc);

			break;

		default:
			result = L2CAP_CONF_UNACCEPT;

			memset(&rfc, 0, sizeof(rfc));
			rfc.mode = chan->mode;
		}

		if (result == L2CAP_CONF_SUCCESS)
			set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
	}
	rsp->scid   = cpu_to_le16(chan->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags  = __constant_cpu_to_le16(0);

	return ptr - data;
}
3501
3502static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
3503 void *data, u16 *result)
3504{
3505 struct l2cap_conf_req *req = data;
3506 void *ptr = req->data;
3507 int type, olen;
3508 unsigned long val;
3509 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3510 struct l2cap_conf_efs efs;
3511
3512 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
3513
3514 while (len >= L2CAP_CONF_OPT_SIZE) {
3515 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3516
3517 switch (type) {
3518 case L2CAP_CONF_MTU:
3519 if (val < L2CAP_DEFAULT_MIN_MTU) {
3520 *result = L2CAP_CONF_UNACCEPT;
3521 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3522 } else
3523 chan->imtu = val;
3524 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
3525 break;
3526
3527 case L2CAP_CONF_FLUSH_TO:
3528 chan->flush_to = val;
3529 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
3530 2, chan->flush_to);
3531 break;
3532
3533 case L2CAP_CONF_RFC:
3534 if (olen == sizeof(rfc))
3535 memcpy(&rfc, (void *)val, olen);
3536
3537 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
3538 rfc.mode != chan->mode)
3539 return -ECONNREFUSED;
3540
3541 chan->fcs = 0;
3542
3543 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3544 sizeof(rfc), (unsigned long) &rfc);
3545 break;
3546
3547 case L2CAP_CONF_EWS:
3548 chan->ack_win = min_t(u16, val, chan->ack_win);
3549 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3550 chan->tx_win);
3551 break;
3552
3553 case L2CAP_CONF_EFS:
3554 if (olen == sizeof(efs))
3555 memcpy(&efs, (void *)val, olen);
3556
3557 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3558 efs.stype != L2CAP_SERV_NOTRAFIC &&
3559 efs.stype != chan->local_stype)
3560 return -ECONNREFUSED;
3561
3562 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
3563 (unsigned long) &efs);
3564 break;
3565
3566 case L2CAP_CONF_FCS:
3567 if (*result == L2CAP_CONF_PENDING)
3568 if (val == L2CAP_FCS_NONE)
3569 set_bit(CONF_RECV_NO_FCS,
3570 &chan->conf_state);
3571 break;
3572 }
3573 }
3574
3575 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
3576 return -ECONNREFUSED;
3577
3578 chan->mode = rfc.mode;
3579
3580 if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
3581 switch (rfc.mode) {
3582 case L2CAP_MODE_ERTM:
3583 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3584 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3585 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3586 if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
3587 chan->ack_win = min_t(u16, chan->ack_win,
3588 rfc.txwin_size);
3589
3590 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3591 chan->local_msdu = le16_to_cpu(efs.msdu);
3592 chan->local_sdu_itime =
3593 le32_to_cpu(efs.sdu_itime);
3594 chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
3595 chan->local_flush_to =
3596 le32_to_cpu(efs.flush_to);
3597 }
3598 break;
3599
3600 case L2CAP_MODE_STREAMING:
3601 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3602 }
3603 }
3604
3605 req->dcid = cpu_to_le16(chan->dcid);
3606 req->flags = __constant_cpu_to_le16(0);
3607
3608 return ptr - data;
3609}
3610
3611static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
3612 u16 result, u16 flags)
3613{
3614 struct l2cap_conf_rsp *rsp = data;
3615 void *ptr = rsp->data;
3616
3617 BT_DBG("chan %p", chan);
3618
3619 rsp->scid = cpu_to_le16(chan->dcid);
3620 rsp->result = cpu_to_le16(result);
3621 rsp->flags = cpu_to_le16(flags);
3622
3623 return ptr - data;
3624}
3625
/* Send the deferred Connection (or Create Channel) Response for a channel
 * whose acceptance was postponed (e.g. BT_SK_DEFER_SETUP), then kick off
 * configuration if no Configuration Request has been sent yet.
 * Caller context: chan->ident still holds the peer's request identifier.
 */
void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
{
	struct l2cap_conn_rsp rsp;
	struct l2cap_conn *conn = chan->conn;
	u8 buf[128];
	u8 rsp_code;

	rsp.scid = cpu_to_le16(chan->dcid);
	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
	rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);

	/* AMP channels (high-speed link present) answer with a Create
	 * Channel Response instead of a plain Connection Response.
	 */
	if (chan->hs_hcon)
		rsp_code = L2CAP_CREATE_CHAN_RSP;
	else
		rsp_code = L2CAP_CONN_RSP;

	BT_DBG("chan %p rsp_code %u", chan, rsp_code);

	l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);

	/* Only the first caller proceeds to send the Configure Request. */
	if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
		return;

	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
		       l2cap_build_conf_req(chan, buf), buf);
	chan->num_conf_req++;
}
3654
3655static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
3656{
3657 int type, olen;
3658 unsigned long val;
3659 /* Use sane default values in case a misbehaving remote device
3660 * did not send an RFC or extended window size option.
3661 */
3662 u16 txwin_ext = chan->ack_win;
3663 struct l2cap_conf_rfc rfc = {
3664 .mode = chan->mode,
3665 .retrans_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
3666 .monitor_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
3667 .max_pdu_size = cpu_to_le16(chan->imtu),
3668 .txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
3669 };
3670
3671 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
3672
3673 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
3674 return;
3675
3676 while (len >= L2CAP_CONF_OPT_SIZE) {
3677 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3678
3679 switch (type) {
3680 case L2CAP_CONF_RFC:
3681 if (olen == sizeof(rfc))
3682 memcpy(&rfc, (void *)val, olen);
3683 break;
3684 case L2CAP_CONF_EWS:
3685 txwin_ext = val;
3686 break;
3687 }
3688 }
3689
3690 switch (rfc.mode) {
3691 case L2CAP_MODE_ERTM:
3692 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3693 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3694 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3695 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3696 chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
3697 else
3698 chan->ack_win = min_t(u16, chan->ack_win,
3699 rfc.txwin_size);
3700 break;
3701 case L2CAP_MODE_STREAMING:
3702 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3703 }
3704}
3705
3706static inline int l2cap_command_rej(struct l2cap_conn *conn,
3707 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3708 u8 *data)
3709{
3710 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
3711
3712 if (cmd_len < sizeof(*rej))
3713 return -EPROTO;
3714
3715 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
3716 return 0;
3717
3718 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
3719 cmd->ident == conn->info_ident) {
3720 cancel_delayed_work(&conn->info_timer);
3721
3722 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3723 conn->info_ident = 0;
3724
3725 l2cap_conn_start(conn);
3726 }
3727
3728 return 0;
3729}
3730
/* Core handler for an incoming Connection Request (also used by Create
 * Channel Request via @rsp_code/@amp_id). Looks up a listening channel for
 * the PSM, performs security checks, creates the new channel and always
 * sends a response with the computed result/status.
 *
 * @rsp_code: L2CAP_CONN_RSP or L2CAP_CREATE_CHAN_RSP.
 * @amp_id:   controller the channel is created on (AMP_ID_BREDR for BR/EDR).
 * Returns the new channel, or NULL when the request was refused.
 *
 * Locking: takes conn->chan_lock then the parent socket lock; both are
 * released before the response is sent.
 */
static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd,
					u8 *data, u8 rsp_code, u8 amp_id)
{
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct l2cap_chan *chan = NULL, *pchan;
	struct sock *parent, *sk = NULL;
	int result, status = L2CAP_CS_NO_INFO;

	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src, conn->dst);
	if (!pchan) {
		result = L2CAP_CR_BAD_PSM;
		goto sendresp;
	}

	parent = pchan->sk;

	mutex_lock(&conn->chan_lock);
	lock_sock(parent);

	/* Check if the ACL is secure enough (if not SDP) */
	if (psm != __constant_cpu_to_le16(L2CAP_PSM_SDP) &&
	    !hci_conn_check_link_mode(conn->hcon)) {
		conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
		result = L2CAP_CR_SEC_BLOCK;
		goto response;
	}

	result = L2CAP_CR_NO_MEM;

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid))
		goto response;

	chan = pchan->ops->new_connection(pchan);
	if (!chan)
		goto response;

	sk = chan->sk;

	/* For certain devices (ex: HID mouse), support for authentication,
	 * pairing and bonding is optional. For such devices, inorder to avoid
	 * the ACL alive for too long after L2CAP disconnection, reset the ACL
	 * disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
	 */
	conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;

	bacpy(&bt_sk(sk)->src, conn->src);
	bacpy(&bt_sk(sk)->dst, conn->dst);
	chan->psm = psm;
	/* Peer's source CID becomes our destination CID. */
	chan->dcid = scid;
	chan->local_amp_id = amp_id;

	__l2cap_chan_add(conn, chan);

	/* Our allocated source CID is reported back as the peer's DCID. */
	dcid = chan->scid;

	__set_chan_timer(chan, sk->sk_sndtimeo);

	chan->ident = cmd->ident;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
		if (l2cap_chan_check_security(chan)) {
			if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) {
				/* Userspace must explicitly accept; respond
				 * PEND with authorization pending.
				 */
				__l2cap_state_change(chan, BT_CONNECT2);
				result = L2CAP_CR_PEND;
				status = L2CAP_CS_AUTHOR_PEND;
				chan->ops->defer(chan);
			} else {
				/* Force pending result for AMP controllers.
				 * The connection will succeed after the
				 * physical link is up.
				 */
				if (amp_id == AMP_ID_BREDR) {
					__l2cap_state_change(chan, BT_CONFIG);
					result = L2CAP_CR_SUCCESS;
				} else {
					__l2cap_state_change(chan, BT_CONNECT2);
					result = L2CAP_CR_PEND;
				}
				status = L2CAP_CS_NO_INFO;
			}
		} else {
			/* Security elevation in progress. */
			__l2cap_state_change(chan, BT_CONNECT2);
			result = L2CAP_CR_PEND;
			status = L2CAP_CS_AUTHEN_PEND;
		}
	} else {
		/* Feature discovery not finished yet; answer PEND and
		 * (below) kick off an Information Request.
		 */
		__l2cap_state_change(chan, BT_CONNECT2);
		result = L2CAP_CR_PEND;
		status = L2CAP_CS_NO_INFO;
	}

response:
	release_sock(parent);
	mutex_unlock(&conn->chan_lock);

sendresp:
	rsp.scid = cpu_to_le16(scid);
	rsp.dcid = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);

	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
		struct l2cap_info_req info;
		info.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

		l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
			       sizeof(info), &info);
	}

	/* Immediate success: start configuration right away. */
	if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
	    result == L2CAP_CR_SUCCESS) {
		u8 buf[128];
		set_bit(CONF_REQ_SENT, &chan->conf_state);
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf), buf);
		chan->num_conf_req++;
	}

	return chan;
}
3866
3867static int l2cap_connect_req(struct l2cap_conn *conn,
3868 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
3869{
3870 struct hci_dev *hdev = conn->hcon->hdev;
3871 struct hci_conn *hcon = conn->hcon;
3872
3873 if (cmd_len < sizeof(struct l2cap_conn_req))
3874 return -EPROTO;
3875
3876 hci_dev_lock(hdev);
3877 if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
3878 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags))
3879 mgmt_device_connected(hdev, &hcon->dst, hcon->type,
3880 hcon->dst_type, 0, NULL, 0,
3881 hcon->dev_class);
3882 hci_dev_unlock(hdev);
3883
3884 l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
3885 return 0;
3886}
3887
/* Handle a Connection Response or Create Channel Response. Locates the
 * channel either by the source CID we allocated or, when scid is 0
 * (refused before a CID was assigned), by the signaling identifier.
 * On success, moves to BT_CONFIG and sends the first Configure Request;
 * on PEND, records the pending state; anything else tears the channel down.
 */
static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				    u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct l2cap_chan *chan;
	u8 req[128];
	int err;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
	       dcid, scid, result, status);

	mutex_lock(&conn->chan_lock);

	if (scid) {
		chan = __l2cap_get_chan_by_scid(conn, scid);
		if (!chan) {
			err = -EBADSLT;
			goto unlock;
		}
	} else {
		/* No CID echoed back; match by the request identifier. */
		chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
		if (!chan) {
			err = -EBADSLT;
			goto unlock;
		}
	}

	err = 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_SUCCESS:
		l2cap_state_change(chan, BT_CONFIG);
		chan->ident = 0;
		/* Peer's allocated CID becomes our destination CID. */
		chan->dcid = dcid;
		clear_bit(CONF_CONNECT_PEND, &chan->conf_state);

		/* Don't send a second Configure Request. */
		if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
			break;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, req), req);
		chan->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		set_bit(CONF_CONNECT_PEND, &chan->conf_state);
		break;

	default:
		/* Any other result is a refusal. */
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	l2cap_chan_unlock(chan);

unlock:
	mutex_unlock(&conn->chan_lock);

	return err;
}
3960
3961static inline void set_default_fcs(struct l2cap_chan *chan)
3962{
3963 /* FCS is enabled only in ERTM or streaming mode, if one or both
3964 * sides request it.
3965 */
3966 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
3967 chan->fcs = L2CAP_FCS_NONE;
3968 else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
3969 chan->fcs = L2CAP_FCS_CRC16;
3970}
3971
3972static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
3973 u8 ident, u16 flags)
3974{
3975 struct l2cap_conn *conn = chan->conn;
3976
3977 BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
3978 flags);
3979
3980 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3981 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3982
3983 l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
3984 l2cap_build_conf_rsp(chan, data,
3985 L2CAP_CONF_SUCCESS, flags), data);
3986}
3987
/* Handle an incoming Configuration Request. Accumulates continuation
 * fragments in chan->conf_req, then parses the complete set of options,
 * sends the Configure Response, and finishes channel setup once both
 * input and output configuration are done.
 */
static inline int l2cap_config_req(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rsp[64];
	struct l2cap_chan *chan;
	int len, err = 0;

	if (cmd_len < sizeof(*req))
		return -EPROTO;

	dcid = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	/* The peer's DCID is our SCID; returns the channel locked. */
	chan = l2cap_get_chan_by_scid(conn, dcid);
	if (!chan)
		return -EBADSLT;

	/* Config is only legal while connecting/configuring. */
	if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
		struct l2cap_cmd_rej_cid rej;

		rej.reason = __constant_cpu_to_le16(L2CAP_REJ_INVALID_CID);
		rej.scid = cpu_to_le16(chan->scid);
		rej.dcid = cpu_to_le16(chan->dcid);

		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
			       sizeof(rej), &rej);
		goto unlock;
	}

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (chan->conf_len + len > sizeof(chan->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
			       L2CAP_CONF_REJECT, flags), rsp);
		goto unlock;
	}

	/* Store config. */
	memcpy(chan->conf_req + chan->conf_len, req->data, len);
	chan->conf_len += len;

	if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
			       L2CAP_CONF_SUCCESS, flags), rsp);
		goto unlock;
	}

	/* Complete config. */
	len = l2cap_parse_conf_req(chan, rsp);
	if (len < 0) {
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto unlock;
	}

	chan->ident = cmd->ident;
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
	chan->num_conf_rsp++;

	/* Reset config buffer. */
	chan->conf_len = 0;

	if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
		goto unlock;

	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
		/* Both directions configured: finalize and go ready. */
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);

		goto unlock;
	}

	/* We haven't configured our side yet; send our own request. */
	if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
		u8 buf[64];
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf), buf);
		chan->num_conf_req++;
	}

	/* Got Conf Rsp PENDING from remote side and asume we sent
	   Conf Rsp PENDING in the code above */
	if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
	    test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {

		/* check compatibility */

		/* Send rsp for BR/EDR channel */
		if (!chan->hs_hcon)
			l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
		else
			/* Response deferred until the logical link is up. */
			chan->ident = cmd->ident;
	}

unlock:
	l2cap_chan_unlock(chan);
	return err;
}
4100
/* Handle an incoming Configuration Response. SUCCESS applies the agreed
 * parameters; PENDING handles the EFS/AMP deferred flow; UNACCEPT retries
 * with adjusted options up to L2CAP_CONF_MAX_CONF_RSP times; anything
 * beyond that (fallthrough to default) disconnects the channel.
 */
static inline int l2cap_config_rsp(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
	u16 scid, flags, result;
	struct l2cap_chan *chan;
	int len = cmd_len - sizeof(*rsp);
	int err = 0;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	scid = __le16_to_cpu(rsp->scid);
	flags = __le16_to_cpu(rsp->flags);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
	       result, len);

	/* Returns the channel locked; unknown CID is silently ignored. */
	chan = l2cap_get_chan_by_scid(conn, scid);
	if (!chan)
		return 0;

	switch (result) {
	case L2CAP_CONF_SUCCESS:
		l2cap_conf_rfc_get(chan, rsp->data, len);
		clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
		break;

	case L2CAP_CONF_PENDING:
		set_bit(CONF_REM_CONF_PEND, &chan->conf_state);

		if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
			char buf[64];

			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
						   buf, &result);
			if (len < 0) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			if (!chan->hs_hcon) {
				l2cap_send_efs_conf_rsp(chan, buf, cmd->ident,
							0);
			} else {
				/* AMP: confirm after the logical link exists. */
				if (l2cap_check_efs(chan)) {
					amp_create_logical_link(chan);
					chan->ident = cmd->ident;
				}
			}
		}
		goto done;

	case L2CAP_CONF_UNACCEPT:
		if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
			char req[64];

			if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			/* throw out any old stored conf requests */
			result = L2CAP_CONF_SUCCESS;
			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
						   req, &result);
			if (len < 0) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			l2cap_send_cmd(conn, l2cap_get_ident(conn),
				       L2CAP_CONF_REQ, len, req);
			chan->num_conf_req++;
			if (result != L2CAP_CONF_SUCCESS)
				goto done;
			break;
		}
		/* Too many rejections: fall through and give up. */

	default:
		l2cap_chan_set_err(chan, ECONNRESET);

		__set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto done;
	}

	if (flags & L2CAP_CONF_FLAG_CONTINUATION)
		goto done;

	set_bit(CONF_INPUT_DONE, &chan->conf_state);

	if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
		/* Both directions configured: finalize and go ready. */
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);
	}

done:
	l2cap_chan_unlock(chan);
	return err;
}
4212
/* Handle an incoming Disconnection Request: acknowledge with a
 * Disconnection Response, shut the socket down, and remove the channel.
 * An extra reference is held across l2cap_chan_del() so the channel
 * stays valid for ops->close() after it is unlinked.
 */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct l2cap_chan *chan;
	struct sock *sk;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	mutex_lock(&conn->chan_lock);

	/* The peer's DCID identifies our local channel (our SCID). */
	chan = __l2cap_get_chan_by_scid(conn, dcid);
	if (!chan) {
		mutex_unlock(&conn->chan_lock);
		return -EBADSLT;
	}

	l2cap_chan_lock(chan);

	sk = chan->sk;

	/* Echo the CIDs back (swapped to the peer's point of view). */
	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.scid = cpu_to_le16(chan->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	lock_sock(sk);
	sk->sk_shutdown = SHUTDOWN_MASK;
	release_sock(sk);

	/* Keep the channel alive past del so close() can still use it. */
	l2cap_chan_hold(chan);
	l2cap_chan_del(chan, ECONNRESET);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);

	return 0;
}
4263
/* Handle an incoming Disconnection Response: the peer has confirmed our
 * disconnect, so remove the channel. As in l2cap_disconnect_req(), an
 * extra reference keeps the channel valid for ops->close().
 */
static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
	u16 dcid, scid;
	struct l2cap_chan *chan;

	if (cmd_len != sizeof(*rsp))
		return -EPROTO;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);

	mutex_lock(&conn->chan_lock);

	/* An unknown CID is not an error here; the channel may be gone. */
	chan = __l2cap_get_chan_by_scid(conn, scid);
	if (!chan) {
		mutex_unlock(&conn->chan_lock);
		return 0;
	}

	l2cap_chan_lock(chan);

	l2cap_chan_hold(chan);
	l2cap_chan_del(chan, 0);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);

	return 0;
}
4302
/* Handle an incoming Information Request. Answers the feature-mask and
 * fixed-channels queries; everything else gets a NOTSUPP response.
 */
static inline int l2cap_information_req(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
					u8 *data)
{
	struct l2cap_info_req *req = (struct l2cap_info_req *) data;
	u16 type;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	type = __le16_to_cpu(req->type);

	BT_DBG("type 0x%4.4x", type);

	if (type == L2CAP_IT_FEAT_MASK) {
		u8 buf[8];
		u32 feat_mask = l2cap_feat_mask;
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
		rsp->type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
		rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
		if (!disable_ertm)
			feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
				| L2CAP_FEAT_FCS;
		if (conn->hs_enabled)
			feat_mask |= L2CAP_FEAT_EXT_FLOW
				| L2CAP_FEAT_EXT_WINDOW;

		put_unaligned_le32(feat_mask, rsp->data);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
			       buf);
	} else if (type == L2CAP_IT_FIXED_CHAN) {
		u8 buf[12];
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;

		/* NOTE(review): this mutates the file-scope
		 * l2cap_fixed_chan[] based on a per-connection flag;
		 * concurrent connections with differing hs_enabled could
		 * race here — confirm whether a per-conn copy is needed.
		 */
		if (conn->hs_enabled)
			l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
		else
			l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;

		rsp->type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);
		rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
		memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
			       buf);
	} else {
		/* Unknown information type. */
		struct l2cap_info_rsp rsp;
		rsp.type = cpu_to_le16(type);
		rsp.result = __constant_cpu_to_le16(L2CAP_IR_NOTSUPP);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
			       &rsp);
	}

	return 0;
}
4357
/* Handle an incoming Information Response to our discovery sequence:
 * first the feature mask, then (if the peer supports fixed channels)
 * the fixed-channel mask. When discovery completes — or fails — pending
 * channels are started via l2cap_conn_start().
 */
static inline int l2cap_information_rsp(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
					u8 *data)
{
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
	u16 type, result;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	type = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);

	/* L2CAP Info req/rsp are unbound to channels, add extra checks */
	if (cmd->ident != conn->info_ident ||
	    conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
		return 0;

	cancel_delayed_work(&conn->info_timer);

	if (result != L2CAP_IR_SUCCESS) {
		/* Discovery failed; proceed without extended features. */
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);

		return 0;
	}

	switch (type) {
	case L2CAP_IT_FEAT_MASK:
		conn->feat_mask = get_unaligned_le32(rsp->data);

		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
			/* Continue discovery with the fixed-channel query. */
			struct l2cap_info_req req;
			req.type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);

			conn->info_ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, conn->info_ident,
				       L2CAP_INFO_REQ, sizeof(req), &req);
		} else {
			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
			conn->info_ident = 0;

			l2cap_conn_start(conn);
		}
		break;

	case L2CAP_IT_FIXED_CHAN:
		conn->fixed_chan_mask = rsp->data[0];
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
		break;
	}

	return 0;
}
4420
/* Handle an incoming Create Channel Request (AMP). amp_id 0 (BR/EDR) is
 * treated as a plain connect; otherwise the AMP controller is validated
 * and the new channel is bound to its high-speed hci_conn. Invalid
 * controllers are answered with L2CAP_CR_BAD_AMP.
 */
static int l2cap_create_channel_req(struct l2cap_conn *conn,
				    struct l2cap_cmd_hdr *cmd,
				    u16 cmd_len, void *data)
{
	struct l2cap_create_chan_req *req = data;
	struct l2cap_create_chan_rsp rsp;
	struct l2cap_chan *chan;
	struct hci_dev *hdev;
	u16 psm, scid;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	if (!conn->hs_enabled)
		return -EINVAL;

	psm = le16_to_cpu(req->psm);
	scid = le16_to_cpu(req->scid);

	BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);

	/* For controller id 0 make BR/EDR connection */
	if (req->amp_id == AMP_ID_BREDR) {
		l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
			      req->amp_id);
		return 0;
	}

	/* Validate AMP controller id */
	hdev = hci_dev_get(req->amp_id);
	if (!hdev)
		goto error;

	if (hdev->dev_type != HCI_AMP || !test_bit(HCI_UP, &hdev->flags)) {
		hci_dev_put(hdev);
		goto error;
	}

	chan = l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
			     req->amp_id);
	if (chan) {
		struct amp_mgr *mgr = conn->hcon->amp_mgr;
		struct hci_conn *hs_hcon;

		/* Find the AMP link to the same peer on this controller. */
		hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK, conn->dst);
		if (!hs_hcon) {
			hci_dev_put(hdev);
			return -EBADSLT;
		}

		BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon);

		mgr->bredr_chan = chan;
		chan->hs_hcon = hs_hcon;
		/* AMP links never use an L2CAP FCS. */
		chan->fcs = L2CAP_FCS_NONE;
		conn->mtu = hdev->block_mtu;
	}

	hci_dev_put(hdev);

	return 0;

error:
	rsp.dcid = 0;
	rsp.scid = cpu_to_le16(scid);
	rsp.result = __constant_cpu_to_le16(L2CAP_CR_BAD_AMP);
	rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
		       sizeof(rsp), &rsp);

	return 0;
}
4494
4495static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id)
4496{
4497 struct l2cap_move_chan_req req;
4498 u8 ident;
4499
4500 BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id);
4501
4502 ident = l2cap_get_ident(chan->conn);
4503 chan->ident = ident;
4504
4505 req.icid = cpu_to_le16(chan->scid);
4506 req.dest_amp_id = dest_amp_id;
4507
4508 l2cap_send_cmd(chan->conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req),
4509 &req);
4510
4511 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4512}
4513
4514static void l2cap_send_move_chan_rsp(struct l2cap_chan *chan, u16 result)
4515{
4516 struct l2cap_move_chan_rsp rsp;
4517
4518 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4519
4520 rsp.icid = cpu_to_le16(chan->dcid);
4521 rsp.result = cpu_to_le16(result);
4522
4523 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_RSP,
4524 sizeof(rsp), &rsp);
4525}
4526
4527static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result)
4528{
4529 struct l2cap_move_chan_cfm cfm;
4530
4531 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4532
4533 chan->ident = l2cap_get_ident(chan->conn);
4534
4535 cfm.icid = cpu_to_le16(chan->scid);
4536 cfm.result = cpu_to_le16(result);
4537
4538 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_CFM,
4539 sizeof(cfm), &cfm);
4540
4541 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4542}
4543
4544static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid)
4545{
4546 struct l2cap_move_chan_cfm cfm;
4547
4548 BT_DBG("conn %p, icid 0x%4.4x", conn, icid);
4549
4550 cfm.icid = cpu_to_le16(icid);
4551 cfm.result = __constant_cpu_to_le16(L2CAP_MC_UNCONFIRMED);
4552
4553 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM,
4554 sizeof(cfm), &cfm);
4555}
4556
4557static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4558 u16 icid)
4559{
4560 struct l2cap_move_chan_cfm_rsp rsp;
4561
4562 BT_DBG("icid 0x%4.4x", icid);
4563
4564 rsp.icid = cpu_to_le16(icid);
4565 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
4566}
4567
4568static void __release_logical_link(struct l2cap_chan *chan)
4569{
4570 chan->hs_hchan = NULL;
4571 chan->hs_hcon = NULL;
4572
4573 /* Placeholder - release the logical link */
4574}
4575
/* React to a failed logical link setup: disconnect a channel still being
 * created, or unwind an in-progress move according to our move role.
 */
static void l2cap_logical_fail(struct l2cap_chan *chan)
{
	/* Logical link setup failed */
	if (chan->state != BT_CONNECTED) {
		/* Create channel failure, disconnect */
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	switch (chan->move_role) {
	case L2CAP_MOVE_ROLE_RESPONDER:
		l2cap_move_done(chan);
		/* Tell the initiator we cannot complete the move. */
		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_SUPP);
		break;
	case L2CAP_MOVE_ROLE_INITIATOR:
		if (chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_COMP ||
		    chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_CFM) {
			/* Remote has only sent pending or
			 * success responses, clean up
			 */
			l2cap_move_done(chan);
		}

		/* Other amp move states imply that the move
		 * has already aborted
		 */
		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
		break;
	}
}
4606
/* Complete channel creation once the logical link is up: send the
 * deferred EFS Configure Response and, if the peer's configuration is
 * already done, finish ERTM setup and mark the channel ready.
 */
static void l2cap_logical_finish_create(struct l2cap_chan *chan,
					struct hci_chan *hchan)
{
	struct l2cap_conf_rsp rsp;

	chan->hs_hchan = hchan;
	chan->hs_hcon->l2cap_data = chan->conn;

	/* chan->ident still holds the deferred Configure Request id. */
	l2cap_send_efs_conf_rsp(chan, &rsp, chan->ident, 0);

	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
		int err;

		set_default_fcs(chan);

		err = l2cap_ertm_init(chan);
		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);
	}
}
4629
/* Advance the channel-move state machine after the logical link for a
 * move came up, acting according to our role and current move state.
 */
static void l2cap_logical_finish_move(struct l2cap_chan *chan,
				      struct hci_chan *hchan)
{
	chan->hs_hcon = hchan->conn;
	chan->hs_hcon->l2cap_data = chan->conn;

	BT_DBG("move_state %d", chan->move_state);

	switch (chan->move_state) {
	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
		/* Move confirm will be sent after a success
		 * response is received
		 */
		chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
		break;
	case L2CAP_MOVE_WAIT_LOGICAL_CFM:
		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
			/* Hold off until local busy condition clears. */
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
		} else if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		} else if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
		}
		break;
	default:
		/* Move was not in expected state, free the channel */
		__release_logical_link(chan);

		chan->move_state = L2CAP_MOVE_STABLE;
	}
}
4663
/* Call with chan locked */
/* Logical link confirmation callback: on failure, unwind and release the
 * link; on success, finish either channel creation (chan not yet
 * connected) or a channel move (chan already connected).
 */
void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
		       u8 status)
{
	BT_DBG("chan %p, hchan %p, status %d", chan, hchan, status);

	if (status) {
		l2cap_logical_fail(chan);
		__release_logical_link(chan);
		return;
	}

	if (chan->state != BT_CONNECTED) {
		/* Ignore logical link if channel is on BR/EDR */
		if (chan->local_amp_id != AMP_ID_BREDR)
			l2cap_logical_finish_create(chan, hchan);
	} else {
		l2cap_logical_finish_move(chan, hchan);
	}
}
4684
/* Begin moving a channel between controllers as the initiator.
 * From BR/EDR: only if the channel policy prefers AMP; the physical link
 * setup is started first (placeholder). From an AMP: move straight back
 * to BR/EDR (move_id 0) by sending the Move Channel Request.
 */
void l2cap_move_start(struct l2cap_chan *chan)
{
	BT_DBG("chan %p", chan);

	if (chan->local_amp_id == AMP_ID_BREDR) {
		if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED)
			return;
		chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
		chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
		/* Placeholder - start physical link setup */
	} else {
		chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
		chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
		/* move_id 0 requests a move back to BR/EDR. */
		chan->move_id = 0;
		l2cap_move_setup(chan);
		l2cap_send_move_chan_req(chan, 0);
	}
}
4703
/* Physical link confirmation for a channel being created on an AMP.
 * Outgoing channels proceed with a Create Channel Request (or fall back
 * to a BR/EDR connect); incoming channels get their deferred Create
 * Channel Response and, on success, start configuration.
 */
static void l2cap_do_create(struct l2cap_chan *chan, int result,
			    u8 local_amp_id, u8 remote_amp_id)
{
	BT_DBG("chan %p state %s %u -> %u", chan, state_to_string(chan->state),
	       local_amp_id, remote_amp_id);

	/* AMP channels never use an L2CAP FCS. */
	chan->fcs = L2CAP_FCS_NONE;

	/* Outgoing channel on AMP */
	if (chan->state == BT_CONNECT) {
		if (result == L2CAP_CR_SUCCESS) {
			chan->local_amp_id = local_amp_id;
			l2cap_send_create_chan_req(chan, remote_amp_id);
		} else {
			/* Revert to BR/EDR connect */
			l2cap_send_conn_req(chan);
		}

		return;
	}

	/* Incoming channel on AMP */
	if (__l2cap_no_conn_pending(chan)) {
		struct l2cap_conn_rsp rsp;
		char buf[128];
		rsp.scid = cpu_to_le16(chan->dcid);
		rsp.dcid = cpu_to_le16(chan->scid);

		if (result == L2CAP_CR_SUCCESS) {
			/* Send successful response */
			rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
			rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
		} else {
			/* Send negative response */
			rsp.result = __constant_cpu_to_le16(L2CAP_CR_NO_MEM);
			rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
		}

		l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_RSP,
			       sizeof(rsp), &rsp);

		if (result == L2CAP_CR_SUCCESS) {
			/* Channel accepted: enter config and send our
			 * first Configure Request.
			 */
			__l2cap_state_change(chan, BT_CONFIG);
			set_bit(CONF_REQ_SENT, &chan->conf_state);
			l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
				       L2CAP_CONF_REQ,
				       l2cap_build_conf_req(chan, buf), buf);
			chan->num_conf_req++;
		}
	}
}
4755
4756static void l2cap_do_move_initiate(struct l2cap_chan *chan, u8 local_amp_id,
4757 u8 remote_amp_id)
4758{
4759 l2cap_move_setup(chan);
4760 chan->move_id = local_amp_id;
4761 chan->move_state = L2CAP_MOVE_WAIT_RSP;
4762
4763 l2cap_send_move_chan_req(chan, remote_amp_id);
4764}
4765
/* As move responder with the physical link confirmed: attach the logical
 * link if it is already connected, otherwise wait for it.
 * NOTE(review): hchan is a placeholder and currently always NULL, so
 * only the NOT_ALLOWED branch can execute until the lookup is wired up.
 */
static void l2cap_do_move_respond(struct l2cap_chan *chan, int result)
{
	struct hci_chan *hchan = NULL;

	/* Placeholder - get hci_chan for logical link */

	if (hchan) {
		if (hchan->state == BT_CONNECTED) {
			/* Logical link is ready to go */
			chan->hs_hcon = hchan->conn;
			chan->hs_hcon->l2cap_data = chan->conn;
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);

			l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
		} else {
			/* Wait for logical link to be ready */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		}
	} else {
		/* Logical link not available */
		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_ALLOWED);
	}
}
4790
4791static void l2cap_do_move_cancel(struct l2cap_chan *chan, int result)
4792{
4793 if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4794 u8 rsp_result;
4795 if (result == -EINVAL)
4796 rsp_result = L2CAP_MR_BAD_ID;
4797 else
4798 rsp_result = L2CAP_MR_NOT_ALLOWED;
4799
4800 l2cap_send_move_chan_rsp(chan, rsp_result);
4801 }
4802
4803 chan->move_role = L2CAP_MOVE_ROLE_NONE;
4804 chan->move_state = L2CAP_MOVE_STABLE;
4805
4806 /* Restart data transmission */
4807 l2cap_ertm_send(chan);
4808}
4809
/* Handle completion of an AMP physical link.  Invoke with locked chan.
 *
 * NOTE(review): the channel lock is released here only on the early
 * BT_DISCONN/BT_CLOSED return; on all other paths the lock is left
 * held (presumably released by the caller or by the helpers below) —
 * confirm this asymmetry matches the callers' expectations.
 */
void __l2cap_physical_cfm(struct l2cap_chan *chan, int result)
{
	u8 local_amp_id = chan->local_amp_id;
	u8 remote_amp_id = chan->remote_amp_id;

	BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
	       chan, result, local_amp_id, remote_amp_id);

	/* Channel may already be tearing down */
	if (chan->state == BT_DISCONN || chan->state == BT_CLOSED) {
		l2cap_chan_unlock(chan);
		return;
	}

	if (chan->state != BT_CONNECTED) {
		/* Channel is still being created on the AMP */
		l2cap_do_create(chan, result, local_amp_id, remote_amp_id);
	} else if (result != L2CAP_MR_SUCCESS) {
		l2cap_do_move_cancel(chan, result);
	} else {
		/* Physical link is up: continue the move according to
		 * the role this side plays in it.
		 */
		switch (chan->move_role) {
		case L2CAP_MOVE_ROLE_INITIATOR:
			l2cap_do_move_initiate(chan, local_amp_id,
					       remote_amp_id);
			break;
		case L2CAP_MOVE_ROLE_RESPONDER:
			l2cap_do_move_respond(chan, result);
			break;
		default:
			l2cap_do_move_cancel(chan, result);
			break;
		}
	}
}
4843
/* Process an incoming Move Channel request (this side becomes the
 * responder).
 *
 * Validates that high-speed is enabled, that the target channel
 * exists and is eligible to move (dynamic CID, not pinned to BR/EDR
 * by policy, ERTM or streaming mode), and that the requested
 * destination controller is valid and differs from the current one.
 * Move collisions are resolved in favor of the side with the larger
 * bd_addr.  A Move Channel response carrying the computed result is
 * sent whenever the channel was found.
 */
static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
					 struct l2cap_cmd_hdr *cmd,
					 u16 cmd_len, void *data)
{
	struct l2cap_move_chan_req *req = data;
	struct l2cap_move_chan_rsp rsp;
	struct l2cap_chan *chan;
	u16 icid = 0;
	u16 result = L2CAP_MR_NOT_ALLOWED;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	icid = le16_to_cpu(req->icid);

	BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);

	if (!conn->hs_enabled)
		return -EINVAL;

	/* Returns the channel locked on success; unlocked at the end */
	chan = l2cap_get_chan_by_dcid(conn, icid);
	if (!chan) {
		rsp.icid = cpu_to_le16(icid);
		rsp.result = __constant_cpu_to_le16(L2CAP_MR_NOT_ALLOWED);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_MOVE_CHAN_RSP,
			       sizeof(rsp), &rsp);
		return 0;
	}

	chan->ident = cmd->ident;

	/* Only dynamic ERTM/streaming channels not pinned to BR/EDR
	 * may be moved.
	 */
	if (chan->scid < L2CAP_CID_DYN_START ||
	    chan->chan_policy == BT_CHANNEL_POLICY_BREDR_ONLY ||
	    (chan->mode != L2CAP_MODE_ERTM &&
	     chan->mode != L2CAP_MODE_STREAMING)) {
		result = L2CAP_MR_NOT_ALLOWED;
		goto send_move_response;
	}

	if (chan->local_amp_id == req->dest_amp_id) {
		result = L2CAP_MR_SAME_ID;
		goto send_move_response;
	}

	/* A non-BR/EDR destination must be a powered-up AMP controller */
	if (req->dest_amp_id != AMP_ID_BREDR) {
		struct hci_dev *hdev;
		hdev = hci_dev_get(req->dest_amp_id);
		if (!hdev || hdev->dev_type != HCI_AMP ||
		    !test_bit(HCI_UP, &hdev->flags)) {
			if (hdev)
				hci_dev_put(hdev);

			result = L2CAP_MR_BAD_ID;
			goto send_move_response;
		}
		hci_dev_put(hdev);
	}

	/* Detect a move collision. Only send a collision response
	 * if this side has "lost", otherwise proceed with the move.
	 * The winner has the larger bd_addr.
	 */
	if ((__chan_is_moving(chan) ||
	     chan->move_role != L2CAP_MOVE_ROLE_NONE) &&
	    bacmp(conn->src, conn->dst) > 0) {
		result = L2CAP_MR_COLLISION;
		goto send_move_response;
	}

	chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
	l2cap_move_setup(chan);
	chan->move_id = req->dest_amp_id;
	icid = chan->dcid;

	if (req->dest_amp_id == AMP_ID_BREDR) {
		/* Moving to BR/EDR */
		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
			result = L2CAP_MR_PEND;
		} else {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			result = L2CAP_MR_SUCCESS;
		}
	} else {
		chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
		/* Placeholder - uncomment when amp functions are available */
		/*amp_accept_physical(chan, req->dest_amp_id);*/
		result = L2CAP_MR_PEND;
	}

send_move_response:
	l2cap_send_move_chan_rsp(chan, result);

	l2cap_chan_unlock(chan);

	return 0;
}
4941
/* Continue a move (initiator side) after a successful or pending Move
 * Channel response from the remote.  Advances chan->move_state based
 * on the current state, the response result, and the readiness of the
 * logical link.  The hci_chan lookup is still a placeholder, so hchan
 * is always NULL here and the L2CAP_MOVE_WAIT_RSP case currently
 * always replies with an unconfirmed Move Channel confirm.
 */
static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
{
	struct l2cap_chan *chan;
	struct hci_chan *hchan = NULL;

	/* Returns the channel locked on success; unlocked at the end */
	chan = l2cap_get_chan_by_scid(conn, icid);
	if (!chan) {
		l2cap_send_move_chan_cfm_icid(conn, icid);
		return;
	}

	/* A pending result restarts the (extended) response timer */
	__clear_chan_timer(chan);
	if (result == L2CAP_MR_PEND)
		__set_chan_timer(chan, L2CAP_MOVE_ERTX_TIMEOUT);

	switch (chan->move_state) {
	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
		/* Move confirm will be sent when logical link
		 * is complete.
		 */
		chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		break;
	case L2CAP_MOVE_WAIT_RSP_SUCCESS:
		if (result == L2CAP_MR_PEND) {
			break;
		} else if (test_bit(CONN_LOCAL_BUSY,
				    &chan->conn_state)) {
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
		} else {
			/* Logical link is up or moving to BR/EDR,
			 * proceed with move
			 */
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		}
		break;
	case L2CAP_MOVE_WAIT_RSP:
		/* Moving to AMP */
		if (result == L2CAP_MR_SUCCESS) {
			/* Remote is ready, send confirm immediately
			 * after logical link is ready
			 */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		} else {
			/* Both logical link and move success
			 * are required to confirm
			 */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_COMP;
		}

		/* Placeholder - get hci_chan for logical link */
		if (!hchan) {
			/* Logical link not available */
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
			break;
		}

		/* If the logical link is not yet connected, do not
		 * send confirmation.
		 */
		if (hchan->state != BT_CONNECTED)
			break;

		/* Logical link is already ready to go */

		chan->hs_hcon = hchan->conn;
		chan->hs_hcon->l2cap_data = chan->conn;

		if (result == L2CAP_MR_SUCCESS) {
			/* Can confirm now */
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		} else {
			/* Now only need move success
			 * to confirm
			 */
			chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
		}

		l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
		break;
	default:
		/* Any other amp move state means the move failed. */
		chan->move_id = chan->local_amp_id;
		l2cap_move_done(chan);
		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
	}

	l2cap_chan_unlock(chan);
}
5031
5032static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
5033 u16 result)
5034{
5035 struct l2cap_chan *chan;
5036
5037 chan = l2cap_get_chan_by_ident(conn, ident);
5038 if (!chan) {
5039 /* Could not locate channel, icid is best guess */
5040 l2cap_send_move_chan_cfm_icid(conn, icid);
5041 return;
5042 }
5043
5044 __clear_chan_timer(chan);
5045
5046 if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
5047 if (result == L2CAP_MR_COLLISION) {
5048 chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
5049 } else {
5050 /* Cleanup - cancel move */
5051 chan->move_id = chan->local_amp_id;
5052 l2cap_move_done(chan);
5053 }
5054 }
5055
5056 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5057
5058 l2cap_chan_unlock(chan);
5059}
5060
5061static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
5062 struct l2cap_cmd_hdr *cmd,
5063 u16 cmd_len, void *data)
5064{
5065 struct l2cap_move_chan_rsp *rsp = data;
5066 u16 icid, result;
5067
5068 if (cmd_len != sizeof(*rsp))
5069 return -EPROTO;
5070
5071 icid = le16_to_cpu(rsp->icid);
5072 result = le16_to_cpu(rsp->result);
5073
5074 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5075
5076 if (result == L2CAP_MR_SUCCESS || result == L2CAP_MR_PEND)
5077 l2cap_move_continue(conn, icid, result);
5078 else
5079 l2cap_move_fail(conn, cmd->ident, icid, result);
5080
5081 return 0;
5082}
5083
/* Process a Move Channel confirm from the remote (responder side).
 * A confirmed result commits the move to the new controller (and
 * releases the old logical link when landing on BR/EDR); an
 * unconfirmed result rolls the move back.  The spec requires a
 * confirm response in all cases, even if the icid is unknown.
 */
static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
				      struct l2cap_cmd_hdr *cmd,
				      u16 cmd_len, void *data)
{
	struct l2cap_move_chan_cfm *cfm = data;
	struct l2cap_chan *chan;
	u16 icid, result;

	if (cmd_len != sizeof(*cfm))
		return -EPROTO;

	icid = le16_to_cpu(cfm->icid);
	result = le16_to_cpu(cfm->result);

	BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);

	/* Returns the channel locked on success; unlocked at the end */
	chan = l2cap_get_chan_by_dcid(conn, icid);
	if (!chan) {
		/* Spec requires a response even if the icid was not found */
		l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
		return 0;
	}

	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) {
		if (result == L2CAP_MC_CONFIRMED) {
			/* Commit to the destination controller */
			chan->local_amp_id = chan->move_id;
			if (chan->local_amp_id == AMP_ID_BREDR)
				__release_logical_link(chan);
		} else {
			/* Roll back to the current controller */
			chan->move_id = chan->local_amp_id;
		}

		l2cap_move_done(chan);
	}

	l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);

	l2cap_chan_unlock(chan);

	return 0;
}
5125
/* Process a Move Channel confirm response (initiator side).  This is
 * the final message of the move procedure: the move is committed to
 * the destination controller and, when landing back on BR/EDR, any
 * remaining high-speed logical link is released.
 */
static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
						 struct l2cap_cmd_hdr *cmd,
						 u16 cmd_len, void *data)
{
	struct l2cap_move_chan_cfm_rsp *rsp = data;
	struct l2cap_chan *chan;
	u16 icid;

	if (cmd_len != sizeof(*rsp))
		return -EPROTO;

	icid = le16_to_cpu(rsp->icid);

	BT_DBG("icid 0x%4.4x", icid);

	/* Returns the channel locked on success; unlocked at the end */
	chan = l2cap_get_chan_by_scid(conn, icid);
	if (!chan)
		return 0;

	__clear_chan_timer(chan);

	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) {
		chan->local_amp_id = chan->move_id;

		/* Moved back to BR/EDR: drop the high-speed link */
		if (chan->local_amp_id == AMP_ID_BREDR && chan->hs_hchan)
			__release_logical_link(chan);

		l2cap_move_done(chan);
	}

	l2cap_chan_unlock(chan);

	return 0;
}
5160
5161static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
5162 u16 to_multiplier)
5163{
5164 u16 max_latency;
5165
5166 if (min > max || min < 6 || max > 3200)
5167 return -EINVAL;
5168
5169 if (to_multiplier < 10 || to_multiplier > 3200)
5170 return -EINVAL;
5171
5172 if (max >= to_multiplier * 8)
5173 return -EINVAL;
5174
5175 max_latency = (to_multiplier * 8 / max) - 1;
5176 if (latency > 499 || latency > max_latency)
5177 return -EINVAL;
5178
5179 return 0;
5180}
5181
/* Handle an LE Connection Parameter Update request from the peer.
 * Only valid when the local side is master.  The parameters are range
 * checked with l2cap_check_conn_param(); an accept/reject response is
 * always sent, and on accept the controller is asked to update the
 * connection parameters.
 */
static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
					      struct l2cap_cmd_hdr *cmd,
					      u8 *data)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_conn_param_update_req *req;
	struct l2cap_conn_param_update_rsp rsp;
	u16 min, max, latency, to_multiplier, cmd_len;
	int err;

	/* Only the master may be asked to update parameters */
	if (!(hcon->link_mode & HCI_LM_MASTER))
		return -EINVAL;

	cmd_len = __le16_to_cpu(cmd->len);
	if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
		return -EPROTO;

	req = (struct l2cap_conn_param_update_req *) data;
	min = __le16_to_cpu(req->min);
	max = __le16_to_cpu(req->max);
	latency = __le16_to_cpu(req->latency);
	to_multiplier = __le16_to_cpu(req->to_multiplier);

	BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
	       min, max, latency, to_multiplier);

	memset(&rsp, 0, sizeof(rsp));

	err = l2cap_check_conn_param(min, max, latency, to_multiplier);
	if (err)
		rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
	else
		rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
		       sizeof(rsp), &rsp);

	/* Apply the new parameters only after accepting them */
	if (!err)
		hci_le_conn_update(hcon, min, max, latency, to_multiplier);

	return 0;
}
5224
/* Dispatch one BR/EDR signaling command to its handler.
 *
 * Only request handlers propagate their error codes (so the caller
 * can generate a Command Reject); the return values of response
 * handlers are intentionally ignored, since rejecting a response is
 * not meaningful.  Echo requests are answered inline by reflecting
 * the payload.
 */
static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
				      struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				      u8 *data)
{
	int err = 0;

	switch (cmd->code) {
	case L2CAP_COMMAND_REJ:
		l2cap_command_rej(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_REQ:
		err = l2cap_connect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_RSP:
	case L2CAP_CREATE_CHAN_RSP:
		l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONF_REQ:
		err = l2cap_config_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONF_RSP:
		l2cap_config_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_REQ:
		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_RSP:
		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_ECHO_REQ:
		l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
		break;

	case L2CAP_ECHO_RSP:
		break;

	case L2CAP_INFO_REQ:
		err = l2cap_information_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_INFO_RSP:
		l2cap_information_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CREATE_CHAN_REQ:
		err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_REQ:
		err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_RSP:
		l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_CFM:
		err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_CFM_RSP:
		l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
		break;

	default:
		BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
		err = -EINVAL;
		break;
	}

	return err;
}
5304
5305static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
5306 struct l2cap_cmd_hdr *cmd, u8 *data)
5307{
5308 switch (cmd->code) {
5309 case L2CAP_COMMAND_REJ:
5310 return 0;
5311
5312 case L2CAP_CONN_PARAM_UPDATE_REQ:
5313 return l2cap_conn_param_update_req(conn, cmd, data);
5314
5315 case L2CAP_CONN_PARAM_UPDATE_RSP:
5316 return 0;
5317
5318 default:
5319 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
5320 return -EINVAL;
5321 }
5322}
5323
5324static __le16 l2cap_err_to_reason(int err)
5325{
5326 switch (err) {
5327 case -EBADSLT:
5328 return __constant_cpu_to_le16(L2CAP_REJ_INVALID_CID);
5329 case -EMSGSIZE:
5330 return __constant_cpu_to_le16(L2CAP_REJ_MTU_EXCEEDED);
5331 case -EINVAL:
5332 case -EPROTO:
5333 default:
5334 return __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
5335 }
5336}
5337
5338static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
5339 struct sk_buff *skb)
5340{
5341 struct hci_conn *hcon = conn->hcon;
5342 struct l2cap_cmd_hdr *cmd;
5343 u16 len;
5344 int err;
5345
5346 if (hcon->type != LE_LINK)
5347 goto drop;
5348
5349 if (skb->len < L2CAP_CMD_HDR_SIZE)
5350 goto drop;
5351
5352 cmd = (void *) skb->data;
5353 skb_pull(skb, L2CAP_CMD_HDR_SIZE);
5354
5355 len = le16_to_cpu(cmd->len);
5356
5357 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, cmd->ident);
5358
5359 if (len != skb->len || !cmd->ident) {
5360 BT_DBG("corrupted command");
5361 goto drop;
5362 }
5363
5364 err = l2cap_le_sig_cmd(conn, cmd, skb->data);
5365 if (err) {
5366 struct l2cap_cmd_rej_unk rej;
5367
5368 BT_ERR("Wrong link type (%d)", err);
5369
5370 rej.reason = l2cap_err_to_reason(err);
5371 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
5372 sizeof(rej), &rej);
5373 }
5374
5375drop:
5376 kfree_skb(skb);
5377}
5378
5379static inline void l2cap_sig_channel(struct l2cap_conn *conn,
5380 struct sk_buff *skb)
5381{
5382 struct hci_conn *hcon = conn->hcon;
5383 u8 *data = skb->data;
5384 int len = skb->len;
5385 struct l2cap_cmd_hdr cmd;
5386 int err;
5387
5388 l2cap_raw_recv(conn, skb);
5389
5390 if (hcon->type != ACL_LINK)
5391 goto drop;
5392
5393 while (len >= L2CAP_CMD_HDR_SIZE) {
5394 u16 cmd_len;
5395 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
5396 data += L2CAP_CMD_HDR_SIZE;
5397 len -= L2CAP_CMD_HDR_SIZE;
5398
5399 cmd_len = le16_to_cpu(cmd.len);
5400
5401 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len,
5402 cmd.ident);
5403
5404 if (cmd_len > len || !cmd.ident) {
5405 BT_DBG("corrupted command");
5406 break;
5407 }
5408
5409 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
5410 if (err) {
5411 struct l2cap_cmd_rej_unk rej;
5412
5413 BT_ERR("Wrong link type (%d)", err);
5414
5415 rej.reason = l2cap_err_to_reason(err);
5416 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ,
5417 sizeof(rej), &rej);
5418 }
5419
5420 data += cmd_len;
5421 len -= cmd_len;
5422 }
5423
5424drop:
5425 kfree_skb(skb);
5426}
5427
/* Verify the FCS (CRC-16) on a received ERTM/streaming frame.  The
 * FCS covers the L2CAP header (which sits immediately before
 * skb->data) plus the payload.  When FCS is in use, the trailing FCS
 * bytes are trimmed off the skb and compared against a locally
 * computed CRC.  Returns 0 on success or when no FCS is in use,
 * -EBADMSG on mismatch.
 *
 * NOTE(review): skb->len is assumed to be at least L2CAP_FCS_SIZE
 * here — presumably guaranteed by earlier length validation; confirm
 * against the callers.
 */
static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
{
	u16 our_fcs, rcv_fcs;
	int hdr_size;

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		hdr_size = L2CAP_EXT_HDR_SIZE;
	else
		hdr_size = L2CAP_ENH_HDR_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16) {
		/* After the trim, skb->data + skb->len points at the
		 * received FCS bytes that were just cut off.
		 */
		skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
		rcv_fcs = get_unaligned_le16(skb->data + skb->len);
		our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);

		if (our_fcs != rcv_fcs)
			return -EBADMSG;
	}
	return 0;
}
5448
/* Answer a poll (P=1) from the remote with a frame carrying the F
 * bit: an RNR if we are locally busy, otherwise pending I-frames
 * (the F bit piggybacks on one of them via CONN_SEND_FBIT), or a
 * plain RR as a last resort if no frame carried it.
 */
static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;

	BT_DBG("chan %p", chan);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.final = 1;
	control.reqseq = chan->buffer_seq;
	set_bit(CONN_SEND_FBIT, &chan->conn_state);

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		control.super = L2CAP_SUPER_RNR;
		l2cap_send_sframe(chan, &control);
	}

	/* Leaving remote-busy with unacked frames outstanding restarts
	 * the retransmission timer.
	 */
	if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
	    chan->unacked_frames > 0)
		__set_retrans_timer(chan);

	/* Send pending iframes */
	l2cap_ertm_send(chan);

	if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
		/* F-bit wasn't sent in an s-frame or i-frame yet, so
		 * send it now.
		 */
		control.super = L2CAP_SUPER_RR;
		l2cap_send_sframe(chan, &control);
	}
}
5482
/* Append new_frag to skb's frag_list, using *last_frag as a cursor so
 * the list never has to be walked.  skb's length accounting is
 * updated to cover the new fragment.
 */
static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
			    struct sk_buff **last_frag)
{
	/* skb->len reflects data in skb as well as all fragments
	 * skb->data_len reflects only data in fragments
	 */
	if (!skb_has_frag_list(skb))
		skb_shinfo(skb)->frag_list = new_frag;

	new_frag->next = NULL;

	(*last_frag)->next = new_frag;
	*last_frag = new_frag;

	skb->len += new_frag->len;
	skb->data_len += new_frag->len;
	skb->truesize += new_frag->truesize;
}
5501
/* Reassemble an SDU from ERTM/streaming I-frames according to the
 * frame's SAR (segmentation and reassembly) field, delivering each
 * complete SDU via chan->ops->recv().
 *
 * Ownership: on success the skb is either consumed by recv() or
 * attached to chan->sdu (skb is then set to NULL); on any error both
 * the skb and any partially reassembled SDU are freed here.  err
 * starts as -EINVAL so that any SAR sequencing violation (e.g. a
 * CONTINUE with no SDU in progress) falls through to the error path.
 */
static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
				struct l2cap_ctrl *control)
{
	int err = -EINVAL;

	switch (control->sar) {
	case L2CAP_SAR_UNSEGMENTED:
		/* Whole SDU in one frame; invalid mid-reassembly */
		if (chan->sdu)
			break;

		err = chan->ops->recv(chan, skb);
		break;

	case L2CAP_SAR_START:
		/* First fragment; invalid mid-reassembly */
		if (chan->sdu)
			break;

		/* The first fragment carries the total SDU length */
		chan->sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, L2CAP_SDULEN_SIZE);

		if (chan->sdu_len > chan->imtu) {
			err = -EMSGSIZE;
			break;
		}

		/* A start fragment must be shorter than the full SDU */
		if (skb->len >= chan->sdu_len)
			break;

		chan->sdu = skb;
		chan->sdu_last_frag = skb;

		skb = NULL;
		err = 0;
		break;

	case L2CAP_SAR_CONTINUE:
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* A continuation must not complete or overflow the SDU */
		if (chan->sdu->len >= chan->sdu_len)
			break;

		err = 0;
		break;

	case L2CAP_SAR_END:
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* The end fragment must complete the SDU exactly */
		if (chan->sdu->len != chan->sdu_len)
			break;

		err = chan->ops->recv(chan, chan->sdu);

		if (!err) {
			/* Reassembly complete */
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;
		}
		break;
	}

	if (err) {
		/* Drop the frame and any partial SDU (kfree_skb is a
		 * no-op on NULL).
		 */
		kfree_skb(skb);
		kfree_skb(chan->sdu);
		chan->sdu = NULL;
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;
	}

	return err;
}
5583
/* Re-segment queued outgoing data after a channel move changes the
 * usable PDU size.  Not implemented yet; currently a successful no-op.
 */
static int l2cap_resegment(struct l2cap_chan *chan)
{
	/* Placeholder */
	return 0;
}
5589
5590void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
5591{
5592 u8 event;
5593
5594 if (chan->mode != L2CAP_MODE_ERTM)
5595 return;
5596
5597 event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
5598 l2cap_tx(chan, NULL, NULL, event);
5599}
5600
/* Deliver frames buffered in srej_q during SREJ recovery.  Frames are
 * handed to l2cap_reassemble_sdu() in sequence order until a gap is
 * encountered, local busy is asserted, or reassembly fails.  Once the
 * queue drains completely, the receiver returns to the normal RECV
 * state and acks the remote.
 */
static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
{
	int err = 0;
	/* Pass sequential frames to l2cap_reassemble_sdu()
	 * until a gap is encountered.
	 */

	BT_DBG("chan %p", chan);

	while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		struct sk_buff *skb;
		BT_DBG("Searching for skb with txseq %d (queue len %d)",
		       chan->buffer_seq, skb_queue_len(&chan->srej_q));

		skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);

		if (!skb)
			break;

		skb_unlink(skb, &chan->srej_q);
		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
		err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->control);
		if (err)
			break;
	}

	if (skb_queue_empty(&chan->srej_q)) {
		chan->rx_state = L2CAP_RX_STATE_RECV;
		l2cap_send_ack(chan);
	}

	return err;
}
5634
/* Handle a received SREJ S-frame: retransmit the single requested
 * I-frame, tracking poll/final bits so retransmissions are not
 * duplicated while a poll exchange is outstanding.  An invalid reqseq
 * or exceeding the retry limit disconnects the channel.
 */
static void l2cap_handle_srej(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* reqseq can never equal next_tx_seq - nothing was sent with
	 * that sequence number yet.
	 */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	if (skb == NULL) {
		BT_DBG("Seq %d not available for retransmission",
		       control->reqseq);
		return;
	}

	if (chan->max_tx != 0 && bt_cb(skb)->control.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	if (control->poll) {
		l2cap_pass_to_tx(chan, control);

		set_bit(CONN_SEND_FBIT, &chan->conn_state);
		l2cap_retransmit(chan, control);
		l2cap_ertm_send(chan);

		/* Remember this SREJ so the matching final frame is
		 * not treated as a second retransmission request.
		 */
		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
			set_bit(CONN_SREJ_ACT, &chan->conn_state);
			chan->srej_save_reqseq = control->reqseq;
		}
	} else {
		l2cap_pass_to_tx_fbit(chan, control);

		if (control->final) {
			/* Only retransmit if this final does not answer
			 * an SREJ already acted on.
			 */
			if (chan->srej_save_reqseq != control->reqseq ||
			    !test_and_clear_bit(CONN_SREJ_ACT,
						&chan->conn_state))
				l2cap_retransmit(chan, control);
		} else {
			l2cap_retransmit(chan, control);
			if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
				set_bit(CONN_SREJ_ACT, &chan->conn_state);
				chan->srej_save_reqseq = control->reqseq;
			}
		}
	}
}
5692
/* Handle a received REJ S-frame: retransmit all unacked I-frames
 * starting at reqseq, with poll/final bookkeeping to avoid double
 * retransmission during a poll exchange.  An invalid reqseq or
 * exceeding the retry limit disconnects the channel.
 */
static void l2cap_handle_rej(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* reqseq can never equal next_tx_seq - nothing was sent with
	 * that sequence number yet.
	 */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	if (chan->max_tx && skb &&
	    bt_cb(skb)->control.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	l2cap_pass_to_tx(chan, control);

	if (control->final) {
		/* A final REJ triggers retransmission only if we have
		 * not already retransmitted for this poll exchange.
		 */
		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
			l2cap_retransmit_all(chan, control);
	} else {
		l2cap_retransmit_all(chan, control);
		l2cap_ertm_send(chan);
		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
			set_bit(CONN_REJ_ACT, &chan->conn_state);
	}
}
5729
/* Classify the tx sequence number of a received I-frame relative to
 * the receive window and any outstanding SREJ state.  The returned
 * L2CAP_TXSEQ_* value tells the RX state machine whether the frame is
 * the expected one, a duplicate, one that satisfies (or violates) an
 * outstanding SREJ, unexpected (a sequence gap), or invalid.
 */
static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
{
	BT_DBG("chan %p, txseq %d", chan, txseq);

	BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
	       chan->expected_tx_seq);

	if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			/* See notes below regarding "double poll" and
			 * invalid packets.
			 */
			if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
				BT_DBG("Invalid/Ignore - after SREJ");
				return L2CAP_TXSEQ_INVALID_IGNORE;
			} else {
				BT_DBG("Invalid - in window after SREJ sent");
				return L2CAP_TXSEQ_INVALID;
			}
		}

		if (chan->srej_list.head == txseq) {
			BT_DBG("Expected SREJ");
			return L2CAP_TXSEQ_EXPECTED_SREJ;
		}

		if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
			BT_DBG("Duplicate SREJ - txseq already stored");
			return L2CAP_TXSEQ_DUPLICATE_SREJ;
		}

		if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
			BT_DBG("Unexpected SREJ - not requested");
			return L2CAP_TXSEQ_UNEXPECTED_SREJ;
		}
	}

	if (chan->expected_tx_seq == txseq) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		} else {
			BT_DBG("Expected");
			return L2CAP_TXSEQ_EXPECTED;
		}
	}

	if (__seq_offset(chan, txseq, chan->last_acked_seq) <
	    __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
		BT_DBG("Duplicate - expected_tx_seq later than txseq");
		return L2CAP_TXSEQ_DUPLICATE;
	}

	if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
		/* A source of invalid packets is a "double poll" condition,
		 * where delays cause us to send multiple poll packets. If
		 * the remote stack receives and processes both polls,
		 * sequence numbers can wrap around in such a way that a
		 * resent frame has a sequence number that looks like new data
		 * with a sequence gap. This would trigger an erroneous SREJ
		 * request.
		 *
		 * Fortunately, this is impossible with a tx window that's
		 * less than half of the maximum sequence number, which allows
		 * invalid frames to be safely ignored.
		 *
		 * With tx window sizes greater than half of the tx window
		 * maximum, the frame is invalid and cannot be ignored. This
		 * causes a disconnect.
		 */

		if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
			BT_DBG("Invalid/Ignore - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID_IGNORE;
		} else {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		}
	} else {
		BT_DBG("Unexpected - txseq indicates missing frames");
		return L2CAP_TXSEQ_UNEXPECTED;
	}
}
5815
/* ERTM receive state machine: normal RECV state.
 *
 * Handles incoming I-frames (in-order delivery, duplicate discard,
 * gap detection with transition to the SREJ_SENT state) and S-frames
 * (RR, RNR, REJ, SREJ), driving acks, retransmissions, and the busy
 * flags as required.  The skb is freed here unless it was consumed
 * (reassembled or queued on srej_q), tracked via skb_in_use.
 */
static int l2cap_rx_state_recv(struct l2cap_chan *chan,
			       struct l2cap_ctrl *control,
			       struct sk_buff *skb, u8 event)
{
	int err = 0;
	bool skb_in_use = false;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, control->txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			l2cap_pass_to_tx(chan, control);

			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding expected seq %d",
				       control->txseq);
				break;
			}

			chan->expected_tx_seq = __next_seq(chan,
							   control->txseq);

			chan->buffer_seq = chan->expected_tx_seq;
			skb_in_use = true;

			err = l2cap_reassemble_sdu(chan, skb, control);
			if (err)
				break;

			if (control->final) {
				/* Retransmit only if this final was not
				 * already handled for this poll exchange.
				 */
				if (!test_and_clear_bit(CONN_REJ_ACT,
							&chan->conn_state)) {
					control->final = 0;
					l2cap_retransmit_all(chan, control);
					l2cap_ertm_send(chan);
				}
			}

			if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
				l2cap_send_ack(chan);
			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			l2cap_pass_to_tx(chan, control);

			/* Can't issue SREJ frames in the local busy state.
			 * Drop this frame, it will be seen as missing
			 * when local busy is exited.
			 */
			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding unexpected seq %d",
				       control->txseq);
				break;
			}

			/* There was a gap in the sequence, so an SREJ
			 * must be sent for each missing frame. The
			 * current frame is stored for later use.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			clear_bit(CONN_SREJ_ACT, &chan->conn_state);
			l2cap_seq_list_clear(&chan->srej_list);
			l2cap_send_srej(chan, control->txseq);

			chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			/* Already received; only the ack info is useful */
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(chan, ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			/* Don't retransmit while a channel move is in
			 * progress or if already done for this poll.
			 */
			if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state) &&
			    !__chan_is_moving(chan)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			l2cap_send_i_or_rr_or_rnr(chan);
		} else {
			/* Leaving remote-busy restarts the retransmission
			 * timer if frames are still unacked.
			 */
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_ertm_send(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		if (control && control->poll) {
			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_rr_or_rnr(chan, 0);
		}
		__clear_retrans_timer(chan);
		l2cap_seq_list_clear(&chan->retrans_list);
		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	default:
		break;
	}

	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
5949
5950static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
5951 struct l2cap_ctrl *control,
5952 struct sk_buff *skb, u8 event)
5953{
5954 int err = 0;
5955 u16 txseq = control->txseq;
5956 bool skb_in_use = false;
5957
5958 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
5959 event);
5960
5961 switch (event) {
5962 case L2CAP_EV_RECV_IFRAME:
5963 switch (l2cap_classify_txseq(chan, txseq)) {
5964 case L2CAP_TXSEQ_EXPECTED:
5965 /* Keep frame for reassembly later */
5966 l2cap_pass_to_tx(chan, control);
5967 skb_queue_tail(&chan->srej_q, skb);
5968 skb_in_use = true;
5969 BT_DBG("Queued %p (queue len %d)", skb,
5970 skb_queue_len(&chan->srej_q));
5971
5972 chan->expected_tx_seq = __next_seq(chan, txseq);
5973 break;
5974 case L2CAP_TXSEQ_EXPECTED_SREJ:
5975 l2cap_seq_list_pop(&chan->srej_list);
5976
5977 l2cap_pass_to_tx(chan, control);
5978 skb_queue_tail(&chan->srej_q, skb);
5979 skb_in_use = true;
5980 BT_DBG("Queued %p (queue len %d)", skb,
5981 skb_queue_len(&chan->srej_q));
5982
5983 err = l2cap_rx_queued_iframes(chan);
5984 if (err)
5985 break;
5986
5987 break;
5988 case L2CAP_TXSEQ_UNEXPECTED:
5989 /* Got a frame that can't be reassembled yet.
5990 * Save it for later, and send SREJs to cover
5991 * the missing frames.
5992 */
5993 skb_queue_tail(&chan->srej_q, skb);
5994 skb_in_use = true;
5995 BT_DBG("Queued %p (queue len %d)", skb,
5996 skb_queue_len(&chan->srej_q));
5997
5998 l2cap_pass_to_tx(chan, control);
5999 l2cap_send_srej(chan, control->txseq);
6000 break;
6001 case L2CAP_TXSEQ_UNEXPECTED_SREJ:
6002 /* This frame was requested with an SREJ, but
6003 * some expected retransmitted frames are
6004 * missing. Request retransmission of missing
6005 * SREJ'd frames.
6006 */
6007 skb_queue_tail(&chan->srej_q, skb);
6008 skb_in_use = true;
6009 BT_DBG("Queued %p (queue len %d)", skb,
6010 skb_queue_len(&chan->srej_q));
6011
6012 l2cap_pass_to_tx(chan, control);
6013 l2cap_send_srej_list(chan, control->txseq);
6014 break;
6015 case L2CAP_TXSEQ_DUPLICATE_SREJ:
6016 /* We've already queued this frame. Drop this copy. */
6017 l2cap_pass_to_tx(chan, control);
6018 break;
6019 case L2CAP_TXSEQ_DUPLICATE:
6020 /* Expecting a later sequence number, so this frame
6021 * was already received. Ignore it completely.
6022 */
6023 break;
6024 case L2CAP_TXSEQ_INVALID_IGNORE:
6025 break;
6026 case L2CAP_TXSEQ_INVALID:
6027 default:
6028 l2cap_send_disconn_req(chan, ECONNRESET);
6029 break;
6030 }
6031 break;
6032 case L2CAP_EV_RECV_RR:
6033 l2cap_pass_to_tx(chan, control);
6034 if (control->final) {
6035 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6036
6037 if (!test_and_clear_bit(CONN_REJ_ACT,
6038 &chan->conn_state)) {
6039 control->final = 0;
6040 l2cap_retransmit_all(chan, control);
6041 }
6042
6043 l2cap_ertm_send(chan);
6044 } else if (control->poll) {
6045 if (test_and_clear_bit(CONN_REMOTE_BUSY,
6046 &chan->conn_state) &&
6047 chan->unacked_frames) {
6048 __set_retrans_timer(chan);
6049 }
6050
6051 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6052 l2cap_send_srej_tail(chan);
6053 } else {
6054 if (test_and_clear_bit(CONN_REMOTE_BUSY,
6055 &chan->conn_state) &&
6056 chan->unacked_frames)
6057 __set_retrans_timer(chan);
6058
6059 l2cap_send_ack(chan);
6060 }
6061 break;
6062 case L2CAP_EV_RECV_RNR:
6063 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6064 l2cap_pass_to_tx(chan, control);
6065 if (control->poll) {
6066 l2cap_send_srej_tail(chan);
6067 } else {
6068 struct l2cap_ctrl rr_control;
6069 memset(&rr_control, 0, sizeof(rr_control));
6070 rr_control.sframe = 1;
6071 rr_control.super = L2CAP_SUPER_RR;
6072 rr_control.reqseq = chan->buffer_seq;
6073 l2cap_send_sframe(chan, &rr_control);
6074 }
6075
6076 break;
6077 case L2CAP_EV_RECV_REJ:
6078 l2cap_handle_rej(chan, control);
6079 break;
6080 case L2CAP_EV_RECV_SREJ:
6081 l2cap_handle_srej(chan, control);
6082 break;
6083 }
6084
6085 if (skb && !skb_in_use) {
6086 BT_DBG("Freeing %p", skb);
6087 kfree_skb(skb);
6088 }
6089
6090 return err;
6091}
6092
6093static int l2cap_finish_move(struct l2cap_chan *chan)
6094{
6095 BT_DBG("chan %p", chan);
6096
6097 chan->rx_state = L2CAP_RX_STATE_RECV;
6098
6099 if (chan->hs_hcon)
6100 chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
6101 else
6102 chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6103
6104 return l2cap_resegment(chan);
6105}
6106
/* ERTM receive handler for the WAIT_P state (channel move in progress,
 * waiting for a frame with the P-bit set from the peer).
 *
 * Returns 0 on success, -EPROTO for a protocol violation (no P-bit, or
 * an I-frame where only S-frames are acceptable), or an error from
 * resegmentation / the RECV-state handler.
 */
static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control,
				 struct sk_buff *skb, u8 event)
{
	int err;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	/* Only a poll frame may take us out of WAIT_P */
	if (!control->poll)
		return -EPROTO;

	l2cap_process_reqseq(chan, control->reqseq);

	/* Restart transmission from the front of the tx queue */
	if (!skb_queue_empty(&chan->tx_q))
		chan->tx_send_head = skb_peek(&chan->tx_q);
	else
		chan->tx_send_head = NULL;

	/* Rewind next_tx_seq to the point expected
	 * by the receiver.
	 */
	chan->next_tx_seq = control->reqseq;
	chan->unacked_frames = 0;

	/* Switch to the new controller's MTU and resegment */
	err = l2cap_finish_move(chan);
	if (err)
		return err;

	/* Respond to the poll with the F-bit set */
	set_bit(CONN_SEND_FBIT, &chan->conn_state);
	l2cap_send_i_or_rr_or_rnr(chan);

	/* I-frames are not valid while waiting for the poll */
	if (event == L2CAP_EV_RECV_IFRAME)
		return -EPROTO;

	/* Process the triggering S-frame in the normal RECV state */
	return l2cap_rx_state_recv(chan, control, NULL, event);
}
6144
6145static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
6146 struct l2cap_ctrl *control,
6147 struct sk_buff *skb, u8 event)
6148{
6149 int err;
6150
6151 if (!control->final)
6152 return -EPROTO;
6153
6154 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6155
6156 chan->rx_state = L2CAP_RX_STATE_RECV;
6157 l2cap_process_reqseq(chan, control->reqseq);
6158
6159 if (!skb_queue_empty(&chan->tx_q))
6160 chan->tx_send_head = skb_peek(&chan->tx_q);
6161 else
6162 chan->tx_send_head = NULL;
6163
6164 /* Rewind next_tx_seq to the point expected
6165 * by the receiver.
6166 */
6167 chan->next_tx_seq = control->reqseq;
6168 chan->unacked_frames = 0;
6169
6170 if (chan->hs_hcon)
6171 chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
6172 else
6173 chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6174
6175 err = l2cap_resegment(chan);
6176
6177 if (!err)
6178 err = l2cap_rx_state_recv(chan, control, skb, event);
6179
6180 return err;
6181}
6182
6183static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
6184{
6185 /* Make sure reqseq is for a packet that has been sent but not acked */
6186 u16 unacked;
6187
6188 unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
6189 return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
6190}
6191
6192static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6193 struct sk_buff *skb, u8 event)
6194{
6195 int err = 0;
6196
6197 BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
6198 control, skb, event, chan->rx_state);
6199
6200 if (__valid_reqseq(chan, control->reqseq)) {
6201 switch (chan->rx_state) {
6202 case L2CAP_RX_STATE_RECV:
6203 err = l2cap_rx_state_recv(chan, control, skb, event);
6204 break;
6205 case L2CAP_RX_STATE_SREJ_SENT:
6206 err = l2cap_rx_state_srej_sent(chan, control, skb,
6207 event);
6208 break;
6209 case L2CAP_RX_STATE_WAIT_P:
6210 err = l2cap_rx_state_wait_p(chan, control, skb, event);
6211 break;
6212 case L2CAP_RX_STATE_WAIT_F:
6213 err = l2cap_rx_state_wait_f(chan, control, skb, event);
6214 break;
6215 default:
6216 /* shut it down */
6217 break;
6218 }
6219 } else {
6220 BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
6221 control->reqseq, chan->next_tx_seq,
6222 chan->expected_ack_seq);
6223 l2cap_send_disconn_req(chan, ECONNRESET);
6224 }
6225
6226 return err;
6227}
6228
6229static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6230 struct sk_buff *skb)
6231{
6232 int err = 0;
6233
6234 BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
6235 chan->rx_state);
6236
6237 if (l2cap_classify_txseq(chan, control->txseq) ==
6238 L2CAP_TXSEQ_EXPECTED) {
6239 l2cap_pass_to_tx(chan, control);
6240
6241 BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
6242 __next_seq(chan, chan->buffer_seq));
6243
6244 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
6245
6246 l2cap_reassemble_sdu(chan, skb, control);
6247 } else {
6248 if (chan->sdu) {
6249 kfree_skb(chan->sdu);
6250 chan->sdu = NULL;
6251 }
6252 chan->sdu_last_frag = NULL;
6253 chan->sdu_len = 0;
6254
6255 if (skb) {
6256 BT_DBG("Freeing %p", skb);
6257 kfree_skb(skb);
6258 }
6259 }
6260
6261 chan->last_acked_seq = control->txseq;
6262 chan->expected_tx_seq = __next_seq(chan, control->txseq);
6263
6264 return err;
6265}
6266
/* Validate and dispatch a single ERTM/streaming frame on a channel.
 *
 * Checks FCS, payload length against MPS, and F/P-bit validity, then
 * routes I-frames and S-frames into the rx state machines.  Consumes
 * the skb on all paths (directly or via the state handlers).  Always
 * returns 0; protocol errors disconnect the channel instead.
 */
static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct l2cap_ctrl *control = &bt_cb(skb)->control;
	u16 len;
	u8 event;

	/* Parse the (extended or standard) control field into bt_cb */
	__unpack_control(chan, skb);

	len = skb->len;

	/*
	 * We can just drop the corrupted I-frame here.
	 * Receiver will miss it and start proper recovery
	 * procedures and ask for retransmission.
	 */
	if (l2cap_check_fcs(chan, skb))
		goto drop;

	/* SDU length field is only present in the first segment */
	if (!control->sframe && control->sar == L2CAP_SAR_START)
		len -= L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		len -= L2CAP_FCS_SIZE;

	/* Payload may not exceed the negotiated MPS */
	if (len > chan->mps) {
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto drop;
	}

	if (!control->sframe) {
		int err;

		BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
		       control->sar, control->reqseq, control->final,
		       control->txseq);

		/* Validate F-bit - F=0 always valid, F=1 only
		 * valid in TX WAIT_F
		 */
		if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
			goto drop;

		if (chan->mode != L2CAP_MODE_STREAMING) {
			event = L2CAP_EV_RECV_IFRAME;
			err = l2cap_rx(chan, control, skb, event);
		} else {
			err = l2cap_stream_rx(chan, control, skb);
		}

		if (err)
			l2cap_send_disconn_req(chan, ECONNRESET);
	} else {
		/* Map S-frame function field to rx state machine event */
		const u8 rx_func_to_event[4] = {
			L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
			L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
		};

		/* Only I-frames are expected in streaming mode */
		if (chan->mode == L2CAP_MODE_STREAMING)
			goto drop;

		BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
		       control->reqseq, control->final, control->poll,
		       control->super);

		/* S-frames carry no payload beyond the control field */
		if (len != 0) {
			BT_ERR("Trailing bytes: %d in sframe", len);
			l2cap_send_disconn_req(chan, ECONNRESET);
			goto drop;
		}

		/* Validate F and P bits */
		if (control->final && (control->poll ||
				       chan->tx_state != L2CAP_TX_STATE_WAIT_F))
			goto drop;

		event = rx_func_to_event[control->super];
		if (l2cap_rx(chan, control, skb, event))
			l2cap_send_disconn_req(chan, ECONNRESET);
	}

	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
6354
/* Deliver a frame to the channel identified by its source CID.
 *
 * Consumes the skb on every path.  NOTE(review): the channel is
 * unlocked at "done", so l2cap_get_chan_by_scid() presumably returns
 * with the channel lock held (the A2MP path locks explicitly) - confirm
 * against its definition elsewhere in this file.
 */
static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
			       struct sk_buff *skb)
{
	struct l2cap_chan *chan;

	chan = l2cap_get_chan_by_scid(conn, cid);
	if (!chan) {
		if (cid == L2CAP_CID_A2MP) {
			/* First A2MP frame: create the fixed channel on
			 * demand and let it consume this skb.
			 */
			chan = a2mp_channel_create(conn, skb);
			if (!chan) {
				kfree_skb(skb);
				return;
			}

			l2cap_chan_lock(chan);
		} else {
			BT_DBG("unknown cid 0x%4.4x", cid);
			/* Drop packet and return */
			kfree_skb(skb);
			return;
		}
	}

	BT_DBG("chan %p, len %d", chan, skb->len);

	if (chan->state != BT_CONNECTED)
		goto drop;

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism. */

		if (chan->imtu < skb->len)
			goto drop;

		/* recv() returning 0 means it took ownership of skb */
		if (!chan->ops->recv(chan, skb))
			goto done;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* l2cap_data_rcv() consumes the skb on all paths */
		l2cap_data_rcv(chan, skb);
		goto done;

	default:
		BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
		break;
	}

drop:
	kfree_skb(skb);

done:
	l2cap_chan_unlock(chan);
}
6413
/* Deliver a connectionless (CID 0x0002) frame to a listening or
 * connected channel matching the PSM.  BR/EDR only; the skb is freed
 * unless the channel's recv callback takes ownership (returns 0).
 */
static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
				  struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_chan *chan;

	/* Connectionless data is only defined on ACL (BR/EDR) links */
	if (hcon->type != ACL_LINK)
		goto drop;

	chan = l2cap_global_chan_by_psm(0, psm, conn->src, conn->dst);
	if (!chan)
		goto drop;

	BT_DBG("chan %p, len %d", chan, skb->len);

	if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
		goto drop;

	/* Respect the channel's inbound MTU */
	if (chan->imtu < skb->len)
		goto drop;

	if (!chan->ops->recv(chan, skb))
		return;

drop:
	kfree_skb(skb);
}
6441
/* Deliver an ATT (fixed CID) frame on an LE link to the matching
 * connected channel.  The skb is freed unless the channel's recv
 * callback takes ownership (returns 0).
 */
static void l2cap_att_channel(struct l2cap_conn *conn,
			      struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_chan *chan;

	/* The ATT fixed channel only exists on LE links */
	if (hcon->type != LE_LINK)
		goto drop;

	chan = l2cap_global_chan_by_scid(BT_CONNECTED, L2CAP_CID_ATT,
					 conn->src, conn->dst);
	if (!chan)
		goto drop;

	BT_DBG("chan %p, len %d", chan, skb->len);

	/* Respect the channel's inbound MTU */
	if (chan->imtu < skb->len)
		goto drop;

	if (!chan->ops->recv(chan, skb))
		return;

drop:
	kfree_skb(skb);
}
6467
/* Route one complete L2CAP frame to the proper channel handler based on
 * its CID.  Takes ownership of the skb.
 *
 * NOTE(review): the header pointer is captured before skb_pull(); it
 * remains valid because skb_pull only advances the data pointer.  The
 * caller (l2cap_recv_acldata) has already verified the frame holds at
 * least a basic L2CAP header.
 */
static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct l2cap_hdr *lh = (void *) skb->data;
	u16 cid, len;
	__le16 psm;

	skb_pull(skb, L2CAP_HDR_SIZE);
	cid = __le16_to_cpu(lh->cid);
	len = __le16_to_cpu(lh->len);

	/* Header length must match the actual payload length */
	if (len != skb->len) {
		kfree_skb(skb);
		return;
	}

	BT_DBG("len %d, cid 0x%4.4x", len, cid);

	switch (cid) {
	case L2CAP_CID_SIGNALING:
		l2cap_sig_channel(conn, skb);
		break;

	case L2CAP_CID_CONN_LESS:
		/* Connectionless frames carry a PSM before the payload */
		psm = get_unaligned((__le16 *) skb->data);
		skb_pull(skb, L2CAP_PSMLEN_SIZE);
		l2cap_conless_channel(conn, psm, skb);
		break;

	case L2CAP_CID_ATT:
		l2cap_att_channel(conn, skb);
		break;

	case L2CAP_CID_LE_SIGNALING:
		l2cap_le_sig_channel(conn, skb);
		break;

	case L2CAP_CID_SMP:
		/* A failed SMP exchange drops the whole connection */
		if (smp_sig_channel(conn, skb))
			l2cap_conn_del(conn->hcon, EACCES);
		break;

	default:
		/* Dynamically allocated CIDs */
		l2cap_data_channel(conn, cid, skb);
		break;
	}
}
6514
6515/* ---- L2CAP interface with lower layer (HCI) ---- */
6516
/* HCI connect indication: decide whether to accept an incoming ACL
 * connection for L2CAP.
 *
 * Scans listening channels; a channel bound to this adapter's address
 * takes precedence (exact match) over channels bound to BDADDR_ANY.
 * Returns the link-mode flags (HCI_LM_ACCEPT, optionally HCI_LM_MASTER)
 * or 0 to reject.
 */
int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	int exact = 0, lm1 = 0, lm2 = 0;
	struct l2cap_chan *c;

	BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);

	/* Find listening sockets and check their link_mode */
	read_lock(&chan_list_lock);
	list_for_each_entry(c, &chan_list, global_l) {
		struct sock *sk = c->sk;

		if (c->state != BT_LISTEN)
			continue;

		if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
			/* Bound specifically to this adapter */
			lm1 |= HCI_LM_ACCEPT;
			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
				lm1 |= HCI_LM_MASTER;
			exact++;
		} else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
			/* Wildcard-bound listener */
			lm2 |= HCI_LM_ACCEPT;
			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
				lm2 |= HCI_LM_MASTER;
		}
	}
	read_unlock(&chan_list_lock);

	return exact ? lm1 : lm2;
}
6547
6548void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
6549{
6550 struct l2cap_conn *conn;
6551
6552 BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);
6553
6554 if (!status) {
6555 conn = l2cap_conn_add(hcon);
6556 if (conn)
6557 l2cap_conn_ready(conn);
6558 } else {
6559 l2cap_conn_del(hcon, bt_to_errno(status));
6560 }
6561}
6562
6563int l2cap_disconn_ind(struct hci_conn *hcon)
6564{
6565 struct l2cap_conn *conn = hcon->l2cap_data;
6566
6567 BT_DBG("hcon %p", hcon);
6568
6569 if (!conn)
6570 return HCI_ERROR_REMOTE_USER_TERM;
6571 return conn->disc_reason;
6572}
6573
/* HCI disconnect confirmation: tear down all L2CAP state on the link,
 * translating the HCI reason code into an errno for the channels.
 */
void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
{
	BT_DBG("hcon %p reason %d", hcon, reason);

	l2cap_conn_del(hcon, bt_to_errno(reason));
}
6580
6581static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
6582{
6583 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
6584 return;
6585
6586 if (encrypt == 0x00) {
6587 if (chan->sec_level == BT_SECURITY_MEDIUM) {
6588 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
6589 } else if (chan->sec_level == BT_SECURITY_HIGH)
6590 l2cap_chan_close(chan, ECONNREFUSED);
6591 } else {
6592 if (chan->sec_level == BT_SECURITY_MEDIUM)
6593 __clear_chan_timer(chan);
6594 }
6595}
6596
/* HCI security/encryption change notification.
 *
 * Walks every channel on the connection and advances its state machine
 * according to the new security status: LE channels may become ready,
 * pending outgoing connects are started or timed out, and deferred
 * incoming connects (BT_CONNECT2) are answered with a connect response.
 * Always returns 0.
 */
int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan;

	if (!conn)
		return 0;

	BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);

	if (hcon->type == LE_LINK) {
		/* LE link encrypted: continue SMP key distribution and
		 * stop the pairing security timer.
		 */
		if (!status && encrypt)
			smp_distribute_keys(conn, 0);
		cancel_delayed_work(&conn->security_timer);
	}

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
		       state_to_string(chan->state));

		/* A2MP fixed channels are unaffected by link security */
		if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
			l2cap_chan_unlock(chan);
			continue;
		}

		/* ATT channels become ready once the LE link encrypts */
		if (chan->scid == L2CAP_CID_ATT) {
			if (!status && encrypt) {
				chan->sec_level = hcon->sec_level;
				l2cap_chan_ready(chan);
			}

			l2cap_chan_unlock(chan);
			continue;
		}

		/* Skip channels with a connect request still pending */
		if (!__l2cap_no_conn_pending(chan)) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (!status && (chan->state == BT_CONNECTED ||
				chan->state == BT_CONFIG)) {
			struct sock *sk = chan->sk;

			/* Security upgrade done: unblock the socket */
			clear_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags);
			sk->sk_state_change(sk);

			l2cap_check_encryption(chan, encrypt);
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			/* Outgoing connect was waiting on security */
			if (!status) {
				l2cap_start_connection(chan);
			} else {
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
			}
		} else if (chan->state == BT_CONNECT2) {
			/* Incoming connect deferred pending security:
			 * answer it now with the appropriate result.
			 */
			struct sock *sk = chan->sk;
			struct l2cap_conn_rsp rsp;
			__u16 res, stat;

			lock_sock(sk);

			if (!status) {
				if (test_bit(BT_SK_DEFER_SETUP,
					     &bt_sk(sk)->flags)) {
					res = L2CAP_CR_PEND;
					stat = L2CAP_CS_AUTHOR_PEND;
					chan->ops->defer(chan);
				} else {
					__l2cap_state_change(chan, BT_CONFIG);
					res = L2CAP_CR_SUCCESS;
					stat = L2CAP_CS_NO_INFO;
				}
			} else {
				__l2cap_state_change(chan, BT_DISCONN);
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
				res = L2CAP_CR_SEC_BLOCK;
				stat = L2CAP_CS_NO_INFO;
			}

			release_sock(sk);

			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);
			rsp.result = cpu_to_le16(res);
			rsp.status = cpu_to_le16(stat);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			/* Accepted: kick off configuration right away */
			if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
			    res == L2CAP_CR_SUCCESS) {
				char buf[128];
				set_bit(CONF_REQ_SENT, &chan->conf_state);
				l2cap_send_cmd(conn, l2cap_get_ident(conn),
					       L2CAP_CONF_REQ,
					       l2cap_build_conf_req(chan, buf),
					       buf);
				chan->num_conf_req++;
			}
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);

	return 0;
}
6712
/* Entry point for ACL data from HCI: reassemble L2CAP frames that were
 * fragmented across multiple ACL packets.
 *
 * Complete frames are handed to l2cap_recv_frame() (which takes
 * ownership); partial frames accumulate in conn->rx_skb.  The incoming
 * ACL skb itself is always consumed.  Always returns 0.
 */
int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_hdr *hdr;
	int len;

	/* For AMP controller do not create l2cap conn */
	if (!conn && hcon->hdev->dev_type != HCI_BREDR)
		goto drop;

	if (!conn)
		conn = l2cap_conn_add(hcon);

	if (!conn)
		goto drop;

	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);

	switch (flags) {
	case ACL_START:
	case ACL_START_NO_FLUSH:
	case ACL_COMPLETE:
		/* A start frame while reassembly is in progress means
		 * the previous frame was truncated: discard it.
		 */
		if (conn->rx_len) {
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
		}

		/* Start fragment always begin with Basic L2CAP header */
		if (skb->len < L2CAP_HDR_SIZE) {
			BT_ERR("Frame is too short (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		hdr = (struct l2cap_hdr *) skb->data;
		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);
			return 0;
		}

		BT_DBG("Start: total len %d, frag len %d", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %d, expected len %d)",
			       skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
		if (!conn->rx_skb)
			goto drop;

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
					  skb->len);
		/* rx_len tracks how many bytes are still outstanding */
		conn->rx_len = len - skb->len;
		break;

	case ACL_CONT:
		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);

		/* Continuation with no reassembly in progress */
		if (!conn->rx_len) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %d, expected %d)",
			       skb->len, conn->rx_len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
					  skb->len);
		conn->rx_len -= skb->len;

		if (!conn->rx_len) {
			/* Complete frame received. l2cap_recv_frame
			 * takes ownership of the skb so set the global
			 * rx_skb pointer to NULL first.
			 */
			struct sk_buff *rx_skb = conn->rx_skb;
			conn->rx_skb = NULL;
			l2cap_recv_frame(conn, rx_skb);
		}
		break;
	}

	/* The original ACL fragment skb is always freed here */
drop:
	kfree_skb(skb);
	return 0;
}
6817
/* seq_file show callback for /sys/kernel/debug/bluetooth/l2cap: dump
 * one line per registered channel (addresses, state, PSM, CIDs, MTUs,
 * security level and mode).
 */
static int l2cap_debugfs_show(struct seq_file *f, void *p)
{
	struct l2cap_chan *c;

	read_lock(&chan_list_lock);

	list_for_each_entry(c, &chan_list, global_l) {
		struct sock *sk = c->sk;

		seq_printf(f, "%pMR %pMR %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
			   &bt_sk(sk)->src, &bt_sk(sk)->dst,
			   c->state, __le16_to_cpu(c->psm),
			   c->scid, c->dcid, c->imtu, c->omtu,
			   c->sec_level, c->mode);
	}

	read_unlock(&chan_list_lock);

	return 0;
}
6838
/* debugfs open callback: bind the seq_file single-show handler */
static int l2cap_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, l2cap_debugfs_show, inode->i_private);
}
6843
/* File operations for the read-only l2cap debugfs entry */
static const struct file_operations l2cap_debugfs_fops = {
	.open		= l2cap_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/* Dentry of the debugfs entry, kept for removal in l2cap_exit() */
static struct dentry *l2cap_debugfs;
6852
6853int __init l2cap_init(void)
6854{
6855 int err;
6856
6857 err = l2cap_init_sockets();
6858 if (err < 0)
6859 return err;
6860
6861 if (bt_debugfs) {
6862 l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
6863 NULL, &l2cap_debugfs_fops);
6864 if (!l2cap_debugfs)
6865 BT_ERR("Failed to create L2CAP debug file");
6866 }
6867
6868 return 0;
6869}
6870
/* Tear down the L2CAP layer: remove the debugfs entry (no-op if it was
 * never created) and unregister the socket interface.
 */
void l2cap_exit(void)
{
	debugfs_remove(l2cap_debugfs);
	l2cap_cleanup_sockets();
}
6876
/* Module parameter: allows disabling Enhanced Retransmission Mode at
 * load time (writable at runtime via sysfs, mode 0644).
 */
module_param(disable_ertm, bool, 0644);
MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");
/* This page took 0.053952 seconds and 5 git commands to generate. */