nl802154: fix misspelled enum
[deliverable/linux.git] / net / bluetooth / l2cap_core.c
... / ...
CommitLineData
1/*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
8
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
10
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
14
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
23
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
27*/
28
29/* Bluetooth L2CAP core. */
30
31#include <linux/module.h>
32
33#include <linux/debugfs.h>
34#include <linux/crc16.h>
35
36#include <net/bluetooth/bluetooth.h>
37#include <net/bluetooth/hci_core.h>
38#include <net/bluetooth/l2cap.h>
39
40#include "smp.h"
41#include "a2mp.h"
42#include "amp.h"
43
44#define LE_FLOWCTL_MAX_CREDITS 65535
45
46bool disable_ertm;
47
48static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD;
49
50static LIST_HEAD(chan_list);
51static DEFINE_RWLOCK(chan_list_lock);
52
53static u16 le_max_credits = L2CAP_LE_MAX_CREDITS;
54static u16 le_default_mps = L2CAP_LE_DEFAULT_MPS;
55
56static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
57 u8 code, u8 ident, u16 dlen, void *data);
58static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
59 void *data);
60static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
61static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
62
63static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
64 struct sk_buff_head *skbs, u8 event);
65
66static inline u8 bdaddr_type(u8 link_type, u8 bdaddr_type)
67{
68 if (link_type == LE_LINK) {
69 if (bdaddr_type == ADDR_LE_DEV_PUBLIC)
70 return BDADDR_LE_PUBLIC;
71 else
72 return BDADDR_LE_RANDOM;
73 }
74
75 return BDADDR_BREDR;
76}
77
/* bdaddr type of the local (source) address of @hcon */
static inline u8 bdaddr_src_type(struct hci_conn *hcon)
{
	return bdaddr_type(hcon->type, hcon->src_type);
}
82
/* bdaddr type of the remote (destination) address of @hcon */
static inline u8 bdaddr_dst_type(struct hci_conn *hcon)
{
	return bdaddr_type(hcon->type, hcon->dst_type);
}
87
88/* ---- L2CAP channels ---- */
89
90static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
91 u16 cid)
92{
93 struct l2cap_chan *c;
94
95 list_for_each_entry(c, &conn->chan_l, list) {
96 if (c->dcid == cid)
97 return c;
98 }
99 return NULL;
100}
101
102static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
103 u16 cid)
104{
105 struct l2cap_chan *c;
106
107 list_for_each_entry(c, &conn->chan_l, list) {
108 if (c->scid == cid)
109 return c;
110 }
111 return NULL;
112}
113
114/* Find channel with given SCID.
115 * Returns locked channel. */
116static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
117 u16 cid)
118{
119 struct l2cap_chan *c;
120
121 mutex_lock(&conn->chan_lock);
122 c = __l2cap_get_chan_by_scid(conn, cid);
123 if (c)
124 l2cap_chan_lock(c);
125 mutex_unlock(&conn->chan_lock);
126
127 return c;
128}
129
130/* Find channel with given DCID.
131 * Returns locked channel.
132 */
133static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
134 u16 cid)
135{
136 struct l2cap_chan *c;
137
138 mutex_lock(&conn->chan_lock);
139 c = __l2cap_get_chan_by_dcid(conn, cid);
140 if (c)
141 l2cap_chan_lock(c);
142 mutex_unlock(&conn->chan_lock);
143
144 return c;
145}
146
147static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
148 u8 ident)
149{
150 struct l2cap_chan *c;
151
152 list_for_each_entry(c, &conn->chan_l, list) {
153 if (c->ident == ident)
154 return c;
155 }
156 return NULL;
157}
158
159static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
160 u8 ident)
161{
162 struct l2cap_chan *c;
163
164 mutex_lock(&conn->chan_lock);
165 c = __l2cap_get_chan_by_ident(conn, ident);
166 if (c)
167 l2cap_chan_lock(c);
168 mutex_unlock(&conn->chan_lock);
169
170 return c;
171}
172
173static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
174{
175 struct l2cap_chan *c;
176
177 list_for_each_entry(c, &chan_list, global_l) {
178 if (c->sport == psm && !bacmp(&c->src, src))
179 return c;
180 }
181 return NULL;
182}
183
/* Bind @chan to PSM @psm on source address @src.  When @psm is 0 a free
 * PSM is allocated from the dynamic range instead.
 * Returns 0 on success, -EADDRINUSE if the PSM is taken, or -EINVAL if
 * no dynamic PSM is available.
 */
int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
{
	int err;

	/* chan_list_lock serialises PSM conflict checks against the
	 * global channel list.
	 */
	write_lock(&chan_list_lock);

	if (psm && __l2cap_global_chan_by_addr(psm, src)) {
		err = -EADDRINUSE;
		goto done;
	}

	if (psm) {
		chan->psm = psm;
		chan->sport = psm;
		err = 0;
	} else {
		u16 p;

		err = -EINVAL;
		/* Scan odd values 0x1001..0x10ff for an unused PSM */
		for (p = 0x1001; p < 0x1100; p += 2)
			if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
				chan->psm = cpu_to_le16(p);
				chan->sport = cpu_to_le16(p);
				err = 0;
				break;
			}
	}

done:
	write_unlock(&chan_list_lock);
	return err;
}
EXPORT_SYMBOL_GPL(l2cap_add_psm);
217
/* Bind @chan to the fixed CID @scid, converting it to a fixed channel
 * with the default MTU.  Always returns 0.
 */
int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
{
	write_lock(&chan_list_lock);

	/* Override the defaults (which are for conn-oriented) */
	chan->omtu = L2CAP_DEFAULT_MTU;
	chan->chan_type = L2CAP_CHAN_FIXED;

	chan->scid = scid;

	write_unlock(&chan_list_lock);

	return 0;
}
232
233static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
234{
235 u16 cid, dyn_end;
236
237 if (conn->hcon->type == LE_LINK)
238 dyn_end = L2CAP_CID_LE_DYN_END;
239 else
240 dyn_end = L2CAP_CID_DYN_END;
241
242 for (cid = L2CAP_CID_DYN_START; cid < dyn_end; cid++) {
243 if (!__l2cap_get_chan_by_scid(conn, cid))
244 return cid;
245 }
246
247 return 0;
248}
249
/* Move @chan to @state and notify the channel owner (err == 0) */
static void l2cap_state_change(struct l2cap_chan *chan, int state)
{
	BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
	       state_to_string(state));

	chan->state = state;
	chan->ops->state_change(chan, state, 0);
}
258
/* Move @chan to @state and propagate @err to the channel owner */
static inline void l2cap_state_change_and_error(struct l2cap_chan *chan,
						int state, int err)
{
	chan->state = state;
	chan->ops->state_change(chan, chan->state, err);
}
265
/* Report @err to the channel owner without changing channel state */
static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
{
	chan->ops->state_change(chan, chan->state, err);
}
270
/* Arm the ERTM retransmission timer, but only while the monitor timer
 * is idle and a retransmission timeout has been negotiated.
 */
static void __set_retrans_timer(struct l2cap_chan *chan)
{
	if (!delayed_work_pending(&chan->monitor_timer) &&
	    chan->retrans_timeout) {
		l2cap_set_timer(chan, &chan->retrans_timer,
				msecs_to_jiffies(chan->retrans_timeout));
	}
}
279
/* Arm the ERTM monitor timer; the retransmission timer is stopped
 * first since the two are mutually exclusive.
 */
static void __set_monitor_timer(struct l2cap_chan *chan)
{
	__clear_retrans_timer(chan);
	if (chan->monitor_timeout) {
		l2cap_set_timer(chan, &chan->monitor_timer,
				msecs_to_jiffies(chan->monitor_timeout));
	}
}
288
/* Find the skb in @head carrying ERTM tx sequence number @seq, or NULL */
static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
					       u16 seq)
{
	struct sk_buff *skb;

	skb_queue_walk(head, skb) {
		if (bt_cb(skb)->l2cap.txseq == seq)
			return skb;
	}

	return NULL;
}
301
302/* ---- L2CAP sequence number lists ---- */
303
304/* For ERTM, ordered lists of sequence numbers must be tracked for
305 * SREJ requests that are received and for frames that are to be
306 * retransmitted. These seq_list functions implement a singly-linked
307 * list in an array, where membership in the list can also be checked
308 * in constant time. Items can also be added to the tail of the list
309 * and removed from the head in constant time, without further memory
310 * allocs or frees.
311 */
312
313static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
314{
315 size_t alloc_size, i;
316
317 /* Allocated size is a power of 2 to map sequence numbers
318 * (which may be up to 14 bits) in to a smaller array that is
319 * sized for the negotiated ERTM transmit windows.
320 */
321 alloc_size = roundup_pow_of_two(size);
322
323 seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
324 if (!seq_list->list)
325 return -ENOMEM;
326
327 seq_list->mask = alloc_size - 1;
328 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
329 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
330 for (i = 0; i < alloc_size; i++)
331 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
332
333 return 0;
334}
335
/* Release the backing array of @seq_list (kfree(NULL) is a no-op) */
static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
{
	kfree(seq_list->list);
}
340
/* Test whether @seq is currently a member of @seq_list */
static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
					   u16 seq)
{
	/* Constant-time check for list membership */
	return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
}
347
/* Remove and return the sequence number at the head of @seq_list.
 * NOTE(review): assumes the list is non-empty — callers must check,
 * as popping an empty list reads the CLEAR sentinel as a seq value.
 */
static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
{
	u16 seq = seq_list->head;
	u16 mask = seq_list->mask;

	/* Advance head to the next linked entry and clear the old slot */
	seq_list->head = seq_list->list[seq & mask];
	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;

	/* Popped the tail entry: the list is now empty */
	if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
		seq_list->head = L2CAP_SEQ_LIST_CLEAR;
		seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
	}

	return seq;
}
363
/* Empty @seq_list, resetting every slot to the CLEAR sentinel */
static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
{
	u16 i;

	/* Already empty: all slots are CLEAR by invariant */
	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
		return;

	for (i = 0; i <= seq_list->mask; i++)
		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;

	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
}
377
/* Append @seq to the tail of @seq_list; duplicates are ignored */
static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
{
	u16 mask = seq_list->mask;

	/* All appends happen in constant time */

	/* Already a member: nothing to do */
	if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
		return;

	if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
		seq_list->head = seq;
	else
		seq_list->list[seq_list->tail & mask] = seq;

	seq_list->tail = seq;
	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
}
395
/* Delayed-work handler for the channel timer: close the channel with a
 * reason derived from its current state, then drop the timer's reference.
 */
static void l2cap_chan_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       chan_timer.work);
	struct l2cap_conn *conn = chan->conn;
	int reason;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	/* Lock order: conn->chan_lock, then the channel lock */
	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(chan);

	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (chan->state == BT_CONNECT &&
		 chan->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	l2cap_chan_close(chan, reason);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan);
	mutex_unlock(&conn->chan_lock);

	/* Drop the reference taken when the timer was armed */
	l2cap_chan_put(chan);
}
425
/* Allocate and initialise a new channel, register it on the global
 * channel list, and return it with one reference held (or NULL on OOM).
 */
struct l2cap_chan *l2cap_chan_create(void)
{
	struct l2cap_chan *chan;

	chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
	if (!chan)
		return NULL;

	mutex_init(&chan->lock);

	/* Set default lock nesting level */
	atomic_set(&chan->nesting, L2CAP_NESTING_NORMAL);

	write_lock(&chan_list_lock);
	list_add(&chan->global_l, &chan_list);
	write_unlock(&chan_list_lock);

	INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);

	chan->state = BT_OPEN;

	kref_init(&chan->kref);

	/* This flag is cleared in l2cap_chan_ready() */
	set_bit(CONF_NOT_COMPLETE, &chan->conf_state);

	BT_DBG("chan %p", chan);

	return chan;
}
EXPORT_SYMBOL_GPL(l2cap_chan_create);
457
/* kref release callback: unlink the channel from the global list and
 * free it.  Runs when the last reference is dropped via l2cap_chan_put().
 */
static void l2cap_chan_destroy(struct kref *kref)
{
	struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);

	BT_DBG("chan %p", chan);

	write_lock(&chan_list_lock);
	list_del(&chan->global_l);
	write_unlock(&chan_list_lock);

	kfree(chan);
}
470
/* Take an additional reference on channel @c */
void l2cap_chan_hold(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));

	kref_get(&c->kref);
}
477
/* Drop a reference on channel @c; frees it via l2cap_chan_destroy()
 * when the count reaches zero.
 */
void l2cap_chan_put(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));

	kref_put(&c->kref, l2cap_chan_destroy);
}
EXPORT_SYMBOL_GPL(l2cap_chan_put);
485
/* Reset @chan's ERTM/flow-control parameters and security level to the
 * protocol defaults and clear any configuration state.
 */
void l2cap_chan_set_defaults(struct l2cap_chan *chan)
{
	chan->fcs  = L2CAP_FCS_CRC16;
	chan->max_tx = L2CAP_DEFAULT_MAX_TX;
	chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
	chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
	chan->remote_max_tx = chan->max_tx;
	chan->remote_tx_win = chan->tx_win;
	chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
	chan->sec_level = BT_SECURITY_LOW;
	chan->flush_to = L2CAP_DEFAULT_FLUSH_TO;
	chan->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
	chan->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
	chan->conf_state = 0;

	set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
}
EXPORT_SYMBOL_GPL(l2cap_chan_set_defaults);
504
/* Initialise LE credit-based flow control state: no SDU in reassembly,
 * no TX credits until granted by the peer, module-default RX credits/MPS.
 */
static void l2cap_le_flowctl_init(struct l2cap_chan *chan)
{
	chan->sdu = NULL;
	chan->sdu_last_frag = NULL;
	chan->sdu_len = 0;
	chan->tx_credits = 0;
	chan->rx_credits = le_max_credits;
	/* MPS may not exceed the channel's incoming MTU */
	chan->mps = min_t(u16, chan->imtu, le_default_mps);

	skb_queue_head_init(&chan->tx_q);
}
516
/* Attach @chan to @conn: assign CIDs/MTU per channel type, set default
 * extended flow spec values and link the channel into the connection's
 * list.  NOTE(review): callers appear to hold conn->chan_lock (see
 * l2cap_chan_add()) — confirm for direct callers.
 */
void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
	       __le16_to_cpu(chan->psm), chan->dcid);

	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	chan->conn = conn;

	switch (chan->chan_type) {
	case L2CAP_CHAN_CONN_ORIENTED:
		/* Alloc CID for connection-oriented socket */
		chan->scid = l2cap_alloc_cid(conn);
		if (conn->hcon->type == ACL_LINK)
			chan->omtu = L2CAP_DEFAULT_MTU;
		break;

	case L2CAP_CHAN_CONN_LESS:
		/* Connectionless socket */
		chan->scid = L2CAP_CID_CONN_LESS;
		chan->dcid = L2CAP_CID_CONN_LESS;
		chan->omtu = L2CAP_DEFAULT_MTU;
		break;

	case L2CAP_CHAN_FIXED:
		/* Caller will set CID and CID specific MTU values */
		break;

	default:
		/* Raw socket can send/recv signalling messages only */
		chan->scid = L2CAP_CID_SIGNALING;
		chan->dcid = L2CAP_CID_SIGNALING;
		chan->omtu = L2CAP_DEFAULT_MTU;
	}

	chan->local_id = L2CAP_BESTEFFORT_ID;
	chan->local_stype = L2CAP_SERV_BESTEFFORT;
	chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
	chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
	chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
	chan->local_flush_to = L2CAP_EFS_DEFAULT_FLUSH_TO;

	l2cap_chan_hold(chan);

	/* Only keep a reference for fixed channels if they requested it */
	if (chan->chan_type != L2CAP_CHAN_FIXED ||
	    test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
		hci_conn_hold(conn->hcon);

	list_add(&chan->list, &conn->chan_l);
}
568
/* Locked wrapper around __l2cap_chan_add() */
void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	mutex_lock(&conn->chan_lock);
	__l2cap_chan_add(conn, chan);
	mutex_unlock(&conn->chan_lock);
}
575
/* Detach @chan from its connection and tear down mode-specific state,
 * reporting @err to the owner.  NOTE(review): callers appear to hold
 * conn->chan_lock and the channel lock — confirm.
 */
void l2cap_chan_del(struct l2cap_chan *chan, int err)
{
	struct l2cap_conn *conn = chan->conn;

	__clear_chan_timer(chan);

	BT_DBG("chan %p, conn %p, err %d, state %s", chan, conn, err,
	       state_to_string(chan->state));

	chan->ops->teardown(chan, err);

	if (conn) {
		struct amp_mgr *mgr = conn->hcon->amp_mgr;
		/* Delete from channel list */
		list_del(&chan->list);

		/* Drop the reference taken in __l2cap_chan_add() */
		l2cap_chan_put(chan);

		chan->conn = NULL;

		/* Reference was only held for non-fixed channels or
		 * fixed channels that explicitly requested it using the
		 * FLAG_HOLD_HCI_CONN flag.
		 */
		if (chan->chan_type != L2CAP_CHAN_FIXED ||
		    test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
			hci_conn_drop(conn->hcon);

		if (mgr && mgr->bredr_chan == chan)
			mgr->bredr_chan = NULL;
	}

	if (chan->hs_hchan) {
		struct hci_chan *hs_hchan = chan->hs_hchan;

		BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan);
		amp_disconnect_logical_link(hs_hchan);
	}

	/* Mode-specific queues and timers only exist once configuration
	 * completed (flag cleared in l2cap_chan_ready())
	 */
	if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
		return;

	switch(chan->mode) {
	case L2CAP_MODE_BASIC:
		break;

	case L2CAP_MODE_LE_FLOWCTL:
		skb_queue_purge(&chan->tx_q);
		break;

	case L2CAP_MODE_ERTM:
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);

		skb_queue_purge(&chan->srej_q);

		l2cap_seq_list_free(&chan->srej_list);
		l2cap_seq_list_free(&chan->retrans_list);

		/* fall through */

	case L2CAP_MODE_STREAMING:
		skb_queue_purge(&chan->tx_q);
		break;
	}

	return;
}
EXPORT_SYMBOL_GPL(l2cap_chan_del);
646
/* Work handler: propagate the (updated) HCI connection destination
 * address and type to every channel on the connection.
 */
static void l2cap_conn_update_id_addr(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       id_addr_update_work);
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_chan *chan;

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		l2cap_chan_lock(chan);
		bacpy(&chan->dst, &hcon->dst);
		chan->dst_type = bdaddr_dst_type(hcon);
		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
665
/* Send an LE connection response rejecting @chan's pending request and
 * move the channel to BT_DISCONN.
 */
static void l2cap_chan_le_connect_reject(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_le_conn_rsp rsp;
	u16 result;

	/* Deferred setup means the request was seen but not authorized */
	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
		result = L2CAP_CR_AUTHORIZATION;
	else
		result = L2CAP_CR_BAD_PSM;

	l2cap_state_change(chan, BT_DISCONN);

	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.mtu = cpu_to_le16(chan->imtu);
	rsp.mps = cpu_to_le16(chan->mps);
	rsp.credits = cpu_to_le16(chan->rx_credits);
	rsp.result = cpu_to_le16(result);

	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
		       &rsp);
}
688
/* Send a BR/EDR connection response rejecting @chan's pending request
 * and move the channel to BT_DISCONN.
 */
static void l2cap_chan_connect_reject(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_conn_rsp rsp;
	u16 result;

	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
		result = L2CAP_CR_SEC_BLOCK;
	else
		result = L2CAP_CR_BAD_PSM;

	l2cap_state_change(chan, BT_DISCONN);

	rsp.scid = cpu_to_le16(chan->dcid);
	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);

	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
}
709
/* Close @chan according to its current state: initiate a disconnect for
 * established connection-oriented channels, reject not-yet-accepted
 * incoming requests, or tear the channel down directly.
 */
void l2cap_chan_close(struct l2cap_chan *chan, int reason)
{
	struct l2cap_conn *conn = chan->conn;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	switch (chan->state) {
	case BT_LISTEN:
		chan->ops->teardown(chan, 0);
		break;

	case BT_CONNECTED:
	case BT_CONFIG:
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
			/* Start the sock shutdown timer, then ask the
			 * peer to disconnect
			 */
			__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
			l2cap_send_disconn_req(chan, reason);
		} else
			l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT2:
		/* Incoming request not yet accepted: send a reject */
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
			if (conn->hcon->type == ACL_LINK)
				l2cap_chan_connect_reject(chan);
			else if (conn->hcon->type == LE_LINK)
				l2cap_chan_le_connect_reject(chan);
		}

		l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT:
	case BT_DISCONN:
		l2cap_chan_del(chan, reason);
		break;

	default:
		chan->ops->teardown(chan, 0);
		break;
	}
}
EXPORT_SYMBOL(l2cap_chan_close);
752
/* Map @chan's channel type, PSM and security level onto an HCI
 * authentication requirement.  May lower sec_level to SDP for the SDP
 * and 3DSP PSMs when it was left at LOW.
 */
static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
{
	switch (chan->chan_type) {
	case L2CAP_CHAN_RAW:
		switch (chan->sec_level) {
		case BT_SECURITY_HIGH:
		case BT_SECURITY_FIPS:
			return HCI_AT_DEDICATED_BONDING_MITM;
		case BT_SECURITY_MEDIUM:
			return HCI_AT_DEDICATED_BONDING;
		default:
			return HCI_AT_NO_BONDING;
		}
		break;
	case L2CAP_CHAN_CONN_LESS:
		if (chan->psm == cpu_to_le16(L2CAP_PSM_3DSP)) {
			if (chan->sec_level == BT_SECURITY_LOW)
				chan->sec_level = BT_SECURITY_SDP;
		}
		if (chan->sec_level == BT_SECURITY_HIGH ||
		    chan->sec_level == BT_SECURITY_FIPS)
			return HCI_AT_NO_BONDING_MITM;
		else
			return HCI_AT_NO_BONDING;
		break;
	case L2CAP_CHAN_CONN_ORIENTED:
		if (chan->psm == cpu_to_le16(L2CAP_PSM_SDP)) {
			if (chan->sec_level == BT_SECURITY_LOW)
				chan->sec_level = BT_SECURITY_SDP;

			if (chan->sec_level == BT_SECURITY_HIGH ||
			    chan->sec_level == BT_SECURITY_FIPS)
				return HCI_AT_NO_BONDING_MITM;
			else
				return HCI_AT_NO_BONDING;
		}
		/* fall through */
	default:
		switch (chan->sec_level) {
		case BT_SECURITY_HIGH:
		case BT_SECURITY_FIPS:
			return HCI_AT_GENERAL_BONDING_MITM;
		case BT_SECURITY_MEDIUM:
			return HCI_AT_GENERAL_BONDING;
		default:
			return HCI_AT_NO_BONDING;
		}
		break;
	}
}
803
/* Service level security */

/* Request the link security required by @chan: SMP on LE links,
 * HCI authentication/encryption on BR/EDR links.
 */
int l2cap_chan_check_security(struct l2cap_chan *chan, bool initiator)
{
	struct l2cap_conn *conn = chan->conn;
	__u8 auth_type;

	if (conn->hcon->type == LE_LINK)
		return smp_conn_security(conn->hcon, chan->sec_level);

	auth_type = l2cap_get_auth_type(chan);

	return hci_conn_security(conn->hcon, chan->sec_level, auth_type,
				 initiator);
}
818
/* Allocate the next signalling command identifier for @conn,
 * cycling through the kernel-reserved range 1-128.
 */
static u8 l2cap_get_ident(struct l2cap_conn *conn)
{
	u8 id;

	/* Get next available identificator.
	 *    1 - 128 are used by kernel.
	 *  129 - 199 are reserved.
	 *  200 - 254 are used by utilities like l2ping, etc.
	 */

	mutex_lock(&conn->ident_lock);

	if (++conn->tx_ident > 128)
		conn->tx_ident = 1;

	id = conn->tx_ident;

	mutex_unlock(&conn->ident_lock);

	return id;
}
840
/* Build and transmit an L2CAP signalling command on @conn.
 * Silently drops the command if the skb cannot be built.
 */
static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
			   void *data)
{
	struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
	u8 flags;

	BT_DBG("code 0x%2.2x", code);

	if (!skb)
		return;

	/* Use NO_FLUSH if supported or we have an LE link (which does
	 * not support auto-flushing packets) */
	if (lmp_no_flush_capable(conn->hcon->hdev) ||
	    conn->hcon->type == LE_LINK)
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
	skb->priority = HCI_PRIO_MAX;

	hci_send_acl(conn->hchan, skb, flags);
}
865
866static bool __chan_is_moving(struct l2cap_chan *chan)
867{
868 return chan->move_state != L2CAP_MOVE_STABLE &&
869 chan->move_state != L2CAP_MOVE_WAIT_PREPARE;
870}
871
/* Transmit @skb on @chan, routing it over the AMP logical link when one
 * is active, otherwise over the ACL link with appropriate flush flags.
 */
static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct hci_conn *hcon = chan->conn->hcon;
	u16 flags;

	BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
	       skb->priority);

	if (chan->hs_hcon && !__chan_is_moving(chan)) {
		if (chan->hs_hchan)
			hci_send_acl(chan->hs_hchan, skb, ACL_COMPLETE);
		else
			kfree_skb(skb);

		return;
	}

	/* Use NO_FLUSH for LE links (where this is the only option) or
	 * if the BR/EDR link supports it and flushing has not been
	 * explicitly requested (through FLAG_FLUSHABLE).
	 */
	if (hcon->type == LE_LINK ||
	    (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
	     lmp_no_flush_capable(hcon->hdev)))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
	hci_send_acl(chan->conn->hchan, skb, flags);
}
903
/* Decode a 16-bit enhanced control field into @control, zeroing the
 * fields that do not apply to the decoded frame type.
 */
static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
{
	control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
	control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;

	if (enh & L2CAP_CTRL_FRAME_TYPE) {
		/* S-Frame */
		control->sframe = 1;
		control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
		control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;

		control->sar = 0;
		control->txseq = 0;
	} else {
		/* I-Frame */
		control->sframe = 0;
		control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
		control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;

		control->poll = 0;
		control->super = 0;
	}
}
927
/* Decode a 32-bit extended control field into @control, zeroing the
 * fields that do not apply to the decoded frame type.
 */
static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
{
	control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
	control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;

	if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
		/* S-Frame */
		control->sframe = 1;
		control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
		control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;

		control->sar = 0;
		control->txseq = 0;
	} else {
		/* I-Frame */
		control->sframe = 0;
		control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
		control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;

		control->poll = 0;
		control->super = 0;
	}
}
951
/* Decode and strip the control field at the head of @skb into the skb's
 * control block, using the width negotiated for @chan.
 */
static inline void __unpack_control(struct l2cap_chan *chan,
				    struct sk_buff *skb)
{
	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
		__unpack_extended_control(get_unaligned_le32(skb->data),
					  &bt_cb(skb)->l2cap);
		skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
	} else {
		__unpack_enhanced_control(get_unaligned_le16(skb->data),
					  &bt_cb(skb)->l2cap);
		skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
	}
}
965
/* Encode @control as a 32-bit extended control field */
static u32 __pack_extended_control(struct l2cap_ctrl *control)
{
	u32 packed;

	packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
	packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;

	if (control->sframe) {
		packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
		packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
		packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
	} else {
		packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
		packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
	}

	return packed;
}
984
/* Encode @control as a 16-bit enhanced control field */
static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
{
	u16 packed;

	packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
	packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;

	if (control->sframe) {
		packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
		packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
		packed |= L2CAP_CTRL_FRAME_TYPE;
	} else {
		packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
		packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
	}

	return packed;
}
1003
/* Write @control into @skb just after the L2CAP header, using the
 * control field width negotiated for @chan.
 */
static inline void __pack_control(struct l2cap_chan *chan,
				  struct l2cap_ctrl *control,
				  struct sk_buff *skb)
{
	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
		put_unaligned_le32(__pack_extended_control(control),
				   skb->data + L2CAP_HDR_SIZE);
	} else {
		put_unaligned_le16(__pack_enhanced_control(control),
				   skb->data + L2CAP_HDR_SIZE);
	}
}
1016
1017static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
1018{
1019 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1020 return L2CAP_EXT_HDR_SIZE;
1021 else
1022 return L2CAP_ENH_HDR_SIZE;
1023}
1024
/* Build an ERTM S-frame PDU for @chan carrying the packed @control
 * field, appending an FCS when CRC16 is negotiated.
 * Returns the skb or ERR_PTR(-ENOMEM).
 */
static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
					       u32 control)
{
	struct sk_buff *skb;
	struct l2cap_hdr *lh;
	int hlen = __ertm_hdr_size(chan);

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	skb = bt_skb_alloc(hlen, GFP_KERNEL);

	if (!skb)
		return ERR_PTR(-ENOMEM);

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	if (chan->fcs == L2CAP_FCS_CRC16) {
		/* FCS covers the header and control field */
		u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
		put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
	}

	skb->priority = HCI_PRIO_MAX;
	return skb;
}
1057
/* Transmit the supervisory frame described by @control on @chan,
 * updating F-bit, RNR and ack bookkeeping as a side effect.
 */
static void l2cap_send_sframe(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;
	u32 control_field;

	BT_DBG("chan %p, control %p", chan, control);

	if (!control->sframe)
		return;

	/* No S-frames while an AMP channel move is in progress */
	if (__chan_is_moving(chan))
		return;

	/* A pending F-bit is carried on the next non-poll S-frame */
	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
	    !control->poll)
		control->final = 1;

	if (control->super == L2CAP_SUPER_RR)
		clear_bit(CONN_RNR_SENT, &chan->conn_state);
	else if (control->super == L2CAP_SUPER_RNR)
		set_bit(CONN_RNR_SENT, &chan->conn_state);

	/* Any non-SREJ S-frame acknowledges up to reqseq */
	if (control->super != L2CAP_SUPER_SREJ) {
		chan->last_acked_seq = control->reqseq;
		__clear_ack_timer(chan);
	}

	BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
	       control->final, control->poll, control->super);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		control_field = __pack_extended_control(control);
	else
		control_field = __pack_enhanced_control(control);

	skb = l2cap_create_sframe_pdu(chan, control_field);
	if (!IS_ERR(skb))
		l2cap_do_send(chan, skb);
}
1098
/* Send an RR (or RNR while locally busy) S-frame acknowledging the
 * current buffer_seq, with the poll bit set as requested.
 */
static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
{
	struct l2cap_ctrl control;

	BT_DBG("chan %p, poll %d", chan, poll);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.poll = poll;

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
		control.super = L2CAP_SUPER_RNR;
	else
		control.super = L2CAP_SUPER_RR;

	control.reqseq = chan->buffer_seq;
	l2cap_send_sframe(chan, &control);
}
1117
1118static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
1119{
1120 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
1121 return true;
1122
1123 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
1124}
1125
/* True when @chan may be created on (or moved to) an AMP controller:
 * both sides advertise A2MP, a non-BR/EDR AMP controller is up, and the
 * channel policy prefers AMP.
 */
static bool __amp_capable(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct hci_dev *hdev;
	bool amp_available = false;

	if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
		return false;

	if (!(conn->remote_fixed_chan & L2CAP_FC_A2MP))
		return false;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		if (hdev->amp_type != AMP_TYPE_BREDR &&
		    test_bit(HCI_UP, &hdev->flags)) {
			amp_available = true;
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);

	if (chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED)
		return amp_available;

	return false;
}
1153
/* Validate extended flow spec parameters for @chan.
 * Currently a stub that accepts everything.
 */
static bool l2cap_check_efs(struct l2cap_chan *chan)
{
	/* Check EFS parameters */
	return true;
}
1159
/* Send an L2CAP connection request for @chan and mark the connect as
 * pending until the response arrives.
 */
void l2cap_send_conn_req(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_conn_req req;

	req.scid = cpu_to_le16(chan->scid);
	req.psm  = chan->psm;

	chan->ident = l2cap_get_ident(conn);

	set_bit(CONF_CONNECT_PEND, &chan->conf_state);

	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
}
1174
/* Send a Create Channel request for @chan targeting AMP controller
 * @amp_id.
 */
static void l2cap_send_create_chan_req(struct l2cap_chan *chan, u8 amp_id)
{
	struct l2cap_create_chan_req req;
	req.scid = cpu_to_le16(chan->scid);
	req.psm  = chan->psm;
	req.amp_id = amp_id;

	chan->ident = l2cap_get_ident(chan->conn);

	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_REQ,
		       sizeof(req), &req);
}
1187
/* Prepare an ERTM channel for a channel move: stop all timers, rewind
 * retransmission state, flush SREJ bookkeeping and park both state
 * machines until the move completes. No-op for non-ERTM channels.
 */
static void l2cap_move_setup(struct l2cap_chan *chan)
{
	struct sk_buff *skb;

	BT_DBG("chan %p", chan);

	if (chan->mode != L2CAP_MODE_ERTM)
		return;

	__clear_retrans_timer(chan);
	__clear_monitor_timer(chan);
	__clear_ack_timer(chan);

	chan->retry_count = 0;
	/* Reset the retry count on already-sent frames; the walk stops at
	 * the first frame that was never transmitted (retries == 0).
	 */
	skb_queue_walk(&chan->tx_q, skb) {
		if (bt_cb(skb)->l2cap.retries)
			bt_cb(skb)->l2cap.retries = 1;
		else
			break;
	}

	chan->expected_tx_seq = chan->buffer_seq;

	clear_bit(CONN_REJ_ACT, &chan->conn_state);
	clear_bit(CONN_SREJ_ACT, &chan->conn_state);
	l2cap_seq_list_clear(&chan->retrans_list);
	l2cap_seq_list_clear(&chan->srej_list);
	skb_queue_purge(&chan->srej_q);

	chan->tx_state = L2CAP_TX_STATE_XMIT;
	chan->rx_state = L2CAP_RX_STATE_MOVE;

	/* Hold off transmissions until the move has finished. */
	set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
}
1222
1223static void l2cap_move_done(struct l2cap_chan *chan)
1224{
1225 u8 move_role = chan->move_role;
1226 BT_DBG("chan %p", chan);
1227
1228 chan->move_state = L2CAP_MOVE_STABLE;
1229 chan->move_role = L2CAP_MOVE_ROLE_NONE;
1230
1231 if (chan->mode != L2CAP_MODE_ERTM)
1232 return;
1233
1234 switch (move_role) {
1235 case L2CAP_MOVE_ROLE_INITIATOR:
1236 l2cap_tx(chan, NULL, NULL, L2CAP_EV_EXPLICIT_POLL);
1237 chan->rx_state = L2CAP_RX_STATE_WAIT_F;
1238 break;
1239 case L2CAP_MOVE_ROLE_RESPONDER:
1240 chan->rx_state = L2CAP_RX_STATE_WAIT_P;
1241 break;
1242 }
1243}
1244
/* Transition @chan to BT_CONNECTED and notify its owner via ->ready().
 * Safe to call more than once; later calls are no-ops.
 */
static void l2cap_chan_ready(struct l2cap_chan *chan)
{
	/* The channel may have already been flagged as connected in
	 * case of receiving data before the L2CAP info req/rsp
	 * procedure is complete.
	 */
	if (chan->state == BT_CONNECTED)
		return;

	/* This clears all conf flags, including CONF_NOT_COMPLETE */
	chan->conf_state = 0;
	__clear_chan_timer(chan);

	/* LE flow-control channels without TX credits must not send yet. */
	if (chan->mode == L2CAP_MODE_LE_FLOWCTL && !chan->tx_credits)
		chan->ops->suspend(chan);

	chan->state = BT_CONNECTED;

	chan->ops->ready(chan);
}
1265
1266static void l2cap_le_connect(struct l2cap_chan *chan)
1267{
1268 struct l2cap_conn *conn = chan->conn;
1269 struct l2cap_le_conn_req req;
1270
1271 if (test_and_set_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags))
1272 return;
1273
1274 req.psm = chan->psm;
1275 req.scid = cpu_to_le16(chan->scid);
1276 req.mtu = cpu_to_le16(chan->imtu);
1277 req.mps = cpu_to_le16(chan->mps);
1278 req.credits = cpu_to_le16(chan->rx_credits);
1279
1280 chan->ident = l2cap_get_ident(conn);
1281
1282 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_REQ,
1283 sizeof(req), &req);
1284}
1285
1286static void l2cap_le_start(struct l2cap_chan *chan)
1287{
1288 struct l2cap_conn *conn = chan->conn;
1289
1290 if (!smp_conn_security(conn->hcon, chan->sec_level))
1291 return;
1292
1293 if (!chan->psm) {
1294 l2cap_chan_ready(chan);
1295 return;
1296 }
1297
1298 if (chan->state == BT_CONNECT)
1299 l2cap_le_connect(chan);
1300}
1301
1302static void l2cap_start_connection(struct l2cap_chan *chan)
1303{
1304 if (__amp_capable(chan)) {
1305 BT_DBG("chan %p AMP capable: discover AMPs", chan);
1306 a2mp_discover_amp(chan);
1307 } else if (chan->conn->hcon->type == LE_LINK) {
1308 l2cap_le_start(chan);
1309 } else {
1310 l2cap_send_conn_req(chan);
1311 }
1312}
1313
1314static void l2cap_request_info(struct l2cap_conn *conn)
1315{
1316 struct l2cap_info_req req;
1317
1318 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1319 return;
1320
1321 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
1322
1323 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
1324 conn->info_ident = l2cap_get_ident(conn);
1325
1326 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
1327
1328 l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
1329 sizeof(req), &req);
1330}
1331
1332static void l2cap_do_start(struct l2cap_chan *chan)
1333{
1334 struct l2cap_conn *conn = chan->conn;
1335
1336 if (conn->hcon->type == LE_LINK) {
1337 l2cap_le_start(chan);
1338 return;
1339 }
1340
1341 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)) {
1342 l2cap_request_info(conn);
1343 return;
1344 }
1345
1346 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
1347 return;
1348
1349 if (l2cap_chan_check_security(chan, true) &&
1350 __l2cap_no_conn_pending(chan))
1351 l2cap_start_connection(chan);
1352}
1353
1354static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1355{
1356 u32 local_feat_mask = l2cap_feat_mask;
1357 if (!disable_ertm)
1358 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1359
1360 switch (mode) {
1361 case L2CAP_MODE_ERTM:
1362 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1363 case L2CAP_MODE_STREAMING:
1364 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
1365 default:
1366 return 0x00;
1367 }
1368}
1369
/* Send an L2CAP Disconnect Request for @chan and move it to BT_DISCONN
 * with @err recorded. ERTM timers are stopped first; A2MP channels only
 * change state (no Disconnect Request is defined for them).
 */
static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_disconn_req req;

	if (!conn)
		return;

	if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);
	}

	if (chan->scid == L2CAP_CID_A2MP) {
		l2cap_state_change(chan, BT_DISCONN);
		return;
	}

	req.dcid = cpu_to_le16(chan->dcid);
	req.scid = cpu_to_le16(chan->scid);
	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
		       sizeof(req), &req);

	l2cap_state_change_and_error(chan, BT_DISCONN, err);
}
1396
1397/* ---- L2CAP connections ---- */
/* Walk every channel on @conn and advance its setup: connectionless
 * channels become ready, BT_CONNECT channels start connecting, and
 * BT_CONNECT2 (incoming) channels are answered with a Connect Response
 * (and a first Configure Request on success). Called once the info
 * request exchange has finished or timed out.
 */
static void l2cap_conn_start(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan, *tmp;

	BT_DBG("conn %p", conn);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			l2cap_chan_ready(chan);
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			if (!l2cap_chan_check_security(chan, true) ||
			    !__l2cap_no_conn_pending(chan)) {
				l2cap_chan_unlock(chan);
				continue;
			}

			/* A channel that mandates a mode (CONF_STATE2_DEVICE)
			 * the remote does not support cannot proceed at all.
			 */
			if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
			    && test_bit(CONF_STATE2_DEVICE,
					&chan->conf_state)) {
				l2cap_chan_close(chan, ECONNRESET);
				l2cap_chan_unlock(chan);
				continue;
			}

			l2cap_start_connection(chan);

		} else if (chan->state == BT_CONNECT2) {
			struct l2cap_conn_rsp rsp;
			char buf[128];
			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);

			if (l2cap_chan_check_security(chan, false)) {
				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
					/* Let the owner accept/reject first. */
					rsp.result = cpu_to_le16(L2CAP_CR_PEND);
					rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
					chan->ops->defer(chan);

				} else {
					l2cap_state_change(chan, BT_CONFIG);
					rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
					rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
				}
			} else {
				rsp.result = cpu_to_le16(L2CAP_CR_PEND);
				rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
			}

			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
			    rsp.result != L2CAP_CR_SUCCESS) {
				l2cap_chan_unlock(chan);
				continue;
			}

			/* Accepted: immediately start configuration. */
			set_bit(CONF_REQ_SENT, &chan->conf_state);
			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
				       l2cap_build_conf_req(chan, buf), buf);
			chan->num_conf_req++;
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
1474
/* LE link-level connection established: start pending security for
 * outgoing pairings and, as slave, request a connection parameter update
 * if the current interval is outside our configured bounds.
 */
static void l2cap_le_conn_ready(struct l2cap_conn *conn)
{
	struct hci_conn *hcon = conn->hcon;
	struct hci_dev *hdev = hcon->hdev;

	BT_DBG("%s conn %p", hdev->name, conn);

	/* For outgoing pairing which doesn't necessarily have an
	 * associated socket (e.g. mgmt_pair_device).
	 */
	if (hcon->out)
		smp_conn_security(hcon, hcon->pending_sec_level);

	/* For LE slave connections, make sure the connection interval
	 * is in the range of the minimum and maximum interval that has
	 * been configured for this connection. If not, then trigger
	 * the connection update procedure.
	 */
	if (hcon->role == HCI_ROLE_SLAVE &&
	    (hcon->le_conn_interval < hcon->le_conn_min_interval ||
	     hcon->le_conn_interval > hcon->le_conn_max_interval)) {
		struct l2cap_conn_param_update_req req;

		req.min = cpu_to_le16(hcon->le_conn_min_interval);
		req.max = cpu_to_le16(hcon->le_conn_max_interval);
		req.latency = cpu_to_le16(hcon->le_conn_latency);
		req.to_multiplier = cpu_to_le16(hcon->le_supv_timeout);

		l2cap_send_cmd(conn, l2cap_get_ident(conn),
			       L2CAP_CONN_PARAM_UPDATE_REQ, sizeof(req), &req);
	}
}
1507
/* Link-level connection on @conn is up: start the BR/EDR info exchange,
 * advance every attached channel appropriately for its type/state, run
 * the LE-specific ready work, and release any RX frames that were queued
 * while the connection was still coming up.
 */
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan;
	struct hci_conn *hcon = conn->hcon;

	BT_DBG("conn %p", conn);

	if (hcon->type == ACL_LINK)
		l2cap_request_info(conn);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {

		l2cap_chan_lock(chan);

		/* A2MP channels are managed by their own state machine. */
		if (chan->scid == L2CAP_CID_A2MP) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (hcon->type == LE_LINK) {
			l2cap_le_start(chan);
		} else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			/* Connectionless channels only need the feature
			 * exchange to have completed.
			 */
			if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
				l2cap_chan_ready(chan);
		} else if (chan->state == BT_CONNECT) {
			l2cap_do_start(chan);
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);

	if (hcon->type == LE_LINK)
		l2cap_le_conn_ready(conn);

	queue_work(hcon->hdev->workqueue, &conn->pending_rx_work);
}
1548
/* Notify sockets that we cannot guarantee reliability anymore */
1550static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1551{
1552 struct l2cap_chan *chan;
1553
1554 BT_DBG("conn %p", conn);
1555
1556 mutex_lock(&conn->chan_lock);
1557
1558 list_for_each_entry(chan, &conn->chan_l, list) {
1559 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1560 l2cap_chan_set_err(chan, err);
1561 }
1562
1563 mutex_unlock(&conn->chan_lock);
1564}
1565
/* Info request timed out without a response: mark the exchange as done
 * anyway and let waiting channels proceed with connection setup.
 */
static void l2cap_info_timeout(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       info_timer.work);

	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
	conn->info_ident = 0;

	l2cap_conn_start(conn);
}
1576
/*
 * l2cap_user
 * External modules can register l2cap_user objects on l2cap_conn. The ->probe
 * callback is called during registration. The ->remove callback is called
 * during unregistration.
 * An l2cap_user object is unregistered either explicitly, or implicitly when
 * the underlying l2cap_conn object is deleted. This guarantees that l2cap->hcon,
 * l2cap->hchan, .. are valid as long as the remove callback hasn't been called.
 * External modules must own a reference to the l2cap_conn object if they intend
 * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
 * any time if they don't.
 */
1589
/* Register @user on @conn, invoking its ->probe callback under the hci_dev
 * lock. Returns 0 on success, -EINVAL if @user is already registered,
 * -ENODEV if the connection has been torn down, or the ->probe error.
 */
int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
{
	struct hci_dev *hdev = conn->hcon->hdev;
	int ret;

	/* We need to check whether l2cap_conn is registered. If it is not, we
	 * must not register the l2cap_user. l2cap_conn_del() unregisters
	 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
	 * relies on the parent hci_conn object to be locked. This itself relies
	 * on the hci_dev object to be locked. So we must lock the hci device
	 * here, too. */

	hci_dev_lock(hdev);

	/* Non-NULL list pointers mean the user is already registered. */
	if (user->list.next || user->list.prev) {
		ret = -EINVAL;
		goto out_unlock;
	}

	/* conn->hchan is NULL after l2cap_conn_del() was called */
	if (!conn->hchan) {
		ret = -ENODEV;
		goto out_unlock;
	}

	ret = user->probe(conn, user);
	if (ret)
		goto out_unlock;

	list_add(&user->list, &conn->users);
	ret = 0;

out_unlock:
	hci_dev_unlock(hdev);
	return ret;
}
1626EXPORT_SYMBOL(l2cap_register_user);
1627
1628void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
1629{
1630 struct hci_dev *hdev = conn->hcon->hdev;
1631
1632 hci_dev_lock(hdev);
1633
1634 if (!user->list.next || !user->list.prev)
1635 goto out_unlock;
1636
1637 list_del(&user->list);
1638 user->list.next = NULL;
1639 user->list.prev = NULL;
1640 user->remove(conn, user);
1641
1642out_unlock:
1643 hci_dev_unlock(hdev);
1644}
1645EXPORT_SYMBOL(l2cap_unregister_user);
1646
1647static void l2cap_unregister_all_users(struct l2cap_conn *conn)
1648{
1649 struct l2cap_user *user;
1650
1651 while (!list_empty(&conn->users)) {
1652 user = list_first_entry(&conn->users, struct l2cap_user, list);
1653 list_del(&user->list);
1654 user->list.next = NULL;
1655 user->list.prev = NULL;
1656 user->remove(conn, user);
1657 }
1658}
1659
/* Tear down the L2CAP connection attached to @hcon: cancel pending work,
 * detach users, close every channel with @err, and drop the conn's
 * reference. Relies on the caller holding the hci_conn/hci_dev locks
 * (see l2cap_register_user).
 */
static void l2cap_conn_del(struct hci_conn *hcon, int err)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan, *l;

	if (!conn)
		return;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	kfree_skb(conn->rx_skb);

	skb_queue_purge(&conn->pending_rx);

	/* We can not call flush_work(&conn->pending_rx_work) here since we
	 * might block if we are running on a worker from the same workqueue
	 * pending_rx_work is waiting on.
	 */
	if (work_pending(&conn->pending_rx_work))
		cancel_work_sync(&conn->pending_rx_work);

	if (work_pending(&conn->id_addr_update_work))
		cancel_work_sync(&conn->id_addr_update_work);

	l2cap_unregister_all_users(conn);

	/* Force the connection to be immediately dropped */
	hcon->disc_timeout = 0;

	mutex_lock(&conn->chan_lock);

	/* Kill channels */
	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
		/* Hold a reference: chan_del unlinks the channel, and
		 * ->close may release the last other reference.
		 */
		l2cap_chan_hold(chan);
		l2cap_chan_lock(chan);

		l2cap_chan_del(chan, err);

		l2cap_chan_unlock(chan);

		chan->ops->close(chan);
		l2cap_chan_put(chan);
	}

	mutex_unlock(&conn->chan_lock);

	hci_chan_del(conn->hchan);

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		cancel_delayed_work_sync(&conn->info_timer);

	hcon->l2cap_data = NULL;
	conn->hchan = NULL;
	l2cap_conn_put(conn);
}
1715
/* kref release callback: free @conn and drop its hci_conn reference. */
static void l2cap_conn_free(struct kref *ref)
{
	struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);

	hci_conn_put(conn->hcon);
	kfree(conn);
}
1723
/* Take a reference on @conn and return it (for call chaining). */
struct l2cap_conn *l2cap_conn_get(struct l2cap_conn *conn)
{
	kref_get(&conn->ref);
	return conn;
}
1729EXPORT_SYMBOL(l2cap_conn_get);
1730
/* Drop a reference on @conn; frees it via l2cap_conn_free on the last put. */
void l2cap_conn_put(struct l2cap_conn *conn)
{
	kref_put(&conn->ref, l2cap_conn_free);
}
1735EXPORT_SYMBOL(l2cap_conn_put);
1736
1737/* ---- Socket interface ---- */
1738
1739/* Find socket with psm and source / destination bdaddr.
1740 * Returns closest match.
1741 */
/* Look up a global channel matching @state, @psm and transport @link_type.
 * An exact src/dst address match wins immediately; otherwise the closest
 * wildcard (BDADDR_ANY) match is returned. The returned channel carries a
 * reference (l2cap_chan_hold); NULL if nothing matches.
 */
static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
						   bdaddr_t *src,
						   bdaddr_t *dst,
						   u8 link_type)
{
	struct l2cap_chan *c, *c1 = NULL;

	read_lock(&chan_list_lock);

	list_for_each_entry(c, &chan_list, global_l) {
		if (state && c->state != state)
			continue;

		/* The channel's source address type must match the link. */
		if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR)
			continue;

		if (link_type == LE_LINK && c->src_type == BDADDR_BREDR)
			continue;

		if (c->psm == psm) {
			int src_match, dst_match;
			int src_any, dst_any;

			/* Exact match. */
			src_match = !bacmp(&c->src, src);
			dst_match = !bacmp(&c->dst, dst);
			if (src_match && dst_match) {
				l2cap_chan_hold(c);
				read_unlock(&chan_list_lock);
				return c;
			}

			/* Closest match */
			src_any = !bacmp(&c->src, BDADDR_ANY);
			dst_any = !bacmp(&c->dst, BDADDR_ANY);
			if ((src_match && dst_any) || (src_any && dst_match) ||
			    (src_any && dst_any))
				c1 = c;
		}
	}

	if (c1)
		l2cap_chan_hold(c1);

	read_unlock(&chan_list_lock);

	return c1;
}
1790
1791static void l2cap_monitor_timeout(struct work_struct *work)
1792{
1793 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1794 monitor_timer.work);
1795
1796 BT_DBG("chan %p", chan);
1797
1798 l2cap_chan_lock(chan);
1799
1800 if (!chan->conn) {
1801 l2cap_chan_unlock(chan);
1802 l2cap_chan_put(chan);
1803 return;
1804 }
1805
1806 l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);
1807
1808 l2cap_chan_unlock(chan);
1809 l2cap_chan_put(chan);
1810}
1811
1812static void l2cap_retrans_timeout(struct work_struct *work)
1813{
1814 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1815 retrans_timer.work);
1816
1817 BT_DBG("chan %p", chan);
1818
1819 l2cap_chan_lock(chan);
1820
1821 if (!chan->conn) {
1822 l2cap_chan_unlock(chan);
1823 l2cap_chan_put(chan);
1824 return;
1825 }
1826
1827 l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
1828 l2cap_chan_unlock(chan);
1829 l2cap_chan_put(chan);
1830}
1831
/* Transmit @skbs on @chan in Streaming mode: frames are numbered, given
 * their control field (and FCS if enabled) and sent immediately without
 * any retransmission bookkeeping. Skipped while a channel move is active.
 */
static void l2cap_streaming_send(struct l2cap_chan *chan,
				 struct sk_buff_head *skbs)
{
	struct sk_buff *skb;
	struct l2cap_ctrl *control;

	BT_DBG("chan %p, skbs %p", chan, skbs);

	if (__chan_is_moving(chan))
		return;

	skb_queue_splice_tail_init(skbs, &chan->tx_q);

	while (!skb_queue_empty(&chan->tx_q)) {

		skb = skb_dequeue(&chan->tx_q);

		bt_cb(skb)->l2cap.retries = 1;
		control = &bt_cb(skb)->l2cap;

		control->reqseq = 0;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		/* Append the frame check sequence when negotiated. */
		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		l2cap_do_send(chan, skb);

		BT_DBG("Sent txseq %u", control->txseq);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->frames_sent++;
	}
}
1870
/* Transmit queued I-frames on an ERTM channel, up to the remote TX window.
 * Each frame gets sequence numbers, control field and optional FCS, then a
 * clone is sent while the original stays queued for retransmission.
 * Returns the number of frames sent, or -ENOTCONN.
 */
static int l2cap_ertm_send(struct l2cap_chan *chan)
{
	struct sk_buff *skb, *tx_skb;
	struct l2cap_ctrl *control;
	int sent = 0;

	BT_DBG("chan %p", chan);

	if (chan->state != BT_CONNECTED)
		return -ENOTCONN;

	/* Remote receiver is busy: hold all transmissions. */
	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return 0;

	if (__chan_is_moving(chan))
		return 0;

	while (chan->tx_send_head &&
	       chan->unacked_frames < chan->remote_tx_win &&
	       chan->tx_state == L2CAP_TX_STATE_XMIT) {

		skb = chan->tx_send_head;

		bt_cb(skb)->l2cap.retries = 1;
		control = &bt_cb(skb)->l2cap;

		/* Piggy-back the F-bit when one is owed to the remote. */
		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control->final = 1;

		control->reqseq = chan->buffer_seq;
		chan->last_acked_seq = chan->buffer_seq;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		/* Clone after data has been modified. Data is assumed to be
		   read-only (for locking purposes) on cloned sk_buffs.
		 */
		tx_skb = skb_clone(skb, GFP_KERNEL);

		if (!tx_skb)
			break;

		__set_retrans_timer(chan);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->unacked_frames++;
		chan->frames_sent++;
		sent++;

		/* Advance the send pointer; the skb itself stays in tx_q
		 * until it is acknowledged.
		 */
		if (skb_queue_is_last(&chan->tx_q, skb))
			chan->tx_send_head = NULL;
		else
			chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);

		l2cap_do_send(chan, tx_skb);
		BT_DBG("Sent txseq %u", control->txseq);
	}

	BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
	       chan->unacked_frames, skb_queue_len(&chan->tx_q));

	return sent;
}
1940
/* Retransmit every sequence number queued on chan->retrans_list. Each
 * frame's control field and FCS are refreshed before sending; the retry
 * limit (chan->max_tx) triggers a disconnect when exceeded. Held off
 * while the remote is busy or a channel move is in progress.
 */
static void l2cap_ertm_resend(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	struct sk_buff *skb;
	struct sk_buff *tx_skb;
	u16 seq;

	BT_DBG("chan %p", chan);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	if (__chan_is_moving(chan))
		return;

	while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
		seq = l2cap_seq_list_pop(&chan->retrans_list);

		skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
		if (!skb) {
			BT_DBG("Error: Can't retransmit seq %d, frame missing",
			       seq);
			continue;
		}

		bt_cb(skb)->l2cap.retries++;
		control = bt_cb(skb)->l2cap;

		/* max_tx == 0 means unlimited retries. */
		if (chan->max_tx != 0 &&
		    bt_cb(skb)->l2cap.retries > chan->max_tx) {
			BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
			l2cap_send_disconn_req(chan, ECONNRESET);
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		control.reqseq = chan->buffer_seq;
		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control.final = 1;
		else
			control.final = 0;

		if (skb_cloned(skb)) {
			/* Cloned sk_buffs are read-only, so we need a
			 * writeable copy
			 */
			tx_skb = skb_copy(skb, GFP_KERNEL);
		} else {
			tx_skb = skb_clone(skb, GFP_KERNEL);
		}

		if (!tx_skb) {
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		/* Update skb contents */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
			put_unaligned_le32(__pack_extended_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		} else {
			put_unaligned_le16(__pack_enhanced_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		}

		/* Update FCS */
		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) tx_skb->data,
					tx_skb->len - L2CAP_FCS_SIZE);
			put_unaligned_le16(fcs, skb_tail_pointer(tx_skb) -
					   L2CAP_FCS_SIZE);
		}

		l2cap_do_send(chan, tx_skb);

		BT_DBG("Resent txseq %d", control.txseq);

		chan->last_acked_seq = chan->buffer_seq;
	}
}
2021
/* Queue the single sequence number requested in @control for
 * retransmission, then run the resend machinery.
 */
static void l2cap_retransmit(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);

	l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
	l2cap_ertm_resend(chan);
}
2030
/* Rebuild the retransmission list to cover every unacked frame starting
 * at @control->reqseq, then resend them. A poll bit in @control obliges
 * us to set the F-bit on the next outgoing frame.
 */
static void l2cap_retransmit_all(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	if (control->poll)
		set_bit(CONN_SEND_FBIT, &chan->conn_state);

	l2cap_seq_list_clear(&chan->retrans_list);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	if (chan->unacked_frames) {
		/* Find the first frame to retransmit (reqseq), stopping
		 * early at the not-yet-sent boundary.
		 */
		skb_queue_walk(&chan->tx_q, skb) {
			if (bt_cb(skb)->l2cap.txseq == control->reqseq ||
			    skb == chan->tx_send_head)
				break;
		}

		/* Queue everything from there up to (not including) the
		 * first never-sent frame.
		 */
		skb_queue_walk_from(&chan->tx_q, skb) {
			if (skb == chan->tx_send_head)
				break;

			l2cap_seq_list_append(&chan->retrans_list,
					      bt_cb(skb)->l2cap.txseq);
		}

		l2cap_ertm_resend(chan);
	}
}
2064
/* Acknowledge received I-frames. Sends RNR while locally busy; otherwise
 * tries to piggy-back the ack on outgoing I-frames, sends an explicit RR
 * once ~3/4 of the window is outstanding, or arms the ack timer.
 */
static void l2cap_send_ack(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
					 chan->last_acked_seq);
	int threshold;

	BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
	       chan, chan->last_acked_seq, chan->buffer_seq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    chan->rx_state == L2CAP_RX_STATE_RECV) {
		__clear_ack_timer(chan);
		control.super = L2CAP_SUPER_RNR;
		control.reqseq = chan->buffer_seq;
		l2cap_send_sframe(chan, &control);
	} else {
		if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
			l2cap_ertm_send(chan);
			/* If any i-frames were sent, they included an ack */
			if (chan->buffer_seq == chan->last_acked_seq)
				frames_to_ack = 0;
		}

		/* Ack now if the window is 3/4ths full.
		 * Calculate without mul or div
		 */
		threshold = chan->ack_win;
		threshold += threshold << 1;
		threshold >>= 2;

		BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
		       threshold);

		if (frames_to_ack >= threshold) {
			__clear_ack_timer(chan);
			control.super = L2CAP_SUPER_RR;
			control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &control);
			frames_to_ack = 0;
		}

		/* Below threshold: defer the ack via the ack timer. */
		if (frames_to_ack)
			__set_ack_timer(chan);
	}
}
2114
/* Copy @len bytes of user data from @msg into @skb: the first @count bytes
 * go into skb's linear area (the caller reserved space for headers), the
 * remainder into newly allocated fragments chained on the frag_list, each
 * at most conn->mtu bytes. Returns bytes copied or a negative errno.
 */
static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
					 struct msghdr *msg, int len,
					 int count, struct sk_buff *skb)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff **frag;
	int sent = 0;

	if (copy_from_iter(skb_put(skb, count), count, &msg->msg_iter) != count)
		return -EFAULT;

	sent += count;
	len -= count;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		struct sk_buff *tmp;

		count = min_t(unsigned int, conn->mtu, len);

		tmp = chan->ops->alloc_skb(chan, 0, count,
					   msg->msg_flags & MSG_DONTWAIT);
		if (IS_ERR(tmp))
			return PTR_ERR(tmp);

		*frag = tmp;

		if (copy_from_iter(skb_put(*frag, count), count,
				   &msg->msg_iter) != count)
			return -EFAULT;

		sent += count;
		len -= count;

		/* Keep the parent skb's accounting in sync with the chain. */
		skb->len += (*frag)->len;
		skb->data_len += (*frag)->len;

		frag = &(*frag)->next;
	}

	return sent;
}
2158
/* Build a connectionless (G-frame) PDU: basic L2CAP header followed by the
 * PSM, then the user payload from @msg. Returns the skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
						 struct msghdr *msg, size_t len)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p psm 0x%2.2x len %zu", chan,
	       __le16_to_cpu(chan->psm), len);

	/* Linear part carries at most one MTU minus the header. */
	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, hlen, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
	put_unaligned(chan->psm, (__le16 *) skb_put(skb, L2CAP_PSMLEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
2190
/* Build a Basic mode (B-frame) PDU: plain L2CAP header plus the user
 * payload from @msg. Returns the skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
					      struct msghdr *msg, size_t len)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);

	skb = chan->ops->alloc_skb(chan, L2CAP_HDR_SIZE, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len);

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
2220
/* Build an ERTM/Streaming I-frame PDU: L2CAP header, a zeroed control
 * field (filled in at transmit time), an optional SDU length (@sdulen,
 * non-zero only for the first segment of a segmented SDU), the payload,
 * and room reserved for an FCS when negotiated. Returns skb or ERR_PTR.
 */
static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
					       struct msghdr *msg, size_t len,
					       u16 sdulen)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	hlen = __ertm_hdr_size(chan);

	if (sdulen)
		hlen += L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, hlen, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	/* Control header is populated later */
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	bt_cb(skb)->l2cap.fcs = chan->fcs;
	bt_cb(skb)->l2cap.retries = 0;
	return skb;
}
2274
/* Split an outgoing SDU from @msg into ERTM/Streaming I-frame PDUs queued
 * on @seg_queue, tagging each segment with its SAR value (unsegmented, or
 * start/continue/end). Returns 0 or a negative errno (queue purged).
 */
static int l2cap_segment_sdu(struct l2cap_chan *chan,
			     struct sk_buff_head *seg_queue,
			     struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	u16 sdu_len;
	size_t pdu_len;
	u8 sar;

	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);

	/* It is critical that ERTM PDUs fit in a single HCI fragment,
	 * so fragmented skbs are not used. The HCI layer's handling
	 * of fragmented skbs is not compatible with ERTM's queueing.
	 */

	/* PDU size is derived from the HCI MTU */
	pdu_len = chan->conn->mtu;

	/* Constrain PDU size for BR/EDR connections */
	if (!chan->hs_hcon)
		pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);

	/* Adjust for largest possible L2CAP overhead. */
	if (chan->fcs)
		pdu_len -= L2CAP_FCS_SIZE;

	pdu_len -= __ertm_hdr_size(chan);

	/* Remote device may have requested smaller PDUs */
	pdu_len = min_t(size_t, pdu_len, chan->remote_mps);

	if (len <= pdu_len) {
		sar = L2CAP_SAR_UNSEGMENTED;
		sdu_len = 0;
		pdu_len = len;
	} else {
		sar = L2CAP_SAR_START;
		sdu_len = len;
	}

	while (len > 0) {
		skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);

		if (IS_ERR(skb)) {
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		bt_cb(skb)->l2cap.sar = sar;
		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;
		/* Only the first PDU carries the total SDU length. */
		if (sdu_len)
			sdu_len = 0;

		if (len <= pdu_len) {
			sar = L2CAP_SAR_END;
			pdu_len = len;
		} else {
			sar = L2CAP_SAR_CONTINUE;
		}
	}

	return 0;
}
2341
/* Build a single LE credit-based flow-control PDU: L2CAP header, an
 * optional SDU length (@sdulen, non-zero only on the first segment), and
 * the payload from @msg. Returns the skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_le_flowctl_pdu(struct l2cap_chan *chan,
						   struct msghdr *msg,
						   size_t len, u16 sdulen)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	hlen = L2CAP_HDR_SIZE;

	if (sdulen)
		hlen += L2CAP_SDULEN_SIZE;

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, hlen, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	return skb;
}
2384
2385static int l2cap_segment_le_sdu(struct l2cap_chan *chan,
2386 struct sk_buff_head *seg_queue,
2387 struct msghdr *msg, size_t len)
2388{
2389 struct sk_buff *skb;
2390 size_t pdu_len;
2391 u16 sdu_len;
2392
2393 BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2394
2395 sdu_len = len;
2396 pdu_len = chan->remote_mps - L2CAP_SDULEN_SIZE;
2397
2398 while (len > 0) {
2399 if (len <= pdu_len)
2400 pdu_len = len;
2401
2402 skb = l2cap_create_le_flowctl_pdu(chan, msg, pdu_len, sdu_len);
2403 if (IS_ERR(skb)) {
2404 __skb_queue_purge(seg_queue);
2405 return PTR_ERR(skb);
2406 }
2407
2408 __skb_queue_tail(seg_queue, skb);
2409
2410 len -= pdu_len;
2411
2412 if (sdu_len) {
2413 sdu_len = 0;
2414 pdu_len += L2CAP_SDULEN_SIZE;
2415 }
2416 }
2417
2418 return 0;
2419}
2420
/* Send an SDU on a channel, dispatching on channel type and mode.
 *
 * Returns the number of bytes accepted (len) on success or a negative
 * errno.  For LE flow control and ERTM/streaming modes the SDU is first
 * segmented into mode-specific PDUs before transmission.
 *
 * NOTE(review): several helpers called here may drop and reacquire the
 * channel lock while allocating skbs, which is why the BT_CONNECTED
 * state is rechecked after each PDU-creation/segmentation step.
 */
int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	int err;
	struct sk_buff_head seg_queue;

	if (!chan->conn)
		return -ENOTCONN;

	/* Connectionless channel */
	if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
		skb = l2cap_create_connless_pdu(chan, msg, len);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		/* Channel lock is released before requesting new skb and then
		 * reacquired thus we need to recheck channel state.
		 */
		if (chan->state != BT_CONNECTED) {
			kfree_skb(skb);
			return -ENOTCONN;
		}

		l2cap_do_send(chan, skb);
		return len;
	}

	switch (chan->mode) {
	case L2CAP_MODE_LE_FLOWCTL:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		/* Peer has granted no credits; caller must retry later */
		if (!chan->tx_credits)
			return -EAGAIN;

		__skb_queue_head_init(&seg_queue);

		err = l2cap_segment_le_sdu(chan, &seg_queue, msg, len);

		/* State may have changed while segmenting (lock dropped
		 * during skb allocation); disconnect wins over any
		 * segmentation result.
		 */
		if (chan->state != BT_CONNECTED) {
			__skb_queue_purge(&seg_queue);
			err = -ENOTCONN;
		}

		if (err)
			return err;

		skb_queue_splice_tail_init(&seg_queue, &chan->tx_q);

		/* Send as many queued PDUs as the credit count allows */
		while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
			l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
			chan->tx_credits--;
		}

		/* Out of credits: stop the socket until more arrive */
		if (!chan->tx_credits)
			chan->ops->suspend(chan);

		err = len;

		break;

	case L2CAP_MODE_BASIC:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		/* Create a basic PDU */
		skb = l2cap_create_basic_pdu(chan, msg, len);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		/* Channel lock is released before requesting new skb and then
		 * reacquired thus we need to recheck channel state.
		 */
		if (chan->state != BT_CONNECTED) {
			kfree_skb(skb);
			return -ENOTCONN;
		}

		l2cap_do_send(chan, skb);
		err = len;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* Check outgoing MTU */
		if (len > chan->omtu) {
			err = -EMSGSIZE;
			break;
		}

		__skb_queue_head_init(&seg_queue);

		/* Do segmentation before calling in to the state machine,
		 * since it's possible to block while waiting for memory
		 * allocation.
		 */
		err = l2cap_segment_sdu(chan, &seg_queue, msg, len);

		/* The channel could have been closed while segmenting,
		 * check that it is still connected.
		 */
		if (chan->state != BT_CONNECTED) {
			__skb_queue_purge(&seg_queue);
			err = -ENOTCONN;
		}

		if (err)
			break;

		if (chan->mode == L2CAP_MODE_ERTM)
			l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
		else
			l2cap_streaming_send(chan, &seg_queue);

		err = len;

		/* If the skbs were not queued for sending, they'll still be in
		 * seg_queue and need to be purged.
		 */
		__skb_queue_purge(&seg_queue);
		break;

	default:
		BT_DBG("bad state %1.1x", chan->mode);
		err = -EBADFD;
	}

	return err;
}
EXPORT_SYMBOL_GPL(l2cap_chan_send);
2553
/* Send an SREJ S-frame for every sequence number from the next expected
 * txseq up to (but not including) the received out-of-sequence txseq
 * that has not already been buffered in srej_q, and remember each
 * requested seq on srej_list so retransmissions can be matched up.
 */
static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
{
	struct l2cap_ctrl control;
	u16 seq;

	BT_DBG("chan %p, txseq %u", chan, txseq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;

	for (seq = chan->expected_tx_seq; seq != txseq;
	     seq = __next_seq(chan, seq)) {
		/* Skip frames already received out of order */
		if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
			control.reqseq = seq;
			l2cap_send_sframe(chan, &control);
			l2cap_seq_list_append(&chan->srej_list, seq);
		}
	}

	/* Everything up to txseq is now either buffered or SREJ'd */
	chan->expected_tx_seq = __next_seq(chan, txseq);
}
2576
2577static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2578{
2579 struct l2cap_ctrl control;
2580
2581 BT_DBG("chan %p", chan);
2582
2583 if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2584 return;
2585
2586 memset(&control, 0, sizeof(control));
2587 control.sframe = 1;
2588 control.super = L2CAP_SUPER_SREJ;
2589 control.reqseq = chan->srej_list.tail;
2590 l2cap_send_sframe(chan, &control);
2591}
2592
/* Resend SREJ frames for all outstanding sequence numbers up to txseq.
 * Each popped seq is re-appended, so the list is rotated in place; the
 * initial head is captured to guarantee exactly one pass.
 */
static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
{
	struct l2cap_ctrl control;
	u16 initial_head;
	u16 seq;

	BT_DBG("chan %p, txseq %u", chan, txseq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;

	/* Capture initial list head to allow only one pass through the list. */
	initial_head = chan->srej_list.head;

	do {
		seq = l2cap_seq_list_pop(&chan->srej_list);
		/* Stop at the frame just received or when the list empties */
		if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
			break;

		control.reqseq = seq;
		l2cap_send_sframe(chan, &control);
		/* Keep the seq outstanding until its retransmission arrives */
		l2cap_seq_list_append(&chan->srej_list, seq);
	} while (chan->srej_list.head != initial_head);
}
2618
/* Process an acknowledgment (reqseq) from the peer: free every frame on
 * the tx queue whose sequence number is now acknowledged and advance
 * expected_ack_seq.  Stops the retransmission timer once no frames
 * remain unacked.
 */
static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
{
	struct sk_buff *acked_skb;
	u16 ackseq;

	BT_DBG("chan %p, reqseq %u", chan, reqseq);

	/* Nothing outstanding, or this ack is a duplicate */
	if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
		return;

	BT_DBG("expected_ack_seq %u, unacked_frames %u",
	       chan->expected_ack_seq, chan->unacked_frames);

	for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
	     ackseq = __next_seq(chan, ackseq)) {

		acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
		if (acked_skb) {
			skb_unlink(acked_skb, &chan->tx_q);
			kfree_skb(acked_skb);
			chan->unacked_frames--;
		}
	}

	chan->expected_ack_seq = reqseq;

	/* All sent frames acknowledged: no retransmission needed */
	if (chan->unacked_frames == 0)
		__clear_retrans_timer(chan);

	BT_DBG("unacked_frames %u", chan->unacked_frames);
}
2650
2651static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2652{
2653 BT_DBG("chan %p", chan);
2654
2655 chan->expected_tx_seq = chan->buffer_seq;
2656 l2cap_seq_list_clear(&chan->srej_list);
2657 skb_queue_purge(&chan->srej_q);
2658 chan->rx_state = L2CAP_RX_STATE_RECV;
2659}
2660
/* ERTM transmit state machine handler for the XMIT state, in which
 * I-frames may be sent freely.  Events that start a poll/final exchange
 * (explicit poll, retransmission timeout, local busy clear with RNR
 * outstanding) transition the channel to WAIT_F.
 */
static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
				struct l2cap_ctrl *control,
				struct sk_buff_head *skbs, u8 event)
{
	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
	       event);

	switch (event) {
	case L2CAP_EV_DATA_REQUEST:
		/* Queue the new frames and transmit immediately */
		if (chan->tx_send_head == NULL)
			chan->tx_send_head = skb_peek(skbs);

		skb_queue_splice_tail_init(skbs, &chan->tx_q);
		l2cap_ertm_send(chan);
		break;
	case L2CAP_EV_LOCAL_BUSY_DETECTED:
		BT_DBG("Enter LOCAL_BUSY");
		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
			/* The SREJ_SENT state must be aborted if we are to
			 * enter the LOCAL_BUSY state.
			 */
			l2cap_abort_rx_srej_sent(chan);
		}

		l2cap_send_ack(chan);

		break;
	case L2CAP_EV_LOCAL_BUSY_CLEAR:
		BT_DBG("Exit LOCAL_BUSY");
		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
			/* We previously told the peer we were busy (RNR);
			 * poll it with RR(P=1) to resynchronize.
			 */
			struct l2cap_ctrl local_control;

			memset(&local_control, 0, sizeof(local_control));
			local_control.sframe = 1;
			local_control.super = L2CAP_SUPER_RR;
			local_control.poll = 1;
			local_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &local_control);

			chan->retry_count = 1;
			__set_monitor_timer(chan);
			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		}
		break;
	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
		l2cap_process_reqseq(chan, control->reqseq);
		break;
	case L2CAP_EV_EXPLICIT_POLL:
		/* Send RR/RNR with P=1 and await the F-bit response */
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		__clear_ack_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RETRANS_TO:
		/* Retransmission timeout: poll the peer for its state */
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RECV_FBIT:
		/* Nothing to process */
		break;
	default:
		break;
	}
}
2732
2733static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2734 struct l2cap_ctrl *control,
2735 struct sk_buff_head *skbs, u8 event)
2736{
2737 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2738 event);
2739
2740 switch (event) {
2741 case L2CAP_EV_DATA_REQUEST:
2742 if (chan->tx_send_head == NULL)
2743 chan->tx_send_head = skb_peek(skbs);
2744 /* Queue data, but don't send. */
2745 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2746 break;
2747 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2748 BT_DBG("Enter LOCAL_BUSY");
2749 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2750
2751 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2752 /* The SREJ_SENT state must be aborted if we are to
2753 * enter the LOCAL_BUSY state.
2754 */
2755 l2cap_abort_rx_srej_sent(chan);
2756 }
2757
2758 l2cap_send_ack(chan);
2759
2760 break;
2761 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2762 BT_DBG("Exit LOCAL_BUSY");
2763 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2764
2765 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2766 struct l2cap_ctrl local_control;
2767 memset(&local_control, 0, sizeof(local_control));
2768 local_control.sframe = 1;
2769 local_control.super = L2CAP_SUPER_RR;
2770 local_control.poll = 1;
2771 local_control.reqseq = chan->buffer_seq;
2772 l2cap_send_sframe(chan, &local_control);
2773
2774 chan->retry_count = 1;
2775 __set_monitor_timer(chan);
2776 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2777 }
2778 break;
2779 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2780 l2cap_process_reqseq(chan, control->reqseq);
2781
2782 /* Fall through */
2783
2784 case L2CAP_EV_RECV_FBIT:
2785 if (control && control->final) {
2786 __clear_monitor_timer(chan);
2787 if (chan->unacked_frames > 0)
2788 __set_retrans_timer(chan);
2789 chan->retry_count = 0;
2790 chan->tx_state = L2CAP_TX_STATE_XMIT;
2791 BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state);
2792 }
2793 break;
2794 case L2CAP_EV_EXPLICIT_POLL:
2795 /* Ignore */
2796 break;
2797 case L2CAP_EV_MONITOR_TO:
2798 if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
2799 l2cap_send_rr_or_rnr(chan, 1);
2800 __set_monitor_timer(chan);
2801 chan->retry_count++;
2802 } else {
2803 l2cap_send_disconn_req(chan, ECONNABORTED);
2804 }
2805 break;
2806 default:
2807 break;
2808 }
2809}
2810
2811static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
2812 struct sk_buff_head *skbs, u8 event)
2813{
2814 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2815 chan, control, skbs, event, chan->tx_state);
2816
2817 switch (chan->tx_state) {
2818 case L2CAP_TX_STATE_XMIT:
2819 l2cap_tx_state_xmit(chan, control, skbs, event);
2820 break;
2821 case L2CAP_TX_STATE_WAIT_F:
2822 l2cap_tx_state_wait_f(chan, control, skbs, event);
2823 break;
2824 default:
2825 /* Ignore event */
2826 break;
2827 }
2828}
2829
2830static void l2cap_pass_to_tx(struct l2cap_chan *chan,
2831 struct l2cap_ctrl *control)
2832{
2833 BT_DBG("chan %p, control %p", chan, control);
2834 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
2835}
2836
2837static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
2838 struct l2cap_ctrl *control)
2839{
2840 BT_DBG("chan %p, control %p", chan, control);
2841 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
2842}
2843
2844/* Copy frame to all raw sockets on that connection */
2845static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2846{
2847 struct sk_buff *nskb;
2848 struct l2cap_chan *chan;
2849
2850 BT_DBG("conn %p", conn);
2851
2852 mutex_lock(&conn->chan_lock);
2853
2854 list_for_each_entry(chan, &conn->chan_l, list) {
2855 if (chan->chan_type != L2CAP_CHAN_RAW)
2856 continue;
2857
2858 /* Don't send frame to the channel it came from */
2859 if (bt_cb(skb)->l2cap.chan == chan)
2860 continue;
2861
2862 nskb = skb_clone(skb, GFP_KERNEL);
2863 if (!nskb)
2864 continue;
2865 if (chan->ops->recv(chan, nskb))
2866 kfree_skb(nskb);
2867 }
2868
2869 mutex_unlock(&conn->chan_lock);
2870}
2871
2872/* ---- L2CAP signalling commands ---- */
/* Allocate and fill an skb for a signalling command.
 *
 * The first skb carries the L2CAP header, command header and as much of
 * @data as fits in the connection MTU; any remainder is chained as bare
 * continuation fragments on frag_list.  Returns NULL on allocation
 * failure or if the MTU cannot even hold the headers.
 */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
				       u8 ident, u16 dlen, void *data)
{
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	int len, count;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
	       conn, code, ident, dlen);

	if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
		return NULL;

	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, conn->mtu, len);

	skb = bt_skb_alloc(count, GFP_KERNEL);
	if (!skb)
		return NULL;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);

	/* Signalling CID differs between LE and BR/EDR links */
	if (conn->hcon->type == LE_LINK)
		lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
	else
		lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);

	cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->code = code;
	cmd->ident = ident;
	cmd->len = cpu_to_le16(dlen);

	if (dlen) {
		/* Fill the remainder of the first fragment with data */
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		memcpy(skb_put(skb, count), data, count);
		data += count;
	}

	len -= skb->len;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_alloc(count, GFP_KERNEL);
		if (!*frag)
			goto fail;

		memcpy(skb_put(*frag, count), data, count);

		len -= count;
		data += count;

		frag = &(*frag)->next;
	}

	return skb;

fail:
	/* Frees the whole chain including fragments built so far */
	kfree_skb(skb);
	return NULL;
}
2938
/* Decode one configuration option at *ptr, advancing *ptr past it.
 *
 * @type/@olen receive the option header fields; @val receives the value
 * for 1/2/4-byte options, or a pointer to the raw value bytes for any
 * other length.  Returns the total number of bytes consumed.
 *
 * NOTE(review): opt->len comes from the peer and is not validated here
 * against the remaining buffer length or the size callers later memcpy
 * from val — callers are expected to bound-check; verify against the
 * loop conditions in the parse functions.
 */
static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
				     unsigned long *val)
{
	struct l2cap_conf_opt *opt = *ptr;
	int len;

	len = L2CAP_CONF_OPT_SIZE + opt->len;
	*ptr += len;

	*type = opt->type;
	*olen = opt->len;

	switch (opt->len) {
	case 1:
		*val = *((u8 *) opt->val);
		break;

	case 2:
		*val = get_unaligned_le16(opt->val);
		break;

	case 4:
		*val = get_unaligned_le32(opt->val);
		break;

	default:
		/* Variable-length option: hand back a pointer to the bytes */
		*val = (unsigned long) opt->val;
		break;
	}

	BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
	return len;
}
2972
2973static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2974{
2975 struct l2cap_conf_opt *opt = *ptr;
2976
2977 BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);
2978
2979 opt->type = type;
2980 opt->len = len;
2981
2982 switch (len) {
2983 case 1:
2984 *((u8 *) opt->val) = val;
2985 break;
2986
2987 case 2:
2988 put_unaligned_le16(val, opt->val);
2989 break;
2990
2991 case 4:
2992 put_unaligned_le32(val, opt->val);
2993 break;
2994
2995 default:
2996 memcpy(opt->val, (void *) val, len);
2997 break;
2998 }
2999
3000 *ptr += L2CAP_CONF_OPT_SIZE + len;
3001}
3002
3003static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
3004{
3005 struct l2cap_conf_efs efs;
3006
3007 switch (chan->mode) {
3008 case L2CAP_MODE_ERTM:
3009 efs.id = chan->local_id;
3010 efs.stype = chan->local_stype;
3011 efs.msdu = cpu_to_le16(chan->local_msdu);
3012 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
3013 efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
3014 efs.flush_to = cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
3015 break;
3016
3017 case L2CAP_MODE_STREAMING:
3018 efs.id = 1;
3019 efs.stype = L2CAP_SERV_BESTEFFORT;
3020 efs.msdu = cpu_to_le16(chan->local_msdu);
3021 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
3022 efs.acc_lat = 0;
3023 efs.flush_to = 0;
3024 break;
3025
3026 default:
3027 return;
3028 }
3029
3030 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
3031 (unsigned long) &efs);
3032}
3033
/* Deferred-work handler for the ERTM ack timer: if any received frames
 * have not been acknowledged yet, send an RR/RNR to ack them.
 *
 * Drops the channel reference taken when the timer was armed —
 * presumably by the __set_ack_timer path; confirm against the timer
 * setup code, which is outside this view.
 */
static void l2cap_ack_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       ack_timer.work);
	u16 frames_to_ack;

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	/* Frames received since the last acknowledgment we sent */
	frames_to_ack = __seq_offset(chan, chan->buffer_seq,
				     chan->last_acked_seq);

	if (frames_to_ack)
		l2cap_send_rr_or_rnr(chan, 0);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
3053
/* Initialize per-channel transmit/receive state for a newly configured
 * channel.  The sequence counters, tx queue and AMP move state are
 * reset for every mode; the ERTM-specific timers, srej queue and
 * sequence lists are only set up when the channel is in ERTM mode.
 *
 * Returns 0 on success or a negative error from sequence-list
 * allocation (in which case any partially allocated list is freed).
 */
int l2cap_ertm_init(struct l2cap_chan *chan)
{
	int err;

	chan->next_tx_seq = 0;
	chan->expected_tx_seq = 0;
	chan->expected_ack_seq = 0;
	chan->unacked_frames = 0;
	chan->buffer_seq = 0;
	chan->frames_sent = 0;
	chan->last_acked_seq = 0;
	chan->sdu = NULL;
	chan->sdu_last_frag = NULL;
	chan->sdu_len = 0;

	skb_queue_head_init(&chan->tx_q);

	/* Channel starts on the BR/EDR controller with no AMP move active */
	chan->local_amp_id = AMP_ID_BREDR;
	chan->move_id = AMP_ID_BREDR;
	chan->move_state = L2CAP_MOVE_STABLE;
	chan->move_role = L2CAP_MOVE_ROLE_NONE;

	if (chan->mode != L2CAP_MODE_ERTM)
		return 0;

	chan->rx_state = L2CAP_RX_STATE_RECV;
	chan->tx_state = L2CAP_TX_STATE_XMIT;

	INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
	INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
	INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);

	skb_queue_head_init(&chan->srej_q);

	err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
	if (err < 0)
		return err;

	err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
	if (err < 0)
		/* Undo the first allocation so nothing leaks on failure */
		l2cap_seq_list_free(&chan->srej_list);

	return err;
}
3098
3099static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
3100{
3101 switch (mode) {
3102 case L2CAP_MODE_STREAMING:
3103 case L2CAP_MODE_ERTM:
3104 if (l2cap_mode_supported(mode, remote_feat_mask))
3105 return mode;
3106 /* fall through */
3107 default:
3108 return L2CAP_MODE_BASIC;
3109 }
3110}
3111
3112static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
3113{
3114 return ((conn->local_fixed_chan & L2CAP_FC_A2MP) &&
3115 (conn->feat_mask & L2CAP_FEAT_EXT_WINDOW));
3116}
3117
3118static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
3119{
3120 return ((conn->local_fixed_chan & L2CAP_FC_A2MP) &&
3121 (conn->feat_mask & L2CAP_FEAT_EXT_FLOW));
3122}
3123
/* Fill in the retransmission and monitor timeouts for an RFC option.
 *
 * On an AMP link the timeouts are derived from the controller's
 * best-effort flush timeout so they exceed the link supervision
 * timeout; on BR/EDR the spec defaults are used.
 */
static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
				      struct l2cap_conf_rfc *rfc)
{
	if (chan->local_amp_id != AMP_ID_BREDR && chan->hs_hcon) {
		u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;

		/* Class 1 devices have must have ERTM timeouts
		 * exceeding the Link Supervision Timeout. The
		 * default Link Supervision Timeout for AMP
		 * controllers is 10 seconds.
		 *
		 * Class 1 devices use 0xffffffff for their
		 * best-effort flush timeout, so the clamping logic
		 * will result in a timeout that meets the above
		 * requirement. ERTM timeouts are 16-bit values, so
		 * the maximum timeout is 65.535 seconds.
		 */

		/* Convert timeout to milliseconds and round */
		ertm_to = DIV_ROUND_UP_ULL(ertm_to, 1000);

		/* This is the recommended formula for class 2 devices
		 * that start ERTM timers when packets are sent to the
		 * controller.
		 */
		ertm_to = 3 * ertm_to + 500;

		/* Clamp to the 16-bit field in the RFC option */
		if (ertm_to > 0xffff)
			ertm_to = 0xffff;

		rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);
		rfc->monitor_timeout = rfc->retrans_timeout;
	} else {
		rfc->retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
		rfc->monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
	}
}
3161
3162static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
3163{
3164 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
3165 __l2cap_ews_supported(chan->conn)) {
3166 /* use extended control field */
3167 set_bit(FLAG_EXT_CTRL, &chan->flags);
3168 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3169 } else {
3170 chan->tx_win = min_t(u16, chan->tx_win,
3171 L2CAP_DEFAULT_TX_WINDOW);
3172 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
3173 }
3174 chan->ack_win = chan->tx_win;
3175}
3176
/* Build an outgoing Configure Request into @data.
 *
 * On the first request the desired mode may be downgraded to one the
 * remote supports; afterwards the MTU, RFC, EFS, EWS and FCS options
 * are appended as applicable for the selected mode.  Returns the number
 * of bytes written (header plus options).
 */
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
{
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = chan->mode };
	void *ptr = req->data;
	u16 size;

	BT_DBG("chan %p", chan);

	/* Mode selection only happens on the very first exchange */
	if (chan->num_conf_req || chan->num_conf_rsp)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		/* A "state 2" device insists on its configured mode */
		if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
			break;

		if (__l2cap_efs_supported(chan->conn))
			set_bit(FLAG_EFS_ENABLE, &chan->flags);

		/* fall through */
	default:
		chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
		break;
	}

done:
	/* MTU option only needed when deviating from the default */
	if (chan->imtu != L2CAP_DEFAULT_MTU)
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		if (disable_ertm)
			break;

		/* Only send an explicit basic-mode RFC when the peer
		 * understands the option at all.
		 */
		if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
		    !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
			break;

		rfc.mode = L2CAP_MODE_BASIC;
		rfc.txwin_size = 0;
		rfc.max_transmit = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size = 0;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);
		break;

	case L2CAP_MODE_ERTM:
		rfc.mode = L2CAP_MODE_ERTM;
		rfc.max_transmit = chan->max_tx;

		__l2cap_set_ertm_timeouts(chan, &rfc);

		/* MPS must leave room for the worst-case L2CAP overhead */
		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_txwin_setup(chan);

		rfc.txwin_size = min_t(u16, chan->tx_win,
				       L2CAP_DEFAULT_TX_WINDOW);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan);

		/* Extended window option carries the full (unclamped) size */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
					   chan->tx_win);

		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
			if (chan->fcs == L2CAP_FCS_NONE ||
			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
				chan->fcs = L2CAP_FCS_NONE;
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
						   chan->fcs);
			}
		break;

	case L2CAP_MODE_STREAMING:
		l2cap_txwin_setup(chan);
		rfc.mode = L2CAP_MODE_STREAMING;
		rfc.txwin_size = 0;
		rfc.max_transmit = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;

		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan);

		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
			if (chan->fcs == L2CAP_FCS_NONE ||
			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
				chan->fcs = L2CAP_FCS_NONE;
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
						   chan->fcs);
			}
		break;
	}

	req->dcid = cpu_to_le16(chan->dcid);
	req->flags = cpu_to_le16(0);

	return ptr - data;
}
3297
/* Parse the peer's buffered Configure Request (chan->conf_req) and
 * build our Configure Response into @data.
 *
 * Returns the length of the response, or -ECONNREFUSED when the peer's
 * options are irreconcilable (wrong mode, unsupported EWS/EFS).
 *
 * NOTE(review): the option loop accepts any olen from the peer; values
 * other than sizeof(rfc)/sizeof(efs) are silently ignored for those
 * options, but olen itself is never checked against the remaining
 * buffer — confirm l2cap_get_conf_opt's bounds assumptions hold.
 */
static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
{
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;
	void *req = chan->conf_req;
	int len = chan->conf_len;
	int type, hint, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;
	u8 remote_efs = 0;
	u16 mtu = L2CAP_DEFAULT_MTU;
	u16 result = L2CAP_CONF_SUCCESS;
	u16 size;

	BT_DBG("chan %p", chan);

	/* First pass: collect all options the peer sent */
	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);

		/* Hint options may be ignored; mandatory ones may not */
		hint = type & L2CAP_CONF_HINT;
		type &= L2CAP_CONF_MASK;

		switch (type) {
		case L2CAP_CONF_MTU:
			mtu = val;
			break;

		case L2CAP_CONF_FLUSH_TO:
			chan->flush_to = val;
			break;

		case L2CAP_CONF_QOS:
			break;

		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *) val, olen);
			break;

		case L2CAP_CONF_FCS:
			if (val == L2CAP_FCS_NONE)
				set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
			break;

		case L2CAP_CONF_EFS:
			remote_efs = 1;
			if (olen == sizeof(efs))
				memcpy(&efs, (void *) val, olen);
			break;

		case L2CAP_CONF_EWS:
			/* Extended windows require A2MP support locally */
			if (!(chan->conn->local_fixed_chan & L2CAP_FC_A2MP))
				return -ECONNREFUSED;

			set_bit(FLAG_EXT_CTRL, &chan->flags);
			set_bit(CONF_EWS_RECV, &chan->conf_state);
			chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
			chan->remote_tx_win = val;
			break;

		default:
			if (hint)
				break;

			/* Unknown mandatory option: echo its type back */
			result = L2CAP_CONF_UNKNOWN;
			*((u8 *) ptr++) = type;
			break;
		}
	}

	/* Mode negotiation is only open on the first exchange */
	if (chan->num_conf_rsp || chan->num_conf_req > 1)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
			chan->mode = l2cap_select_mode(rfc.mode,
						       chan->conn->feat_mask);
			break;
		}

		if (remote_efs) {
			if (__l2cap_efs_supported(chan->conn))
				set_bit(FLAG_EFS_ENABLE, &chan->flags);
			else
				return -ECONNREFUSED;
		}

		/* State 2 device: the configured mode is non-negotiable */
		if (chan->mode != rfc.mode)
			return -ECONNREFUSED;

		break;
	}

done:
	if (chan->mode != rfc.mode) {
		result = L2CAP_CONF_UNACCEPT;
		rfc.mode = chan->mode;

		/* Second disagreement on mode: give up on the connection */
		if (chan->num_conf_rsp == 1)
			return -ECONNREFUSED;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);
	}

	if (result == L2CAP_CONF_SUCCESS) {
		/* Configure output options and let the other side know
		 * which ones we don't like. */

		if (mtu < L2CAP_DEFAULT_MIN_MTU)
			result = L2CAP_CONF_UNACCEPT;
		else {
			chan->omtu = mtu;
			set_bit(CONF_MTU_DONE, &chan->conf_state);
		}
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);

		if (remote_efs) {
			/* Service types must match unless one side is
			 * no-traffic.
			 */
			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != chan->local_stype) {

				result = L2CAP_CONF_UNACCEPT;

				if (chan->num_conf_req >= 1)
					return -ECONNREFUSED;

				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs);
			} else {
				/* Send PENDING Conf Rsp */
				result = L2CAP_CONF_PENDING;
				set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
			}
		}

		switch (rfc.mode) {
		case L2CAP_MODE_BASIC:
			chan->fcs = L2CAP_FCS_NONE;
			set_bit(CONF_MODE_DONE, &chan->conf_state);
			break;

		case L2CAP_MODE_ERTM:
			/* EWS, when received, overrides the RFC tx window */
			if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
				chan->remote_tx_win = rfc.txwin_size;
			else
				rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;

			chan->remote_max_tx = rfc.max_transmit;

			/* Clamp the peer's MPS to what fits our HCI MTU */
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			__l2cap_set_ertm_timeouts(chan, &rfc);

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					   sizeof(rfc), (unsigned long) &rfc);

			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->remote_id = efs.id;
				chan->remote_stype = efs.stype;
				chan->remote_msdu = le16_to_cpu(efs.msdu);
				chan->remote_flush_to =
					le32_to_cpu(efs.flush_to);
				chan->remote_acc_lat =
					le32_to_cpu(efs.acc_lat);
				chan->remote_sdu_itime =
					le32_to_cpu(efs.sdu_itime);
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs);
			}
			break;

		case L2CAP_MODE_STREAMING:
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
					   (unsigned long) &rfc);

			break;

		default:
			result = L2CAP_CONF_UNACCEPT;

			memset(&rfc, 0, sizeof(rfc));
			rfc.mode = chan->mode;
		}

		if (result == L2CAP_CONF_SUCCESS)
			set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
	}
	rsp->scid = cpu_to_le16(chan->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags = cpu_to_le16(0);

	return ptr - data;
}
3511
/* Parse the peer's Configure Response and build the follow-up
 * Configure Request into @data, adopting the values the peer accepted
 * or adjusted.
 *
 * @result may be updated (e.g. to UNACCEPT when the peer's MTU is below
 * the minimum).  Returns the length of the new request, or
 * -ECONNREFUSED when the response is incompatible with our mode or EFS
 * service type.
 *
 * NOTE(review): as in l2cap_parse_conf_req, olen is only compared
 * against the expected struct size for RFC/EFS; other mismatches are
 * silently ignored — verify against l2cap_get_conf_opt's contract.
 */
static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
				void *data, u16 *result)
{
	struct l2cap_conf_req *req = data;
	void *ptr = req->data;
	int type, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;

	BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);

		switch (type) {
		case L2CAP_CONF_MTU:
			if (val < L2CAP_DEFAULT_MIN_MTU) {
				*result = L2CAP_CONF_UNACCEPT;
				chan->imtu = L2CAP_DEFAULT_MIN_MTU;
			} else
				chan->imtu = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
			break;

		case L2CAP_CONF_FLUSH_TO:
			chan->flush_to = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
					   2, chan->flush_to);
			break;

		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *)val, olen);

			/* State 2 device cannot accept a different mode */
			if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
			    rfc.mode != chan->mode)
				return -ECONNREFUSED;

			chan->fcs = 0;

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					   sizeof(rfc), (unsigned long) &rfc);
			break;

		case L2CAP_CONF_EWS:
			/* Ack window can only shrink to what the peer takes */
			chan->ack_win = min_t(u16, val, chan->ack_win);
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
					   chan->tx_win);
			break;

		case L2CAP_CONF_EFS:
			if (olen == sizeof(efs))
				memcpy(&efs, (void *)val, olen);

			/* Service types must agree unless one side is
			 * no-traffic.
			 */
			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != chan->local_stype)
				return -ECONNREFUSED;

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
					   (unsigned long) &efs);
			break;

		case L2CAP_CONF_FCS:
			if (*result == L2CAP_CONF_PENDING)
				if (val == L2CAP_FCS_NONE)
					set_bit(CONF_RECV_NO_FCS,
						&chan->conf_state);
			break;
		}
	}

	/* Basic mode cannot be upgraded by the peer's response */
	if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
		return -ECONNREFUSED;

	chan->mode = rfc.mode;

	if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
		switch (rfc.mode) {
		case L2CAP_MODE_ERTM:
			chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
			chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
			chan->mps = le16_to_cpu(rfc.max_pdu_size);
			if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
				chan->ack_win = min_t(u16, chan->ack_win,
						      rfc.txwin_size);

			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->local_msdu = le16_to_cpu(efs.msdu);
				chan->local_sdu_itime =
					le32_to_cpu(efs.sdu_itime);
				chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
				chan->local_flush_to =
					le32_to_cpu(efs.flush_to);
			}
			break;

		case L2CAP_MODE_STREAMING:
			chan->mps = le16_to_cpu(rfc.max_pdu_size);
		}
	}

	req->dcid = cpu_to_le16(chan->dcid);
	req->flags = cpu_to_le16(0);

	return ptr - data;
}
3620
3621static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
3622 u16 result, u16 flags)
3623{
3624 struct l2cap_conf_rsp *rsp = data;
3625 void *ptr = rsp->data;
3626
3627 BT_DBG("chan %p", chan);
3628
3629 rsp->scid = cpu_to_le16(chan->dcid);
3630 rsp->result = cpu_to_le16(result);
3631 rsp->flags = cpu_to_le16(flags);
3632
3633 return ptr - data;
3634}
3635
3636void __l2cap_le_connect_rsp_defer(struct l2cap_chan *chan)
3637{
3638 struct l2cap_le_conn_rsp rsp;
3639 struct l2cap_conn *conn = chan->conn;
3640
3641 BT_DBG("chan %p", chan);
3642
3643 rsp.dcid = cpu_to_le16(chan->scid);
3644 rsp.mtu = cpu_to_le16(chan->imtu);
3645 rsp.mps = cpu_to_le16(chan->mps);
3646 rsp.credits = cpu_to_le16(chan->rx_credits);
3647 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
3648
3649 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
3650 &rsp);
3651}
3652
3653void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
3654{
3655 struct l2cap_conn_rsp rsp;
3656 struct l2cap_conn *conn = chan->conn;
3657 u8 buf[128];
3658 u8 rsp_code;
3659
3660 rsp.scid = cpu_to_le16(chan->dcid);
3661 rsp.dcid = cpu_to_le16(chan->scid);
3662 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
3663 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
3664
3665 if (chan->hs_hcon)
3666 rsp_code = L2CAP_CREATE_CHAN_RSP;
3667 else
3668 rsp_code = L2CAP_CONN_RSP;
3669
3670 BT_DBG("chan %p rsp_code %u", chan, rsp_code);
3671
3672 l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);
3673
3674 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3675 return;
3676
3677 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3678 l2cap_build_conf_req(chan, buf), buf);
3679 chan->num_conf_req++;
3680}
3681
/* Extract the final ERTM/streaming parameters from a successful
 * Configure Response and store them on the channel.
 */
static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
{
	int type, olen;
	unsigned long val;
	/* Use sane default values in case a misbehaving remote device
	 * did not send an RFC or extended window size option.
	 */
	u16 txwin_ext = chan->ack_win;
	struct l2cap_conf_rfc rfc = {
		.mode = chan->mode,
		.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
		.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
		.max_pdu_size = cpu_to_le16(chan->imtu),
		.txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
	};

	BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);

	/* Basic mode channels have no RFC parameters to pick up */
	if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
		return;

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);

		switch (type) {
		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *)val, olen);
			break;
		case L2CAP_CONF_EWS:
			txwin_ext = val;
			break;
		}
	}

	switch (rfc.mode) {
	case L2CAP_MODE_ERTM:
		chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
		chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
		chan->mps = le16_to_cpu(rfc.max_pdu_size);
		/* With extended control the window comes from the EWS
		 * option; otherwise the RFC option carries it.
		 */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
		else
			chan->ack_win = min_t(u16, chan->ack_win,
					      rfc.txwin_size);
		break;
	case L2CAP_MODE_STREAMING:
		chan->mps = le16_to_cpu(rfc.max_pdu_size);
	}
}
3732
3733static inline int l2cap_command_rej(struct l2cap_conn *conn,
3734 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3735 u8 *data)
3736{
3737 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
3738
3739 if (cmd_len < sizeof(*rej))
3740 return -EPROTO;
3741
3742 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
3743 return 0;
3744
3745 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
3746 cmd->ident == conn->info_ident) {
3747 cancel_delayed_work(&conn->info_timer);
3748
3749 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3750 conn->info_ident = 0;
3751
3752 l2cap_conn_start(conn);
3753 }
3754
3755 return 0;
3756}
3757
/* Handle an incoming Connection Request (or, via the AMP path, a Create
 * Channel Request when @rsp_code is L2CAP_CREATE_CHAN_RSP).  Looks up a
 * listening channel for the PSM, creates a child channel, and always
 * sends a response carrying @result/@status.  Returns the new channel,
 * or NULL if the request was rejected.
 */
static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd,
					u8 *data, u8 rsp_code, u8 amp_id)
{
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct l2cap_chan *chan = NULL, *pchan;
	int result, status = L2CAP_CS_NO_INFO;

	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
					 &conn->hcon->dst, ACL_LINK);
	if (!pchan) {
		result = L2CAP_CR_BAD_PSM;
		goto sendresp;
	}

	/* Lock order: conn->chan_lock, then the channel lock */
	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(pchan);

	/* Check if the ACL is secure enough (if not SDP) */
	if (psm != cpu_to_le16(L2CAP_PSM_SDP) &&
	    !hci_conn_check_link_mode(conn->hcon)) {
		conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
		result = L2CAP_CR_SEC_BLOCK;
		goto response;
	}

	result = L2CAP_CR_NO_MEM;

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid))
		goto response;

	chan = pchan->ops->new_connection(pchan);
	if (!chan)
		goto response;

	/* For certain devices (ex: HID mouse), support for authentication,
	 * pairing and bonding is optional. For such devices, inorder to avoid
	 * the ACL alive for too long after L2CAP disconnection, reset the ACL
	 * disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
	 */
	conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;

	/* Initialize the child channel from the link's addresses and the
	 * peer's request.
	 */
	bacpy(&chan->src, &conn->hcon->src);
	bacpy(&chan->dst, &conn->hcon->dst);
	chan->src_type = bdaddr_src_type(conn->hcon);
	chan->dst_type = bdaddr_dst_type(conn->hcon);
	chan->psm = psm;
	chan->dcid = scid;
	chan->local_amp_id = amp_id;

	__l2cap_chan_add(conn, chan);

	dcid = chan->scid;

	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	chan->ident = cmd->ident;

	/* Decide the response: success only once feature discovery is
	 * done, security passed, and setup is not deferred.
	 */
	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
		if (l2cap_chan_check_security(chan, false)) {
			if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
				l2cap_state_change(chan, BT_CONNECT2);
				result = L2CAP_CR_PEND;
				status = L2CAP_CS_AUTHOR_PEND;
				chan->ops->defer(chan);
			} else {
				/* Force pending result for AMP controllers.
				 * The connection will succeed after the
				 * physical link is up.
				 */
				if (amp_id == AMP_ID_BREDR) {
					l2cap_state_change(chan, BT_CONFIG);
					result = L2CAP_CR_SUCCESS;
				} else {
					l2cap_state_change(chan, BT_CONNECT2);
					result = L2CAP_CR_PEND;
				}
				status = L2CAP_CS_NO_INFO;
			}
		} else {
			l2cap_state_change(chan, BT_CONNECT2);
			result = L2CAP_CR_PEND;
			status = L2CAP_CS_AUTHEN_PEND;
		}
	} else {
		l2cap_state_change(chan, BT_CONNECT2);
		result = L2CAP_CR_PEND;
		status = L2CAP_CS_NO_INFO;
	}

response:
	l2cap_chan_unlock(pchan);
	mutex_unlock(&conn->chan_lock);
	l2cap_chan_put(pchan);

sendresp:
	rsp.scid = cpu_to_le16(scid);
	rsp.dcid = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);

	/* First pending connection triggers feature-mask discovery */
	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
		struct l2cap_info_req info;
		info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

		l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
			       sizeof(info), &info);
	}

	/* On immediate success start configuration right away */
	if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
	    result == L2CAP_CR_SUCCESS) {
		u8 buf[128];
		set_bit(CONF_REQ_SENT, &chan->conf_state);
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf), buf);
		chan->num_conf_req++;
	}

	return chan;
}
3892
3893static int l2cap_connect_req(struct l2cap_conn *conn,
3894 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
3895{
3896 struct hci_dev *hdev = conn->hcon->hdev;
3897 struct hci_conn *hcon = conn->hcon;
3898
3899 if (cmd_len < sizeof(struct l2cap_conn_req))
3900 return -EPROTO;
3901
3902 hci_dev_lock(hdev);
3903 if (hci_dev_test_flag(hdev, HCI_MGMT) &&
3904 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags))
3905 mgmt_device_connected(hdev, hcon, 0, NULL, 0);
3906 hci_dev_unlock(hdev);
3907
3908 l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
3909 return 0;
3910}
3911
/* Handle a Connection Response or Create Channel Response.  Matches it
 * to the requesting channel (by scid, or by signaling ident when the
 * peer reported failure with scid 0) and advances the channel state.
 */
static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				    u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct l2cap_chan *chan;
	u8 req[128];
	int err;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
	       dcid, scid, result, status);

	mutex_lock(&conn->chan_lock);

	/* scid == 0 means the peer could not allocate a channel; fall
	 * back to matching by the command identifier we sent.
	 */
	if (scid) {
		chan = __l2cap_get_chan_by_scid(conn, scid);
		if (!chan) {
			err = -EBADSLT;
			goto unlock;
		}
	} else {
		chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
		if (!chan) {
			err = -EBADSLT;
			goto unlock;
		}
	}

	err = 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_SUCCESS:
		/* Connection established; move to configuration and send
		 * our Configure Request (only once).
		 */
		l2cap_state_change(chan, BT_CONFIG);
		chan->ident = 0;
		chan->dcid = dcid;
		clear_bit(CONF_CONNECT_PEND, &chan->conf_state);

		if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
			break;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, req), req);
		chan->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		/* Peer is still authenticating/authorizing; wait */
		set_bit(CONF_CONNECT_PEND, &chan->conf_state);
		break;

	default:
		/* Any other result is a refusal; tear the channel down */
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	l2cap_chan_unlock(chan);

unlock:
	mutex_unlock(&conn->chan_lock);

	return err;
}
3984
3985static inline void set_default_fcs(struct l2cap_chan *chan)
3986{
3987 /* FCS is enabled only in ERTM or streaming mode, if one or both
3988 * sides request it.
3989 */
3990 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
3991 chan->fcs = L2CAP_FCS_NONE;
3992 else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
3993 chan->fcs = L2CAP_FCS_CRC16;
3994}
3995
3996static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
3997 u8 ident, u16 flags)
3998{
3999 struct l2cap_conn *conn = chan->conn;
4000
4001 BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
4002 flags);
4003
4004 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
4005 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
4006
4007 l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
4008 l2cap_build_conf_rsp(chan, data,
4009 L2CAP_CONF_SUCCESS, flags), data);
4010}
4011
4012static void cmd_reject_invalid_cid(struct l2cap_conn *conn, u8 ident,
4013 u16 scid, u16 dcid)
4014{
4015 struct l2cap_cmd_rej_cid rej;
4016
4017 rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
4018 rej.scid = __cpu_to_le16(scid);
4019 rej.dcid = __cpu_to_le16(dcid);
4020
4021 l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
4022}
4023
/* Handle a Configure Request.  Accumulates continuation fragments in
 * chan->conf_req, then parses the complete request, answers it, and —
 * once both directions are configured — brings the channel up.
 */
static inline int l2cap_config_req(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rsp[64];
	struct l2cap_chan *chan;
	int len, err = 0;

	if (cmd_len < sizeof(*req))
		return -EPROTO;

	dcid = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	/* Returns with the channel locked on success */
	chan = l2cap_get_chan_by_scid(conn, dcid);
	if (!chan) {
		cmd_reject_invalid_cid(conn, cmd->ident, dcid, 0);
		return 0;
	}

	/* Configuration is only legal in BT_CONFIG/BT_CONNECT2 */
	if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
		cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
				       chan->dcid);
		goto unlock;
	}

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (chan->conf_len + len > sizeof(chan->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
			       L2CAP_CONF_REJECT, flags), rsp);
		goto unlock;
	}

	/* Store config. */
	memcpy(chan->conf_req + chan->conf_len, req->data, len);
	chan->conf_len += len;

	if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
			       L2CAP_CONF_SUCCESS, flags), rsp);
		goto unlock;
	}

	/* Complete config. */
	len = l2cap_parse_conf_req(chan, rsp);
	if (len < 0) {
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto unlock;
	}

	chan->ident = cmd->ident;
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
	chan->num_conf_rsp++;

	/* Reset config buffer. */
	chan->conf_len = 0;

	/* Our response may not have accepted the request yet */
	if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
		goto unlock;

	/* Both directions configured: finish ERTM/streaming setup and
	 * mark the channel ready.
	 */
	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);

		goto unlock;
	}

	/* We have not sent our own Configure Request yet; do it now */
	if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
		u8 buf[64];
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf), buf);
		chan->num_conf_req++;
	}

	/* Got Conf Rsp PENDING from remote side and assume we sent
	   Conf Rsp PENDING in the code above */
	if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
	    test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {

		/* check compatibility */

		/* Send rsp for BR/EDR channel */
		if (!chan->hs_hcon)
			l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
		else
			chan->ident = cmd->ident;
	}

unlock:
	l2cap_chan_unlock(chan);
	return err;
}
4132
/* Handle a Configure Response.  On success, record the negotiated
 * parameters; on UNACCEPT, renegotiate (bounded by
 * L2CAP_CONF_MAX_CONF_RSP attempts); otherwise disconnect.  When both
 * directions finish, bring the channel up.
 */
static inline int l2cap_config_rsp(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
	u16 scid, flags, result;
	struct l2cap_chan *chan;
	int len = cmd_len - sizeof(*rsp);
	int err = 0;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	scid = __le16_to_cpu(rsp->scid);
	flags = __le16_to_cpu(rsp->flags);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
	       result, len);

	/* Returns with the channel locked on success */
	chan = l2cap_get_chan_by_scid(conn, scid);
	if (!chan)
		return 0;

	switch (result) {
	case L2CAP_CONF_SUCCESS:
		/* Pick up the agreed RFC/EWS parameters */
		l2cap_conf_rfc_get(chan, rsp->data, len);
		clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
		break;

	case L2CAP_CONF_PENDING:
		set_bit(CONF_REM_CONF_PEND, &chan->conf_state);

		if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
			char buf[64];

			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
						   buf, &result);
			if (len < 0) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			/* AMP channels wait for the logical link before
			 * answering; BR/EDR can respond immediately.
			 */
			if (!chan->hs_hcon) {
				l2cap_send_efs_conf_rsp(chan, buf, cmd->ident,
							0);
			} else {
				if (l2cap_check_efs(chan)) {
					amp_create_logical_link(chan);
					chan->ident = cmd->ident;
				}
			}
		}
		goto done;

	case L2CAP_CONF_UNACCEPT:
		if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
			char req[64];

			if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			/* throw out any old stored conf requests */
			result = L2CAP_CONF_SUCCESS;
			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
						   req, &result);
			if (len < 0) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			l2cap_send_cmd(conn, l2cap_get_ident(conn),
				       L2CAP_CONF_REQ, len, req);
			chan->num_conf_req++;
			if (result != L2CAP_CONF_SUCCESS)
				goto done;
			break;
		}
		/* Too many renegotiation rounds: fall through and abort */

	default:
		l2cap_chan_set_err(chan, ECONNRESET);

		__set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto done;
	}

	/* More option fragments follow; nothing else to do yet */
	if (flags & L2CAP_CONF_FLAG_CONTINUATION)
		goto done;

	set_bit(CONF_INPUT_DONE, &chan->conf_state);

	/* Both directions configured: finalize and mark ready */
	if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);
	}

done:
	l2cap_chan_unlock(chan);
	return err;
}
4244
/* Handle a Disconnection Request: acknowledge it with a Disconnection
 * Response, then tear down the channel.  An extra reference is held
 * across l2cap_chan_del() so ops->close() can run safely after the
 * channel lock is dropped.
 */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct l2cap_chan *chan;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	mutex_lock(&conn->chan_lock);

	/* The peer's dcid is our scid */
	chan = __l2cap_get_chan_by_scid(conn, dcid);
	if (!chan) {
		mutex_unlock(&conn->chan_lock);
		cmd_reject_invalid_cid(conn, cmd->ident, dcid, scid);
		return 0;
	}

	l2cap_chan_lock(chan);

	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.scid = cpu_to_le16(chan->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	chan->ops->set_shutdown(chan);

	/* Keep the channel alive past l2cap_chan_del() for close() */
	l2cap_chan_hold(chan);
	l2cap_chan_del(chan, ECONNRESET);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);

	return 0;
}
4291
/* Handle a Disconnection Response to a request we sent: remove the
 * channel.  As in l2cap_disconnect_req(), a temporary reference keeps
 * the channel valid for ops->close() after unlocking.
 */
static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
	u16 dcid, scid;
	struct l2cap_chan *chan;

	if (cmd_len != sizeof(*rsp))
		return -EPROTO;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);

	mutex_lock(&conn->chan_lock);

	chan = __l2cap_get_chan_by_scid(conn, scid);
	if (!chan) {
		mutex_unlock(&conn->chan_lock);
		return 0;
	}

	l2cap_chan_lock(chan);

	/* Clean disconnect: no error reported to the channel */
	l2cap_chan_hold(chan);
	l2cap_chan_del(chan, 0);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);

	return 0;
}
4330
/* Handle an Information Request: report our feature mask or fixed
 * channel map, or NOTSUPP for any other info type.
 */
static inline int l2cap_information_req(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
					u8 *data)
{
	struct l2cap_info_req *req = (struct l2cap_info_req *) data;
	u16 type;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	type = __le16_to_cpu(req->type);

	BT_DBG("type 0x%4.4x", type);

	if (type == L2CAP_IT_FEAT_MASK) {
		u8 buf[8];
		u32 feat_mask = l2cap_feat_mask;
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
		rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		/* Advertise ERTM/streaming/FCS unless disabled, and the
		 * extended features when A2MP is available.
		 */
		if (!disable_ertm)
			feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
				| L2CAP_FEAT_FCS;
		if (conn->local_fixed_chan & L2CAP_FC_A2MP)
			feat_mask |= L2CAP_FEAT_EXT_FLOW
				| L2CAP_FEAT_EXT_WINDOW;

		put_unaligned_le32(feat_mask, rsp->data);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
			       buf);
	} else if (type == L2CAP_IT_FIXED_CHAN) {
		u8 buf[12];
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;

		/* First octet is our fixed channel map; the remaining
		 * seven octets of the 8-byte field are reserved (zero).
		 */
		rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		rsp->data[0] = conn->local_fixed_chan;
		memset(rsp->data + 1, 0, 7);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
			       buf);
	} else {
		struct l2cap_info_rsp rsp;
		rsp.type = cpu_to_le16(type);
		rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
			       &rsp);
	}

	return 0;
}
4381
/* Handle an Information Response for our outstanding request.  After
 * the feature mask arrives we may chain a fixed-channel query; once
 * discovery completes, start the queued channels.
 */
static inline int l2cap_information_rsp(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
					u8 *data)
{
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
	u16 type, result;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	type = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);

	/* L2CAP Info req/rsp are unbound to channels, add extra checks */
	if (cmd->ident != conn->info_ident ||
	    conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
		return 0;

	cancel_delayed_work(&conn->info_timer);

	/* Peer could not answer; give up on discovery but proceed */
	if (result != L2CAP_IR_SUCCESS) {
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);

		return 0;
	}

	switch (type) {
	case L2CAP_IT_FEAT_MASK:
		conn->feat_mask = get_unaligned_le32(rsp->data);

		/* If fixed channels are supported, ask for the map next;
		 * otherwise discovery is finished.
		 */
		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
			struct l2cap_info_req req;
			req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);

			conn->info_ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, conn->info_ident,
				       L2CAP_INFO_REQ, sizeof(req), &req);
		} else {
			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
			conn->info_ident = 0;

			l2cap_conn_start(conn);
		}
		break;

	case L2CAP_IT_FIXED_CHAN:
		conn->remote_fixed_chan = rsp->data[0];
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
		break;
	}

	return 0;
}
4444
/* Handle an AMP Create Channel Request.  Controller id 0 falls back to
 * a normal BR/EDR connect; otherwise the AMP controller is validated
 * and the new channel is bound to its high-speed link.
 */
static int l2cap_create_channel_req(struct l2cap_conn *conn,
				    struct l2cap_cmd_hdr *cmd,
				    u16 cmd_len, void *data)
{
	struct l2cap_create_chan_req *req = data;
	struct l2cap_create_chan_rsp rsp;
	struct l2cap_chan *chan;
	struct hci_dev *hdev;
	u16 psm, scid;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	/* Only valid if we advertised A2MP support */
	if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
		return -EINVAL;

	psm = le16_to_cpu(req->psm);
	scid = le16_to_cpu(req->scid);

	BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);

	/* For controller id 0 make BR/EDR connection */
	if (req->amp_id == AMP_ID_BREDR) {
		l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
			      req->amp_id);
		return 0;
	}

	/* Validate AMP controller id */
	hdev = hci_dev_get(req->amp_id);
	if (!hdev)
		goto error;

	if (hdev->dev_type != HCI_AMP || !test_bit(HCI_UP, &hdev->flags)) {
		hci_dev_put(hdev);
		goto error;
	}

	chan = l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
			     req->amp_id);
	if (chan) {
		struct amp_mgr *mgr = conn->hcon->amp_mgr;
		struct hci_conn *hs_hcon;

		/* The AMP physical link to the peer must already exist */
		hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK,
						  &conn->hcon->dst);
		if (!hs_hcon) {
			hci_dev_put(hdev);
			cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
					       chan->dcid);
			return 0;
		}

		BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon);

		/* Bind the channel to the high-speed link; AMP links do
		 * not use an L2CAP FCS.
		 */
		mgr->bredr_chan = chan;
		chan->hs_hcon = hs_hcon;
		chan->fcs = L2CAP_FCS_NONE;
		conn->mtu = hdev->block_mtu;
	}

	hci_dev_put(hdev);

	return 0;

error:
	rsp.dcid = 0;
	rsp.scid = cpu_to_le16(scid);
	rsp.result = cpu_to_le16(L2CAP_CR_BAD_AMP);
	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
		       sizeof(rsp), &rsp);

	return 0;
}
4521
4522static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id)
4523{
4524 struct l2cap_move_chan_req req;
4525 u8 ident;
4526
4527 BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id);
4528
4529 ident = l2cap_get_ident(chan->conn);
4530 chan->ident = ident;
4531
4532 req.icid = cpu_to_le16(chan->scid);
4533 req.dest_amp_id = dest_amp_id;
4534
4535 l2cap_send_cmd(chan->conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req),
4536 &req);
4537
4538 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4539}
4540
4541static void l2cap_send_move_chan_rsp(struct l2cap_chan *chan, u16 result)
4542{
4543 struct l2cap_move_chan_rsp rsp;
4544
4545 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4546
4547 rsp.icid = cpu_to_le16(chan->dcid);
4548 rsp.result = cpu_to_le16(result);
4549
4550 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_RSP,
4551 sizeof(rsp), &rsp);
4552}
4553
4554static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result)
4555{
4556 struct l2cap_move_chan_cfm cfm;
4557
4558 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4559
4560 chan->ident = l2cap_get_ident(chan->conn);
4561
4562 cfm.icid = cpu_to_le16(chan->scid);
4563 cfm.result = cpu_to_le16(result);
4564
4565 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_CFM,
4566 sizeof(cfm), &cfm);
4567
4568 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4569}
4570
4571static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid)
4572{
4573 struct l2cap_move_chan_cfm cfm;
4574
4575 BT_DBG("conn %p, icid 0x%4.4x", conn, icid);
4576
4577 cfm.icid = cpu_to_le16(icid);
4578 cfm.result = cpu_to_le16(L2CAP_MC_UNCONFIRMED);
4579
4580 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM,
4581 sizeof(cfm), &cfm);
4582}
4583
4584static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4585 u16 icid)
4586{
4587 struct l2cap_move_chan_cfm_rsp rsp;
4588
4589 BT_DBG("icid 0x%4.4x", icid);
4590
4591 rsp.icid = cpu_to_le16(icid);
4592 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
4593}
4594
4595static void __release_logical_link(struct l2cap_chan *chan)
4596{
4597 chan->hs_hchan = NULL;
4598 chan->hs_hcon = NULL;
4599
4600 /* Placeholder - release the logical link */
4601}
4602
/* Handle failure to bring up an AMP logical link, either during
 * channel creation (disconnect) or during a channel move (abort the
 * move according to our role).
 */
static void l2cap_logical_fail(struct l2cap_chan *chan)
{
	/* Logical link setup failed */
	if (chan->state != BT_CONNECTED) {
		/* Create channel failure, disconnect */
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	switch (chan->move_role) {
	case L2CAP_MOVE_ROLE_RESPONDER:
		l2cap_move_done(chan);
		/* Tell the initiator we cannot host the channel */
		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_SUPP);
		break;
	case L2CAP_MOVE_ROLE_INITIATOR:
		if (chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_COMP ||
		    chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_CFM) {
			/* Remote has only sent pending or
			 * success responses, clean up
			 */
			l2cap_move_done(chan);
		}

		/* Other amp move states imply that the move
		 * has already aborted
		 */
		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
		break;
	}
}
4633
/* Complete channel creation once the AMP logical link is up: attach
 * the link, send the deferred Configure Response, and finish ERTM
 * setup if the inbound configuration is already done.
 */
static void l2cap_logical_finish_create(struct l2cap_chan *chan,
					struct hci_chan *hchan)
{
	struct l2cap_conf_rsp rsp;

	chan->hs_hchan = hchan;
	chan->hs_hcon->l2cap_data = chan->conn;

	/* Send the Configure Response that was held back for the link */
	l2cap_send_efs_conf_rsp(chan, &rsp, chan->ident, 0);

	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
		int err;

		set_default_fcs(chan);

		err = l2cap_ertm_init(chan);
		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);
	}
}
4656
/* Advance the channel-move state machine after the AMP logical link
 * came up, sending the confirm or response appropriate to our role.
 */
static void l2cap_logical_finish_move(struct l2cap_chan *chan,
				      struct hci_chan *hchan)
{
	chan->hs_hcon = hchan->conn;
	chan->hs_hcon->l2cap_data = chan->conn;

	BT_DBG("move_state %d", chan->move_state);

	switch (chan->move_state) {
	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
		/* Move confirm will be sent after a success
		 * response is received
		 */
		chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
		break;
	case L2CAP_MOVE_WAIT_LOGICAL_CFM:
		/* Wait out local busy before finishing the move */
		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
		} else if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		} else if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
		}
		break;
	default:
		/* Move was not in expected state, free the channel */
		__release_logical_link(chan);

		chan->move_state = L2CAP_MOVE_STABLE;
	}
}
4690
/* Call with chan locked */
/* AMP logical link confirmation callback: on failure, abort; on
 * success, finish either channel creation (channel not yet connected)
 * or a channel move (channel already connected).
 */
void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
		       u8 status)
{
	BT_DBG("chan %p, hchan %p, status %d", chan, hchan, status);

	if (status) {
		l2cap_logical_fail(chan);
		__release_logical_link(chan);
		return;
	}

	if (chan->state != BT_CONNECTED) {
		/* Ignore logical link if channel is on BR/EDR */
		if (chan->local_amp_id != AMP_ID_BREDR)
			l2cap_logical_finish_create(chan, hchan);
	} else {
		l2cap_logical_finish_move(chan, hchan);
	}
}
4711
/* Initiate moving this channel between BR/EDR and an AMP controller.
 * From BR/EDR: only if policy prefers AMP; starts physical link setup.
 * From AMP: immediately requests a move back to BR/EDR (id 0).
 */
void l2cap_move_start(struct l2cap_chan *chan)
{
	BT_DBG("chan %p", chan);

	if (chan->local_amp_id == AMP_ID_BREDR) {
		if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED)
			return;
		chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
		chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
		/* Placeholder - start physical link setup */
	} else {
		chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
		chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
		chan->move_id = 0;
		l2cap_move_setup(chan);
		l2cap_send_move_chan_req(chan, 0);
	}
}
4730
/* Continue channel creation after a physical link attempt finished.
 * For an outgoing channel, either proceed with Create Channel on the
 * AMP or fall back to a plain BR/EDR Connect Request.  For an incoming
 * channel, send the pending Create Channel Response and, on success,
 * start configuration.
 */
static void l2cap_do_create(struct l2cap_chan *chan, int result,
			    u8 local_amp_id, u8 remote_amp_id)
{
	BT_DBG("chan %p state %s %u -> %u", chan, state_to_string(chan->state),
	       local_amp_id, remote_amp_id);

	/* FCS is not used on AMP channels */
	chan->fcs = L2CAP_FCS_NONE;

	/* Outgoing channel on AMP */
	if (chan->state == BT_CONNECT) {
		if (result == L2CAP_CR_SUCCESS) {
			chan->local_amp_id = local_amp_id;
			l2cap_send_create_chan_req(chan, remote_amp_id);
		} else {
			/* Revert to BR/EDR connect */
			l2cap_send_conn_req(chan);
		}

		return;
	}

	/* Incoming channel on AMP */
	if (__l2cap_no_conn_pending(chan)) {
		struct l2cap_conn_rsp rsp;
		char buf[128];
		/* Response carries our view: scid = peer's cid (our dcid) */
		rsp.scid = cpu_to_le16(chan->dcid);
		rsp.dcid = cpu_to_le16(chan->scid);

		if (result == L2CAP_CR_SUCCESS) {
			/* Send successful response */
			rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
		} else {
			/* Send negative response */
			rsp.result = cpu_to_le16(L2CAP_CR_NO_MEM);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
		}

		/* chan->ident was saved when the request arrived */
		l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_RSP,
			       sizeof(rsp), &rsp);

		if (result == L2CAP_CR_SUCCESS) {
			/* Channel accepted: move to config phase and send
			 * our Configure Request immediately.
			 */
			l2cap_state_change(chan, BT_CONFIG);
			set_bit(CONF_REQ_SENT, &chan->conf_state);
			l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
				       L2CAP_CONF_REQ,
				       l2cap_build_conf_req(chan, buf), buf);
			chan->num_conf_req++;
		}
	}
}
4782
/* Initiator side: the physical link is ready, so pause the channel
 * (l2cap_move_setup), record the move target, and ask the peer to move
 * the channel to the remote controller.
 */
static void l2cap_do_move_initiate(struct l2cap_chan *chan, u8 local_amp_id,
				   u8 remote_amp_id)
{
	l2cap_move_setup(chan);
	chan->move_id = local_amp_id;
	chan->move_state = L2CAP_MOVE_WAIT_RSP;

	l2cap_send_move_chan_req(chan, remote_amp_id);
}
4792
/* Responder side: answer a pending Move Channel Request once the
 * physical link attempt has completed.
 *
 * NOTE(review): hchan is never assigned here yet (the lookup is a
 * placeholder), so the "not allowed" rejection path always runs; the
 * result parameter is currently unused.
 */
static void l2cap_do_move_respond(struct l2cap_chan *chan, int result)
{
	struct hci_chan *hchan = NULL;

	/* Placeholder - get hci_chan for logical link */

	if (hchan) {
		if (hchan->state == BT_CONNECTED) {
			/* Logical link is ready to go */
			chan->hs_hcon = hchan->conn;
			chan->hs_hcon->l2cap_data = chan->conn;
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);

			l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
		} else {
			/* Wait for logical link to be ready */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		}
	} else {
		/* Logical link not available */
		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_ALLOWED);
	}
}
4817
4818static void l2cap_do_move_cancel(struct l2cap_chan *chan, int result)
4819{
4820 if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4821 u8 rsp_result;
4822 if (result == -EINVAL)
4823 rsp_result = L2CAP_MR_BAD_ID;
4824 else
4825 rsp_result = L2CAP_MR_NOT_ALLOWED;
4826
4827 l2cap_send_move_chan_rsp(chan, rsp_result);
4828 }
4829
4830 chan->move_role = L2CAP_MOVE_ROLE_NONE;
4831 chan->move_state = L2CAP_MOVE_STABLE;
4832
4833 /* Restart data transmission */
4834 l2cap_ertm_send(chan);
4835}
4836
/* Invoke with locked chan */
void __l2cap_physical_cfm(struct l2cap_chan *chan, int result)
{
	u8 local_amp_id = chan->local_amp_id;
	u8 remote_amp_id = chan->remote_amp_id;

	BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
	       chan, result, local_amp_id, remote_amp_id);

	/* NOTE(review): this early-exit path unlocks the channel while
	 * all other paths return with it still locked - confirm callers
	 * expect this asymmetry.
	 */
	if (chan->state == BT_DISCONN || chan->state == BT_CLOSED) {
		l2cap_chan_unlock(chan);
		return;
	}

	/* Not yet connected: this is channel creation, not a move */
	if (chan->state != BT_CONNECTED) {
		l2cap_do_create(chan, result, local_amp_id, remote_amp_id);
	} else if (result != L2CAP_MR_SUCCESS) {
		l2cap_do_move_cancel(chan, result);
	} else {
		/* Physical link succeeded: continue the move according
		 * to our role in it.
		 */
		switch (chan->move_role) {
		case L2CAP_MOVE_ROLE_INITIATOR:
			l2cap_do_move_initiate(chan, local_amp_id,
					       remote_amp_id);
			break;
		case L2CAP_MOVE_ROLE_RESPONDER:
			l2cap_do_move_respond(chan, result);
			break;
		default:
			l2cap_do_move_cancel(chan, result);
			break;
		}
	}
}
4870
/* Handle an incoming Move Channel Request: validate the channel and the
 * requested destination controller, detect move collisions, and respond
 * with success, pending, or an error code.
 *
 * Returns 0 on handled requests (including rejections) or a negative
 * errno that makes the caller emit a Command Reject.
 */
static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
					 struct l2cap_cmd_hdr *cmd,
					 u16 cmd_len, void *data)
{
	struct l2cap_move_chan_req *req = data;
	struct l2cap_move_chan_rsp rsp;
	struct l2cap_chan *chan;
	u16 icid = 0;
	u16 result = L2CAP_MR_NOT_ALLOWED;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	icid = le16_to_cpu(req->icid);

	BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);

	/* Moves require A2MP support on this side */
	if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
		return -EINVAL;

	/* Note: on success this lookup returns the channel locked; it is
	 * unlocked at the end of this function.
	 */
	chan = l2cap_get_chan_by_dcid(conn, icid);
	if (!chan) {
		rsp.icid = cpu_to_le16(icid);
		rsp.result = cpu_to_le16(L2CAP_MR_NOT_ALLOWED);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_MOVE_CHAN_RSP,
			       sizeof(rsp), &rsp);
		return 0;
	}

	/* Remember the request ident for the later confirm exchange */
	chan->ident = cmd->ident;

	/* Only dynamically-allocated ERTM/streaming channels that are
	 * not pinned to BR/EDR may be moved.
	 */
	if (chan->scid < L2CAP_CID_DYN_START ||
	    chan->chan_policy == BT_CHANNEL_POLICY_BREDR_ONLY ||
	    (chan->mode != L2CAP_MODE_ERTM &&
	     chan->mode != L2CAP_MODE_STREAMING)) {
		result = L2CAP_MR_NOT_ALLOWED;
		goto send_move_response;
	}

	if (chan->local_amp_id == req->dest_amp_id) {
		result = L2CAP_MR_SAME_ID;
		goto send_move_response;
	}

	/* A non-BR/EDR destination must name a live local AMP controller */
	if (req->dest_amp_id != AMP_ID_BREDR) {
		struct hci_dev *hdev;
		hdev = hci_dev_get(req->dest_amp_id);
		if (!hdev || hdev->dev_type != HCI_AMP ||
		    !test_bit(HCI_UP, &hdev->flags)) {
			if (hdev)
				hci_dev_put(hdev);

			result = L2CAP_MR_BAD_ID;
			goto send_move_response;
		}
		hci_dev_put(hdev);
	}

	/* Detect a move collision. Only send a collision response
	 * if this side has "lost", otherwise proceed with the move.
	 * The winner has the larger bd_addr.
	 */
	if ((__chan_is_moving(chan) ||
	     chan->move_role != L2CAP_MOVE_ROLE_NONE) &&
	    bacmp(&conn->hcon->src, &conn->hcon->dst) > 0) {
		result = L2CAP_MR_COLLISION;
		goto send_move_response;
	}

	chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
	l2cap_move_setup(chan);
	chan->move_id = req->dest_amp_id;
	icid = chan->dcid;

	if (req->dest_amp_id == AMP_ID_BREDR) {
		/* Moving to BR/EDR */
		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
			result = L2CAP_MR_PEND;
		} else {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			result = L2CAP_MR_SUCCESS;
		}
	} else {
		chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
		/* Placeholder - uncomment when amp functions are available */
		/*amp_accept_physical(chan, req->dest_amp_id);*/
		result = L2CAP_MR_PEND;
	}

send_move_response:
	l2cap_send_move_chan_rsp(chan, result);

	l2cap_chan_unlock(chan);

	return 0;
}
4968
/* Advance the initiator-side move state machine after a success or
 * pending Move Channel Response for the channel identified by icid.
 */
static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
{
	struct l2cap_chan *chan;
	struct hci_chan *hchan = NULL;

	/* On success this lookup returns the channel locked; it is
	 * unlocked at the end of this function.
	 */
	chan = l2cap_get_chan_by_scid(conn, icid);
	if (!chan) {
		/* Unknown channel: still confirm so the peer's state
		 * machine can finish.
		 */
		l2cap_send_move_chan_cfm_icid(conn, icid);
		return;
	}

	/* A pending result extends the move timeout */
	__clear_chan_timer(chan);
	if (result == L2CAP_MR_PEND)
		__set_chan_timer(chan, L2CAP_MOVE_ERTX_TIMEOUT);

	switch (chan->move_state) {
	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
		/* Move confirm will be sent when logical link
		 * is complete.
		 */
		chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		break;
	case L2CAP_MOVE_WAIT_RSP_SUCCESS:
		if (result == L2CAP_MR_PEND) {
			break;
		} else if (test_bit(CONN_LOCAL_BUSY,
				    &chan->conn_state)) {
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
		} else {
			/* Logical link is up or moving to BR/EDR,
			 * proceed with move
			 */
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		}
		break;
	case L2CAP_MOVE_WAIT_RSP:
		/* Moving to AMP */
		if (result == L2CAP_MR_SUCCESS) {
			/* Remote is ready, send confirm immediately
			 * after logical link is ready
			 */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		} else {
			/* Both logical link and move success
			 * are required to confirm
			 */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_COMP;
		}

		/* Placeholder - get hci_chan for logical link */
		/* NOTE(review): hchan is never assigned yet, so the
		 * unconfirmed branch below always runs in this state.
		 */
		if (!hchan) {
			/* Logical link not available */
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
			break;
		}

		/* If the logical link is not yet connected, do not
		 * send confirmation.
		 */
		if (hchan->state != BT_CONNECTED)
			break;

		/* Logical link is already ready to go */

		chan->hs_hcon = hchan->conn;
		chan->hs_hcon->l2cap_data = chan->conn;

		if (result == L2CAP_MR_SUCCESS) {
			/* Can confirm now */
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		} else {
			/* Now only need move success
			 * to confirm
			 */
			chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
		}

		l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
		break;
	default:
		/* Any other amp move state means the move failed. */
		chan->move_id = chan->local_amp_id;
		l2cap_move_done(chan);
		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
	}

	l2cap_chan_unlock(chan);
}
5058
/* Handle a failed Move Channel Response: either swap roles on a
 * collision (the peer won) or cancel the move, then send the mandatory
 * unconfirmed Move Channel Confirm.
 */
static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
			    u16 result)
{
	struct l2cap_chan *chan;

	/* Lookup by ident (our outstanding request); returns the
	 * channel locked on success.
	 */
	chan = l2cap_get_chan_by_ident(conn, ident);
	if (!chan) {
		/* Could not locate channel, icid is best guess */
		l2cap_send_move_chan_cfm_icid(conn, icid);
		return;
	}

	__clear_chan_timer(chan);

	if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
		if (result == L2CAP_MR_COLLISION) {
			/* Peer has the larger address; let it drive the move */
			chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
		} else {
			/* Cleanup - cancel move */
			chan->move_id = chan->local_amp_id;
			l2cap_move_done(chan);
		}
	}

	l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);

	l2cap_chan_unlock(chan);
}
5087
5088static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
5089 struct l2cap_cmd_hdr *cmd,
5090 u16 cmd_len, void *data)
5091{
5092 struct l2cap_move_chan_rsp *rsp = data;
5093 u16 icid, result;
5094
5095 if (cmd_len != sizeof(*rsp))
5096 return -EPROTO;
5097
5098 icid = le16_to_cpu(rsp->icid);
5099 result = le16_to_cpu(rsp->result);
5100
5101 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5102
5103 if (result == L2CAP_MR_SUCCESS || result == L2CAP_MR_PEND)
5104 l2cap_move_continue(conn, icid, result);
5105 else
5106 l2cap_move_fail(conn, cmd->ident, icid, result);
5107
5108 return 0;
5109}
5110
/* Handle a Move Channel Confirm from the peer: commit or roll back the
 * controller switch and always answer with a Confirm Response.
 */
static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
				      struct l2cap_cmd_hdr *cmd,
				      u16 cmd_len, void *data)
{
	struct l2cap_move_chan_cfm *cfm = data;
	struct l2cap_chan *chan;
	u16 icid, result;

	if (cmd_len != sizeof(*cfm))
		return -EPROTO;

	icid = le16_to_cpu(cfm->icid);
	result = le16_to_cpu(cfm->result);

	BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);

	/* Returns the channel locked on success */
	chan = l2cap_get_chan_by_dcid(conn, icid);
	if (!chan) {
		/* Spec requires a response even if the icid was not found */
		l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
		return 0;
	}

	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) {
		if (result == L2CAP_MC_CONFIRMED) {
			/* Move confirmed: adopt the new controller; a
			 * move back to BR/EDR releases the AMP logical
			 * link.
			 */
			chan->local_amp_id = chan->move_id;
			if (chan->local_amp_id == AMP_ID_BREDR)
				__release_logical_link(chan);
		} else {
			/* Unconfirmed: stay on the current controller */
			chan->move_id = chan->local_amp_id;
		}

		l2cap_move_done(chan);
	}

	l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);

	l2cap_chan_unlock(chan);

	return 0;
}
5152
/* Handle a Move Channel Confirm Response: the peer acknowledged our
 * confirm, so finalize the move on our side.
 */
static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
						 struct l2cap_cmd_hdr *cmd,
						 u16 cmd_len, void *data)
{
	struct l2cap_move_chan_cfm_rsp *rsp = data;
	struct l2cap_chan *chan;
	u16 icid;

	if (cmd_len != sizeof(*rsp))
		return -EPROTO;

	icid = le16_to_cpu(rsp->icid);

	BT_DBG("icid 0x%4.4x", icid);

	/* Returns the channel locked on success; nothing to do for an
	 * unknown icid.
	 */
	chan = l2cap_get_chan_by_scid(conn, icid);
	if (!chan)
		return 0;

	__clear_chan_timer(chan);

	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) {
		chan->local_amp_id = chan->move_id;

		/* Back on BR/EDR: release any AMP logical link */
		if (chan->local_amp_id == AMP_ID_BREDR && chan->hs_hchan)
			__release_logical_link(chan);

		l2cap_move_done(chan);
	}

	l2cap_chan_unlock(chan);

	return 0;
}
5187
/* Handle an LE Connection Parameter Update Request (slave -> master):
 * validate the proposed parameters, always respond, and on acceptance
 * trigger the actual LE connection update plus an mgmt notification.
 */
static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
					      struct l2cap_cmd_hdr *cmd,
					      u16 cmd_len, u8 *data)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_conn_param_update_req *req;
	struct l2cap_conn_param_update_rsp rsp;
	u16 min, max, latency, to_multiplier;
	int err;

	/* Only the master may act on this request */
	if (hcon->role != HCI_ROLE_MASTER)
		return -EINVAL;

	if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
		return -EPROTO;

	req = (struct l2cap_conn_param_update_req *) data;
	min = __le16_to_cpu(req->min);
	max = __le16_to_cpu(req->max);
	latency = __le16_to_cpu(req->latency);
	to_multiplier = __le16_to_cpu(req->to_multiplier);

	BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
	       min, max, latency, to_multiplier);

	memset(&rsp, 0, sizeof(rsp));

	err = hci_check_conn_params(min, max, latency, to_multiplier);
	if (err)
		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
	else
		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
		       sizeof(rsp), &rsp);

	if (!err) {
		u8 store_hint;

		/* Apply the parameters and let mgmt decide whether to
		 * store them for this peer.
		 */
		store_hint = hci_le_conn_update(hcon, min, max, latency,
						to_multiplier);
		mgmt_new_conn_param(hcon->hdev, &hcon->dst, hcon->dst_type,
				    store_hint, min, max, latency,
				    to_multiplier);

	}

	return 0;
}
5237
/* Handle an LE Credit Based Connection Response.  On success adopt the
 * peer's channel parameters; on an authentication/encryption rejection
 * raise our security level and retry via SMP; otherwise tear the
 * channel down.
 */
static int l2cap_le_connect_rsp(struct l2cap_conn *conn,
				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				u8 *data)
{
	struct l2cap_le_conn_rsp *rsp = (struct l2cap_le_conn_rsp *) data;
	struct hci_conn *hcon = conn->hcon;
	u16 dcid, mtu, mps, credits, result;
	struct l2cap_chan *chan;
	int err, sec_level;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	dcid = __le16_to_cpu(rsp->dcid);
	mtu = __le16_to_cpu(rsp->mtu);
	mps = __le16_to_cpu(rsp->mps);
	credits = __le16_to_cpu(rsp->credits);
	result = __le16_to_cpu(rsp->result);

	/* 23 is the minimum LE CoC MTU/MPS */
	if (result == L2CAP_CR_SUCCESS && (mtu < 23 || mps < 23))
		return -EPROTO;

	BT_DBG("dcid 0x%4.4x mtu %u mps %u credits %u result 0x%2.2x",
	       dcid, mtu, mps, credits, result);

	mutex_lock(&conn->chan_lock);

	/* Match the response to our outstanding request by ident */
	chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
	if (!chan) {
		err = -EBADSLT;
		goto unlock;
	}

	err = 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_SUCCESS:
		chan->ident = 0;
		chan->dcid = dcid;
		chan->omtu = mtu;
		chan->remote_mps = mps;
		chan->tx_credits = credits;
		l2cap_chan_ready(chan);
		break;

	case L2CAP_CR_AUTHENTICATION:
	case L2CAP_CR_ENCRYPTION:
		/* If we already have MITM protection we can't do
		 * anything.
		 */
		if (hcon->sec_level > BT_SECURITY_MEDIUM) {
			l2cap_chan_del(chan, ECONNREFUSED);
			break;
		}

		/* Bump security one level above the link's current one */
		sec_level = hcon->sec_level + 1;
		if (chan->sec_level < sec_level)
			chan->sec_level = sec_level;

		/* We'll need to send a new Connect Request */
		clear_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags);

		smp_conn_security(hcon, chan->sec_level);
		break;

	default:
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	l2cap_chan_unlock(chan);

unlock:
	mutex_unlock(&conn->chan_lock);

	return err;
}
5317
/* Dispatch one BR/EDR signaling command to its handler.
 *
 * Only the handlers whose failure should trigger a Command Reject
 * propagate their return value into err; response handlers have their
 * return deliberately ignored.
 */
static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
				      struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				      u8 *data)
{
	int err = 0;

	switch (cmd->code) {
	case L2CAP_COMMAND_REJ:
		l2cap_command_rej(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_REQ:
		err = l2cap_connect_req(conn, cmd, cmd_len, data);
		break;

	/* Connect and Create Channel responses share a handler */
	case L2CAP_CONN_RSP:
	case L2CAP_CREATE_CHAN_RSP:
		l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONF_REQ:
		err = l2cap_config_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONF_RSP:
		l2cap_config_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_REQ:
		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_RSP:
		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_ECHO_REQ:
		/* Echo the payload straight back */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
		break;

	case L2CAP_ECHO_RSP:
		break;

	case L2CAP_INFO_REQ:
		err = l2cap_information_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_INFO_RSP:
		l2cap_information_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CREATE_CHAN_REQ:
		err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_REQ:
		err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_RSP:
		l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_CFM:
		err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_CFM_RSP:
		l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
		break;

	default:
		BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
		err = -EINVAL;
		break;
	}

	return err;
}
5397
/* Handle an incoming LE Credit Based Connection Request: find a
 * listening channel for the PSM, enforce security, create the new
 * channel, and respond (or defer the response to the socket owner).
 */
static int l2cap_le_connect_req(struct l2cap_conn *conn,
				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				u8 *data)
{
	struct l2cap_le_conn_req *req = (struct l2cap_le_conn_req *) data;
	struct l2cap_le_conn_rsp rsp;
	struct l2cap_chan *chan, *pchan;
	u16 dcid, scid, credits, mtu, mps;
	__le16 psm;
	u8 result;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	scid = __le16_to_cpu(req->scid);
	mtu = __le16_to_cpu(req->mtu);
	mps = __le16_to_cpu(req->mps);
	psm  = req->psm;
	dcid = 0;
	credits = 0;

	/* 23 is the minimum LE CoC MTU/MPS */
	if (mtu < 23 || mps < 23)
		return -EPROTO;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x mtu %u mps %u", __le16_to_cpu(psm),
	       scid, mtu, mps);

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
					 &conn->hcon->dst, LE_LINK);
	if (!pchan) {
		result = L2CAP_CR_BAD_PSM;
		chan = NULL;
		goto response;
	}

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(pchan);

	/* Reject when the link's security does not satisfy the
	 * listening channel's requirement (STK is acceptable).
	 */
	if (!smp_sufficient_security(conn->hcon, pchan->sec_level,
				     SMP_ALLOW_STK)) {
		result = L2CAP_CR_AUTHENTICATION;
		chan = NULL;
		goto response_unlock;
	}

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid)) {
		result = L2CAP_CR_NO_MEM;
		chan = NULL;
		goto response_unlock;
	}

	chan = pchan->ops->new_connection(pchan);
	if (!chan) {
		result = L2CAP_CR_NO_MEM;
		goto response_unlock;
	}

	l2cap_le_flowctl_init(chan);

	/* Adopt addressing and the peer's channel parameters */
	bacpy(&chan->src, &conn->hcon->src);
	bacpy(&chan->dst, &conn->hcon->dst);
	chan->src_type = bdaddr_src_type(conn->hcon);
	chan->dst_type = bdaddr_dst_type(conn->hcon);
	chan->psm  = psm;
	chan->dcid = scid;
	chan->omtu = mtu;
	chan->remote_mps = mps;
	chan->tx_credits = __le16_to_cpu(req->credits);

	__l2cap_chan_add(conn, chan);
	dcid = chan->scid;
	credits = chan->rx_credits;

	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	chan->ident = cmd->ident;

	if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
		l2cap_state_change(chan, BT_CONNECT2);
		/* The following result value is actually not defined
		 * for LE CoC but we use it to let the function know
		 * that it should bail out after doing its cleanup
		 * instead of sending a response.
		 */
		result = L2CAP_CR_PEND;
		chan->ops->defer(chan);
	} else {
		l2cap_chan_ready(chan);
		result = L2CAP_CR_SUCCESS;
	}

response_unlock:
	l2cap_chan_unlock(pchan);
	mutex_unlock(&conn->chan_lock);
	l2cap_chan_put(pchan);

	/* Deferred setup: response will be sent later by the owner */
	if (result == L2CAP_CR_PEND)
		return 0;

response:
	if (chan) {
		rsp.mtu = cpu_to_le16(chan->imtu);
		rsp.mps = cpu_to_le16(chan->mps);
	} else {
		rsp.mtu = 0;
		rsp.mps = 0;
	}

	rsp.dcid    = cpu_to_le16(dcid);
	rsp.credits = cpu_to_le16(credits);
	rsp.result  = cpu_to_le16(result);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_LE_CONN_RSP, sizeof(rsp), &rsp);

	return 0;
}
5516
/* Handle an LE Flow Control Credit packet: add the peer's credits to
 * the channel and flush as many queued frames as the new credit total
 * allows.  A credit overflow is a protocol violation and disconnects
 * the channel.
 */
static inline int l2cap_le_credits(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_le_credits *pkt;
	struct l2cap_chan *chan;
	u16 cid, credits, max_credits;

	if (cmd_len != sizeof(*pkt))
		return -EPROTO;

	pkt = (struct l2cap_le_credits *) data;
	cid = __le16_to_cpu(pkt->cid);
	credits = __le16_to_cpu(pkt->credits);

	BT_DBG("cid 0x%4.4x credits 0x%4.4x", cid, credits);

	/* Returns the channel locked on success */
	chan = l2cap_get_chan_by_dcid(conn, cid);
	if (!chan)
		return -EBADSLT;

	/* The running total must never exceed LE_FLOWCTL_MAX_CREDITS */
	max_credits = LE_FLOWCTL_MAX_CREDITS - chan->tx_credits;
	if (credits > max_credits) {
		BT_ERR("LE credits overflow");
		l2cap_send_disconn_req(chan, ECONNRESET);
		l2cap_chan_unlock(chan);

		/* Return 0 so that we don't trigger an unnecessary
		 * command reject packet.
		 */
		return 0;
	}

	chan->tx_credits += credits;

	/* Drain queued frames while credits remain */
	while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
		l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
		chan->tx_credits--;
	}

	/* Leftover credits mean the socket may send again */
	if (chan->tx_credits)
		chan->ops->resume(chan);

	l2cap_chan_unlock(chan);

	return 0;
}
5564
5565static inline int l2cap_le_command_rej(struct l2cap_conn *conn,
5566 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5567 u8 *data)
5568{
5569 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
5570 struct l2cap_chan *chan;
5571
5572 if (cmd_len < sizeof(*rej))
5573 return -EPROTO;
5574
5575 mutex_lock(&conn->chan_lock);
5576
5577 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
5578 if (!chan)
5579 goto done;
5580
5581 l2cap_chan_lock(chan);
5582 l2cap_chan_del(chan, ECONNREFUSED);
5583 l2cap_chan_unlock(chan);
5584
5585done:
5586 mutex_unlock(&conn->chan_lock);
5587 return 0;
5588}
5589
/* Dispatch one LE signaling command to its handler.  Handlers whose
 * failure should produce a Command Reject propagate their return value
 * into err; the others have their return deliberately ignored.
 */
static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	int err = 0;

	switch (cmd->code) {
	case L2CAP_COMMAND_REJ:
		l2cap_le_command_rej(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_PARAM_UPDATE_REQ:
		err = l2cap_conn_param_update_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_PARAM_UPDATE_RSP:
		break;

	case L2CAP_LE_CONN_RSP:
		l2cap_le_connect_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_LE_CONN_REQ:
		err = l2cap_le_connect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_LE_CREDITS:
		err = l2cap_le_credits(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_REQ:
		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_RSP:
		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
		break;

	default:
		BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
		err = -EINVAL;
		break;
	}

	return err;
}
5636
/* Process an skb received on the LE signaling channel: exactly one
 * command per packet.  A handler error results in a Command Reject;
 * the skb is always consumed.
 */
static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
					struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_cmd_hdr *cmd;
	u16 len;
	int err;

	if (hcon->type != LE_LINK)
		goto drop;

	if (skb->len < L2CAP_CMD_HDR_SIZE)
		goto drop;

	cmd = (void *) skb->data;
	skb_pull(skb, L2CAP_CMD_HDR_SIZE);

	len = le16_to_cpu(cmd->len);

	BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, cmd->ident);

	/* The declared length must cover the whole remaining payload,
	 * and ident 0 is reserved.
	 */
	if (len != skb->len || !cmd->ident) {
		BT_DBG("corrupted command");
		goto drop;
	}

	err = l2cap_le_sig_cmd(conn, cmd, len, skb->data);
	if (err) {
		struct l2cap_cmd_rej_unk rej;

		/* NOTE(review): err is the command handler's failure
		 * code, not a link-type problem - the message text is
		 * misleading.
		 */
		BT_ERR("Wrong link type (%d)", err);

		rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
			       sizeof(rej), &rej);
	}

drop:
	kfree_skb(skb);
}
5677
/* Process an skb received on the BR/EDR signaling channel.  Unlike LE,
 * one packet may carry several concatenated commands; iterate over them
 * until the buffer is exhausted or a command is corrupted.  The skb is
 * always consumed.
 */
static inline void l2cap_sig_channel(struct l2cap_conn *conn,
				     struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	u8 *data = skb->data;
	int len = skb->len;
	struct l2cap_cmd_hdr cmd;
	int err;

	/* Mirror raw signaling traffic to raw sockets first */
	l2cap_raw_recv(conn, skb);

	if (hcon->type != ACL_LINK)
		goto drop;

	while (len >= L2CAP_CMD_HDR_SIZE) {
		u16 cmd_len;
		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
		data += L2CAP_CMD_HDR_SIZE;
		len  -= L2CAP_CMD_HDR_SIZE;

		cmd_len = le16_to_cpu(cmd.len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len,
		       cmd.ident);

		/* Length must fit the remaining buffer; ident 0 is
		 * reserved.  A corrupt command aborts the whole batch.
		 */
		if (cmd_len > len || !cmd.ident) {
			BT_DBG("corrupted command");
			break;
		}

		err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
		if (err) {
			struct l2cap_cmd_rej_unk rej;

			/* NOTE(review): err is the handler's failure
			 * code, not a link-type problem - the message
			 * text is misleading.
			 */
			BT_ERR("Wrong link type (%d)", err);

			rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ,
				       sizeof(rej), &rej);
		}

		/* Advance past this command's payload */
		data += cmd_len;
		len  -= cmd_len;
	}

drop:
	kfree_skb(skb);
}
5726
/* Verify (and strip) the trailing CRC16 FCS of an ERTM/streaming frame.
 * Returns 0 when the FCS matches or the channel does not use one,
 * -EBADMSG on a mismatch.
 */
static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
{
	u16 our_fcs, rcv_fcs;
	int hdr_size;

	/* Extended control fields use the larger header size */
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		hdr_size = L2CAP_EXT_HDR_SIZE;
	else
		hdr_size = L2CAP_ENH_HDR_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16) {
		/* Trim the FCS off skb->len, then read it from just past
		 * the new end - the bytes are still in the buffer, only
		 * the accounted length changed.
		 */
		skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
		rcv_fcs = get_unaligned_le16(skb->data + skb->len);
		/* CRC covers the L2CAP header (before skb->data) too */
		our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);

		if (our_fcs != rcv_fcs)
			return -EBADMSG;
	}
	return 0;
}
5747
/* Answer a poll (P-bit) from the peer: send an F-bit-set RNR if we are
 * locally busy, otherwise flush pending I-frames, and fall back to an
 * RR if no frame ended up carrying the F-bit.
 */
static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;

	BT_DBG("chan %p", chan);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.final = 1;
	control.reqseq = chan->buffer_seq;
	set_bit(CONN_SEND_FBIT, &chan->conn_state);

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		control.super = L2CAP_SUPER_RNR;
		l2cap_send_sframe(chan, &control);
	}

	/* Peer just left busy state: restart the retransmission timer
	 * if frames are still unacknowledged.
	 */
	if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
	    chan->unacked_frames > 0)
		__set_retrans_timer(chan);

	/* Send pending iframes */
	l2cap_ertm_send(chan);

	if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
		/* F-bit wasn't sent in an s-frame or i-frame yet, so
		 * send it now.
		 */
		control.super = L2CAP_SUPER_RR;
		l2cap_send_sframe(chan, &control);
	}
}
5781
/* Append new_frag to skb's frag_list, tracking the current tail in
 * *last_frag and keeping skb's length/truesize accounting in sync.
 */
static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
			    struct sk_buff **last_frag)
{
	/* skb->len reflects data in skb as well as all fragments
	 * skb->data_len reflects only data in fragments
	 */
	if (!skb_has_frag_list(skb))
		skb_shinfo(skb)->frag_list = new_frag;

	new_frag->next = NULL;

	/* NOTE(review): on the first fragment *last_frag is skb itself
	 * (see l2cap_reassemble_sdu), so skb->next is written here as
	 * well - confirm this is harmless for SDUs queued this way.
	 */
	(*last_frag)->next = new_frag;
	*last_frag = new_frag;

	skb->len += new_frag->len;
	skb->data_len += new_frag->len;
	skb->truesize += new_frag->truesize;
}
5800
/* Reassemble a (possibly segmented) SDU from an incoming I-frame and
 * deliver it via chan->ops->recv once complete.
 *
 * Ownership: on success (err == 0) this function has consumed skb -
 * either delivered it or stashed it in chan->sdu (skb is then set to
 * NULL locally so the error path won't free it).  On error both skb
 * and any partial SDU are freed and reassembly state is reset.
 */
static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
				struct l2cap_ctrl *control)
{
	int err = -EINVAL;

	switch (control->sar) {
	case L2CAP_SAR_UNSEGMENTED:
		/* An unsegmented frame while reassembling is a protocol
		 * error (err stays -EINVAL).
		 */
		if (chan->sdu)
			break;

		err = chan->ops->recv(chan, skb);
		break;

	case L2CAP_SAR_START:
		if (chan->sdu)
			break;

		/* First fragment carries the total SDU length */
		chan->sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, L2CAP_SDULEN_SIZE);

		if (chan->sdu_len > chan->imtu) {
			err = -EMSGSIZE;
			break;
		}

		/* A start fragment must not already hold the whole SDU */
		if (skb->len >= chan->sdu_len)
			break;

		chan->sdu = skb;
		chan->sdu_last_frag = skb;

		/* skb now owned by chan->sdu */
		skb = NULL;
		err = 0;
		break;

	case L2CAP_SAR_CONTINUE:
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* A continuation must not complete or overrun the SDU */
		if (chan->sdu->len >= chan->sdu_len)
			break;

		err = 0;
		break;

	case L2CAP_SAR_END:
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* The end fragment must land exactly on sdu_len */
		if (chan->sdu->len != chan->sdu_len)
			break;

		err = chan->ops->recv(chan, chan->sdu);

		if (!err) {
			/* Reassembly complete */
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;
		}
		break;
	}

	if (err) {
		/* Drop the frame and any partial SDU (skb may be NULL
		 * here, which kfree_skb tolerates).
		 */
		kfree_skb(skb);
		kfree_skb(chan->sdu);
		chan->sdu = NULL;
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;
	}

	return err;
}
5882
/* Placeholder - resegmentation after a channel move is not implemented */
static int l2cap_resegment(struct l2cap_chan *chan)
{
	return 0;
}
5888
5889void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
5890{
5891 u8 event;
5892
5893 if (chan->mode != L2CAP_MODE_ERTM)
5894 return;
5895
5896 event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
5897 l2cap_tx(chan, NULL, NULL, event);
5898}
5899
/* Drain the SREJ queue of frames that have become sequential and hand
 * them to reassembly, stopping at the first gap, a delivery error, or
 * a local-busy condition.  When the queue empties, normal receive
 * state resumes and an ack is sent.
 */
static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
{
	int err = 0;
	/* Pass sequential frames to l2cap_reassemble_sdu()
	 * until a gap is encountered.
	 */

	BT_DBG("chan %p", chan);

	while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		struct sk_buff *skb;
		BT_DBG("Searching for skb with txseq %d (queue len %d)",
		       chan->buffer_seq, skb_queue_len(&chan->srej_q));

		skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);

		/* Gap found: wait for the missing frame */
		if (!skb)
			break;

		skb_unlink(skb, &chan->srej_q);
		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
		err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->l2cap);
		if (err)
			break;
	}

	if (skb_queue_empty(&chan->srej_q)) {
		chan->rx_state = L2CAP_RX_STATE_RECV;
		l2cap_send_ack(chan);
	}

	return err;
}
5933
/* Handle a received SREJ S-frame: retransmit the single requested
 * frame, disconnecting instead when the request is invalid or the
 * retry limit is exhausted.  The P/F-bit handling tracks whether a
 * duplicate retransmission must be suppressed via CONN_SREJ_ACT.
 */
static void l2cap_handle_srej(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* SREJ for the next unsent sequence number is invalid */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	if (skb == NULL) {
		BT_DBG("Seq %d not available for retransmission",
		       control->reqseq);
		return;
	}

	/* max_tx == 0 means unlimited retransmissions */
	if (chan->max_tx != 0 && bt_cb(skb)->l2cap.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	if (control->poll) {
		/* Poll bit set: retransmit with F-bit and remember the
		 * reqseq so a later F-bit SREJ for the same frame is
		 * not retransmitted twice.
		 */
		l2cap_pass_to_tx(chan, control);

		set_bit(CONN_SEND_FBIT, &chan->conn_state);
		l2cap_retransmit(chan, control);
		l2cap_ertm_send(chan);

		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
			set_bit(CONN_SREJ_ACT, &chan->conn_state);
			chan->srej_save_reqseq = control->reqseq;
		}
	} else {
		l2cap_pass_to_tx_fbit(chan, control);

		if (control->final) {
			/* Skip the retransmit only if this is the saved
			 * SREJ_ACT frame being acknowledged.
			 */
			if (chan->srej_save_reqseq != control->reqseq ||
			    !test_and_clear_bit(CONN_SREJ_ACT,
						&chan->conn_state))
				l2cap_retransmit(chan, control);
		} else {
			l2cap_retransmit(chan, control);
			if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
				set_bit(CONN_SREJ_ACT, &chan->conn_state);
				chan->srej_save_reqseq = control->reqseq;
			}
		}
	}
}
5991
/* Handle a received REJ (reject) S-frame.
 *
 * The peer requests retransmission of all unacknowledged frames
 * starting at reqseq.  Validate the request, enforce the retry limit on
 * the first rejected frame, then retransmit everything outstanding,
 * using CONN_REJ_ACT to avoid acting on the same REJ twice across a
 * P/F-bit exchange.
 */
static void l2cap_handle_rej(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* A REJ of a frame we have not sent yet is a protocol error */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	/* max_tx == 0 means an unlimited number of retransmissions */
	if (chan->max_tx && skb &&
	    bt_cb(skb)->l2cap.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	l2cap_pass_to_tx(chan, control);

	if (control->final) {
		/* Only retransmit if this F-bit's REJ was not already
		 * acted upon.
		 */
		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
			l2cap_retransmit_all(chan, control);
	} else {
		l2cap_retransmit_all(chan, control);
		l2cap_ertm_send(chan);
		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
			set_bit(CONN_REJ_ACT, &chan->conn_state);
	}
}
6028
/* Classify an incoming I-frame's TxSeq relative to the receive state.
 *
 * The returned L2CAP_TXSEQ_* value tells the RX state machine whether
 * the frame is the one expected next, a duplicate, one that fills (or
 * duplicates, or falls outside) an outstanding SREJ request, unexpected
 * (indicating missing frames), or invalid relative to the tx window —
 * where "invalid" is further split into ignorable and fatal cases, as
 * explained in the comments below.
 */
static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
{
	BT_DBG("chan %p, txseq %d", chan, txseq);

	BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
	       chan->expected_tx_seq);

	if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			/* See notes below regarding "double poll" and
			 * invalid packets.
			 */
			if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
				BT_DBG("Invalid/Ignore - after SREJ");
				return L2CAP_TXSEQ_INVALID_IGNORE;
			} else {
				BT_DBG("Invalid - in window after SREJ sent");
				return L2CAP_TXSEQ_INVALID;
			}
		}

		if (chan->srej_list.head == txseq) {
			BT_DBG("Expected SREJ");
			return L2CAP_TXSEQ_EXPECTED_SREJ;
		}

		if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
			BT_DBG("Duplicate SREJ - txseq already stored");
			return L2CAP_TXSEQ_DUPLICATE_SREJ;
		}

		if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
			BT_DBG("Unexpected SREJ - not requested");
			return L2CAP_TXSEQ_UNEXPECTED_SREJ;
		}
	}

	if (chan->expected_tx_seq == txseq) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		} else {
			BT_DBG("Expected");
			return L2CAP_TXSEQ_EXPECTED;
		}
	}

	if (__seq_offset(chan, txseq, chan->last_acked_seq) <
	    __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
		BT_DBG("Duplicate - expected_tx_seq later than txseq");
		return L2CAP_TXSEQ_DUPLICATE;
	}

	if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
		/* A source of invalid packets is a "double poll" condition,
		 * where delays cause us to send multiple poll packets. If
		 * the remote stack receives and processes both polls,
		 * sequence numbers can wrap around in such a way that a
		 * resent frame has a sequence number that looks like new data
		 * with a sequence gap. This would trigger an erroneous SREJ
		 * request.
		 *
		 * Fortunately, this is impossible with a tx window that's
		 * less than half of the maximum sequence number, which allows
		 * invalid frames to be safely ignored.
		 *
		 * With tx window sizes greater than half of the tx window
		 * maximum, the frame is invalid and cannot be ignored. This
		 * causes a disconnect.
		 */

		if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
			BT_DBG("Invalid/Ignore - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID_IGNORE;
		} else {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		}
	} else {
		BT_DBG("Unexpected - txseq indicates missing frames");
		return L2CAP_TXSEQ_UNEXPECTED;
	}
}
6114
/* ERTM RX state machine handler for the normal RECV state.
 *
 * I-frames are classified by TxSeq: the expected frame is reassembled
 * and acknowledged, a sequence gap queues the frame and sends SREJs
 * (moving to the SREJ_SENT state), duplicates only update the TX side,
 * and invalid sequence numbers are either silently ignored or tear the
 * channel down.  S-frame events update the transmit window and trigger
 * retransmission as required.  Any skb not stored for later is freed
 * before returning.
 */
static int l2cap_rx_state_recv(struct l2cap_chan *chan,
			       struct l2cap_ctrl *control,
			       struct sk_buff *skb, u8 event)
{
	int err = 0;
	/* Set once the skb is queued or consumed, so it isn't freed below */
	bool skb_in_use = false;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, control->txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			l2cap_pass_to_tx(chan, control);

			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding expected seq %d",
				       control->txseq);
				break;
			}

			chan->expected_tx_seq = __next_seq(chan,
							   control->txseq);

			chan->buffer_seq = chan->expected_tx_seq;
			skb_in_use = true;

			err = l2cap_reassemble_sdu(chan, skb, control);
			if (err)
				break;

			if (control->final) {
				if (!test_and_clear_bit(CONN_REJ_ACT,
							&chan->conn_state)) {
					control->final = 0;
					l2cap_retransmit_all(chan, control);
					l2cap_ertm_send(chan);
				}
			}

			if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
				l2cap_send_ack(chan);
			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			l2cap_pass_to_tx(chan, control);

			/* Can't issue SREJ frames in the local busy state.
			 * Drop this frame, it will be seen as missing
			 * when local busy is exited.
			 */
			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding unexpected seq %d",
				       control->txseq);
				break;
			}

			/* There was a gap in the sequence, so an SREJ
			 * must be sent for each missing frame. The
			 * current frame is stored for later use.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			clear_bit(CONN_SREJ_ACT, &chan->conn_state);
			l2cap_seq_list_clear(&chan->srej_list);
			l2cap_send_srej(chan, control->txseq);

			chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			/* Already delivered; only the ack info is useful */
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(chan, ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state) &&
			    !__chan_is_moving(chan)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			l2cap_send_i_or_rr_or_rnr(chan);
		} else {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_ertm_send(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		/* Peer is busy: stop retransmitting until it recovers */
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		if (control && control->poll) {
			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_rr_or_rnr(chan, 0);
		}
		__clear_retrans_timer(chan);
		l2cap_seq_list_clear(&chan->retrans_list);
		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	default:
		break;
	}

	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
6248
/* ERTM RX state machine handler for the SREJ_SENT state.
 *
 * Entered after a sequence gap was detected.  Incoming I-frames are
 * queued on srej_q; a frame answering the head of the SREJ list pops
 * the request and triggers in-order delivery of everything queued,
 * while new gaps generate further SREJ requests.  The channel returns
 * to the RECV state (inside l2cap_rx_queued_iframes()) once all gaps
 * are filled.  Any skb not stored for later is freed before returning.
 */
static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
				    struct l2cap_ctrl *control,
				    struct sk_buff *skb, u8 event)
{
	int err = 0;
	u16 txseq = control->txseq;
	/* Set once the skb is queued, so it isn't freed below */
	bool skb_in_use = false;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			/* Keep frame for reassembly later */
			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			chan->expected_tx_seq = __next_seq(chan, txseq);
			break;
		case L2CAP_TXSEQ_EXPECTED_SREJ:
			/* This answers the oldest outstanding SREJ */
			l2cap_seq_list_pop(&chan->srej_list);

			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			err = l2cap_rx_queued_iframes(chan);
			if (err)
				break;

			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			/* Got a frame that can't be reassembled yet.
			 * Save it for later, and send SREJs to cover
			 * the missing frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_UNEXPECTED_SREJ:
			/* This frame was requested with an SREJ, but
			 * some expected retransmitted frames are
			 * missing. Request retransmission of missing
			 * SREJ'd frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej_list(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_DUPLICATE_SREJ:
			/* We've already queued this frame. Drop this copy. */
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			/* Expecting a later sequence number, so this frame
			 * was already received. Ignore it completely.
			 */
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(chan, ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			if (!test_and_clear_bit(CONN_REJ_ACT,
						&chan->conn_state)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames) {
				__set_retrans_timer(chan);
			}

			/* Answer the poll while repeating the pending SREJ */
			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_srej_tail(chan);
		} else {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_send_ack(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		if (control->poll) {
			l2cap_send_srej_tail(chan);
		} else {
			struct l2cap_ctrl rr_control;
			memset(&rr_control, 0, sizeof(rr_control));
			rr_control.sframe = 1;
			rr_control.super = L2CAP_SUPER_RR;
			rr_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &rr_control);
		}

		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	}

	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
6391
6392static int l2cap_finish_move(struct l2cap_chan *chan)
6393{
6394 BT_DBG("chan %p", chan);
6395
6396 chan->rx_state = L2CAP_RX_STATE_RECV;
6397
6398 if (chan->hs_hcon)
6399 chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
6400 else
6401 chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6402
6403 return l2cap_resegment(chan);
6404}
6405
/* ERTM RX handler for the WAIT_P state (used during a channel move).
 *
 * Reception is stalled until the peer sends a frame with the P-bit set;
 * anything else is a protocol error.  On receiving the poll, rewind the
 * transmit queue to the peer's expected sequence number, finish the
 * move, answer with the F-bit and then process the event through the
 * normal RECV handler (I-frames are not valid here).
 */
static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control,
				 struct sk_buff *skb, u8 event)
{
	int err;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	if (!control->poll)
		return -EPROTO;

	l2cap_process_reqseq(chan, control->reqseq);

	if (!skb_queue_empty(&chan->tx_q))
		chan->tx_send_head = skb_peek(&chan->tx_q);
	else
		chan->tx_send_head = NULL;

	/* Rewind next_tx_seq to the point expected
	 * by the receiver.
	 */
	chan->next_tx_seq = control->reqseq;
	chan->unacked_frames = 0;

	err = l2cap_finish_move(chan);
	if (err)
		return err;

	set_bit(CONN_SEND_FBIT, &chan->conn_state);
	l2cap_send_i_or_rr_or_rnr(chan);

	/* Only S-frames may be processed further in this state */
	if (event == L2CAP_EV_RECV_IFRAME)
		return -EPROTO;

	return l2cap_rx_state_recv(chan, control, NULL, event);
}
6443
/* ERTM RX handler for the WAIT_F state (used during a channel move).
 *
 * Waits for a frame with the F-bit set answering our poll; anything
 * else is a protocol error.  On receipt, rewind the transmit queue to
 * the peer's expected sequence number, adopt the MTU of the new link,
 * re-segment, and resume normal reception of this frame.
 */
static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control,
				 struct sk_buff *skb, u8 event)
{
	int err;

	if (!control->final)
		return -EPROTO;

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	chan->rx_state = L2CAP_RX_STATE_RECV;
	l2cap_process_reqseq(chan, control->reqseq);

	if (!skb_queue_empty(&chan->tx_q))
		chan->tx_send_head = skb_peek(&chan->tx_q);
	else
		chan->tx_send_head = NULL;

	/* Rewind next_tx_seq to the point expected
	 * by the receiver.
	 */
	chan->next_tx_seq = control->reqseq;
	chan->unacked_frames = 0;

	/* Pick up the MTU of whichever link now carries the channel */
	if (chan->hs_hcon)
		chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
	else
		chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;

	err = l2cap_resegment(chan);

	if (!err)
		err = l2cap_rx_state_recv(chan, control, skb, event);

	return err;
}
6481
6482static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
6483{
6484 /* Make sure reqseq is for a packet that has been sent but not acked */
6485 u16 unacked;
6486
6487 unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
6488 return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
6489}
6490
6491static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6492 struct sk_buff *skb, u8 event)
6493{
6494 int err = 0;
6495
6496 BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
6497 control, skb, event, chan->rx_state);
6498
6499 if (__valid_reqseq(chan, control->reqseq)) {
6500 switch (chan->rx_state) {
6501 case L2CAP_RX_STATE_RECV:
6502 err = l2cap_rx_state_recv(chan, control, skb, event);
6503 break;
6504 case L2CAP_RX_STATE_SREJ_SENT:
6505 err = l2cap_rx_state_srej_sent(chan, control, skb,
6506 event);
6507 break;
6508 case L2CAP_RX_STATE_WAIT_P:
6509 err = l2cap_rx_state_wait_p(chan, control, skb, event);
6510 break;
6511 case L2CAP_RX_STATE_WAIT_F:
6512 err = l2cap_rx_state_wait_f(chan, control, skb, event);
6513 break;
6514 default:
6515 /* shut it down */
6516 break;
6517 }
6518 } else {
6519 BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
6520 control->reqseq, chan->next_tx_seq,
6521 chan->expected_ack_seq);
6522 l2cap_send_disconn_req(chan, ECONNRESET);
6523 }
6524
6525 return err;
6526}
6527
/* Receive path for streaming mode.
 *
 * Streaming mode has no retransmission: only the expected TxSeq is
 * reassembled; any other sequence number discards both the frame and
 * any partially reassembled SDU.  The expected sequence number always
 * advances past the received frame.
 */
static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
			   struct sk_buff *skb)
{
	int err = 0;

	BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
	       chan->rx_state);

	if (l2cap_classify_txseq(chan, control->txseq) ==
	    L2CAP_TXSEQ_EXPECTED) {
		l2cap_pass_to_tx(chan, control);

		BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
		       __next_seq(chan, chan->buffer_seq));

		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);

		l2cap_reassemble_sdu(chan, skb, control);
	} else {
		/* Sequence gap or duplicate: drop frame and any partial SDU */
		if (chan->sdu) {
			kfree_skb(chan->sdu);
			chan->sdu = NULL;
		}
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;

		if (skb) {
			BT_DBG("Freeing %p", skb);
			kfree_skb(skb);
		}
	}

	chan->last_acked_seq = control->txseq;
	chan->expected_tx_seq = __next_seq(chan, control->txseq);

	return err;
}
6565
/* Entry point for ERTM and streaming mode PDUs on a connected channel.
 *
 * Verifies the FCS and payload length, validates F/P-bit usage against
 * the current TX state, then feeds I-frames and S-frames into the
 * appropriate RX state machine.  Consumes the skb in all cases; always
 * returns 0.
 */
static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct l2cap_ctrl *control = &bt_cb(skb)->l2cap;
	u16 len;
	u8 event;

	__unpack_control(chan, skb);

	len = skb->len;

	/*
	 * We can just drop the corrupted I-frame here.
	 * Receiver will miss it and start proper recovery
	 * procedures and ask for retransmission.
	 */
	if (l2cap_check_fcs(chan, skb))
		goto drop;

	/* Payload length excludes the SDU length field and the FCS */
	if (!control->sframe && control->sar == L2CAP_SAR_START)
		len -= L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		len -= L2CAP_FCS_SIZE;

	if (len > chan->mps) {
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto drop;
	}

	if (!control->sframe) {
		int err;

		BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
		       control->sar, control->reqseq, control->final,
		       control->txseq);

		/* Validate F-bit - F=0 always valid, F=1 only
		 * valid in TX WAIT_F
		 */
		if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
			goto drop;

		if (chan->mode != L2CAP_MODE_STREAMING) {
			event = L2CAP_EV_RECV_IFRAME;
			err = l2cap_rx(chan, control, skb, event);
		} else {
			err = l2cap_stream_rx(chan, control, skb);
		}

		if (err)
			l2cap_send_disconn_req(chan, ECONNRESET);
	} else {
		/* Maps the S-frame "super" field to an RX machine event */
		const u8 rx_func_to_event[4] = {
			L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
			L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
		};

		/* Only I-frames are expected in streaming mode */
		if (chan->mode == L2CAP_MODE_STREAMING)
			goto drop;

		BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
		       control->reqseq, control->final, control->poll,
		       control->super);

		if (len != 0) {
			BT_ERR("Trailing bytes: %d in sframe", len);
			l2cap_send_disconn_req(chan, ECONNRESET);
			goto drop;
		}

		/* Validate F and P bits */
		if (control->final && (control->poll ||
				       chan->tx_state != L2CAP_TX_STATE_WAIT_F))
			goto drop;

		event = rx_func_to_event[control->super];
		if (l2cap_rx(chan, control, skb, event))
			l2cap_send_disconn_req(chan, ECONNRESET);
	}

	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
6653
6654static void l2cap_chan_le_send_credits(struct l2cap_chan *chan)
6655{
6656 struct l2cap_conn *conn = chan->conn;
6657 struct l2cap_le_credits pkt;
6658 u16 return_credits;
6659
6660 /* We return more credits to the sender only after the amount of
6661 * credits falls below half of the initial amount.
6662 */
6663 if (chan->rx_credits >= (le_max_credits + 1) / 2)
6664 return;
6665
6666 return_credits = le_max_credits - chan->rx_credits;
6667
6668 BT_DBG("chan %p returning %u credits to sender", chan, return_credits);
6669
6670 chan->rx_credits += return_credits;
6671
6672 pkt.cid = cpu_to_le16(chan->scid);
6673 pkt.credits = cpu_to_le16(return_credits);
6674
6675 chan->ident = l2cap_get_ident(conn);
6676
6677 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CREDITS, sizeof(pkt), &pkt);
6678}
6679
/* Receive path for LE credit-based flow control channels.
 *
 * Each PDU consumes one credit; running out of credits on the peer's
 * side is a protocol violation.  The first PDU of an SDU carries a
 * 2-byte SDU length; subsequent PDUs are fragments appended until the
 * complete SDU is handed to the channel's recv callback.  The skb is
 * always consumed here (directly or via the reassembly buffer), which
 * is why errors are not propagated to the caller.
 */
static int l2cap_le_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
{
	int err;

	if (!chan->rx_credits) {
		BT_ERR("No credits to receive LE L2CAP data");
		l2cap_send_disconn_req(chan, ECONNRESET);
		return -ENOBUFS;
	}

	if (chan->imtu < skb->len) {
		BT_ERR("Too big LE L2CAP PDU");
		return -ENOBUFS;
	}

	chan->rx_credits--;
	BT_DBG("rx_credits %u -> %u", chan->rx_credits + 1, chan->rx_credits);

	/* Top the sender back up if our credit count has dropped low */
	l2cap_chan_le_send_credits(chan);

	err = 0;

	if (!chan->sdu) {
		/* First PDU of an SDU: starts with the SDU length field */
		u16 sdu_len;

		sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, L2CAP_SDULEN_SIZE);

		BT_DBG("Start of new SDU. sdu_len %u skb->len %u imtu %u",
		       sdu_len, skb->len, chan->imtu);

		if (sdu_len > chan->imtu) {
			BT_ERR("Too big LE L2CAP SDU length received");
			err = -EMSGSIZE;
			goto failed;
		}

		if (skb->len > sdu_len) {
			BT_ERR("Too much LE L2CAP data received");
			err = -EINVAL;
			goto failed;
		}

		/* Unsegmented SDU: deliver directly, no reassembly needed */
		if (skb->len == sdu_len)
			return chan->ops->recv(chan, skb);

		chan->sdu = skb;
		chan->sdu_len = sdu_len;
		chan->sdu_last_frag = skb;

		return 0;
	}

	BT_DBG("SDU fragment. chan->sdu->len %u skb->len %u chan->sdu_len %u",
	       chan->sdu->len, skb->len, chan->sdu_len);

	if (chan->sdu->len + skb->len > chan->sdu_len) {
		BT_ERR("Too much LE L2CAP data received");
		err = -EINVAL;
		goto failed;
	}

	append_skb_frag(chan->sdu, skb, &chan->sdu_last_frag);
	skb = NULL;

	if (chan->sdu->len == chan->sdu_len) {
		err = chan->ops->recv(chan, chan->sdu);
		if (!err) {
			/* recv took ownership of the reassembled SDU */
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;
		}
	}

failed:
	if (err) {
		kfree_skb(skb);
		kfree_skb(chan->sdu);
		chan->sdu = NULL;
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;
	}

	/* We can't return an error here since we took care of the skb
	 * freeing internally. An error return would cause the caller to
	 * do a double-free of the skb.
	 */
	return 0;
}
6769
/* Deliver an skb received on a dynamically allocated (or A2MP) CID to
 * the owning channel, dispatching by channel mode.  Consumes the skb;
 * the channel lock taken by the scid lookup (or the explicit lock for a
 * newly created A2MP channel) is released at "done".
 */
static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
			       struct sk_buff *skb)
{
	struct l2cap_chan *chan;

	chan = l2cap_get_chan_by_scid(conn, cid);
	if (!chan) {
		if (cid == L2CAP_CID_A2MP) {
			chan = a2mp_channel_create(conn, skb);
			if (!chan) {
				kfree_skb(skb);
				return;
			}

			l2cap_chan_lock(chan);
		} else {
			BT_DBG("unknown cid 0x%4.4x", cid);
			/* Drop packet and return */
			kfree_skb(skb);
			return;
		}
	}

	BT_DBG("chan %p, len %d", chan, skb->len);

	/* If we receive data on a fixed channel before the info req/rsp
	 * procdure is done simply assume that the channel is supported
	 * and mark it as ready.
	 */
	if (chan->chan_type == L2CAP_CHAN_FIXED)
		l2cap_chan_ready(chan);

	if (chan->state != BT_CONNECTED)
		goto drop;

	switch (chan->mode) {
	case L2CAP_MODE_LE_FLOWCTL:
		/* LE path frees the skb internally on error */
		if (l2cap_le_data_rcv(chan, skb) < 0)
			goto drop;

		goto done;

	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism. */

		if (chan->imtu < skb->len) {
			BT_ERR("Dropping L2CAP data: receive buffer overflow");
			goto drop;
		}

		if (!chan->ops->recv(chan, skb))
			goto done;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		l2cap_data_rcv(chan, skb);
		goto done;

	default:
		BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
		break;
	}

drop:
	kfree_skb(skb);

done:
	l2cap_chan_unlock(chan);
}
6843
/* Deliver a connectionless (G-frame) PDU to the channel listening on
 * the given PSM.  Only valid on BR/EDR ACL links.  The remote address
 * and PSM are stamped into the skb cb so they can be reported through
 * msg_name.  Consumes the skb and drops the channel reference taken by
 * the global lookup.
 */
static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
				  struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_chan *chan;

	if (hcon->type != ACL_LINK)
		goto free_skb;

	chan = l2cap_global_chan_by_psm(0, psm, &hcon->src, &hcon->dst,
					ACL_LINK);
	if (!chan)
		goto free_skb;

	BT_DBG("chan %p, len %d", chan, skb->len);

	if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
		goto drop;

	if (chan->imtu < skb->len)
		goto drop;

	/* Store remote BD_ADDR and PSM for msg_name */
	bacpy(&bt_cb(skb)->l2cap.bdaddr, &hcon->dst);
	bt_cb(skb)->l2cap.psm = psm;

	/* recv takes ownership of the skb on success */
	if (!chan->ops->recv(chan, skb)) {
		l2cap_chan_put(chan);
		return;
	}

drop:
	l2cap_chan_put(chan);
free_skb:
	kfree_skb(skb);
}
6880
/* Top-level demux for a complete L2CAP PDU received from HCI.
 *
 * Frames that arrive before the link is fully established are queued on
 * conn->pending_rx and replayed later by process_pending_rx().  After
 * basic header/length validation (and filtering of blacklisted LE
 * peers), the PDU is dispatched by CID to the signaling, connectionless
 * or data channel handlers.  Consumes the skb.
 */
static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct l2cap_hdr *lh = (void *) skb->data;
	struct hci_conn *hcon = conn->hcon;
	u16 cid, len;
	__le16 psm;

	if (hcon->state != BT_CONNECTED) {
		BT_DBG("queueing pending rx skb");
		skb_queue_tail(&conn->pending_rx, skb);
		return;
	}

	skb_pull(skb, L2CAP_HDR_SIZE);
	cid = __le16_to_cpu(lh->cid);
	len = __le16_to_cpu(lh->len);

	/* Header length must match the actual payload */
	if (len != skb->len) {
		kfree_skb(skb);
		return;
	}

	/* Since we can't actively block incoming LE connections we must
	 * at least ensure that we ignore incoming data from them.
	 */
	if (hcon->type == LE_LINK &&
	    hci_bdaddr_list_lookup(&hcon->hdev->blacklist, &hcon->dst,
				   bdaddr_dst_type(hcon))) {
		kfree_skb(skb);
		return;
	}

	BT_DBG("len %d, cid 0x%4.4x", len, cid);

	switch (cid) {
	case L2CAP_CID_SIGNALING:
		l2cap_sig_channel(conn, skb);
		break;

	case L2CAP_CID_CONN_LESS:
		psm = get_unaligned((__le16 *) skb->data);
		skb_pull(skb, L2CAP_PSMLEN_SIZE);
		l2cap_conless_channel(conn, psm, skb);
		break;

	case L2CAP_CID_LE_SIGNALING:
		l2cap_le_sig_channel(conn, skb);
		break;

	default:
		l2cap_data_channel(conn, cid, skb);
		break;
	}
}
6935
6936static void process_pending_rx(struct work_struct *work)
6937{
6938 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
6939 pending_rx_work);
6940 struct sk_buff *skb;
6941
6942 BT_DBG("");
6943
6944 while ((skb = skb_dequeue(&conn->pending_rx)))
6945 l2cap_recv_frame(conn, skb);
6946}
6947
/* Create (or return the existing) L2CAP connection object for an HCI
 * connection, together with its HCI channel.  Initializes the MTU from
 * the link type, the fixed-channel mask, the locks and the deferred-RX
 * machinery.  Returns NULL on allocation failure.
 */
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct hci_chan *hchan;

	/* Already set up for this hcon: reuse it */
	if (conn)
		return conn;

	hchan = hci_chan_create(hcon);
	if (!hchan)
		return NULL;

	conn = kzalloc(sizeof(*conn), GFP_KERNEL);
	if (!conn) {
		hci_chan_del(hchan);
		return NULL;
	}

	kref_init(&conn->ref);
	hcon->l2cap_data = conn;
	conn->hcon = hci_conn_get(hcon);
	conn->hchan = hchan;

	BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);

	switch (hcon->type) {
	case LE_LINK:
		if (hcon->hdev->le_mtu) {
			conn->mtu = hcon->hdev->le_mtu;
			break;
		}
		/* fall through */
	default:
		conn->mtu = hcon->hdev->acl_mtu;
		break;
	}

	conn->feat_mask = 0;

	conn->local_fixed_chan = L2CAP_FC_SIG_BREDR | L2CAP_FC_CONNLESS;

	if (hcon->type == ACL_LINK &&
	    hci_dev_test_flag(hcon->hdev, HCI_HS_ENABLED))
		conn->local_fixed_chan |= L2CAP_FC_A2MP;

	/* BR/EDR SMP is advertised when LE is on and Secure Connections
	 * is available (or forced for testing).
	 */
	if (hci_dev_test_flag(hcon->hdev, HCI_LE_ENABLED) &&
	    (bredr_sc_enabled(hcon->hdev) ||
	     hci_dev_test_flag(hcon->hdev, HCI_FORCE_BREDR_SMP)))
		conn->local_fixed_chan |= L2CAP_FC_SMP_BREDR;

	mutex_init(&conn->ident_lock);
	mutex_init(&conn->chan_lock);

	INIT_LIST_HEAD(&conn->chan_l);
	INIT_LIST_HEAD(&conn->users);

	INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);

	skb_queue_head_init(&conn->pending_rx);
	INIT_WORK(&conn->pending_rx_work, process_pending_rx);
	INIT_WORK(&conn->id_addr_update_work, l2cap_conn_update_id_addr);

	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	return conn;
}
7014
7015static bool is_valid_psm(u16 psm, u8 dst_type) {
7016 if (!psm)
7017 return false;
7018
7019 if (bdaddr_type_is_le(dst_type))
7020 return (psm <= 0x00ff);
7021
7022 /* PSM must be odd and lsb of upper byte must be 0 */
7023 return ((psm & 0x0101) == 0x0001);
7024}
7025
/* Initiate an outgoing L2CAP connection on @chan to @dst.
 *
 * Validates the PSM/CID combination and channel mode, creates or reuses
 * the underlying ACL or LE link, binds the channel to the connection
 * and starts the L2CAP connect procedure (or, for fixed/raw channels on
 * an already-connected link, marks the channel connected once security
 * allows).  Locking order: hdev lock, then conn->chan_lock, then the
 * channel lock.  Returns 0 on success or a negative errno.
 */
int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
		       bdaddr_t *dst, u8 dst_type)
{
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	int err;

	BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", &chan->src, dst,
	       dst_type, __le16_to_cpu(psm));

	hdev = hci_get_route(dst, &chan->src);
	if (!hdev)
		return -EHOSTUNREACH;

	hci_dev_lock(hdev);

	if (!is_valid_psm(__le16_to_cpu(psm), dst_type) && !cid &&
	    chan->chan_type != L2CAP_CHAN_RAW) {
		err = -EINVAL;
		goto done;
	}

	/* Connection-oriented channels need a PSM, fixed channels a CID */
	if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !psm) {
		err = -EINVAL;
		goto done;
	}

	if (chan->chan_type == L2CAP_CHAN_FIXED && !cid) {
		err = -EINVAL;
		goto done;
	}

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		break;
	case L2CAP_MODE_LE_FLOWCTL:
		l2cap_le_flowctl_init(chan);
		break;
	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		if (!disable_ertm)
			break;
		/* fall through */
	default:
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (chan->state) {
	case BT_CONNECT:
	case BT_CONNECT2:
	case BT_CONFIG:
		/* Already connecting */
		err = 0;
		goto done;

	case BT_CONNECTED:
		/* Already connected */
		err = -EISCONN;
		goto done;

	case BT_OPEN:
	case BT_BOUND:
		/* Can connect */
		break;

	default:
		err = -EBADFD;
		goto done;
	}

	/* Set destination address and psm */
	bacpy(&chan->dst, dst);
	chan->dst_type = dst_type;

	chan->psm = psm;
	chan->dcid = cid;

	if (bdaddr_type_is_le(dst_type)) {
		u8 role;

		/* Convert from L2CAP channel address type to HCI address type
		 */
		if (dst_type == BDADDR_LE_PUBLIC)
			dst_type = ADDR_LE_DEV_PUBLIC;
		else
			dst_type = ADDR_LE_DEV_RANDOM;

		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
			role = HCI_ROLE_SLAVE;
		else
			role = HCI_ROLE_MASTER;

		hcon = hci_connect_le(hdev, dst, dst_type, chan->sec_level,
				      HCI_LE_CONN_TIMEOUT, role);
	} else {
		u8 auth_type = l2cap_get_auth_type(chan);
		hcon = hci_connect_acl(hdev, dst, chan->sec_level, auth_type);
	}

	if (IS_ERR(hcon)) {
		err = PTR_ERR(hcon);
		goto done;
	}

	conn = l2cap_conn_add(hcon);
	if (!conn) {
		hci_conn_drop(hcon);
		err = -ENOMEM;
		goto done;
	}

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(chan);

	/* The requested destination CID must not already be in use */
	if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
		hci_conn_drop(hcon);
		err = -EBUSY;
		goto chan_unlock;
	}

	/* Update source addr of the socket */
	bacpy(&chan->src, &hcon->src);
	chan->src_type = bdaddr_src_type(hcon);

	__l2cap_chan_add(conn, chan);

	/* l2cap_chan_add takes its own ref so we can drop this one */
	hci_conn_drop(hcon);

	l2cap_state_change(chan, BT_CONNECT);
	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	/* Release chan->sport so that it can be reused by other
	 * sockets (as it's only used for listening sockets).
	 */
	write_lock(&chan_list_lock);
	chan->sport = 0;
	write_unlock(&chan_list_lock);

	if (hcon->state == BT_CONNECTED) {
		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			__clear_chan_timer(chan);
			if (l2cap_chan_check_security(chan, true))
				l2cap_state_change(chan, BT_CONNECTED);
		} else
			l2cap_do_start(chan);
	}

	err = 0;

chan_unlock:
	l2cap_chan_unlock(chan);
	mutex_unlock(&conn->chan_lock);
done:
	hci_dev_unlock(hdev);
	hci_dev_put(hdev);
	return err;
}
7186EXPORT_SYMBOL_GPL(l2cap_chan_connect);
7187
7188/* ---- L2CAP interface with lower layer (HCI) ---- */
7189
7190int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
7191{
7192 int exact = 0, lm1 = 0, lm2 = 0;
7193 struct l2cap_chan *c;
7194
7195 BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);
7196
7197 /* Find listening sockets and check their link_mode */
7198 read_lock(&chan_list_lock);
7199 list_for_each_entry(c, &chan_list, global_l) {
7200 if (c->state != BT_LISTEN)
7201 continue;
7202
7203 if (!bacmp(&c->src, &hdev->bdaddr)) {
7204 lm1 |= HCI_LM_ACCEPT;
7205 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
7206 lm1 |= HCI_LM_MASTER;
7207 exact++;
7208 } else if (!bacmp(&c->src, BDADDR_ANY)) {
7209 lm2 |= HCI_LM_ACCEPT;
7210 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
7211 lm2 |= HCI_LM_MASTER;
7212 }
7213 }
7214 read_unlock(&chan_list_lock);
7215
7216 return exact ? lm1 : lm2;
7217}
7218
7219/* Find the next fixed channel in BT_LISTEN state, continue iteration
7220 * from an existing channel in the list or from the beginning of the
7221 * global list (by passing NULL as first parameter).
7222 */
static struct l2cap_chan *l2cap_global_fixed_chan(struct l2cap_chan *c,
						  struct hci_conn *hcon)
{
	u8 src_type = bdaddr_src_type(hcon);

	read_lock(&chan_list_lock);

	/* Resume right after the previously returned channel, or start
	 * from the head of the global list when c is NULL.  NOTE(review):
	 * this assumes the caller still holds a reference on c so it is
	 * guaranteed to remain on the list — the caller in
	 * l2cap_connect_cfm() does exactly that.
	 */
	if (c)
		c = list_next_entry(c, global_l);
	else
		c = list_entry(chan_list.next, typeof(*c), global_l);

	list_for_each_entry_from(c, &chan_list, global_l) {
		if (c->chan_type != L2CAP_CHAN_FIXED)
			continue;
		if (c->state != BT_LISTEN)
			continue;
		/* Channel's source must match hcon's source address, or be
		 * a wildcard (BDADDR_ANY) listener.
		 */
		if (bacmp(&c->src, &hcon->src) && bacmp(&c->src, BDADDR_ANY))
			continue;
		if (src_type != c->src_type)
			continue;

		/* Take a hold before dropping the lock so the channel
		 * cannot disappear while the caller uses it.
		 */
		l2cap_chan_hold(c);
		read_unlock(&chan_list_lock);
		return c;
	}

	read_unlock(&chan_list_lock);

	return NULL;
}
7254
/* HCI callback: an ACL or LE connection attempt to hcon->dst has
 * completed with @status.  On success, set up the l2cap_conn and give
 * every matching fixed-channel listener the chance to attach a channel;
 * on failure, tear down any L2CAP state for this hcon.
 */
static void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
{
	struct hci_dev *hdev = hcon->hdev;
	struct l2cap_conn *conn;
	struct l2cap_chan *pchan;
	u8 dst_type;

	/* L2CAP only runs over ACL and LE links */
	if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
		return;

	BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);

	if (status) {
		/* Connect failed: drop any existing L2CAP state */
		l2cap_conn_del(hcon, bt_to_errno(status));
		return;
	}

	conn = l2cap_conn_add(hcon);
	if (!conn)
		return;

	dst_type = bdaddr_dst_type(hcon);

	/* If device is blocked, do not create channels for it */
	if (hci_bdaddr_list_lookup(&hdev->blacklist, &hcon->dst, dst_type))
		return;

	/* Find fixed channels and notify them of the new connection. We
	 * use multiple individual lookups, continuing each time where
	 * we left off, because the list lock would prevent calling the
	 * potentially sleeping l2cap_chan_lock() function.
	 */
	pchan = l2cap_global_fixed_chan(NULL, hcon);
	while (pchan) {
		struct l2cap_chan *chan, *next;

		/* Client fixed channels should override server ones */
		if (__l2cap_get_chan_by_dcid(conn, pchan->scid))
			goto next;

		l2cap_chan_lock(pchan);
		chan = pchan->ops->new_connection(pchan);
		if (chan) {
			/* Bind the new channel to this link's addresses and
			 * attach it to the connection.
			 */
			bacpy(&chan->src, &hcon->src);
			bacpy(&chan->dst, &hcon->dst);
			chan->src_type = bdaddr_src_type(hcon);
			chan->dst_type = dst_type;

			__l2cap_chan_add(conn, chan);
		}

		l2cap_chan_unlock(pchan);
next:
		/* Look up the successor before releasing our hold on the
		 * current channel, since iteration resumes from it.
		 */
		next = l2cap_global_fixed_chan(pchan, hcon);
		l2cap_chan_put(pchan);
		pchan = next;
	}

	l2cap_conn_ready(conn);
}
7315
7316int l2cap_disconn_ind(struct hci_conn *hcon)
7317{
7318 struct l2cap_conn *conn = hcon->l2cap_data;
7319
7320 BT_DBG("hcon %p", hcon);
7321
7322 if (!conn)
7323 return HCI_ERROR_REMOTE_USER_TERM;
7324 return conn->disc_reason;
7325}
7326
7327static void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
7328{
7329 if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
7330 return;
7331
7332 BT_DBG("hcon %p reason %d", hcon, reason);
7333
7334 l2cap_conn_del(hcon, bt_to_errno(reason));
7335}
7336
7337static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
7338{
7339 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
7340 return;
7341
7342 if (encrypt == 0x00) {
7343 if (chan->sec_level == BT_SECURITY_MEDIUM) {
7344 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
7345 } else if (chan->sec_level == BT_SECURITY_HIGH ||
7346 chan->sec_level == BT_SECURITY_FIPS)
7347 l2cap_chan_close(chan, ECONNREFUSED);
7348 } else {
7349 if (chan->sec_level == BT_SECURITY_MEDIUM)
7350 __clear_chan_timer(chan);
7351 }
7352}
7353
/* HCI callback: an authentication/encryption procedure on @hcon has
 * finished with @status (0 = success) and resulting @encrypt state.
 * Walk every channel on the connection and advance its state machine
 * accordingly (resume data, start the connect, or answer a pending
 * connect request).
 */
static void l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan;

	if (!conn)
		return;

	BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
		       state_to_string(chan->state));

		/* The A2MP signalling channel is exempt from link security */
		if (chan->scid == L2CAP_CID_A2MP) {
			l2cap_chan_unlock(chan);
			continue;
		}

		/* Successful encryption raises the channel's security level
		 * to the link's level.
		 */
		if (!status && encrypt)
			chan->sec_level = hcon->sec_level;

		if (!__l2cap_no_conn_pending(chan)) {
			l2cap_chan_unlock(chan);
			continue;
		}

		/* Already up (or configuring): resume traffic and re-check
		 * the channel's encryption requirements.
		 */
		if (!status && (chan->state == BT_CONNECTED ||
				chan->state == BT_CONFIG)) {
			chan->ops->resume(chan);
			l2cap_check_encryption(chan, encrypt);
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			/* Outgoing connect was waiting on security */
			if (!status)
				l2cap_start_connection(chan);
			else
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
		} else if (chan->state == BT_CONNECT2 &&
			   chan->mode != L2CAP_MODE_LE_FLOWCTL) {
			/* Incoming connect was waiting on security: build and
			 * send the connect response now.
			 */
			struct l2cap_conn_rsp rsp;
			__u16 res, stat;

			if (!status) {
				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
					/* Socket owner must accept/reject */
					res = L2CAP_CR_PEND;
					stat = L2CAP_CS_AUTHOR_PEND;
					chan->ops->defer(chan);
				} else {
					l2cap_state_change(chan, BT_CONFIG);
					res = L2CAP_CR_SUCCESS;
					stat = L2CAP_CS_NO_INFO;
				}
			} else {
				/* Security failed: reject as blocked and arm
				 * the disconnect timer.
				 */
				l2cap_state_change(chan, BT_DISCONN);
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
				res = L2CAP_CR_SEC_BLOCK;
				stat = L2CAP_CS_NO_INFO;
			}

			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);
			rsp.result = cpu_to_le16(res);
			rsp.status = cpu_to_le16(stat);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
			    res == L2CAP_CR_SUCCESS) {
				/* NOTE(review): 128 bytes is assumed large
				 * enough for the built configure request —
				 * confirm against l2cap_build_conf_req().
				 */
				char buf[128];
				set_bit(CONF_REQ_SENT, &chan->conf_state);
				l2cap_send_cmd(conn, l2cap_get_ident(conn),
					       L2CAP_CONF_REQ,
					       l2cap_build_conf_req(chan, buf),
					       buf);
				chan->num_conf_req++;
			}
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
7444
/* Entry point for ACL data from the HCI core.  Reassembles L2CAP PDUs
 * that arrive fragmented across several ACL packets (an ACL_START
 * fragment carrying the Basic L2CAP header, followed by ACL_CONT
 * fragments) and hands each complete frame to l2cap_recv_frame(),
 * which takes ownership of the skb.  @skb is consumed in all cases.
 */
void l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_hdr *hdr;
	int len;

	/* For AMP controller do not create l2cap conn */
	if (!conn && hcon->hdev->dev_type != HCI_BREDR)
		goto drop;

	if (!conn)
		conn = l2cap_conn_add(hcon);

	if (!conn)
		goto drop;

	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);

	switch (flags) {
	case ACL_START:
	case ACL_START_NO_FLUSH:
	case ACL_COMPLETE:
		/* A start frame while reassembly is in progress means the
		 * previous PDU was never completed: discard it and mark the
		 * connection unreliable.
		 */
		if (conn->rx_len) {
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
		}

		/* Start fragment always begin with Basic L2CAP header */
		if (skb->len < L2CAP_HDR_SIZE) {
			BT_ERR("Frame is too short (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		hdr = (struct l2cap_hdr *) skb->data;
		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);
			return;
		}

		BT_DBG("Start: total len %d, frag len %d", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %d, expected len %d)",
			       skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
		if (!conn->rx_skb)
			goto drop;

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
					  skb->len);
		/* rx_len tracks how many bytes are still outstanding */
		conn->rx_len = len - skb->len;
		break;

	case ACL_CONT:
		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);

		/* Continuation without a pending start frame */
		if (!conn->rx_len) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Fragment would overflow the expected PDU: abort reassembly */
		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %d, expected %d)",
			       skb->len, conn->rx_len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
					  skb->len);
		conn->rx_len -= skb->len;

		if (!conn->rx_len) {
			/* Complete frame received. l2cap_recv_frame
			 * takes ownership of the skb so set the global
			 * rx_skb pointer to NULL first.
			 */
			struct sk_buff *rx_skb = conn->rx_skb;
			conn->rx_skb = NULL;
			l2cap_recv_frame(conn, rx_skb);
		}
		break;
	}

drop:
	kfree_skb(skb);
}
7548
/* HCI callbacks that hook L2CAP into connect, disconnect and security
 * (authentication/encryption) events; registered in l2cap_init().
 */
static struct hci_cb l2cap_cb = {
	.name		= "L2CAP",
	.connect_cfm	= l2cap_connect_cfm,
	.disconn_cfm	= l2cap_disconn_cfm,
	.security_cfm	= l2cap_security_cfm,
};
7555
7556static int l2cap_debugfs_show(struct seq_file *f, void *p)
7557{
7558 struct l2cap_chan *c;
7559
7560 read_lock(&chan_list_lock);
7561
7562 list_for_each_entry(c, &chan_list, global_l) {
7563 seq_printf(f, "%pMR (%u) %pMR (%u) %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
7564 &c->src, c->src_type, &c->dst, c->dst_type,
7565 c->state, __le16_to_cpu(c->psm),
7566 c->scid, c->dcid, c->imtu, c->omtu,
7567 c->sec_level, c->mode);
7568 }
7569
7570 read_unlock(&chan_list_lock);
7571
7572 return 0;
7573}
7574
/* Bind the single-shot seq_file show routine to the debugfs file */
static int l2cap_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, l2cap_debugfs_show, inode->i_private);
}
7579
/* File operations for the "l2cap" debugfs entry (read-only seq_file) */
static const struct file_operations l2cap_debugfs_fops = {
	.open		= l2cap_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/* Dentry of the debugfs file; created in l2cap_init(), removed in
 * l2cap_exit().
 */
static struct dentry *l2cap_debugfs;
7588
7589int __init l2cap_init(void)
7590{
7591 int err;
7592
7593 err = l2cap_init_sockets();
7594 if (err < 0)
7595 return err;
7596
7597 hci_register_cb(&l2cap_cb);
7598
7599 if (IS_ERR_OR_NULL(bt_debugfs))
7600 return 0;
7601
7602 l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
7603 NULL, &l2cap_debugfs_fops);
7604
7605 debugfs_create_u16("l2cap_le_max_credits", 0644, bt_debugfs,
7606 &le_max_credits);
7607 debugfs_create_u16("l2cap_le_default_mps", 0644, bt_debugfs,
7608 &le_default_mps);
7609
7610 return 0;
7611}
7612
/* Module exit: undo l2cap_init() in reverse order — remove the debugfs
 * file, unregister the HCI callbacks, then tear down the socket layer.
 */
void l2cap_exit(void)
{
	debugfs_remove(l2cap_debugfs);
	hci_unregister_cb(&l2cap_cb);
	l2cap_cleanup_sockets();
}
7619
/* Runtime knob: when set, ERTM and streaming modes are rejected in the
 * mode checks above and only basic mode is negotiated.
 */
module_param(disable_ertm, bool, 0644);
MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");