Bluetooth: Add key preference parameter to smp_sufficient_security
[deliverable/linux.git] / net / bluetooth / l2cap_core.c
... / ...
CommitLineData
1/*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
8
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
10
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
14
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
23
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
27*/
28
29/* Bluetooth L2CAP core. */
30
31#include <linux/module.h>
32
33#include <linux/debugfs.h>
34#include <linux/crc16.h>
35
36#include <net/bluetooth/bluetooth.h>
37#include <net/bluetooth/hci_core.h>
38#include <net/bluetooth/l2cap.h>
39
40#include "smp.h"
41#include "a2mp.h"
42#include "amp.h"
43
44#define LE_FLOWCTL_MAX_CREDITS 65535
45
46bool disable_ertm;
47
48static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD;
49static u8 l2cap_fixed_chan[8] = { L2CAP_FC_SIG_BREDR | L2CAP_FC_CONNLESS, };
50
51static LIST_HEAD(chan_list);
52static DEFINE_RWLOCK(chan_list_lock);
53
54static u16 le_max_credits = L2CAP_LE_MAX_CREDITS;
55static u16 le_default_mps = L2CAP_LE_DEFAULT_MPS;
56
57static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
58 u8 code, u8 ident, u16 dlen, void *data);
59static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
60 void *data);
61static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
62static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
63
64static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
65 struct sk_buff_head *skbs, u8 event);
66
67static inline __u8 bdaddr_type(struct hci_conn *hcon, __u8 type)
68{
69 if (hcon->type == LE_LINK) {
70 if (type == ADDR_LE_DEV_PUBLIC)
71 return BDADDR_LE_PUBLIC;
72 else
73 return BDADDR_LE_RANDOM;
74 }
75
76 return BDADDR_BREDR;
77}
78
79/* ---- L2CAP channels ---- */
80
81static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
82 u16 cid)
83{
84 struct l2cap_chan *c;
85
86 list_for_each_entry(c, &conn->chan_l, list) {
87 if (c->dcid == cid)
88 return c;
89 }
90 return NULL;
91}
92
93static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
94 u16 cid)
95{
96 struct l2cap_chan *c;
97
98 list_for_each_entry(c, &conn->chan_l, list) {
99 if (c->scid == cid)
100 return c;
101 }
102 return NULL;
103}
104
105/* Find channel with given SCID.
106 * Returns locked channel. */
107static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
108 u16 cid)
109{
110 struct l2cap_chan *c;
111
112 mutex_lock(&conn->chan_lock);
113 c = __l2cap_get_chan_by_scid(conn, cid);
114 if (c)
115 l2cap_chan_lock(c);
116 mutex_unlock(&conn->chan_lock);
117
118 return c;
119}
120
121/* Find channel with given DCID.
122 * Returns locked channel.
123 */
124static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
125 u16 cid)
126{
127 struct l2cap_chan *c;
128
129 mutex_lock(&conn->chan_lock);
130 c = __l2cap_get_chan_by_dcid(conn, cid);
131 if (c)
132 l2cap_chan_lock(c);
133 mutex_unlock(&conn->chan_lock);
134
135 return c;
136}
137
138static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
139 u8 ident)
140{
141 struct l2cap_chan *c;
142
143 list_for_each_entry(c, &conn->chan_l, list) {
144 if (c->ident == ident)
145 return c;
146 }
147 return NULL;
148}
149
150static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
151 u8 ident)
152{
153 struct l2cap_chan *c;
154
155 mutex_lock(&conn->chan_lock);
156 c = __l2cap_get_chan_by_ident(conn, ident);
157 if (c)
158 l2cap_chan_lock(c);
159 mutex_unlock(&conn->chan_lock);
160
161 return c;
162}
163
164static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
165{
166 struct l2cap_chan *c;
167
168 list_for_each_entry(c, &chan_list, global_l) {
169 if (c->sport == psm && !bacmp(&c->src, src))
170 return c;
171 }
172 return NULL;
173}
174
175int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
176{
177 int err;
178
179 write_lock(&chan_list_lock);
180
181 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
182 err = -EADDRINUSE;
183 goto done;
184 }
185
186 if (psm) {
187 chan->psm = psm;
188 chan->sport = psm;
189 err = 0;
190 } else {
191 u16 p;
192
193 err = -EINVAL;
194 for (p = 0x1001; p < 0x1100; p += 2)
195 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
196 chan->psm = cpu_to_le16(p);
197 chan->sport = cpu_to_le16(p);
198 err = 0;
199 break;
200 }
201 }
202
203done:
204 write_unlock(&chan_list_lock);
205 return err;
206}
207EXPORT_SYMBOL_GPL(l2cap_add_psm);
208
209int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
210{
211 write_lock(&chan_list_lock);
212
213 /* Override the defaults (which are for conn-oriented) */
214 chan->omtu = L2CAP_DEFAULT_MTU;
215 chan->chan_type = L2CAP_CHAN_FIXED;
216
217 chan->scid = scid;
218
219 write_unlock(&chan_list_lock);
220
221 return 0;
222}
223
224static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
225{
226 u16 cid, dyn_end;
227
228 if (conn->hcon->type == LE_LINK)
229 dyn_end = L2CAP_CID_LE_DYN_END;
230 else
231 dyn_end = L2CAP_CID_DYN_END;
232
233 for (cid = L2CAP_CID_DYN_START; cid < dyn_end; cid++) {
234 if (!__l2cap_get_chan_by_scid(conn, cid))
235 return cid;
236 }
237
238 return 0;
239}
240
241static void l2cap_state_change(struct l2cap_chan *chan, int state)
242{
243 BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
244 state_to_string(state));
245
246 chan->state = state;
247 chan->ops->state_change(chan, state, 0);
248}
249
250static inline void l2cap_state_change_and_error(struct l2cap_chan *chan,
251 int state, int err)
252{
253 chan->state = state;
254 chan->ops->state_change(chan, chan->state, err);
255}
256
257static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
258{
259 chan->ops->state_change(chan, chan->state, err);
260}
261
262static void __set_retrans_timer(struct l2cap_chan *chan)
263{
264 if (!delayed_work_pending(&chan->monitor_timer) &&
265 chan->retrans_timeout) {
266 l2cap_set_timer(chan, &chan->retrans_timer,
267 msecs_to_jiffies(chan->retrans_timeout));
268 }
269}
270
271static void __set_monitor_timer(struct l2cap_chan *chan)
272{
273 __clear_retrans_timer(chan);
274 if (chan->monitor_timeout) {
275 l2cap_set_timer(chan, &chan->monitor_timer,
276 msecs_to_jiffies(chan->monitor_timeout));
277 }
278}
279
280static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
281 u16 seq)
282{
283 struct sk_buff *skb;
284
285 skb_queue_walk(head, skb) {
286 if (bt_cb(skb)->control.txseq == seq)
287 return skb;
288 }
289
290 return NULL;
291}
292
293/* ---- L2CAP sequence number lists ---- */
294
295/* For ERTM, ordered lists of sequence numbers must be tracked for
296 * SREJ requests that are received and for frames that are to be
297 * retransmitted. These seq_list functions implement a singly-linked
298 * list in an array, where membership in the list can also be checked
299 * in constant time. Items can also be added to the tail of the list
300 * and removed from the head in constant time, without further memory
301 * allocs or frees.
302 */
303
304static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
305{
306 size_t alloc_size, i;
307
308 /* Allocated size is a power of 2 to map sequence numbers
309 * (which may be up to 14 bits) in to a smaller array that is
310 * sized for the negotiated ERTM transmit windows.
311 */
312 alloc_size = roundup_pow_of_two(size);
313
314 seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
315 if (!seq_list->list)
316 return -ENOMEM;
317
318 seq_list->mask = alloc_size - 1;
319 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
320 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
321 for (i = 0; i < alloc_size; i++)
322 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
323
324 return 0;
325}
326
327static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
328{
329 kfree(seq_list->list);
330}
331
332static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
333 u16 seq)
334{
335 /* Constant-time check for list membership */
336 return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
337}
338
/* Remove and return the sequence number at the head of @seq_list.
 * Runs in constant time.  The caller must ensure the list is non-empty;
 * popping an empty list is not handled here.
 */
static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
{
	u16 seq = seq_list->head;
	u16 mask = seq_list->mask;

	/* Advance head to the popped entry's successor and clear the slot */
	seq_list->head = seq_list->list[seq & mask];
	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;

	/* The last entry links to the TAIL sentinel; reaching it means the
	 * list is now empty, so reset both head and tail.
	 */
	if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
		seq_list->head = L2CAP_SEQ_LIST_CLEAR;
		seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
	}

	return seq;
}
354
355static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
356{
357 u16 i;
358
359 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
360 return;
361
362 for (i = 0; i <= seq_list->mask; i++)
363 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
364
365 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
366 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
367}
368
/* Append @seq to the tail of @seq_list in constant time.  Appending a
 * sequence number that is already a member is a no-op.
 */
static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
{
	u16 mask = seq_list->mask;

	/* All appends happen in constant time */

	/* Already in the list (slot holds a successor or TAIL sentinel) */
	if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
		return;

	if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
		/* List was empty: new entry becomes the head too */
		seq_list->head = seq;
	else
		/* Link the previous tail to the new entry */
		seq_list->list[seq_list->tail & mask] = seq;

	seq_list->tail = seq;
	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
}
386
/* Delayed-work handler for chan->chan_timer.  Closes the channel with
 * an errno derived from the state it timed out in and drops the
 * reference the timer held on the channel.
 */
static void l2cap_chan_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       chan_timer.work);
	struct l2cap_conn *conn = chan->conn;
	int reason;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	/* Lock ordering: conn->chan_lock before the channel lock */
	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(chan);

	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (chan->state == BT_CONNECT &&
		 chan->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	l2cap_chan_close(chan, reason);

	l2cap_chan_unlock(chan);

	/* ops->close is invoked without the channel lock held */
	chan->ops->close(chan);
	mutex_unlock(&conn->chan_lock);

	/* Drop the reference taken when the timer was armed */
	l2cap_chan_put(chan);
}
416
417struct l2cap_chan *l2cap_chan_create(void)
418{
419 struct l2cap_chan *chan;
420
421 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
422 if (!chan)
423 return NULL;
424
425 mutex_init(&chan->lock);
426
427 /* Set default lock nesting level */
428 atomic_set(&chan->nesting, L2CAP_NESTING_NORMAL);
429
430 write_lock(&chan_list_lock);
431 list_add(&chan->global_l, &chan_list);
432 write_unlock(&chan_list_lock);
433
434 INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
435
436 chan->state = BT_OPEN;
437
438 kref_init(&chan->kref);
439
440 /* This flag is cleared in l2cap_chan_ready() */
441 set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
442
443 BT_DBG("chan %p", chan);
444
445 return chan;
446}
447EXPORT_SYMBOL_GPL(l2cap_chan_create);
448
449static void l2cap_chan_destroy(struct kref *kref)
450{
451 struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);
452
453 BT_DBG("chan %p", chan);
454
455 write_lock(&chan_list_lock);
456 list_del(&chan->global_l);
457 write_unlock(&chan_list_lock);
458
459 kfree(chan);
460}
461
462void l2cap_chan_hold(struct l2cap_chan *c)
463{
464 BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));
465
466 kref_get(&c->kref);
467}
468
469void l2cap_chan_put(struct l2cap_chan *c)
470{
471 BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));
472
473 kref_put(&c->kref, l2cap_chan_destroy);
474}
475EXPORT_SYMBOL_GPL(l2cap_chan_put);
476
477void l2cap_chan_set_defaults(struct l2cap_chan *chan)
478{
479 chan->fcs = L2CAP_FCS_CRC16;
480 chan->max_tx = L2CAP_DEFAULT_MAX_TX;
481 chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
482 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
483 chan->remote_max_tx = chan->max_tx;
484 chan->remote_tx_win = chan->tx_win;
485 chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
486 chan->sec_level = BT_SECURITY_LOW;
487 chan->flush_to = L2CAP_DEFAULT_FLUSH_TO;
488 chan->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
489 chan->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
490 chan->conf_state = 0;
491
492 set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
493}
494EXPORT_SYMBOL_GPL(l2cap_chan_set_defaults);
495
496static void l2cap_le_flowctl_init(struct l2cap_chan *chan)
497{
498 chan->sdu = NULL;
499 chan->sdu_last_frag = NULL;
500 chan->sdu_len = 0;
501 chan->tx_credits = 0;
502 chan->rx_credits = le_max_credits;
503 chan->mps = min_t(u16, chan->imtu, le_default_mps);
504
505 skb_queue_head_init(&chan->tx_q);
506}
507
/* Attach @chan to @conn: assign CIDs according to the channel type,
 * seed the default extended flow spec and link the channel onto the
 * connection's channel list.  Caller holds conn->chan_lock.
 */
void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
	       __le16_to_cpu(chan->psm), chan->dcid);

	/* Adding a channel resets the pending disconnect reason */
	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	chan->conn = conn;

	switch (chan->chan_type) {
	case L2CAP_CHAN_CONN_ORIENTED:
		/* Alloc CID for connection-oriented socket */
		chan->scid = l2cap_alloc_cid(conn);
		if (conn->hcon->type == ACL_LINK)
			chan->omtu = L2CAP_DEFAULT_MTU;
		break;

	case L2CAP_CHAN_CONN_LESS:
		/* Connectionless socket */
		chan->scid = L2CAP_CID_CONN_LESS;
		chan->dcid = L2CAP_CID_CONN_LESS;
		chan->omtu = L2CAP_DEFAULT_MTU;
		break;

	case L2CAP_CHAN_FIXED:
		/* Caller will set CID and CID specific MTU values */
		break;

	default:
		/* Raw socket can send/recv signalling messages only */
		chan->scid = L2CAP_CID_SIGNALING;
		chan->dcid = L2CAP_CID_SIGNALING;
		chan->omtu = L2CAP_DEFAULT_MTU;
	}

	/* Default extended flow spec: best effort */
	chan->local_id = L2CAP_BESTEFFORT_ID;
	chan->local_stype = L2CAP_SERV_BESTEFFORT;
	chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
	chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
	chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
	chan->local_flush_to = L2CAP_EFS_DEFAULT_FLUSH_TO;

	/* The conn->chan_l list owns a channel reference */
	l2cap_chan_hold(chan);

	/* Only keep a reference for fixed channels if they requested it */
	if (chan->chan_type != L2CAP_CHAN_FIXED ||
	    test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
		hci_conn_hold(conn->hcon);

	list_add(&chan->list, &conn->chan_l);
}
559
560void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
561{
562 mutex_lock(&conn->chan_lock);
563 __l2cap_chan_add(conn, chan);
564 mutex_unlock(&conn->chan_lock);
565}
566
/* Detach @chan from its connection and tear down its mode-specific
 * state, reporting @err to the owner.  Caller holds the channel lock
 * (and conn->chan_lock when chan->conn is set).
 */
void l2cap_chan_del(struct l2cap_chan *chan, int err)
{
	struct l2cap_conn *conn = chan->conn;

	__clear_chan_timer(chan);

	BT_DBG("chan %p, conn %p, err %d", chan, conn, err);

	/* Notify the owner before unlinking */
	chan->ops->teardown(chan, err);

	if (conn) {
		struct amp_mgr *mgr = conn->hcon->amp_mgr;
		/* Delete from channel list */
		list_del(&chan->list);

		/* Drop the reference held by conn->chan_l */
		l2cap_chan_put(chan);

		chan->conn = NULL;

		/* Reference was only held for non-fixed channels or
		 * fixed channels that explicitly requested it using the
		 * FLAG_HOLD_HCI_CONN flag.
		 */
		if (chan->chan_type != L2CAP_CHAN_FIXED ||
		    test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
			hci_conn_drop(conn->hcon);

		/* Clear a stale AMP manager back-pointer to this channel */
		if (mgr && mgr->bredr_chan == chan)
			mgr->bredr_chan = NULL;
	}

	/* A channel moved to AMP has a logical link to bring down too */
	if (chan->hs_hchan) {
		struct hci_chan *hs_hchan = chan->hs_hchan;

		BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan);
		amp_disconnect_logical_link(hs_hchan);
	}

	/* Nothing mode-specific was set up yet for unconfigured channels */
	if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
		return;

	switch(chan->mode) {
	case L2CAP_MODE_BASIC:
		break;

	case L2CAP_MODE_LE_FLOWCTL:
		skb_queue_purge(&chan->tx_q);
		break;

	case L2CAP_MODE_ERTM:
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);

		skb_queue_purge(&chan->srej_q);

		l2cap_seq_list_free(&chan->srej_list);
		l2cap_seq_list_free(&chan->retrans_list);

		/* fall through */

	case L2CAP_MODE_STREAMING:
		skb_queue_purge(&chan->tx_q);
		break;
	}

	return;
}
EXPORT_SYMBOL_GPL(l2cap_chan_del);
636
637static void l2cap_conn_update_id_addr(struct work_struct *work)
638{
639 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
640 id_addr_update_work);
641 struct hci_conn *hcon = conn->hcon;
642 struct l2cap_chan *chan;
643
644 mutex_lock(&conn->chan_lock);
645
646 list_for_each_entry(chan, &conn->chan_l, list) {
647 l2cap_chan_lock(chan);
648 bacpy(&chan->dst, &hcon->dst);
649 chan->dst_type = bdaddr_type(hcon, hcon->dst_type);
650 l2cap_chan_unlock(chan);
651 }
652
653 mutex_unlock(&conn->chan_lock);
654}
655
656static void l2cap_chan_le_connect_reject(struct l2cap_chan *chan)
657{
658 struct l2cap_conn *conn = chan->conn;
659 struct l2cap_le_conn_rsp rsp;
660 u16 result;
661
662 if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
663 result = L2CAP_CR_AUTHORIZATION;
664 else
665 result = L2CAP_CR_BAD_PSM;
666
667 l2cap_state_change(chan, BT_DISCONN);
668
669 rsp.dcid = cpu_to_le16(chan->scid);
670 rsp.mtu = cpu_to_le16(chan->imtu);
671 rsp.mps = cpu_to_le16(chan->mps);
672 rsp.credits = cpu_to_le16(chan->rx_credits);
673 rsp.result = cpu_to_le16(result);
674
675 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
676 &rsp);
677}
678
679static void l2cap_chan_connect_reject(struct l2cap_chan *chan)
680{
681 struct l2cap_conn *conn = chan->conn;
682 struct l2cap_conn_rsp rsp;
683 u16 result;
684
685 if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
686 result = L2CAP_CR_SEC_BLOCK;
687 else
688 result = L2CAP_CR_BAD_PSM;
689
690 l2cap_state_change(chan, BT_DISCONN);
691
692 rsp.scid = cpu_to_le16(chan->dcid);
693 rsp.dcid = cpu_to_le16(chan->scid);
694 rsp.result = cpu_to_le16(result);
695 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
696
697 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
698}
699
/* Close @chan, choosing the teardown path appropriate for its current
 * state.  @reason is reported to the channel owner.  Caller holds the
 * channel lock.
 */
void l2cap_chan_close(struct l2cap_chan *chan, int reason)
{
	struct l2cap_conn *conn = chan->conn;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	switch (chan->state) {
	case BT_LISTEN:
		chan->ops->teardown(chan, 0);
		break;

	case BT_CONNECTED:
	case BT_CONFIG:
		/* Established conn-oriented channels need a disconnect
		 * handshake with the peer; the channel timer guards
		 * against the peer never answering.
		 */
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
			__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
			l2cap_send_disconn_req(chan, reason);
		} else
			l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT2:
		/* Incoming request not yet accepted: send a reject
		 * matching the transport before deleting the channel.
		 */
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
			if (conn->hcon->type == ACL_LINK)
				l2cap_chan_connect_reject(chan);
			else if (conn->hcon->type == LE_LINK)
				l2cap_chan_le_connect_reject(chan);
		}

		l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT:
	case BT_DISCONN:
		l2cap_chan_del(chan, reason);
		break;

	default:
		chan->ops->teardown(chan, 0);
		break;
	}
}
EXPORT_SYMBOL(l2cap_chan_close);
742
/* Derive the HCI authentication requirement for @chan from its channel
 * type, PSM and security level.  As a side effect, SDP-like PSMs at
 * BT_SECURITY_LOW are bumped to BT_SECURITY_SDP.
 */
static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
{
	switch (chan->chan_type) {
	case L2CAP_CHAN_RAW:
		/* Raw channels (e.g. dedicated bonding) */
		switch (chan->sec_level) {
		case BT_SECURITY_HIGH:
		case BT_SECURITY_FIPS:
			return HCI_AT_DEDICATED_BONDING_MITM;
		case BT_SECURITY_MEDIUM:
			return HCI_AT_DEDICATED_BONDING;
		default:
			return HCI_AT_NO_BONDING;
		}
		break;
	case L2CAP_CHAN_CONN_LESS:
		/* 3D sync profile PSM gets SDP-level security minimum */
		if (chan->psm == cpu_to_le16(L2CAP_PSM_3DSP)) {
			if (chan->sec_level == BT_SECURITY_LOW)
				chan->sec_level = BT_SECURITY_SDP;
		}
		if (chan->sec_level == BT_SECURITY_HIGH ||
		    chan->sec_level == BT_SECURITY_FIPS)
			return HCI_AT_NO_BONDING_MITM;
		else
			return HCI_AT_NO_BONDING;
		break;
	case L2CAP_CHAN_CONN_ORIENTED:
		/* SDP connections never require bonding */
		if (chan->psm == cpu_to_le16(L2CAP_PSM_SDP)) {
			if (chan->sec_level == BT_SECURITY_LOW)
				chan->sec_level = BT_SECURITY_SDP;

			if (chan->sec_level == BT_SECURITY_HIGH ||
			    chan->sec_level == BT_SECURITY_FIPS)
				return HCI_AT_NO_BONDING_MITM;
			else
				return HCI_AT_NO_BONDING;
		}
		/* fall through */
	default:
		switch (chan->sec_level) {
		case BT_SECURITY_HIGH:
		case BT_SECURITY_FIPS:
			return HCI_AT_GENERAL_BONDING_MITM;
		case BT_SECURITY_MEDIUM:
			return HCI_AT_GENERAL_BONDING;
		default:
			return HCI_AT_NO_BONDING;
		}
		break;
	}
}
793
794/* Service level security */
795int l2cap_chan_check_security(struct l2cap_chan *chan, bool initiator)
796{
797 struct l2cap_conn *conn = chan->conn;
798 __u8 auth_type;
799
800 if (conn->hcon->type == LE_LINK)
801 return smp_conn_security(conn->hcon, chan->sec_level);
802
803 auth_type = l2cap_get_auth_type(chan);
804
805 return hci_conn_security(conn->hcon, chan->sec_level, auth_type,
806 initiator);
807}
808
809static u8 l2cap_get_ident(struct l2cap_conn *conn)
810{
811 u8 id;
812
813 /* Get next available identificator.
814 * 1 - 128 are used by kernel.
815 * 129 - 199 are reserved.
816 * 200 - 254 are used by utilities like l2ping, etc.
817 */
818
819 mutex_lock(&conn->ident_lock);
820
821 if (++conn->tx_ident > 128)
822 conn->tx_ident = 1;
823
824 id = conn->tx_ident;
825
826 mutex_unlock(&conn->ident_lock);
827
828 return id;
829}
830
831static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
832 void *data)
833{
834 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
835 u8 flags;
836
837 BT_DBG("code 0x%2.2x", code);
838
839 if (!skb)
840 return;
841
842 if (lmp_no_flush_capable(conn->hcon->hdev))
843 flags = ACL_START_NO_FLUSH;
844 else
845 flags = ACL_START;
846
847 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
848 skb->priority = HCI_PRIO_MAX;
849
850 hci_send_acl(conn->hchan, skb, flags);
851}
852
853static bool __chan_is_moving(struct l2cap_chan *chan)
854{
855 return chan->move_state != L2CAP_MOVE_STABLE &&
856 chan->move_state != L2CAP_MOVE_WAIT_PREPARE;
857}
858
/* Transmit @skb on @chan, routing it over the AMP controller when the
 * channel has been moved to one, otherwise over the BR/EDR ACL link.
 */
static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct hci_conn *hcon = chan->conn->hcon;
	u16 flags;

	BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
	       skb->priority);

	/* Channel lives on an AMP link and is not mid-move: send there.
	 * A missing hs_hchan means the logical link is gone, so the
	 * frame is dropped rather than misrouted over BR/EDR.
	 */
	if (chan->hs_hcon && !__chan_is_moving(chan)) {
		if (chan->hs_hchan)
			hci_send_acl(chan->hs_hchan, skb, ACL_COMPLETE);
		else
			kfree_skb(skb);

		return;
	}

	/* Use non-flushable packets when supported unless the channel
	 * explicitly asked to be flushable.
	 */
	if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
	    lmp_no_flush_capable(hcon->hdev))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
	hci_send_acl(chan->conn->hchan, skb, flags);
}
885
886static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
887{
888 control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
889 control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
890
891 if (enh & L2CAP_CTRL_FRAME_TYPE) {
892 /* S-Frame */
893 control->sframe = 1;
894 control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
895 control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
896
897 control->sar = 0;
898 control->txseq = 0;
899 } else {
900 /* I-Frame */
901 control->sframe = 0;
902 control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
903 control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
904
905 control->poll = 0;
906 control->super = 0;
907 }
908}
909
910static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
911{
912 control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
913 control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
914
915 if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
916 /* S-Frame */
917 control->sframe = 1;
918 control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
919 control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
920
921 control->sar = 0;
922 control->txseq = 0;
923 } else {
924 /* I-Frame */
925 control->sframe = 0;
926 control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
927 control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
928
929 control->poll = 0;
930 control->super = 0;
931 }
932}
933
934static inline void __unpack_control(struct l2cap_chan *chan,
935 struct sk_buff *skb)
936{
937 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
938 __unpack_extended_control(get_unaligned_le32(skb->data),
939 &bt_cb(skb)->control);
940 skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
941 } else {
942 __unpack_enhanced_control(get_unaligned_le16(skb->data),
943 &bt_cb(skb)->control);
944 skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
945 }
946}
947
948static u32 __pack_extended_control(struct l2cap_ctrl *control)
949{
950 u32 packed;
951
952 packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
953 packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
954
955 if (control->sframe) {
956 packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
957 packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
958 packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
959 } else {
960 packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
961 packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
962 }
963
964 return packed;
965}
966
967static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
968{
969 u16 packed;
970
971 packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
972 packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
973
974 if (control->sframe) {
975 packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
976 packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
977 packed |= L2CAP_CTRL_FRAME_TYPE;
978 } else {
979 packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
980 packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
981 }
982
983 return packed;
984}
985
986static inline void __pack_control(struct l2cap_chan *chan,
987 struct l2cap_ctrl *control,
988 struct sk_buff *skb)
989{
990 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
991 put_unaligned_le32(__pack_extended_control(control),
992 skb->data + L2CAP_HDR_SIZE);
993 } else {
994 put_unaligned_le16(__pack_enhanced_control(control),
995 skb->data + L2CAP_HDR_SIZE);
996 }
997}
998
999static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
1000{
1001 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1002 return L2CAP_EXT_HDR_SIZE;
1003 else
1004 return L2CAP_ENH_HDR_SIZE;
1005}
1006
/* Build an S-frame PDU for @chan carrying the already-encoded control
 * field @control.  Returns the skb or ERR_PTR(-ENOMEM).
 */
static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
					       u32 control)
{
	struct sk_buff *skb;
	struct l2cap_hdr *lh;
	int hlen = __ertm_hdr_size(chan);

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	skb = bt_skb_alloc(hlen, GFP_KERNEL);

	if (!skb)
		return ERR_PTR(-ENOMEM);

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	/* Length field excludes the basic header itself */
	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	if (chan->fcs == L2CAP_FCS_CRC16) {
		/* FCS covers everything written so far (header + control) */
		u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
		put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
	}

	skb->priority = HCI_PRIO_MAX;
	return skb;
}
1039
/* Transmit an S-frame described by @control on @chan, updating the
 * related ERTM bookkeeping (F-bit, RNR-sent flag, ack state) as a side
 * effect.  Non-S-frame controls and channels mid-move are ignored.
 */
static void l2cap_send_sframe(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;
	u32 control_field;

	BT_DBG("chan %p, control %p", chan, control);

	if (!control->sframe)
		return;

	/* No traffic while an AMP move is in progress */
	if (__chan_is_moving(chan))
		return;

	/* A pending F-bit is carried by the next non-poll S-frame */
	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
	    !control->poll)
		control->final = 1;

	/* Track whether our last supervisory frame signalled local busy */
	if (control->super == L2CAP_SUPER_RR)
		clear_bit(CONN_RNR_SENT, &chan->conn_state);
	else if (control->super == L2CAP_SUPER_RNR)
		set_bit(CONN_RNR_SENT, &chan->conn_state);

	/* RR/RNR/REJ acknowledge up to reqseq, so the ack timer can stop */
	if (control->super != L2CAP_SUPER_SREJ) {
		chan->last_acked_seq = control->reqseq;
		__clear_ack_timer(chan);
	}

	BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
	       control->final, control->poll, control->super);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		control_field = __pack_extended_control(control);
	else
		control_field = __pack_enhanced_control(control);

	skb = l2cap_create_sframe_pdu(chan, control_field);
	if (!IS_ERR(skb))
		l2cap_do_send(chan, skb);
}
1080
1081static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
1082{
1083 struct l2cap_ctrl control;
1084
1085 BT_DBG("chan %p, poll %d", chan, poll);
1086
1087 memset(&control, 0, sizeof(control));
1088 control.sframe = 1;
1089 control.poll = poll;
1090
1091 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
1092 control.super = L2CAP_SUPER_RNR;
1093 else
1094 control.super = L2CAP_SUPER_RR;
1095
1096 control.reqseq = chan->buffer_seq;
1097 l2cap_send_sframe(chan, &control);
1098}
1099
1100static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
1101{
1102 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
1103 return true;
1104
1105 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
1106}
1107
1108static bool __amp_capable(struct l2cap_chan *chan)
1109{
1110 struct l2cap_conn *conn = chan->conn;
1111 struct hci_dev *hdev;
1112 bool amp_available = false;
1113
1114 if (!conn->hs_enabled)
1115 return false;
1116
1117 if (!(conn->fixed_chan_mask & L2CAP_FC_A2MP))
1118 return false;
1119
1120 read_lock(&hci_dev_list_lock);
1121 list_for_each_entry(hdev, &hci_dev_list, list) {
1122 if (hdev->amp_type != AMP_TYPE_BREDR &&
1123 test_bit(HCI_UP, &hdev->flags)) {
1124 amp_available = true;
1125 break;
1126 }
1127 }
1128 read_unlock(&hci_dev_list_lock);
1129
1130 if (chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED)
1131 return amp_available;
1132
1133 return false;
1134}
1135
1136static bool l2cap_check_efs(struct l2cap_chan *chan)
1137{
1138 /* Check EFS parameters */
1139 return true;
1140}
1141
1142void l2cap_send_conn_req(struct l2cap_chan *chan)
1143{
1144 struct l2cap_conn *conn = chan->conn;
1145 struct l2cap_conn_req req;
1146
1147 req.scid = cpu_to_le16(chan->scid);
1148 req.psm = chan->psm;
1149
1150 chan->ident = l2cap_get_ident(conn);
1151
1152 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
1153
1154 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
1155}
1156
1157static void l2cap_send_create_chan_req(struct l2cap_chan *chan, u8 amp_id)
1158{
1159 struct l2cap_create_chan_req req;
1160 req.scid = cpu_to_le16(chan->scid);
1161 req.psm = chan->psm;
1162 req.amp_id = amp_id;
1163
1164 chan->ident = l2cap_get_ident(chan->conn);
1165
1166 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_REQ,
1167 sizeof(req), &req);
1168}
1169
/* Prepare an ERTM channel for an AMP channel move: stop all timers,
 * mark in-flight frames for retransmission and reset the receive state.
 * Channels not in ERTM mode need no preparation.
 */
static void l2cap_move_setup(struct l2cap_chan *chan)
{
	struct sk_buff *skb;

	BT_DBG("chan %p", chan);

	if (chan->mode != L2CAP_MODE_ERTM)
		return;

	__clear_retrans_timer(chan);
	__clear_monitor_timer(chan);
	__clear_ack_timer(chan);

	chan->retry_count = 0;
	/* Frames already sent at least once get their retry count reset
	 * to 1 so they are retransmitted after the move; the first
	 * never-sent frame ends the (in-order) scan.
	 */
	skb_queue_walk(&chan->tx_q, skb) {
		if (bt_cb(skb)->control.retries)
			bt_cb(skb)->control.retries = 1;
		else
			break;
	}

	chan->expected_tx_seq = chan->buffer_seq;

	clear_bit(CONN_REJ_ACT, &chan->conn_state);
	clear_bit(CONN_SREJ_ACT, &chan->conn_state);
	l2cap_seq_list_clear(&chan->retrans_list);
	l2cap_seq_list_clear(&chan->srej_list);
	skb_queue_purge(&chan->srej_q);

	chan->tx_state = L2CAP_TX_STATE_XMIT;
	chan->rx_state = L2CAP_RX_STATE_MOVE;

	/* Hold off transmissions until the move completes */
	set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
}
1204
/* Complete a channel move and restore normal operation; the poll/final
 * exchange that restarts ERTM depends on which move role we played.
 */
static void l2cap_move_done(struct l2cap_chan *chan)
{
	u8 move_role = chan->move_role;
	BT_DBG("chan %p", chan);

	chan->move_state = L2CAP_MOVE_STABLE;
	chan->move_role = L2CAP_MOVE_ROLE_NONE;

	if (chan->mode != L2CAP_MODE_ERTM)
		return;

	switch (move_role) {
	case L2CAP_MOVE_ROLE_INITIATOR:
		/* Initiator polls the remote and waits for the F-bit */
		l2cap_tx(chan, NULL, NULL, L2CAP_EV_EXPLICIT_POLL);
		chan->rx_state = L2CAP_RX_STATE_WAIT_F;
		break;
	case L2CAP_MOVE_ROLE_RESPONDER:
		/* Responder waits for the remote's poll (P-bit) */
		chan->rx_state = L2CAP_RX_STATE_WAIT_P;
		break;
	}
}
1226
/* Move a channel to BT_CONNECTED and notify its owner via ->ready(). */
static void l2cap_chan_ready(struct l2cap_chan *chan)
{
	/* This clears all conf flags, including CONF_NOT_COMPLETE */
	chan->conf_state = 0;
	__clear_chan_timer(chan);

	/* LE flow-control channels without TX credits cannot send yet */
	if (chan->mode == L2CAP_MODE_LE_FLOWCTL && !chan->tx_credits)
		chan->ops->suspend(chan);

	chan->state = BT_CONNECTED;

	chan->ops->ready(chan);
}
1240
/* Send an LE Credit Based Connection Request for @chan; the
 * FLAG_LE_CONN_REQ_SENT guard ensures it is sent at most once.
 */
static void l2cap_le_connect(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_le_conn_req req;

	if (test_and_set_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags))
		return;

	req.psm = chan->psm;
	req.scid = cpu_to_le16(chan->scid);
	req.mtu = cpu_to_le16(chan->imtu);
	req.mps = cpu_to_le16(chan->mps);
	req.credits = cpu_to_le16(chan->rx_credits);

	chan->ident = l2cap_get_ident(conn);

	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_REQ,
		       sizeof(req), &req);
}
1260
/* Begin setup of an LE channel once link security is satisfied.
 * Fixed channels (no PSM) become ready immediately; connection
 * oriented channels in BT_CONNECT send the LE connect request.
 */
static void l2cap_le_start(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;

	/* Bail out while security elevation is still in progress */
	if (!smp_conn_security(conn->hcon, chan->sec_level))
		return;

	if (!chan->psm) {
		l2cap_chan_ready(chan);
		return;
	}

	if (chan->state == BT_CONNECT)
		l2cap_le_connect(chan);
}
1276
1277static void l2cap_start_connection(struct l2cap_chan *chan)
1278{
1279 if (__amp_capable(chan)) {
1280 BT_DBG("chan %p AMP capable: discover AMPs", chan);
1281 a2mp_discover_amp(chan);
1282 } else if (chan->conn->hcon->type == LE_LINK) {
1283 l2cap_le_start(chan);
1284 } else {
1285 l2cap_send_conn_req(chan);
1286 }
1287}
1288
/* Query the remote's L2CAP feature mask. The Information Request is
 * sent at most once per connection, with a timeout armed so setup can
 * proceed even if the remote never answers.
 */
static void l2cap_request_info(struct l2cap_conn *conn)
{
	struct l2cap_info_req req;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		return;

	req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
	conn->info_ident = l2cap_get_ident(conn);

	schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

	l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
		       sizeof(req), &req);
}
1306
/* Drive connection establishment for @chan. LE links defer to
 * l2cap_le_start(); on BR/EDR the remote feature mask is fetched
 * first, and the connect is only started once security is satisfied
 * and no connect request is already outstanding.
 */
static void l2cap_do_start(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;

	if (conn->hcon->type == LE_LINK) {
		l2cap_le_start(chan);
		return;
	}

	if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)) {
		l2cap_request_info(conn);
		return;
	}

	/* Wait until the info exchange completes (or times out) */
	if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
		return;

	if (l2cap_chan_check_security(chan, true) &&
	    __l2cap_no_conn_pending(chan))
		l2cap_start_connection(chan);
}
1328
1329static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1330{
1331 u32 local_feat_mask = l2cap_feat_mask;
1332 if (!disable_ertm)
1333 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1334
1335 switch (mode) {
1336 case L2CAP_MODE_ERTM:
1337 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1338 case L2CAP_MODE_STREAMING:
1339 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
1340 default:
1341 return 0x00;
1342 }
1343}
1344
/* Send an L2CAP Disconnection Request for @chan and move it to
 * BT_DISCONN with @err as the channel error. A2MP channels have no
 * L2CAP-level disconnect and only change state.
 */
static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_disconn_req req;

	if (!conn)
		return;

	/* Stop ERTM timers before tearing the channel down */
	if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);
	}

	if (chan->scid == L2CAP_CID_A2MP) {
		l2cap_state_change(chan, BT_DISCONN);
		return;
	}

	req.dcid = cpu_to_le16(chan->dcid);
	req.scid = cpu_to_le16(chan->scid);
	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
		       sizeof(req), &req);

	l2cap_state_change_and_error(chan, BT_DISCONN, err);
}
1371
/* ---- L2CAP connections ---- */

/* Walk all channels on @conn and push each one forward: fixed channels
 * become ready, BT_CONNECT channels send their connect request, and
 * BT_CONNECT2 (incoming) channels are answered based on security and
 * deferred-setup state. Called once the feature-mask exchange is done.
 */
static void l2cap_conn_start(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan, *tmp;

	BT_DBG("conn %p", conn);

	mutex_lock(&conn->chan_lock);

	/* _safe variant: l2cap_chan_close() below may unlink entries */
	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			l2cap_chan_ready(chan);
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			if (!l2cap_chan_check_security(chan, true) ||
			    !__l2cap_no_conn_pending(chan)) {
				l2cap_chan_unlock(chan);
				continue;
			}

			/* State-2 devices must have their mode supported by
			 * the remote, otherwise the channel cannot proceed.
			 */
			if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
			    && test_bit(CONF_STATE2_DEVICE,
					&chan->conf_state)) {
				l2cap_chan_close(chan, ECONNRESET);
				l2cap_chan_unlock(chan);
				continue;
			}

			l2cap_start_connection(chan);

		} else if (chan->state == BT_CONNECT2) {
			struct l2cap_conn_rsp rsp;
			char buf[128];
			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);

			if (l2cap_chan_check_security(chan, false)) {
				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
					/* Wait for userspace authorization */
					rsp.result = cpu_to_le16(L2CAP_CR_PEND);
					rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
					chan->ops->defer(chan);

				} else {
					l2cap_state_change(chan, BT_CONFIG);
					rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
					rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
				}
			} else {
				rsp.result = cpu_to_le16(L2CAP_CR_PEND);
				rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
			}

			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			/* Only start configuration after a success response
			 * and if no config request was sent already.
			 */
			if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
			    rsp.result != L2CAP_CR_SUCCESS) {
				l2cap_chan_unlock(chan);
				continue;
			}

			set_bit(CONF_REQ_SENT, &chan->conf_state);
			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
				       l2cap_build_conf_req(chan, buf), buf);
			chan->num_conf_req++;
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
1449
/* LE-specific post-connect work: start pairing for outgoing security
 * requests and, as slave, request a connection parameter update when
 * the current interval is outside the configured bounds.
 */
static void l2cap_le_conn_ready(struct l2cap_conn *conn)
{
	struct hci_conn *hcon = conn->hcon;
	struct hci_dev *hdev = hcon->hdev;

	BT_DBG("%s conn %p", hdev->name, conn);

	/* For outgoing pairing which doesn't necessarily have an
	 * associated socket (e.g. mgmt_pair_device).
	 */
	if (hcon->out)
		smp_conn_security(hcon, hcon->pending_sec_level);

	/* For LE slave connections, make sure the connection interval
	 * is in the range of the minimum and maximum interval that has
	 * been configured for this connection. If not, then trigger
	 * the connection update procedure.
	 */
	if (hcon->role == HCI_ROLE_SLAVE &&
	    (hcon->le_conn_interval < hcon->le_conn_min_interval ||
	     hcon->le_conn_interval > hcon->le_conn_max_interval)) {
		struct l2cap_conn_param_update_req req;

		req.min = cpu_to_le16(hcon->le_conn_min_interval);
		req.max = cpu_to_le16(hcon->le_conn_max_interval);
		req.latency = cpu_to_le16(hcon->le_conn_latency);
		req.to_multiplier = cpu_to_le16(hcon->le_supv_timeout);

		l2cap_send_cmd(conn, l2cap_get_ident(conn),
			       L2CAP_CONN_PARAM_UPDATE_REQ, sizeof(req), &req);
	}
}
1482
/* Called when the underlying link comes up: request remote features on
 * ACL links, advance every existing channel, run the LE post-connect
 * work, and release any RX frames that were queued before the link was
 * ready.
 */
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan;
	struct hci_conn *hcon = conn->hcon;

	BT_DBG("conn %p", conn);

	if (hcon->type == ACL_LINK)
		l2cap_request_info(conn);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {

		l2cap_chan_lock(chan);

		/* A2MP channels are managed by the AMP manager, not here */
		if (chan->scid == L2CAP_CID_A2MP) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (hcon->type == LE_LINK) {
			l2cap_le_start(chan);
		} else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
				l2cap_chan_ready(chan);
		} else if (chan->state == BT_CONNECT) {
			l2cap_do_start(chan);
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);

	if (hcon->type == LE_LINK)
		l2cap_le_conn_ready(conn);

	/* Process any frames that arrived before the conn was ready */
	queue_work(hcon->hdev->workqueue, &conn->pending_rx_work);
}
1523
/* Notify sockets that we cannot guarantee reliability anymore */
static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
{
	struct l2cap_chan *chan;

	BT_DBG("conn %p", conn);

	mutex_lock(&conn->chan_lock);

	/* Only channels that demanded reliable delivery get the error */
	list_for_each_entry(chan, &conn->chan_l, list) {
		if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
			l2cap_chan_set_err(chan, err);
	}

	mutex_unlock(&conn->chan_lock);
}
1540
1541static void l2cap_info_timeout(struct work_struct *work)
1542{
1543 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1544 info_timer.work);
1545
1546 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
1547 conn->info_ident = 0;
1548
1549 l2cap_conn_start(conn);
1550}
1551
1552/*
1553 * l2cap_user
1554 * External modules can register l2cap_user objects on l2cap_conn. The ->probe
1555 * callback is called during registration. The ->remove callback is called
1556 * during unregistration.
1557 * An l2cap_user object can either be explicitly unregistered or when the
1558 * underlying l2cap_conn object is deleted. This guarantees that l2cap->hcon,
1559 * l2cap->hchan, .. are valid as long as the remove callback hasn't been called.
1560 * External modules must own a reference to the l2cap_conn object if they intend
1561 * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
1562 * any time if they don't.
1563 */
1564
/* Register an external l2cap_user on @conn.
 *
 * Returns 0 on success, -EINVAL if the user is already linked,
 * -ENODEV if the connection has been torn down, or the error code
 * from the user's ->probe() callback.
 */
int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
{
	struct hci_dev *hdev = conn->hcon->hdev;
	int ret;

	/* We need to check whether l2cap_conn is registered. If it is not, we
	 * must not register the l2cap_user. l2cap_conn_del() unregisters
	 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
	 * relies on the parent hci_conn object to be locked. This itself relies
	 * on the hci_dev object to be locked. So we must lock the hci device
	 * here, too. */

	hci_dev_lock(hdev);

	/* Non-NULL list pointers mean the user is already registered */
	if (user->list.next || user->list.prev) {
		ret = -EINVAL;
		goto out_unlock;
	}

	/* conn->hchan is NULL after l2cap_conn_del() was called */
	if (!conn->hchan) {
		ret = -ENODEV;
		goto out_unlock;
	}

	ret = user->probe(conn, user);
	if (ret)
		goto out_unlock;

	list_add(&user->list, &conn->users);
	ret = 0;

out_unlock:
	hci_dev_unlock(hdev);
	return ret;
}
EXPORT_SYMBOL(l2cap_register_user);
1602
/* Unregister a previously registered l2cap_user from @conn and invoke
 * its ->remove() callback. Safe to call for an already-unregistered
 * user (detected via NULL list pointers).
 */
void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
{
	struct hci_dev *hdev = conn->hcon->hdev;

	hci_dev_lock(hdev);

	if (!user->list.next || !user->list.prev)
		goto out_unlock;

	list_del(&user->list);
	/* Clear the pointers so re-registration checks work */
	user->list.next = NULL;
	user->list.prev = NULL;
	user->remove(conn, user);

out_unlock:
	hci_dev_unlock(hdev);
}
EXPORT_SYMBOL(l2cap_unregister_user);
1621
1622static void l2cap_unregister_all_users(struct l2cap_conn *conn)
1623{
1624 struct l2cap_user *user;
1625
1626 while (!list_empty(&conn->users)) {
1627 user = list_first_entry(&conn->users, struct l2cap_user, list);
1628 list_del(&user->list);
1629 user->list.next = NULL;
1630 user->list.prev = NULL;
1631 user->remove(conn, user);
1632 }
1633}
1634
/* Tear down the L2CAP connection attached to @hcon: cancel pending
 * work, unregister users, close every channel with @err, release the
 * HCI channel and drop the connection reference.
 */
static void l2cap_conn_del(struct hci_conn *hcon, int err)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan, *l;

	if (!conn)
		return;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	kfree_skb(conn->rx_skb);

	skb_queue_purge(&conn->pending_rx);

	/* We can not call flush_work(&conn->pending_rx_work) here since we
	 * might block if we are running on a worker from the same workqueue
	 * pending_rx_work is waiting on.
	 */
	if (work_pending(&conn->pending_rx_work))
		cancel_work_sync(&conn->pending_rx_work);

	if (work_pending(&conn->id_addr_update_work))
		cancel_work_sync(&conn->id_addr_update_work);

	l2cap_unregister_all_users(conn);

	/* Force the connection to be immediately dropped */
	hcon->disc_timeout = 0;

	mutex_lock(&conn->chan_lock);

	/* Kill channels */
	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
		/* Hold an extra ref: ->close() may drop the last one */
		l2cap_chan_hold(chan);
		l2cap_chan_lock(chan);

		l2cap_chan_del(chan, err);

		l2cap_chan_unlock(chan);

		chan->ops->close(chan);
		l2cap_chan_put(chan);
	}

	mutex_unlock(&conn->chan_lock);

	hci_chan_del(conn->hchan);

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		cancel_delayed_work_sync(&conn->info_timer);

	hcon->l2cap_data = NULL;
	conn->hchan = NULL;
	l2cap_conn_put(conn);
}
1690
/* kref release callback: free the connection once the last reference
 * is dropped, releasing the hci_conn reference it holds.
 */
static void l2cap_conn_free(struct kref *ref)
{
	struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);

	hci_conn_put(conn->hcon);
	kfree(conn);
}
1698
/* Take a reference on @conn and return it for call chaining. */
struct l2cap_conn *l2cap_conn_get(struct l2cap_conn *conn)
{
	kref_get(&conn->ref);
	return conn;
}
EXPORT_SYMBOL(l2cap_conn_get);
1705
/* Drop a reference on @conn; frees it via l2cap_conn_free() when the
 * count reaches zero.
 */
void l2cap_conn_put(struct l2cap_conn *conn)
{
	kref_put(&conn->ref, l2cap_conn_free);
}
EXPORT_SYMBOL(l2cap_conn_put);
1711
/* ---- Socket interface ---- */

/* Find socket with psm and source / destination bdaddr.
 * Returns closest match.
 */
static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
						   bdaddr_t *src,
						   bdaddr_t *dst,
						   u8 link_type)
{
	struct l2cap_chan *c, *c1 = NULL;

	read_lock(&chan_list_lock);

	list_for_each_entry(c, &chan_list, global_l) {
		/* state == 0 means "any state" */
		if (state && c->state != state)
			continue;

		/* Source address type must match the link transport */
		if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR)
			continue;

		if (link_type == LE_LINK && c->src_type == BDADDR_BREDR)
			continue;

		if (c->psm == psm) {
			int src_match, dst_match;
			int src_any, dst_any;

			/* Exact match. */
			src_match = !bacmp(&c->src, src);
			dst_match = !bacmp(&c->dst, dst);
			if (src_match && dst_match) {
				/* Returned with a reference held */
				l2cap_chan_hold(c);
				read_unlock(&chan_list_lock);
				return c;
			}

			/* Closest match */
			src_any = !bacmp(&c->src, BDADDR_ANY);
			dst_any = !bacmp(&c->dst, BDADDR_ANY);
			if ((src_match && dst_any) || (src_any && dst_match) ||
			    (src_any && dst_any))
				c1 = c;
		}
	}

	if (c1)
		l2cap_chan_hold(c1);

	read_unlock(&chan_list_lock);

	return c1;
}
1765
1766static void l2cap_monitor_timeout(struct work_struct *work)
1767{
1768 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1769 monitor_timer.work);
1770
1771 BT_DBG("chan %p", chan);
1772
1773 l2cap_chan_lock(chan);
1774
1775 if (!chan->conn) {
1776 l2cap_chan_unlock(chan);
1777 l2cap_chan_put(chan);
1778 return;
1779 }
1780
1781 l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);
1782
1783 l2cap_chan_unlock(chan);
1784 l2cap_chan_put(chan);
1785}
1786
1787static void l2cap_retrans_timeout(struct work_struct *work)
1788{
1789 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1790 retrans_timer.work);
1791
1792 BT_DBG("chan %p", chan);
1793
1794 l2cap_chan_lock(chan);
1795
1796 if (!chan->conn) {
1797 l2cap_chan_unlock(chan);
1798 l2cap_chan_put(chan);
1799 return;
1800 }
1801
1802 l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
1803 l2cap_chan_unlock(chan);
1804 l2cap_chan_put(chan);
1805}
1806
/* Transmit all queued PDUs in streaming mode: number each frame,
 * append the FCS when configured, and send without waiting for acks
 * (streaming mode has no retransmission).
 */
static void l2cap_streaming_send(struct l2cap_chan *chan,
				 struct sk_buff_head *skbs)
{
	struct sk_buff *skb;
	struct l2cap_ctrl *control;

	BT_DBG("chan %p, skbs %p", chan, skbs);

	/* Don't transmit while a channel move is in progress */
	if (__chan_is_moving(chan))
		return;

	skb_queue_splice_tail_init(skbs, &chan->tx_q);

	while (!skb_queue_empty(&chan->tx_q)) {

		skb = skb_dequeue(&chan->tx_q);

		bt_cb(skb)->control.retries = 1;
		control = &bt_cb(skb)->control;

		/* Streaming mode never acknowledges received frames */
		control->reqseq = 0;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		l2cap_do_send(chan, skb);

		BT_DBG("Sent txseq %u", control->txseq);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->frames_sent++;
	}
}
1845
/* Transmit as many queued I-frames as the remote TX window allows in
 * ERTM mode. Originals stay on tx_q for possible retransmission;
 * clones are handed to the HCI layer.
 *
 * Returns the number of frames sent, 0 if sending is currently not
 * possible, or -ENOTCONN if the channel is not connected.
 */
static int l2cap_ertm_send(struct l2cap_chan *chan)
{
	struct sk_buff *skb, *tx_skb;
	struct l2cap_ctrl *control;
	int sent = 0;

	BT_DBG("chan %p", chan);

	if (chan->state != BT_CONNECTED)
		return -ENOTCONN;

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return 0;

	if (__chan_is_moving(chan))
		return 0;

	while (chan->tx_send_head &&
	       chan->unacked_frames < chan->remote_tx_win &&
	       chan->tx_state == L2CAP_TX_STATE_XMIT) {

		skb = chan->tx_send_head;

		bt_cb(skb)->control.retries = 1;
		control = &bt_cb(skb)->control;

		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control->final = 1;

		/* Each I-frame piggybacks an ack up to buffer_seq */
		control->reqseq = chan->buffer_seq;
		chan->last_acked_seq = chan->buffer_seq;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		/* Clone after data has been modified. Data is assumed to be
		   read-only (for locking purposes) on cloned sk_buffs.
		 */
		tx_skb = skb_clone(skb, GFP_KERNEL);

		if (!tx_skb)
			break;

		__set_retrans_timer(chan);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->unacked_frames++;
		chan->frames_sent++;
		sent++;

		if (skb_queue_is_last(&chan->tx_q, skb))
			chan->tx_send_head = NULL;
		else
			chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);

		l2cap_do_send(chan, tx_skb);
		BT_DBG("Sent txseq %u", control->txseq);
	}

	BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
	       chan->unacked_frames, skb_queue_len(&chan->tx_q));

	return sent;
}
1915
/* Retransmit every frame whose sequence number is on retrans_list,
 * updating the control field and FCS in a writable copy. Disconnects
 * the channel if a frame exceeds the max_tx retry limit.
 */
static void l2cap_ertm_resend(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	struct sk_buff *skb;
	struct sk_buff *tx_skb;
	u16 seq;

	BT_DBG("chan %p", chan);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	if (__chan_is_moving(chan))
		return;

	while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
		seq = l2cap_seq_list_pop(&chan->retrans_list);

		skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
		if (!skb) {
			BT_DBG("Error: Can't retransmit seq %d, frame missing",
			       seq);
			continue;
		}

		bt_cb(skb)->control.retries++;
		control = bt_cb(skb)->control;

		/* max_tx == 0 means unlimited retransmissions */
		if (chan->max_tx != 0 &&
		    bt_cb(skb)->control.retries > chan->max_tx) {
			BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
			l2cap_send_disconn_req(chan, ECONNRESET);
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		/* Refresh the piggybacked ack and F-bit for this resend */
		control.reqseq = chan->buffer_seq;
		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control.final = 1;
		else
			control.final = 0;

		if (skb_cloned(skb)) {
			/* Cloned sk_buffs are read-only, so we need a
			 * writeable copy
			 */
			tx_skb = skb_copy(skb, GFP_KERNEL);
		} else {
			tx_skb = skb_clone(skb, GFP_KERNEL);
		}

		if (!tx_skb) {
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		/* Update skb contents */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
			put_unaligned_le32(__pack_extended_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		} else {
			put_unaligned_le16(__pack_enhanced_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		}

		/* Update FCS */
		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) tx_skb->data,
					tx_skb->len - L2CAP_FCS_SIZE);
			put_unaligned_le16(fcs, skb_tail_pointer(tx_skb) -
					   L2CAP_FCS_SIZE);
		}

		l2cap_do_send(chan, tx_skb);

		BT_DBG("Resent txseq %d", control.txseq);

		chan->last_acked_seq = chan->buffer_seq;
	}
}
1996
/* Retransmit the single frame identified by control->reqseq. */
static void l2cap_retransmit(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);

	l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
	l2cap_ertm_resend(chan);
}
2005
/* Queue every unacked frame from control->reqseq onward for
 * retransmission (e.g. in response to a REJ) and resend them.
 */
static void l2cap_retransmit_all(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	if (control->poll)
		set_bit(CONN_SEND_FBIT, &chan->conn_state);

	/* Start over with a fresh retransmit list */
	l2cap_seq_list_clear(&chan->retrans_list);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	if (chan->unacked_frames) {
		/* Skip frames the remote already acknowledged */
		skb_queue_walk(&chan->tx_q, skb) {
			if (bt_cb(skb)->control.txseq == control->reqseq ||
			    skb == chan->tx_send_head)
				break;
		}

		/* Queue everything sent but unacked for retransmission */
		skb_queue_walk_from(&chan->tx_q, skb) {
			if (skb == chan->tx_send_head)
				break;

			l2cap_seq_list_append(&chan->retrans_list,
					      bt_cb(skb)->control.txseq);
		}

		l2cap_ertm_resend(chan);
	}
}
2039
/* Acknowledge received I-frames: send RNR when locally busy, otherwise
 * try to piggyback the ack on outgoing I-frames and only emit an
 * explicit RR once the window is 3/4 full; otherwise (re)arm the ack
 * timer.
 */
static void l2cap_send_ack(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
					 chan->last_acked_seq);
	int threshold;

	BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
	       chan, chan->last_acked_seq, chan->buffer_seq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    chan->rx_state == L2CAP_RX_STATE_RECV) {
		__clear_ack_timer(chan);
		control.super = L2CAP_SUPER_RNR;
		control.reqseq = chan->buffer_seq;
		l2cap_send_sframe(chan, &control);
	} else {
		if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
			l2cap_ertm_send(chan);
			/* If any i-frames were sent, they included an ack */
			if (chan->buffer_seq == chan->last_acked_seq)
				frames_to_ack = 0;
		}

		/* Ack now if the window is 3/4ths full.
		 * Calculate without mul or div
		 */
		threshold = chan->ack_win;
		threshold += threshold << 1;
		threshold >>= 2;

		BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
		       threshold);

		if (frames_to_ack >= threshold) {
			__clear_ack_timer(chan);
			control.super = L2CAP_SUPER_RR;
			control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &control);
			frames_to_ack = 0;
		}

		/* Below threshold: defer the ack via the timer */
		if (frames_to_ack)
			__set_ack_timer(chan);
	}
}
2089
/* Copy @len bytes of user data from @msg into @skb: @count bytes into
 * the skb's linear area, the remainder into HCI-MTU-sized continuation
 * fragments chained on the frag_list.
 *
 * Returns the number of bytes copied, or -EFAULT / an allocation error
 * code on failure.
 */
static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
					 struct msghdr *msg, int len,
					 int count, struct sk_buff *skb)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff **frag;
	int sent = 0;

	if (chan->ops->memcpy_fromiovec(chan, skb_put(skb, count),
					msg->msg_iov, count))
		return -EFAULT;

	sent += count;
	len -= count;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		struct sk_buff *tmp;

		count = min_t(unsigned int, conn->mtu, len);

		tmp = chan->ops->alloc_skb(chan, 0, count,
					   msg->msg_flags & MSG_DONTWAIT);
		if (IS_ERR(tmp))
			return PTR_ERR(tmp);

		*frag = tmp;

		if (chan->ops->memcpy_fromiovec(chan, skb_put(*frag, count),
						msg->msg_iov, count))
			return -EFAULT;

		sent += count;
		len -= count;

		/* Account fragment bytes in the head skb's totals */
		skb->len += (*frag)->len;
		skb->data_len += (*frag)->len;

		frag = &(*frag)->next;
	}

	return sent;
}
2134
/* Build a connectionless (G-frame) PDU: L2CAP header plus PSM prefix
 * followed by the user payload. Returns the skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
						 struct msghdr *msg, size_t len)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p psm 0x%2.2x len %zu", chan,
	       __le16_to_cpu(chan->psm), len);

	/* First fragment carries the header; cap it at the HCI MTU */
	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, hlen, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
	put_unaligned(chan->psm, (__le16 *) skb_put(skb, L2CAP_PSMLEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
2166
2167static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
2168 struct msghdr *msg, size_t len)
2169{
2170 struct l2cap_conn *conn = chan->conn;
2171 struct sk_buff *skb;
2172 int err, count;
2173 struct l2cap_hdr *lh;
2174
2175 BT_DBG("chan %p len %zu", chan, len);
2176
2177 count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
2178
2179 skb = chan->ops->alloc_skb(chan, L2CAP_HDR_SIZE, count,
2180 msg->msg_flags & MSG_DONTWAIT);
2181 if (IS_ERR(skb))
2182 return skb;
2183
2184 /* Create L2CAP header */
2185 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2186 lh->cid = cpu_to_le16(chan->dcid);
2187 lh->len = cpu_to_le16(len);
2188
2189 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2190 if (unlikely(err < 0)) {
2191 kfree_skb(skb);
2192 return ERR_PTR(err);
2193 }
2194 return skb;
2195}
2196
/* Build an ERTM/streaming I-frame PDU: L2CAP header, placeholder
 * control field (filled in at transmit time), optional SDU length for
 * the first segment, payload, and room reserved for the FCS.
 * Returns the skb or an ERR_PTR on failure.
 */
static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
					       struct msghdr *msg, size_t len,
					       u16 sdulen)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	hlen = __ertm_hdr_size(chan);

	/* Only the first segment of an SDU carries the SDU length */
	if (sdulen)
		hlen += L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, hlen, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	/* Control header is populated later */
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	bt_cb(skb)->control.fcs = chan->fcs;
	bt_cb(skb)->control.retries = 0;
	return skb;
}
2250
/* Segment an outgoing SDU into I-frame PDUs on @seg_queue, tagging
 * each frame with the proper SAR value (unsegmented, start, continue,
 * end). Returns 0 on success or a negative error, in which case the
 * queue is purged.
 */
static int l2cap_segment_sdu(struct l2cap_chan *chan,
			     struct sk_buff_head *seg_queue,
			     struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	u16 sdu_len;
	size_t pdu_len;
	u8 sar;

	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);

	/* It is critical that ERTM PDUs fit in a single HCI fragment,
	 * so fragmented skbs are not used. The HCI layer's handling
	 * of fragmented skbs is not compatible with ERTM's queueing.
	 */

	/* PDU size is derived from the HCI MTU */
	pdu_len = chan->conn->mtu;

	/* Constrain PDU size for BR/EDR connections */
	if (!chan->hs_hcon)
		pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);

	/* Adjust for largest possible L2CAP overhead. */
	if (chan->fcs)
		pdu_len -= L2CAP_FCS_SIZE;

	pdu_len -= __ertm_hdr_size(chan);

	/* Remote device may have requested smaller PDUs */
	pdu_len = min_t(size_t, pdu_len, chan->remote_mps);

	if (len <= pdu_len) {
		sar = L2CAP_SAR_UNSEGMENTED;
		sdu_len = 0;
		pdu_len = len;
	} else {
		sar = L2CAP_SAR_START;
		sdu_len = len;
	}

	while (len > 0) {
		skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);

		if (IS_ERR(skb)) {
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		bt_cb(skb)->control.sar = sar;
		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;
		/* Only the START frame carries the SDU length */
		if (sdu_len)
			sdu_len = 0;

		if (len <= pdu_len) {
			sar = L2CAP_SAR_END;
			pdu_len = len;
		} else {
			sar = L2CAP_SAR_CONTINUE;
		}
	}

	return 0;
}
2317
/* Build an LE flow-control (K-frame) PDU: L2CAP header, optional SDU
 * length on the first segment, then the payload. Returns the skb or
 * an ERR_PTR on failure.
 */
static struct sk_buff *l2cap_create_le_flowctl_pdu(struct l2cap_chan *chan,
						   struct msghdr *msg,
						   size_t len, u16 sdulen)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	hlen = L2CAP_HDR_SIZE;

	/* Only the first segment of an SDU carries the SDU length */
	if (sdulen)
		hlen += L2CAP_SDULEN_SIZE;

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, hlen, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	return skb;
}
2360
/* Segment an outgoing SDU into LE flow-control PDUs on @seg_queue.
 * The first PDU carries the total SDU length; later PDUs gain the two
 * bytes freed by dropping it. Returns 0 or a negative error, purging
 * the queue on failure.
 */
static int l2cap_segment_le_sdu(struct l2cap_chan *chan,
				struct sk_buff_head *seg_queue,
				struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	size_t pdu_len;
	u16 sdu_len;

	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);

	sdu_len = len;
	pdu_len = chan->remote_mps - L2CAP_SDULEN_SIZE;

	while (len > 0) {
		if (len <= pdu_len)
			pdu_len = len;

		skb = l2cap_create_le_flowctl_pdu(chan, msg, pdu_len, sdu_len);
		if (IS_ERR(skb)) {
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;

		/* After the first PDU, no SDU length field is needed */
		if (sdu_len) {
			sdu_len = 0;
			pdu_len += L2CAP_SDULEN_SIZE;
		}
	}

	return 0;
}
2396
/* Transmit an SDU on @chan.
 *
 * Dispatches on channel type and mode: connectionless channels and
 * basic mode build a single PDU and send it directly; LE flow control
 * segments the SDU and sends as many PDUs as credits allow; ERTM and
 * streaming modes segment first and then hand the fragments to the
 * respective transmit machinery.
 *
 * Returns the number of bytes accepted (@len) or a negative errno.
 */
int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	int err;
	struct sk_buff_head seg_queue;

	if (!chan->conn)
		return -ENOTCONN;

	/* Connectionless channel */
	if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
		skb = l2cap_create_connless_pdu(chan, msg, len);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		/* Channel lock is released before requesting new skb and then
		 * reacquired thus we need to recheck channel state.
		 */
		if (chan->state != BT_CONNECTED) {
			kfree_skb(skb);
			return -ENOTCONN;
		}

		l2cap_do_send(chan, skb);
		return len;
	}

	switch (chan->mode) {
	case L2CAP_MODE_LE_FLOWCTL:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		/* No credits left: the remote must grant more before
		 * further PDUs may be sent.
		 */
		if (!chan->tx_credits)
			return -EAGAIN;

		__skb_queue_head_init(&seg_queue);

		err = l2cap_segment_le_sdu(chan, &seg_queue, msg, len);

		/* The channel lock was dropped while allocating skbs;
		 * recheck the state before queueing anything.
		 */
		if (chan->state != BT_CONNECTED) {
			__skb_queue_purge(&seg_queue);
			err = -ENOTCONN;
		}

		if (err)
			return err;

		skb_queue_splice_tail_init(&seg_queue, &chan->tx_q);

		/* Send queued PDUs, one credit each */
		while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
			l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
			chan->tx_credits--;
		}

		if (!chan->tx_credits)
			chan->ops->suspend(chan);

		err = len;

		break;

	case L2CAP_MODE_BASIC:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		/* Create a basic PDU */
		skb = l2cap_create_basic_pdu(chan, msg, len);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		/* Channel lock is released before requesting new skb and then
		 * reacquired thus we need to recheck channel state.
		 */
		if (chan->state != BT_CONNECTED) {
			kfree_skb(skb);
			return -ENOTCONN;
		}

		l2cap_do_send(chan, skb);
		err = len;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* Check outgoing MTU */
		if (len > chan->omtu) {
			err = -EMSGSIZE;
			break;
		}

		__skb_queue_head_init(&seg_queue);

		/* Do segmentation before calling in to the state machine,
		 * since it's possible to block while waiting for memory
		 * allocation.
		 */
		err = l2cap_segment_sdu(chan, &seg_queue, msg, len);

		/* The channel could have been closed while segmenting,
		 * check that it is still connected.
		 */
		if (chan->state != BT_CONNECTED) {
			__skb_queue_purge(&seg_queue);
			err = -ENOTCONN;
		}

		if (err)
			break;

		if (chan->mode == L2CAP_MODE_ERTM)
			l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
		else
			l2cap_streaming_send(chan, &seg_queue);

		err = len;

		/* If the skbs were not queued for sending, they'll still be in
		 * seg_queue and need to be purged.
		 */
		__skb_queue_purge(&seg_queue);
		break;

	default:
		BT_DBG("bad state %1.1x", chan->mode);
		err = -EBADFD;
	}

	return err;
}
EXPORT_SYMBOL_GPL(l2cap_chan_send);
2529
/* Send an SREJ s-frame for every sequence number from the currently
 * expected one up to (but excluding) @txseq that has not already been
 * received out of order (i.e. is not in srej_q).  Each requested seq
 * is remembered on srej_list so the retransmissions can be matched up
 * when they arrive.
 */
static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
{
	struct l2cap_ctrl control;
	u16 seq;

	BT_DBG("chan %p, txseq %u", chan, txseq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;

	for (seq = chan->expected_tx_seq; seq != txseq;
	     seq = __next_seq(chan, seq)) {
		if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
			control.reqseq = seq;
			l2cap_send_sframe(chan, &control);
			l2cap_seq_list_append(&chan->srej_list, seq);
		}
	}

	/* Everything up to and including txseq is now accounted for */
	chan->expected_tx_seq = __next_seq(chan, txseq);
}
2552
2553static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2554{
2555 struct l2cap_ctrl control;
2556
2557 BT_DBG("chan %p", chan);
2558
2559 if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2560 return;
2561
2562 memset(&control, 0, sizeof(control));
2563 control.sframe = 1;
2564 control.super = L2CAP_SUPER_SREJ;
2565 control.reqseq = chan->srej_list.tail;
2566 l2cap_send_sframe(chan, &control);
2567}
2568
/* Re-send SREJ s-frames for every sequence number still outstanding on
 * srej_list, except @txseq which has just been received.  Entries are
 * popped and re-appended, so the initial head is captured to guarantee
 * exactly one pass over the list.
 */
static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
{
	struct l2cap_ctrl control;
	u16 initial_head;
	u16 seq;

	BT_DBG("chan %p, txseq %u", chan, txseq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;

	/* Capture initial list head to allow only one pass through the list. */
	initial_head = chan->srej_list.head;

	do {
		seq = l2cap_seq_list_pop(&chan->srej_list);
		if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
			break;

		control.reqseq = seq;
		l2cap_send_sframe(chan, &control);
		l2cap_seq_list_append(&chan->srej_list, seq);
	} while (chan->srej_list.head != initial_head);
}
2594
/* Process an acknowledgment from the peer: every frame with a sequence
 * number before @reqseq is acknowledged, so drop it from the tx queue.
 * Stops the retransmission timer once nothing remains unacked.
 */
static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
{
	struct sk_buff *acked_skb;
	u16 ackseq;

	BT_DBG("chan %p, reqseq %u", chan, reqseq);

	/* Nothing outstanding, or nothing new acknowledged */
	if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
		return;

	BT_DBG("expected_ack_seq %u, unacked_frames %u",
	       chan->expected_ack_seq, chan->unacked_frames);

	for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
	     ackseq = __next_seq(chan, ackseq)) {

		/* Frame may already have been released (e.g. after an
		 * earlier partial ack), so look it up first.
		 */
		acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
		if (acked_skb) {
			skb_unlink(acked_skb, &chan->tx_q);
			kfree_skb(acked_skb);
			chan->unacked_frames--;
		}
	}

	chan->expected_ack_seq = reqseq;

	if (chan->unacked_frames == 0)
		__clear_retrans_timer(chan);

	BT_DBG("unacked_frames %u", chan->unacked_frames);
}
2626
/* Abandon the SREJ_SENT receive state: forget the out-of-sequence
 * frames collected so far and fall back to plain RECV, re-expecting
 * everything from buffer_seq onwards.
 */
static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
{
	BT_DBG("chan %p", chan);

	chan->expected_tx_seq = chan->buffer_seq;
	l2cap_seq_list_clear(&chan->srej_list);
	skb_queue_purge(&chan->srej_q);
	chan->rx_state = L2CAP_RX_STATE_RECV;
}
2636
/* ERTM transmit state machine, XMIT state handler.
 *
 * In XMIT the channel may send freely: data requests are queued and
 * transmitted immediately; polls and retransmission timeouts move the
 * channel to WAIT_F after sending an RR/RNR with the poll bit set.
 */
static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
				struct l2cap_ctrl *control,
				struct sk_buff_head *skbs, u8 event)
{
	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
	       event);

	switch (event) {
	case L2CAP_EV_DATA_REQUEST:
		if (chan->tx_send_head == NULL)
			chan->tx_send_head = skb_peek(skbs);

		skb_queue_splice_tail_init(skbs, &chan->tx_q);
		l2cap_ertm_send(chan);
		break;
	case L2CAP_EV_LOCAL_BUSY_DETECTED:
		BT_DBG("Enter LOCAL_BUSY");
		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
			/* The SREJ_SENT state must be aborted if we are to
			 * enter the LOCAL_BUSY state.
			 */
			l2cap_abort_rx_srej_sent(chan);
		}

		l2cap_send_ack(chan);

		break;
	case L2CAP_EV_LOCAL_BUSY_CLEAR:
		BT_DBG("Exit LOCAL_BUSY");
		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
			/* We told the peer we were busy (RNR); poll it to
			 * resynchronize and wait for the final bit.
			 */
			struct l2cap_ctrl local_control;

			memset(&local_control, 0, sizeof(local_control));
			local_control.sframe = 1;
			local_control.super = L2CAP_SUPER_RR;
			local_control.poll = 1;
			local_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &local_control);

			chan->retry_count = 1;
			__set_monitor_timer(chan);
			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		}
		break;
	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
		l2cap_process_reqseq(chan, control->reqseq);
		break;
	case L2CAP_EV_EXPLICIT_POLL:
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		__clear_ack_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RETRANS_TO:
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RECV_FBIT:
		/* Nothing to process */
		break;
	default:
		break;
	}
}
2708
2709static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2710 struct l2cap_ctrl *control,
2711 struct sk_buff_head *skbs, u8 event)
2712{
2713 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2714 event);
2715
2716 switch (event) {
2717 case L2CAP_EV_DATA_REQUEST:
2718 if (chan->tx_send_head == NULL)
2719 chan->tx_send_head = skb_peek(skbs);
2720 /* Queue data, but don't send. */
2721 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2722 break;
2723 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2724 BT_DBG("Enter LOCAL_BUSY");
2725 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2726
2727 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2728 /* The SREJ_SENT state must be aborted if we are to
2729 * enter the LOCAL_BUSY state.
2730 */
2731 l2cap_abort_rx_srej_sent(chan);
2732 }
2733
2734 l2cap_send_ack(chan);
2735
2736 break;
2737 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2738 BT_DBG("Exit LOCAL_BUSY");
2739 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2740
2741 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2742 struct l2cap_ctrl local_control;
2743 memset(&local_control, 0, sizeof(local_control));
2744 local_control.sframe = 1;
2745 local_control.super = L2CAP_SUPER_RR;
2746 local_control.poll = 1;
2747 local_control.reqseq = chan->buffer_seq;
2748 l2cap_send_sframe(chan, &local_control);
2749
2750 chan->retry_count = 1;
2751 __set_monitor_timer(chan);
2752 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2753 }
2754 break;
2755 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2756 l2cap_process_reqseq(chan, control->reqseq);
2757
2758 /* Fall through */
2759
2760 case L2CAP_EV_RECV_FBIT:
2761 if (control && control->final) {
2762 __clear_monitor_timer(chan);
2763 if (chan->unacked_frames > 0)
2764 __set_retrans_timer(chan);
2765 chan->retry_count = 0;
2766 chan->tx_state = L2CAP_TX_STATE_XMIT;
2767 BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state);
2768 }
2769 break;
2770 case L2CAP_EV_EXPLICIT_POLL:
2771 /* Ignore */
2772 break;
2773 case L2CAP_EV_MONITOR_TO:
2774 if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
2775 l2cap_send_rr_or_rnr(chan, 1);
2776 __set_monitor_timer(chan);
2777 chan->retry_count++;
2778 } else {
2779 l2cap_send_disconn_req(chan, ECONNABORTED);
2780 }
2781 break;
2782 default:
2783 break;
2784 }
2785}
2786
2787static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
2788 struct sk_buff_head *skbs, u8 event)
2789{
2790 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2791 chan, control, skbs, event, chan->tx_state);
2792
2793 switch (chan->tx_state) {
2794 case L2CAP_TX_STATE_XMIT:
2795 l2cap_tx_state_xmit(chan, control, skbs, event);
2796 break;
2797 case L2CAP_TX_STATE_WAIT_F:
2798 l2cap_tx_state_wait_f(chan, control, skbs, event);
2799 break;
2800 default:
2801 /* Ignore event */
2802 break;
2803 }
2804}
2805
/* Feed a received frame's reqseq (and final bit) into the transmit
 * state machine so acknowledged frames can be released.
 */
static void l2cap_pass_to_tx(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);
	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
}
2812
/* Feed only a received frame's final bit into the transmit state
 * machine (used when the reqseq must not be processed).
 */
static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
				  struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);
	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
}
2819
/* Copy frame to all raw sockets on that connection */
static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct sk_buff *nskb;
	struct l2cap_chan *chan;

	BT_DBG("conn %p", conn);

	/* chan_lock protects the channel list while we walk it */
	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		if (chan->chan_type != L2CAP_CHAN_RAW)
			continue;

		/* Don't send frame to the channel it came from */
		if (bt_cb(skb)->chan == chan)
			continue;

		/* Each raw channel gets its own clone; drop it if the
		 * channel refuses delivery.
		 */
		nskb = skb_clone(skb, GFP_KERNEL);
		if (!nskb)
			continue;
		if (chan->ops->recv(chan, nskb))
			kfree_skb(nskb);
	}

	mutex_unlock(&conn->chan_lock);
}
2847
/* ---- L2CAP signalling commands ---- */

/* Allocate and fill an skb holding one L2CAP signalling command:
 * L2CAP header + command header + @dlen bytes of @data.  Payload that
 * does not fit within conn->mtu is chained as continuation fragments
 * (no L2CAP header) on frag_list.
 *
 * Returns the skb, or NULL if allocation fails or the MTU cannot even
 * hold the command header.
 */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
				       u8 ident, u16 dlen, void *data)
{
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	int len, count;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
	       conn, code, ident, dlen);

	if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
		return NULL;

	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, conn->mtu, len);

	skb = bt_skb_alloc(count, GFP_KERNEL);
	if (!skb)
		return NULL;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);

	/* Signalling CID differs between LE and BR/EDR links */
	if (conn->hcon->type == LE_LINK)
		lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
	else
		lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);

	cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->code  = code;
	cmd->ident = ident;
	cmd->len   = cpu_to_le16(dlen);

	if (dlen) {
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		memcpy(skb_put(skb, count), data, count);
		data += count;
	}

	len -= skb->len;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_alloc(count, GFP_KERNEL);
		if (!*frag)
			goto fail;

		memcpy(skb_put(*frag, count), data, count);

		len  -= count;
		data += count;

		frag = &(*frag)->next;
	}

	return skb;

fail:
	/* Frees the whole fragment chain as well */
	kfree_skb(skb);
	return NULL;
}
2914
/* Decode one configuration option at *ptr and advance *ptr past it.
 *
 * Option values of size 1/2/4 are returned inline in *val; any other
 * size is returned as a pointer to the option's data.
 *
 * Returns the total encoded size of the option (header + value).
 *
 * NOTE(review): opt->len comes from the remote peer and is NOT checked
 * against the remaining buffer here - callers must bound-check the
 * returned length against their remaining len before trusting *val.
 */
static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
				     unsigned long *val)
{
	struct l2cap_conf_opt *opt = *ptr;
	int len;

	len = L2CAP_CONF_OPT_SIZE + opt->len;
	*ptr += len;

	*type = opt->type;
	*olen = opt->len;

	switch (opt->len) {
	case 1:
		*val = *((u8 *) opt->val);
		break;

	case 2:
		*val = get_unaligned_le16(opt->val);
		break;

	case 4:
		*val = get_unaligned_le32(opt->val);
		break;

	default:
		/* Variable-size option: hand back a pointer to the data */
		*val = (unsigned long) opt->val;
		break;
	}

	BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
	return len;
}
2948
/* Append one configuration option (type/len/value) at *ptr and advance
 * *ptr past it.  Values of size 1/2/4 are encoded from @val directly;
 * any other size treats @val as a pointer to @len bytes to copy.
 *
 * NOTE(review): there is no output-buffer bound check; callers size
 * their response buffers for the worst case - verify when adding new
 * options.
 */
static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
{
	struct l2cap_conf_opt *opt = *ptr;

	BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);

	opt->type = type;
	opt->len  = len;

	switch (len) {
	case 1:
		*((u8 *) opt->val)  = val;
		break;

	case 2:
		put_unaligned_le16(val, opt->val);
		break;

	case 4:
		put_unaligned_le32(val, opt->val);
		break;

	default:
		memcpy(opt->val, (void *) val, len);
		break;
	}

	*ptr += L2CAP_CONF_OPT_SIZE + len;
}
2978
2979static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
2980{
2981 struct l2cap_conf_efs efs;
2982
2983 switch (chan->mode) {
2984 case L2CAP_MODE_ERTM:
2985 efs.id = chan->local_id;
2986 efs.stype = chan->local_stype;
2987 efs.msdu = cpu_to_le16(chan->local_msdu);
2988 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2989 efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
2990 efs.flush_to = cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
2991 break;
2992
2993 case L2CAP_MODE_STREAMING:
2994 efs.id = 1;
2995 efs.stype = L2CAP_SERV_BESTEFFORT;
2996 efs.msdu = cpu_to_le16(chan->local_msdu);
2997 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2998 efs.acc_lat = 0;
2999 efs.flush_to = 0;
3000 break;
3001
3002 default:
3003 return;
3004 }
3005
3006 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
3007 (unsigned long) &efs);
3008}
3009
/* Delayed-work handler for the ERTM ack timer: if any received frames
 * have not been acknowledged yet, send an RR/RNR to ack them.  Drops
 * the channel reference taken when the timer was armed.
 */
static void l2cap_ack_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       ack_timer.work);
	u16 frames_to_ack;

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	frames_to_ack = __seq_offset(chan, chan->buffer_seq,
				     chan->last_acked_seq);

	if (frames_to_ack)
		l2cap_send_rr_or_rnr(chan, 0);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
3029
/* Reset all sequencing/SDU reassembly state for @chan and, when the
 * channel uses ERTM, initialize its timers, receive queue and sequence
 * lists.  Returns 0 on success or a negative errno from sequence-list
 * allocation.
 */
int l2cap_ertm_init(struct l2cap_chan *chan)
{
	int err;

	chan->next_tx_seq = 0;
	chan->expected_tx_seq = 0;
	chan->expected_ack_seq = 0;
	chan->unacked_frames = 0;
	chan->buffer_seq = 0;
	chan->frames_sent = 0;
	chan->last_acked_seq = 0;
	chan->sdu = NULL;
	chan->sdu_last_frag = NULL;
	chan->sdu_len = 0;

	skb_queue_head_init(&chan->tx_q);

	/* AMP channel-move state starts out on the BR/EDR controller */
	chan->local_amp_id = AMP_ID_BREDR;
	chan->move_id = AMP_ID_BREDR;
	chan->move_state = L2CAP_MOVE_STABLE;
	chan->move_role = L2CAP_MOVE_ROLE_NONE;

	/* Streaming mode needs none of the ERTM machinery below */
	if (chan->mode != L2CAP_MODE_ERTM)
		return 0;

	chan->rx_state = L2CAP_RX_STATE_RECV;
	chan->tx_state = L2CAP_TX_STATE_XMIT;

	INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
	INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
	INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);

	skb_queue_head_init(&chan->srej_q);

	err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
	if (err < 0)
		return err;

	err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
	if (err < 0)
		/* Don't leak the first list on partial failure */
		l2cap_seq_list_free(&chan->srej_list);

	return err;
}
3074
3075static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
3076{
3077 switch (mode) {
3078 case L2CAP_MODE_STREAMING:
3079 case L2CAP_MODE_ERTM:
3080 if (l2cap_mode_supported(mode, remote_feat_mask))
3081 return mode;
3082 /* fall through */
3083 default:
3084 return L2CAP_MODE_BASIC;
3085 }
3086}
3087
3088static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
3089{
3090 return conn->hs_enabled && conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
3091}
3092
3093static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
3094{
3095 return conn->hs_enabled && conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
3096}
3097
/* Fill in the RFC retransmission and monitor timeouts.  On an AMP
 * link they are derived from the controller's best-effort flush
 * timeout; on BR/EDR the spec defaults are used.
 */
static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
				      struct l2cap_conf_rfc *rfc)
{
	if (chan->local_amp_id != AMP_ID_BREDR && chan->hs_hcon) {
		u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;

		/* Class 1 devices have must have ERTM timeouts
		 * exceeding the Link Supervision Timeout.  The
		 * default Link Supervision Timeout for AMP
		 * controllers is 10 seconds.
		 *
		 * Class 1 devices use 0xffffffff for their
		 * best-effort flush timeout, so the clamping logic
		 * will result in a timeout that meets the above
		 * requirement.  ERTM timeouts are 16-bit values, so
		 * the maximum timeout is 65.535 seconds.
		 */

		/* Convert timeout to milliseconds and round */
		ertm_to = DIV_ROUND_UP_ULL(ertm_to, 1000);

		/* This is the recommended formula for class 2 devices
		 * that start ERTM timers when packets are sent to the
		 * controller.
		 */
		ertm_to = 3 * ertm_to + 500;

		/* Clamp to the 16-bit range of the RFC fields */
		if (ertm_to > 0xffff)
			ertm_to = 0xffff;

		rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);
		rfc->monitor_timeout = rfc->retrans_timeout;
	} else {
		rfc->retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
		rfc->monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
	}
}
3135
/* Clamp or extend the transmit window: if a window larger than the
 * default is wanted and the link supports extended window sizes,
 * switch to the extended control field; otherwise clamp to the
 * standard maximum.  The ack window mirrors the tx window.
 */
static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
{
	if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
	    __l2cap_ews_supported(chan->conn)) {
		/* use extended control field */
		set_bit(FLAG_EXT_CTRL, &chan->flags);
		chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
	} else {
		chan->tx_win = min_t(u16, chan->tx_win,
				     L2CAP_DEFAULT_TX_WINDOW);
		chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
	}
	chan->ack_win = chan->tx_win;
}
3150
/* Build an outgoing Configure Request for @chan into @data.
 *
 * On the first request the channel mode may still be negotiated down
 * (l2cap_select_mode); afterwards the chosen mode dictates which
 * options (MTU, RFC, EFS, EWS, FCS) are emitted.
 *
 * Returns the number of request bytes written.
 */
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
{
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = chan->mode };
	void *ptr = req->data;
	u16 size;

	BT_DBG("chan %p", chan);

	/* Mode selection happens only on the very first config exchange */
	if (chan->num_conf_req || chan->num_conf_rsp)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
			break;

		if (__l2cap_efs_supported(chan->conn))
			set_bit(FLAG_EFS_ENABLE, &chan->flags);

		/* fall through */
	default:
		chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
		break;
	}

done:
	/* Only advertise a non-default MTU */
	if (chan->imtu != L2CAP_DEFAULT_MTU)
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		if (disable_ertm)
			break;

		/* Remote without ERTM/streaming does not need an RFC opt */
		if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
		    !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
			break;

		rfc.mode            = L2CAP_MODE_BASIC;
		rfc.txwin_size      = 0;
		rfc.max_transmit    = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size    = 0;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);
		break;

	case L2CAP_MODE_ERTM:
		rfc.mode            = L2CAP_MODE_ERTM;
		rfc.max_transmit    = chan->max_tx;

		__l2cap_set_ertm_timeouts(chan, &rfc);

		/* PDU must fit the HCI MTU with worst-case framing */
		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_txwin_setup(chan);

		rfc.txwin_size = min_t(u16, chan->tx_win,
				       L2CAP_DEFAULT_TX_WINDOW);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan);

		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
					   chan->tx_win);

		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
			if (chan->fcs == L2CAP_FCS_NONE ||
			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
				chan->fcs = L2CAP_FCS_NONE;
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
						   chan->fcs);
			}
		break;

	case L2CAP_MODE_STREAMING:
		l2cap_txwin_setup(chan);
		rfc.mode            = L2CAP_MODE_STREAMING;
		rfc.txwin_size      = 0;
		rfc.max_transmit    = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;

		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan);

		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
			if (chan->fcs == L2CAP_FCS_NONE ||
			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
				chan->fcs = L2CAP_FCS_NONE;
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
						   chan->fcs);
			}
		break;
	}

	req->dcid  = cpu_to_le16(chan->dcid);
	req->flags = cpu_to_le16(0);

	return ptr - data;
}
3271
/* Parse the peer's Configure Request (saved in chan->conf_req) and
 * build the Configure Response payload in @data.
 *
 * Hardening: every option length (olen) reported by the remote is
 * validated against the expected size before its value is used, and
 * parsing stops if an option claims more data than the request holds
 * (len going negative).  Previously a malformed option could cause
 * reads past the received buffer (e.g. memcpy of sizeof(rfc) from a
 * short option), and remote_efs was set even for a truncated EFS.
 *
 * Returns the number of response bytes written, or a negative errno
 * when the request must be refused outright.
 */
static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
{
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;
	void *req = chan->conf_req;
	int len = chan->conf_len;
	int type, hint, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;
	u8 remote_efs = 0;
	u16 mtu = L2CAP_DEFAULT_MTU;
	u16 result = L2CAP_CONF_SUCCESS;
	u16 size;

	BT_DBG("chan %p", chan);

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
		/* The option claimed more data than the request holds;
		 * stop before reading past the buffer.
		 */
		if (len < 0)
			break;

		hint = type & L2CAP_CONF_HINT;
		type &= L2CAP_CONF_MASK;

		switch (type) {
		case L2CAP_CONF_MTU:
			if (olen != 2)
				break;
			mtu = val;
			break;

		case L2CAP_CONF_FLUSH_TO:
			if (olen != 2)
				break;
			chan->flush_to = val;
			break;

		case L2CAP_CONF_QOS:
			break;

		case L2CAP_CONF_RFC:
			if (olen != sizeof(rfc))
				break;
			memcpy(&rfc, (void *) val, olen);
			break;

		case L2CAP_CONF_FCS:
			if (olen != 1)
				break;
			if (val == L2CAP_FCS_NONE)
				set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
			break;

		case L2CAP_CONF_EFS:
			if (olen != sizeof(efs))
				break;
			remote_efs = 1;
			memcpy(&efs, (void *) val, olen);
			break;

		case L2CAP_CONF_EWS:
			if (olen != 2)
				break;
			if (!chan->conn->hs_enabled)
				return -ECONNREFUSED;
			set_bit(FLAG_EXT_CTRL, &chan->flags);
			set_bit(CONF_EWS_RECV, &chan->conf_state);
			chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
			chan->remote_tx_win = val;
			break;

		default:
			if (hint)
				break;
			/* Unknown non-hint option: reject and echo its type */
			result = L2CAP_CONF_UNKNOWN;
			*((u8 *) ptr++) = type;
			break;
		}
	}

	if (chan->num_conf_rsp || chan->num_conf_req > 1)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
			chan->mode = l2cap_select_mode(rfc.mode,
						       chan->conn->feat_mask);
			break;
		}

		if (remote_efs) {
			if (__l2cap_efs_supported(chan->conn))
				set_bit(FLAG_EFS_ENABLE, &chan->flags);
			else
				return -ECONNREFUSED;
		}

		if (chan->mode != rfc.mode)
			return -ECONNREFUSED;

		break;
	}

done:
	if (chan->mode != rfc.mode) {
		result = L2CAP_CONF_UNACCEPT;
		rfc.mode = chan->mode;

		if (chan->num_conf_rsp == 1)
			return -ECONNREFUSED;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);
	}

	if (result == L2CAP_CONF_SUCCESS) {
		/* Configure output options and let the other side know
		 * which ones we don't like. */

		if (mtu < L2CAP_DEFAULT_MIN_MTU)
			result = L2CAP_CONF_UNACCEPT;
		else {
			chan->omtu = mtu;
			set_bit(CONF_MTU_DONE, &chan->conf_state);
		}
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);

		if (remote_efs) {
			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != chan->local_stype) {

				result = L2CAP_CONF_UNACCEPT;

				if (chan->num_conf_req >= 1)
					return -ECONNREFUSED;

				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs);
			} else {
				/* Send PENDING Conf Rsp */
				result = L2CAP_CONF_PENDING;
				set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
			}
		}

		switch (rfc.mode) {
		case L2CAP_MODE_BASIC:
			chan->fcs = L2CAP_FCS_NONE;
			set_bit(CONF_MODE_DONE, &chan->conf_state);
			break;

		case L2CAP_MODE_ERTM:
			if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
				chan->remote_tx_win = rfc.txwin_size;
			else
				rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;

			chan->remote_max_tx = rfc.max_transmit;

			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			__l2cap_set_ertm_timeouts(chan, &rfc);

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					   sizeof(rfc), (unsigned long) &rfc);

			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->remote_id = efs.id;
				chan->remote_stype = efs.stype;
				chan->remote_msdu = le16_to_cpu(efs.msdu);
				chan->remote_flush_to =
					le32_to_cpu(efs.flush_to);
				chan->remote_acc_lat =
					le32_to_cpu(efs.acc_lat);
				chan->remote_sdu_itime =
					le32_to_cpu(efs.sdu_itime);
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs);
			}
			break;

		case L2CAP_MODE_STREAMING:
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
					   (unsigned long) &rfc);

			break;

		default:
			result = L2CAP_CONF_UNACCEPT;

			memset(&rfc, 0, sizeof(rfc));
			rfc.mode = chan->mode;
		}

		if (result == L2CAP_CONF_SUCCESS)
			set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
	}
	rsp->scid   = cpu_to_le16(chan->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags  = cpu_to_le16(0);

	return ptr - data;
}
3485
3486static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
3487 void *data, u16 *result)
3488{
3489 struct l2cap_conf_req *req = data;
3490 void *ptr = req->data;
3491 int type, olen;
3492 unsigned long val;
3493 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3494 struct l2cap_conf_efs efs;
3495
3496 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
3497
3498 while (len >= L2CAP_CONF_OPT_SIZE) {
3499 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3500
3501 switch (type) {
3502 case L2CAP_CONF_MTU:
3503 if (val < L2CAP_DEFAULT_MIN_MTU) {
3504 *result = L2CAP_CONF_UNACCEPT;
3505 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3506 } else
3507 chan->imtu = val;
3508 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
3509 break;
3510
3511 case L2CAP_CONF_FLUSH_TO:
3512 chan->flush_to = val;
3513 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
3514 2, chan->flush_to);
3515 break;
3516
3517 case L2CAP_CONF_RFC:
3518 if (olen == sizeof(rfc))
3519 memcpy(&rfc, (void *)val, olen);
3520
3521 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
3522 rfc.mode != chan->mode)
3523 return -ECONNREFUSED;
3524
3525 chan->fcs = 0;
3526
3527 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3528 sizeof(rfc), (unsigned long) &rfc);
3529 break;
3530
3531 case L2CAP_CONF_EWS:
3532 chan->ack_win = min_t(u16, val, chan->ack_win);
3533 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3534 chan->tx_win);
3535 break;
3536
3537 case L2CAP_CONF_EFS:
3538 if (olen == sizeof(efs))
3539 memcpy(&efs, (void *)val, olen);
3540
3541 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3542 efs.stype != L2CAP_SERV_NOTRAFIC &&
3543 efs.stype != chan->local_stype)
3544 return -ECONNREFUSED;
3545
3546 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
3547 (unsigned long) &efs);
3548 break;
3549
3550 case L2CAP_CONF_FCS:
3551 if (*result == L2CAP_CONF_PENDING)
3552 if (val == L2CAP_FCS_NONE)
3553 set_bit(CONF_RECV_NO_FCS,
3554 &chan->conf_state);
3555 break;
3556 }
3557 }
3558
3559 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
3560 return -ECONNREFUSED;
3561
3562 chan->mode = rfc.mode;
3563
3564 if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
3565 switch (rfc.mode) {
3566 case L2CAP_MODE_ERTM:
3567 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3568 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3569 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3570 if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
3571 chan->ack_win = min_t(u16, chan->ack_win,
3572 rfc.txwin_size);
3573
3574 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3575 chan->local_msdu = le16_to_cpu(efs.msdu);
3576 chan->local_sdu_itime =
3577 le32_to_cpu(efs.sdu_itime);
3578 chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
3579 chan->local_flush_to =
3580 le32_to_cpu(efs.flush_to);
3581 }
3582 break;
3583
3584 case L2CAP_MODE_STREAMING:
3585 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3586 }
3587 }
3588
3589 req->dcid = cpu_to_le16(chan->dcid);
3590 req->flags = cpu_to_le16(0);
3591
3592 return ptr - data;
3593}
3594
3595static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
3596 u16 result, u16 flags)
3597{
3598 struct l2cap_conf_rsp *rsp = data;
3599 void *ptr = rsp->data;
3600
3601 BT_DBG("chan %p", chan);
3602
3603 rsp->scid = cpu_to_le16(chan->dcid);
3604 rsp->result = cpu_to_le16(result);
3605 rsp->flags = cpu_to_le16(flags);
3606
3607 return ptr - data;
3608}
3609
3610void __l2cap_le_connect_rsp_defer(struct l2cap_chan *chan)
3611{
3612 struct l2cap_le_conn_rsp rsp;
3613 struct l2cap_conn *conn = chan->conn;
3614
3615 BT_DBG("chan %p", chan);
3616
3617 rsp.dcid = cpu_to_le16(chan->scid);
3618 rsp.mtu = cpu_to_le16(chan->imtu);
3619 rsp.mps = cpu_to_le16(chan->mps);
3620 rsp.credits = cpu_to_le16(chan->rx_credits);
3621 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
3622
3623 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
3624 &rsp);
3625}
3626
3627void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
3628{
3629 struct l2cap_conn_rsp rsp;
3630 struct l2cap_conn *conn = chan->conn;
3631 u8 buf[128];
3632 u8 rsp_code;
3633
3634 rsp.scid = cpu_to_le16(chan->dcid);
3635 rsp.dcid = cpu_to_le16(chan->scid);
3636 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
3637 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
3638
3639 if (chan->hs_hcon)
3640 rsp_code = L2CAP_CREATE_CHAN_RSP;
3641 else
3642 rsp_code = L2CAP_CONN_RSP;
3643
3644 BT_DBG("chan %p rsp_code %u", chan, rsp_code);
3645
3646 l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);
3647
3648 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3649 return;
3650
3651 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3652 l2cap_build_conf_req(chan, buf), buf);
3653 chan->num_conf_req++;
3654}
3655
/* Extract the negotiated RFC (and extended window size) parameters
 * from a successful Configure Response and apply them to the channel.
 * Only meaningful for ERTM and streaming mode channels.
 */
static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
{
	int type, olen;
	unsigned long val;
	/* Use sane default values in case a misbehaving remote device
	 * did not send an RFC or extended window size option.
	 */
	u16 txwin_ext = chan->ack_win;
	struct l2cap_conf_rfc rfc = {
		.mode = chan->mode,
		.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
		.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
		.max_pdu_size = cpu_to_le16(chan->imtu),
		.txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
	};

	BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);

	if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
		return;

	/* Walk the option list, remembering the last RFC and EWS values. */
	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);

		switch (type) {
		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *)val, olen);
			break;
		case L2CAP_CONF_EWS:
			txwin_ext = val;
			break;
		}
	}

	switch (rfc.mode) {
	case L2CAP_MODE_ERTM:
		chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
		chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
		chan->mps = le16_to_cpu(rfc.max_pdu_size);
		/* With extended control the tx window comes from the EWS
		 * option, otherwise from the RFC option itself.
		 */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
		else
			chan->ack_win = min_t(u16, chan->ack_win,
					      rfc.txwin_size);
		break;
	case L2CAP_MODE_STREAMING:
		chan->mps = le16_to_cpu(rfc.max_pdu_size);
	}
}
3706
3707static inline int l2cap_command_rej(struct l2cap_conn *conn,
3708 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3709 u8 *data)
3710{
3711 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
3712
3713 if (cmd_len < sizeof(*rej))
3714 return -EPROTO;
3715
3716 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
3717 return 0;
3718
3719 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
3720 cmd->ident == conn->info_ident) {
3721 cancel_delayed_work(&conn->info_timer);
3722
3723 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3724 conn->info_ident = 0;
3725
3726 l2cap_conn_start(conn);
3727 }
3728
3729 return 0;
3730}
3731
/* Handle an incoming Connection Request (or an AMP Create Channel
 * Request routed here): look up a listening channel for the PSM, run
 * security checks, create the new channel and send the response.
 * Returns the new channel, or NULL if none was created.
 */
static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd,
					u8 *data, u8 rsp_code, u8 amp_id)
{
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct l2cap_chan *chan = NULL, *pchan;
	int result, status = L2CAP_CS_NO_INFO;

	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
					 &conn->hcon->dst, ACL_LINK);
	if (!pchan) {
		result = L2CAP_CR_BAD_PSM;
		goto sendresp;
	}

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(pchan);

	/* Check if the ACL is secure enough (if not SDP) */
	if (psm != cpu_to_le16(L2CAP_PSM_SDP) &&
	    !hci_conn_check_link_mode(conn->hcon)) {
		conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
		result = L2CAP_CR_SEC_BLOCK;
		goto response;
	}

	result = L2CAP_CR_NO_MEM;

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid))
		goto response;

	chan = pchan->ops->new_connection(pchan);
	if (!chan)
		goto response;

	/* For certain devices (ex: HID mouse), support for authentication,
	 * pairing and bonding is optional. For such devices, inorder to avoid
	 * the ACL alive for too long after L2CAP disconnection, reset the ACL
	 * disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
	 */
	conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;

	bacpy(&chan->src, &conn->hcon->src);
	bacpy(&chan->dst, &conn->hcon->dst);
	chan->src_type = bdaddr_type(conn->hcon, conn->hcon->src_type);
	chan->dst_type = bdaddr_type(conn->hcon, conn->hcon->dst_type);
	chan->psm = psm;
	chan->dcid = scid;
	chan->local_amp_id = amp_id;

	__l2cap_chan_add(conn, chan);

	dcid = chan->scid;

	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	chan->ident = cmd->ident;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
		if (l2cap_chan_check_security(chan, false)) {
			if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
				/* Let the socket owner accept/reject. */
				l2cap_state_change(chan, BT_CONNECT2);
				result = L2CAP_CR_PEND;
				status = L2CAP_CS_AUTHOR_PEND;
				chan->ops->defer(chan);
			} else {
				/* Force pending result for AMP controllers.
				 * The connection will succeed after the
				 * physical link is up.
				 */
				if (amp_id == AMP_ID_BREDR) {
					l2cap_state_change(chan, BT_CONFIG);
					result = L2CAP_CR_SUCCESS;
				} else {
					l2cap_state_change(chan, BT_CONNECT2);
					result = L2CAP_CR_PEND;
				}
				status = L2CAP_CS_NO_INFO;
			}
		} else {
			/* Security upgrade still in progress. */
			l2cap_state_change(chan, BT_CONNECT2);
			result = L2CAP_CR_PEND;
			status = L2CAP_CS_AUTHEN_PEND;
		}
	} else {
		/* Feature discovery has not finished yet. */
		l2cap_state_change(chan, BT_CONNECT2);
		result = L2CAP_CR_PEND;
		status = L2CAP_CS_NO_INFO;
	}

response:
	l2cap_chan_unlock(pchan);
	mutex_unlock(&conn->chan_lock);
	l2cap_chan_put(pchan);

sendresp:
	rsp.scid = cpu_to_le16(scid);
	rsp.dcid = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);

	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
		/* Start feature discovery now; the channel proceeds when
		 * the Information Response arrives.
		 */
		struct l2cap_info_req info;
		info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

		l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
			       sizeof(info), &info);
	}

	if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
	    result == L2CAP_CR_SUCCESS) {
		u8 buf[128];
		set_bit(CONF_REQ_SENT, &chan->conf_state);
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf), buf);
		chan->num_conf_req++;
	}

	return chan;
}
3866
/* Handle an incoming Connection Request PDU: notify the management
 * interface about the remote device (once per ACL link) and hand the
 * request to l2cap_connect().
 */
static int l2cap_connect_req(struct l2cap_conn *conn,
			     struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
	struct hci_dev *hdev = conn->hcon->hdev;
	struct hci_conn *hcon = conn->hcon;

	if (cmd_len < sizeof(struct l2cap_conn_req))
		return -EPROTO;

	hci_dev_lock(hdev);
	/* The test_and_set_bit() ensures mgmt is told only once. */
	if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
	    !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags))
		mgmt_device_connected(hdev, hcon, 0, NULL, 0);
	hci_dev_unlock(hdev);

	l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
	return 0;
}
3885
/* Handle a Connection Response or Create Channel Response.  A source
 * CID of zero means the channel must be found by command identifier
 * (pending responses need not carry a valid source CID yet).
 */
static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				    u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct l2cap_chan *chan;
	u8 req[128];
	int err;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
	       dcid, scid, result, status);

	mutex_lock(&conn->chan_lock);

	if (scid) {
		chan = __l2cap_get_chan_by_scid(conn, scid);
		if (!chan) {
			err = -EBADSLT;
			goto unlock;
		}
	} else {
		chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
		if (!chan) {
			err = -EBADSLT;
			goto unlock;
		}
	}

	err = 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_SUCCESS:
		l2cap_state_change(chan, BT_CONFIG);
		chan->ident = 0;
		chan->dcid = dcid;
		clear_bit(CONF_CONNECT_PEND, &chan->conf_state);

		/* Start configuration unless a request is already out. */
		if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
			break;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, req), req);
		chan->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		set_bit(CONF_CONNECT_PEND, &chan->conf_state);
		break;

	default:
		/* Any other result is a refusal: drop the channel. */
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	l2cap_chan_unlock(chan);

unlock:
	mutex_unlock(&conn->chan_lock);

	return err;
}
3958
3959static inline void set_default_fcs(struct l2cap_chan *chan)
3960{
3961 /* FCS is enabled only in ERTM or streaming mode, if one or both
3962 * sides request it.
3963 */
3964 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
3965 chan->fcs = L2CAP_FCS_NONE;
3966 else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
3967 chan->fcs = L2CAP_FCS_CRC16;
3968}
3969
3970static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
3971 u8 ident, u16 flags)
3972{
3973 struct l2cap_conn *conn = chan->conn;
3974
3975 BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
3976 flags);
3977
3978 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3979 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3980
3981 l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
3982 l2cap_build_conf_rsp(chan, data,
3983 L2CAP_CONF_SUCCESS, flags), data);
3984}
3985
3986static void cmd_reject_invalid_cid(struct l2cap_conn *conn, u8 ident,
3987 u16 scid, u16 dcid)
3988{
3989 struct l2cap_cmd_rej_cid rej;
3990
3991 rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
3992 rej.scid = __cpu_to_le16(scid);
3993 rej.dcid = __cpu_to_le16(dcid);
3994
3995 l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
3996}
3997
/* Handle an incoming Configure Request.  Options may arrive split
 * across several requests (continuation flag); they accumulate in
 * chan->conf_req until complete and are then parsed in one pass.
 */
static inline int l2cap_config_req(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rsp[64];
	struct l2cap_chan *chan;
	int len, err = 0;

	if (cmd_len < sizeof(*req))
		return -EPROTO;

	dcid = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	chan = l2cap_get_chan_by_scid(conn, dcid);
	if (!chan) {
		cmd_reject_invalid_cid(conn, cmd->ident, dcid, 0);
		return 0;
	}

	/* Configuration is only valid in the connecting states. */
	if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
		cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
				       chan->dcid);
		goto unlock;
	}

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (chan->conf_len + len > sizeof(chan->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
			       L2CAP_CONF_REJECT, flags), rsp);
		goto unlock;
	}

	/* Store config. */
	memcpy(chan->conf_req + chan->conf_len, req->data, len);
	chan->conf_len += len;

	if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
			       L2CAP_CONF_SUCCESS, flags), rsp);
		goto unlock;
	}

	/* Complete config. */
	len = l2cap_parse_conf_req(chan, rsp);
	if (len < 0) {
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto unlock;
	}

	chan->ident = cmd->ident;
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
	chan->num_conf_rsp++;

	/* Reset config buffer. */
	chan->conf_len = 0;

	if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
		goto unlock;

	/* Both directions are configured: finish channel setup. */
	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);

		goto unlock;
	}

	/* Our own Configure Request has not gone out yet: send it. */
	if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
		u8 buf[64];
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf), buf);
		chan->num_conf_req++;
	}

	/* Got Conf Rsp PENDING from remote side and assume we sent
	   Conf Rsp PENDING in the code above */
	if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
	    test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {

		/* check compatibility */

		/* Send rsp for BR/EDR channel */
		if (!chan->hs_hcon)
			l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
		else
			chan->ident = cmd->ident;
	}

unlock:
	l2cap_chan_unlock(chan);
	return err;
}
4106
/* Handle a Configure Response.  Depending on the result we finish
 * configuration, retry with adjusted options, or tear the channel
 * down.
 */
static inline int l2cap_config_rsp(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
	u16 scid, flags, result;
	struct l2cap_chan *chan;
	int len = cmd_len - sizeof(*rsp);
	int err = 0;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	scid = __le16_to_cpu(rsp->scid);
	flags = __le16_to_cpu(rsp->flags);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
	       result, len);

	chan = l2cap_get_chan_by_scid(conn, scid);
	if (!chan)
		return 0;

	switch (result) {
	case L2CAP_CONF_SUCCESS:
		l2cap_conf_rfc_get(chan, rsp->data, len);
		clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
		break;

	case L2CAP_CONF_PENDING:
		set_bit(CONF_REM_CONF_PEND, &chan->conf_state);

		if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
			char buf[64];

			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
						   buf, &result);
			if (len < 0) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			/* BR/EDR channels answer right away; AMP channels
			 * wait for the logical link to come up first.
			 */
			if (!chan->hs_hcon) {
				l2cap_send_efs_conf_rsp(chan, buf, cmd->ident,
							0);
			} else {
				if (l2cap_check_efs(chan)) {
					amp_create_logical_link(chan);
					chan->ident = cmd->ident;
				}
			}
		}
		goto done;

	case L2CAP_CONF_UNACCEPT:
		if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
			char req[64];

			if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			/* throw out any old stored conf requests */
			result = L2CAP_CONF_SUCCESS;
			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
						   req, &result);
			if (len < 0) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			l2cap_send_cmd(conn, l2cap_get_ident(conn),
				       L2CAP_CONF_REQ, len, req);
			chan->num_conf_req++;
			if (result != L2CAP_CONF_SUCCESS)
				goto done;
			break;
		}
		/* Too many unaccepted responses: fall through to give up */

	default:
		l2cap_chan_set_err(chan, ECONNRESET);

		__set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto done;
	}

	if (flags & L2CAP_CONF_FLAG_CONTINUATION)
		goto done;

	set_bit(CONF_INPUT_DONE, &chan->conf_state);

	if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);
	}

done:
	l2cap_chan_unlock(chan);
	return err;
}
4218
/* Handle a Disconnection Request: acknowledge it and tear the
 * addressed channel down.
 */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct l2cap_chan *chan;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	mutex_lock(&conn->chan_lock);

	/* The request's dcid is our local (source) CID. */
	chan = __l2cap_get_chan_by_scid(conn, dcid);
	if (!chan) {
		mutex_unlock(&conn->chan_lock);
		cmd_reject_invalid_cid(conn, cmd->ident, dcid, scid);
		return 0;
	}

	l2cap_chan_lock(chan);

	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.scid = cpu_to_le16(chan->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	chan->ops->set_shutdown(chan);

	/* Hold an extra reference so the channel survives the delete
	 * until the close callback below has run.
	 */
	l2cap_chan_hold(chan);
	l2cap_chan_del(chan, ECONNRESET);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);

	return 0;
}
4265
/* Handle a Disconnection Response: finish tearing down the local
 * channel that requested the disconnect.
 */
static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
	u16 dcid, scid;
	struct l2cap_chan *chan;

	if (cmd_len != sizeof(*rsp))
		return -EPROTO;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);

	mutex_lock(&conn->chan_lock);

	chan = __l2cap_get_chan_by_scid(conn, scid);
	if (!chan) {
		mutex_unlock(&conn->chan_lock);
		return 0;
	}

	l2cap_chan_lock(chan);

	/* Hold an extra reference so the channel outlives the delete
	 * until the close callback below has run.
	 */
	l2cap_chan_hold(chan);
	l2cap_chan_del(chan, 0);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);

	return 0;
}
4304
4305static inline int l2cap_information_req(struct l2cap_conn *conn,
4306 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4307 u8 *data)
4308{
4309 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
4310 u16 type;
4311
4312 if (cmd_len != sizeof(*req))
4313 return -EPROTO;
4314
4315 type = __le16_to_cpu(req->type);
4316
4317 BT_DBG("type 0x%4.4x", type);
4318
4319 if (type == L2CAP_IT_FEAT_MASK) {
4320 u8 buf[8];
4321 u32 feat_mask = l2cap_feat_mask;
4322 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4323 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
4324 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
4325 if (!disable_ertm)
4326 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
4327 | L2CAP_FEAT_FCS;
4328 if (conn->hs_enabled)
4329 feat_mask |= L2CAP_FEAT_EXT_FLOW
4330 | L2CAP_FEAT_EXT_WINDOW;
4331
4332 put_unaligned_le32(feat_mask, rsp->data);
4333 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4334 buf);
4335 } else if (type == L2CAP_IT_FIXED_CHAN) {
4336 u8 buf[12];
4337 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4338
4339 if (conn->hs_enabled)
4340 l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
4341 else
4342 l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;
4343
4344 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4345 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
4346 memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
4347 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4348 buf);
4349 } else {
4350 struct l2cap_info_rsp rsp;
4351 rsp.type = cpu_to_le16(type);
4352 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
4353 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
4354 &rsp);
4355 }
4356
4357 return 0;
4358}
4359
/* Handle an Information Response during connection-level feature
 * discovery.  When discovery completes (or fails), pending channels
 * are started via l2cap_conn_start().
 */
static inline int l2cap_information_rsp(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
					u8 *data)
{
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
	u16 type, result;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	type = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);

	/* L2CAP Info req/rsp are unbound to channels, add extra checks */
	if (cmd->ident != conn->info_ident ||
	    conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
		return 0;

	cancel_delayed_work(&conn->info_timer);

	if (result != L2CAP_IR_SUCCESS) {
		/* Remote refused: treat discovery as done and proceed. */
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);

		return 0;
	}

	switch (type) {
	case L2CAP_IT_FEAT_MASK:
		conn->feat_mask = get_unaligned_le32(rsp->data);

		/* Follow up with a fixed channel query when supported;
		 * otherwise discovery is finished.
		 */
		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
			struct l2cap_info_req req;
			req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);

			conn->info_ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, conn->info_ident,
				       L2CAP_INFO_REQ, sizeof(req), &req);
		} else {
			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
			conn->info_ident = 0;

			l2cap_conn_start(conn);
		}
		break;

	case L2CAP_IT_FIXED_CHAN:
		conn->fixed_chan_mask = rsp->data[0];
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
		break;
	}

	return 0;
}
4422
/* Handle an AMP Create Channel Request.  Controller id 0 maps to a
 * plain BR/EDR connection; otherwise the AMP controller is validated
 * and the new channel gets bound to its high speed link.
 */
static int l2cap_create_channel_req(struct l2cap_conn *conn,
				    struct l2cap_cmd_hdr *cmd,
				    u16 cmd_len, void *data)
{
	struct l2cap_create_chan_req *req = data;
	struct l2cap_create_chan_rsp rsp;
	struct l2cap_chan *chan;
	struct hci_dev *hdev;
	u16 psm, scid;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	if (!conn->hs_enabled)
		return -EINVAL;

	psm = le16_to_cpu(req->psm);
	scid = le16_to_cpu(req->scid);

	BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);

	/* For controller id 0 make BR/EDR connection */
	if (req->amp_id == AMP_ID_BREDR) {
		l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
			      req->amp_id);
		return 0;
	}

	/* Validate AMP controller id */
	hdev = hci_dev_get(req->amp_id);
	if (!hdev)
		goto error;

	if (hdev->dev_type != HCI_AMP || !test_bit(HCI_UP, &hdev->flags)) {
		hci_dev_put(hdev);
		goto error;
	}

	chan = l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
			     req->amp_id);
	if (chan) {
		struct amp_mgr *mgr = conn->hcon->amp_mgr;
		struct hci_conn *hs_hcon;

		/* The AMP link must already exist for this peer. */
		hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK,
						  &conn->hcon->dst);
		if (!hs_hcon) {
			hci_dev_put(hdev);
			cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
					       chan->dcid);
			return 0;
		}

		BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon);

		mgr->bredr_chan = chan;
		chan->hs_hcon = hs_hcon;
		chan->fcs = L2CAP_FCS_NONE;
		conn->mtu = hdev->block_mtu;
	}

	hci_dev_put(hdev);

	return 0;

error:
	rsp.dcid = 0;
	rsp.scid = cpu_to_le16(scid);
	rsp.result = cpu_to_le16(L2CAP_CR_BAD_AMP);
	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
		       sizeof(rsp), &rsp);

	return 0;
}
4499
4500static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id)
4501{
4502 struct l2cap_move_chan_req req;
4503 u8 ident;
4504
4505 BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id);
4506
4507 ident = l2cap_get_ident(chan->conn);
4508 chan->ident = ident;
4509
4510 req.icid = cpu_to_le16(chan->scid);
4511 req.dest_amp_id = dest_amp_id;
4512
4513 l2cap_send_cmd(chan->conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req),
4514 &req);
4515
4516 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4517}
4518
4519static void l2cap_send_move_chan_rsp(struct l2cap_chan *chan, u16 result)
4520{
4521 struct l2cap_move_chan_rsp rsp;
4522
4523 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4524
4525 rsp.icid = cpu_to_le16(chan->dcid);
4526 rsp.result = cpu_to_le16(result);
4527
4528 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_RSP,
4529 sizeof(rsp), &rsp);
4530}
4531
4532static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result)
4533{
4534 struct l2cap_move_chan_cfm cfm;
4535
4536 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4537
4538 chan->ident = l2cap_get_ident(chan->conn);
4539
4540 cfm.icid = cpu_to_le16(chan->scid);
4541 cfm.result = cpu_to_le16(result);
4542
4543 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_CFM,
4544 sizeof(cfm), &cfm);
4545
4546 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4547}
4548
4549static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid)
4550{
4551 struct l2cap_move_chan_cfm cfm;
4552
4553 BT_DBG("conn %p, icid 0x%4.4x", conn, icid);
4554
4555 cfm.icid = cpu_to_le16(icid);
4556 cfm.result = cpu_to_le16(L2CAP_MC_UNCONFIRMED);
4557
4558 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM,
4559 sizeof(cfm), &cfm);
4560}
4561
4562static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4563 u16 icid)
4564{
4565 struct l2cap_move_chan_cfm_rsp rsp;
4566
4567 BT_DBG("icid 0x%4.4x", icid);
4568
4569 rsp.icid = cpu_to_le16(icid);
4570 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
4571}
4572
/* Drop the channel's references to its high speed logical link. */
static void __release_logical_link(struct l2cap_chan *chan)
{
	chan->hs_hchan = NULL;
	chan->hs_hcon = NULL;

	/* Placeholder - release the logical link */
}
4580
/* A logical link could not be brought up for this channel: abort the
 * in-progress channel create or channel move accordingly.
 */
static void l2cap_logical_fail(struct l2cap_chan *chan)
{
	/* Logical link setup failed */
	if (chan->state != BT_CONNECTED) {
		/* Create channel failure, disconnect */
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	switch (chan->move_role) {
	case L2CAP_MOVE_ROLE_RESPONDER:
		l2cap_move_done(chan);
		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_SUPP);
		break;
	case L2CAP_MOVE_ROLE_INITIATOR:
		if (chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_COMP ||
		    chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_CFM) {
			/* Remote has only sent pending or
			 * success responses, clean up
			 */
			l2cap_move_done(chan);
		}

		/* Other amp move states imply that the move
		 * has already aborted
		 */
		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
		break;
	}
}
4611
/* A logical link came up for a channel still being created: send the
 * pending EFS configure response and, if configuration has finished
 * in both directions, bring the channel up.
 */
static void l2cap_logical_finish_create(struct l2cap_chan *chan,
					struct hci_chan *hchan)
{
	struct l2cap_conf_rsp rsp;

	chan->hs_hchan = hchan;
	chan->hs_hcon->l2cap_data = chan->conn;

	/* rsp is only scratch space for building the response PDU. */
	l2cap_send_efs_conf_rsp(chan, &rsp, chan->ident, 0);

	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
		int err;

		set_default_fcs(chan);

		err = l2cap_ertm_init(chan);
		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);
	}
}
4634
/* A logical link came up for a channel that is being moved between
 * controllers: advance the move state machine.
 */
static void l2cap_logical_finish_move(struct l2cap_chan *chan,
				      struct hci_chan *hchan)
{
	chan->hs_hcon = hchan->conn;
	chan->hs_hcon->l2cap_data = chan->conn;

	BT_DBG("move_state %d", chan->move_state);

	switch (chan->move_state) {
	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
		/* Move confirm will be sent after a success
		 * response is received
		 */
		chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
		break;
	case L2CAP_MOVE_WAIT_LOGICAL_CFM:
		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
			/* Defer until the local busy condition clears. */
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
		} else if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		} else if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
		}
		break;
	default:
		/* Move was not in expected state, free the channel */
		__release_logical_link(chan);

		chan->move_state = L2CAP_MOVE_STABLE;
	}
}
4668
4669/* Call with chan locked */
4670void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
4671 u8 status)
4672{
4673 BT_DBG("chan %p, hchan %p, status %d", chan, hchan, status);
4674
4675 if (status) {
4676 l2cap_logical_fail(chan);
4677 __release_logical_link(chan);
4678 return;
4679 }
4680
4681 if (chan->state != BT_CONNECTED) {
4682 /* Ignore logical link if channel is on BR/EDR */
4683 if (chan->local_amp_id != AMP_ID_BREDR)
4684 l2cap_logical_finish_create(chan, hchan);
4685 } else {
4686 l2cap_logical_finish_move(chan, hchan);
4687 }
4688}
4689
4690void l2cap_move_start(struct l2cap_chan *chan)
4691{
4692 BT_DBG("chan %p", chan);
4693
4694 if (chan->local_amp_id == AMP_ID_BREDR) {
4695 if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED)
4696 return;
4697 chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4698 chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
4699 /* Placeholder - start physical link setup */
4700 } else {
4701 chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4702 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4703 chan->move_id = 0;
4704 l2cap_move_setup(chan);
4705 l2cap_send_move_chan_req(chan, 0);
4706 }
4707}
4708
/* Continue channel setup once the AMP physical link decision is in.
 * For an outgoing channel we proceed on the AMP controller or fall
 * back to BR/EDR; for an incoming one we answer the pending Create
 * Channel Request and, on success, start configuration.
 */
static void l2cap_do_create(struct l2cap_chan *chan, int result,
			    u8 local_amp_id, u8 remote_amp_id)
{
	BT_DBG("chan %p state %s %u -> %u", chan, state_to_string(chan->state),
	       local_amp_id, remote_amp_id);

	chan->fcs = L2CAP_FCS_NONE;

	/* Outgoing channel on AMP */
	if (chan->state == BT_CONNECT) {
		if (result == L2CAP_CR_SUCCESS) {
			chan->local_amp_id = local_amp_id;
			l2cap_send_create_chan_req(chan, remote_amp_id);
		} else {
			/* Revert to BR/EDR connect */
			l2cap_send_conn_req(chan);
		}

		return;
	}

	/* Incoming channel on AMP */
	if (__l2cap_no_conn_pending(chan)) {
		struct l2cap_conn_rsp rsp;
		char buf[128];
		rsp.scid = cpu_to_le16(chan->dcid);
		rsp.dcid = cpu_to_le16(chan->scid);

		if (result == L2CAP_CR_SUCCESS) {
			/* Send successful response */
			rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
		} else {
			/* Send negative response */
			rsp.result = cpu_to_le16(L2CAP_CR_NO_MEM);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
		}

		l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_RSP,
			       sizeof(rsp), &rsp);

		if (result == L2CAP_CR_SUCCESS) {
			/* Accepted: move to config and send our request. */
			l2cap_state_change(chan, BT_CONFIG);
			set_bit(CONF_REQ_SENT, &chan->conf_state);
			l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
				       L2CAP_CONF_REQ,
				       l2cap_build_conf_req(chan, buf), buf);
			chan->num_conf_req++;
		}
	}
}
4760
/* Start an initiator-role move toward @remote_amp_id.  Called with chan
 * locked.  l2cap_move_setup() pauses data transfer first; the channel
 * then waits in L2CAP_MOVE_WAIT_RSP for the remote's Move Channel
 * Response.  @local_amp_id is recorded as the destination (move_id)
 * used once the move completes.
 */
static void l2cap_do_move_initiate(struct l2cap_chan *chan, u8 local_amp_id,
				   u8 remote_amp_id)
{
	l2cap_move_setup(chan);
	chan->move_id = local_amp_id;
	chan->move_state = L2CAP_MOVE_WAIT_RSP;

	l2cap_send_move_chan_req(chan, remote_amp_id);
}
4770
/* Answer an incoming channel move after the physical link outcome is
 * known.  Called with chan locked.
 *
 * NOTE(review): @result is currently unused and hchan is always NULL
 * (the logical-link lookup below is a placeholder), so only the final
 * "not allowed" branch can execute until the AMP lookup is implemented.
 */
static void l2cap_do_move_respond(struct l2cap_chan *chan, int result)
{
	struct hci_chan *hchan = NULL;

	/* Placeholder - get hci_chan for logical link */

	if (hchan) {
		if (hchan->state == BT_CONNECTED) {
			/* Logical link is ready to go */
			chan->hs_hcon = hchan->conn;
			chan->hs_hcon->l2cap_data = chan->conn;
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);

			l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
		} else {
			/* Wait for logical link to be ready */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		}
	} else {
		/* Logical link not available */
		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_ALLOWED);
	}
}
4795
4796static void l2cap_do_move_cancel(struct l2cap_chan *chan, int result)
4797{
4798 if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4799 u8 rsp_result;
4800 if (result == -EINVAL)
4801 rsp_result = L2CAP_MR_BAD_ID;
4802 else
4803 rsp_result = L2CAP_MR_NOT_ALLOWED;
4804
4805 l2cap_send_move_chan_rsp(chan, rsp_result);
4806 }
4807
4808 chan->move_role = L2CAP_MOVE_ROLE_NONE;
4809 chan->move_state = L2CAP_MOVE_STABLE;
4810
4811 /* Restart data transmission */
4812 l2cap_ertm_send(chan);
4813}
4814
/* Invoke with locked chan
 *
 * Physical link completion callback from the AMP layer.  Dispatches on
 * channel state and move role: a channel that is not yet connected is
 * finishing creation; a connected channel is mid-move and continues as
 * initiator, responder, or is cancelled.
 *
 * NOTE(review): the BT_DISCONN/BT_CLOSED early return unlocks the
 * channel while every other path leaves it locked -- confirm the
 * callers' unlock expectations before changing this.
 */
void __l2cap_physical_cfm(struct l2cap_chan *chan, int result)
{
	u8 local_amp_id = chan->local_amp_id;
	u8 remote_amp_id = chan->remote_amp_id;

	BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
	       chan, result, local_amp_id, remote_amp_id);

	if (chan->state == BT_DISCONN || chan->state == BT_CLOSED) {
		l2cap_chan_unlock(chan);
		return;
	}

	if (chan->state != BT_CONNECTED) {
		l2cap_do_create(chan, result, local_amp_id, remote_amp_id);
	} else if (result != L2CAP_MR_SUCCESS) {
		l2cap_do_move_cancel(chan, result);
	} else {
		switch (chan->move_role) {
		case L2CAP_MOVE_ROLE_INITIATOR:
			l2cap_do_move_initiate(chan, local_amp_id,
					       remote_amp_id);
			break;
		case L2CAP_MOVE_ROLE_RESPONDER:
			l2cap_do_move_respond(chan, result);
			break;
		default:
			l2cap_do_move_cancel(chan, result);
			break;
		}
	}
}
4848
/* Handle an incoming Move Channel Request (this side becomes the move
 * responder).  Validates the channel, the destination controller, and
 * detects move collisions before answering with a Move Channel
 * Response carrying @result.
 *
 * NOTE(review): l2cap_get_chan_by_dcid() appears to return the channel
 * locked -- every exit below unlocks it; verify against its definition.
 */
static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
					 struct l2cap_cmd_hdr *cmd,
					 u16 cmd_len, void *data)
{
	struct l2cap_move_chan_req *req = data;
	struct l2cap_move_chan_rsp rsp;
	struct l2cap_chan *chan;
	u16 icid = 0;
	u16 result = L2CAP_MR_NOT_ALLOWED;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	icid = le16_to_cpu(req->icid);

	BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);

	if (!conn->hs_enabled)
		return -EINVAL;

	chan = l2cap_get_chan_by_dcid(conn, icid);
	if (!chan) {
		/* Unknown channel: reject by icid without a chan lock */
		rsp.icid = cpu_to_le16(icid);
		rsp.result = cpu_to_le16(L2CAP_MR_NOT_ALLOWED);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_MOVE_CHAN_RSP,
			       sizeof(rsp), &rsp);
		return 0;
	}

	chan->ident = cmd->ident;

	/* Only dynamic ERTM/streaming channels that allow AMP may move */
	if (chan->scid < L2CAP_CID_DYN_START ||
	    chan->chan_policy == BT_CHANNEL_POLICY_BREDR_ONLY ||
	    (chan->mode != L2CAP_MODE_ERTM &&
	     chan->mode != L2CAP_MODE_STREAMING)) {
		result = L2CAP_MR_NOT_ALLOWED;
		goto send_move_response;
	}

	if (chan->local_amp_id == req->dest_amp_id) {
		result = L2CAP_MR_SAME_ID;
		goto send_move_response;
	}

	if (req->dest_amp_id != AMP_ID_BREDR) {
		/* Destination must be an AMP controller that is up */
		struct hci_dev *hdev;
		hdev = hci_dev_get(req->dest_amp_id);
		if (!hdev || hdev->dev_type != HCI_AMP ||
		    !test_bit(HCI_UP, &hdev->flags)) {
			if (hdev)
				hci_dev_put(hdev);

			result = L2CAP_MR_BAD_ID;
			goto send_move_response;
		}
		hci_dev_put(hdev);
	}

	/* Detect a move collision.  Only send a collision response
	 * if this side has "lost", otherwise proceed with the move.
	 * The winner has the larger bd_addr.
	 */
	if ((__chan_is_moving(chan) ||
	     chan->move_role != L2CAP_MOVE_ROLE_NONE) &&
	    bacmp(&conn->hcon->src, &conn->hcon->dst) > 0) {
		result = L2CAP_MR_COLLISION;
		goto send_move_response;
	}

	chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
	l2cap_move_setup(chan);
	chan->move_id = req->dest_amp_id;
	icid = chan->dcid;

	if (req->dest_amp_id == AMP_ID_BREDR) {
		/* Moving to BR/EDR */
		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
			result = L2CAP_MR_PEND;
		} else {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			result = L2CAP_MR_SUCCESS;
		}
	} else {
		chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
		/* Placeholder - uncomment when amp functions are available */
		/*amp_accept_physical(chan, req->dest_amp_id);*/
		result = L2CAP_MR_PEND;
	}

send_move_response:
	l2cap_send_move_chan_rsp(chan, result);

	l2cap_chan_unlock(chan);

	return 0;
}
4946
/* Advance the initiator-side move state machine after a successful or
 * pending Move Channel Response (@result is SUCCESS or PEND).  The
 * lookup by scid returns the channel locked; all paths unlock it.
 * If the channel cannot be found, a Move Channel Confirmation is still
 * sent by icid as the spec requires a confirmation.
 */
static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
{
	struct l2cap_chan *chan;
	struct hci_chan *hchan = NULL;

	chan = l2cap_get_chan_by_scid(conn, icid);
	if (!chan) {
		l2cap_send_move_chan_cfm_icid(conn, icid);
		return;
	}

	/* A pending result restarts the extended response timer */
	__clear_chan_timer(chan);
	if (result == L2CAP_MR_PEND)
		__set_chan_timer(chan, L2CAP_MOVE_ERTX_TIMEOUT);

	switch (chan->move_state) {
	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
		/* Move confirm will be sent when logical link
		 * is complete.
		 */
		chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		break;
	case L2CAP_MOVE_WAIT_RSP_SUCCESS:
		if (result == L2CAP_MR_PEND) {
			break;
		} else if (test_bit(CONN_LOCAL_BUSY,
				    &chan->conn_state)) {
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
		} else {
			/* Logical link is up or moving to BR/EDR,
			 * proceed with move
			 */
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		}
		break;
	case L2CAP_MOVE_WAIT_RSP:
		/* Moving to AMP */
		if (result == L2CAP_MR_SUCCESS) {
			/* Remote is ready, send confirm immediately
			 * after logical link is ready
			 */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		} else {
			/* Both logical link and move success
			 * are required to confirm
			 */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_COMP;
		}

		/* Placeholder - get hci_chan for logical link */
		if (!hchan) {
			/* Logical link not available */
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
			break;
		}

		/* If the logical link is not yet connected, do not
		 * send confirmation.
		 */
		if (hchan->state != BT_CONNECTED)
			break;

		/* Logical link is already ready to go */

		chan->hs_hcon = hchan->conn;
		chan->hs_hcon->l2cap_data = chan->conn;

		if (result == L2CAP_MR_SUCCESS) {
			/* Can confirm now */
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		} else {
			/* Now only need move success
			 * to confirm
			 */
			chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
		}

		l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
		break;
	default:
		/* Any other amp move state means the move failed. */
		chan->move_id = chan->local_amp_id;
		l2cap_move_done(chan);
		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
	}

	l2cap_chan_unlock(chan);
}
5036
/* Handle a failed Move Channel Response.  A collision result demotes
 * this side to responder (the peer won the collision); any other
 * failure cancels the move.  Either way an UNCONFIRMED confirmation is
 * sent.  The channel returned by l2cap_get_chan_by_ident() is locked;
 * it is unlocked before returning.
 */
static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
			    u16 result)
{
	struct l2cap_chan *chan;

	chan = l2cap_get_chan_by_ident(conn, ident);
	if (!chan) {
		/* Could not locate channel, icid is best guess */
		l2cap_send_move_chan_cfm_icid(conn, icid);
		return;
	}

	__clear_chan_timer(chan);

	if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
		if (result == L2CAP_MR_COLLISION) {
			chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
		} else {
			/* Cleanup - cancel move */
			chan->move_id = chan->local_amp_id;
			l2cap_move_done(chan);
		}
	}

	l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);

	l2cap_chan_unlock(chan);
}
5065
5066static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
5067 struct l2cap_cmd_hdr *cmd,
5068 u16 cmd_len, void *data)
5069{
5070 struct l2cap_move_chan_rsp *rsp = data;
5071 u16 icid, result;
5072
5073 if (cmd_len != sizeof(*rsp))
5074 return -EPROTO;
5075
5076 icid = le16_to_cpu(rsp->icid);
5077 result = le16_to_cpu(rsp->result);
5078
5079 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5080
5081 if (result == L2CAP_MR_SUCCESS || result == L2CAP_MR_PEND)
5082 l2cap_move_continue(conn, icid, result);
5083 else
5084 l2cap_move_fail(conn, cmd->ident, icid, result);
5085
5086 return 0;
5087}
5088
/* Handle a Move Channel Confirmation (responder side).  A CONFIRMED
 * result commits the controller switch; UNCONFIRMED rolls move_id back.
 * A Confirmation Response is always sent, even for an unknown icid,
 * as the spec requires.  The channel lookup returns it locked; it is
 * unlocked before returning.
 */
static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
				      struct l2cap_cmd_hdr *cmd,
				      u16 cmd_len, void *data)
{
	struct l2cap_move_chan_cfm *cfm = data;
	struct l2cap_chan *chan;
	u16 icid, result;

	if (cmd_len != sizeof(*cfm))
		return -EPROTO;

	icid = le16_to_cpu(cfm->icid);
	result = le16_to_cpu(cfm->result);

	BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);

	chan = l2cap_get_chan_by_dcid(conn, icid);
	if (!chan) {
		/* Spec requires a response even if the icid was not found */
		l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
		return 0;
	}

	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) {
		if (result == L2CAP_MC_CONFIRMED) {
			chan->local_amp_id = chan->move_id;
			/* Back on BR/EDR: the AMP logical link is no
			 * longer needed.
			 */
			if (chan->local_amp_id == AMP_ID_BREDR)
				__release_logical_link(chan);
		} else {
			chan->move_id = chan->local_amp_id;
		}

		l2cap_move_done(chan);
	}

	l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);

	l2cap_chan_unlock(chan);

	return 0;
}
5130
/* Handle a Move Channel Confirmation Response (initiator side).  This
 * is the final message of the move procedure: the controller switch is
 * committed and, if the channel ended up back on BR/EDR, the AMP
 * logical link is released.  The channel lookup returns it locked; it
 * is unlocked before returning.
 */
static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
						 struct l2cap_cmd_hdr *cmd,
						 u16 cmd_len, void *data)
{
	struct l2cap_move_chan_cfm_rsp *rsp = data;
	struct l2cap_chan *chan;
	u16 icid;

	if (cmd_len != sizeof(*rsp))
		return -EPROTO;

	icid = le16_to_cpu(rsp->icid);

	BT_DBG("icid 0x%4.4x", icid);

	chan = l2cap_get_chan_by_scid(conn, icid);
	if (!chan)
		return 0;

	__clear_chan_timer(chan);

	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) {
		chan->local_amp_id = chan->move_id;

		if (chan->local_amp_id == AMP_ID_BREDR && chan->hs_hchan)
			__release_logical_link(chan);

		l2cap_move_done(chan);
	}

	l2cap_chan_unlock(chan);

	return 0;
}
5165
/* Handle an LE Connection Parameter Update Request.  Only valid when
 * we are the LE master (-EINVAL otherwise, which triggers a command
 * reject).  The requested parameters are validated, a response with
 * accept/reject is always sent, and on accept the controller is asked
 * to update the connection and userspace is notified of the new
 * parameters via mgmt.
 */
static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
					      struct l2cap_cmd_hdr *cmd,
					      u16 cmd_len, u8 *data)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_conn_param_update_req *req;
	struct l2cap_conn_param_update_rsp rsp;
	u16 min, max, latency, to_multiplier;
	int err;

	if (hcon->role != HCI_ROLE_MASTER)
		return -EINVAL;

	if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
		return -EPROTO;

	req = (struct l2cap_conn_param_update_req *) data;
	min = __le16_to_cpu(req->min);
	max = __le16_to_cpu(req->max);
	latency = __le16_to_cpu(req->latency);
	to_multiplier = __le16_to_cpu(req->to_multiplier);

	BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
	       min, max, latency, to_multiplier);

	memset(&rsp, 0, sizeof(rsp));

	err = hci_check_conn_params(min, max, latency, to_multiplier);
	if (err)
		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
	else
		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
		       sizeof(rsp), &rsp);

	if (!err) {
		u8 store_hint;

		/* Apply the parameters and let userspace decide whether
		 * to store them, based on the controller's hint.
		 */
		store_hint = hci_le_conn_update(hcon, min, max, latency,
						to_multiplier);
		mgmt_new_conn_param(hcon->hdev, &hcon->dst, hcon->dst_type,
				    store_hint, min, max, latency,
				    to_multiplier);

	}

	return 0;
}
5215
/* Handle an LE Credit Based Connection Response.  The pending channel
 * is looked up by the request's ident under conn->chan_lock; on success
 * the remote parameters (dcid, MTU, MPS, initial credits) are adopted
 * and the channel becomes ready, otherwise it is deleted with
 * ECONNREFUSED.  MTU and MPS below 23 (the LE minimum) are a protocol
 * error on a successful result.
 */
static int l2cap_le_connect_rsp(struct l2cap_conn *conn,
				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				u8 *data)
{
	struct l2cap_le_conn_rsp *rsp = (struct l2cap_le_conn_rsp *) data;
	u16 dcid, mtu, mps, credits, result;
	struct l2cap_chan *chan;
	int err;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	dcid = __le16_to_cpu(rsp->dcid);
	mtu = __le16_to_cpu(rsp->mtu);
	mps = __le16_to_cpu(rsp->mps);
	credits = __le16_to_cpu(rsp->credits);
	result = __le16_to_cpu(rsp->result);

	if (result == L2CAP_CR_SUCCESS && (mtu < 23 || mps < 23))
		return -EPROTO;

	BT_DBG("dcid 0x%4.4x mtu %u mps %u credits %u result 0x%2.2x",
	       dcid, mtu, mps, credits, result);

	mutex_lock(&conn->chan_lock);

	chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
	if (!chan) {
		err = -EBADSLT;
		goto unlock;
	}

	err = 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_SUCCESS:
		chan->ident = 0;
		chan->dcid = dcid;
		chan->omtu = mtu;
		chan->remote_mps = mps;
		chan->tx_credits = credits;
		l2cap_chan_ready(chan);
		break;

	default:
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	l2cap_chan_unlock(chan);

unlock:
	mutex_unlock(&conn->chan_lock);

	return err;
}
5274
/* Dispatch one BR/EDR signaling command to its handler.  A non-zero
 * return (including -EINVAL for unknown command codes) makes the caller
 * send a Command Reject to the peer; handlers whose errors are ignored
 * here are response-type commands where a reject would be meaningless.
 */
static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
				      struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				      u8 *data)
{
	int err = 0;

	switch (cmd->code) {
	case L2CAP_COMMAND_REJ:
		l2cap_command_rej(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_REQ:
		err = l2cap_connect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_RSP:
	case L2CAP_CREATE_CHAN_RSP:
		l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONF_REQ:
		err = l2cap_config_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONF_RSP:
		l2cap_config_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_REQ:
		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_RSP:
		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_ECHO_REQ:
		/* Echo the payload straight back */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
		break;

	case L2CAP_ECHO_RSP:
		break;

	case L2CAP_INFO_REQ:
		err = l2cap_information_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_INFO_RSP:
		l2cap_information_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CREATE_CHAN_REQ:
		err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_REQ:
		err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_RSP:
		l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_CFM:
		err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_CFM_RSP:
		l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
		break;

	default:
		BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
		err = -EINVAL;
		break;
	}

	return err;
}
5354
/* Handle an LE Credit Based Connection Request: find a listening
 * channel for the PSM, check security and duplicate source CIDs,
 * create the new channel, and respond.  MTU/MPS below 23 (the LE
 * minimum) are a protocol error.  With FLAG_DEFER_SETUP the response
 * is deferred to userspace (see the L2CAP_CR_PEND note below).
 */
static int l2cap_le_connect_req(struct l2cap_conn *conn,
				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				u8 *data)
{
	struct l2cap_le_conn_req *req = (struct l2cap_le_conn_req *) data;
	struct l2cap_le_conn_rsp rsp;
	struct l2cap_chan *chan, *pchan;
	u16 dcid, scid, credits, mtu, mps;
	__le16 psm;
	u8 result;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	scid = __le16_to_cpu(req->scid);
	mtu = __le16_to_cpu(req->mtu);
	mps = __le16_to_cpu(req->mps);
	psm = req->psm;
	dcid = 0;
	credits = 0;

	if (mtu < 23 || mps < 23)
		return -EPROTO;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x mtu %u mps %u", __le16_to_cpu(psm),
	       scid, mtu, mps);

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
					 &conn->hcon->dst, LE_LINK);
	if (!pchan) {
		result = L2CAP_CR_BAD_PSM;
		chan = NULL;
		goto response;
	}

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(pchan);

	/* An STK-secured link is acceptable for LE CoC */
	if (!smp_sufficient_security(conn->hcon, pchan->sec_level,
				     SMP_ALLOW_STK)) {
		result = L2CAP_CR_AUTHENTICATION;
		chan = NULL;
		goto response_unlock;
	}

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid)) {
		result = L2CAP_CR_NO_MEM;
		chan = NULL;
		goto response_unlock;
	}

	chan = pchan->ops->new_connection(pchan);
	if (!chan) {
		result = L2CAP_CR_NO_MEM;
		goto response_unlock;
	}

	l2cap_le_flowctl_init(chan);

	/* Adopt the peer's addressing and connection parameters */
	bacpy(&chan->src, &conn->hcon->src);
	bacpy(&chan->dst, &conn->hcon->dst);
	chan->src_type = bdaddr_type(conn->hcon, conn->hcon->src_type);
	chan->dst_type = bdaddr_type(conn->hcon, conn->hcon->dst_type);
	chan->psm  = psm;
	chan->dcid = scid;
	chan->omtu = mtu;
	chan->remote_mps = mps;
	chan->tx_credits = __le16_to_cpu(req->credits);

	__l2cap_chan_add(conn, chan);
	dcid = chan->scid;
	credits = chan->rx_credits;

	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	chan->ident = cmd->ident;

	if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
		l2cap_state_change(chan, BT_CONNECT2);
		/* The following result value is actually not defined
		 * for LE CoC but we use it to let the function know
		 * that it should bail out after doing its cleanup
		 * instead of sending a response.
		 */
		result = L2CAP_CR_PEND;
		chan->ops->defer(chan);
	} else {
		l2cap_chan_ready(chan);
		result = L2CAP_CR_SUCCESS;
	}

response_unlock:
	l2cap_chan_unlock(pchan);
	mutex_unlock(&conn->chan_lock);
	l2cap_chan_put(pchan);

	if (result == L2CAP_CR_PEND)
		return 0;

response:
	if (chan) {
		rsp.mtu = cpu_to_le16(chan->imtu);
		rsp.mps = cpu_to_le16(chan->mps);
	} else {
		rsp.mtu = 0;
		rsp.mps = 0;
	}

	rsp.dcid = cpu_to_le16(dcid);
	rsp.credits = cpu_to_le16(credits);
	rsp.result = cpu_to_le16(result);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_LE_CONN_RSP, sizeof(rsp), &rsp);

	return 0;
}
5473
/* Handle an LE Flow Control Credit packet: add the peer's credits to
 * the channel and flush as much of the pending tx queue as the new
 * credit total allows.  A credit count that would push tx_credits past
 * LE_FLOWCTL_MAX_CREDITS is a peer protocol violation and disconnects
 * the channel.  The channel lookup returns it locked.
 */
static inline int l2cap_le_credits(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_le_credits *pkt;
	struct l2cap_chan *chan;
	u16 cid, credits, max_credits;

	if (cmd_len != sizeof(*pkt))
		return -EPROTO;

	pkt = (struct l2cap_le_credits *) data;
	cid = __le16_to_cpu(pkt->cid);
	credits = __le16_to_cpu(pkt->credits);

	BT_DBG("cid 0x%4.4x credits 0x%4.4x", cid, credits);

	chan = l2cap_get_chan_by_dcid(conn, cid);
	if (!chan)
		return -EBADSLT;

	/* Guard against u16 credit counter overflow */
	max_credits = LE_FLOWCTL_MAX_CREDITS - chan->tx_credits;
	if (credits > max_credits) {
		BT_ERR("LE credits overflow");
		l2cap_send_disconn_req(chan, ECONNRESET);
		l2cap_chan_unlock(chan);

		/* Return 0 so that we don't trigger an unnecessary
		 * command reject packet.
		 */
		return 0;
	}

	chan->tx_credits += credits;

	/* Each queued SDU fragment consumes one credit */
	while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
		l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
		chan->tx_credits--;
	}

	if (chan->tx_credits)
		chan->ops->resume(chan);

	l2cap_chan_unlock(chan);

	return 0;
}
5521
5522static inline int l2cap_le_command_rej(struct l2cap_conn *conn,
5523 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5524 u8 *data)
5525{
5526 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
5527 struct l2cap_chan *chan;
5528
5529 if (cmd_len < sizeof(*rej))
5530 return -EPROTO;
5531
5532 mutex_lock(&conn->chan_lock);
5533
5534 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
5535 if (!chan)
5536 goto done;
5537
5538 l2cap_chan_lock(chan);
5539 l2cap_chan_del(chan, ECONNREFUSED);
5540 l2cap_chan_unlock(chan);
5541
5542done:
5543 mutex_unlock(&conn->chan_lock);
5544 return 0;
5545}
5546
/* Dispatch one LE signaling command to its handler.  A non-zero return
 * (including -EINVAL for commands not valid on LE) makes the caller
 * send a Command Reject to the peer.
 */
static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	int err = 0;

	switch (cmd->code) {
	case L2CAP_COMMAND_REJ:
		l2cap_le_command_rej(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_PARAM_UPDATE_REQ:
		err = l2cap_conn_param_update_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_PARAM_UPDATE_RSP:
		break;

	case L2CAP_LE_CONN_RSP:
		l2cap_le_connect_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_LE_CONN_REQ:
		err = l2cap_le_connect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_LE_CREDITS:
		err = l2cap_le_credits(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_REQ:
		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_RSP:
		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
		break;

	default:
		BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
		err = -EINVAL;
		break;
	}

	return err;
}
5593
5594static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
5595 struct sk_buff *skb)
5596{
5597 struct hci_conn *hcon = conn->hcon;
5598 struct l2cap_cmd_hdr *cmd;
5599 u16 len;
5600 int err;
5601
5602 if (hcon->type != LE_LINK)
5603 goto drop;
5604
5605 if (skb->len < L2CAP_CMD_HDR_SIZE)
5606 goto drop;
5607
5608 cmd = (void *) skb->data;
5609 skb_pull(skb, L2CAP_CMD_HDR_SIZE);
5610
5611 len = le16_to_cpu(cmd->len);
5612
5613 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, cmd->ident);
5614
5615 if (len != skb->len || !cmd->ident) {
5616 BT_DBG("corrupted command");
5617 goto drop;
5618 }
5619
5620 err = l2cap_le_sig_cmd(conn, cmd, len, skb->data);
5621 if (err) {
5622 struct l2cap_cmd_rej_unk rej;
5623
5624 BT_ERR("Wrong link type (%d)", err);
5625
5626 rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
5627 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
5628 sizeof(rej), &rej);
5629 }
5630
5631drop:
5632 kfree_skb(skb);
5633}
5634
5635static inline void l2cap_sig_channel(struct l2cap_conn *conn,
5636 struct sk_buff *skb)
5637{
5638 struct hci_conn *hcon = conn->hcon;
5639 u8 *data = skb->data;
5640 int len = skb->len;
5641 struct l2cap_cmd_hdr cmd;
5642 int err;
5643
5644 l2cap_raw_recv(conn, skb);
5645
5646 if (hcon->type != ACL_LINK)
5647 goto drop;
5648
5649 while (len >= L2CAP_CMD_HDR_SIZE) {
5650 u16 cmd_len;
5651 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
5652 data += L2CAP_CMD_HDR_SIZE;
5653 len -= L2CAP_CMD_HDR_SIZE;
5654
5655 cmd_len = le16_to_cpu(cmd.len);
5656
5657 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len,
5658 cmd.ident);
5659
5660 if (cmd_len > len || !cmd.ident) {
5661 BT_DBG("corrupted command");
5662 break;
5663 }
5664
5665 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
5666 if (err) {
5667 struct l2cap_cmd_rej_unk rej;
5668
5669 BT_ERR("Wrong link type (%d)", err);
5670
5671 rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
5672 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ,
5673 sizeof(rej), &rej);
5674 }
5675
5676 data += cmd_len;
5677 len -= cmd_len;
5678 }
5679
5680drop:
5681 kfree_skb(skb);
5682}
5683
/* Verify (and strip) the CRC16 FCS trailer of a received ERTM/streaming
 * frame.  Returns 0 if the FCS matches or the channel does not use one,
 * -EBADMSG on mismatch.
 *
 * Note the ordering: skb_trim() only shortens skb->len, so the two FCS
 * bytes are still present in the buffer and are read from
 * skb->data + skb->len immediately after the trim.
 */
static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
{
	u16 our_fcs, rcv_fcs;
	int hdr_size;

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		hdr_size = L2CAP_EXT_HDR_SIZE;
	else
		hdr_size = L2CAP_ENH_HDR_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16) {
		skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
		rcv_fcs = get_unaligned_le16(skb->data + skb->len);
		/* FCS covers the L2CAP header too, which sits just
		 * before skb->data at this point.
		 */
		our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);

		if (our_fcs != rcv_fcs)
			return -EBADMSG;
	}
	return 0;
}
5704
/* Answer a poll (P-bit) from the peer: send the F-bit back in an RNR if
 * we are locally busy, otherwise in a pending I-frame, or finally in an
 * RR if no I-frame carried it.  Also restarts the retransmission timer
 * when the peer just cleared its busy condition and frames are still
 * unacked.
 */
static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;

	BT_DBG("chan %p", chan);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.final = 1;
	control.reqseq = chan->buffer_seq;
	set_bit(CONN_SEND_FBIT, &chan->conn_state);

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		control.super = L2CAP_SUPER_RNR;
		l2cap_send_sframe(chan, &control);
	}

	if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
	    chan->unacked_frames > 0)
		__set_retrans_timer(chan);

	/* Send pending iframes */
	l2cap_ertm_send(chan);

	if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
		/* F-bit wasn't sent in an s-frame or i-frame yet, so
		 * send it now.
		 */
		control.super = L2CAP_SUPER_RR;
		l2cap_send_sframe(chan, &control);
	}
}
5738
/* Append @new_frag to @skb's frag_list during SDU reassembly, keeping
 * *last_frag pointing at the list tail for O(1) appends, and updating
 * the head skb's length accounting.
 *
 * NOTE(review): when @skb has no frag list yet, *last_frag points at
 * @skb itself, so the write below also sets the head's ->next --
 * presumably harmless because the head skb is not queued while it is
 * held in chan->sdu, but verify before reusing this helper elsewhere.
 */
static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
			    struct sk_buff **last_frag)
{
	/* skb->len reflects data in skb as well as all fragments
	 * skb->data_len reflects only data in fragments
	 */
	if (!skb_has_frag_list(skb))
		skb_shinfo(skb)->frag_list = new_frag;

	new_frag->next = NULL;

	(*last_frag)->next = new_frag;
	*last_frag = new_frag;

	skb->len += new_frag->len;
	skb->data_len += new_frag->len;
	skb->truesize += new_frag->truesize;
}
5757
/* Reassemble a received frame into an SDU according to its SAR bits and
 * deliver complete SDUs via chan->ops->recv().
 *
 * Ownership: once a fragment is stored in chan->sdu (skb is then set to
 * NULL locally), it is no longer freed here on later errors -- only the
 * in-flight skb and the partial SDU are released in the error path.
 * Returns 0 on success (including "fragment stored, SDU incomplete"),
 * -EINVAL on a SAR sequence violation, -EMSGSIZE if the SDU exceeds our
 * MTU, or the recv callback's error.
 */
static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
				struct l2cap_ctrl *control)
{
	int err = -EINVAL;

	switch (control->sar) {
	case L2CAP_SAR_UNSEGMENTED:
		/* A partial SDU must not be interrupted by an
		 * unsegmented frame.
		 */
		if (chan->sdu)
			break;

		err = chan->ops->recv(chan, skb);
		break;

	case L2CAP_SAR_START:
		/* A new SDU may not start while one is in progress */
		if (chan->sdu)
			break;

		/* First two payload bytes carry the total SDU length */
		chan->sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, L2CAP_SDULEN_SIZE);

		if (chan->sdu_len > chan->imtu) {
			err = -EMSGSIZE;
			break;
		}

		/* A start fragment must not already contain the whole SDU */
		if (skb->len >= chan->sdu_len)
			break;

		chan->sdu = skb;
		chan->sdu_last_frag = skb;

		skb = NULL;
		err = 0;
		break;

	case L2CAP_SAR_CONTINUE:
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* Overrunning the announced SDU length is an error */
		if (chan->sdu->len >= chan->sdu_len)
			break;

		err = 0;
		break;

	case L2CAP_SAR_END:
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* Final length must match the announced SDU length */
		if (chan->sdu->len != chan->sdu_len)
			break;

		err = chan->ops->recv(chan, chan->sdu);

		if (!err) {
			/* Reassembly complete */
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;
		}
		break;
	}

	if (err) {
		kfree_skb(skb);
		kfree_skb(chan->sdu);
		chan->sdu = NULL;
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;
	}

	return err;
}
5839
/* Resegment queued data after a channel move changes the usable packet
 * size.  Not yet implemented; always reports success.
 */
static int l2cap_resegment(struct l2cap_chan *chan)
{
	/* Placeholder */
	return 0;
}
5845
5846void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
5847{
5848 u8 event;
5849
5850 if (chan->mode != L2CAP_MODE_ERTM)
5851 return;
5852
5853 event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
5854 l2cap_tx(chan, NULL, NULL, event);
5855}
5856
/* Drain the SREJ hold queue: deliver consecutively-sequenced frames to
 * l2cap_reassemble_sdu() starting at buffer_seq until a gap (or local
 * busy) stops progress.  When the queue empties, the channel leaves
 * SREJ_SENT state and acks the peer.  Returns the first reassembly
 * error, or 0.
 */
static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
{
	int err = 0;
	/* Pass sequential frames to l2cap_reassemble_sdu()
	 * until a gap is encountered.
	 */

	BT_DBG("chan %p", chan);

	while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		struct sk_buff *skb;
		BT_DBG("Searching for skb with txseq %d (queue len %d)",
		       chan->buffer_seq, skb_queue_len(&chan->srej_q));

		skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);

		if (!skb)
			break;

		skb_unlink(skb, &chan->srej_q);
		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
		err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->control);
		if (err)
			break;
	}

	if (skb_queue_empty(&chan->srej_q)) {
		chan->rx_state = L2CAP_RX_STATE_RECV;
		l2cap_send_ack(chan);
	}

	return err;
}
5890
/* Handle a received SREJ S-frame: retransmit the single frame the peer
 * asked for.  Disconnects on protocol violations (reqseq pointing at
 * the next unsent frame, or retry limit exceeded).  The F-bit/P-bit
 * bookkeeping mirrors the ERTM spec: with a P-bit we must answer with
 * an F-bit, and a retransmission done while waiting for an F-bit is
 * remembered (CONN_SREJ_ACT) so the echoed SREJ isn't serviced twice.
 */
static void l2cap_handle_srej(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	if (skb == NULL) {
		BT_DBG("Seq %d not available for retransmission",
		       control->reqseq);
		return;
	}

	if (chan->max_tx != 0 && bt_cb(skb)->control.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	if (control->poll) {
		l2cap_pass_to_tx(chan, control);

		set_bit(CONN_SEND_FBIT, &chan->conn_state);
		l2cap_retransmit(chan, control);
		l2cap_ertm_send(chan);

		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
			set_bit(CONN_SREJ_ACT, &chan->conn_state);
			chan->srej_save_reqseq = control->reqseq;
		}
	} else {
		l2cap_pass_to_tx_fbit(chan, control);

		if (control->final) {
			/* Skip the retransmit if this SREJ merely echoes
			 * one we already acted on while polling.
			 */
			if (chan->srej_save_reqseq != control->reqseq ||
			    !test_and_clear_bit(CONN_SREJ_ACT,
						&chan->conn_state))
				l2cap_retransmit(chan, control);
		} else {
			l2cap_retransmit(chan, control);
			if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
				set_bit(CONN_SREJ_ACT, &chan->conn_state);
				chan->srej_save_reqseq = control->reqseq;
			}
		}
	}
}
5948
/* Handle a received REJ S-frame: retransmit everything from reqseq
 * onward.  Disconnects on protocol violations (reqseq pointing at the
 * next unsent frame, or retry limit exceeded).  As with SREJ handling,
 * CONN_REJ_ACT prevents a REJ echoed with the F-bit from triggering a
 * second full retransmission.
 */
static void l2cap_handle_rej(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	if (chan->max_tx && skb &&
	    bt_cb(skb)->control.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	l2cap_pass_to_tx(chan, control);

	if (control->final) {
		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
			l2cap_retransmit_all(chan, control);
	} else {
		l2cap_retransmit_all(chan, control);
		l2cap_ertm_send(chan);
		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
			set_bit(CONN_REJ_ACT, &chan->conn_state);
	}
}
5985
/* Classify the TxSeq of a received I-frame relative to the current
 * receive window.
 *
 * Returns one of the L2CAP_TXSEQ_* classifications that drive the RX
 * state machines: expected/unexpected/duplicate frames, the SREJ
 * variants while in SREJ_SENT state, and the invalid cases that either
 * get silently ignored or cause a disconnect (see the "double poll"
 * discussion below for why both outcomes exist).
 */
static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
{
	BT_DBG("chan %p, txseq %d", chan, txseq);

	BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
	       chan->expected_tx_seq);

	if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			/* See notes below regarding "double poll" and
			 * invalid packets.
			 */
			if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
				BT_DBG("Invalid/Ignore - after SREJ");
				return L2CAP_TXSEQ_INVALID_IGNORE;
			} else {
				BT_DBG("Invalid - in window after SREJ sent");
				return L2CAP_TXSEQ_INVALID;
			}
		}

		/* Head of the SREJ list is the retransmission we are
		 * waiting for next.
		 */
		if (chan->srej_list.head == txseq) {
			BT_DBG("Expected SREJ");
			return L2CAP_TXSEQ_EXPECTED_SREJ;
		}

		if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
			BT_DBG("Duplicate SREJ - txseq already stored");
			return L2CAP_TXSEQ_DUPLICATE_SREJ;
		}

		if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
			BT_DBG("Unexpected SREJ - not requested");
			return L2CAP_TXSEQ_UNEXPECTED_SREJ;
		}
	}

	if (chan->expected_tx_seq == txseq) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		} else {
			BT_DBG("Expected");
			return L2CAP_TXSEQ_EXPECTED;
		}
	}

	if (__seq_offset(chan, txseq, chan->last_acked_seq) <
	    __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
		BT_DBG("Duplicate - expected_tx_seq later than txseq");
		return L2CAP_TXSEQ_DUPLICATE;
	}

	if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
		/* A source of invalid packets is a "double poll" condition,
		 * where delays cause us to send multiple poll packets.  If
		 * the remote stack receives and processes both polls,
		 * sequence numbers can wrap around in such a way that a
		 * resent frame has a sequence number that looks like new data
		 * with a sequence gap.  This would trigger an erroneous SREJ
		 * request.
		 *
		 * Fortunately, this is impossible with a tx window that's
		 * less than half of the maximum sequence number, which allows
		 * invalid frames to be safely ignored.
		 *
		 * With tx window sizes greater than half of the tx window
		 * maximum, the frame is invalid and cannot be ignored.  This
		 * causes a disconnect.
		 */

		if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
			BT_DBG("Invalid/Ignore - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID_IGNORE;
		} else {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		}
	} else {
		BT_DBG("Unexpected - txseq indicates missing frames");
		return L2CAP_TXSEQ_UNEXPECTED;
	}
}
6071
/* ERTM RX state machine handler for the normal RECV state.
 *
 * Processes an incoming I-frame or S-frame event.  skb ownership:
 * unless the frame is consumed (reassembled or queued on srej_q,
 * tracked via skb_in_use), the skb is freed before returning.
 * Returns 0 on success or a negative error from SDU reassembly.
 */
static int l2cap_rx_state_recv(struct l2cap_chan *chan,
			       struct l2cap_ctrl *control,
			       struct sk_buff *skb, u8 event)
{
	int err = 0;
	bool skb_in_use = false;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, control->txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			l2cap_pass_to_tx(chan, control);

			/* Local busy: drop the frame; it will be recovered
			 * via retransmission when local busy clears.
			 */
			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding expected seq %d",
				       control->txseq);
				break;
			}

			chan->expected_tx_seq = __next_seq(chan,
							   control->txseq);

			chan->buffer_seq = chan->expected_tx_seq;
			skb_in_use = true;

			err = l2cap_reassemble_sdu(chan, skb, control);
			if (err)
				break;

			if (control->final) {
				if (!test_and_clear_bit(CONN_REJ_ACT,
							&chan->conn_state)) {
					control->final = 0;
					l2cap_retransmit_all(chan, control);
					l2cap_ertm_send(chan);
				}
			}

			if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
				l2cap_send_ack(chan);
			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			l2cap_pass_to_tx(chan, control);

			/* Can't issue SREJ frames in the local busy state.
			 * Drop this frame, it will be seen as missing
			 * when local busy is exited.
			 */
			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding unexpected seq %d",
				       control->txseq);
				break;
			}

			/* There was a gap in the sequence, so an SREJ
			 * must be sent for each missing frame.  The
			 * current frame is stored for later use.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			clear_bit(CONN_SREJ_ACT, &chan->conn_state);
			l2cap_seq_list_clear(&chan->srej_list);
			l2cap_send_srej(chan, control->txseq);

			chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(chan, ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			/* Skip retransmission while an AMP channel move is
			 * in progress.
			 */
			if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state) &&
			    !__chan_is_moving(chan)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			l2cap_send_i_or_rr_or_rnr(chan);
		} else {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_ertm_send(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		if (control && control->poll) {
			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_rr_or_rnr(chan, 0);
		}
		__clear_retrans_timer(chan);
		l2cap_seq_list_clear(&chan->retrans_list);
		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	default:
		break;
	}

	/* Frame was neither reassembled nor queued - release it */
	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
6205
/* ERTM RX state machine handler for the SREJ_SENT state.
 *
 * We have outstanding SREJ requests; incoming I-frames are queued on
 * srej_q until the gaps are filled (l2cap_rx_queued_iframes drains the
 * queue once the expected retransmission arrives).  Same skb ownership
 * rules as l2cap_rx_state_recv: frames not queued are freed here.
 */
static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
				    struct l2cap_ctrl *control,
				    struct sk_buff *skb, u8 event)
{
	int err = 0;
	u16 txseq = control->txseq;
	bool skb_in_use = false;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			/* Keep frame for reassembly later */
			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			chan->expected_tx_seq = __next_seq(chan, txseq);
			break;
		case L2CAP_TXSEQ_EXPECTED_SREJ:
			/* This is the retransmission at the head of the
			 * SREJ list - pop it and try to drain srej_q.
			 */
			l2cap_seq_list_pop(&chan->srej_list);

			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			err = l2cap_rx_queued_iframes(chan);
			if (err)
				break;

			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			/* Got a frame that can't be reassembled yet.
			 * Save it for later, and send SREJs to cover
			 * the missing frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_UNEXPECTED_SREJ:
			/* This frame was requested with an SREJ, but
			 * some expected retransmitted frames are
			 * missing.  Request retransmission of missing
			 * SREJ'd frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej_list(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_DUPLICATE_SREJ:
			/* We've already queued this frame.  Drop this copy. */
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			/* Expecting a later sequence number, so this frame
			 * was already received.  Ignore it completely.
			 */
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(chan, ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			if (!test_and_clear_bit(CONN_REJ_ACT,
						&chan->conn_state)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames) {
				__set_retrans_timer(chan);
			}

			/* Answer the poll with an F-bit SREJ for the tail
			 * of the outstanding SREJ list.
			 */
			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_srej_tail(chan);
		} else {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_send_ack(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		if (control->poll) {
			l2cap_send_srej_tail(chan);
		} else {
			struct l2cap_ctrl rr_control;
			memset(&rr_control, 0, sizeof(rr_control));
			rr_control.sframe = 1;
			rr_control.super = L2CAP_SUPER_RR;
			rr_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &rr_control);
		}

		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	}

	/* Frame was not queued for reassembly - release it */
	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
6348
6349static int l2cap_finish_move(struct l2cap_chan *chan)
6350{
6351 BT_DBG("chan %p", chan);
6352
6353 chan->rx_state = L2CAP_RX_STATE_RECV;
6354
6355 if (chan->hs_hcon)
6356 chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
6357 else
6358 chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6359
6360 return l2cap_resegment(chan);
6361}
6362
/* RX state machine handler for WAIT_P (AMP move responder side):
 * wait for a poll (P=1) frame from the peer, then rewind the transmit
 * state to the peer's expected point, finish the move, and answer
 * with an F-bit frame.  Any other frame is a protocol error.
 */
static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control,
				 struct sk_buff *skb, u8 event)
{
	int err;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	if (!control->poll)
		return -EPROTO;

	l2cap_process_reqseq(chan, control->reqseq);

	if (!skb_queue_empty(&chan->tx_q))
		chan->tx_send_head = skb_peek(&chan->tx_q);
	else
		chan->tx_send_head = NULL;

	/* Rewind next_tx_seq to the point expected
	 * by the receiver.
	 */
	chan->next_tx_seq = control->reqseq;
	chan->unacked_frames = 0;

	err = l2cap_finish_move(chan);
	if (err)
		return err;

	set_bit(CONN_SEND_FBIT, &chan->conn_state);
	l2cap_send_i_or_rr_or_rnr(chan);

	/* I-frames carrying P=1 are not valid in this state */
	if (event == L2CAP_EV_RECV_IFRAME)
		return -EPROTO;

	/* Delegate remaining S-frame handling to the normal RECV state */
	return l2cap_rx_state_recv(chan, control, NULL, event);
}
6400
/* RX state machine handler for WAIT_F (AMP move initiator side):
 * wait for the final (F=1) frame from the peer, then rewind transmit
 * state, switch to the new link's MTU, resegment, and hand the frame
 * on to the normal RECV state handler.
 */
static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control,
				 struct sk_buff *skb, u8 event)
{
	int err;

	if (!control->final)
		return -EPROTO;

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	chan->rx_state = L2CAP_RX_STATE_RECV;
	l2cap_process_reqseq(chan, control->reqseq);

	if (!skb_queue_empty(&chan->tx_q))
		chan->tx_send_head = skb_peek(&chan->tx_q);
	else
		chan->tx_send_head = NULL;

	/* Rewind next_tx_seq to the point expected
	 * by the receiver.
	 */
	chan->next_tx_seq = control->reqseq;
	chan->unacked_frames = 0;

	/* Adopt the MTU of the link the channel moved to */
	if (chan->hs_hcon)
		chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
	else
		chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;

	err = l2cap_resegment(chan);

	if (!err)
		err = l2cap_rx_state_recv(chan, control, skb, event);

	return err;
}
6438
6439static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
6440{
6441 /* Make sure reqseq is for a packet that has been sent but not acked */
6442 u16 unacked;
6443
6444 unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
6445 return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
6446}
6447
6448static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6449 struct sk_buff *skb, u8 event)
6450{
6451 int err = 0;
6452
6453 BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
6454 control, skb, event, chan->rx_state);
6455
6456 if (__valid_reqseq(chan, control->reqseq)) {
6457 switch (chan->rx_state) {
6458 case L2CAP_RX_STATE_RECV:
6459 err = l2cap_rx_state_recv(chan, control, skb, event);
6460 break;
6461 case L2CAP_RX_STATE_SREJ_SENT:
6462 err = l2cap_rx_state_srej_sent(chan, control, skb,
6463 event);
6464 break;
6465 case L2CAP_RX_STATE_WAIT_P:
6466 err = l2cap_rx_state_wait_p(chan, control, skb, event);
6467 break;
6468 case L2CAP_RX_STATE_WAIT_F:
6469 err = l2cap_rx_state_wait_f(chan, control, skb, event);
6470 break;
6471 default:
6472 /* shut it down */
6473 break;
6474 }
6475 } else {
6476 BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
6477 control->reqseq, chan->next_tx_seq,
6478 chan->expected_ack_seq);
6479 l2cap_send_disconn_req(chan, ECONNRESET);
6480 }
6481
6482 return err;
6483}
6484
/* Receive an I-frame in streaming mode.
 *
 * Streaming mode has no retransmission: the expected frame is
 * reassembled, anything else (a sequence gap) discards both the frame
 * and any partially assembled SDU.  Always consumes or frees the skb.
 */
static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
			   struct sk_buff *skb)
{
	int err = 0;

	BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
	       chan->rx_state);

	if (l2cap_classify_txseq(chan, control->txseq) ==
	    L2CAP_TXSEQ_EXPECTED) {
		l2cap_pass_to_tx(chan, control);

		BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
		       __next_seq(chan, chan->buffer_seq));

		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);

		/* Reassembly takes ownership of skb; errors are not
		 * propagated since there is no recovery in streaming mode.
		 */
		l2cap_reassemble_sdu(chan, skb, control);
	} else {
		/* Sequence gap - drop the partial SDU and this frame */
		if (chan->sdu) {
			kfree_skb(chan->sdu);
			chan->sdu = NULL;
		}
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;

		if (skb) {
			BT_DBG("Freeing %p", skb);
			kfree_skb(skb);
		}
	}

	/* Resynchronize on the received frame's sequence number */
	chan->last_acked_seq = control->txseq;
	chan->expected_tx_seq = __next_seq(chan, control->txseq);

	return err;
}
6522
/* Validate and dispatch an incoming ERTM/streaming-mode frame.
 *
 * Checks the FCS, strips per-frame overhead from the payload length,
 * enforces the negotiated MPS, validates F/P bit combinations, then
 * routes I-frames and S-frames to the appropriate receive path.
 * Always returns 0; invalid frames are dropped (and may additionally
 * trigger a disconnect request).
 */
static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct l2cap_ctrl *control = &bt_cb(skb)->control;
	u16 len;
	u8 event;

	__unpack_control(chan, skb);

	len = skb->len;

	/*
	 * We can just drop the corrupted I-frame here.
	 * Receiver will miss it and start proper recovery
	 * procedures and ask for retransmission.
	 */
	if (l2cap_check_fcs(chan, skb))
		goto drop;

	/* SDU length field only present in the first (START) fragment */
	if (!control->sframe && control->sar == L2CAP_SAR_START)
		len -= L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		len -= L2CAP_FCS_SIZE;

	/* Payload larger than negotiated MPS is a protocol violation */
	if (len > chan->mps) {
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto drop;
	}

	if (!control->sframe) {
		int err;

		BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
		       control->sar, control->reqseq, control->final,
		       control->txseq);

		/* Validate F-bit - F=0 always valid, F=1 only
		 * valid in TX WAIT_F
		 */
		if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
			goto drop;

		if (chan->mode != L2CAP_MODE_STREAMING) {
			event = L2CAP_EV_RECV_IFRAME;
			err = l2cap_rx(chan, control, skb, event);
		} else {
			err = l2cap_stream_rx(chan, control, skb);
		}

		if (err)
			l2cap_send_disconn_req(chan, ECONNRESET);
	} else {
		/* Map the S-frame's super field to an RX event */
		const u8 rx_func_to_event[4] = {
			L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
			L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
		};

		/* Only I-frames are expected in streaming mode */
		if (chan->mode == L2CAP_MODE_STREAMING)
			goto drop;

		BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
		       control->reqseq, control->final, control->poll,
		       control->super);

		/* S-frames carry no payload beyond the control field */
		if (len != 0) {
			BT_ERR("Trailing bytes: %d in sframe", len);
			l2cap_send_disconn_req(chan, ECONNRESET);
			goto drop;
		}

		/* Validate F and P bits */
		if (control->final && (control->poll ||
				       chan->tx_state != L2CAP_TX_STATE_WAIT_F))
			goto drop;

		event = rx_func_to_event[control->super];
		if (l2cap_rx(chan, control, skb, event))
			l2cap_send_disconn_req(chan, ECONNRESET);
	}

	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
6610
6611static void l2cap_chan_le_send_credits(struct l2cap_chan *chan)
6612{
6613 struct l2cap_conn *conn = chan->conn;
6614 struct l2cap_le_credits pkt;
6615 u16 return_credits;
6616
6617 /* We return more credits to the sender only after the amount of
6618 * credits falls below half of the initial amount.
6619 */
6620 if (chan->rx_credits >= (le_max_credits + 1) / 2)
6621 return;
6622
6623 return_credits = le_max_credits - chan->rx_credits;
6624
6625 BT_DBG("chan %p returning %u credits to sender", chan, return_credits);
6626
6627 chan->rx_credits += return_credits;
6628
6629 pkt.cid = cpu_to_le16(chan->scid);
6630 pkt.credits = cpu_to_le16(return_credits);
6631
6632 chan->ident = l2cap_get_ident(conn);
6633
6634 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CREDITS, sizeof(pkt), &pkt);
6635}
6636
/* Receive a PDU on an LE credit-based flow control channel.
 *
 * Charges one RX credit, possibly replenishes the peer, then either
 * starts a new SDU (first PDU carries a 2-byte SDU length header) or
 * appends a fragment to the in-progress one.  Completed SDUs are
 * handed to chan->ops->recv().  The skb is always consumed here
 * (delivered, stored, or freed); the return value is therefore 0
 * except for the pre-consumption credit/MTU checks - see the comment
 * before the final return.
 */
static int l2cap_le_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
{
	int err;

	/* Peer sent data without holding credits - protocol violation */
	if (!chan->rx_credits) {
		BT_ERR("No credits to receive LE L2CAP data");
		l2cap_send_disconn_req(chan, ECONNRESET);
		return -ENOBUFS;
	}

	if (chan->imtu < skb->len) {
		BT_ERR("Too big LE L2CAP PDU");
		return -ENOBUFS;
	}

	chan->rx_credits--;
	BT_DBG("rx_credits %u -> %u", chan->rx_credits + 1, chan->rx_credits);

	l2cap_chan_le_send_credits(chan);

	err = 0;

	if (!chan->sdu) {
		u16 sdu_len;

		/* First PDU of an SDU: starts with the total SDU length */
		sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, L2CAP_SDULEN_SIZE);

		BT_DBG("Start of new SDU. sdu_len %u skb->len %u imtu %u",
		       sdu_len, skb->len, chan->imtu);

		if (sdu_len > chan->imtu) {
			BT_ERR("Too big LE L2CAP SDU length received");
			err = -EMSGSIZE;
			goto failed;
		}

		if (skb->len > sdu_len) {
			BT_ERR("Too much LE L2CAP data received");
			err = -EINVAL;
			goto failed;
		}

		/* Whole SDU in a single PDU - deliver immediately */
		if (skb->len == sdu_len)
			return chan->ops->recv(chan, skb);

		/* Partial SDU - store and wait for more fragments */
		chan->sdu = skb;
		chan->sdu_len = sdu_len;
		chan->sdu_last_frag = skb;

		return 0;
	}

	BT_DBG("SDU fragment. chan->sdu->len %u skb->len %u chan->sdu_len %u",
	       chan->sdu->len, skb->len, chan->sdu_len);

	if (chan->sdu->len + skb->len > chan->sdu_len) {
		BT_ERR("Too much LE L2CAP data received");
		err = -EINVAL;
		goto failed;
	}

	/* append_skb_frag takes ownership of skb */
	append_skb_frag(chan->sdu, skb, &chan->sdu_last_frag);
	skb = NULL;

	if (chan->sdu->len == chan->sdu_len) {
		err = chan->ops->recv(chan, chan->sdu);
		if (!err) {
			/* recv took ownership of the SDU */
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;
		}
	}

failed:
	if (err) {
		kfree_skb(skb);
		kfree_skb(chan->sdu);
		chan->sdu = NULL;
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;
	}

	/* We can't return an error here since we took care of the skb
	 * freeing internally. An error return would cause the caller to
	 * do a double-free of the skb.
	 */
	return 0;
}
6726
6727static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
6728 struct sk_buff *skb)
6729{
6730 struct l2cap_chan *chan;
6731
6732 chan = l2cap_get_chan_by_scid(conn, cid);
6733 if (!chan) {
6734 if (cid == L2CAP_CID_A2MP) {
6735 chan = a2mp_channel_create(conn, skb);
6736 if (!chan) {
6737 kfree_skb(skb);
6738 return;
6739 }
6740
6741 l2cap_chan_lock(chan);
6742 } else {
6743 BT_DBG("unknown cid 0x%4.4x", cid);
6744 /* Drop packet and return */
6745 kfree_skb(skb);
6746 return;
6747 }
6748 }
6749
6750 BT_DBG("chan %p, len %d", chan, skb->len);
6751
6752 if (chan->state != BT_CONNECTED)
6753 goto drop;
6754
6755 switch (chan->mode) {
6756 case L2CAP_MODE_LE_FLOWCTL:
6757 if (l2cap_le_data_rcv(chan, skb) < 0)
6758 goto drop;
6759
6760 goto done;
6761
6762 case L2CAP_MODE_BASIC:
6763 /* If socket recv buffers overflows we drop data here
6764 * which is *bad* because L2CAP has to be reliable.
6765 * But we don't have any other choice. L2CAP doesn't
6766 * provide flow control mechanism. */
6767
6768 if (chan->imtu < skb->len) {
6769 BT_ERR("Dropping L2CAP data: receive buffer overflow");
6770 goto drop;
6771 }
6772
6773 if (!chan->ops->recv(chan, skb))
6774 goto done;
6775 break;
6776
6777 case L2CAP_MODE_ERTM:
6778 case L2CAP_MODE_STREAMING:
6779 l2cap_data_rcv(chan, skb);
6780 goto done;
6781
6782 default:
6783 BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
6784 break;
6785 }
6786
6787drop:
6788 kfree_skb(skb);
6789
6790done:
6791 l2cap_chan_unlock(chan);
6792}
6793
/* Deliver a connectionless (UCD) packet to the listening channel for
 * the given PSM, if any.  Only valid on BR/EDR ACL links.  The skb is
 * consumed: either delivered to chan->ops->recv() or freed here.
 */
static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
				  struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_chan *chan;

	if (hcon->type != ACL_LINK)
		goto free_skb;

	/* l2cap_global_chan_by_psm takes a reference on the channel;
	 * dropped via l2cap_chan_put on every exit path below.
	 */
	chan = l2cap_global_chan_by_psm(0, psm, &hcon->src, &hcon->dst,
					ACL_LINK);
	if (!chan)
		goto free_skb;

	BT_DBG("chan %p, len %d", chan, skb->len);

	if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
		goto drop;

	if (chan->imtu < skb->len)
		goto drop;

	/* Store remote BD_ADDR and PSM for msg_name */
	bacpy(&bt_cb(skb)->bdaddr, &hcon->dst);
	bt_cb(skb)->psm = psm;

	if (!chan->ops->recv(chan, skb)) {
		l2cap_chan_put(chan);
		return;
	}

drop:
	l2cap_chan_put(chan);
free_skb:
	kfree_skb(skb);
}
6830
/* Parse the basic L2CAP header of an incoming frame and dispatch by
 * channel id.  Frames arriving before the HCI connection reaches
 * BT_CONNECTED are queued on pending_rx and replayed later by
 * process_pending_rx().  The skb is consumed on every path.
 */
static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct l2cap_hdr *lh = (void *) skb->data;
	struct hci_conn *hcon = conn->hcon;
	u16 cid, len;
	__le16 psm;

	if (hcon->state != BT_CONNECTED) {
		BT_DBG("queueing pending rx skb");
		skb_queue_tail(&conn->pending_rx, skb);
		return;
	}

	skb_pull(skb, L2CAP_HDR_SIZE);
	cid = __le16_to_cpu(lh->cid);
	len = __le16_to_cpu(lh->len);

	/* Header length must match the actual payload length */
	if (len != skb->len) {
		kfree_skb(skb);
		return;
	}

	/* Since we can't actively block incoming LE connections we must
	 * at least ensure that we ignore incoming data from them.
	 */
	if (hcon->type == LE_LINK &&
	    hci_bdaddr_list_lookup(&hcon->hdev->blacklist, &hcon->dst,
				   bdaddr_type(hcon, hcon->dst_type))) {
		kfree_skb(skb);
		return;
	}

	BT_DBG("len %d, cid 0x%4.4x", len, cid);

	switch (cid) {
	case L2CAP_CID_SIGNALING:
		l2cap_sig_channel(conn, skb);
		break;

	case L2CAP_CID_CONN_LESS:
		psm = get_unaligned((__le16 *) skb->data);
		skb_pull(skb, L2CAP_PSMLEN_SIZE);
		l2cap_conless_channel(conn, psm, skb);
		break;

	case L2CAP_CID_LE_SIGNALING:
		l2cap_le_sig_channel(conn, skb);
		break;

	default:
		l2cap_data_channel(conn, cid, skb);
		break;
	}
}
6885
6886static void process_pending_rx(struct work_struct *work)
6887{
6888 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
6889 pending_rx_work);
6890 struct sk_buff *skb;
6891
6892 BT_DBG("");
6893
6894 while ((skb = skb_dequeue(&conn->pending_rx)))
6895 l2cap_recv_frame(conn, skb);
6896}
6897
/* Get or create the l2cap_conn object attached to an HCI connection.
 *
 * Returns the existing conn if one is already attached; otherwise
 * allocates one, creates an HCI channel for it and initializes locks,
 * lists and work items.  Returns NULL on allocation failure.
 */
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct hci_chan *hchan;

	if (conn)
		return conn;

	hchan = hci_chan_create(hcon);
	if (!hchan)
		return NULL;

	conn = kzalloc(sizeof(*conn), GFP_KERNEL);
	if (!conn) {
		hci_chan_del(hchan);
		return NULL;
	}

	kref_init(&conn->ref);
	hcon->l2cap_data = conn;
	/* conn holds a reference on the underlying HCI connection */
	conn->hcon = hci_conn_get(hcon);
	conn->hchan = hchan;

	BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);

	switch (hcon->type) {
	case LE_LINK:
		/* Use the LE-specific MTU if the controller reports one */
		if (hcon->hdev->le_mtu) {
			conn->mtu = hcon->hdev->le_mtu;
			break;
		}
		/* fall through */
	default:
		conn->mtu = hcon->hdev->acl_mtu;
		break;
	}

	conn->feat_mask = 0;

	if (hcon->type == ACL_LINK)
		conn->hs_enabled = test_bit(HCI_HS_ENABLED,
					    &hcon->hdev->dev_flags);

	mutex_init(&conn->ident_lock);
	mutex_init(&conn->chan_lock);

	INIT_LIST_HEAD(&conn->chan_l);
	INIT_LIST_HEAD(&conn->users);

	INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);

	skb_queue_head_init(&conn->pending_rx);
	INIT_WORK(&conn->pending_rx_work, process_pending_rx);
	INIT_WORK(&conn->id_addr_update_work, l2cap_conn_update_id_addr);

	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	return conn;
}
6957
6958static bool is_valid_psm(u16 psm, u8 dst_type) {
6959 if (!psm)
6960 return false;
6961
6962 if (bdaddr_type_is_le(dst_type))
6963 return (psm <= 0x00ff);
6964
6965 /* PSM must be odd and lsb of upper byte must be 0 */
6966 return ((psm & 0x0101) == 0x0001);
6967}
6968
/* Initiate an outgoing L2CAP channel connection.
 *
 * @chan:     channel to connect (state BT_OPEN or BT_BOUND)
 * @psm:      destination PSM (connection-oriented channels)
 * @cid:      destination CID (fixed channels)
 * @dst:      remote device address
 * @dst_type: L2CAP address type of @dst (BDADDR_* value)
 *
 * Validates PSM/CID and channel mode, establishes (or reuses) the
 * underlying HCI link - LE or BR/EDR depending on @dst_type - binds
 * the channel to the resulting l2cap_conn and kicks off channel
 * setup.  Returns 0 on success or a negative errno.
 */
int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
		       bdaddr_t *dst, u8 dst_type)
{
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	int err;

	BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", &chan->src, dst,
	       dst_type, __le16_to_cpu(psm));

	hdev = hci_get_route(dst, &chan->src);
	if (!hdev)
		return -EHOSTUNREACH;

	hci_dev_lock(hdev);

	/* RAW channels may connect without either a PSM or a CID */
	if (!is_valid_psm(__le16_to_cpu(psm), dst_type) && !cid &&
	    chan->chan_type != L2CAP_CHAN_RAW) {
		err = -EINVAL;
		goto done;
	}

	if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !psm) {
		err = -EINVAL;
		goto done;
	}

	if (chan->chan_type == L2CAP_CHAN_FIXED && !cid) {
		err = -EINVAL;
		goto done;
	}

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		break;
	case L2CAP_MODE_LE_FLOWCTL:
		l2cap_le_flowctl_init(chan);
		break;
	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		if (!disable_ertm)
			break;
		/* fall through */
	default:
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (chan->state) {
	case BT_CONNECT:
	case BT_CONNECT2:
	case BT_CONFIG:
		/* Already connecting */
		err = 0;
		goto done;

	case BT_CONNECTED:
		/* Already connected */
		err = -EISCONN;
		goto done;

	case BT_OPEN:
	case BT_BOUND:
		/* Can connect */
		break;

	default:
		err = -EBADFD;
		goto done;
	}

	/* Set destination address and psm */
	bacpy(&chan->dst, dst);
	chan->dst_type = dst_type;

	chan->psm = psm;
	chan->dcid = cid;

	if (bdaddr_type_is_le(dst_type)) {
		u8 role;

		/* Convert from L2CAP channel address type to HCI address type
		 */
		if (dst_type == BDADDR_LE_PUBLIC)
			dst_type = ADDR_LE_DEV_PUBLIC;
		else
			dst_type = ADDR_LE_DEV_RANDOM;

		/* While advertising we act as slave; otherwise initiate
		 * as master.
		 */
		if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
			role = HCI_ROLE_SLAVE;
		else
			role = HCI_ROLE_MASTER;

		hcon = hci_connect_le(hdev, dst, dst_type, chan->sec_level,
				      HCI_LE_CONN_TIMEOUT, role);
	} else {
		u8 auth_type = l2cap_get_auth_type(chan);
		hcon = hci_connect_acl(hdev, dst, chan->sec_level, auth_type);
	}

	if (IS_ERR(hcon)) {
		err = PTR_ERR(hcon);
		goto done;
	}

	conn = l2cap_conn_add(hcon);
	if (!conn) {
		hci_conn_drop(hcon);
		err = -ENOMEM;
		goto done;
	}

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(chan);

	/* Destination CID already in use on this connection */
	if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
		hci_conn_drop(hcon);
		err = -EBUSY;
		goto chan_unlock;
	}

	/* Update source addr of the socket */
	bacpy(&chan->src, &hcon->src);
	chan->src_type = bdaddr_type(hcon, hcon->src_type);

	__l2cap_chan_add(conn, chan);

	/* l2cap_chan_add takes its own ref so we can drop this one */
	hci_conn_drop(hcon);

	l2cap_state_change(chan, BT_CONNECT);
	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	/* Release chan->sport so that it can be reused by other
	 * sockets (as it's only used for listening sockets).
	 */
	write_lock(&chan_list_lock);
	chan->sport = 0;
	write_unlock(&chan_list_lock);

	/* If the link is already up, short-circuit channel setup */
	if (hcon->state == BT_CONNECTED) {
		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			__clear_chan_timer(chan);
			if (l2cap_chan_check_security(chan, true))
				l2cap_state_change(chan, BT_CONNECTED);
		} else
			l2cap_do_start(chan);
	}

	err = 0;

chan_unlock:
	l2cap_chan_unlock(chan);
	mutex_unlock(&conn->chan_lock);
done:
	hci_dev_unlock(hdev);
	hci_dev_put(hdev);
	return err;
}
EXPORT_SYMBOL_GPL(l2cap_chan_connect);
7130
7131/* ---- L2CAP interface with lower layer (HCI) ---- */
7132
/* Incoming BR/EDR connection indication from HCI.
 *
 * Scan the global channel list for listening channels bound to this
 * adapter (exact source address match preferred over wildcard) and
 * return the combined link-mode flags (HCI_LM_ACCEPT, optionally
 * HCI_LM_MASTER) that tell HCI whether and how to accept.
 */
int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	int exact = 0, lm1 = 0, lm2 = 0;
	struct l2cap_chan *c;

	BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);

	/* Find listening sockets and check their link_mode */
	read_lock(&chan_list_lock);
	list_for_each_entry(c, &chan_list, global_l) {
		if (c->state != BT_LISTEN)
			continue;

		if (!bacmp(&c->src, &hdev->bdaddr)) {
			/* Exact match on adapter address */
			lm1 |= HCI_LM_ACCEPT;
			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
				lm1 |= HCI_LM_MASTER;
			exact++;
		} else if (!bacmp(&c->src, BDADDR_ANY)) {
			/* Wildcard listener */
			lm2 |= HCI_LM_ACCEPT;
			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
				lm2 |= HCI_LM_MASTER;
		}
	}
	read_unlock(&chan_list_lock);

	/* Prefer flags from exact-address listeners when any exist */
	return exact ? lm1 : lm2;
}
7161
/* Find the next fixed channel in BT_LISTEN state, continue iteration
 * from an existing channel in the list or from the beginning of the
 * global list (by passing NULL as first parameter).
 *
 * The returned channel has a reference taken via l2cap_chan_hold();
 * the caller is responsible for dropping it.  Returns NULL when no
 * further matching channel exists.
 */
static struct l2cap_chan *l2cap_global_fixed_chan(struct l2cap_chan *c,
						  bdaddr_t *src, u8 link_type)
{
	read_lock(&chan_list_lock);

	if (c)
		c = list_next_entry(c, global_l);
	else
		c = list_entry(chan_list.next, typeof(*c), global_l);

	list_for_each_entry_from(c, &chan_list, global_l) {
		if (c->chan_type != L2CAP_CHAN_FIXED)
			continue;
		if (c->state != BT_LISTEN)
			continue;
		/* Accept either an exact source match or a wildcard bind */
		if (bacmp(&c->src, src) && bacmp(&c->src, BDADDR_ANY))
			continue;
		/* Channel's address type must match the link transport */
		if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR)
			continue;
		if (link_type == LE_LINK && c->src_type == BDADDR_BREDR)
			continue;

		l2cap_chan_hold(c);
		read_unlock(&chan_list_lock);
		return c;
	}

	read_unlock(&chan_list_lock);

	return NULL;
}
7197
/* Connection-complete callback from the HCI layer. On failure, tear
 * down any L2CAP state; on success, create the L2CAP connection and
 * offer it to all matching listening fixed channels.
 */
void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
{
	struct hci_dev *hdev = hcon->hdev;
	struct l2cap_conn *conn;
	struct l2cap_chan *pchan;
	u8 dst_type;

	BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);

	/* A failed link setup destroys any associated L2CAP state */
	if (status) {
		l2cap_conn_del(hcon, bt_to_errno(status));
		return;
	}

	conn = l2cap_conn_add(hcon);
	if (!conn)
		return;

	dst_type = bdaddr_type(hcon, hcon->dst_type);

	/* If device is blocked, do not create channels for it */
	if (hci_bdaddr_list_lookup(&hdev->blacklist, &hcon->dst, dst_type))
		return;

	/* Find fixed channels and notify them of the new connection. We
	 * use multiple individual lookups, continuing each time where
	 * we left off, because the list lock would prevent calling the
	 * potentially sleeping l2cap_chan_lock() function.
	 */
	pchan = l2cap_global_fixed_chan(NULL, &hdev->bdaddr, hcon->type);
	while (pchan) {
		struct l2cap_chan *chan, *next;

		/* Client fixed channels should override server ones */
		if (__l2cap_get_chan_by_dcid(conn, pchan->scid))
			goto next;

		l2cap_chan_lock(pchan);
		/* new_connection() may return NULL when the listener
		 * declines; in that case nothing is added to conn.
		 */
		chan = pchan->ops->new_connection(pchan);
		if (chan) {
			bacpy(&chan->src, &hcon->src);
			bacpy(&chan->dst, &hcon->dst);
			chan->src_type = bdaddr_type(hcon, hcon->src_type);
			chan->dst_type = dst_type;

			__l2cap_chan_add(conn, chan);
		}

		l2cap_chan_unlock(pchan);
next:
		/* Fetch the next channel before dropping the reference
		 * on the current one, so the list position stays valid.
		 */
		next = l2cap_global_fixed_chan(pchan, &hdev->bdaddr,
					       hcon->type);
		l2cap_chan_put(pchan);
		pchan = next;
	}

	l2cap_conn_ready(conn);
}
7256
7257int l2cap_disconn_ind(struct hci_conn *hcon)
7258{
7259 struct l2cap_conn *conn = hcon->l2cap_data;
7260
7261 BT_DBG("hcon %p", hcon);
7262
7263 if (!conn)
7264 return HCI_ERROR_REMOTE_USER_TERM;
7265 return conn->disc_reason;
7266}
7267
/* Disconnect-complete callback from HCI: destroy all L2CAP state for
 * the link, mapping the HCI @reason code to an errno value.
 */
void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
{
	BT_DBG("hcon %p reason %d", hcon, reason);

	l2cap_conn_del(hcon, bt_to_errno(reason));
}
7274
7275static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
7276{
7277 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
7278 return;
7279
7280 if (encrypt == 0x00) {
7281 if (chan->sec_level == BT_SECURITY_MEDIUM) {
7282 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
7283 } else if (chan->sec_level == BT_SECURITY_HIGH ||
7284 chan->sec_level == BT_SECURITY_FIPS)
7285 l2cap_chan_close(chan, ECONNREFUSED);
7286 } else {
7287 if (chan->sec_level == BT_SECURITY_MEDIUM)
7288 __clear_chan_timer(chan);
7289 }
7290}
7291
/* Security/encryption change callback from the HCI layer. Walks every
 * channel on the connection, updates its security level and either
 * resumes traffic, continues a pending connection attempt, or answers
 * a deferred incoming connection depending on @status and @encrypt.
 */
int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan;

	if (!conn)
		return 0;

	BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
		       state_to_string(chan->state));

		/* The A2MP channel is exempt from this handling */
		if (chan->scid == L2CAP_CID_A2MP) {
			l2cap_chan_unlock(chan);
			continue;
		}

		/* Successful encryption lifts the channel security up to
		 * whatever the ACL link now provides.
		 */
		if (!status && encrypt)
			chan->sec_level = hcon->sec_level;

		if (!__l2cap_no_conn_pending(chan)) {
			l2cap_chan_unlock(chan);
			continue;
		}

		/* Established/configuring channels just resume and
		 * re-evaluate their encryption timers.
		 */
		if (!status && (chan->state == BT_CONNECTED ||
				chan->state == BT_CONFIG)) {
			chan->ops->resume(chan);
			l2cap_check_encryption(chan, encrypt);
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			/* Outgoing connect was waiting on security */
			if (!status)
				l2cap_start_connection(chan);
			else
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
		} else if (chan->state == BT_CONNECT2 &&
			   chan->mode != L2CAP_MODE_LE_FLOWCTL) {
			/* Incoming BR/EDR connect was waiting on security;
			 * answer the remote's Connect Request now.
			 */
			struct l2cap_conn_rsp rsp;
			__u16 res, stat;

			if (!status) {
				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
					/* Let userspace authorize first */
					res = L2CAP_CR_PEND;
					stat = L2CAP_CS_AUTHOR_PEND;
					chan->ops->defer(chan);
				} else {
					l2cap_state_change(chan, BT_CONFIG);
					res = L2CAP_CR_SUCCESS;
					stat = L2CAP_CS_NO_INFO;
				}
			} else {
				/* Security failed: reject and schedule a
				 * disconnect.
				 */
				l2cap_state_change(chan, BT_DISCONN);
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
				res = L2CAP_CR_SEC_BLOCK;
				stat = L2CAP_CS_NO_INFO;
			}

			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);
			rsp.result = cpu_to_le16(res);
			rsp.status = cpu_to_le16(stat);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			/* Start configuration right away for an accepted
			 * connection, unless a request is already out.
			 */
			if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
			    res == L2CAP_CR_SUCCESS) {
				char buf[128];
				set_bit(CONF_REQ_SENT, &chan->conf_state);
				l2cap_send_cmd(conn, l2cap_get_ident(conn),
					       L2CAP_CONF_REQ,
					       l2cap_build_conf_req(chan, buf),
					       buf);
				chan->num_conf_req++;
			}
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);

	return 0;
}
7384
/* Entry point for incoming ACL data from the HCI layer. Reassembles
 * fragmented L2CAP PDUs (an ACL_START fragment followed by ACL_CONT
 * fragments) into complete frames and hands them to l2cap_recv_frame().
 * Consumes @skb on every path except when a complete frame is passed
 * on directly. Always returns 0.
 */
int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_hdr *hdr;
	int len;

	/* For AMP controller do not create l2cap conn */
	if (!conn && hcon->hdev->dev_type != HCI_BREDR)
		goto drop;

	if (!conn)
		conn = l2cap_conn_add(hcon);

	if (!conn)
		goto drop;

	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);

	switch (flags) {
	case ACL_START:
	case ACL_START_NO_FLUSH:
	case ACL_COMPLETE:
		/* A new start frame while reassembly is in progress means
		 * the previous PDU was truncated; discard the partial frame.
		 */
		if (conn->rx_len) {
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
		}

		/* Start fragment always begin with Basic L2CAP header */
		if (skb->len < L2CAP_HDR_SIZE) {
			BT_ERR("Frame is too short (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Total PDU length announced by the L2CAP header */
		hdr = (struct l2cap_hdr *) skb->data;
		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);
			return 0;
		}

		BT_DBG("Start: total len %d, frag len %d", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %d, expected len %d)",
			       skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
		if (!conn->rx_skb)
			goto drop;

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
					  skb->len);
		/* Bytes still outstanding until the PDU is complete */
		conn->rx_len = len - skb->len;
		break;

	case ACL_CONT:
		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);

		/* Continuation without a pending start frame */
		if (!conn->rx_len) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Fragment would exceed the announced PDU length; abort
		 * the reassembly entirely.
		 */
		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %d, expected %d)",
			       skb->len, conn->rx_len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
					  skb->len);
		conn->rx_len -= skb->len;

		if (!conn->rx_len) {
			/* Complete frame received. l2cap_recv_frame
			 * takes ownership of the skb so set the global
			 * rx_skb pointer to NULL first.
			 */
			struct sk_buff *rx_skb = conn->rx_skb;
			conn->rx_skb = NULL;
			l2cap_recv_frame(conn, rx_skb);
		}
		break;
	}

drop:
	kfree_skb(skb);
	return 0;
}
7489
7490static int l2cap_debugfs_show(struct seq_file *f, void *p)
7491{
7492 struct l2cap_chan *c;
7493
7494 read_lock(&chan_list_lock);
7495
7496 list_for_each_entry(c, &chan_list, global_l) {
7497 seq_printf(f, "%pMR %pMR %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
7498 &c->src, &c->dst,
7499 c->state, __le16_to_cpu(c->psm),
7500 c->scid, c->dcid, c->imtu, c->omtu,
7501 c->sec_level, c->mode);
7502 }
7503
7504 read_unlock(&chan_list_lock);
7505
7506 return 0;
7507}
7508
/* Hook the channel-list show function into the seq_file interface */
static int l2cap_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, l2cap_debugfs_show, inode->i_private);
}
7513
/* File operations for the "l2cap" debugfs entry (seq_file based) */
static const struct file_operations l2cap_debugfs_fops = {
	.open = l2cap_debugfs_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

/* Dentry of the "l2cap" debugfs file; removed again in l2cap_exit() */
static struct dentry *l2cap_debugfs;
7522
/* Initialize the L2CAP layer: register the socket interface and create
 * the debugfs entries. Returns 0 on success or a negative errno from
 * socket registration.
 */
int __init l2cap_init(void)
{
	int err;

	err = l2cap_init_sockets();
	if (err < 0)
		return err;

	/* debugfs is optional; succeed without it when unavailable */
	if (IS_ERR_OR_NULL(bt_debugfs))
		return 0;

	l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
					    NULL, &l2cap_debugfs_fops);

	/* Tunable LE flow-control parameters (writable, mode 0644) */
	debugfs_create_u16("l2cap_le_max_credits", 0644, bt_debugfs,
			   &le_max_credits);
	debugfs_create_u16("l2cap_le_default_mps", 0644, bt_debugfs,
			   &le_default_mps);

	return 0;
}
7544
/* Tear down the debugfs entry and the L2CAP socket interface */
void l2cap_exit(void)
{
	debugfs_remove(l2cap_debugfs);
	l2cap_cleanup_sockets();
}
7550
/* Module parameter (mode 0644, so also runtime-writable) that disables
 * Enhanced Retransmission Mode support.
 */
module_param(disable_ertm, bool, 0644);
MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");
This page took 0.051519 seconds and 5 git commands to generate.