Bluetooth: Remove special ATT data channel handling
1/*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
8
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
10
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
14
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
23
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
27*/
28
29/* Bluetooth L2CAP core. */
30
31#include <linux/module.h>
32
33#include <linux/debugfs.h>
34#include <linux/crc16.h>
35
36#include <net/bluetooth/bluetooth.h>
37#include <net/bluetooth/hci_core.h>
38#include <net/bluetooth/l2cap.h>
39
40#include "smp.h"
41#include "a2mp.h"
42#include "amp.h"
43
44#define LE_FLOWCTL_MAX_CREDITS 65535
45
46bool disable_ertm;
47
48static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD;
49static u8 l2cap_fixed_chan[8] = { L2CAP_FC_SIG_BREDR | L2CAP_FC_CONNLESS, };
50
51static LIST_HEAD(chan_list);
52static DEFINE_RWLOCK(chan_list_lock);
53
54static u16 le_max_credits = L2CAP_LE_MAX_CREDITS;
55static u16 le_default_mps = L2CAP_LE_DEFAULT_MPS;
56
57static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
58 u8 code, u8 ident, u16 dlen, void *data);
59static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
60 void *data);
61static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
62static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
63
64static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
65 struct sk_buff_head *skbs, u8 event);
66
67static inline __u8 bdaddr_type(struct hci_conn *hcon, __u8 type)
68{
69 if (hcon->type == LE_LINK) {
70 if (type == ADDR_LE_DEV_PUBLIC)
71 return BDADDR_LE_PUBLIC;
72 else
73 return BDADDR_LE_RANDOM;
74 }
75
76 return BDADDR_BREDR;
77}
78
79/* ---- L2CAP channels ---- */
80
81static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
82 u16 cid)
83{
84 struct l2cap_chan *c;
85
86 list_for_each_entry(c, &conn->chan_l, list) {
87 if (c->dcid == cid)
88 return c;
89 }
90 return NULL;
91}
92
93static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
94 u16 cid)
95{
96 struct l2cap_chan *c;
97
98 list_for_each_entry(c, &conn->chan_l, list) {
99 if (c->scid == cid)
100 return c;
101 }
102 return NULL;
103}
104
105/* Find channel with given SCID.
106 * Returns locked channel. */
107static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
108 u16 cid)
109{
110 struct l2cap_chan *c;
111
112 mutex_lock(&conn->chan_lock);
113 c = __l2cap_get_chan_by_scid(conn, cid);
114 if (c)
115 l2cap_chan_lock(c);
116 mutex_unlock(&conn->chan_lock);
117
118 return c;
119}
120
121/* Find channel with given DCID.
122 * Returns locked channel.
123 */
124static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
125 u16 cid)
126{
127 struct l2cap_chan *c;
128
129 mutex_lock(&conn->chan_lock);
130 c = __l2cap_get_chan_by_dcid(conn, cid);
131 if (c)
132 l2cap_chan_lock(c);
133 mutex_unlock(&conn->chan_lock);
134
135 return c;
136}
137
138static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
139 u8 ident)
140{
141 struct l2cap_chan *c;
142
143 list_for_each_entry(c, &conn->chan_l, list) {
144 if (c->ident == ident)
145 return c;
146 }
147 return NULL;
148}
149
150static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
151 u8 ident)
152{
153 struct l2cap_chan *c;
154
155 mutex_lock(&conn->chan_lock);
156 c = __l2cap_get_chan_by_ident(conn, ident);
157 if (c)
158 l2cap_chan_lock(c);
159 mutex_unlock(&conn->chan_lock);
160
161 return c;
162}
163
164static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
165{
166 struct l2cap_chan *c;
167
168 list_for_each_entry(c, &chan_list, global_l) {
169 if (c->sport == psm && !bacmp(&c->src, src))
170 return c;
171 }
172 return NULL;
173}
174
175int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
176{
177 int err;
178
179 write_lock(&chan_list_lock);
180
181 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
182 err = -EADDRINUSE;
183 goto done;
184 }
185
186 if (psm) {
187 chan->psm = psm;
188 chan->sport = psm;
189 err = 0;
190 } else {
191 u16 p;
192
193 err = -EINVAL;
194 for (p = 0x1001; p < 0x1100; p += 2)
195 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
196 chan->psm = cpu_to_le16(p);
197 chan->sport = cpu_to_le16(p);
198 err = 0;
199 break;
200 }
201 }
202
203done:
204 write_unlock(&chan_list_lock);
205 return err;
206}
207EXPORT_SYMBOL_GPL(l2cap_add_psm);
208
209int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
210{
211 write_lock(&chan_list_lock);
212
213 chan->scid = scid;
214
215 write_unlock(&chan_list_lock);
216
217 return 0;
218}
219
220static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
221{
222 u16 cid, dyn_end;
223
224 if (conn->hcon->type == LE_LINK)
225 dyn_end = L2CAP_CID_LE_DYN_END;
226 else
227 dyn_end = L2CAP_CID_DYN_END;
228
229 for (cid = L2CAP_CID_DYN_START; cid < dyn_end; cid++) {
230 if (!__l2cap_get_chan_by_scid(conn, cid))
231 return cid;
232 }
233
234 return 0;
235}
236
237static void l2cap_state_change(struct l2cap_chan *chan, int state)
238{
239 BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
240 state_to_string(state));
241
242 chan->state = state;
243 chan->ops->state_change(chan, state, 0);
244}
245
246static inline void l2cap_state_change_and_error(struct l2cap_chan *chan,
247 int state, int err)
248{
249 chan->state = state;
250 chan->ops->state_change(chan, chan->state, err);
251}
252
253static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
254{
255 chan->ops->state_change(chan, chan->state, err);
256}
257
258static void __set_retrans_timer(struct l2cap_chan *chan)
259{
260 if (!delayed_work_pending(&chan->monitor_timer) &&
261 chan->retrans_timeout) {
262 l2cap_set_timer(chan, &chan->retrans_timer,
263 msecs_to_jiffies(chan->retrans_timeout));
264 }
265}
266
267static void __set_monitor_timer(struct l2cap_chan *chan)
268{
269 __clear_retrans_timer(chan);
270 if (chan->monitor_timeout) {
271 l2cap_set_timer(chan, &chan->monitor_timer,
272 msecs_to_jiffies(chan->monitor_timeout));
273 }
274}
275
276static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
277 u16 seq)
278{
279 struct sk_buff *skb;
280
281 skb_queue_walk(head, skb) {
282 if (bt_cb(skb)->control.txseq == seq)
283 return skb;
284 }
285
286 return NULL;
287}
288
289/* ---- L2CAP sequence number lists ---- */
290
291/* For ERTM, ordered lists of sequence numbers must be tracked for
292 * SREJ requests that are received and for frames that are to be
293 * retransmitted. These seq_list functions implement a singly-linked
294 * list in an array, where membership in the list can also be checked
295 * in constant time. Items can also be added to the tail of the list
296 * and removed from the head in constant time, without further memory
297 * allocs or frees.
298 */
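
/* Editor-added illustrative sketch (not part of the original source):
 * suppose a seq_list is initialised with size 5, so the backing array
 * is rounded up to 8 entries and the mask is 0x7.  Appending txseq 10
 * and then txseq 13 gives
 *
 *	head = 10, tail = 13
 *	list[10 & 0x7] = 13
 *	list[13 & 0x7] = L2CAP_SEQ_LIST_TAIL
 *
 * so l2cap_seq_list_pop() returns 10 and then 13, while
 * l2cap_seq_list_contains() is a single masked array lookup.
 */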
299
300static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
301{
302 size_t alloc_size, i;
303
304 /* Allocated size is a power of 2 to map sequence numbers
 305 * (which may be up to 14 bits) into a smaller array that is
306 * sized for the negotiated ERTM transmit windows.
307 */
308 alloc_size = roundup_pow_of_two(size);
309
310 seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
311 if (!seq_list->list)
312 return -ENOMEM;
313
314 seq_list->mask = alloc_size - 1;
315 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
316 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
317 for (i = 0; i < alloc_size; i++)
318 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
319
320 return 0;
321}
322
323static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
324{
325 kfree(seq_list->list);
326}
327
328static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
329 u16 seq)
330{
331 /* Constant-time check for list membership */
332 return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
333}
334
335static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
336{
337 u16 seq = seq_list->head;
338 u16 mask = seq_list->mask;
339
340 seq_list->head = seq_list->list[seq & mask];
341 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
342
343 if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
344 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
345 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
346 }
347
348 return seq;
349}
350
351static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
352{
353 u16 i;
354
355 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
356 return;
357
358 for (i = 0; i <= seq_list->mask; i++)
359 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
360
361 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
362 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
363}
364
365static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
366{
367 u16 mask = seq_list->mask;
368
369 /* All appends happen in constant time */
370
371 if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
372 return;
373
374 if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
375 seq_list->head = seq;
376 else
377 seq_list->list[seq_list->tail & mask] = seq;
378
379 seq_list->tail = seq;
380 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
381}
382
383static void l2cap_chan_timeout(struct work_struct *work)
384{
385 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
386 chan_timer.work);
387 struct l2cap_conn *conn = chan->conn;
388 int reason;
389
390 BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
391
392 mutex_lock(&conn->chan_lock);
393 l2cap_chan_lock(chan);
394
395 if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
396 reason = ECONNREFUSED;
397 else if (chan->state == BT_CONNECT &&
398 chan->sec_level != BT_SECURITY_SDP)
399 reason = ECONNREFUSED;
400 else
401 reason = ETIMEDOUT;
402
403 l2cap_chan_close(chan, reason);
404
405 l2cap_chan_unlock(chan);
406
407 chan->ops->close(chan);
408 mutex_unlock(&conn->chan_lock);
409
410 l2cap_chan_put(chan);
411}
412
413struct l2cap_chan *l2cap_chan_create(void)
414{
415 struct l2cap_chan *chan;
416
417 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
418 if (!chan)
419 return NULL;
420
421 mutex_init(&chan->lock);
422
423 write_lock(&chan_list_lock);
424 list_add(&chan->global_l, &chan_list);
425 write_unlock(&chan_list_lock);
426
427 INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
428
429 chan->state = BT_OPEN;
430
431 kref_init(&chan->kref);
432
433 /* This flag is cleared in l2cap_chan_ready() */
434 set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
435
436 BT_DBG("chan %p", chan);
437
438 return chan;
439}
440EXPORT_SYMBOL_GPL(l2cap_chan_create);
441
442static void l2cap_chan_destroy(struct kref *kref)
443{
444 struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);
445
446 BT_DBG("chan %p", chan);
447
448 write_lock(&chan_list_lock);
449 list_del(&chan->global_l);
450 write_unlock(&chan_list_lock);
451
452 kfree(chan);
453}
454
455void l2cap_chan_hold(struct l2cap_chan *c)
456{
457 BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));
458
459 kref_get(&c->kref);
460}
461
462void l2cap_chan_put(struct l2cap_chan *c)
463{
464 BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));
465
466 kref_put(&c->kref, l2cap_chan_destroy);
467}
468EXPORT_SYMBOL_GPL(l2cap_chan_put);
469
470void l2cap_chan_set_defaults(struct l2cap_chan *chan)
471{
472 chan->fcs = L2CAP_FCS_CRC16;
473 chan->max_tx = L2CAP_DEFAULT_MAX_TX;
474 chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
475 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
476 chan->remote_max_tx = chan->max_tx;
477 chan->remote_tx_win = chan->tx_win;
478 chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
479 chan->sec_level = BT_SECURITY_LOW;
480 chan->flush_to = L2CAP_DEFAULT_FLUSH_TO;
481 chan->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
482 chan->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
483 chan->conf_state = 0;
484
485 set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
486}
487EXPORT_SYMBOL_GPL(l2cap_chan_set_defaults);
488
489static void l2cap_le_flowctl_init(struct l2cap_chan *chan)
490{
491 chan->sdu = NULL;
492 chan->sdu_last_frag = NULL;
493 chan->sdu_len = 0;
494 chan->tx_credits = 0;
495 chan->rx_credits = le_max_credits;
496 chan->mps = min_t(u16, chan->imtu, le_default_mps);
497
498 skb_queue_head_init(&chan->tx_q);
499}
500
501void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
502{
503 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
504 __le16_to_cpu(chan->psm), chan->dcid);
505
506 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
507
508 chan->conn = conn;
509
510 switch (chan->chan_type) {
511 case L2CAP_CHAN_CONN_ORIENTED:
512 /* Alloc CID for connection-oriented socket */
513 chan->scid = l2cap_alloc_cid(conn);
514 if (conn->hcon->type == ACL_LINK)
515 chan->omtu = L2CAP_DEFAULT_MTU;
516 break;
517
518 case L2CAP_CHAN_CONN_LESS:
519 /* Connectionless socket */
520 chan->scid = L2CAP_CID_CONN_LESS;
521 chan->dcid = L2CAP_CID_CONN_LESS;
522 chan->omtu = L2CAP_DEFAULT_MTU;
523 break;
524
525 case L2CAP_CHAN_FIXED:
526 /* Caller will set CID and CID specific MTU values */
527 break;
528
529 default:
530 /* Raw socket can send/recv signalling messages only */
531 chan->scid = L2CAP_CID_SIGNALING;
532 chan->dcid = L2CAP_CID_SIGNALING;
533 chan->omtu = L2CAP_DEFAULT_MTU;
534 }
535
536 chan->local_id = L2CAP_BESTEFFORT_ID;
537 chan->local_stype = L2CAP_SERV_BESTEFFORT;
538 chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
539 chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
540 chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
541 chan->local_flush_to = L2CAP_EFS_DEFAULT_FLUSH_TO;
542
543 l2cap_chan_hold(chan);
544
545 hci_conn_hold(conn->hcon);
546
547 list_add(&chan->list, &conn->chan_l);
548}
549
550void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
551{
552 mutex_lock(&conn->chan_lock);
553 __l2cap_chan_add(conn, chan);
554 mutex_unlock(&conn->chan_lock);
555}
556
557void l2cap_chan_del(struct l2cap_chan *chan, int err)
558{
559 struct l2cap_conn *conn = chan->conn;
560
561 __clear_chan_timer(chan);
562
563 BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
564
565 if (conn) {
566 struct amp_mgr *mgr = conn->hcon->amp_mgr;
567 /* Delete from channel list */
568 list_del(&chan->list);
569
570 l2cap_chan_put(chan);
571
572 chan->conn = NULL;
573
574 if (chan->scid != L2CAP_CID_A2MP)
575 hci_conn_drop(conn->hcon);
576
577 if (mgr && mgr->bredr_chan == chan)
578 mgr->bredr_chan = NULL;
579 }
580
581 if (chan->hs_hchan) {
582 struct hci_chan *hs_hchan = chan->hs_hchan;
583
584 BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan);
585 amp_disconnect_logical_link(hs_hchan);
586 }
587
588 chan->ops->teardown(chan, err);
589
590 if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
591 return;
592
 593 switch (chan->mode) {
594 case L2CAP_MODE_BASIC:
595 break;
596
597 case L2CAP_MODE_LE_FLOWCTL:
598 skb_queue_purge(&chan->tx_q);
599 break;
600
601 case L2CAP_MODE_ERTM:
602 __clear_retrans_timer(chan);
603 __clear_monitor_timer(chan);
604 __clear_ack_timer(chan);
605
606 skb_queue_purge(&chan->srej_q);
607
608 l2cap_seq_list_free(&chan->srej_list);
609 l2cap_seq_list_free(&chan->retrans_list);
610
611 /* fall through */
612
613 case L2CAP_MODE_STREAMING:
614 skb_queue_purge(&chan->tx_q);
615 break;
616 }
617
618 return;
619}
620EXPORT_SYMBOL_GPL(l2cap_chan_del);
621
622void l2cap_conn_update_id_addr(struct hci_conn *hcon)
623{
624 struct l2cap_conn *conn = hcon->l2cap_data;
625 struct l2cap_chan *chan;
626
627 mutex_lock(&conn->chan_lock);
628
629 list_for_each_entry(chan, &conn->chan_l, list) {
630 l2cap_chan_lock(chan);
631 bacpy(&chan->dst, &hcon->dst);
632 chan->dst_type = bdaddr_type(hcon, hcon->dst_type);
633 l2cap_chan_unlock(chan);
634 }
635
636 mutex_unlock(&conn->chan_lock);
637}
638
639static void l2cap_chan_le_connect_reject(struct l2cap_chan *chan)
640{
641 struct l2cap_conn *conn = chan->conn;
642 struct l2cap_le_conn_rsp rsp;
643 u16 result;
644
645 if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
646 result = L2CAP_CR_AUTHORIZATION;
647 else
648 result = L2CAP_CR_BAD_PSM;
649
650 l2cap_state_change(chan, BT_DISCONN);
651
652 rsp.dcid = cpu_to_le16(chan->scid);
653 rsp.mtu = cpu_to_le16(chan->imtu);
654 rsp.mps = cpu_to_le16(chan->mps);
655 rsp.credits = cpu_to_le16(chan->rx_credits);
656 rsp.result = cpu_to_le16(result);
657
658 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
659 &rsp);
660}
661
662static void l2cap_chan_connect_reject(struct l2cap_chan *chan)
663{
664 struct l2cap_conn *conn = chan->conn;
665 struct l2cap_conn_rsp rsp;
666 u16 result;
667
668 if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
669 result = L2CAP_CR_SEC_BLOCK;
670 else
671 result = L2CAP_CR_BAD_PSM;
672
673 l2cap_state_change(chan, BT_DISCONN);
674
675 rsp.scid = cpu_to_le16(chan->dcid);
676 rsp.dcid = cpu_to_le16(chan->scid);
677 rsp.result = cpu_to_le16(result);
678 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
679
680 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
681}
682
683void l2cap_chan_close(struct l2cap_chan *chan, int reason)
684{
685 struct l2cap_conn *conn = chan->conn;
686
687 BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
688
689 switch (chan->state) {
690 case BT_LISTEN:
691 chan->ops->teardown(chan, 0);
692 break;
693
694 case BT_CONNECTED:
695 case BT_CONFIG:
696 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
697 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
698 l2cap_send_disconn_req(chan, reason);
699 } else
700 l2cap_chan_del(chan, reason);
701 break;
702
703 case BT_CONNECT2:
704 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
705 if (conn->hcon->type == ACL_LINK)
706 l2cap_chan_connect_reject(chan);
707 else if (conn->hcon->type == LE_LINK)
708 l2cap_chan_le_connect_reject(chan);
709 }
710
711 l2cap_chan_del(chan, reason);
712 break;
713
714 case BT_CONNECT:
715 case BT_DISCONN:
716 l2cap_chan_del(chan, reason);
717 break;
718
719 default:
720 chan->ops->teardown(chan, 0);
721 break;
722 }
723}
724EXPORT_SYMBOL(l2cap_chan_close);
725
726static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
727{
728 switch (chan->chan_type) {
729 case L2CAP_CHAN_RAW:
730 switch (chan->sec_level) {
731 case BT_SECURITY_HIGH:
732 case BT_SECURITY_FIPS:
733 return HCI_AT_DEDICATED_BONDING_MITM;
734 case BT_SECURITY_MEDIUM:
735 return HCI_AT_DEDICATED_BONDING;
736 default:
737 return HCI_AT_NO_BONDING;
738 }
739 break;
740 case L2CAP_CHAN_CONN_LESS:
741 if (chan->psm == cpu_to_le16(L2CAP_PSM_3DSP)) {
742 if (chan->sec_level == BT_SECURITY_LOW)
743 chan->sec_level = BT_SECURITY_SDP;
744 }
745 if (chan->sec_level == BT_SECURITY_HIGH ||
746 chan->sec_level == BT_SECURITY_FIPS)
747 return HCI_AT_NO_BONDING_MITM;
748 else
749 return HCI_AT_NO_BONDING;
750 break;
751 case L2CAP_CHAN_CONN_ORIENTED:
752 if (chan->psm == cpu_to_le16(L2CAP_PSM_SDP)) {
753 if (chan->sec_level == BT_SECURITY_LOW)
754 chan->sec_level = BT_SECURITY_SDP;
755
756 if (chan->sec_level == BT_SECURITY_HIGH ||
757 chan->sec_level == BT_SECURITY_FIPS)
758 return HCI_AT_NO_BONDING_MITM;
759 else
760 return HCI_AT_NO_BONDING;
761 }
762 /* fall through */
763 default:
764 switch (chan->sec_level) {
765 case BT_SECURITY_HIGH:
766 case BT_SECURITY_FIPS:
767 return HCI_AT_GENERAL_BONDING_MITM;
768 case BT_SECURITY_MEDIUM:
769 return HCI_AT_GENERAL_BONDING;
770 default:
771 return HCI_AT_NO_BONDING;
772 }
773 break;
774 }
775}
776
777/* Service level security */
778int l2cap_chan_check_security(struct l2cap_chan *chan, bool initiator)
779{
780 struct l2cap_conn *conn = chan->conn;
781 __u8 auth_type;
782
783 if (conn->hcon->type == LE_LINK)
784 return smp_conn_security(conn->hcon, chan->sec_level);
785
786 auth_type = l2cap_get_auth_type(chan);
787
788 return hci_conn_security(conn->hcon, chan->sec_level, auth_type,
789 initiator);
790}
791
792static u8 l2cap_get_ident(struct l2cap_conn *conn)
793{
794 u8 id;
795
 796 /* Get next available identifier.
797 * 1 - 128 are used by kernel.
798 * 129 - 199 are reserved.
799 * 200 - 254 are used by utilities like l2ping, etc.
800 */
801
802 mutex_lock(&conn->ident_lock);
803
804 if (++conn->tx_ident > 128)
805 conn->tx_ident = 1;
806
807 id = conn->tx_ident;
808
809 mutex_unlock(&conn->ident_lock);
810
811 return id;
812}
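
/* Editor-added usage sketch (not part of the original source): callers
 * such as l2cap_send_conn_req() below stash the returned identifier in
 * the channel,
 *
 *	chan->ident = l2cap_get_ident(conn);
 *	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
 *
 * so that l2cap_get_chan_by_ident() can match the peer's response back
 * to the requesting channel.
 */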
813
814static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
815 void *data)
816{
817 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
818 u8 flags;
819
820 BT_DBG("code 0x%2.2x", code);
821
822 if (!skb)
823 return;
824
825 if (lmp_no_flush_capable(conn->hcon->hdev))
826 flags = ACL_START_NO_FLUSH;
827 else
828 flags = ACL_START;
829
830 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
831 skb->priority = HCI_PRIO_MAX;
832
833 hci_send_acl(conn->hchan, skb, flags);
834}
835
836static bool __chan_is_moving(struct l2cap_chan *chan)
837{
838 return chan->move_state != L2CAP_MOVE_STABLE &&
839 chan->move_state != L2CAP_MOVE_WAIT_PREPARE;
840}
841
842static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
843{
844 struct hci_conn *hcon = chan->conn->hcon;
845 u16 flags;
846
847 BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
848 skb->priority);
849
850 if (chan->hs_hcon && !__chan_is_moving(chan)) {
851 if (chan->hs_hchan)
852 hci_send_acl(chan->hs_hchan, skb, ACL_COMPLETE);
853 else
854 kfree_skb(skb);
855
856 return;
857 }
858
859 if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
860 lmp_no_flush_capable(hcon->hdev))
861 flags = ACL_START_NO_FLUSH;
862 else
863 flags = ACL_START;
864
865 bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
866 hci_send_acl(chan->conn->hchan, skb, flags);
867}
868
869static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
870{
871 control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
872 control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
873
874 if (enh & L2CAP_CTRL_FRAME_TYPE) {
875 /* S-Frame */
876 control->sframe = 1;
877 control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
878 control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
879
880 control->sar = 0;
881 control->txseq = 0;
882 } else {
883 /* I-Frame */
884 control->sframe = 0;
885 control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
886 control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
887
888 control->poll = 0;
889 control->super = 0;
890 }
891}
892
893static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
894{
895 control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
896 control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
897
898 if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
899 /* S-Frame */
900 control->sframe = 1;
901 control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
902 control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
903
904 control->sar = 0;
905 control->txseq = 0;
906 } else {
907 /* I-Frame */
908 control->sframe = 0;
909 control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
910 control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
911
912 control->poll = 0;
913 control->super = 0;
914 }
915}
916
917static inline void __unpack_control(struct l2cap_chan *chan,
918 struct sk_buff *skb)
919{
920 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
921 __unpack_extended_control(get_unaligned_le32(skb->data),
922 &bt_cb(skb)->control);
923 skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
924 } else {
925 __unpack_enhanced_control(get_unaligned_le16(skb->data),
926 &bt_cb(skb)->control);
927 skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
928 }
929}
930
931static u32 __pack_extended_control(struct l2cap_ctrl *control)
932{
933 u32 packed;
934
935 packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
936 packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
937
938 if (control->sframe) {
939 packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
940 packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
941 packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
942 } else {
943 packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
944 packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
945 }
946
947 return packed;
948}
949
950static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
951{
952 u16 packed;
953
954 packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
955 packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
956
957 if (control->sframe) {
958 packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
959 packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
960 packed |= L2CAP_CTRL_FRAME_TYPE;
961 } else {
962 packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
963 packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
964 }
965
966 return packed;
967}
968
969static inline void __pack_control(struct l2cap_chan *chan,
970 struct l2cap_ctrl *control,
971 struct sk_buff *skb)
972{
973 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
974 put_unaligned_le32(__pack_extended_control(control),
975 skb->data + L2CAP_HDR_SIZE);
976 } else {
977 put_unaligned_le16(__pack_enhanced_control(control),
978 skb->data + L2CAP_HDR_SIZE);
979 }
980}
981
982static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
983{
984 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
985 return L2CAP_EXT_HDR_SIZE;
986 else
987 return L2CAP_ENH_HDR_SIZE;
988}
989
990static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
991 u32 control)
992{
993 struct sk_buff *skb;
994 struct l2cap_hdr *lh;
995 int hlen = __ertm_hdr_size(chan);
996
997 if (chan->fcs == L2CAP_FCS_CRC16)
998 hlen += L2CAP_FCS_SIZE;
999
1000 skb = bt_skb_alloc(hlen, GFP_KERNEL);
1001
1002 if (!skb)
1003 return ERR_PTR(-ENOMEM);
1004
1005 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1006 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
1007 lh->cid = cpu_to_le16(chan->dcid);
1008
1009 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1010 put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
1011 else
1012 put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
1013
1014 if (chan->fcs == L2CAP_FCS_CRC16) {
1015 u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
1016 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1017 }
1018
1019 skb->priority = HCI_PRIO_MAX;
1020 return skb;
1021}
1022
1023static void l2cap_send_sframe(struct l2cap_chan *chan,
1024 struct l2cap_ctrl *control)
1025{
1026 struct sk_buff *skb;
1027 u32 control_field;
1028
1029 BT_DBG("chan %p, control %p", chan, control);
1030
1031 if (!control->sframe)
1032 return;
1033
1034 if (__chan_is_moving(chan))
1035 return;
1036
1037 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
1038 !control->poll)
1039 control->final = 1;
1040
1041 if (control->super == L2CAP_SUPER_RR)
1042 clear_bit(CONN_RNR_SENT, &chan->conn_state);
1043 else if (control->super == L2CAP_SUPER_RNR)
1044 set_bit(CONN_RNR_SENT, &chan->conn_state);
1045
1046 if (control->super != L2CAP_SUPER_SREJ) {
1047 chan->last_acked_seq = control->reqseq;
1048 __clear_ack_timer(chan);
1049 }
1050
1051 BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
1052 control->final, control->poll, control->super);
1053
1054 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1055 control_field = __pack_extended_control(control);
1056 else
1057 control_field = __pack_enhanced_control(control);
1058
1059 skb = l2cap_create_sframe_pdu(chan, control_field);
1060 if (!IS_ERR(skb))
1061 l2cap_do_send(chan, skb);
1062}
1063
1064static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
1065{
1066 struct l2cap_ctrl control;
1067
1068 BT_DBG("chan %p, poll %d", chan, poll);
1069
1070 memset(&control, 0, sizeof(control));
1071 control.sframe = 1;
1072 control.poll = poll;
1073
1074 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
1075 control.super = L2CAP_SUPER_RNR;
1076 else
1077 control.super = L2CAP_SUPER_RR;
1078
1079 control.reqseq = chan->buffer_seq;
1080 l2cap_send_sframe(chan, &control);
1081}
1082
1083static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
1084{
1085 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
1086 return true;
1087
1088 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
1089}
1090
1091static bool __amp_capable(struct l2cap_chan *chan)
1092{
1093 struct l2cap_conn *conn = chan->conn;
1094 struct hci_dev *hdev;
1095 bool amp_available = false;
1096
1097 if (!conn->hs_enabled)
1098 return false;
1099
1100 if (!(conn->fixed_chan_mask & L2CAP_FC_A2MP))
1101 return false;
1102
1103 read_lock(&hci_dev_list_lock);
1104 list_for_each_entry(hdev, &hci_dev_list, list) {
1105 if (hdev->amp_type != AMP_TYPE_BREDR &&
1106 test_bit(HCI_UP, &hdev->flags)) {
1107 amp_available = true;
1108 break;
1109 }
1110 }
1111 read_unlock(&hci_dev_list_lock);
1112
1113 if (chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED)
1114 return amp_available;
1115
1116 return false;
1117}
1118
1119static bool l2cap_check_efs(struct l2cap_chan *chan)
1120{
1121 /* Check EFS parameters */
1122 return true;
1123}
1124
1125void l2cap_send_conn_req(struct l2cap_chan *chan)
1126{
1127 struct l2cap_conn *conn = chan->conn;
1128 struct l2cap_conn_req req;
1129
1130 req.scid = cpu_to_le16(chan->scid);
1131 req.psm = chan->psm;
1132
1133 chan->ident = l2cap_get_ident(conn);
1134
1135 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
1136
1137 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
1138}
1139
1140static void l2cap_send_create_chan_req(struct l2cap_chan *chan, u8 amp_id)
1141{
1142 struct l2cap_create_chan_req req;
1143 req.scid = cpu_to_le16(chan->scid);
1144 req.psm = chan->psm;
1145 req.amp_id = amp_id;
1146
1147 chan->ident = l2cap_get_ident(chan->conn);
1148
1149 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_REQ,
1150 sizeof(req), &req);
1151}
1152
1153static void l2cap_move_setup(struct l2cap_chan *chan)
1154{
1155 struct sk_buff *skb;
1156
1157 BT_DBG("chan %p", chan);
1158
1159 if (chan->mode != L2CAP_MODE_ERTM)
1160 return;
1161
1162 __clear_retrans_timer(chan);
1163 __clear_monitor_timer(chan);
1164 __clear_ack_timer(chan);
1165
1166 chan->retry_count = 0;
1167 skb_queue_walk(&chan->tx_q, skb) {
1168 if (bt_cb(skb)->control.retries)
1169 bt_cb(skb)->control.retries = 1;
1170 else
1171 break;
1172 }
1173
1174 chan->expected_tx_seq = chan->buffer_seq;
1175
1176 clear_bit(CONN_REJ_ACT, &chan->conn_state);
1177 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
1178 l2cap_seq_list_clear(&chan->retrans_list);
1179 l2cap_seq_list_clear(&chan->srej_list);
1180 skb_queue_purge(&chan->srej_q);
1181
1182 chan->tx_state = L2CAP_TX_STATE_XMIT;
1183 chan->rx_state = L2CAP_RX_STATE_MOVE;
1184
1185 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
1186}
1187
1188static void l2cap_move_done(struct l2cap_chan *chan)
1189{
1190 u8 move_role = chan->move_role;
1191 BT_DBG("chan %p", chan);
1192
1193 chan->move_state = L2CAP_MOVE_STABLE;
1194 chan->move_role = L2CAP_MOVE_ROLE_NONE;
1195
1196 if (chan->mode != L2CAP_MODE_ERTM)
1197 return;
1198
1199 switch (move_role) {
1200 case L2CAP_MOVE_ROLE_INITIATOR:
1201 l2cap_tx(chan, NULL, NULL, L2CAP_EV_EXPLICIT_POLL);
1202 chan->rx_state = L2CAP_RX_STATE_WAIT_F;
1203 break;
1204 case L2CAP_MOVE_ROLE_RESPONDER:
1205 chan->rx_state = L2CAP_RX_STATE_WAIT_P;
1206 break;
1207 }
1208}
1209
1210static void l2cap_chan_ready(struct l2cap_chan *chan)
1211{
1212 /* This clears all conf flags, including CONF_NOT_COMPLETE */
1213 chan->conf_state = 0;
1214 __clear_chan_timer(chan);
1215
1216 if (chan->mode == L2CAP_MODE_LE_FLOWCTL && !chan->tx_credits)
1217 chan->ops->suspend(chan);
1218
1219 chan->state = BT_CONNECTED;
1220
1221 chan->ops->ready(chan);
1222}
1223
1224static void l2cap_le_connect(struct l2cap_chan *chan)
1225{
1226 struct l2cap_conn *conn = chan->conn;
1227 struct l2cap_le_conn_req req;
1228
1229 if (test_and_set_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags))
1230 return;
1231
1232 req.psm = chan->psm;
1233 req.scid = cpu_to_le16(chan->scid);
1234 req.mtu = cpu_to_le16(chan->imtu);
1235 req.mps = cpu_to_le16(chan->mps);
1236 req.credits = cpu_to_le16(chan->rx_credits);
1237
1238 chan->ident = l2cap_get_ident(conn);
1239
1240 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_REQ,
1241 sizeof(req), &req);
1242}
1243
1244static void l2cap_le_start(struct l2cap_chan *chan)
1245{
1246 struct l2cap_conn *conn = chan->conn;
1247
1248 if (!smp_conn_security(conn->hcon, chan->sec_level))
1249 return;
1250
1251 if (!chan->psm) {
1252 l2cap_chan_ready(chan);
1253 return;
1254 }
1255
1256 if (chan->state == BT_CONNECT)
1257 l2cap_le_connect(chan);
1258}
1259
1260static void l2cap_start_connection(struct l2cap_chan *chan)
1261{
1262 if (__amp_capable(chan)) {
1263 BT_DBG("chan %p AMP capable: discover AMPs", chan);
1264 a2mp_discover_amp(chan);
1265 } else if (chan->conn->hcon->type == LE_LINK) {
1266 l2cap_le_start(chan);
1267 } else {
1268 l2cap_send_conn_req(chan);
1269 }
1270}
1271
1272static void l2cap_do_start(struct l2cap_chan *chan)
1273{
1274 struct l2cap_conn *conn = chan->conn;
1275
1276 if (conn->hcon->type == LE_LINK) {
1277 l2cap_le_start(chan);
1278 return;
1279 }
1280
1281 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
1282 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
1283 return;
1284
1285 if (l2cap_chan_check_security(chan, true) &&
1286 __l2cap_no_conn_pending(chan)) {
1287 l2cap_start_connection(chan);
1288 }
1289 } else {
1290 struct l2cap_info_req req;
1291 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
1292
1293 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
1294 conn->info_ident = l2cap_get_ident(conn);
1295
1296 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
1297
1298 l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
1299 sizeof(req), &req);
1300 }
1301}
1302
1303static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1304{
1305 u32 local_feat_mask = l2cap_feat_mask;
1306 if (!disable_ertm)
1307 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1308
1309 switch (mode) {
1310 case L2CAP_MODE_ERTM:
1311 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1312 case L2CAP_MODE_STREAMING:
1313 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
1314 default:
1315 return 0x00;
1316 }
1317}
1318
1319static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
1320{
1321 struct l2cap_conn *conn = chan->conn;
1322 struct l2cap_disconn_req req;
1323
1324 if (!conn)
1325 return;
1326
1327 if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
1328 __clear_retrans_timer(chan);
1329 __clear_monitor_timer(chan);
1330 __clear_ack_timer(chan);
1331 }
1332
1333 if (chan->scid == L2CAP_CID_A2MP) {
1334 l2cap_state_change(chan, BT_DISCONN);
1335 return;
1336 }
1337
1338 req.dcid = cpu_to_le16(chan->dcid);
1339 req.scid = cpu_to_le16(chan->scid);
1340 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
1341 sizeof(req), &req);
1342
1343 l2cap_state_change_and_error(chan, BT_DISCONN, err);
1344}
1345
1346/* ---- L2CAP connections ---- */
1347static void l2cap_conn_start(struct l2cap_conn *conn)
1348{
1349 struct l2cap_chan *chan, *tmp;
1350
1351 BT_DBG("conn %p", conn);
1352
1353 mutex_lock(&conn->chan_lock);
1354
1355 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
1356 l2cap_chan_lock(chan);
1357
1358 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1359 l2cap_chan_unlock(chan);
1360 continue;
1361 }
1362
1363 if (chan->state == BT_CONNECT) {
1364 if (!l2cap_chan_check_security(chan, true) ||
1365 !__l2cap_no_conn_pending(chan)) {
1366 l2cap_chan_unlock(chan);
1367 continue;
1368 }
1369
1370 if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
1371 && test_bit(CONF_STATE2_DEVICE,
1372 &chan->conf_state)) {
1373 l2cap_chan_close(chan, ECONNRESET);
1374 l2cap_chan_unlock(chan);
1375 continue;
1376 }
1377
1378 l2cap_start_connection(chan);
1379
1380 } else if (chan->state == BT_CONNECT2) {
1381 struct l2cap_conn_rsp rsp;
1382 char buf[128];
1383 rsp.scid = cpu_to_le16(chan->dcid);
1384 rsp.dcid = cpu_to_le16(chan->scid);
1385
1386 if (l2cap_chan_check_security(chan, false)) {
1387 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
1388 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
1389 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
1390 chan->ops->defer(chan);
1391
1392 } else {
1393 l2cap_state_change(chan, BT_CONFIG);
1394 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
1395 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
1396 }
1397 } else {
1398 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
1399 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
1400 }
1401
1402 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
1403 sizeof(rsp), &rsp);
1404
1405 if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
1406 rsp.result != L2CAP_CR_SUCCESS) {
1407 l2cap_chan_unlock(chan);
1408 continue;
1409 }
1410
1411 set_bit(CONF_REQ_SENT, &chan->conf_state);
1412 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1413 l2cap_build_conf_req(chan, buf), buf);
1414 chan->num_conf_req++;
1415 }
1416
1417 l2cap_chan_unlock(chan);
1418 }
1419
1420 mutex_unlock(&conn->chan_lock);
1421}
1422
1423static void l2cap_le_conn_ready(struct l2cap_conn *conn)
1424{
1425 struct hci_conn *hcon = conn->hcon;
1426 struct hci_dev *hdev = hcon->hdev;
1427
1428 BT_DBG("%s conn %p", hdev->name, conn);
1429
1430 /* For outgoing pairing which doesn't necessarily have an
1431 * associated socket (e.g. mgmt_pair_device).
1432 */
1433 if (hcon->out)
1434 smp_conn_security(hcon, hcon->pending_sec_level);
1435
1436 /* For LE slave connections, make sure the connection interval
 1437 * is in the range of the minimum and maximum interval that has
1438 * been configured for this connection. If not, then trigger
1439 * the connection update procedure.
1440 */
1441 if (hcon->role == HCI_ROLE_SLAVE &&
1442 (hcon->le_conn_interval < hcon->le_conn_min_interval ||
1443 hcon->le_conn_interval > hcon->le_conn_max_interval)) {
1444 struct l2cap_conn_param_update_req req;
1445
1446 req.min = cpu_to_le16(hcon->le_conn_min_interval);
1447 req.max = cpu_to_le16(hcon->le_conn_max_interval);
1448 req.latency = cpu_to_le16(hcon->le_conn_latency);
1449 req.to_multiplier = cpu_to_le16(hcon->le_supv_timeout);
1450
1451 l2cap_send_cmd(conn, l2cap_get_ident(conn),
1452 L2CAP_CONN_PARAM_UPDATE_REQ, sizeof(req), &req);
1453 }
1454}
1455
1456static void l2cap_conn_ready(struct l2cap_conn *conn)
1457{
1458 struct l2cap_chan *chan;
1459 struct hci_conn *hcon = conn->hcon;
1460
1461 BT_DBG("conn %p", conn);
1462
1463 if (hcon->type == LE_LINK)
1464 l2cap_le_conn_ready(conn);
1465
1466 mutex_lock(&conn->chan_lock);
1467
1468 list_for_each_entry(chan, &conn->chan_l, list) {
1469
1470 l2cap_chan_lock(chan);
1471
1472 if (chan->scid == L2CAP_CID_A2MP) {
1473 l2cap_chan_unlock(chan);
1474 continue;
1475 }
1476
1477 if (hcon->type == LE_LINK) {
1478 l2cap_le_start(chan);
1479 } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1480 l2cap_chan_ready(chan);
1481
1482 } else if (chan->state == BT_CONNECT) {
1483 l2cap_do_start(chan);
1484 }
1485
1486 l2cap_chan_unlock(chan);
1487 }
1488
1489 mutex_unlock(&conn->chan_lock);
1490
1491 queue_work(hcon->hdev->workqueue, &conn->pending_rx_work);
1492}
1493
 1494/* Notify sockets that we cannot guarantee reliability anymore */
1495static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1496{
1497 struct l2cap_chan *chan;
1498
1499 BT_DBG("conn %p", conn);
1500
1501 mutex_lock(&conn->chan_lock);
1502
1503 list_for_each_entry(chan, &conn->chan_l, list) {
1504 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1505 l2cap_chan_set_err(chan, err);
1506 }
1507
1508 mutex_unlock(&conn->chan_lock);
1509}
1510
1511static void l2cap_info_timeout(struct work_struct *work)
1512{
1513 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1514 info_timer.work);
1515
1516 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
1517 conn->info_ident = 0;
1518
1519 l2cap_conn_start(conn);
1520}
1521
1522/*
1523 * l2cap_user
1524 * External modules can register l2cap_user objects on l2cap_conn. The ->probe
1525 * callback is called during registration. The ->remove callback is called
1526 * during unregistration.
 1527 * An l2cap_user object is unregistered either explicitly, or implicitly when the
 1528 * underlying l2cap_conn object is deleted. This guarantees that l2cap->hcon,
 1529 * l2cap->hchan, etc. are valid as long as the remove callback hasn't been called.
1530 * External modules must own a reference to the l2cap_conn object if they intend
1531 * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
1532 * any time if they don't.
1533 */
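
/* Editor-added usage sketch (not part of the original source): an
 * external module would typically fill in a struct l2cap_user and
 * register it against an l2cap_conn it holds a reference on.  The
 * callback names and bodies below are hypothetical placeholders.
 *
 *	static int my_probe(struct l2cap_conn *conn, struct l2cap_user *user)
 *	{
 *		// set up per-connection state; conn->hcon/hchan are valid
 *		return 0;
 *	}
 *
 *	static void my_remove(struct l2cap_conn *conn, struct l2cap_user *user)
 *	{
 *		// tear down per-connection state
 *	}
 *
 *	static struct l2cap_user my_user = {
 *		.probe  = my_probe,
 *		.remove = my_remove,
 *	};
 *
 *	l2cap_conn_get(conn);
 *	err = l2cap_register_user(conn, &my_user);
 *	...
 *	l2cap_unregister_user(conn, &my_user);
 *	l2cap_conn_put(conn);
 */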
1534
1535int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
1536{
1537 struct hci_dev *hdev = conn->hcon->hdev;
1538 int ret;
1539
1540 /* We need to check whether l2cap_conn is registered. If it is not, we
 1541 * must not register the l2cap_user. l2cap_conn_del() unregisters
1542 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
1543 * relies on the parent hci_conn object to be locked. This itself relies
1544 * on the hci_dev object to be locked. So we must lock the hci device
1545 * here, too. */
1546
1547 hci_dev_lock(hdev);
1548
1549 if (user->list.next || user->list.prev) {
1550 ret = -EINVAL;
1551 goto out_unlock;
1552 }
1553
1554 /* conn->hchan is NULL after l2cap_conn_del() was called */
1555 if (!conn->hchan) {
1556 ret = -ENODEV;
1557 goto out_unlock;
1558 }
1559
1560 ret = user->probe(conn, user);
1561 if (ret)
1562 goto out_unlock;
1563
1564 list_add(&user->list, &conn->users);
1565 ret = 0;
1566
1567out_unlock:
1568 hci_dev_unlock(hdev);
1569 return ret;
1570}
1571EXPORT_SYMBOL(l2cap_register_user);
1572
1573void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
1574{
1575 struct hci_dev *hdev = conn->hcon->hdev;
1576
1577 hci_dev_lock(hdev);
1578
1579 if (!user->list.next || !user->list.prev)
1580 goto out_unlock;
1581
1582 list_del(&user->list);
1583 user->list.next = NULL;
1584 user->list.prev = NULL;
1585 user->remove(conn, user);
1586
1587out_unlock:
1588 hci_dev_unlock(hdev);
1589}
1590EXPORT_SYMBOL(l2cap_unregister_user);
1591
1592static void l2cap_unregister_all_users(struct l2cap_conn *conn)
1593{
1594 struct l2cap_user *user;
1595
1596 while (!list_empty(&conn->users)) {
1597 user = list_first_entry(&conn->users, struct l2cap_user, list);
1598 list_del(&user->list);
1599 user->list.next = NULL;
1600 user->list.prev = NULL;
1601 user->remove(conn, user);
1602 }
1603}
1604
1605static void l2cap_conn_del(struct hci_conn *hcon, int err)
1606{
1607 struct l2cap_conn *conn = hcon->l2cap_data;
1608 struct l2cap_chan *chan, *l;
1609
1610 if (!conn)
1611 return;
1612
1613 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
1614
1615 kfree_skb(conn->rx_skb);
1616
1617 skb_queue_purge(&conn->pending_rx);
1618
 1619 /* We cannot call flush_work(&conn->pending_rx_work) here since we
1620 * might block if we are running on a worker from the same workqueue
1621 * pending_rx_work is waiting on.
1622 */
1623 if (work_pending(&conn->pending_rx_work))
1624 cancel_work_sync(&conn->pending_rx_work);
1625
1626 l2cap_unregister_all_users(conn);
1627
1628 mutex_lock(&conn->chan_lock);
1629
1630 /* Kill channels */
1631 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
1632 l2cap_chan_hold(chan);
1633 l2cap_chan_lock(chan);
1634
1635 l2cap_chan_del(chan, err);
1636
1637 l2cap_chan_unlock(chan);
1638
1639 chan->ops->close(chan);
1640 l2cap_chan_put(chan);
1641 }
1642
1643 mutex_unlock(&conn->chan_lock);
1644
1645 hci_chan_del(conn->hchan);
1646
1647 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1648 cancel_delayed_work_sync(&conn->info_timer);
1649
1650 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) {
1651 cancel_delayed_work_sync(&conn->security_timer);
1652 smp_chan_destroy(conn);
1653 }
1654
1655 hcon->l2cap_data = NULL;
1656 conn->hchan = NULL;
1657 l2cap_conn_put(conn);
1658}
1659
1660static void security_timeout(struct work_struct *work)
1661{
1662 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1663 security_timer.work);
1664
1665 BT_DBG("conn %p", conn);
1666
1667 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags)) {
1668 smp_chan_destroy(conn);
1669 l2cap_conn_del(conn->hcon, ETIMEDOUT);
1670 }
1671}
1672
1673static void l2cap_conn_free(struct kref *ref)
1674{
1675 struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);
1676
1677 hci_conn_put(conn->hcon);
1678 kfree(conn);
1679}
1680
1681void l2cap_conn_get(struct l2cap_conn *conn)
1682{
1683 kref_get(&conn->ref);
1684}
1685EXPORT_SYMBOL(l2cap_conn_get);
1686
1687void l2cap_conn_put(struct l2cap_conn *conn)
1688{
1689 kref_put(&conn->ref, l2cap_conn_free);
1690}
1691EXPORT_SYMBOL(l2cap_conn_put);
1692
1693/* ---- Socket interface ---- */
1694
1695/* Find socket with psm and source / destination bdaddr.
1696 * Returns closest match.
1697 */
1698static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
1699 bdaddr_t *src,
1700 bdaddr_t *dst,
1701 u8 link_type)
1702{
1703 struct l2cap_chan *c, *c1 = NULL;
1704
1705 read_lock(&chan_list_lock);
1706
1707 list_for_each_entry(c, &chan_list, global_l) {
1708 if (state && c->state != state)
1709 continue;
1710
1711 if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR)
1712 continue;
1713
1714 if (link_type == LE_LINK && c->src_type == BDADDR_BREDR)
1715 continue;
1716
1717 if (c->psm == psm) {
1718 int src_match, dst_match;
1719 int src_any, dst_any;
1720
1721 /* Exact match. */
1722 src_match = !bacmp(&c->src, src);
1723 dst_match = !bacmp(&c->dst, dst);
1724 if (src_match && dst_match) {
1725 l2cap_chan_hold(c);
1726 read_unlock(&chan_list_lock);
1727 return c;
1728 }
1729
1730 /* Closest match */
1731 src_any = !bacmp(&c->src, BDADDR_ANY);
1732 dst_any = !bacmp(&c->dst, BDADDR_ANY);
1733 if ((src_match && dst_any) || (src_any && dst_match) ||
1734 (src_any && dst_any))
1735 c1 = c;
1736 }
1737 }
1738
1739 if (c1)
1740 l2cap_chan_hold(c1);
1741
1742 read_unlock(&chan_list_lock);
1743
1744 return c1;
1745}
1746
1747static void l2cap_monitor_timeout(struct work_struct *work)
1748{
1749 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1750 monitor_timer.work);
1751
1752 BT_DBG("chan %p", chan);
1753
1754 l2cap_chan_lock(chan);
1755
1756 if (!chan->conn) {
1757 l2cap_chan_unlock(chan);
1758 l2cap_chan_put(chan);
1759 return;
1760 }
1761
1762 l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);
1763
1764 l2cap_chan_unlock(chan);
1765 l2cap_chan_put(chan);
1766}
1767
1768static void l2cap_retrans_timeout(struct work_struct *work)
1769{
1770 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1771 retrans_timer.work);
1772
1773 BT_DBG("chan %p", chan);
1774
1775 l2cap_chan_lock(chan);
1776
1777 if (!chan->conn) {
1778 l2cap_chan_unlock(chan);
1779 l2cap_chan_put(chan);
1780 return;
1781 }
1782
1783 l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
1784 l2cap_chan_unlock(chan);
1785 l2cap_chan_put(chan);
1786}
1787
1788static void l2cap_streaming_send(struct l2cap_chan *chan,
1789 struct sk_buff_head *skbs)
1790{
1791 struct sk_buff *skb;
1792 struct l2cap_ctrl *control;
1793
1794 BT_DBG("chan %p, skbs %p", chan, skbs);
1795
1796 if (__chan_is_moving(chan))
1797 return;
1798
1799 skb_queue_splice_tail_init(skbs, &chan->tx_q);
1800
1801 while (!skb_queue_empty(&chan->tx_q)) {
1802
1803 skb = skb_dequeue(&chan->tx_q);
1804
1805 bt_cb(skb)->control.retries = 1;
1806 control = &bt_cb(skb)->control;
1807
1808 control->reqseq = 0;
1809 control->txseq = chan->next_tx_seq;
1810
1811 __pack_control(chan, control, skb);
1812
1813 if (chan->fcs == L2CAP_FCS_CRC16) {
1814 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1815 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1816 }
1817
1818 l2cap_do_send(chan, skb);
1819
1820 BT_DBG("Sent txseq %u", control->txseq);
1821
1822 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1823 chan->frames_sent++;
1824 }
1825}
1826
1827static int l2cap_ertm_send(struct l2cap_chan *chan)
1828{
1829 struct sk_buff *skb, *tx_skb;
1830 struct l2cap_ctrl *control;
1831 int sent = 0;
1832
1833 BT_DBG("chan %p", chan);
1834
1835 if (chan->state != BT_CONNECTED)
1836 return -ENOTCONN;
1837
1838 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1839 return 0;
1840
1841 if (__chan_is_moving(chan))
1842 return 0;
1843
1844 while (chan->tx_send_head &&
1845 chan->unacked_frames < chan->remote_tx_win &&
1846 chan->tx_state == L2CAP_TX_STATE_XMIT) {
1847
1848 skb = chan->tx_send_head;
1849
1850 bt_cb(skb)->control.retries = 1;
1851 control = &bt_cb(skb)->control;
1852
1853 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1854 control->final = 1;
1855
1856 control->reqseq = chan->buffer_seq;
1857 chan->last_acked_seq = chan->buffer_seq;
1858 control->txseq = chan->next_tx_seq;
1859
1860 __pack_control(chan, control, skb);
1861
1862 if (chan->fcs == L2CAP_FCS_CRC16) {
1863 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1864 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1865 }
1866
1867 /* Clone after data has been modified. Data is assumed to be
1868 read-only (for locking purposes) on cloned sk_buffs.
1869 */
1870 tx_skb = skb_clone(skb, GFP_KERNEL);
1871
1872 if (!tx_skb)
1873 break;
1874
1875 __set_retrans_timer(chan);
1876
1877 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1878 chan->unacked_frames++;
1879 chan->frames_sent++;
1880 sent++;
1881
1882 if (skb_queue_is_last(&chan->tx_q, skb))
1883 chan->tx_send_head = NULL;
1884 else
1885 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
1886
1887 l2cap_do_send(chan, tx_skb);
1888 BT_DBG("Sent txseq %u", control->txseq);
1889 }
1890
1891 BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
1892 chan->unacked_frames, skb_queue_len(&chan->tx_q));
1893
1894 return sent;
1895}
1896
1897static void l2cap_ertm_resend(struct l2cap_chan *chan)
1898{
1899 struct l2cap_ctrl control;
1900 struct sk_buff *skb;
1901 struct sk_buff *tx_skb;
1902 u16 seq;
1903
1904 BT_DBG("chan %p", chan);
1905
1906 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1907 return;
1908
1909 if (__chan_is_moving(chan))
1910 return;
1911
1912 while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
1913 seq = l2cap_seq_list_pop(&chan->retrans_list);
1914
1915 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
1916 if (!skb) {
1917 BT_DBG("Error: Can't retransmit seq %d, frame missing",
1918 seq);
1919 continue;
1920 }
1921
1922 bt_cb(skb)->control.retries++;
1923 control = bt_cb(skb)->control;
1924
1925 if (chan->max_tx != 0 &&
1926 bt_cb(skb)->control.retries > chan->max_tx) {
1927 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
1928 l2cap_send_disconn_req(chan, ECONNRESET);
1929 l2cap_seq_list_clear(&chan->retrans_list);
1930 break;
1931 }
1932
1933 control.reqseq = chan->buffer_seq;
1934 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1935 control.final = 1;
1936 else
1937 control.final = 0;
1938
1939 if (skb_cloned(skb)) {
1940 /* Cloned sk_buffs are read-only, so we need a
1941 * writeable copy
1942 */
1943 tx_skb = skb_copy(skb, GFP_KERNEL);
1944 } else {
1945 tx_skb = skb_clone(skb, GFP_KERNEL);
1946 }
1947
1948 if (!tx_skb) {
1949 l2cap_seq_list_clear(&chan->retrans_list);
1950 break;
1951 }
1952
1953 /* Update skb contents */
1954 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
1955 put_unaligned_le32(__pack_extended_control(&control),
1956 tx_skb->data + L2CAP_HDR_SIZE);
1957 } else {
1958 put_unaligned_le16(__pack_enhanced_control(&control),
1959 tx_skb->data + L2CAP_HDR_SIZE);
1960 }
1961
1962 if (chan->fcs == L2CAP_FCS_CRC16) {
1963 u16 fcs = crc16(0, (u8 *) tx_skb->data, tx_skb->len);
1964 put_unaligned_le16(fcs, skb_put(tx_skb,
1965 L2CAP_FCS_SIZE));
1966 }
1967
1968 l2cap_do_send(chan, tx_skb);
1969
1970 BT_DBG("Resent txseq %d", control.txseq);
1971
1972 chan->last_acked_seq = chan->buffer_seq;
1973 }
1974}
1975
1976static void l2cap_retransmit(struct l2cap_chan *chan,
1977 struct l2cap_ctrl *control)
1978{
1979 BT_DBG("chan %p, control %p", chan, control);
1980
1981 l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
1982 l2cap_ertm_resend(chan);
1983}
1984
1985static void l2cap_retransmit_all(struct l2cap_chan *chan,
1986 struct l2cap_ctrl *control)
1987{
1988 struct sk_buff *skb;
1989
1990 BT_DBG("chan %p, control %p", chan, control);
1991
1992 if (control->poll)
1993 set_bit(CONN_SEND_FBIT, &chan->conn_state);
1994
1995 l2cap_seq_list_clear(&chan->retrans_list);
1996
1997 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1998 return;
1999
2000 if (chan->unacked_frames) {
2001 skb_queue_walk(&chan->tx_q, skb) {
2002 if (bt_cb(skb)->control.txseq == control->reqseq ||
2003 skb == chan->tx_send_head)
2004 break;
2005 }
2006
2007 skb_queue_walk_from(&chan->tx_q, skb) {
2008 if (skb == chan->tx_send_head)
2009 break;
2010
2011 l2cap_seq_list_append(&chan->retrans_list,
2012 bt_cb(skb)->control.txseq);
2013 }
2014
2015 l2cap_ertm_resend(chan);
2016 }
2017}
2018
2019static void l2cap_send_ack(struct l2cap_chan *chan)
2020{
2021 struct l2cap_ctrl control;
2022 u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
2023 chan->last_acked_seq);
2024 int threshold;
2025
2026 BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
2027 chan, chan->last_acked_seq, chan->buffer_seq);
2028
2029 memset(&control, 0, sizeof(control));
2030 control.sframe = 1;
2031
2032 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
2033 chan->rx_state == L2CAP_RX_STATE_RECV) {
2034 __clear_ack_timer(chan);
2035 control.super = L2CAP_SUPER_RNR;
2036 control.reqseq = chan->buffer_seq;
2037 l2cap_send_sframe(chan, &control);
2038 } else {
2039 if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
2040 l2cap_ertm_send(chan);
2041 /* If any i-frames were sent, they included an ack */
2042 if (chan->buffer_seq == chan->last_acked_seq)
2043 frames_to_ack = 0;
2044 }
2045
2046 /* Ack now if the window is 3/4ths full.
2047 * Calculate without mul or div
2048 */
2049 threshold = chan->ack_win;
2050 threshold += threshold << 1;
2051 threshold >>= 2;
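 /* Editor-added worked example (not in the original source): the
  * threshold works out to (3 * ack_win) / 4; e.g. ack_win == 63 gives
  * (63 + 126) >> 2 == 47, so an RR acknowledgement goes out once 47
  * received frames are still unacked.
  */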
2052
2053 BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
2054 threshold);
2055
2056 if (frames_to_ack >= threshold) {
2057 __clear_ack_timer(chan);
2058 control.super = L2CAP_SUPER_RR;
2059 control.reqseq = chan->buffer_seq;
2060 l2cap_send_sframe(chan, &control);
2061 frames_to_ack = 0;
2062 }
2063
2064 if (frames_to_ack)
2065 __set_ack_timer(chan);
2066 }
2067}
2068
2069static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
2070 struct msghdr *msg, int len,
2071 int count, struct sk_buff *skb)
2072{
2073 struct l2cap_conn *conn = chan->conn;
2074 struct sk_buff **frag;
2075 int sent = 0;
2076
2077 if (chan->ops->memcpy_fromiovec(chan, skb_put(skb, count),
2078 msg->msg_iov, count))
2079 return -EFAULT;
2080
2081 sent += count;
2082 len -= count;
2083
2084 /* Continuation fragments (no L2CAP header) */
2085 frag = &skb_shinfo(skb)->frag_list;
2086 while (len) {
2087 struct sk_buff *tmp;
2088
2089 count = min_t(unsigned int, conn->mtu, len);
2090
2091 tmp = chan->ops->alloc_skb(chan, 0, count,
2092 msg->msg_flags & MSG_DONTWAIT);
2093 if (IS_ERR(tmp))
2094 return PTR_ERR(tmp);
2095
2096 *frag = tmp;
2097
2098 if (chan->ops->memcpy_fromiovec(chan, skb_put(*frag, count),
2099 msg->msg_iov, count))
2100 return -EFAULT;
2101
2102 sent += count;
2103 len -= count;
2104
2105 skb->len += (*frag)->len;
2106 skb->data_len += (*frag)->len;
2107
2108 frag = &(*frag)->next;
2109 }
2110
2111 return sent;
2112}
2113
2114static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
2115 struct msghdr *msg, size_t len)
2116{
2117 struct l2cap_conn *conn = chan->conn;
2118 struct sk_buff *skb;
2119 int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
2120 struct l2cap_hdr *lh;
2121
2122 BT_DBG("chan %p psm 0x%2.2x len %zu", chan,
2123 __le16_to_cpu(chan->psm), len);
2124
2125 count = min_t(unsigned int, (conn->mtu - hlen), len);
2126
2127 skb = chan->ops->alloc_skb(chan, hlen, count,
2128 msg->msg_flags & MSG_DONTWAIT);
2129 if (IS_ERR(skb))
2130 return skb;
2131
2132 /* Create L2CAP header */
2133 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2134 lh->cid = cpu_to_le16(chan->dcid);
2135 lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
2136 put_unaligned(chan->psm, (__le16 *) skb_put(skb, L2CAP_PSMLEN_SIZE));
2137
2138 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2139 if (unlikely(err < 0)) {
2140 kfree_skb(skb);
2141 return ERR_PTR(err);
2142 }
2143 return skb;
2144}
2145
2146static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
2147 struct msghdr *msg, size_t len)
2148{
2149 struct l2cap_conn *conn = chan->conn;
2150 struct sk_buff *skb;
2151 int err, count;
2152 struct l2cap_hdr *lh;
2153
2154 BT_DBG("chan %p len %zu", chan, len);
2155
2156 count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
2157
2158 skb = chan->ops->alloc_skb(chan, L2CAP_HDR_SIZE, count,
2159 msg->msg_flags & MSG_DONTWAIT);
2160 if (IS_ERR(skb))
2161 return skb;
2162
2163 /* Create L2CAP header */
2164 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2165 lh->cid = cpu_to_le16(chan->dcid);
2166 lh->len = cpu_to_le16(len);
2167
2168 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2169 if (unlikely(err < 0)) {
2170 kfree_skb(skb);
2171 return ERR_PTR(err);
2172 }
2173 return skb;
2174}
2175
2176static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
2177 struct msghdr *msg, size_t len,
2178 u16 sdulen)
2179{
2180 struct l2cap_conn *conn = chan->conn;
2181 struct sk_buff *skb;
2182 int err, count, hlen;
2183 struct l2cap_hdr *lh;
2184
2185 BT_DBG("chan %p len %zu", chan, len);
2186
2187 if (!conn)
2188 return ERR_PTR(-ENOTCONN);
2189
2190 hlen = __ertm_hdr_size(chan);
2191
2192 if (sdulen)
2193 hlen += L2CAP_SDULEN_SIZE;
2194
2195 if (chan->fcs == L2CAP_FCS_CRC16)
2196 hlen += L2CAP_FCS_SIZE;
2197
2198 count = min_t(unsigned int, (conn->mtu - hlen), len);
2199
2200 skb = chan->ops->alloc_skb(chan, hlen, count,
2201 msg->msg_flags & MSG_DONTWAIT);
2202 if (IS_ERR(skb))
2203 return skb;
2204
2205 /* Create L2CAP header */
2206 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2207 lh->cid = cpu_to_le16(chan->dcid);
2208 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2209
2210 /* Control header is populated later */
2211 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2212 put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
2213 else
2214 put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
2215
2216 if (sdulen)
2217 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2218
2219 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2220 if (unlikely(err < 0)) {
2221 kfree_skb(skb);
2222 return ERR_PTR(err);
2223 }
2224
2225 bt_cb(skb)->control.fcs = chan->fcs;
2226 bt_cb(skb)->control.retries = 0;
2227 return skb;
2228}
2229
2230static int l2cap_segment_sdu(struct l2cap_chan *chan,
2231 struct sk_buff_head *seg_queue,
2232 struct msghdr *msg, size_t len)
2233{
2234 struct sk_buff *skb;
2235 u16 sdu_len;
2236 size_t pdu_len;
2237 u8 sar;
2238
2239 BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2240
2241 /* It is critical that ERTM PDUs fit in a single HCI fragment,
2242 * so fragmented skbs are not used. The HCI layer's handling
2243 * of fragmented skbs is not compatible with ERTM's queueing.
2244 */
2245
2246 /* PDU size is derived from the HCI MTU */
2247 pdu_len = chan->conn->mtu;
2248
2249 /* Constrain PDU size for BR/EDR connections */
2250 if (!chan->hs_hcon)
2251 pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);
2252
2253 /* Adjust for largest possible L2CAP overhead. */
2254 if (chan->fcs)
2255 pdu_len -= L2CAP_FCS_SIZE;
2256
2257 pdu_len -= __ertm_hdr_size(chan);
2258
2259 /* Remote device may have requested smaller PDUs */
2260 pdu_len = min_t(size_t, pdu_len, chan->remote_mps);
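	/* Illustrative sizing sketch (assumed values, enhanced control
	 * field, FCS enabled): starting from L2CAP_BREDR_MAX_PAYLOAD
	 * (1019 bytes), subtracting the 2-byte FCS and the 6-byte basic
	 * header plus enhanced control field leaves 1011 bytes, which is
	 * then capped by the remote MPS.
	 */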
2261
2262 if (len <= pdu_len) {
2263 sar = L2CAP_SAR_UNSEGMENTED;
2264 sdu_len = 0;
2265 pdu_len = len;
2266 } else {
2267 sar = L2CAP_SAR_START;
2268 sdu_len = len;
2269 pdu_len -= L2CAP_SDULEN_SIZE;
2270 }
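	/* Illustrative SAR walk-through (assumed numbers): a 2500-byte
	 * SDU with a 1000-byte PDU budget yields a START PDU carrying
	 * the 2-byte SDU length plus 998 data bytes, then a 1000-byte
	 * CONTINUE PDU and a 502-byte END PDU.
	 */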
2271
2272 while (len > 0) {
2273 skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);
2274
2275 if (IS_ERR(skb)) {
2276 __skb_queue_purge(seg_queue);
2277 return PTR_ERR(skb);
2278 }
2279
2280 bt_cb(skb)->control.sar = sar;
2281 __skb_queue_tail(seg_queue, skb);
2282
2283 len -= pdu_len;
2284 if (sdu_len) {
2285 sdu_len = 0;
2286 pdu_len += L2CAP_SDULEN_SIZE;
2287 }
2288
2289 if (len <= pdu_len) {
2290 sar = L2CAP_SAR_END;
2291 pdu_len = len;
2292 } else {
2293 sar = L2CAP_SAR_CONTINUE;
2294 }
2295 }
2296
2297 return 0;
2298}
2299
2300static struct sk_buff *l2cap_create_le_flowctl_pdu(struct l2cap_chan *chan,
2301 struct msghdr *msg,
2302 size_t len, u16 sdulen)
2303{
2304 struct l2cap_conn *conn = chan->conn;
2305 struct sk_buff *skb;
2306 int err, count, hlen;
2307 struct l2cap_hdr *lh;
2308
2309 BT_DBG("chan %p len %zu", chan, len);
2310
2311 if (!conn)
2312 return ERR_PTR(-ENOTCONN);
2313
2314 hlen = L2CAP_HDR_SIZE;
2315
2316 if (sdulen)
2317 hlen += L2CAP_SDULEN_SIZE;
2318
2319 count = min_t(unsigned int, (conn->mtu - hlen), len);
2320
2321 skb = chan->ops->alloc_skb(chan, hlen, count,
2322 msg->msg_flags & MSG_DONTWAIT);
2323 if (IS_ERR(skb))
2324 return skb;
2325
2326 /* Create L2CAP header */
2327 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2328 lh->cid = cpu_to_le16(chan->dcid);
2329 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2330
2331 if (sdulen)
2332 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2333
2334 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2335 if (unlikely(err < 0)) {
2336 kfree_skb(skb);
2337 return ERR_PTR(err);
2338 }
2339
2340 return skb;
2341}
2342
2343static int l2cap_segment_le_sdu(struct l2cap_chan *chan,
2344 struct sk_buff_head *seg_queue,
2345 struct msghdr *msg, size_t len)
2346{
2347 struct sk_buff *skb;
2348 size_t pdu_len;
2349 u16 sdu_len;
2350
2351 BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2352
2353 pdu_len = chan->conn->mtu - L2CAP_HDR_SIZE;
2354
2355 pdu_len = min_t(size_t, pdu_len, chan->remote_mps);
2356
2357 sdu_len = len;
2358 pdu_len -= L2CAP_SDULEN_SIZE;
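	/* Only the first LE PDU carries the 2-byte SDU length field.
	 * Illustrative example (assumed remote MPS of 100): a 250-byte
	 * SDU becomes three PDUs with 98, 100 and 52 data bytes, the
	 * first prefixed with the SDU length.
	 */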
2359
2360 while (len > 0) {
2361 if (len <= pdu_len)
2362 pdu_len = len;
2363
2364 skb = l2cap_create_le_flowctl_pdu(chan, msg, pdu_len, sdu_len);
2365 if (IS_ERR(skb)) {
2366 __skb_queue_purge(seg_queue);
2367 return PTR_ERR(skb);
2368 }
2369
2370 __skb_queue_tail(seg_queue, skb);
2371
2372 len -= pdu_len;
2373
2374 if (sdu_len) {
2375 sdu_len = 0;
2376 pdu_len += L2CAP_SDULEN_SIZE;
2377 }
2378 }
2379
2380 return 0;
2381}
2382
2383int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
2384{
2385 struct sk_buff *skb;
2386 int err;
2387 struct sk_buff_head seg_queue;
2388
2389 if (!chan->conn)
2390 return -ENOTCONN;
2391
2392 /* Connectionless channel */
2393 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
2394 skb = l2cap_create_connless_pdu(chan, msg, len);
2395 if (IS_ERR(skb))
2396 return PTR_ERR(skb);
2397
2398 /* The channel lock is released before requesting a new skb and then
2399 * reacquired, so we need to recheck the channel state.
2400 */
2401 if (chan->state != BT_CONNECTED) {
2402 kfree_skb(skb);
2403 return -ENOTCONN;
2404 }
2405
2406 l2cap_do_send(chan, skb);
2407 return len;
2408 }
2409
2410 switch (chan->mode) {
2411 case L2CAP_MODE_LE_FLOWCTL:
2412 /* Check outgoing MTU */
2413 if (len > chan->omtu)
2414 return -EMSGSIZE;
2415
2416 if (!chan->tx_credits)
2417 return -EAGAIN;
2418
2419 __skb_queue_head_init(&seg_queue);
2420
2421 err = l2cap_segment_le_sdu(chan, &seg_queue, msg, len);
2422
2423 if (chan->state != BT_CONNECTED) {
2424 __skb_queue_purge(&seg_queue);
2425 err = -ENOTCONN;
2426 }
2427
2428 if (err)
2429 return err;
2430
2431 skb_queue_splice_tail_init(&seg_queue, &chan->tx_q);
2432
2433 while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
2434 l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
2435 chan->tx_credits--;
2436 }
2437
2438 if (!chan->tx_credits)
2439 chan->ops->suspend(chan);
2440
2441 err = len;
2442
2443 break;
2444
2445 case L2CAP_MODE_BASIC:
2446 /* Check outgoing MTU */
2447 if (len > chan->omtu)
2448 return -EMSGSIZE;
2449
2450 /* Create a basic PDU */
2451 skb = l2cap_create_basic_pdu(chan, msg, len);
2452 if (IS_ERR(skb))
2453 return PTR_ERR(skb);
2454
2455 /* The channel lock is released before requesting a new skb and then
2456 * reacquired, so we need to recheck the channel state.
2457 */
2458 if (chan->state != BT_CONNECTED) {
2459 kfree_skb(skb);
2460 return -ENOTCONN;
2461 }
2462
2463 l2cap_do_send(chan, skb);
2464 err = len;
2465 break;
2466
2467 case L2CAP_MODE_ERTM:
2468 case L2CAP_MODE_STREAMING:
2469 /* Check outgoing MTU */
2470 if (len > chan->omtu) {
2471 err = -EMSGSIZE;
2472 break;
2473 }
2474
2475 __skb_queue_head_init(&seg_queue);
2476
2477 /* Do segmentation before calling into the state machine,
2478 * since it's possible to block while waiting for memory
2479 * allocation.
2480 */
2481 err = l2cap_segment_sdu(chan, &seg_queue, msg, len);
2482
2483 /* The channel could have been closed while segmenting, so
2484 * check that it is still connected.
2485 */
2486 if (chan->state != BT_CONNECTED) {
2487 __skb_queue_purge(&seg_queue);
2488 err = -ENOTCONN;
2489 }
2490
2491 if (err)
2492 break;
2493
2494 if (chan->mode == L2CAP_MODE_ERTM)
2495 l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
2496 else
2497 l2cap_streaming_send(chan, &seg_queue);
2498
2499 err = len;
2500
2501 /* If the skbs were not queued for sending, they'll still be in
2502 * seg_queue and need to be purged.
2503 */
2504 __skb_queue_purge(&seg_queue);
2505 break;
2506
2507 default:
2508 BT_DBG("bad mode 0x%2.2x", chan->mode);
2509 err = -EBADFD;
2510 }
2511
2512 return err;
2513}
2514EXPORT_SYMBOL_GPL(l2cap_chan_send);
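/* Note: callers such as the L2CAP socket layer's sendmsg path are expected
 * to invoke l2cap_chan_send() with the channel lock held; the skb allocation
 * callbacks used above may drop and reacquire that lock, which is why the
 * channel state is rechecked after each allocation.
 */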
2515
2516static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
2517{
2518 struct l2cap_ctrl control;
2519 u16 seq;
2520
2521 BT_DBG("chan %p, txseq %u", chan, txseq);
2522
2523 memset(&control, 0, sizeof(control));
2524 control.sframe = 1;
2525 control.super = L2CAP_SUPER_SREJ;
2526
2527 for (seq = chan->expected_tx_seq; seq != txseq;
2528 seq = __next_seq(chan, seq)) {
2529 if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
2530 control.reqseq = seq;
2531 l2cap_send_sframe(chan, &control);
2532 l2cap_seq_list_append(&chan->srej_list, seq);
2533 }
2534 }
2535
2536 chan->expected_tx_seq = __next_seq(chan, txseq);
2537}
2538
2539static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2540{
2541 struct l2cap_ctrl control;
2542
2543 BT_DBG("chan %p", chan);
2544
2545 if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2546 return;
2547
2548 memset(&control, 0, sizeof(control));
2549 control.sframe = 1;
2550 control.super = L2CAP_SUPER_SREJ;
2551 control.reqseq = chan->srej_list.tail;
2552 l2cap_send_sframe(chan, &control);
2553}
2554
2555static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
2556{
2557 struct l2cap_ctrl control;
2558 u16 initial_head;
2559 u16 seq;
2560
2561 BT_DBG("chan %p, txseq %u", chan, txseq);
2562
2563 memset(&control, 0, sizeof(control));
2564 control.sframe = 1;
2565 control.super = L2CAP_SUPER_SREJ;
2566
2567 /* Capture initial list head to allow only one pass through the list. */
2568 initial_head = chan->srej_list.head;
2569
2570 do {
2571 seq = l2cap_seq_list_pop(&chan->srej_list);
2572 if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
2573 break;
2574
2575 control.reqseq = seq;
2576 l2cap_send_sframe(chan, &control);
2577 l2cap_seq_list_append(&chan->srej_list, seq);
2578 } while (chan->srej_list.head != initial_head);
2579}
2580
2581static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
2582{
2583 struct sk_buff *acked_skb;
2584 u16 ackseq;
2585
2586 BT_DBG("chan %p, reqseq %u", chan, reqseq);
2587
2588 if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
2589 return;
2590
2591 BT_DBG("expected_ack_seq %u, unacked_frames %u",
2592 chan->expected_ack_seq, chan->unacked_frames);
2593
2594 for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
2595 ackseq = __next_seq(chan, ackseq)) {
2596
2597 acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
2598 if (acked_skb) {
2599 skb_unlink(acked_skb, &chan->tx_q);
2600 kfree_skb(acked_skb);
2601 chan->unacked_frames--;
2602 }
2603 }
2604
2605 chan->expected_ack_seq = reqseq;
2606
2607 if (chan->unacked_frames == 0)
2608 __clear_retrans_timer(chan);
2609
2610 BT_DBG("unacked_frames %u", chan->unacked_frames);
2611}
2612
2613static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2614{
2615 BT_DBG("chan %p", chan);
2616
2617 chan->expected_tx_seq = chan->buffer_seq;
2618 l2cap_seq_list_clear(&chan->srej_list);
2619 skb_queue_purge(&chan->srej_q);
2620 chan->rx_state = L2CAP_RX_STATE_RECV;
2621}
2622
2623static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
2624 struct l2cap_ctrl *control,
2625 struct sk_buff_head *skbs, u8 event)
2626{
2627 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2628 event);
2629
2630 switch (event) {
2631 case L2CAP_EV_DATA_REQUEST:
2632 if (chan->tx_send_head == NULL)
2633 chan->tx_send_head = skb_peek(skbs);
2634
2635 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2636 l2cap_ertm_send(chan);
2637 break;
2638 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2639 BT_DBG("Enter LOCAL_BUSY");
2640 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2641
2642 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2643 /* The SREJ_SENT state must be aborted if we are to
2644 * enter the LOCAL_BUSY state.
2645 */
2646 l2cap_abort_rx_srej_sent(chan);
2647 }
2648
2649 l2cap_send_ack(chan);
2650
2651 break;
2652 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2653 BT_DBG("Exit LOCAL_BUSY");
2654 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2655
2656 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2657 struct l2cap_ctrl local_control;
2658
2659 memset(&local_control, 0, sizeof(local_control));
2660 local_control.sframe = 1;
2661 local_control.super = L2CAP_SUPER_RR;
2662 local_control.poll = 1;
2663 local_control.reqseq = chan->buffer_seq;
2664 l2cap_send_sframe(chan, &local_control);
2665
2666 chan->retry_count = 1;
2667 __set_monitor_timer(chan);
2668 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2669 }
2670 break;
2671 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2672 l2cap_process_reqseq(chan, control->reqseq);
2673 break;
2674 case L2CAP_EV_EXPLICIT_POLL:
2675 l2cap_send_rr_or_rnr(chan, 1);
2676 chan->retry_count = 1;
2677 __set_monitor_timer(chan);
2678 __clear_ack_timer(chan);
2679 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2680 break;
2681 case L2CAP_EV_RETRANS_TO:
2682 l2cap_send_rr_or_rnr(chan, 1);
2683 chan->retry_count = 1;
2684 __set_monitor_timer(chan);
2685 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2686 break;
2687 case L2CAP_EV_RECV_FBIT:
2688 /* Nothing to process */
2689 break;
2690 default:
2691 break;
2692 }
2693}
2694
2695static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2696 struct l2cap_ctrl *control,
2697 struct sk_buff_head *skbs, u8 event)
2698{
2699 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2700 event);
2701
2702 switch (event) {
2703 case L2CAP_EV_DATA_REQUEST:
2704 if (chan->tx_send_head == NULL)
2705 chan->tx_send_head = skb_peek(skbs);
2706 /* Queue data, but don't send. */
2707 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2708 break;
2709 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2710 BT_DBG("Enter LOCAL_BUSY");
2711 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2712
2713 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2714 /* The SREJ_SENT state must be aborted if we are to
2715 * enter the LOCAL_BUSY state.
2716 */
2717 l2cap_abort_rx_srej_sent(chan);
2718 }
2719
2720 l2cap_send_ack(chan);
2721
2722 break;
2723 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2724 BT_DBG("Exit LOCAL_BUSY");
2725 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2726
2727 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2728 struct l2cap_ctrl local_control;
2729 memset(&local_control, 0, sizeof(local_control));
2730 local_control.sframe = 1;
2731 local_control.super = L2CAP_SUPER_RR;
2732 local_control.poll = 1;
2733 local_control.reqseq = chan->buffer_seq;
2734 l2cap_send_sframe(chan, &local_control);
2735
2736 chan->retry_count = 1;
2737 __set_monitor_timer(chan);
2738 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2739 }
2740 break;
2741 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2742 l2cap_process_reqseq(chan, control->reqseq);
2743
2744 /* Fall through */
2745
2746 case L2CAP_EV_RECV_FBIT:
2747 if (control && control->final) {
2748 __clear_monitor_timer(chan);
2749 if (chan->unacked_frames > 0)
2750 __set_retrans_timer(chan);
2751 chan->retry_count = 0;
2752 chan->tx_state = L2CAP_TX_STATE_XMIT;
2753 BT_DBG("recv fbit tx_state 0x%2.2x", chan->tx_state);
2754 }
2755 break;
2756 case L2CAP_EV_EXPLICIT_POLL:
2757 /* Ignore */
2758 break;
2759 case L2CAP_EV_MONITOR_TO:
2760 if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
2761 l2cap_send_rr_or_rnr(chan, 1);
2762 __set_monitor_timer(chan);
2763 chan->retry_count++;
2764 } else {
2765 l2cap_send_disconn_req(chan, ECONNABORTED);
2766 }
2767 break;
2768 default:
2769 break;
2770 }
2771}
2772
2773static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
2774 struct sk_buff_head *skbs, u8 event)
2775{
2776 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2777 chan, control, skbs, event, chan->tx_state);
2778
2779 switch (chan->tx_state) {
2780 case L2CAP_TX_STATE_XMIT:
2781 l2cap_tx_state_xmit(chan, control, skbs, event);
2782 break;
2783 case L2CAP_TX_STATE_WAIT_F:
2784 l2cap_tx_state_wait_f(chan, control, skbs, event);
2785 break;
2786 default:
2787 /* Ignore event */
2788 break;
2789 }
2790}
2791
2792static void l2cap_pass_to_tx(struct l2cap_chan *chan,
2793 struct l2cap_ctrl *control)
2794{
2795 BT_DBG("chan %p, control %p", chan, control);
2796 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
2797}
2798
2799static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
2800 struct l2cap_ctrl *control)
2801{
2802 BT_DBG("chan %p, control %p", chan, control);
2803 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
2804}
2805
2806/* Copy frame to all raw sockets on that connection */
2807static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2808{
2809 struct sk_buff *nskb;
2810 struct l2cap_chan *chan;
2811
2812 BT_DBG("conn %p", conn);
2813
2814 mutex_lock(&conn->chan_lock);
2815
2816 list_for_each_entry(chan, &conn->chan_l, list) {
2817 if (chan->chan_type != L2CAP_CHAN_RAW)
2818 continue;
2819
2820 /* Don't send frame to the channel it came from */
2821 if (bt_cb(skb)->chan == chan)
2822 continue;
2823
2824 nskb = skb_clone(skb, GFP_KERNEL);
2825 if (!nskb)
2826 continue;
2827 if (chan->ops->recv(chan, nskb))
2828 kfree_skb(nskb);
2829 }
2830
2831 mutex_unlock(&conn->chan_lock);
2832}
2833
2834/* ---- L2CAP signalling commands ---- */
2835static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
2836 u8 ident, u16 dlen, void *data)
2837{
2838 struct sk_buff *skb, **frag;
2839 struct l2cap_cmd_hdr *cmd;
2840 struct l2cap_hdr *lh;
2841 int len, count;
2842
2843 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
2844 conn, code, ident, dlen);
2845
2846 if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
2847 return NULL;
2848
2849 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
2850 count = min_t(unsigned int, conn->mtu, len);
2851
2852 skb = bt_skb_alloc(count, GFP_KERNEL);
2853 if (!skb)
2854 return NULL;
2855
2856 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2857 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
2858
2859 if (conn->hcon->type == LE_LINK)
2860 lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
2861 else
2862 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
2863
2864 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
2865 cmd->code = code;
2866 cmd->ident = ident;
2867 cmd->len = cpu_to_le16(dlen);
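	/* The skb now holds the basic L2CAP header (length, signalling CID)
	 * followed by the command header (code, ident, dlen); the payload is
	 * copied below and split into continuation fragments if it exceeds
	 * the connection MTU.
	 */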
2868
2869 if (dlen) {
2870 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
2871 memcpy(skb_put(skb, count), data, count);
2872 data += count;
2873 }
2874
2875 len -= skb->len;
2876
2877 /* Continuation fragments (no L2CAP header) */
2878 frag = &skb_shinfo(skb)->frag_list;
2879 while (len) {
2880 count = min_t(unsigned int, conn->mtu, len);
2881
2882 *frag = bt_skb_alloc(count, GFP_KERNEL);
2883 if (!*frag)
2884 goto fail;
2885
2886 memcpy(skb_put(*frag, count), data, count);
2887
2888 len -= count;
2889 data += count;
2890
2891 frag = &(*frag)->next;
2892 }
2893
2894 return skb;
2895
2896fail:
2897 kfree_skb(skb);
2898 return NULL;
2899}
2900
2901static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
2902 unsigned long *val)
2903{
2904 struct l2cap_conf_opt *opt = *ptr;
2905 int len;
2906
2907 len = L2CAP_CONF_OPT_SIZE + opt->len;
2908 *ptr += len;
2909
2910 *type = opt->type;
2911 *olen = opt->len;
2912
2913 switch (opt->len) {
2914 case 1:
2915 *val = *((u8 *) opt->val);
2916 break;
2917
2918 case 2:
2919 *val = get_unaligned_le16(opt->val);
2920 break;
2921
2922 case 4:
2923 *val = get_unaligned_le32(opt->val);
2924 break;
2925
2926 default:
2927 *val = (unsigned long) opt->val;
2928 break;
2929 }
2930
2931 BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
2932 return len;
2933}
2934
2935static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2936{
2937 struct l2cap_conf_opt *opt = *ptr;
2938
2939 BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);
2940
2941 opt->type = type;
2942 opt->len = len;
2943
2944 switch (len) {
2945 case 1:
2946 *((u8 *) opt->val) = val;
2947 break;
2948
2949 case 2:
2950 put_unaligned_le16(val, opt->val);
2951 break;
2952
2953 case 4:
2954 put_unaligned_le32(val, opt->val);
2955 break;
2956
2957 default:
2958 memcpy(opt->val, (void *) val, len);
2959 break;
2960 }
2961
2962 *ptr += L2CAP_CONF_OPT_SIZE + len;
2963}
2964
2965static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
2966{
2967 struct l2cap_conf_efs efs;
2968
2969 switch (chan->mode) {
2970 case L2CAP_MODE_ERTM:
2971 efs.id = chan->local_id;
2972 efs.stype = chan->local_stype;
2973 efs.msdu = cpu_to_le16(chan->local_msdu);
2974 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2975 efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
2976 efs.flush_to = cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
2977 break;
2978
2979 case L2CAP_MODE_STREAMING:
2980 efs.id = 1;
2981 efs.stype = L2CAP_SERV_BESTEFFORT;
2982 efs.msdu = cpu_to_le16(chan->local_msdu);
2983 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2984 efs.acc_lat = 0;
2985 efs.flush_to = 0;
2986 break;
2987
2988 default:
2989 return;
2990 }
2991
2992 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
2993 (unsigned long) &efs);
2994}
2995
2996static void l2cap_ack_timeout(struct work_struct *work)
2997{
2998 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
2999 ack_timer.work);
3000 u16 frames_to_ack;
3001
3002 BT_DBG("chan %p", chan);
3003
3004 l2cap_chan_lock(chan);
3005
3006 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
3007 chan->last_acked_seq);
3008
3009 if (frames_to_ack)
3010 l2cap_send_rr_or_rnr(chan, 0);
3011
3012 l2cap_chan_unlock(chan);
3013 l2cap_chan_put(chan);
3014}
3015
3016int l2cap_ertm_init(struct l2cap_chan *chan)
3017{
3018 int err;
3019
3020 chan->next_tx_seq = 0;
3021 chan->expected_tx_seq = 0;
3022 chan->expected_ack_seq = 0;
3023 chan->unacked_frames = 0;
3024 chan->buffer_seq = 0;
3025 chan->frames_sent = 0;
3026 chan->last_acked_seq = 0;
3027 chan->sdu = NULL;
3028 chan->sdu_last_frag = NULL;
3029 chan->sdu_len = 0;
3030
3031 skb_queue_head_init(&chan->tx_q);
3032
3033 chan->local_amp_id = AMP_ID_BREDR;
3034 chan->move_id = AMP_ID_BREDR;
3035 chan->move_state = L2CAP_MOVE_STABLE;
3036 chan->move_role = L2CAP_MOVE_ROLE_NONE;
3037
3038 if (chan->mode != L2CAP_MODE_ERTM)
3039 return 0;
3040
3041 chan->rx_state = L2CAP_RX_STATE_RECV;
3042 chan->tx_state = L2CAP_TX_STATE_XMIT;
3043
3044 INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
3045 INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
3046 INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
3047
3048 skb_queue_head_init(&chan->srej_q);
3049
3050 err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
3051 if (err < 0)
3052 return err;
3053
3054 err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
3055 if (err < 0)
3056 l2cap_seq_list_free(&chan->srej_list);
3057
3058 return err;
3059}
3060
3061static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
3062{
3063 switch (mode) {
3064 case L2CAP_MODE_STREAMING:
3065 case L2CAP_MODE_ERTM:
3066 if (l2cap_mode_supported(mode, remote_feat_mask))
3067 return mode;
3068 /* fall through */
3069 default:
3070 return L2CAP_MODE_BASIC;
3071 }
3072}
3073
3074static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
3075{
3076 return conn->hs_enabled && conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
3077}
3078
3079static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
3080{
3081 return conn->hs_enabled && conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
3082}
3083
3084static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
3085 struct l2cap_conf_rfc *rfc)
3086{
3087 if (chan->local_amp_id != AMP_ID_BREDR && chan->hs_hcon) {
3088 u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;
3089
3090 /* Class 1 devices must have ERTM timeouts
3091 * exceeding the Link Supervision Timeout. The
3092 * default Link Supervision Timeout for AMP
3093 * controllers is 10 seconds.
3094 *
3095 * Class 1 devices use 0xffffffff for their
3096 * best-effort flush timeout, so the clamping logic
3097 * will result in a timeout that meets the above
3098 * requirement. ERTM timeouts are 16-bit values, so
3099 * the maximum timeout is 65.535 seconds.
3100 */
3101
3102 /* Convert timeout to milliseconds and round */
3103 ertm_to = DIV_ROUND_UP_ULL(ertm_to, 1000);
3104
3105 /* This is the recommended formula for class 2 devices
3106 * that start ERTM timers when packets are sent to the
3107 * controller.
3108 */
3109 ertm_to = 3 * ertm_to + 500;
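		/* Worked example (illustrative): a class 1 best-effort
		 * flush timeout of 0xffffffff divides (rounding up) to
		 * 4294968 above, so 3 * ertm_to + 500 far exceeds 0xffff
		 * and the clamp below yields the 65.535 second ERTM
		 * maximum mentioned above.
		 */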
3110
3111 if (ertm_to > 0xffff)
3112 ertm_to = 0xffff;
3113
3114 rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);
3115 rfc->monitor_timeout = rfc->retrans_timeout;
3116 } else {
3117 rfc->retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
3118 rfc->monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
3119 }
3120}
3121
3122static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
3123{
3124 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
3125 __l2cap_ews_supported(chan->conn)) {
3126 /* use extended control field */
3127 set_bit(FLAG_EXT_CTRL, &chan->flags);
3128 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3129 } else {
3130 chan->tx_win = min_t(u16, chan->tx_win,
3131 L2CAP_DEFAULT_TX_WINDOW);
3132 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
3133 }
3134 chan->ack_win = chan->tx_win;
3135}
3136
3137static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
3138{
3139 struct l2cap_conf_req *req = data;
3140 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
3141 void *ptr = req->data;
3142 u16 size;
3143
3144 BT_DBG("chan %p", chan);
3145
3146 if (chan->num_conf_req || chan->num_conf_rsp)
3147 goto done;
3148
3149 switch (chan->mode) {
3150 case L2CAP_MODE_STREAMING:
3151 case L2CAP_MODE_ERTM:
3152 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
3153 break;
3154
3155 if (__l2cap_efs_supported(chan->conn))
3156 set_bit(FLAG_EFS_ENABLE, &chan->flags);
3157
3158 /* fall through */
3159 default:
3160 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
3161 break;
3162 }
3163
3164done:
3165 if (chan->imtu != L2CAP_DEFAULT_MTU)
3166 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
3167
3168 switch (chan->mode) {
3169 case L2CAP_MODE_BASIC:
3170 if (disable_ertm)
3171 break;
3172
3173 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
3174 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
3175 break;
3176
3177 rfc.mode = L2CAP_MODE_BASIC;
3178 rfc.txwin_size = 0;
3179 rfc.max_transmit = 0;
3180 rfc.retrans_timeout = 0;
3181 rfc.monitor_timeout = 0;
3182 rfc.max_pdu_size = 0;
3183
3184 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3185 (unsigned long) &rfc);
3186 break;
3187
3188 case L2CAP_MODE_ERTM:
3189 rfc.mode = L2CAP_MODE_ERTM;
3190 rfc.max_transmit = chan->max_tx;
3191
3192 __l2cap_set_ertm_timeouts(chan, &rfc);
3193
3194 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3195 L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3196 L2CAP_FCS_SIZE);
3197 rfc.max_pdu_size = cpu_to_le16(size);
3198
3199 l2cap_txwin_setup(chan);
3200
3201 rfc.txwin_size = min_t(u16, chan->tx_win,
3202 L2CAP_DEFAULT_TX_WINDOW);
3203
3204 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3205 (unsigned long) &rfc);
3206
3207 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3208 l2cap_add_opt_efs(&ptr, chan);
3209
3210 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3211 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3212 chan->tx_win);
3213
3214 if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3215 if (chan->fcs == L2CAP_FCS_NONE ||
3216 test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3217 chan->fcs = L2CAP_FCS_NONE;
3218 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3219 chan->fcs);
3220 }
3221 break;
3222
3223 case L2CAP_MODE_STREAMING:
3224 l2cap_txwin_setup(chan);
3225 rfc.mode = L2CAP_MODE_STREAMING;
3226 rfc.txwin_size = 0;
3227 rfc.max_transmit = 0;
3228 rfc.retrans_timeout = 0;
3229 rfc.monitor_timeout = 0;
3230
3231 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3232 L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3233 L2CAP_FCS_SIZE);
3234 rfc.max_pdu_size = cpu_to_le16(size);
3235
3236 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3237 (unsigned long) &rfc);
3238
3239 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3240 l2cap_add_opt_efs(&ptr, chan);
3241
3242 if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3243 if (chan->fcs == L2CAP_FCS_NONE ||
3244 test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3245 chan->fcs = L2CAP_FCS_NONE;
3246 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3247 chan->fcs);
3248 }
3249 break;
3250 }
3251
3252 req->dcid = cpu_to_le16(chan->dcid);
3253 req->flags = cpu_to_le16(0);
3254
3255 return ptr - data;
3256}
3257
3258static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
3259{
3260 struct l2cap_conf_rsp *rsp = data;
3261 void *ptr = rsp->data;
3262 void *req = chan->conf_req;
3263 int len = chan->conf_len;
3264 int type, hint, olen;
3265 unsigned long val;
3266 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3267 struct l2cap_conf_efs efs;
3268 u8 remote_efs = 0;
3269 u16 mtu = L2CAP_DEFAULT_MTU;
3270 u16 result = L2CAP_CONF_SUCCESS;
3271 u16 size;
3272
3273 BT_DBG("chan %p", chan);
3274
3275 while (len >= L2CAP_CONF_OPT_SIZE) {
3276 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
3277
3278 hint = type & L2CAP_CONF_HINT;
3279 type &= L2CAP_CONF_MASK;
3280
3281 switch (type) {
3282 case L2CAP_CONF_MTU:
3283 mtu = val;
3284 break;
3285
3286 case L2CAP_CONF_FLUSH_TO:
3287 chan->flush_to = val;
3288 break;
3289
3290 case L2CAP_CONF_QOS:
3291 break;
3292
3293 case L2CAP_CONF_RFC:
3294 if (olen == sizeof(rfc))
3295 memcpy(&rfc, (void *) val, olen);
3296 break;
3297
3298 case L2CAP_CONF_FCS:
3299 if (val == L2CAP_FCS_NONE)
3300 set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
3301 break;
3302
3303 case L2CAP_CONF_EFS:
3304 remote_efs = 1;
3305 if (olen == sizeof(efs))
3306 memcpy(&efs, (void *) val, olen);
3307 break;
3308
3309 case L2CAP_CONF_EWS:
3310 if (!chan->conn->hs_enabled)
3311 return -ECONNREFUSED;
3312
3313 set_bit(FLAG_EXT_CTRL, &chan->flags);
3314 set_bit(CONF_EWS_RECV, &chan->conf_state);
3315 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3316 chan->remote_tx_win = val;
3317 break;
3318
3319 default:
3320 if (hint)
3321 break;
3322
3323 result = L2CAP_CONF_UNKNOWN;
3324 *((u8 *) ptr++) = type;
3325 break;
3326 }
3327 }
3328
3329 if (chan->num_conf_rsp || chan->num_conf_req > 1)
3330 goto done;
3331
3332 switch (chan->mode) {
3333 case L2CAP_MODE_STREAMING:
3334 case L2CAP_MODE_ERTM:
3335 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
3336 chan->mode = l2cap_select_mode(rfc.mode,
3337 chan->conn->feat_mask);
3338 break;
3339 }
3340
3341 if (remote_efs) {
3342 if (__l2cap_efs_supported(chan->conn))
3343 set_bit(FLAG_EFS_ENABLE, &chan->flags);
3344 else
3345 return -ECONNREFUSED;
3346 }
3347
3348 if (chan->mode != rfc.mode)
3349 return -ECONNREFUSED;
3350
3351 break;
3352 }
3353
3354done:
3355 if (chan->mode != rfc.mode) {
3356 result = L2CAP_CONF_UNACCEPT;
3357 rfc.mode = chan->mode;
3358
3359 if (chan->num_conf_rsp == 1)
3360 return -ECONNREFUSED;
3361
3362 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3363 (unsigned long) &rfc);
3364 }
3365
3366 if (result == L2CAP_CONF_SUCCESS) {
3367 /* Configure output options and let the other side know
3368 * which ones we don't like. */
3369
3370 if (mtu < L2CAP_DEFAULT_MIN_MTU)
3371 result = L2CAP_CONF_UNACCEPT;
3372 else {
3373 chan->omtu = mtu;
3374 set_bit(CONF_MTU_DONE, &chan->conf_state);
3375 }
3376 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
3377
3378 if (remote_efs) {
3379 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3380 efs.stype != L2CAP_SERV_NOTRAFIC &&
3381 efs.stype != chan->local_stype) {
3382
3383 result = L2CAP_CONF_UNACCEPT;
3384
3385 if (chan->num_conf_req >= 1)
3386 return -ECONNREFUSED;
3387
3388 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3389 sizeof(efs),
3390 (unsigned long) &efs);
3391 } else {
3392 /* Send PENDING Conf Rsp */
3393 result = L2CAP_CONF_PENDING;
3394 set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3395 }
3396 }
3397
3398 switch (rfc.mode) {
3399 case L2CAP_MODE_BASIC:
3400 chan->fcs = L2CAP_FCS_NONE;
3401 set_bit(CONF_MODE_DONE, &chan->conf_state);
3402 break;
3403
3404 case L2CAP_MODE_ERTM:
3405 if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
3406 chan->remote_tx_win = rfc.txwin_size;
3407 else
3408 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
3409
3410 chan->remote_max_tx = rfc.max_transmit;
3411
3412 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3413 chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3414 L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3415 rfc.max_pdu_size = cpu_to_le16(size);
3416 chan->remote_mps = size;
3417
3418 __l2cap_set_ertm_timeouts(chan, &rfc);
3419
3420 set_bit(CONF_MODE_DONE, &chan->conf_state);
3421
3422 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3423 sizeof(rfc), (unsigned long) &rfc);
3424
3425 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3426 chan->remote_id = efs.id;
3427 chan->remote_stype = efs.stype;
3428 chan->remote_msdu = le16_to_cpu(efs.msdu);
3429 chan->remote_flush_to =
3430 le32_to_cpu(efs.flush_to);
3431 chan->remote_acc_lat =
3432 le32_to_cpu(efs.acc_lat);
3433 chan->remote_sdu_itime =
3434 le32_to_cpu(efs.sdu_itime);
3435 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3436 sizeof(efs),
3437 (unsigned long) &efs);
3438 }
3439 break;
3440
3441 case L2CAP_MODE_STREAMING:
3442 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3443 chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3444 L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3445 rfc.max_pdu_size = cpu_to_le16(size);
3446 chan->remote_mps = size;
3447
3448 set_bit(CONF_MODE_DONE, &chan->conf_state);
3449
3450 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3451 (unsigned long) &rfc);
3452
3453 break;
3454
3455 default:
3456 result = L2CAP_CONF_UNACCEPT;
3457
3458 memset(&rfc, 0, sizeof(rfc));
3459 rfc.mode = chan->mode;
3460 }
3461
3462 if (result == L2CAP_CONF_SUCCESS)
3463 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3464 }
3465 rsp->scid = cpu_to_le16(chan->dcid);
3466 rsp->result = cpu_to_le16(result);
3467 rsp->flags = cpu_to_le16(0);
3468
3469 return ptr - data;
3470}
3471
3472static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
3473 void *data, u16 *result)
3474{
3475 struct l2cap_conf_req *req = data;
3476 void *ptr = req->data;
3477 int type, olen;
3478 unsigned long val;
3479 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3480 struct l2cap_conf_efs efs;
3481
3482 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
3483
3484 while (len >= L2CAP_CONF_OPT_SIZE) {
3485 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3486
3487 switch (type) {
3488 case L2CAP_CONF_MTU:
3489 if (val < L2CAP_DEFAULT_MIN_MTU) {
3490 *result = L2CAP_CONF_UNACCEPT;
3491 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3492 } else
3493 chan->imtu = val;
3494 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
3495 break;
3496
3497 case L2CAP_CONF_FLUSH_TO:
3498 chan->flush_to = val;
3499 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
3500 2, chan->flush_to);
3501 break;
3502
3503 case L2CAP_CONF_RFC:
3504 if (olen == sizeof(rfc))
3505 memcpy(&rfc, (void *)val, olen);
3506
3507 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
3508 rfc.mode != chan->mode)
3509 return -ECONNREFUSED;
3510
3511 chan->fcs = 0;
3512
3513 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3514 sizeof(rfc), (unsigned long) &rfc);
3515 break;
3516
3517 case L2CAP_CONF_EWS:
3518 chan->ack_win = min_t(u16, val, chan->ack_win);
3519 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3520 chan->tx_win);
3521 break;
3522
3523 case L2CAP_CONF_EFS:
3524 if (olen == sizeof(efs))
3525 memcpy(&efs, (void *)val, olen);
3526
3527 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3528 efs.stype != L2CAP_SERV_NOTRAFIC &&
3529 efs.stype != chan->local_stype)
3530 return -ECONNREFUSED;
3531
3532 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
3533 (unsigned long) &efs);
3534 break;
3535
3536 case L2CAP_CONF_FCS:
3537 if (*result == L2CAP_CONF_PENDING)
3538 if (val == L2CAP_FCS_NONE)
3539 set_bit(CONF_RECV_NO_FCS,
3540 &chan->conf_state);
3541 break;
3542 }
3543 }
3544
3545 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
3546 return -ECONNREFUSED;
3547
3548 chan->mode = rfc.mode;
3549
3550 if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
3551 switch (rfc.mode) {
3552 case L2CAP_MODE_ERTM:
3553 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3554 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3555 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3556 if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
3557 chan->ack_win = min_t(u16, chan->ack_win,
3558 rfc.txwin_size);
3559
3560 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3561 chan->local_msdu = le16_to_cpu(efs.msdu);
3562 chan->local_sdu_itime =
3563 le32_to_cpu(efs.sdu_itime);
3564 chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
3565 chan->local_flush_to =
3566 le32_to_cpu(efs.flush_to);
3567 }
3568 break;
3569
3570 case L2CAP_MODE_STREAMING:
3571 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3572 }
3573 }
3574
3575 req->dcid = cpu_to_le16(chan->dcid);
3576 req->flags = cpu_to_le16(0);
3577
3578 return ptr - data;
3579}
3580
3581static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
3582 u16 result, u16 flags)
3583{
3584 struct l2cap_conf_rsp *rsp = data;
3585 void *ptr = rsp->data;
3586
3587 BT_DBG("chan %p", chan);
3588
3589 rsp->scid = cpu_to_le16(chan->dcid);
3590 rsp->result = cpu_to_le16(result);
3591 rsp->flags = cpu_to_le16(flags);
3592
3593 return ptr - data;
3594}
3595
3596void __l2cap_le_connect_rsp_defer(struct l2cap_chan *chan)
3597{
3598 struct l2cap_le_conn_rsp rsp;
3599 struct l2cap_conn *conn = chan->conn;
3600
3601 BT_DBG("chan %p", chan);
3602
3603 rsp.dcid = cpu_to_le16(chan->scid);
3604 rsp.mtu = cpu_to_le16(chan->imtu);
3605 rsp.mps = cpu_to_le16(chan->mps);
3606 rsp.credits = cpu_to_le16(chan->rx_credits);
3607 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
3608
3609 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
3610 &rsp);
3611}
3612
3613void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
3614{
3615 struct l2cap_conn_rsp rsp;
3616 struct l2cap_conn *conn = chan->conn;
3617 u8 buf[128];
3618 u8 rsp_code;
3619
3620 rsp.scid = cpu_to_le16(chan->dcid);
3621 rsp.dcid = cpu_to_le16(chan->scid);
3622 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
3623 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
3624
3625 if (chan->hs_hcon)
3626 rsp_code = L2CAP_CREATE_CHAN_RSP;
3627 else
3628 rsp_code = L2CAP_CONN_RSP;
3629
3630 BT_DBG("chan %p rsp_code %u", chan, rsp_code);
3631
3632 l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);
3633
3634 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3635 return;
3636
3637 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3638 l2cap_build_conf_req(chan, buf), buf);
3639 chan->num_conf_req++;
3640}
3641
3642static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
3643{
3644 int type, olen;
3645 unsigned long val;
3646 /* Use sane default values in case a misbehaving remote device
3647 * did not send an RFC or extended window size option.
3648 */
3649 u16 txwin_ext = chan->ack_win;
3650 struct l2cap_conf_rfc rfc = {
3651 .mode = chan->mode,
3652 .retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
3653 .monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
3654 .max_pdu_size = cpu_to_le16(chan->imtu),
3655 .txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
3656 };
3657
3658 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
3659
3660 if (chan->mode != L2CAP_MODE_ERTM &&
 chan->mode != L2CAP_MODE_STREAMING)
3661 return;
3662
3663 while (len >= L2CAP_CONF_OPT_SIZE) {
3664 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3665
3666 switch (type) {
3667 case L2CAP_CONF_RFC:
3668 if (olen == sizeof(rfc))
3669 memcpy(&rfc, (void *)val, olen);
3670 break;
3671 case L2CAP_CONF_EWS:
3672 txwin_ext = val;
3673 break;
3674 }
3675 }
3676
3677 switch (rfc.mode) {
3678 case L2CAP_MODE_ERTM:
3679 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3680 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3681 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3682 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3683 chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
3684 else
3685 chan->ack_win = min_t(u16, chan->ack_win,
3686 rfc.txwin_size);
3687 break;
3688 case L2CAP_MODE_STREAMING:
3689 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3690 }
3691}
3692
3693static inline int l2cap_command_rej(struct l2cap_conn *conn,
3694 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3695 u8 *data)
3696{
3697 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
3698
3699 if (cmd_len < sizeof(*rej))
3700 return -EPROTO;
3701
3702 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
3703 return 0;
3704
3705 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
3706 cmd->ident == conn->info_ident) {
3707 cancel_delayed_work(&conn->info_timer);
3708
3709 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3710 conn->info_ident = 0;
3711
3712 l2cap_conn_start(conn);
3713 }
3714
3715 return 0;
3716}
3717
3718static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
3719 struct l2cap_cmd_hdr *cmd,
3720 u8 *data, u8 rsp_code, u8 amp_id)
3721{
3722 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
3723 struct l2cap_conn_rsp rsp;
3724 struct l2cap_chan *chan = NULL, *pchan;
3725 int result, status = L2CAP_CS_NO_INFO;
3726
3727 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
3728 __le16 psm = req->psm;
3729
3730 BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);
3731
3732 /* Check if we have a socket listening on this PSM */
3733 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
3734 &conn->hcon->dst, ACL_LINK);
3735 if (!pchan) {
3736 result = L2CAP_CR_BAD_PSM;
3737 goto sendresp;
3738 }
3739
3740 mutex_lock(&conn->chan_lock);
3741 l2cap_chan_lock(pchan);
3742
3743 /* Check if the ACL is secure enough (if not SDP) */
3744 if (psm != cpu_to_le16(L2CAP_PSM_SDP) &&
3745 !hci_conn_check_link_mode(conn->hcon)) {
3746 conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
3747 result = L2CAP_CR_SEC_BLOCK;
3748 goto response;
3749 }
3750
3751 result = L2CAP_CR_NO_MEM;
3752
3753 /* Check if we already have a channel with that dcid */
3754 if (__l2cap_get_chan_by_dcid(conn, scid))
3755 goto response;
3756
3757 chan = pchan->ops->new_connection(pchan);
3758 if (!chan)
3759 goto response;
3760
3761 /* For certain devices (e.g. an HID mouse), support for authentication,
3762 * pairing and bonding is optional. For such devices, in order to avoid
3763 * keeping the ACL alive for too long after L2CAP disconnection, reset the ACL
3764 * disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
3765 */
3766 conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
3767
3768 bacpy(&chan->src, &conn->hcon->src);
3769 bacpy(&chan->dst, &conn->hcon->dst);
3770 chan->src_type = bdaddr_type(conn->hcon, conn->hcon->src_type);
3771 chan->dst_type = bdaddr_type(conn->hcon, conn->hcon->dst_type);
3772 chan->psm = psm;
3773 chan->dcid = scid;
3774 chan->local_amp_id = amp_id;
3775
3776 __l2cap_chan_add(conn, chan);
3777
3778 dcid = chan->scid;
3779
3780 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
3781
3782 chan->ident = cmd->ident;
3783
3784 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
3785 if (l2cap_chan_check_security(chan, false)) {
3786 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
3787 l2cap_state_change(chan, BT_CONNECT2);
3788 result = L2CAP_CR_PEND;
3789 status = L2CAP_CS_AUTHOR_PEND;
3790 chan->ops->defer(chan);
3791 } else {
3792 /* Force pending result for AMP controllers.
3793 * The connection will succeed after the
3794 * physical link is up.
3795 */
3796 if (amp_id == AMP_ID_BREDR) {
3797 l2cap_state_change(chan, BT_CONFIG);
3798 result = L2CAP_CR_SUCCESS;
3799 } else {
3800 l2cap_state_change(chan, BT_CONNECT2);
3801 result = L2CAP_CR_PEND;
3802 }
3803 status = L2CAP_CS_NO_INFO;
3804 }
3805 } else {
3806 l2cap_state_change(chan, BT_CONNECT2);
3807 result = L2CAP_CR_PEND;
3808 status = L2CAP_CS_AUTHEN_PEND;
3809 }
3810 } else {
3811 l2cap_state_change(chan, BT_CONNECT2);
3812 result = L2CAP_CR_PEND;
3813 status = L2CAP_CS_NO_INFO;
3814 }
3815
3816response:
3817 l2cap_chan_unlock(pchan);
3818 mutex_unlock(&conn->chan_lock);
3819 l2cap_chan_put(pchan);
3820
3821sendresp:
3822 rsp.scid = cpu_to_le16(scid);
3823 rsp.dcid = cpu_to_le16(dcid);
3824 rsp.result = cpu_to_le16(result);
3825 rsp.status = cpu_to_le16(status);
3826 l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);
3827
3828 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
3829 struct l2cap_info_req info;
3830 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
3831
3832 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
3833 conn->info_ident = l2cap_get_ident(conn);
3834
3835 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
3836
3837 l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
3838 sizeof(info), &info);
3839 }
3840
3841 if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
3842 result == L2CAP_CR_SUCCESS) {
3843 u8 buf[128];
3844 set_bit(CONF_REQ_SENT, &chan->conf_state);
3845 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3846 l2cap_build_conf_req(chan, buf), buf);
3847 chan->num_conf_req++;
3848 }
3849
3850 return chan;
3851}
3852
3853static int l2cap_connect_req(struct l2cap_conn *conn,
3854 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
3855{
3856 struct hci_dev *hdev = conn->hcon->hdev;
3857 struct hci_conn *hcon = conn->hcon;
3858
3859 if (cmd_len < sizeof(struct l2cap_conn_req))
3860 return -EPROTO;
3861
3862 hci_dev_lock(hdev);
3863 if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
3864 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags))
3865 mgmt_device_connected(hdev, &hcon->dst, hcon->type,
3866 hcon->dst_type, 0, NULL, 0,
3867 hcon->dev_class);
3868 hci_dev_unlock(hdev);
3869
3870 l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
3871 return 0;
3872}
3873
3874static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
3875 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3876 u8 *data)
3877{
3878 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
3879 u16 scid, dcid, result, status;
3880 struct l2cap_chan *chan;
3881 u8 req[128];
3882 int err;
3883
3884 if (cmd_len < sizeof(*rsp))
3885 return -EPROTO;
3886
3887 scid = __le16_to_cpu(rsp->scid);
3888 dcid = __le16_to_cpu(rsp->dcid);
3889 result = __le16_to_cpu(rsp->result);
3890 status = __le16_to_cpu(rsp->status);
3891
3892 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
3893 dcid, scid, result, status);
3894
3895 mutex_lock(&conn->chan_lock);
3896
3897 if (scid) {
3898 chan = __l2cap_get_chan_by_scid(conn, scid);
3899 if (!chan) {
3900 err = -EBADSLT;
3901 goto unlock;
3902 }
3903 } else {
3904 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
3905 if (!chan) {
3906 err = -EBADSLT;
3907 goto unlock;
3908 }
3909 }
3910
3911 err = 0;
3912
3913 l2cap_chan_lock(chan);
3914
3915 switch (result) {
3916 case L2CAP_CR_SUCCESS:
3917 l2cap_state_change(chan, BT_CONFIG);
3918 chan->ident = 0;
3919 chan->dcid = dcid;
3920 clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
3921
3922 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3923 break;
3924
3925 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3926 l2cap_build_conf_req(chan, req), req);
3927 chan->num_conf_req++;
3928 break;
3929
3930 case L2CAP_CR_PEND:
3931 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
3932 break;
3933
3934 default:
3935 l2cap_chan_del(chan, ECONNREFUSED);
3936 break;
3937 }
3938
3939 l2cap_chan_unlock(chan);
3940
3941unlock:
3942 mutex_unlock(&conn->chan_lock);
3943
3944 return err;
3945}
3946
3947static inline void set_default_fcs(struct l2cap_chan *chan)
3948{
3949 /* FCS is enabled only in ERTM or streaming mode, if one or both
3950 * sides request it.
3951 */
3952 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
3953 chan->fcs = L2CAP_FCS_NONE;
3954 else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
3955 chan->fcs = L2CAP_FCS_CRC16;
3956}
3957
3958static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
3959 u8 ident, u16 flags)
3960{
3961 struct l2cap_conn *conn = chan->conn;
3962
3963 BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
3964 flags);
3965
3966 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3967 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3968
3969 l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
3970 l2cap_build_conf_rsp(chan, data,
3971 L2CAP_CONF_SUCCESS, flags), data);
3972}
3973
3974static void cmd_reject_invalid_cid(struct l2cap_conn *conn, u8 ident,
3975 u16 scid, u16 dcid)
3976{
3977 struct l2cap_cmd_rej_cid rej;
3978
3979 rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
3980 rej.scid = __cpu_to_le16(scid);
3981 rej.dcid = __cpu_to_le16(dcid);
3982
3983 l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
3984}
3985
3986static inline int l2cap_config_req(struct l2cap_conn *conn,
3987 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3988 u8 *data)
3989{
3990 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
3991 u16 dcid, flags;
3992 u8 rsp[64];
3993 struct l2cap_chan *chan;
3994 int len, err = 0;
3995
3996 if (cmd_len < sizeof(*req))
3997 return -EPROTO;
3998
3999 dcid = __le16_to_cpu(req->dcid);
4000 flags = __le16_to_cpu(req->flags);
4001
4002 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
4003
4004 chan = l2cap_get_chan_by_scid(conn, dcid);
4005 if (!chan) {
4006 cmd_reject_invalid_cid(conn, cmd->ident, dcid, 0);
4007 return 0;
4008 }
4009
4010 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
4011 cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
4012 chan->dcid);
4013 goto unlock;
4014 }
4015
4016 /* Reject if config buffer is too small. */
4017 len = cmd_len - sizeof(*req);
4018 if (chan->conf_len + len > sizeof(chan->conf_req)) {
4019 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
4020 l2cap_build_conf_rsp(chan, rsp,
4021 L2CAP_CONF_REJECT, flags), rsp);
4022 goto unlock;
4023 }
4024
4025 /* Store config. */
4026 memcpy(chan->conf_req + chan->conf_len, req->data, len);
4027 chan->conf_len += len;
4028
4029 if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
4030 /* Incomplete config. Send empty response. */
4031 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
4032 l2cap_build_conf_rsp(chan, rsp,
4033 L2CAP_CONF_SUCCESS, flags), rsp);
4034 goto unlock;
4035 }
4036
4037 /* Complete config. */
4038 len = l2cap_parse_conf_req(chan, rsp);
4039 if (len < 0) {
4040 l2cap_send_disconn_req(chan, ECONNRESET);
4041 goto unlock;
4042 }
4043
4044 chan->ident = cmd->ident;
4045 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
4046 chan->num_conf_rsp++;
4047
4048 /* Reset config buffer. */
4049 chan->conf_len = 0;
4050
4051 if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
4052 goto unlock;
4053
4054 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
4055 set_default_fcs(chan);
4056
4057 if (chan->mode == L2CAP_MODE_ERTM ||
4058 chan->mode == L2CAP_MODE_STREAMING)
4059 err = l2cap_ertm_init(chan);
4060
4061 if (err < 0)
4062 l2cap_send_disconn_req(chan, -err);
4063 else
4064 l2cap_chan_ready(chan);
4065
4066 goto unlock;
4067 }
4068
4069 if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
4070 u8 buf[64];
4071 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4072 l2cap_build_conf_req(chan, buf), buf);
4073 chan->num_conf_req++;
4074 }
4075
4076 /* Got Conf Rsp PENDING from the remote side and assume we sent
4077 * Conf Rsp PENDING in the code above */
4078 if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
4079 test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4080
4081 /* check compatibility */
4082
4083 /* Send rsp for BR/EDR channel */
4084 if (!chan->hs_hcon)
4085 l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
4086 else
4087 chan->ident = cmd->ident;
4088 }
4089
4090unlock:
4091 l2cap_chan_unlock(chan);
4092 return err;
4093}
4094
4095static inline int l2cap_config_rsp(struct l2cap_conn *conn,
4096 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4097 u8 *data)
4098{
4099 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
4100 u16 scid, flags, result;
4101 struct l2cap_chan *chan;
4102 int len = cmd_len - sizeof(*rsp);
4103 int err = 0;
4104
4105 if (cmd_len < sizeof(*rsp))
4106 return -EPROTO;
4107
4108 scid = __le16_to_cpu(rsp->scid);
4109 flags = __le16_to_cpu(rsp->flags);
4110 result = __le16_to_cpu(rsp->result);
4111
4112 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
4113 result, len);
4114
4115 chan = l2cap_get_chan_by_scid(conn, scid);
4116 if (!chan)
4117 return 0;
4118
4119 switch (result) {
4120 case L2CAP_CONF_SUCCESS:
4121 l2cap_conf_rfc_get(chan, rsp->data, len);
4122 clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
4123 break;
4124
4125 case L2CAP_CONF_PENDING:
4126 set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
4127
4128 if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4129 char buf[64];
4130
4131 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
4132 buf, &result);
4133 if (len < 0) {
4134 l2cap_send_disconn_req(chan, ECONNRESET);
4135 goto done;
4136 }
4137
4138 if (!chan->hs_hcon) {
4139 l2cap_send_efs_conf_rsp(chan, buf, cmd->ident,
4140 0);
4141 } else {
4142 if (l2cap_check_efs(chan)) {
4143 amp_create_logical_link(chan);
4144 chan->ident = cmd->ident;
4145 }
4146 }
4147 }
4148 goto done;
4149
4150 case L2CAP_CONF_UNACCEPT:
4151 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
4152 char req[64];
4153
4154 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
4155 l2cap_send_disconn_req(chan, ECONNRESET);
4156 goto done;
4157 }
4158
4159 /* throw out any old stored conf requests */
4160 result = L2CAP_CONF_SUCCESS;
4161 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
4162 req, &result);
4163 if (len < 0) {
4164 l2cap_send_disconn_req(chan, ECONNRESET);
4165 goto done;
4166 }
4167
4168 l2cap_send_cmd(conn, l2cap_get_ident(conn),
4169 L2CAP_CONF_REQ, len, req);
4170 chan->num_conf_req++;
4171 if (result != L2CAP_CONF_SUCCESS)
4172 goto done;
4173 break;
4174 }
4175
4176 default:
4177 l2cap_chan_set_err(chan, ECONNRESET);
4178
4179 __set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
4180 l2cap_send_disconn_req(chan, ECONNRESET);
4181 goto done;
4182 }
4183
4184 if (flags & L2CAP_CONF_FLAG_CONTINUATION)
4185 goto done;
4186
4187 set_bit(CONF_INPUT_DONE, &chan->conf_state);
4188
4189 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
4190 set_default_fcs(chan);
4191
4192 if (chan->mode == L2CAP_MODE_ERTM ||
4193 chan->mode == L2CAP_MODE_STREAMING)
4194 err = l2cap_ertm_init(chan);
4195
4196 if (err < 0)
4197 l2cap_send_disconn_req(chan, -err);
4198 else
4199 l2cap_chan_ready(chan);
4200 }
4201
4202done:
4203 l2cap_chan_unlock(chan);
4204 return err;
4205}
4206
4207static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
4208 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4209 u8 *data)
4210{
4211 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
4212 struct l2cap_disconn_rsp rsp;
4213 u16 dcid, scid;
4214 struct l2cap_chan *chan;
4215
4216 if (cmd_len != sizeof(*req))
4217 return -EPROTO;
4218
4219 scid = __le16_to_cpu(req->scid);
4220 dcid = __le16_to_cpu(req->dcid);
4221
4222 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
4223
4224 mutex_lock(&conn->chan_lock);
4225
4226 chan = __l2cap_get_chan_by_scid(conn, dcid);
4227 if (!chan) {
4228 mutex_unlock(&conn->chan_lock);
4229 cmd_reject_invalid_cid(conn, cmd->ident, dcid, scid);
4230 return 0;
4231 }
4232
4233 l2cap_chan_lock(chan);
4234
4235 rsp.dcid = cpu_to_le16(chan->scid);
4236 rsp.scid = cpu_to_le16(chan->dcid);
4237 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
4238
4239 chan->ops->set_shutdown(chan);
4240
4241 l2cap_chan_hold(chan);
4242 l2cap_chan_del(chan, ECONNRESET);
4243
4244 l2cap_chan_unlock(chan);
4245
4246 chan->ops->close(chan);
4247 l2cap_chan_put(chan);
4248
4249 mutex_unlock(&conn->chan_lock);
4250
4251 return 0;
4252}
4253
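/* Handle an incoming Disconnection Response: the remote confirmed our
 * request, so remove and close the local channel.
 */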
4254static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
4255 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4256 u8 *data)
4257{
4258 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
4259 u16 dcid, scid;
4260 struct l2cap_chan *chan;
4261
4262 if (cmd_len != sizeof(*rsp))
4263 return -EPROTO;
4264
4265 scid = __le16_to_cpu(rsp->scid);
4266 dcid = __le16_to_cpu(rsp->dcid);
4267
4268 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
4269
4270 mutex_lock(&conn->chan_lock);
4271
4272 chan = __l2cap_get_chan_by_scid(conn, scid);
4273 if (!chan) {
4274 mutex_unlock(&conn->chan_lock);
4275 return 0;
4276 }
4277
4278 l2cap_chan_lock(chan);
4279
4280 l2cap_chan_hold(chan);
4281 l2cap_chan_del(chan, 0);
4282
4283 l2cap_chan_unlock(chan);
4284
4285 chan->ops->close(chan);
4286 l2cap_chan_put(chan);
4287
4288 mutex_unlock(&conn->chan_lock);
4289
4290 return 0;
4291}
4292
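/* Handle an incoming Information Request: reply with the extended
 * feature mask or the fixed channel map, or with "not supported" for
 * any other information type.
 */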
4293static inline int l2cap_information_req(struct l2cap_conn *conn,
4294 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4295 u8 *data)
4296{
4297 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
4298 u16 type;
4299
4300 if (cmd_len != sizeof(*req))
4301 return -EPROTO;
4302
4303 type = __le16_to_cpu(req->type);
4304
4305 BT_DBG("type 0x%4.4x", type);
4306
4307 if (type == L2CAP_IT_FEAT_MASK) {
4308 u8 buf[8];
4309 u32 feat_mask = l2cap_feat_mask;
4310 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4311 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
4312 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
4313 if (!disable_ertm)
4314 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
4315 | L2CAP_FEAT_FCS;
4316 if (conn->hs_enabled)
4317 feat_mask |= L2CAP_FEAT_EXT_FLOW
4318 | L2CAP_FEAT_EXT_WINDOW;
4319
4320 put_unaligned_le32(feat_mask, rsp->data);
4321 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4322 buf);
4323 } else if (type == L2CAP_IT_FIXED_CHAN) {
4324 u8 buf[12];
4325 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4326
4327 if (conn->hs_enabled)
4328 l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
4329 else
4330 l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;
4331
4332 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4333 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
4334 memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
4335 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4336 buf);
4337 } else {
4338 struct l2cap_info_rsp rsp;
4339 rsp.type = cpu_to_le16(type);
4340 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
4341 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
4342 &rsp);
4343 }
4344
4345 return 0;
4346}
4347
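/* Handle an incoming Information Response: record the remote feature
 * mask (querying the fixed channels next if supported) and, once the
 * exchange is complete, start any pending connections.
 */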
4348static inline int l2cap_information_rsp(struct l2cap_conn *conn,
4349 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4350 u8 *data)
4351{
4352 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
4353 u16 type, result;
4354
4355 if (cmd_len < sizeof(*rsp))
4356 return -EPROTO;
4357
4358 type = __le16_to_cpu(rsp->type);
4359 result = __le16_to_cpu(rsp->result);
4360
4361 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
4362
4363	/* L2CAP Info req/rsp are not bound to any channel, so add extra checks */
4364 if (cmd->ident != conn->info_ident ||
4365 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
4366 return 0;
4367
4368 cancel_delayed_work(&conn->info_timer);
4369
4370 if (result != L2CAP_IR_SUCCESS) {
4371 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4372 conn->info_ident = 0;
4373
4374 l2cap_conn_start(conn);
4375
4376 return 0;
4377 }
4378
4379 switch (type) {
4380 case L2CAP_IT_FEAT_MASK:
4381 conn->feat_mask = get_unaligned_le32(rsp->data);
4382
4383 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
4384 struct l2cap_info_req req;
4385 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4386
4387 conn->info_ident = l2cap_get_ident(conn);
4388
4389 l2cap_send_cmd(conn, conn->info_ident,
4390 L2CAP_INFO_REQ, sizeof(req), &req);
4391 } else {
4392 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4393 conn->info_ident = 0;
4394
4395 l2cap_conn_start(conn);
4396 }
4397 break;
4398
4399 case L2CAP_IT_FIXED_CHAN:
4400 conn->fixed_chan_mask = rsp->data[0];
4401 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4402 conn->info_ident = 0;
4403
4404 l2cap_conn_start(conn);
4405 break;
4406 }
4407
4408 return 0;
4409}
4410
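/* Handle an AMP Create Channel Request.  Controller id 0 is treated as
 * a plain BR/EDR connect; otherwise the AMP controller is validated and
 * the new channel is bound to the high-speed link.
 */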
4411static int l2cap_create_channel_req(struct l2cap_conn *conn,
4412 struct l2cap_cmd_hdr *cmd,
4413 u16 cmd_len, void *data)
4414{
4415 struct l2cap_create_chan_req *req = data;
4416 struct l2cap_create_chan_rsp rsp;
4417 struct l2cap_chan *chan;
4418 struct hci_dev *hdev;
4419 u16 psm, scid;
4420
4421 if (cmd_len != sizeof(*req))
4422 return -EPROTO;
4423
4424 if (!conn->hs_enabled)
4425 return -EINVAL;
4426
4427 psm = le16_to_cpu(req->psm);
4428 scid = le16_to_cpu(req->scid);
4429
4430 BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);
4431
4432	/* For controller id 0 make a BR/EDR connection */
4433 if (req->amp_id == AMP_ID_BREDR) {
4434 l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4435 req->amp_id);
4436 return 0;
4437 }
4438
4439 /* Validate AMP controller id */
4440 hdev = hci_dev_get(req->amp_id);
4441 if (!hdev)
4442 goto error;
4443
4444 if (hdev->dev_type != HCI_AMP || !test_bit(HCI_UP, &hdev->flags)) {
4445 hci_dev_put(hdev);
4446 goto error;
4447 }
4448
4449 chan = l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4450 req->amp_id);
4451 if (chan) {
4452 struct amp_mgr *mgr = conn->hcon->amp_mgr;
4453 struct hci_conn *hs_hcon;
4454
4455 hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK,
4456 &conn->hcon->dst);
4457 if (!hs_hcon) {
4458 hci_dev_put(hdev);
4459 cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
4460 chan->dcid);
4461 return 0;
4462 }
4463
4464 BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon);
4465
4466 mgr->bredr_chan = chan;
4467 chan->hs_hcon = hs_hcon;
4468 chan->fcs = L2CAP_FCS_NONE;
4469 conn->mtu = hdev->block_mtu;
4470 }
4471
4472 hci_dev_put(hdev);
4473
4474 return 0;
4475
4476error:
4477 rsp.dcid = 0;
4478 rsp.scid = cpu_to_le16(scid);
4479 rsp.result = cpu_to_le16(L2CAP_CR_BAD_AMP);
4480 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4481
4482 l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
4483 sizeof(rsp), &rsp);
4484
4485 return 0;
4486}
4487
4488static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id)
4489{
4490 struct l2cap_move_chan_req req;
4491 u8 ident;
4492
4493 BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id);
4494
4495 ident = l2cap_get_ident(chan->conn);
4496 chan->ident = ident;
4497
4498 req.icid = cpu_to_le16(chan->scid);
4499 req.dest_amp_id = dest_amp_id;
4500
4501 l2cap_send_cmd(chan->conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req),
4502 &req);
4503
4504 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4505}
4506
4507static void l2cap_send_move_chan_rsp(struct l2cap_chan *chan, u16 result)
4508{
4509 struct l2cap_move_chan_rsp rsp;
4510
4511 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4512
4513 rsp.icid = cpu_to_le16(chan->dcid);
4514 rsp.result = cpu_to_le16(result);
4515
4516 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_RSP,
4517 sizeof(rsp), &rsp);
4518}
4519
4520static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result)
4521{
4522 struct l2cap_move_chan_cfm cfm;
4523
4524 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4525
4526 chan->ident = l2cap_get_ident(chan->conn);
4527
4528 cfm.icid = cpu_to_le16(chan->scid);
4529 cfm.result = cpu_to_le16(result);
4530
4531 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_CFM,
4532 sizeof(cfm), &cfm);
4533
4534 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4535}
4536
4537static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid)
4538{
4539 struct l2cap_move_chan_cfm cfm;
4540
4541 BT_DBG("conn %p, icid 0x%4.4x", conn, icid);
4542
4543 cfm.icid = cpu_to_le16(icid);
4544 cfm.result = cpu_to_le16(L2CAP_MC_UNCONFIRMED);
4545
4546 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM,
4547 sizeof(cfm), &cfm);
4548}
4549
4550static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4551 u16 icid)
4552{
4553 struct l2cap_move_chan_cfm_rsp rsp;
4554
4555 BT_DBG("icid 0x%4.4x", icid);
4556
4557 rsp.icid = cpu_to_le16(icid);
4558 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
4559}
4560
4561static void __release_logical_link(struct l2cap_chan *chan)
4562{
4563 chan->hs_hchan = NULL;
4564 chan->hs_hcon = NULL;
4565
4566 /* Placeholder - release the logical link */
4567}
4568
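/* Logical link setup failed: disconnect a channel that was still being
 * created, or unwind an in-progress channel move.
 */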
4569static void l2cap_logical_fail(struct l2cap_chan *chan)
4570{
4571 /* Logical link setup failed */
4572 if (chan->state != BT_CONNECTED) {
4573 /* Create channel failure, disconnect */
4574 l2cap_send_disconn_req(chan, ECONNRESET);
4575 return;
4576 }
4577
4578 switch (chan->move_role) {
4579 case L2CAP_MOVE_ROLE_RESPONDER:
4580 l2cap_move_done(chan);
4581 l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_SUPP);
4582 break;
4583 case L2CAP_MOVE_ROLE_INITIATOR:
4584 if (chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_COMP ||
4585 chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_CFM) {
4586 /* Remote has only sent pending or
4587 * success responses, clean up
4588 */
4589 l2cap_move_done(chan);
4590 }
4591
4592 /* Other amp move states imply that the move
4593 * has already aborted
4594 */
4595 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
4596 break;
4597 }
4598}
4599
4600static void l2cap_logical_finish_create(struct l2cap_chan *chan,
4601 struct hci_chan *hchan)
4602{
4603 struct l2cap_conf_rsp rsp;
4604
4605 chan->hs_hchan = hchan;
4606 chan->hs_hcon->l2cap_data = chan->conn;
4607
4608 l2cap_send_efs_conf_rsp(chan, &rsp, chan->ident, 0);
4609
4610 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
4611 int err;
4612
4613 set_default_fcs(chan);
4614
4615 err = l2cap_ertm_init(chan);
4616 if (err < 0)
4617 l2cap_send_disconn_req(chan, -err);
4618 else
4619 l2cap_chan_ready(chan);
4620 }
4621}
4622
4623static void l2cap_logical_finish_move(struct l2cap_chan *chan,
4624 struct hci_chan *hchan)
4625{
4626 chan->hs_hcon = hchan->conn;
4627 chan->hs_hcon->l2cap_data = chan->conn;
4628
4629 BT_DBG("move_state %d", chan->move_state);
4630
4631 switch (chan->move_state) {
4632 case L2CAP_MOVE_WAIT_LOGICAL_COMP:
4633 /* Move confirm will be sent after a success
4634 * response is received
4635 */
4636 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4637 break;
4638 case L2CAP_MOVE_WAIT_LOGICAL_CFM:
4639 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4640 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
4641 } else if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
4642 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
4643 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
4644 } else if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4645 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4646 l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
4647 }
4648 break;
4649 default:
4650 /* Move was not in expected state, free the channel */
4651 __release_logical_link(chan);
4652
4653 chan->move_state = L2CAP_MOVE_STABLE;
4654 }
4655}
4656
4657/* Call with chan locked */
4658void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
4659 u8 status)
4660{
4661 BT_DBG("chan %p, hchan %p, status %d", chan, hchan, status);
4662
4663 if (status) {
4664 l2cap_logical_fail(chan);
4665 __release_logical_link(chan);
4666 return;
4667 }
4668
4669 if (chan->state != BT_CONNECTED) {
4670 /* Ignore logical link if channel is on BR/EDR */
4671 if (chan->local_amp_id != AMP_ID_BREDR)
4672 l2cap_logical_finish_create(chan, hchan);
4673 } else {
4674 l2cap_logical_finish_move(chan, hchan);
4675 }
4676}
4677
4678void l2cap_move_start(struct l2cap_chan *chan)
4679{
4680 BT_DBG("chan %p", chan);
4681
4682 if (chan->local_amp_id == AMP_ID_BREDR) {
4683 if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED)
4684 return;
4685 chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4686 chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
4687 /* Placeholder - start physical link setup */
4688 } else {
4689 chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4690 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4691 chan->move_id = 0;
4692 l2cap_move_setup(chan);
4693 l2cap_send_move_chan_req(chan, 0);
4694 }
4695}
4696
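/* Finish channel creation once the AMP physical link result is known:
 * an outgoing channel either proceeds as a Create Channel Request or
 * falls back to a BR/EDR Connection Request, while an incoming channel
 * is answered with a Create Channel Response and, on success, the
 * first Configuration Request.
 */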
4697static void l2cap_do_create(struct l2cap_chan *chan, int result,
4698 u8 local_amp_id, u8 remote_amp_id)
4699{
4700 BT_DBG("chan %p state %s %u -> %u", chan, state_to_string(chan->state),
4701 local_amp_id, remote_amp_id);
4702
4703 chan->fcs = L2CAP_FCS_NONE;
4704
4705 /* Outgoing channel on AMP */
4706 if (chan->state == BT_CONNECT) {
4707 if (result == L2CAP_CR_SUCCESS) {
4708 chan->local_amp_id = local_amp_id;
4709 l2cap_send_create_chan_req(chan, remote_amp_id);
4710 } else {
4711 /* Revert to BR/EDR connect */
4712 l2cap_send_conn_req(chan);
4713 }
4714
4715 return;
4716 }
4717
4718 /* Incoming channel on AMP */
4719 if (__l2cap_no_conn_pending(chan)) {
4720 struct l2cap_conn_rsp rsp;
4721 char buf[128];
4722 rsp.scid = cpu_to_le16(chan->dcid);
4723 rsp.dcid = cpu_to_le16(chan->scid);
4724
4725 if (result == L2CAP_CR_SUCCESS) {
4726 /* Send successful response */
4727 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
4728 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4729 } else {
4730 /* Send negative response */
4731 rsp.result = cpu_to_le16(L2CAP_CR_NO_MEM);
4732 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4733 }
4734
4735 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_RSP,
4736 sizeof(rsp), &rsp);
4737
4738 if (result == L2CAP_CR_SUCCESS) {
4739 l2cap_state_change(chan, BT_CONFIG);
4740 set_bit(CONF_REQ_SENT, &chan->conf_state);
4741 l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
4742 L2CAP_CONF_REQ,
4743 l2cap_build_conf_req(chan, buf), buf);
4744 chan->num_conf_req++;
4745 }
4746 }
4747}
4748
4749static void l2cap_do_move_initiate(struct l2cap_chan *chan, u8 local_amp_id,
4750 u8 remote_amp_id)
4751{
4752 l2cap_move_setup(chan);
4753 chan->move_id = local_amp_id;
4754 chan->move_state = L2CAP_MOVE_WAIT_RSP;
4755
4756 l2cap_send_move_chan_req(chan, remote_amp_id);
4757}
4758
4759static void l2cap_do_move_respond(struct l2cap_chan *chan, int result)
4760{
4761 struct hci_chan *hchan = NULL;
4762
4763 /* Placeholder - get hci_chan for logical link */
4764
4765 if (hchan) {
4766 if (hchan->state == BT_CONNECTED) {
4767 /* Logical link is ready to go */
4768 chan->hs_hcon = hchan->conn;
4769 chan->hs_hcon->l2cap_data = chan->conn;
4770 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4771 l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
4772
4773 l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
4774 } else {
4775 /* Wait for logical link to be ready */
4776 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
4777 }
4778 } else {
4779 /* Logical link not available */
4780 l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_ALLOWED);
4781 }
4782}
4783
4784static void l2cap_do_move_cancel(struct l2cap_chan *chan, int result)
4785{
4786 if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4787 u8 rsp_result;
4788 if (result == -EINVAL)
4789 rsp_result = L2CAP_MR_BAD_ID;
4790 else
4791 rsp_result = L2CAP_MR_NOT_ALLOWED;
4792
4793 l2cap_send_move_chan_rsp(chan, rsp_result);
4794 }
4795
4796 chan->move_role = L2CAP_MOVE_ROLE_NONE;
4797 chan->move_state = L2CAP_MOVE_STABLE;
4798
4799 /* Restart data transmission */
4800 l2cap_ertm_send(chan);
4801}
4802
4803/* Invoke with locked chan */
4804void __l2cap_physical_cfm(struct l2cap_chan *chan, int result)
4805{
4806 u8 local_amp_id = chan->local_amp_id;
4807 u8 remote_amp_id = chan->remote_amp_id;
4808
4809 BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
4810 chan, result, local_amp_id, remote_amp_id);
4811
4812 if (chan->state == BT_DISCONN || chan->state == BT_CLOSED) {
4813 l2cap_chan_unlock(chan);
4814 return;
4815 }
4816
4817 if (chan->state != BT_CONNECTED) {
4818 l2cap_do_create(chan, result, local_amp_id, remote_amp_id);
4819 } else if (result != L2CAP_MR_SUCCESS) {
4820 l2cap_do_move_cancel(chan, result);
4821 } else {
4822 switch (chan->move_role) {
4823 case L2CAP_MOVE_ROLE_INITIATOR:
4824 l2cap_do_move_initiate(chan, local_amp_id,
4825 remote_amp_id);
4826 break;
4827 case L2CAP_MOVE_ROLE_RESPONDER:
4828 l2cap_do_move_respond(chan, result);
4829 break;
4830 default:
4831 l2cap_do_move_cancel(chan, result);
4832 break;
4833 }
4834 }
4835}
4836
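/* Handle an incoming Move Channel Request: validate the channel, its
 * mode and the destination controller, resolve move collisions (the
 * larger bd_addr wins), then answer with a success, pending or error
 * result.
 */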
4837static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
4838 struct l2cap_cmd_hdr *cmd,
4839 u16 cmd_len, void *data)
4840{
4841 struct l2cap_move_chan_req *req = data;
4842 struct l2cap_move_chan_rsp rsp;
4843 struct l2cap_chan *chan;
4844 u16 icid = 0;
4845 u16 result = L2CAP_MR_NOT_ALLOWED;
4846
4847 if (cmd_len != sizeof(*req))
4848 return -EPROTO;
4849
4850 icid = le16_to_cpu(req->icid);
4851
4852 BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);
4853
4854 if (!conn->hs_enabled)
4855 return -EINVAL;
4856
4857 chan = l2cap_get_chan_by_dcid(conn, icid);
4858 if (!chan) {
4859 rsp.icid = cpu_to_le16(icid);
4860 rsp.result = cpu_to_le16(L2CAP_MR_NOT_ALLOWED);
4861 l2cap_send_cmd(conn, cmd->ident, L2CAP_MOVE_CHAN_RSP,
4862 sizeof(rsp), &rsp);
4863 return 0;
4864 }
4865
4866 chan->ident = cmd->ident;
4867
4868 if (chan->scid < L2CAP_CID_DYN_START ||
4869 chan->chan_policy == BT_CHANNEL_POLICY_BREDR_ONLY ||
4870 (chan->mode != L2CAP_MODE_ERTM &&
4871 chan->mode != L2CAP_MODE_STREAMING)) {
4872 result = L2CAP_MR_NOT_ALLOWED;
4873 goto send_move_response;
4874 }
4875
4876 if (chan->local_amp_id == req->dest_amp_id) {
4877 result = L2CAP_MR_SAME_ID;
4878 goto send_move_response;
4879 }
4880
4881 if (req->dest_amp_id != AMP_ID_BREDR) {
4882 struct hci_dev *hdev;
4883 hdev = hci_dev_get(req->dest_amp_id);
4884 if (!hdev || hdev->dev_type != HCI_AMP ||
4885 !test_bit(HCI_UP, &hdev->flags)) {
4886 if (hdev)
4887 hci_dev_put(hdev);
4888
4889 result = L2CAP_MR_BAD_ID;
4890 goto send_move_response;
4891 }
4892 hci_dev_put(hdev);
4893 }
4894
4895 /* Detect a move collision. Only send a collision response
4896 * if this side has "lost", otherwise proceed with the move.
4897 * The winner has the larger bd_addr.
4898 */
4899 if ((__chan_is_moving(chan) ||
4900 chan->move_role != L2CAP_MOVE_ROLE_NONE) &&
4901 bacmp(&conn->hcon->src, &conn->hcon->dst) > 0) {
4902 result = L2CAP_MR_COLLISION;
4903 goto send_move_response;
4904 }
4905
4906 chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
4907 l2cap_move_setup(chan);
4908 chan->move_id = req->dest_amp_id;
4909 icid = chan->dcid;
4910
4911 if (req->dest_amp_id == AMP_ID_BREDR) {
4912 /* Moving to BR/EDR */
4913 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4914 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
4915 result = L2CAP_MR_PEND;
4916 } else {
4917 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4918 result = L2CAP_MR_SUCCESS;
4919 }
4920 } else {
4921 chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
4922 /* Placeholder - uncomment when amp functions are available */
4923 /*amp_accept_physical(chan, req->dest_amp_id);*/
4924 result = L2CAP_MR_PEND;
4925 }
4926
4927send_move_response:
4928 l2cap_send_move_chan_rsp(chan, result);
4929
4930 l2cap_chan_unlock(chan);
4931
4932 return 0;
4933}
4934
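/* Continue an outgoing channel move after a success or pending Move
 * Channel Response, advancing the move state machine and sending the
 * confirmation once the logical link is ready.
 */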
4935static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
4936{
4937 struct l2cap_chan *chan;
4938 struct hci_chan *hchan = NULL;
4939
4940 chan = l2cap_get_chan_by_scid(conn, icid);
4941 if (!chan) {
4942 l2cap_send_move_chan_cfm_icid(conn, icid);
4943 return;
4944 }
4945
4946 __clear_chan_timer(chan);
4947 if (result == L2CAP_MR_PEND)
4948 __set_chan_timer(chan, L2CAP_MOVE_ERTX_TIMEOUT);
4949
4950 switch (chan->move_state) {
4951 case L2CAP_MOVE_WAIT_LOGICAL_COMP:
4952 /* Move confirm will be sent when logical link
4953 * is complete.
4954 */
4955 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
4956 break;
4957 case L2CAP_MOVE_WAIT_RSP_SUCCESS:
4958 if (result == L2CAP_MR_PEND) {
4959 break;
4960 } else if (test_bit(CONN_LOCAL_BUSY,
4961 &chan->conn_state)) {
4962 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
4963 } else {
4964 /* Logical link is up or moving to BR/EDR,
4965 * proceed with move
4966 */
4967 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
4968 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
4969 }
4970 break;
4971 case L2CAP_MOVE_WAIT_RSP:
4972 /* Moving to AMP */
4973 if (result == L2CAP_MR_SUCCESS) {
4974 /* Remote is ready, send confirm immediately
4975 * after logical link is ready
4976 */
4977 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
4978 } else {
4979 /* Both logical link and move success
4980 * are required to confirm
4981 */
4982 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_COMP;
4983 }
4984
4985 /* Placeholder - get hci_chan for logical link */
4986 if (!hchan) {
4987 /* Logical link not available */
4988 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
4989 break;
4990 }
4991
4992 /* If the logical link is not yet connected, do not
4993 * send confirmation.
4994 */
4995 if (hchan->state != BT_CONNECTED)
4996 break;
4997
4998 /* Logical link is already ready to go */
4999
5000 chan->hs_hcon = hchan->conn;
5001 chan->hs_hcon->l2cap_data = chan->conn;
5002
5003 if (result == L2CAP_MR_SUCCESS) {
5004 /* Can confirm now */
5005 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
5006 } else {
5007 /* Now only need move success
5008 * to confirm
5009 */
5010 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
5011 }
5012
5013 l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
5014 break;
5015 default:
5016 /* Any other amp move state means the move failed. */
5017 chan->move_id = chan->local_amp_id;
5018 l2cap_move_done(chan);
5019 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5020 }
5021
5022 l2cap_chan_unlock(chan);
5023}
5024
5025static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
5026 u16 result)
5027{
5028 struct l2cap_chan *chan;
5029
5030 chan = l2cap_get_chan_by_ident(conn, ident);
5031 if (!chan) {
5032 /* Could not locate channel, icid is best guess */
5033 l2cap_send_move_chan_cfm_icid(conn, icid);
5034 return;
5035 }
5036
5037 __clear_chan_timer(chan);
5038
5039 if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
5040 if (result == L2CAP_MR_COLLISION) {
5041 chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
5042 } else {
5043 /* Cleanup - cancel move */
5044 chan->move_id = chan->local_amp_id;
5045 l2cap_move_done(chan);
5046 }
5047 }
5048
5049 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5050
5051 l2cap_chan_unlock(chan);
5052}
5053
5054static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
5055 struct l2cap_cmd_hdr *cmd,
5056 u16 cmd_len, void *data)
5057{
5058 struct l2cap_move_chan_rsp *rsp = data;
5059 u16 icid, result;
5060
5061 if (cmd_len != sizeof(*rsp))
5062 return -EPROTO;
5063
5064 icid = le16_to_cpu(rsp->icid);
5065 result = le16_to_cpu(rsp->result);
5066
5067 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5068
5069 if (result == L2CAP_MR_SUCCESS || result == L2CAP_MR_PEND)
5070 l2cap_move_continue(conn, icid, result);
5071 else
5072 l2cap_move_fail(conn, cmd->ident, icid, result);
5073
5074 return 0;
5075}
5076
5077static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
5078 struct l2cap_cmd_hdr *cmd,
5079 u16 cmd_len, void *data)
5080{
5081 struct l2cap_move_chan_cfm *cfm = data;
5082 struct l2cap_chan *chan;
5083 u16 icid, result;
5084
5085 if (cmd_len != sizeof(*cfm))
5086 return -EPROTO;
5087
5088 icid = le16_to_cpu(cfm->icid);
5089 result = le16_to_cpu(cfm->result);
5090
5091 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5092
5093 chan = l2cap_get_chan_by_dcid(conn, icid);
5094 if (!chan) {
5095 /* Spec requires a response even if the icid was not found */
5096 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
5097 return 0;
5098 }
5099
5100 if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) {
5101 if (result == L2CAP_MC_CONFIRMED) {
5102 chan->local_amp_id = chan->move_id;
5103 if (chan->local_amp_id == AMP_ID_BREDR)
5104 __release_logical_link(chan);
5105 } else {
5106 chan->move_id = chan->local_amp_id;
5107 }
5108
5109 l2cap_move_done(chan);
5110 }
5111
5112 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
5113
5114 l2cap_chan_unlock(chan);
5115
5116 return 0;
5117}
5118
5119static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
5120 struct l2cap_cmd_hdr *cmd,
5121 u16 cmd_len, void *data)
5122{
5123 struct l2cap_move_chan_cfm_rsp *rsp = data;
5124 struct l2cap_chan *chan;
5125 u16 icid;
5126
5127 if (cmd_len != sizeof(*rsp))
5128 return -EPROTO;
5129
5130 icid = le16_to_cpu(rsp->icid);
5131
5132 BT_DBG("icid 0x%4.4x", icid);
5133
5134 chan = l2cap_get_chan_by_scid(conn, icid);
5135 if (!chan)
5136 return 0;
5137
5138 __clear_chan_timer(chan);
5139
5140 if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) {
5141 chan->local_amp_id = chan->move_id;
5142
5143 if (chan->local_amp_id == AMP_ID_BREDR && chan->hs_hchan)
5144 __release_logical_link(chan);
5145
5146 l2cap_move_done(chan);
5147 }
5148
5149 l2cap_chan_unlock(chan);
5150
5151 return 0;
5152}
5153
5154static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
5155 struct l2cap_cmd_hdr *cmd,
5156 u16 cmd_len, u8 *data)
5157{
5158 struct hci_conn *hcon = conn->hcon;
5159 struct l2cap_conn_param_update_req *req;
5160 struct l2cap_conn_param_update_rsp rsp;
5161 u16 min, max, latency, to_multiplier;
5162 int err;
5163
5164 if (hcon->role != HCI_ROLE_MASTER)
5165 return -EINVAL;
5166
5167 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
5168 return -EPROTO;
5169
5170 req = (struct l2cap_conn_param_update_req *) data;
5171 min = __le16_to_cpu(req->min);
5172 max = __le16_to_cpu(req->max);
5173 latency = __le16_to_cpu(req->latency);
5174 to_multiplier = __le16_to_cpu(req->to_multiplier);
5175
5176 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
5177 min, max, latency, to_multiplier);
5178
5179 memset(&rsp, 0, sizeof(rsp));
5180
5181 err = hci_check_conn_params(min, max, latency, to_multiplier);
5182 if (err)
5183 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
5184 else
5185 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
5186
5187 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
5188 sizeof(rsp), &rsp);
5189
5190 if (!err) {
5191 u8 store_hint;
5192
5193 store_hint = hci_le_conn_update(hcon, min, max, latency,
5194 to_multiplier);
5195 mgmt_new_conn_param(hcon->hdev, &hcon->dst, hcon->dst_type,
5196 store_hint, min, max, latency,
5197 to_multiplier);
5198
5199 }
5200
5201 return 0;
5202}
5203
5204static int l2cap_le_connect_rsp(struct l2cap_conn *conn,
5205 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5206 u8 *data)
5207{
5208 struct l2cap_le_conn_rsp *rsp = (struct l2cap_le_conn_rsp *) data;
5209 u16 dcid, mtu, mps, credits, result;
5210 struct l2cap_chan *chan;
5211 int err;
5212
5213 if (cmd_len < sizeof(*rsp))
5214 return -EPROTO;
5215
5216 dcid = __le16_to_cpu(rsp->dcid);
5217 mtu = __le16_to_cpu(rsp->mtu);
5218 mps = __le16_to_cpu(rsp->mps);
5219 credits = __le16_to_cpu(rsp->credits);
5220 result = __le16_to_cpu(rsp->result);
5221
5222 if (result == L2CAP_CR_SUCCESS && (mtu < 23 || mps < 23))
5223 return -EPROTO;
5224
5225 BT_DBG("dcid 0x%4.4x mtu %u mps %u credits %u result 0x%2.2x",
5226 dcid, mtu, mps, credits, result);
5227
5228 mutex_lock(&conn->chan_lock);
5229
5230 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
5231 if (!chan) {
5232 err = -EBADSLT;
5233 goto unlock;
5234 }
5235
5236 err = 0;
5237
5238 l2cap_chan_lock(chan);
5239
5240 switch (result) {
5241 case L2CAP_CR_SUCCESS:
5242 chan->ident = 0;
5243 chan->dcid = dcid;
5244 chan->omtu = mtu;
5245 chan->remote_mps = mps;
5246 chan->tx_credits = credits;
5247 l2cap_chan_ready(chan);
5248 break;
5249
5250 default:
5251 l2cap_chan_del(chan, ECONNREFUSED);
5252 break;
5253 }
5254
5255 l2cap_chan_unlock(chan);
5256
5257unlock:
5258 mutex_unlock(&conn->chan_lock);
5259
5260 return err;
5261}
5262
5263static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
5264 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5265 u8 *data)
5266{
5267 int err = 0;
5268
5269 switch (cmd->code) {
5270 case L2CAP_COMMAND_REJ:
5271 l2cap_command_rej(conn, cmd, cmd_len, data);
5272 break;
5273
5274 case L2CAP_CONN_REQ:
5275 err = l2cap_connect_req(conn, cmd, cmd_len, data);
5276 break;
5277
5278 case L2CAP_CONN_RSP:
5279 case L2CAP_CREATE_CHAN_RSP:
5280 l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
5281 break;
5282
5283 case L2CAP_CONF_REQ:
5284 err = l2cap_config_req(conn, cmd, cmd_len, data);
5285 break;
5286
5287 case L2CAP_CONF_RSP:
5288 l2cap_config_rsp(conn, cmd, cmd_len, data);
5289 break;
5290
5291 case L2CAP_DISCONN_REQ:
5292 err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
5293 break;
5294
5295 case L2CAP_DISCONN_RSP:
5296 l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
5297 break;
5298
5299 case L2CAP_ECHO_REQ:
5300 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
5301 break;
5302
5303 case L2CAP_ECHO_RSP:
5304 break;
5305
5306 case L2CAP_INFO_REQ:
5307 err = l2cap_information_req(conn, cmd, cmd_len, data);
5308 break;
5309
5310 case L2CAP_INFO_RSP:
5311 l2cap_information_rsp(conn, cmd, cmd_len, data);
5312 break;
5313
5314 case L2CAP_CREATE_CHAN_REQ:
5315 err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
5316 break;
5317
5318 case L2CAP_MOVE_CHAN_REQ:
5319 err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
5320 break;
5321
5322 case L2CAP_MOVE_CHAN_RSP:
5323 l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
5324 break;
5325
5326 case L2CAP_MOVE_CHAN_CFM:
5327 err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
5328 break;
5329
5330 case L2CAP_MOVE_CHAN_CFM_RSP:
5331 l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
5332 break;
5333
5334 default:
5335 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
5336 err = -EINVAL;
5337 break;
5338 }
5339
5340 return err;
5341}
5342
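/* Handle an LE credit-based connection request: look up a listening
 * channel for the PSM, check security and resource limits, create the
 * new channel and reply (immediately, or after deferred setup).
 */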
5343static int l2cap_le_connect_req(struct l2cap_conn *conn,
5344 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5345 u8 *data)
5346{
5347 struct l2cap_le_conn_req *req = (struct l2cap_le_conn_req *) data;
5348 struct l2cap_le_conn_rsp rsp;
5349 struct l2cap_chan *chan, *pchan;
5350 u16 dcid, scid, credits, mtu, mps;
5351 __le16 psm;
5352 u8 result;
5353
5354 if (cmd_len != sizeof(*req))
5355 return -EPROTO;
5356
5357 scid = __le16_to_cpu(req->scid);
5358 mtu = __le16_to_cpu(req->mtu);
5359 mps = __le16_to_cpu(req->mps);
5360 psm = req->psm;
5361 dcid = 0;
5362 credits = 0;
5363
5364 if (mtu < 23 || mps < 23)
5365 return -EPROTO;
5366
5367 BT_DBG("psm 0x%2.2x scid 0x%4.4x mtu %u mps %u", __le16_to_cpu(psm),
5368 scid, mtu, mps);
5369
5370	/* Check if we have a socket listening on this PSM */
5371 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
5372 &conn->hcon->dst, LE_LINK);
5373 if (!pchan) {
5374 result = L2CAP_CR_BAD_PSM;
5375 chan = NULL;
5376 goto response;
5377 }
5378
5379 mutex_lock(&conn->chan_lock);
5380 l2cap_chan_lock(pchan);
5381
5382 if (!smp_sufficient_security(conn->hcon, pchan->sec_level)) {
5383 result = L2CAP_CR_AUTHENTICATION;
5384 chan = NULL;
5385 goto response_unlock;
5386 }
5387
5388	/* Check if we already have a channel with that dcid */
5389 if (__l2cap_get_chan_by_dcid(conn, scid)) {
5390 result = L2CAP_CR_NO_MEM;
5391 chan = NULL;
5392 goto response_unlock;
5393 }
5394
5395 chan = pchan->ops->new_connection(pchan);
5396 if (!chan) {
5397 result = L2CAP_CR_NO_MEM;
5398 goto response_unlock;
5399 }
5400
5401 l2cap_le_flowctl_init(chan);
5402
5403 bacpy(&chan->src, &conn->hcon->src);
5404 bacpy(&chan->dst, &conn->hcon->dst);
5405 chan->src_type = bdaddr_type(conn->hcon, conn->hcon->src_type);
5406 chan->dst_type = bdaddr_type(conn->hcon, conn->hcon->dst_type);
5407 chan->psm = psm;
5408 chan->dcid = scid;
5409 chan->omtu = mtu;
5410 chan->remote_mps = mps;
5411 chan->tx_credits = __le16_to_cpu(req->credits);
5412
5413 __l2cap_chan_add(conn, chan);
5414 dcid = chan->scid;
5415 credits = chan->rx_credits;
5416
5417 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
5418
5419 chan->ident = cmd->ident;
5420
5421 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
5422 l2cap_state_change(chan, BT_CONNECT2);
5423 result = L2CAP_CR_PEND;
5424 chan->ops->defer(chan);
5425 } else {
5426 l2cap_chan_ready(chan);
5427 result = L2CAP_CR_SUCCESS;
5428 }
5429
5430response_unlock:
5431 l2cap_chan_unlock(pchan);
5432 mutex_unlock(&conn->chan_lock);
5433 l2cap_chan_put(pchan);
5434
5435 if (result == L2CAP_CR_PEND)
5436 return 0;
5437
5438response:
5439 if (chan) {
5440 rsp.mtu = cpu_to_le16(chan->imtu);
5441 rsp.mps = cpu_to_le16(chan->mps);
5442 } else {
5443 rsp.mtu = 0;
5444 rsp.mps = 0;
5445 }
5446
5447 rsp.dcid = cpu_to_le16(dcid);
5448 rsp.credits = cpu_to_le16(credits);
5449 rsp.result = cpu_to_le16(result);
5450
5451 l2cap_send_cmd(conn, cmd->ident, L2CAP_LE_CONN_RSP, sizeof(rsp), &rsp);
5452
5453 return 0;
5454}
5455
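/* Handle an LE Flow Control Credit packet: add the credits to the
 * channel (disconnecting on overflow) and transmit any queued frames
 * that the new credits allow.
 */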
5456static inline int l2cap_le_credits(struct l2cap_conn *conn,
5457 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5458 u8 *data)
5459{
5460 struct l2cap_le_credits *pkt;
5461 struct l2cap_chan *chan;
5462 u16 cid, credits, max_credits;
5463
5464 if (cmd_len != sizeof(*pkt))
5465 return -EPROTO;
5466
5467 pkt = (struct l2cap_le_credits *) data;
5468 cid = __le16_to_cpu(pkt->cid);
5469 credits = __le16_to_cpu(pkt->credits);
5470
5471 BT_DBG("cid 0x%4.4x credits 0x%4.4x", cid, credits);
5472
5473 chan = l2cap_get_chan_by_dcid(conn, cid);
5474 if (!chan)
5475 return -EBADSLT;
5476
5477 max_credits = LE_FLOWCTL_MAX_CREDITS - chan->tx_credits;
5478 if (credits > max_credits) {
5479 BT_ERR("LE credits overflow");
5480 l2cap_send_disconn_req(chan, ECONNRESET);
5481
5482 /* Return 0 so that we don't trigger an unnecessary
5483 * command reject packet.
5484 */
5485 return 0;
5486 }
5487
5488 chan->tx_credits += credits;
5489
5490 while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
5491 l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
5492 chan->tx_credits--;
5493 }
5494
5495 if (chan->tx_credits)
5496 chan->ops->resume(chan);
5497
5498 l2cap_chan_unlock(chan);
5499
5500 return 0;
5501}
5502
5503static inline int l2cap_le_command_rej(struct l2cap_conn *conn,
5504 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5505 u8 *data)
5506{
5507 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
5508 struct l2cap_chan *chan;
5509
5510 if (cmd_len < sizeof(*rej))
5511 return -EPROTO;
5512
5513 mutex_lock(&conn->chan_lock);
5514
5515 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
5516 if (!chan)
5517 goto done;
5518
5519 l2cap_chan_lock(chan);
5520 l2cap_chan_del(chan, ECONNREFUSED);
5521 l2cap_chan_unlock(chan);
5522
5523done:
5524 mutex_unlock(&conn->chan_lock);
5525 return 0;
5526}
5527
5528static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
5529 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5530 u8 *data)
5531{
5532 int err = 0;
5533
5534 switch (cmd->code) {
5535 case L2CAP_COMMAND_REJ:
5536 l2cap_le_command_rej(conn, cmd, cmd_len, data);
5537 break;
5538
5539 case L2CAP_CONN_PARAM_UPDATE_REQ:
5540 err = l2cap_conn_param_update_req(conn, cmd, cmd_len, data);
5541 break;
5542
5543 case L2CAP_CONN_PARAM_UPDATE_RSP:
5544 break;
5545
5546 case L2CAP_LE_CONN_RSP:
5547 l2cap_le_connect_rsp(conn, cmd, cmd_len, data);
5548 break;
5549
5550 case L2CAP_LE_CONN_REQ:
5551 err = l2cap_le_connect_req(conn, cmd, cmd_len, data);
5552 break;
5553
5554 case L2CAP_LE_CREDITS:
5555 err = l2cap_le_credits(conn, cmd, cmd_len, data);
5556 break;
5557
5558 case L2CAP_DISCONN_REQ:
5559 err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
5560 break;
5561
5562 case L2CAP_DISCONN_RSP:
5563 l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
5564 break;
5565
5566 default:
5567 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
5568 err = -EINVAL;
5569 break;
5570 }
5571
5572 return err;
5573}
5574
5575static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
5576 struct sk_buff *skb)
5577{
5578 struct hci_conn *hcon = conn->hcon;
5579 struct l2cap_cmd_hdr *cmd;
5580 u16 len;
5581 int err;
5582
5583 if (hcon->type != LE_LINK)
5584 goto drop;
5585
5586 if (skb->len < L2CAP_CMD_HDR_SIZE)
5587 goto drop;
5588
5589 cmd = (void *) skb->data;
5590 skb_pull(skb, L2CAP_CMD_HDR_SIZE);
5591
5592 len = le16_to_cpu(cmd->len);
5593
5594 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, cmd->ident);
5595
5596 if (len != skb->len || !cmd->ident) {
5597 BT_DBG("corrupted command");
5598 goto drop;
5599 }
5600
5601 err = l2cap_le_sig_cmd(conn, cmd, len, skb->data);
5602 if (err) {
5603 struct l2cap_cmd_rej_unk rej;
5604
5605 BT_ERR("Wrong link type (%d)", err);
5606
5607 rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
5608 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
5609 sizeof(rej), &rej);
5610 }
5611
5612drop:
5613 kfree_skb(skb);
5614}
5615
5616static inline void l2cap_sig_channel(struct l2cap_conn *conn,
5617 struct sk_buff *skb)
5618{
5619 struct hci_conn *hcon = conn->hcon;
5620 u8 *data = skb->data;
5621 int len = skb->len;
5622 struct l2cap_cmd_hdr cmd;
5623 int err;
5624
5625 l2cap_raw_recv(conn, skb);
5626
5627 if (hcon->type != ACL_LINK)
5628 goto drop;
5629
5630 while (len >= L2CAP_CMD_HDR_SIZE) {
5631 u16 cmd_len;
5632 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
5633 data += L2CAP_CMD_HDR_SIZE;
5634 len -= L2CAP_CMD_HDR_SIZE;
5635
5636 cmd_len = le16_to_cpu(cmd.len);
5637
5638 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len,
5639 cmd.ident);
5640
5641 if (cmd_len > len || !cmd.ident) {
5642 BT_DBG("corrupted command");
5643 break;
5644 }
5645
5646 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
5647 if (err) {
5648 struct l2cap_cmd_rej_unk rej;
5649
5650 BT_ERR("Wrong link type (%d)", err);
5651
5652 rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
5653 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ,
5654 sizeof(rej), &rej);
5655 }
5656
5657 data += cmd_len;
5658 len -= cmd_len;
5659 }
5660
5661drop:
5662 kfree_skb(skb);
5663}
5664
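/* Verify and strip the CRC16 FCS trailer of a received frame when FCS
 * is enabled on the channel.
 */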
5665static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
5666{
5667 u16 our_fcs, rcv_fcs;
5668 int hdr_size;
5669
5670 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
5671 hdr_size = L2CAP_EXT_HDR_SIZE;
5672 else
5673 hdr_size = L2CAP_ENH_HDR_SIZE;
5674
5675 if (chan->fcs == L2CAP_FCS_CRC16) {
5676 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
5677 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
5678 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
5679
5680 if (our_fcs != rcv_fcs)
5681 return -EBADMSG;
5682 }
5683 return 0;
5684}
5685
5686static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
5687{
5688 struct l2cap_ctrl control;
5689
5690 BT_DBG("chan %p", chan);
5691
5692 memset(&control, 0, sizeof(control));
5693 control.sframe = 1;
5694 control.final = 1;
5695 control.reqseq = chan->buffer_seq;
5696 set_bit(CONN_SEND_FBIT, &chan->conn_state);
5697
5698 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5699 control.super = L2CAP_SUPER_RNR;
5700 l2cap_send_sframe(chan, &control);
5701 }
5702
5703 if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
5704 chan->unacked_frames > 0)
5705 __set_retrans_timer(chan);
5706
5707	/* Send pending i-frames */
5708 l2cap_ertm_send(chan);
5709
5710 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
5711 test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
5712 /* F-bit wasn't sent in an s-frame or i-frame yet, so
5713 * send it now.
5714 */
5715 control.super = L2CAP_SUPER_RR;
5716 l2cap_send_sframe(chan, &control);
5717 }
5718}
5719
5720static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
5721 struct sk_buff **last_frag)
5722{
5723 /* skb->len reflects data in skb as well as all fragments
5724 * skb->data_len reflects only data in fragments
5725 */
5726 if (!skb_has_frag_list(skb))
5727 skb_shinfo(skb)->frag_list = new_frag;
5728
5729 new_frag->next = NULL;
5730
5731 (*last_frag)->next = new_frag;
5732 *last_frag = new_frag;
5733
5734 skb->len += new_frag->len;
5735 skb->data_len += new_frag->len;
5736 skb->truesize += new_frag->truesize;
5737}
5738
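/* Reassemble a segmented SDU according to the SAR bits of the control
 * field and pass complete SDUs to the channel's recv callback; any
 * error discards the partial SDU.
 */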
5739static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
5740 struct l2cap_ctrl *control)
5741{
5742 int err = -EINVAL;
5743
5744 switch (control->sar) {
5745 case L2CAP_SAR_UNSEGMENTED:
5746 if (chan->sdu)
5747 break;
5748
5749 err = chan->ops->recv(chan, skb);
5750 break;
5751
5752 case L2CAP_SAR_START:
5753 if (chan->sdu)
5754 break;
5755
5756 chan->sdu_len = get_unaligned_le16(skb->data);
5757 skb_pull(skb, L2CAP_SDULEN_SIZE);
5758
5759 if (chan->sdu_len > chan->imtu) {
5760 err = -EMSGSIZE;
5761 break;
5762 }
5763
5764 if (skb->len >= chan->sdu_len)
5765 break;
5766
5767 chan->sdu = skb;
5768 chan->sdu_last_frag = skb;
5769
5770 skb = NULL;
5771 err = 0;
5772 break;
5773
5774 case L2CAP_SAR_CONTINUE:
5775 if (!chan->sdu)
5776 break;
5777
5778 append_skb_frag(chan->sdu, skb,
5779 &chan->sdu_last_frag);
5780 skb = NULL;
5781
5782 if (chan->sdu->len >= chan->sdu_len)
5783 break;
5784
5785 err = 0;
5786 break;
5787
5788 case L2CAP_SAR_END:
5789 if (!chan->sdu)
5790 break;
5791
5792 append_skb_frag(chan->sdu, skb,
5793 &chan->sdu_last_frag);
5794 skb = NULL;
5795
5796 if (chan->sdu->len != chan->sdu_len)
5797 break;
5798
5799 err = chan->ops->recv(chan, chan->sdu);
5800
5801 if (!err) {
5802 /* Reassembly complete */
5803 chan->sdu = NULL;
5804 chan->sdu_last_frag = NULL;
5805 chan->sdu_len = 0;
5806 }
5807 break;
5808 }
5809
5810 if (err) {
5811 kfree_skb(skb);
5812 kfree_skb(chan->sdu);
5813 chan->sdu = NULL;
5814 chan->sdu_last_frag = NULL;
5815 chan->sdu_len = 0;
5816 }
5817
5818 return err;
5819}
5820
5821static int l2cap_resegment(struct l2cap_chan *chan)
5822{
5823 /* Placeholder */
5824 return 0;
5825}
5826
5827void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
5828{
5829 u8 event;
5830
5831 if (chan->mode != L2CAP_MODE_ERTM)
5832 return;
5833
5834 event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
5835 l2cap_tx(chan, NULL, NULL, event);
5836}
5837
5838static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
5839{
5840 int err = 0;
5841 /* Pass sequential frames to l2cap_reassemble_sdu()
5842 * until a gap is encountered.
5843 */
5844
5845 BT_DBG("chan %p", chan);
5846
5847 while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5848 struct sk_buff *skb;
5849 BT_DBG("Searching for skb with txseq %d (queue len %d)",
5850 chan->buffer_seq, skb_queue_len(&chan->srej_q));
5851
5852 skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);
5853
5854 if (!skb)
5855 break;
5856
5857 skb_unlink(skb, &chan->srej_q);
5858 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
5859 err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->control);
5860 if (err)
5861 break;
5862 }
5863
5864 if (skb_queue_empty(&chan->srej_q)) {
5865 chan->rx_state = L2CAP_RX_STATE_RECV;
5866 l2cap_send_ack(chan);
5867 }
5868
5869 return err;
5870}
5871
5872static void l2cap_handle_srej(struct l2cap_chan *chan,
5873 struct l2cap_ctrl *control)
5874{
5875 struct sk_buff *skb;
5876
5877 BT_DBG("chan %p, control %p", chan, control);
5878
5879 if (control->reqseq == chan->next_tx_seq) {
5880 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
5881 l2cap_send_disconn_req(chan, ECONNRESET);
5882 return;
5883 }
5884
5885 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
5886
5887 if (skb == NULL) {
5888 BT_DBG("Seq %d not available for retransmission",
5889 control->reqseq);
5890 return;
5891 }
5892
5893 if (chan->max_tx != 0 && bt_cb(skb)->control.retries >= chan->max_tx) {
5894 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
5895 l2cap_send_disconn_req(chan, ECONNRESET);
5896 return;
5897 }
5898
5899 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5900
5901 if (control->poll) {
5902 l2cap_pass_to_tx(chan, control);
5903
5904 set_bit(CONN_SEND_FBIT, &chan->conn_state);
5905 l2cap_retransmit(chan, control);
5906 l2cap_ertm_send(chan);
5907
5908 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
5909 set_bit(CONN_SREJ_ACT, &chan->conn_state);
5910 chan->srej_save_reqseq = control->reqseq;
5911 }
5912 } else {
5913 l2cap_pass_to_tx_fbit(chan, control);
5914
5915 if (control->final) {
5916 if (chan->srej_save_reqseq != control->reqseq ||
5917 !test_and_clear_bit(CONN_SREJ_ACT,
5918 &chan->conn_state))
5919 l2cap_retransmit(chan, control);
5920 } else {
5921 l2cap_retransmit(chan, control);
5922 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
5923 set_bit(CONN_SREJ_ACT, &chan->conn_state);
5924 chan->srej_save_reqseq = control->reqseq;
5925 }
5926 }
5927 }
5928}
5929
5930static void l2cap_handle_rej(struct l2cap_chan *chan,
5931 struct l2cap_ctrl *control)
5932{
5933 struct sk_buff *skb;
5934
5935 BT_DBG("chan %p, control %p", chan, control);
5936
5937 if (control->reqseq == chan->next_tx_seq) {
5938 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
5939 l2cap_send_disconn_req(chan, ECONNRESET);
5940 return;
5941 }
5942
5943 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
5944
5945 if (chan->max_tx && skb &&
5946 bt_cb(skb)->control.retries >= chan->max_tx) {
5947 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
5948 l2cap_send_disconn_req(chan, ECONNRESET);
5949 return;
5950 }
5951
5952 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5953
5954 l2cap_pass_to_tx(chan, control);
5955
5956 if (control->final) {
5957 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
5958 l2cap_retransmit_all(chan, control);
5959 } else {
5960 l2cap_retransmit_all(chan, control);
5961 l2cap_ertm_send(chan);
5962 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
5963 set_bit(CONN_REJ_ACT, &chan->conn_state);
5964 }
5965}
5966
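/* Classify the TxSeq of a received I-frame relative to the expected
 * sequence number, the tx window and any outstanding SREJs, so the
 * receive state machines know whether the frame is expected, a
 * duplicate, unexpected or invalid.
 */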
5967static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
5968{
5969 BT_DBG("chan %p, txseq %d", chan, txseq);
5970
5971 BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
5972 chan->expected_tx_seq);
5973
5974 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
5975 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
5976 chan->tx_win) {
5977 /* See notes below regarding "double poll" and
5978 * invalid packets.
5979 */
5980 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
5981 BT_DBG("Invalid/Ignore - after SREJ");
5982 return L2CAP_TXSEQ_INVALID_IGNORE;
5983 } else {
5984 BT_DBG("Invalid - in window after SREJ sent");
5985 return L2CAP_TXSEQ_INVALID;
5986 }
5987 }
5988
5989 if (chan->srej_list.head == txseq) {
5990 BT_DBG("Expected SREJ");
5991 return L2CAP_TXSEQ_EXPECTED_SREJ;
5992 }
5993
5994 if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
5995 BT_DBG("Duplicate SREJ - txseq already stored");
5996 return L2CAP_TXSEQ_DUPLICATE_SREJ;
5997 }
5998
5999 if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
6000 BT_DBG("Unexpected SREJ - not requested");
6001 return L2CAP_TXSEQ_UNEXPECTED_SREJ;
6002 }
6003 }
6004
6005 if (chan->expected_tx_seq == txseq) {
6006 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
6007 chan->tx_win) {
6008 BT_DBG("Invalid - txseq outside tx window");
6009 return L2CAP_TXSEQ_INVALID;
6010 } else {
6011 BT_DBG("Expected");
6012 return L2CAP_TXSEQ_EXPECTED;
6013 }
6014 }
6015
6016 if (__seq_offset(chan, txseq, chan->last_acked_seq) <
6017 __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
6018 BT_DBG("Duplicate - expected_tx_seq later than txseq");
6019 return L2CAP_TXSEQ_DUPLICATE;
6020 }
6021
6022 if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
6023 /* A source of invalid packets is a "double poll" condition,
6024 * where delays cause us to send multiple poll packets. If
6025 * the remote stack receives and processes both polls,
6026 * sequence numbers can wrap around in such a way that a
6027 * resent frame has a sequence number that looks like new data
6028 * with a sequence gap. This would trigger an erroneous SREJ
6029 * request.
6030 *
6031 * Fortunately, this is impossible with a tx window that's
6032 * less than half of the maximum sequence number, which allows
6033 * invalid frames to be safely ignored.
6034 *
6035 * With tx window sizes greater than half of the tx window
6036 * maximum, the frame is invalid and cannot be ignored. This
6037 * causes a disconnect.
6038 */
6039
6040 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
6041 BT_DBG("Invalid/Ignore - txseq outside tx window");
6042 return L2CAP_TXSEQ_INVALID_IGNORE;
6043 } else {
6044 BT_DBG("Invalid - txseq outside tx window");
6045 return L2CAP_TXSEQ_INVALID;
6046 }
6047 } else {
6048 BT_DBG("Unexpected - txseq indicates missing frames");
6049 return L2CAP_TXSEQ_UNEXPECTED;
6050 }
6051}
6052
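/* ERTM receive state machine handler for the RECV state: reassemble
 * expected I-frames, start SREJ recovery on a sequence gap, and react
 * to RR/RNR/REJ/SREJ supervisory frames.
 */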
6053static int l2cap_rx_state_recv(struct l2cap_chan *chan,
6054 struct l2cap_ctrl *control,
6055 struct sk_buff *skb, u8 event)
6056{
6057 int err = 0;
6058 bool skb_in_use = false;
6059
6060 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6061 event);
6062
6063 switch (event) {
6064 case L2CAP_EV_RECV_IFRAME:
6065 switch (l2cap_classify_txseq(chan, control->txseq)) {
6066 case L2CAP_TXSEQ_EXPECTED:
6067 l2cap_pass_to_tx(chan, control);
6068
6069 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6070 BT_DBG("Busy, discarding expected seq %d",
6071 control->txseq);
6072 break;
6073 }
6074
6075 chan->expected_tx_seq = __next_seq(chan,
6076 control->txseq);
6077
6078 chan->buffer_seq = chan->expected_tx_seq;
6079 skb_in_use = true;
6080
6081 err = l2cap_reassemble_sdu(chan, skb, control);
6082 if (err)
6083 break;
6084
6085 if (control->final) {
6086 if (!test_and_clear_bit(CONN_REJ_ACT,
6087 &chan->conn_state)) {
6088 control->final = 0;
6089 l2cap_retransmit_all(chan, control);
6090 l2cap_ertm_send(chan);
6091 }
6092 }
6093
6094 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
6095 l2cap_send_ack(chan);
6096 break;
6097 case L2CAP_TXSEQ_UNEXPECTED:
6098 l2cap_pass_to_tx(chan, control);
6099
6100 /* Can't issue SREJ frames in the local busy state.
6101 * Drop this frame, it will be seen as missing
6102 * when local busy is exited.
6103 */
6104 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6105 BT_DBG("Busy, discarding unexpected seq %d",
6106 control->txseq);
6107 break;
6108 }
6109
6110 /* There was a gap in the sequence, so an SREJ
6111 * must be sent for each missing frame. The
6112 * current frame is stored for later use.
6113 */
6114 skb_queue_tail(&chan->srej_q, skb);
6115 skb_in_use = true;
6116 BT_DBG("Queued %p (queue len %d)", skb,
6117 skb_queue_len(&chan->srej_q));
6118
6119 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
6120 l2cap_seq_list_clear(&chan->srej_list);
6121 l2cap_send_srej(chan, control->txseq);
6122
6123 chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
6124 break;
6125 case L2CAP_TXSEQ_DUPLICATE:
6126 l2cap_pass_to_tx(chan, control);
6127 break;
6128 case L2CAP_TXSEQ_INVALID_IGNORE:
6129 break;
6130 case L2CAP_TXSEQ_INVALID:
6131 default:
6132 l2cap_send_disconn_req(chan, ECONNRESET);
6133 break;
6134 }
6135 break;
6136 case L2CAP_EV_RECV_RR:
6137 l2cap_pass_to_tx(chan, control);
6138 if (control->final) {
6139 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6140
6141 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state) &&
6142 !__chan_is_moving(chan)) {
6143 control->final = 0;
6144 l2cap_retransmit_all(chan, control);
6145 }
6146
6147 l2cap_ertm_send(chan);
6148 } else if (control->poll) {
6149 l2cap_send_i_or_rr_or_rnr(chan);
6150 } else {
6151 if (test_and_clear_bit(CONN_REMOTE_BUSY,
6152 &chan->conn_state) &&
6153 chan->unacked_frames)
6154 __set_retrans_timer(chan);
6155
6156 l2cap_ertm_send(chan);
6157 }
6158 break;
6159 case L2CAP_EV_RECV_RNR:
6160 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6161 l2cap_pass_to_tx(chan, control);
6162 if (control && control->poll) {
6163 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6164 l2cap_send_rr_or_rnr(chan, 0);
6165 }
6166 __clear_retrans_timer(chan);
6167 l2cap_seq_list_clear(&chan->retrans_list);
6168 break;
6169 case L2CAP_EV_RECV_REJ:
6170 l2cap_handle_rej(chan, control);
6171 break;
6172 case L2CAP_EV_RECV_SREJ:
6173 l2cap_handle_srej(chan, control);
6174 break;
6175 default:
6176 break;
6177 }
6178
6179 if (skb && !skb_in_use) {
6180 BT_DBG("Freeing %p", skb);
6181 kfree_skb(skb);
6182 }
6183
6184 return err;
6185}
6186
6187static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
6188 struct l2cap_ctrl *control,
6189 struct sk_buff *skb, u8 event)
6190{
6191 int err = 0;
6192 u16 txseq = control->txseq;
6193 bool skb_in_use = false;
6194
6195 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6196 event);
6197
6198 switch (event) {
6199 case L2CAP_EV_RECV_IFRAME:
6200 switch (l2cap_classify_txseq(chan, txseq)) {
6201 case L2CAP_TXSEQ_EXPECTED:
6202 /* Keep frame for reassembly later */
6203 l2cap_pass_to_tx(chan, control);
6204 skb_queue_tail(&chan->srej_q, skb);
6205 skb_in_use = true;
6206 BT_DBG("Queued %p (queue len %d)", skb,
6207 skb_queue_len(&chan->srej_q));
6208
6209 chan->expected_tx_seq = __next_seq(chan, txseq);
6210 break;
6211 case L2CAP_TXSEQ_EXPECTED_SREJ:
6212 l2cap_seq_list_pop(&chan->srej_list);
6213
6214 l2cap_pass_to_tx(chan, control);
6215 skb_queue_tail(&chan->srej_q, skb);
6216 skb_in_use = true;
6217 BT_DBG("Queued %p (queue len %d)", skb,
6218 skb_queue_len(&chan->srej_q));
6219
6220 err = l2cap_rx_queued_iframes(chan);
6221 if (err)
6222 break;
6223
6224 break;
6225 case L2CAP_TXSEQ_UNEXPECTED:
6226 /* Got a frame that can't be reassembled yet.
6227 * Save it for later, and send SREJs to cover
6228 * the missing frames.
6229 */
6230 skb_queue_tail(&chan->srej_q, skb);
6231 skb_in_use = true;
6232 BT_DBG("Queued %p (queue len %d)", skb,
6233 skb_queue_len(&chan->srej_q));
6234
6235 l2cap_pass_to_tx(chan, control);
6236 l2cap_send_srej(chan, control->txseq);
6237 break;
6238 case L2CAP_TXSEQ_UNEXPECTED_SREJ:
6239 /* This frame was requested with an SREJ, but
6240 * some expected retransmitted frames are
6241 * missing. Request retransmission of missing
6242 * SREJ'd frames.
6243 */
6244 skb_queue_tail(&chan->srej_q, skb);
6245 skb_in_use = true;
6246 BT_DBG("Queued %p (queue len %d)", skb,
6247 skb_queue_len(&chan->srej_q));
6248
6249 l2cap_pass_to_tx(chan, control);
6250 l2cap_send_srej_list(chan, control->txseq);
6251 break;
6252 case L2CAP_TXSEQ_DUPLICATE_SREJ:
6253 /* We've already queued this frame. Drop this copy. */
6254 l2cap_pass_to_tx(chan, control);
6255 break;
6256 case L2CAP_TXSEQ_DUPLICATE:
6257 /* Expecting a later sequence number, so this frame
6258 * was already received. Ignore it completely.
6259 */
6260 break;
6261 case L2CAP_TXSEQ_INVALID_IGNORE:
6262 break;
6263 case L2CAP_TXSEQ_INVALID:
6264 default:
6265 l2cap_send_disconn_req(chan, ECONNRESET);
6266 break;
6267 }
6268 break;
6269 case L2CAP_EV_RECV_RR:
6270 l2cap_pass_to_tx(chan, control);
6271 if (control->final) {
6272 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6273
6274 if (!test_and_clear_bit(CONN_REJ_ACT,
6275 &chan->conn_state)) {
6276 control->final = 0;
6277 l2cap_retransmit_all(chan, control);
6278 }
6279
6280 l2cap_ertm_send(chan);
6281 } else if (control->poll) {
6282 if (test_and_clear_bit(CONN_REMOTE_BUSY,
6283 &chan->conn_state) &&
6284 chan->unacked_frames) {
6285 __set_retrans_timer(chan);
6286 }
6287
6288 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6289 l2cap_send_srej_tail(chan);
6290 } else {
6291 if (test_and_clear_bit(CONN_REMOTE_BUSY,
6292 &chan->conn_state) &&
6293 chan->unacked_frames)
6294 __set_retrans_timer(chan);
6295
6296 l2cap_send_ack(chan);
6297 }
6298 break;
6299 case L2CAP_EV_RECV_RNR:
6300 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6301 l2cap_pass_to_tx(chan, control);
6302 if (control->poll) {
6303 l2cap_send_srej_tail(chan);
6304 } else {
6305 struct l2cap_ctrl rr_control;
6306 memset(&rr_control, 0, sizeof(rr_control));
6307 rr_control.sframe = 1;
6308 rr_control.super = L2CAP_SUPER_RR;
6309 rr_control.reqseq = chan->buffer_seq;
6310 l2cap_send_sframe(chan, &rr_control);
6311 }
6312
6313 break;
6314 case L2CAP_EV_RECV_REJ:
6315 l2cap_handle_rej(chan, control);
6316 break;
6317 case L2CAP_EV_RECV_SREJ:
6318 l2cap_handle_srej(chan, control);
6319 break;
6320 }
6321
6322 if (skb && !skb_in_use) {
6323 BT_DBG("Freeing %p", skb);
6324 kfree_skb(skb);
6325 }
6326
6327 return err;
6328}
6329
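/* Complete a channel move: return the RX state machine to RECV and
 * switch the connection MTU to the new link (block MTU when a
 * high-speed hs_hcon now carries the channel, ACL MTU otherwise),
 * then resegment any pending outgoing data for that MTU.
 */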
6330static int l2cap_finish_move(struct l2cap_chan *chan)
6331{
6332 BT_DBG("chan %p", chan);
6333
6334 chan->rx_state = L2CAP_RX_STATE_RECV;
6335
6336 if (chan->hs_hcon)
6337 chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
6338 else
6339 chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6340
6341 return l2cap_resegment(chan);
6342}
6343
6344static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
6345 struct l2cap_ctrl *control,
6346 struct sk_buff *skb, u8 event)
6347{
6348 int err;
6349
6350 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6351 event);
6352
6353 if (!control->poll)
6354 return -EPROTO;
6355
6356 l2cap_process_reqseq(chan, control->reqseq);
6357
6358 if (!skb_queue_empty(&chan->tx_q))
6359 chan->tx_send_head = skb_peek(&chan->tx_q);
6360 else
6361 chan->tx_send_head = NULL;
6362
6363 /* Rewind next_tx_seq to the point expected
6364 * by the receiver.
6365 */
6366 chan->next_tx_seq = control->reqseq;
6367 chan->unacked_frames = 0;
6368
6369 err = l2cap_finish_move(chan);
6370 if (err)
6371 return err;
6372
6373 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6374 l2cap_send_i_or_rr_or_rnr(chan);
6375
6376 if (event == L2CAP_EV_RECV_IFRAME)
6377 return -EPROTO;
6378
6379 return l2cap_rx_state_recv(chan, control, NULL, event);
6380}
6381
6382static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
6383 struct l2cap_ctrl *control,
6384 struct sk_buff *skb, u8 event)
6385{
6386 int err;
6387
6388 if (!control->final)
6389 return -EPROTO;
6390
6391 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6392
6393 chan->rx_state = L2CAP_RX_STATE_RECV;
6394 l2cap_process_reqseq(chan, control->reqseq);
6395
6396 if (!skb_queue_empty(&chan->tx_q))
6397 chan->tx_send_head = skb_peek(&chan->tx_q);
6398 else
6399 chan->tx_send_head = NULL;
6400
6401 /* Rewind next_tx_seq to the point expected
6402 * by the receiver.
6403 */
6404 chan->next_tx_seq = control->reqseq;
6405 chan->unacked_frames = 0;
6406
6407 if (chan->hs_hcon)
6408 chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
6409 else
6410 chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6411
6412 err = l2cap_resegment(chan);
6413
6414 if (!err)
6415 err = l2cap_rx_state_recv(chan, control, skb, event);
6416
6417 return err;
6418}
6419
6420static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
6421{
6422 /* Make sure reqseq is for a packet that has been sent but not acked */
6423 u16 unacked;
6424
6425 unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
6426 return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
6427}
6428
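/* Main ERTM receive entry point: check that reqseq acknowledges only
 * frames that have actually been sent but not yet acked, then dispatch
 * to the handler for the current RX state. An invalid reqseq is a
 * protocol violation and the channel is disconnected.
 */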
6429static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6430 struct sk_buff *skb, u8 event)
6431{
6432 int err = 0;
6433
6434 BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
6435 control, skb, event, chan->rx_state);
6436
6437 if (__valid_reqseq(chan, control->reqseq)) {
6438 switch (chan->rx_state) {
6439 case L2CAP_RX_STATE_RECV:
6440 err = l2cap_rx_state_recv(chan, control, skb, event);
6441 break;
6442 case L2CAP_RX_STATE_SREJ_SENT:
6443 err = l2cap_rx_state_srej_sent(chan, control, skb,
6444 event);
6445 break;
6446 case L2CAP_RX_STATE_WAIT_P:
6447 err = l2cap_rx_state_wait_p(chan, control, skb, event);
6448 break;
6449 case L2CAP_RX_STATE_WAIT_F:
6450 err = l2cap_rx_state_wait_f(chan, control, skb, event);
6451 break;
6452 default:
6453 /* shut it down */
6454 break;
6455 }
6456 } else {
6457 		BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d)",
6458 control->reqseq, chan->next_tx_seq,
6459 chan->expected_ack_seq);
6460 l2cap_send_disconn_req(chan, ECONNRESET);
6461 }
6462
6463 return err;
6464}
6465
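/* Streaming mode receive: only the expected txseq is reassembled. Any
 * gap discards the partially assembled SDU and the frame itself, since
 * streaming mode never retransmits.
 */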
6466static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6467 struct sk_buff *skb)
6468{
6469 int err = 0;
6470
6471 BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
6472 chan->rx_state);
6473
6474 if (l2cap_classify_txseq(chan, control->txseq) ==
6475 L2CAP_TXSEQ_EXPECTED) {
6476 l2cap_pass_to_tx(chan, control);
6477
6478 BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
6479 __next_seq(chan, chan->buffer_seq));
6480
6481 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
6482
6483 l2cap_reassemble_sdu(chan, skb, control);
6484 } else {
6485 if (chan->sdu) {
6486 kfree_skb(chan->sdu);
6487 chan->sdu = NULL;
6488 }
6489 chan->sdu_last_frag = NULL;
6490 chan->sdu_len = 0;
6491
6492 if (skb) {
6493 BT_DBG("Freeing %p", skb);
6494 kfree_skb(skb);
6495 }
6496 }
6497
6498 chan->last_acked_seq = control->txseq;
6499 chan->expected_tx_seq = __next_seq(chan, control->txseq);
6500
6501 return err;
6502}
6503
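/* ERTM/streaming entry point for a single PDU: unpack the control
 * field, verify the FCS and payload length, then feed I-frames into
 * the RX state machine (or the streaming receive path) and map
 * S-frames to their RR/REJ/RNR/SREJ events.
 */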
6504static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
6505{
6506 struct l2cap_ctrl *control = &bt_cb(skb)->control;
6507 u16 len;
6508 u8 event;
6509
6510 __unpack_control(chan, skb);
6511
6512 len = skb->len;
6513
6514 	/*
6515 	 * We can just drop the corrupted I-frame here.
6516 	 * The resulting gap will be detected and the normal recovery
6517 	 * procedure will ask for retransmission.
6518 	 */
6519 if (l2cap_check_fcs(chan, skb))
6520 goto drop;
6521
6522 if (!control->sframe && control->sar == L2CAP_SAR_START)
6523 len -= L2CAP_SDULEN_SIZE;
6524
6525 if (chan->fcs == L2CAP_FCS_CRC16)
6526 len -= L2CAP_FCS_SIZE;
6527
6528 if (len > chan->mps) {
6529 l2cap_send_disconn_req(chan, ECONNRESET);
6530 goto drop;
6531 }
6532
6533 if (!control->sframe) {
6534 int err;
6535
6536 BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
6537 control->sar, control->reqseq, control->final,
6538 control->txseq);
6539
6540 /* Validate F-bit - F=0 always valid, F=1 only
6541 * valid in TX WAIT_F
6542 */
6543 if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
6544 goto drop;
6545
6546 if (chan->mode != L2CAP_MODE_STREAMING) {
6547 event = L2CAP_EV_RECV_IFRAME;
6548 err = l2cap_rx(chan, control, skb, event);
6549 } else {
6550 err = l2cap_stream_rx(chan, control, skb);
6551 }
6552
6553 if (err)
6554 l2cap_send_disconn_req(chan, ECONNRESET);
6555 } else {
6556 const u8 rx_func_to_event[4] = {
6557 L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
6558 L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
6559 };
6560
6561 /* Only I-frames are expected in streaming mode */
6562 if (chan->mode == L2CAP_MODE_STREAMING)
6563 goto drop;
6564
6565 BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
6566 control->reqseq, control->final, control->poll,
6567 control->super);
6568
6569 if (len != 0) {
6570 BT_ERR("Trailing bytes: %d in sframe", len);
6571 l2cap_send_disconn_req(chan, ECONNRESET);
6572 goto drop;
6573 }
6574
6575 /* Validate F and P bits */
6576 if (control->final && (control->poll ||
6577 chan->tx_state != L2CAP_TX_STATE_WAIT_F))
6578 goto drop;
6579
6580 event = rx_func_to_event[control->super];
6581 if (l2cap_rx(chan, control, skb, event))
6582 l2cap_send_disconn_req(chan, ECONNRESET);
6583 }
6584
6585 return 0;
6586
6587drop:
6588 kfree_skb(skb);
6589 return 0;
6590}
6591
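/* LE credit-based flow control: once rx_credits has dropped below half
 * of le_max_credits, send an LE Flow Control Credit packet that tops
 * the remote side back up to the full le_max_credits amount.
 */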
6592static void l2cap_chan_le_send_credits(struct l2cap_chan *chan)
6593{
6594 struct l2cap_conn *conn = chan->conn;
6595 struct l2cap_le_credits pkt;
6596 u16 return_credits;
6597
6598 /* We return more credits to the sender only after the amount of
6599 * credits falls below half of the initial amount.
6600 */
6601 if (chan->rx_credits >= (le_max_credits + 1) / 2)
6602 return;
6603
6604 return_credits = le_max_credits - chan->rx_credits;
6605
6606 BT_DBG("chan %p returning %u credits to sender", chan, return_credits);
6607
6608 chan->rx_credits += return_credits;
6609
6610 pkt.cid = cpu_to_le16(chan->scid);
6611 pkt.credits = cpu_to_le16(return_credits);
6612
6613 chan->ident = l2cap_get_ident(conn);
6614
6615 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CREDITS, sizeof(pkt), &pkt);
6616}
6617
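/* Receive one LE connection-oriented channel PDU: consume a credit,
 * replenish the remote side's credits if needed and reassemble the
 * SDU. The first PDU of an SDU starts with a 16-bit SDU length; the
 * SDU is delivered to the channel once that many bytes have arrived.
 */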
6618static int l2cap_le_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
6619{
6620 int err;
6621
6622 if (!chan->rx_credits) {
6623 BT_ERR("No credits to receive LE L2CAP data");
6624 l2cap_send_disconn_req(chan, ECONNRESET);
6625 return -ENOBUFS;
6626 }
6627
6628 if (chan->imtu < skb->len) {
6629 BT_ERR("Too big LE L2CAP PDU");
6630 return -ENOBUFS;
6631 }
6632
6633 chan->rx_credits--;
6634 BT_DBG("rx_credits %u -> %u", chan->rx_credits + 1, chan->rx_credits);
6635
6636 l2cap_chan_le_send_credits(chan);
6637
6638 err = 0;
6639
6640 if (!chan->sdu) {
6641 u16 sdu_len;
6642
6643 sdu_len = get_unaligned_le16(skb->data);
6644 skb_pull(skb, L2CAP_SDULEN_SIZE);
6645
6646 BT_DBG("Start of new SDU. sdu_len %u skb->len %u imtu %u",
6647 sdu_len, skb->len, chan->imtu);
6648
6649 if (sdu_len > chan->imtu) {
6650 BT_ERR("Too big LE L2CAP SDU length received");
6651 err = -EMSGSIZE;
6652 goto failed;
6653 }
6654
6655 if (skb->len > sdu_len) {
6656 BT_ERR("Too much LE L2CAP data received");
6657 err = -EINVAL;
6658 goto failed;
6659 }
6660
6661 if (skb->len == sdu_len)
6662 return chan->ops->recv(chan, skb);
6663
6664 chan->sdu = skb;
6665 chan->sdu_len = sdu_len;
6666 chan->sdu_last_frag = skb;
6667
6668 return 0;
6669 }
6670
6671 BT_DBG("SDU fragment. chan->sdu->len %u skb->len %u chan->sdu_len %u",
6672 chan->sdu->len, skb->len, chan->sdu_len);
6673
6674 if (chan->sdu->len + skb->len > chan->sdu_len) {
6675 BT_ERR("Too much LE L2CAP data received");
6676 err = -EINVAL;
6677 goto failed;
6678 }
6679
6680 append_skb_frag(chan->sdu, skb, &chan->sdu_last_frag);
6681 skb = NULL;
6682
6683 if (chan->sdu->len == chan->sdu_len) {
6684 err = chan->ops->recv(chan, chan->sdu);
6685 if (!err) {
6686 chan->sdu = NULL;
6687 chan->sdu_last_frag = NULL;
6688 chan->sdu_len = 0;
6689 }
6690 }
6691
6692failed:
6693 if (err) {
6694 kfree_skb(skb);
6695 kfree_skb(chan->sdu);
6696 chan->sdu = NULL;
6697 chan->sdu_last_frag = NULL;
6698 chan->sdu_len = 0;
6699 }
6700
6701 /* We can't return an error here since we took care of the skb
6702 * freeing internally. An error return would cause the caller to
6703 * do a double-free of the skb.
6704 */
6705 return 0;
6706}
6707
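/* Deliver a PDU to the channel identified by its CID (creating the
 * A2MP channel on demand) and hand it to the receive path matching
 * the channel mode: LE flow control, basic, or ERTM/streaming.
 */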
6708static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
6709 struct sk_buff *skb)
6710{
6711 struct l2cap_chan *chan;
6712
6713 chan = l2cap_get_chan_by_scid(conn, cid);
6714 if (!chan) {
6715 if (cid == L2CAP_CID_A2MP) {
6716 chan = a2mp_channel_create(conn, skb);
6717 if (!chan) {
6718 kfree_skb(skb);
6719 return;
6720 }
6721
6722 l2cap_chan_lock(chan);
6723 } else {
6724 BT_DBG("unknown cid 0x%4.4x", cid);
6725 /* Drop packet and return */
6726 kfree_skb(skb);
6727 return;
6728 }
6729 }
6730
6731 BT_DBG("chan %p, len %d", chan, skb->len);
6732
6733 if (chan->state != BT_CONNECTED)
6734 goto drop;
6735
6736 switch (chan->mode) {
6737 case L2CAP_MODE_LE_FLOWCTL:
6738 if (l2cap_le_data_rcv(chan, skb) < 0)
6739 goto drop;
6740
6741 goto done;
6742
6743 case L2CAP_MODE_BASIC:
6744 		/* If the socket receive buffer overflows we drop data here,
6745 		 * which is *bad* because L2CAP has to be reliable.
6746 		 * But we don't have any other choice: basic mode L2CAP
6747 		 * doesn't provide a flow control mechanism. */
6748
6749 if (chan->imtu < skb->len) {
6750 BT_ERR("Dropping L2CAP data: receive buffer overflow");
6751 goto drop;
6752 }
6753
6754 if (!chan->ops->recv(chan, skb))
6755 goto done;
6756 break;
6757
6758 case L2CAP_MODE_ERTM:
6759 case L2CAP_MODE_STREAMING:
6760 l2cap_data_rcv(chan, skb);
6761 goto done;
6762
6763 default:
6764 BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
6765 break;
6766 }
6767
6768drop:
6769 kfree_skb(skb);
6770
6771done:
6772 l2cap_chan_unlock(chan);
6773}
6774
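/* Connectionless data reception: look up the BR/EDR channel bound to
 * this PSM, record the remote address and PSM for msg_name and deliver
 * the payload if the channel accepts it; otherwise drop the skb.
 */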
6775static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
6776 struct sk_buff *skb)
6777{
6778 struct hci_conn *hcon = conn->hcon;
6779 struct l2cap_chan *chan;
6780
6781 if (hcon->type != ACL_LINK)
6782 goto free_skb;
6783
6784 chan = l2cap_global_chan_by_psm(0, psm, &hcon->src, &hcon->dst,
6785 ACL_LINK);
6786 if (!chan)
6787 goto free_skb;
6788
6789 BT_DBG("chan %p, len %d", chan, skb->len);
6790
6791 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
6792 goto drop;
6793
6794 if (chan->imtu < skb->len)
6795 goto drop;
6796
6797 /* Store remote BD_ADDR and PSM for msg_name */
6798 bacpy(&bt_cb(skb)->bdaddr, &hcon->dst);
6799 bt_cb(skb)->psm = psm;
6800
6801 if (!chan->ops->recv(chan, skb)) {
6802 l2cap_chan_put(chan);
6803 return;
6804 }
6805
6806drop:
6807 l2cap_chan_put(chan);
6808free_skb:
6809 kfree_skb(skb);
6810}
6811
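/* Handle one complete L2CAP frame: queue it while the link is still
 * being established, validate the basic header length, and dispatch
 * on CID to the signaling, connectionless, LE signaling, SMP or data
 * channel handlers.
 */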
6812static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
6813{
6814 struct l2cap_hdr *lh = (void *) skb->data;
6815 struct hci_conn *hcon = conn->hcon;
6816 u16 cid, len;
6817 __le16 psm;
6818
6819 if (hcon->state != BT_CONNECTED) {
6820 BT_DBG("queueing pending rx skb");
6821 skb_queue_tail(&conn->pending_rx, skb);
6822 return;
6823 }
6824
6825 skb_pull(skb, L2CAP_HDR_SIZE);
6826 cid = __le16_to_cpu(lh->cid);
6827 len = __le16_to_cpu(lh->len);
6828
6829 if (len != skb->len) {
6830 kfree_skb(skb);
6831 return;
6832 }
6833
6834 /* Since we can't actively block incoming LE connections we must
6835 * at least ensure that we ignore incoming data from them.
6836 */
6837 if (hcon->type == LE_LINK &&
6838 hci_bdaddr_list_lookup(&hcon->hdev->blacklist, &hcon->dst,
6839 bdaddr_type(hcon, hcon->dst_type))) {
6840 kfree_skb(skb);
6841 return;
6842 }
6843
6844 BT_DBG("len %d, cid 0x%4.4x", len, cid);
6845
6846 switch (cid) {
6847 case L2CAP_CID_SIGNALING:
6848 l2cap_sig_channel(conn, skb);
6849 break;
6850
6851 case L2CAP_CID_CONN_LESS:
6852 psm = get_unaligned((__le16 *) skb->data);
6853 skb_pull(skb, L2CAP_PSMLEN_SIZE);
6854 l2cap_conless_channel(conn, psm, skb);
6855 break;
6856
6857 case L2CAP_CID_LE_SIGNALING:
6858 l2cap_le_sig_channel(conn, skb);
6859 break;
6860
6861 case L2CAP_CID_SMP:
6862 if (smp_sig_channel(conn, skb))
6863 l2cap_conn_del(conn->hcon, EACCES);
6864 break;
6865
6866 default:
6867 l2cap_data_channel(conn, cid, skb);
6868 break;
6869 }
6870}
6871
6872static void process_pending_rx(struct work_struct *work)
6873{
6874 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
6875 pending_rx_work);
6876 struct sk_buff *skb;
6877
6878 BT_DBG("");
6879
6880 while ((skb = skb_dequeue(&conn->pending_rx)))
6881 l2cap_recv_frame(conn, skb);
6882}
6883
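/* Create the L2CAP connection object for an HCI connection (or return
 * the existing one), pick the MTU from the LE or ACL controller
 * limits and initialize the per-connection locks, lists, timers and
 * pending RX queue.
 */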
6884static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
6885{
6886 struct l2cap_conn *conn = hcon->l2cap_data;
6887 struct hci_chan *hchan;
6888
6889 if (conn)
6890 return conn;
6891
6892 hchan = hci_chan_create(hcon);
6893 if (!hchan)
6894 return NULL;
6895
6896 conn = kzalloc(sizeof(*conn), GFP_KERNEL);
6897 if (!conn) {
6898 hci_chan_del(hchan);
6899 return NULL;
6900 }
6901
6902 kref_init(&conn->ref);
6903 hcon->l2cap_data = conn;
6904 conn->hcon = hcon;
6905 hci_conn_get(conn->hcon);
6906 conn->hchan = hchan;
6907
6908 BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
6909
6910 switch (hcon->type) {
6911 case LE_LINK:
6912 if (hcon->hdev->le_mtu) {
6913 conn->mtu = hcon->hdev->le_mtu;
6914 break;
6915 }
6916 /* fall through */
6917 default:
6918 conn->mtu = hcon->hdev->acl_mtu;
6919 break;
6920 }
6921
6922 conn->feat_mask = 0;
6923
6924 if (hcon->type == ACL_LINK)
6925 conn->hs_enabled = test_bit(HCI_HS_ENABLED,
6926 &hcon->hdev->dev_flags);
6927
6928 mutex_init(&conn->ident_lock);
6929 mutex_init(&conn->chan_lock);
6930
6931 INIT_LIST_HEAD(&conn->chan_l);
6932 INIT_LIST_HEAD(&conn->users);
6933
6934 if (hcon->type == LE_LINK)
6935 INIT_DELAYED_WORK(&conn->security_timer, security_timeout);
6936 else
6937 INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
6938
6939 skb_queue_head_init(&conn->pending_rx);
6940 INIT_WORK(&conn->pending_rx_work, process_pending_rx);
6941
6942 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
6943
6944 return conn;
6945}
6946
6947 static bool is_valid_psm(u16 psm, u8 dst_type)
{
6948 if (!psm)
6949 return false;
6950
6951 if (bdaddr_type_is_le(dst_type))
6952 return (psm <= 0x00ff);
6953
6954 /* PSM must be odd and lsb of upper byte must be 0 */
6955 return ((psm & 0x0101) == 0x0001);
6956}
6957
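/* Initiate an outgoing connection on a channel: validate the PSM/CID
 * for the destination address type, create or reuse the ACL/LE link,
 * add the channel to the connection and either start channel setup or
 * wait for the link to come up.
 */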
6958int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
6959 bdaddr_t *dst, u8 dst_type)
6960{
6961 struct l2cap_conn *conn;
6962 struct hci_conn *hcon;
6963 struct hci_dev *hdev;
6964 int err;
6965
6966 BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", &chan->src, dst,
6967 dst_type, __le16_to_cpu(psm));
6968
6969 hdev = hci_get_route(dst, &chan->src);
6970 if (!hdev)
6971 return -EHOSTUNREACH;
6972
6973 hci_dev_lock(hdev);
6974
6975 l2cap_chan_lock(chan);
6976
6977 if (!is_valid_psm(__le16_to_cpu(psm), dst_type) && !cid &&
6978 chan->chan_type != L2CAP_CHAN_RAW) {
6979 err = -EINVAL;
6980 goto done;
6981 }
6982
6983 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !psm) {
6984 err = -EINVAL;
6985 goto done;
6986 }
6987
6988 if (chan->chan_type == L2CAP_CHAN_FIXED && !cid) {
6989 err = -EINVAL;
6990 goto done;
6991 }
6992
6993 switch (chan->mode) {
6994 case L2CAP_MODE_BASIC:
6995 break;
6996 case L2CAP_MODE_LE_FLOWCTL:
6997 l2cap_le_flowctl_init(chan);
6998 break;
6999 case L2CAP_MODE_ERTM:
7000 case L2CAP_MODE_STREAMING:
7001 if (!disable_ertm)
7002 break;
7003 /* fall through */
7004 default:
7005 err = -EOPNOTSUPP;
7006 goto done;
7007 }
7008
7009 switch (chan->state) {
7010 case BT_CONNECT:
7011 case BT_CONNECT2:
7012 case BT_CONFIG:
7013 /* Already connecting */
7014 err = 0;
7015 goto done;
7016
7017 case BT_CONNECTED:
7018 /* Already connected */
7019 err = -EISCONN;
7020 goto done;
7021
7022 case BT_OPEN:
7023 case BT_BOUND:
7024 /* Can connect */
7025 break;
7026
7027 default:
7028 err = -EBADFD;
7029 goto done;
7030 }
7031
7032 /* Set destination address and psm */
7033 bacpy(&chan->dst, dst);
7034 chan->dst_type = dst_type;
7035
7036 chan->psm = psm;
7037 chan->dcid = cid;
7038
7039 if (bdaddr_type_is_le(dst_type)) {
7040 u8 role;
7041
7042 /* Convert from L2CAP channel address type to HCI address type
7043 */
7044 if (dst_type == BDADDR_LE_PUBLIC)
7045 dst_type = ADDR_LE_DEV_PUBLIC;
7046 else
7047 dst_type = ADDR_LE_DEV_RANDOM;
7048
7049 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
7050 role = HCI_ROLE_SLAVE;
7051 else
7052 role = HCI_ROLE_MASTER;
7053
7054 hcon = hci_connect_le(hdev, dst, dst_type, chan->sec_level,
7055 HCI_LE_CONN_TIMEOUT, role);
7056 } else {
7057 u8 auth_type = l2cap_get_auth_type(chan);
7058 hcon = hci_connect_acl(hdev, dst, chan->sec_level, auth_type);
7059 }
7060
7061 if (IS_ERR(hcon)) {
7062 err = PTR_ERR(hcon);
7063 goto done;
7064 }
7065
7066 conn = l2cap_conn_add(hcon);
7067 if (!conn) {
7068 hci_conn_drop(hcon);
7069 err = -ENOMEM;
7070 goto done;
7071 }
7072
7073 if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
7074 hci_conn_drop(hcon);
7075 err = -EBUSY;
7076 goto done;
7077 }
7078
7079 /* Update source addr of the socket */
7080 bacpy(&chan->src, &hcon->src);
7081 chan->src_type = bdaddr_type(hcon, hcon->src_type);
7082
7083 l2cap_chan_unlock(chan);
7084 l2cap_chan_add(conn, chan);
7085 l2cap_chan_lock(chan);
7086
7087 /* l2cap_chan_add takes its own ref so we can drop this one */
7088 hci_conn_drop(hcon);
7089
7090 l2cap_state_change(chan, BT_CONNECT);
7091 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
7092
7093 /* Release chan->sport so that it can be reused by other
7094 * sockets (as it's only used for listening sockets).
7095 */
7096 write_lock(&chan_list_lock);
7097 chan->sport = 0;
7098 write_unlock(&chan_list_lock);
7099
7100 if (hcon->state == BT_CONNECTED) {
7101 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
7102 __clear_chan_timer(chan);
7103 if (l2cap_chan_check_security(chan, true))
7104 l2cap_state_change(chan, BT_CONNECTED);
7105 } else
7106 l2cap_do_start(chan);
7107 }
7108
7109 err = 0;
7110
7111done:
7112 l2cap_chan_unlock(chan);
7113 hci_dev_unlock(hdev);
7114 hci_dev_put(hdev);
7115 return err;
7116}
7117EXPORT_SYMBOL_GPL(l2cap_chan_connect);
7118
7119/* ---- L2CAP interface with lower layer (HCI) ---- */
7120
7121int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
7122{
7123 int exact = 0, lm1 = 0, lm2 = 0;
7124 struct l2cap_chan *c;
7125
7126 BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);
7127
7128 /* Find listening sockets and check their link_mode */
7129 read_lock(&chan_list_lock);
7130 list_for_each_entry(c, &chan_list, global_l) {
7131 if (c->state != BT_LISTEN)
7132 continue;
7133
7134 if (!bacmp(&c->src, &hdev->bdaddr)) {
7135 lm1 |= HCI_LM_ACCEPT;
7136 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
7137 lm1 |= HCI_LM_MASTER;
7138 exact++;
7139 } else if (!bacmp(&c->src, BDADDR_ANY)) {
7140 lm2 |= HCI_LM_ACCEPT;
7141 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
7142 lm2 |= HCI_LM_MASTER;
7143 }
7144 }
7145 read_unlock(&chan_list_lock);
7146
7147 return exact ? lm1 : lm2;
7148}
7149
7150/* Find the next fixed channel in BT_LISTEN state, continue iteration
7151 * from an existing channel in the list or from the beginning of the
7152 * global list (by passing NULL as first parameter).
7153 */
7154static struct l2cap_chan *l2cap_global_fixed_chan(struct l2cap_chan *c,
7155 bdaddr_t *src, u8 link_type)
7156{
7157 read_lock(&chan_list_lock);
7158
7159 if (c)
7160 c = list_next_entry(c, global_l);
7161 else
7162 c = list_entry(chan_list.next, typeof(*c), global_l);
7163
7164 list_for_each_entry_from(c, &chan_list, global_l) {
7165 if (c->chan_type != L2CAP_CHAN_FIXED)
7166 continue;
7167 if (c->state != BT_LISTEN)
7168 continue;
7169 if (bacmp(&c->src, src) && bacmp(&c->src, BDADDR_ANY))
7170 continue;
7171 if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR)
7172 continue;
7173 if (link_type == LE_LINK && c->src_type == BDADDR_BREDR)
7174 continue;
7175
7176 l2cap_chan_hold(c);
7177 read_unlock(&chan_list_lock);
7178 return c;
7179 }
7180
7181 read_unlock(&chan_list_lock);
7182
7183 return NULL;
7184}
7185
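/* HCI connect-complete callback: create the L2CAP connection, then
 * walk the global list of listening fixed channels and instantiate a
 * channel for each matching fixed CID, unless a client channel has
 * already claimed that CID on this connection.
 */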
7186void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
7187{
7188 struct hci_dev *hdev = hcon->hdev;
7189 struct l2cap_conn *conn;
7190 struct l2cap_chan *pchan;
7191 u8 dst_type;
7192
7193 BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);
7194
7195 if (status) {
7196 l2cap_conn_del(hcon, bt_to_errno(status));
7197 return;
7198 }
7199
7200 conn = l2cap_conn_add(hcon);
7201 if (!conn)
7202 return;
7203
7204 dst_type = bdaddr_type(hcon, hcon->dst_type);
7205
7206 /* If device is blocked, do not create channels for it */
7207 if (hci_bdaddr_list_lookup(&hdev->blacklist, &hcon->dst, dst_type))
7208 return;
7209
7210 /* Find fixed channels and notify them of the new connection. We
7211 * use multiple individual lookups, continuing each time where
7212 * we left off, because the list lock would prevent calling the
7213 * potentially sleeping l2cap_chan_lock() function.
7214 */
7215 pchan = l2cap_global_fixed_chan(NULL, &hdev->bdaddr, hcon->type);
7216 while (pchan) {
7217 struct l2cap_chan *chan, *next;
7218
7219 /* Client fixed channels should override server ones */
7220 if (__l2cap_get_chan_by_dcid(conn, pchan->scid))
7221 goto next;
7222
7223 l2cap_chan_lock(pchan);
7224 chan = pchan->ops->new_connection(pchan);
7225 if (chan) {
7226 bacpy(&chan->src, &hcon->src);
7227 bacpy(&chan->dst, &hcon->dst);
7228 chan->src_type = bdaddr_type(hcon, hcon->src_type);
7229 chan->dst_type = dst_type;
7230
7231 __l2cap_chan_add(conn, chan);
7232 }
7233
7234 l2cap_chan_unlock(pchan);
7235next:
7236 next = l2cap_global_fixed_chan(pchan, &hdev->bdaddr,
7237 hcon->type);
7238 l2cap_chan_put(pchan);
7239 pchan = next;
7240 }
7241
7242 l2cap_conn_ready(conn);
7243}
7244
7245int l2cap_disconn_ind(struct hci_conn *hcon)
7246{
7247 struct l2cap_conn *conn = hcon->l2cap_data;
7248
7249 BT_DBG("hcon %p", hcon);
7250
7251 if (!conn)
7252 return HCI_ERROR_REMOTE_USER_TERM;
7253 return conn->disc_reason;
7254}
7255
7256void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
7257{
7258 BT_DBG("hcon %p reason %d", hcon, reason);
7259
7260 l2cap_conn_del(hcon, bt_to_errno(reason));
7261}
7262
7263static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
7264{
7265 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
7266 return;
7267
7268 if (encrypt == 0x00) {
7269 if (chan->sec_level == BT_SECURITY_MEDIUM) {
7270 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
7271 } else if (chan->sec_level == BT_SECURITY_HIGH ||
7272 chan->sec_level == BT_SECURITY_FIPS)
7273 l2cap_chan_close(chan, ECONNREFUSED);
7274 } else {
7275 if (chan->sec_level == BT_SECURITY_MEDIUM)
7276 __clear_chan_timer(chan);
7277 }
7278}
7279
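/* Encryption/authentication change callback from HCI: update each
 * channel's security level and, depending on its state, resume data
 * transfer, restart connection setup, or send the pending connect
 * response (plus the first configure request on success).
 */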
7280int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
7281{
7282 struct l2cap_conn *conn = hcon->l2cap_data;
7283 struct l2cap_chan *chan;
7284
7285 if (!conn)
7286 return 0;
7287
7288 BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);
7289
7290 if (hcon->type == LE_LINK) {
7291 if (!status && encrypt)
7292 smp_distribute_keys(conn);
7293 cancel_delayed_work(&conn->security_timer);
7294 }
7295
7296 mutex_lock(&conn->chan_lock);
7297
7298 list_for_each_entry(chan, &conn->chan_l, list) {
7299 l2cap_chan_lock(chan);
7300
7301 BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
7302 state_to_string(chan->state));
7303
7304 if (chan->scid == L2CAP_CID_A2MP) {
7305 l2cap_chan_unlock(chan);
7306 continue;
7307 }
7308
7309 if (!status && encrypt)
7310 chan->sec_level = hcon->sec_level;
7311
7312 if (!__l2cap_no_conn_pending(chan)) {
7313 l2cap_chan_unlock(chan);
7314 continue;
7315 }
7316
7317 if (!status && (chan->state == BT_CONNECTED ||
7318 chan->state == BT_CONFIG)) {
7319 chan->ops->resume(chan);
7320 l2cap_check_encryption(chan, encrypt);
7321 l2cap_chan_unlock(chan);
7322 continue;
7323 }
7324
7325 if (chan->state == BT_CONNECT) {
7326 if (!status)
7327 l2cap_start_connection(chan);
7328 else
7329 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
7330 } else if (chan->state == BT_CONNECT2) {
7331 struct l2cap_conn_rsp rsp;
7332 __u16 res, stat;
7333
7334 if (!status) {
7335 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
7336 res = L2CAP_CR_PEND;
7337 stat = L2CAP_CS_AUTHOR_PEND;
7338 chan->ops->defer(chan);
7339 } else {
7340 l2cap_state_change(chan, BT_CONFIG);
7341 res = L2CAP_CR_SUCCESS;
7342 stat = L2CAP_CS_NO_INFO;
7343 }
7344 } else {
7345 l2cap_state_change(chan, BT_DISCONN);
7346 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
7347 res = L2CAP_CR_SEC_BLOCK;
7348 stat = L2CAP_CS_NO_INFO;
7349 }
7350
7351 rsp.scid = cpu_to_le16(chan->dcid);
7352 rsp.dcid = cpu_to_le16(chan->scid);
7353 rsp.result = cpu_to_le16(res);
7354 rsp.status = cpu_to_le16(stat);
7355 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
7356 sizeof(rsp), &rsp);
7357
7358 if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
7359 res == L2CAP_CR_SUCCESS) {
7360 char buf[128];
7361 set_bit(CONF_REQ_SENT, &chan->conf_state);
7362 l2cap_send_cmd(conn, l2cap_get_ident(conn),
7363 L2CAP_CONF_REQ,
7364 l2cap_build_conf_req(chan, buf),
7365 buf);
7366 chan->num_conf_req++;
7367 }
7368 }
7369
7370 l2cap_chan_unlock(chan);
7371 }
7372
7373 mutex_unlock(&conn->chan_lock);
7374
7375 return 0;
7376}
7377
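/* Reassemble L2CAP frames from ACL data packets: a start fragment
 * carries the total frame length in the basic header, continuation
 * fragments are appended until rx_len reaches zero, and the complete
 * frame is then passed to l2cap_recv_frame().
 */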
7378int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
7379{
7380 struct l2cap_conn *conn = hcon->l2cap_data;
7381 struct l2cap_hdr *hdr;
7382 int len;
7383
7384 	/* Do not create an L2CAP connection for AMP controllers */
7385 if (!conn && hcon->hdev->dev_type != HCI_BREDR)
7386 goto drop;
7387
7388 if (!conn)
7389 conn = l2cap_conn_add(hcon);
7390
7391 if (!conn)
7392 goto drop;
7393
7394 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
7395
7396 switch (flags) {
7397 case ACL_START:
7398 case ACL_START_NO_FLUSH:
7399 case ACL_COMPLETE:
7400 if (conn->rx_len) {
7401 BT_ERR("Unexpected start frame (len %d)", skb->len);
7402 kfree_skb(conn->rx_skb);
7403 conn->rx_skb = NULL;
7404 conn->rx_len = 0;
7405 l2cap_conn_unreliable(conn, ECOMM);
7406 }
7407
7408 		/* A start fragment always begins with the Basic L2CAP header */
7409 if (skb->len < L2CAP_HDR_SIZE) {
7410 BT_ERR("Frame is too short (len %d)", skb->len);
7411 l2cap_conn_unreliable(conn, ECOMM);
7412 goto drop;
7413 }
7414
7415 hdr = (struct l2cap_hdr *) skb->data;
7416 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
7417
7418 if (len == skb->len) {
7419 /* Complete frame received */
7420 l2cap_recv_frame(conn, skb);
7421 return 0;
7422 }
7423
7424 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
7425
7426 if (skb->len > len) {
7427 BT_ERR("Frame is too long (len %d, expected len %d)",
7428 skb->len, len);
7429 l2cap_conn_unreliable(conn, ECOMM);
7430 goto drop;
7431 }
7432
7433 /* Allocate skb for the complete frame (with header) */
7434 conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
7435 if (!conn->rx_skb)
7436 goto drop;
7437
7438 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
7439 skb->len);
7440 conn->rx_len = len - skb->len;
7441 break;
7442
7443 case ACL_CONT:
7444 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
7445
7446 if (!conn->rx_len) {
7447 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
7448 l2cap_conn_unreliable(conn, ECOMM);
7449 goto drop;
7450 }
7451
7452 if (skb->len > conn->rx_len) {
7453 BT_ERR("Fragment is too long (len %d, expected %d)",
7454 skb->len, conn->rx_len);
7455 kfree_skb(conn->rx_skb);
7456 conn->rx_skb = NULL;
7457 conn->rx_len = 0;
7458 l2cap_conn_unreliable(conn, ECOMM);
7459 goto drop;
7460 }
7461
7462 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
7463 skb->len);
7464 conn->rx_len -= skb->len;
7465
7466 if (!conn->rx_len) {
7467 			/* Complete frame received. l2cap_recv_frame
7468 			 * takes ownership of the skb, so set the
7469 			 * connection's rx_skb pointer to NULL first.
7470 			 */
7471 struct sk_buff *rx_skb = conn->rx_skb;
7472 conn->rx_skb = NULL;
7473 l2cap_recv_frame(conn, rx_skb);
7474 }
7475 break;
7476 }
7477
7478drop:
7479 kfree_skb(skb);
7480 return 0;
7481}
7482
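/* debugfs dump of all channels, one per line; the columns are
 * src dst state psm scid dcid imtu omtu sec_level mode.
 */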
7483static int l2cap_debugfs_show(struct seq_file *f, void *p)
7484{
7485 struct l2cap_chan *c;
7486
7487 read_lock(&chan_list_lock);
7488
7489 list_for_each_entry(c, &chan_list, global_l) {
7490 seq_printf(f, "%pMR %pMR %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
7491 &c->src, &c->dst,
7492 c->state, __le16_to_cpu(c->psm),
7493 c->scid, c->dcid, c->imtu, c->omtu,
7494 c->sec_level, c->mode);
7495 }
7496
7497 read_unlock(&chan_list_lock);
7498
7499 return 0;
7500}
7501
7502static int l2cap_debugfs_open(struct inode *inode, struct file *file)
7503{
7504 return single_open(file, l2cap_debugfs_show, inode->i_private);
7505}
7506
7507static const struct file_operations l2cap_debugfs_fops = {
7508 .open = l2cap_debugfs_open,
7509 .read = seq_read,
7510 .llseek = seq_lseek,
7511 .release = single_release,
7512};
7513
7514static struct dentry *l2cap_debugfs;
7515
7516int __init l2cap_init(void)
7517{
7518 int err;
7519
7520 err = l2cap_init_sockets();
7521 if (err < 0)
7522 return err;
7523
7524 if (IS_ERR_OR_NULL(bt_debugfs))
7525 return 0;
7526
7527 l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
7528 NULL, &l2cap_debugfs_fops);
7529
7530 debugfs_create_u16("l2cap_le_max_credits", 0644, bt_debugfs,
7531 &le_max_credits);
7532 debugfs_create_u16("l2cap_le_default_mps", 0644, bt_debugfs,
7533 &le_default_mps);
7534
7535 return 0;
7536}
7537
7538void l2cap_exit(void)
7539{
7540 debugfs_remove(l2cap_debugfs);
7541 l2cap_cleanup_sockets();
7542}
7543
7544module_param(disable_ertm, bool, 0644);
7545MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");