Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless
[deliverable/linux.git] / net / bluetooth / l2cap_core.c
... / ...
CommitLineData
1/*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
8
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
10
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
14
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
23
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
27*/
28
29/* Bluetooth L2CAP core. */
30
31#include <linux/module.h>
32
33#include <linux/debugfs.h>
34#include <linux/crc16.h>
35
36#include <net/bluetooth/bluetooth.h>
37#include <net/bluetooth/hci_core.h>
38#include <net/bluetooth/l2cap.h>
39
40#include "smp.h"
41#include "a2mp.h"
42#include "amp.h"
43
#define LE_FLOWCTL_MAX_CREDITS 65535

/* When set, ERTM mode is not offered.  NOTE(review): toggled externally
 * (presumably a module parameter) -- confirm where it is wired up.
 */
bool disable_ertm;

/* Locally supported feature mask and fixed-channel map advertised to
 * peers: fixed channels + unicast connectionless data reception.
 */
static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD;
static u8 l2cap_fixed_chan[8] = { L2CAP_FC_SIG_BREDR | L2CAP_FC_CONNLESS, };

/* Global list of all L2CAP channels, protected by chan_list_lock */
static LIST_HEAD(chan_list);
static DEFINE_RWLOCK(chan_list_lock);

/* Defaults for LE credit-based flow control */
static u16 le_max_credits = L2CAP_LE_MAX_CREDITS;
static u16 le_default_mps = L2CAP_LE_DEFAULT_MPS;

/* Forward declarations */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
				       u8 code, u8 ident, u16 dlen, void *data);
static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
			   void *data);
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);

static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
		     struct sk_buff_head *skbs, u8 event);
67static inline __u8 bdaddr_type(struct hci_conn *hcon, __u8 type)
68{
69 if (hcon->type == LE_LINK) {
70 if (type == ADDR_LE_DEV_PUBLIC)
71 return BDADDR_LE_PUBLIC;
72 else
73 return BDADDR_LE_RANDOM;
74 }
75
76 return BDADDR_BREDR;
77}
78
79/* ---- L2CAP channels ---- */
80
81static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
82 u16 cid)
83{
84 struct l2cap_chan *c;
85
86 list_for_each_entry(c, &conn->chan_l, list) {
87 if (c->dcid == cid)
88 return c;
89 }
90 return NULL;
91}
92
93static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
94 u16 cid)
95{
96 struct l2cap_chan *c;
97
98 list_for_each_entry(c, &conn->chan_l, list) {
99 if (c->scid == cid)
100 return c;
101 }
102 return NULL;
103}
104
105/* Find channel with given SCID.
106 * Returns locked channel. */
107static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
108 u16 cid)
109{
110 struct l2cap_chan *c;
111
112 mutex_lock(&conn->chan_lock);
113 c = __l2cap_get_chan_by_scid(conn, cid);
114 if (c)
115 l2cap_chan_lock(c);
116 mutex_unlock(&conn->chan_lock);
117
118 return c;
119}
120
121/* Find channel with given DCID.
122 * Returns locked channel.
123 */
124static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
125 u16 cid)
126{
127 struct l2cap_chan *c;
128
129 mutex_lock(&conn->chan_lock);
130 c = __l2cap_get_chan_by_dcid(conn, cid);
131 if (c)
132 l2cap_chan_lock(c);
133 mutex_unlock(&conn->chan_lock);
134
135 return c;
136}
137
138static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
139 u8 ident)
140{
141 struct l2cap_chan *c;
142
143 list_for_each_entry(c, &conn->chan_l, list) {
144 if (c->ident == ident)
145 return c;
146 }
147 return NULL;
148}
149
150static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
151 u8 ident)
152{
153 struct l2cap_chan *c;
154
155 mutex_lock(&conn->chan_lock);
156 c = __l2cap_get_chan_by_ident(conn, ident);
157 if (c)
158 l2cap_chan_lock(c);
159 mutex_unlock(&conn->chan_lock);
160
161 return c;
162}
163
164static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
165{
166 struct l2cap_chan *c;
167
168 list_for_each_entry(c, &chan_list, global_l) {
169 if (c->sport == psm && !bacmp(&c->src, src))
170 return c;
171 }
172 return NULL;
173}
174
175int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
176{
177 int err;
178
179 write_lock(&chan_list_lock);
180
181 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
182 err = -EADDRINUSE;
183 goto done;
184 }
185
186 if (psm) {
187 chan->psm = psm;
188 chan->sport = psm;
189 err = 0;
190 } else {
191 u16 p;
192
193 err = -EINVAL;
194 for (p = 0x1001; p < 0x1100; p += 2)
195 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
196 chan->psm = cpu_to_le16(p);
197 chan->sport = cpu_to_le16(p);
198 err = 0;
199 break;
200 }
201 }
202
203done:
204 write_unlock(&chan_list_lock);
205 return err;
206}
207EXPORT_SYMBOL_GPL(l2cap_add_psm);
208
/* Turn @chan into a fixed channel with source CID @scid.
 * Always returns 0.
 */
int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
{
	write_lock(&chan_list_lock);

	/* Override the defaults (which are for conn-oriented) */
	chan->omtu = L2CAP_DEFAULT_MTU;
	chan->chan_type = L2CAP_CHAN_FIXED;

	chan->scid = scid;

	write_unlock(&chan_list_lock);

	return 0;
}
223
224static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
225{
226 u16 cid, dyn_end;
227
228 if (conn->hcon->type == LE_LINK)
229 dyn_end = L2CAP_CID_LE_DYN_END;
230 else
231 dyn_end = L2CAP_CID_DYN_END;
232
233 for (cid = L2CAP_CID_DYN_START; cid < dyn_end; cid++) {
234 if (!__l2cap_get_chan_by_scid(conn, cid))
235 return cid;
236 }
237
238 return 0;
239}
240
/* Move @chan to @state and notify the channel owner (err == 0). */
static void l2cap_state_change(struct l2cap_chan *chan, int state)
{
	BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
	       state_to_string(state));

	chan->state = state;
	chan->ops->state_change(chan, state, 0);
}
249
/* Move @chan to @state and report @err to the channel owner in the
 * same state_change callback.
 */
static inline void l2cap_state_change_and_error(struct l2cap_chan *chan,
						int state, int err)
{
	chan->state = state;
	chan->ops->state_change(chan, chan->state, err);
}
256
/* Report @err to the channel owner without changing the state. */
static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
{
	chan->ops->state_change(chan, chan->state, err);
}
261
/* Arm the ERTM retransmission timer, but only while the monitor timer
 * is idle (the monitor timer supersedes retransmission) and a timeout
 * has actually been negotiated.
 */
static void __set_retrans_timer(struct l2cap_chan *chan)
{
	if (!delayed_work_pending(&chan->monitor_timer) &&
	    chan->retrans_timeout) {
		l2cap_set_timer(chan, &chan->retrans_timer,
				msecs_to_jiffies(chan->retrans_timeout));
	}
}
270
/* Arm the ERTM monitor timer; the retransmission timer is stopped
 * first since the two are mutually exclusive.
 */
static void __set_monitor_timer(struct l2cap_chan *chan)
{
	__clear_retrans_timer(chan);
	if (chan->monitor_timeout) {
		l2cap_set_timer(chan, &chan->monitor_timer,
				msecs_to_jiffies(chan->monitor_timeout));
	}
}
279
280static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
281 u16 seq)
282{
283 struct sk_buff *skb;
284
285 skb_queue_walk(head, skb) {
286 if (bt_cb(skb)->control.txseq == seq)
287 return skb;
288 }
289
290 return NULL;
291}
292
293/* ---- L2CAP sequence number lists ---- */
294
295/* For ERTM, ordered lists of sequence numbers must be tracked for
296 * SREJ requests that are received and for frames that are to be
297 * retransmitted. These seq_list functions implement a singly-linked
298 * list in an array, where membership in the list can also be checked
299 * in constant time. Items can also be added to the tail of the list
300 * and removed from the head in constant time, without further memory
301 * allocs or frees.
302 */
303
304static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
305{
306 size_t alloc_size, i;
307
308 /* Allocated size is a power of 2 to map sequence numbers
309 * (which may be up to 14 bits) in to a smaller array that is
310 * sized for the negotiated ERTM transmit windows.
311 */
312 alloc_size = roundup_pow_of_two(size);
313
314 seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
315 if (!seq_list->list)
316 return -ENOMEM;
317
318 seq_list->mask = alloc_size - 1;
319 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
320 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
321 for (i = 0; i < alloc_size; i++)
322 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
323
324 return 0;
325}
326
/* Release the backing array allocated by l2cap_seq_list_init(). */
static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
{
	kfree(seq_list->list);
}
331
332static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
333 u16 seq)
334{
335 /* Constant-time check for list membership */
336 return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
337}
338
/* Remove and return the sequence number at the head of the list.
 * NOTE(review): assumes the list is non-empty; popping an empty list
 * would index the array with L2CAP_SEQ_LIST_CLEAR -- confirm callers
 * always check first.
 */
static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
{
	u16 seq = seq_list->head;
	u16 mask = seq_list->mask;

	/* Advance head to the next linked entry and free this slot */
	seq_list->head = seq_list->list[seq & mask];
	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;

	/* The tail entry was popped: the list is now empty */
	if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
		seq_list->head = L2CAP_SEQ_LIST_CLEAR;
		seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
	}

	return seq;
}
354
/* Empty the list, releasing every slot.  A no-op when already empty. */
static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
{
	u16 i;

	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
		return;

	for (i = 0; i <= seq_list->mask; i++)
		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;

	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
}
368
/* Append @seq to the tail of the list in constant time.  Duplicate
 * appends are silently ignored (the slot is already occupied).
 */
static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
{
	u16 mask = seq_list->mask;

	/* All appends happen in constant time */

	/* Already a member: nothing to do */
	if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
		return;

	if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
		/* Empty list: @seq becomes the head as well */
		seq_list->head = seq;
	else
		/* Link the old tail forward to @seq */
		seq_list->list[seq_list->tail & mask] = seq;

	seq_list->tail = seq;
	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
}
386
/* Delayed-work handler for the channel timer: close the channel with
 * an error derived from its current state.
 */
static void l2cap_chan_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       chan_timer.work);
	struct l2cap_conn *conn = chan->conn;
	int reason;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	/* Lock order: conn->chan_lock before the channel lock, matching
	 * the rest of this file.
	 */
	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(chan);

	/* Timing out while (nearly) connected, or while connecting at a
	 * real security level, reads as a refusal; otherwise a timeout.
	 */
	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (chan->state == BT_CONNECT &&
		 chan->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	l2cap_chan_close(chan, reason);

	l2cap_chan_unlock(chan);

	/* ops->close() runs without the channel lock held */
	chan->ops->close(chan);
	mutex_unlock(&conn->chan_lock);

	/* Drop the reference held for this queued work.  NOTE(review):
	 * presumably taken when the timer was armed -- confirm against
	 * the timer helpers.
	 */
	l2cap_chan_put(chan);
}
416
/* Allocate and minimally initialize a new channel, adding it to the
 * global channel list.  Returns NULL on allocation failure.
 */
struct l2cap_chan *l2cap_chan_create(void)
{
	struct l2cap_chan *chan;

	/* GFP_ATOMIC: NOTE(review): presumably callable from atomic
	 * context -- confirm callers before relaxing to GFP_KERNEL.
	 */
	chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
	if (!chan)
		return NULL;

	mutex_init(&chan->lock);

	/* Make the channel visible on the global list */
	write_lock(&chan_list_lock);
	list_add(&chan->global_l, &chan_list);
	write_unlock(&chan_list_lock);

	INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);

	chan->state = BT_OPEN;

	kref_init(&chan->kref);

	/* This flag is cleared in l2cap_chan_ready() */
	set_bit(CONF_NOT_COMPLETE, &chan->conf_state);

	BT_DBG("chan %p", chan);

	return chan;
}
EXPORT_SYMBOL_GPL(l2cap_chan_create);
445
/* kref release callback: unlink the channel from the global list and
 * free it.  Invoked from l2cap_chan_put() when the last ref drops.
 */
static void l2cap_chan_destroy(struct kref *kref)
{
	struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);

	BT_DBG("chan %p", chan);

	write_lock(&chan_list_lock);
	list_del(&chan->global_l);
	write_unlock(&chan_list_lock);

	kfree(chan);
}
458
/* Take a reference on @c. */
void l2cap_chan_hold(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));

	kref_get(&c->kref);
}
465
/* Drop a reference on @c; frees it via l2cap_chan_destroy() when the
 * count reaches zero.
 */
void l2cap_chan_put(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));

	kref_put(&c->kref, l2cap_chan_destroy);
}
EXPORT_SYMBOL_GPL(l2cap_chan_put);
473
/* Reset @chan to the spec default ERTM/security/flush parameters.
 * Note remote_* mirror the local values until configuration completes.
 */
void l2cap_chan_set_defaults(struct l2cap_chan *chan)
{
	chan->fcs = L2CAP_FCS_CRC16;
	chan->max_tx = L2CAP_DEFAULT_MAX_TX;
	chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
	chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
	chan->remote_max_tx = chan->max_tx;
	chan->remote_tx_win = chan->tx_win;
	chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
	chan->sec_level = BT_SECURITY_LOW;
	chan->flush_to = L2CAP_DEFAULT_FLUSH_TO;
	chan->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
	chan->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
	/* Clears all CONF_* bits, including CONF_NOT_COMPLETE */
	chan->conf_state = 0;

	set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
}
EXPORT_SYMBOL_GPL(l2cap_chan_set_defaults);
492
/* Prepare @chan for LE credit-based flow control: reset reassembly
 * state, grant the default RX credits and size the MPS to the MTU.
 * TX credits start at 0 until granted by the peer.
 */
static void l2cap_le_flowctl_init(struct l2cap_chan *chan)
{
	chan->sdu = NULL;
	chan->sdu_last_frag = NULL;
	chan->sdu_len = 0;
	chan->tx_credits = 0;
	chan->rx_credits = le_max_credits;
	chan->mps = min_t(u16, chan->imtu, le_default_mps);

	skb_queue_head_init(&chan->tx_q);
}
504
/* Attach @chan to @conn: assign CIDs/MTU per channel type, set the EFS
 * defaults, and link the channel onto the connection's channel list.
 * NOTE(review): named as the unlocked variant of l2cap_chan_add() --
 * caller presumably holds conn->chan_lock; confirm.
 */
void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
	       __le16_to_cpu(chan->psm), chan->dcid);

	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	chan->conn = conn;

	switch (chan->chan_type) {
	case L2CAP_CHAN_CONN_ORIENTED:
		/* Alloc CID for connection-oriented socket */
		chan->scid = l2cap_alloc_cid(conn);
		if (conn->hcon->type == ACL_LINK)
			chan->omtu = L2CAP_DEFAULT_MTU;
		break;

	case L2CAP_CHAN_CONN_LESS:
		/* Connectionless socket */
		chan->scid = L2CAP_CID_CONN_LESS;
		chan->dcid = L2CAP_CID_CONN_LESS;
		chan->omtu = L2CAP_DEFAULT_MTU;
		break;

	case L2CAP_CHAN_FIXED:
		/* Caller will set CID and CID specific MTU values */
		break;

	default:
		/* Raw socket can send/recv signalling messages only */
		chan->scid = L2CAP_CID_SIGNALING;
		chan->dcid = L2CAP_CID_SIGNALING;
		chan->omtu = L2CAP_DEFAULT_MTU;
	}

	/* Default best-effort extended flow specification */
	chan->local_id = L2CAP_BESTEFFORT_ID;
	chan->local_stype = L2CAP_SERV_BESTEFFORT;
	chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
	chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
	chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
	chan->local_flush_to = L2CAP_EFS_DEFAULT_FLUSH_TO;

	/* The connection's channel list holds a channel reference, and
	 * the channel pins the underlying HCI connection.
	 */
	l2cap_chan_hold(chan);

	hci_conn_hold(conn->hcon);

	list_add(&chan->list, &conn->chan_l);
}
553
/* Locked wrapper around __l2cap_chan_add(). */
void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	mutex_lock(&conn->chan_lock);
	__l2cap_chan_add(conn, chan);
	mutex_unlock(&conn->chan_lock);
}
560
561void l2cap_chan_del(struct l2cap_chan *chan, int err)
562{
563 struct l2cap_conn *conn = chan->conn;
564
565 __clear_chan_timer(chan);
566
567 BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
568
569 chan->ops->teardown(chan, err);
570
571 if (conn) {
572 struct amp_mgr *mgr = conn->hcon->amp_mgr;
573 /* Delete from channel list */
574 list_del(&chan->list);
575
576 l2cap_chan_put(chan);
577
578 chan->conn = NULL;
579
580 if (chan->scid != L2CAP_CID_A2MP)
581 hci_conn_drop(conn->hcon);
582
583 if (mgr && mgr->bredr_chan == chan)
584 mgr->bredr_chan = NULL;
585 }
586
587 if (chan->hs_hchan) {
588 struct hci_chan *hs_hchan = chan->hs_hchan;
589
590 BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan);
591 amp_disconnect_logical_link(hs_hchan);
592 }
593
594 if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
595 return;
596
597 switch(chan->mode) {
598 case L2CAP_MODE_BASIC:
599 break;
600
601 case L2CAP_MODE_LE_FLOWCTL:
602 skb_queue_purge(&chan->tx_q);
603 break;
604
605 case L2CAP_MODE_ERTM:
606 __clear_retrans_timer(chan);
607 __clear_monitor_timer(chan);
608 __clear_ack_timer(chan);
609
610 skb_queue_purge(&chan->srej_q);
611
612 l2cap_seq_list_free(&chan->srej_list);
613 l2cap_seq_list_free(&chan->retrans_list);
614
615 /* fall through */
616
617 case L2CAP_MODE_STREAMING:
618 skb_queue_purge(&chan->tx_q);
619 break;
620 }
621
622 return;
623}
624EXPORT_SYMBOL_GPL(l2cap_chan_del);
625
/* Propagate @hcon's (possibly re-resolved) destination address and
 * address type to every channel on the connection.
 */
void l2cap_conn_update_id_addr(struct hci_conn *hcon)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan;

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		l2cap_chan_lock(chan);
		bacpy(&chan->dst, &hcon->dst);
		chan->dst_type = bdaddr_type(hcon, hcon->dst_type);
		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
642
643static void l2cap_chan_le_connect_reject(struct l2cap_chan *chan)
644{
645 struct l2cap_conn *conn = chan->conn;
646 struct l2cap_le_conn_rsp rsp;
647 u16 result;
648
649 if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
650 result = L2CAP_CR_AUTHORIZATION;
651 else
652 result = L2CAP_CR_BAD_PSM;
653
654 l2cap_state_change(chan, BT_DISCONN);
655
656 rsp.dcid = cpu_to_le16(chan->scid);
657 rsp.mtu = cpu_to_le16(chan->imtu);
658 rsp.mps = cpu_to_le16(chan->mps);
659 rsp.credits = cpu_to_le16(chan->rx_credits);
660 rsp.result = cpu_to_le16(result);
661
662 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
663 &rsp);
664}
665
666static void l2cap_chan_connect_reject(struct l2cap_chan *chan)
667{
668 struct l2cap_conn *conn = chan->conn;
669 struct l2cap_conn_rsp rsp;
670 u16 result;
671
672 if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
673 result = L2CAP_CR_SEC_BLOCK;
674 else
675 result = L2CAP_CR_BAD_PSM;
676
677 l2cap_state_change(chan, BT_DISCONN);
678
679 rsp.scid = cpu_to_le16(chan->dcid);
680 rsp.dcid = cpu_to_le16(chan->scid);
681 rsp.result = cpu_to_le16(result);
682 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
683
684 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
685}
686
/* Close @chan for @reason, choosing the shutdown path appropriate to
 * its current state (teardown, disconnect request, or reject).
 */
void l2cap_chan_close(struct l2cap_chan *chan, int reason)
{
	struct l2cap_conn *conn = chan->conn;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	switch (chan->state) {
	case BT_LISTEN:
		chan->ops->teardown(chan, 0);
		break;

	case BT_CONNECTED:
	case BT_CONFIG:
		/* Established connection-oriented channels get a proper
		 * disconnect request; the channel timer bounds how long
		 * we wait for the response.
		 */
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
			__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
			l2cap_send_disconn_req(chan, reason);
		} else
			l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT2:
		/* A pending incoming connection is explicitly rejected
		 * on whichever transport it arrived over.
		 */
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
			if (conn->hcon->type == ACL_LINK)
				l2cap_chan_connect_reject(chan);
			else if (conn->hcon->type == LE_LINK)
				l2cap_chan_le_connect_reject(chan);
		}

		l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT:
	case BT_DISCONN:
		l2cap_chan_del(chan, reason);
		break;

	default:
		chan->ops->teardown(chan, 0);
		break;
	}
}
EXPORT_SYMBOL(l2cap_chan_close);
729
/* Map the channel type, PSM and requested security level onto the HCI
 * authentication requirement to ask for on the link.  May upgrade a
 * BT_SECURITY_LOW level to BT_SECURITY_SDP for SDP/3DSP PSMs.
 */
static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
{
	switch (chan->chan_type) {
	case L2CAP_CHAN_RAW:
		/* Raw channels: dedicated bonding requirements */
		switch (chan->sec_level) {
		case BT_SECURITY_HIGH:
		case BT_SECURITY_FIPS:
			return HCI_AT_DEDICATED_BONDING_MITM;
		case BT_SECURITY_MEDIUM:
			return HCI_AT_DEDICATED_BONDING;
		default:
			return HCI_AT_NO_BONDING;
		}
		break;
	case L2CAP_CHAN_CONN_LESS:
		/* Connectionless: no bonding; 3DSP gets at least SDP level */
		if (chan->psm == cpu_to_le16(L2CAP_PSM_3DSP)) {
			if (chan->sec_level == BT_SECURITY_LOW)
				chan->sec_level = BT_SECURITY_SDP;
		}
		if (chan->sec_level == BT_SECURITY_HIGH ||
		    chan->sec_level == BT_SECURITY_FIPS)
			return HCI_AT_NO_BONDING_MITM;
		else
			return HCI_AT_NO_BONDING;
		break;
	case L2CAP_CHAN_CONN_ORIENTED:
		/* SDP channels never require bonding */
		if (chan->psm == cpu_to_le16(L2CAP_PSM_SDP)) {
			if (chan->sec_level == BT_SECURITY_LOW)
				chan->sec_level = BT_SECURITY_SDP;

			if (chan->sec_level == BT_SECURITY_HIGH ||
			    chan->sec_level == BT_SECURITY_FIPS)
				return HCI_AT_NO_BONDING_MITM;
			else
				return HCI_AT_NO_BONDING;
		}
		/* fall through */
	default:
		/* Everything else: general bonding requirements */
		switch (chan->sec_level) {
		case BT_SECURITY_HIGH:
		case BT_SECURITY_FIPS:
			return HCI_AT_GENERAL_BONDING_MITM;
		case BT_SECURITY_MEDIUM:
			return HCI_AT_GENERAL_BONDING;
		default:
			return HCI_AT_NO_BONDING;
		}
		break;
	}
}
780
781/* Service level security */
782int l2cap_chan_check_security(struct l2cap_chan *chan, bool initiator)
783{
784 struct l2cap_conn *conn = chan->conn;
785 __u8 auth_type;
786
787 if (conn->hcon->type == LE_LINK)
788 return smp_conn_security(conn->hcon, chan->sec_level);
789
790 auth_type = l2cap_get_auth_type(chan);
791
792 return hci_conn_security(conn->hcon, chan->sec_level, auth_type,
793 initiator);
794}
795
796static u8 l2cap_get_ident(struct l2cap_conn *conn)
797{
798 u8 id;
799
800 /* Get next available identificator.
801 * 1 - 128 are used by kernel.
802 * 129 - 199 are reserved.
803 * 200 - 254 are used by utilities like l2ping, etc.
804 */
805
806 mutex_lock(&conn->ident_lock);
807
808 if (++conn->tx_ident > 128)
809 conn->tx_ident = 1;
810
811 id = conn->tx_ident;
812
813 mutex_unlock(&conn->ident_lock);
814
815 return id;
816}
817
818static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
819 void *data)
820{
821 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
822 u8 flags;
823
824 BT_DBG("code 0x%2.2x", code);
825
826 if (!skb)
827 return;
828
829 if (lmp_no_flush_capable(conn->hcon->hdev))
830 flags = ACL_START_NO_FLUSH;
831 else
832 flags = ACL_START;
833
834 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
835 skb->priority = HCI_PRIO_MAX;
836
837 hci_send_acl(conn->hchan, skb, flags);
838}
839
840static bool __chan_is_moving(struct l2cap_chan *chan)
841{
842 return chan->move_state != L2CAP_MOVE_STABLE &&
843 chan->move_state != L2CAP_MOVE_WAIT_PREPARE;
844}
845
/* Transmit @skb on @chan, routing via the high-speed (AMP) hci_chan
 * when one is attached, otherwise over the ACL link.
 */
static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct hci_conn *hcon = chan->conn->hcon;
	u16 flags;

	BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
	       skb->priority);

	/* Channel lives on an AMP link (and is not mid-move): send on
	 * the high-speed channel; drop the frame if it has vanished.
	 */
	if (chan->hs_hcon && !__chan_is_moving(chan)) {
		if (chan->hs_hchan)
			hci_send_acl(chan->hs_hchan, skb, ACL_COMPLETE);
		else
			kfree_skb(skb);

		return;
	}

	/* Non-flushable start packet for non-flushable channels on
	 * capable controllers.
	 */
	if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
	    lmp_no_flush_capable(hcon->hdev))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
	hci_send_acl(chan->conn->hchan, skb, flags);
}
872
873static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
874{
875 control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
876 control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
877
878 if (enh & L2CAP_CTRL_FRAME_TYPE) {
879 /* S-Frame */
880 control->sframe = 1;
881 control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
882 control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
883
884 control->sar = 0;
885 control->txseq = 0;
886 } else {
887 /* I-Frame */
888 control->sframe = 0;
889 control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
890 control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
891
892 control->poll = 0;
893 control->super = 0;
894 }
895}
896
897static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
898{
899 control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
900 control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
901
902 if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
903 /* S-Frame */
904 control->sframe = 1;
905 control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
906 control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
907
908 control->sar = 0;
909 control->txseq = 0;
910 } else {
911 /* I-Frame */
912 control->sframe = 0;
913 control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
914 control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
915
916 control->poll = 0;
917 control->super = 0;
918 }
919}
920
921static inline void __unpack_control(struct l2cap_chan *chan,
922 struct sk_buff *skb)
923{
924 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
925 __unpack_extended_control(get_unaligned_le32(skb->data),
926 &bt_cb(skb)->control);
927 skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
928 } else {
929 __unpack_enhanced_control(get_unaligned_le16(skb->data),
930 &bt_cb(skb)->control);
931 skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
932 }
933}
934
935static u32 __pack_extended_control(struct l2cap_ctrl *control)
936{
937 u32 packed;
938
939 packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
940 packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
941
942 if (control->sframe) {
943 packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
944 packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
945 packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
946 } else {
947 packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
948 packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
949 }
950
951 return packed;
952}
953
954static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
955{
956 u16 packed;
957
958 packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
959 packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
960
961 if (control->sframe) {
962 packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
963 packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
964 packed |= L2CAP_CTRL_FRAME_TYPE;
965 } else {
966 packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
967 packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
968 }
969
970 return packed;
971}
972
973static inline void __pack_control(struct l2cap_chan *chan,
974 struct l2cap_ctrl *control,
975 struct sk_buff *skb)
976{
977 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
978 put_unaligned_le32(__pack_extended_control(control),
979 skb->data + L2CAP_HDR_SIZE);
980 } else {
981 put_unaligned_le16(__pack_enhanced_control(control),
982 skb->data + L2CAP_HDR_SIZE);
983 }
984}
985
986static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
987{
988 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
989 return L2CAP_EXT_HDR_SIZE;
990 else
991 return L2CAP_ENH_HDR_SIZE;
992}
993
/* Build a complete S-frame PDU for @chan carrying the already-packed
 * @control field.  Returns the skb or ERR_PTR(-ENOMEM).
 */
static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
					       u32 control)
{
	struct sk_buff *skb;
	struct l2cap_hdr *lh;
	int hlen = __ertm_hdr_size(chan);

	/* Reserve room for the FCS footer when enabled */
	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	skb = bt_skb_alloc(hlen, GFP_KERNEL);

	if (!skb)
		return ERR_PTR(-ENOMEM);

	/* Basic header: length excludes the basic header itself */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);

	/* Control field: 32-bit extended or 16-bit enhanced */
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	/* FCS covers everything assembled so far */
	if (chan->fcs == L2CAP_FCS_CRC16) {
		u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
		put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
	}

	skb->priority = HCI_PRIO_MAX;
	return skb;
}
1026
/* Send the supervisory frame described by @control, updating the ERTM
 * bookkeeping (F-bit, RNR_SENT, last acked sequence) as a side effect.
 * No-op for non-S-frames or while an AMP channel move is in progress.
 */
static void l2cap_send_sframe(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;
	u32 control_field;

	BT_DBG("chan %p, control %p", chan, control);

	if (!control->sframe)
		return;

	if (__chan_is_moving(chan))
		return;

	/* A pending F-bit is carried on the next non-poll S-frame */
	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
	    !control->poll)
		control->final = 1;

	/* Track whether we currently report local busy to the peer */
	if (control->super == L2CAP_SUPER_RR)
		clear_bit(CONN_RNR_SENT, &chan->conn_state);
	else if (control->super == L2CAP_SUPER_RNR)
		set_bit(CONN_RNR_SENT, &chan->conn_state);

	/* Non-SREJ frames acknowledge reqseq, so the ack timer can be
	 * stopped.
	 */
	if (control->super != L2CAP_SUPER_SREJ) {
		chan->last_acked_seq = control->reqseq;
		__clear_ack_timer(chan);
	}

	BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
	       control->final, control->poll, control->super);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		control_field = __pack_extended_control(control);
	else
		control_field = __pack_enhanced_control(control);

	skb = l2cap_create_sframe_pdu(chan, control_field);
	if (!IS_ERR(skb))
		l2cap_do_send(chan, skb);
}
1067
1068static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
1069{
1070 struct l2cap_ctrl control;
1071
1072 BT_DBG("chan %p, poll %d", chan, poll);
1073
1074 memset(&control, 0, sizeof(control));
1075 control.sframe = 1;
1076 control.poll = poll;
1077
1078 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
1079 control.super = L2CAP_SUPER_RNR;
1080 else
1081 control.super = L2CAP_SUPER_RR;
1082
1083 control.reqseq = chan->buffer_seq;
1084 l2cap_send_sframe(chan, &control);
1085}
1086
1087static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
1088{
1089 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
1090 return true;
1091
1092 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
1093}
1094
1095static bool __amp_capable(struct l2cap_chan *chan)
1096{
1097 struct l2cap_conn *conn = chan->conn;
1098 struct hci_dev *hdev;
1099 bool amp_available = false;
1100
1101 if (!conn->hs_enabled)
1102 return false;
1103
1104 if (!(conn->fixed_chan_mask & L2CAP_FC_A2MP))
1105 return false;
1106
1107 read_lock(&hci_dev_list_lock);
1108 list_for_each_entry(hdev, &hci_dev_list, list) {
1109 if (hdev->amp_type != AMP_TYPE_BREDR &&
1110 test_bit(HCI_UP, &hdev->flags)) {
1111 amp_available = true;
1112 break;
1113 }
1114 }
1115 read_unlock(&hci_dev_list_lock);
1116
1117 if (chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED)
1118 return amp_available;
1119
1120 return false;
1121}
1122
static bool l2cap_check_efs(struct l2cap_chan *chan)
{
	/* Check EFS parameters */
	/* NOTE(review): stub -- always accepts; extended flow spec
	 * validation has not been implemented here.
	 */
	return true;
}
1128
1129void l2cap_send_conn_req(struct l2cap_chan *chan)
1130{
1131 struct l2cap_conn *conn = chan->conn;
1132 struct l2cap_conn_req req;
1133
1134 req.scid = cpu_to_le16(chan->scid);
1135 req.psm = chan->psm;
1136
1137 chan->ident = l2cap_get_ident(conn);
1138
1139 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
1140
1141 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
1142}
1143
1144static void l2cap_send_create_chan_req(struct l2cap_chan *chan, u8 amp_id)
1145{
1146 struct l2cap_create_chan_req req;
1147 req.scid = cpu_to_le16(chan->scid);
1148 req.psm = chan->psm;
1149 req.amp_id = amp_id;
1150
1151 chan->ident = l2cap_get_ident(chan->conn);
1152
1153 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_REQ,
1154 sizeof(req), &req);
1155}
1156
/* Prepare an ERTM channel for an AMP move: stop timers, rewind
 * retransmission bookkeeping and park the state machines.  No-op for
 * non-ERTM channels.
 */
static void l2cap_move_setup(struct l2cap_chan *chan)
{
	struct sk_buff *skb;

	BT_DBG("chan %p", chan);

	if (chan->mode != L2CAP_MODE_ERTM)
		return;

	__clear_retrans_timer(chan);
	__clear_monitor_timer(chan);
	__clear_ack_timer(chan);

	/* Reset the retry count on every frame already sent at least
	 * once; stop at the first never-transmitted frame.
	 */
	chan->retry_count = 0;
	skb_queue_walk(&chan->tx_q, skb) {
		if (bt_cb(skb)->control.retries)
			bt_cb(skb)->control.retries = 1;
		else
			break;
	}

	chan->expected_tx_seq = chan->buffer_seq;

	/* Drop all SREJ/retransmit state accumulated so far */
	clear_bit(CONN_REJ_ACT, &chan->conn_state);
	clear_bit(CONN_SREJ_ACT, &chan->conn_state);
	l2cap_seq_list_clear(&chan->retrans_list);
	l2cap_seq_list_clear(&chan->srej_list);
	skb_queue_purge(&chan->srej_q);

	chan->tx_state = L2CAP_TX_STATE_XMIT;
	chan->rx_state = L2CAP_RX_STATE_MOVE;

	/* Hold off transmission until the move completes */
	set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
}
1191
1192static void l2cap_move_done(struct l2cap_chan *chan)
1193{
1194 u8 move_role = chan->move_role;
1195 BT_DBG("chan %p", chan);
1196
1197 chan->move_state = L2CAP_MOVE_STABLE;
1198 chan->move_role = L2CAP_MOVE_ROLE_NONE;
1199
1200 if (chan->mode != L2CAP_MODE_ERTM)
1201 return;
1202
1203 switch (move_role) {
1204 case L2CAP_MOVE_ROLE_INITIATOR:
1205 l2cap_tx(chan, NULL, NULL, L2CAP_EV_EXPLICIT_POLL);
1206 chan->rx_state = L2CAP_RX_STATE_WAIT_F;
1207 break;
1208 case L2CAP_MOVE_ROLE_RESPONDER:
1209 chan->rx_state = L2CAP_RX_STATE_WAIT_P;
1210 break;
1211 }
1212}
1213
/* Transition a fully configured channel to BT_CONNECTED and notify
 * its owner through the ready() callback.
 */
static void l2cap_chan_ready(struct l2cap_chan *chan)
{
	/* This clears all conf flags, including CONF_NOT_COMPLETE */
	chan->conf_state = 0;
	__clear_chan_timer(chan);

	/* An LE flow control channel with no TX credits starts out
	 * suspended until the remote grants credits.
	 */
	if (chan->mode == L2CAP_MODE_LE_FLOWCTL && !chan->tx_credits)
		chan->ops->suspend(chan);

	chan->state = BT_CONNECTED;

	chan->ops->ready(chan);
}
1227
1228static void l2cap_le_connect(struct l2cap_chan *chan)
1229{
1230 struct l2cap_conn *conn = chan->conn;
1231 struct l2cap_le_conn_req req;
1232
1233 if (test_and_set_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags))
1234 return;
1235
1236 req.psm = chan->psm;
1237 req.scid = cpu_to_le16(chan->scid);
1238 req.mtu = cpu_to_le16(chan->imtu);
1239 req.mps = cpu_to_le16(chan->mps);
1240 req.credits = cpu_to_le16(chan->rx_credits);
1241
1242 chan->ident = l2cap_get_ident(conn);
1243
1244 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_REQ,
1245 sizeof(req), &req);
1246}
1247
1248static void l2cap_le_start(struct l2cap_chan *chan)
1249{
1250 struct l2cap_conn *conn = chan->conn;
1251
1252 if (!smp_conn_security(conn->hcon, chan->sec_level))
1253 return;
1254
1255 if (!chan->psm) {
1256 l2cap_chan_ready(chan);
1257 return;
1258 }
1259
1260 if (chan->state == BT_CONNECT)
1261 l2cap_le_connect(chan);
1262}
1263
1264static void l2cap_start_connection(struct l2cap_chan *chan)
1265{
1266 if (__amp_capable(chan)) {
1267 BT_DBG("chan %p AMP capable: discover AMPs", chan);
1268 a2mp_discover_amp(chan);
1269 } else if (chan->conn->hcon->type == LE_LINK) {
1270 l2cap_le_start(chan);
1271 } else {
1272 l2cap_send_conn_req(chan);
1273 }
1274}
1275
/* Kick off connection establishment for a channel.  For BR/EDR the
 * remote feature mask must be known first; if it is not, send an
 * information request and let l2cap_conn_start() resume setup when
 * the response (or timeout) arrives.
 */
static void l2cap_do_start(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;

	if (conn->hcon->type == LE_LINK) {
		l2cap_le_start(chan);
		return;
	}

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
		/* Feature mask request still outstanding */
		if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
			return;

		if (l2cap_chan_check_security(chan, true) &&
		    __l2cap_no_conn_pending(chan)) {
			l2cap_start_connection(chan);
		}
	} else {
		struct l2cap_info_req req;
		req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		/* Fall back to l2cap_info_timeout() if no response comes */
		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

		l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
			       sizeof(req), &req);
	}
}
1306
1307static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1308{
1309 u32 local_feat_mask = l2cap_feat_mask;
1310 if (!disable_ertm)
1311 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1312
1313 switch (mode) {
1314 case L2CAP_MODE_ERTM:
1315 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1316 case L2CAP_MODE_STREAMING:
1317 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
1318 default:
1319 return 0x00;
1320 }
1321}
1322
/* Initiate disconnection of a channel, moving it to BT_DISCONN with
 * @err as the channel error.  A2MP channels have no L2CAP disconnect
 * signalling of their own, so they only change state.
 */
static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_disconn_req req;

	if (!conn)
		return;

	/* Connected ERTM channels have timers that must not fire again */
	if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);
	}

	if (chan->scid == L2CAP_CID_A2MP) {
		l2cap_state_change(chan, BT_DISCONN);
		return;
	}

	req.dcid = cpu_to_le16(chan->dcid);
	req.scid = cpu_to_le16(chan->scid);
	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
		       sizeof(req), &req);

	l2cap_state_change_and_error(chan, BT_DISCONN, err);
}
1349
1350/* ---- L2CAP connections ---- */
/* Walk all channels on a connection and advance their setup:
 * outgoing channels in BT_CONNECT are started (security and mode
 * support permitting) and incoming channels in BT_CONNECT2 receive a
 * connect response.  Called once security completes or the remote
 * feature mask becomes known.
 */
static void l2cap_conn_start(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan, *tmp;

	BT_DBG("conn %p", conn);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			if (!l2cap_chan_check_security(chan, true) ||
			    !__l2cap_no_conn_pending(chan)) {
				l2cap_chan_unlock(chan);
				continue;
			}

			/* Close the channel if its configured mode is not
			 * supported by the remote and the mode was fixed by
			 * the local device.
			 */
			if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
			    && test_bit(CONF_STATE2_DEVICE,
					&chan->conf_state)) {
				l2cap_chan_close(chan, ECONNRESET);
				l2cap_chan_unlock(chan);
				continue;
			}

			l2cap_start_connection(chan);

		} else if (chan->state == BT_CONNECT2) {
			struct l2cap_conn_rsp rsp;
			char buf[128];
			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);

			if (l2cap_chan_check_security(chan, false)) {
				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
					/* Wait for user space to accept */
					rsp.result = cpu_to_le16(L2CAP_CR_PEND);
					rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
					chan->ops->defer(chan);

				} else {
					l2cap_state_change(chan, BT_CONFIG);
					rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
					rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
				}
			} else {
				rsp.result = cpu_to_le16(L2CAP_CR_PEND);
				rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
			}

			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			/* Only send the first config request, and only on a
			 * successful connect response.
			 */
			if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
			    rsp.result != L2CAP_CR_SUCCESS) {
				l2cap_chan_unlock(chan);
				continue;
			}

			set_bit(CONF_REQ_SENT, &chan->conf_state);
			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
				       l2cap_build_conf_req(chan, buf), buf);
			chan->num_conf_req++;
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
1426
/* LE link became ready: trigger pending security and, as slave,
 * request a connection parameter update if the current interval is
 * outside the configured range.
 */
static void l2cap_le_conn_ready(struct l2cap_conn *conn)
{
	struct hci_conn *hcon = conn->hcon;
	struct hci_dev *hdev = hcon->hdev;

	BT_DBG("%s conn %p", hdev->name, conn);

	/* For outgoing pairing which doesn't necessarily have an
	 * associated socket (e.g. mgmt_pair_device).
	 */
	if (hcon->out)
		smp_conn_security(hcon, hcon->pending_sec_level);

	/* For LE slave connections, make sure the connection interval
	 * is in the range of the minimum and maximum interval that has
	 * been configured for this connection. If not, then trigger
	 * the connection update procedure.
	 */
	if (hcon->role == HCI_ROLE_SLAVE &&
	    (hcon->le_conn_interval < hcon->le_conn_min_interval ||
	     hcon->le_conn_interval > hcon->le_conn_max_interval)) {
		struct l2cap_conn_param_update_req req;

		req.min = cpu_to_le16(hcon->le_conn_min_interval);
		req.max = cpu_to_le16(hcon->le_conn_max_interval);
		req.latency = cpu_to_le16(hcon->le_conn_latency);
		req.to_multiplier = cpu_to_le16(hcon->le_supv_timeout);

		l2cap_send_cmd(conn, l2cap_get_ident(conn),
			       L2CAP_CONN_PARAM_UPDATE_REQ, sizeof(req), &req);
	}
}
1459
/* The underlying link is fully established: start or complete setup
 * of every channel on the connection and release queued RX frames.
 */
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan;
	struct hci_conn *hcon = conn->hcon;

	BT_DBG("conn %p", conn);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {

		l2cap_chan_lock(chan);

		/* A2MP channels become ready via their own protocol */
		if (chan->scid == L2CAP_CID_A2MP) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (hcon->type == LE_LINK) {
			l2cap_le_start(chan);
		} else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			/* Fixed/connectionless channels need no setup */
			l2cap_chan_ready(chan);

		} else if (chan->state == BT_CONNECT) {
			l2cap_do_start(chan);
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);

	if (hcon->type == LE_LINK)
		l2cap_le_conn_ready(conn);

	/* Process frames that arrived before the link was ready */
	queue_work(hcon->hdev->workqueue, &conn->pending_rx_work);
}
1497
/* Notify sockets that we cannot guarantee reliability anymore */
1499static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1500{
1501 struct l2cap_chan *chan;
1502
1503 BT_DBG("conn %p", conn);
1504
1505 mutex_lock(&conn->chan_lock);
1506
1507 list_for_each_entry(chan, &conn->chan_l, list) {
1508 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1509 l2cap_chan_set_err(chan, err);
1510 }
1511
1512 mutex_unlock(&conn->chan_lock);
1513}
1514
1515static void l2cap_info_timeout(struct work_struct *work)
1516{
1517 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1518 info_timer.work);
1519
1520 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
1521 conn->info_ident = 0;
1522
1523 l2cap_conn_start(conn);
1524}
1525
1526/*
1527 * l2cap_user
1528 * External modules can register l2cap_user objects on l2cap_conn. The ->probe
1529 * callback is called during registration. The ->remove callback is called
1530 * during unregistration.
 * An l2cap_user object is unregistered either explicitly or implicitly when
 * the underlying l2cap_conn object is deleted. This guarantees that l2cap->hcon,
1533 * l2cap->hchan, .. are valid as long as the remove callback hasn't been called.
1534 * External modules must own a reference to the l2cap_conn object if they intend
1535 * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
1536 * any time if they don't.
1537 */
1538
/* Register an external l2cap_user on @conn and invoke its probe
 * callback.  Returns 0 on success, -EINVAL if the user is already
 * registered, -ENODEV if the connection is being torn down, or the
 * probe callback's error.
 */
int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
{
	struct hci_dev *hdev = conn->hcon->hdev;
	int ret;

	/* We need to check whether l2cap_conn is registered. If it is not, we
	 * must not register the l2cap_user. l2cap_conn_del() unregisters
	 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
	 * relies on the parent hci_conn object to be locked. This itself relies
	 * on the hci_dev object to be locked. So we must lock the hci device
	 * here, too. */

	hci_dev_lock(hdev);

	/* Non-NULL list pointers mean the user is already registered */
	if (user->list.next || user->list.prev) {
		ret = -EINVAL;
		goto out_unlock;
	}

	/* conn->hchan is NULL after l2cap_conn_del() was called */
	if (!conn->hchan) {
		ret = -ENODEV;
		goto out_unlock;
	}

	ret = user->probe(conn, user);
	if (ret)
		goto out_unlock;

	list_add(&user->list, &conn->users);
	ret = 0;

out_unlock:
	hci_dev_unlock(hdev);
	return ret;
}
EXPORT_SYMBOL(l2cap_register_user);
1576
1577void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
1578{
1579 struct hci_dev *hdev = conn->hcon->hdev;
1580
1581 hci_dev_lock(hdev);
1582
1583 if (!user->list.next || !user->list.prev)
1584 goto out_unlock;
1585
1586 list_del(&user->list);
1587 user->list.next = NULL;
1588 user->list.prev = NULL;
1589 user->remove(conn, user);
1590
1591out_unlock:
1592 hci_dev_unlock(hdev);
1593}
1594EXPORT_SYMBOL(l2cap_unregister_user);
1595
1596static void l2cap_unregister_all_users(struct l2cap_conn *conn)
1597{
1598 struct l2cap_user *user;
1599
1600 while (!list_empty(&conn->users)) {
1601 user = list_first_entry(&conn->users, struct l2cap_user, list);
1602 list_del(&user->list);
1603 user->list.next = NULL;
1604 user->list.prev = NULL;
1605 user->remove(conn, user);
1606 }
1607}
1608
/* Tear down an l2cap_conn: flush pending work, notify registered
 * users, kill all channels and drop the reference that keeps the
 * object alive.  Relies on the parent hci_conn/hci_dev locking.
 */
static void l2cap_conn_del(struct hci_conn *hcon, int err)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan, *l;

	if (!conn)
		return;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	kfree_skb(conn->rx_skb);

	skb_queue_purge(&conn->pending_rx);

	/* We can not call flush_work(&conn->pending_rx_work) here since we
	 * might block if we are running on a worker from the same workqueue
	 * pending_rx_work is waiting on.
	 */
	if (work_pending(&conn->pending_rx_work))
		cancel_work_sync(&conn->pending_rx_work);

	if (work_pending(&conn->disconn_work))
		cancel_work_sync(&conn->disconn_work);

	l2cap_unregister_all_users(conn);

	/* Kill channels */
	mutex_lock(&conn->chan_lock);

	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
		/* Hold the channel so it survives l2cap_chan_del() until
		 * the ops->close() callback has run.
		 */
		l2cap_chan_hold(chan);
		l2cap_chan_lock(chan);

		l2cap_chan_del(chan, err);

		l2cap_chan_unlock(chan);

		chan->ops->close(chan);
		l2cap_chan_put(chan);
	}

	mutex_unlock(&conn->chan_lock);

	hci_chan_del(conn->hchan);

	/* Stop the info timer if a feature mask request is in flight */
	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		cancel_delayed_work_sync(&conn->info_timer);

	hcon->l2cap_data = NULL;
	conn->hchan = NULL;
	l2cap_conn_put(conn);
}
1661
1662static void disconn_work(struct work_struct *work)
1663{
1664 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1665 disconn_work);
1666
1667 BT_DBG("conn %p", conn);
1668
1669 l2cap_conn_del(conn->hcon, conn->disconn_err);
1670}
1671
1672void l2cap_conn_shutdown(struct l2cap_conn *conn, int err)
1673{
1674 struct hci_dev *hdev = conn->hcon->hdev;
1675
1676 BT_DBG("conn %p err %d", conn, err);
1677
1678 conn->disconn_err = err;
1679 queue_work(hdev->workqueue, &conn->disconn_work);
1680}
1681
1682static void l2cap_conn_free(struct kref *ref)
1683{
1684 struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);
1685
1686 hci_conn_put(conn->hcon);
1687 kfree(conn);
1688}
1689
/* Take a reference on an l2cap_conn. */
void l2cap_conn_get(struct l2cap_conn *conn)
{
	kref_get(&conn->ref);
}
EXPORT_SYMBOL(l2cap_conn_get);
1695
/* Drop a reference on an l2cap_conn; frees it when the count hits 0. */
void l2cap_conn_put(struct l2cap_conn *conn)
{
	kref_put(&conn->ref, l2cap_conn_free);
}
EXPORT_SYMBOL(l2cap_conn_put);
1701
1702/* ---- Socket interface ---- */
1703
1704/* Find socket with psm and source / destination bdaddr.
1705 * Returns closest match.
1706 */
/* Find a global channel on @psm matching the given source/destination
 * addresses and link type.  An exact address match wins immediately;
 * otherwise the closest BDADDR_ANY wildcard match is returned.  The
 * returned channel (if any) has its refcount held by this function.
 */
static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
						   bdaddr_t *src,
						   bdaddr_t *dst,
						   u8 link_type)
{
	struct l2cap_chan *c, *c1 = NULL;

	read_lock(&chan_list_lock);

	list_for_each_entry(c, &chan_list, global_l) {
		if (state && c->state != state)
			continue;

		/* The channel's source address type must match the link */
		if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR)
			continue;

		if (link_type == LE_LINK && c->src_type == BDADDR_BREDR)
			continue;

		if (c->psm == psm) {
			int src_match, dst_match;
			int src_any, dst_any;

			/* Exact match. */
			src_match = !bacmp(&c->src, src);
			dst_match = !bacmp(&c->dst, dst);
			if (src_match && dst_match) {
				l2cap_chan_hold(c);
				read_unlock(&chan_list_lock);
				return c;
			}

			/* Closest match */
			src_any = !bacmp(&c->src, BDADDR_ANY);
			dst_any = !bacmp(&c->dst, BDADDR_ANY);
			if ((src_match && dst_any) || (src_any && dst_match) ||
			    (src_any && dst_any))
				c1 = c;
		}
	}

	if (c1)
		l2cap_chan_hold(c1);

	read_unlock(&chan_list_lock);

	return c1;
}
1755
1756static void l2cap_monitor_timeout(struct work_struct *work)
1757{
1758 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1759 monitor_timer.work);
1760
1761 BT_DBG("chan %p", chan);
1762
1763 l2cap_chan_lock(chan);
1764
1765 if (!chan->conn) {
1766 l2cap_chan_unlock(chan);
1767 l2cap_chan_put(chan);
1768 return;
1769 }
1770
1771 l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);
1772
1773 l2cap_chan_unlock(chan);
1774 l2cap_chan_put(chan);
1775}
1776
1777static void l2cap_retrans_timeout(struct work_struct *work)
1778{
1779 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1780 retrans_timer.work);
1781
1782 BT_DBG("chan %p", chan);
1783
1784 l2cap_chan_lock(chan);
1785
1786 if (!chan->conn) {
1787 l2cap_chan_unlock(chan);
1788 l2cap_chan_put(chan);
1789 return;
1790 }
1791
1792 l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
1793 l2cap_chan_unlock(chan);
1794 l2cap_chan_put(chan);
1795}
1796
/* Transmit queued SDU fragments in streaming mode: each frame gets
 * the next TX sequence number and an optional FCS, and is sent
 * immediately with no retransmission bookkeeping.
 */
static void l2cap_streaming_send(struct l2cap_chan *chan,
				 struct sk_buff_head *skbs)
{
	struct sk_buff *skb;
	struct l2cap_ctrl *control;

	BT_DBG("chan %p, skbs %p", chan, skbs);

	/* Hold off while an AMP channel move is in progress */
	if (__chan_is_moving(chan))
		return;

	skb_queue_splice_tail_init(skbs, &chan->tx_q);

	while (!skb_queue_empty(&chan->tx_q)) {

		skb = skb_dequeue(&chan->tx_q);

		bt_cb(skb)->control.retries = 1;
		control = &bt_cb(skb)->control;

		/* Streaming mode never acknowledges, so reqseq is 0 */
		control->reqseq = 0;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		l2cap_do_send(chan, skb);

		BT_DBG("Sent txseq %u", control->txseq);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->frames_sent++;
	}
}
1835
/* Transmit new I-frames from the TX queue, as many as the remote TX
 * window allows.  Returns the number of frames sent, or -ENOTCONN if
 * the channel is not connected.
 */
static int l2cap_ertm_send(struct l2cap_chan *chan)
{
	struct sk_buff *skb, *tx_skb;
	struct l2cap_ctrl *control;
	int sent = 0;

	BT_DBG("chan %p", chan);

	if (chan->state != BT_CONNECTED)
		return -ENOTCONN;

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return 0;

	/* Hold off while an AMP channel move is in progress */
	if (__chan_is_moving(chan))
		return 0;

	while (chan->tx_send_head &&
	       chan->unacked_frames < chan->remote_tx_win &&
	       chan->tx_state == L2CAP_TX_STATE_XMIT) {

		skb = chan->tx_send_head;

		bt_cb(skb)->control.retries = 1;
		control = &bt_cb(skb)->control;

		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control->final = 1;

		/* Every I-frame piggybacks an acknowledgment in reqseq */
		control->reqseq = chan->buffer_seq;
		chan->last_acked_seq = chan->buffer_seq;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		/* Clone after data has been modified. Data is assumed to be
		   read-only (for locking purposes) on cloned sk_buffs.
		 */
		tx_skb = skb_clone(skb, GFP_KERNEL);

		if (!tx_skb)
			break;

		__set_retrans_timer(chan);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->unacked_frames++;
		chan->frames_sent++;
		sent++;

		if (skb_queue_is_last(&chan->tx_q, skb))
			chan->tx_send_head = NULL;
		else
			chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);

		l2cap_do_send(chan, tx_skb);
		BT_DBG("Sent txseq %u", control->txseq);
	}

	BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
	       chan->unacked_frames, skb_queue_len(&chan->tx_q));

	return sent;
}
1905
/* Retransmit every I-frame whose sequence number is queued on
 * chan->retrans_list, enforcing the channel's max_tx retry limit.
 */
static void l2cap_ertm_resend(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	struct sk_buff *skb;
	struct sk_buff *tx_skb;
	u16 seq;

	BT_DBG("chan %p", chan);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	/* Hold off while an AMP channel move is in progress */
	if (__chan_is_moving(chan))
		return;

	while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
		seq = l2cap_seq_list_pop(&chan->retrans_list);

		skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
		if (!skb) {
			BT_DBG("Error: Can't retransmit seq %d, frame missing",
			       seq);
			continue;
		}

		bt_cb(skb)->control.retries++;
		control = bt_cb(skb)->control;

		/* Give up and disconnect once the retry limit is exceeded */
		if (chan->max_tx != 0 &&
		    bt_cb(skb)->control.retries > chan->max_tx) {
			BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
			l2cap_send_disconn_req(chan, ECONNRESET);
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		/* Refresh the piggybacked ack and the final bit */
		control.reqseq = chan->buffer_seq;
		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control.final = 1;
		else
			control.final = 0;

		if (skb_cloned(skb)) {
			/* Cloned sk_buffs are read-only, so we need a
			 * writeable copy
			 */
			tx_skb = skb_copy(skb, GFP_KERNEL);
		} else {
			tx_skb = skb_clone(skb, GFP_KERNEL);
		}

		if (!tx_skb) {
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		/* Update skb contents */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
			put_unaligned_le32(__pack_extended_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		} else {
			put_unaligned_le16(__pack_enhanced_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		}

		/* Update FCS */
		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) tx_skb->data,
					tx_skb->len - L2CAP_FCS_SIZE);
			put_unaligned_le16(fcs, skb_tail_pointer(tx_skb) -
					   L2CAP_FCS_SIZE);
		}

		l2cap_do_send(chan, tx_skb);

		BT_DBG("Resent txseq %d", control.txseq);

		chan->last_acked_seq = chan->buffer_seq;
	}
}
1986
/* Retransmit the single I-frame requested by control->reqseq (SREJ). */
static void l2cap_retransmit(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);

	l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
	l2cap_ertm_resend(chan);
}
1995
/* Retransmit all sent-but-unacked I-frames starting at
 * control->reqseq, as requested by a REJ or a poll response.
 */
static void l2cap_retransmit_all(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	if (control->poll)
		set_bit(CONN_SEND_FBIT, &chan->conn_state);

	l2cap_seq_list_clear(&chan->retrans_list);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	if (chan->unacked_frames) {
		/* Find the first frame to retransmit... */
		skb_queue_walk(&chan->tx_q, skb) {
			if (bt_cb(skb)->control.txseq == control->reqseq ||
			    skb == chan->tx_send_head)
				break;
		}

		/* ...then queue every already-sent frame from there on */
		skb_queue_walk_from(&chan->tx_q, skb) {
			if (skb == chan->tx_send_head)
				break;

			l2cap_seq_list_append(&chan->retrans_list,
					      bt_cb(skb)->control.txseq);
		}

		l2cap_ertm_resend(chan);
	}
}
2029
/* Acknowledge received I-frames: send RNR when locally busy,
 * otherwise try to piggyback acks on outgoing I-frames and only emit
 * an explicit RR once the backlog reaches 3/4 of the ack window.
 */
static void l2cap_send_ack(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
					 chan->last_acked_seq);
	int threshold;

	BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
	       chan, chan->last_acked_seq, chan->buffer_seq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    chan->rx_state == L2CAP_RX_STATE_RECV) {
		__clear_ack_timer(chan);
		control.super = L2CAP_SUPER_RNR;
		control.reqseq = chan->buffer_seq;
		l2cap_send_sframe(chan, &control);
	} else {
		if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
			l2cap_ertm_send(chan);
			/* If any i-frames were sent, they included an ack */
			if (chan->buffer_seq == chan->last_acked_seq)
				frames_to_ack = 0;
		}

		/* Ack now if the window is 3/4ths full.
		 * Calculate without mul or div
		 */
		threshold = chan->ack_win;
		threshold += threshold << 1;
		threshold >>= 2;

		BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
		       threshold);

		if (frames_to_ack >= threshold) {
			__clear_ack_timer(chan);
			control.super = L2CAP_SUPER_RR;
			control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &control);
			frames_to_ack = 0;
		}

		/* Otherwise delay the ack to allow more piggybacking */
		if (frames_to_ack)
			__set_ack_timer(chan);
	}
}
2079
/* Copy @len bytes of user data from @msg into @skb, allocating
 * continuation fragments (chained via frag_list) whenever the data
 * exceeds the first skb's room.  Returns bytes copied or a negative
 * error.
 */
static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
					 struct msghdr *msg, int len,
					 int count, struct sk_buff *skb)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff **frag;
	int sent = 0;

	if (chan->ops->memcpy_fromiovec(chan, skb_put(skb, count),
					msg->msg_iov, count))
		return -EFAULT;

	sent += count;
	len -= count;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		struct sk_buff *tmp;

		count = min_t(unsigned int, conn->mtu, len);

		tmp = chan->ops->alloc_skb(chan, 0, count,
					   msg->msg_flags & MSG_DONTWAIT);
		if (IS_ERR(tmp))
			return PTR_ERR(tmp);

		*frag = tmp;

		if (chan->ops->memcpy_fromiovec(chan, skb_put(*frag, count),
						msg->msg_iov, count))
			return -EFAULT;

		sent += count;
		len -= count;

		/* Account the fragment in the parent skb's totals */
		skb->len += (*frag)->len;
		skb->data_len += (*frag)->len;

		frag = &(*frag)->next;
	}

	return sent;
}
2124
/* Build a connectionless PDU: L2CAP header plus PSM, followed by the
 * user data from @msg.  Returns the skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
						 struct msghdr *msg, size_t len)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p psm 0x%2.2x len %zu", chan,
	       __le16_to_cpu(chan->psm), len);

	/* Only the first fragment holds the header; the rest follows in
	 * continuation fragments allocated by l2cap_skbuff_fromiovec().
	 */
	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, hlen, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
	put_unaligned(chan->psm, (__le16 *) skb_put(skb, L2CAP_PSMLEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
2156
2157static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
2158 struct msghdr *msg, size_t len)
2159{
2160 struct l2cap_conn *conn = chan->conn;
2161 struct sk_buff *skb;
2162 int err, count;
2163 struct l2cap_hdr *lh;
2164
2165 BT_DBG("chan %p len %zu", chan, len);
2166
2167 count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
2168
2169 skb = chan->ops->alloc_skb(chan, L2CAP_HDR_SIZE, count,
2170 msg->msg_flags & MSG_DONTWAIT);
2171 if (IS_ERR(skb))
2172 return skb;
2173
2174 /* Create L2CAP header */
2175 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2176 lh->cid = cpu_to_le16(chan->dcid);
2177 lh->len = cpu_to_le16(len);
2178
2179 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2180 if (unlikely(err < 0)) {
2181 kfree_skb(skb);
2182 return ERR_PTR(err);
2183 }
2184 return skb;
2185}
2186
/* Build an ERTM/streaming I-frame PDU: L2CAP header, placeholder
 * control field (filled in at transmit time), optional SDU length for
 * the first segment, payload, and room for an optional FCS.
 */
static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
					       struct msghdr *msg, size_t len,
					       u16 sdulen)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	hlen = __ertm_hdr_size(chan);

	/* Only the first segment of an SDU carries the SDU length */
	if (sdulen)
		hlen += L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, hlen, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	/* Control header is populated later */
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	bt_cb(skb)->control.fcs = chan->fcs;
	bt_cb(skb)->control.retries = 0;
	return skb;
}
2240
/* Segment an outgoing SDU into ERTM/streaming I-frame PDUs and queue
 * them on @seg_queue with the appropriate SAR markings.  Returns 0 on
 * success or a negative error (the queue is purged on failure).
 */
static int l2cap_segment_sdu(struct l2cap_chan *chan,
			     struct sk_buff_head *seg_queue,
			     struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	u16 sdu_len;
	size_t pdu_len;
	u8 sar;

	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);

	/* It is critical that ERTM PDUs fit in a single HCI fragment,
	 * so fragmented skbs are not used. The HCI layer's handling
	 * of fragmented skbs is not compatible with ERTM's queueing.
	 */

	/* PDU size is derived from the HCI MTU */
	pdu_len = chan->conn->mtu;

	/* Constrain PDU size for BR/EDR connections */
	if (!chan->hs_hcon)
		pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);

	/* Adjust for largest possible L2CAP overhead. */
	if (chan->fcs)
		pdu_len -= L2CAP_FCS_SIZE;

	pdu_len -= __ertm_hdr_size(chan);

	/* Remote device may have requested smaller PDUs */
	pdu_len = min_t(size_t, pdu_len, chan->remote_mps);

	if (len <= pdu_len) {
		/* The whole SDU fits in one PDU */
		sar = L2CAP_SAR_UNSEGMENTED;
		sdu_len = 0;
		pdu_len = len;
	} else {
		/* Only the START segment carries the total SDU length */
		sar = L2CAP_SAR_START;
		sdu_len = len;
	}

	while (len > 0) {
		skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);

		if (IS_ERR(skb)) {
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		bt_cb(skb)->control.sar = sar;
		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;
		if (sdu_len)
			sdu_len = 0;

		if (len <= pdu_len) {
			sar = L2CAP_SAR_END;
			pdu_len = len;
		} else {
			sar = L2CAP_SAR_CONTINUE;
		}
	}

	return 0;
}
2307
/* Build an LE flow control mode PDU: L2CAP header, optional SDU
 * length for the first segment, then the user data from @msg.
 */
static struct sk_buff *l2cap_create_le_flowctl_pdu(struct l2cap_chan *chan,
						   struct msghdr *msg,
						   size_t len, u16 sdulen)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	hlen = L2CAP_HDR_SIZE;

	/* Only the first segment of an SDU carries the SDU length */
	if (sdulen)
		hlen += L2CAP_SDULEN_SIZE;

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, hlen, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	return skb;
}
2350
/* Segment an outgoing SDU into LE flow control PDUs and queue them on
 * @seg_queue; only the first PDU carries the SDU length field.
 * Returns 0 on success or a negative error (queue purged on failure).
 */
static int l2cap_segment_le_sdu(struct l2cap_chan *chan,
				struct sk_buff_head *seg_queue,
				struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	size_t pdu_len;
	u16 sdu_len;

	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);

	pdu_len = chan->conn->mtu - L2CAP_HDR_SIZE;

	/* Remote device may have requested smaller PDUs */
	pdu_len = min_t(size_t, pdu_len, chan->remote_mps);

	sdu_len = len;
	/* Reserve space for the SDU length field in the first PDU */
	pdu_len -= L2CAP_SDULEN_SIZE;

	while (len > 0) {
		if (len <= pdu_len)
			pdu_len = len;

		skb = l2cap_create_le_flowctl_pdu(chan, msg, pdu_len, sdu_len);
		if (IS_ERR(skb)) {
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;

		/* Subsequent PDUs omit the SDU length field, freeing up
		 * those bytes for payload.
		 */
		if (sdu_len) {
			sdu_len = 0;
			pdu_len += L2CAP_SDULEN_SIZE;
		}
	}

	return 0;
}
2390
2391int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
2392{
2393 struct sk_buff *skb;
2394 int err;
2395 struct sk_buff_head seg_queue;
2396
2397 if (!chan->conn)
2398 return -ENOTCONN;
2399
2400 /* Connectionless channel */
2401 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
2402 skb = l2cap_create_connless_pdu(chan, msg, len);
2403 if (IS_ERR(skb))
2404 return PTR_ERR(skb);
2405
2406 /* Channel lock is released before requesting new skb and then
2407 * reacquired thus we need to recheck channel state.
2408 */
2409 if (chan->state != BT_CONNECTED) {
2410 kfree_skb(skb);
2411 return -ENOTCONN;
2412 }
2413
2414 l2cap_do_send(chan, skb);
2415 return len;
2416 }
2417
2418 switch (chan->mode) {
2419 case L2CAP_MODE_LE_FLOWCTL:
2420 /* Check outgoing MTU */
2421 if (len > chan->omtu)
2422 return -EMSGSIZE;
2423
2424 if (!chan->tx_credits)
2425 return -EAGAIN;
2426
2427 __skb_queue_head_init(&seg_queue);
2428
2429 err = l2cap_segment_le_sdu(chan, &seg_queue, msg, len);
2430
2431 if (chan->state != BT_CONNECTED) {
2432 __skb_queue_purge(&seg_queue);
2433 err = -ENOTCONN;
2434 }
2435
2436 if (err)
2437 return err;
2438
2439 skb_queue_splice_tail_init(&seg_queue, &chan->tx_q);
2440
2441 while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
2442 l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
2443 chan->tx_credits--;
2444 }
2445
2446 if (!chan->tx_credits)
2447 chan->ops->suspend(chan);
2448
2449 err = len;
2450
2451 break;
2452
2453 case L2CAP_MODE_BASIC:
2454 /* Check outgoing MTU */
2455 if (len > chan->omtu)
2456 return -EMSGSIZE;
2457
2458 /* Create a basic PDU */
2459 skb = l2cap_create_basic_pdu(chan, msg, len);
2460 if (IS_ERR(skb))
2461 return PTR_ERR(skb);
2462
2463 /* Channel lock is released before requesting new skb and then
2464 * reacquired thus we need to recheck channel state.
2465 */
2466 if (chan->state != BT_CONNECTED) {
2467 kfree_skb(skb);
2468 return -ENOTCONN;
2469 }
2470
2471 l2cap_do_send(chan, skb);
2472 err = len;
2473 break;
2474
2475 case L2CAP_MODE_ERTM:
2476 case L2CAP_MODE_STREAMING:
2477 /* Check outgoing MTU */
2478 if (len > chan->omtu) {
2479 err = -EMSGSIZE;
2480 break;
2481 }
2482
2483 __skb_queue_head_init(&seg_queue);
2484
2485 /* Do segmentation before calling in to the state machine,
2486 * since it's possible to block while waiting for memory
2487 * allocation.
2488 */
2489 err = l2cap_segment_sdu(chan, &seg_queue, msg, len);
2490
2491 /* The channel could have been closed while segmenting,
2492 * check that it is still connected.
2493 */
2494 if (chan->state != BT_CONNECTED) {
2495 __skb_queue_purge(&seg_queue);
2496 err = -ENOTCONN;
2497 }
2498
2499 if (err)
2500 break;
2501
2502 if (chan->mode == L2CAP_MODE_ERTM)
2503 l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
2504 else
2505 l2cap_streaming_send(chan, &seg_queue);
2506
2507 err = len;
2508
2509 /* If the skbs were not queued for sending, they'll still be in
2510 * seg_queue and need to be purged.
2511 */
2512 __skb_queue_purge(&seg_queue);
2513 break;
2514
2515 default:
2516 BT_DBG("bad state %1.1x", chan->mode);
2517 err = -EBADFD;
2518 }
2519
2520 return err;
2521}
2522EXPORT_SYMBOL_GPL(l2cap_chan_send);
2523
/* Send SREJ S-frames for every sequence number between the next frame
 * we expected (expected_tx_seq) and the out-of-order frame @txseq we
 * just received, skipping frames already buffered in srej_q.  Each
 * requested sequence number is also recorded in srej_list so retries
 * can be tracked.
 */
static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
{
	struct l2cap_ctrl control;
	u16 seq;

	BT_DBG("chan %p, txseq %u", chan, txseq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;

	for (seq = chan->expected_tx_seq; seq != txseq;
	     seq = __next_seq(chan, seq)) {
		/* Only request frames not already received out of order */
		if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
			control.reqseq = seq;
			l2cap_send_sframe(chan, &control);
			l2cap_seq_list_append(&chan->srej_list, seq);
		}
	}

	/* Everything up to and including txseq is now accounted for */
	chan->expected_tx_seq = __next_seq(chan, txseq);
}
2546
2547static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2548{
2549 struct l2cap_ctrl control;
2550
2551 BT_DBG("chan %p", chan);
2552
2553 if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2554 return;
2555
2556 memset(&control, 0, sizeof(control));
2557 control.sframe = 1;
2558 control.super = L2CAP_SUPER_SREJ;
2559 control.reqseq = chan->srej_list.tail;
2560 l2cap_send_sframe(chan, &control);
2561}
2562
/* Re-send SREJ S-frames for every outstanding entry on the SREJ list
 * except @txseq (which has just been received and is popped without
 * being re-requested).  Each re-requested entry is appended back to
 * the list; the saved initial head guarantees a single pass even
 * though entries are re-appended while iterating.
 */
static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
{
	struct l2cap_ctrl control;
	u16 initial_head;
	u16 seq;

	BT_DBG("chan %p, txseq %u", chan, txseq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;

	/* Capture initial list head to allow only one pass through the list. */
	initial_head = chan->srej_list.head;

	do {
		seq = l2cap_seq_list_pop(&chan->srej_list);
		if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
			break;

		control.reqseq = seq;
		l2cap_send_sframe(chan, &control);
		l2cap_seq_list_append(&chan->srej_list, seq);
	} while (chan->srej_list.head != initial_head);
}
2588
/* Process an acknowledgement (ReqSeq) from the peer: free every
 * transmitted frame up to but not including @reqseq from the tx queue
 * and update the unacked-frame accounting.  Clears the retransmission
 * timer once nothing remains unacknowledged.
 */
static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
{
	struct sk_buff *acked_skb;
	u16 ackseq;

	BT_DBG("chan %p, reqseq %u", chan, reqseq);

	/* Nothing outstanding, or this ack adds no new information */
	if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
		return;

	BT_DBG("expected_ack_seq %u, unacked_frames %u",
	       chan->expected_ack_seq, chan->unacked_frames);

	for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
	     ackseq = __next_seq(chan, ackseq)) {

		/* Frame may already be gone (e.g. after retransmit
		 * bookkeeping); only count it once.
		 */
		acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
		if (acked_skb) {
			skb_unlink(acked_skb, &chan->tx_q);
			kfree_skb(acked_skb);
			chan->unacked_frames--;
		}
	}

	chan->expected_ack_seq = reqseq;

	if (chan->unacked_frames == 0)
		__clear_retrans_timer(chan);

	BT_DBG("unacked_frames %u", chan->unacked_frames);
}
2620
2621static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2622{
2623 BT_DBG("chan %p", chan);
2624
2625 chan->expected_tx_seq = chan->buffer_seq;
2626 l2cap_seq_list_clear(&chan->srej_list);
2627 skb_queue_purge(&chan->srej_q);
2628 chan->rx_state = L2CAP_RX_STATE_RECV;
2629}
2630
/* ERTM TX state machine handler for the XMIT state.
 *
 * @control: S/I-frame control fields for receive-driven events (may be
 *           NULL for locally generated events)
 * @skbs:    pre-segmented PDUs for L2CAP_EV_DATA_REQUEST, else NULL
 * @event:   L2CAP_EV_* identifier
 */
static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
				struct l2cap_ctrl *control,
				struct sk_buff_head *skbs, u8 event)
{
	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
	       event);

	switch (event) {
	case L2CAP_EV_DATA_REQUEST:
		/* Remember where unsent data starts before splicing */
		if (chan->tx_send_head == NULL)
			chan->tx_send_head = skb_peek(skbs);

		skb_queue_splice_tail_init(skbs, &chan->tx_q);
		l2cap_ertm_send(chan);
		break;
	case L2CAP_EV_LOCAL_BUSY_DETECTED:
		BT_DBG("Enter LOCAL_BUSY");
		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
			/* The SREJ_SENT state must be aborted if we are to
			 * enter the LOCAL_BUSY state.
			 */
			l2cap_abort_rx_srej_sent(chan);
		}

		/* Informs the peer via RNR (sent by l2cap_send_ack when
		 * CONN_LOCAL_BUSY is set)
		 */
		l2cap_send_ack(chan);

		break;
	case L2CAP_EV_LOCAL_BUSY_CLEAR:
		BT_DBG("Exit LOCAL_BUSY");
		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
			struct l2cap_ctrl local_control;

			/* Poll the peer with RR to resynchronise after
			 * having advertised busy; wait for the F-bit.
			 */
			memset(&local_control, 0, sizeof(local_control));
			local_control.sframe = 1;
			local_control.super = L2CAP_SUPER_RR;
			local_control.poll = 1;
			local_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &local_control);

			chan->retry_count = 1;
			__set_monitor_timer(chan);
			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		}
		break;
	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
		l2cap_process_reqseq(chan, control->reqseq);
		break;
	case L2CAP_EV_EXPLICIT_POLL:
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		__clear_ack_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RETRANS_TO:
		/* Retransmission timeout: poll the peer and await F-bit */
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RECV_FBIT:
		/* Nothing to process */
		break;
	default:
		break;
	}
}
2702
2703static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2704 struct l2cap_ctrl *control,
2705 struct sk_buff_head *skbs, u8 event)
2706{
2707 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2708 event);
2709
2710 switch (event) {
2711 case L2CAP_EV_DATA_REQUEST:
2712 if (chan->tx_send_head == NULL)
2713 chan->tx_send_head = skb_peek(skbs);
2714 /* Queue data, but don't send. */
2715 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2716 break;
2717 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2718 BT_DBG("Enter LOCAL_BUSY");
2719 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2720
2721 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2722 /* The SREJ_SENT state must be aborted if we are to
2723 * enter the LOCAL_BUSY state.
2724 */
2725 l2cap_abort_rx_srej_sent(chan);
2726 }
2727
2728 l2cap_send_ack(chan);
2729
2730 break;
2731 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2732 BT_DBG("Exit LOCAL_BUSY");
2733 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2734
2735 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2736 struct l2cap_ctrl local_control;
2737 memset(&local_control, 0, sizeof(local_control));
2738 local_control.sframe = 1;
2739 local_control.super = L2CAP_SUPER_RR;
2740 local_control.poll = 1;
2741 local_control.reqseq = chan->buffer_seq;
2742 l2cap_send_sframe(chan, &local_control);
2743
2744 chan->retry_count = 1;
2745 __set_monitor_timer(chan);
2746 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2747 }
2748 break;
2749 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2750 l2cap_process_reqseq(chan, control->reqseq);
2751
2752 /* Fall through */
2753
2754 case L2CAP_EV_RECV_FBIT:
2755 if (control && control->final) {
2756 __clear_monitor_timer(chan);
2757 if (chan->unacked_frames > 0)
2758 __set_retrans_timer(chan);
2759 chan->retry_count = 0;
2760 chan->tx_state = L2CAP_TX_STATE_XMIT;
2761 BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state);
2762 }
2763 break;
2764 case L2CAP_EV_EXPLICIT_POLL:
2765 /* Ignore */
2766 break;
2767 case L2CAP_EV_MONITOR_TO:
2768 if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
2769 l2cap_send_rr_or_rnr(chan, 1);
2770 __set_monitor_timer(chan);
2771 chan->retry_count++;
2772 } else {
2773 l2cap_send_disconn_req(chan, ECONNABORTED);
2774 }
2775 break;
2776 default:
2777 break;
2778 }
2779}
2780
2781static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
2782 struct sk_buff_head *skbs, u8 event)
2783{
2784 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2785 chan, control, skbs, event, chan->tx_state);
2786
2787 switch (chan->tx_state) {
2788 case L2CAP_TX_STATE_XMIT:
2789 l2cap_tx_state_xmit(chan, control, skbs, event);
2790 break;
2791 case L2CAP_TX_STATE_WAIT_F:
2792 l2cap_tx_state_wait_f(chan, control, skbs, event);
2793 break;
2794 default:
2795 /* Ignore event */
2796 break;
2797 }
2798}
2799
/* Feed a received frame's ReqSeq/F-bit into the TX state machine */
static void l2cap_pass_to_tx(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);
	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
}
2806
/* Feed only a received frame's F-bit into the TX state machine */
static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
				  struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);
	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
}
2813
2814/* Copy frame to all raw sockets on that connection */
2815static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2816{
2817 struct sk_buff *nskb;
2818 struct l2cap_chan *chan;
2819
2820 BT_DBG("conn %p", conn);
2821
2822 mutex_lock(&conn->chan_lock);
2823
2824 list_for_each_entry(chan, &conn->chan_l, list) {
2825 if (chan->chan_type != L2CAP_CHAN_RAW)
2826 continue;
2827
2828 /* Don't send frame to the channel it came from */
2829 if (bt_cb(skb)->chan == chan)
2830 continue;
2831
2832 nskb = skb_clone(skb, GFP_KERNEL);
2833 if (!nskb)
2834 continue;
2835 if (chan->ops->recv(chan, nskb))
2836 kfree_skb(nskb);
2837 }
2838
2839 mutex_unlock(&conn->chan_lock);
2840}
2841
/* ---- L2CAP signalling commands ---- */

/* Build a signalling command skb: L2CAP header + command header +
 * @dlen bytes of @data.  The signalling CID is chosen by link type
 * (LE vs BR/EDR).  Payload exceeding conn->mtu is carried in
 * header-less continuation fragments on the frag_list.
 *
 * Returns the skb or NULL on allocation failure / undersized MTU.
 */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
				       u8 ident, u16 dlen, void *data)
{
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	int len, count;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
	       conn, code, ident, dlen);

	/* The two headers must fit in a single (first) fragment */
	if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
		return NULL;

	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, conn->mtu, len);

	skb = bt_skb_alloc(count, GFP_KERNEL);
	if (!skb)
		return NULL;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);

	if (conn->hcon->type == LE_LINK)
		lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
	else
		lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);

	cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->code = code;
	cmd->ident = ident;
	cmd->len = cpu_to_le16(dlen);

	if (dlen) {
		/* Fill the remainder of the first fragment with data */
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		memcpy(skb_put(skb, count), data, count);
		data += count;
	}

	len -= skb->len;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_alloc(count, GFP_KERNEL);
		if (!*frag)
			goto fail;

		memcpy(skb_put(*frag, count), data, count);

		len -= count;
		data += count;

		frag = &(*frag)->next;
	}

	return skb;

fail:
	/* Frees the whole chain including already-built fragments */
	kfree_skb(skb);
	return NULL;
}
2908
/* Parse one configuration option at *ptr, advancing *ptr past it.
 *
 * Outputs the option @type and length @olen.  For 1/2/4-byte options
 * @val holds the (le-decoded) value; for any other length @val is a
 * pointer to the in-place option payload.
 *
 * Returns the total number of bytes consumed.
 *
 * NOTE(review): opt->len comes from the remote peer and is not checked
 * here against the bytes remaining in the buffer — callers only verify
 * that L2CAP_CONF_OPT_SIZE bytes remain before each call.  Presumably
 * a hostile peer could make *ptr advance past the end of the received
 * data; verify the callers' bounds handling.
 */
static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
				     unsigned long *val)
{
	struct l2cap_conf_opt *opt = *ptr;
	int len;

	len = L2CAP_CONF_OPT_SIZE + opt->len;
	*ptr += len;

	*type = opt->type;
	*olen = opt->len;

	switch (opt->len) {
	case 1:
		*val = *((u8 *) opt->val);
		break;

	case 2:
		*val = get_unaligned_le16(opt->val);
		break;

	case 4:
		*val = get_unaligned_le32(opt->val);
		break;

	default:
		/* Variable-length option: hand back a pointer instead */
		*val = (unsigned long) opt->val;
		break;
	}

	BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
	return len;
}
2942
2943static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2944{
2945 struct l2cap_conf_opt *opt = *ptr;
2946
2947 BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);
2948
2949 opt->type = type;
2950 opt->len = len;
2951
2952 switch (len) {
2953 case 1:
2954 *((u8 *) opt->val) = val;
2955 break;
2956
2957 case 2:
2958 put_unaligned_le16(val, opt->val);
2959 break;
2960
2961 case 4:
2962 put_unaligned_le32(val, opt->val);
2963 break;
2964
2965 default:
2966 memcpy(opt->val, (void *) val, len);
2967 break;
2968 }
2969
2970 *ptr += L2CAP_CONF_OPT_SIZE + len;
2971}
2972
/* Append an Extended Flow Specification (EFS) option describing this
 * channel's local service parameters.  Only meaningful for ERTM and
 * streaming modes; other modes add nothing.
 */
static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
{
	struct l2cap_conf_efs efs;

	switch (chan->mode) {
	case L2CAP_MODE_ERTM:
		efs.id = chan->local_id;
		efs.stype = chan->local_stype;
		efs.msdu = cpu_to_le16(chan->local_msdu);
		efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
		efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
		efs.flush_to = cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
		break;

	case L2CAP_MODE_STREAMING:
		/* Streaming uses fixed best-effort parameters */
		efs.id = 1;
		efs.stype = L2CAP_SERV_BESTEFFORT;
		efs.msdu = cpu_to_le16(chan->local_msdu);
		efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
		efs.acc_lat = 0;
		efs.flush_to = 0;
		break;

	default:
		return;
	}

	l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
			   (unsigned long) &efs);
}
3003
/* Delayed-work handler for the ERTM ack timer: if any received frames
 * have not yet been acknowledged, send an RR (or RNR when busy).  The
 * channel reference taken when the timer was armed is released here.
 */
static void l2cap_ack_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       ack_timer.work);
	u16 frames_to_ack;

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	/* Frames received since the last ack we sent */
	frames_to_ack = __seq_offset(chan, chan->buffer_seq,
				     chan->last_acked_seq);

	if (frames_to_ack)
		l2cap_send_rr_or_rnr(chan, 0);

	l2cap_chan_unlock(chan);
	/* Drop the reference held for this timer */
	l2cap_chan_put(chan);
}
3023
/* Reset per-connection sequence/SDU state for @chan and, for ERTM
 * mode, initialise the full state machine: timers, SREJ queue and the
 * srej/retrans sequence lists.
 *
 * Returns 0 on success or a negative errno if the sequence lists
 * cannot be allocated (partial allocations are rolled back).
 */
int l2cap_ertm_init(struct l2cap_chan *chan)
{
	int err;

	chan->next_tx_seq = 0;
	chan->expected_tx_seq = 0;
	chan->expected_ack_seq = 0;
	chan->unacked_frames = 0;
	chan->buffer_seq = 0;
	chan->frames_sent = 0;
	chan->last_acked_seq = 0;
	chan->sdu = NULL;
	chan->sdu_last_frag = NULL;
	chan->sdu_len = 0;

	skb_queue_head_init(&chan->tx_q);

	/* AMP move state starts out stable on the BR/EDR controller */
	chan->local_amp_id = AMP_ID_BREDR;
	chan->move_id = AMP_ID_BREDR;
	chan->move_state = L2CAP_MOVE_STABLE;
	chan->move_role = L2CAP_MOVE_ROLE_NONE;

	/* Everything below is ERTM-only machinery */
	if (chan->mode != L2CAP_MODE_ERTM)
		return 0;

	chan->rx_state = L2CAP_RX_STATE_RECV;
	chan->tx_state = L2CAP_TX_STATE_XMIT;

	INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
	INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
	INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);

	skb_queue_head_init(&chan->srej_q);

	err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
	if (err < 0)
		return err;

	err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
	if (err < 0)
		/* Roll back the first allocation on failure */
		l2cap_seq_list_free(&chan->srej_list);

	return err;
}
3068
3069static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
3070{
3071 switch (mode) {
3072 case L2CAP_MODE_STREAMING:
3073 case L2CAP_MODE_ERTM:
3074 if (l2cap_mode_supported(mode, remote_feat_mask))
3075 return mode;
3076 /* fall through */
3077 default:
3078 return L2CAP_MODE_BASIC;
3079 }
3080}
3081
3082static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
3083{
3084 return conn->hs_enabled && conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
3085}
3086
3087static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
3088{
3089 return conn->hs_enabled && conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
3090}
3091
/* Fill in the RFC option's retransmission and monitor timeouts.
 * On an AMP link they are derived from the controller's best-effort
 * flush timeout; otherwise the spec defaults are used.
 */
static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
				      struct l2cap_conf_rfc *rfc)
{
	if (chan->local_amp_id != AMP_ID_BREDR && chan->hs_hcon) {
		/* Flush timeout is in microseconds here */
		u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;

		/* Class 1 devices have must have ERTM timeouts
		 * exceeding the Link Supervision Timeout. The
		 * default Link Supervision Timeout for AMP
		 * controllers is 10 seconds.
		 *
		 * Class 1 devices use 0xffffffff for their
		 * best-effort flush timeout, so the clamping logic
		 * will result in a timeout that meets the above
		 * requirement. ERTM timeouts are 16-bit values, so
		 * the maximum timeout is 65.535 seconds.
		 */

		/* Convert timeout to milliseconds and round */
		ertm_to = DIV_ROUND_UP_ULL(ertm_to, 1000);

		/* This is the recommended formula for class 2 devices
		 * that start ERTM timers when packets are sent to the
		 * controller.
		 */
		ertm_to = 3 * ertm_to + 500;

		/* Clamp to the 16-bit field in the RFC option */
		if (ertm_to > 0xffff)
			ertm_to = 0xffff;

		rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);
		rfc->monitor_timeout = rfc->retrans_timeout;
	} else {
		rfc->retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
		rfc->monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
	}
}
3129
3130static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
3131{
3132 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
3133 __l2cap_ews_supported(chan->conn)) {
3134 /* use extended control field */
3135 set_bit(FLAG_EXT_CTRL, &chan->flags);
3136 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3137 } else {
3138 chan->tx_win = min_t(u16, chan->tx_win,
3139 L2CAP_DEFAULT_TX_WINDOW);
3140 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
3141 }
3142 chan->ack_win = chan->tx_win;
3143}
3144
/* Build an outgoing Configure Request for @chan into @data.
 *
 * On the first request the channel mode may be (re)selected from the
 * remote's feature mask; on retries the previously chosen mode is
 * kept.  Options appended depend on the final mode: MTU, RFC, and for
 * ERTM/streaming optionally EFS, EWS and FCS.
 *
 * Returns the number of bytes written (header plus options).
 */
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
{
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = chan->mode };
	void *ptr = req->data;
	u16 size;

	BT_DBG("chan %p", chan);

	/* Mode selection happens only on the very first exchange */
	if (chan->num_conf_req || chan->num_conf_rsp)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		/* State-2 devices keep the mode they were configured with */
		if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
			break;

		if (__l2cap_efs_supported(chan->conn))
			set_bit(FLAG_EFS_ENABLE, &chan->flags);

		/* fall through */
	default:
		chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
		break;
	}

done:
	if (chan->imtu != L2CAP_DEFAULT_MTU)
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		if (disable_ertm)
			break;

		/* Only send an explicit basic-mode RFC when the remote
		 * supports ERTM or streaming (so the option means something).
		 */
		if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
		    !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
			break;

		rfc.mode            = L2CAP_MODE_BASIC;
		rfc.txwin_size      = 0;
		rfc.max_transmit    = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size    = 0;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);
		break;

	case L2CAP_MODE_ERTM:
		rfc.mode            = L2CAP_MODE_ERTM;
		rfc.max_transmit    = chan->max_tx;

		__l2cap_set_ertm_timeouts(chan, &rfc);

		/* MPS must leave room for extended header, SDU length
		 * and FCS within the HCI MTU.
		 */
		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_txwin_setup(chan);

		/* The RFC field is limited to the standard window; a
		 * larger window goes in the separate EWS option below.
		 */
		rfc.txwin_size = min_t(u16, chan->tx_win,
				       L2CAP_DEFAULT_TX_WINDOW);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan);

		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
					   chan->tx_win);

		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
			if (chan->fcs == L2CAP_FCS_NONE ||
			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
				chan->fcs = L2CAP_FCS_NONE;
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
						   chan->fcs);
			}
		break;

	case L2CAP_MODE_STREAMING:
		l2cap_txwin_setup(chan);
		rfc.mode            = L2CAP_MODE_STREAMING;
		rfc.txwin_size      = 0;
		rfc.max_transmit    = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;

		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan);

		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
			if (chan->fcs == L2CAP_FCS_NONE ||
			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
				chan->fcs = L2CAP_FCS_NONE;
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
						   chan->fcs);
			}
		break;
	}

	req->dcid  = cpu_to_le16(chan->dcid);
	req->flags = cpu_to_le16(0);

	return ptr - data;
}
3265
/* Parse the remote's buffered Configure Request (chan->conf_req /
 * conf_len) and build our Configure Response into @data.
 *
 * First pass collects all options; then the channel mode is settled
 * (possibly refusing with -ECONNREFUSED), and finally output options
 * (MTU, RFC, EFS) are generated with the result code SUCCESS,
 * UNACCEPT, UNKNOWN or PENDING.
 *
 * Returns the number of response bytes written or a negative errno.
 *
 * NOTE(review): option payloads are only copied when olen matches the
 * expected struct size, but l2cap_get_conf_opt() itself does not bound
 * opt->len against the remaining buffer — see the note on that helper.
 */
static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
{
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;
	void *req = chan->conf_req;
	int len = chan->conf_len;
	int type, hint, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;
	u8 remote_efs = 0;
	u16 mtu = L2CAP_DEFAULT_MTU;
	u16 result = L2CAP_CONF_SUCCESS;
	u16 size;

	BT_DBG("chan %p", chan);

	/* Pass 1: collect options from the request */
	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);

		/* Hint options may be silently ignored if unknown */
		hint  = type & L2CAP_CONF_HINT;
		type &= L2CAP_CONF_MASK;

		switch (type) {
		case L2CAP_CONF_MTU:
			mtu = val;
			break;

		case L2CAP_CONF_FLUSH_TO:
			chan->flush_to = val;
			break;

		case L2CAP_CONF_QOS:
			break;

		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *) val, olen);
			break;

		case L2CAP_CONF_FCS:
			if (val == L2CAP_FCS_NONE)
				set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
			break;

		case L2CAP_CONF_EFS:
			remote_efs = 1;
			if (olen == sizeof(efs))
				memcpy(&efs, (void *) val, olen);
			break;

		case L2CAP_CONF_EWS:
			/* Extended windows require high-speed support */
			if (!chan->conn->hs_enabled)
				return -ECONNREFUSED;

			set_bit(FLAG_EXT_CTRL, &chan->flags);
			set_bit(CONF_EWS_RECV, &chan->conf_state);
			chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
			chan->remote_tx_win = val;
			break;

		default:
			if (hint)
				break;

			/* Echo unknown non-hint option types back */
			result = L2CAP_CONF_UNKNOWN;
			*((u8 *) ptr++) = type;
			break;
		}
	}

	/* Mode negotiation only happens on the first exchange */
	if (chan->num_conf_rsp || chan->num_conf_req > 1)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
			chan->mode = l2cap_select_mode(rfc.mode,
						       chan->conn->feat_mask);
			break;
		}

		if (remote_efs) {
			if (__l2cap_efs_supported(chan->conn))
				set_bit(FLAG_EFS_ENABLE, &chan->flags);
			else
				return -ECONNREFUSED;
		}

		/* State-2 devices cannot change mode */
		if (chan->mode != rfc.mode)
			return -ECONNREFUSED;

		break;
	}

done:
	if (chan->mode != rfc.mode) {
		result = L2CAP_CONF_UNACCEPT;
		rfc.mode = chan->mode;

		/* Give up after one failed response round */
		if (chan->num_conf_rsp == 1)
			return -ECONNREFUSED;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);
	}

	if (result == L2CAP_CONF_SUCCESS) {
		/* Configure output options and let the other side know
		 * which ones we don't like. */

		if (mtu < L2CAP_DEFAULT_MIN_MTU)
			result = L2CAP_CONF_UNACCEPT;
		else {
			chan->omtu = mtu;
			set_bit(CONF_MTU_DONE, &chan->conf_state);
		}
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);

		if (remote_efs) {
			/* Service types must match unless one side is
			 * NO_TRAFFIC.
			 */
			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != chan->local_stype) {

				result = L2CAP_CONF_UNACCEPT;

				if (chan->num_conf_req >= 1)
					return -ECONNREFUSED;

				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs);
			} else {
				/* Send PENDING Conf Rsp */
				result = L2CAP_CONF_PENDING;
				set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
			}
		}

		switch (rfc.mode) {
		case L2CAP_MODE_BASIC:
			chan->fcs = L2CAP_FCS_NONE;
			set_bit(CONF_MODE_DONE, &chan->conf_state);
			break;

		case L2CAP_MODE_ERTM:
			/* EWS, when present, overrides the RFC window */
			if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
				chan->remote_tx_win = rfc.txwin_size;
			else
				rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;

			chan->remote_max_tx = rfc.max_transmit;

			/* Clamp the remote MPS to what fits our HCI MTU */
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			__l2cap_set_ertm_timeouts(chan, &rfc);

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					   sizeof(rfc), (unsigned long) &rfc);

			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->remote_id = efs.id;
				chan->remote_stype = efs.stype;
				chan->remote_msdu = le16_to_cpu(efs.msdu);
				chan->remote_flush_to =
					le32_to_cpu(efs.flush_to);
				chan->remote_acc_lat =
					le32_to_cpu(efs.acc_lat);
				chan->remote_sdu_itime =
					le32_to_cpu(efs.sdu_itime);
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs);
			}
			break;

		case L2CAP_MODE_STREAMING:
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
					   (unsigned long) &rfc);

			break;

		default:
			result = L2CAP_CONF_UNACCEPT;

			memset(&rfc, 0, sizeof(rfc));
			rfc.mode = chan->mode;
		}

		if (result == L2CAP_CONF_SUCCESS)
			set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
	}
	rsp->scid   = cpu_to_le16(chan->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags  = cpu_to_le16(0);

	return ptr - data;
}
3479
/* Parse the remote's Configure Response and build a follow-up
 * Configure Request into @data, echoing/adjusting each option.
 * On SUCCESS or PENDING the negotiated ERTM/streaming parameters are
 * committed to the channel.
 *
 * Returns the number of request bytes written or -ECONNREFUSED when
 * the response is incompatible with our mode or service type.
 *
 * NOTE(review): @len and the option lengths come from the peer and
 * every parsed option is echoed into the fixed-size buffer behind
 * @data; presumably callers size that buffer for the worst case —
 * verify against the callers (this area has a security history).
 */
static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
				void *data, u16 *result)
{
	struct l2cap_conf_req *req = data;
	void *ptr = req->data;
	int type, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;

	BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);

		switch (type) {
		case L2CAP_CONF_MTU:
			/* Refuse MTUs below the spec minimum but echo a
			 * corrected value so negotiation can proceed.
			 */
			if (val < L2CAP_DEFAULT_MIN_MTU) {
				*result = L2CAP_CONF_UNACCEPT;
				chan->imtu = L2CAP_DEFAULT_MIN_MTU;
			} else
				chan->imtu = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
			break;

		case L2CAP_CONF_FLUSH_TO:
			chan->flush_to = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
					   2, chan->flush_to);
			break;

		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *)val, olen);

			/* State-2 devices cannot accept a mode change */
			if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
			    rfc.mode != chan->mode)
				return -ECONNREFUSED;

			chan->fcs = 0;

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					   sizeof(rfc), (unsigned long) &rfc);
			break;

		case L2CAP_CONF_EWS:
			chan->ack_win = min_t(u16, val, chan->ack_win);
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
					   chan->tx_win);
			break;

		case L2CAP_CONF_EFS:
			if (olen == sizeof(efs))
				memcpy(&efs, (void *)val, olen);

			/* Service types must match unless NO_TRAFFIC */
			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != chan->local_stype)
				return -ECONNREFUSED;

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
					   (unsigned long) &efs);
			break;

		case L2CAP_CONF_FCS:
			if (*result == L2CAP_CONF_PENDING)
				if (val == L2CAP_FCS_NONE)
					set_bit(CONF_RECV_NO_FCS,
						&chan->conf_state);
			break;
		}
	}

	/* A basic-mode channel cannot be switched by the remote */
	if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
		return -ECONNREFUSED;

	chan->mode = rfc.mode;

	if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
		switch (rfc.mode) {
		case L2CAP_MODE_ERTM:
			chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
			chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
			chan->mps    = le16_to_cpu(rfc.max_pdu_size);
			if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
				chan->ack_win = min_t(u16, chan->ack_win,
						      rfc.txwin_size);

			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->local_msdu = le16_to_cpu(efs.msdu);
				chan->local_sdu_itime =
					le32_to_cpu(efs.sdu_itime);
				chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
				chan->local_flush_to =
					le32_to_cpu(efs.flush_to);
			}
			break;

		case L2CAP_MODE_STREAMING:
			chan->mps    = le16_to_cpu(rfc.max_pdu_size);
		}
	}

	req->dcid   = cpu_to_le16(chan->dcid);
	req->flags  = cpu_to_le16(0);

	return ptr - data;
}
3588
3589static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
3590 u16 result, u16 flags)
3591{
3592 struct l2cap_conf_rsp *rsp = data;
3593 void *ptr = rsp->data;
3594
3595 BT_DBG("chan %p", chan);
3596
3597 rsp->scid = cpu_to_le16(chan->dcid);
3598 rsp->result = cpu_to_le16(result);
3599 rsp->flags = cpu_to_le16(flags);
3600
3601 return ptr - data;
3602}
3603
3604void __l2cap_le_connect_rsp_defer(struct l2cap_chan *chan)
3605{
3606 struct l2cap_le_conn_rsp rsp;
3607 struct l2cap_conn *conn = chan->conn;
3608
3609 BT_DBG("chan %p", chan);
3610
3611 rsp.dcid = cpu_to_le16(chan->scid);
3612 rsp.mtu = cpu_to_le16(chan->imtu);
3613 rsp.mps = cpu_to_le16(chan->mps);
3614 rsp.credits = cpu_to_le16(chan->rx_credits);
3615 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
3616
3617 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
3618 &rsp);
3619}
3620
3621void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
3622{
3623 struct l2cap_conn_rsp rsp;
3624 struct l2cap_conn *conn = chan->conn;
3625 u8 buf[128];
3626 u8 rsp_code;
3627
3628 rsp.scid = cpu_to_le16(chan->dcid);
3629 rsp.dcid = cpu_to_le16(chan->scid);
3630 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
3631 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
3632
3633 if (chan->hs_hcon)
3634 rsp_code = L2CAP_CREATE_CHAN_RSP;
3635 else
3636 rsp_code = L2CAP_CONN_RSP;
3637
3638 BT_DBG("chan %p rsp_code %u", chan, rsp_code);
3639
3640 l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);
3641
3642 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3643 return;
3644
3645 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3646 l2cap_build_conf_req(chan, buf), buf);
3647 chan->num_conf_req++;
3648}
3649
3650static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
3651{
3652 int type, olen;
3653 unsigned long val;
3654 /* Use sane default values in case a misbehaving remote device
3655 * did not send an RFC or extended window size option.
3656 */
3657 u16 txwin_ext = chan->ack_win;
3658 struct l2cap_conf_rfc rfc = {
3659 .mode = chan->mode,
3660 .retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
3661 .monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
3662 .max_pdu_size = cpu_to_le16(chan->imtu),
3663 .txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
3664 };
3665
3666 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
3667
3668 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
3669 return;
3670
3671 while (len >= L2CAP_CONF_OPT_SIZE) {
3672 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3673
3674 switch (type) {
3675 case L2CAP_CONF_RFC:
3676 if (olen == sizeof(rfc))
3677 memcpy(&rfc, (void *)val, olen);
3678 break;
3679 case L2CAP_CONF_EWS:
3680 txwin_ext = val;
3681 break;
3682 }
3683 }
3684
3685 switch (rfc.mode) {
3686 case L2CAP_MODE_ERTM:
3687 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3688 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3689 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3690 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3691 chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
3692 else
3693 chan->ack_win = min_t(u16, chan->ack_win,
3694 rfc.txwin_size);
3695 break;
3696 case L2CAP_MODE_STREAMING:
3697 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3698 }
3699}
3700
3701static inline int l2cap_command_rej(struct l2cap_conn *conn,
3702 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3703 u8 *data)
3704{
3705 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
3706
3707 if (cmd_len < sizeof(*rej))
3708 return -EPROTO;
3709
3710 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
3711 return 0;
3712
3713 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
3714 cmd->ident == conn->info_ident) {
3715 cancel_delayed_work(&conn->info_timer);
3716
3717 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3718 conn->info_ident = 0;
3719
3720 l2cap_conn_start(conn);
3721 }
3722
3723 return 0;
3724}
3725
/* Core handler for an incoming Connection Request (rsp_code ==
 * L2CAP_CONN_RSP) or AMP Create Channel Request (rsp_code ==
 * L2CAP_CREATE_CHAN_RSP, amp_id identifies the controller).
 *
 * Looks up a listening channel for the requested PSM, performs the
 * security check, allocates the new channel and always sends a response
 * (success, pending, or an error such as bad PSM / no memory).
 * Returns the new channel, or NULL on failure, so the AMP caller can
 * finish its link setup.
 */
static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd,
					u8 *data, u8 rsp_code, u8 amp_id)
{
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct l2cap_chan *chan = NULL, *pchan;
	int result, status = L2CAP_CS_NO_INFO;

	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
					 &conn->hcon->dst, ACL_LINK);
	if (!pchan) {
		result = L2CAP_CR_BAD_PSM;
		goto sendresp;
	}

	/* Lock order: conn->chan_lock before the (parent) channel lock */
	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(pchan);

	/* Check if the ACL is secure enough (if not SDP) */
	if (psm != cpu_to_le16(L2CAP_PSM_SDP) &&
	    !hci_conn_check_link_mode(conn->hcon)) {
		conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
		result = L2CAP_CR_SEC_BLOCK;
		goto response;
	}

	result = L2CAP_CR_NO_MEM;

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid))
		goto response;

	/* Ask the owner (socket / A2MP) for a new child channel */
	chan = pchan->ops->new_connection(pchan);
	if (!chan)
		goto response;

	/* For certain devices (ex: HID mouse), support for authentication,
	 * pairing and bonding is optional. For such devices, inorder to avoid
	 * the ACL alive for too long after L2CAP disconnection, reset the ACL
	 * disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
	 */
	conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;

	/* Copy the link addresses and CID mapping into the new channel */
	bacpy(&chan->src, &conn->hcon->src);
	bacpy(&chan->dst, &conn->hcon->dst);
	chan->src_type = bdaddr_type(conn->hcon, conn->hcon->src_type);
	chan->dst_type = bdaddr_type(conn->hcon, conn->hcon->dst_type);
	chan->psm  = psm;
	chan->dcid = scid;
	chan->local_amp_id = amp_id;

	__l2cap_chan_add(conn, chan);

	dcid = chan->scid;

	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	chan->ident = cmd->ident;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
		if (l2cap_chan_check_security(chan, false)) {
			if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
				/* User space must accept the connection */
				l2cap_state_change(chan, BT_CONNECT2);
				result = L2CAP_CR_PEND;
				status = L2CAP_CS_AUTHOR_PEND;
				chan->ops->defer(chan);
			} else {
				/* Force pending result for AMP controllers.
				 * The connection will succeed after the
				 * physical link is up.
				 */
				if (amp_id == AMP_ID_BREDR) {
					l2cap_state_change(chan, BT_CONFIG);
					result = L2CAP_CR_SUCCESS;
				} else {
					l2cap_state_change(chan, BT_CONNECT2);
					result = L2CAP_CR_PEND;
				}
				status = L2CAP_CS_NO_INFO;
			}
		} else {
			/* Security procedure still in progress */
			l2cap_state_change(chan, BT_CONNECT2);
			result = L2CAP_CR_PEND;
			status = L2CAP_CS_AUTHEN_PEND;
		}
	} else {
		/* Feature exchange not finished yet; answer pending */
		l2cap_state_change(chan, BT_CONNECT2);
		result = L2CAP_CR_PEND;
		status = L2CAP_CS_NO_INFO;
	}

response:
	l2cap_chan_unlock(pchan);
	mutex_unlock(&conn->chan_lock);
	l2cap_chan_put(pchan);

sendresp:
	rsp.scid = cpu_to_le16(scid);
	rsp.dcid = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);

	/* Start the feature-mask exchange if this connection triggered a
	 * pending result before features were known.
	 */
	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
		struct l2cap_info_req info;
		info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

		l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
			       sizeof(info), &info);
	}

	/* On immediate success, move straight into configuration */
	if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
	    result == L2CAP_CR_SUCCESS) {
		u8 buf[128];
		set_bit(CONF_REQ_SENT, &chan->conf_state);
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf), buf);
		chan->num_conf_req++;
	}

	return chan;
}
3860
3861static int l2cap_connect_req(struct l2cap_conn *conn,
3862 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
3863{
3864 struct hci_dev *hdev = conn->hcon->hdev;
3865 struct hci_conn *hcon = conn->hcon;
3866
3867 if (cmd_len < sizeof(struct l2cap_conn_req))
3868 return -EPROTO;
3869
3870 hci_dev_lock(hdev);
3871 if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
3872 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags))
3873 mgmt_device_connected(hdev, &hcon->dst, hcon->type,
3874 hcon->dst_type, 0, NULL, 0,
3875 hcon->dev_class);
3876 hci_dev_unlock(hdev);
3877
3878 l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
3879 return 0;
3880}
3881
/* Handle a Connection Response or Create Channel Response for one of our
 * outgoing connection attempts.  On success the channel moves to BT_CONFIG
 * and the first Configure Request is sent; a pending result just marks the
 * channel; anything else tears the channel down.
 */
static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				    u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct l2cap_chan *chan;
	u8 req[128];
	int err;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
	       dcid, scid, result, status);

	mutex_lock(&conn->chan_lock);

	/* A response without an scid (e.g. a refusal) is matched to the
	 * request by command ident instead of by CID.
	 */
	if (scid) {
		chan = __l2cap_get_chan_by_scid(conn, scid);
		if (!chan) {
			err = -EBADSLT;
			goto unlock;
		}
	} else {
		chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
		if (!chan) {
			err = -EBADSLT;
			goto unlock;
		}
	}

	err = 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_SUCCESS:
		l2cap_state_change(chan, BT_CONFIG);
		chan->ident = 0;
		chan->dcid = dcid;
		clear_bit(CONF_CONNECT_PEND, &chan->conf_state);

		/* Send our Configure Request exactly once */
		if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
			break;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, req), req);
		chan->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		/* Remote will send a final response later */
		set_bit(CONF_CONNECT_PEND, &chan->conf_state);
		break;

	default:
		/* Any other result is a refusal */
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	l2cap_chan_unlock(chan);

unlock:
	mutex_unlock(&conn->chan_lock);

	return err;
}
3954
3955static inline void set_default_fcs(struct l2cap_chan *chan)
3956{
3957 /* FCS is enabled only in ERTM or streaming mode, if one or both
3958 * sides request it.
3959 */
3960 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
3961 chan->fcs = L2CAP_FCS_NONE;
3962 else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
3963 chan->fcs = L2CAP_FCS_CRC16;
3964}
3965
3966static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
3967 u8 ident, u16 flags)
3968{
3969 struct l2cap_conn *conn = chan->conn;
3970
3971 BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
3972 flags);
3973
3974 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3975 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3976
3977 l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
3978 l2cap_build_conf_rsp(chan, data,
3979 L2CAP_CONF_SUCCESS, flags), data);
3980}
3981
3982static void cmd_reject_invalid_cid(struct l2cap_conn *conn, u8 ident,
3983 u16 scid, u16 dcid)
3984{
3985 struct l2cap_cmd_rej_cid rej;
3986
3987 rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
3988 rej.scid = __cpu_to_le16(scid);
3989 rej.dcid = __cpu_to_le16(dcid);
3990
3991 l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
3992}
3993
/* Handle an incoming Configure Request.  Config data may arrive split
 * across several requests (continuation flag); fragments are accumulated
 * in chan->conf_req until complete, then parsed and answered.  Once both
 * directions are configured the channel becomes ready.
 */
static inline int l2cap_config_req(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rsp[64];
	struct l2cap_chan *chan;
	int len, err = 0;

	if (cmd_len < sizeof(*req))
		return -EPROTO;

	dcid = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	/* Returns the channel locked */
	chan = l2cap_get_chan_by_scid(conn, dcid);
	if (!chan) {
		cmd_reject_invalid_cid(conn, cmd->ident, dcid, 0);
		return 0;
	}

	/* Configuration is only legal in BT_CONFIG / BT_CONNECT2 */
	if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
		cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
				       chan->dcid);
		goto unlock;
	}

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (chan->conf_len + len > sizeof(chan->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
			       L2CAP_CONF_REJECT, flags), rsp);
		goto unlock;
	}

	/* Store config. */
	memcpy(chan->conf_req + chan->conf_len, req->data, len);
	chan->conf_len += len;

	if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
			       L2CAP_CONF_SUCCESS, flags), rsp);
		goto unlock;
	}

	/* Complete config. */
	len = l2cap_parse_conf_req(chan, rsp);
	if (len < 0) {
		/* Unrecoverable option error: drop the connection */
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto unlock;
	}

	chan->ident = cmd->ident;
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
	chan->num_conf_rsp++;

	/* Reset config buffer. */
	chan->conf_len = 0;

	if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
		goto unlock;

	/* Both directions configured: finish channel setup */
	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);

		goto unlock;
	}

	/* We have not configured our side yet; do it now */
	if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
		u8 buf[64];
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf), buf);
		chan->num_conf_req++;
	}

	/* Got Conf Rsp PENDING from remote side and assume we sent
	   Conf Rsp PENDING in the code above */
	if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
	    test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {

		/* check compatibility */

		/* Send rsp for BR/EDR channel; for AMP the final response
		 * waits until the logical link is up, so just record ident.
		 */
		if (!chan->hs_hcon)
			l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
		else
			chan->ident = cmd->ident;
	}

unlock:
	l2cap_chan_unlock(chan);
	return err;
}
4102
/* Handle an incoming Configure Response to one of our Configure Requests.
 * Success applies the negotiated parameters; PENDING defers; UNACCEPT
 * triggers a renegotiation (bounded by L2CAP_CONF_MAX_CONF_RSP); any other
 * result aborts the connection.
 */
static inline int l2cap_config_rsp(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
	u16 scid, flags, result;
	struct l2cap_chan *chan;
	int len = cmd_len - sizeof(*rsp);
	int err = 0;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	scid = __le16_to_cpu(rsp->scid);
	flags = __le16_to_cpu(rsp->flags);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
	       result, len);

	/* Returns the channel locked */
	chan = l2cap_get_chan_by_scid(conn, scid);
	if (!chan)
		return 0;

	switch (result) {
	case L2CAP_CONF_SUCCESS:
		/* Apply RFC / EWS values accepted by the remote */
		l2cap_conf_rfc_get(chan, rsp->data, len);
		clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
		break;

	case L2CAP_CONF_PENDING:
		set_bit(CONF_REM_CONF_PEND, &chan->conf_state);

		if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
			char buf[64];

			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
						   buf, &result);
			if (len < 0) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			/* BR/EDR answers now; AMP waits for the logical
			 * link before confirming.
			 */
			if (!chan->hs_hcon) {
				l2cap_send_efs_conf_rsp(chan, buf, cmd->ident,
							0);
			} else {
				if (l2cap_check_efs(chan)) {
					amp_create_logical_link(chan);
					chan->ident = cmd->ident;
				}
			}
		}
		goto done;

	case L2CAP_CONF_UNACCEPT:
		if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
			char req[64];

			if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			/* throw out any old stored conf requests */
			result = L2CAP_CONF_SUCCESS;
			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
						   req, &result);
			if (len < 0) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			l2cap_send_cmd(conn, l2cap_get_ident(conn),
				       L2CAP_CONF_REQ, len, req);
			chan->num_conf_req++;
			if (result != L2CAP_CONF_SUCCESS)
				goto done;
			break;
		}
		/* Too many renegotiation rounds: fall through and abort */

	default:
		l2cap_chan_set_err(chan, ECONNRESET);

		__set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto done;
	}

	if (flags & L2CAP_CONF_FLAG_CONTINUATION)
		goto done;

	set_bit(CONF_INPUT_DONE, &chan->conf_state);

	/* Both directions configured: finish channel setup */
	if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);
	}

done:
	l2cap_chan_unlock(chan);
	return err;
}
4214
/* Handle an incoming Disconnection Request: acknowledge it, shut down and
 * delete the channel.  The channel reference is held across chan_del so
 * the ops->close callback can run safely after the channel lock is
 * dropped (close may sleep / take other locks).
 */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct l2cap_chan *chan;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	mutex_lock(&conn->chan_lock);

	/* The request's dcid is our local (source) CID */
	chan = __l2cap_get_chan_by_scid(conn, dcid);
	if (!chan) {
		mutex_unlock(&conn->chan_lock);
		cmd_reject_invalid_cid(conn, cmd->ident, dcid, scid);
		return 0;
	}

	l2cap_chan_lock(chan);

	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.scid = cpu_to_le16(chan->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	chan->ops->set_shutdown(chan);

	/* Hold the channel so it survives chan_del until close() is done */
	l2cap_chan_hold(chan);
	l2cap_chan_del(chan, ECONNRESET);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);

	return 0;
}
4261
/* Handle a Disconnection Response to our own disconnect request:
 * delete and close the channel (error 0 — this is a clean shutdown).
 */
static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
	u16 dcid, scid;
	struct l2cap_chan *chan;

	if (cmd_len != sizeof(*rsp))
		return -EPROTO;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);

	mutex_lock(&conn->chan_lock);

	chan = __l2cap_get_chan_by_scid(conn, scid);
	if (!chan) {
		mutex_unlock(&conn->chan_lock);
		return 0;
	}

	l2cap_chan_lock(chan);

	/* Hold the channel so it survives chan_del until close() is done */
	l2cap_chan_hold(chan);
	l2cap_chan_del(chan, 0);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);

	return 0;
}
4300
/* Handle an Information Request: report our feature mask, the fixed
 * channel map, or "not supported" for unknown types.
 */
static inline int l2cap_information_req(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
					u8 *data)
{
	struct l2cap_info_req *req = (struct l2cap_info_req *) data;
	u16 type;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	type = __le16_to_cpu(req->type);

	BT_DBG("type 0x%4.4x", type);

	if (type == L2CAP_IT_FEAT_MASK) {
		u8 buf[8];
		u32 feat_mask = l2cap_feat_mask;
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
		rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		/* Advertise ERTM/streaming/FCS unless disabled, and the
		 * extended features when high speed is enabled.
		 */
		if (!disable_ertm)
			feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
				| L2CAP_FEAT_FCS;
		if (conn->hs_enabled)
			feat_mask |= L2CAP_FEAT_EXT_FLOW
				| L2CAP_FEAT_EXT_WINDOW;

		put_unaligned_le32(feat_mask, rsp->data);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
			       buf);
	} else if (type == L2CAP_IT_FIXED_CHAN) {
		u8 buf[12];
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;

		/* NOTE(review): l2cap_fixed_chan is a file-scope global
		 * updated here per-connection; concurrent connections with
		 * different hs_enabled states could race — confirm intended.
		 */
		if (conn->hs_enabled)
			l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
		else
			l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;

		rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
			       buf);
	} else {
		struct l2cap_info_rsp rsp;
		rsp.type = cpu_to_le16(type);
		rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
			       &rsp);
	}

	return 0;
}
4355
/* Handle an Information Response to our feature/fixed-channel queries.
 * When a feature-mask reply shows fixed-channel support, a second request
 * is chained; otherwise the exchange is marked done and pending
 * connections are started.
 */
static inline int l2cap_information_rsp(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
					u8 *data)
{
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
	u16 type, result;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	type = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);

	/* L2CAP Info req/rsp are unbound to channels, add extra checks */
	if (cmd->ident != conn->info_ident ||
	    conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
		return 0;

	cancel_delayed_work(&conn->info_timer);

	if (result != L2CAP_IR_SUCCESS) {
		/* Remote could not answer; finish the exchange anyway */
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);

		return 0;
	}

	switch (type) {
	case L2CAP_IT_FEAT_MASK:
		conn->feat_mask = get_unaligned_le32(rsp->data);

		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
			/* Chain a fixed-channel query before starting */
			struct l2cap_info_req req;
			req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);

			conn->info_ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, conn->info_ident,
				       L2CAP_INFO_REQ, sizeof(req), &req);
		} else {
			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
			conn->info_ident = 0;

			l2cap_conn_start(conn);
		}
		break;

	case L2CAP_IT_FIXED_CHAN:
		conn->fixed_chan_mask = rsp->data[0];
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
		break;
	}

	return 0;
}
4418
/* Handle an AMP Create Channel Request.  Controller id 0 maps to a plain
 * BR/EDR connect; otherwise the AMP controller is validated, the channel
 * is created via the common connect path, and the high-speed link is
 * attached.  Invalid controllers get a BAD_AMP response.
 */
static int l2cap_create_channel_req(struct l2cap_conn *conn,
				    struct l2cap_cmd_hdr *cmd,
				    u16 cmd_len, void *data)
{
	struct l2cap_create_chan_req *req = data;
	struct l2cap_create_chan_rsp rsp;
	struct l2cap_chan *chan;
	struct hci_dev *hdev;
	u16 psm, scid;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	if (!conn->hs_enabled)
		return -EINVAL;

	psm = le16_to_cpu(req->psm);
	scid = le16_to_cpu(req->scid);

	BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);

	/* For controller id 0 make BR/EDR connection */
	if (req->amp_id == AMP_ID_BREDR) {
		l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
			      req->amp_id);
		return 0;
	}

	/* Validate AMP controller id */
	hdev = hci_dev_get(req->amp_id);
	if (!hdev)
		goto error;

	if (hdev->dev_type != HCI_AMP || !test_bit(HCI_UP, &hdev->flags)) {
		hci_dev_put(hdev);
		goto error;
	}

	chan = l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
			     req->amp_id);
	if (chan) {
		struct amp_mgr *mgr = conn->hcon->amp_mgr;
		struct hci_conn *hs_hcon;

		/* Locate the AMP link to the same remote device */
		hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK,
						  &conn->hcon->dst);
		if (!hs_hcon) {
			hci_dev_put(hdev);
			cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
					       chan->dcid);
			return 0;
		}

		BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon);

		mgr->bredr_chan = chan;
		chan->hs_hcon = hs_hcon;
		/* AMP links never use an L2CAP FCS */
		chan->fcs = L2CAP_FCS_NONE;
		conn->mtu = hdev->block_mtu;
	}

	hci_dev_put(hdev);

	return 0;

error:
	rsp.dcid = 0;
	rsp.scid = cpu_to_le16(scid);
	rsp.result = cpu_to_le16(L2CAP_CR_BAD_AMP);
	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
		       sizeof(rsp), &rsp);

	return 0;
}
4495
4496static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id)
4497{
4498 struct l2cap_move_chan_req req;
4499 u8 ident;
4500
4501 BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id);
4502
4503 ident = l2cap_get_ident(chan->conn);
4504 chan->ident = ident;
4505
4506 req.icid = cpu_to_le16(chan->scid);
4507 req.dest_amp_id = dest_amp_id;
4508
4509 l2cap_send_cmd(chan->conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req),
4510 &req);
4511
4512 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4513}
4514
4515static void l2cap_send_move_chan_rsp(struct l2cap_chan *chan, u16 result)
4516{
4517 struct l2cap_move_chan_rsp rsp;
4518
4519 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4520
4521 rsp.icid = cpu_to_le16(chan->dcid);
4522 rsp.result = cpu_to_le16(result);
4523
4524 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_RSP,
4525 sizeof(rsp), &rsp);
4526}
4527
4528static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result)
4529{
4530 struct l2cap_move_chan_cfm cfm;
4531
4532 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4533
4534 chan->ident = l2cap_get_ident(chan->conn);
4535
4536 cfm.icid = cpu_to_le16(chan->scid);
4537 cfm.result = cpu_to_le16(result);
4538
4539 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_CFM,
4540 sizeof(cfm), &cfm);
4541
4542 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4543}
4544
4545static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid)
4546{
4547 struct l2cap_move_chan_cfm cfm;
4548
4549 BT_DBG("conn %p, icid 0x%4.4x", conn, icid);
4550
4551 cfm.icid = cpu_to_le16(icid);
4552 cfm.result = cpu_to_le16(L2CAP_MC_UNCONFIRMED);
4553
4554 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM,
4555 sizeof(cfm), &cfm);
4556}
4557
4558static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4559 u16 icid)
4560{
4561 struct l2cap_move_chan_cfm_rsp rsp;
4562
4563 BT_DBG("icid 0x%4.4x", icid);
4564
4565 rsp.icid = cpu_to_le16(icid);
4566 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
4567}
4568
/* Detach the high-speed logical link references from the channel.
 * The actual link teardown is not implemented yet (placeholder below).
 */
static void __release_logical_link(struct l2cap_chan *chan)
{
	chan->hs_hchan = NULL;
	chan->hs_hcon = NULL;

	/* Placeholder - release the logical link */
}
4576
/* React to a failed logical link setup.  For a channel still being
 * created this aborts the connection; for a connected channel being
 * moved, the move is cancelled according to our move role.
 */
static void l2cap_logical_fail(struct l2cap_chan *chan)
{
	/* Logical link setup failed */
	if (chan->state != BT_CONNECTED) {
		/* Create channel failure, disconnect */
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	switch (chan->move_role) {
	case L2CAP_MOVE_ROLE_RESPONDER:
		/* Tell the initiator we cannot host the move */
		l2cap_move_done(chan);
		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_SUPP);
		break;
	case L2CAP_MOVE_ROLE_INITIATOR:
		if (chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_COMP ||
		    chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_CFM) {
			/* Remote has only sent pending or
			 * success responses, clean up
			 */
			l2cap_move_done(chan);
		}

		/* Other amp move states imply that the move
		 * has already aborted
		 */
		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
		break;
	}
}
4607
/* Complete channel creation once the AMP logical link is up: attach the
 * link, send the deferred EFS Configure Response (built into the local
 * rsp buffer), and finish setup if input configuration is already done.
 */
static void l2cap_logical_finish_create(struct l2cap_chan *chan,
					struct hci_chan *hchan)
{
	struct l2cap_conf_rsp rsp;

	chan->hs_hchan = hchan;
	chan->hs_hcon->l2cap_data = chan->conn;

	/* chan->ident was stashed when the config response was deferred */
	l2cap_send_efs_conf_rsp(chan, &rsp, chan->ident, 0);

	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
		int err;

		set_default_fcs(chan);

		err = l2cap_ertm_init(chan);
		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);
	}
}
4630
/* Complete a channel move once the AMP logical link is up, advancing the
 * move state machine according to role and pending traffic.
 */
static void l2cap_logical_finish_move(struct l2cap_chan *chan,
				      struct hci_chan *hchan)
{
	chan->hs_hcon = hchan->conn;
	chan->hs_hcon->l2cap_data = chan->conn;

	BT_DBG("move_state %d", chan->move_state);

	switch (chan->move_state) {
	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
		/* Move confirm will be sent after a success
		 * response is received
		 */
		chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
		break;
	case L2CAP_MOVE_WAIT_LOGICAL_CFM:
		/* If we are locally busy, delay until traffic can resume */
		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
		} else if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		} else if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
		}
		break;
	default:
		/* Move was not in expected state, free the channel */
		__release_logical_link(chan);

		chan->move_state = L2CAP_MOVE_STABLE;
	}
}
4664
/* Confirm completion (or failure) of an AMP logical link request.
 * Dispatches to the create-completion path for channels still being set
 * up, or the move-completion path for connected channels.
 *
 * Call with chan locked.
 */
void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
		       u8 status)
{
	BT_DBG("chan %p, hchan %p, status %d", chan, hchan, status);

	if (status) {
		l2cap_logical_fail(chan);
		__release_logical_link(chan);
		return;
	}

	if (chan->state != BT_CONNECTED) {
		/* Ignore logical link if channel is on BR/EDR */
		if (chan->local_amp_id != AMP_ID_BREDR)
			l2cap_logical_finish_create(chan, hchan);
	} else {
		l2cap_logical_finish_move(chan, hchan);
	}
}
4685
4686void l2cap_move_start(struct l2cap_chan *chan)
4687{
4688 BT_DBG("chan %p", chan);
4689
4690 if (chan->local_amp_id == AMP_ID_BREDR) {
4691 if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED)
4692 return;
4693 chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4694 chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
4695 /* Placeholder - start physical link setup */
4696 } else {
4697 chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4698 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4699 chan->move_id = 0;
4700 l2cap_move_setup(chan);
4701 l2cap_send_move_chan_req(chan, 0);
4702 }
4703}
4704
/* Continue channel creation after AMP physical link setup finished.
 * For our outgoing channels, either send a Create Channel Request or
 * fall back to a BR/EDR connect; for incoming channels, answer the
 * peer's Create Channel Request and start configuration on success.
 */
static void l2cap_do_create(struct l2cap_chan *chan, int result,
			    u8 local_amp_id, u8 remote_amp_id)
{
	BT_DBG("chan %p state %s %u -> %u", chan, state_to_string(chan->state),
	       local_amp_id, remote_amp_id);

	/* No L2CAP-level FCS on AMP links */
	chan->fcs = L2CAP_FCS_NONE;

	/* Outgoing channel on AMP */
	if (chan->state == BT_CONNECT) {
		if (result == L2CAP_CR_SUCCESS) {
			chan->local_amp_id = local_amp_id;
			l2cap_send_create_chan_req(chan, remote_amp_id);
		} else {
			/* Revert to BR/EDR connect */
			l2cap_send_conn_req(chan);
		}

		return;
	}

	/* Incoming channel on AMP */
	if (__l2cap_no_conn_pending(chan)) {
		struct l2cap_conn_rsp rsp;
		char buf[128];
		rsp.scid = cpu_to_le16(chan->dcid);
		rsp.dcid = cpu_to_le16(chan->scid);

		if (result == L2CAP_CR_SUCCESS) {
			/* Send successful response */
			rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
		} else {
			/* Send negative response */
			rsp.result = cpu_to_le16(L2CAP_CR_NO_MEM);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
		}

		l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_RSP,
			       sizeof(rsp), &rsp);

		if (result == L2CAP_CR_SUCCESS) {
			/* Move to config and send our Configure Request */
			l2cap_state_change(chan, BT_CONFIG);
			set_bit(CONF_REQ_SENT, &chan->conf_state);
			l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
				       L2CAP_CONF_REQ,
				       l2cap_build_conf_req(chan, buf), buf);
			chan->num_conf_req++;
		}
	}
}
4756
4757static void l2cap_do_move_initiate(struct l2cap_chan *chan, u8 local_amp_id,
4758 u8 remote_amp_id)
4759{
4760 l2cap_move_setup(chan);
4761 chan->move_id = local_amp_id;
4762 chan->move_state = L2CAP_MOVE_WAIT_RSP;
4763
4764 l2cap_send_move_chan_req(chan, remote_amp_id);
4765}
4766
/* As move responder: answer a Move Channel request once the physical
 * link is confirmed.  NOTE: hchan is currently always NULL (the lookup
 * is a placeholder), so only the "not allowed" branch can run until
 * the logical-link lookup is implemented.
 */
static void l2cap_do_move_respond(struct l2cap_chan *chan, int result)
{
	struct hci_chan *hchan = NULL;

	/* Placeholder - get hci_chan for logical link */

	if (hchan) {
		if (hchan->state == BT_CONNECTED) {
			/* Logical link is ready to go */
			chan->hs_hcon = hchan->conn;
			chan->hs_hcon->l2cap_data = chan->conn;
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);

			l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
		} else {
			/* Wait for logical link to be ready */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		}
	} else {
		/* Logical link not available */
		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_ALLOWED);
	}
}
4791
4792static void l2cap_do_move_cancel(struct l2cap_chan *chan, int result)
4793{
4794 if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4795 u8 rsp_result;
4796 if (result == -EINVAL)
4797 rsp_result = L2CAP_MR_BAD_ID;
4798 else
4799 rsp_result = L2CAP_MR_NOT_ALLOWED;
4800
4801 l2cap_send_move_chan_rsp(chan, rsp_result);
4802 }
4803
4804 chan->move_role = L2CAP_MOVE_ROLE_NONE;
4805 chan->move_state = L2CAP_MOVE_STABLE;
4806
4807 /* Restart data transmission */
4808 l2cap_ertm_send(chan);
4809}
4810
/* Invoke with locked chan */
/* Physical link confirmation for an AMP create or move.  Dispatches
 * to create completion (channel not yet connected), move initiate/
 * respond (by role), or move cancel on failure.
 *
 * NOTE(review): the early return for BT_DISCONN/BT_CLOSED drops the
 * channel lock, but the normal path falls through still holding it —
 * verify against the callers in amp.c that this asymmetry is intended.
 */
void __l2cap_physical_cfm(struct l2cap_chan *chan, int result)
{
	u8 local_amp_id = chan->local_amp_id;
	u8 remote_amp_id = chan->remote_amp_id;

	BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
	       chan, result, local_amp_id, remote_amp_id);

	/* Channel already going away - nothing to do */
	if (chan->state == BT_DISCONN || chan->state == BT_CLOSED) {
		l2cap_chan_unlock(chan);
		return;
	}

	if (chan->state != BT_CONNECTED) {
		l2cap_do_create(chan, result, local_amp_id, remote_amp_id);
	} else if (result != L2CAP_MR_SUCCESS) {
		l2cap_do_move_cancel(chan, result);
	} else {
		switch (chan->move_role) {
		case L2CAP_MOVE_ROLE_INITIATOR:
			l2cap_do_move_initiate(chan, local_amp_id,
					       remote_amp_id);
			break;
		case L2CAP_MOVE_ROLE_RESPONDER:
			l2cap_do_move_respond(chan, result);
			break;
		default:
			l2cap_do_move_cancel(chan, result);
			break;
		}
	}
}
4844
/* Handle an incoming Move Channel Request.  Validates that the channel
 * exists, may be moved (dynamic CID, ERTM/streaming mode, policy not
 * BR/EDR-only), that the destination controller is valid and different,
 * and resolves move collisions by bd_addr comparison.  On acceptance
 * this side becomes the move responder.
 *
 * Returns 0 (response sent) or a negative error that triggers a
 * command reject from the caller.
 */
static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
					 struct l2cap_cmd_hdr *cmd,
					 u16 cmd_len, void *data)
{
	struct l2cap_move_chan_req *req = data;
	struct l2cap_move_chan_rsp rsp;
	struct l2cap_chan *chan;
	u16 icid = 0;
	u16 result = L2CAP_MR_NOT_ALLOWED;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	icid = le16_to_cpu(req->icid);

	BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);

	if (!conn->hs_enabled)
		return -EINVAL;

	/* Lookup also locks the channel; unlocked before returning below */
	chan = l2cap_get_chan_by_dcid(conn, icid);
	if (!chan) {
		/* Unknown channel: answer directly on the connection */
		rsp.icid = cpu_to_le16(icid);
		rsp.result = cpu_to_le16(L2CAP_MR_NOT_ALLOWED);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_MOVE_CHAN_RSP,
			       sizeof(rsp), &rsp);
		return 0;
	}

	/* Remember the request ident for the response */
	chan->ident = cmd->ident;

	/* Only dynamic, ERTM/streaming channels without a BR/EDR-only
	 * policy may be moved.
	 */
	if (chan->scid < L2CAP_CID_DYN_START ||
	    chan->chan_policy == BT_CHANNEL_POLICY_BREDR_ONLY ||
	    (chan->mode != L2CAP_MODE_ERTM &&
	     chan->mode != L2CAP_MODE_STREAMING)) {
		result = L2CAP_MR_NOT_ALLOWED;
		goto send_move_response;
	}

	if (chan->local_amp_id == req->dest_amp_id) {
		result = L2CAP_MR_SAME_ID;
		goto send_move_response;
	}

	/* Destination AMP controller must exist, be an AMP, and be up */
	if (req->dest_amp_id != AMP_ID_BREDR) {
		struct hci_dev *hdev;
		hdev = hci_dev_get(req->dest_amp_id);
		if (!hdev || hdev->dev_type != HCI_AMP ||
		    !test_bit(HCI_UP, &hdev->flags)) {
			if (hdev)
				hci_dev_put(hdev);

			result = L2CAP_MR_BAD_ID;
			goto send_move_response;
		}
		hci_dev_put(hdev);
	}

	/* Detect a move collision. Only send a collision response
	 * if this side has "lost", otherwise proceed with the move.
	 * The winner has the larger bd_addr.
	 */
	if ((__chan_is_moving(chan) ||
	     chan->move_role != L2CAP_MOVE_ROLE_NONE) &&
	    bacmp(&conn->hcon->src, &conn->hcon->dst) > 0) {
		result = L2CAP_MR_COLLISION;
		goto send_move_response;
	}

	/* Accept the move: become responder and quiesce the channel */
	chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
	l2cap_move_setup(chan);
	chan->move_id = req->dest_amp_id;
	icid = chan->dcid;

	if (req->dest_amp_id == AMP_ID_BREDR) {
		/* Moving to BR/EDR */
		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
			result = L2CAP_MR_PEND;
		} else {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			result = L2CAP_MR_SUCCESS;
		}
	} else {
		chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
		/* Placeholder - uncomment when amp functions are available */
		/*amp_accept_physical(chan, req->dest_amp_id);*/
		result = L2CAP_MR_PEND;
	}

send_move_response:
	l2cap_send_move_chan_rsp(chan, result);

	l2cap_chan_unlock(chan);

	return 0;
}
4942
/* Advance the move state machine after a success/pending Move Channel
 * Response for @icid.  If the channel cannot be found, send an
 * unconfirmed Move Channel Confirm keyed by the icid alone.
 */
static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
{
	struct l2cap_chan *chan;
	struct hci_chan *hchan = NULL;

	/* Lookup also locks the channel; unlocked before returning below */
	chan = l2cap_get_chan_by_scid(conn, icid);
	if (!chan) {
		l2cap_send_move_chan_cfm_icid(conn, icid);
		return;
	}

	/* A pending result restarts the extended response timer */
	__clear_chan_timer(chan);
	if (result == L2CAP_MR_PEND)
		__set_chan_timer(chan, L2CAP_MOVE_ERTX_TIMEOUT);

	switch (chan->move_state) {
	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
		/* Move confirm will be sent when logical link
		 * is complete.
		 */
		chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		break;
	case L2CAP_MOVE_WAIT_RSP_SUCCESS:
		if (result == L2CAP_MR_PEND) {
			break;
		} else if (test_bit(CONN_LOCAL_BUSY,
				    &chan->conn_state)) {
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
		} else {
			/* Logical link is up or moving to BR/EDR,
			 * proceed with move
			 */
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		}
		break;
	case L2CAP_MOVE_WAIT_RSP:
		/* Moving to AMP */
		if (result == L2CAP_MR_SUCCESS) {
			/* Remote is ready, send confirm immediately
			 * after logical link is ready
			 */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		} else {
			/* Both logical link and move success
			 * are required to confirm
			 */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_COMP;
		}

		/* Placeholder - get hci_chan for logical link */
		/* NOTE: hchan is always NULL until the placeholder above
		 * is implemented, so the code below this point is
		 * currently unreachable past the !hchan check.
		 */
		if (!hchan) {
			/* Logical link not available */
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
			break;
		}

		/* If the logical link is not yet connected, do not
		 * send confirmation.
		 */
		if (hchan->state != BT_CONNECTED)
			break;

		/* Logical link is already ready to go */

		chan->hs_hcon = hchan->conn;
		chan->hs_hcon->l2cap_data = chan->conn;

		if (result == L2CAP_MR_SUCCESS) {
			/* Can confirm now */
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		} else {
			/* Now only need move success
			 * to confirm
			 */
			chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
		}

		l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
		break;
	default:
		/* Any other amp move state means the move failed. */
		chan->move_id = chan->local_amp_id;
		l2cap_move_done(chan);
		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
	}

	l2cap_chan_unlock(chan);
}
5032
/* Handle a failed Move Channel Response.  On a collision the initiator
 * switches to responder and waits for the peer's request; otherwise
 * the move is cancelled.  Either way an unconfirmed Move Channel
 * Confirm is sent.
 */
static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
			    u16 result)
{
	struct l2cap_chan *chan;

	/* Lookup also locks the channel; unlocked before returning below */
	chan = l2cap_get_chan_by_ident(conn, ident);
	if (!chan) {
		/* Could not locate channel, icid is best guess */
		l2cap_send_move_chan_cfm_icid(conn, icid);
		return;
	}

	__clear_chan_timer(chan);

	if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
		if (result == L2CAP_MR_COLLISION) {
			/* Lost the collision: peer will drive the move */
			chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
		} else {
			/* Cleanup - cancel move */
			chan->move_id = chan->local_amp_id;
			l2cap_move_done(chan);
		}
	}

	l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);

	l2cap_chan_unlock(chan);
}
5061
5062static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
5063 struct l2cap_cmd_hdr *cmd,
5064 u16 cmd_len, void *data)
5065{
5066 struct l2cap_move_chan_rsp *rsp = data;
5067 u16 icid, result;
5068
5069 if (cmd_len != sizeof(*rsp))
5070 return -EPROTO;
5071
5072 icid = le16_to_cpu(rsp->icid);
5073 result = le16_to_cpu(rsp->result);
5074
5075 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5076
5077 if (result == L2CAP_MR_SUCCESS || result == L2CAP_MR_PEND)
5078 l2cap_move_continue(conn, icid, result);
5079 else
5080 l2cap_move_fail(conn, cmd->ident, icid, result);
5081
5082 return 0;
5083}
5084
/* Handle an incoming Move Channel Confirm.  Finalizes the move on this
 * side: a confirmed result commits the new controller id (releasing the
 * logical link when moving back to BR/EDR), an unconfirmed result rolls
 * back.  A confirm response is always sent, even for unknown icids.
 */
static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
				      struct l2cap_cmd_hdr *cmd,
				      u16 cmd_len, void *data)
{
	struct l2cap_move_chan_cfm *cfm = data;
	struct l2cap_chan *chan;
	u16 icid, result;

	if (cmd_len != sizeof(*cfm))
		return -EPROTO;

	icid = le16_to_cpu(cfm->icid);
	result = le16_to_cpu(cfm->result);

	BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);

	/* Lookup also locks the channel; unlocked before returning below */
	chan = l2cap_get_chan_by_dcid(conn, icid);
	if (!chan) {
		/* Spec requires a response even if the icid was not found */
		l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
		return 0;
	}

	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) {
		if (result == L2CAP_MC_CONFIRMED) {
			/* Commit the move to the new controller */
			chan->local_amp_id = chan->move_id;
			if (chan->local_amp_id == AMP_ID_BREDR)
				__release_logical_link(chan);
		} else {
			/* Move failed: stay on the current controller */
			chan->move_id = chan->local_amp_id;
		}

		l2cap_move_done(chan);
	}

	l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);

	l2cap_chan_unlock(chan);

	return 0;
}
5126
/* Handle an incoming Move Channel Confirm Response.  Completes the
 * move on the side that sent the confirm: commit the new controller id
 * and release the logical link if the channel ended up back on BR/EDR.
 */
static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
						 struct l2cap_cmd_hdr *cmd,
						 u16 cmd_len, void *data)
{
	struct l2cap_move_chan_cfm_rsp *rsp = data;
	struct l2cap_chan *chan;
	u16 icid;

	if (cmd_len != sizeof(*rsp))
		return -EPROTO;

	icid = le16_to_cpu(rsp->icid);

	BT_DBG("icid 0x%4.4x", icid);

	/* Lookup also locks the channel; unlocked before returning below */
	chan = l2cap_get_chan_by_scid(conn, icid);
	if (!chan)
		return 0;

	__clear_chan_timer(chan);

	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) {
		chan->local_amp_id = chan->move_id;

		if (chan->local_amp_id == AMP_ID_BREDR && chan->hs_hchan)
			__release_logical_link(chan);

		l2cap_move_done(chan);
	}

	l2cap_chan_unlock(chan);

	return 0;
}
5161
/* Handle an LE Connection Parameter Update Request.  Only valid when
 * we are the master; the parameters are validated, a response is sent,
 * and on acceptance the controller connection update is issued and the
 * new parameters are reported to the management interface.
 */
static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
					      struct l2cap_cmd_hdr *cmd,
					      u16 cmd_len, u8 *data)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_conn_param_update_req *req;
	struct l2cap_conn_param_update_rsp rsp;
	u16 min, max, latency, to_multiplier;
	int err;

	/* Only the master may be asked to update connection parameters */
	if (hcon->role != HCI_ROLE_MASTER)
		return -EINVAL;

	if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
		return -EPROTO;

	req = (struct l2cap_conn_param_update_req *) data;
	min = __le16_to_cpu(req->min);
	max = __le16_to_cpu(req->max);
	latency = __le16_to_cpu(req->latency);
	to_multiplier = __le16_to_cpu(req->to_multiplier);

	BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
	       min, max, latency, to_multiplier);

	memset(&rsp, 0, sizeof(rsp));

	err = hci_check_conn_params(min, max, latency, to_multiplier);
	if (err)
		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
	else
		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
		       sizeof(rsp), &rsp);

	if (!err) {
		u8 store_hint;

		/* Apply the update and tell mgmt whether to store it */
		store_hint = hci_le_conn_update(hcon, min, max, latency,
						to_multiplier);
		mgmt_new_conn_param(hcon->hdev, &hcon->dst, hcon->dst_type,
				    store_hint, min, max, latency,
				    to_multiplier);

	}

	return 0;
}
5211
/* Handle an LE credit-based connection response.  On success the
 * channel adopts the peer's dcid, MTU, MPS and initial credits and
 * becomes ready; any other result tears it down.  The minimum legal
 * MTU/MPS for LE CoC is 23.
 */
static int l2cap_le_connect_rsp(struct l2cap_conn *conn,
				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				u8 *data)
{
	struct l2cap_le_conn_rsp *rsp = (struct l2cap_le_conn_rsp *) data;
	u16 dcid, mtu, mps, credits, result;
	struct l2cap_chan *chan;
	int err;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	dcid = __le16_to_cpu(rsp->dcid);
	mtu = __le16_to_cpu(rsp->mtu);
	mps = __le16_to_cpu(rsp->mps);
	credits = __le16_to_cpu(rsp->credits);
	result = __le16_to_cpu(rsp->result);

	/* Successful responses must carry at least the minimum MTU/MPS */
	if (result == L2CAP_CR_SUCCESS && (mtu < 23 || mps < 23))
		return -EPROTO;

	BT_DBG("dcid 0x%4.4x mtu %u mps %u credits %u result 0x%2.2x",
	       dcid, mtu, mps, credits, result);

	/* chan_lock is taken under conn->chan_lock, matching the
	 * locking order used elsewhere in this file.
	 */
	mutex_lock(&conn->chan_lock);

	chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
	if (!chan) {
		err = -EBADSLT;
		goto unlock;
	}

	err = 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_SUCCESS:
		chan->ident = 0;
		chan->dcid = dcid;
		chan->omtu = mtu;
		chan->remote_mps = mps;
		chan->tx_credits = credits;
		l2cap_chan_ready(chan);
		break;

	default:
		/* Any non-success result refuses the connection */
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	l2cap_chan_unlock(chan);

unlock:
	mutex_unlock(&conn->chan_lock);

	return err;
}
5270
/* Dispatch one BR/EDR signaling command to its handler.  Returns a
 * negative error for unknown or malformed commands, which causes the
 * caller to send a command reject.
 */
static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
				      struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				      u8 *data)
{
	int err = 0;

	switch (cmd->code) {
	case L2CAP_COMMAND_REJ:
		l2cap_command_rej(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_REQ:
		err = l2cap_connect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_RSP:
	case L2CAP_CREATE_CHAN_RSP:
		/* Connect and Create Channel responses share a handler */
		l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONF_REQ:
		err = l2cap_config_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONF_RSP:
		l2cap_config_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_REQ:
		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_RSP:
		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_ECHO_REQ:
		/* Echo the payload straight back */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
		break;

	case L2CAP_ECHO_RSP:
		break;

	case L2CAP_INFO_REQ:
		err = l2cap_information_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_INFO_RSP:
		l2cap_information_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CREATE_CHAN_REQ:
		err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_REQ:
		err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_RSP:
		l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_CFM:
		err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_CFM_RSP:
		l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
		break;

	default:
		BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
		err = -EINVAL;
		break;
	}

	return err;
}
5350
/* Handle an LE credit-based connection request.  Finds a listening
 * channel for the PSM, checks security, rejects duplicate scids,
 * creates and initializes the new channel, and sends the LE connect
 * response (deferred responses send no reply yet).
 */
static int l2cap_le_connect_req(struct l2cap_conn *conn,
				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				u8 *data)
{
	struct l2cap_le_conn_req *req = (struct l2cap_le_conn_req *) data;
	struct l2cap_le_conn_rsp rsp;
	struct l2cap_chan *chan, *pchan;
	u16 dcid, scid, credits, mtu, mps;
	__le16 psm;
	u8 result;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	scid = __le16_to_cpu(req->scid);
	mtu = __le16_to_cpu(req->mtu);
	mps = __le16_to_cpu(req->mps);
	psm = req->psm;
	dcid = 0;
	credits = 0;

	/* LE CoC mandates MTU and MPS of at least 23 */
	if (mtu < 23 || mps < 23)
		return -EPROTO;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x mtu %u mps %u", __le16_to_cpu(psm),
	       scid, mtu, mps);

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
					 &conn->hcon->dst, LE_LINK);
	if (!pchan) {
		result = L2CAP_CR_BAD_PSM;
		chan = NULL;
		goto response;
	}

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(pchan);

	if (!smp_sufficient_security(conn->hcon, pchan->sec_level)) {
		result = L2CAP_CR_AUTHENTICATION;
		chan = NULL;
		goto response_unlock;
	}

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid)) {
		result = L2CAP_CR_NO_MEM;
		chan = NULL;
		goto response_unlock;
	}

	chan = pchan->ops->new_connection(pchan);
	if (!chan) {
		result = L2CAP_CR_NO_MEM;
		goto response_unlock;
	}

	l2cap_le_flowctl_init(chan);

	/* Inherit addresses and parameters from the link and request */
	bacpy(&chan->src, &conn->hcon->src);
	bacpy(&chan->dst, &conn->hcon->dst);
	chan->src_type = bdaddr_type(conn->hcon, conn->hcon->src_type);
	chan->dst_type = bdaddr_type(conn->hcon, conn->hcon->dst_type);
	chan->psm = psm;
	chan->dcid = scid;
	chan->omtu = mtu;
	chan->remote_mps = mps;
	chan->tx_credits = __le16_to_cpu(req->credits);

	__l2cap_chan_add(conn, chan);
	dcid = chan->scid;
	credits = chan->rx_credits;

	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	chan->ident = cmd->ident;

	if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
		/* Userspace decides later; reply will be sent on accept */
		l2cap_state_change(chan, BT_CONNECT2);
		result = L2CAP_CR_PEND;
		chan->ops->defer(chan);
	} else {
		l2cap_chan_ready(chan);
		result = L2CAP_CR_SUCCESS;
	}

response_unlock:
	l2cap_chan_unlock(pchan);
	mutex_unlock(&conn->chan_lock);
	l2cap_chan_put(pchan);

	/* Deferred setup sends no response yet */
	if (result == L2CAP_CR_PEND)
		return 0;

response:
	if (chan) {
		rsp.mtu = cpu_to_le16(chan->imtu);
		rsp.mps = cpu_to_le16(chan->mps);
	} else {
		rsp.mtu = 0;
		rsp.mps = 0;
	}

	rsp.dcid = cpu_to_le16(dcid);
	rsp.credits = cpu_to_le16(credits);
	rsp.result = cpu_to_le16(result);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_LE_CONN_RSP, sizeof(rsp), &rsp);

	return 0;
}
5463
/* Handle an LE Flow Control Credit packet: add the peer's credits to
 * the channel, guard against the credit counter overflowing past
 * LE_FLOWCTL_MAX_CREDITS, and flush any queued outgoing SDU fragments
 * that the new credits allow.
 */
static inline int l2cap_le_credits(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_le_credits *pkt;
	struct l2cap_chan *chan;
	u16 cid, credits, max_credits;

	if (cmd_len != sizeof(*pkt))
		return -EPROTO;

	pkt = (struct l2cap_le_credits *) data;
	cid = __le16_to_cpu(pkt->cid);
	credits = __le16_to_cpu(pkt->credits);

	BT_DBG("cid 0x%4.4x credits 0x%4.4x", cid, credits);

	/* Lookup also locks the channel; unlocked before returning below */
	chan = l2cap_get_chan_by_dcid(conn, cid);
	if (!chan)
		return -EBADSLT;

	max_credits = LE_FLOWCTL_MAX_CREDITS - chan->tx_credits;
	if (credits > max_credits) {
		BT_ERR("LE credits overflow");
		/* Credit overflow is a protocol violation: disconnect */
		l2cap_send_disconn_req(chan, ECONNRESET);

		/* Return 0 so that we don't trigger an unnecessary
		 * command reject packet.
		 */
		return 0;
	}

	chan->tx_credits += credits;

	/* Send as many queued fragments as the credits permit */
	while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
		l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
		chan->tx_credits--;
	}

	/* Credits left over: let the owner resume sending */
	if (chan->tx_credits)
		chan->ops->resume(chan);

	l2cap_chan_unlock(chan);

	return 0;
}
5510
5511static inline int l2cap_le_command_rej(struct l2cap_conn *conn,
5512 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5513 u8 *data)
5514{
5515 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
5516 struct l2cap_chan *chan;
5517
5518 if (cmd_len < sizeof(*rej))
5519 return -EPROTO;
5520
5521 mutex_lock(&conn->chan_lock);
5522
5523 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
5524 if (!chan)
5525 goto done;
5526
5527 l2cap_chan_lock(chan);
5528 l2cap_chan_del(chan, ECONNREFUSED);
5529 l2cap_chan_unlock(chan);
5530
5531done:
5532 mutex_unlock(&conn->chan_lock);
5533 return 0;
5534}
5535
/* Dispatch one LE signaling command to its handler.  Unknown commands
 * return -EINVAL so the caller sends a command reject.
 */
static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	int err = 0;

	switch (cmd->code) {
	case L2CAP_COMMAND_REJ:
		l2cap_le_command_rej(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_PARAM_UPDATE_REQ:
		err = l2cap_conn_param_update_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_PARAM_UPDATE_RSP:
		/* Nothing to do for the update response */
		break;

	case L2CAP_LE_CONN_RSP:
		l2cap_le_connect_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_LE_CONN_REQ:
		err = l2cap_le_connect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_LE_CREDITS:
		err = l2cap_le_credits(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_REQ:
		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_RSP:
		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
		break;

	default:
		BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
		err = -EINVAL;
		break;
	}

	return err;
}
5582
/* Process one skb from the LE signaling channel.  LE carries exactly
 * one command per PDU, so the whole remaining skb must match the
 * command length.  Consumes the skb.
 */
static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
					struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_cmd_hdr *cmd;
	u16 len;
	int err;

	if (hcon->type != LE_LINK)
		goto drop;

	if (skb->len < L2CAP_CMD_HDR_SIZE)
		goto drop;

	cmd = (void *) skb->data;
	skb_pull(skb, L2CAP_CMD_HDR_SIZE);

	len = le16_to_cpu(cmd->len);

	BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, cmd->ident);

	/* Ident 0 is reserved and length must cover the whole PDU */
	if (len != skb->len || !cmd->ident) {
		BT_DBG("corrupted command");
		goto drop;
	}

	err = l2cap_le_sig_cmd(conn, cmd, len, skb->data);
	if (err) {
		struct l2cap_cmd_rej_unk rej;

		/* NOTE(review): message text is misleading - err covers
		 * any command failure, not only link-type mismatches.
		 */
		BT_ERR("Wrong link type (%d)", err);

		rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
			       sizeof(rej), &rej);
	}

drop:
	kfree_skb(skb);
}
5623
/* Process one skb from the BR/EDR signaling channel.  Unlike LE, a
 * single PDU may contain several commands back to back; each is parsed
 * and dispatched in turn.  Consumes the skb.
 */
static inline void l2cap_sig_channel(struct l2cap_conn *conn,
				     struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	u8 *data = skb->data;
	int len = skb->len;
	struct l2cap_cmd_hdr cmd;
	int err;

	/* Give raw sockets a copy before parsing */
	l2cap_raw_recv(conn, skb);

	if (hcon->type != ACL_LINK)
		goto drop;

	while (len >= L2CAP_CMD_HDR_SIZE) {
		u16 cmd_len;
		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
		data += L2CAP_CMD_HDR_SIZE;
		len -= L2CAP_CMD_HDR_SIZE;

		cmd_len = le16_to_cpu(cmd.len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len,
		       cmd.ident);

		/* Ident 0 is reserved; a command cannot extend past
		 * the remaining payload.
		 */
		if (cmd_len > len || !cmd.ident) {
			BT_DBG("corrupted command");
			break;
		}

		err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
		if (err) {
			struct l2cap_cmd_rej_unk rej;

			/* NOTE(review): message text is misleading - err
			 * covers any command failure, not only link-type
			 * mismatches.
			 */
			BT_ERR("Wrong link type (%d)", err);

			rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ,
				       sizeof(rej), &rej);
		}

		/* Advance to the next command in the same PDU */
		data += cmd_len;
		len -= cmd_len;
	}

drop:
	kfree_skb(skb);
}
5672
/* Verify and strip the trailing CRC16 FCS of an ERTM/streaming frame.
 * Returns 0 when FCS is disabled or matches, -EBADMSG on mismatch.
 */
static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
{
	u16 our_fcs, rcv_fcs;
	int hdr_size;

	/* The CRC also covers the L2CAP header preceding skb->data */
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		hdr_size = L2CAP_EXT_HDR_SIZE;
	else
		hdr_size = L2CAP_ENH_HDR_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16) {
		/* Trim the FCS off first; the bytes are still present
		 * in the buffer, so they can be read at data + len.
		 */
		skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
		rcv_fcs = get_unaligned_le16(skb->data + skb->len);
		our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);

		if (our_fcs != rcv_fcs)
			return -EBADMSG;
	}
	return 0;
}
5693
/* Answer a poll (P-bit) by sending a frame carrying the F-bit: RNR if
 * locally busy, otherwise pending I-frames, falling back to an RR
 * s-frame if no I-frame carried the F-bit.
 */
static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;

	BT_DBG("chan %p", chan);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.final = 1;
	control.reqseq = chan->buffer_seq;
	set_bit(CONN_SEND_FBIT, &chan->conn_state);

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		control.super = L2CAP_SUPER_RNR;
		l2cap_send_sframe(chan, &control);
	}

	/* Peer just un-busied and we still have unacked frames:
	 * re-arm the retransmission timer.
	 */
	if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
	    chan->unacked_frames > 0)
		__set_retrans_timer(chan);

	/* Send pending iframes */
	l2cap_ertm_send(chan);

	if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
		/* F-bit wasn't sent in an s-frame or i-frame yet, so
		 * send it now.
		 */
		control.super = L2CAP_SUPER_RR;
		l2cap_send_sframe(chan, &control);
	}
}
5727
/* Append @new_frag to @skb's frag_list, tracking the tail through
 * *@last_frag, and keep skb's length accounting consistent.
 */
static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
			    struct sk_buff **last_frag)
{
	/* skb->len reflects data in skb as well as all fragments
	 * skb->data_len reflects only data in fragments
	 */
	if (!skb_has_frag_list(skb))
		skb_shinfo(skb)->frag_list = new_frag;

	new_frag->next = NULL;

	/* For the first fragment *last_frag is @skb itself, so this
	 * also sets skb->next; thereafter it chains frag to frag.
	 */
	(*last_frag)->next = new_frag;
	*last_frag = new_frag;

	skb->len += new_frag->len;
	skb->data_len += new_frag->len;
	skb->truesize += new_frag->truesize;
}
5746
/* Reassemble a segmented SDU from ERTM I-frames according to the SAR
 * bits in @control.  Takes ownership of @skb: it is either delivered
 * upward, stashed as a partial SDU, or freed on error.  Any error also
 * discards the partial SDU being assembled.  Returns 0 on success or a
 * negative errno (-EINVAL for SAR sequence violations, -EMSGSIZE for
 * oversized SDUs, or the recv callback's error).
 */
static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
				struct l2cap_ctrl *control)
{
	int err = -EINVAL;

	switch (control->sar) {
	case L2CAP_SAR_UNSEGMENTED:
		/* Unsegmented frame while assembling is a protocol error */
		if (chan->sdu)
			break;

		err = chan->ops->recv(chan, skb);
		break;

	case L2CAP_SAR_START:
		/* A second START while assembling is a protocol error */
		if (chan->sdu)
			break;

		/* First two payload bytes carry the total SDU length */
		chan->sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, L2CAP_SDULEN_SIZE);

		if (chan->sdu_len > chan->imtu) {
			err = -EMSGSIZE;
			break;
		}

		/* START claiming to already hold the whole SDU: error
		 * (err stays -EINVAL and skb is freed below).
		 */
		if (skb->len >= chan->sdu_len)
			break;

		chan->sdu = skb;
		chan->sdu_last_frag = skb;

		/* skb now owned by chan->sdu */
		skb = NULL;
		err = 0;
		break;

	case L2CAP_SAR_CONTINUE:
		/* CONTINUE without a START is a protocol error */
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* Overrunning the declared SDU length is an error */
		if (chan->sdu->len >= chan->sdu_len)
			break;

		err = 0;
		break;

	case L2CAP_SAR_END:
		/* END without a START is a protocol error */
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* Total length must match exactly what START declared */
		if (chan->sdu->len != chan->sdu_len)
			break;

		err = chan->ops->recv(chan, chan->sdu);

		if (!err) {
			/* Reassembly complete */
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;
		}
		break;
	}

	if (err) {
		/* Drop both the offending frame (if still owned here)
		 * and any partially assembled SDU.
		 */
		kfree_skb(skb);
		kfree_skb(chan->sdu);
		chan->sdu = NULL;
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;
	}

	return err;
}
5828
/* Re-segment outgoing data after an AMP move changes the MPS.
 * Not yet implemented; always reports success.
 */
static int l2cap_resegment(struct l2cap_chan *chan)
{
	/* Placeholder */
	return 0;
}
5834
5835void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
5836{
5837 u8 event;
5838
5839 if (chan->mode != L2CAP_MODE_ERTM)
5840 return;
5841
5842 event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
5843 l2cap_tx(chan, NULL, NULL, event);
5844}
5845
static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
{
	int err = 0;
	/* Pass sequential frames to l2cap_reassemble_sdu()
	 * until a gap is encountered.
	 */

	BT_DBG("chan %p", chan);

	/* Stop draining as soon as we go locally busy */
	while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		struct sk_buff *skb;
		BT_DBG("Searching for skb with txseq %d (queue len %d)",
		       chan->buffer_seq, skb_queue_len(&chan->srej_q));

		skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);

		/* Gap in the sequence: wait for the missing frame */
		if (!skb)
			break;

		skb_unlink(skb, &chan->srej_q);
		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
		err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->control);
		if (err)
			break;
	}

	/* SREJ queue fully drained: back to normal receive state */
	if (skb_queue_empty(&chan->srej_q)) {
		chan->rx_state = L2CAP_RX_STATE_RECV;
		l2cap_send_ack(chan);
	}

	return err;
}
5879
/* Handle a received SREJ s-frame: retransmit the single requested
 * I-frame, enforcing the retry limit, and track SREJ state to avoid
 * duplicate retransmissions when both P and F bits are in flight.
 */
static void l2cap_handle_srej(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* SREJ for the next unsent sequence number is invalid */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	if (skb == NULL) {
		BT_DBG("Seq %d not available for retransmission",
		       control->reqseq);
		return;
	}

	/* max_tx of 0 means unlimited retransmissions */
	if (chan->max_tx != 0 && bt_cb(skb)->control.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	if (control->poll) {
		l2cap_pass_to_tx(chan, control);

		/* Poll demands a final bit in our reply */
		set_bit(CONN_SEND_FBIT, &chan->conn_state);
		l2cap_retransmit(chan, control);
		l2cap_ertm_send(chan);

		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
			set_bit(CONN_SREJ_ACT, &chan->conn_state);
			chan->srej_save_reqseq = control->reqseq;
		}
	} else {
		l2cap_pass_to_tx_fbit(chan, control);

		if (control->final) {
			/* Skip the retransmit if this final answers an
			 * SREJ we already acted on for this sequence.
			 */
			if (chan->srej_save_reqseq != control->reqseq ||
			    !test_and_clear_bit(CONN_SREJ_ACT,
						&chan->conn_state))
				l2cap_retransmit(chan, control);
		} else {
			l2cap_retransmit(chan, control);
			if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
				set_bit(CONN_SREJ_ACT, &chan->conn_state);
				chan->srej_save_reqseq = control->reqseq;
			}
		}
	}
}
5937
/* Handle a received REJ s-frame: retransmit everything from reqseq
 * onward, enforcing the retry limit, and avoid duplicate bulk
 * retransmissions via the REJ-acted flag.
 */
static void l2cap_handle_rej(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* REJ for the next unsent sequence number is invalid */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	/* max_tx of 0 means unlimited retransmissions */
	if (chan->max_tx && skb &&
	    bt_cb(skb)->control.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	l2cap_pass_to_tx(chan, control);

	if (control->final) {
		/* Skip the retransmit if this final answers a REJ we
		 * already acted on.
		 */
		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
			l2cap_retransmit_all(chan, control);
	} else {
		l2cap_retransmit_all(chan, control);
		l2cap_ertm_send(chan);
		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
			set_bit(CONN_REJ_ACT, &chan->conn_state);
	}
}
5974
/* Classify a received I-frame's txseq relative to the receive window.
 * The result (expected / duplicate / unexpected / invalid, plus the
 * SREJ variants) drives the RECV and SREJ_SENT rx state machines.
 */
static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
{
	BT_DBG("chan %p, txseq %d", chan, txseq);

	BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
	       chan->expected_tx_seq);

	/* Additional classifications only apply while SREJs are pending */
	if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			/* See notes below regarding "double poll" and
			 * invalid packets.
			 */
			if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
				BT_DBG("Invalid/Ignore - after SREJ");
				return L2CAP_TXSEQ_INVALID_IGNORE;
			} else {
				BT_DBG("Invalid - in window after SREJ sent");
				return L2CAP_TXSEQ_INVALID;
			}
		}

		if (chan->srej_list.head == txseq) {
			BT_DBG("Expected SREJ");
			return L2CAP_TXSEQ_EXPECTED_SREJ;
		}

		if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
			BT_DBG("Duplicate SREJ - txseq already stored");
			return L2CAP_TXSEQ_DUPLICATE_SREJ;
		}

		if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
			BT_DBG("Unexpected SREJ - not requested");
			return L2CAP_TXSEQ_UNEXPECTED_SREJ;
		}
	}

	if (chan->expected_tx_seq == txseq) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		} else {
			BT_DBG("Expected");
			return L2CAP_TXSEQ_EXPECTED;
		}
	}

	/* txseq closer to last_acked_seq than expected_tx_seq means the
	 * frame was already delivered.
	 */
	if (__seq_offset(chan, txseq, chan->last_acked_seq) <
	    __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
		BT_DBG("Duplicate - expected_tx_seq later than txseq");
		return L2CAP_TXSEQ_DUPLICATE;
	}

	if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
		/* A source of invalid packets is a "double poll" condition,
		 * where delays cause us to send multiple poll packets. If
		 * the remote stack receives and processes both polls,
		 * sequence numbers can wrap around in such a way that a
		 * resent frame has a sequence number that looks like new data
		 * with a sequence gap. This would trigger an erroneous SREJ
		 * request.
		 *
		 * Fortunately, this is impossible with a tx window that's
		 * less than half of the maximum sequence number, which allows
		 * invalid frames to be safely ignored.
		 *
		 * With tx window sizes greater than half of the tx window
		 * maximum, the frame is invalid and cannot be ignored. This
		 * causes a disconnect.
		 */

		if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
			BT_DBG("Invalid/Ignore - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID_IGNORE;
		} else {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		}
	} else {
		BT_DBG("Unexpected - txseq indicates missing frames");
		return L2CAP_TXSEQ_UNEXPECTED;
	}
}
6060
/* ERTM receive state machine handler for the normal RECV state.
 * Processes incoming I-frames and RR/RNR/REJ/SREJ events; may
 * transition the channel to SREJ_SENT when a sequence gap is seen.
 * Frames not queued for reassembly (skb_in_use stays false) are
 * freed here before returning.
 */
static int l2cap_rx_state_recv(struct l2cap_chan *chan,
			       struct l2cap_ctrl *control,
			       struct sk_buff *skb, u8 event)
{
	int err = 0;
	bool skb_in_use = false;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, control->txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			/* Let the tx side process the piggybacked ack */
			l2cap_pass_to_tx(chan, control);

			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding expected seq %d",
				       control->txseq);
				break;
			}

			chan->expected_tx_seq = __next_seq(chan,
							   control->txseq);

			chan->buffer_seq = chan->expected_tx_seq;
			skb_in_use = true;

			err = l2cap_reassemble_sdu(chan, skb, control);
			if (err)
				break;

			if (control->final) {
				/* F=1 on an unanswered poll: retransmit
				 * everything that is still outstanding.
				 */
				if (!test_and_clear_bit(CONN_REJ_ACT,
							&chan->conn_state)) {
					control->final = 0;
					l2cap_retransmit_all(chan, control);
					l2cap_ertm_send(chan);
				}
			}

			if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
				l2cap_send_ack(chan);
			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			l2cap_pass_to_tx(chan, control);

			/* Can't issue SREJ frames in the local busy state.
			 * Drop this frame, it will be seen as missing
			 * when local busy is exited.
			 */
			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding unexpected seq %d",
				       control->txseq);
				break;
			}

			/* There was a gap in the sequence, so an SREJ
			 * must be sent for each missing frame.  The
			 * current frame is stored for later use.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			clear_bit(CONN_SREJ_ACT, &chan->conn_state);
			l2cap_seq_list_clear(&chan->srej_list);
			l2cap_send_srej(chan, control->txseq);

			chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			/* Already delivered; only process the ack info */
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(chan, ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state) &&
			    !__chan_is_moving(chan)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			l2cap_send_i_or_rr_or_rnr(chan);
		} else {
			/* Remote busy just cleared: restart the retransmit
			 * timer if frames are still unacked.
			 */
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_ertm_send(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		/* Peer cannot receive; stop retransmitting until RR */
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		if (control && control->poll) {
			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_rr_or_rnr(chan, 0);
		}
		__clear_retrans_timer(chan);
		l2cap_seq_list_clear(&chan->retrans_list);
		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	default:
		break;
	}

	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
6194
/* ERTM receive state machine handler for the SREJ_SENT state, i.e.
 * while selective-reject requests for missing frames are outstanding.
 * Incoming I-frames are buffered in srej_q until the gaps are filled;
 * frames not queued (skb_in_use stays false) are freed on exit.
 */
static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
				    struct l2cap_ctrl *control,
				    struct sk_buff *skb, u8 event)
{
	int err = 0;
	u16 txseq = control->txseq;
	bool skb_in_use = false;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			/* Keep frame for reassembly later */
			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			chan->expected_tx_seq = __next_seq(chan, txseq);
			break;
		case L2CAP_TXSEQ_EXPECTED_SREJ:
			/* The head of the SREJ list arrived; queue it and
			 * try to deliver any now-contiguous frames.
			 */
			l2cap_seq_list_pop(&chan->srej_list);

			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			err = l2cap_rx_queued_iframes(chan);
			if (err)
				break;

			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			/* Got a frame that can't be reassembled yet.
			 * Save it for later, and send SREJs to cover
			 * the missing frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_UNEXPECTED_SREJ:
			/* This frame was requested with an SREJ, but
			 * some expected retransmitted frames are
			 * missing.  Request retransmission of missing
			 * SREJ'd frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej_list(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_DUPLICATE_SREJ:
			/* We've already queued this frame.  Drop this copy. */
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			/* Expecting a later sequence number, so this frame
			 * was already received.  Ignore it completely.
			 */
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(chan, ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			if (!test_and_clear_bit(CONN_REJ_ACT,
						&chan->conn_state)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames) {
				__set_retrans_timer(chan);
			}

			/* Answer the poll by repeating the tail SREJ */
			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_srej_tail(chan);
		} else {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_send_ack(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		if (control->poll) {
			l2cap_send_srej_tail(chan);
		} else {
			struct l2cap_ctrl rr_control;
			memset(&rr_control, 0, sizeof(rr_control));
			rr_control.sframe = 1;
			rr_control.super = L2CAP_SUPER_RR;
			rr_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &rr_control);
		}

		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	}

	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
6337
6338static int l2cap_finish_move(struct l2cap_chan *chan)
6339{
6340 BT_DBG("chan %p", chan);
6341
6342 chan->rx_state = L2CAP_RX_STATE_RECV;
6343
6344 if (chan->hs_hcon)
6345 chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
6346 else
6347 chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6348
6349 return l2cap_resegment(chan);
6350}
6351
/* Receive handler for the WAIT_P state during a channel move: nothing
 * but a poll (P=1) frame is acceptable here.  Once the poll arrives,
 * the tx queue is rewound to the receiver's expectation, the move is
 * finished, and the event is replayed through the RECV state handler.
 */
static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control,
				 struct sk_buff *skb, u8 event)
{
	int err;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	if (!control->poll)
		return -EPROTO;

	l2cap_process_reqseq(chan, control->reqseq);

	/* Restart transmission from the front of the tx queue */
	if (!skb_queue_empty(&chan->tx_q))
		chan->tx_send_head = skb_peek(&chan->tx_q);
	else
		chan->tx_send_head = NULL;

	/* Rewind next_tx_seq to the point expected
	 * by the receiver.
	 */
	chan->next_tx_seq = control->reqseq;
	chan->unacked_frames = 0;

	err = l2cap_finish_move(chan);
	if (err)
		return err;

	set_bit(CONN_SEND_FBIT, &chan->conn_state);
	l2cap_send_i_or_rr_or_rnr(chan);

	/* An I-frame carrying the poll is not valid in this state */
	if (event == L2CAP_EV_RECV_IFRAME)
		return -EPROTO;

	return l2cap_rx_state_recv(chan, control, NULL, event);
}
6389
/* Receive handler for the WAIT_F state during a channel move: only a
 * final (F=1) frame is acceptable.  On receipt, the tx queue is
 * rewound, the MTU of the new link is adopted, pending data is
 * resegmented, and the frame is replayed through the RECV handler.
 */
static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control,
				 struct sk_buff *skb, u8 event)
{
	int err;

	if (!control->final)
		return -EPROTO;

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	chan->rx_state = L2CAP_RX_STATE_RECV;
	l2cap_process_reqseq(chan, control->reqseq);

	/* Restart transmission from the front of the tx queue */
	if (!skb_queue_empty(&chan->tx_q))
		chan->tx_send_head = skb_peek(&chan->tx_q);
	else
		chan->tx_send_head = NULL;

	/* Rewind next_tx_seq to the point expected
	 * by the receiver.
	 */
	chan->next_tx_seq = control->reqseq;
	chan->unacked_frames = 0;

	/* Use the AMP block MTU if the channel moved to a high-speed
	 * link, the ACL MTU otherwise.
	 */
	if (chan->hs_hcon)
		chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
	else
		chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;

	err = l2cap_resegment(chan);

	if (!err)
		err = l2cap_rx_state_recv(chan, control, skb, event);

	return err;
}
6427
6428static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
6429{
6430 /* Make sure reqseq is for a packet that has been sent but not acked */
6431 u16 unacked;
6432
6433 unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
6434 return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
6435}
6436
6437static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6438 struct sk_buff *skb, u8 event)
6439{
6440 int err = 0;
6441
6442 BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
6443 control, skb, event, chan->rx_state);
6444
6445 if (__valid_reqseq(chan, control->reqseq)) {
6446 switch (chan->rx_state) {
6447 case L2CAP_RX_STATE_RECV:
6448 err = l2cap_rx_state_recv(chan, control, skb, event);
6449 break;
6450 case L2CAP_RX_STATE_SREJ_SENT:
6451 err = l2cap_rx_state_srej_sent(chan, control, skb,
6452 event);
6453 break;
6454 case L2CAP_RX_STATE_WAIT_P:
6455 err = l2cap_rx_state_wait_p(chan, control, skb, event);
6456 break;
6457 case L2CAP_RX_STATE_WAIT_F:
6458 err = l2cap_rx_state_wait_f(chan, control, skb, event);
6459 break;
6460 default:
6461 /* shut it down */
6462 break;
6463 }
6464 } else {
6465 BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
6466 control->reqseq, chan->next_tx_seq,
6467 chan->expected_ack_seq);
6468 l2cap_send_disconn_req(chan, ECONNRESET);
6469 }
6470
6471 return err;
6472}
6473
6474static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6475 struct sk_buff *skb)
6476{
6477 int err = 0;
6478
6479 BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
6480 chan->rx_state);
6481
6482 if (l2cap_classify_txseq(chan, control->txseq) ==
6483 L2CAP_TXSEQ_EXPECTED) {
6484 l2cap_pass_to_tx(chan, control);
6485
6486 BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
6487 __next_seq(chan, chan->buffer_seq));
6488
6489 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
6490
6491 l2cap_reassemble_sdu(chan, skb, control);
6492 } else {
6493 if (chan->sdu) {
6494 kfree_skb(chan->sdu);
6495 chan->sdu = NULL;
6496 }
6497 chan->sdu_last_frag = NULL;
6498 chan->sdu_len = 0;
6499
6500 if (skb) {
6501 BT_DBG("Freeing %p", skb);
6502 kfree_skb(skb);
6503 }
6504 }
6505
6506 chan->last_acked_seq = control->txseq;
6507 chan->expected_tx_seq = __next_seq(chan, control->txseq);
6508
6509 return err;
6510}
6511
/* Validate and dispatch an ERTM/streaming-mode PDU: checks FCS,
 * payload length against MPS, and the F/P bit rules, then hands
 * I-frames to the rx state machine (or the streaming path) and
 * S-frames to the matching supervisory event handler.  Consumes skb
 * on every path; always returns 0.
 */
static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct l2cap_ctrl *control = &bt_cb(skb)->control;
	u16 len;
	u8 event;

	__unpack_control(chan, skb);

	len = skb->len;

	/*
	 * We can just drop the corrupted I-frame here.
	 * Receiver will miss it and start proper recovery
	 * procedures and ask for retransmission.
	 */
	if (l2cap_check_fcs(chan, skb))
		goto drop;

	/* Exclude the SDU length field and FCS from the payload length */
	if (!control->sframe && control->sar == L2CAP_SAR_START)
		len -= L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		len -= L2CAP_FCS_SIZE;

	if (len > chan->mps) {
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto drop;
	}

	if (!control->sframe) {
		int err;

		BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
		       control->sar, control->reqseq, control->final,
		       control->txseq);

		/* Validate F-bit - F=0 always valid, F=1 only
		 * valid in TX WAIT_F
		 */
		if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
			goto drop;

		if (chan->mode != L2CAP_MODE_STREAMING) {
			event = L2CAP_EV_RECV_IFRAME;
			err = l2cap_rx(chan, control, skb, event);
		} else {
			err = l2cap_stream_rx(chan, control, skb);
		}

		if (err)
			l2cap_send_disconn_req(chan, ECONNRESET);
	} else {
		/* Map the S-frame super field to an rx state machine event */
		const u8 rx_func_to_event[4] = {
			L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
			L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
		};

		/* Only I-frames are expected in streaming mode */
		if (chan->mode == L2CAP_MODE_STREAMING)
			goto drop;

		BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
		       control->reqseq, control->final, control->poll,
		       control->super);

		/* S-frames carry no payload */
		if (len != 0) {
			BT_ERR("Trailing bytes: %d in sframe", len);
			l2cap_send_disconn_req(chan, ECONNRESET);
			goto drop;
		}

		/* Validate F and P bits */
		if (control->final && (control->poll ||
				       chan->tx_state != L2CAP_TX_STATE_WAIT_F))
			goto drop;

		event = rx_func_to_event[control->super];
		if (l2cap_rx(chan, control, skb, event))
			l2cap_send_disconn_req(chan, ECONNRESET);
	}

	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
6599
/* Replenish the sender's LE flow-control credits once our receive
 * credit count drops below half of the configured maximum
 * (le_max_credits is a file-level tunable - defined elsewhere in
 * this file).
 */
static void l2cap_chan_le_send_credits(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_le_credits pkt;
	u16 return_credits;

	/* We return more credits to the sender only after the amount of
	 * credits falls below half of the initial amount.
	 */
	if (chan->rx_credits >= (le_max_credits + 1) / 2)
		return;

	/* Top the sender back up to the full credit allowance */
	return_credits = le_max_credits - chan->rx_credits;

	BT_DBG("chan %p returning %u credits to sender", chan, return_credits);

	chan->rx_credits += return_credits;

	pkt.cid = cpu_to_le16(chan->scid);
	pkt.credits = cpu_to_le16(return_credits);

	chan->ident = l2cap_get_ident(conn);

	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CREDITS, sizeof(pkt), &pkt);
}
6625
/* Receive one LE flow-control-mode PDU: charge a credit, then either
 * start a new SDU (first PDU carries the 16-bit SDU length) or append
 * the fragment to the SDU being reassembled, delivering it upstream
 * when complete.
 *
 * skb ownership: consumed on all paths except the two early -ENOBUFS
 * returns, where the caller (l2cap_data_channel) frees it.
 */
static int l2cap_le_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
{
	int err;

	/* Data without an available credit is a protocol violation */
	if (!chan->rx_credits) {
		BT_ERR("No credits to receive LE L2CAP data");
		l2cap_send_disconn_req(chan, ECONNRESET);
		return -ENOBUFS;
	}

	if (chan->imtu < skb->len) {
		BT_ERR("Too big LE L2CAP PDU");
		return -ENOBUFS;
	}

	chan->rx_credits--;
	BT_DBG("rx_credits %u -> %u", chan->rx_credits + 1, chan->rx_credits);

	l2cap_chan_le_send_credits(chan);

	err = 0;

	if (!chan->sdu) {
		u16 sdu_len;

		/* First PDU of an SDU: starts with the total SDU length */
		sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, L2CAP_SDULEN_SIZE);

		BT_DBG("Start of new SDU. sdu_len %u skb->len %u imtu %u",
		       sdu_len, skb->len, chan->imtu);

		if (sdu_len > chan->imtu) {
			BT_ERR("Too big LE L2CAP SDU length received");
			err = -EMSGSIZE;
			goto failed;
		}

		if (skb->len > sdu_len) {
			BT_ERR("Too much LE L2CAP data received");
			err = -EINVAL;
			goto failed;
		}

		/* Unfragmented SDU: deliver immediately */
		if (skb->len == sdu_len)
			return chan->ops->recv(chan, skb);

		chan->sdu = skb;
		chan->sdu_len = sdu_len;
		chan->sdu_last_frag = skb;

		return 0;
	}

	BT_DBG("SDU fragment. chan->sdu->len %u skb->len %u chan->sdu_len %u",
	       chan->sdu->len, skb->len, chan->sdu_len);

	if (chan->sdu->len + skb->len > chan->sdu_len) {
		BT_ERR("Too much LE L2CAP data received");
		err = -EINVAL;
		goto failed;
	}

	/* skb now owned by the partial SDU; avoid double free below */
	append_skb_frag(chan->sdu, skb, &chan->sdu_last_frag);
	skb = NULL;

	if (chan->sdu->len == chan->sdu_len) {
		err = chan->ops->recv(chan, chan->sdu);
		if (!err) {
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;
		}
	}

failed:
	if (err) {
		kfree_skb(skb);
		kfree_skb(chan->sdu);
		chan->sdu = NULL;
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;
	}

	/* We can't return an error here since we took care of the skb
	 * freeing internally. An error return would cause the caller to
	 * do a double-free of the skb.
	 */
	return 0;
}
6715
/* Deliver an incoming data PDU to the channel identified by cid,
 * dispatching by channel mode.  The channel returned by
 * l2cap_get_chan_by_scid() is locked; it is unlocked at "done".
 * skb is consumed on every path.
 */
static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
			       struct sk_buff *skb)
{
	struct l2cap_chan *chan;

	chan = l2cap_get_chan_by_scid(conn, cid);
	if (!chan) {
		/* A2MP data may arrive before its channel exists; create
		 * it on demand.
		 */
		if (cid == L2CAP_CID_A2MP) {
			chan = a2mp_channel_create(conn, skb);
			if (!chan) {
				kfree_skb(skb);
				return;
			}

			l2cap_chan_lock(chan);
		} else {
			BT_DBG("unknown cid 0x%4.4x", cid);
			/* Drop packet and return */
			kfree_skb(skb);
			return;
		}
	}

	BT_DBG("chan %p, len %d", chan, skb->len);

	if (chan->state != BT_CONNECTED)
		goto drop;

	switch (chan->mode) {
	case L2CAP_MODE_LE_FLOWCTL:
		if (l2cap_le_data_rcv(chan, skb) < 0)
			goto drop;

		goto done;

	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism. */

		if (chan->imtu < skb->len) {
			BT_ERR("Dropping L2CAP data: receive buffer overflow");
			goto drop;
		}

		if (!chan->ops->recv(chan, skb))
			goto done;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* l2cap_data_rcv() consumes the skb itself */
		l2cap_data_rcv(chan, skb);
		goto done;

	default:
		BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
		break;
	}

drop:
	kfree_skb(skb);

done:
	l2cap_chan_unlock(chan);
}
6782
/* Deliver a connectionless (UCD) PDU to the global channel listening
 * on the given PSM.  Only valid on BR/EDR (ACL) links.  Drops the
 * reference taken by l2cap_global_chan_by_psm() and frees the skb on
 * all failure paths; on success skb ownership passes to ops->recv.
 */
static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
				  struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_chan *chan;

	if (hcon->type != ACL_LINK)
		goto free_skb;

	chan = l2cap_global_chan_by_psm(0, psm, &hcon->src, &hcon->dst,
					ACL_LINK);
	if (!chan)
		goto free_skb;

	BT_DBG("chan %p, len %d", chan, skb->len);

	if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
		goto drop;

	if (chan->imtu < skb->len)
		goto drop;

	/* Store remote BD_ADDR and PSM for msg_name */
	bacpy(&bt_cb(skb)->bdaddr, &hcon->dst);
	bt_cb(skb)->psm = psm;

	if (!chan->ops->recv(chan, skb)) {
		l2cap_chan_put(chan);
		return;
	}

drop:
	l2cap_chan_put(chan);
free_skb:
	kfree_skb(skb);
}
6819
/* Entry point for a complete L2CAP frame from HCI: parses the basic
 * header and dispatches by CID to the signaling, connectionless, LE
 * signaling, or data channel handlers.  Frames arriving before the
 * HCI connection is fully up are queued and replayed later by
 * process_pending_rx().
 */
static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct l2cap_hdr *lh = (void *) skb->data;
	struct hci_conn *hcon = conn->hcon;
	u16 cid, len;
	__le16 psm;

	if (hcon->state != BT_CONNECTED) {
		BT_DBG("queueing pending rx skb");
		skb_queue_tail(&conn->pending_rx, skb);
		return;
	}

	skb_pull(skb, L2CAP_HDR_SIZE);
	cid = __le16_to_cpu(lh->cid);
	len = __le16_to_cpu(lh->len);

	/* Header length must match the actual payload length */
	if (len != skb->len) {
		kfree_skb(skb);
		return;
	}

	/* Since we can't actively block incoming LE connections we must
	 * at least ensure that we ignore incoming data from them.
	 */
	if (hcon->type == LE_LINK &&
	    hci_bdaddr_list_lookup(&hcon->hdev->blacklist, &hcon->dst,
				   bdaddr_type(hcon, hcon->dst_type))) {
		kfree_skb(skb);
		return;
	}

	BT_DBG("len %d, cid 0x%4.4x", len, cid);

	switch (cid) {
	case L2CAP_CID_SIGNALING:
		l2cap_sig_channel(conn, skb);
		break;

	case L2CAP_CID_CONN_LESS:
		psm = get_unaligned((__le16 *) skb->data);
		skb_pull(skb, L2CAP_PSMLEN_SIZE);
		l2cap_conless_channel(conn, psm, skb);
		break;

	case L2CAP_CID_LE_SIGNALING:
		l2cap_le_sig_channel(conn, skb);
		break;

	default:
		l2cap_data_channel(conn, cid, skb);
		break;
	}
}
6874
6875static void process_pending_rx(struct work_struct *work)
6876{
6877 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
6878 pending_rx_work);
6879 struct sk_buff *skb;
6880
6881 BT_DBG("");
6882
6883 while ((skb = skb_dequeue(&conn->pending_rx)))
6884 l2cap_recv_frame(conn, skb);
6885}
6886
/* Look up or create the L2CAP connection object for an HCI connection.
 * Returns the existing conn if one is already attached, otherwise
 * allocates and initializes a new one (taking a reference on hcon),
 * or NULL on allocation failure.
 */
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct hci_chan *hchan;

	if (conn)
		return conn;

	hchan = hci_chan_create(hcon);
	if (!hchan)
		return NULL;

	conn = kzalloc(sizeof(*conn), GFP_KERNEL);
	if (!conn) {
		hci_chan_del(hchan);
		return NULL;
	}

	kref_init(&conn->ref);
	hcon->l2cap_data = conn;
	conn->hcon = hcon;
	/* conn holds a reference on its hci_conn for its lifetime */
	hci_conn_get(conn->hcon);
	conn->hchan = hchan;

	BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);

	/* Pick the MTU matching the link type; an LE link with no LE MTU
	 * configured falls back to the ACL MTU.
	 */
	switch (hcon->type) {
	case LE_LINK:
		if (hcon->hdev->le_mtu) {
			conn->mtu = hcon->hdev->le_mtu;
			break;
		}
		/* fall through */
	default:
		conn->mtu = hcon->hdev->acl_mtu;
		break;
	}

	conn->feat_mask = 0;

	if (hcon->type == ACL_LINK)
		conn->hs_enabled = test_bit(HCI_HS_ENABLED,
					    &hcon->hdev->dev_flags);

	mutex_init(&conn->ident_lock);
	mutex_init(&conn->chan_lock);

	INIT_LIST_HEAD(&conn->chan_l);
	INIT_LIST_HEAD(&conn->users);

	INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);

	INIT_WORK(&conn->disconn_work, disconn_work);

	skb_queue_head_init(&conn->pending_rx);
	INIT_WORK(&conn->pending_rx_work, process_pending_rx);

	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	return conn;
}
6948
6949static bool is_valid_psm(u16 psm, u8 dst_type) {
6950 if (!psm)
6951 return false;
6952
6953 if (bdaddr_type_is_le(dst_type))
6954 return (psm <= 0x00ff);
6955
6956 /* PSM must be odd and lsb of upper byte must be 0 */
6957 return ((psm & 0x0101) == 0x0001);
6958}
6959
/* Initiate an outgoing L2CAP channel to @dst on PSM @psm (or fixed
 * CID @cid).  Validates the request against channel type and mode,
 * creates or reuses the underlying ACL/LE link, attaches the channel
 * to the connection, and kicks off the connect/config sequence.
 * Returns 0 on success (including "already connecting") or a
 * negative errno.
 */
int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
		       bdaddr_t *dst, u8 dst_type)
{
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	int err;

	BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", &chan->src, dst,
	       dst_type, __le16_to_cpu(psm));

	hdev = hci_get_route(dst, &chan->src);
	if (!hdev)
		return -EHOSTUNREACH;

	hci_dev_lock(hdev);

	l2cap_chan_lock(chan);

	/* Raw channels may skip PSM/CID validation; all others need a
	 * valid PSM or an explicit CID.
	 */
	if (!is_valid_psm(__le16_to_cpu(psm), dst_type) && !cid &&
	    chan->chan_type != L2CAP_CHAN_RAW) {
		err = -EINVAL;
		goto done;
	}

	if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !psm) {
		err = -EINVAL;
		goto done;
	}

	if (chan->chan_type == L2CAP_CHAN_FIXED && !cid) {
		err = -EINVAL;
		goto done;
	}

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		break;
	case L2CAP_MODE_LE_FLOWCTL:
		l2cap_le_flowctl_init(chan);
		break;
	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* ERTM/streaming only when not disabled by module param */
		if (!disable_ertm)
			break;
		/* fall through */
	default:
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (chan->state) {
	case BT_CONNECT:
	case BT_CONNECT2:
	case BT_CONFIG:
		/* Already connecting */
		err = 0;
		goto done;

	case BT_CONNECTED:
		/* Already connected */
		err = -EISCONN;
		goto done;

	case BT_OPEN:
	case BT_BOUND:
		/* Can connect */
		break;

	default:
		err = -EBADFD;
		goto done;
	}

	/* Set destination address and psm */
	bacpy(&chan->dst, dst);
	chan->dst_type = dst_type;

	chan->psm = psm;
	chan->dcid = cid;

	if (bdaddr_type_is_le(dst_type)) {
		u8 role;

		/* Convert from L2CAP channel address type to HCI address type
		 */
		if (dst_type == BDADDR_LE_PUBLIC)
			dst_type = ADDR_LE_DEV_PUBLIC;
		else
			dst_type = ADDR_LE_DEV_RANDOM;

		/* While advertising we can only accept connections, so
		 * take the slave role.
		 */
		if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
			role = HCI_ROLE_SLAVE;
		else
			role = HCI_ROLE_MASTER;

		hcon = hci_connect_le(hdev, dst, dst_type, chan->sec_level,
				      HCI_LE_CONN_TIMEOUT, role);
	} else {
		u8 auth_type = l2cap_get_auth_type(chan);
		hcon = hci_connect_acl(hdev, dst, chan->sec_level, auth_type);
	}

	if (IS_ERR(hcon)) {
		err = PTR_ERR(hcon);
		goto done;
	}

	conn = l2cap_conn_add(hcon);
	if (!conn) {
		hci_conn_drop(hcon);
		err = -ENOMEM;
		goto done;
	}

	/* Reject a fixed-CID connect if that CID is already taken */
	if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
		hci_conn_drop(hcon);
		err = -EBUSY;
		goto done;
	}

	/* Update source addr of the socket */
	bacpy(&chan->src, &hcon->src);
	chan->src_type = bdaddr_type(hcon, hcon->src_type);

	/* l2cap_chan_add() takes conn->chan_lock, so drop the channel
	 * lock around it to preserve lock ordering.
	 */
	l2cap_chan_unlock(chan);
	l2cap_chan_add(conn, chan);
	l2cap_chan_lock(chan);

	/* l2cap_chan_add takes its own ref so we can drop this one */
	hci_conn_drop(hcon);

	l2cap_state_change(chan, BT_CONNECT);
	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	/* Release chan->sport so that it can be reused by other
	 * sockets (as it's only used for listening sockets).
	 */
	write_lock(&chan_list_lock);
	chan->sport = 0;
	write_unlock(&chan_list_lock);

	/* If the link is already up, proceed with the channel setup now */
	if (hcon->state == BT_CONNECTED) {
		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			__clear_chan_timer(chan);
			if (l2cap_chan_check_security(chan, true))
				l2cap_state_change(chan, BT_CONNECTED);
		} else
			l2cap_do_start(chan);
	}

	err = 0;

done:
	l2cap_chan_unlock(chan);
	hci_dev_unlock(hdev);
	hci_dev_put(hdev);
	return err;
}
7119EXPORT_SYMBOL_GPL(l2cap_chan_connect);
7120
7121/* ---- L2CAP interface with lower layer (HCI) ---- */
7122
7123int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
7124{
7125 int exact = 0, lm1 = 0, lm2 = 0;
7126 struct l2cap_chan *c;
7127
7128 BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);
7129
7130 /* Find listening sockets and check their link_mode */
7131 read_lock(&chan_list_lock);
7132 list_for_each_entry(c, &chan_list, global_l) {
7133 if (c->state != BT_LISTEN)
7134 continue;
7135
7136 if (!bacmp(&c->src, &hdev->bdaddr)) {
7137 lm1 |= HCI_LM_ACCEPT;
7138 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
7139 lm1 |= HCI_LM_MASTER;
7140 exact++;
7141 } else if (!bacmp(&c->src, BDADDR_ANY)) {
7142 lm2 |= HCI_LM_ACCEPT;
7143 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
7144 lm2 |= HCI_LM_MASTER;
7145 }
7146 }
7147 read_unlock(&chan_list_lock);
7148
7149 return exact ? lm1 : lm2;
7150}
7151
7152/* Find the next fixed channel in BT_LISTEN state, continue iteration
7153 * from an existing channel in the list or from the beginning of the
7154 * global list (by passing NULL as first parameter).
7155 */
static struct l2cap_chan *l2cap_global_fixed_chan(struct l2cap_chan *c,
						  bdaddr_t *src, u8 link_type)
{
	read_lock(&chan_list_lock);

	/* Resume after the given channel, or start from the list head */
	if (c)
		c = list_next_entry(c, global_l);
	else
		c = list_entry(chan_list.next, typeof(*c), global_l);

	list_for_each_entry_from(c, &chan_list, global_l) {
		if (c->chan_type != L2CAP_CHAN_FIXED)
			continue;
		if (c->state != BT_LISTEN)
			continue;
		/* Source must match exactly or be a wildcard listener */
		if (bacmp(&c->src, src) && bacmp(&c->src, BDADDR_ANY))
			continue;
		/* Address type must be compatible with the link type */
		if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR)
			continue;
		if (link_type == LE_LINK && c->src_type == BDADDR_BREDR)
			continue;

		/* Hold the channel before dropping the list lock; the
		 * caller releases it with l2cap_chan_put().
		 */
		l2cap_chan_hold(c);
		read_unlock(&chan_list_lock);
		return c;
	}

	read_unlock(&chan_list_lock);

	return NULL;
}
7187
/* HCI callback: an outgoing/incoming ACL or LE connection completed.
 * On failure tears the L2CAP connection down; on success creates the
 * l2cap_conn, spawns channels for all matching fixed-channel listeners
 * and kicks off channel setup via l2cap_conn_ready().
 */
void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
{
	struct hci_dev *hdev = hcon->hdev;
	struct l2cap_conn *conn;
	struct l2cap_chan *pchan;
	u8 dst_type;

	BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);

	if (status) {
		/* Connection attempt failed: drop everything attached
		 * to this hcon, translating the HCI status to an errno.
		 */
		l2cap_conn_del(hcon, bt_to_errno(status));
		return;
	}

	conn = l2cap_conn_add(hcon);
	if (!conn)
		return;

	dst_type = bdaddr_type(hcon, hcon->dst_type);

	/* If device is blocked, do not create channels for it */
	if (hci_bdaddr_list_lookup(&hdev->blacklist, &hcon->dst, dst_type))
		return;

	/* Find fixed channels and notify them of the new connection. We
	 * use multiple individual lookups, continuing each time where
	 * we left off, because the list lock would prevent calling the
	 * potentially sleeping l2cap_chan_lock() function.
	 */
	pchan = l2cap_global_fixed_chan(NULL, &hdev->bdaddr, hcon->type);
	while (pchan) {
		struct l2cap_chan *chan, *next;

		/* Client fixed channels should override server ones */
		if (__l2cap_get_chan_by_dcid(conn, pchan->scid))
			goto next;

		l2cap_chan_lock(pchan);
		/* Ask the listener to produce a per-connection channel
		 * instance; NULL means it declined.
		 */
		chan = pchan->ops->new_connection(pchan);
		if (chan) {
			bacpy(&chan->src, &hcon->src);
			bacpy(&chan->dst, &hcon->dst);
			chan->src_type = bdaddr_type(hcon, hcon->src_type);
			chan->dst_type = dst_type;

			__l2cap_chan_add(conn, chan);
		}

		l2cap_chan_unlock(pchan);
next:
		/* Advance the cursor before releasing the reference
		 * held on the current listener.
		 */
		next = l2cap_global_fixed_chan(pchan, &hdev->bdaddr,
					       hcon->type);
		l2cap_chan_put(pchan);
		pchan = next;
	}

	l2cap_conn_ready(conn);
}
7246
7247int l2cap_disconn_ind(struct hci_conn *hcon)
7248{
7249 struct l2cap_conn *conn = hcon->l2cap_data;
7250
7251 BT_DBG("hcon %p", hcon);
7252
7253 if (!conn)
7254 return HCI_ERROR_REMOTE_USER_TERM;
7255 return conn->disc_reason;
7256}
7257
/* HCI callback: the link was disconnected.  Tear down the L2CAP
 * connection, converting the HCI reason code to an errno for the
 * channels being closed.
 */
void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
{
	BT_DBG("hcon %p reason %d", hcon, reason);

	l2cap_conn_del(hcon, bt_to_errno(reason));
}
7264
7265static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
7266{
7267 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
7268 return;
7269
7270 if (encrypt == 0x00) {
7271 if (chan->sec_level == BT_SECURITY_MEDIUM) {
7272 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
7273 } else if (chan->sec_level == BT_SECURITY_HIGH ||
7274 chan->sec_level == BT_SECURITY_FIPS)
7275 l2cap_chan_close(chan, ECONNREFUSED);
7276 } else {
7277 if (chan->sec_level == BT_SECURITY_MEDIUM)
7278 __clear_chan_timer(chan);
7279 }
7280}
7281
/* HCI callback: an authentication/encryption procedure on the link
 * finished with @status (0 = success) and the link is now encrypted
 * iff @encrypt is non-zero.  Walks every channel on the connection and
 * advances or aborts its state machine accordingly.  Always returns 0.
 */
int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan;

	if (!conn)
		return 0;

	BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
		       state_to_string(chan->state));

		/* A2MP signalling channels are not subject to link
		 * security handling here.
		 */
		if (chan->scid == L2CAP_CID_A2MP) {
			l2cap_chan_unlock(chan);
			continue;
		}

		/* Security succeeded and the link is encrypted: the
		 * channel's effective level is now the link's level.
		 */
		if (!status && encrypt)
			chan->sec_level = hcon->sec_level;

		/* Skip channels that still have a connect request
		 * pending; they will be handled when it resolves.
		 */
		if (!__l2cap_no_conn_pending(chan)) {
			l2cap_chan_unlock(chan);
			continue;
		}

		/* Already-established channels just resume data flow
		 * and re-check their encryption requirements.
		 */
		if (!status && (chan->state == BT_CONNECTED ||
				chan->state == BT_CONFIG)) {
			chan->ops->resume(chan);
			l2cap_check_encryption(chan, encrypt);
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			/* Outgoing connect was waiting on security:
			 * proceed on success, otherwise start the
			 * disconnect timer.
			 */
			if (!status)
				l2cap_start_connection(chan);
			else
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
		} else if (chan->state == BT_CONNECT2) {
			/* Incoming connect was deferred pending
			 * security: send the (possibly pending)
			 * connect response now.
			 */
			struct l2cap_conn_rsp rsp;
			__u16 res, stat;

			if (!status) {
				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
					/* Userspace still has to authorize;
					 * answer "pending" for now.
					 */
					res = L2CAP_CR_PEND;
					stat = L2CAP_CS_AUTHOR_PEND;
					chan->ops->defer(chan);
				} else {
					l2cap_state_change(chan, BT_CONFIG);
					res = L2CAP_CR_SUCCESS;
					stat = L2CAP_CS_NO_INFO;
				}
			} else {
				/* Security failed: refuse the connection
				 * and schedule disconnect.
				 */
				l2cap_state_change(chan, BT_DISCONN);
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
				res = L2CAP_CR_SEC_BLOCK;
				stat = L2CAP_CS_NO_INFO;
			}

			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);
			rsp.result = cpu_to_le16(res);
			rsp.status = cpu_to_le16(stat);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			/* On acceptance, immediately follow up with our
			 * configuration request if not already sent.
			 */
			if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
			    res == L2CAP_CR_SUCCESS) {
				char buf[128];
				set_bit(CONF_REQ_SENT, &chan->conf_state);
				l2cap_send_cmd(conn, l2cap_get_ident(conn),
					       L2CAP_CONF_REQ,
					       l2cap_build_conf_req(chan, buf),
					       buf);
				chan->num_conf_req++;
			}
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);

	return 0;
}
7373
/* HCI callback: one ACL data packet arrived for @hcon.  Reassembles
 * fragmented L2CAP frames (conn->rx_skb / conn->rx_len hold the
 * in-progress frame) and hands complete frames to l2cap_recv_frame(),
 * which takes ownership of the skb.  Consumes @skb in all paths and
 * always returns 0; malformed sequences mark the connection unreliable.
 */
int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_hdr *hdr;
	int len;

	/* For AMP controller do not create l2cap conn */
	if (!conn && hcon->hdev->dev_type != HCI_BREDR)
		goto drop;

	if (!conn)
		conn = l2cap_conn_add(hcon);

	if (!conn)
		goto drop;

	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);

	switch (flags) {
	case ACL_START:
	case ACL_START_NO_FLUSH:
	case ACL_COMPLETE:
		/* A new start fragment while reassembly is in progress
		 * means the previous frame was truncated: discard it.
		 */
		if (conn->rx_len) {
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
		}

		/* Start fragment always begin with Basic L2CAP header */
		if (skb->len < L2CAP_HDR_SIZE) {
			BT_ERR("Frame is too short (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		hdr = (struct l2cap_hdr *) skb->data;
		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);
			return 0;
		}

		BT_DBG("Start: total len %d, frag len %d", len, skb->len);

		/* The start fragment may not exceed the frame length
		 * announced in the L2CAP header.
		 */
		if (skb->len > len) {
			BT_ERR("Frame is too long (len %d, expected len %d)",
			       skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
		if (!conn->rx_skb)
			goto drop;

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
					  skb->len);
		/* Remaining bytes expected via ACL_CONT fragments */
		conn->rx_len = len - skb->len;
		break;

	case ACL_CONT:
		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);

		/* Continuation without a pending start fragment */
		if (!conn->rx_len) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Fragment would overflow the announced frame length:
		 * abandon the whole reassembly.
		 */
		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %d, expected %d)",
			       skb->len, conn->rx_len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
					  skb->len);
		conn->rx_len -= skb->len;

		if (!conn->rx_len) {
			/* Complete frame received. l2cap_recv_frame
			 * takes ownership of the skb so set the global
			 * rx_skb pointer to NULL first.
			 */
			struct sk_buff *rx_skb = conn->rx_skb;
			conn->rx_skb = NULL;
			l2cap_recv_frame(conn, rx_skb);
		}
		break;
	}

drop:
	kfree_skb(skb);
	return 0;
}
7478
7479static int l2cap_debugfs_show(struct seq_file *f, void *p)
7480{
7481 struct l2cap_chan *c;
7482
7483 read_lock(&chan_list_lock);
7484
7485 list_for_each_entry(c, &chan_list, global_l) {
7486 seq_printf(f, "%pMR %pMR %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
7487 &c->src, &c->dst,
7488 c->state, __le16_to_cpu(c->psm),
7489 c->scid, c->dcid, c->imtu, c->omtu,
7490 c->sec_level, c->mode);
7491 }
7492
7493 read_unlock(&chan_list_lock);
7494
7495 return 0;
7496}
7497
/* Open handler wiring the debugfs file to the seq_file show callback. */
static int l2cap_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, l2cap_debugfs_show, inode->i_private);
}
7502
/* File operations for the read-only "l2cap" debugfs entry. */
static const struct file_operations l2cap_debugfs_fops = {
	.open		= l2cap_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
7509
7510static struct dentry *l2cap_debugfs;
7511
/* Initialize the L2CAP layer: register the socket interface and, when
 * available, create the debugfs entries.  Returns 0 on success or the
 * negative error from socket initialization.
 */
int __init l2cap_init(void)
{
	int err;

	err = l2cap_init_sockets();
	if (err < 0)
		return err;

	/* Debugfs entries are optional: silently skip them when the
	 * Bluetooth debugfs root is absent or failed to be created.
	 */
	if (IS_ERR_OR_NULL(bt_debugfs))
		return 0;

	l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
					    NULL, &l2cap_debugfs_fops);

	/* Runtime-tunable LE flow control parameters */
	debugfs_create_u16("l2cap_le_max_credits", 0644, bt_debugfs,
			   &le_max_credits);
	debugfs_create_u16("l2cap_le_default_mps", 0644, bt_debugfs,
			   &le_default_mps);

	return 0;
}
7533
/* Tear down the L2CAP layer: remove the debugfs entry (a no-op if it
 * was never created) and unregister the socket interface.
 */
void l2cap_exit(void)
{
	debugfs_remove(l2cap_debugfs);
	l2cap_cleanup_sockets();
}
7539
7540module_param(disable_ertm, bool, 0644);
7541MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");
This page took 0.065235 seconds and 5 git commands to generate.