/* net/bluetooth/l2cap_core.c (Linux kernel, deliverable/linux.git) */
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
8
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
10
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
14
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
23
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
27 */
28
29 /* Bluetooth L2CAP core. */
30
31 #include <linux/module.h>
32
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
35
36 #include <net/bluetooth/bluetooth.h>
37 #include <net/bluetooth/hci_core.h>
38 #include <net/bluetooth/l2cap.h>
39
40 #include "smp.h"
41 #include "a2mp.h"
42 #include "amp.h"
43
44 #define LE_FLOWCTL_MAX_CREDITS 65535
45
46 bool disable_ertm;
47
48 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD;
49 static u8 l2cap_fixed_chan[8] = { L2CAP_FC_SIG_BREDR | L2CAP_FC_CONNLESS, };
50
51 static LIST_HEAD(chan_list);
52 static DEFINE_RWLOCK(chan_list_lock);
53
54 static u16 le_max_credits = L2CAP_LE_MAX_CREDITS;
55 static u16 le_default_mps = L2CAP_LE_DEFAULT_MPS;
56
57 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
58 u8 code, u8 ident, u16 dlen, void *data);
59 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
60 void *data);
61 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
62 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
63
64 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
65 struct sk_buff_head *skbs, u8 event);
66
67 static inline __u8 bdaddr_type(struct hci_conn *hcon, __u8 type)
68 {
69 if (hcon->type == LE_LINK) {
70 if (type == ADDR_LE_DEV_PUBLIC)
71 return BDADDR_LE_PUBLIC;
72 else
73 return BDADDR_LE_RANDOM;
74 }
75
76 return BDADDR_BREDR;
77 }
78
79 /* ---- L2CAP channels ---- */
80
/* Look up a channel on @conn by its destination CID (the CID assigned
 * by the remote side).  Caller must hold conn->chan_lock.
 * Returns the matching channel or NULL.
 */
static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
						   u16 cid)
{
	struct l2cap_chan *c;

	list_for_each_entry(c, &conn->chan_l, list) {
		if (c->dcid == cid)
			return c;
	}
	return NULL;
}
92
/* Look up a channel on @conn by its source CID (the locally assigned
 * CID).  Caller must hold conn->chan_lock.
 * Returns the matching channel or NULL.
 */
static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
						   u16 cid)
{
	struct l2cap_chan *c;

	list_for_each_entry(c, &conn->chan_l, list) {
		if (c->scid == cid)
			return c;
	}
	return NULL;
}
104
/* Find channel with given SCID.
 * Takes conn->chan_lock for the lookup only; on success the channel is
 * returned with its own channel lock held (caller must unlock it).
 * Returns NULL if no channel matches.
 */
static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
						 u16 cid)
{
	struct l2cap_chan *c;

	mutex_lock(&conn->chan_lock);
	c = __l2cap_get_chan_by_scid(conn, cid);
	if (c)
		l2cap_chan_lock(c);
	mutex_unlock(&conn->chan_lock);

	return c;
}
120
/* Find channel with given DCID.
 * Takes conn->chan_lock for the lookup only; on success the channel is
 * returned with its own channel lock held (caller must unlock it).
 * Returns NULL if no channel matches.
 */
static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
						 u16 cid)
{
	struct l2cap_chan *c;

	mutex_lock(&conn->chan_lock);
	c = __l2cap_get_chan_by_dcid(conn, cid);
	if (c)
		l2cap_chan_lock(c);
	mutex_unlock(&conn->chan_lock);

	return c;
}
137
/* Look up a channel on @conn by the signalling command identifier used
 * for its pending request.  Caller must hold conn->chan_lock.
 * Returns the matching channel or NULL.
 */
static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
						    u8 ident)
{
	struct l2cap_chan *c;

	list_for_each_entry(c, &conn->chan_l, list) {
		if (c->ident == ident)
			return c;
	}
	return NULL;
}
149
/* Locked variant of __l2cap_get_chan_by_ident(): takes conn->chan_lock
 * for the lookup and returns the channel with its channel lock held
 * (caller must unlock), or NULL if no channel matches.
 */
static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
						  u8 ident)
{
	struct l2cap_chan *c;

	mutex_lock(&conn->chan_lock);
	c = __l2cap_get_chan_by_ident(conn, ident);
	if (c)
		l2cap_chan_lock(c);
	mutex_unlock(&conn->chan_lock);

	return c;
}
163
/* Search the global channel list for a channel bound to source PSM @psm
 * on local address @src.  Caller must hold chan_list_lock (read or
 * write).  Returns the matching channel or NULL.
 */
static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
{
	struct l2cap_chan *c;

	list_for_each_entry(c, &chan_list, global_l) {
		if (c->sport == psm && !bacmp(&c->src, src))
			return c;
	}
	return NULL;
}
174
/* Bind a channel to a PSM (Protocol/Service Multiplexer).
 *
 * If @psm is non-zero it is claimed as given, after verifying that no
 * other channel on @src already uses it (-EADDRINUSE otherwise).  If
 * @psm is zero, a free dynamic PSM in 0x1001..0x10ff is auto-allocated;
 * the stride of 2 keeps the low octet odd, which the L2CAP spec
 * requires for valid PSM values -- NOTE(review): confirm range against
 * the spec revision targeted here.
 *
 * Returns 0 on success or a negative errno.
 */
int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
{
	int err;

	write_lock(&chan_list_lock);

	if (psm && __l2cap_global_chan_by_addr(psm, src)) {
		err = -EADDRINUSE;
		goto done;
	}

	if (psm) {
		chan->psm = psm;
		chan->sport = psm;
		err = 0;
	} else {
		u16 p;

		/* -EINVAL if the whole dynamic range is exhausted */
		err = -EINVAL;
		for (p = 0x1001; p < 0x1100; p += 2)
			if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
				chan->psm = cpu_to_le16(p);
				chan->sport = cpu_to_le16(p);
				err = 0;
				break;
			}
	}

done:
	write_unlock(&chan_list_lock);
	return err;
}
EXPORT_SYMBOL_GPL(l2cap_add_psm);
208
/* Bind a channel to a fixed source CID, converting it to a fixed
 * channel with the default MTU.  Always returns 0.
 */
int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
{
	write_lock(&chan_list_lock);

	/* Override the defaults (which are for conn-oriented) */
	chan->omtu = L2CAP_DEFAULT_MTU;
	chan->chan_type = L2CAP_CHAN_FIXED;

	chan->scid = scid;

	write_unlock(&chan_list_lock);

	return 0;
}
223
224 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
225 {
226 u16 cid, dyn_end;
227
228 if (conn->hcon->type == LE_LINK)
229 dyn_end = L2CAP_CID_LE_DYN_END;
230 else
231 dyn_end = L2CAP_CID_DYN_END;
232
233 for (cid = L2CAP_CID_DYN_START; cid < dyn_end; cid++) {
234 if (!__l2cap_get_chan_by_scid(conn, cid))
235 return cid;
236 }
237
238 return 0;
239 }
240
/* Move @chan to @state and notify the channel owner via the
 * state_change callback (with no error).  The debug line logs the
 * old -> new transition before the state is overwritten.
 */
static void l2cap_state_change(struct l2cap_chan *chan, int state)
{
	BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
	       state_to_string(state));

	chan->state = state;
	chan->ops->state_change(chan, state, 0);
}
249
250 static inline void l2cap_state_change_and_error(struct l2cap_chan *chan,
251 int state, int err)
252 {
253 chan->state = state;
254 chan->ops->state_change(chan, chan->state, err);
255 }
256
/* Report error @err to the channel owner without changing the channel
 * state (the current state is passed through unchanged).
 */
static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
{
	chan->ops->state_change(chan, chan->state, err);
}
261
/* Arm the ERTM retransmission timer, but only when a retransmission
 * timeout is configured and the monitor timer is not already pending.
 * NOTE(review): the pending check is deliberately on monitor_timer,
 * not retrans_timer -- in ERTM the two timers are not meant to run
 * concurrently; confirm against the Core spec's ERTM state tables.
 */
static void __set_retrans_timer(struct l2cap_chan *chan)
{
	if (!delayed_work_pending(&chan->monitor_timer) &&
	    chan->retrans_timeout) {
		l2cap_set_timer(chan, &chan->retrans_timer,
				msecs_to_jiffies(chan->retrans_timeout));
	}
}
270
/* Arm the ERTM monitor timer (used while polling the peer), cancelling
 * any pending retransmission timer first since the two are mutually
 * exclusive.  A zero monitor_timeout disables the timer.
 */
static void __set_monitor_timer(struct l2cap_chan *chan)
{
	__clear_retrans_timer(chan);
	if (chan->monitor_timeout) {
		l2cap_set_timer(chan, &chan->monitor_timer,
				msecs_to_jiffies(chan->monitor_timeout));
	}
}
279
/* Linear scan of @head for the skb whose ERTM TxSeq control field
 * equals @seq.  Returns the skb (still queued) or NULL.
 */
static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
					       u16 seq)
{
	struct sk_buff *skb;

	skb_queue_walk(head, skb) {
		if (bt_cb(skb)->control.txseq == seq)
			return skb;
	}

	return NULL;
}
292
293 /* ---- L2CAP sequence number lists ---- */
294
295 /* For ERTM, ordered lists of sequence numbers must be tracked for
296 * SREJ requests that are received and for frames that are to be
297 * retransmitted. These seq_list functions implement a singly-linked
298 * list in an array, where membership in the list can also be checked
299 * in constant time. Items can also be added to the tail of the list
300 * and removed from the head in constant time, without further memory
301 * allocs or frees.
302 */
303
304 static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
305 {
306 size_t alloc_size, i;
307
308 /* Allocated size is a power of 2 to map sequence numbers
309 * (which may be up to 14 bits) in to a smaller array that is
310 * sized for the negotiated ERTM transmit windows.
311 */
312 alloc_size = roundup_pow_of_two(size);
313
314 seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
315 if (!seq_list->list)
316 return -ENOMEM;
317
318 seq_list->mask = alloc_size - 1;
319 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
320 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
321 for (i = 0; i < alloc_size; i++)
322 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
323
324 return 0;
325 }
326
/* Release the array allocated by l2cap_seq_list_init() (kfree(NULL)
 * is a no-op, so an uninitialized list is safe).
 */
static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
{
	kfree(seq_list->list);
}
331
/* O(1) membership test: a slot holding anything other than the CLEAR
 * sentinel means @seq is currently linked into the list.
 */
static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
					   u16 seq)
{
	/* Constant-time check for list membership */
	return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
}
338
/* Remove and return the sequence number at the head of the list in
 * O(1).  The vacated slot is reset to the CLEAR sentinel; when the
 * popped entry was the tail (its slot held the TAIL sentinel) the list
 * becomes empty and head/tail are both reset.
 * NOTE(review): no empty-list guard here -- callers are expected to
 * check before popping.
 */
static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
{
	u16 seq = seq_list->head;
	u16 mask = seq_list->mask;

	seq_list->head = seq_list->list[seq & mask];
	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;

	if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
		seq_list->head = L2CAP_SEQ_LIST_CLEAR;
		seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
	}

	return seq;
}
354
355 static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
356 {
357 u16 i;
358
359 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
360 return;
361
362 for (i = 0; i <= seq_list->mask; i++)
363 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
364
365 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
366 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
367 }
368
/* Append @seq to the tail of the list in O(1).  Duplicate appends are
 * silently ignored (the slot is already occupied).  The new tail's
 * slot holds the TAIL sentinel, which pop uses to detect emptiness.
 */
static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
{
	u16 mask = seq_list->mask;

	/* All appends happen in constant time */

	if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
		return;

	if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
		seq_list->head = seq;
	else
		seq_list->list[seq_list->tail & mask] = seq;

	seq_list->tail = seq;
	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
}
386
/* Delayed-work handler for the channel timer: a channel operation timed
 * out, so close the channel with an error picked from its state.
 * Lock order is conn->chan_lock before the channel lock; ops->close()
 * is deliberately called after the channel lock is dropped.  The final
 * put balances the reference held by the scheduled work.
 */
static void l2cap_chan_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       chan_timer.work);
	struct l2cap_conn *conn = chan->conn;
	int reason;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(chan);

	/* Established or configuring channels fail as "refused";
	 * connect attempts above SDP security also refuse; anything
	 * else is a plain timeout.
	 */
	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (chan->state == BT_CONNECT &&
		 chan->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	l2cap_chan_close(chan, reason);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan);
	mutex_unlock(&conn->chan_lock);

	l2cap_chan_put(chan);
}
416
/* Allocate and initialize a new channel, link it on the global channel
 * list and return it with one reference held (BT_OPEN state).
 * Returns NULL on allocation failure.
 * NOTE(review): GFP_ATOMIC here -- presumably because some callers run
 * in atomic context; confirm before relaxing to GFP_KERNEL.
 */
struct l2cap_chan *l2cap_chan_create(void)
{
	struct l2cap_chan *chan;

	chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
	if (!chan)
		return NULL;

	mutex_init(&chan->lock);

	write_lock(&chan_list_lock);
	list_add(&chan->global_l, &chan_list);
	write_unlock(&chan_list_lock);

	INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);

	chan->state = BT_OPEN;

	kref_init(&chan->kref);

	/* This flag is cleared in l2cap_chan_ready() */
	set_bit(CONF_NOT_COMPLETE, &chan->conf_state);

	BT_DBG("chan %p", chan);

	return chan;
}
EXPORT_SYMBOL_GPL(l2cap_chan_create);
445
/* kref release callback: unlink the channel from the global list and
 * free it.  Runs when the last l2cap_chan_put() drops the refcount.
 */
static void l2cap_chan_destroy(struct kref *kref)
{
	struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);

	BT_DBG("chan %p", chan);

	write_lock(&chan_list_lock);
	list_del(&chan->global_l);
	write_unlock(&chan_list_lock);

	kfree(chan);
}
458
/* Take a reference on @c (paired with l2cap_chan_put()). */
void l2cap_chan_hold(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));

	kref_get(&c->kref);
}
465
/* Drop a reference on @c; frees the channel via l2cap_chan_destroy()
 * when the count reaches zero.
 */
void l2cap_chan_put(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));

	kref_put(&c->kref, l2cap_chan_destroy);
}
EXPORT_SYMBOL_GPL(l2cap_chan_put);
473
474 void l2cap_chan_set_defaults(struct l2cap_chan *chan)
475 {
476 chan->fcs = L2CAP_FCS_CRC16;
477 chan->max_tx = L2CAP_DEFAULT_MAX_TX;
478 chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
479 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
480 chan->remote_max_tx = chan->max_tx;
481 chan->remote_tx_win = chan->tx_win;
482 chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
483 chan->sec_level = BT_SECURITY_LOW;
484 chan->flush_to = L2CAP_DEFAULT_FLUSH_TO;
485 chan->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
486 chan->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
487 chan->conf_state = 0;
488
489 set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
490 }
491 EXPORT_SYMBOL_GPL(l2cap_chan_set_defaults);
492
/* Initialize LE credit-based flow control state: clear any partial SDU
 * reassembly, start with zero TX credits (the peer grants them), grant
 * the peer the module-default RX credits, and cap MPS at our MTU.
 */
static void l2cap_le_flowctl_init(struct l2cap_chan *chan)
{
	chan->sdu = NULL;
	chan->sdu_last_frag = NULL;
	chan->sdu_len = 0;
	chan->tx_credits = 0;
	chan->rx_credits = le_max_credits;
	chan->mps = min_t(u16, chan->imtu, le_default_mps);

	skb_queue_head_init(&chan->tx_q);
}
504
/* Attach @chan to @conn: assign CIDs/MTU according to the channel type,
 * seed the extended-flow-spec locals with best-effort defaults, take a
 * channel reference plus an hci_conn reference, and link the channel on
 * the connection's list.  Caller must hold conn->chan_lock.
 */
void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
	       __le16_to_cpu(chan->psm), chan->dcid);

	/* Default disconnect reason until something more specific */
	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	chan->conn = conn;

	switch (chan->chan_type) {
	case L2CAP_CHAN_CONN_ORIENTED:
		/* Alloc CID for connection-oriented socket */
		chan->scid = l2cap_alloc_cid(conn);
		if (conn->hcon->type == ACL_LINK)
			chan->omtu = L2CAP_DEFAULT_MTU;
		break;

	case L2CAP_CHAN_CONN_LESS:
		/* Connectionless socket */
		chan->scid = L2CAP_CID_CONN_LESS;
		chan->dcid = L2CAP_CID_CONN_LESS;
		chan->omtu = L2CAP_DEFAULT_MTU;
		break;

	case L2CAP_CHAN_FIXED:
		/* Caller will set CID and CID specific MTU values */
		break;

	default:
		/* Raw socket can send/recv signalling messages only */
		chan->scid = L2CAP_CID_SIGNALING;
		chan->dcid = L2CAP_CID_SIGNALING;
		chan->omtu = L2CAP_DEFAULT_MTU;
	}

	chan->local_id = L2CAP_BESTEFFORT_ID;
	chan->local_stype = L2CAP_SERV_BESTEFFORT;
	chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
	chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
	chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
	chan->local_flush_to = L2CAP_EFS_DEFAULT_FLUSH_TO;

	l2cap_chan_hold(chan);

	hci_conn_hold(conn->hcon);

	list_add(&chan->list, &conn->chan_l);
}
553
/* Locked wrapper around __l2cap_chan_add(). */
void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	mutex_lock(&conn->chan_lock);
	__l2cap_chan_add(conn, chan);
	mutex_unlock(&conn->chan_lock);
}
560
561 void l2cap_chan_del(struct l2cap_chan *chan, int err)
562 {
563 struct l2cap_conn *conn = chan->conn;
564
565 __clear_chan_timer(chan);
566
567 BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
568
569 chan->ops->teardown(chan, err);
570
571 if (conn) {
572 struct amp_mgr *mgr = conn->hcon->amp_mgr;
573 /* Delete from channel list */
574 list_del(&chan->list);
575
576 l2cap_chan_put(chan);
577
578 chan->conn = NULL;
579
580 if (chan->scid != L2CAP_CID_A2MP)
581 hci_conn_drop(conn->hcon);
582
583 if (mgr && mgr->bredr_chan == chan)
584 mgr->bredr_chan = NULL;
585 }
586
587 if (chan->hs_hchan) {
588 struct hci_chan *hs_hchan = chan->hs_hchan;
589
590 BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan);
591 amp_disconnect_logical_link(hs_hchan);
592 }
593
594 if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
595 return;
596
597 switch(chan->mode) {
598 case L2CAP_MODE_BASIC:
599 break;
600
601 case L2CAP_MODE_LE_FLOWCTL:
602 skb_queue_purge(&chan->tx_q);
603 break;
604
605 case L2CAP_MODE_ERTM:
606 __clear_retrans_timer(chan);
607 __clear_monitor_timer(chan);
608 __clear_ack_timer(chan);
609
610 skb_queue_purge(&chan->srej_q);
611
612 l2cap_seq_list_free(&chan->srej_list);
613 l2cap_seq_list_free(&chan->retrans_list);
614
615 /* fall through */
616
617 case L2CAP_MODE_STREAMING:
618 skb_queue_purge(&chan->tx_q);
619 break;
620 }
621
622 return;
623 }
624 EXPORT_SYMBOL_GPL(l2cap_chan_del);
625
/* Propagate a changed destination identity address from @hcon to every
 * channel on the connection (used when an LE identity is resolved).
 */
void l2cap_conn_update_id_addr(struct hci_conn *hcon)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan;

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		l2cap_chan_lock(chan);
		bacpy(&chan->dst, &hcon->dst);
		chan->dst_type = bdaddr_type(hcon, hcon->dst_type);
		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
642
/* Reject a pending LE credit-based connection request: move the channel
 * to BT_DISCONN and answer the peer's request (saved ident) with
 * AUTHORIZATION when the channel defers setup, BAD_PSM otherwise.
 */
static void l2cap_chan_le_connect_reject(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_le_conn_rsp rsp;
	u16 result;

	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
		result = L2CAP_CR_AUTHORIZATION;
	else
		result = L2CAP_CR_BAD_PSM;

	l2cap_state_change(chan, BT_DISCONN);

	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.mtu = cpu_to_le16(chan->imtu);
	rsp.mps = cpu_to_le16(chan->mps);
	rsp.credits = cpu_to_le16(chan->rx_credits);
	rsp.result = cpu_to_le16(result);

	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
		       &rsp);
}
665
/* Reject a pending BR/EDR connection request: move the channel to
 * BT_DISCONN and answer the peer's request (saved ident) with SEC_BLOCK
 * when the channel defers setup, BAD_PSM otherwise.
 */
static void l2cap_chan_connect_reject(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_conn_rsp rsp;
	u16 result;

	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
		result = L2CAP_CR_SEC_BLOCK;
	else
		result = L2CAP_CR_BAD_PSM;

	l2cap_state_change(chan, BT_DISCONN);

	/* From the peer's perspective our scid is their dcid */
	rsp.scid = cpu_to_le16(chan->dcid);
	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);

	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
}
686
/* Close @chan with error @reason, taking the state-appropriate path:
 * established conn-oriented channels get a disconnect request with a
 * timeout; a half-open incoming channel (BT_CONNECT2) rejects the peer
 * first; everything else is deleted or torn down directly.
 * Caller must hold the channel lock.
 */
void l2cap_chan_close(struct l2cap_chan *chan, int reason)
{
	struct l2cap_conn *conn = chan->conn;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	switch (chan->state) {
	case BT_LISTEN:
		chan->ops->teardown(chan, 0);
		break;

	case BT_CONNECTED:
	case BT_CONFIG:
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
			/* Give the disconnect req time to complete */
			__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
			l2cap_send_disconn_req(chan, reason);
		} else
			l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT2:
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
			if (conn->hcon->type == ACL_LINK)
				l2cap_chan_connect_reject(chan);
			else if (conn->hcon->type == LE_LINK)
				l2cap_chan_le_connect_reject(chan);
		}

		l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT:
	case BT_DISCONN:
		l2cap_chan_del(chan, reason);
		break;

	default:
		chan->ops->teardown(chan, 0);
		break;
	}
}
EXPORT_SYMBOL(l2cap_chan_close);
729
/* Map the channel type and security level to an HCI authentication
 * requirement.  Raw channels bond dedicated; connectionless and SDP
 * channels never bond (SDP additionally downgrades LOW to the SDP
 * level); all other conn-oriented channels use general bonding.  Note
 * the deliberate fall-through from the non-SDP conn-oriented case into
 * the default branch.
 */
static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
{
	switch (chan->chan_type) {
	case L2CAP_CHAN_RAW:
		switch (chan->sec_level) {
		case BT_SECURITY_HIGH:
		case BT_SECURITY_FIPS:
			return HCI_AT_DEDICATED_BONDING_MITM;
		case BT_SECURITY_MEDIUM:
			return HCI_AT_DEDICATED_BONDING;
		default:
			return HCI_AT_NO_BONDING;
		}
		break;
	case L2CAP_CHAN_CONN_LESS:
		if (chan->psm == cpu_to_le16(L2CAP_PSM_3DSP)) {
			if (chan->sec_level == BT_SECURITY_LOW)
				chan->sec_level = BT_SECURITY_SDP;
		}
		if (chan->sec_level == BT_SECURITY_HIGH ||
		    chan->sec_level == BT_SECURITY_FIPS)
			return HCI_AT_NO_BONDING_MITM;
		else
			return HCI_AT_NO_BONDING;
		break;
	case L2CAP_CHAN_CONN_ORIENTED:
		if (chan->psm == cpu_to_le16(L2CAP_PSM_SDP)) {
			if (chan->sec_level == BT_SECURITY_LOW)
				chan->sec_level = BT_SECURITY_SDP;

			if (chan->sec_level == BT_SECURITY_HIGH ||
			    chan->sec_level == BT_SECURITY_FIPS)
				return HCI_AT_NO_BONDING_MITM;
			else
				return HCI_AT_NO_BONDING;
		}
		/* fall through */
	default:
		switch (chan->sec_level) {
		case BT_SECURITY_HIGH:
		case BT_SECURITY_FIPS:
			return HCI_AT_GENERAL_BONDING_MITM;
		case BT_SECURITY_MEDIUM:
			return HCI_AT_GENERAL_BONDING;
		default:
			return HCI_AT_NO_BONDING;
		}
		break;
	}
}
780
/* Service level security
 * Check/raise link security for @chan: LE links delegate to SMP, BR/EDR
 * links request HCI security with the channel's derived auth type.
 * Return value comes from smp_conn_security()/hci_conn_security().
 */
int l2cap_chan_check_security(struct l2cap_chan *chan, bool initiator)
{
	struct l2cap_conn *conn = chan->conn;
	__u8 auth_type;

	if (conn->hcon->type == LE_LINK)
		return smp_conn_security(conn->hcon, chan->sec_level);

	auth_type = l2cap_get_auth_type(chan);

	return hci_conn_security(conn->hcon, chan->sec_level, auth_type,
				 initiator);
}
795
/* Allocate the next signalling-command identifier for @conn under
 * ident_lock.  Identifiers wrap within 1..128 and are never 0.
 */
static u8 l2cap_get_ident(struct l2cap_conn *conn)
{
	u8 id;

	/* Get next available identificator.
	 * 1 - 128 are used by kernel.
	 * 129 - 199 are reserved.
	 * 200 - 254 are used by utilities like l2ping, etc.
	 */

	mutex_lock(&conn->ident_lock);

	if (++conn->tx_ident > 128)
		conn->tx_ident = 1;

	id = conn->tx_ident;

	mutex_unlock(&conn->ident_lock);

	return id;
}
817
/* Build and transmit a signalling command on @conn's HCI channel at
 * maximum priority.  Silently drops the command if the skb cannot be
 * built; uses a non-flushable ACL start when the controller supports
 * it.
 */
static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
			   void *data)
{
	struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
	u8 flags;

	BT_DBG("code 0x%2.2x", code);

	if (!skb)
		return;

	if (lmp_no_flush_capable(conn->hcon->hdev))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
	skb->priority = HCI_PRIO_MAX;

	hci_send_acl(conn->hchan, skb, flags);
}
839
840 static bool __chan_is_moving(struct l2cap_chan *chan)
841 {
842 return chan->move_state != L2CAP_MOVE_STABLE &&
843 chan->move_state != L2CAP_MOVE_WAIT_PREPARE;
844 }
845
/* Transmit @skb on @chan's data path.  If the channel lives on an AMP
 * (high-speed) controller and is not mid-move, send it there -- or drop
 * it if the logical link is gone.  Otherwise send over the BR/EDR ACL,
 * non-flushable when the channel isn't flushable and the controller
 * supports it.
 */
static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct hci_conn *hcon = chan->conn->hcon;
	u16 flags;

	BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
	       skb->priority);

	if (chan->hs_hcon && !__chan_is_moving(chan)) {
		if (chan->hs_hchan)
			hci_send_acl(chan->hs_hchan, skb, ACL_COMPLETE);
		else
			kfree_skb(skb);

		return;
	}

	if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
	    lmp_no_flush_capable(hcon->hdev))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
	hci_send_acl(chan->conn->hchan, skb, flags);
}
872
/* Decode a 16-bit enhanced control field into @control.  The frame-type
 * bit selects S-frame (poll/supervise valid, sar/txseq zeroed) versus
 * I-frame (sar/txseq valid, poll/super zeroed); reqseq and final are
 * common to both.
 */
static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
{
	control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
	control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;

	if (enh & L2CAP_CTRL_FRAME_TYPE) {
		/* S-Frame */
		control->sframe = 1;
		control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
		control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;

		control->sar = 0;
		control->txseq = 0;
	} else {
		/* I-Frame */
		control->sframe = 0;
		control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
		control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;

		control->poll = 0;
		control->super = 0;
	}
}
896
/* Decode a 32-bit extended control field into @control; same S-frame /
 * I-frame split as __unpack_enhanced_control(), with the wider
 * extended-window bit layout.
 */
static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
{
	control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
	control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;

	if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
		/* S-Frame */
		control->sframe = 1;
		control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
		control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;

		control->sar = 0;
		control->txseq = 0;
	} else {
		/* I-Frame */
		control->sframe = 0;
		control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
		control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;

		control->poll = 0;
		control->super = 0;
	}
}
920
/* Decode the control field at the head of @skb into bt_cb(skb)->control
 * and strip it from the skb, choosing extended (32-bit) or enhanced
 * (16-bit) layout from the channel's FLAG_EXT_CTRL.
 */
static inline void __unpack_control(struct l2cap_chan *chan,
				    struct sk_buff *skb)
{
	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
		__unpack_extended_control(get_unaligned_le32(skb->data),
					  &bt_cb(skb)->control);
		skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
	} else {
		__unpack_enhanced_control(get_unaligned_le16(skb->data),
					  &bt_cb(skb)->control);
		skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
	}
}
934
935 static u32 __pack_extended_control(struct l2cap_ctrl *control)
936 {
937 u32 packed;
938
939 packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
940 packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
941
942 if (control->sframe) {
943 packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
944 packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
945 packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
946 } else {
947 packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
948 packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
949 }
950
951 return packed;
952 }
953
/* Encode @control into a 16-bit enhanced control field (inverse of
 * __unpack_enhanced_control()).
 */
static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
{
	u16 packed;

	packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
	packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;

	if (control->sframe) {
		packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
		packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
		packed |= L2CAP_CTRL_FRAME_TYPE;
	} else {
		packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
		packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
	}

	return packed;
}
972
/* Write @control into @skb just after the basic L2CAP header, in the
 * layout (extended or enhanced) selected by the channel's
 * FLAG_EXT_CTRL.  The skb must already have room reserved there.
 */
static inline void __pack_control(struct l2cap_chan *chan,
				  struct l2cap_ctrl *control,
				  struct sk_buff *skb)
{
	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
		put_unaligned_le32(__pack_extended_control(control),
				   skb->data + L2CAP_HDR_SIZE);
	} else {
		put_unaligned_le16(__pack_enhanced_control(control),
				   skb->data + L2CAP_HDR_SIZE);
	}
}
985
986 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
987 {
988 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
989 return L2CAP_EXT_HDR_SIZE;
990 else
991 return L2CAP_ENH_HDR_SIZE;
992 }
993
/* Build an ERTM S-frame PDU for @chan carrying the already-packed
 * @control field: basic header + control field + optional CRC16 FCS
 * over everything so far.  Sent at maximum priority.
 * Returns the skb or ERR_PTR(-ENOMEM).
 */
static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
					       u32 control)
{
	struct sk_buff *skb;
	struct l2cap_hdr *lh;
	int hlen = __ertm_hdr_size(chan);

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	skb = bt_skb_alloc(hlen, GFP_KERNEL);

	if (!skb)
		return ERR_PTR(-ENOMEM);

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);

	/* Control field width follows the channel's EXT_CTRL flag */
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	if (chan->fcs == L2CAP_FCS_CRC16) {
		u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
		put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
	}

	skb->priority = HCI_PRIO_MAX;
	return skb;
}
1026
/* Transmit an ERTM supervisory frame described by @control.  No-ops on
 * non-S-frames or while an AMP move is in progress.  Handles the ERTM
 * bookkeeping around the send: sets the F-bit if one is owed (unless
 * polling), tracks RNR-sent state, and for RR/RNR records the acked
 * sequence and cancels the pending ack timer.
 */
static void l2cap_send_sframe(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;
	u32 control_field;

	BT_DBG("chan %p, control %p", chan, control);

	if (!control->sframe)
		return;

	if (__chan_is_moving(chan))
		return;

	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
	    !control->poll)
		control->final = 1;

	if (control->super == L2CAP_SUPER_RR)
		clear_bit(CONN_RNR_SENT, &chan->conn_state);
	else if (control->super == L2CAP_SUPER_RNR)
		set_bit(CONN_RNR_SENT, &chan->conn_state);

	if (control->super != L2CAP_SUPER_SREJ) {
		chan->last_acked_seq = control->reqseq;
		__clear_ack_timer(chan);
	}

	BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
	       control->final, control->poll, control->super);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		control_field = __pack_extended_control(control);
	else
		control_field = __pack_enhanced_control(control);

	skb = l2cap_create_sframe_pdu(chan, control_field);
	if (!IS_ERR(skb))
		l2cap_do_send(chan, skb);
}
1067
/* Send an RR or RNR supervisory frame acknowledging buffer_seq, with
 * the poll bit set per @poll.  RNR is sent when the local side is busy
 * (CONN_LOCAL_BUSY), RR otherwise.
 */
static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
{
	struct l2cap_ctrl control;

	BT_DBG("chan %p, poll %d", chan, poll);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.poll = poll;

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
		control.super = L2CAP_SUPER_RNR;
	else
		control.super = L2CAP_SUPER_RR;

	control.reqseq = chan->buffer_seq;
	l2cap_send_sframe(chan, &control);
}
1086
1087 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
1088 {
1089 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
1090 return true;
1091
1092 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
1093 }
1094
/* Decide whether @chan may be created on / moved to an AMP controller:
 * high-speed must be enabled on the connection, the peer must support
 * the A2MP fixed channel, at least one non-BR/EDR AMP controller must
 * be up locally, and the channel policy must prefer AMP.
 */
static bool __amp_capable(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct hci_dev *hdev;
	bool amp_available = false;

	if (!conn->hs_enabled)
		return false;

	if (!(conn->fixed_chan_mask & L2CAP_FC_A2MP))
		return false;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		if (hdev->amp_type != AMP_TYPE_BREDR &&
		    test_bit(HCI_UP, &hdev->flags)) {
			amp_available = true;
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);

	if (chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED)
		return amp_available;

	return false;
}
1122
/* Validate extended flow spec parameters for @chan.
 * Currently a stub that accepts everything.
 */
static bool l2cap_check_efs(struct l2cap_chan *chan)
{
	/* Check EFS parameters */
	return true;
}
1128
/* Send an L2CAP Connection Request for @chan: allocate a command ident
 * (saved on the channel to match the response), mark the connect as
 * pending, and transmit scid + psm.
 */
void l2cap_send_conn_req(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_conn_req req;

	req.scid = cpu_to_le16(chan->scid);
	req.psm = chan->psm;

	chan->ident = l2cap_get_ident(conn);

	set_bit(CONF_CONNECT_PEND, &chan->conf_state);

	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
}
1143
/* Send an AMP Create Channel Request for @chan on controller @amp_id.
 * Like l2cap_send_conn_req() but carries the target AMP id; the
 * allocated ident is saved on the channel to match the response.
 */
static void l2cap_send_create_chan_req(struct l2cap_chan *chan, u8 amp_id)
{
	struct l2cap_create_chan_req req;
	req.scid = cpu_to_le16(chan->scid);
	req.psm = chan->psm;
	req.amp_id = amp_id;

	chan->ident = l2cap_get_ident(chan->conn);

	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_REQ,
		       sizeof(req), &req);
}
1156
/* Prepare an ERTM channel for an AMP channel move: stop all timers,
 * normalize retry counts on frames that were already transmitted
 * (stopping at the first never-sent frame), drop SREJ state, and park
 * TX/RX state machines for the move.  No-op for non-ERTM channels.
 */
static void l2cap_move_setup(struct l2cap_chan *chan)
{
	struct sk_buff *skb;

	BT_DBG("chan %p", chan);

	if (chan->mode != L2CAP_MODE_ERTM)
		return;

	__clear_retrans_timer(chan);
	__clear_monitor_timer(chan);
	__clear_ack_timer(chan);

	chan->retry_count = 0;
	skb_queue_walk(&chan->tx_q, skb) {
		/* Frames with a nonzero retry count have been sent at
		 * least once; reset them to 1.  Unsent frames mark the
		 * end of the transmitted prefix.
		 */
		if (bt_cb(skb)->control.retries)
			bt_cb(skb)->control.retries = 1;
		else
			break;
	}

	chan->expected_tx_seq = chan->buffer_seq;

	clear_bit(CONN_REJ_ACT, &chan->conn_state);
	clear_bit(CONN_SREJ_ACT, &chan->conn_state);
	l2cap_seq_list_clear(&chan->retrans_list);
	l2cap_seq_list_clear(&chan->srej_list);
	skb_queue_purge(&chan->srej_q);

	chan->tx_state = L2CAP_TX_STATE_XMIT;
	chan->rx_state = L2CAP_RX_STATE_MOVE;

	set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
}
1191
/* Finish a channel move: return the channel to the stable state and,
 * for ERTM, resynchronize with the peer depending on which side
 * initiated the move (initiator polls with an explicit P-bit, the
 * responder waits for that poll).
 */
static void l2cap_move_done(struct l2cap_chan *chan)
{
	u8 move_role = chan->move_role;
	BT_DBG("chan %p", chan);

	chan->move_state = L2CAP_MOVE_STABLE;
	chan->move_role = L2CAP_MOVE_ROLE_NONE;

	/* Non-ERTM modes carry no state that needs resynchronizing */
	if (chan->mode != L2CAP_MODE_ERTM)
		return;

	switch (move_role) {
	case L2CAP_MOVE_ROLE_INITIATOR:
		/* Poll the peer, then wait for the F-bit response */
		l2cap_tx(chan, NULL, NULL, L2CAP_EV_EXPLICIT_POLL);
		chan->rx_state = L2CAP_RX_STATE_WAIT_F;
		break;
	case L2CAP_MOVE_ROLE_RESPONDER:
		/* Wait for the initiator's P-bit poll */
		chan->rx_state = L2CAP_RX_STATE_WAIT_P;
		break;
	}
}
1213
/* Transition @chan to BT_CONNECTED and notify its owner via ops->ready().
 *
 * Also cancels the channel timer and, for LE flow-control channels that
 * currently hold no TX credits, suspends sending until credits arrive.
 */
static void l2cap_chan_ready(struct l2cap_chan *chan)
{
	/* This clears all conf flags, including CONF_NOT_COMPLETE */
	chan->conf_state = 0;
	__clear_chan_timer(chan);

	/* No credits means we cannot transmit yet on LE flow control */
	if (chan->mode == L2CAP_MODE_LE_FLOWCTL && !chan->tx_credits)
		chan->ops->suspend(chan);

	chan->state = BT_CONNECTED;

	chan->ops->ready(chan);
}
1227
/* Send an LE credit-based Connection Request for @chan.
 *
 * The FLAG_LE_CONN_REQ_SENT guard makes this idempotent: only the
 * first call actually transmits the request.
 */
static void l2cap_le_connect(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_le_conn_req req;

	/* Already sent one; don't send a duplicate request */
	if (test_and_set_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags))
		return;

	req.psm = chan->psm;
	req.scid = cpu_to_le16(chan->scid);
	req.mtu = cpu_to_le16(chan->imtu);
	req.mps = cpu_to_le16(chan->mps);
	/* Initial credits we grant the peer for sending to us */
	req.credits = cpu_to_le16(chan->rx_credits);

	chan->ident = l2cap_get_ident(conn);

	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_REQ,
		       sizeof(req), &req);
}
1247
/* Drive connection setup for a channel on an LE link.
 *
 * First makes sure the link satisfies the channel's security level via
 * SMP; a channel without a PSM needs no connect exchange and becomes
 * ready immediately, otherwise an LE connect request is sent for
 * channels still in BT_CONNECT.
 */
static void l2cap_le_start(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;

	/* Security not yet satisfied; SMP will re-trigger us later */
	if (!smp_conn_security(conn->hcon, chan->sec_level))
		return;

	if (!chan->psm) {
		l2cap_chan_ready(chan);
		return;
	}

	if (chan->state == BT_CONNECT)
		l2cap_le_connect(chan);
}
1263
1264 static void l2cap_start_connection(struct l2cap_chan *chan)
1265 {
1266 if (__amp_capable(chan)) {
1267 BT_DBG("chan %p AMP capable: discover AMPs", chan);
1268 a2mp_discover_amp(chan);
1269 } else if (chan->conn->hcon->type == LE_LINK) {
1270 l2cap_le_start(chan);
1271 } else {
1272 l2cap_send_conn_req(chan);
1273 }
1274 }
1275
/* Start establishing @chan, taking the BR/EDR feature-mask exchange
 * into account.
 *
 * On LE links this defers to the LE path. On BR/EDR the connection
 * may only proceed once the information (feature mask) exchange with
 * the remote has completed; if it has not even started yet, an
 * Information Request is sent and a timeout armed.
 */
static void l2cap_do_start(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;

	if (conn->hcon->type == LE_LINK) {
		l2cap_le_start(chan);
		return;
	}

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
		/* Exchange in flight; l2cap_conn_start() runs when done */
		if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
			return;

		if (l2cap_chan_check_security(chan, true) &&
		    __l2cap_no_conn_pending(chan)) {
			l2cap_start_connection(chan);
		}
	} else {
		struct l2cap_info_req req;
		req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		/* Give up on the exchange if no answer arrives in time */
		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

		l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
			       sizeof(req), &req);
	}
}
1306
1307 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1308 {
1309 u32 local_feat_mask = l2cap_feat_mask;
1310 if (!disable_ertm)
1311 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1312
1313 switch (mode) {
1314 case L2CAP_MODE_ERTM:
1315 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1316 case L2CAP_MODE_STREAMING:
1317 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
1318 default:
1319 return 0x00;
1320 }
1321 }
1322
/* Initiate disconnection of @chan, recording @err as the reason.
 *
 * Stops the ERTM timers on a live ERTM channel first. A2MP channels
 * are torn down without a Disconnect Request on the wire; for all
 * other channels the request is sent and the channel transitions to
 * BT_DISCONN with the given error.
 */
static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_disconn_req req;

	if (!conn)
		return;

	if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);
	}

	/* The A2MP fixed channel has no disconnect exchange */
	if (chan->scid == L2CAP_CID_A2MP) {
		l2cap_state_change(chan, BT_DISCONN);
		return;
	}

	req.dcid = cpu_to_le16(chan->dcid);
	req.scid = cpu_to_le16(chan->scid);
	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
		       sizeof(req), &req);

	l2cap_state_change_and_error(chan, BT_DISCONN, err);
}
1349
1350 /* ---- L2CAP connections ---- */
/* ---- L2CAP connections ---- */

/* Walk all channels on @conn and push each pending one forward,
 * typically after the feature-mask exchange finished or security
 * changed.
 *
 * Channels in BT_CONNECT are (re)checked for security and mode support
 * and then started; channels in BT_CONNECT2 (incoming, awaiting our
 * answer) get a Connection Response whose result depends on security
 * and the defer-setup flag, followed by our first Configure Request on
 * success. Runs under conn->chan_lock, taking each channel lock in
 * turn.
 */
static void l2cap_conn_start(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan, *tmp;

	BT_DBG("conn %p", conn);

	mutex_lock(&conn->chan_lock);

	/* _safe variant: l2cap_chan_close() may unlink the channel */
	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			if (!l2cap_chan_check_security(chan, true) ||
			    !__l2cap_no_conn_pending(chan)) {
				l2cap_chan_unlock(chan);
				continue;
			}

			/* Close channels that insist on a mode the link
			 * cannot provide (CONF_STATE2_DEVICE means the
			 * mode is mandatory for this channel).
			 */
			if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
			    && test_bit(CONF_STATE2_DEVICE,
					&chan->conf_state)) {
				l2cap_chan_close(chan, ECONNRESET);
				l2cap_chan_unlock(chan);
				continue;
			}

			l2cap_start_connection(chan);

		} else if (chan->state == BT_CONNECT2) {
			struct l2cap_conn_rsp rsp;
			char buf[128]; /* scratch for the conf request */
			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);

			if (l2cap_chan_check_security(chan, false)) {
				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
					/* Owner must accept first */
					rsp.result = cpu_to_le16(L2CAP_CR_PEND);
					rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
					chan->ops->defer(chan);

				} else {
					l2cap_state_change(chan, BT_CONFIG);
					rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
					rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
				}
			} else {
				/* Authentication still outstanding */
				rsp.result = cpu_to_le16(L2CAP_CR_PEND);
				rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
			}

			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			/* Only a successful, not-yet-configured channel
			 * proceeds to configuration now.
			 */
			if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
			    rsp.result != L2CAP_CR_SUCCESS) {
				l2cap_chan_unlock(chan);
				continue;
			}

			set_bit(CONF_REQ_SENT, &chan->conf_state);
			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
				       l2cap_build_conf_req(chan, buf), buf);
			chan->num_conf_req++;
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
1426
/* Handle an LE link becoming ready: kick off any pending pairing for
 * outgoing connections and, as slave, request a connection parameter
 * update when the current interval is outside our configured bounds.
 */
static void l2cap_le_conn_ready(struct l2cap_conn *conn)
{
	struct hci_conn *hcon = conn->hcon;
	struct hci_dev *hdev = hcon->hdev;

	BT_DBG("%s conn %p", hdev->name, conn);

	/* For outgoing pairing which doesn't necessarily have an
	 * associated socket (e.g. mgmt_pair_device).
	 */
	if (hcon->out)
		smp_conn_security(hcon, hcon->pending_sec_level);

	/* For LE slave connections, make sure the connection interval
	 * is in the range of the minimum and maximum interval that has
	 * been configured for this connection. If not, then trigger
	 * the connection update procedure.
	 */
	if (hcon->role == HCI_ROLE_SLAVE &&
	    (hcon->le_conn_interval < hcon->le_conn_min_interval ||
	     hcon->le_conn_interval > hcon->le_conn_max_interval)) {
		struct l2cap_conn_param_update_req req;

		req.min = cpu_to_le16(hcon->le_conn_min_interval);
		req.max = cpu_to_le16(hcon->le_conn_max_interval);
		req.latency = cpu_to_le16(hcon->le_conn_latency);
		req.to_multiplier = cpu_to_le16(hcon->le_supv_timeout);

		l2cap_send_cmd(conn, l2cap_get_ident(conn),
			       L2CAP_CONN_PARAM_UPDATE_REQ, sizeof(req), &req);
	}
}
1459
/* Handle the underlying link becoming ready: advance every channel on
 * @conn (skipping the A2MP channel, which is managed separately), run
 * the LE ready work for LE links, and finally release any RX frames
 * that were queued while the link was still coming up.
 */
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan;
	struct hci_conn *hcon = conn->hcon;

	BT_DBG("conn %p", conn);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {

		l2cap_chan_lock(chan);

		/* A2MP has its own state machine; leave it alone */
		if (chan->scid == L2CAP_CID_A2MP) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (hcon->type == LE_LINK) {
			l2cap_le_start(chan);
		} else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			/* Connectionless/fixed channels are usable at once */
			l2cap_chan_ready(chan);

		} else if (chan->state == BT_CONNECT) {
			l2cap_do_start(chan);
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);

	if (hcon->type == LE_LINK)
		l2cap_le_conn_ready(conn);

	/* Process frames received before the conn was fully set up */
	queue_work(hcon->hdev->workqueue, &conn->pending_rx_work);
}
1497
1498 /* Notify sockets that we cannot guaranty reliability anymore */
1499 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1500 {
1501 struct l2cap_chan *chan;
1502
1503 BT_DBG("conn %p", conn);
1504
1505 mutex_lock(&conn->chan_lock);
1506
1507 list_for_each_entry(chan, &conn->chan_l, list) {
1508 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1509 l2cap_chan_set_err(chan, err);
1510 }
1511
1512 mutex_unlock(&conn->chan_lock);
1513 }
1514
/* Delayed-work handler fired when the remote never answered our
 * Information (feature mask) Request: give up on the exchange, mark it
 * done anyway, and let the waiting channels proceed with defaults.
 */
static void l2cap_info_timeout(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       info_timer.work);

	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
	conn->info_ident = 0;

	l2cap_conn_start(conn);
}
1525
1526 /*
1527 * l2cap_user
1528 * External modules can register l2cap_user objects on l2cap_conn. The ->probe
1529 * callback is called during registration. The ->remove callback is called
1530 * during unregistration.
1531 * An l2cap_user object can either be explicitly unregistered or when the
1532 * underlying l2cap_conn object is deleted. This guarantees that l2cap->hcon,
1533 * l2cap->hchan, .. are valid as long as the remove callback hasn't been called.
1534 * External modules must own a reference to the l2cap_conn object if they intend
1535 * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
1536 * any time if they don't.
1537 */
1538
/* Register @user on @conn and invoke its ->probe() callback.
 *
 * Returns 0 on success, -EINVAL if the user is already registered
 * (non-NULL list pointers mark a linked user), -ENODEV if the
 * connection has already been torn down, or the error from ->probe().
 */
int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
{
	struct hci_dev *hdev = conn->hcon->hdev;
	int ret;

	/* We need to check whether l2cap_conn is registered. If it is not, we
	 * must not register the l2cap_user. l2cap_conn_del() unregisters
	 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
	 * relies on the parent hci_conn object to be locked. This itself relies
	 * on the hci_dev object to be locked. So we must lock the hci device
	 * here, too. */

	hci_dev_lock(hdev);

	/* Non-NULL list pointers mean the user is already registered */
	if (user->list.next || user->list.prev) {
		ret = -EINVAL;
		goto out_unlock;
	}

	/* conn->hchan is NULL after l2cap_conn_del() was called */
	if (!conn->hchan) {
		ret = -ENODEV;
		goto out_unlock;
	}

	ret = user->probe(conn, user);
	if (ret)
		goto out_unlock;

	list_add(&user->list, &conn->users);
	ret = 0;

out_unlock:
	hci_dev_unlock(hdev);
	return ret;
}
EXPORT_SYMBOL(l2cap_register_user);
1576
/* Unregister @user from @conn and invoke its ->remove() callback.
 *
 * A no-op if the user is not currently registered. The list pointers
 * are reset to NULL afterwards, since NULL pointers are the marker for
 * "not registered" used by l2cap_register_user().
 */
void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
{
	struct hci_dev *hdev = conn->hcon->hdev;

	hci_dev_lock(hdev);

	/* NULL list pointers mean the user was never registered (or was
	 * already removed); nothing to do.
	 */
	if (!user->list.next || !user->list.prev)
		goto out_unlock;

	list_del(&user->list);
	user->list.next = NULL;
	user->list.prev = NULL;
	user->remove(conn, user);

out_unlock:
	hci_dev_unlock(hdev);
}
EXPORT_SYMBOL(l2cap_unregister_user);
1595
/* Remove every registered l2cap_user from @conn, calling each one's
 * ->remove() callback. As in l2cap_unregister_user(), the list
 * pointers are NULLed to mark the user as unregistered. Used during
 * connection teardown.
 */
static void l2cap_unregister_all_users(struct l2cap_conn *conn)
{
	struct l2cap_user *user;

	while (!list_empty(&conn->users)) {
		user = list_first_entry(&conn->users, struct l2cap_user, list);
		list_del(&user->list);
		user->list.next = NULL;
		user->list.prev = NULL;
		user->remove(conn, user);
	}
}
1608
/* Tear down the L2CAP connection attached to @hcon, closing every
 * channel with @err as the reason.
 *
 * Ordering matters here: pending work is cancelled first, then users
 * are unregistered, then each channel is deleted and closed (holding a
 * channel reference across the close so the channel cannot vanish
 * under us), and finally the hci_chan and the conn reference that the
 * hcon held are dropped. Callers rely on hcon being locked (see the
 * comment in l2cap_register_user()).
 */
static void l2cap_conn_del(struct hci_conn *hcon, int err)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan, *l;

	if (!conn)
		return;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	/* Drop any partially reassembled frame */
	kfree_skb(conn->rx_skb);

	skb_queue_purge(&conn->pending_rx);

	/* We can not call flush_work(&conn->pending_rx_work) here since we
	 * might block if we are running on a worker from the same workqueue
	 * pending_rx_work is waiting on.
	 */
	if (work_pending(&conn->pending_rx_work))
		cancel_work_sync(&conn->pending_rx_work);

	if (work_pending(&conn->disconn_work))
		cancel_work_sync(&conn->disconn_work);

	l2cap_unregister_all_users(conn);

	mutex_lock(&conn->chan_lock);

	/* Kill channels */
	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
		/* Hold a ref so ops->close() runs on a live channel even
		 * though l2cap_chan_del() unlinks it from the conn.
		 */
		l2cap_chan_hold(chan);
		l2cap_chan_lock(chan);

		l2cap_chan_del(chan, err);

		l2cap_chan_unlock(chan);

		chan->ops->close(chan);
		l2cap_chan_put(chan);
	}

	mutex_unlock(&conn->chan_lock);

	hci_chan_del(conn->hchan);

	/* Stop a still-running feature mask exchange timer */
	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		cancel_delayed_work_sync(&conn->info_timer);

	hcon->l2cap_data = NULL;
	conn->hchan = NULL;
	l2cap_conn_put(conn);
}
1661
/* Workqueue handler that performs the connection teardown requested
 * via l2cap_conn_shutdown(), using the error code stashed there.
 */
static void disconn_work(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       disconn_work);

	BT_DBG("conn %p", conn);

	l2cap_conn_del(conn->hcon, conn->disconn_err);
}
1671
/* Request asynchronous teardown of @conn with reason @err.
 *
 * The actual work happens in disconn_work() on the hdev workqueue;
 * this merely records the error and queues the work, so it is safe to
 * call from contexts that cannot tear the connection down directly.
 */
void l2cap_conn_shutdown(struct l2cap_conn *conn, int err)
{
	struct hci_dev *hdev = conn->hcon->hdev;

	BT_DBG("conn %p err %d", conn, err);

	conn->disconn_err = err;
	queue_work(hdev->workqueue, &conn->disconn_work);
}
1681
/* kref release callback: drop the hci_conn reference the l2cap_conn
 * held and free the object. Called when the last l2cap_conn_put()
 * brings the refcount to zero.
 */
static void l2cap_conn_free(struct kref *ref)
{
	struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);

	hci_conn_put(conn->hcon);
	kfree(conn);
}
1689
/* Take a reference on @conn. Pair with l2cap_conn_put(). */
void l2cap_conn_get(struct l2cap_conn *conn)
{
	kref_get(&conn->ref);
}
EXPORT_SYMBOL(l2cap_conn_get);
1695
/* Drop a reference on @conn; frees it via l2cap_conn_free() when the
 * last reference goes away.
 */
void l2cap_conn_put(struct l2cap_conn *conn)
{
	kref_put(&conn->ref, l2cap_conn_free);
}
EXPORT_SYMBOL(l2cap_conn_put);
1701
1702 /* ---- Socket interface ---- */
1703
1704 /* Find socket with psm and source / destination bdaddr.
1705 * Returns closest match.
1706 */
/* Look up a global channel by @psm, @src/@dst addresses and link type.
 *
 * An exact address match wins immediately; otherwise the best wildcard
 * (BDADDR_ANY) match found is returned. @state of 0 matches any state.
 * The returned channel, if any, has had its refcount raised via
 * l2cap_chan_hold(); the caller must put it when done.
 */
static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
						   bdaddr_t *src,
						   bdaddr_t *dst,
						   u8 link_type)
{
	struct l2cap_chan *c, *c1 = NULL;

	read_lock(&chan_list_lock);

	list_for_each_entry(c, &chan_list, global_l) {
		if (state && c->state != state)
			continue;

		/* Address type must agree with the link type */
		if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR)
			continue;

		if (link_type == LE_LINK && c->src_type == BDADDR_BREDR)
			continue;

		if (c->psm == psm) {
			int src_match, dst_match;
			int src_any, dst_any;

			/* Exact match. */
			src_match = !bacmp(&c->src, src);
			dst_match = !bacmp(&c->dst, dst);
			if (src_match && dst_match) {
				l2cap_chan_hold(c);
				read_unlock(&chan_list_lock);
				return c;
			}

			/* Closest match */
			src_any = !bacmp(&c->src, BDADDR_ANY);
			dst_any = !bacmp(&c->dst, BDADDR_ANY);
			if ((src_match && dst_any) || (src_any && dst_match) ||
			    (src_any && dst_any))
				c1 = c;
		}
	}

	if (c1)
		l2cap_chan_hold(c1);

	read_unlock(&chan_list_lock);

	return c1;
}
1755
/* ERTM monitor timer expiry (delayed work): feed the MONITOR_TO event
 * into the TX state machine, unless the channel already lost its
 * connection. Always drops the channel reference that was taken when
 * the timer was armed.
 */
static void l2cap_monitor_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       monitor_timer.work);

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	/* Connection is gone; just release the timer's channel ref */
	if (!chan->conn) {
		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
		return;
	}

	l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
1776
/* ERTM retransmission timer expiry (delayed work): feed the RETRANS_TO
 * event into the TX state machine, unless the channel already lost its
 * connection. Always drops the channel reference that was taken when
 * the timer was armed.
 */
static void l2cap_retrans_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       retrans_timer.work);

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	/* Connection is gone; just release the timer's channel ref */
	if (!chan->conn) {
		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
		return;
	}

	l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
1796
/* Transmit the segmented PDUs in @skbs on a streaming-mode channel.
 *
 * Streaming mode has no retransmission: every queued frame is numbered
 * with the next TX sequence, optionally gets a CRC16 FCS appended, and
 * is sent immediately. Nothing is sent while a channel move is in
 * progress.
 */
static void l2cap_streaming_send(struct l2cap_chan *chan,
				 struct sk_buff_head *skbs)
{
	struct sk_buff *skb;
	struct l2cap_ctrl *control;

	BT_DBG("chan %p, skbs %p", chan, skbs);

	/* Hold off while the channel is being moved to another link */
	if (__chan_is_moving(chan))
		return;

	skb_queue_splice_tail_init(skbs, &chan->tx_q);

	while (!skb_queue_empty(&chan->tx_q)) {

		skb = skb_dequeue(&chan->tx_q);

		bt_cb(skb)->control.retries = 1;
		control = &bt_cb(skb)->control;

		/* Streaming frames never acknowledge anything */
		control->reqseq = 0;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		l2cap_do_send(chan, skb);

		BT_DBG("Sent txseq %u", control->txseq);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->frames_sent++;
	}
}
1835
/* Transmit as many new I-frames as the ERTM TX window permits.
 *
 * Frames are taken from tx_send_head while the channel is in the XMIT
 * state and the number of unacked frames stays below the remote's TX
 * window. Each frame gets its control field packed (piggybacking the
 * current reqseq as an ack), an optional FCS, and is sent as a clone
 * so the original remains queued for retransmission.
 *
 * Returns the number of frames sent, 0 when nothing may be sent
 * (remote busy or channel moving), or -ENOTCONN.
 */
static int l2cap_ertm_send(struct l2cap_chan *chan)
{
	struct sk_buff *skb, *tx_skb;
	struct l2cap_ctrl *control;
	int sent = 0;

	BT_DBG("chan %p", chan);

	if (chan->state != BT_CONNECTED)
		return -ENOTCONN;

	/* Peer signalled RNR; transmission is paused */
	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return 0;

	if (__chan_is_moving(chan))
		return 0;

	while (chan->tx_send_head &&
	       chan->unacked_frames < chan->remote_tx_win &&
	       chan->tx_state == L2CAP_TX_STATE_XMIT) {

		skb = chan->tx_send_head;

		bt_cb(skb)->control.retries = 1;
		control = &bt_cb(skb)->control;

		/* A pending F-bit is carried on the next outgoing frame */
		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control->final = 1;

		/* Piggyback an acknowledgment of everything received */
		control->reqseq = chan->buffer_seq;
		chan->last_acked_seq = chan->buffer_seq;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		/* Clone after data has been modified. Data is assumed to be
		   read-only (for locking purposes) on cloned sk_buffs.
		 */
		tx_skb = skb_clone(skb, GFP_KERNEL);

		if (!tx_skb)
			break;

		__set_retrans_timer(chan);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->unacked_frames++;
		chan->frames_sent++;
		sent++;

		/* Advance tx_send_head to the next untransmitted frame */
		if (skb_queue_is_last(&chan->tx_q, skb))
			chan->tx_send_head = NULL;
		else
			chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);

		l2cap_do_send(chan, tx_skb);
		BT_DBG("Sent txseq %u", control->txseq);
	}

	BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
	       chan->unacked_frames, skb_queue_len(&chan->tx_q));

	return sent;
}
1905
/* Retransmit every frame whose sequence number is on retrans_list.
 *
 * For each sequence number the original skb is found in tx_q, its
 * retry budget checked against max_tx (a violation disconnects the
 * channel), its control field rewritten with the current reqseq and a
 * possible F-bit, the FCS recomputed, and a fresh copy sent. Nothing
 * is resent while the remote is busy or the channel is moving.
 */
static void l2cap_ertm_resend(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	struct sk_buff *skb;
	struct sk_buff *tx_skb;
	u16 seq;

	BT_DBG("chan %p", chan);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	if (__chan_is_moving(chan))
		return;

	while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
		seq = l2cap_seq_list_pop(&chan->retrans_list);

		skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
		if (!skb) {
			BT_DBG("Error: Can't retransmit seq %d, frame missing",
			       seq);
			continue;
		}

		bt_cb(skb)->control.retries++;
		control = bt_cb(skb)->control;

		/* max_tx of 0 means unlimited retransmissions */
		if (chan->max_tx != 0 &&
		    bt_cb(skb)->control.retries > chan->max_tx) {
			BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
			l2cap_send_disconn_req(chan, ECONNRESET);
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		/* Refresh the piggybacked ack and pending F-bit */
		control.reqseq = chan->buffer_seq;
		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control.final = 1;
		else
			control.final = 0;

		if (skb_cloned(skb)) {
			/* Cloned sk_buffs are read-only, so we need a
			 * writeable copy
			 */
			tx_skb = skb_copy(skb, GFP_KERNEL);
		} else {
			tx_skb = skb_clone(skb, GFP_KERNEL);
		}

		if (!tx_skb) {
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		/* Update skb contents */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
			put_unaligned_le32(__pack_extended_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		} else {
			put_unaligned_le16(__pack_enhanced_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		}

		/* Control field changed, so the FCS must be recomputed */
		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) tx_skb->data, tx_skb->len);
			put_unaligned_le16(fcs, skb_put(tx_skb,
							L2CAP_FCS_SIZE));
		}

		l2cap_do_send(chan, tx_skb);

		BT_DBG("Resent txseq %d", control.txseq);

		chan->last_acked_seq = chan->buffer_seq;
	}
}
1984
/* Retransmit the single frame requested by @control->reqseq (e.g. in
 * response to an REJ from the peer).
 */
static void l2cap_retransmit(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);

	l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
	l2cap_ertm_resend(chan);
}
1993
/* Retransmit all unacked frames starting at @control->reqseq.
 *
 * The retransmit list is rebuilt from scratch: walk tx_q to the frame
 * carrying reqseq (or to the first unsent frame), then queue every
 * frame from there up to tx_send_head. A P-bit in @control requests
 * that the next frame carry the F-bit. No-op while the remote is busy
 * or no frames are outstanding.
 */
static void l2cap_retransmit_all(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	if (control->poll)
		set_bit(CONN_SEND_FBIT, &chan->conn_state);

	l2cap_seq_list_clear(&chan->retrans_list);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	if (chan->unacked_frames) {
		/* Find the first frame to retransmit */
		skb_queue_walk(&chan->tx_q, skb) {
			if (bt_cb(skb)->control.txseq == control->reqseq ||
			    skb == chan->tx_send_head)
				break;
		}

		/* Queue everything already sent from there onwards */
		skb_queue_walk_from(&chan->tx_q, skb) {
			if (skb == chan->tx_send_head)
				break;

			l2cap_seq_list_append(&chan->retrans_list,
					      bt_cb(skb)->control.txseq);
		}

		l2cap_ertm_resend(chan);
	}
}
2027
/* Acknowledge received I-frames, choosing the cheapest mechanism.
 *
 * If we are locally busy an RNR S-frame is sent. Otherwise new
 * I-frames are transmitted first (they piggyback the ack for free);
 * if frames still need acknowledging and the receive window is 3/4
 * full an RR S-frame is sent immediately, else the ack timer is armed
 * to batch the acknowledgment.
 */
static void l2cap_send_ack(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
					 chan->last_acked_seq);
	int threshold;

	BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
	       chan, chan->last_acked_seq, chan->buffer_seq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    chan->rx_state == L2CAP_RX_STATE_RECV) {
		__clear_ack_timer(chan);
		control.super = L2CAP_SUPER_RNR;
		control.reqseq = chan->buffer_seq;
		l2cap_send_sframe(chan, &control);
	} else {
		if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
			l2cap_ertm_send(chan);
			/* If any i-frames were sent, they included an ack */
			if (chan->buffer_seq == chan->last_acked_seq)
				frames_to_ack = 0;
		}

		/* Ack now if the window is 3/4ths full.
		 * Calculate without mul or div
		 */
		threshold = chan->ack_win;
		threshold += threshold << 1;
		threshold >>= 2;

		BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
		       threshold);

		if (frames_to_ack >= threshold) {
			__clear_ack_timer(chan);
			control.super = L2CAP_SUPER_RR;
			control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &control);
			frames_to_ack = 0;
		}

		/* Below threshold: defer the ack via the ack timer */
		if (frames_to_ack)
			__set_ack_timer(chan);
	}
}
2077
/* Copy @len bytes of user data from @msg into @skb.
 *
 * The first @count bytes go into @skb's linear area (the caller has
 * already reserved headroom there); any remainder is split into
 * continuation fragments of at most conn->mtu bytes each, chained on
 * skb's frag_list. Returns the number of bytes copied, -EFAULT on a
 * failed copy, or the error from skb allocation.
 */
static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
					 struct msghdr *msg, int len,
					 int count, struct sk_buff *skb)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff **frag;
	int sent = 0;

	if (chan->ops->memcpy_fromiovec(chan, skb_put(skb, count),
					msg->msg_iov, count))
		return -EFAULT;

	sent += count;
	len -= count;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		struct sk_buff *tmp;

		count = min_t(unsigned int, conn->mtu, len);

		tmp = chan->ops->alloc_skb(chan, 0, count,
					   msg->msg_flags & MSG_DONTWAIT);
		if (IS_ERR(tmp))
			return PTR_ERR(tmp);

		*frag = tmp;

		if (chan->ops->memcpy_fromiovec(chan, skb_put(*frag, count),
						msg->msg_iov, count))
			return -EFAULT;

		sent += count;
		len -= count;

		/* Account the fragment in the head skb's totals */
		skb->len += (*frag)->len;
		skb->data_len += (*frag)->len;

		frag = &(*frag)->next;
	}

	return sent;
}
2122
/* Build a connectionless (G-frame) PDU from @msg: basic L2CAP header
 * plus the PSM, followed by the payload. Returns the skb or an
 * ERR_PTR on allocation/copy failure.
 */
static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
						 struct msghdr *msg, size_t len)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p psm 0x%2.2x len %zu", chan,
	       __le16_to_cpu(chan->psm), len);

	/* Linear part is bounded by the HCI MTU; rest goes to frag_list */
	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, hlen, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
	put_unaligned(chan->psm, (__le16 *) skb_put(skb, L2CAP_PSMLEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
2154
/* Build a basic-mode (B-frame) PDU from @msg: plain L2CAP header
 * followed by the payload. Returns the skb or an ERR_PTR on
 * allocation/copy failure.
 */
static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
					      struct msghdr *msg, size_t len)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	/* Linear part is bounded by the HCI MTU; rest goes to frag_list */
	count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);

	skb = chan->ops->alloc_skb(chan, L2CAP_HDR_SIZE, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len);

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
2184
/* Build one ERTM/streaming I-frame from @msg.
 *
 * Lays out the L2CAP header, a zeroed control field (filled in at
 * transmit time), an optional SDU length (@sdulen != 0 marks the first
 * segment of a segmented SDU), and the payload; headroom for an FCS is
 * accounted in the length when CRC16 is enabled. Returns the skb or
 * an ERR_PTR on failure.
 */
static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
					       struct msghdr *msg, size_t len,
					       u16 sdulen)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	/* Header size depends on enhanced vs extended control field */
	hlen = __ertm_hdr_size(chan);

	if (sdulen)
		hlen += L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, hlen, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	/* Control header is populated later */
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	bt_cb(skb)->control.fcs = chan->fcs;
	bt_cb(skb)->control.retries = 0;
	return skb;
}
2238
/* Segment an SDU from @msg into ERTM/streaming I-frame PDUs on
 * @seg_queue.
 *
 * The PDU payload size is derived from the HCI MTU, capped for BR/EDR
 * links, reduced by the ERTM header/FCS overhead and by the remote's
 * MPS. Each segment is tagged with the appropriate SAR value
 * (unsegmented, start, continue, end); only the start segment carries
 * the total SDU length. Returns 0 or a negative error, purging
 * @seg_queue on failure.
 */
static int l2cap_segment_sdu(struct l2cap_chan *chan,
			     struct sk_buff_head *seg_queue,
			     struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	u16 sdu_len;
	size_t pdu_len;
	u8 sar;

	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);

	/* It is critical that ERTM PDUs fit in a single HCI fragment,
	 * so fragmented skbs are not used. The HCI layer's handling
	 * of fragmented skbs is not compatible with ERTM's queueing.
	 */

	/* PDU size is derived from the HCI MTU */
	pdu_len = chan->conn->mtu;

	/* Constrain PDU size for BR/EDR connections */
	if (!chan->hs_hcon)
		pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);

	/* Adjust for largest possible L2CAP overhead. */
	if (chan->fcs)
		pdu_len -= L2CAP_FCS_SIZE;

	pdu_len -= __ertm_hdr_size(chan);

	/* Remote device may have requested smaller PDUs */
	pdu_len = min_t(size_t, pdu_len, chan->remote_mps);

	if (len <= pdu_len) {
		/* The whole SDU fits in one PDU */
		sar = L2CAP_SAR_UNSEGMENTED;
		sdu_len = 0;
		pdu_len = len;
	} else {
		sar = L2CAP_SAR_START;
		sdu_len = len;
		/* The start segment also carries the SDU length field */
		pdu_len -= L2CAP_SDULEN_SIZE;
	}

	while (len > 0) {
		skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);

		if (IS_ERR(skb)) {
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		bt_cb(skb)->control.sar = sar;
		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;
		if (sdu_len) {
			/* Only the first segment carries the SDU length */
			sdu_len = 0;
			pdu_len += L2CAP_SDULEN_SIZE;
		}

		if (len <= pdu_len) {
			sar = L2CAP_SAR_END;
			pdu_len = len;
		} else {
			sar = L2CAP_SAR_CONTINUE;
		}
	}

	return 0;
}
2308
/* Build one LE flow-control (credit-based) PDU from @msg: L2CAP header
 * plus, for the first segment of an SDU (@sdulen != 0), the SDU length
 * field, followed by the payload. Returns the skb or an ERR_PTR on
 * failure.
 */
static struct sk_buff *l2cap_create_le_flowctl_pdu(struct l2cap_chan *chan,
						   struct msghdr *msg,
						   size_t len, u16 sdulen)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	hlen = L2CAP_HDR_SIZE;

	if (sdulen)
		hlen += L2CAP_SDULEN_SIZE;

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, hlen, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	return skb;
}
2351
/* Segment an SDU from @msg into LE flow-control PDUs on @seg_queue.
 *
 * The payload per PDU is the HCI MTU minus the L2CAP header, capped by
 * the remote's MPS; the first PDU additionally carries the total SDU
 * length. Returns 0 or a negative error, purging @seg_queue on
 * failure.
 */
static int l2cap_segment_le_sdu(struct l2cap_chan *chan,
				struct sk_buff_head *seg_queue,
				struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	size_t pdu_len;
	u16 sdu_len;

	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);

	pdu_len = chan->conn->mtu - L2CAP_HDR_SIZE;

	pdu_len = min_t(size_t, pdu_len, chan->remote_mps);

	sdu_len = len;
	/* First PDU also carries the 2-byte SDU length field */
	pdu_len -= L2CAP_SDULEN_SIZE;

	while (len > 0) {
		if (len <= pdu_len)
			pdu_len = len;

		skb = l2cap_create_le_flowctl_pdu(chan, msg, pdu_len, sdu_len);
		if (IS_ERR(skb)) {
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;

		if (sdu_len) {
			/* Only the first PDU carries the SDU length */
			sdu_len = 0;
			pdu_len += L2CAP_SDULEN_SIZE;
		}
	}

	return 0;
}
2391
/* Send an SDU on @chan according to the channel type and mode.
 *
 * Returns the number of bytes queued/sent on success or a negative
 * error. The caller holds the channel lock, but skb allocation helpers
 * may drop and re-take it - hence the BT_CONNECTED re-checks after
 * each allocation/segmentation step.
 */
int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	int err;
	struct sk_buff_head seg_queue;

	if (!chan->conn)
		return -ENOTCONN;

	/* Connectionless channel */
	if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
		skb = l2cap_create_connless_pdu(chan, msg, len);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		/* Channel lock is released before requesting new skb and then
		 * reacquired thus we need to recheck channel state.
		 */
		if (chan->state != BT_CONNECTED) {
			kfree_skb(skb);
			return -ENOTCONN;
		}

		l2cap_do_send(chan, skb);
		return len;
	}

	switch (chan->mode) {
	case L2CAP_MODE_LE_FLOWCTL:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		/* No credits left: the peer must grant more first */
		if (!chan->tx_credits)
			return -EAGAIN;

		__skb_queue_head_init(&seg_queue);

		err = l2cap_segment_le_sdu(chan, &seg_queue, msg, len);

		/* Channel may have been closed while segmenting */
		if (chan->state != BT_CONNECTED) {
			__skb_queue_purge(&seg_queue);
			err = -ENOTCONN;
		}

		if (err)
			return err;

		skb_queue_splice_tail_init(&seg_queue, &chan->tx_q);

		/* Transmit as many queued PDUs as we have credits for */
		while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
			l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
			chan->tx_credits--;
		}

		if (!chan->tx_credits)
			chan->ops->suspend(chan);

		err = len;

		break;

	case L2CAP_MODE_BASIC:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		/* Create a basic PDU */
		skb = l2cap_create_basic_pdu(chan, msg, len);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		/* Channel lock is released before requesting new skb and then
		 * reacquired thus we need to recheck channel state.
		 */
		if (chan->state != BT_CONNECTED) {
			kfree_skb(skb);
			return -ENOTCONN;
		}

		l2cap_do_send(chan, skb);
		err = len;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* Check outgoing MTU */
		if (len > chan->omtu) {
			err = -EMSGSIZE;
			break;
		}

		__skb_queue_head_init(&seg_queue);

		/* Do segmentation before calling in to the state machine,
		 * since it's possible to block while waiting for memory
		 * allocation.
		 */
		err = l2cap_segment_sdu(chan, &seg_queue, msg, len);

		/* The channel could have been closed while segmenting,
		 * check that it is still connected.
		 */
		if (chan->state != BT_CONNECTED) {
			__skb_queue_purge(&seg_queue);
			err = -ENOTCONN;
		}

		if (err)
			break;

		if (chan->mode == L2CAP_MODE_ERTM)
			l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
		else
			l2cap_streaming_send(chan, &seg_queue);

		err = len;

		/* If the skbs were not queued for sending, they'll still be in
		 * seg_queue and need to be purged.
		 */
		__skb_queue_purge(&seg_queue);
		break;

	default:
		BT_DBG("bad state %1.1x", chan->mode);
		err = -EBADFD;
	}

	return err;
}
EXPORT_SYMBOL_GPL(l2cap_chan_send);
2524
2525 static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
2526 {
2527 struct l2cap_ctrl control;
2528 u16 seq;
2529
2530 BT_DBG("chan %p, txseq %u", chan, txseq);
2531
2532 memset(&control, 0, sizeof(control));
2533 control.sframe = 1;
2534 control.super = L2CAP_SUPER_SREJ;
2535
2536 for (seq = chan->expected_tx_seq; seq != txseq;
2537 seq = __next_seq(chan, seq)) {
2538 if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
2539 control.reqseq = seq;
2540 l2cap_send_sframe(chan, &control);
2541 l2cap_seq_list_append(&chan->srej_list, seq);
2542 }
2543 }
2544
2545 chan->expected_tx_seq = __next_seq(chan, txseq);
2546 }
2547
2548 static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2549 {
2550 struct l2cap_ctrl control;
2551
2552 BT_DBG("chan %p", chan);
2553
2554 if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2555 return;
2556
2557 memset(&control, 0, sizeof(control));
2558 control.sframe = 1;
2559 control.super = L2CAP_SUPER_SREJ;
2560 control.reqseq = chan->srej_list.tail;
2561 l2cap_send_sframe(chan, &control);
2562 }
2563
2564 static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
2565 {
2566 struct l2cap_ctrl control;
2567 u16 initial_head;
2568 u16 seq;
2569
2570 BT_DBG("chan %p, txseq %u", chan, txseq);
2571
2572 memset(&control, 0, sizeof(control));
2573 control.sframe = 1;
2574 control.super = L2CAP_SUPER_SREJ;
2575
2576 /* Capture initial list head to allow only one pass through the list. */
2577 initial_head = chan->srej_list.head;
2578
2579 do {
2580 seq = l2cap_seq_list_pop(&chan->srej_list);
2581 if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
2582 break;
2583
2584 control.reqseq = seq;
2585 l2cap_send_sframe(chan, &control);
2586 l2cap_seq_list_append(&chan->srej_list, seq);
2587 } while (chan->srej_list.head != initial_head);
2588 }
2589
2590 static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
2591 {
2592 struct sk_buff *acked_skb;
2593 u16 ackseq;
2594
2595 BT_DBG("chan %p, reqseq %u", chan, reqseq);
2596
2597 if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
2598 return;
2599
2600 BT_DBG("expected_ack_seq %u, unacked_frames %u",
2601 chan->expected_ack_seq, chan->unacked_frames);
2602
2603 for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
2604 ackseq = __next_seq(chan, ackseq)) {
2605
2606 acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
2607 if (acked_skb) {
2608 skb_unlink(acked_skb, &chan->tx_q);
2609 kfree_skb(acked_skb);
2610 chan->unacked_frames--;
2611 }
2612 }
2613
2614 chan->expected_ack_seq = reqseq;
2615
2616 if (chan->unacked_frames == 0)
2617 __clear_retrans_timer(chan);
2618
2619 BT_DBG("unacked_frames %u", chan->unacked_frames);
2620 }
2621
/* Abandon the SREJ_SENT receive state: drop all buffered out-of-order
 * frames, forget pending SREJ requests, and resume normal reception
 * starting at the current buffer_seq.
 */
static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
{
	BT_DBG("chan %p", chan);

	chan->expected_tx_seq = chan->buffer_seq;
	l2cap_seq_list_clear(&chan->srej_list);
	skb_queue_purge(&chan->srej_q);
	chan->rx_state = L2CAP_RX_STATE_RECV;
}
2631
/* ERTM transmit state machine, XMIT state handler.
 *
 * In XMIT the channel transmits freely: data requests are queued and
 * sent immediately, incoming reqseq values release acknowledged
 * frames, and poll-type events (explicit poll, retransmission timeout,
 * busy-clear with RNR outstanding) move the channel to WAIT_F until
 * the peer answers with the final bit.
 */
static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
				struct l2cap_ctrl *control,
				struct sk_buff_head *skbs, u8 event)
{
	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
	       event);

	switch (event) {
	case L2CAP_EV_DATA_REQUEST:
		if (chan->tx_send_head == NULL)
			chan->tx_send_head = skb_peek(skbs);

		skb_queue_splice_tail_init(skbs, &chan->tx_q);
		l2cap_ertm_send(chan);
		break;
	case L2CAP_EV_LOCAL_BUSY_DETECTED:
		BT_DBG("Enter LOCAL_BUSY");
		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
			/* The SREJ_SENT state must be aborted if we are to
			 * enter the LOCAL_BUSY state.
			 */
			l2cap_abort_rx_srej_sent(chan);
		}

		l2cap_send_ack(chan);

		break;
	case L2CAP_EV_LOCAL_BUSY_CLEAR:
		BT_DBG("Exit LOCAL_BUSY");
		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
			struct l2cap_ctrl local_control;

			/* An RNR was sent while busy: poll the peer with
			 * RR(P=1) and wait for the final response.
			 */
			memset(&local_control, 0, sizeof(local_control));
			local_control.sframe = 1;
			local_control.super = L2CAP_SUPER_RR;
			local_control.poll = 1;
			local_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &local_control);

			chan->retry_count = 1;
			__set_monitor_timer(chan);
			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		}
		break;
	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
		l2cap_process_reqseq(chan, control->reqseq);
		break;
	case L2CAP_EV_EXPLICIT_POLL:
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		__clear_ack_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RETRANS_TO:
		/* Retransmission timeout: poll the peer and await F-bit */
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RECV_FBIT:
		/* Nothing to process */
		break;
	default:
		break;
	}
}
2703
2704 static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2705 struct l2cap_ctrl *control,
2706 struct sk_buff_head *skbs, u8 event)
2707 {
2708 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2709 event);
2710
2711 switch (event) {
2712 case L2CAP_EV_DATA_REQUEST:
2713 if (chan->tx_send_head == NULL)
2714 chan->tx_send_head = skb_peek(skbs);
2715 /* Queue data, but don't send. */
2716 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2717 break;
2718 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2719 BT_DBG("Enter LOCAL_BUSY");
2720 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2721
2722 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2723 /* The SREJ_SENT state must be aborted if we are to
2724 * enter the LOCAL_BUSY state.
2725 */
2726 l2cap_abort_rx_srej_sent(chan);
2727 }
2728
2729 l2cap_send_ack(chan);
2730
2731 break;
2732 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2733 BT_DBG("Exit LOCAL_BUSY");
2734 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2735
2736 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2737 struct l2cap_ctrl local_control;
2738 memset(&local_control, 0, sizeof(local_control));
2739 local_control.sframe = 1;
2740 local_control.super = L2CAP_SUPER_RR;
2741 local_control.poll = 1;
2742 local_control.reqseq = chan->buffer_seq;
2743 l2cap_send_sframe(chan, &local_control);
2744
2745 chan->retry_count = 1;
2746 __set_monitor_timer(chan);
2747 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2748 }
2749 break;
2750 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2751 l2cap_process_reqseq(chan, control->reqseq);
2752
2753 /* Fall through */
2754
2755 case L2CAP_EV_RECV_FBIT:
2756 if (control && control->final) {
2757 __clear_monitor_timer(chan);
2758 if (chan->unacked_frames > 0)
2759 __set_retrans_timer(chan);
2760 chan->retry_count = 0;
2761 chan->tx_state = L2CAP_TX_STATE_XMIT;
2762 BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state);
2763 }
2764 break;
2765 case L2CAP_EV_EXPLICIT_POLL:
2766 /* Ignore */
2767 break;
2768 case L2CAP_EV_MONITOR_TO:
2769 if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
2770 l2cap_send_rr_or_rnr(chan, 1);
2771 __set_monitor_timer(chan);
2772 chan->retry_count++;
2773 } else {
2774 l2cap_send_disconn_req(chan, ECONNABORTED);
2775 }
2776 break;
2777 default:
2778 break;
2779 }
2780 }
2781
2782 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
2783 struct sk_buff_head *skbs, u8 event)
2784 {
2785 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2786 chan, control, skbs, event, chan->tx_state);
2787
2788 switch (chan->tx_state) {
2789 case L2CAP_TX_STATE_XMIT:
2790 l2cap_tx_state_xmit(chan, control, skbs, event);
2791 break;
2792 case L2CAP_TX_STATE_WAIT_F:
2793 l2cap_tx_state_wait_f(chan, control, skbs, event);
2794 break;
2795 default:
2796 /* Ignore event */
2797 break;
2798 }
2799 }
2800
/* Feed the reqseq and F-bit of a received frame into the tx state
 * machine so acknowledged frames are released.
 */
static void l2cap_pass_to_tx(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);
	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
}
2807
/* Feed only the F-bit of a received frame into the tx state machine
 * (no reqseq processing).
 */
static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
				  struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);
	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
}
2814
2815 /* Copy frame to all raw sockets on that connection */
2816 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2817 {
2818 struct sk_buff *nskb;
2819 struct l2cap_chan *chan;
2820
2821 BT_DBG("conn %p", conn);
2822
2823 mutex_lock(&conn->chan_lock);
2824
2825 list_for_each_entry(chan, &conn->chan_l, list) {
2826 if (chan->chan_type != L2CAP_CHAN_RAW)
2827 continue;
2828
2829 /* Don't send frame to the channel it came from */
2830 if (bt_cb(skb)->chan == chan)
2831 continue;
2832
2833 nskb = skb_clone(skb, GFP_KERNEL);
2834 if (!nskb)
2835 continue;
2836 if (chan->ops->recv(chan, nskb))
2837 kfree_skb(nskb);
2838 }
2839
2840 mutex_unlock(&conn->chan_lock);
2841 }
2842
/* ---- L2CAP signalling commands ---- */

/* Allocate and fill a signalling PDU: basic L2CAP header, command
 * header, then @dlen bytes of @data. Payload that does not fit in the
 * connection MTU is carried in continuation fragments chained on the
 * skb's frag_list. Returns NULL on allocation failure or when the MTU
 * cannot even hold the headers.
 */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
				       u8 ident, u16 dlen, void *data)
{
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	int len, count;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
	       conn, code, ident, dlen);

	if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
		return NULL;

	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, conn->mtu, len);

	skb = bt_skb_alloc(count, GFP_KERNEL);
	if (!skb)
		return NULL;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);

	/* Signalling CID differs between LE and BR/EDR links */
	if (conn->hcon->type == LE_LINK)
		lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
	else
		lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);

	cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->code = code;
	cmd->ident = ident;
	cmd->len = cpu_to_le16(dlen);

	if (dlen) {
		/* First fragment: whatever fits after the headers */
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		memcpy(skb_put(skb, count), data, count);
		data += count;
	}

	len -= skb->len;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_alloc(count, GFP_KERNEL);
		if (!*frag)
			goto fail;

		memcpy(skb_put(*frag, count), data, count);

		len -= count;
		data += count;

		frag = &(*frag)->next;
	}

	return skb;

fail:
	/* Frees the whole chain including any fragments added so far */
	kfree_skb(skb);
	return NULL;
}
2909
/* Decode one configuration option at *ptr and advance *ptr past it.
 *
 * The option type and payload length are returned through @type and
 * @olen. Payloads of 1, 2 or 4 bytes are decoded (little endian) into
 * *val by value; any other length returns a pointer to the raw payload
 * bytes in *val instead.
 *
 * Returns the full encoded option size (header + payload).
 *
 * NOTE(review): opt->len is peer-controlled and is not validated
 * against the remaining request length here, so *ptr can be advanced
 * past the end of the received buffer - callers must bound-check the
 * returned length and olen. TODO confirm all call sites do so.
 */
static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
				     unsigned long *val)
{
	struct l2cap_conf_opt *opt = *ptr;
	int len;

	len = L2CAP_CONF_OPT_SIZE + opt->len;
	*ptr += len;

	*type = opt->type;
	*olen = opt->len;

	switch (opt->len) {
	case 1:
		*val = *((u8 *) opt->val);
		break;

	case 2:
		*val = get_unaligned_le16(opt->val);
		break;

	case 4:
		*val = get_unaligned_le32(opt->val);
		break;

	default:
		/* Other sizes: hand back a pointer to the raw payload */
		*val = (unsigned long) opt->val;
		break;
	}

	BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
	return len;
}
2943
2944 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2945 {
2946 struct l2cap_conf_opt *opt = *ptr;
2947
2948 BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);
2949
2950 opt->type = type;
2951 opt->len = len;
2952
2953 switch (len) {
2954 case 1:
2955 *((u8 *) opt->val) = val;
2956 break;
2957
2958 case 2:
2959 put_unaligned_le16(val, opt->val);
2960 break;
2961
2962 case 4:
2963 put_unaligned_le32(val, opt->val);
2964 break;
2965
2966 default:
2967 memcpy(opt->val, (void *) val, len);
2968 break;
2969 }
2970
2971 *ptr += L2CAP_CONF_OPT_SIZE + len;
2972 }
2973
2974 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
2975 {
2976 struct l2cap_conf_efs efs;
2977
2978 switch (chan->mode) {
2979 case L2CAP_MODE_ERTM:
2980 efs.id = chan->local_id;
2981 efs.stype = chan->local_stype;
2982 efs.msdu = cpu_to_le16(chan->local_msdu);
2983 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2984 efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
2985 efs.flush_to = cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
2986 break;
2987
2988 case L2CAP_MODE_STREAMING:
2989 efs.id = 1;
2990 efs.stype = L2CAP_SERV_BESTEFFORT;
2991 efs.msdu = cpu_to_le16(chan->local_msdu);
2992 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2993 efs.acc_lat = 0;
2994 efs.flush_to = 0;
2995 break;
2996
2997 default:
2998 return;
2999 }
3000
3001 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
3002 (unsigned long) &efs);
3003 }
3004
3005 static void l2cap_ack_timeout(struct work_struct *work)
3006 {
3007 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
3008 ack_timer.work);
3009 u16 frames_to_ack;
3010
3011 BT_DBG("chan %p", chan);
3012
3013 l2cap_chan_lock(chan);
3014
3015 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
3016 chan->last_acked_seq);
3017
3018 if (frames_to_ack)
3019 l2cap_send_rr_or_rnr(chan, 0);
3020
3021 l2cap_chan_unlock(chan);
3022 l2cap_chan_put(chan);
3023 }
3024
/* Reset the sequence/state bookkeeping of a freshly configured
 * channel. For ERTM mode also initialise the retransmission machinery
 * (timers, SREJ queue, sequence lists). Returns 0 on success or a
 * negative error from sequence list allocation.
 */
int l2cap_ertm_init(struct l2cap_chan *chan)
{
	int err;

	chan->next_tx_seq = 0;
	chan->expected_tx_seq = 0;
	chan->expected_ack_seq = 0;
	chan->unacked_frames = 0;
	chan->buffer_seq = 0;
	chan->frames_sent = 0;
	chan->last_acked_seq = 0;
	chan->sdu = NULL;
	chan->sdu_last_frag = NULL;
	chan->sdu_len = 0;

	skb_queue_head_init(&chan->tx_q);

	/* AMP move state starts out stable on the BR/EDR controller */
	chan->local_amp_id = AMP_ID_BREDR;
	chan->move_id = AMP_ID_BREDR;
	chan->move_state = L2CAP_MOVE_STABLE;
	chan->move_role = L2CAP_MOVE_ROLE_NONE;

	/* Only ERTM needs the retransmission machinery below */
	if (chan->mode != L2CAP_MODE_ERTM)
		return 0;

	chan->rx_state = L2CAP_RX_STATE_RECV;
	chan->tx_state = L2CAP_TX_STATE_XMIT;

	INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
	INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
	INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);

	skb_queue_head_init(&chan->srej_q);

	err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
	if (err < 0)
		return err;

	err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
	if (err < 0)
		/* Undo the first allocation on failure */
		l2cap_seq_list_free(&chan->srej_list);

	return err;
}
3069
3070 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
3071 {
3072 switch (mode) {
3073 case L2CAP_MODE_STREAMING:
3074 case L2CAP_MODE_ERTM:
3075 if (l2cap_mode_supported(mode, remote_feat_mask))
3076 return mode;
3077 /* fall through */
3078 default:
3079 return L2CAP_MODE_BASIC;
3080 }
3081 }
3082
3083 static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
3084 {
3085 return conn->hs_enabled && conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
3086 }
3087
3088 static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
3089 {
3090 return conn->hs_enabled && conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
3091 }
3092
3093 static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
3094 struct l2cap_conf_rfc *rfc)
3095 {
3096 if (chan->local_amp_id != AMP_ID_BREDR && chan->hs_hcon) {
3097 u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;
3098
3099 /* Class 1 devices have must have ERTM timeouts
3100 * exceeding the Link Supervision Timeout. The
3101 * default Link Supervision Timeout for AMP
3102 * controllers is 10 seconds.
3103 *
3104 * Class 1 devices use 0xffffffff for their
3105 * best-effort flush timeout, so the clamping logic
3106 * will result in a timeout that meets the above
3107 * requirement. ERTM timeouts are 16-bit values, so
3108 * the maximum timeout is 65.535 seconds.
3109 */
3110
3111 /* Convert timeout to milliseconds and round */
3112 ertm_to = DIV_ROUND_UP_ULL(ertm_to, 1000);
3113
3114 /* This is the recommended formula for class 2 devices
3115 * that start ERTM timers when packets are sent to the
3116 * controller.
3117 */
3118 ertm_to = 3 * ertm_to + 500;
3119
3120 if (ertm_to > 0xffff)
3121 ertm_to = 0xffff;
3122
3123 rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);
3124 rfc->monitor_timeout = rfc->retrans_timeout;
3125 } else {
3126 rfc->retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
3127 rfc->monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
3128 }
3129 }
3130
3131 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
3132 {
3133 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
3134 __l2cap_ews_supported(chan->conn)) {
3135 /* use extended control field */
3136 set_bit(FLAG_EXT_CTRL, &chan->flags);
3137 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3138 } else {
3139 chan->tx_win = min_t(u16, chan->tx_win,
3140 L2CAP_DEFAULT_TX_WINDOW);
3141 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
3142 }
3143 chan->ack_win = chan->tx_win;
3144 }
3145
/* Build a Configure Request for @chan into @data.
 *
 * On the first request the channel mode is finalised against the
 * remote feature mask; the options emitted (MTU, RFC, FCS, EFS, EWS)
 * depend on the resulting mode. Returns the request length in bytes.
 */
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
{
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = chan->mode };
	void *ptr = req->data;
	u16 size;

	BT_DBG("chan %p", chan);

	/* Mode selection only happens on the very first exchange */
	if (chan->num_conf_req || chan->num_conf_rsp)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
			break;

		if (__l2cap_efs_supported(chan->conn))
			set_bit(FLAG_EFS_ENABLE, &chan->flags);

		/* fall through */
	default:
		chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
		break;
	}

done:
	/* The MTU option is only needed when it differs from the default */
	if (chan->imtu != L2CAP_DEFAULT_MTU)
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		if (disable_ertm)
			break;

		if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
		    !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
			break;

		rfc.mode = L2CAP_MODE_BASIC;
		rfc.txwin_size = 0;
		rfc.max_transmit = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size = 0;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);
		break;

	case L2CAP_MODE_ERTM:
		rfc.mode = L2CAP_MODE_ERTM;
		rfc.max_transmit = chan->max_tx;

		__l2cap_set_ertm_timeouts(chan, &rfc);

		/* MPS limited by the MTU minus extended header, SDU
		 * length and FCS overhead
		 */
		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_txwin_setup(chan);

		rfc.txwin_size = min_t(u16, chan->tx_win,
				       L2CAP_DEFAULT_TX_WINDOW);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan);

		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
					   chan->tx_win);

		/* Request FCS to be dropped when allowed */
		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
			if (chan->fcs == L2CAP_FCS_NONE ||
			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
				chan->fcs = L2CAP_FCS_NONE;
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
						   chan->fcs);
			}
		break;

	case L2CAP_MODE_STREAMING:
		l2cap_txwin_setup(chan);
		rfc.mode = L2CAP_MODE_STREAMING;
		rfc.txwin_size = 0;
		rfc.max_transmit = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;

		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan);

		/* Request FCS to be dropped when allowed */
		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
			if (chan->fcs == L2CAP_FCS_NONE ||
			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
				chan->fcs = L2CAP_FCS_NONE;
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
						   chan->fcs);
			}
		break;
	}

	req->dcid = cpu_to_le16(chan->dcid);
	req->flags = cpu_to_le16(0);

	return ptr - data;
}
3266
/* Parse the peer's Configure Request (stored in chan->conf_req) and
 * build our Configure Response into @data.
 *
 * Returns the response length in bytes, or a negative error when the
 * request must be refused outright (e.g. mandatory mode mismatch or
 * unsupported EWS/EFS).
 *
 * NOTE(review): options are appended to @data via l2cap_add_conf_opt
 * without an explicit bound on the caller's buffer; callers size the
 * buffer for the worst case - verify this when changing option
 * handling. Also, olen from l2cap_get_conf_opt is only compared for
 * exact size on RFC/EFS; other option lengths are trusted.
 */
static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
{
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;
	void *req = chan->conf_req;
	int len = chan->conf_len;
	int type, hint, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;
	u8 remote_efs = 0;
	u16 mtu = L2CAP_DEFAULT_MTU;
	u16 result = L2CAP_CONF_SUCCESS;
	u16 size;

	BT_DBG("chan %p", chan);

	/* First pass: collect all options from the request */
	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);

		hint = type & L2CAP_CONF_HINT;
		type &= L2CAP_CONF_MASK;

		switch (type) {
		case L2CAP_CONF_MTU:
			mtu = val;
			break;

		case L2CAP_CONF_FLUSH_TO:
			chan->flush_to = val;
			break;

		case L2CAP_CONF_QOS:
			break;

		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *) val, olen);
			break;

		case L2CAP_CONF_FCS:
			if (val == L2CAP_FCS_NONE)
				set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
			break;

		case L2CAP_CONF_EFS:
			remote_efs = 1;
			if (olen == sizeof(efs))
				memcpy(&efs, (void *) val, olen);
			break;

		case L2CAP_CONF_EWS:
			if (!chan->conn->hs_enabled)
				return -ECONNREFUSED;

			set_bit(FLAG_EXT_CTRL, &chan->flags);
			set_bit(CONF_EWS_RECV, &chan->conf_state);
			chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
			chan->remote_tx_win = val;
			break;

		default:
			/* Unknown hints are ignored; unknown non-hint
			 * options are echoed back in the response.
			 */
			if (hint)
				break;

			result = L2CAP_CONF_UNKNOWN;
			*((u8 *) ptr++) = type;
			break;
		}
	}

	if (chan->num_conf_rsp || chan->num_conf_req > 1)
		goto done;

	/* Resolve the channel mode against what the peer proposed */
	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
			chan->mode = l2cap_select_mode(rfc.mode,
						       chan->conn->feat_mask);
			break;
		}

		if (remote_efs) {
			if (__l2cap_efs_supported(chan->conn))
				set_bit(FLAG_EFS_ENABLE, &chan->flags);
			else
				return -ECONNREFUSED;
		}

		if (chan->mode != rfc.mode)
			return -ECONNREFUSED;

		break;
	}

done:
	if (chan->mode != rfc.mode) {
		result = L2CAP_CONF_UNACCEPT;
		rfc.mode = chan->mode;

		/* Mode still disputed after one full exchange: give up */
		if (chan->num_conf_rsp == 1)
			return -ECONNREFUSED;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);
	}

	if (result == L2CAP_CONF_SUCCESS) {
		/* Configure output options and let the other side know
		 * which ones we don't like. */

		if (mtu < L2CAP_DEFAULT_MIN_MTU)
			result = L2CAP_CONF_UNACCEPT;
		else {
			chan->omtu = mtu;
			set_bit(CONF_MTU_DONE, &chan->conf_state);
		}
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);

		if (remote_efs) {
			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != chan->local_stype) {

				result = L2CAP_CONF_UNACCEPT;

				if (chan->num_conf_req >= 1)
					return -ECONNREFUSED;

				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs);
			} else {
				/* Send PENDING Conf Rsp */
				result = L2CAP_CONF_PENDING;
				set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
			}
		}

		switch (rfc.mode) {
		case L2CAP_MODE_BASIC:
			chan->fcs = L2CAP_FCS_NONE;
			set_bit(CONF_MODE_DONE, &chan->conf_state);
			break;

		case L2CAP_MODE_ERTM:
			if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
				chan->remote_tx_win = rfc.txwin_size;
			else
				rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;

			chan->remote_max_tx = rfc.max_transmit;

			/* Clamp the remote MPS to what fits in our MTU */
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			__l2cap_set_ertm_timeouts(chan, &rfc);

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					   sizeof(rfc), (unsigned long) &rfc);

			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->remote_id = efs.id;
				chan->remote_stype = efs.stype;
				chan->remote_msdu = le16_to_cpu(efs.msdu);
				chan->remote_flush_to =
					le32_to_cpu(efs.flush_to);
				chan->remote_acc_lat =
					le32_to_cpu(efs.acc_lat);
				chan->remote_sdu_itime =
					le32_to_cpu(efs.sdu_itime);
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs);
			}
			break;

		case L2CAP_MODE_STREAMING:
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
					   (unsigned long) &rfc);

			break;

		default:
			result = L2CAP_CONF_UNACCEPT;

			memset(&rfc, 0, sizeof(rfc));
			rfc.mode = chan->mode;
		}

		if (result == L2CAP_CONF_SUCCESS)
			set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
	}
	rsp->scid = cpu_to_le16(chan->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags = cpu_to_le16(0);

	return ptr - data;
}
3480
/* Parse the peer's Configure Response in @rsp and build a follow-up
 * Configure Request into @data.
 *
 * @result carries the response result in and may be downgraded to
 * L2CAP_CONF_UNACCEPT (MTU too small). Returns the new request length
 * in bytes, or a negative error when the negotiation must be aborted.
 *
 * NOTE(review): each accepted option is echoed into @data via
 * l2cap_add_conf_opt without a bound on the caller's buffer; callers
 * size it for the worst case - verify when changing option handling.
 */
static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
				void *data, u16 *result)
{
	struct l2cap_conf_req *req = data;
	void *ptr = req->data;
	int type, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;

	BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);

		switch (type) {
		case L2CAP_CONF_MTU:
			if (val < L2CAP_DEFAULT_MIN_MTU) {
				*result = L2CAP_CONF_UNACCEPT;
				chan->imtu = L2CAP_DEFAULT_MIN_MTU;
			} else
				chan->imtu = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
			break;

		case L2CAP_CONF_FLUSH_TO:
			chan->flush_to = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
					   2, chan->flush_to);
			break;

		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *)val, olen);

			/* A state 2 device must not change its mode */
			if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
			    rfc.mode != chan->mode)
				return -ECONNREFUSED;

			chan->fcs = 0;

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					   sizeof(rfc), (unsigned long) &rfc);
			break;

		case L2CAP_CONF_EWS:
			chan->ack_win = min_t(u16, val, chan->ack_win);
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
					   chan->tx_win);
			break;

		case L2CAP_CONF_EFS:
			if (olen == sizeof(efs))
				memcpy(&efs, (void *)val, olen);

			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != chan->local_stype)
				return -ECONNREFUSED;

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
					   (unsigned long) &efs);
			break;

		case L2CAP_CONF_FCS:
			if (*result == L2CAP_CONF_PENDING)
				if (val == L2CAP_FCS_NONE)
					set_bit(CONF_RECV_NO_FCS,
						&chan->conf_state);
			break;
		}
	}

	/* Basic mode is mandatory once chosen; anything else is refused */
	if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
		return -ECONNREFUSED;

	chan->mode = rfc.mode;

	if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
		switch (rfc.mode) {
		case L2CAP_MODE_ERTM:
			chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
			chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
			chan->mps = le16_to_cpu(rfc.max_pdu_size);
			if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
				chan->ack_win = min_t(u16, chan->ack_win,
						      rfc.txwin_size);

			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->local_msdu = le16_to_cpu(efs.msdu);
				chan->local_sdu_itime =
					le32_to_cpu(efs.sdu_itime);
				chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
				chan->local_flush_to =
					le32_to_cpu(efs.flush_to);
			}
			break;

		case L2CAP_MODE_STREAMING:
			chan->mps = le16_to_cpu(rfc.max_pdu_size);
		}
	}

	req->dcid = cpu_to_le16(chan->dcid);
	req->flags = cpu_to_le16(0);

	return ptr - data;
}
3589
3590 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
3591 u16 result, u16 flags)
3592 {
3593 struct l2cap_conf_rsp *rsp = data;
3594 void *ptr = rsp->data;
3595
3596 BT_DBG("chan %p", chan);
3597
3598 rsp->scid = cpu_to_le16(chan->dcid);
3599 rsp->result = cpu_to_le16(result);
3600 rsp->flags = cpu_to_le16(flags);
3601
3602 return ptr - data;
3603 }
3604
3605 void __l2cap_le_connect_rsp_defer(struct l2cap_chan *chan)
3606 {
3607 struct l2cap_le_conn_rsp rsp;
3608 struct l2cap_conn *conn = chan->conn;
3609
3610 BT_DBG("chan %p", chan);
3611
3612 rsp.dcid = cpu_to_le16(chan->scid);
3613 rsp.mtu = cpu_to_le16(chan->imtu);
3614 rsp.mps = cpu_to_le16(chan->mps);
3615 rsp.credits = cpu_to_le16(chan->rx_credits);
3616 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
3617
3618 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
3619 &rsp);
3620 }
3621
3622 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
3623 {
3624 struct l2cap_conn_rsp rsp;
3625 struct l2cap_conn *conn = chan->conn;
3626 u8 buf[128];
3627 u8 rsp_code;
3628
3629 rsp.scid = cpu_to_le16(chan->dcid);
3630 rsp.dcid = cpu_to_le16(chan->scid);
3631 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
3632 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
3633
3634 if (chan->hs_hcon)
3635 rsp_code = L2CAP_CREATE_CHAN_RSP;
3636 else
3637 rsp_code = L2CAP_CONN_RSP;
3638
3639 BT_DBG("chan %p rsp_code %u", chan, rsp_code);
3640
3641 l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);
3642
3643 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3644 return;
3645
3646 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3647 l2cap_build_conf_req(chan, buf), buf);
3648 chan->num_conf_req++;
3649 }
3650
3651 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
3652 {
3653 int type, olen;
3654 unsigned long val;
3655 /* Use sane default values in case a misbehaving remote device
3656 * did not send an RFC or extended window size option.
3657 */
3658 u16 txwin_ext = chan->ack_win;
3659 struct l2cap_conf_rfc rfc = {
3660 .mode = chan->mode,
3661 .retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
3662 .monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
3663 .max_pdu_size = cpu_to_le16(chan->imtu),
3664 .txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
3665 };
3666
3667 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
3668
3669 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
3670 return;
3671
3672 while (len >= L2CAP_CONF_OPT_SIZE) {
3673 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3674
3675 switch (type) {
3676 case L2CAP_CONF_RFC:
3677 if (olen == sizeof(rfc))
3678 memcpy(&rfc, (void *)val, olen);
3679 break;
3680 case L2CAP_CONF_EWS:
3681 txwin_ext = val;
3682 break;
3683 }
3684 }
3685
3686 switch (rfc.mode) {
3687 case L2CAP_MODE_ERTM:
3688 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3689 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3690 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3691 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3692 chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
3693 else
3694 chan->ack_win = min_t(u16, chan->ack_win,
3695 rfc.txwin_size);
3696 break;
3697 case L2CAP_MODE_STREAMING:
3698 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3699 }
3700 }
3701
3702 static inline int l2cap_command_rej(struct l2cap_conn *conn,
3703 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3704 u8 *data)
3705 {
3706 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
3707
3708 if (cmd_len < sizeof(*rej))
3709 return -EPROTO;
3710
3711 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
3712 return 0;
3713
3714 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
3715 cmd->ident == conn->info_ident) {
3716 cancel_delayed_work(&conn->info_timer);
3717
3718 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3719 conn->info_ident = 0;
3720
3721 l2cap_conn_start(conn);
3722 }
3723
3724 return 0;
3725 }
3726
/* Core handler for an incoming Connection Request (also used for the
 * BR/EDR leg of an AMP Create Channel Request). Looks up a listening
 * channel on the PSM, runs security checks, creates and registers the
 * new channel and always sends a response with rsp_code. Returns the
 * new channel, or NULL when the request was refused.
 */
static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd,
					u8 *data, u8 rsp_code, u8 amp_id)
{
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct l2cap_chan *chan = NULL, *pchan;
	int result, status = L2CAP_CS_NO_INFO;

	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
					 &conn->hcon->dst, ACL_LINK);
	if (!pchan) {
		result = L2CAP_CR_BAD_PSM;
		goto sendresp;
	}

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(pchan);

	/* Check if the ACL is secure enough (if not SDP) */
	if (psm != cpu_to_le16(L2CAP_PSM_SDP) &&
	    !hci_conn_check_link_mode(conn->hcon)) {
		conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
		result = L2CAP_CR_SEC_BLOCK;
		goto response;
	}

	result = L2CAP_CR_NO_MEM;

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid))
		goto response;

	chan = pchan->ops->new_connection(pchan);
	if (!chan)
		goto response;

	/* For certain devices (ex: HID mouse), support for authentication,
	 * pairing and bonding is optional. For such devices, in order to avoid
	 * keeping the ACL alive for too long after L2CAP disconnection, reset
	 * the ACL disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
	 */
	conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;

	bacpy(&chan->src, &conn->hcon->src);
	bacpy(&chan->dst, &conn->hcon->dst);
	chan->src_type = bdaddr_type(conn->hcon, conn->hcon->src_type);
	chan->dst_type = bdaddr_type(conn->hcon, conn->hcon->dst_type);
	chan->psm = psm;
	/* The peer's source CID becomes our destination CID */
	chan->dcid = scid;
	chan->local_amp_id = amp_id;

	__l2cap_chan_add(conn, chan);

	/* Our freshly allocated source CID is the peer's destination CID */
	dcid = chan->scid;

	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	chan->ident = cmd->ident;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
		if (l2cap_chan_check_security(chan, false)) {
			if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
				/* Acceptance is deferred to user space */
				l2cap_state_change(chan, BT_CONNECT2);
				result = L2CAP_CR_PEND;
				status = L2CAP_CS_AUTHOR_PEND;
				chan->ops->defer(chan);
			} else {
				/* Force pending result for AMP controllers.
				 * The connection will succeed after the
				 * physical link is up.
				 */
				if (amp_id == AMP_ID_BREDR) {
					l2cap_state_change(chan, BT_CONFIG);
					result = L2CAP_CR_SUCCESS;
				} else {
					l2cap_state_change(chan, BT_CONNECT2);
					result = L2CAP_CR_PEND;
				}
				status = L2CAP_CS_NO_INFO;
			}
		} else {
			/* Security procedure still in progress */
			l2cap_state_change(chan, BT_CONNECT2);
			result = L2CAP_CR_PEND;
			status = L2CAP_CS_AUTHEN_PEND;
		}
	} else {
		/* Feature mask exchange not finished yet */
		l2cap_state_change(chan, BT_CONNECT2);
		result = L2CAP_CR_PEND;
		status = L2CAP_CS_NO_INFO;
	}

response:
	l2cap_chan_unlock(pchan);
	mutex_unlock(&conn->chan_lock);
	l2cap_chan_put(pchan);

sendresp:
	rsp.scid = cpu_to_le16(scid);
	rsp.dcid = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);

	/* A pending/no-info response means we still owe the peer the
	 * feature mask exchange; start it now.
	 */
	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
		struct l2cap_info_req info;
		info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

		l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
			       sizeof(info), &info);
	}

	if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
	    result == L2CAP_CR_SUCCESS) {
		u8 buf[128];
		set_bit(CONF_REQ_SENT, &chan->conf_state);
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf), buf);
		chan->num_conf_req++;
	}

	return chan;
}
3861
3862 static int l2cap_connect_req(struct l2cap_conn *conn,
3863 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
3864 {
3865 struct hci_dev *hdev = conn->hcon->hdev;
3866 struct hci_conn *hcon = conn->hcon;
3867
3868 if (cmd_len < sizeof(struct l2cap_conn_req))
3869 return -EPROTO;
3870
3871 hci_dev_lock(hdev);
3872 if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
3873 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags))
3874 mgmt_device_connected(hdev, &hcon->dst, hcon->type,
3875 hcon->dst_type, 0, NULL, 0,
3876 hcon->dev_class);
3877 hci_dev_unlock(hdev);
3878
3879 l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
3880 return 0;
3881 }
3882
/* Handle a Connection Response or Create Channel Response. Finds the
 * matching channel (by our scid, or by command ident while the peer
 * has not yet assigned one), then either proceeds to configuration,
 * keeps waiting, or tears the channel down on refusal.
 */
static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				    u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct l2cap_chan *chan;
	u8 req[128];
	int err;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
	       dcid, scid, result, status);

	mutex_lock(&conn->chan_lock);

	if (scid) {
		chan = __l2cap_get_chan_by_scid(conn, scid);
		if (!chan) {
			err = -EBADSLT;
			goto unlock;
		}
	} else {
		/* No scid yet (e.g. pending result): match on ident */
		chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
		if (!chan) {
			err = -EBADSLT;
			goto unlock;
		}
	}

	err = 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_SUCCESS:
		l2cap_state_change(chan, BT_CONFIG);
		chan->ident = 0;
		chan->dcid = dcid;
		clear_bit(CONF_CONNECT_PEND, &chan->conf_state);

		/* Send the first Configure Request only once */
		if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
			break;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, req), req);
		chan->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		set_bit(CONF_CONNECT_PEND, &chan->conf_state);
		break;

	default:
		/* Any other result is a refusal */
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	l2cap_chan_unlock(chan);

unlock:
	mutex_unlock(&conn->chan_lock);

	return err;
}
3955
3956 static inline void set_default_fcs(struct l2cap_chan *chan)
3957 {
3958 /* FCS is enabled only in ERTM or streaming mode, if one or both
3959 * sides request it.
3960 */
3961 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
3962 chan->fcs = L2CAP_FCS_NONE;
3963 else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
3964 chan->fcs = L2CAP_FCS_CRC16;
3965 }
3966
3967 static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
3968 u8 ident, u16 flags)
3969 {
3970 struct l2cap_conn *conn = chan->conn;
3971
3972 BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
3973 flags);
3974
3975 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3976 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3977
3978 l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
3979 l2cap_build_conf_rsp(chan, data,
3980 L2CAP_CONF_SUCCESS, flags), data);
3981 }
3982
3983 static void cmd_reject_invalid_cid(struct l2cap_conn *conn, u8 ident,
3984 u16 scid, u16 dcid)
3985 {
3986 struct l2cap_cmd_rej_cid rej;
3987
3988 rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
3989 rej.scid = __cpu_to_le16(scid);
3990 rej.dcid = __cpu_to_le16(dcid);
3991
3992 l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
3993 }
3994
/* Handle an incoming Configure Request. Accumulates continuation
 * fragments in chan->conf_req, then parses the complete request,
 * responds, and brings the channel up once both directions of the
 * configuration handshake are done.
 */
static inline int l2cap_config_req(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rsp[64];
	struct l2cap_chan *chan;
	int len, err = 0;

	if (cmd_len < sizeof(*req))
		return -EPROTO;

	dcid = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	/* The peer's dcid is our scid; returns a locked channel */
	chan = l2cap_get_chan_by_scid(conn, dcid);
	if (!chan) {
		cmd_reject_invalid_cid(conn, cmd->ident, dcid, 0);
		return 0;
	}

	/* Configuration is only valid while connecting/configuring */
	if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
		cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
				       chan->dcid);
		goto unlock;
	}

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (chan->conf_len + len > sizeof(chan->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
			       L2CAP_CONF_REJECT, flags), rsp);
		goto unlock;
	}

	/* Store config. */
	memcpy(chan->conf_req + chan->conf_len, req->data, len);
	chan->conf_len += len;

	if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
			       L2CAP_CONF_SUCCESS, flags), rsp);
		goto unlock;
	}

	/* Complete config. */
	len = l2cap_parse_conf_req(chan, rsp);
	if (len < 0) {
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto unlock;
	}

	chan->ident = cmd->ident;
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
	chan->num_conf_rsp++;

	/* Reset config buffer. */
	chan->conf_len = 0;

	/* Our outgoing response may still be pending (e.g. EFS) */
	if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
		goto unlock;

	/* Both directions configured: finalize FCS/ERTM and go ready */
	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);

		goto unlock;
	}

	/* We still need to send our own Configure Request */
	if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
		u8 buf[64];
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf), buf);
		chan->num_conf_req++;
	}

	/* Got Conf Rsp PENDING from remote side and assume we sent
	   Conf Rsp PENDING in the code above */
	if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
	    test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {

		/* check compatibility */

		/* Send rsp for BR/EDR channel */
		if (!chan->hs_hcon)
			l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
		else
			chan->ident = cmd->ident;
	}

unlock:
	l2cap_chan_unlock(chan);
	return err;
}
4103
/* Handle an incoming Configure Response. On success/pending the
 * negotiated options are applied; an unacceptable response triggers a
 * bounded renegotiation, anything else disconnects the channel.
 */
static inline int l2cap_config_rsp(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
	u16 scid, flags, result;
	struct l2cap_chan *chan;
	int len = cmd_len - sizeof(*rsp);
	int err = 0;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	scid = __le16_to_cpu(rsp->scid);
	flags = __le16_to_cpu(rsp->flags);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
	       result, len);

	/* Returns a locked channel */
	chan = l2cap_get_chan_by_scid(conn, scid);
	if (!chan)
		return 0;

	switch (result) {
	case L2CAP_CONF_SUCCESS:
		l2cap_conf_rfc_get(chan, rsp->data, len);
		clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
		break;

	case L2CAP_CONF_PENDING:
		set_bit(CONF_REM_CONF_PEND, &chan->conf_state);

		if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
			char buf[64];

			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
						   buf, &result);
			if (len < 0) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			/* AMP channels wait for the logical link before
			 * the EFS response can be sent.
			 */
			if (!chan->hs_hcon) {
				l2cap_send_efs_conf_rsp(chan, buf, cmd->ident,
							0);
			} else {
				if (l2cap_check_efs(chan)) {
					amp_create_logical_link(chan);
					chan->ident = cmd->ident;
				}
			}
		}
		goto done;

	case L2CAP_CONF_UNACCEPT:
		if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
			char req[64];

			if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			/* throw out any old stored conf requests */
			result = L2CAP_CONF_SUCCESS;
			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
						   req, &result);
			if (len < 0) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			l2cap_send_cmd(conn, l2cap_get_ident(conn),
				       L2CAP_CONF_REQ, len, req);
			chan->num_conf_req++;
			if (result != L2CAP_CONF_SUCCESS)
				goto done;
			break;
		}
		/* Too many renegotiation rounds: fall through and
		 * give up on the channel.
		 */

	default:
		l2cap_chan_set_err(chan, ECONNRESET);

		__set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto done;
	}

	if (flags & L2CAP_CONF_FLAG_CONTINUATION)
		goto done;

	set_bit(CONF_INPUT_DONE, &chan->conf_state);

	/* Both directions done: finalize FCS/ERTM and mark ready */
	if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);
	}

done:
	l2cap_chan_unlock(chan);
	return err;
}
4215
/* Handle an incoming Disconnection Request: acknowledge it, then tear
 * the channel down. A reference is held across the unlock so the
 * close callback can run safely after the channel is deleted.
 */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct l2cap_chan *chan;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	mutex_lock(&conn->chan_lock);

	/* The peer's dcid is our scid */
	chan = __l2cap_get_chan_by_scid(conn, dcid);
	if (!chan) {
		mutex_unlock(&conn->chan_lock);
		cmd_reject_invalid_cid(conn, cmd->ident, dcid, scid);
		return 0;
	}

	l2cap_chan_lock(chan);

	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.scid = cpu_to_le16(chan->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	chan->ops->set_shutdown(chan);

	/* Hold a ref: l2cap_chan_del drops the conn's reference */
	l2cap_chan_hold(chan);
	l2cap_chan_del(chan, ECONNRESET);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);

	return 0;
}
4262
/* Handle an incoming Disconnection Response: the peer has confirmed
 * our disconnect, so delete and close the channel. The extra hold
 * keeps the channel alive for the close callback after deletion.
 */
static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
	u16 dcid, scid;
	struct l2cap_chan *chan;

	if (cmd_len != sizeof(*rsp))
		return -EPROTO;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);

	mutex_lock(&conn->chan_lock);

	chan = __l2cap_get_chan_by_scid(conn, scid);
	if (!chan) {
		mutex_unlock(&conn->chan_lock);
		return 0;
	}

	l2cap_chan_lock(chan);

	/* Hold a ref: l2cap_chan_del drops the conn's reference */
	l2cap_chan_hold(chan);
	l2cap_chan_del(chan, 0);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);

	return 0;
}
4301
/* Handle an incoming Information Request. Answers feature-mask and
 * fixed-channel queries based on our capabilities (ERTM, high speed),
 * and reports NOTSUPP for any other type.
 */
static inline int l2cap_information_req(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
					u8 *data)
{
	struct l2cap_info_req *req = (struct l2cap_info_req *) data;
	u16 type;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	type = __le16_to_cpu(req->type);

	BT_DBG("type 0x%4.4x", type);

	if (type == L2CAP_IT_FEAT_MASK) {
		/* buf = info_rsp header (4) + 32-bit feature mask */
		u8 buf[8];
		u32 feat_mask = l2cap_feat_mask;
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
		rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		if (!disable_ertm)
			feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
				| L2CAP_FEAT_FCS;
		if (conn->hs_enabled)
			feat_mask |= L2CAP_FEAT_EXT_FLOW
				| L2CAP_FEAT_EXT_WINDOW;

		put_unaligned_le32(feat_mask, rsp->data);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
			       buf);
	} else if (type == L2CAP_IT_FIXED_CHAN) {
		/* buf = info_rsp header (4) + 8-byte fixed channel map */
		u8 buf[12];
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;

		/* Advertise the A2MP fixed channel only with high speed */
		if (conn->hs_enabled)
			l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
		else
			l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;

		rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
			       buf);
	} else {
		struct l2cap_info_rsp rsp;
		rsp.type = cpu_to_le16(type);
		rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
			       &rsp);
	}

	return 0;
}
4356
/* Handle an incoming Information Response. Completes the feature
 * mask exchange (optionally following up with a fixed-channel query)
 * and then lets pending connections proceed.
 */
static inline int l2cap_information_rsp(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
					u8 *data)
{
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
	u16 type, result;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	type = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);

	/* L2CAP Info req/rsp are unbound to channels, add extra checks */
	if (cmd->ident != conn->info_ident ||
	    conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
		return 0;

	cancel_delayed_work(&conn->info_timer);

	if (result != L2CAP_IR_SUCCESS) {
		/* Treat failure as "exchange complete" and move on */
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);

		return 0;
	}

	switch (type) {
	case L2CAP_IT_FEAT_MASK:
		conn->feat_mask = get_unaligned_le32(rsp->data);

		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
			/* Peer supports fixed channels: query its map */
			struct l2cap_info_req req;
			req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);

			conn->info_ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, conn->info_ident,
				       L2CAP_INFO_REQ, sizeof(req), &req);
		} else {
			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
			conn->info_ident = 0;

			l2cap_conn_start(conn);
		}
		break;

	case L2CAP_IT_FIXED_CHAN:
		conn->fixed_chan_mask = rsp->data[0];
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
		break;
	}

	return 0;
}
4419
/* Handle an incoming Create Channel Request (AMP). Controller id 0
 * degrades to a normal BR/EDR connect; otherwise the AMP controller
 * is validated and the new channel is tied to its high-speed link.
 */
static int l2cap_create_channel_req(struct l2cap_conn *conn,
				    struct l2cap_cmd_hdr *cmd,
				    u16 cmd_len, void *data)
{
	struct l2cap_create_chan_req *req = data;
	struct l2cap_create_chan_rsp rsp;
	struct l2cap_chan *chan;
	struct hci_dev *hdev;
	u16 psm, scid;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	if (!conn->hs_enabled)
		return -EINVAL;

	psm = le16_to_cpu(req->psm);
	scid = le16_to_cpu(req->scid);

	BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);

	/* For controller id 0 make BR/EDR connection */
	if (req->amp_id == AMP_ID_BREDR) {
		l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
			      req->amp_id);
		return 0;
	}

	/* Validate AMP controller id */
	hdev = hci_dev_get(req->amp_id);
	if (!hdev)
		goto error;

	if (hdev->dev_type != HCI_AMP || !test_bit(HCI_UP, &hdev->flags)) {
		hci_dev_put(hdev);
		goto error;
	}

	chan = l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
			     req->amp_id);
	if (chan) {
		struct amp_mgr *mgr = conn->hcon->amp_mgr;
		struct hci_conn *hs_hcon;

		/* The AMP link to the peer must already exist */
		hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK,
						  &conn->hcon->dst);
		if (!hs_hcon) {
			hci_dev_put(hdev);
			cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
					       chan->dcid);
			return 0;
		}

		BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon);

		mgr->bredr_chan = chan;
		chan->hs_hcon = hs_hcon;
		/* FCS is not used over AMP links */
		chan->fcs = L2CAP_FCS_NONE;
		conn->mtu = hdev->block_mtu;
	}

	hci_dev_put(hdev);

	return 0;

error:
	rsp.dcid = 0;
	rsp.scid = cpu_to_le16(scid);
	rsp.result = cpu_to_le16(L2CAP_CR_BAD_AMP);
	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
		       sizeof(rsp), &rsp);

	return 0;
}
4496
4497 static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id)
4498 {
4499 struct l2cap_move_chan_req req;
4500 u8 ident;
4501
4502 BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id);
4503
4504 ident = l2cap_get_ident(chan->conn);
4505 chan->ident = ident;
4506
4507 req.icid = cpu_to_le16(chan->scid);
4508 req.dest_amp_id = dest_amp_id;
4509
4510 l2cap_send_cmd(chan->conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req),
4511 &req);
4512
4513 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4514 }
4515
4516 static void l2cap_send_move_chan_rsp(struct l2cap_chan *chan, u16 result)
4517 {
4518 struct l2cap_move_chan_rsp rsp;
4519
4520 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4521
4522 rsp.icid = cpu_to_le16(chan->dcid);
4523 rsp.result = cpu_to_le16(result);
4524
4525 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_RSP,
4526 sizeof(rsp), &rsp);
4527 }
4528
4529 static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result)
4530 {
4531 struct l2cap_move_chan_cfm cfm;
4532
4533 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4534
4535 chan->ident = l2cap_get_ident(chan->conn);
4536
4537 cfm.icid = cpu_to_le16(chan->scid);
4538 cfm.result = cpu_to_le16(result);
4539
4540 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_CFM,
4541 sizeof(cfm), &cfm);
4542
4543 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4544 }
4545
4546 static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid)
4547 {
4548 struct l2cap_move_chan_cfm cfm;
4549
4550 BT_DBG("conn %p, icid 0x%4.4x", conn, icid);
4551
4552 cfm.icid = cpu_to_le16(icid);
4553 cfm.result = cpu_to_le16(L2CAP_MC_UNCONFIRMED);
4554
4555 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM,
4556 sizeof(cfm), &cfm);
4557 }
4558
4559 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4560 u16 icid)
4561 {
4562 struct l2cap_move_chan_cfm_rsp rsp;
4563
4564 BT_DBG("icid 0x%4.4x", icid);
4565
4566 rsp.icid = cpu_to_le16(icid);
4567 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
4568 }
4569
4570 static void __release_logical_link(struct l2cap_chan *chan)
4571 {
4572 chan->hs_hchan = NULL;
4573 chan->hs_hcon = NULL;
4574
4575 /* Placeholder - release the logical link */
4576 }
4577
/* React to a failed logical link setup: abort channel creation, or
 * unwind an in-progress channel move depending on our move role.
 */
static void l2cap_logical_fail(struct l2cap_chan *chan)
{
	/* Logical link setup failed */
	if (chan->state != BT_CONNECTED) {
		/* Create channel failure, disconnect */
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	switch (chan->move_role) {
	case L2CAP_MOVE_ROLE_RESPONDER:
		l2cap_move_done(chan);
		/* Tell the initiator the move is not supported */
		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_SUPP);
		break;
	case L2CAP_MOVE_ROLE_INITIATOR:
		if (chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_COMP ||
		    chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_CFM) {
			/* Remote has only sent pending or
			 * success responses, clean up
			 */
			l2cap_move_done(chan);
		}

		/* Other amp move states imply that the move
		 * has already aborted
		 */
		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
		break;
	}
}
4608
/* Complete AMP channel creation once the logical link is up: attach
 * the link, send the deferred EFS Configure Response, and bring the
 * channel up if configuration already finished in both directions.
 */
static void l2cap_logical_finish_create(struct l2cap_chan *chan,
					struct hci_chan *hchan)
{
	struct l2cap_conf_rsp rsp;

	chan->hs_hchan = hchan;
	chan->hs_hcon->l2cap_data = chan->conn;

	/* chan->ident was saved when the response was deferred */
	l2cap_send_efs_conf_rsp(chan, &rsp, chan->ident, 0);

	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
		int err;

		set_default_fcs(chan);

		err = l2cap_ertm_init(chan);
		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);
	}
}
4631
/* Advance the channel-move state machine after the logical link for
 * a move came up, sending the confirm/response owed for our role.
 */
static void l2cap_logical_finish_move(struct l2cap_chan *chan,
				      struct hci_chan *hchan)
{
	chan->hs_hcon = hchan->conn;
	chan->hs_hcon->l2cap_data = chan->conn;

	BT_DBG("move_state %d", chan->move_state);

	switch (chan->move_state) {
	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
		/* Move confirm will be sent after a success
		 * response is received
		 */
		chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
		break;
	case L2CAP_MOVE_WAIT_LOGICAL_CFM:
		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
			/* Defer until local busy condition clears */
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
		} else if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		} else if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
		}
		break;
	default:
		/* Move was not in expected state, free the channel */
		__release_logical_link(chan);

		chan->move_state = L2CAP_MOVE_STABLE;
	}
}
4665
4666 /* Call with chan locked */
4667 void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
4668 u8 status)
4669 {
4670 BT_DBG("chan %p, hchan %p, status %d", chan, hchan, status);
4671
4672 if (status) {
4673 l2cap_logical_fail(chan);
4674 __release_logical_link(chan);
4675 return;
4676 }
4677
4678 if (chan->state != BT_CONNECTED) {
4679 /* Ignore logical link if channel is on BR/EDR */
4680 if (chan->local_amp_id != AMP_ID_BREDR)
4681 l2cap_logical_finish_create(chan, hchan);
4682 } else {
4683 l2cap_logical_finish_move(chan, hchan);
4684 }
4685 }
4686
4687 void l2cap_move_start(struct l2cap_chan *chan)
4688 {
4689 BT_DBG("chan %p", chan);
4690
4691 if (chan->local_amp_id == AMP_ID_BREDR) {
4692 if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED)
4693 return;
4694 chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4695 chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
4696 /* Placeholder - start physical link setup */
4697 } else {
4698 chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4699 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4700 chan->move_id = 0;
4701 l2cap_move_setup(chan);
4702 l2cap_send_move_chan_req(chan, 0);
4703 }
4704 }
4705
/* Continue channel creation after an AMP physical link attempt. For
 * outgoing channels, either send the Create Channel Request or fall
 * back to plain BR/EDR connect; for incoming channels, send the
 * deferred Create Channel Response and start configuration.
 */
static void l2cap_do_create(struct l2cap_chan *chan, int result,
			    u8 local_amp_id, u8 remote_amp_id)
{
	BT_DBG("chan %p state %s %u -> %u", chan, state_to_string(chan->state),
	       local_amp_id, remote_amp_id);

	/* FCS is never used on AMP links */
	chan->fcs = L2CAP_FCS_NONE;

	/* Outgoing channel on AMP */
	if (chan->state == BT_CONNECT) {
		if (result == L2CAP_CR_SUCCESS) {
			chan->local_amp_id = local_amp_id;
			l2cap_send_create_chan_req(chan, remote_amp_id);
		} else {
			/* Revert to BR/EDR connect */
			l2cap_send_conn_req(chan);
		}

		return;
	}

	/* Incoming channel on AMP */
	if (__l2cap_no_conn_pending(chan)) {
		struct l2cap_conn_rsp rsp;
		char buf[128];
		rsp.scid = cpu_to_le16(chan->dcid);
		rsp.dcid = cpu_to_le16(chan->scid);

		if (result == L2CAP_CR_SUCCESS) {
			/* Send successful response */
			rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
		} else {
			/* Send negative response */
			rsp.result = cpu_to_le16(L2CAP_CR_NO_MEM);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
		}

		l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_RSP,
			       sizeof(rsp), &rsp);

		if (result == L2CAP_CR_SUCCESS) {
			/* Move straight to configuration */
			l2cap_state_change(chan, BT_CONFIG);
			set_bit(CONF_REQ_SENT, &chan->conf_state);
			l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
				       L2CAP_CONF_REQ,
				       l2cap_build_conf_req(chan, buf), buf);
			chan->num_conf_req++;
		}
	}
}
4757
/* Start the initiator side of a channel move after physical link
 * setup: prepare the channel via l2cap_move_setup(), record the
 * destination controller in move_id, and send a Move Channel request
 * for remote_amp_id to the peer.
 */
static void l2cap_do_move_initiate(struct l2cap_chan *chan, u8 local_amp_id,
				   u8 remote_amp_id)
{
	l2cap_move_setup(chan);
	chan->move_id = local_amp_id;
	chan->move_state = L2CAP_MOVE_WAIT_RSP;

	l2cap_send_move_chan_req(chan, remote_amp_id);
}
4767
/* Responder side of a channel move: if a logical link is already
 * connected the move can be acknowledged right away, otherwise wait
 * for logical link confirmation.
 *
 * NOTE(review): hchan is currently always NULL (placeholder below), so
 * only the "not allowed" path can execute until AMP support is wired
 * in; result is unused for the same reason.
 */
static void l2cap_do_move_respond(struct l2cap_chan *chan, int result)
{
	struct hci_chan *hchan = NULL;

	/* Placeholder - get hci_chan for logical link */

	if (hchan) {
		if (hchan->state == BT_CONNECTED) {
			/* Logical link is ready to go */
			chan->hs_hcon = hchan->conn;
			chan->hs_hcon->l2cap_data = chan->conn;
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);

			l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
		} else {
			/* Wait for logical link to be ready */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		}
	} else {
		/* Logical link not available */
		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_ALLOWED);
	}
}
4792
4793 static void l2cap_do_move_cancel(struct l2cap_chan *chan, int result)
4794 {
4795 if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4796 u8 rsp_result;
4797 if (result == -EINVAL)
4798 rsp_result = L2CAP_MR_BAD_ID;
4799 else
4800 rsp_result = L2CAP_MR_NOT_ALLOWED;
4801
4802 l2cap_send_move_chan_rsp(chan, rsp_result);
4803 }
4804
4805 chan->move_role = L2CAP_MOVE_ROLE_NONE;
4806 chan->move_state = L2CAP_MOVE_STABLE;
4807
4808 /* Restart data transmission */
4809 l2cap_ertm_send(chan);
4810 }
4811
/* Invoke with locked chan.
 *
 * Physical link (AMP) setup completed; route the result to the right
 * continuation: channel creation, move initiator/responder, or cancel.
 */
void __l2cap_physical_cfm(struct l2cap_chan *chan, int result)
{
	u8 local_amp_id = chan->local_amp_id;
	u8 remote_amp_id = chan->remote_amp_id;

	BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
	       chan, result, local_amp_id, remote_amp_id);

	/* Channel is already going down; nothing left to do.
	 * NOTE(review): this path unlocks the channel while the other
	 * paths leave it locked — confirm callers expect this asymmetry.
	 */
	if (chan->state == BT_DISCONN || chan->state == BT_CLOSED) {
		l2cap_chan_unlock(chan);
		return;
	}

	if (chan->state != BT_CONNECTED) {
		/* Not yet connected: this is a channel being created */
		l2cap_do_create(chan, result, local_amp_id, remote_amp_id);
	} else if (result != L2CAP_MR_SUCCESS) {
		l2cap_do_move_cancel(chan, result);
	} else {
		switch (chan->move_role) {
		case L2CAP_MOVE_ROLE_INITIATOR:
			l2cap_do_move_initiate(chan, local_amp_id,
					       remote_amp_id);
			break;
		case L2CAP_MOVE_ROLE_RESPONDER:
			l2cap_do_move_respond(chan, result);
			break;
		default:
			l2cap_do_move_cancel(chan, result);
			break;
		}
	}
}
4845
/* Handle an incoming Move Channel request.  Validates the request
 * (channel eligibility, destination controller, move collision) and
 * always answers with a Move Channel response carrying the verdict.
 * Returns -EPROTO/-EINVAL to make the caller reject the command.
 */
static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
					 struct l2cap_cmd_hdr *cmd,
					 u16 cmd_len, void *data)
{
	struct l2cap_move_chan_req *req = data;
	struct l2cap_move_chan_rsp rsp;
	struct l2cap_chan *chan;
	u16 icid = 0;
	u16 result = L2CAP_MR_NOT_ALLOWED;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	icid = le16_to_cpu(req->icid);

	BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);

	/* Channel moves require high-speed (AMP) support */
	if (!conn->hs_enabled)
		return -EINVAL;

	/* Presumably returns with the channel locked (matches the
	 * unlock below) — the lock is released before returning.
	 */
	chan = l2cap_get_chan_by_dcid(conn, icid);
	if (!chan) {
		rsp.icid = cpu_to_le16(icid);
		rsp.result = cpu_to_le16(L2CAP_MR_NOT_ALLOWED);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_MOVE_CHAN_RSP,
			       sizeof(rsp), &rsp);
		return 0;
	}

	chan->ident = cmd->ident;

	/* Only dynamic, ERTM/streaming, non-BR/EDR-pinned channels move */
	if (chan->scid < L2CAP_CID_DYN_START ||
	    chan->chan_policy == BT_CHANNEL_POLICY_BREDR_ONLY ||
	    (chan->mode != L2CAP_MODE_ERTM &&
	     chan->mode != L2CAP_MODE_STREAMING)) {
		result = L2CAP_MR_NOT_ALLOWED;
		goto send_move_response;
	}

	if (chan->local_amp_id == req->dest_amp_id) {
		result = L2CAP_MR_SAME_ID;
		goto send_move_response;
	}

	/* Destination must be a usable, powered-up AMP controller */
	if (req->dest_amp_id != AMP_ID_BREDR) {
		struct hci_dev *hdev;
		hdev = hci_dev_get(req->dest_amp_id);
		if (!hdev || hdev->dev_type != HCI_AMP ||
		    !test_bit(HCI_UP, &hdev->flags)) {
			if (hdev)
				hci_dev_put(hdev);

			result = L2CAP_MR_BAD_ID;
			goto send_move_response;
		}
		hci_dev_put(hdev);
	}

	/* Detect a move collision.  Only send a collision response
	 * if this side has "lost", otherwise proceed with the move.
	 * The winner has the larger bd_addr.
	 */
	if ((__chan_is_moving(chan) ||
	     chan->move_role != L2CAP_MOVE_ROLE_NONE) &&
	    bacmp(&conn->hcon->src, &conn->hcon->dst) > 0) {
		result = L2CAP_MR_COLLISION;
		goto send_move_response;
	}

	chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
	l2cap_move_setup(chan);
	chan->move_id = req->dest_amp_id;
	icid = chan->dcid;

	if (req->dest_amp_id == AMP_ID_BREDR) {
		/* Moving to BR/EDR */
		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
			result = L2CAP_MR_PEND;
		} else {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			result = L2CAP_MR_SUCCESS;
		}
	} else {
		chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
		/* Placeholder - uncomment when amp functions are available */
		/*amp_accept_physical(chan, req->dest_amp_id);*/
		result = L2CAP_MR_PEND;
	}

send_move_response:
	l2cap_send_move_chan_rsp(chan, result);

	l2cap_chan_unlock(chan);

	return 0;
}
4943
/* Advance the initiator-side move state machine after a successful or
 * pending Move Channel response for icid.  The channel is unlocked
 * before returning (the lock is presumably taken inside
 * l2cap_get_chan_by_scid() — matches the unlock below).
 */
static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
{
	struct l2cap_chan *chan;
	struct hci_chan *hchan = NULL;

	chan = l2cap_get_chan_by_scid(conn, icid);
	if (!chan) {
		/* No matching channel; confirm using the bare icid */
		l2cap_send_move_chan_cfm_icid(conn, icid);
		return;
	}

	__clear_chan_timer(chan);
	if (result == L2CAP_MR_PEND)
		/* Peer asked for more time; rearm the extended timeout */
		__set_chan_timer(chan, L2CAP_MOVE_ERTX_TIMEOUT);

	switch (chan->move_state) {
	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
		/* Move confirm will be sent when logical link
		 * is complete.
		 */
		chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		break;
	case L2CAP_MOVE_WAIT_RSP_SUCCESS:
		if (result == L2CAP_MR_PEND) {
			break;
		} else if (test_bit(CONN_LOCAL_BUSY,
				    &chan->conn_state)) {
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
		} else {
			/* Logical link is up or moving to BR/EDR,
			 * proceed with move
			 */
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		}
		break;
	case L2CAP_MOVE_WAIT_RSP:
		/* Moving to AMP */
		if (result == L2CAP_MR_SUCCESS) {
			/* Remote is ready, send confirm immediately
			 * after logical link is ready
			 */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		} else {
			/* Both logical link and move success
			 * are required to confirm
			 */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_COMP;
		}

		/* Placeholder - get hci_chan for logical link */
		if (!hchan) {
			/* Logical link not available */
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
			break;
		}

		/* If the logical link is not yet connected, do not
		 * send confirmation.
		 */
		if (hchan->state != BT_CONNECTED)
			break;

		/* Logical link is already ready to go */

		chan->hs_hcon = hchan->conn;
		chan->hs_hcon->l2cap_data = chan->conn;

		if (result == L2CAP_MR_SUCCESS) {
			/* Can confirm now */
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		} else {
			/* Now only need move success
			 * to confirm
			 */
			chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
		}

		l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
		break;
	default:
		/* Any other amp move state means the move failed. */
		chan->move_id = chan->local_amp_id;
		l2cap_move_done(chan);
		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
	}

	l2cap_chan_unlock(chan);
}
5033
5034 static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
5035 u16 result)
5036 {
5037 struct l2cap_chan *chan;
5038
5039 chan = l2cap_get_chan_by_ident(conn, ident);
5040 if (!chan) {
5041 /* Could not locate channel, icid is best guess */
5042 l2cap_send_move_chan_cfm_icid(conn, icid);
5043 return;
5044 }
5045
5046 __clear_chan_timer(chan);
5047
5048 if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
5049 if (result == L2CAP_MR_COLLISION) {
5050 chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
5051 } else {
5052 /* Cleanup - cancel move */
5053 chan->move_id = chan->local_amp_id;
5054 l2cap_move_done(chan);
5055 }
5056 }
5057
5058 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5059
5060 l2cap_chan_unlock(chan);
5061 }
5062
5063 static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
5064 struct l2cap_cmd_hdr *cmd,
5065 u16 cmd_len, void *data)
5066 {
5067 struct l2cap_move_chan_rsp *rsp = data;
5068 u16 icid, result;
5069
5070 if (cmd_len != sizeof(*rsp))
5071 return -EPROTO;
5072
5073 icid = le16_to_cpu(rsp->icid);
5074 result = le16_to_cpu(rsp->result);
5075
5076 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5077
5078 if (result == L2CAP_MR_SUCCESS || result == L2CAP_MR_PEND)
5079 l2cap_move_continue(conn, icid, result);
5080 else
5081 l2cap_move_fail(conn, cmd->ident, icid, result);
5082
5083 return 0;
5084 }
5085
/* Handle a Move Channel Confirmation from the peer.  A confirmation
 * response is always sent back, even when no matching channel exists.
 */
static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
				      struct l2cap_cmd_hdr *cmd,
				      u16 cmd_len, void *data)
{
	struct l2cap_move_chan_cfm *cfm = data;
	struct l2cap_chan *chan;
	u16 icid, result;

	if (cmd_len != sizeof(*cfm))
		return -EPROTO;

	icid = le16_to_cpu(cfm->icid);
	result = le16_to_cpu(cfm->result);

	BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);

	chan = l2cap_get_chan_by_dcid(conn, icid);
	if (!chan) {
		/* Spec requires a response even if the icid was not found */
		l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
		return 0;
	}

	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) {
		if (result == L2CAP_MC_CONFIRMED) {
			/* Move confirmed; adopt the new controller id */
			chan->local_amp_id = chan->move_id;
			if (chan->local_amp_id == AMP_ID_BREDR)
				__release_logical_link(chan);
		} else {
			/* Move not confirmed; stay where we are */
			chan->move_id = chan->local_amp_id;
		}

		l2cap_move_done(chan);
	}

	l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);

	l2cap_chan_unlock(chan);

	return 0;
}
5127
/* Handle a Move Channel Confirmation Response; completes the move on
 * this side when it was waiting in L2CAP_MOVE_WAIT_CONFIRM_RSP.
 */
static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
						 struct l2cap_cmd_hdr *cmd,
						 u16 cmd_len, void *data)
{
	struct l2cap_move_chan_cfm_rsp *rsp = data;
	struct l2cap_chan *chan;
	u16 icid;

	if (cmd_len != sizeof(*rsp))
		return -EPROTO;

	icid = le16_to_cpu(rsp->icid);

	BT_DBG("icid 0x%4.4x", icid);

	chan = l2cap_get_chan_by_scid(conn, icid);
	if (!chan)
		return 0;

	__clear_chan_timer(chan);

	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) {
		chan->local_amp_id = chan->move_id;

		/* Back on BR/EDR the AMP logical link is no longer needed */
		if (chan->local_amp_id == AMP_ID_BREDR && chan->hs_hchan)
			__release_logical_link(chan);

		l2cap_move_done(chan);
	}

	l2cap_chan_unlock(chan);

	return 0;
}
5162
/* Handle an LE Connection Parameter Update request.  Only valid when
 * we are master; validates the requested parameters, always answers
 * with accept or reject, and applies accepted parameters to the
 * underlying LE connection.
 */
static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
					      struct l2cap_cmd_hdr *cmd,
					      u16 cmd_len, u8 *data)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_conn_param_update_req *req;
	struct l2cap_conn_param_update_rsp rsp;
	u16 min, max, latency, to_multiplier;
	int err;

	/* Only the master can be asked to change parameters */
	if (hcon->role != HCI_ROLE_MASTER)
		return -EINVAL;

	if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
		return -EPROTO;

	req = (struct l2cap_conn_param_update_req *) data;
	min = __le16_to_cpu(req->min);
	max = __le16_to_cpu(req->max);
	latency = __le16_to_cpu(req->latency);
	to_multiplier = __le16_to_cpu(req->to_multiplier);

	BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
	       min, max, latency, to_multiplier);

	memset(&rsp, 0, sizeof(rsp));

	err = hci_check_conn_params(min, max, latency, to_multiplier);
	if (err)
		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
	else
		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
		       sizeof(rsp), &rsp);

	if (!err) {
		u8 store_hint;

		/* Apply the parameters and notify mgmt so userspace can
		 * decide whether to store them.
		 */
		store_hint = hci_le_conn_update(hcon, min, max, latency,
						to_multiplier);
		mgmt_new_conn_param(hcon->hdev, &hcon->dst, hcon->dst_type,
				    store_hint, min, max, latency,
				    to_multiplier);

	}

	return 0;
}
5212
/* Handle an LE Credit Based Connection response.  Matches the pending
 * channel by command ident; on success stores the remote parameters
 * (dcid, MTU, MPS, initial credits) and marks the channel ready, on
 * any other result deletes the channel.
 */
static int l2cap_le_connect_rsp(struct l2cap_conn *conn,
				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				u8 *data)
{
	struct l2cap_le_conn_rsp *rsp = (struct l2cap_le_conn_rsp *) data;
	u16 dcid, mtu, mps, credits, result;
	struct l2cap_chan *chan;
	int err;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	dcid = __le16_to_cpu(rsp->dcid);
	mtu = __le16_to_cpu(rsp->mtu);
	mps = __le16_to_cpu(rsp->mps);
	credits = __le16_to_cpu(rsp->credits);
	result = __le16_to_cpu(rsp->result);

	/* Reject success responses advertising MTU/MPS below the LE
	 * minimum of 23.
	 */
	if (result == L2CAP_CR_SUCCESS && (mtu < 23 || mps < 23))
		return -EPROTO;

	BT_DBG("dcid 0x%4.4x mtu %u mps %u credits %u result 0x%2.2x",
	       dcid, mtu, mps, credits, result);

	mutex_lock(&conn->chan_lock);

	chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
	if (!chan) {
		err = -EBADSLT;
		goto unlock;
	}

	err = 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_SUCCESS:
		chan->ident = 0;
		chan->dcid = dcid;
		chan->omtu = mtu;
		chan->remote_mps = mps;
		chan->tx_credits = credits;
		l2cap_chan_ready(chan);
		break;

	default:
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	l2cap_chan_unlock(chan);

unlock:
	mutex_unlock(&conn->chan_lock);

	return err;
}
5271
/* Dispatch a single BR/EDR signaling command to its handler.  A
 * non-zero return makes the caller send a Command Reject for this
 * command.
 */
static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
				      struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				      u8 *data)
{
	int err = 0;

	switch (cmd->code) {
	case L2CAP_COMMAND_REJ:
		l2cap_command_rej(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_REQ:
		err = l2cap_connect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_RSP:
	case L2CAP_CREATE_CHAN_RSP:
		l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONF_REQ:
		err = l2cap_config_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONF_RSP:
		l2cap_config_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_REQ:
		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_RSP:
		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_ECHO_REQ:
		/* Echo simply reflects the payload back to the sender */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
		break;

	case L2CAP_ECHO_RSP:
		break;

	case L2CAP_INFO_REQ:
		err = l2cap_information_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_INFO_RSP:
		l2cap_information_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CREATE_CHAN_REQ:
		err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_REQ:
		err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_RSP:
		l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_CFM:
		err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_CFM_RSP:
		l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
		break;

	default:
		BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
		err = -EINVAL;
		break;
	}

	return err;
}
5351
/* Handle an incoming LE Credit Based Connection request.  Looks up a
 * listening channel for the PSM, checks security and duplicate scids,
 * creates and initializes the new channel, and sends the LE connect
 * response (unless setup is deferred with L2CAP_CR_PEND).
 */
static int l2cap_le_connect_req(struct l2cap_conn *conn,
				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				u8 *data)
{
	struct l2cap_le_conn_req *req = (struct l2cap_le_conn_req *) data;
	struct l2cap_le_conn_rsp rsp;
	struct l2cap_chan *chan, *pchan;
	u16 dcid, scid, credits, mtu, mps;
	__le16 psm;
	u8 result;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	scid = __le16_to_cpu(req->scid);
	mtu = __le16_to_cpu(req->mtu);
	mps = __le16_to_cpu(req->mps);
	psm  = req->psm;
	dcid = 0;
	credits = 0;

	/* 23 is the spec minimum for LE data channel MTU and MPS */
	if (mtu < 23 || mps < 23)
		return -EPROTO;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x mtu %u mps %u", __le16_to_cpu(psm),
	       scid, mtu, mps);

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
					 &conn->hcon->dst, LE_LINK);
	if (!pchan) {
		result = L2CAP_CR_BAD_PSM;
		chan = NULL;
		goto response;
	}

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(pchan);

	if (!smp_sufficient_security(conn->hcon, pchan->sec_level)) {
		result = L2CAP_CR_AUTHENTICATION;
		chan = NULL;
		goto response_unlock;
	}

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid)) {
		result = L2CAP_CR_NO_MEM;
		chan = NULL;
		goto response_unlock;
	}

	chan = pchan->ops->new_connection(pchan);
	if (!chan) {
		result = L2CAP_CR_NO_MEM;
		goto response_unlock;
	}

	l2cap_le_flowctl_init(chan);

	/* Populate the new channel from the connection and the request */
	bacpy(&chan->src, &conn->hcon->src);
	bacpy(&chan->dst, &conn->hcon->dst);
	chan->src_type = bdaddr_type(conn->hcon, conn->hcon->src_type);
	chan->dst_type = bdaddr_type(conn->hcon, conn->hcon->dst_type);
	chan->psm  = psm;
	chan->dcid = scid;
	chan->omtu = mtu;
	chan->remote_mps = mps;
	chan->tx_credits = __le16_to_cpu(req->credits);

	__l2cap_chan_add(conn, chan);
	dcid = chan->scid;
	credits = chan->rx_credits;

	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	chan->ident = cmd->ident;

	if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
		/* Let the socket owner accept/reject; respond later */
		l2cap_state_change(chan, BT_CONNECT2);
		result = L2CAP_CR_PEND;
		chan->ops->defer(chan);
	} else {
		l2cap_chan_ready(chan);
		result = L2CAP_CR_SUCCESS;
	}

response_unlock:
	l2cap_chan_unlock(pchan);
	mutex_unlock(&conn->chan_lock);
	l2cap_chan_put(pchan);

	if (result == L2CAP_CR_PEND)
		return 0;

response:
	if (chan) {
		rsp.mtu = cpu_to_le16(chan->imtu);
		rsp.mps = cpu_to_le16(chan->mps);
	} else {
		rsp.mtu = 0;
		rsp.mps = 0;
	}

	rsp.dcid    = cpu_to_le16(dcid);
	rsp.credits = cpu_to_le16(credits);
	rsp.result  = cpu_to_le16(result);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_LE_CONN_RSP, sizeof(rsp), &rsp);

	return 0;
}
5464
5465 static inline int l2cap_le_credits(struct l2cap_conn *conn,
5466 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5467 u8 *data)
5468 {
5469 struct l2cap_le_credits *pkt;
5470 struct l2cap_chan *chan;
5471 u16 cid, credits, max_credits;
5472
5473 if (cmd_len != sizeof(*pkt))
5474 return -EPROTO;
5475
5476 pkt = (struct l2cap_le_credits *) data;
5477 cid = __le16_to_cpu(pkt->cid);
5478 credits = __le16_to_cpu(pkt->credits);
5479
5480 BT_DBG("cid 0x%4.4x credits 0x%4.4x", cid, credits);
5481
5482 chan = l2cap_get_chan_by_dcid(conn, cid);
5483 if (!chan)
5484 return -EBADSLT;
5485
5486 max_credits = LE_FLOWCTL_MAX_CREDITS - chan->tx_credits;
5487 if (credits > max_credits) {
5488 BT_ERR("LE credits overflow");
5489 l2cap_send_disconn_req(chan, ECONNRESET);
5490
5491 /* Return 0 so that we don't trigger an unnecessary
5492 * command reject packet.
5493 */
5494 return 0;
5495 }
5496
5497 chan->tx_credits += credits;
5498
5499 while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
5500 l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
5501 chan->tx_credits--;
5502 }
5503
5504 if (chan->tx_credits)
5505 chan->ops->resume(chan);
5506
5507 l2cap_chan_unlock(chan);
5508
5509 return 0;
5510 }
5511
5512 static inline int l2cap_le_command_rej(struct l2cap_conn *conn,
5513 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5514 u8 *data)
5515 {
5516 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
5517 struct l2cap_chan *chan;
5518
5519 if (cmd_len < sizeof(*rej))
5520 return -EPROTO;
5521
5522 mutex_lock(&conn->chan_lock);
5523
5524 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
5525 if (!chan)
5526 goto done;
5527
5528 l2cap_chan_lock(chan);
5529 l2cap_chan_del(chan, ECONNREFUSED);
5530 l2cap_chan_unlock(chan);
5531
5532 done:
5533 mutex_unlock(&conn->chan_lock);
5534 return 0;
5535 }
5536
/* Dispatch a single LE signaling command to its handler.  A non-zero
 * return makes the caller send a Command Reject for this command.
 */
static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	int err = 0;

	switch (cmd->code) {
	case L2CAP_COMMAND_REJ:
		l2cap_le_command_rej(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_PARAM_UPDATE_REQ:
		err = l2cap_conn_param_update_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_PARAM_UPDATE_RSP:
		break;

	case L2CAP_LE_CONN_RSP:
		l2cap_le_connect_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_LE_CONN_REQ:
		err = l2cap_le_connect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_LE_CREDITS:
		err = l2cap_le_credits(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_REQ:
		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_RSP:
		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
		break;

	default:
		BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
		err = -EINVAL;
		break;
	}

	return err;
}
5583
/* Process one PDU received on the LE signaling channel.  LE carries a
 * single command per PDU; malformed PDUs are dropped and a handler
 * error triggers a Command Reject.  Always consumes skb.
 */
static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
					struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_cmd_hdr *cmd;
	u16 len;
	int err;

	if (hcon->type != LE_LINK)
		goto drop;

	if (skb->len < L2CAP_CMD_HDR_SIZE)
		goto drop;

	cmd = (void *) skb->data;
	skb_pull(skb, L2CAP_CMD_HDR_SIZE);

	len = le16_to_cpu(cmd->len);

	BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, cmd->ident);

	/* Length must match exactly and ident 0 is reserved */
	if (len != skb->len || !cmd->ident) {
		BT_DBG("corrupted command");
		goto drop;
	}

	err = l2cap_le_sig_cmd(conn, cmd, len, skb->data);
	if (err) {
		struct l2cap_cmd_rej_unk rej;

		BT_ERR("Wrong link type (%d)", err);

		rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
			       sizeof(rej), &rej);
	}

drop:
	kfree_skb(skb);
}
5624
/* Process a PDU received on the BR/EDR signaling channel.  Unlike LE,
 * one PDU may carry several commands back to back; each is dispatched
 * in turn and a handler error triggers a Command Reject for that
 * command.  Always consumes skb.
 */
static inline void l2cap_sig_channel(struct l2cap_conn *conn,
				     struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	u8 *data = skb->data;
	int len = skb->len;
	struct l2cap_cmd_hdr cmd;
	int err;

	/* Give raw (sniffer) sockets a copy before processing */
	l2cap_raw_recv(conn, skb);

	if (hcon->type != ACL_LINK)
		goto drop;

	while (len >= L2CAP_CMD_HDR_SIZE) {
		u16 cmd_len;
		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
		data += L2CAP_CMD_HDR_SIZE;
		len  -= L2CAP_CMD_HDR_SIZE;

		cmd_len = le16_to_cpu(cmd.len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len,
		       cmd.ident);

		/* Claimed length must fit in what remains; ident 0 is
		 * reserved.
		 */
		if (cmd_len > len || !cmd.ident) {
			BT_DBG("corrupted command");
			break;
		}

		err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
		if (err) {
			struct l2cap_cmd_rej_unk rej;

			BT_ERR("Wrong link type (%d)", err);

			rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ,
				       sizeof(rej), &rej);
		}

		data += cmd_len;
		len  -= cmd_len;
	}

drop:
	kfree_skb(skb);
}
5673
5674 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
5675 {
5676 u16 our_fcs, rcv_fcs;
5677 int hdr_size;
5678
5679 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
5680 hdr_size = L2CAP_EXT_HDR_SIZE;
5681 else
5682 hdr_size = L2CAP_ENH_HDR_SIZE;
5683
5684 if (chan->fcs == L2CAP_FCS_CRC16) {
5685 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
5686 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
5687 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
5688
5689 if (our_fcs != rcv_fcs)
5690 return -EBADMSG;
5691 }
5692 return 0;
5693 }
5694
/* Report our receiver state to the peer while guaranteeing that a
 * frame carrying the F-bit goes out: an RNR if we are locally busy,
 * then any pending i-frames, and finally an RR if nothing else has
 * carried the F-bit yet.
 */
static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;

	BT_DBG("chan %p", chan);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.final = 1;
	control.reqseq = chan->buffer_seq;
	set_bit(CONN_SEND_FBIT, &chan->conn_state);

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		control.super = L2CAP_SUPER_RNR;
		l2cap_send_sframe(chan, &control);
	}

	/* Unacked frames remain after remote busy clears: keep the
	 * retransmission timer running.
	 */
	if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
	    chan->unacked_frames > 0)
		__set_retrans_timer(chan);

	/* Send pending iframes */
	l2cap_ertm_send(chan);

	if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
		/* F-bit wasn't sent in an s-frame or i-frame yet, so
		 * send it now.
		 */
		control.super = L2CAP_SUPER_RR;
		l2cap_send_sframe(chan, &control);
	}
}
5728
/* Append new_frag to skb's fragment list and update the aggregate
 * length accounting.  *last_frag caches the list tail so appends are
 * O(1) instead of walking the fragment list each time.
 */
static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
			    struct sk_buff **last_frag)
{
	/* skb->len reflects data in skb as well as all fragments
	 * skb->data_len reflects only data in fragments
	 */
	if (!skb_has_frag_list(skb))
		skb_shinfo(skb)->frag_list = new_frag;

	new_frag->next = NULL;

	(*last_frag)->next = new_frag;
	*last_frag = new_frag;

	skb->len += new_frag->len;
	skb->data_len += new_frag->len;
	skb->truesize += new_frag->truesize;
}
5747
/* Reassemble an SDU from i-frames according to the SAR bits in
 * control.  Takes ownership of skb: it is delivered, stored as the
 * partial SDU, appended to it, or freed on error.  Returns 0 on
 * success (including "more fragments expected"); on error the partial
 * SDU state is discarded.  The default err of -EINVAL covers every
 * SAR sequencing violation (e.g. START while an SDU is in progress).
 */
static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
				struct l2cap_ctrl *control)
{
	int err = -EINVAL;

	switch (control->sar) {
	case L2CAP_SAR_UNSEGMENTED:
		/* A partial SDU must not be interrupted */
		if (chan->sdu)
			break;

		err = chan->ops->recv(chan, skb);
		break;

	case L2CAP_SAR_START:
		if (chan->sdu)
			break;

		/* First fragment carries the total SDU length */
		chan->sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, L2CAP_SDULEN_SIZE);

		if (chan->sdu_len > chan->imtu) {
			err = -EMSGSIZE;
			break;
		}

		/* A START fragment may not already contain the whole SDU */
		if (skb->len >= chan->sdu_len)
			break;

		chan->sdu = skb;
		chan->sdu_last_frag = skb;

		/* skb now owned by the partial SDU; don't free it below */
		skb = NULL;
		err = 0;
		break;

	case L2CAP_SAR_CONTINUE:
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* A CONTINUE fragment may not complete or exceed the SDU */
		if (chan->sdu->len >= chan->sdu_len)
			break;

		err = 0;
		break;

	case L2CAP_SAR_END:
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* The END fragment must complete the SDU exactly */
		if (chan->sdu->len != chan->sdu_len)
			break;

		err = chan->ops->recv(chan, chan->sdu);

		if (!err) {
			/* Reassembly complete */
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;
		}
		break;
	}

	if (err) {
		/* Drop both the offending skb and any partial SDU */
		kfree_skb(skb);
		kfree_skb(chan->sdu);
		chan->sdu = NULL;
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;
	}

	return err;
}
5829
/* Re-segment the transmit queue after a channel move.
 *
 * Not yet implemented; currently a no-op that reports success.
 */
static int l2cap_resegment(struct l2cap_chan *chan)
{
	return 0;
}
5835
5836 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
5837 {
5838 u8 event;
5839
5840 if (chan->mode != L2CAP_MODE_ERTM)
5841 return;
5842
5843 event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
5844 l2cap_tx(chan, NULL, NULL, event);
5845 }
5846
/* Deliver buffered out-of-order i-frames from the SREJ queue in
 * sequence, stopping at the first gap, a local-busy condition, or a
 * reassembly error.  When the queue drains completely the channel
 * returns to the normal RECV state and acks the peer.
 */
static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
{
	int err = 0;
	/* Pass sequential frames to l2cap_reassemble_sdu()
	 * until a gap is encountered.
	 */

	BT_DBG("chan %p", chan);

	while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		struct sk_buff *skb;
		BT_DBG("Searching for skb with txseq %d (queue len %d)",
		       chan->buffer_seq, skb_queue_len(&chan->srej_q));

		skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);

		/* Gap in the sequence: wait for the missing frame */
		if (!skb)
			break;

		skb_unlink(skb, &chan->srej_q);
		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
		err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->control);
		if (err)
			break;
	}

	if (skb_queue_empty(&chan->srej_q)) {
		chan->rx_state = L2CAP_RX_STATE_RECV;
		l2cap_send_ack(chan);
	}

	return err;
}
5880
/* Handle a received SREJ s-frame: retransmit the single requested
 * i-frame, honoring the P/F bits and the SREJ_ACT bookkeeping that
 * prevents a duplicate retransmission when the F-bit answer arrives.
 * Invalid reqseq or exceeding max_tx disconnects the channel.
 */
static void l2cap_handle_srej(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* reqseq == next_tx_seq would ask for a frame never sent */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	if (skb == NULL) {
		BT_DBG("Seq %d not available for retransmission",
		       control->reqseq);
		return;
	}

	if (chan->max_tx != 0 && bt_cb(skb)->control.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	if (control->poll) {
		l2cap_pass_to_tx(chan, control);

		/* Poll set: our retransmission must carry the F-bit */
		set_bit(CONN_SEND_FBIT, &chan->conn_state);
		l2cap_retransmit(chan, control);
		l2cap_ertm_send(chan);

		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
			set_bit(CONN_SREJ_ACT, &chan->conn_state);
			chan->srej_save_reqseq = control->reqseq;
		}
	} else {
		l2cap_pass_to_tx_fbit(chan, control);

		if (control->final) {
			/* Skip the retransmit if this F-bit answers the
			 * SREJ we already acted on for the same reqseq.
			 */
			if (chan->srej_save_reqseq != control->reqseq ||
			    !test_and_clear_bit(CONN_SREJ_ACT,
						&chan->conn_state))
				l2cap_retransmit(chan, control);
		} else {
			l2cap_retransmit(chan, control);
			if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
				set_bit(CONN_SREJ_ACT, &chan->conn_state);
				chan->srej_save_reqseq = control->reqseq;
			}
		}
	}
}
5938
/* Handle a received REJ s-frame: retransmit all unacked i-frames from
 * reqseq onward, using the REJ_ACT flag to avoid retransmitting twice
 * when the matching F-bit response arrives.  Invalid reqseq or
 * exceeding max_tx disconnects the channel.
 */
static void l2cap_handle_rej(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* reqseq == next_tx_seq would reject a frame never sent */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	if (chan->max_tx && skb &&
	    bt_cb(skb)->control.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	l2cap_pass_to_tx(chan, control);

	if (control->final) {
		/* F-bit answer to a poll: only retransmit if we have not
		 * already acted on this REJ.
		 */
		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
			l2cap_retransmit_all(chan, control);
	} else {
		l2cap_retransmit_all(chan, control);
		l2cap_ertm_send(chan);
		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
			set_bit(CONN_REJ_ACT, &chan->conn_state);
	}
}
5975
/* Classify a received I-frame's txseq relative to the current receive
 * state.
 *
 * Returns one of the L2CAP_TXSEQ_* values, which drive the RECV and
 * SREJ_SENT receive state machines: expected data, duplicates,
 * sequence gaps (which trigger SREJ requests), or invalid sequence
 * numbers that either force a disconnect or can be safely ignored.
 * The order of the checks below is significant: SREJ-specific
 * classifications are only considered while in SREJ_SENT state.
 */
static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
{
	BT_DBG("chan %p, txseq %d", chan, txseq);

	BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
	       chan->expected_tx_seq);

	if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			/* See notes below regarding "double poll" and
			 * invalid packets.
			 */
			if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
				BT_DBG("Invalid/Ignore - after SREJ");
				return L2CAP_TXSEQ_INVALID_IGNORE;
			} else {
				BT_DBG("Invalid - in window after SREJ sent");
				return L2CAP_TXSEQ_INVALID;
			}
		}

		/* Head of srej_list is the frame we are waiting for next */
		if (chan->srej_list.head == txseq) {
			BT_DBG("Expected SREJ");
			return L2CAP_TXSEQ_EXPECTED_SREJ;
		}

		if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
			BT_DBG("Duplicate SREJ - txseq already stored");
			return L2CAP_TXSEQ_DUPLICATE_SREJ;
		}

		if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
			BT_DBG("Unexpected SREJ - not requested");
			return L2CAP_TXSEQ_UNEXPECTED_SREJ;
		}
	}

	if (chan->expected_tx_seq == txseq) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		} else {
			BT_DBG("Expected");
			return L2CAP_TXSEQ_EXPECTED;
		}
	}

	/* txseq closer to last_acked_seq than expected_tx_seq means the
	 * frame was already received.
	 */
	if (__seq_offset(chan, txseq, chan->last_acked_seq) <
	    __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
		BT_DBG("Duplicate - expected_tx_seq later than txseq");
		return L2CAP_TXSEQ_DUPLICATE;
	}

	if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
		/* A source of invalid packets is a "double poll" condition,
		 * where delays cause us to send multiple poll packets. If
		 * the remote stack receives and processes both polls,
		 * sequence numbers can wrap around in such a way that a
		 * resent frame has a sequence number that looks like new data
		 * with a sequence gap. This would trigger an erroneous SREJ
		 * request.
		 *
		 * Fortunately, this is impossible with a tx window that's
		 * less than half of the maximum sequence number, which allows
		 * invalid frames to be safely ignored.
		 *
		 * With tx window sizes greater than half of the tx window
		 * maximum, the frame is invalid and cannot be ignored. This
		 * causes a disconnect.
		 */

		if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
			BT_DBG("Invalid/Ignore - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID_IGNORE;
		} else {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		}
	} else {
		BT_DBG("Unexpected - txseq indicates missing frames");
		return L2CAP_TXSEQ_UNEXPECTED;
	}
}
6061
/* ERTM receive handler for the normal L2CAP_RX_STATE_RECV state.
 *
 * Processes one RX event (I-frame or S-frame).  skb ownership: if the
 * skb is consumed (reassembled or queued on srej_q), skb_in_use is set
 * and the skb is kept; otherwise it is freed at the end.  Returns 0 on
 * success or a negative error from SDU reassembly.
 */
static int l2cap_rx_state_recv(struct l2cap_chan *chan,
			       struct l2cap_ctrl *control,
			       struct sk_buff *skb, u8 event)
{
	int err = 0;
	bool skb_in_use = false;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, control->txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			l2cap_pass_to_tx(chan, control);

			/* Under local busy the in-sequence frame is
			 * deliberately dropped; recovery will request it
			 * again once busy is cleared.
			 */
			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding expected seq %d",
				       control->txseq);
				break;
			}

			chan->expected_tx_seq = __next_seq(chan,
							   control->txseq);

			chan->buffer_seq = chan->expected_tx_seq;
			skb_in_use = true;

			/* Reassembly consumes the skb even on error */
			err = l2cap_reassemble_sdu(chan, skb, control);
			if (err)
				break;

			if (control->final) {
				/* F-bit answers a poll; retransmit only if
				 * the matching REJ was not already handled.
				 */
				if (!test_and_clear_bit(CONN_REJ_ACT,
							&chan->conn_state)) {
					control->final = 0;
					l2cap_retransmit_all(chan, control);
					l2cap_ertm_send(chan);
				}
			}

			if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
				l2cap_send_ack(chan);
			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			l2cap_pass_to_tx(chan, control);

			/* Can't issue SREJ frames in the local busy state.
			 * Drop this frame, it will be seen as missing
			 * when local busy is exited.
			 */
			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding unexpected seq %d",
				       control->txseq);
				break;
			}

			/* There was a gap in the sequence, so an SREJ
			 * must be sent for each missing frame. The
			 * current frame is stored for later use.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			clear_bit(CONN_SREJ_ACT, &chan->conn_state);
			l2cap_seq_list_clear(&chan->srej_list);
			l2cap_send_srej(chan, control->txseq);

			chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			/* Already received; only the ack info is useful */
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(chan, ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state) &&
			    !__chan_is_moving(chan)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			l2cap_send_i_or_rr_or_rnr(chan);
		} else {
			/* Remote just left busy; restart the retransmit
			 * timer if frames are still outstanding.
			 */
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_ertm_send(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		if (control && control->poll) {
			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_rr_or_rnr(chan, 0);
		}
		/* No point retransmitting while the remote is busy */
		__clear_retrans_timer(chan);
		l2cap_seq_list_clear(&chan->retrans_list);
		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	default:
		break;
	}

	/* Any skb not stored above is no longer needed */
	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
6195
/* ERTM receive handler for L2CAP_RX_STATE_SREJ_SENT: one or more SREJ
 * requests are outstanding and incoming I-frames are buffered on
 * srej_q until the gaps are filled.
 *
 * skb ownership follows the same rule as l2cap_rx_state_recv(): any
 * skb queued on srej_q sets skb_in_use; otherwise the skb is freed on
 * exit.  Returns 0 or a negative error from reassembly.
 */
static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
				    struct l2cap_ctrl *control,
				    struct sk_buff *skb, u8 event)
{
	int err = 0;
	u16 txseq = control->txseq;
	bool skb_in_use = false;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			/* Keep frame for reassembly later */
			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			chan->expected_tx_seq = __next_seq(chan, txseq);
			break;
		case L2CAP_TXSEQ_EXPECTED_SREJ:
			/* The SREJ'd frame we were waiting for arrived;
			 * drop it from the pending list and try to flush
			 * the buffered in-order frames up the stack.
			 */
			l2cap_seq_list_pop(&chan->srej_list);

			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			err = l2cap_rx_queued_iframes(chan);
			if (err)
				break;

			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			/* Got a frame that can't be reassembled yet.
			 * Save it for later, and send SREJs to cover
			 * the missing frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_UNEXPECTED_SREJ:
			/* This frame was requested with an SREJ, but
			 * some expected retransmitted frames are
			 * missing. Request retransmission of missing
			 * SREJ'd frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej_list(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_DUPLICATE_SREJ:
			/* We've already queued this frame. Drop this copy. */
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			/* Expecting a later sequence number, so this frame
			 * was already received. Ignore it completely.
			 */
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(chan, ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			/* F-bit answers a poll; retransmit only if the
			 * matching REJ was not already handled.
			 */
			if (!test_and_clear_bit(CONN_REJ_ACT,
						&chan->conn_state)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames) {
				__set_retrans_timer(chan);
			}

			/* Answer the poll by re-requesting the newest
			 * outstanding SREJ with the F-bit set.
			 */
			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_srej_tail(chan);
		} else {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_send_ack(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		if (control->poll) {
			l2cap_send_srej_tail(chan);
		} else {
			/* Acknowledge the RNR with a plain RR frame */
			struct l2cap_ctrl rr_control;
			memset(&rr_control, 0, sizeof(rr_control));
			rr_control.sframe = 1;
			rr_control.super = L2CAP_SUPER_RR;
			rr_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &rr_control);
		}

		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	}

	/* Any skb not stored on srej_q above is no longer needed */
	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
6338
6339 static int l2cap_finish_move(struct l2cap_chan *chan)
6340 {
6341 BT_DBG("chan %p", chan);
6342
6343 chan->rx_state = L2CAP_RX_STATE_RECV;
6344
6345 if (chan->hs_hcon)
6346 chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
6347 else
6348 chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6349
6350 return l2cap_resegment(chan);
6351 }
6352
/* Receive handler for L2CAP_RX_STATE_WAIT_P: after a channel move we
 * wait for the peer's poll (P-bit) before resuming data transfer.
 *
 * Any frame without the P-bit is a protocol error.  The poll's reqseq
 * resynchronizes the transmit side, the move is completed, and a
 * response with the F-bit set is sent before handling the S-frame
 * event through the normal RECV-state machine.
 */
static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control,
				 struct sk_buff *skb, u8 event)
{
	int err;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	if (!control->poll)
		return -EPROTO;

	l2cap_process_reqseq(chan, control->reqseq);

	if (!skb_queue_empty(&chan->tx_q))
		chan->tx_send_head = skb_peek(&chan->tx_q);
	else
		chan->tx_send_head = NULL;

	/* Rewind next_tx_seq to the point expected
	 * by the receiver.
	 */
	chan->next_tx_seq = control->reqseq;
	chan->unacked_frames = 0;

	err = l2cap_finish_move(chan);
	if (err)
		return err;

	set_bit(CONN_SEND_FBIT, &chan->conn_state);
	l2cap_send_i_or_rr_or_rnr(chan);

	/* I-frames cannot carry a poll, so one here is a protocol error */
	if (event == L2CAP_EV_RECV_IFRAME)
		return -EPROTO;

	/* Delegate the S-frame event to the normal RECV-state handler */
	return l2cap_rx_state_recv(chan, control, NULL, event);
}
6390
/* Receive handler for L2CAP_RX_STATE_WAIT_F: after a channel move we
 * polled the peer and now wait for its final (F-bit) response.
 *
 * Any frame without the F-bit is a protocol error.  The response's
 * reqseq resynchronizes the transmit side, the MTU is switched to the
 * controller now carrying the channel, queued data is resegmented, and
 * the frame itself is then processed by the normal RECV-state machine.
 */
static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control,
				 struct sk_buff *skb, u8 event)
{
	int err;

	if (!control->final)
		return -EPROTO;

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	chan->rx_state = L2CAP_RX_STATE_RECV;
	l2cap_process_reqseq(chan, control->reqseq);

	if (!skb_queue_empty(&chan->tx_q))
		chan->tx_send_head = skb_peek(&chan->tx_q);
	else
		chan->tx_send_head = NULL;

	/* Rewind next_tx_seq to the point expected
	 * by the receiver.
	 */
	chan->next_tx_seq = control->reqseq;
	chan->unacked_frames = 0;

	/* Use the AMP block MTU when moved to an AMP controller */
	if (chan->hs_hcon)
		chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
	else
		chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;

	err = l2cap_resegment(chan);

	if (!err)
		err = l2cap_rx_state_recv(chan, control, skb, event);

	return err;
}
6428
6429 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
6430 {
6431 /* Make sure reqseq is for a packet that has been sent but not acked */
6432 u16 unacked;
6433
6434 unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
6435 return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
6436 }
6437
6438 static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6439 struct sk_buff *skb, u8 event)
6440 {
6441 int err = 0;
6442
6443 BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
6444 control, skb, event, chan->rx_state);
6445
6446 if (__valid_reqseq(chan, control->reqseq)) {
6447 switch (chan->rx_state) {
6448 case L2CAP_RX_STATE_RECV:
6449 err = l2cap_rx_state_recv(chan, control, skb, event);
6450 break;
6451 case L2CAP_RX_STATE_SREJ_SENT:
6452 err = l2cap_rx_state_srej_sent(chan, control, skb,
6453 event);
6454 break;
6455 case L2CAP_RX_STATE_WAIT_P:
6456 err = l2cap_rx_state_wait_p(chan, control, skb, event);
6457 break;
6458 case L2CAP_RX_STATE_WAIT_F:
6459 err = l2cap_rx_state_wait_f(chan, control, skb, event);
6460 break;
6461 default:
6462 /* shut it down */
6463 break;
6464 }
6465 } else {
6466 BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
6467 control->reqseq, chan->next_tx_seq,
6468 chan->expected_ack_seq);
6469 l2cap_send_disconn_req(chan, ECONNRESET);
6470 }
6471
6472 return err;
6473 }
6474
/* Receive path for streaming mode.  Streaming mode has no
 * retransmission: only the exactly-expected txseq is reassembled; any
 * other classification discards both the partially assembled SDU and
 * the frame, then resynchronizes on the received sequence number.
 */
static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
			   struct sk_buff *skb)
{
	int err = 0;

	BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
	       chan->rx_state);

	if (l2cap_classify_txseq(chan, control->txseq) ==
	    L2CAP_TXSEQ_EXPECTED) {
		l2cap_pass_to_tx(chan, control);

		BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
		       __next_seq(chan, chan->buffer_seq));

		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);

		l2cap_reassemble_sdu(chan, skb, control);
	} else {
		/* Out-of-sequence frame: abandon any partial SDU */
		if (chan->sdu) {
			kfree_skb(chan->sdu);
			chan->sdu = NULL;
		}
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;

		if (skb) {
			BT_DBG("Freeing %p", skb);
			kfree_skb(skb);
		}
	}

	/* Resynchronize: treat this txseq as received and acked */
	chan->last_acked_seq = control->txseq;
	chan->expected_tx_seq = __next_seq(chan, control->txseq);

	return err;
}
6512
/* Entry point for inbound ERTM/streaming data: unpack and validate the
 * control field, FCS and length, then feed the frame into either the
 * ERTM state machine (l2cap_rx) or the streaming receive path.
 *
 * Returns 0 in all cases; invalid frames are dropped and protocol
 * errors trigger a disconnect request rather than an error return.
 */
static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct l2cap_ctrl *control = &bt_cb(skb)->control;
	u16 len;
	u8 event;

	__unpack_control(chan, skb);

	len = skb->len;

	/*
	 * We can just drop the corrupted I-frame here.
	 * Receiver will miss it and start proper recovery
	 * procedures and ask for retransmission.
	 */
	if (l2cap_check_fcs(chan, skb))
		goto drop;

	/* Exclude the SDU-length header and FCS trailer from the
	 * payload length checked against the channel MPS.
	 */
	if (!control->sframe && control->sar == L2CAP_SAR_START)
		len -= L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		len -= L2CAP_FCS_SIZE;

	if (len > chan->mps) {
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto drop;
	}

	if (!control->sframe) {
		int err;

		BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
		       control->sar, control->reqseq, control->final,
		       control->txseq);

		/* Validate F-bit - F=0 always valid, F=1 only
		 * valid in TX WAIT_F
		 */
		if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
			goto drop;

		if (chan->mode != L2CAP_MODE_STREAMING) {
			event = L2CAP_EV_RECV_IFRAME;
			err = l2cap_rx(chan, control, skb, event);
		} else {
			err = l2cap_stream_rx(chan, control, skb);
		}

		if (err)
			l2cap_send_disconn_req(chan, ECONNRESET);
	} else {
		/* Map the 2-bit S-frame function field to an RX event */
		const u8 rx_func_to_event[4] = {
			L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
			L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
		};

		/* Only I-frames are expected in streaming mode */
		if (chan->mode == L2CAP_MODE_STREAMING)
			goto drop;

		BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
		       control->reqseq, control->final, control->poll,
		       control->super);

		/* S-frames carry no payload; len must be zero by now */
		if (len != 0) {
			BT_ERR("Trailing bytes: %d in sframe", len);
			l2cap_send_disconn_req(chan, ECONNRESET);
			goto drop;
		}

		/* Validate F and P bits */
		if (control->final && (control->poll ||
				       chan->tx_state != L2CAP_TX_STATE_WAIT_F))
			goto drop;

		event = rx_func_to_event[control->super];
		if (l2cap_rx(chan, control, skb, event))
			l2cap_send_disconn_req(chan, ECONNRESET);
	}

	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
6600
6601 static void l2cap_chan_le_send_credits(struct l2cap_chan *chan)
6602 {
6603 struct l2cap_conn *conn = chan->conn;
6604 struct l2cap_le_credits pkt;
6605 u16 return_credits;
6606
6607 /* We return more credits to the sender only after the amount of
6608 * credits falls below half of the initial amount.
6609 */
6610 if (chan->rx_credits >= (le_max_credits + 1) / 2)
6611 return;
6612
6613 return_credits = le_max_credits - chan->rx_credits;
6614
6615 BT_DBG("chan %p returning %u credits to sender", chan, return_credits);
6616
6617 chan->rx_credits += return_credits;
6618
6619 pkt.cid = cpu_to_le16(chan->scid);
6620 pkt.credits = cpu_to_le16(return_credits);
6621
6622 chan->ident = l2cap_get_ident(conn);
6623
6624 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CREDITS, sizeof(pkt), &pkt);
6625 }
6626
/* Receive one LE flow-control mode PDU and reassemble it into an SDU.
 *
 * Each PDU consumes one credit; running out of credits on the remote
 * side is a protocol violation and disconnects the channel.  The first
 * PDU of an SDU starts with a 2-byte SDU length; subsequent PDUs are
 * appended until the full SDU is collected and passed to ops->recv.
 *
 * Always returns 0 once the credit/MTU checks pass, because all skb
 * freeing is handled internally (see comment at the end).
 */
static int l2cap_le_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
{
	int err;

	if (!chan->rx_credits) {
		BT_ERR("No credits to receive LE L2CAP data");
		l2cap_send_disconn_req(chan, ECONNRESET);
		return -ENOBUFS;
	}

	if (chan->imtu < skb->len) {
		BT_ERR("Too big LE L2CAP PDU");
		return -ENOBUFS;
	}

	chan->rx_credits--;
	BT_DBG("rx_credits %u -> %u", chan->rx_credits + 1, chan->rx_credits);

	/* May replenish the sender's credits if we are running low */
	l2cap_chan_le_send_credits(chan);

	err = 0;

	/* No SDU in progress: this PDU starts a new one */
	if (!chan->sdu) {
		u16 sdu_len;

		sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, L2CAP_SDULEN_SIZE);

		BT_DBG("Start of new SDU. sdu_len %u skb->len %u imtu %u",
		       sdu_len, skb->len, chan->imtu);

		if (sdu_len > chan->imtu) {
			BT_ERR("Too big LE L2CAP SDU length received");
			err = -EMSGSIZE;
			goto failed;
		}

		if (skb->len > sdu_len) {
			BT_ERR("Too much LE L2CAP data received");
			err = -EINVAL;
			goto failed;
		}

		/* Complete SDU in a single PDU: deliver it directly */
		if (skb->len == sdu_len)
			return chan->ops->recv(chan, skb);

		chan->sdu = skb;
		chan->sdu_len = sdu_len;
		chan->sdu_last_frag = skb;

		return 0;
	}

	BT_DBG("SDU fragment. chan->sdu->len %u skb->len %u chan->sdu_len %u",
	       chan->sdu->len, skb->len, chan->sdu_len);

	if (chan->sdu->len + skb->len > chan->sdu_len) {
		BT_ERR("Too much LE L2CAP data received");
		err = -EINVAL;
		goto failed;
	}

	/* Fragment now owned by chan->sdu; clear skb so the failure
	 * path below does not double-free it.
	 */
	append_skb_frag(chan->sdu, skb, &chan->sdu_last_frag);
	skb = NULL;

	if (chan->sdu->len == chan->sdu_len) {
		err = chan->ops->recv(chan, chan->sdu);
		if (!err) {
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;
		}
	}

failed:
	if (err) {
		kfree_skb(skb);
		kfree_skb(chan->sdu);
		chan->sdu = NULL;
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;
	}

	/* We can't return an error here since we took care of the skb
	 * freeing internally. An error return would cause the caller to
	 * do a double-free of the skb.
	 */
	return 0;
}
6716
/* Dispatch an inbound data PDU to the channel identified by cid.
 *
 * NOTE(review): all exit paths call l2cap_chan_unlock(), so
 * l2cap_get_chan_by_scid() is expected to return the channel already
 * locked (the A2MP branch locks explicitly) -- confirm against the
 * helper's definition.  Unknown CIDs other than L2CAP_CID_A2MP cause
 * the packet to be dropped.
 */
static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
			       struct sk_buff *skb)
{
	struct l2cap_chan *chan;

	chan = l2cap_get_chan_by_scid(conn, cid);
	if (!chan) {
		/* The A2MP fixed channel is created on demand */
		if (cid == L2CAP_CID_A2MP) {
			chan = a2mp_channel_create(conn, skb);
			if (!chan) {
				kfree_skb(skb);
				return;
			}

			l2cap_chan_lock(chan);
		} else {
			BT_DBG("unknown cid 0x%4.4x", cid);
			/* Drop packet and return */
			kfree_skb(skb);
			return;
		}
	}

	BT_DBG("chan %p, len %d", chan, skb->len);

	if (chan->state != BT_CONNECTED)
		goto drop;

	switch (chan->mode) {
	case L2CAP_MODE_LE_FLOWCTL:
		/* l2cap_le_data_rcv() frees the skb on error itself */
		if (l2cap_le_data_rcv(chan, skb) < 0)
			goto drop;

		goto done;

	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism. */

		if (chan->imtu < skb->len) {
			BT_ERR("Dropping L2CAP data: receive buffer overflow");
			goto drop;
		}

		/* recv takes skb ownership on success (returns 0) */
		if (!chan->ops->recv(chan, skb))
			goto done;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* l2cap_data_rcv() always consumes the skb */
		l2cap_data_rcv(chan, skb);
		goto done;

	default:
		BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
		break;
	}

drop:
	kfree_skb(skb);

done:
	l2cap_chan_unlock(chan);
}
6783
/* Deliver a connectionless (CID 0x0002) PDU to a channel listening on
 * the given PSM.  Only valid on BR/EDR ACL links.
 *
 * NOTE(review): l2cap_global_chan_by_psm() presumably returns the
 * channel with a reference held, since both delivery and drop paths
 * call l2cap_chan_put() -- confirm against the helper's definition.
 */
static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
				  struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_chan *chan;

	if (hcon->type != ACL_LINK)
		goto free_skb;

	chan = l2cap_global_chan_by_psm(0, psm, &hcon->src, &hcon->dst,
					ACL_LINK);
	if (!chan)
		goto free_skb;

	BT_DBG("chan %p, len %d", chan, skb->len);

	if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
		goto drop;

	if (chan->imtu < skb->len)
		goto drop;

	/* Store remote BD_ADDR and PSM for msg_name */
	bacpy(&bt_cb(skb)->bdaddr, &hcon->dst);
	bt_cb(skb)->psm = psm;

	/* On success recv takes ownership of the skb; just drop our ref */
	if (!chan->ops->recv(chan, skb)) {
		l2cap_chan_put(chan);
		return;
	}

drop:
	l2cap_chan_put(chan);
free_skb:
	kfree_skb(skb);
}
6820
6821 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
6822 {
6823 struct l2cap_hdr *lh = (void *) skb->data;
6824 struct hci_conn *hcon = conn->hcon;
6825 u16 cid, len;
6826 __le16 psm;
6827
6828 if (hcon->state != BT_CONNECTED) {
6829 BT_DBG("queueing pending rx skb");
6830 skb_queue_tail(&conn->pending_rx, skb);
6831 return;
6832 }
6833
6834 skb_pull(skb, L2CAP_HDR_SIZE);
6835 cid = __le16_to_cpu(lh->cid);
6836 len = __le16_to_cpu(lh->len);
6837
6838 if (len != skb->len) {
6839 kfree_skb(skb);
6840 return;
6841 }
6842
6843 /* Since we can't actively block incoming LE connections we must
6844 * at least ensure that we ignore incoming data from them.
6845 */
6846 if (hcon->type == LE_LINK &&
6847 hci_bdaddr_list_lookup(&hcon->hdev->blacklist, &hcon->dst,
6848 bdaddr_type(hcon, hcon->dst_type))) {
6849 kfree_skb(skb);
6850 return;
6851 }
6852
6853 BT_DBG("len %d, cid 0x%4.4x", len, cid);
6854
6855 switch (cid) {
6856 case L2CAP_CID_SIGNALING:
6857 l2cap_sig_channel(conn, skb);
6858 break;
6859
6860 case L2CAP_CID_CONN_LESS:
6861 psm = get_unaligned((__le16 *) skb->data);
6862 skb_pull(skb, L2CAP_PSMLEN_SIZE);
6863 l2cap_conless_channel(conn, psm, skb);
6864 break;
6865
6866 case L2CAP_CID_LE_SIGNALING:
6867 l2cap_le_sig_channel(conn, skb);
6868 break;
6869
6870 default:
6871 l2cap_data_channel(conn, cid, skb);
6872 break;
6873 }
6874 }
6875
6876 static void process_pending_rx(struct work_struct *work)
6877 {
6878 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
6879 pending_rx_work);
6880 struct sk_buff *skb;
6881
6882 BT_DBG("");
6883
6884 while ((skb = skb_dequeue(&conn->pending_rx)))
6885 l2cap_recv_frame(conn, skb);
6886 }
6887
/* Find or create the L2CAP connection object for an HCI connection.
 *
 * Returns the existing conn if one is already attached to the hcon,
 * otherwise allocates and initializes a new one (taking a reference on
 * the hcon and creating an hci_chan for it).  Returns NULL on
 * allocation failure.
 */
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct hci_chan *hchan;

	if (conn)
		return conn;

	hchan = hci_chan_create(hcon);
	if (!hchan)
		return NULL;

	conn = kzalloc(sizeof(*conn), GFP_KERNEL);
	if (!conn) {
		hci_chan_del(hchan);
		return NULL;
	}

	kref_init(&conn->ref);
	hcon->l2cap_data = conn;
	conn->hcon = hcon;
	/* Hold the hcon for the lifetime of this conn */
	hci_conn_get(conn->hcon);
	conn->hchan = hchan;

	BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);

	/* LE links use the LE MTU when the controller reports one;
	 * everything else (including LE with le_mtu == 0) uses ACL MTU.
	 */
	switch (hcon->type) {
	case LE_LINK:
		if (hcon->hdev->le_mtu) {
			conn->mtu = hcon->hdev->le_mtu;
			break;
		}
		/* fall through */
	default:
		conn->mtu = hcon->hdev->acl_mtu;
		break;
	}

	conn->feat_mask = 0;

	if (hcon->type == ACL_LINK)
		conn->hs_enabled = test_bit(HCI_HS_ENABLED,
					    &hcon->hdev->dev_flags);

	mutex_init(&conn->ident_lock);
	mutex_init(&conn->chan_lock);

	INIT_LIST_HEAD(&conn->chan_l);
	INIT_LIST_HEAD(&conn->users);

	INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);

	INIT_WORK(&conn->disconn_work, disconn_work);

	skb_queue_head_init(&conn->pending_rx);
	INIT_WORK(&conn->pending_rx_work, process_pending_rx);

	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	return conn;
}
6949
6950 static bool is_valid_psm(u16 psm, u8 dst_type) {
6951 if (!psm)
6952 return false;
6953
6954 if (bdaddr_type_is_le(dst_type))
6955 return (psm <= 0x00ff);
6956
6957 /* PSM must be odd and lsb of upper byte must be 0 */
6958 return ((psm & 0x0101) == 0x0001);
6959 }
6960
/* Initiate an outgoing L2CAP channel connection.
 *
 * Validates the PSM/CID against the channel type and mode, creates (or
 * reuses) the underlying HCI connection to dst, attaches the channel
 * to the resulting l2cap_conn, and either completes the channel setup
 * immediately (if the link is already up) or leaves it in BT_CONNECT
 * awaiting link establishment.
 *
 * Returns 0 on success (including "already connecting"), or a negative
 * errno: -EHOSTUNREACH (no route), -EINVAL (bad psm/cid combination),
 * -EOPNOTSUPP (mode unavailable), -EISCONN, -EBADFD, -ENOMEM, -EBUSY
 * (dcid already in use), or an error from HCI connection setup.
 */
int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
		       bdaddr_t *dst, u8 dst_type)
{
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	int err;

	BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", &chan->src, dst,
	       dst_type, __le16_to_cpu(psm));

	hdev = hci_get_route(dst, &chan->src);
	if (!hdev)
		return -EHOSTUNREACH;

	hci_dev_lock(hdev);

	l2cap_chan_lock(chan);

	/* Raw channels may omit both psm and cid; all others need a
	 * valid identifier for their type.
	 */
	if (!is_valid_psm(__le16_to_cpu(psm), dst_type) && !cid &&
	    chan->chan_type != L2CAP_CHAN_RAW) {
		err = -EINVAL;
		goto done;
	}

	if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !psm) {
		err = -EINVAL;
		goto done;
	}

	if (chan->chan_type == L2CAP_CHAN_FIXED && !cid) {
		err = -EINVAL;
		goto done;
	}

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		break;
	case L2CAP_MODE_LE_FLOWCTL:
		l2cap_le_flowctl_init(chan);
		break;
	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		if (!disable_ertm)
			break;
		/* fall through */
	default:
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (chan->state) {
	case BT_CONNECT:
	case BT_CONNECT2:
	case BT_CONFIG:
		/* Already connecting */
		err = 0;
		goto done;

	case BT_CONNECTED:
		/* Already connected */
		err = -EISCONN;
		goto done;

	case BT_OPEN:
	case BT_BOUND:
		/* Can connect */
		break;

	default:
		err = -EBADFD;
		goto done;
	}

	/* Set destination address and psm */
	bacpy(&chan->dst, dst);
	chan->dst_type = dst_type;

	chan->psm = psm;
	chan->dcid = cid;

	if (bdaddr_type_is_le(dst_type)) {
		u8 role;

		/* Convert from L2CAP channel address type to HCI address type
		 */
		if (dst_type == BDADDR_LE_PUBLIC)
			dst_type = ADDR_LE_DEV_PUBLIC;
		else
			dst_type = ADDR_LE_DEV_RANDOM;

		/* While advertising we can only accept connections, so
		 * take the slave role for the outgoing LE link.
		 */
		if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
			role = HCI_ROLE_SLAVE;
		else
			role = HCI_ROLE_MASTER;

		hcon = hci_connect_le(hdev, dst, dst_type, chan->sec_level,
				      HCI_LE_CONN_TIMEOUT, role);
	} else {
		u8 auth_type = l2cap_get_auth_type(chan);
		hcon = hci_connect_acl(hdev, dst, chan->sec_level, auth_type);
	}

	if (IS_ERR(hcon)) {
		err = PTR_ERR(hcon);
		goto done;
	}

	conn = l2cap_conn_add(hcon);
	if (!conn) {
		hci_conn_drop(hcon);
		err = -ENOMEM;
		goto done;
	}

	/* A fixed CID must not already be in use on this connection */
	if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
		hci_conn_drop(hcon);
		err = -EBUSY;
		goto done;
	}

	/* Update source addr of the socket */
	bacpy(&chan->src, &hcon->src);
	chan->src_type = bdaddr_type(hcon, hcon->src_type);

	/* Unlock around l2cap_chan_add to respect lock ordering with
	 * conn->chan_lock, then re-take the channel lock.
	 */
	l2cap_chan_unlock(chan);
	l2cap_chan_add(conn, chan);
	l2cap_chan_lock(chan);

	/* l2cap_chan_add takes its own ref so we can drop this one */
	hci_conn_drop(hcon);

	l2cap_state_change(chan, BT_CONNECT);
	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	/* Release chan->sport so that it can be reused by other
	 * sockets (as it's only used for listening sockets).
	 */
	write_lock(&chan_list_lock);
	chan->sport = 0;
	write_unlock(&chan_list_lock);

	/* If the link is already up, kick off channel setup right away */
	if (hcon->state == BT_CONNECTED) {
		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			__clear_chan_timer(chan);
			if (l2cap_chan_check_security(chan, true))
				l2cap_state_change(chan, BT_CONNECTED);
		} else
			l2cap_do_start(chan);
	}

	err = 0;

done:
	l2cap_chan_unlock(chan);
	hci_dev_unlock(hdev);
	hci_dev_put(hdev);
	return err;
}
7120 EXPORT_SYMBOL_GPL(l2cap_chan_connect);
7121
7122 /* ---- L2CAP interface with lower layer (HCI) ---- */
7123
7124 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
7125 {
7126 int exact = 0, lm1 = 0, lm2 = 0;
7127 struct l2cap_chan *c;
7128
7129 BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);
7130
7131 /* Find listening sockets and check their link_mode */
7132 read_lock(&chan_list_lock);
7133 list_for_each_entry(c, &chan_list, global_l) {
7134 if (c->state != BT_LISTEN)
7135 continue;
7136
7137 if (!bacmp(&c->src, &hdev->bdaddr)) {
7138 lm1 |= HCI_LM_ACCEPT;
7139 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
7140 lm1 |= HCI_LM_MASTER;
7141 exact++;
7142 } else if (!bacmp(&c->src, BDADDR_ANY)) {
7143 lm2 |= HCI_LM_ACCEPT;
7144 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
7145 lm2 |= HCI_LM_MASTER;
7146 }
7147 }
7148 read_unlock(&chan_list_lock);
7149
7150 return exact ? lm1 : lm2;
7151 }
7152
/* Find the next fixed channel in BT_LISTEN state, continue iteration
 * from an existing channel in the list or from the beginning of the
 * global list (by passing NULL as first parameter).
 *
 * Returns the matching channel with a reference held (caller must
 * l2cap_chan_put() it), or NULL when no further match exists.
 */
static struct l2cap_chan *l2cap_global_fixed_chan(struct l2cap_chan *c,
						  bdaddr_t *src, u8 link_type)
{
	read_lock(&chan_list_lock);

	/* Resume right after the previous hit, or start at the head of
	 * the global list when called with c == NULL.
	 */
	if (c)
		c = list_next_entry(c, global_l);
	else
		c = list_entry(chan_list.next, typeof(*c), global_l);

	list_for_each_entry_from(c, &chan_list, global_l) {
		if (c->chan_type != L2CAP_CHAN_FIXED)
			continue;
		if (c->state != BT_LISTEN)
			continue;
		/* Source address must match exactly or be the wildcard */
		if (bacmp(&c->src, src) && bacmp(&c->src, BDADDR_ANY))
			continue;
		/* ACL links only pair with BR/EDR sources, LE links only
		 * with non-BR/EDR sources.
		 */
		if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR)
			continue;
		if (link_type == LE_LINK && c->src_type == BDADDR_BREDR)
			continue;

		/* Hold a reference before dropping the list lock so the
		 * caller can safely use (and later put) the channel.
		 */
		l2cap_chan_hold(c);
		read_unlock(&chan_list_lock);
		return c;
	}

	read_unlock(&chan_list_lock);

	return NULL;
}
7188
/* HCI connection-complete callback.
 *
 * On failure, tear down any L2CAP state attached to the link.  On
 * success, create the l2cap_conn and offer the new link to every
 * listening fixed channel, then mark the connection ready.
 */
void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
{
	struct hci_dev *hdev = hcon->hdev;
	struct l2cap_conn *conn;
	struct l2cap_chan *pchan;
	u8 dst_type;

	BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);

	if (status) {
		/* Link setup failed: drop L2CAP state with a matching errno */
		l2cap_conn_del(hcon, bt_to_errno(status));
		return;
	}

	conn = l2cap_conn_add(hcon);
	if (!conn)
		return;

	dst_type = bdaddr_type(hcon, hcon->dst_type);

	/* If device is blocked, do not create channels for it */
	if (hci_bdaddr_list_lookup(&hdev->blacklist, &hcon->dst, dst_type))
		return;

	/* Find fixed channels and notify them of the new connection. We
	 * use multiple individual lookups, continuing each time where
	 * we left off, because the list lock would prevent calling the
	 * potentially sleeping l2cap_chan_lock() function.
	 */
	pchan = l2cap_global_fixed_chan(NULL, &hdev->bdaddr, hcon->type);
	while (pchan) {
		struct l2cap_chan *chan, *next;

		/* Client fixed channels should override server ones */
		if (__l2cap_get_chan_by_dcid(conn, pchan->scid))
			goto next;

		l2cap_chan_lock(pchan);
		chan = pchan->ops->new_connection(pchan);
		if (chan) {
			/* Copy the link's addressing into the new channel */
			bacpy(&chan->src, &hcon->src);
			bacpy(&chan->dst, &hcon->dst);
			chan->src_type = bdaddr_type(hcon, hcon->src_type);
			chan->dst_type = dst_type;

			__l2cap_chan_add(conn, chan);
		}

		l2cap_chan_unlock(pchan);
next:
		/* Look up the next listener before releasing the reference
		 * l2cap_global_fixed_chan() took on the current one.
		 */
		next = l2cap_global_fixed_chan(pchan, &hdev->bdaddr,
					       hcon->type);
		l2cap_chan_put(pchan);
		pchan = next;
	}

	l2cap_conn_ready(conn);
}
7247
7248 int l2cap_disconn_ind(struct hci_conn *hcon)
7249 {
7250 struct l2cap_conn *conn = hcon->l2cap_data;
7251
7252 BT_DBG("hcon %p", hcon);
7253
7254 if (!conn)
7255 return HCI_ERROR_REMOTE_USER_TERM;
7256 return conn->disc_reason;
7257 }
7258
/* HCI disconnection-complete callback: the ACL link is gone, so tear
 * down the whole L2CAP connection, translating the HCI reason code
 * into an errno for the channel owners.
 */
void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
{
	BT_DBG("hcon %p reason %d", hcon, reason);

	l2cap_conn_del(hcon, bt_to_errno(reason));
}
7265
7266 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
7267 {
7268 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
7269 return;
7270
7271 if (encrypt == 0x00) {
7272 if (chan->sec_level == BT_SECURITY_MEDIUM) {
7273 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
7274 } else if (chan->sec_level == BT_SECURITY_HIGH ||
7275 chan->sec_level == BT_SECURITY_FIPS)
7276 l2cap_chan_close(chan, ECONNREFUSED);
7277 } else {
7278 if (chan->sec_level == BT_SECURITY_MEDIUM)
7279 __clear_chan_timer(chan);
7280 }
7281 }
7282
/* HCI security (authentication/encryption) change callback.
 *
 * Walk every channel on the connection and advance its state machine
 * according to the security result:
 *  - connected/configuring channels are resumed and their encryption
 *    requirements re-checked;
 *  - BT_CONNECT channels either start connecting or get a disconnect
 *    timer on failure;
 *  - BT_CONNECT2 channels answer the pending connect request (pend,
 *    success, or security block) and, on success, begin configuration.
 *
 * Always returns 0.
 */
int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan;

	if (!conn)
		return 0;

	BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
		       state_to_string(chan->state));

		/* The A2MP signalling channel is not subject to link
		 * security; leave it untouched.
		 */
		if (chan->scid == L2CAP_CID_A2MP) {
			l2cap_chan_unlock(chan);
			continue;
		}

		/* On success with encryption, adopt the link's level */
		if (!status && encrypt)
			chan->sec_level = hcon->sec_level;

		/* Channels with a connect request still pending are
		 * handled by the response path, not here.
		 */
		if (!__l2cap_no_conn_pending(chan)) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (!status && (chan->state == BT_CONNECTED ||
				chan->state == BT_CONFIG)) {
			chan->ops->resume(chan);
			l2cap_check_encryption(chan, encrypt);
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			if (!status)
				l2cap_start_connection(chan);
			else
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
		} else if (chan->state == BT_CONNECT2) {
			struct l2cap_conn_rsp rsp;
			__u16 res, stat;

			if (!status) {
				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
					/* Let userspace authorize first */
					res = L2CAP_CR_PEND;
					stat = L2CAP_CS_AUTHOR_PEND;
					chan->ops->defer(chan);
				} else {
					l2cap_state_change(chan, BT_CONFIG);
					res = L2CAP_CR_SUCCESS;
					stat = L2CAP_CS_NO_INFO;
				}
			} else {
				/* Security failed: refuse the connection
				 * and arm the disconnect timer.
				 */
				l2cap_state_change(chan, BT_DISCONN);
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
				res = L2CAP_CR_SEC_BLOCK;
				stat = L2CAP_CS_NO_INFO;
			}

			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);
			rsp.result = cpu_to_le16(res);
			rsp.status = cpu_to_le16(stat);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			/* On success, start configuration unless a config
			 * request was already sent for this channel.
			 */
			if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
			    res == L2CAP_CR_SUCCESS) {
				char buf[128];
				set_bit(CONF_REQ_SENT, &chan->conf_state);
				l2cap_send_cmd(conn, l2cap_get_ident(conn),
					       L2CAP_CONF_REQ,
					       l2cap_build_conf_req(chan, buf),
					       buf);
				chan->num_conf_req++;
			}
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);

	return 0;
}
7374
/* Receive ACL data from HCI and reassemble it into complete L2CAP
 * frames.
 *
 * Start fragments (ACL_START*) carry the basic L2CAP header, from
 * which the total frame length is derived; continuation fragments
 * (ACL_CONT) are appended to conn->rx_skb until rx_len reaches zero.
 * Complete frames are handed to l2cap_recv_frame(), which takes
 * ownership of the skb.  Any protocol violation marks the connection
 * unreliable and drops the fragment.  Always returns 0; the input skb
 * is always consumed (either handed off or freed at "drop").
 */
int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_hdr *hdr;
	int len;

	/* For AMP controller do not create l2cap conn */
	if (!conn && hcon->hdev->dev_type != HCI_BREDR)
		goto drop;

	if (!conn)
		conn = l2cap_conn_add(hcon);

	if (!conn)
		goto drop;

	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);

	switch (flags) {
	case ACL_START:
	case ACL_START_NO_FLUSH:
	case ACL_COMPLETE:
		/* A start fragment while reassembly is in progress means
		 * the previous frame was never completed: discard it.
		 */
		if (conn->rx_len) {
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
		}

		/* Start fragment always begin with Basic L2CAP header */
		if (skb->len < L2CAP_HDR_SIZE) {
			BT_ERR("Frame is too short (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		hdr = (struct l2cap_hdr *) skb->data;
		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);
			return 0;
		}

		BT_DBG("Start: total len %d, frag len %d", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %d, expected len %d)",
			       skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
		if (!conn->rx_skb)
			goto drop;

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
					  skb->len);
		conn->rx_len = len - skb->len;
		break;

	case ACL_CONT:
		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);

		if (!conn->rx_len) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %d, expected %d)",
			       skb->len, conn->rx_len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
					  skb->len);
		conn->rx_len -= skb->len;

		if (!conn->rx_len) {
			/* Complete frame received. l2cap_recv_frame
			 * takes ownership of the skb so set the global
			 * rx_skb pointer to NULL first.
			 */
			struct sk_buff *rx_skb = conn->rx_skb;
			conn->rx_skb = NULL;
			l2cap_recv_frame(conn, rx_skb);
		}
		break;
	}

	/* Note: successful break paths fall through here intentionally;
	 * the fragment's data has been copied, so the input skb is freed.
	 */
drop:
	kfree_skb(skb);
	return 0;
}
7479
7480 static int l2cap_debugfs_show(struct seq_file *f, void *p)
7481 {
7482 struct l2cap_chan *c;
7483
7484 read_lock(&chan_list_lock);
7485
7486 list_for_each_entry(c, &chan_list, global_l) {
7487 seq_printf(f, "%pMR %pMR %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
7488 &c->src, &c->dst,
7489 c->state, __le16_to_cpu(c->psm),
7490 c->scid, c->dcid, c->imtu, c->omtu,
7491 c->sec_level, c->mode);
7492 }
7493
7494 read_unlock(&chan_list_lock);
7495
7496 return 0;
7497 }
7498
/* debugfs open callback: bind l2cap_debugfs_show to a seq_file */
static int l2cap_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, l2cap_debugfs_show, inode->i_private);
}
7503
/* File operations for the read-only "l2cap" debugfs entry */
static const struct file_operations l2cap_debugfs_fops = {
	.open = l2cap_debugfs_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

/* Dentry of the "l2cap" debugfs file; removed again in l2cap_exit() */
static struct dentry *l2cap_debugfs;
7512
7513 int __init l2cap_init(void)
7514 {
7515 int err;
7516
7517 err = l2cap_init_sockets();
7518 if (err < 0)
7519 return err;
7520
7521 if (IS_ERR_OR_NULL(bt_debugfs))
7522 return 0;
7523
7524 l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
7525 NULL, &l2cap_debugfs_fops);
7526
7527 debugfs_create_u16("l2cap_le_max_credits", 0644, bt_debugfs,
7528 &le_max_credits);
7529 debugfs_create_u16("l2cap_le_default_mps", 0644, bt_debugfs,
7530 &le_default_mps);
7531
7532 return 0;
7533 }
7534
/* Module exit: undo l2cap_init() — remove the debugfs entry and
 * unregister the socket layer.
 */
void l2cap_exit(void)
{
	debugfs_remove(l2cap_debugfs);
	l2cap_cleanup_sockets();
}
7540
/* Allow disabling enhanced retransmission mode at module load time */
module_param(disable_ertm, bool, 0644);
MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");