Bluetooth: Centralize looking up blocked devices to l2cap_recv_frame
[deliverable/linux.git] / net / bluetooth / l2cap_core.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
8
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
10
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
14
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
23
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
27 */
28
29 /* Bluetooth L2CAP core. */
30
31 #include <linux/module.h>
32
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
35
36 #include <net/bluetooth/bluetooth.h>
37 #include <net/bluetooth/hci_core.h>
38 #include <net/bluetooth/l2cap.h>
39
40 #include "smp.h"
41 #include "a2mp.h"
42 #include "amp.h"
43
44 #define LE_FLOWCTL_MAX_CREDITS 65535
45
46 bool disable_ertm;
47
48 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD;
49 static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP | L2CAP_FC_CONNLESS, };
50
51 static LIST_HEAD(chan_list);
52 static DEFINE_RWLOCK(chan_list_lock);
53
54 static u16 le_max_credits = L2CAP_LE_MAX_CREDITS;
55 static u16 le_default_mps = L2CAP_LE_DEFAULT_MPS;
56
57 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
58 u8 code, u8 ident, u16 dlen, void *data);
59 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
60 void *data);
61 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
62 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
63
64 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
65 struct sk_buff_head *skbs, u8 event);
66
67 static inline __u8 bdaddr_type(struct hci_conn *hcon, __u8 type)
68 {
69 if (hcon->type == LE_LINK) {
70 if (type == ADDR_LE_DEV_PUBLIC)
71 return BDADDR_LE_PUBLIC;
72 else
73 return BDADDR_LE_RANDOM;
74 }
75
76 return BDADDR_BREDR;
77 }
78
79 /* ---- L2CAP channels ---- */
80
81 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
82 u16 cid)
83 {
84 struct l2cap_chan *c;
85
86 list_for_each_entry(c, &conn->chan_l, list) {
87 if (c->dcid == cid)
88 return c;
89 }
90 return NULL;
91 }
92
93 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
94 u16 cid)
95 {
96 struct l2cap_chan *c;
97
98 list_for_each_entry(c, &conn->chan_l, list) {
99 if (c->scid == cid)
100 return c;
101 }
102 return NULL;
103 }
104
105 /* Find channel with given SCID.
106 * Returns locked channel. */
107 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
108 u16 cid)
109 {
110 struct l2cap_chan *c;
111
112 mutex_lock(&conn->chan_lock);
113 c = __l2cap_get_chan_by_scid(conn, cid);
114 if (c)
115 l2cap_chan_lock(c);
116 mutex_unlock(&conn->chan_lock);
117
118 return c;
119 }
120
121 /* Find channel with given DCID.
122 * Returns locked channel.
123 */
124 static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
125 u16 cid)
126 {
127 struct l2cap_chan *c;
128
129 mutex_lock(&conn->chan_lock);
130 c = __l2cap_get_chan_by_dcid(conn, cid);
131 if (c)
132 l2cap_chan_lock(c);
133 mutex_unlock(&conn->chan_lock);
134
135 return c;
136 }
137
138 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
139 u8 ident)
140 {
141 struct l2cap_chan *c;
142
143 list_for_each_entry(c, &conn->chan_l, list) {
144 if (c->ident == ident)
145 return c;
146 }
147 return NULL;
148 }
149
150 static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
151 u8 ident)
152 {
153 struct l2cap_chan *c;
154
155 mutex_lock(&conn->chan_lock);
156 c = __l2cap_get_chan_by_ident(conn, ident);
157 if (c)
158 l2cap_chan_lock(c);
159 mutex_unlock(&conn->chan_lock);
160
161 return c;
162 }
163
164 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
165 {
166 struct l2cap_chan *c;
167
168 list_for_each_entry(c, &chan_list, global_l) {
169 if (c->sport == psm && !bacmp(&c->src, src))
170 return c;
171 }
172 return NULL;
173 }
174
175 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
176 {
177 int err;
178
179 write_lock(&chan_list_lock);
180
181 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
182 err = -EADDRINUSE;
183 goto done;
184 }
185
186 if (psm) {
187 chan->psm = psm;
188 chan->sport = psm;
189 err = 0;
190 } else {
191 u16 p;
192
193 err = -EINVAL;
194 for (p = 0x1001; p < 0x1100; p += 2)
195 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
196 chan->psm = cpu_to_le16(p);
197 chan->sport = cpu_to_le16(p);
198 err = 0;
199 break;
200 }
201 }
202
203 done:
204 write_unlock(&chan_list_lock);
205 return err;
206 }
207 EXPORT_SYMBOL_GPL(l2cap_add_psm);
208
209 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
210 {
211 write_lock(&chan_list_lock);
212
213 chan->scid = scid;
214
215 write_unlock(&chan_list_lock);
216
217 return 0;
218 }
219
220 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
221 {
222 u16 cid, dyn_end;
223
224 if (conn->hcon->type == LE_LINK)
225 dyn_end = L2CAP_CID_LE_DYN_END;
226 else
227 dyn_end = L2CAP_CID_DYN_END;
228
229 for (cid = L2CAP_CID_DYN_START; cid < dyn_end; cid++) {
230 if (!__l2cap_get_chan_by_scid(conn, cid))
231 return cid;
232 }
233
234 return 0;
235 }
236
/* Move @chan to @state and notify the channel owner (err = 0). */
static void l2cap_state_change(struct l2cap_chan *chan, int state)
{
	BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
	       state_to_string(state));

	chan->state = state;
	chan->ops->state_change(chan, state, 0);
}
245
246 static inline void l2cap_state_change_and_error(struct l2cap_chan *chan,
247 int state, int err)
248 {
249 chan->state = state;
250 chan->ops->state_change(chan, chan->state, err);
251 }
252
/* Report error @err to the channel owner without changing state. */
static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
{
	chan->ops->state_change(chan, chan->state, err);
}
257
258 static void __set_retrans_timer(struct l2cap_chan *chan)
259 {
260 if (!delayed_work_pending(&chan->monitor_timer) &&
261 chan->retrans_timeout) {
262 l2cap_set_timer(chan, &chan->retrans_timer,
263 msecs_to_jiffies(chan->retrans_timeout));
264 }
265 }
266
267 static void __set_monitor_timer(struct l2cap_chan *chan)
268 {
269 __clear_retrans_timer(chan);
270 if (chan->monitor_timeout) {
271 l2cap_set_timer(chan, &chan->monitor_timer,
272 msecs_to_jiffies(chan->monitor_timeout));
273 }
274 }
275
276 static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
277 u16 seq)
278 {
279 struct sk_buff *skb;
280
281 skb_queue_walk(head, skb) {
282 if (bt_cb(skb)->control.txseq == seq)
283 return skb;
284 }
285
286 return NULL;
287 }
288
289 /* ---- L2CAP sequence number lists ---- */
290
291 /* For ERTM, ordered lists of sequence numbers must be tracked for
292 * SREJ requests that are received and for frames that are to be
293 * retransmitted. These seq_list functions implement a singly-linked
294 * list in an array, where membership in the list can also be checked
295 * in constant time. Items can also be added to the tail of the list
296 * and removed from the head in constant time, without further memory
297 * allocs or frees.
298 */
299
300 static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
301 {
302 size_t alloc_size, i;
303
304 /* Allocated size is a power of 2 to map sequence numbers
305 * (which may be up to 14 bits) in to a smaller array that is
306 * sized for the negotiated ERTM transmit windows.
307 */
308 alloc_size = roundup_pow_of_two(size);
309
310 seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
311 if (!seq_list->list)
312 return -ENOMEM;
313
314 seq_list->mask = alloc_size - 1;
315 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
316 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
317 for (i = 0; i < alloc_size; i++)
318 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
319
320 return 0;
321 }
322
/* Release the backing array of a sequence-number list. */
static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
{
	kfree(seq_list->list);
}
327
328 static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
329 u16 seq)
330 {
331 /* Constant-time check for list membership */
332 return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
333 }
334
335 static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
336 {
337 u16 seq = seq_list->head;
338 u16 mask = seq_list->mask;
339
340 seq_list->head = seq_list->list[seq & mask];
341 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
342
343 if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
344 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
345 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
346 }
347
348 return seq;
349 }
350
351 static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
352 {
353 u16 i;
354
355 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
356 return;
357
358 for (i = 0; i <= seq_list->mask; i++)
359 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
360
361 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
362 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
363 }
364
365 static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
366 {
367 u16 mask = seq_list->mask;
368
369 /* All appends happen in constant time */
370
371 if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
372 return;
373
374 if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
375 seq_list->head = seq;
376 else
377 seq_list->list[seq_list->tail & mask] = seq;
378
379 seq_list->tail = seq;
380 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
381 }
382
/* Delayed-work handler for the channel timer: close the channel with an
 * error reason derived from the state it timed out in.
 */
static void l2cap_chan_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       chan_timer.work);
	struct l2cap_conn *conn = chan->conn;
	int reason;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	/* Lock order: conn->chan_lock before the channel lock */
	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(chan);

	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (chan->state == BT_CONNECT &&
		 chan->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	l2cap_chan_close(chan, reason);

	l2cap_chan_unlock(chan);

	/* close() is invoked without the channel lock held */
	chan->ops->close(chan);
	mutex_unlock(&conn->chan_lock);

	/* Drop the reference that kept the channel alive for this work */
	l2cap_chan_put(chan);
}
412
413 struct l2cap_chan *l2cap_chan_create(void)
414 {
415 struct l2cap_chan *chan;
416
417 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
418 if (!chan)
419 return NULL;
420
421 mutex_init(&chan->lock);
422
423 write_lock(&chan_list_lock);
424 list_add(&chan->global_l, &chan_list);
425 write_unlock(&chan_list_lock);
426
427 INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
428
429 chan->state = BT_OPEN;
430
431 kref_init(&chan->kref);
432
433 /* This flag is cleared in l2cap_chan_ready() */
434 set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
435
436 BT_DBG("chan %p", chan);
437
438 return chan;
439 }
440 EXPORT_SYMBOL_GPL(l2cap_chan_create);
441
442 static void l2cap_chan_destroy(struct kref *kref)
443 {
444 struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);
445
446 BT_DBG("chan %p", chan);
447
448 write_lock(&chan_list_lock);
449 list_del(&chan->global_l);
450 write_unlock(&chan_list_lock);
451
452 kfree(chan);
453 }
454
/* Take a reference on channel @c. */
void l2cap_chan_hold(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));

	kref_get(&c->kref);
}
461
/* Drop a reference on channel @c; frees it via l2cap_chan_destroy()
 * when this was the last reference.
 */
void l2cap_chan_put(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));

	kref_put(&c->kref, l2cap_chan_destroy);
}
EXPORT_SYMBOL_GPL(l2cap_chan_put);
469
/* Reset @chan to its protocol defaults (ERTM window/retry parameters,
 * security level, timeouts) before (re)configuration.
 */
void l2cap_chan_set_defaults(struct l2cap_chan *chan)
{
	chan->fcs = L2CAP_FCS_CRC16;
	chan->max_tx = L2CAP_DEFAULT_MAX_TX;
	chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
	chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
	/* Remote parameters start mirroring our own until negotiated */
	chan->remote_max_tx = chan->max_tx;
	chan->remote_tx_win = chan->tx_win;
	chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
	chan->sec_level = BT_SECURITY_LOW;
	chan->flush_to = L2CAP_DEFAULT_FLUSH_TO;
	chan->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
	chan->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
	chan->conf_state = 0;

	set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
}
EXPORT_SYMBOL_GPL(l2cap_chan_set_defaults);
488
/* Initialize LE credit-based flow control state for @chan: clear SDU
 * reassembly, grant the default amount of rx credits and cap the MPS
 * at the channel's incoming MTU.
 */
static void l2cap_le_flowctl_init(struct l2cap_chan *chan)
{
	chan->sdu = NULL;
	chan->sdu_last_frag = NULL;
	chan->sdu_len = 0;
	/* tx credits are granted by the peer, none until then */
	chan->tx_credits = 0;
	chan->rx_credits = le_max_credits;
	chan->mps = min_t(u16, chan->imtu, le_default_mps);

	skb_queue_head_init(&chan->tx_q);
}
500
/* Attach @chan to @conn and set up CIDs/MTU according to the channel
 * type. Caller must hold conn->chan_lock (see l2cap_chan_add()).
 */
void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
	       __le16_to_cpu(chan->psm), chan->dcid);

	/* Default disconnect reason until something more specific occurs */
	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	chan->conn = conn;

	switch (chan->chan_type) {
	case L2CAP_CHAN_CONN_ORIENTED:
		/* Alloc CID for connection-oriented socket */
		chan->scid = l2cap_alloc_cid(conn);
		if (conn->hcon->type == ACL_LINK)
			chan->omtu = L2CAP_DEFAULT_MTU;
		break;

	case L2CAP_CHAN_CONN_LESS:
		/* Connectionless socket */
		chan->scid = L2CAP_CID_CONN_LESS;
		chan->dcid = L2CAP_CID_CONN_LESS;
		chan->omtu = L2CAP_DEFAULT_MTU;
		break;

	case L2CAP_CHAN_FIXED:
		/* Caller will set CID and CID specific MTU values */
		break;

	default:
		/* Raw socket can send/recv signalling messages only */
		chan->scid = L2CAP_CID_SIGNALING;
		chan->dcid = L2CAP_CID_SIGNALING;
		chan->omtu = L2CAP_DEFAULT_MTU;
	}

	/* Default best-effort extended flow spec parameters */
	chan->local_id = L2CAP_BESTEFFORT_ID;
	chan->local_stype = L2CAP_SERV_BESTEFFORT;
	chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
	chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
	chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
	chan->local_flush_to = L2CAP_EFS_DEFAULT_FLUSH_TO;

	/* The channel and hci_conn references taken here are dropped in
	 * l2cap_chan_del() when the channel leaves conn->chan_l.
	 */
	l2cap_chan_hold(chan);

	hci_conn_hold(conn->hcon);

	list_add(&chan->list, &conn->chan_l);
}
549
550 void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
551 {
552 mutex_lock(&conn->chan_lock);
553 __l2cap_chan_add(conn, chan);
554 mutex_unlock(&conn->chan_lock);
555 }
556
/* Detach @chan from its connection (if any) and tear down all
 * mode-specific state. @err is passed to the owner's teardown op.
 */
void l2cap_chan_del(struct l2cap_chan *chan, int err)
{
	struct l2cap_conn *conn = chan->conn;

	__clear_chan_timer(chan);

	BT_DBG("chan %p, conn %p, err %d", chan, conn, err);

	if (conn) {
		struct amp_mgr *mgr = conn->hcon->amp_mgr;
		/* Delete from channel list */
		list_del(&chan->list);

		/* Drop the reference taken in __l2cap_chan_add() */
		l2cap_chan_put(chan);

		chan->conn = NULL;

		/* NOTE(review): the A2MP channel skips the hci_conn drop —
		 * its connection reference appears to be managed by the AMP
		 * manager; confirm against a2mp.c.
		 */
		if (chan->scid != L2CAP_CID_A2MP)
			hci_conn_drop(conn->hcon);

		if (mgr && mgr->bredr_chan == chan)
			mgr->bredr_chan = NULL;
	}

	if (chan->hs_hchan) {
		struct hci_chan *hs_hchan = chan->hs_hchan;

		BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan);
		amp_disconnect_logical_link(hs_hchan);
	}

	chan->ops->teardown(chan, err);

	/* Nothing below was ever set up if configuration never completed */
	if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
		return;

	switch(chan->mode) {
	case L2CAP_MODE_BASIC:
		break;

	case L2CAP_MODE_LE_FLOWCTL:
		skb_queue_purge(&chan->tx_q);
		break;

	case L2CAP_MODE_ERTM:
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);

		skb_queue_purge(&chan->srej_q);

		l2cap_seq_list_free(&chan->srej_list);
		l2cap_seq_list_free(&chan->retrans_list);

		/* fall through */

	case L2CAP_MODE_STREAMING:
		skb_queue_purge(&chan->tx_q);
		break;
	}

	return;
}
EXPORT_SYMBOL_GPL(l2cap_chan_del);
621
622 void l2cap_conn_update_id_addr(struct hci_conn *hcon)
623 {
624 struct l2cap_conn *conn = hcon->l2cap_data;
625 struct l2cap_chan *chan;
626
627 mutex_lock(&conn->chan_lock);
628
629 list_for_each_entry(chan, &conn->chan_l, list) {
630 l2cap_chan_lock(chan);
631 bacpy(&chan->dst, &hcon->dst);
632 chan->dst_type = bdaddr_type(hcon, hcon->dst_type);
633 l2cap_chan_unlock(chan);
634 }
635
636 mutex_unlock(&conn->chan_lock);
637 }
638
639 static void l2cap_chan_le_connect_reject(struct l2cap_chan *chan)
640 {
641 struct l2cap_conn *conn = chan->conn;
642 struct l2cap_le_conn_rsp rsp;
643 u16 result;
644
645 if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
646 result = L2CAP_CR_AUTHORIZATION;
647 else
648 result = L2CAP_CR_BAD_PSM;
649
650 l2cap_state_change(chan, BT_DISCONN);
651
652 rsp.dcid = cpu_to_le16(chan->scid);
653 rsp.mtu = cpu_to_le16(chan->imtu);
654 rsp.mps = cpu_to_le16(chan->mps);
655 rsp.credits = cpu_to_le16(chan->rx_credits);
656 rsp.result = cpu_to_le16(result);
657
658 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
659 &rsp);
660 }
661
662 static void l2cap_chan_connect_reject(struct l2cap_chan *chan)
663 {
664 struct l2cap_conn *conn = chan->conn;
665 struct l2cap_conn_rsp rsp;
666 u16 result;
667
668 if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
669 result = L2CAP_CR_SEC_BLOCK;
670 else
671 result = L2CAP_CR_BAD_PSM;
672
673 l2cap_state_change(chan, BT_DISCONN);
674
675 rsp.scid = cpu_to_le16(chan->dcid);
676 rsp.dcid = cpu_to_le16(chan->scid);
677 rsp.result = cpu_to_le16(result);
678 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
679
680 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
681 }
682
/* Close @chan with error @reason, taking the appropriate shutdown path
 * for its current state. Caller holds the channel lock.
 */
void l2cap_chan_close(struct l2cap_chan *chan, int reason)
{
	struct l2cap_conn *conn = chan->conn;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	switch (chan->state) {
	case BT_LISTEN:
		chan->ops->teardown(chan, 0);
		break;

	case BT_CONNECTED:
	case BT_CONFIG:
		/* Established connection-oriented channels go through a
		 * disconnect handshake; everything else is torn down now.
		 */
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
			__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
			l2cap_send_disconn_req(chan, reason);
		} else
			l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT2:
		/* An incoming request is pending: send an explicit reject
		 * matched to the transport before deleting the channel.
		 */
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
			if (conn->hcon->type == ACL_LINK)
				l2cap_chan_connect_reject(chan);
			else if (conn->hcon->type == LE_LINK)
				l2cap_chan_le_connect_reject(chan);
		}

		l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT:
	case BT_DISCONN:
		l2cap_chan_del(chan, reason);
		break;

	default:
		chan->ops->teardown(chan, 0);
		break;
	}
}
EXPORT_SYMBOL(l2cap_chan_close);
725
/* Map the channel type and security level to the HCI authentication
 * requirement used when securing the link.
 */
static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
{
	switch (chan->chan_type) {
	case L2CAP_CHAN_RAW:
		switch (chan->sec_level) {
		case BT_SECURITY_HIGH:
		case BT_SECURITY_FIPS:
			return HCI_AT_DEDICATED_BONDING_MITM;
		case BT_SECURITY_MEDIUM:
			return HCI_AT_DEDICATED_BONDING;
		default:
			return HCI_AT_NO_BONDING;
		}
		break;
	case L2CAP_CHAN_CONN_LESS:
		/* 3DSP connectionless channels never require more than
		 * SDP-level security.
		 */
		if (chan->psm == cpu_to_le16(L2CAP_PSM_3DSP)) {
			if (chan->sec_level == BT_SECURITY_LOW)
				chan->sec_level = BT_SECURITY_SDP;
		}
		if (chan->sec_level == BT_SECURITY_HIGH ||
		    chan->sec_level == BT_SECURITY_FIPS)
			return HCI_AT_NO_BONDING_MITM;
		else
			return HCI_AT_NO_BONDING;
		break;
	case L2CAP_CHAN_CONN_ORIENTED:
		/* SDP channels get no-bonding requirements; all other
		 * PSMs fall through to the general bonding rules below.
		 */
		if (chan->psm == cpu_to_le16(L2CAP_PSM_SDP)) {
			if (chan->sec_level == BT_SECURITY_LOW)
				chan->sec_level = BT_SECURITY_SDP;

			if (chan->sec_level == BT_SECURITY_HIGH ||
			    chan->sec_level == BT_SECURITY_FIPS)
				return HCI_AT_NO_BONDING_MITM;
			else
				return HCI_AT_NO_BONDING;
		}
		/* fall through */
	default:
		switch (chan->sec_level) {
		case BT_SECURITY_HIGH:
		case BT_SECURITY_FIPS:
			return HCI_AT_GENERAL_BONDING_MITM;
		case BT_SECURITY_MEDIUM:
			return HCI_AT_GENERAL_BONDING;
		default:
			return HCI_AT_NO_BONDING;
		}
		break;
	}
}
776
777 /* Service level security */
778 int l2cap_chan_check_security(struct l2cap_chan *chan)
779 {
780 struct l2cap_conn *conn = chan->conn;
781 __u8 auth_type;
782
783 if (conn->hcon->type == LE_LINK)
784 return smp_conn_security(conn->hcon, chan->sec_level);
785
786 auth_type = l2cap_get_auth_type(chan);
787
788 return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
789 }
790
791 static u8 l2cap_get_ident(struct l2cap_conn *conn)
792 {
793 u8 id;
794
795 /* Get next available identificator.
796 * 1 - 128 are used by kernel.
797 * 129 - 199 are reserved.
798 * 200 - 254 are used by utilities like l2ping, etc.
799 */
800
801 spin_lock(&conn->lock);
802
803 if (++conn->tx_ident > 128)
804 conn->tx_ident = 1;
805
806 id = conn->tx_ident;
807
808 spin_unlock(&conn->lock);
809
810 return id;
811 }
812
813 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
814 void *data)
815 {
816 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
817 u8 flags;
818
819 BT_DBG("code 0x%2.2x", code);
820
821 if (!skb)
822 return;
823
824 if (lmp_no_flush_capable(conn->hcon->hdev))
825 flags = ACL_START_NO_FLUSH;
826 else
827 flags = ACL_START;
828
829 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
830 skb->priority = HCI_PRIO_MAX;
831
832 hci_send_acl(conn->hchan, skb, flags);
833 }
834
835 static bool __chan_is_moving(struct l2cap_chan *chan)
836 {
837 return chan->move_state != L2CAP_MOVE_STABLE &&
838 chan->move_state != L2CAP_MOVE_WAIT_PREPARE;
839 }
840
841 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
842 {
843 struct hci_conn *hcon = chan->conn->hcon;
844 u16 flags;
845
846 BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
847 skb->priority);
848
849 if (chan->hs_hcon && !__chan_is_moving(chan)) {
850 if (chan->hs_hchan)
851 hci_send_acl(chan->hs_hchan, skb, ACL_COMPLETE);
852 else
853 kfree_skb(skb);
854
855 return;
856 }
857
858 if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
859 lmp_no_flush_capable(hcon->hdev))
860 flags = ACL_START_NO_FLUSH;
861 else
862 flags = ACL_START;
863
864 bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
865 hci_send_acl(chan->conn->hchan, skb, flags);
866 }
867
868 static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
869 {
870 control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
871 control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
872
873 if (enh & L2CAP_CTRL_FRAME_TYPE) {
874 /* S-Frame */
875 control->sframe = 1;
876 control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
877 control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
878
879 control->sar = 0;
880 control->txseq = 0;
881 } else {
882 /* I-Frame */
883 control->sframe = 0;
884 control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
885 control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
886
887 control->poll = 0;
888 control->super = 0;
889 }
890 }
891
892 static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
893 {
894 control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
895 control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
896
897 if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
898 /* S-Frame */
899 control->sframe = 1;
900 control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
901 control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
902
903 control->sar = 0;
904 control->txseq = 0;
905 } else {
906 /* I-Frame */
907 control->sframe = 0;
908 control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
909 control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
910
911 control->poll = 0;
912 control->super = 0;
913 }
914 }
915
916 static inline void __unpack_control(struct l2cap_chan *chan,
917 struct sk_buff *skb)
918 {
919 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
920 __unpack_extended_control(get_unaligned_le32(skb->data),
921 &bt_cb(skb)->control);
922 skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
923 } else {
924 __unpack_enhanced_control(get_unaligned_le16(skb->data),
925 &bt_cb(skb)->control);
926 skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
927 }
928 }
929
930 static u32 __pack_extended_control(struct l2cap_ctrl *control)
931 {
932 u32 packed;
933
934 packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
935 packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
936
937 if (control->sframe) {
938 packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
939 packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
940 packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
941 } else {
942 packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
943 packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
944 }
945
946 return packed;
947 }
948
949 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
950 {
951 u16 packed;
952
953 packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
954 packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
955
956 if (control->sframe) {
957 packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
958 packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
959 packed |= L2CAP_CTRL_FRAME_TYPE;
960 } else {
961 packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
962 packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
963 }
964
965 return packed;
966 }
967
968 static inline void __pack_control(struct l2cap_chan *chan,
969 struct l2cap_ctrl *control,
970 struct sk_buff *skb)
971 {
972 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
973 put_unaligned_le32(__pack_extended_control(control),
974 skb->data + L2CAP_HDR_SIZE);
975 } else {
976 put_unaligned_le16(__pack_enhanced_control(control),
977 skb->data + L2CAP_HDR_SIZE);
978 }
979 }
980
981 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
982 {
983 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
984 return L2CAP_EXT_HDR_SIZE;
985 else
986 return L2CAP_ENH_HDR_SIZE;
987 }
988
989 static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
990 u32 control)
991 {
992 struct sk_buff *skb;
993 struct l2cap_hdr *lh;
994 int hlen = __ertm_hdr_size(chan);
995
996 if (chan->fcs == L2CAP_FCS_CRC16)
997 hlen += L2CAP_FCS_SIZE;
998
999 skb = bt_skb_alloc(hlen, GFP_KERNEL);
1000
1001 if (!skb)
1002 return ERR_PTR(-ENOMEM);
1003
1004 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1005 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
1006 lh->cid = cpu_to_le16(chan->dcid);
1007
1008 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1009 put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
1010 else
1011 put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
1012
1013 if (chan->fcs == L2CAP_FCS_CRC16) {
1014 u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
1015 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1016 }
1017
1018 skb->priority = HCI_PRIO_MAX;
1019 return skb;
1020 }
1021
/* Send the supervisory frame described by @control, updating the
 * channel's ack/busy bookkeeping as a side effect.
 */
static void l2cap_send_sframe(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;
	u32 control_field;

	BT_DBG("chan %p, control %p", chan, control);

	if (!control->sframe)
		return;

	/* Do not transmit while an AMP channel move is in progress */
	if (__chan_is_moving(chan))
		return;

	/* Piggy-back a pending F-bit on any non-poll S-frame */
	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
	    !control->poll)
		control->final = 1;

	if (control->super == L2CAP_SUPER_RR)
		clear_bit(CONN_RNR_SENT, &chan->conn_state);
	else if (control->super == L2CAP_SUPER_RNR)
		set_bit(CONN_RNR_SENT, &chan->conn_state);

	/* RR/RNR/REJ acknowledge up to reqseq, so the pending ack timer
	 * can be cancelled; SREJ does not acknowledge anything.
	 */
	if (control->super != L2CAP_SUPER_SREJ) {
		chan->last_acked_seq = control->reqseq;
		__clear_ack_timer(chan);
	}

	BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
	       control->final, control->poll, control->super);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		control_field = __pack_extended_control(control);
	else
		control_field = __pack_enhanced_control(control);

	skb = l2cap_create_sframe_pdu(chan, control_field);
	if (!IS_ERR(skb))
		l2cap_do_send(chan, skb);
}
1062
1063 static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
1064 {
1065 struct l2cap_ctrl control;
1066
1067 BT_DBG("chan %p, poll %d", chan, poll);
1068
1069 memset(&control, 0, sizeof(control));
1070 control.sframe = 1;
1071 control.poll = poll;
1072
1073 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
1074 control.super = L2CAP_SUPER_RNR;
1075 else
1076 control.super = L2CAP_SUPER_RR;
1077
1078 control.reqseq = chan->buffer_seq;
1079 l2cap_send_sframe(chan, &control);
1080 }
1081
1082 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
1083 {
1084 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
1085 }
1086
1087 static bool __amp_capable(struct l2cap_chan *chan)
1088 {
1089 struct l2cap_conn *conn = chan->conn;
1090 struct hci_dev *hdev;
1091 bool amp_available = false;
1092
1093 if (!conn->hs_enabled)
1094 return false;
1095
1096 if (!(conn->fixed_chan_mask & L2CAP_FC_A2MP))
1097 return false;
1098
1099 read_lock(&hci_dev_list_lock);
1100 list_for_each_entry(hdev, &hci_dev_list, list) {
1101 if (hdev->amp_type != AMP_TYPE_BREDR &&
1102 test_bit(HCI_UP, &hdev->flags)) {
1103 amp_available = true;
1104 break;
1105 }
1106 }
1107 read_unlock(&hci_dev_list_lock);
1108
1109 if (chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED)
1110 return amp_available;
1111
1112 return false;
1113 }
1114
/* Validate extended flow spec parameters for @chan. Validation is not
 * implemented yet, so any proposed EFS is currently accepted.
 */
static bool l2cap_check_efs(struct l2cap_chan *chan)
{
	/* Check EFS parameters */
	return true;
}
1120
1121 void l2cap_send_conn_req(struct l2cap_chan *chan)
1122 {
1123 struct l2cap_conn *conn = chan->conn;
1124 struct l2cap_conn_req req;
1125
1126 req.scid = cpu_to_le16(chan->scid);
1127 req.psm = chan->psm;
1128
1129 chan->ident = l2cap_get_ident(conn);
1130
1131 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
1132
1133 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
1134 }
1135
1136 static void l2cap_send_create_chan_req(struct l2cap_chan *chan, u8 amp_id)
1137 {
1138 struct l2cap_create_chan_req req;
1139 req.scid = cpu_to_le16(chan->scid);
1140 req.psm = chan->psm;
1141 req.amp_id = amp_id;
1142
1143 chan->ident = l2cap_get_ident(chan->conn);
1144
1145 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_REQ,
1146 sizeof(req), &req);
1147 }
1148
/* Prepare an ERTM channel for an AMP move: stop all timers, rewind
 * retransmission state and pause transmission until the move finishes.
 * No-op for non-ERTM channels.
 */
static void l2cap_move_setup(struct l2cap_chan *chan)
{
	struct sk_buff *skb;

	BT_DBG("chan %p", chan);

	if (chan->mode != L2CAP_MODE_ERTM)
		return;

	__clear_retrans_timer(chan);
	__clear_monitor_timer(chan);
	__clear_ack_timer(chan);

	chan->retry_count = 0;
	/* Reset the retry counter of every already-transmitted frame so
	 * it will be retransmitted once after the move; stop at the first
	 * frame that was never sent (retries == 0).
	 */
	skb_queue_walk(&chan->tx_q, skb) {
		if (bt_cb(skb)->control.retries)
			bt_cb(skb)->control.retries = 1;
		else
			break;
	}

	chan->expected_tx_seq = chan->buffer_seq;

	clear_bit(CONN_REJ_ACT, &chan->conn_state);
	clear_bit(CONN_SREJ_ACT, &chan->conn_state);
	l2cap_seq_list_clear(&chan->retrans_list);
	l2cap_seq_list_clear(&chan->srej_list);
	skb_queue_purge(&chan->srej_q);

	chan->tx_state = L2CAP_TX_STATE_XMIT;
	chan->rx_state = L2CAP_RX_STATE_MOVE;

	/* Treat the remote as busy so nothing is sent during the move */
	set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
}
1183
1184 static void l2cap_move_done(struct l2cap_chan *chan)
1185 {
1186 u8 move_role = chan->move_role;
1187 BT_DBG("chan %p", chan);
1188
1189 chan->move_state = L2CAP_MOVE_STABLE;
1190 chan->move_role = L2CAP_MOVE_ROLE_NONE;
1191
1192 if (chan->mode != L2CAP_MODE_ERTM)
1193 return;
1194
1195 switch (move_role) {
1196 case L2CAP_MOVE_ROLE_INITIATOR:
1197 l2cap_tx(chan, NULL, NULL, L2CAP_EV_EXPLICIT_POLL);
1198 chan->rx_state = L2CAP_RX_STATE_WAIT_F;
1199 break;
1200 case L2CAP_MOVE_ROLE_RESPONDER:
1201 chan->rx_state = L2CAP_RX_STATE_WAIT_P;
1202 break;
1203 }
1204 }
1205
/* Transition a channel to BT_CONNECTED and notify its owner.
 *
 * An LE flow-control channel with no transmit credits is suspended
 * first so the owner does not start sending before credits arrive.
 */
static void l2cap_chan_ready(struct l2cap_chan *chan)
{
	/* This clears all conf flags, including CONF_NOT_COMPLETE */
	chan->conf_state = 0;
	__clear_chan_timer(chan);

	if (chan->mode == L2CAP_MODE_LE_FLOWCTL && !chan->tx_credits)
		chan->ops->suspend(chan);

	chan->state = BT_CONNECTED;

	chan->ops->ready(chan);
}
1219
/* Send an LE Credit Based Connection Request for this channel.
 *
 * The FLAG_LE_CONN_REQ_SENT test-and-set guarantees at most one
 * request is ever sent per channel.
 */
static void l2cap_le_connect(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_le_conn_req req;

	if (test_and_set_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags))
		return;

	req.psm     = chan->psm;
	req.scid    = cpu_to_le16(chan->scid);
	req.mtu     = cpu_to_le16(chan->imtu);
	req.mps     = cpu_to_le16(chan->mps);
	/* Initial credits we grant the peer for sending to us */
	req.credits = cpu_to_le16(chan->rx_credits);

	chan->ident = l2cap_get_ident(conn);

	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_REQ,
		       sizeof(req), &req);
}
1239
1240 static void l2cap_le_start(struct l2cap_chan *chan)
1241 {
1242 struct l2cap_conn *conn = chan->conn;
1243
1244 if (!smp_conn_security(conn->hcon, chan->sec_level))
1245 return;
1246
1247 if (!chan->psm) {
1248 l2cap_chan_ready(chan);
1249 return;
1250 }
1251
1252 if (chan->state == BT_CONNECT)
1253 l2cap_le_connect(chan);
1254 }
1255
1256 static void l2cap_start_connection(struct l2cap_chan *chan)
1257 {
1258 if (__amp_capable(chan)) {
1259 BT_DBG("chan %p AMP capable: discover AMPs", chan);
1260 a2mp_discover_amp(chan);
1261 } else if (chan->conn->hcon->type == LE_LINK) {
1262 l2cap_le_start(chan);
1263 } else {
1264 l2cap_send_conn_req(chan);
1265 }
1266 }
1267
/* Start connection establishment for a channel.
 *
 * LE links go straight to the LE path.  On BR/EDR we must know the
 * remote feature mask first: if the information exchange has not been
 * started yet, send an Information Request and retry from the info
 * response/timeout path; if it is still in flight, just wait.
 */
static void l2cap_do_start(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;

	if (conn->hcon->type == LE_LINK) {
		l2cap_le_start(chan);
		return;
	}

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
		/* Feature exchange in progress — wait for it to finish */
		if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
			return;

		if (l2cap_chan_check_security(chan) &&
		    __l2cap_no_conn_pending(chan)) {
			l2cap_start_connection(chan);
		}
	} else {
		struct l2cap_info_req req;
		req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		/* Bound the wait for the info response */
		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

		l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
			       sizeof(req), &req);
	}
}
1298
1299 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1300 {
1301 u32 local_feat_mask = l2cap_feat_mask;
1302 if (!disable_ertm)
1303 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1304
1305 switch (mode) {
1306 case L2CAP_MODE_ERTM:
1307 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1308 case L2CAP_MODE_STREAMING:
1309 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
1310 default:
1311 return 0x00;
1312 }
1313 }
1314
/* Send a Disconnection Request for @chan and move it to BT_DISCONN
 * with @err as the pending error.
 *
 * ERTM timers are stopped first so no retransmission fires during
 * teardown.  The A2MP fixed channel has no real disconnect procedure,
 * so it only changes state.
 */
static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_disconn_req req;

	if (!conn)
		return;

	if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);
	}

	if (chan->scid == L2CAP_CID_A2MP) {
		l2cap_state_change(chan, BT_DISCONN);
		return;
	}

	req.dcid = cpu_to_le16(chan->dcid);
	req.scid = cpu_to_le16(chan->scid);
	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
		       sizeof(req), &req);

	l2cap_state_change_and_error(chan, BT_DISCONN, err);
}
1341
1342 /* ---- L2CAP connections ---- */
/* ---- L2CAP connections ---- */

/* Walk every connection-oriented channel on @conn and advance its
 * connection state machine.
 *
 * Called once the feature exchange has finished (or timed out):
 * channels in BT_CONNECT that now pass the security check are started;
 * channels in BT_CONNECT2 (incoming, response deferred) get their
 * Connection Response sent, followed by our Configure Request when
 * the connection was accepted.
 *
 * Locking: conn->chan_lock over the walk, each channel locked in turn.
 */
static void l2cap_conn_start(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan, *tmp;

	BT_DBG("conn %p", conn);

	mutex_lock(&conn->chan_lock);

	/* _safe variant: l2cap_chan_close() may remove entries */
	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			if (!l2cap_chan_check_security(chan) ||
			    !__l2cap_no_conn_pending(chan)) {
				l2cap_chan_unlock(chan);
				continue;
			}

			/* Channels that insist on a state-2 device mode the
			 * remote does not support cannot proceed — close.
			 */
			if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
			    && test_bit(CONF_STATE2_DEVICE,
					&chan->conf_state)) {
				l2cap_chan_close(chan, ECONNRESET);
				l2cap_chan_unlock(chan);
				continue;
			}

			l2cap_start_connection(chan);

		} else if (chan->state == BT_CONNECT2) {
			struct l2cap_conn_rsp rsp;
			char buf[128];
			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);

			if (l2cap_chan_check_security(chan)) {
				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
					/* Owner must accept first */
					rsp.result = cpu_to_le16(L2CAP_CR_PEND);
					rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
					chan->ops->defer(chan);

				} else {
					l2cap_state_change(chan, BT_CONFIG);
					rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
					rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
				}
			} else {
				/* Still authenticating */
				rsp.result = cpu_to_le16(L2CAP_CR_PEND);
				rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
			}

			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			/* Only send our Configure Request once, and only for
			 * an accepted connection.
			 */
			if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
			    rsp.result != L2CAP_CR_SUCCESS) {
				l2cap_chan_unlock(chan);
				continue;
			}

			set_bit(CONF_REQ_SENT, &chan->conf_state);
			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
				       l2cap_build_conf_req(chan, buf), buf);
			chan->num_conf_req++;
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
1418
1419 /* Find socket with cid and source/destination bdaddr.
1420 * Returns closest match, locked.
1421 */
1422 static struct l2cap_chan *l2cap_global_chan_by_scid(int state, u16 cid,
1423 bdaddr_t *src,
1424 bdaddr_t *dst)
1425 {
1426 struct l2cap_chan *c, *c1 = NULL;
1427
1428 read_lock(&chan_list_lock);
1429
1430 list_for_each_entry(c, &chan_list, global_l) {
1431 if (state && c->state != state)
1432 continue;
1433
1434 if (c->scid == cid) {
1435 int src_match, dst_match;
1436 int src_any, dst_any;
1437
1438 /* Exact match. */
1439 src_match = !bacmp(&c->src, src);
1440 dst_match = !bacmp(&c->dst, dst);
1441 if (src_match && dst_match) {
1442 read_unlock(&chan_list_lock);
1443 return c;
1444 }
1445
1446 /* Closest match */
1447 src_any = !bacmp(&c->src, BDADDR_ANY);
1448 dst_any = !bacmp(&c->dst, BDADDR_ANY);
1449 if ((src_match && dst_any) || (src_any && dst_match) ||
1450 (src_any && dst_any))
1451 c1 = c;
1452 }
1453 }
1454
1455 read_unlock(&chan_list_lock);
1456
1457 return c1;
1458 }
1459
/* Handle a newly ready LE link: hand the ATT fixed channel to a
 * listening server socket (unless a client ATT channel already exists
 * or the remote device is blacklisted) and, as slave, trigger a
 * connection parameter update if the current interval is out of the
 * configured range.
 */
static void l2cap_le_conn_ready(struct l2cap_conn *conn)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_chan *chan, *pchan;
	u8 dst_type;

	BT_DBG("");

	/* Check if we have socket listening on cid */
	pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_ATT,
					  &hcon->src, &hcon->dst);
	if (!pchan)
		return;

	/* Client ATT sockets should override the server one */
	if (__l2cap_get_chan_by_dcid(conn, L2CAP_CID_ATT))
		return;

	dst_type = bdaddr_type(hcon, hcon->dst_type);

	/* If device is blocked, do not create a channel for it */
	if (hci_blacklist_lookup(hcon->hdev, &hcon->dst, dst_type))
		return;

	/* For LE slave connections, make sure the connection interval
	 * is in the range of the minimum and maximum interval that has
	 * been configured for this connection. If not, then trigger
	 * the connection update procedure.
	 */
	if (!test_bit(HCI_CONN_MASTER, &hcon->flags) &&
	    (hcon->le_conn_interval < hcon->le_conn_min_interval ||
	     hcon->le_conn_interval > hcon->le_conn_max_interval)) {
		struct l2cap_conn_param_update_req req;

		req.min = cpu_to_le16(hcon->le_conn_min_interval);
		req.max = cpu_to_le16(hcon->le_conn_max_interval);
		req.latency = cpu_to_le16(hcon->le_conn_latency);
		req.to_multiplier = cpu_to_le16(hcon->le_supv_timeout);

		l2cap_send_cmd(conn, l2cap_get_ident(conn),
			       L2CAP_CONN_PARAM_UPDATE_REQ, sizeof(req), &req);
	}

	l2cap_chan_lock(pchan);

	/* Spawn the per-connection channel off the listening one */
	chan = pchan->ops->new_connection(pchan);
	if (!chan)
		goto clean;

	bacpy(&chan->src, &hcon->src);
	bacpy(&chan->dst, &hcon->dst);
	chan->src_type = bdaddr_type(hcon, hcon->src_type);
	chan->dst_type = dst_type;

	__l2cap_chan_add(conn, chan);

clean:
	l2cap_chan_unlock(pchan);
}
1519
/* Handle a link that has just become ready: start security where
 * needed, kick every existing channel forward, and finally schedule
 * processing of any frames that were queued while the link was being
 * set up.
 */
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan;
	struct hci_conn *hcon = conn->hcon;

	BT_DBG("conn %p", conn);

	/* For outgoing pairing which doesn't necessarily have an
	 * associated socket (e.g. mgmt_pair_device).
	 */
	if (hcon->out && hcon->type == LE_LINK)
		smp_conn_security(hcon, hcon->pending_sec_level);

	mutex_lock(&conn->chan_lock);

	if (hcon->type == LE_LINK)
		l2cap_le_conn_ready(conn);

	list_for_each_entry(chan, &conn->chan_l, list) {

		l2cap_chan_lock(chan);

		/* A2MP fixed channel manages itself */
		if (chan->scid == L2CAP_CID_A2MP) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (hcon->type == LE_LINK) {
			l2cap_le_start(chan);
		} else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			l2cap_chan_ready(chan);

		} else if (chan->state == BT_CONNECT) {
			l2cap_do_start(chan);
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);

	/* Process frames received before the connection became ready */
	queue_work(hcon->hdev->workqueue, &conn->pending_rx_work);
}
1563
/* Notify sockets that we cannot guarantee reliability anymore */
1565 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1566 {
1567 struct l2cap_chan *chan;
1568
1569 BT_DBG("conn %p", conn);
1570
1571 mutex_lock(&conn->chan_lock);
1572
1573 list_for_each_entry(chan, &conn->chan_l, list) {
1574 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1575 l2cap_chan_set_err(chan, err);
1576 }
1577
1578 mutex_unlock(&conn->chan_lock);
1579 }
1580
/* Information Request timed out: treat the feature exchange as done
 * (with whatever defaults remain) and let the pending channels proceed.
 */
static void l2cap_info_timeout(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       info_timer.work);

	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
	conn->info_ident = 0;

	l2cap_conn_start(conn);
}
1591
1592 /*
1593 * l2cap_user
1594 * External modules can register l2cap_user objects on l2cap_conn. The ->probe
1595 * callback is called during registration. The ->remove callback is called
1596 * during unregistration.
1597 * An l2cap_user object can either be explicitly unregistered or when the
1598 * underlying l2cap_conn object is deleted. This guarantees that l2cap->hcon,
1599 * l2cap->hchan, .. are valid as long as the remove callback hasn't been called.
1600 * External modules must own a reference to the l2cap_conn object if they intend
1601 * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
1602 * any time if they don't.
1603 */
1604
/* Register an l2cap_user on @conn.
 *
 * Returns 0 on success, -EINVAL if the user object is already linked,
 * -ENODEV if the connection has already been torn down, or the error
 * from the user's ->probe() callback.
 */
int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
{
	struct hci_dev *hdev = conn->hcon->hdev;
	int ret;

	/* We need to check whether l2cap_conn is registered. If it is not, we
	 * must not register the l2cap_user. l2cap_conn_del() unregisters
	 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
	 * relies on the parent hci_conn object to be locked. This itself relies
	 * on the hci_dev object to be locked. So we must lock the hci device
	 * here, too. */

	hci_dev_lock(hdev);

	/* Non-NULL list pointers mean the user is already registered */
	if (user->list.next || user->list.prev) {
		ret = -EINVAL;
		goto out_unlock;
	}

	/* conn->hchan is NULL after l2cap_conn_del() was called */
	if (!conn->hchan) {
		ret = -ENODEV;
		goto out_unlock;
	}

	ret = user->probe(conn, user);
	if (ret)
		goto out_unlock;

	list_add(&user->list, &conn->users);
	ret = 0;

out_unlock:
	hci_dev_unlock(hdev);
	return ret;
}
EXPORT_SYMBOL(l2cap_register_user);
1642
/* Unregister an l2cap_user from @conn.
 *
 * A no-op if the user was never registered (NULL list pointers).  The
 * list pointers are cleared before invoking ->remove() so the object
 * reads as unregistered inside the callback.
 */
void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
{
	struct hci_dev *hdev = conn->hcon->hdev;

	hci_dev_lock(hdev);

	if (!user->list.next || !user->list.prev)
		goto out_unlock;

	list_del(&user->list);
	user->list.next = NULL;
	user->list.prev = NULL;
	user->remove(conn, user);

out_unlock:
	hci_dev_unlock(hdev);
}
EXPORT_SYMBOL(l2cap_unregister_user);
1661
/* Detach every registered l2cap_user from @conn, clearing its list
 * pointers (so it reads as unregistered) before calling ->remove().
 * Caller is expected to hold the locks described in
 * l2cap_register_user().
 */
static void l2cap_unregister_all_users(struct l2cap_conn *conn)
{
	struct l2cap_user *user;

	while (!list_empty(&conn->users)) {
		user = list_first_entry(&conn->users, struct l2cap_user, list);
		list_del(&user->list);
		user->list.next = NULL;
		user->list.prev = NULL;
		user->remove(conn, user);
	}
}
1674
/* Tear down the L2CAP connection attached to @hcon, delivering @err to
 * each channel as the disconnect reason.
 *
 * Frees queued RX data, detaches users, kills all channels, releases
 * the HCI channel and finally drops the connection reference.  Relies
 * on the hci_conn/hci_dev locking described in l2cap_register_user().
 */
static void l2cap_conn_del(struct hci_conn *hcon, int err)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan, *l;

	if (!conn)
		return;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	/* Drop any partially reassembled frame */
	kfree_skb(conn->rx_skb);

	skb_queue_purge(&conn->pending_rx);

	/* We can not call flush_work(&conn->pending_rx_work) here since we
	 * might block if we are running on a worker from the same workqueue
	 * pending_rx_work is waiting on.
	 */
	if (work_pending(&conn->pending_rx_work))
		cancel_work_sync(&conn->pending_rx_work);

	l2cap_unregister_all_users(conn);

	mutex_lock(&conn->chan_lock);

	/* Kill channels */
	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
		/* Hold a ref so the channel survives until ops->close() */
		l2cap_chan_hold(chan);
		l2cap_chan_lock(chan);

		l2cap_chan_del(chan, err);

		l2cap_chan_unlock(chan);

		chan->ops->close(chan);
		l2cap_chan_put(chan);
	}

	mutex_unlock(&conn->chan_lock);

	hci_chan_del(conn->hchan);

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		cancel_delayed_work_sync(&conn->info_timer);

	if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) {
		cancel_delayed_work_sync(&conn->security_timer);
		smp_chan_destroy(conn);
	}

	/* Clearing hchan marks the conn as unregistered for new users */
	hcon->l2cap_data = NULL;
	conn->hchan = NULL;
	l2cap_conn_put(conn);
}
1729
/* SMP security procedure timed out: destroy the SMP context and tear
 * the whole connection down with ETIMEDOUT.  The test_and_clear of
 * HCI_CONN_LE_SMP_PEND makes this a no-op if pairing already finished.
 */
static void security_timeout(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       security_timer.work);

	BT_DBG("conn %p", conn);

	if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags)) {
		smp_chan_destroy(conn);
		l2cap_conn_del(conn->hcon, ETIMEDOUT);
	}
}
1742
/* kref release callback: drop the hci_conn reference held by the
 * l2cap_conn and free it.  Runs when the last l2cap_conn_put() fires.
 */
static void l2cap_conn_free(struct kref *ref)
{
	struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);

	hci_conn_put(conn->hcon);
	kfree(conn);
}
1750
/* Take a reference on the connection object. */
void l2cap_conn_get(struct l2cap_conn *conn)
{
	kref_get(&conn->ref);
}
EXPORT_SYMBOL(l2cap_conn_get);
1756
/* Drop a reference; frees the connection via l2cap_conn_free() when
 * this was the last one.
 */
void l2cap_conn_put(struct l2cap_conn *conn)
{
	kref_put(&conn->ref, l2cap_conn_free);
}
EXPORT_SYMBOL(l2cap_conn_put);
1762
1763 /* ---- Socket interface ---- */
1764
1765 /* Find socket with psm and source / destination bdaddr.
1766 * Returns closest match.
1767 */
1768 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
1769 bdaddr_t *src,
1770 bdaddr_t *dst,
1771 u8 link_type)
1772 {
1773 struct l2cap_chan *c, *c1 = NULL;
1774
1775 read_lock(&chan_list_lock);
1776
1777 list_for_each_entry(c, &chan_list, global_l) {
1778 if (state && c->state != state)
1779 continue;
1780
1781 if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR)
1782 continue;
1783
1784 if (link_type == LE_LINK && c->src_type == BDADDR_BREDR)
1785 continue;
1786
1787 if (c->psm == psm) {
1788 int src_match, dst_match;
1789 int src_any, dst_any;
1790
1791 /* Exact match. */
1792 src_match = !bacmp(&c->src, src);
1793 dst_match = !bacmp(&c->dst, dst);
1794 if (src_match && dst_match) {
1795 read_unlock(&chan_list_lock);
1796 return c;
1797 }
1798
1799 /* Closest match */
1800 src_any = !bacmp(&c->src, BDADDR_ANY);
1801 dst_any = !bacmp(&c->dst, BDADDR_ANY);
1802 if ((src_match && dst_any) || (src_any && dst_match) ||
1803 (src_any && dst_any))
1804 c1 = c;
1805 }
1806 }
1807
1808 read_unlock(&chan_list_lock);
1809
1810 return c1;
1811 }
1812
/* ERTM monitor timer expired: feed the monitor-timeout event into the
 * transmit state machine.  The channel reference taken when the timer
 * was armed is released here; a channel already detached from its
 * connection is simply dropped.
 */
static void l2cap_monitor_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       monitor_timer.work);

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	if (!chan->conn) {
		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
		return;
	}

	l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
1833
/* ERTM retransmission timer expired: feed the retransmit-timeout event
 * into the transmit state machine.  Mirrors l2cap_monitor_timeout(),
 * including the release of the timer's channel reference.
 */
static void l2cap_retrans_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       retrans_timer.work);

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	if (!chan->conn) {
		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
		return;
	}

	l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
1853
/* Transmit the queued PDUs of a streaming-mode channel.
 *
 * Appends @skbs to the channel's tx queue and sends everything in it:
 * each frame gets the next tx sequence number packed into its control
 * field and, when enabled, a trailing FCS.  Streaming mode keeps no
 * copy for retransmission.  Sending is skipped entirely while the
 * channel is being moved to another controller.
 */
static void l2cap_streaming_send(struct l2cap_chan *chan,
				 struct sk_buff_head *skbs)
{
	struct sk_buff *skb;
	struct l2cap_ctrl *control;

	BT_DBG("chan %p, skbs %p", chan, skbs);

	if (__chan_is_moving(chan))
		return;

	skb_queue_splice_tail_init(skbs, &chan->tx_q);

	while (!skb_queue_empty(&chan->tx_q)) {

		skb = skb_dequeue(&chan->tx_q);

		bt_cb(skb)->control.retries = 1;
		control = &bt_cb(skb)->control;

		/* Streaming frames never acknowledge anything */
		control->reqseq = 0;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		l2cap_do_send(chan, skb);

		BT_DBG("Sent txseq %u", control->txseq);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->frames_sent++;
	}
}
1892
/* Transmit pending I-frames on an ERTM channel.
 *
 * Sends from tx_send_head while the remote's transmit window has room
 * and the TX state machine allows transmission.  Each frame is stamped
 * with txseq/reqseq (piggy-backing an ack), optionally gets an FCS,
 * and is sent as a clone so the original stays queued for possible
 * retransmission.
 *
 * Returns the number of frames sent, 0 when blocked (remote busy,
 * channel moving), or -ENOTCONN when the channel is not connected.
 */
static int l2cap_ertm_send(struct l2cap_chan *chan)
{
	struct sk_buff *skb, *tx_skb;
	struct l2cap_ctrl *control;
	int sent = 0;

	BT_DBG("chan %p", chan);

	if (chan->state != BT_CONNECTED)
		return -ENOTCONN;

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return 0;

	if (__chan_is_moving(chan))
		return 0;

	while (chan->tx_send_head &&
	       chan->unacked_frames < chan->remote_tx_win &&
	       chan->tx_state == L2CAP_TX_STATE_XMIT) {

		skb = chan->tx_send_head;

		bt_cb(skb)->control.retries = 1;
		control = &bt_cb(skb)->control;

		/* Set the F-bit at most once per poll */
		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control->final = 1;

		/* Piggy-back an acknowledgement of everything received */
		control->reqseq = chan->buffer_seq;
		chan->last_acked_seq = chan->buffer_seq;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		/* Clone after data has been modified. Data is assumed to be
		   read-only (for locking purposes) on cloned sk_buffs.
		 */
		tx_skb = skb_clone(skb, GFP_KERNEL);

		if (!tx_skb)
			break;

		__set_retrans_timer(chan);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->unacked_frames++;
		chan->frames_sent++;
		sent++;

		/* Advance tx_send_head; NULL means the queue is drained */
		if (skb_queue_is_last(&chan->tx_q, skb))
			chan->tx_send_head = NULL;
		else
			chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);

		l2cap_do_send(chan, tx_skb);
		BT_DBG("Sent txseq %u", control->txseq);
	}

	BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
	       chan->unacked_frames, skb_queue_len(&chan->tx_q));

	return sent;
}
1962
/* Retransmit every sequence number currently on the retrans_list.
 *
 * Each frame's retry count is bumped; exceeding chan->max_tx tears the
 * channel down with ECONNRESET.  The control field is refreshed with
 * the current reqseq (and F-bit if pending) and rewritten in place in
 * a writable copy/clone before sending.  Skipped entirely while the
 * remote is busy or the channel is moving.
 */
static void l2cap_ertm_resend(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	struct sk_buff *skb;
	struct sk_buff *tx_skb;
	u16 seq;

	BT_DBG("chan %p", chan);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	if (__chan_is_moving(chan))
		return;

	while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
		seq = l2cap_seq_list_pop(&chan->retrans_list);

		skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
		if (!skb) {
			BT_DBG("Error: Can't retransmit seq %d, frame missing",
			       seq);
			continue;
		}

		bt_cb(skb)->control.retries++;
		control = bt_cb(skb)->control;

		/* max_tx == 0 means unlimited retransmissions */
		if (chan->max_tx != 0 &&
		    bt_cb(skb)->control.retries > chan->max_tx) {
			BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
			l2cap_send_disconn_req(chan, ECONNRESET);
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		control.reqseq = chan->buffer_seq;
		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control.final = 1;
		else
			control.final = 0;

		if (skb_cloned(skb)) {
			/* Cloned sk_buffs are read-only, so we need a
			 * writeable copy
			 */
			tx_skb = skb_copy(skb, GFP_KERNEL);
		} else {
			tx_skb = skb_clone(skb, GFP_KERNEL);
		}

		if (!tx_skb) {
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		/* Update skb contents */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
			put_unaligned_le32(__pack_extended_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		} else {
			put_unaligned_le16(__pack_enhanced_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		}

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) tx_skb->data, tx_skb->len);
			put_unaligned_le16(fcs, skb_put(tx_skb,
							L2CAP_FCS_SIZE));
		}

		l2cap_do_send(chan, tx_skb);

		BT_DBG("Resent txseq %d", control.txseq);

		chan->last_acked_seq = chan->buffer_seq;
	}
}
2041
/* Retransmit the single frame requested by an SREJ: queue its sequence
 * number (control->reqseq) and run the resend machinery.
 */
static void l2cap_retransmit(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);

	l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
	l2cap_ertm_resend(chan);
}
2050
/* Retransmit all unacked frames starting at control->reqseq (REJ
 * handling).
 *
 * The first walk locates the frame carrying reqseq (or tx_send_head if
 * reached first); the second walk queues that frame and everything
 * after it, up to but excluding tx_send_head, for retransmission.
 * Nothing is queued while the remote reports busy.
 */
static void l2cap_retransmit_all(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	if (control->poll)
		set_bit(CONN_SEND_FBIT, &chan->conn_state);

	l2cap_seq_list_clear(&chan->retrans_list);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	if (chan->unacked_frames) {
		skb_queue_walk(&chan->tx_q, skb) {
			if (bt_cb(skb)->control.txseq == control->reqseq ||
			    skb == chan->tx_send_head)
				break;
		}

		skb_queue_walk_from(&chan->tx_q, skb) {
			if (skb == chan->tx_send_head)
				break;

			l2cap_seq_list_append(&chan->retrans_list,
					      bt_cb(skb)->control.txseq);
		}

		l2cap_ertm_resend(chan);
	}
}
2084
/* Acknowledge received I-frames on an ERTM channel.
 *
 * When locally busy, sends an RNR.  Otherwise first tries to piggy-back
 * the ack on outgoing I-frames; if unacked frames remain and the ack
 * window is 3/4 full, sends an explicit RR now, else (re)arms the ack
 * timer to batch the acknowledgement.
 */
static void l2cap_send_ack(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
					 chan->last_acked_seq);
	int threshold;

	BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
	       chan, chan->last_acked_seq, chan->buffer_seq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    chan->rx_state == L2CAP_RX_STATE_RECV) {
		__clear_ack_timer(chan);
		control.super = L2CAP_SUPER_RNR;
		control.reqseq = chan->buffer_seq;
		l2cap_send_sframe(chan, &control);
	} else {
		if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
			l2cap_ertm_send(chan);
			/* If any i-frames were sent, they included an ack */
			if (chan->buffer_seq == chan->last_acked_seq)
				frames_to_ack = 0;
		}

		/* Ack now if the window is 3/4ths full.
		 * Calculate without mul or div
		 */
		threshold = chan->ack_win;
		threshold += threshold << 1;
		threshold >>= 2;

		BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
		       threshold);

		if (frames_to_ack >= threshold) {
			__clear_ack_timer(chan);
			control.super = L2CAP_SUPER_RR;
			control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &control);
			frames_to_ack = 0;
		}

		if (frames_to_ack)
			__set_ack_timer(chan);
	}
}
2134
/* Copy @len bytes of user data from @msg into @skb.
 *
 * The first @count bytes go into the skb's linear area; any remainder
 * is split into continuation fragments of at most conn->mtu bytes and
 * chained onto the skb's frag_list.  Returns the number of bytes
 * copied or a negative error (-EFAULT on copy failure, or the
 * allocation error from ops->alloc_skb).  On error the caller is
 * expected to free @skb, which also releases any attached fragments.
 */
static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
					 struct msghdr *msg, int len,
					 int count, struct sk_buff *skb)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff **frag;
	int sent = 0;

	if (chan->ops->memcpy_fromiovec(chan, skb_put(skb, count),
					msg->msg_iov, count))
		return -EFAULT;

	sent += count;
	len -= count;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		struct sk_buff *tmp;

		count = min_t(unsigned int, conn->mtu, len);

		tmp = chan->ops->alloc_skb(chan, 0, count,
					   msg->msg_flags & MSG_DONTWAIT);
		if (IS_ERR(tmp))
			return PTR_ERR(tmp);

		*frag = tmp;

		if (chan->ops->memcpy_fromiovec(chan, skb_put(*frag, count),
						msg->msg_iov, count))
			return -EFAULT;

		sent += count;
		len -= count;

		/* Account the fragment in the head skb's totals */
		skb->len += (*frag)->len;
		skb->data_len += (*frag)->len;

		frag = &(*frag)->next;
	}

	return sent;
}
2179
/* Build a connectionless (G-frame) PDU: basic L2CAP header followed by
 * the 2-byte PSM, then the user payload.  Returns the skb or an
 * ERR_PTR on allocation/copy failure.
 */
static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
						 struct msghdr *msg, size_t len)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p psm 0x%2.2x len %zu", chan,
	       __le16_to_cpu(chan->psm), len);

	/* Linear part is capped by the HCI MTU; rest goes in fragments */
	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, hlen, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
	put_unaligned(chan->psm, (__le16 *) skb_put(skb, L2CAP_PSMLEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
2211
/* Build a basic-mode (B-frame) PDU: plain L2CAP header plus the user
 * payload.  Returns the skb or an ERR_PTR on allocation/copy failure.
 */
static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
					      struct msghdr *msg, size_t len)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);

	skb = chan->ops->alloc_skb(chan, L2CAP_HDR_SIZE, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len);

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
2241
/* Build an ERTM/streaming I-frame PDU.
 *
 * Layout: L2CAP header, enhanced or extended control field (filled in
 * later at transmit time), optional SDU length (@sdulen non-zero for a
 * segmented SDU's start frame), payload, and room for the FCS when
 * enabled.  Returns the skb or an ERR_PTR on failure.
 */
static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
					       struct msghdr *msg, size_t len,
					       u16 sdulen)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	hlen = __ertm_hdr_size(chan);

	if (sdulen)
		hlen += L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, hlen, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	/* Control header is populated later */
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	bt_cb(skb)->control.fcs = chan->fcs;
	bt_cb(skb)->control.retries = 0;
	return skb;
}
2295
/* Segment an outgoing SDU into ERTM/streaming I-frame PDUs.
 *
 * Computes the usable PDU payload from the HCI MTU (capped for BR/EDR,
 * reduced by header/FCS overhead and the remote's MPS), then emits a
 * single UNSEGMENTED frame or a START/CONTINUE.../END sequence onto
 * @seg_queue.  Only the START frame carries the SDU length field.
 * Returns 0 on success or a negative error (queue purged on failure).
 */
static int l2cap_segment_sdu(struct l2cap_chan *chan,
			     struct sk_buff_head *seg_queue,
			     struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	u16 sdu_len;
	size_t pdu_len;
	u8 sar;

	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);

	/* It is critical that ERTM PDUs fit in a single HCI fragment,
	 * so fragmented skbs are not used. The HCI layer's handling
	 * of fragmented skbs is not compatible with ERTM's queueing.
	 */

	/* PDU size is derived from the HCI MTU */
	pdu_len = chan->conn->mtu;

	/* Constrain PDU size for BR/EDR connections */
	if (!chan->hs_hcon)
		pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);

	/* Adjust for largest possible L2CAP overhead. */
	if (chan->fcs)
		pdu_len -= L2CAP_FCS_SIZE;

	pdu_len -= __ertm_hdr_size(chan);

	/* Remote device may have requested smaller PDUs */
	pdu_len = min_t(size_t, pdu_len, chan->remote_mps);

	if (len <= pdu_len) {
		sar = L2CAP_SAR_UNSEGMENTED;
		sdu_len = 0;
		pdu_len = len;
	} else {
		sar = L2CAP_SAR_START;
		sdu_len = len;
		/* START frame loses payload space to the SDU length field */
		pdu_len -= L2CAP_SDULEN_SIZE;
	}

	while (len > 0) {
		skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);

		if (IS_ERR(skb)) {
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		bt_cb(skb)->control.sar = sar;
		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;
		if (sdu_len) {
			/* Only the first PDU carries the SDU length;
			 * later PDUs regain that space.
			 */
			sdu_len = 0;
			pdu_len += L2CAP_SDULEN_SIZE;
		}

		if (len <= pdu_len) {
			sar = L2CAP_SAR_END;
			pdu_len = len;
		} else {
			sar = L2CAP_SAR_CONTINUE;
		}
	}

	return 0;
}
2365
/* Build an LE flow-control mode PDU: L2CAP header, optional SDU length
 * (@sdulen non-zero only for the first PDU of an SDU), then payload.
 * Returns the skb or an ERR_PTR on failure.
 */
static struct sk_buff *l2cap_create_le_flowctl_pdu(struct l2cap_chan *chan,
						   struct msghdr *msg,
						   size_t len, u16 sdulen)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	hlen = L2CAP_HDR_SIZE;

	if (sdulen)
		hlen += L2CAP_SDULEN_SIZE;

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, hlen, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	return skb;
}
2408
2409 static int l2cap_segment_le_sdu(struct l2cap_chan *chan,
2410 struct sk_buff_head *seg_queue,
2411 struct msghdr *msg, size_t len)
2412 {
2413 struct sk_buff *skb;
2414 size_t pdu_len;
2415 u16 sdu_len;
2416
2417 BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2418
2419 pdu_len = chan->conn->mtu - L2CAP_HDR_SIZE;
2420
2421 pdu_len = min_t(size_t, pdu_len, chan->remote_mps);
2422
2423 sdu_len = len;
2424 pdu_len -= L2CAP_SDULEN_SIZE;
2425
2426 while (len > 0) {
2427 if (len <= pdu_len)
2428 pdu_len = len;
2429
2430 skb = l2cap_create_le_flowctl_pdu(chan, msg, pdu_len, sdu_len);
2431 if (IS_ERR(skb)) {
2432 __skb_queue_purge(seg_queue);
2433 return PTR_ERR(skb);
2434 }
2435
2436 __skb_queue_tail(seg_queue, skb);
2437
2438 len -= pdu_len;
2439
2440 if (sdu_len) {
2441 sdu_len = 0;
2442 pdu_len += L2CAP_SDULEN_SIZE;
2443 }
2444 }
2445
2446 return 0;
2447 }
2448
/* Send an SDU described by @msg on @chan.
 *
 * The framing depends on the channel: connectionless channels get a
 * single connless PDU; otherwise chan->mode selects LE flow control,
 * Basic, ERTM or Streaming handling.  Returns the number of bytes
 * accepted or a negative errno.
 *
 * The channel lock is released while waiting for skb allocation, so
 * the channel state is rechecked after every allocation.
 */
int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	int err;
	struct sk_buff_head seg_queue;

	if (!chan->conn)
		return -ENOTCONN;

	/* Connectionless channel */
	if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
		skb = l2cap_create_connless_pdu(chan, msg, len);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		/* Channel lock is released before requesting new skb and then
		 * reacquired thus we need to recheck channel state.
		 */
		if (chan->state != BT_CONNECTED) {
			kfree_skb(skb);
			return -ENOTCONN;
		}

		l2cap_do_send(chan, skb);
		return len;
	}

	switch (chan->mode) {
	case L2CAP_MODE_LE_FLOWCTL:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		/* Credit-based flow control: cannot send without credits */
		if (!chan->tx_credits)
			return -EAGAIN;

		__skb_queue_head_init(&seg_queue);

		err = l2cap_segment_le_sdu(chan, &seg_queue, msg, len);

		/* Channel may have closed while segmenting (lock dropped) */
		if (chan->state != BT_CONNECTED) {
			__skb_queue_purge(&seg_queue);
			err = -ENOTCONN;
		}

		if (err)
			return err;

		skb_queue_splice_tail_init(&seg_queue, &chan->tx_q);

		/* Send as many queued PDUs as current credits allow */
		while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
			l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
			chan->tx_credits--;
		}

		/* Out of credits: stop the writer until more arrive */
		if (!chan->tx_credits)
			chan->ops->suspend(chan);

		err = len;

		break;

	case L2CAP_MODE_BASIC:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		/* Create a basic PDU */
		skb = l2cap_create_basic_pdu(chan, msg, len);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		/* Channel lock is released before requesting new skb and then
		 * reacquired thus we need to recheck channel state.
		 */
		if (chan->state != BT_CONNECTED) {
			kfree_skb(skb);
			return -ENOTCONN;
		}

		l2cap_do_send(chan, skb);
		err = len;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* Check outgoing MTU */
		if (len > chan->omtu) {
			err = -EMSGSIZE;
			break;
		}

		__skb_queue_head_init(&seg_queue);

		/* Do segmentation before calling in to the state machine,
		 * since it's possible to block while waiting for memory
		 * allocation.
		 */
		err = l2cap_segment_sdu(chan, &seg_queue, msg, len);

		/* The channel could have been closed while segmenting,
		 * check that it is still connected.
		 */
		if (chan->state != BT_CONNECTED) {
			__skb_queue_purge(&seg_queue);
			err = -ENOTCONN;
		}

		if (err)
			break;

		if (chan->mode == L2CAP_MODE_ERTM)
			l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
		else
			l2cap_streaming_send(chan, &seg_queue);

		err = len;

		/* If the skbs were not queued for sending, they'll still be in
		 * seg_queue and need to be purged.
		 */
		__skb_queue_purge(&seg_queue);
		break;

	default:
		BT_DBG("bad state %1.1x", chan->mode);
		err = -EBADFD;
	}

	return err;
}
EXPORT_SYMBOL_GPL(l2cap_chan_send);
2581
2582 static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
2583 {
2584 struct l2cap_ctrl control;
2585 u16 seq;
2586
2587 BT_DBG("chan %p, txseq %u", chan, txseq);
2588
2589 memset(&control, 0, sizeof(control));
2590 control.sframe = 1;
2591 control.super = L2CAP_SUPER_SREJ;
2592
2593 for (seq = chan->expected_tx_seq; seq != txseq;
2594 seq = __next_seq(chan, seq)) {
2595 if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
2596 control.reqseq = seq;
2597 l2cap_send_sframe(chan, &control);
2598 l2cap_seq_list_append(&chan->srej_list, seq);
2599 }
2600 }
2601
2602 chan->expected_tx_seq = __next_seq(chan, txseq);
2603 }
2604
2605 static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2606 {
2607 struct l2cap_ctrl control;
2608
2609 BT_DBG("chan %p", chan);
2610
2611 if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2612 return;
2613
2614 memset(&control, 0, sizeof(control));
2615 control.sframe = 1;
2616 control.super = L2CAP_SUPER_SREJ;
2617 control.reqseq = chan->srej_list.tail;
2618 l2cap_send_sframe(chan, &control);
2619 }
2620
/* Re-send SREJ frames for every sequence number still outstanding on
 * the SREJ list, stopping early if @txseq is reached.
 *
 * Each entry is popped and, unless it matches @txseq (or the list is
 * exhausted), re-requested and appended back to the list.  The initial
 * head is captured so the re-appended entries are not visited twice.
 */
static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
{
	struct l2cap_ctrl control;
	u16 initial_head;
	u16 seq;

	BT_DBG("chan %p, txseq %u", chan, txseq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;

	/* Capture initial list head to allow only one pass through the list. */
	initial_head = chan->srej_list.head;

	do {
		seq = l2cap_seq_list_pop(&chan->srej_list);
		if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
			break;

		control.reqseq = seq;
		l2cap_send_sframe(chan, &control);
		l2cap_seq_list_append(&chan->srej_list, seq);
	} while (chan->srej_list.head != initial_head);
}
2646
/* Process an incoming acknowledgment: release all transmitted I-frames
 * from expected_ack_seq up to (but not including) @reqseq, and stop the
 * retransmission timer once nothing remains unacknowledged.
 */
static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
{
	struct sk_buff *acked_skb;
	u16 ackseq;

	BT_DBG("chan %p, reqseq %u", chan, reqseq);

	/* Fast path: nothing outstanding, or nothing newly acknowledged */
	if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
		return;

	BT_DBG("expected_ack_seq %u, unacked_frames %u",
	       chan->expected_ack_seq, chan->unacked_frames);

	for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
	     ackseq = __next_seq(chan, ackseq)) {

		acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
		if (acked_skb) {
			skb_unlink(acked_skb, &chan->tx_q);
			kfree_skb(acked_skb);
			chan->unacked_frames--;
		}
	}

	chan->expected_ack_seq = reqseq;

	if (chan->unacked_frames == 0)
		__clear_retrans_timer(chan);

	BT_DBG("unacked_frames %u", chan->unacked_frames);
}
2678
2679 static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2680 {
2681 BT_DBG("chan %p", chan);
2682
2683 chan->expected_tx_seq = chan->buffer_seq;
2684 l2cap_seq_list_clear(&chan->srej_list);
2685 skb_queue_purge(&chan->srej_q);
2686 chan->rx_state = L2CAP_RX_STATE_RECV;
2687 }
2688
/* TX state machine handler for the XMIT state (not waiting for a final
 * response).  @control may be NULL for locally generated events.
 */
static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
				struct l2cap_ctrl *control,
				struct sk_buff_head *skbs, u8 event)
{
	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
	       event);

	switch (event) {
	case L2CAP_EV_DATA_REQUEST:
		if (chan->tx_send_head == NULL)
			chan->tx_send_head = skb_peek(skbs);

		/* Append the new frames and transmit what the window allows */
		skb_queue_splice_tail_init(skbs, &chan->tx_q);
		l2cap_ertm_send(chan);
		break;
	case L2CAP_EV_LOCAL_BUSY_DETECTED:
		BT_DBG("Enter LOCAL_BUSY");
		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
			/* The SREJ_SENT state must be aborted if we are to
			 * enter the LOCAL_BUSY state.
			 */
			l2cap_abort_rx_srej_sent(chan);
		}

		l2cap_send_ack(chan);

		break;
	case L2CAP_EV_LOCAL_BUSY_CLEAR:
		BT_DBG("Exit LOCAL_BUSY");
		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
			/* An RNR went out while busy; poll the peer with
			 * RR(P=1) and wait for its final response.
			 */
			struct l2cap_ctrl local_control;

			memset(&local_control, 0, sizeof(local_control));
			local_control.sframe = 1;
			local_control.super = L2CAP_SUPER_RR;
			local_control.poll = 1;
			local_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &local_control);

			chan->retry_count = 1;
			__set_monitor_timer(chan);
			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		}
		break;
	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
		l2cap_process_reqseq(chan, control->reqseq);
		break;
	case L2CAP_EV_EXPLICIT_POLL:
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		__clear_ack_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RETRANS_TO:
		/* Retransmission timeout: poll the peer for its state */
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RECV_FBIT:
		/* Nothing to process */
		break;
	default:
		break;
	}
}
2760
/* TX state machine handler for the WAIT_F state: a poll (P=1) frame has
 * been sent and we are waiting for the peer's final (F=1) response.
 * New data is queued but not transmitted until the final arrives.
 */
static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
				  struct l2cap_ctrl *control,
				  struct sk_buff_head *skbs, u8 event)
{
	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
	       event);

	switch (event) {
	case L2CAP_EV_DATA_REQUEST:
		if (chan->tx_send_head == NULL)
			chan->tx_send_head = skb_peek(skbs);
		/* Queue data, but don't send. */
		skb_queue_splice_tail_init(skbs, &chan->tx_q);
		break;
	case L2CAP_EV_LOCAL_BUSY_DETECTED:
		BT_DBG("Enter LOCAL_BUSY");
		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
			/* The SREJ_SENT state must be aborted if we are to
			 * enter the LOCAL_BUSY state.
			 */
			l2cap_abort_rx_srej_sent(chan);
		}

		l2cap_send_ack(chan);

		break;
	case L2CAP_EV_LOCAL_BUSY_CLEAR:
		BT_DBG("Exit LOCAL_BUSY");
		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
			/* Re-poll the peer after the RNR sent while busy */
			struct l2cap_ctrl local_control;
			memset(&local_control, 0, sizeof(local_control));
			local_control.sframe = 1;
			local_control.super = L2CAP_SUPER_RR;
			local_control.poll = 1;
			local_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &local_control);

			chan->retry_count = 1;
			__set_monitor_timer(chan);
			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		}
		break;
	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
		l2cap_process_reqseq(chan, control->reqseq);

		/* Fall through */

	case L2CAP_EV_RECV_FBIT:
		if (control && control->final) {
			/* Final response received: return to normal XMIT */
			__clear_monitor_timer(chan);
			if (chan->unacked_frames > 0)
				__set_retrans_timer(chan);
			chan->retry_count = 0;
			chan->tx_state = L2CAP_TX_STATE_XMIT;
			BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state);
		}
		break;
	case L2CAP_EV_EXPLICIT_POLL:
		/* Ignore */
		break;
	case L2CAP_EV_MONITOR_TO:
		/* No response to our poll: retry until max_tx is exceeded,
		 * then give up and disconnect the channel.
		 */
		if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
			l2cap_send_rr_or_rnr(chan, 1);
			__set_monitor_timer(chan);
			chan->retry_count++;
		} else {
			l2cap_send_disconn_req(chan, ECONNABORTED);
		}
		break;
	default:
		break;
	}
}
2838
2839 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
2840 struct sk_buff_head *skbs, u8 event)
2841 {
2842 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2843 chan, control, skbs, event, chan->tx_state);
2844
2845 switch (chan->tx_state) {
2846 case L2CAP_TX_STATE_XMIT:
2847 l2cap_tx_state_xmit(chan, control, skbs, event);
2848 break;
2849 case L2CAP_TX_STATE_WAIT_F:
2850 l2cap_tx_state_wait_f(chan, control, skbs, event);
2851 break;
2852 default:
2853 /* Ignore event */
2854 break;
2855 }
2856 }
2857
/* Feed the reqseq and F-bit of a received frame into the TX state
 * machine so acknowledged I-frames can be released.
 */
static void l2cap_pass_to_tx(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);
	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
}
2864
/* Feed only the F-bit of a received frame into the TX state machine
 * (no reqseq processing).
 */
static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
				  struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);
	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
}
2871
2872 /* Copy frame to all raw sockets on that connection */
2873 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2874 {
2875 struct sk_buff *nskb;
2876 struct l2cap_chan *chan;
2877
2878 BT_DBG("conn %p", conn);
2879
2880 mutex_lock(&conn->chan_lock);
2881
2882 list_for_each_entry(chan, &conn->chan_l, list) {
2883 if (chan->chan_type != L2CAP_CHAN_RAW)
2884 continue;
2885
2886 /* Don't send frame to the channel it came from */
2887 if (bt_cb(skb)->chan == chan)
2888 continue;
2889
2890 nskb = skb_clone(skb, GFP_KERNEL);
2891 if (!nskb)
2892 continue;
2893 if (chan->ops->recv(chan, nskb))
2894 kfree_skb(nskb);
2895 }
2896
2897 mutex_unlock(&conn->chan_lock);
2898 }
2899
/* ---- L2CAP signalling commands ---- */

/* Build a signalling command skb: L2CAP header + command header +
 * @dlen bytes of @data, addressed to the LE or BR/EDR signalling CID.
 * Payload beyond the connection MTU is carried in continuation
 * fragments (bare data, no L2CAP header) chained on frag_list.
 *
 * Returns the skb, or NULL on allocation failure or if the MTU cannot
 * even hold the two headers.
 */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
				       u8 ident, u16 dlen, void *data)
{
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	int len, count;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
	       conn, code, ident, dlen);

	if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
		return NULL;

	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, conn->mtu, len);

	skb = bt_skb_alloc(count, GFP_KERNEL);
	if (!skb)
		return NULL;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);

	if (conn->hcon->type == LE_LINK)
		lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
	else
		lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);

	cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->code  = code;
	cmd->ident = ident;
	cmd->len   = cpu_to_le16(dlen);

	if (dlen) {
		/* Fill the first skb with whatever fits after the headers */
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		memcpy(skb_put(skb, count), data, count);
		data += count;
	}

	len -= skb->len;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_alloc(count, GFP_KERNEL);
		if (!*frag)
			goto fail;

		memcpy(skb_put(*frag, count), data, count);

		len  -= count;
		data += count;

		frag = &(*frag)->next;
	}

	return skb;

fail:
	/* Frees the chained fragments along with the head skb */
	kfree_skb(skb);
	return NULL;
}
2966
/* Extract the next configuration option at *ptr and advance *ptr past
 * it.  Returns the total number of bytes consumed and fills in the
 * option *type and payload length *olen.
 *
 * Payloads of 1, 2 or 4 bytes are decoded (little-endian) into *val;
 * any other length leaves *val as a pointer to the raw option payload.
 *
 * NOTE(review): opt->len comes straight off the wire and is not
 * validated against the remaining buffer here — callers must check
 * *olen against the expected size before trusting *val.
 */
static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
				     unsigned long *val)
{
	struct l2cap_conf_opt *opt = *ptr;
	int len;

	len = L2CAP_CONF_OPT_SIZE + opt->len;
	*ptr += len;

	*type = opt->type;
	*olen = opt->len;

	switch (opt->len) {
	case 1:
		*val = *((u8 *) opt->val);
		break;

	case 2:
		*val = get_unaligned_le16(opt->val);
		break;

	case 4:
		*val = get_unaligned_le32(opt->val);
		break;

	default:
		/* Variable-length option: hand back a raw pointer */
		*val = (unsigned long) opt->val;
		break;
	}

	BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
	return len;
}
3000
/* Append a configuration option (type, len, value) at *ptr and advance
 * *ptr past it.  Values of 1/2/4 bytes are written little-endian; any
 * other length treats @val as a pointer to raw bytes to copy.
 *
 * NOTE(review): no bounds check is done here; callers must ensure the
 * destination buffer has room for L2CAP_CONF_OPT_SIZE + len bytes.
 */
static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
{
	struct l2cap_conf_opt *opt = *ptr;

	BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);

	opt->type = type;
	opt->len  = len;

	switch (len) {
	case 1:
		*((u8 *) opt->val)  = val;
		break;

	case 2:
		put_unaligned_le16(val, opt->val);
		break;

	case 4:
		put_unaligned_le32(val, opt->val);
		break;

	default:
		memcpy(opt->val, (void *) val, len);
		break;
	}

	*ptr += L2CAP_CONF_OPT_SIZE + len;
}
3030
3031 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
3032 {
3033 struct l2cap_conf_efs efs;
3034
3035 switch (chan->mode) {
3036 case L2CAP_MODE_ERTM:
3037 efs.id = chan->local_id;
3038 efs.stype = chan->local_stype;
3039 efs.msdu = cpu_to_le16(chan->local_msdu);
3040 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
3041 efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
3042 efs.flush_to = cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
3043 break;
3044
3045 case L2CAP_MODE_STREAMING:
3046 efs.id = 1;
3047 efs.stype = L2CAP_SERV_BESTEFFORT;
3048 efs.msdu = cpu_to_le16(chan->local_msdu);
3049 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
3050 efs.acc_lat = 0;
3051 efs.flush_to = 0;
3052 break;
3053
3054 default:
3055 return;
3056 }
3057
3058 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
3059 (unsigned long) &efs);
3060 }
3061
/* Delayed-work handler for the ERTM acknowledgment timer: if any
 * received I-frames have not been acknowledged yet, send an RR/RNR
 * carrying the current buffer_seq.
 *
 * Drops a channel reference on completion; presumably the reference
 * was taken when this work was scheduled — confirm at the timer setup
 * sites.
 */
static void l2cap_ack_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       ack_timer.work);
	u16 frames_to_ack;

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	frames_to_ack = __seq_offset(chan, chan->buffer_seq,
				     chan->last_acked_seq);

	if (frames_to_ack)
		l2cap_send_rr_or_rnr(chan, 0);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
3081
/* Reset all ERTM/Streaming sequence state for @chan and, for ERTM,
 * initialize the timers, the out-of-order receive queue and the SREJ /
 * retransmission sequence lists.
 *
 * Returns 0 on success or a negative errno from sequence-list
 * allocation; a partially allocated SREJ list is freed on failure.
 */
int l2cap_ertm_init(struct l2cap_chan *chan)
{
	int err;

	chan->next_tx_seq = 0;
	chan->expected_tx_seq = 0;
	chan->expected_ack_seq = 0;
	chan->unacked_frames = 0;
	chan->buffer_seq = 0;
	chan->frames_sent = 0;
	chan->last_acked_seq = 0;
	chan->sdu = NULL;
	chan->sdu_last_frag = NULL;
	chan->sdu_len = 0;

	skb_queue_head_init(&chan->tx_q);

	/* AMP move state starts out stable on the BR/EDR controller */
	chan->local_amp_id = AMP_ID_BREDR;
	chan->move_id = AMP_ID_BREDR;
	chan->move_state = L2CAP_MOVE_STABLE;
	chan->move_role = L2CAP_MOVE_ROLE_NONE;

	/* Streaming mode needs none of the retransmission machinery */
	if (chan->mode != L2CAP_MODE_ERTM)
		return 0;

	chan->rx_state = L2CAP_RX_STATE_RECV;
	chan->tx_state = L2CAP_TX_STATE_XMIT;

	INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
	INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
	INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);

	skb_queue_head_init(&chan->srej_q);

	err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
	if (err < 0)
		return err;

	err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
	if (err < 0)
		l2cap_seq_list_free(&chan->srej_list);

	return err;
}
3126
3127 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
3128 {
3129 switch (mode) {
3130 case L2CAP_MODE_STREAMING:
3131 case L2CAP_MODE_ERTM:
3132 if (l2cap_mode_supported(mode, remote_feat_mask))
3133 return mode;
3134 /* fall through */
3135 default:
3136 return L2CAP_MODE_BASIC;
3137 }
3138 }
3139
3140 static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
3141 {
3142 return conn->hs_enabled && conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
3143 }
3144
3145 static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
3146 {
3147 return conn->hs_enabled && conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
3148 }
3149
/* Fill in the RFC option's retransmission and monitor timeouts.  AMP
 * (high speed) links derive the timeout from the controller's
 * best-effort flush timeout; plain BR/EDR links use the spec defaults.
 */
static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
				      struct l2cap_conf_rfc *rfc)
{
	if (chan->local_amp_id != AMP_ID_BREDR && chan->hs_hcon) {
		u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;

		/* Class 1 devices have must have ERTM timeouts
		 * exceeding the Link Supervision Timeout.  The
		 * default Link Supervision Timeout for AMP
		 * controllers is 10 seconds.
		 *
		 * Class 1 devices use 0xffffffff for their
		 * best-effort flush timeout, so the clamping logic
		 * will result in a timeout that meets the above
		 * requirement.  ERTM timeouts are 16-bit values, so
		 * the maximum timeout is 65.535 seconds.
		 */

		/* Convert timeout to milliseconds and round */
		ertm_to = DIV_ROUND_UP_ULL(ertm_to, 1000);

		/* This is the recommended formula for class 2 devices
		 * that start ERTM timers when packets are sent to the
		 * controller.
		 */
		ertm_to = 3 * ertm_to + 500;

		/* Clamp to the 16-bit range of the RFC option fields */
		if (ertm_to > 0xffff)
			ertm_to = 0xffff;

		rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);
		rfc->monitor_timeout = rfc->retrans_timeout;
	} else {
		rfc->retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
		rfc->monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
	}
}
3187
3188 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
3189 {
3190 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
3191 __l2cap_ews_supported(chan->conn)) {
3192 /* use extended control field */
3193 set_bit(FLAG_EXT_CTRL, &chan->flags);
3194 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3195 } else {
3196 chan->tx_win = min_t(u16, chan->tx_win,
3197 L2CAP_DEFAULT_TX_WINDOW);
3198 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
3199 }
3200 chan->ack_win = chan->tx_win;
3201 }
3202
/* Build the payload of a Configure Request for @chan into @data and
 * return its length.
 *
 * On the first request the desired mode is (re)selected against the
 * remote's advertised features; later requests keep the mode already
 * being negotiated and only emit the matching options (MTU, RFC, EFS,
 * EWS, FCS).
 */
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
{
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = chan->mode };
	void *ptr = req->data;
	u16 size;

	BT_DBG("chan %p", chan);

	/* Mode selection only happens before any config exchange */
	if (chan->num_conf_req || chan->num_conf_rsp)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		/* State 2 devices keep their configured mode as-is */
		if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
			break;

		if (__l2cap_efs_supported(chan->conn))
			set_bit(FLAG_EFS_ENABLE, &chan->flags);

		/* fall through */
	default:
		chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
		break;
	}

done:
	/* Only advertise a non-default receive MTU */
	if (chan->imtu != L2CAP_DEFAULT_MTU)
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		/* An explicit Basic RFC option is only needed when the
		 * remote could have expected ERTM or Streaming.
		 */
		if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
		    !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
			break;

		rfc.mode            = L2CAP_MODE_BASIC;
		rfc.txwin_size      = 0;
		rfc.max_transmit    = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size    = 0;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);
		break;

	case L2CAP_MODE_ERTM:
		rfc.mode            = L2CAP_MODE_ERTM;
		rfc.max_transmit    = chan->max_tx;

		__l2cap_set_ertm_timeouts(chan, &rfc);

		/* PDU size is bounded by the HCI MTU less worst-case
		 * L2CAP overhead (extended header, SDU length, FCS).
		 */
		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_txwin_setup(chan);

		rfc.txwin_size = min_t(u16, chan->tx_win,
				       L2CAP_DEFAULT_TX_WINDOW);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan);

		/* The full window goes in the EWS option when extended
		 * control is in use.
		 */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
					   chan->tx_win);

		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
			if (chan->fcs == L2CAP_FCS_NONE ||
			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
				chan->fcs = L2CAP_FCS_NONE;
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
						   chan->fcs);
			}
		break;

	case L2CAP_MODE_STREAMING:
		l2cap_txwin_setup(chan);
		rfc.mode            = L2CAP_MODE_STREAMING;
		rfc.txwin_size      = 0;
		rfc.max_transmit    = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;

		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan);

		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
			if (chan->fcs == L2CAP_FCS_NONE ||
			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
				chan->fcs = L2CAP_FCS_NONE;
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
						   chan->fcs);
			}
		break;
	}

	req->dcid  = cpu_to_le16(chan->dcid);
	req->flags = cpu_to_le16(0);

	return ptr - data;
}
3320
/* Parse the peer's Configure Request (stored in chan->conf_req) and
 * build the Configure Response payload in @data.
 *
 * Returns the length of the response on success, or -ECONNREFUSED when
 * the requested configuration cannot be accepted at all.
 *
 * Hardening: every option's length is validated before its value is
 * used, so a malformed request cannot feed us a garbage scalar (from
 * the pointer fallback in l2cap_get_conf_opt) or leave remote_efs set
 * with an unparsed efs.  efs is also zeroed up front because it can be
 * echoed back below whenever FLAG_EFS_ENABLE is set (e.g. from our own
 * config request) even if the peer never sent a valid EFS option.
 */
static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
{
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;
	void *req = chan->conf_req;
	int len = chan->conf_len;
	int type, hint, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;
	u8 remote_efs = 0;
	u16 mtu = L2CAP_DEFAULT_MTU;
	u16 result = L2CAP_CONF_SUCCESS;
	u16 size;

	BT_DBG("chan %p", chan);

	/* Never let uninitialized stack data reach the peer via the EFS
	 * echo further down.
	 */
	memset(&efs, 0, sizeof(efs));

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);

		hint = type & L2CAP_CONF_HINT;
		type &= L2CAP_CONF_MASK;

		switch (type) {
		case L2CAP_CONF_MTU:
			if (olen != 2)
				break;
			mtu = val;
			break;

		case L2CAP_CONF_FLUSH_TO:
			if (olen != 2)
				break;
			chan->flush_to = val;
			break;

		case L2CAP_CONF_QOS:
			break;

		case L2CAP_CONF_RFC:
			if (olen != sizeof(rfc))
				break;
			memcpy(&rfc, (void *) val, olen);
			break;

		case L2CAP_CONF_FCS:
			if (olen != 1)
				break;
			if (val == L2CAP_FCS_NONE)
				set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
			break;

		case L2CAP_CONF_EFS:
			if (olen != sizeof(efs))
				break;
			remote_efs = 1;
			memcpy(&efs, (void *) val, olen);
			break;

		case L2CAP_CONF_EWS:
			if (olen != 2)
				break;
			if (!chan->conn->hs_enabled)
				return -ECONNREFUSED;

			set_bit(FLAG_EXT_CTRL, &chan->flags);
			set_bit(CONF_EWS_RECV, &chan->conf_state);
			chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
			chan->remote_tx_win = val;
			break;

		default:
			if (hint)
				break;

			/* Unknown non-hint options are reported back */
			result = L2CAP_CONF_UNKNOWN;
			*((u8 *) ptr++) = type;
			break;
		}
	}

	if (chan->num_conf_rsp || chan->num_conf_req > 1)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
			chan->mode = l2cap_select_mode(rfc.mode,
						       chan->conn->feat_mask);
			break;
		}

		if (remote_efs) {
			if (__l2cap_efs_supported(chan->conn))
				set_bit(FLAG_EFS_ENABLE, &chan->flags);
			else
				return -ECONNREFUSED;
		}

		if (chan->mode != rfc.mode)
			return -ECONNREFUSED;

		break;
	}

done:
	if (chan->mode != rfc.mode) {
		result = L2CAP_CONF_UNACCEPT;
		rfc.mode = chan->mode;

		/* The mode is not renegotiable after the second round */
		if (chan->num_conf_rsp == 1)
			return -ECONNREFUSED;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);
	}

	if (result == L2CAP_CONF_SUCCESS) {
		/* Configure output options and let the other side know
		 * which ones we don't like. */

		if (mtu < L2CAP_DEFAULT_MIN_MTU)
			result = L2CAP_CONF_UNACCEPT;
		else {
			chan->omtu = mtu;
			set_bit(CONF_MTU_DONE, &chan->conf_state);
		}
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);

		if (remote_efs) {
			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != chan->local_stype) {

				result = L2CAP_CONF_UNACCEPT;

				if (chan->num_conf_req >= 1)
					return -ECONNREFUSED;

				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs);
			} else {
				/* Send PENDING Conf Rsp */
				result = L2CAP_CONF_PENDING;
				set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
			}
		}

		switch (rfc.mode) {
		case L2CAP_MODE_BASIC:
			chan->fcs = L2CAP_FCS_NONE;
			set_bit(CONF_MODE_DONE, &chan->conf_state);
			break;

		case L2CAP_MODE_ERTM:
			if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
				chan->remote_tx_win = rfc.txwin_size;
			else
				rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;

			chan->remote_max_tx = rfc.max_transmit;

			/* Clamp the peer's PDU size to what the HCI MTU
			 * can carry with worst-case L2CAP overhead.
			 */
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			__l2cap_set_ertm_timeouts(chan, &rfc);

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					   sizeof(rfc), (unsigned long) &rfc);

			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->remote_id = efs.id;
				chan->remote_stype = efs.stype;
				chan->remote_msdu = le16_to_cpu(efs.msdu);
				chan->remote_flush_to =
					le32_to_cpu(efs.flush_to);
				chan->remote_acc_lat =
					le32_to_cpu(efs.acc_lat);
				chan->remote_sdu_itime =
					le32_to_cpu(efs.sdu_itime);
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs);
			}
			break;

		case L2CAP_MODE_STREAMING:
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
					   (unsigned long) &rfc);

			break;

		default:
			result = L2CAP_CONF_UNACCEPT;

			memset(&rfc, 0, sizeof(rfc));
			rfc.mode = chan->mode;
		}

		if (result == L2CAP_CONF_SUCCESS)
			set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
	}
	rsp->scid   = cpu_to_le16(chan->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags  = cpu_to_le16(0);

	return ptr - data;
}
3534
/* Parse a Configure Response from the peer (rsp/len) and build a follow-up
 * Configure Request into data, adopting the parameters the peer proposed.
 * *result carries the response result in and may be downgraded to
 * L2CAP_CONF_UNACCEPT. Returns the length of the request built into data,
 * or -ECONNREFUSED if the response is not acceptable.
 */
static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
				void *data, u16 *result)
{
	struct l2cap_conf_req *req = data;
	void *ptr = req->data;
	int type, olen;
	unsigned long val;
	/* Default to Basic mode in case the peer omitted the RFC option */
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;

	BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);

	/* Walk the option list carried in the response payload */
	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);

		switch (type) {
		case L2CAP_CONF_MTU:
			/* Never accept an MTU below the spec minimum */
			if (val < L2CAP_DEFAULT_MIN_MTU) {
				*result = L2CAP_CONF_UNACCEPT;
				chan->imtu = L2CAP_DEFAULT_MIN_MTU;
			} else
				chan->imtu = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
			break;

		case L2CAP_CONF_FLUSH_TO:
			chan->flush_to = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
					   2, chan->flush_to);
			break;

		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *)val, olen);

			/* A "state 2" device must not let the peer change
			 * the channel mode.
			 */
			if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
			    rfc.mode != chan->mode)
				return -ECONNREFUSED;

			chan->fcs = 0;

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					   sizeof(rfc), (unsigned long) &rfc);
			break;

		case L2CAP_CONF_EWS:
			/* Extended window: shrink our ack window if needed */
			chan->ack_win = min_t(u16, val, chan->ack_win);
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
					   chan->tx_win);
			break;

		case L2CAP_CONF_EFS:
			if (olen == sizeof(efs))
				memcpy(&efs, (void *)val, olen);

			/* Service type must match ours unless either side
			 * declared "no traffic".
			 */
			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != chan->local_stype)
				return -ECONNREFUSED;

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
					   (unsigned long) &efs);
			break;

		case L2CAP_CONF_FCS:
			/* Remember the peer can operate without an FCS */
			if (*result == L2CAP_CONF_PENDING)
				if (val == L2CAP_FCS_NONE)
					set_bit(CONF_RECV_NO_FCS,
						&chan->conf_state);
			break;
		}
	}

	/* Refuse an attempt to move a Basic-mode channel to another mode */
	if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
		return -ECONNREFUSED;

	chan->mode = rfc.mode;

	if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
		switch (rfc.mode) {
		case L2CAP_MODE_ERTM:
			chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
			chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
			chan->mps = le16_to_cpu(rfc.max_pdu_size);
			if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
				chan->ack_win = min_t(u16, chan->ack_win,
						      rfc.txwin_size);

			/* NOTE(review): efs is only initialized above when the
			 * peer sent an EFS option; this relies on
			 * FLAG_EFS_ENABLE implying one was present - confirm.
			 */
			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->local_msdu = le16_to_cpu(efs.msdu);
				chan->local_sdu_itime =
					le32_to_cpu(efs.sdu_itime);
				chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
				chan->local_flush_to =
					le32_to_cpu(efs.flush_to);
			}
			break;

		case L2CAP_MODE_STREAMING:
			chan->mps = le16_to_cpu(rfc.max_pdu_size);
		}
	}

	req->dcid = cpu_to_le16(chan->dcid);
	req->flags = cpu_to_le16(0);

	return ptr - data;
}
3643
3644 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
3645 u16 result, u16 flags)
3646 {
3647 struct l2cap_conf_rsp *rsp = data;
3648 void *ptr = rsp->data;
3649
3650 BT_DBG("chan %p", chan);
3651
3652 rsp->scid = cpu_to_le16(chan->dcid);
3653 rsp->result = cpu_to_le16(result);
3654 rsp->flags = cpu_to_le16(flags);
3655
3656 return ptr - data;
3657 }
3658
3659 void __l2cap_le_connect_rsp_defer(struct l2cap_chan *chan)
3660 {
3661 struct l2cap_le_conn_rsp rsp;
3662 struct l2cap_conn *conn = chan->conn;
3663
3664 BT_DBG("chan %p", chan);
3665
3666 rsp.dcid = cpu_to_le16(chan->scid);
3667 rsp.mtu = cpu_to_le16(chan->imtu);
3668 rsp.mps = cpu_to_le16(chan->mps);
3669 rsp.credits = cpu_to_le16(chan->rx_credits);
3670 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
3671
3672 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
3673 &rsp);
3674 }
3675
/* Send the deferred Connection (or Create Channel, for AMP) Response for
 * a BR/EDR channel, then kick off configuration if it has not started.
 */
void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
{
	struct l2cap_conn_rsp rsp;
	struct l2cap_conn *conn = chan->conn;
	u8 buf[128];
	u8 rsp_code;

	rsp.scid = cpu_to_le16(chan->dcid);
	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);

	/* A channel created over an AMP link answers with a different opcode */
	if (chan->hs_hcon)
		rsp_code = L2CAP_CREATE_CHAN_RSP;
	else
		rsp_code = L2CAP_CONN_RSP;

	BT_DBG("chan %p rsp_code %u", chan, rsp_code);

	l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);

	/* Only send the initial Configure Request once */
	if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
		return;

	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
		       l2cap_build_conf_req(chan, buf), buf);
	chan->num_conf_req++;
}
3704
/* Extract the final RFC (and extended window) parameters from a successful
 * Configure Response and apply them to the channel. Only meaningful for
 * ERTM and Streaming mode channels.
 */
static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
{
	int type, olen;
	unsigned long val;
	/* Use sane default values in case a misbehaving remote device
	 * did not send an RFC or extended window size option.
	 */
	u16 txwin_ext = chan->ack_win;
	struct l2cap_conf_rfc rfc = {
		.mode = chan->mode,
		.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
		.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
		.max_pdu_size = cpu_to_le16(chan->imtu),
		.txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
	};

	BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);

	/* Basic mode (and LE) channels carry no RFC parameters */
	if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
		return;

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);

		switch (type) {
		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *)val, olen);
			break;
		case L2CAP_CONF_EWS:
			txwin_ext = val;
			break;
		}
	}

	switch (rfc.mode) {
	case L2CAP_MODE_ERTM:
		chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
		chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
		chan->mps = le16_to_cpu(rfc.max_pdu_size);
		/* With extended control the window comes from the EWS option,
		 * otherwise from the RFC option itself.
		 */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
		else
			chan->ack_win = min_t(u16, chan->ack_win,
					      rfc.txwin_size);
		break;
	case L2CAP_MODE_STREAMING:
		chan->mps = le16_to_cpu(rfc.max_pdu_size);
	}
}
3755
/* Handle an incoming L2CAP Command Reject. Only "command not understood"
 * rejects matching our outstanding Information Request are acted upon:
 * the info exchange is finished and pending connections are started.
 */
static inline int l2cap_command_rej(struct l2cap_conn *conn,
				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				    u8 *data)
{
	struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;

	if (cmd_len < sizeof(*rej))
		return -EPROTO;

	if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
		return 0;

	/* Peer does not understand the Information Request: treat the
	 * feature mask exchange as done so connections can proceed.
	 */
	if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
	    cmd->ident == conn->info_ident) {
		cancel_delayed_work(&conn->info_timer);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
	}

	return 0;
}
3780
/* Handle an incoming Connection Request (or AMP Create Channel Request,
 * distinguished by rsp_code/amp_id). Looks up a listening channel for the
 * PSM, performs security checks, creates the new channel, and sends the
 * response. Returns the new channel, or NULL if none was created.
 */
static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd,
					u8 *data, u8 rsp_code, u8 amp_id)
{
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct l2cap_chan *chan = NULL, *pchan;
	int result, status = L2CAP_CS_NO_INFO;

	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
					 &conn->hcon->dst, ACL_LINK);
	if (!pchan) {
		result = L2CAP_CR_BAD_PSM;
		goto sendresp;
	}

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(pchan);

	/* Check if the ACL is secure enough (if not SDP) */
	if (psm != cpu_to_le16(L2CAP_PSM_SDP) &&
	    !hci_conn_check_link_mode(conn->hcon)) {
		conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
		result = L2CAP_CR_SEC_BLOCK;
		goto response;
	}

	result = L2CAP_CR_NO_MEM;

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid))
		goto response;

	chan = pchan->ops->new_connection(pchan);
	if (!chan)
		goto response;

	/* For certain devices (ex: HID mouse), support for authentication,
	 * pairing and bonding is optional. For such devices, inorder to avoid
	 * the ACL alive for too long after L2CAP disconnection, reset the ACL
	 * disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
	 */
	conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;

	/* Bind the new channel to the addresses of the underlying ACL */
	bacpy(&chan->src, &conn->hcon->src);
	bacpy(&chan->dst, &conn->hcon->dst);
	chan->src_type = bdaddr_type(conn->hcon, conn->hcon->src_type);
	chan->dst_type = bdaddr_type(conn->hcon, conn->hcon->dst_type);
	chan->psm = psm;
	chan->dcid = scid;
	chan->local_amp_id = amp_id;

	__l2cap_chan_add(conn, chan);

	dcid = chan->scid;

	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	chan->ident = cmd->ident;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
		if (l2cap_chan_check_security(chan)) {
			if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
				/* User space must accept before we proceed */
				l2cap_state_change(chan, BT_CONNECT2);
				result = L2CAP_CR_PEND;
				status = L2CAP_CS_AUTHOR_PEND;
				chan->ops->defer(chan);
			} else {
				/* Force pending result for AMP controllers.
				 * The connection will succeed after the
				 * physical link is up.
				 */
				if (amp_id == AMP_ID_BREDR) {
					l2cap_state_change(chan, BT_CONFIG);
					result = L2CAP_CR_SUCCESS;
				} else {
					l2cap_state_change(chan, BT_CONNECT2);
					result = L2CAP_CR_PEND;
				}
				status = L2CAP_CS_NO_INFO;
			}
		} else {
			l2cap_state_change(chan, BT_CONNECT2);
			result = L2CAP_CR_PEND;
			status = L2CAP_CS_AUTHEN_PEND;
		}
	} else {
		/* Feature mask exchange still in progress */
		l2cap_state_change(chan, BT_CONNECT2);
		result = L2CAP_CR_PEND;
		status = L2CAP_CS_NO_INFO;
	}

response:
	l2cap_chan_unlock(pchan);
	mutex_unlock(&conn->chan_lock);

sendresp:
	rsp.scid = cpu_to_le16(scid);
	rsp.dcid = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);

	/* Start the feature mask exchange if that is what we are pending on */
	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
		struct l2cap_info_req info;
		info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

		l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
			       sizeof(info), &info);
	}

	/* On immediate success, start configuration right away */
	if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
	    result == L2CAP_CR_SUCCESS) {
		u8 buf[128];
		set_bit(CONF_REQ_SENT, &chan->conf_state);
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf), buf);
		chan->num_conf_req++;
	}

	return chan;
}
3914
/* Signalling handler for a BR/EDR Connection Request: notify the
 * management interface of the connection (once), then delegate channel
 * creation to l2cap_connect().
 */
static int l2cap_connect_req(struct l2cap_conn *conn,
			     struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
	struct hci_dev *hdev = conn->hcon->hdev;
	struct hci_conn *hcon = conn->hcon;

	if (cmd_len < sizeof(struct l2cap_conn_req))
		return -EPROTO;

	hci_dev_lock(hdev);
	/* Report the device as connected to mgmt exactly once per ACL */
	if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
	    !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags))
		mgmt_device_connected(hdev, &hcon->dst, hcon->type,
				      hcon->dst_type, 0, NULL, 0,
				      hcon->dev_class);
	hci_dev_unlock(hdev);

	l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
	return 0;
}
3935
/* Handle a Connection Response (or AMP Create Channel Response). Looks up
 * the local channel by scid, or by the command ident if the peer sent a
 * zero scid (e.g. a failure response), and advances or tears down the
 * channel depending on the result.
 */
static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				    u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct l2cap_chan *chan;
	u8 req[128];
	int err;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
	       dcid, scid, result, status);

	mutex_lock(&conn->chan_lock);

	if (scid) {
		chan = __l2cap_get_chan_by_scid(conn, scid);
		if (!chan) {
			err = -EBADSLT;
			goto unlock;
		}
	} else {
		/* No scid given: match on the ident of our request */
		chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
		if (!chan) {
			err = -EBADSLT;
			goto unlock;
		}
	}

	err = 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_SUCCESS:
		/* Record the remote CID and move on to configuration */
		l2cap_state_change(chan, BT_CONFIG);
		chan->ident = 0;
		chan->dcid = dcid;
		clear_bit(CONF_CONNECT_PEND, &chan->conf_state);

		if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
			break;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, req), req);
		chan->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		set_bit(CONF_CONNECT_PEND, &chan->conf_state);
		break;

	default:
		/* Any other result is a refusal */
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	l2cap_chan_unlock(chan);

unlock:
	mutex_unlock(&conn->chan_lock);

	return err;
}
4008
4009 static inline void set_default_fcs(struct l2cap_chan *chan)
4010 {
4011 /* FCS is enabled only in ERTM or streaming mode, if one or both
4012 * sides request it.
4013 */
4014 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
4015 chan->fcs = L2CAP_FCS_NONE;
4016 else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
4017 chan->fcs = L2CAP_FCS_CRC16;
4018 }
4019
/* Send a successful Configure Response for an EFS channel whose local
 * configuration was pending, marking our side of the config as done.
 */
static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
				    u8 ident, u16 flags)
{
	struct l2cap_conn *conn = chan->conn;

	BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
	       flags);

	clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
	set_bit(CONF_OUTPUT_DONE, &chan->conf_state);

	l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
		       l2cap_build_conf_rsp(chan, data,
					    L2CAP_CONF_SUCCESS, flags), data);
}
4035
4036 static void cmd_reject_invalid_cid(struct l2cap_conn *conn, u8 ident,
4037 u16 scid, u16 dcid)
4038 {
4039 struct l2cap_cmd_rej_cid rej;
4040
4041 rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
4042 rej.scid = __cpu_to_le16(scid);
4043 rej.dcid = __cpu_to_le16(dcid);
4044
4045 l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
4046 }
4047
/* Handle a Configure Request for one of our channels. Options may span
 * several requests (continuation flag); they are accumulated in
 * chan->conf_req until complete, then parsed and answered. When both
 * directions are configured the channel is brought up.
 */
static inline int l2cap_config_req(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rsp[64];
	struct l2cap_chan *chan;
	int len, err = 0;

	if (cmd_len < sizeof(*req))
		return -EPROTO;

	dcid = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	chan = l2cap_get_chan_by_scid(conn, dcid);
	if (!chan) {
		cmd_reject_invalid_cid(conn, cmd->ident, dcid, 0);
		return 0;
	}

	/* Configuration is only legal while connecting/configuring */
	if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
		cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
				       chan->dcid);
		goto unlock;
	}

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (chan->conf_len + len > sizeof(chan->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
			       L2CAP_CONF_REJECT, flags), rsp);
		goto unlock;
	}

	/* Store config. */
	memcpy(chan->conf_req + chan->conf_len, req->data, len);
	chan->conf_len += len;

	if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
			       L2CAP_CONF_SUCCESS, flags), rsp);
		goto unlock;
	}

	/* Complete config. */
	len = l2cap_parse_conf_req(chan, rsp);
	if (len < 0) {
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto unlock;
	}

	chan->ident = cmd->ident;
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
	chan->num_conf_rsp++;

	/* Reset config buffer. */
	chan->conf_len = 0;

	if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
		goto unlock;

	/* Both directions configured: initialize the chosen mode and
	 * declare the channel ready.
	 */
	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);

		goto unlock;
	}

	/* Our outgoing Configure Request may not have been sent yet */
	if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
		u8 buf[64];
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf), buf);
		chan->num_conf_req++;
	}

	/* Got Conf Rsp PENDING from remote side and assume we sent
	   Conf Rsp PENDING in the code above */
	if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
	    test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {

		/* check compatibility */

		/* Send rsp for BR/EDR channel */
		if (!chan->hs_hcon)
			l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
		else
			chan->ident = cmd->ident;
	}

unlock:
	l2cap_chan_unlock(chan);
	return err;
}
4156
/* Handle a Configure Response from the peer. On success the negotiated
 * parameters are applied; on "unaccept" we retry with the peer's proposed
 * values (up to a limit); anything else disconnects the channel.
 */
static inline int l2cap_config_rsp(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
	u16 scid, flags, result;
	struct l2cap_chan *chan;
	int len = cmd_len - sizeof(*rsp);
	int err = 0;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	scid = __le16_to_cpu(rsp->scid);
	flags = __le16_to_cpu(rsp->flags);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
	       result, len);

	chan = l2cap_get_chan_by_scid(conn, scid);
	if (!chan)
		return 0;

	switch (result) {
	case L2CAP_CONF_SUCCESS:
		l2cap_conf_rfc_get(chan, rsp->data, len);
		clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
		break;

	case L2CAP_CONF_PENDING:
		set_bit(CONF_REM_CONF_PEND, &chan->conf_state);

		if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
			char buf[64];

			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
						   buf, &result);
			if (len < 0) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			/* AMP channels wait for the logical link before
			 * answering; BR/EDR channels answer immediately.
			 */
			if (!chan->hs_hcon) {
				l2cap_send_efs_conf_rsp(chan, buf, cmd->ident,
							0);
			} else {
				if (l2cap_check_efs(chan)) {
					amp_create_logical_link(chan);
					chan->ident = cmd->ident;
				}
			}
		}
		goto done;

	case L2CAP_CONF_UNACCEPT:
		if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
			char req[64];

			if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			/* throw out any old stored conf requests */
			result = L2CAP_CONF_SUCCESS;
			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
						   req, &result);
			if (len < 0) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			l2cap_send_cmd(conn, l2cap_get_ident(conn),
				       L2CAP_CONF_REQ, len, req);
			chan->num_conf_req++;
			if (result != L2CAP_CONF_SUCCESS)
				goto done;
			break;
		}
		/* Too many rejections: fall through and disconnect */

	default:
		l2cap_chan_set_err(chan, ECONNRESET);

		__set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto done;
	}

	if (flags & L2CAP_CONF_FLAG_CONTINUATION)
		goto done;

	set_bit(CONF_INPUT_DONE, &chan->conf_state);

	/* If our outgoing config is also done, bring the channel up */
	if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);
	}

done:
	l2cap_chan_unlock(chan);
	return err;
}
4268
/* Handle a Disconnection Request: acknowledge it with a Disconnection
 * Response and tear the channel down. A reference is held across
 * l2cap_chan_del()/close() so the channel stays valid until we are done.
 */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct l2cap_chan *chan;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	mutex_lock(&conn->chan_lock);

	/* The peer's dcid is our scid */
	chan = __l2cap_get_chan_by_scid(conn, dcid);
	if (!chan) {
		mutex_unlock(&conn->chan_lock);
		cmd_reject_invalid_cid(conn, cmd->ident, dcid, scid);
		return 0;
	}

	l2cap_chan_lock(chan);

	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.scid = cpu_to_le16(chan->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	chan->ops->set_shutdown(chan);

	/* Hold a ref: chan_del drops the conn's reference */
	l2cap_chan_hold(chan);
	l2cap_chan_del(chan, ECONNRESET);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);

	return 0;
}
4315
/* Handle a Disconnection Response to our earlier request: the peer has
 * acknowledged, so delete and close the channel.
 */
static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
	u16 dcid, scid;
	struct l2cap_chan *chan;

	if (cmd_len != sizeof(*rsp))
		return -EPROTO;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);

	mutex_lock(&conn->chan_lock);

	chan = __l2cap_get_chan_by_scid(conn, scid);
	if (!chan) {
		mutex_unlock(&conn->chan_lock);
		return 0;
	}

	l2cap_chan_lock(chan);

	/* Hold a ref: chan_del drops the conn's reference */
	l2cap_chan_hold(chan);
	l2cap_chan_del(chan, 0);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);

	return 0;
}
4354
/* Handle an Information Request: answer with our feature mask, the fixed
 * channel map, or "not supported" for unknown types.
 */
static inline int l2cap_information_req(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
					u8 *data)
{
	struct l2cap_info_req *req = (struct l2cap_info_req *) data;
	u16 type;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	type = __le16_to_cpu(req->type);

	BT_DBG("type 0x%4.4x", type);

	if (type == L2CAP_IT_FEAT_MASK) {
		u8 buf[8];
		u32 feat_mask = l2cap_feat_mask;
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
		rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		/* Advertise ERTM/Streaming/FCS unless disabled, and the
		 * extended features when high-speed (AMP) is enabled.
		 */
		if (!disable_ertm)
			feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
				| L2CAP_FEAT_FCS;
		if (conn->hs_enabled)
			feat_mask |= L2CAP_FEAT_EXT_FLOW
				| L2CAP_FEAT_EXT_WINDOW;

		put_unaligned_le32(feat_mask, rsp->data);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
			       buf);
	} else if (type == L2CAP_IT_FIXED_CHAN) {
		u8 buf[12];
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;

		/* Reflect A2MP availability in the fixed channel map */
		if (conn->hs_enabled)
			l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
		else
			l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;

		rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
			       buf);
	} else {
		struct l2cap_info_rsp rsp;
		rsp.type = cpu_to_le16(type);
		rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
			       &rsp);
	}

	return 0;
}
4409
/* Handle an Information Response to our request. On a feature mask reply
 * we may follow up with a fixed-channel query; once the exchange finishes
 * (either way), pending connections are started.
 */
static inline int l2cap_information_rsp(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
					u8 *data)
{
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
	u16 type, result;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	type = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);

	/* L2CAP Info req/rsp are unbound to channels, add extra checks */
	if (cmd->ident != conn->info_ident ||
	    conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
		return 0;

	cancel_delayed_work(&conn->info_timer);

	if (result != L2CAP_IR_SUCCESS) {
		/* Peer refused: mark the exchange done and move on */
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);

		return 0;
	}

	switch (type) {
	case L2CAP_IT_FEAT_MASK:
		conn->feat_mask = get_unaligned_le32(rsp->data);

		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
			/* Ask for the fixed channel map next */
			struct l2cap_info_req req;
			req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);

			conn->info_ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, conn->info_ident,
				       L2CAP_INFO_REQ, sizeof(req), &req);
		} else {
			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
			conn->info_ident = 0;

			l2cap_conn_start(conn);
		}
		break;

	case L2CAP_IT_FIXED_CHAN:
		conn->fixed_chan_mask = rsp->data[0];
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
		break;
	}

	return 0;
}
4472
/* Handle an AMP Create Channel Request. Controller id 0 (BR/EDR) is
 * treated as a normal connection; otherwise the AMP controller id is
 * validated and the new channel is bound to the AMP logical link.
 */
static int l2cap_create_channel_req(struct l2cap_conn *conn,
				    struct l2cap_cmd_hdr *cmd,
				    u16 cmd_len, void *data)
{
	struct l2cap_create_chan_req *req = data;
	struct l2cap_create_chan_rsp rsp;
	struct l2cap_chan *chan;
	struct hci_dev *hdev;
	u16 psm, scid;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	if (!conn->hs_enabled)
		return -EINVAL;

	psm = le16_to_cpu(req->psm);
	scid = le16_to_cpu(req->scid);

	BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);

	/* For controller id 0 make BR/EDR connection */
	if (req->amp_id == AMP_ID_BREDR) {
		l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
			      req->amp_id);
		return 0;
	}

	/* Validate AMP controller id */
	hdev = hci_dev_get(req->amp_id);
	if (!hdev)
		goto error;

	if (hdev->dev_type != HCI_AMP || !test_bit(HCI_UP, &hdev->flags)) {
		hci_dev_put(hdev);
		goto error;
	}

	chan = l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
			     req->amp_id);
	if (chan) {
		struct amp_mgr *mgr = conn->hcon->amp_mgr;
		struct hci_conn *hs_hcon;

		/* The AMP physical link must already exist */
		hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK,
						  &conn->hcon->dst);
		if (!hs_hcon) {
			hci_dev_put(hdev);
			cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
					       chan->dcid);
			return 0;
		}

		BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon);

		mgr->bredr_chan = chan;
		chan->hs_hcon = hs_hcon;
		chan->fcs = L2CAP_FCS_NONE;
		conn->mtu = hdev->block_mtu;
	}

	hci_dev_put(hdev);

	return 0;

error:
	rsp.dcid = 0;
	rsp.scid = cpu_to_le16(scid);
	rsp.result = cpu_to_le16(L2CAP_CR_BAD_AMP);
	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
		       sizeof(rsp), &rsp);

	return 0;
}
4549
4550 static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id)
4551 {
4552 struct l2cap_move_chan_req req;
4553 u8 ident;
4554
4555 BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id);
4556
4557 ident = l2cap_get_ident(chan->conn);
4558 chan->ident = ident;
4559
4560 req.icid = cpu_to_le16(chan->scid);
4561 req.dest_amp_id = dest_amp_id;
4562
4563 l2cap_send_cmd(chan->conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req),
4564 &req);
4565
4566 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4567 }
4568
4569 static void l2cap_send_move_chan_rsp(struct l2cap_chan *chan, u16 result)
4570 {
4571 struct l2cap_move_chan_rsp rsp;
4572
4573 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4574
4575 rsp.icid = cpu_to_le16(chan->dcid);
4576 rsp.result = cpu_to_le16(result);
4577
4578 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_RSP,
4579 sizeof(rsp), &rsp);
4580 }
4581
4582 static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result)
4583 {
4584 struct l2cap_move_chan_cfm cfm;
4585
4586 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4587
4588 chan->ident = l2cap_get_ident(chan->conn);
4589
4590 cfm.icid = cpu_to_le16(chan->scid);
4591 cfm.result = cpu_to_le16(result);
4592
4593 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_CFM,
4594 sizeof(cfm), &cfm);
4595
4596 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4597 }
4598
4599 static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid)
4600 {
4601 struct l2cap_move_chan_cfm cfm;
4602
4603 BT_DBG("conn %p, icid 0x%4.4x", conn, icid);
4604
4605 cfm.icid = cpu_to_le16(icid);
4606 cfm.result = cpu_to_le16(L2CAP_MC_UNCONFIRMED);
4607
4608 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM,
4609 sizeof(cfm), &cfm);
4610 }
4611
4612 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4613 u16 icid)
4614 {
4615 struct l2cap_move_chan_cfm_rsp rsp;
4616
4617 BT_DBG("icid 0x%4.4x", icid);
4618
4619 rsp.icid = cpu_to_le16(icid);
4620 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
4621 }
4622
/* Detach the channel from its AMP logical link state. The actual link
 * release is not implemented here (placeholder below).
 */
static void __release_logical_link(struct l2cap_chan *chan)
{
	chan->hs_hchan = NULL;
	chan->hs_hcon = NULL;

	/* Placeholder - release the logical link */
}
4630
/* React to a failed AMP logical link setup: disconnect a channel still
 * being created, or unwind an in-progress channel move according to our
 * role in it.
 */
static void l2cap_logical_fail(struct l2cap_chan *chan)
{
	/* Logical link setup failed */
	if (chan->state != BT_CONNECTED) {
		/* Create channel failure, disconnect */
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	switch (chan->move_role) {
	case L2CAP_MOVE_ROLE_RESPONDER:
		l2cap_move_done(chan);
		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_SUPP);
		break;
	case L2CAP_MOVE_ROLE_INITIATOR:
		if (chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_COMP ||
		    chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_CFM) {
			/* Remote has only sent pending or
			 * success responses, clean up
			 */
			l2cap_move_done(chan);
		}

		/* Other amp move states imply that the move
		 * has already aborted
		 */
		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
		break;
	}
}
4661
/* Finish creating a channel on an AMP controller once the logical link is
 * up: send the deferred EFS Configure Response and, if the peer's config
 * is already in, initialize ERTM and mark the channel ready.
 */
static void l2cap_logical_finish_create(struct l2cap_chan *chan,
					struct hci_chan *hchan)
{
	struct l2cap_conf_rsp rsp;

	chan->hs_hchan = hchan;
	chan->hs_hcon->l2cap_data = chan->conn;

	l2cap_send_efs_conf_rsp(chan, &rsp, chan->ident, 0);

	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
		int err;

		set_default_fcs(chan);

		err = l2cap_ertm_init(chan);
		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);
	}
}
4684
/* Advance a channel move after the AMP logical link came up, according to
 * the current move state and our role (initiator/responder).
 */
static void l2cap_logical_finish_move(struct l2cap_chan *chan,
				      struct hci_chan *hchan)
{
	chan->hs_hcon = hchan->conn;
	chan->hs_hcon->l2cap_data = chan->conn;

	BT_DBG("move_state %d", chan->move_state);

	switch (chan->move_state) {
	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
		/* Move confirm will be sent after a success
		 * response is received
		 */
		chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
		break;
	case L2CAP_MOVE_WAIT_LOGICAL_CFM:
		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
			/* Wait until local busy clears before confirming */
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
		} else if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		} else if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
		}
		break;
	default:
		/* Move was not in expected state, free the channel */
		__release_logical_link(chan);

		chan->move_state = L2CAP_MOVE_STABLE;
	}
}
4718
/* Completion callback for AMP logical link setup. On failure the setup is
 * unwound; on success the link is handed to channel creation or to the
 * move state machine depending on the channel's state.
 * Call with chan locked.
 */
void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
		       u8 status)
{
	BT_DBG("chan %p, hchan %p, status %d", chan, hchan, status);

	if (status) {
		l2cap_logical_fail(chan);
		__release_logical_link(chan);
		return;
	}

	if (chan->state != BT_CONNECTED) {
		/* Ignore logical link if channel is on BR/EDR */
		if (chan->local_amp_id != AMP_ID_BREDR)
			l2cap_logical_finish_create(chan, hchan);
	} else {
		l2cap_logical_finish_move(chan, hchan);
	}
}
4739
4740 void l2cap_move_start(struct l2cap_chan *chan)
4741 {
4742 BT_DBG("chan %p", chan);
4743
4744 if (chan->local_amp_id == AMP_ID_BREDR) {
4745 if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED)
4746 return;
4747 chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4748 chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
4749 /* Placeholder - start physical link setup */
4750 } else {
4751 chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4752 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4753 chan->move_id = 0;
4754 l2cap_move_setup(chan);
4755 l2cap_send_move_chan_req(chan, 0);
4756 }
4757 }
4758
/* Physical link setup finished for a channel being created on an AMP.
 * For an outgoing channel (BT_CONNECT) either send the Create Channel
 * request on success or fall back to a plain BR/EDR Connect request.
 * For an incoming channel, answer the pending Create Channel request
 * and, on success, move to BT_CONFIG and start configuration.
 */
static void l2cap_do_create(struct l2cap_chan *chan, int result,
			    u8 local_amp_id, u8 remote_amp_id)
{
	BT_DBG("chan %p state %s %u -> %u", chan, state_to_string(chan->state),
	       local_amp_id, remote_amp_id);

	/* FCS is disabled for channels created via this AMP path */
	chan->fcs = L2CAP_FCS_NONE;

	/* Outgoing channel on AMP */
	if (chan->state == BT_CONNECT) {
		if (result == L2CAP_CR_SUCCESS) {
			chan->local_amp_id = local_amp_id;
			l2cap_send_create_chan_req(chan, remote_amp_id);
		} else {
			/* Revert to BR/EDR connect */
			l2cap_send_conn_req(chan);
		}

		return;
	}

	/* Incoming channel on AMP */
	if (__l2cap_no_conn_pending(chan)) {
		struct l2cap_conn_rsp rsp;
		char buf[128];
		rsp.scid = cpu_to_le16(chan->dcid);
		rsp.dcid = cpu_to_le16(chan->scid);

		if (result == L2CAP_CR_SUCCESS) {
			/* Send successful response */
			rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
		} else {
			/* Send negative response */
			rsp.result = cpu_to_le16(L2CAP_CR_NO_MEM);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
		}

		l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_RSP,
			       sizeof(rsp), &rsp);

		if (result == L2CAP_CR_SUCCESS) {
			l2cap_state_change(chan, BT_CONFIG);
			set_bit(CONF_REQ_SENT, &chan->conf_state);
			l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
				       L2CAP_CONF_REQ,
				       l2cap_build_conf_req(chan, buf), buf);
			chan->num_conf_req++;
		}
	}
}
4810
4811 static void l2cap_do_move_initiate(struct l2cap_chan *chan, u8 local_amp_id,
4812 u8 remote_amp_id)
4813 {
4814 l2cap_move_setup(chan);
4815 chan->move_id = local_amp_id;
4816 chan->move_state = L2CAP_MOVE_WAIT_RSP;
4817
4818 l2cap_send_move_chan_req(chan, remote_amp_id);
4819 }
4820
/* Responder side of a move once the physical link is ready.  With a
 * connected logical link the move is accepted immediately; with a link
 * still coming up we wait for its confirmation; with no link at all
 * the move is refused.
 *
 * NOTE(review): hchan is currently always NULL (lookup is still a
 * placeholder), so only the refusal path can execute until the
 * hci_chan lookup is implemented.
 */
static void l2cap_do_move_respond(struct l2cap_chan *chan, int result)
{
	struct hci_chan *hchan = NULL;

	/* Placeholder - get hci_chan for logical link */

	if (hchan) {
		if (hchan->state == BT_CONNECTED) {
			/* Logical link is ready to go */
			chan->hs_hcon = hchan->conn;
			chan->hs_hcon->l2cap_data = chan->conn;
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);

			l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
		} else {
			/* Wait for logical link to be ready */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		}
	} else {
		/* Logical link not available */
		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_ALLOWED);
	}
}
4845
4846 static void l2cap_do_move_cancel(struct l2cap_chan *chan, int result)
4847 {
4848 if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4849 u8 rsp_result;
4850 if (result == -EINVAL)
4851 rsp_result = L2CAP_MR_BAD_ID;
4852 else
4853 rsp_result = L2CAP_MR_NOT_ALLOWED;
4854
4855 l2cap_send_move_chan_rsp(chan, rsp_result);
4856 }
4857
4858 chan->move_role = L2CAP_MOVE_ROLE_NONE;
4859 chan->move_state = L2CAP_MOVE_STABLE;
4860
4861 /* Restart data transmission */
4862 l2cap_ertm_send(chan);
4863 }
4864
/* Invoke with locked chan.
 *
 * Physical link (AMP PHY) setup completed with the given result.
 * Dispatches to channel creation, move initiation/response, or move
 * cancellation depending on channel state and move role.
 *
 * NOTE(review): the BT_DISCONN/BT_CLOSED early return drops the
 * channel lock itself while every other path leaves it held — confirm
 * the caller's unlock expectations match this asymmetry.
 */
void __l2cap_physical_cfm(struct l2cap_chan *chan, int result)
{
	u8 local_amp_id = chan->local_amp_id;
	u8 remote_amp_id = chan->remote_amp_id;

	BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
	       chan, result, local_amp_id, remote_amp_id);

	if (chan->state == BT_DISCONN || chan->state == BT_CLOSED) {
		l2cap_chan_unlock(chan);
		return;
	}

	if (chan->state != BT_CONNECTED) {
		l2cap_do_create(chan, result, local_amp_id, remote_amp_id);
	} else if (result != L2CAP_MR_SUCCESS) {
		l2cap_do_move_cancel(chan, result);
	} else {
		switch (chan->move_role) {
		case L2CAP_MOVE_ROLE_INITIATOR:
			l2cap_do_move_initiate(chan, local_amp_id,
					       remote_amp_id);
			break;
		case L2CAP_MOVE_ROLE_RESPONDER:
			l2cap_do_move_respond(chan, result);
			break;
		default:
			l2cap_do_move_cancel(chan, result);
			break;
		}
	}
}
4898
/* Handle an incoming Move Channel Request.  Validates that the target
 * channel exists and is movable (dynamic CID, ERTM or streaming mode,
 * policy not BR/EDR-only), that the destination is a usable AMP
 * controller, resolves move collisions by bd_addr comparison, then
 * starts the responder side of the move state machine and sends the
 * Move Channel Response.
 *
 * l2cap_get_chan_by_dcid() returns the channel locked; it is unlocked
 * after the response has been sent.
 */
static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
					 struct l2cap_cmd_hdr *cmd,
					 u16 cmd_len, void *data)
{
	struct l2cap_move_chan_req *req = data;
	struct l2cap_move_chan_rsp rsp;
	struct l2cap_chan *chan;
	u16 icid = 0;
	u16 result = L2CAP_MR_NOT_ALLOWED;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	icid = le16_to_cpu(req->icid);

	BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);

	if (!conn->hs_enabled)
		return -EINVAL;

	chan = l2cap_get_chan_by_dcid(conn, icid);
	if (!chan) {
		/* Unknown channel: refuse without touching any state */
		rsp.icid = cpu_to_le16(icid);
		rsp.result = cpu_to_le16(L2CAP_MR_NOT_ALLOWED);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_MOVE_CHAN_RSP,
			       sizeof(rsp), &rsp);
		return 0;
	}

	chan->ident = cmd->ident;

	if (chan->scid < L2CAP_CID_DYN_START ||
	    chan->chan_policy == BT_CHANNEL_POLICY_BREDR_ONLY ||
	    (chan->mode != L2CAP_MODE_ERTM &&
	     chan->mode != L2CAP_MODE_STREAMING)) {
		result = L2CAP_MR_NOT_ALLOWED;
		goto send_move_response;
	}

	if (chan->local_amp_id == req->dest_amp_id) {
		result = L2CAP_MR_SAME_ID;
		goto send_move_response;
	}

	/* The destination controller must exist, be an AMP, and be up */
	if (req->dest_amp_id != AMP_ID_BREDR) {
		struct hci_dev *hdev;
		hdev = hci_dev_get(req->dest_amp_id);
		if (!hdev || hdev->dev_type != HCI_AMP ||
		    !test_bit(HCI_UP, &hdev->flags)) {
			if (hdev)
				hci_dev_put(hdev);

			result = L2CAP_MR_BAD_ID;
			goto send_move_response;
		}
		hci_dev_put(hdev);
	}

	/* Detect a move collision. Only send a collision response
	 * if this side has "lost", otherwise proceed with the move.
	 * The winner has the larger bd_addr.
	 */
	if ((__chan_is_moving(chan) ||
	     chan->move_role != L2CAP_MOVE_ROLE_NONE) &&
	    bacmp(&conn->hcon->src, &conn->hcon->dst) > 0) {
		result = L2CAP_MR_COLLISION;
		goto send_move_response;
	}

	chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
	l2cap_move_setup(chan);
	chan->move_id = req->dest_amp_id;
	icid = chan->dcid;

	if (req->dest_amp_id == AMP_ID_BREDR) {
		/* Moving to BR/EDR */
		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
			result = L2CAP_MR_PEND;
		} else {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			result = L2CAP_MR_SUCCESS;
		}
	} else {
		chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
		/* Placeholder - uncomment when amp functions are available */
		/*amp_accept_physical(chan, req->dest_amp_id);*/
		result = L2CAP_MR_PEND;
	}

send_move_response:
	l2cap_send_move_chan_rsp(chan, result);

	l2cap_chan_unlock(chan);

	return 0;
}
4996
/* The peer answered our Move Channel Request with success or pending.
 * Advance the initiator state machine: confirm the move once both the
 * peer's success and (when moving to an AMP) the logical link are
 * ready, or roll the move back on an unexpected state.
 *
 * l2cap_get_chan_by_scid() returns the channel locked; unlocked at the
 * end.  A pending result re-arms the move response timeout.
 */
static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
{
	struct l2cap_chan *chan;
	struct hci_chan *hchan = NULL;

	chan = l2cap_get_chan_by_scid(conn, icid);
	if (!chan) {
		/* Unknown channel: confirm-unconfirmed using the icid */
		l2cap_send_move_chan_cfm_icid(conn, icid);
		return;
	}

	__clear_chan_timer(chan);
	if (result == L2CAP_MR_PEND)
		__set_chan_timer(chan, L2CAP_MOVE_ERTX_TIMEOUT);

	switch (chan->move_state) {
	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
		/* Move confirm will be sent when logical link
		 * is complete.
		 */
		chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		break;
	case L2CAP_MOVE_WAIT_RSP_SUCCESS:
		if (result == L2CAP_MR_PEND) {
			break;
		} else if (test_bit(CONN_LOCAL_BUSY,
				    &chan->conn_state)) {
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
		} else {
			/* Logical link is up or moving to BR/EDR,
			 * proceed with move
			 */
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		}
		break;
	case L2CAP_MOVE_WAIT_RSP:
		/* Moving to AMP */
		if (result == L2CAP_MR_SUCCESS) {
			/* Remote is ready, send confirm immediately
			 * after logical link is ready
			 */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		} else {
			/* Both logical link and move success
			 * are required to confirm
			 */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_COMP;
		}

		/* Placeholder - get hci_chan for logical link */
		if (!hchan) {
			/* Logical link not available */
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
			break;
		}

		/* If the logical link is not yet connected, do not
		 * send confirmation.
		 */
		if (hchan->state != BT_CONNECTED)
			break;

		/* Logical link is already ready to go */

		chan->hs_hcon = hchan->conn;
		chan->hs_hcon->l2cap_data = chan->conn;

		if (result == L2CAP_MR_SUCCESS) {
			/* Can confirm now */
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		} else {
			/* Now only need move success
			 * to confirm
			 */
			chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
		}

		l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
		break;
	default:
		/* Any other amp move state means the move failed. */
		chan->move_id = chan->local_amp_id;
		l2cap_move_done(chan);
		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
	}

	l2cap_chan_unlock(chan);
}
5086
/* The peer rejected our Move Channel Request.  On a collision the
 * initiator demotes itself to responder (the peer's move wins);
 * otherwise the move is rolled back to the current controller.  An
 * unconfirmed move confirm is always sent.
 */
static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
			    u16 result)
{
	struct l2cap_chan *chan;

	chan = l2cap_get_chan_by_ident(conn, ident);
	if (!chan) {
		/* Could not locate channel, icid is best guess */
		l2cap_send_move_chan_cfm_icid(conn, icid);
		return;
	}

	__clear_chan_timer(chan);

	if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
		if (result == L2CAP_MR_COLLISION) {
			chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
		} else {
			/* Cleanup - cancel move */
			chan->move_id = chan->local_amp_id;
			l2cap_move_done(chan);
		}
	}

	l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);

	l2cap_chan_unlock(chan);
}
5115
5116 static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
5117 struct l2cap_cmd_hdr *cmd,
5118 u16 cmd_len, void *data)
5119 {
5120 struct l2cap_move_chan_rsp *rsp = data;
5121 u16 icid, result;
5122
5123 if (cmd_len != sizeof(*rsp))
5124 return -EPROTO;
5125
5126 icid = le16_to_cpu(rsp->icid);
5127 result = le16_to_cpu(rsp->result);
5128
5129 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5130
5131 if (result == L2CAP_MR_SUCCESS || result == L2CAP_MR_PEND)
5132 l2cap_move_continue(conn, icid, result);
5133 else
5134 l2cap_move_fail(conn, cmd->ident, icid, result);
5135
5136 return 0;
5137 }
5138
/* Handle a Move Channel Confirmation: commit the controller switch on
 * a confirmed result (releasing the logical link when landing back on
 * BR/EDR) or roll it back on unconfirmed.  A confirm response is sent
 * in every case, even for an unknown icid, as the spec requires.
 */
static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
				      struct l2cap_cmd_hdr *cmd,
				      u16 cmd_len, void *data)
{
	struct l2cap_move_chan_cfm *cfm = data;
	struct l2cap_chan *chan;
	u16 icid, result;

	if (cmd_len != sizeof(*cfm))
		return -EPROTO;

	icid = le16_to_cpu(cfm->icid);
	result = le16_to_cpu(cfm->result);

	BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);

	chan = l2cap_get_chan_by_dcid(conn, icid);
	if (!chan) {
		/* Spec requires a response even if the icid was not found */
		l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
		return 0;
	}

	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) {
		if (result == L2CAP_MC_CONFIRMED) {
			chan->local_amp_id = chan->move_id;
			if (chan->local_amp_id == AMP_ID_BREDR)
				__release_logical_link(chan);
		} else {
			chan->move_id = chan->local_amp_id;
		}

		l2cap_move_done(chan);
	}

	l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);

	l2cap_chan_unlock(chan);

	return 0;
}
5180
/* Final step of a channel move: the peer acknowledged our move
 * confirm, so commit the new controller id, drop the logical link if
 * we moved back to BR/EDR, and finish the move.
 */
static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
						 struct l2cap_cmd_hdr *cmd,
						 u16 cmd_len, void *data)
{
	struct l2cap_move_chan_cfm_rsp *rsp = data;
	struct l2cap_chan *chan;
	u16 icid;

	if (cmd_len != sizeof(*rsp))
		return -EPROTO;

	icid = le16_to_cpu(rsp->icid);

	BT_DBG("icid 0x%4.4x", icid);

	chan = l2cap_get_chan_by_scid(conn, icid);
	if (!chan)
		return 0;

	__clear_chan_timer(chan);

	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) {
		chan->local_amp_id = chan->move_id;

		if (chan->local_amp_id == AMP_ID_BREDR && chan->hs_hchan)
			__release_logical_link(chan);

		l2cap_move_done(chan);
	}

	l2cap_chan_unlock(chan);

	return 0;
}
5215
/* Handle an LE Connection Parameter Update Request.  Only valid when
 * we are master of the link.  The proposed parameters are validated by
 * hci_check_conn_params(); a response is always sent, and accepted
 * parameters are pushed to the controller and reported via mgmt.
 */
static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
					      struct l2cap_cmd_hdr *cmd,
					      u16 cmd_len, u8 *data)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_conn_param_update_req *req;
	struct l2cap_conn_param_update_rsp rsp;
	u16 min, max, latency, to_multiplier;
	int err;

	if (!test_bit(HCI_CONN_MASTER, &hcon->flags))
		return -EINVAL;

	if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
		return -EPROTO;

	req = (struct l2cap_conn_param_update_req *) data;
	min = __le16_to_cpu(req->min);
	max = __le16_to_cpu(req->max);
	latency = __le16_to_cpu(req->latency);
	to_multiplier = __le16_to_cpu(req->to_multiplier);

	BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
	       min, max, latency, to_multiplier);

	memset(&rsp, 0, sizeof(rsp));

	err = hci_check_conn_params(min, max, latency, to_multiplier);
	if (err)
		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
	else
		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
		       sizeof(rsp), &rsp);

	if (!err) {
		mgmt_new_conn_param(hcon->hdev, &hcon->dst, hcon->dst_type,
				    min, max, latency, to_multiplier);

		hci_le_conn_update(hcon, min, max, latency, to_multiplier);
	}

	return 0;
}
5261
/* Handle an LE Credit Based Connection Response to a request we sent
 * (matched by cmd->ident).  On success the channel adopts the peer's
 * parameters and becomes ready; any other result tears the channel
 * down with ECONNREFUSED.
 */
static int l2cap_le_connect_rsp(struct l2cap_conn *conn,
				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				u8 *data)
{
	struct l2cap_le_conn_rsp *rsp = (struct l2cap_le_conn_rsp *) data;
	u16 dcid, mtu, mps, credits, result;
	struct l2cap_chan *chan;
	int err;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	dcid = __le16_to_cpu(rsp->dcid);
	mtu = __le16_to_cpu(rsp->mtu);
	mps = __le16_to_cpu(rsp->mps);
	credits = __le16_to_cpu(rsp->credits);
	result = __le16_to_cpu(rsp->result);

	/* MTU and MPS below 23 octets are rejected as protocol errors */
	if (result == L2CAP_CR_SUCCESS && (mtu < 23 || mps < 23))
		return -EPROTO;

	BT_DBG("dcid 0x%4.4x mtu %u mps %u credits %u result 0x%2.2x",
	       dcid, mtu, mps, credits, result);

	mutex_lock(&conn->chan_lock);

	chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
	if (!chan) {
		err = -EBADSLT;
		goto unlock;
	}

	err = 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_SUCCESS:
		chan->ident = 0;
		chan->dcid = dcid;
		chan->omtu = mtu;
		chan->remote_mps = mps;
		chan->tx_credits = credits;
		l2cap_chan_ready(chan);
		break;

	default:
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	l2cap_chan_unlock(chan);

unlock:
	mutex_unlock(&conn->chan_lock);

	return err;
}
5320
/* Dispatch a single BR/EDR signaling command to its handler.  Echo
 * requests are answered inline; an unknown opcode returns -EINVAL so
 * the caller can send a Command Reject.
 */
static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
				      struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				      u8 *data)
{
	int err = 0;

	switch (cmd->code) {
	case L2CAP_COMMAND_REJ:
		l2cap_command_rej(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_REQ:
		err = l2cap_connect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_RSP:
	case L2CAP_CREATE_CHAN_RSP:
		l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONF_REQ:
		err = l2cap_config_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONF_RSP:
		l2cap_config_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_REQ:
		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_RSP:
		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_ECHO_REQ:
		/* Echo the payload straight back */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
		break;

	case L2CAP_ECHO_RSP:
		break;

	case L2CAP_INFO_REQ:
		err = l2cap_information_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_INFO_RSP:
		l2cap_information_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CREATE_CHAN_REQ:
		err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_REQ:
		err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_RSP:
		l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_CFM:
		err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_CFM_RSP:
		l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
		break;

	default:
		BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
		err = -EINVAL;
		break;
	}

	return err;
}
5400
/* Handle an LE Credit Based Connection Request: locate a listening
 * channel for the PSM, check security level and duplicate source CIDs,
 * create and initialize the new channel, then either defer (socket
 * uses BT_DEFER_SETUP) or complete the connection.  A response is sent
 * for every outcome except a deferred (pending) one.
 */
static int l2cap_le_connect_req(struct l2cap_conn *conn,
				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				u8 *data)
{
	struct l2cap_le_conn_req *req = (struct l2cap_le_conn_req *) data;
	struct l2cap_le_conn_rsp rsp;
	struct l2cap_chan *chan, *pchan;
	u16 dcid, scid, credits, mtu, mps;
	__le16 psm;
	u8 result;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	scid = __le16_to_cpu(req->scid);
	mtu = __le16_to_cpu(req->mtu);
	mps = __le16_to_cpu(req->mps);
	psm = req->psm;
	dcid = 0;
	credits = 0;

	/* MTU and MPS below 23 octets are rejected as protocol errors */
	if (mtu < 23 || mps < 23)
		return -EPROTO;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x mtu %u mps %u", __le16_to_cpu(psm),
	       scid, mtu, mps);

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
					 &conn->hcon->dst, LE_LINK);
	if (!pchan) {
		result = L2CAP_CR_BAD_PSM;
		chan = NULL;
		goto response;
	}

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(pchan);

	if (!smp_sufficient_security(conn->hcon, pchan->sec_level)) {
		result = L2CAP_CR_AUTHENTICATION;
		chan = NULL;
		goto response_unlock;
	}

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid)) {
		result = L2CAP_CR_NO_MEM;
		chan = NULL;
		goto response_unlock;
	}

	chan = pchan->ops->new_connection(pchan);
	if (!chan) {
		result = L2CAP_CR_NO_MEM;
		goto response_unlock;
	}

	l2cap_le_flowctl_init(chan);

	/* Adopt addresses and the peer's channel parameters */
	bacpy(&chan->src, &conn->hcon->src);
	bacpy(&chan->dst, &conn->hcon->dst);
	chan->src_type = bdaddr_type(conn->hcon, conn->hcon->src_type);
	chan->dst_type = bdaddr_type(conn->hcon, conn->hcon->dst_type);
	chan->psm  = psm;
	chan->dcid = scid;
	chan->omtu = mtu;
	chan->remote_mps = mps;
	chan->tx_credits = __le16_to_cpu(req->credits);

	__l2cap_chan_add(conn, chan);
	dcid = chan->scid;
	credits = chan->rx_credits;

	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	chan->ident = cmd->ident;

	if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
		l2cap_state_change(chan, BT_CONNECT2);
		result = L2CAP_CR_PEND;
		chan->ops->defer(chan);
	} else {
		l2cap_chan_ready(chan);
		result = L2CAP_CR_SUCCESS;
	}

response_unlock:
	l2cap_chan_unlock(pchan);
	mutex_unlock(&conn->chan_lock);

	/* Deferred setup: the response is sent later, on accept */
	if (result == L2CAP_CR_PEND)
		return 0;

response:
	if (chan) {
		rsp.mtu = cpu_to_le16(chan->imtu);
		rsp.mps = cpu_to_le16(chan->mps);
	} else {
		rsp.mtu = 0;
		rsp.mps = 0;
	}

	rsp.dcid = cpu_to_le16(dcid);
	rsp.credits = cpu_to_le16(credits);
	rsp.result = cpu_to_le16(result);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_LE_CONN_RSP, sizeof(rsp), &rsp);

	return 0;
}
5512
5513 static inline int l2cap_le_credits(struct l2cap_conn *conn,
5514 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5515 u8 *data)
5516 {
5517 struct l2cap_le_credits *pkt;
5518 struct l2cap_chan *chan;
5519 u16 cid, credits, max_credits;
5520
5521 if (cmd_len != sizeof(*pkt))
5522 return -EPROTO;
5523
5524 pkt = (struct l2cap_le_credits *) data;
5525 cid = __le16_to_cpu(pkt->cid);
5526 credits = __le16_to_cpu(pkt->credits);
5527
5528 BT_DBG("cid 0x%4.4x credits 0x%4.4x", cid, credits);
5529
5530 chan = l2cap_get_chan_by_dcid(conn, cid);
5531 if (!chan)
5532 return -EBADSLT;
5533
5534 max_credits = LE_FLOWCTL_MAX_CREDITS - chan->tx_credits;
5535 if (credits > max_credits) {
5536 BT_ERR("LE credits overflow");
5537 l2cap_send_disconn_req(chan, ECONNRESET);
5538
5539 /* Return 0 so that we don't trigger an unnecessary
5540 * command reject packet.
5541 */
5542 return 0;
5543 }
5544
5545 chan->tx_credits += credits;
5546
5547 while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
5548 l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
5549 chan->tx_credits--;
5550 }
5551
5552 if (chan->tx_credits)
5553 chan->ops->resume(chan);
5554
5555 l2cap_chan_unlock(chan);
5556
5557 return 0;
5558 }
5559
5560 static inline int l2cap_le_command_rej(struct l2cap_conn *conn,
5561 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5562 u8 *data)
5563 {
5564 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
5565 struct l2cap_chan *chan;
5566
5567 if (cmd_len < sizeof(*rej))
5568 return -EPROTO;
5569
5570 mutex_lock(&conn->chan_lock);
5571
5572 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
5573 if (!chan)
5574 goto done;
5575
5576 l2cap_chan_lock(chan);
5577 l2cap_chan_del(chan, ECONNREFUSED);
5578 l2cap_chan_unlock(chan);
5579
5580 done:
5581 mutex_unlock(&conn->chan_lock);
5582 return 0;
5583 }
5584
5585 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
5586 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5587 u8 *data)
5588 {
5589 int err = 0;
5590
5591 switch (cmd->code) {
5592 case L2CAP_COMMAND_REJ:
5593 l2cap_le_command_rej(conn, cmd, cmd_len, data);
5594 break;
5595
5596 case L2CAP_CONN_PARAM_UPDATE_REQ:
5597 err = l2cap_conn_param_update_req(conn, cmd, cmd_len, data);
5598 break;
5599
5600 case L2CAP_CONN_PARAM_UPDATE_RSP:
5601 break;
5602
5603 case L2CAP_LE_CONN_RSP:
5604 l2cap_le_connect_rsp(conn, cmd, cmd_len, data);
5605 break;
5606
5607 case L2CAP_LE_CONN_REQ:
5608 err = l2cap_le_connect_req(conn, cmd, cmd_len, data);
5609 break;
5610
5611 case L2CAP_LE_CREDITS:
5612 err = l2cap_le_credits(conn, cmd, cmd_len, data);
5613 break;
5614
5615 case L2CAP_DISCONN_REQ:
5616 err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
5617 break;
5618
5619 case L2CAP_DISCONN_RSP:
5620 l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
5621 break;
5622
5623 default:
5624 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
5625 err = -EINVAL;
5626 break;
5627 }
5628
5629 return err;
5630 }
5631
/* Receive one LE signaling PDU (exactly one command per PDU on LE),
 * validate the header, and dispatch it.  A failed handler triggers a
 * Command Reject.  Always consumes the skb.
 *
 * NOTE(review): the "Wrong link type" error text is misleading — err
 * here comes from any handler failure, not only a link-type mismatch.
 */
static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
					struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_cmd_hdr *cmd;
	u16 len;
	int err;

	if (hcon->type != LE_LINK)
		goto drop;

	if (skb->len < L2CAP_CMD_HDR_SIZE)
		goto drop;

	cmd = (void *) skb->data;
	skb_pull(skb, L2CAP_CMD_HDR_SIZE);

	len = le16_to_cpu(cmd->len);

	BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, cmd->ident);

	/* The command must fill the PDU exactly and carry a valid ident */
	if (len != skb->len || !cmd->ident) {
		BT_DBG("corrupted command");
		goto drop;
	}

	err = l2cap_le_sig_cmd(conn, cmd, len, skb->data);
	if (err) {
		struct l2cap_cmd_rej_unk rej;

		BT_ERR("Wrong link type (%d)", err);

		rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
			       sizeof(rej), &rej);
	}

drop:
	kfree_skb(skb);
}
5672
/* Receive a BR/EDR signaling PDU, which may carry several commands
 * back-to-back; walk and dispatch each one, sending a Command Reject
 * for any handler that fails.  Always consumes the skb.
 *
 * NOTE(review): the "Wrong link type" error text is misleading — err
 * here comes from any handler failure, not only a link-type mismatch.
 */
static inline void l2cap_sig_channel(struct l2cap_conn *conn,
				     struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	u8 *data = skb->data;
	int len = skb->len;
	struct l2cap_cmd_hdr cmd;
	int err;

	l2cap_raw_recv(conn, skb);

	if (hcon->type != ACL_LINK)
		goto drop;

	while (len >= L2CAP_CMD_HDR_SIZE) {
		u16 cmd_len;
		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
		data += L2CAP_CMD_HDR_SIZE;
		len -= L2CAP_CMD_HDR_SIZE;

		cmd_len = le16_to_cpu(cmd.len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len,
		       cmd.ident);

		/* A command overrunning the PDU or with ident 0 aborts
		 * processing of the rest of the PDU.
		 */
		if (cmd_len > len || !cmd.ident) {
			BT_DBG("corrupted command");
			break;
		}

		err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
		if (err) {
			struct l2cap_cmd_rej_unk rej;

			BT_ERR("Wrong link type (%d)", err);

			rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ,
				       sizeof(rej), &rej);
		}

		data += cmd_len;
		len -= cmd_len;
	}

drop:
	kfree_skb(skb);
}
5721
5722 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
5723 {
5724 u16 our_fcs, rcv_fcs;
5725 int hdr_size;
5726
5727 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
5728 hdr_size = L2CAP_EXT_HDR_SIZE;
5729 else
5730 hdr_size = L2CAP_ENH_HDR_SIZE;
5731
5732 if (chan->fcs == L2CAP_FCS_CRC16) {
5733 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
5734 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
5735 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
5736
5737 if (our_fcs != rcv_fcs)
5738 return -EBADMSG;
5739 }
5740 return 0;
5741 }
5742
/* Answer a poll with the F-bit set: RNR when we are locally busy,
 * otherwise pending I-frames carry the F-bit, and a plain RR is sent
 * only if no frame has delivered the F-bit yet.
 */
static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;

	BT_DBG("chan %p", chan);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.final = 1;
	control.reqseq = chan->buffer_seq;
	set_bit(CONN_SEND_FBIT, &chan->conn_state);

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		control.super = L2CAP_SUPER_RNR;
		l2cap_send_sframe(chan, &control);
	}

	/* Peer is no longer busy: restart retransmission of unacked data */
	if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
	    chan->unacked_frames > 0)
		__set_retrans_timer(chan);

	/* Send pending iframes */
	l2cap_ertm_send(chan);

	if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
		/* F-bit wasn't sent in an s-frame or i-frame yet, so
		 * send it now.
		 */
		control.super = L2CAP_SUPER_RR;
		l2cap_send_sframe(chan, &control);
	}
}
5776
/* Chain new_frag onto skb's frag_list via *last_frag and account its
 * size in the head skb.  *last_frag must point at the current list
 * tail (the head skb itself when the list is still empty).
 */
static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
			    struct sk_buff **last_frag)
{
	/* skb->len reflects data in skb as well as all fragments
	 * skb->data_len reflects only data in fragments
	 */
	if (!skb_has_frag_list(skb))
		skb_shinfo(skb)->frag_list = new_frag;

	new_frag->next = NULL;

	(*last_frag)->next = new_frag;
	*last_frag = new_frag;

	skb->len += new_frag->len;
	skb->data_len += new_frag->len;
	skb->truesize += new_frag->truesize;
}
5795
/* Reassemble an SDU from ERTM/streaming I-frames according to the SAR
 * bits in control.  Takes ownership of skb: on success it is either
 * delivered to the channel (ops->recv) or stored for further
 * reassembly; on any error both the skb and any partial SDU are freed.
 * Out-of-sequence SAR values (e.g. START while an SDU is in progress)
 * fall through with err == -EINVAL and trigger the cleanup.
 */
static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
				struct l2cap_ctrl *control)
{
	int err = -EINVAL;

	switch (control->sar) {
	case L2CAP_SAR_UNSEGMENTED:
		if (chan->sdu)
			break;

		err = chan->ops->recv(chan, skb);
		break;

	case L2CAP_SAR_START:
		if (chan->sdu)
			break;

		/* First fragment carries the total SDU length */
		chan->sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, L2CAP_SDULEN_SIZE);

		if (chan->sdu_len > chan->imtu) {
			err = -EMSGSIZE;
			break;
		}

		/* A START fragment may not contain the whole SDU */
		if (skb->len >= chan->sdu_len)
			break;

		chan->sdu = skb;
		chan->sdu_last_frag = skb;

		/* skb is now owned by chan->sdu; don't free it below */
		skb = NULL;
		err = 0;
		break;

	case L2CAP_SAR_CONTINUE:
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* A CONTINUE fragment may not complete the SDU */
		if (chan->sdu->len >= chan->sdu_len)
			break;

		err = 0;
		break;

	case L2CAP_SAR_END:
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* The END fragment must complete the SDU exactly */
		if (chan->sdu->len != chan->sdu_len)
			break;

		err = chan->ops->recv(chan, chan->sdu);

		if (!err) {
			/* Reassembly complete */
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;
		}
		break;
	}

	if (err) {
		kfree_skb(skb);
		kfree_skb(chan->sdu);
		chan->sdu = NULL;
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;
	}

	return err;
}
5877
/* Re-segment queued frames after a channel move; not yet implemented,
 * always reports success.
 */
static int l2cap_resegment(struct l2cap_chan *chan)
{
	/* Placeholder */
	return 0;
}
5883
5884 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
5885 {
5886 u8 event;
5887
5888 if (chan->mode != L2CAP_MODE_ERTM)
5889 return;
5890
5891 event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
5892 l2cap_tx(chan, NULL, NULL, event);
5893 }
5894
/* Deliver buffered out-of-order I-frames from the SREJ queue in
 * sequence until a gap (or local busy, or a reassembly error) stops
 * delivery; once the queue drains completely, return to the normal
 * receive state and acknowledge.
 */
static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
{
	int err = 0;
	/* Pass sequential frames to l2cap_reassemble_sdu()
	 * until a gap is encountered.
	 */

	BT_DBG("chan %p", chan);

	while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		struct sk_buff *skb;
		BT_DBG("Searching for skb with txseq %d (queue len %d)",
		       chan->buffer_seq, skb_queue_len(&chan->srej_q));

		skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);

		if (!skb)
			break;

		skb_unlink(skb, &chan->srej_q);
		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
		err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->control);
		if (err)
			break;
	}

	if (skb_queue_empty(&chan->srej_q)) {
		chan->rx_state = L2CAP_RX_STATE_RECV;
		l2cap_send_ack(chan);
	}

	return err;
}
5928
/* Handle an SREJ S-frame: retransmit the single I-frame the peer asks
 * for, honoring the poll/final bits and the retry limit.  The reqseq
 * of an SREJ answered under a poll is remembered (CONN_SREJ_ACT) so
 * the duplicate SREJ carrying the matching F-bit is not re-sent.
 * An invalid reqseq or exceeding max_tx disconnects the channel.
 */
static void l2cap_handle_srej(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	if (skb == NULL) {
		BT_DBG("Seq %d not available for retransmission",
		       control->reqseq);
		return;
	}

	if (chan->max_tx != 0 && bt_cb(skb)->control.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	if (control->poll) {
		l2cap_pass_to_tx(chan, control);

		set_bit(CONN_SEND_FBIT, &chan->conn_state);
		l2cap_retransmit(chan, control);
		l2cap_ertm_send(chan);

		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
			set_bit(CONN_SREJ_ACT, &chan->conn_state);
			chan->srej_save_reqseq = control->reqseq;
		}
	} else {
		l2cap_pass_to_tx_fbit(chan, control);

		if (control->final) {
			/* Skip the retransmit if this F-bit SREJ repeats
			 * the poll-time SREJ we already answered.
			 */
			if (chan->srej_save_reqseq != control->reqseq ||
			    !test_and_clear_bit(CONN_SREJ_ACT,
						&chan->conn_state))
				l2cap_retransmit(chan, control);
		} else {
			l2cap_retransmit(chan, control);
			if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
				set_bit(CONN_SREJ_ACT, &chan->conn_state);
				chan->srej_save_reqseq = control->reqseq;
			}
		}
	}
}
5986
/* Handle a received REJ (reject) S-frame.
 *
 * The peer requests retransmission of all unacked I-frames starting at
 * control->reqseq.  Protocol violations (REJ for a frame never sent, or
 * retransmission limit exhausted) tear the connection down.
 */
static void l2cap_handle_rej(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* A REJ for the next not-yet-sent seq number is a protocol error */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	/* max_tx == 0 means an unlimited number of retransmissions */
	if (chan->max_tx && skb &&
	    bt_cb(skb)->control.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	l2cap_pass_to_tx(chan, control);

	if (control->final) {
		/* Retransmit only if this REJ wasn't already handled as
		 * part of an earlier poll/final exchange.
		 */
		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
			l2cap_retransmit_all(chan, control);
	} else {
		l2cap_retransmit_all(chan, control);
		l2cap_ertm_send(chan);
		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
			set_bit(CONN_REJ_ACT, &chan->conn_state);
	}
}
6023
/* Classify a received I-frame's tx sequence number relative to the
 * receive window and any outstanding SREJ bookkeeping.
 *
 * Returns one of the L2CAP_TXSEQ_* classifications (expected, duplicate,
 * unexpected/gap, an SREJ-related variant, or invalid) that drive the
 * rx state machine handlers.
 */
static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
{
	BT_DBG("chan %p, txseq %d", chan, txseq);

	BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
	       chan->expected_tx_seq);

	if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			/* See notes below regarding "double poll" and
			 * invalid packets.
			 */
			if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
				BT_DBG("Invalid/Ignore - after SREJ");
				return L2CAP_TXSEQ_INVALID_IGNORE;
			} else {
				BT_DBG("Invalid - in window after SREJ sent");
				return L2CAP_TXSEQ_INVALID;
			}
		}

		if (chan->srej_list.head == txseq) {
			BT_DBG("Expected SREJ");
			return L2CAP_TXSEQ_EXPECTED_SREJ;
		}

		if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
			BT_DBG("Duplicate SREJ - txseq already stored");
			return L2CAP_TXSEQ_DUPLICATE_SREJ;
		}

		if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
			BT_DBG("Unexpected SREJ - not requested");
			return L2CAP_TXSEQ_UNEXPECTED_SREJ;
		}
	}

	if (chan->expected_tx_seq == txseq) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		} else {
			BT_DBG("Expected");
			return L2CAP_TXSEQ_EXPECTED;
		}
	}

	if (__seq_offset(chan, txseq, chan->last_acked_seq) <
	    __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
		BT_DBG("Duplicate - expected_tx_seq later than txseq");
		return L2CAP_TXSEQ_DUPLICATE;
	}

	if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
		/* A source of invalid packets is a "double poll" condition,
		 * where delays cause us to send multiple poll packets. If
		 * the remote stack receives and processes both polls,
		 * sequence numbers can wrap around in such a way that a
		 * resent frame has a sequence number that looks like new data
		 * with a sequence gap. This would trigger an erroneous SREJ
		 * request.
		 *
		 * Fortunately, this is impossible with a tx window that's
		 * less than half of the maximum sequence number, which allows
		 * invalid frames to be safely ignored.
		 *
		 * With tx window sizes greater than half of the tx window
		 * maximum, the frame is invalid and cannot be ignored. This
		 * causes a disconnect.
		 */

		if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
			BT_DBG("Invalid/Ignore - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID_IGNORE;
		} else {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		}
	} else {
		BT_DBG("Unexpected - txseq indicates missing frames");
		return L2CAP_TXSEQ_UNEXPECTED;
	}
}
6109
/* ERTM rx state machine handler for the normal RECV state.
 *
 * Takes ownership of @skb: it is consumed when reassembled or parked on
 * the SREJ queue (skb_in_use), and freed before returning otherwise.
 * Returns 0 or a reassembly error; protocol violations disconnect the
 * channel but still return 0.
 */
static int l2cap_rx_state_recv(struct l2cap_chan *chan,
			       struct l2cap_ctrl *control,
			       struct sk_buff *skb, u8 event)
{
	int err = 0;
	bool skb_in_use = false;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, control->txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			l2cap_pass_to_tx(chan, control);

			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding expected seq %d",
				       control->txseq);
				break;
			}

			chan->expected_tx_seq = __next_seq(chan,
							   control->txseq);

			chan->buffer_seq = chan->expected_tx_seq;
			skb_in_use = true;

			err = l2cap_reassemble_sdu(chan, skb, control);
			if (err)
				break;

			if (control->final) {
				/* F=1 answers our poll; retransmit unless a
				 * REJ already triggered the retransmission.
				 */
				if (!test_and_clear_bit(CONN_REJ_ACT,
							&chan->conn_state)) {
					control->final = 0;
					l2cap_retransmit_all(chan, control);
					l2cap_ertm_send(chan);
				}
			}

			if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
				l2cap_send_ack(chan);
			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			l2cap_pass_to_tx(chan, control);

			/* Can't issue SREJ frames in the local busy state.
			 * Drop this frame, it will be seen as missing
			 * when local busy is exited.
			 */
			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding unexpected seq %d",
				       control->txseq);
				break;
			}

			/* There was a gap in the sequence, so an SREJ
			 * must be sent for each missing frame. The
			 * current frame is stored for later use.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			clear_bit(CONN_SREJ_ACT, &chan->conn_state);
			l2cap_seq_list_clear(&chan->srej_list);
			l2cap_send_srej(chan, control->txseq);

			chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(chan, ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state) &&
			    !__chan_is_moving(chan)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			l2cap_send_i_or_rr_or_rnr(chan);
		} else {
			/* Remote just left busy; restart the retransmission
			 * timer if frames are still awaiting an ack.
			 */
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_ertm_send(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		if (control && control->poll) {
			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_rr_or_rnr(chan, 0);
		}
		__clear_retrans_timer(chan);
		l2cap_seq_list_clear(&chan->retrans_list);
		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	default:
		break;
	}

	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
6243
/* ERTM rx state machine handler for the SREJ_SENT state (one or more
 * selective rejects outstanding).
 *
 * Out-of-order frames are parked on srej_q until the requested
 * retransmissions fill the gaps, at which point queued frames are
 * delivered in order by l2cap_rx_queued_iframes().  Takes ownership of
 * @skb (queued when skb_in_use, freed otherwise).
 */
static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
				    struct l2cap_ctrl *control,
				    struct sk_buff *skb, u8 event)
{
	int err = 0;
	u16 txseq = control->txseq;
	bool skb_in_use = false;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			/* Keep frame for reassembly later */
			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			chan->expected_tx_seq = __next_seq(chan, txseq);
			break;
		case L2CAP_TXSEQ_EXPECTED_SREJ:
			/* The head of the SREJ list arrived; try to
			 * deliver everything that is now in sequence.
			 */
			l2cap_seq_list_pop(&chan->srej_list);

			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			err = l2cap_rx_queued_iframes(chan);
			if (err)
				break;

			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			/* Got a frame that can't be reassembled yet.
			 * Save it for later, and send SREJs to cover
			 * the missing frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_UNEXPECTED_SREJ:
			/* This frame was requested with an SREJ, but
			 * some expected retransmitted frames are
			 * missing. Request retransmission of missing
			 * SREJ'd frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej_list(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_DUPLICATE_SREJ:
			/* We've already queued this frame. Drop this copy. */
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			/* Expecting a later sequence number, so this frame
			 * was already received. Ignore it completely.
			 */
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(chan, ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			if (!test_and_clear_bit(CONN_REJ_ACT,
						&chan->conn_state)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames) {
				__set_retrans_timer(chan);
			}

			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_srej_tail(chan);
		} else {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_send_ack(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		if (control->poll) {
			l2cap_send_srej_tail(chan);
		} else {
			struct l2cap_ctrl rr_control;
			memset(&rr_control, 0, sizeof(rr_control));
			rr_control.sframe = 1;
			rr_control.super = L2CAP_SUPER_RR;
			rr_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &rr_control);
		}

		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	}

	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
6386
6387 static int l2cap_finish_move(struct l2cap_chan *chan)
6388 {
6389 BT_DBG("chan %p", chan);
6390
6391 chan->rx_state = L2CAP_RX_STATE_RECV;
6392
6393 if (chan->hs_hcon)
6394 chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
6395 else
6396 chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6397
6398 return l2cap_resegment(chan);
6399 }
6400
/* ERTM rx state machine handler for WAIT_P: a channel move completed on
 * the responder side and we are waiting for a poll (P=1) from the
 * remote before resuming.
 *
 * Only S-frames carrying P=1 are valid here; the tx queue is rewound to
 * the remote's reqseq, the move is finalized, and the event is then
 * replayed through the normal RECV handler.  Returns -EPROTO for
 * invalid frames.
 */
static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control,
				 struct sk_buff *skb, u8 event)
{
	int err;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	if (!control->poll)
		return -EPROTO;

	l2cap_process_reqseq(chan, control->reqseq);

	if (!skb_queue_empty(&chan->tx_q))
		chan->tx_send_head = skb_peek(&chan->tx_q);
	else
		chan->tx_send_head = NULL;

	/* Rewind next_tx_seq to the point expected
	 * by the receiver.
	 */
	chan->next_tx_seq = control->reqseq;
	chan->unacked_frames = 0;

	err = l2cap_finish_move(chan);
	if (err)
		return err;

	set_bit(CONN_SEND_FBIT, &chan->conn_state);
	l2cap_send_i_or_rr_or_rnr(chan);

	/* I-frames cannot carry a poll; only S-frames are valid here */
	if (event == L2CAP_EV_RECV_IFRAME)
		return -EPROTO;

	return l2cap_rx_state_recv(chan, control, NULL, event);
}
6438
/* ERTM rx state machine handler for WAIT_F: a channel move completed on
 * the initiator side and we are waiting for the F=1 response to our
 * poll.
 *
 * On a valid final frame the tx queue is rewound to the remote's
 * reqseq, the MTU is switched to the controller now carrying the
 * channel, pending data is re-segmented, and the frame is processed by
 * the normal RECV handler.  Returns -EPROTO for frames without F=1.
 */
static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control,
				 struct sk_buff *skb, u8 event)
{
	int err;

	if (!control->final)
		return -EPROTO;

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	chan->rx_state = L2CAP_RX_STATE_RECV;
	l2cap_process_reqseq(chan, control->reqseq);

	if (!skb_queue_empty(&chan->tx_q))
		chan->tx_send_head = skb_peek(&chan->tx_q);
	else
		chan->tx_send_head = NULL;

	/* Rewind next_tx_seq to the point expected
	 * by the receiver.
	 */
	chan->next_tx_seq = control->reqseq;
	chan->unacked_frames = 0;

	if (chan->hs_hcon)
		chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
	else
		chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;

	err = l2cap_resegment(chan);

	if (!err)
		err = l2cap_rx_state_recv(chan, control, skb, event);

	return err;
}
6476
6477 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
6478 {
6479 /* Make sure reqseq is for a packet that has been sent but not acked */
6480 u16 unacked;
6481
6482 unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
6483 return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
6484 }
6485
6486 static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6487 struct sk_buff *skb, u8 event)
6488 {
6489 int err = 0;
6490
6491 BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
6492 control, skb, event, chan->rx_state);
6493
6494 if (__valid_reqseq(chan, control->reqseq)) {
6495 switch (chan->rx_state) {
6496 case L2CAP_RX_STATE_RECV:
6497 err = l2cap_rx_state_recv(chan, control, skb, event);
6498 break;
6499 case L2CAP_RX_STATE_SREJ_SENT:
6500 err = l2cap_rx_state_srej_sent(chan, control, skb,
6501 event);
6502 break;
6503 case L2CAP_RX_STATE_WAIT_P:
6504 err = l2cap_rx_state_wait_p(chan, control, skb, event);
6505 break;
6506 case L2CAP_RX_STATE_WAIT_F:
6507 err = l2cap_rx_state_wait_f(chan, control, skb, event);
6508 break;
6509 default:
6510 /* shut it down */
6511 break;
6512 }
6513 } else {
6514 BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
6515 control->reqseq, chan->next_tx_seq,
6516 chan->expected_ack_seq);
6517 l2cap_send_disconn_req(chan, ECONNRESET);
6518 }
6519
6520 return err;
6521 }
6522
/* Receive path for streaming mode.
 *
 * Streaming mode has no retransmissions: only the next expected txseq
 * is reassembled.  Anything else discards both the frame and any
 * partially assembled SDU, and reception resynchronizes on the new
 * txseq.  Always consumes @skb.
 */
static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
			   struct sk_buff *skb)
{
	int err = 0;

	BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
	       chan->rx_state);

	if (l2cap_classify_txseq(chan, control->txseq) ==
	    L2CAP_TXSEQ_EXPECTED) {
		l2cap_pass_to_tx(chan, control);

		BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
		       __next_seq(chan, chan->buffer_seq));

		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);

		l2cap_reassemble_sdu(chan, skb, control);
	} else {
		/* Out of sequence: drop any partial SDU and this frame,
		 * then resync on the received txseq below.
		 */
		if (chan->sdu) {
			kfree_skb(chan->sdu);
			chan->sdu = NULL;
		}
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;

		if (skb) {
			BT_DBG("Freeing %p", skb);
			kfree_skb(skb);
		}
	}

	chan->last_acked_seq = control->txseq;
	chan->expected_tx_seq = __next_seq(chan, control->txseq);

	return err;
}
6560
/* Common entry point for ERTM and streaming mode data frames.
 *
 * Unpacks the control field, validates FCS and payload size, then
 * dispatches I-frames to the rx state machine (or the streaming
 * receiver) and S-frames to the matching rx event.  Always consumes
 * @skb; always returns 0.
 */
static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct l2cap_ctrl *control = &bt_cb(skb)->control;
	u16 len;
	u8 event;

	__unpack_control(chan, skb);

	len = skb->len;

	/*
	 * We can just drop the corrupted I-frame here.
	 * Receiver will miss it and start proper recovery
	 * procedures and ask for retransmission.
	 */
	if (l2cap_check_fcs(chan, skb))
		goto drop;

	/* Payload length excludes the SDU length field and the FCS */
	if (!control->sframe && control->sar == L2CAP_SAR_START)
		len -= L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		len -= L2CAP_FCS_SIZE;

	if (len > chan->mps) {
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto drop;
	}

	if (!control->sframe) {
		int err;

		BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
		       control->sar, control->reqseq, control->final,
		       control->txseq);

		/* Validate F-bit - F=0 always valid, F=1 only
		 * valid in TX WAIT_F
		 */
		if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
			goto drop;

		if (chan->mode != L2CAP_MODE_STREAMING) {
			event = L2CAP_EV_RECV_IFRAME;
			err = l2cap_rx(chan, control, skb, event);
		} else {
			err = l2cap_stream_rx(chan, control, skb);
		}

		if (err)
			l2cap_send_disconn_req(chan, ECONNRESET);
	} else {
		/* Maps the 2-bit S-frame "super" field to an rx event */
		const u8 rx_func_to_event[4] = {
			L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
			L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
		};

		/* Only I-frames are expected in streaming mode */
		if (chan->mode == L2CAP_MODE_STREAMING)
			goto drop;

		BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
		       control->reqseq, control->final, control->poll,
		       control->super);

		if (len != 0) {
			BT_ERR("Trailing bytes: %d in sframe", len);
			l2cap_send_disconn_req(chan, ECONNRESET);
			goto drop;
		}

		/* Validate F and P bits */
		if (control->final && (control->poll ||
				       chan->tx_state != L2CAP_TX_STATE_WAIT_F))
			goto drop;

		event = rx_func_to_event[control->super];
		if (l2cap_rx(chan, control, skb, event))
			l2cap_send_disconn_req(chan, ECONNRESET);
	}

	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
6648
6649 static void l2cap_chan_le_send_credits(struct l2cap_chan *chan)
6650 {
6651 struct l2cap_conn *conn = chan->conn;
6652 struct l2cap_le_credits pkt;
6653 u16 return_credits;
6654
6655 /* We return more credits to the sender only after the amount of
6656 * credits falls below half of the initial amount.
6657 */
6658 if (chan->rx_credits >= (le_max_credits + 1) / 2)
6659 return;
6660
6661 return_credits = le_max_credits - chan->rx_credits;
6662
6663 BT_DBG("chan %p returning %u credits to sender", chan, return_credits);
6664
6665 chan->rx_credits += return_credits;
6666
6667 pkt.cid = cpu_to_le16(chan->scid);
6668 pkt.credits = cpu_to_le16(return_credits);
6669
6670 chan->ident = l2cap_get_ident(conn);
6671
6672 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CREDITS, sizeof(pkt), &pkt);
6673 }
6674
/* Receive path for LE credit based flow control channels.
 *
 * Consumes one rx credit per PDU, replenishes the remote's credits when
 * running low, and reassembles SDUs spanning multiple PDUs (the first
 * PDU starts with a 2-byte SDU length).
 *
 * skb ownership: the early -ENOBUFS returns leave @skb with the caller
 * (which drops it); once reassembly starts, all freeing happens here
 * and 0 is returned so the caller never double-frees.
 */
static int l2cap_le_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
{
	int err;

	if (!chan->rx_credits) {
		BT_ERR("No credits to receive LE L2CAP data");
		l2cap_send_disconn_req(chan, ECONNRESET);
		return -ENOBUFS;
	}

	if (chan->imtu < skb->len) {
		BT_ERR("Too big LE L2CAP PDU");
		return -ENOBUFS;
	}

	chan->rx_credits--;
	BT_DBG("rx_credits %u -> %u", chan->rx_credits + 1, chan->rx_credits);

	l2cap_chan_le_send_credits(chan);

	err = 0;

	if (!chan->sdu) {
		/* First PDU of an SDU: leading 2 bytes carry the SDU length */
		u16 sdu_len;

		sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, L2CAP_SDULEN_SIZE);

		BT_DBG("Start of new SDU. sdu_len %u skb->len %u imtu %u",
		       sdu_len, skb->len, chan->imtu);

		if (sdu_len > chan->imtu) {
			BT_ERR("Too big LE L2CAP SDU length received");
			err = -EMSGSIZE;
			goto failed;
		}

		if (skb->len > sdu_len) {
			BT_ERR("Too much LE L2CAP data received");
			err = -EINVAL;
			goto failed;
		}

		/* Complete SDU in a single PDU: deliver immediately */
		if (skb->len == sdu_len)
			return chan->ops->recv(chan, skb);

		chan->sdu = skb;
		chan->sdu_len = sdu_len;
		chan->sdu_last_frag = skb;

		return 0;
	}

	BT_DBG("SDU fragment. chan->sdu->len %u skb->len %u chan->sdu_len %u",
	       chan->sdu->len, skb->len, chan->sdu_len);

	if (chan->sdu->len + skb->len > chan->sdu_len) {
		BT_ERR("Too much LE L2CAP data received");
		err = -EINVAL;
		goto failed;
	}

	/* skb now owned by the partial SDU; clear it so the error path
	 * below doesn't free it twice.
	 */
	append_skb_frag(chan->sdu, skb, &chan->sdu_last_frag);
	skb = NULL;

	if (chan->sdu->len == chan->sdu_len) {
		err = chan->ops->recv(chan, chan->sdu);
		if (!err) {
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;
		}
	}

failed:
	if (err) {
		kfree_skb(skb);
		kfree_skb(chan->sdu);
		chan->sdu = NULL;
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;
	}

	/* We can't return an error here since we took care of the skb
	 * freeing internally. An error return would cause the caller to
	 * do a double-free of the skb.
	 */
	return 0;
}
6764
/* Route an inbound frame on a connection-oriented CID to its channel
 * and deliver it according to the channel mode.  Unknown CIDs (except
 * A2MP, which may create its channel on demand) are dropped.  Consumes
 * @skb and releases the channel lock taken by the lookup before
 * returning.
 */
static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
			       struct sk_buff *skb)
{
	struct l2cap_chan *chan;

	chan = l2cap_get_chan_by_scid(conn, cid);
	if (!chan) {
		if (cid == L2CAP_CID_A2MP) {
			/* A2MP channels are created lazily on first frame */
			chan = a2mp_channel_create(conn, skb);
			if (!chan) {
				kfree_skb(skb);
				return;
			}

			l2cap_chan_lock(chan);
		} else {
			BT_DBG("unknown cid 0x%4.4x", cid);
			/* Drop packet and return */
			kfree_skb(skb);
			return;
		}
	}

	BT_DBG("chan %p, len %d", chan, skb->len);

	if (chan->state != BT_CONNECTED)
		goto drop;

	switch (chan->mode) {
	case L2CAP_MODE_LE_FLOWCTL:
		/* l2cap_le_data_rcv() frees the skb itself except on the
		 * early -ENOBUFS paths, which are dropped here.
		 */
		if (l2cap_le_data_rcv(chan, skb) < 0)
			goto drop;

		goto done;

	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism. */

		if (chan->imtu < skb->len) {
			BT_ERR("Dropping L2CAP data: receive buffer overflow");
			goto drop;
		}

		if (!chan->ops->recv(chan, skb))
			goto done;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		l2cap_data_rcv(chan, skb);
		goto done;

	default:
		BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
		break;
	}

drop:
	kfree_skb(skb);

done:
	l2cap_chan_unlock(chan);
}
6831
/* Deliver a connectionless (G-frame) PDU to a channel listening on the
 * given PSM.  Only valid on BR/EDR (ACL) links.  The remote address and
 * PSM are stashed in the skb control block so recvmsg can fill in
 * msg_name.  Consumes @skb on any failure path.
 */
static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
				  struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_chan *chan;

	if (hcon->type != ACL_LINK)
		goto drop;

	chan = l2cap_global_chan_by_psm(0, psm, &hcon->src, &hcon->dst,
					ACL_LINK);
	if (!chan)
		goto drop;

	BT_DBG("chan %p, len %d", chan, skb->len);

	if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
		goto drop;

	if (chan->imtu < skb->len)
		goto drop;

	/* Store remote BD_ADDR and PSM for msg_name */
	bacpy(&bt_cb(skb)->bdaddr, &hcon->dst);
	bt_cb(skb)->psm = psm;

	if (!chan->ops->recv(chan, skb))
		return;

drop:
	kfree_skb(skb);
}
6864
6865 static void l2cap_att_channel(struct l2cap_conn *conn,
6866 struct sk_buff *skb)
6867 {
6868 struct hci_conn *hcon = conn->hcon;
6869 struct l2cap_chan *chan;
6870
6871 if (hcon->type != LE_LINK)
6872 goto drop;
6873
6874 chan = l2cap_global_chan_by_scid(BT_CONNECTED, L2CAP_CID_ATT,
6875 &hcon->src, &hcon->dst);
6876 if (!chan)
6877 goto drop;
6878
6879 BT_DBG("chan %p, len %d", chan, skb->len);
6880
6881 if (chan->imtu < skb->len)
6882 goto drop;
6883
6884 if (!chan->ops->recv(chan, skb))
6885 return;
6886
6887 drop:
6888 kfree_skb(skb);
6889 }
6890
/* Main L2CAP receive entry point for a complete, reassembled frame.
 *
 * Frames arriving before the HCI connection is fully established are
 * queued and replayed later by process_pending_rx().  Frames from
 * blocked (blacklisted) remote devices are discarded here, centrally,
 * so the per-CID handlers never see them.  Consumes @skb.
 */
static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct l2cap_hdr *lh = (void *) skb->data;
	struct hci_conn *hcon = conn->hcon;
	u16 cid, len;
	__le16 psm;

	if (hcon->state != BT_CONNECTED) {
		BT_DBG("queueing pending rx skb");
		skb_queue_tail(&conn->pending_rx, skb);
		return;
	}

	/* lh was captured above; strip the basic header before dispatch */
	skb_pull(skb, L2CAP_HDR_SIZE);
	cid = __le16_to_cpu(lh->cid);
	len = __le16_to_cpu(lh->len);

	/* Drop frames whose header length disagrees with the payload */
	if (len != skb->len) {
		kfree_skb(skb);
		return;
	}

	if (hci_blacklist_lookup(hcon->hdev, &hcon->dst,
				 bdaddr_type(hcon, hcon->dst_type))) {
		kfree_skb(skb);
		return;
	}

	BT_DBG("len %d, cid 0x%4.4x", len, cid);

	switch (cid) {
	case L2CAP_CID_SIGNALING:
		l2cap_sig_channel(conn, skb);
		break;

	case L2CAP_CID_CONN_LESS:
		psm = get_unaligned((__le16 *) skb->data);
		skb_pull(skb, L2CAP_PSMLEN_SIZE);
		l2cap_conless_channel(conn, psm, skb);
		break;

	case L2CAP_CID_ATT:
		l2cap_att_channel(conn, skb);
		break;

	case L2CAP_CID_LE_SIGNALING:
		l2cap_le_sig_channel(conn, skb);
		break;

	case L2CAP_CID_SMP:
		if (smp_sig_channel(conn, skb))
			l2cap_conn_del(conn->hcon, EACCES);
		break;

	default:
		l2cap_data_channel(conn, cid, skb);
		break;
	}
}
6950
6951 static void process_pending_rx(struct work_struct *work)
6952 {
6953 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
6954 pending_rx_work);
6955 struct sk_buff *skb;
6956
6957 BT_DBG("");
6958
6959 while ((skb = skb_dequeue(&conn->pending_rx)))
6960 l2cap_recv_frame(conn, skb);
6961 }
6962
/* Find or create the L2CAP connection object for an HCI connection.
 *
 * Returns the existing conn if one is already attached; otherwise
 * allocates a new one together with its HCI channel, initializes locks,
 * lists, timers and the pending-rx machinery, and holds a reference on
 * the hci_conn.  Returns NULL on allocation failure.
 */
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct hci_chan *hchan;

	if (conn)
		return conn;

	hchan = hci_chan_create(hcon);
	if (!hchan)
		return NULL;

	conn = kzalloc(sizeof(struct l2cap_conn), GFP_KERNEL);
	if (!conn) {
		hci_chan_del(hchan);
		return NULL;
	}

	kref_init(&conn->ref);
	hcon->l2cap_data = conn;
	conn->hcon = hcon;
	hci_conn_get(conn->hcon);
	conn->hchan = hchan;

	BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);

	/* Pick the MTU from the link type; an LE controller may not
	 * report a dedicated LE MTU, in which case the ACL MTU is used.
	 */
	switch (hcon->type) {
	case LE_LINK:
		if (hcon->hdev->le_mtu) {
			conn->mtu = hcon->hdev->le_mtu;
			break;
		}
		/* fall through */
	default:
		conn->mtu = hcon->hdev->acl_mtu;
		break;
	}

	conn->feat_mask = 0;

	if (hcon->type == ACL_LINK)
		conn->hs_enabled = test_bit(HCI_HS_ENABLED,
					    &hcon->hdev->dev_flags);

	spin_lock_init(&conn->lock);
	mutex_init(&conn->chan_lock);

	INIT_LIST_HEAD(&conn->chan_l);
	INIT_LIST_HEAD(&conn->users);

	if (hcon->type == LE_LINK)
		INIT_DELAYED_WORK(&conn->security_timer, security_timeout);
	else
		INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);

	skb_queue_head_init(&conn->pending_rx);
	INIT_WORK(&conn->pending_rx_work, process_pending_rx);

	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	return conn;
}
7025
7026 static bool is_valid_psm(u16 psm, u8 dst_type) {
7027 if (!psm)
7028 return false;
7029
7030 if (bdaddr_type_is_le(dst_type))
7031 return (psm <= 0x00ff);
7032
7033 /* PSM must be odd and lsb of upper byte must be 0 */
7034 return ((psm & 0x0101) == 0x0001);
7035 }
7036
/* Initiate an outgoing connection for @chan to PSM @psm or fixed CID
 * @cid at address @dst/@dst_type.
 *
 * Validates the PSM/CID combination against the channel type, checks
 * the channel mode and state, creates (or reuses) the underlying ACL
 * or LE link, attaches the channel to the L2CAP connection and starts
 * the connect procedure.
 *
 * Returns 0 on success (including when a connect is already in
 * progress) or a negative errno.
 */
int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
		       bdaddr_t *dst, u8 dst_type)
{
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	__u8 auth_type;
	int err;

	BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", &chan->src, dst,
	       dst_type, __le16_to_cpu(psm));

	hdev = hci_get_route(dst, &chan->src);
	if (!hdev)
		return -EHOSTUNREACH;

	hci_dev_lock(hdev);

	l2cap_chan_lock(chan);

	/* Raw channels may connect without a PSM or CID */
	if (!is_valid_psm(__le16_to_cpu(psm), dst_type) && !cid &&
	    chan->chan_type != L2CAP_CHAN_RAW) {
		err = -EINVAL;
		goto done;
	}

	if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !psm) {
		err = -EINVAL;
		goto done;
	}

	if (chan->chan_type == L2CAP_CHAN_FIXED && !cid) {
		err = -EINVAL;
		goto done;
	}

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		break;
	case L2CAP_MODE_LE_FLOWCTL:
		l2cap_le_flowctl_init(chan);
		break;
	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		if (!disable_ertm)
			break;
		/* fall through */
	default:
		err = -ENOTSUPP;
		goto done;
	}

	switch (chan->state) {
	case BT_CONNECT:
	case BT_CONNECT2:
	case BT_CONFIG:
		/* Already connecting */
		err = 0;
		goto done;

	case BT_CONNECTED:
		/* Already connected */
		err = -EISCONN;
		goto done;

	case BT_OPEN:
	case BT_BOUND:
		/* Can connect */
		break;

	default:
		err = -EBADFD;
		goto done;
	}

	/* Set destination address and psm */
	bacpy(&chan->dst, dst);
	chan->dst_type = dst_type;

	chan->psm = psm;
	chan->dcid = cid;

	auth_type = l2cap_get_auth_type(chan);

	if (bdaddr_type_is_le(dst_type)) {
		/* Convert from L2CAP channel address type to HCI address type
		 */
		if (dst_type == BDADDR_LE_PUBLIC)
			dst_type = ADDR_LE_DEV_PUBLIC;
		else
			dst_type = ADDR_LE_DEV_RANDOM;

		hcon = hci_connect_le(hdev, dst, dst_type, chan->sec_level,
				      auth_type);
	} else {
		hcon = hci_connect_acl(hdev, dst, chan->sec_level, auth_type);
	}

	if (IS_ERR(hcon)) {
		err = PTR_ERR(hcon);
		goto done;
	}

	conn = l2cap_conn_add(hcon);
	if (!conn) {
		hci_conn_drop(hcon);
		err = -ENOMEM;
		goto done;
	}

	/* A fixed destination CID must not already be in use */
	if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
		hci_conn_drop(hcon);
		err = -EBUSY;
		goto done;
	}

	/* Update source addr of the socket */
	bacpy(&chan->src, &hcon->src);
	chan->src_type = bdaddr_type(hcon, hcon->src_type);

	l2cap_chan_unlock(chan);
	l2cap_chan_add(conn, chan);
	l2cap_chan_lock(chan);

	/* l2cap_chan_add takes its own ref so we can drop this one */
	hci_conn_drop(hcon);

	l2cap_state_change(chan, BT_CONNECT);
	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	/* Release chan->sport so that it can be reused by other
	 * sockets (as it's only used for listening sockets).
	 */
	write_lock(&chan_list_lock);
	chan->sport = 0;
	write_unlock(&chan_list_lock);

	if (hcon->state == BT_CONNECTED) {
		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			__clear_chan_timer(chan);
			if (l2cap_chan_check_security(chan))
				l2cap_state_change(chan, BT_CONNECTED);
		} else
			l2cap_do_start(chan);
	}

	err = 0;

done:
	l2cap_chan_unlock(chan);
	hci_dev_unlock(hdev);
	hci_dev_put(hdev);
	return err;
}
EXPORT_SYMBOL_GPL(l2cap_chan_connect);
7192
7193 /* ---- L2CAP interface with lower layer (HCI) ---- */
7194
7195 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
7196 {
7197 int exact = 0, lm1 = 0, lm2 = 0;
7198 struct l2cap_chan *c;
7199
7200 BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);
7201
7202 /* Find listening sockets and check their link_mode */
7203 read_lock(&chan_list_lock);
7204 list_for_each_entry(c, &chan_list, global_l) {
7205 if (c->state != BT_LISTEN)
7206 continue;
7207
7208 if (!bacmp(&c->src, &hdev->bdaddr)) {
7209 lm1 |= HCI_LM_ACCEPT;
7210 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
7211 lm1 |= HCI_LM_MASTER;
7212 exact++;
7213 } else if (!bacmp(&c->src, BDADDR_ANY)) {
7214 lm2 |= HCI_LM_ACCEPT;
7215 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
7216 lm2 |= HCI_LM_MASTER;
7217 }
7218 }
7219 read_unlock(&chan_list_lock);
7220
7221 return exact ? lm1 : lm2;
7222 }
7223
7224 void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
7225 {
7226 struct l2cap_conn *conn;
7227
7228 BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);
7229
7230 if (!status) {
7231 conn = l2cap_conn_add(hcon);
7232 if (conn)
7233 l2cap_conn_ready(conn);
7234 } else {
7235 l2cap_conn_del(hcon, bt_to_errno(status));
7236 }
7237 }
7238
7239 int l2cap_disconn_ind(struct hci_conn *hcon)
7240 {
7241 struct l2cap_conn *conn = hcon->l2cap_data;
7242
7243 BT_DBG("hcon %p", hcon);
7244
7245 if (!conn)
7246 return HCI_ERROR_REMOTE_USER_TERM;
7247 return conn->disc_reason;
7248 }
7249
7250 void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
7251 {
7252 BT_DBG("hcon %p reason %d", hcon, reason);
7253
7254 l2cap_conn_del(hcon, bt_to_errno(reason));
7255 }
7256
7257 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
7258 {
7259 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
7260 return;
7261
7262 if (encrypt == 0x00) {
7263 if (chan->sec_level == BT_SECURITY_MEDIUM) {
7264 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
7265 } else if (chan->sec_level == BT_SECURITY_HIGH ||
7266 chan->sec_level == BT_SECURITY_FIPS)
7267 l2cap_chan_close(chan, ECONNREFUSED);
7268 } else {
7269 if (chan->sec_level == BT_SECURITY_MEDIUM)
7270 __clear_chan_timer(chan);
7271 }
7272 }
7273
/* HCI security (authentication/encryption) change callback.
 *
 * Walks every channel on the connection and advances its state machine
 * according to the security result. Each channel is handled under its
 * own lock; channels that need no further action unlock and continue.
 */
int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan;

	if (!conn)
		return 0;

	BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);

	if (hcon->type == LE_LINK) {
		/* On LE, successful encryption triggers SMP key
		 * distribution and cancels the pending security timer.
		 */
		if (!status && encrypt)
			smp_distribute_keys(conn);
		cancel_delayed_work(&conn->security_timer);
	}

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
		       state_to_string(chan->state));

		/* A2MP channels manage their own security; skip. */
		if (chan->scid == L2CAP_CID_A2MP) {
			l2cap_chan_unlock(chan);
			continue;
		}

		/* ATT fixed channel: becomes ready once encrypted. */
		if (chan->scid == L2CAP_CID_ATT) {
			if (!status && encrypt) {
				chan->sec_level = hcon->sec_level;
				l2cap_chan_ready(chan);
			}

			l2cap_chan_unlock(chan);
			continue;
		}

		/* Skip channels with a connect request still pending. */
		if (!__l2cap_no_conn_pending(chan)) {
			l2cap_chan_unlock(chan);
			continue;
		}

		/* Security succeeded on an established channel: resume
		 * traffic and re-check encryption requirements.
		 */
		if (!status && (chan->state == BT_CONNECTED ||
				chan->state == BT_CONFIG)) {
			chan->ops->resume(chan);
			l2cap_check_encryption(chan, encrypt);
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			/* Outgoing connect was waiting on security. */
			if (!status)
				l2cap_start_connection(chan);
			else
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
		} else if (chan->state == BT_CONNECT2) {
			/* Incoming connect was waiting on security:
			 * answer the remote's connect request now.
			 */
			struct l2cap_conn_rsp rsp;
			__u16 res, stat;

			if (!status) {
				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
					/* Defer to user space for accept. */
					res = L2CAP_CR_PEND;
					stat = L2CAP_CS_AUTHOR_PEND;
					chan->ops->defer(chan);
				} else {
					l2cap_state_change(chan, BT_CONFIG);
					res = L2CAP_CR_SUCCESS;
					stat = L2CAP_CS_NO_INFO;
				}
			} else {
				/* Security failed: refuse and schedule
				 * disconnect.
				 */
				l2cap_state_change(chan, BT_DISCONN);
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
				res = L2CAP_CR_SEC_BLOCK;
				stat = L2CAP_CS_NO_INFO;
			}

			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);
			rsp.result = cpu_to_le16(res);
			rsp.status = cpu_to_le16(stat);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			/* On success, immediately follow up with our
			 * configuration request if not already sent.
			 */
			if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
			    res == L2CAP_CR_SUCCESS) {
				char buf[128];
				set_bit(CONF_REQ_SENT, &chan->conf_state);
				l2cap_send_cmd(conn, l2cap_get_ident(conn),
					       L2CAP_CONF_REQ,
					       l2cap_build_conf_req(chan, buf),
					       buf);
				chan->num_conf_req++;
			}
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);

	return 0;
}
7378
/* Receive one ACL data packet from HCI and reassemble L2CAP frames.
 *
 * A complete frame is handed to l2cap_recv_frame(), which takes
 * ownership of the skb. Fragments are copied into conn->rx_skb and
 * the fragment skb itself is always freed via the drop label (which
 * is therefore reached on the success paths too, by fall-through).
 */
int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_hdr *hdr;
	int len;

	/* For AMP controller do not create l2cap conn */
	if (!conn && hcon->hdev->dev_type != HCI_BREDR)
		goto drop;

	if (!conn)
		conn = l2cap_conn_add(hcon);

	if (!conn)
		goto drop;

	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);

	switch (flags) {
	case ACL_START:
	case ACL_START_NO_FLUSH:
	case ACL_COMPLETE:
		/* A start fragment while reassembly is in progress means
		 * the previous frame was truncated; discard it.
		 */
		if (conn->rx_len) {
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
		}

		/* Start fragment always begin with Basic L2CAP header */
		if (skb->len < L2CAP_HDR_SIZE) {
			BT_ERR("Frame is too short (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		hdr = (struct l2cap_hdr *) skb->data;
		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;

		if (len == skb->len) {
			/* Complete frame received: ownership of skb
			 * passes to l2cap_recv_frame, so do not free it.
			 */
			l2cap_recv_frame(conn, skb);
			return 0;
		}

		BT_DBG("Start: total len %d, frag len %d", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %d, expected len %d)",
			       skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
		if (!conn->rx_skb)
			goto drop;

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
					  skb->len);
		/* Remember how many bytes are still outstanding. */
		conn->rx_len = len - skb->len;
		break;

	case ACL_CONT:
		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);

		/* Continuation without a start fragment in flight. */
		if (!conn->rx_len) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Fragment would overflow the reassembly buffer. */
		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %d, expected %d)",
			       skb->len, conn->rx_len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
					  skb->len);
		conn->rx_len -= skb->len;

		if (!conn->rx_len) {
			/* Complete frame received. l2cap_recv_frame
			 * takes ownership of the skb so set the global
			 * rx_skb pointer to NULL first.
			 */
			struct sk_buff *rx_skb = conn->rx_skb;
			conn->rx_skb = NULL;
			l2cap_recv_frame(conn, rx_skb);
		}
		break;
	}

 drop:
	kfree_skb(skb);
	return 0;
}
7483
7484 static int l2cap_debugfs_show(struct seq_file *f, void *p)
7485 {
7486 struct l2cap_chan *c;
7487
7488 read_lock(&chan_list_lock);
7489
7490 list_for_each_entry(c, &chan_list, global_l) {
7491 seq_printf(f, "%pMR %pMR %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
7492 &c->src, &c->dst,
7493 c->state, __le16_to_cpu(c->psm),
7494 c->scid, c->dcid, c->imtu, c->omtu,
7495 c->sec_level, c->mode);
7496 }
7497
7498 read_unlock(&chan_list_lock);
7499
7500 return 0;
7501 }
7502
/* debugfs open: wire the seq_file show callback for the l2cap entry. */
static int l2cap_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, l2cap_debugfs_show, inode->i_private);
}
7507
/* File operations for the read-only "l2cap" debugfs entry. */
static const struct file_operations l2cap_debugfs_fops = {
	.open		= l2cap_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
7514
7515 static struct dentry *l2cap_debugfs;
7516
7517 int __init l2cap_init(void)
7518 {
7519 int err;
7520
7521 err = l2cap_init_sockets();
7522 if (err < 0)
7523 return err;
7524
7525 if (IS_ERR_OR_NULL(bt_debugfs))
7526 return 0;
7527
7528 l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
7529 NULL, &l2cap_debugfs_fops);
7530
7531 debugfs_create_u16("l2cap_le_max_credits", 0644, bt_debugfs,
7532 &le_max_credits);
7533 debugfs_create_u16("l2cap_le_default_mps", 0644, bt_debugfs,
7534 &le_default_mps);
7535
7536 return 0;
7537 }
7538
/* Module exit: remove the debugfs entry, then unregister sockets. */
void l2cap_exit(void)
{
	debugfs_remove(l2cap_debugfs);
	l2cap_cleanup_sockets();
}
7544
7545 module_param(disable_ertm, bool, 0644);
7546 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");
This page took 0.293298 seconds and 5 git commands to generate.