Bluetooth: move l2cap_sock_connect() to l2cap_sock.c
[deliverable/linux.git] / net / bluetooth / l2cap_core.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6
7 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License version 2 as
11 published by the Free Software Foundation;
12
13 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
14 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
16 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
17 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
18 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
19 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
20 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
21
22 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
23 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
24 SOFTWARE IS DISCLAIMED.
25 */
26
27 /* Bluetooth L2CAP core. */
28
29 #include <linux/module.h>
30
31 #include <linux/types.h>
32 #include <linux/capability.h>
33 #include <linux/errno.h>
34 #include <linux/kernel.h>
35 #include <linux/sched.h>
36 #include <linux/slab.h>
37 #include <linux/poll.h>
38 #include <linux/fcntl.h>
39 #include <linux/init.h>
40 #include <linux/interrupt.h>
41 #include <linux/socket.h>
42 #include <linux/skbuff.h>
43 #include <linux/list.h>
44 #include <linux/device.h>
45 #include <linux/debugfs.h>
46 #include <linux/seq_file.h>
47 #include <linux/uaccess.h>
48 #include <linux/crc16.h>
49 #include <net/sock.h>
50
51 #include <asm/system.h>
52 #include <asm/unaligned.h>
53
54 #include <net/bluetooth/bluetooth.h>
55 #include <net/bluetooth/hci_core.h>
56 #include <net/bluetooth/l2cap.h>
57
58 #define VERSION "2.15"
59
60 int disable_ertm;
61
62 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
63 static u8 l2cap_fixed_chan[8] = { 0x02, };
64
65 static struct workqueue_struct *_busy_wq;
66
67 struct bt_sock_list l2cap_sk_list = {
68 .lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
69 };
70
71 static void l2cap_busy_work(struct work_struct *work);
72
73 static void l2cap_sock_close(struct sock *sk);
74
75 static int l2cap_build_conf_req(struct sock *sk, void *data);
76 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
77 u8 code, u8 ident, u16 dlen, void *data);
78
79 static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb);
80
81 /* ---- L2CAP timers ---- */
/* Arm (or re-arm) the socket's timer to expire @timeout jiffies from now. */
void l2cap_sock_set_timer(struct sock *sk, long timeout)
{
	BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
	sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
}
87
/* Cancel a pending socket timer, if any. */
static void l2cap_sock_clear_timer(struct sock *sk)
{
	BT_DBG("sock %p state %d", sk, sk->sk_state);
	sk_stop_timer(sk, &sk->sk_timer);
}
93
94 /* ---- L2CAP channels ---- */
95 static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
96 {
97 struct sock *s;
98 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
99 if (l2cap_pi(s)->dcid == cid)
100 break;
101 }
102 return s;
103 }
104
105 static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
106 {
107 struct sock *s;
108 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
109 if (l2cap_pi(s)->scid == cid)
110 break;
111 }
112 return s;
113 }
114
/* Find channel with given SCID.
 * Returns locked socket */
static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
{
	struct sock *s;
	read_lock(&l->lock);
	s = __l2cap_get_chan_by_scid(l, cid);
	if (s)
		bh_lock_sock(s);	/* caller must bh_unlock_sock() */
	read_unlock(&l->lock);
	return s;
}
127
128 static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
129 {
130 struct sock *s;
131 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
132 if (l2cap_pi(s)->ident == ident)
133 break;
134 }
135 return s;
136 }
137
/* Find the channel matching a signalling identifier.
 * Returns the socket locked (bh_lock_sock); caller must unlock it. */
static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
{
	struct sock *s;
	read_lock(&l->lock);
	s = __l2cap_get_chan_by_ident(l, ident);
	if (s)
		bh_lock_sock(s);
	read_unlock(&l->lock);
	return s;
}
148
149 static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
150 {
151 u16 cid = L2CAP_CID_DYN_START;
152
153 for (; cid < L2CAP_CID_DYN_END; cid++) {
154 if (!__l2cap_get_chan_by_scid(l, cid))
155 return cid;
156 }
157
158 return 0;
159 }
160
/* Insert @sk at the head of the connection's channel list.
 * Takes a reference on the socket (dropped in l2cap_chan_unlink()).
 * Caller must hold the list write lock. */
static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
{
	sock_hold(sk);

	if (l->head)
		l2cap_pi(l->head)->prev_c = sk;

	l2cap_pi(sk)->next_c = l->head;
	l2cap_pi(sk)->prev_c = NULL;
	l->head = sk;
}
172
/* Remove @sk from the connection's channel list and drop the reference
 * taken by __l2cap_chan_link(). Takes the list write lock itself. */
static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
{
	struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;

	write_lock_bh(&l->lock);
	if (sk == l->head)
		l->head = next;

	if (next)
		l2cap_pi(next)->prev_c = prev;
	if (prev)
		l2cap_pi(prev)->next_c = next;
	write_unlock_bh(&l->lock);

	__sock_put(sk);
}
189
/* Attach a channel socket to an L2CAP connection and assign its CIDs
 * according to the socket type. Caller must hold the list write lock. */
static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
{
	struct l2cap_chan_list *l = &conn->chan_list;

	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
			l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);

	/* Default disconnect reason: remote user terminated connection (0x13) */
	conn->disc_reason = 0x13;

	l2cap_pi(sk)->conn = conn;

	if (sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM) {
		/* Alloc CID for connection-oriented socket */
		l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
	} else if (sk->sk_type == SOCK_DGRAM) {
		/* Connectionless socket */
		l2cap_pi(sk)->scid = L2CAP_CID_CONN_LESS;
		l2cap_pi(sk)->dcid = L2CAP_CID_CONN_LESS;
		l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
	} else {
		/* Raw socket can send/recv signalling messages only */
		l2cap_pi(sk)->scid = L2CAP_CID_SIGNALING;
		l2cap_pi(sk)->dcid = L2CAP_CID_SIGNALING;
		l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
	}

	__l2cap_chan_link(l, sk);

	/* Incoming channel: queue on the listening parent for accept() */
	if (parent)
		bt_accept_enqueue(parent, sk);
}
221
/* Delete channel.
 * Must be called on the locked socket.
 *
 * Detaches the channel from its connection (if any), marks the socket
 * closed/zapped, notifies the listening parent or the socket's own
 * waiters, and releases all ERTM state (timers, queues, SREJ list). */
static void l2cap_chan_del(struct sock *sk, int err)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct sock *parent = bt_sk(sk)->parent;

	l2cap_sock_clear_timer(sk);

	BT_DBG("sk %p, conn %p, err %d", sk, conn, err);

	if (conn) {
		/* Unlink from channel list */
		l2cap_chan_unlink(&conn->chan_list, sk);
		l2cap_pi(sk)->conn = NULL;
		hci_conn_put(conn->hcon);
	}

	sk->sk_state = BT_CLOSED;
	sock_set_flag(sk, SOCK_ZAPPED);

	if (err)
		sk->sk_err = err;

	if (parent) {
		/* Pending incoming channel: drop from accept queue and
		 * wake the listener */
		bt_accept_unlink(sk);
		parent->sk_data_ready(parent, 0);
	} else
		sk->sk_state_change(sk);

	skb_queue_purge(TX_QUEUE(sk));

	if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
		struct srej_list *l, *tmp;

		del_timer(&l2cap_pi(sk)->retrans_timer);
		del_timer(&l2cap_pi(sk)->monitor_timer);
		del_timer(&l2cap_pi(sk)->ack_timer);

		skb_queue_purge(SREJ_QUEUE(sk));
		skb_queue_purge(BUSY_QUEUE(sk));

		/* Free any outstanding selective-reject bookkeeping */
		list_for_each_entry_safe(l, tmp, SREJ_LIST(sk), list) {
			list_del(&l->list);
			kfree(l);
		}
	}
}
270
271 static inline u8 l2cap_get_auth_type(struct sock *sk)
272 {
273 if (sk->sk_type == SOCK_RAW) {
274 switch (l2cap_pi(sk)->sec_level) {
275 case BT_SECURITY_HIGH:
276 return HCI_AT_DEDICATED_BONDING_MITM;
277 case BT_SECURITY_MEDIUM:
278 return HCI_AT_DEDICATED_BONDING;
279 default:
280 return HCI_AT_NO_BONDING;
281 }
282 } else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
283 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
284 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
285
286 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
287 return HCI_AT_NO_BONDING_MITM;
288 else
289 return HCI_AT_NO_BONDING;
290 } else {
291 switch (l2cap_pi(sk)->sec_level) {
292 case BT_SECURITY_HIGH:
293 return HCI_AT_GENERAL_BONDING_MITM;
294 case BT_SECURITY_MEDIUM:
295 return HCI_AT_GENERAL_BONDING;
296 default:
297 return HCI_AT_NO_BONDING;
298 }
299 }
300 }
301
/* Service level security */
/* Ask the HCI layer whether the ACL link already satisfies this
 * channel's security level; may trigger authentication/encryption.
 * Returns non-zero when security is satisfied. */
static inline int l2cap_check_security(struct sock *sk)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	__u8 auth_type;

	auth_type = l2cap_get_auth_type(sk);

	return hci_conn_security(conn->hcon, l2cap_pi(sk)->sec_level,
								auth_type);
}
313
/* Allocate the next signalling command identifier for @conn. */
static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
{
	u8 id;

	/* Get next available identificator.
	 * 1 - 128 are used by kernel.
	 * 129 - 199 are reserved.
	 * 200 - 254 are used by utilities like l2ping, etc.
	 */

	spin_lock_bh(&conn->lock);

	/* Wrap within the kernel range, skipping 0 (reserved) */
	if (++conn->tx_ident > 128)
		conn->tx_ident = 1;

	id = conn->tx_ident;

	spin_unlock_bh(&conn->lock);

	return id;
}
335
336 static inline void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
337 {
338 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
339 u8 flags;
340
341 BT_DBG("code 0x%2.2x", code);
342
343 if (!skb)
344 return;
345
346 if (lmp_no_flush_capable(conn->hcon->hdev))
347 flags = ACL_START_NO_FLUSH;
348 else
349 flags = ACL_START;
350
351 hci_send_acl(conn->hcon, skb, flags);
352 }
353
/* Build and transmit an ERTM supervisory frame (S-frame) carrying
 * @control, appending an FCS when the channel uses CRC16.
 * Also consumes any pending F-bit / P-bit request recorded in conn_state. */
static inline void l2cap_send_sframe(struct l2cap_pinfo *pi, u16 control)
{
	struct sk_buff *skb;
	struct l2cap_hdr *lh;
	struct l2cap_conn *conn = pi->conn;
	struct sock *sk = (struct sock *)pi;
	int count, hlen = L2CAP_HDR_SIZE + 2;	/* header + 16-bit control */
	u8 flags;

	if (sk->sk_state != BT_CONNECTED)
		return;

	if (pi->fcs == L2CAP_FCS_CRC16)
		hlen += 2;	/* room for the frame check sequence */

	BT_DBG("pi %p, control 0x%2.2x", pi, control);

	count = min_t(unsigned int, conn->mtu, hlen);
	control |= L2CAP_CTRL_FRAME_TYPE;	/* mark as S-frame */

	if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
		control |= L2CAP_CTRL_FINAL;
		pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
	}

	if (pi->conn_state & L2CAP_CONN_SEND_PBIT) {
		control |= L2CAP_CTRL_POLL;
		pi->conn_state &= ~L2CAP_CONN_SEND_PBIT;
	}

	skb = bt_skb_alloc(count, GFP_ATOMIC);
	if (!skb)
		return;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(pi->dcid);
	put_unaligned_le16(control, skb_put(skb, 2));

	if (pi->fcs == L2CAP_FCS_CRC16) {
		/* FCS covers header + control, i.e. everything but itself */
		u16 fcs = crc16(0, (u8 *)lh, count - 2);
		put_unaligned_le16(fcs, skb_put(skb, 2));
	}

	if (lmp_no_flush_capable(conn->hcon->hdev))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	hci_send_acl(pi->conn->hcon, skb, flags);
}
405
406 static inline void l2cap_send_rr_or_rnr(struct l2cap_pinfo *pi, u16 control)
407 {
408 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
409 control |= L2CAP_SUPER_RCV_NOT_READY;
410 pi->conn_state |= L2CAP_CONN_RNR_SENT;
411 } else
412 control |= L2CAP_SUPER_RCV_READY;
413
414 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
415
416 l2cap_send_sframe(pi, control);
417 }
418
419 static inline int __l2cap_no_conn_pending(struct sock *sk)
420 {
421 return !(l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND);
422 }
423
/* Start channel establishment: send a Connect Request once the remote
 * feature mask is known and security is satisfied; otherwise kick off
 * (or wait for) the Information Request exchange first. */
static void l2cap_do_start(struct sock *sk)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
		/* Feature exchange in progress; wait until it completes */
		if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
			return;

		if (l2cap_check_security(sk) && __l2cap_no_conn_pending(sk)) {
			struct l2cap_conn_req req;
			req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
			req.psm  = l2cap_pi(sk)->psm;

			l2cap_pi(sk)->ident = l2cap_get_ident(conn);
			l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;

			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_REQ, sizeof(req), &req);
		}
	} else {
		/* First channel on this link: query the remote feature mask */
		struct l2cap_info_req req;
		req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		mod_timer(&conn->info_timer, jiffies +
					msecs_to_jiffies(L2CAP_INFO_TIMEOUT));

		l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(req), &req);
	}
}
457
458 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
459 {
460 u32 local_feat_mask = l2cap_feat_mask;
461 if (!disable_ertm)
462 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
463
464 switch (mode) {
465 case L2CAP_MODE_ERTM:
466 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
467 case L2CAP_MODE_STREAMING:
468 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
469 default:
470 return 0x00;
471 }
472 }
473
/* Send a Disconnect Request for the channel, dropping any unsent data
 * and stopping ERTM timers, then mark the socket as disconnecting. */
static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct sock *sk, int err)
{
	struct l2cap_disconn_req req;

	if (!conn)
		return;

	skb_queue_purge(TX_QUEUE(sk));

	if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
		del_timer(&l2cap_pi(sk)->retrans_timer);
		del_timer(&l2cap_pi(sk)->monitor_timer);
		del_timer(&l2cap_pi(sk)->ack_timer);
	}

	req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
	req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
	l2cap_send_cmd(conn, l2cap_get_ident(conn),
			L2CAP_DISCONN_REQ, sizeof(req), &req);

	sk->sk_state = BT_DISCONN;
	sk->sk_err = err;
}
497
498 /* ---- L2CAP connections ---- */
499 static void l2cap_conn_start(struct l2cap_conn *conn)
500 {
501 struct l2cap_chan_list *l = &conn->chan_list;
502 struct sock_del_list del, *tmp1, *tmp2;
503 struct sock *sk;
504
505 BT_DBG("conn %p", conn);
506
507 INIT_LIST_HEAD(&del.list);
508
509 read_lock(&l->lock);
510
511 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
512 bh_lock_sock(sk);
513
514 if (sk->sk_type != SOCK_SEQPACKET &&
515 sk->sk_type != SOCK_STREAM) {
516 bh_unlock_sock(sk);
517 continue;
518 }
519
520 if (sk->sk_state == BT_CONNECT) {
521 struct l2cap_conn_req req;
522
523 if (!l2cap_check_security(sk) ||
524 !__l2cap_no_conn_pending(sk)) {
525 bh_unlock_sock(sk);
526 continue;
527 }
528
529 if (!l2cap_mode_supported(l2cap_pi(sk)->mode,
530 conn->feat_mask)
531 && l2cap_pi(sk)->conf_state &
532 L2CAP_CONF_STATE2_DEVICE) {
533 tmp1 = kzalloc(sizeof(struct sock_del_list),
534 GFP_ATOMIC);
535 tmp1->sk = sk;
536 list_add_tail(&tmp1->list, &del.list);
537 bh_unlock_sock(sk);
538 continue;
539 }
540
541 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
542 req.psm = l2cap_pi(sk)->psm;
543
544 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
545 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
546
547 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
548 L2CAP_CONN_REQ, sizeof(req), &req);
549
550 } else if (sk->sk_state == BT_CONNECT2) {
551 struct l2cap_conn_rsp rsp;
552 char buf[128];
553 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
554 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
555
556 if (l2cap_check_security(sk)) {
557 if (bt_sk(sk)->defer_setup) {
558 struct sock *parent = bt_sk(sk)->parent;
559 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
560 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
561 parent->sk_data_ready(parent, 0);
562
563 } else {
564 sk->sk_state = BT_CONFIG;
565 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
566 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
567 }
568 } else {
569 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
570 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
571 }
572
573 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
574 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
575
576 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT ||
577 rsp.result != L2CAP_CR_SUCCESS) {
578 bh_unlock_sock(sk);
579 continue;
580 }
581
582 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
583 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
584 l2cap_build_conf_req(sk, buf), buf);
585 l2cap_pi(sk)->num_conf_req++;
586 }
587
588 bh_unlock_sock(sk);
589 }
590
591 read_unlock(&l->lock);
592
593 list_for_each_entry_safe(tmp1, tmp2, &del.list, list) {
594 bh_lock_sock(tmp1->sk);
595 __l2cap_sock_close(tmp1->sk, ECONNRESET);
596 bh_unlock_sock(tmp1->sk);
597 list_del(&tmp1->list);
598 kfree(tmp1);
599 }
600 }
601
/* The underlying ACL link is up: mark raw/connectionless channels as
 * connected and start establishment for connection-oriented ones. */
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan_list *l = &conn->chan_list;
	struct sock *sk;

	BT_DBG("conn %p", conn);

	read_lock(&l->lock);

	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		bh_lock_sock(sk);

		if (sk->sk_type != SOCK_SEQPACKET &&
				sk->sk_type != SOCK_STREAM) {
			/* Raw/dgram sockets need no channel setup */
			l2cap_sock_clear_timer(sk);
			sk->sk_state = BT_CONNECTED;
			sk->sk_state_change(sk);
		} else if (sk->sk_state == BT_CONNECT)
			l2cap_do_start(sk);

		bh_unlock_sock(sk);
	}

	read_unlock(&l->lock);
}
627
/* Notify sockets that we cannot guaranty reliability anymore */
static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
{
	struct l2cap_chan_list *l = &conn->chan_list;
	struct sock *sk;

	BT_DBG("conn %p", conn);

	read_lock(&l->lock);

	/* Only channels that asked for reliable delivery see the error */
	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		if (l2cap_pi(sk)->force_reliable)
			sk->sk_err = err;
	}

	read_unlock(&l->lock);
}
645
/* Information Request exchange timed out: proceed as if it completed
 * (with an empty feature mask) so channel setup is not stalled forever. */
static void l2cap_info_timeout(unsigned long arg)
{
	struct l2cap_conn *conn = (void *) arg;

	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
	conn->info_ident = 0;

	l2cap_conn_start(conn);
}
655
/* Allocate and initialise the per-ACL-link L2CAP connection object.
 * Returns the existing one if already present; returns NULL on OOM or
 * (by design) when @status is non-zero and no connection exists. */
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	if (conn || status)
		return conn;

	conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
	if (!conn)
		return NULL;

	hcon->l2cap_data = conn;
	conn->hcon = hcon;

	BT_DBG("hcon %p conn %p", hcon, conn);

	conn->mtu = hcon->hdev->acl_mtu;
	conn->src = &hcon->hdev->bdaddr;
	conn->dst = &hcon->dst;

	conn->feat_mask = 0;

	spin_lock_init(&conn->lock);
	rwlock_init(&conn->chan_list.lock);

	setup_timer(&conn->info_timer, l2cap_info_timeout,
						(unsigned long) conn);

	/* Default disconnect reason: remote user terminated connection */
	conn->disc_reason = 0x13;

	return conn;
}
688
/* Tear down the L2CAP connection object: delete (and kill) every channel
 * still attached, stop the info timer and free the structure. */
static void l2cap_conn_del(struct hci_conn *hcon, int err)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct sock *sk;

	if (!conn)
		return;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	/* Drop any partially reassembled frame */
	kfree_skb(conn->rx_skb);

	/* Kill channels */
	while ((sk = conn->chan_list.head)) {
		bh_lock_sock(sk);
		l2cap_chan_del(sk, err);
		bh_unlock_sock(sk);
		l2cap_sock_kill(sk);
	}

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		del_timer_sync(&conn->info_timer);

	hcon->l2cap_data = NULL;
	kfree(conn);
}
715
/* Locked wrapper around __l2cap_chan_add(): attach @sk to @conn while
 * holding the channel list write lock. */
static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
{
	struct l2cap_chan_list *l = &conn->chan_list;
	write_lock_bh(&l->lock);
	__l2cap_chan_add(conn, sk, parent);
	write_unlock_bh(&l->lock);
}
723
724 /* ---- Socket interface ---- */
725
726 /* Find socket with psm and source bdaddr.
727 * Returns closest match.
728 */
729 static struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
730 {
731 struct sock *sk = NULL, *sk1 = NULL;
732 struct hlist_node *node;
733
734 read_lock(&l2cap_sk_list.lock);
735
736 sk_for_each(sk, node, &l2cap_sk_list.head) {
737 if (state && sk->sk_state != state)
738 continue;
739
740 if (l2cap_pi(sk)->psm == psm) {
741 /* Exact match. */
742 if (!bacmp(&bt_sk(sk)->src, src))
743 break;
744
745 /* Closest match */
746 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
747 sk1 = sk;
748 }
749 }
750
751 read_unlock(&l2cap_sk_list.lock);
752
753 return node ? sk : sk1;
754 }
755
/* Close every not-yet-accepted child of a listening socket and mark the
 * parent itself closed and zapped. */
static void l2cap_sock_cleanup_listen(struct sock *parent)
{
	struct sock *sk;

	BT_DBG("parent %p", parent);

	/* Close not yet accepted channels */
	while ((sk = bt_accept_dequeue(parent, NULL)))
		l2cap_sock_close(sk);

	parent->sk_state = BT_CLOSED;
	sock_set_flag(parent, SOCK_ZAPPED);
}
769
/* Kill socket (only if zapped and orphan)
 * Must be called on unlocked socket.
 */
void l2cap_sock_kill(struct sock *sk)
{
	/* Still attached to a struct socket, or not zapped: not ours to kill */
	if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
		return;

	BT_DBG("sk %p state %d", sk, sk->sk_state);

	/* Kill poor orphan */
	bt_sock_unlink(&l2cap_sk_list, sk);
	sock_set_flag(sk, SOCK_DEAD);
	sock_put(sk);
}
785
/* Close a channel socket according to its current state. Caller holds
 * the socket lock. Connected/configuring channels get a proper
 * Disconnect Request; a half-open incoming channel (BT_CONNECT2) is
 * refused with a Connect Response; everything else is torn down locally. */
void __l2cap_sock_close(struct sock *sk, int reason)
{
	BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);

	switch (sk->sk_state) {
	case BT_LISTEN:
		l2cap_sock_cleanup_listen(sk);
		break;

	case BT_CONNECTED:
	case BT_CONFIG:
		if (sk->sk_type == SOCK_SEQPACKET ||
				sk->sk_type == SOCK_STREAM) {
			struct l2cap_conn *conn = l2cap_pi(sk)->conn;

			/* Bound the wait for the Disconnect Response */
			l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
			l2cap_send_disconn_req(conn, sk, reason);
		} else
			l2cap_chan_del(sk, reason);
		break;

	case BT_CONNECT2:
		if (sk->sk_type == SOCK_SEQPACKET ||
				sk->sk_type == SOCK_STREAM) {
			struct l2cap_conn *conn = l2cap_pi(sk)->conn;
			struct l2cap_conn_rsp rsp;
			__u16 result;

			/* Pick a refusal code the remote can make sense of */
			if (bt_sk(sk)->defer_setup)
				result = L2CAP_CR_SEC_BLOCK;
			else
				result = L2CAP_CR_BAD_PSM;
			sk->sk_state = BT_DISCONN;

			rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
			rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
			rsp.result = cpu_to_le16(result);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_RSP, sizeof(rsp), &rsp);
		} else
			l2cap_chan_del(sk, reason);
		break;

	case BT_CONNECT:
	case BT_DISCONN:
		l2cap_chan_del(sk, reason);
		break;

	default:
		sock_set_flag(sk, SOCK_ZAPPED);
		break;
	}
}
840
/* Must be called on unlocked socket. */
/* Full close sequence: stop the timer, close under lock, then reap the
 * socket if it ended up zapped and orphaned. */
static void l2cap_sock_close(struct sock *sk)
{
	l2cap_sock_clear_timer(sk);
	lock_sock(sk);
	__l2cap_sock_close(sk, ECONNRESET);
	release_sock(sk);
	l2cap_sock_kill(sk);
}
850
/* Initiate an outgoing L2CAP connection for @sk: create (or reuse) the
 * ACL link, attach the channel to it, and either complete immediately
 * (raw/dgram on an already-connected link) or start channel setup.
 * Returns 0 on success or a negative errno. */
int l2cap_do_connect(struct sock *sk)
{
	bdaddr_t *src = &bt_sk(sk)->src;
	bdaddr_t *dst = &bt_sk(sk)->dst;
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	__u8 auth_type;
	int err;

	BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
							l2cap_pi(sk)->psm);

	hdev = hci_get_route(dst, src);
	if (!hdev)
		return -EHOSTUNREACH;

	hci_dev_lock_bh(hdev);

	/* Failure of hci_connect()/l2cap_conn_add() below reports -ENOMEM */
	err = -ENOMEM;

	auth_type = l2cap_get_auth_type(sk);

	hcon = hci_connect(hdev, ACL_LINK, dst,
					l2cap_pi(sk)->sec_level, auth_type);
	if (!hcon)
		goto done;

	conn = l2cap_conn_add(hcon, 0);
	if (!conn) {
		hci_conn_put(hcon);
		goto done;
	}

	err = 0;

	/* Update source addr of the socket */
	bacpy(src, conn->src);

	l2cap_chan_add(conn, sk, NULL);

	sk->sk_state = BT_CONNECT;
	l2cap_sock_set_timer(sk, sk->sk_sndtimeo);

	if (hcon->state == BT_CONNECTED) {
		/* ACL link already up: finish or continue setup right away */
		if (sk->sk_type != SOCK_SEQPACKET &&
				sk->sk_type != SOCK_STREAM) {
			l2cap_sock_clear_timer(sk);
			if (l2cap_check_security(sk))
				sk->sk_state = BT_CONNECTED;
		} else
			l2cap_do_start(sk);
	}

done:
	hci_dev_unlock_bh(hdev);
	hci_dev_put(hdev);
	return err;
}
910
/* Sleep (interruptibly, releasing the socket lock while waiting) until
 * every transmitted ERTM I-frame has been acknowledged or the socket
 * errors out. Returns 0 or a negative errno. */
static int __l2cap_wait_ack(struct sock *sk)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;
	int timeo = HZ/5;

	add_wait_queue(sk_sleep(sk), &wait);
	while ((l2cap_pi(sk)->unacked_frames > 0 && l2cap_pi(sk)->conn)) {
		set_current_state(TASK_INTERRUPTIBLE);

		/* Re-arm the poll interval after each wakeup */
		if (!timeo)
			timeo = HZ/5;

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}

		/* Drop the lock so acks can be processed while we sleep */
		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);

		err = sock_error(sk);
		if (err)
			break;
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);
	return err;
}
941
/* ERTM monitor timer expiry: poll the peer again with an RR(P=1), or
 * give up and disconnect once remote_max_tx retries are exhausted. */
static void l2cap_monitor_timeout(unsigned long arg)
{
	struct sock *sk = (void *) arg;

	BT_DBG("sk %p", sk);

	bh_lock_sock(sk);
	if (l2cap_pi(sk)->retry_count >= l2cap_pi(sk)->remote_max_tx) {
		l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk, ECONNABORTED);
		bh_unlock_sock(sk);
		return;
	}

	l2cap_pi(sk)->retry_count++;
	__mod_monitor_timer();

	l2cap_send_rr_or_rnr(l2cap_pi(sk), L2CAP_CTRL_POLL);
	bh_unlock_sock(sk);
}
961
/* ERTM retransmission timer expiry: enter the WAIT_F state and poll the
 * peer with an RR(P=1); the monitor timer takes over from here. */
static void l2cap_retrans_timeout(unsigned long arg)
{
	struct sock *sk = (void *) arg;

	BT_DBG("sk %p", sk);

	bh_lock_sock(sk);
	l2cap_pi(sk)->retry_count = 1;
	__mod_monitor_timer();

	l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;

	l2cap_send_rr_or_rnr(l2cap_pi(sk), L2CAP_CTRL_POLL);
	bh_unlock_sock(sk);
}
977
/* Release transmitted I-frames from the head of the TX queue up to (but
 * not including) expected_ack_seq; stop the retransmission timer once
 * nothing unacknowledged remains. */
static void l2cap_drop_acked_frames(struct sock *sk)
{
	struct sk_buff *skb;

	while ((skb = skb_peek(TX_QUEUE(sk))) &&
			l2cap_pi(sk)->unacked_frames) {
		if (bt_cb(skb)->tx_seq == l2cap_pi(sk)->expected_ack_seq)
			break;

		skb = skb_dequeue(TX_QUEUE(sk));
		kfree_skb(skb);

		l2cap_pi(sk)->unacked_frames--;
	}

	if (!l2cap_pi(sk)->unacked_frames)
		del_timer(&l2cap_pi(sk)->retrans_timer);
}
996
997 static inline void l2cap_do_send(struct sock *sk, struct sk_buff *skb)
998 {
999 struct l2cap_pinfo *pi = l2cap_pi(sk);
1000 struct hci_conn *hcon = pi->conn->hcon;
1001 u16 flags;
1002
1003 BT_DBG("sk %p, skb %p len %d", sk, skb, skb->len);
1004
1005 if (!pi->flushable && lmp_no_flush_capable(hcon->hdev))
1006 flags = ACL_START_NO_FLUSH;
1007 else
1008 flags = ACL_START;
1009
1010 hci_send_acl(hcon, skb, flags);
1011 }
1012
/* Streaming mode: drain the TX queue, stamping each I-frame with the
 * next TxSeq (modulo 64) and an FCS when CRC16 is in use. No
 * retransmission state is kept in this mode. */
static void l2cap_streaming_send(struct sock *sk)
{
	struct sk_buff *skb;
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u16 control, fcs;

	while ((skb = skb_dequeue(TX_QUEUE(sk)))) {
		/* Patch TxSeq into the pre-built control field */
		control = get_unaligned_le16(skb->data + L2CAP_HDR_SIZE);
		control |= pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
		put_unaligned_le16(control, skb->data + L2CAP_HDR_SIZE);

		if (pi->fcs == L2CAP_FCS_CRC16) {
			fcs = crc16(0, (u8 *)skb->data, skb->len - 2);
			put_unaligned_le16(fcs, skb->data + skb->len - 2);
		}

		l2cap_do_send(sk, skb);

		pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
	}
}
1034
1035 static void l2cap_retransmit_one_frame(struct sock *sk, u8 tx_seq)
1036 {
1037 struct l2cap_pinfo *pi = l2cap_pi(sk);
1038 struct sk_buff *skb, *tx_skb;
1039 u16 control, fcs;
1040
1041 skb = skb_peek(TX_QUEUE(sk));
1042 if (!skb)
1043 return;
1044
1045 do {
1046 if (bt_cb(skb)->tx_seq == tx_seq)
1047 break;
1048
1049 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1050 return;
1051
1052 } while ((skb = skb_queue_next(TX_QUEUE(sk), skb)));
1053
1054 if (pi->remote_max_tx &&
1055 bt_cb(skb)->retries == pi->remote_max_tx) {
1056 l2cap_send_disconn_req(pi->conn, sk, ECONNABORTED);
1057 return;
1058 }
1059
1060 tx_skb = skb_clone(skb, GFP_ATOMIC);
1061 bt_cb(skb)->retries++;
1062 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1063
1064 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
1065 control |= L2CAP_CTRL_FINAL;
1066 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1067 }
1068
1069 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1070 | (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1071
1072 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1073
1074 if (pi->fcs == L2CAP_FCS_CRC16) {
1075 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1076 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1077 }
1078
1079 l2cap_do_send(sk, tx_skb);
1080 }
1081
1082 static int l2cap_ertm_send(struct sock *sk)
1083 {
1084 struct sk_buff *skb, *tx_skb;
1085 struct l2cap_pinfo *pi = l2cap_pi(sk);
1086 u16 control, fcs;
1087 int nsent = 0;
1088
1089 if (sk->sk_state != BT_CONNECTED)
1090 return -ENOTCONN;
1091
1092 while ((skb = sk->sk_send_head) && (!l2cap_tx_window_full(sk))) {
1093
1094 if (pi->remote_max_tx &&
1095 bt_cb(skb)->retries == pi->remote_max_tx) {
1096 l2cap_send_disconn_req(pi->conn, sk, ECONNABORTED);
1097 break;
1098 }
1099
1100 tx_skb = skb_clone(skb, GFP_ATOMIC);
1101
1102 bt_cb(skb)->retries++;
1103
1104 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1105 control &= L2CAP_CTRL_SAR;
1106
1107 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
1108 control |= L2CAP_CTRL_FINAL;
1109 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1110 }
1111 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1112 | (pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1113 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1114
1115
1116 if (pi->fcs == L2CAP_FCS_CRC16) {
1117 fcs = crc16(0, (u8 *)skb->data, tx_skb->len - 2);
1118 put_unaligned_le16(fcs, skb->data + tx_skb->len - 2);
1119 }
1120
1121 l2cap_do_send(sk, tx_skb);
1122
1123 __mod_retrans_timer();
1124
1125 bt_cb(skb)->tx_seq = pi->next_tx_seq;
1126 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1127
1128 pi->unacked_frames++;
1129 pi->frames_sent++;
1130
1131 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1132 sk->sk_send_head = NULL;
1133 else
1134 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
1135
1136 nsent++;
1137 }
1138
1139 return nsent;
1140 }
1141
/* Rewind transmission to the first unacknowledged frame and resend the
 * whole window (REJ recovery). Returns l2cap_ertm_send()'s result. */
static int l2cap_retransmit_frames(struct sock *sk)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	int ret;

	if (!skb_queue_empty(TX_QUEUE(sk)))
		sk->sk_send_head = TX_QUEUE(sk)->next;

	pi->next_tx_seq = pi->expected_ack_seq;
	ret = l2cap_ertm_send(sk);
	return ret;
}
1154
/* Acknowledge received I-frames: send RNR when locally busy, otherwise
 * try to piggyback the ack on pending data and fall back to an explicit
 * RR S-frame when nothing was sent. */
static void l2cap_send_ack(struct l2cap_pinfo *pi)
{
	struct sock *sk = (struct sock *)pi;
	u16 control = 0;

	control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;

	if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
		control |= L2CAP_SUPER_RCV_NOT_READY;
		pi->conn_state |= L2CAP_CONN_RNR_SENT;
		l2cap_send_sframe(pi, control);
		return;
	}

	/* I-frames carry ReqSeq, so sending data acks implicitly */
	if (l2cap_ertm_send(sk) > 0)
		return;

	control |= L2CAP_SUPER_RCV_READY;
	l2cap_send_sframe(pi, control);
}
1175
/* Send a final SREJ S-frame (F=1) for the last entry on the SREJ list,
 * closing out a selective-reject recovery exchange. */
static void l2cap_send_srejtail(struct sock *sk)
{
	struct srej_list *tail;
	u16 control;

	control = L2CAP_SUPER_SELECT_REJECT;
	control |= L2CAP_CTRL_FINAL;

	tail = list_entry(SREJ_LIST(sk)->prev, struct srej_list, list);
	control |= tail->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;

	l2cap_send_sframe(l2cap_pi(sk), control);
}
1189
/* Copy @len bytes of user data from @msg into @skb, spilling anything
 * beyond the first @count bytes into MTU-sized fragment skbs chained on
 * skb's frag_list. Returns bytes copied or a negative errno. On error
 * the caller frees @skb, which also releases any fragments attached so
 * far. */
static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct sk_buff **frag;
	int err, sent = 0;

	if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
		return -EFAULT;

	sent += count;
	len -= count;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
		if (!*frag)
			return err;
		if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
			return -EFAULT;

		sent += count;
		len -= count;

		frag = &(*frag)->next;
	}

	return sent;
}
1221
/* Build a connectionless (G-frame) PDU: L2CAP header plus 16-bit PSM,
 * followed by the user payload (fragmented if it exceeds the MTU).
 * Returns the skb or an ERR_PTR. */
static struct sk_buff *l2cap_create_connless_pdu(struct sock *sk, struct msghdr *msg, size_t len)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE + 2;	/* header + PSM */
	struct l2cap_hdr *lh;

	BT_DBG("sk %p len %d", sk, (int)len);

	count = min_t(unsigned int, (conn->mtu - hlen), len);
	skb = bt_skb_send_alloc(sk, count + hlen,
			msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		return ERR_PTR(err);

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
	put_unaligned_le16(l2cap_pi(sk)->psm, skb_put(skb, 2));

	err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
1250
/* Build a single basic-mode PDU: plain L2CAP header followed by up to
 * conn->mtu worth of payload in the linear part, the rest in
 * continuation fragments.  Returns the skb or an ERR_PTR on failure.
 */
static struct sk_buff *l2cap_create_basic_pdu(struct sock *sk, struct msghdr *msg, size_t len)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE;
	struct l2cap_hdr *lh;

	BT_DBG("sk %p len %d", sk, (int)len);

	count = min_t(unsigned int, (conn->mtu - hlen), len);
	skb = bt_skb_send_alloc(sk, count + hlen,
			msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		return ERR_PTR(err);

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
1278
/* Build a single ERTM/streaming I-frame PDU.
 *
 * Layout: L2CAP header, 16-bit control field, optional 16-bit SDU
 * length (present only when @sdulen != 0, i.e. the first frame of a
 * segmented SDU), payload, and a zeroed 2-byte FCS slot when CRC16 is
 * configured.  NOTE(review): the FCS is written as 0 here — presumably
 * the real CRC is filled in on the transmit path; confirm there.
 * Returns the skb or an ERR_PTR on failure.
 */
static struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk, struct msghdr *msg, size_t len, u16 control, u16 sdulen)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE + 2;	/* + control field */
	struct l2cap_hdr *lh;

	BT_DBG("sk %p len %d", sk, (int)len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	if (sdulen)
		hlen += 2;	/* SAR start frames carry the SDU length */

	if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
		hlen += 2;	/* room for the FCS */

	count = min_t(unsigned int, (conn->mtu - hlen), len);
	skb = bt_skb_send_alloc(sk, count + hlen,
			msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		return ERR_PTR(err);

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
	put_unaligned_le16(control, skb_put(skb, 2));
	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, 2));

	err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
		put_unaligned_le16(0, skb_put(skb, 2));

	bt_cb(skb)->retries = 0;
	return skb;
}
1323
/* Split an SDU larger than the remote MPS into a START / CONTINUE /
 * END sequence of I-frame PDUs.
 *
 * PDUs are collected on a local queue first so that a mid-stream
 * allocation failure can be purged cleanly; on success the whole run
 * is spliced onto the channel's tx queue in one go.  Returns the total
 * number of payload bytes queued, or a negative errno.
 */
static inline int l2cap_sar_segment_sdu(struct sock *sk, struct msghdr *msg, size_t len)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct sk_buff *skb;
	struct sk_buff_head sar_queue;
	u16 control;
	size_t size = 0;

	skb_queue_head_init(&sar_queue);
	control = L2CAP_SDU_START;
	/* The START frame carries remote_mps bytes of payload and the
	 * total SDU length in the sdulen field (note the argument order
	 * differs from the later calls). */
	skb = l2cap_create_iframe_pdu(sk, msg, pi->remote_mps, control, len);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	__skb_queue_tail(&sar_queue, skb);
	len -= pi->remote_mps;
	size += pi->remote_mps;

	while (len > 0) {
		size_t buflen;

		if (len > pi->remote_mps) {
			control = L2CAP_SDU_CONTINUE;
			buflen = pi->remote_mps;
		} else {
			control = L2CAP_SDU_END;
			buflen = len;
		}

		skb = l2cap_create_iframe_pdu(sk, msg, buflen, control, 0);
		if (IS_ERR(skb)) {
			/* Drop everything built so far for this SDU */
			skb_queue_purge(&sar_queue);
			return PTR_ERR(skb);
		}

		__skb_queue_tail(&sar_queue, skb);
		len -= buflen;
		size += buflen;
	}
	skb_queue_splice_tail(&sar_queue, TX_QUEUE(sk));
	if (sk->sk_send_head == NULL)
		sk->sk_send_head = sar_queue.next;

	return size;
}
1369
/* sendmsg() for L2CAP sockets.
 *
 * Connectionless (SOCK_DGRAM) channels send a single connless PDU.
 * Basic mode sends one PDU after checking the outgoing MTU.  ERTM and
 * streaming mode queue either one unsegmented I-frame or a SAR-split
 * SDU on the tx queue and then kick the transmit engine.
 *
 * Returns the number of bytes accepted or a negative errno.
 */
int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct sk_buff *skb;
	u16 control;
	int err;

	BT_DBG("sock %p, sk %p", sock, sk);

	err = sock_error(sk);
	if (err)
		return err;

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	lock_sock(sk);

	if (sk->sk_state != BT_CONNECTED) {
		err = -ENOTCONN;
		goto done;
	}

	/* Connectionless channel */
	if (sk->sk_type == SOCK_DGRAM) {
		skb = l2cap_create_connless_pdu(sk, msg, len);
		if (IS_ERR(skb)) {
			err = PTR_ERR(skb);
		} else {
			l2cap_do_send(sk, skb);
			err = len;
		}
		goto done;
	}

	switch (pi->mode) {
	case L2CAP_MODE_BASIC:
		/* Check outgoing MTU */
		if (len > pi->omtu) {
			err = -EMSGSIZE;
			goto done;
		}

		/* Create a basic PDU */
		skb = l2cap_create_basic_pdu(sk, msg, len);
		if (IS_ERR(skb)) {
			err = PTR_ERR(skb);
			goto done;
		}

		l2cap_do_send(sk, skb);
		err = len;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* Entire SDU fits into one PDU */
		if (len <= pi->remote_mps) {
			control = L2CAP_SDU_UNSEGMENTED;
			skb = l2cap_create_iframe_pdu(sk, msg, len, control, 0);
			if (IS_ERR(skb)) {
				err = PTR_ERR(skb);
				goto done;
			}
			__skb_queue_tail(TX_QUEUE(sk), skb);

			if (sk->sk_send_head == NULL)
				sk->sk_send_head = skb;

		} else {
			/* Segment SDU into multiples PDUs */
			err = l2cap_sar_segment_sdu(sk, msg, len);
			if (err < 0)
				goto done;
		}

		if (pi->mode == L2CAP_MODE_STREAMING) {
			l2cap_streaming_send(sk);
		} else {
			/* Busy peer while awaiting an F-bit: don't
			 * transmit now, the data is queued — report
			 * success. */
			if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
					(pi->conn_state & L2CAP_CONN_WAIT_F)) {
				err = len;
				break;
			}
			err = l2cap_ertm_send(sk);
		}

		if (err >= 0)
			err = len;
		break;

	default:
		BT_DBG("bad state %1.1x", pi->mode);
		err = -EBADFD;
	}

done:
	release_sock(sk);
	return err;
}
1471
/* recvmsg() for L2CAP sockets.
 *
 * When the channel sits in BT_CONNECT2 with deferred setup, the first
 * read is what accepts the connection: the pending Connect Response
 * goes out and, unless already sent, our Configure Request follows.
 * Otherwise reception is delegated to the generic bt_sock helpers
 * (stream or datagram flavour depending on the socket type).
 */
int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags)
{
	struct sock *sk = sock->sk;

	lock_sock(sk);

	if (sk->sk_state == BT_CONNECT2 && bt_sk(sk)->defer_setup) {
		struct l2cap_conn_rsp rsp;
		struct l2cap_conn *conn = l2cap_pi(sk)->conn;
		u8 buf[128];

		sk->sk_state = BT_CONFIG;

		/* Complete the deferred connection setup */
		rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
		rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
		rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
		rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
		l2cap_send_cmd(l2cap_pi(sk)->conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_RSP, sizeof(rsp), &rsp);

		if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT) {
			release_sock(sk);
			return 0;
		}

		l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
				l2cap_build_conf_req(sk, buf), buf);
		l2cap_pi(sk)->num_conf_req++;

		release_sock(sk);
		return 0;
	}

	release_sock(sk);

	if (sock->type == SOCK_STREAM)
		return bt_sock_stream_recvmsg(iocb, sock, msg, len, flags);

	return bt_sock_recvmsg(iocb, sock, msg, len, flags);
}
1513
/* shutdown() for L2CAP sockets.
 *
 * On the first shutdown of the socket: for ERTM channels first drain
 * unacked frames via __l2cap_wait_ack(), then close the channel; if
 * SO_LINGER is set, wait up to sk_lingertime for the BT_CLOSED state.
 * Returns 0 or a negative errno (including any pending sk_err).
 */
int l2cap_sock_shutdown(struct socket *sock, int how)
{
	struct sock *sk = sock->sk;
	int err = 0;

	BT_DBG("sock %p, sk %p", sock, sk);

	if (!sk)
		return 0;

	lock_sock(sk);
	if (!sk->sk_shutdown) {
		if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
			err = __l2cap_wait_ack(sk);

		sk->sk_shutdown = SHUTDOWN_MASK;
		l2cap_sock_clear_timer(sk);
		__l2cap_sock_close(sk, 0);

		if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
			err = bt_sock_wait_state(sk, BT_CLOSED,
							sk->sk_lingertime);
	}

	if (!err && sk->sk_err)
		err = -sk->sk_err;

	release_sock(sk);
	return err;
}
1544
1545 static void l2cap_chan_ready(struct sock *sk)
1546 {
1547 struct sock *parent = bt_sk(sk)->parent;
1548
1549 BT_DBG("sk %p, parent %p", sk, parent);
1550
1551 l2cap_pi(sk)->conf_state = 0;
1552 l2cap_sock_clear_timer(sk);
1553
1554 if (!parent) {
1555 /* Outgoing channel.
1556 * Wake up socket sleeping on connect.
1557 */
1558 sk->sk_state = BT_CONNECTED;
1559 sk->sk_state_change(sk);
1560 } else {
1561 /* Incoming channel.
1562 * Wake up socket sleeping on accept.
1563 */
1564 parent->sk_data_ready(parent, 0);
1565 }
1566 }
1567
1568 /* Copy frame to all raw sockets on that connection */
1569 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
1570 {
1571 struct l2cap_chan_list *l = &conn->chan_list;
1572 struct sk_buff *nskb;
1573 struct sock *sk;
1574
1575 BT_DBG("conn %p", conn);
1576
1577 read_lock(&l->lock);
1578 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
1579 if (sk->sk_type != SOCK_RAW)
1580 continue;
1581
1582 /* Don't send frame to the socket it came from */
1583 if (skb->sk == sk)
1584 continue;
1585 nskb = skb_clone(skb, GFP_ATOMIC);
1586 if (!nskb)
1587 continue;
1588
1589 if (sock_queue_rcv_skb(sk, nskb))
1590 kfree_skb(nskb);
1591 }
1592 read_unlock(&l->lock);
1593 }
1594
/* ---- L2CAP signalling commands ---- */

/* Allocate and fill an skb carrying one signalling command on the
 * signalling channel: L2CAP header, command header, then @dlen bytes
 * of @data.  Data beyond conn->mtu is placed in continuation
 * fragments on the frag_list.  Returns NULL on allocation failure.
 */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
		u8 code, u8 ident, u16 dlen, void *data)
{
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	int len, count;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
			conn, code, ident, dlen);

	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, conn->mtu, len);

	skb = bt_skb_alloc(count, GFP_ATOMIC);
	if (!skb)
		return NULL;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
	lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);

	cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->code = code;
	cmd->ident = ident;
	cmd->len = cpu_to_le16(dlen);

	if (dlen) {
		/* Fill the rest of the first skb with command data */
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		memcpy(skb_put(skb, count), data, count);
		data += count;
	}

	len -= skb->len;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_alloc(count, GFP_ATOMIC);
		if (!*frag)
			goto fail;

		memcpy(skb_put(*frag, count), data, count);

		len -= count;
		data += count;

		frag = &(*frag)->next;
	}

	return skb;

fail:
	/* Frees the whole chain, fragments included */
	kfree_skb(skb);
	return NULL;
}
1654
1655 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
1656 {
1657 struct l2cap_conf_opt *opt = *ptr;
1658 int len;
1659
1660 len = L2CAP_CONF_OPT_SIZE + opt->len;
1661 *ptr += len;
1662
1663 *type = opt->type;
1664 *olen = opt->len;
1665
1666 switch (opt->len) {
1667 case 1:
1668 *val = *((u8 *) opt->val);
1669 break;
1670
1671 case 2:
1672 *val = get_unaligned_le16(opt->val);
1673 break;
1674
1675 case 4:
1676 *val = get_unaligned_le32(opt->val);
1677 break;
1678
1679 default:
1680 *val = (unsigned long) opt->val;
1681 break;
1682 }
1683
1684 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
1685 return len;
1686 }
1687
1688 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
1689 {
1690 struct l2cap_conf_opt *opt = *ptr;
1691
1692 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
1693
1694 opt->type = type;
1695 opt->len = len;
1696
1697 switch (len) {
1698 case 1:
1699 *((u8 *) opt->val) = val;
1700 break;
1701
1702 case 2:
1703 put_unaligned_le16(val, opt->val);
1704 break;
1705
1706 case 4:
1707 put_unaligned_le32(val, opt->val);
1708 break;
1709
1710 default:
1711 memcpy(opt->val, (void *) val, len);
1712 break;
1713 }
1714
1715 *ptr += L2CAP_CONF_OPT_SIZE + len;
1716 }
1717
/* Ack timer expiry: send the pending acknowledgement for the channel
 * whose socket was stashed in the timer argument. */
static void l2cap_ack_timeout(unsigned long arg)
{
	struct sock *sk = (struct sock *) arg;

	bh_lock_sock(sk);
	l2cap_send_ack(l2cap_pi(sk));
	bh_unlock_sock(sk);
}
1726
1727 static inline void l2cap_ertm_init(struct sock *sk)
1728 {
1729 l2cap_pi(sk)->expected_ack_seq = 0;
1730 l2cap_pi(sk)->unacked_frames = 0;
1731 l2cap_pi(sk)->buffer_seq = 0;
1732 l2cap_pi(sk)->num_acked = 0;
1733 l2cap_pi(sk)->frames_sent = 0;
1734
1735 setup_timer(&l2cap_pi(sk)->retrans_timer,
1736 l2cap_retrans_timeout, (unsigned long) sk);
1737 setup_timer(&l2cap_pi(sk)->monitor_timer,
1738 l2cap_monitor_timeout, (unsigned long) sk);
1739 setup_timer(&l2cap_pi(sk)->ack_timer,
1740 l2cap_ack_timeout, (unsigned long) sk);
1741
1742 __skb_queue_head_init(SREJ_QUEUE(sk));
1743 __skb_queue_head_init(BUSY_QUEUE(sk));
1744
1745 INIT_WORK(&l2cap_pi(sk)->busy_work, l2cap_busy_work);
1746
1747 sk->sk_backlog_rcv = l2cap_ertm_data_rcv;
1748 }
1749
1750 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
1751 {
1752 switch (mode) {
1753 case L2CAP_MODE_STREAMING:
1754 case L2CAP_MODE_ERTM:
1755 if (l2cap_mode_supported(mode, remote_feat_mask))
1756 return mode;
1757 /* fall through */
1758 default:
1759 return L2CAP_MODE_BASIC;
1760 }
1761 }
1762
/* Build our Configure Request into @data.
 *
 * On the first request the effective mode is (re)selected against the
 * remote feature mask unless the device mandates it.  Then the MTU
 * option (if non-default) and the mode-specific RFC and FCS options
 * are appended.  Returns the request length in bytes.
 */
static int l2cap_build_conf_req(struct sock *sk, void *data)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = pi->mode };
	void *ptr = req->data;

	BT_DBG("sk %p", sk);

	/* Mode selection only happens on the very first exchange */
	if (pi->num_conf_req || pi->num_conf_rsp)
		goto done;

	switch (pi->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		/* STATE2_DEVICE means the mode is mandated; keep it */
		if (pi->conf_state & L2CAP_CONF_STATE2_DEVICE)
			break;

		/* fall through */
	default:
		pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
		break;
	}

done:
	if (pi->imtu != L2CAP_DEFAULT_MTU)
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);

	switch (pi->mode) {
	case L2CAP_MODE_BASIC:
		/* Only advertise basic mode explicitly when the peer
		 * supports the enhanced modes at all */
		if (!(pi->conn->feat_mask & L2CAP_FEAT_ERTM) &&
				!(pi->conn->feat_mask & L2CAP_FEAT_STREAMING))
			break;

		rfc.mode = L2CAP_MODE_BASIC;
		rfc.txwin_size = 0;
		rfc.max_transmit = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size = 0;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
							(unsigned long) &rfc);
		break;

	case L2CAP_MODE_ERTM:
		rfc.mode = L2CAP_MODE_ERTM;
		rfc.txwin_size = pi->tx_win;
		rfc.max_transmit = pi->max_tx;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
		/* Cap the PDU size so a full frame fits the ACL MTU */
		if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
			rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
							(unsigned long) &rfc);

		if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		if (pi->fcs == L2CAP_FCS_NONE ||
				pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
			pi->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
		}
		break;

	case L2CAP_MODE_STREAMING:
		rfc.mode = L2CAP_MODE_STREAMING;
		rfc.txwin_size = 0;
		rfc.max_transmit = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
		if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
			rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
							(unsigned long) &rfc);

		if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		if (pi->fcs == L2CAP_FCS_NONE ||
				pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
			pi->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
		}
		break;
	}

	/* FIXME: Need actual value of the flush timeout */
	//if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
	//   l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);

	req->dcid  = cpu_to_le16(pi->dcid);
	req->flags = cpu_to_le16(0);

	return ptr - data;
}
1864
1865 static int l2cap_parse_conf_req(struct sock *sk, void *data)
1866 {
1867 struct l2cap_pinfo *pi = l2cap_pi(sk);
1868 struct l2cap_conf_rsp *rsp = data;
1869 void *ptr = rsp->data;
1870 void *req = pi->conf_req;
1871 int len = pi->conf_len;
1872 int type, hint, olen;
1873 unsigned long val;
1874 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
1875 u16 mtu = L2CAP_DEFAULT_MTU;
1876 u16 result = L2CAP_CONF_SUCCESS;
1877
1878 BT_DBG("sk %p", sk);
1879
1880 while (len >= L2CAP_CONF_OPT_SIZE) {
1881 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
1882
1883 hint = type & L2CAP_CONF_HINT;
1884 type &= L2CAP_CONF_MASK;
1885
1886 switch (type) {
1887 case L2CAP_CONF_MTU:
1888 mtu = val;
1889 break;
1890
1891 case L2CAP_CONF_FLUSH_TO:
1892 pi->flush_to = val;
1893 break;
1894
1895 case L2CAP_CONF_QOS:
1896 break;
1897
1898 case L2CAP_CONF_RFC:
1899 if (olen == sizeof(rfc))
1900 memcpy(&rfc, (void *) val, olen);
1901 break;
1902
1903 case L2CAP_CONF_FCS:
1904 if (val == L2CAP_FCS_NONE)
1905 pi->conf_state |= L2CAP_CONF_NO_FCS_RECV;
1906
1907 break;
1908
1909 default:
1910 if (hint)
1911 break;
1912
1913 result = L2CAP_CONF_UNKNOWN;
1914 *((u8 *) ptr++) = type;
1915 break;
1916 }
1917 }
1918
1919 if (pi->num_conf_rsp || pi->num_conf_req > 1)
1920 goto done;
1921
1922 switch (pi->mode) {
1923 case L2CAP_MODE_STREAMING:
1924 case L2CAP_MODE_ERTM:
1925 if (!(pi->conf_state & L2CAP_CONF_STATE2_DEVICE)) {
1926 pi->mode = l2cap_select_mode(rfc.mode,
1927 pi->conn->feat_mask);
1928 break;
1929 }
1930
1931 if (pi->mode != rfc.mode)
1932 return -ECONNREFUSED;
1933
1934 break;
1935 }
1936
1937 done:
1938 if (pi->mode != rfc.mode) {
1939 result = L2CAP_CONF_UNACCEPT;
1940 rfc.mode = pi->mode;
1941
1942 if (pi->num_conf_rsp == 1)
1943 return -ECONNREFUSED;
1944
1945 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
1946 sizeof(rfc), (unsigned long) &rfc);
1947 }
1948
1949
1950 if (result == L2CAP_CONF_SUCCESS) {
1951 /* Configure output options and let the other side know
1952 * which ones we don't like. */
1953
1954 if (mtu < L2CAP_DEFAULT_MIN_MTU)
1955 result = L2CAP_CONF_UNACCEPT;
1956 else {
1957 pi->omtu = mtu;
1958 pi->conf_state |= L2CAP_CONF_MTU_DONE;
1959 }
1960 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
1961
1962 switch (rfc.mode) {
1963 case L2CAP_MODE_BASIC:
1964 pi->fcs = L2CAP_FCS_NONE;
1965 pi->conf_state |= L2CAP_CONF_MODE_DONE;
1966 break;
1967
1968 case L2CAP_MODE_ERTM:
1969 pi->remote_tx_win = rfc.txwin_size;
1970 pi->remote_max_tx = rfc.max_transmit;
1971
1972 if (le16_to_cpu(rfc.max_pdu_size) > pi->conn->mtu - 10)
1973 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
1974
1975 pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
1976
1977 rfc.retrans_timeout =
1978 le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO);
1979 rfc.monitor_timeout =
1980 le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO);
1981
1982 pi->conf_state |= L2CAP_CONF_MODE_DONE;
1983
1984 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
1985 sizeof(rfc), (unsigned long) &rfc);
1986
1987 break;
1988
1989 case L2CAP_MODE_STREAMING:
1990 if (le16_to_cpu(rfc.max_pdu_size) > pi->conn->mtu - 10)
1991 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
1992
1993 pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
1994
1995 pi->conf_state |= L2CAP_CONF_MODE_DONE;
1996
1997 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
1998 sizeof(rfc), (unsigned long) &rfc);
1999
2000 break;
2001
2002 default:
2003 result = L2CAP_CONF_UNACCEPT;
2004
2005 memset(&rfc, 0, sizeof(rfc));
2006 rfc.mode = pi->mode;
2007 }
2008
2009 if (result == L2CAP_CONF_SUCCESS)
2010 pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
2011 }
2012 rsp->scid = cpu_to_le16(pi->dcid);
2013 rsp->result = cpu_to_le16(result);
2014 rsp->flags = cpu_to_le16(0x0000);
2015
2016 return ptr - data;
2017 }
2018
2019 static int l2cap_parse_conf_rsp(struct sock *sk, void *rsp, int len, void *data, u16 *result)
2020 {
2021 struct l2cap_pinfo *pi = l2cap_pi(sk);
2022 struct l2cap_conf_req *req = data;
2023 void *ptr = req->data;
2024 int type, olen;
2025 unsigned long val;
2026 struct l2cap_conf_rfc rfc;
2027
2028 BT_DBG("sk %p, rsp %p, len %d, req %p", sk, rsp, len, data);
2029
2030 while (len >= L2CAP_CONF_OPT_SIZE) {
2031 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2032
2033 switch (type) {
2034 case L2CAP_CONF_MTU:
2035 if (val < L2CAP_DEFAULT_MIN_MTU) {
2036 *result = L2CAP_CONF_UNACCEPT;
2037 pi->imtu = L2CAP_DEFAULT_MIN_MTU;
2038 } else
2039 pi->imtu = val;
2040 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
2041 break;
2042
2043 case L2CAP_CONF_FLUSH_TO:
2044 pi->flush_to = val;
2045 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2046 2, pi->flush_to);
2047 break;
2048
2049 case L2CAP_CONF_RFC:
2050 if (olen == sizeof(rfc))
2051 memcpy(&rfc, (void *)val, olen);
2052
2053 if ((pi->conf_state & L2CAP_CONF_STATE2_DEVICE) &&
2054 rfc.mode != pi->mode)
2055 return -ECONNREFUSED;
2056
2057 pi->fcs = 0;
2058
2059 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2060 sizeof(rfc), (unsigned long) &rfc);
2061 break;
2062 }
2063 }
2064
2065 if (pi->mode == L2CAP_MODE_BASIC && pi->mode != rfc.mode)
2066 return -ECONNREFUSED;
2067
2068 pi->mode = rfc.mode;
2069
2070 if (*result == L2CAP_CONF_SUCCESS) {
2071 switch (rfc.mode) {
2072 case L2CAP_MODE_ERTM:
2073 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2074 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2075 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2076 break;
2077 case L2CAP_MODE_STREAMING:
2078 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2079 }
2080 }
2081
2082 req->dcid = cpu_to_le16(pi->dcid);
2083 req->flags = cpu_to_le16(0x0000);
2084
2085 return ptr - data;
2086 }
2087
2088 static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
2089 {
2090 struct l2cap_conf_rsp *rsp = data;
2091 void *ptr = rsp->data;
2092
2093 BT_DBG("sk %p", sk);
2094
2095 rsp->scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2096 rsp->result = cpu_to_le16(result);
2097 rsp->flags = cpu_to_le16(flags);
2098
2099 return ptr - data;
2100 }
2101
2102 static void l2cap_conf_rfc_get(struct sock *sk, void *rsp, int len)
2103 {
2104 struct l2cap_pinfo *pi = l2cap_pi(sk);
2105 int type, olen;
2106 unsigned long val;
2107 struct l2cap_conf_rfc rfc;
2108
2109 BT_DBG("sk %p, rsp %p, len %d", sk, rsp, len);
2110
2111 if ((pi->mode != L2CAP_MODE_ERTM) && (pi->mode != L2CAP_MODE_STREAMING))
2112 return;
2113
2114 while (len >= L2CAP_CONF_OPT_SIZE) {
2115 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2116
2117 switch (type) {
2118 case L2CAP_CONF_RFC:
2119 if (olen == sizeof(rfc))
2120 memcpy(&rfc, (void *)val, olen);
2121 goto done;
2122 }
2123 }
2124
2125 done:
2126 switch (rfc.mode) {
2127 case L2CAP_MODE_ERTM:
2128 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2129 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2130 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2131 break;
2132 case L2CAP_MODE_STREAMING:
2133 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2134 }
2135 }
2136
2137 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2138 {
2139 struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
2140
2141 if (rej->reason != 0x0000)
2142 return 0;
2143
2144 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2145 cmd->ident == conn->info_ident) {
2146 del_timer(&conn->info_timer);
2147
2148 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2149 conn->info_ident = 0;
2150
2151 l2cap_conn_start(conn);
2152 }
2153
2154 return 0;
2155 }
2156
/* Handle an incoming L2CAP Connect Request.
 *
 * Finds a listening socket on the requested PSM, checks link security
 * (skipped for PSM 0x0001, SDP), allocates and links a child socket,
 * and answers with a Connect Response whose result is success,
 * pending (authentication/authorization or feature exchange still
 * outstanding) or an error.  May additionally trigger the feature
 * mask information request and, on immediate success, our first
 * Configure Request.
 */
static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_chan_list *list = &conn->chan_list;
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct sock *parent, *sk = NULL;
	int result, status = L2CAP_CS_NO_INFO;

	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);

	/* Check if we have socket listening on psm */
	parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
	if (!parent) {
		result = L2CAP_CR_BAD_PSM;
		goto sendresp;
	}

	bh_lock_sock(parent);

	/* Check if the ACL is secure enough (if not SDP) */
	if (psm != cpu_to_le16(0x0001) &&
				!hci_conn_check_link_mode(conn->hcon)) {
		conn->disc_reason = 0x05;
		result = L2CAP_CR_SEC_BLOCK;
		goto response;
	}

	result = L2CAP_CR_NO_MEM;

	/* Check for backlog size */
	if (sk_acceptq_is_full(parent)) {
		BT_DBG("backlog full %d", parent->sk_ack_backlog);
		goto response;
	}

	sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
	if (!sk)
		goto response;

	write_lock_bh(&list->lock);

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(list, scid)) {
		write_unlock_bh(&list->lock);
		sock_set_flag(sk, SOCK_ZAPPED);
		l2cap_sock_kill(sk);
		goto response;
	}

	hci_conn_hold(conn->hcon);

	/* Initialise the child socket from the listening parent */
	l2cap_sock_init(sk, parent);
	bacpy(&bt_sk(sk)->src, conn->src);
	bacpy(&bt_sk(sk)->dst, conn->dst);
	l2cap_pi(sk)->psm = psm;
	l2cap_pi(sk)->dcid = scid;

	__l2cap_chan_add(conn, sk, parent);
	dcid = l2cap_pi(sk)->scid;

	l2cap_sock_set_timer(sk, sk->sk_sndtimeo);

	l2cap_pi(sk)->ident = cmd->ident;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
		if (l2cap_check_security(sk)) {
			if (bt_sk(sk)->defer_setup) {
				/* Userspace accepts via first recvmsg() */
				sk->sk_state = BT_CONNECT2;
				result = L2CAP_CR_PEND;
				status = L2CAP_CS_AUTHOR_PEND;
				parent->sk_data_ready(parent, 0);
			} else {
				sk->sk_state = BT_CONFIG;
				result = L2CAP_CR_SUCCESS;
				status = L2CAP_CS_NO_INFO;
			}
		} else {
			sk->sk_state = BT_CONNECT2;
			result = L2CAP_CR_PEND;
			status = L2CAP_CS_AUTHEN_PEND;
		}
	} else {
		/* Feature mask exchange still in progress */
		sk->sk_state = BT_CONNECT2;
		result = L2CAP_CR_PEND;
		status = L2CAP_CS_NO_INFO;
	}

	write_unlock_bh(&list->lock);

response:
	bh_unlock_sock(parent);

sendresp:
	rsp.scid = cpu_to_le16(scid);
	rsp.dcid = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);

	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
		/* Kick off the feature mask information exchange */
		struct l2cap_info_req info;
		info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		mod_timer(&conn->info_timer, jiffies +
					msecs_to_jiffies(L2CAP_INFO_TIMEOUT));

		l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(info), &info);
	}

	if (sk && !(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT) &&
				result == L2CAP_CR_SUCCESS) {
		u8 buf[128];
		l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(sk, buf), buf);
		l2cap_pi(sk)->num_conf_req++;
	}

	return 0;
}
2284
/* Handle an L2CAP Connect Response.
 *
 * The channel is located by our source CID or, while the response is
 * still pending, by the command ident.  Success moves the channel to
 * BT_CONFIG and sends our Configure Request; a pending result sets
 * CONNECT_PEND; anything else tears the channel down (deferred via a
 * short timer when the socket is currently owned by user context).
 * NOTE(review): bh_unlock_sock(sk) at the end implies the lookup
 * helpers return with the socket bh-locked — confirm in their
 * definitions.
 */
static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct sock *sk;
	u8 req[128];

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);

	if (scid) {
		sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
		if (!sk)
			return -EFAULT;
	} else {
		sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident);
		if (!sk)
			return -EFAULT;
	}

	switch (result) {
	case L2CAP_CR_SUCCESS:
		sk->sk_state = BT_CONFIG;
		l2cap_pi(sk)->ident = 0;
		l2cap_pi(sk)->dcid = dcid;
		l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_CONNECT_PEND;

		if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)
			break;

		l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(sk, req), req);
		l2cap_pi(sk)->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
		break;

	default:
		/* don't delete l2cap channel if sk is owned by user */
		if (sock_owned_by_user(sk)) {
			sk->sk_state = BT_DISCONN;
			l2cap_sock_clear_timer(sk);
			l2cap_sock_set_timer(sk, HZ / 5);
			break;
		}

		l2cap_chan_del(sk, ECONNREFUSED);
		break;
	}

	bh_unlock_sock(sk);
	return 0;
}
2346
2347 static inline void set_default_fcs(struct l2cap_pinfo *pi)
2348 {
2349 /* FCS is enabled only in ERTM or streaming mode, if one or both
2350 * sides request it.
2351 */
2352 if (pi->mode != L2CAP_MODE_ERTM && pi->mode != L2CAP_MODE_STREAMING)
2353 pi->fcs = L2CAP_FCS_NONE;
2354 else if (!(pi->conf_state & L2CAP_CONF_NO_FCS_RECV))
2355 pi->fcs = L2CAP_FCS_CRC16;
2356 }
2357
/* Handle an L2CAP Configure Request.
 *
 * Option data may arrive split over several requests (continuation
 * flag 0x0001); fragments accumulate in pi->conf_req until complete.
 * A complete request is parsed into a response; once both directions
 * are configured the channel becomes connected (with ERTM state
 * initialised if needed), otherwise our own Configure Request goes
 * out if it hasn't yet.
 */
static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rsp[64];
	struct sock *sk;
	int len;

	dcid  = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
	if (!sk)
		return -ENOENT;

	if (sk->sk_state != BT_CONFIG) {
		struct l2cap_cmd_rej rej;

		/* 0x0002: invalid CID in request */
		rej.reason = cpu_to_le16(0x0002);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
				sizeof(rej), &rej);
		goto unlock;
	}

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(sk, rsp,
					L2CAP_CONF_REJECT, flags), rsp);
		goto unlock;
	}

	/* Store config. */
	memcpy(l2cap_pi(sk)->conf_req + l2cap_pi(sk)->conf_len, req->data, len);
	l2cap_pi(sk)->conf_len += len;

	if (flags & 0x0001) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(sk, rsp,
					L2CAP_CONF_SUCCESS, 0x0001), rsp);
		goto unlock;
	}

	/* Complete config. */
	len = l2cap_parse_conf_req(sk, rsp);
	if (len < 0) {
		l2cap_send_disconn_req(conn, sk, ECONNRESET);
		goto unlock;
	}

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
	l2cap_pi(sk)->num_conf_rsp++;

	/* Reset config buffer. */
	l2cap_pi(sk)->conf_len = 0;

	if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))
		goto unlock;

	if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
		/* Both directions configured: bring the channel up */
		set_default_fcs(l2cap_pi(sk));

		sk->sk_state = BT_CONNECTED;

		l2cap_pi(sk)->next_tx_seq = 0;
		l2cap_pi(sk)->expected_tx_seq = 0;
		__skb_queue_head_init(TX_QUEUE(sk));
		if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
			l2cap_ertm_init(sk);

		l2cap_chan_ready(sk);
		goto unlock;
	}

	if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
		u8 buf[64];
		l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(sk, buf), buf);
		l2cap_pi(sk)->num_conf_req++;
	}

unlock:
	bh_unlock_sock(sk);
	return 0;
}
2448
2449 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2450 {
2451 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
2452 u16 scid, flags, result;
2453 struct sock *sk;
2454 int len = cmd->len - sizeof(*rsp);
2455
2456 scid = __le16_to_cpu(rsp->scid);
2457 flags = __le16_to_cpu(rsp->flags);
2458 result = __le16_to_cpu(rsp->result);
2459
2460 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
2461 scid, flags, result);
2462
2463 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
2464 if (!sk)
2465 return 0;
2466
2467 switch (result) {
2468 case L2CAP_CONF_SUCCESS:
2469 l2cap_conf_rfc_get(sk, rsp->data, len);
2470 break;
2471
2472 case L2CAP_CONF_UNACCEPT:
2473 if (l2cap_pi(sk)->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
2474 char req[64];
2475
2476 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
2477 l2cap_send_disconn_req(conn, sk, ECONNRESET);
2478 goto done;
2479 }
2480
2481 /* throw out any old stored conf requests */
2482 result = L2CAP_CONF_SUCCESS;
2483 len = l2cap_parse_conf_rsp(sk, rsp->data,
2484 len, req, &result);
2485 if (len < 0) {
2486 l2cap_send_disconn_req(conn, sk, ECONNRESET);
2487 goto done;
2488 }
2489
2490 l2cap_send_cmd(conn, l2cap_get_ident(conn),
2491 L2CAP_CONF_REQ, len, req);
2492 l2cap_pi(sk)->num_conf_req++;
2493 if (result != L2CAP_CONF_SUCCESS)
2494 goto done;
2495 break;
2496 }
2497
2498 default:
2499 sk->sk_err = ECONNRESET;
2500 l2cap_sock_set_timer(sk, HZ * 5);
2501 l2cap_send_disconn_req(conn, sk, ECONNRESET);
2502 goto done;
2503 }
2504
2505 if (flags & 0x01)
2506 goto done;
2507
2508 l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
2509
2510 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
2511 set_default_fcs(l2cap_pi(sk));
2512
2513 sk->sk_state = BT_CONNECTED;
2514 l2cap_pi(sk)->next_tx_seq = 0;
2515 l2cap_pi(sk)->expected_tx_seq = 0;
2516 __skb_queue_head_init(TX_QUEUE(sk));
2517 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
2518 l2cap_ertm_init(sk);
2519
2520 l2cap_chan_ready(sk);
2521 }
2522
2523 done:
2524 bh_unlock_sock(sk);
2525 return 0;
2526 }
2527
/* Handle an incoming Disconnect Request: acknowledge with a Disconnect
 * Response and tear the channel down.  If the socket is currently owned
 * by a user-space caller, teardown is deferred with a short timer so we
 * do not destroy state the user still holds.
 *
 * NOTE(review): the lookup appears to return the socket bh-locked —
 * both exit paths call bh_unlock_sock(); confirm in
 * l2cap_get_chan_by_scid(). */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct sock *sk;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
	if (!sk)
		return 0;

	/* Echo our CID pair back in the response. */
	rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
	rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	sk->sk_shutdown = SHUTDOWN_MASK;

	/* don't delete l2cap channel if sk is owned by user */
	if (sock_owned_by_user(sk)) {
		sk->sk_state = BT_DISCONN;
		l2cap_sock_clear_timer(sk);
		/* retry teardown shortly (HZ/5 = 200ms) */
		l2cap_sock_set_timer(sk, HZ / 5);
		bh_unlock_sock(sk);
		return 0;
	}

	l2cap_chan_del(sk, ECONNRESET);
	bh_unlock_sock(sk);

	l2cap_sock_kill(sk);
	return 0;
}
2565
/* Handle an incoming Disconnect Response: the peer has acknowledged our
 * Disconnect Request, so finish tearing the channel down.  As with the
 * request handler, teardown is deferred via a short timer if user space
 * currently owns the socket. */
static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
	u16 dcid, scid;
	struct sock *sk;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);

	/* NOTE(review): lookup returns the socket bh-locked (both exits
	 * unlock); confirm in l2cap_get_chan_by_scid(). */
	sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
	if (!sk)
		return 0;

	/* don't delete l2cap channel if sk is owned by user */
	if (sock_owned_by_user(sk)) {
		sk->sk_state = BT_DISCONN;
		l2cap_sock_clear_timer(sk);
		l2cap_sock_set_timer(sk, HZ / 5);
		bh_unlock_sock(sk);
		return 0;
	}

	/* err == 0: this is a clean, requested disconnect. */
	l2cap_chan_del(sk, 0);
	bh_unlock_sock(sk);

	l2cap_sock_kill(sk);
	return 0;
}
2596
2597 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2598 {
2599 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
2600 u16 type;
2601
2602 type = __le16_to_cpu(req->type);
2603
2604 BT_DBG("type 0x%4.4x", type);
2605
2606 if (type == L2CAP_IT_FEAT_MASK) {
2607 u8 buf[8];
2608 u32 feat_mask = l2cap_feat_mask;
2609 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
2610 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2611 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
2612 if (!disable_ertm)
2613 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
2614 | L2CAP_FEAT_FCS;
2615 put_unaligned_le32(feat_mask, rsp->data);
2616 l2cap_send_cmd(conn, cmd->ident,
2617 L2CAP_INFO_RSP, sizeof(buf), buf);
2618 } else if (type == L2CAP_IT_FIXED_CHAN) {
2619 u8 buf[12];
2620 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
2621 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
2622 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
2623 memcpy(buf + 4, l2cap_fixed_chan, 8);
2624 l2cap_send_cmd(conn, cmd->ident,
2625 L2CAP_INFO_RSP, sizeof(buf), buf);
2626 } else {
2627 struct l2cap_info_rsp rsp;
2628 rsp.type = cpu_to_le16(type);
2629 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
2630 l2cap_send_cmd(conn, cmd->ident,
2631 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
2632 }
2633
2634 return 0;
2635 }
2636
2637 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2638 {
2639 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
2640 u16 type, result;
2641
2642 type = __le16_to_cpu(rsp->type);
2643 result = __le16_to_cpu(rsp->result);
2644
2645 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
2646
2647 del_timer(&conn->info_timer);
2648
2649 if (result != L2CAP_IR_SUCCESS) {
2650 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2651 conn->info_ident = 0;
2652
2653 l2cap_conn_start(conn);
2654
2655 return 0;
2656 }
2657
2658 if (type == L2CAP_IT_FEAT_MASK) {
2659 conn->feat_mask = get_unaligned_le32(rsp->data);
2660
2661 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
2662 struct l2cap_info_req req;
2663 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
2664
2665 conn->info_ident = l2cap_get_ident(conn);
2666
2667 l2cap_send_cmd(conn, conn->info_ident,
2668 L2CAP_INFO_REQ, sizeof(req), &req);
2669 } else {
2670 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2671 conn->info_ident = 0;
2672
2673 l2cap_conn_start(conn);
2674 }
2675 } else if (type == L2CAP_IT_FIXED_CHAN) {
2676 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2677 conn->info_ident = 0;
2678
2679 l2cap_conn_start(conn);
2680 }
2681
2682 return 0;
2683 }
2684
/* Dispatch all signalling commands contained in one C-frame received on
 * the L2CAP signalling channel.  A single skb may carry several
 * commands back to back; each is parsed and routed to its handler, and
 * a handler error is answered with a Command Reject.  Consumes the skb. */
static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
{
	u8 *data = skb->data;
	int len = skb->len;
	struct l2cap_cmd_hdr cmd;
	int err = 0;

	/* Give raw-socket listeners a look at the signalling traffic. */
	l2cap_raw_recv(conn, skb);

	while (len >= L2CAP_CMD_HDR_SIZE) {
		u16 cmd_len;
		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
		data += L2CAP_CMD_HDR_SIZE;
		len -= L2CAP_CMD_HDR_SIZE;

		cmd_len = le16_to_cpu(cmd.len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);

		/* A claimed length beyond the remaining payload, or the
		 * reserved identifier 0, means a corrupted frame: stop. */
		if (cmd_len > len || !cmd.ident) {
			BT_DBG("corrupted command");
			break;
		}

		switch (cmd.code) {
		case L2CAP_COMMAND_REJ:
			l2cap_command_rej(conn, &cmd, data);
			break;

		case L2CAP_CONN_REQ:
			err = l2cap_connect_req(conn, &cmd, data);
			break;

		case L2CAP_CONN_RSP:
			err = l2cap_connect_rsp(conn, &cmd, data);
			break;

		case L2CAP_CONF_REQ:
			err = l2cap_config_req(conn, &cmd, cmd_len, data);
			break;

		case L2CAP_CONF_RSP:
			err = l2cap_config_rsp(conn, &cmd, data);
			break;

		case L2CAP_DISCONN_REQ:
			err = l2cap_disconnect_req(conn, &cmd, data);
			break;

		case L2CAP_DISCONN_RSP:
			err = l2cap_disconnect_rsp(conn, &cmd, data);
			break;

		case L2CAP_ECHO_REQ:
			/* Echo the payload straight back. */
			l2cap_send_cmd(conn, cmd.ident, L2CAP_ECHO_RSP, cmd_len, data);
			break;

		case L2CAP_ECHO_RSP:
			break;

		case L2CAP_INFO_REQ:
			err = l2cap_information_req(conn, &cmd, data);
			break;

		case L2CAP_INFO_RSP:
			err = l2cap_information_rsp(conn, &cmd, data);
			break;

		default:
			BT_ERR("Unknown signaling command 0x%2.2x", cmd.code);
			err = -EINVAL;
			break;
		}

		if (err) {
			struct l2cap_cmd_rej rej;
			BT_DBG("error %d", err);

			/* FIXME: Map err to a valid reason */
			rej.reason = cpu_to_le16(0);
			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
		}

		/* Advance past this command's payload to the next one. */
		data += cmd_len;
		len -= cmd_len;
	}

	kfree_skb(skb);
}
2774
2775 static int l2cap_check_fcs(struct l2cap_pinfo *pi, struct sk_buff *skb)
2776 {
2777 u16 our_fcs, rcv_fcs;
2778 int hdr_size = L2CAP_HDR_SIZE + 2;
2779
2780 if (pi->fcs == L2CAP_FCS_CRC16) {
2781 skb_trim(skb, skb->len - 2);
2782 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
2783 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
2784
2785 if (our_fcs != rcv_fcs)
2786 return -EBADMSG;
2787 }
2788 return 0;
2789 }
2790
/* Respond to a poll (P-bit) from the peer: if locally busy answer with
 * RNR, otherwise flush pending I-frames, and if nothing was sent at all
 * answer with a plain RR so the peer still gets an acknowledgement. */
static inline void l2cap_send_i_or_rr_or_rnr(struct sock *sk)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u16 control = 0;

	pi->frames_sent = 0;

	control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;

	if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
		/* Still busy: tell the peer not to send. */
		control |= L2CAP_SUPER_RCV_NOT_READY;
		l2cap_send_sframe(pi, control);
		pi->conn_state |= L2CAP_CONN_RNR_SENT;
	}

	if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY)
		l2cap_retransmit_frames(sk);

	l2cap_ertm_send(sk);

	/* Nothing went out and we are not busy: send an explicit RR so
	 * the poll is still answered. */
	if (!(pi->conn_state & L2CAP_CONN_LOCAL_BUSY) &&
			pi->frames_sent == 0) {
		control |= L2CAP_SUPER_RCV_READY;
		l2cap_send_sframe(pi, control);
	}
}
2817
/* Insert an out-of-sequence I-frame into the SREJ queue, keeping the
 * queue ordered by tx_seq relative to buffer_seq (sequence numbers are
 * modulo 64, so ordering is done on the offset from buffer_seq).
 *
 * Returns 0 on success, -EINVAL if a frame with the same tx_seq is
 * already queued (duplicate). */
static int l2cap_add_to_srej_queue(struct sock *sk, struct sk_buff *skb, u8 tx_seq, u8 sar)
{
	struct sk_buff *next_skb;
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	int tx_seq_offset, next_tx_seq_offset;

	/* Stash sequence/SAR info in the skb control block for later
	 * reassembly. */
	bt_cb(skb)->tx_seq = tx_seq;
	bt_cb(skb)->sar = sar;

	next_skb = skb_peek(SREJ_QUEUE(sk));
	if (!next_skb) {
		__skb_queue_tail(SREJ_QUEUE(sk), skb);
		return 0;
	}

	tx_seq_offset = (tx_seq - pi->buffer_seq) % 64;
	if (tx_seq_offset < 0)
		tx_seq_offset += 64;

	do {
		if (bt_cb(next_skb)->tx_seq == tx_seq)
			return -EINVAL;

		next_tx_seq_offset = (bt_cb(next_skb)->tx_seq -
						pi->buffer_seq) % 64;
		if (next_tx_seq_offset < 0)
			next_tx_seq_offset += 64;

		/* First queued frame with a larger offset: insert before it. */
		if (next_tx_seq_offset > tx_seq_offset) {
			__skb_queue_before(SREJ_QUEUE(sk), next_skb, skb);
			return 0;
		}

		if (skb_queue_is_last(SREJ_QUEUE(sk), next_skb))
			break;

	} while ((next_skb = skb_queue_next(SREJ_QUEUE(sk), next_skb)));

	/* Largest offset seen so far: append at the tail. */
	__skb_queue_tail(SREJ_QUEUE(sk), skb);

	return 0;
}
2860
/* Reassemble ERTM I-frames into SDUs according to the SAR bits of the
 * control field, delivering complete SDUs to the socket receive queue.
 *
 * Returns 0 on success (including protocol-error paths that disconnect),
 * or a negative errno (-ENOMEM, or a sock_queue_rcv_skb() failure at
 * SDU end) that signals the caller to enter the Local Busy state and
 * retry the frame later. */
static int l2cap_ertm_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct sk_buff *_skb;
	int err;

	switch (control & L2CAP_CTRL_SAR) {
	case L2CAP_SDU_UNSEGMENTED:
		/* An unsegmented SDU must not arrive mid-reassembly. */
		if (pi->conn_state & L2CAP_CONN_SAR_SDU)
			goto drop;

		err = sock_queue_rcv_skb(sk, skb);
		if (!err)
			return err;

		/* NOTE(review): on queue failure this breaks to the
		 * common exit, freeing the frame and returning 0 rather
		 * than propagating the error — confirm this is the
		 * intended behaviour for unsegmented SDUs. */
		break;

	case L2CAP_SDU_START:
		/* A new SDU must not start while one is in progress. */
		if (pi->conn_state & L2CAP_CONN_SAR_SDU)
			goto drop;

		/* First two payload bytes carry the total SDU length. */
		pi->sdu_len = get_unaligned_le16(skb->data);

		if (pi->sdu_len > pi->imtu)
			goto disconnect;

		pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
		if (!pi->sdu)
			return -ENOMEM;

		/* pull sdu_len bytes only after alloc, because of Local Busy
		 * condition we have to be sure that this will be executed
		 * only once, i.e., when alloc does not fail */
		skb_pull(skb, 2);

		memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);

		pi->conn_state |= L2CAP_CONN_SAR_SDU;
		pi->partial_sdu_len = skb->len;
		break;

	case L2CAP_SDU_CONTINUE:
		if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
			goto disconnect;

		if (!pi->sdu)
			goto disconnect;

		/* Bound check before copying into the sdu_len-sized buffer. */
		pi->partial_sdu_len += skb->len;
		if (pi->partial_sdu_len > pi->sdu_len)
			goto drop;

		memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);

		break;

	case L2CAP_SDU_END:
		if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
			goto disconnect;

		if (!pi->sdu)
			goto disconnect;

		/* On a Local Busy retry (SAR_RETRY) the final fragment was
		 * already copied in; skip straight to re-delivery. */
		if (!(pi->conn_state & L2CAP_CONN_SAR_RETRY)) {
			pi->partial_sdu_len += skb->len;

			if (pi->partial_sdu_len > pi->imtu)
				goto drop;

			if (pi->partial_sdu_len != pi->sdu_len)
				goto drop;

			memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
		}

		_skb = skb_clone(pi->sdu, GFP_ATOMIC);
		if (!_skb) {
			/* Remember the copy already happened so the retry
			 * only repeats the delivery. */
			pi->conn_state |= L2CAP_CONN_SAR_RETRY;
			return -ENOMEM;
		}

		err = sock_queue_rcv_skb(sk, _skb);
		if (err < 0) {
			kfree_skb(_skb);
			pi->conn_state |= L2CAP_CONN_SAR_RETRY;
			return err;
		}

		pi->conn_state &= ~L2CAP_CONN_SAR_RETRY;
		pi->conn_state &= ~L2CAP_CONN_SAR_SDU;

		kfree_skb(pi->sdu);
		break;
	}

	kfree_skb(skb);
	return 0;

drop:
	kfree_skb(pi->sdu);
	pi->sdu = NULL;

	/* fall through: a dropped reassembly is a protocol violation in
	 * ERTM, so the connection is torn down as well */
disconnect:
	l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
	kfree_skb(skb);
	return 0;
}
2968
/* Try to drain the Local Busy queue into the receive path.  Returns
 * -EBUSY (frame re-queued) if the receiver is still congested; on a
 * full drain, exits the Local Busy state and — if an RNR had been sent —
 * polls the peer with an RR so transmission resumes.  Returns 0 once
 * local busy is cleared. */
static int l2cap_try_push_rx_skb(struct sock *sk)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct sk_buff *skb;
	u16 control;
	int err;

	while ((skb = skb_dequeue(BUSY_QUEUE(sk)))) {
		control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
		err = l2cap_ertm_reassembly_sdu(sk, skb, control);
		if (err < 0) {
			/* Still congested: put the frame back for the
			 * next retry. */
			skb_queue_head(BUSY_QUEUE(sk), skb);
			return -EBUSY;
		}

		pi->buffer_seq = (pi->buffer_seq + 1) % 64;
	}

	if (!(pi->conn_state & L2CAP_CONN_RNR_SENT))
		goto done;

	/* We told the peer "not ready" earlier; send RR with the poll
	 * bit so it knows we can receive again. */
	control = pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
	control |= L2CAP_SUPER_RCV_READY | L2CAP_CTRL_POLL;
	l2cap_send_sframe(pi, control);
	l2cap_pi(sk)->retry_count = 1;

	del_timer(&pi->retrans_timer);
	__mod_monitor_timer();

	l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;

done:
	pi->conn_state &= ~L2CAP_CONN_LOCAL_BUSY;
	pi->conn_state &= ~L2CAP_CONN_RNR_SENT;

	BT_DBG("sk %p, Exit local busy", sk);

	return 0;
}
3008
/* Workqueue handler for the Local Busy state: periodically retries to
 * push queued frames into the (previously full) receive buffer, backing
 * off in HZ/5 slices, and gives up with a disconnect after
 * L2CAP_LOCAL_BUSY_TRIES attempts.  Runs in process context, so it can
 * sleep between retries. */
static void l2cap_busy_work(struct work_struct *work)
{
	DECLARE_WAITQUEUE(wait, current);
	struct l2cap_pinfo *pi =
		container_of(work, struct l2cap_pinfo, busy_work);
	/* l2cap_pinfo embeds the sock as its first member */
	struct sock *sk = (struct sock *)pi;
	int n_tries = 0, timeo = HZ/5, err;
	struct sk_buff *skb;

	lock_sock(sk);

	add_wait_queue(sk_sleep(sk), &wait);
	while ((skb = skb_peek(BUSY_QUEUE(sk)))) {
		set_current_state(TASK_INTERRUPTIBLE);

		if (n_tries++ > L2CAP_LOCAL_BUSY_TRIES) {
			err = -EBUSY;
			l2cap_send_disconn_req(pi->conn, sk, EBUSY);
			break;
		}

		if (!timeo)
			timeo = HZ/5;

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}

		/* Sleep without holding the socket lock. */
		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);

		err = sock_error(sk);
		if (err)
			break;

		/* 0 means the busy queue fully drained. */
		if (l2cap_try_push_rx_skb(sk) == 0)
			break;
	}

	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);

	release_sock(sk);
}
3055
/* Deliver an in-sequence I-frame to the reassembly path, entering the
 * Local Busy state (queue the frame, send RNR, kick the busy worker)
 * when the receive buffer cannot accept it.  Returns >= 0 on delivery,
 * negative when the frame had to be deferred. */
static int l2cap_push_rx_skb(struct sock *sk, struct sk_buff *skb, u16 control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	int sctrl, err;

	if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
		/* Already busy: keep ordering by appending to the busy
		 * queue and let the drain routine decide. */
		bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
		__skb_queue_tail(BUSY_QUEUE(sk), skb);
		return l2cap_try_push_rx_skb(sk);


	}

	err = l2cap_ertm_reassembly_sdu(sk, skb, control);
	if (err >= 0) {
		pi->buffer_seq = (pi->buffer_seq + 1) % 64;
		return err;
	}

	/* Busy Condition */
	BT_DBG("sk %p, Enter local busy", sk);

	pi->conn_state |= L2CAP_CONN_LOCAL_BUSY;
	bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
	__skb_queue_tail(BUSY_QUEUE(sk), skb);

	/* Tell the peer to stop sending until we recover. */
	sctrl = pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
	sctrl |= L2CAP_SUPER_RCV_NOT_READY;
	l2cap_send_sframe(pi, sctrl);

	pi->conn_state |= L2CAP_CONN_RNR_SENT;

	del_timer(&pi->ack_timer);

	/* Retry delivery from process context. */
	queue_work(_busy_wq, &pi->busy_work);

	return err;
}
3094
3095 static int l2cap_streaming_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
3096 {
3097 struct l2cap_pinfo *pi = l2cap_pi(sk);
3098 struct sk_buff *_skb;
3099 int err = -EINVAL;
3100
3101 /*
3102 * TODO: We have to notify the userland if some data is lost with the
3103 * Streaming Mode.
3104 */
3105
3106 switch (control & L2CAP_CTRL_SAR) {
3107 case L2CAP_SDU_UNSEGMENTED:
3108 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
3109 kfree_skb(pi->sdu);
3110 break;
3111 }
3112
3113 err = sock_queue_rcv_skb(sk, skb);
3114 if (!err)
3115 return 0;
3116
3117 break;
3118
3119 case L2CAP_SDU_START:
3120 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
3121 kfree_skb(pi->sdu);
3122 break;
3123 }
3124
3125 pi->sdu_len = get_unaligned_le16(skb->data);
3126 skb_pull(skb, 2);
3127
3128 if (pi->sdu_len > pi->imtu) {
3129 err = -EMSGSIZE;
3130 break;
3131 }
3132
3133 pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
3134 if (!pi->sdu) {
3135 err = -ENOMEM;
3136 break;
3137 }
3138
3139 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3140
3141 pi->conn_state |= L2CAP_CONN_SAR_SDU;
3142 pi->partial_sdu_len = skb->len;
3143 err = 0;
3144 break;
3145
3146 case L2CAP_SDU_CONTINUE:
3147 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3148 break;
3149
3150 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3151
3152 pi->partial_sdu_len += skb->len;
3153 if (pi->partial_sdu_len > pi->sdu_len)
3154 kfree_skb(pi->sdu);
3155 else
3156 err = 0;
3157
3158 break;
3159
3160 case L2CAP_SDU_END:
3161 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3162 break;
3163
3164 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3165
3166 pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
3167 pi->partial_sdu_len += skb->len;
3168
3169 if (pi->partial_sdu_len > pi->imtu)
3170 goto drop;
3171
3172 if (pi->partial_sdu_len == pi->sdu_len) {
3173 _skb = skb_clone(pi->sdu, GFP_ATOMIC);
3174 err = sock_queue_rcv_skb(sk, _skb);
3175 if (err < 0)
3176 kfree_skb(_skb);
3177 }
3178 err = 0;
3179
3180 drop:
3181 kfree_skb(pi->sdu);
3182 break;
3183 }
3184
3185 kfree_skb(skb);
3186 return err;
3187 }
3188
3189 static void l2cap_check_srej_gap(struct sock *sk, u8 tx_seq)
3190 {
3191 struct sk_buff *skb;
3192 u16 control;
3193
3194 while ((skb = skb_peek(SREJ_QUEUE(sk)))) {
3195 if (bt_cb(skb)->tx_seq != tx_seq)
3196 break;
3197
3198 skb = skb_dequeue(SREJ_QUEUE(sk));
3199 control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
3200 l2cap_ertm_reassembly_sdu(sk, skb, control);
3201 l2cap_pi(sk)->buffer_seq_srej =
3202 (l2cap_pi(sk)->buffer_seq_srej + 1) % 64;
3203 tx_seq = (tx_seq + 1) % 64;
3204 }
3205 }
3206
/* The frame for tx_seq has been received: walk the SREJ list from the
 * head, re-sending an SREJ (and rotating the entry to the tail) for
 * every frame that is still missing, until the entry for tx_seq is
 * found and removed. */
static void l2cap_resend_srejframe(struct sock *sk, u8 tx_seq)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct srej_list *l, *tmp;
	u16 control;

	list_for_each_entry_safe(l, tmp, SREJ_LIST(sk), list) {
		if (l->tx_seq == tx_seq) {
			/* Found it: this frame is no longer missing. */
			list_del(&l->list);
			kfree(l);
			return;
		}
		/* Still missing: ask for it again and keep it tracked. */
		control = L2CAP_SUPER_SELECT_REJECT;
		control |= l->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
		l2cap_send_sframe(pi, control);
		list_del(&l->list);
		list_add_tail(&l->list, SREJ_LIST(sk));
	}
}
3226
3227 static void l2cap_send_srejframe(struct sock *sk, u8 tx_seq)
3228 {
3229 struct l2cap_pinfo *pi = l2cap_pi(sk);
3230 struct srej_list *new;
3231 u16 control;
3232
3233 while (tx_seq != pi->expected_tx_seq) {
3234 control = L2CAP_SUPER_SELECT_REJECT;
3235 control |= pi->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3236 l2cap_send_sframe(pi, control);
3237
3238 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3239 new->tx_seq = pi->expected_tx_seq;
3240 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3241 list_add_tail(&new->list, SREJ_LIST(sk));
3242 }
3243 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3244 }
3245
3246 static inline int l2cap_data_channel_iframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
3247 {
3248 struct l2cap_pinfo *pi = l2cap_pi(sk);
3249 u8 tx_seq = __get_txseq(rx_control);
3250 u8 req_seq = __get_reqseq(rx_control);
3251 u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT;
3252 int tx_seq_offset, expected_tx_seq_offset;
3253 int num_to_ack = (pi->tx_win/6) + 1;
3254 int err = 0;
3255
3256 BT_DBG("sk %p len %d tx_seq %d rx_control 0x%4.4x", sk, skb->len, tx_seq,
3257 rx_control);
3258
3259 if (L2CAP_CTRL_FINAL & rx_control &&
3260 l2cap_pi(sk)->conn_state & L2CAP_CONN_WAIT_F) {
3261 del_timer(&pi->monitor_timer);
3262 if (pi->unacked_frames > 0)
3263 __mod_retrans_timer();
3264 pi->conn_state &= ~L2CAP_CONN_WAIT_F;
3265 }
3266
3267 pi->expected_ack_seq = req_seq;
3268 l2cap_drop_acked_frames(sk);
3269
3270 if (tx_seq == pi->expected_tx_seq)
3271 goto expected;
3272
3273 tx_seq_offset = (tx_seq - pi->buffer_seq) % 64;
3274 if (tx_seq_offset < 0)
3275 tx_seq_offset += 64;
3276
3277 /* invalid tx_seq */
3278 if (tx_seq_offset >= pi->tx_win) {
3279 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
3280 goto drop;
3281 }
3282
3283 if (pi->conn_state == L2CAP_CONN_LOCAL_BUSY)
3284 goto drop;
3285
3286 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3287 struct srej_list *first;
3288
3289 first = list_first_entry(SREJ_LIST(sk),
3290 struct srej_list, list);
3291 if (tx_seq == first->tx_seq) {
3292 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3293 l2cap_check_srej_gap(sk, tx_seq);
3294
3295 list_del(&first->list);
3296 kfree(first);
3297
3298 if (list_empty(SREJ_LIST(sk))) {
3299 pi->buffer_seq = pi->buffer_seq_srej;
3300 pi->conn_state &= ~L2CAP_CONN_SREJ_SENT;
3301 l2cap_send_ack(pi);
3302 BT_DBG("sk %p, Exit SREJ_SENT", sk);
3303 }
3304 } else {
3305 struct srej_list *l;
3306
3307 /* duplicated tx_seq */
3308 if (l2cap_add_to_srej_queue(sk, skb, tx_seq, sar) < 0)
3309 goto drop;
3310
3311 list_for_each_entry(l, SREJ_LIST(sk), list) {
3312 if (l->tx_seq == tx_seq) {
3313 l2cap_resend_srejframe(sk, tx_seq);
3314 return 0;
3315 }
3316 }
3317 l2cap_send_srejframe(sk, tx_seq);
3318 }
3319 } else {
3320 expected_tx_seq_offset =
3321 (pi->expected_tx_seq - pi->buffer_seq) % 64;
3322 if (expected_tx_seq_offset < 0)
3323 expected_tx_seq_offset += 64;
3324
3325 /* duplicated tx_seq */
3326 if (tx_seq_offset < expected_tx_seq_offset)
3327 goto drop;
3328
3329 pi->conn_state |= L2CAP_CONN_SREJ_SENT;
3330
3331 BT_DBG("sk %p, Enter SREJ", sk);
3332
3333 INIT_LIST_HEAD(SREJ_LIST(sk));
3334 pi->buffer_seq_srej = pi->buffer_seq;
3335
3336 __skb_queue_head_init(SREJ_QUEUE(sk));
3337 __skb_queue_head_init(BUSY_QUEUE(sk));
3338 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3339
3340 pi->conn_state |= L2CAP_CONN_SEND_PBIT;
3341
3342 l2cap_send_srejframe(sk, tx_seq);
3343
3344 del_timer(&pi->ack_timer);
3345 }
3346 return 0;
3347
3348 expected:
3349 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3350
3351 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3352 bt_cb(skb)->tx_seq = tx_seq;
3353 bt_cb(skb)->sar = sar;
3354 __skb_queue_tail(SREJ_QUEUE(sk), skb);
3355 return 0;
3356 }
3357
3358 err = l2cap_push_rx_skb(sk, skb, rx_control);
3359 if (err < 0)
3360 return 0;
3361
3362 if (rx_control & L2CAP_CTRL_FINAL) {
3363 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
3364 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
3365 else
3366 l2cap_retransmit_frames(sk);
3367 }
3368
3369 __mod_ack_timer();
3370
3371 pi->num_acked = (pi->num_acked + 1) % num_to_ack;
3372 if (pi->num_acked == num_to_ack - 1)
3373 l2cap_send_ack(pi);
3374
3375 return 0;
3376
3377 drop:
3378 kfree_skb(skb);
3379 return 0;
3380 }
3381
/* Handle a Receiver Ready (RR) S-frame: process the acknowledgement and
 * react to the P/F bits — a poll demands an immediate response, a final
 * bit completes an earlier retransmission exchange, otherwise resume
 * normal transmission. */
static inline void l2cap_data_channel_rrframe(struct sock *sk, u16 rx_control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);

	BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, __get_reqseq(rx_control),
						rx_control);

	pi->expected_ack_seq = __get_reqseq(rx_control);
	l2cap_drop_acked_frames(sk);

	if (rx_control & L2CAP_CTRL_POLL) {
		/* Peer polls us: our answer must carry the F-bit. */
		pi->conn_state |= L2CAP_CONN_SEND_FBIT;
		if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
			if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
					(pi->unacked_frames > 0))
				__mod_retrans_timer();

			pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
			l2cap_send_srejtail(sk);
		} else {
			l2cap_send_i_or_rr_or_rnr(sk);
		}

	} else if (rx_control & L2CAP_CTRL_FINAL) {
		/* F-bit: answer to a poll we sent earlier. */
		pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

		if (pi->conn_state & L2CAP_CONN_REJ_ACT)
			pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
		else
			l2cap_retransmit_frames(sk);

	} else {
		if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
				(pi->unacked_frames > 0))
			__mod_retrans_timer();

		pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
		if (pi->conn_state & L2CAP_CONN_SREJ_SENT)
			l2cap_send_ack(pi);
		else
			l2cap_ertm_send(sk);
	}
}
3425
/* Handle a Reject (REJ) S-frame: the peer asks for retransmission of
 * everything from req_seq onward.  REJ_ACT tracking prevents a second,
 * spurious retransmission when the F-bit answer to our poll arrives. */
static inline void l2cap_data_channel_rejframe(struct sock *sk, u16 rx_control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u8 tx_seq = __get_reqseq(rx_control);

	BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control);

	pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

	/* Everything before req_seq is implicitly acknowledged. */
	pi->expected_ack_seq = tx_seq;
	l2cap_drop_acked_frames(sk);

	if (rx_control & L2CAP_CTRL_FINAL) {
		/* Already retransmitted for this REJ? Then just clear. */
		if (pi->conn_state & L2CAP_CONN_REJ_ACT)
			pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
		else
			l2cap_retransmit_frames(sk);
	} else {
		l2cap_retransmit_frames(sk);

		/* A poll of ours is outstanding: remember we acted on
		 * this REJ so the F-bit reply does not retransmit again. */
		if (pi->conn_state & L2CAP_CONN_WAIT_F)
			pi->conn_state |= L2CAP_CONN_REJ_ACT;
	}
}
/* Handle a Selective Reject (SREJ) S-frame: retransmit exactly the one
 * frame the peer asks for.  SREJ_ACT/srej_save_reqseq mirror the
 * REJ_ACT logic, suppressing a duplicate retransmission when the F-bit
 * answer to our poll reports the same sequence number. */
static inline void l2cap_data_channel_srejframe(struct sock *sk, u16 rx_control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u8 tx_seq = __get_reqseq(rx_control);

	BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control);

	pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

	if (rx_control & L2CAP_CTRL_POLL) {
		/* SREJ with P-bit also acknowledges up to tx_seq. */
		pi->expected_ack_seq = tx_seq;
		l2cap_drop_acked_frames(sk);

		pi->conn_state |= L2CAP_CONN_SEND_FBIT;
		l2cap_retransmit_one_frame(sk, tx_seq);

		l2cap_ertm_send(sk);

		if (pi->conn_state & L2CAP_CONN_WAIT_F) {
			pi->srej_save_reqseq = tx_seq;
			pi->conn_state |= L2CAP_CONN_SREJ_ACT;
		}
	} else if (rx_control & L2CAP_CTRL_FINAL) {
		/* Skip the retransmit if we already honoured this SREJ. */
		if ((pi->conn_state & L2CAP_CONN_SREJ_ACT) &&
				pi->srej_save_reqseq == tx_seq)
			pi->conn_state &= ~L2CAP_CONN_SREJ_ACT;
		else
			l2cap_retransmit_one_frame(sk, tx_seq);
	} else {
		l2cap_retransmit_one_frame(sk, tx_seq);
		if (pi->conn_state & L2CAP_CONN_WAIT_F) {
			pi->srej_save_reqseq = tx_seq;
			pi->conn_state |= L2CAP_CONN_SREJ_ACT;
		}
	}
}
3486
/* Handle a Receiver Not Ready (RNR) S-frame: mark the peer busy, stop
 * retransmitting, and answer a poll appropriately (SREJ tail while in
 * SREJ recovery, RR with F-bit otherwise). */
static inline void l2cap_data_channel_rnrframe(struct sock *sk, u16 rx_control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u8 tx_seq = __get_reqseq(rx_control);

	BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control);

	pi->conn_state |= L2CAP_CONN_REMOTE_BUSY;
	pi->expected_ack_seq = tx_seq;
	l2cap_drop_acked_frames(sk);

	if (rx_control & L2CAP_CTRL_POLL)
		pi->conn_state |= L2CAP_CONN_SEND_FBIT;

	if (!(pi->conn_state & L2CAP_CONN_SREJ_SENT)) {
		/* Peer cannot receive: no point retransmitting. */
		del_timer(&pi->retrans_timer);
		if (rx_control & L2CAP_CTRL_POLL)
			l2cap_send_rr_or_rnr(pi, L2CAP_CTRL_FINAL);
		return;
	}

	if (rx_control & L2CAP_CTRL_POLL)
		l2cap_send_srejtail(sk);
	else
		l2cap_send_sframe(pi, L2CAP_SUPER_RCV_READY);
}
3513
3514 static inline int l2cap_data_channel_sframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
3515 {
3516 BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);
3517
3518 if (L2CAP_CTRL_FINAL & rx_control &&
3519 l2cap_pi(sk)->conn_state & L2CAP_CONN_WAIT_F) {
3520 del_timer(&l2cap_pi(sk)->monitor_timer);
3521 if (l2cap_pi(sk)->unacked_frames > 0)
3522 __mod_retrans_timer();
3523 l2cap_pi(sk)->conn_state &= ~L2CAP_CONN_WAIT_F;
3524 }
3525
3526 switch (rx_control & L2CAP_CTRL_SUPERVISE) {
3527 case L2CAP_SUPER_RCV_READY:
3528 l2cap_data_channel_rrframe(sk, rx_control);
3529 break;
3530
3531 case L2CAP_SUPER_REJECT:
3532 l2cap_data_channel_rejframe(sk, rx_control);
3533 break;
3534
3535 case L2CAP_SUPER_SELECT_REJECT:
3536 l2cap_data_channel_srejframe(sk, rx_control);
3537 break;
3538
3539 case L2CAP_SUPER_RCV_NOT_READY:
3540 l2cap_data_channel_rnrframe(sk, rx_control);
3541 break;
3542 }
3543
3544 kfree_skb(skb);
3545 return 0;
3546 }
3547
/* Validate and route one received ERTM frame: check the FCS, sanity
 * check the payload length and the piggybacked req_seq, then hand the
 * frame to the I-frame or S-frame handler.  Consumes skb.  Always
 * returns 0 (also serves as the sk_backlog_rcv callback). */
static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u16 control;
	u8 req_seq;
	int len, next_tx_seq_offset, req_seq_offset;

	control = get_unaligned_le16(skb->data);
	skb_pull(skb, 2);
	len = skb->len;

	/*
	 * We can just drop the corrupted I-frame here.
	 * Receiver will miss it and start proper recovery
	 * procedures and ask retransmission.
	 */
	if (l2cap_check_fcs(pi, skb))
		goto drop;

	/* SAR start frames carry a 2-byte SDU length prefix. */
	if (__is_sar_start(control) && __is_iframe(control))
		len -= 2;

	if (pi->fcs == L2CAP_FCS_CRC16)
		len -= 2;

	if (len > pi->mps) {
		l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
		goto drop;
	}

	req_seq = __get_reqseq(control);
	req_seq_offset = (req_seq - pi->expected_ack_seq) % 64;
	if (req_seq_offset < 0)
		req_seq_offset += 64;

	next_tx_seq_offset =
		(pi->next_tx_seq - pi->expected_ack_seq) % 64;
	if (next_tx_seq_offset < 0)
		next_tx_seq_offset += 64;

	/* check for invalid req-seq */
	if (req_seq_offset > next_tx_seq_offset) {
		l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
		goto drop;
	}

	if (__is_iframe(control)) {
		/* negative len: frame shorter than its mandatory fields */
		if (len < 0) {
			l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
			goto drop;
		}

		l2cap_data_channel_iframe(sk, control, skb);
	} else {
		/* S-frames carry no payload at all. */
		if (len != 0) {
			BT_ERR("%d", len);
			l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
			goto drop;
		}

		l2cap_data_channel_sframe(sk, control, skb);
	}

	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
3617
/* Entry point for data received on a connection-oriented channel (by
 * CID): look up the channel and deliver according to its mode — plain
 * queueing for Basic mode, the full ERTM receive path (via the socket
 * backlog if user space holds the lock), or streaming reassembly.
 * Consumes skb.  Always returns 0. */
static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
{
	struct sock *sk;
	struct l2cap_pinfo *pi;
	u16 control;
	u8 tx_seq;
	int len;

	/* NOTE(review): lookup returns the socket bh-locked; the unlock
	 * happens at done below. */
	sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
	if (!sk) {
		BT_DBG("unknown cid 0x%4.4x", cid);
		goto drop;
	}

	pi = l2cap_pi(sk);

	BT_DBG("sk %p, len %d", sk, skb->len);

	if (sk->sk_state != BT_CONNECTED)
		goto drop;

	switch (pi->mode) {
	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism. */

		if (pi->imtu < skb->len)
			goto drop;

		if (!sock_queue_rcv_skb(sk, skb))
			goto done;
		break;

	case L2CAP_MODE_ERTM:
		if (!sock_owned_by_user(sk)) {
			l2cap_ertm_data_rcv(sk, skb);
		} else {
			/* User holds the lock: defer to the backlog,
			 * processed via sk_backlog_rcv on release. */
			if (sk_add_backlog(sk, skb))
				goto drop;
		}

		goto done;

	case L2CAP_MODE_STREAMING:
		control = get_unaligned_le16(skb->data);
		skb_pull(skb, 2);
		len = skb->len;

		if (l2cap_check_fcs(pi, skb))
			goto drop;

		if (__is_sar_start(control))
			len -= 2;

		if (pi->fcs == L2CAP_FCS_CRC16)
			len -= 2;

		/* S-frames are not valid in streaming mode. */
		if (len > pi->mps || len < 0 || __is_sframe(control))
			goto drop;

		tx_seq = __get_txseq(control);

		/* Streaming never retransmits: just resync the expected
		 * sequence number on a gap. */
		if (pi->expected_tx_seq == tx_seq)
			pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
		else
			pi->expected_tx_seq = (tx_seq + 1) % 64;

		l2cap_streaming_reassembly_sdu(sk, skb, control);

		goto done;

	default:
		BT_DBG("sk %p: bad mode 0x%2.2x", sk, pi->mode);
		break;
	}

	/* fall through: free the frame, then unlock if we hold a sock */
drop:
	kfree_skb(skb);

done:
	if (sk)
		bh_unlock_sock(sk);

	return 0;
}
3705
/* Deliver a frame received on the connectionless channel to the socket
 * bound to the given PSM, if any.  The skb is consumed: either queued
 * to the socket's receive queue or freed.  Always returns 0.
 */
static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
{
	struct sock *sk;

	sk = l2cap_get_sock_by_psm(0, psm, conn->src);
	if (!sk)
		goto drop;

	bh_lock_sock(sk);

	BT_DBG("sk %p, len %d", sk, skb->len);

	/* Connectionless data is accepted while bound or connected. */
	if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
		goto drop;

	/* Enforce the incoming MTU. */
	if (l2cap_pi(sk)->imtu < skb->len)
		goto drop;

	if (!sock_queue_rcv_skb(sk, skb))
		goto done;

drop:
	kfree_skb(skb);

done:
	if (sk)
		bh_unlock_sock(sk);
	return 0;
}
3735
3736 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
3737 {
3738 struct l2cap_hdr *lh = (void *) skb->data;
3739 u16 cid, len;
3740 __le16 psm;
3741
3742 skb_pull(skb, L2CAP_HDR_SIZE);
3743 cid = __le16_to_cpu(lh->cid);
3744 len = __le16_to_cpu(lh->len);
3745
3746 if (len != skb->len) {
3747 kfree_skb(skb);
3748 return;
3749 }
3750
3751 BT_DBG("len %d, cid 0x%4.4x", len, cid);
3752
3753 switch (cid) {
3754 case L2CAP_CID_SIGNALING:
3755 l2cap_sig_channel(conn, skb);
3756 break;
3757
3758 case L2CAP_CID_CONN_LESS:
3759 psm = get_unaligned_le16(skb->data);
3760 skb_pull(skb, 2);
3761 l2cap_conless_channel(conn, psm, skb);
3762 break;
3763
3764 default:
3765 l2cap_data_channel(conn, cid, skb);
3766 break;
3767 }
3768 }
3769
3770 /* ---- L2CAP interface with lower layer (HCI) ---- */
3771
3772 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3773 {
3774 int exact = 0, lm1 = 0, lm2 = 0;
3775 register struct sock *sk;
3776 struct hlist_node *node;
3777
3778 if (type != ACL_LINK)
3779 return -EINVAL;
3780
3781 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
3782
3783 /* Find listening sockets and check their link_mode */
3784 read_lock(&l2cap_sk_list.lock);
3785 sk_for_each(sk, node, &l2cap_sk_list.head) {
3786 if (sk->sk_state != BT_LISTEN)
3787 continue;
3788
3789 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
3790 lm1 |= HCI_LM_ACCEPT;
3791 if (l2cap_pi(sk)->role_switch)
3792 lm1 |= HCI_LM_MASTER;
3793 exact++;
3794 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
3795 lm2 |= HCI_LM_ACCEPT;
3796 if (l2cap_pi(sk)->role_switch)
3797 lm2 |= HCI_LM_MASTER;
3798 }
3799 }
3800 read_unlock(&l2cap_sk_list.lock);
3801
3802 return exact ? lm1 : lm2;
3803 }
3804
3805 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
3806 {
3807 struct l2cap_conn *conn;
3808
3809 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
3810
3811 if (hcon->type != ACL_LINK)
3812 return -EINVAL;
3813
3814 if (!status) {
3815 conn = l2cap_conn_add(hcon, status);
3816 if (conn)
3817 l2cap_conn_ready(conn);
3818 } else
3819 l2cap_conn_del(hcon, bt_err(status));
3820
3821 return 0;
3822 }
3823
3824 static int l2cap_disconn_ind(struct hci_conn *hcon)
3825 {
3826 struct l2cap_conn *conn = hcon->l2cap_data;
3827
3828 BT_DBG("hcon %p", hcon);
3829
3830 if (hcon->type != ACL_LINK || !conn)
3831 return 0x13;
3832
3833 return conn->disc_reason;
3834 }
3835
3836 static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
3837 {
3838 BT_DBG("hcon %p reason %d", hcon, reason);
3839
3840 if (hcon->type != ACL_LINK)
3841 return -EINVAL;
3842
3843 l2cap_conn_del(hcon, bt_err(reason));
3844
3845 return 0;
3846 }
3847
3848 static inline void l2cap_check_encryption(struct sock *sk, u8 encrypt)
3849 {
3850 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM)
3851 return;
3852
3853 if (encrypt == 0x00) {
3854 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM) {
3855 l2cap_sock_clear_timer(sk);
3856 l2cap_sock_set_timer(sk, HZ * 5);
3857 } else if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
3858 __l2cap_sock_close(sk, ECONNREFUSED);
3859 } else {
3860 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM)
3861 l2cap_sock_clear_timer(sk);
3862 }
3863 }
3864
/* HCI callback: an authentication/encryption request for this link has
 * completed with the given status and encryption state.
 *
 * Walks every channel on the connection (under the channel-list read
 * lock, taking each socket's bh lock in turn) and advances its state
 * machine: established channels get l2cap_check_encryption(),
 * BT_CONNECT channels send their pending Connection Request on success
 * (or arm a short retry timer on failure), and BT_CONNECT2 channels
 * answer the peer's Connection Request with success or a
 * security-block result.  Always returns 0.
 */
static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_chan_list *l;
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct sock *sk;

	if (!conn)
		return 0;

	l = &conn->chan_list;

	BT_DBG("conn %p", conn);

	read_lock(&l->lock);

	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		bh_lock_sock(sk);

		/* Channels still waiting on an earlier connect are
		 * handled when that completes, not here. */
		if (l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND) {
			bh_unlock_sock(sk);
			continue;
		}

		if (!status && (sk->sk_state == BT_CONNECTED ||
						sk->sk_state == BT_CONFIG)) {
			l2cap_check_encryption(sk, encrypt);
			bh_unlock_sock(sk);
			continue;
		}

		if (sk->sk_state == BT_CONNECT) {
			if (!status) {
				/* Security is in place: send the deferred
				 * L2CAP Connection Request. */
				struct l2cap_conn_req req;
				req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
				req.psm = l2cap_pi(sk)->psm;

				l2cap_pi(sk)->ident = l2cap_get_ident(conn);
				l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;

				l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_REQ, sizeof(req), &req);
			} else {
				/* Security failed: schedule a quick timeout
				 * so the socket is torn down. */
				l2cap_sock_clear_timer(sk);
				l2cap_sock_set_timer(sk, HZ / 10);
			}
		} else if (sk->sk_state == BT_CONNECT2) {
			/* We owe the peer a Connection Response. */
			struct l2cap_conn_rsp rsp;
			__u16 result;

			if (!status) {
				sk->sk_state = BT_CONFIG;
				result = L2CAP_CR_SUCCESS;
			} else {
				sk->sk_state = BT_DISCONN;
				l2cap_sock_set_timer(sk, HZ / 10);
				result = L2CAP_CR_SEC_BLOCK;
			}

			rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
			rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
			rsp.result = cpu_to_le16(result);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_RSP, sizeof(rsp), &rsp);
		}

		bh_unlock_sock(sk);
	}

	read_unlock(&l->lock);

	return 0;
}
3938
/* HCI callback: a fragment of ACL data arrived for this link.
 *
 * Reassembles L2CAP frames that HCI split across multiple ACL packets:
 * a start fragment (no ACL_CONT flag) must carry at least the basic
 * L2CAP header, which gives the total frame length; continuation
 * fragments are appended to conn->rx_skb until conn->rx_len reaches
 * zero, at which point the complete frame is handed to
 * l2cap_recv_frame().  Any protocol violation (unexpected start or
 * continuation, short/long fragment) marks the connection unreliable
 * and drops the partial frame.  The incoming skb is always consumed.
 * Always returns 0.
 */
static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	/* First data on a link may arrive before connect_cfm set up the
	 * connection object. */
	if (!conn)
		conn = l2cap_conn_add(hcon, 0);

	if (!conn)
		goto drop;

	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);

	if (!(flags & ACL_CONT)) {
		struct l2cap_hdr *hdr;
		struct sock *sk;
		u16 cid;
		int len;

		/* A new start frame while reassembly is in progress means
		 * we lost fragments: discard the partial frame. */
		if (conn->rx_len) {
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
		}

		/* Start fragment always begin with Basic L2CAP header */
		if (skb->len < L2CAP_HDR_SIZE) {
			BT_ERR("Frame is too short (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		hdr = (struct l2cap_hdr *) skb->data;
		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
		cid = __le16_to_cpu(hdr->cid);

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);
			return 0;
		}

		BT_DBG("Start: total len %d, frag len %d", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %d, expected len %d)",
				skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Reject frames exceeding the channel's incoming MTU
		 * before allocating a reassembly buffer.
		 * NOTE(review): l2cap_get_chan_by_scid() appears to
		 * return the socket locked -- hence the unlocks below. */
		sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);

		if (sk && l2cap_pi(sk)->imtu < len - L2CAP_HDR_SIZE) {
			BT_ERR("Frame exceeding recv MTU (len %d, MTU %d)",
					len, l2cap_pi(sk)->imtu);
			bh_unlock_sock(sk);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		if (sk)
			bh_unlock_sock(sk);

		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!conn->rx_skb)
			goto drop;

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
								skb->len);
		conn->rx_len = len - skb->len;
	} else {
		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);

		/* Continuation with no reassembly in progress. */
		if (!conn->rx_len) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %d, expected %d)",
					skb->len, conn->rx_len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
								skb->len);
		conn->rx_len -= skb->len;

		if (!conn->rx_len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, conn->rx_skb);
			conn->rx_skb = NULL;
		}
	}

drop:
	kfree_skb(skb);
	return 0;
}
4046
/* debugfs: print one line per L2CAP socket -- source/destination
 * addresses, socket state, PSM, channel ids, MTUs and security level.
 */
static int l2cap_debugfs_show(struct seq_file *f, void *p)
{
	struct sock *sk;
	struct hlist_node *node;

	read_lock_bh(&l2cap_sk_list.lock);

	sk_for_each(sk, node, &l2cap_sk_list.head) {
		struct l2cap_pinfo *pi = l2cap_pi(sk);

		seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n",
					batostr(&bt_sk(sk)->src),
					batostr(&bt_sk(sk)->dst),
					sk->sk_state, __le16_to_cpu(pi->psm),
					pi->scid, pi->dcid,
					pi->imtu, pi->omtu, pi->sec_level);
	}

	read_unlock_bh(&l2cap_sk_list.lock);

	return 0;
}
4069
/* debugfs open: bind the seq_file single-show helper to our dumper. */
static int l2cap_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, l2cap_debugfs_show, inode->i_private);
}
4074
/* File operations for the read-only "l2cap" debugfs entry. */
static const struct file_operations l2cap_debugfs_fops = {
	.open		= l2cap_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
4081
/* Dentry of the "l2cap" debugfs file; NULL when debugfs is unused. */
static struct dentry *l2cap_debugfs;

/* Callbacks registered with the HCI core so L2CAP is notified of link
 * events and receives inbound ACL data. */
static struct hci_proto l2cap_hci_proto = {
	.name		= "L2CAP",
	.id		= HCI_PROTO_L2CAP,
	.connect_ind	= l2cap_connect_ind,
	.connect_cfm	= l2cap_connect_cfm,
	.disconn_ind	= l2cap_disconn_ind,
	.disconn_cfm	= l2cap_disconn_cfm,
	.security_cfm	= l2cap_security_cfm,
	.recv_acldata	= l2cap_recv_acldata
};
4094
4095 static int __init l2cap_init(void)
4096 {
4097 int err;
4098
4099 err = l2cap_init_sockets();
4100 if (err < 0)
4101 return err;
4102
4103 _busy_wq = create_singlethread_workqueue("l2cap");
4104 if (!_busy_wq) {
4105 err = -ENOMEM;
4106 goto error;
4107 }
4108
4109 err = hci_register_proto(&l2cap_hci_proto);
4110 if (err < 0) {
4111 BT_ERR("L2CAP protocol registration failed");
4112 bt_sock_unregister(BTPROTO_L2CAP);
4113 goto error;
4114 }
4115
4116 if (bt_debugfs) {
4117 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
4118 bt_debugfs, NULL, &l2cap_debugfs_fops);
4119 if (!l2cap_debugfs)
4120 BT_ERR("Failed to create L2CAP debug file");
4121 }
4122
4123 BT_INFO("L2CAP ver %s", VERSION);
4124 BT_INFO("L2CAP socket layer initialized");
4125
4126 return 0;
4127
4128 error:
4129 destroy_workqueue(_busy_wq);
4130 l2cap_cleanup_sockets();
4131 return err;
4132 }
4133
/* Module exit: undo l2cap_init() in reverse order.  The workqueue is
 * flushed before destruction so any queued work completes first. */
static void __exit l2cap_exit(void)
{
	debugfs_remove(l2cap_debugfs);

	flush_workqueue(_busy_wq);
	destroy_workqueue(_busy_wq);

	if (hci_unregister_proto(&l2cap_hci_proto) < 0)
		BT_ERR("L2CAP protocol unregistration failed");

	l2cap_cleanup_sockets();
}
4146
/* Exported no-op: referencing this symbol lets dependent modules pull
 * in the L2CAP module automatically via symbol resolution. */
void l2cap_load(void)
{
	/* Dummy function to trigger automatic L2CAP module loading by
	 * other modules that use L2CAP sockets but don't use any other
	 * symbols from it. */
}
EXPORT_SYMBOL(l2cap_load);
4154
module_init(l2cap_init);
module_exit(l2cap_exit);

/* Runtime knob to disable enhanced retransmission mode (ERTM). */
module_param(disable_ertm, bool, 0644);
MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");

MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
MODULE_ALIAS("bt-proto-0");
This page took 0.206344 seconds and 5 git commands to generate.