Bluetooth: check L2CAP length in first ACL fragment
[deliverable/linux.git] / net / bluetooth / l2cap.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6
7 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License version 2 as
11 published by the Free Software Foundation;
12
13 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
14 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
16 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
17 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
18 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
19 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
20 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
21
22 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
23 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
24 SOFTWARE IS DISCLAIMED.
25 */
26
27 /* Bluetooth L2CAP core and sockets. */
28
29 #include <linux/module.h>
30
31 #include <linux/types.h>
32 #include <linux/capability.h>
33 #include <linux/errno.h>
34 #include <linux/kernel.h>
35 #include <linux/sched.h>
36 #include <linux/slab.h>
37 #include <linux/poll.h>
38 #include <linux/fcntl.h>
39 #include <linux/init.h>
40 #include <linux/interrupt.h>
41 #include <linux/socket.h>
42 #include <linux/skbuff.h>
43 #include <linux/list.h>
44 #include <linux/device.h>
45 #include <linux/debugfs.h>
46 #include <linux/seq_file.h>
47 #include <linux/uaccess.h>
48 #include <linux/crc16.h>
49 #include <net/sock.h>
50
51 #include <asm/system.h>
52 #include <asm/unaligned.h>
53
54 #include <net/bluetooth/bluetooth.h>
55 #include <net/bluetooth/hci_core.h>
56 #include <net/bluetooth/l2cap.h>
57
58 #define VERSION "2.15"
59
60 static int disable_ertm = 0;
61
62 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
63 static u8 l2cap_fixed_chan[8] = { 0x02, };
64
65 static const struct proto_ops l2cap_sock_ops;
66
67 static struct workqueue_struct *_busy_wq;
68
69 static struct bt_sock_list l2cap_sk_list = {
70 .lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
71 };
72
73 static void l2cap_busy_work(struct work_struct *work);
74
75 static void __l2cap_sock_close(struct sock *sk, int reason);
76 static void l2cap_sock_close(struct sock *sk);
77 static void l2cap_sock_kill(struct sock *sk);
78
79 static int l2cap_build_conf_req(struct sock *sk, void *data);
80 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
81 u8 code, u8 ident, u16 dlen, void *data);
82
83 static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb);
84
85 /* ---- L2CAP timers ---- */
/* Socket timer callback (runs in timer/softirq context).
 * Chooses an error code from the state the socket timed out in,
 * closes the channel, then drops the timer's socket reference.
 */
static void l2cap_sock_timeout(unsigned long arg)
{
	struct sock *sk = (struct sock *) arg;
	int reason;

	BT_DBG("sock %p state %d", sk, sk->sk_state);

	bh_lock_sock(sk);

	/* Timing out while connected/configuring, or while connecting on a
	 * secured channel, reads as a refused connection; otherwise report
	 * a plain timeout. */
	if (sk->sk_state == BT_CONNECTED || sk->sk_state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (sk->sk_state == BT_CONNECT &&
			l2cap_pi(sk)->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	__l2cap_sock_close(sk, reason);

	bh_unlock_sock(sk);

	/* l2cap_sock_kill() must run on the unlocked socket */
	l2cap_sock_kill(sk);
	sock_put(sk);
}
110
/* (Re)arm the socket timer; l2cap_sock_timeout() fires after @timeout
 * jiffies. */
static void l2cap_sock_set_timer(struct sock *sk, long timeout)
{
	BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
	sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
}
116
/* Disarm a pending socket timer, if any. */
static void l2cap_sock_clear_timer(struct sock *sk)
{
	BT_DBG("sock %p state %d", sk, sk->sk_state);
	sk_stop_timer(sk, &sk->sk_timer);
}
122
123 /* ---- L2CAP channels ---- */
124 static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
125 {
126 struct sock *s;
127 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
128 if (l2cap_pi(s)->dcid == cid)
129 break;
130 }
131 return s;
132 }
133
134 static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
135 {
136 struct sock *s;
137 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
138 if (l2cap_pi(s)->scid == cid)
139 break;
140 }
141 return s;
142 }
143
/* Find channel with given SCID.
 * Returns the socket with bh_lock_sock() held, or NULL.
 * The list read lock is dropped before returning; the per-socket lock
 * taken inside it keeps the socket valid for the caller. */
static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
{
	struct sock *s;
	read_lock(&l->lock);
	s = __l2cap_get_chan_by_scid(l, cid);
	if (s)
		bh_lock_sock(s);
	read_unlock(&l->lock);
	return s;
}
156
157 static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
158 {
159 struct sock *s;
160 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
161 if (l2cap_pi(s)->ident == ident)
162 break;
163 }
164 return s;
165 }
166
/* Find the channel waiting on signalling identifier @ident.
 * Returns the socket with bh_lock_sock() held, or NULL. */
static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
{
	struct sock *s;
	read_lock(&l->lock);
	s = __l2cap_get_chan_by_ident(l, ident);
	if (s)
		bh_lock_sock(s);
	read_unlock(&l->lock);
	return s;
}
177
178 static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
179 {
180 u16 cid = L2CAP_CID_DYN_START;
181
182 for (; cid < L2CAP_CID_DYN_END; cid++) {
183 if (!__l2cap_get_chan_by_scid(l, cid))
184 return cid;
185 }
186
187 return 0;
188 }
189
/* Insert @sk at the head of the connection's channel list, taking a
 * socket reference for the list. Caller holds the list write lock. */
static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
{
	sock_hold(sk);

	if (l->head)
		l2cap_pi(l->head)->prev_c = sk;

	l2cap_pi(sk)->next_c = l->head;
	l2cap_pi(sk)->prev_c = NULL;
	l->head = sk;
}
201
/* Remove @sk from the connection's channel list (takes the list write
 * lock itself) and drop the reference held by the list. */
static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
{
	struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;

	write_lock_bh(&l->lock);
	if (sk == l->head)
		l->head = next;

	if (next)
		l2cap_pi(next)->prev_c = prev;
	if (prev)
		l2cap_pi(prev)->next_c = next;
	write_unlock_bh(&l->lock);

	__sock_put(sk);
}
218
/* Attach channel @sk to connection @conn: assign CIDs according to the
 * socket type, link it into the channel list, and queue it on @parent's
 * accept queue when it was created by an incoming request.
 * Caller holds the channel list write lock. */
static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
{
	struct l2cap_chan_list *l = &conn->chan_list;

	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
			l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);

	/* 0x13: HCI "Remote User Terminated Connection" default reason */
	conn->disc_reason = 0x13;

	l2cap_pi(sk)->conn = conn;

	if (sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM) {
		/* Alloc CID for connection-oriented socket */
		l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
	} else if (sk->sk_type == SOCK_DGRAM) {
		/* Connectionless socket */
		l2cap_pi(sk)->scid = L2CAP_CID_CONN_LESS;
		l2cap_pi(sk)->dcid = L2CAP_CID_CONN_LESS;
		l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
	} else {
		/* Raw socket can send/recv signalling messages only */
		l2cap_pi(sk)->scid = L2CAP_CID_SIGNALING;
		l2cap_pi(sk)->dcid = L2CAP_CID_SIGNALING;
		l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
	}

	__l2cap_chan_link(l, sk);

	if (parent)
		bt_accept_enqueue(parent, sk);
}
250
/* Delete channel.
 * Must be called on the locked socket.
 * Detaches @sk from its connection, marks it closed/zapped, notifies
 * the listening parent (or the socket itself), and tears down all
 * pending transmit state including ERTM timers and queues. */
static void l2cap_chan_del(struct sock *sk, int err)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct sock *parent = bt_sk(sk)->parent;

	l2cap_sock_clear_timer(sk);

	BT_DBG("sk %p, conn %p, err %d", sk, conn, err);

	if (conn) {
		/* Unlink from channel list; drops the ACL reference taken
		 * when the channel was added. */
		l2cap_chan_unlink(&conn->chan_list, sk);
		l2cap_pi(sk)->conn = NULL;
		hci_conn_put(conn->hcon);
	}

	sk->sk_state = BT_CLOSED;
	sock_set_flag(sk, SOCK_ZAPPED);

	if (err)
		sk->sk_err = err;

	if (parent) {
		/* Pending accept: pull it off the parent's queue and wake
		 * the listener. */
		bt_accept_unlink(sk);
		parent->sk_data_ready(parent, 0);
	} else
		sk->sk_state_change(sk);

	skb_queue_purge(TX_QUEUE(sk));

	if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
		struct srej_list *l, *tmp;

		del_timer(&l2cap_pi(sk)->retrans_timer);
		del_timer(&l2cap_pi(sk)->monitor_timer);
		del_timer(&l2cap_pi(sk)->ack_timer);

		skb_queue_purge(SREJ_QUEUE(sk));
		skb_queue_purge(BUSY_QUEUE(sk));

		/* Free any outstanding selective-reject bookkeeping */
		list_for_each_entry_safe(l, tmp, SREJ_LIST(sk), list) {
			list_del(&l->list);
			kfree(l);
		}
	}
}
299
/* Service level security */
/* Map the channel's security level to an HCI authentication type and
 * ask the HCI layer to enforce it on the underlying ACL link.
 * Returns the hci_conn_security() result (non-zero when the link
 * already satisfies the requirement). */
static inline int l2cap_check_security(struct sock *sk)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	__u8 auth_type;

	/* PSM 0x0001 is SDP: no bonding needed, and LOW is relaxed to
	 * the dedicated SDP security level. */
	if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
		if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
			auth_type = HCI_AT_NO_BONDING_MITM;
		else
			auth_type = HCI_AT_NO_BONDING;

		if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
			l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
	} else {
		switch (l2cap_pi(sk)->sec_level) {
		case BT_SECURITY_HIGH:
			auth_type = HCI_AT_GENERAL_BONDING_MITM;
			break;
		case BT_SECURITY_MEDIUM:
			auth_type = HCI_AT_GENERAL_BONDING;
			break;
		default:
			auth_type = HCI_AT_NO_BONDING;
			break;
		}
	}

	return hci_conn_security(conn->hcon, l2cap_pi(sk)->sec_level,
								auth_type);
}
331
/* Allocate the next signalling command identifier for @conn.
 * Wraps within 1-128 under conn->lock so concurrent senders never
 * reuse an identifier. */
static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
{
	u8 id;

	/* Get next available identificator.
	 * 1 - 128 are used by kernel.
	 * 129 - 199 are reserved.
	 * 200 - 254 are used by utilities like l2ping, etc.
	 */

	spin_lock_bh(&conn->lock);

	if (++conn->tx_ident > 128)
		conn->tx_ident = 1;

	id = conn->tx_ident;

	spin_unlock_bh(&conn->lock);

	return id;
}
353
354 static inline void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
355 {
356 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
357
358 BT_DBG("code 0x%2.2x", code);
359
360 if (!skb)
361 return;
362
363 hci_send_acl(conn->hcon, skb, 0);
364 }
365
/* Build and transmit an ERTM supervisory frame (S-frame) carrying
 * @control, consuming any pending Final/Poll bit requests on the
 * channel. No-op unless the socket is connected. */
static inline void l2cap_send_sframe(struct l2cap_pinfo *pi, u16 control)
{
	struct sk_buff *skb;
	struct l2cap_hdr *lh;
	struct l2cap_conn *conn = pi->conn;
	struct sock *sk = (struct sock *)pi;
	int count, hlen = L2CAP_HDR_SIZE + 2;	/* header + 16-bit control */

	if (sk->sk_state != BT_CONNECTED)
		return;

	if (pi->fcs == L2CAP_FCS_CRC16)
		hlen += 2;	/* trailing FCS */

	BT_DBG("pi %p, control 0x%2.2x", pi, control);

	count = min_t(unsigned int, conn->mtu, hlen);
	control |= L2CAP_CTRL_FRAME_TYPE;

	/* Piggyback a pending Final bit, then clear the request */
	if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
		control |= L2CAP_CTRL_FINAL;
		pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
	}

	/* Likewise for a pending Poll bit */
	if (pi->conn_state & L2CAP_CONN_SEND_PBIT) {
		control |= L2CAP_CTRL_POLL;
		pi->conn_state &= ~L2CAP_CONN_SEND_PBIT;
	}

	skb = bt_skb_alloc(count, GFP_ATOMIC);
	if (!skb)
		return;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(pi->dcid);
	put_unaligned_le16(control, skb_put(skb, 2));

	if (pi->fcs == L2CAP_FCS_CRC16) {
		/* FCS covers everything before it (count - 2 bytes:
		 * basic header plus control field) */
		u16 fcs = crc16(0, (u8 *)lh, count - 2);
		put_unaligned_le16(fcs, skb_put(skb, 2));
	}

	hci_send_acl(pi->conn->hcon, skb, 0);
}
411
/* Send a Receiver-Ready, or Receiver-Not-Ready while the local side is
 * busy, acknowledging frames up to buffer_seq. Remembers that an RNR
 * was sent so it can be cleared when the busy condition ends. */
static inline void l2cap_send_rr_or_rnr(struct l2cap_pinfo *pi, u16 control)
{
	if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
		control |= L2CAP_SUPER_RCV_NOT_READY;
		pi->conn_state |= L2CAP_CONN_RNR_SENT;
	} else
		control |= L2CAP_SUPER_RCV_READY;

	control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;

	l2cap_send_sframe(pi, control);
}
424
425 static inline int __l2cap_no_conn_pending(struct sock *sk)
426 {
427 return !(l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND);
428 }
429
/* Kick off channel establishment: send an L2CAP Connect Request once
 * the remote feature mask is known and security is satisfied, or start
 * the feature-mask exchange (Information Request) first. */
static void l2cap_do_start(struct sock *sk)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
		/* Exchange in flight; l2cap_conn_start() retries us when
		 * the response (or timeout) arrives. */
		if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
			return;

		if (l2cap_check_security(sk) && __l2cap_no_conn_pending(sk)) {
			struct l2cap_conn_req req;
			req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
			req.psm  = l2cap_pi(sk)->psm;

			l2cap_pi(sk)->ident = l2cap_get_ident(conn);
			l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;

			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_REQ, sizeof(req), &req);
		}
	} else {
		struct l2cap_info_req req;
		req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		/* Bound the wait for the Information Response */
		mod_timer(&conn->info_timer, jiffies +
					msecs_to_jiffies(L2CAP_INFO_TIMEOUT));

		l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(req), &req);
	}
}
463
464 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
465 {
466 u32 local_feat_mask = l2cap_feat_mask;
467 if (!disable_ertm)
468 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
469
470 switch (mode) {
471 case L2CAP_MODE_ERTM:
472 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
473 case L2CAP_MODE_STREAMING:
474 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
475 default:
476 return 0x00;
477 }
478 }
479
/* Send an L2CAP Disconnect Request for @sk's channel, discard pending
 * transmit state, and move the socket to BT_DISCONN with @err recorded.
 * Safe to call with a NULL @conn (no-op). */
static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct sock *sk, int err)
{
	struct l2cap_disconn_req req;

	if (!conn)
		return;

	skb_queue_purge(TX_QUEUE(sk));

	if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
		/* Nothing left to retransmit or acknowledge */
		del_timer(&l2cap_pi(sk)->retrans_timer);
		del_timer(&l2cap_pi(sk)->monitor_timer);
		del_timer(&l2cap_pi(sk)->ack_timer);
	}

	req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
	req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
	l2cap_send_cmd(conn, l2cap_get_ident(conn),
			L2CAP_DISCONN_REQ, sizeof(req), &req);

	sk->sk_state = BT_DISCONN;
	sk->sk_err = err;
}
503
504 /* ---- L2CAP connections ---- */
505 static void l2cap_conn_start(struct l2cap_conn *conn)
506 {
507 struct l2cap_chan_list *l = &conn->chan_list;
508 struct sock_del_list del, *tmp1, *tmp2;
509 struct sock *sk;
510
511 BT_DBG("conn %p", conn);
512
513 INIT_LIST_HEAD(&del.list);
514
515 read_lock(&l->lock);
516
517 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
518 bh_lock_sock(sk);
519
520 if (sk->sk_type != SOCK_SEQPACKET &&
521 sk->sk_type != SOCK_STREAM) {
522 bh_unlock_sock(sk);
523 continue;
524 }
525
526 if (sk->sk_state == BT_CONNECT) {
527 struct l2cap_conn_req req;
528
529 if (!l2cap_check_security(sk) ||
530 !__l2cap_no_conn_pending(sk)) {
531 bh_unlock_sock(sk);
532 continue;
533 }
534
535 if (!l2cap_mode_supported(l2cap_pi(sk)->mode,
536 conn->feat_mask)
537 && l2cap_pi(sk)->conf_state &
538 L2CAP_CONF_STATE2_DEVICE) {
539 tmp1 = kzalloc(sizeof(struct sock_del_list),
540 GFP_ATOMIC);
541 tmp1->sk = sk;
542 list_add_tail(&tmp1->list, &del.list);
543 bh_unlock_sock(sk);
544 continue;
545 }
546
547 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
548 req.psm = l2cap_pi(sk)->psm;
549
550 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
551 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
552
553 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
554 L2CAP_CONN_REQ, sizeof(req), &req);
555
556 } else if (sk->sk_state == BT_CONNECT2) {
557 struct l2cap_conn_rsp rsp;
558 char buf[128];
559 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
560 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
561
562 if (l2cap_check_security(sk)) {
563 if (bt_sk(sk)->defer_setup) {
564 struct sock *parent = bt_sk(sk)->parent;
565 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
566 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
567 parent->sk_data_ready(parent, 0);
568
569 } else {
570 sk->sk_state = BT_CONFIG;
571 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
572 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
573 }
574 } else {
575 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
576 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
577 }
578
579 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
580 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
581
582 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT ||
583 rsp.result != L2CAP_CR_SUCCESS) {
584 bh_unlock_sock(sk);
585 continue;
586 }
587
588 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
589 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
590 l2cap_build_conf_req(sk, buf), buf);
591 l2cap_pi(sk)->num_conf_req++;
592 }
593
594 bh_unlock_sock(sk);
595 }
596
597 read_unlock(&l->lock);
598
599 list_for_each_entry_safe(tmp1, tmp2, &del.list, list) {
600 bh_lock_sock(tmp1->sk);
601 __l2cap_sock_close(tmp1->sk, ECONNRESET);
602 bh_unlock_sock(tmp1->sk);
603 list_del(&tmp1->list);
604 kfree(tmp1);
605 }
606 }
607
/* The underlying ACL link is up: mark non-connection-oriented sockets
 * connected immediately and start setup for channels in BT_CONNECT. */
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan_list *l = &conn->chan_list;
	struct sock *sk;

	BT_DBG("conn %p", conn);

	read_lock(&l->lock);

	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		bh_lock_sock(sk);

		if (sk->sk_type != SOCK_SEQPACKET &&
				sk->sk_type != SOCK_STREAM) {
			/* Raw/datagram sockets need no channel setup */
			l2cap_sock_clear_timer(sk);
			sk->sk_state = BT_CONNECTED;
			sk->sk_state_change(sk);
		} else if (sk->sk_state == BT_CONNECT)
			l2cap_do_start(sk);

		bh_unlock_sock(sk);
	}

	read_unlock(&l->lock);
}
633
/* Notify sockets that we cannot guaranty reliability anymore */
/* Flag @err on every channel that asked for reliable delivery
 * (force_reliable); other channels are left untouched. */
static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
{
	struct l2cap_chan_list *l = &conn->chan_list;
	struct sock *sk;

	BT_DBG("conn %p", conn);

	read_lock(&l->lock);

	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		if (l2cap_pi(sk)->force_reliable)
			sk->sk_err = err;
	}

	read_unlock(&l->lock);
}
651
/* Information Request timed out: give up on the feature-mask exchange
 * (mask stays at its default) and proceed with channel setup anyway. */
static void l2cap_info_timeout(unsigned long arg)
{
	struct l2cap_conn *conn = (void *) arg;

	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
	conn->info_ident = 0;

	l2cap_conn_start(conn);
}
661
/* Get or create the L2CAP connection object for ACL link @hcon.
 * Returns the existing one if present, NULL on @status error or
 * allocation failure. Runs in atomic context (GFP_ATOMIC). */
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	if (conn || status)
		return conn;

	conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
	if (!conn)
		return NULL;

	hcon->l2cap_data = conn;
	conn->hcon = hcon;

	BT_DBG("hcon %p conn %p", hcon, conn);

	conn->mtu = hcon->hdev->acl_mtu;
	conn->src = &hcon->hdev->bdaddr;
	conn->dst = &hcon->dst;

	conn->feat_mask = 0;

	spin_lock_init(&conn->lock);
	rwlock_init(&conn->chan_list.lock);

	setup_timer(&conn->info_timer, l2cap_info_timeout,
						(unsigned long) conn);

	/* 0x13: HCI "Remote User Terminated Connection" default reason */
	conn->disc_reason = 0x13;

	return conn;
}
694
/* Tear down the L2CAP connection attached to @hcon: free any partial
 * reassembly buffer, delete every channel with error @err, stop the
 * info timer, and release the connection object. */
static void l2cap_conn_del(struct hci_conn *hcon, int err)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct sock *sk;

	if (!conn)
		return;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	/* Partially reassembled ACL data, if any (kfree_skb(NULL) is ok) */
	kfree_skb(conn->rx_skb);

	/* Kill channels */
	while ((sk = conn->chan_list.head)) {
		bh_lock_sock(sk);
		l2cap_chan_del(sk, err);
		bh_unlock_sock(sk);
		l2cap_sock_kill(sk);
	}

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		del_timer_sync(&conn->info_timer);

	hcon->l2cap_data = NULL;
	kfree(conn);
}
721
/* Locked wrapper around __l2cap_chan_add(): attach @sk to @conn under
 * the channel list write lock. */
static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
{
	struct l2cap_chan_list *l = &conn->chan_list;
	write_lock_bh(&l->lock);
	__l2cap_chan_add(conn, sk, parent);
	write_unlock_bh(&l->lock);
}
729
730 /* ---- Socket interface ---- */
731 static struct sock *__l2cap_get_sock_by_addr(__le16 psm, bdaddr_t *src)
732 {
733 struct sock *sk;
734 struct hlist_node *node;
735 sk_for_each(sk, node, &l2cap_sk_list.head)
736 if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src))
737 goto found;
738 sk = NULL;
739 found:
740 return sk;
741 }
742
/* Find socket with psm and source bdaddr.
 * Returns closest match: an exact source-address match if one exists,
 * otherwise a socket bound to BDADDR_ANY with the same PSM.
 * Caller must hold l2cap_sk_list.lock.
 */
static struct sock *__l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
{
	struct sock *sk = NULL, *sk1 = NULL;
	struct hlist_node *node;

	sk_for_each(sk, node, &l2cap_sk_list.head) {
		if (state && sk->sk_state != state)
			continue;

		if (l2cap_pi(sk)->psm == psm) {
			/* Exact match. */
			if (!bacmp(&bt_sk(sk)->src, src))
				break;

			/* Closest match */
			if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
				sk1 = sk;
		}
	}
	/* node is non-NULL only when the loop broke on an exact match */
	return node ? sk : sk1;
}
767
/* Find socket with given address (psm, src).
 * Returns the socket with bh_lock_sock() held, or NULL. */
static inline struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
{
	struct sock *s;
	read_lock(&l2cap_sk_list.lock);
	s = __l2cap_get_sock_by_psm(state, psm, src);
	if (s)
		bh_lock_sock(s);
	read_unlock(&l2cap_sk_list.lock);
	return s;
}
780
/* Socket destructor: release any skbs still queued for receive or
 * transmit when the last reference is dropped. */
static void l2cap_sock_destruct(struct sock *sk)
{
	BT_DBG("sk %p", sk);

	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);
}
788
/* Shut down a listening socket: close every not-yet-accepted child,
 * then mark the parent closed and zapped. */
static void l2cap_sock_cleanup_listen(struct sock *parent)
{
	struct sock *sk;

	BT_DBG("parent %p", parent);

	/* Close not yet accepted channels */
	while ((sk = bt_accept_dequeue(parent, NULL)))
		l2cap_sock_close(sk);

	parent->sk_state = BT_CLOSED;
	sock_set_flag(parent, SOCK_ZAPPED);
}
802
/* Kill socket (only if zapped and orphan)
 * Must be called on unlocked socket.
 * Unlinks the socket from the global list and drops its reference;
 * a no-op while the socket is still attached to a struct socket or
 * has not been zapped yet.
 */
static void l2cap_sock_kill(struct sock *sk)
{
	if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
		return;

	BT_DBG("sk %p state %d", sk, sk->sk_state);

	/* Kill poor orphan */
	bt_sock_unlink(&l2cap_sk_list, sk);
	sock_set_flag(sk, SOCK_DEAD);
	sock_put(sk);
}
818
/* State-dependent close of a locked socket: tear down listeners,
 * negotiate a clean disconnect for established channels, refuse
 * half-open incoming connections, and zap everything else. */
static void __l2cap_sock_close(struct sock *sk, int reason)
{
	BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);

	switch (sk->sk_state) {
	case BT_LISTEN:
		l2cap_sock_cleanup_listen(sk);
		break;

	case BT_CONNECTED:
	case BT_CONFIG:
		if (sk->sk_type == SOCK_SEQPACKET ||
				sk->sk_type == SOCK_STREAM) {
			struct l2cap_conn *conn = l2cap_pi(sk)->conn;

			/* Bound the wait for the Disconnect Response */
			l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
			l2cap_send_disconn_req(conn, sk, reason);
		} else
			l2cap_chan_del(sk, reason);
		break;

	case BT_CONNECT2:
		if (sk->sk_type == SOCK_SEQPACKET ||
				sk->sk_type == SOCK_STREAM) {
			struct l2cap_conn *conn = l2cap_pi(sk)->conn;
			struct l2cap_conn_rsp rsp;
			__u16 result;

			/* Incoming request still pending: reject it with a
			 * reason matching why it was never accepted */
			if (bt_sk(sk)->defer_setup)
				result = L2CAP_CR_SEC_BLOCK;
			else
				result = L2CAP_CR_BAD_PSM;

			rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
			rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
			rsp.result = cpu_to_le16(result);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_RSP, sizeof(rsp), &rsp);
		} else
			l2cap_chan_del(sk, reason);
		break;

	case BT_CONNECT:
	case BT_DISCONN:
		l2cap_chan_del(sk, reason);
		break;

	default:
		sock_set_flag(sk, SOCK_ZAPPED);
		break;
	}
}
872
/* Must be called on unlocked socket. */
/* Full close sequence: stop the timer, run the state-dependent close
 * under the socket lock, then reap the socket if it became an orphan. */
static void l2cap_sock_close(struct sock *sk)
{
	l2cap_sock_clear_timer(sk);
	lock_sock(sk);
	__l2cap_sock_close(sk, ECONNRESET);
	release_sock(sk);
	l2cap_sock_kill(sk);
}
882
/* Initialize a fresh L2CAP socket: inherit tunables from the listening
 * @parent when accepting, otherwise apply module defaults, then set up
 * the per-socket queues and lists. */
static void l2cap_sock_init(struct sock *sk, struct sock *parent)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);

	BT_DBG("sk %p", sk);

	if (parent) {
		/* Child of an accept(): copy the parent's configuration */
		sk->sk_type = parent->sk_type;
		bt_sk(sk)->defer_setup = bt_sk(parent)->defer_setup;

		pi->imtu = l2cap_pi(parent)->imtu;
		pi->omtu = l2cap_pi(parent)->omtu;
		pi->conf_state = l2cap_pi(parent)->conf_state;
		pi->mode = l2cap_pi(parent)->mode;
		pi->fcs = l2cap_pi(parent)->fcs;
		pi->max_tx = l2cap_pi(parent)->max_tx;
		pi->tx_win = l2cap_pi(parent)->tx_win;
		pi->sec_level = l2cap_pi(parent)->sec_level;
		pi->role_switch = l2cap_pi(parent)->role_switch;
		pi->force_reliable = l2cap_pi(parent)->force_reliable;
	} else {
		pi->imtu = L2CAP_DEFAULT_MTU;
		pi->omtu = 0;
		/* SOCK_STREAM defaults to ERTM when it is enabled */
		if (!disable_ertm && sk->sk_type == SOCK_STREAM) {
			pi->mode = L2CAP_MODE_ERTM;
			pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
		} else {
			pi->mode = L2CAP_MODE_BASIC;
		}
		pi->max_tx = L2CAP_DEFAULT_MAX_TX;
		pi->fcs = L2CAP_FCS_CRC16;
		pi->tx_win = L2CAP_DEFAULT_TX_WINDOW;
		pi->sec_level = BT_SECURITY_LOW;
		pi->role_switch = 0;
		pi->force_reliable = 0;
	}

	/* Default config options */
	pi->conf_len = 0;
	pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
	skb_queue_head_init(TX_QUEUE(sk));
	skb_queue_head_init(SREJ_QUEUE(sk));
	skb_queue_head_init(BUSY_QUEUE(sk));
	INIT_LIST_HEAD(SREJ_LIST(sk));
}
928
/* Protocol descriptor for the BT socket layer; obj_size makes
 * sk_alloc() reserve room for struct l2cap_pinfo. */
static struct proto l2cap_proto = {
	.name = "L2CAP",
	.owner = THIS_MODULE,
	.obj_size = sizeof(struct l2cap_pinfo)
};
934
/* Allocate and minimally initialize an L2CAP socket in BT_OPEN state,
 * wiring up the destructor, connect timeout and socket timer, and link
 * it into the global l2cap_sk_list. Returns NULL on allocation failure. */
static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
{
	struct sock *sk;

	sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto);
	if (!sk)
		return NULL;

	sock_init_data(sock, sk);
	INIT_LIST_HEAD(&bt_sk(sk)->accept_q);

	sk->sk_destruct = l2cap_sock_destruct;
	sk->sk_sndtimeo = msecs_to_jiffies(L2CAP_CONN_TIMEOUT);

	sock_reset_flag(sk, SOCK_ZAPPED);

	sk->sk_protocol = proto;
	sk->sk_state = BT_OPEN;

	setup_timer(&sk->sk_timer, l2cap_sock_timeout, (unsigned long) sk);

	bt_sock_link(&l2cap_sk_list, sk);
	return sk;
}
959
/* socket(AF_BLUETOOTH, ..., BTPROTO_L2CAP) entry point.
 * Validates the socket type, restricts raw sockets to privileged
 * callers, then allocates and initializes the socket.
 * Returns 0 or a negative errno. */
static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol,
							int kern)
{
	struct sock *sk;

	BT_DBG("sock %p", sock);

	sock->state = SS_UNCONNECTED;

	if (sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM &&
			sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
		return -ESOCKTNOSUPPORT;

	/* Raw sockets carry signalling; require CAP_NET_RAW from userspace */
	if (sock->type == SOCK_RAW && !kern && !capable(CAP_NET_RAW))
		return -EPERM;

	sock->ops = &l2cap_sock_ops;

	sk = l2cap_sock_alloc(net, sock, protocol, GFP_ATOMIC);
	if (!sk)
		return -ENOMEM;

	l2cap_sock_init(sk, NULL);
	return 0;
}
985
/* bind() handler: validate the PSM (odd, reserved range restricted to
 * CAP_NET_BIND_SERVICE), reject duplicate (psm, source) bindings, and
 * record the source address/PSM on the socket.
 * Returns 0 or a negative errno. */
static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
{
	struct sock *sk = sock->sk;
	struct sockaddr_l2 la;
	int len, err = 0;

	BT_DBG("sk %p", sk);

	if (!addr || addr->sa_family != AF_BLUETOOTH)
		return -EINVAL;

	/* Copy only as much as the caller provided; rest stays zeroed */
	memset(&la, 0, sizeof(la));
	len = min_t(unsigned int, sizeof(la), alen);
	memcpy(&la, addr, len);

	if (la.l2_cid)
		return -EINVAL;

	lock_sock(sk);

	if (sk->sk_state != BT_OPEN) {
		err = -EBADFD;
		goto done;
	}

	if (la.l2_psm) {
		__u16 psm = __le16_to_cpu(la.l2_psm);

		/* PSM must be odd and lsb of upper byte must be 0 */
		if ((psm & 0x0101) != 0x0001) {
			err = -EINVAL;
			goto done;
		}

		/* Restrict usage of well-known PSMs */
		if (psm < 0x1001 && !capable(CAP_NET_BIND_SERVICE)) {
			err = -EACCES;
			goto done;
		}
	}

	write_lock_bh(&l2cap_sk_list.lock);

	if (la.l2_psm && __l2cap_get_sock_by_addr(la.l2_psm, &la.l2_bdaddr)) {
		err = -EADDRINUSE;
	} else {
		/* Save source address */
		bacpy(&bt_sk(sk)->src, &la.l2_bdaddr);
		l2cap_pi(sk)->psm = la.l2_psm;
		l2cap_pi(sk)->sport = la.l2_psm;
		sk->sk_state = BT_BOUND;

		/* SDP (0x0001) and RFCOMM (0x0003) get the relaxed SDP
		 * security level */
		if (__le16_to_cpu(la.l2_psm) == 0x0001 ||
					__le16_to_cpu(la.l2_psm) == 0x0003)
			l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
	}

	write_unlock_bh(&l2cap_sk_list.lock);

done:
	release_sock(sk);
	return err;
}
1049
/* Establish (or reuse) the ACL link to the destination and attach this
 * channel to it. Picks the HCI authentication type from the socket
 * type, PSM and security level (mirrors l2cap_check_security()), then
 * starts channel setup once the link is connected.
 * Called with the socket locked; returns 0 or a negative errno. */
static int l2cap_do_connect(struct sock *sk)
{
	bdaddr_t *src = &bt_sk(sk)->src;
	bdaddr_t *dst = &bt_sk(sk)->dst;
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	__u8 auth_type;
	int err;

	BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
							l2cap_pi(sk)->psm);

	hdev = hci_get_route(dst, src);
	if (!hdev)
		return -EHOSTUNREACH;

	hci_dev_lock_bh(hdev);

	err = -ENOMEM;

	if (sk->sk_type == SOCK_RAW) {
		switch (l2cap_pi(sk)->sec_level) {
		case BT_SECURITY_HIGH:
			auth_type = HCI_AT_DEDICATED_BONDING_MITM;
			break;
		case BT_SECURITY_MEDIUM:
			auth_type = HCI_AT_DEDICATED_BONDING;
			break;
		default:
			auth_type = HCI_AT_NO_BONDING;
			break;
		}
	} else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
		/* SDP: no bonding, and LOW is relaxed to the SDP level */
		if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
			auth_type = HCI_AT_NO_BONDING_MITM;
		else
			auth_type = HCI_AT_NO_BONDING;

		if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
			l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
	} else {
		switch (l2cap_pi(sk)->sec_level) {
		case BT_SECURITY_HIGH:
			auth_type = HCI_AT_GENERAL_BONDING_MITM;
			break;
		case BT_SECURITY_MEDIUM:
			auth_type = HCI_AT_GENERAL_BONDING;
			break;
		default:
			auth_type = HCI_AT_NO_BONDING;
			break;
		}
	}

	hcon = hci_connect(hdev, ACL_LINK, dst,
					l2cap_pi(sk)->sec_level, auth_type);
	if (!hcon)
		goto done;

	conn = l2cap_conn_add(hcon, 0);
	if (!conn) {
		hci_conn_put(hcon);
		goto done;
	}

	err = 0;

	/* Update source addr of the socket */
	bacpy(src, conn->src);

	l2cap_chan_add(conn, sk, NULL);

	sk->sk_state = BT_CONNECT;
	l2cap_sock_set_timer(sk, sk->sk_sndtimeo);

	if (hcon->state == BT_CONNECTED) {
		/* Link already up: raw/datagram sockets are done,
		 * channel sockets start the L2CAP handshake now. */
		if (sk->sk_type != SOCK_SEQPACKET &&
				sk->sk_type != SOCK_STREAM) {
			l2cap_sock_clear_timer(sk);
			sk->sk_state = BT_CONNECTED;
		} else
			l2cap_do_start(sk);
	}

done:
	hci_dev_unlock_bh(hdev);
	hci_dev_put(hdev);
	return err;
}
1140
/* connect() handler: validate the address/PSM and channel mode, start
 * the connection via l2cap_do_connect(), then (unless non-blocking)
 * wait for the socket to reach BT_CONNECTED.
 * Returns 0 or a negative errno. */
static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
{
	struct sock *sk = sock->sk;
	struct sockaddr_l2 la;
	int len, err = 0;

	BT_DBG("sk %p", sk);

	if (!addr || alen < sizeof(addr->sa_family) ||
			addr->sa_family != AF_BLUETOOTH)
		return -EINVAL;

	/* Copy only as much as the caller provided; rest stays zeroed */
	memset(&la, 0, sizeof(la));
	len = min_t(unsigned int, sizeof(la), alen);
	memcpy(&la, addr, len);

	if (la.l2_cid)
		return -EINVAL;

	lock_sock(sk);

	/* Connection-oriented sockets need a PSM to connect to */
	if ((sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM)
			&& !la.l2_psm) {
		err = -EINVAL;
		goto done;
	}

	switch (l2cap_pi(sk)->mode) {
	case L2CAP_MODE_BASIC:
		break;
	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		if (!disable_ertm)
			break;
		/* fall through */
	default:
		err = -ENOTSUPP;
		goto done;
	}

	switch (sk->sk_state) {
	case BT_CONNECT:
	case BT_CONNECT2:
	case BT_CONFIG:
		/* Already connecting */
		goto wait;

	case BT_CONNECTED:
		/* Already connected */
		err = -EISCONN;
		goto done;

	case BT_OPEN:
	case BT_BOUND:
		/* Can connect */
		break;

	default:
		err = -EBADFD;
		goto done;
	}

	/* PSM must be odd and lsb of upper byte must be 0 */
	if ((__le16_to_cpu(la.l2_psm) & 0x0101) != 0x0001 &&
			sk->sk_type != SOCK_RAW) {
		err = -EINVAL;
		goto done;
	}

	/* Set destination address and psm */
	bacpy(&bt_sk(sk)->dst, &la.l2_bdaddr);
	l2cap_pi(sk)->psm = la.l2_psm;

	err = l2cap_do_connect(sk);
	if (err)
		goto done;

wait:
	err = bt_sock_wait_state(sk, BT_CONNECTED,
			sock_sndtimeo(sk, flags & O_NONBLOCK));
done:
	release_sock(sk);
	return err;
}
1225
/* listen() handler: only bound SEQPACKET/STREAM sockets in a supported
 * mode may listen. If the socket was bound without a PSM, auto-assign
 * the first free dynamic PSM (odd values 0x1001-0x10ff).
 * Returns 0 or a negative errno. */
static int l2cap_sock_listen(struct socket *sock, int backlog)
{
	struct sock *sk = sock->sk;
	int err = 0;

	BT_DBG("sk %p backlog %d", sk, backlog);

	lock_sock(sk);

	if ((sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM)
			|| sk->sk_state != BT_BOUND) {
		err = -EBADFD;
		goto done;
	}

	switch (l2cap_pi(sk)->mode) {
	case L2CAP_MODE_BASIC:
		break;
	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		if (!disable_ertm)
			break;
		/* fall through */
	default:
		err = -ENOTSUPP;
		goto done;
	}

	if (!l2cap_pi(sk)->psm) {
		bdaddr_t *src = &bt_sk(sk)->src;
		u16 psm;

		err = -EINVAL;

		write_lock_bh(&l2cap_sk_list.lock);

		/* Dynamic PSMs are odd; scan for a free one */
		for (psm = 0x1001; psm < 0x1100; psm += 2)
			if (!__l2cap_get_sock_by_addr(cpu_to_le16(psm), src)) {
				l2cap_pi(sk)->psm = cpu_to_le16(psm);
				l2cap_pi(sk)->sport = cpu_to_le16(psm);
				err = 0;
				break;
			}

		write_unlock_bh(&l2cap_sk_list.lock);

		if (err < 0)
			goto done;
	}

	sk->sk_max_ack_backlog = backlog;
	sk->sk_ack_backlog = 0;
	sk->sk_state = BT_LISTEN;

done:
	release_sock(sk);
	return err;
}
1284
/* Accept an incoming connection on a listening L2CAP socket.
 *
 * Sleeps on the parent's wait queue (wake-one semantics) until a child
 * socket is ready, the rcvtimeo expires, or a signal arrives.  The
 * socket lock is dropped while sleeping and re-taken with
 * SINGLE_DEPTH_NESTING because child socks nest under the parent.
 */
static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
{
	DECLARE_WAITQUEUE(wait, current);
	struct sock *sk = sock->sk, *nsk;
	long timeo;
	int err = 0;

	lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

	if (sk->sk_state != BT_LISTEN) {
		err = -EBADFD;
		goto done;
	}

	timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);

	BT_DBG("sk %p timeo %ld", sk, timeo);

	/* Wait for an incoming connection. (wake-one). */
	add_wait_queue_exclusive(sk_sleep(sk), &wait);
	while (!(nsk = bt_accept_dequeue(sk, newsock))) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (!timeo) {
			err = -EAGAIN;
			break;
		}

		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

		/* The socket may have left BT_LISTEN while unlocked. */
		if (sk->sk_state != BT_LISTEN) {
			err = -EBADFD;
			break;
		}

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);

	if (err)
		goto done;

	newsock->state = SS_CONNECTED;

	BT_DBG("new socket %p", nsk);

done:
	release_sock(sk);
	return err;
}
1340
1341 static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
1342 {
1343 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
1344 struct sock *sk = sock->sk;
1345
1346 BT_DBG("sock %p, sk %p", sock, sk);
1347
1348 addr->sa_family = AF_BLUETOOTH;
1349 *len = sizeof(struct sockaddr_l2);
1350
1351 if (peer) {
1352 la->l2_psm = l2cap_pi(sk)->psm;
1353 bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
1354 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1355 } else {
1356 la->l2_psm = l2cap_pi(sk)->sport;
1357 bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
1358 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->scid);
1359 }
1360
1361 return 0;
1362 }
1363
/* Block until every transmitted ERTM I-frame has been acknowledged or
 * the channel connection goes away.
 *
 * Called with the socket lock held; the lock is dropped around each
 * sleep.  Wakes at least every HZ/5 to re-check the condition.
 * Returns 0 on success or a negative errno (signal or socket error).
 */
static int __l2cap_wait_ack(struct sock *sk)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;
	int timeo = HZ/5;

	add_wait_queue(sk_sleep(sk), &wait);
	while ((l2cap_pi(sk)->unacked_frames > 0 && l2cap_pi(sk)->conn)) {
		set_current_state(TASK_INTERRUPTIBLE);

		/* Never sleep indefinitely; poll again after HZ/5. */
		if (!timeo)
			timeo = HZ/5;

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}

		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);

		err = sock_error(sk);
		if (err)
			break;
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);
	return err;
}
1394
1395 static void l2cap_monitor_timeout(unsigned long arg)
1396 {
1397 struct sock *sk = (void *) arg;
1398
1399 BT_DBG("sk %p", sk);
1400
1401 bh_lock_sock(sk);
1402 if (l2cap_pi(sk)->retry_count >= l2cap_pi(sk)->remote_max_tx) {
1403 l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk, ECONNABORTED);
1404 bh_unlock_sock(sk);
1405 return;
1406 }
1407
1408 l2cap_pi(sk)->retry_count++;
1409 __mod_monitor_timer();
1410
1411 l2cap_send_rr_or_rnr(l2cap_pi(sk), L2CAP_CTRL_POLL);
1412 bh_unlock_sock(sk);
1413 }
1414
1415 static void l2cap_retrans_timeout(unsigned long arg)
1416 {
1417 struct sock *sk = (void *) arg;
1418
1419 BT_DBG("sk %p", sk);
1420
1421 bh_lock_sock(sk);
1422 l2cap_pi(sk)->retry_count = 1;
1423 __mod_monitor_timer();
1424
1425 l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;
1426
1427 l2cap_send_rr_or_rnr(l2cap_pi(sk), L2CAP_CTRL_POLL);
1428 bh_unlock_sock(sk);
1429 }
1430
1431 static void l2cap_drop_acked_frames(struct sock *sk)
1432 {
1433 struct sk_buff *skb;
1434
1435 while ((skb = skb_peek(TX_QUEUE(sk))) &&
1436 l2cap_pi(sk)->unacked_frames) {
1437 if (bt_cb(skb)->tx_seq == l2cap_pi(sk)->expected_ack_seq)
1438 break;
1439
1440 skb = skb_dequeue(TX_QUEUE(sk));
1441 kfree_skb(skb);
1442
1443 l2cap_pi(sk)->unacked_frames--;
1444 }
1445
1446 if (!l2cap_pi(sk)->unacked_frames)
1447 del_timer(&l2cap_pi(sk)->retrans_timer);
1448 }
1449
1450 static inline void l2cap_do_send(struct sock *sk, struct sk_buff *skb)
1451 {
1452 struct l2cap_pinfo *pi = l2cap_pi(sk);
1453
1454 BT_DBG("sk %p, skb %p len %d", sk, skb, skb->len);
1455
1456 hci_send_acl(pi->conn->hcon, skb, 0);
1457 }
1458
1459 static void l2cap_streaming_send(struct sock *sk)
1460 {
1461 struct sk_buff *skb;
1462 struct l2cap_pinfo *pi = l2cap_pi(sk);
1463 u16 control, fcs;
1464
1465 while ((skb = skb_dequeue(TX_QUEUE(sk)))) {
1466 control = get_unaligned_le16(skb->data + L2CAP_HDR_SIZE);
1467 control |= pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
1468 put_unaligned_le16(control, skb->data + L2CAP_HDR_SIZE);
1469
1470 if (pi->fcs == L2CAP_FCS_CRC16) {
1471 fcs = crc16(0, (u8 *)skb->data, skb->len - 2);
1472 put_unaligned_le16(fcs, skb->data + skb->len - 2);
1473 }
1474
1475 l2cap_do_send(sk, skb);
1476
1477 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1478 }
1479 }
1480
1481 static void l2cap_retransmit_one_frame(struct sock *sk, u8 tx_seq)
1482 {
1483 struct l2cap_pinfo *pi = l2cap_pi(sk);
1484 struct sk_buff *skb, *tx_skb;
1485 u16 control, fcs;
1486
1487 skb = skb_peek(TX_QUEUE(sk));
1488 if (!skb)
1489 return;
1490
1491 do {
1492 if (bt_cb(skb)->tx_seq == tx_seq)
1493 break;
1494
1495 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1496 return;
1497
1498 } while ((skb = skb_queue_next(TX_QUEUE(sk), skb)));
1499
1500 if (pi->remote_max_tx &&
1501 bt_cb(skb)->retries == pi->remote_max_tx) {
1502 l2cap_send_disconn_req(pi->conn, sk, ECONNABORTED);
1503 return;
1504 }
1505
1506 tx_skb = skb_clone(skb, GFP_ATOMIC);
1507 bt_cb(skb)->retries++;
1508 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1509
1510 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
1511 control |= L2CAP_CTRL_FINAL;
1512 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1513 }
1514
1515 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1516 | (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1517
1518 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1519
1520 if (pi->fcs == L2CAP_FCS_CRC16) {
1521 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1522 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1523 }
1524
1525 l2cap_do_send(sk, tx_skb);
1526 }
1527
1528 static int l2cap_ertm_send(struct sock *sk)
1529 {
1530 struct sk_buff *skb, *tx_skb;
1531 struct l2cap_pinfo *pi = l2cap_pi(sk);
1532 u16 control, fcs;
1533 int nsent = 0;
1534
1535 if (sk->sk_state != BT_CONNECTED)
1536 return -ENOTCONN;
1537
1538 while ((skb = sk->sk_send_head) && (!l2cap_tx_window_full(sk))) {
1539
1540 if (pi->remote_max_tx &&
1541 bt_cb(skb)->retries == pi->remote_max_tx) {
1542 l2cap_send_disconn_req(pi->conn, sk, ECONNABORTED);
1543 break;
1544 }
1545
1546 tx_skb = skb_clone(skb, GFP_ATOMIC);
1547
1548 bt_cb(skb)->retries++;
1549
1550 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1551 control &= L2CAP_CTRL_SAR;
1552
1553 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
1554 control |= L2CAP_CTRL_FINAL;
1555 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1556 }
1557 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1558 | (pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1559 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1560
1561
1562 if (pi->fcs == L2CAP_FCS_CRC16) {
1563 fcs = crc16(0, (u8 *)skb->data, tx_skb->len - 2);
1564 put_unaligned_le16(fcs, skb->data + tx_skb->len - 2);
1565 }
1566
1567 l2cap_do_send(sk, tx_skb);
1568
1569 __mod_retrans_timer();
1570
1571 bt_cb(skb)->tx_seq = pi->next_tx_seq;
1572 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1573
1574 pi->unacked_frames++;
1575 pi->frames_sent++;
1576
1577 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1578 sk->sk_send_head = NULL;
1579 else
1580 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
1581
1582 nsent++;
1583 }
1584
1585 return nsent;
1586 }
1587
1588 static int l2cap_retransmit_frames(struct sock *sk)
1589 {
1590 struct l2cap_pinfo *pi = l2cap_pi(sk);
1591 int ret;
1592
1593 if (!skb_queue_empty(TX_QUEUE(sk)))
1594 sk->sk_send_head = TX_QUEUE(sk)->next;
1595
1596 pi->next_tx_seq = pi->expected_ack_seq;
1597 ret = l2cap_ertm_send(sk);
1598 return ret;
1599 }
1600
1601 static void l2cap_send_ack(struct l2cap_pinfo *pi)
1602 {
1603 struct sock *sk = (struct sock *)pi;
1604 u16 control = 0;
1605
1606 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1607
1608 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
1609 control |= L2CAP_SUPER_RCV_NOT_READY;
1610 pi->conn_state |= L2CAP_CONN_RNR_SENT;
1611 l2cap_send_sframe(pi, control);
1612 return;
1613 }
1614
1615 if (l2cap_ertm_send(sk) > 0)
1616 return;
1617
1618 control |= L2CAP_SUPER_RCV_READY;
1619 l2cap_send_sframe(pi, control);
1620 }
1621
1622 static void l2cap_send_srejtail(struct sock *sk)
1623 {
1624 struct srej_list *tail;
1625 u16 control;
1626
1627 control = L2CAP_SUPER_SELECT_REJECT;
1628 control |= L2CAP_CTRL_FINAL;
1629
1630 tail = list_entry(SREJ_LIST(sk)->prev, struct srej_list, list);
1631 control |= tail->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1632
1633 l2cap_send_sframe(l2cap_pi(sk), control);
1634 }
1635
1636 static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
1637 {
1638 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1639 struct sk_buff **frag;
1640 int err, sent = 0;
1641
1642 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
1643 return -EFAULT;
1644
1645 sent += count;
1646 len -= count;
1647
1648 /* Continuation fragments (no L2CAP header) */
1649 frag = &skb_shinfo(skb)->frag_list;
1650 while (len) {
1651 count = min_t(unsigned int, conn->mtu, len);
1652
1653 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
1654 if (!*frag)
1655 return -EFAULT;
1656 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1657 return -EFAULT;
1658
1659 sent += count;
1660 len -= count;
1661
1662 frag = &(*frag)->next;
1663 }
1664
1665 return sent;
1666 }
1667
1668 static struct sk_buff *l2cap_create_connless_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1669 {
1670 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1671 struct sk_buff *skb;
1672 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1673 struct l2cap_hdr *lh;
1674
1675 BT_DBG("sk %p len %d", sk, (int)len);
1676
1677 count = min_t(unsigned int, (conn->mtu - hlen), len);
1678 skb = bt_skb_send_alloc(sk, count + hlen,
1679 msg->msg_flags & MSG_DONTWAIT, &err);
1680 if (!skb)
1681 return ERR_PTR(-ENOMEM);
1682
1683 /* Create L2CAP header */
1684 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1685 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1686 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1687 put_unaligned_le16(l2cap_pi(sk)->psm, skb_put(skb, 2));
1688
1689 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1690 if (unlikely(err < 0)) {
1691 kfree_skb(skb);
1692 return ERR_PTR(err);
1693 }
1694 return skb;
1695 }
1696
1697 static struct sk_buff *l2cap_create_basic_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1698 {
1699 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1700 struct sk_buff *skb;
1701 int err, count, hlen = L2CAP_HDR_SIZE;
1702 struct l2cap_hdr *lh;
1703
1704 BT_DBG("sk %p len %d", sk, (int)len);
1705
1706 count = min_t(unsigned int, (conn->mtu - hlen), len);
1707 skb = bt_skb_send_alloc(sk, count + hlen,
1708 msg->msg_flags & MSG_DONTWAIT, &err);
1709 if (!skb)
1710 return ERR_PTR(-ENOMEM);
1711
1712 /* Create L2CAP header */
1713 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1714 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1715 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1716
1717 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1718 if (unlikely(err < 0)) {
1719 kfree_skb(skb);
1720 return ERR_PTR(err);
1721 }
1722 return skb;
1723 }
1724
/* Build one ERTM/streaming I-frame PDU.
 *
 * The header is the basic L2CAP header plus a 2-byte control field,
 * an optional 2-byte SDU-length field (present on the first segment
 * of a SAR'd SDU, signalled by sdulen != 0) and an optional 2-byte
 * FCS trailer.  The FCS bytes are reserved as zero here and filled in
 * with the real CRC at transmit time.
 */
static struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk, struct msghdr *msg, size_t len, u16 control, u16 sdulen)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE + 2;
	struct l2cap_hdr *lh;

	BT_DBG("sk %p len %d", sk, (int)len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	/* First SAR segment also carries the total SDU length. */
	if (sdulen)
		hlen += 2;

	/* Reserve room for the 16-bit FCS trailer. */
	if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
		hlen += 2;

	count = min_t(unsigned int, (conn->mtu - hlen), len);
	skb = bt_skb_send_alloc(sk, count + hlen,
			msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
	put_unaligned_le16(control, skb_put(skb, 2));
	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, 2));

	err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	/* Placeholder FCS; the real CRC is computed just before sending. */
	if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
		put_unaligned_le16(0, skb_put(skb, 2));

	bt_cb(skb)->retries = 0;
	return skb;
}
1769
/* Segment an SDU larger than the remote MPS into a SAR sequence of
 * I-frames (START carrying the SDU length, CONTINUE frames, then END)
 * and append them atomically to the transmit queue.
 * Returns the number of payload bytes queued, or a negative errno.
 */
static inline int l2cap_sar_segment_sdu(struct sock *sk, struct msghdr *msg, size_t len)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct sk_buff *skb;
	struct sk_buff_head sar_queue;
	u16 control;
	size_t size = 0;

	/* Build the whole train on a private queue first so a mid-way
	 * allocation failure leaves TX_QUEUE(sk) untouched. */
	skb_queue_head_init(&sar_queue);
	control = L2CAP_SDU_START;
	skb = l2cap_create_iframe_pdu(sk, msg, pi->remote_mps, control, len);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	__skb_queue_tail(&sar_queue, skb);
	len -= pi->remote_mps;
	size += pi->remote_mps;

	while (len > 0) {
		size_t buflen;

		if (len > pi->remote_mps) {
			control = L2CAP_SDU_CONTINUE;
			buflen = pi->remote_mps;
		} else {
			control = L2CAP_SDU_END;
			buflen = len;
		}

		skb = l2cap_create_iframe_pdu(sk, msg, buflen, control, 0);
		if (IS_ERR(skb)) {
			skb_queue_purge(&sar_queue);
			return PTR_ERR(skb);
		}

		__skb_queue_tail(&sar_queue, skb);
		len -= buflen;
		size += buflen;
	}
	skb_queue_splice_tail(&sar_queue, TX_QUEUE(sk));
	if (sk->sk_send_head == NULL)
		sk->sk_send_head = sar_queue.next;

	return size;
}
1815
1816 static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
1817 {
1818 struct sock *sk = sock->sk;
1819 struct l2cap_pinfo *pi = l2cap_pi(sk);
1820 struct sk_buff *skb;
1821 u16 control;
1822 int err;
1823
1824 BT_DBG("sock %p, sk %p", sock, sk);
1825
1826 err = sock_error(sk);
1827 if (err)
1828 return err;
1829
1830 if (msg->msg_flags & MSG_OOB)
1831 return -EOPNOTSUPP;
1832
1833 lock_sock(sk);
1834
1835 if (sk->sk_state != BT_CONNECTED) {
1836 err = -ENOTCONN;
1837 goto done;
1838 }
1839
1840 /* Connectionless channel */
1841 if (sk->sk_type == SOCK_DGRAM) {
1842 skb = l2cap_create_connless_pdu(sk, msg, len);
1843 if (IS_ERR(skb)) {
1844 err = PTR_ERR(skb);
1845 } else {
1846 l2cap_do_send(sk, skb);
1847 err = len;
1848 }
1849 goto done;
1850 }
1851
1852 switch (pi->mode) {
1853 case L2CAP_MODE_BASIC:
1854 /* Check outgoing MTU */
1855 if (len > pi->omtu) {
1856 err = -EMSGSIZE;
1857 goto done;
1858 }
1859
1860 /* Create a basic PDU */
1861 skb = l2cap_create_basic_pdu(sk, msg, len);
1862 if (IS_ERR(skb)) {
1863 err = PTR_ERR(skb);
1864 goto done;
1865 }
1866
1867 l2cap_do_send(sk, skb);
1868 err = len;
1869 break;
1870
1871 case L2CAP_MODE_ERTM:
1872 case L2CAP_MODE_STREAMING:
1873 /* Entire SDU fits into one PDU */
1874 if (len <= pi->remote_mps) {
1875 control = L2CAP_SDU_UNSEGMENTED;
1876 skb = l2cap_create_iframe_pdu(sk, msg, len, control, 0);
1877 if (IS_ERR(skb)) {
1878 err = PTR_ERR(skb);
1879 goto done;
1880 }
1881 __skb_queue_tail(TX_QUEUE(sk), skb);
1882
1883 if (sk->sk_send_head == NULL)
1884 sk->sk_send_head = skb;
1885
1886 } else {
1887 /* Segment SDU into multiples PDUs */
1888 err = l2cap_sar_segment_sdu(sk, msg, len);
1889 if (err < 0)
1890 goto done;
1891 }
1892
1893 if (pi->mode == L2CAP_MODE_STREAMING) {
1894 l2cap_streaming_send(sk);
1895 } else {
1896 if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY &&
1897 pi->conn_state && L2CAP_CONN_WAIT_F) {
1898 err = len;
1899 break;
1900 }
1901 err = l2cap_ertm_send(sk);
1902 }
1903
1904 if (err >= 0)
1905 err = len;
1906 break;
1907
1908 default:
1909 BT_DBG("bad state %1.1x", pi->mode);
1910 err = -EBADFD;
1911 }
1912
1913 done:
1914 release_sock(sk);
1915 return err;
1916 }
1917
/* recvmsg() handler.
 *
 * A channel still in BT_CONNECT2 with deferred setup pending is
 * implicitly accepted here: the connect response (and, if not already
 * sent, the first configure request) goes out before any data can be
 * read.  Otherwise reception is delegated to the common bt_sock
 * helpers (stream variant for SOCK_STREAM sockets).
 */
static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags)
{
	struct sock *sk = sock->sk;

	lock_sock(sk);

	if (sk->sk_state == BT_CONNECT2 && bt_sk(sk)->defer_setup) {
		struct l2cap_conn_rsp rsp;
		struct l2cap_conn *conn = l2cap_pi(sk)->conn;
		u8 buf[128];

		sk->sk_state = BT_CONFIG;

		rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
		rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
		rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
		rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
		l2cap_send_cmd(l2cap_pi(sk)->conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_RSP, sizeof(rsp), &rsp);

		/* Only the first deferred accept sends a configure request. */
		if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT) {
			release_sock(sk);
			return 0;
		}

		l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
				l2cap_build_conf_req(sk, buf), buf);
		l2cap_pi(sk)->num_conf_req++;

		release_sock(sk);
		return 0;
	}

	release_sock(sk);

	if (sock->type == SOCK_STREAM)
		return bt_sock_stream_recvmsg(iocb, sock, msg, len, flags);

	return bt_sock_recvmsg(iocb, sock, msg, len, flags);
}
1959
/* Legacy SOL_L2CAP setsockopt handler (L2CAP_OPTIONS, L2CAP_LM).
 *
 * L2CAP_OPTIONS seeds the user struct with the current values so
 * short option buffers only override a prefix of the fields.
 */
static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct l2cap_options opts;
	int len, err = 0;
	u32 opt;

	BT_DBG("sk %p", sk);

	lock_sock(sk);

	switch (optname) {
	case L2CAP_OPTIONS:
		/* Channel parameters cannot change once connected. */
		if (sk->sk_state == BT_CONNECTED) {
			err = -EINVAL;
			break;
		}

		opts.imtu     = l2cap_pi(sk)->imtu;
		opts.omtu     = l2cap_pi(sk)->omtu;
		opts.flush_to = l2cap_pi(sk)->flush_to;
		opts.mode     = l2cap_pi(sk)->mode;
		opts.fcs      = l2cap_pi(sk)->fcs;
		opts.max_tx   = l2cap_pi(sk)->max_tx;
		opts.txwin_size = (__u16)l2cap_pi(sk)->tx_win;

		len = min_t(unsigned int, sizeof(opts), optlen);
		if (copy_from_user((char *) &opts, optval, len)) {
			err = -EFAULT;
			break;
		}

		if (opts.txwin_size > L2CAP_DEFAULT_TX_WINDOW) {
			err = -EINVAL;
			break;
		}

		l2cap_pi(sk)->mode = opts.mode;
		switch (l2cap_pi(sk)->mode) {
		case L2CAP_MODE_BASIC:
			l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_STATE2_DEVICE;
			break;
		case L2CAP_MODE_ERTM:
		case L2CAP_MODE_STREAMING:
			if (!disable_ertm)
				break;
			/* fall through */
		default:
			err = -EINVAL;
			break;
		}

		/* NOTE(review): when the mode above is rejected, err is set
		 * to -EINVAL but the remaining fields are still applied —
		 * confirm whether this partial update is intended. */
		l2cap_pi(sk)->imtu = opts.imtu;
		l2cap_pi(sk)->omtu = opts.omtu;
		l2cap_pi(sk)->fcs  = opts.fcs;
		l2cap_pi(sk)->max_tx = opts.max_tx;
		l2cap_pi(sk)->tx_win = (__u8)opts.txwin_size;
		break;

	case L2CAP_LM:
		if (get_user(opt, (u32 __user *) optval)) {
			err = -EFAULT;
			break;
		}

		/* Map link-mode flags onto the strongest security level
		 * requested; later assignments override earlier ones. */
		if (opt & L2CAP_LM_AUTH)
			l2cap_pi(sk)->sec_level = BT_SECURITY_LOW;
		if (opt & L2CAP_LM_ENCRYPT)
			l2cap_pi(sk)->sec_level = BT_SECURITY_MEDIUM;
		if (opt & L2CAP_LM_SECURE)
			l2cap_pi(sk)->sec_level = BT_SECURITY_HIGH;

		l2cap_pi(sk)->role_switch    = (opt & L2CAP_LM_MASTER);
		l2cap_pi(sk)->force_reliable = (opt & L2CAP_LM_RELIABLE);
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

	release_sock(sk);
	return err;
}
2044
2045 static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
2046 {
2047 struct sock *sk = sock->sk;
2048 struct bt_security sec;
2049 int len, err = 0;
2050 u32 opt;
2051
2052 BT_DBG("sk %p", sk);
2053
2054 if (level == SOL_L2CAP)
2055 return l2cap_sock_setsockopt_old(sock, optname, optval, optlen);
2056
2057 if (level != SOL_BLUETOOTH)
2058 return -ENOPROTOOPT;
2059
2060 lock_sock(sk);
2061
2062 switch (optname) {
2063 case BT_SECURITY:
2064 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
2065 && sk->sk_type != SOCK_RAW) {
2066 err = -EINVAL;
2067 break;
2068 }
2069
2070 sec.level = BT_SECURITY_LOW;
2071
2072 len = min_t(unsigned int, sizeof(sec), optlen);
2073 if (copy_from_user((char *) &sec, optval, len)) {
2074 err = -EFAULT;
2075 break;
2076 }
2077
2078 if (sec.level < BT_SECURITY_LOW ||
2079 sec.level > BT_SECURITY_HIGH) {
2080 err = -EINVAL;
2081 break;
2082 }
2083
2084 l2cap_pi(sk)->sec_level = sec.level;
2085 break;
2086
2087 case BT_DEFER_SETUP:
2088 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
2089 err = -EINVAL;
2090 break;
2091 }
2092
2093 if (get_user(opt, (u32 __user *) optval)) {
2094 err = -EFAULT;
2095 break;
2096 }
2097
2098 bt_sk(sk)->defer_setup = opt;
2099 break;
2100
2101 default:
2102 err = -ENOPROTOOPT;
2103 break;
2104 }
2105
2106 release_sock(sk);
2107 return err;
2108 }
2109
/* Legacy SOL_L2CAP getsockopt handler (L2CAP_OPTIONS, L2CAP_LM,
 * L2CAP_CONNINFO).  Copies out at most the user-supplied length.
 */
static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct l2cap_options opts;
	struct l2cap_conninfo cinfo;
	int len, err = 0;
	u32 opt;

	BT_DBG("sk %p", sk);

	if (get_user(len, optlen))
		return -EFAULT;

	lock_sock(sk);

	switch (optname) {
	case L2CAP_OPTIONS:
		opts.imtu     = l2cap_pi(sk)->imtu;
		opts.omtu     = l2cap_pi(sk)->omtu;
		opts.flush_to = l2cap_pi(sk)->flush_to;
		opts.mode     = l2cap_pi(sk)->mode;
		opts.fcs      = l2cap_pi(sk)->fcs;
		opts.max_tx   = l2cap_pi(sk)->max_tx;
		opts.txwin_size = (__u16)l2cap_pi(sk)->tx_win;

		len = min_t(unsigned int, len, sizeof(opts));
		if (copy_to_user(optval, (char *) &opts, len))
			err = -EFAULT;

		break;

	case L2CAP_LM:
		/* Reconstruct the link-mode flags from the security level. */
		switch (l2cap_pi(sk)->sec_level) {
		case BT_SECURITY_LOW:
			opt = L2CAP_LM_AUTH;
			break;
		case BT_SECURITY_MEDIUM:
			opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT;
			break;
		case BT_SECURITY_HIGH:
			opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT |
							L2CAP_LM_SECURE;
			break;
		default:
			opt = 0;
			break;
		}

		if (l2cap_pi(sk)->role_switch)
			opt |= L2CAP_LM_MASTER;

		if (l2cap_pi(sk)->force_reliable)
			opt |= L2CAP_LM_RELIABLE;

		if (put_user(opt, (u32 __user *) optval))
			err = -EFAULT;
		break;

	case L2CAP_CONNINFO:
		/* conn/hcon are only guaranteed valid in these states. */
		if (sk->sk_state != BT_CONNECTED &&
					!(sk->sk_state == BT_CONNECT2 &&
						bt_sk(sk)->defer_setup)) {
			err = -ENOTCONN;
			break;
		}

		cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
		memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);

		len = min_t(unsigned int, len, sizeof(cinfo));
		if (copy_to_user(optval, (char *) &cinfo, len))
			err = -EFAULT;

		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

	release_sock(sk);
	return err;
}
2193
2194 static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
2195 {
2196 struct sock *sk = sock->sk;
2197 struct bt_security sec;
2198 int len, err = 0;
2199
2200 BT_DBG("sk %p", sk);
2201
2202 if (level == SOL_L2CAP)
2203 return l2cap_sock_getsockopt_old(sock, optname, optval, optlen);
2204
2205 if (level != SOL_BLUETOOTH)
2206 return -ENOPROTOOPT;
2207
2208 if (get_user(len, optlen))
2209 return -EFAULT;
2210
2211 lock_sock(sk);
2212
2213 switch (optname) {
2214 case BT_SECURITY:
2215 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
2216 && sk->sk_type != SOCK_RAW) {
2217 err = -EINVAL;
2218 break;
2219 }
2220
2221 sec.level = l2cap_pi(sk)->sec_level;
2222
2223 len = min_t(unsigned int, len, sizeof(sec));
2224 if (copy_to_user(optval, (char *) &sec, len))
2225 err = -EFAULT;
2226
2227 break;
2228
2229 case BT_DEFER_SETUP:
2230 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
2231 err = -EINVAL;
2232 break;
2233 }
2234
2235 if (put_user(bt_sk(sk)->defer_setup, (u32 __user *) optval))
2236 err = -EFAULT;
2237
2238 break;
2239
2240 default:
2241 err = -ENOPROTOOPT;
2242 break;
2243 }
2244
2245 release_sock(sk);
2246 return err;
2247 }
2248
/* Shut down an L2CAP socket.
 *
 * For ERTM channels, first waits for outstanding frames to be acked so
 * queued data is not lost.  Then closes the channel and, when
 * SO_LINGER is active, waits for BT_CLOSED up to the linger time.
 */
static int l2cap_sock_shutdown(struct socket *sock, int how)
{
	struct sock *sk = sock->sk;
	int err = 0;

	BT_DBG("sock %p, sk %p", sock, sk);

	if (!sk)
		return 0;

	lock_sock(sk);
	if (!sk->sk_shutdown) {
		if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
			err = __l2cap_wait_ack(sk);

		sk->sk_shutdown = SHUTDOWN_MASK;
		l2cap_sock_clear_timer(sk);
		__l2cap_sock_close(sk, 0);

		if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
			err = bt_sock_wait_state(sk, BT_CLOSED,
							sk->sk_lingertime);
	}

	/* Report a pending socket error if nothing else failed. */
	if (!err && sk->sk_err)
		err = -sk->sk_err;

	release_sock(sk);
	return err;
}
2279
/* release() handler: final close of the socket.
 *
 * Performs a full shutdown, detaches the sock from the socket and
 * lets l2cap_sock_kill free it once it is unowned.
 */
static int l2cap_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	int err;

	BT_DBG("sock %p, sk %p", sock, sk);

	if (!sk)
		return 0;

	err = l2cap_sock_shutdown(sock, 2);

	sock_orphan(sk);
	l2cap_sock_kill(sk);
	return err;
}
2296
2297 static void l2cap_chan_ready(struct sock *sk)
2298 {
2299 struct sock *parent = bt_sk(sk)->parent;
2300
2301 BT_DBG("sk %p, parent %p", sk, parent);
2302
2303 l2cap_pi(sk)->conf_state = 0;
2304 l2cap_sock_clear_timer(sk);
2305
2306 if (!parent) {
2307 /* Outgoing channel.
2308 * Wake up socket sleeping on connect.
2309 */
2310 sk->sk_state = BT_CONNECTED;
2311 sk->sk_state_change(sk);
2312 } else {
2313 /* Incoming channel.
2314 * Wake up socket sleeping on accept.
2315 */
2316 parent->sk_data_ready(parent, 0);
2317 }
2318 }
2319
2320 /* Copy frame to all raw sockets on that connection */
2321 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2322 {
2323 struct l2cap_chan_list *l = &conn->chan_list;
2324 struct sk_buff *nskb;
2325 struct sock *sk;
2326
2327 BT_DBG("conn %p", conn);
2328
2329 read_lock(&l->lock);
2330 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
2331 if (sk->sk_type != SOCK_RAW)
2332 continue;
2333
2334 /* Don't send frame to the socket it came from */
2335 if (skb->sk == sk)
2336 continue;
2337 nskb = skb_clone(skb, GFP_ATOMIC);
2338 if (!nskb)
2339 continue;
2340
2341 if (sock_queue_rcv_skb(sk, nskb))
2342 kfree_skb(nskb);
2343 }
2344 read_unlock(&l->lock);
2345 }
2346
2347 /* ---- L2CAP signalling commands ---- */
/* Build a signalling-channel command PDU.
 *
 * Allocates a head skb with the L2CAP header (signalling CID), the
 * command header and as much of 'data' as fits into one ACL MTU; any
 * remainder is chained as header-less fragment skbs on frag_list.
 * Returns the skb, or NULL on allocation failure.
 */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
					u8 code, u8 ident, u16 dlen, void *data)
{
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	int len, count;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
			conn, code, ident, dlen);

	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, conn->mtu, len);

	skb = bt_skb_alloc(count, GFP_ATOMIC);
	if (!skb)
		return NULL;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
	lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);

	cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->code = code;
	cmd->ident = ident;
	cmd->len = cpu_to_le16(dlen);

	if (dlen) {
		/* Payload room left in the head skb after both headers. */
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		memcpy(skb_put(skb, count), data, count);
		data += count;
	}

	len -= skb->len;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_alloc(count, GFP_ATOMIC);
		if (!*frag)
			goto fail;

		memcpy(skb_put(*frag, count), data, count);

		len -= count;
		data += count;

		frag = &(*frag)->next;
	}

	return skb;

fail:
	/* Freeing the head skb also frees the fragments linked so far. */
	kfree_skb(skb);
	return NULL;
}
2406
/* Decode one configuration option at *ptr and advance *ptr past it.
 *
 * 1/2/4-byte values are returned by value in *val (16/32-bit ones
 * converted from little endian); any other length returns a pointer
 * to the raw option data in *val.  Returns the total encoded size.
 *
 * NOTE(review): opt->len comes from the remote peer and is not
 * validated against the remaining buffer here — callers must keep the
 * walk within the received command's bounds.
 */
static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
{
	struct l2cap_conf_opt *opt = *ptr;
	int len;

	len = L2CAP_CONF_OPT_SIZE + opt->len;
	*ptr += len;

	*type = opt->type;
	*olen = opt->len;

	switch (opt->len) {
	case 1:
		*val = *((u8 *) opt->val);
		break;

	case 2:
		*val = __le16_to_cpu(*((__le16 *) opt->val));
		break;

	case 4:
		*val = __le32_to_cpu(*((__le32 *) opt->val));
		break;

	default:
		*val = (unsigned long) opt->val;
		break;
	}

	BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
	return len;
}
2439
2440 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2441 {
2442 struct l2cap_conf_opt *opt = *ptr;
2443
2444 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
2445
2446 opt->type = type;
2447 opt->len = len;
2448
2449 switch (len) {
2450 case 1:
2451 *((u8 *) opt->val) = val;
2452 break;
2453
2454 case 2:
2455 *((__le16 *) opt->val) = cpu_to_le16(val);
2456 break;
2457
2458 case 4:
2459 *((__le32 *) opt->val) = cpu_to_le32(val);
2460 break;
2461
2462 default:
2463 memcpy(opt->val, (void *) val, len);
2464 break;
2465 }
2466
2467 *ptr += L2CAP_CONF_OPT_SIZE + len;
2468 }
2469
/* Delayed-ack timer: acknowledge received I-frames that have not been
 * acked by outgoing traffic in the meantime. */
static void l2cap_ack_timeout(unsigned long arg)
{
	struct sock *sk = (void *) arg;

	bh_lock_sock(sk);
	l2cap_send_ack(l2cap_pi(sk));
	bh_unlock_sock(sk);
}
2478
/* Initialise per-channel ERTM state: sequence counters, the three
 * ERTM timers (retransmission, monitor, delayed-ack), the SREJ and
 * busy queues, and the busy work item.  Also reroutes socket backlog
 * processing to the ERTM data handler.
 */
static inline void l2cap_ertm_init(struct sock *sk)
{
	l2cap_pi(sk)->expected_ack_seq = 0;
	l2cap_pi(sk)->unacked_frames = 0;
	l2cap_pi(sk)->buffer_seq = 0;
	l2cap_pi(sk)->num_acked = 0;
	l2cap_pi(sk)->frames_sent = 0;

	setup_timer(&l2cap_pi(sk)->retrans_timer,
			l2cap_retrans_timeout, (unsigned long) sk);
	setup_timer(&l2cap_pi(sk)->monitor_timer,
			l2cap_monitor_timeout, (unsigned long) sk);
	setup_timer(&l2cap_pi(sk)->ack_timer,
			l2cap_ack_timeout, (unsigned long) sk);

	__skb_queue_head_init(SREJ_QUEUE(sk));
	__skb_queue_head_init(BUSY_QUEUE(sk));

	INIT_WORK(&l2cap_pi(sk)->busy_work, l2cap_busy_work);

	sk->sk_backlog_rcv = l2cap_ertm_data_rcv;
}
2501
2502 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2503 {
2504 switch (mode) {
2505 case L2CAP_MODE_STREAMING:
2506 case L2CAP_MODE_ERTM:
2507 if (l2cap_mode_supported(mode, remote_feat_mask))
2508 return mode;
2509 /* fall through */
2510 default:
2511 return L2CAP_MODE_BASIC;
2512 }
2513 }
2514
/* Build an outgoing L2CAP Configure Request for this channel into @data.
 * Selects the channel mode (on the first exchange only) and appends the
 * MTU, RFC and FCS options as appropriate.  Returns the number of bytes
 * written (request header plus options). */
static int l2cap_build_conf_req(struct sock *sk, void *data)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = pi->mode };
	void *ptr = req->data;

	BT_DBG("sk %p", sk);

	/* Mode is only (re)selected before the first request/response has
	 * been exchanged; afterwards the chosen mode is kept. */
	if (pi->num_conf_req || pi->num_conf_rsp)
		goto done;

	switch (pi->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		/* Mode was mandated locally (set by the application/device);
		 * do not downgrade it based on the remote's features. */
		if (pi->conf_state & L2CAP_CONF_STATE2_DEVICE)
			break;

		/* fall through */
	default:
		pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
		break;
	}

done:
	switch (pi->mode) {
	case L2CAP_MODE_BASIC:
		/* MTU option is only needed when it differs from the default. */
		if (pi->imtu != L2CAP_DEFAULT_MTU)
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);

		/* Only send an explicit basic-mode RFC option when the peer
		 * advertises ERTM or streaming support at all. */
		if (!(pi->conn->feat_mask & L2CAP_FEAT_ERTM) &&
				!(pi->conn->feat_mask & L2CAP_FEAT_STREAMING))
			break;

		rfc.mode            = L2CAP_MODE_BASIC;
		rfc.txwin_size      = 0;
		rfc.max_transmit    = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size    = 0;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
							(unsigned long) &rfc);
		break;

	case L2CAP_MODE_ERTM:
		rfc.mode            = L2CAP_MODE_ERTM;
		rfc.txwin_size      = pi->tx_win;
		rfc.max_transmit    = pi->max_tx;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size    = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
		/* Cap the PDU size so a whole frame fits in the link MTU
		 * (the 10 bytes presumably cover L2CAP/ERTM header and FCS
		 * overhead — TODO confirm against the framing code). */
		if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
			rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
							(unsigned long) &rfc);

		if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		/* Ask the peer to drop the FCS when neither side wants it. */
		if (pi->fcs == L2CAP_FCS_NONE ||
				pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
			pi->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
		}
		break;

	case L2CAP_MODE_STREAMING:
		rfc.mode            = L2CAP_MODE_STREAMING;
		rfc.txwin_size      = 0;
		rfc.max_transmit    = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size    = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
		/* Same link-MTU cap as for ERTM above. */
		if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
			rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
							(unsigned long) &rfc);

		if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		if (pi->fcs == L2CAP_FCS_NONE ||
				pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
			pi->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
		}
		break;
	}

	/* FIXME: Need actual value of the flush timeout */
	//if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
	//   l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);

	req->dcid  = cpu_to_le16(pi->dcid);
	req->flags = cpu_to_le16(0);

	return ptr - data;
}
2616
2617 static int l2cap_parse_conf_req(struct sock *sk, void *data)
2618 {
2619 struct l2cap_pinfo *pi = l2cap_pi(sk);
2620 struct l2cap_conf_rsp *rsp = data;
2621 void *ptr = rsp->data;
2622 void *req = pi->conf_req;
2623 int len = pi->conf_len;
2624 int type, hint, olen;
2625 unsigned long val;
2626 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2627 u16 mtu = L2CAP_DEFAULT_MTU;
2628 u16 result = L2CAP_CONF_SUCCESS;
2629
2630 BT_DBG("sk %p", sk);
2631
2632 while (len >= L2CAP_CONF_OPT_SIZE) {
2633 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
2634
2635 hint = type & L2CAP_CONF_HINT;
2636 type &= L2CAP_CONF_MASK;
2637
2638 switch (type) {
2639 case L2CAP_CONF_MTU:
2640 mtu = val;
2641 break;
2642
2643 case L2CAP_CONF_FLUSH_TO:
2644 pi->flush_to = val;
2645 break;
2646
2647 case L2CAP_CONF_QOS:
2648 break;
2649
2650 case L2CAP_CONF_RFC:
2651 if (olen == sizeof(rfc))
2652 memcpy(&rfc, (void *) val, olen);
2653 break;
2654
2655 case L2CAP_CONF_FCS:
2656 if (val == L2CAP_FCS_NONE)
2657 pi->conf_state |= L2CAP_CONF_NO_FCS_RECV;
2658
2659 break;
2660
2661 default:
2662 if (hint)
2663 break;
2664
2665 result = L2CAP_CONF_UNKNOWN;
2666 *((u8 *) ptr++) = type;
2667 break;
2668 }
2669 }
2670
2671 if (pi->num_conf_rsp || pi->num_conf_req > 1)
2672 goto done;
2673
2674 switch (pi->mode) {
2675 case L2CAP_MODE_STREAMING:
2676 case L2CAP_MODE_ERTM:
2677 if (!(pi->conf_state & L2CAP_CONF_STATE2_DEVICE)) {
2678 pi->mode = l2cap_select_mode(rfc.mode,
2679 pi->conn->feat_mask);
2680 break;
2681 }
2682
2683 if (pi->mode != rfc.mode)
2684 return -ECONNREFUSED;
2685
2686 break;
2687 }
2688
2689 done:
2690 if (pi->mode != rfc.mode) {
2691 result = L2CAP_CONF_UNACCEPT;
2692 rfc.mode = pi->mode;
2693
2694 if (pi->num_conf_rsp == 1)
2695 return -ECONNREFUSED;
2696
2697 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2698 sizeof(rfc), (unsigned long) &rfc);
2699 }
2700
2701
2702 if (result == L2CAP_CONF_SUCCESS) {
2703 /* Configure output options and let the other side know
2704 * which ones we don't like. */
2705
2706 if (mtu < L2CAP_DEFAULT_MIN_MTU)
2707 result = L2CAP_CONF_UNACCEPT;
2708 else {
2709 pi->omtu = mtu;
2710 pi->conf_state |= L2CAP_CONF_MTU_DONE;
2711 }
2712 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
2713
2714 switch (rfc.mode) {
2715 case L2CAP_MODE_BASIC:
2716 pi->fcs = L2CAP_FCS_NONE;
2717 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2718 break;
2719
2720 case L2CAP_MODE_ERTM:
2721 pi->remote_tx_win = rfc.txwin_size;
2722 pi->remote_max_tx = rfc.max_transmit;
2723
2724 if (le16_to_cpu(rfc.max_pdu_size) > pi->conn->mtu - 10)
2725 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
2726
2727 pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
2728
2729 rfc.retrans_timeout =
2730 le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO);
2731 rfc.monitor_timeout =
2732 le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO);
2733
2734 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2735
2736 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2737 sizeof(rfc), (unsigned long) &rfc);
2738
2739 break;
2740
2741 case L2CAP_MODE_STREAMING:
2742 if (le16_to_cpu(rfc.max_pdu_size) > pi->conn->mtu - 10)
2743 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
2744
2745 pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
2746
2747 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2748
2749 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2750 sizeof(rfc), (unsigned long) &rfc);
2751
2752 break;
2753
2754 default:
2755 result = L2CAP_CONF_UNACCEPT;
2756
2757 memset(&rfc, 0, sizeof(rfc));
2758 rfc.mode = pi->mode;
2759 }
2760
2761 if (result == L2CAP_CONF_SUCCESS)
2762 pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
2763 }
2764 rsp->scid = cpu_to_le16(pi->dcid);
2765 rsp->result = cpu_to_le16(result);
2766 rsp->flags = cpu_to_le16(0x0000);
2767
2768 return ptr - data;
2769 }
2770
2771 static int l2cap_parse_conf_rsp(struct sock *sk, void *rsp, int len, void *data, u16 *result)
2772 {
2773 struct l2cap_pinfo *pi = l2cap_pi(sk);
2774 struct l2cap_conf_req *req = data;
2775 void *ptr = req->data;
2776 int type, olen;
2777 unsigned long val;
2778 struct l2cap_conf_rfc rfc;
2779
2780 BT_DBG("sk %p, rsp %p, len %d, req %p", sk, rsp, len, data);
2781
2782 while (len >= L2CAP_CONF_OPT_SIZE) {
2783 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2784
2785 switch (type) {
2786 case L2CAP_CONF_MTU:
2787 if (val < L2CAP_DEFAULT_MIN_MTU) {
2788 *result = L2CAP_CONF_UNACCEPT;
2789 pi->imtu = L2CAP_DEFAULT_MIN_MTU;
2790 } else
2791 pi->imtu = val;
2792 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
2793 break;
2794
2795 case L2CAP_CONF_FLUSH_TO:
2796 pi->flush_to = val;
2797 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2798 2, pi->flush_to);
2799 break;
2800
2801 case L2CAP_CONF_RFC:
2802 if (olen == sizeof(rfc))
2803 memcpy(&rfc, (void *)val, olen);
2804
2805 if ((pi->conf_state & L2CAP_CONF_STATE2_DEVICE) &&
2806 rfc.mode != pi->mode)
2807 return -ECONNREFUSED;
2808
2809 pi->fcs = 0;
2810
2811 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2812 sizeof(rfc), (unsigned long) &rfc);
2813 break;
2814 }
2815 }
2816
2817 if (pi->mode == L2CAP_MODE_BASIC && pi->mode != rfc.mode)
2818 return -ECONNREFUSED;
2819
2820 pi->mode = rfc.mode;
2821
2822 if (*result == L2CAP_CONF_SUCCESS) {
2823 switch (rfc.mode) {
2824 case L2CAP_MODE_ERTM:
2825 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2826 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2827 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2828 break;
2829 case L2CAP_MODE_STREAMING:
2830 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2831 }
2832 }
2833
2834 req->dcid = cpu_to_le16(pi->dcid);
2835 req->flags = cpu_to_le16(0x0000);
2836
2837 return ptr - data;
2838 }
2839
2840 static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
2841 {
2842 struct l2cap_conf_rsp *rsp = data;
2843 void *ptr = rsp->data;
2844
2845 BT_DBG("sk %p", sk);
2846
2847 rsp->scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2848 rsp->result = cpu_to_le16(result);
2849 rsp->flags = cpu_to_le16(flags);
2850
2851 return ptr - data;
2852 }
2853
2854 static void l2cap_conf_rfc_get(struct sock *sk, void *rsp, int len)
2855 {
2856 struct l2cap_pinfo *pi = l2cap_pi(sk);
2857 int type, olen;
2858 unsigned long val;
2859 struct l2cap_conf_rfc rfc;
2860
2861 BT_DBG("sk %p, rsp %p, len %d", sk, rsp, len);
2862
2863 if ((pi->mode != L2CAP_MODE_ERTM) && (pi->mode != L2CAP_MODE_STREAMING))
2864 return;
2865
2866 while (len >= L2CAP_CONF_OPT_SIZE) {
2867 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2868
2869 switch (type) {
2870 case L2CAP_CONF_RFC:
2871 if (olen == sizeof(rfc))
2872 memcpy(&rfc, (void *)val, olen);
2873 goto done;
2874 }
2875 }
2876
2877 done:
2878 switch (rfc.mode) {
2879 case L2CAP_MODE_ERTM:
2880 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2881 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2882 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2883 break;
2884 case L2CAP_MODE_STREAMING:
2885 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2886 }
2887 }
2888
2889 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2890 {
2891 struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
2892
2893 if (rej->reason != 0x0000)
2894 return 0;
2895
2896 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2897 cmd->ident == conn->info_ident) {
2898 del_timer(&conn->info_timer);
2899
2900 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2901 conn->info_ident = 0;
2902
2903 l2cap_conn_start(conn);
2904 }
2905
2906 return 0;
2907 }
2908
2909 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2910 {
2911 struct l2cap_chan_list *list = &conn->chan_list;
2912 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
2913 struct l2cap_conn_rsp rsp;
2914 struct sock *parent, *uninitialized_var(sk);
2915 int result, status = L2CAP_CS_NO_INFO;
2916
2917 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
2918 __le16 psm = req->psm;
2919
2920 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
2921
2922 /* Check if we have socket listening on psm */
2923 parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
2924 if (!parent) {
2925 result = L2CAP_CR_BAD_PSM;
2926 goto sendresp;
2927 }
2928
2929 /* Check if the ACL is secure enough (if not SDP) */
2930 if (psm != cpu_to_le16(0x0001) &&
2931 !hci_conn_check_link_mode(conn->hcon)) {
2932 conn->disc_reason = 0x05;
2933 result = L2CAP_CR_SEC_BLOCK;
2934 goto response;
2935 }
2936
2937 result = L2CAP_CR_NO_MEM;
2938
2939 /* Check for backlog size */
2940 if (sk_acceptq_is_full(parent)) {
2941 BT_DBG("backlog full %d", parent->sk_ack_backlog);
2942 goto response;
2943 }
2944
2945 sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
2946 if (!sk)
2947 goto response;
2948
2949 write_lock_bh(&list->lock);
2950
2951 /* Check if we already have channel with that dcid */
2952 if (__l2cap_get_chan_by_dcid(list, scid)) {
2953 write_unlock_bh(&list->lock);
2954 sock_set_flag(sk, SOCK_ZAPPED);
2955 l2cap_sock_kill(sk);
2956 goto response;
2957 }
2958
2959 hci_conn_hold(conn->hcon);
2960
2961 l2cap_sock_init(sk, parent);
2962 bacpy(&bt_sk(sk)->src, conn->src);
2963 bacpy(&bt_sk(sk)->dst, conn->dst);
2964 l2cap_pi(sk)->psm = psm;
2965 l2cap_pi(sk)->dcid = scid;
2966
2967 __l2cap_chan_add(conn, sk, parent);
2968 dcid = l2cap_pi(sk)->scid;
2969
2970 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
2971
2972 l2cap_pi(sk)->ident = cmd->ident;
2973
2974 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
2975 if (l2cap_check_security(sk)) {
2976 if (bt_sk(sk)->defer_setup) {
2977 sk->sk_state = BT_CONNECT2;
2978 result = L2CAP_CR_PEND;
2979 status = L2CAP_CS_AUTHOR_PEND;
2980 parent->sk_data_ready(parent, 0);
2981 } else {
2982 sk->sk_state = BT_CONFIG;
2983 result = L2CAP_CR_SUCCESS;
2984 status = L2CAP_CS_NO_INFO;
2985 }
2986 } else {
2987 sk->sk_state = BT_CONNECT2;
2988 result = L2CAP_CR_PEND;
2989 status = L2CAP_CS_AUTHEN_PEND;
2990 }
2991 } else {
2992 sk->sk_state = BT_CONNECT2;
2993 result = L2CAP_CR_PEND;
2994 status = L2CAP_CS_NO_INFO;
2995 }
2996
2997 write_unlock_bh(&list->lock);
2998
2999 response:
3000 bh_unlock_sock(parent);
3001
3002 sendresp:
3003 rsp.scid = cpu_to_le16(scid);
3004 rsp.dcid = cpu_to_le16(dcid);
3005 rsp.result = cpu_to_le16(result);
3006 rsp.status = cpu_to_le16(status);
3007 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
3008
3009 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
3010 struct l2cap_info_req info;
3011 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
3012
3013 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
3014 conn->info_ident = l2cap_get_ident(conn);
3015
3016 mod_timer(&conn->info_timer, jiffies +
3017 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
3018
3019 l2cap_send_cmd(conn, conn->info_ident,
3020 L2CAP_INFO_REQ, sizeof(info), &info);
3021 }
3022
3023 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT) &&
3024 result == L2CAP_CR_SUCCESS) {
3025 u8 buf[128];
3026 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
3027 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3028 l2cap_build_conf_req(sk, buf), buf);
3029 l2cap_pi(sk)->num_conf_req++;
3030 }
3031
3032 return 0;
3033 }
3034
/* Handle an incoming Connection Response.  Looks up the local channel
 * by scid (or by the request ident when scid is 0), then either moves
 * it to BT_CONFIG and starts configuration, records the pending state,
 * or tears the channel down on failure.
 * Note: the get_chan helpers return with the socket bh-locked; every
 * exit path goes through bh_unlock_sock(). */
static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct sock *sk;
	u8 req[128];

	scid   = __le16_to_cpu(rsp->scid);
	dcid   = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);

	/* scid == 0 means the peer hasn't echoed our source CID yet, so
	 * fall back to matching on the command identifier. */
	if (scid) {
		sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
		if (!sk)
			return -EFAULT;
	} else {
		sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident);
		if (!sk)
			return -EFAULT;
	}

	switch (result) {
	case L2CAP_CR_SUCCESS:
		sk->sk_state = BT_CONFIG;
		l2cap_pi(sk)->ident = 0;
		l2cap_pi(sk)->dcid = dcid;
		l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_CONNECT_PEND;

		/* Don't send a second Configure Request. */
		if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)
			break;

		l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(sk, req), req);
		l2cap_pi(sk)->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
		break;

	default:
		/* Any other result is a refusal. */
		l2cap_chan_del(sk, ECONNREFUSED);
		break;
	}

	bh_unlock_sock(sk);
	return 0;
}
3088
3089 static inline void set_default_fcs(struct l2cap_pinfo *pi)
3090 {
3091 /* FCS is enabled only in ERTM or streaming mode, if one or both
3092 * sides request it.
3093 */
3094 if (pi->mode != L2CAP_MODE_ERTM && pi->mode != L2CAP_MODE_STREAMING)
3095 pi->fcs = L2CAP_FCS_NONE;
3096 else if (!(pi->conf_state & L2CAP_CONF_NO_FCS_RECV))
3097 pi->fcs = L2CAP_FCS_CRC16;
3098 }
3099
/* Handle an incoming Configure Request.  Requests may arrive split
 * across several commands (continuation flag 0x0001): fragments are
 * accumulated in pi->conf_req until the final one, then parsed and
 * answered.  When both directions are configured the channel goes to
 * BT_CONNECTED. */
static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rsp[64];
	struct sock *sk;
	int len;

	dcid  = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	/* Returns with the socket bh-locked. */
	sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
	if (!sk)
		return -ENOENT;

	if (sk->sk_state == BT_DISCONN)
		goto unlock;

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(sk, rsp,
					L2CAP_CONF_REJECT, flags), rsp);
		goto unlock;
	}

	/* Store config. */
	memcpy(l2cap_pi(sk)->conf_req + l2cap_pi(sk)->conf_len, req->data, len);
	l2cap_pi(sk)->conf_len += len;

	if (flags & 0x0001) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(sk, rsp,
					L2CAP_CONF_SUCCESS, 0x0001), rsp);
		goto unlock;
	}

	/* Complete config. */
	len = l2cap_parse_conf_req(sk, rsp);
	if (len < 0) {
		/* No acceptable configuration: tear the channel down. */
		l2cap_send_disconn_req(conn, sk, ECONNRESET);
		goto unlock;
	}

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
	l2cap_pi(sk)->num_conf_rsp++;

	/* Reset config buffer. */
	l2cap_pi(sk)->conf_len = 0;

	if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))
		goto unlock;

	/* Both directions configured: bring the channel up. */
	if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
		set_default_fcs(l2cap_pi(sk));

		sk->sk_state = BT_CONNECTED;

		l2cap_pi(sk)->next_tx_seq = 0;
		l2cap_pi(sk)->expected_tx_seq = 0;
		__skb_queue_head_init(TX_QUEUE(sk));
		if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
			l2cap_ertm_init(sk);

		l2cap_chan_ready(sk);
		goto unlock;
	}

	/* Our own Configure Request hasn't gone out yet: send it now. */
	if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
		u8 buf[64];
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(sk, buf), buf);
		l2cap_pi(sk)->num_conf_req++;
	}

unlock:
	bh_unlock_sock(sk);
	return 0;
}
3183
3184 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3185 {
3186 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
3187 u16 scid, flags, result;
3188 struct sock *sk;
3189 int len = cmd->len - sizeof(*rsp);
3190
3191 scid = __le16_to_cpu(rsp->scid);
3192 flags = __le16_to_cpu(rsp->flags);
3193 result = __le16_to_cpu(rsp->result);
3194
3195 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
3196 scid, flags, result);
3197
3198 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
3199 if (!sk)
3200 return 0;
3201
3202 switch (result) {
3203 case L2CAP_CONF_SUCCESS:
3204 l2cap_conf_rfc_get(sk, rsp->data, len);
3205 break;
3206
3207 case L2CAP_CONF_UNACCEPT:
3208 if (l2cap_pi(sk)->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
3209 char req[64];
3210
3211 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
3212 l2cap_send_disconn_req(conn, sk, ECONNRESET);
3213 goto done;
3214 }
3215
3216 /* throw out any old stored conf requests */
3217 result = L2CAP_CONF_SUCCESS;
3218 len = l2cap_parse_conf_rsp(sk, rsp->data,
3219 len, req, &result);
3220 if (len < 0) {
3221 l2cap_send_disconn_req(conn, sk, ECONNRESET);
3222 goto done;
3223 }
3224
3225 l2cap_send_cmd(conn, l2cap_get_ident(conn),
3226 L2CAP_CONF_REQ, len, req);
3227 l2cap_pi(sk)->num_conf_req++;
3228 if (result != L2CAP_CONF_SUCCESS)
3229 goto done;
3230 break;
3231 }
3232
3233 default:
3234 sk->sk_err = ECONNRESET;
3235 l2cap_sock_set_timer(sk, HZ * 5);
3236 l2cap_send_disconn_req(conn, sk, ECONNRESET);
3237 goto done;
3238 }
3239
3240 if (flags & 0x01)
3241 goto done;
3242
3243 l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
3244
3245 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
3246 set_default_fcs(l2cap_pi(sk));
3247
3248 sk->sk_state = BT_CONNECTED;
3249 l2cap_pi(sk)->next_tx_seq = 0;
3250 l2cap_pi(sk)->expected_tx_seq = 0;
3251 __skb_queue_head_init(TX_QUEUE(sk));
3252 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
3253 l2cap_ertm_init(sk);
3254
3255 l2cap_chan_ready(sk);
3256 }
3257
3258 done:
3259 bh_unlock_sock(sk);
3260 return 0;
3261 }
3262
/* Handle an incoming Disconnection Request: acknowledge it with a
 * Disconnection Response, shut the socket down and remove the channel. */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct sock *sk;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	/* The peer's dcid is our scid; returns with the socket bh-locked. */
	sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
	if (!sk)
		return 0;

	/* Echo the CID pair back in the response. */
	rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
	rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	sk->sk_shutdown = SHUTDOWN_MASK;

	l2cap_chan_del(sk, ECONNRESET);
	bh_unlock_sock(sk);

	l2cap_sock_kill(sk);
	return 0;
}
3291
3292 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3293 {
3294 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
3295 u16 dcid, scid;
3296 struct sock *sk;
3297
3298 scid = __le16_to_cpu(rsp->scid);
3299 dcid = __le16_to_cpu(rsp->dcid);
3300
3301 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
3302
3303 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
3304 if (!sk)
3305 return 0;
3306
3307 l2cap_chan_del(sk, 0);
3308 bh_unlock_sock(sk);
3309
3310 l2cap_sock_kill(sk);
3311 return 0;
3312 }
3313
/* Handle an incoming Information Request.  Answers feature-mask and
 * fixed-channel queries; anything else gets a NOTSUPP response. */
static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_info_req *req = (struct l2cap_info_req *) data;
	u16 type;

	type = __le16_to_cpu(req->type);

	BT_DBG("type 0x%4.4x", type);

	if (type == L2CAP_IT_FEAT_MASK) {
		/* 4-byte rsp header + 4-byte feature mask. */
		u8 buf[8];
		u32 feat_mask = l2cap_feat_mask;
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
		rsp->type   = cpu_to_le16(L2CAP_IT_FEAT_MASK);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		if (!disable_ertm)
			feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
							 | L2CAP_FEAT_FCS;
		put_unaligned_le32(feat_mask, rsp->data);
		l2cap_send_cmd(conn, cmd->ident,
					L2CAP_INFO_RSP, sizeof(buf), buf);
	} else if (type == L2CAP_IT_FIXED_CHAN) {
		/* 4-byte rsp header + 8-byte fixed-channel bitmap at offset 4. */
		u8 buf[12];
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
		rsp->type   = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		memcpy(buf + 4, l2cap_fixed_chan, 8);
		l2cap_send_cmd(conn, cmd->ident,
					L2CAP_INFO_RSP, sizeof(buf), buf);
	} else {
		struct l2cap_info_rsp rsp;
		rsp.type   = cpu_to_le16(type);
		rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
		l2cap_send_cmd(conn, cmd->ident,
					L2CAP_INFO_RSP, sizeof(rsp), &rsp);
	}

	return 0;
}
3353
3354 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3355 {
3356 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
3357 u16 type, result;
3358
3359 type = __le16_to_cpu(rsp->type);
3360 result = __le16_to_cpu(rsp->result);
3361
3362 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
3363
3364 del_timer(&conn->info_timer);
3365
3366 if (result != L2CAP_IR_SUCCESS) {
3367 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3368 conn->info_ident = 0;
3369
3370 l2cap_conn_start(conn);
3371
3372 return 0;
3373 }
3374
3375 if (type == L2CAP_IT_FEAT_MASK) {
3376 conn->feat_mask = get_unaligned_le32(rsp->data);
3377
3378 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
3379 struct l2cap_info_req req;
3380 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3381
3382 conn->info_ident = l2cap_get_ident(conn);
3383
3384 l2cap_send_cmd(conn, conn->info_ident,
3385 L2CAP_INFO_REQ, sizeof(req), &req);
3386 } else {
3387 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3388 conn->info_ident = 0;
3389
3390 l2cap_conn_start(conn);
3391 }
3392 } else if (type == L2CAP_IT_FIXED_CHAN) {
3393 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3394 conn->info_ident = 0;
3395
3396 l2cap_conn_start(conn);
3397 }
3398
3399 return 0;
3400 }
3401
/* Demultiplex the signaling channel: iterate over the commands packed
 * into @skb, validate each header, and dispatch to the per-command
 * handler.  Errors are answered with a Command Reject.  Consumes @skb. */
static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
{
	u8 *data = skb->data;
	int len = skb->len;
	struct l2cap_cmd_hdr cmd;
	int err = 0;

	/* Give raw sockets a copy of the signaling traffic. */
	l2cap_raw_recv(conn, skb);

	while (len >= L2CAP_CMD_HDR_SIZE) {
		u16 cmd_len;
		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
		data += L2CAP_CMD_HDR_SIZE;
		len  -= L2CAP_CMD_HDR_SIZE;

		cmd_len = le16_to_cpu(cmd.len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);

		/* Reject truncated payloads and the reserved ident 0. */
		if (cmd_len > len || !cmd.ident) {
			BT_DBG("corrupted command");
			break;
		}

		switch (cmd.code) {
		case L2CAP_COMMAND_REJ:
			l2cap_command_rej(conn, &cmd, data);
			break;

		case L2CAP_CONN_REQ:
			err = l2cap_connect_req(conn, &cmd, data);
			break;

		case L2CAP_CONN_RSP:
			err = l2cap_connect_rsp(conn, &cmd, data);
			break;

		case L2CAP_CONF_REQ:
			err = l2cap_config_req(conn, &cmd, cmd_len, data);
			break;

		case L2CAP_CONF_RSP:
			err = l2cap_config_rsp(conn, &cmd, data);
			break;

		case L2CAP_DISCONN_REQ:
			err = l2cap_disconnect_req(conn, &cmd, data);
			break;

		case L2CAP_DISCONN_RSP:
			err = l2cap_disconnect_rsp(conn, &cmd, data);
			break;

		case L2CAP_ECHO_REQ:
			/* Echo the payload straight back. */
			l2cap_send_cmd(conn, cmd.ident, L2CAP_ECHO_RSP, cmd_len, data);
			break;

		case L2CAP_ECHO_RSP:
			break;

		case L2CAP_INFO_REQ:
			err = l2cap_information_req(conn, &cmd, data);
			break;

		case L2CAP_INFO_RSP:
			err = l2cap_information_rsp(conn, &cmd, data);
			break;

		default:
			BT_ERR("Unknown signaling command 0x%2.2x", cmd.code);
			err = -EINVAL;
			break;
		}

		if (err) {
			struct l2cap_cmd_rej rej;
			BT_DBG("error %d", err);

			/* FIXME: Map err to a valid reason */
			rej.reason = cpu_to_le16(0);
			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
		}

		data += cmd_len;
		len  -= cmd_len;
	}

	kfree_skb(skb);
}
3491
/* Verify (and strip) the trailing CRC16 FCS of an ERTM/streaming frame.
 * Returns 0 on success or when FCS is disabled, -EBADMSG on mismatch. */
static int l2cap_check_fcs(struct l2cap_pinfo *pi, struct sk_buff *skb)
{
	u16 our_fcs, rcv_fcs;
	/* FCS covers the basic L2CAP header plus the 2-byte control field. */
	int hdr_size = L2CAP_HDR_SIZE + 2;

	if (pi->fcs == L2CAP_FCS_CRC16) {
		/* Trim the FCS off first; the trimmed bytes are still in the
		 * buffer, so skb->data + skb->len now points at them. */
		skb_trim(skb, skb->len - 2);
		rcv_fcs = get_unaligned_le16(skb->data + skb->len);
		our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);

		if (our_fcs != rcv_fcs)
			return -EBADMSG;
	}
	return 0;
}
3507
/* After a poll/final exchange, resume transmission: send RNR if we are
 * locally busy, retransmit if the peer was busy, push pending I-frames,
 * and fall back to a plain RR if nothing at all was sent. */
static inline void l2cap_send_i_or_rr_or_rnr(struct sock *sk)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u16 control = 0;

	pi->frames_sent = 0;

	control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;

	if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
		/* Tell the peer we cannot receive right now. */
		control |= L2CAP_SUPER_RCV_NOT_READY;
		l2cap_send_sframe(pi, control);
		pi->conn_state |= L2CAP_CONN_RNR_SENT;
	}

	if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY)
		l2cap_retransmit_frames(sk);

	l2cap_ertm_send(sk);

	/* Nothing went out and we're not busy: ack with a bare RR. */
	if (!(pi->conn_state & L2CAP_CONN_LOCAL_BUSY) &&
			pi->frames_sent == 0) {
		control |= L2CAP_SUPER_RCV_READY;
		l2cap_send_sframe(pi, control);
	}
}
3534
/* Insert an out-of-sequence I-frame into the SREJ queue, keeping the
 * queue ordered by tx_seq distance from buffer_seq (sequence numbers
 * are modulo 64).  Returns -EINVAL on a duplicate tx_seq. */
static int l2cap_add_to_srej_queue(struct sock *sk, struct sk_buff *skb, u8 tx_seq, u8 sar)
{
	struct sk_buff *next_skb;
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	int tx_seq_offset, next_tx_seq_offset;

	bt_cb(skb)->tx_seq = tx_seq;
	bt_cb(skb)->sar = sar;

	next_skb = skb_peek(SREJ_QUEUE(sk));
	if (!next_skb) {
		__skb_queue_tail(SREJ_QUEUE(sk), skb);
		return 0;
	}

	/* Distance of the new frame from the receive window base. */
	tx_seq_offset = (tx_seq - pi->buffer_seq) % 64;
	if (tx_seq_offset < 0)
		tx_seq_offset += 64;

	do {
		if (bt_cb(next_skb)->tx_seq == tx_seq)
			return -EINVAL;

		next_tx_seq_offset = (bt_cb(next_skb)->tx_seq -
						pi->buffer_seq) % 64;
		if (next_tx_seq_offset < 0)
			next_tx_seq_offset += 64;

		/* First queued frame that sorts after us: insert before it. */
		if (next_tx_seq_offset > tx_seq_offset) {
			__skb_queue_before(SREJ_QUEUE(sk), next_skb, skb);
			return 0;
		}

		if (skb_queue_is_last(SREJ_QUEUE(sk), next_skb))
			break;

	} while ((next_skb = skb_queue_next(SREJ_QUEUE(sk), next_skb)));

	/* Largest offset so far: append at the tail. */
	__skb_queue_tail(SREJ_QUEUE(sk), skb);

	return 0;
}
3577
/* Reassemble an ERTM SDU from its SAR fragments and deliver it to the
 * socket.  Returns 0 when the frame was consumed (delivered or
 * dropped), -ENOMEM / the queueing error when delivery must be retried
 * (caller keeps local-busy state).
 * Note: the drop label intentionally falls through to disconnect —
 * any SAR protocol violation also tears the channel down. */
static int l2cap_ertm_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct sk_buff *_skb;
	int err;

	switch (control & L2CAP_CTRL_SAR) {
	case L2CAP_SDU_UNSEGMENTED:
		/* An unsegmented frame may not interrupt an ongoing SAR. */
		if (pi->conn_state & L2CAP_CONN_SAR_SDU)
			goto drop;

		err = sock_queue_rcv_skb(sk, skb);
		if (!err)
			return err;

		break;

	case L2CAP_SDU_START:
		if (pi->conn_state & L2CAP_CONN_SAR_SDU)
			goto drop;

		/* First two bytes of a START fragment carry the SDU length. */
		pi->sdu_len = get_unaligned_le16(skb->data);

		if (pi->sdu_len > pi->imtu)
			goto disconnect;

		pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
		if (!pi->sdu)
			return -ENOMEM;

		/* pull sdu_len bytes only after alloc, because of Local Busy
		 * condition we have to be sure that this will be executed
		 * only once, i.e., when alloc does not fail */
		skb_pull(skb, 2);

		memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);

		pi->conn_state |= L2CAP_CONN_SAR_SDU;
		pi->partial_sdu_len = skb->len;
		break;

	case L2CAP_SDU_CONTINUE:
		if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
			goto disconnect;

		if (!pi->sdu)
			goto disconnect;

		pi->partial_sdu_len += skb->len;
		if (pi->partial_sdu_len > pi->sdu_len)
			goto drop;

		memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);

		break;

	case L2CAP_SDU_END:
		if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
			goto disconnect;

		if (!pi->sdu)
			goto disconnect;

		/* On a retry the payload was already appended; only copy
		 * and re-check sizes the first time around. */
		if (!(pi->conn_state & L2CAP_CONN_SAR_RETRY)) {
			pi->partial_sdu_len += skb->len;

			if (pi->partial_sdu_len > pi->imtu)
				goto drop;

			if (pi->partial_sdu_len != pi->sdu_len)
				goto drop;

			memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
		}

		_skb = skb_clone(pi->sdu, GFP_ATOMIC);
		if (!_skb) {
			pi->conn_state |= L2CAP_CONN_SAR_RETRY;
			return -ENOMEM;
		}

		err = sock_queue_rcv_skb(sk, _skb);
		if (err < 0) {
			/* Receiver busy: keep the SDU and retry later. */
			kfree_skb(_skb);
			pi->conn_state |= L2CAP_CONN_SAR_RETRY;
			return err;
		}

		pi->conn_state &= ~L2CAP_CONN_SAR_RETRY;
		pi->conn_state &= ~L2CAP_CONN_SAR_SDU;

		kfree_skb(pi->sdu);
		break;
	}

	kfree_skb(skb);
	return 0;

drop:
	kfree_skb(pi->sdu);
	pi->sdu = NULL;

disconnect:
	l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
	kfree_skb(skb);
	return 0;
}
3685
/* Try to drain the local-busy queue into the socket.  Returns -EBUSY if
 * the receiver is still congested (frame is requeued), otherwise clears
 * the local-busy state and, if an RNR was sent, polls the peer. */
static int l2cap_try_push_rx_skb(struct sock *sk)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct sk_buff *skb;
	u16 control;
	int err;

	while ((skb = skb_dequeue(BUSY_QUEUE(sk)))) {
		control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
		err = l2cap_ertm_reassembly_sdu(sk, skb, control);
		if (err < 0) {
			/* Still busy: put the frame back and try later. */
			skb_queue_head(BUSY_QUEUE(sk), skb);
			return -EBUSY;
		}

		pi->buffer_seq = (pi->buffer_seq + 1) % 64;
	}

	if (!(pi->conn_state & L2CAP_CONN_RNR_SENT))
		goto done;

	/* We told the peer we were busy: poll it to resume transmission. */
	control = pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
	control |= L2CAP_SUPER_RCV_READY | L2CAP_CTRL_POLL;
	l2cap_send_sframe(pi, control);
	l2cap_pi(sk)->retry_count = 1;

	del_timer(&pi->retrans_timer);
	__mod_monitor_timer();

	l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;

done:
	pi->conn_state &= ~L2CAP_CONN_LOCAL_BUSY;
	pi->conn_state &= ~L2CAP_CONN_RNR_SENT;

	BT_DBG("sk %p, Exit local busy", sk);

	return 0;
}
3725
/* Deferred worker that keeps retrying to push the buffered receive
 * queue while the channel is in the local-busy state.  Sleeps in HZ/5
 * slices between attempts and gives up (disconnecting the channel)
 * after L2CAP_LOCAL_BUSY_TRIES rounds. */
static void l2cap_busy_work(struct work_struct *work)
{
	DECLARE_WAITQUEUE(wait, current);
	struct l2cap_pinfo *pi =
		container_of(work, struct l2cap_pinfo, busy_work);
	struct sock *sk = (struct sock *)pi;
	int n_tries = 0, timeo = HZ/5, err;
	struct sk_buff *skb;

	lock_sock(sk);

	add_wait_queue(sk_sleep(sk), &wait);
	while ((skb = skb_peek(BUSY_QUEUE(sk)))) {
		set_current_state(TASK_INTERRUPTIBLE);

		if (n_tries++ > L2CAP_LOCAL_BUSY_TRIES) {
			err = -EBUSY;
			l2cap_send_disconn_req(pi->conn, sk, EBUSY);
			break;
		}

		if (!timeo)
			timeo = HZ/5;

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}

		/* Drop the socket lock while sleeping so the receive
		 * path can make progress and drain the busy queue. */
		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);

		err = sock_error(sk);
		if (err)
			break;

		if (l2cap_try_push_rx_skb(sk) == 0)
			break;
	}

	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);

	release_sock(sk);
}
3772
3773 static int l2cap_push_rx_skb(struct sock *sk, struct sk_buff *skb, u16 control)
3774 {
3775 struct l2cap_pinfo *pi = l2cap_pi(sk);
3776 int sctrl, err;
3777
3778 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
3779 bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
3780 __skb_queue_tail(BUSY_QUEUE(sk), skb);
3781 return l2cap_try_push_rx_skb(sk);
3782
3783
3784 }
3785
3786 err = l2cap_ertm_reassembly_sdu(sk, skb, control);
3787 if (err >= 0) {
3788 pi->buffer_seq = (pi->buffer_seq + 1) % 64;
3789 return err;
3790 }
3791
3792 /* Busy Condition */
3793 BT_DBG("sk %p, Enter local busy", sk);
3794
3795 pi->conn_state |= L2CAP_CONN_LOCAL_BUSY;
3796 bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
3797 __skb_queue_tail(BUSY_QUEUE(sk), skb);
3798
3799 sctrl = pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3800 sctrl |= L2CAP_SUPER_RCV_NOT_READY;
3801 l2cap_send_sframe(pi, sctrl);
3802
3803 pi->conn_state |= L2CAP_CONN_RNR_SENT;
3804
3805 del_timer(&pi->ack_timer);
3806
3807 queue_work(_busy_wq, &pi->busy_work);
3808
3809 return err;
3810 }
3811
3812 static int l2cap_streaming_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
3813 {
3814 struct l2cap_pinfo *pi = l2cap_pi(sk);
3815 struct sk_buff *_skb;
3816 int err = -EINVAL;
3817
3818 /*
3819 * TODO: We have to notify the userland if some data is lost with the
3820 * Streaming Mode.
3821 */
3822
3823 switch (control & L2CAP_CTRL_SAR) {
3824 case L2CAP_SDU_UNSEGMENTED:
3825 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
3826 kfree_skb(pi->sdu);
3827 break;
3828 }
3829
3830 err = sock_queue_rcv_skb(sk, skb);
3831 if (!err)
3832 return 0;
3833
3834 break;
3835
3836 case L2CAP_SDU_START:
3837 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
3838 kfree_skb(pi->sdu);
3839 break;
3840 }
3841
3842 pi->sdu_len = get_unaligned_le16(skb->data);
3843 skb_pull(skb, 2);
3844
3845 if (pi->sdu_len > pi->imtu) {
3846 err = -EMSGSIZE;
3847 break;
3848 }
3849
3850 pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
3851 if (!pi->sdu) {
3852 err = -ENOMEM;
3853 break;
3854 }
3855
3856 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3857
3858 pi->conn_state |= L2CAP_CONN_SAR_SDU;
3859 pi->partial_sdu_len = skb->len;
3860 err = 0;
3861 break;
3862
3863 case L2CAP_SDU_CONTINUE:
3864 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3865 break;
3866
3867 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3868
3869 pi->partial_sdu_len += skb->len;
3870 if (pi->partial_sdu_len > pi->sdu_len)
3871 kfree_skb(pi->sdu);
3872 else
3873 err = 0;
3874
3875 break;
3876
3877 case L2CAP_SDU_END:
3878 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3879 break;
3880
3881 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3882
3883 pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
3884 pi->partial_sdu_len += skb->len;
3885
3886 if (pi->partial_sdu_len > pi->imtu)
3887 goto drop;
3888
3889 if (pi->partial_sdu_len == pi->sdu_len) {
3890 _skb = skb_clone(pi->sdu, GFP_ATOMIC);
3891 err = sock_queue_rcv_skb(sk, _skb);
3892 if (err < 0)
3893 kfree_skb(_skb);
3894 }
3895 err = 0;
3896
3897 drop:
3898 kfree_skb(pi->sdu);
3899 break;
3900 }
3901
3902 kfree_skb(skb);
3903 return err;
3904 }
3905
3906 static void l2cap_check_srej_gap(struct sock *sk, u8 tx_seq)
3907 {
3908 struct sk_buff *skb;
3909 u16 control;
3910
3911 while ((skb = skb_peek(SREJ_QUEUE(sk)))) {
3912 if (bt_cb(skb)->tx_seq != tx_seq)
3913 break;
3914
3915 skb = skb_dequeue(SREJ_QUEUE(sk));
3916 control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
3917 l2cap_ertm_reassembly_sdu(sk, skb, control);
3918 l2cap_pi(sk)->buffer_seq_srej =
3919 (l2cap_pi(sk)->buffer_seq_srej + 1) % 64;
3920 tx_seq = (tx_seq + 1) % 64;
3921 }
3922 }
3923
3924 static void l2cap_resend_srejframe(struct sock *sk, u8 tx_seq)
3925 {
3926 struct l2cap_pinfo *pi = l2cap_pi(sk);
3927 struct srej_list *l, *tmp;
3928 u16 control;
3929
3930 list_for_each_entry_safe(l, tmp, SREJ_LIST(sk), list) {
3931 if (l->tx_seq == tx_seq) {
3932 list_del(&l->list);
3933 kfree(l);
3934 return;
3935 }
3936 control = L2CAP_SUPER_SELECT_REJECT;
3937 control |= l->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3938 l2cap_send_sframe(pi, control);
3939 list_del(&l->list);
3940 list_add_tail(&l->list, SREJ_LIST(sk));
3941 }
3942 }
3943
3944 static void l2cap_send_srejframe(struct sock *sk, u8 tx_seq)
3945 {
3946 struct l2cap_pinfo *pi = l2cap_pi(sk);
3947 struct srej_list *new;
3948 u16 control;
3949
3950 while (tx_seq != pi->expected_tx_seq) {
3951 control = L2CAP_SUPER_SELECT_REJECT;
3952 control |= pi->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3953 l2cap_send_sframe(pi, control);
3954
3955 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3956 new->tx_seq = pi->expected_tx_seq;
3957 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3958 list_add_tail(&new->list, SREJ_LIST(sk));
3959 }
3960 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3961 }
3962
3963 static inline int l2cap_data_channel_iframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
3964 {
3965 struct l2cap_pinfo *pi = l2cap_pi(sk);
3966 u8 tx_seq = __get_txseq(rx_control);
3967 u8 req_seq = __get_reqseq(rx_control);
3968 u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT;
3969 int tx_seq_offset, expected_tx_seq_offset;
3970 int num_to_ack = (pi->tx_win/6) + 1;
3971 int err = 0;
3972
3973 BT_DBG("sk %p len %d tx_seq %d rx_control 0x%4.4x", sk, skb->len, tx_seq,
3974 rx_control);
3975
3976 if (L2CAP_CTRL_FINAL & rx_control &&
3977 l2cap_pi(sk)->conn_state & L2CAP_CONN_WAIT_F) {
3978 del_timer(&pi->monitor_timer);
3979 if (pi->unacked_frames > 0)
3980 __mod_retrans_timer();
3981 pi->conn_state &= ~L2CAP_CONN_WAIT_F;
3982 }
3983
3984 pi->expected_ack_seq = req_seq;
3985 l2cap_drop_acked_frames(sk);
3986
3987 if (tx_seq == pi->expected_tx_seq)
3988 goto expected;
3989
3990 tx_seq_offset = (tx_seq - pi->buffer_seq) % 64;
3991 if (tx_seq_offset < 0)
3992 tx_seq_offset += 64;
3993
3994 /* invalid tx_seq */
3995 if (tx_seq_offset >= pi->tx_win) {
3996 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
3997 goto drop;
3998 }
3999
4000 if (pi->conn_state == L2CAP_CONN_LOCAL_BUSY)
4001 goto drop;
4002
4003 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
4004 struct srej_list *first;
4005
4006 first = list_first_entry(SREJ_LIST(sk),
4007 struct srej_list, list);
4008 if (tx_seq == first->tx_seq) {
4009 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
4010 l2cap_check_srej_gap(sk, tx_seq);
4011
4012 list_del(&first->list);
4013 kfree(first);
4014
4015 if (list_empty(SREJ_LIST(sk))) {
4016 pi->buffer_seq = pi->buffer_seq_srej;
4017 pi->conn_state &= ~L2CAP_CONN_SREJ_SENT;
4018 l2cap_send_ack(pi);
4019 BT_DBG("sk %p, Exit SREJ_SENT", sk);
4020 }
4021 } else {
4022 struct srej_list *l;
4023
4024 /* duplicated tx_seq */
4025 if (l2cap_add_to_srej_queue(sk, skb, tx_seq, sar) < 0)
4026 goto drop;
4027
4028 list_for_each_entry(l, SREJ_LIST(sk), list) {
4029 if (l->tx_seq == tx_seq) {
4030 l2cap_resend_srejframe(sk, tx_seq);
4031 return 0;
4032 }
4033 }
4034 l2cap_send_srejframe(sk, tx_seq);
4035 }
4036 } else {
4037 expected_tx_seq_offset =
4038 (pi->expected_tx_seq - pi->buffer_seq) % 64;
4039 if (expected_tx_seq_offset < 0)
4040 expected_tx_seq_offset += 64;
4041
4042 /* duplicated tx_seq */
4043 if (tx_seq_offset < expected_tx_seq_offset)
4044 goto drop;
4045
4046 pi->conn_state |= L2CAP_CONN_SREJ_SENT;
4047
4048 BT_DBG("sk %p, Enter SREJ", sk);
4049
4050 INIT_LIST_HEAD(SREJ_LIST(sk));
4051 pi->buffer_seq_srej = pi->buffer_seq;
4052
4053 __skb_queue_head_init(SREJ_QUEUE(sk));
4054 __skb_queue_head_init(BUSY_QUEUE(sk));
4055 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
4056
4057 pi->conn_state |= L2CAP_CONN_SEND_PBIT;
4058
4059 l2cap_send_srejframe(sk, tx_seq);
4060
4061 del_timer(&pi->ack_timer);
4062 }
4063 return 0;
4064
4065 expected:
4066 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
4067
4068 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
4069 bt_cb(skb)->tx_seq = tx_seq;
4070 bt_cb(skb)->sar = sar;
4071 __skb_queue_tail(SREJ_QUEUE(sk), skb);
4072 return 0;
4073 }
4074
4075 err = l2cap_push_rx_skb(sk, skb, rx_control);
4076 if (err < 0)
4077 return 0;
4078
4079 if (rx_control & L2CAP_CTRL_FINAL) {
4080 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
4081 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
4082 else
4083 l2cap_retransmit_frames(sk);
4084 }
4085
4086 __mod_ack_timer();
4087
4088 pi->num_acked = (pi->num_acked + 1) % num_to_ack;
4089 if (pi->num_acked == num_to_ack - 1)
4090 l2cap_send_ack(pi);
4091
4092 return 0;
4093
4094 drop:
4095 kfree_skb(skb);
4096 return 0;
4097 }
4098
/* Handle a Receiver Ready (RR) S-frame: acknowledge outstanding
 * I-frames and react to the Poll/Final bits per the ERTM state
 * machine. */
static inline void l2cap_data_channel_rrframe(struct sock *sk, u16 rx_control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);

	BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, __get_reqseq(rx_control),
						rx_control);

	pi->expected_ack_seq = __get_reqseq(rx_control);
	l2cap_drop_acked_frames(sk);

	if (rx_control & L2CAP_CTRL_POLL) {
		/* Peer polls us: our answer must carry the F-bit. */
		pi->conn_state |= L2CAP_CONN_SEND_FBIT;
		if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
			if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
					(pi->unacked_frames > 0))
				__mod_retrans_timer();

			pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
			l2cap_send_srejtail(sk);
		} else {
			l2cap_send_i_or_rr_or_rnr(sk);
		}

	} else if (rx_control & L2CAP_CTRL_FINAL) {
		/* Answer to a poll of ours. */
		pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

		/* REJ_ACT means a pending REJ already triggered the
		 * retransmission; don't do it twice. */
		if (pi->conn_state & L2CAP_CONN_REJ_ACT)
			pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
		else
			l2cap_retransmit_frames(sk);

	} else {
		if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
				(pi->unacked_frames > 0))
			__mod_retrans_timer();

		pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
		if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
			l2cap_send_ack(pi);
		} else {
			l2cap_ertm_send(sk);
		}
	}
}
4143
4144 static inline void l2cap_data_channel_rejframe(struct sock *sk, u16 rx_control)
4145 {
4146 struct l2cap_pinfo *pi = l2cap_pi(sk);
4147 u8 tx_seq = __get_reqseq(rx_control);
4148
4149 BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control);
4150
4151 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
4152
4153 pi->expected_ack_seq = tx_seq;
4154 l2cap_drop_acked_frames(sk);
4155
4156 if (rx_control & L2CAP_CTRL_FINAL) {
4157 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
4158 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
4159 else
4160 l2cap_retransmit_frames(sk);
4161 } else {
4162 l2cap_retransmit_frames(sk);
4163
4164 if (pi->conn_state & L2CAP_CONN_WAIT_F)
4165 pi->conn_state |= L2CAP_CONN_REJ_ACT;
4166 }
4167 }
/* Handle a Selective Reject (SREJ) S-frame: the peer asks for the
 * retransmission of one specific I-frame (req_seq). */
static inline void l2cap_data_channel_srejframe(struct sock *sk, u16 rx_control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u8 tx_seq = __get_reqseq(rx_control);

	BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control);

	pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

	if (rx_control & L2CAP_CTRL_POLL) {
		pi->expected_ack_seq = tx_seq;
		l2cap_drop_acked_frames(sk);

		/* Poll demands the F-bit in our answer. */
		pi->conn_state |= L2CAP_CONN_SEND_FBIT;
		l2cap_retransmit_one_frame(sk, tx_seq);

		l2cap_ertm_send(sk);

		if (pi->conn_state & L2CAP_CONN_WAIT_F) {
			/* Remember req_seq so a later SREJ+F for the
			 * same frame is not retransmitted twice. */
			pi->srej_save_reqseq = tx_seq;
			pi->conn_state |= L2CAP_CONN_SREJ_ACT;
		}
	} else if (rx_control & L2CAP_CTRL_FINAL) {
		if ((pi->conn_state & L2CAP_CONN_SREJ_ACT) &&
				pi->srej_save_reqseq == tx_seq)
			pi->conn_state &= ~L2CAP_CONN_SREJ_ACT;
		else
			l2cap_retransmit_one_frame(sk, tx_seq);
	} else {
		l2cap_retransmit_one_frame(sk, tx_seq);
		if (pi->conn_state & L2CAP_CONN_WAIT_F) {
			pi->srej_save_reqseq = tx_seq;
			pi->conn_state |= L2CAP_CONN_SREJ_ACT;
		}
	}
}
4204
/* Handle a Receiver Not Ready (RNR) S-frame: the peer signals it
 * cannot accept more I-frames for now. */
static inline void l2cap_data_channel_rnrframe(struct sock *sk, u16 rx_control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u8 tx_seq = __get_reqseq(rx_control);

	BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control);

	pi->conn_state |= L2CAP_CONN_REMOTE_BUSY;
	pi->expected_ack_seq = tx_seq;
	l2cap_drop_acked_frames(sk);

	if (rx_control & L2CAP_CTRL_POLL)
		pi->conn_state |= L2CAP_CONN_SEND_FBIT;

	if (!(pi->conn_state & L2CAP_CONN_SREJ_SENT)) {
		/* No SREJ recovery pending: just stop retransmitting
		 * and, if polled, answer immediately with the F-bit. */
		del_timer(&pi->retrans_timer);
		if (rx_control & L2CAP_CTRL_POLL)
			l2cap_send_rr_or_rnr(pi, L2CAP_CTRL_FINAL);
		return;
	}

	if (rx_control & L2CAP_CTRL_POLL)
		l2cap_send_srejtail(sk);
	else
		l2cap_send_sframe(pi, L2CAP_SUPER_RCV_READY);
}
4231
4232 static inline int l2cap_data_channel_sframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
4233 {
4234 BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);
4235
4236 if (L2CAP_CTRL_FINAL & rx_control &&
4237 l2cap_pi(sk)->conn_state & L2CAP_CONN_WAIT_F) {
4238 del_timer(&l2cap_pi(sk)->monitor_timer);
4239 if (l2cap_pi(sk)->unacked_frames > 0)
4240 __mod_retrans_timer();
4241 l2cap_pi(sk)->conn_state &= ~L2CAP_CONN_WAIT_F;
4242 }
4243
4244 switch (rx_control & L2CAP_CTRL_SUPERVISE) {
4245 case L2CAP_SUPER_RCV_READY:
4246 l2cap_data_channel_rrframe(sk, rx_control);
4247 break;
4248
4249 case L2CAP_SUPER_REJECT:
4250 l2cap_data_channel_rejframe(sk, rx_control);
4251 break;
4252
4253 case L2CAP_SUPER_SELECT_REJECT:
4254 l2cap_data_channel_srejframe(sk, rx_control);
4255 break;
4256
4257 case L2CAP_SUPER_RCV_NOT_READY:
4258 l2cap_data_channel_rnrframe(sk, rx_control);
4259 break;
4260 }
4261
4262 kfree_skb(skb);
4263 return 0;
4264 }
4265
/* Validate and dispatch one ERTM PDU: strip the control field, verify
 * FCS, payload length and the req_seq window, then hand the frame to
 * the I-frame or S-frame handler.  Always consumes skb. */
static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u16 control;
	u8 req_seq;
	int len, next_tx_seq_offset, req_seq_offset;

	control = get_unaligned_le16(skb->data);
	skb_pull(skb, 2);
	len = skb->len;

	/*
	 * We can just drop the corrupted I-frame here.
	 * Receiver will miss it and start proper recovery
	 * procedures and ask retransmission.
	 */
	if (l2cap_check_fcs(pi, skb))
		goto drop;

	/* A start-of-SDU I-frame carries a 2-byte SDU length field
	 * which is not payload. */
	if (__is_sar_start(control) && __is_iframe(control))
		len -= 2;

	/* Neither is the CRC16 FCS trailer. */
	if (pi->fcs == L2CAP_FCS_CRC16)
		len -= 2;

	if (len > pi->mps) {
		l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
		goto drop;
	}

	req_seq = __get_reqseq(control);
	req_seq_offset = (req_seq - pi->expected_ack_seq) % 64;
	if (req_seq_offset < 0)
		req_seq_offset += 64;

	next_tx_seq_offset =
		(pi->next_tx_seq - pi->expected_ack_seq) % 64;
	if (next_tx_seq_offset < 0)
		next_tx_seq_offset += 64;

	/* check for invalid req-seq */
	if (req_seq_offset > next_tx_seq_offset) {
		l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
		goto drop;
	}

	if (__is_iframe(control)) {
		/* len went negative if the frame was too short to hold
		 * the fields subtracted above. */
		if (len < 0) {
			l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
			goto drop;
		}

		l2cap_data_channel_iframe(sk, control, skb);
	} else {
		/* S-frames carry no payload at all. */
		if (len != 0) {
			BT_ERR("%d", len);
			l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
			goto drop;
		}

		l2cap_data_channel_sframe(sk, control, skb);
	}

	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
4335
/* Deliver one complete L2CAP frame on a connection-oriented channel.
 * The CID lookup returns the owning socket bh-locked; it is unlocked
 * at 'done' before returning. */
static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
{
	struct sock *sk;
	struct l2cap_pinfo *pi;
	u16 control;
	u8 tx_seq;
	int len;

	sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
	if (!sk) {
		BT_DBG("unknown cid 0x%4.4x", cid);
		goto drop;
	}

	pi = l2cap_pi(sk);

	BT_DBG("sk %p, len %d", sk, skb->len);

	if (sk->sk_state != BT_CONNECTED)
		goto drop;

	switch (pi->mode) {
	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism. */

		if (pi->imtu < skb->len)
			goto drop;

		if (!sock_queue_rcv_skb(sk, skb))
			goto done;
		break;

	case L2CAP_MODE_ERTM:
		if (!sock_owned_by_user(sk)) {
			l2cap_ertm_data_rcv(sk, skb);
		} else {
			/* Socket busy in user context: queue to the
			 * backlog, processed at release_sock() time. */
			if (sk_add_backlog(sk, skb))
				goto drop;
		}

		goto done;

	case L2CAP_MODE_STREAMING:
		control = get_unaligned_le16(skb->data);
		skb_pull(skb, 2);
		len = skb->len;

		if (l2cap_check_fcs(pi, skb))
			goto drop;

		/* Start-of-SDU frames carry a 2-byte SDU length. */
		if (__is_sar_start(control))
			len -= 2;

		if (pi->fcs == L2CAP_FCS_CRC16)
			len -= 2;

		/* S-frames are not valid in Streaming Mode. */
		if (len > pi->mps || len < 0 || __is_sframe(control))
			goto drop;

		tx_seq = __get_txseq(control);

		/* Lost frames just advance the expectation; streaming
		 * mode never retransmits. */
		if (pi->expected_tx_seq == tx_seq)
			pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
		else
			pi->expected_tx_seq = (tx_seq + 1) % 64;

		l2cap_streaming_reassembly_sdu(sk, skb, control);

		goto done;

	default:
		BT_DBG("sk %p: bad mode 0x%2.2x", sk, pi->mode);
		break;
	}

drop:
	kfree_skb(skb);

done:
	if (sk)
		bh_unlock_sock(sk);

	return 0;
}
4423
4424 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
4425 {
4426 struct sock *sk;
4427
4428 sk = l2cap_get_sock_by_psm(0, psm, conn->src);
4429 if (!sk)
4430 goto drop;
4431
4432 BT_DBG("sk %p, len %d", sk, skb->len);
4433
4434 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
4435 goto drop;
4436
4437 if (l2cap_pi(sk)->imtu < skb->len)
4438 goto drop;
4439
4440 if (!sock_queue_rcv_skb(sk, skb))
4441 goto done;
4442
4443 drop:
4444 kfree_skb(skb);
4445
4446 done:
4447 if (sk)
4448 bh_unlock_sock(sk);
4449 return 0;
4450 }
4451
4452 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
4453 {
4454 struct l2cap_hdr *lh = (void *) skb->data;
4455 u16 cid, len;
4456 __le16 psm;
4457
4458 skb_pull(skb, L2CAP_HDR_SIZE);
4459 cid = __le16_to_cpu(lh->cid);
4460 len = __le16_to_cpu(lh->len);
4461
4462 if (len != skb->len) {
4463 kfree_skb(skb);
4464 return;
4465 }
4466
4467 BT_DBG("len %d, cid 0x%4.4x", len, cid);
4468
4469 switch (cid) {
4470 case L2CAP_CID_SIGNALING:
4471 l2cap_sig_channel(conn, skb);
4472 break;
4473
4474 case L2CAP_CID_CONN_LESS:
4475 psm = get_unaligned_le16(skb->data);
4476 skb_pull(skb, 2);
4477 l2cap_conless_channel(conn, psm, skb);
4478 break;
4479
4480 default:
4481 l2cap_data_channel(conn, cid, skb);
4482 break;
4483 }
4484 }
4485
4486 /* ---- L2CAP interface with lower layer (HCI) ---- */
4487
4488 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
4489 {
4490 int exact = 0, lm1 = 0, lm2 = 0;
4491 register struct sock *sk;
4492 struct hlist_node *node;
4493
4494 if (type != ACL_LINK)
4495 return -EINVAL;
4496
4497 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
4498
4499 /* Find listening sockets and check their link_mode */
4500 read_lock(&l2cap_sk_list.lock);
4501 sk_for_each(sk, node, &l2cap_sk_list.head) {
4502 if (sk->sk_state != BT_LISTEN)
4503 continue;
4504
4505 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
4506 lm1 |= HCI_LM_ACCEPT;
4507 if (l2cap_pi(sk)->role_switch)
4508 lm1 |= HCI_LM_MASTER;
4509 exact++;
4510 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
4511 lm2 |= HCI_LM_ACCEPT;
4512 if (l2cap_pi(sk)->role_switch)
4513 lm2 |= HCI_LM_MASTER;
4514 }
4515 }
4516 read_unlock(&l2cap_sk_list.lock);
4517
4518 return exact ? lm1 : lm2;
4519 }
4520
4521 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
4522 {
4523 struct l2cap_conn *conn;
4524
4525 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
4526
4527 if (hcon->type != ACL_LINK)
4528 return -EINVAL;
4529
4530 if (!status) {
4531 conn = l2cap_conn_add(hcon, status);
4532 if (conn)
4533 l2cap_conn_ready(conn);
4534 } else
4535 l2cap_conn_del(hcon, bt_err(status));
4536
4537 return 0;
4538 }
4539
4540 static int l2cap_disconn_ind(struct hci_conn *hcon)
4541 {
4542 struct l2cap_conn *conn = hcon->l2cap_data;
4543
4544 BT_DBG("hcon %p", hcon);
4545
4546 if (hcon->type != ACL_LINK || !conn)
4547 return 0x13;
4548
4549 return conn->disc_reason;
4550 }
4551
/* HCI callback: the ACL link has been disconnected; release all
 * L2CAP state attached to it. */
static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
{
	BT_DBG("hcon %p reason %d", hcon, reason);

	if (hcon->type != ACL_LINK)
		return -EINVAL;

	l2cap_conn_del(hcon, bt_err(reason));

	return 0;
}
4563
4564 static inline void l2cap_check_encryption(struct sock *sk, u8 encrypt)
4565 {
4566 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM)
4567 return;
4568
4569 if (encrypt == 0x00) {
4570 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM) {
4571 l2cap_sock_clear_timer(sk);
4572 l2cap_sock_set_timer(sk, HZ * 5);
4573 } else if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
4574 __l2cap_sock_close(sk, ECONNREFUSED);
4575 } else {
4576 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM)
4577 l2cap_sock_clear_timer(sk);
4578 }
4579 }
4580
/* HCI callback: result of an authentication/encryption procedure on
 * the ACL link.  Walk every channel on the connection and advance its
 * connect state machine accordingly. */
static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_chan_list *l;
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct sock *sk;

	if (!conn)
		return 0;

	l = &conn->chan_list;

	BT_DBG("conn %p", conn);

	read_lock(&l->lock);

	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		bh_lock_sock(sk);

		/* Channels with a connect request already in flight are
		 * handled when the connect response arrives. */
		if (l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND) {
			bh_unlock_sock(sk);
			continue;
		}

		if (!status && (sk->sk_state == BT_CONNECTED ||
						sk->sk_state == BT_CONFIG)) {
			l2cap_check_encryption(sk, encrypt);
			bh_unlock_sock(sk);
			continue;
		}

		if (sk->sk_state == BT_CONNECT) {
			if (!status) {
				/* Security done: send the deferred
				 * Connection Request. */
				struct l2cap_conn_req req;
				req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
				req.psm = l2cap_pi(sk)->psm;

				l2cap_pi(sk)->ident = l2cap_get_ident(conn);
				l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;

				l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_REQ, sizeof(req), &req);
			} else {
				/* Security failed: schedule teardown. */
				l2cap_sock_clear_timer(sk);
				l2cap_sock_set_timer(sk, HZ / 10);
			}
		} else if (sk->sk_state == BT_CONNECT2) {
			struct l2cap_conn_rsp rsp;
			__u16 result;

			if (!status) {
				sk->sk_state = BT_CONFIG;
				result = L2CAP_CR_SUCCESS;
			} else {
				/* Refuse the incoming connection. */
				sk->sk_state = BT_DISCONN;
				l2cap_sock_set_timer(sk, HZ / 10);
				result = L2CAP_CR_SEC_BLOCK;
			}

			rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
			rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
			rsp.result = cpu_to_le16(result);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_RSP, sizeof(rsp), &rsp);
		}

		bh_unlock_sock(sk);
	}

	read_unlock(&l->lock);

	return 0;
}
4654
4655 static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
4656 {
4657 struct l2cap_conn *conn = hcon->l2cap_data;
4658
4659 if (!conn && !(conn = l2cap_conn_add(hcon, 0)))
4660 goto drop;
4661
4662 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
4663
4664 if (flags & ACL_START) {
4665 struct l2cap_hdr *hdr;
4666 struct sock *sk;
4667 u16 cid;
4668 int len;
4669
4670 if (conn->rx_len) {
4671 BT_ERR("Unexpected start frame (len %d)", skb->len);
4672 kfree_skb(conn->rx_skb);
4673 conn->rx_skb = NULL;
4674 conn->rx_len = 0;
4675 l2cap_conn_unreliable(conn, ECOMM);
4676 }
4677
4678 if (skb->len < 2) {
4679 BT_ERR("Frame is too short (len %d)", skb->len);
4680 l2cap_conn_unreliable(conn, ECOMM);
4681 goto drop;
4682 }
4683
4684 hdr = (struct l2cap_hdr *) skb->data;
4685 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
4686 cid = __le16_to_cpu(hdr->cid);
4687
4688 if (len == skb->len) {
4689 /* Complete frame received */
4690 l2cap_recv_frame(conn, skb);
4691 return 0;
4692 }
4693
4694 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
4695
4696 if (skb->len > len) {
4697 BT_ERR("Frame is too long (len %d, expected len %d)",
4698 skb->len, len);
4699 l2cap_conn_unreliable(conn, ECOMM);
4700 goto drop;
4701 }
4702
4703 sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
4704
4705 if (sk && l2cap_pi(sk)->imtu < len - L2CAP_HDR_SIZE) {
4706 BT_ERR("Frame exceeding recv MTU (len %d, MTU %d)",
4707 len, l2cap_pi(sk)->imtu);
4708 bh_unlock_sock(sk);
4709 l2cap_conn_unreliable(conn, ECOMM);
4710 goto drop;
4711 }
4712
4713 if (sk)
4714 bh_unlock_sock(sk);
4715
4716 /* Allocate skb for the complete frame (with header) */
4717 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
4718 if (!conn->rx_skb)
4719 goto drop;
4720
4721 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4722 skb->len);
4723 conn->rx_len = len - skb->len;
4724 } else {
4725 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
4726
4727 if (!conn->rx_len) {
4728 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
4729 l2cap_conn_unreliable(conn, ECOMM);
4730 goto drop;
4731 }
4732
4733 if (skb->len > conn->rx_len) {
4734 BT_ERR("Fragment is too long (len %d, expected %d)",
4735 skb->len, conn->rx_len);
4736 kfree_skb(conn->rx_skb);
4737 conn->rx_skb = NULL;
4738 conn->rx_len = 0;
4739 l2cap_conn_unreliable(conn, ECOMM);
4740 goto drop;
4741 }
4742
4743 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4744 skb->len);
4745 conn->rx_len -= skb->len;
4746
4747 if (!conn->rx_len) {
4748 /* Complete frame received */
4749 l2cap_recv_frame(conn, conn->rx_skb);
4750 conn->rx_skb = NULL;
4751 }
4752 }
4753
4754 drop:
4755 kfree_skb(skb);
4756 return 0;
4757 }
4758
/* debugfs seq_file callback: print one line per L2CAP socket with its
 * addresses, state, PSM, CIDs, MTUs and security level. */
static int l2cap_debugfs_show(struct seq_file *f, void *p)
{
	struct sock *sk;
	struct hlist_node *node;

	read_lock_bh(&l2cap_sk_list.lock);

	sk_for_each(sk, node, &l2cap_sk_list.head) {
		struct l2cap_pinfo *pi = l2cap_pi(sk);

		seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n",
					batostr(&bt_sk(sk)->src),
					batostr(&bt_sk(sk)->dst),
					sk->sk_state, __le16_to_cpu(pi->psm),
					pi->scid, pi->dcid,
					pi->imtu, pi->omtu, pi->sec_level);
	}

	read_unlock_bh(&l2cap_sk_list.lock);

	return 0;
}
4781
static int l2cap_debugfs_open(struct inode *inode, struct file *file)
{
	/* Bind the seq_file show callback; i_private is passed through
	 * (NULL for this file, see l2cap_init()). */
	return single_open(file, l2cap_debugfs_show, inode->i_private);
}

static const struct file_operations l2cap_debugfs_fops = {
	.open		= l2cap_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/* Dentry of the "l2cap" debugfs file, created in l2cap_init(). */
static struct dentry *l2cap_debugfs;

/* Socket-layer operations for PF_BLUETOOTH / BTPROTO_L2CAP sockets. */
static const struct proto_ops l2cap_sock_ops = {
	.family		= PF_BLUETOOTH,
	.owner		= THIS_MODULE,
	.release	= l2cap_sock_release,
	.bind		= l2cap_sock_bind,
	.connect	= l2cap_sock_connect,
	.listen		= l2cap_sock_listen,
	.accept		= l2cap_sock_accept,
	.getname	= l2cap_sock_getname,
	.sendmsg	= l2cap_sock_sendmsg,
	.recvmsg	= l2cap_sock_recvmsg,
	.poll		= bt_sock_poll,
	.ioctl		= bt_sock_ioctl,
	.mmap		= sock_no_mmap,
	.socketpair	= sock_no_socketpair,
	.shutdown	= l2cap_sock_shutdown,
	.setsockopt	= l2cap_sock_setsockopt,
	.getsockopt	= l2cap_sock_getsockopt
};

static const struct net_proto_family l2cap_sock_family_ops = {
	.family	= PF_BLUETOOTH,
	.owner	= THIS_MODULE,
	.create	= l2cap_sock_create,
};

/* Callbacks registered with the HCI core. */
static struct hci_proto l2cap_hci_proto = {
	.name		= "L2CAP",
	.id		= HCI_PROTO_L2CAP,
	.connect_ind	= l2cap_connect_ind,
	.connect_cfm	= l2cap_connect_cfm,
	.disconn_ind	= l2cap_disconn_ind,
	.disconn_cfm	= l2cap_disconn_cfm,
	.security_cfm	= l2cap_security_cfm,
	.recv_acldata	= l2cap_recv_acldata
};
4832
4833 static int __init l2cap_init(void)
4834 {
4835 int err;
4836
4837 err = proto_register(&l2cap_proto, 0);
4838 if (err < 0)
4839 return err;
4840
4841 _busy_wq = create_singlethread_workqueue("l2cap");
4842 if (!_busy_wq)
4843 goto error;
4844
4845 err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops);
4846 if (err < 0) {
4847 BT_ERR("L2CAP socket registration failed");
4848 goto error;
4849 }
4850
4851 err = hci_register_proto(&l2cap_hci_proto);
4852 if (err < 0) {
4853 BT_ERR("L2CAP protocol registration failed");
4854 bt_sock_unregister(BTPROTO_L2CAP);
4855 goto error;
4856 }
4857
4858 if (bt_debugfs) {
4859 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
4860 bt_debugfs, NULL, &l2cap_debugfs_fops);
4861 if (!l2cap_debugfs)
4862 BT_ERR("Failed to create L2CAP debug file");
4863 }
4864
4865 BT_INFO("L2CAP ver %s", VERSION);
4866 BT_INFO("L2CAP socket layer initialized");
4867
4868 return 0;
4869
4870 error:
4871 proto_unregister(&l2cap_proto);
4872 return err;
4873 }
4874
/* Module unload: tear everything down in roughly the reverse order of
 * l2cap_init(). */
static void __exit l2cap_exit(void)
{
	debugfs_remove(l2cap_debugfs);

	/* Wait for any pending busy-work before destroying the queue. */
	flush_workqueue(_busy_wq);
	destroy_workqueue(_busy_wq);

	if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
		BT_ERR("L2CAP socket unregistration failed");

	if (hci_unregister_proto(&l2cap_hci_proto) < 0)
		BT_ERR("L2CAP protocol unregistration failed");

	proto_unregister(&l2cap_proto);
}
4890
void l2cap_load(void)
{
	/* Dummy function to trigger automatic L2CAP module loading by
	 * other modules that use L2CAP sockets but don't use any other
	 * symbols from it.  Intentionally empty. */
}
EXPORT_SYMBOL(l2cap_load);
4898
module_init(l2cap_init);
module_exit(l2cap_exit);

/* Allow disabling ERTM at load time or at runtime via sysfs (0644). */
module_param(disable_ertm, bool, 0644);
MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");

MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
MODULE_ALIAS("bt-proto-0");
This page took 0.20849 seconds and 6 git commands to generate.