Bluetooth: l2cap: fix misuse of logical operation in place of bitop
[deliverable/linux.git] / net / bluetooth / l2cap.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6
7 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License version 2 as
11 published by the Free Software Foundation;
12
13 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
14 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
16 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
17 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
18 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
19 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
20 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
21
22 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
23 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
24 SOFTWARE IS DISCLAIMED.
25 */
26
27 /* Bluetooth L2CAP core and sockets. */
28
29 #include <linux/module.h>
30
31 #include <linux/types.h>
32 #include <linux/capability.h>
33 #include <linux/errno.h>
34 #include <linux/kernel.h>
35 #include <linux/sched.h>
36 #include <linux/slab.h>
37 #include <linux/poll.h>
38 #include <linux/fcntl.h>
39 #include <linux/init.h>
40 #include <linux/interrupt.h>
41 #include <linux/socket.h>
42 #include <linux/skbuff.h>
43 #include <linux/list.h>
44 #include <linux/device.h>
45 #include <linux/debugfs.h>
46 #include <linux/seq_file.h>
47 #include <linux/uaccess.h>
48 #include <linux/crc16.h>
49 #include <net/sock.h>
50
51 #include <asm/system.h>
52 #include <asm/unaligned.h>
53
54 #include <net/bluetooth/bluetooth.h>
55 #include <net/bluetooth/hci_core.h>
56 #include <net/bluetooth/l2cap.h>
57
#define VERSION "2.15"

/* Module parameter: when non-zero, ERTM and streaming modes are disabled. */
static int disable_ertm;

/* Feature mask we advertise in information responses. */
static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
/* Fixed-channel bitmap we advertise; 0x02 presumably marks the
 * L2CAP signalling channel — confirm against the core spec. */
static u8 l2cap_fixed_chan[8] = { 0x02, };

static const struct proto_ops l2cap_sock_ops;

/* Workqueue servicing l2cap_busy_work() (declared below). */
static struct workqueue_struct *_busy_wq;

/* Global list of all L2CAP sockets, protected by its own rwlock. */
static struct bt_sock_list l2cap_sk_list = {
	.lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
};

/* Forward declarations for routines defined later in this file. */
static void l2cap_busy_work(struct work_struct *work);

static void __l2cap_sock_close(struct sock *sk, int reason);
static void l2cap_sock_close(struct sock *sk);
static void l2cap_sock_kill(struct sock *sk);

static int l2cap_build_conf_req(struct sock *sk, void *data);
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
				u8 code, u8 ident, u16 dlen, void *data);

static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb);
85 /* ---- L2CAP timers ---- */
86 static void l2cap_sock_set_timer(struct sock *sk, long timeout)
87 {
88 BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
89 sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
90 }
91
92 static void l2cap_sock_clear_timer(struct sock *sk)
93 {
94 BT_DBG("sock %p state %d", sk, sk->sk_state);
95 sk_stop_timer(sk, &sk->sk_timer);
96 }
97
/* Socket timer expired: tear the socket down, choosing an error code
 * that reflects how far connection setup had progressed.  Runs in
 * timer (softirq) context. */
static void l2cap_sock_timeout(unsigned long arg)
{
	struct sock *sk = (struct sock *) arg;
	int reason;

	BT_DBG("sock %p state %d", sk, sk->sk_state);

	bh_lock_sock(sk);

	if (sock_owned_by_user(sk)) {
		/* sk is owned by user. Try again later */
		l2cap_sock_set_timer(sk, HZ / 5);
		bh_unlock_sock(sk);
		sock_put(sk);
		return;
	}

	if (sk->sk_state == BT_CONNECTED || sk->sk_state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (sk->sk_state == BT_CONNECT &&
			l2cap_pi(sk)->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	__l2cap_sock_close(sk, reason);

	bh_unlock_sock(sk);

	/* Kill must run on the unlocked socket; it only frees the socket
	 * when it is zapped and orphaned. */
	l2cap_sock_kill(sk);
	sock_put(sk);
}
130
131 /* ---- L2CAP channels ---- */
132 static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
133 {
134 struct sock *s;
135 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
136 if (l2cap_pi(s)->dcid == cid)
137 break;
138 }
139 return s;
140 }
141
142 static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
143 {
144 struct sock *s;
145 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
146 if (l2cap_pi(s)->scid == cid)
147 break;
148 }
149 return s;
150 }
151
/* Find channel with given SCID.
 * Returns locked socket */
static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
{
	struct sock *s;
	read_lock(&l->lock);
	s = __l2cap_get_chan_by_scid(l, cid);
	if (s)
		/* Lock the socket before dropping the list lock so it
		 * cannot disappear under the caller. */
		bh_lock_sock(s);
	read_unlock(&l->lock);
	return s;
}
164
165 static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
166 {
167 struct sock *s;
168 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
169 if (l2cap_pi(s)->ident == ident)
170 break;
171 }
172 return s;
173 }
174
/* Find channel whose last signalling command used @ident.
 * Returns the socket locked, or NULL. */
static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
{
	struct sock *s;
	read_lock(&l->lock);
	s = __l2cap_get_chan_by_ident(l, ident);
	if (s)
		/* Lock before releasing the list lock, see
		 * l2cap_get_chan_by_scid(). */
		bh_lock_sock(s);
	read_unlock(&l->lock);
	return s;
}
185
186 static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
187 {
188 u16 cid = L2CAP_CID_DYN_START;
189
190 for (; cid < L2CAP_CID_DYN_END; cid++) {
191 if (!__l2cap_get_chan_by_scid(l, cid))
192 return cid;
193 }
194
195 return 0;
196 }
197
/* Insert @sk at the head of the connection's channel list, taking a
 * socket reference.  Caller must hold the list write lock. */
static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
{
	sock_hold(sk);

	if (l->head)
		l2cap_pi(l->head)->prev_c = sk;

	l2cap_pi(sk)->next_c = l->head;
	l2cap_pi(sk)->prev_c = NULL;
	l->head = sk;
}
209
/* Remove @sk from the channel list and drop the reference taken in
 * __l2cap_chan_link().  Takes the list write lock itself. */
static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
{
	struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;

	write_lock_bh(&l->lock);
	if (sk == l->head)
		l->head = next;

	if (next)
		l2cap_pi(next)->prev_c = prev;
	if (prev)
		l2cap_pi(prev)->next_c = next;
	write_unlock_bh(&l->lock);

	/* __sock_put: drop the list's reference without triggering
	 * destruction here. */
	__sock_put(sk);
}
226
/* Attach @sk to @conn: assign CIDs according to the socket type, link
 * the channel into the connection's list, and (for incoming channels)
 * queue it on the listening @parent.  Caller must hold the channel
 * list write lock. */
static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
{
	struct l2cap_chan_list *l = &conn->chan_list;

	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
			l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);

	/* 0x13 — default disconnect reason; presumably the HCI "remote
	 * user terminated connection" code, confirm against hci.h. */
	conn->disc_reason = 0x13;

	l2cap_pi(sk)->conn = conn;

	if (sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM) {
		/* Alloc CID for connection-oriented socket */
		l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
	} else if (sk->sk_type == SOCK_DGRAM) {
		/* Connectionless socket */
		l2cap_pi(sk)->scid = L2CAP_CID_CONN_LESS;
		l2cap_pi(sk)->dcid = L2CAP_CID_CONN_LESS;
		l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
	} else {
		/* Raw socket can send/recv signalling messages only */
		l2cap_pi(sk)->scid = L2CAP_CID_SIGNALING;
		l2cap_pi(sk)->dcid = L2CAP_CID_SIGNALING;
		l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
	}

	__l2cap_chan_link(l, sk);

	if (parent)
		bt_accept_enqueue(parent, sk);
}
258
/* Delete channel.
 * Must be called on the locked socket.
 *
 * Detaches the channel from its connection, marks the socket closed
 * and zapped, notifies the owner (or the listening parent), and for
 * ERTM channels discards all retransmission state. */
static void l2cap_chan_del(struct sock *sk, int err)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct sock *parent = bt_sk(sk)->parent;

	l2cap_sock_clear_timer(sk);

	BT_DBG("sk %p, conn %p, err %d", sk, conn, err);

	if (conn) {
		/* Unlink from channel list */
		l2cap_chan_unlink(&conn->chan_list, sk);
		l2cap_pi(sk)->conn = NULL;
		hci_conn_put(conn->hcon);
	}

	sk->sk_state = BT_CLOSED;
	sock_set_flag(sk, SOCK_ZAPPED);

	if (err)
		sk->sk_err = err;

	if (parent) {
		/* Incoming channel not yet accepted: detach from the
		 * listener and wake it up. */
		bt_accept_unlink(sk);
		parent->sk_data_ready(parent, 0);
	} else
		sk->sk_state_change(sk);

	skb_queue_purge(TX_QUEUE(sk));

	if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
		struct srej_list *l, *tmp;

		/* Stop all ERTM timers before discarding their state. */
		del_timer(&l2cap_pi(sk)->retrans_timer);
		del_timer(&l2cap_pi(sk)->monitor_timer);
		del_timer(&l2cap_pi(sk)->ack_timer);

		skb_queue_purge(SREJ_QUEUE(sk));
		skb_queue_purge(BUSY_QUEUE(sk));

		/* Free any outstanding selective-reject bookkeeping. */
		list_for_each_entry_safe(l, tmp, SREJ_LIST(sk), list) {
			list_del(&l->list);
			kfree(l);
		}
	}
}
307
/* Service level security */
/* Map the channel's security level to an HCI authentication type and
 * request that level on the underlying ACL link.  PSM 0x0001 (SDP)
 * never requires bonding, and a LOW level on it is downgraded to SDP.
 * Returns the result of hci_conn_security(). */
static inline int l2cap_check_security(struct sock *sk)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	__u8 auth_type;

	if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
		if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
			auth_type = HCI_AT_NO_BONDING_MITM;
		else
			auth_type = HCI_AT_NO_BONDING;

		if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
			l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
	} else {
		switch (l2cap_pi(sk)->sec_level) {
		case BT_SECURITY_HIGH:
			auth_type = HCI_AT_GENERAL_BONDING_MITM;
			break;
		case BT_SECURITY_MEDIUM:
			auth_type = HCI_AT_GENERAL_BONDING;
			break;
		default:
			auth_type = HCI_AT_NO_BONDING;
			break;
		}
	}

	return hci_conn_security(conn->hcon, l2cap_pi(sk)->sec_level,
								auth_type);
}
339
340 static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
341 {
342 u8 id;
343
344 /* Get next available identificator.
345 * 1 - 128 are used by kernel.
346 * 129 - 199 are reserved.
347 * 200 - 254 are used by utilities like l2ping, etc.
348 */
349
350 spin_lock_bh(&conn->lock);
351
352 if (++conn->tx_ident > 128)
353 conn->tx_ident = 1;
354
355 id = conn->tx_ident;
356
357 spin_unlock_bh(&conn->lock);
358
359 return id;
360 }
361
362 static inline void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
363 {
364 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
365
366 BT_DBG("code 0x%2.2x", code);
367
368 if (!skb)
369 return;
370
371 hci_send_acl(conn->hcon, skb, 0);
372 }
373
/* Build and transmit an S-frame (supervisory frame) with the given
 * control field.  Pending F-bit/P-bit requests recorded in conn_state
 * are OR-ed into the control field and then cleared. */
static inline void l2cap_send_sframe(struct l2cap_pinfo *pi, u16 control)
{
	struct sk_buff *skb;
	struct l2cap_hdr *lh;
	struct l2cap_conn *conn = pi->conn;
	struct sock *sk = (struct sock *)pi;
	int count, hlen = L2CAP_HDR_SIZE + 2;	/* header + 16-bit control */

	if (sk->sk_state != BT_CONNECTED)
		return;

	if (pi->fcs == L2CAP_FCS_CRC16)
		hlen += 2;	/* room for the FCS trailer */

	BT_DBG("pi %p, control 0x%2.2x", pi, control);

	count = min_t(unsigned int, conn->mtu, hlen);
	control |= L2CAP_CTRL_FRAME_TYPE;

	/* Consume a pending final bit (answer to a peer poll). */
	if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
		control |= L2CAP_CTRL_FINAL;
		pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
	}

	/* Consume a pending poll bit (solicit an ack from the peer). */
	if (pi->conn_state & L2CAP_CONN_SEND_PBIT) {
		control |= L2CAP_CTRL_POLL;
		pi->conn_state &= ~L2CAP_CONN_SEND_PBIT;
	}

	skb = bt_skb_alloc(count, GFP_ATOMIC);
	if (!skb)
		return;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(pi->dcid);
	put_unaligned_le16(control, skb_put(skb, 2));

	if (pi->fcs == L2CAP_FCS_CRC16) {
		/* FCS covers everything before the 2-byte trailer. */
		u16 fcs = crc16(0, (u8 *)lh, count - 2);
		put_unaligned_le16(fcs, skb_put(skb, 2));
	}

	hci_send_acl(pi->conn->hcon, skb, 0);
}
419
/* Send a Receiver-Ready, or Receiver-Not-Ready while the local side is
 * busy, carrying the current buffer_seq as the ReqSeq field. */
static inline void l2cap_send_rr_or_rnr(struct l2cap_pinfo *pi, u16 control)
{
	if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
		control |= L2CAP_SUPER_RCV_NOT_READY;
		/* Remember that an RNR went out so it can be cleared later. */
		pi->conn_state |= L2CAP_CONN_RNR_SENT;
	} else
		control |= L2CAP_SUPER_RCV_READY;

	control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;

	l2cap_send_sframe(pi, control);
}
432
433 static inline int __l2cap_no_conn_pending(struct sock *sk)
434 {
435 return !(l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND);
436 }
437
/* Advance outgoing connection setup on a channel.  A connect request
 * is only sent once the information (feature mask) exchange with the
 * remote has finished and link security is satisfied; otherwise the
 * exchange is kicked off first and the connect request will be issued
 * later from l2cap_conn_start(). */
static void l2cap_do_start(struct sock *sk)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
		/* Exchange in progress — wait for its completion. */
		if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
			return;

		if (l2cap_check_security(sk) && __l2cap_no_conn_pending(sk)) {
			struct l2cap_conn_req req;
			req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
			req.psm = l2cap_pi(sk)->psm;

			l2cap_pi(sk)->ident = l2cap_get_ident(conn);
			l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;

			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_REQ, sizeof(req), &req);
		}
	} else {
		struct l2cap_info_req req;
		req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		/* Bound the wait for the information response. */
		mod_timer(&conn->info_timer, jiffies +
					msecs_to_jiffies(L2CAP_INFO_TIMEOUT));

		l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(req), &req);
	}
}
471
472 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
473 {
474 u32 local_feat_mask = l2cap_feat_mask;
475 if (!disable_ertm)
476 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
477
478 switch (mode) {
479 case L2CAP_MODE_ERTM:
480 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
481 case L2CAP_MODE_STREAMING:
482 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
483 default:
484 return 0x00;
485 }
486 }
487
/* Send a disconnect request for the channel and move the socket to
 * BT_DISCONN.  Pending transmit data and ERTM timers are discarded
 * first since the channel is going away. */
static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct sock *sk, int err)
{
	struct l2cap_disconn_req req;

	if (!conn)
		return;

	skb_queue_purge(TX_QUEUE(sk));

	if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
		del_timer(&l2cap_pi(sk)->retrans_timer);
		del_timer(&l2cap_pi(sk)->monitor_timer);
		del_timer(&l2cap_pi(sk)->ack_timer);
	}

	req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
	req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
	l2cap_send_cmd(conn, l2cap_get_ident(conn),
			L2CAP_DISCONN_REQ, sizeof(req), &req);

	sk->sk_state = BT_DISCONN;
	sk->sk_err = err;
}
511
512 /* ---- L2CAP connections ---- */
513 static void l2cap_conn_start(struct l2cap_conn *conn)
514 {
515 struct l2cap_chan_list *l = &conn->chan_list;
516 struct sock_del_list del, *tmp1, *tmp2;
517 struct sock *sk;
518
519 BT_DBG("conn %p", conn);
520
521 INIT_LIST_HEAD(&del.list);
522
523 read_lock(&l->lock);
524
525 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
526 bh_lock_sock(sk);
527
528 if (sk->sk_type != SOCK_SEQPACKET &&
529 sk->sk_type != SOCK_STREAM) {
530 bh_unlock_sock(sk);
531 continue;
532 }
533
534 if (sk->sk_state == BT_CONNECT) {
535 struct l2cap_conn_req req;
536
537 if (!l2cap_check_security(sk) ||
538 !__l2cap_no_conn_pending(sk)) {
539 bh_unlock_sock(sk);
540 continue;
541 }
542
543 if (!l2cap_mode_supported(l2cap_pi(sk)->mode,
544 conn->feat_mask)
545 && l2cap_pi(sk)->conf_state &
546 L2CAP_CONF_STATE2_DEVICE) {
547 tmp1 = kzalloc(sizeof(struct sock_del_list),
548 GFP_ATOMIC);
549 tmp1->sk = sk;
550 list_add_tail(&tmp1->list, &del.list);
551 bh_unlock_sock(sk);
552 continue;
553 }
554
555 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
556 req.psm = l2cap_pi(sk)->psm;
557
558 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
559 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
560
561 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
562 L2CAP_CONN_REQ, sizeof(req), &req);
563
564 } else if (sk->sk_state == BT_CONNECT2) {
565 struct l2cap_conn_rsp rsp;
566 char buf[128];
567 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
568 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
569
570 if (l2cap_check_security(sk)) {
571 if (bt_sk(sk)->defer_setup) {
572 struct sock *parent = bt_sk(sk)->parent;
573 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
574 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
575 parent->sk_data_ready(parent, 0);
576
577 } else {
578 sk->sk_state = BT_CONFIG;
579 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
580 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
581 }
582 } else {
583 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
584 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
585 }
586
587 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
588 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
589
590 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT ||
591 rsp.result != L2CAP_CR_SUCCESS) {
592 bh_unlock_sock(sk);
593 continue;
594 }
595
596 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
597 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
598 l2cap_build_conf_req(sk, buf), buf);
599 l2cap_pi(sk)->num_conf_req++;
600 }
601
602 bh_unlock_sock(sk);
603 }
604
605 read_unlock(&l->lock);
606
607 list_for_each_entry_safe(tmp1, tmp2, &del.list, list) {
608 bh_lock_sock(tmp1->sk);
609 __l2cap_sock_close(tmp1->sk, ECONNRESET);
610 bh_unlock_sock(tmp1->sk);
611 list_del(&tmp1->list);
612 kfree(tmp1);
613 }
614 }
615
/* The underlying ACL link came up: connectionless and raw sockets
 * become connected immediately; connection-oriented channels start
 * the L2CAP connect sequence. */
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan_list *l = &conn->chan_list;
	struct sock *sk;

	BT_DBG("conn %p", conn);

	read_lock(&l->lock);

	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		bh_lock_sock(sk);

		if (sk->sk_type != SOCK_SEQPACKET &&
				sk->sk_type != SOCK_STREAM) {
			l2cap_sock_clear_timer(sk);
			sk->sk_state = BT_CONNECTED;
			sk->sk_state_change(sk);
		} else if (sk->sk_state == BT_CONNECT)
			l2cap_do_start(sk);

		bh_unlock_sock(sk);
	}

	read_unlock(&l->lock);
}
641
/* Notify sockets that we cannot guarantee reliability anymore */
static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
{
	struct l2cap_chan_list *l = &conn->chan_list;
	struct sock *sk;

	BT_DBG("conn %p", conn);

	read_lock(&l->lock);

	/* Only channels that asked for reliable delivery see the error. */
	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		if (l2cap_pi(sk)->force_reliable)
			sk->sk_err = err;
	}

	read_unlock(&l->lock);
}
659
/* Information request timed out: mark the feature exchange as done and
 * resume connection setup for the waiting channels. */
static void l2cap_info_timeout(unsigned long arg)
{
	struct l2cap_conn *conn = (void *) arg;

	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
	conn->info_ident = 0;

	l2cap_conn_start(conn);
}
669
/* Create and attach the L2CAP connection object for an ACL link.
 * Returns the existing object if one is already attached, or NULL on
 * allocation failure or when @status reports an unsuccessful link. */
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	if (conn || status)
		return conn;

	conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
	if (!conn)
		return NULL;

	hcon->l2cap_data = conn;
	conn->hcon = hcon;

	BT_DBG("hcon %p conn %p", hcon, conn);

	conn->mtu = hcon->hdev->acl_mtu;
	conn->src = &hcon->hdev->bdaddr;
	conn->dst = &hcon->dst;

	/* Remote features are unknown until the info exchange runs. */
	conn->feat_mask = 0;

	spin_lock_init(&conn->lock);
	rwlock_init(&conn->chan_list.lock);

	setup_timer(&conn->info_timer, l2cap_info_timeout,
						(unsigned long) conn);

	/* 0x13 — default disconnect reason; presumably the HCI "remote
	 * user terminated connection" code, confirm against hci.h. */
	conn->disc_reason = 0x13;

	return conn;
}
702
/* Tear down the L2CAP connection attached to @hcon: close every
 * channel with error @err, stop the info timer, and free the
 * connection object. */
static void l2cap_conn_del(struct hci_conn *hcon, int err)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct sock *sk;

	if (!conn)
		return;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	/* Drop any partially reassembled frame. */
	kfree_skb(conn->rx_skb);

	/* Kill channels */
	while ((sk = conn->chan_list.head)) {
		bh_lock_sock(sk);
		l2cap_chan_del(sk, err);
		bh_unlock_sock(sk);
		l2cap_sock_kill(sk);
	}

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		del_timer_sync(&conn->info_timer);

	hcon->l2cap_data = NULL;
	kfree(conn);
}
729
/* Add a channel to the connection under the channel-list write lock. */
static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
{
	struct l2cap_chan_list *l = &conn->chan_list;
	write_lock_bh(&l->lock);
	__l2cap_chan_add(conn, sk, parent);
	write_unlock_bh(&l->lock);
}
737
738 /* ---- Socket interface ---- */
739 static struct sock *__l2cap_get_sock_by_addr(__le16 psm, bdaddr_t *src)
740 {
741 struct sock *sk;
742 struct hlist_node *node;
743 sk_for_each(sk, node, &l2cap_sk_list.head)
744 if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src))
745 goto found;
746 sk = NULL;
747 found:
748 return sk;
749 }
750
/* Find socket with psm and source bdaddr.
 * Returns closest match.
 */
static struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
{
	struct sock *sk = NULL, *sk1 = NULL;
	struct hlist_node *node;

	read_lock(&l2cap_sk_list.lock);

	sk_for_each(sk, node, &l2cap_sk_list.head) {
		if (state && sk->sk_state != state)
			continue;

		if (l2cap_pi(sk)->psm == psm) {
			/* Exact match. */
			if (!bacmp(&bt_sk(sk)->src, src))
				break;

			/* Closest match */
			if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
				sk1 = sk;
		}
	}

	read_unlock(&l2cap_sk_list.lock);

	/* node is non-NULL only if the loop broke on an exact match;
	 * otherwise fall back to a wildcard-bound socket, if any. */
	return node ? sk : sk1;
}
780
/* Socket destructor: drop any packets still queued on the socket. */
static void l2cap_sock_destruct(struct sock *sk)
{
	BT_DBG("sk %p", sk);

	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);
}
788
/* Shut down a listening socket: close every queued-but-unaccepted
 * channel, then mark the listener closed and zapped. */
static void l2cap_sock_cleanup_listen(struct sock *parent)
{
	struct sock *sk;

	BT_DBG("parent %p", parent);

	/* Close not yet accepted channels */
	while ((sk = bt_accept_dequeue(parent, NULL)))
		l2cap_sock_close(sk);

	parent->sk_state = BT_CLOSED;
	sock_set_flag(parent, SOCK_ZAPPED);
}
802
/* Kill socket (only if zapped and orphan)
 * Must be called on unlocked socket.
 *
 * Drops the final reference: unlinks the socket from the global list,
 * flags it dead, and puts the reference that keeps it alive. */
static void l2cap_sock_kill(struct sock *sk)
{
	/* Still attached to a struct socket, or not yet zapped —
	 * somebody else is responsible for it. */
	if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
		return;

	BT_DBG("sk %p state %d", sk, sk->sk_state);

	/* Kill poor orphan */
	bt_sock_unlink(&l2cap_sk_list, sk);
	sock_set_flag(sk, SOCK_DEAD);
	sock_put(sk);
}
818
/* Close the socket according to its current state.  Caller holds the
 * socket lock; actual freeing happens later in l2cap_sock_kill(). */
static void __l2cap_sock_close(struct sock *sk, int reason)
{
	BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);

	switch (sk->sk_state) {
	case BT_LISTEN:
		l2cap_sock_cleanup_listen(sk);
		break;

	case BT_CONNECTED:
	case BT_CONFIG:
		if (sk->sk_type == SOCK_SEQPACKET ||
				sk->sk_type == SOCK_STREAM) {
			struct l2cap_conn *conn = l2cap_pi(sk)->conn;

			/* Give the disconnect handshake a deadline. */
			l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
			l2cap_send_disconn_req(conn, sk, reason);
		} else
			l2cap_chan_del(sk, reason);
		break;

	case BT_CONNECT2:
		/* Incoming connection still pending on our side: reject
		 * it explicitly before tearing the channel down. */
		if (sk->sk_type == SOCK_SEQPACKET ||
				sk->sk_type == SOCK_STREAM) {
			struct l2cap_conn *conn = l2cap_pi(sk)->conn;
			struct l2cap_conn_rsp rsp;
			__u16 result;

			if (bt_sk(sk)->defer_setup)
				result = L2CAP_CR_SEC_BLOCK;
			else
				result = L2CAP_CR_BAD_PSM;

			rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
			rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
			rsp.result = cpu_to_le16(result);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_RSP, sizeof(rsp), &rsp);
		} else
			l2cap_chan_del(sk, reason);
		break;

	case BT_CONNECT:
	case BT_DISCONN:
		l2cap_chan_del(sk, reason);
		break;

	default:
		sock_set_flag(sk, SOCK_ZAPPED);
		break;
	}
}
872
/* Must be called on unlocked socket. */
static void l2cap_sock_close(struct sock *sk)
{
	l2cap_sock_clear_timer(sk);
	lock_sock(sk);
	__l2cap_sock_close(sk, ECONNRESET);
	release_sock(sk);
	/* Frees the socket only if it is zapped and orphaned. */
	l2cap_sock_kill(sk);
}
882
/* Initialise a freshly allocated L2CAP socket.  A child accepted from
 * a listening @parent inherits its settings; otherwise defaults are
 * applied. */
static void l2cap_sock_init(struct sock *sk, struct sock *parent)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);

	BT_DBG("sk %p", sk);

	if (parent) {
		sk->sk_type = parent->sk_type;
		bt_sk(sk)->defer_setup = bt_sk(parent)->defer_setup;

		pi->imtu = l2cap_pi(parent)->imtu;
		pi->omtu = l2cap_pi(parent)->omtu;
		pi->conf_state = l2cap_pi(parent)->conf_state;
		pi->mode = l2cap_pi(parent)->mode;
		pi->fcs = l2cap_pi(parent)->fcs;
		pi->max_tx = l2cap_pi(parent)->max_tx;
		pi->tx_win = l2cap_pi(parent)->tx_win;
		pi->sec_level = l2cap_pi(parent)->sec_level;
		pi->role_switch = l2cap_pi(parent)->role_switch;
		pi->force_reliable = l2cap_pi(parent)->force_reliable;
	} else {
		pi->imtu = L2CAP_DEFAULT_MTU;
		pi->omtu = 0;
		/* Stream sockets default to ERTM unless it is disabled. */
		if (!disable_ertm && sk->sk_type == SOCK_STREAM) {
			pi->mode = L2CAP_MODE_ERTM;
			pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
		} else {
			pi->mode = L2CAP_MODE_BASIC;
		}
		pi->max_tx = L2CAP_DEFAULT_MAX_TX;
		pi->fcs = L2CAP_FCS_CRC16;
		pi->tx_win = L2CAP_DEFAULT_TX_WINDOW;
		pi->sec_level = BT_SECURITY_LOW;
		pi->role_switch = 0;
		pi->force_reliable = 0;
	}

	/* Default config options */
	pi->conf_len = 0;
	pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
	skb_queue_head_init(TX_QUEUE(sk));
	skb_queue_head_init(SREJ_QUEUE(sk));
	skb_queue_head_init(BUSY_QUEUE(sk));
	INIT_LIST_HEAD(SREJ_LIST(sk));
}
928
/* Protocol descriptor: sk_alloc() sizes sockets to l2cap_pinfo. */
static struct proto l2cap_proto = {
	.name = "L2CAP",
	.owner = THIS_MODULE,
	.obj_size = sizeof(struct l2cap_pinfo)
};
934
/* Allocate and minimally initialise an L2CAP socket in BT_OPEN state,
 * linking it into the global socket list.  Returns NULL on allocation
 * failure. */
static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
{
	struct sock *sk;

	sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto);
	if (!sk)
		return NULL;

	sock_init_data(sock, sk);
	INIT_LIST_HEAD(&bt_sk(sk)->accept_q);

	sk->sk_destruct = l2cap_sock_destruct;
	sk->sk_sndtimeo = msecs_to_jiffies(L2CAP_CONN_TIMEOUT);

	sock_reset_flag(sk, SOCK_ZAPPED);

	sk->sk_protocol = proto;
	sk->sk_state = BT_OPEN;

	setup_timer(&sk->sk_timer, l2cap_sock_timeout, (unsigned long) sk);

	bt_sock_link(&l2cap_sk_list, sk);
	return sk;
}
959
/* socket(2) backend: validate the socket type, restrict raw sockets to
 * privileged (or kernel) callers, then allocate and initialise the
 * socket. */
static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol,
			     int kern)
{
	struct sock *sk;

	BT_DBG("sock %p", sock);

	sock->state = SS_UNCONNECTED;

	if (sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM &&
			sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
		return -ESOCKTNOSUPPORT;

	if (sock->type == SOCK_RAW && !kern && !capable(CAP_NET_RAW))
		return -EPERM;

	sock->ops = &l2cap_sock_ops;

	sk = l2cap_sock_alloc(net, sock, protocol, GFP_ATOMIC);
	if (!sk)
		return -ENOMEM;

	l2cap_sock_init(sk, NULL);
	return 0;
}
985
/* bind(2) backend: validate the requested PSM, check it is not already
 * taken on the same source address, and record the local address/PSM. */
static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
{
	struct sock *sk = sock->sk;
	struct sockaddr_l2 la;
	int len, err = 0;

	BT_DBG("sk %p", sk);

	if (!addr || addr->sa_family != AF_BLUETOOTH)
		return -EINVAL;

	/* Copy at most sizeof(la); shorter sockaddrs leave the rest 0. */
	memset(&la, 0, sizeof(la));
	len = min_t(unsigned int, sizeof(la), alen);
	memcpy(&la, addr, len);

	/* Binding to a specific CID is not supported here. */
	if (la.l2_cid)
		return -EINVAL;

	lock_sock(sk);

	if (sk->sk_state != BT_OPEN) {
		err = -EBADFD;
		goto done;
	}

	if (la.l2_psm) {
		__u16 psm = __le16_to_cpu(la.l2_psm);

		/* PSM must be odd and lsb of upper byte must be 0 */
		if ((psm & 0x0101) != 0x0001) {
			err = -EINVAL;
			goto done;
		}

		/* Restrict usage of well-known PSMs */
		if (psm < 0x1001 && !capable(CAP_NET_BIND_SERVICE)) {
			err = -EACCES;
			goto done;
		}
	}

	write_lock_bh(&l2cap_sk_list.lock);

	if (la.l2_psm && __l2cap_get_sock_by_addr(la.l2_psm, &la.l2_bdaddr)) {
		err = -EADDRINUSE;
	} else {
		/* Save source address */
		bacpy(&bt_sk(sk)->src, &la.l2_bdaddr);
		l2cap_pi(sk)->psm = la.l2_psm;
		l2cap_pi(sk)->sport = la.l2_psm;
		sk->sk_state = BT_BOUND;

		/* SDP (0x0001) and RFCOMM (0x0003) need no security. */
		if (__le16_to_cpu(la.l2_psm) == 0x0001 ||
					__le16_to_cpu(la.l2_psm) == 0x0003)
			l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
	}

	write_unlock_bh(&l2cap_sk_list.lock);

done:
	release_sock(sk);
	return err;
}
1049
/* Establish (or reuse) an ACL link to the destination address and
 * attach this channel to it.  On success the socket moves to
 * BT_CONNECT and, if the link is already up, setup continues
 * immediately.  Returns 0 or a negative error. */
static int l2cap_do_connect(struct sock *sk)
{
	bdaddr_t *src = &bt_sk(sk)->src;
	bdaddr_t *dst = &bt_sk(sk)->dst;
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	__u8 auth_type;
	int err;

	BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
							l2cap_pi(sk)->psm);

	hdev = hci_get_route(dst, src);
	if (!hdev)
		return -EHOSTUNREACH;

	hci_dev_lock_bh(hdev);

	err = -ENOMEM;

	/* Choose the HCI authentication requirement from the socket
	 * type, PSM and requested security level (mirrors
	 * l2cap_check_security()). */
	if (sk->sk_type == SOCK_RAW) {
		switch (l2cap_pi(sk)->sec_level) {
		case BT_SECURITY_HIGH:
			auth_type = HCI_AT_DEDICATED_BONDING_MITM;
			break;
		case BT_SECURITY_MEDIUM:
			auth_type = HCI_AT_DEDICATED_BONDING;
			break;
		default:
			auth_type = HCI_AT_NO_BONDING;
			break;
		}
	} else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
		if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
			auth_type = HCI_AT_NO_BONDING_MITM;
		else
			auth_type = HCI_AT_NO_BONDING;

		if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
			l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
	} else {
		switch (l2cap_pi(sk)->sec_level) {
		case BT_SECURITY_HIGH:
			auth_type = HCI_AT_GENERAL_BONDING_MITM;
			break;
		case BT_SECURITY_MEDIUM:
			auth_type = HCI_AT_GENERAL_BONDING;
			break;
		default:
			auth_type = HCI_AT_NO_BONDING;
			break;
		}
	}

	hcon = hci_connect(hdev, ACL_LINK, dst,
					l2cap_pi(sk)->sec_level, auth_type);
	if (!hcon)
		goto done;

	conn = l2cap_conn_add(hcon, 0);
	if (!conn) {
		hci_conn_put(hcon);
		goto done;
	}

	err = 0;

	/* Update source addr of the socket */
	bacpy(src, conn->src);

	l2cap_chan_add(conn, sk, NULL);

	sk->sk_state = BT_CONNECT;
	l2cap_sock_set_timer(sk, sk->sk_sndtimeo);

	if (hcon->state == BT_CONNECTED) {
		if (sk->sk_type != SOCK_SEQPACKET &&
				sk->sk_type != SOCK_STREAM) {
			/* Raw/dgram channels need no further handshake. */
			l2cap_sock_clear_timer(sk);
			sk->sk_state = BT_CONNECTED;
		} else
			l2cap_do_start(sk);
	}

done:
	hci_dev_unlock_bh(hdev);
	hci_dev_put(hdev);
	return err;
}
1140
/* connect(2) backend: validate the destination PSM and channel mode,
 * start the connection, then wait for BT_CONNECTED (subject to the
 * socket's timeout / non-blocking flag). */
static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
{
	struct sock *sk = sock->sk;
	struct sockaddr_l2 la;
	int len, err = 0;

	BT_DBG("sk %p", sk);

	if (!addr || alen < sizeof(addr->sa_family) ||
			addr->sa_family != AF_BLUETOOTH)
		return -EINVAL;

	memset(&la, 0, sizeof(la));
	len = min_t(unsigned int, sizeof(la), alen);
	memcpy(&la, addr, len);

	/* Connecting to a specific CID is not supported here. */
	if (la.l2_cid)
		return -EINVAL;

	lock_sock(sk);

	/* Connection-oriented sockets must name a PSM. */
	if ((sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM)
			&& !la.l2_psm) {
		err = -EINVAL;
		goto done;
	}

	switch (l2cap_pi(sk)->mode) {
	case L2CAP_MODE_BASIC:
		break;
	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		if (!disable_ertm)
			break;
		/* fall through */
	default:
		err = -ENOTSUPP;
		goto done;
	}

	switch (sk->sk_state) {
	case BT_CONNECT:
	case BT_CONNECT2:
	case BT_CONFIG:
		/* Already connecting */
		goto wait;

	case BT_CONNECTED:
		/* Already connected */
		err = -EISCONN;
		goto done;

	case BT_OPEN:
	case BT_BOUND:
		/* Can connect */
		break;

	default:
		err = -EBADFD;
		goto done;
	}

	/* PSM must be odd and lsb of upper byte must be 0 */
	if ((__le16_to_cpu(la.l2_psm) & 0x0101) != 0x0001 &&
			sk->sk_type != SOCK_RAW) {
		err = -EINVAL;
		goto done;
	}

	/* Set destination address and psm */
	bacpy(&bt_sk(sk)->dst, &la.l2_bdaddr);
	l2cap_pi(sk)->psm = la.l2_psm;

	err = l2cap_do_connect(sk);
	if (err)
		goto done;

wait:
	err = bt_sock_wait_state(sk, BT_CONNECTED,
			sock_sndtimeo(sk, flags & O_NONBLOCK));
done:
	release_sock(sk);
	return err;
}
1225
/* listen(2) backend: only bound, connection-oriented sockets in a
 * supported mode may listen.  A socket bound without a PSM gets one
 * auto-assigned from the dynamic range (0x1001..0x10ff, odd values). */
static int l2cap_sock_listen(struct socket *sock, int backlog)
{
	struct sock *sk = sock->sk;
	int err = 0;

	BT_DBG("sk %p backlog %d", sk, backlog);

	lock_sock(sk);

	if ((sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM)
			|| sk->sk_state != BT_BOUND) {
		err = -EBADFD;
		goto done;
	}

	switch (l2cap_pi(sk)->mode) {
	case L2CAP_MODE_BASIC:
		break;
	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		if (!disable_ertm)
			break;
		/* fall through */
	default:
		err = -ENOTSUPP;
		goto done;
	}

	if (!l2cap_pi(sk)->psm) {
		bdaddr_t *src = &bt_sk(sk)->src;
		u16 psm;

		err = -EINVAL;

		write_lock_bh(&l2cap_sk_list.lock);

		/* Scan the dynamic PSM range for a free value. */
		for (psm = 0x1001; psm < 0x1100; psm += 2)
			if (!__l2cap_get_sock_by_addr(cpu_to_le16(psm), src)) {
				l2cap_pi(sk)->psm = cpu_to_le16(psm);
				l2cap_pi(sk)->sport = cpu_to_le16(psm);
				err = 0;
				break;
			}

		write_unlock_bh(&l2cap_sk_list.lock);

		if (err < 0)
			goto done;
	}

	sk->sk_max_ack_backlog = backlog;
	sk->sk_ack_backlog = 0;
	sk->sk_state = BT_LISTEN;

done:
	release_sock(sk);
	return err;
}
1284
/*
 * accept() handler.  Sleeps on the listening socket's wait queue
 * (exclusive wait, so each incoming channel wakes only one accepter)
 * until bt_accept_dequeue() returns an established child, the timeout
 * expires, a signal arrives, or the socket leaves BT_LISTEN.
 */
static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
{
	DECLARE_WAITQUEUE(wait, current);
	struct sock *sk = sock->sk, *nsk;
	long timeo;
	int err = 0;

	/* Nested lock class: child socks may be locked beneath this one. */
	lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

	if (sk->sk_state != BT_LISTEN) {
		err = -EBADFD;
		goto done;
	}

	timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);

	BT_DBG("sk %p timeo %ld", sk, timeo);

	/* Wait for an incoming connection. (wake-one). */
	add_wait_queue_exclusive(sk_sleep(sk), &wait);
	while (!(nsk = bt_accept_dequeue(sk, newsock))) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (!timeo) {
			err = -EAGAIN;
			break;
		}

		/* Drop the lock while sleeping so children can be queued. */
		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

		/* Re-check: state may have changed while unlocked. */
		if (sk->sk_state != BT_LISTEN) {
			err = -EBADFD;
			break;
		}

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);

	if (err)
		goto done;

	newsock->state = SS_CONNECTED;

	BT_DBG("new socket %p", nsk);

done:
	release_sock(sk);
	return err;
}
1340
1341 static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
1342 {
1343 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
1344 struct sock *sk = sock->sk;
1345
1346 BT_DBG("sock %p, sk %p", sock, sk);
1347
1348 addr->sa_family = AF_BLUETOOTH;
1349 *len = sizeof(struct sockaddr_l2);
1350
1351 if (peer) {
1352 la->l2_psm = l2cap_pi(sk)->psm;
1353 bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
1354 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1355 } else {
1356 la->l2_psm = l2cap_pi(sk)->sport;
1357 bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
1358 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->scid);
1359 }
1360
1361 return 0;
1362 }
1363
/*
 * Wait (interruptibly) until every outstanding ERTM I-frame has been
 * acknowledged by the peer, or the connection disappears.  Used on
 * shutdown so queued data is not silently dropped.  Sleeps in HZ/5
 * slices, re-checking the socket error after each wakeup.
 */
static int __l2cap_wait_ack(struct sock *sk)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;
	int timeo = HZ/5;

	add_wait_queue(sk_sleep(sk), &wait);
	while ((l2cap_pi(sk)->unacked_frames > 0 && l2cap_pi(sk)->conn)) {
		set_current_state(TASK_INTERRUPTIBLE);

		if (!timeo)
			timeo = HZ/5;

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}

		/* Drop the lock so RX processing can ack our frames. */
		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);

		err = sock_error(sk);
		if (err)
			break;
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);
	return err;
}
1394
/*
 * ERTM monitor timer: the peer did not answer our poll.  Tear the
 * channel down once retry_count reaches the remote MaxTx, otherwise
 * poll again with an RR/RNR S-frame carrying the P bit.  Runs in timer
 * (softirq) context, hence bh_lock_sock().
 */
static void l2cap_monitor_timeout(unsigned long arg)
{
	struct sock *sk = (void *) arg;

	BT_DBG("sk %p", sk);

	bh_lock_sock(sk);
	if (l2cap_pi(sk)->retry_count >= l2cap_pi(sk)->remote_max_tx) {
		l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk, ECONNABORTED);
		bh_unlock_sock(sk);
		return;
	}

	l2cap_pi(sk)->retry_count++;
	__mod_monitor_timer();

	l2cap_send_rr_or_rnr(l2cap_pi(sk), L2CAP_CTRL_POLL);
	bh_unlock_sock(sk);
}
1414
/*
 * ERTM retransmission timer: no acknowledgement arrived in time.
 * Start the monitor cycle (retry_count = 1), mark that we are waiting
 * for the peer's F bit, and poll with an RR/RNR S-frame.  Timer
 * (softirq) context, hence bh_lock_sock().
 */
static void l2cap_retrans_timeout(unsigned long arg)
{
	struct sock *sk = (void *) arg;

	BT_DBG("sk %p", sk);

	bh_lock_sock(sk);
	l2cap_pi(sk)->retry_count = 1;
	__mod_monitor_timer();

	l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;

	l2cap_send_rr_or_rnr(l2cap_pi(sk), L2CAP_CTRL_POLL);
	bh_unlock_sock(sk);
}
1430
/*
 * Free transmitted I-frames the peer has acknowledged: everything on
 * the TX queue up to (but excluding) expected_ack_seq.  When no unacked
 * frames remain, the retransmission timer is stopped.
 */
static void l2cap_drop_acked_frames(struct sock *sk)
{
	struct sk_buff *skb;

	while ((skb = skb_peek(TX_QUEUE(sk))) &&
			l2cap_pi(sk)->unacked_frames) {
		/* First still-unacked frame reached: stop. */
		if (bt_cb(skb)->tx_seq == l2cap_pi(sk)->expected_ack_seq)
			break;

		skb = skb_dequeue(TX_QUEUE(sk));
		kfree_skb(skb);

		l2cap_pi(sk)->unacked_frames--;
	}

	if (!l2cap_pi(sk)->unacked_frames)
		del_timer(&l2cap_pi(sk)->retrans_timer);
}
1449
1450 static inline void l2cap_do_send(struct sock *sk, struct sk_buff *skb)
1451 {
1452 struct l2cap_pinfo *pi = l2cap_pi(sk);
1453
1454 BT_DBG("sk %p, skb %p len %d", sk, skb, skb->len);
1455
1456 hci_send_acl(pi->conn->hcon, skb, 0);
1457 }
1458
/*
 * Streaming-mode transmit: drain the whole TX queue.  Each PDU gets the
 * next TxSeq stamped into its control field and, when CRC16 FCS is in
 * use, a freshly computed FCS over all but the trailing two bytes.
 * Streaming mode has no retransmission, so frames are sent and freed.
 */
static void l2cap_streaming_send(struct sock *sk)
{
	struct sk_buff *skb;
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u16 control, fcs;

	while ((skb = skb_dequeue(TX_QUEUE(sk)))) {
		control = get_unaligned_le16(skb->data + L2CAP_HDR_SIZE);
		control |= pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
		put_unaligned_le16(control, skb->data + L2CAP_HDR_SIZE);

		if (pi->fcs == L2CAP_FCS_CRC16) {
			/* FCS covers everything but its own two bytes. */
			fcs = crc16(0, (u8 *)skb->data, skb->len - 2);
			put_unaligned_le16(fcs, skb->data + skb->len - 2);
		}

		l2cap_do_send(sk, skb);

		/* TxSeq is a 6-bit sequence number, hence modulo 64. */
		pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
	}
}
1480
1481 static void l2cap_retransmit_one_frame(struct sock *sk, u8 tx_seq)
1482 {
1483 struct l2cap_pinfo *pi = l2cap_pi(sk);
1484 struct sk_buff *skb, *tx_skb;
1485 u16 control, fcs;
1486
1487 skb = skb_peek(TX_QUEUE(sk));
1488 if (!skb)
1489 return;
1490
1491 do {
1492 if (bt_cb(skb)->tx_seq == tx_seq)
1493 break;
1494
1495 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1496 return;
1497
1498 } while ((skb = skb_queue_next(TX_QUEUE(sk), skb)));
1499
1500 if (pi->remote_max_tx &&
1501 bt_cb(skb)->retries == pi->remote_max_tx) {
1502 l2cap_send_disconn_req(pi->conn, sk, ECONNABORTED);
1503 return;
1504 }
1505
1506 tx_skb = skb_clone(skb, GFP_ATOMIC);
1507 bt_cb(skb)->retries++;
1508 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1509
1510 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
1511 control |= L2CAP_CTRL_FINAL;
1512 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1513 }
1514
1515 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1516 | (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1517
1518 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1519
1520 if (pi->fcs == L2CAP_FCS_CRC16) {
1521 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1522 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1523 }
1524
1525 l2cap_do_send(sk, tx_skb);
1526 }
1527
1528 static int l2cap_ertm_send(struct sock *sk)
1529 {
1530 struct sk_buff *skb, *tx_skb;
1531 struct l2cap_pinfo *pi = l2cap_pi(sk);
1532 u16 control, fcs;
1533 int nsent = 0;
1534
1535 if (sk->sk_state != BT_CONNECTED)
1536 return -ENOTCONN;
1537
1538 while ((skb = sk->sk_send_head) && (!l2cap_tx_window_full(sk))) {
1539
1540 if (pi->remote_max_tx &&
1541 bt_cb(skb)->retries == pi->remote_max_tx) {
1542 l2cap_send_disconn_req(pi->conn, sk, ECONNABORTED);
1543 break;
1544 }
1545
1546 tx_skb = skb_clone(skb, GFP_ATOMIC);
1547
1548 bt_cb(skb)->retries++;
1549
1550 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1551 control &= L2CAP_CTRL_SAR;
1552
1553 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
1554 control |= L2CAP_CTRL_FINAL;
1555 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1556 }
1557 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1558 | (pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1559 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1560
1561
1562 if (pi->fcs == L2CAP_FCS_CRC16) {
1563 fcs = crc16(0, (u8 *)skb->data, tx_skb->len - 2);
1564 put_unaligned_le16(fcs, skb->data + tx_skb->len - 2);
1565 }
1566
1567 l2cap_do_send(sk, tx_skb);
1568
1569 __mod_retrans_timer();
1570
1571 bt_cb(skb)->tx_seq = pi->next_tx_seq;
1572 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1573
1574 pi->unacked_frames++;
1575 pi->frames_sent++;
1576
1577 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1578 sk->sk_send_head = NULL;
1579 else
1580 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
1581
1582 nsent++;
1583 }
1584
1585 return nsent;
1586 }
1587
1588 static int l2cap_retransmit_frames(struct sock *sk)
1589 {
1590 struct l2cap_pinfo *pi = l2cap_pi(sk);
1591 int ret;
1592
1593 if (!skb_queue_empty(TX_QUEUE(sk)))
1594 sk->sk_send_head = TX_QUEUE(sk)->next;
1595
1596 pi->next_tx_seq = pi->expected_ack_seq;
1597 ret = l2cap_ertm_send(sk);
1598 return ret;
1599 }
1600
/*
 * Acknowledge received I-frames.  If the local side is busy, send an
 * RNR (remembering that we did); otherwise try to piggy-back the ack on
 * pending outgoing I-frames via l2cap_ertm_send(), falling back to an
 * explicit RR S-frame only when nothing was sent.
 */
static void l2cap_send_ack(struct l2cap_pinfo *pi)
{
	struct sock *sk = (struct sock *)pi;
	u16 control = 0;

	control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;

	if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
		control |= L2CAP_SUPER_RCV_NOT_READY;
		pi->conn_state |= L2CAP_CONN_RNR_SENT;
		l2cap_send_sframe(pi, control);
		return;
	}

	/* I-frames carry ReqSeq, so sending any acks implicitly. */
	if (l2cap_ertm_send(sk) > 0)
		return;

	control |= L2CAP_SUPER_RCV_READY;
	l2cap_send_sframe(pi, control);
}
1621
/*
 * Send a SREJ S-frame with the F bit set, requesting retransmission of
 * the frame recorded in the newest (tail) entry of the SREJ list.
 */
static void l2cap_send_srejtail(struct sock *sk)
{
	struct srej_list *tail;
	u16 control;

	control = L2CAP_SUPER_SELECT_REJECT;
	control |= L2CAP_CTRL_FINAL;

	tail = list_entry(SREJ_LIST(sk)->prev, struct srej_list, list);
	control |= tail->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;

	l2cap_send_sframe(l2cap_pi(sk), control);
}
1635
/*
 * Copy a user iovec into @skb.  The first @count bytes go into @skb
 * itself; any remainder is split into continuation fragments (no L2CAP
 * header) chained on skb's frag_list, each at most conn->mtu bytes.
 * Returns bytes copied or a negative errno.  On failure, fragments
 * allocated so far are already linked into @skb, so the caller's
 * kfree_skb() releases them.
 */
static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct sk_buff **frag;
	int err, sent = 0;

	if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
		return -EFAULT;

	sent += count;
	len -= count;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
		if (!*frag)
			return err;
		if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
			return -EFAULT;

		sent += count;
		len -= count;

		frag = &(*frag)->next;
	}

	return sent;
}
1667
/*
 * Build a connectionless (SOCK_DGRAM) PDU: basic L2CAP header, 2-byte
 * PSM, then the payload copied from the user iovec (continuation
 * fragments as needed).  Returns the skb or an ERR_PTR().
 */
static struct sk_buff *l2cap_create_connless_pdu(struct sock *sk, struct msghdr *msg, size_t len)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE + 2;	/* header + PSM */
	struct l2cap_hdr *lh;

	BT_DBG("sk %p len %d", sk, (int)len);

	count = min_t(unsigned int, (conn->mtu - hlen), len);
	skb = bt_skb_send_alloc(sk, count + hlen,
			msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		return ERR_PTR(err);

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
	put_unaligned_le16(l2cap_pi(sk)->psm, skb_put(skb, 2));

	err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
1696
/*
 * Build a basic-mode PDU: L2CAP header followed by the payload copied
 * from the user iovec.  Returns the skb or an ERR_PTR().
 */
static struct sk_buff *l2cap_create_basic_pdu(struct sock *sk, struct msghdr *msg, size_t len)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE;
	struct l2cap_hdr *lh;

	BT_DBG("sk %p len %d", sk, (int)len);

	count = min_t(unsigned int, (conn->mtu - hlen), len);
	skb = bt_skb_send_alloc(sk, count + hlen,
			msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		return ERR_PTR(err);

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
1724
/*
 * Build an ERTM/streaming I-frame PDU: L2CAP header, 16-bit control
 * field, an optional 2-byte SDU length (only on the first PDU of a
 * segmented SDU), the payload, and an FCS placeholder when CRC16 is
 * enabled.  The real FCS is computed at transmit time, once TxSeq is
 * known.  Returns the skb or an ERR_PTR().
 */
static struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk, struct msghdr *msg, size_t len, u16 control, u16 sdulen)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE + 2;
	struct l2cap_hdr *lh;

	BT_DBG("sk %p len %d", sk, (int)len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	if (sdulen)
		hlen += 2;

	if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
		hlen += 2;

	count = min_t(unsigned int, (conn->mtu - hlen), len);
	skb = bt_skb_send_alloc(sk, count + hlen,
			msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		return ERR_PTR(err);

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
	put_unaligned_le16(control, skb_put(skb, 2));
	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, 2));

	err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	/* Reserve room for the FCS; value is filled in on transmit. */
	if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
		put_unaligned_le16(0, skb_put(skb, 2));

	bt_cb(skb)->retries = 0;
	return skb;
}
1769
/*
 * Segment an SDU larger than the remote MPS into a chain of I-frames
 * (SAR start / continue / end).  The chain is built on a private queue
 * first so a mid-stream allocation failure cannot leave a partial SDU
 * on the TX queue.  Returns total bytes queued or a negative errno.
 */
static inline int l2cap_sar_segment_sdu(struct sock *sk, struct msghdr *msg, size_t len)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct sk_buff *skb;
	struct sk_buff_head sar_queue;
	u16 control;
	size_t size = 0;

	skb_queue_head_init(&sar_queue);
	control = L2CAP_SDU_START;
	/* The start PDU also carries the total SDU length. */
	skb = l2cap_create_iframe_pdu(sk, msg, pi->remote_mps, control, len);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	__skb_queue_tail(&sar_queue, skb);
	len -= pi->remote_mps;
	size += pi->remote_mps;

	while (len > 0) {
		size_t buflen;

		if (len > pi->remote_mps) {
			control = L2CAP_SDU_CONTINUE;
			buflen = pi->remote_mps;
		} else {
			control = L2CAP_SDU_END;
			buflen = len;
		}

		skb = l2cap_create_iframe_pdu(sk, msg, buflen, control, 0);
		if (IS_ERR(skb)) {
			skb_queue_purge(&sar_queue);
			return PTR_ERR(skb);
		}

		__skb_queue_tail(&sar_queue, skb);
		len -= buflen;
		size += buflen;
	}
	skb_queue_splice_tail(&sar_queue, TX_QUEUE(sk));
	if (sk->sk_send_head == NULL)
		sk->sk_send_head = sar_queue.next;

	return size;
}
1815
/*
 * sendmsg() handler.  Dispatches on socket type and channel mode:
 * SOCK_DGRAM sends a connectionless PDU immediately; basic mode builds
 * and sends a single PDU bounded by the outgoing MTU; ERTM/streaming
 * queue one I-frame (or a SAR-segmented chain) and then transmit
 * according to the mode's rules.
 */
static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct sk_buff *skb;
	u16 control;
	int err;

	BT_DBG("sock %p, sk %p", sock, sk);

	err = sock_error(sk);
	if (err)
		return err;

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	lock_sock(sk);

	if (sk->sk_state != BT_CONNECTED) {
		err = -ENOTCONN;
		goto done;
	}

	/* Connectionless channel */
	if (sk->sk_type == SOCK_DGRAM) {
		skb = l2cap_create_connless_pdu(sk, msg, len);
		if (IS_ERR(skb)) {
			err = PTR_ERR(skb);
		} else {
			l2cap_do_send(sk, skb);
			err = len;
		}
		goto done;
	}

	switch (pi->mode) {
	case L2CAP_MODE_BASIC:
		/* Check outgoing MTU */
		if (len > pi->omtu) {
			err = -EMSGSIZE;
			goto done;
		}

		/* Create a basic PDU */
		skb = l2cap_create_basic_pdu(sk, msg, len);
		if (IS_ERR(skb)) {
			err = PTR_ERR(skb);
			goto done;
		}

		l2cap_do_send(sk, skb);
		err = len;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* Entire SDU fits into one PDU */
		if (len <= pi->remote_mps) {
			control = L2CAP_SDU_UNSEGMENTED;
			skb = l2cap_create_iframe_pdu(sk, msg, len, control, 0);
			if (IS_ERR(skb)) {
				err = PTR_ERR(skb);
				goto done;
			}
			__skb_queue_tail(TX_QUEUE(sk), skb);

			if (sk->sk_send_head == NULL)
				sk->sk_send_head = skb;

		} else {
			/* Segment SDU into multiples PDUs */
			err = l2cap_sar_segment_sdu(sk, msg, len);
			if (err < 0)
				goto done;
		}

		if (pi->mode == L2CAP_MODE_STREAMING) {
			l2cap_streaming_send(sk);
		} else {
			/* Remote busy while we await its F bit: leave the
			 * data queued; it goes out once the poll completes.
			 * Note the bitwise & on both flag tests. */
			if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
					(pi->conn_state & L2CAP_CONN_WAIT_F)) {
				err = len;
				break;
			}
			err = l2cap_ertm_send(sk);
		}

		if (err >= 0)
			err = len;
		break;

	default:
		BT_DBG("bad state %1.1x", pi->mode);
		err = -EBADFD;
	}

done:
	release_sock(sk);
	return err;
}
1917
/*
 * recvmsg() handler.  For a deferred-setup socket still in BT_CONNECT2,
 * the first read acts as "accept the channel": send the pending
 * connection response and, if not already done, the first configuration
 * request.  Otherwise defer to the generic bluetooth recvmsg helpers.
 */
static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags)
{
	struct sock *sk = sock->sk;

	lock_sock(sk);

	if (sk->sk_state == BT_CONNECT2 && bt_sk(sk)->defer_setup) {
		struct l2cap_conn_rsp rsp;
		struct l2cap_conn *conn = l2cap_pi(sk)->conn;
		u8 buf[128];

		sk->sk_state = BT_CONFIG;

		/* Answer the deferred connection request with success. */
		rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
		rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
		rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
		rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
		l2cap_send_cmd(l2cap_pi(sk)->conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_RSP, sizeof(rsp), &rsp);

		if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT) {
			release_sock(sk);
			return 0;
		}

		/* Kick off configuration if we have not already. */
		l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
				l2cap_build_conf_req(sk, buf), buf);
		l2cap_pi(sk)->num_conf_req++;

		release_sock(sk);
		return 0;
	}

	release_sock(sk);

	if (sock->type == SOCK_STREAM)
		return bt_sock_stream_recvmsg(iocb, sock, msg, len, flags);

	return bt_sock_recvmsg(iocb, sock, msg, len, flags);
}
1959
/*
 * setsockopt() handler for the legacy SOL_L2CAP option level.
 * L2CAP_OPTIONS updates the channel parameters (rejected once
 * connected); L2CAP_LM maps the legacy link-mode bits onto a security
 * level plus the role-switch / reliability flags.
 */
static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct l2cap_options opts;
	int len, err = 0;
	u32 opt;

	BT_DBG("sk %p", sk);

	lock_sock(sk);

	switch (optname) {
	case L2CAP_OPTIONS:
		if (sk->sk_state == BT_CONNECTED) {
			err = -EINVAL;
			break;
		}

		/* Pre-fill with current values so a short copy from
		 * userspace leaves the unsupplied fields unchanged. */
		opts.imtu = l2cap_pi(sk)->imtu;
		opts.omtu = l2cap_pi(sk)->omtu;
		opts.flush_to = l2cap_pi(sk)->flush_to;
		opts.mode = l2cap_pi(sk)->mode;
		opts.fcs = l2cap_pi(sk)->fcs;
		opts.max_tx = l2cap_pi(sk)->max_tx;
		opts.txwin_size = (__u16)l2cap_pi(sk)->tx_win;

		len = min_t(unsigned int, sizeof(opts), optlen);
		if (copy_from_user((char *) &opts, optval, len)) {
			err = -EFAULT;
			break;
		}

		if (opts.txwin_size > L2CAP_DEFAULT_TX_WINDOW) {
			err = -EINVAL;
			break;
		}

		l2cap_pi(sk)->mode = opts.mode;
		switch (l2cap_pi(sk)->mode) {
		case L2CAP_MODE_BASIC:
			l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_STATE2_DEVICE;
			break;
		case L2CAP_MODE_ERTM:
		case L2CAP_MODE_STREAMING:
			if (!disable_ertm)
				break;
			/* fall through */
		default:
			/* NOTE(review): this only breaks the inner switch;
			 * the parameters below are still applied even though
			 * -EINVAL is returned.  Pre-existing behaviour -
			 * confirm before changing. */
			err = -EINVAL;
			break;
		}

		l2cap_pi(sk)->imtu = opts.imtu;
		l2cap_pi(sk)->omtu = opts.omtu;
		l2cap_pi(sk)->fcs = opts.fcs;
		l2cap_pi(sk)->max_tx = opts.max_tx;
		l2cap_pi(sk)->tx_win = (__u8)opts.txwin_size;
		break;

	case L2CAP_LM:
		if (get_user(opt, (u32 __user *) optval)) {
			err = -EFAULT;
			break;
		}

		/* Highest requested bit wins. */
		if (opt & L2CAP_LM_AUTH)
			l2cap_pi(sk)->sec_level = BT_SECURITY_LOW;
		if (opt & L2CAP_LM_ENCRYPT)
			l2cap_pi(sk)->sec_level = BT_SECURITY_MEDIUM;
		if (opt & L2CAP_LM_SECURE)
			l2cap_pi(sk)->sec_level = BT_SECURITY_HIGH;

		l2cap_pi(sk)->role_switch = (opt & L2CAP_LM_MASTER);
		l2cap_pi(sk)->force_reliable = (opt & L2CAP_LM_RELIABLE);
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

	release_sock(sk);
	return err;
}
2044
/*
 * setsockopt() handler.  SOL_L2CAP is routed to the legacy handler;
 * SOL_BLUETOOTH supports BT_SECURITY (connection-oriented and raw
 * sockets only) and BT_DEFER_SETUP (bound/listening sockets only).
 */
static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct bt_security sec;
	int len, err = 0;
	u32 opt;

	BT_DBG("sk %p", sk);

	if (level == SOL_L2CAP)
		return l2cap_sock_setsockopt_old(sock, optname, optval, optlen);

	if (level != SOL_BLUETOOTH)
		return -ENOPROTOOPT;

	lock_sock(sk);

	switch (optname) {
	case BT_SECURITY:
		if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
				&& sk->sk_type != SOCK_RAW) {
			err = -EINVAL;
			break;
		}

		/* Default applies when the caller copies fewer bytes. */
		sec.level = BT_SECURITY_LOW;

		len = min_t(unsigned int, sizeof(sec), optlen);
		if (copy_from_user((char *) &sec, optval, len)) {
			err = -EFAULT;
			break;
		}

		if (sec.level < BT_SECURITY_LOW ||
					sec.level > BT_SECURITY_HIGH) {
			err = -EINVAL;
			break;
		}

		l2cap_pi(sk)->sec_level = sec.level;
		break;

	case BT_DEFER_SETUP:
		if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
			err = -EINVAL;
			break;
		}

		if (get_user(opt, (u32 __user *) optval)) {
			err = -EFAULT;
			break;
		}

		bt_sk(sk)->defer_setup = opt;
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

	release_sock(sk);
	return err;
}
2109
2110 static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen)
2111 {
2112 struct sock *sk = sock->sk;
2113 struct l2cap_options opts;
2114 struct l2cap_conninfo cinfo;
2115 int len, err = 0;
2116 u32 opt;
2117
2118 BT_DBG("sk %p", sk);
2119
2120 if (get_user(len, optlen))
2121 return -EFAULT;
2122
2123 lock_sock(sk);
2124
2125 switch (optname) {
2126 case L2CAP_OPTIONS:
2127 opts.imtu = l2cap_pi(sk)->imtu;
2128 opts.omtu = l2cap_pi(sk)->omtu;
2129 opts.flush_to = l2cap_pi(sk)->flush_to;
2130 opts.mode = l2cap_pi(sk)->mode;
2131 opts.fcs = l2cap_pi(sk)->fcs;
2132 opts.max_tx = l2cap_pi(sk)->max_tx;
2133 opts.txwin_size = (__u16)l2cap_pi(sk)->tx_win;
2134
2135 len = min_t(unsigned int, len, sizeof(opts));
2136 if (copy_to_user(optval, (char *) &opts, len))
2137 err = -EFAULT;
2138
2139 break;
2140
2141 case L2CAP_LM:
2142 switch (l2cap_pi(sk)->sec_level) {
2143 case BT_SECURITY_LOW:
2144 opt = L2CAP_LM_AUTH;
2145 break;
2146 case BT_SECURITY_MEDIUM:
2147 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT;
2148 break;
2149 case BT_SECURITY_HIGH:
2150 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT |
2151 L2CAP_LM_SECURE;
2152 break;
2153 default:
2154 opt = 0;
2155 break;
2156 }
2157
2158 if (l2cap_pi(sk)->role_switch)
2159 opt |= L2CAP_LM_MASTER;
2160
2161 if (l2cap_pi(sk)->force_reliable)
2162 opt |= L2CAP_LM_RELIABLE;
2163
2164 if (put_user(opt, (u32 __user *) optval))
2165 err = -EFAULT;
2166 break;
2167
2168 case L2CAP_CONNINFO:
2169 if (sk->sk_state != BT_CONNECTED &&
2170 !(sk->sk_state == BT_CONNECT2 &&
2171 bt_sk(sk)->defer_setup)) {
2172 err = -ENOTCONN;
2173 break;
2174 }
2175
2176 cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
2177 memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);
2178
2179 len = min_t(unsigned int, len, sizeof(cinfo));
2180 if (copy_to_user(optval, (char *) &cinfo, len))
2181 err = -EFAULT;
2182
2183 break;
2184
2185 default:
2186 err = -ENOPROTOOPT;
2187 break;
2188 }
2189
2190 release_sock(sk);
2191 return err;
2192 }
2193
2194 static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
2195 {
2196 struct sock *sk = sock->sk;
2197 struct bt_security sec;
2198 int len, err = 0;
2199
2200 BT_DBG("sk %p", sk);
2201
2202 if (level == SOL_L2CAP)
2203 return l2cap_sock_getsockopt_old(sock, optname, optval, optlen);
2204
2205 if (level != SOL_BLUETOOTH)
2206 return -ENOPROTOOPT;
2207
2208 if (get_user(len, optlen))
2209 return -EFAULT;
2210
2211 lock_sock(sk);
2212
2213 switch (optname) {
2214 case BT_SECURITY:
2215 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
2216 && sk->sk_type != SOCK_RAW) {
2217 err = -EINVAL;
2218 break;
2219 }
2220
2221 sec.level = l2cap_pi(sk)->sec_level;
2222
2223 len = min_t(unsigned int, len, sizeof(sec));
2224 if (copy_to_user(optval, (char *) &sec, len))
2225 err = -EFAULT;
2226
2227 break;
2228
2229 case BT_DEFER_SETUP:
2230 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
2231 err = -EINVAL;
2232 break;
2233 }
2234
2235 if (put_user(bt_sk(sk)->defer_setup, (u32 __user *) optval))
2236 err = -EFAULT;
2237
2238 break;
2239
2240 default:
2241 err = -ENOPROTOOPT;
2242 break;
2243 }
2244
2245 release_sock(sk);
2246 return err;
2247 }
2248
/*
 * shutdown() handler.  For ERTM channels, first wait for outstanding
 * I-frames to be acknowledged so queued data is not lost; then close
 * the channel and, with SO_LINGER set, wait for it to reach BT_CLOSED.
 */
static int l2cap_sock_shutdown(struct socket *sock, int how)
{
	struct sock *sk = sock->sk;
	int err = 0;

	BT_DBG("sock %p, sk %p", sock, sk);

	if (!sk)
		return 0;

	lock_sock(sk);
	if (!sk->sk_shutdown) {
		if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
			err = __l2cap_wait_ack(sk);

		sk->sk_shutdown = SHUTDOWN_MASK;
		l2cap_sock_clear_timer(sk);
		__l2cap_sock_close(sk, 0);

		if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
			err = bt_sock_wait_state(sk, BT_CLOSED,
							sk->sk_lingertime);
	}

	/* Surface any pending asynchronous socket error. */
	if (!err && sk->sk_err)
		err = -sk->sk_err;

	release_sock(sk);
	return err;
}
2279
2280 static int l2cap_sock_release(struct socket *sock)
2281 {
2282 struct sock *sk = sock->sk;
2283 int err;
2284
2285 BT_DBG("sock %p, sk %p", sock, sk);
2286
2287 if (!sk)
2288 return 0;
2289
2290 err = l2cap_sock_shutdown(sock, 2);
2291
2292 sock_orphan(sk);
2293 l2cap_sock_kill(sk);
2294 return err;
2295 }
2296
/*
 * Called when channel configuration completes.  Clears transient
 * configuration state and either wakes a sleeping connect()er
 * (outgoing channel) or notifies the listening parent so accept() can
 * pick the child up (incoming channel).
 */
static void l2cap_chan_ready(struct sock *sk)
{
	struct sock *parent = bt_sk(sk)->parent;

	BT_DBG("sk %p, parent %p", sk, parent);

	l2cap_pi(sk)->conf_state = 0;
	l2cap_sock_clear_timer(sk);

	if (!parent) {
		/* Outgoing channel.
		 * Wake up socket sleeping on connect.
		 */
		sk->sk_state = BT_CONNECTED;
		sk->sk_state_change(sk);
	} else {
		/* Incoming channel.
		 * Wake up socket sleeping on accept.
		 */
		parent->sk_data_ready(parent, 0);
	}
}
2319
/* Copy frame to all raw sockets on that connection */
static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct l2cap_chan_list *l = &conn->chan_list;
	struct sk_buff *nskb;
	struct sock *sk;

	BT_DBG("conn %p", conn);

	read_lock(&l->lock);
	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		if (sk->sk_type != SOCK_RAW)
			continue;

		/* Don't send frame to the socket it came from */
		if (skb->sk == sk)
			continue;
		/* Best effort: a failed clone just skips this socket. */
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			continue;

		/* Queue-full or filter rejection drops the clone. */
		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}
	read_unlock(&l->lock);
}
2346
2347 /* ---- L2CAP signalling commands ---- */
/*
 * Build a signalling PDU (L2CAP header + command header + payload) on
 * the signalling CID.  Payload beyond the connection MTU is spread over
 * continuation fragments chained on frag_list.  Returns NULL on OOM.
 */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
					u8 code, u8 ident, u16 dlen, void *data)
{
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	int len, count;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
			conn, code, ident, dlen);

	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, conn->mtu, len);

	skb = bt_skb_alloc(count, GFP_ATOMIC);
	if (!skb)
		return NULL;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
	lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);

	cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->code = code;
	cmd->ident = ident;
	cmd->len = cpu_to_le16(dlen);

	if (dlen) {
		/* First fragment: whatever payload fits after the headers. */
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		memcpy(skb_put(skb, count), data, count);
		data += count;
	}

	len -= skb->len;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_alloc(count, GFP_ATOMIC);
		if (!*frag)
			goto fail;

		memcpy(skb_put(*frag, count), data, count);

		len -= count;
		data += count;

		frag = &(*frag)->next;
	}

	return skb;

fail:
	/* Frees the fragments linked so far as well. */
	kfree_skb(skb);
	return NULL;
}
2406
/*
 * Parse one configuration option (TLV) at *ptr, advancing *ptr past it.
 * 1/2/4-byte values are decoded into *val; for any other length *val is
 * a pointer to the raw option payload.  Returns the total option size.
 */
static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
{
	struct l2cap_conf_opt *opt = *ptr;
	int len;

	len = L2CAP_CONF_OPT_SIZE + opt->len;
	*ptr += len;

	*type = opt->type;
	*olen = opt->len;

	switch (opt->len) {
	case 1:
		*val = *((u8 *) opt->val);
		break;

	case 2:
		*val = get_unaligned_le16(opt->val);
		break;

	case 4:
		*val = get_unaligned_le32(opt->val);
		break;

	default:
		/* Variable-length option: hand back a pointer. */
		*val = (unsigned long) opt->val;
		break;
	}

	BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
	return len;
}
2439
2440 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2441 {
2442 struct l2cap_conf_opt *opt = *ptr;
2443
2444 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
2445
2446 opt->type = type;
2447 opt->len = len;
2448
2449 switch (len) {
2450 case 1:
2451 *((u8 *) opt->val) = val;
2452 break;
2453
2454 case 2:
2455 put_unaligned_le16(val, opt->val);
2456 break;
2457
2458 case 4:
2459 put_unaligned_le32(val, opt->val);
2460 break;
2461
2462 default:
2463 memcpy(opt->val, (void *) val, len);
2464 break;
2465 }
2466
2467 *ptr += L2CAP_CONF_OPT_SIZE + len;
2468 }
2469
/*
 * Ack timer: received I-frames have gone unacknowledged too long; send
 * an explicit (or piggy-backed) ack.  Timer (softirq) context, hence
 * bh_lock_sock().
 */
static void l2cap_ack_timeout(unsigned long arg)
{
	struct sock *sk = (void *) arg;

	bh_lock_sock(sk);
	l2cap_send_ack(l2cap_pi(sk));
	bh_unlock_sock(sk);
}
2478
/*
 * Initialize ERTM state for a freshly configured channel: reset the
 * sequence/window bookkeeping, set up the retransmission, monitor and
 * ack timers, the SREJ/busy queues, the local-busy worker, and install
 * the ERTM-aware backlog receive handler.
 */
static inline void l2cap_ertm_init(struct sock *sk)
{
	l2cap_pi(sk)->expected_ack_seq = 0;
	l2cap_pi(sk)->unacked_frames = 0;
	l2cap_pi(sk)->buffer_seq = 0;
	l2cap_pi(sk)->num_acked = 0;
	l2cap_pi(sk)->frames_sent = 0;

	setup_timer(&l2cap_pi(sk)->retrans_timer,
			l2cap_retrans_timeout, (unsigned long) sk);
	setup_timer(&l2cap_pi(sk)->monitor_timer,
			l2cap_monitor_timeout, (unsigned long) sk);
	setup_timer(&l2cap_pi(sk)->ack_timer,
			l2cap_ack_timeout, (unsigned long) sk);

	__skb_queue_head_init(SREJ_QUEUE(sk));
	__skb_queue_head_init(BUSY_QUEUE(sk));

	INIT_WORK(&l2cap_pi(sk)->busy_work, l2cap_busy_work);

	/* Frames landing on the backlog need ERTM processing too. */
	sk->sk_backlog_rcv = l2cap_ertm_data_rcv;
}
2501
2502 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2503 {
2504 switch (mode) {
2505 case L2CAP_MODE_STREAMING:
2506 case L2CAP_MODE_ERTM:
2507 if (l2cap_mode_supported(mode, remote_feat_mask))
2508 return mode;
2509 /* fall through */
2510 default:
2511 return L2CAP_MODE_BASIC;
2512 }
2513 }
2514
/* Build our Configuration Request into 'data' and return its length.
 * On the first request (no prior req/rsp exchanged) the channel mode is
 * (re)negotiated against the remote feature mask; the selected mode then
 * decides which options (MTU, RFC, FCS) are emitted.
 */
static int l2cap_build_conf_req(struct sock *sk, void *data)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = pi->mode };
	void *ptr = req->data;

	BT_DBG("sk %p", sk);

	/* Mode is only (re)selected on the very first exchange */
	if (pi->num_conf_req || pi->num_conf_rsp)
		goto done;

	switch (pi->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		/* STATE2_DEVICE pins the mode: refuse to downgrade it */
		if (pi->conf_state & L2CAP_CONF_STATE2_DEVICE)
			break;

		/* fall through */
	default:
		pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
		break;
	}

done:
	switch (pi->mode) {
	case L2CAP_MODE_BASIC:
		/* MTU option only when differing from the spec default */
		if (pi->imtu != L2CAP_DEFAULT_MTU)
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);

		/* Only advertise an explicit basic-mode RFC option when the
		 * remote understands ERTM/streaming at all */
		if (!(pi->conn->feat_mask & L2CAP_FEAT_ERTM) &&
				!(pi->conn->feat_mask & L2CAP_FEAT_STREAMING))
			break;

		rfc.mode = L2CAP_MODE_BASIC;
		rfc.txwin_size = 0;
		rfc.max_transmit = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size = 0;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
							(unsigned long) &rfc);
		break;

	case L2CAP_MODE_ERTM:
		rfc.mode = L2CAP_MODE_ERTM;
		rfc.txwin_size = pi->tx_win;
		rfc.max_transmit = pi->max_tx;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
		/* cap the PDU size so a full frame (plus the 10 bytes of
		 * header/control/FCS overhead) fits in the ACL MTU */
		if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
			rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
							(unsigned long) &rfc);

		if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		/* explicitly request "no FCS" when we or the peer opted out */
		if (pi->fcs == L2CAP_FCS_NONE ||
				pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
			pi->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
		}
		break;

	case L2CAP_MODE_STREAMING:
		rfc.mode = L2CAP_MODE_STREAMING;
		rfc.txwin_size = 0;
		rfc.max_transmit = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
		if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
			rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
							(unsigned long) &rfc);

		if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		if (pi->fcs == L2CAP_FCS_NONE ||
				pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
			pi->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
		}
		break;
	}

	/* FIXME: Need actual value of the flush timeout */
	//if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
	//   l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);

	req->dcid = cpu_to_le16(pi->dcid);
	req->flags = cpu_to_le16(0);

	return ptr - data;
}
2616
2617 static int l2cap_parse_conf_req(struct sock *sk, void *data)
2618 {
2619 struct l2cap_pinfo *pi = l2cap_pi(sk);
2620 struct l2cap_conf_rsp *rsp = data;
2621 void *ptr = rsp->data;
2622 void *req = pi->conf_req;
2623 int len = pi->conf_len;
2624 int type, hint, olen;
2625 unsigned long val;
2626 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2627 u16 mtu = L2CAP_DEFAULT_MTU;
2628 u16 result = L2CAP_CONF_SUCCESS;
2629
2630 BT_DBG("sk %p", sk);
2631
2632 while (len >= L2CAP_CONF_OPT_SIZE) {
2633 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
2634
2635 hint = type & L2CAP_CONF_HINT;
2636 type &= L2CAP_CONF_MASK;
2637
2638 switch (type) {
2639 case L2CAP_CONF_MTU:
2640 mtu = val;
2641 break;
2642
2643 case L2CAP_CONF_FLUSH_TO:
2644 pi->flush_to = val;
2645 break;
2646
2647 case L2CAP_CONF_QOS:
2648 break;
2649
2650 case L2CAP_CONF_RFC:
2651 if (olen == sizeof(rfc))
2652 memcpy(&rfc, (void *) val, olen);
2653 break;
2654
2655 case L2CAP_CONF_FCS:
2656 if (val == L2CAP_FCS_NONE)
2657 pi->conf_state |= L2CAP_CONF_NO_FCS_RECV;
2658
2659 break;
2660
2661 default:
2662 if (hint)
2663 break;
2664
2665 result = L2CAP_CONF_UNKNOWN;
2666 *((u8 *) ptr++) = type;
2667 break;
2668 }
2669 }
2670
2671 if (pi->num_conf_rsp || pi->num_conf_req > 1)
2672 goto done;
2673
2674 switch (pi->mode) {
2675 case L2CAP_MODE_STREAMING:
2676 case L2CAP_MODE_ERTM:
2677 if (!(pi->conf_state & L2CAP_CONF_STATE2_DEVICE)) {
2678 pi->mode = l2cap_select_mode(rfc.mode,
2679 pi->conn->feat_mask);
2680 break;
2681 }
2682
2683 if (pi->mode != rfc.mode)
2684 return -ECONNREFUSED;
2685
2686 break;
2687 }
2688
2689 done:
2690 if (pi->mode != rfc.mode) {
2691 result = L2CAP_CONF_UNACCEPT;
2692 rfc.mode = pi->mode;
2693
2694 if (pi->num_conf_rsp == 1)
2695 return -ECONNREFUSED;
2696
2697 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2698 sizeof(rfc), (unsigned long) &rfc);
2699 }
2700
2701
2702 if (result == L2CAP_CONF_SUCCESS) {
2703 /* Configure output options and let the other side know
2704 * which ones we don't like. */
2705
2706 if (mtu < L2CAP_DEFAULT_MIN_MTU)
2707 result = L2CAP_CONF_UNACCEPT;
2708 else {
2709 pi->omtu = mtu;
2710 pi->conf_state |= L2CAP_CONF_MTU_DONE;
2711 }
2712 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
2713
2714 switch (rfc.mode) {
2715 case L2CAP_MODE_BASIC:
2716 pi->fcs = L2CAP_FCS_NONE;
2717 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2718 break;
2719
2720 case L2CAP_MODE_ERTM:
2721 pi->remote_tx_win = rfc.txwin_size;
2722 pi->remote_max_tx = rfc.max_transmit;
2723
2724 if (le16_to_cpu(rfc.max_pdu_size) > pi->conn->mtu - 10)
2725 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
2726
2727 pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
2728
2729 rfc.retrans_timeout =
2730 le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO);
2731 rfc.monitor_timeout =
2732 le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO);
2733
2734 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2735
2736 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2737 sizeof(rfc), (unsigned long) &rfc);
2738
2739 break;
2740
2741 case L2CAP_MODE_STREAMING:
2742 if (le16_to_cpu(rfc.max_pdu_size) > pi->conn->mtu - 10)
2743 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
2744
2745 pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
2746
2747 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2748
2749 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2750 sizeof(rfc), (unsigned long) &rfc);
2751
2752 break;
2753
2754 default:
2755 result = L2CAP_CONF_UNACCEPT;
2756
2757 memset(&rfc, 0, sizeof(rfc));
2758 rfc.mode = pi->mode;
2759 }
2760
2761 if (result == L2CAP_CONF_SUCCESS)
2762 pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
2763 }
2764 rsp->scid = cpu_to_le16(pi->dcid);
2765 rsp->result = cpu_to_le16(result);
2766 rsp->flags = cpu_to_le16(0x0000);
2767
2768 return ptr - data;
2769 }
2770
2771 static int l2cap_parse_conf_rsp(struct sock *sk, void *rsp, int len, void *data, u16 *result)
2772 {
2773 struct l2cap_pinfo *pi = l2cap_pi(sk);
2774 struct l2cap_conf_req *req = data;
2775 void *ptr = req->data;
2776 int type, olen;
2777 unsigned long val;
2778 struct l2cap_conf_rfc rfc;
2779
2780 BT_DBG("sk %p, rsp %p, len %d, req %p", sk, rsp, len, data);
2781
2782 while (len >= L2CAP_CONF_OPT_SIZE) {
2783 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2784
2785 switch (type) {
2786 case L2CAP_CONF_MTU:
2787 if (val < L2CAP_DEFAULT_MIN_MTU) {
2788 *result = L2CAP_CONF_UNACCEPT;
2789 pi->imtu = L2CAP_DEFAULT_MIN_MTU;
2790 } else
2791 pi->imtu = val;
2792 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
2793 break;
2794
2795 case L2CAP_CONF_FLUSH_TO:
2796 pi->flush_to = val;
2797 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2798 2, pi->flush_to);
2799 break;
2800
2801 case L2CAP_CONF_RFC:
2802 if (olen == sizeof(rfc))
2803 memcpy(&rfc, (void *)val, olen);
2804
2805 if ((pi->conf_state & L2CAP_CONF_STATE2_DEVICE) &&
2806 rfc.mode != pi->mode)
2807 return -ECONNREFUSED;
2808
2809 pi->fcs = 0;
2810
2811 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2812 sizeof(rfc), (unsigned long) &rfc);
2813 break;
2814 }
2815 }
2816
2817 if (pi->mode == L2CAP_MODE_BASIC && pi->mode != rfc.mode)
2818 return -ECONNREFUSED;
2819
2820 pi->mode = rfc.mode;
2821
2822 if (*result == L2CAP_CONF_SUCCESS) {
2823 switch (rfc.mode) {
2824 case L2CAP_MODE_ERTM:
2825 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2826 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2827 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2828 break;
2829 case L2CAP_MODE_STREAMING:
2830 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2831 }
2832 }
2833
2834 req->dcid = cpu_to_le16(pi->dcid);
2835 req->flags = cpu_to_le16(0x0000);
2836
2837 return ptr - data;
2838 }
2839
2840 static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
2841 {
2842 struct l2cap_conf_rsp *rsp = data;
2843 void *ptr = rsp->data;
2844
2845 BT_DBG("sk %p", sk);
2846
2847 rsp->scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2848 rsp->result = cpu_to_le16(result);
2849 rsp->flags = cpu_to_le16(flags);
2850
2851 return ptr - data;
2852 }
2853
2854 static void l2cap_conf_rfc_get(struct sock *sk, void *rsp, int len)
2855 {
2856 struct l2cap_pinfo *pi = l2cap_pi(sk);
2857 int type, olen;
2858 unsigned long val;
2859 struct l2cap_conf_rfc rfc;
2860
2861 BT_DBG("sk %p, rsp %p, len %d", sk, rsp, len);
2862
2863 if ((pi->mode != L2CAP_MODE_ERTM) && (pi->mode != L2CAP_MODE_STREAMING))
2864 return;
2865
2866 while (len >= L2CAP_CONF_OPT_SIZE) {
2867 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2868
2869 switch (type) {
2870 case L2CAP_CONF_RFC:
2871 if (olen == sizeof(rfc))
2872 memcpy(&rfc, (void *)val, olen);
2873 goto done;
2874 }
2875 }
2876
2877 done:
2878 switch (rfc.mode) {
2879 case L2CAP_MODE_ERTM:
2880 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2881 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2882 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2883 break;
2884 case L2CAP_MODE_STREAMING:
2885 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2886 }
2887 }
2888
/* Handle an incoming Command Reject.  Only reason 0x0000 ("command not
 * understood") is acted upon: if it rejects our outstanding Information
 * Request, treat the feature-mask exchange as finished and kick off
 * channel establishment anyway.
 */
static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;

	if (rej->reason != 0x0000)
		return 0;

	if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
					cmd->ident == conn->info_ident) {
		del_timer(&conn->info_timer);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
	}

	return 0;
}
2908
/* Handle an incoming Connection Request: find a listening socket for the
 * requested PSM, perform security checks, allocate and link a child
 * socket, and always answer with a Connection Response.  If the
 * feature-mask exchange with the remote is not done yet, the response is
 * "pending" and an Information Request is (re)issued.
 */
static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_chan_list *list = &conn->chan_list;
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct sock *parent, *sk = NULL;
	int result, status = L2CAP_CS_NO_INFO;

	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);

	/* Check if we have socket listening on psm */
	parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
	if (!parent) {
		result = L2CAP_CR_BAD_PSM;
		goto sendresp;
	}

	bh_lock_sock(parent);

	/* Check if the ACL is secure enough (if not SDP; PSM 0x0001 is
	 * SDP and is exempt from link-mode checks) */
	if (psm != cpu_to_le16(0x0001) &&
				!hci_conn_check_link_mode(conn->hcon)) {
		/* NOTE(review): 0x05 appears to be the HCI disconnect
		 * reason recorded for the security failure — confirm */
		conn->disc_reason = 0x05;
		result = L2CAP_CR_SEC_BLOCK;
		goto response;
	}

	result = L2CAP_CR_NO_MEM;

	/* Check for backlog size */
	if (sk_acceptq_is_full(parent)) {
		BT_DBG("backlog full %d", parent->sk_ack_backlog);
		goto response;
	}

	sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
	if (!sk)
		goto response;

	write_lock_bh(&list->lock);

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(list, scid)) {
		write_unlock_bh(&list->lock);
		sock_set_flag(sk, SOCK_ZAPPED);
		l2cap_sock_kill(sk);
		goto response;
	}

	hci_conn_hold(conn->hcon);

	/* inherit settings from the listening socket and bind the
	 * addresses/CIDs of this channel */
	l2cap_sock_init(sk, parent);
	bacpy(&bt_sk(sk)->src, conn->src);
	bacpy(&bt_sk(sk)->dst, conn->dst);
	l2cap_pi(sk)->psm = psm;
	l2cap_pi(sk)->dcid = scid;

	__l2cap_chan_add(conn, sk, parent);
	dcid = l2cap_pi(sk)->scid;

	l2cap_sock_set_timer(sk, sk->sk_sndtimeo);

	l2cap_pi(sk)->ident = cmd->ident;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
		if (l2cap_check_security(sk)) {
			if (bt_sk(sk)->defer_setup) {
				/* userspace must accept() first */
				sk->sk_state = BT_CONNECT2;
				result = L2CAP_CR_PEND;
				status = L2CAP_CS_AUTHOR_PEND;
				parent->sk_data_ready(parent, 0);
			} else {
				sk->sk_state = BT_CONFIG;
				result = L2CAP_CR_SUCCESS;
				status = L2CAP_CS_NO_INFO;
			}
		} else {
			sk->sk_state = BT_CONNECT2;
			result = L2CAP_CR_PEND;
			status = L2CAP_CS_AUTHEN_PEND;
		}
	} else {
		/* feature mask still unknown: answer "pending" */
		sk->sk_state = BT_CONNECT2;
		result = L2CAP_CR_PEND;
		status = L2CAP_CS_NO_INFO;
	}

	write_unlock_bh(&list->lock);

response:
	bh_unlock_sock(parent);

sendresp:
	rsp.scid = cpu_to_le16(scid);
	rsp.dcid = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);

	/* pending with no info: (re)start the feature-mask exchange */
	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
		struct l2cap_info_req info;
		info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		mod_timer(&conn->info_timer, jiffies +
					msecs_to_jiffies(L2CAP_INFO_TIMEOUT));

		l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(info), &info);
	}

	/* channel accepted outright: start configuration immediately */
	if (sk && !(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT) &&
				result == L2CAP_CR_SUCCESS) {
		u8 buf[128];
		l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(sk, buf), buf);
		l2cap_pi(sk)->num_conf_req++;
	}

	return 0;
}
3036
/* Handle an incoming Connection Response.  The channel is looked up by
 * our source CID when present, otherwise by the command identifier of
 * the original request (the lookup helpers return with the socket
 * bh-locked).  Success moves the channel to BT_CONFIG and sends the
 * first Configuration Request; failure tears the channel down.
 */
static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct sock *sk;
	u8 req[128];

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);

	if (scid) {
		sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
		if (!sk)
			return -EFAULT;
	} else {
		sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident);
		if (!sk)
			return -EFAULT;
	}

	switch (result) {
	case L2CAP_CR_SUCCESS:
		sk->sk_state = BT_CONFIG;
		l2cap_pi(sk)->ident = 0;
		l2cap_pi(sk)->dcid = dcid;
		l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_CONNECT_PEND;

		/* config request may already have gone out */
		if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)
			break;

		l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(sk, req), req);
		l2cap_pi(sk)->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
		break;

	default:
		/* don't delete l2cap channel if sk is owned by user */
		if (sock_owned_by_user(sk)) {
			sk->sk_state = BT_DISCONN;
			l2cap_sock_clear_timer(sk);
			/* retry the teardown shortly from the timer */
			l2cap_sock_set_timer(sk, HZ / 5);
			break;
		}

		l2cap_chan_del(sk, ECONNREFUSED);
		break;
	}

	bh_unlock_sock(sk);
	return 0;
}
3098
3099 static inline void set_default_fcs(struct l2cap_pinfo *pi)
3100 {
3101 /* FCS is enabled only in ERTM or streaming mode, if one or both
3102 * sides request it.
3103 */
3104 if (pi->mode != L2CAP_MODE_ERTM && pi->mode != L2CAP_MODE_STREAMING)
3105 pi->fcs = L2CAP_FCS_NONE;
3106 else if (!(pi->conf_state & L2CAP_CONF_NO_FCS_RECV))
3107 pi->fcs = L2CAP_FCS_CRC16;
3108 }
3109
/* Handle an incoming Configuration Request.  Option data may be split
 * over several requests (continuation flag in 'flags'); fragments are
 * accumulated in pi->conf_req and only parsed once the final fragment
 * arrives.  Completing both config directions moves the channel to
 * BT_CONNECTED.
 */
static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rsp[64];
	struct sock *sk;
	int len;

	dcid = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	/* returns with the socket bh-locked */
	sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
	if (!sk)
		return -ENOENT;

	if (sk->sk_state != BT_CONFIG) {
		struct l2cap_cmd_rej rej;

		/* reason 0x0002: invalid CID in request */
		rej.reason = cpu_to_le16(0x0002);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
				sizeof(rej), &rej);
		goto unlock;
	}

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(sk, rsp,
					L2CAP_CONF_REJECT, flags), rsp);
		goto unlock;
	}

	/* Store config. */
	memcpy(l2cap_pi(sk)->conf_req + l2cap_pi(sk)->conf_len, req->data, len);
	l2cap_pi(sk)->conf_len += len;

	if (flags & 0x0001) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(sk, rsp,
					L2CAP_CONF_SUCCESS, 0x0001), rsp);
		goto unlock;
	}

	/* Complete config. */
	len = l2cap_parse_conf_req(sk, rsp);
	if (len < 0) {
		l2cap_send_disconn_req(conn, sk, ECONNRESET);
		goto unlock;
	}

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
	l2cap_pi(sk)->num_conf_rsp++;

	/* Reset config buffer. */
	l2cap_pi(sk)->conf_len = 0;

	if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))
		goto unlock;

	/* both directions configured: channel is usable */
	if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
		set_default_fcs(l2cap_pi(sk));

		sk->sk_state = BT_CONNECTED;

		l2cap_pi(sk)->next_tx_seq = 0;
		l2cap_pi(sk)->expected_tx_seq = 0;
		__skb_queue_head_init(TX_QUEUE(sk));
		if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
			l2cap_ertm_init(sk);

		l2cap_chan_ready(sk);
		goto unlock;
	}

	/* our own config request has not gone out yet: send it now */
	if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
		u8 buf[64];
		l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(sk, buf), buf);
		l2cap_pi(sk)->num_conf_req++;
	}

unlock:
	bh_unlock_sock(sk);
	return 0;
}
3200
3201 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3202 {
3203 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
3204 u16 scid, flags, result;
3205 struct sock *sk;
3206 int len = cmd->len - sizeof(*rsp);
3207
3208 scid = __le16_to_cpu(rsp->scid);
3209 flags = __le16_to_cpu(rsp->flags);
3210 result = __le16_to_cpu(rsp->result);
3211
3212 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
3213 scid, flags, result);
3214
3215 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
3216 if (!sk)
3217 return 0;
3218
3219 switch (result) {
3220 case L2CAP_CONF_SUCCESS:
3221 l2cap_conf_rfc_get(sk, rsp->data, len);
3222 break;
3223
3224 case L2CAP_CONF_UNACCEPT:
3225 if (l2cap_pi(sk)->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
3226 char req[64];
3227
3228 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
3229 l2cap_send_disconn_req(conn, sk, ECONNRESET);
3230 goto done;
3231 }
3232
3233 /* throw out any old stored conf requests */
3234 result = L2CAP_CONF_SUCCESS;
3235 len = l2cap_parse_conf_rsp(sk, rsp->data,
3236 len, req, &result);
3237 if (len < 0) {
3238 l2cap_send_disconn_req(conn, sk, ECONNRESET);
3239 goto done;
3240 }
3241
3242 l2cap_send_cmd(conn, l2cap_get_ident(conn),
3243 L2CAP_CONF_REQ, len, req);
3244 l2cap_pi(sk)->num_conf_req++;
3245 if (result != L2CAP_CONF_SUCCESS)
3246 goto done;
3247 break;
3248 }
3249
3250 default:
3251 sk->sk_err = ECONNRESET;
3252 l2cap_sock_set_timer(sk, HZ * 5);
3253 l2cap_send_disconn_req(conn, sk, ECONNRESET);
3254 goto done;
3255 }
3256
3257 if (flags & 0x01)
3258 goto done;
3259
3260 l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
3261
3262 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
3263 set_default_fcs(l2cap_pi(sk));
3264
3265 sk->sk_state = BT_CONNECTED;
3266 l2cap_pi(sk)->next_tx_seq = 0;
3267 l2cap_pi(sk)->expected_tx_seq = 0;
3268 __skb_queue_head_init(TX_QUEUE(sk));
3269 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
3270 l2cap_ertm_init(sk);
3271
3272 l2cap_chan_ready(sk);
3273 }
3274
3275 done:
3276 bh_unlock_sock(sk);
3277 return 0;
3278 }
3279
/* Handle an incoming Disconnection Request: acknowledge it with a
 * Disconnection Response, shut the socket down, and delete the channel
 * unless userspace currently owns the socket lock (in which case the
 * teardown is deferred to a short timer).
 */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct sock *sk;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	/* the peer's dcid is our scid; returns with socket bh-locked */
	sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
	if (!sk)
		return 0;

	rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
	rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	sk->sk_shutdown = SHUTDOWN_MASK;

	/* don't delete l2cap channel if sk is owned by user */
	if (sock_owned_by_user(sk)) {
		sk->sk_state = BT_DISCONN;
		l2cap_sock_clear_timer(sk);
		l2cap_sock_set_timer(sk, HZ / 5);
		bh_unlock_sock(sk);
		return 0;
	}

	l2cap_chan_del(sk, ECONNRESET);
	bh_unlock_sock(sk);

	l2cap_sock_kill(sk);
	return 0;
}
3317
/* Handle an incoming Disconnection Response: the peer confirmed our
 * disconnect, so delete the channel — deferred via a short timer when
 * userspace currently owns the socket lock.
 */
static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
	u16 dcid, scid;
	struct sock *sk;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);

	/* returns with the socket bh-locked */
	sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
	if (!sk)
		return 0;

	/* don't delete l2cap channel if sk is owned by user */
	if (sock_owned_by_user(sk)) {
		sk->sk_state = BT_DISCONN;
		l2cap_sock_clear_timer(sk);
		l2cap_sock_set_timer(sk, HZ / 5);
		bh_unlock_sock(sk);
		return 0;
	}

	/* err == 0: this is a clean, locally initiated disconnect */
	l2cap_chan_del(sk, 0);
	bh_unlock_sock(sk);

	l2cap_sock_kill(sk);
	return 0;
}
3348
/* Handle an incoming Information Request.  Supported queries are the
 * extended feature mask and the fixed channel map; anything else gets a
 * NOTSUPP response.
 */
static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_info_req *req = (struct l2cap_info_req *) data;
	u16 type;

	type = __le16_to_cpu(req->type);

	BT_DBG("type 0x%4.4x", type);

	if (type == L2CAP_IT_FEAT_MASK) {
		/* 4-byte rsp header + 4-byte feature mask */
		u8 buf[8];
		u32 feat_mask = l2cap_feat_mask;
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
		rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		/* only advertise ERTM/streaming/FCS when not disabled via
		 * the module parameter */
		if (!disable_ertm)
			feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
							| L2CAP_FEAT_FCS;
		put_unaligned_le32(feat_mask, rsp->data);
		l2cap_send_cmd(conn, cmd->ident,
					L2CAP_INFO_RSP, sizeof(buf), buf);
	} else if (type == L2CAP_IT_FIXED_CHAN) {
		/* 4-byte rsp header + 8-byte fixed channel bitmap */
		u8 buf[12];
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
		rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		memcpy(buf + 4, l2cap_fixed_chan, 8);
		l2cap_send_cmd(conn, cmd->ident,
					L2CAP_INFO_RSP, sizeof(buf), buf);
	} else {
		struct l2cap_info_rsp rsp;
		rsp.type = cpu_to_le16(type);
		rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
		l2cap_send_cmd(conn, cmd->ident,
					L2CAP_INFO_RSP, sizeof(rsp), &rsp);
	}

	return 0;
}
3388
/* Handle an incoming Information Response.  A feature-mask answer that
 * advertises fixed channels triggers a follow-up fixed-channel query;
 * every other outcome marks the info exchange done and lets pending
 * channels start connecting.
 */
static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
	u16 type, result;

	type = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);

	del_timer(&conn->info_timer);

	if (result != L2CAP_IR_SUCCESS) {
		/* peer refused the query: proceed without the info */
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);

		return 0;
	}

	if (type == L2CAP_IT_FEAT_MASK) {
		conn->feat_mask = get_unaligned_le32(rsp->data);

		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
			/* chain a fixed-channel query before finishing */
			struct l2cap_info_req req;
			req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);

			conn->info_ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(req), &req);
		} else {
			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
			conn->info_ident = 0;

			l2cap_conn_start(conn);
		}
	} else if (type == L2CAP_IT_FIXED_CHAN) {
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
	}

	return 0;
}
3436
/* Demultiplex the signalling channel: iterate over every command in the
 * skb, dispatch it to its handler, and answer any handler error with a
 * Command Reject.  Consumes the skb.
 */
static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
{
	u8 *data = skb->data;
	int len = skb->len;
	struct l2cap_cmd_hdr cmd;
	int err = 0;

	/* give raw sockets a copy of the signalling traffic first */
	l2cap_raw_recv(conn, skb);

	while (len >= L2CAP_CMD_HDR_SIZE) {
		u16 cmd_len;
		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
		data += L2CAP_CMD_HDR_SIZE;
		len -= L2CAP_CMD_HDR_SIZE;

		cmd_len = le16_to_cpu(cmd.len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);

		/* a claimed length past the buffer, or ident 0 (reserved),
		 * means a corrupted PDU: stop processing */
		if (cmd_len > len || !cmd.ident) {
			BT_DBG("corrupted command");
			break;
		}

		switch (cmd.code) {
		case L2CAP_COMMAND_REJ:
			l2cap_command_rej(conn, &cmd, data);
			break;

		case L2CAP_CONN_REQ:
			err = l2cap_connect_req(conn, &cmd, data);
			break;

		case L2CAP_CONN_RSP:
			err = l2cap_connect_rsp(conn, &cmd, data);
			break;

		case L2CAP_CONF_REQ:
			err = l2cap_config_req(conn, &cmd, cmd_len, data);
			break;

		case L2CAP_CONF_RSP:
			err = l2cap_config_rsp(conn, &cmd, data);
			break;

		case L2CAP_DISCONN_REQ:
			err = l2cap_disconnect_req(conn, &cmd, data);
			break;

		case L2CAP_DISCONN_RSP:
			err = l2cap_disconnect_rsp(conn, &cmd, data);
			break;

		case L2CAP_ECHO_REQ:
			/* echo back the request payload verbatim */
			l2cap_send_cmd(conn, cmd.ident, L2CAP_ECHO_RSP, cmd_len, data);
			break;

		case L2CAP_ECHO_RSP:
			break;

		case L2CAP_INFO_REQ:
			err = l2cap_information_req(conn, &cmd, data);
			break;

		case L2CAP_INFO_RSP:
			err = l2cap_information_rsp(conn, &cmd, data);
			break;

		default:
			BT_ERR("Unknown signaling command 0x%2.2x", cmd.code);
			err = -EINVAL;
			break;
		}

		if (err) {
			struct l2cap_cmd_rej rej;
			BT_DBG("error %d", err);

			/* FIXME: Map err to a valid reason */
			rej.reason = cpu_to_le16(0);
			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
		}

		data += cmd_len;
		len -= cmd_len;
	}

	kfree_skb(skb);
}
3526
/* Verify the trailing 2-byte FCS of a received frame when CRC16 is in
 * use.  The CRC covers the L2CAP header plus the 2-byte control field
 * (hdr_size) in addition to the payload.  Returns 0 on success or when
 * FCS is disabled, -EBADMSG on mismatch.
 */
static int l2cap_check_fcs(struct l2cap_pinfo *pi, struct sk_buff *skb)
{
	u16 our_fcs, rcv_fcs;
	int hdr_size = L2CAP_HDR_SIZE + 2;

	if (pi->fcs == L2CAP_FCS_CRC16) {
		/* chop the FCS off first; the bytes are still in the
		 * buffer, so they can be read just past the new length */
		skb_trim(skb, skb->len - 2);
		rcv_fcs = get_unaligned_le16(skb->data + skb->len);
		our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);

		if (our_fcs != rcv_fcs)
			return -EBADMSG;
	}
	return 0;
}
3542
/* After an F-bit exchange, resume transmission: send RNR if we are
 * locally busy, retransmit if the remote was busy, push pending
 * I-frames, and fall back to an RR when nothing else acknowledged the
 * peer.  Note: conn_state is a bitmask, so flags are tested with '&'.
 */
static inline void l2cap_send_i_or_rr_or_rnr(struct sock *sk)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u16 control = 0;

	pi->frames_sent = 0;

	control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;

	if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
		control |= L2CAP_SUPER_RCV_NOT_READY;
		l2cap_send_sframe(pi, control);
		pi->conn_state |= L2CAP_CONN_RNR_SENT;
	}

	if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY)
		l2cap_retransmit_frames(sk);

	l2cap_ertm_send(sk);

	/* nothing went out and we are not busy: ack with a plain RR */
	if (!(pi->conn_state & L2CAP_CONN_LOCAL_BUSY) &&
			pi->frames_sent == 0) {
		control |= L2CAP_SUPER_RCV_READY;
		l2cap_send_sframe(pi, control);
	}
}
3569
/* Insert an out-of-order I-frame into the SREJ queue, keeping the queue
 * sorted by tx_seq distance from buffer_seq (sequence numbers are
 * modulo 64, so offsets are normalized to 0..63).  Returns -EINVAL for
 * a duplicate tx_seq, 0 otherwise.
 */
static int l2cap_add_to_srej_queue(struct sock *sk, struct sk_buff *skb, u8 tx_seq, u8 sar)
{
	struct sk_buff *next_skb;
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	int tx_seq_offset, next_tx_seq_offset;

	bt_cb(skb)->tx_seq = tx_seq;
	bt_cb(skb)->sar = sar;

	next_skb = skb_peek(SREJ_QUEUE(sk));
	if (!next_skb) {
		__skb_queue_tail(SREJ_QUEUE(sk), skb);
		return 0;
	}

	/* distance of the new frame from the reassembly point */
	tx_seq_offset = (tx_seq - pi->buffer_seq) % 64;
	if (tx_seq_offset < 0)
		tx_seq_offset += 64;

	do {
		/* duplicate frame: reject it */
		if (bt_cb(next_skb)->tx_seq == tx_seq)
			return -EINVAL;

		next_tx_seq_offset = (bt_cb(next_skb)->tx_seq -
						pi->buffer_seq) % 64;
		if (next_tx_seq_offset < 0)
			next_tx_seq_offset += 64;

		/* found the first queued frame that sorts after us */
		if (next_tx_seq_offset > tx_seq_offset) {
			__skb_queue_before(SREJ_QUEUE(sk), next_skb, skb);
			return 0;
		}

		if (skb_queue_is_last(SREJ_QUEUE(sk), next_skb))
			break;

	} while ((next_skb = skb_queue_next(SREJ_QUEUE(sk), next_skb)));

	/* sorts after everything currently queued */
	__skb_queue_tail(SREJ_QUEUE(sk), skb);

	return 0;
}
3612
/* Reassemble an SDU from ERTM I-frames according to the SAR bits in
 * 'control'.  Unsegmented frames are delivered directly; START /
 * CONTINUE / END frames accumulate into pi->sdu.  SAR protocol
 * violations disconnect the channel; size violations drop the partial
 * SDU and then also disconnect (drop: deliberately falls through to
 * disconnect:).  Consumes 'skb' on all paths except -ENOMEM/queue-full,
 * where the caller may retry (SAR_RETRY).
 */
static int l2cap_ertm_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct sk_buff *_skb;
	int err;

	switch (control & L2CAP_CTRL_SAR) {
	case L2CAP_SDU_UNSEGMENTED:
		/* unsegmented frame while reassembly is in progress is a
		 * protocol violation */
		if (pi->conn_state & L2CAP_CONN_SAR_SDU)
			goto drop;

		err = sock_queue_rcv_skb(sk, skb);
		if (!err)
			return err;

		break;

	case L2CAP_SDU_START:
		if (pi->conn_state & L2CAP_CONN_SAR_SDU)
			goto drop;

		/* first two payload bytes carry the total SDU length */
		pi->sdu_len = get_unaligned_le16(skb->data);

		if (pi->sdu_len > pi->imtu)
			goto disconnect;

		pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
		if (!pi->sdu)
			return -ENOMEM;

		/* pull sdu_len bytes only after alloc, because of Local Busy
		 * condition we have to be sure that this will be executed
		 * only once, i.e., when alloc does not fail */
		skb_pull(skb, 2);

		memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);

		pi->conn_state |= L2CAP_CONN_SAR_SDU;
		pi->partial_sdu_len = skb->len;
		break;

	case L2CAP_SDU_CONTINUE:
		if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
			goto disconnect;

		if (!pi->sdu)
			goto disconnect;

		pi->partial_sdu_len += skb->len;
		if (pi->partial_sdu_len > pi->sdu_len)
			goto drop;

		memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);

		break;

	case L2CAP_SDU_END:
		if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
			goto disconnect;

		if (!pi->sdu)
			goto disconnect;

		/* on a retry the payload was already appended last time */
		if (!(pi->conn_state & L2CAP_CONN_SAR_RETRY)) {
			pi->partial_sdu_len += skb->len;

			if (pi->partial_sdu_len > pi->imtu)
				goto drop;

			/* END frame must complete the announced length */
			if (pi->partial_sdu_len != pi->sdu_len)
				goto drop;

			memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
		}

		_skb = skb_clone(pi->sdu, GFP_ATOMIC);
		if (!_skb) {
			pi->conn_state |= L2CAP_CONN_SAR_RETRY;
			return -ENOMEM;
		}

		err = sock_queue_rcv_skb(sk, _skb);
		if (err < 0) {
			kfree_skb(_skb);
			pi->conn_state |= L2CAP_CONN_SAR_RETRY;
			return err;
		}

		pi->conn_state &= ~L2CAP_CONN_SAR_RETRY;
		pi->conn_state &= ~L2CAP_CONN_SAR_SDU;

		kfree_skb(pi->sdu);
		break;
	}

	kfree_skb(skb);
	return 0;

drop:
	kfree_skb(pi->sdu);
	pi->sdu = NULL;

	/* falls through: a dropped SDU also tears the channel down */
disconnect:
	l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
	kfree_skb(skb);
	return 0;
}
3720
/* Drain frames parked on the busy queue while the channel was in local
 * busy state. Returns 0 once the queue is empty and local busy has been
 * exited, or -EBUSY if a frame still cannot be delivered. */
static int l2cap_try_push_rx_skb(struct sock *sk)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct sk_buff *skb;
	u16 control;
	int err;

	while ((skb = skb_dequeue(BUSY_QUEUE(sk)))) {
		control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
		err = l2cap_ertm_reassembly_sdu(sk, skb, control);
		if (err < 0) {
			/* Still busy: park the frame again for a later try. */
			skb_queue_head(BUSY_QUEUE(sk), skb);
			return -EBUSY;
		}

		pi->buffer_seq = (pi->buffer_seq + 1) % 64;
	}

	if (!(pi->conn_state & L2CAP_CONN_RNR_SENT))
		goto done;

	/* We previously told the remote we were busy (RNR); poll it with
	 * an RR + P-bit frame and wait for the final response. */
	control = pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
	control |= L2CAP_SUPER_RCV_READY | L2CAP_CTRL_POLL;
	l2cap_send_sframe(pi, control);
	l2cap_pi(sk)->retry_count = 1;

	del_timer(&pi->retrans_timer);
	__mod_monitor_timer();

	l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;

done:
	pi->conn_state &= ~L2CAP_CONN_LOCAL_BUSY;
	pi->conn_state &= ~L2CAP_CONN_RNR_SENT;

	BT_DBG("sk %p, Exit local busy", sk);

	return 0;
}
3760
/* Workqueue handler started when the receive side enters the local busy
 * condition: periodically retry delivering the parked frames until the
 * busy queue drains, the retry budget runs out, a signal arrives, or
 * the socket errors out. */
static void l2cap_busy_work(struct work_struct *work)
{
	DECLARE_WAITQUEUE(wait, current);
	struct l2cap_pinfo *pi =
		container_of(work, struct l2cap_pinfo, busy_work);
	struct sock *sk = (struct sock *)pi;
	int n_tries = 0, timeo = HZ/5, err;
	struct sk_buff *skb;

	lock_sock(sk);

	add_wait_queue(sk_sleep(sk), &wait);
	while ((skb = skb_peek(BUSY_QUEUE(sk)))) {
		set_current_state(TASK_INTERRUPTIBLE);

		/* Give up and disconnect after too many retries. */
		if (n_tries++ > L2CAP_LOCAL_BUSY_TRIES) {
			err = -EBUSY;
			l2cap_send_disconn_req(pi->conn, sk, EBUSY);
			break;
		}

		if (!timeo)
			timeo = HZ/5;

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}

		/* Sleep with the socket unlocked so the receive path can
		 * make progress, then re-take the lock. */
		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);

		err = sock_error(sk);
		if (err)
			break;

		/* 0 means the busy queue fully drained. */
		if (l2cap_try_push_rx_skb(sk) == 0)
			break;
	}

	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);

	release_sock(sk);
}
3807
3808 static int l2cap_push_rx_skb(struct sock *sk, struct sk_buff *skb, u16 control)
3809 {
3810 struct l2cap_pinfo *pi = l2cap_pi(sk);
3811 int sctrl, err;
3812
3813 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
3814 bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
3815 __skb_queue_tail(BUSY_QUEUE(sk), skb);
3816 return l2cap_try_push_rx_skb(sk);
3817
3818
3819 }
3820
3821 err = l2cap_ertm_reassembly_sdu(sk, skb, control);
3822 if (err >= 0) {
3823 pi->buffer_seq = (pi->buffer_seq + 1) % 64;
3824 return err;
3825 }
3826
3827 /* Busy Condition */
3828 BT_DBG("sk %p, Enter local busy", sk);
3829
3830 pi->conn_state |= L2CAP_CONN_LOCAL_BUSY;
3831 bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
3832 __skb_queue_tail(BUSY_QUEUE(sk), skb);
3833
3834 sctrl = pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3835 sctrl |= L2CAP_SUPER_RCV_NOT_READY;
3836 l2cap_send_sframe(pi, sctrl);
3837
3838 pi->conn_state |= L2CAP_CONN_RNR_SENT;
3839
3840 del_timer(&pi->ack_timer);
3841
3842 queue_work(_busy_wq, &pi->busy_work);
3843
3844 return err;
3845 }
3846
3847 static int l2cap_streaming_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
3848 {
3849 struct l2cap_pinfo *pi = l2cap_pi(sk);
3850 struct sk_buff *_skb;
3851 int err = -EINVAL;
3852
3853 /*
3854 * TODO: We have to notify the userland if some data is lost with the
3855 * Streaming Mode.
3856 */
3857
3858 switch (control & L2CAP_CTRL_SAR) {
3859 case L2CAP_SDU_UNSEGMENTED:
3860 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
3861 kfree_skb(pi->sdu);
3862 break;
3863 }
3864
3865 err = sock_queue_rcv_skb(sk, skb);
3866 if (!err)
3867 return 0;
3868
3869 break;
3870
3871 case L2CAP_SDU_START:
3872 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
3873 kfree_skb(pi->sdu);
3874 break;
3875 }
3876
3877 pi->sdu_len = get_unaligned_le16(skb->data);
3878 skb_pull(skb, 2);
3879
3880 if (pi->sdu_len > pi->imtu) {
3881 err = -EMSGSIZE;
3882 break;
3883 }
3884
3885 pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
3886 if (!pi->sdu) {
3887 err = -ENOMEM;
3888 break;
3889 }
3890
3891 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3892
3893 pi->conn_state |= L2CAP_CONN_SAR_SDU;
3894 pi->partial_sdu_len = skb->len;
3895 err = 0;
3896 break;
3897
3898 case L2CAP_SDU_CONTINUE:
3899 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3900 break;
3901
3902 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3903
3904 pi->partial_sdu_len += skb->len;
3905 if (pi->partial_sdu_len > pi->sdu_len)
3906 kfree_skb(pi->sdu);
3907 else
3908 err = 0;
3909
3910 break;
3911
3912 case L2CAP_SDU_END:
3913 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3914 break;
3915
3916 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3917
3918 pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
3919 pi->partial_sdu_len += skb->len;
3920
3921 if (pi->partial_sdu_len > pi->imtu)
3922 goto drop;
3923
3924 if (pi->partial_sdu_len == pi->sdu_len) {
3925 _skb = skb_clone(pi->sdu, GFP_ATOMIC);
3926 err = sock_queue_rcv_skb(sk, _skb);
3927 if (err < 0)
3928 kfree_skb(_skb);
3929 }
3930 err = 0;
3931
3932 drop:
3933 kfree_skb(pi->sdu);
3934 break;
3935 }
3936
3937 kfree_skb(skb);
3938 return err;
3939 }
3940
3941 static void l2cap_check_srej_gap(struct sock *sk, u8 tx_seq)
3942 {
3943 struct sk_buff *skb;
3944 u16 control;
3945
3946 while ((skb = skb_peek(SREJ_QUEUE(sk)))) {
3947 if (bt_cb(skb)->tx_seq != tx_seq)
3948 break;
3949
3950 skb = skb_dequeue(SREJ_QUEUE(sk));
3951 control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
3952 l2cap_ertm_reassembly_sdu(sk, skb, control);
3953 l2cap_pi(sk)->buffer_seq_srej =
3954 (l2cap_pi(sk)->buffer_seq_srej + 1) % 64;
3955 tx_seq = (tx_seq + 1) % 64;
3956 }
3957 }
3958
3959 static void l2cap_resend_srejframe(struct sock *sk, u8 tx_seq)
3960 {
3961 struct l2cap_pinfo *pi = l2cap_pi(sk);
3962 struct srej_list *l, *tmp;
3963 u16 control;
3964
3965 list_for_each_entry_safe(l, tmp, SREJ_LIST(sk), list) {
3966 if (l->tx_seq == tx_seq) {
3967 list_del(&l->list);
3968 kfree(l);
3969 return;
3970 }
3971 control = L2CAP_SUPER_SELECT_REJECT;
3972 control |= l->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3973 l2cap_send_sframe(pi, control);
3974 list_del(&l->list);
3975 list_add_tail(&l->list, SREJ_LIST(sk));
3976 }
3977 }
3978
3979 static void l2cap_send_srejframe(struct sock *sk, u8 tx_seq)
3980 {
3981 struct l2cap_pinfo *pi = l2cap_pi(sk);
3982 struct srej_list *new;
3983 u16 control;
3984
3985 while (tx_seq != pi->expected_tx_seq) {
3986 control = L2CAP_SUPER_SELECT_REJECT;
3987 control |= pi->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3988 l2cap_send_sframe(pi, control);
3989
3990 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3991 new->tx_seq = pi->expected_tx_seq;
3992 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3993 list_add_tail(&new->list, SREJ_LIST(sk));
3994 }
3995 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3996 }
3997
3998 static inline int l2cap_data_channel_iframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
3999 {
4000 struct l2cap_pinfo *pi = l2cap_pi(sk);
4001 u8 tx_seq = __get_txseq(rx_control);
4002 u8 req_seq = __get_reqseq(rx_control);
4003 u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT;
4004 int tx_seq_offset, expected_tx_seq_offset;
4005 int num_to_ack = (pi->tx_win/6) + 1;
4006 int err = 0;
4007
4008 BT_DBG("sk %p len %d tx_seq %d rx_control 0x%4.4x", sk, skb->len, tx_seq,
4009 rx_control);
4010
4011 if (L2CAP_CTRL_FINAL & rx_control &&
4012 l2cap_pi(sk)->conn_state & L2CAP_CONN_WAIT_F) {
4013 del_timer(&pi->monitor_timer);
4014 if (pi->unacked_frames > 0)
4015 __mod_retrans_timer();
4016 pi->conn_state &= ~L2CAP_CONN_WAIT_F;
4017 }
4018
4019 pi->expected_ack_seq = req_seq;
4020 l2cap_drop_acked_frames(sk);
4021
4022 if (tx_seq == pi->expected_tx_seq)
4023 goto expected;
4024
4025 tx_seq_offset = (tx_seq - pi->buffer_seq) % 64;
4026 if (tx_seq_offset < 0)
4027 tx_seq_offset += 64;
4028
4029 /* invalid tx_seq */
4030 if (tx_seq_offset >= pi->tx_win) {
4031 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
4032 goto drop;
4033 }
4034
4035 if (pi->conn_state == L2CAP_CONN_LOCAL_BUSY)
4036 goto drop;
4037
4038 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
4039 struct srej_list *first;
4040
4041 first = list_first_entry(SREJ_LIST(sk),
4042 struct srej_list, list);
4043 if (tx_seq == first->tx_seq) {
4044 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
4045 l2cap_check_srej_gap(sk, tx_seq);
4046
4047 list_del(&first->list);
4048 kfree(first);
4049
4050 if (list_empty(SREJ_LIST(sk))) {
4051 pi->buffer_seq = pi->buffer_seq_srej;
4052 pi->conn_state &= ~L2CAP_CONN_SREJ_SENT;
4053 l2cap_send_ack(pi);
4054 BT_DBG("sk %p, Exit SREJ_SENT", sk);
4055 }
4056 } else {
4057 struct srej_list *l;
4058
4059 /* duplicated tx_seq */
4060 if (l2cap_add_to_srej_queue(sk, skb, tx_seq, sar) < 0)
4061 goto drop;
4062
4063 list_for_each_entry(l, SREJ_LIST(sk), list) {
4064 if (l->tx_seq == tx_seq) {
4065 l2cap_resend_srejframe(sk, tx_seq);
4066 return 0;
4067 }
4068 }
4069 l2cap_send_srejframe(sk, tx_seq);
4070 }
4071 } else {
4072 expected_tx_seq_offset =
4073 (pi->expected_tx_seq - pi->buffer_seq) % 64;
4074 if (expected_tx_seq_offset < 0)
4075 expected_tx_seq_offset += 64;
4076
4077 /* duplicated tx_seq */
4078 if (tx_seq_offset < expected_tx_seq_offset)
4079 goto drop;
4080
4081 pi->conn_state |= L2CAP_CONN_SREJ_SENT;
4082
4083 BT_DBG("sk %p, Enter SREJ", sk);
4084
4085 INIT_LIST_HEAD(SREJ_LIST(sk));
4086 pi->buffer_seq_srej = pi->buffer_seq;
4087
4088 __skb_queue_head_init(SREJ_QUEUE(sk));
4089 __skb_queue_head_init(BUSY_QUEUE(sk));
4090 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
4091
4092 pi->conn_state |= L2CAP_CONN_SEND_PBIT;
4093
4094 l2cap_send_srejframe(sk, tx_seq);
4095
4096 del_timer(&pi->ack_timer);
4097 }
4098 return 0;
4099
4100 expected:
4101 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
4102
4103 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
4104 bt_cb(skb)->tx_seq = tx_seq;
4105 bt_cb(skb)->sar = sar;
4106 __skb_queue_tail(SREJ_QUEUE(sk), skb);
4107 return 0;
4108 }
4109
4110 err = l2cap_push_rx_skb(sk, skb, rx_control);
4111 if (err < 0)
4112 return 0;
4113
4114 if (rx_control & L2CAP_CTRL_FINAL) {
4115 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
4116 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
4117 else
4118 l2cap_retransmit_frames(sk);
4119 }
4120
4121 __mod_ack_timer();
4122
4123 pi->num_acked = (pi->num_acked + 1) % num_to_ack;
4124 if (pi->num_acked == num_to_ack - 1)
4125 l2cap_send_ack(pi);
4126
4127 return 0;
4128
4129 drop:
4130 kfree_skb(skb);
4131 return 0;
4132 }
4133
/* Handle a received RR (Receiver Ready) supervisory frame. */
static inline void l2cap_data_channel_rrframe(struct sock *sk, u16 rx_control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);

	BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, __get_reqseq(rx_control),
						rx_control);

	pi->expected_ack_seq = __get_reqseq(rx_control);
	l2cap_drop_acked_frames(sk);

	if (rx_control & L2CAP_CTRL_POLL) {
		/* Remote polls us: we must answer with the F bit set. */
		pi->conn_state |= L2CAP_CONN_SEND_FBIT;
		if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
			if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
					(pi->unacked_frames > 0))
				__mod_retrans_timer();

			pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
			l2cap_send_srejtail(sk);
		} else {
			l2cap_send_i_or_rr_or_rnr(sk);
		}

	} else if (rx_control & L2CAP_CTRL_FINAL) {
		/* Final response to a poll of ours: clear remote busy and
		 * retransmit unless a REJ already triggered it. */
		pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

		if (pi->conn_state & L2CAP_CONN_REJ_ACT)
			pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
		else
			l2cap_retransmit_frames(sk);

	} else {
		/* Plain RR acknowledgement. */
		if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
				(pi->unacked_frames > 0))
			__mod_retrans_timer();

		pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
		if (pi->conn_state & L2CAP_CONN_SREJ_SENT)
			l2cap_send_ack(pi);
		else
			l2cap_ertm_send(sk);
	}
}
4177
4178 static inline void l2cap_data_channel_rejframe(struct sock *sk, u16 rx_control)
4179 {
4180 struct l2cap_pinfo *pi = l2cap_pi(sk);
4181 u8 tx_seq = __get_reqseq(rx_control);
4182
4183 BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control);
4184
4185 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
4186
4187 pi->expected_ack_seq = tx_seq;
4188 l2cap_drop_acked_frames(sk);
4189
4190 if (rx_control & L2CAP_CTRL_FINAL) {
4191 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
4192 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
4193 else
4194 l2cap_retransmit_frames(sk);
4195 } else {
4196 l2cap_retransmit_frames(sk);
4197
4198 if (pi->conn_state & L2CAP_CONN_WAIT_F)
4199 pi->conn_state |= L2CAP_CONN_REJ_ACT;
4200 }
4201 }
/* Handle a received SREJ (Selective Reject) supervisory frame: the
 * remote asks for retransmission of one specific frame. */
static inline void l2cap_data_channel_srejframe(struct sock *sk, u16 rx_control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u8 tx_seq = __get_reqseq(rx_control);

	BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control);

	pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

	if (rx_control & L2CAP_CTRL_POLL) {
		/* SREJ + P: the requested seq also acknowledges everything
		 * before it; answer with the F bit on the retransmission. */
		pi->expected_ack_seq = tx_seq;
		l2cap_drop_acked_frames(sk);

		pi->conn_state |= L2CAP_CONN_SEND_FBIT;
		l2cap_retransmit_one_frame(sk, tx_seq);

		l2cap_ertm_send(sk);

		if (pi->conn_state & L2CAP_CONN_WAIT_F) {
			pi->srej_save_reqseq = tx_seq;
			pi->conn_state |= L2CAP_CONN_SREJ_ACT;
		}
	} else if (rx_control & L2CAP_CTRL_FINAL) {
		/* SREJ + F: skip retransmitting if we already answered an
		 * identical SREJ while waiting for this F bit. */
		if ((pi->conn_state & L2CAP_CONN_SREJ_ACT) &&
				pi->srej_save_reqseq == tx_seq)
			pi->conn_state &= ~L2CAP_CONN_SREJ_ACT;
		else
			l2cap_retransmit_one_frame(sk, tx_seq);
	} else {
		l2cap_retransmit_one_frame(sk, tx_seq);
		if (pi->conn_state & L2CAP_CONN_WAIT_F) {
			pi->srej_save_reqseq = tx_seq;
			pi->conn_state |= L2CAP_CONN_SREJ_ACT;
		}
	}
}
4238
/* Handle a received RNR (Receiver Not Ready) supervisory frame: the
 * remote cannot accept more I-frames for now. */
static inline void l2cap_data_channel_rnrframe(struct sock *sk, u16 rx_control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u8 tx_seq = __get_reqseq(rx_control);

	BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control);

	pi->conn_state |= L2CAP_CONN_REMOTE_BUSY;
	pi->expected_ack_seq = tx_seq;
	l2cap_drop_acked_frames(sk);

	if (rx_control & L2CAP_CTRL_POLL)
		pi->conn_state |= L2CAP_CONN_SEND_FBIT;

	if (!(pi->conn_state & L2CAP_CONN_SREJ_SENT)) {
		/* Stop retransmitting while the remote is busy; answer a
		 * poll with RR/RNR carrying the F bit. */
		del_timer(&pi->retrans_timer);
		if (rx_control & L2CAP_CTRL_POLL)
			l2cap_send_rr_or_rnr(pi, L2CAP_CTRL_FINAL);
		return;
	}

	/* SREJ recovery in progress. */
	if (rx_control & L2CAP_CTRL_POLL)
		l2cap_send_srejtail(sk);
	else
		l2cap_send_sframe(pi, L2CAP_SUPER_RCV_READY);
}
4265
4266 static inline int l2cap_data_channel_sframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
4267 {
4268 BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);
4269
4270 if (L2CAP_CTRL_FINAL & rx_control &&
4271 l2cap_pi(sk)->conn_state & L2CAP_CONN_WAIT_F) {
4272 del_timer(&l2cap_pi(sk)->monitor_timer);
4273 if (l2cap_pi(sk)->unacked_frames > 0)
4274 __mod_retrans_timer();
4275 l2cap_pi(sk)->conn_state &= ~L2CAP_CONN_WAIT_F;
4276 }
4277
4278 switch (rx_control & L2CAP_CTRL_SUPERVISE) {
4279 case L2CAP_SUPER_RCV_READY:
4280 l2cap_data_channel_rrframe(sk, rx_control);
4281 break;
4282
4283 case L2CAP_SUPER_REJECT:
4284 l2cap_data_channel_rejframe(sk, rx_control);
4285 break;
4286
4287 case L2CAP_SUPER_SELECT_REJECT:
4288 l2cap_data_channel_srejframe(sk, rx_control);
4289 break;
4290
4291 case L2CAP_SUPER_RCV_NOT_READY:
4292 l2cap_data_channel_rnrframe(sk, rx_control);
4293 break;
4294 }
4295
4296 kfree_skb(skb);
4297 return 0;
4298 }
4299
/* Validate a raw ERTM frame (FCS, payload length, req-seq window) and
 * hand it to the I-frame or S-frame handler. Always returns 0; bad
 * frames are dropped and protocol violations trigger a disconnect. */
static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u16 control;
	u8 req_seq;
	int len, next_tx_seq_offset, req_seq_offset;

	control = get_unaligned_le16(skb->data);
	skb_pull(skb, 2);
	len = skb->len;

	/*
	 * We can just drop the corrupted I-frame here.
	 * Receiver will miss it and start proper recovery
	 * procedures and ask retransmission.
	 */
	if (l2cap_check_fcs(pi, skb))
		goto drop;

	/* A START I-frame carries a 2-byte SDU length header. */
	if (__is_sar_start(control) && __is_iframe(control))
		len -= 2;

	/* The FCS trailer is not payload. */
	if (pi->fcs == L2CAP_FCS_CRC16)
		len -= 2;

	if (len > pi->mps) {
		l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
		goto drop;
	}

	req_seq = __get_reqseq(control);
	req_seq_offset = (req_seq - pi->expected_ack_seq) % 64;
	if (req_seq_offset < 0)
		req_seq_offset += 64;

	next_tx_seq_offset =
		(pi->next_tx_seq - pi->expected_ack_seq) % 64;
	if (next_tx_seq_offset < 0)
		next_tx_seq_offset += 64;

	/* check for invalid req-seq */
	if (req_seq_offset > next_tx_seq_offset) {
		l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
		goto drop;
	}

	if (__is_iframe(control)) {
		if (len < 0) {
			l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
			goto drop;
		}

		l2cap_data_channel_iframe(sk, control, skb);
	} else {
		/* S-frames must have no payload. */
		if (len != 0) {
			BT_ERR("%d", len);
			l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
			goto drop;
		}

		l2cap_data_channel_sframe(sk, control, skb);
	}

	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
4369
/* Deliver an incoming frame to the channel identified by @cid,
 * according to the channel mode (basic / ERTM / streaming).
 * NOTE(review): sk appears to be returned locked by
 * l2cap_get_chan_by_scid() (it is bh_unlock'ed below) -- confirm. */
static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
{
	struct sock *sk;
	struct l2cap_pinfo *pi;
	u16 control;
	u8 tx_seq;
	int len;

	sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
	if (!sk) {
		BT_DBG("unknown cid 0x%4.4x", cid);
		goto drop;
	}

	pi = l2cap_pi(sk);

	BT_DBG("sk %p, len %d", sk, skb->len);

	if (sk->sk_state != BT_CONNECTED)
		goto drop;

	switch (pi->mode) {
	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism. */

		if (pi->imtu < skb->len)
			goto drop;

		if (!sock_queue_rcv_skb(sk, skb))
			goto done;
		break;

	case L2CAP_MODE_ERTM:
		/* Defer to the backlog if the socket is owned by a
		 * process; otherwise process the frame inline. */
		if (!sock_owned_by_user(sk)) {
			l2cap_ertm_data_rcv(sk, skb);
		} else {
			if (sk_add_backlog(sk, skb))
				goto drop;
		}

		goto done;

	case L2CAP_MODE_STREAMING:
		control = get_unaligned_le16(skb->data);
		skb_pull(skb, 2);
		len = skb->len;

		if (l2cap_check_fcs(pi, skb))
			goto drop;

		/* START frames carry a 2-byte SDU length header. */
		if (__is_sar_start(control))
			len -= 2;

		/* FCS trailer is not payload. */
		if (pi->fcs == L2CAP_FCS_CRC16)
			len -= 2;

		/* S-frames are not used in streaming mode. */
		if (len > pi->mps || len < 0 || __is_sframe(control))
			goto drop;

		tx_seq = __get_txseq(control);

		/* Out-of-sequence frames are simply skipped over. */
		if (pi->expected_tx_seq == tx_seq)
			pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
		else
			pi->expected_tx_seq = (tx_seq + 1) % 64;

		l2cap_streaming_reassembly_sdu(sk, skb, control);

		goto done;

	default:
		BT_DBG("sk %p: bad mode 0x%2.2x", sk, pi->mode);
		break;
	}

drop:
	kfree_skb(skb);

done:
	if (sk)
		bh_unlock_sock(sk);

	return 0;
}
4457
4458 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
4459 {
4460 struct sock *sk;
4461
4462 sk = l2cap_get_sock_by_psm(0, psm, conn->src);
4463 if (!sk)
4464 goto drop;
4465
4466 bh_lock_sock(sk);
4467
4468 BT_DBG("sk %p, len %d", sk, skb->len);
4469
4470 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
4471 goto drop;
4472
4473 if (l2cap_pi(sk)->imtu < skb->len)
4474 goto drop;
4475
4476 if (!sock_queue_rcv_skb(sk, skb))
4477 goto done;
4478
4479 drop:
4480 kfree_skb(skb);
4481
4482 done:
4483 if (sk)
4484 bh_unlock_sock(sk);
4485 return 0;
4486 }
4487
4488 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
4489 {
4490 struct l2cap_hdr *lh = (void *) skb->data;
4491 u16 cid, len;
4492 __le16 psm;
4493
4494 skb_pull(skb, L2CAP_HDR_SIZE);
4495 cid = __le16_to_cpu(lh->cid);
4496 len = __le16_to_cpu(lh->len);
4497
4498 if (len != skb->len) {
4499 kfree_skb(skb);
4500 return;
4501 }
4502
4503 BT_DBG("len %d, cid 0x%4.4x", len, cid);
4504
4505 switch (cid) {
4506 case L2CAP_CID_SIGNALING:
4507 l2cap_sig_channel(conn, skb);
4508 break;
4509
4510 case L2CAP_CID_CONN_LESS:
4511 psm = get_unaligned_le16(skb->data);
4512 skb_pull(skb, 2);
4513 l2cap_conless_channel(conn, psm, skb);
4514 break;
4515
4516 default:
4517 l2cap_data_channel(conn, cid, skb);
4518 break;
4519 }
4520 }
4521
4522 /* ---- L2CAP interface with lower layer (HCI) ---- */
4523
4524 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
4525 {
4526 int exact = 0, lm1 = 0, lm2 = 0;
4527 register struct sock *sk;
4528 struct hlist_node *node;
4529
4530 if (type != ACL_LINK)
4531 return -EINVAL;
4532
4533 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
4534
4535 /* Find listening sockets and check their link_mode */
4536 read_lock(&l2cap_sk_list.lock);
4537 sk_for_each(sk, node, &l2cap_sk_list.head) {
4538 if (sk->sk_state != BT_LISTEN)
4539 continue;
4540
4541 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
4542 lm1 |= HCI_LM_ACCEPT;
4543 if (l2cap_pi(sk)->role_switch)
4544 lm1 |= HCI_LM_MASTER;
4545 exact++;
4546 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
4547 lm2 |= HCI_LM_ACCEPT;
4548 if (l2cap_pi(sk)->role_switch)
4549 lm2 |= HCI_LM_MASTER;
4550 }
4551 }
4552 read_unlock(&l2cap_sk_list.lock);
4553
4554 return exact ? lm1 : lm2;
4555 }
4556
4557 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
4558 {
4559 struct l2cap_conn *conn;
4560
4561 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
4562
4563 if (hcon->type != ACL_LINK)
4564 return -EINVAL;
4565
4566 if (!status) {
4567 conn = l2cap_conn_add(hcon, status);
4568 if (conn)
4569 l2cap_conn_ready(conn);
4570 } else
4571 l2cap_conn_del(hcon, bt_err(status));
4572
4573 return 0;
4574 }
4575
/* HCI callback: report the HCI reason code to use for an incoming
 * disconnect. 0x13 = remote user terminated connection (default). */
static int l2cap_disconn_ind(struct hci_conn *hcon)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	BT_DBG("hcon %p", hcon);

	if (hcon->type != ACL_LINK || !conn)
		return 0x13;

	return conn->disc_reason;
}
4587
/* HCI callback: the ACL link went down; tear down the L2CAP state. */
static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
{
	BT_DBG("hcon %p reason %d", hcon, reason);

	if (hcon->type != ACL_LINK)
		return -EINVAL;

	l2cap_conn_del(hcon, bt_err(reason));

	return 0;
}
4599
4600 static inline void l2cap_check_encryption(struct sock *sk, u8 encrypt)
4601 {
4602 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM)
4603 return;
4604
4605 if (encrypt == 0x00) {
4606 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM) {
4607 l2cap_sock_clear_timer(sk);
4608 l2cap_sock_set_timer(sk, HZ * 5);
4609 } else if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
4610 __l2cap_sock_close(sk, ECONNREFUSED);
4611 } else {
4612 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM)
4613 l2cap_sock_clear_timer(sk);
4614 }
4615 }
4616
/* HCI callback: an authentication/encryption request completed with
 * @status for the whole ACL link. Walk every channel on the connection
 * and advance its state machine accordingly. */
static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_chan_list *l;
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct sock *sk;

	if (!conn)
		return 0;

	l = &conn->chan_list;

	BT_DBG("conn %p", conn);

	read_lock(&l->lock);

	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		bh_lock_sock(sk);

		/* A connect request is already in flight for this channel. */
		if (l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND) {
			bh_unlock_sock(sk);
			continue;
		}

		/* Established channels only need the encryption check. */
		if (!status && (sk->sk_state == BT_CONNECTED ||
						sk->sk_state == BT_CONFIG)) {
			l2cap_check_encryption(sk, encrypt);
			bh_unlock_sock(sk);
			continue;
		}

		if (sk->sk_state == BT_CONNECT) {
			if (!status) {
				/* Security done: send the pending connect
				 * request now. */
				struct l2cap_conn_req req;
				req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
				req.psm  = l2cap_pi(sk)->psm;

				l2cap_pi(sk)->ident = l2cap_get_ident(conn);
				l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;

				l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_REQ, sizeof(req), &req);
			} else {
				/* Security failed: time the socket out soon. */
				l2cap_sock_clear_timer(sk);
				l2cap_sock_set_timer(sk, HZ / 10);
			}
		} else if (sk->sk_state == BT_CONNECT2) {
			/* We deferred our connect response on security;
			 * answer the remote now. */
			struct l2cap_conn_rsp rsp;
			__u16 result;

			if (!status) {
				sk->sk_state = BT_CONFIG;
				result = L2CAP_CR_SUCCESS;
			} else {
				sk->sk_state = BT_DISCONN;
				l2cap_sock_set_timer(sk, HZ / 10);
				result = L2CAP_CR_SEC_BLOCK;
			}

			rsp.scid   = cpu_to_le16(l2cap_pi(sk)->dcid);
			rsp.dcid   = cpu_to_le16(l2cap_pi(sk)->scid);
			rsp.result = cpu_to_le16(result);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_RSP, sizeof(rsp), &rsp);
		}

		bh_unlock_sock(sk);
	}

	read_unlock(&l->lock);

	return 0;
}
4690
/* HCI callback: reassemble ACL fragments into complete L2CAP frames.
 * A start fragment carries the Basic L2CAP header announcing the total
 * frame length; continuation fragments are appended until the frame is
 * complete, then handed to l2cap_recv_frame(). */
static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	if (!conn && !(conn = l2cap_conn_add(hcon, 0)))
		goto drop;

	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);

	if (flags & ACL_START) {
		struct l2cap_hdr *hdr;
		struct sock *sk;
		u16 cid;
		int len;

		/* A new start while reassembly is pending means we lost
		 * fragments; discard the partial frame. */
		if (conn->rx_len) {
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
		}

		/* Start fragment always begin with Basic L2CAP header */
		if (skb->len < L2CAP_HDR_SIZE) {
			BT_ERR("Frame is too short (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		hdr = (struct l2cap_hdr *) skb->data;
		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
		cid = __le16_to_cpu(hdr->cid);

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);
			return 0;
		}

		BT_DBG("Start: total len %d, frag len %d", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %d, expected len %d)",
				skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Reject frames that exceed the channel's receive MTU
		 * before allocating the reassembly buffer. */
		sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);

		if (sk && l2cap_pi(sk)->imtu < len - L2CAP_HDR_SIZE) {
			BT_ERR("Frame exceeding recv MTU (len %d, MTU %d)",
					len, l2cap_pi(sk)->imtu);
			bh_unlock_sock(sk);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		if (sk)
			bh_unlock_sock(sk);

		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!conn->rx_skb)
			goto drop;

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
				skb->len);
		conn->rx_len = len - skb->len;
	} else {
		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);

		if (!conn->rx_len) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %d, expected %d)",
					skb->len, conn->rx_len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
				skb->len);
		conn->rx_len -= skb->len;

		if (!conn->rx_len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, conn->rx_skb);
			conn->rx_skb = NULL;
		}
	}

drop:
	kfree_skb(skb);
	return 0;
}
4795
/* debugfs: dump one line per L2CAP socket -- addresses, state, PSM,
 * channel ids, MTUs and security level. */
static int l2cap_debugfs_show(struct seq_file *f, void *p)
{
	struct sock *sk;
	struct hlist_node *node;

	read_lock_bh(&l2cap_sk_list.lock);

	sk_for_each(sk, node, &l2cap_sk_list.head) {
		struct l2cap_pinfo *pi = l2cap_pi(sk);

		seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n",
					batostr(&bt_sk(sk)->src),
					batostr(&bt_sk(sk)->dst),
					sk->sk_state, __le16_to_cpu(pi->psm),
					pi->scid, pi->dcid,
					pi->imtu, pi->omtu, pi->sec_level);
	}

	read_unlock_bh(&l2cap_sk_list.lock);

	return 0;
}
4818
/* debugfs open hook wiring the seq_file single-shot show routine. */
static int l2cap_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, l2cap_debugfs_show, inode->i_private);
}
4823
/* File operations for the "l2cap" debugfs entry (created in l2cap_init). */
static const struct file_operations l2cap_debugfs_fops = {
	.open		= l2cap_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/* Dentry of the debugfs entry, kept for removal in l2cap_exit. */
static struct dentry *l2cap_debugfs;
4832
/* Socket-layer operations for L2CAP sockets. */
static const struct proto_ops l2cap_sock_ops = {
	.family		= PF_BLUETOOTH,
	.owner		= THIS_MODULE,
	.release	= l2cap_sock_release,
	.bind		= l2cap_sock_bind,
	.connect	= l2cap_sock_connect,
	.listen		= l2cap_sock_listen,
	.accept		= l2cap_sock_accept,
	.getname	= l2cap_sock_getname,
	.sendmsg	= l2cap_sock_sendmsg,
	.recvmsg	= l2cap_sock_recvmsg,
	.poll		= bt_sock_poll,
	.ioctl		= bt_sock_ioctl,
	.mmap		= sock_no_mmap,
	.socketpair	= sock_no_socketpair,
	.shutdown	= l2cap_sock_shutdown,
	.setsockopt	= l2cap_sock_setsockopt,
	.getsockopt	= l2cap_sock_getsockopt
};

/* Registered with the Bluetooth socket layer for BTPROTO_L2CAP. */
static const struct net_proto_family l2cap_sock_family_ops = {
	.family	= PF_BLUETOOTH,
	.owner	= THIS_MODULE,
	.create	= l2cap_sock_create,
};

/* Callbacks registered with the HCI core for the L2CAP protocol. */
static struct hci_proto l2cap_hci_proto = {
	.name		= "L2CAP",
	.id		= HCI_PROTO_L2CAP,
	.connect_ind	= l2cap_connect_ind,
	.connect_cfm	= l2cap_connect_cfm,
	.disconn_ind	= l2cap_disconn_ind,
	.disconn_cfm	= l2cap_disconn_cfm,
	.security_cfm	= l2cap_security_cfm,
	.recv_acldata	= l2cap_recv_acldata
};
4869
/* Module init: register the protocol, create the local-busy workqueue,
 * hook into the Bluetooth socket layer and the HCI core, and create the
 * debugfs entry (failure of the latter is non-fatal). */
static int __init l2cap_init(void)
{
	int err;

	err = proto_register(&l2cap_proto, 0);
	if (err < 0)
		return err;

	_busy_wq = create_singlethread_workqueue("l2cap");
	if (!_busy_wq) {
		proto_unregister(&l2cap_proto);
		return -ENOMEM;
	}

	err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops);
	if (err < 0) {
		BT_ERR("L2CAP socket registration failed");
		goto error;
	}

	err = hci_register_proto(&l2cap_hci_proto);
	if (err < 0) {
		BT_ERR("L2CAP protocol registration failed");
		bt_sock_unregister(BTPROTO_L2CAP);
		goto error;
	}

	if (bt_debugfs) {
		l2cap_debugfs = debugfs_create_file("l2cap", 0444,
					bt_debugfs, NULL, &l2cap_debugfs_fops);
		if (!l2cap_debugfs)
			BT_ERR("Failed to create L2CAP debug file");
	}

	BT_INFO("L2CAP ver %s", VERSION);
	BT_INFO("L2CAP socket layer initialized");

	return 0;

error:
	/* Unwind the workqueue and protocol registration. */
	destroy_workqueue(_busy_wq);
	proto_unregister(&l2cap_proto);
	return err;
}
4914
/* Module exit: tear down in reverse of l2cap_init, draining the busy
 * workqueue before destroying it. */
static void __exit l2cap_exit(void)
{
	debugfs_remove(l2cap_debugfs);

	flush_workqueue(_busy_wq);
	destroy_workqueue(_busy_wq);

	if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
		BT_ERR("L2CAP socket unregistration failed");

	if (hci_unregister_proto(&l2cap_hci_proto) < 0)
		BT_ERR("L2CAP protocol unregistration failed");

	proto_unregister(&l2cap_proto);
}
4930
/* Dummy function to trigger automatic L2CAP module loading by
 * other modules that use L2CAP sockets but don't use any other
 * symbols from it. */
void l2cap_load(void)
{
}
EXPORT_SYMBOL(l2cap_load);
4938
module_init(l2cap_init);
module_exit(l2cap_exit);

/* Runtime switch to fall back to basic mode only (no ERTM/streaming). */
module_param(disable_ertm, bool, 0644);
MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");

MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
MODULE_ALIAS("bt-proto-0");
This page took 0.138108 seconds and 5 git commands to generate.