Bluetooth: Fix configuration of the MPS value
[deliverable/linux.git] net/bluetooth/l2cap.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth L2CAP core and sockets. */
26
27 #include <linux/module.h>
28
29 #include <linux/types.h>
30 #include <linux/capability.h>
31 #include <linux/errno.h>
32 #include <linux/kernel.h>
33 #include <linux/sched.h>
34 #include <linux/slab.h>
35 #include <linux/poll.h>
36 #include <linux/fcntl.h>
37 #include <linux/init.h>
38 #include <linux/interrupt.h>
39 #include <linux/socket.h>
40 #include <linux/skbuff.h>
41 #include <linux/list.h>
42 #include <linux/device.h>
43 #include <linux/debugfs.h>
44 #include <linux/seq_file.h>
45 #include <linux/uaccess.h>
46 #include <linux/crc16.h>
47 #include <net/sock.h>
48
49 #include <asm/system.h>
50 #include <asm/unaligned.h>
51
52 #include <net/bluetooth/bluetooth.h>
53 #include <net/bluetooth/hci_core.h>
54 #include <net/bluetooth/l2cap.h>
55
56 #define VERSION "2.14"
57
58 static int enable_ertm = 0;
59 static int max_transmit = L2CAP_DEFAULT_MAX_TX;
60
61 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
62 static u8 l2cap_fixed_chan[8] = { 0x02, };
63
64 static const struct proto_ops l2cap_sock_ops;
65
66 static struct bt_sock_list l2cap_sk_list = {
67 .lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
68 };
69
70 static void __l2cap_sock_close(struct sock *sk, int reason);
71 static void l2cap_sock_close(struct sock *sk);
72 static void l2cap_sock_kill(struct sock *sk);
73
74 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
75 u8 code, u8 ident, u16 dlen, void *data);
76
77 /* ---- L2CAP timers ---- */
78 static void l2cap_sock_timeout(unsigned long arg)
79 {
80 struct sock *sk = (struct sock *) arg;
81 int reason;
82
83 BT_DBG("sock %p state %d", sk, sk->sk_state);
84
85 bh_lock_sock(sk);
86
87 if (sk->sk_state == BT_CONNECTED || sk->sk_state == BT_CONFIG)
88 reason = ECONNREFUSED;
89 else if (sk->sk_state == BT_CONNECT &&
90 l2cap_pi(sk)->sec_level != BT_SECURITY_SDP)
91 reason = ECONNREFUSED;
92 else
93 reason = ETIMEDOUT;
94
95 __l2cap_sock_close(sk, reason);
96
97 bh_unlock_sock(sk);
98
99 l2cap_sock_kill(sk);
100 sock_put(sk);
101 }
102
103 static void l2cap_sock_set_timer(struct sock *sk, long timeout)
104 {
105 BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
106 sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
107 }
108
109 static void l2cap_sock_clear_timer(struct sock *sk)
110 {
111 BT_DBG("sock %p state %d", sk, sk->sk_state);
112 sk_stop_timer(sk, &sk->sk_timer);
113 }
114
115 /* ---- L2CAP channels ---- */
116 static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
117 {
118 struct sock *s;
119 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
120 if (l2cap_pi(s)->dcid == cid)
121 break;
122 }
123 return s;
124 }
125
126 static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
127 {
128 struct sock *s;
129 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
130 if (l2cap_pi(s)->scid == cid)
131 break;
132 }
133 return s;
134 }
135
136 /* Find channel with given SCID.
137 * Returns locked socket */
138 static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
139 {
140 struct sock *s;
141 read_lock(&l->lock);
142 s = __l2cap_get_chan_by_scid(l, cid);
143 if (s)
144 bh_lock_sock(s);
145 read_unlock(&l->lock);
146 return s;
147 }
148
149 static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
150 {
151 struct sock *s;
152 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
153 if (l2cap_pi(s)->ident == ident)
154 break;
155 }
156 return s;
157 }
158
159 static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
160 {
161 struct sock *s;
162 read_lock(&l->lock);
163 s = __l2cap_get_chan_by_ident(l, ident);
164 if (s)
165 bh_lock_sock(s);
166 read_unlock(&l->lock);
167 return s;
168 }
169
170 static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
171 {
172 u16 cid = L2CAP_CID_DYN_START;
173
174 for (; cid < L2CAP_CID_DYN_END; cid++) {
175 if (!__l2cap_get_chan_by_scid(l, cid))
176 return cid;
177 }
178
179 return 0;
180 }
181
182 static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
183 {
184 sock_hold(sk);
185
186 if (l->head)
187 l2cap_pi(l->head)->prev_c = sk;
188
189 l2cap_pi(sk)->next_c = l->head;
190 l2cap_pi(sk)->prev_c = NULL;
191 l->head = sk;
192 }
193
194 static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
195 {
196 struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;
197
198 write_lock_bh(&l->lock);
199 if (sk == l->head)
200 l->head = next;
201
202 if (next)
203 l2cap_pi(next)->prev_c = prev;
204 if (prev)
205 l2cap_pi(prev)->next_c = next;
206 write_unlock_bh(&l->lock);
207
208 __sock_put(sk);
209 }
210
211 static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
212 {
213 struct l2cap_chan_list *l = &conn->chan_list;
214
215 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
216 l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);
217
218 conn->disc_reason = 0x13; /* HCI error: remote user terminated connection */
219
220 l2cap_pi(sk)->conn = conn;
221
222 if (sk->sk_type == SOCK_SEQPACKET) {
223 /* Alloc CID for connection-oriented socket */
224 l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
225 } else if (sk->sk_type == SOCK_DGRAM) {
226 /* Connectionless socket */
227 l2cap_pi(sk)->scid = L2CAP_CID_CONN_LESS;
228 l2cap_pi(sk)->dcid = L2CAP_CID_CONN_LESS;
229 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
230 } else {
231 /* Raw socket can send/recv signalling messages only */
232 l2cap_pi(sk)->scid = L2CAP_CID_SIGNALING;
233 l2cap_pi(sk)->dcid = L2CAP_CID_SIGNALING;
234 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
235 }
236
237 __l2cap_chan_link(l, sk);
238
239 if (parent)
240 bt_accept_enqueue(parent, sk);
241 }
242
243 /* Delete channel.
244 * Must be called on the locked socket. */
245 static void l2cap_chan_del(struct sock *sk, int err)
246 {
247 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
248 struct sock *parent = bt_sk(sk)->parent;
249
250 l2cap_sock_clear_timer(sk);
251
252 BT_DBG("sk %p, conn %p, err %d", sk, conn, err);
253
254 if (conn) {
255 /* Unlink from channel list */
256 l2cap_chan_unlink(&conn->chan_list, sk);
257 l2cap_pi(sk)->conn = NULL;
258 hci_conn_put(conn->hcon);
259 }
260
261 sk->sk_state = BT_CLOSED;
262 sock_set_flag(sk, SOCK_ZAPPED);
263
264 if (err)
265 sk->sk_err = err;
266
267 if (parent) {
268 bt_accept_unlink(sk);
269 parent->sk_data_ready(parent, 0);
270 } else
271 sk->sk_state_change(sk);
272 }
273
274 /* Service level security */
275 static inline int l2cap_check_security(struct sock *sk)
276 {
277 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
278 __u8 auth_type;
279
280 if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
281 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
282 auth_type = HCI_AT_NO_BONDING_MITM;
283 else
284 auth_type = HCI_AT_NO_BONDING;
285
286 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
287 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
288 } else {
289 switch (l2cap_pi(sk)->sec_level) {
290 case BT_SECURITY_HIGH:
291 auth_type = HCI_AT_GENERAL_BONDING_MITM;
292 break;
293 case BT_SECURITY_MEDIUM:
294 auth_type = HCI_AT_GENERAL_BONDING;
295 break;
296 default:
297 auth_type = HCI_AT_NO_BONDING;
298 break;
299 }
300 }
301
302 return hci_conn_security(conn->hcon, l2cap_pi(sk)->sec_level,
303 auth_type);
304 }
305
306 static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
307 {
308 u8 id;
309
310 /* Get next available identifier.
311 * 1 - 128 are used by the kernel.
312 * 129 - 199 are reserved.
313 * 200 - 254 are used by utilities like l2ping, etc.
314 */
315
316 spin_lock_bh(&conn->lock);
317
318 if (++conn->tx_ident > 128)
319 conn->tx_ident = 1;
320
321 id = conn->tx_ident;
322
323 spin_unlock_bh(&conn->lock);
324
325 return id;
326 }
327
328 static inline int l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
329 {
330 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
331
332 BT_DBG("code 0x%2.2x", code);
333
334 if (!skb)
335 return -ENOMEM;
336
337 return hci_send_acl(conn->hcon, skb, 0);
338 }
339
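/* Build and send a single S-frame: L2CAP header plus a 16-bit control field,
 * folding in the pending F/P bits and appending a CRC16 FCS when negotiated. */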
340 static inline int l2cap_send_sframe(struct l2cap_pinfo *pi, u16 control)
341 {
342 struct sk_buff *skb;
343 struct l2cap_hdr *lh;
344 struct l2cap_conn *conn = pi->conn;
345 int count, hlen = L2CAP_HDR_SIZE + 2;
346
347 if (pi->fcs == L2CAP_FCS_CRC16)
348 hlen += 2;
349
350 BT_DBG("pi %p, control 0x%2.2x", pi, control);
351
352 count = min_t(unsigned int, conn->mtu, hlen);
353 control |= L2CAP_CTRL_FRAME_TYPE;
354
355 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
356 control |= L2CAP_CTRL_FINAL;
357 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
358 }
359
360 if (pi->conn_state & L2CAP_CONN_SEND_PBIT) {
361 control |= L2CAP_CTRL_POLL;
362 pi->conn_state &= ~L2CAP_CONN_SEND_PBIT;
363 }
364
365 skb = bt_skb_alloc(count, GFP_ATOMIC);
366 if (!skb)
367 return -ENOMEM;
368
369 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
370 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
371 lh->cid = cpu_to_le16(pi->dcid);
372 put_unaligned_le16(control, skb_put(skb, 2));
373
374 if (pi->fcs == L2CAP_FCS_CRC16) {
375 u16 fcs = crc16(0, (u8 *)lh, count - 2);
376 put_unaligned_le16(fcs, skb_put(skb, 2));
377 }
378
379 return hci_send_acl(pi->conn->hcon, skb, 0);
380 }
381
382 static inline int l2cap_send_rr_or_rnr(struct l2cap_pinfo *pi, u16 control)
383 {
384 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY)
385 control |= L2CAP_SUPER_RCV_NOT_READY;
386 else
387 control |= L2CAP_SUPER_RCV_READY;
388
389 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
390
391 return l2cap_send_sframe(pi, control);
392 }
393
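/* Start channel setup: once the remote feature mask is known and the security
 * check passes, send a Connection Request; otherwise query the feature mask first. */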
394 static void l2cap_do_start(struct sock *sk)
395 {
396 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
397
398 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
399 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
400 return;
401
402 if (l2cap_check_security(sk)) {
403 struct l2cap_conn_req req;
404 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
405 req.psm = l2cap_pi(sk)->psm;
406
407 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
408
409 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
410 L2CAP_CONN_REQ, sizeof(req), &req);
411 }
412 } else {
413 struct l2cap_info_req req;
414 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
415
416 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
417 conn->info_ident = l2cap_get_ident(conn);
418
419 mod_timer(&conn->info_timer, jiffies +
420 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
421
422 l2cap_send_cmd(conn, conn->info_ident,
423 L2CAP_INFO_REQ, sizeof(req), &req);
424 }
425 }
426
427 static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct sock *sk)
428 {
429 struct l2cap_disconn_req req;
430
431 req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
432 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
433 l2cap_send_cmd(conn, l2cap_get_ident(conn),
434 L2CAP_DISCONN_REQ, sizeof(req), &req);
435 }
436
437 /* ---- L2CAP connections ---- */
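/* Walk every channel on the connection: sockets in BT_CONNECT get a Connection
 * Request, sockets in BT_CONNECT2 get a Connection Response once security allows. */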
438 static void l2cap_conn_start(struct l2cap_conn *conn)
439 {
440 struct l2cap_chan_list *l = &conn->chan_list;
441 struct sock *sk;
442
443 BT_DBG("conn %p", conn);
444
445 read_lock(&l->lock);
446
447 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
448 bh_lock_sock(sk);
449
450 if (sk->sk_type != SOCK_SEQPACKET) {
451 bh_unlock_sock(sk);
452 continue;
453 }
454
455 if (sk->sk_state == BT_CONNECT) {
456 if (l2cap_check_security(sk)) {
457 struct l2cap_conn_req req;
458 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
459 req.psm = l2cap_pi(sk)->psm;
460
461 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
462
463 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
464 L2CAP_CONN_REQ, sizeof(req), &req);
465 }
466 } else if (sk->sk_state == BT_CONNECT2) {
467 struct l2cap_conn_rsp rsp;
468 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
469 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
470
471 if (l2cap_check_security(sk)) {
472 if (bt_sk(sk)->defer_setup) {
473 struct sock *parent = bt_sk(sk)->parent;
474 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
475 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
476 parent->sk_data_ready(parent, 0);
477
478 } else {
479 sk->sk_state = BT_CONFIG;
480 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
481 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
482 }
483 } else {
484 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
485 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
486 }
487
488 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
489 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
490 }
491
492 bh_unlock_sock(sk);
493 }
494
495 read_unlock(&l->lock);
496 }
497
498 static void l2cap_conn_ready(struct l2cap_conn *conn)
499 {
500 struct l2cap_chan_list *l = &conn->chan_list;
501 struct sock *sk;
502
503 BT_DBG("conn %p", conn);
504
505 read_lock(&l->lock);
506
507 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
508 bh_lock_sock(sk);
509
510 if (sk->sk_type != SOCK_SEQPACKET) {
511 l2cap_sock_clear_timer(sk);
512 sk->sk_state = BT_CONNECTED;
513 sk->sk_state_change(sk);
514 } else if (sk->sk_state == BT_CONNECT)
515 l2cap_do_start(sk);
516
517 bh_unlock_sock(sk);
518 }
519
520 read_unlock(&l->lock);
521 }
522
523 /* Notify sockets that we cannot guarantee reliability anymore */
524 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
525 {
526 struct l2cap_chan_list *l = &conn->chan_list;
527 struct sock *sk;
528
529 BT_DBG("conn %p", conn);
530
531 read_lock(&l->lock);
532
533 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
534 if (l2cap_pi(sk)->force_reliable)
535 sk->sk_err = err;
536 }
537
538 read_unlock(&l->lock);
539 }
540
541 static void l2cap_info_timeout(unsigned long arg)
542 {
543 struct l2cap_conn *conn = (void *) arg;
544
545 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
546 conn->info_ident = 0;
547
548 l2cap_conn_start(conn);
549 }
550
551 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
552 {
553 struct l2cap_conn *conn = hcon->l2cap_data;
554
555 if (conn || status)
556 return conn;
557
558 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
559 if (!conn)
560 return NULL;
561
562 hcon->l2cap_data = conn;
563 conn->hcon = hcon;
564
565 BT_DBG("hcon %p conn %p", hcon, conn);
566
567 conn->mtu = hcon->hdev->acl_mtu;
568 conn->src = &hcon->hdev->bdaddr;
569 conn->dst = &hcon->dst;
570
571 conn->feat_mask = 0;
572
573 spin_lock_init(&conn->lock);
574 rwlock_init(&conn->chan_list.lock);
575
576 setup_timer(&conn->info_timer, l2cap_info_timeout,
577 (unsigned long) conn);
578
579 conn->disc_reason = 0x13; /* HCI error: remote user terminated connection */
580
581 return conn;
582 }
583
584 static void l2cap_conn_del(struct hci_conn *hcon, int err)
585 {
586 struct l2cap_conn *conn = hcon->l2cap_data;
587 struct sock *sk;
588
589 if (!conn)
590 return;
591
592 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
593
594 kfree_skb(conn->rx_skb);
595
596 /* Kill channels */
597 while ((sk = conn->chan_list.head)) {
598 bh_lock_sock(sk);
599 l2cap_chan_del(sk, err);
600 bh_unlock_sock(sk);
601 l2cap_sock_kill(sk);
602 }
603
604 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
605 del_timer_sync(&conn->info_timer);
606
607 hcon->l2cap_data = NULL;
608 kfree(conn);
609 }
610
611 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
612 {
613 struct l2cap_chan_list *l = &conn->chan_list;
614 write_lock_bh(&l->lock);
615 __l2cap_chan_add(conn, sk, parent);
616 write_unlock_bh(&l->lock);
617 }
618
619 /* ---- Socket interface ---- */
620 static struct sock *__l2cap_get_sock_by_addr(__le16 psm, bdaddr_t *src)
621 {
622 struct sock *sk;
623 struct hlist_node *node;
624 sk_for_each(sk, node, &l2cap_sk_list.head)
625 if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src))
626 goto found;
627 sk = NULL;
628 found:
629 return sk;
630 }
631
632 /* Find socket with psm and source bdaddr.
633 * Returns closest match.
634 */
635 static struct sock *__l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
636 {
637 struct sock *sk = NULL, *sk1 = NULL;
638 struct hlist_node *node;
639
640 sk_for_each(sk, node, &l2cap_sk_list.head) {
641 if (state && sk->sk_state != state)
642 continue;
643
644 if (l2cap_pi(sk)->psm == psm) {
645 /* Exact match. */
646 if (!bacmp(&bt_sk(sk)->src, src))
647 break;
648
649 /* Closest match */
650 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
651 sk1 = sk;
652 }
653 }
654 return node ? sk : sk1;
655 }
656
657 /* Find socket with given address (psm, src).
658 * Returns locked socket */
659 static inline struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
660 {
661 struct sock *s;
662 read_lock(&l2cap_sk_list.lock);
663 s = __l2cap_get_sock_by_psm(state, psm, src);
664 if (s)
665 bh_lock_sock(s);
666 read_unlock(&l2cap_sk_list.lock);
667 return s;
668 }
669
670 static void l2cap_sock_destruct(struct sock *sk)
671 {
672 BT_DBG("sk %p", sk);
673
674 skb_queue_purge(&sk->sk_receive_queue);
675 skb_queue_purge(&sk->sk_write_queue);
676 }
677
678 static void l2cap_sock_cleanup_listen(struct sock *parent)
679 {
680 struct sock *sk;
681
682 BT_DBG("parent %p", parent);
683
684 /* Close not yet accepted channels */
685 while ((sk = bt_accept_dequeue(parent, NULL)))
686 l2cap_sock_close(sk);
687
688 parent->sk_state = BT_CLOSED;
689 sock_set_flag(parent, SOCK_ZAPPED);
690 }
691
692 /* Kill socket (only if zapped and orphan)
693 * Must be called on unlocked socket.
694 */
695 static void l2cap_sock_kill(struct sock *sk)
696 {
697 if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
698 return;
699
700 BT_DBG("sk %p state %d", sk, sk->sk_state);
701
702 /* Kill poor orphan */
703 bt_sock_unlink(&l2cap_sk_list, sk);
704 sock_set_flag(sk, SOCK_DEAD);
705 sock_put(sk);
706 }
707
708 static void __l2cap_sock_close(struct sock *sk, int reason)
709 {
710 BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);
711
712 switch (sk->sk_state) {
713 case BT_LISTEN:
714 l2cap_sock_cleanup_listen(sk);
715 break;
716
717 case BT_CONNECTED:
718 case BT_CONFIG:
719 if (sk->sk_type == SOCK_SEQPACKET) {
720 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
721
722 sk->sk_state = BT_DISCONN;
723 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
724 l2cap_send_disconn_req(conn, sk);
725 } else
726 l2cap_chan_del(sk, reason);
727 break;
728
729 case BT_CONNECT2:
730 if (sk->sk_type == SOCK_SEQPACKET) {
731 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
732 struct l2cap_conn_rsp rsp;
733 __u16 result;
734
735 if (bt_sk(sk)->defer_setup)
736 result = L2CAP_CR_SEC_BLOCK;
737 else
738 result = L2CAP_CR_BAD_PSM;
739
740 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
741 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
742 rsp.result = cpu_to_le16(result);
743 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
744 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
745 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
746 } else
747 l2cap_chan_del(sk, reason);
748 break;
749
750 case BT_CONNECT:
751 case BT_DISCONN:
752 l2cap_chan_del(sk, reason);
753 break;
754
755 default:
756 sock_set_flag(sk, SOCK_ZAPPED);
757 break;
758 }
759 }
760
761 /* Must be called on unlocked socket. */
762 static void l2cap_sock_close(struct sock *sk)
763 {
764 l2cap_sock_clear_timer(sk);
765 lock_sock(sk);
766 __l2cap_sock_close(sk, ECONNRESET);
767 release_sock(sk);
768 l2cap_sock_kill(sk);
769 }
770
771 static void l2cap_sock_init(struct sock *sk, struct sock *parent)
772 {
773 struct l2cap_pinfo *pi = l2cap_pi(sk);
774
775 BT_DBG("sk %p", sk);
776
777 if (parent) {
778 sk->sk_type = parent->sk_type;
779 bt_sk(sk)->defer_setup = bt_sk(parent)->defer_setup;
780
781 pi->imtu = l2cap_pi(parent)->imtu;
782 pi->omtu = l2cap_pi(parent)->omtu;
783 pi->mode = l2cap_pi(parent)->mode;
784 pi->fcs = l2cap_pi(parent)->fcs;
785 pi->sec_level = l2cap_pi(parent)->sec_level;
786 pi->role_switch = l2cap_pi(parent)->role_switch;
787 pi->force_reliable = l2cap_pi(parent)->force_reliable;
788 } else {
789 pi->imtu = L2CAP_DEFAULT_MTU;
790 pi->omtu = 0;
791 pi->mode = L2CAP_MODE_BASIC;
792 pi->fcs = L2CAP_FCS_CRC16;
793 pi->sec_level = BT_SECURITY_LOW;
794 pi->role_switch = 0;
795 pi->force_reliable = 0;
796 }
797
798 /* Default config options */
799 pi->conf_len = 0;
800 pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
801 skb_queue_head_init(TX_QUEUE(sk));
802 skb_queue_head_init(SREJ_QUEUE(sk));
803 INIT_LIST_HEAD(SREJ_LIST(sk));
804 }
805
806 static struct proto l2cap_proto = {
807 .name = "L2CAP",
808 .owner = THIS_MODULE,
809 .obj_size = sizeof(struct l2cap_pinfo)
810 };
811
812 static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
813 {
814 struct sock *sk;
815
816 sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto);
817 if (!sk)
818 return NULL;
819
820 sock_init_data(sock, sk);
821 INIT_LIST_HEAD(&bt_sk(sk)->accept_q);
822
823 sk->sk_destruct = l2cap_sock_destruct;
824 sk->sk_sndtimeo = msecs_to_jiffies(L2CAP_CONN_TIMEOUT);
825
826 sock_reset_flag(sk, SOCK_ZAPPED);
827
828 sk->sk_protocol = proto;
829 sk->sk_state = BT_OPEN;
830
831 setup_timer(&sk->sk_timer, l2cap_sock_timeout, (unsigned long) sk);
832
833 bt_sock_link(&l2cap_sk_list, sk);
834 return sk;
835 }
836
837 static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol,
838 int kern)
839 {
840 struct sock *sk;
841
842 BT_DBG("sock %p", sock);
843
844 sock->state = SS_UNCONNECTED;
845
846 if (sock->type != SOCK_SEQPACKET &&
847 sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
848 return -ESOCKTNOSUPPORT;
849
850 if (sock->type == SOCK_RAW && !kern && !capable(CAP_NET_RAW))
851 return -EPERM;
852
853 sock->ops = &l2cap_sock_ops;
854
855 sk = l2cap_sock_alloc(net, sock, protocol, GFP_ATOMIC);
856 if (!sk)
857 return -ENOMEM;
858
859 l2cap_sock_init(sk, NULL);
860 return 0;
861 }
862
863 static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
864 {
865 struct sock *sk = sock->sk;
866 struct sockaddr_l2 la;
867 int len, err = 0;
868
869 BT_DBG("sk %p", sk);
870
871 if (!addr || addr->sa_family != AF_BLUETOOTH)
872 return -EINVAL;
873
874 memset(&la, 0, sizeof(la));
875 len = min_t(unsigned int, sizeof(la), alen);
876 memcpy(&la, addr, len);
877
878 if (la.l2_cid)
879 return -EINVAL;
880
881 lock_sock(sk);
882
883 if (sk->sk_state != BT_OPEN) {
884 err = -EBADFD;
885 goto done;
886 }
887
888 if (la.l2_psm && __le16_to_cpu(la.l2_psm) < 0x1001 &&
889 !capable(CAP_NET_BIND_SERVICE)) {
890 err = -EACCES;
891 goto done;
892 }
893
894 write_lock_bh(&l2cap_sk_list.lock);
895
896 if (la.l2_psm && __l2cap_get_sock_by_addr(la.l2_psm, &la.l2_bdaddr)) {
897 err = -EADDRINUSE;
898 } else {
899 /* Save source address */
900 bacpy(&bt_sk(sk)->src, &la.l2_bdaddr);
901 l2cap_pi(sk)->psm = la.l2_psm;
902 l2cap_pi(sk)->sport = la.l2_psm;
903 sk->sk_state = BT_BOUND;
904
905 if (__le16_to_cpu(la.l2_psm) == 0x0001 ||
906 __le16_to_cpu(la.l2_psm) == 0x0003)
907 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
908 }
909
910 write_unlock_bh(&l2cap_sk_list.lock);
911
912 done:
913 release_sock(sk);
914 return err;
915 }
916
917 static int l2cap_do_connect(struct sock *sk)
918 {
919 bdaddr_t *src = &bt_sk(sk)->src;
920 bdaddr_t *dst = &bt_sk(sk)->dst;
921 struct l2cap_conn *conn;
922 struct hci_conn *hcon;
923 struct hci_dev *hdev;
924 __u8 auth_type;
925 int err;
926
927 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
928 l2cap_pi(sk)->psm);
929
930 hdev = hci_get_route(dst, src);
931 if (!hdev)
932 return -EHOSTUNREACH;
933
934 hci_dev_lock_bh(hdev);
935
936 err = -ENOMEM;
937
938 if (sk->sk_type == SOCK_RAW) {
939 switch (l2cap_pi(sk)->sec_level) {
940 case BT_SECURITY_HIGH:
941 auth_type = HCI_AT_DEDICATED_BONDING_MITM;
942 break;
943 case BT_SECURITY_MEDIUM:
944 auth_type = HCI_AT_DEDICATED_BONDING;
945 break;
946 default:
947 auth_type = HCI_AT_NO_BONDING;
948 break;
949 }
950 } else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
951 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
952 auth_type = HCI_AT_NO_BONDING_MITM;
953 else
954 auth_type = HCI_AT_NO_BONDING;
955
956 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
957 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
958 } else {
959 switch (l2cap_pi(sk)->sec_level) {
960 case BT_SECURITY_HIGH:
961 auth_type = HCI_AT_GENERAL_BONDING_MITM;
962 break;
963 case BT_SECURITY_MEDIUM:
964 auth_type = HCI_AT_GENERAL_BONDING;
965 break;
966 default:
967 auth_type = HCI_AT_NO_BONDING;
968 break;
969 }
970 }
971
972 hcon = hci_connect(hdev, ACL_LINK, dst,
973 l2cap_pi(sk)->sec_level, auth_type);
974 if (!hcon)
975 goto done;
976
977 conn = l2cap_conn_add(hcon, 0);
978 if (!conn) {
979 hci_conn_put(hcon);
980 goto done;
981 }
982
983 err = 0;
984
985 /* Update source addr of the socket */
986 bacpy(src, conn->src);
987
988 l2cap_chan_add(conn, sk, NULL);
989
990 sk->sk_state = BT_CONNECT;
991 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
992
993 if (hcon->state == BT_CONNECTED) {
994 if (sk->sk_type != SOCK_SEQPACKET) {
995 l2cap_sock_clear_timer(sk);
996 sk->sk_state = BT_CONNECTED;
997 } else
998 l2cap_do_start(sk);
999 }
1000
1001 done:
1002 hci_dev_unlock_bh(hdev);
1003 hci_dev_put(hdev);
1004 return err;
1005 }
1006
1007 static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
1008 {
1009 struct sock *sk = sock->sk;
1010 struct sockaddr_l2 la;
1011 int len, err = 0;
1012
1013 BT_DBG("sk %p", sk);
1014
1015 if (!addr || alen < sizeof(addr->sa_family) ||
1016 addr->sa_family != AF_BLUETOOTH)
1017 return -EINVAL;
1018
1019 memset(&la, 0, sizeof(la));
1020 len = min_t(unsigned int, sizeof(la), alen);
1021 memcpy(&la, addr, len);
1022
1023 if (la.l2_cid)
1024 return -EINVAL;
1025
1026 lock_sock(sk);
1027
1028 if (sk->sk_type == SOCK_SEQPACKET && !la.l2_psm) {
1029 err = -EINVAL;
1030 goto done;
1031 }
1032
1033 switch (l2cap_pi(sk)->mode) {
1034 case L2CAP_MODE_BASIC:
1035 break;
1036 case L2CAP_MODE_ERTM:
1037 case L2CAP_MODE_STREAMING:
1038 if (enable_ertm)
1039 break;
1040 /* fall through */
1041 default:
1042 err = -ENOTSUPP;
1043 goto done;
1044 }
1045
1046 switch (sk->sk_state) {
1047 case BT_CONNECT:
1048 case BT_CONNECT2:
1049 case BT_CONFIG:
1050 /* Already connecting */
1051 goto wait;
1052
1053 case BT_CONNECTED:
1054 /* Already connected */
1055 goto done;
1056
1057 case BT_OPEN:
1058 case BT_BOUND:
1059 /* Can connect */
1060 break;
1061
1062 default:
1063 err = -EBADFD;
1064 goto done;
1065 }
1066
1067 /* Set destination address and psm */
1068 bacpy(&bt_sk(sk)->dst, &la.l2_bdaddr);
1069 l2cap_pi(sk)->psm = la.l2_psm;
1070
1071 err = l2cap_do_connect(sk);
1072 if (err)
1073 goto done;
1074
1075 wait:
1076 err = bt_sock_wait_state(sk, BT_CONNECTED,
1077 sock_sndtimeo(sk, flags & O_NONBLOCK));
1078 done:
1079 release_sock(sk);
1080 return err;
1081 }
1082
1083 static int l2cap_sock_listen(struct socket *sock, int backlog)
1084 {
1085 struct sock *sk = sock->sk;
1086 int err = 0;
1087
1088 BT_DBG("sk %p backlog %d", sk, backlog);
1089
1090 lock_sock(sk);
1091
1092 if (sk->sk_state != BT_BOUND || sock->type != SOCK_SEQPACKET) {
1093 err = -EBADFD;
1094 goto done;
1095 }
1096
1097 switch (l2cap_pi(sk)->mode) {
1098 case L2CAP_MODE_BASIC:
1099 break;
1100 case L2CAP_MODE_ERTM:
1101 case L2CAP_MODE_STREAMING:
1102 if (enable_ertm)
1103 break;
1104 /* fall through */
1105 default:
1106 err = -ENOTSUPP;
1107 goto done;
1108 }
1109
1110 if (!l2cap_pi(sk)->psm) {
1111 bdaddr_t *src = &bt_sk(sk)->src;
1112 u16 psm;
1113
1114 err = -EINVAL;
1115
1116 write_lock_bh(&l2cap_sk_list.lock);
1117
1118 for (psm = 0x1001; psm < 0x1100; psm += 2)
1119 if (!__l2cap_get_sock_by_addr(cpu_to_le16(psm), src)) {
1120 l2cap_pi(sk)->psm = cpu_to_le16(psm);
1121 l2cap_pi(sk)->sport = cpu_to_le16(psm);
1122 err = 0;
1123 break;
1124 }
1125
1126 write_unlock_bh(&l2cap_sk_list.lock);
1127
1128 if (err < 0)
1129 goto done;
1130 }
1131
1132 sk->sk_max_ack_backlog = backlog;
1133 sk->sk_ack_backlog = 0;
1134 sk->sk_state = BT_LISTEN;
1135
1136 done:
1137 release_sock(sk);
1138 return err;
1139 }
1140
1141 static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
1142 {
1143 DECLARE_WAITQUEUE(wait, current);
1144 struct sock *sk = sock->sk, *nsk;
1145 long timeo;
1146 int err = 0;
1147
1148 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
1149
1150 if (sk->sk_state != BT_LISTEN) {
1151 err = -EBADFD;
1152 goto done;
1153 }
1154
1155 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
1156
1157 BT_DBG("sk %p timeo %ld", sk, timeo);
1158
1159 /* Wait for an incoming connection. (wake-one). */
1160 add_wait_queue_exclusive(sk_sleep(sk), &wait);
1161 while (!(nsk = bt_accept_dequeue(sk, newsock))) {
1162 set_current_state(TASK_INTERRUPTIBLE);
1163 if (!timeo) {
1164 err = -EAGAIN;
1165 break;
1166 }
1167
1168 release_sock(sk);
1169 timeo = schedule_timeout(timeo);
1170 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
1171
1172 if (sk->sk_state != BT_LISTEN) {
1173 err = -EBADFD;
1174 break;
1175 }
1176
1177 if (signal_pending(current)) {
1178 err = sock_intr_errno(timeo);
1179 break;
1180 }
1181 }
1182 set_current_state(TASK_RUNNING);
1183 remove_wait_queue(sk_sleep(sk), &wait);
1184
1185 if (err)
1186 goto done;
1187
1188 newsock->state = SS_CONNECTED;
1189
1190 BT_DBG("new socket %p", nsk);
1191
1192 done:
1193 release_sock(sk);
1194 return err;
1195 }
1196
1197 static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
1198 {
1199 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
1200 struct sock *sk = sock->sk;
1201
1202 BT_DBG("sock %p, sk %p", sock, sk);
1203
1204 addr->sa_family = AF_BLUETOOTH;
1205 *len = sizeof(struct sockaddr_l2);
1206
1207 if (peer) {
1208 la->l2_psm = l2cap_pi(sk)->psm;
1209 bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
1210 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1211 } else {
1212 la->l2_psm = l2cap_pi(sk)->sport;
1213 bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
1214 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->scid);
1215 }
1216
1217 return 0;
1218 }
1219
1220 static void l2cap_monitor_timeout(unsigned long arg)
1221 {
1222 struct sock *sk = (void *) arg;
1223 u16 control;
1224
1225 bh_lock_sock(sk);
1226 if (l2cap_pi(sk)->retry_count >= l2cap_pi(sk)->remote_max_tx) {
1227 l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk);
1228 bh_unlock_sock(sk);
1229 return;
1230 }
1231
1232 l2cap_pi(sk)->retry_count++;
1233 __mod_monitor_timer();
1234
1235 control = L2CAP_CTRL_POLL;
1236 l2cap_send_rr_or_rnr(l2cap_pi(sk), control);
1237 bh_unlock_sock(sk);
1238 }
1239
1240 static void l2cap_retrans_timeout(unsigned long arg)
1241 {
1242 struct sock *sk = (void *) arg;
1243 u16 control;
1244
1245 bh_lock_sock(sk);
1246 l2cap_pi(sk)->retry_count = 1;
1247 __mod_monitor_timer();
1248
1249 l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;
1250
1251 control = L2CAP_CTRL_POLL;
1252 l2cap_send_rr_or_rnr(l2cap_pi(sk), control);
1253 bh_unlock_sock(sk);
1254 }
1255
1256 static void l2cap_drop_acked_frames(struct sock *sk)
1257 {
1258 struct sk_buff *skb;
1259
1260 while ((skb = skb_peek(TX_QUEUE(sk)))) {
1261 if (bt_cb(skb)->tx_seq == l2cap_pi(sk)->expected_ack_seq)
1262 break;
1263
1264 skb = skb_dequeue(TX_QUEUE(sk));
1265 kfree_skb(skb);
1266
1267 l2cap_pi(sk)->unacked_frames--;
1268 }
1269
1270 if (!l2cap_pi(sk)->unacked_frames)
1271 del_timer(&l2cap_pi(sk)->retrans_timer);
1272
1273 return;
1274 }
1275
1276 static inline int l2cap_do_send(struct sock *sk, struct sk_buff *skb)
1277 {
1278 struct l2cap_pinfo *pi = l2cap_pi(sk);
1279 int err;
1280
1281 BT_DBG("sk %p, skb %p len %d", sk, skb, skb->len);
1282
1283 err = hci_send_acl(pi->conn->hcon, skb, 0);
1284 if (err < 0)
1285 kfree_skb(skb);
1286
1287 return err;
1288 }
1289
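/* Streaming-mode transmit: clone each queued PDU, stamp TxSeq (modulo 64) and
 * the optional FCS, send it and drop the original right away, since streaming
 * mode never retransmits. */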
1290 static int l2cap_streaming_send(struct sock *sk)
1291 {
1292 struct sk_buff *skb, *tx_skb;
1293 struct l2cap_pinfo *pi = l2cap_pi(sk);
1294 u16 control, fcs;
1295 int err;
1296
1297 while ((skb = sk->sk_send_head)) {
1298 tx_skb = skb_clone(skb, GFP_ATOMIC);
1299
1300 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1301 control |= pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
1302 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1303
1304 if (pi->fcs == L2CAP_FCS_CRC16) {
1305 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1306 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1307 }
1308
1309 err = l2cap_do_send(sk, tx_skb);
1310 if (err < 0) {
1311 l2cap_send_disconn_req(pi->conn, sk);
1312 return err;
1313 }
1314
1315 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1316
1317 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1318 sk->sk_send_head = NULL;
1319 else
1320 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
1321
1322 skb = skb_dequeue(TX_QUEUE(sk));
1323 kfree_skb(skb);
1324 }
1325 return 0;
1326 }
1327
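/* Retransmit the queued I-frame with the given TxSeq, re-stamping ReqSeq and
 * the FCS, and disconnect once remote_max_tx retries have been used up. */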
1328 static int l2cap_retransmit_frame(struct sock *sk, u8 tx_seq)
1329 {
1330 struct l2cap_pinfo *pi = l2cap_pi(sk);
1331 struct sk_buff *skb, *tx_skb;
1332 u16 control, fcs;
1333 int err;
1334
1335 skb = skb_peek(TX_QUEUE(sk));
1336 do {
1337 if (bt_cb(skb)->tx_seq != tx_seq) {
1338 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1339 break;
1340 skb = skb_queue_next(TX_QUEUE(sk), skb);
1341 continue;
1342 }
1343
1344 if (pi->remote_max_tx &&
1345 bt_cb(skb)->retries == pi->remote_max_tx) {
1346 l2cap_send_disconn_req(pi->conn, sk);
1347 break;
1348 }
1349
1350 tx_skb = skb_clone(skb, GFP_ATOMIC);
1351 bt_cb(skb)->retries++;
1352 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1353 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1354 | (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1355 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1356
1357 if (pi->fcs == L2CAP_FCS_CRC16) {
1358 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1359 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1360 }
1361
1362 err = l2cap_do_send(sk, tx_skb);
1363 if (err < 0) {
1364 l2cap_send_disconn_req(pi->conn, sk);
1365 return err;
1366 }
1367 break;
1368 } while (1);
1369 return 0;
1370 }
1371
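/* ERTM transmit path: while the peer is not busy and the TX window is open,
 * clone and send queued I-frames, arm the retransmission timer and count them
 * as unacked; the originals stay on the queue until acknowledged. */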
1372 static int l2cap_ertm_send(struct sock *sk)
1373 {
1374 struct sk_buff *skb, *tx_skb;
1375 struct l2cap_pinfo *pi = l2cap_pi(sk);
1376 u16 control, fcs;
1377 int err, nsent = 0;
1378
1379 if (pi->conn_state & L2CAP_CONN_WAIT_F)
1380 return 0;
1381
1382 while ((skb = sk->sk_send_head) && (!l2cap_tx_window_full(sk)) &&
1383 !(pi->conn_state & L2CAP_CONN_REMOTE_BUSY)) {
1384
1385 if (pi->remote_max_tx &&
1386 bt_cb(skb)->retries == pi->remote_max_tx) {
1387 l2cap_send_disconn_req(pi->conn, sk);
1388 break;
1389 }
1390
1391 tx_skb = skb_clone(skb, GFP_ATOMIC);
1392
1393 bt_cb(skb)->retries++;
1394
1395 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1396 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
1397 control |= L2CAP_CTRL_FINAL;
1398 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1399 }
1400 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1401 | (pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1402 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1403
1404
1405 if (pi->fcs == L2CAP_FCS_CRC16) {
1406 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1407 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1408 }
1409
1410 err = l2cap_do_send(sk, tx_skb);
1411 if (err < 0) {
1412 l2cap_send_disconn_req(pi->conn, sk);
1413 return err;
1414 }
1415 __mod_retrans_timer();
1416
1417 bt_cb(skb)->tx_seq = pi->next_tx_seq;
1418 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1419
1420 pi->unacked_frames++;
1421 pi->frames_sent++;
1422
1423 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1424 sk->sk_send_head = NULL;
1425 else
1426 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
1427
1428 nsent++;
1429 }
1430
1431 return nsent;
1432 }
1433
1434 static int l2cap_send_ack(struct l2cap_pinfo *pi)
1435 {
1436 struct sock *sk = (struct sock *)pi;
1437 u16 control = 0;
1438
1439 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1440
1441 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
1442 control |= L2CAP_SUPER_RCV_NOT_READY;
1443 return l2cap_send_sframe(pi, control);
1444 } else if (l2cap_ertm_send(sk) == 0) {
1445 control |= L2CAP_SUPER_RCV_READY;
1446 return l2cap_send_sframe(pi, control);
1447 }
1448 return 0;
1449 }
1450
1451 static int l2cap_send_srejtail(struct sock *sk)
1452 {
1453 struct srej_list *tail;
1454 u16 control;
1455
1456 control = L2CAP_SUPER_SELECT_REJECT;
1457 control |= L2CAP_CTRL_FINAL;
1458
1459 tail = list_entry(SREJ_LIST(sk)->prev, struct srej_list, list);
1460 control |= tail->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1461
1462 l2cap_send_sframe(l2cap_pi(sk), control);
1463
1464 return 0;
1465 }
1466
1467 static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
1468 {
1469 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1470 struct sk_buff **frag;
1471 int err, sent = 0;
1472
1473 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count)) {
1474 return -EFAULT;
1475 }
1476
1477 sent += count;
1478 len -= count;
1479
1480 /* Continuation fragments (no L2CAP header) */
1481 frag = &skb_shinfo(skb)->frag_list;
1482 while (len) {
1483 count = min_t(unsigned int, conn->mtu, len);
1484
1485 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
1486 if (!*frag)
1487 return -EFAULT;
1488 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1489 return -EFAULT;
1490
1491 sent += count;
1492 len -= count;
1493
1494 frag = &(*frag)->next;
1495 }
1496
1497 return sent;
1498 }
1499
1500 static struct sk_buff *l2cap_create_connless_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1501 {
1502 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1503 struct sk_buff *skb;
1504 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1505 struct l2cap_hdr *lh;
1506
1507 BT_DBG("sk %p len %d", sk, (int)len);
1508
1509 count = min_t(unsigned int, (conn->mtu - hlen), len);
1510 skb = bt_skb_send_alloc(sk, count + hlen,
1511 msg->msg_flags & MSG_DONTWAIT, &err);
1512 if (!skb)
1513 return ERR_PTR(-ENOMEM);
1514
1515 /* Create L2CAP header */
1516 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1517 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1518 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1519 put_unaligned_le16(l2cap_pi(sk)->psm, skb_put(skb, 2));
1520
1521 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1522 if (unlikely(err < 0)) {
1523 kfree_skb(skb);
1524 return ERR_PTR(err);
1525 }
1526 return skb;
1527 }
1528
1529 static struct sk_buff *l2cap_create_basic_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1530 {
1531 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1532 struct sk_buff *skb;
1533 int err, count, hlen = L2CAP_HDR_SIZE;
1534 struct l2cap_hdr *lh;
1535
1536 BT_DBG("sk %p len %d", sk, (int)len);
1537
1538 count = min_t(unsigned int, (conn->mtu - hlen), len);
1539 skb = bt_skb_send_alloc(sk, count + hlen,
1540 msg->msg_flags & MSG_DONTWAIT, &err);
1541 if (!skb)
1542 return ERR_PTR(-ENOMEM);
1543
1544 /* Create L2CAP header */
1545 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1546 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1547 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1548
1549 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1550 if (unlikely(err < 0)) {
1551 kfree_skb(skb);
1552 return ERR_PTR(err);
1553 }
1554 return skb;
1555 }
1556
1557 static struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk, struct msghdr *msg, size_t len, u16 control, u16 sdulen)
1558 {
1559 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1560 struct sk_buff *skb;
1561 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1562 struct l2cap_hdr *lh;
1563
1564 BT_DBG("sk %p len %d", sk, (int)len);
1565
1566 if (sdulen)
1567 hlen += 2;
1568
1569 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
1570 hlen += 2;
1571
1572 count = min_t(unsigned int, (conn->mtu - hlen), len);
1573 skb = bt_skb_send_alloc(sk, count + hlen,
1574 msg->msg_flags & MSG_DONTWAIT, &err);
1575 if (!skb)
1576 return ERR_PTR(-ENOMEM);
1577
1578 /* Create L2CAP header */
1579 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1580 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1581 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1582 put_unaligned_le16(control, skb_put(skb, 2));
1583 if (sdulen)
1584 put_unaligned_le16(sdulen, skb_put(skb, 2));
1585
1586 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1587 if (unlikely(err < 0)) {
1588 kfree_skb(skb);
1589 return ERR_PTR(err);
1590 }
1591
1592 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
1593 put_unaligned_le16(0, skb_put(skb, 2));
1594
1595 bt_cb(skb)->retries = 0;
1596 return skb;
1597 }
1598
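/* Split an SDU larger than the remote MPS into a start PDU (carrying the SDU
 * length), continuation PDUs and an end PDU, then splice them onto the TX queue. */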
1599 static inline int l2cap_sar_segment_sdu(struct sock *sk, struct msghdr *msg, size_t len)
1600 {
1601 struct l2cap_pinfo *pi = l2cap_pi(sk);
1602 struct sk_buff *skb;
1603 struct sk_buff_head sar_queue;
1604 u16 control;
1605 size_t size = 0;
1606
1607 __skb_queue_head_init(&sar_queue);
1608 control = L2CAP_SDU_START;
1609 skb = l2cap_create_iframe_pdu(sk, msg, pi->remote_mps, control, len);
1610 if (IS_ERR(skb))
1611 return PTR_ERR(skb);
1612
1613 __skb_queue_tail(&sar_queue, skb);
1614 len -= pi->remote_mps;
1615 size += pi->remote_mps;
1616 control = 0;
1617
1618 while (len > 0) {
1619 size_t buflen;
1620
1621 if (len > pi->remote_mps) {
1622 control |= L2CAP_SDU_CONTINUE;
1623 buflen = pi->remote_mps;
1624 } else {
1625 control |= L2CAP_SDU_END;
1626 buflen = len;
1627 }
1628
1629 skb = l2cap_create_iframe_pdu(sk, msg, buflen, control, 0);
1630 if (IS_ERR(skb)) {
1631 skb_queue_purge(&sar_queue);
1632 return PTR_ERR(skb);
1633 }
1634
1635 __skb_queue_tail(&sar_queue, skb);
1636 len -= buflen;
1637 size += buflen;
1638 control = 0;
1639 }
1640 skb_queue_splice_tail(&sar_queue, TX_QUEUE(sk));
1641 if (sk->sk_send_head == NULL)
1642 sk->sk_send_head = sar_queue.next;
1643
1644 return size;
1645 }
1646
1647 static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
1648 {
1649 struct sock *sk = sock->sk;
1650 struct l2cap_pinfo *pi = l2cap_pi(sk);
1651 struct sk_buff *skb;
1652 u16 control;
1653 int err;
1654
1655 BT_DBG("sock %p, sk %p", sock, sk);
1656
1657 err = sock_error(sk);
1658 if (err)
1659 return err;
1660
1661 if (msg->msg_flags & MSG_OOB)
1662 return -EOPNOTSUPP;
1663
1664 lock_sock(sk);
1665
1666 if (sk->sk_state != BT_CONNECTED) {
1667 err = -ENOTCONN;
1668 goto done;
1669 }
1670
1671 /* Connectionless channel */
1672 if (sk->sk_type == SOCK_DGRAM) {
1673 skb = l2cap_create_connless_pdu(sk, msg, len);
1674 if (IS_ERR(skb))
1675 err = PTR_ERR(skb);
1676 else
1677 err = l2cap_do_send(sk, skb);
1678 goto done;
1679 }
1680
1681 switch (pi->mode) {
1682 case L2CAP_MODE_BASIC:
1683 /* Check outgoing MTU */
1684 if (len > pi->omtu) {
1685 err = -EINVAL;
1686 goto done;
1687 }
1688
1689 /* Create a basic PDU */
1690 skb = l2cap_create_basic_pdu(sk, msg, len);
1691 if (IS_ERR(skb)) {
1692 err = PTR_ERR(skb);
1693 goto done;
1694 }
1695
1696 err = l2cap_do_send(sk, skb);
1697 if (!err)
1698 err = len;
1699 break;
1700
1701 case L2CAP_MODE_ERTM:
1702 case L2CAP_MODE_STREAMING:
1703 /* Entire SDU fits into one PDU */
1704 if (len <= pi->remote_mps) {
1705 control = L2CAP_SDU_UNSEGMENTED;
1706 skb = l2cap_create_iframe_pdu(sk, msg, len, control, 0);
1707 if (IS_ERR(skb)) {
1708 err = PTR_ERR(skb);
1709 goto done;
1710 }
1711 __skb_queue_tail(TX_QUEUE(sk), skb);
1712 if (sk->sk_send_head == NULL)
1713 sk->sk_send_head = skb;
1714 } else {
1715 /* Segment SDU into multiple PDUs */
1716 err = l2cap_sar_segment_sdu(sk, msg, len);
1717 if (err < 0)
1718 goto done;
1719 }
1720
1721 if (pi->mode == L2CAP_MODE_STREAMING)
1722 err = l2cap_streaming_send(sk);
1723 else
1724 err = l2cap_ertm_send(sk);
1725
1726 if (err >= 0)
1727 err = len;
1728 break;
1729
1730 default:
1731 BT_DBG("bad mode %1.1x", pi->mode);
1732 err = -EINVAL;
1733 }
1734
1735 done:
1736 release_sock(sk);
1737 return err;
1738 }
1739
1740 static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags)
1741 {
1742 struct sock *sk = sock->sk;
1743
1744 lock_sock(sk);
1745
1746 if (sk->sk_state == BT_CONNECT2 && bt_sk(sk)->defer_setup) {
1747 struct l2cap_conn_rsp rsp;
1748
1749 sk->sk_state = BT_CONFIG;
1750
1751 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
1752 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
1753 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
1754 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
1755 l2cap_send_cmd(l2cap_pi(sk)->conn, l2cap_pi(sk)->ident,
1756 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
1757
1758 release_sock(sk);
1759 return 0;
1760 }
1761
1762 release_sock(sk);
1763
1764 return bt_sock_recvmsg(iocb, sock, msg, len, flags);
1765 }
1766
1767 static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, unsigned int optlen)
1768 {
1769 struct sock *sk = sock->sk;
1770 struct l2cap_options opts;
1771 int len, err = 0;
1772 u32 opt;
1773
1774 BT_DBG("sk %p", sk);
1775
1776 lock_sock(sk);
1777
1778 switch (optname) {
1779 case L2CAP_OPTIONS:
1780 opts.imtu = l2cap_pi(sk)->imtu;
1781 opts.omtu = l2cap_pi(sk)->omtu;
1782 opts.flush_to = l2cap_pi(sk)->flush_to;
1783 opts.mode = l2cap_pi(sk)->mode;
1784 opts.fcs = l2cap_pi(sk)->fcs;
1785
1786 len = min_t(unsigned int, sizeof(opts), optlen);
1787 if (copy_from_user((char *) &opts, optval, len)) {
1788 err = -EFAULT;
1789 break;
1790 }
1791
1792 l2cap_pi(sk)->imtu = opts.imtu;
1793 l2cap_pi(sk)->omtu = opts.omtu;
1794 l2cap_pi(sk)->mode = opts.mode;
1795 l2cap_pi(sk)->fcs = opts.fcs;
1796 break;
1797
1798 case L2CAP_LM:
1799 if (get_user(opt, (u32 __user *) optval)) {
1800 err = -EFAULT;
1801 break;
1802 }
1803
1804 if (opt & L2CAP_LM_AUTH)
1805 l2cap_pi(sk)->sec_level = BT_SECURITY_LOW;
1806 if (opt & L2CAP_LM_ENCRYPT)
1807 l2cap_pi(sk)->sec_level = BT_SECURITY_MEDIUM;
1808 if (opt & L2CAP_LM_SECURE)
1809 l2cap_pi(sk)->sec_level = BT_SECURITY_HIGH;
1810
1811 l2cap_pi(sk)->role_switch = (opt & L2CAP_LM_MASTER);
1812 l2cap_pi(sk)->force_reliable = (opt & L2CAP_LM_RELIABLE);
1813 break;
1814
1815 default:
1816 err = -ENOPROTOOPT;
1817 break;
1818 }
1819
1820 release_sock(sk);
1821 return err;
1822 }
1823
1824 static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
1825 {
1826 struct sock *sk = sock->sk;
1827 struct bt_security sec;
1828 int len, err = 0;
1829 u32 opt;
1830
1831 BT_DBG("sk %p", sk);
1832
1833 if (level == SOL_L2CAP)
1834 return l2cap_sock_setsockopt_old(sock, optname, optval, optlen);
1835
1836 if (level != SOL_BLUETOOTH)
1837 return -ENOPROTOOPT;
1838
1839 lock_sock(sk);
1840
1841 switch (optname) {
1842 case BT_SECURITY:
1843 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_RAW) {
1844 err = -EINVAL;
1845 break;
1846 }
1847
1848 sec.level = BT_SECURITY_LOW;
1849
1850 len = min_t(unsigned int, sizeof(sec), optlen);
1851 if (copy_from_user((char *) &sec, optval, len)) {
1852 err = -EFAULT;
1853 break;
1854 }
1855
1856 if (sec.level < BT_SECURITY_LOW ||
1857 sec.level > BT_SECURITY_HIGH) {
1858 err = -EINVAL;
1859 break;
1860 }
1861
1862 l2cap_pi(sk)->sec_level = sec.level;
1863 break;
1864
1865 case BT_DEFER_SETUP:
1866 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
1867 err = -EINVAL;
1868 break;
1869 }
1870
1871 if (get_user(opt, (u32 __user *) optval)) {
1872 err = -EFAULT;
1873 break;
1874 }
1875
1876 bt_sk(sk)->defer_setup = opt;
1877 break;
1878
1879 default:
1880 err = -ENOPROTOOPT;
1881 break;
1882 }
1883
1884 release_sock(sk);
1885 return err;
1886 }
1887
1888 static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen)
1889 {
1890 struct sock *sk = sock->sk;
1891 struct l2cap_options opts;
1892 struct l2cap_conninfo cinfo;
1893 int len, err = 0;
1894 u32 opt;
1895
1896 BT_DBG("sk %p", sk);
1897
1898 if (get_user(len, optlen))
1899 return -EFAULT;
1900
1901 lock_sock(sk);
1902
1903 switch (optname) {
1904 case L2CAP_OPTIONS:
1905 opts.imtu = l2cap_pi(sk)->imtu;
1906 opts.omtu = l2cap_pi(sk)->omtu;
1907 opts.flush_to = l2cap_pi(sk)->flush_to;
1908 opts.mode = l2cap_pi(sk)->mode;
1909 opts.fcs = l2cap_pi(sk)->fcs;
1910
1911 len = min_t(unsigned int, len, sizeof(opts));
1912 if (copy_to_user(optval, (char *) &opts, len))
1913 err = -EFAULT;
1914
1915 break;
1916
1917 case L2CAP_LM:
1918 switch (l2cap_pi(sk)->sec_level) {
1919 case BT_SECURITY_LOW:
1920 opt = L2CAP_LM_AUTH;
1921 break;
1922 case BT_SECURITY_MEDIUM:
1923 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT;
1924 break;
1925 case BT_SECURITY_HIGH:
1926 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT |
1927 L2CAP_LM_SECURE;
1928 break;
1929 default:
1930 opt = 0;
1931 break;
1932 }
1933
1934 if (l2cap_pi(sk)->role_switch)
1935 opt |= L2CAP_LM_MASTER;
1936
1937 if (l2cap_pi(sk)->force_reliable)
1938 opt |= L2CAP_LM_RELIABLE;
1939
1940 if (put_user(opt, (u32 __user *) optval))
1941 err = -EFAULT;
1942 break;
1943
1944 case L2CAP_CONNINFO:
1945 if (sk->sk_state != BT_CONNECTED &&
1946 !(sk->sk_state == BT_CONNECT2 &&
1947 bt_sk(sk)->defer_setup)) {
1948 err = -ENOTCONN;
1949 break;
1950 }
1951
1952 cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
1953 memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);
1954
1955 len = min_t(unsigned int, len, sizeof(cinfo));
1956 if (copy_to_user(optval, (char *) &cinfo, len))
1957 err = -EFAULT;
1958
1959 break;
1960
1961 default:
1962 err = -ENOPROTOOPT;
1963 break;
1964 }
1965
1966 release_sock(sk);
1967 return err;
1968 }
1969
1970 static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
1971 {
1972 struct sock *sk = sock->sk;
1973 struct bt_security sec;
1974 int len, err = 0;
1975
1976 BT_DBG("sk %p", sk);
1977
1978 if (level == SOL_L2CAP)
1979 return l2cap_sock_getsockopt_old(sock, optname, optval, optlen);
1980
1981 if (level != SOL_BLUETOOTH)
1982 return -ENOPROTOOPT;
1983
1984 if (get_user(len, optlen))
1985 return -EFAULT;
1986
1987 lock_sock(sk);
1988
1989 switch (optname) {
1990 case BT_SECURITY:
1991 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_RAW) {
1992 err = -EINVAL;
1993 break;
1994 }
1995
1996 sec.level = l2cap_pi(sk)->sec_level;
1997
1998 len = min_t(unsigned int, len, sizeof(sec));
1999 if (copy_to_user(optval, (char *) &sec, len))
2000 err = -EFAULT;
2001
2002 break;
2003
2004 case BT_DEFER_SETUP:
2005 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
2006 err = -EINVAL;
2007 break;
2008 }
2009
2010 if (put_user(bt_sk(sk)->defer_setup, (u32 __user *) optval))
2011 err = -EFAULT;
2012
2013 break;
2014
2015 default:
2016 err = -ENOPROTOOPT;
2017 break;
2018 }
2019
2020 release_sock(sk);
2021 return err;
2022 }
2023
2024 static int l2cap_sock_shutdown(struct socket *sock, int how)
2025 {
2026 struct sock *sk = sock->sk;
2027 int err = 0;
2028
2029 BT_DBG("sock %p, sk %p", sock, sk);
2030
2031 if (!sk)
2032 return 0;
2033
2034 lock_sock(sk);
2035 if (!sk->sk_shutdown) {
2036 sk->sk_shutdown = SHUTDOWN_MASK;
2037 l2cap_sock_clear_timer(sk);
2038 __l2cap_sock_close(sk, 0);
2039
2040 if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
2041 err = bt_sock_wait_state(sk, BT_CLOSED,
2042 sk->sk_lingertime);
2043 }
2044 release_sock(sk);
2045 return err;
2046 }
2047
2048 static int l2cap_sock_release(struct socket *sock)
2049 {
2050 struct sock *sk = sock->sk;
2051 int err;
2052
2053 BT_DBG("sock %p, sk %p", sock, sk);
2054
2055 if (!sk)
2056 return 0;
2057
2058 err = l2cap_sock_shutdown(sock, 2);
2059
2060 sock_orphan(sk);
2061 l2cap_sock_kill(sk);
2062 return err;
2063 }
2064
2065 static void l2cap_chan_ready(struct sock *sk)
2066 {
2067 struct sock *parent = bt_sk(sk)->parent;
2068
2069 BT_DBG("sk %p, parent %p", sk, parent);
2070
2071 l2cap_pi(sk)->conf_state = 0;
2072 l2cap_sock_clear_timer(sk);
2073
2074 if (!parent) {
2075 /* Outgoing channel.
2076 * Wake up socket sleeping on connect.
2077 */
2078 sk->sk_state = BT_CONNECTED;
2079 sk->sk_state_change(sk);
2080 } else {
2081 /* Incoming channel.
2082 * Wake up socket sleeping on accept.
2083 */
2084 parent->sk_data_ready(parent, 0);
2085 }
2086 }
2087
2088 /* Copy frame to all raw sockets on that connection */
2089 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2090 {
2091 struct l2cap_chan_list *l = &conn->chan_list;
2092 struct sk_buff *nskb;
2093 struct sock *sk;
2094
2095 BT_DBG("conn %p", conn);
2096
2097 read_lock(&l->lock);
2098 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
2099 if (sk->sk_type != SOCK_RAW)
2100 continue;
2101
2102 /* Don't send frame to the socket it came from */
2103 if (skb->sk == sk)
2104 continue;
2105 nskb = skb_clone(skb, GFP_ATOMIC);
2106 if (!nskb)
2107 continue;
2108
2109 if (sock_queue_rcv_skb(sk, nskb))
2110 kfree_skb(nskb);
2111 }
2112 read_unlock(&l->lock);
2113 }
2114
2115 /* ---- L2CAP signalling commands ---- */
2116 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
2117 u8 code, u8 ident, u16 dlen, void *data)
2118 {
2119 struct sk_buff *skb, **frag;
2120 struct l2cap_cmd_hdr *cmd;
2121 struct l2cap_hdr *lh;
2122 int len, count;
2123
2124 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
2125 conn, code, ident, dlen);
2126
2127 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
2128 count = min_t(unsigned int, conn->mtu, len);
2129
2130 skb = bt_skb_alloc(count, GFP_ATOMIC);
2131 if (!skb)
2132 return NULL;
2133
2134 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2135 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
2136 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
2137
2138 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
2139 cmd->code = code;
2140 cmd->ident = ident;
2141 cmd->len = cpu_to_le16(dlen);
2142
2143 if (dlen) {
2144 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
2145 memcpy(skb_put(skb, count), data, count);
2146 data += count;
2147 }
2148
2149 len -= skb->len;
2150
2151 /* Continuation fragments (no L2CAP header) */
2152 frag = &skb_shinfo(skb)->frag_list;
2153 while (len) {
2154 count = min_t(unsigned int, conn->mtu, len);
2155
2156 *frag = bt_skb_alloc(count, GFP_ATOMIC);
2157 if (!*frag)
2158 goto fail;
2159
2160 memcpy(skb_put(*frag, count), data, count);
2161
2162 len -= count;
2163 data += count;
2164
2165 frag = &(*frag)->next;
2166 }
2167
2168 return skb;
2169
2170 fail:
2171 kfree_skb(skb);
2172 return NULL;
2173 }
2174
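/* Parse a single option from a configuration request/response, returning its
 * type, length and value and advancing the parse pointer past it. */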
2175 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
2176 {
2177 struct l2cap_conf_opt *opt = *ptr;
2178 int len;
2179
2180 len = L2CAP_CONF_OPT_SIZE + opt->len;
2181 *ptr += len;
2182
2183 *type = opt->type;
2184 *olen = opt->len;
2185
2186 switch (opt->len) {
2187 case 1:
2188 *val = *((u8 *) opt->val);
2189 break;
2190
2191 case 2:
2192 *val = __le16_to_cpu(*((__le16 *) opt->val));
2193 break;
2194
2195 case 4:
2196 *val = __le32_to_cpu(*((__le32 *) opt->val));
2197 break;
2198
2199 default:
2200 *val = (unsigned long) opt->val;
2201 break;
2202 }
2203
2204 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
2205 return len;
2206 }
2207
2208 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2209 {
2210 struct l2cap_conf_opt *opt = *ptr;
2211
2212 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
2213
2214 opt->type = type;
2215 opt->len = len;
2216
2217 switch (len) {
2218 case 1:
2219 *((u8 *) opt->val) = val;
2220 break;
2221
2222 case 2:
2223 *((__le16 *) opt->val) = cpu_to_le16(val);
2224 break;
2225
2226 case 4:
2227 *((__le32 *) opt->val) = cpu_to_le32(val);
2228 break;
2229
2230 default:
2231 memcpy(opt->val, (void *) val, len);
2232 break;
2233 }
2234
2235 *ptr += L2CAP_CONF_OPT_SIZE + len;
2236 }
2237
2238 static void l2cap_ack_timeout(unsigned long arg)
2239 {
2240 struct sock *sk = (void *) arg;
2241
2242 bh_lock_sock(sk);
2243 l2cap_send_ack(l2cap_pi(sk));
2244 bh_unlock_sock(sk);
2245 }
2246
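/* Reset the ERTM sequence counters and set up the retransmission, monitor and
 * acknowledgement timers for a newly configured channel. */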
2247 static inline void l2cap_ertm_init(struct sock *sk)
2248 {
2249 l2cap_pi(sk)->expected_ack_seq = 0;
2250 l2cap_pi(sk)->unacked_frames = 0;
2251 l2cap_pi(sk)->buffer_seq = 0;
2252 l2cap_pi(sk)->num_to_ack = 0;
2253 l2cap_pi(sk)->frames_sent = 0;
2254
2255 setup_timer(&l2cap_pi(sk)->retrans_timer,
2256 l2cap_retrans_timeout, (unsigned long) sk);
2257 setup_timer(&l2cap_pi(sk)->monitor_timer,
2258 l2cap_monitor_timeout, (unsigned long) sk);
2259 setup_timer(&l2cap_pi(sk)->ack_timer,
2260 l2cap_ack_timeout, (unsigned long) sk);
2261
2262 __skb_queue_head_init(SREJ_QUEUE(sk));
2263 }
2264
2265 static int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
2266 {
2267 u32 local_feat_mask = l2cap_feat_mask;
2268 if (enable_ertm)
2269 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
2270
2271 switch (mode) {
2272 case L2CAP_MODE_ERTM:
2273 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
2274 case L2CAP_MODE_STREAMING:
2275 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
2276 default:
2277 return 0x00;
2278 }
2279 }
2280
2281 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2282 {
2283 switch (mode) {
2284 case L2CAP_MODE_STREAMING:
2285 case L2CAP_MODE_ERTM:
2286 if (l2cap_mode_supported(mode, remote_feat_mask))
2287 return mode;
2288 /* fall through */
2289 default:
2290 return L2CAP_MODE_BASIC;
2291 }
2292 }
2293
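/* Build the outgoing Configure Request.  On the first pass the channel
 * mode is chosen from the local settings and the remote feature mask;
 * depending on the mode an MTU, RFC and FCS option is added.  The MPS is
 * capped at conn->mtu minus 10 bytes of header overhead. */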
2294 static int l2cap_build_conf_req(struct sock *sk, void *data)
2295 {
2296 struct l2cap_pinfo *pi = l2cap_pi(sk);
2297 struct l2cap_conf_req *req = data;
2298 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2299 void *ptr = req->data;
2300
2301 BT_DBG("sk %p", sk);
2302
2303 if (pi->num_conf_req || pi->num_conf_rsp)
2304 goto done;
2305
2306 switch (pi->mode) {
2307 case L2CAP_MODE_STREAMING:
2308 case L2CAP_MODE_ERTM:
2309 pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
2310 if (!l2cap_mode_supported(pi->mode, pi->conn->feat_mask))
2311 l2cap_send_disconn_req(pi->conn, sk);
2312 break;
2313 default:
2314 pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
2315 break;
2316 }
2317
2318 done:
2319 switch (pi->mode) {
2320 case L2CAP_MODE_BASIC:
2321 if (pi->imtu != L2CAP_DEFAULT_MTU)
2322 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
2323 break;
2324
2325 case L2CAP_MODE_ERTM:
2326 rfc.mode = L2CAP_MODE_ERTM;
2327 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
2328 rfc.max_transmit = max_transmit;
2329 rfc.retrans_timeout = 0;
2330 rfc.monitor_timeout = 0;
2331 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
2332 if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
2333 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
2334
2335 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2336 sizeof(rfc), (unsigned long) &rfc);
2337
2338 if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
2339 break;
2340
2341 if (pi->fcs == L2CAP_FCS_NONE ||
2342 pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
2343 pi->fcs = L2CAP_FCS_NONE;
2344 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
2345 }
2346 break;
2347
2348 case L2CAP_MODE_STREAMING:
2349 rfc.mode = L2CAP_MODE_STREAMING;
2350 rfc.txwin_size = 0;
2351 rfc.max_transmit = 0;
2352 rfc.retrans_timeout = 0;
2353 rfc.monitor_timeout = 0;
2354 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
2355 if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
2356 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
2357
2358 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2359 sizeof(rfc), (unsigned long) &rfc);
2360
2361 if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
2362 break;
2363
2364 if (pi->fcs == L2CAP_FCS_NONE ||
2365 pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
2366 pi->fcs = L2CAP_FCS_NONE;
2367 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
2368 }
2369 break;
2370 }
2371
2372 /* FIXME: Need actual value of the flush timeout */
2373 //if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
2374 // l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);
2375
2376 req->dcid = cpu_to_le16(pi->dcid);
2377 req->flags = cpu_to_le16(0);
2378
2379 return ptr - data;
2380 }
2381
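/* Walk the configuration options stored from the remote Configure Request,
 * remember MTU, flush timeout, RFC and FCS settings and build the
 * Configure Response in 'data'.  Unknown options that are not hints are
 * reported back with L2CAP_CONF_UNKNOWN. */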
2382 static int l2cap_parse_conf_req(struct sock *sk, void *data)
2383 {
2384 struct l2cap_pinfo *pi = l2cap_pi(sk);
2385 struct l2cap_conf_rsp *rsp = data;
2386 void *ptr = rsp->data;
2387 void *req = pi->conf_req;
2388 int len = pi->conf_len;
2389 int type, hint, olen;
2390 unsigned long val;
2391 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2392 u16 mtu = L2CAP_DEFAULT_MTU;
2393 u16 result = L2CAP_CONF_SUCCESS;
2394
2395 BT_DBG("sk %p", sk);
2396
2397 while (len >= L2CAP_CONF_OPT_SIZE) {
2398 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
2399
2400 hint = type & L2CAP_CONF_HINT;
2401 type &= L2CAP_CONF_MASK;
2402
2403 switch (type) {
2404 case L2CAP_CONF_MTU:
2405 mtu = val;
2406 break;
2407
2408 case L2CAP_CONF_FLUSH_TO:
2409 pi->flush_to = val;
2410 break;
2411
2412 case L2CAP_CONF_QOS:
2413 break;
2414
2415 case L2CAP_CONF_RFC:
2416 if (olen == sizeof(rfc))
2417 memcpy(&rfc, (void *) val, olen);
2418 break;
2419
2420 case L2CAP_CONF_FCS:
2421 if (val == L2CAP_FCS_NONE)
2422 pi->conf_state |= L2CAP_CONF_NO_FCS_RECV;
2423
2424 break;
2425
2426 default:
2427 if (hint)
2428 break;
2429
2430 result = L2CAP_CONF_UNKNOWN;
2431 *((u8 *) ptr++) = type;
2432 break;
2433 }
2434 }
2435
2436 if (pi->num_conf_rsp || pi->num_conf_req)
2437 goto done;
2438
2439 switch (pi->mode) {
2440 case L2CAP_MODE_STREAMING:
2441 case L2CAP_MODE_ERTM:
2442 pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
2443 if (!l2cap_mode_supported(pi->mode, pi->conn->feat_mask))
2444 return -ECONNREFUSED;
2445 break;
2446 default:
2447 pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
2448 break;
2449 }
2450
2451 done:
2452 if (pi->mode != rfc.mode) {
2453 result = L2CAP_CONF_UNACCEPT;
2454 rfc.mode = pi->mode;
2455
2456 if (pi->num_conf_rsp == 1)
2457 return -ECONNREFUSED;
2458
2459 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2460 sizeof(rfc), (unsigned long) &rfc);
2461 }
2462
2463
2464 if (result == L2CAP_CONF_SUCCESS) {
2465 /* Configure output options and let the other side know
2466 * which ones we don't like. */
2467
2468 if (mtu < L2CAP_DEFAULT_MIN_MTU)
2469 result = L2CAP_CONF_UNACCEPT;
2470 else {
2471 pi->omtu = mtu;
2472 pi->conf_state |= L2CAP_CONF_MTU_DONE;
2473 }
2474 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
2475
2476 switch (rfc.mode) {
2477 case L2CAP_MODE_BASIC:
2478 pi->fcs = L2CAP_FCS_NONE;
2479 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2480 break;
2481
2482 case L2CAP_MODE_ERTM:
2483 pi->remote_tx_win = rfc.txwin_size;
2484 pi->remote_max_tx = rfc.max_transmit;
2485 			if (le16_to_cpu(rfc.max_pdu_size) > pi->conn->mtu - 10)
2486 				rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
2487
2488 pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
2489
2490 			rfc.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
2491 			rfc.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
2492
2493 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2494
2495 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2496 sizeof(rfc), (unsigned long) &rfc);
2497
2498 break;
2499
2500 case L2CAP_MODE_STREAMING:
2501 			if (le16_to_cpu(rfc.max_pdu_size) > pi->conn->mtu - 10)
2502 				rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
2503
2504 pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
2505
2506 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2507
2508 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2509 sizeof(rfc), (unsigned long) &rfc);
2510
2511 break;
2512
2513 default:
2514 result = L2CAP_CONF_UNACCEPT;
2515
2516 memset(&rfc, 0, sizeof(rfc));
2517 rfc.mode = pi->mode;
2518 }
2519
2520 if (result == L2CAP_CONF_SUCCESS)
2521 pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
2522 }
2523 rsp->scid = cpu_to_le16(pi->dcid);
2524 rsp->result = cpu_to_le16(result);
2525 rsp->flags = cpu_to_le16(0x0000);
2526
2527 return ptr - data;
2528 }
2529
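/* Called when our Configure Request was not accepted: take over the values
 * proposed in the remote Configure Response and build a new request in
 * 'data'.  Returns the length of the new request or a negative error if
 * the proposed mode cannot be used. */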
2530 static int l2cap_parse_conf_rsp(struct sock *sk, void *rsp, int len, void *data, u16 *result)
2531 {
2532 struct l2cap_pinfo *pi = l2cap_pi(sk);
2533 struct l2cap_conf_req *req = data;
2534 void *ptr = req->data;
2535 int type, olen;
2536 unsigned long val;
2537 	/* Sane defaults in case the response omits the RFC option */
	struct l2cap_conf_rfc rfc = {
		.mode = pi->mode,
		.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
		.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
		.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE),
	};
2538
2539 BT_DBG("sk %p, rsp %p, len %d, req %p", sk, rsp, len, data);
2540
2541 while (len >= L2CAP_CONF_OPT_SIZE) {
2542 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2543
2544 switch (type) {
2545 case L2CAP_CONF_MTU:
2546 if (val < L2CAP_DEFAULT_MIN_MTU) {
2547 *result = L2CAP_CONF_UNACCEPT;
2548 pi->omtu = L2CAP_DEFAULT_MIN_MTU;
2549 } else
2550 pi->omtu = val;
2551 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
2552 break;
2553
2554 case L2CAP_CONF_FLUSH_TO:
2555 pi->flush_to = val;
2556 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2557 2, pi->flush_to);
2558 break;
2559
2560 case L2CAP_CONF_RFC:
2561 if (olen == sizeof(rfc))
2562 memcpy(&rfc, (void *)val, olen);
2563
2564 if ((pi->conf_state & L2CAP_CONF_STATE2_DEVICE) &&
2565 rfc.mode != pi->mode)
2566 return -ECONNREFUSED;
2567
2568 pi->mode = rfc.mode;
2569 pi->fcs = 0;
2570
2571 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2572 sizeof(rfc), (unsigned long) &rfc);
2573 break;
2574 }
2575 }
2576
2577 if (*result == L2CAP_CONF_SUCCESS) {
2578 switch (rfc.mode) {
2579 case L2CAP_MODE_ERTM:
2580 pi->remote_tx_win = rfc.txwin_size;
2581 pi->retrans_timeout = rfc.retrans_timeout;
2582 pi->monitor_timeout = rfc.monitor_timeout;
2583 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2584 break;
2585 case L2CAP_MODE_STREAMING:
2586 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2587 }
2588 }
2589
2590 req->dcid = cpu_to_le16(pi->dcid);
2591 req->flags = cpu_to_le16(0x0000);
2592
2593 return ptr - data;
2594 }
2595
2596 static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
2597 {
2598 struct l2cap_conf_rsp *rsp = data;
2599 void *ptr = rsp->data;
2600
2601 BT_DBG("sk %p", sk);
2602
2603 rsp->scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2604 rsp->result = cpu_to_le16(result);
2605 rsp->flags = cpu_to_le16(flags);
2606
2607 return ptr - data;
2608 }
2609
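/* Extract the RFC option from a successful Configure Response so that the
 * finally negotiated timeouts and MPS are used for ERTM and streaming
 * channels. */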
2610 static void l2cap_conf_rfc_get(struct sock *sk, void *rsp, int len)
2611 {
2612 struct l2cap_pinfo *pi = l2cap_pi(sk);
2613 int type, olen;
2614 unsigned long val;
2615 	/* Sane defaults in case the response omits the RFC option */
	struct l2cap_conf_rfc rfc = {
		.mode = pi->mode,
		.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
		.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
		.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE),
	};
2616
2617 BT_DBG("sk %p, rsp %p, len %d", sk, rsp, len);
2618
2619 if ((pi->mode != L2CAP_MODE_ERTM) && (pi->mode != L2CAP_MODE_STREAMING))
2620 return;
2621
2622 while (len >= L2CAP_CONF_OPT_SIZE) {
2623 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2624
2625 switch (type) {
2626 case L2CAP_CONF_RFC:
2627 if (olen == sizeof(rfc))
2628 memcpy(&rfc, (void *)val, olen);
2629 goto done;
2630 }
2631 }
2632
2633 done:
2634 switch (rfc.mode) {
2635 case L2CAP_MODE_ERTM:
2636 pi->remote_tx_win = rfc.txwin_size;
2637 pi->retrans_timeout = rfc.retrans_timeout;
2638 pi->monitor_timeout = rfc.monitor_timeout;
2639 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2640 break;
2641 case L2CAP_MODE_STREAMING:
2642 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2643 }
2644 }
2645
2646 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2647 {
2648 struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
2649
2650 if (rej->reason != 0x0000)
2651 return 0;
2652
2653 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2654 cmd->ident == conn->info_ident) {
2655 del_timer(&conn->info_timer);
2656
2657 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2658 conn->info_ident = 0;
2659
2660 l2cap_conn_start(conn);
2661 }
2662
2663 return 0;
2664 }
2665
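/* Handle an incoming Connection Request: look up a listening socket for
 * the PSM, check the security level of the ACL link, allocate a child
 * socket for the new channel and answer with success, pending or an
 * error result. */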
2666 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2667 {
2668 struct l2cap_chan_list *list = &conn->chan_list;
2669 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
2670 struct l2cap_conn_rsp rsp;
2671 struct sock *sk, *parent;
2672 int result, status = L2CAP_CS_NO_INFO;
2673
2674 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
2675 __le16 psm = req->psm;
2676
2677 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
2678
2679 	/* Check if we have a socket listening on this PSM */
2680 parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
2681 if (!parent) {
2682 result = L2CAP_CR_BAD_PSM;
2683 goto sendresp;
2684 }
2685
2686 /* Check if the ACL is secure enough (if not SDP) */
2687 if (psm != cpu_to_le16(0x0001) &&
2688 !hci_conn_check_link_mode(conn->hcon)) {
2689 conn->disc_reason = 0x05;
2690 result = L2CAP_CR_SEC_BLOCK;
2691 goto response;
2692 }
2693
2694 result = L2CAP_CR_NO_MEM;
2695
2696 /* Check for backlog size */
2697 if (sk_acceptq_is_full(parent)) {
2698 BT_DBG("backlog full %d", parent->sk_ack_backlog);
2699 goto response;
2700 }
2701
2702 sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
2703 if (!sk)
2704 goto response;
2705
2706 write_lock_bh(&list->lock);
2707
2708 	/* Check if we already have a channel with that DCID */
2709 if (__l2cap_get_chan_by_dcid(list, scid)) {
2710 write_unlock_bh(&list->lock);
2711 sock_set_flag(sk, SOCK_ZAPPED);
2712 l2cap_sock_kill(sk);
2713 goto response;
2714 }
2715
2716 hci_conn_hold(conn->hcon);
2717
2718 l2cap_sock_init(sk, parent);
2719 bacpy(&bt_sk(sk)->src, conn->src);
2720 bacpy(&bt_sk(sk)->dst, conn->dst);
2721 l2cap_pi(sk)->psm = psm;
2722 l2cap_pi(sk)->dcid = scid;
2723
2724 __l2cap_chan_add(conn, sk, parent);
2725 dcid = l2cap_pi(sk)->scid;
2726
2727 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
2728
2729 l2cap_pi(sk)->ident = cmd->ident;
2730
2731 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
2732 if (l2cap_check_security(sk)) {
2733 if (bt_sk(sk)->defer_setup) {
2734 sk->sk_state = BT_CONNECT2;
2735 result = L2CAP_CR_PEND;
2736 status = L2CAP_CS_AUTHOR_PEND;
2737 parent->sk_data_ready(parent, 0);
2738 } else {
2739 sk->sk_state = BT_CONFIG;
2740 result = L2CAP_CR_SUCCESS;
2741 status = L2CAP_CS_NO_INFO;
2742 }
2743 } else {
2744 sk->sk_state = BT_CONNECT2;
2745 result = L2CAP_CR_PEND;
2746 status = L2CAP_CS_AUTHEN_PEND;
2747 }
2748 } else {
2749 sk->sk_state = BT_CONNECT2;
2750 result = L2CAP_CR_PEND;
2751 status = L2CAP_CS_NO_INFO;
2752 }
2753
2754 write_unlock_bh(&list->lock);
2755
2756 response:
2757 bh_unlock_sock(parent);
2758
2759 sendresp:
2760 rsp.scid = cpu_to_le16(scid);
2761 rsp.dcid = cpu_to_le16(dcid);
2762 rsp.result = cpu_to_le16(result);
2763 rsp.status = cpu_to_le16(status);
2764 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2765
2766 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
2767 struct l2cap_info_req info;
2768 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2769
2770 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
2771 conn->info_ident = l2cap_get_ident(conn);
2772
2773 mod_timer(&conn->info_timer, jiffies +
2774 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
2775
2776 l2cap_send_cmd(conn, conn->info_ident,
2777 L2CAP_INFO_REQ, sizeof(info), &info);
2778 }
2779
2780 return 0;
2781 }
2782
2783 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2784 {
2785 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
2786 u16 scid, dcid, result, status;
2787 struct sock *sk;
2788 u8 req[128];
2789
2790 scid = __le16_to_cpu(rsp->scid);
2791 dcid = __le16_to_cpu(rsp->dcid);
2792 result = __le16_to_cpu(rsp->result);
2793 status = __le16_to_cpu(rsp->status);
2794
2795 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
2796
2797 if (scid) {
2798 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
2799 if (!sk)
2800 return 0;
2801 } else {
2802 sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident);
2803 if (!sk)
2804 return 0;
2805 }
2806
2807 switch (result) {
2808 case L2CAP_CR_SUCCESS:
2809 sk->sk_state = BT_CONFIG;
2810 l2cap_pi(sk)->ident = 0;
2811 l2cap_pi(sk)->dcid = dcid;
2812 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
2813
2814 l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_CONNECT_PEND;
2815
2816 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2817 l2cap_build_conf_req(sk, req), req);
2818 l2cap_pi(sk)->num_conf_req++;
2819 break;
2820
2821 case L2CAP_CR_PEND:
2822 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
2823 break;
2824
2825 default:
2826 l2cap_chan_del(sk, ECONNREFUSED);
2827 break;
2828 }
2829
2830 bh_unlock_sock(sk);
2831 return 0;
2832 }
2833
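/* Handle an incoming Configure Request.  Options may arrive in several
 * fragments (continuation flag set); they are accumulated in conf_req and
 * only parsed once the last fragment is received.  When both directions
 * are configured the channel is moved to BT_CONNECTED. */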
2834 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2835 {
2836 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
2837 u16 dcid, flags;
2838 u8 rsp[64];
2839 struct sock *sk;
2840 int len;
2841
2842 dcid = __le16_to_cpu(req->dcid);
2843 flags = __le16_to_cpu(req->flags);
2844
2845 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
2846
2847 sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
2848 if (!sk)
2849 return -ENOENT;
2850
2851 if (sk->sk_state == BT_DISCONN)
2852 goto unlock;
2853
2854 /* Reject if config buffer is too small. */
2855 len = cmd_len - sizeof(*req);
2856 if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
2857 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2858 l2cap_build_conf_rsp(sk, rsp,
2859 L2CAP_CONF_REJECT, flags), rsp);
2860 goto unlock;
2861 }
2862
2863 /* Store config. */
2864 memcpy(l2cap_pi(sk)->conf_req + l2cap_pi(sk)->conf_len, req->data, len);
2865 l2cap_pi(sk)->conf_len += len;
2866
2867 if (flags & 0x0001) {
2868 /* Incomplete config. Send empty response. */
2869 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2870 l2cap_build_conf_rsp(sk, rsp,
2871 L2CAP_CONF_SUCCESS, 0x0001), rsp);
2872 goto unlock;
2873 }
2874
2875 /* Complete config. */
2876 len = l2cap_parse_conf_req(sk, rsp);
2877 if (len < 0) {
2878 l2cap_send_disconn_req(conn, sk);
2879 goto unlock;
2880 }
2881
2882 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
2883 l2cap_pi(sk)->num_conf_rsp++;
2884
2885 /* Reset config buffer. */
2886 l2cap_pi(sk)->conf_len = 0;
2887
2888 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))
2889 goto unlock;
2890
2891 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
2892 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV) ||
2893 l2cap_pi(sk)->fcs != L2CAP_FCS_NONE)
2894 l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16;
2895
2896 sk->sk_state = BT_CONNECTED;
2897
2898 l2cap_pi(sk)->next_tx_seq = 0;
2899 l2cap_pi(sk)->expected_tx_seq = 0;
2900 __skb_queue_head_init(TX_QUEUE(sk));
2901 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
2902 l2cap_ertm_init(sk);
2903
2904 l2cap_chan_ready(sk);
2905 goto unlock;
2906 }
2907
2908 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
2909 u8 buf[64];
2910 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2911 l2cap_build_conf_req(sk, buf), buf);
2912 l2cap_pi(sk)->num_conf_req++;
2913 }
2914
2915 unlock:
2916 bh_unlock_sock(sk);
2917 return 0;
2918 }
2919
2920 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2921 {
2922 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
2923 u16 scid, flags, result;
2924 struct sock *sk;
2925 	int len = le16_to_cpu(cmd->len) - sizeof(*rsp);
2926
2927 scid = __le16_to_cpu(rsp->scid);
2928 flags = __le16_to_cpu(rsp->flags);
2929 result = __le16_to_cpu(rsp->result);
2930
2931 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
2932 scid, flags, result);
2933
2934 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
2935 if (!sk)
2936 return 0;
2937
2938 switch (result) {
2939 case L2CAP_CONF_SUCCESS:
2940 l2cap_conf_rfc_get(sk, rsp->data, len);
2941 break;
2942
2943 case L2CAP_CONF_UNACCEPT:
2944 if (l2cap_pi(sk)->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
2945 char req[64];
2946
2947 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
2948 l2cap_send_disconn_req(conn, sk);
2949 goto done;
2950 }
2951
2952 /* throw out any old stored conf requests */
2953 result = L2CAP_CONF_SUCCESS;
2954 len = l2cap_parse_conf_rsp(sk, rsp->data,
2955 len, req, &result);
2956 if (len < 0) {
2957 l2cap_send_disconn_req(conn, sk);
2958 goto done;
2959 }
2960
2961 l2cap_send_cmd(conn, l2cap_get_ident(conn),
2962 L2CAP_CONF_REQ, len, req);
2963 l2cap_pi(sk)->num_conf_req++;
2964 if (result != L2CAP_CONF_SUCCESS)
2965 goto done;
2966 break;
2967 }
2968
2969 default:
2970 sk->sk_state = BT_DISCONN;
2971 sk->sk_err = ECONNRESET;
2972 l2cap_sock_set_timer(sk, HZ * 5);
2973 l2cap_send_disconn_req(conn, sk);
2974 goto done;
2975 }
2976
2977 if (flags & 0x01)
2978 goto done;
2979
2980 l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
2981
2982 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
2983 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV) ||
2984 l2cap_pi(sk)->fcs != L2CAP_FCS_NONE)
2985 l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16;
2986
2987 sk->sk_state = BT_CONNECTED;
2988 l2cap_pi(sk)->next_tx_seq = 0;
2989 l2cap_pi(sk)->expected_tx_seq = 0;
2990 __skb_queue_head_init(TX_QUEUE(sk));
2991 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
2992 l2cap_ertm_init(sk);
2993
2994 l2cap_chan_ready(sk);
2995 }
2996
2997 done:
2998 bh_unlock_sock(sk);
2999 return 0;
3000 }
3001
3002 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3003 {
3004 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
3005 struct l2cap_disconn_rsp rsp;
3006 u16 dcid, scid;
3007 struct sock *sk;
3008
3009 scid = __le16_to_cpu(req->scid);
3010 dcid = __le16_to_cpu(req->dcid);
3011
3012 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
3013
3014 sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
3015 if (!sk)
3016 return 0;
3017
3018 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
3019 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
3020 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
3021
3022 sk->sk_shutdown = SHUTDOWN_MASK;
3023
3024 skb_queue_purge(TX_QUEUE(sk));
3025
3026 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
3027 skb_queue_purge(SREJ_QUEUE(sk));
3028 del_timer(&l2cap_pi(sk)->retrans_timer);
3029 del_timer(&l2cap_pi(sk)->monitor_timer);
3030 del_timer(&l2cap_pi(sk)->ack_timer);
3031 }
3032
3033 l2cap_chan_del(sk, ECONNRESET);
3034 bh_unlock_sock(sk);
3035
3036 l2cap_sock_kill(sk);
3037 return 0;
3038 }
3039
3040 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3041 {
3042 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
3043 u16 dcid, scid;
3044 struct sock *sk;
3045
3046 scid = __le16_to_cpu(rsp->scid);
3047 dcid = __le16_to_cpu(rsp->dcid);
3048
3049 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
3050
3051 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
3052 if (!sk)
3053 return 0;
3054
3055 skb_queue_purge(TX_QUEUE(sk));
3056
3057 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
3058 skb_queue_purge(SREJ_QUEUE(sk));
3059 del_timer(&l2cap_pi(sk)->retrans_timer);
3060 del_timer(&l2cap_pi(sk)->monitor_timer);
3061 del_timer(&l2cap_pi(sk)->ack_timer);
3062 }
3063
3064 l2cap_chan_del(sk, 0);
3065 bh_unlock_sock(sk);
3066
3067 l2cap_sock_kill(sk);
3068 return 0;
3069 }
3070
3071 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3072 {
3073 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
3074 u16 type;
3075
3076 type = __le16_to_cpu(req->type);
3077
3078 BT_DBG("type 0x%4.4x", type);
3079
3080 if (type == L2CAP_IT_FEAT_MASK) {
3081 u8 buf[8];
3082 u32 feat_mask = l2cap_feat_mask;
3083 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3084 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
3085 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
3086 if (enable_ertm)
3087 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
3088 | L2CAP_FEAT_FCS;
3089 put_unaligned_le32(feat_mask, rsp->data);
3090 l2cap_send_cmd(conn, cmd->ident,
3091 L2CAP_INFO_RSP, sizeof(buf), buf);
3092 } else if (type == L2CAP_IT_FIXED_CHAN) {
3093 u8 buf[12];
3094 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3095 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3096 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
3097 memcpy(buf + 4, l2cap_fixed_chan, 8);
3098 l2cap_send_cmd(conn, cmd->ident,
3099 L2CAP_INFO_RSP, sizeof(buf), buf);
3100 } else {
3101 struct l2cap_info_rsp rsp;
3102 rsp.type = cpu_to_le16(type);
3103 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
3104 l2cap_send_cmd(conn, cmd->ident,
3105 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
3106 }
3107
3108 return 0;
3109 }
3110
3111 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3112 {
3113 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
3114 u16 type, result;
3115
3116 type = __le16_to_cpu(rsp->type);
3117 result = __le16_to_cpu(rsp->result);
3118
3119 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
3120
3121 del_timer(&conn->info_timer);
3122
3123 if (type == L2CAP_IT_FEAT_MASK) {
3124 conn->feat_mask = get_unaligned_le32(rsp->data);
3125
3126 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
3127 struct l2cap_info_req req;
3128 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3129
3130 conn->info_ident = l2cap_get_ident(conn);
3131
3132 l2cap_send_cmd(conn, conn->info_ident,
3133 L2CAP_INFO_REQ, sizeof(req), &req);
3134 } else {
3135 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3136 conn->info_ident = 0;
3137
3138 l2cap_conn_start(conn);
3139 }
3140 } else if (type == L2CAP_IT_FIXED_CHAN) {
3141 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3142 conn->info_ident = 0;
3143
3144 l2cap_conn_start(conn);
3145 }
3146
3147 return 0;
3148 }
3149
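/* Process all signalling commands contained in one C-frame.  Each command
 * is dispatched to its handler; a truncated command aborts the loop and a
 * handler error is answered with a Command Reject. */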
3150 static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
3151 {
3152 u8 *data = skb->data;
3153 int len = skb->len;
3154 struct l2cap_cmd_hdr cmd;
3155 int err = 0;
3156
3157 l2cap_raw_recv(conn, skb);
3158
3159 while (len >= L2CAP_CMD_HDR_SIZE) {
3160 u16 cmd_len;
3161 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
3162 data += L2CAP_CMD_HDR_SIZE;
3163 len -= L2CAP_CMD_HDR_SIZE;
3164
3165 cmd_len = le16_to_cpu(cmd.len);
3166
3167 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
3168
3169 if (cmd_len > len || !cmd.ident) {
3170 BT_DBG("corrupted command");
3171 break;
3172 }
3173
3174 switch (cmd.code) {
3175 case L2CAP_COMMAND_REJ:
3176 l2cap_command_rej(conn, &cmd, data);
3177 break;
3178
3179 case L2CAP_CONN_REQ:
3180 err = l2cap_connect_req(conn, &cmd, data);
3181 break;
3182
3183 case L2CAP_CONN_RSP:
3184 err = l2cap_connect_rsp(conn, &cmd, data);
3185 break;
3186
3187 case L2CAP_CONF_REQ:
3188 err = l2cap_config_req(conn, &cmd, cmd_len, data);
3189 break;
3190
3191 case L2CAP_CONF_RSP:
3192 err = l2cap_config_rsp(conn, &cmd, data);
3193 break;
3194
3195 case L2CAP_DISCONN_REQ:
3196 err = l2cap_disconnect_req(conn, &cmd, data);
3197 break;
3198
3199 case L2CAP_DISCONN_RSP:
3200 err = l2cap_disconnect_rsp(conn, &cmd, data);
3201 break;
3202
3203 case L2CAP_ECHO_REQ:
3204 l2cap_send_cmd(conn, cmd.ident, L2CAP_ECHO_RSP, cmd_len, data);
3205 break;
3206
3207 case L2CAP_ECHO_RSP:
3208 break;
3209
3210 case L2CAP_INFO_REQ:
3211 err = l2cap_information_req(conn, &cmd, data);
3212 break;
3213
3214 case L2CAP_INFO_RSP:
3215 err = l2cap_information_rsp(conn, &cmd, data);
3216 break;
3217
3218 default:
3219 BT_ERR("Unknown signaling command 0x%2.2x", cmd.code);
3220 err = -EINVAL;
3221 break;
3222 }
3223
3224 if (err) {
3225 struct l2cap_cmd_rej rej;
3226 BT_DBG("error %d", err);
3227
3228 /* FIXME: Map err to a valid reason */
3229 rej.reason = cpu_to_le16(0);
3230 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
3231 }
3232
3233 data += cmd_len;
3234 len -= cmd_len;
3235 }
3236
3237 kfree_skb(skb);
3238 }
3239
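/* Verify the CRC16 FCS of a received frame.  The FCS is trimmed off the
 * end of the skb and compared against a CRC computed over the L2CAP
 * header (still located just in front of skb->data) and the remaining
 * payload. */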
3240 static int l2cap_check_fcs(struct l2cap_pinfo *pi, struct sk_buff *skb)
3241 {
3242 u16 our_fcs, rcv_fcs;
3243 int hdr_size = L2CAP_HDR_SIZE + 2;
3244
3245 if (pi->fcs == L2CAP_FCS_CRC16) {
3246 skb_trim(skb, skb->len - 2);
3247 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
3248 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
3249
3250 if (our_fcs != rcv_fcs)
3251 return -EINVAL;
3252 }
3253 return 0;
3254 }
3255
3256 static inline void l2cap_send_i_or_rr_or_rnr(struct sock *sk)
3257 {
3258 struct l2cap_pinfo *pi = l2cap_pi(sk);
3259 u16 control = 0;
3260
3261 pi->frames_sent = 0;
3262 pi->conn_state |= L2CAP_CONN_SEND_FBIT;
3263
3264 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3265
3266 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
3267 control |= L2CAP_SUPER_RCV_NOT_READY | L2CAP_CTRL_FINAL;
3268 l2cap_send_sframe(pi, control);
3269 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
3270 }
3271
3272 if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY && pi->unacked_frames > 0)
3273 __mod_retrans_timer();
3274
3275 l2cap_ertm_send(sk);
3276
3277 if (!(pi->conn_state & L2CAP_CONN_LOCAL_BUSY) &&
3278 pi->frames_sent == 0) {
3279 control |= L2CAP_SUPER_RCV_READY;
3280 l2cap_send_sframe(pi, control);
3281 }
3282 }
3283
3284 static void l2cap_add_to_srej_queue(struct sock *sk, struct sk_buff *skb, u8 tx_seq, u8 sar)
3285 {
3286 struct sk_buff *next_skb;
3287
3288 bt_cb(skb)->tx_seq = tx_seq;
3289 bt_cb(skb)->sar = sar;
3290
3291 next_skb = skb_peek(SREJ_QUEUE(sk));
3292 if (!next_skb) {
3293 __skb_queue_tail(SREJ_QUEUE(sk), skb);
3294 return;
3295 }
3296
3297 do {
3298 if (bt_cb(next_skb)->tx_seq > tx_seq) {
3299 __skb_queue_before(SREJ_QUEUE(sk), next_skb, skb);
3300 return;
3301 }
3302
3303 if (skb_queue_is_last(SREJ_QUEUE(sk), next_skb))
3304 break;
3305
3306 	} while ((next_skb = skb_queue_next(SREJ_QUEUE(sk), next_skb)));
3307
3308 __skb_queue_tail(SREJ_QUEUE(sk), skb);
3309 }
3310
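/* Reassemble segmented SDUs according to the SAR bits of the control
 * field: a start frame allocates pi->sdu with the announced SDU length,
 * continuation and end frames append to it, and the complete SDU is
 * queued to the socket. */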
3311 static int l2cap_sar_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
3312 {
3313 struct l2cap_pinfo *pi = l2cap_pi(sk);
3314 struct sk_buff *_skb;
3315 int err = -EINVAL;
3316
3317 switch (control & L2CAP_CTRL_SAR) {
3318 case L2CAP_SDU_UNSEGMENTED:
3319 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
3320 kfree_skb(pi->sdu);
3321 break;
3322 }
3323
3324 err = sock_queue_rcv_skb(sk, skb);
3325 if (!err)
3326 return 0;
3327
3328 break;
3329
3330 case L2CAP_SDU_START:
3331 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
3332 kfree_skb(pi->sdu);
3333 break;
3334 }
3335
3336 pi->sdu_len = get_unaligned_le16(skb->data);
3337 skb_pull(skb, 2);
3338
3339 pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
3340 if (!pi->sdu) {
3341 err = -ENOMEM;
3342 break;
3343 }
3344
3345 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3346
3347 pi->conn_state |= L2CAP_CONN_SAR_SDU;
3348 pi->partial_sdu_len = skb->len;
3349 err = 0;
3350 break;
3351
3352 case L2CAP_SDU_CONTINUE:
3353 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3354 break;
3355
3356 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3357
3358 pi->partial_sdu_len += skb->len;
3359 if (pi->partial_sdu_len > pi->sdu_len)
3360 kfree_skb(pi->sdu);
3361 else
3362 err = 0;
3363
3364 break;
3365
3366 case L2CAP_SDU_END:
3367 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3368 break;
3369
3370 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3371
3372 pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
3373 pi->partial_sdu_len += skb->len;
3374
3375 if (pi->partial_sdu_len > pi->imtu)
3376 goto drop;
3377
3378 if (pi->partial_sdu_len == pi->sdu_len) {
3379 			_skb = skb_clone(pi->sdu, GFP_ATOMIC);
			if (!_skb)
				goto drop;
3380 			err = sock_queue_rcv_skb(sk, _skb);
3381 			if (err < 0)
3382 				kfree_skb(_skb);
3383 }
3384 err = 0;
3385
3386 drop:
3387 kfree_skb(pi->sdu);
3388 break;
3389 }
3390
3391 kfree_skb(skb);
3392 return err;
3393 }
3394
3395 static void l2cap_check_srej_gap(struct sock *sk, u8 tx_seq)
3396 {
3397 struct sk_buff *skb;
3398 u16 control = 0;
3399
3400 	while ((skb = skb_peek(SREJ_QUEUE(sk)))) {
3401 if (bt_cb(skb)->tx_seq != tx_seq)
3402 break;
3403
3404 skb = skb_dequeue(SREJ_QUEUE(sk));
3405 control |= bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
3406 l2cap_sar_reassembly_sdu(sk, skb, control);
3407 l2cap_pi(sk)->buffer_seq_srej =
3408 (l2cap_pi(sk)->buffer_seq_srej + 1) % 64;
3409 tx_seq++;
3410 }
3411 }
3412
3413 static void l2cap_resend_srejframe(struct sock *sk, u8 tx_seq)
3414 {
3415 struct l2cap_pinfo *pi = l2cap_pi(sk);
3416 struct srej_list *l, *tmp;
3417 u16 control;
3418
3419 	list_for_each_entry_safe(l, tmp, SREJ_LIST(sk), list) {
3420 if (l->tx_seq == tx_seq) {
3421 list_del(&l->list);
3422 kfree(l);
3423 return;
3424 }
3425 control = L2CAP_SUPER_SELECT_REJECT;
3426 control |= l->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3427 l2cap_send_sframe(pi, control);
3428 list_del(&l->list);
3429 list_add_tail(&l->list, SREJ_LIST(sk));
3430 }
3431 }
3432
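/* Send an SREJ for every frame between the expected and the received
 * sequence number and remember each requested frame in SREJ_LIST so the
 * gap can be tracked until the retransmissions arrive. */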
3433 static void l2cap_send_srejframe(struct sock *sk, u8 tx_seq)
3434 {
3435 struct l2cap_pinfo *pi = l2cap_pi(sk);
3436 struct srej_list *new;
3437 u16 control;
3438
3439 while (tx_seq != pi->expected_tx_seq) {
3440 control = L2CAP_SUPER_SELECT_REJECT;
3441 control |= pi->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3442 l2cap_send_sframe(pi, control);
3443
3444 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3445 new->tx_seq = pi->expected_tx_seq++;
3446 list_add_tail(&new->list, SREJ_LIST(sk));
3447 }
3448 pi->expected_tx_seq++;
3449 }
3450
3451 static inline int l2cap_data_channel_iframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
3452 {
3453 struct l2cap_pinfo *pi = l2cap_pi(sk);
3454 u8 tx_seq = __get_txseq(rx_control);
3455 u8 req_seq = __get_reqseq(rx_control);
3456 u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT;
3457 int err = 0;
3458
3459 BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);
3460
3461 if (L2CAP_CTRL_FINAL & rx_control) {
3462 del_timer(&pi->monitor_timer);
3463 if (pi->unacked_frames > 0)
3464 __mod_retrans_timer();
3465 pi->conn_state &= ~L2CAP_CONN_WAIT_F;
3466 }
3467
3468 pi->expected_ack_seq = req_seq;
3469 l2cap_drop_acked_frames(sk);
3470
3471 if (tx_seq == pi->expected_tx_seq)
3472 goto expected;
3473
3474 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3475 struct srej_list *first;
3476
3477 first = list_first_entry(SREJ_LIST(sk),
3478 struct srej_list, list);
3479 if (tx_seq == first->tx_seq) {
3480 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3481 l2cap_check_srej_gap(sk, tx_seq);
3482
3483 list_del(&first->list);
3484 kfree(first);
3485
3486 if (list_empty(SREJ_LIST(sk))) {
3487 pi->buffer_seq = pi->buffer_seq_srej;
3488 pi->conn_state &= ~L2CAP_CONN_SREJ_SENT;
3489 }
3490 } else {
3491 struct srej_list *l;
3492 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3493
3494 list_for_each_entry(l, SREJ_LIST(sk), list) {
3495 if (l->tx_seq == tx_seq) {
3496 l2cap_resend_srejframe(sk, tx_seq);
3497 return 0;
3498 }
3499 }
3500 l2cap_send_srejframe(sk, tx_seq);
3501 }
3502 } else {
3503 pi->conn_state |= L2CAP_CONN_SREJ_SENT;
3504
3505 INIT_LIST_HEAD(SREJ_LIST(sk));
3506 pi->buffer_seq_srej = pi->buffer_seq;
3507
3508 __skb_queue_head_init(SREJ_QUEUE(sk));
3509 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3510
3511 pi->conn_state |= L2CAP_CONN_SEND_PBIT;
3512
3513 l2cap_send_srejframe(sk, tx_seq);
3514 }
3515 return 0;
3516
3517 expected:
3518 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3519
3520 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3521 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3522 return 0;
3523 }
3524
3525 if (rx_control & L2CAP_CTRL_FINAL) {
3526 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
3527 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
3528 else {
3529 sk->sk_send_head = TX_QUEUE(sk)->next;
3530 pi->next_tx_seq = pi->expected_ack_seq;
3531 l2cap_ertm_send(sk);
3532 }
3533 }
3534
3535 pi->buffer_seq = (pi->buffer_seq + 1) % 64;
3536
3537 err = l2cap_sar_reassembly_sdu(sk, skb, rx_control);
3538 if (err < 0)
3539 return err;
3540
3541 __mod_ack_timer();
3542
3543 pi->num_to_ack = (pi->num_to_ack + 1) % L2CAP_DEFAULT_NUM_TO_ACK;
3544 if (pi->num_to_ack == L2CAP_DEFAULT_NUM_TO_ACK - 1)
3545 l2cap_send_ack(pi);
3546
3547 return 0;
3548 }
3549
3550 static inline void l2cap_data_channel_rrframe(struct sock *sk, u16 rx_control)
3551 {
3552 struct l2cap_pinfo *pi = l2cap_pi(sk);
3553
3554 pi->expected_ack_seq = __get_reqseq(rx_control);
3555 l2cap_drop_acked_frames(sk);
3556
3557 if (rx_control & L2CAP_CTRL_POLL) {
3558 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3559 if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
3560 (pi->unacked_frames > 0))
3561 __mod_retrans_timer();
3562
3563 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3564 l2cap_send_srejtail(sk);
3565 } else {
3566 l2cap_send_i_or_rr_or_rnr(sk);
3567 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3568 }
3569
3570 } else if (rx_control & L2CAP_CTRL_FINAL) {
3571 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3572
3573 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
3574 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
3575 else {
3576 sk->sk_send_head = TX_QUEUE(sk)->next;
3577 pi->next_tx_seq = pi->expected_ack_seq;
3578 l2cap_ertm_send(sk);
3579 }
3580
3581 } else {
3582 if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
3583 (pi->unacked_frames > 0))
3584 __mod_retrans_timer();
3585
3586 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3587 if (pi->conn_state & L2CAP_CONN_SREJ_SENT)
3588 l2cap_send_ack(pi);
3589 else
3590 l2cap_ertm_send(sk);
3591 }
3592 }
3593
3594 static inline void l2cap_data_channel_rejframe(struct sock *sk, u16 rx_control)
3595 {
3596 struct l2cap_pinfo *pi = l2cap_pi(sk);
3597 u8 tx_seq = __get_reqseq(rx_control);
3598
3599 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3600
3601 pi->expected_ack_seq = tx_seq;
3602 l2cap_drop_acked_frames(sk);
3603
3604 if (rx_control & L2CAP_CTRL_FINAL) {
3605 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
3606 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
3607 else {
3608 sk->sk_send_head = TX_QUEUE(sk)->next;
3609 pi->next_tx_seq = pi->expected_ack_seq;
3610 l2cap_ertm_send(sk);
3611 }
3612 } else {
3613 sk->sk_send_head = TX_QUEUE(sk)->next;
3614 pi->next_tx_seq = pi->expected_ack_seq;
3615 l2cap_ertm_send(sk);
3616
3617 if (pi->conn_state & L2CAP_CONN_WAIT_F) {
3618 pi->srej_save_reqseq = tx_seq;
3619 pi->conn_state |= L2CAP_CONN_REJ_ACT;
3620 }
3621 }
3622 }
3623 static inline void l2cap_data_channel_srejframe(struct sock *sk, u16 rx_control)
3624 {
3625 struct l2cap_pinfo *pi = l2cap_pi(sk);
3626 u8 tx_seq = __get_reqseq(rx_control);
3627
3628 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3629
3630 if (rx_control & L2CAP_CTRL_POLL) {
3631 pi->expected_ack_seq = tx_seq;
3632 l2cap_drop_acked_frames(sk);
3633 l2cap_retransmit_frame(sk, tx_seq);
3634 l2cap_ertm_send(sk);
3635 if (pi->conn_state & L2CAP_CONN_WAIT_F) {
3636 pi->srej_save_reqseq = tx_seq;
3637 pi->conn_state |= L2CAP_CONN_SREJ_ACT;
3638 }
3639 } else if (rx_control & L2CAP_CTRL_FINAL) {
3640 if ((pi->conn_state & L2CAP_CONN_SREJ_ACT) &&
3641 pi->srej_save_reqseq == tx_seq)
3642 pi->conn_state &= ~L2CAP_CONN_SREJ_ACT;
3643 else
3644 l2cap_retransmit_frame(sk, tx_seq);
3645 } else {
3646 l2cap_retransmit_frame(sk, tx_seq);
3647 if (pi->conn_state & L2CAP_CONN_WAIT_F) {
3648 pi->srej_save_reqseq = tx_seq;
3649 pi->conn_state |= L2CAP_CONN_SREJ_ACT;
3650 }
3651 }
3652 }
3653
3654 static inline void l2cap_data_channel_rnrframe(struct sock *sk, u16 rx_control)
3655 {
3656 struct l2cap_pinfo *pi = l2cap_pi(sk);
3657 u8 tx_seq = __get_reqseq(rx_control);
3658
3659 pi->conn_state |= L2CAP_CONN_REMOTE_BUSY;
3660 pi->expected_ack_seq = tx_seq;
3661 l2cap_drop_acked_frames(sk);
3662
3663 if (!(pi->conn_state & L2CAP_CONN_SREJ_SENT)) {
3664 del_timer(&pi->retrans_timer);
3665 if (rx_control & L2CAP_CTRL_POLL) {
3666 u16 control = L2CAP_CTRL_FINAL;
3667 l2cap_send_rr_or_rnr(pi, control);
3668 }
3669 return;
3670 }
3671
3672 if (rx_control & L2CAP_CTRL_POLL)
3673 l2cap_send_srejtail(sk);
3674 else
3675 l2cap_send_sframe(pi, L2CAP_SUPER_RCV_READY);
3676 }
3677
3678 static inline int l2cap_data_channel_sframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
3679 {
3680 BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);
3681
3682 if (L2CAP_CTRL_FINAL & rx_control) {
3683 del_timer(&l2cap_pi(sk)->monitor_timer);
3684 if (l2cap_pi(sk)->unacked_frames > 0)
3685 __mod_retrans_timer();
3686 l2cap_pi(sk)->conn_state &= ~L2CAP_CONN_WAIT_F;
3687 }
3688
3689 switch (rx_control & L2CAP_CTRL_SUPERVISE) {
3690 case L2CAP_SUPER_RCV_READY:
3691 l2cap_data_channel_rrframe(sk, rx_control);
3692 break;
3693
3694 case L2CAP_SUPER_REJECT:
3695 l2cap_data_channel_rejframe(sk, rx_control);
3696 break;
3697
3698 case L2CAP_SUPER_SELECT_REJECT:
3699 l2cap_data_channel_srejframe(sk, rx_control);
3700 break;
3701
3702 case L2CAP_SUPER_RCV_NOT_READY:
3703 l2cap_data_channel_rnrframe(sk, rx_control);
3704 break;
3705 }
3706
3707 kfree_skb(skb);
3708 return 0;
3709 }
3710
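/* Receive path for connection-oriented channels.  Basic mode payloads are
 * queued to the socket directly.  In ERTM and streaming mode the control
 * field is stripped and length and FCS are verified; ERTM frames are then
 * handed to the I-/S-frame handlers while streaming mode only reassembles
 * I-frames and tolerates lost sequence numbers. */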
3711 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
3712 {
3713 struct sock *sk;
3714 struct l2cap_pinfo *pi;
3715 u16 control, len;
3716 u8 tx_seq;
3717
3718 sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
3719 if (!sk) {
3720 BT_DBG("unknown cid 0x%4.4x", cid);
3721 goto drop;
3722 }
3723
3724 pi = l2cap_pi(sk);
3725
3726 BT_DBG("sk %p, len %d", sk, skb->len);
3727
3728 if (sk->sk_state != BT_CONNECTED)
3729 goto drop;
3730
3731 switch (pi->mode) {
3732 case L2CAP_MODE_BASIC:
3733 		/* If the socket receive buffer overflows we drop data here,
3734 		 * which is *bad* because L2CAP has to be reliable.
3735 		 * But we don't have any other choice: Basic mode L2CAP
3736 		 * provides no flow control mechanism. */
3737
3738 if (pi->imtu < skb->len)
3739 goto drop;
3740
3741 if (!sock_queue_rcv_skb(sk, skb))
3742 goto done;
3743 break;
3744
3745 case L2CAP_MODE_ERTM:
3746 control = get_unaligned_le16(skb->data);
3747 skb_pull(skb, 2);
3748 len = skb->len;
3749
3750 if (__is_sar_start(control))
3751 len -= 2;
3752
3753 if (pi->fcs == L2CAP_FCS_CRC16)
3754 len -= 2;
3755
3756 		/*
3757 		 * We can just drop the corrupted I-frame here.
3758 		 * The receiver will notice the missing frame, start the
3759 		 * proper recovery procedure and ask for retransmission.
3760 		 */
3761 if (len > pi->mps)
3762 goto drop;
3763
3764 if (l2cap_check_fcs(pi, skb))
3765 goto drop;
3766
3767 if (__is_iframe(control)) {
3768 if (len < 4)
3769 goto drop;
3770
3771 l2cap_data_channel_iframe(sk, control, skb);
3772 } else {
3773 if (len != 0)
3774 goto drop;
3775
3776 l2cap_data_channel_sframe(sk, control, skb);
3777 }
3778
3779 goto done;
3780
3781 case L2CAP_MODE_STREAMING:
3782 control = get_unaligned_le16(skb->data);
3783 skb_pull(skb, 2);
3784 len = skb->len;
3785
3786 if (__is_sar_start(control))
3787 len -= 2;
3788
3789 if (pi->fcs == L2CAP_FCS_CRC16)
3790 len -= 2;
3791
3792 if (len > pi->mps || len < 4 || __is_sframe(control))
3793 goto drop;
3794
3795 if (l2cap_check_fcs(pi, skb))
3796 goto drop;
3797
3798 tx_seq = __get_txseq(control);
3799
3800 if (pi->expected_tx_seq == tx_seq)
3801 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3802 else
3803 pi->expected_tx_seq = (tx_seq + 1) % 64;
3804
3805 l2cap_sar_reassembly_sdu(sk, skb, control);
3806
3807 goto done;
3808
3809 default:
3810 BT_DBG("sk %p: bad mode 0x%2.2x", sk, pi->mode);
3811 break;
3812 }
3813
3814 drop:
3815 kfree_skb(skb);
3816
3817 done:
3818 if (sk)
3819 bh_unlock_sock(sk);
3820
3821 return 0;
3822 }
3823
3824 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
3825 {
3826 struct sock *sk;
3827
3828 sk = l2cap_get_sock_by_psm(0, psm, conn->src);
3829 if (!sk)
3830 goto drop;
3831
3832 BT_DBG("sk %p, len %d", sk, skb->len);
3833
3834 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
3835 goto drop;
3836
3837 if (l2cap_pi(sk)->imtu < skb->len)
3838 goto drop;
3839
3840 if (!sock_queue_rcv_skb(sk, skb))
3841 goto done;
3842
3843 drop:
3844 kfree_skb(skb);
3845
3846 done:
3847 if (sk)
3848 bh_unlock_sock(sk);
3849 return 0;
3850 }
3851
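/* Entry point for a complete L2CAP frame: check that the length in the
 * basic header matches the skb and dispatch by CID to the signalling,
 * connectionless or connection-oriented receive path. */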
3852 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
3853 {
3854 struct l2cap_hdr *lh = (void *) skb->data;
3855 u16 cid, len;
3856 __le16 psm;
3857
3858 skb_pull(skb, L2CAP_HDR_SIZE);
3859 cid = __le16_to_cpu(lh->cid);
3860 len = __le16_to_cpu(lh->len);
3861
3862 if (len != skb->len) {
3863 kfree_skb(skb);
3864 return;
3865 }
3866
3867 BT_DBG("len %d, cid 0x%4.4x", len, cid);
3868
3869 switch (cid) {
3870 case L2CAP_CID_SIGNALING:
3871 l2cap_sig_channel(conn, skb);
3872 break;
3873
3874 case L2CAP_CID_CONN_LESS:
3875 psm = get_unaligned_le16(skb->data);
3876 skb_pull(skb, 2);
3877 l2cap_conless_channel(conn, psm, skb);
3878 break;
3879
3880 default:
3881 l2cap_data_channel(conn, cid, skb);
3882 break;
3883 }
3884 }
3885
3886 /* ---- L2CAP interface with lower layer (HCI) ---- */
3887
3888 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3889 {
3890 int exact = 0, lm1 = 0, lm2 = 0;
3891 register struct sock *sk;
3892 struct hlist_node *node;
3893
3894 if (type != ACL_LINK)
3895 return 0;
3896
3897 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
3898
3899 /* Find listening sockets and check their link_mode */
3900 read_lock(&l2cap_sk_list.lock);
3901 sk_for_each(sk, node, &l2cap_sk_list.head) {
3902 if (sk->sk_state != BT_LISTEN)
3903 continue;
3904
3905 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
3906 lm1 |= HCI_LM_ACCEPT;
3907 if (l2cap_pi(sk)->role_switch)
3908 lm1 |= HCI_LM_MASTER;
3909 exact++;
3910 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
3911 lm2 |= HCI_LM_ACCEPT;
3912 if (l2cap_pi(sk)->role_switch)
3913 lm2 |= HCI_LM_MASTER;
3914 }
3915 }
3916 read_unlock(&l2cap_sk_list.lock);
3917
3918 return exact ? lm1 : lm2;
3919 }
3920
3921 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
3922 {
3923 struct l2cap_conn *conn;
3924
3925 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
3926
3927 if (hcon->type != ACL_LINK)
3928 return 0;
3929
3930 if (!status) {
3931 conn = l2cap_conn_add(hcon, status);
3932 if (conn)
3933 l2cap_conn_ready(conn);
3934 } else
3935 l2cap_conn_del(hcon, bt_err(status));
3936
3937 return 0;
3938 }
3939
3940 static int l2cap_disconn_ind(struct hci_conn *hcon)
3941 {
3942 struct l2cap_conn *conn = hcon->l2cap_data;
3943
3944 BT_DBG("hcon %p", hcon);
3945
3946 if (hcon->type != ACL_LINK || !conn)
3947 return 0x13;
3948
3949 return conn->disc_reason;
3950 }
3951
3952 static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
3953 {
3954 BT_DBG("hcon %p reason %d", hcon, reason);
3955
3956 if (hcon->type != ACL_LINK)
3957 return 0;
3958
3959 l2cap_conn_del(hcon, bt_err(reason));
3960
3961 return 0;
3962 }
3963
3964 static inline void l2cap_check_encryption(struct sock *sk, u8 encrypt)
3965 {
3966 if (sk->sk_type != SOCK_SEQPACKET)
3967 return;
3968
3969 if (encrypt == 0x00) {
3970 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM) {
3971 l2cap_sock_clear_timer(sk);
3972 l2cap_sock_set_timer(sk, HZ * 5);
3973 } else if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
3974 __l2cap_sock_close(sk, ECONNREFUSED);
3975 } else {
3976 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM)
3977 l2cap_sock_clear_timer(sk);
3978 }
3979 }
3980
3981 static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
3982 {
3983 struct l2cap_chan_list *l;
3984 struct l2cap_conn *conn = hcon->l2cap_data;
3985 struct sock *sk;
3986
3987 if (!conn)
3988 return 0;
3989
3990 l = &conn->chan_list;
3991
3992 BT_DBG("conn %p", conn);
3993
3994 read_lock(&l->lock);
3995
3996 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
3997 bh_lock_sock(sk);
3998
3999 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND) {
4000 bh_unlock_sock(sk);
4001 continue;
4002 }
4003
4004 if (!status && (sk->sk_state == BT_CONNECTED ||
4005 sk->sk_state == BT_CONFIG)) {
4006 l2cap_check_encryption(sk, encrypt);
4007 bh_unlock_sock(sk);
4008 continue;
4009 }
4010
4011 if (sk->sk_state == BT_CONNECT) {
4012 if (!status) {
4013 struct l2cap_conn_req req;
4014 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
4015 req.psm = l2cap_pi(sk)->psm;
4016
4017 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
4018
4019 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
4020 L2CAP_CONN_REQ, sizeof(req), &req);
4021 } else {
4022 l2cap_sock_clear_timer(sk);
4023 l2cap_sock_set_timer(sk, HZ / 10);
4024 }
4025 } else if (sk->sk_state == BT_CONNECT2) {
4026 struct l2cap_conn_rsp rsp;
4027 __u16 result;
4028
4029 if (!status) {
4030 sk->sk_state = BT_CONFIG;
4031 result = L2CAP_CR_SUCCESS;
4032 } else {
4033 sk->sk_state = BT_DISCONN;
4034 l2cap_sock_set_timer(sk, HZ / 10);
4035 result = L2CAP_CR_SEC_BLOCK;
4036 }
4037
4038 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
4039 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
4040 rsp.result = cpu_to_le16(result);
4041 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4042 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
4043 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
4044 }
4045
4046 bh_unlock_sock(sk);
4047 }
4048
4049 read_unlock(&l->lock);
4050
4051 return 0;
4052 }
4053
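/* HCI callback for incoming ACL data.  Fragments are collected in
 * conn->rx_skb until the length announced in the L2CAP basic header is
 * reached, then the complete frame is passed to l2cap_recv_frame(). */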
4054 static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
4055 {
4056 struct l2cap_conn *conn = hcon->l2cap_data;
4057
4058 if (!conn && !(conn = l2cap_conn_add(hcon, 0)))
4059 goto drop;
4060
4061 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
4062
4063 if (flags & ACL_START) {
4064 struct l2cap_hdr *hdr;
4065 int len;
4066
4067 if (conn->rx_len) {
4068 BT_ERR("Unexpected start frame (len %d)", skb->len);
4069 kfree_skb(conn->rx_skb);
4070 conn->rx_skb = NULL;
4071 conn->rx_len = 0;
4072 l2cap_conn_unreliable(conn, ECOMM);
4073 }
4074
4075 if (skb->len < 2) {
4076 BT_ERR("Frame is too short (len %d)", skb->len);
4077 l2cap_conn_unreliable(conn, ECOMM);
4078 goto drop;
4079 }
4080
4081 hdr = (struct l2cap_hdr *) skb->data;
4082 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
4083
4084 if (len == skb->len) {
4085 /* Complete frame received */
4086 l2cap_recv_frame(conn, skb);
4087 return 0;
4088 }
4089
4090 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
4091
4092 if (skb->len > len) {
4093 BT_ERR("Frame is too long (len %d, expected len %d)",
4094 skb->len, len);
4095 l2cap_conn_unreliable(conn, ECOMM);
4096 goto drop;
4097 }
4098
4099 /* Allocate skb for the complete frame (with header) */
4100 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
4101 if (!conn->rx_skb)
4102 goto drop;
4103
4104 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4105 skb->len);
4106 conn->rx_len = len - skb->len;
4107 } else {
4108 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
4109
4110 if (!conn->rx_len) {
4111 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
4112 l2cap_conn_unreliable(conn, ECOMM);
4113 goto drop;
4114 }
4115
4116 if (skb->len > conn->rx_len) {
4117 BT_ERR("Fragment is too long (len %d, expected %d)",
4118 skb->len, conn->rx_len);
4119 kfree_skb(conn->rx_skb);
4120 conn->rx_skb = NULL;
4121 conn->rx_len = 0;
4122 l2cap_conn_unreliable(conn, ECOMM);
4123 goto drop;
4124 }
4125
4126 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4127 skb->len);
4128 conn->rx_len -= skb->len;
4129
4130 if (!conn->rx_len) {
4131 /* Complete frame received */
4132 l2cap_recv_frame(conn, conn->rx_skb);
4133 conn->rx_skb = NULL;
4134 }
4135 }
4136
4137 drop:
4138 kfree_skb(skb);
4139 return 0;
4140 }
4141
4142 static int l2cap_debugfs_show(struct seq_file *f, void *p)
4143 {
4144 struct sock *sk;
4145 struct hlist_node *node;
4146
4147 read_lock_bh(&l2cap_sk_list.lock);
4148
4149 sk_for_each(sk, node, &l2cap_sk_list.head) {
4150 struct l2cap_pinfo *pi = l2cap_pi(sk);
4151
4152 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n",
4153 batostr(&bt_sk(sk)->src),
4154 batostr(&bt_sk(sk)->dst),
4155 sk->sk_state, __le16_to_cpu(pi->psm),
4156 pi->scid, pi->dcid,
4157 pi->imtu, pi->omtu, pi->sec_level);
4158 }
4159
4160 read_unlock_bh(&l2cap_sk_list.lock);
4161
4162 return 0;
4163 }
4164
4165 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
4166 {
4167 return single_open(file, l2cap_debugfs_show, inode->i_private);
4168 }
4169
4170 static const struct file_operations l2cap_debugfs_fops = {
4171 .open = l2cap_debugfs_open,
4172 .read = seq_read,
4173 .llseek = seq_lseek,
4174 .release = single_release,
4175 };
4176
4177 static struct dentry *l2cap_debugfs;
4178
4179 static const struct proto_ops l2cap_sock_ops = {
4180 .family = PF_BLUETOOTH,
4181 .owner = THIS_MODULE,
4182 .release = l2cap_sock_release,
4183 .bind = l2cap_sock_bind,
4184 .connect = l2cap_sock_connect,
4185 .listen = l2cap_sock_listen,
4186 .accept = l2cap_sock_accept,
4187 .getname = l2cap_sock_getname,
4188 .sendmsg = l2cap_sock_sendmsg,
4189 .recvmsg = l2cap_sock_recvmsg,
4190 .poll = bt_sock_poll,
4191 .ioctl = bt_sock_ioctl,
4192 .mmap = sock_no_mmap,
4193 .socketpair = sock_no_socketpair,
4194 .shutdown = l2cap_sock_shutdown,
4195 .setsockopt = l2cap_sock_setsockopt,
4196 .getsockopt = l2cap_sock_getsockopt
4197 };
4198
4199 static const struct net_proto_family l2cap_sock_family_ops = {
4200 .family = PF_BLUETOOTH,
4201 .owner = THIS_MODULE,
4202 .create = l2cap_sock_create,
4203 };
4204
4205 static struct hci_proto l2cap_hci_proto = {
4206 .name = "L2CAP",
4207 .id = HCI_PROTO_L2CAP,
4208 .connect_ind = l2cap_connect_ind,
4209 .connect_cfm = l2cap_connect_cfm,
4210 .disconn_ind = l2cap_disconn_ind,
4211 .disconn_cfm = l2cap_disconn_cfm,
4212 .security_cfm = l2cap_security_cfm,
4213 .recv_acldata = l2cap_recv_acldata
4214 };
4215
4216 static int __init l2cap_init(void)
4217 {
4218 int err;
4219
4220 err = proto_register(&l2cap_proto, 0);
4221 if (err < 0)
4222 return err;
4223
4224 err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops);
4225 if (err < 0) {
4226 BT_ERR("L2CAP socket registration failed");
4227 goto error;
4228 }
4229
4230 err = hci_register_proto(&l2cap_hci_proto);
4231 if (err < 0) {
4232 BT_ERR("L2CAP protocol registration failed");
4233 bt_sock_unregister(BTPROTO_L2CAP);
4234 goto error;
4235 }
4236
4237 if (bt_debugfs) {
4238 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
4239 bt_debugfs, NULL, &l2cap_debugfs_fops);
4240 if (!l2cap_debugfs)
4241 BT_ERR("Failed to create L2CAP debug file");
4242 }
4243
4244 BT_INFO("L2CAP ver %s", VERSION);
4245 BT_INFO("L2CAP socket layer initialized");
4246
4247 return 0;
4248
4249 error:
4250 proto_unregister(&l2cap_proto);
4251 return err;
4252 }
4253
4254 static void __exit l2cap_exit(void)
4255 {
4256 debugfs_remove(l2cap_debugfs);
4257
4258 if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
4259 BT_ERR("L2CAP socket unregistration failed");
4260
4261 if (hci_unregister_proto(&l2cap_hci_proto) < 0)
4262 BT_ERR("L2CAP protocol unregistration failed");
4263
4264 proto_unregister(&l2cap_proto);
4265 }
4266
4267 void l2cap_load(void)
4268 {
4269 /* Dummy function to trigger automatic L2CAP module loading by
4270 * other modules that use L2CAP sockets but don't use any other
4271 * symbols from it. */
4272 return;
4273 }
4274 EXPORT_SYMBOL(l2cap_load);
4275
4276 module_init(l2cap_init);
4277 module_exit(l2cap_exit);
4278
4279 module_param(enable_ertm, bool, 0644);
4280 MODULE_PARM_DESC(enable_ertm, "Enable enhanced retransmission mode");
4281
4282 module_param(max_transmit, uint, 0644);
4283 MODULE_PARM_DESC(max_transmit, "Max transmit value (default = 3)");
4284
4285 MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
4286 MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
4287 MODULE_VERSION(VERSION);
4288 MODULE_LICENSE("GPL");
4289 MODULE_ALIAS("bt-proto-0");