Bluetooth: Add missing msecs to jiffies conversion
[deliverable/linux.git] / net / bluetooth / mgmt.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3
4 Copyright (C) 2010 Nokia Corporation
5 Copyright (C) 2011-2012 Intel Corporation
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth HCI Management interface */
26
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
29
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/mgmt.h>
33
34 #include "smp.h"
35
36 #define MGMT_VERSION 1
37 #define MGMT_REVISION 6
38
/* Management commands supported by this kernel; advertised to user
 * space via the Read Commands response (see read_commands() below).
 */
static const u16 mgmt_commands[] = {
        MGMT_OP_READ_INDEX_LIST,
        MGMT_OP_READ_INFO,
        MGMT_OP_SET_POWERED,
        MGMT_OP_SET_DISCOVERABLE,
        MGMT_OP_SET_CONNECTABLE,
        MGMT_OP_SET_FAST_CONNECTABLE,
        MGMT_OP_SET_PAIRABLE,
        MGMT_OP_SET_LINK_SECURITY,
        MGMT_OP_SET_SSP,
        MGMT_OP_SET_HS,
        MGMT_OP_SET_LE,
        MGMT_OP_SET_DEV_CLASS,
        MGMT_OP_SET_LOCAL_NAME,
        MGMT_OP_ADD_UUID,
        MGMT_OP_REMOVE_UUID,
        MGMT_OP_LOAD_LINK_KEYS,
        MGMT_OP_LOAD_LONG_TERM_KEYS,
        MGMT_OP_DISCONNECT,
        MGMT_OP_GET_CONNECTIONS,
        MGMT_OP_PIN_CODE_REPLY,
        MGMT_OP_PIN_CODE_NEG_REPLY,
        MGMT_OP_SET_IO_CAPABILITY,
        MGMT_OP_PAIR_DEVICE,
        MGMT_OP_CANCEL_PAIR_DEVICE,
        MGMT_OP_UNPAIR_DEVICE,
        MGMT_OP_USER_CONFIRM_REPLY,
        MGMT_OP_USER_CONFIRM_NEG_REPLY,
        MGMT_OP_USER_PASSKEY_REPLY,
        MGMT_OP_USER_PASSKEY_NEG_REPLY,
        MGMT_OP_READ_LOCAL_OOB_DATA,
        MGMT_OP_ADD_REMOTE_OOB_DATA,
        MGMT_OP_REMOVE_REMOTE_OOB_DATA,
        MGMT_OP_START_DISCOVERY,
        MGMT_OP_STOP_DISCOVERY,
        MGMT_OP_CONFIRM_NAME,
        MGMT_OP_BLOCK_DEVICE,
        MGMT_OP_UNBLOCK_DEVICE,
        MGMT_OP_SET_DEVICE_ID,
        MGMT_OP_SET_ADVERTISING,
        MGMT_OP_SET_BREDR,
        MGMT_OP_SET_STATIC_ADDRESS,
        MGMT_OP_SET_SCAN_PARAMS,
        MGMT_OP_SET_SECURE_CONN,
        MGMT_OP_SET_DEBUG_KEYS,
        MGMT_OP_SET_PRIVACY,
        MGMT_OP_LOAD_IRKS,
        MGMT_OP_GET_CONN_INFO,
};
88
/* Management events this kernel can emit; advertised to user space
 * together with mgmt_commands[] in the Read Commands response.
 */
static const u16 mgmt_events[] = {
        MGMT_EV_CONTROLLER_ERROR,
        MGMT_EV_INDEX_ADDED,
        MGMT_EV_INDEX_REMOVED,
        MGMT_EV_NEW_SETTINGS,
        MGMT_EV_CLASS_OF_DEV_CHANGED,
        MGMT_EV_LOCAL_NAME_CHANGED,
        MGMT_EV_NEW_LINK_KEY,
        MGMT_EV_NEW_LONG_TERM_KEY,
        MGMT_EV_DEVICE_CONNECTED,
        MGMT_EV_DEVICE_DISCONNECTED,
        MGMT_EV_CONNECT_FAILED,
        MGMT_EV_PIN_CODE_REQUEST,
        MGMT_EV_USER_CONFIRM_REQUEST,
        MGMT_EV_USER_PASSKEY_REQUEST,
        MGMT_EV_AUTH_FAILED,
        MGMT_EV_DEVICE_FOUND,
        MGMT_EV_DISCOVERING,
        MGMT_EV_DEVICE_BLOCKED,
        MGMT_EV_DEVICE_UNBLOCKED,
        MGMT_EV_DEVICE_UNPAIRED,
        MGMT_EV_PASSKEY_NOTIFY,
        MGMT_EV_NEW_IRK,
        MGMT_EV_NEW_CSRK,
};
114
115 #define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000)
116
117 #define hdev_is_powered(hdev) (test_bit(HCI_UP, &hdev->flags) && \
118 !test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
119
/* A mgmt command whose completion is asynchronous. Queued on
 * hdev->mgmt_pending until the matching HCI activity finishes.
 */
struct pending_cmd {
        struct list_head list;  /* linkage on hdev->mgmt_pending */
        u16 opcode;             /* MGMT_OP_* being processed */
        int index;              /* controller index (hdev->id) */
        void *param;            /* copy of the command parameters */
        struct sock *sk;        /* originating socket (holds a ref) */
        void *user_data;        /* per-command private context */
};
128
/* HCI to MGMT error code conversion table, indexed by the HCI status
 * code. Codes beyond the table map to MGMT_STATUS_FAILED (see
 * mgmt_status()).
 */
static u8 mgmt_status_table[] = {
        MGMT_STATUS_SUCCESS,
        MGMT_STATUS_UNKNOWN_COMMAND,    /* Unknown Command */
        MGMT_STATUS_NOT_CONNECTED,      /* No Connection */
        MGMT_STATUS_FAILED,             /* Hardware Failure */
        MGMT_STATUS_CONNECT_FAILED,     /* Page Timeout */
        MGMT_STATUS_AUTH_FAILED,        /* Authentication Failed */
        MGMT_STATUS_AUTH_FAILED,        /* PIN or Key Missing */
        MGMT_STATUS_NO_RESOURCES,       /* Memory Full */
        MGMT_STATUS_TIMEOUT,            /* Connection Timeout */
        MGMT_STATUS_NO_RESOURCES,       /* Max Number of Connections */
        MGMT_STATUS_NO_RESOURCES,       /* Max Number of SCO Connections */
        MGMT_STATUS_ALREADY_CONNECTED,  /* ACL Connection Exists */
        MGMT_STATUS_BUSY,               /* Command Disallowed */
        MGMT_STATUS_NO_RESOURCES,       /* Rejected Limited Resources */
        MGMT_STATUS_REJECTED,           /* Rejected Security */
        MGMT_STATUS_REJECTED,           /* Rejected Personal */
        MGMT_STATUS_TIMEOUT,            /* Host Timeout */
        MGMT_STATUS_NOT_SUPPORTED,      /* Unsupported Feature */
        MGMT_STATUS_INVALID_PARAMS,     /* Invalid Parameters */
        MGMT_STATUS_DISCONNECTED,       /* OE User Ended Connection */
        MGMT_STATUS_NO_RESOURCES,       /* OE Low Resources */
        MGMT_STATUS_DISCONNECTED,       /* OE Power Off */
        MGMT_STATUS_DISCONNECTED,       /* Connection Terminated */
        MGMT_STATUS_BUSY,               /* Repeated Attempts */
        MGMT_STATUS_REJECTED,           /* Pairing Not Allowed */
        MGMT_STATUS_FAILED,             /* Unknown LMP PDU */
        MGMT_STATUS_NOT_SUPPORTED,      /* Unsupported Remote Feature */
        MGMT_STATUS_REJECTED,           /* SCO Offset Rejected */
        MGMT_STATUS_REJECTED,           /* SCO Interval Rejected */
        MGMT_STATUS_REJECTED,           /* Air Mode Rejected */
        MGMT_STATUS_INVALID_PARAMS,     /* Invalid LMP Parameters */
        MGMT_STATUS_FAILED,             /* Unspecified Error */
        MGMT_STATUS_NOT_SUPPORTED,      /* Unsupported LMP Parameter Value */
        MGMT_STATUS_FAILED,             /* Role Change Not Allowed */
        MGMT_STATUS_TIMEOUT,            /* LMP Response Timeout */
        MGMT_STATUS_FAILED,             /* LMP Error Transaction Collision */
        MGMT_STATUS_FAILED,             /* LMP PDU Not Allowed */
        MGMT_STATUS_REJECTED,           /* Encryption Mode Not Accepted */
        MGMT_STATUS_FAILED,             /* Unit Link Key Used */
        MGMT_STATUS_NOT_SUPPORTED,      /* QoS Not Supported */
        MGMT_STATUS_TIMEOUT,            /* Instant Passed */
        MGMT_STATUS_NOT_SUPPORTED,      /* Pairing Not Supported */
        MGMT_STATUS_FAILED,             /* Transaction Collision */
        MGMT_STATUS_INVALID_PARAMS,     /* Unacceptable Parameter */
        MGMT_STATUS_REJECTED,           /* QoS Rejected */
        MGMT_STATUS_NOT_SUPPORTED,      /* Classification Not Supported */
        MGMT_STATUS_REJECTED,           /* Insufficient Security */
        MGMT_STATUS_INVALID_PARAMS,     /* Parameter Out Of Range */
        MGMT_STATUS_BUSY,               /* Role Switch Pending */
        MGMT_STATUS_FAILED,             /* Slot Violation */
        MGMT_STATUS_FAILED,             /* Role Switch Failed */
        MGMT_STATUS_INVALID_PARAMS,     /* EIR Too Large */
        MGMT_STATUS_NOT_SUPPORTED,      /* Simple Pairing Not Supported */
        MGMT_STATUS_BUSY,               /* Host Busy Pairing */
        MGMT_STATUS_REJECTED,           /* Rejected, No Suitable Channel */
        MGMT_STATUS_BUSY,               /* Controller Busy */
        MGMT_STATUS_INVALID_PARAMS,     /* Unsuitable Connection Interval */
        MGMT_STATUS_TIMEOUT,            /* Directed Advertising Timeout */
        MGMT_STATUS_AUTH_FAILED,        /* Terminated Due to MIC Failure */
        MGMT_STATUS_CONNECT_FAILED,     /* Connection Establishment Failed */
        MGMT_STATUS_CONNECT_FAILED,     /* MAC Connection Failed */
};
193
194 static u8 mgmt_status(u8 hci_status)
195 {
196 if (hci_status < ARRAY_SIZE(mgmt_status_table))
197 return mgmt_status_table[hci_status];
198
199 return MGMT_STATUS_FAILED;
200 }
201
202 static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
203 {
204 struct sk_buff *skb;
205 struct mgmt_hdr *hdr;
206 struct mgmt_ev_cmd_status *ev;
207 int err;
208
209 BT_DBG("sock %p, index %u, cmd %u, status %u", sk, index, cmd, status);
210
211 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev), GFP_KERNEL);
212 if (!skb)
213 return -ENOMEM;
214
215 hdr = (void *) skb_put(skb, sizeof(*hdr));
216
217 hdr->opcode = cpu_to_le16(MGMT_EV_CMD_STATUS);
218 hdr->index = cpu_to_le16(index);
219 hdr->len = cpu_to_le16(sizeof(*ev));
220
221 ev = (void *) skb_put(skb, sizeof(*ev));
222 ev->status = status;
223 ev->opcode = cpu_to_le16(cmd);
224
225 err = sock_queue_rcv_skb(sk, skb);
226 if (err < 0)
227 kfree_skb(skb);
228
229 return err;
230 }
231
232 static int cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status,
233 void *rp, size_t rp_len)
234 {
235 struct sk_buff *skb;
236 struct mgmt_hdr *hdr;
237 struct mgmt_ev_cmd_complete *ev;
238 int err;
239
240 BT_DBG("sock %p", sk);
241
242 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + rp_len, GFP_KERNEL);
243 if (!skb)
244 return -ENOMEM;
245
246 hdr = (void *) skb_put(skb, sizeof(*hdr));
247
248 hdr->opcode = cpu_to_le16(MGMT_EV_CMD_COMPLETE);
249 hdr->index = cpu_to_le16(index);
250 hdr->len = cpu_to_le16(sizeof(*ev) + rp_len);
251
252 ev = (void *) skb_put(skb, sizeof(*ev) + rp_len);
253 ev->opcode = cpu_to_le16(cmd);
254 ev->status = status;
255
256 if (rp)
257 memcpy(ev->data, rp, rp_len);
258
259 err = sock_queue_rcv_skb(sk, skb);
260 if (err < 0)
261 kfree_skb(skb);
262
263 return err;
264 }
265
266 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
267 u16 data_len)
268 {
269 struct mgmt_rp_read_version rp;
270
271 BT_DBG("sock %p", sk);
272
273 rp.version = MGMT_VERSION;
274 rp.revision = cpu_to_le16(MGMT_REVISION);
275
276 return cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0, &rp,
277 sizeof(rp));
278 }
279
280 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
281 u16 data_len)
282 {
283 struct mgmt_rp_read_commands *rp;
284 const u16 num_commands = ARRAY_SIZE(mgmt_commands);
285 const u16 num_events = ARRAY_SIZE(mgmt_events);
286 __le16 *opcode;
287 size_t rp_size;
288 int i, err;
289
290 BT_DBG("sock %p", sk);
291
292 rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
293
294 rp = kmalloc(rp_size, GFP_KERNEL);
295 if (!rp)
296 return -ENOMEM;
297
298 rp->num_commands = cpu_to_le16(num_commands);
299 rp->num_events = cpu_to_le16(num_events);
300
301 for (i = 0, opcode = rp->opcodes; i < num_commands; i++, opcode++)
302 put_unaligned_le16(mgmt_commands[i], opcode);
303
304 for (i = 0; i < num_events; i++, opcode++)
305 put_unaligned_le16(mgmt_events[i], opcode);
306
307 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0, rp,
308 rp_size);
309 kfree(rp);
310
311 return err;
312 }
313
314 static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
315 u16 data_len)
316 {
317 struct mgmt_rp_read_index_list *rp;
318 struct hci_dev *d;
319 size_t rp_len;
320 u16 count;
321 int err;
322
323 BT_DBG("sock %p", sk);
324
325 read_lock(&hci_dev_list_lock);
326
327 count = 0;
328 list_for_each_entry(d, &hci_dev_list, list) {
329 if (d->dev_type == HCI_BREDR)
330 count++;
331 }
332
333 rp_len = sizeof(*rp) + (2 * count);
334 rp = kmalloc(rp_len, GFP_ATOMIC);
335 if (!rp) {
336 read_unlock(&hci_dev_list_lock);
337 return -ENOMEM;
338 }
339
340 count = 0;
341 list_for_each_entry(d, &hci_dev_list, list) {
342 if (test_bit(HCI_SETUP, &d->dev_flags))
343 continue;
344
345 if (test_bit(HCI_USER_CHANNEL, &d->dev_flags))
346 continue;
347
348 if (d->dev_type == HCI_BREDR) {
349 rp->index[count++] = cpu_to_le16(d->id);
350 BT_DBG("Added hci%u", d->id);
351 }
352 }
353
354 rp->num_controllers = cpu_to_le16(count);
355 rp_len = sizeof(*rp) + (2 * count);
356
357 read_unlock(&hci_dev_list_lock);
358
359 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST, 0, rp,
360 rp_len);
361
362 kfree(rp);
363
364 return err;
365 }
366
367 static u32 get_supported_settings(struct hci_dev *hdev)
368 {
369 u32 settings = 0;
370
371 settings |= MGMT_SETTING_POWERED;
372 settings |= MGMT_SETTING_PAIRABLE;
373 settings |= MGMT_SETTING_DEBUG_KEYS;
374
375 if (lmp_bredr_capable(hdev)) {
376 settings |= MGMT_SETTING_CONNECTABLE;
377 if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
378 settings |= MGMT_SETTING_FAST_CONNECTABLE;
379 settings |= MGMT_SETTING_DISCOVERABLE;
380 settings |= MGMT_SETTING_BREDR;
381 settings |= MGMT_SETTING_LINK_SECURITY;
382
383 if (lmp_ssp_capable(hdev)) {
384 settings |= MGMT_SETTING_SSP;
385 settings |= MGMT_SETTING_HS;
386 }
387
388 if (lmp_sc_capable(hdev) ||
389 test_bit(HCI_FORCE_SC, &hdev->dev_flags))
390 settings |= MGMT_SETTING_SECURE_CONN;
391 }
392
393 if (lmp_le_capable(hdev)) {
394 settings |= MGMT_SETTING_LE;
395 settings |= MGMT_SETTING_ADVERTISING;
396 settings |= MGMT_SETTING_PRIVACY;
397 }
398
399 return settings;
400 }
401
402 static u32 get_current_settings(struct hci_dev *hdev)
403 {
404 u32 settings = 0;
405
406 if (hdev_is_powered(hdev))
407 settings |= MGMT_SETTING_POWERED;
408
409 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
410 settings |= MGMT_SETTING_CONNECTABLE;
411
412 if (test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
413 settings |= MGMT_SETTING_FAST_CONNECTABLE;
414
415 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
416 settings |= MGMT_SETTING_DISCOVERABLE;
417
418 if (test_bit(HCI_PAIRABLE, &hdev->dev_flags))
419 settings |= MGMT_SETTING_PAIRABLE;
420
421 if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
422 settings |= MGMT_SETTING_BREDR;
423
424 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
425 settings |= MGMT_SETTING_LE;
426
427 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
428 settings |= MGMT_SETTING_LINK_SECURITY;
429
430 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
431 settings |= MGMT_SETTING_SSP;
432
433 if (test_bit(HCI_HS_ENABLED, &hdev->dev_flags))
434 settings |= MGMT_SETTING_HS;
435
436 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
437 settings |= MGMT_SETTING_ADVERTISING;
438
439 if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags))
440 settings |= MGMT_SETTING_SECURE_CONN;
441
442 if (test_bit(HCI_DEBUG_KEYS, &hdev->dev_flags))
443 settings |= MGMT_SETTING_DEBUG_KEYS;
444
445 if (test_bit(HCI_PRIVACY, &hdev->dev_flags))
446 settings |= MGMT_SETTING_PRIVACY;
447
448 return settings;
449 }
450
451 #define PNP_INFO_SVCLASS_ID 0x1200
452
453 static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
454 {
455 u8 *ptr = data, *uuids_start = NULL;
456 struct bt_uuid *uuid;
457
458 if (len < 4)
459 return ptr;
460
461 list_for_each_entry(uuid, &hdev->uuids, list) {
462 u16 uuid16;
463
464 if (uuid->size != 16)
465 continue;
466
467 uuid16 = get_unaligned_le16(&uuid->uuid[12]);
468 if (uuid16 < 0x1100)
469 continue;
470
471 if (uuid16 == PNP_INFO_SVCLASS_ID)
472 continue;
473
474 if (!uuids_start) {
475 uuids_start = ptr;
476 uuids_start[0] = 1;
477 uuids_start[1] = EIR_UUID16_ALL;
478 ptr += 2;
479 }
480
481 /* Stop if not enough space to put next UUID */
482 if ((ptr - data) + sizeof(u16) > len) {
483 uuids_start[1] = EIR_UUID16_SOME;
484 break;
485 }
486
487 *ptr++ = (uuid16 & 0x00ff);
488 *ptr++ = (uuid16 & 0xff00) >> 8;
489 uuids_start[0] += sizeof(uuid16);
490 }
491
492 return ptr;
493 }
494
495 static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
496 {
497 u8 *ptr = data, *uuids_start = NULL;
498 struct bt_uuid *uuid;
499
500 if (len < 6)
501 return ptr;
502
503 list_for_each_entry(uuid, &hdev->uuids, list) {
504 if (uuid->size != 32)
505 continue;
506
507 if (!uuids_start) {
508 uuids_start = ptr;
509 uuids_start[0] = 1;
510 uuids_start[1] = EIR_UUID32_ALL;
511 ptr += 2;
512 }
513
514 /* Stop if not enough space to put next UUID */
515 if ((ptr - data) + sizeof(u32) > len) {
516 uuids_start[1] = EIR_UUID32_SOME;
517 break;
518 }
519
520 memcpy(ptr, &uuid->uuid[12], sizeof(u32));
521 ptr += sizeof(u32);
522 uuids_start[0] += sizeof(u32);
523 }
524
525 return ptr;
526 }
527
528 static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
529 {
530 u8 *ptr = data, *uuids_start = NULL;
531 struct bt_uuid *uuid;
532
533 if (len < 18)
534 return ptr;
535
536 list_for_each_entry(uuid, &hdev->uuids, list) {
537 if (uuid->size != 128)
538 continue;
539
540 if (!uuids_start) {
541 uuids_start = ptr;
542 uuids_start[0] = 1;
543 uuids_start[1] = EIR_UUID128_ALL;
544 ptr += 2;
545 }
546
547 /* Stop if not enough space to put next UUID */
548 if ((ptr - data) + 16 > len) {
549 uuids_start[1] = EIR_UUID128_SOME;
550 break;
551 }
552
553 memcpy(ptr, uuid->uuid, 16);
554 ptr += 16;
555 uuids_start[0] += 16;
556 }
557
558 return ptr;
559 }
560
561 static struct pending_cmd *mgmt_pending_find(u16 opcode, struct hci_dev *hdev)
562 {
563 struct pending_cmd *cmd;
564
565 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
566 if (cmd->opcode == opcode)
567 return cmd;
568 }
569
570 return NULL;
571 }
572
573 static u8 create_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
574 {
575 u8 ad_len = 0;
576 size_t name_len;
577
578 name_len = strlen(hdev->dev_name);
579 if (name_len > 0) {
580 size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;
581
582 if (name_len > max_len) {
583 name_len = max_len;
584 ptr[1] = EIR_NAME_SHORT;
585 } else
586 ptr[1] = EIR_NAME_COMPLETE;
587
588 ptr[0] = name_len + 1;
589
590 memcpy(ptr + 2, hdev->dev_name, name_len);
591
592 ad_len += (name_len + 2);
593 ptr += (name_len + 2);
594 }
595
596 return ad_len;
597 }
598
599 static void update_scan_rsp_data(struct hci_request *req)
600 {
601 struct hci_dev *hdev = req->hdev;
602 struct hci_cp_le_set_scan_rsp_data cp;
603 u8 len;
604
605 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
606 return;
607
608 memset(&cp, 0, sizeof(cp));
609
610 len = create_scan_rsp_data(hdev, cp.data);
611
612 if (hdev->scan_rsp_data_len == len &&
613 memcmp(cp.data, hdev->scan_rsp_data, len) == 0)
614 return;
615
616 memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
617 hdev->scan_rsp_data_len = len;
618
619 cp.length = len;
620
621 hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
622 }
623
624 static u8 get_adv_discov_flags(struct hci_dev *hdev)
625 {
626 struct pending_cmd *cmd;
627
628 /* If there's a pending mgmt command the flags will not yet have
629 * their final values, so check for this first.
630 */
631 cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
632 if (cmd) {
633 struct mgmt_mode *cp = cmd->param;
634 if (cp->val == 0x01)
635 return LE_AD_GENERAL;
636 else if (cp->val == 0x02)
637 return LE_AD_LIMITED;
638 } else {
639 if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
640 return LE_AD_LIMITED;
641 else if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
642 return LE_AD_GENERAL;
643 }
644
645 return 0;
646 }
647
648 static u8 create_adv_data(struct hci_dev *hdev, u8 *ptr)
649 {
650 u8 ad_len = 0, flags = 0;
651
652 flags |= get_adv_discov_flags(hdev);
653
654 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
655 flags |= LE_AD_NO_BREDR;
656
657 if (flags) {
658 BT_DBG("adv flags 0x%02x", flags);
659
660 ptr[0] = 2;
661 ptr[1] = EIR_FLAGS;
662 ptr[2] = flags;
663
664 ad_len += 3;
665 ptr += 3;
666 }
667
668 if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
669 ptr[0] = 2;
670 ptr[1] = EIR_TX_POWER;
671 ptr[2] = (u8) hdev->adv_tx_power;
672
673 ad_len += 3;
674 ptr += 3;
675 }
676
677 return ad_len;
678 }
679
680 static void update_adv_data(struct hci_request *req)
681 {
682 struct hci_dev *hdev = req->hdev;
683 struct hci_cp_le_set_adv_data cp;
684 u8 len;
685
686 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
687 return;
688
689 memset(&cp, 0, sizeof(cp));
690
691 len = create_adv_data(hdev, cp.data);
692
693 if (hdev->adv_data_len == len &&
694 memcmp(cp.data, hdev->adv_data, len) == 0)
695 return;
696
697 memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
698 hdev->adv_data_len = len;
699
700 cp.length = len;
701
702 hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
703 }
704
/* Build the Extended Inquiry Response payload in data: local name
 * (truncated to 48 bytes), inquiry TX power, Device ID record, and
 * the 16/32/128-bit service UUID lists (space permitting).
 */
static void create_eir(struct hci_dev *hdev, u8 *data)
{
        u8 *ptr = data;
        size_t name_len;

        name_len = strlen(hdev->dev_name);

        if (name_len > 0) {
                /* EIR Data type: shorten names that exceed 48 bytes */
                if (name_len > 48) {
                        name_len = 48;
                        ptr[1] = EIR_NAME_SHORT;
                } else
                        ptr[1] = EIR_NAME_COMPLETE;

                /* EIR Data length */
                ptr[0] = name_len + 1;

                memcpy(ptr + 2, hdev->dev_name, name_len);

                ptr += (name_len + 2);
        }

        if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
                ptr[0] = 2;
                ptr[1] = EIR_TX_POWER;
                ptr[2] = (u8) hdev->inq_tx_power;

                ptr += 3;
        }

        /* Device ID record (source, vendor, product, version) */
        if (hdev->devid_source > 0) {
                ptr[0] = 9;
                ptr[1] = EIR_DEVICE_ID;

                put_unaligned_le16(hdev->devid_source, ptr + 2);
                put_unaligned_le16(hdev->devid_vendor, ptr + 4);
                put_unaligned_le16(hdev->devid_product, ptr + 6);
                put_unaligned_le16(hdev->devid_version, ptr + 8);

                ptr += 10;
        }

        /* Each helper bounds itself by the space left in the buffer */
        ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
        ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
        ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
}
752
753 static void update_eir(struct hci_request *req)
754 {
755 struct hci_dev *hdev = req->hdev;
756 struct hci_cp_write_eir cp;
757
758 if (!hdev_is_powered(hdev))
759 return;
760
761 if (!lmp_ext_inq_capable(hdev))
762 return;
763
764 if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
765 return;
766
767 if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
768 return;
769
770 memset(&cp, 0, sizeof(cp));
771
772 create_eir(hdev, cp.data);
773
774 if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
775 return;
776
777 memcpy(hdev->eir, cp.data, sizeof(cp.data));
778
779 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
780 }
781
782 static u8 get_service_classes(struct hci_dev *hdev)
783 {
784 struct bt_uuid *uuid;
785 u8 val = 0;
786
787 list_for_each_entry(uuid, &hdev->uuids, list)
788 val |= uuid->svc_hint;
789
790 return val;
791 }
792
793 static void update_class(struct hci_request *req)
794 {
795 struct hci_dev *hdev = req->hdev;
796 u8 cod[3];
797
798 BT_DBG("%s", hdev->name);
799
800 if (!hdev_is_powered(hdev))
801 return;
802
803 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
804 return;
805
806 if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
807 return;
808
809 cod[0] = hdev->minor_class;
810 cod[1] = hdev->major_class;
811 cod[2] = get_service_classes(hdev);
812
813 if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
814 cod[1] |= 0x20;
815
816 if (memcmp(cod, hdev->dev_class, 3) == 0)
817 return;
818
819 hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
820 }
821
822 static bool get_connectable(struct hci_dev *hdev)
823 {
824 struct pending_cmd *cmd;
825
826 /* If there's a pending mgmt command the flag will not yet have
827 * it's final value, so check for this first.
828 */
829 cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
830 if (cmd) {
831 struct mgmt_mode *cp = cmd->param;
832 return cp->val;
833 }
834
835 return test_bit(HCI_CONNECTABLE, &hdev->dev_flags);
836 }
837
/* Queue the HCI commands that (re)start LE advertising: set the
 * advertising parameters and then enable advertising. Statement order
 * matters here — see the comment on clearing HCI_ADVERTISING below.
 */
static void enable_advertising(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_le_set_adv_param cp;
        u8 own_addr_type, enable = 0x01;
        bool connectable;

        /* Clear the HCI_ADVERTISING bit temporarily so that the
         * hci_update_random_address knows that it's safe to go ahead
         * and write a new random address. The flag will be set back on
         * as soon as the SET_ADV_ENABLE HCI command completes.
         */
        clear_bit(HCI_ADVERTISING, &hdev->dev_flags);

        connectable = get_connectable(hdev);

        /* Set require_privacy to true only when non-connectable
         * advertising is used. In that case it is fine to use a
         * non-resolvable private address.
         */
        if (hci_update_random_address(req, !connectable, &own_addr_type) < 0)
                return;

        memset(&cp, 0, sizeof(cp));
        /* 0x0800 * 0.625 ms = 1.28 s advertising interval */
        cp.min_interval = cpu_to_le16(0x0800);
        cp.max_interval = cpu_to_le16(0x0800);
        cp.type = connectable ? LE_ADV_IND : LE_ADV_NONCONN_IND;
        cp.own_address_type = own_addr_type;
        cp.channel_map = hdev->le_adv_channel_map;

        hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);

        hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}
872
873 static void disable_advertising(struct hci_request *req)
874 {
875 u8 enable = 0x00;
876
877 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
878 }
879
/* Delayed work handler: when the service cache period expires, push
 * the accumulated EIR and Class of Device changes to the controller
 * in a single request.
 */
static void service_cache_off(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                            service_cache.work);
        struct hci_request req;

        /* Only act if the cache was actually active */
        if (!test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
                return;

        hci_req_init(&req, hdev);

        hci_dev_lock(hdev);

        update_eir(&req);
        update_class(&req);

        hci_dev_unlock(hdev);

        /* Send the queued commands outside the hdev lock */
        hci_req_run(&req, NULL);
}
900
901 static void rpa_expired(struct work_struct *work)
902 {
903 struct hci_dev *hdev = container_of(work, struct hci_dev,
904 rpa_expired.work);
905 struct hci_request req;
906
907 BT_DBG("");
908
909 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
910
911 if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags) ||
912 hci_conn_num(hdev, LE_LINK) > 0)
913 return;
914
915 /* The generation of a new RPA and programming it into the
916 * controller happens in the enable_advertising() function.
917 */
918
919 hci_req_init(&req, hdev);
920
921 disable_advertising(&req);
922 enable_advertising(&req);
923
924 hci_req_run(&req, NULL);
925 }
926
927 static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
928 {
929 if (test_and_set_bit(HCI_MGMT, &hdev->dev_flags))
930 return;
931
932 INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
933 INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
934
935 /* Non-mgmt controlled devices get this bit set
936 * implicitly so that pairing works for them, however
937 * for mgmt we require user-space to explicitly enable
938 * it
939 */
940 clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
941 }
942
943 static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
944 void *data, u16 data_len)
945 {
946 struct mgmt_rp_read_info rp;
947
948 BT_DBG("sock %p %s", sk, hdev->name);
949
950 hci_dev_lock(hdev);
951
952 memset(&rp, 0, sizeof(rp));
953
954 bacpy(&rp.bdaddr, &hdev->bdaddr);
955
956 rp.version = hdev->hci_ver;
957 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
958
959 rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
960 rp.current_settings = cpu_to_le32(get_current_settings(hdev));
961
962 memcpy(rp.dev_class, hdev->dev_class, 3);
963
964 memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
965 memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));
966
967 hci_dev_unlock(hdev);
968
969 return cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
970 sizeof(rp));
971 }
972
973 static void mgmt_pending_free(struct pending_cmd *cmd)
974 {
975 sock_put(cmd->sk);
976 kfree(cmd->param);
977 kfree(cmd);
978 }
979
980 static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
981 struct hci_dev *hdev, void *data,
982 u16 len)
983 {
984 struct pending_cmd *cmd;
985
986 cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
987 if (!cmd)
988 return NULL;
989
990 cmd->opcode = opcode;
991 cmd->index = hdev->id;
992
993 cmd->param = kmalloc(len, GFP_KERNEL);
994 if (!cmd->param) {
995 kfree(cmd);
996 return NULL;
997 }
998
999 if (data)
1000 memcpy(cmd->param, data, len);
1001
1002 cmd->sk = sk;
1003 sock_hold(sk);
1004
1005 list_add(&cmd->list, &hdev->mgmt_pending);
1006
1007 return cmd;
1008 }
1009
1010 static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev,
1011 void (*cb)(struct pending_cmd *cmd,
1012 void *data),
1013 void *data)
1014 {
1015 struct pending_cmd *cmd, *tmp;
1016
1017 list_for_each_entry_safe(cmd, tmp, &hdev->mgmt_pending, list) {
1018 if (opcode > 0 && cmd->opcode != opcode)
1019 continue;
1020
1021 cb(cmd, data);
1022 }
1023 }
1024
1025 static void mgmt_pending_remove(struct pending_cmd *cmd)
1026 {
1027 list_del(&cmd->list);
1028 mgmt_pending_free(cmd);
1029 }
1030
1031 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
1032 {
1033 __le32 settings = cpu_to_le32(get_current_settings(hdev));
1034
1035 return cmd_complete(sk, hdev->id, opcode, 0, &settings,
1036 sizeof(settings));
1037 }
1038
1039 static void clean_up_hci_complete(struct hci_dev *hdev, u8 status)
1040 {
1041 BT_DBG("%s status 0x%02x", hdev->name, status);
1042
1043 if (hci_conn_count(hdev) == 0) {
1044 cancel_delayed_work(&hdev->power_off);
1045 queue_work(hdev->req_workqueue, &hdev->power_off.work);
1046 }
1047 }
1048
/* Queue the HCI commands that quiesce the controller before power
 * off: disable page/inquiry scan, stop advertising and LE scanning,
 * and disconnect, cancel or reject every connection depending on its
 * state. Returns the result of hci_req_run() (-ENODATA means nothing
 * needed to be sent).
 */
static int clean_up_hci_state(struct hci_dev *hdev)
{
        struct hci_request req;
        struct hci_conn *conn;

        hci_req_init(&req, hdev);

        /* Turn off page and inquiry scan if either is active */
        if (test_bit(HCI_ISCAN, &hdev->flags) ||
            test_bit(HCI_PSCAN, &hdev->flags)) {
                u8 scan = 0x00;
                hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
        }

        if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
                disable_advertising(&req);

        if (test_bit(HCI_LE_SCAN, &hdev->dev_flags)) {
                hci_req_add_le_scan_disable(&req);
        }

        list_for_each_entry(conn, &hdev->conn_hash.list, list) {
                struct hci_cp_disconnect dc;
                struct hci_cp_reject_conn_req rej;

                switch (conn->state) {
                case BT_CONNECTED:
                case BT_CONFIG:
                        /* Established link: disconnect it */
                        dc.handle = cpu_to_le16(conn->handle);
                        dc.reason = 0x15; /* Terminated due to Power Off */
                        hci_req_add(&req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
                        break;
                case BT_CONNECT:
                        /* Outgoing connection attempt: cancel it */
                        if (conn->type == LE_LINK)
                                hci_req_add(&req, HCI_OP_LE_CREATE_CONN_CANCEL,
                                            0, NULL);
                        else if (conn->type == ACL_LINK)
                                hci_req_add(&req, HCI_OP_CREATE_CONN_CANCEL,
                                            6, &conn->dst);
                        break;
                case BT_CONNECT2:
                        /* Incoming connection awaiting accept: reject it */
                        bacpy(&rej.bdaddr, &conn->dst);
                        rej.reason = 0x15; /* Terminated due to Power Off */
                        if (conn->type == ACL_LINK)
                                hci_req_add(&req, HCI_OP_REJECT_CONN_REQ,
                                            sizeof(rej), &rej);
                        else if (conn->type == SCO_LINK)
                                hci_req_add(&req, HCI_OP_REJECT_SYNC_CONN_REQ,
                                            sizeof(rej), &rej);
                        break;
                }
        }

        return hci_req_run(&req, clean_up_hci_complete);
}
1103
/* MGMT_OP_SET_POWERED handler: power the controller on or off.
 * Completion is asynchronous via a pending command; powering off
 * first cleans up HCI state (scans, advertising, connections).
 */
static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
                       u16 len)
{
        struct mgmt_mode *cp = data;
        struct pending_cmd *cmd;
        int err;

        BT_DBG("request for %s", hdev->name);

        if (cp->val != 0x00 && cp->val != 0x01)
                return cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
                                  MGMT_STATUS_INVALID_PARAMS);

        hci_dev_lock(hdev);

        /* Only one Set Powered may be in flight per controller */
        if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev)) {
                err = cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
                                 MGMT_STATUS_BUSY);
                goto failed;
        }

        /* Device is up only because of auto-power-on: a power-on
         * request merely needs to cancel the pending auto-off and
         * report success via mgmt_powered().
         */
        if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
                cancel_delayed_work(&hdev->power_off);

                if (cp->val) {
                        mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev,
                                         data, len);
                        err = mgmt_powered(hdev, 1);
                        goto failed;
                }
        }

        /* Already in the requested state: just echo the settings */
        if (!!cp->val == hdev_is_powered(hdev)) {
                err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
                goto failed;
        }

        cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
        if (!cmd) {
                err = -ENOMEM;
                goto failed;
        }

        if (cp->val) {
                queue_work(hdev->req_workqueue, &hdev->power_on);
                err = 0;
        } else {
                /* Disconnect connections, stop scans, etc */
                err = clean_up_hci_state(hdev);
                /* NOTE(review): HCI_POWER_OFF_TIMEOUT must already be
                 * expressed in jiffies here — confirm against its
                 * definition (cf. the "missing msecs to jiffies
                 * conversion" fix this file relates to).
                 */
                if (!err)
                        queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
                                           HCI_POWER_OFF_TIMEOUT);

                /* ENODATA means there were no HCI commands queued */
                if (err == -ENODATA) {
                        cancel_delayed_work(&hdev->power_off);
                        queue_work(hdev->req_workqueue, &hdev->power_off.work);
                        err = 0;
                }
        }

failed:
        hci_dev_unlock(hdev);
        return err;
}
1169
1170 static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 data_len,
1171 struct sock *skip_sk)
1172 {
1173 struct sk_buff *skb;
1174 struct mgmt_hdr *hdr;
1175
1176 skb = alloc_skb(sizeof(*hdr) + data_len, GFP_KERNEL);
1177 if (!skb)
1178 return -ENOMEM;
1179
1180 hdr = (void *) skb_put(skb, sizeof(*hdr));
1181 hdr->opcode = cpu_to_le16(event);
1182 if (hdev)
1183 hdr->index = cpu_to_le16(hdev->id);
1184 else
1185 hdr->index = cpu_to_le16(MGMT_INDEX_NONE);
1186 hdr->len = cpu_to_le16(data_len);
1187
1188 if (data)
1189 memcpy(skb_put(skb, data_len), data, data_len);
1190
1191 /* Time stamp */
1192 __net_timestamp(skb);
1193
1194 hci_send_to_control(skb, skip_sk);
1195 kfree_skb(skb);
1196
1197 return 0;
1198 }
1199
1200 static int new_settings(struct hci_dev *hdev, struct sock *skip)
1201 {
1202 __le32 ev;
1203
1204 ev = cpu_to_le32(get_current_settings(hdev));
1205
1206 return mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev), skip);
1207 }
1208
/* Shared state for mgmt_pending_foreach() callbacks.
 *
 * @sk:          first socket that was responded to; the callback takes a
 *               reference (sock_hold) so the caller can later skip it when
 *               broadcasting and must sock_put() it.
 * @hdev:        controller the pending commands belong to.
 * @mgmt_status: status code for callbacks that report an error
 *               (presumably unused by callbacks like settings_rsp that
 *               never read it — confirm against other users).
 */
struct cmd_lookup {
	struct sock *sk;
	struct hci_dev *hdev;
	u8 mgmt_status;
};
1214
1215 static void settings_rsp(struct pending_cmd *cmd, void *data)
1216 {
1217 struct cmd_lookup *match = data;
1218
1219 send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);
1220
1221 list_del(&cmd->list);
1222
1223 if (match->sk == NULL) {
1224 match->sk = cmd->sk;
1225 sock_hold(match->sk);
1226 }
1227
1228 mgmt_pending_free(cmd);
1229 }
1230
1231 static void cmd_status_rsp(struct pending_cmd *cmd, void *data)
1232 {
1233 u8 *status = data;
1234
1235 cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
1236 mgmt_pending_remove(cmd);
1237 }
1238
1239 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1240 {
1241 if (!lmp_bredr_capable(hdev))
1242 return MGMT_STATUS_NOT_SUPPORTED;
1243 else if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1244 return MGMT_STATUS_REJECTED;
1245 else
1246 return MGMT_STATUS_SUCCESS;
1247 }
1248
1249 static u8 mgmt_le_support(struct hci_dev *hdev)
1250 {
1251 if (!lmp_le_capable(hdev))
1252 return MGMT_STATUS_NOT_SUPPORTED;
1253 else if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
1254 return MGMT_STATUS_REJECTED;
1255 else
1256 return MGMT_STATUS_SUCCESS;
1257 }
1258
/* HCI request completion handler for MGMT_OP_SET_DISCOVERABLE.
 *
 * On success commits the HCI_DISCOVERABLE flag according to the stored
 * command parameter, arms the discoverable timeout (if one was set),
 * answers the pending command and broadcasts New Settings when the flag
 * actually changed. On HCI failure the command is failed and the limited
 * discoverable flag (set optimistically by set_discoverable()) is rolled
 * back. Finally the class of device is refreshed so its limited
 * discoverable bit matches the new mode.
 */
static void set_discoverable_complete(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;
	struct mgmt_mode *cp;
	struct hci_request req;
	bool changed;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	/* Nothing to do if no command is pending (e.g. already completed) */
	cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		/* Roll back the flag set optimistically by set_discoverable() */
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
		goto remove_cmd;
	}

	cp = cmd->param;
	if (cp->val) {
		changed = !test_and_set_bit(HCI_DISCOVERABLE,
					    &hdev->dev_flags);

		/* discov_timeout is in seconds; 0 means no timeout */
		if (hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
					   to);
		}
	} else {
		changed = test_and_clear_bit(HCI_DISCOVERABLE,
					     &hdev->dev_flags);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);

	if (changed)
		new_settings(hdev, cmd->sk);

	/* When the discoverable mode gets changed, make sure
	 * that class of device has the limited discoverable
	 * bit correctly set.
	 */
	hci_req_init(&req, hdev);
	update_class(&req);
	hci_req_run(&req, NULL);

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
1315
/* Handler for MGMT_OP_SET_DISCOVERABLE.
 *
 * cp->val selects the mode: 0x00 off, 0x01 general discoverable,
 * 0x02 limited discoverable; cp->timeout is the auto-off time in
 * seconds. Depending on controller state this either just updates the
 * stored flags/timeout or queues the HCI commands (IAC LAP, scan
 * enable, advertising data) needed to apply the mode, completing
 * asynchronously in set_discoverable_complete().
 *
 * Returns 0 or a negative error; the mgmt response is sent either here
 * or from the completion handler.
 */
static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_discoverable *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u16 timeout;
	u8 scan;
	int err;

	BT_DBG("request for %s", hdev->name);

	/* At least one of BR/EDR or LE must be enabled */
	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
	    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	timeout = __le16_to_cpu(cp->timeout);

	/* Disabling discoverable requires that no timeout is set,
	 * and enabling limited discoverable requires a timeout.
	 */
	if ((cp->val == 0x00 && timeout > 0) ||
	    (cp->val == 0x02 && timeout == 0))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A timeout cannot be armed while powered off */
	if (!hdev_is_powered(hdev) && timeout > 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				 MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	/* Only one discoverable/connectable change may be in flight */
	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Discoverable requires connectable */
	if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				 MGMT_STATUS_REJECTED);
		goto failed;
	}

	/* Powered off: only toggle the stored flag, no HCI traffic */
	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		/* Setting limited discoverable when powered off is
		 * not a valid operation since it requires a timeout
		 * and so no need to check HCI_LIMITED_DISCOVERABLE.
		 */
		if (!!cp->val != test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) {
			change_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	/* If the current mode is the same, then just update the timeout
	 * value with the new value. And if only the timeout gets updated,
	 * then no need for any HCI transactions.
	 */
	if (!!cp->val == test_bit(HCI_DISCOVERABLE, &hdev->dev_flags) &&
	    (cp->val == 0x02) == test_bit(HCI_LIMITED_DISCOVERABLE,
					  &hdev->dev_flags)) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = timeout;

		/* Re-arm the auto-off work with the new timeout (seconds) */
		if (cp->val && hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
					   to);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel any potential discoverable timeout that might be
	 * still active and store new timeout value. The arming of
	 * the timeout happens in the complete handler.
	 */
	cancel_delayed_work(&hdev->discov_off);
	hdev->discov_timeout = timeout;

	/* Limited discoverable mode */
	if (cp->val == 0x02)
		set_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	else
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);

	hci_req_init(&req, hdev);

	/* The procedure for LE-only controllers is much simpler - just
	 * update the advertising data.
	 */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		goto update_ad;

	scan = SCAN_PAGE;

	if (cp->val) {
		struct hci_cp_write_current_iac_lap hci_cp;

		if (cp->val == 0x02) {
			/* Limited discoverable mode */
			hci_cp.num_iac = min_t(u8, hdev->num_iac, 2);
			hci_cp.iac_lap[0] = 0x00;	/* LIAC */
			hci_cp.iac_lap[1] = 0x8b;
			hci_cp.iac_lap[2] = 0x9e;
			hci_cp.iac_lap[3] = 0x33;	/* GIAC */
			hci_cp.iac_lap[4] = 0x8b;
			hci_cp.iac_lap[5] = 0x9e;
		} else {
			/* General discoverable mode */
			hci_cp.num_iac = 1;
			hci_cp.iac_lap[0] = 0x33;	/* GIAC */
			hci_cp.iac_lap[1] = 0x8b;
			hci_cp.iac_lap[2] = 0x9e;
		}

		hci_req_add(&req, HCI_OP_WRITE_CURRENT_IAC_LAP,
			    (hci_cp.num_iac * 3) + 1, &hci_cp);

		scan |= SCAN_INQUIRY;
	} else {
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	}

	hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

update_ad:
	update_adv_data(&req);

	err = hci_req_run(&req, set_discoverable_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
1480
/* Queue page scan parameter changes for fast connectable mode.
 *
 * @enable true requests interlaced page scan with a 160 msec interval;
 * false restores the standard type and the default 1.28 s interval.
 * Nothing is queued when BR/EDR is disabled, when the controller is
 * older than 1.2 (no page scan commands), or when the requested values
 * already match the cached ones.
 */
static void write_fast_connectable(struct hci_request *req, bool enable)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_page_scan_activity acp;
	u8 type;

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return;

	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (enable) {
		type = PAGE_SCAN_TYPE_INTERLACED;

		/* 160 msec page scan interval */
		acp.interval = cpu_to_le16(0x0100);
	} else {
		type = PAGE_SCAN_TYPE_STANDARD;	/* default */

		/* default 1.28 sec page scan */
		acp.interval = cpu_to_le16(0x0800);
	}

	acp.window = cpu_to_le16(0x0012);

	/* Only send commands for values that actually differ from the
	 * currently cached parameters.
	 */
	if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
	    __cpu_to_le16(hdev->page_scan_window) != acp.window)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
			    sizeof(acp), &acp);

	if (hdev->page_scan_type != type)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
}
1515
/* HCI request completion handler for MGMT_OP_SET_CONNECTABLE.
 *
 * On success commits the HCI_CONNECTABLE flag according to the stored
 * command parameter, answers the pending command and broadcasts New
 * Settings when the flag changed; on HCI failure the pending command is
 * failed with the translated status.
 */
static void set_connectable_complete(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;
	struct mgmt_mode *cp;
	bool changed;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		goto remove_cmd;
	}

	cp = cmd->param;
	if (cp->val)
		changed = !test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
	else
		changed = test_and_clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);

	send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);

	if (changed)
		new_settings(hdev, cmd->sk);

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
1553
1554 static int set_connectable_update_settings(struct hci_dev *hdev,
1555 struct sock *sk, u8 val)
1556 {
1557 bool changed = false;
1558 int err;
1559
1560 if (!!val != test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
1561 changed = true;
1562
1563 if (val) {
1564 set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1565 } else {
1566 clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1567 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1568 }
1569
1570 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1571 if (err < 0)
1572 return err;
1573
1574 if (changed)
1575 return new_settings(hdev, sk);
1576
1577 return 0;
1578 }
1579
/* Handler for MGMT_OP_SET_CONNECTABLE.
 *
 * cp->val 0x01 enables and 0x00 disables connectable mode. When powered
 * off only the stored flags are updated; otherwise the required HCI
 * commands (scan enable, advertising data, page scan parameters) are
 * queued and the operation completes in set_connectable_complete().
 */
static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u8 scan;
	int err;

	BT_DBG("request for %s", hdev->name);

	/* At least one of BR/EDR or LE must be enabled */
	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
	    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Powered off: flag-only update, no HCI traffic */
	if (!hdev_is_powered(hdev)) {
		err = set_connectable_update_settings(hdev, sk, cp->val);
		goto failed;
	}

	/* Only one discoverable/connectable change may be in flight */
	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	hci_req_init(&req, hdev);

	/* If BR/EDR is not enabled and we disable advertising as a
	 * by-product of disabling connectable, we need to update the
	 * advertising flags.
	 */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		if (!cp->val) {
			clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
			clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
		}
		update_adv_data(&req);
	} else if (cp->val != test_bit(HCI_PSCAN, &hdev->flags)) {
		if (cp->val) {
			scan = SCAN_PAGE;
		} else {
			scan = 0;

			/* Disabling page scan while a discoverable timeout
			 * is armed makes the timeout pointless; cancel it.
			 */
			if (test_bit(HCI_ISCAN, &hdev->flags) &&
			    hdev->discov_timeout > 0)
				cancel_delayed_work(&hdev->discov_off);
		}

		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	}

	/* If we're going from non-connectable to connectable or
	 * vice-versa when fast connectable is enabled ensure that fast
	 * connectable gets disabled. write_fast_connectable won't do
	 * anything if the page scan parameters are already what they
	 * should be.
	 */
	if (cp->val || test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
		write_fast_connectable(&req, false);

	/* Restart advertising so it picks up the new connectable state,
	 * but only when no LE connection is active.
	 */
	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags) &&
	    hci_conn_num(hdev, LE_LINK) == 0) {
		disable_advertising(&req);
		enable_advertising(&req);
	}

	err = hci_req_run(&req, set_connectable_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		/* ENODATA means no HCI commands were queued; fall back to
		 * a pure settings update.
		 */
		if (err == -ENODATA)
			err = set_connectable_update_settings(hdev, sk,
							      cp->val);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1674
1675 static int set_pairable(struct sock *sk, struct hci_dev *hdev, void *data,
1676 u16 len)
1677 {
1678 struct mgmt_mode *cp = data;
1679 bool changed;
1680 int err;
1681
1682 BT_DBG("request for %s", hdev->name);
1683
1684 if (cp->val != 0x00 && cp->val != 0x01)
1685 return cmd_status(sk, hdev->id, MGMT_OP_SET_PAIRABLE,
1686 MGMT_STATUS_INVALID_PARAMS);
1687
1688 hci_dev_lock(hdev);
1689
1690 if (cp->val)
1691 changed = !test_and_set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1692 else
1693 changed = test_and_clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
1694
1695 err = send_settings_rsp(sk, MGMT_OP_SET_PAIRABLE, hdev);
1696 if (err < 0)
1697 goto unlock;
1698
1699 if (changed)
1700 err = new_settings(hdev, sk);
1701
1702 unlock:
1703 hci_dev_unlock(hdev);
1704 return err;
1705 }
1706
/* Handler for MGMT_OP_SET_LINK_SECURITY.
 *
 * Toggles link-level security (authentication). Requires BR/EDR. When
 * powered off only the HCI_LINK_SECURITY flag is updated; when powered
 * on, HCI Write Auth Enable is sent and the pending command completes
 * from the corresponding HCI event handler.
 */
static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	u8 val, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				  status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Powered off: flag-only update */
	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		if (!!cp->val != test_bit(HCI_LINK_SECURITY,
					  &hdev->dev_flags)) {
			change_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Already in the requested state: respond without HCI traffic */
	if (test_bit(HCI_AUTH, &hdev->flags) == val) {
		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1776
/* Handler for MGMT_OP_SET_SSP (Secure Simple Pairing).
 *
 * Requires BR/EDR and SSP-capable hardware. When powered off only the
 * HCI_SSP_ENABLED flag is updated (and disabling SSP also disables
 * High Speed, since HS depends on SSP); when powered on HCI Write
 * Simple Pairing Mode is sent and the pending command completes from
 * the corresponding HCI event handler.
 */
static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	u8 status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);

	if (!lmp_ssp_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Powered off: flag-only update */
	if (!hdev_is_powered(hdev)) {
		bool changed;

		if (cp->val) {
			changed = !test_and_set_bit(HCI_SSP_ENABLED,
						    &hdev->dev_flags);
		} else {
			/* Disabling SSP also disables HS; "changed" must be
			 * true if either flag was actually cleared.
			 */
			changed = test_and_clear_bit(HCI_SSP_ENABLED,
						     &hdev->dev_flags);
			if (!changed)
				changed = test_and_clear_bit(HCI_HS_ENABLED,
							     &hdev->dev_flags);
			else
				clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_SSP, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_HS, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Already in the requested state: respond without HCI traffic */
	if (!!cp->val == test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1854
/* Handler for MGMT_OP_SET_HS (High Speed / AMP).
 *
 * Purely a host-side flag: no HCI commands are sent. Requires BR/EDR,
 * SSP-capable hardware and SSP enabled. Disabling HS while powered on
 * is rejected.
 */
static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	u8 status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);

	if (!lmp_ssp_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				  MGMT_STATUS_NOT_SUPPORTED);

	/* HS depends on SSP being enabled */
	if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val) {
		changed = !test_and_set_bit(HCI_HS_ENABLED, &hdev->dev_flags);
	} else {
		/* Disabling is only allowed while powered off */
		if (hdev_is_powered(hdev)) {
			err = cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
					 MGMT_STATUS_REJECTED);
			goto unlock;
		}

		changed = test_and_clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
1905
/* HCI request completion handler for MGMT_OP_SET_LE.
 *
 * On failure all pending SET_LE commands are failed with the translated
 * status. On success each pending command gets a settings response, New
 * Settings is broadcast (skipping the first responded socket), and — if
 * LE ended up enabled — the advertising and scan response data are
 * refreshed.
 */
static void le_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	/* settings_rsp took a reference on the first responded socket */
	if (match.sk)
		sock_put(match.sk);

	/* Make sure the controller has a good default for
	 * advertising data. Restrict the update to when LE
	 * has actually been enabled. During power on, the
	 * update in powered_update_hci will take care of it.
	 */
	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		struct hci_request req;

		hci_dev_lock(hdev);

		hci_req_init(&req, hdev);
		update_adv_data(&req);
		update_scan_rsp_data(&req);
		hci_req_run(&req, NULL);

		hci_dev_unlock(hdev);
	}
}
1943
/* Handler for MGMT_OP_SET_LE.
 *
 * Toggles LE support. Rejected on LE-only controllers (BR/EDR disabled)
 * since LE cannot be switched off there. When powered off, or when the
 * host LE state already matches, only the flags are updated; otherwise
 * HCI Write LE Host Supported (plus disabling advertising when turning
 * LE off) is queued and the request completes in le_enable_complete().
 */
static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct hci_cp_write_le_host_supported hci_cp;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;
	u8 val, enabled;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				  MGMT_STATUS_INVALID_PARAMS);

	/* LE-only devices do not allow toggling LE on/off */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				  MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = lmp_host_le_capable(hdev);

	/* Powered off, or controller host-LE state already matches:
	 * flag-only update, no HCI traffic.
	 */
	if (!hdev_is_powered(hdev) || val == enabled) {
		bool changed = false;

		if (val != test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
			change_bit(HCI_LE_ENABLED, &hdev->dev_flags);
			changed = true;
		}

		/* Turning LE off also turns advertising off */
		if (!val && test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
			clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	if (mgmt_pending_find(MGMT_OP_SET_LE, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	memset(&hci_cp, 0, sizeof(hci_cp));

	if (val) {
		hci_cp.le = val;
		hci_cp.simul = lmp_le_br_capable(hdev);
	} else {
		/* Advertising must be stopped before disabling LE */
		if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
			disable_advertising(&req);
	}

	hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
		    &hci_cp);

	err = hci_req_run(&req, le_enable_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2032
2033 /* This is a helper function to test for pending mgmt commands that can
2034 * cause CoD or EIR HCI commands. We can only allow one such pending
2035 * mgmt command at a time since otherwise we cannot easily track what
2036 * the current values are, will be, and based on that calculate if a new
2037 * HCI command needs to be sent and if yes with what value.
2038 */
2039 static bool pending_eir_or_class(struct hci_dev *hdev)
2040 {
2041 struct pending_cmd *cmd;
2042
2043 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2044 switch (cmd->opcode) {
2045 case MGMT_OP_ADD_UUID:
2046 case MGMT_OP_REMOVE_UUID:
2047 case MGMT_OP_SET_DEV_CLASS:
2048 case MGMT_OP_SET_POWERED:
2049 return true;
2050 }
2051 }
2052
2053 return false;
2054 }
2055
/* Bluetooth Base UUID (00000000-0000-1000-8000-00805F9B34FB) in the
 * little-endian byte order used for UUIDs throughout this file. UUIDs
 * whose last 12 bytes match this prefix are shortened 16/32-bit UUIDs.
 */
static const u8 bluetooth_base_uuid[] = {
			0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
			0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
2060
2061 static u8 get_uuid_size(const u8 *uuid)
2062 {
2063 u32 val;
2064
2065 if (memcmp(uuid, bluetooth_base_uuid, 12))
2066 return 128;
2067
2068 val = get_unaligned_le32(&uuid[12]);
2069 if (val > 0xffff)
2070 return 32;
2071
2072 return 16;
2073 }
2074
/* Common completion helper for class-of-device related commands
 * (Add/Remove UUID, Set Device Class): answers the pending command for
 * @mgmt_op with the translated status and the current device class,
 * then removes it. Does nothing if no such command is pending.
 */
static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
{
	struct pending_cmd *cmd;

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(mgmt_op, hdev);
	if (!cmd)
		goto unlock;

	cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(status),
		     hdev->dev_class, 3);

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
2093
/* HCI request completion handler for MGMT_OP_ADD_UUID: delegate to the
 * shared class-command completion helper.
 */
static void add_uuid_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
}
2100
/* Handler for MGMT_OP_ADD_UUID.
 *
 * Appends the UUID to the device's list and queues HCI commands to
 * refresh the class of device and EIR data. If no HCI commands are
 * needed (-ENODATA) the command completes immediately; otherwise it
 * completes in add_uuid_complete().
 */
static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_cp_add_uuid *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	struct bt_uuid *uuid;
	int err;

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	/* Only one EIR/CoD-affecting command may be pending at a time */
	if (pending_eir_or_class(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
	if (!uuid) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(uuid->uuid, cp->uuid, 16);
	uuid->svc_hint = cp->svc_hint;
	uuid->size = get_uuid_size(cp->uuid);

	list_add_tail(&uuid->list, &hdev->uuids);

	hci_req_init(&req, hdev);

	update_class(&req);
	update_eir(&req);

	err = hci_req_run(&req, add_uuid_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto failed;

		/* No HCI commands were needed: complete right away */
		err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
				   hdev->dev_class, 3);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
2158
2159 static bool enable_service_cache(struct hci_dev *hdev)
2160 {
2161 if (!hdev_is_powered(hdev))
2162 return false;
2163
2164 if (!test_and_set_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
2165 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2166 CACHE_TIMEOUT);
2167 return true;
2168 }
2169
2170 return false;
2171 }
2172
/* HCI request completion handler for MGMT_OP_REMOVE_UUID: delegate to
 * the shared class-command completion helper.
 */
static void remove_uuid_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
}
2179
/* Handler for MGMT_OP_REMOVE_UUID.
 *
 * An all-zero UUID clears the whole list (optionally deferring the
 * EIR/class update via the service cache); otherwise all matching
 * entries are removed. Queues HCI commands to refresh the class of
 * device and EIR, completing either immediately (-ENODATA) or in
 * remove_uuid_complete().
 */
static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_remove_uuid *cp = data;
	struct pending_cmd *cmd;
	struct bt_uuid *match, *tmp;
	u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
	struct hci_request req;
	int err, found;

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	/* Only one EIR/CoD-affecting command may be pending at a time */
	if (pending_eir_or_class(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* All-zero UUID means "remove everything" */
	if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
		hci_uuids_clear(hdev);

		/* If the service cache was armed, the actual EIR/class
		 * refresh is deferred until the cache work runs.
		 */
		if (enable_service_cache(hdev)) {
			err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID,
					   0, hdev->dev_class, 3);
			goto unlock;
		}

		goto update_class;
	}

	found = 0;

	list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
		if (memcmp(match->uuid, cp->uuid, 16) != 0)
			continue;

		list_del(&match->list);
		kfree(match);
		found++;
	}

	if (found == 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

update_class:
	hci_req_init(&req, hdev);

	update_class(&req);
	update_eir(&req);

	err = hci_req_run(&req, remove_uuid_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto unlock;

		/* No HCI commands were needed: complete right away */
		err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
				   hdev->dev_class, 3);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2257
/* HCI request completion handler for MGMT_OP_SET_DEV_CLASS: delegate to
 * the shared class-command completion helper.
 */
static void set_class_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
}
2264
/* Handler for MGMT_OP_SET_DEV_CLASS.
 *
 * Stores the major/minor class (the low two minor bits and high three
 * major bits are reserved and must be zero) and, when powered on,
 * queues HCI commands to update the class of device — flushing any
 * armed service cache first so EIR is refreshed too. Completes either
 * immediately (powered off or -ENODATA) or in set_class_complete().
 */
static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_dev_class *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_bredr_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				  MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	/* Only one EIR/CoD-affecting command may be pending at a time */
	if (pending_eir_or_class(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Reserved bits must be zero */
	if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	hdev->major_class = cp->major;
	hdev->minor_class = cp->minor;

	/* Powered off: the stored values take effect at power on */
	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
				   hdev->dev_class, 3);
		goto unlock;
	}

	hci_req_init(&req, hdev);

	/* Flush a pending service cache so the EIR update below reflects
	 * the real UUID list; the lock must be dropped because the cache
	 * work itself takes it.
	 */
	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
		hci_dev_unlock(hdev);
		cancel_delayed_work_sync(&hdev->service_cache);
		hci_dev_lock(hdev);
		update_eir(&req);
	}

	update_class(&req);

	err = hci_req_run(&req, set_class_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto unlock;

		/* No HCI commands were needed: complete right away */
		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
				   hdev->dev_class, 3);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2335
2336 static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
2337 u16 len)
2338 {
2339 struct mgmt_cp_load_link_keys *cp = data;
2340 u16 key_count, expected_len;
2341 bool changed;
2342 int i;
2343
2344 BT_DBG("request for %s", hdev->name);
2345
2346 if (!lmp_bredr_capable(hdev))
2347 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2348 MGMT_STATUS_NOT_SUPPORTED);
2349
2350 key_count = __le16_to_cpu(cp->key_count);
2351
2352 expected_len = sizeof(*cp) + key_count *
2353 sizeof(struct mgmt_link_key_info);
2354 if (expected_len != len) {
2355 BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
2356 expected_len, len);
2357 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2358 MGMT_STATUS_INVALID_PARAMS);
2359 }
2360
2361 if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
2362 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2363 MGMT_STATUS_INVALID_PARAMS);
2364
2365 BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
2366 key_count);
2367
2368 for (i = 0; i < key_count; i++) {
2369 struct mgmt_link_key_info *key = &cp->keys[i];
2370
2371 if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
2372 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2373 MGMT_STATUS_INVALID_PARAMS);
2374 }
2375
2376 hci_dev_lock(hdev);
2377
2378 hci_link_keys_clear(hdev);
2379
2380 if (cp->debug_keys)
2381 changed = !test_and_set_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
2382 else
2383 changed = test_and_clear_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
2384
2385 if (changed)
2386 new_settings(hdev, NULL);
2387
2388 for (i = 0; i < key_count; i++) {
2389 struct mgmt_link_key_info *key = &cp->keys[i];
2390
2391 hci_add_link_key(hdev, NULL, 0, &key->addr.bdaddr, key->val,
2392 key->type, key->pin_len);
2393 }
2394
2395 cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
2396
2397 hci_dev_unlock(hdev);
2398
2399 return 0;
2400 }
2401
2402 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2403 u8 addr_type, struct sock *skip_sk)
2404 {
2405 struct mgmt_ev_device_unpaired ev;
2406
2407 bacpy(&ev.addr.bdaddr, bdaddr);
2408 ev.addr.type = addr_type;
2409
2410 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
2411 skip_sk);
2412 }
2413
/* Remove all pairing data for a remote device (BR/EDR link key, or for
 * LE the IRK, stored connection parameters and LTK) and optionally
 * disconnect it.
 *
 * If a disconnect is requested and a connection exists, the command is
 * kept pending until the HCI disconnect completes; otherwise the reply
 * and the Device Unpaired event are sent immediately.
 */
static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_unpair_device *cp = data;
	struct mgmt_rp_unpair_device rp;
	struct hci_cp_disconnect dc;
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	/* The reply always echoes the target address */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	/* disconnect is a boolean flag; anything else is invalid */
	if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
		return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR) {
		err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
	} else {
		u8 addr_type;

		if (cp->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

		hci_remove_irk(hdev, &cp->addr.bdaddr, addr_type);

		hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);

		/* LTK removal result decides whether the device counted
		 * as paired at all.
		 */
		err = hci_remove_ltk(hdev, &cp->addr.bdaddr, addr_type);
	}

	if (err < 0) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
				   MGMT_STATUS_NOT_PAIRED, &rp, sizeof(rp));
		goto unlock;
	}

	if (cp->disconnect) {
		if (cp->addr.type == BDADDR_BREDR)
			conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
						       &cp->addr.bdaddr);
		else
			conn = hci_conn_hash_lookup_ba(hdev, LE_LINK,
						       &cp->addr.bdaddr);
	} else {
		conn = NULL;
	}

	/* No connection to tear down: reply right away and broadcast the
	 * unpaired event to the other mgmt sockets (the requester already
	 * receives the command reply).
	 */
	if (!conn) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
				   &rp, sizeof(rp));
		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
		goto unlock;
	}

	/* Defer the reply until the HCI disconnect completes */
	cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
			       sizeof(*cp));
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	dc.handle = cpu_to_le16(conn->handle);
	dc.reason = 0x13; /* Remote User Terminated Connection */
	err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2504
/* Disconnect the remote device identified by the request address.
 *
 * Only one Disconnect command may be pending per controller; the mgmt
 * reply is deferred until the HCI disconnect completes.
 */
static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
		      u16 len)
{
	struct mgmt_cp_disconnect *cp = data;
	struct mgmt_rp_disconnect rp;
	struct hci_cp_disconnect dc;
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	/* The reply always echoes the target address */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto failed;
	}

	/* Only one disconnect may be in flight at a time */
	if (mgmt_pending_find(MGMT_OP_DISCONNECT, hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				   MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto failed;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	/* BT_OPEN/BT_CLOSED connections are not yet (or no longer) live */
	if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				   MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	dc.handle = cpu_to_le16(conn->handle);
	dc.reason = HCI_ERROR_REMOTE_USER_TERM;

	err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
2569
2570 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
2571 {
2572 switch (link_type) {
2573 case LE_LINK:
2574 switch (addr_type) {
2575 case ADDR_LE_DEV_PUBLIC:
2576 return BDADDR_LE_PUBLIC;
2577
2578 default:
2579 /* Fallback to LE Random address type */
2580 return BDADDR_LE_RANDOM;
2581 }
2582
2583 default:
2584 /* Fallback to BR/EDR type */
2585 return BDADDR_BREDR;
2586 }
2587 }
2588
2589 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
2590 u16 data_len)
2591 {
2592 struct mgmt_rp_get_connections *rp;
2593 struct hci_conn *c;
2594 size_t rp_len;
2595 int err;
2596 u16 i;
2597
2598 BT_DBG("");
2599
2600 hci_dev_lock(hdev);
2601
2602 if (!hdev_is_powered(hdev)) {
2603 err = cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
2604 MGMT_STATUS_NOT_POWERED);
2605 goto unlock;
2606 }
2607
2608 i = 0;
2609 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2610 if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2611 i++;
2612 }
2613
2614 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2615 rp = kmalloc(rp_len, GFP_KERNEL);
2616 if (!rp) {
2617 err = -ENOMEM;
2618 goto unlock;
2619 }
2620
2621 i = 0;
2622 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2623 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2624 continue;
2625 bacpy(&rp->addr[i].bdaddr, &c->dst);
2626 rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
2627 if (c->type == SCO_LINK || c->type == ESCO_LINK)
2628 continue;
2629 i++;
2630 }
2631
2632 rp->conn_count = cpu_to_le16(i);
2633
2634 /* Recalculate length in case of filtered SCO connections, etc */
2635 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2636
2637 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
2638 rp_len);
2639
2640 kfree(rp);
2641
2642 unlock:
2643 hci_dev_unlock(hdev);
2644 return err;
2645 }
2646
2647 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
2648 struct mgmt_cp_pin_code_neg_reply *cp)
2649 {
2650 struct pending_cmd *cmd;
2651 int err;
2652
2653 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
2654 sizeof(*cp));
2655 if (!cmd)
2656 return -ENOMEM;
2657
2658 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2659 sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
2660 if (err < 0)
2661 mgmt_pending_remove(cmd);
2662
2663 return err;
2664 }
2665
/* Reply to a PIN Code Request for a BR/EDR connection. If the pending
 * security level is high and the supplied PIN is not 16 digits long, a
 * negative reply is sent to the controller instead and the command is
 * rejected with INVALID_PARAMS.
 */
static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct hci_conn *conn;
	struct mgmt_cp_pin_code_reply *cp = data;
	struct hci_cp_pin_code_reply reply;
	struct pending_cmd *cmd;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				 MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
	if (!conn) {
		err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				 MGMT_STATUS_NOT_CONNECTED);
		goto failed;
	}

	/* High security requires a full 16-digit PIN */
	if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
		struct mgmt_cp_pin_code_neg_reply ncp;

		memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));

		BT_ERR("PIN code is not 16 bytes long");

		err = send_pin_code_neg_reply(sk, hdev, &ncp);
		if (err >= 0)
			err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
					 MGMT_STATUS_INVALID_PARAMS);

		goto failed;
	}

	/* Defer the mgmt reply until the HCI command completes */
	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	bacpy(&reply.bdaddr, &cp->addr.bdaddr);
	reply.pin_len = cp->pin_len;
	memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
2725
2726 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
2727 u16 len)
2728 {
2729 struct mgmt_cp_set_io_capability *cp = data;
2730
2731 BT_DBG("");
2732
2733 hci_dev_lock(hdev);
2734
2735 hdev->io_capability = cp->io_capability;
2736
2737 BT_DBG("%s IO capability set to 0x%02x", hdev->name,
2738 hdev->io_capability);
2739
2740 hci_dev_unlock(hdev);
2741
2742 return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0, NULL,
2743 0);
2744 }
2745
2746 static struct pending_cmd *find_pairing(struct hci_conn *conn)
2747 {
2748 struct hci_dev *hdev = conn->hdev;
2749 struct pending_cmd *cmd;
2750
2751 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2752 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
2753 continue;
2754
2755 if (cmd->user_data != conn)
2756 continue;
2757
2758 return cmd;
2759 }
2760
2761 return NULL;
2762 }
2763
/* Finish a pending Pair Device command: send the command reply with
 * @status, detach the pairing callbacks from the connection so no
 * further completion events arrive, drop the connection reference and
 * free the pending command entry.
 */
static void pairing_complete(struct pending_cmd *cmd, u8 status)
{
	struct mgmt_rp_pair_device rp;
	struct hci_conn *conn = cmd->user_data;

	bacpy(&rp.addr.bdaddr, &conn->dst);
	rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, status,
		     &rp, sizeof(rp));

	/* So we don't get further callbacks for this connection */
	conn->connect_cfm_cb = NULL;
	conn->security_cfm_cb = NULL;
	conn->disconn_cfm_cb = NULL;

	hci_conn_drop(conn);

	mgmt_pending_remove(cmd);
}
2784
2785 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
2786 {
2787 u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
2788 struct pending_cmd *cmd;
2789
2790 cmd = find_pairing(conn);
2791 if (cmd)
2792 pairing_complete(cmd, status);
2793 }
2794
2795 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
2796 {
2797 struct pending_cmd *cmd;
2798
2799 BT_DBG("status %u", status);
2800
2801 cmd = find_pairing(conn);
2802 if (!cmd)
2803 BT_DBG("Unable to find a pending command");
2804 else
2805 pairing_complete(cmd, mgmt_status(status));
2806 }
2807
2808 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
2809 {
2810 struct pending_cmd *cmd;
2811
2812 BT_DBG("status %u", status);
2813
2814 if (!status)
2815 return;
2816
2817 cmd = find_pairing(conn);
2818 if (!cmd)
2819 BT_DBG("Unable to find a pending command");
2820 else
2821 pairing_complete(cmd, mgmt_status(status));
2822 }
2823
/* Initiate pairing (dedicated bonding) with a remote device. A BR/EDR
 * or LE connection is created as needed and the mgmt reply is deferred
 * to the pairing completion callbacks attached to the connection.
 */
static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_pair_device *cp = data;
	struct mgmt_rp_pair_device rp;
	struct pending_cmd *cmd;
	u8 sec_level, auth_type;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	/* The reply always echoes the target address */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto unlock;
	}

	sec_level = BT_SECURITY_MEDIUM;
	auth_type = HCI_AT_DEDICATED_BONDING;

	if (cp->addr.type == BDADDR_BREDR) {
		conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
				       auth_type);
	} else {
		u8 addr_type;

		/* Convert from L2CAP channel address type to HCI address type
		 */
		if (cp->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

		conn = hci_connect_le(hdev, &cp->addr.bdaddr, addr_type,
				      sec_level, auth_type);
	}

	if (IS_ERR(conn)) {
		int status;

		if (PTR_ERR(conn) == -EBUSY)
			status = MGMT_STATUS_BUSY;
		else
			status = MGMT_STATUS_CONNECT_FAILED;

		err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				   status, &rp,
				   sizeof(rp));
		goto unlock;
	}

	/* A callback already set means another pairing is using this
	 * connection; give back the reference taken by hci_connect_*.
	 */
	if (conn->connect_cfm_cb) {
		hci_conn_drop(conn);
		err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				   MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		hci_conn_drop(conn);
		goto unlock;
	}

	/* For LE, just connecting isn't a proof that the pairing finished */
	if (cp->addr.type == BDADDR_BREDR) {
		conn->connect_cfm_cb = pairing_complete_cb;
		conn->security_cfm_cb = pairing_complete_cb;
		conn->disconn_cfm_cb = pairing_complete_cb;
	} else {
		conn->connect_cfm_cb = le_pairing_complete_cb;
		conn->security_cfm_cb = le_pairing_complete_cb;
		conn->disconn_cfm_cb = le_pairing_complete_cb;
	}

	conn->io_capability = cp->io_cap;
	cmd->user_data = conn;

	/* Already connected and secure enough: finish immediately */
	if (conn->state == BT_CONNECTED &&
	    hci_conn_security(conn, sec_level, auth_type))
		pairing_complete(cmd, 0);

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2925
/* Abort an ongoing Pair Device command for the given address, failing
 * the pending pairing with MGMT_STATUS_CANCELLED. The address must
 * match the connection the pending pairing is using.
 */
static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_addr_info *addr = data;
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				 MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	/* There must be a pairing in progress to cancel */
	cmd = mgmt_pending_find(MGMT_OP_PAIR_DEVICE, hdev);
	if (!cmd) {
		err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	conn = cmd->user_data;

	/* The supplied address must match the device being paired */
	if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	pairing_complete(cmd, MGMT_STATUS_CANCELLED);

	err = cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
			   addr, sizeof(*addr));
unlock:
	hci_dev_unlock(hdev);
	return err;
}
2967
/* Common handler for the user confirmation/passkey (negative) reply
 * commands. LE responses are forwarded to SMP and answered right away;
 * BR/EDR responses are sent to the controller as @hci_op and the mgmt
 * reply is deferred until the HCI command completes.
 */
static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_addr_info *addr, u16 mgmt_op,
			     u16 hci_op, __le32 passkey)
{
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, mgmt_op,
				   MGMT_STATUS_NOT_POWERED, addr,
				   sizeof(*addr));
		goto done;
	}

	if (addr->type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &addr->bdaddr);

	if (!conn) {
		err = cmd_complete(sk, hdev->id, mgmt_op,
				   MGMT_STATUS_NOT_CONNECTED, addr,
				   sizeof(*addr));
		goto done;
	}

	if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
		/* Continue with pairing via SMP */
		err = smp_user_confirm_reply(conn, mgmt_op, passkey);

		if (!err)
			err = cmd_complete(sk, hdev->id, mgmt_op,
					   MGMT_STATUS_SUCCESS, addr,
					   sizeof(*addr));
		else
			err = cmd_complete(sk, hdev->id, mgmt_op,
					   MGMT_STATUS_FAILED, addr,
					   sizeof(*addr));

		goto done;
	}

	cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
	if (!cmd) {
		err = -ENOMEM;
		goto done;
	}

	/* Continue with pairing via HCI */
	if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
		struct hci_cp_user_passkey_reply cp;

		/* Only the passkey reply carries a payload beyond bdaddr */
		bacpy(&cp.bdaddr, &addr->bdaddr);
		cp.passkey = passkey;
		err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
	} else
		err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
				   &addr->bdaddr);

	if (err < 0)
		mgmt_pending_remove(cmd);

done:
	hci_dev_unlock(hdev);
	return err;
}
3037
3038 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3039 void *data, u16 len)
3040 {
3041 struct mgmt_cp_pin_code_neg_reply *cp = data;
3042
3043 BT_DBG("");
3044
3045 return user_pairing_resp(sk, hdev, &cp->addr,
3046 MGMT_OP_PIN_CODE_NEG_REPLY,
3047 HCI_OP_PIN_CODE_NEG_REPLY, 0);
3048 }
3049
3050 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3051 u16 len)
3052 {
3053 struct mgmt_cp_user_confirm_reply *cp = data;
3054
3055 BT_DBG("");
3056
3057 if (len != sizeof(*cp))
3058 return cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
3059 MGMT_STATUS_INVALID_PARAMS);
3060
3061 return user_pairing_resp(sk, hdev, &cp->addr,
3062 MGMT_OP_USER_CONFIRM_REPLY,
3063 HCI_OP_USER_CONFIRM_REPLY, 0);
3064 }
3065
3066 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
3067 void *data, u16 len)
3068 {
3069 struct mgmt_cp_user_confirm_neg_reply *cp = data;
3070
3071 BT_DBG("");
3072
3073 return user_pairing_resp(sk, hdev, &cp->addr,
3074 MGMT_OP_USER_CONFIRM_NEG_REPLY,
3075 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
3076 }
3077
3078 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3079 u16 len)
3080 {
3081 struct mgmt_cp_user_passkey_reply *cp = data;
3082
3083 BT_DBG("");
3084
3085 return user_pairing_resp(sk, hdev, &cp->addr,
3086 MGMT_OP_USER_PASSKEY_REPLY,
3087 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
3088 }
3089
3090 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
3091 void *data, u16 len)
3092 {
3093 struct mgmt_cp_user_passkey_neg_reply *cp = data;
3094
3095 BT_DBG("");
3096
3097 return user_pairing_resp(sk, hdev, &cp->addr,
3098 MGMT_OP_USER_PASSKEY_NEG_REPLY,
3099 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
3100 }
3101
3102 static void update_name(struct hci_request *req)
3103 {
3104 struct hci_dev *hdev = req->hdev;
3105 struct hci_cp_write_local_name cp;
3106
3107 memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
3108
3109 hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
3110 }
3111
/* HCI request completion handler for Set Local Name: forward the HCI
 * result to the socket that issued the pending mgmt command, echoing
 * the requested name on success.
 */
static void set_name_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_cp_set_local_name *cp;
	struct pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	/* The pending command may already be gone (e.g. socket closed) */
	cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd)
		goto unlock;

	cp = cmd->param;

	if (status)
		cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
			   mgmt_status(status));
	else
		cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
			     cp, sizeof(*cp));

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
3139
/* Set the local device name and short name. When powered, the name is
 * written to the controller and the EIR/scan-response data refreshed,
 * with the reply deferred to set_name_complete(). When powered off only
 * the stored copies are updated and a Local Name Changed event is sent.
 */
static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_local_name *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	/* If the old values are the same as the new ones just return a
	 * direct command complete event.
	 */
	if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
	    !memcmp(hdev->short_name, cp->short_name,
		    sizeof(hdev->short_name))) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				   data, len);
		goto failed;
	}

	memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));

	/* Powered off: just store the name and notify listeners */
	if (!hdev_is_powered(hdev)) {
		memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				   data, len);
		if (err < 0)
			goto failed;

		err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data, len,
				 sk);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

	hci_req_init(&req, hdev);

	if (lmp_bredr_capable(hdev)) {
		update_name(&req);
		update_eir(&req);
	}

	/* The name is stored in the scan response data and so
	 * no need to udpate the advertising data here.
	 */
	if (lmp_le_capable(hdev))
		update_scan_rsp_data(&req);

	err = hci_req_run(&req, set_name_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3208
/* Read local out-of-band pairing data from the controller. When Secure
 * Connections is enabled the extended variant (P-192 + P-256 values) is
 * requested. The reply is deferred until the HCI command completes.
 */
static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct pending_cmd *cmd;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				 MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	/* OOB data requires Secure Simple Pairing support */
	if (!lmp_ssp_capable(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				 MGMT_STATUS_NOT_SUPPORTED);
		goto unlock;
	}

	/* Only one OOB-data read may be in flight at a time */
	if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags))
		err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_EXT_DATA,
				   0, NULL);
	else
		err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);

	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3256
/* Store out-of-band pairing data received for a remote device. Two
 * request sizes are accepted: the legacy form carrying a single
 * hash/randomizer pair and the extended form carrying both P-192 and
 * P-256 values. Any other length is rejected.
 */
static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	int err;

	BT_DBG("%s ", hdev->name);

	hci_dev_lock(hdev);

	if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
		struct mgmt_cp_add_remote_oob_data *cp = data;
		u8 status;

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->hash, cp->randomizer);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				   status, &cp->addr, sizeof(cp->addr));
	} else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
		struct mgmt_cp_add_remote_oob_ext_data *cp = data;
		u8 status;

		err = hci_add_remote_oob_ext_data(hdev, &cp->addr.bdaddr,
						  cp->hash192,
						  cp->randomizer192,
						  cp->hash256,
						  cp->randomizer256);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				   status, &cp->addr, sizeof(cp->addr));
	} else {
		BT_ERR("add_remote_oob_data: invalid length of %u bytes", len);
		err = cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				 MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_unlock(hdev);
	return err;
}
3304
3305 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3306 void *data, u16 len)
3307 {
3308 struct mgmt_cp_remove_remote_oob_data *cp = data;
3309 u8 status;
3310 int err;
3311
3312 BT_DBG("%s", hdev->name);
3313
3314 hci_dev_lock(hdev);
3315
3316 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr);
3317 if (err < 0)
3318 status = MGMT_STATUS_INVALID_PARAMS;
3319 else
3320 status = MGMT_STATUS_SUCCESS;
3321
3322 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
3323 status, &cp->addr, sizeof(cp->addr));
3324
3325 hci_dev_unlock(hdev);
3326 return err;
3327 }
3328
3329 static int mgmt_start_discovery_failed(struct hci_dev *hdev, u8 status)
3330 {
3331 struct pending_cmd *cmd;
3332 u8 type;
3333 int err;
3334
3335 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3336
3337 cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
3338 if (!cmd)
3339 return -ENOENT;
3340
3341 type = hdev->discovery.type;
3342
3343 err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
3344 &type, sizeof(type));
3345 mgmt_pending_remove(cmd);
3346
3347 return err;
3348 }
3349
/* HCI request completion handler for Start Discovery. On failure the
 * pending command is failed and discovery returns to STOPPED. On
 * success discovery enters FINDING and, for LE-based discovery types,
 * the LE scan-disable work is scheduled after the discovery timeout.
 */
static void start_discovery_complete(struct hci_dev *hdev, u8 status)
{
	unsigned long timeout = 0;

	BT_DBG("status %d", status);

	if (status) {
		hci_dev_lock(hdev);
		mgmt_start_discovery_failed(hdev, status);
		hci_dev_unlock(hdev);
		return;
	}

	hci_dev_lock(hdev);
	hci_discovery_set_state(hdev, DISCOVERY_FINDING);
	hci_dev_unlock(hdev);

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_LE:
		timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
		break;

	case DISCOV_TYPE_INTERLEAVED:
		timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
		break;

	case DISCOV_TYPE_BREDR:
		/* BR/EDR inquiry needs no LE scan timeout */
		break;

	default:
		BT_ERR("Invalid discovery type %d", hdev->discovery.type);
	}

	/* timeout stays 0 for BR/EDR-only (and invalid) discovery types */
	if (!timeout)
		return;

	queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable, timeout);
}
3388
/* Start device discovery of the requested type (BR/EDR inquiry, LE
 * scan, or interleaved). Builds and submits the appropriate HCI
 * request; the reply is deferred to start_discovery_complete().
 */
static int start_discovery(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_cp_start_discovery *cp = data;
	struct pending_cmd *cmd;
	struct hci_cp_le_set_scan_param param_cp;
	struct hci_cp_le_set_scan_enable enable_cp;
	struct hci_cp_inquiry inq_cp;
	struct hci_request req;
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	u8 status, own_addr_type;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				 MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	/* Periodic inquiry and discovery are mutually exclusive */
	if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Only one discovery session at a time */
	if (hdev->discovery.state != DISCOVERY_STOPPED) {
		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, hdev, NULL, 0);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	hdev->discovery.type = cp->type;

	hci_req_init(&req, hdev);

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_BREDR:
		status = mgmt_bredr_support(hdev);
		if (status) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 status);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		/* An inquiry started outside of mgmt blocks discovery */
		if (test_bit(HCI_INQUIRY, &hdev->flags)) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 MGMT_STATUS_BUSY);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		hci_inquiry_cache_flush(hdev);

		memset(&inq_cp, 0, sizeof(inq_cp));
		memcpy(&inq_cp.lap, lap, sizeof(inq_cp.lap));
		inq_cp.length = DISCOV_BREDR_INQUIRY_LEN;
		hci_req_add(&req, HCI_OP_INQUIRY, sizeof(inq_cp), &inq_cp);
		break;

	case DISCOV_TYPE_LE:
	case DISCOV_TYPE_INTERLEAVED:
		status = mgmt_le_support(hdev);
		if (status) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 status);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		/* Interleaved discovery additionally needs BR/EDR */
		if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED &&
		    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 MGMT_STATUS_NOT_SUPPORTED);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		/* Cannot scan while advertising */
		if (test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 MGMT_STATUS_REJECTED);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		/* If controller is scanning, it means the background scanning
		 * is running. Thus, we should temporarily stop it in order to
		 * set the discovery scanning parameters.
		 */
		if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
			hci_req_add_le_scan_disable(&req);

		memset(&param_cp, 0, sizeof(param_cp));

		/* All active scans will be done with either a resolvable
		 * private address (when privacy feature has been enabled)
		 * or unresolvable private address.
		 */
		err = hci_update_random_address(&req, true, &own_addr_type);
		if (err < 0) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 MGMT_STATUS_FAILED);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		param_cp.type = LE_SCAN_ACTIVE;
		param_cp.interval = cpu_to_le16(DISCOV_LE_SCAN_INT);
		param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
		param_cp.own_address_type = own_addr_type;
		hci_req_add(&req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
			    &param_cp);

		memset(&enable_cp, 0, sizeof(enable_cp));
		enable_cp.enable = LE_SCAN_ENABLE;
		enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
		hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
			    &enable_cp);
		break;

	default:
		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				 MGMT_STATUS_INVALID_PARAMS);
		mgmt_pending_remove(cmd);
		goto failed;
	}

	err = hci_req_run(&req, start_discovery_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);
	else
		hci_discovery_set_state(hdev, DISCOVERY_STARTING);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3537
3538 static int mgmt_stop_discovery_failed(struct hci_dev *hdev, u8 status)
3539 {
3540 struct pending_cmd *cmd;
3541 int err;
3542
3543 cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
3544 if (!cmd)
3545 return -ENOENT;
3546
3547 err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
3548 &hdev->discovery.type, sizeof(hdev->discovery.type));
3549 mgmt_pending_remove(cmd);
3550
3551 return err;
3552 }
3553
3554 static void stop_discovery_complete(struct hci_dev *hdev, u8 status)
3555 {
3556 BT_DBG("status %d", status);
3557
3558 hci_dev_lock(hdev);
3559
3560 if (status) {
3561 mgmt_stop_discovery_failed(hdev, status);
3562 goto unlock;
3563 }
3564
3565 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3566
3567 unlock:
3568 hci_dev_unlock(hdev);
3569 }
3570
/* Stop an active discovery session of the given type. Depending on the
 * current discovery state this cancels the inquiry, disables the LE
 * scan, or cancels a pending remote name request. The mgmt reply is
 * deferred to stop_discovery_complete().
 */
static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_stop_discovery *mgmt_cp = data;
	struct pending_cmd *cmd;
	struct hci_cp_remote_name_req_cancel cp;
	struct inquiry_entry *e;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hci_discovery_active(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
				   MGMT_STATUS_REJECTED, &mgmt_cp->type,
				   sizeof(mgmt_cp->type));
		goto unlock;
	}

	/* The requested type must match the running discovery session */
	if (hdev->discovery.type != mgmt_cp->type) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
				   MGMT_STATUS_INVALID_PARAMS, &mgmt_cp->type,
				   sizeof(mgmt_cp->type));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, NULL, 0);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	switch (hdev->discovery.state) {
	case DISCOVERY_FINDING:
		/* HCI_INQUIRY set means a BR/EDR inquiry is running;
		 * otherwise an LE scan must be active.
		 */
		if (test_bit(HCI_INQUIRY, &hdev->flags)) {
			hci_req_add(&req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
		} else {
			cancel_delayed_work(&hdev->le_scan_disable);

			hci_req_add_le_scan_disable(&req);
		}

		break;

	case DISCOVERY_RESOLVING:
		e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
						     NAME_PENDING);
		/* No name request pending: nothing to cancel, so the
		 * discovery can be reported stopped immediately.
		 */
		if (!e) {
			mgmt_pending_remove(cmd);
			err = cmd_complete(sk, hdev->id,
					   MGMT_OP_STOP_DISCOVERY, 0,
					   &mgmt_cp->type,
					   sizeof(mgmt_cp->type));
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
			goto unlock;
		}

		bacpy(&cp.bdaddr, &e->data.bdaddr);
		hci_req_add(&req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
			    &cp);

		break;

	default:
		BT_DBG("unknown discovery state %u", hdev->discovery.state);

		mgmt_pending_remove(cmd);
		err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
				   MGMT_STATUS_FAILED, &mgmt_cp->type,
				   sizeof(mgmt_cp->type));
		goto unlock;
	}

	err = hci_req_run(&req, stop_discovery_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);
	else
		hci_discovery_set_state(hdev, DISCOVERY_STOPPING);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3658
3659 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
3660 u16 len)
3661 {
3662 struct mgmt_cp_confirm_name *cp = data;
3663 struct inquiry_entry *e;
3664 int err;
3665
3666 BT_DBG("%s", hdev->name);
3667
3668 hci_dev_lock(hdev);
3669
3670 if (!hci_discovery_active(hdev)) {
3671 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
3672 MGMT_STATUS_FAILED, &cp->addr,
3673 sizeof(cp->addr));
3674 goto failed;
3675 }
3676
3677 e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
3678 if (!e) {
3679 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
3680 MGMT_STATUS_INVALID_PARAMS, &cp->addr,
3681 sizeof(cp->addr));
3682 goto failed;
3683 }
3684
3685 if (cp->name_known) {
3686 e->name_state = NAME_KNOWN;
3687 list_del(&e->list);
3688 } else {
3689 e->name_state = NAME_NEEDED;
3690 hci_inquiry_cache_update_resolve(hdev, e);
3691 }
3692
3693 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0, &cp->addr,
3694 sizeof(cp->addr));
3695
3696 failed:
3697 hci_dev_unlock(hdev);
3698 return err;
3699 }
3700
3701 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
3702 u16 len)
3703 {
3704 struct mgmt_cp_block_device *cp = data;
3705 u8 status;
3706 int err;
3707
3708 BT_DBG("%s", hdev->name);
3709
3710 if (!bdaddr_type_is_valid(cp->addr.type))
3711 return cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
3712 MGMT_STATUS_INVALID_PARAMS,
3713 &cp->addr, sizeof(cp->addr));
3714
3715 hci_dev_lock(hdev);
3716
3717 err = hci_blacklist_add(hdev, &cp->addr.bdaddr, cp->addr.type);
3718 if (err < 0)
3719 status = MGMT_STATUS_FAILED;
3720 else
3721 status = MGMT_STATUS_SUCCESS;
3722
3723 err = cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
3724 &cp->addr, sizeof(cp->addr));
3725
3726 hci_dev_unlock(hdev);
3727
3728 return err;
3729 }
3730
3731 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
3732 u16 len)
3733 {
3734 struct mgmt_cp_unblock_device *cp = data;
3735 u8 status;
3736 int err;
3737
3738 BT_DBG("%s", hdev->name);
3739
3740 if (!bdaddr_type_is_valid(cp->addr.type))
3741 return cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
3742 MGMT_STATUS_INVALID_PARAMS,
3743 &cp->addr, sizeof(cp->addr));
3744
3745 hci_dev_lock(hdev);
3746
3747 err = hci_blacklist_del(hdev, &cp->addr.bdaddr, cp->addr.type);
3748 if (err < 0)
3749 status = MGMT_STATUS_INVALID_PARAMS;
3750 else
3751 status = MGMT_STATUS_SUCCESS;
3752
3753 err = cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
3754 &cp->addr, sizeof(cp->addr));
3755
3756 hci_dev_unlock(hdev);
3757
3758 return err;
3759 }
3760
3761 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
3762 u16 len)
3763 {
3764 struct mgmt_cp_set_device_id *cp = data;
3765 struct hci_request req;
3766 int err;
3767 __u16 source;
3768
3769 BT_DBG("%s", hdev->name);
3770
3771 source = __le16_to_cpu(cp->source);
3772
3773 if (source > 0x0002)
3774 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
3775 MGMT_STATUS_INVALID_PARAMS);
3776
3777 hci_dev_lock(hdev);
3778
3779 hdev->devid_source = source;
3780 hdev->devid_vendor = __le16_to_cpu(cp->vendor);
3781 hdev->devid_product = __le16_to_cpu(cp->product);
3782 hdev->devid_version = __le16_to_cpu(cp->version);
3783
3784 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0, NULL, 0);
3785
3786 hci_req_init(&req, hdev);
3787 update_eir(&req);
3788 hci_req_run(&req, NULL);
3789
3790 hci_dev_unlock(hdev);
3791
3792 return err;
3793 }
3794
3795 static void set_advertising_complete(struct hci_dev *hdev, u8 status)
3796 {
3797 struct cmd_lookup match = { NULL, hdev };
3798
3799 if (status) {
3800 u8 mgmt_err = mgmt_status(status);
3801
3802 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
3803 cmd_status_rsp, &mgmt_err);
3804 return;
3805 }
3806
3807 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
3808 &match);
3809
3810 new_settings(hdev, match.sk);
3811
3812 if (match.sk)
3813 sock_put(match.sk);
3814 }
3815
/* Handler for the MGMT_OP_SET_ADVERTISING command.
 *
 * Enables or disables LE advertising. When no HCI traffic is needed
 * (powered off, no change, or LE connections active) only the setting
 * flag is toggled and user space is answered directly; otherwise the
 * result arrives asynchronously via set_advertising_complete().
 */
static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u8 val, enabled, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_le_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				  status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = test_bit(HCI_ADVERTISING, &hdev->dev_flags);

	/* The following conditions are ones which mean that we should
	 * not do any HCI communication but directly send a mgmt
	 * response to user space (after toggling the flag if
	 * necessary).
	 */
	if (!hdev_is_powered(hdev) || val == enabled ||
	    hci_conn_num(hdev, LE_LINK) > 0) {
		bool changed = false;

		if (val != test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
			change_bit(HCI_ADVERTISING, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
		if (err < 0)
			goto unlock;

		/* Only emit New Settings if the flag actually changed */
		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	/* Only one advertising or LE state change may be in flight */
	if (mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_LE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	if (val)
		enable_advertising(&req);
	else
		disable_advertising(&req);

	err = hci_req_run(&req, set_advertising_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3893
3894 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
3895 void *data, u16 len)
3896 {
3897 struct mgmt_cp_set_static_address *cp = data;
3898 int err;
3899
3900 BT_DBG("%s", hdev->name);
3901
3902 if (!lmp_le_capable(hdev))
3903 return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
3904 MGMT_STATUS_NOT_SUPPORTED);
3905
3906 if (hdev_is_powered(hdev))
3907 return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
3908 MGMT_STATUS_REJECTED);
3909
3910 if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
3911 if (!bacmp(&cp->bdaddr, BDADDR_NONE))
3912 return cmd_status(sk, hdev->id,
3913 MGMT_OP_SET_STATIC_ADDRESS,
3914 MGMT_STATUS_INVALID_PARAMS);
3915
3916 /* Two most significant bits shall be set */
3917 if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
3918 return cmd_status(sk, hdev->id,
3919 MGMT_OP_SET_STATIC_ADDRESS,
3920 MGMT_STATUS_INVALID_PARAMS);
3921 }
3922
3923 hci_dev_lock(hdev);
3924
3925 bacpy(&hdev->static_addr, &cp->bdaddr);
3926
3927 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS, 0, NULL, 0);
3928
3929 hci_dev_unlock(hdev);
3930
3931 return err;
3932 }
3933
3934 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
3935 void *data, u16 len)
3936 {
3937 struct mgmt_cp_set_scan_params *cp = data;
3938 __u16 interval, window;
3939 int err;
3940
3941 BT_DBG("%s", hdev->name);
3942
3943 if (!lmp_le_capable(hdev))
3944 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
3945 MGMT_STATUS_NOT_SUPPORTED);
3946
3947 interval = __le16_to_cpu(cp->interval);
3948
3949 if (interval < 0x0004 || interval > 0x4000)
3950 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
3951 MGMT_STATUS_INVALID_PARAMS);
3952
3953 window = __le16_to_cpu(cp->window);
3954
3955 if (window < 0x0004 || window > 0x4000)
3956 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
3957 MGMT_STATUS_INVALID_PARAMS);
3958
3959 if (window > interval)
3960 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
3961 MGMT_STATUS_INVALID_PARAMS);
3962
3963 hci_dev_lock(hdev);
3964
3965 hdev->le_scan_interval = interval;
3966 hdev->le_scan_window = window;
3967
3968 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0, NULL, 0);
3969
3970 /* If background scan is running, restart it so new parameters are
3971 * loaded.
3972 */
3973 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags) &&
3974 hdev->discovery.state == DISCOVERY_STOPPED) {
3975 struct hci_request req;
3976
3977 hci_req_init(&req, hdev);
3978
3979 hci_req_add_le_scan_disable(&req);
3980 hci_req_add_le_passive_scan(&req);
3981
3982 hci_req_run(&req, NULL);
3983 }
3984
3985 hci_dev_unlock(hdev);
3986
3987 return err;
3988 }
3989
/* HCI request callback for Set Fast Connectable: commit the flag change
 * and notify user space, or report the translated HCI failure status.
 */
static void fast_connectable_complete(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
			   mgmt_status(status));
	} else {
		/* The requested mode was stored with the pending command */
		struct mgmt_mode *cp = cmd->param;

		if (cp->val)
			set_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
		else
			clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);

		send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
4022
/* Handler for the MGMT_OP_SET_FAST_CONNECTABLE command.
 *
 * Requires BR/EDR to be enabled, a controller of version 1.2 or later,
 * the adapter powered and connectable. The page scan change is applied
 * via an HCI request and completed in fast_connectable_complete().
 */
static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) ||
	    hdev->hci_ver < BLUETOOTH_VER_1_2)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	if (!hdev_is_powered(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_NOT_POWERED);

	if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	if (mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* No HCI work needed if the flag already has the requested value */
	if (!!cp->val == test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
					hdev);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
			       data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	write_fast_connectable(&req, cp->val);

	err = hci_req_run(&req, fast_connectable_complete);
	if (err < 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				 MGMT_STATUS_FAILED);
		mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
4087
4088 static void set_bredr_scan(struct hci_request *req)
4089 {
4090 struct hci_dev *hdev = req->hdev;
4091 u8 scan = 0;
4092
4093 /* Ensure that fast connectable is disabled. This function will
4094 * not do anything if the page scan parameters are already what
4095 * they should be.
4096 */
4097 write_fast_connectable(req, false);
4098
4099 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
4100 scan |= SCAN_PAGE;
4101 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
4102 scan |= SCAN_INQUIRY;
4103
4104 if (scan)
4105 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
4106 }
4107
/* HCI request callback for Set BR/EDR: on failure roll back the
 * optimistically-set HCI_BREDR_ENABLED flag (set_bredr() flips it before
 * running the request), otherwise confirm the new settings.
 */
static void set_bredr_complete(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_BREDR, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* We need to restore the flag if related HCI commands
		 * failed.
		 */
		clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

		cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
	} else {
		send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
4139
/* Handler for the MGMT_OP_SET_BREDR command.
 *
 * Enables or disables BR/EDR support on a dual-mode (LE capable)
 * controller. While powered, only enabling is permitted; the flag is
 * flipped before the HCI request runs and rolled back on failure in
 * set_bredr_complete().
 */
static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* No change requested: just report the current settings */
	if (cp->val == test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		goto unlock;
	}

	if (!hdev_is_powered(hdev)) {
		/* Disabling BR/EDR also clears all BR/EDR-only settings */
		if (!cp->val) {
			clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
			clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
			clear_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
			clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
			clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
		}

		change_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		if (err < 0)
			goto unlock;

		err = new_settings(hdev, sk);
		goto unlock;
	}

	/* Reject disabling when powered on */
	if (!cp->val) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				 MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (mgmt_pending_find(MGMT_OP_SET_BREDR, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* We need to flip the bit already here so that update_adv_data
	 * generates the correct flags.
	 */
	set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	hci_req_init(&req, hdev);

	if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
		set_bredr_scan(&req);

	/* Since only the advertising data flags will change, there
	 * is no need to update the scan response data.
	 */
	update_adv_data(&req);

	err = hci_req_run(&req, set_bredr_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4229
/* Handler for the MGMT_OP_SET_SECURE_CONN command.
 *
 * Accepted values: 0x00 (off), 0x01 (Secure Connections enabled) and
 * 0x02 (Secure Connections Only mode, tracked via HCI_SC_ONLY). When
 * powered off only the flags are toggled; otherwise a Write Secure
 * Connections Host Support command is sent to the controller.
 */
static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	u8 val, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				  status);

	/* HCI_FORCE_SC allows overriding missing controller SC support */
	if (!lmp_sc_capable(hdev) &&
	    !test_bit(HCI_FORCE_SC, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed;

		if (cp->val) {
			changed = !test_and_set_bit(HCI_SC_ENABLED,
						    &hdev->dev_flags);
			if (cp->val == 0x02)
				set_bit(HCI_SC_ONLY, &hdev->dev_flags);
			else
				clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
		} else {
			changed = test_and_clear_bit(HCI_SC_ENABLED,
						     &hdev->dev_flags);
			clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Both the enabled state and the SC-only mode already match */
	if (val == test_bit(HCI_SC_ENABLED, &hdev->dev_flags) &&
	    (cp->val == 0x02) == test_bit(HCI_SC_ONLY, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

	if (cp->val == 0x02)
		set_bit(HCI_SC_ONLY, &hdev->dev_flags);
	else
		clear_bit(HCI_SC_ONLY, &hdev->dev_flags);

failed:
	hci_dev_unlock(hdev);
	return err;
}
4317
4318 static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
4319 void *data, u16 len)
4320 {
4321 struct mgmt_mode *cp = data;
4322 bool changed;
4323 int err;
4324
4325 BT_DBG("request for %s", hdev->name);
4326
4327 if (cp->val != 0x00 && cp->val != 0x01)
4328 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
4329 MGMT_STATUS_INVALID_PARAMS);
4330
4331 hci_dev_lock(hdev);
4332
4333 if (cp->val)
4334 changed = !test_and_set_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
4335 else
4336 changed = test_and_clear_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
4337
4338 err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
4339 if (err < 0)
4340 goto unlock;
4341
4342 if (changed)
4343 err = new_settings(hdev, sk);
4344
4345 unlock:
4346 hci_dev_unlock(hdev);
4347 return err;
4348 }
4349
/* Handler for the MGMT_OP_SET_PRIVACY command.
 *
 * Enables or disables LE privacy (resolvable private addresses) and
 * stores the local Identity Resolving Key. Only allowed while the
 * controller is powered off.
 */
static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		       u16 len)
{
	struct mgmt_cp_set_privacy *cp = cp_data;
	bool changed;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->privacy != 0x00 && cp->privacy != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				  MGMT_STATUS_INVALID_PARAMS);

	if (hdev_is_powered(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				  MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* If user space supports this command it is also expected to
	 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
	 */
	set_bit(HCI_RPA_RESOLVING, &hdev->dev_flags);

	if (cp->privacy) {
		changed = !test_and_set_bit(HCI_PRIVACY, &hdev->dev_flags);
		memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
		/* Force generation of a fresh RPA from the new IRK */
		set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
	} else {
		changed = test_and_clear_bit(HCI_PRIVACY, &hdev->dev_flags);
		memset(hdev->irk, 0, sizeof(hdev->irk));
		clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4399
4400 static bool irk_is_valid(struct mgmt_irk_info *irk)
4401 {
4402 switch (irk->addr.type) {
4403 case BDADDR_LE_PUBLIC:
4404 return true;
4405
4406 case BDADDR_LE_RANDOM:
4407 /* Two most significant bits shall be set */
4408 if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
4409 return false;
4410 return true;
4411 }
4412
4413 return false;
4414 }
4415
4416 static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
4417 u16 len)
4418 {
4419 struct mgmt_cp_load_irks *cp = cp_data;
4420 u16 irk_count, expected_len;
4421 int i, err;
4422
4423 BT_DBG("request for %s", hdev->name);
4424
4425 if (!lmp_le_capable(hdev))
4426 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
4427 MGMT_STATUS_NOT_SUPPORTED);
4428
4429 irk_count = __le16_to_cpu(cp->irk_count);
4430
4431 expected_len = sizeof(*cp) + irk_count * sizeof(struct mgmt_irk_info);
4432 if (expected_len != len) {
4433 BT_ERR("load_irks: expected %u bytes, got %u bytes",
4434 expected_len, len);
4435 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
4436 MGMT_STATUS_INVALID_PARAMS);
4437 }
4438
4439 BT_DBG("%s irk_count %u", hdev->name, irk_count);
4440
4441 for (i = 0; i < irk_count; i++) {
4442 struct mgmt_irk_info *key = &cp->irks[i];
4443
4444 if (!irk_is_valid(key))
4445 return cmd_status(sk, hdev->id,
4446 MGMT_OP_LOAD_IRKS,
4447 MGMT_STATUS_INVALID_PARAMS);
4448 }
4449
4450 hci_dev_lock(hdev);
4451
4452 hci_smp_irks_clear(hdev);
4453
4454 for (i = 0; i < irk_count; i++) {
4455 struct mgmt_irk_info *irk = &cp->irks[i];
4456 u8 addr_type;
4457
4458 if (irk->addr.type == BDADDR_LE_PUBLIC)
4459 addr_type = ADDR_LE_DEV_PUBLIC;
4460 else
4461 addr_type = ADDR_LE_DEV_RANDOM;
4462
4463 hci_add_irk(hdev, &irk->addr.bdaddr, addr_type, irk->val,
4464 BDADDR_ANY);
4465 }
4466
4467 set_bit(HCI_RPA_RESOLVING, &hdev->dev_flags);
4468
4469 err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);
4470
4471 hci_dev_unlock(hdev);
4472
4473 return err;
4474 }
4475
4476 static bool ltk_is_valid(struct mgmt_ltk_info *key)
4477 {
4478 if (key->master != 0x00 && key->master != 0x01)
4479 return false;
4480
4481 switch (key->addr.type) {
4482 case BDADDR_LE_PUBLIC:
4483 return true;
4484
4485 case BDADDR_LE_RANDOM:
4486 /* Two most significant bits shall be set */
4487 if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
4488 return false;
4489 return true;
4490 }
4491
4492 return false;
4493 }
4494
4495 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
4496 void *cp_data, u16 len)
4497 {
4498 struct mgmt_cp_load_long_term_keys *cp = cp_data;
4499 u16 key_count, expected_len;
4500 int i, err;
4501
4502 BT_DBG("request for %s", hdev->name);
4503
4504 if (!lmp_le_capable(hdev))
4505 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
4506 MGMT_STATUS_NOT_SUPPORTED);
4507
4508 key_count = __le16_to_cpu(cp->key_count);
4509
4510 expected_len = sizeof(*cp) + key_count *
4511 sizeof(struct mgmt_ltk_info);
4512 if (expected_len != len) {
4513 BT_ERR("load_keys: expected %u bytes, got %u bytes",
4514 expected_len, len);
4515 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
4516 MGMT_STATUS_INVALID_PARAMS);
4517 }
4518
4519 BT_DBG("%s key_count %u", hdev->name, key_count);
4520
4521 for (i = 0; i < key_count; i++) {
4522 struct mgmt_ltk_info *key = &cp->keys[i];
4523
4524 if (!ltk_is_valid(key))
4525 return cmd_status(sk, hdev->id,
4526 MGMT_OP_LOAD_LONG_TERM_KEYS,
4527 MGMT_STATUS_INVALID_PARAMS);
4528 }
4529
4530 hci_dev_lock(hdev);
4531
4532 hci_smp_ltks_clear(hdev);
4533
4534 for (i = 0; i < key_count; i++) {
4535 struct mgmt_ltk_info *key = &cp->keys[i];
4536 u8 type, addr_type;
4537
4538 if (key->addr.type == BDADDR_LE_PUBLIC)
4539 addr_type = ADDR_LE_DEV_PUBLIC;
4540 else
4541 addr_type = ADDR_LE_DEV_RANDOM;
4542
4543 if (key->master)
4544 type = HCI_SMP_LTK;
4545 else
4546 type = HCI_SMP_LTK_SLAVE;
4547
4548 hci_add_ltk(hdev, &key->addr.bdaddr, addr_type, type,
4549 key->type, key->val, key->enc_size, key->ediv,
4550 key->rand);
4551 }
4552
4553 err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
4554 NULL, 0);
4555
4556 hci_dev_unlock(hdev);
4557
4558 return err;
4559 }
4560
/* Context for matching pending Get Conn Info commands against the
 * connection whose values were just refreshed (see
 * conn_info_refresh_complete() and get_conn_info_complete()).
 */
struct cmd_conn_lookup {
	struct hci_conn *conn;
	bool valid_tx_power;	/* conn's TX power fields may be reported */
	u8 mgmt_status;		/* translated status to report */
};
4566
/* mgmt_pending_foreach() callback: complete one pending Get Conn Info
 * command if it refers to the connection in the lookup context, replying
 * with the (now refreshed) RSSI/TX power values, and drop the connection
 * reference taken in get_conn_info().
 */
static void get_conn_info_complete(struct pending_cmd *cmd, void *data)
{
	struct cmd_conn_lookup *match = data;
	struct mgmt_cp_get_conn_info *cp;
	struct mgmt_rp_get_conn_info rp;
	struct hci_conn *conn = cmd->user_data;

	/* Only answer commands waiting on this particular connection */
	if (conn != match->conn)
		return;

	cp = (struct mgmt_cp_get_conn_info *) cmd->param;

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!match->mgmt_status) {
		rp.rssi = conn->rssi;

		if (match->valid_tx_power) {
			rp.tx_power = conn->tx_power;
			rp.max_tx_power = conn->max_tx_power;
		} else {
			rp.tx_power = HCI_TX_POWER_INVALID;
			rp.max_tx_power = HCI_TX_POWER_INVALID;
		}
	}

	cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO,
		     match->mgmt_status, &rp, sizeof(rp));

	hci_conn_drop(conn);

	mgmt_pending_remove(cmd);
}
4602
/* HCI request callback for the RSSI/TX power refresh started from
 * get_conn_info(): recover the connection handle from the last sent
 * command and answer all pending Get Conn Info commands for it.
 */
static void conn_info_refresh_complete(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_read_rssi *cp;
	struct hci_conn *conn;
	struct cmd_conn_lookup match;
	u16 handle;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	/* TX power data is valid in case request completed successfully,
	 * otherwise we assume it's not valid. At the moment we assume that
	 * either both or none of current and max values are valid to keep code
	 * simple.
	 */
	match.valid_tx_power = !status;

	/* Commands sent in request are either Read RSSI or Read Transmit Power
	 * Level so we check which one was last sent to retrieve connection
	 * handle. Both commands have handle as first parameter so it's safe to
	 * cast data on the same command struct.
	 *
	 * First command sent is always Read RSSI and we fail only if it fails.
	 * In other case we simply override error to indicate success as we
	 * already remembered if TX power value is actually valid.
	 */
	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_RSSI);
	if (!cp) {
		cp = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
		status = 0;
	}

	if (!cp) {
		BT_ERR("invalid sent_cmd in response");
		goto unlock;
	}

	handle = __le16_to_cpu(cp->handle);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	if (!conn) {
		BT_ERR("unknown handle (%d) in response", handle);
		goto unlock;
	}

	match.conn = conn;
	match.mgmt_status = mgmt_status(status);

	/* Cache refresh is complete, now reply for mgmt request for given
	 * connection only.
	 */
	mgmt_pending_foreach(MGMT_OP_GET_CONN_INFO, hdev,
			     get_conn_info_complete, &match);

unlock:
	hci_dev_unlock(hdev);
}
4660
/* Handler for the MGMT_OP_GET_CONN_INFO command.
 *
 * Returns RSSI and TX power for an existing connection. Values cached in
 * the hci_conn are used while fresh enough; otherwise an HCI request is
 * queued and the reply is sent from conn_info_refresh_complete() /
 * get_conn_info_complete().
 */
static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_get_conn_info *cp = data;
	struct mgmt_rp_get_conn_info rp;
	struct hci_conn *conn;
	unsigned long conn_info_age;
	int err = 0;

	BT_DBG("%s", hdev->name);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state != BT_CONNECTED) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
				   MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
		goto unlock;
	}

	/* To avoid client trying to guess when to poll again for information we
	 * calculate conn info age as random value between min/max set in hdev.
	 */
	conn_info_age = hdev->conn_info_min_age +
			prandom_u32_max(hdev->conn_info_max_age -
					hdev->conn_info_min_age);

	/* Query controller to refresh cached values if they are too old or were
	 * never read.
	 */
	if (time_after(jiffies, conn->conn_info_timestamp +
		       msecs_to_jiffies(conn_info_age)) ||
	    !conn->conn_info_timestamp) {
		struct hci_request req;
		struct hci_cp_read_tx_power req_txp_cp;
		struct hci_cp_read_rssi req_rssi_cp;
		struct pending_cmd *cmd;

		hci_req_init(&req, hdev);
		req_rssi_cp.handle = cpu_to_le16(conn->handle);
		hci_req_add(&req, HCI_OP_READ_RSSI, sizeof(req_rssi_cp),
			    &req_rssi_cp);

		/* For LE links TX power does not change thus we don't need to
		 * query for it once value is known.
		 */
		if (!bdaddr_type_is_le(cp->addr.type) ||
		    conn->tx_power == HCI_TX_POWER_INVALID) {
			req_txp_cp.handle = cpu_to_le16(conn->handle);
			req_txp_cp.type = 0x00;
			hci_req_add(&req, HCI_OP_READ_TX_POWER,
				    sizeof(req_txp_cp), &req_txp_cp);
		}

		/* Max TX power needs to be read only once per connection */
		if (conn->max_tx_power == HCI_TX_POWER_INVALID) {
			req_txp_cp.handle = cpu_to_le16(conn->handle);
			req_txp_cp.type = 0x01;
			hci_req_add(&req, HCI_OP_READ_TX_POWER,
				    sizeof(req_txp_cp), &req_txp_cp);
		}

		err = hci_req_run(&req, conn_info_refresh_complete);
		if (err < 0)
			goto unlock;

		cmd = mgmt_pending_add(sk, MGMT_OP_GET_CONN_INFO, hdev,
				       data, len);
		if (!cmd) {
			err = -ENOMEM;
			goto unlock;
		}

		/* Hold the connection until get_conn_info_complete() runs */
		hci_conn_hold(conn);
		cmd->user_data = conn;

		conn->conn_info_timestamp = jiffies;
	} else {
		/* Cache is valid, just reply with values cached in hci_conn */
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;

		err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
				   MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4772
/* Dispatch table for incoming mgmt commands, indexed by opcode.
 *
 * For each opcode: the handler function, whether the command carries a
 * variable-length payload, and the expected parameter length. When
 * var_len is true, data_len is the minimum acceptable length; otherwise
 * the payload must match data_len exactly. mgmt_control() enforces this
 * before dispatching. Slot 0x0000 is unused (no such command).
 */
static const struct mgmt_handler {
	int (*func) (struct sock *sk, struct hci_dev *hdev, void *data,
		     u16 data_len);
	bool var_len;
	size_t data_len;
} mgmt_handlers[] = {
	{ NULL }, /* 0x0000 (no command) */
	{ read_version, false, MGMT_READ_VERSION_SIZE },
	{ read_commands, false, MGMT_READ_COMMANDS_SIZE },
	{ read_index_list, false, MGMT_READ_INDEX_LIST_SIZE },
	{ read_controller_info, false, MGMT_READ_INFO_SIZE },
	{ set_powered, false, MGMT_SETTING_SIZE },
	{ set_discoverable, false, MGMT_SET_DISCOVERABLE_SIZE },
	{ set_connectable, false, MGMT_SETTING_SIZE },
	{ set_fast_connectable, false, MGMT_SETTING_SIZE },
	{ set_pairable, false, MGMT_SETTING_SIZE },
	{ set_link_security, false, MGMT_SETTING_SIZE },
	{ set_ssp, false, MGMT_SETTING_SIZE },
	{ set_hs, false, MGMT_SETTING_SIZE },
	{ set_le, false, MGMT_SETTING_SIZE },
	{ set_dev_class, false, MGMT_SET_DEV_CLASS_SIZE },
	{ set_local_name, false, MGMT_SET_LOCAL_NAME_SIZE },
	{ add_uuid, false, MGMT_ADD_UUID_SIZE },
	{ remove_uuid, false, MGMT_REMOVE_UUID_SIZE },
	{ load_link_keys, true, MGMT_LOAD_LINK_KEYS_SIZE },
	{ load_long_term_keys, true, MGMT_LOAD_LONG_TERM_KEYS_SIZE },
	{ disconnect, false, MGMT_DISCONNECT_SIZE },
	{ get_connections, false, MGMT_GET_CONNECTIONS_SIZE },
	{ pin_code_reply, false, MGMT_PIN_CODE_REPLY_SIZE },
	{ pin_code_neg_reply, false, MGMT_PIN_CODE_NEG_REPLY_SIZE },
	{ set_io_capability, false, MGMT_SET_IO_CAPABILITY_SIZE },
	{ pair_device, false, MGMT_PAIR_DEVICE_SIZE },
	{ cancel_pair_device, false, MGMT_CANCEL_PAIR_DEVICE_SIZE },
	{ unpair_device, false, MGMT_UNPAIR_DEVICE_SIZE },
	{ user_confirm_reply, false, MGMT_USER_CONFIRM_REPLY_SIZE },
	{ user_confirm_neg_reply, false, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
	{ user_passkey_reply, false, MGMT_USER_PASSKEY_REPLY_SIZE },
	{ user_passkey_neg_reply, false, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
	{ read_local_oob_data, false, MGMT_READ_LOCAL_OOB_DATA_SIZE },
	{ add_remote_oob_data, true, MGMT_ADD_REMOTE_OOB_DATA_SIZE },
	{ remove_remote_oob_data, false, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
	{ start_discovery, false, MGMT_START_DISCOVERY_SIZE },
	{ stop_discovery, false, MGMT_STOP_DISCOVERY_SIZE },
	{ confirm_name, false, MGMT_CONFIRM_NAME_SIZE },
	{ block_device, false, MGMT_BLOCK_DEVICE_SIZE },
	{ unblock_device, false, MGMT_UNBLOCK_DEVICE_SIZE },
	{ set_device_id, false, MGMT_SET_DEVICE_ID_SIZE },
	{ set_advertising, false, MGMT_SETTING_SIZE },
	{ set_bredr, false, MGMT_SETTING_SIZE },
	{ set_static_address, false, MGMT_SET_STATIC_ADDRESS_SIZE },
	{ set_scan_params, false, MGMT_SET_SCAN_PARAMS_SIZE },
	{ set_secure_conn, false, MGMT_SETTING_SIZE },
	{ set_debug_keys, false, MGMT_SETTING_SIZE },
	{ set_privacy, false, MGMT_SET_PRIVACY_SIZE },
	{ load_irks, true, MGMT_LOAD_IRKS_SIZE },
	{ get_conn_info, false, MGMT_GET_CONN_INFO_SIZE },
};
4830
4831
/* Entry point for mgmt messages received from a control socket.
 *
 * Copies the message into a kernel buffer, validates the header (opcode,
 * controller index, declared length), looks up the handler in
 * mgmt_handlers[] and dispatches. Returns the number of bytes consumed
 * on success or a negative errno; protocol-level failures are reported
 * back to the socket via cmd_status() instead.
 */
int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
{
	void *buf;
	u8 *cp;
	struct mgmt_hdr *hdr;
	u16 opcode, index, len;
	struct hci_dev *hdev = NULL;
	const struct mgmt_handler *handler;
	int err;

	BT_DBG("got %zu bytes", msglen);

	if (msglen < sizeof(*hdr))
		return -EINVAL;

	buf = kmalloc(msglen, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (memcpy_fromiovec(buf, msg->msg_iov, msglen)) {
		err = -EFAULT;
		goto done;
	}

	hdr = buf;
	opcode = __le16_to_cpu(hdr->opcode);
	index = __le16_to_cpu(hdr->index);
	len = __le16_to_cpu(hdr->len);

	/* Declared payload length must match what was actually received */
	if (len != msglen - sizeof(*hdr)) {
		err = -EINVAL;
		goto done;
	}

	if (index != MGMT_INDEX_NONE) {
		hdev = hci_dev_get(index);
		if (!hdev) {
			err = cmd_status(sk, index, opcode,
					 MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		/* Controllers still in setup, or claimed by a user
		 * channel, are not available over mgmt.
		 */
		if (test_bit(HCI_SETUP, &hdev->dev_flags) ||
		    test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
			err = cmd_status(sk, index, opcode,
					 MGMT_STATUS_INVALID_INDEX);
			goto done;
		}
	}

	if (opcode >= ARRAY_SIZE(mgmt_handlers) ||
	    mgmt_handlers[opcode].func == NULL) {
		BT_DBG("Unknown op %u", opcode);
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_UNKNOWN_COMMAND);
		goto done;
	}

	/* Opcodes below MGMT_OP_READ_INFO are global and must not carry a
	 * controller index; all others require one.
	 */
	if ((hdev && opcode < MGMT_OP_READ_INFO) ||
	    (!hdev && opcode >= MGMT_OP_READ_INFO)) {
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_INVALID_INDEX);
		goto done;
	}

	handler = &mgmt_handlers[opcode];

	/* Exact length for fixed-size commands, minimum for variable ones */
	if ((handler->var_len && len < handler->data_len) ||
	    (!handler->var_len && len != handler->data_len)) {
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_INVALID_PARAMS);
		goto done;
	}

	if (hdev)
		mgmt_init_hdev(sk, hdev);

	cp = buf + sizeof(*hdr);

	err = handler->func(sk, hdev, cp, len);
	if (err < 0)
		goto done;

	err = msglen;

done:
	if (hdev)
		hci_dev_put(hdev);

	kfree(buf);
	return err;
}
4924
4925 void mgmt_index_added(struct hci_dev *hdev)
4926 {
4927 if (hdev->dev_type != HCI_BREDR)
4928 return;
4929
4930 mgmt_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, NULL);
4931 }
4932
4933 void mgmt_index_removed(struct hci_dev *hdev)
4934 {
4935 u8 status = MGMT_STATUS_INVALID_INDEX;
4936
4937 if (hdev->dev_type != HCI_BREDR)
4938 return;
4939
4940 mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status);
4941
4942 mgmt_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, NULL);
4943 }
4944
4945 /* This function requires the caller holds hdev->lock */
4946 static void restart_le_auto_conns(struct hci_dev *hdev)
4947 {
4948 struct hci_conn_params *p;
4949
4950 list_for_each_entry(p, &hdev->le_conn_params, list) {
4951 if (p->auto_connect == HCI_AUTO_CONN_ALWAYS)
4952 hci_pend_le_conn_add(hdev, &p->addr, p->addr_type);
4953 }
4954 }
4955
/* HCI request completion callback for the power-on sequence built by
 * powered_update_hci(). Restarts LE auto-connections, responds to all
 * pending SET_POWERED commands and broadcasts the new settings.
 */
static void powered_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	restart_le_auto_conns(hdev);

	/* settings_rsp records the first requester's socket in match.sk
	 * so it can be skipped when broadcasting new_settings below.
	 */
	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	hci_dev_unlock(hdev);

	/* Drop the reference taken by settings_rsp, if any */
	if (match.sk)
		sock_put(match.sk);
}
4975
/* Build and run the HCI request that brings the controller's state in
 * line with the mgmt-level settings after power on: SSP mode, LE host
 * support, advertising/scan-response data, authentication, scan mode,
 * class of device, name and EIR.
 *
 * Returns the result of hci_req_run(); powered_complete() runs when the
 * request finishes.
 */
static int powered_update_hci(struct hci_dev *hdev)
{
	struct hci_request req;
	u8 link_sec;

	hci_req_init(&req, hdev);

	/* Enable SSP on the controller if mgmt wants it but the
	 * controller doesn't have it active yet.
	 */
	if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) &&
	    !lmp_host_ssp_capable(hdev)) {
		u8 ssp = 1;

		hci_req_add(&req, HCI_OP_WRITE_SSP_MODE, 1, &ssp);
	}

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
	    lmp_bredr_capable(hdev)) {
		struct hci_cp_write_le_host_supported cp;

		cp.le = 1;
		cp.simul = lmp_le_br_capable(hdev);

		/* Check first if we already have the right
		 * host state (host features set)
		 */
		if (cp.le != lmp_host_le_capable(hdev) ||
		    cp.simul != lmp_host_le_br_capable(hdev))
			hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
				    sizeof(cp), &cp);
	}

	if (lmp_le_capable(hdev)) {
		/* Make sure the controller has a good default for
		 * advertising data. This also applies to the case
		 * where BR/EDR was toggled during the AUTO_OFF phase.
		 */
		if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
			update_adv_data(&req);
			update_scan_rsp_data(&req);
		}

		if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
			enable_advertising(&req);
	}

	/* Sync the controller's authentication requirement with the
	 * mgmt-level link security setting.
	 */
	link_sec = test_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
	if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
		hci_req_add(&req, HCI_OP_WRITE_AUTH_ENABLE,
			    sizeof(link_sec), &link_sec);

	if (lmp_bredr_capable(hdev)) {
		if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
			set_bredr_scan(&req);
		update_class(&req);
		update_name(&req);
		update_eir(&req);
	}

	return hci_req_run(&req, powered_complete);
}
5035
/* Notify the mgmt layer of a controller power state change.
 *
 * On power-on, kick off powered_update_hci(); if that queued a request,
 * responses and settings events are deferred to powered_complete().
 * On power-off (or if no request could be queued), respond to pending
 * commands and broadcast the new settings immediately.
 */
int mgmt_powered(struct hci_dev *hdev, u8 powered)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 status_not_powered = MGMT_STATUS_NOT_POWERED;
	u8 zero_cod[] = { 0, 0, 0 };
	int err;

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		return 0;

	if (powered) {
		/* 0 means the request was queued; powered_complete()
		 * will do the mgmt-level signalling.
		 */
		if (powered_update_hci(hdev) == 0)
			return 0;

		mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp,
				     &match);
		goto new_settings;
	}

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
	/* Fail all other pending commands - the controller is down */
	mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status_not_powered);

	/* Powering off resets the class of device; announce it if it
	 * was previously non-zero.
	 */
	if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0)
		mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
			   zero_cod, sizeof(zero_cod), NULL);

new_settings:
	err = new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	return err;
}
5070
5071 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
5072 {
5073 struct pending_cmd *cmd;
5074 u8 status;
5075
5076 cmd = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
5077 if (!cmd)
5078 return;
5079
5080 if (err == -ERFKILL)
5081 status = MGMT_STATUS_RFKILLED;
5082 else
5083 status = MGMT_STATUS_FAILED;
5084
5085 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
5086
5087 mgmt_pending_remove(cmd);
5088 }
5089
/* Called when the limited/general discoverable timeout expires: clear
 * the discoverable flags, restore page-scan-only mode on BR/EDR, update
 * class and advertising data, and broadcast the new settings.
 */
void mgmt_discoverable_timeout(struct hci_dev *hdev)
{
	struct hci_request req;

	hci_dev_lock(hdev);

	/* When discoverable timeout triggers, then just make sure
	 * the limited discoverable flag is cleared. Even in the case
	 * of a timeout triggered from general discoverable, it is
	 * safe to unconditionally clear the flag.
	 */
	clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);

	hci_req_init(&req, hdev);
	if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		/* Drop inquiry scan, keep page scan (still connectable) */
		u8 scan = SCAN_PAGE;
		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE,
			    sizeof(scan), &scan);
	}
	update_class(&req);
	update_adv_data(&req);
	hci_req_run(&req, NULL);

	hdev->discov_timeout = 0;

	new_settings(hdev, NULL);

	hci_dev_unlock(hdev);
}
5120
/* Sync the mgmt-level discoverable flag with a controller-side change
 * and broadcast new settings if the flag actually changed.
 */
void mgmt_discoverable(struct hci_dev *hdev, u8 discoverable)
{
	bool changed;

	/* Nothing needed here if there's a pending command since that
	 * commands request completion callback takes care of everything
	 * necessary.
	 */
	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev))
		return;

	/* Powering off may clear the scan mode - don't let that interfere */
	if (!discoverable && mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
		return;

	if (discoverable) {
		changed = !test_and_set_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
	} else {
		/* Limited discoverable implies discoverable, so clear it too */
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
		changed = test_and_clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
	}

	if (changed) {
		struct hci_request req;

		/* In case this change in discoverable was triggered by
		 * a disabling of connectable there could be a need to
		 * update the advertising flags.
		 */
		hci_req_init(&req, hdev);
		update_adv_data(&req);
		hci_req_run(&req, NULL);

		new_settings(hdev, NULL);
	}
}
5157
5158 void mgmt_connectable(struct hci_dev *hdev, u8 connectable)
5159 {
5160 bool changed;
5161
5162 /* Nothing needed here if there's a pending command since that
5163 * commands request completion callback takes care of everything
5164 * necessary.
5165 */
5166 if (mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev))
5167 return;
5168
5169 /* Powering off may clear the scan mode - don't let that interfere */
5170 if (!connectable && mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
5171 return;
5172
5173 if (connectable)
5174 changed = !test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
5175 else
5176 changed = test_and_clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
5177
5178 if (changed)
5179 new_settings(hdev, NULL);
5180 }
5181
5182 void mgmt_advertising(struct hci_dev *hdev, u8 advertising)
5183 {
5184 /* Powering off may stop advertising - don't let that interfere */
5185 if (!advertising && mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
5186 return;
5187
5188 if (advertising)
5189 set_bit(HCI_ADVERTISING, &hdev->dev_flags);
5190 else
5191 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
5192 }
5193
5194 void mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status)
5195 {
5196 u8 mgmt_err = mgmt_status(status);
5197
5198 if (scan & SCAN_PAGE)
5199 mgmt_pending_foreach(MGMT_OP_SET_CONNECTABLE, hdev,
5200 cmd_status_rsp, &mgmt_err);
5201
5202 if (scan & SCAN_INQUIRY)
5203 mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, hdev,
5204 cmd_status_rsp, &mgmt_err);
5205 }
5206
/* Emit MGMT_EV_NEW_LINK_KEY for a freshly created BR/EDR link key.
 * store_hint tells user space whether the key should be persisted.
 */
void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
		       bool persistent)
{
	struct mgmt_ev_new_link_key ev;

	memset(&ev, 0, sizeof(ev));

	ev.store_hint = persistent;
	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = BDADDR_BREDR;
	ev.key.type = key->type;
	memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
	ev.key.pin_len = key->pin_len;

	mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
}
5223
/* Emit MGMT_EV_NEW_LONG_TERM_KEY for a freshly distributed LE LTK. */
void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
{
	struct mgmt_ev_new_long_term_key ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * to store long term keys. Their addresses will change the
	 * next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the long term key is stored. If the remote
	 * identity is known, the long term keys are internally
	 * mapped to the identity address. So allow static random
	 * and public addresses here.
	 */
	if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (key->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
	ev.key.type = key->authenticated;
	ev.key.enc_size = key->enc_size;
	ev.key.ediv = key->ediv;
	ev.key.rand = key->rand;

	if (key->type == HCI_SMP_LTK)
		ev.key.master = 1;

	memcpy(ev.key.val, key->val, sizeof(key->val));

	mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
}
5261
/* Emit MGMT_EV_NEW_IRK for a freshly distributed identity resolving key. */
void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk)
{
	struct mgmt_ev_new_irk ev;

	memset(&ev, 0, sizeof(ev));

	/* For identity resolving keys from devices that are already
	 * using a public address or static random address, do not
	 * ask for storing this key. The identity resolving key really
	 * is only mandatory for devices using resolvable random
	 * addresses.
	 *
	 * Storing all identity resolving keys has the downside that
	 * they will be also loaded on the next boot of the system.
	 * More identity resolving keys, means more time during
	 * scanning is needed to actually resolve these addresses.
	 */
	if (bacmp(&irk->rpa, BDADDR_ANY))
		ev.store_hint = 0x01;
	else
		ev.store_hint = 0x00;

	bacpy(&ev.rpa, &irk->rpa);
	bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
	ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
	memcpy(ev.irk.val, irk->val, sizeof(irk->val));

	mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
}
5291
/* Emit MGMT_EV_NEW_CSRK for a freshly distributed signature resolving key. */
void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
		   bool persistent)
{
	struct mgmt_ev_new_csrk ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * to store signature resolving keys. Their addresses will change
	 * the next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the signature resolving key is stored. So allow
	 * static random and public addresses here.
	 */
	if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
	ev.key.master = csrk->master;
	memcpy(ev.key.val, csrk->val, sizeof(csrk->val));

	mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
}
5321
5322 static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data,
5323 u8 data_len)
5324 {
5325 eir[eir_len++] = sizeof(type) + data_len;
5326 eir[eir_len++] = type;
5327 memcpy(&eir[eir_len], data, data_len);
5328 eir_len += data_len;
5329
5330 return eir_len;
5331 }
5332
/* Emit MGMT_EV_DEVICE_CONNECTED with an EIR blob carrying the remote
 * name and class of device, when known.
 */
void mgmt_device_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
			   u8 addr_type, u32 flags, u8 *name, u8 name_len,
			   u8 *dev_class)
{
	/* Event header plus variable-length EIR data share this buffer */
	char buf[512];
	struct mgmt_ev_device_connected *ev = (void *) buf;
	u16 eir_len = 0;

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);

	ev->flags = __cpu_to_le32(flags);

	if (name_len > 0)
		eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
					  name, name_len);

	/* Only include a class of device if it is non-zero */
	if (dev_class && memcmp(dev_class, "\0\0\0", 3) != 0)
		eir_len = eir_append_data(ev->eir, eir_len,
					  EIR_CLASS_OF_DEV, dev_class, 3);

	ev->eir_len = cpu_to_le16(eir_len);

	mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
		   sizeof(*ev) + eir_len, NULL);
}
5359
/* mgmt_pending_foreach callback: complete a pending DISCONNECT command
 * with success and hand the issuer's socket back through *data (with a
 * reference held) so the caller can skip it when broadcasting.
 */
static void disconnect_rsp(struct pending_cmd *cmd, void *data)
{
	struct mgmt_cp_disconnect *cp = cmd->param;
	struct sock **sk = data;
	struct mgmt_rp_disconnect rp;

	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT, 0, &rp,
		     sizeof(rp));

	/* Caller is responsible for the matching sock_put() */
	*sk = cmd->sk;
	sock_hold(*sk);

	mgmt_pending_remove(cmd);
}
5377
/* mgmt_pending_foreach callback: complete a pending UNPAIR_DEVICE
 * command with success and emit the device-unpaired event. data is the
 * hci_dev the commands belong to.
 */
static void unpair_device_rsp(struct pending_cmd *cmd, void *data)
{
	struct hci_dev *hdev = data;
	struct mgmt_cp_unpair_device *cp = cmd->param;
	struct mgmt_rp_unpair_device rp;

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);

	cmd_complete(cmd->sk, cmd->index, cmd->opcode, 0, &rp, sizeof(rp));

	mgmt_pending_remove(cmd);
}
5394
/* Handle a link disconnection: complete pending DISCONNECT and
 * UNPAIR_DEVICE commands and, for ACL/LE links that were visible to
 * mgmt, emit MGMT_EV_DEVICE_DISCONNECTED (skipping the socket that
 * issued the disconnect).
 */
void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u8 reason,
			      bool mgmt_connected)
{
	struct mgmt_ev_device_disconnected ev;
	struct pending_cmd *power_off;
	struct sock *sk = NULL;

	power_off = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
	if (power_off) {
		struct mgmt_mode *cp = power_off->param;

		/* The connection is still in hci_conn_hash so test for 1
		 * instead of 0 to know if this is the last one.
		 */
		if (!cp->val && hci_conn_count(hdev) == 1) {
			/* Last connection gone - run the deferred
			 * power-off immediately.
			 */
			cancel_delayed_work(&hdev->power_off);
			queue_work(hdev->req_workqueue, &hdev->power_off.work);
		}
	}

	if (!mgmt_connected)
		return;

	if (link_type != ACL_LINK && link_type != LE_LINK)
		return;

	/* disconnect_rsp stores the issuer's socket in sk (ref held) */
	mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.reason = reason;

	mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);

	if (sk)
		sock_put(sk);

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);
}
5436
5437 void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
5438 u8 link_type, u8 addr_type, u8 status)
5439 {
5440 u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
5441 struct mgmt_cp_disconnect *cp;
5442 struct mgmt_rp_disconnect rp;
5443 struct pending_cmd *cmd;
5444
5445 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
5446 hdev);
5447
5448 cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, hdev);
5449 if (!cmd)
5450 return;
5451
5452 cp = cmd->param;
5453
5454 if (bacmp(bdaddr, &cp->addr.bdaddr))
5455 return;
5456
5457 if (cp->addr.type != bdaddr_type)
5458 return;
5459
5460 bacpy(&rp.addr.bdaddr, bdaddr);
5461 rp.addr.type = bdaddr_type;
5462
5463 cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT,
5464 mgmt_status(status), &rp, sizeof(rp));
5465
5466 mgmt_pending_remove(cmd);
5467 }
5468
/* Report a failed outgoing connection attempt via
 * MGMT_EV_CONNECT_FAILED, expediting a deferred power-off if this was
 * the last remaining connection.
 */
void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
			 u8 addr_type, u8 status)
{
	struct mgmt_ev_connect_failed ev;
	struct pending_cmd *power_off;

	power_off = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
	if (power_off) {
		struct mgmt_mode *cp = power_off->param;

		/* The connection is still in hci_conn_hash so test for 1
		 * instead of 0 to know if this is the last one.
		 */
		if (!cp->val && hci_conn_count(hdev) == 1) {
			cancel_delayed_work(&hdev->power_off);
			queue_work(hdev->req_workqueue, &hdev->power_off.work);
		}
	}

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.status = mgmt_status(status);

	mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
}
5494
5495 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
5496 {
5497 struct mgmt_ev_pin_code_request ev;
5498
5499 bacpy(&ev.addr.bdaddr, bdaddr);
5500 ev.addr.type = BDADDR_BREDR;
5501 ev.secure = secure;
5502
5503 mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
5504 }
5505
/* Complete a pending PIN_CODE_REPLY command with the controller's
 * status for the reply.
 */
void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				  u8 status)
{
	struct pending_cmd *cmd;
	struct mgmt_rp_pin_code_reply rp;

	cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
	if (!cmd)
		return;

	bacpy(&rp.addr.bdaddr, bdaddr);
	rp.addr.type = BDADDR_BREDR;

	cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
		     mgmt_status(status), &rp, sizeof(rp));

	mgmt_pending_remove(cmd);
}
5524
/* Complete a pending PIN_CODE_NEG_REPLY command with the controller's
 * status for the negative reply.
 */
void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				      u8 status)
{
	struct pending_cmd *cmd;
	struct mgmt_rp_pin_code_reply rp;

	cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
	if (!cmd)
		return;

	bacpy(&rp.addr.bdaddr, bdaddr);
	rp.addr.type = BDADDR_BREDR;

	cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_NEG_REPLY,
		     mgmt_status(status), &rp, sizeof(rp));

	mgmt_pending_remove(cmd);
}
5543
/* Ask user space to confirm a numeric comparison value during pairing.
 * confirm_hint indicates whether a simple yes/no confirmation (rather
 * than a value comparison) is expected.
 */
int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u32 value,
			      u8 confirm_hint)
{
	struct mgmt_ev_user_confirm_request ev;

	BT_DBG("%s", hdev->name);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.confirm_hint = confirm_hint;
	ev.value = cpu_to_le32(value);

	return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
			  NULL);
}
5560
/* Ask user space to enter a passkey for the pairing in progress. */
int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type)
{
	struct mgmt_ev_user_passkey_request ev;

	BT_DBG("%s", hdev->name);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);

	return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
			  NULL);
}
5574
/* Common completion path for the four user confirm/passkey reply
 * variants: find the pending command for the given opcode, complete it
 * with the controller's status and remove it. Returns -ENOENT when no
 * such command is pending.
 */
static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				      u8 link_type, u8 addr_type, u8 status,
				      u8 opcode)
{
	struct pending_cmd *cmd;
	struct mgmt_rp_user_confirm_reply rp;
	int err;

	cmd = mgmt_pending_find(opcode, hdev);
	if (!cmd)
		return -ENOENT;

	bacpy(&rp.addr.bdaddr, bdaddr);
	rp.addr.type = link_to_bdaddr(link_type, addr_type);
	err = cmd_complete(cmd->sk, hdev->id, opcode, mgmt_status(status),
			   &rp, sizeof(rp));

	mgmt_pending_remove(cmd);

	return err;
}
5596
/* Completion for USER_CONFIRM_REPLY; see user_pairing_resp_complete(). */
int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_CONFIRM_REPLY);
}
5603
/* Completion for USER_CONFIRM_NEG_REPLY; see user_pairing_resp_complete(). */
int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_CONFIRM_NEG_REPLY);
}
5611
/* Completion for USER_PASSKEY_REPLY; see user_pairing_resp_complete(). */
int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_PASSKEY_REPLY);
}
5618
/* Completion for USER_PASSKEY_NEG_REPLY; see user_pairing_resp_complete(). */
int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_PASSKEY_NEG_REPLY);
}
5626
/* Notify user space of the passkey to display during pairing; entered
 * reports how many digits the remote side has keyed in so far.
 */
int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
			     u8 link_type, u8 addr_type, u32 passkey,
			     u8 entered)
{
	struct mgmt_ev_passkey_notify ev;

	BT_DBG("%s", hdev->name);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.passkey = __cpu_to_le32(passkey);
	ev.entered = entered;

	return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
}
5642
/* Emit MGMT_EV_AUTH_FAILED with the mapped mgmt status for a failed
 * authentication attempt.
 */
void mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		      u8 addr_type, u8 status)
{
	struct mgmt_ev_auth_failed ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.status = mgmt_status(status);

	mgmt_event(MGMT_EV_AUTH_FAILED, hdev, &ev, sizeof(ev), NULL);
}
5654
/* HCI Write Auth Enable completed: sync HCI_LINK_SECURITY with the
 * controller's HCI_AUTH state, respond to pending SET_LINK_SECURITY
 * commands and broadcast new settings on change. On error, just fail
 * the pending commands.
 */
void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	if (test_bit(HCI_AUTH, &hdev->flags))
		changed = !test_and_set_bit(HCI_LINK_SECURITY,
					    &hdev->dev_flags);
	else
		changed = test_and_clear_bit(HCI_LINK_SECURITY,
					     &hdev->dev_flags);

	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
			     &match);

	if (changed)
		new_settings(hdev, match.sk);

	/* Drop the reference taken by settings_rsp, if any */
	if (match.sk)
		sock_put(match.sk);
}
5683
/* Wipe the cached EIR data and queue an all-zero HCI Write EIR command,
 * provided the controller supports extended inquiry response.
 */
static void clear_eir(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_eir cp;

	if (!lmp_ext_inq_capable(hdev))
		return;

	memset(hdev->eir, 0, sizeof(hdev->eir));

	memset(&cp, 0, sizeof(cp));

	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
}
5698
/* HCI Write SSP Mode completed: sync HCI_SSP_ENABLED (and the dependent
 * HCI_HS_ENABLED) flags, respond to pending SET_SSP commands, broadcast
 * new settings on change and update/clear the EIR accordingly.
 */
void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	struct hci_request req;
	bool changed = false;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* Enabling failed: roll back the flags (HS depends on
		 * SSP) and announce the reverted settings.
		 */
		if (enable && test_and_clear_bit(HCI_SSP_ENABLED,
						 &hdev->dev_flags)) {
			clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
			new_settings(hdev, NULL);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	if (enable) {
		changed = !test_and_set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
	} else {
		changed = test_and_clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
		/* HS requires SSP, so it must be cleared too; report a
		 * change if either flag actually flipped.
		 */
		if (!changed)
			changed = test_and_clear_bit(HCI_HS_ENABLED,
						     &hdev->dev_flags);
		else
			clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	hci_req_init(&req, hdev);

	if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
		update_eir(&req);
	else
		clear_eir(&req);

	hci_req_run(&req, NULL);
}
5747
/* Secure Connections enable/disable completed on the controller: sync
 * HCI_SC_ENABLED (and the dependent HCI_SC_ONLY) flags, respond to
 * pending SET_SECURE_CONN commands and broadcast new settings on
 * change. On error, roll back and fail the pending commands.
 */
void mgmt_sc_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed = false;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		if (enable) {
			if (test_and_clear_bit(HCI_SC_ENABLED,
					       &hdev->dev_flags))
				new_settings(hdev, NULL);
			/* SC-only mode cannot stand without SC */
			clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SECURE_CONN, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	if (enable) {
		changed = !test_and_set_bit(HCI_SC_ENABLED, &hdev->dev_flags);
	} else {
		changed = test_and_clear_bit(HCI_SC_ENABLED, &hdev->dev_flags);
		clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SECURE_CONN, hdev,
			     settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
5784
5785 static void sk_lookup(struct pending_cmd *cmd, void *data)
5786 {
5787 struct cmd_lookup *match = data;
5788
5789 if (match->sk == NULL) {
5790 match->sk = cmd->sk;
5791 sock_hold(match->sk);
5792 }
5793 }
5794
/* Class-of-device update finished: pick up a requester socket from any
 * of the commands that may have triggered it (SET_DEV_CLASS, ADD_UUID,
 * REMOVE_UUID) and, on success, broadcast the new class to everyone
 * but that socket.
 */
void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
				    u8 status)
{
	struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };

	mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);

	if (!status)
		mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class, 3,
			   NULL);

	/* Drop the reference taken by sk_lookup, if any */
	if (match.sk)
		sock_put(match.sk);
}
5811
/* Local name update completed on the controller: record the name and
 * emit MGMT_EV_LOCAL_NAME_CHANGED, unless the write happened as part of
 * the power-on sequence (in which case no event is sent).
 */
void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
{
	struct mgmt_cp_set_local_name ev;
	struct pending_cmd *cmd;

	if (status)
		return;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);

	cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd) {
		/* No mgmt command requested this - the change came from
		 * the controller side, so cache the name here.
		 */
		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));

		/* If this is a HCI command related to powering on the
		 * HCI dev don't send any mgmt signals.
		 */
		if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
			return;
	}

	mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
		   cmd ? cmd->sk : NULL);
}
5838
/* HCI Read Local OOB Data completed: complete the pending
 * READ_LOCAL_OOB_DATA command. When Secure Connections is enabled and
 * the controller provided 256-bit values, reply with the extended
 * (192+256 bit) response; otherwise reply with the legacy 192-bit one.
 */
void mgmt_read_local_oob_data_complete(struct hci_dev *hdev, u8 *hash192,
				       u8 *randomizer192, u8 *hash256,
				       u8 *randomizer256, u8 status)
{
	struct pending_cmd *cmd;

	BT_DBG("%s status %u", hdev->name, status);

	cmd = mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
	if (!cmd)
		return;

	if (status) {
		cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
			   mgmt_status(status));
	} else {
		if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags) &&
		    hash256 && randomizer256) {
			struct mgmt_rp_read_local_oob_ext_data rp;

			memcpy(rp.hash192, hash192, sizeof(rp.hash192));
			memcpy(rp.randomizer192, randomizer192,
			       sizeof(rp.randomizer192));

			memcpy(rp.hash256, hash256, sizeof(rp.hash256));
			memcpy(rp.randomizer256, randomizer256,
			       sizeof(rp.randomizer256));

			cmd_complete(cmd->sk, hdev->id,
				     MGMT_OP_READ_LOCAL_OOB_DATA, 0,
				     &rp, sizeof(rp));
		} else {
			struct mgmt_rp_read_local_oob_data rp;

			memcpy(rp.hash, hash192, sizeof(rp.hash));
			memcpy(rp.randomizer, randomizer192,
			       sizeof(rp.randomizer));

			cmd_complete(cmd->sk, hdev->id,
				     MGMT_OP_READ_LOCAL_OOB_DATA, 0,
				     &rp, sizeof(rp));
		}
	}

	mgmt_pending_remove(cmd);
}
5885
/* Send a Device Found event to user space.
 *
 * Only reported while discovery is active. If an IRK matches the
 * reported address, the event carries the identity address (and its
 * type) instead of the address seen on air. The EIR data, an optional
 * appended Class of Device field and any scan response data are
 * concatenated into the event's variable-length eir field.
 */
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		       u8 addr_type, u8 *dev_class, s8 rssi, u8 cfm_name,
		       u8 ssp, u8 *eir, u16 eir_len, u8 *scan_rsp,
		       u8 scan_rsp_len)
{
	char buf[512];
	struct mgmt_ev_device_found *ev = (void *) buf;
	struct smp_irk *irk;
	size_t ev_size;

	if (!hci_discovery_active(hdev))
		return;

	/* Make sure that the buffer is big enough. The 5 extra bytes
	 * are for the potential CoD field.
	 */
	if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
		return;

	memset(buf, 0, sizeof(buf));

	/* Prefer the identity address when an IRK resolves the
	 * reported (private) address.
	 */
	irk = hci_get_irk(hdev, bdaddr, addr_type);
	if (irk) {
		bacpy(&ev->addr.bdaddr, &irk->bdaddr);
		ev->addr.type = link_to_bdaddr(link_type, irk->addr_type);
	} else {
		bacpy(&ev->addr.bdaddr, bdaddr);
		ev->addr.type = link_to_bdaddr(link_type, addr_type);
	}

	ev->rssi = rssi;
	if (cfm_name)
		ev->flags |= cpu_to_le32(MGMT_DEV_FOUND_CONFIRM_NAME);
	if (!ssp)
		ev->flags |= cpu_to_le32(MGMT_DEV_FOUND_LEGACY_PAIRING);

	if (eir_len > 0)
		memcpy(ev->eir, eir, eir_len);

	/* Append a CoD field (2 header bytes + 3 data bytes — the 5
	 * extra bytes accounted for above) unless one is already
	 * present in the EIR data.
	 */
	if (dev_class && !eir_has_data_type(ev->eir, eir_len, EIR_CLASS_OF_DEV))
		eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
					  dev_class, 3);

	if (scan_rsp_len > 0)
		memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
	ev_size = sizeof(*ev) + eir_len + scan_rsp_len;

	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
}
5937
5938 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
5939 u8 addr_type, s8 rssi, u8 *name, u8 name_len)
5940 {
5941 struct mgmt_ev_device_found *ev;
5942 char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
5943 u16 eir_len;
5944
5945 ev = (struct mgmt_ev_device_found *) buf;
5946
5947 memset(buf, 0, sizeof(buf));
5948
5949 bacpy(&ev->addr.bdaddr, bdaddr);
5950 ev->addr.type = link_to_bdaddr(link_type, addr_type);
5951 ev->rssi = rssi;
5952
5953 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
5954 name_len);
5955
5956 ev->eir_len = cpu_to_le16(eir_len);
5957
5958 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
5959 }
5960
5961 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
5962 {
5963 struct mgmt_ev_discovering ev;
5964 struct pending_cmd *cmd;
5965
5966 BT_DBG("%s discovering %u", hdev->name, discovering);
5967
5968 if (discovering)
5969 cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
5970 else
5971 cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
5972
5973 if (cmd != NULL) {
5974 u8 type = hdev->discovery.type;
5975
5976 cmd_complete(cmd->sk, hdev->id, cmd->opcode, 0, &type,
5977 sizeof(type));
5978 mgmt_pending_remove(cmd);
5979 }
5980
5981 memset(&ev, 0, sizeof(ev));
5982 ev.type = hdev->discovery.type;
5983 ev.discovering = discovering;
5984
5985 mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
5986 }
5987
5988 int mgmt_device_blocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
5989 {
5990 struct pending_cmd *cmd;
5991 struct mgmt_ev_device_blocked ev;
5992
5993 cmd = mgmt_pending_find(MGMT_OP_BLOCK_DEVICE, hdev);
5994
5995 bacpy(&ev.addr.bdaddr, bdaddr);
5996 ev.addr.type = type;
5997
5998 return mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &ev, sizeof(ev),
5999 cmd ? cmd->sk : NULL);
6000 }
6001
6002 int mgmt_device_unblocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
6003 {
6004 struct pending_cmd *cmd;
6005 struct mgmt_ev_device_unblocked ev;
6006
6007 cmd = mgmt_pending_find(MGMT_OP_UNBLOCK_DEVICE, hdev);
6008
6009 bacpy(&ev.addr.bdaddr, bdaddr);
6010 ev.addr.type = type;
6011
6012 return mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &ev, sizeof(ev),
6013 cmd ? cmd->sk : NULL);
6014 }
6015
6016 static void adv_enable_complete(struct hci_dev *hdev, u8 status)
6017 {
6018 BT_DBG("%s status %u", hdev->name, status);
6019
6020 /* Clear the advertising mgmt setting if we failed to re-enable it */
6021 if (status) {
6022 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
6023 new_settings(hdev, NULL);
6024 }
6025 }
6026
6027 void mgmt_reenable_advertising(struct hci_dev *hdev)
6028 {
6029 struct hci_request req;
6030
6031 if (hci_conn_num(hdev, LE_LINK) > 0)
6032 return;
6033
6034 if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags))
6035 return;
6036
6037 hci_req_init(&req, hdev);
6038 enable_advertising(&req);
6039
6040 /* If this fails we have no option but to let user space know
6041 * that we've disabled advertising.
6042 */
6043 if (hci_req_run(&req, adv_enable_complete) < 0) {
6044 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
6045 new_settings(hdev, NULL);
6046 }
6047 }
This page took 0.166892 seconds and 5 git commands to generate.